path: root/drivers
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig14
-rw-r--r--drivers/Makefile7
-rw-r--r--drivers/accel/Kconfig2
-rw-r--r--drivers/accel/Makefile2
-rw-r--r--drivers/accel/amdxdna/Makefile2
-rw-r--r--drivers/accel/amdxdna/TODO1
-rw-r--r--drivers/accel/amdxdna/aie2_ctx.c265
-rw-r--r--drivers/accel/amdxdna/aie2_error.c95
-rw-r--r--drivers/accel/amdxdna/aie2_message.c668
-rw-r--r--drivers/accel/amdxdna/aie2_msg_priv.h88
-rw-r--r--drivers/accel/amdxdna/aie2_pci.c474
-rw-r--r--drivers/accel/amdxdna/aie2_pci.h59
-rw-r--r--drivers/accel/amdxdna/aie2_psp.c4
-rw-r--r--drivers/accel/amdxdna/aie2_smu.c49
-rw-r--r--drivers/accel/amdxdna/amdxdna_ctx.c156
-rw-r--r--drivers/accel/amdxdna/amdxdna_ctx.h55
-rw-r--r--drivers/accel/amdxdna/amdxdna_error.h59
-rw-r--r--drivers/accel/amdxdna/amdxdna_gem.c333
-rw-r--r--drivers/accel/amdxdna/amdxdna_gem.h9
-rw-r--r--drivers/accel/amdxdna/amdxdna_mailbox.c14
-rw-r--r--drivers/accel/amdxdna/amdxdna_mailbox_helper.h6
-rw-r--r--drivers/accel/amdxdna/amdxdna_pci_drv.c155
-rw-r--r--drivers/accel/amdxdna/amdxdna_pci_drv.h10
-rw-r--r--drivers/accel/amdxdna/amdxdna_pm.c94
-rw-r--r--drivers/accel/amdxdna/amdxdna_pm.h18
-rw-r--r--drivers/accel/amdxdna/amdxdna_ubuf.c232
-rw-r--r--drivers/accel/amdxdna/amdxdna_ubuf.h19
-rw-r--r--drivers/accel/amdxdna/npu1_regs.c8
-rw-r--r--drivers/accel/amdxdna/npu2_regs.c2
-rw-r--r--drivers/accel/amdxdna/npu4_regs.c12
-rw-r--r--drivers/accel/amdxdna/npu5_regs.c2
-rw-r--r--drivers/accel/amdxdna/npu6_regs.c2
-rw-r--r--drivers/accel/drm_accel.c16
-rw-r--r--drivers/accel/ethosu/Kconfig11
-rw-r--r--drivers/accel/ethosu/Makefile4
-rw-r--r--drivers/accel/ethosu/ethosu_device.h197
-rw-r--r--drivers/accel/ethosu/ethosu_drv.c403
-rw-r--r--drivers/accel/ethosu/ethosu_drv.h15
-rw-r--r--drivers/accel/ethosu/ethosu_gem.c704
-rw-r--r--drivers/accel/ethosu/ethosu_gem.h46
-rw-r--r--drivers/accel/ethosu/ethosu_job.c497
-rw-r--r--drivers/accel/ethosu/ethosu_job.h40
-rw-r--r--drivers/accel/habanalabs/Kconfig23
-rw-r--r--drivers/accel/habanalabs/common/Makefile5
-rw-r--r--drivers/accel/habanalabs/common/debugfs.c324
-rw-r--r--drivers/accel/habanalabs/common/device.c48
-rw-r--r--drivers/accel/habanalabs/common/habanalabs.h56
-rw-r--r--drivers/accel/habanalabs/common/habanalabs_ioctl.c6
-rw-r--r--drivers/accel/habanalabs/common/hldio.c437
-rw-r--r--drivers/accel/habanalabs/common/hldio.h146
-rw-r--r--drivers/accel/habanalabs/common/memory.c32
-rw-r--r--drivers/accel/habanalabs/common/memory_mgr.c5
-rw-r--r--drivers/accel/habanalabs/common/sysfs.c15
-rw-r--r--drivers/accel/habanalabs/gaudi/gaudi.c19
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2.c388
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2P.h9
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c2
-rw-r--r--drivers/accel/ivpu/Makefile1
-rw-r--r--drivers/accel/ivpu/ivpu_debugfs.c38
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c21
-rw-r--r--drivers/accel/ivpu/ivpu_drv.h20
-rw-r--r--drivers/accel/ivpu/ivpu_fw.c229
-rw-r--r--drivers/accel/ivpu/ivpu_fw.h16
-rw-r--r--drivers/accel/ivpu/ivpu_gem.c161
-rw-r--r--drivers/accel/ivpu/ivpu_gem.h22
-rw-r--r--drivers/accel/ivpu/ivpu_gem_userptr.c213
-rw-r--r--drivers/accel/ivpu/ivpu_hw.c63
-rw-r--r--drivers/accel/ivpu/ivpu_hw.h10
-rw-r--r--drivers/accel/ivpu/ivpu_hw_btrs.c23
-rw-r--r--drivers/accel/ivpu/ivpu_hw_btrs.h2
-rw-r--r--drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h3
-rw-r--r--drivers/accel/ivpu/ivpu_hw_ip.c11
-rw-r--r--drivers/accel/ivpu/ivpu_ipc.c3
-rw-r--r--drivers/accel/ivpu/ivpu_job.c338
-rw-r--r--drivers/accel/ivpu/ivpu_job.h49
-rw-r--r--drivers/accel/ivpu/ivpu_mmu.c2
-rw-r--r--drivers/accel/ivpu/ivpu_mmu_context.c9
-rw-r--r--drivers/accel/ivpu/ivpu_mmu_context.h2
-rw-r--r--drivers/accel/ivpu/ivpu_ms.c25
-rw-r--r--drivers/accel/ivpu/ivpu_pm.c37
-rw-r--r--drivers/accel/ivpu/ivpu_pm.h2
-rw-r--r--drivers/accel/ivpu/ivpu_sysfs.c3
-rw-r--r--drivers/accel/ivpu/vpu_jsm_api.h653
-rw-r--r--drivers/accel/qaic/Kconfig1
-rw-r--r--drivers/accel/qaic/Makefile3
-rw-r--r--drivers/accel/qaic/qaic.h52
-rw-r--r--drivers/accel/qaic/qaic_control.c27
-rw-r--r--drivers/accel/qaic/qaic_data.c177
-rw-r--r--drivers/accel/qaic/qaic_debugfs.c5
-rw-r--r--drivers/accel/qaic/qaic_drv.c125
-rw-r--r--drivers/accel/qaic/qaic_ras.c642
-rw-r--r--drivers/accel/qaic/qaic_ras.h10
-rw-r--r--drivers/accel/qaic/qaic_ssr.c815
-rw-r--r--drivers/accel/qaic/qaic_ssr.h17
-rw-r--r--drivers/accel/qaic/qaic_sysfs.c109
-rw-r--r--drivers/accel/qaic/qaic_timesync.c9
-rw-r--r--drivers/accel/qaic/qaic_timesync.h3
-rw-r--r--drivers/accel/qaic/sahara.c164
-rw-r--r--drivers/accel/rocket/Kconfig24
-rw-r--r--drivers/accel/rocket/Makefile10
-rw-r--r--drivers/accel/rocket/rocket_core.c110
-rw-r--r--drivers/accel/rocket/rocket_core.h64
-rw-r--r--drivers/accel/rocket/rocket_device.c60
-rw-r--r--drivers/accel/rocket/rocket_device.h30
-rw-r--r--drivers/accel/rocket/rocket_drv.c290
-rw-r--r--drivers/accel/rocket/rocket_drv.h32
-rw-r--r--drivers/accel/rocket/rocket_gem.c182
-rw-r--r--drivers/accel/rocket/rocket_gem.h34
-rw-r--r--drivers/accel/rocket/rocket_job.c637
-rw-r--r--drivers/accel/rocket/rocket_job.h52
-rw-r--r--drivers/accel/rocket/rocket_registers.h4404
-rw-r--r--drivers/acpi/Kconfig7
-rw-r--r--drivers/acpi/acpi_dbg.c26
-rw-r--r--drivers/acpi/acpi_mrrm.c46
-rw-r--r--drivers/acpi/acpi_pad.c2
-rw-r--r--drivers/acpi/acpi_processor.c6
-rw-r--r--drivers/acpi/acpi_tad.c78
-rw-r--r--drivers/acpi/acpi_video.c4
-rw-r--r--drivers/acpi/acpica/acdebug.h2
-rw-r--r--drivers/acpi/acpica/aclocal.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h3
-rw-r--r--drivers/acpi/acpica/dsmethod.c24
-rw-r--r--drivers/acpi/acpica/evglock.c4
-rw-r--r--drivers/acpi/acpica/extrace.c4
-rw-r--r--drivers/acpi/acpica/nswalk.c9
-rw-r--r--drivers/acpi/acpica/psopinfo.c4
-rw-r--r--drivers/acpi/acpica/tbprint.c14
-rw-r--r--drivers/acpi/apei/apei-internal.h2
-rw-r--r--drivers/acpi/apei/einj-core.c483
-rw-r--r--drivers/acpi/apei/einj-cxl.c2
-rw-r--r--drivers/acpi/apei/erst-dbg.c8
-rw-r--r--drivers/acpi/apei/ghes.c159
-rw-r--r--drivers/acpi/arm64/Kconfig3
-rw-r--r--drivers/acpi/arm64/Makefile1
-rw-r--r--drivers/acpi/arm64/gtdt.c63
-rw-r--r--drivers/acpi/arm64/iort.c4
-rw-r--r--drivers/acpi/arm64/mpam.c411
-rw-r--r--drivers/acpi/battery.c74
-rw-r--r--drivers/acpi/bgrt.c2
-rw-r--r--drivers/acpi/bus.c2
-rw-r--r--drivers/acpi/button.c4
-rw-r--r--drivers/acpi/cppc_acpi.c24
-rw-r--r--drivers/acpi/device_pm.c8
-rw-r--r--drivers/acpi/device_sysfs.c2
-rw-r--r--drivers/acpi/dptf/Makefile1
-rw-r--r--drivers/acpi/dptf/dptf_pch_fivr.c2
-rw-r--r--drivers/acpi/dptf/dptf_power.c4
-rw-r--r--drivers/acpi/dptf/int340x_thermal.c87
-rw-r--r--drivers/acpi/ec.c24
-rw-r--r--drivers/acpi/fan.h49
-rw-r--r--drivers/acpi/fan_attr.c10
-rw-r--r--drivers/acpi/fan_core.c279
-rw-r--r--drivers/acpi/fan_hwmon.c32
-rw-r--r--drivers/acpi/internal.h8
-rw-r--r--drivers/acpi/irq.c19
-rw-r--r--drivers/acpi/nfit/core.c2
-rw-r--r--drivers/acpi/nfit/intel.c119
-rw-r--r--drivers/acpi/numa/hmat.c99
-rw-r--r--drivers/acpi/numa/srat.c2
-rw-r--r--drivers/acpi/osl.c6
-rw-r--r--drivers/acpi/pci_irq.c3
-rw-r--r--drivers/acpi/pci_link.c12
-rw-r--r--drivers/acpi/pfr_update.c65
-rw-r--r--drivers/acpi/platform_profile.c7
-rw-r--r--drivers/acpi/power.c90
-rw-r--r--drivers/acpi/pptt.c280
-rw-r--r--drivers/acpi/prmt.c45
-rw-r--r--drivers/acpi/proc.c17
-rw-r--r--drivers/acpi/processor_core.c2
-rw-r--r--drivers/acpi/processor_driver.c3
-rw-r--r--drivers/acpi/processor_idle.c80
-rw-r--r--drivers/acpi/processor_perflib.c11
-rw-r--r--drivers/acpi/processor_thermal.c52
-rw-r--r--drivers/acpi/processor_throttling.c2
-rw-r--r--drivers/acpi/property.c333
-rw-r--r--drivers/acpi/resource.c17
-rw-r--r--drivers/acpi/riscv/Kconfig7
-rw-r--r--drivers/acpi/riscv/Makefile1
-rw-r--r--drivers/acpi/riscv/cppc.c6
-rw-r--r--drivers/acpi/riscv/init.c2
-rw-r--r--drivers/acpi/riscv/init.h1
-rw-r--r--drivers/acpi/riscv/irq.c75
-rw-r--r--drivers/acpi/riscv/rimt.c520
-rw-r--r--drivers/acpi/sbs.c2
-rw-r--r--drivers/acpi/scan.c12
-rw-r--r--drivers/acpi/sleep.c14
-rw-r--r--drivers/acpi/sleep.h3
-rw-r--r--drivers/acpi/spcr.c13
-rw-r--r--drivers/acpi/sysfs.c4
-rw-r--r--drivers/acpi/tables.c2
-rw-r--r--drivers/acpi/thermal.c11
-rw-r--r--drivers/acpi/video_detect.c8
-rw-r--r--drivers/acpi/wakeup.c4
-rw-r--r--drivers/acpi/x86/lpss.c5
-rw-r--r--drivers/acpi/x86/s2idle.c65
-rw-r--r--drivers/amba/Kconfig2
-rw-r--r--drivers/amba/bus.c11
-rw-r--r--drivers/amba/tegra-ahb.c1
-rw-r--r--drivers/android/Kconfig31
-rw-r--r--drivers/android/Makefile5
-rw-r--r--drivers/android/binder.c271
-rw-r--r--drivers/android/binder/Makefile9
-rw-r--r--drivers/android/binder/allocation.rs602
-rw-r--r--drivers/android/binder/context.rs180
-rw-r--r--drivers/android/binder/deferred_close.rs204
-rw-r--r--drivers/android/binder/defs.rs182
-rw-r--r--drivers/android/binder/error.rs100
-rw-r--r--drivers/android/binder/freeze.rs398
-rw-r--r--drivers/android/binder/node.rs1131
-rw-r--r--drivers/android/binder/node/wrapper.rs78
-rw-r--r--drivers/android/binder/page_range.rs734
-rw-r--r--drivers/android/binder/page_range_helper.c24
-rw-r--r--drivers/android/binder/page_range_helper.h15
-rw-r--r--drivers/android/binder/process.rs1745
-rw-r--r--drivers/android/binder/range_alloc/array.rs251
-rw-r--r--drivers/android/binder/range_alloc/mod.rs329
-rw-r--r--drivers/android/binder/range_alloc/tree.rs488
-rw-r--r--drivers/android/binder/rust_binder.h23
-rw-r--r--drivers/android/binder/rust_binder_events.c59
-rw-r--r--drivers/android/binder/rust_binder_events.h36
-rw-r--r--drivers/android/binder/rust_binder_internal.h87
-rw-r--r--drivers/android/binder/rust_binder_main.rs611
-rw-r--r--drivers/android/binder/rust_binderfs.c795
-rw-r--r--drivers/android/binder/stats.rs89
-rw-r--r--drivers/android/binder/thread.rs1596
-rw-r--r--drivers/android/binder/trace.rs16
-rw-r--r--drivers/android/binder/transaction.rs456
-rw-r--r--drivers/android/binder_alloc.c53
-rw-r--r--drivers/android/binder_alloc.h22
-rw-r--r--drivers/android/binder_alloc_selftest.c306
-rw-r--r--drivers/android/binder_internal.h10
-rw-r--r--drivers/android/binder_netlink.c32
-rw-r--r--drivers/android/binder_netlink.h21
-rw-r--r--drivers/android/binder_trace.h58
-rw-r--r--drivers/android/binderfs.c113
-rw-r--r--drivers/android/dbitmap.h1
-rw-r--r--drivers/android/tests/.kunitconfig7
-rw-r--r--drivers/android/tests/Makefile6
-rw-r--r--drivers/android/tests/binder_alloc_kunit.c572
-rw-r--r--drivers/ata/Kconfig36
-rw-r--r--drivers/ata/ahci.c121
-rw-r--r--drivers/ata/ahci.h1
-rw-r--r--drivers/ata/ahci_da850.c6
-rw-r--r--drivers/ata/ahci_dm816.c2
-rw-r--r--drivers/ata/ahci_imx.c13
-rw-r--r--drivers/ata/ahci_qoriq.c4
-rw-r--r--drivers/ata/ahci_xgene.c15
-rw-r--r--drivers/ata/ata_piix.c5
-rw-r--r--drivers/ata/libahci.c11
-rw-r--r--drivers/ata/libata-acpi.c91
-rw-r--r--drivers/ata/libata-core.c163
-rw-r--r--drivers/ata/libata-eh.c438
-rw-r--r--drivers/ata/libata-pmp.c26
-rw-r--r--drivers/ata/libata-sata.c54
-rw-r--r--drivers/ata/libata-scsi.c90
-rw-r--r--drivers/ata/libata-sff.c27
-rw-r--r--drivers/ata/libata-transport.c4
-rw-r--r--drivers/ata/libata.h28
-rw-r--r--drivers/ata/pata_acpi.c2
-rw-r--r--drivers/ata/pata_ali.c10
-rw-r--r--drivers/ata/pata_amd.c4
-rw-r--r--drivers/ata/pata_artop.c4
-rw-r--r--drivers/ata/pata_atiixp.c2
-rw-r--r--drivers/ata/pata_cs5536.c2
-rw-r--r--drivers/ata/pata_efar.c2
-rw-r--r--drivers/ata/pata_ep93xx.c4
-rw-r--r--drivers/ata/pata_hpt366.c2
-rw-r--r--drivers/ata/pata_hpt37x.c4
-rw-r--r--drivers/ata/pata_hpt3x2n.c2
-rw-r--r--drivers/ata/pata_icside.c2
-rw-r--r--drivers/ata/pata_it8213.c2
-rw-r--r--drivers/ata/pata_it821x.c5
-rw-r--r--drivers/ata/pata_jmicron.c2
-rw-r--r--drivers/ata/pata_macio.c4
-rw-r--r--drivers/ata/pata_marvell.c2
-rw-r--r--drivers/ata/pata_mpiix.c2
-rw-r--r--drivers/ata/pata_ns87410.c2
-rw-r--r--drivers/ata/pata_octeon_cf.c2
-rw-r--r--drivers/ata/pata_oldpiix.c2
-rw-r--r--drivers/ata/pata_opti.c2
-rw-r--r--drivers/ata/pata_optidma.c6
-rw-r--r--drivers/ata/pata_parport/pata_parport.c4
-rw-r--r--drivers/ata/pata_pcmcia.c5
-rw-r--r--drivers/ata/pata_pdc2027x.c16
-rw-r--r--drivers/ata/pata_rdc.c6
-rw-r--r--drivers/ata/pata_sis.c2
-rw-r--r--drivers/ata/pata_sl82c105.c2
-rw-r--r--drivers/ata/pata_triflex.c2
-rw-r--r--drivers/ata/pata_via.c11
-rw-r--r--drivers/ata/pdc_adma.c2
-rw-r--r--drivers/ata/sata_dwc_460ex.c2
-rw-r--r--drivers/ata/sata_fsl.c6
-rw-r--r--drivers/ata/sata_highbank.c2
-rw-r--r--drivers/ata/sata_inic162x.c2
-rw-r--r--drivers/ata/sata_mv.c10
-rw-r--r--drivers/ata/sata_nv.c2
-rw-r--r--drivers/ata/sata_promise.c4
-rw-r--r--drivers/ata/sata_qstor.c4
-rw-r--r--drivers/ata/sata_rcar.c2
-rw-r--r--drivers/ata/sata_sil.c2
-rw-r--r--drivers/ata/sata_sil24.c8
-rw-r--r--drivers/ata/sata_svw.c4
-rw-r--r--drivers/ata/sata_sx4.c2
-rw-r--r--drivers/ata/sata_uli.c2
-rw-r--r--drivers/ata/sata_via.c4
-rw-r--r--drivers/atm/atmtcp.c15
-rw-r--r--drivers/atm/fore200e.c2
-rw-r--r--drivers/atm/idt77252.c5
-rw-r--r--drivers/atm/lanai.c2
-rw-r--r--drivers/auxdisplay/line-display.c240
-rw-r--r--drivers/auxdisplay/line-display.h4
-rw-r--r--drivers/base/Kconfig6
-rw-r--r--drivers/base/arch_topology.c98
-rw-r--r--drivers/base/auxiliary.c27
-rw-r--r--drivers/base/base.h25
-rw-r--r--drivers/base/bus.c41
-rw-r--r--drivers/base/cacheinfo.c50
-rw-r--r--drivers/base/core.c114
-rw-r--r--drivers/base/cpu.c34
-rw-r--r--drivers/base/dd.c14
-rw-r--r--drivers/base/devcoredump.c138
-rw-r--r--drivers/base/devres.c46
-rw-r--r--drivers/base/devtmpfs.c30
-rw-r--r--drivers/base/faux.c4
-rw-r--r--drivers/base/firmware_loader/Kconfig2
-rw-r--r--drivers/base/firmware_loader/main.c102
-rw-r--r--drivers/base/firmware_loader/sysfs.c16
-rw-r--r--drivers/base/firmware_loader/sysfs_upload.c6
-rw-r--r--drivers/base/memory.c103
-rw-r--r--drivers/base/node.c253
-rw-r--r--drivers/base/platform.c80
-rw-r--r--drivers/base/power/Makefile1
-rw-r--r--drivers/base/power/common.c9
-rw-r--r--drivers/base/power/generic_ops.c85
-rw-r--r--drivers/base/power/main.c270
-rw-r--r--drivers/base/power/runtime-test.c249
-rw-r--r--drivers/base/power/runtime.c203
-rw-r--r--drivers/base/power/trace.c4
-rw-r--r--drivers/base/power/wakeup.c24
-rw-r--r--drivers/base/property.c2
-rw-r--r--drivers/base/regmap/internal.h2
-rw-r--r--drivers/base/regmap/regcache-flat.c107
-rw-r--r--drivers/base/regmap/regcache-maple.c47
-rw-r--r--drivers/base/regmap/regcache-rbtree.c31
-rw-r--r--drivers/base/regmap/regcache.c17
-rw-r--r--drivers/base/regmap/regmap-debugfs.c10
-rw-r--r--drivers/base/regmap/regmap-i3c.c9
-rw-r--r--drivers/base/regmap/regmap-irq.c30
-rw-r--r--drivers/base/regmap/regmap-kunit.c24
-rw-r--r--drivers/base/regmap/regmap-mmio.c1
-rw-r--r--drivers/base/regmap/regmap-sdw-mbq.c26
-rw-r--r--drivers/base/regmap/regmap-slimbus.c6
-rw-r--r--drivers/base/regmap/regmap.c15
-rw-r--r--drivers/base/swnode.c35
-rw-r--r--drivers/base/syscore.c82
-rw-r--r--drivers/base/topology.c2
-rw-r--r--drivers/bcma/driver_gpio.c2
-rw-r--r--drivers/bcma/main.c6
-rw-r--r--drivers/block/Kconfig53
-rw-r--r--drivers/block/Makefile5
-rw-r--r--drivers/block/amiflop.c10
-rw-r--r--drivers/block/aoe/aoe.h1
-rw-r--r--drivers/block/aoe/aoeblk.c4
-rw-r--r--drivers/block/aoe/aoecmd.c10
-rw-r--r--drivers/block/aoe/aoedev.c13
-rw-r--r--drivers/block/aoe/aoemain.c2
-rw-r--r--drivers/block/brd.c71
-rw-r--r--drivers/block/drbd/drbd_bitmap.c10
-rw-r--r--drivers/block/drbd/drbd_int.h39
-rw-r--r--drivers/block/drbd/drbd_main.c59
-rw-r--r--drivers/block/drbd/drbd_nl.c1
-rw-r--r--drivers/block/drbd/drbd_receiver.c284
-rw-r--r--drivers/block/drbd/drbd_worker.c56
-rw-r--r--drivers/block/floppy.c63
-rw-r--r--drivers/block/loop.c114
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c33
-rw-r--r--drivers/block/nbd.c72
-rw-r--r--drivers/block/null_blk/main.c83
-rw-r--r--drivers/block/null_blk/null_blk.h3
-rw-r--r--drivers/block/null_blk/zoned.c6
-rw-r--r--drivers/block/pktcdvd.c2916
-rw-r--r--drivers/block/ps3disk.c4
-rw-r--r--drivers/block/rbd.c2
-rw-r--r--drivers/block/rnbd/rnbd-clt.c6
-rw-r--r--drivers/block/rnbd/rnbd-proto.h15
-rw-r--r--drivers/block/rnull.rs80
-rw-r--r--drivers/block/rnull/Kconfig13
-rw-r--r--drivers/block/rnull/Makefile3
-rw-r--r--drivers/block/rnull/configfs.rs263
-rw-r--r--drivers/block/rnull/rnull.rs103
-rw-r--r--drivers/block/sunvdc.c11
-rw-r--r--drivers/block/swim.c4
-rw-r--r--drivers/block/ublk_drv.c1242
-rw-r--r--drivers/block/virtio_blk.c37
-rw-r--r--drivers/block/xen-blkfront.c4
-rw-r--r--drivers/block/zloop.c168
-rw-r--r--drivers/block/zram/zcomp.c15
-rw-r--r--drivers/block/zram/zcomp.h2
-rw-r--r--drivers/block/zram/zram_drv.c537
-rw-r--r--drivers/block/zram/zram_drv.h2
-rw-r--r--drivers/bluetooth/Kconfig7
-rw-r--r--drivers/bluetooth/bfusb.c2
-rw-r--r--drivers/bluetooth/bpa10x.c8
-rw-r--r--drivers/bluetooth/btbcm.c12
-rw-r--r--drivers/bluetooth/btintel.c39
-rw-r--r--drivers/bluetooth/btintel.h2
-rw-r--r--drivers/bluetooth/btintel_pcie.c881
-rw-r--r--drivers/bluetooth/btintel_pcie.h20
-rw-r--r--drivers/bluetooth/btmtk.c7
-rw-r--r--drivers/bluetooth/btmtksdio.c19
-rw-r--r--drivers/bluetooth/btmtkuart.c10
-rw-r--r--drivers/bluetooth/btnxpuart.c147
-rw-r--r--drivers/bluetooth/btqca.c2
-rw-r--r--drivers/bluetooth/btqcomsmd.c2
-rw-r--r--drivers/bluetooth/btrtl.c56
-rw-r--r--drivers/bluetooth/btsdio.c2
-rw-r--r--drivers/bluetooth/btusb.c411
-rw-r--r--drivers/bluetooth/h4_recv.h153
-rw-r--r--drivers/bluetooth/hci_ag6xx.c2
-rw-r--r--drivers/bluetooth/hci_aml.c4
-rw-r--r--drivers/bluetooth/hci_ath.c2
-rw-r--r--drivers/bluetooth/hci_bcm.c12
-rw-r--r--drivers/bluetooth/hci_bcm4377.c12
-rw-r--r--drivers/bluetooth/hci_bcsp.c3
-rw-r--r--drivers/bluetooth/hci_h4.c6
-rw-r--r--drivers/bluetooth/hci_h5.c53
-rw-r--r--drivers/bluetooth/hci_intel.c17
-rw-r--r--drivers/bluetooth/hci_ldisc.c6
-rw-r--r--drivers/bluetooth/hci_ll.c6
-rw-r--r--drivers/bluetooth/hci_mrvl.c6
-rw-r--r--drivers/bluetooth/hci_nokia.c6
-rw-r--r--drivers/bluetooth/hci_qca.c30
-rw-r--r--drivers/bluetooth/hci_serdev.c8
-rw-r--r--drivers/bluetooth/hci_uart.h10
-rw-r--r--drivers/bluetooth/hci_vhci.c65
-rw-r--r--drivers/bluetooth/virtio_bt.c10
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c28
-rw-r--r--drivers/bus/fsl-mc/mc-sys.c2
-rw-r--r--drivers/bus/mhi/ep/internal.h2
-rw-r--r--drivers/bus/mhi/ep/main.c41
-rw-r--r--drivers/bus/mhi/host/boot.c8
-rw-r--r--drivers/bus/mhi/host/debugfs.c3
-rw-r--r--drivers/bus/mhi/host/init.c13
-rw-r--r--drivers/bus/mhi/host/internal.h14
-rw-r--r--drivers/bus/mhi/host/main.c15
-rw-r--r--drivers/bus/mhi/host/pci_generic.c167
-rw-r--r--drivers/bus/mhi/host/pm.c29
-rw-r--r--drivers/bus/moxtet.c3
-rw-r--r--drivers/bus/mvebu-mbus.c16
-rw-r--r--drivers/bus/stm32_rifsc.c597
-rw-r--r--drivers/bus/sunxi-rsb.c2
-rw-r--r--drivers/bus/ti-sysc.c14
-rw-r--r--drivers/cache/Kconfig37
-rw-r--r--drivers/cache/Makefile2
-rw-r--r--drivers/cache/hisi_soc_hha.c194
-rw-r--r--drivers/cache/sifive_ccache.c8
-rw-r--r--drivers/cdrom/cdrom.c8
-rw-r--r--drivers/cdx/Kconfig2
-rw-r--r--drivers/cdx/cdx.c8
-rw-r--r--drivers/cdx/cdx_msi.c1
-rw-r--r--drivers/cdx/controller/Kconfig2
-rw-r--r--drivers/cdx/controller/bitfield.h90
-rw-r--r--drivers/cdx/controller/cdx_controller.c32
-rw-r--r--drivers/cdx/controller/cdx_rpmsg.c5
-rw-r--r--drivers/cdx/controller/mcdi.c43
-rw-r--r--drivers/cdx/controller/mcdi.h242
-rw-r--r--drivers/cdx/controller/mcdi_functions.c1
-rw-r--r--drivers/cdx/controller/mcdi_functions.h3
-rw-r--r--drivers/cdx/controller/mcdid.h63
-rw-r--r--drivers/char/Kconfig2
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/adi.c8
-rw-r--r--drivers/char/agp/amd64-agp.c16
-rw-r--r--drivers/char/apm-emulation.c10
-rw-r--r--drivers/char/applicom.c5
-rw-r--r--drivers/char/hangcheck-timer.c24
-rw-r--r--drivers/char/hpet.c2
-rw-r--r--drivers/char/hw_random/Kconfig3
-rw-r--r--drivers/char/hw_random/atmel-rng.c1
-rw-r--r--drivers/char/hw_random/bcm2835-rng.c11
-rw-r--r--drivers/char/hw_random/cctrng.c1
-rw-r--r--drivers/char/hw_random/cn10k-rng.c2
-rw-r--r--drivers/char/hw_random/core.c11
-rw-r--r--drivers/char/hw_random/ks-sa-rng.c4
-rw-r--r--drivers/char/hw_random/mtk-rng.c5
-rw-r--r--drivers/char/hw_random/n2rng.h4
-rw-r--r--drivers/char/hw_random/npcm-rng.c1
-rw-r--r--drivers/char/hw_random/omap3-rom-rng.c1
-rw-r--r--drivers/char/hw_random/rockchip-rng.c3
-rw-r--r--drivers/char/hw_random/s390-trng.c3
-rw-r--r--drivers/char/hw_random/stm32-rng.c1
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c2
-rw-r--r--drivers/char/ipmi/Kconfig7
-rw-r--r--drivers/char/ipmi/Makefile1
-rw-r--r--drivers/char/ipmi/ipmi_ipmb.c4
-rw-r--r--drivers/char/ipmi/ipmi_kcs_sm.c16
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c638
-rw-r--r--drivers/char/ipmi/ipmi_powernv.c17
-rw-r--r--drivers/char/ipmi/ipmi_si.h7
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c81
-rw-r--r--drivers/char/ipmi/ipmi_si_ls2k.c189
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c10
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c59
-rw-r--r--drivers/char/mem.c105
-rw-r--r--drivers/char/misc.c31
-rw-r--r--drivers/char/misc_minor_kunit.c689
-rw-r--r--drivers/char/mwave/3780i.c218
-rw-r--r--drivers/char/mwave/3780i.h12
-rw-r--r--drivers/char/mwave/Makefile6
-rw-r--r--drivers/char/mwave/README10
-rw-r--r--drivers/char/mwave/mwavedd.c337
-rw-r--r--drivers/char/mwave/mwavedd.h76
-rw-r--r--drivers/char/mwave/mwavepub.h22
-rw-r--r--drivers/char/mwave/smapi.c244
-rw-r--r--drivers/char/mwave/smapi.h6
-rw-r--r--drivers/char/mwave/tp3780i.c209
-rw-r--r--drivers/char/mwave/tp3780i.h30
-rw-r--r--drivers/char/random.c44
-rw-r--r--drivers/char/tpm/Kconfig12
-rw-r--r--drivers/char/tpm/Makefile1
-rw-r--r--drivers/char/tpm/eventlog/common.c46
-rw-r--r--drivers/char/tpm/eventlog/of.c8
-rw-r--r--drivers/char/tpm/st33zp24/st33zp24.c2
-rw-r--r--drivers/char/tpm/tpm-chip.c37
-rw-r--r--drivers/char/tpm/tpm-dev-common.c3
-rw-r--r--drivers/char/tpm/tpm-interface.c61
-rw-r--r--drivers/char/tpm/tpm.h3
-rw-r--r--drivers/char/tpm/tpm1-cmd.c5
-rw-r--r--drivers/char/tpm/tpm2-cmd.c191
-rw-r--r--drivers/char/tpm/tpm2-sessions.c309
-rw-r--r--drivers/char/tpm/tpm_atmel.c3
-rw-r--r--drivers/char/tpm/tpm_crb.c35
-rw-r--r--drivers/char/tpm/tpm_crb_ffa.c94
-rw-r--r--drivers/char/tpm/tpm_crb_ffa.h2
-rw-r--r--drivers/char/tpm/tpm_ftpm_tee.c66
-rw-r--r--drivers/char/tpm/tpm_ftpm_tee.h4
-rw-r--r--drivers/char/tpm/tpm_i2c_atmel.c3
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c3
-rw-r--r--drivers/char/tpm/tpm_i2c_nuvoton.c3
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c6
-rw-r--r--drivers/char/tpm/tpm_infineon.c3
-rw-r--r--drivers/char/tpm/tpm_loongson.c84
-rw-r--r--drivers/char/tpm/tpm_nsc.c3
-rw-r--r--drivers/char/tpm/tpm_ppi.c137
-rw-r--r--drivers/char/tpm/tpm_svsm.c28
-rw-r--r--drivers/char/tpm/tpm_tis_core.c10
-rw-r--r--drivers/char/tpm/tpm_tis_i2c_cr50.c6
-rw-r--r--drivers/char/tpm/tpm_vtpm_proxy.c4
-rw-r--r--drivers/char/tpm/xen-tpmfront.c3
-rw-r--r--drivers/char/xillybus/xillybus_core.c2
-rw-r--r--drivers/char/xillybus/xillyusb.c4
-rw-r--r--drivers/clk/Kconfig21
-rw-r--r--drivers/clk/Makefile6
-rw-r--r--drivers/clk/actions/owl-common.c1
-rw-r--r--drivers/clk/actions/owl-common.h2
-rw-r--r--drivers/clk/actions/owl-composite.c8
-rw-r--r--drivers/clk/actions/owl-composite.h2
-rw-r--r--drivers/clk/actions/owl-divider.c13
-rw-r--r--drivers/clk/actions/owl-divider.h2
-rw-r--r--drivers/clk/actions/owl-factor.c12
-rw-r--r--drivers/clk/actions/owl-factor.h2
-rw-r--r--drivers/clk/actions/owl-gate.h2
-rw-r--r--drivers/clk/actions/owl-mux.h2
-rw-r--r--drivers/clk/actions/owl-pll.c25
-rw-r--r--drivers/clk/actions/owl-pll.h2
-rw-r--r--drivers/clk/at91/clk-audio-pll.c42
-rw-r--r--drivers/clk/at91/clk-h32mx.c33
-rw-r--r--drivers/clk/at91/clk-master.c3
-rw-r--r--drivers/clk/at91/clk-peripheral.c49
-rw-r--r--drivers/clk/at91/clk-pll.c12
-rw-r--r--drivers/clk/at91/clk-plldiv.c34
-rw-r--r--drivers/clk/at91/clk-sam9x60-pll.c111
-rw-r--r--drivers/clk/at91/clk-usb.c20
-rw-r--r--drivers/clk/at91/pmc.c12
-rw-r--r--drivers/clk/at91/pmc.h4
-rw-r--r--drivers/clk/at91/sam9x60.c2
-rw-r--r--drivers/clk/at91/sam9x7.c26
-rw-r--r--drivers/clk/at91/sama7d65.c4
-rw-r--r--drivers/clk/at91/sama7g5.c2
-rw-r--r--drivers/clk/axs10x/i2s_pll_clock.c14
-rw-r--r--drivers/clk/axs10x/pll_clock.c12
-rw-r--r--drivers/clk/baikal-t1/ccu-div.c27
-rw-r--r--drivers/clk/baikal-t1/ccu-pll.c14
-rw-r--r--drivers/clk/baikal-t1/clk-ccu-div.c2
-rw-r--r--drivers/clk/baikal-t1/clk-ccu-pll.c2
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c19
-rw-r--r--drivers/clk/bcm/clk-bcm53573-ilp.c2
-rw-r--r--drivers/clk/bcm/clk-iproc-asiu.c25
-rw-r--r--drivers/clk/bcm/clk-raspberrypi.c72
-rw-r--r--drivers/clk/berlin/berlin2-avpll.c2
-rw-r--r--drivers/clk/clk-apple-nco.c14
-rw-r--r--drivers/clk/clk-asm9260.c4
-rw-r--r--drivers/clk/clk-ast2600.c2
-rw-r--r--drivers/clk/clk-axi-clkgen.c159
-rw-r--r--drivers/clk/clk-axm5516.c1
-rw-r--r--drivers/clk/clk-bm1880.c21
-rw-r--r--drivers/clk/clk-cdce706.c16
-rw-r--r--drivers/clk/clk-cdce925.c50
-rw-r--r--drivers/clk/clk-clps711x.c2
-rw-r--r--drivers/clk/clk-cs2000-cp.c14
-rw-r--r--drivers/clk/clk-divider.c23
-rw-r--r--drivers/clk/clk-en7523.c64
-rw-r--r--drivers/clk/clk-ep93xx.c21
-rw-r--r--drivers/clk/clk-eyeq.c2
-rw-r--r--drivers/clk/clk-fixed-factor.c16
-rw-r--r--drivers/clk/clk-fractional-divider.c25
-rw-r--r--drivers/clk/clk-gate.c2
-rw-r--r--drivers/clk/clk-gemini.c15
-rw-r--r--drivers/clk/clk-highbank.c26
-rw-r--r--drivers/clk/clk-hsdk-pll.c14
-rw-r--r--drivers/clk/clk-lan966x.c2
-rw-r--r--drivers/clk/clk-lmk04832.c53
-rw-r--r--drivers/clk/clk-loongson1.c12
-rw-r--r--drivers/clk/clk-loongson2.c122
-rw-r--r--drivers/clk/clk-max9485.c27
-rw-r--r--drivers/clk/clk-milbeaut.c22
-rw-r--r--drivers/clk/clk-multiplier.c12
-rw-r--r--drivers/clk/clk-pwm.c49
-rw-r--r--drivers/clk/clk-rp1.c2462
-rw-r--r--drivers/clk/clk-rpmi.c620
-rw-r--r--drivers/clk/clk-s2mps11.c10
-rw-r--r--drivers/clk/clk-scmi.c66
-rw-r--r--drivers/clk/clk-scpi.c18
-rw-r--r--drivers/clk/clk-si514.c24
-rw-r--r--drivers/clk/clk-si521xx.c14
-rw-r--r--drivers/clk/clk-si5341.c22
-rw-r--r--drivers/clk/clk-si5351.c6
-rw-r--r--drivers/clk/clk-si544.c12
-rw-r--r--drivers/clk/clk-si570.c28
-rw-r--r--drivers/clk/clk-sp7021.c46
-rw-r--r--drivers/clk/clk-sparx5.c10
-rw-r--r--drivers/clk/clk-stm32f4.c28
-rw-r--r--drivers/clk/clk-tps68470.c12
-rw-r--r--drivers/clk/clk-versaclock3.c70
-rw-r--r--drivers/clk/clk-versaclock5.c73
-rw-r--r--drivers/clk/clk-versaclock7.c32
-rw-r--r--drivers/clk/clk-vt8500.c59
-rw-r--r--drivers/clk/clk-wm831x.c14
-rw-r--r--drivers/clk/clk-xgene.c41
-rw-r--r--drivers/clk/clk.c78
-rw-r--r--drivers/clk/clk_test.c226
-rw-r--r--drivers/clk/davinci/pll.h2
-rw-r--r--drivers/clk/davinci/psc-da850.c7
-rw-r--r--drivers/clk/davinci/psc.c5
-rw-r--r--drivers/clk/hisilicon/clk-hi3660-stub.c18
-rw-r--r--drivers/clk/hisilicon/clk-hi6220-stub.c12
-rw-r--r--drivers/clk/hisilicon/clkdivider-hi6220.c12
-rw-r--r--drivers/clk/hisilicon/clkgate-separated.c16
-rw-r--r--drivers/clk/imx/Kconfig1
-rw-r--r--drivers/clk/imx/Makefile1
-rw-r--r--drivers/clk/imx/clk-busy.c8
-rw-r--r--drivers/clk/imx/clk-composite-7ulp.c13
-rw-r--r--drivers/clk/imx/clk-composite-8m.c16
-rw-r--r--drivers/clk/imx/clk-composite-93.c7
-rw-r--r--drivers/clk/imx/clk-cpu.c10
-rw-r--r--drivers/clk/imx/clk-fixup-div.c10
-rw-r--r--drivers/clk/imx/clk-fixup-mux.c2
-rw-r--r--drivers/clk/imx/clk-frac-pll.c20
-rw-r--r--drivers/clk/imx/clk-fracn-gppll.c17
-rw-r--r--drivers/clk/imx/clk-gate-exclusive.c2
-rw-r--r--drivers/clk/imx/clk-imx5.c2
-rw-r--r--drivers/clk/imx/clk-imx8-acm.c2
-rw-r--r--drivers/clk/imx/clk-imx8mp-audiomix.c39
-rw-r--r--drivers/clk/imx/clk-imx8qxp-lpcg.c1
-rw-r--r--drivers/clk/imx/clk-imx8ulp-sim-lpav.c156
-rw-r--r--drivers/clk/imx/clk-imx95-blk-ctl.c150
-rw-r--r--drivers/clk/imx/clk-pfd.c18
-rw-r--r--drivers/clk/imx/clk-pll14xx.c29
-rw-r--r--drivers/clk/imx/clk-pllv2.c23
-rw-r--r--drivers/clk/imx/clk-pllv3.c72
-rw-r--r--drivers/clk/imx/clk-pllv4.c29
-rw-r--r--drivers/clk/imx/clk-scu.c39
-rw-r--r--drivers/clk/imx/clk-vf610.c12
-rw-r--r--drivers/clk/ingenic/cgu.c12
-rw-r--r--drivers/clk/ingenic/cgu.h2
-rw-r--r--drivers/clk/ingenic/jz4725b-cgu.c2
-rw-r--r--drivers/clk/ingenic/jz4740-cgu.c2
-rw-r--r--drivers/clk/ingenic/jz4755-cgu.c2
-rw-r--r--drivers/clk/ingenic/jz4760-cgu.c2
-rw-r--r--drivers/clk/ingenic/jz4770-cgu.c2
-rw-r--r--drivers/clk/ingenic/jz4780-cgu.c26
-rw-r--r--drivers/clk/ingenic/pm.c14
-rw-r--r--drivers/clk/ingenic/pm.h2
-rw-r--r--drivers/clk/ingenic/tcu.c12
-rw-r--r--drivers/clk/ingenic/x1000-cgu.c21
-rw-r--r--drivers/clk/ingenic/x1830-cgu.c2
-rw-r--r--drivers/clk/keystone/sci-clk.c9
-rw-r--r--drivers/clk/keystone/syscon-clk.c2
-rw-r--r--drivers/clk/kunit_clk_hw_get_dev_of_node.dtso10
-rw-r--r--drivers/clk/mediatek/Kconfig71
-rw-r--r--drivers/clk/mediatek/Makefile13
-rw-r--r--drivers/clk/mediatek/clk-gate.c117
-rw-r--r--drivers/clk/mediatek/clk-gate.h3
-rw-r--r--drivers/clk/mediatek/clk-mt7622-aud.c1
-rw-r--r--drivers/clk/mediatek/clk-mt8195-infra_ao.c2
-rw-r--r--drivers/clk/mediatek/clk-mt8196-apmixedsys.c204
-rw-r--r--drivers/clk/mediatek/clk-mt8196-disp0.c170
-rw-r--r--drivers/clk/mediatek/clk-mt8196-disp1.c170
-rw-r--r--drivers/clk/mediatek/clk-mt8196-imp_iic_wrap.c118
-rw-r--r--drivers/clk/mediatek/clk-mt8196-mcu.c167
-rw-r--r--drivers/clk/mediatek/clk-mt8196-mdpsys.c186
-rw-r--r--drivers/clk/mediatek/clk-mt8196-mfg.c150
-rw-r--r--drivers/clk/mediatek/clk-mt8196-ovl0.c154
-rw-r--r--drivers/clk/mediatek/clk-mt8196-ovl1.c154
-rw-r--r--drivers/clk/mediatek/clk-mt8196-peri_ao.c142
-rw-r--r--drivers/clk/mediatek/clk-mt8196-pextp.c131
-rw-r--r--drivers/clk/mediatek/clk-mt8196-topckgen.c985
-rw-r--r--drivers/clk/mediatek/clk-mt8196-topckgen2.c568
-rw-r--r--drivers/clk/mediatek/clk-mt8196-ufs_ao.c108
-rw-r--r--drivers/clk/mediatek/clk-mt8196-vdec.c253
-rw-r--r--drivers/clk/mediatek/clk-mt8196-vdisp_ao.c80
-rw-r--r--drivers/clk/mediatek/clk-mt8196-venc.c236
-rw-r--r--drivers/clk/mediatek/clk-mt8196-vlpckgen.c725
-rw-r--r--drivers/clk/mediatek/clk-mtk.c16
-rw-r--r--drivers/clk/mediatek/clk-mtk.h22
-rw-r--r--drivers/clk/mediatek/clk-mux.c122
-rw-r--r--drivers/clk/mediatek/clk-mux.h87
-rw-r--r--drivers/clk/mediatek/clk-pll.c58
-rw-r--r--drivers/clk/mediatek/clk-pll.h11
-rw-r--r--drivers/clk/mediatek/clk-pllfh.c2
-rw-r--r--drivers/clk/meson/Kconfig17
-rw-r--r--drivers/clk/meson/Makefile1
-rw-r--r--drivers/clk/meson/a1-peripherals.c1187
-rw-r--r--drivers/clk/meson/a1-peripherals.h46
-rw-r--r--drivers/clk/meson/a1-pll.c150
-rw-r--r--drivers/clk/meson/a1-pll.h28
-rw-r--r--drivers/clk/meson/axg-aoclk.c175
-rw-r--r--drivers/clk/meson/axg-audio.c603
-rw-r--r--drivers/clk/meson/axg-audio.h70
-rw-r--r--drivers/clk/meson/axg.c457
-rw-r--r--drivers/clk/meson/axg.h105
-rw-r--r--drivers/clk/meson/c3-peripherals.c2245
-rw-r--r--drivers/clk/meson/c3-pll.c275
-rw-r--r--drivers/clk/meson/clk-cpu-dyndiv.c1
-rw-r--r--drivers/clk/meson/clk-dualdiv.c2
-rw-r--r--drivers/clk/meson/clk-mpll.c6
-rw-r--r--drivers/clk/meson/clk-phase.c11
-rw-r--r--drivers/clk/meson/clk-pll.c7
-rw-r--r--drivers/clk/meson/clk-regmap.c49
-rw-r--r--drivers/clk/meson/clk-regmap.h24
-rw-r--r--drivers/clk/meson/g12a-aoclk.c272
-rw-r--r--drivers/clk/meson/g12a.c2372
-rw-r--r--drivers/clk/meson/g12a.h130
-rw-r--r--drivers/clk/meson/gxbb-aoclk.c142
-rw-r--r--drivers/clk/meson/gxbb.c1103
-rw-r--r--drivers/clk/meson/gxbb.h115
-rw-r--r--drivers/clk/meson/meson-aoclk.c37
-rw-r--r--drivers/clk/meson/meson-aoclk.h4
-rw-r--r--drivers/clk/meson/meson-clkc-utils.c86
-rw-r--r--drivers/clk/meson/meson-clkc-utils.h89
-rw-r--r--drivers/clk/meson/meson-eeclk.c64
-rw-r--r--drivers/clk/meson/meson-eeclk.h26
-rw-r--r--drivers/clk/meson/meson8-ddr.c71
-rw-r--r--drivers/clk/meson/meson8b.c1013
-rw-r--r--drivers/clk/meson/meson8b.h80
-rw-r--r--drivers/clk/meson/s4-peripherals.c1552
-rw-r--r--drivers/clk/meson/s4-peripherals.h56
-rw-r--r--drivers/clk/meson/s4-pll.c142
-rw-r--r--drivers/clk/meson/s4-pll.h38
-rw-r--r--drivers/clk/meson/sclk-div.c5
-rw-r--r--drivers/clk/meson/vclk.c2
-rw-r--r--drivers/clk/meson/vid-pll-div.c1
-rw-r--r--drivers/clk/microchip/Kconfig2
-rw-r--r--drivers/clk/microchip/clk-core.c57
-rw-r--r--drivers/clk/microchip/clk-mpfs.c227
-rw-r--r--drivers/clk/mmp/Kconfig10
-rw-r--r--drivers/clk/mmp/Makefile5
-rw-r--r--drivers/clk/mmp/clk-audio.c18
-rw-r--r--drivers/clk/mmp/clk-frac.c27
-rw-r--r--drivers/clk/mmp/clk-gate.c2
-rw-r--r--drivers/clk/mmp/clk-pxa1908-apmu.c7
-rw-r--r--drivers/clk/mstar/clk-msc313-cpupll.c18
-rw-r--r--drivers/clk/mvebu/ap-cpu-clk.c12
-rw-r--r--drivers/clk/mvebu/armada-37xx-periph.c15
-rw-r--r--drivers/clk/mvebu/armada-xp.c5
-rw-r--r--drivers/clk/mvebu/clk-corediv.c18
-rw-r--r--drivers/clk/mvebu/clk-cpu.c12
-rw-r--r--drivers/clk/mvebu/common.c12
-rw-r--r--drivers/clk/mvebu/cp110-system-controller.c20
-rw-r--r--drivers/clk/mvebu/dove-divider.c16
-rw-r--r--drivers/clk/mxs/clk-div.c10
-rw-r--r--drivers/clk/mxs/clk-frac.c16
-rw-r--r--drivers/clk/mxs/clk-ref.c16
-rw-r--r--drivers/clk/nuvoton/Kconfig4
-rw-r--r--drivers/clk/nuvoton/clk-ma35d1-divider.c12
-rw-r--r--drivers/clk/nuvoton/clk-ma35d1-pll.c28
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-ccu.c2
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-cgu.c20
-rw-r--r--drivers/clk/nxp/clk-lpc32xx.c60
-rw-r--r--drivers/clk/pistachio/clk-pll.c20
-rw-r--r--drivers/clk/qcom/Kconfig174
-rw-r--r--drivers/clk/qcom/Makefile17
-rw-r--r--drivers/clk/qcom/a53-pll.c1
-rw-r--r--drivers/clk/qcom/a7-pll.c3
-rw-r--r--drivers/clk/qcom/apcs-sdx55.c2
-rw-r--r--drivers/clk/qcom/apss-ipq-pll.c1
-rw-r--r--drivers/clk/qcom/apss-ipq5424.c258
-rw-r--r--drivers/clk/qcom/camcc-milos.c2161
-rw-r--r--drivers/clk/qcom/camcc-qcs615.c1597
-rw-r--r--drivers/clk/qcom/camcc-sc8180x.c2889
-rw-r--r--drivers/clk/qcom/camcc-sdm845.c3
-rw-r--r--drivers/clk/qcom/camcc-sm6350.c13
-rw-r--r--drivers/clk/qcom/camcc-sm7150.c11
-rw-r--r--drivers/clk/qcom/camcc-sm8250.c3
-rw-r--r--drivers/clk/qcom/camcc-sm8450.c92
-rw-r--r--drivers/clk/qcom/camcc-sm8550.c95
-rw-r--r--drivers/clk/qcom/camcc-sm8650.c83
-rw-r--r--drivers/clk/qcom/camcc-x1e80100.c67
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c409
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h10
-rw-r--r--drivers/clk/qcom/clk-branch.c8
-rw-r--r--drivers/clk/qcom/clk-branch.h4
-rw-r--r--drivers/clk/qcom/clk-cbf-8996.c1
-rw-r--r--drivers/clk/qcom/clk-cpu-8996.c1
-rw-r--r--drivers/clk/qcom/clk-rcg.c2
-rw-r--r--drivers/clk/qcom/clk-rcg2.c8
-rw-r--r--drivers/clk/qcom/clk-regmap-divider.c27
-rw-r--r--drivers/clk/qcom/clk-rpm.c10
-rw-r--r--drivers/clk/qcom/clk-rpmh.c63
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c16
-rw-r--r--drivers/clk/qcom/clk-spmi-pmic-div.c12
-rw-r--r--drivers/clk/qcom/common.c95
-rw-r--r--drivers/clk/qcom/common.h12
-rw-r--r--drivers/clk/qcom/dispcc-glymur.c1982
-rw-r--r--drivers/clk/qcom/dispcc-milos.c974
-rw-r--r--drivers/clk/qcom/dispcc-qcs615.c792
-rw-r--r--drivers/clk/qcom/dispcc-sc7280.c8
-rw-r--r--drivers/clk/qcom/dispcc-sm6350.c7
-rw-r--r--drivers/clk/qcom/dispcc-sm7150.c9
-rw-r--r--drivers/clk/qcom/dispcc-sm8750.c10
-rw-r--r--drivers/clk/qcom/dispcc-x1e80100.c3
-rw-r--r--drivers/clk/qcom/ecpricc-qdu1000.c30
-rw-r--r--drivers/clk/qcom/gcc-glymur.c8615
-rw-r--r--drivers/clk/qcom/gcc-ipq4019.c14
-rw-r--r--drivers/clk/qcom/gcc-ipq5018.c4
-rw-r--r--drivers/clk/qcom/gcc-ipq5424.c28
-rw-r--r--drivers/clk/qcom/gcc-ipq6018.c60
-rw-r--r--drivers/clk/qcom/gcc-ipq8074.c6
-rw-r--r--drivers/clk/qcom/gcc-milos.c3225
-rw-r--r--drivers/clk/qcom/gcc-msm8917.c617
-rw-r--r--drivers/clk/qcom/gcc-qcm2290.c1
-rw-r--r--drivers/clk/qcom/gcc-qcs404.c2
-rw-r--r--drivers/clk/qcom/gcc-qcs615.c6
-rw-r--r--drivers/clk/qcom/gcc-sc8280xp.c5
-rw-r--r--drivers/clk/qcom/gcc-sdm660.c72
-rw-r--r--drivers/clk/qcom/gcc-sm8150.c6
-rw-r--r--drivers/clk/qcom/gcc-sm8750.c1
-rw-r--r--drivers/clk/qcom/gcc-x1e80100.c701
-rw-r--r--drivers/clk/qcom/gpucc-milos.c562
-rw-r--r--drivers/clk/qcom/gpucc-qcs615.c531
-rw-r--r--drivers/clk/qcom/gpucc-sa8775p.c6
-rw-r--r--drivers/clk/qcom/gpucc-sc7180.c2
-rw-r--r--drivers/clk/qcom/gpucc-sm6350.c4
-rw-r--r--drivers/clk/qcom/gpucc-sm8150.c2
-rw-r--r--drivers/clk/qcom/gpucc-sm8250.c2
-rw-r--r--drivers/clk/qcom/hfpll.c1
-rw-r--r--drivers/clk/qcom/ipq-cmn-pll.c51
-rw-r--r--drivers/clk/qcom/lpassaudiocc-sc7280.c6
-rw-r--r--drivers/clk/qcom/lpasscc-sc8280xp.c4
-rw-r--r--drivers/clk/qcom/lpasscc-sm6115.c2
-rw-r--r--drivers/clk/qcom/lpasscorecc-sc7180.c4
-rw-r--r--drivers/clk/qcom/mmcc-sdm660.c3
-rw-r--r--drivers/clk/qcom/nsscc-ipq5424.c1340
-rw-r--r--drivers/clk/qcom/nsscc-ipq9574.c2
-rw-r--r--drivers/clk/qcom/tcsrcc-glymur.c313
-rw-r--r--drivers/clk/qcom/tcsrcc-sm8650.c8
-rw-r--r--drivers/clk/qcom/tcsrcc-x1e80100.c4
-rw-r--r--drivers/clk/qcom/videocc-milos.c403
-rw-r--r--drivers/clk/qcom/videocc-qcs615.c338
-rw-r--r--drivers/clk/qcom/videocc-sc7180.c2
-rw-r--r--drivers/clk/qcom/videocc-sdm845.c4
-rw-r--r--drivers/clk/qcom/videocc-sm6350.c355
-rw-r--r--drivers/clk/qcom/videocc-sm7150.c4
-rw-r--r--drivers/clk/qcom/videocc-sm8150.c4
-rw-r--r--drivers/clk/qcom/videocc-sm8450.c62
-rw-r--r--drivers/clk/qcom/videocc-sm8550.c91
-rw-r--r--drivers/clk/qcom/videocc-sm8750.c463
-rw-r--r--drivers/clk/renesas/Kconfig10
-rw-r--r--drivers/clk/renesas/Makefile2
-rw-r--r--drivers/clk/renesas/clk-div6.c6
-rw-r--r--drivers/clk/renesas/clk-mstp.c20
-rw-r--r--drivers/clk/renesas/r7s9210-cpg-mssr.c7
-rw-r--r--drivers/clk/renesas/r8a77970-cpg-mssr.c8
-rw-r--r--drivers/clk/renesas/r8a779a0-cpg-mssr.c7
-rw-r--r--drivers/clk/renesas/r9a06g032-clocks.c6
-rw-r--r--drivers/clk/renesas/r9a07g043-cpg.c140
-rw-r--r--drivers/clk/renesas/r9a07g044-cpg.c176
-rw-r--r--drivers/clk/renesas/r9a08g045-cpg.c252
-rw-r--r--drivers/clk/renesas/r9a09g011-cpg.c116
-rw-r--r--drivers/clk/renesas/r9a09g047-cpg.c250
-rw-r--r--drivers/clk/renesas/r9a09g056-cpg.c326
-rw-r--r--drivers/clk/renesas/r9a09g057-cpg.c236
-rw-r--r--drivers/clk/renesas/r9a09g077-cpg.c320
-rw-r--r--drivers/clk/renesas/rcar-cpg-lib.c2
-rw-r--r--drivers/clk/renesas/rcar-gen2-cpg.c5
-rw-r--r--drivers/clk/renesas/rcar-gen2-cpg.h3
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c21
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.h3
-rw-r--r--drivers/clk/renesas/rcar-gen4-cpg.c24
-rw-r--r--drivers/clk/renesas/rcar-gen4-cpg.h3
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c359
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h32
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.c572
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.h67
-rw-r--r--drivers/clk/renesas/rzv2h-cpg.c666
-rw-r--r--drivers/clk/renesas/rzv2h-cpg.h80
-rw-r--r--drivers/clk/rockchip/Kconfig14
-rw-r--r--drivers/clk/rockchip/Makefile2
-rw-r--r--drivers/clk/rockchip/clk-cpu.c171
-rw-r--r--drivers/clk/rockchip/clk-ddr.c13
-rw-r--r--drivers/clk/rockchip/clk-half-divider.c12
-rw-r--r--drivers/clk/rockchip/clk-mmc-phase.c4
-rw-r--r--drivers/clk/rockchip/clk-pll.c25
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c12
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c2
-rw-r--r--drivers/clk/rockchip/clk-rk3506.c869
-rw-r--r--drivers/clk/rockchip/clk-rk3568.c6
-rw-r--r--drivers/clk/rockchip/clk-rv1126b.c1117
-rw-r--r--drivers/clk/rockchip/clk.c24
-rw-r--r--drivers/clk/rockchip/clk.h98
-rw-r--r--drivers/clk/rockchip/rst-rk3506.c226
-rw-r--r--drivers/clk/rockchip/rst-rv1126b.c443
-rw-r--r--drivers/clk/samsung/Kconfig10
-rw-r--r--drivers/clk/samsung/Makefile2
-rw-r--r--drivers/clk/samsung/clk-acpm.c185
-rw-r--r--drivers/clk/samsung/clk-artpec8.c1044
-rw-r--r--drivers/clk/samsung/clk-cpu.c14
-rw-r--r--drivers/clk/samsung/clk-exynos-clkout.c2
-rw-r--r--drivers/clk/samsung/clk-exynos850.c2
-rw-r--r--drivers/clk/samsung/clk-exynos990.c1240
-rw-r--r--drivers/clk/samsung/clk-exynosautov920.c162
-rw-r--r--drivers/clk/samsung/clk-fsd.c28
-rw-r--r--drivers/clk/samsung/clk-gs101.c4
-rw-r--r--drivers/clk/samsung/clk-pll.c200
-rw-r--r--drivers/clk/samsung/clk-pll.h2
-rw-r--r--drivers/clk/samsung/clk-s5pv210-audss.c12
-rw-r--r--drivers/clk/samsung/clk.c12
-rw-r--r--drivers/clk/sifive/fu540-prci.h2
-rw-r--r--drivers/clk/sifive/fu740-prci.h2
-rw-r--r--drivers/clk/sifive/sifive-prci.c11
-rw-r--r--drivers/clk/sifive/sifive-prci.h4
-rw-r--r--drivers/clk/socfpga/Kconfig2
-rw-r--r--drivers/clk/socfpga/Makefile2
-rw-r--r--drivers/clk/socfpga/clk-agilex5.c561
-rw-r--r--drivers/clk/socfpga/clk-gate-s10.c53
-rw-r--r--drivers/clk/socfpga/clk-periph-s10.c41
-rw-r--r--drivers/clk/socfpga/clk-pll-s10.c36
-rw-r--r--drivers/clk/socfpga/stratix10-clk.h43
-rw-r--r--drivers/clk/sophgo/clk-cv18xx-ip.c10
-rw-r--r--drivers/clk/sophgo/clk-sg2042-clkgen.c19
-rw-r--r--drivers/clk/sophgo/clk-sg2042-pll.c30
-rw-r--r--drivers/clk/spacemit/Kconfig1
-rw-r--r--drivers/clk/spacemit/ccu-k1.c307
-rw-r--r--drivers/clk/spacemit/ccu_ddn.c23
-rw-r--r--drivers/clk/spacemit/ccu_ddn.h6
-rw-r--r--drivers/clk/spacemit/ccu_mix.c12
-rw-r--r--drivers/clk/spacemit/ccu_mix.h13
-rw-r--r--drivers/clk/spacemit/ccu_pll.c12
-rw-r--r--drivers/clk/spear/clk-aux-synth.c12
-rw-r--r--drivers/clk/spear/clk-frac-synth.c12
-rw-r--r--drivers/clk/spear/clk-gpt-synth.c12
-rw-r--r--drivers/clk/spear/clk-vco-pll.c23
-rw-r--r--drivers/clk/spear/spear1340_clock.c2
-rw-r--r--drivers/clk/sprd/div.c13
-rw-r--r--drivers/clk/sprd/gate.h2
-rw-r--r--drivers/clk/sprd/pll.c8
-rw-r--r--drivers/clk/sprd/sc9860-clk.c8
-rw-r--r--drivers/clk/sprd/ums512-clk.c4
-rw-r--r--drivers/clk/st/clk-flexgen.c80
-rw-r--r--drivers/clk/st/clkgen-fsyn.c33
-rw-r--r--drivers/clk/st/clkgen-pll.c38
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7110-sys.c2
-rw-r--r--drivers/clk/stm32/Kconfig15
-rw-r--r--drivers/clk/stm32/Makefile1
-rw-r--r--drivers/clk/stm32/clk-stm32-core.c28
-rw-r--r--drivers/clk/stm32/clk-stm32mp1.c15
-rw-r--r--drivers/clk/stm32/clk-stm32mp21.c1586
-rw-r--r--drivers/clk/stm32/stm32mp21_rcc.h651
-rw-r--r--drivers/clk/sunxi-ng/Kconfig5
-rw-r--r--drivers/clk/sunxi-ng/Makefile2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun55i-a523-mcu.c469
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c5
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun55i-a523.c26
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun55i-a523.h14
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-rtc.c11
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r40.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c36
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu_div.h18
-rw-r--r--drivers/clk/sunxi-ng/ccu_gate.c14
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu_nk.c14
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.c23
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.c43
-rw-r--r--drivers/clk/tegra/Kconfig2
-rw-r--r--drivers/clk/tegra/clk-audio-sync.c10
-rw-r--r--drivers/clk/tegra/clk-bpmp.c2
-rw-r--r--drivers/clk/tegra/clk-dfll.c2
-rw-r--r--drivers/clk/tegra/clk-divider.c28
-rw-r--r--drivers/clk/tegra/clk-periph.c10
-rw-r--r--drivers/clk/tegra/clk-pll.c52
-rw-r--r--drivers/clk/tegra/clk-super.c9
-rw-r--r--drivers/clk/tegra/clk-tegra114.c30
-rw-r--r--drivers/clk/tegra/clk-tegra124-dfll-fcpu.c158
-rw-r--r--drivers/clk/tegra/clk-tegra210-emc.c24
-rw-r--r--drivers/clk/tegra/clk-tegra210.c14
-rw-r--r--drivers/clk/tegra/clk-tegra30.c1
-rw-r--r--drivers/clk/tegra/clk.h3
-rw-r--r--drivers/clk/thead/clk-th1520-ap.c615
-rw-r--r--drivers/clk/ti/autoidle.c2
-rw-r--r--drivers/clk/ti/clk-33xx.c2
-rw-r--r--drivers/clk/ti/clk-43xx.c2
-rw-r--r--drivers/clk/ti/clk-dra7-atl.c12
-rw-r--r--drivers/clk/ti/clk.c27
-rw-r--r--drivers/clk/ti/clkt_dpll.c36
-rw-r--r--drivers/clk/ti/clock.h6
-rw-r--r--drivers/clk/ti/composite.c6
-rw-r--r--drivers/clk/ti/divider.c12
-rw-r--r--drivers/clk/ti/dpll.c10
-rw-r--r--drivers/clk/ti/dpll3xxx.c7
-rw-r--r--drivers/clk/ti/dpll44xx.c89
-rw-r--r--drivers/clk/ti/fapll.c48
-rw-r--r--drivers/clk/ti/mux.c2
-rw-r--r--drivers/clk/ux500/clk-prcmu.c14
-rw-r--r--drivers/clk/versatile/clk-icst.c74
-rw-r--r--drivers/clk/versatile/clk-vexpress-osc.c16
-rw-r--r--drivers/clk/visconti/clkc-tmpv770x.c79
-rw-r--r--drivers/clk/visconti/pll-tmpv770x.c5
-rw-r--r--drivers/clk/visconti/pll.c19
-rw-r--r--drivers/clk/x86/clk-cgu.c35
-rw-r--r--drivers/clk/xilinx/clk-xlnx-clock-wizard.c89
-rw-r--r--drivers/clk/xilinx/xlnx_vcu.c48
-rw-r--r--drivers/clk/zynq/pll.c12
-rw-r--r--drivers/clk/zynqmp/divider.c23
-rw-r--r--drivers/clk/zynqmp/pll.c24
-rw-r--r--drivers/clocksource/Kconfig24
-rw-r--r--drivers/clocksource/Makefile4
-rw-r--r--drivers/clocksource/arm_arch_timer.c688
-rw-r--r--drivers/clocksource/arm_arch_timer_mmio.c442
-rw-r--r--drivers/clocksource/arm_global_timer.c44
-rw-r--r--drivers/clocksource/clps711x-timer.c23
-rw-r--r--drivers/clocksource/hyperv_timer.c11
-rw-r--r--drivers/clocksource/ingenic-sysost.c27
-rw-r--r--drivers/clocksource/scx200_hrt.c1
-rw-r--r--drivers/clocksource/sh_cmt.c106
-rw-r--r--drivers/clocksource/timer-armada-370-xp.c12
-rw-r--r--drivers/clocksource/timer-cs5535.c1
-rw-r--r--drivers/clocksource/timer-econet-en751221.c2
-rw-r--r--drivers/clocksource/timer-nxp-pit.c383
-rw-r--r--drivers/clocksource/timer-nxp-stm.c25
-rw-r--r--drivers/clocksource/timer-orion.c2
-rw-r--r--drivers/clocksource/timer-ralink.c11
-rw-r--r--drivers/clocksource/timer-rda.c9
-rw-r--r--drivers/clocksource/timer-realtek.c150
-rw-r--r--drivers/clocksource/timer-rtl-otto.c42
-rw-r--r--drivers/clocksource/timer-sp804.c24
-rw-r--r--drivers/clocksource/timer-sprd.c24
-rw-r--r--drivers/clocksource/timer-stm32-lp.c2
-rw-r--r--drivers/clocksource/timer-sun5i.c2
-rw-r--r--drivers/clocksource/timer-tegra186.c38
-rw-r--r--drivers/clocksource/timer-ti-dm.c119
-rw-r--r--drivers/clocksource/timer-vf-pit.c194
-rw-r--r--drivers/comedi/Kconfig9
-rw-r--r--drivers/comedi/comedi_buf.c276
-rw-r--r--drivers/comedi/comedi_fops.c253
-rw-r--r--drivers/comedi/comedi_internal.h13
-rw-r--r--drivers/comedi/drivers.c177
-rw-r--r--drivers/comedi/drivers/8255.c20
-rw-r--r--drivers/comedi/drivers/Makefile1
-rw-r--r--drivers/comedi/drivers/adl_pci7250.c220
-rw-r--r--drivers/comedi/drivers/aio_iiro_16.c3
-rw-r--r--drivers/comedi/drivers/c6xdigio.c46
-rw-r--r--drivers/comedi/drivers/comedi_bond.c4
-rw-r--r--drivers/comedi/drivers/comedi_test.c2
-rw-r--r--drivers/comedi/drivers/das16m1.c3
-rw-r--r--drivers/comedi/drivers/das6402.c3
-rw-r--r--drivers/comedi/drivers/multiq3.c9
-rw-r--r--drivers/comedi/drivers/ni_670x.c2
-rw-r--r--drivers/comedi/drivers/pcl726.c3
-rw-r--r--drivers/comedi/drivers/pcl812.c3
-rw-r--r--drivers/comedi/drivers/pcl818.c5
-rw-r--r--drivers/comedi/kcomedilib/kcomedilib_main.c120
-rw-r--r--drivers/counter/microchip-tcb-capture.c2
-rw-r--r--drivers/counter/ti-ecap-capture.c12
-rw-r--r--drivers/cpufreq/Kconfig.arm3
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c11
-rw-r--r--drivers/cpufreq/airoha-cpufreq.c1
-rw-r--r--drivers/cpufreq/amd-pstate.c75
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c4
-rw-r--r--drivers/cpufreq/armada-8k-cpufreq.c5
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c6
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c108
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c20
-rw-r--r--drivers/cpufreq/cpufreq-dt.c13
-rw-r--r--drivers/cpufreq/cpufreq-dt.h2
-rw-r--r--drivers/cpufreq/cpufreq-nforce2.c3
-rw-r--r--drivers/cpufreq/cpufreq.c113
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c24
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c25
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.h23
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c1
-rw-r--r--drivers/cpufreq/freq_table.c22
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c2
-rw-r--r--drivers/cpufreq/intel_pstate.c421
-rw-r--r--drivers/cpufreq/longhaul.c3
-rw-r--r--drivers/cpufreq/mediatek-cpufreq-hw.c136
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c37
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c4
-rw-r--r--drivers/cpufreq/powernv-trace.h44
-rw-r--r--drivers/cpufreq/qcom-cpufreq-nvmem.c40
-rw-r--r--drivers/cpufreq/rcpufreq_dt.rs26
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c10
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c12
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c2
-rw-r--r--drivers/cpufreq/sh-cpufreq.c6
-rw-r--r--drivers/cpufreq/spear-cpufreq.c2
-rw-r--r--drivers/cpufreq/speedstep-lib.c12
-rw-r--r--drivers/cpufreq/speedstep-lib.h10
-rw-r--r--drivers/cpufreq/sun50i-cpufreq-nvmem.c11
-rw-r--r--drivers/cpufreq/tegra124-cpufreq.c49
-rw-r--r--drivers/cpufreq/tegra186-cpufreq.c185
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c3
-rw-r--r--drivers/cpufreq/ti-cpufreq.c12
-rw-r--r--drivers/cpufreq/virtual-cpufreq.c2
-rw-r--r--drivers/cpuidle/cpuidle-big_little.c11
-rw-r--r--drivers/cpuidle/cpuidle-psci-domain.c14
-rw-r--r--drivers/cpuidle/cpuidle-psci.c39
-rw-r--r--drivers/cpuidle/cpuidle-qcom-spm.c11
-rw-r--r--drivers/cpuidle/cpuidle-riscv-sbi.c19
-rw-r--r--drivers/cpuidle/cpuidle.c20
-rw-r--r--drivers/cpuidle/driver.c10
-rw-r--r--drivers/cpuidle/dt_idle_states.c14
-rw-r--r--drivers/cpuidle/governor.c4
-rw-r--r--drivers/cpuidle/governors/menu.c145
-rw-r--r--drivers/cpuidle/governors/teo.c159
-rw-r--r--drivers/cpuidle/poll_state.c4
-rw-r--r--drivers/cpuidle/sysfs.c34
-rw-r--r--drivers/crypto/Kconfig30
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c98
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c35
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c149
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c1
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c1
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h29
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c2
-rw-r--r--drivers/crypto/aspeed/aspeed-acry.c2
-rw-r--r--drivers/crypto/aspeed/aspeed-hace-crypto.c2
-rw-r--r--drivers/crypto/aspeed/aspeed-hace-hash.c800
-rw-r--r--drivers/crypto/aspeed/aspeed-hace.h28
-rw-r--r--drivers/crypto/atmel-aes.c1
-rw-r--r--drivers/crypto/atmel-i2c.c2
-rw-r--r--drivers/crypto/atmel-sha.c1
-rw-r--r--drivers/crypto/atmel-tdes.c2
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c9
-rw-r--r--drivers/crypto/caam/Makefile4
-rw-r--r--drivers/crypto/caam/blob_gen.c86
-rw-r--r--drivers/crypto/caam/caamalg.c128
-rw-r--r--drivers/crypto/caam/caamalg_desc.c87
-rw-r--r--drivers/crypto/caam/caamalg_desc.h13
-rw-r--r--drivers/crypto/caam/caamrng.c4
-rw-r--r--drivers/crypto/caam/ctrl.c23
-rw-r--r--drivers/crypto/caam/debugfs.c2
-rw-r--r--drivers/crypto/caam/debugfs.h2
-rw-r--r--drivers/crypto/caam/desc.h9
-rw-r--r--drivers/crypto/caam/desc_constr.h8
-rw-r--r--drivers/crypto/caam/intern.h5
-rw-r--r--drivers/crypto/caam/jr.c3
-rw-r--r--drivers/crypto/caam/qi.c5
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_mbx.c2
-rw-r--r--drivers/crypto/ccp/Kconfig1
-rw-r--r--drivers/crypto/ccp/Makefile7
-rw-r--r--drivers/crypto/ccp/ccp-debugfs.c3
-rw-r--r--drivers/crypto/ccp/ccp-dev.c2
-rw-r--r--drivers/crypto/ccp/ccp-ops.c163
-rw-r--r--drivers/crypto/ccp/hsti.c8
-rw-r--r--drivers/crypto/ccp/psp-dev.c20
-rw-r--r--drivers/crypto/ccp/psp-dev.h8
-rw-r--r--drivers/crypto/ccp/sev-dev-tio.c864
-rw-r--r--drivers/crypto/ccp/sev-dev-tio.h123
-rw-r--r--drivers/crypto/ccp/sev-dev-tsm.c405
-rw-r--r--drivers/crypto/ccp/sev-dev.c471
-rw-r--r--drivers/crypto/ccp/sev-dev.h20
-rw-r--r--drivers/crypto/ccp/sfs.c311
-rw-r--r--drivers/crypto/ccp/sfs.h47
-rw-r--r--drivers/crypto/ccp/sp-dev.h2
-rw-r--r--drivers/crypto/ccp/sp-pci.c20
-rw-r--r--drivers/crypto/ccp/sp-platform.c17
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c60
-rw-r--r--drivers/crypto/ccree/cc_cipher.c4
-rw-r--r--drivers/crypto/ccree/cc_hash.c30
-rw-r--r--drivers/crypto/ccree/cc_pm.c1
-rw-r--r--drivers/crypto/chelsio/Kconfig6
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c259
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h1
-rw-r--r--drivers/crypto/hifn_795x.c7
-rw-r--r--drivers/crypto/hisilicon/Kconfig1
-rw-r--r--drivers/crypto/hisilicon/debugfs.c1
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_crypto.c405
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c179
-rw-r--r--drivers/crypto/hisilicon/qm.c303
-rw-r--r--drivers/crypto/hisilicon/sec/sec_drv.c3
-rw-r--r--drivers/crypto/hisilicon/sec2/sec.h63
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c585
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c229
-rw-r--r--drivers/crypto/hisilicon/sgl.c20
-rw-r--r--drivers/crypto/hisilicon/zip/dae_main.c19
-rw-r--r--drivers/crypto/hisilicon/zip/zip_crypto.c13
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c234
-rw-r--r--drivers/crypto/img-hash.c10
-rw-r--r--drivers/crypto/inside-secure/safexcel.c1
-rw-r--r--drivers/crypto/inside-secure/safexcel.h1
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c100
-rw-r--r--drivers/crypto/intel/iaa/iaa_crypto_main.c2
-rw-r--r--drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c13
-rw-r--r--drivers/crypto/intel/keembay/ocs-aes.c4
-rw-r--r--drivers/crypto/intel/qat/Kconfig7
-rw-r--r--drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c18
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c14
-rw-r--r--drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c129
-rw-r--r--drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h22
-rw-r--r--drivers/crypto/intel/qat/qat_common/Makefile4
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_accel_devices.h40
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_aer.c8
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_bank_state.c238
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_bank_state.h49
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_common.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_services.c45
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_services.h13
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_common_drv.h2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c40
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c229
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h10
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c105
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c7
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h24
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c124
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c7
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c258
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen6_tl.h198
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_init.c1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_isr.c8
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c46
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.h36
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_rl.c86
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_rl.h11
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_rl_admin.c1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_sriov.c4
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_sysfs.c2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c21
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_telemetry.c19
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_telemetry.h5
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c55
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h5
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_transport_debug.c21
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_vf_isr.c3
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_algs.c203
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_bl.c6
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_compression.c8
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_uclo.c20
-rw-r--r--drivers/crypto/loongson/Kconfig5
-rw-r--r--drivers/crypto/loongson/Makefile1
-rw-r--r--drivers/crypto/loongson/loongson-rng.c209
-rw-r--r--drivers/crypto/marvell/cesa/cesa.c7
-rw-r--r--drivers/crypto/marvell/cesa/cipher.c4
-rw-r--r--drivers/crypto/marvell/cesa/hash.c10
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_common.h5
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c6
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h128
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptlf.h3
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c13
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c62
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c6
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c26
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c13
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c14
-rw-r--r--drivers/crypto/nx/nx-common-powernv.c6
-rw-r--r--drivers/crypto/nx/nx-common-pseries.c6
-rw-r--r--drivers/crypto/omap-aes-gcm.c1
-rw-r--r--drivers/crypto/omap-aes.c16
-rw-r--r--drivers/crypto/omap-aes.h2
-rw-r--r--drivers/crypto/omap-des.c18
-rw-r--r--drivers/crypto/omap-sham.c16
-rw-r--r--drivers/crypto/qce/core.c3
-rw-r--r--drivers/crypto/qce/dma.c6
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ahash.c2
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_skcipher.c3
-rw-r--r--drivers/crypto/starfive/jh7110-aes.c12
-rw-r--r--drivers/crypto/starfive/jh7110-hash.c25
-rw-r--r--drivers/crypto/stm32/Kconfig9
-rw-r--r--drivers/crypto/stm32/Makefile1
-rw-r--r--drivers/crypto/stm32/stm32-crc32.c480
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c3
-rw-r--r--drivers/crypto/stm32/stm32-hash.c1
-rw-r--r--drivers/crypto/tegra/tegra-se-hash.c3
-rw-r--r--drivers/crypto/tegra/tegra-se-main.c2
-rw-r--r--drivers/crypto/ti/Kconfig15
-rw-r--r--drivers/crypto/ti/Makefile3
-rw-r--r--drivers/crypto/ti/dthev2-aes.c538
-rw-r--r--drivers/crypto/ti/dthev2-common.c217
-rw-r--r--drivers/crypto/ti/dthev2-common.h109
-rw-r--r--drivers/crypto/virtio/virtio_crypto_common.h2
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c2
-rw-r--r--drivers/crypto/virtio/virtio_crypto_mgr.c36
-rw-r--r--drivers/crypto/xilinx/Makefile1
-rw-r--r--drivers/crypto/xilinx/xilinx-trng.c430
-rw-r--r--drivers/cxl/acpi.c132
-rw-r--r--drivers/cxl/core/Makefile1
-rw-r--r--drivers/cxl/core/acpi.c11
-rw-r--r--drivers/cxl/core/cdat.c46
-rw-r--r--drivers/cxl/core/core.h48
-rw-r--r--drivers/cxl/core/edac.c73
-rw-r--r--drivers/cxl/core/features.c5
-rw-r--r--drivers/cxl/core/hdm.c235
-rw-r--r--drivers/cxl/core/mbox.c37
-rw-r--r--drivers/cxl/core/mce.h2
-rw-r--r--drivers/cxl/core/memdev.c96
-rw-r--r--drivers/cxl/core/pci.c140
-rw-r--r--drivers/cxl/core/port.c349
-rw-r--r--drivers/cxl/core/ras.c47
-rw-r--r--drivers/cxl/core/region.c987
-rw-r--r--drivers/cxl/core/trace.h135
-rw-r--r--drivers/cxl/cxl.h102
-rw-r--r--drivers/cxl/cxlmem.h14
-rw-r--r--drivers/cxl/cxlpci.h3
-rw-r--r--drivers/cxl/pci.c4
-rw-r--r--drivers/cxl/port.c49
-rw-r--r--drivers/dax/device.c60
-rw-r--r--drivers/dax/hmem/hmem.c1
-rw-r--r--drivers/dax/kmem.c1
-rw-r--r--drivers/dax/pmem.c1
-rw-r--r--drivers/dax/super.c7
-rw-r--r--drivers/devfreq/Kconfig11
-rw-r--r--drivers/devfreq/Makefile1
-rw-r--r--drivers/devfreq/devfreq.c25
-rw-r--r--drivers/devfreq/event/rockchip-dfi.c110
-rw-r--r--drivers/devfreq/governor.h127
-rw-r--r--drivers/devfreq/governor_passive.c27
-rw-r--r--drivers/devfreq/governor_performance.c2
-rw-r--r--drivers/devfreq/governor_powersave.c2
-rw-r--r--drivers/devfreq/governor_simpleondemand.c6
-rw-r--r--drivers/devfreq/governor_userspace.c8
-rw-r--r--drivers/devfreq/hisi_uncore_freq.c658
-rw-r--r--drivers/devfreq/mtk-cci-devfreq.c5
-rw-r--r--drivers/devfreq/sun8i-a33-mbus.c38
-rw-r--r--drivers/devfreq/tegra30-devfreq.c15
-rw-r--r--drivers/dibs/Kconfig23
-rw-r--r--drivers/dibs/Makefile8
-rw-r--r--drivers/dibs/dibs_loopback.c361
-rw-r--r--drivers/dibs/dibs_loopback.h57
-rw-r--r--drivers/dibs/dibs_main.c274
-rw-r--r--drivers/dma-buf/Kconfig1
-rw-r--r--drivers/dma-buf/Makefile2
-rw-r--r--drivers/dma-buf/dma-buf-mapping.c248
-rw-r--r--drivers/dma-buf/dma-buf.c12
-rw-r--r--drivers/dma-buf/dma-fence-chain.c7
-rw-r--r--drivers/dma-buf/dma-fence.c205
-rw-r--r--drivers/dma-buf/dma-heap.c4
-rw-r--r--drivers/dma-buf/dma-resv.c12
-rw-r--r--drivers/dma-buf/heaps/cma_heap.c53
-rw-r--r--drivers/dma-buf/heaps/system_heap.c76
-rw-r--r--drivers/dma-buf/sw_sync.c6
-rw-r--r--drivers/dma-buf/sync_debug.c2
-rw-r--r--drivers/dma-buf/sync_file.c24
-rw-r--r--drivers/dma-buf/udmabuf.c28
-rw-r--r--drivers/dma/Kconfig18
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/at_hdmac.c6
-rw-r--r--drivers/dma/bcm2835-dma.c1
-rw-r--r--drivers/dma/cv1800b-dmamux.c259
-rw-r--r--drivers/dma/dma-axi-dmac.c2
-rw-r--r--drivers/dma/dmaengine.c30
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c34
-rw-r--r--drivers/dma/dw-edma/dw-edma-pcie.c60
-rw-r--r--drivers/dma/dw/platform.c5
-rw-r--r--drivers/dma/dw/rzn1-dmamux.c15
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpdmai.c5
-rw-r--r--drivers/dma/fsl-edma-common.c45
-rw-r--r--drivers/dma/fsl-edma-main.c1
-rw-r--r--drivers/dma/fsl-qdma.c4
-rw-r--r--drivers/dma/idxd/defaults.c6
-rw-r--r--drivers/dma/idxd/device.c19
-rw-r--r--drivers/dma/idxd/init.c42
-rw-r--r--drivers/dma/idxd/registers.h65
-rw-r--r--drivers/dma/imx-sdma.c2
-rw-r--r--drivers/dma/ioat/dma.h2
-rw-r--r--drivers/dma/ioat/hw.h3
-rw-r--r--drivers/dma/ioat/init.c1
-rw-r--r--drivers/dma/k3dma.c1
-rw-r--r--drivers/dma/mediatek/mtk-cqdma.c4
-rw-r--r--drivers/dma/mmp_pdma.c289
-rw-r--r--drivers/dma/mmp_tdma.c6
-rw-r--r--drivers/dma/mv_xor.c25
-rw-r--r--drivers/dma/nbpfaxi.c30
-rw-r--r--drivers/dma/ppc4xx/adma.c4
-rw-r--r--drivers/dma/qcom/bam_dma.c8
-rw-r--r--drivers/dma/qcom/gpi.c22
-rw-r--r--drivers/dma/sh/Kconfig4
-rw-r--r--drivers/dma/sh/rcar-dmac.c16
-rw-r--r--drivers/dma/sh/shdma-base.c25
-rw-r--r--drivers/dma/sh/shdmac.c17
-rw-r--r--drivers/dma/sh/usb-dmac.c11
-rw-r--r--drivers/dma/sprd-dma.c1
-rw-r--r--drivers/dma/st_fdma.c1
-rw-r--r--drivers/dma/stm32/stm32-dma.c12
-rw-r--r--drivers/dma/stm32/stm32-dma3.c10
-rw-r--r--drivers/dma/stm32/stm32-mdma.c8
-rw-r--r--drivers/dma/sun4i-dma.c46
-rw-r--r--drivers/dma/tegra210-adma.c1
-rw-r--r--drivers/dma/ti/Kconfig4
-rw-r--r--drivers/dma/ti/edma.c4
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c94
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c5
-rw-r--r--drivers/dpll/Kconfig6
-rw-r--r--drivers/dpll/Makefile2
-rw-r--r--drivers/dpll/dpll_core.c45
-rw-r--r--drivers/dpll/dpll_core.h3
-rw-r--r--drivers/dpll/dpll_netlink.c363
-rw-r--r--drivers/dpll/dpll_netlink.h2
-rw-r--r--drivers/dpll/dpll_nl.c17
-rw-r--r--drivers/dpll/dpll_nl.h2
-rw-r--r--drivers/dpll/zl3073x/Kconfig39
-rw-r--r--drivers/dpll/zl3073x/Makefile11
-rw-r--r--drivers/dpll/zl3073x/core.c1080
-rw-r--r--drivers/dpll/zl3073x/core.h367
-rw-r--r--drivers/dpll/zl3073x/devlink.c390
-rw-r--r--drivers/dpll/zl3073x/devlink.h15
-rw-r--r--drivers/dpll/zl3073x/dpll.c1938
-rw-r--r--drivers/dpll/zl3073x/dpll.h48
-rw-r--r--drivers/dpll/zl3073x/flash.c666
-rw-r--r--drivers/dpll/zl3073x/flash.h29
-rw-r--r--drivers/dpll/zl3073x/fw.c419
-rw-r--r--drivers/dpll/zl3073x/fw.h52
-rw-r--r--drivers/dpll/zl3073x/i2c.c76
-rw-r--r--drivers/dpll/zl3073x/out.c157
-rw-r--r--drivers/dpll/zl3073x/out.h93
-rw-r--r--drivers/dpll/zl3073x/prop.c369
-rw-r--r--drivers/dpll/zl3073x/prop.h34
-rw-r--r--drivers/dpll/zl3073x/ref.c204
-rw-r--r--drivers/dpll/zl3073x/ref.h134
-rw-r--r--drivers/dpll/zl3073x/regs.h317
-rw-r--r--drivers/dpll/zl3073x/spi.c76
-rw-r--r--drivers/dpll/zl3073x/synth.c87
-rw-r--r--drivers/dpll/zl3073x/synth.h72
-rw-r--r--drivers/edac/Kconfig36
-rw-r--r--drivers/edac/Makefile5
-rw-r--r--drivers/edac/a72_edac.c225
-rw-r--r--drivers/edac/altera_edac.c27
-rw-r--r--drivers/edac/amd64_edac.c117
-rw-r--r--drivers/edac/amd64_edac.h7
-rw-r--r--[-rwxr-xr-x]drivers/edac/ecs.c4
-rw-r--r--drivers/edac/edac_mc_sysfs.c380
-rw-r--r--drivers/edac/ghes_edac.c7
-rw-r--r--drivers/edac/i10nm_base.c60
-rw-r--r--drivers/edac/ie31200_edac.c48
-rw-r--r--drivers/edac/igen6_edac.c43
-rw-r--r--drivers/edac/imh_base.c602
-rw-r--r--[-rwxr-xr-x]drivers/edac/mem_repair.c57
-rw-r--r--[-rwxr-xr-x]drivers/edac/scrub.c1
-rw-r--r--drivers/edac/skx_base.c35
-rw-r--r--drivers/edac/skx_common.c91
-rw-r--r--drivers/edac/skx_common.h126
-rw-r--r--drivers/edac/synopsys_edac.c97
-rw-r--r--drivers/edac/versalnet_edac.c962
-rw-r--r--drivers/eisa/eisa-bus.c2
-rw-r--r--drivers/extcon/Kconfig13
-rw-r--r--drivers/extcon/Makefile1
-rw-r--r--drivers/extcon/extcon-adc-jack.c2
-rw-r--r--drivers/extcon/extcon-axp288.c2
-rw-r--r--drivers/extcon/extcon-fsa9480.c2
-rw-r--r--drivers/extcon/extcon-max14526.c302
-rw-r--r--drivers/extcon/extcon-qcom-spmi-misc.c2
-rw-r--r--drivers/firewire/core-card.c506
-rw-r--r--drivers/firewire/core-cdev.c41
-rw-r--r--drivers/firewire/core-device.c236
-rw-r--r--drivers/firewire/core-topology.c92
-rw-r--r--drivers/firewire/core-transaction.c306
-rw-r--r--drivers/firewire/core.h27
-rw-r--r--drivers/firewire/init_ohci1394_dma.c10
-rw-r--r--drivers/firewire/net.c4
-rw-r--r--drivers/firewire/ohci.c554
-rw-r--r--drivers/firmware/arm_ffa/driver.c110
-rw-r--r--drivers/firmware/arm_scmi/bus.c39
-rw-r--r--drivers/firmware/arm_scmi/common.h34
-rw-r--r--drivers/firmware/arm_scmi/driver.c83
-rw-r--r--drivers/firmware/arm_scmi/notify.c39
-rw-r--r--drivers/firmware/arm_scmi/perf.c2
-rw-r--r--drivers/firmware/arm_scmi/quirks.c15
-rw-r--r--drivers/firmware/arm_scmi/raw_mode.c6
-rw-r--r--drivers/firmware/arm_scmi/scmi_power_control.c22
-rw-r--r--drivers/firmware/arm_scmi/transports/mailbox.c7
-rw-r--r--drivers/firmware/arm_scmi/transports/optee.c2
-rw-r--r--drivers/firmware/arm_scmi/transports/virtio.c3
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c111
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/imx95.rst25
-rw-r--r--drivers/firmware/arm_scmi/voltage.c2
-rw-r--r--drivers/firmware/broadcom/bcm47xx_sprom.c2
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c214
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c1
-rw-r--r--drivers/firmware/efi/Kconfig11
-rw-r--r--drivers/firmware/efi/Makefile1
-rw-r--r--drivers/firmware/efi/arm-runtime.c4
-rw-r--r--drivers/firmware/efi/cper-arm.c52
-rw-r--r--drivers/firmware/efi/cper.c62
-rw-r--r--drivers/firmware/efi/efi-init.c29
-rw-r--r--drivers/firmware/efi/efi.c11
-rw-r--r--drivers/firmware/efi/libstub/Makefile12
-rw-r--r--drivers/firmware/efi/libstub/Makefile.zboot2
-rw-r--r--drivers/firmware/efi/libstub/efi-stub.c2
-rw-r--r--drivers/firmware/efi/libstub/efistub.h31
-rw-r--r--drivers/firmware/efi/libstub/gop.c137
-rw-r--r--drivers/firmware/efi/libstub/printk.c4
-rw-r--r--drivers/firmware/efi/libstub/x86-5lvl.c4
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c118
-rw-r--r--drivers/firmware/efi/libstub/zboot.lds6
-rw-r--r--drivers/firmware/efi/memattr.c7
-rw-r--r--drivers/firmware/efi/mokvar-table.c2
-rw-r--r--drivers/firmware/efi/ovmf-debug-log.c111
-rw-r--r--drivers/firmware/efi/riscv-runtime.c14
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c17
-rw-r--r--drivers/firmware/efi/stmm/mm_communication.h6
-rw-r--r--drivers/firmware/efi/stmm/tee_stmm_efi.c61
-rw-r--r--drivers/firmware/google/cbmem.c2
-rw-r--r--drivers/firmware/google/gsmi.c2
-rw-r--r--drivers/firmware/google/memconsole.c2
-rw-r--r--drivers/firmware/google/vpd.c4
-rw-r--r--drivers/firmware/imx/imx-scu-irq.c32
-rw-r--r--drivers/firmware/imx/imx-scu.c11
-rw-r--r--drivers/firmware/meson/Kconfig2
-rw-r--r--drivers/firmware/meson/meson_sm.c7
-rw-r--r--drivers/firmware/qcom/qcom_scm.c237
-rw-r--r--drivers/firmware/qcom/qcom_scm.h8
-rw-r--r--drivers/firmware/qcom/qcom_tzmem.c67
-rw-r--r--drivers/firmware/qemu_fw_cfg.c2
-rw-r--r--drivers/firmware/samsung/Makefile4
-rw-r--r--drivers/firmware/samsung/exynos-acpm-dvfs.c80
-rw-r--r--drivers/firmware/samsung/exynos-acpm-dvfs.h21
-rw-r--r--drivers/firmware/samsung/exynos-acpm-pmic.c25
-rw-r--r--drivers/firmware/samsung/exynos-acpm.c51
-rw-r--r--drivers/firmware/smccc/smccc.c5
-rw-r--r--drivers/firmware/stratix10-rsu.c279
-rw-r--r--drivers/firmware/stratix10-svc.c768
-rw-r--r--drivers/firmware/tegra/Kconfig5
-rw-r--r--drivers/firmware/tegra/Makefile1
-rw-r--r--drivers/firmware/tegra/bpmp-private.h6
-rw-r--r--drivers/firmware/tegra/bpmp-tegra186.c14
-rw-r--r--drivers/firmware/tegra/bpmp.c6
-rw-r--r--drivers/firmware/ti_sci.c204
-rw-r--r--drivers/firmware/ti_sci.h10
-rw-r--r--drivers/firmware/xilinx/Makefile2
-rw-r--r--drivers/firmware/xilinx/zynqmp-debug.c13
-rw-r--r--drivers/firmware/xilinx/zynqmp-ufs.c118
-rw-r--r--drivers/firmware/xilinx/zynqmp.c178
-rw-r--r--drivers/fpga/altera-cvp.c20
-rw-r--r--drivers/fpga/xilinx-spi.c7
-rw-r--r--drivers/fpga/zynq-fpga.c10
-rw-r--r--drivers/fsi/fsi-core.c6
-rw-r--r--drivers/fsi/fsi-master-ast-cf.c11
-rw-r--r--drivers/fsi/fsi-occ.c16
-rw-r--r--drivers/fwctl/mlx5/main.c9
-rw-r--r--drivers/fwctl/pds/main.c18
-rw-r--r--drivers/gnss/ubx.c8
-rw-r--r--drivers/gpib/Kconfig (renamed from drivers/staging/gpib/Kconfig)8
-rw-r--r--drivers/gpib/Makefile (renamed from drivers/staging/gpib/Makefile)2
-rw-r--r--drivers/gpib/TODO10
-rw-r--r--drivers/gpib/agilent_82350b/Makefile (renamed from drivers/staging/gpib/agilent_82350b/Makefile)0
-rw-r--r--drivers/gpib/agilent_82350b/agilent_82350b.c (renamed from drivers/staging/gpib/agilent_82350b/agilent_82350b.c)12
-rw-r--r--drivers/gpib/agilent_82350b/agilent_82350b.h (renamed from drivers/staging/gpib/agilent_82350b/agilent_82350b.h)0
-rw-r--r--drivers/gpib/agilent_82357a/Makefile (renamed from drivers/staging/gpib/agilent_82357a/Makefile)0
-rw-r--r--drivers/gpib/agilent_82357a/agilent_82357a.c (renamed from drivers/staging/gpib/agilent_82357a/agilent_82357a.c)18
-rw-r--r--drivers/gpib/agilent_82357a/agilent_82357a.h (renamed from drivers/staging/gpib/agilent_82357a/agilent_82357a.h)10
-rw-r--r--drivers/gpib/cb7210/Makefile (renamed from drivers/staging/gpib/cb7210/Makefile)0
-rw-r--r--drivers/gpib/cb7210/cb7210.c (renamed from drivers/staging/gpib/cb7210/cb7210.c)27
-rw-r--r--drivers/gpib/cb7210/cb7210.h (renamed from drivers/staging/gpib/cb7210/cb7210.h)4
-rw-r--r--drivers/gpib/cec/Makefile (renamed from drivers/staging/gpib/cec/Makefile)0
-rw-r--r--drivers/gpib/cec/cec.h (renamed from drivers/staging/gpib/cec/cec.h)0
-rw-r--r--drivers/gpib/cec/cec_gpib.c (renamed from drivers/staging/gpib/cec/cec_gpib.c)4
-rw-r--r--drivers/gpib/common/Makefile (renamed from drivers/staging/gpib/common/Makefile)0
-rw-r--r--drivers/gpib/common/gpib_os.c (renamed from drivers/staging/gpib/common/gpib_os.c)8
-rw-r--r--drivers/gpib/common/iblib.c (renamed from drivers/staging/gpib/common/iblib.c)2
-rw-r--r--drivers/gpib/common/ibsys.h (renamed from drivers/staging/gpib/common/ibsys.h)0
-rw-r--r--drivers/gpib/eastwood/Makefile (renamed from drivers/staging/gpib/eastwood/Makefile)0
-rw-r--r--drivers/gpib/eastwood/fluke_gpib.c (renamed from drivers/staging/gpib/eastwood/fluke_gpib.c)2
-rw-r--r--drivers/gpib/eastwood/fluke_gpib.h (renamed from drivers/staging/gpib/eastwood/fluke_gpib.h)0
-rw-r--r--drivers/gpib/fmh_gpib/Makefile (renamed from drivers/staging/gpib/fmh_gpib/Makefile)0
-rw-r--r--drivers/gpib/fmh_gpib/fmh_gpib.c (renamed from drivers/staging/gpib/fmh_gpib/fmh_gpib.c)7
-rw-r--r--drivers/gpib/fmh_gpib/fmh_gpib.h (renamed from drivers/staging/gpib/fmh_gpib/fmh_gpib.h)0
-rw-r--r--drivers/gpib/gpio/Makefile (renamed from drivers/staging/gpib/gpio/Makefile)0
-rw-r--r--drivers/gpib/gpio/gpib_bitbang.c (renamed from drivers/staging/gpib/gpio/gpib_bitbang.c)18
-rw-r--r--drivers/gpib/hp_82335/Makefile (renamed from drivers/staging/gpib/hp_82335/Makefile)0
-rw-r--r--drivers/gpib/hp_82335/hp82335.c (renamed from drivers/staging/gpib/hp_82335/hp82335.c)0
-rw-r--r--drivers/gpib/hp_82335/hp82335.h (renamed from drivers/staging/gpib/hp_82335/hp82335.h)0
-rw-r--r--drivers/gpib/hp_82341/Makefile (renamed from drivers/staging/gpib/hp_82341/Makefile)0
-rw-r--r--drivers/gpib/hp_82341/hp_82341.c (renamed from drivers/staging/gpib/hp_82341/hp_82341.c)22
-rw-r--r--drivers/gpib/hp_82341/hp_82341.h (renamed from drivers/staging/gpib/hp_82341/hp_82341.h)40
-rw-r--r--drivers/gpib/include/amcc5920.h (renamed from drivers/staging/gpib/include/amcc5920.h)0
-rw-r--r--drivers/gpib/include/amccs5933.h (renamed from drivers/staging/gpib/include/amccs5933.h)4
-rw-r--r--drivers/gpib/include/gpibP.h (renamed from drivers/staging/gpib/include/gpibP.h)5
-rw-r--r--drivers/gpib/include/gpib_cmd.h112
-rw-r--r--drivers/gpib/include/gpib_pci_ids.h (renamed from drivers/staging/gpib/include/gpib_pci_ids.h)0
-rw-r--r--drivers/gpib/include/gpib_proto.h (renamed from drivers/staging/gpib/include/gpib_proto.h)0
-rw-r--r--drivers/gpib/include/gpib_state_machines.h (renamed from drivers/staging/gpib/include/gpib_state_machines.h)0
-rw-r--r--drivers/gpib/include/gpib_types.h (renamed from drivers/staging/gpib/include/gpib_types.h)5
-rw-r--r--drivers/gpib/include/nec7210.h (renamed from drivers/staging/gpib/include/nec7210.h)26
-rw-r--r--drivers/gpib/include/nec7210_registers.h (renamed from drivers/staging/gpib/include/nec7210_registers.h)4
-rw-r--r--drivers/gpib/include/plx9050.h (renamed from drivers/staging/gpib/include/plx9050.h)8
-rw-r--r--drivers/gpib/include/quancom_pci.h (renamed from drivers/staging/gpib/include/quancom_pci.h)0
-rw-r--r--drivers/gpib/include/tms9914.h (renamed from drivers/staging/gpib/include/tms9914.h)90
-rw-r--r--drivers/gpib/include/tnt4882_registers.h (renamed from drivers/staging/gpib/include/tnt4882_registers.h)22
-rw-r--r--drivers/gpib/ines/Makefile (renamed from drivers/staging/gpib/ines/Makefile)0
-rw-r--r--drivers/gpib/ines/ines.h (renamed from drivers/staging/gpib/ines/ines.h)12
-rw-r--r--drivers/gpib/ines/ines_gpib.c (renamed from drivers/staging/gpib/ines/ines_gpib.c)4
-rw-r--r--drivers/gpib/lpvo_usb_gpib/Makefile (renamed from drivers/staging/gpib/lpvo_usb_gpib/Makefile)0
-rw-r--r--drivers/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c (renamed from drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c)1
-rw-r--r--drivers/gpib/nec7210/Makefile (renamed from drivers/staging/gpib/nec7210/Makefile)0
-rw-r--r--drivers/gpib/nec7210/board.h (renamed from drivers/staging/gpib/nec7210/board.h)0
-rw-r--r--drivers/gpib/nec7210/nec7210.c (renamed from drivers/staging/gpib/nec7210/nec7210.c)6
-rw-r--r--drivers/gpib/ni_usb/Makefile (renamed from drivers/staging/gpib/ni_usb/Makefile)0
-rw-r--r--drivers/gpib/ni_usb/ni_usb_gpib.c (renamed from drivers/staging/gpib/ni_usb/ni_usb_gpib.c)35
-rw-r--r--drivers/gpib/ni_usb/ni_usb_gpib.h (renamed from drivers/staging/gpib/ni_usb/ni_usb_gpib.h)10
-rw-r--r--drivers/gpib/pc2/Makefile (renamed from drivers/staging/gpib/pc2/Makefile)0
-rw-r--r--drivers/gpib/pc2/pc2_gpib.c (renamed from drivers/staging/gpib/pc2/pc2_gpib.c)4
-rw-r--r--drivers/gpib/tms9914/Makefile (renamed from drivers/staging/gpib/tms9914/Makefile)0
-rw-r--r--drivers/gpib/tms9914/tms9914.c (renamed from drivers/staging/gpib/tms9914/tms9914.c)12
-rw-r--r--drivers/gpib/tnt4882/Makefile (renamed from drivers/staging/gpib/tnt4882/Makefile)0
-rw-r--r--drivers/gpib/tnt4882/mite.c (renamed from drivers/staging/gpib/tnt4882/mite.c)0
-rw-r--r--drivers/gpib/tnt4882/mite.h (renamed from drivers/staging/gpib/tnt4882/mite.h)10
-rw-r--r--drivers/gpib/tnt4882/tnt4882_gpib.c (renamed from drivers/staging/gpib/tnt4882/tnt4882_gpib.c)5
-rw-r--r--drivers/gpio/Kconfig120
-rw-r--r--drivers/gpio/Makefile9
-rw-r--r--drivers/gpio/TODO37
-rw-r--r--drivers/gpio/gpio-104-idio-16.c1
-rw-r--r--drivers/gpio/gpio-74x164.c4
-rw-r--r--drivers/gpio/gpio-74xx-mmio.c32
-rw-r--r--drivers/gpio/gpio-adnp.c2
-rw-r--r--drivers/gpio/gpio-adp5520.c2
-rw-r--r--drivers/gpio/gpio-adp5585.c366
-rw-r--r--drivers/gpio/gpio-aggregator.c397
-rw-r--r--drivers/gpio/gpio-altera-a10sr.c2
-rw-r--r--drivers/gpio/gpio-altera.c2
-rw-r--r--drivers/gpio/gpio-amd-fch.c2
-rw-r--r--drivers/gpio/gpio-amd8111.c2
-rw-r--r--drivers/gpio/gpio-amdpt.c44
-rw-r--r--drivers/gpio/gpio-arizona.c4
-rw-r--r--drivers/gpio/gpio-aspeed-sgpio.c2
-rw-r--r--drivers/gpio/gpio-aspeed.c14
-rw-r--r--drivers/gpio/gpio-ath79.c88
-rw-r--r--drivers/gpio/gpio-bcm-kona.c2
-rw-r--r--drivers/gpio/gpio-bd71815.c2
-rw-r--r--drivers/gpio/gpio-bd71828.c2
-rw-r--r--drivers/gpio/gpio-bd9571mwv.c2
-rw-r--r--drivers/gpio/gpio-blzp1600.c39
-rw-r--r--drivers/gpio/gpio-brcmstb.c136
-rw-r--r--drivers/gpio/gpio-bt8xx.c32
-rw-r--r--drivers/gpio/gpio-cadence.c59
-rw-r--r--drivers/gpio/gpio-cgbc.c2
-rw-r--r--drivers/gpio/gpio-clps711x.c28
-rw-r--r--drivers/gpio/gpio-creg-snps.c2
-rw-r--r--drivers/gpio/gpio-cros-ec.c2
-rw-r--r--drivers/gpio/gpio-crystalcove.c2
-rw-r--r--drivers/gpio/gpio-cs5535.c2
-rw-r--r--drivers/gpio/gpio-da9052.c2
-rw-r--r--drivers/gpio/gpio-da9055.c2
-rw-r--r--drivers/gpio/gpio-davinci.c4
-rw-r--r--drivers/gpio/gpio-dln2.c2
-rw-r--r--drivers/gpio/gpio-dwapb.c180
-rw-r--r--drivers/gpio/gpio-eic-sprd.c2
-rw-r--r--drivers/gpio/gpio-elkhartlake.c36
-rw-r--r--drivers/gpio/gpio-em.c5
-rw-r--r--drivers/gpio/gpio-en7523.c36
-rw-r--r--drivers/gpio/gpio-ep93xx.c33
-rw-r--r--drivers/gpio/gpio-exar.c2
-rw-r--r--drivers/gpio/gpio-f7188x.c2
-rw-r--r--drivers/gpio/gpio-ftgpio010.c46
-rw-r--r--drivers/gpio/gpio-fxl6408.c13
-rw-r--r--drivers/gpio/gpio-ge.c25
-rw-r--r--drivers/gpio/gpio-graniterapids.c2
-rw-r--r--drivers/gpio/gpio-grgpio.c100
-rw-r--r--drivers/gpio/gpio-gw-pld.c2
-rw-r--r--drivers/gpio/gpio-hisi.c48
-rw-r--r--drivers/gpio/gpio-hlwd.c105
-rw-r--r--drivers/gpio/gpio-htc-egpio.c23
-rw-r--r--drivers/gpio/gpio-ich.c2
-rw-r--r--drivers/gpio/gpio-idio-16.c5
-rw-r--r--drivers/gpio/gpio-idt3243x.c45
-rw-r--r--drivers/gpio/gpio-imx-scu.c2
-rw-r--r--drivers/gpio/gpio-it87.c2
-rw-r--r--drivers/gpio/gpio-ixp4xx.c72
-rw-r--r--drivers/gpio/gpio-janz-ttl.c2
-rw-r--r--drivers/gpio/gpio-kempld.c2
-rw-r--r--drivers/gpio/gpio-latch.c6
-rw-r--r--drivers/gpio/gpio-ljca.c16
-rw-r--r--drivers/gpio/gpio-logicvc.c2
-rw-r--r--drivers/gpio/gpio-loongson-64bit.c245
-rw-r--r--drivers/gpio/gpio-loongson.c2
-rw-r--r--drivers/gpio/gpio-loongson1.c40
-rw-r--r--drivers/gpio/gpio-lp3943.c2
-rw-r--r--drivers/gpio/gpio-lp873x.c2
-rw-r--r--drivers/gpio/gpio-lp87565.c2
-rw-r--r--drivers/gpio/gpio-lpc18xx.c6
-rw-r--r--drivers/gpio/gpio-lpc32xx.c10
-rw-r--r--drivers/gpio/gpio-macsmc.c292
-rw-r--r--drivers/gpio/gpio-madera.c2
-rw-r--r--drivers/gpio/gpio-max730x.c2
-rw-r--r--drivers/gpio/gpio-max732x.c4
-rw-r--r--drivers/gpio/gpio-max7360.c257
-rw-r--r--drivers/gpio/gpio-max77620.c2
-rw-r--r--drivers/gpio/gpio-max77650.c2
-rw-r--r--drivers/gpio/gpio-max77759.c2
-rw-r--r--drivers/gpio/gpio-mb86s7x.c2
-rw-r--r--drivers/gpio/gpio-mc33880.c2
-rw-r--r--drivers/gpio/gpio-menz127.c55
-rw-r--r--drivers/gpio/gpio-ml-ioh.c14
-rw-r--r--drivers/gpio/gpio-mlxbf.c25
-rw-r--r--drivers/gpio/gpio-mlxbf2.c91
-rw-r--r--drivers/gpio/gpio-mlxbf3.c105
-rw-r--r--drivers/gpio/gpio-mm-lantiq.c67
-rw-r--r--drivers/gpio/gpio-mmio.c659
-rw-r--r--drivers/gpio/gpio-mockup.c4
-rw-r--r--drivers/gpio/gpio-moxtet.c14
-rw-r--r--drivers/gpio/gpio-mpc5200.c86
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c105
-rw-r--r--drivers/gpio/gpio-mpfs.c11
-rw-r--r--drivers/gpio/gpio-mpsse.c247
-rw-r--r--drivers/gpio/gpio-msc313.c12
-rw-r--r--drivers/gpio/gpio-mt7621.c80
-rw-r--r--drivers/gpio/gpio-mvebu.c12
-rw-r--r--drivers/gpio/gpio-mxc.c101
-rw-r--r--drivers/gpio/gpio-mxs.c33
-rw-r--r--drivers/gpio/gpio-nct6694.c499
-rw-r--r--drivers/gpio/gpio-nomadik.c33
-rw-r--r--drivers/gpio/gpio-npcm-sgpio.c8
-rw-r--r--drivers/gpio/gpio-octeon.c5
-rw-r--r--drivers/gpio/gpio-omap.c25
-rw-r--r--drivers/gpio/gpio-palmas.c24
-rw-r--r--drivers/gpio/gpio-pca953x.c188
-rw-r--r--drivers/gpio/gpio-pca9570.c3
-rw-r--r--drivers/gpio/gpio-pcf857x.c13
-rw-r--r--drivers/gpio/gpio-pch.c16
-rw-r--r--drivers/gpio/gpio-pci-idio-16.c1
-rw-r--r--drivers/gpio/gpio-pisosr.c16
-rw-r--r--drivers/gpio/gpio-pl061.c21
-rw-r--r--drivers/gpio/gpio-pmic-eic-sprd.c7
-rw-r--r--drivers/gpio/gpio-pxa.c29
-rw-r--r--drivers/gpio/gpio-qixis-fpga.c111
-rw-r--r--drivers/gpio/gpio-raspberrypi-exp.c8
-rw-r--r--drivers/gpio/gpio-rc5t583.c17
-rw-r--r--drivers/gpio/gpio-rcar.c31
-rw-r--r--drivers/gpio/gpio-rda.c35
-rw-r--r--drivers/gpio/gpio-rdc321x.c6
-rw-r--r--drivers/gpio/gpio-realtek-otto.c41
-rw-r--r--drivers/gpio/gpio-reg.c10
-rw-r--r--drivers/gpio/gpio-regmap.c78
-rw-r--r--drivers/gpio/gpio-rockchip.c12
-rw-r--r--drivers/gpio/gpio-rtd.c4
-rw-r--r--drivers/gpio/gpio-sa1100.c17
-rw-r--r--drivers/gpio/gpio-sama5d2-piobu.c6
-rw-r--r--drivers/gpio/gpio-sch.c7
-rw-r--r--drivers/gpio/gpio-sch311x.c6
-rw-r--r--drivers/gpio/gpio-shared-proxy.c334
-rw-r--r--drivers/gpio/gpio-sifive.c74
-rw-r--r--drivers/gpio/gpio-sim.c90
-rw-r--r--drivers/gpio/gpio-siox.c9
-rw-r--r--drivers/gpio/gpio-sloppy-logic-analyzer.c2
-rw-r--r--drivers/gpio/gpio-sodaville.c24
-rw-r--r--drivers/gpio/gpio-spacemit-k1.c30
-rw-r--r--drivers/gpio/gpio-spear-spics.c19
-rw-r--r--drivers/gpio/gpio-sprd.c6
-rw-r--r--drivers/gpio/gpio-stmpe.c47
-rw-r--r--drivers/gpio/gpio-stp-xway.c8
-rw-r--r--drivers/gpio/gpio-syscon.c31
-rw-r--r--drivers/gpio/gpio-tangier.c4
-rw-r--r--drivers/gpio/gpio-tb10x.c94
-rw-r--r--drivers/gpio/gpio-tc3589x.c9
-rw-r--r--drivers/gpio/gpio-tegra.c6
-rw-r--r--drivers/gpio/gpio-tegra186.c235
-rw-r--r--drivers/gpio/gpio-thunderx.c14
-rw-r--r--drivers/gpio/gpio-timberdale.c7
-rw-r--r--drivers/gpio/gpio-tpic2810.c23
-rw-r--r--drivers/gpio/gpio-tps65086.c14
-rw-r--r--drivers/gpio/gpio-tps65218.c29
-rw-r--r--drivers/gpio/gpio-tps65219.c122
-rw-r--r--drivers/gpio/gpio-tps6586x.c13
-rw-r--r--drivers/gpio/gpio-tps65910.c19
-rw-r--r--drivers/gpio/gpio-tps65912.c15
-rw-r--r--drivers/gpio/gpio-tps68470.c12
-rw-r--r--drivers/gpio/gpio-tqmx86.c15
-rw-r--r--drivers/gpio/gpio-ts4800.c39
-rw-r--r--drivers/gpio/gpio-ts4900.c12
-rw-r--r--drivers/gpio/gpio-ts5500.c4
-rw-r--r--drivers/gpio/gpio-twl4030.c27
-rw-r--r--drivers/gpio/gpio-twl6040.c21
-rw-r--r--drivers/gpio/gpio-uniphier.c21
-rw-r--r--drivers/gpio/gpio-usbio.c248
-rw-r--r--drivers/gpio/gpio-vf610.c31
-rw-r--r--drivers/gpio/gpio-viperboard.c126
-rw-r--r--drivers/gpio/gpio-virtio.c14
-rw-r--r--drivers/gpio/gpio-virtuser.c12
-rw-r--r--drivers/gpio/gpio-visconti.c25
-rw-r--r--drivers/gpio/gpio-vx855.c7
-rw-r--r--drivers/gpio/gpio-wcd934x.c16
-rw-r--r--drivers/gpio/gpio-wcove.c9
-rw-r--r--drivers/gpio/gpio-winbond.c14
-rw-r--r--drivers/gpio/gpio-wm831x.c16
-rw-r--r--drivers/gpio/gpio-wm8350.c13
-rw-r--r--drivers/gpio/gpio-wm8994.c12
-rw-r--r--drivers/gpio/gpio-xgene-sb.c58
-rw-r--r--drivers/gpio/gpio-xgene.c12
-rw-r--r--drivers/gpio/gpio-xgs-iproc.c34
-rw-r--r--drivers/gpio/gpio-xilinx.c25
-rw-r--r--drivers/gpio/gpio-xlp.c8
-rw-r--r--drivers/gpio/gpio-xra1403.c14
-rw-r--r--drivers/gpio/gpio-xtensa.c11
-rw-r--r--drivers/gpio/gpio-zevio.c4
-rw-r--r--drivers/gpio/gpio-zynq.c21
-rw-r--r--drivers/gpio/gpio-zynqmp-modepin.c8
-rw-r--r--drivers/gpio/gpiolib-acpi-core.c34
-rw-r--r--drivers/gpio/gpiolib-acpi-quirks.c39
-rw-r--r--drivers/gpio/gpiolib-cdev.c183
-rw-r--r--drivers/gpio/gpiolib-devres.c2
-rw-r--r--drivers/gpio/gpiolib-legacy.c82
-rw-r--r--drivers/gpio/gpiolib-of.c83
-rw-r--r--drivers/gpio/gpiolib-of.h2
-rw-r--r--drivers/gpio/gpiolib-shared.c656
-rw-r--r--drivers/gpio/gpiolib-shared.h71
-rw-r--r--drivers/gpio/gpiolib-swnode.c5
-rw-r--r--drivers/gpio/gpiolib-sysfs.c720
-rw-r--r--drivers/gpio/gpiolib.c512
-rw-r--r--drivers/gpio/gpiolib.h88
-rw-r--r--drivers/gpu/drm/Kconfig4
-rw-r--r--drivers/gpu/drm/Kconfig.debug1
-rw-r--r--drivers/gpu/drm/Makefile18
-rw-r--r--drivers/gpu/drm/adp/adp-mipi.c8
-rw-r--r--drivers/gpu/drm/adp/adp_drv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Kconfig24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aldebaran.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h129
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c122
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c123
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c130
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c94
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c1300
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c393
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c309
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c299
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c245
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c106
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c267
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c96
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c177
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c142
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c119
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c209
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c771
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c545
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c192
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c314
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c148
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c996
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c179
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c280
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c400
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c267
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h60
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c110
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c257
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c321
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c3823
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c101
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c81
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c81
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c106
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c85
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c94
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c101
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c351
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_1.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v7_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v12_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c186
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_userqueue.c160
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c128
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v14_0.c72
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c67
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sid.h40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v1_0.c839
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v1_0.h (renamed from drivers/gpu/drm/amd/amdgpu/dce_v11_0.h)14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c127
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c159
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c144
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c201
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c152
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c131
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c138
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c179
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c44
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c179
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c98
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c13
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c101
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c61
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h16
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c20
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_queue.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c107
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c13
-rw-r--r--drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c57
-rw-r--r--drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h1
-rw-r--r--drivers/gpu/drm/amd/display/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c1262
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h36
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c858
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c209
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h36
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c90
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c45
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c37
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c15
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c18
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c129
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c49
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c8
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h27
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/vector.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c100
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c288
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c144
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c89
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c157
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c154
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c130
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c1425
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c2881
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c196
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stat.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_state.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c142
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h742
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c603
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h146
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h72
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_spl_translate.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h63
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h64
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c123
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c124
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c67
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c93
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c48
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c239
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/Makefile140
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h66
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/Makefile140
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h)18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h)4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c)406
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h)4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c)14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c)67
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h)13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h)4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h)25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h)18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h)29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c)209
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c)4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h)230
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c)4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c)3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c)21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c)477
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h189
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h)86
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c)9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c)23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c)267
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c67
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c200
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dsc.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h160
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c69
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c121
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c71
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c67
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c137
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c176
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c62
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c75
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c289
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c1787
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h111
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h1453
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_status.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h123
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h56
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h57
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h100
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h131
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_service.h (renamed from drivers/gpu/drm/amd/display/dc/inc/link.h)21
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c77
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c182
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c49
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.c126
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c172
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c328
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h41
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c207
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c131
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c65
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c127
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c94
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c47
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c65
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c62
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c45
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c46
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c45
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c68
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile19
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c304
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h49
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h1199
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c7
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c75
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h8
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c68
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c22
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c38
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c8
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_types.h11
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h5
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h16
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h7
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h12
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c25
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c11
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h3
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c13
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c87
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c61
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c122
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h6
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c3
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h149
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c33
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h7
-rw-r--r--drivers/gpu/drm/amd/include/amd_cper.h2
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h101
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h7
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h5
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h10
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h30
-rw-r--r--drivers/gpu/drm/amd/include/dm_pp_interface.h1
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h2
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h212
-rw-r--r--drivers/gpu/drm/amd/include/mes_v11_api_def.h6
-rw-r--r--drivers/gpu/drm/amd/include/mes_v12_api_def.h36
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c134
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c86
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c432
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h14
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h6
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c76
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c127
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h7
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c169
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h557
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c26
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c21
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c7
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c7
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c309
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h159
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h101
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h23
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h18
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_0_pptable.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c21
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c75
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c46
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c63
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c46
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c74
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c58
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c132
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c650
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c572
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h178
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c112
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c11
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c65
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c146
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c40
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h136
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_internal.h1
-rw-r--r--drivers/gpu/drm/amd/ras/Makefile34
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/Makefile33
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c285
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h54
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c182
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h27
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c648
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h83
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c94
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h30
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c125
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h30
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c190
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h41
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c279
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h110
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/Makefile44
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras.h370
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca.c672
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca.h164
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c379
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h71
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_cmd.c522
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_cmd.h426
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_core.c603
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_core_status.h37
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_cper.c315
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_cper.h304
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c1339
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h197
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_gfx.c70
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_gfx.h43
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c426
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h259
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c317
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h93
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_mp1.c81
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_mp1.h50
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c105
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h30
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_nbio.c96
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_nbio.h46
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c123
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h31
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_process.c322
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_process.h53
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_psp.c750
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_psp.h145
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c46
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h31
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h231
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_umc.c707
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_umc.h166
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c511
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h314
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c31
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c4
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h1
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c24
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c1
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c13
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c1
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c4
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c1
-rw-r--r--drivers/gpu/drm/armada/armada_debugfs.c1
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c13
-rw-r--r--drivers/gpu/drm/armada/armada_fb.h4
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c20
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c1
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c1
-rw-r--r--drivers/gpu/drm/armada/armada_plane.c8
-rw-r--r--drivers/gpu/drm/ast/Makefile8
-rw-r--r--drivers/gpu/drm/ast/ast_2000.c257
-rw-r--r--drivers/gpu/drm/ast/ast_2100.c480
-rw-r--r--drivers/gpu/drm/ast/ast_2200.c92
-rw-r--r--drivers/gpu/drm/ast/ast_2300.c1463
-rw-r--r--drivers/gpu/drm/ast/ast_2400.c100
-rw-r--r--drivers/gpu/drm/ast/ast_2500.c675
-rw-r--r--drivers/gpu/drm/ast/ast_2600.c116
-rw-r--r--drivers/gpu/drm/ast/ast_dp.c2
-rw-r--r--drivers/gpu/drm/ast/ast_dram_tables.h207
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c71
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h143
-rw-r--r--drivers/gpu/drm/ast/ast_main.c394
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c146
-rw-r--r--drivers/gpu/drm/ast/ast_post.c2027
-rw-r--r--drivers/gpu/drm/ast/ast_post.h50
-rw-r--r--drivers/gpu/drm/ast/ast_reg.h1
-rw-r--r--drivers/gpu/drm/ast/ast_tables.h60
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c21
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c15
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h3
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c3
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c53
-rw-r--r--drivers/gpu/drm/bridge/Kconfig29
-rw-r--r--drivers/gpu/drm/bridge/Makefile2
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Kconfig5
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h59
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c98
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_cec.c57
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c419
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7533.c9
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c9
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c2
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c45
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.h3
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c16
-rw-r--r--drivers/gpu/drm/bridge/aux-bridge.c11
-rw-r--r--drivers/gpu/drm/bridge/aux-hpd-bridge.c13
-rw-r--r--drivers/gpu/drm/bridge/cadence/Kconfig1
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c283
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c17
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c8
-rw-r--r--drivers/gpu/drm/bridge/chrontel-ch7033.c10
-rw-r--r--drivers/gpu/drm/bridge/cros-ec-anx7688.c8
-rw-r--r--drivers/gpu/drm/bridge/display-connector.c18
-rw-r--r--drivers/gpu/drm/bridge/fsl-ldb.c7
-rw-r--r--drivers/gpu/drm/bridge/imx/Kconfig11
-rw-r--r--drivers/gpu/drm/bridge/imx/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c10
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c158
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c8
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c65
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c7
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c27
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c8
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c8
-rw-r--r--drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c12
-rw-r--r--drivers/gpu/drm/bridge/ite-it6263.c75
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c43
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c79
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c14
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9211.c10
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c23
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c3
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c9
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c16
-rw-r--r--drivers/gpu/drm/bridge/microchip-lvds.c8
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c8
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c9
-rw-r--r--drivers/gpu/drm/bridge/panel.c18
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c8
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c8
-rw-r--r--drivers/gpu/drm/bridge/samsung-dsim.c427
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c30
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c8
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c8
-rw-r--r--drivers/gpu/drm/bridge/simple-bridge.c30
-rw-r--r--drivers/gpu/drm/bridge/ssd2825.c775
-rw-r--r--drivers/gpu/drm/bridge/synopsys/Kconfig15
-rw-r--r--drivers/gpu/drm/bridge/synopsys/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-dp.c2097
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c5
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c258
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h14
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c22
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c9
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c9
-rw-r--r--drivers/gpu/drm/bridge/tc358762.c8
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c8
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c62
-rw-r--r--drivers/gpu/drm/bridge/tc358768.c8
-rw-r--r--drivers/gpu/drm/bridge/tc358775.c8
-rw-r--r--drivers/gpu/drm/bridge/thc63lvd1024.c8
-rw-r--r--drivers/gpu/drm/bridge/ti-dlpc3433.c8
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c263
-rw-r--r--drivers/gpu/drm/bridge/ti-tdp158.c8
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c10
-rw-r--r--drivers/gpu/drm/bridge/ti-tpd12s015.c16
-rw-r--r--drivers/gpu/drm/bridge/waveshare-dsi.c203
-rw-r--r--drivers/gpu/drm/ci/build-igt.sh2
-rw-r--r--drivers/gpu/drm/ci/build.sh17
-rw-r--r--drivers/gpu/drm/ci/build.yml10
-rw-r--r--drivers/gpu/drm/ci/check-devicetrees.yml50
-rw-r--r--drivers/gpu/drm/ci/container.yml30
-rwxr-xr-xdrivers/gpu/drm/ci/dt-binding-check.sh19
-rwxr-xr-xdrivers/gpu/drm/ci/dtbs-check.sh22
-rw-r--r--drivers/gpu/drm/ci/gitlab-ci.yml58
-rwxr-xr-xdrivers/gpu/drm/ci/igt_runner.sh1
-rw-r--r--drivers/gpu/drm/ci/image-tags.yml20
-rwxr-xr-xdrivers/gpu/drm/ci/kunit.sh16
-rw-r--r--drivers/gpu/drm/ci/kunit.yml37
-rwxr-xr-xdrivers/gpu/drm/ci/lava-submit.sh6
-rwxr-xr-xdrivers/gpu/drm/ci/setup-llvm-links.sh13
-rw-r--r--drivers/gpu/drm/ci/test.yml47
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt29
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt139
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt350
-rw-r--r--drivers/gpu/drm/clients/drm_client_setup.c11
-rw-r--r--drivers/gpu/drm/clients/drm_fbdev_client.c37
-rw-r--r--drivers/gpu/drm/clients/drm_log.c43
-rw-r--r--drivers/gpu/drm/display/Kconfig13
-rw-r--r--drivers/gpu/drm/display/Makefile4
-rw-r--r--drivers/gpu/drm/display/drm_bridge_connector.c214
-rw-r--r--drivers/gpu/drm/display/drm_dp_aux_bus.c3
-rw-r--r--drivers/gpu/drm/display/drm_dp_cec.c3
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c373
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c1
-rw-r--r--drivers/gpu/drm/display/drm_dp_tunnel.c3
-rw-r--r--drivers/gpu/drm/display/drm_dsc_helper.c1
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_audio_helper.c4
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_cec_helper.c193
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c65
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_helper.c3
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_state_helper.c129
-rw-r--r--drivers/gpu/drm/display/drm_scdc_helper.c1
-rw-r--r--drivers/gpu/drm/drm_atomic.c227
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c192
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c6
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c184
-rw-r--r--drivers/gpu/drm/drm_auth.c65
-rw-r--r--drivers/gpu/drm/drm_bridge.c122
-rw-r--r--drivers/gpu/drm/drm_bridge_helper.c2
-rw-r--r--drivers/gpu/drm/drm_buddy.c415
-rw-r--r--drivers/gpu/drm/drm_cache.c9
-rw-r--r--drivers/gpu/drm/drm_client.c228
-rw-r--r--drivers/gpu/drm/drm_client_event.c30
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c46
-rw-r--r--drivers/gpu/drm/drm_client_sysrq.c65
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c287
-rw-r--r--drivers/gpu/drm/drm_colorop.c599
-rw-r--r--drivers/gpu/drm/drm_connector.c45
-rw-r--r--drivers/gpu/drm/drm_crtc.c35
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h1
-rw-r--r--drivers/gpu/drm/drm_damage_helper.c2
-rw-r--r--drivers/gpu/drm/drm_debugfs.c128
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c1
-rw-r--r--drivers/gpu/drm/drm_displayid.c58
-rw-r--r--drivers/gpu/drm/drm_displayid_internal.h2
-rw-r--r--drivers/gpu/drm/drm_draw.c2
-rw-r--r--drivers/gpu/drm/drm_draw_internal.h2
-rw-r--r--drivers/gpu/drm/drm_drv.c45
-rw-r--r--drivers/gpu/drm/drm_dumb_buffers.c171
-rw-r--r--drivers/gpu/drm/drm_edid.c255
-rw-r--r--drivers/gpu/drm/drm_exec.c2
-rw-r--r--drivers/gpu/drm/drm_fb_dma_helper.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c153
-rw-r--r--drivers/gpu/drm/drm_fbdev_dma.c26
-rw-r--r--drivers/gpu/drm/drm_fbdev_shmem.c22
-rw-r--r--drivers/gpu/drm/drm_fbdev_ttm.c25
-rw-r--r--drivers/gpu/drm/drm_file.c20
-rw-r--r--drivers/gpu/drm/drm_flip_work.c1
-rw-r--r--drivers/gpu/drm/drm_format_helper.c463
-rw-r--r--drivers/gpu/drm/drm_format_internal.h16
-rw-r--r--drivers/gpu/drm/drm_fourcc.c45
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c60
-rw-r--r--drivers/gpu/drm/drm_gem.c241
-rw-r--r--drivers/gpu/drm/drm_gem_atomic_helper.c11
-rw-r--r--drivers/gpu/drm/drm_gem_dma_helper.c12
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c57
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c179
-rw-r--r--drivers/gpu/drm/drm_gem_ttm_helper.c2
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c91
-rw-r--r--drivers/gpu/drm/drm_gpusvm.c1216
-rw-r--r--drivers/gpu/drm/drm_gpuvm.c691
-rw-r--r--drivers/gpu/drm/drm_internal.h27
-rw-r--r--drivers/gpu/drm/drm_ioctl.c8
-rw-r--r--drivers/gpu/drm/drm_managed.c1
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c12
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c123
-rw-r--r--drivers/gpu/drm/drm_mm.c1
-rw-r--r--drivers/gpu/drm/drm_mode_config.c8
-rw-r--r--drivers/gpu/drm/drm_mode_object.c18
-rw-r--r--drivers/gpu/drm/drm_modeset_helper.c12
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c2
-rw-r--r--drivers/gpu/drm/drm_of.c7
-rw-r--r--drivers/gpu/drm/drm_pagemap.c882
-rw-r--r--drivers/gpu/drm/drm_panel.c125
-rw-r--r--drivers/gpu/drm/drm_panel_backlight_quirks.c114
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c1
-rw-r--r--drivers/gpu/drm/drm_panic.c61
-rw-r--r--drivers/gpu/drm/drm_panic_qr.rs30
-rw-r--r--drivers/gpu/drm/drm_pci.c1
-rw-r--r--drivers/gpu/drm/drm_plane.c66
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c1
-rw-r--r--drivers/gpu/drm/drm_prime.c73
-rw-r--r--drivers/gpu/drm/drm_print.c1
-rw-r--r--drivers/gpu/drm/drm_privacy_screen.c1
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c1
-rw-r--r--drivers/gpu/drm/drm_self_refresh_helper.c1
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c1
-rw-r--r--drivers/gpu/drm/drm_suballoc.c2
-rw-r--r--drivers/gpu/drm/drm_syncobj.c1
-rw-r--r--drivers/gpu/drm/drm_sysfs.c45
-rw-r--r--drivers/gpu/drm/drm_vblank.c180
-rw-r--r--drivers/gpu/drm/drm_vblank_helper.c176
-rw-r--r--drivers/gpu/drm/drm_vblank_work.c4
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c1
-rw-r--r--drivers/gpu/drm/drm_writeback.c8
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_hwdb.c32
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c14
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c41
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c17
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c33
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c1
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c1
-rw-r--r--drivers/gpu/drm/gma500/backlight.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c1
-rw-r--r--drivers/gpu/drm/gma500/fbdev.c68
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c14
-rw-r--r--drivers/gpu/drm/gma500/gem.c1
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c1
-rw-r--r--drivers/gpu/drm/gma500/intel_gmbus.c2
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c1
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c1
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c3
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c3
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c1
-rw-r--r--drivers/gpu/drm/gma500/opregion.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c37
-rw-r--r--drivers/gpu/drm/gud/gud_connector.c33
-rw-r--r--drivers/gpu/drm/gud/gud_drv.c97
-rw-r--r--drivers/gpu/drm/gud/gud_internal.h13
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c85
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c14
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c22
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h1
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c5
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c11
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c1
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c1
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm.h4
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_drv.c1
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_modeset.c218
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug2
-rw-r--r--drivers/gpu/drm/i915/Makefile31
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c54
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c16
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.c77
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_display_sr.c2
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c130
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.h6
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c73
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c79
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi_regs.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.c248
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c62
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h176
-rw-r--r--drivers/gpu/drm/i915/display/intel_bo.c42
-rw-r--r--drivers/gpu/drm/i915/display/intel_bo.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c564
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h59
-rw-r--r--drivers/gpu/drm/i915/display/intel_casf.c290
-rw-r--r--drivers/gpu/drm/i915/display/intel_casf.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_casf_regs.h33
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c726
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h64
-rw-r--r--drivers/gpu/drm/i915/display/intel_cmtg.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_cmtg_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c423
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_color_pipeline.c99
-rw-r--r--drivers/gpu/drm/i915/display/intel_color_pipeline.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_color_regs.h29
-rw-r--r--drivers/gpu/drm/i915/display/intel_colorop.c35
-rw-r--r--drivers/gpu/drm/i915/display/intel_colorop.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c35
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c106
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c71
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c299
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.h23
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_dbuf_bw.c295
-rw-r--r--drivers/gpu/drm/i915/display/intel_dbuf_bw.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c313
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c83
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_de.h107
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c555
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h34
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_conversion.c19
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_conversion.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h60
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c31
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs_params.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c49
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h29
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c73
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c166
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_jiffies.h43
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_limits.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c59
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c85
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c214
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_regs.h2934
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rpm.c34
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h142
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_utils.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_utils.h31
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.c39
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c618
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.h20
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_regs.h489
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_wl.c31
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c485
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c158
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c38
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c159
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c154
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_test.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c213
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c232
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c570
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h91
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt_common.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c313
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c42
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt_defs.h197
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_encoder.c48
-rw-r--r--drivers/gpu/drm/i915/display/intel_encoder.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c123
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_bo.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_bo.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c49
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c234
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c77
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.c53
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_flipq.c472
-rw-r--r--drivers/gpu/drm/i915/display/intel_flipq.h37
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c144
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.h36
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c57
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c110
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_gsc.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_regs.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c43
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c31
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_hti_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.c280
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_lt_phy.c2327
-rw-r--r--drivers/gpu/drm/i915/display/intel_lt_phy.h47
-rw-r--r--drivers/gpu/drm/i915/display/intel_lt_phy_regs.h90
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_verify.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c132
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_panic.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_panic.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c166
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane.c (renamed from drivers/gpu/drm/i915/display/intel_atomic_plane.c)322
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane.h (renamed from drivers/gpu/drm/i915/display/intel_atomic_plane.h)26
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.c21
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.c64
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c19
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps_regs.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c764
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr_regs.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_qp_tables.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_sbi.c92
-rw-r--r--drivers/gpu/drm/i915/display/intel_sbi.h27
-rw-r--r--drivers/gpu/drm/i915/display/intel_sbi_regs.h65
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c77
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c262
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.h75
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c26
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c493
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr_regs.h121
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.c9
-rw-r--r--drivers/gpu/drm/i915/display/skl_prefill.c157
-rw-r--r--drivers/gpu/drm/i915/display/skl_prefill.h46
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c306
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.h30
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c311
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane_regs.h139
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c852
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h36
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark_regs.h52
-rw-r--r--drivers/gpu/drm/i915/display/vlv_clock.c88
-rw-r--r--drivers/gpu/drm/i915/display/vlv_clock.h38
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c96
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c82
-rw-r--r--drivers/gpu/drm/i915/display/vlv_sideband.c50
-rw-r--r--drivers/gpu/drm/i915/display/vlv_sideband.h156
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c13
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_create.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c62
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c21
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c103
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h56
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c134
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c134
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c105
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.h34
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c22
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.c18
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c6
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c3
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c72
-rw-r--r--drivers/gpu/drm/i915/gt/gen2_engine_cs.c8
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h31
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_user.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_debugfs.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c26
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c91
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sa_media.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_tlb.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_wopcm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c58
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_context.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c3
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_migrate.c9
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_tlb.c6
-rw-r--r--drivers/gpu/drm/i915/gt/sysfs_engines.c1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c6
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c15
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c12
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c15
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c10
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c12
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c12
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c13
-rw-r--r--drivers/gpu/drm/i915/gvt/display.h13
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c41
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c291
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c2
-rw-r--r--drivers/gpu/drm/i915/i915_active.c5
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c1
-rw-r--r--drivers/gpu/drm/i915/i915_config.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c24
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs_params.c4
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c244
-rw-r--r--drivers/gpu/drm/i915/i915_driver.h2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h92
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h2
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c114
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h1
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c267
-rw-r--r--drivers/gpu/drm/i915/i915_jiffies.h16
-rw-r--r--drivers/gpu/drm/i915/i915_list_util.h23
-rw-r--r--drivers/gpu/drm/i915/i915_mmio_range.c18
-rw-r--r--drivers/gpu/drm/i915/i915_mmio_range.h19
-rw-r--r--drivers/gpu/drm/i915/i915_module.c1
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c1
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c67
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c12
-rw-r--r--drivers/gpu/drm/i915/i915_ptr_util.h66
-rw-r--r--drivers/gpu/drm/i915/i915_query.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2987
-rw-r--r--drivers/gpu/drm/i915/i915_reg_defs.h10
-rw-r--r--drivers/gpu/drm/i915/i915_request.c9
-rw-r--r--drivers/gpu/drm/i915/i915_request.h5
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c10
-rw-r--r--drivers/gpu/drm/i915/i915_switcheroo.c11
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c10
-rw-r--r--drivers/gpu/drm/i915/i915_timer_util.c36
-rw-r--r--drivers/gpu/drm/i915/i915_timer_util.h23
-rw-r--r--drivers/gpu/drm/i915/i915_ttm_buddy_manager.c4
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c31
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h251
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c2
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c46
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h33
-rw-r--r--drivers/gpu/drm/i915/i915_wait_util.h119
-rw-r--r--drivers/gpu/drm/i915/intel_clock_gating.c43
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c2
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c269
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c1
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.c32
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.h15
-rw-r--r--drivers/gpu/drm/i915/intel_region_ttm.c2
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c81
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.h3
-rw-r--r--drivers/gpu/drm/i915/intel_sbi.c94
-rw-r--r--drivers/gpu/drm/i915/intel_sbi.h27
-rw-r--r--drivers/gpu/drm/i915/intel_step.c2
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c32
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h13
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.c5
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c6
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c8
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c2
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_huc.c2
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_session.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c27
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c3
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c5
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c14
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c14
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.c2
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.c184
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.h34
-rw-r--r--drivers/gpu/drm/i915/soc/intel_gmch.c6
-rw-r--r--drivers/gpu/drm/i915/soc/intel_rom.c7
-rw-r--r--drivers/gpu/drm/i915/soc/intel_rom.h6
-rw-r--r--drivers/gpu/drm/i915/vlv_iosf_sb.c (renamed from drivers/gpu/drm/i915/vlv_sideband.c)180
-rw-r--r--drivers/gpu/drm/i915/vlv_iosf_sb.h37
-rw-r--r--drivers/gpu/drm/i915/vlv_iosf_sb_reg.h (renamed from drivers/gpu/drm/i915/vlv_sideband_reg.h)6
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.h125
-rw-r--r--drivers/gpu/drm/i915/vlv_suspend.c5
-rw-r--r--drivers/gpu/drm/imagination/Kconfig4
-rw-r--r--drivers/gpu/drm/imagination/pvr_ccb.c1
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.c24
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.h25
-rw-r--r--drivers/gpu/drm/imagination/pvr_drv.c23
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw.c1
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_meta.c2
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.c1
-rw-r--r--drivers/gpu/drm/imagination/pvr_job.c2
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.c222
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.h15
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.c9
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.h2
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.c16
-rw-r--r--drivers/gpu/drm/imx/Kconfig1
-rw-r--r--drivers/gpu/drm/imx/Makefile1
-rw-r--r--drivers/gpu/drm/imx/dc/Kconfig13
-rw-r--r--drivers/gpu/drm/imx/dc/Makefile7
-rw-r--r--drivers/gpu/drm/imx/dc/dc-cf.c172
-rw-r--r--drivers/gpu/drm/imx/dc/dc-crtc.c555
-rw-r--r--drivers/gpu/drm/imx/dc/dc-de.c177
-rw-r--r--drivers/gpu/drm/imx/dc/dc-de.h59
-rw-r--r--drivers/gpu/drm/imx/dc/dc-drv.c293
-rw-r--r--drivers/gpu/drm/imx/dc/dc-drv.h102
-rw-r--r--drivers/gpu/drm/imx/dc/dc-ed.c288
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fg.c376
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fl.c185
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fu.c258
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fu.h129
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fw.c222
-rw-r--r--drivers/gpu/drm/imx/dc/dc-ic.c282
-rw-r--r--drivers/gpu/drm/imx/dc/dc-kms.c143
-rw-r--r--drivers/gpu/drm/imx/dc/dc-kms.h131
-rw-r--r--drivers/gpu/drm/imx/dc/dc-lb.c325
-rw-r--r--drivers/gpu/drm/imx/dc/dc-pe.c158
-rw-r--r--drivers/gpu/drm/imx/dc/dc-pe.h101
-rw-r--r--drivers/gpu/drm/imx/dc/dc-plane.c224
-rw-r--r--drivers/gpu/drm/imx/dc/dc-tc.c141
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-plane.c5
-rw-r--r--drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c1
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-drm-core.c31
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-ldb.c1
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-tve.c18
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c4
-rw-r--r--drivers/gpu/drm/imx/ipuv3/parallel-display.c23
-rw-r--r--drivers/gpu/drm/imx/lcdc/imx-lcdc.c1
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c18
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-ipu.c4
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c1
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.c4
-rw-r--r--drivers/gpu/drm/lib/drm_random.c1
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c4
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c2
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c14
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h3
-rw-r--r--drivers/gpu/drm/lima/lima_trace.h6
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_layer.c4
-rw-r--r--drivers/gpu/drm/loongson/lsdc_benchmark.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_crtc.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_debugfs.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_drv.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_gem.c32
-rw-r--r--drivers/gpu/drm/loongson/lsdc_i2c.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_irq.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_output_7a1000.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_output_7a2000.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_pixpll.c1
-rw-r--r--drivers/gpu/drm/loongson/lsdc_plane.c3
-rw-r--r--drivers/gpu/drm/loongson/lsdc_ttm.c4
-rw-r--r--drivers/gpu/drm/mcde/mcde_clk_div.c13
-rw-r--r--drivers/gpu/drm/mcde/mcde_display.c1
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig23
-rw-r--r--drivers/gpu/drm/mediatek/Makefile3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.c44
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ddp_comp.c34
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ddp_comp.h11
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ccorr.c23
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_drv.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c12
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c12
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c12
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c21
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c14
-rw-r--r--drivers/gpu/drm/mediatek/mtk_gem.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c554
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_common.c456
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_common.h198
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c396
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_regs_v2.h263
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_v2.c1521
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.c41
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.h3
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_cvbs.c10
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_dsi.c10
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.c12
-rw-r--r--drivers/gpu/drm/meson/meson_overlay.c1
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c1
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c55
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ddc.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h8
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh3.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh5.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200er.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ev.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ew3.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200se.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200wb.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c82
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_vga.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_vga_bmc.c1
-rw-r--r--drivers/gpu/drm/msm/Kconfig36
-rw-r--r--drivers/gpu/drm/msm/Makefile28
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_catalog.c7
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c75
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpummu.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_catalog.c13
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c67
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_catalog.c7
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c69
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_catalog.c17
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_debugfs.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c103
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h1
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_catalog.c523
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c473
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h37
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c847
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h38
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c70
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h114
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c108
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.h17
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_preempt.c56
-rw-r--r--drivers/gpu/drm/msm/adreno/a8xx_gpu.c1201
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c99
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h435
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h340
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h506
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c174
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h126
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h49
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_0_sm8750.h494
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_2_glymur.h541
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h17
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h39
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h44
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_2_sm7150.h29
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h19
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h16
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h38
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h16
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h40
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h46
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h43
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h49
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h43
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h43
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h48
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c35
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c188
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c71
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c19
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c79
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h107
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c145
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c224
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c70
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c475
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c41
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c13
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c18
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c56
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c47
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c18
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c9
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c36
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c25
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot.h13
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c15
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.c138
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.h15
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.c216
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.h15
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c1298
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.h113
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c607
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.h19
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.c5
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c172
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c3
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.c118
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.h5
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c334
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.h16
-rw-r--r--drivers/gpu/drm/msm/dp/dp_reg.h19
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c14
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c75
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c61
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c23
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c34
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c21
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c32
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c160
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c9
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h10
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_audio.c8
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hpd.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c16
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c16
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c12
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c136
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c382
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h87
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c51
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c13
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c599
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h300
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c67
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c104
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c358
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c1609
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c250
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h173
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_trace.h26
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c339
-rw-r--r--drivers/gpu/drm/msm/msm_kms.c81
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h48
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c338
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.h28
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h40
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c62
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c10
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c100
-rw-r--r--drivers/gpu/drm/msm/msm_syncobj.c172
-rw-r--r--drivers/gpu/drm/msm/msm_syncobj.h37
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx.xml6143
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml158
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml429
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml278
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_perfcntrs.xml600
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml216
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a7xx_perfcntrs.xml1030
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a8xx_descriptors.xml121
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a8xx_enums.xml299
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/adreno_common.xml12
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml697
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi.xml28
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml25
-rw-r--r--drivers/gpu/drm/msm/registers/gen_header.py212
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_kms.c5
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c9
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_kms.c1
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c28
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c33
-rw-r--r--drivers/gpu/drm/nouveau/gv100_fence.c7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/hs.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h85
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c311
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c92
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_uvmm.c113
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_uvmm.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvif/vmm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/enum.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/fw.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c320
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c185
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c69
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c4
-rw-r--r--drivers/gpu/drm/nova/Kconfig2
-rw-r--r--drivers/gpu/drm/nova/driver.rs8
-rw-r--r--drivers/gpu/drm/nova/file.rs25
-rw-r--r--drivers/gpu/drm/nova/gem.rs10
-rw-r--r--drivers/gpu/drm/nova/nova.rs3
-rw-r--r--drivers/gpu/drm/nova/uapi.rs61
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c7
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c7
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c26
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c26
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c25
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c23
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c7
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c28
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.h5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c17
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c23
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_overlay.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c3
-rw-r--r--drivers/gpu/drm/panel/Kconfig121
-rw-r--r--drivers/gpu/drm/panel/Makefile9
-rw-r--r--drivers/gpu/drm/panel/panel-boe-himax8279d.c11
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c10
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c73
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c10
-rw-r--r--drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c11
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c10
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8279.c2
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83102.c10
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83112a.c10
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83112b.c430
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8394.c153
-rw-r--r--drivers/gpu/drm/panel/panel-hydis-hv101hd1.c188
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c10
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9341.c11
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9805.c12
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9806e.c10
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c1765
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9882t.c79
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-ej030na.c11
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c11
-rw-r--r--drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c31
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c10
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c204
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c11
-rw-r--r--drivers/gpu/drm/panel/panel-khadas-ts050.c13
-rw-r--r--drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c14
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c11
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c11
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lb035q02.c10
-rw-r--r--drivers/gpu/drm/panel/panel-lg-ld070wx3.c184
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c11
-rw-r--r--drivers/gpu/drm/panel/panel-lg-sw43408.c10
-rw-r--r--drivers/gpu/drm/panel/panel-lincolntech-lcd197.c11
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c14
-rw-r--r--drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c11
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c11
-rw-r--r--drivers/gpu/drm/panel/panel-nec-nl8048hl11.c10
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3051d.c11
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3052c.c418
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c12
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35560.c210
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35950.c10
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36523.c813
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672a.c10
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672e.c10
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt39016.c10
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c11
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-ota5601a.c18
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c11
-rw-r--r--drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c12
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c12
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c12
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67191.c10
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67200.c50
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c11
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm692e5.c10
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm69380.c10
-rw-r--r--drivers/gpu/drm/panel/panel-renesas-r61307.c325
-rw-r--r--drivers/gpu/drm/panel/panel-renesas-r69328.c281
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c19
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ams581vf01.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ams639rq08.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-atna33xc20.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-db7430.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d27a1.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c12
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3fc2x01.c385
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c1
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c12
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c12
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa5x01-ams561ra01.c981
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-sofef00.c114
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c11
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq079l1sx01.c225
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c11
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c10
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c11
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c312
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c132
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c13
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c17
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c10
-rw-r--r--drivers/gpu/drm/panel/panel-sony-td4353-jdi.c11
-rw-r--r--drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c11
-rw-r--r--drivers/gpu/drm/panel/panel-summit.c12
-rw-r--r--drivers/gpu/drm/panel/panel-synaptics-r63353.c11
-rw-r--r--drivers/gpu/drm/panel/panel-synaptics-tddi.c277
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c11
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td043mtea1.c10
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-tpg110.c11
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-r66451.c9
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm69299.c326
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm692e5.c10
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-vtdr6130.c11
-rw-r--r--drivers/gpu/drm/panel/panel-widechips-ws2401.c11
-rw-r--r--drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c10
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c10
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c73
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h39
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c377
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_dump.c8
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c193
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.h66
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c4
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c66
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c348
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.h38
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c115
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.h3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.c30
-rw-r--r--drivers/gpu/drm/panthor/Makefile2
-rw-r--r--drivers/gpu/drm/panthor/panthor_devfreq.c64
-rw-r--r--drivers/gpu/drm/panthor/panthor_devfreq.h2
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.c48
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.h114
-rw-r--r--drivers/gpu/drm/panthor/panthor_drv.c103
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.c143
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.h32
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.c73
-rw-r--r--drivers/gpu/drm/panthor/panthor_gem.h15
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.c282
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.h13
-rw-r--r--drivers/gpu/drm/panthor/panthor_heap.c1
-rw-r--r--drivers/gpu/drm/panthor/panthor_hw.c224
-rw-r--r--drivers/gpu/drm/panthor/panthor_hw.h56
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c273
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.h1
-rw-r--r--drivers/gpu/drm/panthor/panthor_pwr.c549
-rw-r--r--drivers/gpu/drm/panthor/panthor_pwr.h23
-rw-r--r--drivers/gpu/drm/panthor/panthor_regs.h184
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c416
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.h6
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c14
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_debugfs.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c33
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_image.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c14
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c589
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/r100.c215
-rw-r--r--drivers/gpu/drm/radeon/r200.c34
-rw-r--r--drivers/gpu/drm/radeon/r300.c66
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c449
-rw-r--r--drivers/gpu/drm/radeon/radeon.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c116
-rw-r--r--drivers/gpu/drm/radeon/radeon_fbdev.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c6
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_crtc.c3
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c1
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c10
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h2
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c12
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c288
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h342
-rw-r--r--drivers/gpu/drm/renesas/rz-du/Kconfig2
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c12
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c44
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c3
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c363
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h56
-rw-r--r--drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c3
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig10
-rw-r--r--drivers/gpu/drm/rockchip/Makefile1
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c42
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c294
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.h8
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c2
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c163
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c21
-rw-r--r--drivers/gpu/drm/rockchip/dw_dp-rockchip.c150
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c96
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c263
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c460
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.h349
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c20
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c13
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c7
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c180
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.h34
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c69
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.h21
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop2_reg.c153
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c1
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler_trace.h103
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c106
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c6
-rw-r--r--drivers/gpu/drm/scheduler/sched_internal.h2
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c289
-rw-r--r--drivers/gpu/drm/scheduler/tests/mock_scheduler.c109
-rw-r--r--drivers/gpu/drm/scheduler/tests/sched_tests.h12
-rw-r--r--drivers/gpu/drm/scheduler/tests/tests_basic.c93
-rw-r--r--drivers/gpu/drm/sitronix/Kconfig11
-rw-r--r--drivers/gpu/drm/sitronix/st7571-i2c.c139
-rw-r--r--drivers/gpu/drm/sitronix/st7586.c1
-rw-r--r--drivers/gpu/drm/sitronix/st7735r.c1
-rw-r--r--drivers/gpu/drm/solomon/ssd130x-spi.c3
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c89
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c1
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c19
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c29
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c1
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c32
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c28
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.h2
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c3
-rw-r--r--drivers/gpu/drm/sti/sti_plane.c1
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c7
-rw-r--r--drivers/gpu/drm/stm/drv.c13
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c14
-rw-r--r--drivers/gpu/drm/stm/ltdc.c198
-rw-r--r--drivers/gpu/drm/stm/ltdc.h6
-rw-r--r--drivers/gpu/drm/stm/lvds.c19
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_frontend.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c18
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.c113
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.h16
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c378
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h86
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c182
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.h7
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_scaler.c44
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_scaler.h4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c246
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.h7
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_scaler.c49
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_scaler.h6
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_helper.h40
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_modeset.c314
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c21
-rw-r--r--drivers/gpu/drm/sysfb/efidrm.c7
-rw-r--r--drivers/gpu/drm/sysfb/ofdrm.c86
-rw-r--r--drivers/gpu/drm/sysfb/simpledrm.c22
-rw-r--r--drivers/gpu/drm/sysfb/vesadrm.c252
-rw-r--r--drivers/gpu/drm/tegra/Makefile1
-rw-r--r--drivers/gpu/drm/tegra/dc.c4
-rw-r--r--drivers/gpu/drm/tegra/drm.c3
-rw-r--r--drivers/gpu/drm/tegra/drm.h3
-rw-r--r--drivers/gpu/drm/tegra/dsi.c65
-rw-r--r--drivers/gpu/drm/tegra/fb.c8
-rw-r--r--drivers/gpu/drm/tegra/fbdev.c15
-rw-r--r--drivers/gpu/drm/tegra/gem.c10
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c5
-rw-r--r--drivers/gpu/drm/tegra/hub.c1
-rw-r--r--drivers/gpu/drm/tegra/nvdec.c6
-rw-r--r--drivers/gpu/drm/tegra/nvjpg.c330
-rw-r--r--drivers/gpu/drm/tegra/sor.c5
-rw-r--r--drivers/gpu/drm/tegra/uapi.c7
-rw-r--r--drivers/gpu/drm/tests/.kunitconfig2
-rw-r--r--drivers/gpu/drm/tests/Makefile4
-rw-r--r--drivers/gpu/drm/tests/drm_bridge_test.c176
-rw-r--r--drivers/gpu/drm/tests/drm_buddy_test.c105
-rw-r--r--drivers/gpu/drm/tests/drm_exec_test.c22
-rw-r--r--drivers/gpu/drm/tests/drm_fixp_test.c71
-rw-r--r--drivers/gpu/drm/tests/drm_format_helper_test.c269
-rw-r--r--drivers/gpu/drm/tests/drm_framebuffer_test.c1
-rw-r--r--drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c651
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_edid.h374
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c1
-rw-r--r--drivers/gpu/drm/tests/drm_mm_test.c1
-rw-r--r--drivers/gpu/drm/tests/drm_sysfb_modeset_test.c168
-rw-r--r--drivers/gpu/drm/tidss/Makefile3
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c53
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c707
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h29
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc_regs.h105
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c35
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.h9
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.c10
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c8
-rw-r--r--drivers/gpu/drm/tidss/tidss_oldi.c619
-rw-r--r--drivers/gpu/drm/tidss/tidss_oldi.h43
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c10
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.h2
-rw-r--r--drivers/gpu/drm/tidss/tidss_scale_coefs.h2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c9
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_plane.c3
-rw-r--r--drivers/gpu/drm/tiny/Kconfig16
-rw-r--r--drivers/gpu/drm/tiny/Makefile1
-rw-r--r--drivers/gpu/drm/tiny/bochs.c32
-rw-r--r--drivers/gpu/drm/tiny/cirrus-qemu.c12
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c1
-rw-r--r--drivers/gpu/drm/tiny/hx8357d.c1
-rw-r--r--drivers/gpu/drm/tiny/ili9163.c1
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c1
-rw-r--r--drivers/gpu/drm/tiny/ili9341.c1
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c1
-rw-r--r--drivers/gpu/drm/tiny/mi0283qt.c1
-rw-r--r--drivers/gpu/drm/tiny/panel-mipi-dbi.c1
-rw-r--r--drivers/gpu/drm/tiny/pixpaper.c1166
-rw-r--r--drivers/gpu/drm/tiny/repaper.c17
-rw-r--r--drivers/gpu/drm/tiny/sharp-memory.c27
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_test.c28
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c133
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_device_test.c33
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c25
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h7
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_mock_manager.c4
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_pool_test.c24
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_resource_test.c5
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_backup.c11
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c108
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_internal.h60
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c281
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c33
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c72
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool_internal.h25
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c39
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c12
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c1
-rw-r--r--drivers/gpu/drm/tyr/Kconfig19
-rw-r--r--drivers/gpu/drm/tyr/Makefile3
-rw-r--r--drivers/gpu/drm/tyr/driver.rs205
-rw-r--r--drivers/gpu/drm/tyr/file.rs56
-rw-r--r--drivers/gpu/drm/tyr/gem.rs18
-rw-r--r--drivers/gpu/drm/tyr/gpu.rs219
-rw-r--r--drivers/gpu/drm/tyr/regs.rs108
-rw-r--r--drivers/gpu/drm/tyr/tyr.rs22
-rw-r--r--drivers/gpu/drm/udl/udl_edid.c1
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c3
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c1
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c26
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h43
-rw-r--r--drivers/gpu/drm/v3d/v3d_fence.c11
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c14
-rw-r--r--drivers/gpu/drm/v3d/v3d_gemfs.c18
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c107
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c105
-rw-r--r--drivers/gpu/drm/v3d/v3d_submit.c5
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_irq.c1
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c1
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c9
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_ttm.c1
-rw-r--r--drivers/gpu/drm/vc4/Kconfig1
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c146
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h1
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_perfmon.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c7
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c1
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c30
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c44
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c9
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c21
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c5
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c3
-rw-r--r--drivers/gpu/drm/vkms/Kconfig1
-rw-r--r--drivers/gpu/drm/vkms/Makefile5
-rw-r--r--drivers/gpu/drm/vkms/tests/Makefile7
-rw-r--r--drivers/gpu/drm/vkms/tests/vkms_color_test.c414
-rw-r--r--drivers/gpu/drm/vkms/tests/vkms_config_test.c122
-rw-r--r--drivers/gpu/drm/vkms/tests/vkms_format_test.c279
-rw-r--r--drivers/gpu/drm/vkms/vkms_colorop.c120
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.c136
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.h28
-rw-r--r--drivers/gpu/drm/vkms/vkms_config.c15
-rw-r--r--drivers/gpu/drm/vkms/vkms_config.h54
-rw-r--r--drivers/gpu/drm/vkms/vkms_configfs.c843
-rw-r--r--drivers/gpu/drm/vkms/vkms_configfs.h8
-rw-r--r--drivers/gpu/drm/vkms/vkms_connector.c35
-rw-r--r--drivers/gpu/drm/vkms/vkms_connector.h9
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c90
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c55
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h73
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.c644
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.h9
-rw-r--r--drivers/gpu/drm/vkms/vkms_luts.c811
-rw-r--r--drivers/gpu/drm/vkms/vkms_luts.h12
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c20
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c52
-rw-r--r--drivers/gpu/drm/vkms/vkms_writeback.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c51
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c510
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c61
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c6
-rw-r--r--drivers/gpu/drm/xe/Kconfig26
-rw-r--r--drivers/gpu/drm/xe/Kconfig.debug28
-rw-r--r--drivers/gpu/drm/xe/Makefile60
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_abi.h38
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h5
-rw-r--r--drivers/gpu/drm/xe/abi/guc_errors_abi.h17
-rw-r--r--drivers/gpu/drm/xe/abi/guc_klvs_abi.h55
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h4
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h112
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h26
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h13
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h9
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h2
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h31
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h37
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb.h42
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_iosf_sb_reg.h (renamed from drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband_reg.h)2
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h132
-rw-r--r--drivers/gpu/drm/xe/display/ext/i915_utils.c26
-rw-r--r--drivers/gpu/drm/xe/display/intel_bo.c60
-rw-r--r--drivers/gpu/drm/xe/display/intel_fb_bo.c3
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c83
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c153
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.h8
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rpm.c63
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_rpm.h11
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_wa.c5
-rw-r--r--drivers/gpu/drm/xe/display/xe_dsb_buffer.c21
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c153
-rw-r--r--drivers/gpu/drm/xe/display/xe_hdcp_gsc.c10
-rw-r--r--drivers/gpu/drm/xe/display/xe_panic.c102
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c28
-rw-r--r--drivers/gpu/drm/xe/display/xe_stolen.c123
-rw-r--r--drivers/gpu/drm/xe/display/xe_tdf.c4
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_gpu_commands.h6
-rw-r--r--drivers/gpu/drm/xe/instructions/xe_mi_commands.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_bars.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h7
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gsc_regs.h6
-rw-r--r--drivers/gpu/drm/xe/regs/xe_gt_regs.h35
-rw-r--r--drivers/gpu/drm/xe/regs/xe_hw_error_regs.h20
-rw-r--r--drivers/gpu/drm/xe/regs/xe_i2c_regs.h23
-rw-r--r--drivers/gpu/drm/xe/regs/xe_irq_regs.h10
-rw-r--r--drivers/gpu/drm/xe/regs/xe_lrc_layout.h4
-rw-r--r--drivers/gpu/drm/xe/regs/xe_mchbar_regs.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_oa_regs.h3
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pcode_regs.h2
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pmt.h18
-rw-r--r--drivers/gpu/drm/xe/regs/xe_regs.h4
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c42
-rw-r--r--drivers/gpu/drm/xe/tests/xe_dma_buf.c56
-rw-r--r--drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_config_kunit.c208
-rw-r--r--drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c232
-rw-r--r--drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c13
-rw-r--r--drivers/gpu/drm/xe/tests/xe_guc_g2g_test.c776
-rw-r--r--drivers/gpu/drm/xe/tests/xe_live_test_mod.c2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_migrate.c118
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs.c2
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci.c329
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.c34
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.h21
-rw-r--r--drivers/gpu/drm/xe/tests/xe_rtp_test.c6
-rw-r--r--drivers/gpu/drm/xe/tests/xe_sriov_pf_service_kunit.c227
-rw-r--r--drivers/gpu/drm/xe/tests/xe_wa_test.c90
-rw-r--r--drivers/gpu/drm/xe/xe_assert.h4
-rw-r--r--drivers/gpu/drm/xe/xe_bb.c37
-rw-r--r--drivers/gpu/drm/xe/xe_bb.h5
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c1123
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h106
-rw-r--r--drivers/gpu/drm/xe/xe_bo_doc.h8
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.c25
-rw-r--r--drivers/gpu/drm/xe/xe_bo_types.h29
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.c1153
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.h33
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.c202
-rw-r--r--drivers/gpu/drm/xe/xe_dep_job_types.h29
-rw-r--r--drivers/gpu/drm/xe/xe_dep_scheduler.c143
-rw-r--r--drivers/gpu/drm/xe/xe_dep_scheduler.h21
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c54
-rw-r--r--drivers/gpu/drm/xe/xe_device.c379
-rw-r--r--drivers/gpu/drm/xe/xe_device.h56
-rw-r--r--drivers/gpu/drm/xe/xe_device_sysfs.c162
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h242
-rw-r--r--drivers/gpu/drm/xe/xe_device_wa_oob.rules5
-rw-r--r--drivers/gpu/drm/xe/xe_dma_buf.c121
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c2
-rw-r--r--drivers/gpu/drm/xe/xe_drv.h2
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.c51
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c69
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c313
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.h29
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue_types.h43
-rw-r--r--drivers/gpu/drm/xe/xe_execlist.c27
-rw-r--r--drivers/gpu/drm/xe/xe_execlist_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake.c2
-rw-r--r--drivers/gpu/drm/xe/xe_force_wake_types.h26
-rw-r--r--drivers/gpu/drm/xe/xe_gen_wa_oob.c53
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c455
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.h32
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.c14
-rw-r--r--drivers/gpu/drm/xe/xe_gpu_scheduler.h29
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.c20
-rw-r--r--drivers/gpu/drm/xe/xe_gsc_proxy.c3
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c392
-rw-r--r--drivers/gpu/drm/xe/xe_gt.h27
-rw-r--r--drivers/gpu/drm/xe/xe_gt_clock.c26
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c304
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_freq.c65
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c31
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c127
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.h3
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c713
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.h19
-rw-r--r--drivers/gpu/drm/xe/xe_gt_printk.h32
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c132
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.h6
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c538
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h16
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c757
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h12
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c470
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c1022
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h48
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h34
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c189
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h5
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_printk.h7
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.c719
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.h13
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h63
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.c57
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats.h1
-rw-r--r--drivers/gpu/drm/xe/xe_gt_stats_types.h33
-rw-r--r--drivers/gpu/drm/xe/xe_gt_throttle.c355
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c578
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h42
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h32
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.c115
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.h10
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h114
-rw-r--r--drivers/gpu/drm/xe/xe_guard.h119
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c559
-rw-r--r--drivers/gpu/drm/xe/xe_guc.h7
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c169
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf.c63
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture.c35
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c497
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.h18
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct_types.h17
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.c15
-rw-r--r--drivers/gpu/drm/xe/xe_guc_exec_queue_types.h21
-rw-r--r--drivers/gpu/drm/xe/xe_guc_fwif.h38
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.h4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pagefault.c95
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pagefault.h15
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c485
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.h4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc_types.h8
-rw-r--r--drivers/gpu/drm/xe/xe_guc_relay.c17
-rw-r--r--drivers/gpu/drm/xe/xe_guc_relay_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c786
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.h9
-rw-r--r--drivers/gpu/drm/xe/xe_guc_tlb_inval.c242
-rw-r--r--drivers/gpu/drm/xe/xe_guc_tlb_inval.h19
-rw-r--r--drivers/gpu/drm/xe/xe_guc_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.c9
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.c325
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.h18
-rw-r--r--drivers/gpu/drm/xe/xe_huc.c12
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c102
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group.c57
-rw-r--r--drivers/gpu/drm/xe/xe_hw_error.c182
-rw-r--r--drivers/gpu/drm/xe/xe_hw_error.h15
-rw-r--r--drivers/gpu/drm/xe/xe_hw_fence.c5
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c352
-rw-r--r--drivers/gpu/drm/xe/xe_i2c.c372
-rw-r--r--drivers/gpu/drm/xe/xe_i2c.h68
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c167
-rw-r--r--drivers/gpu/drm/xe/xe_late_bind_fw.c464
-rw-r--r--drivers/gpu/drm/xe/xe_late_bind_fw.h17
-rw-r--r--drivers/gpu/drm/xe/xe_late_bind_fw_types.h75
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.c93
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.h1
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c559
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.h27
-rw-r--r--drivers/gpu/drm/xe/xe_lrc_types.h8
-rw-r--r--drivers/gpu/drm/xe/xe_map.h4
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.c57
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.h2
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c896
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.h45
-rw-r--r--drivers/gpu/drm/xe/xe_migrate_doc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c66
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.h4
-rw-r--r--drivers/gpu/drm/xe/xe_mmio_gem.c226
-rw-r--r--drivers/gpu/drm/xe/xe_mmio_gem.h20
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.c42
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.h8
-rw-r--r--drivers/gpu/drm/xe/xe_module.c77
-rw-r--r--drivers/gpu/drm/xe/xe_nvm.c170
-rw-r--r--drivers/gpu/drm/xe/xe_nvm.h15
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c303
-rw-r--r--drivers/gpu/drm/xe/xe_oa_types.h17
-rw-r--r--drivers/gpu/drm/xe/xe_pagefault.c444
-rw-r--r--drivers/gpu/drm/xe/xe_pagefault.h19
-rw-r--r--drivers/gpu/drm/xe/xe_pagefault_types.h136
-rw-r--r--drivers/gpu/drm/xe/xe_pat.c189
-rw-r--r--drivers/gpu/drm/xe/xe_pat.h12
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c485
-rw-r--r--drivers/gpu/drm/xe/xe_pci.h3
-rw-r--r--drivers/gpu/drm/xe/xe_pci_sriov.c144
-rw-r--r--drivers/gpu/drm/xe/xe_pci_sriov.h1
-rw-r--r--drivers/gpu/drm/xe/xe_pci_types.h49
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c70
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.h12
-rw-r--r--drivers/gpu/drm/xe/xe_pcode_api.h21
-rw-r--r--drivers/gpu/drm/xe/xe_platform_types.h3
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c180
-rw-r--r--drivers/gpu/drm/xe/xe_pm.h19
-rw-r--r--drivers/gpu/drm/xe/xe_pmu.c18
-rw-r--r--drivers/gpu/drm/xe/xe_preempt_fence.c11
-rw-r--r--drivers/gpu/drm/xe/xe_preempt_fence_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_printk.h129
-rw-r--r--drivers/gpu/drm/xe/xe_psmi.c294
-rw-r--r--drivers/gpu/drm/xe/xe_psmi.h14
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c540
-rw-r--r--drivers/gpu/drm/xe/xe_pt.h3
-rw-r--r--drivers/gpu/drm/xe/xe_pt_types.h5
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.c148
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_submit.c36
-rw-r--r--drivers/gpu/drm/xe/xe_query.c64
-rw-r--r--drivers/gpu/drm/xe/xe_range_fence.h4
-rw-r--r--drivers/gpu/drm/xe/xe_reg_whitelist.c10
-rw-r--r--drivers/gpu/drm/xe/xe_res_cursor.h10
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c61
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c69
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.h40
-rw-r--r--drivers/gpu/drm/xe/xe_rtp_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_sa.c22
-rw-r--r--drivers/gpu/drm/xe/xe_sa.h16
-rw-r--r--drivers/gpu/drm/xe/xe_sa_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.c42
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.h13
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job_types.h11
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.c96
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.h4
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.c17
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.h1
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_packet.c520
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_packet.h30
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_packet_types.h75
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.c181
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.h19
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_control.c279
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_control.h22
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c395
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_debugfs.h18
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_helpers.h27
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_migration.c365
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_migration.h30
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h37
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_provision.c438
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_provision.h45
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_provision_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service.c216
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service.h23
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_service_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c647
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_sysfs.h16
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_types.h70
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_printk.h12
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.c174
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.h8
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_ccs.c480
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_ccs.h35
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h51
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_types.h47
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vfio.c80
-rw-r--r--drivers/gpu/drm/xe/xe_step.c2
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.c200
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.h5
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode_types.h8
-rw-r--r--drivers/gpu/drm/xe/xe_svm.c1044
-rw-r--r--drivers/gpu/drm/xe/xe_svm.h230
-rw-r--r--drivers/gpu/drm/xe/xe_sync.c93
-rw-r--r--drivers/gpu/drm/xe/xe_sync.h3
-rw-r--r--drivers/gpu/drm/xe/xe_sync_types.h3
-rw-r--r--drivers/gpu/drm/xe/xe_tile.c87
-rw-r--r--drivers/gpu/drm/xe/xe_tile.h7
-rw-r--r--drivers/gpu/drm/xe/xe_tile_debugfs.c142
-rw-r--r--drivers/gpu/drm/xe/xe_tile_debugfs.h16
-rw-r--r--drivers/gpu/drm/xe/xe_tile_printk.h127
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.c253
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.h15
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_printk.h33
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf.c350
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf.h23
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h23
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sysfs.c12
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval.c433
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval.h46
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval_job.c285
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval_job.h34
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval_types.h130
-rw-r--r--drivers/gpu/drm/xe/xe_trace.h63
-rw-r--r--drivers/gpu/drm/xe/xe_trace_bo.h4
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c18
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_sys_mgr.c6
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.c28
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.h3
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.c32
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.h2
-rw-r--r--drivers/gpu/drm/xe/xe_uc.c78
-rw-r--r--drivers/gpu/drm/xe/xe_uc.h5
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c89
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_abi.h130
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_types.h11
-rw-r--r--drivers/gpu/drm/xe/xe_uc_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_userptr.c322
-rw-r--r--drivers/gpu/drm/xe/xe_userptr.h107
-rw-r--r--drivers/gpu/drm/xe/xe_validation.c278
-rw-r--r--drivers/gpu/drm/xe/xe_validation.h192
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c1788
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h104
-rw-r--r--drivers/gpu/drm/xe/xe_vm_doc.h8
-rw-r--r--drivers/gpu/drm/xe/xe_vm_madvise.c431
-rw-r--r--drivers/gpu/drm/xe/xe_vm_madvise.h15
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h185
-rw-r--r--drivers/gpu/drm/xe/xe_vram.c291
-rw-r--r--drivers/gpu/drm/xe/xe_vram.h12
-rw-r--r--drivers/gpu/drm/xe/xe_vram_freq.c4
-rw-r--r--drivers/gpu/drm/xe/xe_vram_types.h85
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.c24
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.h4
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c238
-rw-r--r--drivers/gpu/drm/xe/xe_wa.h30
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules39
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c4
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c72
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c1
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_kms.c10
-rw-r--r--drivers/gpu/host1x/bus.c12
-rw-r--r--drivers/gpu/host1x/dev.c20
-rw-r--r--drivers/gpu/host1x/dev.h3
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c106
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c56
-rw-r--r--drivers/gpu/host1x/syncpt.c4
-rw-r--r--drivers/gpu/nova-core/Kconfig1
-rw-r--r--drivers/gpu/nova-core/bitfield.rs330
-rw-r--r--drivers/gpu/nova-core/dma.rs54
-rw-r--r--drivers/gpu/nova-core/driver.rs94
-rw-r--r--drivers/gpu/nova-core/falcon.rs664
-rw-r--r--drivers/gpu/nova-core/falcon/gsp.rs57
-rw-r--r--drivers/gpu/nova-core/falcon/hal.rs60
-rw-r--r--drivers/gpu/nova-core/falcon/hal/ga102.rs120
-rw-r--r--drivers/gpu/nova-core/falcon/sec2.rs25
-rw-r--r--drivers/gpu/nova-core/fb.rs217
-rw-r--r--drivers/gpu/nova-core/fb/hal.rs41
-rw-r--r--drivers/gpu/nova-core/fb/hal/ga100.rs63
-rw-r--r--drivers/gpu/nova-core/fb/hal/ga102.rs38
-rw-r--r--drivers/gpu/nova-core/fb/hal/tu102.rs59
-rw-r--r--drivers/gpu/nova-core/firmware.rs227
-rw-r--r--drivers/gpu/nova-core/firmware/booter.rs401
-rw-r--r--drivers/gpu/nova-core/firmware/fwsec.rs438
-rw-r--r--drivers/gpu/nova-core/firmware/gsp.rs258
-rw-r--r--drivers/gpu/nova-core/firmware/riscv.rs95
-rw-r--r--drivers/gpu/nova-core/gfw.rs71
-rw-r--r--drivers/gpu/nova-core/gpu.rs199
-rw-r--r--drivers/gpu/nova-core/gsp.rs161
-rw-r--r--drivers/gpu/nova-core/gsp/boot.rs252
-rw-r--r--drivers/gpu/nova-core/gsp/cmdq.rs679
-rw-r--r--drivers/gpu/nova-core/gsp/commands.rs227
-rw-r--r--drivers/gpu/nova-core/gsp/fw.rs928
-rw-r--r--drivers/gpu/nova-core/gsp/fw/commands.rs128
-rw-r--r--drivers/gpu/nova-core/gsp/fw/r570_144.rs31
-rw-r--r--drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs951
-rw-r--r--drivers/gpu/nova-core/gsp/sequencer.rs407
-rw-r--r--drivers/gpu/nova-core/nova_core.rs13
-rw-r--r--drivers/gpu/nova-core/num.rs217
-rw-r--r--drivers/gpu/nova-core/regs.rs390
-rw-r--r--drivers/gpu/nova-core/regs/macros.rs803
-rw-r--r--drivers/gpu/nova-core/sbuffer.rs227
-rw-r--r--drivers/gpu/nova-core/util.rs29
-rw-r--r--drivers/gpu/nova-core/vbios.rs1097
-rw-r--r--drivers/gpu/trace/Kconfig11
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c2
-rw-r--r--drivers/greybus/gb-beagleplay.c37
-rw-r--r--drivers/greybus/operation.c2
-rw-r--r--drivers/greybus/svc.c3
-rw-r--r--drivers/hid/Kconfig21
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_client.c35
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_common.h3
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.h2
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c8
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.h1
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c2
-rw-r--r--drivers/hid/bpf/progs/Huion__Inspiroy-2-M.bpf.c563
-rw-r--r--drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c29
-rw-r--r--drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c6
-rw-r--r--drivers/hid/bpf/progs/Huion__Kamvas13Gen3.bpf.c1395
-rw-r--r--drivers/hid/bpf/progs/Huion__Kamvas16Gen3.bpf.c724
-rw-r--r--drivers/hid/bpf/progs/Logitech__SpaceNavigator.bpf.c86
-rw-r--r--drivers/hid/bpf/progs/WALTOP__Batteryless-Tablet.bpf.c321
-rw-r--r--drivers/hid/bpf/progs/XPPen__Deco01V3.bpf.c305
-rw-r--r--drivers/hid/bpf/progs/XPPen__Deco02.bpf.c359
-rw-r--r--drivers/hid/bpf/progs/hid_report_helpers.h10
-rw-r--r--drivers/hid/hid-apple.c245
-rw-r--r--drivers/hid/hid-appletb-kbd.c13
-rw-r--r--drivers/hid/hid-asus.c18
-rw-r--r--drivers/hid/hid-core.c80
-rw-r--r--drivers/hid/hid-corsair-void.c5
-rw-r--r--drivers/hid/hid-cp2112.c39
-rw-r--r--drivers/hid/hid-debug.c10
-rw-r--r--drivers/hid/hid-elecom.c14
-rw-r--r--drivers/hid/hid-evision.c21
-rw-r--r--drivers/hid/hid-generic.c9
-rw-r--r--drivers/hid/hid-haptic.c580
-rw-r--r--drivers/hid/hid-haptic.h127
-rw-r--r--drivers/hid/hid-ids.h60
-rw-r--r--drivers/hid/hid-input-test.c10
-rw-r--r--drivers/hid/hid-input.c106
-rw-r--r--drivers/hid/hid-lenovo.c40
-rw-r--r--drivers/hid/hid-lg-g15.c483
-rw-r--r--drivers/hid/hid-lg4ff.c6
-rw-r--r--drivers/hid/hid-logitech-dj.c196
-rw-r--r--drivers/hid/hid-logitech-hidpp.c35
-rw-r--r--drivers/hid/hid-magicmouse.c66
-rw-r--r--drivers/hid/hid-mcp2200.c4
-rw-r--r--drivers/hid/hid-mcp2221.c114
-rw-r--r--drivers/hid/hid-multitouch.c153
-rw-r--r--drivers/hid/hid-nintendo.c49
-rw-r--r--drivers/hid/hid-ntrig.c10
-rw-r--r--drivers/hid/hid-playstation.c1075
-rw-r--r--drivers/hid/hid-quirks.c36
-rw-r--r--drivers/hid/hid-roccat-arvo.c2
-rw-r--r--drivers/hid/hid-roccat-common.h8
-rw-r--r--drivers/hid/hid-roccat-isku.c10
-rw-r--r--drivers/hid/hid-roccat-kone.c6
-rw-r--r--drivers/hid/hid-roccat-koneplus.c14
-rw-r--r--drivers/hid/hid-roccat-konepure.c2
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c12
-rw-r--r--drivers/hid/hid-roccat-lua.c4
-rw-r--r--drivers/hid/hid-roccat-pyra.c14
-rw-r--r--drivers/hid/hid-roccat-ryos.c2
-rw-r--r--drivers/hid/hid-roccat-savu.c2
-rw-r--r--drivers/hid/hid-steam.c35
-rw-r--r--drivers/hid/hid-steelseries.c108
-rw-r--r--drivers/hid/hid-uclogic-core.c85
-rw-r--r--drivers/hid/hid-uclogic-params.c173
-rw-r--r--drivers/hid/hid-uclogic-params.h10
-rw-r--r--drivers/hid/hid-uclogic-rdesc.c169
-rw-r--r--drivers/hid/hid-uclogic-rdesc.h12
-rw-r--r--drivers/hid/hid-universal-pidff.c62
-rw-r--r--drivers/hid/hid-winwing.c171
-rw-r--r--drivers/hid/hidraw.c224
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-acpi.c8
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c74
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of-elan.c11
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.h2
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h1
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c118
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c46
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c18
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c21
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c6
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.c4
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h6
-rw-r--r--drivers/hid/intel-thc-hid/Makefile1
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c244
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h79
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c1
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.c26
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c25
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h4
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c1
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c3
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c147
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.h33
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c40
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h38
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-hw.h5
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-wot.c94
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-wot.h26
-rw-r--r--drivers/hid/usbhid/hid-pidff.c750
-rw-r--r--drivers/hid/usbhid/hid-pidff.h5
-rw-r--r--drivers/hid/wacom_sys.c7
-rw-r--r--drivers/hid/wacom_wac.c1
-rw-r--r--drivers/hsi/controllers/omap_ssi_port.c11
-rw-r--r--drivers/hv/Kconfig46
-rw-r--r--drivers/hv/Makefile13
-rw-r--r--drivers/hv/channel.c78
-rw-r--r--drivers/hv/channel_mgmt.c28
-rw-r--r--drivers/hv/connection.c11
-rw-r--r--drivers/hv/hv.c383
-rw-r--r--drivers/hv/hv_common.c49
-rw-r--r--drivers/hv/hv_proc.c1
-rw-r--r--drivers/hv/hv_util.c2
-rw-r--r--drivers/hv/hv_utils_transport.c10
-rw-r--r--drivers/hv/hyperv_vmbus.h76
-rw-r--r--drivers/hv/mshv.h2
-rw-r--r--drivers/hv/mshv_common.c102
-rw-r--r--drivers/hv/mshv_eventfd.c30
-rw-r--r--drivers/hv/mshv_irq.c4
-rw-r--r--drivers/hv/mshv_regions.c555
-rw-r--r--drivers/hv/mshv_root.h57
-rw-r--r--drivers/hv/mshv_root_hv_call.c197
-rw-r--r--drivers/hv/mshv_root_main.c835
-rw-r--r--drivers/hv/mshv_synic.c6
-rw-r--r--drivers/hv/mshv_vtl.h25
-rw-r--r--drivers/hv/mshv_vtl_main.c1392
-rw-r--r--drivers/hv/ring_buffer.c6
-rw-r--r--drivers/hv/vmbus_drv.c221
-rw-r--r--drivers/hwmon/Kconfig96
-rw-r--r--drivers/hwmon/Makefile6
-rw-r--r--drivers/hwmon/adm1026.c16
-rw-r--r--drivers/hwmon/adm1029.c3
-rw-r--r--drivers/hwmon/adm9240.c17
-rw-r--r--drivers/hwmon/adt7410.c11
-rw-r--r--drivers/hwmon/adt7411.c59
-rw-r--r--drivers/hwmon/adt7475.c20
-rw-r--r--drivers/hwmon/adt7x10.c27
-rw-r--r--drivers/hwmon/aht10.c43
-rw-r--r--drivers/hwmon/amc6821.c127
-rw-r--r--drivers/hwmon/aquacomputer_d5next.c37
-rw-r--r--drivers/hwmon/aspeed-g6-pwm-tach.c3
-rw-r--r--drivers/hwmon/asus-ec-sensors.c445
-rw-r--r--drivers/hwmon/asus_rog_ryujin.c48
-rw-r--r--drivers/hwmon/axi-fan-control.c2
-rw-r--r--drivers/hwmon/cgbc-hwmon.c3
-rw-r--r--drivers/hwmon/chipcap2.c7
-rw-r--r--drivers/hwmon/coretemp.c76
-rw-r--r--drivers/hwmon/corsair-cpro.c13
-rw-r--r--drivers/hwmon/corsair-psu.c14
-rw-r--r--drivers/hwmon/cros_ec_hwmon.c313
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c108
-rw-r--r--drivers/hwmon/drivetemp.c5
-rw-r--r--drivers/hwmon/emc1403.c46
-rw-r--r--drivers/hwmon/emc2103.c4
-rw-r--r--drivers/hwmon/emc2305.c181
-rw-r--r--drivers/hwmon/ftsteutates.c93
-rw-r--r--drivers/hwmon/gpd-fan.c683
-rw-r--r--drivers/hwmon/gsc-hwmon.c4
-rw-r--r--drivers/hwmon/hs3001.c10
-rw-r--r--drivers/hwmon/hwmon.c56
-rw-r--r--drivers/hwmon/i5500_temp.c3
-rw-r--r--drivers/hwmon/ibmaem.c27
-rw-r--r--drivers/hwmon/ina238.c578
-rw-r--r--drivers/hwmon/ina2xx.c28
-rw-r--r--drivers/hwmon/ina3221.c19
-rw-r--r--drivers/hwmon/jc42.c11
-rw-r--r--drivers/hwmon/k10temp.c22
-rw-r--r--drivers/hwmon/lenovo-ec-sensors.c34
-rw-r--r--drivers/hwmon/lm75.c21
-rw-r--r--drivers/hwmon/lm78.c5
-rw-r--r--drivers/hwmon/lm87.c16
-rw-r--r--drivers/hwmon/lm90.c25
-rw-r--r--drivers/hwmon/lm92.c11
-rw-r--r--drivers/hwmon/lm95234.c12
-rw-r--r--drivers/hwmon/lm95241.c16
-rw-r--r--drivers/hwmon/lm95245.c16
-rw-r--r--drivers/hwmon/lochnagar-hwmon.c18
-rw-r--r--drivers/hwmon/ltc2947-core.c92
-rw-r--r--drivers/hwmon/ltc2992.c4
-rw-r--r--drivers/hwmon/ltc4245.c8
-rw-r--r--drivers/hwmon/ltc4282.c92
-rw-r--r--drivers/hwmon/macsmc-hwmon.c851
-rw-r--r--drivers/hwmon/max127.c23
-rw-r--r--drivers/hwmon/max16065.c7
-rw-r--r--drivers/hwmon/max31790.c48
-rw-r--r--drivers/hwmon/max31827.c62
-rw-r--r--drivers/hwmon/max6620.c43
-rw-r--r--drivers/hwmon/max6639.c23
-rw-r--r--drivers/hwmon/max6697.c11
-rw-r--r--drivers/hwmon/mc33xs2410_hwmon.c178
-rw-r--r--drivers/hwmon/mlxreg-fan.c47
-rw-r--r--drivers/hwmon/mr75203.c1
-rw-r--r--drivers/hwmon/nct6694-hwmon.c949
-rw-r--r--drivers/hwmon/nct6775-platform.c4
-rw-r--r--drivers/hwmon/nct7363.c2
-rw-r--r--drivers/hwmon/nct7904.c63
-rw-r--r--drivers/hwmon/npcm750-pwm-fan.c11
-rw-r--r--drivers/hwmon/ntc_thermistor.c43
-rw-r--r--drivers/hwmon/nzxt-smart2.c8
-rw-r--r--drivers/hwmon/occ/common.c240
-rw-r--r--drivers/hwmon/peci/common.h3
-rw-r--r--drivers/hwmon/peci/cputemp.c90
-rw-r--r--drivers/hwmon/peci/dimmtemp.c36
-rw-r--r--drivers/hwmon/pmbus/Kconfig58
-rw-r--r--drivers/hwmon/pmbus/Makefile5
-rw-r--r--drivers/hwmon/pmbus/adm1275.c11
-rw-r--r--drivers/hwmon/pmbus/adp1050.c72
-rw-r--r--drivers/hwmon/pmbus/isl68137.c26
-rw-r--r--drivers/hwmon/pmbus/max17616.c73
-rw-r--r--drivers/hwmon/pmbus/max34440.c56
-rw-r--r--drivers/hwmon/pmbus/mp2869.c659
-rw-r--r--drivers/hwmon/pmbus/mp2925.c316
-rw-r--r--drivers/hwmon/pmbus/mp29502.c670
-rw-r--r--drivers/hwmon/pmbus/mp5990.c67
-rw-r--r--drivers/hwmon/pmbus/mp9945.c243
-rw-r--r--drivers/hwmon/pmbus/tps53679.c37
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c10
-rw-r--r--drivers/hwmon/powr1220.c17
-rw-r--r--drivers/hwmon/pwm-fan.c18
-rw-r--r--drivers/hwmon/sa67mcu-hwmon.c161
-rw-r--r--drivers/hwmon/sbtsi_temp.c63
-rw-r--r--drivers/hwmon/sch56xx-common.c4
-rw-r--r--drivers/hwmon/scmi-hwmon.c9
-rw-r--r--drivers/hwmon/sfctemp.c36
-rw-r--r--drivers/hwmon/sht21.c15
-rw-r--r--drivers/hwmon/sht3x.c27
-rw-r--r--drivers/hwmon/sht4x.c40
-rw-r--r--drivers/hwmon/sy7636a-hwmon.c8
-rw-r--r--drivers/hwmon/tmp102.c24
-rw-r--r--drivers/hwmon/tmp103.c3
-rw-r--r--drivers/hwmon/tmp108.c1
-rw-r--r--drivers/hwmon/tmp401.c8
-rw-r--r--drivers/hwmon/tmp421.c28
-rw-r--r--drivers/hwmon/tmp464.c13
-rw-r--r--drivers/hwmon/tsc1641.c748
-rw-r--r--drivers/hwmon/vt1211.c53
-rw-r--r--drivers/hwmon/vt8231.c18
-rw-r--r--drivers/hwmon/w83627ehf.c9
-rw-r--r--drivers/hwmon/w83781d.c5
-rw-r--r--drivers/hwmon/w83791d.c17
-rw-r--r--drivers/hwmon/w83l786ng.c26
-rw-r--r--drivers/hwtracing/coresight/Kconfig12
-rw-r--r--drivers/hwtracing/coresight/Makefile1
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c63
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.h1
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c84
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c41
-rw-r--r--drivers/hwtracing/coresight/coresight-ctcu-core.c33
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-core.c5
-rw-r--r--drivers/hwtracing/coresight/coresight-cti.h5
-rw-r--r--drivers/hwtracing/coresight/coresight-dummy.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c26
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-core.c76
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c184
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h11
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c66
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h3
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c63
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c42
-rw-r--r--drivers/hwtracing/coresight/coresight-syscfg.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-sysfs.c73
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c70
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c22
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h5
-rw-r--r--drivers/hwtracing/coresight/coresight-tnoc.c246
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.c174
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.h12
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c38
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.c25
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.c9
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.h1
-rw-r--r--drivers/hwtracing/intel_th/core.c22
-rw-r--r--drivers/hwtracing/intel_th/msu.c3
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c4
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c6
-rw-r--r--drivers/i2c/algos/i2c-algo-pcf.c109
-rw-r--r--drivers/i2c/busses/Kconfig32
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-pci.c5
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-plat.c2
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2.h1
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c8
-rw-r--r--drivers/i2c/busses/i2c-at91-core.c1
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c5
-rw-r--r--drivers/i2c/busses/i2c-axxia.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c12
-rw-r--r--drivers/i2c/busses/i2c-cadence.c11
-rw-r--r--drivers/i2c/busses/i2c-cgbc.c4
-rw-r--r--drivers/i2c/busses/i2c-davinci.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-amdisp.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h2
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c23
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c42
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c9
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c2
-rw-r--r--drivers/i2c/busses/i2c-emev2.c6
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c6
-rw-r--r--drivers/i2c/busses/i2c-gxp.c6
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c3
-rw-r--r--drivers/i2c/busses/i2c-i801.c9
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c5
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c268
-rw-r--r--drivers/i2c/busses/i2c-imx.c51
-rw-r--r--drivers/i2c/busses/i2c-k1.c92
-rw-r--r--drivers/i2c/busses/i2c-keba.c2
-rw-r--r--drivers/i2c/busses/i2c-mchp-pci1xxxx.c2
-rw-r--r--drivers/i2c/busses/i2c-meson.c4
-rw-r--r--drivers/i2c/busses/i2c-microchip-corei2c.c8
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c30
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c1
-rw-r--r--drivers/i2c/busses/i2c-mxs.c2
-rw-r--r--drivers/i2c/busses/i2c-nct6694.c196
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c4
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c6
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c1
-rw-r--r--drivers/i2c/busses/i2c-omap.c23
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c2
-rw-r--r--drivers/i2c/busses/i2c-piix4.c2
-rw-r--r--drivers/i2c/busses/i2c-pnx.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c16
-rw-r--r--drivers/i2c/busses/i2c-qcom-cci.c52
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c267
-rw-r--r--drivers/i2c/busses/i2c-qup.c15
-rw-r--r--drivers/i2c/busses/i2c-rcar.c10
-rw-r--r--drivers/i2c/busses/i2c-riic.c58
-rw-r--r--drivers/i2c/busses/i2c-robotfuzz-osif.c6
-rw-r--r--drivers/i2c/busses/i2c-rtl9300.c476
-rw-r--r--drivers/i2c/busses/i2c-rzv2m.c1
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c7
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c4
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c4
-rw-r--r--drivers/i2c/busses/i2c-sprd.c4
-rw-r--r--drivers/i2c/busses/i2c-st.c21
-rw-r--r--drivers/i2c/busses/i2c-stm32.c15
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c68
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c4
-rw-r--r--drivers/i2c/busses/i2c-tegra.c119
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c6
-rw-r--r--drivers/i2c/busses/i2c-usbio.c321
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c2
-rw-r--r--drivers/i2c/busses/i2c-virtio.c15
-rw-r--r--drivers/i2c/busses/i2c-xiic.c5
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c2
-rw-r--r--drivers/i2c/i2c-atr.c2
-rw-r--r--drivers/i2c/i2c-core-acpi.c3
-rw-r--r--drivers/i2c/i2c-core-base.c17
-rw-r--r--drivers/i2c/i2c-core-slave.c3
-rw-r--r--drivers/i2c/i2c-mux.c15
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c4
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c4
-rw-r--r--drivers/i2c/muxes/i2c-mux-ltc4306.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-mule.c3
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c12
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c3
-rw-r--r--drivers/i3c/device.c38
-rw-r--r--drivers/i3c/internals.h52
-rw-r--r--drivers/i3c/master.c147
-rw-r--r--drivers/i3c/master/Kconfig21
-rw-r--r--drivers/i3c/master/Makefile2
-rw-r--r--drivers/i3c/master/adi-i3c-master.c1019
-rw-r--r--drivers/i3c/master/dw-i3c-master.c101
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c90
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd_v1.c9
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd_v2.c7
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c76
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dma.c96
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/ext_caps.c11
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/hci.h6
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c226
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/pio.c75
-rw-r--r--drivers/i3c/master/renesas-i3c.c1404
-rw-r--r--drivers/i3c/master/svc-i3c-master.c198
-rw-r--r--drivers/idle/intel_idle.c270
-rw-r--r--drivers/iio/accel/Kconfig19
-rw-r--r--drivers/iio/accel/Makefile4
-rw-r--r--drivers/iio/accel/adxl313.h33
-rw-r--r--drivers/iio/accel/adxl313_core.c923
-rw-r--r--drivers/iio/accel/adxl313_i2c.c6
-rw-r--r--drivers/iio/accel/adxl313_spi.c6
-rw-r--r--drivers/iio/accel/adxl345.h3
-rw-r--r--drivers/iio/accel/adxl345_core.c1062
-rw-r--r--drivers/iio/accel/adxl355_core.c44
-rw-r--r--drivers/iio/accel/adxl372.c3
-rw-r--r--drivers/iio/accel/adxl380.c134
-rw-r--r--drivers/iio/accel/adxl380.h4
-rw-r--r--drivers/iio/accel/adxl380_i2c.c4
-rw-r--r--drivers/iio/accel/adxl380_spi.c4
-rw-r--r--drivers/iio/accel/bma180.c16
-rw-r--r--drivers/iio/accel/bma220.h28
-rw-r--r--drivers/iio/accel/bma220_core.c585
-rw-r--r--drivers/iio/accel/bma220_i2c.c69
-rw-r--r--drivers/iio/accel/bma220_spi.c320
-rw-r--r--drivers/iio/accel/bma400.h155
-rw-r--r--drivers/iio/accel/bma400_core.c349
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c17
-rw-r--r--drivers/iio/accel/bmc150-accel.h1
-rw-r--r--drivers/iio/accel/bmi088-accel-core.c3
-rw-r--r--drivers/iio/accel/dmard06.c4
-rw-r--r--drivers/iio/accel/dmard09.c4
-rw-r--r--drivers/iio/accel/dmard10.c4
-rw-r--r--drivers/iio/accel/fxls8962af-core.c3
-rw-r--r--drivers/iio/accel/kionix-kx022a.c12
-rw-r--r--drivers/iio/accel/kxcjk-1013.c11
-rw-r--r--drivers/iio/accel/kxsd9.c3
-rw-r--r--drivers/iio/accel/mc3230.c4
-rw-r--r--drivers/iio/accel/mma7660.c4
-rw-r--r--drivers/iio/accel/mma8452.c7
-rw-r--r--drivers/iio/accel/mma9551.c6
-rw-r--r--drivers/iio/accel/mma9551_core.c5
-rw-r--r--drivers/iio/accel/mma9553.c11
-rw-r--r--drivers/iio/accel/msa311.c20
-rw-r--r--drivers/iio/accel/mxc4005.c6
-rw-r--r--drivers/iio/accel/mxc6255.c3
-rw-r--r--drivers/iio/accel/sca3000.c29
-rw-r--r--drivers/iio/accel/sca3300.c6
-rw-r--r--drivers/iio/accel/st_accel_core.c10
-rw-r--r--drivers/iio/accel/stk8312.c7
-rw-r--r--drivers/iio/accel/stk8ba50.c7
-rw-r--r--drivers/iio/adc/88pm886-gpadc.c393
-rw-r--r--drivers/iio/adc/Kconfig130
-rw-r--r--drivers/iio/adc/Makefile10
-rw-r--r--drivers/iio/adc/ab8500-gpadc.c1
-rw-r--r--drivers/iio/adc/ad4000.c2
-rw-r--r--drivers/iio/adc/ad4030.c6
-rw-r--r--drivers/iio/adc/ad4080.c701
-rw-r--r--drivers/iio/adc/ad4130.c5
-rw-r--r--drivers/iio/adc/ad4170-4.c3027
-rw-r--r--drivers/iio/adc/ad4851.c16
-rw-r--r--drivers/iio/adc/ad7091r5.c2
-rw-r--r--drivers/iio/adc/ad7091r8.c6
-rw-r--r--drivers/iio/adc/ad7124.c898
-rw-r--r--drivers/iio/adc/ad7173.c378
-rw-r--r--drivers/iio/adc/ad7280a.c2
-rw-r--r--drivers/iio/adc/ad7380.c19
-rw-r--r--drivers/iio/adc/ad7405.c253
-rw-r--r--drivers/iio/adc/ad7476.c466
-rw-r--r--drivers/iio/adc/ad7606.c361
-rw-r--r--drivers/iio/adc/ad7606.h22
-rw-r--r--drivers/iio/adc/ad7768-1.c951
-rw-r--r--drivers/iio/adc/ad7779.c192
-rw-r--r--drivers/iio/adc/ad7949.c11
-rw-r--r--drivers/iio/adc/ad799x.c30
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c299
-rw-r--r--drivers/iio/adc/ade9000.c1799
-rw-r--r--drivers/iio/adc/adi-axi-adc.c106
-rw-r--r--drivers/iio/adc/aspeed_adc.c34
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c13
-rw-r--r--drivers/iio/adc/at91_adc.c10
-rw-r--r--drivers/iio/adc/axp20x_adc.c3
-rw-r--r--drivers/iio/adc/bcm_iproc_adc.c4
-rw-r--r--drivers/iio/adc/cpcap-adc.c6
-rw-r--r--drivers/iio/adc/da9150-gpadc.c5
-rw-r--r--drivers/iio/adc/dln2-adc.c13
-rw-r--r--drivers/iio/adc/exynos_adc.c286
-rw-r--r--drivers/iio/adc/hi8435.c4
-rw-r--r--drivers/iio/adc/hx711.c2
-rw-r--r--drivers/iio/adc/imx7d_adc.c4
-rw-r--r--drivers/iio/adc/imx8qxp-adc.c6
-rw-r--r--drivers/iio/adc/imx93_adc.c26
-rw-r--r--drivers/iio/adc/intel_dc_ti_adc.c328
-rw-r--r--drivers/iio/adc/max1363.c43
-rw-r--r--drivers/iio/adc/max14001.c391
-rw-r--r--drivers/iio/adc/max9611.c4
-rw-r--r--drivers/iio/adc/mcp3564.c4
-rw-r--r--drivers/iio/adc/meson_saradc.c8
-rw-r--r--drivers/iio/adc/mp2629_adc.c2
-rw-r--r--drivers/iio/adc/mt6359-auxadc.c440
-rw-r--r--drivers/iio/adc/mt6360-adc.c5
-rw-r--r--drivers/iio/adc/mt6577_auxadc.c3
-rw-r--r--drivers/iio/adc/mxs-lradc-adc.c4
-rw-r--r--drivers/iio/adc/pac1921.c11
-rw-r--r--drivers/iio/adc/pac1934.c33
-rw-r--r--drivers/iio/adc/palmas_gpadc.c4
-rw-r--r--drivers/iio/adc/qcom-spmi-rradc.c2
-rw-r--r--drivers/iio/adc/qcom-vadc-common.c2
-rw-r--r--drivers/iio/adc/rcar-gyroadc.c8
-rw-r--r--drivers/iio/adc/rn5t618-adc.c4
-rw-r--r--drivers/iio/adc/rockchip_saradc.c10
-rw-r--r--drivers/iio/adc/rohm-bd79112.c551
-rw-r--r--drivers/iio/adc/rohm-bd79124.c43
-rw-r--r--drivers/iio/adc/rtq6056.c6
-rw-r--r--drivers/iio/adc/rzg2l_adc.c35
-rw-r--r--drivers/iio/adc/rzn1-adc.c490
-rw-r--r--drivers/iio/adc/rzt2h_adc.c304
-rw-r--r--drivers/iio/adc/spear_adc.c12
-rw-r--r--drivers/iio/adc/stm32-adc-core.c11
-rw-r--r--drivers/iio/adc/stm32-adc.c16
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c10
-rw-r--r--drivers/iio/adc/stm32-dfsdm-core.c1
-rw-r--r--drivers/iio/adc/stmpe-adc.c4
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c3
-rw-r--r--drivers/iio/adc/ti-adc081c.c40
-rw-r--r--drivers/iio/adc/ti-adc084s021.c4
-rw-r--r--drivers/iio/adc/ti-adc12138.c30
-rw-r--r--drivers/iio/adc/ti-adc128s052.c132
-rw-r--r--drivers/iio/adc/ti-ads1015.c10
-rw-r--r--drivers/iio/adc/ti-ads1100.c1
-rw-r--r--drivers/iio/adc/ti-ads1119.c15
-rw-r--r--drivers/iio/adc/ti-ads131e08.c20
-rw-r--r--drivers/iio/adc/ti-ads7924.c9
-rw-r--r--drivers/iio/adc/ti-ads7950.c2
-rw-r--r--drivers/iio/adc/ti-lmp92064.c4
-rw-r--r--drivers/iio/adc/ti-tsc2046.c9
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c7
-rw-r--r--drivers/iio/adc/twl4030-madc.c4
-rw-r--r--drivers/iio/adc/vf610_adc.c7
-rw-r--r--drivers/iio/adc/viperboard_adc.c4
-rw-r--r--drivers/iio/adc/xilinx-ams.c47
-rw-r--r--drivers/iio/addac/ad74115.c2
-rw-r--r--drivers/iio/addac/ad74413r.c4
-rw-r--r--drivers/iio/amplifiers/ad8366.c6
-rw-r--r--drivers/iio/amplifiers/ada4250.c55
-rw-r--r--drivers/iio/buffer/industrialio-buffer-cb.c2
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dma.c6
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c2
-rw-r--r--drivers/iio/buffer/industrialio-triggered-buffer.c2
-rw-r--r--drivers/iio/chemical/atlas-ezo-sensor.c2
-rw-r--r--drivers/iio/chemical/atlas-sensor.c5
-rw-r--r--drivers/iio/chemical/bme680_core.c5
-rw-r--r--drivers/iio/chemical/ens160_core.c3
-rw-r--r--drivers/iio/chemical/scd30_core.c5
-rw-r--r--drivers/iio/chemical/scd4x.c3
-rw-r--r--drivers/iio/chemical/sunrise_co2.c6
-rw-r--r--drivers/iio/common/cros_ec_sensors/Kconfig9
-rw-r--r--drivers/iio/common/cros_ec_sensors/Makefile1
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_activity.c307
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c10
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c2
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c1
-rw-r--r--drivers/iio/common/scmi_sensors/scmi_iio.c15
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_dev.c4
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c36
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c20
-rw-r--r--drivers/iio/dac/Kconfig31
-rw-r--r--drivers/iio/dac/Makefile2
-rw-r--r--drivers/iio/dac/ad3530r.c7
-rw-r--r--drivers/iio/dac/ad3552r.c3
-rw-r--r--drivers/iio/dac/ad5360.c2
-rw-r--r--drivers/iio/dac/ad5380.c6
-rw-r--r--drivers/iio/dac/ad5421.c2
-rw-r--r--drivers/iio/dac/ad5446-i2c.c102
-rw-r--r--drivers/iio/dac/ad5446-spi.c252
-rw-r--r--drivers/iio/dac/ad5446.c506
-rw-r--r--drivers/iio/dac/ad5446.h77
-rw-r--r--drivers/iio/dac/ad5592r-base.c2
-rw-r--r--drivers/iio/dac/ad5764.c4
-rw-r--r--drivers/iio/dac/ad5770r.c2
-rw-r--r--drivers/iio/dac/ad5791.c4
-rw-r--r--drivers/iio/dac/adi-axi-dac.c44
-rw-r--r--drivers/iio/dac/ds4424.c4
-rw-r--r--drivers/iio/dac/ltc2688.c34
-rw-r--r--drivers/iio/dac/max517.c4
-rw-r--r--drivers/iio/dac/mcp4725.c4
-rw-r--r--drivers/iio/dac/rohm-bd79703.c2
-rw-r--r--drivers/iio/dac/stm32-dac.c19
-rw-r--r--drivers/iio/dac/ti-dac7311.c4
-rw-r--r--drivers/iio/dac/vf610_dac.c23
-rw-r--r--drivers/iio/frequency/adf4350.c23
-rw-r--r--drivers/iio/gyro/bmg160_core.c8
-rw-r--r--drivers/iio/gyro/fxas21002c_core.c2
-rw-r--r--drivers/iio/gyro/mpu3050-core.c3
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c1
-rw-r--r--drivers/iio/health/afe4403.c50
-rw-r--r--drivers/iio/health/afe4404.c50
-rw-r--r--drivers/iio/health/max30100.c41
-rw-r--r--drivers/iio/health/max30102.c3
-rw-r--r--drivers/iio/humidity/am2315.c4
-rw-r--r--drivers/iio/humidity/dht11.c8
-rw-r--r--drivers/iio/humidity/hdc3020.c73
-rw-r--r--drivers/iio/imu/Kconfig2
-rw-r--r--drivers/iio/imu/Makefile2
-rw-r--r--drivers/iio/imu/adis16400.c314
-rw-r--r--drivers/iio/imu/adis16475.c1
-rw-r--r--drivers/iio/imu/bmi160/bmi160.h2
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c21
-rw-r--r--drivers/iio/imu/bmi160/bmi160_i2c.c2
-rw-r--r--drivers/iio/imu/bmi160/bmi160_spi.c2
-rw-r--r--drivers/iio/imu/bmi270/bmi270.h2
-rw-r--r--drivers/iio/imu/bmi270/bmi270_core.c672
-rw-r--r--drivers/iio/imu/bmi270/bmi270_i2c.c4
-rw-r--r--drivers/iio/imu/bmi270/bmi270_spi.c4
-rw-r--r--drivers/iio/imu/bmi323/bmi323_core.c3
-rw-r--r--drivers/iio/imu/bno055/bno055.c60
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600.h63
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c385
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c85
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h10
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_core.c216
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c70
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c13
-rw-r--r--drivers/iio/imu/inv_icm45600/Kconfig70
-rw-r--r--drivers/iio/imu/inv_icm45600/Makefile16
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600.h385
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_accel.c782
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c558
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.h101
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_core.c988
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_gyro.c791
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_i2c.c98
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_i3c.c79
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_spi.c108
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c56
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c11
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c6
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c1
-rw-r--r--drivers/iio/imu/kmx61.c13
-rw-r--r--drivers/iio/imu/smi330/Kconfig33
-rw-r--r--drivers/iio/imu/smi330/Makefile7
-rw-r--r--drivers/iio/imu/smi330/smi330.h25
-rw-r--r--drivers/iio/imu/smi330/smi330_core.c918
-rw-r--r--drivers/iio/imu/smi330/smi330_i2c.c133
-rw-r--r--drivers/iio/imu/smi330/smi330_spi.c85
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h44
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c71
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c40
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c2
-rw-r--r--drivers/iio/industrialio-backend.c74
-rw-r--r--drivers/iio/industrialio-buffer.c33
-rw-r--r--drivers/iio/industrialio-core.c35
-rw-r--r--drivers/iio/inkern.c81
-rw-r--r--drivers/iio/light/Kconfig13
-rw-r--r--drivers/iio/light/Makefile1
-rw-r--r--drivers/iio/light/acpi-als.c19
-rw-r--r--drivers/iio/light/adjd_s311.c12
-rw-r--r--drivers/iio/light/adux1020.c3
-rw-r--r--drivers/iio/light/al3000a.c2
-rw-r--r--drivers/iio/light/apds9160.c4
-rw-r--r--drivers/iio/light/apds9300.c3
-rw-r--r--drivers/iio/light/apds9306.c42
-rw-r--r--drivers/iio/light/apds9960.c6
-rw-r--r--drivers/iio/light/as73211.c2
-rw-r--r--drivers/iio/light/bh1745.c11
-rw-r--r--drivers/iio/light/bh1780.c1
-rw-r--r--drivers/iio/light/cm3232.c18
-rw-r--r--drivers/iio/light/gp2ap002.c2
-rw-r--r--drivers/iio/light/hid-sensor-als.c5
-rw-r--r--drivers/iio/light/isl29028.c13
-rw-r--r--drivers/iio/light/isl29125.c14
-rw-r--r--drivers/iio/light/isl76682.c2
-rw-r--r--drivers/iio/light/jsa1212.c3
-rw-r--r--drivers/iio/light/ltr390.c205
-rw-r--r--drivers/iio/light/ltr501.c18
-rw-r--r--drivers/iio/light/ltrf216a.c1
-rw-r--r--drivers/iio/light/max44000.c18
-rw-r--r--drivers/iio/light/opt4001.c3
-rw-r--r--drivers/iio/light/opt4060.c13
-rw-r--r--drivers/iio/light/pa12203001.c11
-rw-r--r--drivers/iio/light/rohm-bu27034.c3
-rw-r--r--drivers/iio/light/rpr0521.c16
-rw-r--r--drivers/iio/light/si1145.c5
-rw-r--r--drivers/iio/light/st_uvis25.h5
-rw-r--r--drivers/iio/light/st_uvis25_core.c12
-rw-r--r--drivers/iio/light/stk3310.c10
-rw-r--r--drivers/iio/light/tcs3414.c15
-rw-r--r--drivers/iio/light/tcs3472.c14
-rw-r--r--drivers/iio/light/tsl2583.c12
-rw-r--r--drivers/iio/light/tsl2591.c2
-rw-r--r--drivers/iio/light/us5182d.c12
-rw-r--r--drivers/iio/light/vcnl4000.c22
-rw-r--r--drivers/iio/light/vcnl4035.c17
-rw-r--r--drivers/iio/light/veml3235.c2
-rw-r--r--drivers/iio/light/veml6030.c6
-rw-r--r--drivers/iio/light/veml6040.c3
-rw-r--r--drivers/iio/light/veml6046x00.c1030
-rw-r--r--drivers/iio/light/vl6180.c16
-rw-r--r--drivers/iio/light/zopt2201.c6
-rw-r--r--drivers/iio/magnetometer/Kconfig15
-rw-r--r--drivers/iio/magnetometer/Makefile2
-rw-r--r--drivers/iio/magnetometer/af8133j.c4
-rw-r--r--drivers/iio/magnetometer/ak8974.c2
-rw-r--r--drivers/iio/magnetometer/ak8975.c1
-rw-r--r--drivers/iio/magnetometer/als31300.c5
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c18
-rw-r--r--drivers/iio/magnetometer/mmc35240.c3
-rw-r--r--drivers/iio/magnetometer/tlv493d.c526
-rw-r--r--drivers/iio/magnetometer/tmag5273.c5
-rw-r--r--drivers/iio/magnetometer/yamaha-yas530.c2
-rw-r--r--drivers/iio/position/hid-sensor-custom-intel-hinge.c2
-rw-r--r--drivers/iio/potentiometer/ds1803.c1
-rw-r--r--drivers/iio/potentiometer/mcp4131.c1
-rw-r--r--drivers/iio/potentiostat/lmp91000.c4
-rw-r--r--drivers/iio/pressure/Kconfig12
-rw-r--r--drivers/iio/pressure/Makefile8
-rw-r--r--drivers/iio/pressure/abp060mg.c4
-rw-r--r--drivers/iio/pressure/adp810.c225
-rw-r--r--drivers/iio/pressure/bmp280-core.c33
-rw-r--r--drivers/iio/pressure/dlhl60d.c51
-rw-r--r--drivers/iio/pressure/icp10100.c1
-rw-r--r--drivers/iio/pressure/mpl115.c2
-rw-r--r--drivers/iio/pressure/mpl3115.c550
-rw-r--r--drivers/iio/pressure/mprls0025pa_i2c.c5
-rw-r--r--drivers/iio/pressure/zpa2326.c6
-rw-r--r--drivers/iio/proximity/Kconfig9
-rw-r--r--drivers/iio/proximity/Makefile1
-rw-r--r--drivers/iio/proximity/d3323aa.c815
-rw-r--r--drivers/iio/proximity/hx9023s.c3
-rw-r--r--drivers/iio/proximity/irsd200.c28
-rw-r--r--drivers/iio/proximity/isl29501.c16
-rw-r--r--drivers/iio/proximity/mb1232.c15
-rw-r--r--drivers/iio/proximity/ping.c4
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c16
-rw-r--r--drivers/iio/proximity/srf04.c8
-rw-r--r--drivers/iio/proximity/srf08.c18
-rw-r--r--drivers/iio/proximity/sx9500.c30
-rw-r--r--drivers/iio/proximity/vcnl3020.c16
-rw-r--r--drivers/iio/proximity/vl53l0x-i2c.c27
-rw-r--r--drivers/iio/resolver/ad2s1200.c3
-rw-r--r--drivers/iio/resolver/ad2s1210.c30
-rw-r--r--drivers/iio/temperature/Kconfig8
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c26
-rw-r--r--drivers/iio/temperature/mcp9600.c151
-rw-r--r--drivers/iio/temperature/mlx90614.c6
-rw-r--r--drivers/iio/temperature/mlx90632.c5
-rw-r--r--drivers/iio/temperature/mlx90635.c9
-rw-r--r--drivers/iio/temperature/tmp006.c4
-rw-r--r--drivers/iio/test/Kconfig12
-rw-r--r--drivers/iio/test/Makefile1
-rw-r--r--drivers/iio/test/iio-test-multiply.c212
-rw-r--r--drivers/iio/trigger/stm32-lptimer-trigger.c1
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c1
-rw-r--r--drivers/infiniband/Kconfig3
-rw-r--r--drivers/infiniband/core/Makefile1
-rw-r--r--drivers/infiniband/core/addr.c83
-rw-r--r--drivers/infiniband/core/agent.c3
-rw-r--r--drivers/infiniband/core/cache.c4
-rw-r--r--drivers/infiniband/core/cm.c60
-rw-r--r--drivers/infiniband/core/cma.c138
-rw-r--r--drivers/infiniband/core/cma_priv.h4
-rw-r--r--drivers/infiniband/core/counters.c2
-rw-r--r--drivers/infiniband/core/cq.c12
-rw-r--r--drivers/infiniband/core/device.c53
-rw-r--r--drivers/infiniband/core/mad.c468
-rw-r--r--drivers/infiniband/core/mad_priv.h76
-rw-r--r--drivers/infiniband/core/mad_rmpp.c41
-rw-r--r--drivers/infiniband/core/nldev.c24
-rw-r--r--drivers/infiniband/core/rdma_core.c29
-rw-r--r--drivers/infiniband/core/rdma_core.h1
-rw-r--r--drivers/infiniband/core/restrack.c6
-rw-r--r--drivers/infiniband/core/sa_query.c283
-rw-r--r--drivers/infiniband/core/ucma.c122
-rw-r--r--drivers/infiniband/core/umem.c8
-rw-r--r--drivers/infiniband/core/umem_odp.c15
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c13
-rw-r--r--drivers/infiniband/core/uverbs_std_types_cq.c88
-rw-r--r--drivers/infiniband/core/uverbs_std_types_dmah.c145
-rw-r--r--drivers/infiniband/core/uverbs_std_types_mr.c172
-rw-r--r--drivers/infiniband/core/uverbs_std_types_qp.c2
-rw-r--r--drivers/infiniband/core/uverbs_uapi.c1
-rw-r--r--drivers/infiniband/core/verbs.c8
-rw-r--r--drivers/infiniband/hw/Makefile3
-rw-r--r--drivers/infiniband/hw/bng_re/Kconfig10
-rw-r--r--drivers/infiniband/hw/bng_re/Makefile8
-rw-r--r--drivers/infiniband/hw/bng_re/bng_debugfs.c39
-rw-r--r--drivers/infiniband/hw/bng_re/bng_debugfs.h12
-rw-r--r--drivers/infiniband/hw/bng_re/bng_dev.c534
-rw-r--r--drivers/infiniband/hw/bng_re/bng_fw.c767
-rw-r--r--drivers/infiniband/hw/bng_re/bng_fw.h211
-rw-r--r--drivers/infiniband/hw/bng_re/bng_re.h85
-rw-r--r--drivers/infiniband/hw/bng_re/bng_res.c279
-rw-r--r--drivers/infiniband/hw/bng_re/bng_res.h215
-rw-r--r--drivers/infiniband/hw/bng_re/bng_sp.c131
-rw-r--r--drivers/infiniband/hw/bng_re/bng_sp.h47
-rw-r--r--drivers/infiniband/hw/bng_re/bng_tlv.h128
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h21
-rw-r--r--drivers/infiniband/hw/bnxt_re/debugfs.c165
-rw-r--r--drivers/infiniband/hw/bnxt_re/debugfs.h19
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c109
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.h26
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c191
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h12
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c402
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c74
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h8
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c10
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c40
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h21
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c108
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h9
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h48
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h1
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c8
-rw-r--r--drivers/infiniband/hw/efa/efa.h5
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h17
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c18
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c53
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h11
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c1
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c93
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.c6
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c121
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h7
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c96
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c28
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.h9
-rw-r--r--drivers/infiniband/hw/hfi1/device.c4
-rw-r--r--drivers/infiniband/hw/hfi1/fault.c9
-rw-r--r--drivers/infiniband/hw/hfi1/init.c4
-rw-r--r--drivers/infiniband/hw/hfi1/opfn.c4
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c2
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c2
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c4
-rw-r--r--drivers/infiniband/hw/hns/Makefile4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_bond.c1012
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_bond.h95
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c58
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h41
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c18
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c293
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h36
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c221
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c128
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c11
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_restrack.c9
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c1
-rw-r--r--drivers/infiniband/hw/ionic/Kconfig15
-rw-r--r--drivers/infiniband/hw/ionic/Makefile9
-rw-r--r--drivers/infiniband/hw/ionic/ionic_admin.c1229
-rw-r--r--drivers/infiniband/hw/ionic/ionic_controlpath.c2679
-rw-r--r--drivers/infiniband/hw/ionic/ionic_datapath.c1399
-rw-r--r--drivers/infiniband/hw/ionic/ionic_fw.h1029
-rw-r--r--drivers/infiniband/hw/ionic/ionic_hw_stats.c484
-rw-r--r--drivers/infiniband/hw/ionic/ionic_ibdev.c440
-rw-r--r--drivers/infiniband/hw/ionic/ionic_ibdev.h517
-rw-r--r--drivers/infiniband/hw/ionic/ionic_lif_cfg.c111
-rw-r--r--drivers/infiniband/hw/ionic/ionic_lif_cfg.h66
-rw-r--r--drivers/infiniband/hw/ionic/ionic_pgtbl.c143
-rw-r--r--drivers/infiniband/hw/ionic/ionic_queue.c52
-rw-r--r--drivers/infiniband/hw/ionic/ionic_queue.h234
-rw-r--r--drivers/infiniband/hw/ionic/ionic_res.h154
-rw-r--r--drivers/infiniband/hw/irdma/Kconfig7
-rw-r--r--drivers/infiniband/hw/irdma/Makefile4
-rw-r--r--drivers/infiniband/hw/irdma/cm.c2
-rw-r--r--drivers/infiniband/hw/irdma/ctrl.c1535
-rw-r--r--drivers/infiniband/hw/irdma/defs.h264
-rw-r--r--drivers/infiniband/hw/irdma/hmc.c18
-rw-r--r--drivers/infiniband/hw/irdma/hmc.h19
-rw-r--r--drivers/infiniband/hw/irdma/hw.c366
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_hw.c2
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_hw.h2
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_if.c3
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_hw.c3
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_hw.h5
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_if.c347
-rw-r--r--drivers/infiniband/hw/irdma/ig3rdma_hw.c170
-rw-r--r--drivers/infiniband/hw/irdma/ig3rdma_hw.h32
-rw-r--r--drivers/infiniband/hw/irdma/ig3rdma_if.c236
-rw-r--r--drivers/infiniband/hw/irdma/irdma.h22
-rw-r--r--drivers/infiniband/hw/irdma/main.c371
-rw-r--r--drivers/infiniband/hw/irdma/main.h38
-rw-r--r--drivers/infiniband/hw/irdma/pble.c28
-rw-r--r--drivers/infiniband/hw/irdma/protos.h1
-rw-r--r--drivers/infiniband/hw/irdma/puda.c20
-rw-r--r--drivers/infiniband/hw/irdma/puda.h4
-rw-r--r--drivers/infiniband/hw/irdma/type.h228
-rw-r--r--drivers/infiniband/hw/irdma/uda_d.h5
-rw-r--r--drivers/infiniband/hw/irdma/uk.c370
-rw-r--r--drivers/infiniband/hw/irdma/user.h271
-rw-r--r--drivers/infiniband/hw/irdma/utils.c162
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c889
-rw-r--r--drivers/infiniband/hw/irdma/verbs.h55
-rw-r--r--drivers/infiniband/hw/irdma/virtchnl.c618
-rw-r--r--drivers/infiniband/hw/irdma/virtchnl.h176
-rw-r--r--drivers/infiniband/hw/mana/counters.c78
-rw-r--r--drivers/infiniband/hw/mana/counters.h18
-rw-r--r--drivers/infiniband/hw/mana/cq.c26
-rw-r--r--drivers/infiniband/hw/mana/device.c123
-rw-r--r--drivers/infiniband/hw/mana/main.c18
-rw-r--r--drivers/infiniband/hw/mana/mana_ib.h44
-rw-r--r--drivers/infiniband/hw/mana/mr.c14
-rw-r--r--drivers/infiniband/hw/mana/qp.c11
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c8
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c4
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c3
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile1
-rw-r--r--drivers/infiniband/hw/mlx5/counters.c34
-rw-r--r--drivers/infiniband/hw/mlx5/counters.h13
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c34
-rw-r--r--drivers/infiniband/hw/mlx5/data_direct.c2
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c31
-rw-r--r--drivers/infiniband/hw/mlx5/dm.c2
-rw-r--r--drivers/infiniband/hw/mlx5/dmah.c54
-rw-r--r--drivers/infiniband/hw/mlx5/dmah.h23
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c186
-rw-r--r--drivers/infiniband/hw/mlx5/fs.h8
-rw-r--r--drivers/infiniband/hw/mlx5/gsi.c15
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c77
-rw-r--r--drivers/infiniband/hw/mlx5/main.c157
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h106
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c188
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c133
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c5
-rw-r--r--drivers/infiniband/hw/mlx5/std_types.c27
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c313
-rw-r--r--drivers/infiniband/hw/mlx5/umr.h13
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h3
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c6
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h3
-rw-r--r--drivers/infiniband/hw/qib/Kconfig17
-rw-r--r--drivers/infiniband/hw/qib/Makefile17
-rw-r--r--drivers/infiniband/hw/qib/qib.h1492
-rw-r--r--drivers/infiniband/hw/qib/qib_6120_regs.h977
-rw-r--r--drivers/infiniband/hw/qib/qib_7220.h149
-rw-r--r--drivers/infiniband/hw/qib/qib_7220_regs.h1496
-rw-r--r--drivers/infiniband/hw/qib/qib_7322_regs.h3163
-rw-r--r--drivers/infiniband/hw/qib/qib_common.h798
-rw-r--r--drivers/infiniband/hw/qib/qib_debugfs.c274
-rw-r--r--drivers/infiniband/hw/qib/qib_debugfs.h45
-rw-r--r--drivers/infiniband/hw/qib/qib_diag.c906
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c798
-rw-r--r--drivers/infiniband/hw/qib/qib_eeprom.c271
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c2401
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c549
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c3533
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c4596
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c8475
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c1782
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c241
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c2450
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.h300
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c598
-rw-r--r--drivers/infiniband/hw/qib/qib_pio_copy.c64
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c454
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c549
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.h188
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c2131
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c314
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c1445
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c999
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c731
-rw-r--r--drivers/infiniband/hw/qib/qib_twsi.c502
-rw-r--r--drivers/infiniband/hw/qib/qib_tx.c566
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c521
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c583
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c137
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c1470
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.h52
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c1705
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h398
-rw-r--r--drivers/infiniband/hw/qib/qib_wc_ppc64.c62
-rw-r--r--drivers/infiniband/hw/qib/qib_wc_x86_64.c150
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c4
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.h1
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c5
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h1
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c3
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c5
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.h1
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c13
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h12
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c78
-rw-r--r--drivers/infiniband/sw/rxe/rxe_odp.c193
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c51
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c8
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c6
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c59
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c27
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c32
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.h3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c52
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c8
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c5
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c16
-rw-r--r--drivers/input/Makefile2
-rw-r--r--drivers/input/evdev.c8
-rw-r--r--drivers/input/ff-core.c2
-rw-r--r--drivers/input/ff-memless.c1
-rw-r--r--drivers/input/gameport/gameport.c1
-rw-r--r--drivers/input/input-compat.c30
-rw-r--r--drivers/input/input-compat.h3
-rw-r--r--drivers/input/input-mt.c14
-rw-r--r--drivers/input/input-poller.c1
-rw-r--r--drivers/input/input.c38
-rw-r--r--drivers/input/joystick/fsia6b.c2
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c1
-rw-r--r--drivers/input/joystick/iforce/iforce-packets.c1
-rw-r--r--drivers/input/joystick/psxpad-spi.c6
-rw-r--r--drivers/input/joystick/xpad.c77
-rw-r--r--drivers/input/keyboard/Kconfig51
-rw-r--r--drivers/input/keyboard/Makefile4
-rw-r--r--drivers/input/keyboard/adp5585-keys.c371
-rw-r--r--drivers/input/keyboard/adp5588-keys.c7
-rw-r--r--drivers/input/keyboard/adp5589-keys.c1066
-rw-r--r--drivers/input/keyboard/atkbd.c16
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c12
-rw-r--r--drivers/input/keyboard/imx_sc_key.c2
-rw-r--r--drivers/input/keyboard/max7360-keypad.c308
-rw-r--r--drivers/input/keyboard/mtk-pmic-keys.c22
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c530
-rw-r--r--drivers/input/keyboard/samsung-keypad.c137
-rw-r--r--drivers/input/keyboard/spear-keyboard.c71
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c305
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c13
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c35
-rw-r--r--drivers/input/misc/Kconfig49
-rw-r--r--drivers/input/misc/Makefile5
-rw-r--r--drivers/input/misc/ad714x.c1
-rw-r--r--drivers/input/misc/adxl34x.c1
-rw-r--r--drivers/input/misc/arizona-haptics.c14
-rw-r--r--drivers/input/misc/aw86927.c846
-rw-r--r--drivers/input/misc/cma3000_d0x.c1
-rw-r--r--drivers/input/misc/cs40l50-vibra.c3
-rw-r--r--drivers/input/misc/gpio-beeper.c2
-rw-r--r--drivers/input/misc/iqs626a.c2
-rw-r--r--drivers/input/misc/iqs7222.c10
-rw-r--r--drivers/input/misc/max7360-rotary.c192
-rw-r--r--drivers/input/misc/max77693-haptic.c41
-rw-r--r--drivers/input/misc/max8997_haptic.c96
-rw-r--r--drivers/input/misc/mc13783-pwrbutton.c1
-rw-r--r--drivers/input/misc/pcf50633-input.c113
-rw-r--r--drivers/input/misc/pf1550-onkey.c197
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c12
-rw-r--r--drivers/input/misc/tps6594-pwrbutton.c126
-rw-r--r--drivers/input/misc/uinput.c1
-rw-r--r--drivers/input/mouse/alps.c10
-rw-r--r--drivers/input/mouse/lifebook.c4
-rw-r--r--drivers/input/mouse/psmouse-base.c2
-rw-r--r--drivers/input/rmi4/Kconfig15
-rw-r--r--drivers/input/rmi4/Makefile2
-rw-r--r--drivers/input/rmi4/rmi_2d_sensor.c1
-rw-r--r--drivers/input/rmi4/rmi_2d_sensor.h3
-rw-r--r--drivers/input/rmi4/rmi_bus.c7
-rw-r--r--drivers/input/rmi4/rmi_driver.c1
-rw-r--r--drivers/input/rmi4/rmi_driver.h2
-rw-r--r--drivers/input/rmi4/rmi_f1a.c143
-rw-r--r--drivers/input/rmi4/rmi_f21.c179
-rw-r--r--drivers/input/serio/Kconfig4
-rw-r--r--drivers/input/serio/hil_mlc.c1
-rw-r--r--drivers/input/serio/hp_sdc.c1
-rw-r--r--drivers/input/serio/i8042-acpipnpio.h14
-rw-r--r--drivers/input/serio/i8042.c1
-rw-r--r--drivers/input/serio/libps2.c1
-rw-r--r--drivers/input/serio/ps2-gpio.c2
-rw-r--r--drivers/input/serio/serio.c1
-rw-r--r--drivers/input/sparse-keymap.c1
-rw-r--r--drivers/input/tablet/pegasus_notetaker.c9
-rw-r--r--drivers/input/touch-overlay.c278
-rw-r--r--drivers/input/touchscreen.c1
-rw-r--r--drivers/input/touchscreen/Kconfig23
-rw-r--r--drivers/input/touchscreen/Makefile2
-rw-r--r--drivers/input/touchscreen/ad7879.c10
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c13
-rw-r--r--drivers/input/touchscreen/cyttsp_core.c1
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c26
-rw-r--r--drivers/input/touchscreen/fsl-imx25-tcq.c1
-rw-r--r--drivers/input/touchscreen/goodix.c78
-rw-r--r--drivers/input/touchscreen/goodix.h1
-rw-r--r--drivers/input/touchscreen/goodix_berlin_core.c3
-rw-r--r--drivers/input/touchscreen/himax_hx852x.c503
-rw-r--r--drivers/input/touchscreen/hynitron-cst816x.c253
-rw-r--r--drivers/input/touchscreen/imx6ul_tsc.c121
-rw-r--r--drivers/input/touchscreen/mc13783_ts.c4
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c2
-rw-r--r--drivers/input/touchscreen/st1232.c35
-rw-r--r--drivers/input/touchscreen/tsc2007_core.c39
-rw-r--r--drivers/input/touchscreen/tsc200x-core.c1
-rw-r--r--drivers/input/touchscreen/wm9705.c1
-rw-r--r--drivers/input/touchscreen/wm9712.c1
-rw-r--r--drivers/input/touchscreen/wm9713.c1
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c1
-rw-r--r--drivers/interconnect/core.c36
-rw-r--r--drivers/interconnect/debugfs-client.c7
-rw-r--r--drivers/interconnect/icc-clk.c2
-rw-r--r--drivers/interconnect/qcom/Kconfig27
-rw-r--r--drivers/interconnect/qcom/Makefile6
-rw-r--r--drivers/interconnect/qcom/glymur.c2522
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c46
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.h9
-rw-r--r--drivers/interconnect/qcom/kaanapali.c1855
-rw-r--r--drivers/interconnect/qcom/milos.c1919
-rw-r--r--drivers/interconnect/qcom/msm8996.c1
-rw-r--r--drivers/interconnect/qcom/osm-l3.c7
-rw-r--r--drivers/interconnect/qcom/qcs615.c553
-rw-r--r--drivers/interconnect/qcom/qcs615.h128
-rw-r--r--drivers/interconnect/qcom/qcs8300.c671
-rw-r--r--drivers/interconnect/qcom/qcs8300.h177
-rw-r--r--drivers/interconnect/qcom/qdu1000.c348
-rw-r--r--drivers/interconnect/qcom/qdu1000.h95
-rw-r--r--drivers/interconnect/qcom/sa8775p.c639
-rw-r--r--drivers/interconnect/qcom/sar2130p.c630
-rw-r--r--drivers/interconnect/qcom/sc7180.c678
-rw-r--r--drivers/interconnect/qcom/sc7180.h149
-rw-r--r--drivers/interconnect/qcom/sc7280.c618
-rw-r--r--drivers/interconnect/qcom/sc7280.h154
-rw-r--r--drivers/interconnect/qcom/sc8180x.c654
-rw-r--r--drivers/interconnect/qcom/sc8180x.h179
-rw-r--r--drivers/interconnect/qcom/sc8280xp.c826
-rw-r--r--drivers/interconnect/qcom/sc8280xp.h209
-rw-r--r--drivers/interconnect/qcom/sdm670.c522
-rw-r--r--drivers/interconnect/qcom/sdm670.h128
-rw-r--r--drivers/interconnect/qcom/sdm845.c766
-rw-r--r--drivers/interconnect/qcom/sdm845.h140
-rw-r--r--drivers/interconnect/qcom/sdx55.c489
-rw-r--r--drivers/interconnect/qcom/sdx55.h70
-rw-r--r--drivers/interconnect/qcom/sdx65.c457
-rw-r--r--drivers/interconnect/qcom/sdx65.h65
-rw-r--r--drivers/interconnect/qcom/sdx75.c395
-rw-r--r--drivers/interconnect/qcom/sdx75.h97
-rw-r--r--drivers/interconnect/qcom/sm6350.c927
-rw-r--r--drivers/interconnect/qcom/sm6350.h139
-rw-r--r--drivers/interconnect/qcom/sm7150.c653
-rw-r--r--drivers/interconnect/qcom/sm7150.h140
-rw-r--r--drivers/interconnect/qcom/sm8150.c706
-rw-r--r--drivers/interconnect/qcom/sm8150.h152
-rw-r--r--drivers/interconnect/qcom/sm8250.c736
-rw-r--r--drivers/interconnect/qcom/sm8250.h168
-rw-r--r--drivers/interconnect/qcom/sm8350.c684
-rw-r--r--drivers/interconnect/qcom/sm8350.h158
-rw-r--r--drivers/interconnect/qcom/sm8450.c601
-rw-r--r--drivers/interconnect/qcom/sm8450.h169
-rw-r--r--drivers/interconnect/qcom/sm8550.c501
-rw-r--r--drivers/interconnect/qcom/sm8550.h138
-rw-r--r--drivers/interconnect/qcom/sm8650.c527
-rw-r--r--drivers/interconnect/qcom/sm8650.h144
-rw-r--r--drivers/interconnect/qcom/sm8750.c602
-rw-r--r--drivers/interconnect/qcom/x1e80100.c610
-rw-r--r--drivers/interconnect/qcom/x1e80100.h192
-rw-r--r--drivers/interconnect/samsung/exynos.c5
-rw-r--r--drivers/iommu/Kconfig17
-rw-r--r--drivers/iommu/Makefile2
-rw-r--r--drivers/iommu/amd/Kconfig6
-rw-r--r--drivers/iommu/amd/Makefile2
-rw-r--r--drivers/iommu/amd/amd_iommu.h7
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h136
-rw-r--r--drivers/iommu/amd/debugfs.c378
-rw-r--r--drivers/iommu/amd/init.c389
-rw-r--r--drivers/iommu/amd/io_pgtable.c560
-rw-r--r--drivers/iommu/amd/io_pgtable_v2.c370
-rw-r--r--drivers/iommu/amd/iommu.c727
-rw-r--r--drivers/iommu/apple-dart.c67
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c88
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c3
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c72
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h35
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c495
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c32
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c21
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c27
-rw-r--r--drivers/iommu/dma-iommu.c70
-rw-r--r--drivers/iommu/exynos-iommu.c25
-rw-r--r--drivers/iommu/fsl_pamu_domain.c12
-rw-r--r--drivers/iommu/generic_pt/.kunitconfig14
-rw-r--r--drivers/iommu/generic_pt/Kconfig79
-rw-r--r--drivers/iommu/generic_pt/fmt/Makefile28
-rw-r--r--drivers/iommu/generic_pt/fmt/amdv1.h411
-rw-r--r--drivers/iommu/generic_pt/fmt/defs_amdv1.h21
-rw-r--r--drivers/iommu/generic_pt/fmt/defs_vtdss.h21
-rw-r--r--drivers/iommu/generic_pt/fmt/defs_x86_64.h21
-rw-r--r--drivers/iommu/generic_pt/fmt/iommu_amdv1.c15
-rw-r--r--drivers/iommu/generic_pt/fmt/iommu_mock.c10
-rw-r--r--drivers/iommu/generic_pt/fmt/iommu_template.h48
-rw-r--r--drivers/iommu/generic_pt/fmt/iommu_vtdss.c10
-rw-r--r--drivers/iommu/generic_pt/fmt/iommu_x86_64.c11
-rw-r--r--drivers/iommu/generic_pt/fmt/vtdss.h285
-rw-r--r--drivers/iommu/generic_pt/fmt/x86_64.h279
-rw-r--r--drivers/iommu/generic_pt/iommu_pt.h1289
-rw-r--r--drivers/iommu/generic_pt/kunit_generic_pt.h823
-rw-r--r--drivers/iommu/generic_pt/kunit_iommu.h184
-rw-r--r--drivers/iommu/generic_pt/kunit_iommu_pt.h487
-rw-r--r--drivers/iommu/generic_pt/pt_common.h389
-rw-r--r--drivers/iommu/generic_pt/pt_defs.h332
-rw-r--r--drivers/iommu/generic_pt/pt_fmt_defaults.h295
-rw-r--r--drivers/iommu/generic_pt/pt_iter.h636
-rw-r--r--drivers/iommu/generic_pt/pt_log2.h122
-rw-r--r--drivers/iommu/hyperv-iommu.c33
-rw-r--r--drivers/iommu/intel/Kconfig6
-rw-r--r--drivers/iommu/intel/cache.c60
-rw-r--r--drivers/iommu/intel/debugfs.c29
-rw-r--r--drivers/iommu/intel/dmar.c3
-rw-r--r--drivers/iommu/intel/iommu.c1236
-rw-r--r--drivers/iommu/intel/iommu.h128
-rw-r--r--drivers/iommu/intel/irq_remapping.c38
-rw-r--r--drivers/iommu/intel/nested.c11
-rw-r--r--drivers/iommu/intel/pasid.c61
-rw-r--r--drivers/iommu/intel/pasid.h12
-rw-r--r--drivers/iommu/intel/perf.c10
-rw-r--r--drivers/iommu/intel/perf.h5
-rw-r--r--drivers/iommu/intel/prq.c7
-rw-r--r--drivers/iommu/intel/svm.c4
-rw-r--r--drivers/iommu/intel/trace.h5
-rw-r--r--drivers/iommu/io-pgtable-arm-selftests.c214
-rw-r--r--drivers/iommu/io-pgtable-arm.c210
-rw-r--r--drivers/iommu/io-pgtable-dart.c139
-rw-r--r--drivers/iommu/io-pgtable.c4
-rw-r--r--drivers/iommu/iommu-pages.c136
-rw-r--r--drivers/iommu/iommu-pages.h51
-rw-r--r--drivers/iommu/iommu-priv.h2
-rw-r--r--drivers/iommu/iommu-sva.c29
-rw-r--r--drivers/iommu/iommu.c77
-rw-r--r--drivers/iommu/iommufd/Kconfig1
-rw-r--r--drivers/iommu/iommufd/device.c146
-rw-r--r--drivers/iommu/iommufd/driver.c115
-rw-r--r--drivers/iommu/iommufd/eventq.c23
-rw-r--r--drivers/iommu/iommufd/hw_pagetable.c10
-rw-r--r--drivers/iommu/iommufd/io_pagetable.c147
-rw-r--r--drivers/iommu/iommufd/io_pagetable.h59
-rw-r--r--drivers/iommu/iommufd/ioas.c12
-rw-r--r--drivers/iommu/iommufd/iommufd_private.h156
-rw-r--r--drivers/iommu/iommufd/iommufd_test.h41
-rw-r--r--drivers/iommu/iommufd/iova_bitmap.c6
-rw-r--r--drivers/iommu/iommufd/main.c263
-rw-r--r--drivers/iommu/iommufd/pages.c435
-rw-r--r--drivers/iommu/iommufd/selftest.c779
-rw-r--r--drivers/iommu/iommufd/viommu.c309
-rw-r--r--drivers/iommu/ipmmu-vmsa.c16
-rw-r--r--drivers/iommu/msm_iommu.c18
-rw-r--r--drivers/iommu/mtk_iommu.c183
-rw-r--r--drivers/iommu/mtk_iommu_v1.c46
-rw-r--r--drivers/iommu/omap-iommu.c48
-rw-r--r--drivers/iommu/omap-iommu.h2
-rw-r--r--drivers/iommu/riscv/iommu-platform.c17
-rw-r--r--drivers/iommu/riscv/iommu.c22
-rw-r--r--drivers/iommu/rockchip-iommu.c26
-rw-r--r--drivers/iommu/s390-iommu.c44
-rw-r--r--drivers/iommu/sprd-iommu.c6
-rw-r--r--drivers/iommu/sun50i-iommu.c13
-rw-r--r--drivers/iommu/tegra-smmu.c22
-rw-r--r--drivers/iommu/virtio-iommu.c25
-rw-r--r--drivers/irqchip/Kconfig49
-rw-r--r--drivers/irqchip/Makefile9
-rw-r--r--drivers/irqchip/exynos-combiner.c14
-rw-r--r--drivers/irqchip/irq-aclint-sswi.c (renamed from drivers/irqchip/irq-thead-c900-aclint-sswi.c)115
-rw-r--r--drivers/irqchip/irq-alpine-msi.c155
-rw-r--r--drivers/irqchip/irq-apple-aic.c69
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c60
-rw-r--r--drivers/irqchip/irq-aspeed-scu-ic.c256
-rw-r--r--drivers/irqchip/irq-ath79-misc.c20
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c15
-rw-r--r--drivers/irqchip/irq-atmel-aic.c2
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c2
-rw-r--r--drivers/irqchip/irq-bcm2712-mip.c31
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c29
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c31
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c25
-rw-r--r--drivers/irqchip/irq-gic-common.h2
-rw-r--r--drivers/irqchip/irq-gic-its-msi-parent.c (renamed from drivers/irqchip/irq-gic-v3-its-msi-parent.c)171
-rw-r--r--drivers/irqchip/irq-gic-its-msi-parent.h12
-rw-r--r--drivers/irqchip/irq-gic-v2m.c13
-rw-r--r--drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c13
-rw-r--r--drivers/irqchip/irq-gic-v3.c231
-rw-r--r--drivers/irqchip/irq-gic-v4.c4
-rw-r--r--drivers/irqchip/irq-gic-v5-irs.c827
-rw-r--r--drivers/irqchip/irq-gic-v5-its.c1233
-rw-r--r--drivers/irqchip/irq-gic-v5-iwb.c277
-rw-r--r--drivers/irqchip/irq-gic-v5.c1130
-rw-r--r--drivers/irqchip/irq-gic.c5
-rw-r--r--drivers/irqchip/irq-i8259.c12
-rw-r--r--drivers/irqchip/irq-imgpdc.c4
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c16
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c4
-rw-r--r--drivers/irqchip/irq-imx-mu-msi.c42
-rw-r--r--drivers/irqchip/irq-keystone.c4
-rw-r--r--drivers/irqchip/irq-loongson-eiointc.c117
-rw-r--r--drivers/irqchip/irq-loongson-htpic.c10
-rw-r--r--drivers/irqchip/irq-loongson-htvec.c12
-rw-r--r--drivers/irqchip/irq-loongson-pch-lpc.c21
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c25
-rw-r--r--drivers/irqchip/irq-loongson-pch-pic.c12
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c50
-rw-r--r--drivers/irqchip/irq-mchp-eic.c17
-rw-r--r--drivers/irqchip/irq-meson-gpio.c17
-rw-r--r--drivers/irqchip/irq-mips-gic.c8
-rw-r--r--drivers/irqchip/irq-msi-lib.c19
-rw-r--r--drivers/irqchip/irq-mst-intc.c12
-rw-r--r--drivers/irqchip/irq-mtk-cirq.c12
-rw-r--r--drivers/irqchip/irq-mvebu-gicp.c10
-rw-r--r--drivers/irqchip/irq-mvebu-pic.c4
-rw-r--r--drivers/irqchip/irq-nvic.c3
-rw-r--r--drivers/irqchip/irq-partition-percpu.c241
-rw-r--r--drivers/irqchip/irq-pruss-intc.c2
-rw-r--r--drivers/irqchip/irq-qcom-mpm.c6
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c12
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c8
-rw-r--r--drivers/irqchip/irq-renesas-rza1.c8
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c56
-rw-r--r--drivers/irqchip/irq-renesas-rzv2h.c41
-rw-r--r--drivers/irqchip/irq-riscv-aplic-direct.c16
-rw-r--r--drivers/irqchip/irq-riscv-imsic-early.c33
-rw-r--r--drivers/irqchip/irq-riscv-imsic-platform.c16
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.c27
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.h5
-rw-r--r--drivers/irqchip/irq-riscv-intc.c3
-rw-r--r--drivers/irqchip/irq-riscv-rpmi-sysmsi.c328
-rw-r--r--drivers/irqchip/irq-sa11x0.c12
-rw-r--r--drivers/irqchip/irq-sg2042-msi.c46
-rw-r--r--drivers/irqchip/irq-sifive-plic.c173
-rw-r--r--drivers/irqchip/irq-starfive-jh8100-intc.c6
-rw-r--r--drivers/irqchip/irq-stm32mp-exti.c4
-rw-r--r--drivers/irqchip/irq-sun6i-r.c18
-rw-r--r--drivers/irqchip/irq-tegra.c12
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c3
-rw-r--r--drivers/irqchip/irq-ti-sci-intr.c3
-rw-r--r--drivers/irqchip/irq-ts4800.c3
-rw-r--r--drivers/irqchip/irq-vic.c12
-rw-r--r--drivers/irqchip/irqchip.c10
-rw-r--r--drivers/irqchip/qcom-irq-combiner.c6
-rw-r--r--drivers/irqchip/qcom-pdc.c5
-rw-r--r--drivers/isdn/capi/capi.c8
-rw-r--r--drivers/isdn/capi/kcapi.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c12
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c18
-rw-r--r--drivers/isdn/mISDN/dsp_hwec.c6
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c2
-rw-r--r--drivers/isdn/mISDN/socket.c4
-rw-r--r--drivers/leds/Kconfig10
-rw-r--r--drivers/leds/blink/leds-lgm-sso.c2
-rw-r--r--drivers/leds/flash/Kconfig1
-rw-r--r--drivers/leds/flash/leds-qcom-flash.c102
-rw-r--r--drivers/leds/flash/leds-rt4505.c2
-rw-r--r--drivers/leds/flash/leds-rt8515.c2
-rw-r--r--drivers/leds/flash/leds-sgm3140.c3
-rw-r--r--drivers/leds/flash/leds-tps6131x.c2
-rw-r--r--drivers/leds/led-class.c26
-rw-r--r--drivers/leds/leds-cros_ec.c5
-rw-r--r--drivers/leds/leds-is31fl319x.c8
-rw-r--r--drivers/leds/leds-is31fl32xx.c47
-rw-r--r--drivers/leds/leds-lp50xx.c78
-rw-r--r--drivers/leds/leds-lp55xx-common.c2
-rw-r--r--drivers/leds/leds-lp8860.c4
-rw-r--r--drivers/leds/leds-max5970.c2
-rw-r--r--drivers/leds/leds-max77705.c4
-rw-r--r--drivers/leds/leds-netxbig.c36
-rw-r--r--drivers/leds/leds-pca9532.c2
-rw-r--r--drivers/leds/leds-pca955x.c6
-rw-r--r--drivers/leds/leds-pwm.c27
-rw-r--r--drivers/leds/leds-qnap-mcu.c175
-rw-r--r--drivers/leds/leds-tca6507.c2
-rw-r--r--drivers/leds/leds-upboard.c2
-rw-r--r--drivers/leds/rgb/leds-ktd202x.c4
-rw-r--r--drivers/leds/rgb/leds-ncp5623.c2
-rw-r--r--drivers/leds/rgb/leds-qcom-lpg.c10
-rw-r--r--drivers/leds/trigger/ledtrig-cpu.c14
-rw-r--r--drivers/leds/trigger/ledtrig-input-events.c2
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c16
-rw-r--r--drivers/macintosh/mac_hid.c3
-rw-r--r--drivers/macintosh/via-pmu-backlight.c2
-rw-r--r--drivers/macintosh/via-pmu.c12
-rw-r--r--drivers/mailbox/Kconfig50
-rw-r--r--drivers/mailbox/Makefile10
-rw-r--r--drivers/mailbox/arm_mhuv3.c2
-rw-r--r--drivers/mailbox/ast2700-mailbox.c235
-rw-r--r--drivers/mailbox/bcm74110-mailbox.c656
-rw-r--r--drivers/mailbox/cix-mailbox.c645
-rw-r--r--drivers/mailbox/mailbox-test.c2
-rw-r--r--drivers/mailbox/mailbox-th1520.c4
-rw-r--r--drivers/mailbox/mailbox.c65
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c63
-rw-r--r--drivers/mailbox/mtk-gpueb-mailbox.c319
-rw-r--r--drivers/mailbox/omap-mailbox.c35
-rw-r--r--drivers/mailbox/pcc.c110
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c1
-rw-r--r--drivers/mailbox/qcom-ipcc.c3
-rw-r--r--drivers/mailbox/riscv-sbi-mpxy-mbox.c1019
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c24
-rw-r--r--drivers/mcb/mcb-core.c8
-rw-r--r--drivers/md/Kconfig31
-rw-r--r--drivers/md/Makefile5
-rw-r--r--drivers/md/bcache/Kconfig1
-rw-r--r--drivers/md/bcache/alloc.c82
-rw-r--r--drivers/md/bcache/bcache.h8
-rw-r--r--drivers/md/bcache/bset.c116
-rw-r--r--drivers/md/bcache/bset.h44
-rw-r--r--drivers/md/bcache/btree.c122
-rw-r--r--drivers/md/bcache/debug.c3
-rw-r--r--drivers/md/bcache/extents.c45
-rw-r--r--drivers/md/bcache/io.c3
-rw-r--r--drivers/md/bcache/journal.c93
-rw-r--r--drivers/md/bcache/journal.h13
-rw-r--r--drivers/md/bcache/movinggc.c41
-rw-r--r--drivers/md/bcache/super.c60
-rw-r--r--drivers/md/bcache/sysfs.c19
-rw-r--r--drivers/md/bcache/util.h67
-rw-r--r--drivers/md/bcache/writeback.c26
-rw-r--r--drivers/md/dm-bufio.c18
-rw-r--r--drivers/md/dm-cache-policy-smq.c2
-rw-r--r--drivers/md/dm-core.h2
-rw-r--r--drivers/md/dm-crypt.c64
-rw-r--r--drivers/md/dm-flakey.c11
-rw-r--r--drivers/md/dm-ima.c110
-rw-r--r--drivers/md/dm-integrity.c373
-rw-r--r--drivers/md/dm-linear.c2
-rw-r--r--drivers/md/dm-log-writes.c4
-rw-r--r--drivers/md/dm-path-selector.c8
-rw-r--r--drivers/md/dm-path-selector.h2
-rw-r--r--drivers/md/dm-pcache/Kconfig17
-rw-r--r--drivers/md/dm-pcache/Makefile3
-rw-r--r--drivers/md/dm-pcache/backing_dev.c374
-rw-r--r--drivers/md/dm-pcache/backing_dev.h127
-rw-r--r--drivers/md/dm-pcache/cache.c445
-rw-r--r--drivers/md/dm-pcache/cache.h635
-rw-r--r--drivers/md/dm-pcache/cache_dev.c303
-rw-r--r--drivers/md/dm-pcache/cache_dev.h70
-rw-r--r--drivers/md/dm-pcache/cache_gc.c170
-rw-r--r--drivers/md/dm-pcache/cache_key.c888
-rw-r--r--drivers/md/dm-pcache/cache_req.c836
-rw-r--r--drivers/md/dm-pcache/cache_segment.c305
-rw-r--r--drivers/md/dm-pcache/cache_writeback.c261
-rw-r--r--drivers/md/dm-pcache/dm_pcache.c497
-rw-r--r--drivers/md/dm-pcache/dm_pcache.h67
-rw-r--r--drivers/md/dm-pcache/pcache_internal.h117
-rw-r--r--drivers/md/dm-pcache/segment.c61
-rw-r--r--drivers/md/dm-pcache/segment.h74
-rw-r--r--drivers/md/dm-ps-historical-service-time.c9
-rw-r--r--drivers/md/dm-ps-io-affinity.c5
-rw-r--r--drivers/md/dm-ps-queue-length.c9
-rw-r--r--drivers/md/dm-ps-round-robin.c9
-rw-r--r--drivers/md/dm-ps-service-time.c9
-rw-r--r--drivers/md/dm-raid.c88
-rw-r--r--drivers/md/dm-region-hash.c2
-rw-r--r--drivers/md/dm-stripe.c13
-rw-r--r--drivers/md/dm-switch.c4
-rw-r--r--drivers/md/dm-table.c14
-rw-r--r--drivers/md/dm-target.c5
-rw-r--r--drivers/md/dm-thin.c11
-rw-r--r--drivers/md/dm-vdo/data-vio.c17
-rw-r--r--drivers/md/dm-vdo/funnel-workqueue.c3
-rw-r--r--drivers/md/dm-vdo/indexer/volume-index.c4
-rw-r--r--drivers/md/dm-vdo/logger.c2
-rw-r--r--drivers/md/dm-vdo/vio.c2
-rw-r--r--drivers/md/dm-verity-fec.c10
-rw-r--r--drivers/md/dm-verity-target.c185
-rw-r--r--drivers/md/dm-verity.h22
-rw-r--r--drivers/md/dm-writecache.c11
-rw-r--r--drivers/md/dm-zone.c65
-rw-r--r--drivers/md/dm-zoned-target.c2
-rw-r--r--drivers/md/dm.c116
-rw-r--r--drivers/md/dm.h3
-rw-r--r--drivers/md/md-bitmap.c100
-rw-r--r--drivers/md/md-bitmap.h107
-rw-r--r--drivers/md/md-cluster.c22
-rw-r--r--drivers/md/md-linear.c17
-rw-r--r--drivers/md/md-llbitmap.c1626
-rw-r--r--drivers/md/md.c866
-rw-r--r--drivers/md/md.h62
-rw-r--r--drivers/md/raid0.c58
-rw-r--r--drivers/md/raid1-10.c4
-rw-r--r--drivers/md/raid1.c219
-rw-r--r--drivers/md/raid1.h26
-rw-r--r--drivers/md/raid10.c141
-rw-r--r--drivers/md/raid10.h2
-rw-r--r--drivers/md/raid5-cache.c2
-rw-r--r--drivers/md/raid5-ppl.c6
-rw-r--r--drivers/md/raid5.c112
-rw-r--r--drivers/media/cec/core/cec-core.c3
-rw-r--r--drivers/media/cec/core/cec-pin-error-inj.c59
-rw-r--r--drivers/media/cec/core/cec-pin-priv.h8
-rw-r--r--drivers/media/cec/core/cec-pin.c31
-rw-r--r--drivers/media/cec/platform/cec-gpio/cec-gpio.c60
-rw-r--r--drivers/media/cec/platform/stm32/stm32-cec.c1
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile6
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c6
-rw-r--r--drivers/media/cec/usb/pulse8/pulse8-cec.c4
-rw-r--r--drivers/media/cec/usb/rainshadow/rainshadow-cec.c7
-rw-r--r--drivers/media/common/b2c2/flexcop-i2c.c2
-rw-r--r--drivers/media/common/b2c2/flexcop-sram.c2
-rw-r--r--drivers/media/common/b2c2/flexcop.c22
-rw-r--r--drivers/media/common/cx2341x.c2
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c4
-rw-r--r--drivers/media/common/siano/smsir.c2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c1
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c17
-rw-r--r--drivers/media/dvb-core/dmxdev.c4
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c2
-rw-r--r--drivers/media/dvb-core/dvb_demux.c28
-rw-r--r--drivers/media/dvb-core/dvb_ringbuffer.c36
-rw-r--r--drivers/media/dvb-core/dvbdev.c4
-rw-r--r--drivers/media/dvb-frontends/Kconfig4
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c4
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c3
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c8
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c2
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c3
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.c4
-rw-r--r--drivers/media/dvb-frontends/mn88443x.c7
-rw-r--r--drivers/media/i2c/Kconfig101
-rw-r--r--drivers/media/i2c/Makefile6
-rw-r--r--drivers/media/i2c/adv7180.c348
-rw-r--r--drivers/media/i2c/adv748x/adv748x-afe.c17
-rw-r--r--drivers/media/i2c/adv748x/adv748x-hdmi.c10
-rw-r--r--drivers/media/i2c/adv7604.c10
-rw-r--r--drivers/media/i2c/adv7842.c17
-rw-r--r--drivers/media/i2c/alvium-csi2.c1
-rw-r--r--drivers/media/i2c/ar0521.c13
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c15
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c4
-rw-r--r--drivers/media/i2c/ds90ub913.c31
-rw-r--r--drivers/media/i2c/ds90ub953.c31
-rw-r--r--drivers/media/i2c/ds90ub960.c8
-rw-r--r--drivers/media/i2c/dw9714.c62
-rw-r--r--drivers/media/i2c/dw9719.c128
-rw-r--r--drivers/media/i2c/dw9768.c1
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c34
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_mode.c9
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_reg.h1
-rw-r--r--drivers/media/i2c/gc0308.c3
-rw-r--r--drivers/media/i2c/gc0310.c783
-rw-r--r--drivers/media/i2c/gc05a2.c8
-rw-r--r--drivers/media/i2c/gc08a3.c8
-rw-r--r--drivers/media/i2c/gc2145.c5
-rw-r--r--drivers/media/i2c/hi556.c161
-rw-r--r--drivers/media/i2c/hi846.c11
-rw-r--r--drivers/media/i2c/hi847.c84
-rw-r--r--drivers/media/i2c/imx111.c1610
-rw-r--r--drivers/media/i2c/imx208.c91
-rw-r--r--drivers/media/i2c/imx214.c289
-rw-r--r--drivers/media/i2c/imx219.c107
-rw-r--r--drivers/media/i2c/imx258.c105
-rw-r--r--drivers/media/i2c/imx274.c5
-rw-r--r--drivers/media/i2c/imx283.c8
-rw-r--r--drivers/media/i2c/imx290.c31
-rw-r--r--drivers/media/i2c/imx296.c5
-rw-r--r--drivers/media/i2c/imx319.c92
-rw-r--r--drivers/media/i2c/imx334.c15
-rw-r--r--drivers/media/i2c/imx335.c522
-rw-r--r--drivers/media/i2c/imx355.c90
-rw-r--r--drivers/media/i2c/imx412.c13
-rw-r--r--drivers/media/i2c/imx415.c3
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c6
-rw-r--r--drivers/media/i2c/lt6911uxe.c2
-rw-r--r--drivers/media/i2c/max9286.c10
-rw-r--r--drivers/media/i2c/max96714.c7
-rw-r--r--drivers/media/i2c/max96717.c27
-rw-r--r--drivers/media/i2c/msp3400-kthreads.c2
-rw-r--r--drivers/media/i2c/mt9m001.c5
-rw-r--r--drivers/media/i2c/mt9m111.c9
-rw-r--r--drivers/media/i2c/mt9m114.c252
-rw-r--r--drivers/media/i2c/mt9p031.c9
-rw-r--r--drivers/media/i2c/mt9t112.c11
-rw-r--r--drivers/media/i2c/mt9v032.c105
-rw-r--r--drivers/media/i2c/mt9v111.c21
-rw-r--r--drivers/media/i2c/og01a1b.c115
-rw-r--r--drivers/media/i2c/og0ve1b.c816
-rw-r--r--drivers/media/i2c/ov02a10.c45
-rw-r--r--drivers/media/i2c/ov02c10.c135
-rw-r--r--drivers/media/i2c/ov02e10.c107
-rw-r--r--drivers/media/i2c/ov08d10.c82
-rw-r--r--drivers/media/i2c/ov08x40.c95
-rw-r--r--drivers/media/i2c/ov13858.c69
-rw-r--r--drivers/media/i2c/ov13b10.c111
-rw-r--r--drivers/media/i2c/ov2659.c8
-rw-r--r--drivers/media/i2c/ov2680.c29
-rw-r--r--drivers/media/i2c/ov2685.c16
-rw-r--r--drivers/media/i2c/ov2735.c1109
-rw-r--r--drivers/media/i2c/ov2740.c109
-rw-r--r--drivers/media/i2c/ov4689.c15
-rw-r--r--drivers/media/i2c/ov5640.c13
-rw-r--r--drivers/media/i2c/ov5645.c16
-rw-r--r--drivers/media/i2c/ov5647.c9
-rw-r--r--drivers/media/i2c/ov5648.c10
-rw-r--r--drivers/media/i2c/ov5670.c112
-rw-r--r--drivers/media/i2c/ov5675.c93
-rw-r--r--drivers/media/i2c/ov5693.c27
-rw-r--r--drivers/media/i2c/ov5695.c16
-rw-r--r--drivers/media/i2c/ov6211.c793
-rw-r--r--drivers/media/i2c/ov64a40.c9
-rw-r--r--drivers/media/i2c/ov6650.c1149
-rw-r--r--drivers/media/i2c/ov7251.c33
-rw-r--r--drivers/media/i2c/ov7740.c11
-rw-r--r--drivers/media/i2c/ov8856.c95
-rw-r--r--drivers/media/i2c/ov8858.c4
-rw-r--r--drivers/media/i2c/ov8865.c53
-rw-r--r--drivers/media/i2c/ov9282.c13
-rw-r--r--drivers/media/i2c/ov9640.c5
-rw-r--r--drivers/media/i2c/ov9650.c5
-rw-r--r--drivers/media/i2c/ov9734.c82
-rw-r--r--drivers/media/i2c/rj54n1cb0c.c17
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c19
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3.h2
-rw-r--r--drivers/media/i2c/s5k5baf.c21
-rw-r--r--drivers/media/i2c/s5k6a3.c20
-rw-r--r--drivers/media/i2c/saa6752hs.c2
-rw-r--r--drivers/media/i2c/saa7115.c14
-rw-r--r--drivers/media/i2c/saa7127.c2
-rw-r--r--drivers/media/i2c/saa717x.c2
-rw-r--r--drivers/media/i2c/st-mipid02.c6
-rw-r--r--drivers/media/i2c/tc358743.c251
-rw-r--r--drivers/media/i2c/tc358743_regs.h57
-rw-r--r--drivers/media/i2c/tc358746.c17
-rw-r--r--drivers/media/i2c/tda1997x.c5
-rw-r--r--drivers/media/i2c/tda9840.c2
-rw-r--r--drivers/media/i2c/tea6415c.c2
-rw-r--r--drivers/media/i2c/tea6420.c2
-rw-r--r--drivers/media/i2c/thp7312.c4
-rw-r--r--drivers/media/i2c/ths7303.c2
-rw-r--r--drivers/media/i2c/tlv320aic23b.c2
-rw-r--r--drivers/media/i2c/upd64031a.c2
-rw-r--r--drivers/media/i2c/upd64083.c2
-rw-r--r--drivers/media/i2c/vd55g1.c272
-rw-r--r--drivers/media/i2c/vd56g3.c6
-rw-r--r--drivers/media/i2c/vgxy61.c26
-rw-r--r--drivers/media/i2c/video-i2c.c4
-rw-r--r--drivers/media/i2c/vp27smpx.c2
-rw-r--r--drivers/media/i2c/wm8739.c2
-rw-r--r--drivers/media/i2c/wm8775.c2
-rw-r--r--drivers/media/mc/mc-devnode.c6
-rw-r--r--drivers/media/mc/mc-entity.c6
-rw-r--r--drivers/media/mc/mc-request.c36
-rw-r--r--drivers/media/pci/b2c2/flexcop-pci.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c14
-rw-r--r--drivers/media/pci/bt8xx/bttv-vbi.c6
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c2
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c60
-rw-r--r--drivers/media/pci/cx18/cx18-audio.c2
-rw-r--r--drivers/media/pci/cx18/cx18-audio.h2
-rw-r--r--drivers/media/pci/cx18/cx18-av-audio.c2
-rw-r--r--drivers/media/pci/cx18/cx18-av-core.c2
-rw-r--r--drivers/media/pci/cx18/cx18-av-core.h2
-rw-r--r--drivers/media/pci/cx18/cx18-av-firmware.c2
-rw-r--r--drivers/media/pci/cx18/cx18-av-vbi.c14
-rw-r--r--drivers/media/pci/cx18/cx18-cards.c2
-rw-r--r--drivers/media/pci/cx18/cx18-cards.h2
-rw-r--r--drivers/media/pci/cx18/cx18-controls.c2
-rw-r--r--drivers/media/pci/cx18/cx18-controls.h2
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c11
-rw-r--r--drivers/media/pci/cx18/cx18-driver.h16
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c15
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.h2
-rw-r--r--drivers/media/pci/cx18/cx18-firmware.c2
-rw-r--r--drivers/media/pci/cx18/cx18-firmware.h2
-rw-r--r--drivers/media/pci/cx18/cx18-gpio.c2
-rw-r--r--drivers/media/pci/cx18/cx18-gpio.h2
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.c2
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.h2
-rw-r--r--drivers/media/pci/cx18/cx18-io.c2
-rw-r--r--drivers/media/pci/cx18/cx18-io.h2
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c92
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.h10
-rw-r--r--drivers/media/pci/cx18/cx18-irq.c2
-rw-r--r--drivers/media/pci/cx18/cx18-irq.h2
-rw-r--r--drivers/media/pci/cx18/cx18-mailbox.c2
-rw-r--r--drivers/media/pci/cx18/cx18-mailbox.h2
-rw-r--r--drivers/media/pci/cx18/cx18-queue.c15
-rw-r--r--drivers/media/pci/cx18/cx18-queue.h2
-rw-r--r--drivers/media/pci/cx18/cx18-scb.c2
-rw-r--r--drivers/media/pci/cx18/cx18-scb.h2
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c2
-rw-r--r--drivers/media/pci/cx18/cx18-streams.h2
-rw-r--r--drivers/media/pci/cx18/cx18-vbi.c2
-rw-r--r--drivers/media/pci/cx18/cx18-vbi.h2
-rw-r--r--drivers/media/pci/cx18/cx18-version.h2
-rw-r--r--drivers/media/pci/cx18/cx18-video.c2
-rw-r--r--drivers/media/pci/cx18/cx18-video.h2
-rw-r--r--drivers/media/pci/cx18/cx23418.h2
-rw-r--r--drivers/media/pci/intel/ipu-bridge.c21
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c86
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.h2
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c22
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c40
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-video.c15
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys.h2
-rw-r--r--drivers/media/pci/intel/ivsc/mei_ace.c8
-rw-r--r--drivers/media/pci/intel/ivsc/mei_csi.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-cards.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-cards.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-controls.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-controls.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c28
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.h24
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.c42
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-firmware.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-firmware.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-gpio.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-gpio.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c142
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.h8
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.c8
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-mailbox.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-mailbox.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-queue.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-queue.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-routing.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-routing.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-streams.c17
-rw-r--r--drivers/media/pci/ivtv/ivtv-streams.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-vbi.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-vbi.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-version.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c8
-rw-r--r--drivers/media/pci/mgb4/mgb4_trigger.c7
-rw-r--r--drivers/media/pci/mgb4/mgb4_vin.c7
-rw-r--r--drivers/media/pci/mgb4/mgb4_vout.c13
-rw-r--r--drivers/media/pci/pt1/pt1.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-buffer.c20
-rw-r--r--drivers/media/pci/saa7164/saa7164-cmd.c28
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c30
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c25
-rw-r--r--drivers/media/pci/saa7164/saa7164.h12
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-core.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-gpio.c20
-rw-r--r--drivers/media/pci/tw68/tw68-core.c4
-rw-r--r--drivers/media/pci/tw68/tw68-reg.h2
-rw-r--r--drivers/media/pci/tw68/tw68-risc.c2
-rw-r--r--drivers/media/pci/tw68/tw68-video.c2
-rw-r--r--drivers/media/pci/tw68/tw68.h2
-rw-r--r--drivers/media/pci/zoran/zoran.h6
-rw-r--r--drivers/media/pci/zoran/zoran_card.c4
-rw-r--r--drivers/media/pci/zoran/zoran_card.h2
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c35
-rw-r--r--drivers/media/platform/Kconfig1
-rw-r--r--drivers/media/platform/Makefile1
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-core.c151
-rw-r--r--drivers/media/platform/amlogic/c3/isp/Kconfig1
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-params.c166
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c7
-rw-r--r--drivers/media/platform/amlogic/meson-ge2d/ge2d.c30
-rw-r--r--drivers/media/platform/amphion/vdec.c298
-rw-r--r--drivers/media/platform/amphion/venc.c4
-rw-r--r--drivers/media/platform/amphion/vpu.h9
-rw-r--r--drivers/media/platform/amphion/vpu_color.c73
-rw-r--r--drivers/media/platform/amphion/vpu_core.c40
-rw-r--r--drivers/media/platform/amphion/vpu_dbg.c15
-rw-r--r--drivers/media/platform/amphion/vpu_defs.h12
-rw-r--r--drivers/media/platform/amphion/vpu_drv.c26
-rw-r--r--drivers/media/platform/amphion/vpu_helpers.c123
-rw-r--r--drivers/media/platform/amphion/vpu_helpers.h12
-rw-r--r--drivers/media/platform/amphion/vpu_malone.c28
-rw-r--r--drivers/media/platform/amphion/vpu_mbox.c4
-rw-r--r--drivers/media/platform/amphion/vpu_mbox.h1
-rw-r--r--drivers/media/platform/amphion/vpu_v4l2.c49
-rw-r--r--drivers/media/platform/amphion/vpu_v4l2.h18
-rw-r--r--drivers/media/platform/arm/Kconfig5
-rw-r--r--drivers/media/platform/arm/Makefile2
-rw-r--r--drivers/media/platform/arm/mali-c55/Kconfig18
-rw-r--r--drivers/media/platform/arm/mali-c55/Makefile11
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-capture.c959
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-common.h310
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-core.c917
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-isp.c665
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-params.c819
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-registers.h449
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-resizer.c1156
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-stats.c323
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-tpg.c437
-rw-r--r--drivers/media/platform/aspeed/aspeed-video.c199
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c206
-rw-r--r--drivers/media/platform/chips-media/coda/coda-bit.c2
-rw-r--r--drivers/media/platform/chips-media/coda/coda-common.c54
-rw-r--r--drivers/media/platform/chips-media/coda/coda-jpeg.c4
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-helper.c10
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-helper.h2
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c27
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c36
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu.h5
-rw-r--r--drivers/media/platform/imagination/e5010-jpeg-enc.c29
-rw-r--r--drivers/media/platform/imagination/e5010-jpeg-enc.h5
-rw-r--r--drivers/media/platform/m2m-deinterlace.c33
-rw-r--r--drivers/media/platform/marvell/cafe-driver.c2
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c48
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c4
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c4
-rw-r--r--drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c29
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c3
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c16
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c27
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_dbgfs.c4
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c14
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c43
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c21
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h7
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c6
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c14
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c5
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c8
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c5
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c51
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c21
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h6
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c5
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/h264.c4
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/v4l2.c35
-rw-r--r--drivers/media/platform/nxp/dw100/dw100.c16
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c98
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h1
-rw-r--r--drivers/media/platform/nxp/imx-mipi-csis.c443
-rw-r--r--drivers/media/platform/nxp/imx-pxp.c14
-rw-r--r--drivers/media/platform/nxp/imx7-media-csi.c1
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c155
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h21
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c18
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-gasket.c22
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c2
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c296
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c2
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c156
-rw-r--r--drivers/media/platform/nxp/imx8mq-mipi-csi2.c174
-rw-r--r--drivers/media/platform/nxp/mx2_emmaprp.c31
-rw-r--r--drivers/media/platform/qcom/camss/Makefile7
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-340.c190
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-gen3.c (renamed from drivers/media/platform/qcom/camss/camss-csid-780.c)34
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-gen3.h (renamed from drivers/media/platform/qcom/camss/camss-csid-780.h)8
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.h3
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c280
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c6
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.h1
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.c8
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-340.c320
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-1.c12
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-gen3.c (renamed from drivers/media/platform/qcom/camss/camss-vfe-780.c)76
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-vbif.c31
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-vbif.h19
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c43
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h6
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c82
-rw-r--r--drivers/media/platform/qcom/camss/camss.c1289
-rw-r--r--drivers/media/platform/qcom/camss/camss.h7
-rw-r--r--drivers/media/platform/qcom/iris/Makefile7
-rw-r--r--drivers/media/platform/qcom/iris/iris_buffer.c270
-rw-r--r--drivers/media/platform/qcom/iris/iris_buffer.h10
-rw-r--r--drivers/media/platform/qcom/iris/iris_common.c235
-rw-r--r--drivers/media/platform/qcom/iris/iris_common.h18
-rw-r--r--drivers/media/platform/qcom/iris/iris_core.c10
-rw-r--r--drivers/media/platform/qcom/iris/iris_core.h20
-rw-r--r--drivers/media/platform/qcom/iris/iris_ctrls.c720
-rw-r--r--drivers/media/platform/qcom/iris/iris_ctrls.h15
-rw-r--r--drivers/media/platform/qcom/iris/iris_firmware.c33
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_common.h3
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c509
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h113
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c93
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c465
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h48
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c100
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_queue.c3
-rw-r--r--drivers/media/platform/qcom/iris/iris_instance.h35
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_common.h108
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_gen1.c417
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_gen2.c783
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_qcs8300.h113
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sc7280.h26
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sm8250.c149
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sm8750.h22
-rw-r--r--drivers/media/platform/qcom/iris/iris_probe.c43
-rw-r--r--drivers/media/platform/qcom/iris/iris_resources.c2
-rw-r--r--drivers/media/platform/qcom/iris/iris_state.c11
-rw-r--r--drivers/media/platform/qcom/iris/iris_state.h2
-rw-r--r--drivers/media/platform/qcom/iris/iris_utils.c39
-rw-r--r--drivers/media/platform/qcom/iris/iris_utils.h2
-rw-r--r--drivers/media/platform/qcom/iris/iris_vb2.c84
-rw-r--r--drivers/media/platform/qcom/iris/iris_vdec.c408
-rw-r--r--drivers/media/platform/qcom/iris/iris_vdec.h2
-rw-r--r--drivers/media/platform/qcom/iris/iris_venc.c616
-rw-r--r--drivers/media/platform/qcom/iris/iris_venc.h27
-rw-r--r--drivers/media/platform/qcom/iris/iris_vidc.c371
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu2.c8
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu3x.c202
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_buffer.c1317
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_buffer.h70
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.c48
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.h6
-rw-r--r--drivers/media/platform/qcom/venus/core.c132
-rw-r--r--drivers/media/platform/qcom/venus/core.h24
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c61
-rw-r--r--drivers/media/platform/qcom/venus/firmware.h2
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c12
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c94
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.c23
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.h34
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v4.c188
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v6.c33
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c30
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus_io.h4
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c69
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c21
-rw-r--r--drivers/media/platform/qcom/venus/venc.c21
-rw-r--r--drivers/media/platform/raspberrypi/pisp_be/Kconfig1
-rw-r--r--drivers/media/platform/raspberrypi/pisp_be/pisp_be.c209
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/cfe.c4
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/csi2.c2
-rw-r--r--drivers/media/platform/renesas/Kconfig1
-rw-r--r--drivers/media/platform/renesas/Makefile1
-rw-r--r--drivers/media/platform/renesas/rcar-csi2.c336
-rw-r--r--drivers/media/platform/renesas/rcar-fcp.c36
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-core.c702
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-dma.c77
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c494
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-vin.h16
-rw-r--r--drivers/media/platform/renesas/rcar_drif.c13
-rw-r--r--drivers/media/platform/renesas/rcar_fdp1.c33
-rw-r--r--drivers/media/platform/renesas/rcar_jpu.c45
-rw-r--r--drivers/media/platform/renesas/renesas-ceu.c10
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c8
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h23
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c49
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c108
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c73
-rw-r--r--drivers/media/platform/renesas/rzv2h-ivc/Kconfig18
-rw-r--r--drivers/media/platform/renesas/rzv2h-ivc/Makefile5
-rw-r--r--drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-dev.c251
-rw-r--r--drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-subdev.c376
-rw-r--r--drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c531
-rw-r--r--drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc.h130
-rw-r--r--drivers/media/platform/renesas/vsp1/Makefile1
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1.h1
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_dl.c25
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drm.c1
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drv.c39
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_histo.c6
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_pipe.c3
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_regs.h1
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_video.c18
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_vspx.c634
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_vspx.h16
-rw-r--r--drivers/media/platform/rockchip/Kconfig2
-rw-r--r--drivers/media/platform/rockchip/Makefile2
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c36
-rw-r--r--drivers/media/platform/rockchip/rga/rga.h5
-rw-r--r--drivers/media/platform/rockchip/rkcif/Kconfig18
-rw-r--r--drivers/media/platform/rockchip/rkcif/Makefile8
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-capture-dvp.c865
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-capture-dvp.h25
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-capture-mipi.c777
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-capture-mipi.h23
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-common.h250
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-dev.c303
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-interface.c442
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-interface.h31
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-regs.h153
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-stream.c636
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-stream.h32
-rw-r--r--drivers/media/platform/rockchip/rkisp1/Kconfig1
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.h20
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c4
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c123
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c31
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-params.c299
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h99
-rw-r--r--drivers/media/platform/rockchip/rkvdec/Kconfig (renamed from drivers/staging/media/rkvdec/Kconfig)0
-rw-r--r--drivers/media/platform/rockchip/rkvdec/Makefile3
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-h264.c (renamed from drivers/staging/media/rkvdec/rkvdec-h264.c)0
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-hevc-data.c1848
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-hevc.c820
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-regs.h (renamed from drivers/staging/media/rkvdec/rkvdec-regs.h)4
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c (renamed from drivers/staging/media/rkvdec/rkvdec-vp9.c)4
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec.c (renamed from drivers/staging/media/rkvdec/rkvdec.c)267
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec.h (renamed from drivers/staging/media/rkvdec/rkvdec.h)22
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-core.h6
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c37
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-core.h5
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is-i2c.h2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is.c3
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-lite.c1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-m2m.c19
-rw-r--r--drivers/media/platform/samsung/exynos4-is/media-dev.c41
-rw-r--r--drivers/media/platform/samsung/s3c-camif/camif-capture.c26
-rw-r--r--drivers/media/platform/samsung/s5p-g2d/g2d.c44
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c40
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c17
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c35
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h6
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c34
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c38
-rw-r--r--drivers/media/platform/st/Makefile1
-rw-r--r--drivers/media/platform/st/sti/Kconfig1
-rw-r--r--drivers/media/platform/st/sti/Makefile1
-rw-r--r--drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c30
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/Kconfig28
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/Makefile11
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.c262
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.h60
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c1168
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.h287
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.c244
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h23
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.c235
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.h17
-rw-r--r--drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c20
-rw-r--r--drivers/media/platform/st/sti/delta/delta-v4l2.c41
-rw-r--r--drivers/media/platform/st/sti/hva/hva-v4l2.c38
-rw-r--r--drivers/media/platform/st/sti/hva/hva.h2
-rw-r--r--drivers/media/platform/st/stm32/dma2d/dma2d.c35
-rw-r--r--drivers/media/platform/st/stm32/stm32-csi.c4
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmi.c4
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c16
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c12
-rw-r--r--drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c12
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c10
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h6
-rw-r--r--drivers/media/platform/ti/Kconfig3
-rw-r--r--drivers/media/platform/ti/cal/cal.c3
-rw-r--r--drivers/media/platform/ti/davinci/vpif_capture.c4
-rw-r--r--drivers/media/platform/ti/davinci/vpif_display.c4
-rw-r--r--drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c69
-rw-r--r--drivers/media/platform/ti/omap/omap_vout.c6
-rw-r--r--drivers/media/platform/ti/omap3isp/isp.c10
-rw-r--r--drivers/media/platform/ti/omap3isp/ispccdc.c8
-rw-r--r--drivers/media/platform/ti/omap3isp/isph3a_aewb.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/isph3a_af.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/isphist.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/ispstat.c7
-rw-r--r--drivers/media/platform/ti/omap3isp/ispstat.h3
-rw-r--r--drivers/media/platform/ti/omap3isp/ispvideo.c36
-rw-r--r--drivers/media/platform/ti/omap3isp/ispvideo.h6
-rw-r--r--drivers/media/platform/ti/vpe/vpdma.c32
-rw-r--r--drivers/media/platform/ti/vpe/vpdma.h3
-rw-r--r--drivers/media/platform/ti/vpe/vpe.c28
-rw-r--r--drivers/media/platform/verisilicon/hantro.h6
-rw-r--r--drivers/media/platform/verisilicon/hantro_drv.c15
-rw-r--r--drivers/media/platform/verisilicon/hantro_g1_regs.h2
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2.c88
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c17
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_regs.h13
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c2
-rw-r--r--drivers/media/platform/verisilicon/hantro_h264.c6
-rw-r--r--drivers/media/platform/verisilicon/hantro_hw.h1
-rw-r--r--drivers/media/platform/verisilicon/hantro_postproc.c6
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.c28
-rw-r--r--drivers/media/platform/verisilicon/imx8m_vpu_hw.c22
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu_hw.c9
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c10
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c7
-rw-r--r--drivers/media/radio/Kconfig17
-rw-r--r--drivers/media/radio/Makefile1
-rw-r--r--drivers/media/radio/radio-aimslab.c2
-rw-r--r--drivers/media/radio/radio-aztech.c2
-rw-r--r--drivers/media/radio/radio-gemtek.c2
-rw-r--r--drivers/media/radio/radio-isa.c2
-rw-r--r--drivers/media/radio/radio-isa.h2
-rw-r--r--drivers/media/radio/radio-keene.c4
-rw-r--r--drivers/media/radio/radio-miropcm20.c2
-rw-r--r--drivers/media/radio/radio-raremono.c4
-rw-r--r--drivers/media/radio/radio-rtrack2.c2
-rw-r--r--drivers/media/radio/radio-terratec.c2
-rw-r--r--drivers/media/radio/radio-wl1273.c2159
-rw-r--r--drivers/media/radio/radio-zoltrix.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c2
-rw-r--r--drivers/media/radio/si4713/radio-platform-si4713.c10
-rw-r--r--drivers/media/rc/gpio-ir-recv.c4
-rw-r--r--drivers/media/rc/imon.c99
-rw-r--r--drivers/media/rc/ir-hix5hd2.c1
-rw-r--r--drivers/media/rc/ir-spi.c40
-rw-r--r--drivers/media/rc/lirc_dev.c9
-rw-r--r--drivers/media/rc/pwm-ir-tx.c5
-rw-r--r--drivers/media/rc/redrat3.c2
-rw-r--r--drivers/media/rc/st_rc.c2
-rw-r--r--drivers/media/test-drivers/vicodec/vicodec-core.c34
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_channel.c3
-rw-r--r--drivers/media/test-drivers/vim2m.c37
-rw-r--r--drivers/media/test-drivers/vimc/vimc-capture.c4
-rw-r--r--drivers/media/test-drivers/vimc/vimc-core.c2
-rw-r--r--drivers/media/test-drivers/visl/visl-core.c5
-rw-r--r--drivers/media/test-drivers/visl/visl-dec.c2
-rw-r--r--drivers/media/test-drivers/visl/visl.h7
-rw-r--r--drivers/media/test-drivers/vivid/vivid-cec.c12
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c106
-rw-r--r--drivers/media/test-drivers/vivid/vivid-ctrls.c3
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-rx.c12
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-rx.h8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-tx.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-tx.h4
-rw-r--r--drivers/media/test-drivers/vivid/vivid-sdr-cap.c18
-rw-r--r--drivers/media/test-drivers/vivid/vivid-sdr-cap.h18
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-cap.c10
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-cap.h8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-gen.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-out.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-out.h6
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.c32
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.h24
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-common.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-common.h8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.c16
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.h16
-rw-r--r--drivers/media/tuners/xc2028.c9
-rw-r--r--drivers/media/tuners/xc4000.c8
-rw-r--r--drivers/media/tuners/xc5000.c14
-rw-r--r--drivers/media/usb/au0828/au0828-video.c5
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c12
-rw-r--r--drivers/media/usb/dvb-usb/dtv5100.c5
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c7
-rw-r--r--drivers/media/usb/em28xx/Kconfig1
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c4
-rw-r--r--drivers/media/usb/gspca/gspca.c18
-rw-r--r--drivers/media/usb/gspca/vicam.c10
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c30
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c69
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c69
-rw-r--r--drivers/media/usb/stk1160/stk1160-core.c3
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c4
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c48
-rw-r--r--drivers/media/usb/stk1160/stk1160.h7
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c4
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c181
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c204
-rw-r--r--drivers/media/usb/uvc/uvc_entity.c4
-rw-r--r--drivers/media/usb/uvc/uvc_metadata.c163
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c199
-rw-r--r--drivers/media/usb/uvc/uvc_status.c7
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c492
-rw-r--r--drivers/media/usb/uvc/uvc_video.c114
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h71
-rw-r--r--drivers/media/v4l2-core/Kconfig4
-rw-r--r--drivers/media/v4l2-core/Makefile1
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c159
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c11
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-api.c13
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-core.c153
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-defs.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-priv.h2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-request.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c45
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-fh.c16
-rw-r--r--drivers/media/v4l2-core/v4l2-i2c.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c468
-rw-r--r--drivers/media/v4l2-core/v4l2-isp.c132
-rw-r--r--drivers/media/v4l2-core/v4l2-jpeg.c80
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c71
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c73
-rw-r--r--drivers/memory/brcmstb_memc.c56
-rw-r--r--drivers/memory/emif.c1
-rw-r--r--drivers/memory/mtk-smi.c33
-rw-r--r--drivers/memory/omap-gpmc.c4
-rw-r--r--drivers/memory/renesas-rpc-if.c58
-rw-r--r--drivers/memory/samsung/exynos-srom.c10
-rw-r--r--drivers/memory/stm32_omm.c23
-rw-r--r--drivers/memory/tegra/Makefile2
-rw-r--r--drivers/memory/tegra/mc.c5
-rw-r--r--drivers/memory/tegra/mc.h9
-rw-r--r--drivers/memory/tegra/tegra124-emc.c140
-rw-r--r--drivers/memory/tegra/tegra186-emc.c40
-rw-r--r--drivers/memory/tegra/tegra186.c17
-rw-r--r--drivers/memory/tegra/tegra20-emc.c150
-rw-r--r--drivers/memory/tegra/tegra210.c146
-rw-r--r--drivers/memory/tegra/tegra264-bwmgr.h50
-rw-r--r--drivers/memory/tegra/tegra264.c313
-rw-r--r--drivers/memory/tegra/tegra30-emc.c119
-rw-r--r--drivers/memstick/core/memstick.c11
-rw-r--r--drivers/memstick/core/ms_block.c4
-rw-r--r--drivers/memstick/core/mspro_block.c7
-rw-r--r--drivers/memstick/host/jmb38x_ms.c3
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c6
-rw-r--r--drivers/memstick/host/tifm_ms.c3
-rw-r--r--drivers/message/fusion/mptbase.c7
-rw-r--r--drivers/message/fusion/mptscsih.c2
-rw-r--r--drivers/message/fusion/mptscsih.h2
-rw-r--r--drivers/mfd/88pm860x-core.c3
-rw-r--r--drivers/mfd/88pm886.c1
-rw-r--r--drivers/mfd/Kconfig235
-rw-r--r--drivers/mfd/Makefile12
-rw-r--r--drivers/mfd/ab8500-core.c3
-rw-r--r--drivers/mfd/adp5585.c738
-rw-r--r--drivers/mfd/altera-sysmgr.c2
-rw-r--r--drivers/mfd/arizona-irq.c6
-rw-r--r--drivers/mfd/atmel-smc.c9
-rw-r--r--drivers/mfd/axp20x.c8
-rw-r--r--drivers/mfd/bcm2835-pm.c1
-rw-r--r--drivers/mfd/bq257xx.c99
-rw-r--r--drivers/mfd/cros_ec_dev.c10
-rw-r--r--drivers/mfd/cs40l50-core.c3
-rw-r--r--drivers/mfd/cs42l43.c33
-rw-r--r--drivers/mfd/da9055-core.c2
-rw-r--r--drivers/mfd/da9063-i2c.c30
-rw-r--r--drivers/mfd/exynos-lpass.c1
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c5
-rw-r--r--drivers/mfd/intel-lpss-pci.c13
-rw-r--r--drivers/mfd/intel_soc_pmic_chtdc_ti.c2
-rw-r--r--drivers/mfd/ioc3.c2
-rw-r--r--drivers/mfd/kempld-core.c36
-rw-r--r--drivers/mfd/loongson-se.c253
-rw-r--r--drivers/mfd/lp8788-irq.c2
-rw-r--r--drivers/mfd/ls2k-bmc-core.c532
-rw-r--r--drivers/mfd/macsmc.c499
-rw-r--r--drivers/mfd/madera-core.c4
-rw-r--r--drivers/mfd/max7360.c171
-rw-r--r--drivers/mfd/max77620.c15
-rw-r--r--drivers/mfd/max77705.c38
-rw-r--r--drivers/mfd/max8925-core.c6
-rw-r--r--drivers/mfd/max8997.c4
-rw-r--r--drivers/mfd/max8998.c4
-rw-r--r--drivers/mfd/mfd-core.c1
-rw-r--r--drivers/mfd/mt6358-irq.c4
-rw-r--r--drivers/mfd/mt6370.c2
-rw-r--r--drivers/mfd/mt6370.h2
-rw-r--r--drivers/mfd/mt6397-core.c12
-rw-r--r--drivers/mfd/mt6397-irq.c5
-rw-r--r--drivers/mfd/nct6694.c388
-rw-r--r--drivers/mfd/pf1550.c367
-rw-r--r--drivers/mfd/qcom-pm8xxx.c4
-rw-r--r--drivers/mfd/qnap-mcu.c109
-rw-r--r--drivers/mfd/rk8xx-core.c12
-rw-r--r--drivers/mfd/rohm-bd71828.c52
-rw-r--r--drivers/mfd/rohm-bd718x7.c9
-rw-r--r--drivers/mfd/rz-mtu3.c2
-rw-r--r--drivers/mfd/sec-acpm.c23
-rw-r--r--drivers/mfd/sec-irq.c73
-rw-r--r--drivers/mfd/simple-mfd-i2c.c36
-rw-r--r--drivers/mfd/sm501.c2
-rw-r--r--drivers/mfd/stm32-lptimer.c1
-rw-r--r--drivers/mfd/stm32-timers.c1
-rw-r--r--drivers/mfd/stmfx.c5
-rw-r--r--drivers/mfd/stmpe-i2c.c14
-rw-r--r--drivers/mfd/stmpe-spi.c14
-rw-r--r--drivers/mfd/stmpe.c9
-rw-r--r--drivers/mfd/sun4i-gpadc.c1
-rw-r--r--drivers/mfd/syscon.c2
-rw-r--r--drivers/mfd/tps65010.c2
-rw-r--r--drivers/mfd/tps65217.c4
-rw-r--r--drivers/mfd/tps65219.c17
-rw-r--r--drivers/mfd/tps6586x.c6
-rw-r--r--drivers/mfd/tps6594-core.c147
-rw-r--r--drivers/mfd/tps6594-i2c.c10
-rw-r--r--drivers/mfd/tps6594-spi.c10
-rw-r--r--drivers/mfd/tqmx86.c8
-rw-r--r--drivers/mfd/twl4030-irq.c3
-rw-r--r--drivers/mfd/twl6030-irq.c79
-rw-r--r--drivers/mfd/twl6040.c2
-rw-r--r--drivers/mfd/ucb1x00-core.c2
-rw-r--r--drivers/mfd/vexpress-sysreg.c71
-rw-r--r--drivers/mfd/wl1273-core.c262
-rw-r--r--drivers/mfd/wm831x-irq.c10
-rw-r--r--drivers/misc/Kconfig3
-rw-r--r--drivers/misc/Makefile2
-rw-r--r--drivers/misc/ad525x_dpot.c7
-rw-r--r--drivers/misc/amd-sbi/Kconfig7
-rw-r--r--drivers/misc/amd-sbi/rmi-core.c218
-rw-r--r--drivers/misc/amd-sbi/rmi-i2c.c124
-rw-r--r--drivers/misc/apds990x.c1
-rw-r--r--drivers/misc/bh1770glc.c4
-rw-r--r--drivers/misc/c2port/core.c2
-rw-r--r--drivers/misc/cardreader/rts5227.c13
-rw-r--r--drivers/misc/cardreader/rts5228.c12
-rw-r--r--drivers/misc/cardreader/rts5249.c16
-rw-r--r--drivers/misc/cardreader/rts5264.c83
-rw-r--r--drivers/misc/cardreader/rts5264.h7
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c2
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h2
-rw-r--r--drivers/misc/cardreader/rtsx_usb.c23
-rw-r--r--drivers/misc/cb710/core.c8
-rw-r--r--drivers/misc/ds1682.c4
-rw-r--r--drivers/misc/dw-xdata-pcie.c5
-rw-r--r--drivers/misc/eeprom/Kconfig19
-rw-r--r--drivers/misc/eeprom/Makefile1
-rw-r--r--drivers/misc/eeprom/at25.c378
-rw-r--r--drivers/misc/eeprom/m24lr.c606
-rw-r--r--drivers/misc/eeprom/max6875.c2
-rw-r--r--drivers/misc/enclosure.c3
-rw-r--r--drivers/misc/fastrpc.c164
-rw-r--r--drivers/misc/genwqe/card_ddcb.c2
-rw-r--r--drivers/misc/hi6421v600-irq.c4
-rw-r--r--drivers/misc/hisi_hikey_usb.c3
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c38
-rw-r--r--drivers/misc/lis3lv02d/Kconfig4
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c6
-rw-r--r--drivers/misc/lkdtm/Makefile2
-rw-r--r--drivers/misc/lkdtm/cfi.c2
-rw-r--r--drivers/misc/lkdtm/fortify.c6
-rw-r--r--drivers/misc/lkdtm/kstack_erase.c (renamed from drivers/misc/lkdtm/stackleak.c)26
-rw-r--r--drivers/misc/lkdtm/perms.c5
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c2
-rw-r--r--drivers/misc/mei/Kconfig15
-rw-r--r--drivers/misc/mei/Makefile1
-rw-r--r--drivers/misc/mei/bus-fixup.c16
-rw-r--r--drivers/misc/mei/bus.c80
-rw-r--r--drivers/misc/mei/client.c78
-rw-r--r--drivers/misc/mei/client.h6
-rw-r--r--drivers/misc/mei/dma-ring.c8
-rw-r--r--drivers/misc/mei/gsc-me.c20
-rw-r--r--drivers/misc/mei/hbm.c135
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/hw-me.c153
-rw-r--r--drivers/misc/mei/hw-txe.c60
-rw-r--r--drivers/misc/mei/hw.h2
-rw-r--r--drivers/misc/mei/init.c66
-rw-r--r--drivers/misc/mei/interrupt.c45
-rw-r--r--drivers/misc/mei/main.c193
-rw-r--r--drivers/misc/mei/mei_dev.h24
-rw-r--r--drivers/misc/mei/mei_lb.c311
-rw-r--r--drivers/misc/mei/pci-me.c25
-rw-r--r--drivers/misc/mei/pci-txe.c21
-rw-r--r--drivers/misc/mei/platform-vsc.c39
-rw-r--r--drivers/misc/mei/vsc-tp.c80
-rw-r--r--drivers/misc/mei/vsc-tp.h3
-rw-r--r--drivers/misc/misc_minor_kunit.c69
-rw-r--r--drivers/misc/ntsync.c21
-rw-r--r--drivers/misc/ocxl/afu_irq.c2
-rw-r--r--drivers/misc/ocxl/sysfs.c14
-rw-r--r--drivers/misc/pch_phub.c4
-rw-r--r--drivers/misc/pci_endpoint_test.c99
-rw-r--r--drivers/misc/rp1/Kconfig20
-rw-r--r--drivers/misc/rp1/Makefile3
-rw-r--r--drivers/misc/rp1/rp1-pci.dtso25
-rw-r--r--drivers/misc/rp1/rp1_pci.c336
-rw-r--r--drivers/misc/sram.c14
-rw-r--r--drivers/misc/ti_fpc202.c13
-rw-r--r--drivers/misc/tps6594-pfsm.c31
-rw-r--r--drivers/misc/vmw_balloon.c11
-rw-r--r--drivers/misc/vmw_vmci/vmci_context.c56
-rw-r--r--drivers/misc/vmw_vmci/vmci_context.h4
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.c53
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c133
-rw-r--r--drivers/mmc/core/block.c104
-rw-r--r--drivers/mmc/core/bus.c12
-rw-r--r--drivers/mmc/core/bus.h2
-rw-r--r--drivers/mmc/core/card.h9
-rw-r--r--drivers/mmc/core/core.c33
-rw-r--r--drivers/mmc/core/core.h6
-rw-r--r--drivers/mmc/core/debugfs.c10
-rw-r--r--drivers/mmc/core/host.c4
-rw-r--r--drivers/mmc/core/mmc.c74
-rw-r--r--drivers/mmc/core/mmc_ops.c72
-rw-r--r--drivers/mmc/core/mmc_test.c34
-rw-r--r--drivers/mmc/core/quirks.h12
-rw-r--r--drivers/mmc/core/regulator.c77
-rw-r--r--drivers/mmc/core/sd.c11
-rw-r--r--drivers/mmc/core/sd_uhs2.c4
-rw-r--r--drivers/mmc/core/sdio.c6
-rw-r--r--drivers/mmc/core/sdio_bus.c5
-rw-r--r--drivers/mmc/host/Kconfig30
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/alcor.c28
-rw-r--r--drivers/mmc/host/atmel-mci.c31
-rw-r--r--drivers/mmc/host/au1xmmc.c32
-rw-r--r--drivers/mmc/host/bcm2835.c8
-rw-r--r--drivers/mmc/host/cavium.c10
-rw-r--r--drivers/mmc/host/cb710-mmc.c27
-rw-r--r--drivers/mmc/host/cqhci.h1
-rw-r--r--drivers/mmc/host/davinci_mmc.c44
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c13
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c9
-rw-r--r--drivers/mmc/host/dw_mmc-pci.c9
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c28
-rw-r--r--drivers/mmc/host/dw_mmc.c30
-rw-r--r--drivers/mmc/host/dw_mmc.h3
-rw-r--r--drivers/mmc/host/jz4740_mmc.c40
-rw-r--r--drivers/mmc/host/litex_mmc.c12
-rw-r--r--drivers/mmc/host/loongson2-mmc.c1030
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-clkc.c4
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-mmc.c13
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c339
-rw-r--r--drivers/mmc/host/mmc_spi.c8
-rw-r--r--drivers/mmc/host/mmci.c41
-rw-r--r--drivers/mmc/host/moxart-mmc.c40
-rw-r--r--drivers/mmc/host/mtk-sd.c39
-rw-r--r--drivers/mmc/host/mvsdio.c26
-rw-r--r--drivers/mmc/host/mxcmmc.c31
-rw-r--r--drivers/mmc/host/mxs-mmc.c37
-rw-r--r--drivers/mmc/host/omap.c31
-rw-r--r--drivers/mmc/host/omap_hsmmc.c37
-rw-r--r--drivers/mmc/host/owl-mmc.c37
-rw-r--r--drivers/mmc/host/pxamci.c88
-rw-r--r--drivers/mmc/host/renesas_sdhi.h4
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c99
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c18
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c3
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c5
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c82
-rw-r--r--drivers/mmc/host/sdhci-acpi.c29
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c2
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c163
-rw-r--r--drivers/mmc/host/sdhci-cadence.c104
-rw-r--r--drivers/mmc/host/sdhci-dove.c12
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c91
-rw-r--r--drivers/mmc/host/sdhci-esdhc-mcf.c25
-rw-r--r--drivers/mmc/host/sdhci-iproc.c18
-rw-r--r--drivers/mmc/host/sdhci-milbeaut.c19
-rw-r--r--drivers/mmc/host/sdhci-msm.c89
-rw-r--r--drivers/mmc/host/sdhci-npcm.c15
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c69
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed.c10
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c50
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c662
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c19
-rw-r--r--drivers/mmc/host/sdhci-of-k1.c144
-rw-r--r--drivers/mmc/host/sdhci-of-ma35d1.c23
-rw-r--r--drivers/mmc/host/sdhci-of-sparx5.c24
-rw-r--r--drivers/mmc/host/sdhci-omap.c41
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c46
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c105
-rw-r--r--drivers/mmc/host/sdhci-pic32.c9
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c16
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h1
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c26
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c69
-rw-r--r--drivers/mmc/host/sdhci-s3c.c37
-rw-r--r--drivers/mmc/host/sdhci-spear.c17
-rw-r--r--drivers/mmc/host/sdhci-sprd.c44
-rw-r--r--drivers/mmc/host/sdhci-st.c12
-rw-r--r--drivers/mmc/host/sdhci-tegra.c22
-rw-r--r--drivers/mmc/host/sdhci-uhs2.c23
-rw-r--r--drivers/mmc/host/sdhci-xenon.c37
-rw-r--r--drivers/mmc/host/sdhci.c60
-rw-r--r--drivers/mmc/host/sdhci.h28
-rw-r--r--drivers/mmc/host/sdhci_am654.c67
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c13
-rw-r--r--drivers/mmc/host/sdricoh_cs.c10
-rw-r--r--drivers/mmc/host/sh_mmcif.c30
-rw-r--r--drivers/mmc/host/sunxi-mmc.c33
-rw-r--r--drivers/mmc/host/tifm_sd.c11
-rw-r--r--drivers/mmc/host/tmio_mmc.h19
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c57
-rw-r--r--drivers/mmc/host/toshsd.c12
-rw-r--r--drivers/mmc/host/uniphier-sd.c8
-rw-r--r--drivers/mmc/host/usdhi6rol0.c34
-rw-r--r--drivers/mmc/host/ushc.c4
-rw-r--r--drivers/mmc/host/via-sdmmc.c17
-rw-r--r--drivers/mmc/host/vub300.c16
-rw-r--r--drivers/mmc/host/wbsd.c4
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c24
-rw-r--r--drivers/most/core.c2
-rw-r--r--drivers/most/most_usb.c27
-rw-r--r--drivers/mtd/chips/cfi_probe.c2
-rw-r--r--drivers/mtd/chips/jedec_probe.c4
-rw-r--r--drivers/mtd/devices/Kconfig11
-rw-r--r--drivers/mtd/devices/Makefile1
-rw-r--r--drivers/mtd/devices/docg3.h2
-rw-r--r--drivers/mtd/devices/mtd_intel_dg.c880
-rw-r--r--drivers/mtd/ftl.c4
-rw-r--r--drivers/mtd/hyperbus/hbmc-am654.c1
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c18
-rw-r--r--drivers/mtd/lpddr/qinfo_probe.c4
-rw-r--r--drivers/mtd/maps/pcmciamtd.c1
-rw-r--r--drivers/mtd/mtd_blkdevs.c4
-rw-r--r--drivers/mtd/mtdchar.c8
-rw-r--r--drivers/mtd/mtdcore.c213
-rw-r--r--drivers/mtd/mtdcore.h2
-rw-r--r--drivers/mtd/mtdoops.c5
-rw-r--r--drivers/mtd/mtdpart.c23
-rw-r--r--drivers/mtd/mtdswap.c4
-rw-r--r--drivers/mtd/nand/Kconfig8
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/core.c131
-rw-r--r--drivers/mtd/nand/ecc-mxic.c14
-rw-r--r--drivers/mtd/nand/ecc-realtek.c464
-rw-r--r--drivers/mtd/nand/ecc.c2
-rw-r--r--drivers/mtd/nand/onenand/onenand_omap2.c1
-rw-r--r--drivers/mtd/nand/onenand/onenand_samsung.c2
-rw-r--r--drivers/mtd/nand/qpic_common.c36
-rw-r--r--drivers/mtd/nand/raw/Kconfig34
-rw-r--r--drivers/mtd/nand/raw/Makefile3
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c35
-rw-r--r--drivers/mtd/nand/raw/atmel/pmecc.c7
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c62
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c276
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c8
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c17
-rw-r--r--drivers/mtd/nand/raw/loongson-nand-controller.c1024
-rw-r--r--drivers/mtd/nand/raw/loongson1-nand-controller.c836
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c2
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c13
-rw-r--r--drivers/mtd/nand/raw/nand_base.c144
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c4
-rw-r--r--drivers/mtd/nand/raw/nandsim.c7
-rw-r--r--drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c4
-rw-r--r--drivers/mtd/nand/raw/omap2.c27
-rw-r--r--drivers/mtd/nand/raw/pl35x-nand-controller.c3
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c6
-rw-r--r--drivers/mtd/nand/raw/renesas-nand-controller.c11
-rw-r--r--drivers/mtd/nand/raw/rockchip-nand-controller.c16
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c1230
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c47
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c410
-rw-r--r--drivers/mtd/nand/spi/Makefile2
-rw-r--r--drivers/mtd/nand/spi/alliancememory.c12
-rw-r--r--drivers/mtd/nand/spi/ato.c6
-rw-r--r--drivers/mtd/nand/spi/core.c104
-rw-r--r--drivers/mtd/nand/spi/esmt.c32
-rw-r--r--drivers/mtd/nand/spi/fmsh.c146
-rw-r--r--drivers/mtd/nand/spi/foresee.c8
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c171
-rw-r--r--drivers/mtd/nand/spi/macronix.c8
-rw-r--r--drivers/mtd/nand/spi/micron.c20
-rw-r--r--drivers/mtd/nand/spi/paragon.c12
-rw-r--r--drivers/mtd/nand/spi/skyhigh.c12
-rw-r--r--drivers/mtd/nand/spi/toshiba.c8
-rw-r--r--drivers/mtd/nand/spi/winbond.c208
-rw-r--r--drivers/mtd/nand/spi/xtx.c12
-rw-r--r--drivers/mtd/nftlcore.c43
-rw-r--r--drivers/mtd/rfd_ftl.c4
-rw-r--r--drivers/mtd/sm_ftl.c5
-rw-r--r--drivers/mtd/spi-nor/core.c155
-rw-r--r--drivers/mtd/spi-nor/core.h6
-rw-r--r--drivers/mtd/spi-nor/micron-st.c109
-rw-r--r--drivers/mtd/spi-nor/sfdp.c30
-rw-r--r--drivers/mtd/spi-nor/spansion.c73
-rw-r--r--drivers/mtd/spi-nor/swp.c19
-rw-r--r--drivers/mtd/spi-nor/sysfs.c2
-rw-r--r--drivers/mtd/spi-nor/winbond.c24
-rw-r--r--drivers/mtd/ubi/attach.c4
-rw-r--r--drivers/mtd/ubi/block.c4
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c8
-rw-r--r--drivers/mtd/ubi/io.c10
-rw-r--r--drivers/mtd/ubi/kapi.c27
-rw-r--r--drivers/mtd/ubi/ubi.h12
-rw-r--r--drivers/mux/Kconfig1
-rw-r--r--drivers/mux/core.c7
-rw-r--r--drivers/mux/mmio.c82
-rw-r--r--drivers/net/Kconfig15
-rw-r--r--drivers/net/Space.c3
-rw-r--r--drivers/net/amt.c17
-rw-r--r--drivers/net/bareudp.c7
-rw-r--r--drivers/net/bonding/bond_3ad.c131
-rw-r--r--drivers/net/bonding/bond_main.c360
-rw-r--r--drivers/net/bonding/bond_netlink.c62
-rw-r--r--drivers/net/bonding/bond_options.c90
-rw-r--r--drivers/net/bonding/bond_sysfs.c6
-rw-r--r--drivers/net/can/Kconfig20
-rw-r--r--drivers/net/can/Makefile3
-rw-r--r--drivers/net/can/at91_can.c1
-rw-r--r--drivers/net/can/bxcan.c5
-rw-r--r--drivers/net/can/c_can/c_can_main.c1
-rw-r--r--drivers/net/can/can327.c1
-rw-r--r--drivers/net/can/cc770/cc770.c1
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_base.c12
-rw-r--r--drivers/net/can/dev/bittiming.c63
-rw-r--r--drivers/net/can/dev/calc_bittiming.c124
-rw-r--r--drivers/net/can/dev/dev.c167
-rw-r--r--drivers/net/can/dev/netlink.c891
-rw-r--r--drivers/net/can/dummy_can.c285
-rw-r--r--drivers/net/can/esd/esd_402_pci-core.c4
-rw-r--r--drivers/net/can/esd/esdacc.c2
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c1
-rw-r--r--drivers/net/can/grcan.c1
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c1
-rw-r--r--drivers/net/can/janz-ican3.c3
-rw-r--r--drivers/net/can/kvaser_pciefd/Makefile3
-rw-r--r--drivers/net/can/kvaser_pciefd/kvaser_pciefd.h96
-rw-r--r--drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c (renamed from drivers/net/can/kvaser_pciefd.c)148
-rw-r--r--drivers/net/can/kvaser_pciefd/kvaser_pciefd_devlink.c60
-rw-r--r--drivers/net/can/m_can/m_can.c330
-rw-r--r--drivers/net/can/m_can/m_can.h5
-rw-r--r--drivers/net/can/m_can/m_can_pci.c4
-rw-r--r--drivers/net/can/m_can/m_can_platform.c10
-rw-r--r--drivers/net/can/m_can/tcan4x5x-core.c74
-rw-r--r--drivers/net/can/mscan/mscan.c1
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c40
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd_user.h4
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c6
-rw-r--r--drivers/net/can/rcar/rcar_can.c310
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c670
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-core.c1
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-timestamp.c2
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-tx.c2
-rw-r--r--drivers/net/can/sja1000/Kconfig2
-rw-r--r--drivers/net/can/sja1000/peak_pci.c6
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c8
-rw-r--r--drivers/net/can/sja1000/sja1000.c5
-rw-r--r--drivers/net/can/slcan/slcan-core.c1
-rw-r--r--drivers/net/can/softing/softing_main.c1
-rw-r--r--drivers/net/can/spi/hi311x.c34
-rw-r--r--drivers/net/can/spi/mcp251x.c68
-rw-r--r--drivers/net/can/spi/mcp251xfd/Kconfig1
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c277
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c114
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c2
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd.h8
-rw-r--r--drivers/net/can/sun4i_can.c4
-rw-r--r--drivers/net/can/ti_hecc.c3
-rw-r--r--drivers/net/can/usb/Kconfig12
-rw-r--r--drivers/net/can/usb/Makefile1
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/usb/esd_usb.c65
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.c5
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_fd.c2
-rw-r--r--drivers/net/can/usb/f81604.c1
-rw-r--r--drivers/net/can/usb/gs_usb.c146
-rw-r--r--drivers/net/can/usb/kvaser_usb/Makefile2
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb.h33
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c143
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_devlink.c87
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c65
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c79
-rw-r--r--drivers/net/can/usb/nct6694_canfd.c831
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c6
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c48
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c20
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.h4
-rw-r--r--drivers/net/can/usb/ucan.c1
-rw-r--r--drivers/net/can/usb/usb_8dev.c1
-rw-r--r--drivers/net/can/vcan.c2
-rw-r--r--drivers/net/can/vxcan.c2
-rw-r--r--drivers/net/can/xilinx_can.c19
-rw-r--r--drivers/net/dsa/Kconfig25
-rw-r--r--drivers/net/dsa/Makefile7
-rw-r--r--drivers/net/dsa/b53/Kconfig1
-rw-r--r--drivers/net/dsa/b53/b53_common.c562
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c134
-rw-r--r--drivers/net/dsa/b53/b53_priv.h156
-rw-r--r--drivers/net/dsa/b53/b53_regs.h71
-rw-r--r--drivers/net/dsa/dsa_loop.c84
-rw-r--r--drivers/net/dsa/dsa_loop.h20
-rw-r--r--drivers/net/dsa/dsa_loop_bdinfo.c36
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c22
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_ptp.c14
-rw-r--r--drivers/net/dsa/ks8995.c857
-rw-r--r--drivers/net/dsa/lantiq/Kconfig24
-rw-r--r--drivers/net/dsa/lantiq/Makefile3
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip.c518
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip.h301
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip_common.c1739
-rw-r--r--drivers/net/dsa/lantiq/lantiq_pce.h (renamed from drivers/net/dsa/lantiq_pce.h)9
-rw-r--r--drivers/net/dsa/lantiq/mxl-gsw1xx.c733
-rw-r--r--drivers/net/dsa/lantiq/mxl-gsw1xx.h126
-rw-r--r--drivers/net/dsa/lantiq/mxl-gsw1xx_pce.h154
-rw-r--r--drivers/net/dsa/lantiq_gswip.c2270
-rw-r--r--drivers/net/dsa/microchip/ksz8.c207
-rw-r--r--drivers/net/dsa/microchip/ksz8.h4
-rw-r--r--drivers/net/dsa/microchip/ksz8_reg.h53
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c100
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h3
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c250
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h39
-rw-r--r--drivers/net/dsa/microchip/ksz_dcb.c10
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.c26
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.c104
-rw-r--r--drivers/net/dsa/microchip/lan937x_main.c1
-rw-r--r--drivers/net/dsa/mt7530-mdio.c21
-rw-r--r--drivers/net/dsa/mt7530-mmio.c21
-rw-r--r--drivers/net/dsa/mt7530.c9
-rw-r--r--drivers/net/dsa/mt7530.h1
-rw-r--r--drivers/net/dsa/mv88e6060.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c17
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h4
-rw-r--r--drivers/net/dsa/mv88e6xxx/devlink.c31
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c6
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/leds.c17
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c76
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.h133
-rw-r--r--drivers/net/dsa/ocelot/felix.c74
-rw-r--r--drivers/net/dsa/ocelot/felix.h3
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c3
-rw-r--r--drivers/net/dsa/qca/ar9331.c4
-rw-r--r--drivers/net/dsa/realtek/realtek.h3
-rw-r--r--drivers/net/dsa/realtek/rtl8365mb.c2
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.c2
-rw-r--r--drivers/net/dsa/rzn1_a5psw.c24
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c7
-rw-r--r--drivers/net/dsa/sja1105/sja1105_tas.c8
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c8
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.c11
-rw-r--r--drivers/net/dsa/yt921x.c3006
-rw-r--r--drivers/net/dsa/yt921x.h567
-rw-r--r--drivers/net/ethernet/3com/3c515.c4
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/Makefile2
-rw-r--r--drivers/net/ethernet/agere/et131x.c36
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.c476
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.h99
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.c317
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.h36
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c546
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe_debugfs.c3
-rw-r--r--drivers/net/ethernet/airoha/airoha_regs.h125
-rw-r--r--drivers/net/ethernet/altera/altera_tse.h3
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c47
-rw-r--r--drivers/net/ethernet/amazon/Kconfig2
-rw-r--r--drivers/net/ethernet/amazon/ena/Makefile2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h76
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c267
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h84
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_debugfs.c62
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_debugfs.h27
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_devlink.c210
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_devlink.h21
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c60
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c62
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h14
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_phc.c233
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_phc.h37
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_regs_defs.h8
-rw-r--r--drivers/net/ethernet/amd/Kconfig1
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.h3
-rw-r--r--drivers/net/ethernet/amd/pds_core/devlink.c3
-rw-r--r--drivers/net/ethernet/amd/pds_core/main.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/Makefile4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h42
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c161
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c329
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c37
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c399
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-i2c.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c14
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c29
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pps.c74
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c101
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-selftest.c346
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h84
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c22
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c66
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ptp.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ptp.h8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c19
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c39
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c9
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c79
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig11
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c35
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c9
-rw-r--r--drivers/net/ethernet/broadcom/b44.c39
-rw-r--r--drivers/net/ethernet/broadcom/bnge/Makefile13
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge.h255
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_auxr.c258
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_auxr.h84
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_core.c420
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_db.h34
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_devlink.c306
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_devlink.h18
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c33
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h9
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c548
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h112
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c1185
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h58
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_netdev.c2485
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_netdev.h454
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_resc.c617
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_resc.h97
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_rmem.c499
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_rmem.h202
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c53
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c78
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c384
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h21
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c29
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c25
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c209
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h10914
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c46
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c82
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c39
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c11
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c3
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c43
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c82
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c102
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h2
-rw-r--r--drivers/net/ethernet/cadence/macb.h146
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c838
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c16
-rw-r--r--drivers/net/ethernet/cavium/common/cavium_ptp.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c39
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c58
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c51
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.h4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.c3
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c62
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c53
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c57
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c24
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/pm3393.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c37
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c105
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c162
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c40
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c44
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c7
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c24
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h7
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c13
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c8
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c4
-rw-r--r--drivers/net/ethernet/dlink/Kconfig20
-rw-r--r--drivers/net/ethernet/dlink/Makefile1
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c99
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h4
-rw-r--r--drivers/net/ethernet/dlink/sundance.c1990
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c56
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c18
-rw-r--r--drivers/net/ethernet/engleder/tsnep.h8
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c14
-rw-r--r--drivers/net/ethernet/engleder/tsnep_ptp.c88
-rw-r--r--drivers/net/ethernet/faraday/Kconfig1
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c35
-rw-r--r--drivers/net/ethernet/fealnx.c4
-rw-r--r--drivers/net/ethernet/freescale/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c93
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c46
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c47
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c17
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig9
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c274
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h56
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_hw.h42
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_pf.c23
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c295
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h8
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c14
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf_common.c19
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ptp.c5
-rw-r--r--drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c400
-rw-r--r--drivers/net/ethernet/freescale/enetc/ntmp.c15
-rw-r--r--drivers/net/ethernet/freescale/fec.h57
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c427
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c106
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c93
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.h14
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c6
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c17
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c39
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth.h4
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_ethtool.c3
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_main.c40
-rw-r--r--drivers/net/ethernet/google/Kconfig1
-rw-r--r--drivers/net/ethernet/google/gve/Makefile4
-rw-r--r--drivers/net/ethernet/google/gve/gve.h107
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c105
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h30
-rw-r--r--drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c30
-rw-r--r--drivers/net/ethernet/google/gve/gve_desc_dqo.h6
-rw-r--r--drivers/net/ethernet/google/gve/gve_dqo.h4
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c131
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c514
-rw-r--r--drivers/net/ethernet/google/gve/gve_ptp.c166
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c14
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c291
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c6
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c390
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig2
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/Makefile1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h10
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c27
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c75
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c19
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c40
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h12
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h84
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c217
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h25
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c1050
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h16
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c110
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c143
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c1367
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c103
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c41
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c27
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_devlink.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c47
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Makefile6
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c915
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h156
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.c23
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.h27
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_csr.h79
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c776
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h122
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c211
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h4
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c394
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h34
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h151
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c541
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c417
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h32
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_irq.c138
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_lld.c9
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_main.c69
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c848
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h126
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h119
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c426
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c152
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h20
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h19
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c870
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h39
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h9
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rss.c336
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rss.h14
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.c226
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.h38
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.c213
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.h30
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.c109
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.h19
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c220
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h21
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c86
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h14
-rw-r--r--drivers/net/ethernet/intel/Kconfig6
-rw-r--r--drivers/net/ethernet/intel/Makefile2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c11
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c130
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c43
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c24
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c10
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c15
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.c5
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.h2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c53
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_vf.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h19
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c68
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h156
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c764
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c169
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devlink.c55
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c184
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c340
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h17
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c45
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c43
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h47
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h38
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c231
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h3
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h12
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.c62
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.h12
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq_cmd.h83
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adv_rss.c119
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adv_rss.h31
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_common.c110
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c159
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c93
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_prototype.h3
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ptp.c7
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c17
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.h42
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_type.h34
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c64
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile11
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c45
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/health.c9
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/port.c2
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/port.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h52
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.c60
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h414
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.c48
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c551
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_cgu_regs.h181
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c891
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h66
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c53
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c36
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c93
-rw-r--r--drivers/net/ethernet/intel/ice/ice_debugfs.c633
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c1424
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.h33
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c326
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c179
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c318
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h162
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c40
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fwlog.c472
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fwlog.h79
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c1022
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c58
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c469
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c356
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_consts.h177
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c582
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h55
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sbq_cmd.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c28
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c55
-rw-r--r--drivers/net/ethernet/intel/ice/ice_trace.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tspll.c626
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tspll.h31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c931
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h151
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c65
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h33
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h76
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_mbx.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vlan_mode.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c299
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h28
-rw-r--r--drivers/net/ethernet/intel/ice/virt/allowlist.c (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c)4
-rw-r--r--drivers/net/ethernet/intel/ice/virt/allowlist.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h)0
-rw-r--r--drivers/net/ethernet/intel/ice/virt/fdir.c (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c)0
-rw-r--r--drivers/net/ethernet/intel/ice/virt/fdir.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h)0
-rw-r--r--drivers/net/ethernet/intel/ice/virt/queues.c975
-rw-r--r--drivers/net/ethernet/intel/ice/virt/queues.h20
-rw-r--r--drivers/net/ethernet/intel/ice/virt/rss.c1922
-rw-r--r--drivers/net/ethernet/intel/ice/virt/rss.h18
-rw-r--r--drivers/net/ethernet/intel/ice/virt/virtchnl.c (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl.c)1763
-rw-r--r--drivers/net/ethernet/intel/ice/virt/virtchnl.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl.h)23
-rw-r--r--drivers/net/ethernet/intel/idpf/Kconfig2
-rw-r--r--drivers/net/ethernet/intel/idpf/Makefile4
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h237
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq.c37
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq.h18
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq_api.h2
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c60
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c433
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_idc.c503
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h6
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c349
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c141
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_mem.h8
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.c150
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.h17
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c175
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c1742
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h288
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_vf_dev.c56
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c1564
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.h42
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c60
-rw-r--r--drivers/net/ethernet/intel/idpf/virtchnl2.h278
-rw-r--r--drivers/net/ethernet/intel/idpf/xdp.c486
-rw-r--r--drivers/net/ethernet/intel/idpf/xdp.h175
-rw-r--r--drivers/net/ethernet/intel/idpf/xsk.c633
-rw-r--r--drivers/net/ethernet/intel/idpf/xsk.h33
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h11
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c47
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c14
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c48
-rw-r--r--drivers/net/ethernet/intel/igb/igb_xsk.c3
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h27
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c13
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h58
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h8
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h5
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c72
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c138
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.c4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c36
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c118
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/devlink.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/region.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h20
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c406
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h14
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c53
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c305
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h17
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c48
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c132
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h48
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h228
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c164
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c20
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c36
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h8
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c184
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h1
-rw-r--r--drivers/net/ethernet/intel/libeth/Kconfig10
-rw-r--r--drivers/net/ethernet/intel/libeth/Makefile8
-rw-r--r--drivers/net/ethernet/intel/libeth/priv.h37
-rw-r--r--drivers/net/ethernet/intel/libeth/rx.c42
-rw-r--r--drivers/net/ethernet/intel/libeth/tx.c41
-rw-r--r--drivers/net/ethernet/intel/libeth/xdp.c451
-rw-r--r--drivers/net/ethernet/intel/libeth/xsk.c271
-rw-r--r--drivers/net/ethernet/intel/libie/Kconfig15
-rw-r--r--drivers/net/ethernet/intel/libie/Makefile8
-rw-r--r--drivers/net/ethernet/intel/libie/adminq.c52
-rw-r--r--drivers/net/ethernet/intel/libie/fwlog.c1115
-rw-r--r--drivers/net/ethernet/intel/libie/rx.c7
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c33
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.h2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c6
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h6
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c61
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c10
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c16
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c3
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c93
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h33
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c218
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h28
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c424
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c21
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h81
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h380
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c106
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h97
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c246
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h109
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c100
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c226
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c47
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c148
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c29
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h37
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c28
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c450
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c72
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h74
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c235
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c237
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h49
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c58
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.h1
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c2
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c2
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c6
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c239
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h18
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c66
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.h2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_mcu.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c67
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c202
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c376
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.c98
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c121
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c104
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c1155
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c201
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h121
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dim.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c261
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c598
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c362
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c131
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c202
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c1165
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c295
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h91
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c493
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c346
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c153
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c146
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c395
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c116
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c799
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/st.c185
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c141
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c100
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c601
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c1857
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c105
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c171
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wc.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecards.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h1
-rw-r--r--drivers/net/ethernet/meta/Kconfig3
-rw-r--r--drivers/net/ethernet/meta/fbnic/Makefile2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h32
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h200
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c29
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c253
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c461
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c713
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h144
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c123
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h45
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c66
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h47
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_irq.c34
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c309
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.h74
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mdio.c195
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c199
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h38
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c109
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_phylink.c273
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.c145
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.h4
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_time.c2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.h2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c1051
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h48
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c31
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c1
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h4
-rw-r--r--drivers/net/ethernet/microchip/lan865x/lan865x.c30
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c18
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h4
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c5
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c8
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Kconfig2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c18
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c7
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c12
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c10
-rw-r--r--drivers/net/ethernet/microsoft/Kconfig1
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c697
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c31
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c46
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c671
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c167
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c2
-rw-r--r--drivers/net/ethernet/mucse/Kconfig33
-rw-r--r--drivers/net/ethernet/mucse/Makefile7
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/Makefile11
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h71
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c143
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h17
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c320
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c406
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h20
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c191
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h88
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c13
-rw-r--r--drivers/net/ethernet/neterion/s2io.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/tls.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/devlink_param.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c15
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c19
-rw-r--r--drivers/net/ethernet/oa_tc6.c3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c38
-rw-r--r--drivers/net/ethernet/pensando/Kconfig1
-rw-r--r--drivers/net/ethernet/pensando/ionic/Makefile2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_api.h131
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_aux.c102
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_aux.h10
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c8
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c274
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h28
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h120
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c71
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h21
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_phc.c63
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c46
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ptp.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c22
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c5
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c22
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c78
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.h6
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c34
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig15
-rw-r--r--drivers/net/ethernet/qualcomm/Makefile1
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c2
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/Makefile7
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe.c239
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe.h39
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_config.c2034
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_config.h317
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c847
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h16
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_regs.h591
-rw-r--r--drivers/net/ethernet/realtek/Kconfig2
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c127
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase.h3
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c39
-rw-r--r--drivers/net/ethernet/renesas/Makefile1
-rw-r--r--drivers/net/ethernet/renesas/ravb.h16
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c172
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.c76
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.h46
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h46
-rw-r--r--drivers/net/ethernet/renesas/rswitch_l2.c316
-rw-r--r--drivers/net/ethernet/renesas/rswitch_l2.h15
-rw-r--r--drivers/net/ethernet/renesas/rswitch_main.c (renamed from drivers/net/ethernet/renesas/rswitch.c)177
-rw-r--r--drivers/net/ethernet/renesas/rtsn.c55
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c34
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c45
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c4
-rw-r--r--drivers/net/ethernet/sfc/ef10.c1
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.c17
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c6
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c3
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c5
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c99
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c8
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c55
-rw-r--r--drivers/net/ethernet/sfc/mae.c4
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h6
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.c3
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool.c4
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.c77
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.h2
-rw-r--r--drivers/net/ethernet/sfc/siena/farch.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_pcol.h12
-rw-r--r--drivers/net/ethernet/sfc/siena/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.c6
-rw-r--r--drivers/net/ethernet/sfc/tc_encap_actions.c6
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c16
-rw-r--r--drivers/net/ethernet/spacemit/Kconfig29
-rw-r--r--drivers/net/ethernet/spacemit/Makefile6
-rw-r--r--drivers/net/ethernet/spacemit/k1_emac.c2162
-rw-r--r--drivers/net/ethernet/spacemit/k1_emac.h416
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig45
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h55
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c50
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c235
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c150
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c160
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c147
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c92
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c99
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c44
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c87
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c280
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c113
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c1133
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c169
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c54
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c138
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c159
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c72
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c49
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c147
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c104
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c108
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c50
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c66
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c253
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c184
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c943
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c475
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c97
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c67
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c201
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c58
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c2
-rw-r--r--drivers/net/ethernet/sun/niu.c50
-rw-r--r--drivers/net/ethernet/sun/niu.h8
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/sun/sunqe.h2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/ti/Kconfig12
-rw-r--r--drivers/net/ethernet/ti/Makefile3
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c27
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c85
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c51
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c63
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c6
-rw-r--r--drivers/net/ethernet/ti/cpts.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c21
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c127
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c548
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c165
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.h80
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c666
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h50
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c11
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switch_map.h3
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth.c1746
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth.h262
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h85
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_switch.h257
-rw-r--r--drivers/net/ethernet/ti/netcp.h5
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c68
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c72
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c58
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h1
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig36
-rw-r--r--drivers/net/ethernet/wangxun/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/Makefile1
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c297
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.h13
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c232
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h7
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c312
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.c243
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_mbx.h22
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.c2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.c30
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h99
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf.c599
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf.h129
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_common.c414
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_common.h22
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c292
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h15
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c9
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c10
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h2
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c266
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/ngbevf_type.h29
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c299
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h5
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c47
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c18
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c51
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h43
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c331
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/txgbevf_type.h26
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c28
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c2
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c2
-rw-r--r--drivers/net/fjes/fjes_main.c5
-rw-r--r--drivers/net/geneve.c18
-rw-r--r--drivers/net/gtp.c21
-rw-r--r--drivers/net/hamradio/6pack.c57
-rw-r--r--drivers/net/hamradio/bpqether.c2
-rw-r--r--drivers/net/hyperv/Kconfig2
-rw-r--r--drivers/net/hyperv/hyperv_net.h3
-rw-r--r--drivers/net/hyperv/netvsc.c17
-rw-r--r--drivers/net/hyperv/netvsc_drv.c77
-rw-r--r--drivers/net/hyperv/rndis_filter.c23
-rw-r--r--drivers/net/ifb.c1
-rw-r--r--drivers/net/ipa/Kconfig2
-rw-r--r--drivers/net/ipa/ipa_interrupt.c1
-rw-r--r--drivers/net/ipa/ipa_main.c13
-rw-r--r--drivers/net/ipa/ipa_modem.c4
-rw-r--r--drivers/net/ipa/ipa_smp2p.c2
-rw-r--r--drivers/net/ipa/ipa_sysfs.c6
-rw-r--r--drivers/net/ipa/ipa_uc.c2
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c8
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c7
-rw-r--r--drivers/net/macsec.c224
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/mctp/mctp-i3c.c8
-rw-r--r--drivers/net/mctp/mctp-usb.c9
-rw-r--r--drivers/net/mdio/Kconfig12
-rw-r--r--drivers/net/mdio/Makefile1
-rw-r--r--drivers/net/mdio/fwnode_mdio.c31
-rw-r--r--drivers/net/mdio/mdio-airoha.c278
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c9
-rw-r--r--drivers/net/mdio/mdio-i2c.c39
-rw-r--r--drivers/net/mdio/mdio-mux-gpio.c3
-rw-r--r--drivers/net/mdio/of_mdio.c8
-rw-r--r--drivers/net/netconsole.c697
-rw-r--r--drivers/net/netdevsim/Makefile4
-rw-r--r--drivers/net/netdevsim/bus.c29
-rw-r--r--drivers/net/netdevsim/dev.c128
-rw-r--r--drivers/net/netdevsim/ethtool.c46
-rw-r--r--drivers/net/netdevsim/health.c4
-rw-r--r--drivers/net/netdevsim/hwstats.c5
-rw-r--r--drivers/net/netdevsim/ipsec.c1
-rw-r--r--drivers/net/netdevsim/netdev.c223
-rw-r--r--drivers/net/netdevsim/netdevsim.h49
-rw-r--r--drivers/net/netdevsim/psp.c252
-rw-r--r--drivers/net/netdevsim/udp_tunnels.c12
-rw-r--r--drivers/net/netkit.c16
-rw-r--r--drivers/net/ovpn/io.c7
-rw-r--r--drivers/net/ovpn/netlink-gen.c62
-rw-r--r--drivers/net/ovpn/netlink-gen.h7
-rw-r--r--drivers/net/ovpn/netlink.c51
-rw-r--r--drivers/net/ovpn/tcp.c26
-rw-r--r--drivers/net/ovpn/udp.c5
-rw-r--r--drivers/net/pcs/Kconfig11
-rw-r--r--drivers/net/pcs/pcs-lynx.c88
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c319
-rw-r--r--drivers/net/pcs/pcs-xpcs-plat.c11
-rw-r--r--drivers/net/pcs/pcs-xpcs.c136
-rw-r--r--drivers/net/phy/Kconfig21
-rw-r--r--drivers/net/phy/Makefile6
-rw-r--r--drivers/net/phy/adin1100.c7
-rw-r--r--drivers/net/phy/air_en8811h.c45
-rw-r--r--drivers/net/phy/aquantia/aquantia.h52
-rw-r--r--drivers/net/phy/aquantia/aquantia_firmware.c2
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c702
-rw-r--r--drivers/net/phy/as21xxx.c7
-rw-r--r--drivers/net/phy/ax88796b.c5
-rw-r--r--drivers/net/phy/bcm-phy-ptp.c27
-rw-r--r--drivers/net/phy/broadcom.c202
-rw-r--r--drivers/net/phy/dp83640.c87
-rw-r--r--drivers/net/phy/dp83822.c7
-rw-r--r--drivers/net/phy/dp83867.c42
-rw-r--r--drivers/net/phy/dp83869.c11
-rw-r--r--drivers/net/phy/dp83td510.c62
-rw-r--r--drivers/net/phy/dp83tg720.c185
-rw-r--r--drivers/net/phy/fixed_phy.c264
-rw-r--r--drivers/net/phy/intel-xway.c7
-rw-r--r--drivers/net/phy/marvell-88x2222.c13
-rw-r--r--drivers/net/phy/marvell.c47
-rw-r--r--drivers/net/phy/marvell10g.c7
-rw-r--r--drivers/net/phy/mdio-boardinfo.c80
-rw-r--r--drivers/net/phy/mdio-boardinfo.h23
-rw-r--r--drivers/net/phy/mdio-open-alliance.h49
-rw-r--r--drivers/net/phy/mdio-private.h11
-rw-r--r--drivers/net/phy/mdio_bus.c104
-rw-r--r--drivers/net/phy/mdio_bus_provider.c50
-rw-r--r--drivers/net/phy/mdio_device.c65
-rw-r--r--drivers/net/phy/mediatek/Kconfig1
-rw-r--r--drivers/net/phy/mediatek/mtk-2p5ge.c104
-rw-r--r--drivers/net/phy/micrel.c1620
-rw-r--r--drivers/net/phy/microchip.c3
-rw-r--r--drivers/net/phy/microchip_rds_ptp.c8
-rw-r--r--drivers/net/phy/microchip_t1s.c100
-rw-r--r--drivers/net/phy/motorcomm.c120
-rw-r--r--drivers/net/phy/mscc/mscc.h31
-rw-r--r--drivers/net/phy/mscc/mscc_main.c521
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c121
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.h1
-rw-r--r--drivers/net/phy/mxl-86110.c392
-rw-r--r--drivers/net/phy/mxl-gpy.c135
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx-macsec.c8
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c45
-rw-r--r--drivers/net/phy/phy-c45.c292
-rw-r--r--drivers/net/phy/phy-caps.h3
-rw-r--r--drivers/net/phy/phy-core.c126
-rw-r--r--drivers/net/phy/phy.c41
-rw-r--r--drivers/net/phy/phy_caps.c35
-rw-r--r--drivers/net/phy/phy_device.c254
-rw-r--r--drivers/net/phy/phy_package.c71
-rw-r--r--drivers/net/phy/phylib-internal.h6
-rw-r--r--drivers/net/phy/phylink.c314
-rw-r--r--drivers/net/phy/qcom/Kconfig3
-rw-r--r--drivers/net/phy/qcom/at803x.c203
-rw-r--r--drivers/net/phy/qcom/qca807x.c47
-rw-r--r--drivers/net/phy/qcom/qca808x.c25
-rw-r--r--drivers/net/phy/qcom/qcom-phy-lib.c100
-rw-r--r--drivers/net/phy/qcom/qcom.h28
-rw-r--r--drivers/net/phy/qt2025.rs10
-rw-r--r--drivers/net/phy/realtek/realtek_main.c696
-rw-r--r--drivers/net/phy/sfp-bus.c107
-rw-r--r--drivers/net/phy/sfp.c109
-rw-r--r--drivers/net/phy/sfp.h4
-rw-r--r--drivers/net/phy/smsc.c58
-rw-r--r--drivers/net/phy/spi_ks8995.c506
-rw-r--r--drivers/net/ppp/Kconfig3
-rw-r--r--drivers/net/ppp/bsd_comp.c4
-rw-r--r--drivers/net/ppp/ppp_generic.c223
-rw-r--r--drivers/net/ppp/ppp_mppe.c108
-rw-r--r--drivers/net/ppp/pppoe.c139
-rw-r--r--drivers/net/ppp/pptp.c26
-rw-r--r--drivers/net/pse-pd/Kconfig11
-rw-r--r--drivers/net/pse-pd/Makefile1
-rw-r--r--drivers/net/pse-pd/pd692x0.c393
-rw-r--r--drivers/net/pse-pd/pse_core.c1066
-rw-r--r--drivers/net/pse-pd/si3474.c578
-rw-r--r--drivers/net/pse-pd/tps23881.c474
-rw-r--r--drivers/net/sungem_phy.c2
-rw-r--r--drivers/net/tap.c10
-rw-r--r--drivers/net/team/team_core.c201
-rw-r--r--drivers/net/team/team_mode_activebackup.c3
-rw-r--r--drivers/net/team/team_mode_loadbalance.c13
-rw-r--r--drivers/net/team/team_nl.c1
-rw-r--r--drivers/net/team/team_nl.h1
-rw-r--r--drivers/net/thunderbolt/main.c21
-rw-r--r--drivers/net/tun.c79
-rw-r--r--drivers/net/tun_vnet.h101
-rw-r--r--drivers/net/usb/Kconfig4
-rw-r--r--drivers/net/usb/asix_devices.c42
-rw-r--r--drivers/net/usb/cdc_ncm.c27
-rw-r--r--drivers/net/usb/lan78xx.c778
-rw-r--r--drivers/net/usb/qmi_wwan.c11
-rw-r--r--drivers/net/usb/r8152.c9
-rw-r--r--drivers/net/usb/rtl8150.c13
-rw-r--r--drivers/net/usb/sierra_net.c4
-rw-r--r--drivers/net/usb/smsc95xx.c72
-rw-r--r--drivers/net/usb/usbnet.c352
-rw-r--r--drivers/net/veth.c45
-rw-r--r--drivers/net/virtio_net.c450
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c92
-rw-r--r--drivers/net/vrf.c6
-rw-r--r--drivers/net/vxlan/vxlan_core.c103
-rw-r--r--drivers/net/vxlan/vxlan_private.h8
-rw-r--r--drivers/net/vxlan/vxlan_vnifilter.c31
-rw-r--r--drivers/net/wan/framer/pef2256/pef2256.c35
-rw-r--r--drivers/net/wan/hdlc_ppp.c4
-rw-r--r--drivers/net/wan/lapbether.c2
-rw-r--r--drivers/net/wireguard/Makefile2
-rw-r--r--drivers/net/wireguard/cookie.c18
-rw-r--r--drivers/net/wireguard/device.c8
-rw-r--r--drivers/net/wireguard/generated/netlink.c73
-rw-r--r--drivers/net/wireguard/generated/netlink.h30
-rw-r--r--drivers/net/wireguard/netlink.c68
-rw-r--r--drivers/net/wireguard/noise.c32
-rw-r--r--drivers/net/wireguard/peer.h2
-rw-r--r--drivers/net/wireguard/queueing.h13
-rw-r--r--drivers/net/wireguard/socket.c4
-rw-r--r--drivers/net/wireless/admtek/adm8211.c2
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c62
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h19
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/leds.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c82
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c20
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c253
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode_i.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h19
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c19
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c10
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c100
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h19
-rw-r--r--drivers/net/wireless/ath/ath11k/coredump.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c188
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h10
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c15
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c46
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c15
-rw-r--r--drivers/net/wireless/ath/ath11k/fw.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c57
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h39
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c587
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c24
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h18
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c23
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c84
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h18
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.c10
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c117
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h76
-rw-r--r--drivers/net/wireless/ath/ath12k/dbring.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.c80
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.h7
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c564
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h207
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c139
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h55
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c103
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c517
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h18
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c160
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c40
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h4
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h1
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c13
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h12
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c63
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h34
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c3265
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h43
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c36
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h4
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.c5
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.h28
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c39
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h27
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c148
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c1453
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h423
-rw-r--r--drivers/net/wireless/ath/ath12k/wow.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c12
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c12
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c7
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h6
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c60
-rw-r--r--drivers/net/wireless/ath/ath9k/common-beacon.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common-debug.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common-init.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/dynack.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c9
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c19
-rw-r--r--drivers/net/wireless/ath/main.c1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h74
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c5
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c60
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c26
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c1
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h4
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c6
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c97
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c32
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c45
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c22
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c443
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c25
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h3
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c6
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_module.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c7
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c3
-rw-r--r--drivers/net/wireless/intel/iwlegacy/commands.h2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/dr.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-jf.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/commands.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h242
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h310
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h78
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c83
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h80
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c111
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c112
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c105
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c109
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c194
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h91
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c147
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h95
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.c113
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/sap.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/Makefile4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ap.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/coex.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/constants.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.c705
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/fw.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.c67
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.c50
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.c431
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.h38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.c258
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mcc.c66
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.c232
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.c130
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.c1767
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.c176
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/emlsr_with_bt.c140
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.c75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c131
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c811
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c94
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c86
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c869
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c97
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c183
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c141
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h201
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c121
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ptp.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c164
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c157
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c119
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c222
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c95
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c433
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c186
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c453
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h (renamed from drivers/net/wireless/intel/iwlwifi/pcie/internal.h)111
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/rx.c)34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c)37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/trans.c)779
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c)0
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/tx.c)69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info-v2.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-context-info-v2.h)6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-context-info.h)0
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/utils.c104
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/utils.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/devinfo.c102
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/nvm_parse.c72
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/utils.c (renamed from drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c)43
-rw-r--r--drivers/net/wireless/intersil/p54/main.c3
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi.c4
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c13
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c3
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c3
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c30
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c9
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c113
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c58
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c4
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c87
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig6
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile3
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/channel.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c313
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h98
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c86
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c111
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h256
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/core.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/soc.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/soc.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/testmode.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dma.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/coredump.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/coredump.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c76
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c235
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c60
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/testmode.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/Makefile4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c158
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c130
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c222
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regd.c265
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regd.h19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regs.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/testmode.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/usb.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c43
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_dma.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_mac.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/Kconfig9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/Makefile3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c74
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c343
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c382
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c979
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c679
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c601
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c113
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h161
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/npu.c352
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/pci.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/regs.h34
-rw-r--r--drivers/net/wireless/mediatek/mt76/npu.c501
-rw-r--r--drivers/net/wireless/mediatek/mt76/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/scan.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c32
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/wed.c26
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c5
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c14
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c5
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_cfg.c37
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_cfg.h5
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.c16
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.h2
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.c29
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c8
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/Kconfig11
-rw-r--r--drivers/net/wireless/ralink/rt2x00/Makefile1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c39
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800pci.c3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c116
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c16
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c8
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00soc.c153
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00soc.h29
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c11
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c32
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8192c.c80
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8723a.c115
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/core.c227
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/regs.h1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c23
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c23
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c21
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c38
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.h7
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c22
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/led.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c32
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c9
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c25
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h15
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c49
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.c9
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821ce.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bu.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cu.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c12
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig48
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile15
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.c95
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.h33
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c173
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h446
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c568
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h83
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c1292
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h7
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c1186
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h462
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c439
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c959
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h252
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c450
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h154
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c220
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c11
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c520
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h133
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci_be.c18
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c858
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h27
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy_be.c13
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c76
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h116
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c171
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c178
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c321
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.c77
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_table.c501
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851bu.c66
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c132
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c16
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852au.c79
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c107
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c22
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c83
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c23
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c83
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bte.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bu.c81
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c175
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c69
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852cu.c69
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c77
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c52
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c20
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c19
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h46
-rw-r--r--drivers/net/wireless/realtek/rtw89/usb.c1071
-rw-r--r--drivers/net/wireless/realtek/rtw89/usb.h77
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c105
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.h20
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c9
-rw-r--r--drivers/net/wireless/silabs/wfx/main.c2
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c4
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.h4
-rw-r--r--drivers/net/wireless/st/cw1200/bh.c11
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c7
-rw-r--r--drivers/net/wireless/st/cw1200/sta.h5
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.c35
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.h1
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c79
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.h3
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c5
-rw-r--r--drivers/net/wireless/ti/wl1251/reg.h6
-rw-r--r--drivers/net/wireless/ti/wl12xx/reg.h6
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c27
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c11
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c44
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c3
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c290
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.h18
-rw-r--r--drivers/net/wireless/virtual/virt_wifi.c4
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c8
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c3
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.c3
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.c2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_trace.c3
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c19
-rw-r--r--drivers/net/wwan/qcom_bam_dmux.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.c5
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.h2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c5
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_trace.c2
-rw-r--r--drivers/net/wwan/wwan_hwsim.c2
-rw-r--r--drivers/net/xen-netfront.c10
-rw-r--r--drivers/nfc/mei_phy.h4
-rw-r--r--drivers/nfc/pn533/pn533.c12
-rw-r--r--drivers/nfc/s3fwrn5/Kconfig3
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c17
-rw-r--r--drivers/nfc/trf7970a.c91
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c18
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.h1
-rw-r--r--drivers/ntb/hw/epf/ntb_hw_epf.c118
-rw-r--r--drivers/ntb/ntb_transport.c7
-rw-r--r--drivers/nvdimm/Kconfig19
-rw-r--r--drivers/nvdimm/Makefile1
-rw-r--r--drivers/nvdimm/badrange.c3
-rw-r--r--drivers/nvdimm/btt.c6
-rw-r--r--drivers/nvdimm/btt_devs.c24
-rw-r--r--drivers/nvdimm/bus.c72
-rw-r--r--drivers/nvdimm/claim.c7
-rw-r--r--drivers/nvdimm/core.c17
-rw-r--r--drivers/nvdimm/dax_devs.c12
-rw-r--r--drivers/nvdimm/dimm.c5
-rw-r--r--drivers/nvdimm/dimm_devs.c48
-rw-r--r--drivers/nvdimm/namespace_devs.c113
-rw-r--r--drivers/nvdimm/nd.h3
-rw-r--r--drivers/nvdimm/pfn_devs.c63
-rw-r--r--drivers/nvdimm/pmem.c8
-rw-r--r--drivers/nvdimm/pmem.h4
-rw-r--r--drivers/nvdimm/ramdax.c282
-rw-r--r--drivers/nvdimm/region.c18
-rw-r--r--drivers/nvdimm/region_devs.c120
-rw-r--r--drivers/nvdimm/security.c14
-rw-r--r--drivers/nvme/common/auth.c90
-rw-r--r--drivers/nvme/host/apple.c202
-rw-r--r--drivers/nvme/host/auth.c17
-rw-r--r--drivers/nvme/host/constants.c4
-rw-r--r--drivers/nvme/host/core.c216
-rw-r--r--drivers/nvme/host/fabrics.c2
-rw-r--r--drivers/nvme/host/fabrics.h6
-rw-r--r--drivers/nvme/host/fc.c48
-rw-r--r--drivers/nvme/host/ioctl.c35
-rw-r--r--drivers/nvme/host/multipath.c22
-rw-r--r--drivers/nvme/host/nvme.h16
-rw-r--r--drivers/nvme/host/pci.c857
-rw-r--r--drivers/nvme/host/pr.c6
-rw-r--r--drivers/nvme/host/rdma.c3
-rw-r--r--drivers/nvme/host/tcp.c24
-rw-r--r--drivers/nvme/host/zns.c10
-rw-r--r--drivers/nvme/target/admin-cmd.c2
-rw-r--r--drivers/nvme/target/auth.c21
-rw-r--r--drivers/nvme/target/core.c36
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c1
-rw-r--r--drivers/nvme/target/fc.c89
-rw-r--r--drivers/nvme/target/fcloop.c17
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c6
-rw-r--r--drivers/nvme/target/loop.c1
-rw-r--r--drivers/nvme/target/nvmet.h4
-rw-r--r--drivers/nvme/target/passthru.c8
-rw-r--r--drivers/nvme/target/pci-epf.c39
-rw-r--r--drivers/nvme/target/rdma.c18
-rw-r--r--drivers/nvme/target/tcp.c12
-rw-r--r--drivers/nvme/target/zns.c2
-rw-r--r--drivers/nvmem/Kconfig31
-rw-r--r--drivers/nvmem/Makefile6
-rw-r--r--drivers/nvmem/an8855-efuse.c68
-rw-r--r--drivers/nvmem/core.c26
-rw-r--r--drivers/nvmem/imx-ocotp-ele.c25
-rw-r--r--drivers/nvmem/imx-ocotp.c5
-rw-r--r--drivers/nvmem/layouts.c13
-rw-r--r--drivers/nvmem/layouts/u-boot-env.c12
-rw-r--r--drivers/nvmem/qnap-mcu-eeprom.c111
-rw-r--r--drivers/nvmem/rcar-efuse.c1
-rw-r--r--drivers/nvmem/s32g-ocotp-nvmem.c100
-rw-r--r--drivers/of/address.c4
-rw-r--r--drivers/of/base.c47
-rw-r--r--drivers/of/device.c4
-rw-r--r--drivers/of/dynamic.c9
-rw-r--r--drivers/of/fdt.c101
-rw-r--r--drivers/of/irq.c100
-rw-r--r--drivers/of/kobj.c2
-rw-r--r--drivers/of/of_kunit_helpers.c5
-rw-r--r--drivers/of/of_numa.c5
-rw-r--r--drivers/of/of_reserved_mem.c86
-rw-r--r--drivers/of/overlay.c5
-rw-r--r--drivers/of/unittest-data/tests-platform.dtsi10
-rw-r--r--drivers/of/unittest.c3
-rw-r--r--drivers/opp/core.c168
-rw-r--r--drivers/opp/cpu.c16
-rw-r--r--drivers/opp/of.c125
-rw-r--r--drivers/parisc/ccio-dma.c54
-rw-r--r--drivers/parisc/eisa_eeprom.c2
-rw-r--r--drivers/parisc/gsc.c4
-rw-r--r--drivers/parisc/iommu-helpers.h10
-rw-r--r--drivers/parisc/power.c20
-rw-r--r--drivers/parisc/sba_iommu.c54
-rw-r--r--drivers/pci/Kconfig22
-rw-r--r--drivers/pci/Makefile4
-rw-r--r--drivers/pci/bus.c64
-rw-r--r--drivers/pci/controller/Kconfig29
-rw-r--r--drivers/pci/controller/Makefile1
-rw-r--r--drivers/pci/controller/cadence/Kconfig31
-rw-r--r--drivers/pci/controller/cadence/Makefile12
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c61
-rw-r--r--drivers/pci/controller/cadence/pci-sky1.c238
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c42
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host-common.c288
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host-common.h46
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host-hpa.c368
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c280
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-hpa-regs.h193
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-hpa.c167
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-lga-regs.h230
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-plat.c9
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.c30
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h468
-rw-r--r--drivers/pci/controller/cadence/pcie-sg2042.c131
-rw-r--r--drivers/pci/controller/dwc/Kconfig76
-rw-r--r--drivers/pci/controller/dwc/Makefile8
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c1
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c62
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c48
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c89
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c18
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-amd-mdb.c52
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-debugfs.c16
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c32
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c287
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c134
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h95
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c119
-rw-r--r--drivers/pci/controller/dwc/pcie-keembay.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-nxp-s32g.c406
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-common.c58
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-common.h2
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c23
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c372
-rw-r--r--drivers/pci/controller/dwc/pcie-rcar-gen4.c30
-rw-r--r--drivers/pci/controller/dwc/pcie-sophgo.c257
-rw-r--r--drivers/pci/controller/dwc/pcie-spacemit-k1.c357
-rw-r--r--drivers/pci/controller/dwc/pcie-stm32-ep.c343
-rw-r--r--drivers/pci/controller/dwc/pcie-stm32.c370
-rw-r--r--drivers/pci/controller/dwc/pcie-stm32.h19
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c99
-rw-r--r--drivers/pci/controller/mobiveil/Kconfig1
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-host.c48
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil.h1
-rw-r--r--drivers/pci/controller/pci-aardvark.c59
-rw-r--r--drivers/pci/controller/pci-host-common.c22
-rw-r--r--drivers/pci/controller/pci-host-common.h3
-rw-r--r--drivers/pci/controller/pci-hyperv-intf.c1
-rw-r--r--drivers/pci/controller/pci-hyperv.c195
-rw-r--r--drivers/pci/controller/pci-ixp4xx.c6
-rw-r--r--drivers/pci/controller/pci-mvebu.c27
-rw-r--r--drivers/pci/controller/pci-tegra.c29
-rw-r--r--drivers/pci/controller/pci-xgene-msi.c428
-rw-r--r--drivers/pci/controller/pci-xgene.c33
-rw-r--r--drivers/pci/controller/pcie-altera-msi.c45
-rw-r--r--drivers/pci/controller/pcie-altera.c3
-rw-r--r--drivers/pci/controller/pcie-apple.c26
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c287
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c46
-rw-r--r--drivers/pci/controller/pcie-iproc.c22
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c91
-rw-r--r--drivers/pci/controller/pcie-mediatek.c161
-rw-r--r--drivers/pci/controller/pcie-rcar-ep.c2
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c112
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c5
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c64
-rw-r--r--drivers/pci/controller/pcie-rockchip.h61
-rw-r--r--drivers/pci/controller/pcie-rzg3s-host.c1761
-rw-r--r--drivers/pci/controller/pcie-xilinx-dma-pl.c49
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c53
-rw-r--r--drivers/pci/controller/pcie-xilinx.c58
-rw-r--r--drivers/pci/controller/plda/Kconfig1
-rw-r--r--drivers/pci/controller/plda/pcie-plda-host.c48
-rw-r--r--drivers/pci/controller/plda/pcie-plda.h1
-rw-r--r--drivers/pci/controller/plda/pcie-starfive.c2
-rw-r--r--drivers/pci/controller/vmd.c293
-rw-r--r--drivers/pci/doe.c2
-rw-r--r--drivers/pci/ecam.c2
-rw-r--r--drivers/pci/endpoint/Kconfig8
-rw-r--r--drivers/pci/endpoint/Makefile1
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c172
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-vntb.c297
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c1
-rw-r--r--drivers/pci/endpoint/pci-ep-msi.c100
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c199
-rw-r--r--drivers/pci/host-bridge.c1
-rw-r--r--drivers/pci/hotplug/TODO4
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c2
-rw-r--r--drivers/pci/hotplug/cpqphp_pci.c8
-rw-r--r--drivers/pci/hotplug/ibmphp_hpc.c6
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c4
-rw-r--r--drivers/pci/hotplug/pnv_php.c248
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c3
-rw-r--r--drivers/pci/ide.c815
-rw-r--r--drivers/pci/iov.c145
-rw-r--r--drivers/pci/msi/irqdomain.c168
-rw-r--r--drivers/pci/msi/msi.c12
-rw-r--r--drivers/pci/of_property.c22
-rw-r--r--drivers/pci/p2pdma.c198
-rw-r--r--drivers/pci/pci-acpi.c36
-rw-r--r--drivers/pci/pci-driver.c19
-rw-r--r--drivers/pci/pci-sysfs.c88
-rw-r--r--drivers/pci/pci.c280
-rw-r--r--drivers/pci/pci.h209
-rw-r--r--drivers/pci/pcie/aer.c58
-rw-r--r--drivers/pci/pcie/aspm.c63
-rw-r--r--drivers/pci/pcie/err.c40
-rw-r--r--drivers/pci/pcie/portdrv.c3
-rw-r--r--drivers/pci/pcie/ptm.c27
-rw-r--r--drivers/pci/probe.c145
-rw-r--r--drivers/pci/pwrctrl/Kconfig15
-rw-r--r--drivers/pci/pwrctrl/Makefile2
-rw-r--r--drivers/pci/pwrctrl/pci-pwrctrl-tc9563.c648
-rw-r--r--drivers/pci/pwrctrl/slot.c20
-rw-r--r--drivers/pci/quirks.c52
-rw-r--r--drivers/pci/rebar.c328
-rw-r--r--drivers/pci/remove.c10
-rw-r--r--drivers/pci/search.c62
-rw-r--r--drivers/pci/setup-bus.c965
-rw-r--r--drivers/pci/setup-res.c95
-rw-r--r--drivers/pci/switch/switchtec.c25
-rw-r--r--drivers/pci/tph.c27
-rw-r--r--drivers/pci/tsm.c900
-rw-r--r--drivers/pci/vgaarb.c31
-rw-r--r--drivers/pci/vpd.c2
-rw-r--r--drivers/pcmcia/Kconfig3
-rw-r--r--drivers/pcmcia/Makefile1
-rw-r--r--drivers/pcmcia/cistpl.c4
-rw-r--r--drivers/pcmcia/cs.c17
-rw-r--r--drivers/pcmcia/cs_internal.h1
-rw-r--r--drivers/pcmcia/ds.c2
-rw-r--r--drivers/pcmcia/omap_cf.c10
-rw-r--r--drivers/pcmcia/rsrc_iodyn.c168
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c4
-rw-r--r--drivers/pcmcia/socket_sysfs.c5
-rw-r--r--drivers/peci/controller/peci-aspeed.c12
-rw-r--r--drivers/peci/controller/peci-npcm.c1
-rw-r--r--drivers/peci/cpu.c4
-rw-r--r--drivers/perf/Kconfig20
-rw-r--r--drivers/perf/Makefile2
-rw-r--r--drivers/perf/arm-ccn.c2
-rw-r--r--drivers/perf/arm-cmn.c29
-rw-r--r--drivers/perf/arm-ni.c242
-rw-r--r--drivers/perf/arm_brbe.c805
-rw-r--r--drivers/perf/arm_brbe.h47
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.c52
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.h39
-rw-r--r--drivers/perf/arm_cspmu/nvidia_cspmu.c194
-rw-r--r--drivers/perf/arm_pmu.c69
-rw-r--r--drivers/perf/arm_pmu_acpi.c2
-rw-r--r--drivers/perf/arm_pmu_platform.c20
-rw-r--r--drivers/perf/arm_pmuv3.c158
-rw-r--r--drivers/perf/arm_spe_pmu.c182
-rw-r--r--drivers/perf/cxl_pmu.c12
-rw-r--r--drivers/perf/dwc_pcie_pmu.c161
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c93
-rw-r--r--drivers/perf/fsl_imx9_ddr_perf.c14
-rw-r--r--drivers/perf/fujitsu_uncore_pmu.c613
-rw-r--r--drivers/perf/hisilicon/Makefile3
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c354
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c6
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c557
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_mn_pmu.c411
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_noc_pmu.c443
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pa_pmu.c2
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c16
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.h8
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c220
-rw-r--r--drivers/perf/riscv_pmu_sbi.c201
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c40
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-pcie.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c1
-rw-r--r--drivers/phy/broadcom/phy-bcm-sr-pcie.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm63xx-usbh.c6
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c3
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c1
-rw-r--r--drivers/phy/cadence/cdns-dphy-rx.c3
-rw-r--r--drivers/phy/cadence/cdns-dphy.c154
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c181
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c288
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8mq-usb.c23
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8qm-hsio.c5
-rw-r--r--drivers/phy/freescale/phy-fsl-lynx-28g.c16
-rw-r--r--drivers/phy/hisilicon/phy-hi6220-usb.c1
-rw-r--r--drivers/phy/hisilicon/phy-histb-combphy.c2
-rw-r--r--drivers/phy/ingenic/phy-ingenic-usb.c8
-rw-r--r--drivers/phy/marvell/phy-pxa-usb.c1
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c65
-rw-r--r--drivers/phy/phy-can-transceiver.c158
-rw-r--r--drivers/phy/phy-core.c32
-rw-r--r--drivers/phy/phy-snps-eusb2.c52
-rw-r--r--drivers/phy/qualcomm/Kconfig16
-rw-r--r--drivers/phy/qualcomm/Makefile1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c104
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31-eusb2.c326
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31.c16
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c582
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c295
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v8.h38
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v8.h32
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v8_50.h13
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v8.h64
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-ln-shrd-v5.h11
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v8.h68
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c298
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h8
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c4
-rw-r--r--drivers/phy/renesas/Kconfig7
-rw-r--r--drivers/phy/renesas/Makefile1
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-pcie.c2
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c202
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb3.c2
-rw-r--r--drivers/phy/renesas/phy-rzg3e-usb3.c259
-rw-r--r--drivers/phy/renesas/r8a779f0-ether-serdes.c97
-rw-r--r--drivers/phy/rockchip/phy-rockchip-emmc.c3
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-csidphy.c67
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c91
-rw-r--r--drivers/phy/rockchip/phy-rockchip-naneng-combphy.c776
-rw-r--r--drivers/phy/rockchip/phy-rockchip-pcie.c75
-rw-r--r--drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c11
-rw-r--r--drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c28
-rw-r--r--drivers/phy/rockchip/phy-rockchip-usb.c51
-rw-r--r--drivers/phy/rockchip/phy-rockchip-usbdp.c3
-rw-r--r--drivers/phy/samsung/phy-exynos-mipi-video.c52
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c35
-rw-r--r--drivers/phy/samsung/phy-gs101-ufs.c28
-rw-r--r--drivers/phy/samsung/phy-samsung-ufs.c40
-rw-r--r--drivers/phy/samsung/phy-samsung-ufs.h7
-rw-r--r--drivers/phy/samsung/phy-samsung-usb2.c1
-rw-r--r--drivers/phy/sophgo/Kconfig19
-rw-r--r--drivers/phy/sophgo/Makefile2
-rw-r--r--drivers/phy/sophgo/phy-cv1800-usb2.c169
-rw-r--r--drivers/phy/st/phy-stih407-usb.c2
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c4
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c75
-rw-r--r--drivers/phy/tegra/xusb-tegra210.c6
-rw-r--r--drivers/phy/tegra/xusb.h1
-rw-r--r--drivers/phy/ti/Kconfig2
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c1
-rw-r--r--drivers/phy/ti/phy-dm816x-usb.c1
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c49
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c1
-rw-r--r--drivers/phy/ti/phy-omap-control.c1
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c14
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c14
-rw-r--r--drivers/phy/ti/phy-twl4030-usb.c1
-rw-r--r--drivers/pinctrl/Kconfig86
-rw-r--r--drivers/pinctrl/Makefile9
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c14
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.h2
-rw-r--r--drivers/pinctrl/bcm/Kconfig12
-rw-r--r--drivers/pinctrl/bcm/Kconfig.stb10
-rw-r--r--drivers/pinctrl/bcm/Makefile2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c10
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm4908.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6358.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-brcmstb-bcm2712.c747
-rw-r--r--drivers/pinctrl/bcm/pinctrl-brcmstb.c442
-rw-r--r--drivers/pinctrl/bcm/pinctrl-brcmstb.h93
-rw-r--r--drivers/pinctrl/bcm/pinctrl-cygnus-mux.c8
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns2-mux.c8
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-mux.c8
-rw-r--r--drivers/pinctrl/berlin/berlin.c10
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-cs42l43.c44
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-lochnagar.c23
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-madera-core.c18
-rw-r--r--drivers/pinctrl/cix/Kconfig15
-rw-r--r--drivers/pinctrl/cix/Makefile4
-rw-r--r--drivers/pinctrl/cix/pinctrl-sky1-base.c587
-rw-r--r--drivers/pinctrl/cix/pinctrl-sky1.c559
-rw-r--r--drivers/pinctrl/cix/pinctrl-sky1.h48
-rw-r--r--drivers/pinctrl/core.c29
-rw-r--r--drivers/pinctrl/core.h2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c45
-rw-r--r--drivers/pinctrl/intel/pinctrl-alderlake.c68
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c26
-rw-r--r--drivers/pinctrl/intel/pinctrl-cannonlake.c68
-rw-r--r--drivers/pinctrl/intel/pinctrl-cedarfork.c37
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c90
-rw-r--r--drivers/pinctrl/intel/pinctrl-denverton.c21
-rw-r--r--drivers/pinctrl/intel/pinctrl-elkhartlake.c43
-rw-r--r--drivers/pinctrl/intel/pinctrl-emmitsburg.c33
-rw-r--r--drivers/pinctrl/intel/pinctrl-icelake.c60
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c54
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h11
-rw-r--r--drivers/pinctrl/intel/pinctrl-jasperlake.c34
-rw-r--r--drivers/pinctrl/intel/pinctrl-lakefield.c26
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c32
-rw-r--r--drivers/pinctrl/intel/pinctrl-meteorlake.c54
-rw-r--r--drivers/pinctrl/intel/pinctrl-meteorpoint.c46
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c26
-rw-r--r--drivers/pinctrl/intel/pinctrl-tangier.c3
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c70
-rw-r--r--drivers/pinctrl/mediatek/Kconfig22
-rw-r--r--drivers/pinctrl/mediatek/Makefile2
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c9
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.h1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-airoha.c2395
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c15
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.h7
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6878.c1478
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7622.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7623.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7629.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7981.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7986.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7988.c44
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8189.c1698
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8196.c6
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c4
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt6878.h2248
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8189.h2452
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c6
-rw-r--r--drivers/pinctrl/meson/pinctrl-amlogic-a4.c128
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-g12a.c30
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c10
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c8
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c8
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c8
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c2
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-ma35.c21
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c189
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c162
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-wpcm450.c48
-rw-r--r--drivers/pinctrl/nxp/pinctrl-s32cc.c3
-rw-r--r--drivers/pinctrl/pinconf-generic.c71
-rw-r--r--drivers/pinctrl/pinctrl-amd.c62
-rw-r--r--drivers/pinctrl/pinctrl-amdisp.c4
-rw-r--r--drivers/pinctrl/pinctrl-apple-gpio.c5
-rw-r--r--drivers/pinctrl/pinctrl-artpec6.c2
-rw-r--r--drivers/pinctrl/pinctrl-as3722.c21
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c12
-rw-r--r--drivers/pinctrl/pinctrl-at91.c4
-rw-r--r--drivers/pinctrl/pinctrl-aw9523.c30
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c4
-rw-r--r--drivers/pinctrl/pinctrl-bm1880.c2
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c6
-rw-r--r--drivers/pinctrl/pinctrl-da9062.c12
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c6
-rw-r--r--drivers/pinctrl/pinctrl-eic7700.c704
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.c39
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.h2
-rw-r--r--drivers/pinctrl/pinctrl-falcon.c2
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c56
-rw-r--r--drivers/pinctrl/pinctrl-k210.c4
-rw-r--r--drivers/pinctrl/pinctrl-k230.c13
-rw-r--r--drivers/pinctrl/pinctrl-keembay.c45
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c2
-rw-r--r--drivers/pinctrl/pinctrl-max7360.c215
-rw-r--r--drivers/pinctrl/pinctrl-max77620.c9
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c56
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c8
-rw-r--r--drivers/pinctrl/pinctrl-mlxbf3.c2
-rw-r--r--drivers/pinctrl/pinctrl-mpfs-iomux0.c278
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c6
-rw-r--r--drivers/pinctrl/pinctrl-palmas.c4
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c10
-rw-r--r--drivers/pinctrl/pinctrl-pic64gx-gpio2.c356
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c10
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c6
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c448
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.h4
-rw-r--r--drivers/pinctrl/pinctrl-rp1.c1913
-rw-r--r--drivers/pinctrl/pinctrl-scmi.c4
-rw-r--r--drivers/pinctrl/pinctrl-single.c15
-rw-r--r--drivers/pinctrl/pinctrl-st.c9
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c6
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c16
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c4
-rw-r--r--drivers/pinctrl/pinctrl-tps6594.c35
-rw-r--r--drivers/pinctrl/pinctrl-upboard.c1070
-rw-r--r--drivers/pinctrl/pinctrl-xway.c16
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c2
-rw-r--r--drivers/pinctrl/pinctrl-zynqmp.c9
-rw-r--r--drivers/pinctrl/pinmux.c113
-rw-r--r--drivers/pinctrl/pinmux.h19
-rw-r--r--drivers/pinctrl/qcom/Kconfig11
-rw-r--r--drivers/pinctrl/qcom/Kconfig.msm26
-rw-r--r--drivers/pinctrl/qcom/Makefile4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8064.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8084.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-glymur.c1777
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5018.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5332.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5424.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq6018.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8064.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8074.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq9574.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-kaanapali.c1803
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c28
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.h18
-rw-r--r--drivers/pinctrl/qcom/pinctrl-mdm9607.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-mdm9615.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-milos.c1339
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c86
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h6
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8226.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8660.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8909.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8916.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8917.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8953.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8960.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8976.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8994.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8996.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8998.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8x74.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcm2290.c14
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs404.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs615.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs8300.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdf2xxx.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdu1000.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sa8775p.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sar2130p.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7180.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8180x.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8280xp.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm660-lpass-lpi.c160
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm660.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm670.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm845.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx55.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx65.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx75.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm4450.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6115.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6125.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6350.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6375.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm7150.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8150.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250.c84
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8350.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8450.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8550.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8650.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8750.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c21
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c10
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c6
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c6
-rw-r--r--drivers/pinctrl/qcom/pinctrl-x1e80100.c3
-rw-r--r--drivers/pinctrl/qcom/tlmm-test.c47
-rw-r--r--drivers/pinctrl/realtek/Kconfig1
-rw-r--r--drivers/pinctrl/renesas/Kconfig262
-rw-r--r--drivers/pinctrl/renesas/Makefile1
-rw-r--r--drivers/pinctrl/renesas/gpio.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-emev2.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a73a4.c2
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7778.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77951.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7796.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77965.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77970.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77980.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77995.c2
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779f0.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779g0.c102
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779h0.c7
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7723.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7724.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7734.c1
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza1.c16
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza2.c7
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c400
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzn1.c4
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzt2h.c813
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzv2m.c18
-rw-r--r--drivers/pinctrl/renesas/pinctrl.c3
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c262
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c103
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h10
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c8
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h11
-rw-r--r--drivers/pinctrl/spacemit/pinctrl-k1.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c9
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c9
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c8
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c2
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c2
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c7
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h1
-rw-r--r--drivers/pinctrl/stm32/Kconfig20
-rw-r--r--drivers/pinctrl/stm32/Makefile1
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32-hdp.c726
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c536
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.h23
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32mp257.c17
-rw-r--r--drivers/pinctrl/sunplus/sppctl.c8
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c19
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c19
-rw-r--r--drivers/pinctrl/tegra/Kconfig4
-rw-r--r--drivers/pinctrl/tegra/Makefile1
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra186.c1979
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra20.c11
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c15
-rw-r--r--drivers/platform/Kconfig4
-rw-r--r--drivers/platform/Makefile2
-rw-r--r--drivers/platform/arm64/Kconfig20
-rw-r--r--drivers/platform/arm64/Makefile1
-rw-r--r--drivers/platform/arm64/huawei-gaokun-ec.c2
-rw-r--r--drivers/platform/arm64/lenovo-thinkpad-t14s.c662
-rw-r--r--drivers/platform/arm64/lenovo-yoga-c630.c40
-rw-r--r--drivers/platform/chrome/Kconfig2
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c9
-rw-r--r--drivers/platform/chrome/chromeos_pstore.c7
-rw-r--r--drivers/platform/chrome/cros_ec.c93
-rw-r--r--drivers/platform/chrome/cros_ec.h3
-rw-r--r--drivers/platform/chrome/cros_ec_chardev.c72
-rw-r--r--drivers/platform/chrome/cros_ec_i2c.c9
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c7
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c16
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c6
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c15
-rw-r--r--drivers/platform/chrome/cros_ec_rpmsg.c6
-rw-r--r--drivers/platform/chrome/cros_ec_sensorhub.c23
-rw-r--r--drivers/platform/chrome/cros_ec_sensorhub_ring.c11
-rw-r--r--drivers/platform/chrome/cros_ec_spi.c7
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c91
-rw-r--r--drivers/platform/chrome/cros_ec_uart.c6
-rw-r--r--drivers/platform/chrome/cros_ec_vbc.c2
-rw-r--r--drivers/platform/chrome/cros_usbpd_notify.c17
-rw-r--r--drivers/platform/chrome/wilco_ec/telemetry.c2
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-gpio.c31
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.c2
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c28
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c5
-rw-r--r--drivers/platform/mellanox/mlxreg-dpu.c2
-rw-r--r--drivers/platform/mellanox/mlxreg-lc.c12
-rw-r--r--drivers/platform/mellanox/nvsw-sn2201.c2
-rw-r--r--drivers/platform/raspberrypi/Kconfig52
-rw-r--r--drivers/platform/raspberrypi/Makefile15
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/TESTING (renamed from drivers/staging/vc04_services/interface/TESTING)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/TODO4
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c (renamed from drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c)118
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/vchiq_bus.c (renamed from drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c)4
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c (renamed from drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c)10
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/vchiq_debugfs.c (renamed from drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c)6
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/vchiq_dev.c (renamed from drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c)7
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/vchiq_ioctl.h (renamed from drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h)3
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/Kconfig (renamed from drivers/staging/vc04_services/vchiq-mmal/Kconfig)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/Makefile (renamed from drivers/staging/vc04_services/vchiq-mmal/Makefile)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-common.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-common.h)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-encodings.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-encodings.h)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-common.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-format.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-port.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-msg-port.h)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-msg.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-msg.h)2
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-parameters.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c)7
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.h)2
-rw-r--r--drivers/platform/surface/aggregator/core.c2
-rw-r--r--drivers/platform/surface/aggregator/ssh_packet_layer.c2
-rw-r--r--drivers/platform/surface/aggregator/ssh_request_layer.c2
-rw-r--r--drivers/platform/surface/surface_acpi_notify.c2
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c13
-rw-r--r--drivers/platform/wmi/Kconfig34
-rw-r--r--drivers/platform/wmi/Makefile8
-rw-r--r--drivers/platform/wmi/core.c (renamed from drivers/platform/x86/wmi.c)87
-rw-r--r--drivers/platform/x86/Kconfig308
-rw-r--r--drivers/platform/x86/Makefile25
-rw-r--r--drivers/platform/x86/acer-wmi.c365
-rw-r--r--drivers/platform/x86/amd/Kconfig1
-rw-r--r--drivers/platform/x86/amd/Makefile1
-rw-r--r--drivers/platform/x86/amd/amd_isp4.c184
-rw-r--r--drivers/platform/x86/amd/hfi/Kconfig18
-rw-r--r--drivers/platform/x86/amd/hfi/Makefile7
-rw-r--r--drivers/platform/x86/amd/hfi/hfi.c546
-rw-r--r--drivers/platform/x86/amd/hsmp/acpi.c24
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.c19
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.h3
-rw-r--r--drivers/platform/x86/amd/hsmp/plat.c36
-rw-r--r--drivers/platform/x86/amd/pmc/pmc-quirks.c126
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.c18
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.h1
-rw-r--r--drivers/platform/x86/amd/pmf/acpi.c87
-rw-r--r--drivers/platform/x86/amd/pmf/auto-mode.c14
-rw-r--r--drivers/platform/x86/amd/pmf/cnqf.c14
-rw-r--r--drivers/platform/x86/amd/pmf/core.c27
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h100
-rw-r--r--drivers/platform/x86/amd/pmf/spc.c82
-rw-r--r--drivers/platform/x86/amd/pmf/sps.c40
-rw-r--r--drivers/platform/x86/amd/pmf/tee-if.c216
-rw-r--r--drivers/platform/x86/asus-armoury.c1161
-rw-r--r--drivers/platform/x86/asus-armoury.h1541
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c37
-rw-r--r--drivers/platform/x86/asus-wmi.c194
-rw-r--r--drivers/platform/x86/asus-wmi.h3
-rw-r--r--drivers/platform/x86/ayaneo-ec.c593
-rw-r--r--drivers/platform/x86/barco-p50-gpio.c106
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-wmax.c321
-rw-r--r--drivers/platform/x86/dell/dcdbas.c2
-rw-r--r--drivers/platform/x86/dell/dell-lis3lv02d.c3
-rw-r--r--drivers/platform/x86/dell/dell-pc.c9
-rw-r--r--drivers/platform/x86/dell/dell-smbios-base.c19
-rw-r--r--drivers/platform/x86/dell/dell-smbios-smm.c3
-rw-r--r--drivers/platform/x86/dell/dell-smbios-wmi.c4
-rw-r--r--drivers/platform/x86/dell/dell-smbios.h2
-rw-r--r--drivers/platform/x86/dell/dell-uart-backlight.c2
-rw-r--r--drivers/platform/x86/dell/dell-wmi-base.c12
-rw-r--r--drivers/platform/x86/dell/dell-wmi-ddv.c10
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h5
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c5
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c5
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c5
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c5
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/sysman.c12
-rw-r--r--drivers/platform/x86/dell/dell_rbu.c26
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c8
-rw-r--r--drivers/platform/x86/gpd-pocket-fan.c4
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/bioscfg.c4
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c34
-rw-r--r--drivers/platform/x86/huawei-wmi.c4
-rw-r--r--drivers/platform/x86/intel/Kconfig13
-rw-r--r--drivers/platform/x86/intel/Makefile1
-rw-r--r--drivers/platform/x86/intel/chtwc_int33fe.c29
-rw-r--r--drivers/platform/x86/intel/ehl_pse_io.c86
-rw-r--r--drivers/platform/x86/intel/hid.c14
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c2
-rw-r--r--drivers/platform/x86/intel/int3472/clk_and_regulator.c5
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c64
-rw-r--r--drivers/platform/x86/intel/int3472/led.c2
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470_board_data.c128
-rw-r--r--drivers/platform/x86/intel/plr_tpmi.c3
-rw-r--r--drivers/platform/x86/intel/pmc/Makefile2
-rw-r--r--drivers/platform/x86/intel/pmc/arl.c16
-rw-r--r--drivers/platform/x86/intel/pmc/core.c321
-rw-r--r--drivers/platform/x86/intel/pmc/core.h50
-rw-r--r--drivers/platform/x86/intel/pmc/lnl.c18
-rw-r--r--drivers/platform/x86/intel/pmc/mtl.c11
-rw-r--r--drivers/platform/x86/intel/pmc/ptl.c36
-rw-r--r--drivers/platform/x86/intel/pmc/ssram_telemetry.c4
-rw-r--r--drivers/platform/x86/intel/pmc/tgl.c4
-rw-r--r--drivers/platform/x86/intel/pmc/wcl.c504
-rw-r--r--drivers/platform/x86/intel/pmt/Kconfig28
-rw-r--r--drivers/platform/x86/intel/pmt/Makefile4
-rw-r--r--drivers/platform/x86/intel/pmt/class.c52
-rw-r--r--drivers/platform/x86/intel/pmt/class.h12
-rw-r--r--drivers/platform/x86/intel/pmt/crashlog.c459
-rw-r--r--drivers/platform/x86/intel/pmt/discovery-kunit.c116
-rw-r--r--drivers/platform/x86/intel/pmt/discovery.c635
-rw-r--r--drivers/platform/x86/intel/pmt/features.c205
-rw-r--r--drivers/platform/x86/intel/pmt/telemetry.c94
-rw-r--r--drivers/platform/x86/intel/punit_ipc.c2
-rw-r--r--drivers/platform/x86/intel/sdsi.c2
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c2
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c4
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c9
-rw-r--r--drivers/platform/x86/intel/telemetry/core.c177
-rw-r--r--drivers/platform/x86/intel/telemetry/pltdrv.c231
-rw-r--r--drivers/platform/x86/intel/tpmi_power_domains.c8
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c2
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h9
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c97
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c4
-rw-r--r--drivers/platform/x86/intel/vsec.c374
-rw-r--r--drivers/platform/x86/intel/vsec_tpmi.c8
-rw-r--r--drivers/platform/x86/lenovo/Kconfig276
-rw-r--r--drivers/platform/x86/lenovo/Makefile28
-rw-r--r--drivers/platform/x86/lenovo/ideapad-laptop.c (renamed from drivers/platform/x86/ideapad-laptop.c)301
-rw-r--r--drivers/platform/x86/lenovo/ideapad-laptop.h (renamed from drivers/platform/x86/ideapad-laptop.h)0
-rw-r--r--drivers/platform/x86/lenovo/think-lmi.c (renamed from drivers/platform/x86/think-lmi.c)198
-rw-r--r--drivers/platform/x86/lenovo/think-lmi.h (renamed from drivers/platform/x86/think-lmi.h)14
-rw-r--r--drivers/platform/x86/lenovo/thinkpad_acpi.c (renamed from drivers/platform/x86/thinkpad_acpi.c)7
-rw-r--r--drivers/platform/x86/lenovo/wmi-camera.c (renamed from drivers/platform/x86/lenovo-wmi-camera.c)0
-rw-r--r--drivers/platform/x86/lenovo/wmi-capdata01.c302
-rw-r--r--drivers/platform/x86/lenovo/wmi-capdata01.h25
-rw-r--r--drivers/platform/x86/lenovo/wmi-events.c196
-rw-r--r--drivers/platform/x86/lenovo/wmi-events.h20
-rw-r--r--drivers/platform/x86/lenovo/wmi-gamezone.c414
-rw-r--r--drivers/platform/x86/lenovo/wmi-gamezone.h20
-rw-r--r--drivers/platform/x86/lenovo/wmi-helpers.c74
-rw-r--r--drivers/platform/x86/lenovo/wmi-helpers.h20
-rw-r--r--drivers/platform/x86/lenovo/wmi-hotkey-utilities.c (renamed from drivers/platform/x86/lenovo-wmi-hotkey-utilities.c)30
-rw-r--r--drivers/platform/x86/lenovo/wmi-other.c665
-rw-r--r--drivers/platform/x86/lenovo/wmi-other.h16
-rw-r--r--drivers/platform/x86/lenovo/ymc.c (renamed from drivers/platform/x86/lenovo-ymc.c)0
-rw-r--r--drivers/platform/x86/lenovo/yoga-tab2-pro-1380-fastcharger.c (renamed from drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c)40
-rw-r--r--drivers/platform/x86/lenovo/yogabook.c (renamed from drivers/platform/x86/lenovo-yogabook.c)0
-rw-r--r--drivers/platform/x86/lg-laptop.c45
-rw-r--r--drivers/platform/x86/meraki-mx100.c404
-rw-r--r--drivers/platform/x86/msi-wmi-platform.c43
-rw-r--r--drivers/platform/x86/oxpec.c165
-rw-r--r--drivers/platform/x86/pcengines-apuv2.c192
-rw-r--r--drivers/platform/x86/portwell-ec.c197
-rw-r--r--drivers/platform/x86/quickstart.c10
-rw-r--r--drivers/platform/x86/redmi-wmi.c130
-rw-r--r--drivers/platform/x86/samsung-galaxybook.c1
-rw-r--r--drivers/platform/x86/samsung-laptop.c110
-rw-r--r--drivers/platform/x86/serial-multi-instantiate.c13
-rw-r--r--drivers/platform/x86/silicom-platform.c6
-rw-r--r--drivers/platform/x86/uniwill/Kconfig38
-rw-r--r--drivers/platform/x86/uniwill/Makefile8
-rw-r--r--drivers/platform/x86/uniwill/uniwill-acpi.c1912
-rw-r--r--drivers/platform/x86/uniwill/uniwill-wmi.c92
-rw-r--r--drivers/platform/x86/uniwill/uniwill-wmi.h129
-rw-r--r--drivers/platform/x86/wmi-bmof.c2
-rw-r--r--drivers/platform/x86/x86-android-tablets/Makefile2
-rw-r--r--drivers/platform/x86/x86-android-tablets/acer.c247
-rw-r--r--drivers/platform/x86/x86-android-tablets/asus.c127
-rw-r--r--drivers/platform/x86/x86-android-tablets/core.c121
-rw-r--r--drivers/platform/x86/x86-android-tablets/dmi.c12
-rw-r--r--drivers/platform/x86/x86-android-tablets/lenovo.c291
-rw-r--r--drivers/platform/x86/x86-android-tablets/other.c334
-rw-r--r--drivers/platform/x86/x86-android-tablets/shared-psy-info.c110
-rw-r--r--drivers/platform/x86/x86-android-tablets/shared-psy-info.h9
-rw-r--r--drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c4
-rw-r--r--drivers/platform/x86/x86-android-tablets/x86-android-tablets.h28
-rw-r--r--drivers/platform/x86/xiaomi-wmi.c10
-rw-r--r--drivers/pmdomain/Kconfig1
-rw-r--r--drivers/pmdomain/Makefile1
-rw-r--r--drivers/pmdomain/amlogic/meson-secure-pwrc.c107
-rw-r--r--drivers/pmdomain/apple/Kconfig1
-rw-r--r--drivers/pmdomain/apple/pmgr-pwrstate.c1
-rw-r--r--drivers/pmdomain/arm/scmi_pm_domain.c25
-rw-r--r--drivers/pmdomain/bcm/bcm2835-power.c17
-rw-r--r--drivers/pmdomain/core.c278
-rw-r--r--drivers/pmdomain/governor.c63
-rw-r--r--drivers/pmdomain/imx/gpc.c3
-rw-r--r--drivers/pmdomain/imx/imx8m-blk-ctrl.c10
-rw-r--r--drivers/pmdomain/imx/imx93-blk-ctrl.c23
-rw-r--r--drivers/pmdomain/marvell/Kconfig18
-rw-r--r--drivers/pmdomain/marvell/Makefile3
-rw-r--r--drivers/pmdomain/marvell/pxa1908-power-controller.c274
-rw-r--r--drivers/pmdomain/mediatek/Kconfig17
-rw-r--r--drivers/pmdomain/mediatek/Makefile1
-rw-r--r--drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c8
-rw-r--r--drivers/pmdomain/mediatek/mt6795-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8167-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8173-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8183-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8186-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8188-pm-domains.h6
-rw-r--r--drivers/pmdomain/mediatek/mt8192-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8195-pm-domains.h6
-rw-r--r--drivers/pmdomain/mediatek/mt8196-pm-domains.h625
-rw-r--r--drivers/pmdomain/mediatek/mt8365-pm-domains.h14
-rw-r--r--drivers/pmdomain/mediatek/mtk-mfg-pmdomain.c1044
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.c707
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.h123
-rw-r--r--drivers/pmdomain/qcom/rpmhpd.c75
-rw-r--r--drivers/pmdomain/qcom/rpmpd.c114
-rw-r--r--drivers/pmdomain/renesas/Kconfig124
-rw-r--r--drivers/pmdomain/renesas/rcar-gen4-sysc.c3
-rw-r--r--drivers/pmdomain/renesas/rcar-sysc.c20
-rw-r--r--drivers/pmdomain/renesas/rmobile-sysc.c6
-rw-r--r--drivers/pmdomain/rockchip/Kconfig1
-rw-r--r--drivers/pmdomain/rockchip/pm-domains.c70
-rw-r--r--drivers/pmdomain/samsung/exynos-pm-domains.c20
-rw-r--r--drivers/pmdomain/sunxi/Kconfig19
-rw-r--r--drivers/pmdomain/sunxi/Makefile1
-rw-r--r--drivers/pmdomain/sunxi/sun20i-ppu.c17
-rw-r--r--drivers/pmdomain/sunxi/sun55i-pck600.c234
-rw-r--r--drivers/pmdomain/tegra/powergate-bpmp.c1
-rw-r--r--drivers/pmdomain/thead/Kconfig1
-rw-r--r--drivers/pmdomain/thead/th1520-pm-domains.c67
-rw-r--r--drivers/pmdomain/ti/Kconfig2
-rw-r--r--drivers/pmdomain/ti/ti_sci_pm_domains.c24
-rw-r--r--drivers/pmdomain/xilinx/zynqmp-pm-domains.c16
-rw-r--r--drivers/pnp/driver.c19
-rw-r--r--drivers/pnp/isapnp/core.c3
-rw-r--r--drivers/power/reset/Kconfig26
-rw-r--r--drivers/power/reset/Makefile3
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c2
-rw-r--r--drivers/power/reset/macsmc-reboot.c290
-rw-r--r--drivers/power/reset/qcom-pon.c30
-rw-r--r--drivers/power/reset/sc27xx-poweroff.c10
-rw-r--r--drivers/power/reset/spacemit-p1-reboot.c88
-rw-r--r--drivers/power/reset/th1520-aon-reboot.c98
-rw-r--r--drivers/power/sequencing/Kconfig10
-rw-r--r--drivers/power/sequencing/Makefile1
-rw-r--r--drivers/power/sequencing/core.c6
-rw-r--r--drivers/power/sequencing/pwrseq-qcom-wcn.c10
-rw-r--r--drivers/power/sequencing/pwrseq-thead-gpu.c249
-rw-r--r--drivers/power/supply/88pm860x_charger.c8
-rw-r--r--drivers/power/supply/Kconfig58
-rw-r--r--drivers/power/supply/Makefile8
-rw-r--r--drivers/power/supply/ab8500_btemp.c3
-rw-r--r--drivers/power/supply/adc-battery-helper.c327
-rw-r--r--drivers/power/supply/adc-battery-helper.h62
-rw-r--r--drivers/power/supply/apm_power.c3
-rw-r--r--drivers/power/supply/bd71828-power.c1049
-rw-r--r--drivers/power/supply/bq2415x_charger.c6
-rw-r--r--drivers/power/supply/bq24190_charger.c18
-rw-r--r--drivers/power/supply/bq256xx_charger.c6
-rw-r--r--drivers/power/supply/bq257xx_charger.c755
-rw-r--r--drivers/power/supply/bq25980_charger.c6
-rw-r--r--drivers/power/supply/bq27xxx_battery.c21
-rw-r--r--drivers/power/supply/cpcap-charger.c5
-rw-r--r--drivers/power/supply/cw2015_battery.c14
-rw-r--r--drivers/power/supply/ds2760_battery.c2
-rw-r--r--drivers/power/supply/ds2780_battery.c10
-rw-r--r--drivers/power/supply/ds2781_battery.c10
-rw-r--r--drivers/power/supply/gpio-charger.c7
-rw-r--r--drivers/power/supply/intel_dc_ti_battery.c391
-rw-r--r--drivers/power/supply/ipaq_micro_battery.c3
-rw-r--r--drivers/power/supply/max14577_charger.c4
-rw-r--r--drivers/power/supply/max17040_battery.c6
-rw-r--r--drivers/power/supply/max1720x_battery.c13
-rw-r--r--drivers/power/supply/max77705_charger.c388
-rw-r--r--drivers/power/supply/max77976_charger.c12
-rw-r--r--drivers/power/supply/mt6370-charger.c18
-rw-r--r--drivers/power/supply/olpc_battery.c4
-rw-r--r--drivers/power/supply/pf1550-charger.c641
-rw-r--r--drivers/power/supply/power_supply_core.c267
-rw-r--r--drivers/power/supply/power_supply_sysfs.c2
-rw-r--r--drivers/power/supply/qcom_battmgr.c339
-rw-r--r--drivers/power/supply/qcom_smbx.c (renamed from drivers/power/supply/qcom_pmi8998_charger.c)152
-rw-r--r--drivers/power/supply/rk817_charger.c6
-rw-r--r--drivers/power/supply/rt5033_charger.c2
-rw-r--r--drivers/power/supply/rt9467-charger.c53
-rw-r--r--drivers/power/supply/rt9756.c955
-rw-r--r--drivers/power/supply/rx51_battery.c2
-rw-r--r--drivers/power/supply/sbs-charger.c16
-rw-r--r--drivers/power/supply/sbs-manager.c2
-rw-r--r--drivers/power/supply/test_power.c4
-rw-r--r--drivers/power/supply/twl4030_charger.c1
-rw-r--r--drivers/power/supply/ucs1002_power.c2
-rw-r--r--drivers/power/supply/ug3105_battery.c397
-rw-r--r--drivers/power/supply/wm831x_power.c10
-rw-r--r--drivers/powercap/dtpm.c16
-rw-r--r--drivers/powercap/dtpm_cpu.c2
-rw-r--r--drivers/powercap/idle_inject.c5
-rw-r--r--drivers/powercap/intel_rapl_common.c58
-rw-r--r--drivers/powercap/intel_rapl_msr.c44
-rw-r--r--drivers/powercap/intel_rapl_tpmi.c11
-rw-r--r--drivers/pps/clients/pps-gpio.c5
-rw-r--r--drivers/pps/generators/pps_gen_parport.c3
-rw-r--r--drivers/pps/kapi.c8
-rw-r--r--drivers/pps/pps.c16
-rw-r--r--drivers/ps3/ps3stor_lib.c3
-rw-r--r--drivers/ptp/Kconfig13
-rw-r--r--drivers/ptp/Makefile5
-rw-r--r--drivers/ptp/ptp_chardev.c798
-rw-r--r--drivers/ptp/ptp_clock.c159
-rw-r--r--drivers/ptp/ptp_clockmatrix.c2
-rw-r--r--drivers/ptp/ptp_ines.c31
-rw-r--r--drivers/ptp/ptp_mock.c2
-rw-r--r--drivers/ptp/ptp_netc.c1043
-rw-r--r--drivers/ptp/ptp_ocp.c78
-rw-r--r--drivers/ptp/ptp_private.h20
-rw-r--r--drivers/ptp/ptp_qoriq.c24
-rw-r--r--drivers/ptp/ptp_qoriq_debugfs.c101
-rw-r--r--drivers/ptp/ptp_sysfs.c2
-rw-r--r--drivers/ptp/ptp_vclock.c9
-rw-r--r--drivers/pwm/Kconfig66
-rw-r--r--drivers/pwm/Makefile4
-rw-r--r--drivers/pwm/core.c466
-rw-r--r--drivers/pwm/pwm-adp5585.c78
-rw-r--r--drivers/pwm/pwm-airoha.c622
-rw-r--r--drivers/pwm/pwm-argon-fan-hat.c109
-rw-r--r--drivers/pwm/pwm-atmel.c12
-rw-r--r--drivers/pwm/pwm-axi-pwmgen.c2
-rw-r--r--drivers/pwm/pwm-bcm2835.c28
-rw-r--r--drivers/pwm/pwm-berlin.c4
-rw-r--r--drivers/pwm/pwm-clps711x.c8
-rw-r--r--drivers/pwm/pwm-cros-ec.c10
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c63
-rw-r--r--drivers/pwm/pwm-img.c2
-rw-r--r--drivers/pwm/pwm-imx-tpm.c9
-rw-r--r--drivers/pwm/pwm-loongson.c2
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c14
-rw-r--r--drivers/pwm/pwm-max7360.c209
-rw-r--r--drivers/pwm/pwm-mc33xs2410.c20
-rw-r--r--drivers/pwm/pwm-mediatek.c475
-rw-r--r--drivers/pwm/pwm-microchip-core.c17
-rw-r--r--drivers/pwm/pwm-pca9685.c515
-rw-r--r--drivers/pwm/pwm-pxa.c6
-rw-r--r--drivers/pwm/pwm-rockchip.c33
-rw-r--r--drivers/pwm/pwm-rzg2l-gpt.c15
-rw-r--r--drivers/pwm/pwm-sifive.c52
-rw-r--r--drivers/pwm/pwm-sophgo-sg2042.c141
-rw-r--r--drivers/pwm/pwm-sti.c23
-rw-r--r--drivers/pwm/pwm-stm32.c42
-rw-r--r--drivers/pwm/pwm-sun4i.c10
-rw-r--r--drivers/pwm/pwm-tiecap.c4
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c154
-rw-r--r--drivers/pwm/pwm-twl-led.c49
-rw-r--r--drivers/pwm/pwm_th1520.rs387
-rw-r--r--drivers/rapidio/rio-driver.c2
-rw-r--r--drivers/rapidio/rio-sysfs.c6
-rw-r--r--drivers/rapidio/rio_cm.c3
-rw-r--r--drivers/ras/amd/atl/core.c7
-rw-r--r--drivers/ras/amd/atl/internal.h6
-rw-r--r--drivers/ras/amd/atl/prm.c4
-rw-r--r--drivers/ras/amd/atl/system.c30
-rw-r--r--drivers/ras/amd/atl/umc.c23
-rw-r--r--drivers/ras/cec.c2
-rw-r--r--drivers/ras/ras.c41
-rw-r--r--drivers/regulator/Kconfig124
-rw-r--r--drivers/regulator/Makefile12
-rw-r--r--drivers/regulator/act8865-regulator.c2
-rw-r--r--drivers/regulator/arizona-micsupp.c8
-rw-r--r--drivers/regulator/bd71815-regulator.c8
-rw-r--r--drivers/regulator/bd71828-regulator.c4
-rw-r--r--drivers/regulator/bd718x7-regulator.c35
-rw-r--r--drivers/regulator/bd96801-regulator.c10
-rw-r--r--drivers/regulator/bq257xx-regulator.c186
-rw-r--r--drivers/regulator/core.c217
-rw-r--r--drivers/regulator/fan53555.c14
-rw-r--r--drivers/regulator/fixed.c1
-rw-r--r--drivers/regulator/fp9931.c551
-rw-r--r--drivers/regulator/gpio-regulator.c8
-rw-r--r--drivers/regulator/hi6421-regulator.c10
-rw-r--r--drivers/regulator/hi6421v530-regulator.c4
-rw-r--r--drivers/regulator/hi6421v600-regulator.c6
-rw-r--r--drivers/regulator/irq_helpers.c2
-rw-r--r--drivers/regulator/max20086-regulator.c6
-rw-r--r--drivers/regulator/max77650-regulator.c6
-rw-r--r--drivers/regulator/max77838-regulator.c221
-rw-r--r--drivers/regulator/mp886x.c3
-rw-r--r--drivers/regulator/mt6315-regulator.c6
-rw-r--r--drivers/regulator/mt6316-regulator.c345
-rw-r--r--drivers/regulator/mt6358-regulator.c2
-rw-r--r--drivers/regulator/mt6363-regulator.c938
-rw-r--r--drivers/regulator/mt6370-regulator.c4
-rw-r--r--drivers/regulator/mtk-dvfsrc-regulator.c38
-rw-r--r--drivers/regulator/of_regulator.c6
-rw-r--r--drivers/regulator/pca9450-regulator.c353
-rw-r--r--drivers/regulator/pf0900-regulator.c975
-rw-r--r--drivers/regulator/pf1550-regulator.c429
-rw-r--r--drivers/regulator/pf530x-regulator.c375
-rw-r--r--drivers/regulator/pf9453-regulator.c42
-rw-r--r--drivers/regulator/qcom-labibb-regulator.c4
-rw-r--r--drivers/regulator/qcom-pm8008-regulator.c2
-rw-r--r--drivers/regulator/qcom-refgen-regulator.c1
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c1312
-rw-r--r--drivers/regulator/renesas-usb-vbus-regulator.c2
-rw-r--r--drivers/regulator/rpi-panel-attiny-regulator.c2
-rw-r--r--drivers/regulator/rpi-panel-v2-regulator.c125
-rw-r--r--drivers/regulator/rt5133-regulator.c642
-rw-r--r--drivers/regulator/rt5739.c9
-rw-r--r--drivers/regulator/rt6160-regulator.c19
-rw-r--r--drivers/regulator/rtq2208-regulator.c6
-rw-r--r--drivers/regulator/s2dos05-regulator.c165
-rw-r--r--drivers/regulator/scmi-regulator.c3
-rw-r--r--drivers/regulator/spacemit-p1.c157
-rw-r--r--drivers/regulator/stm32-vrefbuf.c6
-rw-r--r--drivers/regulator/sy7636a-regulator.c34
-rw-r--r--drivers/regulator/sy8824x.c5
-rw-r--r--drivers/regulator/sy8827n.c3
-rw-r--r--drivers/regulator/tps6286x-regulator.c9
-rw-r--r--drivers/regulator/tps6287x-regulator.c7
-rw-r--r--drivers/regulator/tps65219-regulator.c40
-rw-r--r--drivers/regulator/tps6524x-regulator.c1
-rw-r--r--drivers/regulator/tps6594-regulator.c273
-rw-r--r--drivers/remoteproc/Kconfig11
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c57
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c447
-rw-r--r--drivers/remoteproc/imx_rproc.c654
-rw-r--r--drivers/remoteproc/imx_rproc.h23
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c95
-rw-r--r--drivers/remoteproc/mtk_scp.c65
-rw-r--r--drivers/remoteproc/omap_remoteproc.c5
-rw-r--r--drivers/remoteproc/pru_rproc.c5
-rw-r--r--drivers/remoteproc/qcom_q6v5.c8
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c31
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c71
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c688
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c42
-rw-r--r--drivers/remoteproc/qcom_wcnss.c27
-rw-r--r--drivers/remoteproc/rcar_rproc.c38
-rw-r--r--drivers/remoteproc/remoteproc_core.c33
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c2
-rw-r--r--drivers/remoteproc/st_remoteproc.c44
-rw-r--r--drivers/remoteproc/st_slim_rproc.c2
-rw-r--r--drivers/remoteproc/stm32_rproc.c46
-rw-r--r--drivers/remoteproc/ti_k3_common.c53
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c2
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c4
-rw-r--r--drivers/remoteproc/wkup_m3_rproc.c69
-rw-r--r--drivers/remoteproc/xlnx_r5_remoteproc.c127
-rw-r--r--drivers/resctrl/Kconfig24
-rw-r--r--drivers/resctrl/Makefile4
-rw-r--r--drivers/resctrl/mpam_devices.c2723
-rw-r--r--drivers/resctrl/mpam_internal.h658
-rw-r--r--drivers/resctrl/test_mpam_devices.c389
-rw-r--r--drivers/reset/Kconfig52
-rw-r--r--drivers/reset/Makefile4
-rw-r--r--drivers/reset/core.c262
-rw-r--r--drivers/reset/reset-aspeed.c253
-rw-r--r--drivers/reset/reset-bcm6345.c1
-rw-r--r--drivers/reset/reset-eic7700.c429
-rw-r--r--drivers/reset/reset-eyeq.c11
-rw-r--r--drivers/reset/reset-gpio.c19
-rw-r--r--drivers/reset/reset-imx8mp-audiomix.c4
-rw-r--r--drivers/reset/reset-intel-gw.c1
-rw-r--r--drivers/reset/reset-k230.c371
-rw-r--r--drivers/reset/reset-mpfs.c129
-rw-r--r--drivers/reset/reset-qcom-pdc.c1
-rw-r--r--drivers/reset/reset-rzg2l-usbphy-ctrl.c60
-rw-r--r--drivers/reset/reset-simple.c2
-rw-r--r--drivers/reset/reset-spacemit.c304
-rw-r--r--drivers/reset/reset-th1520.c876
-rw-r--r--drivers/rpmsg/qcom_glink_native.c37
-rw-r--r--drivers/rpmsg/qcom_smd.c4
-rw-r--r--drivers/rpmsg/rpmsg_char.c3
-rw-r--r--drivers/rpmsg/rpmsg_core.c5
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c2
-rw-r--r--drivers/rtc/Kconfig69
-rw-r--r--drivers/rtc/Makefile4
-rw-r--r--drivers/rtc/interface.c27
-rw-r--r--drivers/rtc/lib.c40
-rw-r--r--drivers/rtc/rtc-amlogic-a4.c14
-rw-r--r--drivers/rtc/rtc-cmos.c10
-rw-r--r--drivers/rtc/rtc-ds1307.c30
-rw-r--r--drivers/rtc/rtc-ds1685.c4
-rw-r--r--drivers/rtc/rtc-efi.c76
-rw-r--r--drivers/rtc/rtc-hym8563.c15
-rw-r--r--drivers/rtc/rtc-isl12022.c1
-rw-r--r--drivers/rtc/rtc-m41t80.c25
-rw-r--r--drivers/rtc/rtc-max31335.c12
-rw-r--r--drivers/rtc/rtc-mc13xxx.c13
-rw-r--r--drivers/rtc/rtc-meson.c1
-rw-r--r--drivers/rtc/rtc-nct3018y.c15
-rw-r--r--drivers/rtc/rtc-nct6694.c297
-rw-r--r--drivers/rtc/rtc-optee.c465
-rw-r--r--drivers/rtc/rtc-pcf2127.c26
-rw-r--r--drivers/rtc/rtc-pcf85063.c267
-rw-r--r--drivers/rtc/rtc-pcf8563.c15
-rw-r--r--drivers/rtc/rtc-rv3028.c15
-rw-r--r--drivers/rtc/rtc-rv3032.c21
-rw-r--r--drivers/rtc/rtc-rx8025.c2
-rw-r--r--drivers/rtc/rtc-s3c.c51
-rw-r--r--drivers/rtc/rtc-s3c.h19
-rw-r--r--drivers/rtc/rtc-s5m.c197
-rw-r--r--drivers/rtc/rtc-sd2405al.c4
-rw-r--r--drivers/rtc/rtc-sh.c8
-rw-r--r--drivers/rtc/rtc-spacemit-p1.c167
-rw-r--r--drivers/rtc/rtc-stm32.c2
-rw-r--r--drivers/rtc/rtc-x1205.c2
-rw-r--r--drivers/rtc/rtc-zynqmp.c19
-rw-r--r--drivers/rtc/sysfs.c64
-rw-r--r--drivers/rtc/test_rtc_lib.c (renamed from drivers/rtc/lib_test.c)0
-rw-r--r--drivers/s390/block/Kconfig12
-rw-r--r--drivers/s390/block/dasd.c93
-rw-r--r--drivers/s390/block/dasd_devmap.c4
-rw-r--r--drivers/s390/block/dasd_eckd.c19
-rw-r--r--drivers/s390/block/dasd_eer.c1
-rw-r--r--drivers/s390/block/dasd_erp.c1
-rw-r--r--drivers/s390/block/dasd_fba.c1
-rw-r--r--drivers/s390/block/dasd_genhd.c80
-rw-r--r--drivers/s390/block/dasd_ioctl.c7
-rw-r--r--drivers/s390/block/dcssblk.c52
-rw-r--r--drivers/s390/block/scm_blk.c3
-rw-r--r--drivers/s390/block/scm_drv.c3
-rw-r--r--drivers/s390/char/Makefile1
-rw-r--r--drivers/s390/char/con3270.c39
-rw-r--r--drivers/s390/char/diag_ftp.c3
-rw-r--r--drivers/s390/char/fs3270.c7
-rw-r--r--drivers/s390/char/hmcdrv_cache.c3
-rw-r--r--drivers/s390/char/hmcdrv_dev.c22
-rw-r--r--drivers/s390/char/hmcdrv_ftp.c3
-rw-r--r--drivers/s390/char/hmcdrv_mod.c3
-rw-r--r--drivers/s390/char/keyboard.c1
-rw-r--r--drivers/s390/char/monreader.c3
-rw-r--r--drivers/s390/char/monwriter.c3
-rw-r--r--drivers/s390/char/raw3270.c1
-rw-r--r--drivers/s390/char/sclp.c16
-rw-r--r--drivers/s390/char/sclp_ap.c3
-rw-r--r--drivers/s390/char/sclp_cmd.c482
-rw-r--r--drivers/s390/char/sclp_config.c5
-rw-r--r--drivers/s390/char/sclp_cpi_sys.c3
-rw-r--r--drivers/s390/char/sclp_ctl.c12
-rw-r--r--drivers/s390/char/sclp_early.c4
-rw-r--r--drivers/s390/char/sclp_early_core.c2
-rw-r--r--drivers/s390/char/sclp_ftp.c3
-rw-r--r--drivers/s390/char/sclp_mem.c521
-rw-r--r--drivers/s390/char/sclp_ocf.c4
-rw-r--r--drivers/s390/char/sclp_pci.c3
-rw-r--r--drivers/s390/char/sclp_sd.c9
-rw-r--r--drivers/s390/char/sclp_sdias.c3
-rw-r--r--drivers/s390/char/tape.h21
-rw-r--r--drivers/s390/char/tape_34xx.c32
-rw-r--r--drivers/s390/char/tape_3590.c95
-rw-r--r--drivers/s390/char/tape_char.c142
-rw-r--r--drivers/s390/char/tape_class.c4
-rw-r--r--drivers/s390/char/tape_core.c39
-rw-r--r--drivers/s390/char/tape_proc.c3
-rw-r--r--drivers/s390/char/tape_std.c84
-rw-r--r--drivers/s390/char/tape_std.h9
-rw-r--r--drivers/s390/char/vmcp.c8
-rw-r--r--drivers/s390/char/vmlogrdr.c3
-rw-r--r--drivers/s390/char/vmur.c3
-rw-r--r--drivers/s390/char/zcore.c3
-rw-r--r--drivers/s390/cio/airq.c1
-rw-r--r--drivers/s390/cio/blacklist.c3
-rw-r--r--drivers/s390/cio/ccwgroup.c8
-rw-r--r--drivers/s390/cio/ccwreq.c3
-rw-r--r--drivers/s390/cio/chp.c7
-rw-r--r--drivers/s390/cio/chsc.c14
-rw-r--r--drivers/s390/cio/chsc_sch.c7
-rw-r--r--drivers/s390/cio/cio.c6
-rw-r--r--drivers/s390/cio/cio_inject.c3
-rw-r--r--drivers/s390/cio/cmf.c5
-rw-r--r--drivers/s390/cio/css.c3
-rw-r--r--drivers/s390/cio/device.c40
-rw-r--r--drivers/s390/cio/device_fsm.c1
-rw-r--r--drivers/s390/cio/device_status.c2
-rw-r--r--drivers/s390/cio/eadm_sch.c1
-rw-r--r--drivers/s390/cio/fcx.c1
-rw-r--r--drivers/s390/cio/ioasm.c7
-rw-r--r--drivers/s390/cio/isc.c1
-rw-r--r--drivers/s390/cio/itcw.c1
-rw-r--r--drivers/s390/cio/qdio_debug.c1
-rw-r--r--drivers/s390/cio/qdio_main.c2
-rw-r--r--drivers/s390/cio/scm.c1
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c47
-rw-r--r--drivers/s390/crypto/ap_bus.c197
-rw-r--r--drivers/s390/crypto/ap_bus.h7
-rw-r--r--drivers/s390/crypto/ap_card.c3
-rw-r--r--drivers/s390/crypto/ap_queue.c76
-rw-r--r--drivers/s390/crypto/pkey_api.c6
-rw-r--r--drivers/s390/crypto/pkey_base.c4
-rw-r--r--drivers/s390/crypto/pkey_cca.c3
-rw-r--r--drivers/s390/crypto/pkey_ep11.c3
-rw-r--r--drivers/s390/crypto/pkey_pckmo.c3
-rw-r--r--drivers/s390/crypto/pkey_sysfs.c11
-rw-r--r--drivers/s390/crypto/pkey_uv.c3
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c16
-rw-r--r--drivers/s390/crypto/zcrypt_api.c258
-rw-r--r--drivers/s390/crypto/zcrypt_card.c2
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c4
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c8
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c3
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c3
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c2
-rw-r--r--drivers/s390/net/Kconfig15
-rw-r--r--drivers/s390/net/Makefile1
-rw-r--r--drivers/s390/net/ctcm_fsms.c17
-rw-r--r--drivers/s390/net/ctcm_main.c3
-rw-r--r--drivers/s390/net/ctcm_mpc.c5
-rw-r--r--drivers/s390/net/ctcm_sysfs.c3
-rw-r--r--drivers/s390/net/fsm.c1
-rw-r--r--drivers/s390/net/ism.h53
-rw-r--r--drivers/s390/net/ism_drv.c580
-rw-r--r--drivers/s390/net/netiucv.c2083
-rw-r--r--drivers/s390/net/qeth_core_main.c10
-rw-r--r--drivers/s390/net/qeth_core_mpc.c247
-rw-r--r--drivers/s390/net/qeth_core_mpc.h20
-rw-r--r--drivers/s390/net/qeth_core_sys.c25
-rw-r--r--drivers/s390/net/qeth_ethtool.c3
-rw-r--r--drivers/s390/net/qeth_l2_main.c4
-rw-r--r--drivers/s390/net/qeth_l3_main.c4
-rw-r--r--drivers/s390/net/smsgiucv.c1
-rw-r--r--drivers/s390/net/smsgiucv_app.c12
-rw-r--r--drivers/s390/scsi/zfcp_aux.c3
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c3
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c3
-rw-r--r--drivers/s390/scsi/zfcp_erp.c3
-rw-r--r--drivers/s390/scsi/zfcp_fc.c3
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c3
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c3
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c3
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c5
-rw-r--r--drivers/scsi/3w-9xxx.c2
-rw-r--r--drivers/scsi/3w-sas.c6
-rw-r--r--drivers/scsi/3w-xxxx.c2
-rw-r--r--drivers/scsi/BusLogic.c8
-rw-r--r--drivers/scsi/BusLogic.h2
-rw-r--r--drivers/scsi/Kconfig2
-rw-r--r--drivers/scsi/aacraid/comminit.c3
-rw-r--r--drivers/scsi/aacraid/linit.c8
-rw-r--r--drivers/scsi/advansys.c5
-rw-r--r--drivers/scsi/aha152x.c4
-rw-r--r--drivers/scsi/aha1542.c2
-rw-r--r--drivers/scsi/aha1740.c2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c4
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c3
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c1
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c6
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c6
-rw-r--r--drivers/scsi/atp870u.c2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c3
-rw-r--r--drivers/scsi/bfa/bfa_core.c1
-rw-r--r--drivers/scsi/bfa/bfad.c1
-rw-r--r--drivers/scsi/bfa/bfad_im.c1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c2
-rw-r--r--drivers/scsi/csiostor/csio_init.c1
-rw-r--r--drivers/scsi/csiostor/csio_wr.c4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c2
-rw-r--r--drivers/scsi/elx/efct/efct_hw.c5
-rw-r--r--drivers/scsi/elx/efct/efct_lio.c2
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c6
-rw-r--r--drivers/scsi/fcoe/fcoe.c24
-rw-r--r--drivers/scsi/fdomain.c4
-rw-r--r--drivers/scsi/fnic/fdls_disc.c187
-rw-r--r--drivers/scsi/fnic/fnic.h4
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c2
-rw-r--r--drivers/scsi/fnic/fnic_fdls.h1
-rw-r--r--drivers/scsi/fnic/fnic_res.c1
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c2
-rw-r--r--drivers/scsi/fnic/fnic_trace.c57
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c12
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c12
-rw-r--r--drivers/scsi/hosts.c42
-rw-r--r--drivers/scsi/hpsa.c53
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c2
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c5
-rw-r--r--drivers/scsi/ibmvscsi_tgt/libsrp.c6
-rw-r--r--drivers/scsi/imm.c2
-rw-r--r--drivers/scsi/initio.c4
-rw-r--r--drivers/scsi/ipr.c27
-rw-r--r--drivers/scsi/ips.c2
-rw-r--r--drivers/scsi/ips.h2
-rw-r--r--drivers/scsi/isci/remote_device.c2
-rw-r--r--drivers/scsi/isci/request.c2
-rw-r--r--drivers/scsi/isci/task.h10
-rw-r--r--drivers/scsi/libfc/fc_encode.h2
-rw-r--r--drivers/scsi/libfc/fc_fcp.c2
-rw-r--r--drivers/scsi/libiscsi.c3
-rw-r--r--drivers/scsi/libsas/sas_ata.c14
-rw-r--r--drivers/scsi/libsas/sas_discover.c2
-rw-r--r--drivers/scsi/libsas/sas_expander.c5
-rw-r--r--drivers/scsi/libsas/sas_internal.h78
-rw-r--r--drivers/scsi/libsas/sas_phy.c6
-rw-r--r--drivers/scsi/libsas/sas_port.c13
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h56
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c64
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c666
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h16
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c283
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h28
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h26
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c118
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c46
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c23
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c114
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c2
-rw-r--r--drivers/scsi/megaraid.c4
-rw-r--r--drivers/scsi/megaraid.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c25
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h17
-rw-r--r--drivers/scsi/mesh.c1
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h38
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_pci.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_sas.h1
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_transport.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h14
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c10
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c30
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c54
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c11
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c8
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c26
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c11
-rw-r--r--drivers/scsi/mvsas/mv_defs.h4
-rw-r--r--drivers/scsi/mvsas/mv_init.c2
-rw-r--r--drivers/scsi/mvsas/mv_sas.c6
-rw-r--r--drivers/scsi/mvumi.c2
-rw-r--r--drivers/scsi/myrb.c2
-rw-r--r--drivers/scsi/myrs.c8
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c24
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c11
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c3
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c34
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h15
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c70
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h4
-rw-r--r--drivers/scsi/ppa.c2
-rw-r--r--drivers/scsi/qedf/qedf_attr.c4
-rw-r--r--drivers/scsi/qedf/qedf_main.c15
-rw-r--r--drivers/scsi/qedi/qedi_main.c2
-rw-r--r--drivers/scsi/qla1280.c37
-rw-r--r--drivers/scsi/qla2xxx/Kconfig6
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c28
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c20
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c42
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c52
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c57
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c1777
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h112
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c17
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c17
-rw-r--r--drivers/scsi/qlogicfas408.c2
-rw-r--r--drivers/scsi/qlogicfas408.h2
-rw-r--r--drivers/scsi/scsi.c27
-rw-r--r--drivers/scsi/scsi_debug.c242
-rw-r--r--drivers/scsi/scsi_devinfo.c11
-rw-r--r--drivers/scsi/scsi_error.c10
-rw-r--r--drivers/scsi/scsi_lib.c109
-rw-r--r--drivers/scsi/scsi_logging.c21
-rw-r--r--drivers/scsi/scsi_pm.c1
-rw-r--r--drivers/scsi/scsi_priv.h1
-rw-r--r--drivers/scsi/scsi_scan.c79
-rw-r--r--drivers/scsi/scsi_sysfs.c89
-rw-r--r--drivers/scsi/scsi_transport_fc.c75
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c15
-rw-r--r--drivers/scsi/scsi_transport_sas.c60
-rw-r--r--drivers/scsi/scsicam.c16
-rw-r--r--drivers/scsi/sd.c134
-rw-r--r--drivers/scsi/sd.h2
-rw-r--r--drivers/scsi/sd_dif.c3
-rw-r--r--drivers/scsi/sd_zbc.c20
-rw-r--r--drivers/scsi/sg.c13
-rw-r--r--drivers/scsi/sim710.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c71
-rw-r--r--drivers/scsi/snic/snic_debugfs.c10
-rw-r--r--drivers/scsi/snic/snic_trc.c5
-rw-r--r--drivers/scsi/sr.c16
-rw-r--r--drivers/scsi/st.c89
-rw-r--r--drivers/scsi/stex.c4
-rw-r--r--drivers/scsi/storvsc_drv.c112
-rw-r--r--drivers/scsi/virtio_scsi.c1
-rw-r--r--drivers/scsi/wd719x.c2
-rw-r--r--drivers/sh/clk/core.c10
-rw-r--r--drivers/sh/intc/core.c12
-rw-r--r--drivers/siox/siox-bus-gpio.c3
-rw-r--r--drivers/slimbus/Kconfig7
-rw-r--r--drivers/slimbus/Makefile3
-rw-r--r--drivers/slimbus/messaging.c4
-rw-r--r--drivers/slimbus/qcom-ctrl.c735
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c3
-rw-r--r--drivers/soc/amlogic/meson-canvas.c12
-rw-r--r--drivers/soc/amlogic/meson-gx-socinfo.c6
-rw-r--r--drivers/soc/apple/Kconfig3
-rw-r--r--drivers/soc/apple/mailbox.c34
-rw-r--r--drivers/soc/apple/rtkit.c3
-rw-r--r--drivers/soc/apple/sart.c73
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-ctrl.c14
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-snoop.c228
-rw-r--r--drivers/soc/aspeed/aspeed-p2a-ctrl.c14
-rw-r--r--drivers/soc/aspeed/aspeed-socinfo.c4
-rw-r--r--drivers/soc/bcm/brcmstb/biuctrl.c12
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm.h2
-rw-r--r--drivers/soc/fsl/qbman/qman.c2
-rw-r--r--drivers/soc/fsl/qbman/qman_test_stash.c4
-rw-r--r--drivers/soc/fsl/qe/gpio.c149
-rw-r--r--drivers/soc/fsl/qe/qe_ic.c3
-rw-r--r--drivers/soc/fsl/qe/qmc.c44
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.c6
-rw-r--r--drivers/soc/mediatek/mtk-mutex.c109
-rw-r--r--drivers/soc/mediatek/mtk-socinfo.c3
-rw-r--r--drivers/soc/mediatek/mtk-svs.c23
-rw-r--r--drivers/soc/microchip/Kconfig12
-rw-r--r--drivers/soc/microchip/Makefile1
-rw-r--r--drivers/soc/microchip/mpfs-control-scb.c38
-rw-r--r--drivers/soc/microchip/mpfs-mss-top-sysreg.c44
-rw-r--r--drivers/soc/qcom/Kconfig8
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/icc-bwmon.c3
-rw-r--r--drivers/soc/qcom/ice.c81
-rw-r--r--drivers/soc/qcom/llcc-qcom.c374
-rw-r--r--drivers/soc/qcom/mdt_loader.c123
-rw-r--r--drivers/soc/qcom/ocmem.c2
-rw-r--r--drivers/soc/qcom/pmic_glink.c18
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c519
-rw-r--r--drivers/soc/qcom/qcom-pbs.c2
-rw-r--r--drivers/soc/qcom/qcom_gsbi.c8
-rw-r--r--drivers/soc/qcom/qcom_pd_mapper.c11
-rw-r--r--drivers/soc/qcom/qcom_stats.c133
-rw-r--r--drivers/soc/qcom/qmi_encdec.c52
-rw-r--r--drivers/soc/qcom/qmi_interface.c6
-rw-r--r--drivers/soc/qcom/ramp_controller.c1
-rw-r--r--drivers/soc/qcom/rpm_master_stats.c2
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c9
-rw-r--r--drivers/soc/qcom/smem.c35
-rw-r--r--drivers/soc/qcom/socinfo.c113
-rw-r--r--drivers/soc/qcom/ubwc_config.c317
-rw-r--r--drivers/soc/renesas/Kconfig331
-rw-r--r--drivers/soc/renesas/pwc-rzv2m.c6
-rw-r--r--drivers/soc/renesas/r9a08g045-sysc.c70
-rw-r--r--drivers/soc/renesas/r9a09g047-sys.c80
-rw-r--r--drivers/soc/renesas/r9a09g056-sys.c69
-rw-r--r--drivers/soc/renesas/r9a09g057-sys.c102
-rw-r--r--drivers/soc/renesas/rcar-rst.c3
-rw-r--r--drivers/soc/renesas/renesas-soc.c16
-rw-r--r--drivers/soc/renesas/rz-sysc.c35
-rw-r--r--drivers/soc/renesas/rz-sysc.h6
-rw-r--r--drivers/soc/rockchip/grf.c50
-rw-r--r--drivers/soc/samsung/Makefile3
-rw-r--r--drivers/soc/samsung/exynos-chipid.c18
-rw-r--r--drivers/soc/samsung/exynos-pmu.c411
-rw-r--r--drivers/soc/samsung/exynos-pmu.h37
-rw-r--r--drivers/soc/samsung/gs101-pmu.c446
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c14
-rw-r--r--drivers/soc/tegra/Kconfig18
-rw-r--r--drivers/soc/tegra/cbb/tegra194-cbb.c36
-rw-r--r--drivers/soc/tegra/cbb/tegra234-cbb.c758
-rw-r--r--drivers/soc/tegra/common.c12
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c2
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c122
-rw-r--r--drivers/soc/tegra/fuse/speedo-tegra210.c63
-rw-r--r--drivers/soc/tegra/fuse/tegra-apbmisc.c1
-rw-r--r--drivers/soc/tegra/pmc.c239
-rw-r--r--drivers/soc/ti/k3-socinfo.c10
-rw-r--r--drivers/soc/ti/knav_dma.c14
-rw-r--r--drivers/soc/ti/pm33xx.c2
-rw-r--r--drivers/soc/ti/pruss.c2
-rw-r--r--drivers/soc/xilinx/xlnx_event_manager.c8
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c10
-rw-r--r--drivers/soundwire/amd_manager.c18
-rw-r--r--drivers/soundwire/bus.c18
-rw-r--r--drivers/soundwire/bus_type.c5
-rw-r--r--drivers/soundwire/debugfs.c8
-rw-r--r--drivers/soundwire/intel_ace2x.c11
-rw-r--r--drivers/soundwire/intel_auxdevice.c1
-rw-r--r--drivers/soundwire/mipi_disco.c4
-rw-r--r--drivers/soundwire/qcom.c37
-rw-r--r--drivers/soundwire/slave.c6
-rw-r--r--drivers/soundwire/stream.c2
-rw-r--r--drivers/spi/Kconfig87
-rw-r--r--drivers/spi/Makefile8
-rw-r--r--drivers/spi/atmel-quadspi.c187
-rw-r--r--drivers/spi/spi-airoha-snfi.c538
-rw-r--r--drivers/spi/spi-altera-platform.c1
-rw-r--r--drivers/spi/spi-amd-pci.c5
-rw-r--r--drivers/spi/spi-amd.c2
-rw-r--r--drivers/spi/spi-amlogic-spifc-a1.c4
-rw-r--r--drivers/spi/spi-amlogic-spifc-a4.c1222
-rw-r--r--drivers/spi/spi-amlogic-spisg.c888
-rw-r--r--drivers/spi/spi-apple.c1
-rw-r--r--drivers/spi/spi-aspeed-smc.c747
-rw-r--r--drivers/spi/spi-atmel.c78
-rw-r--r--drivers/spi/spi-axi-spi-engine.c19
-rw-r--r--drivers/spi/spi-bcm2835.c2
-rw-r--r--drivers/spi/spi-bcm63xx.c18
-rw-r--r--drivers/spi/spi-cadence-quadspi.c128
-rw-r--r--drivers/spi/spi-cadence.c107
-rw-r--r--drivers/spi/spi-ch341.c2
-rw-r--r--drivers/spi/spi-cs42l43.c42
-rw-r--r--drivers/spi/spi-davinci.c64
-rw-r--r--drivers/spi/spi-dw-bt1.c4
-rw-r--r--drivers/spi/spi-dw-core.c188
-rw-r--r--drivers/spi/spi-dw-dma.c22
-rw-r--r--drivers/spi/spi-dw-mmio.c13
-rw-r--r--drivers/spi/spi-dw-pci.c8
-rw-r--r--drivers/spi/spi-dw.h12
-rw-r--r--drivers/spi/spi-falcon.c5
-rw-r--r--drivers/spi/spi-fsl-dspi.c599
-rw-r--r--drivers/spi/spi-fsl-espi.c2
-rw-r--r--drivers/spi/spi-fsl-lpspi.c71
-rw-r--r--drivers/spi/spi-fsl-qspi.c88
-rw-r--r--drivers/spi/spi-geni-qcom.c6
-rw-r--r--drivers/spi/spi-gpio.c16
-rw-r--r--drivers/spi/spi-imx.c76
-rw-r--r--drivers/spi/spi-intel-pci.c3
-rw-r--r--drivers/spi/spi-intel.c19
-rw-r--r--drivers/spi/spi-ljca.c2
-rw-r--r--drivers/spi/spi-loongson-core.c1
-rw-r--r--drivers/spi/spi-loopback-test.c12
-rw-r--r--drivers/spi/spi-mem.c36
-rw-r--r--drivers/spi/spi-microchip-core-qspi.c241
-rw-r--r--drivers/spi/spi-microchip-core-spi.c429
-rw-r--r--drivers/spi/spi-mpfs.c (renamed from drivers/spi/spi-microchip-core.c)204
-rw-r--r--drivers/spi/spi-mt65xx.c41
-rw-r--r--drivers/spi/spi-mtk-nor.c1
-rw-r--r--drivers/spi/spi-mtk-snfi.c1
-rw-r--r--drivers/spi/spi-mxs.c2
-rw-r--r--drivers/spi/spi-npcm-fiu.c6
-rw-r--r--drivers/spi/spi-nxp-fspi.c156
-rw-r--r--drivers/spi/spi-offload-trigger-adi-util-sigma-delta.c62
-rw-r--r--drivers/spi/spi-offload-trigger-pwm.c3
-rw-r--r--drivers/spi/spi-offload.c2
-rw-r--r--drivers/spi/spi-omap2-mcspi.c34
-rw-r--r--drivers/spi/spi-pci1xxxx.c289
-rw-r--r--drivers/spi/spi-pl022.c13
-rw-r--r--drivers/spi/spi-pxa2xx.c2
-rw-r--r--drivers/spi/spi-qpic-snand.c178
-rw-r--r--drivers/spi/spi-rb4xx.c36
-rw-r--r--drivers/spi/spi-rockchip-sfc.c15
-rw-r--r--drivers/spi/spi-rpc-if.c12
-rw-r--r--drivers/spi/spi-rspi.c9
-rw-r--r--drivers/spi/spi-rzv2h-rspi.c687
-rw-r--r--drivers/spi/spi-s3c64xx.c22
-rw-r--r--drivers/spi/spi-sg2044-nor.c33
-rw-r--r--drivers/spi/spi-sh-msiof.c11
-rw-r--r--drivers/spi/spi-sprd.c1
-rw-r--r--drivers/spi/spi-st-ssc4.c14
-rw-r--r--drivers/spi/spi-stm32-ospi.c55
-rw-r--r--drivers/spi/spi-stm32-qspi.c7
-rw-r--r--drivers/spi/spi-stm32.c318
-rw-r--r--drivers/spi/spi-sunplus-sp7021.c6
-rw-r--r--drivers/spi/spi-tegra210-quad.c188
-rw-r--r--drivers/spi/spi-ti-qspi.c2
-rw-r--r--drivers/spi/spi-tle62x0.c2
-rw-r--r--drivers/spi/spi-virtio.c431
-rw-r--r--drivers/spi/spi-xcomm.c2
-rw-r--r--drivers/spi/spi-xilinx.c7
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c1
-rw-r--r--drivers/spi/spi.c114
-rw-r--r--drivers/spi/spidev.c4
-rw-r--r--drivers/ssb/driver_gpio.c12
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c536
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.txt5
-rw-r--r--drivers/staging/fbtft/fbtft-core.c42
-rw-r--r--drivers/staging/gpib/TODO21
-rw-r--r--drivers/staging/gpib/uapi/gpib.h302
-rw-r--r--drivers/staging/gpib/uapi/gpib_ioctl.h163
-rw-r--r--drivers/staging/greybus/Documentation/firmware/firmware.c28
-rw-r--r--drivers/staging/greybus/audio_codec.c16
-rw-r--r--drivers/staging/greybus/audio_helper.c9
-rw-r--r--drivers/staging/greybus/audio_topology.c24
-rw-r--r--drivers/staging/greybus/camera.c2
-rw-r--r--drivers/staging/greybus/gbphy.c6
-rw-r--r--drivers/staging/greybus/gpio.c8
-rw-r--r--drivers/staging/greybus/power_supply.c14
-rw-r--r--drivers/staging/greybus/uart.c15
-rw-r--r--drivers/staging/iio/adc/ad7816.c2
-rw-r--r--drivers/staging/iio/addac/adt7316.c102
-rw-r--r--drivers/staging/iio/frequency/ad9834.c3
-rw-r--r--drivers/staging/iio/frequency/ad9834.h10
-rw-r--r--drivers/staging/media/Kconfig4
-rw-r--r--drivers/staging/media/Makefile2
-rw-r--r--drivers/staging/media/atomisp/Kconfig1
-rw-r--r--drivers/staging/media/atomisp/Makefile1
-rw-r--r--drivers/staging/media/atomisp/TODO2
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig8
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile1
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc0310.c712
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc2235.c6
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2722.c6
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.h16
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2722.h16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.h17
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c233
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_drvfs.c155
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_drvfs.h15
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.c5
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c129
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.h3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c5
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/math_support.h5
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm.c91
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_bo.c5
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_pipe.h2
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr_types.h4
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2_param.h6
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c4
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8_param.h22
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c6
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc_param.h2
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c3
-rw-r--r--drivers/staging/media/atomisp/pci/isp/modes/interface/input_buf.isp.h6
-rw-r--r--drivers/staging/media/atomisp/pci/isp/modes/interface/isp_const.h157
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c4
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c1
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c29
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c11
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h1
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c2
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c27
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_defs.h12
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_internal.h8
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.c11
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.h2
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_dvs.h22
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params.c12
-rw-r--r--drivers/staging/media/av7110/av7110.c2
-rw-r--r--drivers/staging/media/av7110/av7110_ca.c2
-rw-r--r--drivers/staging/media/av7110/av7110_v4l.c4
-rw-r--r--drivers/staging/media/imx/imx-media-csc-scaler.c30
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c8
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c3
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c5
-rw-r--r--drivers/staging/media/ipu3/ipu3.c3
-rw-r--r--drivers/staging/media/ipu3/ipu3.h1
-rw-r--r--drivers/staging/media/ipu7/Kconfig19
-rw-r--r--drivers/staging/media/ipu7/Makefile23
-rw-r--r--drivers/staging/media/ipu7/TODO28
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_boot_abi.h163
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_common_abi.h175
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_config_abi.h19
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_insys_config_abi.h19
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_isys_abi.h412
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_msg_abi.h465
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_psys_config_abi.h24
-rw-r--r--drivers/staging/media/ipu7/abi/ipu7_fw_syscom_abi.h49
-rw-r--r--drivers/staging/media/ipu7/ipu7-boot.c430
-rw-r--r--drivers/staging/media/ipu7/ipu7-boot.h25
-rw-r--r--drivers/staging/media/ipu7/ipu7-bus.c158
-rw-r--r--drivers/staging/media/ipu7/ipu7-bus.h69
-rw-r--r--drivers/staging/media/ipu7/ipu7-buttress-regs.h461
-rw-r--r--drivers/staging/media/ipu7/ipu7-buttress.c1192
-rw-r--r--drivers/staging/media/ipu7/ipu7-buttress.h77
-rw-r--r--drivers/staging/media/ipu7/ipu7-cpd.c276
-rw-r--r--drivers/staging/media/ipu7/ipu7-cpd.h16
-rw-r--r--drivers/staging/media/ipu7/ipu7-dma.c477
-rw-r--r--drivers/staging/media/ipu7/ipu7-dma.h46
-rw-r--r--drivers/staging/media/ipu7/ipu7-fw-isys.c301
-rw-r--r--drivers/staging/media/ipu7/ipu7-fw-isys.h39
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi-phy.c1034
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi-phy.h16
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi2-regs.h1197
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi2.c543
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi2.h64
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-queue.c828
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-queue.h72
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-subdev.c333
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-subdev.h52
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-video.c1078
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-video.h117
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys.c1166
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys.h140
-rw-r--r--drivers/staging/media/ipu7/ipu7-mmu.c853
-rw-r--r--drivers/staging/media/ipu7/ipu7-mmu.h414
-rw-r--r--drivers/staging/media/ipu7/ipu7-platform-regs.h82
-rw-r--r--drivers/staging/media/ipu7/ipu7-syscom.c78
-rw-r--r--drivers/staging/media/ipu7/ipu7-syscom.h35
-rw-r--r--drivers/staging/media/ipu7/ipu7.c2778
-rw-r--r--drivers/staging/media/ipu7/ipu7.h242
-rw-r--r--drivers/staging/media/meson/vdec/vdec.c29
-rw-r--r--drivers/staging/media/meson/vdec/vdec.h5
-rw-r--r--drivers/staging/media/rkvdec/Makefile3
-rw-r--r--drivers/staging/media/rkvdec/TODO11
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c8
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h5
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c19
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c23
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.c16
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.c6
-rw-r--r--drivers/staging/media/tegra-video/tegra20.c6
-rw-r--r--drivers/staging/most/Kconfig2
-rw-r--r--drivers/staging/most/Makefile1
-rw-r--r--drivers/staging/most/i2c/Kconfig13
-rw-r--r--drivers/staging/most/i2c/Makefile4
-rw-r--r--drivers/staging/most/i2c/i2c.c374
-rw-r--r--drivers/staging/most/video/video.c19
-rw-r--r--drivers/staging/nvec/nvec_power.c2
-rw-r--r--drivers/staging/nvec/nvec_ps2.c12
-rw-r--r--drivers/staging/octeon/ethernet-tx.c43
-rw-r--r--drivers/staging/octeon/octeon-stubs.h134
-rw-r--r--drivers/staging/rtl8723bs/Makefile2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c329
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c3
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c195
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c38
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_io.c48
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c457
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c210
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c20
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c194
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c357
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c12
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c88
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf.h2
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.h7
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c72
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c5
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_intf.c42
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_pwr_seq.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.c175
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.h6
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c60
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_dm.c7
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c482
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c6
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_halinit.c29
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c5
-rw-r--r--drivers/staging/rtl8723bs/include/basic_types.h46
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h10
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com.h4
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_reg.h4
-rw-r--r--drivers/staging/rtl8723bs/include/hal_intf.h26
-rw-r--r--drivers/staging/rtl8723bs/include/ioctl_cfg80211.h1
-rw-r--r--drivers/staging/rtl8723bs/include/mlme_osdep.h19
-rw-r--r--drivers/staging/rtl8723bs/include/recv_osdep.h40
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_cmd.h3
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_hal.h5
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_xmit.h1
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_efuse.h16
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h7
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h6
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h4
-rw-r--r--drivers/staging/rtl8723bs/include/sdio_hal.h2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c41
-rw-r--r--drivers/staging/rtl8723bs/os_dep/mlme_linux.c179
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c19
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c225
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c6
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/wifi_regd.c16
-rw-r--r--drivers/staging/sm750fb/sm750.c49
-rw-r--r--drivers/staging/sm750fb/sm750.h10
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c18
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c4
-rw-r--r--drivers/staging/vc04_services/Kconfig49
-rw-r--r--drivers/staging/vc04_services/Makefile14
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c5
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.c3
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.h3
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/Kconfig13
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/Makefile6
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/TODO17
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c2011
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h142
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/controls.c1399
-rw-r--r--drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h112
-rw-r--r--drivers/staging/vc04_services/interface/TODO28
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h164
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.h60
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_cfg.h41
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h598
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h22
-rw-r--r--drivers/staging/vme_user/vme.c6
-rw-r--r--drivers/staging/vme_user/vme_fake.c2
-rw-r--r--drivers/staging/vme_user/vme_tsi148.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c3
-rw-r--r--drivers/target/loopback/tcm_loop.c3
-rw-r--r--drivers/target/sbp/sbp_target.c8
-rw-r--r--drivers/target/target_core_configfs.c52
-rw-r--r--drivers/target/target_core_device.c24
-rw-r--r--drivers/target/target_core_fabric_configfs.c2
-rw-r--r--drivers/target/target_core_fabric_lib.c63
-rw-r--r--drivers/target/target_core_file.c4
-rw-r--r--drivers/target/target_core_iblock.c42
-rw-r--r--drivers/target/target_core_iblock.h1
-rw-r--r--drivers/target/target_core_internal.h5
-rw-r--r--drivers/target/target_core_pr.c22
-rw-r--r--drivers/target/target_core_pscsi.c2
-rw-r--r--drivers/target/target_core_sbc.c51
-rw-r--r--drivers/target/target_core_spc.c49
-rw-r--r--drivers/target/target_core_stat.c268
-rw-r--r--drivers/target/target_core_tpg.c23
-rw-r--r--drivers/target/target_core_transport.c26
-rw-r--r--drivers/target/target_core_xcopy.c2
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c2
-rw-r--r--drivers/tee/Kconfig9
-rw-r--r--drivers/tee/Makefile2
-rw-r--r--drivers/tee/optee/Kconfig5
-rw-r--r--drivers/tee/optee/Makefile1
-rw-r--r--drivers/tee/optee/core.c9
-rw-r--r--drivers/tee/optee/ffa_abi.c189
-rw-r--r--drivers/tee/optee/optee_ffa.h27
-rw-r--r--drivers/tee/optee/optee_msg.h84
-rw-r--r--drivers/tee/optee/optee_private.h17
-rw-r--r--drivers/tee/optee/optee_smc.h37
-rw-r--r--drivers/tee/optee/protmem.c335
-rw-r--r--drivers/tee/optee/smc_abi.c141
-rw-r--r--drivers/tee/qcomtee/Kconfig13
-rw-r--r--drivers/tee/qcomtee/Makefile9
-rw-r--r--drivers/tee/qcomtee/async.c182
-rw-r--r--drivers/tee/qcomtee/call.c820
-rw-r--r--drivers/tee/qcomtee/core.c915
-rw-r--r--drivers/tee/qcomtee/mem_obj.c169
-rw-r--r--drivers/tee/qcomtee/primordial_obj.c113
-rw-r--r--drivers/tee/qcomtee/qcomtee.h185
-rw-r--r--drivers/tee/qcomtee/qcomtee_msg.h304
-rw-r--r--drivers/tee/qcomtee/qcomtee_object.h316
-rw-r--r--drivers/tee/qcomtee/shm.c150
-rw-r--r--drivers/tee/qcomtee/user_obj.c692
-rw-r--r--drivers/tee/tee_core.c342
-rw-r--r--drivers/tee/tee_heap.c500
-rw-r--r--drivers/tee/tee_private.h20
-rw-r--r--drivers/tee/tee_shm.c179
-rw-r--r--drivers/thermal/Kconfig10
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/armada_thermal.c2
-rw-r--r--drivers/thermal/da9062-thermal.c2
-rw-r--r--drivers/thermal/dove_thermal.c2
-rw-r--r--drivers/thermal/gov_step_wise.c25
-rw-r--r--drivers/thermal/imx91_thermal.c384
-rw-r--r--drivers/thermal/imx_thermal.c2
-rw-r--r--drivers/thermal/intel/Kconfig3
-rw-r--r--drivers/thermal/intel/int340x_thermal/Kconfig1
-rw-r--r--drivers/thermal/intel/int340x_thermal/Makefile1
-rw-r--r--drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c3
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c16
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/platform_temperature_control.c72
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c20
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h9
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c15
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c16
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_soc_slider.c284
-rw-r--r--drivers/thermal/intel/intel_hfi.c12
-rw-r--r--drivers/thermal/k3_j72xx_bandgap.c4
-rw-r--r--drivers/thermal/kirkwood_thermal.c2
-rw-r--r--drivers/thermal/loongson2_thermal.c15
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c78
-rw-r--r--drivers/thermal/qcom/Kconfig3
-rw-r--r--drivers/thermal/qcom/lmh.c7
-rw-r--r--drivers/thermal/qcom/qcom-spmi-temp-alarm.c596
-rw-r--r--drivers/thermal/renesas/Kconfig21
-rw-r--r--drivers/thermal/renesas/Makefile2
-rw-r--r--drivers/thermal/renesas/rcar_gen3_thermal.c73
-rw-r--r--drivers/thermal/renesas/rcar_thermal.c10
-rw-r--r--drivers/thermal/renesas/rzg3e_thermal.c547
-rw-r--r--drivers/thermal/renesas/rzg3s_thermal.c272
-rw-r--r--drivers/thermal/rockchip_thermal.c301
-rw-r--r--drivers/thermal/spear_thermal.c2
-rw-r--r--drivers/thermal/st/st_thermal.c2
-rw-r--r--drivers/thermal/tegra/Makefile1
-rw-r--r--drivers/thermal/tegra/soctherm-fuse.c18
-rw-r--r--drivers/thermal/tegra/soctherm.c26
-rw-r--r--drivers/thermal/tegra/soctherm.h11
-rw-r--r--drivers/thermal/tegra/tegra114-soctherm.c209
-rw-r--r--drivers/thermal/tegra/tegra124-soctherm.c4
-rw-r--r--drivers/thermal/tegra/tegra132-soctherm.c4
-rw-r--r--drivers/thermal/tegra/tegra210-soctherm.c4
-rw-r--r--drivers/thermal/testing/command.c30
-rw-r--r--drivers/thermal/testing/zone.c33
-rw-r--r--drivers/thermal/thermal-generic-adc.c55
-rw-r--r--drivers/thermal/thermal_hwmon.c2
-rw-r--r--drivers/thermal/thermal_sysfs.c9
-rw-r--r--drivers/thunderbolt/Kconfig4
-rw-r--r--drivers/thunderbolt/acpi.c28
-rw-r--r--drivers/thunderbolt/cap.c49
-rw-r--r--drivers/thunderbolt/clx.c12
-rw-r--r--drivers/thunderbolt/ctl.c35
-rw-r--r--drivers/thunderbolt/ctl.h1
-rw-r--r--drivers/thunderbolt/debugfs.c7
-rw-r--r--drivers/thunderbolt/dma_port.c21
-rw-r--r--drivers/thunderbolt/domain.c77
-rw-r--r--drivers/thunderbolt/eeprom.c6
-rw-r--r--drivers/thunderbolt/icm.c8
-rw-r--r--drivers/thunderbolt/lc.c60
-rw-r--r--drivers/thunderbolt/nhi.c24
-rw-r--r--drivers/thunderbolt/nhi.h1
-rw-r--r--drivers/thunderbolt/nhi_regs.h6
-rw-r--r--drivers/thunderbolt/nvm.c44
-rw-r--r--drivers/thunderbolt/path.c14
-rw-r--r--drivers/thunderbolt/property.c38
-rw-r--r--drivers/thunderbolt/retimer.c9
-rw-r--r--drivers/thunderbolt/switch.c158
-rw-r--r--drivers/thunderbolt/tb.c48
-rw-r--r--drivers/thunderbolt/tb.h61
-rw-r--r--drivers/thunderbolt/tb_regs.h6
-rw-r--r--drivers/thunderbolt/tmu.c20
-rw-r--r--drivers/thunderbolt/tunnel.c104
-rw-r--r--drivers/thunderbolt/tunnel.h9
-rw-r--r--drivers/thunderbolt/usb4.c384
-rw-r--r--drivers/thunderbolt/usb4_port.c7
-rw-r--r--drivers/thunderbolt/xdomain.c57
-rw-r--r--drivers/tty/amiserial.c14
-rw-r--r--drivers/tty/hvc/hvc_console.c8
-rw-r--r--drivers/tty/moxa.c169
-rw-r--r--drivers/tty/mxser.c259
-rw-r--r--drivers/tty/n_gsm.c25
-rw-r--r--drivers/tty/n_hdlc.c79
-rw-r--r--drivers/tty/n_tty.c109
-rw-r--r--drivers/tty/pty.c154
-rw-r--r--drivers/tty/serdev/core.c11
-rw-r--r--drivers/tty/serial/8250/8250.h26
-rw-r--r--drivers/tty/serial/8250/8250_ce4100.c93
-rw-r--r--drivers/tty/serial/8250/8250_core.c395
-rw-r--r--drivers/tty/serial/8250/8250_dw.c44
-rw-r--r--drivers/tty/serial/8250/8250_em.c4
-rw-r--r--drivers/tty/serial/8250/8250_exar.c15
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c8
-rw-r--r--drivers/tty/serial/8250/8250_ioc3.c6
-rw-r--r--drivers/tty/serial/8250/8250_keba.c280
-rw-r--r--drivers/tty/serial/8250/8250_loongson.c238
-rw-r--r--drivers/tty/serial/8250/8250_lpc18xx.c2
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c6
-rw-r--r--drivers/tty/serial/8250/8250_ni.c56
-rw-r--r--drivers/tty/serial/8250/8250_of.c2
-rw-r--r--drivers/tty/serial/8250/8250_omap.c234
-rw-r--r--drivers/tty/serial/8250/8250_pci.c54
-rw-r--r--drivers/tty/serial/8250/8250_pci1xxxx.c10
-rw-r--r--drivers/tty/serial/8250/8250_pcilib.c7
-rw-r--r--drivers/tty/serial/8250/8250_pcilib.h2
-rw-r--r--drivers/tty/serial/8250/8250_platform.c142
-rw-r--r--drivers/tty/serial/8250/8250_port.c978
-rw-r--r--drivers/tty/serial/8250/8250_rsa.c125
-rw-r--r--drivers/tty/serial/8250/8250_rt288x.c4
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c4
-rw-r--r--drivers/tty/serial/8250/Kconfig40
-rw-r--r--drivers/tty/serial/8250/Makefile7
-rw-r--r--drivers/tty/serial/Kconfig23
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/amba-pl011.c2
-rw-r--r--drivers/tty/serial/ar933x_uart.c62
-rw-r--r--drivers/tty/serial/fsl_lpuart.c16
-rw-r--r--drivers/tty/serial/icom.c9
-rw-r--r--drivers/tty/serial/imx.c41
-rw-r--r--drivers/tty/serial/ip22zilog.c352
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c1
-rw-r--r--drivers/tty/serial/kgdboc.c1
-rw-r--r--drivers/tty/serial/max3100.c2
-rw-r--r--drivers/tty/serial/max310x.c30
-rw-r--r--drivers/tty/serial/msm_serial.c2
-rw-r--r--drivers/tty/serial/mux.c2
-rw-r--r--drivers/tty/serial/mvebu-uart.c10
-rw-r--r--drivers/tty/serial/pch_uart.c2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c542
-rw-r--r--drivers/tty/serial/rsci.c480
-rw-r--r--drivers/tty/serial/rsci.h10
-rw-r--r--drivers/tty/serial/samsung_tty.c2
-rw-r--r--drivers/tty/serial/sc16is7xx.c438
-rw-r--r--drivers/tty/serial/sc16is7xx.h1
-rw-r--r--drivers/tty/serial/sc16is7xx_i2c.c4
-rw-r--r--drivers/tty/serial/sc16is7xx_spi.c4
-rw-r--r--drivers/tty/serial/serial_base_bus.c4
-rw-r--r--drivers/tty/serial/serial_core.c389
-rw-r--r--drivers/tty/serial/sh-sci-common.h8
-rw-r--r--drivers/tty/serial/sh-sci.c431
-rw-r--r--drivers/tty/serial/sh-sci.h178
-rw-r--r--drivers/tty/serial/sprd_serial.c6
-rw-r--r--drivers/tty/serial/xilinx_uartps.c25
-rw-r--r--drivers/tty/synclink_gt.c20
-rw-r--r--drivers/tty/sysrq.c40
-rw-r--r--drivers/tty/tty_buffer.c11
-rw-r--r--drivers/tty/tty_port.c175
-rw-r--r--drivers/tty/vt/consolemap.c116
-rw-r--r--drivers/tty/vt/defkeymap.c_shipped112
-rw-r--r--drivers/tty/vt/keyboard.c320
-rw-r--r--drivers/tty/vt/selection.c29
-rw-r--r--drivers/tty/vt/ucs.c2
-rw-r--r--drivers/tty/vt/vc_screen.c74
-rw-r--r--drivers/tty/vt/vt.c252
-rw-r--r--drivers/tty/vt/vt_ioctl.c194
-rw-r--r--drivers/ufs/core/Makefile1
-rw-r--r--drivers/ufs/core/ufs-mcq.c77
-rw-r--r--drivers/ufs/core/ufs-rpmb.c254
-rw-r--r--drivers/ufs/core/ufs-sysfs.c202
-rw-r--r--drivers/ufs/core/ufs_bsg.c2
-rw-r--r--drivers/ufs/core/ufs_trace.h2
-rw-r--r--drivers/ufs/core/ufs_trace_types.h23
-rw-r--r--drivers/ufs/core/ufshcd-crypto.h18
-rw-r--r--drivers/ufs/core/ufshcd-priv.h54
-rw-r--r--drivers/ufs/core/ufshcd.c1257
-rw-r--r--drivers/ufs/host/Kconfig13
-rw-r--r--drivers/ufs/host/Makefile1
-rw-r--r--drivers/ufs/host/ti-j721e-ufs.c37
-rw-r--r--drivers/ufs/host/ufs-amd-versal2.c564
-rw-r--r--drivers/ufs/host/ufs-exynos.c14
-rw-r--r--drivers/ufs/host/ufs-mediatek.c786
-rw-r--r--drivers/ufs/host/ufs-mediatek.h37
-rw-r--r--drivers/ufs/host/ufs-qcom.c373
-rw-r--r--drivers/ufs/host/ufs-qcom.h37
-rw-r--r--drivers/ufs/host/ufs-rockchip.c20
-rw-r--r--drivers/ufs/host/ufshcd-dwc.h46
-rw-r--r--drivers/ufs/host/ufshcd-pci.c104
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c33
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.h1
-rw-r--r--drivers/uio/Kconfig14
-rw-r--r--drivers/uio/Makefile1
-rw-r--r--drivers/uio/uio_aec.c2
-rw-r--r--drivers/uio/uio_cif.c2
-rw-r--r--drivers/uio/uio_dmem_genirq.c23
-rw-r--r--drivers/uio/uio_fsl_elbc_gpcm.c7
-rw-r--r--drivers/uio/uio_hv_generic.c7
-rw-r--r--drivers/uio/uio_netx.c2
-rw-r--r--drivers/uio/uio_pci_generic_sva.c192
-rw-r--r--drivers/uio/uio_pdrv_genirq.c24
-rw-r--r--drivers/uio/uio_sercos3.c2
-rw-r--r--drivers/usb/atm/cxacru.c106
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c1
-rw-r--r--drivers/usb/cdns3/cdns3-pci-wrap.c5
-rw-r--r--drivers/usb/cdns3/cdns3-trace.h61
-rw-r--r--drivers/usb/cdns3/cdnsp-debug.h5
-rw-r--r--drivers/usb/cdns3/cdnsp-ep0.c18
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c9
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.h6
-rw-r--r--drivers/usb/cdns3/cdnsp-pci.c5
-rw-r--r--drivers/usb/cdns3/cdnsp-ring.c7
-rw-r--r--drivers/usb/cdns3/cdnsp-trace.h25
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c23
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h1
-rw-r--r--drivers/usb/chipidea/core.c4
-rw-r--r--drivers/usb/chipidea/otg_fsm.c1
-rw-r--r--drivers/usb/chipidea/udc.c12
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c149
-rw-r--r--drivers/usb/class/cdc-acm.c20
-rw-r--r--drivers/usb/class/usblp.c36
-rw-r--r--drivers/usb/class/usbtmc.c12
-rw-r--r--drivers/usb/core/Makefile6
-rw-r--r--drivers/usb/core/config.c14
-rw-r--r--drivers/usb/core/driver.c62
-rw-r--r--drivers/usb/core/generic.c2
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/core/hcd.c65
-rw-r--r--drivers/usb/core/hub.c82
-rw-r--r--drivers/usb/core/hub.h1
-rw-r--r--drivers/usb/core/message.c2
-rw-r--r--drivers/usb/core/offload.c136
-rw-r--r--drivers/usb/core/quirks.c8
-rw-r--r--drivers/usb/core/sysfs.c2
-rw-r--r--drivers/usb/core/trace.c6
-rw-r--r--drivers/usb/core/trace.h61
-rw-r--r--drivers/usb/core/urb.c45
-rw-r--r--drivers/usb/core/usb-acpi.c4
-rw-r--r--drivers/usb/core/usb.c133
-rw-r--r--drivers/usb/dwc2/gadget.c38
-rw-r--r--drivers/usb/dwc2/params.c28
-rw-r--r--drivers/usb/dwc2/platform.c16
-rw-r--r--drivers/usb/dwc3/Kconfig22
-rw-r--r--drivers/usb/dwc3/Makefile2
-rw-r--r--drivers/usb/dwc3/core.c48
-rw-r--r--drivers/usb/dwc3/core.h26
-rw-r--r--drivers/usb/dwc3/debug.h18
-rw-r--r--drivers/usb/dwc3/debugfs.c12
-rw-r--r--drivers/usb/dwc3/drd.c2
-rw-r--r--drivers/usb/dwc3/dwc3-am62.c1
-rw-r--r--drivers/usb/dwc3/dwc3-apple.c489
-rw-r--r--drivers/usb/dwc3/dwc3-generic-plat.c233
-rw-r--r--drivers/usb/dwc3/dwc3-imx8mp.c17
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c3
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c79
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c182
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c36
-rw-r--r--drivers/usb/dwc3/ep0.c21
-rw-r--r--drivers/usb/dwc3/gadget.c71
-rw-r--r--drivers/usb/dwc3/glue.h157
-rw-r--r--drivers/usb/dwc3/host.c7
-rw-r--r--drivers/usb/dwc3/trace.h17
-rw-r--r--drivers/usb/early/xhci-dbc.c4
-rw-r--r--drivers/usb/gadget/composite.c31
-rw-r--r--drivers/usb/gadget/config.c53
-rw-r--r--drivers/usb/gadget/configfs.c6
-rw-r--r--drivers/usb/gadget/function/f_acm.c42
-rw-r--r--drivers/usb/gadget/function/f_ecm.c48
-rw-r--r--drivers/usb/gadget/function/f_eem.c7
-rw-r--r--drivers/usb/gadget/function/f_fs.c171
-rw-r--r--drivers/usb/gadget/function/f_hid.c14
-rw-r--r--drivers/usb/gadget/function/f_midi2.c11
-rw-r--r--drivers/usb/gadget/function/f_ncm.c81
-rw-r--r--drivers/usb/gadget/function/f_rndis.c85
-rw-r--r--drivers/usb/gadget/function/f_uac1.c2
-rw-r--r--drivers/usb/gadget/function/f_uac2.c2
-rw-r--r--drivers/usb/gadget/function/u_serial.c18
-rw-r--r--drivers/usb/gadget/function/uvc.h5
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c10
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c8
-rw-r--r--drivers/usb/gadget/legacy/inode.c56
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c3
-rw-r--r--drivers/usb/gadget/legacy/zero.c27
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-gadget.c1
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-trace.h69
-rw-r--r--drivers/usb/gadget/udc/core.c21
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c10
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c6
-rw-r--r--drivers/usb/gadget/udc/net2280.c8
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c2
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c5
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c10
-rw-r--r--drivers/usb/gadget/udc/renesas_usbf.c4
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c18
-rw-r--r--drivers/usb/gadget/udc/trace.h5
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c4
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/ehci-platform.c40
-rw-r--r--drivers/usb/host/ehci-sysfs.c18
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c3
-rw-r--r--drivers/usb/host/max3421-hcd.c2
-rw-r--r--drivers/usb/host/ohci-at91.c2
-rw-r--r--drivers/usb/host/ohci-da8xx.c17
-rw-r--r--drivers/usb/host/ohci-platform.c24
-rw-r--r--drivers/usb/host/ohci-s3c2410.c8
-rw-r--r--drivers/usb/host/ohci-spear.c3
-rw-r--r--drivers/usb/host/sl811-hcd.c1
-rw-r--r--drivers/usb/host/uhci-hcd.h1
-rw-r--r--drivers/usb/host/uhci-platform.c28
-rw-r--r--drivers/usb/host/xen-hcd.c4
-rw-r--r--drivers/usb/host/xhci-caps.h165
-rw-r--r--drivers/usb/host/xhci-dbgcap.c121
-rw-r--r--drivers/usb/host/xhci-dbgcap.h1
-rw-r--r--drivers/usb/host/xhci-dbgtty.c24
-rw-r--r--drivers/usb/host/xhci-debugfs.c57
-rw-r--r--drivers/usb/host/xhci-hub.c128
-rw-r--r--drivers/usb/host/xhci-mem.c148
-rw-r--r--drivers/usb/host/xhci-mtk.c1
-rw-r--r--drivers/usb/host/xhci-mtk.h10
-rw-r--r--drivers/usb/host/xhci-pci-renesas.c7
-rw-r--r--drivers/usb/host/xhci-pci.c74
-rw-r--r--drivers/usb/host/xhci-plat.c62
-rw-r--r--drivers/usb/host/xhci-plat.h2
-rw-r--r--drivers/usb/host/xhci-port.h5
-rw-r--r--drivers/usb/host/xhci-rcar-regs.h49
-rw-r--r--drivers/usb/host/xhci-rcar.c100
-rw-r--r--drivers/usb/host/xhci-ring.c311
-rw-r--r--drivers/usb/host/xhci-rzg3e-regs.h12
-rw-r--r--drivers/usb/host/xhci-sideband.c124
-rw-r--r--drivers/usb/host/xhci-tegra.c97
-rw-r--r--drivers/usb/host/xhci-trace.h59
-rw-r--r--drivers/usb/host/xhci.c169
-rw-r--r--drivers/usb/host/xhci.h127
-rw-r--r--drivers/usb/misc/Kconfig20
-rw-r--r--drivers/usb/misc/Makefile1
-rw-r--r--drivers/usb/misc/apple-mfi-fastcharge.c25
-rw-r--r--drivers/usb/misc/chaoskey.c16
-rw-r--r--drivers/usb/misc/onboard_usb_dev.c2
-rw-r--r--drivers/usb/misc/onboard_usb_dev.h8
-rw-r--r--drivers/usb/misc/qcom_eud.c36
-rw-r--r--drivers/usb/misc/usb-ljca.c39
-rw-r--r--drivers/usb/misc/usb251xb.c108
-rw-r--r--drivers/usb/misc/usbio.c749
-rw-r--r--drivers/usb/mon/mon_bin.c14
-rw-r--r--drivers/usb/mtu3/mtu3.h34
-rw-r--r--drivers/usb/mtu3/mtu3_core.c2
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c1
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.c2
-rw-r--r--drivers/usb/musb/Kconfig3
-rw-r--r--drivers/usb/musb/musb_core.c5
-rw-r--r--drivers/usb/musb/musb_debugfs.c5
-rw-r--r--drivers/usb/musb/musb_dsps.c3
-rw-r--r--drivers/usb/musb/musb_gadget.c6
-rw-r--r--drivers/usb/musb/omap2430.c38
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c89
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c3
-rw-r--r--drivers/usb/phy/phy.c4
-rw-r--r--drivers/usb/renesas_usbhs/common.c59
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c2
-rw-r--r--drivers/usb/serial/belkin_sa.c42
-rw-r--r--drivers/usb/serial/cp210x.c8
-rw-r--r--drivers/usb/serial/ftdi_sio.c215
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h4
-rw-r--r--drivers/usb/serial/kobil_sct.c210
-rw-r--r--drivers/usb/serial/option.c66
-rw-r--r--drivers/usb/serial/oti6858.c2
-rw-r--r--drivers/usb/serial/usb-serial.c7
-rw-r--r--drivers/usb/storage/protocol.c3
-rw-r--r--drivers/usb/storage/realtek_cr.c8
-rw-r--r--drivers/usb/storage/sddr55.c6
-rw-r--r--drivers/usb/storage/transport.c16
-rw-r--r--drivers/usb/storage/uas.c30
-rw-r--r--drivers/usb/storage/unusual_devs.h29
-rw-r--r--drivers/usb/storage/unusual_uas.h2
-rw-r--r--drivers/usb/typec/altmodes/displayport.c37
-rw-r--r--drivers/usb/typec/anx7411.c3
-rw-r--r--drivers/usb/typec/class.c13
-rw-r--r--drivers/usb/typec/hd3ss3220.c75
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c2
-rw-r--r--drivers/usb/typec/mux/ps883x.c135
-rw-r--r--drivers/usb/typec/mux/tusb1046.c2
-rw-r--r--drivers/usb/typec/pd.c95
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c8
-rw-r--r--drivers/usb/typec/tcpm/maxim_contaminant.c58
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c2
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c33
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim.h1
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim_core.c51
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c65
-rw-r--r--drivers/usb/typec/tipd/core.c564
-rw-r--r--drivers/usb/typec/tipd/tps6598x.h5
-rw-r--r--drivers/usb/typec/tipd/trace.h39
-rw-r--r--drivers/usb/typec/ucsi/Kconfig2
-rw-r--r--drivers/usb/typec/ucsi/cros_ec_ucsi.c6
-rw-r--r--drivers/usb/typec/ucsi/debugfs.c68
-rw-r--r--drivers/usb/typec/ucsi/displayport.c11
-rw-r--r--drivers/usb/typec/ucsi/psy.c33
-rw-r--r--drivers/usb/typec/ucsi/trace.c17
-rw-r--r--drivers/usb/typec/ucsi/trace.h1
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c179
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h54
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c25
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c15
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c88
-rw-r--r--drivers/usb/typec/ucsi/ucsi_huawei_gaokun.c2
-rw-r--r--drivers/usb/typec/ucsi/ucsi_stm32g0.c7
-rw-r--r--drivers/usb/typec/ucsi/ucsi_yoga_c630.c175
-rw-r--r--drivers/usb/usbip/stub_tx.c9
-rw-r--r--drivers/usb/usbip/vhci_hcd.c118
-rw-r--r--drivers/usb/usbip/vudc_sysfs.c2
-rw-r--r--drivers/vdpa/Kconfig8
-rw-r--r--drivers/vdpa/alibaba/eni_vdpa.c5
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c5
-rw-r--r--drivers/vdpa/mlx5/core/mr.c7
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c35
-rw-r--r--drivers/vdpa/octeon_ep/octep_vdpa_main.c7
-rw-r--r--drivers/vdpa/pds/vdpa_dev.c7
-rw-r--r--drivers/vdpa/solidrun/snet_main.c8
-rw-r--r--drivers/vdpa/vdpa.c5
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c4
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.c134
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.h7
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c83
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c5
-rw-r--r--drivers/vfio/cdx/Makefile6
-rw-r--r--drivers/vfio/cdx/main.c29
-rw-r--r--drivers/vfio/cdx/private.h14
-rw-r--r--drivers/vfio/debugfs.c19
-rw-r--r--drivers/vfio/device_cdev.c38
-rw-r--r--drivers/vfio/fsl-mc/Kconfig5
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc.c45
-rw-r--r--drivers/vfio/group.c35
-rw-r--r--drivers/vfio/iommufd.c4
-rw-r--r--drivers/vfio/pci/Kconfig5
-rw-r--r--drivers/vfio/pci/Makefile3
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c178
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h23
-rw-r--r--drivers/vfio/pci/mlx5/cmd.c4
-rw-r--r--drivers/vfio/pci/mlx5/main.c2
-rw-r--r--drivers/vfio/pci/nvgrace-gpu/main.c348
-rw-r--r--drivers/vfio/pci/pds/dirty.c2
-rw-r--r--drivers/vfio/pci/pds/lm.c3
-rw-r--r--drivers/vfio/pci/pds/vfio_dev.c3
-rw-r--r--drivers/vfio/pci/qat/main.c6
-rw-r--r--drivers/vfio/pci/vfio_pci.c7
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c23
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c327
-rw-r--r--drivers/vfio/pci/vfio_pci_dmabuf.c350
-rw-r--r--drivers/vfio/pci/vfio_pci_igd.c3
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c71
-rw-r--r--drivers/vfio/pci/vfio_pci_priv.h28
-rw-r--r--drivers/vfio/pci/virtio/common.h5
-rw-r--r--drivers/vfio/pci/virtio/legacy_io.c38
-rw-r--r--drivers/vfio/pci/virtio/main.c8
-rw-r--r--drivers/vfio/pci/virtio/migrate.c3
-rw-r--r--drivers/vfio/pci/xe/Kconfig12
-rw-r--r--drivers/vfio/pci/xe/Makefile3
-rw-r--r--drivers/vfio/pci/xe/main.c573
-rw-r--r--drivers/vfio/platform/Kconfig5
-rw-r--r--drivers/vfio/platform/reset/Kconfig6
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_amdxgbe.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c2
-rw-r--r--drivers/vfio/platform/vfio_amba.c3
-rw-r--r--drivers/vfio/platform/vfio_platform.c1
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c40
-rw-r--r--drivers/vfio/platform/vfio_platform_private.h3
-rw-r--r--drivers/vfio/vfio_iommu_type1.c292
-rw-r--r--drivers/vfio/vfio_main.c76
-rw-r--r--drivers/vhost/Kconfig18
-rw-r--r--drivers/vhost/net.c298
-rw-r--r--drivers/vhost/scsi.c35
-rw-r--r--drivers/vhost/test.c10
-rw-r--r--drivers/vhost/vdpa.c16
-rw-r--r--drivers/vhost/vhost.c458
-rw-r--r--drivers/vhost/vhost.h86
-rw-r--r--drivers/vhost/vringh.c132
-rw-r--r--drivers/vhost/vsock.c25
-rw-r--r--drivers/video/Kconfig18
-rw-r--r--drivers/video/backlight/Kconfig9
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/apple_dwi_bl.c1
-rw-r--r--drivers/video/backlight/as3711_bl.c1
-rw-r--r--drivers/video/backlight/aw99706.c471
-rw-r--r--drivers/video/backlight/backlight.c1
-rw-r--r--drivers/video/backlight/da9052_bl.c1
-rw-r--r--drivers/video/backlight/jornada720_bl.c1
-rw-r--r--drivers/video/backlight/ktd2801-backlight.c1
-rw-r--r--drivers/video/backlight/led_bl.c18
-rw-r--r--drivers/video/backlight/lp855x_bl.c2
-rw-r--r--drivers/video/backlight/mp3309c.c14
-rw-r--r--drivers/video/backlight/rave-sp-backlight.c2
-rw-r--r--drivers/video/backlight/rt4831-backlight.c1
-rw-r--r--drivers/video/console/vgacon.c2
-rw-r--r--drivers/video/fbdev/Kconfig25
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c8
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c4
-rw-r--r--drivers/video/fbdev/c2p_iplan2.c1
-rw-r--r--drivers/video/fbdev/c2p_planar.c1
-rw-r--r--drivers/video/fbdev/core/Kconfig19
-rw-r--r--drivers/video/fbdev/core/bitblit.c155
-rw-r--r--drivers/video/fbdev/core/cfbcopyarea.c2
-rw-r--r--drivers/video/fbdev/core/cfbfillrect.c2
-rw-r--r--drivers/video/fbdev/core/cfbimgblt.c2
-rw-r--r--drivers/video/fbdev/core/fb_cmdline.c2
-rw-r--r--drivers/video/fbdev/core/fb_ddc.c1
-rw-r--r--drivers/video/fbdev/core/fb_defio.c1
-rw-r--r--drivers/video/fbdev/core/fb_fillrect.h3
-rw-r--r--drivers/video/fbdev/core/fb_io_fops.c1
-rw-r--r--drivers/video/fbdev/core/fb_sys_fops.c2
-rw-r--r--drivers/video/fbdev/core/fbcmap.c1
-rw-r--r--drivers/video/fbdev/core/fbcon.c578
-rw-r--r--drivers/video/fbdev/core/fbcon.h17
-rw-r--r--drivers/video/fbdev/core/fbcon_ccw.c151
-rw-r--r--drivers/video/fbdev/core/fbcon_cw.c151
-rw-r--r--drivers/video/fbdev/core/fbcon_rotate.c47
-rw-r--r--drivers/video/fbdev/core/fbcon_rotate.h18
-rw-r--r--drivers/video/fbdev/core/fbcon_ud.c167
-rw-r--r--drivers/video/fbdev/core/fbmem.c4
-rw-r--r--drivers/video/fbdev/core/fbmon.c12
-rw-r--r--drivers/video/fbdev/core/modedb.c1
-rw-r--r--drivers/video/fbdev/core/softcursor.c18
-rw-r--r--drivers/video/fbdev/core/svgalib.c96
-rw-r--r--drivers/video/fbdev/core/syscopyarea.c2
-rw-r--r--drivers/video/fbdev/core/sysfillrect.c2
-rw-r--r--drivers/video/fbdev/core/sysimgblt.c2
-rw-r--r--drivers/video/fbdev/core/tileblit.c32
-rw-r--r--drivers/video/fbdev/cyber2000fb.c36
-rw-r--r--drivers/video/fbdev/cyber2000fb.h2
-rw-r--r--drivers/video/fbdev/gbefb.c5
-rw-r--r--drivers/video/fbdev/gxt4500.c2
-rw-r--r--drivers/video/fbdev/hyperv_fb.c2
-rw-r--r--drivers/video/fbdev/i810/i810_main.c46
-rw-r--r--drivers/video/fbdev/imxfb.c9
-rw-r--r--drivers/video/fbdev/kyro/fbdev.c24
-rw-r--r--drivers/video/fbdev/macmodes.c3
-rw-r--r--drivers/video/fbdev/matrox/g450_pll.c26
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_DAC1064.c47
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_Ti3026.c1
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_accel.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c1
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_g450.c62
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_misc.c21
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xx-i2c.c1
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c2
-rw-r--r--drivers/video/fbdev/nvidia/nv_local.h2
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c3
-rw-r--r--drivers/video/fbdev/omap/lcd_dma.c1
-rw-r--r--drivers/video/fbdev/omap/lcdc.c2
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/apply.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/core.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/display.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dpi.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss-of.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss_features.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/manager.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/output.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/overlay.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/sdi.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/venc.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/vrfb.c1
-rw-r--r--drivers/video/fbdev/pvr2fb.c2
-rw-r--r--drivers/video/fbdev/pxafb.c32
-rw-r--r--drivers/video/fbdev/s3fb.c177
-rw-r--r--drivers/video/fbdev/sbuslib.c1
-rw-r--r--drivers/video/fbdev/simplefb.c54
-rw-r--r--drivers/video/fbdev/sis/sis.h2
-rw-r--r--drivers/video/fbdev/sis/sis_main.c25
-rw-r--r--drivers/video/fbdev/ssd1307fb.c4
-rw-r--r--drivers/video/fbdev/tcx.c2
-rw-r--r--drivers/video/fbdev/tridentfb.c4
-rw-r--r--drivers/video/fbdev/udlfb.c4
-rw-r--r--drivers/video/fbdev/valkyriefb.c2
-rw-r--r--drivers/video/fbdev/vesafb.c29
-rw-r--r--drivers/video/fbdev/vga16fb.c21
-rw-r--r--drivers/video/fbdev/via/via-core.c1
-rw-r--r--drivers/video/fbdev/via/via-gpio.c3
-rw-r--r--drivers/video/fbdev/via/via_i2c.c1
-rw-r--r--drivers/video/fbdev/wmt_ge_rops.c1
-rw-r--r--drivers/video/fbdev/xen-fbfront.c2
-rw-r--r--drivers/video/screen_info_generic.c55
-rw-r--r--drivers/virt/Kconfig4
-rw-r--r--drivers/virt/acrn/ioreq.c4
-rw-r--r--drivers/virt/acrn/mm.c8
-rw-r--r--drivers/virt/coco/Kconfig5
-rw-r--r--drivers/virt/coco/Makefile1
-rw-r--r--drivers/virt/coco/efi_secret/Kconfig2
-rw-r--r--drivers/virt/coco/efi_secret/efi_secret.c47
-rw-r--r--drivers/virt/coco/guest/tsm-mr.c8
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c36
-rw-r--r--drivers/virt/coco/tsm-core.c163
-rw-r--r--drivers/virtio/virtio.c50
-rw-r--r--drivers/virtio/virtio_balloon.c19
-rw-r--r--drivers/virtio/virtio_debug.c27
-rw-r--r--drivers/virtio/virtio_dma_buf.c2
-rw-r--r--drivers/virtio/virtio_input.c4
-rw-r--r--drivers/virtio/virtio_mem.c2
-rw-r--r--drivers/virtio/virtio_mmio.c52
-rw-r--r--drivers/virtio/virtio_pci_legacy_dev.c4
-rw-r--r--drivers/virtio/virtio_pci_modern.c10
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c73
-rw-r--r--drivers/virtio/virtio_ring.c476
-rw-r--r--drivers/virtio/virtio_vdpa.c77
-rw-r--r--drivers/w1/masters/matrox_w1.c10
-rw-r--r--drivers/w1/masters/omap_hdq.c5
-rw-r--r--drivers/w1/slaves/w1_ds2406.c4
-rw-r--r--drivers/w1/slaves/w1_ds2408.c2
-rw-r--r--drivers/w1/slaves/w1_ds2413.c2
-rw-r--r--drivers/w1/slaves/w1_ds2430.c2
-rw-r--r--drivers/w1/slaves/w1_ds2431.c2
-rw-r--r--drivers/w1/slaves/w1_ds2433.c12
-rw-r--r--drivers/w1/slaves/w1_ds2438.c2
-rw-r--r--drivers/w1/slaves/w1_ds2780.c2
-rw-r--r--drivers/w1/slaves/w1_ds2781.c2
-rw-r--r--drivers/w1/slaves/w1_ds2805.c4
-rw-r--r--drivers/w1/slaves/w1_ds28e04.c2
-rw-r--r--drivers/w1/slaves/w1_ds28e17.c4
-rw-r--r--drivers/w1/w1.c22
-rw-r--r--drivers/watchdog/Kconfig23
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/aspeed_wdt.c30
-rw-r--r--drivers/watchdog/diag288_wdt.c9
-rw-r--r--drivers/watchdog/dw_wdt.c2
-rw-r--r--drivers/watchdog/iTCO_wdt.c6
-rw-r--r--drivers/watchdog/intel_oc_wdt.c8
-rw-r--r--drivers/watchdog/it87_wdt.c4
-rw-r--r--drivers/watchdog/loongson1_wdt.c89
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c2
-rw-r--r--drivers/watchdog/nct6694_wdt.c307
-rw-r--r--drivers/watchdog/renesas_wdt.c8
-rw-r--r--drivers/watchdog/renesas_wwdt.c163
-rw-r--r--drivers/watchdog/rti_wdt.c14
-rw-r--r--drivers/watchdog/rzg2l_wdt.c4
-rw-r--r--drivers/watchdog/rzv2h_wdt.c150
-rw-r--r--drivers/watchdog/s3c2410_wdt.c46
-rw-r--r--drivers/watchdog/sbsa_gwdt.c50
-rw-r--r--drivers/watchdog/starfive-wdt.c4
-rw-r--r--drivers/watchdog/via_wdt.c1
-rw-r--r--drivers/watchdog/visconti_wdt.c5
-rw-r--r--drivers/watchdog/watchdog_core.h8
-rw-r--r--drivers/watchdog/watchdog_pretimeout.c2
-rw-r--r--drivers/watchdog/wdat_wdt.c64
-rw-r--r--drivers/watchdog/ziirave_wdt.c3
-rw-r--r--drivers/xen/Kconfig1
-rw-r--r--drivers/xen/balloon.c4
-rw-r--r--drivers/xen/events/events_base.c37
-rw-r--r--drivers/xen/gntdev-common.h4
-rw-r--r--drivers/xen/gntdev-dmabuf.c35
-rw-r--r--drivers/xen/gntdev-dmabuf.h2
-rw-r--r--drivers/xen/gntdev.c109
-rw-r--r--drivers/xen/grant-dma-ops.c20
-rw-r--r--drivers/xen/grant-table.c8
-rw-r--r--drivers/xen/manage.c20
-rw-r--r--drivers/xen/privcmd.c14
-rw-r--r--drivers/xen/pvcalls-back.c4
-rw-r--r--drivers/xen/swiotlb-xen.c44
-rw-r--r--drivers/xen/time.c8
-rw-r--r--drivers/xen/unpopulated-alloc.c4
-rw-r--r--drivers/xen/xen-acpi-processor.c12
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c12
-rw-r--r--drivers/xen/xenbus/xenbus_client.c4
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c60
-rw-r--r--drivers/xen/xenfs/super.c2
-rw-r--r--drivers/zorro/names.c12
-rw-r--r--drivers/zorro/zorro-sysfs.c4
11857 files changed, 804163 insertions, 319123 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 7c556c5ac4fd..c0f1fb893ec0 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -10,6 +10,12 @@ source "drivers/cxl/Kconfig"
source "drivers/pcmcia/Kconfig"
source "drivers/rapidio/Kconfig"
+config PC104
+ bool "PC/104 support" if EXPERT
+ help
+ Expose PC/104 form factor device drivers and options available for
+ selection and configuration. Enable this option if your target
+ machine has a PC/104 bus.
source "drivers/base/Kconfig"
@@ -77,6 +83,8 @@ source "drivers/pps/Kconfig"
source "drivers/ptp/Kconfig"
+source "drivers/dpll/Kconfig"
+
source "drivers/pinctrl/Kconfig"
source "drivers/gpio/Kconfig"
@@ -153,6 +161,8 @@ source "drivers/greybus/Kconfig"
source "drivers/comedi/Kconfig"
+source "drivers/gpib/Kconfig"
+
source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
@@ -209,8 +219,6 @@ source "drivers/thunderbolt/Kconfig"
source "drivers/android/Kconfig"
-source "drivers/gpu/trace/Kconfig"
-
source "drivers/nvdimm/Kconfig"
source "drivers/dax/Kconfig"
@@ -245,6 +253,6 @@ source "drivers/hte/Kconfig"
source "drivers/cdx/Kconfig"
-source "drivers/dpll/Kconfig"
+source "drivers/resctrl/Kconfig"
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index b5749cf67044..ccc05f1eae3e 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -150,6 +150,7 @@ obj-$(CONFIG_VHOST_IOTLB) += vhost/
obj-$(CONFIG_VHOST) += vhost/
obj-$(CONFIG_GREYBUS) += greybus/
obj-$(CONFIG_COMEDI) += comedi/
+obj-$(CONFIG_GPIB) += gpib/
obj-$(CONFIG_STAGING) += staging/
obj-y += platform/
@@ -160,8 +161,8 @@ obj-$(CONFIG_RPMSG) += rpmsg/
obj-$(CONFIG_SOUNDWIRE) += soundwire/
# Virtualization drivers
-obj-$(CONFIG_VIRT_DRIVERS) += virt/
-obj-$(subst m,y,$(CONFIG_HYPERV)) += hv/
+obj-y += virt/
+obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_PM_DEVFREQ) += devfreq/
obj-$(CONFIG_EXTCON) += extcon/
@@ -194,5 +195,7 @@ obj-$(CONFIG_HTE) += hte/
obj-$(CONFIG_DRM_ACCEL) += accel/
obj-$(CONFIG_CDX_BUS) += cdx/
obj-$(CONFIG_DPLL) += dpll/
+obj-y += resctrl/
+obj-$(CONFIG_DIBS) += dibs/
obj-$(CONFIG_S390) += s390/
diff --git a/drivers/accel/Kconfig b/drivers/accel/Kconfig
index 5b9490367a39..bdf48ccafcf2 100644
--- a/drivers/accel/Kconfig
+++ b/drivers/accel/Kconfig
@@ -25,8 +25,10 @@ menuconfig DRM_ACCEL
and debugfs).
source "drivers/accel/amdxdna/Kconfig"
+source "drivers/accel/ethosu/Kconfig"
source "drivers/accel/habanalabs/Kconfig"
source "drivers/accel/ivpu/Kconfig"
source "drivers/accel/qaic/Kconfig"
+source "drivers/accel/rocket/Kconfig"
endif
diff --git a/drivers/accel/Makefile b/drivers/accel/Makefile
index a301fb6089d4..1d3a7251b950 100644
--- a/drivers/accel/Makefile
+++ b/drivers/accel/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_DRM_ACCEL_AMDXDNA) += amdxdna/
+obj-$(CONFIG_DRM_ACCEL_ARM_ETHOSU) += ethosu/
obj-$(CONFIG_DRM_ACCEL_HABANALABS) += habanalabs/
obj-$(CONFIG_DRM_ACCEL_IVPU) += ivpu/
obj-$(CONFIG_DRM_ACCEL_QAIC) += qaic/
+obj-$(CONFIG_DRM_ACCEL_ROCKET) += rocket/
\ No newline at end of file
diff --git a/drivers/accel/amdxdna/Makefile b/drivers/accel/amdxdna/Makefile
index 0e9adf6890a0..6344aaf523fa 100644
--- a/drivers/accel/amdxdna/Makefile
+++ b/drivers/accel/amdxdna/Makefile
@@ -14,7 +14,9 @@ amdxdna-y := \
amdxdna_mailbox.o \
amdxdna_mailbox_helper.o \
amdxdna_pci_drv.o \
+ amdxdna_pm.o \
amdxdna_sysfs.o \
+ amdxdna_ubuf.o \
npu1_regs.o \
npu2_regs.o \
npu4_regs.o \
diff --git a/drivers/accel/amdxdna/TODO b/drivers/accel/amdxdna/TODO
index ad8ac6e315b6..0e4bbebeaedf 100644
--- a/drivers/accel/amdxdna/TODO
+++ b/drivers/accel/amdxdna/TODO
@@ -1,2 +1 @@
- Add debugfs support
-- Add debug BO support
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
index e04549f64d69..42d876a427c5 100644
--- a/drivers/accel/amdxdna/aie2_ctx.c
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -21,6 +21,7 @@
#include "amdxdna_gem.h"
#include "amdxdna_mailbox.h"
#include "amdxdna_pci_drv.h"
+#include "amdxdna_pm.h"
static bool force_cmdlist;
module_param(force_cmdlist, bool, 0600);
@@ -46,6 +47,17 @@ static void aie2_job_put(struct amdxdna_sched_job *job)
kref_put(&job->refcnt, aie2_job_release);
}
+static void aie2_hwctx_status_shift_stop(struct amdxdna_hwctx *hwctx)
+{
+ hwctx->old_status = hwctx->status;
+ hwctx->status = HWCTX_STAT_STOP;
+}
+
+static void aie2_hwctx_status_restore(struct amdxdna_hwctx *hwctx)
+{
+ hwctx->status = hwctx->old_status;
+}
+
/* The bad_job is used in aie2_sched_job_timedout(); otherwise, set it to NULL */
static void aie2_hwctx_stop(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx,
struct drm_sched_job *bad_job)
@@ -77,7 +89,7 @@ static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hw
goto out;
}
- ret = aie2_config_cu(hwctx);
+ ret = aie2_config_cu(hwctx, NULL);
if (ret) {
XDNA_ERR(xdna, "Config cu failed, ret %d", ret);
goto out;
@@ -89,25 +101,6 @@ out:
return ret;
}
-void aie2_restart_ctx(struct amdxdna_client *client)
-{
- struct amdxdna_dev *xdna = client->xdna;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
-
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- mutex_lock(&client->hwctx_lock);
- amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
- if (hwctx->status != HWCTX_STAT_STOP)
- continue;
-
- hwctx->status = hwctx->old_status;
- XDNA_DBG(xdna, "Resetting %s", hwctx->name);
- aie2_hwctx_restart(xdna, hwctx);
- }
- mutex_unlock(&client->hwctx_lock);
-}
-
static struct dma_fence *aie2_cmd_get_out_fence(struct amdxdna_hwctx *hwctx, u64 seq)
{
struct dma_fence *fence, *out_fence = NULL;
@@ -141,34 +134,46 @@ static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
dma_fence_put(fence);
}
-void aie2_hwctx_suspend(struct amdxdna_hwctx *hwctx)
+static int aie2_hwctx_suspend_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
struct amdxdna_dev *xdna = hwctx->client->xdna;
+ aie2_hwctx_wait_for_idle(hwctx);
+ aie2_hwctx_stop(xdna, hwctx, NULL);
+ aie2_hwctx_status_shift_stop(hwctx);
+
+ return 0;
+}
+
+void aie2_hwctx_suspend(struct amdxdna_client *client)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+
/*
* Command timeout is unlikely. But if it happens, it doesn't
* break the system. aie2_hwctx_stop() will destroy the mailbox
* and abort all commands.
*/
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- aie2_hwctx_wait_for_idle(hwctx);
- aie2_hwctx_stop(xdna, hwctx, NULL);
- hwctx->old_status = hwctx->status;
- hwctx->status = HWCTX_STAT_STOP;
+ amdxdna_hwctx_walk(client, NULL, aie2_hwctx_suspend_cb);
}
-void aie2_hwctx_resume(struct amdxdna_hwctx *hwctx)
+static int aie2_hwctx_resume_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
struct amdxdna_dev *xdna = hwctx->client->xdna;
+ aie2_hwctx_status_restore(hwctx);
+ return aie2_hwctx_restart(xdna, hwctx);
+}
+
+int aie2_hwctx_resume(struct amdxdna_client *client)
+{
/*
* The resume path cannot guarantee that the mailbox channel can be
* regenerated. If this happens, submitting a message to this
* mailbox channel will return an error.
*/
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- hwctx->status = hwctx->old_status;
- aie2_hwctx_restart(xdna, hwctx);
+ return amdxdna_hwctx_walk(client, NULL, aie2_hwctx_resume_cb);
}
static void
@@ -177,12 +182,13 @@ aie2_sched_notify(struct amdxdna_sched_job *job)
struct dma_fence *fence = job->fence;
trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq);
+
+ amdxdna_pm_suspend_put(job->hwctx->client->xdna);
job->hwctx->priv->completed++;
dma_fence_signal(fence);
up(&job->hwctx->priv->job_sem);
job->job_done = true;
- dma_fence_put(fence);
mmput_async(job->mm);
aie2_job_put(job);
}
@@ -192,15 +198,18 @@ aie2_sched_resp_handler(void *handle, void __iomem *data, size_t size)
{
struct amdxdna_sched_job *job = handle;
struct amdxdna_gem_obj *cmd_abo;
- u32 ret = 0;
+ int ret = 0;
u32 status;
cmd_abo = job->cmd_bo;
- if (unlikely(!data))
+ if (unlikely(job->job_timeout)) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_TIMEOUT);
+ ret = -EINVAL;
goto out;
+ }
- if (unlikely(size != sizeof(u32))) {
+ if (unlikely(!data) || unlikely(size != sizeof(u32))) {
amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
ret = -EINVAL;
goto out;
@@ -219,11 +228,10 @@ out:
}
static int
-aie2_sched_nocmd_resp_handler(void *handle, void __iomem *data, size_t size)
+aie2_sched_drvcmd_resp_handler(void *handle, void __iomem *data, size_t size)
{
struct amdxdna_sched_job *job = handle;
- u32 ret = 0;
- u32 status;
+ int ret = 0;
if (unlikely(!data))
goto out;
@@ -233,8 +241,7 @@ aie2_sched_nocmd_resp_handler(void *handle, void __iomem *data, size_t size)
goto out;
}
- status = readl(data);
- XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
+ job->drv_cmd->result = readl(data);
out:
aie2_sched_notify(job);
@@ -250,9 +257,16 @@ aie2_sched_cmdlist_resp_handler(void *handle, void __iomem *data, size_t size)
u32 fail_cmd_status;
u32 fail_cmd_idx;
u32 cmd_status;
- u32 ret = 0;
+ int ret = 0;
cmd_abo = job->cmd_bo;
+
+ if (unlikely(job->job_timeout)) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_TIMEOUT);
+ ret = -EINVAL;
+ goto out;
+ }
+
if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) {
amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
ret = -EINVAL;
@@ -307,8 +321,18 @@ aie2_sched_job_run(struct drm_sched_job *sched_job)
kref_get(&job->refcnt);
fence = dma_fence_get(job->fence);
- if (unlikely(!cmd_abo)) {
- ret = aie2_sync_bo(hwctx, job, aie2_sched_nocmd_resp_handler);
+ if (job->drv_cmd) {
+ switch (job->drv_cmd->opcode) {
+ case SYNC_DEBUG_BO:
+ ret = aie2_sync_bo(hwctx, job, aie2_sched_drvcmd_resp_handler);
+ break;
+ case ATTACH_DEBUG_BO:
+ ret = aie2_config_debug_bo(hwctx, job, aie2_sched_drvcmd_resp_handler);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
goto out;
}
@@ -355,13 +379,14 @@ aie2_sched_job_timedout(struct drm_sched_job *sched_job)
xdna = hwctx->client->xdna;
trace_xdna_job(sched_job, hwctx->name, "job timedout", job->seq);
+ job->job_timeout = true;
mutex_lock(&xdna->dev_lock);
aie2_hwctx_stop(xdna, hwctx, sched_job);
aie2_hwctx_restart(xdna, hwctx);
mutex_unlock(&xdna->dev_lock);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
static const struct drm_sched_backend_ops sched_ops = {
@@ -524,13 +549,12 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
.num_rqs = DRM_SCHED_PRIORITY_COUNT,
.credit_limit = HWCTX_MAX_CMDS,
.timeout = msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
- .name = hwctx->name,
+ .name = "amdxdna_js",
.dev = xdna->ddev.dev,
};
struct drm_gpu_scheduler *sched;
struct amdxdna_hwctx_priv *priv;
struct amdxdna_gem_obj *heap;
- struct amdxdna_dev_hdl *ndev;
int i, ret;
priv = kzalloc(sizeof(*hwctx->priv), GFP_KERNEL);
@@ -566,7 +590,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
.size = MAX_CHAIN_CMDBUF_SIZE,
};
- abo = amdxdna_drm_alloc_dev_bo(&xdna->ddev, &args, client->filp, true);
+ abo = amdxdna_drm_alloc_dev_bo(&xdna->ddev, &args, client->filp);
if (IS_ERR(abo)) {
ret = PTR_ERR(abo);
goto free_cmd_bufs;
@@ -603,10 +627,14 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
goto free_entity;
}
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto free_col_list;
+
ret = aie2_alloc_resource(hwctx);
if (ret) {
XDNA_ERR(xdna, "Alloc hw resource failed, ret %d", ret);
- goto free_col_list;
+ goto suspend_put;
}
ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
@@ -621,10 +649,9 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
XDNA_ERR(xdna, "Create syncobj failed, ret %d", ret);
goto release_resource;
}
+ amdxdna_pm_suspend_put(xdna);
hwctx->status = HWCTX_STAT_INIT;
- ndev = xdna->dev_handle;
- ndev->hwctx_num++;
init_waitqueue_head(&priv->job_free_wq);
XDNA_DBG(xdna, "hwctx %s init completed", hwctx->name);
@@ -633,6 +660,8 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
release_resource:
aie2_release_resource(hwctx);
+suspend_put:
+ amdxdna_pm_suspend_put(xdna);
free_col_list:
kfree(hwctx->col_list);
free_entity:
@@ -655,26 +684,25 @@ free_priv:
void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
{
- struct amdxdna_dev_hdl *ndev;
struct amdxdna_dev *xdna;
int idx;
xdna = hwctx->client->xdna;
- ndev = xdna->dev_handle;
- ndev->hwctx_num--;
XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq);
- drm_sched_entity_destroy(&hwctx->priv->entity);
-
aie2_hwctx_wait_for_idle(hwctx);
/* Request fw to destroy hwctx and cancel the remaining pending requests */
aie2_release_resource(hwctx);
+ mutex_unlock(&xdna->dev_lock);
+ drm_sched_entity_destroy(&hwctx->priv->entity);
+
/* Wait for all submitted jobs to be completed or canceled */
wait_event(hwctx->priv->job_free_wq,
atomic64_read(&hwctx->job_submit_cnt) ==
atomic64_read(&hwctx->job_free_cnt));
+ mutex_lock(&xdna->dev_lock);
drm_sched_fini(&hwctx->priv->sched);
aie2_ctx_syncobj_destroy(hwctx);
@@ -690,6 +718,14 @@ void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
kfree(hwctx->cus);
}
+static int aie2_config_cu_resp_handler(void *handle, void __iomem *data, size_t size)
+{
+ struct amdxdna_hwctx *hwctx = handle;
+
+ amdxdna_pm_suspend_put(hwctx->client->xdna);
+ return 0;
+}
+
static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size)
{
struct amdxdna_hwctx_param_config_cu *config = buf;
@@ -721,10 +757,14 @@ static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size
if (!hwctx->cus)
return -ENOMEM;
- ret = aie2_config_cu(hwctx);
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto free_cus;
+
+ ret = aie2_config_cu(hwctx, aie2_config_cu_resp_handler);
if (ret) {
XDNA_ERR(xdna, "Config CU to firmware failed, ret %d", ret);
- goto free_cus;
+ goto pm_suspend_put;
}
wmb(); /* To avoid locking in command submit when checking status */
@@ -732,12 +772,82 @@ static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size
return 0;
+pm_suspend_put:
+ amdxdna_pm_suspend_put(xdna);
free_cus:
kfree(hwctx->cus);
hwctx->cus = NULL;
return ret;
}
+static void aie2_cmd_wait(struct amdxdna_hwctx *hwctx, u64 seq)
+{
+ struct dma_fence *out_fence = aie2_cmd_get_out_fence(hwctx, seq);
+
+ if (!out_fence) {
+ XDNA_ERR(hwctx->client->xdna, "Failed to get fence");
+ return;
+ }
+
+ dma_fence_wait_timeout(out_fence, false, MAX_SCHEDULE_TIMEOUT);
+ dma_fence_put(out_fence);
+}
+
+static int aie2_hwctx_cfg_debug_bo(struct amdxdna_hwctx *hwctx, u32 bo_hdl,
+ bool attach)
+{
+ struct amdxdna_client *client = hwctx->client;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_drv_cmd cmd = { 0 };
+ struct amdxdna_gem_obj *abo;
+ u64 seq;
+ int ret;
+
+ abo = amdxdna_gem_get_obj(client, bo_hdl, AMDXDNA_BO_DEV);
+ if (!abo) {
+ XDNA_ERR(xdna, "Get bo %d failed", bo_hdl);
+ return -EINVAL;
+ }
+
+ if (attach) {
+ if (abo->assigned_hwctx != AMDXDNA_INVALID_CTX_HANDLE) {
+ ret = -EBUSY;
+ goto put_obj;
+ }
+ cmd.opcode = ATTACH_DEBUG_BO;
+ } else {
+ if (abo->assigned_hwctx != hwctx->id) {
+ ret = -EINVAL;
+ goto put_obj;
+ }
+ cmd.opcode = DETACH_DEBUG_BO;
+ }
+
+ ret = amdxdna_cmd_submit(client, &cmd, AMDXDNA_INVALID_BO_HANDLE,
+ &bo_hdl, 1, hwctx->id, &seq);
+ if (ret) {
+ XDNA_ERR(xdna, "Submit command failed");
+ goto put_obj;
+ }
+
+ aie2_cmd_wait(hwctx, seq);
+ if (cmd.result) {
+ XDNA_ERR(xdna, "Response failure 0x%x", cmd.result);
+ goto put_obj;
+ }
+
+ if (attach)
+ abo->assigned_hwctx = hwctx->id;
+ else
+ abo->assigned_hwctx = AMDXDNA_INVALID_CTX_HANDLE;
+
+ XDNA_DBG(xdna, "Config debug BO %d to %s", bo_hdl, hwctx->name);
+
+put_obj:
+ amdxdna_gem_put_obj(abo);
+ return ret;
+}
+
int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size)
{
struct amdxdna_dev *xdna = hwctx->client->xdna;
@@ -747,14 +857,40 @@ int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *bu
case DRM_AMDXDNA_HWCTX_CONFIG_CU:
return aie2_hwctx_cu_config(hwctx, buf, size);
case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
+ return aie2_hwctx_cfg_debug_bo(hwctx, (u32)value, true);
case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
- return -EOPNOTSUPP;
+ return aie2_hwctx_cfg_debug_bo(hwctx, (u32)value, false);
default:
XDNA_DBG(xdna, "Not supported type %d", type);
return -EOPNOTSUPP;
}
}
+int aie2_hwctx_sync_debug_bo(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl)
+{
+ struct amdxdna_client *client = hwctx->client;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_drv_cmd cmd = { 0 };
+ u64 seq;
+ int ret;
+
+ cmd.opcode = SYNC_DEBUG_BO;
+ ret = amdxdna_cmd_submit(client, &cmd, AMDXDNA_INVALID_BO_HANDLE,
+ &debug_bo_hdl, 1, hwctx->id, &seq);
+ if (ret) {
+ XDNA_ERR(xdna, "Submit command failed");
+ return ret;
+ }
+
+ aie2_cmd_wait(hwctx, seq);
+ if (cmd.result) {
+ XDNA_ERR(xdna, "Response failure 0x%x", cmd.result);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int aie2_populate_range(struct amdxdna_gem_obj *abo)
{
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
@@ -848,17 +984,22 @@ int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
goto up_sem;
}
- ret = drm_sched_job_init(&job->base, &hwctx->priv->entity, 1, hwctx);
+ ret = drm_sched_job_init(&job->base, &hwctx->priv->entity, 1, hwctx,
+ hwctx->client->filp->client_id);
if (ret) {
XDNA_ERR(xdna, "DRM job init failed, ret %d", ret);
goto free_chain;
}
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto cleanup_job;
+
retry:
ret = drm_gem_lock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
if (ret) {
XDNA_WARN(xdna, "Failed to lock BOs, ret %d", ret);
- goto cleanup_job;
+ goto suspend_put;
}
for (i = 0; i < job->bo_cnt; i++) {
@@ -866,7 +1007,7 @@ retry:
if (ret) {
XDNA_WARN(xdna, "Failed to reserve fences %d", ret);
drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
- goto cleanup_job;
+ goto suspend_put;
}
}
@@ -881,12 +1022,12 @@ retry:
msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
} else if (time_after(jiffies, timeout)) {
ret = -ETIME;
- goto cleanup_job;
+ goto suspend_put;
}
ret = aie2_populate_range(abo);
if (ret)
- goto cleanup_job;
+ goto suspend_put;
goto retry;
}
}
@@ -912,6 +1053,8 @@ retry:
return 0;
+suspend_put:
+ amdxdna_pm_suspend_put(xdna);
cleanup_job:
drm_sched_job_cleanup(&job->base);
free_chain:
diff --git a/drivers/accel/amdxdna/aie2_error.c b/drivers/accel/amdxdna/aie2_error.c
index 5ee905632a39..d452008ec4f4 100644
--- a/drivers/accel/amdxdna/aie2_error.c
+++ b/drivers/accel/amdxdna/aie2_error.c
@@ -13,6 +13,7 @@
#include "aie2_msg_priv.h"
#include "aie2_pci.h"
+#include "amdxdna_error.h"
#include "amdxdna_mailbox.h"
#include "amdxdna_pci_drv.h"
@@ -46,6 +47,7 @@ enum aie_module_type {
AIE_MEM_MOD = 0,
AIE_CORE_MOD,
AIE_PL_MOD,
+ AIE_UNKNOWN_MOD,
};
enum aie_error_category {
@@ -143,6 +145,31 @@ static const struct aie_event_category aie_ml_shim_tile_event_cat[] = {
EVENT_CATEGORY(74U, AIE_ERROR_LOCK),
};
+static const enum amdxdna_error_num aie_cat_err_num_map[] = {
+ [AIE_ERROR_SATURATION] = AMDXDNA_ERROR_NUM_AIE_SATURATION,
+ [AIE_ERROR_FP] = AMDXDNA_ERROR_NUM_AIE_FP,
+ [AIE_ERROR_STREAM] = AMDXDNA_ERROR_NUM_AIE_STREAM,
+ [AIE_ERROR_ACCESS] = AMDXDNA_ERROR_NUM_AIE_ACCESS,
+ [AIE_ERROR_BUS] = AMDXDNA_ERROR_NUM_AIE_BUS,
+ [AIE_ERROR_INSTRUCTION] = AMDXDNA_ERROR_NUM_AIE_INSTRUCTION,
+ [AIE_ERROR_ECC] = AMDXDNA_ERROR_NUM_AIE_ECC,
+ [AIE_ERROR_LOCK] = AMDXDNA_ERROR_NUM_AIE_LOCK,
+ [AIE_ERROR_DMA] = AMDXDNA_ERROR_NUM_AIE_DMA,
+ [AIE_ERROR_MEM_PARITY] = AMDXDNA_ERROR_NUM_AIE_MEM_PARITY,
+ [AIE_ERROR_UNKNOWN] = AMDXDNA_ERROR_NUM_UNKNOWN,
+};
+
+static_assert(ARRAY_SIZE(aie_cat_err_num_map) == AIE_ERROR_UNKNOWN + 1);
+
+static const enum amdxdna_error_module aie_err_mod_map[] = {
+ [AIE_MEM_MOD] = AMDXDNA_ERROR_MODULE_AIE_MEMORY,
+ [AIE_CORE_MOD] = AMDXDNA_ERROR_MODULE_AIE_CORE,
+ [AIE_PL_MOD] = AMDXDNA_ERROR_MODULE_AIE_PL,
+ [AIE_UNKNOWN_MOD] = AMDXDNA_ERROR_MODULE_UNKNOWN,
+};
+
+static_assert(ARRAY_SIZE(aie_err_mod_map) == AIE_UNKNOWN_MOD + 1);
+
static enum aie_error_category
aie_get_error_category(u8 row, u8 event_id, enum aie_module_type mod_type)
{
@@ -176,12 +203,40 @@ aie_get_error_category(u8 row, u8 event_id, enum aie_module_type mod_type)
if (event_id != lut[i].event_id)
continue;
+ if (lut[i].category > AIE_ERROR_UNKNOWN)
+ return AIE_ERROR_UNKNOWN;
+
return lut[i].category;
}
return AIE_ERROR_UNKNOWN;
}
+static void aie2_update_last_async_error(struct amdxdna_dev_hdl *ndev, void *err_info, u32 num_err)
+{
+ struct aie_error *errs = err_info;
+ enum amdxdna_error_module err_mod;
+ enum aie_error_category aie_err;
+ enum amdxdna_error_num err_num;
+ struct aie_error *last_err;
+
+ last_err = &errs[num_err - 1];
+ if (last_err->mod_type >= AIE_UNKNOWN_MOD) {
+ err_num = aie_cat_err_num_map[AIE_ERROR_UNKNOWN];
+ err_mod = aie_err_mod_map[AIE_UNKNOWN_MOD];
+ } else {
+ aie_err = aie_get_error_category(last_err->row,
+ last_err->event_id,
+ last_err->mod_type);
+ err_num = aie_cat_err_num_map[aie_err];
+ err_mod = aie_err_mod_map[last_err->mod_type];
+ }
+
+ ndev->last_async_err.err_code = AMDXDNA_ERROR_ENCODE(err_num, err_mod);
+ ndev->last_async_err.ts_us = ktime_to_us(ktime_get_real());
+ ndev->last_async_err.ex_err_code = AMDXDNA_EXTRA_ERR_ENCODE(last_err->row, last_err->col);
+}
+
static u32 aie2_error_backtrack(struct amdxdna_dev_hdl *ndev, void *err_info, u32 num_err)
{
struct aie_error *errs = err_info;
@@ -264,29 +319,14 @@ static void aie2_error_worker(struct work_struct *err_work)
}
mutex_lock(&xdna->dev_lock);
+ aie2_update_last_async_error(e->ndev, info->payload, info->err_cnt);
+
/* Re-send this event to firmware */
if (aie2_error_event_send(e))
XDNA_WARN(xdna, "Unable to register async event");
mutex_unlock(&xdna->dev_lock);
}
-int aie2_error_async_events_send(struct amdxdna_dev_hdl *ndev)
-{
- struct amdxdna_dev *xdna = ndev->xdna;
- struct async_event *e;
- int i, ret;
-
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- for (i = 0; i < ndev->async_events->event_cnt; i++) {
- e = &ndev->async_events->event[i];
- ret = aie2_error_event_send(e);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev)
{
struct amdxdna_dev *xdna = ndev->xdna;
@@ -341,6 +381,10 @@ int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev)
e->size = ASYNC_BUF_SIZE;
e->resp.status = MAX_AIE2_STATUS_CODE;
INIT_WORK(&e->work, aie2_error_worker);
+
+ ret = aie2_error_event_send(e);
+ if (ret)
+ goto free_wq;
}
ndev->async_events = events;
@@ -349,6 +393,8 @@ int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev)
events->event_cnt, events->size);
return 0;
+free_wq:
+ destroy_workqueue(events->wq);
free_buf:
dma_free_noncoherent(xdna->ddev.dev, events->size, events->buf,
events->addr, DMA_FROM_DEVICE);
@@ -356,3 +402,18 @@ free_events:
kfree(events);
return ret;
}
+
+int aie2_get_array_async_error(struct amdxdna_dev_hdl *ndev, struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ args->num_element = 1;
+ args->element_size = sizeof(ndev->last_async_err);
+ if (copy_to_user(u64_to_user_ptr(args->buffer),
+ &ndev->last_async_err, args->element_size))
+ return -EFAULT;
+
+ return 0;
+}
diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
index 82412eec9a4b..d493bb1c3360 100644
--- a/drivers/accel/amdxdna/aie2_message.c
+++ b/drivers/accel/amdxdna/aie2_message.c
@@ -27,6 +27,8 @@
#define DECLARE_AIE2_MSG(name, op) \
DECLARE_XDNA_MSG_COMMON(name, op, MAX_AIE2_STATUS_CODE)
+#define EXEC_MSG_OPS(xdna) ((xdna)->dev_handle->exec_msg_ops)
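+/* Shorthand for the exec request builders selected by aie2_msg_init() */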
+
static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev,
struct xdna_mailbox_msg *msg)
{
@@ -37,7 +39,7 @@ static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev,
if (!ndev->mgmt_chann)
return -ENODEV;
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ drm_WARN_ON(&xdna->ddev, xdna->rpm_on && !mutex_is_locked(&xdna->dev_lock));
ret = xdna_send_msg_wait(xdna, ndev->mgmt_chann, msg);
if (ret == -ETIME) {
xdna_mailbox_stop_channel(ndev->mgmt_chann);
@@ -45,7 +47,7 @@ static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev,
ndev->mgmt_chann = NULL;
}
- if (!ret && *hdl->data != AIE2_STATUS_SUCCESS) {
+ if (!ret && *hdl->status != AIE2_STATUS_SUCCESS) {
XDNA_ERR(xdna, "command opcode 0x%x failed, status 0x%x",
-			msg->opcode, *hdl->data);
+			msg->opcode, *hdl->status);
ret = -EINVAL;
@@ -208,6 +210,14 @@ int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwct
hwctx->fw_ctx_id = resp.context_id;
WARN_ONCE(hwctx->fw_ctx_id == -1, "Unexpected context id");
+ if (ndev->force_preempt_enabled) {
+ ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_FORCE_PREEMPT, &hwctx->fw_ctx_id);
+ if (ret) {
+			XDNA_ERR(xdna, "Failed to enable force preemption, ret %d", ret);
+			goto out_destroy_context;
+ }
+ }
+
cq_pair = &resp.cq_pair[0];
x2i.mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.head_addr);
x2i.mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.tail_addr);
@@ -233,6 +243,7 @@ int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwct
ret = -EINVAL;
goto out_destroy_context;
}
+ ndev->hwctx_num++;
XDNA_DBG(xdna, "%s mailbox channel irq: %d, msix_id: %d",
hwctx->name, ret, resp.msix_id);
@@ -267,6 +278,7 @@ int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwc
hwctx->fw_ctx_id);
hwctx->priv->mbox_chann = NULL;
hwctx->fw_ctx_id = -1;
+ ndev->hwctx_num--;
return ret;
}
@@ -290,18 +302,25 @@ int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u6
return 0;
}
+static int amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx, void *arg)
+{
+ u32 *bitmap = arg;
+
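+	/* Mark the columns [start_col, start_col + num_col) used by this context */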
+ *bitmap |= GENMASK(hwctx->start_col + hwctx->num_col - 1, hwctx->start_col);
+
+ return 0;
+}
+
int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
u32 size, u32 *cols_filled)
{
DECLARE_AIE2_MSG(aie_column_info, MSG_OP_QUERY_COL_STATUS);
struct amdxdna_dev *xdna = ndev->xdna;
struct amdxdna_client *client;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
dma_addr_t dma_addr;
u32 aie_bitmap = 0;
u8 *buff_addr;
- int ret, idx;
+ int ret;
buff_addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr,
DMA_FROM_DEVICE, GFP_KERNEL);
@@ -309,12 +328,8 @@ int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
return -ENOMEM;
/* Go through each hardware context and mark the AIE columns that are active */
- list_for_each_entry(client, &xdna->client_list, node) {
- idx = srcu_read_lock(&client->hwctx_srcu);
- amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
- aie_bitmap |= amdxdna_hwctx_col_map(hwctx);
- srcu_read_unlock(&client->hwctx_srcu, idx);
- }
+ list_for_each_entry(client, &xdna->client_list, node)
+ amdxdna_hwctx_walk(client, &aie_bitmap, amdxdna_hwctx_col_map);
*cols_filled = 0;
req.dump_buff_addr = dma_addr;
@@ -329,11 +344,6 @@ int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
goto fail;
}
- if (resp.status != AIE2_STATUS_SUCCESS) {
- XDNA_ERR(xdna, "Query NPU status failed, status 0x%x", resp.status);
- ret = -EINVAL;
- goto fail;
- }
XDNA_DBG(xdna, "Query NPU status completed");
if (size < resp.size) {
@@ -355,6 +365,55 @@ fail:
return ret;
}
+int aie2_query_telemetry(struct amdxdna_dev_hdl *ndev,
+ char __user *buf, u32 size,
+ struct amdxdna_drm_query_telemetry_header *header)
+{
+ DECLARE_AIE2_MSG(get_telemetry, MSG_OP_GET_TELEMETRY);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ dma_addr_t dma_addr;
+ u8 *addr;
+ int ret;
+
+ if (header->type >= MAX_TELEMETRY_TYPE)
+ return -EINVAL;
+
+ addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr,
+ DMA_FROM_DEVICE, GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+
+ req.buf_addr = dma_addr;
+ req.buf_size = size;
+ req.type = header->type;
+
+ drm_clflush_virt_range(addr, size); /* device can access */
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret) {
+		XDNA_ERR(xdna, "Query telemetry failed, ret %d", ret);
+ goto free_buf;
+ }
+
+ if (size < resp.size) {
+ ret = -EINVAL;
+ XDNA_ERR(xdna, "Bad buffer size. Available: %u. Needs: %u", size, resp.size);
+ goto free_buf;
+ }
+
+ if (copy_to_user(buf, addr, resp.size)) {
+ ret = -EFAULT;
+ XDNA_ERR(xdna, "Failed to copy telemetry to user space");
+ goto free_buf;
+ }
+
+ header->major = resp.major;
+ header->minor = resp.minor;
+
+free_buf:
+ dma_free_noncoherent(xdna->ddev.dev, size, addr, dma_addr, DMA_FROM_DEVICE);
+ return ret;
+}
+
int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
void *handle, int (*cb)(void*, void __iomem *, size_t))
{
@@ -374,15 +433,17 @@ int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr,
return xdna_mailbox_send_msg(ndev->mgmt_chann, &msg, TX_TIMEOUT);
}
-int aie2_config_cu(struct amdxdna_hwctx *hwctx)
+int aie2_config_cu(struct amdxdna_hwctx *hwctx,
+ int (*notify_cb)(void *, void __iomem *, size_t))
{
struct mailbox_channel *chann = hwctx->priv->mbox_chann;
struct amdxdna_dev *xdna = hwctx->client->xdna;
u32 shift = xdna->dev_info->dev_mem_buf_shift;
- DECLARE_AIE2_MSG(config_cu, MSG_OP_CONFIG_CU);
+ struct config_cu_req req = { 0 };
+ struct xdna_mailbox_msg msg;
struct drm_gem_object *gobj;
struct amdxdna_gem_obj *abo;
- int ret, i;
+ int i;
if (!chann)
return -ENODEV;
@@ -420,191 +481,386 @@ int aie2_config_cu(struct amdxdna_hwctx *hwctx)
}
req.num_cus = hwctx->cus->num_cus;
- ret = xdna_send_msg_wait(xdna, chann, &msg);
- if (ret == -ETIME)
- aie2_destroy_context(xdna->dev_handle, hwctx);
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ msg.handle = hwctx;
+ msg.opcode = MSG_OP_CONFIG_CU;
+ msg.notify_cb = notify_cb;
+ return xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+}
- if (resp.status == AIE2_STATUS_SUCCESS) {
- XDNA_DBG(xdna, "Configure %d CUs, ret %d", req.num_cus, ret);
- return 0;
- }
+static int aie2_init_exec_cu_req(struct amdxdna_gem_obj *cmd_bo, void *req,
+ size_t *size, u32 *msg_op)
+{
+ struct execute_buffer_req *cu_req = req;
+ u32 cmd_len;
+ void *cmd;
- XDNA_ERR(xdna, "Command opcode 0x%x failed, status 0x%x ret %d",
- msg.opcode, resp.status, ret);
- return ret;
+ cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+	if (!cmd || cmd_len > sizeof(cu_req->payload))
+ return -EINVAL;
+
+ cu_req->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (cu_req->cu_idx == INVALID_CU_IDX)
+ return -EINVAL;
+
+ memcpy(cu_req->payload, cmd, cmd_len);
+
+ *size = sizeof(*cu_req);
+ *msg_op = MSG_OP_EXECUTE_BUFFER_CF;
+ return 0;
}
-int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
- int (*notify_cb)(void *, void __iomem *, size_t))
+static int aie2_init_exec_dpu_req(struct amdxdna_gem_obj *cmd_bo, void *req,
+ size_t *size, u32 *msg_op)
{
- struct mailbox_channel *chann = hwctx->priv->mbox_chann;
- struct amdxdna_dev *xdna = hwctx->client->xdna;
- struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
- union {
- struct execute_buffer_req ebuf;
- struct exec_dpu_req dpu;
- } req;
- struct xdna_mailbox_msg msg;
- u32 payload_len;
- void *payload;
- int cu_idx;
- int ret;
- u32 op;
+ struct exec_dpu_req *dpu_req = req;
+ struct amdxdna_cmd_start_npu *sn;
+ u32 cmd_len;
- if (!chann)
- return -ENODEV;
+ sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+	if (!sn || cmd_len - sizeof(*sn) > sizeof(dpu_req->payload))
+ return -EINVAL;
- payload = amdxdna_cmd_get_payload(cmd_abo, &payload_len);
- if (!payload) {
- XDNA_ERR(xdna, "Invalid command, cannot get payload");
+ dpu_req->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (dpu_req->cu_idx == INVALID_CU_IDX)
return -EINVAL;
- }
- cu_idx = amdxdna_cmd_get_cu_idx(cmd_abo);
- if (cu_idx < 0) {
- XDNA_DBG(xdna, "Invalid cu idx");
+ dpu_req->inst_buf_addr = sn->buffer;
+ dpu_req->inst_size = sn->buffer_size;
+ dpu_req->inst_prop_cnt = sn->prop_count;
+ memcpy(dpu_req->payload, sn->prop_args, cmd_len - sizeof(*sn));
+
+ *size = sizeof(*dpu_req);
+ *msg_op = MSG_OP_EXEC_DPU;
+ return 0;
+}
+
+static void aie2_init_exec_chain_req(void *req, u64 slot_addr, size_t size, u32 cmd_cnt)
+{
+ struct cmd_chain_req *chain_req = req;
+
+ chain_req->buf_addr = slot_addr;
+ chain_req->buf_size = size;
+ chain_req->count = cmd_cnt;
+}
+
+static void aie2_init_npu_chain_req(void *req, u64 slot_addr, size_t size, u32 cmd_cnt)
+{
+ struct cmd_chain_npu_req *npu_chain_req = req;
+
+ npu_chain_req->flags = 0;
+ npu_chain_req->reserved = 0;
+ npu_chain_req->buf_addr = slot_addr;
+ npu_chain_req->buf_size = size;
+ npu_chain_req->count = cmd_cnt;
+}
+
+static int
+aie2_cmdlist_fill_cf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
+{
+ struct cmd_chain_slot_execbuf_cf *cf_slot = slot;
+ u32 cmd_len;
+ void *cmd;
+
+ cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+	if (!cmd || *size < sizeof(*cf_slot) + cmd_len)
return -EINVAL;
- }
- op = amdxdna_cmd_get_op(cmd_abo);
- switch (op) {
+ cf_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (cf_slot->cu_idx == INVALID_CU_IDX)
+ return -EINVAL;
+
+ cf_slot->arg_cnt = cmd_len / sizeof(u32);
+ memcpy(cf_slot->args, cmd, cmd_len);
+	/* Report the exact slot size so firmware copies only what it needs */
+ *size = sizeof(*cf_slot) + cmd_len;
+ return 0;
+}
+
+static int
+aie2_cmdlist_fill_dpu(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
+{
+ struct cmd_chain_slot_dpu *dpu_slot = slot;
+ struct amdxdna_cmd_start_npu *sn;
+ u32 cmd_len;
+ u32 arg_sz;
+
+ sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ arg_sz = cmd_len - sizeof(*sn);
+	if (!sn || cmd_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE)
+ return -EINVAL;
+
+ if (*size < sizeof(*dpu_slot) + arg_sz)
+ return -EINVAL;
+
+ dpu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (dpu_slot->cu_idx == INVALID_CU_IDX)
+ return -EINVAL;
+
+ dpu_slot->inst_buf_addr = sn->buffer;
+ dpu_slot->inst_size = sn->buffer_size;
+ dpu_slot->inst_prop_cnt = sn->prop_count;
+ dpu_slot->arg_cnt = arg_sz / sizeof(u32);
+ memcpy(dpu_slot->args, sn->prop_args, arg_sz);
+
+	/* Report the exact slot size so firmware copies only what it needs */
+ *size = sizeof(*dpu_slot) + arg_sz;
+ return 0;
+}
+
+static int aie2_cmdlist_unsupp(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
+{
+ return -EOPNOTSUPP;
+}
+
+static u32 aie2_get_chain_msg_op(u32 cmd_op)
+{
+ switch (cmd_op) {
case ERT_START_CU:
- if (unlikely(payload_len > sizeof(req.ebuf.payload)))
- XDNA_DBG(xdna, "Invalid ebuf payload len: %d", payload_len);
- req.ebuf.cu_idx = cu_idx;
- memcpy(req.ebuf.payload, payload, sizeof(req.ebuf.payload));
- msg.send_size = sizeof(req.ebuf);
- msg.opcode = MSG_OP_EXECUTE_BUFFER_CF;
- break;
- case ERT_START_NPU: {
- struct amdxdna_cmd_start_npu *sn = payload;
-
- if (unlikely(payload_len - sizeof(*sn) > sizeof(req.dpu.payload)))
- XDNA_DBG(xdna, "Invalid dpu payload len: %d", payload_len);
- req.dpu.inst_buf_addr = sn->buffer;
- req.dpu.inst_size = sn->buffer_size;
- req.dpu.inst_prop_cnt = sn->prop_count;
- req.dpu.cu_idx = cu_idx;
- memcpy(req.dpu.payload, sn->prop_args, sizeof(req.dpu.payload));
- msg.send_size = sizeof(req.dpu);
- msg.opcode = MSG_OP_EXEC_DPU;
+ return MSG_OP_CHAIN_EXEC_BUFFER_CF;
+ case ERT_START_NPU:
+ return MSG_OP_CHAIN_EXEC_DPU;
+ default:
break;
}
- default:
- XDNA_DBG(xdna, "Invalid ERT cmd op code: %d", op);
+
+ return MSG_OP_MAX_OPCODE;
+}
+
+static struct aie2_exec_msg_ops legacy_exec_message_ops = {
+ .init_cu_req = aie2_init_exec_cu_req,
+ .init_dpu_req = aie2_init_exec_dpu_req,
+ .init_chain_req = aie2_init_exec_chain_req,
+ .fill_cf_slot = aie2_cmdlist_fill_cf,
+ .fill_dpu_slot = aie2_cmdlist_fill_dpu,
+ .fill_preempt_slot = aie2_cmdlist_unsupp,
+ .fill_elf_slot = aie2_cmdlist_unsupp,
+ .get_chain_msg_op = aie2_get_chain_msg_op,
+};
+
+static int
+aie2_cmdlist_fill_npu_cf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
+{
+ struct cmd_chain_slot_npu *npu_slot = slot;
+ u32 cmd_len;
+ void *cmd;
+
+ cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+	if (!cmd || *size < sizeof(*npu_slot) + cmd_len)
return -EINVAL;
- }
- msg.handle = job;
- msg.notify_cb = notify_cb;
- msg.send_data = (u8 *)&req;
- print_hex_dump_debug("cmd: ", DUMP_PREFIX_OFFSET, 16, 4, &req,
- 0x40, false);
- ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
- if (ret) {
- XDNA_ERR(xdna, "Send message failed");
- return ret;
- }
+	memset(npu_slot, 0, sizeof(*npu_slot));
+	npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+	if (npu_slot->cu_idx == INVALID_CU_IDX)
+		return -EINVAL;
+
+	npu_slot->type = EXEC_NPU_TYPE_NON_ELF;
+ npu_slot->arg_cnt = cmd_len / sizeof(u32);
+ memcpy(npu_slot->args, cmd, cmd_len);
+ *size = sizeof(*npu_slot) + cmd_len;
return 0;
}
static int
-aie2_cmdlist_fill_one_slot_cf(void *cmd_buf, u32 offset,
- struct amdxdna_gem_obj *abo, u32 *size)
+aie2_cmdlist_fill_npu_dpu(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
{
- struct cmd_chain_slot_execbuf_cf *buf = cmd_buf + offset;
- int cu_idx = amdxdna_cmd_get_cu_idx(abo);
- u32 payload_len;
- void *payload;
+ struct cmd_chain_slot_npu *npu_slot = slot;
+ struct amdxdna_cmd_start_npu *sn;
+ u32 cmd_len;
+ u32 arg_sz;
+
+ sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ arg_sz = cmd_len - sizeof(*sn);
+	if (!sn || cmd_len < sizeof(*sn) || arg_sz > MAX_NPU_ARGS_SIZE)
+ return -EINVAL;
- if (cu_idx < 0)
+ if (*size < sizeof(*npu_slot) + arg_sz)
return -EINVAL;
- payload = amdxdna_cmd_get_payload(abo, &payload_len);
- if (!payload)
+	memset(npu_slot, 0, sizeof(*npu_slot));
+	npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+	if (npu_slot->cu_idx == INVALID_CU_IDX)
 		return -EINVAL;
-	if (!slot_has_space(*buf, offset, payload_len))
-		return -ENOSPC;
+	npu_slot->type = EXEC_NPU_TYPE_PARTIAL_ELF;
+ npu_slot->inst_buf_addr = sn->buffer;
+ npu_slot->inst_size = sn->buffer_size;
+ npu_slot->inst_prop_cnt = sn->prop_count;
+ npu_slot->arg_cnt = arg_sz / sizeof(u32);
+ memcpy(npu_slot->args, sn->prop_args, arg_sz);
- buf->cu_idx = cu_idx;
- buf->arg_cnt = payload_len / sizeof(u32);
- memcpy(buf->args, payload, payload_len);
- /* Accurate buf size to hint firmware to do necessary copy */
- *size = sizeof(*buf) + payload_len;
+ *size = sizeof(*npu_slot) + arg_sz;
return 0;
}
static int
-aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset,
- struct amdxdna_gem_obj *abo, u32 *size)
+aie2_cmdlist_fill_npu_preempt(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
{
- struct cmd_chain_slot_dpu *buf = cmd_buf + offset;
- int cu_idx = amdxdna_cmd_get_cu_idx(abo);
- struct amdxdna_cmd_start_npu *sn;
- u32 payload_len;
- void *payload;
+ struct cmd_chain_slot_npu *npu_slot = slot;
+ struct amdxdna_cmd_preempt_data *pd;
+ u32 cmd_len;
u32 arg_sz;
- if (cu_idx < 0)
+ pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ arg_sz = cmd_len - sizeof(*pd);
+	if (!pd || cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE)
return -EINVAL;
- payload = amdxdna_cmd_get_payload(abo, &payload_len);
- if (!payload)
+ if (*size < sizeof(*npu_slot) + arg_sz)
return -EINVAL;
- sn = payload;
- arg_sz = payload_len - sizeof(*sn);
- if (payload_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE)
+
+	memset(npu_slot, 0, sizeof(*npu_slot));
+	npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+	if (npu_slot->cu_idx == INVALID_CU_IDX)
 		return -EINVAL;
-	if (!slot_has_space(*buf, offset, arg_sz))
-		return -ENOSPC;
+	npu_slot->type = EXEC_NPU_TYPE_PREEMPT;
+ npu_slot->inst_buf_addr = pd->inst_buf;
+ npu_slot->save_buf_addr = pd->save_buf;
+ npu_slot->restore_buf_addr = pd->restore_buf;
+ npu_slot->inst_size = pd->inst_size;
+ npu_slot->save_size = pd->save_size;
+ npu_slot->restore_size = pd->restore_size;
+ npu_slot->inst_prop_cnt = pd->inst_prop_cnt;
+ npu_slot->arg_cnt = arg_sz / sizeof(u32);
+ memcpy(npu_slot->args, pd->prop_args, arg_sz);
+
+ *size = sizeof(*npu_slot) + arg_sz;
+ return 0;
+}
+
+static int
+aie2_cmdlist_fill_npu_elf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
+{
+ struct cmd_chain_slot_npu *npu_slot = slot;
+ struct amdxdna_cmd_preempt_data *pd;
+ u32 cmd_len;
+ u32 arg_sz;
+
+ pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ arg_sz = cmd_len - sizeof(*pd);
+	if (!pd || cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE)
+ return -EINVAL;
- buf->inst_buf_addr = sn->buffer;
- buf->inst_size = sn->buffer_size;
- buf->inst_prop_cnt = sn->prop_count;
- buf->cu_idx = cu_idx;
- buf->arg_cnt = arg_sz / sizeof(u32);
- memcpy(buf->args, sn->prop_args, arg_sz);
+ if (*size < sizeof(*npu_slot) + arg_sz)
+ return -EINVAL;
- /* Accurate buf size to hint firmware to do necessary copy */
- *size = sizeof(*buf) + arg_sz;
+ memset(npu_slot, 0, sizeof(*npu_slot));
+ npu_slot->type = EXEC_NPU_TYPE_ELF;
+ npu_slot->inst_buf_addr = pd->inst_buf;
+ npu_slot->save_buf_addr = pd->save_buf;
+ npu_slot->restore_buf_addr = pd->restore_buf;
+ npu_slot->inst_size = pd->inst_size;
+ npu_slot->save_size = pd->save_size;
+ npu_slot->restore_size = pd->restore_size;
+ npu_slot->inst_prop_cnt = pd->inst_prop_cnt;
+ npu_slot->arg_cnt = 1;
+ npu_slot->args[0] = AIE2_EXEC_BUFFER_KERNEL_OP_TXN;
+
+ *size = struct_size(npu_slot, args, npu_slot->arg_cnt);
return 0;
}
-static int
-aie2_cmdlist_fill_one_slot(u32 op, struct amdxdna_gem_obj *cmdbuf_abo, u32 offset,
- struct amdxdna_gem_obj *abo, u32 *size)
+static u32 aie2_get_npu_chain_msg_op(u32 cmd_op)
+{
+ return MSG_OP_CHAIN_EXEC_NPU;
+}
+
+static struct aie2_exec_msg_ops npu_exec_message_ops = {
+ .init_cu_req = aie2_init_exec_cu_req,
+ .init_dpu_req = aie2_init_exec_dpu_req,
+ .init_chain_req = aie2_init_npu_chain_req,
+ .fill_cf_slot = aie2_cmdlist_fill_npu_cf,
+ .fill_dpu_slot = aie2_cmdlist_fill_npu_dpu,
+ .fill_preempt_slot = aie2_cmdlist_fill_npu_preempt,
+ .fill_elf_slot = aie2_cmdlist_fill_npu_elf,
+ .get_chain_msg_op = aie2_get_npu_chain_msg_op,
+};
+
+static int aie2_init_exec_req(void *req, struct amdxdna_gem_obj *cmd_abo,
+ size_t *size, u32 *msg_op)
{
- u32 this_op = amdxdna_cmd_get_op(abo);
- void *cmd_buf = cmdbuf_abo->mem.kva;
+ struct amdxdna_dev *xdna = cmd_abo->client->xdna;
int ret;
+ u32 op;
- if (this_op != op) {
- ret = -EINVAL;
- goto done;
- }
+ op = amdxdna_cmd_get_op(cmd_abo);
switch (op) {
case ERT_START_CU:
- ret = aie2_cmdlist_fill_one_slot_cf(cmd_buf, offset, abo, size);
+ ret = EXEC_MSG_OPS(xdna)->init_cu_req(cmd_abo, req, size, msg_op);
+ if (ret) {
+ XDNA_DBG(xdna, "Init CU req failed ret %d", ret);
+ return ret;
+ }
break;
case ERT_START_NPU:
- ret = aie2_cmdlist_fill_one_slot_dpu(cmd_buf, offset, abo, size);
+ ret = EXEC_MSG_OPS(xdna)->init_dpu_req(cmd_abo, req, size, msg_op);
+ if (ret) {
+ XDNA_DBG(xdna, "Init DPU req failed ret %d", ret);
+ return ret;
+ }
+
break;
default:
+ XDNA_ERR(xdna, "Unsupported op %d", op);
ret = -EOPNOTSUPP;
+ break;
}
-done:
- if (ret) {
- XDNA_ERR(abo->client->xdna, "Can't fill slot for cmd op %d ret %d",
- op, ret);
+ return ret;
+}
+
+static int
+aie2_cmdlist_fill_slot(void *slot, struct amdxdna_gem_obj *cmd_abo,
+ size_t *size, u32 *cmd_op)
+{
+ struct amdxdna_dev *xdna = cmd_abo->client->xdna;
+ int ret;
+ u32 op;
+
+ op = amdxdna_cmd_get_op(cmd_abo);
+ if (*cmd_op == ERT_INVALID_CMD)
+ *cmd_op = op;
+ else if (op != *cmd_op)
+ return -EINVAL;
+
+ switch (op) {
+ case ERT_START_CU:
+ ret = EXEC_MSG_OPS(xdna)->fill_cf_slot(cmd_abo, slot, size);
+ break;
+ case ERT_START_NPU:
+ ret = EXEC_MSG_OPS(xdna)->fill_dpu_slot(cmd_abo, slot, size);
+ break;
+ case ERT_START_NPU_PREEMPT:
+ if (!AIE2_FEATURE_ON(xdna->dev_handle, AIE2_PREEMPT))
+ return -EOPNOTSUPP;
+ ret = EXEC_MSG_OPS(xdna)->fill_preempt_slot(cmd_abo, slot, size);
+ break;
+ case ERT_START_NPU_PREEMPT_ELF:
+ if (!AIE2_FEATURE_ON(xdna->dev_handle, AIE2_PREEMPT))
+ return -EOPNOTSUPP;
+ ret = EXEC_MSG_OPS(xdna)->fill_elf_slot(cmd_abo, slot, size);
+ break;
+ default:
+ XDNA_INFO(xdna, "Unsupported op %d", op);
+ ret = -EOPNOTSUPP;
+ break;
}
+
return ret;
}
+void aie2_msg_init(struct amdxdna_dev_hdl *ndev)
+{
+ if (AIE2_FEATURE_ON(ndev, AIE2_NPU_COMMAND))
+ ndev->exec_msg_ops = &npu_exec_message_ops;
+ else
+ ndev->exec_msg_ops = &legacy_exec_message_ops;
+}
+
static inline struct amdxdna_gem_obj *
aie2_cmdlist_get_cmd_buf(struct amdxdna_sched_job *job)
{
@@ -613,29 +869,36 @@ aie2_cmdlist_get_cmd_buf(struct amdxdna_sched_job *job)
return job->hwctx->priv->cmd_buf[idx];
}
-static void
-aie2_cmdlist_prepare_request(struct cmd_chain_req *req,
- struct amdxdna_gem_obj *cmdbuf_abo, u32 size, u32 cnt)
+int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t))
{
- req->buf_addr = cmdbuf_abo->mem.dev_addr;
- req->buf_size = size;
- req->count = cnt;
- drm_clflush_virt_range(cmdbuf_abo->mem.kva, size);
- XDNA_DBG(cmdbuf_abo->client->xdna, "Command buf addr 0x%llx size 0x%x count %d",
- req->buf_addr, size, cnt);
-}
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct xdna_mailbox_msg msg;
+ union exec_req req;
+ int ret;
-static inline u32
-aie2_cmd_op_to_msg_op(u32 op)
-{
- switch (op) {
- case ERT_START_CU:
- return MSG_OP_CHAIN_EXEC_BUFFER_CF;
- case ERT_START_NPU:
- return MSG_OP_CHAIN_EXEC_DPU;
- default:
- return MSG_OP_MAX_OPCODE;
+ if (!chann)
+ return -ENODEV;
+
+ ret = aie2_init_exec_req(&req, cmd_abo, &msg.send_size, &msg.opcode);
+ if (ret)
+ return ret;
+
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ print_hex_dump_debug("cmd: ", DUMP_PREFIX_OFFSET, 16, 4, &req,
+ 0x40, false);
+
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(xdna, "Send message failed");
+ return ret;
}
+
+ return 0;
}
int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
@@ -646,12 +909,13 @@ int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
struct mailbox_channel *chann = hwctx->priv->mbox_chann;
struct amdxdna_client *client = hwctx->client;
struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct amdxdna_dev *xdna = client->xdna;
struct amdxdna_cmd_chain *payload;
struct xdna_mailbox_msg msg;
- struct cmd_chain_req req;
+ union exec_chain_req req;
u32 payload_len;
u32 offset = 0;
- u32 size;
+ size_t size;
int ret;
u32 op;
u32 i;
@@ -662,41 +926,42 @@ int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
payload_len < struct_size(payload, data, payload->command_count))
return -EINVAL;
+ op = ERT_INVALID_CMD;
for (i = 0; i < payload->command_count; i++) {
u32 boh = (u32)(payload->data[i]);
struct amdxdna_gem_obj *abo;
abo = amdxdna_gem_get_obj(client, boh, AMDXDNA_BO_CMD);
if (!abo) {
- XDNA_ERR(client->xdna, "Failed to find cmd BO %d", boh);
+ XDNA_ERR(xdna, "Failed to find cmd BO %d", boh);
return -ENOENT;
}
- /* All sub-cmd should have same op, use the first one. */
- if (i == 0)
- op = amdxdna_cmd_get_op(abo);
-
- ret = aie2_cmdlist_fill_one_slot(op, cmdbuf_abo, offset, abo, &size);
+ size = cmdbuf_abo->mem.size - offset;
+ ret = aie2_cmdlist_fill_slot(cmdbuf_abo->mem.kva + offset,
+ abo, &size, &op);
amdxdna_gem_put_obj(abo);
if (ret)
- return -EINVAL;
+ return ret;
offset += size;
}
+ msg.opcode = EXEC_MSG_OPS(xdna)->get_chain_msg_op(op);
+ if (msg.opcode == MSG_OP_MAX_OPCODE)
+ return -EOPNOTSUPP;
/* The offset is the accumulated total size of the cmd buffer */
- aie2_cmdlist_prepare_request(&req, cmdbuf_abo, offset, payload->command_count);
+ EXEC_MSG_OPS(xdna)->init_chain_req(&req, cmdbuf_abo->mem.dev_addr,
+ offset, payload->command_count);
+ drm_clflush_virt_range(cmdbuf_abo->mem.kva, offset);
- msg.opcode = aie2_cmd_op_to_msg_op(op);
- if (msg.opcode == MSG_OP_MAX_OPCODE)
- return -EOPNOTSUPP;
msg.handle = job;
msg.notify_cb = notify_cb;
msg.send_data = (u8 *)&req;
msg.send_size = sizeof(req);
ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
if (ret) {
- XDNA_ERR(hwctx->client->xdna, "Send message failed");
+ XDNA_ERR(xdna, "Send message failed");
return ret;
}
@@ -709,23 +974,27 @@ int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
{
struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job);
struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
struct xdna_mailbox_msg msg;
- struct cmd_chain_req req;
- u32 size;
+ union exec_chain_req req;
+ u32 op = ERT_INVALID_CMD;
+ size_t size;
int ret;
- u32 op;
- op = amdxdna_cmd_get_op(cmd_abo);
- ret = aie2_cmdlist_fill_one_slot(op, cmdbuf_abo, 0, cmd_abo, &size);
+ size = cmdbuf_abo->mem.size;
+ ret = aie2_cmdlist_fill_slot(cmdbuf_abo->mem.kva, cmd_abo, &size, &op);
if (ret)
return ret;
- aie2_cmdlist_prepare_request(&req, cmdbuf_abo, size, 1);
-
- msg.opcode = aie2_cmd_op_to_msg_op(op);
+ msg.opcode = EXEC_MSG_OPS(xdna)->get_chain_msg_op(op);
if (msg.opcode == MSG_OP_MAX_OPCODE)
return -EOPNOTSUPP;
+
+ EXEC_MSG_OPS(xdna)->init_chain_req(&req, cmdbuf_abo->mem.dev_addr,
+ size, 1);
+ drm_clflush_virt_range(cmdbuf_abo->mem.kva, size);
+
msg.handle = job;
msg.notify_cb = notify_cb;
msg.send_data = (u8 *)&req;
@@ -750,7 +1019,7 @@ int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
int ret = 0;
req.src_addr = 0;
- req.dst_addr = abo->mem.dev_addr - hwctx->client->dev_heap->mem.dev_addr;
+ req.dst_addr = amdxdna_dev_bo_offset(abo);
req.size = abo->mem.size;
/* Device to Host */
@@ -774,3 +1043,32 @@ int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
return 0;
}
+
+int aie2_config_debug_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t))
+{
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_gem_obj *abo = to_xdna_obj(job->bos[0]);
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct config_debug_bo_req req;
+ struct xdna_mailbox_msg msg;
+
+ if (job->drv_cmd->opcode == ATTACH_DEBUG_BO)
+ req.config = DEBUG_BO_REGISTER;
+ else
+ req.config = DEBUG_BO_UNREGISTER;
+
+ req.offset = amdxdna_dev_bo_offset(abo);
+ req.size = abo->mem.size;
+
+ XDNA_DBG(xdna, "offset 0x%llx size 0x%llx config %d",
+ req.offset, req.size, req.config);
+
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ msg.opcode = MSG_OP_CONFIG_DEBUG_BO;
+
+ return xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+}
diff --git a/drivers/accel/amdxdna/aie2_msg_priv.h b/drivers/accel/amdxdna/aie2_msg_priv.h
index 6df9065b13f6..1c957a6298d3 100644
--- a/drivers/accel/amdxdna/aie2_msg_priv.h
+++ b/drivers/accel/amdxdna/aie2_msg_priv.h
@@ -9,7 +9,8 @@
enum aie2_msg_opcode {
MSG_OP_CREATE_CONTEXT = 0x2,
MSG_OP_DESTROY_CONTEXT = 0x3,
- MSG_OP_SYNC_BO = 0x7,
+ MSG_OP_GET_TELEMETRY = 0x4,
+ MSG_OP_SYNC_BO = 0x7,
MSG_OP_EXECUTE_BUFFER_CF = 0xC,
MSG_OP_QUERY_COL_STATUS = 0xD,
MSG_OP_QUERY_AIE_TILE_INFO = 0xE,
@@ -18,6 +19,8 @@ enum aie2_msg_opcode {
MSG_OP_CONFIG_CU = 0x11,
MSG_OP_CHAIN_EXEC_BUFFER_CF = 0x12,
MSG_OP_CHAIN_EXEC_DPU = 0x13,
+ MSG_OP_CONFIG_DEBUG_BO = 0x14,
+ MSG_OP_CHAIN_EXEC_NPU = 0x18,
MSG_OP_MAX_XRT_OPCODE,
MSG_OP_SUSPEND = 0x101,
MSG_OP_RESUME = 0x102,
@@ -135,6 +138,28 @@ struct destroy_ctx_resp {
enum aie2_msg_status status;
} __packed;
+enum telemetry_type {
+ TELEMETRY_TYPE_DISABLED,
+ TELEMETRY_TYPE_HEALTH,
+ TELEMETRY_TYPE_ERROR_INFO,
+ TELEMETRY_TYPE_PROFILING,
+ TELEMETRY_TYPE_DEBUG,
+ MAX_TELEMETRY_TYPE
+};
+
+struct get_telemetry_req {
+ enum telemetry_type type;
+ __u64 buf_addr;
+ __u32 buf_size;
+} __packed;
+
+struct get_telemetry_resp {
+ __u32 major;
+ __u32 minor;
+ __u32 size;
+ enum aie2_msg_status status;
+} __packed;
+
struct execute_buffer_req {
__u32 cu_idx;
__u32 payload[19];
@@ -148,6 +173,18 @@ struct exec_dpu_req {
__u32 payload[35];
} __packed;
+enum exec_npu_type {
+ EXEC_NPU_TYPE_NON_ELF = 0x1,
+ EXEC_NPU_TYPE_PARTIAL_ELF = 0x2,
+ EXEC_NPU_TYPE_PREEMPT = 0x3,
+ EXEC_NPU_TYPE_ELF = 0x4,
+};
+
+union exec_req {
+ struct execute_buffer_req ebuf;
+ struct exec_dpu_req dpu_req;
+};
+
struct execute_buffer_resp {
enum aie2_msg_status status;
} __packed;
@@ -319,9 +356,6 @@ struct async_event_msg_resp {
} __packed;
#define MAX_CHAIN_CMDBUF_SIZE SZ_4K
-#define slot_has_space(slot, offset, payload_size) \
- (MAX_CHAIN_CMDBUF_SIZE >= (offset) + (payload_size) + \
- sizeof(typeof(slot)))
struct cmd_chain_slot_execbuf_cf {
__u32 cu_idx;
@@ -339,12 +373,41 @@ struct cmd_chain_slot_dpu {
__u32 args[] __counted_by(arg_cnt);
};
+#define MAX_NPU_ARGS_SIZE (26 * sizeof(__u32))
+#define AIE2_EXEC_BUFFER_KERNEL_OP_TXN 3
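+/* One slot layout shared by all exec_npu_type variants; fields a type does not use stay zero */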
+struct cmd_chain_slot_npu {
+ enum exec_npu_type type;
+ u64 inst_buf_addr;
+ u64 save_buf_addr;
+ u64 restore_buf_addr;
+ u32 inst_size;
+ u32 save_size;
+ u32 restore_size;
+ u32 inst_prop_cnt;
+ u32 cu_idx;
+ u32 arg_cnt;
+ u32 args[] __counted_by(arg_cnt);
+} __packed;
+
struct cmd_chain_req {
__u64 buf_addr;
__u32 buf_size;
__u32 count;
} __packed;
+struct cmd_chain_npu_req {
+ u32 flags;
+ u32 reserved;
+ u64 buf_addr;
+ u32 buf_size;
+ u32 count;
+} __packed;
+
+union exec_chain_req {
+ struct cmd_chain_npu_req npu_req;
+ struct cmd_chain_req req;
+};
+
struct cmd_chain_resp {
enum aie2_msg_status status;
__u32 fail_cmd_idx;
@@ -365,4 +428,21 @@ struct sync_bo_req {
struct sync_bo_resp {
enum aie2_msg_status status;
} __packed;
+
+#define DEBUG_BO_UNREGISTER 0
+#define DEBUG_BO_REGISTER 1
+struct config_debug_bo_req {
+ __u64 offset;
+ __u64 size;
+ /*
+ * config operations.
+ * DEBUG_BO_REGISTER: Register debug buffer
+ * DEBUG_BO_UNREGISTER: Unregister debug buffer
+ */
+ __u32 config;
+} __packed;
+
+struct config_debug_bo_resp {
+ enum aie2_msg_status status;
+} __packed;
#endif /* _AIE2_MSG_PRIV_H_ */
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
index c6cf7068d23c..ceef1c502e9e 100644
--- a/drivers/accel/amdxdna/aie2_pci.c
+++ b/drivers/accel/amdxdna/aie2_pci.c
@@ -10,6 +10,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
+#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/iommu.h>
@@ -24,6 +25,7 @@
#include "amdxdna_gem.h"
#include "amdxdna_mailbox.h"
#include "amdxdna_pci_drv.h"
+#include "amdxdna_pm.h"
static int aie2_max_col = XRS_MAX_COL;
module_param(aie2_max_col, uint, 0600);
@@ -53,6 +55,7 @@ struct mgmt_mbox_chann_info {
static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 fw_minor)
{
+ const struct aie2_fw_feature_tbl *feature;
struct amdxdna_dev *xdna = ndev->xdna;
/*
@@ -76,6 +79,17 @@ static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 f
XDNA_ERR(xdna, "Firmware minor version smaller than supported");
return -EINVAL;
}
+
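+	/* A zero min_minor entry ends the table; max_minor == 0 means no upper bound */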
+ for (feature = ndev->priv->fw_feature_tbl; feature && feature->min_minor;
+ feature++) {
+ if (fw_minor < feature->min_minor)
+ continue;
+ if (feature->max_minor > 0 && fw_minor > feature->max_minor)
+ continue;
+
+ set_bit(feature->feature, &ndev->feature_mask);
+ }
+
return 0;
}
@@ -169,6 +183,10 @@ int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
if (cfg->category != category)
continue;
+ if (cfg->feature_mask &&
+ bitmap_subset(&cfg->feature_mask, &ndev->feature_mask, AIE2_FEATURE_MAX))
+ continue;
+
value = val ? *val : cfg->value;
ret = aie2_set_runtime_cfg(ndev, cfg->type, value);
if (ret) {
@@ -222,15 +240,6 @@ static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev)
return ret;
}
- if (!ndev->async_events)
- return 0;
-
- ret = aie2_error_async_events_send(ndev);
- if (ret) {
- XDNA_ERR(ndev->xdna, "Send async events failed");
- return ret;
- }
-
return 0;
}
@@ -256,6 +265,8 @@ static int aie2_mgmt_fw_query(struct amdxdna_dev_hdl *ndev)
return ret;
}
+ ndev->total_col = min(aie2_max_col, ndev->metadata.cols);
+
return 0;
}
@@ -337,6 +348,7 @@ static void aie2_hw_stop(struct amdxdna_dev *xdna)
ndev->mbox = NULL;
aie2_psp_stop(ndev->psp_hdl);
aie2_smu_fini(ndev);
+ aie2_error_async_events_free(ndev);
pci_disable_device(pdev);
ndev->dev_status = AIE2_DEV_INIT;
@@ -423,6 +435,18 @@ static int aie2_hw_start(struct amdxdna_dev *xdna)
goto destroy_mgmt_chann;
}
+ ret = aie2_mgmt_fw_query(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to query fw, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
+ ret = aie2_error_async_events_alloc(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
ndev->dev_status = AIE2_DEV_START;
return 0;
@@ -440,6 +464,39 @@ disable_dev:
return ret;
}
+static int aie2_hw_suspend(struct amdxdna_dev *xdna)
+{
+ struct amdxdna_client *client;
+
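+	/* Scope-based guard: dev_lock stays held until this function returns */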
+ guard(mutex)(&xdna->dev_lock);
+ list_for_each_entry(client, &xdna->client_list, node)
+ aie2_hwctx_suspend(client);
+
+ aie2_hw_stop(xdna);
+
+ return 0;
+}
+
+static int aie2_hw_resume(struct amdxdna_dev *xdna)
+{
+ struct amdxdna_client *client;
+ int ret;
+
+ ret = aie2_hw_start(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "Start hardware failed, %d", ret);
+ return ret;
+ }
+
+ list_for_each_entry(client, &xdna->client_list, node) {
+ ret = aie2_hwctx_resume(client);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
static int aie2_init(struct amdxdna_dev *xdna)
{
struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
@@ -520,22 +577,15 @@ static int aie2_init(struct amdxdna_dev *xdna)
if (!ndev->psp_hdl) {
XDNA_ERR(xdna, "failed to create psp");
ret = -ENOMEM;
- goto free_irq;
+ goto release_fw;
}
xdna->dev_handle = ndev;
ret = aie2_hw_start(xdna);
if (ret) {
XDNA_ERR(xdna, "start npu failed, ret %d", ret);
- goto free_irq;
- }
-
- ret = aie2_mgmt_fw_query(ndev);
- if (ret) {
- XDNA_ERR(xdna, "Query firmware failed, ret %d", ret);
- goto stop_hw;
+ goto release_fw;
}
- ndev->total_col = min(aie2_max_col, ndev->metadata.cols);
xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1;
for (i = 0; i < xrs_cfg.clk_list.num_levels; i++)
@@ -552,34 +602,13 @@ static int aie2_init(struct amdxdna_dev *xdna)
goto stop_hw;
}
- ret = aie2_error_async_events_alloc(ndev);
- if (ret) {
- XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
- goto stop_hw;
- }
-
- ret = aie2_error_async_events_send(ndev);
- if (ret) {
- XDNA_ERR(xdna, "Send async events failed, ret %d", ret);
- goto async_event_free;
- }
-
- /* Issue a command to make sure firmware handled async events */
- ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
- if (ret) {
- XDNA_ERR(xdna, "Re-query firmware version failed");
- goto async_event_free;
- }
-
release_firmware(fw);
+ aie2_msg_init(ndev);
+ amdxdna_pm_init(xdna);
return 0;
-async_event_free:
- aie2_error_async_events_free(ndev);
stop_hw:
aie2_hw_stop(xdna);
-free_irq:
- pci_free_irq_vectors(pdev);
release_fw:
release_firmware(fw);
@@ -588,12 +617,8 @@ release_fw:
static void aie2_fini(struct amdxdna_dev *xdna)
{
- struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
- struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
-
+ amdxdna_pm_fini(xdna);
aie2_hw_stop(xdna);
- aie2_error_async_events_free(ndev);
- pci_free_irq_vectors(pdev);
}
static int aie2_get_aie_status(struct amdxdna_client *client,
@@ -752,66 +777,182 @@ static int aie2_get_clock_metadata(struct amdxdna_client *client,
return ret;
}
+static int aie2_hwctx_status_cb(struct amdxdna_hwctx *hwctx, void *arg)
+{
+ struct amdxdna_drm_hwctx_entry *tmp __free(kfree) = NULL;
+ struct amdxdna_drm_get_array *array_args = arg;
+ struct amdxdna_drm_hwctx_entry __user *buf;
+ u32 size;
+
+ if (!array_args->num_element)
+ return -EINVAL;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->pid = hwctx->client->pid;
+ tmp->context_id = hwctx->id;
+ tmp->start_col = hwctx->start_col;
+ tmp->num_col = hwctx->num_col;
+ tmp->command_submissions = hwctx->priv->seq;
+ tmp->command_completions = hwctx->priv->completed;
+ tmp->pasid = hwctx->client->pasid;
+ tmp->priority = hwctx->qos.priority;
+ tmp->gops = hwctx->qos.gops;
+ tmp->fps = hwctx->qos.fps;
+ tmp->dma_bandwidth = hwctx->qos.dma_bandwidth;
+ tmp->latency = hwctx->qos.latency;
+ tmp->frame_exec_time = hwctx->qos.frame_exec_time;
+ tmp->state = AMDXDNA_HWCTX_STATE_ACTIVE;
+
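+	/* Copy at most the caller's element size so smaller, older user structs still work */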
+ buf = u64_to_user_ptr(array_args->buffer);
+ size = min(sizeof(*tmp), array_args->element_size);
+
+ if (copy_to_user(buf, tmp, size))
+ return -EFAULT;
+
+ array_args->buffer += size;
+ array_args->num_element--;
+
+ return 0;
+}
+
static int aie2_get_hwctx_status(struct amdxdna_client *client,
struct amdxdna_drm_get_info *args)
{
- struct amdxdna_drm_query_hwctx __user *buf;
+ struct amdxdna_drm_get_array array_args;
struct amdxdna_dev *xdna = client->xdna;
- struct amdxdna_drm_query_hwctx *tmp;
struct amdxdna_client *tmp_client;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
- bool overflow = false;
- u32 req_bytes = 0;
- u32 hw_i = 0;
- int ret = 0;
- int idx;
+ int ret;
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
+ array_args.element_size = sizeof(struct amdxdna_drm_query_hwctx);
+ array_args.buffer = args->buffer;
+ array_args.num_element = args->buffer_size / array_args.element_size;
+ list_for_each_entry(tmp_client, &xdna->client_list, node) {
+ ret = amdxdna_hwctx_walk(tmp_client, &array_args,
+ aie2_hwctx_status_cb);
+ if (ret)
+ break;
+ }
+
+ args->buffer_size -= (u32)(array_args.buffer - args->buffer);
+ return 0;
+}
+
+static int aie2_query_resource_info(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+	struct amdxdna_drm_get_resource_info res_info = {};
+ const struct amdxdna_dev_priv *priv;
+ struct amdxdna_dev_hdl *ndev;
+ struct amdxdna_dev *xdna;
+
+ xdna = client->xdna;
+ ndev = xdna->dev_handle;
+ priv = ndev->priv;
+
+ res_info.npu_clk_max = priv->dpm_clk_tbl[ndev->max_dpm_level].hclk;
+ res_info.npu_tops_max = ndev->max_tops;
+ res_info.npu_task_max = priv->hwctx_limit;
+ res_info.npu_tops_curr = ndev->curr_tops;
+ res_info.npu_task_curr = ndev->hwctx_num;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &res_info, sizeof(res_info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int aie2_fill_hwctx_map(struct amdxdna_hwctx *hwctx, void *arg)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ u32 *map = arg;
+
+ if (hwctx->fw_ctx_id >= xdna->dev_handle->priv->hwctx_limit) {
+		XDNA_ERR(xdna, "Invalid fw ctx id %d/%d", hwctx->fw_ctx_id,
+			 xdna->dev_handle->priv->hwctx_limit);
+ return -EINVAL;
+ }
+
+ map[hwctx->fw_ctx_id] = hwctx->id;
+ return 0;
+}
+
+static int aie2_get_telemetry(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_telemetry_header *header __free(kfree) = NULL;
+ u32 telemetry_data_sz, header_sz, elem_num;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_client *tmp_client;
+ int ret;
+
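+	/* Buffer layout: header with one map slot per firmware context, then raw telemetry data */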
+ elem_num = xdna->dev_handle->priv->hwctx_limit;
+ header_sz = struct_size(header, map, elem_num);
+ if (args->buffer_size <= header_sz) {
+ XDNA_ERR(xdna, "Invalid buffer size");
+ return -EINVAL;
+ }
+
+ telemetry_data_sz = args->buffer_size - header_sz;
+ if (telemetry_data_sz > SZ_4M) {
+ XDNA_ERR(xdna, "Buffer size is too big, %d", telemetry_data_sz);
+ return -EINVAL;
+ }
+
+ header = kzalloc(header_sz, GFP_KERNEL);
+ if (!header)
return -ENOMEM;
- buf = u64_to_user_ptr(args->buffer);
+ if (copy_from_user(header, u64_to_user_ptr(args->buffer), sizeof(*header))) {
+ XDNA_ERR(xdna, "Failed to copy telemetry header from user");
+ return -EFAULT;
+ }
+
+ header->map_num_elements = elem_num;
list_for_each_entry(tmp_client, &xdna->client_list, node) {
- idx = srcu_read_lock(&tmp_client->hwctx_srcu);
- amdxdna_for_each_hwctx(tmp_client, hwctx_id, hwctx) {
- req_bytes += sizeof(*tmp);
- if (args->buffer_size < req_bytes) {
- /* Continue iterating to get the required size */
- overflow = true;
- continue;
- }
-
- memset(tmp, 0, sizeof(*tmp));
- tmp->pid = tmp_client->pid;
- tmp->context_id = hwctx->id;
- tmp->start_col = hwctx->start_col;
- tmp->num_col = hwctx->num_col;
- tmp->command_submissions = hwctx->priv->seq;
- tmp->command_completions = hwctx->priv->completed;
-
- if (copy_to_user(&buf[hw_i], tmp, sizeof(*tmp))) {
- ret = -EFAULT;
- srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
- goto out;
- }
- hw_i++;
- }
- srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
+ ret = amdxdna_hwctx_walk(tmp_client, &header->map,
+ aie2_fill_hwctx_map);
+ if (ret)
+ return ret;
}
- if (overflow) {
- XDNA_ERR(xdna, "Invalid buffer size. Given: %u Need: %u.",
- args->buffer_size, req_bytes);
- ret = -EINVAL;
+ ret = aie2_query_telemetry(xdna->dev_handle,
+ u64_to_user_ptr(args->buffer + header_sz),
+ telemetry_data_sz, header);
+ if (ret) {
+ XDNA_ERR(xdna, "Query telemetry failed ret %d", ret);
+ return ret;
}
-out:
- kfree(tmp);
- args->buffer_size = req_bytes;
- return ret;
+ if (copy_to_user(u64_to_user_ptr(args->buffer), header, header_sz)) {
+ XDNA_ERR(xdna, "Copy header failed");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int aie2_get_preempt_state(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_attribute_state state = {};
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+
+ ndev = xdna->dev_handle;
+ if (args->param == DRM_AMDXDNA_GET_FORCE_PREEMPT_STATE)
+ state.state = ndev->force_preempt_enabled;
+ else if (args->param == DRM_AMDXDNA_GET_FRAME_BOUNDARY_PREEMPT_STATE)
+ state.state = ndev->frame_boundary_preempt;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &state, sizeof(state)))
+ return -EFAULT;
+
+ return 0;
}
static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_info *args)
@@ -822,6 +963,10 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
if (!drm_dev_enter(&xdna->ddev, &idx))
return -ENODEV;
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto dev_exit;
+
switch (args->param) {
case DRM_AMDXDNA_QUERY_AIE_STATUS:
ret = aie2_get_aie_status(client, args);
@@ -844,12 +989,93 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
case DRM_AMDXDNA_GET_POWER_MODE:
ret = aie2_get_power_mode(client, args);
break;
+ case DRM_AMDXDNA_QUERY_TELEMETRY:
+ ret = aie2_get_telemetry(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_RESOURCE_INFO:
+ ret = aie2_query_resource_info(client, args);
+ break;
+ case DRM_AMDXDNA_GET_FORCE_PREEMPT_STATE:
+ case DRM_AMDXDNA_GET_FRAME_BOUNDARY_PREEMPT_STATE:
+ ret = aie2_get_preempt_state(client, args);
+ break;
default:
XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
ret = -EOPNOTSUPP;
}
+
+ amdxdna_pm_suspend_put(xdna);
XDNA_DBG(xdna, "Got param %d", args->param);
+dev_exit:
+ drm_dev_exit(idx);
+ return ret;
+}
+
+static int aie2_query_ctx_status_array(struct amdxdna_client *client,
+ struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_drm_get_array array_args;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_client *tmp_client;
+ int ret;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ if (args->element_size > SZ_4K || args->num_element > SZ_1K) {
+ XDNA_DBG(xdna, "Invalid element size %d or number of element %d",
+ args->element_size, args->num_element);
+ return -EINVAL;
+ }
+
+ array_args.element_size = min(args->element_size,
+ sizeof(struct amdxdna_drm_hwctx_entry));
+ array_args.buffer = args->buffer;
+ array_args.num_element = args->num_element * args->element_size /
+ array_args.element_size;
+ list_for_each_entry(tmp_client, &xdna->client_list, node) {
+ ret = amdxdna_hwctx_walk(tmp_client, &array_args,
+ aie2_hwctx_status_cb);
+ if (ret)
+ break;
+ }
+
+ args->element_size = array_args.element_size;
+ args->num_element = (u32)((array_args.buffer - args->buffer) /
+ args->element_size);
+
+ return 0;
+}
+
+static int aie2_get_array(struct amdxdna_client *client,
+ struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ int ret, idx;
+
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto dev_exit;
+
+ switch (args->param) {
+ case DRM_AMDXDNA_HW_CONTEXT_ALL:
+ ret = aie2_query_ctx_status_array(client, args);
+ break;
+ case DRM_AMDXDNA_HW_LAST_ASYNC_ERR:
+ ret = aie2_get_array_async_error(xdna->dev_handle, args);
+ break;
+ default:
+ XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
+ ret = -EOPNOTSUPP;
+ }
+
+ amdxdna_pm_suspend_put(xdna);
+ XDNA_DBG(xdna, "Got param %d", args->param);
+
+dev_exit:
drm_dev_exit(idx);
return ret;
}
@@ -879,6 +1105,38 @@ static int aie2_set_power_mode(struct amdxdna_client *client,
return aie2_pm_set_mode(xdna->dev_handle, power_mode);
}
+static int aie2_set_preempt_state(struct amdxdna_client *client,
+ struct amdxdna_drm_set_state *args)
+{
+ struct amdxdna_dev_hdl *ndev = client->xdna->dev_handle;
+ struct amdxdna_drm_attribute_state state;
+ u32 val;
+ int ret;
+
+ if (copy_from_user(&state, u64_to_user_ptr(args->buffer), sizeof(state)))
+ return -EFAULT;
+
+ if (state.state > 1)
+ return -EINVAL;
+
+ if (XDNA_MBZ_DBG(client->xdna, state.pad, sizeof(state.pad)))
+ return -EINVAL;
+
+ if (args->param == DRM_AMDXDNA_SET_FORCE_PREEMPT) {
+ ndev->force_preempt_enabled = state.state;
+ } else if (args->param == DRM_AMDXDNA_SET_FRAME_BOUNDARY_PREEMPT) {
+ val = state.state;
+ ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_FRAME_BOUNDARY_PREEMPT,
+ &val);
+ if (ret)
+ return ret;
+
+ ndev->frame_boundary_preempt = state.state;
+ }
+
+ return 0;
+}
+
static int aie2_set_state(struct amdxdna_client *client,
struct amdxdna_drm_set_state *args)
{
@@ -888,32 +1146,42 @@ static int aie2_set_state(struct amdxdna_client *client,
if (!drm_dev_enter(&xdna->ddev, &idx))
return -ENODEV;
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto dev_exit;
+
switch (args->param) {
case DRM_AMDXDNA_SET_POWER_MODE:
ret = aie2_set_power_mode(client, args);
break;
+ case DRM_AMDXDNA_SET_FORCE_PREEMPT:
+ case DRM_AMDXDNA_SET_FRAME_BOUNDARY_PREEMPT:
+ ret = aie2_set_preempt_state(client, args);
+ break;
default:
XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
ret = -EOPNOTSUPP;
break;
}
+ amdxdna_pm_suspend_put(xdna);
+dev_exit:
drm_dev_exit(idx);
return ret;
}
const struct amdxdna_dev_ops aie2_ops = {
- .init = aie2_init,
- .fini = aie2_fini,
- .resume = aie2_hw_start,
- .suspend = aie2_hw_stop,
- .get_aie_info = aie2_get_info,
- .set_aie_state = aie2_set_state,
- .hwctx_init = aie2_hwctx_init,
- .hwctx_fini = aie2_hwctx_fini,
- .hwctx_config = aie2_hwctx_config,
- .cmd_submit = aie2_cmd_submit,
+ .init = aie2_init,
+ .fini = aie2_fini,
+ .resume = aie2_hw_resume,
+ .suspend = aie2_hw_suspend,
+ .get_aie_info = aie2_get_info,
+ .set_aie_state = aie2_set_state,
+ .hwctx_init = aie2_hwctx_init,
+ .hwctx_fini = aie2_hwctx_fini,
+ .hwctx_config = aie2_hwctx_config,
+ .hwctx_sync_debug_bo = aie2_hwctx_sync_debug_bo,
+ .cmd_submit = aie2_cmd_submit,
.hmm_invalidate = aie2_hmm_invalidate,
- .hwctx_suspend = aie2_hwctx_suspend,
- .hwctx_resume = aie2_hwctx_resume,
+ .get_array = aie2_get_array,
};
diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h
index 385914840eaa..a5f9c42155d1 100644
--- a/drivers/accel/amdxdna/aie2_pci.h
+++ b/drivers/accel/amdxdna/aie2_pci.h
@@ -110,12 +110,15 @@ struct aie_metadata {
enum rt_config_category {
AIE2_RT_CFG_INIT,
AIE2_RT_CFG_CLK_GATING,
+ AIE2_RT_CFG_FORCE_PREEMPT,
+ AIE2_RT_CFG_FRAME_BOUNDARY_PREEMPT,
};
struct rt_config {
u32 type;
u32 value;
u32 category;
+ unsigned long feature_mask;
};
struct dpm_clk_freq {
@@ -156,6 +159,19 @@ enum aie2_dev_status {
AIE2_DEV_START,
};
+struct aie2_exec_msg_ops {
+ int (*init_cu_req)(struct amdxdna_gem_obj *cmd_bo, void *req,
+ size_t *size, u32 *msg_op);
+ int (*init_dpu_req)(struct amdxdna_gem_obj *cmd_bo, void *req,
+ size_t *size, u32 *msg_op);
+ void (*init_chain_req)(void *req, u64 slot_addr, size_t size, u32 cmd_cnt);
+ int (*fill_cf_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size);
+ int (*fill_dpu_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size);
+ int (*fill_preempt_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size);
+ int (*fill_elf_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size);
+ u32 (*get_chain_msg_op)(u32 cmd_op);
+};
+
struct amdxdna_dev_hdl {
struct amdxdna_dev *xdna;
const struct amdxdna_dev_priv *priv;
@@ -173,6 +189,8 @@ struct amdxdna_dev_hdl {
u32 total_col;
struct aie_version version;
struct aie_metadata metadata;
+ unsigned long feature_mask;
+ struct aie2_exec_msg_ops *exec_msg_ops;
/* power management and clock*/
enum amdxdna_power_mode_type pw_mode;
@@ -182,6 +200,10 @@ struct amdxdna_dev_hdl {
u32 clk_gating;
u32 npuclk_freq;
u32 hclk_freq;
+ u32 max_tops;
+ u32 curr_tops;
+ u32 force_preempt_enabled;
+ u32 frame_boundary_preempt;
/* Mailbox and the management channel */
struct mailbox *mbox;
@@ -190,6 +212,8 @@ struct amdxdna_dev_hdl {
enum aie2_dev_status dev_status;
u32 hwctx_num;
+
+ struct amdxdna_async_error last_async_err;
};
#define DEFINE_BAR_OFFSET(reg_name, bar, reg_addr) \
@@ -204,12 +228,27 @@ struct aie2_hw_ops {
int (*set_dpm)(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
};
+enum aie2_fw_feature {
+ AIE2_NPU_COMMAND,
+ AIE2_PREEMPT,
+ AIE2_FEATURE_MAX
+};
+
+struct aie2_fw_feature_tbl {
+ enum aie2_fw_feature feature;
+ u32 max_minor;
+ u32 min_minor;
+};
+
+#define AIE2_FEATURE_ON(ndev, feature) test_bit(feature, &(ndev)->feature_mask)
+
struct amdxdna_dev_priv {
const char *fw_path;
u64 protocol_major;
u64 protocol_minor;
const struct rt_config *rt_config;
const struct dpm_clk_freq *dpm_clk_tbl;
+ const struct aie2_fw_feature_tbl *fw_feature_tbl;
#define COL_ALIGN_NONE 0
#define COL_ALIGN_NATURE 1
@@ -217,6 +256,7 @@ struct amdxdna_dev_priv {
u32 mbox_dev_addr;
/* If mbox_size is 0, use BAR size. See MBOX_SIZE macro */
u32 mbox_size;
+ u32 hwctx_limit;
u32 sram_dev_addr;
struct aie2_bar_off_pair sram_offs[SRAM_MAX_INDEX];
struct aie2_bar_off_pair psp_regs_off[PSP_MAX_REGS];
@@ -234,6 +274,7 @@ extern const struct dpm_clk_freq npu1_dpm_clk_table[];
extern const struct dpm_clk_freq npu4_dpm_clk_table[];
extern const struct rt_config npu1_default_rt_cfg[];
extern const struct rt_config npu4_default_rt_cfg[];
+extern const struct aie2_fw_feature_tbl npu4_fw_feature_table[];
/* aie2_smu.c */
int aie2_smu_init(struct amdxdna_dev_hdl *ndev);
@@ -253,10 +294,12 @@ void aie2_psp_stop(struct psp_device *psp);
/* aie2_error.c */
int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev);
void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev);
-int aie2_error_async_events_send(struct amdxdna_dev_hdl *ndev);
int aie2_error_async_msg_thread(void *data);
+int aie2_get_array_async_error(struct amdxdna_dev_hdl *ndev,
+ struct amdxdna_drm_get_array *args);
/* aie2_message.c */
+void aie2_msg_init(struct amdxdna_dev_hdl *ndev);
int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev);
int aie2_resume_fw(struct amdxdna_dev_hdl *ndev);
int aie2_set_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 value);
@@ -270,9 +313,13 @@ int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwct
int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx);
int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u64 size);
int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, u32 size, u32 *cols_filled);
+int aie2_query_telemetry(struct amdxdna_dev_hdl *ndev,
+ char __user *buf, u32 size,
+ struct amdxdna_drm_query_telemetry_header *header);
int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
void *handle, int (*cb)(void*, void __iomem *, size_t));
-int aie2_config_cu(struct amdxdna_hwctx *hwctx);
+int aie2_config_cu(struct amdxdna_hwctx *hwctx,
+ int (*notify_cb)(void *, void __iomem *, size_t));
int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
int (*notify_cb)(void *, void __iomem *, size_t));
int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
@@ -283,15 +330,17 @@ int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
int (*notify_cb)(void *, void __iomem *, size_t));
int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
int (*notify_cb)(void *, void __iomem *, size_t));
+int aie2_config_debug_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t));
/* aie2_hwctx.c */
int aie2_hwctx_init(struct amdxdna_hwctx *hwctx);
void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx);
int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
-void aie2_hwctx_suspend(struct amdxdna_hwctx *hwctx);
-void aie2_hwctx_resume(struct amdxdna_hwctx *hwctx);
+int aie2_hwctx_sync_debug_bo(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl);
+void aie2_hwctx_suspend(struct amdxdna_client *client);
+int aie2_hwctx_resume(struct amdxdna_client *client);
int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
-void aie2_restart_ctx(struct amdxdna_client *client);
#endif /* _AIE2_PCI_H_ */
diff --git a/drivers/accel/amdxdna/aie2_psp.c b/drivers/accel/amdxdna/aie2_psp.c
index dc3a072ce3b6..f28a060a8810 100644
--- a/drivers/accel/amdxdna/aie2_psp.c
+++ b/drivers/accel/amdxdna/aie2_psp.c
@@ -126,8 +126,8 @@ struct psp_device *aie2m_psp_create(struct drm_device *ddev, struct psp_config *
psp->ddev = ddev;
memcpy(psp->psp_regs, conf->psp_regs, sizeof(psp->psp_regs));
- psp->fw_buf_sz = ALIGN(conf->fw_size, PSP_FW_ALIGN) + PSP_FW_ALIGN;
- psp->fw_buffer = drmm_kmalloc(ddev, psp->fw_buf_sz, GFP_KERNEL);
+ psp->fw_buf_sz = ALIGN(conf->fw_size, PSP_FW_ALIGN);
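+	/* Pad the allocation by one alignment unit so the start can be aligned up without losing usable space */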
+ psp->fw_buffer = drmm_kmalloc(ddev, psp->fw_buf_sz + PSP_FW_ALIGN, GFP_KERNEL);
if (!psp->fw_buffer) {
drm_err(ddev, "no memory for fw buffer");
return NULL;
diff --git a/drivers/accel/amdxdna/aie2_smu.c b/drivers/accel/amdxdna/aie2_smu.c
index d303701b0ded..bd94ee96c2bc 100644
--- a/drivers/accel/amdxdna/aie2_smu.c
+++ b/drivers/accel/amdxdna/aie2_smu.c
@@ -11,6 +11,7 @@
#include "aie2_pci.h"
#include "amdxdna_pci_drv.h"
+#include "amdxdna_pm.h"
#define SMU_RESULT_OK 1
@@ -22,6 +23,13 @@
#define AIE2_SMU_SET_SOFT_DPMLEVEL 0x7
#define AIE2_SMU_SET_HARD_DPMLEVEL 0x8
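+/* Rough TOPS at a DPM level; appears to assume 4096 ops per column per hclk cycle, with hclk in MHz */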
+#define NPU4_DPM_TOPS(ndev, dpm_level) \
+({ \
+ typeof(ndev) _ndev = ndev; \
+ (4096 * (_ndev)->total_col * \
+ (_ndev)->priv->dpm_clk_tbl[dpm_level].hclk / 1000000); \
+})
+
static int aie2_smu_exec(struct amdxdna_dev_hdl *ndev, u32 reg_cmd,
u32 reg_arg, u32 *out)
{
@@ -59,12 +67,16 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
u32 freq;
int ret;
+ ret = amdxdna_pm_resume_get(ndev->xdna);
+ if (ret)
+ return ret;
+
ret = aie2_smu_exec(ndev, AIE2_SMU_SET_MPNPUCLK_FREQ,
ndev->priv->dpm_clk_tbl[dpm_level].npuclk, &freq);
if (ret) {
XDNA_ERR(ndev->xdna, "Set npu clock to %d failed, ret %d\n",
ndev->priv->dpm_clk_tbl[dpm_level].npuclk, ret);
- return ret;
+ goto suspend_put;
}
ndev->npuclk_freq = freq;
@@ -73,49 +85,78 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
if (ret) {
XDNA_ERR(ndev->xdna, "Set h clock to %d failed, ret %d\n",
ndev->priv->dpm_clk_tbl[dpm_level].hclk, ret);
- return ret;
+ goto suspend_put;
}
+
+ amdxdna_pm_suspend_put(ndev->xdna);
ndev->hclk_freq = freq;
ndev->dpm_level = dpm_level;
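+	/* Peak is taken as 2 TOPS per column; current TOPS scales with hclk against its assumed 1028 MHz maximum */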
+ ndev->max_tops = 2 * ndev->total_col;
+ ndev->curr_tops = ndev->max_tops * freq / 1028;
XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n",
ndev->npuclk_freq, ndev->hclk_freq);
return 0;
+
+suspend_put:
+ amdxdna_pm_suspend_put(ndev->xdna);
+ return ret;
}
int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
{
int ret;
+ ret = amdxdna_pm_resume_get(ndev->xdna);
+ if (ret)
+ return ret;
+
ret = aie2_smu_exec(ndev, AIE2_SMU_SET_HARD_DPMLEVEL, dpm_level, NULL);
if (ret) {
XDNA_ERR(ndev->xdna, "Set hard dpm level %d failed, ret %d ",
dpm_level, ret);
- return ret;
+ goto suspend_put;
}
ret = aie2_smu_exec(ndev, AIE2_SMU_SET_SOFT_DPMLEVEL, dpm_level, NULL);
if (ret) {
XDNA_ERR(ndev->xdna, "Set soft dpm level %d failed, ret %d",
dpm_level, ret);
- return ret;
+ goto suspend_put;
}
+ amdxdna_pm_suspend_put(ndev->xdna);
ndev->npuclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].npuclk;
ndev->hclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].hclk;
ndev->dpm_level = dpm_level;
+ ndev->max_tops = NPU4_DPM_TOPS(ndev, ndev->max_dpm_level);
+ ndev->curr_tops = NPU4_DPM_TOPS(ndev, dpm_level);
XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n",
ndev->npuclk_freq, ndev->hclk_freq);
return 0;
+
+suspend_put:
+ amdxdna_pm_suspend_put(ndev->xdna);
+ return ret;
}
int aie2_smu_init(struct amdxdna_dev_hdl *ndev)
{
int ret;
+ /*
+ * Failing to set power off indicates an unrecoverable hardware or
+ * firmware error.
+ */
+ ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_OFF, 0, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Power off failed, ret %d", ret);
+ return ret;
+ }
+
ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_ON, 0, NULL);
if (ret) {
XDNA_ERR(ndev->xdna, "Power on failed, ret %d", ret);
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
index be073224bd69..d17aef89a0ad 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.c
+++ b/drivers/accel/amdxdna/amdxdna_ctx.c
@@ -60,32 +60,6 @@ static struct dma_fence *amdxdna_fence_create(struct amdxdna_hwctx *hwctx)
return &fence->base;
}
-void amdxdna_hwctx_suspend(struct amdxdna_client *client)
-{
- struct amdxdna_dev *xdna = client->xdna;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
-
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- mutex_lock(&client->hwctx_lock);
- amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
- xdna->dev_info->ops->hwctx_suspend(hwctx);
- mutex_unlock(&client->hwctx_lock);
-}
-
-void amdxdna_hwctx_resume(struct amdxdna_client *client)
-{
- struct amdxdna_dev *xdna = client->xdna;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
-
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- mutex_lock(&client->hwctx_lock);
- amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
- xdna->dev_info->ops->hwctx_resume(hwctx);
- mutex_unlock(&client->hwctx_lock);
-}
-
static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
struct srcu_struct *ss)
{
@@ -94,14 +68,30 @@ static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
synchronize_srcu(ss);
/* At this point, user is not able to submit new commands */
- mutex_lock(&xdna->dev_lock);
xdna->dev_info->ops->hwctx_fini(hwctx);
- mutex_unlock(&xdna->dev_lock);
kfree(hwctx->name);
kfree(hwctx);
}
+int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
+ int (*walk)(struct amdxdna_hwctx *hwctx, void *arg))
+{
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+ int ret = 0, idx;
+
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
+ ret = walk(hwctx, arg);
+ if (ret)
+ break;
+ }
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+
+ return ret;
+}
+
void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
{
struct amdxdna_cmd *cmd = abo->mem.kva;
@@ -123,14 +113,14 @@ void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
return &cmd->data[num_masks];
}
-int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo)
+u32 amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo)
{
struct amdxdna_cmd *cmd = abo->mem.kva;
u32 num_masks, i;
u32 *cu_mask;
if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
- return -1;
+ return INVALID_CU_IDX;
num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
cu_mask = cmd->data;
@@ -139,7 +129,7 @@ int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo)
return ffs(cu_mask[i]) - 1;
}
- return -1;
+ return INVALID_CU_IDX;
}
/*
@@ -152,16 +142,12 @@ void amdxdna_hwctx_remove_all(struct amdxdna_client *client)
struct amdxdna_hwctx *hwctx;
unsigned long hwctx_id;
- mutex_lock(&client->hwctx_lock);
amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
XDNA_DBG(client->xdna, "PID %d close HW context %d",
client->pid, hwctx->id);
xa_erase(&client->hwctx_xa, hwctx->id);
- mutex_unlock(&client->hwctx_lock);
amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
- mutex_lock(&client->hwctx_lock);
}
- mutex_unlock(&client->hwctx_lock);
}
int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
@@ -175,19 +161,14 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
if (args->ext || args->ext_flags)
return -EINVAL;
- if (!drm_dev_enter(dev, &idx))
- return -ENODEV;
-
hwctx = kzalloc(sizeof(*hwctx), GFP_KERNEL);
- if (!hwctx) {
- ret = -ENOMEM;
- goto exit;
- }
+ if (!hwctx)
+ return -ENOMEM;
if (copy_from_user(&hwctx->qos, u64_to_user_ptr(args->qos_p), sizeof(hwctx->qos))) {
XDNA_ERR(xdna, "Access QoS info failed");
- ret = -EFAULT;
- goto free_hwctx;
+ kfree(hwctx);
+ return -EFAULT;
}
hwctx->client = client;
@@ -195,30 +176,36 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
hwctx->num_tiles = args->num_tiles;
hwctx->mem_size = args->mem_size;
hwctx->max_opc = args->max_opc;
- ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx,
- XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID),
- &client->next_hwctxid, GFP_KERNEL);
- if (ret < 0) {
- XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret);
+
+ guard(mutex)(&xdna->dev_lock);
+
+ if (!drm_dev_enter(dev, &idx)) {
+ ret = -ENODEV;
goto free_hwctx;
}
- hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->id);
+ ret = xdna->dev_info->ops->hwctx_init(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret);
+ goto dev_exit;
+ }
+
+ hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->fw_ctx_id);
if (!hwctx->name) {
ret = -ENOMEM;
- goto rm_id;
+ goto fini_hwctx;
}
- mutex_lock(&xdna->dev_lock);
- ret = xdna->dev_info->ops->hwctx_init(hwctx);
- if (ret) {
- mutex_unlock(&xdna->dev_lock);
- XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret);
+ ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx,
+ XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID),
+ &client->next_hwctxid, GFP_KERNEL);
+ if (ret < 0) {
+ XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret);
goto free_name;
}
+
args->handle = hwctx->id;
args->syncobj_handle = hwctx->syncobj_hdl;
- mutex_unlock(&xdna->dev_lock);
atomic64_set(&hwctx->job_submit_cnt, 0);
atomic64_set(&hwctx->job_free_cnt, 0);
@@ -228,12 +215,12 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
free_name:
kfree(hwctx->name);
-rm_id:
- xa_erase(&client->hwctx_xa, hwctx->id);
+fini_hwctx:
+ xdna->dev_info->ops->hwctx_fini(hwctx);
+dev_exit:
+ drm_dev_exit(idx);
free_hwctx:
kfree(hwctx);
-exit:
- drm_dev_exit(idx);
return ret;
}
@@ -251,6 +238,7 @@ int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct d
if (!drm_dev_enter(dev, &idx))
return -ENODEV;
+ mutex_lock(&xdna->dev_lock);
hwctx = xa_erase(&client->hwctx_xa, args->handle);
if (!hwctx) {
ret = -EINVAL;
@@ -267,6 +255,7 @@ int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct d
XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle);
out:
+ mutex_unlock(&xdna->dev_lock);
drm_dev_exit(idx);
return ret;
}
@@ -339,6 +328,38 @@ unlock_srcu:
return ret;
}
+int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_hwctx *hwctx;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ int ret, idx;
+
+ if (!xdna->dev_info->ops->hwctx_sync_debug_bo)
+ return -EOPNOTSUPP;
+
+ gobj = drm_gem_object_lookup(client->filp, debug_bo_hdl);
+ if (!gobj)
+ return -EINVAL;
+
+ abo = to_xdna_obj(gobj);
+ guard(mutex)(&xdna->dev_lock);
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx);
+ if (!hwctx) {
+ ret = -EINVAL;
+ goto unlock_srcu;
+ }
+
+ ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx, debug_bo_hdl);
+
+unlock_srcu:
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ drm_gem_object_put(gobj);
+ return ret;
+}
+
static void
amdxdna_arg_bos_put(struct amdxdna_sched_job *job)
{
@@ -401,9 +422,11 @@ void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job)
trace_amdxdna_debug_point(job->hwctx->name, job->seq, "job release");
amdxdna_arg_bos_put(job);
amdxdna_gem_put_obj(job->cmd_bo);
+ dma_fence_put(job->fence);
}
int amdxdna_cmd_submit(struct amdxdna_client *client,
+ struct amdxdna_drv_cmd *drv_cmd,
u32 cmd_bo_hdl, u32 *arg_bo_hdls, u32 arg_bo_cnt,
u32 hwctx_hdl, u64 *seq)
{
@@ -417,6 +440,8 @@ int amdxdna_cmd_submit(struct amdxdna_client *client,
if (!job)
return -ENOMEM;
+ job->drv_cmd = drv_cmd;
+
if (cmd_bo_hdl != AMDXDNA_INVALID_BO_HANDLE) {
job->cmd_bo = amdxdna_gem_get_obj(client, cmd_bo_hdl, AMDXDNA_BO_CMD);
if (!job->cmd_bo) {
@@ -424,8 +449,6 @@ int amdxdna_cmd_submit(struct amdxdna_client *client,
ret = -EINVAL;
goto free_job;
}
- } else {
- job->cmd_bo = NULL;
}
ret = amdxdna_arg_bos_lookup(client, job, arg_bo_hdls, arg_bo_cnt);
@@ -443,11 +466,6 @@ int amdxdna_cmd_submit(struct amdxdna_client *client,
goto unlock_srcu;
}
- if (hwctx->status != HWCTX_STAT_READY) {
- XDNA_ERR(xdna, "HW Context is not ready");
- ret = -EINVAL;
- goto unlock_srcu;
- }
job->hwctx = hwctx;
job->mm = current->mm;
@@ -524,7 +542,7 @@ static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
}
}
- ret = amdxdna_cmd_submit(client, cmd_bo_hdl, arg_bo_hdls,
+ ret = amdxdna_cmd_submit(client, NULL, cmd_bo_hdl, arg_bo_hdls,
args->arg_count, args->hwctx, &args->seq);
if (ret)
XDNA_DBG(xdna, "Submit cmds failed, ret %d", ret);
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h
index f0a4a8586d85..b6151244d64f 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.h
+++ b/drivers/accel/amdxdna/amdxdna_ctx.h
@@ -13,9 +13,12 @@
struct amdxdna_hwctx_priv;
enum ert_cmd_opcode {
- ERT_START_CU = 0,
- ERT_CMD_CHAIN = 19,
- ERT_START_NPU = 20,
+ ERT_START_CU = 0,
+ ERT_CMD_CHAIN = 19,
+ ERT_START_NPU = 20,
+ ERT_START_NPU_PREEMPT = 21,
+ ERT_START_NPU_PREEMPT_ELF = 22,
+ ERT_INVALID_CMD = ~0U,
};
enum ert_cmd_state {
@@ -54,6 +57,21 @@ struct amdxdna_cmd_chain {
u64 data[] __counted_by(command_count);
};
+/*
+ * Interpretation of the beginning of data payload for ERT_START_NPU_PREEMPT in
+ * amdxdna_cmd. The rest of the payload in amdxdna_cmd is regular kernel args.
+ */
+struct amdxdna_cmd_preempt_data {
+ u64 inst_buf; /* instruction buffer address */
+ u64 save_buf; /* save buffer address */
+ u64 restore_buf; /* restore buffer address */
+ u32 inst_size; /* size of instruction buffer in bytes */
+ u32 save_size; /* size of save buffer in bytes */
+ u32 restore_size; /* size of restore buffer in bytes */
+ u32 inst_prop_cnt; /* properties count */
+ u32 prop_args[]; /* properties and regular kernel arguments */
+};
+
/* Exec buffer command header format */
#define AMDXDNA_CMD_STATE GENMASK(3, 0)
#define AMDXDNA_CMD_EXTRA_CU_MASK GENMASK(11, 10)
@@ -64,6 +82,8 @@ struct amdxdna_cmd {
u32 data[];
};
+#define INVALID_CU_IDX (~0U)
+
struct amdxdna_hwctx {
struct amdxdna_client *client;
struct amdxdna_hwctx_priv *priv;
@@ -95,6 +115,17 @@ struct amdxdna_hwctx {
#define drm_job_to_xdna_job(j) \
container_of(j, struct amdxdna_sched_job, base)
+enum amdxdna_job_opcode {
+ SYNC_DEBUG_BO,
+ ATTACH_DEBUG_BO,
+ DETACH_DEBUG_BO,
+};
+
+struct amdxdna_drv_cmd {
+ enum amdxdna_job_opcode opcode;
+ u32 result;
+};
+
struct amdxdna_sched_job {
struct drm_sched_job base;
struct kref refcnt;
@@ -105,7 +136,9 @@ struct amdxdna_sched_job {
/* user can wait on this fence */
struct dma_fence *out_fence;
bool job_done;
+ bool job_timeout;
u64 seq;
+ struct amdxdna_drv_cmd *drv_cmd;
struct amdxdna_gem_obj *cmd_bo;
size_t bo_cnt;
struct drm_gem_object *bos[] __counted_by(bo_cnt);
@@ -137,21 +170,17 @@ amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo)
}
void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size);
-int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo);
-
-static inline u32 amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx)
-{
- return GENMASK(hwctx->start_col + hwctx->num_col - 1,
- hwctx->start_col);
-}
+u32 amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo);
void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job);
void amdxdna_hwctx_remove_all(struct amdxdna_client *client);
-void amdxdna_hwctx_suspend(struct amdxdna_client *client);
-void amdxdna_hwctx_resume(struct amdxdna_client *client);
+int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
+ int (*walk)(struct amdxdna_hwctx *hwctx, void *arg));
+int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl);
int amdxdna_cmd_submit(struct amdxdna_client *client,
- u32 cmd_bo_hdls, u32 *arg_bo_hdls, u32 arg_bo_cnt,
+ struct amdxdna_drv_cmd *drv_cmd, u32 cmd_bo_hdls,
+ u32 *arg_bo_hdls, u32 arg_bo_cnt,
u32 hwctx_hdl, u64 *seq);
int amdxdna_cmd_wait(struct amdxdna_client *client, u32 hwctx_hdl,
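
Since amdxdna_cmd_get_cu_idx() now returns u32, callers must compare against
INVALID_CU_IDX instead of testing for a negative value. A minimal hedged sketch
of the adjusted caller pattern (the error handling is illustrative):

	u32 cu_idx = amdxdna_cmd_get_cu_idx(abo);

	if (cu_idx == INVALID_CU_IDX)	/* e.g. an ERT_CMD_CHAIN command */
		return -EINVAL;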
diff --git a/drivers/accel/amdxdna/amdxdna_error.h b/drivers/accel/amdxdna/amdxdna_error.h
new file mode 100644
index 000000000000..c51de86ec12b
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_error.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_ERROR_H_
+#define _AMDXDNA_ERROR_H_
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
+#define AMDXDNA_ERR_DRV_AIE 4
+#define AMDXDNA_ERR_SEV_CRITICAL 3
+#define AMDXDNA_ERR_CLASS_AIE 2
+
+#define AMDXDNA_ERR_NUM_MASK GENMASK_U64(15, 0)
+#define AMDXDNA_ERR_DRV_MASK GENMASK_U64(23, 16)
+#define AMDXDNA_ERR_SEV_MASK GENMASK_U64(31, 24)
+#define AMDXDNA_ERR_MOD_MASK GENMASK_U64(39, 32)
+#define AMDXDNA_ERR_CLASS_MASK GENMASK_U64(47, 40)
+
+enum amdxdna_error_num {
+ AMDXDNA_ERROR_NUM_AIE_SATURATION = 3,
+ AMDXDNA_ERROR_NUM_AIE_FP,
+ AMDXDNA_ERROR_NUM_AIE_STREAM,
+ AMDXDNA_ERROR_NUM_AIE_ACCESS,
+ AMDXDNA_ERROR_NUM_AIE_BUS,
+ AMDXDNA_ERROR_NUM_AIE_INSTRUCTION,
+ AMDXDNA_ERROR_NUM_AIE_ECC,
+ AMDXDNA_ERROR_NUM_AIE_LOCK,
+ AMDXDNA_ERROR_NUM_AIE_DMA,
+ AMDXDNA_ERROR_NUM_AIE_MEM_PARITY,
+ AMDXDNA_ERROR_NUM_UNKNOWN = 15,
+};
+
+enum amdxdna_error_module {
+ AMDXDNA_ERROR_MODULE_AIE_CORE = 3,
+ AMDXDNA_ERROR_MODULE_AIE_MEMORY,
+ AMDXDNA_ERROR_MODULE_AIE_SHIM,
+ AMDXDNA_ERROR_MODULE_AIE_NOC,
+ AMDXDNA_ERROR_MODULE_AIE_PL,
+ AMDXDNA_ERROR_MODULE_UNKNOWN = 8,
+};
+
+#define AMDXDNA_ERROR_ENCODE(err_num, err_mod) \
+ (FIELD_PREP(AMDXDNA_ERR_NUM_MASK, err_num) | \
+ FIELD_PREP_CONST(AMDXDNA_ERR_DRV_MASK, AMDXDNA_ERR_DRV_AIE) | \
+ FIELD_PREP_CONST(AMDXDNA_ERR_SEV_MASK, AMDXDNA_ERR_SEV_CRITICAL) | \
+ FIELD_PREP(AMDXDNA_ERR_MOD_MASK, err_mod) | \
+ FIELD_PREP_CONST(AMDXDNA_ERR_CLASS_MASK, AMDXDNA_ERR_CLASS_AIE))
+
+#define AMDXDNA_EXTRA_ERR_COL_MASK GENMASK_U64(7, 0)
+#define AMDXDNA_EXTRA_ERR_ROW_MASK GENMASK_U64(15, 8)
+
+#define AMDXDNA_EXTRA_ERR_ENCODE(row, col) \
+ (FIELD_PREP(AMDXDNA_EXTRA_ERR_COL_MASK, col) | \
+ FIELD_PREP(AMDXDNA_EXTRA_ERR_ROW_MASK, row))
+
+#endif /* _AMDXDNA_ERROR_H_ */
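
A hedged usage sketch of the new encode macros (the tile coordinates are
illustrative): an AIE core access error at row 2, column 5 would be reported as

	u64 err_code = AMDXDNA_ERROR_ENCODE(AMDXDNA_ERROR_NUM_AIE_ACCESS,
					    AMDXDNA_ERROR_MODULE_AIE_CORE);
	u64 err_extra = AMDXDNA_EXTRA_ERR_ENCODE(2, 5);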
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
index 26831ec69f89..dfa916eeb2d9 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.c
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -8,6 +8,7 @@
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <linux/dma-buf.h>
#include <linux/dma-direct.h>
@@ -18,46 +19,86 @@
#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_pci_drv.h"
+#include "amdxdna_ubuf.h"
#define XDNA_MAX_CMD_BO_SIZE SZ_32K
MODULE_IMPORT_NS("DMA_BUF");
static int
-amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
+amdxdna_gem_heap_alloc(struct amdxdna_gem_obj *abo)
{
struct amdxdna_client *client = abo->client;
struct amdxdna_dev *xdna = client->xdna;
struct amdxdna_mem *mem = &abo->mem;
+ struct amdxdna_gem_obj *heap;
u64 offset;
u32 align;
int ret;
+ mutex_lock(&client->mm_lock);
+
+ heap = client->dev_heap;
+ if (!heap) {
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
+ if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
+ XDNA_ERR(xdna, "Invalid dev heap userptr");
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
+ if (mem->size == 0 || mem->size > heap->mem.size) {
+ XDNA_ERR(xdna, "Invalid dev bo size 0x%lx, limit 0x%lx",
+ mem->size, heap->mem.size);
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
align = 1 << max(PAGE_SHIFT, xdna->dev_info->dev_mem_buf_shift);
- ret = drm_mm_insert_node_generic(&abo->dev_heap->mm, &abo->mm_node,
+ ret = drm_mm_insert_node_generic(&heap->mm, &abo->mm_node,
mem->size, align,
0, DRM_MM_INSERT_BEST);
if (ret) {
XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
- return ret;
+ goto unlock_out;
}
mem->dev_addr = abo->mm_node.start;
- offset = mem->dev_addr - abo->dev_heap->mem.dev_addr;
- mem->userptr = abo->dev_heap->mem.userptr + offset;
- mem->pages = &abo->dev_heap->base.pages[offset >> PAGE_SHIFT];
- mem->nr_pages = mem->size >> PAGE_SHIFT;
-
- if (use_vmap) {
- mem->kva = vmap(mem->pages, mem->nr_pages, VM_MAP, PAGE_KERNEL);
- if (!mem->kva) {
- XDNA_ERR(xdna, "Failed to vmap");
- drm_mm_remove_node(&abo->mm_node);
- return -EFAULT;
- }
- }
+ offset = mem->dev_addr - heap->mem.dev_addr;
+ mem->userptr = heap->mem.userptr + offset;
+ mem->kva = heap->mem.kva + offset;
- return 0;
+ drm_gem_object_get(to_gobj(heap));
+
+unlock_out:
+ mutex_unlock(&client->mm_lock);
+
+ return ret;
+}
+
+static void
+amdxdna_gem_destroy_obj(struct amdxdna_gem_obj *abo)
+{
+ mutex_destroy(&abo->lock);
+ kfree(abo);
+}
+
+static void
+amdxdna_gem_heap_free(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_gem_obj *heap;
+
+ mutex_lock(&abo->client->mm_lock);
+
+ drm_mm_remove_node(&abo->mm_node);
+
+ heap = abo->client->dev_heap;
+ drm_gem_object_put(to_gobj(heap));
+
+ mutex_unlock(&abo->client->mm_lock);
}
static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
@@ -213,6 +254,20 @@ free_map:
return ret;
}
+static void amdxdna_gem_dev_obj_free(struct drm_gem_object *gobj)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+
+ XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
+ if (abo->pinned)
+ amdxdna_gem_unpin(abo);
+
+ amdxdna_gem_heap_free(abo);
+ drm_gem_object_release(gobj);
+ amdxdna_gem_destroy_obj(abo);
+}
+
static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
struct vm_area_struct *vma)
{
@@ -243,7 +298,7 @@ static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
vma->vm_private_data = NULL;
vma->vm_ops = NULL;
- ret = dma_buf_mmap(to_gobj(abo)->dma_buf, vma, 0);
+ ret = dma_buf_mmap(abo->dma_buf, vma, 0);
if (ret) {
XDNA_ERR(xdna, "Failed to mmap dma buf %d", ret);
return ret;
@@ -338,10 +393,45 @@ static const struct dma_buf_ops amdxdna_dmabuf_ops = {
.vunmap = drm_gem_dmabuf_vunmap,
};
+static int amdxdna_gem_obj_vmap(struct amdxdna_gem_obj *abo, void **vaddr)
+{
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
+ int ret;
+
+ if (is_import_bo(abo))
+ ret = dma_buf_vmap_unlocked(abo->dma_buf, &map);
+ else
+ ret = drm_gem_vmap(to_gobj(abo), &map);
+
+ *vaddr = map.vaddr;
+ return ret;
+}
+
+static void amdxdna_gem_obj_vunmap(struct amdxdna_gem_obj *abo)
+{
+ struct iosys_map map;
+
+ if (!abo->mem.kva)
+ return;
+
+ iosys_map_set_vaddr(&map, abo->mem.kva);
+
+ if (is_import_bo(abo))
+ dma_buf_vunmap_unlocked(abo->dma_buf, &map);
+ else
+ drm_gem_vunmap(to_gobj(abo), &map);
+}
+
static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
{
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ if (abo->dma_buf) {
+ get_dma_buf(abo->dma_buf);
+ return abo->dma_buf;
+ }
+
exp_info.ops = &amdxdna_dmabuf_ops;
exp_info.size = gobj->size;
exp_info.flags = flags;
@@ -364,7 +454,6 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
{
struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
@@ -374,23 +463,10 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
if (abo->pinned)
amdxdna_gem_unpin(abo);
- if (abo->type == AMDXDNA_BO_DEV) {
- mutex_lock(&abo->client->mm_lock);
- drm_mm_remove_node(&abo->mm_node);
- mutex_unlock(&abo->client->mm_lock);
-
- vunmap(abo->mem.kva);
- drm_gem_object_put(to_gobj(abo->dev_heap));
- drm_gem_object_release(gobj);
- mutex_destroy(&abo->lock);
- kfree(abo);
- return;
- }
-
if (abo->type == AMDXDNA_BO_DEV_HEAP)
drm_mm_takedown(&abo->mm);
- drm_gem_vunmap(gobj, &map);
+ amdxdna_gem_obj_vunmap(abo);
mutex_destroy(&abo->lock);
if (is_import_bo(abo)) {
@@ -402,7 +478,7 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
}
static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
- .free = amdxdna_gem_obj_free,
+ .free = amdxdna_gem_dev_obj_free,
};
static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
@@ -454,6 +530,68 @@ amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
return to_gobj(abo);
}
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_shmem_object(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_shmem_object *shmem = drm_gem_shmem_create(dev, size);
+
+ if (IS_ERR(shmem))
+ return ERR_CAST(shmem);
+
+ shmem->map_wc = false;
+ return to_xdna_obj(&shmem->base);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_ubuf_object(struct drm_device *dev, struct amdxdna_drm_create_bo *args)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ enum amdxdna_ubuf_flag flags = 0;
+ struct amdxdna_drm_va_tbl va_tbl;
+ struct drm_gem_object *gobj;
+ struct dma_buf *dma_buf;
+
+ if (copy_from_user(&va_tbl, u64_to_user_ptr(args->vaddr), sizeof(va_tbl))) {
+ XDNA_DBG(xdna, "Access va table failed");
+ return ERR_PTR(-EFAULT);
+ }
+
+ if (va_tbl.num_entries) {
+ if (args->type == AMDXDNA_BO_CMD)
+ flags |= AMDXDNA_UBUF_FLAG_MAP_DMA;
+
+ dma_buf = amdxdna_get_ubuf(dev, flags, va_tbl.num_entries,
+ u64_to_user_ptr(args->vaddr + sizeof(va_tbl)));
+ } else {
+ dma_buf = dma_buf_get(va_tbl.dmabuf_fd);
+ }
+
+ if (IS_ERR(dma_buf))
+ return ERR_CAST(dma_buf);
+
+ gobj = amdxdna_gem_prime_import(dev, dma_buf);
+ if (IS_ERR(gobj)) {
+ dma_buf_put(dma_buf);
+ return ERR_CAST(gobj);
+ }
+
+ dma_buf_put(dma_buf);
+
+ return to_xdna_obj(gobj);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_object(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args)
+{
+ size_t aligned_sz = PAGE_ALIGN(args->size);
+
+ if (args->vaddr)
+ return amdxdna_gem_create_ubuf_object(dev, args);
+
+ return amdxdna_gem_create_shmem_object(dev, aligned_sz);
+}
+
struct drm_gem_object *
amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
@@ -505,16 +643,12 @@ amdxdna_drm_alloc_shmem(struct drm_device *dev,
struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
- struct drm_gem_shmem_object *shmem;
struct amdxdna_gem_obj *abo;
- shmem = drm_gem_shmem_create(dev, args->size);
- if (IS_ERR(shmem))
- return ERR_CAST(shmem);
-
- shmem->map_wc = false;
+ abo = amdxdna_gem_create_object(dev, args);
+ if (IS_ERR(abo))
+ return ERR_CAST(abo);
- abo = to_xdna_obj(&shmem->base);
abo->client = client;
abo->type = AMDXDNA_BO_SHMEM;
@@ -528,7 +662,6 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
{
struct amdxdna_client *client = filp->driver_priv;
struct amdxdna_dev *xdna = to_xdna_dev(dev);
- struct drm_gem_shmem_object *shmem;
struct amdxdna_gem_obj *abo;
int ret;
@@ -545,26 +678,31 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
goto mm_unlock;
}
- shmem = drm_gem_shmem_create(dev, args->size);
- if (IS_ERR(shmem)) {
- ret = PTR_ERR(shmem);
+ abo = amdxdna_gem_create_object(dev, args);
+ if (IS_ERR(abo)) {
+ ret = PTR_ERR(abo);
goto mm_unlock;
}
- shmem->map_wc = false;
- abo = to_xdna_obj(&shmem->base);
-
abo->type = AMDXDNA_BO_DEV_HEAP;
abo->client = client;
abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);
+ ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
+ if (ret) {
+ XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret);
+ goto release_obj;
+ }
+
client->dev_heap = abo;
drm_gem_object_get(to_gobj(abo));
mutex_unlock(&client->mm_lock);
return abo;
+release_obj:
+ drm_gem_object_put(to_gobj(abo));
mm_unlock:
mutex_unlock(&client->mm_lock);
return ERR_PTR(ret);
@@ -573,58 +711,32 @@ mm_unlock:
struct amdxdna_gem_obj *
amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
- struct drm_file *filp, bool use_vmap)
+ struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
struct amdxdna_dev *xdna = to_xdna_dev(dev);
size_t aligned_sz = PAGE_ALIGN(args->size);
- struct amdxdna_gem_obj *abo, *heap;
+ struct amdxdna_gem_obj *abo;
int ret;
- mutex_lock(&client->mm_lock);
- heap = client->dev_heap;
- if (!heap) {
- ret = -EINVAL;
- goto mm_unlock;
- }
-
- if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
- XDNA_ERR(xdna, "Invalid dev heap userptr");
- ret = -EINVAL;
- goto mm_unlock;
- }
-
- if (args->size > heap->mem.size) {
- XDNA_ERR(xdna, "Invalid dev bo size 0x%llx, limit 0x%lx",
- args->size, heap->mem.size);
- ret = -EINVAL;
- goto mm_unlock;
- }
-
abo = amdxdna_gem_create_obj(&xdna->ddev, aligned_sz);
- if (IS_ERR(abo)) {
- ret = PTR_ERR(abo);
- goto mm_unlock;
- }
+ if (IS_ERR(abo))
+ return abo;
+
to_gobj(abo)->funcs = &amdxdna_gem_dev_obj_funcs;
abo->type = AMDXDNA_BO_DEV;
abo->client = client;
- abo->dev_heap = heap;
- ret = amdxdna_gem_insert_node_locked(abo, use_vmap);
+
+ ret = amdxdna_gem_heap_alloc(abo);
if (ret) {
XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
- goto mm_unlock;
+ amdxdna_gem_destroy_obj(abo);
+ return ERR_PTR(ret);
}
- drm_gem_object_get(to_gobj(heap));
drm_gem_private_object_init(&xdna->ddev, to_gobj(abo), aligned_sz);
- mutex_unlock(&client->mm_lock);
return abo;
-
-mm_unlock:
- mutex_unlock(&client->mm_lock);
- return ERR_PTR(ret);
}
static struct amdxdna_gem_obj *
@@ -633,9 +745,7 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
struct drm_file *filp)
{
struct amdxdna_dev *xdna = to_xdna_dev(dev);
- struct drm_gem_shmem_object *shmem;
struct amdxdna_gem_obj *abo;
- struct iosys_map map;
int ret;
if (args->size > XDNA_MAX_CMD_BO_SIZE) {
@@ -648,27 +758,23 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
- shmem = drm_gem_shmem_create(dev, args->size);
- if (IS_ERR(shmem))
- return ERR_CAST(shmem);
-
- shmem->map_wc = false;
- abo = to_xdna_obj(&shmem->base);
+ abo = amdxdna_gem_create_object(dev, args);
+ if (IS_ERR(abo))
+ return ERR_CAST(abo);
abo->type = AMDXDNA_BO_CMD;
abo->client = filp->driver_priv;
- ret = drm_gem_vmap(to_gobj(abo), &map);
+ ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
if (ret) {
XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
goto release_obj;
}
- abo->mem.kva = map.vaddr;
return abo;
release_obj:
- drm_gem_shmem_free(shmem);
+ drm_gem_object_put(to_gobj(abo));
return ERR_PTR(ret);
}
@@ -679,7 +785,7 @@ int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_f
struct amdxdna_gem_obj *abo;
int ret;
- if (args->flags || args->vaddr || !args->size)
+ if (args->flags)
return -EINVAL;
XDNA_DBG(xdna, "BO arg type %d vaddr 0x%llx size 0x%llx flags 0x%llx",
@@ -692,7 +798,7 @@ int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_f
abo = amdxdna_drm_create_dev_heap(dev, args, filp);
break;
case AMDXDNA_BO_DEV:
- abo = amdxdna_drm_alloc_dev_bo(dev, args, filp, false);
+ abo = amdxdna_drm_alloc_dev_bo(dev, args, filp);
break;
case AMDXDNA_BO_CMD:
abo = amdxdna_drm_create_cmd_bo(dev, args, filp);
@@ -724,20 +830,13 @@ int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
int ret;
+ if (abo->type == AMDXDNA_BO_DEV)
+ abo = abo->client->dev_heap;
+
if (is_import_bo(abo))
return 0;
- switch (abo->type) {
- case AMDXDNA_BO_SHMEM:
- case AMDXDNA_BO_DEV_HEAP:
- ret = drm_gem_shmem_pin(&abo->base);
- break;
- case AMDXDNA_BO_DEV:
- ret = drm_gem_shmem_pin(&abo->dev_heap->base);
- break;
- default:
- ret = -EOPNOTSUPP;
- }
+ ret = drm_gem_shmem_pin(&abo->base);
XDNA_DBG(xdna, "BO type %d ret %d", abo->type, ret);
return ret;
@@ -747,9 +846,6 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
{
int ret;
- if (abo->type == AMDXDNA_BO_DEV)
- abo = abo->dev_heap;
-
mutex_lock(&abo->lock);
ret = amdxdna_gem_pin_nolock(abo);
mutex_unlock(&abo->lock);
@@ -759,12 +855,12 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
{
+ if (abo->type == AMDXDNA_BO_DEV)
+ abo = abo->client->dev_heap;
+
if (is_import_bo(abo))
return;
- if (abo->type == AMDXDNA_BO_DEV)
- abo = abo->dev_heap;
-
mutex_lock(&abo->lock);
drm_gem_shmem_unpin(&abo->base);
mutex_unlock(&abo->lock);
@@ -855,16 +951,21 @@ int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
if (is_import_bo(abo))
drm_clflush_sg(abo->base.sgt);
- else if (abo->type == AMDXDNA_BO_DEV)
- drm_clflush_pages(abo->mem.pages, abo->mem.nr_pages);
- else
+ else if (abo->mem.kva)
+ drm_clflush_virt_range(abo->mem.kva + args->offset, args->size);
+ else if (abo->base.pages)
drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);
+ else
+ drm_WARN(&xdna->ddev, 1, "Cannot find memory to flush");
amdxdna_gem_unpin(abo);
XDNA_DBG(xdna, "Sync bo %d offset 0x%llx, size 0x%llx\n",
args->handle, args->offset, args->size);
+ if (args->direction == SYNC_DIRECT_FROM_DEVICE)
+ ret = amdxdna_hwctx_sync_debug_bo(abo->client, args->handle);
+
put_obj:
drm_gem_object_put(gobj);
return ret;
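
After this rework, every AMDXDNA_BO_DEV object is carved out of the client's
heap BO and inherits its mappings at a fixed offset. A hedged sketch of the
invariant amdxdna_gem_heap_alloc() establishes ('abo' and 'heap' are
illustrative locals):

	u64 off = abo->mem.dev_addr - heap->mem.dev_addr;

	/* userptr and kva are both views into the heap's backing store */
	WARN_ON(abo->mem.userptr != heap->mem.userptr + off);
	WARN_ON(abo->mem.kva != heap->mem.kva + off);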
diff --git a/drivers/accel/amdxdna/amdxdna_gem.h b/drivers/accel/amdxdna/amdxdna_gem.h
index aee97e971d6d..f79fc7f3c93b 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.h
+++ b/drivers/accel/amdxdna/amdxdna_gem.h
@@ -7,6 +7,7 @@
#define _AMDXDNA_GEM_H_
#include <linux/hmm.h>
+#include "amdxdna_pci_drv.h"
struct amdxdna_umap {
struct vm_area_struct *vma;
@@ -41,7 +42,6 @@ struct amdxdna_gem_obj {
/* Below members is uninitialized when needed */
struct drm_mm mm; /* For AMDXDNA_BO_DEV_HEAP */
- struct amdxdna_gem_obj *dev_heap; /* For AMDXDNA_BO_DEV */
struct drm_mm_node mm_node; /* For AMDXDNA_BO_DEV */
u32 assigned_hwctx;
struct dma_buf *dma_buf;
@@ -63,6 +63,11 @@ static inline void amdxdna_gem_put_obj(struct amdxdna_gem_obj *abo)
drm_gem_object_put(to_gobj(abo));
}
+static inline u64 amdxdna_dev_bo_offset(struct amdxdna_gem_obj *abo)
+{
+ return abo->mem.dev_addr - abo->client->dev_heap->mem.dev_addr;
+}
+
void amdxdna_umap_put(struct amdxdna_umap *mapp);
struct drm_gem_object *
@@ -72,7 +77,7 @@ amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
struct amdxdna_gem_obj *
amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
- struct drm_file *filp, bool use_vmap);
+ struct drm_file *filp);
int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo);
int amdxdna_gem_pin(struct amdxdna_gem_obj *abo);
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.c b/drivers/accel/amdxdna/amdxdna_mailbox.c
index da1ac89bb78f..858df97cd3fb 100644
--- a/drivers/accel/amdxdna/amdxdna_mailbox.c
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.c
@@ -194,7 +194,8 @@ static void mailbox_release_msg(struct mailbox_channel *mb_chann,
{
MB_DBG(mb_chann, "msg_id 0x%x msg opcode 0x%x",
mb_msg->pkg.header.id, mb_msg->pkg.header.opcode);
- mb_msg->notify_cb(mb_msg->handle, NULL, 0);
+ if (mb_msg->notify_cb)
+ mb_msg->notify_cb(mb_msg->handle, NULL, 0);
kfree(mb_msg);
}
@@ -248,7 +249,7 @@ mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *heade
{
struct mailbox_msg *mb_msg;
int msg_id;
- int ret;
+ int ret = 0;
msg_id = header->id;
if (!mailbox_validate_msgid(msg_id)) {
@@ -265,9 +266,11 @@ mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *heade
MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
header->opcode, header->total_size, header->id);
- ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size);
- if (unlikely(ret))
- MB_ERR(mb_chann, "Message callback ret %d", ret);
+ if (mb_msg->notify_cb) {
+ ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size);
+ if (unlikely(ret))
+ MB_ERR(mb_chann, "Message callback ret %d", ret);
+ }
kfree(mb_msg);
return ret;
@@ -513,6 +516,7 @@ xdna_mailbox_create_channel(struct mailbox *mb,
}
mb_chann->bad_state = false;
+ mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);
MB_DBG(mb_chann, "Mailbox channel created (irq: %d)", mb_chann->msix_irq);
return mb_chann;
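
With the NULL checks above, a sender can post a fire-and-forget message by
leaving notify_cb unset; the response, if any, is dropped in mailbox_get_resp()
and the message is still freed. A hedged sketch (the fields shown are the ones
this series dereferences; the rest of the message setup is omitted):

	msg.notify_cb = NULL;	/* response, if any, is dropped */
	msg.handle = NULL;	/* nothing for a callback to use */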
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.h b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
index 710ff8873d61..556c712cad0a 100644
--- a/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
+++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
@@ -16,16 +16,18 @@ struct xdna_notify {
u32 *data;
size_t size;
int error;
+ u32 *status;
};
-#define DECLARE_XDNA_MSG_COMMON(name, op, status) \
+#define DECLARE_XDNA_MSG_COMMON(name, op, s) \
struct name##_req req = { 0 }; \
- struct name##_resp resp = { status }; \
+ struct name##_resp resp = { .status = s }; \
struct xdna_notify hdl = { \
.error = 0, \
.data = (u32 *)&resp, \
.size = sizeof(resp), \
.comp = COMPLETION_INITIALIZER_ONSTACK(hdl.comp), \
+ .status = (u32 *)&resp.status, \
}; \
struct xdna_mailbox_msg msg = { \
.send_data = (u8 *)&req, \
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
index f2bf1d374cc7..1973ab67721b 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.c
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -13,13 +13,11 @@
#include <drm/gpu_scheduler.h>
#include <linux/iommu.h>
#include <linux/pci.h>
-#include <linux/pm_runtime.h>
#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_pci_drv.h"
-
-#define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */
+#include "amdxdna_pm.h"
MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin");
@@ -27,6 +25,18 @@ MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");
/*
+ * 0.0: Initial version
+ * 0.1: Support getting all hardware contexts by DRM_IOCTL_AMDXDNA_GET_ARRAY
+ * 0.2: Support getting the last hardware error
+ * 0.3: Support firmware debug buffer
+ * 0.4: Support getting resource information
+ * 0.5: Support getting telemetry data
+ * 0.6: Support preemption
+ */
+#define AMDXDNA_DRIVER_MAJOR 0
+#define AMDXDNA_DRIVER_MINOR 6
+
+/*
* Bind the driver base on (vendor_id, device_id) pair and later use the
* (device_id, rev_id) pair as a key to select the devices. The devices with
* same device_id have very similar interface to host driver.
@@ -54,17 +64,9 @@ static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
struct amdxdna_client *client;
int ret;
- ret = pm_runtime_resume_and_get(ddev->dev);
- if (ret) {
- XDNA_ERR(xdna, "Failed to get rpm, ret %d", ret);
- return ret;
- }
-
client = kzalloc(sizeof(*client), GFP_KERNEL);
- if (!client) {
- ret = -ENOMEM;
- goto put_rpm;
- }
+ if (!client)
+ return -ENOMEM;
client->pid = pid_nr(rcu_access_pointer(filp->pid));
client->xdna = xdna;
@@ -81,7 +83,6 @@ static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
ret = -ENODEV;
goto unbind_sva;
}
- mutex_init(&client->hwctx_lock);
init_srcu_struct(&client->hwctx_srcu);
xa_init_flags(&client->hwctx_xa, XA_FLAGS_ALLOC);
mutex_init(&client->mm_lock);
@@ -100,9 +101,6 @@ unbind_sva:
iommu_sva_unbind_device(client->sva);
failed:
kfree(client);
-put_rpm:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
@@ -116,7 +114,6 @@ static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
xa_destroy(&client->hwctx_xa);
cleanup_srcu_struct(&client->hwctx_srcu);
- mutex_destroy(&client->hwctx_lock);
mutex_destroy(&client->mm_lock);
if (client->dev_heap)
drm_gem_object_put(to_gobj(client->dev_heap));
@@ -125,8 +122,6 @@ static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
XDNA_DBG(xdna, "pid %d closed", client->pid);
kfree(client);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
}
static int amdxdna_flush(struct file *f, fl_owner_t id)
@@ -142,8 +137,8 @@ static int amdxdna_flush(struct file *f, fl_owner_t id)
mutex_lock(&xdna->dev_lock);
list_del_init(&client->node);
- mutex_unlock(&xdna->dev_lock);
amdxdna_hwctx_remove_all(client);
+ mutex_unlock(&xdna->dev_lock);
drm_dev_exit(idx);
return 0;
@@ -166,6 +161,23 @@ static int amdxdna_drm_get_info_ioctl(struct drm_device *dev, void *data, struct
return ret;
}
+static int amdxdna_drm_get_array_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_get_array *args = data;
+
+ if (!xdna->dev_info->ops->get_array)
+ return -EOPNOTSUPP;
+
+ if (args->pad || !args->num_element || !args->element_size)
+ return -EINVAL;
+
+ guard(mutex)(&xdna->dev_lock);
+ return xdna->dev_info->ops->get_array(client, args);
+}
+
static int amdxdna_drm_set_state_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
@@ -197,6 +209,7 @@ static const struct drm_ioctl_desc amdxdna_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(AMDXDNA_EXEC_CMD, amdxdna_drm_submit_cmd_ioctl, 0),
/* AIE hardware */
DRM_IOCTL_DEF_DRV(AMDXDNA_GET_INFO, amdxdna_drm_get_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_GET_ARRAY, amdxdna_drm_get_array_ioctl, 0),
DRM_IOCTL_DEF_DRV(AMDXDNA_SET_STATE, amdxdna_drm_set_state_ioctl, DRM_ROOT_ONLY),
};
@@ -220,6 +233,8 @@ const struct drm_driver amdxdna_drm_drv = {
.fops = &amdxdna_fops,
.name = "amdxdna_accel_driver",
.desc = "AMD XDNA DRM implementation",
+ .major = AMDXDNA_DRIVER_MAJOR,
+ .minor = AMDXDNA_DRIVER_MINOR,
.open = amdxdna_drm_open,
.postclose = amdxdna_drm_close,
.ioctls = amdxdna_drm_ioctls,
@@ -285,19 +300,12 @@ static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto failed_dev_fini;
}
- pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(dev);
- pm_runtime_allow(dev);
-
ret = drm_dev_register(&xdna->ddev, 0);
if (ret) {
XDNA_ERR(xdna, "DRM register failed, ret %d", ret);
- pm_runtime_forbid(dev);
goto failed_sysfs_fini;
}
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
return 0;
failed_sysfs_fini:
@@ -314,14 +322,10 @@ destroy_notifier_wq:
static void amdxdna_remove(struct pci_dev *pdev)
{
struct amdxdna_dev *xdna = pci_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
struct amdxdna_client *client;
destroy_workqueue(xdna->notifier_wq);
- pm_runtime_get_noresume(dev);
- pm_runtime_forbid(dev);
-
drm_dev_unplug(&xdna->ddev);
amdxdna_sysfs_fini(xdna);
@@ -330,11 +334,8 @@ static void amdxdna_remove(struct pci_dev *pdev)
struct amdxdna_client, node);
while (client) {
list_del_init(&client->node);
- mutex_unlock(&xdna->dev_lock);
-
amdxdna_hwctx_remove_all(client);
- mutex_lock(&xdna->dev_lock);
client = list_first_entry_or_null(&xdna->client_list,
struct amdxdna_client, node);
}
@@ -343,89 +344,9 @@ static void amdxdna_remove(struct pci_dev *pdev)
mutex_unlock(&xdna->dev_lock);
}
-static int amdxdna_dev_suspend_nolock(struct amdxdna_dev *xdna)
-{
- if (xdna->dev_info->ops->suspend)
- xdna->dev_info->ops->suspend(xdna);
-
- return 0;
-}
-
-static int amdxdna_dev_resume_nolock(struct amdxdna_dev *xdna)
-{
- if (xdna->dev_info->ops->resume)
- return xdna->dev_info->ops->resume(xdna);
-
- return 0;
-}
-
-static int amdxdna_pmops_suspend(struct device *dev)
-{
- struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
- struct amdxdna_client *client;
-
- mutex_lock(&xdna->dev_lock);
- list_for_each_entry(client, &xdna->client_list, node)
- amdxdna_hwctx_suspend(client);
-
- amdxdna_dev_suspend_nolock(xdna);
- mutex_unlock(&xdna->dev_lock);
-
- return 0;
-}
-
-static int amdxdna_pmops_resume(struct device *dev)
-{
- struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
- struct amdxdna_client *client;
- int ret;
-
- XDNA_INFO(xdna, "firmware resuming...");
- mutex_lock(&xdna->dev_lock);
- ret = amdxdna_dev_resume_nolock(xdna);
- if (ret) {
- XDNA_ERR(xdna, "resume NPU firmware failed");
- mutex_unlock(&xdna->dev_lock);
- return ret;
- }
-
- XDNA_INFO(xdna, "hardware context resuming...");
- list_for_each_entry(client, &xdna->client_list, node)
- amdxdna_hwctx_resume(client);
- mutex_unlock(&xdna->dev_lock);
-
- return 0;
-}
-
-static int amdxdna_rpmops_suspend(struct device *dev)
-{
- struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
- int ret;
-
- mutex_lock(&xdna->dev_lock);
- ret = amdxdna_dev_suspend_nolock(xdna);
- mutex_unlock(&xdna->dev_lock);
-
- XDNA_DBG(xdna, "Runtime suspend done ret: %d", ret);
- return ret;
-}
-
-static int amdxdna_rpmops_resume(struct device *dev)
-{
- struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
- int ret;
-
- mutex_lock(&xdna->dev_lock);
- ret = amdxdna_dev_resume_nolock(xdna);
- mutex_unlock(&xdna->dev_lock);
-
- XDNA_DBG(xdna, "Runtime resume done ret: %d", ret);
- return ret;
-}
-
static const struct dev_pm_ops amdxdna_pm_ops = {
- SYSTEM_SLEEP_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume)
- RUNTIME_PM_OPS(amdxdna_rpmops_suspend, amdxdna_rpmops_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume)
+ RUNTIME_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume, NULL)
};
static struct pci_driver amdxdna_pci_driver = {
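
With major/minor now advertised, userspace can gate optional ioctls such as
DRM_IOCTL_AMDXDNA_GET_ARRAY on the reported driver version. A hedged
userspace-side sketch using the standard DRM version ioctl (the helper name is
illustrative):

	#include <stdbool.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>

	/* true when the driver reports at least version 0.1 */
	static bool amdxdna_has_get_array(int fd)
	{
		struct drm_version v = { 0 };

		if (ioctl(fd, DRM_IOCTL_VERSION, &v))
			return false;
		return v.version_major > 0 || v.version_minor >= 1;
	}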
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h
index ab79600911aa..c99477f5e454 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.h
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h
@@ -6,6 +6,7 @@
#ifndef _AMDXDNA_PCI_DRV_H_
#define _AMDXDNA_PCI_DRV_H_
+#include <drm/drm_print.h>
#include <linux/workqueue.h>
#include <linux/xarray.h>
@@ -50,16 +51,16 @@ struct amdxdna_dev_ops {
int (*init)(struct amdxdna_dev *xdna);
void (*fini)(struct amdxdna_dev *xdna);
int (*resume)(struct amdxdna_dev *xdna);
- void (*suspend)(struct amdxdna_dev *xdna);
+ int (*suspend)(struct amdxdna_dev *xdna);
int (*hwctx_init)(struct amdxdna_hwctx *hwctx);
void (*hwctx_fini)(struct amdxdna_hwctx *hwctx);
int (*hwctx_config)(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
+ int (*hwctx_sync_debug_bo)(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl);
void (*hmm_invalidate)(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
- void (*hwctx_suspend)(struct amdxdna_hwctx *hwctx);
- void (*hwctx_resume)(struct amdxdna_hwctx *hwctx);
int (*cmd_submit)(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
int (*get_aie_info)(struct amdxdna_client *client, struct amdxdna_drm_get_info *args);
int (*set_aie_state)(struct amdxdna_client *client, struct amdxdna_drm_set_state *args);
+ int (*get_array)(struct amdxdna_client *client, struct amdxdna_drm_get_array *args);
};
/*
@@ -100,6 +101,7 @@ struct amdxdna_dev {
struct amdxdna_fw_ver fw_ver;
struct rw_semaphore notifier_lock; /* for mmu notifier*/
struct workqueue_struct *notifier_wq;
+ bool rpm_on;
};
/*
@@ -118,8 +120,6 @@ struct amdxdna_device_id {
struct amdxdna_client {
struct list_head node;
pid_t pid;
- struct mutex hwctx_lock; /* protect hwctx */
- /* do NOT wait this srcu when hwctx_lock is held */
struct srcu_struct hwctx_srcu;
struct xarray hwctx_xa;
u32 next_hwctxid;
diff --git a/drivers/accel/amdxdna/amdxdna_pm.c b/drivers/accel/amdxdna/amdxdna_pm.c
new file mode 100644
index 000000000000..fa38e65d617c
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_pm.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_drv.h>
+#include <linux/pm_runtime.h>
+
+#include "amdxdna_pm.h"
+
+#define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */
+
+int amdxdna_pm_suspend(struct device *dev)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
+ int ret = -EOPNOTSUPP;
+ bool rpm;
+
+ if (xdna->dev_info->ops->suspend) {
+ rpm = xdna->rpm_on;
+ xdna->rpm_on = false;
+ ret = xdna->dev_info->ops->suspend(xdna);
+ xdna->rpm_on = rpm;
+ }
+
+ XDNA_DBG(xdna, "Suspend done ret %d", ret);
+ return ret;
+}
+
+int amdxdna_pm_resume(struct device *dev)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
+ int ret = -EOPNOTSUPP;
+ bool rpm;
+
+ if (xdna->dev_info->ops->resume) {
+ rpm = xdna->rpm_on;
+ xdna->rpm_on = false;
+ ret = xdna->dev_info->ops->resume(xdna);
+ xdna->rpm_on = rpm;
+ }
+
+ XDNA_DBG(xdna, "Resume done ret %d", ret);
+ return ret;
+}
+
+int amdxdna_pm_resume_get(struct amdxdna_dev *xdna)
+{
+ struct device *dev = xdna->ddev.dev;
+ int ret;
+
+ if (!xdna->rpm_on)
+ return 0;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ XDNA_ERR(xdna, "Resume failed: %d", ret);
+ pm_runtime_set_suspended(dev);
+ }
+
+ return ret;
+}
+
+void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna)
+{
+ struct device *dev = xdna->ddev.dev;
+
+ if (!xdna->rpm_on)
+ return;
+
+ pm_runtime_put_autosuspend(dev);
+}
+
+void amdxdna_pm_init(struct amdxdna_dev *xdna)
+{
+ struct device *dev = xdna->ddev.dev;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_allow(dev);
+ pm_runtime_put_autosuspend(dev);
+ xdna->rpm_on = true;
+}
+
+void amdxdna_pm_fini(struct amdxdna_dev *xdna)
+{
+ struct device *dev = xdna->ddev.dev;
+
+ xdna->rpm_on = false;
+ pm_runtime_get_noresume(dev);
+ pm_runtime_forbid(dev);
+}
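
The rpm_on save/clear/restore in the two callbacks above keeps device code
invoked from the PM core from recursing into runtime PM: while ops->suspend()
or ops->resume() runs, amdxdna_pm_resume_get() and amdxdna_pm_suspend_put()
see !rpm_on and become no-ops. A hedged sketch of the ordering on system
suspend:

	/* amdxdna_pm_suspend() (illustrative trace):
	 *   rpm = xdna->rpm_on;  xdna->rpm_on = false;
	 *   ops->suspend(xdna);  // a nested amdxdna_pm_resume_get() is a no-op
	 *   xdna->rpm_on = rpm;
	 */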
diff --git a/drivers/accel/amdxdna/amdxdna_pm.h b/drivers/accel/amdxdna/amdxdna_pm.h
new file mode 100644
index 000000000000..77b2d6e45570
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_pm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_PM_H_
+#define _AMDXDNA_PM_H_
+
+#include "amdxdna_pci_drv.h"
+
+int amdxdna_pm_suspend(struct device *dev);
+int amdxdna_pm_resume(struct device *dev);
+int amdxdna_pm_resume_get(struct amdxdna_dev *xdna);
+void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna);
+void amdxdna_pm_init(struct amdxdna_dev *xdna);
+void amdxdna_pm_fini(struct amdxdna_dev *xdna);
+
+#endif /* _AMDXDNA_PM_H_ */
diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
new file mode 100644
index 000000000000..077b2261cf2a
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <linux/dma-buf.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+
+#include "amdxdna_pci_drv.h"
+#include "amdxdna_ubuf.h"
+
+struct amdxdna_ubuf_priv {
+ struct page **pages;
+ u64 nr_pages;
+ enum amdxdna_ubuf_flag flags;
+ struct mm_struct *mm;
+};
+
+static struct sg_table *amdxdna_ubuf_map(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+{
+ struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv;
+ struct sg_table *sg;
+ int ret;
+
+ sg = kzalloc(sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->nr_pages, 0,
+ ubuf->nr_pages << PAGE_SHIFT, GFP_KERNEL);
+ if (ret) {
+ kfree(sg);
+ return ERR_PTR(ret);
+ }
+
+ if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA) {
+ ret = dma_map_sgtable(attach->dev, sg, direction, 0);
+ if (ret) {
+ sg_free_table(sg);
+ kfree(sg);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return sg;
+}
+
+static void amdxdna_ubuf_unmap(struct dma_buf_attachment *attach,
+ struct sg_table *sg,
+ enum dma_data_direction direction)
+{
+ struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv;
+
+ if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA)
+ dma_unmap_sgtable(attach->dev, sg, direction, 0);
+
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void amdxdna_ubuf_release(struct dma_buf *dbuf)
+{
+ struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
+
+ unpin_user_pages(ubuf->pages, ubuf->nr_pages);
+ kvfree(ubuf->pages);
+ atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+ mmdrop(ubuf->mm);
+ kfree(ubuf);
+}
+
+static vm_fault_t amdxdna_ubuf_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct amdxdna_ubuf_priv *ubuf;
+ unsigned long pfn;
+ pgoff_t pgoff;
+
+ ubuf = vma->vm_private_data;
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+
+ pfn = page_to_pfn(ubuf->pages[pgoff]);
+ return vmf_insert_pfn(vma, vmf->address, pfn);
+}
+
+static const struct vm_operations_struct amdxdna_ubuf_vm_ops = {
+ .fault = amdxdna_ubuf_vm_fault,
+};
+
+static int amdxdna_ubuf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma)
+{
+ struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
+
+ vma->vm_ops = &amdxdna_ubuf_vm_ops;
+ vma->vm_private_data = ubuf;
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+
+ return 0;
+}
+
+static int amdxdna_ubuf_vmap(struct dma_buf *dbuf, struct iosys_map *map)
+{
+ struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
+ void *kva;
+
+ kva = vmap(ubuf->pages, ubuf->nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!kva)
+ return -ENOMEM;
+
+ iosys_map_set_vaddr(map, kva);
+ return 0;
+}
+
+static void amdxdna_ubuf_vunmap(struct dma_buf *dbuf, struct iosys_map *map)
+{
+ vunmap(map->vaddr);
+}
+
+static const struct dma_buf_ops amdxdna_ubuf_dmabuf_ops = {
+ .map_dma_buf = amdxdna_ubuf_map,
+ .unmap_dma_buf = amdxdna_ubuf_unmap,
+ .release = amdxdna_ubuf_release,
+ .mmap = amdxdna_ubuf_mmap,
+ .vmap = amdxdna_ubuf_vmap,
+ .vunmap = amdxdna_ubuf_vunmap,
+};
+
+struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
+ enum amdxdna_ubuf_flag flags,
+ u32 num_entries, void __user *va_entries)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ unsigned long lock_limit, new_pinned;
+ struct amdxdna_drm_va_entry *va_ent;
+ struct amdxdna_ubuf_priv *ubuf;
+ u32 npages, start = 0;
+ struct dma_buf *dbuf;
+ int i, ret;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ if (!can_do_mlock())
+ return ERR_PTR(-EPERM);
+
+ ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
+ if (!ubuf)
+ return ERR_PTR(-ENOMEM);
+
+ ubuf->flags = flags;
+ ubuf->mm = current->mm;
+ mmgrab(ubuf->mm);
+
+ va_ent = kvcalloc(num_entries, sizeof(*va_ent), GFP_KERNEL);
+ if (!va_ent) {
+ ret = -ENOMEM;
+ goto free_ubuf;
+ }
+
+ if (copy_from_user(va_ent, va_entries, sizeof(*va_ent) * num_entries)) {
+ XDNA_DBG(xdna, "Access va entries failed");
+ ret = -EFAULT;
+ goto free_ent;
+ }
+
+ for (i = 0, exp_info.size = 0; i < num_entries; i++) {
+ if (!IS_ALIGNED(va_ent[i].vaddr, PAGE_SIZE) ||
+ !IS_ALIGNED(va_ent[i].len, PAGE_SIZE)) {
+ XDNA_ERR(xdna, "Invalid address or len %llx, %llx",
+ va_ent[i].vaddr, va_ent[i].len);
+ ret = -EINVAL;
+ goto free_ent;
+ }
+
+ exp_info.size += va_ent[i].len;
+ }
+
+ ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ new_pinned = atomic64_add_return(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+ if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
+ XDNA_DBG(xdna, "New pin %ld, limit %ld, cap %d",
+ new_pinned, lock_limit, capable(CAP_IPC_LOCK));
+ ret = -ENOMEM;
+ goto sub_pin_cnt;
+ }
+
+ ubuf->pages = kvmalloc_array(ubuf->nr_pages, sizeof(*ubuf->pages), GFP_KERNEL);
+ if (!ubuf->pages) {
+ ret = -ENOMEM;
+ goto sub_pin_cnt;
+ }
+
+ for (i = 0; i < num_entries; i++) {
+ npages = va_ent[i].len >> PAGE_SHIFT;
+
+ ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
+ FOLL_WRITE | FOLL_LONGTERM,
+ &ubuf->pages[start]);
+ if (ret < 0 || ret != npages) {
+ XDNA_ERR(xdna, "Failed to pin pages ret %d", ret);
+ ret = -ENOMEM;
+ goto destroy_pages;
+ }
+
+ start += ret;
+ }
+
+ exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
+ exp_info.priv = ubuf;
+ exp_info.flags = O_RDWR | O_CLOEXEC;
+
+ dbuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dbuf)) {
+ ret = PTR_ERR(dbuf);
+ goto destroy_pages;
+ }
+ kvfree(va_ent);
+
+ return dbuf;
+
+destroy_pages:
+ if (start)
+ unpin_user_pages(ubuf->pages, start);
+ kvfree(ubuf->pages);
+sub_pin_cnt:
+ atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+free_ent:
+ kvfree(va_ent);
+free_ubuf:
+ mmdrop(ubuf->mm);
+ kfree(ubuf);
+ return ERR_PTR(ret);
+}
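
amdxdna_gem_create_ubuf_object() expects an amdxdna_drm_va_tbl header at
args->vaddr, immediately followed by num_entries amdxdna_drm_va_entry records.
A hedged userspace-side sketch of building that table (buf0/len0 and buf1/len1
are illustrative, and both vaddr and len must be page aligned per the checks
above):

	struct {
		struct amdxdna_drm_va_tbl tbl;
		struct amdxdna_drm_va_entry ent[2];
	} va = {
		.tbl = { .num_entries = 2 },
		.ent = {
			{ .vaddr = (__u64)(uintptr_t)buf0, .len = len0 },
			{ .vaddr = (__u64)(uintptr_t)buf1, .len = len1 },
		},
	};

	args.vaddr = (__u64)(uintptr_t)&va;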
diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.h b/drivers/accel/amdxdna/amdxdna_ubuf.h
new file mode 100644
index 000000000000..e5cb3bdb3ec9
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ubuf.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+#ifndef _AMDXDNA_UBUF_H_
+#define _AMDXDNA_UBUF_H_
+
+#include <drm/drm_device.h>
+#include <linux/dma-buf.h>
+
+enum amdxdna_ubuf_flag {
+ AMDXDNA_UBUF_FLAG_MAP_DMA = 1,
+};
+
+struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
+ enum amdxdna_ubuf_flag flags,
+ u32 num_entries, void __user *va_entries);
+
+#endif /* _AMDXDNA_UBUF_H_ */
diff --git a/drivers/accel/amdxdna/npu1_regs.c b/drivers/accel/amdxdna/npu1_regs.c
index e4f6dac7d00f..ec407f3b48fc 100644
--- a/drivers/accel/amdxdna/npu1_regs.c
+++ b/drivers/accel/amdxdna/npu1_regs.c
@@ -46,6 +46,7 @@
const struct rt_config npu1_default_rt_cfg[] = {
{ 2, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */
+ { 4, 1, AIE2_RT_CFG_INIT }, /* Debug BO */
{ 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
{ 0 },
};
@@ -62,16 +63,23 @@ const struct dpm_clk_freq npu1_dpm_clk_table[] = {
{ 0 }
};
+static const struct aie2_fw_feature_tbl npu1_fw_feature_table[] = {
+ { .feature = AIE2_NPU_COMMAND, .min_minor = 8 },
+ { 0 }
+};
+
static const struct amdxdna_dev_priv npu1_dev_priv = {
.fw_path = "amdnpu/1502_00/npu.sbin",
.protocol_major = 0x5,
.protocol_minor = 0x7,
.rt_config = npu1_default_rt_cfg,
.dpm_clk_tbl = npu1_dpm_clk_table,
+ .fw_feature_tbl = npu1_fw_feature_table,
.col_align = COL_ALIGN_NONE,
.mbox_dev_addr = NPU1_MBOX_BAR_BASE,
.mbox_size = 0, /* Use BAR size */
.sram_dev_addr = NPU1_SRAM_BAR_BASE,
+ .hwctx_limit = 6,
.sram_offs = {
DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU1_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU1_SRAM, MPNPU_SRAM_I2X_MAILBOX_15),
diff --git a/drivers/accel/amdxdna/npu2_regs.c b/drivers/accel/amdxdna/npu2_regs.c
index a081cac75ee0..86f87d0d1354 100644
--- a/drivers/accel/amdxdna/npu2_regs.c
+++ b/drivers/accel/amdxdna/npu2_regs.c
@@ -67,10 +67,12 @@ static const struct amdxdna_dev_priv npu2_dev_priv = {
.protocol_minor = 0x6,
.rt_config = npu4_default_rt_cfg,
.dpm_clk_tbl = npu4_dpm_clk_table,
+ .fw_feature_tbl = npu4_fw_feature_table,
.col_align = COL_ALIGN_NATURE,
.mbox_dev_addr = NPU2_MBOX_BAR_BASE,
.mbox_size = 0, /* Use BAR size */
.sram_dev_addr = NPU2_SRAM_BAR_BASE,
+ .hwctx_limit = 16,
.sram_offs = {
DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU2_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU2_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
diff --git a/drivers/accel/amdxdna/npu4_regs.c b/drivers/accel/amdxdna/npu4_regs.c
index 9f2e33182ec6..986a5f28ba24 100644
--- a/drivers/accel/amdxdna/npu4_regs.c
+++ b/drivers/accel/amdxdna/npu4_regs.c
@@ -63,10 +63,14 @@
const struct rt_config npu4_default_rt_cfg[] = {
{ 5, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */
+ { 10, 1, AIE2_RT_CFG_INIT }, /* DEBUG BUF */
+ { 14, 0, AIE2_RT_CFG_INIT, BIT_U64(AIE2_PREEMPT) }, /* Frame boundary preemption */
{ 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
{ 2, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
{ 3, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
{ 4, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 13, 0, AIE2_RT_CFG_FORCE_PREEMPT },
+ { 14, 0, AIE2_RT_CFG_FRAME_BOUNDARY_PREEMPT },
{ 0 },
};
@@ -82,16 +86,24 @@ const struct dpm_clk_freq npu4_dpm_clk_table[] = {
{ 0 }
};
+const struct aie2_fw_feature_tbl npu4_fw_feature_table[] = {
+ { .feature = AIE2_NPU_COMMAND, .min_minor = 15 },
+ { .feature = AIE2_PREEMPT, .min_minor = 12 },
+ { 0 }
+};
+
static const struct amdxdna_dev_priv npu4_dev_priv = {
.fw_path = "amdnpu/17f0_10/npu.sbin",
.protocol_major = 0x6,
.protocol_minor = 12,
.rt_config = npu4_default_rt_cfg,
.dpm_clk_tbl = npu4_dpm_clk_table,
+ .fw_feature_tbl = npu4_fw_feature_table,
.col_align = COL_ALIGN_NATURE,
.mbox_dev_addr = NPU4_MBOX_BAR_BASE,
.mbox_size = 0, /* Use BAR size */
.sram_dev_addr = NPU4_SRAM_BAR_BASE,
+ .hwctx_limit = 16,
.sram_offs = {
DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU4_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU4_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
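
The feature table above pairs a firmware capability with the minimum protocol
minor that provides it, terminated by a { 0 } sentinel. A hedged sketch of the
lookup the driver presumably performs (the helper name is illustrative; this
assumes feature IDs are non-zero so the sentinel is distinguishable):

	static bool fw_has_feature(const struct aie2_fw_feature_tbl *tbl,
				   u32 fw_minor, u32 feature)
	{
		for (; tbl->feature; tbl++)	/* stop at the { 0 } sentinel */
			if (tbl->feature == feature)
				return fw_minor >= tbl->min_minor;
		return false;
	}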
diff --git a/drivers/accel/amdxdna/npu5_regs.c b/drivers/accel/amdxdna/npu5_regs.c
index 5f1cf83461c4..75ad97f0b937 100644
--- a/drivers/accel/amdxdna/npu5_regs.c
+++ b/drivers/accel/amdxdna/npu5_regs.c
@@ -67,10 +67,12 @@ static const struct amdxdna_dev_priv npu5_dev_priv = {
.protocol_minor = 12,
.rt_config = npu4_default_rt_cfg,
.dpm_clk_tbl = npu4_dpm_clk_table,
+ .fw_feature_tbl = npu4_fw_feature_table,
.col_align = COL_ALIGN_NATURE,
.mbox_dev_addr = NPU5_MBOX_BAR_BASE,
.mbox_size = 0, /* Use BAR size */
.sram_dev_addr = NPU5_SRAM_BAR_BASE,
+ .hwctx_limit = 16,
.sram_offs = {
DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU5_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU5_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
diff --git a/drivers/accel/amdxdna/npu6_regs.c b/drivers/accel/amdxdna/npu6_regs.c
index 94a7005685a7..758dc013fe13 100644
--- a/drivers/accel/amdxdna/npu6_regs.c
+++ b/drivers/accel/amdxdna/npu6_regs.c
@@ -67,10 +67,12 @@ static const struct amdxdna_dev_priv npu6_dev_priv = {
.protocol_minor = 12,
.rt_config = npu4_default_rt_cfg,
.dpm_clk_tbl = npu4_dpm_clk_table,
+ .fw_feature_tbl = npu4_fw_feature_table,
.col_align = COL_ALIGN_NATURE,
.mbox_dev_addr = NPU6_MBOX_BAR_BASE,
.mbox_size = 0, /* Use BAR size */
.sram_dev_addr = NPU6_SRAM_BAR_BASE,
+ .hwctx_limit = 16,
.sram_offs = {
DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
diff --git a/drivers/accel/drm_accel.c b/drivers/accel/drm_accel.c
index aa826033b0ce..ca3357acd127 100644
--- a/drivers/accel/drm_accel.c
+++ b/drivers/accel/drm_accel.c
@@ -20,8 +20,6 @@
DEFINE_XARRAY_ALLOC(accel_minors_xa);
-static struct dentry *accel_debugfs_root;
-
static const struct device_type accel_sysfs_device_minor = {
.name = "accel_minor"
};
@@ -74,17 +72,6 @@ static const struct drm_info_list accel_debugfs_list[] = {
#define ACCEL_DEBUGFS_ENTRIES ARRAY_SIZE(accel_debugfs_list)
/**
- * accel_debugfs_init() - Initialize debugfs for device
- * @dev: Pointer to the device instance.
- *
- * This function creates a root directory for the device in debugfs.
- */
-void accel_debugfs_init(struct drm_device *dev)
-{
- drm_debugfs_dev_init(dev, accel_debugfs_root);
-}
-
-/**
* accel_debugfs_register() - Register debugfs for device
* @dev: Pointer to the device instance.
*
@@ -194,7 +181,6 @@ static const struct file_operations accel_stub_fops = {
void accel_core_exit(void)
{
unregister_chrdev(ACCEL_MAJOR, "accel");
- debugfs_remove(accel_debugfs_root);
accel_sysfs_destroy();
WARN_ON(!xa_empty(&accel_minors_xa));
}
@@ -209,8 +195,6 @@ int __init accel_core_init(void)
goto error;
}
- accel_debugfs_root = debugfs_create_dir("accel", NULL);
-
ret = register_chrdev(ACCEL_MAJOR, "accel", &accel_stub_fops);
if (ret < 0)
DRM_ERROR("Cannot register ACCEL major: %d\n", ret);
diff --git a/drivers/accel/ethosu/Kconfig b/drivers/accel/ethosu/Kconfig
new file mode 100644
index 000000000000..d25f9b3eb317
--- /dev/null
+++ b/drivers/accel/ethosu/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_ACCEL_ARM_ETHOSU
+ tristate "Arm Ethos-U65/U85 NPU"
+ depends on HAS_IOMEM
+ depends on DRM_ACCEL
+ select DRM_GEM_DMA_HELPER
+ select DRM_SCHED
+ select GENERIC_ALLOCATOR
+ help
+ Enables support for the Arm Ethos-U65 and Ethos-U85 NPUs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ethosu.
diff --git a/drivers/accel/ethosu/Makefile b/drivers/accel/ethosu/Makefile
new file mode 100644
index 000000000000..17db5a600416
--- /dev/null
+++ b/drivers/accel/ethosu/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_DRM_ACCEL_ARM_ETHOSU) += ethosu.o
+ethosu-y += ethosu_drv.o ethosu_gem.o ethosu_job.o
diff --git a/drivers/accel/ethosu/ethosu_device.h b/drivers/accel/ethosu/ethosu_device.h
new file mode 100644
index 000000000000..b189fa783d6a
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_device.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2025 Arm, Ltd. */
+
+#ifndef __ETHOSU_DEVICE_H__
+#define __ETHOSU_DEVICE_H__
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+
+#include <drm/ethosu_accel.h>
+
+struct clk;
+struct gen_pool;
+
+#define NPU_REG_ID 0x0000
+#define NPU_REG_STATUS 0x0004
+#define NPU_REG_CMD 0x0008
+#define NPU_REG_RESET 0x000c
+#define NPU_REG_QBASE 0x0010
+#define NPU_REG_QBASE_HI 0x0014
+#define NPU_REG_QREAD 0x0018
+#define NPU_REG_QCONFIG 0x001c
+#define NPU_REG_QSIZE 0x0020
+#define NPU_REG_PROT 0x0024
+#define NPU_REG_CONFIG 0x0028
+#define NPU_REG_REGIONCFG 0x003c
+#define NPU_REG_AXILIMIT0 0x0040 // U65
+#define NPU_REG_AXILIMIT1 0x0044 // U65
+#define NPU_REG_AXILIMIT2 0x0048 // U65
+#define NPU_REG_AXILIMIT3 0x004c // U65
+#define NPU_REG_MEM_ATTR0 0x0040 // U85
+#define NPU_REG_MEM_ATTR1 0x0044 // U85
+#define NPU_REG_MEM_ATTR2 0x0048 // U85
+#define NPU_REG_MEM_ATTR3 0x004c // U85
+#define NPU_REG_AXI_SRAM 0x0050 // U85
+#define NPU_REG_AXI_EXT 0x0054 // U85
+
+#define NPU_REG_BASEP(x) (0x0080 + (x) * 8)
+#define NPU_REG_BASEP_HI(x) (0x0084 + (x) * 8)
+#define NPU_BASEP_REGION_MAX 8
+
+#define ID_ARCH_MAJOR_MASK GENMASK(31, 28)
+#define ID_ARCH_MINOR_MASK GENMASK(27, 20)
+#define ID_ARCH_PATCH_MASK GENMASK(19, 16)
+#define ID_VER_MAJOR_MASK GENMASK(11, 8)
+#define ID_VER_MINOR_MASK GENMASK(7, 4)
+
+#define CONFIG_MACS_PER_CC_MASK GENMASK(3, 0)
+#define CONFIG_CMD_STREAM_VER_MASK GENMASK(7, 4)
+
+#define STATUS_STATE_RUNNING BIT(0)
+#define STATUS_IRQ_RAISED BIT(1)
+#define STATUS_BUS_STATUS BIT(2)
+#define STATUS_RESET_STATUS BIT(3)
+#define STATUS_CMD_PARSE_ERR BIT(4)
+#define STATUS_CMD_END_REACHED BIT(5)
+
+#define CMD_CLEAR_IRQ BIT(1)
+#define CMD_TRANSITION_TO_RUN BIT(0)
+
+#define RESET_PENDING_CSL BIT(1)
+#define RESET_PENDING_CPL BIT(0)
+
+#define PROT_ACTIVE_CSL BIT(1)
+
+enum ethosu_cmds {
+ NPU_OP_CONV = 0x2,
+ NPU_OP_DEPTHWISE = 0x3,
+ NPU_OP_POOL = 0x5,
+ NPU_OP_ELEMENTWISE = 0x6,
+ NPU_OP_RESIZE = 0x7, // U85 only
+ NPU_OP_DMA_START = 0x10,
+ NPU_SET_IFM_PAD_TOP = 0x100,
+ NPU_SET_IFM_PAD_LEFT = 0x101,
+ NPU_SET_IFM_PAD_RIGHT = 0x102,
+ NPU_SET_IFM_PAD_BOTTOM = 0x103,
+ NPU_SET_IFM_DEPTH_M1 = 0x104,
+ NPU_SET_IFM_PRECISION = 0x105,
+ NPU_SET_IFM_BROADCAST = 0x108,
+ NPU_SET_IFM_WIDTH0_M1 = 0x10a,
+ NPU_SET_IFM_HEIGHT0_M1 = 0x10b,
+ NPU_SET_IFM_HEIGHT1_M1 = 0x10c,
+ NPU_SET_IFM_REGION = 0x10f,
+ NPU_SET_OFM_WIDTH_M1 = 0x111,
+ NPU_SET_OFM_HEIGHT_M1 = 0x112,
+ NPU_SET_OFM_DEPTH_M1 = 0x113,
+ NPU_SET_OFM_PRECISION = 0x114,
+ NPU_SET_OFM_WIDTH0_M1 = 0x11a,
+ NPU_SET_OFM_HEIGHT0_M1 = 0x11b,
+ NPU_SET_OFM_HEIGHT1_M1 = 0x11c,
+ NPU_SET_OFM_REGION = 0x11f,
+ NPU_SET_KERNEL_WIDTH_M1 = 0x120,
+ NPU_SET_KERNEL_HEIGHT_M1 = 0x121,
+ NPU_SET_KERNEL_STRIDE = 0x122,
+ NPU_SET_WEIGHT_REGION = 0x128,
+ NPU_SET_SCALE_REGION = 0x129,
+ NPU_SET_DMA0_SRC_REGION = 0x130,
+ NPU_SET_DMA0_DST_REGION = 0x131,
+ NPU_SET_DMA0_SIZE0 = 0x132,
+ NPU_SET_DMA0_SIZE1 = 0x133,
+ NPU_SET_IFM2_BROADCAST = 0x180,
+ NPU_SET_IFM2_PRECISION = 0x185,
+ NPU_SET_IFM2_WIDTH0_M1 = 0x18a,
+ NPU_SET_IFM2_HEIGHT0_M1 = 0x18b,
+ NPU_SET_IFM2_HEIGHT1_M1 = 0x18c,
+ NPU_SET_IFM2_REGION = 0x18f,
+ NPU_SET_IFM_BASE0 = 0x4000,
+ NPU_SET_IFM_BASE1 = 0x4001,
+ NPU_SET_IFM_BASE2 = 0x4002,
+ NPU_SET_IFM_BASE3 = 0x4003,
+ NPU_SET_IFM_STRIDE_X = 0x4004,
+ NPU_SET_IFM_STRIDE_Y = 0x4005,
+ NPU_SET_IFM_STRIDE_C = 0x4006,
+ NPU_SET_OFM_BASE0 = 0x4010,
+ NPU_SET_OFM_BASE1 = 0x4011,
+ NPU_SET_OFM_BASE2 = 0x4012,
+ NPU_SET_OFM_BASE3 = 0x4013,
+ NPU_SET_OFM_STRIDE_X = 0x4014,
+ NPU_SET_OFM_STRIDE_Y = 0x4015,
+ NPU_SET_OFM_STRIDE_C = 0x4016,
+ NPU_SET_WEIGHT_BASE = 0x4020,
+ NPU_SET_WEIGHT_LENGTH = 0x4021,
+ NPU_SET_SCALE_BASE = 0x4022,
+ NPU_SET_SCALE_LENGTH = 0x4023,
+ NPU_SET_DMA0_SRC = 0x4030,
+ NPU_SET_DMA0_DST = 0x4031,
+ NPU_SET_DMA0_LEN = 0x4032,
+ NPU_SET_DMA0_SRC_STRIDE0 = 0x4033,
+ NPU_SET_DMA0_SRC_STRIDE1 = 0x4034,
+ NPU_SET_DMA0_DST_STRIDE0 = 0x4035,
+ NPU_SET_DMA0_DST_STRIDE1 = 0x4036,
+ NPU_SET_IFM2_BASE0 = 0x4080,
+ NPU_SET_IFM2_BASE1 = 0x4081,
+ NPU_SET_IFM2_BASE2 = 0x4082,
+ NPU_SET_IFM2_BASE3 = 0x4083,
+ NPU_SET_IFM2_STRIDE_X = 0x4084,
+ NPU_SET_IFM2_STRIDE_Y = 0x4085,
+ NPU_SET_IFM2_STRIDE_C = 0x4086,
+ NPU_SET_WEIGHT1_BASE = 0x4090,
+ NPU_SET_WEIGHT1_LENGTH = 0x4091,
+ NPU_SET_SCALE1_BASE = 0x4092,
+ NPU_SET_WEIGHT2_BASE = 0x4092,
+ NPU_SET_SCALE1_LENGTH = 0x4093,
+ NPU_SET_WEIGHT2_LENGTH = 0x4093,
+ NPU_SET_WEIGHT3_BASE = 0x4094,
+ NPU_SET_WEIGHT3_LENGTH = 0x4095,
+};
+
+#define ETHOSU_SRAM_REGION 2 /* Matching Vela compiler */
+
+/**
+ * struct ethosu_device - Ethosu device
+ */
+struct ethosu_device {
+ /** @base: Base drm_device. */
+ struct drm_device base;
+
+ /** @iomem: CPU mapping of the registers. */
+ void __iomem *regs;
+
+ void __iomem *sram;
+ struct gen_pool *srampool;
+ dma_addr_t sramphys;
+
+ struct clk_bulk_data *clks;
+ int num_clks;
+ int irq;
+
+ struct drm_ethosu_npu_info npu_info;
+
+ struct ethosu_job *in_flight_job;
+ /* For in_flight_job and ethosu_job_hw_submit() */
+ struct mutex job_lock;
+
+ /* For dma_fence */
+ spinlock_t fence_lock;
+
+ struct drm_gpu_scheduler sched;
+ /* For ethosu_job_push() */
+ struct mutex sched_lock;
+ u64 fence_context;
+ u64 emit_seqno;
+};
+
+#define to_ethosu_device(drm_dev) \
+ container_of(drm_dev, struct ethosu_device, base)
+
+static inline bool ethosu_is_u65(const struct ethosu_device *ethosudev)
+{
+ return FIELD_GET(ID_ARCH_MAJOR_MASK, ethosudev->npu_info.id) == 1;
+}
+
+#endif
diff --git a/drivers/accel/ethosu/ethosu_drv.c b/drivers/accel/ethosu/ethosu_drv.c
new file mode 100644
index 000000000000..e05a69bf5574
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_drv.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+// Copyright (C) 2025 Arm, Ltd.
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_utils.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_accel.h>
+#include <drm/ethosu_accel.h>
+
+#include "ethosu_drv.h"
+#include "ethosu_device.h"
+#include "ethosu_gem.h"
+#include "ethosu_job.h"
+
+static int ethosu_ioctl_dev_query(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct ethosu_device *ethosudev = to_ethosu_device(ddev);
+ struct drm_ethosu_dev_query *args = data;
+
+ if (!args->pointer) {
+ switch (args->type) {
+ case DRM_ETHOSU_DEV_QUERY_NPU_INFO:
+ args->size = sizeof(ethosudev->npu_info);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ switch (args->type) {
+ case DRM_ETHOSU_DEV_QUERY_NPU_INFO:
+ if (args->size < offsetofend(struct drm_ethosu_npu_info, sram_size))
+ return -EINVAL;
+ return copy_struct_to_user(u64_to_user_ptr(args->pointer),
+ args->size,
+ &ethosudev->npu_info,
+ sizeof(ethosudev->npu_info), NULL);
+ default:
+ return -EINVAL;
+ }
+}
+
+#define ETHOSU_BO_FLAGS DRM_ETHOSU_BO_NO_MMAP
+
+static int ethosu_ioctl_bo_create(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_ethosu_bo_create *args = data;
+ int cookie, ret;
+
+ if (!drm_dev_enter(ddev, &cookie))
+ return -ENODEV;
+
+ if (!args->size || (args->flags & ~ETHOSU_BO_FLAGS)) {
+ ret = -EINVAL;
+ goto out_dev_exit;
+ }
+
+ ret = ethosu_gem_create_with_handle(file, ddev, &args->size,
+ args->flags, &args->handle);
+
+out_dev_exit:
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static int ethosu_ioctl_bo_wait(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_ethosu_bo_wait *args = data;
+ int cookie, ret;
+ unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (!drm_dev_enter(ddev, &cookie))
+ return -ENODEV;
+
+ ret = drm_gem_dma_resv_wait(file, args->handle, true, timeout);
+
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static int ethosu_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_ethosu_bo_mmap_offset *args = data;
+ struct drm_gem_object *obj;
+
+ if (args->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ args->offset = drm_vma_node_offset_addr(&obj->vma_node);
+ drm_gem_object_put(obj);
+ return 0;
+}
+
+static int ethosu_ioctl_cmdstream_bo_create(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_ethosu_cmdstream_bo_create *args = data;
+ int cookie, ret;
+
+ if (!drm_dev_enter(ddev, &cookie))
+ return -ENODEV;
+
+ if (!args->size || !args->data || args->pad || args->flags) {
+ ret = -EINVAL;
+ goto out_dev_exit;
+ }
+
+ args->flags |= DRM_ETHOSU_BO_NO_MMAP;
+
+ ret = ethosu_gem_cmdstream_create(file, ddev, args->size, args->data,
+ args->flags, &args->handle);
+
+out_dev_exit:
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static int ethosu_open(struct drm_device *ddev, struct drm_file *file)
+{
+ int ret = 0;
+
+ if (!try_module_get(THIS_MODULE))
+ return -EINVAL;
+
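+ /* priv is freed automatically via __free(kfree) unless ownership is transferred with no_free_ptr() */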
+ struct ethosu_file_priv __free(kfree) *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto err_put_mod;
+ }
+ priv->edev = to_ethosu_device(ddev);
+
+ ret = ethosu_job_open(priv);
+ if (ret)
+ goto err_put_mod;
+
+ file->driver_priv = no_free_ptr(priv);
+ return 0;
+
+err_put_mod:
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static void ethosu_postclose(struct drm_device *ddev, struct drm_file *file)
+{
+ ethosu_job_close(file->driver_priv);
+ kfree(file->driver_priv);
+ module_put(THIS_MODULE);
+}
+
+static const struct drm_ioctl_desc ethosu_drm_driver_ioctls[] = {
+#define ETHOSU_IOCTL(n, func, flags) \
+ DRM_IOCTL_DEF_DRV(ETHOSU_##n, ethosu_ioctl_##func, flags)
+
+ ETHOSU_IOCTL(DEV_QUERY, dev_query, 0),
+ ETHOSU_IOCTL(BO_CREATE, bo_create, 0),
+ ETHOSU_IOCTL(BO_WAIT, bo_wait, 0),
+ ETHOSU_IOCTL(BO_MMAP_OFFSET, bo_mmap_offset, 0),
+ ETHOSU_IOCTL(CMDSTREAM_BO_CREATE, cmdstream_bo_create, 0),
+ ETHOSU_IOCTL(SUBMIT, submit, 0),
+};
+
+DEFINE_DRM_ACCEL_FOPS(ethosu_drm_driver_fops);
+
+/*
+ * Ethosu driver version:
+ * - 1.0 - initial interface
+ */
+static const struct drm_driver ethosu_drm_driver = {
+ .driver_features = DRIVER_COMPUTE_ACCEL | DRIVER_GEM,
+ .open = ethosu_open,
+ .postclose = ethosu_postclose,
+ .ioctls = ethosu_drm_driver_ioctls,
+ .num_ioctls = ARRAY_SIZE(ethosu_drm_driver_ioctls),
+ .fops = &ethosu_drm_driver_fops,
+ .name = "ethosu",
+ .desc = "Arm Ethos-U Accel driver",
+ .major = 1,
+ .minor = 0,
+
+ .gem_create_object = ethosu_gem_create_object,
+};
+
+#define U65_DRAM_AXI_LIMIT_CFG 0x1f3f0002
+#define U65_SRAM_AXI_LIMIT_CFG 0x1f3f00b0
+#define U85_AXI_EXT_CFG 0x00021f3f
+#define U85_AXI_SRAM_CFG 0x00021f3f
+#define U85_MEM_ATTR0_CFG 0x00000000
+#define U85_MEM_ATTR2_CFG 0x000000b7
+
+static int ethosu_reset(struct ethosu_device *ethosudev)
+{
+ int ret;
+ u32 reg;
+
+ writel_relaxed(RESET_PENDING_CSL, ethosudev->regs + NPU_REG_RESET);
+ ret = readl_poll_timeout(ethosudev->regs + NPU_REG_STATUS, reg,
+ !FIELD_GET(STATUS_RESET_STATUS, reg),
+ USEC_PER_MSEC, USEC_PER_SEC);
+ if (ret)
+ return ret;
+
+ if (!FIELD_GET(PROT_ACTIVE_CSL, readl_relaxed(ethosudev->regs + NPU_REG_PROT))) {
+ dev_warn(ethosudev->base.dev, "Could not reset to non-secure mode (PROT = %x)\n",
+ readl_relaxed(ethosudev->regs + NPU_REG_PROT));
+ }
+
+ /*
+ * Assign region 2 (SRAM) to AXI M0 (AXILIMIT0),
+ * everything else to AXI M1 (AXILIMIT2)
+ */
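+ /* REGIONCFG packs a 2-bit memory class per region: 0x0000aa8a gives region 2 class 0 and all others class 2 */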
+ writel_relaxed(0x0000aa8a, ethosudev->regs + NPU_REG_REGIONCFG);
+ if (ethosu_is_u65(ethosudev)) {
+ writel_relaxed(U65_SRAM_AXI_LIMIT_CFG, ethosudev->regs + NPU_REG_AXILIMIT0);
+ writel_relaxed(U65_DRAM_AXI_LIMIT_CFG, ethosudev->regs + NPU_REG_AXILIMIT2);
+ } else {
+ writel_relaxed(U85_AXI_SRAM_CFG, ethosudev->regs + NPU_REG_AXI_SRAM);
+ writel_relaxed(U85_AXI_EXT_CFG, ethosudev->regs + NPU_REG_AXI_EXT);
+ writel_relaxed(U85_MEM_ATTR0_CFG, ethosudev->regs + NPU_REG_MEM_ATTR0); // SRAM
+ writel_relaxed(U85_MEM_ATTR2_CFG, ethosudev->regs + NPU_REG_MEM_ATTR2); // DRAM
+ }
+
+ if (ethosudev->sram)
+ memset_io(ethosudev->sram, 0, ethosudev->npu_info.sram_size);
+
+ return 0;
+}
+
+static int ethosu_device_resume(struct device *dev)
+{
+ struct ethosu_device *ethosudev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(ethosudev->num_clks, ethosudev->clks);
+ if (ret)
+ return ret;
+
+ ret = ethosu_reset(ethosudev);
+ if (!ret)
+ return 0;
+
+ clk_bulk_disable_unprepare(ethosudev->num_clks, ethosudev->clks);
+ return ret;
+}
+
+static int ethosu_device_suspend(struct device *dev)
+{
+ struct ethosu_device *ethosudev = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(ethosudev->num_clks, ethosudev->clks);
+ return 0;
+}
+
+static int ethosu_sram_init(struct ethosu_device *ethosudev)
+{
+ ethosudev->npu_info.sram_size = 0;
+
+ ethosudev->srampool = of_gen_pool_get(ethosudev->base.dev->of_node, "sram", 0);
+ if (!ethosudev->srampool)
+ return 0;
+
+ ethosudev->npu_info.sram_size = gen_pool_size(ethosudev->srampool);
+
+ ethosudev->sram = (void __iomem *)gen_pool_dma_alloc(ethosudev->srampool,
+ ethosudev->npu_info.sram_size,
+ &ethosudev->sramphys);
+ if (!ethosudev->sram) {
+ dev_err(ethosudev->base.dev, "failed to allocate from SRAM pool\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int ethosu_init(struct ethosu_device *ethosudev)
+{
+ int ret;
+ u32 id, config;
+
+ ret = ethosu_device_resume(ethosudev->base.dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_set_autosuspend_delay(ethosudev->base.dev, 50);
+ pm_runtime_use_autosuspend(ethosudev->base.dev);
+ ret = devm_pm_runtime_set_active_enabled(ethosudev->base.dev);
+ if (ret)
+ return ret;
+ pm_runtime_get_noresume(ethosudev->base.dev);
+
+ ethosudev->npu_info.id = id = readl_relaxed(ethosudev->regs + NPU_REG_ID);
+ ethosudev->npu_info.config = config = readl_relaxed(ethosudev->regs + NPU_REG_CONFIG);
+
+ ethosu_sram_init(ethosudev);
+
+ dev_info(ethosudev->base.dev,
+ "Ethos-U NPU, arch v%ld.%ld.%ld, rev r%ldp%ld, cmd stream ver%ld, %d MACs, %dKB SRAM\n",
+ FIELD_GET(ID_ARCH_MAJOR_MASK, id),
+ FIELD_GET(ID_ARCH_MINOR_MASK, id),
+ FIELD_GET(ID_ARCH_PATCH_MASK, id),
+ FIELD_GET(ID_VER_MAJOR_MASK, id),
+ FIELD_GET(ID_VER_MINOR_MASK, id),
+ FIELD_GET(CONFIG_CMD_STREAM_VER_MASK, config),
+ 1 << FIELD_GET(CONFIG_MACS_PER_CC_MASK, config),
+ ethosudev->npu_info.sram_size / 1024);
+
+ return 0;
+}
+
+static int ethosu_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct ethosu_device *ethosudev;
+
+ ethosudev = devm_drm_dev_alloc(&pdev->dev, &ethosu_drm_driver,
+ struct ethosu_device, base);
+ if (IS_ERR(ethosudev))
+ return PTR_ERR(ethosudev);
+ platform_set_drvdata(pdev, ethosudev);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+ if (ret)
+ return ret;
+
+ ethosudev->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ethosudev->regs))
+ return PTR_ERR(ethosudev->regs);
+
+ ethosudev->num_clks = devm_clk_bulk_get_all(&pdev->dev, &ethosudev->clks);
+ if (ethosudev->num_clks < 0)
+ return ethosudev->num_clks;
+
+ ret = ethosu_job_init(ethosudev);
+ if (ret)
+ return ret;
+
+ ret = ethosu_init(ethosudev);
+ if (ret) {
+ ethosu_job_fini(ethosudev);
+ return ret;
+ }
+
+ ret = drm_dev_register(&ethosudev->base, 0);
+ if (ret) {
+ pm_runtime_dont_use_autosuspend(ethosudev->base.dev);
+ ethosu_job_fini(ethosudev);
+ }
+
+ pm_runtime_put_autosuspend(ethosudev->base.dev);
+ return ret;
+}
+
+static void ethosu_remove(struct platform_device *pdev)
+{
+ struct ethosu_device *ethosudev = dev_get_drvdata(&pdev->dev);
+
+ drm_dev_unregister(&ethosudev->base);
+ ethosu_job_fini(ethosudev);
+ if (ethosudev->sram)
+ gen_pool_free(ethosudev->srampool, (unsigned long)ethosudev->sram,
+ ethosudev->npu_info.sram_size);
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "arm,ethos-u65" },
+ { .compatible = "arm,ethos-u85" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static DEFINE_RUNTIME_DEV_PM_OPS(ethosu_pm_ops,
+ ethosu_device_suspend,
+ ethosu_device_resume,
+ NULL);
+
+static struct platform_driver ethosu_driver = {
+ .probe = ethosu_probe,
+ .remove = ethosu_remove,
+ .driver = {
+ .name = "ethosu",
+ .pm = pm_ptr(&ethosu_pm_ops),
+ .of_match_table = dt_match,
+ },
+};
+module_platform_driver(ethosu_driver);
+
+MODULE_AUTHOR("Rob Herring <robh@kernel.org>");
+MODULE_DESCRIPTION("Arm Ethos-U Accel Driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/accel/ethosu/ethosu_drv.h b/drivers/accel/ethosu/ethosu_drv.h
new file mode 100644
index 000000000000..9e21dfe94184
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_drv.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2025 Arm, Ltd. */
+#ifndef __ETHOSU_DRV_H__
+#define __ETHOSU_DRV_H__
+
+#include <drm/gpu_scheduler.h>
+
+struct ethosu_device;
+
+struct ethosu_file_priv {
+ struct ethosu_device *edev;
+ struct drm_sched_entity sched_entity;
+};
+
+#endif
diff --git a/drivers/accel/ethosu/ethosu_gem.c b/drivers/accel/ethosu/ethosu_gem.c
new file mode 100644
index 000000000000..473b5f5d7514
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_gem.c
@@ -0,0 +1,704 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2025 Arm, Ltd. */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <drm/ethosu_accel.h>
+
+#include "ethosu_device.h"
+#include "ethosu_gem.h"
+
+static void ethosu_gem_free_object(struct drm_gem_object *obj)
+{
+ struct ethosu_gem_object *bo = to_ethosu_bo(obj);
+
+ kfree(bo->info);
+ drm_gem_free_mmap_offset(&bo->base.base);
+ drm_gem_dma_free(&bo->base);
+}
+
+static int ethosu_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct ethosu_gem_object *bo = to_ethosu_bo(obj);
+
+ /* Don't allow mmap on objects that have the NO_MMAP flag set. */
+ if (bo->flags & DRM_ETHOSU_BO_NO_MMAP)
+ return -EINVAL;
+
+ return drm_gem_dma_object_mmap(obj, vma);
+}
+
+static const struct drm_gem_object_funcs ethosu_gem_funcs = {
+ .free = ethosu_gem_free_object,
+ .print_info = drm_gem_dma_object_print_info,
+ .get_sg_table = drm_gem_dma_object_get_sg_table,
+ .vmap = drm_gem_dma_object_vmap,
+ .mmap = ethosu_gem_mmap,
+ .vm_ops = &drm_gem_dma_vm_ops,
+};
+
+/**
+ * ethosu_gem_create_object - Implementation of driver->gem_create_object.
+ * @ddev: DRM device
+ * @size: Size in bytes of the memory the object will reference
+ *
+ * This lets the GEM helpers allocate our driver-specific object struct
+ * for us.
+ */
+struct drm_gem_object *ethosu_gem_create_object(struct drm_device *ddev, size_t size)
+{
+ struct ethosu_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ obj->base.base.funcs = &ethosu_gem_funcs;
+ return &obj->base.base;
+}
+
+/**
+ * ethosu_gem_create_with_handle() - Create a GEM object and attach it to a handle.
+ * @file: DRM file.
+ * @ddev: DRM device.
+ * @size: Size of the GEM object to allocate.
+ * @flags: Combination of drm_ethosu_bo_flags flags.
+ * @handle: Pointer holding the handle pointing to the new GEM object.
+ *
+ * Return: Zero on success
+ */
+int ethosu_gem_create_with_handle(struct drm_file *file,
+ struct drm_device *ddev,
+ u64 *size, u32 flags, u32 *handle)
+{
+ struct drm_gem_dma_object *mem;
+ struct ethosu_gem_object *bo;
+ int ret;
+
+ mem = drm_gem_dma_create(ddev, *size);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ bo = to_ethosu_bo(&mem->base);
+ bo->flags = flags;
+
+ /*
+ * Create a handle for the object; the handle is what
+ * userspace uses to refer to it from now on.
+ */
+ ret = drm_gem_handle_create(file, &mem->base, handle);
+ if (!ret)
+ *size = bo->base.base.size;
+
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_put(&mem->base);
+
+ return ret;
+}
+
+struct dma {
+ s8 region;
+ u64 len;
+ u64 offset;
+ s64 stride[2];
+};
+
+struct dma_state {
+ u16 size0;
+ u16 size1;
+ s8 mode;
+ struct dma src;
+ struct dma dst;
+};
+
+struct buffer {
+ u64 base;
+ u32 length;
+ s8 region;
+};
+
+struct feat_matrix {
+ u64 base[4];
+ s64 stride_x;
+ s64 stride_y;
+ s64 stride_c;
+ s8 region;
+ u8 broadcast;
+ u16 stride_kernel;
+ u16 precision;
+ u16 depth;
+ u16 width;
+ u16 width0;
+ u16 height[3];
+ u8 pad_top;
+ u8 pad_left;
+ u8 pad_bottom;
+ u8 pad_right;
+};
+
+struct cmd_state {
+ struct dma_state dma;
+ struct buffer scale[2];
+ struct buffer weight[4];
+ struct feat_matrix ofm;
+ struct feat_matrix ifm;
+ struct feat_matrix ifm2;
+};
+
+static void cmd_state_init(struct cmd_state *st)
+{
+ /* Initialize to all 1s to detect missing setup */
+ memset(st, 0xff, sizeof(*st));
+}
+
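+/*
+ * Two-word commands carry a 40-bit value: bits 39:32 come from cmd[0][23:16]
+ * and bits 31:0 from cmd[1].
+ */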
+static u64 cmd_to_addr(u32 *cmd)
+{
+ return ((u64)(cmd[0] & 0xff0000) << 16) | cmd[1];
+}
+
+static u64 dma_length(struct ethosu_validated_cmdstream_info *info,
+ struct dma_state *dma_st, struct dma *dma)
+{
+ s8 mode = dma_st->mode;
+ u64 len = dma->len;
+
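+ /* mode encodes the DMA dimensionality: >= 1 adds an outer loop over size0, == 2 a second loop over size1 */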
+ if (mode >= 1) {
+ len += dma->stride[0];
+ len *= dma_st->size0;
+ }
+ if (mode == 2) {
+ len += dma->stride[1];
+ len *= dma_st->size1;
+ }
+ if (dma->region >= 0)
+ info->region_size[dma->region] = max(info->region_size[dma->region],
+ len + dma->offset);
+
+ return len;
+}
+
+static u64 feat_matrix_length(struct ethosu_validated_cmdstream_info *info,
+ struct feat_matrix *fm,
+ u32 x, u32 y, u32 c)
+{
+ u32 element_size, storage = fm->precision >> 14;
+ int tile = 0;
+ u64 addr;
+
+ if (fm->region < 0)
+ return U64_MAX;
+
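+ /* A feature map is stored as up to four tiles; find the tile that holds the requested element */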
+ switch (storage) {
+ case 0:
+ if (x >= fm->width0 + 1) {
+ x -= fm->width0 + 1;
+ tile += 1;
+ }
+ if (y >= fm->height[tile] + 1) {
+ y -= fm->height[tile] + 1;
+ tile += 2;
+ }
+ break;
+ case 1:
+ if (y >= fm->height[1] + 1) {
+ y -= fm->height[1] + 1;
+ tile = 2;
+ } else if (y >= fm->height[0] + 1) {
+ y -= fm->height[0] + 1;
+ tile = 1;
+ }
+ break;
+ }
+ if (fm->base[tile] == U64_MAX)
+ return U64_MAX;
+
+ addr = fm->base[tile] + y * fm->stride_y;
+
+ switch ((fm->precision >> 6) & 0x3) { // format
+ case 0: //nhwc:
+ addr += x * fm->stride_x + c;
+ break;
+ case 1: //nhcwb16:
+ element_size = BIT((fm->precision >> 1) & 0x3);
+
+ addr += (c / 16) * fm->stride_c + (16 * x + (c & 0xf)) * element_size;
+ break;
+ }
+
+ info->region_size[fm->region] = max(info->region_size[fm->region], addr + 1);
+
+ return addr;
+}
+
+static int calc_sizes(struct drm_device *ddev,
+ struct ethosu_validated_cmdstream_info *info,
+ u16 op, struct cmd_state *st,
+ bool ifm, bool ifm2, bool weight, bool scale)
+{
+ u64 len;
+
+ if (ifm) {
+ if (st->ifm.stride_kernel == U16_MAX)
+ return -EINVAL;
+ u32 stride_y = ((st->ifm.stride_kernel >> 8) & 0x2) +
+ ((st->ifm.stride_kernel >> 1) & 0x1) + 1;
+ u32 stride_x = ((st->ifm.stride_kernel >> 5) & 0x2) +
+ (st->ifm.stride_kernel & 0x1) + 1;
+ u32 ifm_height = st->ofm.height[2] * stride_y +
+ st->ifm.height[2] - (st->ifm.pad_top + st->ifm.pad_bottom);
+ u32 ifm_width = st->ofm.width * stride_x +
+ st->ifm.width - (st->ifm.pad_left + st->ifm.pad_right);
+
+ len = feat_matrix_length(info, &st->ifm, ifm_width,
+ ifm_height, st->ifm.depth);
+ dev_dbg(ddev->dev, "op %d: IFM:%d:0x%llx-0x%llx\n",
+ op, st->ifm.region, st->ifm.base[0], len);
+ if (len == U64_MAX)
+ return -EINVAL;
+ }
+
+ if (ifm2) {
+ len = feat_matrix_length(info, &st->ifm2, st->ifm.depth,
+ 0, st->ofm.depth);
+ dev_dbg(ddev->dev, "op %d: IFM2:%d:0x%llx-0x%llx\n",
+ op, st->ifm2.region, st->ifm2.base[0], len);
+ if (len == U64_MAX)
+ return -EINVAL;
+ }
+
+ if (weight) {
+ dev_dbg(ddev->dev, "op %d: W:%d:0x%llx-0x%llx\n",
+ op, st->weight[0].region, st->weight[0].base,
+ st->weight[0].base + st->weight[0].length - 1);
+ if (st->weight[0].region < 0 || st->weight[0].base == U64_MAX ||
+ st->weight[0].length == U32_MAX)
+ return -EINVAL;
+ info->region_size[st->weight[0].region] =
+ max(info->region_size[st->weight[0].region],
+ st->weight[0].base + st->weight[0].length);
+ }
+
+ if (scale) {
+ dev_dbg(ddev->dev, "op %d: S:%d:0x%llx-0x%llx\n",
+ op, st->scale[0].region, st->scale[0].base,
+ st->scale[0].base + st->scale[0].length - 1);
+ if (st->scale[0].region < 0 || st->scale[0].base == U64_MAX ||
+ st->scale[0].length == U32_MAX)
+ return -EINVAL;
+ info->region_size[st->scale[0].region] =
+ max(info->region_size[st->scale[0].region],
+ st->scale[0].base + st->scale[0].length);
+ }
+
+ len = feat_matrix_length(info, &st->ofm, st->ofm.width,
+ st->ofm.height[2], st->ofm.depth);
+ dev_dbg(ddev->dev, "op %d: OFM:%d:0x%llx-0x%llx\n",
+ op, st->ofm.region, st->ofm.base[0], len);
+ if (len == U64_MAX)
+ return -EINVAL;
+ info->output_region[st->ofm.region] = true;
+
+ return 0;
+}
+
+static int calc_sizes_elemwise(struct drm_device *ddev,
+ struct ethosu_validated_cmdstream_info *info,
+ u16 op, struct cmd_state *st,
+ bool ifm, bool ifm2)
+{
+ u32 height, width, depth;
+ u64 len;
+
+ if (ifm) {
+ height = st->ifm.broadcast & 0x1 ? 0 : st->ofm.height[2];
+ width = st->ifm.broadcast & 0x2 ? 0 : st->ofm.width;
+ depth = st->ifm.broadcast & 0x4 ? 0 : st->ofm.depth;
+
+ len = feat_matrix_length(info, &st->ifm, width,
+ height, depth);
+ dev_dbg(ddev->dev, "op %d: IFM:%d:0x%llx-0x%llx\n",
+ op, st->ifm.region, st->ifm.base[0], len);
+ if (len == U64_MAX)
+ return -EINVAL;
+ }
+
+ if (ifm2) {
+ height = st->ifm2.broadcast & 0x1 ? 0 : st->ofm.height[2];
+ width = st->ifm2.broadcast & 0x2 ? 0 : st->ofm.width;
+ depth = st->ifm2.broadcast & 0x4 ? 0 : st->ofm.depth;
+
+ len = feat_matrix_length(info, &st->ifm2, width,
+ height, depth);
+ dev_dbg(ddev->dev, "op %d: IFM2:%d:0x%llx-0x%llx\n",
+ op, st->ifm2.region, st->ifm2.base[0], len);
+ if (len == U64_MAX)
+ return -EINVAL;
+ }
+
+ len = feat_matrix_length(info, &st->ofm, st->ofm.width,
+ st->ofm.height[2], st->ofm.depth);
+ dev_dbg(ddev->dev, "op %d: OFM:%d:0x%llx-0x%llx\n",
+ op, st->ofm.region, st->ofm.base[0], len);
+ if (len == U64_MAX)
+ return -EINVAL;
+ info->output_region[st->ofm.region] = true;
+
+ return 0;
+}
+
+static int ethosu_gem_cmdstream_copy_and_validate(struct drm_device *ddev,
+ u32 __user *ucmds,
+ struct ethosu_gem_object *bo,
+ u32 size)
+{
+ struct ethosu_validated_cmdstream_info __free(kfree) *info = kzalloc(sizeof(*info), GFP_KERNEL);
+ struct ethosu_device *edev = to_ethosu_device(ddev);
+ u32 *bocmds = bo->base.vaddr;
+ struct cmd_state st;
+ int i, ret;
+
+ if (!info)
+ return -ENOMEM;
+ info->cmd_size = size;
+
+ cmd_state_init(&st);
+
+ for (i = 0; i < size / 4; i++) {
+ bool use_ifm, use_ifm2, use_scale;
+ u64 dstlen, srclen;
+ u16 cmd, param;
+ u32 cmds[2];
+ u64 addr;
+
+ if (get_user(cmds[0], ucmds++))
+ return -EFAULT;
+
+ bocmds[i] = cmds[0];
+
+ cmd = cmds[0];
+ param = cmds[0] >> 16;
+
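+ /* Commands at 0x4000 and above (bit 14 set) take a second word holding an address or payload */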
+ if (cmd & 0x4000) {
+ if (get_user(cmds[1], ucmds++))
+ return -EFAULT;
+
+ i++;
+ bocmds[i] = cmds[1];
+ addr = cmd_to_addr(cmds);
+ }
+
+ switch (cmd) {
+ case NPU_OP_DMA_START:
+ srclen = dma_length(info, &st.dma, &st.dma.src);
+ dstlen = dma_length(info, &st.dma, &st.dma.dst);
+
+ if (st.dma.dst.region >= 0)
+ info->output_region[st.dma.dst.region] = true;
+ dev_dbg(ddev->dev, "cmd: DMA SRC:%d:0x%llx+0x%llx DST:%d:0x%llx+0x%llx\n",
+ st.dma.src.region, st.dma.src.offset, srclen,
+ st.dma.dst.region, st.dma.dst.offset, dstlen);
+ break;
+ case NPU_OP_CONV:
+ case NPU_OP_DEPTHWISE:
+ use_ifm2 = param & 0x1; // weights_ifm2
+ use_scale = !(st.ofm.precision & 0x100);
+ ret = calc_sizes(ddev, info, cmd, &st, true, use_ifm2,
+ !use_ifm2, use_scale);
+ if (ret)
+ return ret;
+ break;
+ case NPU_OP_POOL:
+ use_ifm = param != 0x4; // pooling mode
+ use_scale = !(st.ofm.precision & 0x100);
+ ret = calc_sizes(ddev, info, cmd, &st, use_ifm, false,
+ false, use_scale);
+ if (ret)
+ return ret;
+ break;
+ case NPU_OP_ELEMENTWISE:
+ use_ifm2 = !((st.ifm2.broadcast == 8) || (param == 5) ||
+ (param == 6) || (param == 7) || (param == 0x24));
+ use_ifm = st.ifm.broadcast != 8;
+ ret = calc_sizes_elemwise(ddev, info, cmd, &st, use_ifm, use_ifm2);
+ if (ret)
+ return ret;
+ break;
+ case NPU_OP_RESIZE: // U85 only
+ /* TODO: RESIZE is not validated yet; reject it rather than let sizes go unchecked */
+ return -EINVAL;
+ case NPU_SET_KERNEL_WIDTH_M1:
+ st.ifm.width = param;
+ break;
+ case NPU_SET_KERNEL_HEIGHT_M1:
+ st.ifm.height[2] = param;
+ break;
+ case NPU_SET_KERNEL_STRIDE:
+ st.ifm.stride_kernel = param;
+ break;
+ case NPU_SET_IFM_PAD_TOP:
+ st.ifm.pad_top = param & 0x7f;
+ break;
+ case NPU_SET_IFM_PAD_LEFT:
+ st.ifm.pad_left = param & 0x7f;
+ break;
+ case NPU_SET_IFM_PAD_RIGHT:
+ st.ifm.pad_right = param & 0xff;
+ break;
+ case NPU_SET_IFM_PAD_BOTTOM:
+ st.ifm.pad_bottom = param & 0xff;
+ break;
+ case NPU_SET_IFM_DEPTH_M1:
+ st.ifm.depth = param;
+ break;
+ case NPU_SET_IFM_PRECISION:
+ st.ifm.precision = param;
+ break;
+ case NPU_SET_IFM_BROADCAST:
+ st.ifm.broadcast = param;
+ break;
+ case NPU_SET_IFM_REGION:
+ st.ifm.region = param & 0x7f;
+ break;
+ case NPU_SET_IFM_WIDTH0_M1:
+ st.ifm.width0 = param;
+ break;
+ case NPU_SET_IFM_HEIGHT0_M1:
+ st.ifm.height[0] = param;
+ break;
+ case NPU_SET_IFM_HEIGHT1_M1:
+ st.ifm.height[1] = param;
+ break;
+ case NPU_SET_IFM_BASE0:
+ case NPU_SET_IFM_BASE1:
+ case NPU_SET_IFM_BASE2:
+ case NPU_SET_IFM_BASE3:
+ st.ifm.base[cmd & 0x3] = addr;
+ break;
+ case NPU_SET_IFM_STRIDE_X:
+ st.ifm.stride_x = addr;
+ break;
+ case NPU_SET_IFM_STRIDE_Y:
+ st.ifm.stride_y = addr;
+ break;
+ case NPU_SET_IFM_STRIDE_C:
+ st.ifm.stride_c = addr;
+ break;
+
+ case NPU_SET_OFM_WIDTH_M1:
+ st.ofm.width = param;
+ break;
+ case NPU_SET_OFM_HEIGHT_M1:
+ st.ofm.height[2] = param;
+ break;
+ case NPU_SET_OFM_DEPTH_M1:
+ st.ofm.depth = param;
+ break;
+ case NPU_SET_OFM_PRECISION:
+ st.ofm.precision = param;
+ break;
+ case NPU_SET_OFM_REGION:
+ st.ofm.region = param & 0x7;
+ break;
+ case NPU_SET_OFM_WIDTH0_M1:
+ st.ofm.width0 = param;
+ break;
+ case NPU_SET_OFM_HEIGHT0_M1:
+ st.ofm.height[0] = param;
+ break;
+ case NPU_SET_OFM_HEIGHT1_M1:
+ st.ofm.height[1] = param;
+ break;
+ case NPU_SET_OFM_BASE0:
+ case NPU_SET_OFM_BASE1:
+ case NPU_SET_OFM_BASE2:
+ case NPU_SET_OFM_BASE3:
+ st.ofm.base[cmd & 0x3] = addr;
+ break;
+ case NPU_SET_OFM_STRIDE_X:
+ st.ofm.stride_x = addr;
+ break;
+ case NPU_SET_OFM_STRIDE_Y:
+ st.ofm.stride_y = addr;
+ break;
+ case NPU_SET_OFM_STRIDE_C:
+ st.ofm.stride_c = addr;
+ break;
+
+ case NPU_SET_IFM2_BROADCAST:
+ st.ifm2.broadcast = param;
+ break;
+ case NPU_SET_IFM2_PRECISION:
+ st.ifm2.precision = param;
+ break;
+ case NPU_SET_IFM2_REGION:
+ st.ifm2.region = param & 0x7;
+ break;
+ case NPU_SET_IFM2_WIDTH0_M1:
+ st.ifm2.width0 = param;
+ break;
+ case NPU_SET_IFM2_HEIGHT0_M1:
+ st.ifm2.height[0] = param;
+ break;
+ case NPU_SET_IFM2_HEIGHT1_M1:
+ st.ifm2.height[1] = param;
+ break;
+ case NPU_SET_IFM2_BASE0:
+ case NPU_SET_IFM2_BASE1:
+ case NPU_SET_IFM2_BASE2:
+ case NPU_SET_IFM2_BASE3:
+ st.ifm2.base[cmd & 0x3] = addr;
+ break;
+ case NPU_SET_IFM2_STRIDE_X:
+ st.ifm2.stride_x = addr;
+ break;
+ case NPU_SET_IFM2_STRIDE_Y:
+ st.ifm2.stride_y = addr;
+ break;
+ case NPU_SET_IFM2_STRIDE_C:
+ st.ifm2.stride_c = addr;
+ break;
+
+ case NPU_SET_WEIGHT_REGION:
+ st.weight[0].region = param & 0x7;
+ break;
+ case NPU_SET_SCALE_REGION:
+ st.scale[0].region = param & 0x7;
+ break;
+ case NPU_SET_WEIGHT_BASE:
+ st.weight[0].base = addr;
+ break;
+ case NPU_SET_WEIGHT_LENGTH:
+ st.weight[0].length = cmds[1];
+ break;
+ case NPU_SET_SCALE_BASE:
+ st.scale[0].base = addr;
+ break;
+ case NPU_SET_SCALE_LENGTH:
+ st.scale[0].length = cmds[1];
+ break;
+ case NPU_SET_WEIGHT1_BASE:
+ st.weight[1].base = addr;
+ break;
+ case NPU_SET_WEIGHT1_LENGTH:
+ st.weight[1].length = cmds[1];
+ break;
+ case NPU_SET_SCALE1_BASE: // NPU_SET_WEIGHT2_BASE (U85)
+ if (ethosu_is_u65(edev))
+ st.scale[1].base = addr;
+ else
+ st.weight[2].base = addr;
+ break;
+ case NPU_SET_SCALE1_LENGTH: // NPU_SET_WEIGHT2_LENGTH (U85)
+ if (ethosu_is_u65(edev))
+ st.scale[1].length = cmds[1];
+ else
+ st.weight[2].length = cmds[1];
+ break;
+ case NPU_SET_WEIGHT3_BASE:
+ st.weight[3].base = addr;
+ break;
+ case NPU_SET_WEIGHT3_LENGTH:
+ st.weight[3].length = cmds[1];
+ break;
+
+ case NPU_SET_DMA0_SRC_REGION:
+ if (param & 0x100)
+ st.dma.src.region = -1;
+ else
+ st.dma.src.region = param & 0x7;
+ st.dma.mode = (param >> 9) & 0x3;
+ break;
+ case NPU_SET_DMA0_DST_REGION:
+ if (param & 0x100)
+ st.dma.dst.region = -1;
+ else
+ st.dma.dst.region = param & 0x7;
+ break;
+ case NPU_SET_DMA0_SIZE0:
+ st.dma.size0 = param;
+ break;
+ case NPU_SET_DMA0_SIZE1:
+ st.dma.size1 = param;
+ break;
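+ /* DMA strides are signed 40-bit values; shifting up then down by 24 sign-extends from bit 39 */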
+ case NPU_SET_DMA0_SRC_STRIDE0:
+ st.dma.src.stride[0] = ((s64)addr << 24) >> 24;
+ break;
+ case NPU_SET_DMA0_SRC_STRIDE1:
+ st.dma.src.stride[1] = ((s64)addr << 24) >> 24;
+ break;
+ case NPU_SET_DMA0_DST_STRIDE0:
+ st.dma.dst.stride[0] = ((s64)addr << 24) >> 24;
+ break;
+ case NPU_SET_DMA0_DST_STRIDE1:
+ st.dma.dst.stride[1] = ((s64)addr << 24) >> 24;
+ break;
+ case NPU_SET_DMA0_SRC:
+ st.dma.src.offset = addr;
+ break;
+ case NPU_SET_DMA0_DST:
+ st.dma.dst.offset = addr;
+ break;
+ case NPU_SET_DMA0_LEN:
+ st.dma.src.len = st.dma.dst.len = addr;
+ break;
+ default:
+ break;
+ }
+ }
+
+ for (i = 0; i < NPU_BASEP_REGION_MAX; i++) {
+ if (!info->region_size[i])
+ continue;
+ dev_dbg(ddev->dev, "region %d max size: 0x%llx\n",
+ i, info->region_size[i]);
+ }
+
+ bo->info = no_free_ptr(info);
+ return 0;
+}
+
+/**
+ * ethosu_gem_cmdstream_create() - Create a GEM object and attach it to a handle.
+ * @file: DRM file.
+ * @ddev: DRM device.
+ * @size: Size of the GEM object to allocate.
+ * @data: User pointer to the command stream to copy in and validate.
+ * @flags: Combination of drm_ethosu_bo_flags flags.
+ * @handle: Pointer holding the handle pointing to the new GEM object.
+ *
+ * Return: Zero on success
+ */
+int ethosu_gem_cmdstream_create(struct drm_file *file,
+ struct drm_device *ddev,
+ u32 size, u64 data, u32 flags, u32 *handle)
+{
+ int ret;
+ struct drm_gem_dma_object *mem;
+ struct ethosu_gem_object *bo;
+
+ mem = drm_gem_dma_create(ddev, size);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ bo = to_ethosu_bo(&mem->base);
+ bo->flags = flags;
+
+ ret = ethosu_gem_cmdstream_copy_and_validate(ddev,
+ (void __user *)(uintptr_t)data,
+ bo, size);
+ if (ret)
+ goto fail;
+
+ /*
+ * Create a handle for the object; the handle is what
+ * userspace uses to refer to it from now on.
+ */
+ ret = drm_gem_handle_create(file, &mem->base, handle);
+
+fail:
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_put(&mem->base);
+
+ return ret;
+}
diff --git a/drivers/accel/ethosu/ethosu_gem.h b/drivers/accel/ethosu/ethosu_gem.h
new file mode 100644
index 000000000000..3922895a60fb
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_gem.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2025 Arm, Ltd. */
+
+#ifndef __ETHOSU_GEM_H__
+#define __ETHOSU_GEM_H__
+
+#include "ethosu_device.h"
+#include <drm/drm_gem_dma_helper.h>
+
+struct ethosu_validated_cmdstream_info {
+ u32 cmd_size;
+ u64 region_size[NPU_BASEP_REGION_MAX];
+ bool output_region[NPU_BASEP_REGION_MAX];
+};
+
+/**
+ * struct ethosu_gem_object - Driver specific GEM object.
+ */
+struct ethosu_gem_object {
+ /** @base: Inherit from drm_gem_dma_object. */
+ struct drm_gem_dma_object base;
+
+ struct ethosu_validated_cmdstream_info *info;
+
+ /** @flags: Combination of drm_ethosu_bo_flags flags. */
+ u32 flags;
+};
+
+static inline
+struct ethosu_gem_object *to_ethosu_bo(struct drm_gem_object *obj)
+{
+ return container_of(to_drm_gem_dma_obj(obj), struct ethosu_gem_object, base);
+}
+
+struct drm_gem_object *ethosu_gem_create_object(struct drm_device *ddev,
+ size_t size);
+
+int ethosu_gem_create_with_handle(struct drm_file *file,
+ struct drm_device *ddev,
+ u64 *size, u32 flags, uint32_t *handle);
+
+int ethosu_gem_cmdstream_create(struct drm_file *file,
+ struct drm_device *ddev,
+ u32 size, u64 data, u32 flags, u32 *handle);
+
+#endif /* __ETHOSU_GEM_H__ */
diff --git a/drivers/accel/ethosu/ethosu_job.c b/drivers/accel/ethosu/ethosu_job.c
new file mode 100644
index 000000000000..26e7a2f64d71
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_job.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+/* Copyright 2025 Arm, Ltd. */
+
+#include <linux/bitfield.h>
+#include <linux/genalloc.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
+#include <drm/ethosu_accel.h>
+
+#include "ethosu_device.h"
+#include "ethosu_drv.h"
+#include "ethosu_gem.h"
+#include "ethosu_job.h"
+
+#define JOB_TIMEOUT_MS 500
+
+static struct ethosu_job *to_ethosu_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct ethosu_job, base);
+}
+
+static const char *ethosu_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "ethosu";
+}
+
+static const char *ethosu_fence_get_timeline_name(struct dma_fence *fence)
+{
+ return "ethosu-npu";
+}
+
+static const struct dma_fence_ops ethosu_fence_ops = {
+ .get_driver_name = ethosu_fence_get_driver_name,
+ .get_timeline_name = ethosu_fence_get_timeline_name,
+};
+
+static void ethosu_job_hw_submit(struct ethosu_device *dev, struct ethosu_job *job)
+{
+ struct drm_gem_dma_object *cmd_bo = to_drm_gem_dma_obj(job->cmd_bo);
+ struct ethosu_validated_cmdstream_info *cmd_info = to_ethosu_bo(job->cmd_bo)->info;
+
+ for (int i = 0; i < job->region_cnt; i++) {
+ struct drm_gem_dma_object *bo;
+ int region = job->region_bo_num[i];
+
+ bo = to_drm_gem_dma_obj(job->region_bo[i]);
+ writel_relaxed(lower_32_bits(bo->dma_addr), dev->regs + NPU_REG_BASEP(region));
+ writel_relaxed(upper_32_bits(bo->dma_addr), dev->regs + NPU_REG_BASEP_HI(region));
+ dev_dbg(dev->base.dev, "Region %d base addr = %pad\n", region, &bo->dma_addr);
+ }
+
+ if (job->sram_size) {
+ writel_relaxed(lower_32_bits(dev->sramphys),
+ dev->regs + NPU_REG_BASEP(ETHOSU_SRAM_REGION));
+ writel_relaxed(upper_32_bits(dev->sramphys),
+ dev->regs + NPU_REG_BASEP_HI(ETHOSU_SRAM_REGION));
+ dev_dbg(dev->base.dev, "Region %d base addr = %pad (SRAM)\n",
+ ETHOSU_SRAM_REGION, &dev->sramphys);
+ }
+
+ writel_relaxed(lower_32_bits(cmd_bo->dma_addr), dev->regs + NPU_REG_QBASE);
+ writel_relaxed(upper_32_bits(cmd_bo->dma_addr), dev->regs + NPU_REG_QBASE_HI);
+ writel_relaxed(cmd_info->cmd_size, dev->regs + NPU_REG_QSIZE);
+
+ writel(CMD_TRANSITION_TO_RUN, dev->regs + NPU_REG_CMD);
+
+ dev_dbg(dev->base.dev,
+ "Submitted cmd at %pad to core\n", &cmd_bo->dma_addr);
+}
+
+static int ethosu_acquire_object_fences(struct ethosu_job *job)
+{
+ int i, ret;
+ struct drm_gem_object **bos = job->region_bo;
+ struct ethosu_validated_cmdstream_info *info = to_ethosu_bo(job->cmd_bo)->info;
+
+ for (i = 0; i < job->region_cnt; i++) {
+ bool is_write;
+
+ if (!bos[i])
+ break;
+
+ ret = dma_resv_reserve_fences(bos[i]->resv, 1);
+ if (ret)
+ return ret;
+
+ is_write = info->output_region[job->region_bo_num[i]];
+ ret = drm_sched_job_add_implicit_dependencies(&job->base, bos[i],
+ is_write);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ethosu_attach_object_fences(struct ethosu_job *job)
+{
+ int i;
+ struct dma_fence *fence = job->inference_done_fence;
+ struct drm_gem_object **bos = job->region_bo;
+ struct ethosu_validated_cmdstream_info *info = to_ethosu_bo(job->cmd_bo)->info;
+
+ for (i = 0; i < job->region_cnt; i++)
+ if (info->output_region[job->region_bo_num[i]])
+ dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE);
+}
+
+static int ethosu_job_push(struct ethosu_job *job)
+{
+ struct ww_acquire_ctx acquire_ctx;
+ int ret;
+
+ ret = drm_gem_lock_reservations(job->region_bo, job->region_cnt, &acquire_ctx);
+ if (ret)
+ return ret;
+
+ ret = ethosu_acquire_object_fences(job);
+ if (ret)
+ goto out;
+
+ ret = pm_runtime_resume_and_get(job->dev->base.dev);
+ if (!ret) {
+ guard(mutex)(&job->dev->sched_lock);
+
+ drm_sched_job_arm(&job->base);
+ job->inference_done_fence = dma_fence_get(&job->base.s_fence->finished);
+ kref_get(&job->refcount); /* put by scheduler job completion */
+ drm_sched_entity_push_job(&job->base);
+ ethosu_attach_object_fences(job);
+ }
+
+out:
+ drm_gem_unlock_reservations(job->region_bo, job->region_cnt, &acquire_ctx);
+ return ret;
+}
+
+static void ethosu_job_cleanup(struct kref *ref)
+{
+ struct ethosu_job *job = container_of(ref, struct ethosu_job,
+ refcount);
+ unsigned int i;
+
+ pm_runtime_put_autosuspend(job->dev->base.dev);
+
+ dma_fence_put(job->done_fence);
+ dma_fence_put(job->inference_done_fence);
+
+ for (i = 0; i < job->region_cnt; i++)
+ drm_gem_object_put(job->region_bo[i]);
+
+ drm_gem_object_put(job->cmd_bo);
+
+ kfree(job);
+}
+
+static void ethosu_job_put(struct ethosu_job *job)
+{
+ kref_put(&job->refcount, ethosu_job_cleanup);
+}
+
+static void ethosu_job_free(struct drm_sched_job *sched_job)
+{
+ struct ethosu_job *job = to_ethosu_job(sched_job);
+
+ drm_sched_job_cleanup(sched_job);
+ ethosu_job_put(job);
+}
+
+static struct dma_fence *ethosu_job_run(struct drm_sched_job *sched_job)
+{
+ struct ethosu_job *job = to_ethosu_job(sched_job);
+ struct ethosu_device *dev = job->dev;
+ struct dma_fence *fence = job->done_fence;
+
+ if (unlikely(job->base.s_fence->finished.error))
+ return NULL;
+
+ dma_fence_init(fence, &ethosu_fence_ops, &dev->fence_lock,
+ dev->fence_context, ++dev->emit_seqno);
+ dma_fence_get(fence);
+
+ scoped_guard(mutex, &dev->job_lock) {
+ dev->in_flight_job = job;
+ ethosu_job_hw_submit(dev, job);
+ }
+
+ return fence;
+}
+
+static void ethosu_job_handle_irq(struct ethosu_device *dev)
+{
+ u32 status = readl_relaxed(dev->regs + NPU_REG_STATUS);
+
+ if (status & (STATUS_BUS_STATUS | STATUS_CMD_PARSE_ERR)) {
+ dev_err(dev->base.dev, "Error IRQ - %x\n", status);
+ drm_sched_fault(&dev->sched);
+ return;
+ }
+
+ scoped_guard(mutex, &dev->job_lock) {
+ if (dev->in_flight_job) {
+ dma_fence_signal(dev->in_flight_job->done_fence);
+ dev->in_flight_job = NULL;
+ }
+ }
+}
+
+static irqreturn_t ethosu_job_irq_handler_thread(int irq, void *data)
+{
+ struct ethosu_device *dev = data;
+
+ ethosu_job_handle_irq(dev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ethosu_job_irq_handler(int irq, void *data)
+{
+ struct ethosu_device *dev = data;
+ u32 status = readl_relaxed(dev->regs + NPU_REG_STATUS);
+
+ if (!(status & STATUS_IRQ_RAISED))
+ return IRQ_NONE;
+
+ writel_relaxed(CMD_CLEAR_IRQ, dev->regs + NPU_REG_CMD);
+ return IRQ_WAKE_THREAD;
+}
+
+static enum drm_gpu_sched_stat ethosu_job_timedout(struct drm_sched_job *bad)
+{
+ struct ethosu_job *job = to_ethosu_job(bad);
+ struct ethosu_device *dev = job->dev;
+ bool running;
+ u32 *bocmds = to_drm_gem_dma_obj(job->cmd_bo)->vaddr;
+ u32 cmdaddr;
+
+ cmdaddr = readl_relaxed(dev->regs + NPU_REG_QREAD);
+ running = FIELD_GET(STATUS_STATE_RUNNING, readl_relaxed(dev->regs + NPU_REG_STATUS));
+
+ if (running) {
+ int ret;
+ u32 reg;
+
+ ret = readl_relaxed_poll_timeout(dev->regs + NPU_REG_QREAD,
+ reg,
+ reg != cmdaddr,
+ USEC_PER_MSEC, 100 * USEC_PER_MSEC);
+
+ /* If still running and progress is being made, just return */
+ if (!ret)
+ return DRM_GPU_SCHED_STAT_NO_HANG;
+ }
+
+ dev_err(dev->base.dev, "NPU sched timed out: NPU %s, cmdstream offset 0x%x: 0x%x\n",
+ running ? "running" : "stopped",
+ cmdaddr, bocmds[cmdaddr / 4]);
+
+ drm_sched_stop(&dev->sched, bad);
+
+ scoped_guard(mutex, &dev->job_lock)
+ dev->in_flight_job = NULL;
+
+ /* Proceed with reset now. */
+ pm_runtime_force_suspend(dev->base.dev);
+ pm_runtime_force_resume(dev->base.dev);
+
+ /* Restart the scheduler */
+ drm_sched_start(&dev->sched, 0);
+
+ return DRM_GPU_SCHED_STAT_RESET;
+}
+
+static const struct drm_sched_backend_ops ethosu_sched_ops = {
+ .run_job = ethosu_job_run,
+ .timedout_job = ethosu_job_timedout,
+ .free_job = ethosu_job_free
+};
+
+int ethosu_job_init(struct ethosu_device *edev)
+{
+ struct device *dev = edev->base.dev;
+ struct drm_sched_init_args args = {
+ .ops = &ethosu_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = 1,
+ .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
+ .name = dev_name(dev),
+ .dev = dev,
+ };
+ int ret;
+
+ spin_lock_init(&edev->fence_lock);
+ ret = devm_mutex_init(dev, &edev->job_lock);
+ if (ret)
+ return ret;
+ ret = devm_mutex_init(dev, &edev->sched_lock);
+ if (ret)
+ return ret;
+
+ edev->irq = platform_get_irq(to_platform_device(dev), 0);
+ if (edev->irq < 0)
+ return edev->irq;
+
+ ret = devm_request_threaded_irq(dev, edev->irq,
+ ethosu_job_irq_handler,
+ ethosu_job_irq_handler_thread,
+ IRQF_SHARED, KBUILD_MODNAME,
+ edev);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ return ret;
+ }
+
+ edev->fence_context = dma_fence_context_alloc(1);
+
+ ret = drm_sched_init(&edev->sched, &args);
+ if (ret) {
+ dev_err(dev, "Failed to create scheduler: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ethosu_job_fini(struct ethosu_device *dev)
+{
+ drm_sched_fini(&dev->sched);
+}
+
+int ethosu_job_open(struct ethosu_file_priv *ethosu_priv)
+{
+ struct ethosu_device *dev = ethosu_priv->edev;
+ struct drm_gpu_scheduler *sched = &dev->sched;
+ int ret;
+
+ ret = drm_sched_entity_init(&ethosu_priv->sched_entity,
+ DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
+ WARN_ON(ret);
+ return ret;
+}
+
+void ethosu_job_close(struct ethosu_file_priv *ethosu_priv)
+{
+ struct drm_sched_entity *entity = &ethosu_priv->sched_entity;
+
+ drm_sched_entity_destroy(entity);
+}
+
+static int ethosu_ioctl_submit_job(struct drm_device *dev, struct drm_file *file,
+ struct drm_ethosu_job *job)
+{
+ struct ethosu_device *edev = to_ethosu_device(dev);
+ struct ethosu_file_priv *file_priv = file->driver_priv;
+ struct ethosu_job *ejob = NULL;
+ struct ethosu_validated_cmdstream_info *cmd_info;
+ int ret = 0;
+
+ /* BO region 2 is reserved if SRAM is used */
+ if (job->region_bo_handles[ETHOSU_SRAM_REGION] && job->sram_size)
+ return -EINVAL;
+
+ if (edev->npu_info.sram_size < job->sram_size)
+ return -EINVAL;
+
+ ejob = kzalloc(sizeof(*ejob), GFP_KERNEL);
+ if (!ejob)
+ return -ENOMEM;
+
+ kref_init(&ejob->refcount);
+
+ ejob->dev = edev;
+ ejob->sram_size = job->sram_size;
+
+ ejob->done_fence = kzalloc(sizeof(*ejob->done_fence), GFP_KERNEL);
+ if (!ejob->done_fence) {
+ ret = -ENOMEM;
+ goto out_put_job;
+ }
+
+ ret = drm_sched_job_init(&ejob->base,
+ &file_priv->sched_entity,
+ 1, NULL, file->client_id);
+ if (ret)
+ goto out_put_job;
+
+ ejob->cmd_bo = drm_gem_object_lookup(file, job->cmd_bo);
+ if (!ejob->cmd_bo) {
+ ret = -ENOENT;
+ goto out_cleanup_job;
+ }
+ cmd_info = to_ethosu_bo(ejob->cmd_bo)->info;
+ if (!cmd_info) {
+ ret = -EINVAL;
+ goto out_cleanup_job;
+ }
+
+ for (int i = 0; i < NPU_BASEP_REGION_MAX; i++) {
+ struct drm_gem_object *gem;
+
+ /* Can only omit a BO handle if the region is not used or used for SRAM */
+ if (!job->region_bo_handles[i] &&
+ (!cmd_info->region_size[i] || (i == ETHOSU_SRAM_REGION && job->sram_size)))
+ continue;
+
+ if (job->region_bo_handles[i] && !cmd_info->region_size[i]) {
+ dev_err(dev->dev,
+ "Cmdstream BO handle %d set for unused region %d\n",
+ job->region_bo_handles[i], i);
+ ret = -EINVAL;
+ goto out_cleanup_job;
+ }
+
+ gem = drm_gem_object_lookup(file, job->region_bo_handles[i]);
+ if (!gem) {
+ dev_err(dev->dev,
+ "Invalid BO handle %d for region %d\n",
+ job->region_bo_handles[i], i);
+ ret = -ENOENT;
+ goto out_cleanup_job;
+ }
+
+ ejob->region_bo[ejob->region_cnt] = gem;
+ ejob->region_bo_num[ejob->region_cnt] = i;
+ ejob->region_cnt++;
+
+ if (to_ethosu_bo(gem)->info) {
+ dev_err(dev->dev,
+ "Cmdstream BO handle %d used for region %d\n",
+ job->region_bo_handles[i], i);
+ ret = -EINVAL;
+ goto out_cleanup_job;
+ }
+
+ /* Verify the command stream doesn't have accesses outside the BO */
+ if (cmd_info->region_size[i] > gem->size) {
+ dev_err(dev->dev,
+ "cmd stream region %d size greater than BO size (%llu > %zu)\n",
+ i, cmd_info->region_size[i], gem->size);
+ ret = -EOVERFLOW;
+ goto out_cleanup_job;
+ }
+ }
+ ret = ethosu_job_push(ejob);
+
+out_cleanup_job:
+ if (ret)
+ drm_sched_job_cleanup(&ejob->base);
+out_put_job:
+ ethosu_job_put(ejob);
+
+ return ret;
+}
+
+int ethosu_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_ethosu_submit *args = data;
+ int ret = 0;
+ unsigned int i = 0;
+
+ if (args->pad) {
+ drm_dbg(dev, "Reserved field in drm_ethosu_submit struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ struct drm_ethosu_job __free(kvfree) *jobs =
+ kvmalloc_array(args->job_count, sizeof(*jobs), GFP_KERNEL);
+ if (!jobs)
+ return -ENOMEM;
+
+ if (copy_from_user(jobs,
+ (void __user *)(uintptr_t)args->jobs,
+ args->job_count * sizeof(*jobs))) {
+ drm_dbg(dev, "Failed to copy incoming job array\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < args->job_count; i++) {
+ ret = ethosu_ioctl_submit_job(dev, file, &jobs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/accel/ethosu/ethosu_job.h b/drivers/accel/ethosu/ethosu_job.h
new file mode 100644
index 000000000000..ff1cf448d094
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_job.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+/* Copyright 2025 Arm, Ltd. */
+
+#ifndef __ETHOSU_JOB_H__
+#define __ETHOSU_JOB_H__
+
+#include <linux/kref.h>
+#include <drm/gpu_scheduler.h>
+
+struct ethosu_device;
+struct ethosu_file_priv;
+
+struct ethosu_job {
+ struct drm_sched_job base;
+ struct ethosu_device *dev;
+
+ struct drm_gem_object *cmd_bo;
+ struct drm_gem_object *region_bo[NPU_BASEP_REGION_MAX];
+ u8 region_bo_num[NPU_BASEP_REGION_MAX];
+ u8 region_cnt;
+ u32 sram_size;
+
+ /* Fence to be signaled by drm-sched once it's done with the job */
+ struct dma_fence *inference_done_fence;
+
+ /* Fence to be signaled by IRQ handler when the job is complete. */
+ struct dma_fence *done_fence;
+
+ struct kref refcount;
+};
+
+int ethosu_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file);
+
+int ethosu_job_init(struct ethosu_device *dev);
+void ethosu_job_fini(struct ethosu_device *dev);
+int ethosu_job_open(struct ethosu_file_priv *ethosu_priv);
+void ethosu_job_close(struct ethosu_file_priv *ethosu_priv);
+
+#endif
diff --git a/drivers/accel/habanalabs/Kconfig b/drivers/accel/habanalabs/Kconfig
index 1919fbb169c7..6d1506acbd72 100644
--- a/drivers/accel/habanalabs/Kconfig
+++ b/drivers/accel/habanalabs/Kconfig
@@ -27,3 +27,26 @@ config DRM_ACCEL_HABANALABS
To compile this driver as a module, choose M here: the
module will be called habanalabs.
+
+if DRM_ACCEL_HABANALABS
+
+config HL_HLDIO
+ bool "Habanalabs NVMe Direct I/O (HLDIO)"
+ depends on PCI_P2PDMA
+ depends on BLOCK
+ help
+ Enable NVMe peer-to-peer direct I/O support for Habanalabs AI
+ accelerators.
+
+ This allows direct data transfers between NVMe storage devices
+ and Habanalabs accelerators without involving system memory,
+ using PCI peer-to-peer DMA capabilities.
+
+ Requirements:
+ - CONFIG_PCI_P2PDMA=y
+ - NVMe device and Habanalabs accelerator under same PCI root complex
+ - IOMMU disabled or in passthrough mode
+ - Hardware supporting PCI P2P DMA
+
+ If unsure, say N.
+
+endif # DRM_ACCEL_HABANALABS
diff --git a/drivers/accel/habanalabs/common/Makefile b/drivers/accel/habanalabs/common/Makefile
index e6abffea9f87..b6d00de09db5 100644
--- a/drivers/accel/habanalabs/common/Makefile
+++ b/drivers/accel/habanalabs/common/Makefile
@@ -13,3 +13,8 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
common/command_submission.o common/firmware_if.o \
common/security.o common/state_dump.o \
common/memory_mgr.o common/decoder.o
+
+# Conditionally add HLDIO support
+ifdef CONFIG_HL_HLDIO
+HL_COMMON_FILES += common/hldio.o
+endif \ No newline at end of file
diff --git a/drivers/accel/habanalabs/common/debugfs.c b/drivers/accel/habanalabs/common/debugfs.c
index 4b391807e5f2..5f0820b19ccb 100644
--- a/drivers/accel/habanalabs/common/debugfs.c
+++ b/drivers/accel/habanalabs/common/debugfs.c
@@ -6,6 +6,7 @@
*/
#include "habanalabs.h"
+#include "hldio.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include <linux/pci.h>
@@ -602,6 +603,198 @@ static int engines_show(struct seq_file *s, void *data)
return 0;
}
+#ifdef CONFIG_HL_HLDIO
+/* DIO debugfs functions following the standard pattern */
+static int dio_ssd2hl_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+
+ if (!hdev->asic_prop.supports_nvme) {
+ seq_puts(s, "NVMe Direct I/O not supported\\n");
+ return 0;
+ }
+
+ seq_puts(s, "Usage: echo \"fd=N va=0xADDR off=N len=N\" > dio_ssd2hl\n");
+ seq_printf(s, "Last transfer: %zu bytes\\n", dev_entry->dio_stats.last_len_read);
+ seq_puts(s, "Note: All parameters must be page-aligned (4KB)\\n");
+
+ return 0;
+}
+
+static ssize_t dio_ssd2hl_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ struct hl_ctx *ctx = hdev->kernel_ctx;
+ char kbuf[128];
+ u64 device_va = 0, off_bytes = 0, len_bytes = 0;
+ u32 fd = 0;
+ size_t len_read = 0;
+ int rc, parsed;
+
+ if (!hdev->asic_prop.supports_nvme)
+ return -EOPNOTSUPP;
+
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ kbuf[count] = 0;
+
+ /* Parse: fd=N va=0xADDR off=N len=N */
+ parsed = sscanf(kbuf, "fd=%u va=0x%llx off=%llu len=%llu",
+ &fd, &device_va, &off_bytes, &len_bytes);
+ if (parsed != 4) {
+ dev_err(hdev->dev, "Invalid format. Expected: fd=N va=0xADDR off=N len=N\\n");
+ return -EINVAL;
+ }
+
+ /* Validate file descriptor */
+ if (fd == 0) {
+ dev_err(hdev->dev, "Invalid file descriptor: %u\\n", fd);
+ return -EINVAL;
+ }
+
+ /* Validate alignment requirements */
+ if (!IS_ALIGNED(device_va, PAGE_SIZE) ||
+ !IS_ALIGNED(off_bytes, PAGE_SIZE) ||
+ !IS_ALIGNED(len_bytes, PAGE_SIZE)) {
+ dev_err(hdev->dev,
+			"All parameters must be page-aligned (4KB)\n");
+ return -EINVAL;
+ }
+
+ /* Validate transfer size */
+ if (len_bytes == 0 || len_bytes > SZ_1G) {
+		dev_err(hdev->dev, "Invalid length: %llu (max 1GB)\n",
+ len_bytes);
+ return -EINVAL;
+ }
+
+	dev_dbg(hdev->dev, "DIO SSD2HL: fd=%u va=0x%llx off=%llu len=%llu\n",
+ fd, device_va, off_bytes, len_bytes);
+
+ rc = hl_dio_ssd2hl(hdev, ctx, fd, device_va, off_bytes, len_bytes, &len_read);
+ if (rc < 0) {
+ dev_entry->dio_stats.failed_ops++;
+		dev_err(hdev->dev, "SSD2HL operation failed: %d\n", rc);
+ return rc;
+ }
+
+ /* Update statistics */
+ dev_entry->dio_stats.total_ops++;
+ dev_entry->dio_stats.successful_ops++;
+ dev_entry->dio_stats.bytes_transferred += len_read;
+ dev_entry->dio_stats.last_len_read = len_read;
+
+	dev_dbg(hdev->dev, "DIO SSD2HL completed: %zu bytes transferred\n", len_read);
+
+ return count;
+}
+
+static int dio_hl2ssd_show(struct seq_file *s, void *data)
+{
+	seq_puts(s, "HL2SSD (device-to-SSD) transfers not implemented\n");
+ return 0;
+}
+
+static ssize_t dio_hl2ssd_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+
+ if (!hdev->asic_prop.supports_nvme)
+ return -EOPNOTSUPP;
+
+	dev_dbg(hdev->dev, "HL2SSD operation not implemented\n");
+ return -EOPNOTSUPP;
+}
+
+static int dio_stats_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ struct hl_dio_stats *stats = &dev_entry->dio_stats;
+ u64 avg_bytes_per_op = 0, success_rate = 0;
+
+ if (!hdev->asic_prop.supports_nvme) {
+		seq_puts(s, "NVMe Direct I/O not supported\n");
+ return 0;
+ }
+
+ if (stats->successful_ops > 0)
+ avg_bytes_per_op = stats->bytes_transferred / stats->successful_ops;
+
+ if (stats->total_ops > 0)
+ success_rate = (stats->successful_ops * 100) / stats->total_ops;
+
+	seq_puts(s, "=== Habanalabs Direct I/O Statistics ===\n");
+	seq_printf(s, "Total operations: %llu\n", stats->total_ops);
+	seq_printf(s, "Successful ops: %llu\n", stats->successful_ops);
+	seq_printf(s, "Failed ops: %llu\n", stats->failed_ops);
+	seq_printf(s, "Success rate: %llu%%\n", success_rate);
+	seq_printf(s, "Total bytes: %llu\n", stats->bytes_transferred);
+	seq_printf(s, "Avg bytes per op: %llu\n", avg_bytes_per_op);
+	seq_printf(s, "Last transfer: %zu bytes\n", stats->last_len_read);
+
+ return 0;
+}
+
+static int dio_reset_show(struct seq_file *s, void *data)
+{
+	seq_puts(s, "Write '1' to reset DIO statistics\n");
+ return 0;
+}
+
+static ssize_t dio_reset_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ char kbuf[8];
+ unsigned long val;
+ int rc;
+
+ if (!hdev->asic_prop.supports_nvme)
+ return -EOPNOTSUPP;
+
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ kbuf[count] = 0;
+
+ rc = kstrtoul(kbuf, 0, &val);
+ if (rc)
+ return rc;
+
+ if (val == 1) {
+ memset(&dev_entry->dio_stats, 0, sizeof(dev_entry->dio_stats));
+		dev_dbg(hdev->dev, "DIO statistics reset\n");
+ } else {
+		dev_err(hdev->dev, "Write '1' to reset statistics\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+#endif /* CONFIG_HL_HLDIO */
+
static ssize_t hl_memory_scrub(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -788,6 +981,113 @@ static void hl_access_host_mem(struct hl_device *hdev, u64 addr, u64 *val,
}
}
+static void dump_cfg_access_entry(struct hl_device *hdev,
+ struct hl_debugfs_cfg_access_entry *entry)
+{
+	const char *access_type = "";
+ struct tm tm;
+
+ switch (entry->debugfs_type) {
+ case DEBUGFS_READ32:
+ access_type = "READ32 from";
+ break;
+ case DEBUGFS_WRITE32:
+ access_type = "WRITE32 to";
+ break;
+ case DEBUGFS_READ64:
+ access_type = "READ64 from";
+ break;
+ case DEBUGFS_WRITE64:
+ access_type = "WRITE64 to";
+ break;
+ default:
+ dev_err(hdev->dev, "Invalid DEBUGFS access type (%u)\n", entry->debugfs_type);
+ return;
+ }
+
+ time64_to_tm(entry->seconds_since_epoch, 0, &tm);
+ dev_info(hdev->dev,
+ "%ld-%02d-%02d %02d:%02d:%02d (UTC): %s %#llx\n", tm.tm_year + 1900, tm.tm_mon + 1,
+ tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, access_type, entry->addr);
+}
+
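+/*
+ * Dump the recent debugfs config-space accesses to the kernel log,
+ * newest first. Entries older than HL_DBGFS_CFG_ACCESS_HIST_TIMEOUT_SEC
+ * are skipped, and every dumped entry is invalidated so a later dump
+ * does not replay it.
+ */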
+void hl_debugfs_cfg_access_history_dump(struct hl_device *hdev)
+{
+ struct hl_debugfs_cfg_access *dbgfs = &hdev->debugfs_cfg_accesses;
+ u32 i, head, count = 0;
+ time64_t entry_time, now;
+ unsigned long flags;
+
+ now = ktime_get_real_seconds();
+
+ spin_lock_irqsave(&dbgfs->lock, flags);
+ head = dbgfs->head;
+ if (head == 0)
+ i = HL_DBGFS_CFG_ACCESS_HIST_LEN - 1;
+ else
+ i = head - 1;
+
+ /* Walk back until timeout or invalid entry */
+ while (dbgfs->cfg_access_list[i].valid) {
+ entry_time = dbgfs->cfg_access_list[i].seconds_since_epoch;
+ /* Stop when entry is older than timeout */
+ if (now - entry_time > HL_DBGFS_CFG_ACCESS_HIST_TIMEOUT_SEC)
+ break;
+
+		{
+			struct hl_debugfs_cfg_access_entry entry = dbgfs->cfg_access_list[i];
+
+			/*
+			 * Copy the entry out under the lock, then drop the
+			 * lock while printing to minimize time under lock.
+			 */
+ spin_unlock_irqrestore(&dbgfs->lock, flags);
+ dump_cfg_access_entry(hdev, &entry);
+ spin_lock_irqsave(&dbgfs->lock, flags);
+ }
+
+ /* mark consumed */
+ dbgfs->cfg_access_list[i].valid = false;
+
+ if (i == 0)
+ i = HL_DBGFS_CFG_ACCESS_HIST_LEN - 1;
+ else
+ i--;
+ count++;
+ if (count >= HL_DBGFS_CFG_ACCESS_HIST_LEN)
+ break;
+ }
+ spin_unlock_irqrestore(&dbgfs->lock, flags);
+}
+
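+/*
+ * Record a config-region access in a fixed-size ring buffer; once
+ * HL_DBGFS_CFG_ACCESS_HIST_LEN entries are in use the newest entry
+ * overwrites the oldest. The history is replayed on fatal events by
+ * hl_debugfs_cfg_access_history_dump().
+ */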
+static void check_if_cfg_access_and_log(struct hl_device *hdev, u64 addr, size_t access_size,
+ enum debugfs_access_type access_type)
+{
+ struct hl_debugfs_cfg_access *dbgfs_cfg_accesses = &hdev->debugfs_cfg_accesses;
+ struct pci_mem_region *mem_reg = &hdev->pci_mem_region[PCI_REGION_CFG];
+ struct hl_debugfs_cfg_access_entry *new_entry;
+ unsigned long flags;
+
+ /* Check if address is in config memory */
+ if (addr >= mem_reg->region_base &&
+ mem_reg->region_size >= access_size &&
+ addr <= mem_reg->region_base + mem_reg->region_size - access_size) {
+
+ spin_lock_irqsave(&dbgfs_cfg_accesses->lock, flags);
+
+ new_entry = &dbgfs_cfg_accesses->cfg_access_list[dbgfs_cfg_accesses->head];
+ new_entry->seconds_since_epoch = ktime_get_real_seconds();
+ new_entry->addr = addr;
+ new_entry->debugfs_type = access_type;
+ new_entry->valid = true;
+ dbgfs_cfg_accesses->head = (dbgfs_cfg_accesses->head + 1)
+ % HL_DBGFS_CFG_ACCESS_HIST_LEN;
+
+ spin_unlock_irqrestore(&dbgfs_cfg_accesses->lock, flags);
+	}
+}
+
static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type)
{
@@ -805,6 +1105,7 @@ static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
return rc;
}
+ check_if_cfg_access_and_log(hdev, addr, acc_size, acc_type);
rc = hl_access_dev_mem_by_region(hdev, addr, val, acc_type, &found);
if (rc) {
dev_err(hdev->dev,
@@ -1525,6 +1826,13 @@ static const struct hl_info_list hl_debugfs_list[] = {
{"mmu", mmu_show, mmu_asid_va_write},
{"mmu_error", mmu_ack_error, mmu_ack_error_value_write},
{"engines", engines_show, NULL},
+#ifdef CONFIG_HL_HLDIO
+ /* DIO entries - only created if NVMe is supported */
+ {"dio_ssd2hl", dio_ssd2hl_show, dio_ssd2hl_write},
+ {"dio_stats", dio_stats_show, NULL},
+ {"dio_reset", dio_reset_show, dio_reset_write},
+ {"dio_hl2ssd", dio_hl2ssd_show, dio_hl2ssd_write},
+#endif
};
static int hl_debugfs_open(struct inode *inode, struct file *file)
@@ -1723,6 +2031,11 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
&hdev->asic_prop.server_type);
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
+ /* Skip DIO entries if NVMe is not supported */
+ if (strncmp(hl_debugfs_list[i].name, "dio_", 4) == 0 &&
+ !hdev->asic_prop.supports_nvme)
+ continue;
+
debugfs_create_file(hl_debugfs_list[i].name,
0644,
root,
@@ -1762,6 +2075,14 @@ int hl_debugfs_device_init(struct hl_device *hdev)
spin_lock_init(&dev_entry->userptr_spinlock);
mutex_init(&dev_entry->ctx_mem_hash_mutex);
+ spin_lock_init(&hdev->debugfs_cfg_accesses.lock);
+ hdev->debugfs_cfg_accesses.head = 0; /* already zero by alloc but explicit init is fine */
+
+#ifdef CONFIG_HL_HLDIO
+ /* Initialize DIO statistics */
+ memset(&dev_entry->dio_stats, 0, sizeof(dev_entry->dio_stats));
+#endif
+
return 0;
}
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index 68eebed3b050..999c92d7036e 100644
--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -1066,28 +1066,11 @@ static bool is_pci_link_healthy(struct hl_device *hdev)
return (device_id == hdev->pdev->device);
}
-static void stringify_time_of_last_heartbeat(struct hl_device *hdev, char *time_str, size_t size,
- bool is_pq_hb)
-{
- time64_t seconds = is_pq_hb ? hdev->heartbeat_debug_info.last_pq_heartbeat_ts
- : hdev->heartbeat_debug_info.last_eq_heartbeat_ts;
- struct tm tm;
-
- if (!seconds)
- return;
-
- time64_to_tm(seconds, 0, &tm);
-
- snprintf(time_str, size, "%ld-%02d-%02d %02d:%02d:%02d (UTC)",
- tm.tm_year + 1900, tm.tm_mon, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
-}
-
static bool hl_device_eq_heartbeat_received(struct hl_device *hdev)
{
struct eq_heartbeat_debug_info *heartbeat_debug_info = &hdev->heartbeat_debug_info;
u32 cpu_q_id = heartbeat_debug_info->cpu_queue_id, pq_pi_mask = (HL_QUEUE_LENGTH << 1) - 1;
struct asic_fixed_properties *prop = &hdev->asic_prop;
- char pq_time_str[64] = "N/A", eq_time_str[64] = "N/A";
if (!prop->cpucp_info.eq_health_check_supported)
return true;
@@ -1095,17 +1078,15 @@ static bool hl_device_eq_heartbeat_received(struct hl_device *hdev)
if (!hdev->eq_heartbeat_received) {
dev_err(hdev->dev, "EQ heartbeat event was not received!\n");
- stringify_time_of_last_heartbeat(hdev, pq_time_str, sizeof(pq_time_str), true);
- stringify_time_of_last_heartbeat(hdev, eq_time_str, sizeof(eq_time_str), false);
dev_err(hdev->dev,
- "EQ: {CI %u, HB counter %u, last HB time: %s}, PQ: {PI: %u, CI: %u (%u), last HB time: %s}\n",
+ "EQ: {CI %u, HB counter %u, last HB time: %ptTs}, PQ: {PI: %u, CI: %u (%u), last HB time: %ptTs}\n",
hdev->event_queue.ci,
heartbeat_debug_info->heartbeat_event_counter,
- eq_time_str,
+ &hdev->heartbeat_debug_info.last_eq_heartbeat_ts,
hdev->kernel_queues[cpu_q_id].pi,
atomic_read(&hdev->kernel_queues[cpu_q_id].ci),
atomic_read(&hdev->kernel_queues[cpu_q_id].ci) & pq_pi_mask,
- pq_time_str);
+ &hdev->heartbeat_debug_info.last_pq_heartbeat_ts);
hl_eq_dump(hdev, &hdev->event_queue);
@@ -1649,6 +1630,11 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);
reset_upon_device_release = hdev->reset_upon_device_release && from_dev_release;
+ if (hdev->cpld_shutdown) {
+ dev_err(hdev->dev, "Cannot reset device, cpld is shutdown! Device is NOT usable\n");
+ return -EIO;
+ }
+
if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) {
dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n");
return 0;
@@ -2595,6 +2581,14 @@ void hl_device_fini(struct hl_device *hdev)
if (rc)
dev_err(hdev->dev, "hw_fini failed in device fini while removing device %d\n", rc);
+	/* Reset the H/W (if it is accessible). It will be in idle state after this returns */
+ if (!hdev->cpld_shutdown) {
+ rc = hdev->asic_funcs->hw_fini(hdev, true, false);
+ if (rc)
+ dev_err(hdev->dev,
+ "hw_fini failed in device fini while removing device %d\n", rc);
+ }
+
hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
/* Release kernel context */
@@ -2962,3 +2956,13 @@ void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *eve
mutex_unlock(&clk_throttle->lock);
}
+
+void hl_eq_cpld_shutdown_event_handle(struct hl_device *hdev, u16 event_id, u64 *event_mask)
+{
+ hl_handle_critical_hw_err(hdev, event_id, event_mask);
+ *event_mask |= HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;
+
+ /* Avoid any new accesses to the H/W */
+ hdev->disabled = true;
+ hdev->cpld_shutdown = true;
+}
diff --git a/drivers/accel/habanalabs/common/habanalabs.h b/drivers/accel/habanalabs/common/habanalabs.h
index 6f27ce4fa01b..d94c2ba22a6a 100644
--- a/drivers/accel/habanalabs/common/habanalabs.h
+++ b/drivers/accel/habanalabs/common/habanalabs.h
@@ -90,7 +90,9 @@ struct hl_fpriv;
#define HL_COMMON_USER_CQ_INTERRUPT_ID 0xFFF
#define HL_COMMON_DEC_INTERRUPT_ID 0xFFE
-#define HL_STATE_DUMP_HIST_LEN 5
+#define HL_STATE_DUMP_HIST_LEN 5
+#define HL_DBGFS_CFG_ACCESS_HIST_LEN 20
+#define HL_DBGFS_CFG_ACCESS_HIST_TIMEOUT_SEC 2 /* 2s */
/* Default value for device reset trigger , an invalid value */
#define HL_RESET_TRIGGER_DEFAULT 0xFF
@@ -702,6 +704,7 @@ struct hl_hints_range {
* @supports_advanced_cpucp_rc: true if new cpucp opcodes are supported.
* @supports_engine_modes: true if changing engines/engine_cores modes is supported.
* @support_dynamic_resereved_fw_size: true if we support dynamic reserved size for fw.
+ * @supports_nvme: indicates whether the asic supports NVMe P2P DMA.
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
@@ -822,6 +825,7 @@ struct asic_fixed_properties {
u8 supports_advanced_cpucp_rc;
u8 supports_engine_modes;
u8 support_dynamic_resereved_fw_size;
+ u8 supports_nvme;
};
/**
@@ -2274,6 +2278,9 @@ struct hl_vm {
u8 init_done;
};
+#ifdef CONFIG_HL_HLDIO
+#include "hldio.h"
+#endif
/*
* DEBUG, PROFILING STRUCTURE
@@ -2403,6 +2410,7 @@ struct hl_debugfs_entry {
* @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
* @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
* @i2c_len: generic u8 debugfs file for length value to use in i2c_data_read.
+ * @dio_stats: Direct I/O statistics
*/
struct hl_dbg_device_entry {
struct dentry *root;
@@ -2434,6 +2442,35 @@ struct hl_dbg_device_entry {
u8 i2c_addr;
u8 i2c_reg;
u8 i2c_len;
+#ifdef CONFIG_HL_HLDIO
+ struct hl_dio_stats dio_stats;
+#endif
+};
+
+/**
+ * struct hl_debugfs_cfg_access_entry - single debugfs config access object, member of
+ * hl_debugfs_cfg_access.
+ * @seconds_since_epoch: seconds since January 1, 1970, used for time comparisons.
+ * @debugfs_type: the debugfs operation requested, can be READ32, WRITE32, READ64 or WRITE64.
+ * @addr: the requested address to access.
+ * @valid: if set, this entry has valid data for dumping at interrupt time.
+ */
+struct hl_debugfs_cfg_access_entry {
+	time64_t seconds_since_epoch;
+ enum debugfs_access_type debugfs_type;
+ u64 addr;
+ bool valid;
+};
+
+/**
+ * struct hl_debugfs_cfg_access - saves debugfs config region access requests history.
+ * @cfg_access_list: list of objects describing config region access requests.
+ * @head: next valid index to add new entry to in cfg_access_list.
+ */
+struct hl_debugfs_cfg_access {
+ struct hl_debugfs_cfg_access_entry cfg_access_list[HL_DBGFS_CFG_ACCESS_HIST_LEN];
+ u32 head;
+ spinlock_t lock; /* protects head and entries */
};
/**
@@ -3281,6 +3318,7 @@ struct eq_heartbeat_debug_info {
* @hl_chip_info: ASIC's sensors information.
* @device_status_description: device status description.
* @hl_debugfs: device's debugfs manager.
+ * @debugfs_cfg_accesses: list of last debugfs config region accesses.
* @cb_pool: list of pre allocated CBs.
* @cb_pool_lock: protects the CB pool.
* @internal_cb_pool_virt_addr: internal command buffer pool virtual address.
@@ -3305,6 +3343,7 @@ struct eq_heartbeat_debug_info {
* @captured_err_info: holds information about errors.
* @reset_info: holds current device reset information.
* @heartbeat_debug_info: counters used to debug heartbeat failures.
+ * @hldio: describes habanalabs direct storage interaction interface.
* @irq_affinity_mask: mask of available CPU cores for user and decoder interrupt handling.
* @stream_master_qid_arr: pointer to array with QIDs of master streams.
* @fw_inner_major_ver: the major of current loaded preboot inner version.
@@ -3357,6 +3396,7 @@ struct eq_heartbeat_debug_info {
* addresses.
* @is_in_dram_scrub: true if dram scrub operation is on going.
* @disabled: is device disabled.
+ * @cpld_shutdown: is cpld shutdown.
* @late_init_done: is late init stage was done during initialization.
* @hwmon_initialized: is H/W monitor sensors was initialized.
* @reset_on_lockup: true if a reset should be done in case of stuck CS, false
@@ -3461,6 +3501,7 @@ struct hl_device {
struct hwmon_chip_info *hl_chip_info;
struct hl_dbg_device_entry hl_debugfs;
+ struct hl_debugfs_cfg_access debugfs_cfg_accesses;
struct list_head cb_pool;
spinlock_t cb_pool_lock;
@@ -3496,7 +3537,9 @@ struct hl_device {
struct hl_reset_info reset_info;
struct eq_heartbeat_debug_info heartbeat_debug_info;
-
+#ifdef CONFIG_HL_HLDIO
+ struct hl_dio hldio;
+#endif
cpumask_t irq_affinity_mask;
u32 *stream_master_qid_arr;
@@ -3532,6 +3575,7 @@ struct hl_device {
u16 cpu_pci_msb_addr;
u8 is_in_dram_scrub;
u8 disabled;
+ u8 cpld_shutdown;
u8 late_init_done;
u8 hwmon_initialized;
u8 reset_on_lockup;
@@ -4089,6 +4133,7 @@ void hl_init_cpu_for_irq(struct hl_device *hdev);
void hl_set_irq_affinity(struct hl_device *hdev, int irq);
void hl_eq_heartbeat_event_handle(struct hl_device *hdev);
void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *event_mask);
+void hl_eq_cpld_shutdown_event_handle(struct hl_device *hdev, u16 event_id, u64 *event_mask);
#ifdef CONFIG_DEBUG_FS
@@ -4110,6 +4155,7 @@ void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
unsigned long length);
+void hl_debugfs_cfg_access_history_dump(struct hl_device *hdev);
#else
@@ -4185,6 +4231,10 @@ static inline void hl_debugfs_set_state_dump(struct hl_device *hdev,
{
}
+static inline void hl_debugfs_cfg_access_history_dump(struct hl_device *hdev)
+{
+}
+
#endif
/* Security */
diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
index dc80ca921d90..fdfdabc85e54 100644
--- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
@@ -961,6 +961,12 @@ static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *
case HL_PASSTHROUGH_VERSIONS:
need_input_buff = false;
break;
+ case HL_GET_ERR_COUNTERS_CMD:
+ need_input_buff = true;
+ break;
+ case HL_GET_P_STATE:
+ need_input_buff = false;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/accel/habanalabs/common/hldio.c b/drivers/accel/habanalabs/common/hldio.c
new file mode 100644
index 000000000000..083ae5610875
--- /dev/null
+++ b/drivers/accel/habanalabs/common/hldio.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2024 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+#include "hldio.h"
+#include <linux/version.h>
+#include <linux/pci-p2pdma.h>
+#include <linux/blkdev.h>
+#include <linux/vmalloc.h>
+
+/*
+ * NVMe Direct I/O implementation for habanalabs driver
+ *
+ * ASSUMPTIONS
+ * ===========
+ * 1. No IOMMU (technically it can work with an IOMMU, but then it is
+ *    almost useless).
+ * 2. Only READ operations (can extend in the future).
+ * 3. No sparse files (can overcome this in the future).
+ * 4. Kernel version >= 6.9
+ * 5. Requiring page alignment is OK (no solution in sight right now; how
+ *    would we read partial pages?)
+ * 6. Kernel compiled with CONFIG_PCI_P2PDMA. This requires a CUSTOM kernel.
+ *    There is a vague idea of how this could be worked around, but it is
+ *    probably unacceptable for upstream, and may not work in the end.
+ * 7. Either make sure our cards and disks are under the same PCI bridge, or
+ * compile a custom kernel to hack around this.
+ */
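+
+/*
+ * High-level flow of a single read (SSD -> device memory):
+ *
+ *   hl_dio_ssd2hl()
+ *     -> hl_dio_fd_register()  validate the fd (O_DIRECT, read_iter, P2P)
+ *     -> hl_direct_io()
+ *          hl_dio_va2page()    device VA -> PA -> struct page (P2P BAR)
+ *          iov_iter_bvec()     wrap the pages in a bvec iterator
+ *          f_op->read_iter()   the NVMe stack DMAs straight into the BAR
+ */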
+
+#define IO_STABILIZE_TIMEOUT 10000000 /* 10 seconds in microseconds */
+
+/*
+ * This struct contains all the useful data I could milk out of the file handle
+ * This struct caches the data we need from the file handle provided by
+ * the user.
+ * dedicated IOCTL, call it for example HL_REGISTER_HANDLE.
+ */
+struct hl_dio_fd {
+ /* Back pointer in case we need it in async completion */
+ struct hl_ctx *ctx;
+ /* Associated fd struct */
+ struct file *filp;
+};
+
+/*
+ * This is a single IO descriptor
+ */
+struct hl_direct_io {
+ struct hl_dio_fd f;
+ struct kiocb kio;
+ struct bio_vec *bv;
+ struct iov_iter iter;
+ u64 device_va;
+ u64 off_bytes;
+ u64 len_bytes;
+ u32 type;
+};
+
+bool hl_device_supports_nvme(struct hl_device *hdev)
+{
+ return hdev->asic_prop.supports_nvme;
+}
+
+static int hl_dio_fd_register(struct hl_ctx *ctx, int fd, struct hl_dio_fd *f)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct block_device *bd;
+ struct super_block *sb;
+ struct inode *inode;
+ struct gendisk *gd;
+ struct device *disk_dev;
+ int rc;
+
+ f->filp = fget(fd);
+ if (!f->filp) {
+ rc = -ENOENT;
+ goto out;
+ }
+
+ if (!(f->filp->f_flags & O_DIRECT)) {
+		dev_err(hdev->dev, "file is not opened with O_DIRECT\n");
+ rc = -EINVAL;
+ goto fput;
+ }
+
+ if (!f->filp->f_op->read_iter) {
+ dev_err(hdev->dev, "read iter is not supported, need to fall back to legacy\n");
+ rc = -EINVAL;
+ goto fput;
+ }
+
+	inode = file_inode(f->filp);
+	sb = inode->i_sb;
+	bd = sb->s_bdev;
+
+	/* i_blocks is in 512-byte units, independent of the fs block size */
+	if (inode->i_blocks << 9 < i_size_read(inode)) {
+		dev_err(hdev->dev, "sparse files are not currently supported\n");
+		rc = -EINVAL;
+		goto fput;
+	}
+
+	/* Validate the block device before dereferencing it */
+	if (!bd || !bd->bd_disk) {
+		dev_err(hdev->dev, "invalid block device\n");
+		rc = -ENODEV;
+		goto fput;
+	}
+	gd = bd->bd_disk;
+ /* Get the underlying device from the block device */
+ disk_dev = disk_to_dev(gd);
+ if (!dma_pci_p2pdma_supported(disk_dev)) {
+ dev_err(hdev->dev, "device does not support PCI P2P DMA\n");
+ rc = -EOPNOTSUPP;
+ goto fput;
+ }
+
+ /*
+ * @TODO: Maybe we need additional checks here
+ */
+
+ f->ctx = ctx;
+ rc = 0;
+
+ goto out;
+fput:
+ fput(f->filp);
+out:
+ return rc;
+}
+
+static void hl_dio_fd_unregister(struct hl_dio_fd *f)
+{
+ fput(f->filp);
+}
+
+static long hl_dio_count_io(struct hl_device *hdev)
+{
+ s64 sum = 0;
+ int i;
+
+ for_each_possible_cpu(i)
+ sum += per_cpu(*hdev->hldio.inflight_ios, i);
+
+ return sum;
+}
+
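+/*
+ * IO-path accounting: every in-flight IO holds a reference in a per-cpu
+ * counter. io_enabled is re-checked after the increment so that
+ * hl_dio_stop(), which clears the flag and then waits for the summed
+ * counter to drain, cannot miss an IO racing with the disable.
+ */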
+static bool hl_dio_get_iopath(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+
+ if (hdev->hldio.io_enabled) {
+ this_cpu_inc(*hdev->hldio.inflight_ios);
+
+ /* Avoid race conditions */
+ if (!hdev->hldio.io_enabled) {
+ this_cpu_dec(*hdev->hldio.inflight_ios);
+ return false;
+ }
+
+ hl_ctx_get(ctx);
+
+ return true;
+ }
+
+ return false;
+}
+
+static void hl_dio_put_iopath(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+
+ hl_ctx_put(ctx);
+ this_cpu_dec(*hdev->hldio.inflight_ios);
+}
+
+static void hl_dio_set_io_enabled(struct hl_device *hdev, bool enabled)
+{
+ hdev->hldio.io_enabled = enabled;
+}
+
+static bool hl_dio_validate_io(struct hl_device *hdev, struct hl_direct_io *io)
+{
+ if ((u64)io->device_va & ~PAGE_MASK) {
+ dev_dbg(hdev->dev, "device address must be 4K aligned\n");
+ return false;
+ }
+
+ if (io->len_bytes & ~PAGE_MASK) {
+ dev_dbg(hdev->dev, "IO length must be 4K aligned\n");
+ return false;
+ }
+
+ if (io->off_bytes & ~PAGE_MASK) {
+ dev_dbg(hdev->dev, "IO offset must be 4K aligned\n");
+ return false;
+ }
+
+ return true;
+}
+
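+/*
+ * Translate a device virtual address to the struct page backing it:
+ * first VA -> device PA through the device MMU, then PA -> page via the
+ * P2P region covering it (pages created by pci_p2pdma_add_resource()).
+ */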
+static struct page *hl_dio_va2page(struct hl_device *hdev, struct hl_ctx *ctx, u64 device_va)
+{
+ struct hl_dio *hldio = &hdev->hldio;
+ u64 device_pa;
+ int rc, i;
+
+ rc = hl_mmu_va_to_pa(ctx, device_va, &device_pa);
+ if (rc) {
+ dev_err(hdev->dev, "device virtual address translation error: %#llx (%d)",
+ device_va, rc);
+ return NULL;
+ }
+
+ for (i = 0 ; i < hldio->np2prs ; ++i) {
+ if (device_pa >= hldio->p2prs[i].device_pa &&
+ device_pa < hldio->p2prs[i].device_pa + hldio->p2prs[i].size)
+ return hldio->p2prs[i].p2ppages[(device_pa - hldio->p2prs[i].device_pa) >>
+ PAGE_SHIFT];
+ }
+
+ return NULL;
+}
+
+static ssize_t hl_direct_io(struct hl_device *hdev, struct hl_direct_io *io)
+{
+ u64 npages, device_va;
+ ssize_t rc;
+ int i;
+
+ if (!hl_dio_validate_io(hdev, io))
+ return -EINVAL;
+
+ if (!hl_dio_get_iopath(io->f.ctx)) {
+ dev_info(hdev->dev, "can't schedule a new IO, IO is disabled\n");
+ return -ESHUTDOWN;
+ }
+
+ init_sync_kiocb(&io->kio, io->f.filp);
+ io->kio.ki_pos = io->off_bytes;
+
+ npages = (io->len_bytes >> PAGE_SHIFT);
+
+ /* @TODO: this can be implemented smarter, vmalloc in iopath is not
+ * ideal. Maybe some variation of genpool. Number of pages may differ
+	 * greatly, so maybe even use pools of different sizes and choose the
+ * closest one.
+ */
+	io->bv = vzalloc(npages * sizeof(struct bio_vec));
+	if (!io->bv) {
+		/* cleanup drops the iopath reference; vfree(NULL) is a no-op */
+		rc = -ENOMEM;
+		goto cleanup;
+	}
+
+ for (i = 0, device_va = io->device_va; i < npages ; ++i, device_va += PAGE_SIZE) {
+ io->bv[i].bv_page = hl_dio_va2page(hdev, io->f.ctx, device_va);
+ if (!io->bv[i].bv_page) {
+ dev_err(hdev->dev, "error getting page struct for device va %#llx",
+ device_va);
+ rc = -EFAULT;
+ goto cleanup;
+ }
+ io->bv[i].bv_offset = 0;
+ io->bv[i].bv_len = PAGE_SIZE;
+ }
+
+	iov_iter_bvec(&io->iter, io->type, io->bv, npages, io->len_bytes);
+ if (io->f.filp->f_op && io->f.filp->f_op->read_iter)
+ rc = io->f.filp->f_op->read_iter(&io->kio, &io->iter);
+ else
+ rc = -EINVAL;
+
+cleanup:
+ vfree(io->bv);
+ hl_dio_put_iopath(io->f.ctx);
+
+ dev_dbg(hdev->dev, "IO ended with %ld\n", rc);
+
+ return rc;
+}
+
+/*
+ * @TODO: This function can be used as a callback for io completion under
+ * kio->ki_complete in order to implement async IO.
+ * Note that on more recent kernels there is no ret2.
+ */
+__maybe_unused static void hl_direct_io_complete(struct kiocb *kio, long ret, long ret2)
+{
+ struct hl_direct_io *io = container_of(kio, struct hl_direct_io, kio);
+
+ dev_dbg(io->f.ctx->hdev->dev, "IO completed with %ld\n", ret);
+
+ /* Do something to copy result to user / notify completion */
+
+ hl_dio_put_iopath(io->f.ctx);
+
+ hl_dio_fd_unregister(&io->f);
+}
+
+/*
+ * DMA disk to ASIC, wait for results. Must be invoked from the user context
+ */
+int hl_dio_ssd2hl(struct hl_device *hdev, struct hl_ctx *ctx, int fd,
+ u64 device_va, off_t off_bytes, size_t len_bytes,
+ size_t *len_read)
+{
+ struct hl_direct_io *io;
+ ssize_t rc;
+
+ dev_dbg(hdev->dev, "SSD2HL fd=%d va=%#llx len=%#lx\n", fd, device_va, len_bytes);
+
+ io = kzalloc(sizeof(*io), GFP_KERNEL);
+ if (!io) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ *io = (struct hl_direct_io){
+ .device_va = device_va,
+ .len_bytes = len_bytes,
+ .off_bytes = off_bytes,
+ .type = READ,
+ };
+
+ rc = hl_dio_fd_register(ctx, fd, &io->f);
+ if (rc)
+ goto kfree_io;
+
+ rc = hl_direct_io(hdev, io);
+ if (rc >= 0) {
+ *len_read = rc;
+ rc = 0;
+ }
+
+ /* This shall be called only in the case of a sync IO */
+ hl_dio_fd_unregister(&io->f);
+kfree_io:
+ kfree(io);
+out:
+ return rc;
+}
+
+static void hl_p2p_region_fini(struct hl_device *hdev, struct hl_p2p_region *p2pr)
+{
+ if (p2pr->p2ppages) {
+ vfree(p2pr->p2ppages);
+ p2pr->p2ppages = NULL;
+ }
+
+ if (p2pr->p2pmem) {
+ dev_dbg(hdev->dev, "freeing P2P mem from %p, size=%#llx\n",
+ p2pr->p2pmem, p2pr->size);
+ pci_free_p2pmem(hdev->pdev, p2pr->p2pmem, p2pr->size);
+ p2pr->p2pmem = NULL;
+ }
+}
+
+void hl_p2p_region_fini_all(struct hl_device *hdev)
+{
+ int i;
+
+ for (i = 0 ; i < hdev->hldio.np2prs ; ++i)
+ hl_p2p_region_fini(hdev, &hdev->hldio.p2prs[i]);
+
+ kvfree(hdev->hldio.p2prs);
+ hdev->hldio.p2prs = NULL;
+ hdev->hldio.np2prs = 0;
+}
+
+int hl_p2p_region_init(struct hl_device *hdev, struct hl_p2p_region *p2pr)
+{
+ void *addr;
+ int rc, i;
+
+ /* Start by publishing our p2p memory */
+ rc = pci_p2pdma_add_resource(hdev->pdev, p2pr->bar, p2pr->size, p2pr->bar_offset);
+ if (rc) {
+ dev_err(hdev->dev, "error adding p2p resource: %d\n", rc);
+ goto err;
+ }
+
+ /* Alloc all p2p mem */
+ p2pr->p2pmem = pci_alloc_p2pmem(hdev->pdev, p2pr->size);
+ if (!p2pr->p2pmem) {
+ dev_err(hdev->dev, "error allocating p2p memory\n");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ p2pr->p2ppages = vmalloc((p2pr->size >> PAGE_SHIFT) * sizeof(struct page *));
+ if (!p2pr->p2ppages) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
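+	/*
+	 * pci_p2pdma_add_resource() backs the BAR with ZONE_DEVICE pages
+	 * (devm_memremap_pages), so virt_to_page() on the p2pmem mapping
+	 * yields real page structs that can be placed in a bio_vec.
+	 */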
+ for (i = 0, addr = p2pr->p2pmem ; i < (p2pr->size >> PAGE_SHIFT) ; ++i, addr += PAGE_SIZE) {
+ p2pr->p2ppages[i] = virt_to_page(addr);
+ if (!p2pr->p2ppages[i]) {
+ rc = -EFAULT;
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ hl_p2p_region_fini(hdev, p2pr);
+ return rc;
+}
+
+int hl_dio_start(struct hl_device *hdev)
+{
+ dev_dbg(hdev->dev, "initializing HLDIO\n");
+
+ /* Initialize the IO counter and enable IO */
+ hdev->hldio.inflight_ios = alloc_percpu(s64);
+ if (!hdev->hldio.inflight_ios)
+ return -ENOMEM;
+
+ hl_dio_set_io_enabled(hdev, true);
+
+ return 0;
+}
+
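+/*
+ * Quiesce direct IO: clear io_enabled so no new IO can enter the path,
+ * then poll until the per-cpu inflight counter drains to zero, bounded
+ * by IO_STABILIZE_TIMEOUT.
+ */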
+void hl_dio_stop(struct hl_device *hdev)
+{
+ dev_dbg(hdev->dev, "deinitializing HLDIO\n");
+
+ if (hdev->hldio.io_enabled) {
+ /* Wait for all the IO to finish */
+ hl_dio_set_io_enabled(hdev, false);
+ hl_poll_timeout_condition(hdev, !hl_dio_count_io(hdev), 1000, IO_STABILIZE_TIMEOUT);
+ }
+
+ if (hdev->hldio.inflight_ios) {
+ free_percpu(hdev->hldio.inflight_ios);
+ hdev->hldio.inflight_ios = NULL;
+ }
+}
diff --git a/drivers/accel/habanalabs/common/hldio.h b/drivers/accel/habanalabs/common/hldio.h
new file mode 100644
index 000000000000..2874388f2851
--- /dev/null
+++ b/drivers/accel/habanalabs/common/hldio.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * hldio.h - NVMe Direct I/O (HLDIO) infrastructure for Habana Labs Driver
+ *
+ * This feature requires specific hardware setup and must not be built
+ * under COMPILE_TEST.
+ */
+
+#ifndef __HL_HLDIO_H__
+#define __HL_HLDIO_H__
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/ktime.h> /* ktime functions */
+#include <linux/delay.h> /* usleep_range */
+#include <linux/kernel.h> /* might_sleep_if */
+#include <linux/errno.h> /* error codes */
+
+/* Forward declarations */
+struct hl_device;
+struct file;
+
+/* Enable only if Kconfig selected */
+#ifdef CONFIG_HL_HLDIO
+/**
+ * struct hl_p2p_region - describes a single P2P memory region
+ * @p2ppages: array of page structs for the P2P memory
+ * @p2pmem: virtual address of the P2P memory region
+ * @device_pa: physical address on the device
+ * @bar_offset: offset within the BAR
+ * @size: size of the region in bytes
+ * @bar: BAR number containing this region
+ */
+struct hl_p2p_region {
+ struct page **p2ppages;
+ void *p2pmem;
+ u64 device_pa;
+ u64 bar_offset;
+ u64 size;
+ int bar;
+};
+
+/**
+ * struct hl_dio_stats - Direct I/O statistics
+ * @total_ops: total number of operations attempted
+ * @successful_ops: number of successful operations
+ * @failed_ops: number of failed operations
+ * @bytes_transferred: total bytes successfully transferred
+ * @last_len_read: length of the last read operation
+ */
+struct hl_dio_stats {
+ u64 total_ops;
+ u64 successful_ops;
+ u64 failed_ops;
+ u64 bytes_transferred;
+ size_t last_len_read;
+};
+
+/**
+ * struct hl_dio - describes habanalabs direct storage interaction interface
+ * @p2prs: array of p2p regions
+ * @inflight_ios: percpu counter for inflight ios
+ * @np2prs: number of elements in p2prs
+ * @io_enabled: 1 if IO is enabled, 0 otherwise
+ */
+struct hl_dio {
+ struct hl_p2p_region *p2prs;
+ s64 __percpu *inflight_ios;
+ u8 np2prs;
+ u8 io_enabled;
+};
+
+int hl_dio_ssd2hl(struct hl_device *hdev, struct hl_ctx *ctx, int fd,
+ u64 device_va, off_t off_bytes, size_t len_bytes,
+ size_t *len_read);
+void hl_p2p_region_fini_all(struct hl_device *hdev);
+int hl_p2p_region_init(struct hl_device *hdev, struct hl_p2p_region *p2pr);
+int hl_dio_start(struct hl_device *hdev);
+void hl_dio_stop(struct hl_device *hdev);
+
+/* Init/teardown */
+int hl_hldio_init(struct hl_device *hdev);
+void hl_hldio_fini(struct hl_device *hdev);
+
+/* File operations */
+long hl_hldio_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
+
+/* DebugFS hooks */
+#ifdef CONFIG_DEBUG_FS
+void hl_hldio_debugfs_init(struct hl_device *hdev);
+void hl_hldio_debugfs_fini(struct hl_device *hdev);
+#else
+static inline void hl_hldio_debugfs_init(struct hl_device *hdev) { }
+static inline void hl_hldio_debugfs_fini(struct hl_device *hdev) { }
+#endif
+
+#else /* !CONFIG_HL_HLDIO */
+
+struct hl_p2p_region;
+/* Stubs when HLDIO is disabled */
+static inline int hl_dio_ssd2hl(struct hl_device *hdev, struct hl_ctx *ctx, int fd,
+ u64 device_va, off_t off_bytes, size_t len_bytes,
+ size_t *len_read)
+{ return -EOPNOTSUPP; }
+static inline void hl_p2p_region_fini_all(struct hl_device *hdev) {}
+static inline int hl_p2p_region_init(struct hl_device *hdev, struct hl_p2p_region *p2pr)
+{ return -EOPNOTSUPP; }
+static inline int hl_dio_start(struct hl_device *hdev) { return -EOPNOTSUPP; }
+static inline void hl_dio_stop(struct hl_device *hdev) {}
+
+static inline int hl_hldio_init(struct hl_device *hdev) { return 0; }
+static inline void hl_hldio_fini(struct hl_device *hdev) { }
+static inline long hl_hldio_ioctl(struct file *f, unsigned int c,
+ unsigned long a)
+{ return -ENOTTY; }
+static inline void hl_hldio_debugfs_init(struct hl_device *hdev) { }
+static inline void hl_hldio_debugfs_fini(struct hl_device *hdev) { }
+
+#endif /* CONFIG_HL_HLDIO */
+
+/* Simplified polling macro for HLDIO (no simulator support) */
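+/*
+ * Evaluates to 0 if @cond became true within @timeout_us, -ETIMEDOUT
+ * otherwise. Typical use, as in hl_dio_stop():
+ *
+ *   hl_poll_timeout_condition(hdev, !hl_dio_count_io(hdev), 1000,
+ *			       IO_STABILIZE_TIMEOUT);
+ */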
+#define hl_poll_timeout_condition(hdev, cond, sleep_us, timeout_us) \
+({ \
+ ktime_t __timeout = ktime_add_us(ktime_get(), timeout_us); \
+ might_sleep_if(sleep_us); \
+ (void)(hdev); /* keep signature consistent, hdev unused */ \
+ for (;;) { \
+ mb(); /* ensure ordering of memory operations */ \
+ if (cond) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) \
+ break; \
+ if (sleep_us) \
+ usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
+
+#ifdef CONFIG_HL_HLDIO
+bool hl_device_supports_nvme(struct hl_device *hdev);
+#else
+static inline bool hl_device_supports_nvme(struct hl_device *hdev) { return false; }
+#endif
+
+#endif /* __HL_HLDIO_H__ */
diff --git a/drivers/accel/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c
index 601fdbe70179..633db4bff46f 100644
--- a/drivers/accel/habanalabs/common/memory.c
+++ b/drivers/accel/habanalabs/common/memory.c
@@ -1829,9 +1829,6 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf)
struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
struct hl_ctx *ctx;
- if (!hl_dmabuf)
- return;
-
ctx = hl_dmabuf->ctx;
if (hl_dmabuf->memhash_hnode)
@@ -1840,7 +1837,12 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf)
atomic_dec(&ctx->hdev->dmabuf_export_cnt);
hl_ctx_put(ctx);
- /* Paired with get_file() in export_dmabuf() */
+ /*
+ * Paired with get_file() in export_dmabuf().
+ * 'ctx' can be still used here to get the file pointer, even after hl_ctx_put() was called,
+ * because releasing the compute device file involves another reference decrement, and it
+ * would be possible only after calling fput().
+ */
fput(ctx->hpriv->file_priv->filp);
kfree(hl_dmabuf);
@@ -1859,7 +1861,12 @@ static int export_dmabuf(struct hl_ctx *ctx,
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct hl_device *hdev = ctx->hdev;
- int rc, fd;
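+	/*
+	 * Scope-guarded fd (cleanup.h): on any early return the unused fd
+	 * is put back automatically; take_fd() below transfers ownership
+	 * once the dma-buf file is installed.
+	 */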
+ CLASS(get_unused_fd, fd)(flags);
+
+ if (fd < 0) {
+ dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
+ return fd;
+ }
exp_info.ops = &habanalabs_dmabuf_ops;
exp_info.size = total_size;
@@ -1872,13 +1879,6 @@ static int export_dmabuf(struct hl_ctx *ctx,
return PTR_ERR(hl_dmabuf->dmabuf);
}
- fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
- if (fd < 0) {
- dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
- rc = fd;
- goto err_dma_buf_put;
- }
-
hl_dmabuf->ctx = ctx;
hl_ctx_get(hl_dmabuf->ctx);
atomic_inc(&ctx->hdev->dmabuf_export_cnt);
@@ -1890,13 +1890,9 @@ static int export_dmabuf(struct hl_ctx *ctx,
get_file(ctx->hpriv->file_priv->filp);
*dmabuf_fd = fd;
+ fd_install(take_fd(fd), hl_dmabuf->dmabuf->file);
return 0;
-
-err_dma_buf_put:
- hl_dmabuf->dmabuf->priv = NULL;
- dma_buf_put(hl_dmabuf->dmabuf);
- return rc;
}
static int validate_export_params_common(struct hl_device *hdev, u64 addr, u64 size, u64 offset)
@@ -2341,7 +2337,7 @@ static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
if (rc < 0)
goto destroy_pages;
npages = rc;
- rc = -EFAULT;
+ rc = -ENOMEM;
goto put_pages;
}
userptr->npages = npages;
diff --git a/drivers/accel/habanalabs/common/memory_mgr.c b/drivers/accel/habanalabs/common/memory_mgr.c
index 99cd83139d46..4401beb99e42 100644
--- a/drivers/accel/habanalabs/common/memory_mgr.c
+++ b/drivers/accel/habanalabs/common/memory_mgr.c
@@ -259,13 +259,8 @@ int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
goto put_mem;
}
-#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
- if (!access_ok(VERIFY_WRITE, (void __user *)(uintptr_t)vma->vm_start,
- user_mem_size)) {
-#else
if (!access_ok((void __user *)(uintptr_t)vma->vm_start,
user_mem_size)) {
-#endif
dev_err(mmg->dev, "%s: User pointer is invalid - 0x%lx\n",
buf->behavior->topic, vma->vm_start);
diff --git a/drivers/accel/habanalabs/common/sysfs.c b/drivers/accel/habanalabs/common/sysfs.c
index 9d58efa2ff38..8f55ba3b4e73 100644
--- a/drivers/accel/habanalabs/common/sysfs.c
+++ b/drivers/accel/habanalabs/common/sysfs.c
@@ -96,14 +96,21 @@ static ssize_t vrm_ver_show(struct device *dev, struct device_attribute *attr, c
infineon_second_stage_third_instance =
(infineon_second_stage_version >> 16) & mask;
- if (cpucp_info->infineon_second_stage_version)
+ if (cpucp_info->infineon_version && cpucp_info->infineon_second_stage_version)
return sprintf(buf, "%#04x %#04x:%#04x:%#04x\n",
le32_to_cpu(cpucp_info->infineon_version),
infineon_second_stage_first_instance,
infineon_second_stage_second_instance,
infineon_second_stage_third_instance);
- else
+ else if (cpucp_info->infineon_second_stage_version)
+ return sprintf(buf, "%#04x:%#04x:%#04x\n",
+ infineon_second_stage_first_instance,
+ infineon_second_stage_second_instance,
+ infineon_second_stage_third_instance);
+ else if (cpucp_info->infineon_version)
return sprintf(buf, "%#04x\n", le32_to_cpu(cpucp_info->infineon_version));
+
+ return 0;
}
static DEVICE_ATTR_RO(vrm_ver);
@@ -446,7 +453,7 @@ static DEVICE_ATTR_RO(parent_device);
static const struct bin_attribute bin_attr_eeprom = {
.attr = {.name = "eeprom", .mode = (0444)},
.size = PAGE_SIZE,
- .read_new = eeprom_read_handler
+ .read = eeprom_read_handler
};
static struct attribute *hl_dev_attrs[] = {
@@ -479,7 +486,7 @@ static const struct bin_attribute *const hl_dev_bin_attrs[] = {
static struct attribute_group hl_dev_attr_group = {
.attrs = hl_dev_attrs,
- .bin_attrs_new = hl_dev_bin_attrs,
+ .bin_attrs = hl_dev_bin_attrs,
};
static struct attribute_group hl_dev_clks_attr_group;
diff --git a/drivers/accel/habanalabs/gaudi/gaudi.c b/drivers/accel/habanalabs/gaudi/gaudi.c
index fa893a9b826e..34771d75da9d 100644
--- a/drivers/accel/habanalabs/gaudi/gaudi.c
+++ b/drivers/accel/habanalabs/gaudi/gaudi.c
@@ -4168,10 +4168,29 @@ static int gaudi_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE);
+#ifdef _HAS_DMA_MMAP_COHERENT
+ /*
+ * If dma_alloc_coherent() returns a vmalloc address, set VM_MIXEDMAP
+ * so vm_insert_page() can handle it safely. Without this, the kernel
+ * may BUG_ON due to VM_PFNMAP.
+ */
+ if (is_vmalloc_addr(cpu_addr))
+ vm_flags_set(vma, VM_MIXEDMAP);
+
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
(dma_addr - HOST_PHYS_BASE), size);
if (rc)
dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
+#else
+
+ rc = remap_pfn_range(vma, vma->vm_start,
+ virt_to_phys(cpu_addr) >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ if (rc)
+ dev_err(hdev->dev, "remap_pfn_range error %d", rc);
+
+#endif
+
return rc;
}
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c
index a38b88baadf2..b8c0689dba64 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c
@@ -728,6 +728,354 @@ static const int gaudi2_dma_core_async_event_id[] = {
[DMA_CORE_ID_KDMA] = GAUDI2_EVENT_KDMA0_CORE,
};
+const char *gaudi2_engine_id_str[] = {
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_6),
+ __stringify(GAUDI2_ENGINE_ID_PDMA_0),
+ __stringify(GAUDI2_ENGINE_ID_PDMA_1),
+ __stringify(GAUDI2_ENGINE_ID_ROT_0),
+ __stringify(GAUDI2_ENGINE_ID_ROT_1),
+ __stringify(GAUDI2_PCIE_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_PCIE_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC0_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC0_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC1_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC1_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC2_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC2_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC3_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC3_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC4_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC4_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC5_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC5_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC6_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC6_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC7_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC7_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC8_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC8_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC9_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC9_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC10_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC10_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC11_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC11_1),
+ __stringify(GAUDI2_ENGINE_ID_PCIE),
+ __stringify(GAUDI2_ENGINE_ID_PSOC),
+ __stringify(GAUDI2_ENGINE_ID_ARC_FARM),
+ __stringify(GAUDI2_ENGINE_ID_KDMA),
+ __stringify(GAUDI2_ENGINE_ID_SIZE),
+};
+
+const char *gaudi2_queue_id_str[] = {
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_3),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_0),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_1),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_2),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_3),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_0),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_1),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_2),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_3),
+ __stringify(GAUDI2_QUEUE_ID_CPU_PQ),
+ __stringify(GAUDI2_QUEUE_ID_SIZE),
+};
+
static const char * const gaudi2_qm_sei_error_cause[GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE] = {
"qman sei intr",
"arc sei intr"
@@ -3150,7 +3498,6 @@ static int gaudi2_early_init(struct hl_device *hdev)
rc = hl_fw_read_preboot_status(hdev);
if (rc) {
if (hdev->reset_on_preboot_fail)
- /* we are already on failure flow, so don't check if hw_fini fails. */
hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
@@ -3162,6 +3509,13 @@ static int gaudi2_early_init(struct hl_device *hdev)
dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc);
goto pci_fini;
}
+
+ rc = hl_fw_read_preboot_status(hdev);
+ if (rc) {
+ if (hdev->reset_on_preboot_fail)
+ hdev->asic_funcs->hw_fini(hdev, true, false);
+ goto pci_fini;
+ }
}
return 0;
@@ -4836,7 +5190,7 @@ static void gaudi2_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw
else
wait_timeout_ms = GAUDI2_RESET_WAIT_MSEC;
- if (fw_reset)
+ if (fw_reset || hdev->cpld_shutdown)
goto skip_engines;
gaudi2_stop_dma_qmans(hdev);
@@ -6484,6 +6838,13 @@ static int gaudi2_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
VM_DONTCOPY | VM_NORESERVE);
#ifdef _HAS_DMA_MMAP_COHERENT
+ /*
+ * If dma_alloc_coherent() returns a vmalloc address, set VM_MIXEDMAP
+ * so vm_insert_page() can handle it safely. Without this, the kernel
+ * may BUG_ON due to VM_PFNMAP.
+ */
+ if (is_vmalloc_addr(cpu_addr))
+ vm_flags_set(vma, VM_MIXEDMAP);
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
if (rc)
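/*
 * Editorial note -- illustrative sketch, not part of the patch above:
 * vm_insert_page() BUG()s on a VM_PFNMAP vma, which is what the default
 * mmap path sets up, so coherent buffers that happen to be vmalloc-backed
 * need VM_MIXEDMAP before dma_mmap_coherent() runs. The minimal shape of
 * that guard, assuming cpu_addr came from dma_alloc_coherent():
 */
static int sketch_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
				void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	if (is_vmalloc_addr(cpu_addr))
		vm_flags_set(vma, VM_MIXEDMAP);	/* let vm_insert_page() accept it */

	return dma_mmap_coherent(dev, vma, cpu_addr, dma_addr, size);
}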
@@ -6774,7 +7135,8 @@ static int gaudi2_validate_cb_address(struct hl_device *hdev, struct hl_cs_parse
struct gaudi2_device *gaudi2 = hdev->asic_specific;
if (!gaudi2_is_queue_enabled(hdev, parser->hw_queue_id)) {
- dev_err(hdev->dev, "h/w queue %d is disabled\n", parser->hw_queue_id);
+ dev_err(hdev->dev, "h/w queue %s is disabled\n",
+ GAUDI2_QUEUE_ID_TO_STR(parser->hw_queue_id));
return -EINVAL;
}
@@ -7026,7 +7388,8 @@ static int gaudi2_test_queue_send_msg_short(struct hl_device *hdev, u32 hw_queue
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, msg_info->dma_addr);
if (rc)
dev_err(hdev->dev,
- "Failed to send msg_short packet to H/W queue %d\n", hw_queue_id);
+ "Failed to send msg_short packet to H/W queue %s\n",
+ GAUDI2_QUEUE_ID_TO_STR(hw_queue_id));
return rc;
}
@@ -7052,8 +7415,8 @@ static int gaudi2_test_queue_wait_completion(struct hl_device *hdev, u32 hw_queu
timeout_usec);
if (rc == -ETIMEDOUT) {
- dev_err(hdev->dev, "H/W queue %d test failed (SOB_OBJ_0 == 0x%x)\n",
- hw_queue_id, tmp);
+ dev_err(hdev->dev, "H/W queue %s test failed (SOB_OBJ_0 == 0x%x)\n",
+ GAUDI2_QUEUE_ID_TO_STR(hw_queue_id), tmp);
rc = -EIO;
}
@@ -9603,8 +9966,8 @@ static int hl_arc_event_handle(struct hl_device *hdev, u16 event_type,
q = (struct hl_engine_arc_dccm_queue_full_irq *) &payload;
gaudi2_print_event(hdev, event_type, true,
- "ARC DCCM Full event: EngId: %u, Intr_type: %u, Qidx: %u",
- engine_id, intr_type, q->queue_index);
+ "ARC DCCM Full event: Eng: %s, Intr_type: %u, Qidx: %u",
+ GAUDI2_ENG_ID_TO_STR(engine_id), intr_type, q->queue_index);
return 1;
default:
gaudi2_print_event(hdev, event_type, true, "Unknown ARC event type");
@@ -10172,7 +10535,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
dev_err(hdev->dev, "CPLD shutdown event, reset reason: 0x%llx\n",
le64_to_cpu(eq_entry->data[0]));
error_count = GAUDI2_NA_EVENT_CAUSE;
- event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ hl_eq_cpld_shutdown_event_handle(hdev, event_type, &event_mask);
break;
case GAUDI2_EVENT_CPU_PKT_SANITY_FAILED:
@@ -10260,6 +10623,7 @@ reset_device:
if (event_mask & HL_NOTIFIER_EVENT_GENERAL_HW_ERR)
hl_handle_critical_hw_err(hdev, event_type, &event_mask);
+ hl_debugfs_cfg_access_history_dump(hdev);
event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
hl_device_cond_reset(hdev, reset_flags, event_mask);
}
@@ -10296,8 +10660,8 @@ static int gaudi2_memset_memory_chunk_using_edma_qm(struct hl_device *hdev,
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, phys_addr);
if (rc)
- dev_err(hdev->dev, "Failed to send lin_dma packet to H/W queue %d\n",
- hw_queue_id);
+ dev_err(hdev->dev, "Failed to send lin_dma packet to H/W queue %s\n",
+ GAUDI2_QUEUE_ID_TO_STR(hw_queue_id));
return rc;
}
@@ -10437,7 +10801,7 @@ end:
(u64 *)(lin_dma_pkts_arr), DEBUGFS_WRITE64);
WREG32(sob_addr, 0);
- kfree(lin_dma_pkts_arr);
+ kvfree(lin_dma_pkts_arr);
return rc;
}
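/*
 * Editorial note -- illustrative, hypothetical helper: the kfree() ->
 * kvfree() switch above is the classic fix for a buffer that is (or may
 * be) allocated with kvmalloc(): the returned pointer can be either
 * slab- or vmalloc-backed, and only kvfree() handles both.
 */
static int sketch_alloc_free(size_t n)
{
	u64 *pkts = kvmalloc_array(n, sizeof(*pkts), GFP_KERNEL);

	if (!pkts)
		return -ENOMEM;
	/* ... fill and send the packets ... */
	kvfree(pkts);	/* safe for both backing allocators; kfree() is not */
	return 0;
}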
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2P.h b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
index 05117272cac7..bdf5c1bd2d63 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2P.h
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
@@ -240,6 +240,15 @@
#define GAUDI2_NUM_TESTED_QS (GAUDI2_QUEUE_ID_CPU_PQ - GAUDI2_QUEUE_ID_PDMA_0_0)
+extern const char *gaudi2_engine_id_str[];
+extern const char *gaudi2_queue_id_str[];
+
+#define GAUDI2_ENG_ID_TO_STR(initiator) ((initiator) >= GAUDI2_ENGINE_ID_SIZE ? "not found" : \
+ gaudi2_engine_id_str[initiator])
+
+#define GAUDI2_QUEUE_ID_TO_STR(initiator) ((initiator) >= GAUDI2_QUEUE_ID_SIZE ? "not found" : \
+ gaudi2_queue_id_str[initiator])
+
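+/*
+ * Editorial note -- illustrative sketch: both macros bound-check the ID
+ * before indexing, so a corrupted or out-of-range initiator degrades to
+ * "not found" instead of reading past the string table. The same pattern
+ * as a standalone helper:
+ *
+ *	static const char *id_to_str(const char * const *table,
+ *				     unsigned int size, unsigned int id)
+ *	{
+ *		return id >= size ? "not found" : table[id];
+ *	}
+ */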
enum gaudi2_reserved_sob_id {
GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST,
GAUDI2_RESERVED_SOB_CS_COMPLETION_LAST =
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
index 2423620ff358..bc3c57bda5cd 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
@@ -2426,7 +2426,7 @@ static int gaudi2_config_bmon(struct hl_device *hdev, struct hl_debug_params *pa
WREG32(base_reg + mmBMON_ADDRH_E3_OFFSET, 0);
WREG32(base_reg + mmBMON_REDUCTION_OFFSET, 0);
WREG32(base_reg + mmBMON_STM_TRC_OFFSET, 0x7 | (0xA << 8));
- WREG32(base_reg + mmBMON_CR_OFFSET, 0x77 | 0xf << 24);
+ WREG32(base_reg + mmBMON_CR_OFFSET, 0x41);
}
return 0;
diff --git a/drivers/accel/ivpu/Makefile b/drivers/accel/ivpu/Makefile
index 1029e0bab061..dbf76b8a5b4c 100644
--- a/drivers/accel/ivpu/Makefile
+++ b/drivers/accel/ivpu/Makefile
@@ -6,6 +6,7 @@ intel_vpu-y := \
ivpu_fw.o \
ivpu_fw_log.o \
ivpu_gem.o \
+ ivpu_gem_userptr.o \
ivpu_hw.o \
ivpu_hw_btrs.o \
ivpu_hw_ip.o \
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index cd24ccd20ba6..3bd85ee6c26b 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -398,35 +398,25 @@ static int dct_active_set(void *data, u64 active_percent)
DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n");
+static void print_priority_band(struct seq_file *s, struct ivpu_hw_info *hw,
+ int band, const char *name)
+{
+ seq_printf(s, "%-9s: grace_period %9u process_grace_period %9u process_quantum %9u\n",
+ name,
+ hw->hws.grace_period[band],
+ hw->hws.process_grace_period[band],
+ hw->hws.process_quantum[band]);
+}
+
static int priority_bands_show(struct seq_file *s, void *v)
{
struct ivpu_device *vdev = s->private;
struct ivpu_hw_info *hw = vdev->hw;
- for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
- band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
- switch (band) {
- case VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE:
- seq_puts(s, "Idle: ");
- break;
-
- case VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL:
- seq_puts(s, "Normal: ");
- break;
-
- case VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS:
- seq_puts(s, "Focus: ");
- break;
-
- case VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME:
- seq_puts(s, "Realtime: ");
- break;
- }
-
- seq_printf(s, "grace_period %9u process_grace_period %9u process_quantum %9u\n",
- hw->hws.grace_period[band], hw->hws.process_grace_period[band],
- hw->hws.process_quantum[band]);
- }
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE, "Idle");
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL, "Normal");
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS, "Focus");
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME, "Realtime");
return 0;
}
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 0e7748c5e117..3d6fccdefdd6 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -57,7 +57,7 @@ MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");
int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO;
module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
-MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler, 1 - Use HW scheduler");
+MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler (supported on 37XX - 50XX), 1 - Use HW scheduler");
bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
@@ -134,6 +134,8 @@ bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability)
return true;
case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
return true;
+ case DRM_IVPU_CAP_BO_CREATE_FROM_USERPTR:
+ return true;
case DRM_IVPU_CAP_MANAGE_CMDQ:
return vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW;
default:
@@ -200,6 +202,9 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
case DRM_IVPU_PARAM_CAPABILITIES:
args->value = ivpu_is_capable(vdev, args->index);
break;
+ case DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE:
+ args->value = ivpu_fw_preempt_buf_size(vdev);
+ break;
default:
ret = -EINVAL;
break;
@@ -310,6 +315,7 @@ static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(IVPU_CMDQ_CREATE, ivpu_cmdq_create_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_CMDQ_DESTROY, ivpu_cmdq_destroy_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_CMDQ_SUBMIT, ivpu_cmdq_submit_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE_FROM_USERPTR, ivpu_bo_create_from_userptr_ioctl, 0),
};
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
@@ -377,8 +383,7 @@ int ivpu_boot(struct ivpu_device *vdev)
drm_WARN_ON(&vdev->drm, atomic_read(&vdev->job_timeout_counter));
drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
- /* Update boot params located at first 4KB of FW memory */
- ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));
+ ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem_bp));
ret = ivpu_hw_boot_fw(vdev);
if (ret) {
@@ -450,6 +455,9 @@ int ivpu_shutdown(struct ivpu_device *vdev)
static const struct file_operations ivpu_fops = {
.owner = THIS_MODULE,
DRM_ACCEL_FOPS,
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = drm_show_fdinfo,
+#endif
};
static const struct drm_driver driver = {
@@ -464,6 +472,9 @@ static const struct drm_driver driver = {
.ioctls = ivpu_drm_ioctls,
.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
.fops = &ivpu_fops,
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = drm_show_memory_stats,
+#endif
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -677,7 +688,7 @@ static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
ivpu_jobs_abort_all(vdev);
- ivpu_pm_cancel_recovery(vdev);
+ ivpu_pm_disable_recovery(vdev);
ivpu_pm_disable(vdev);
ivpu_prepare_for_reset(vdev);
ivpu_shutdown(vdev);
@@ -704,6 +715,8 @@ static struct pci_device_id ivpu_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_WCL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_NVL) },
{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 5497e7030e91..5b34b6f50e69 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_DRV_H__
@@ -26,6 +26,8 @@
#define PCI_DEVICE_ID_ARL 0xad1d
#define PCI_DEVICE_ID_LNL 0x643e
#define PCI_DEVICE_ID_PTL_P 0xb03e
+#define PCI_DEVICE_ID_WCL 0xfd3e
+#define PCI_DEVICE_ID_NVL 0xd71d
#define IVPU_HW_IP_37XX 37
#define IVPU_HW_IP_40XX 40
@@ -77,6 +79,7 @@
#define IVPU_DBG_KREF BIT(11)
#define IVPU_DBG_RPM BIT(12)
#define IVPU_DBG_MMU_MAP BIT(13)
+#define IVPU_DBG_IOCTL BIT(14)
#define ivpu_err(vdev, fmt, ...) \
drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
@@ -165,6 +168,7 @@ struct ivpu_device {
int boot;
int jsm;
int tdr;
+ int inference;
int autosuspend;
int d0i3_entry_msg;
int state_dump_msg;
@@ -207,10 +211,11 @@ extern bool ivpu_force_snoop;
#define IVPU_TEST_MODE_D0I3_MSG_ENABLE BIT(5)
#define IVPU_TEST_MODE_MIP_DISABLE BIT(6)
#define IVPU_TEST_MODE_DISABLE_TIMEOUTS BIT(8)
-#define IVPU_TEST_MODE_TURBO BIT(9)
-#define IVPU_TEST_MODE_CLK_RELINQ_DISABLE BIT(10)
-#define IVPU_TEST_MODE_CLK_RELINQ_ENABLE BIT(11)
-#define IVPU_TEST_MODE_D0I2_DISABLE BIT(12)
+#define IVPU_TEST_MODE_TURBO_ENABLE BIT(9)
+#define IVPU_TEST_MODE_TURBO_DISABLE BIT(10)
+#define IVPU_TEST_MODE_CLK_RELINQ_DISABLE BIT(11)
+#define IVPU_TEST_MODE_CLK_RELINQ_ENABLE BIT(12)
+#define IVPU_TEST_MODE_D0I2_DISABLE BIT(13)
extern int ivpu_test_mode;
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
@@ -240,7 +245,10 @@ static inline int ivpu_hw_ip_gen(struct ivpu_device *vdev)
case PCI_DEVICE_ID_LNL:
return IVPU_HW_IP_40XX;
case PCI_DEVICE_ID_PTL_P:
+ case PCI_DEVICE_ID_WCL:
return IVPU_HW_IP_50XX;
+ case PCI_DEVICE_ID_NVL:
+ return IVPU_HW_IP_60XX;
default:
dump_stack();
ivpu_err(vdev, "Unknown NPU IP generation\n");
@@ -256,6 +264,8 @@ static inline int ivpu_hw_btrs_gen(struct ivpu_device *vdev)
return IVPU_HW_BTRS_MTL;
case PCI_DEVICE_ID_LNL:
case PCI_DEVICE_ID_PTL_P:
+ case PCI_DEVICE_ID_WCL:
+ case PCI_DEVICE_ID_NVL:
return IVPU_HW_BTRS_LNL;
default:
dump_stack();
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 9db741695401..48386d2cddbb 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -17,15 +17,10 @@
#include "ivpu_ipc.h"
#include "ivpu_pm.h"
-#define FW_GLOBAL_MEM_START (2ull * SZ_1G)
-#define FW_GLOBAL_MEM_END (3ull * SZ_1G)
-#define FW_SHARED_MEM_SIZE SZ_256M /* Must be aligned to FW_SHARED_MEM_ALIGNMENT */
-#define FW_SHARED_MEM_ALIGNMENT SZ_128K /* VPU MTRR limitation */
-#define FW_RUNTIME_MAX_SIZE SZ_512M
#define FW_SHAVE_NN_MAX_SIZE SZ_2M
-#define FW_RUNTIME_MIN_ADDR (FW_GLOBAL_MEM_START)
-#define FW_RUNTIME_MAX_ADDR (FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE)
#define FW_FILE_IMAGE_OFFSET (VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE)
+#define FW_PREEMPT_BUF_MIN_SIZE SZ_4K
+#define FW_PREEMPT_BUF_MAX_SIZE SZ_32M
#define WATCHDOG_MSS_REDIRECT 32
#define WATCHDOG_NCE_REDIRECT 33
@@ -61,12 +56,14 @@ static struct {
{ IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
{ IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v1.bin" },
{ IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v0.0.bin" },
+ { IVPU_HW_IP_60XX, "intel/vpu/vpu_60xx_v1.bin" },
};
/* Production fw_names from the table above */
MODULE_FIRMWARE("intel/vpu/vpu_37xx_v1.bin");
MODULE_FIRMWARE("intel/vpu/vpu_40xx_v1.bin");
MODULE_FIRMWARE("intel/vpu/vpu_50xx_v1.bin");
+MODULE_FIRMWARE("intel/vpu/vpu_60xx_v1.bin");
static int ivpu_fw_request(struct ivpu_device *vdev)
{
@@ -131,9 +128,14 @@ ivpu_fw_check_api_ver_lt(struct ivpu_device *vdev, const struct vpu_firmware_hea
return false;
}
-static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range_size)
+bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range)
{
- if (addr < range_start || addr + size > range_start + range_size)
+ u64 addr_end;
+
+ if (!range || check_add_overflow(addr, size, &addr_end))
+ return false;
+
+ if (addr < range->start || addr_end > range->end)
return false;
return true;
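/*
 * Editorial note -- standalone equivalent of the check above: kernel
 * check_add_overflow() wraps the compiler's overflow-checked add, so a
 * huge `size` cannot wrap `addr + size` past U64_MAX and falsely pass
 * the containment test.
 */
#include <stdbool.h>
#include <stdint.h>

static bool within_range(uint64_t addr, uint64_t size, uint64_t start, uint64_t end)
{
	uint64_t addr_end;

	if (__builtin_add_overflow(addr, size, &addr_end))
		return false;	/* addr + size wrapped around */

	return addr >= start && addr_end <= end;
}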
@@ -142,6 +144,12 @@ static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range
static u32
ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr)
{
+ if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_60XX &&
+ ivpu_sched_mode == VPU_SCHEDULING_MODE_OS) {
+ ivpu_warn(vdev, "OS sched mode is not supported, using HW mode\n");
+ return VPU_SCHEDULING_MODE_HW;
+ }
+
if (ivpu_sched_mode != IVPU_SCHED_MODE_AUTO)
return ivpu_sched_mode;
@@ -151,11 +159,56 @@ ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_he
return VPU_SCHEDULING_MODE_HW;
}
+static void
+ivpu_preemption_config_parse(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr)
+{
+ struct ivpu_fw_info *fw = vdev->fw;
+ u32 primary_preempt_buf_size, secondary_preempt_buf_size;
+
+ if (fw_hdr->preemption_buffer_1_max_size)
+ primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size;
+ else
+ primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
+
+ if (fw_hdr->preemption_buffer_2_max_size)
+ secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size;
+ else
+ secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
+
+ ivpu_dbg(vdev, FW_BOOT, "Preemption buffer size, primary: %u, secondary: %u\n",
+ primary_preempt_buf_size, secondary_preempt_buf_size);
+
+ if (primary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE ||
+ secondary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE) {
+ ivpu_warn(vdev, "Preemption buffers size too small\n");
+ return;
+ }
+
+ if (primary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE ||
+ secondary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE) {
+ ivpu_warn(vdev, "Preemption buffers size too big\n");
+ return;
+ }
+
+ if (fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ return;
+
+ if (ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE)
+ return;
+
+ vdev->fw->primary_preempt_buf_size = ALIGN(primary_preempt_buf_size, PAGE_SIZE);
+ vdev->fw->secondary_preempt_buf_size = ALIGN(secondary_preempt_buf_size, PAGE_SIZE);
+}
+
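+/*
+ * Editorial note -- condensed sketch of the policy above (hypothetical
+ * helper name): prefer the header's *_max_size when non-zero, reject
+ * sizes outside the 4K..32M window, and only commit page-aligned values;
+ * sizes left at zero later read as "mid-inference preemption unsupported".
+ *
+ *	static u32 preempt_buf_size_pick(u32 max_size, u32 size)
+ *	{
+ *		u32 v = max_size ? max_size : size;
+ *
+ *		if (v < FW_PREEMPT_BUF_MIN_SIZE || v > FW_PREEMPT_BUF_MAX_SIZE)
+ *			return 0;
+ *		return ALIGN(v, PAGE_SIZE);
+ *	}
+ */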
static int ivpu_fw_parse(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
const struct vpu_firmware_header *fw_hdr = (const void *)fw->file->data;
- u64 runtime_addr, image_load_addr, runtime_size, image_size;
+ struct ivpu_addr_range fw_image_range;
+ u64 boot_params_addr, boot_params_size;
+ u64 fw_version_addr, fw_version_size;
+ u64 runtime_addr, runtime_size;
+ u64 image_load_addr, image_size;
if (fw->file->size <= FW_FILE_IMAGE_OFFSET) {
ivpu_err(vdev, "Firmware file is too small: %zu\n", fw->file->size);
@@ -167,18 +220,37 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
return -EINVAL;
}
- runtime_addr = fw_hdr->boot_params_load_address;
- runtime_size = fw_hdr->runtime_size;
- image_load_addr = fw_hdr->image_load_address;
- image_size = fw_hdr->image_size;
+ boot_params_addr = fw_hdr->boot_params_load_address;
+ boot_params_size = SZ_4K;
- if (runtime_addr < FW_RUNTIME_MIN_ADDR || runtime_addr > FW_RUNTIME_MAX_ADDR) {
- ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx\n", runtime_addr);
+ if (!ivpu_is_within_range(boot_params_addr, boot_params_size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid boot params address: 0x%llx\n", boot_params_addr);
return -EINVAL;
}
- if (runtime_size < fw->file->size || runtime_size > FW_RUNTIME_MAX_SIZE) {
- ivpu_err(vdev, "Invalid firmware runtime size: %llu\n", runtime_size);
+ fw_version_addr = fw_hdr->firmware_version_load_address;
+ fw_version_size = ALIGN(fw_hdr->firmware_version_size, SZ_4K);
+
+ if (fw_version_size != SZ_4K) {
+ ivpu_err(vdev, "Invalid firmware version size: %u\n",
+ fw_hdr->firmware_version_size);
+ return -EINVAL;
+ }
+
+ if (!ivpu_is_within_range(fw_version_addr, fw_version_size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid firmware version address: 0x%llx\n", fw_version_addr);
+ return -EINVAL;
+ }
+
+ runtime_addr = fw_hdr->image_load_address;
+ runtime_size = fw_hdr->runtime_size - boot_params_size - fw_version_size;
+
+ image_load_addr = fw_hdr->image_load_address;
+ image_size = fw_hdr->image_size;
+
+ if (!ivpu_is_within_range(runtime_addr, runtime_size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx and size %llu\n",
+ runtime_addr, runtime_size);
return -EINVAL;
}
@@ -187,23 +259,25 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
return -EINVAL;
}
- if (image_load_addr < runtime_addr ||
- image_load_addr + image_size > runtime_addr + runtime_size) {
- ivpu_err(vdev, "Invalid firmware load address size: 0x%llx and size %llu\n",
+ if (!ivpu_is_within_range(image_load_addr, image_size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid firmware load address: 0x%llx and size %llu\n",
image_load_addr, image_size);
return -EINVAL;
}
- if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) {
- ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size);
+ if (ivpu_hw_range_init(vdev, &fw_image_range, image_load_addr, image_size))
return -EINVAL;
- }
- if (fw_hdr->entry_point < image_load_addr ||
- fw_hdr->entry_point >= image_load_addr + image_size) {
+ if (!ivpu_is_within_range(fw_hdr->entry_point, SZ_4K, &fw_image_range)) {
ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point);
return -EINVAL;
}
+
+ if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) {
+ ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size);
+ return -EINVAL;
+ }
+
ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
fw_hdr->header_version, fw_hdr->image_format);
@@ -217,6 +291,10 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, JSM, 3))
return -EINVAL;
+ fw->boot_params_addr = boot_params_addr;
+ fw->boot_params_size = boot_params_size;
+ fw->fw_version_addr = fw_version_addr;
+ fw->fw_version_size = fw_version_size;
fw->runtime_addr = runtime_addr;
fw->runtime_size = runtime_size;
fw->image_load_offset = image_load_addr - runtime_addr;
@@ -235,22 +313,13 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr);
ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS");
- if (fw_hdr->preemption_buffer_1_max_size)
- fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size;
- else
- fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
+ ivpu_preemption_config_parse(vdev, fw_hdr);
+ ivpu_dbg(vdev, FW_BOOT, "Mid-inference preemption %s supported\n",
+ ivpu_fw_preempt_buf_size(vdev) ? "is" : "is not");
- if (fw_hdr->preemption_buffer_2_max_size)
- fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size;
- else
- fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
- ivpu_dbg(vdev, FW_BOOT, "Preemption buffer sizes: primary %u, secondary %u\n",
- fw->primary_preempt_buf_size, fw->secondary_preempt_buf_size);
-
- if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address,
- fw_hdr->ro_section_size,
- fw_hdr->image_load_address,
- fw_hdr->image_size)) {
+ if (fw_hdr->ro_section_start_address &&
+ !ivpu_is_within_range(fw_hdr->ro_section_start_address, fw_hdr->ro_section_size,
+ &fw_image_range)) {
ivpu_err(vdev, "Invalid read-only section: start address 0x%llx, size %u\n",
fw_hdr->ro_section_start_address, fw_hdr->ro_section_size);
return -EINVAL;
@@ -259,12 +328,18 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->read_only_addr = fw_hdr->ro_section_start_address;
fw->read_only_size = fw_hdr->ro_section_size;
- ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
- fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
- ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
- fw->runtime_addr, image_load_addr, fw->entry_point);
+ ivpu_dbg(vdev, FW_BOOT, "Boot params: address 0x%llx, size %llu\n",
+ fw->boot_params_addr, fw->boot_params_size);
+ ivpu_dbg(vdev, FW_BOOT, "FW version: address 0x%llx, size %llu\n",
+ fw->fw_version_addr, fw->fw_version_size);
+ ivpu_dbg(vdev, FW_BOOT, "Runtime: address 0x%llx, size %u\n",
+ fw->runtime_addr, fw->runtime_size);
+ ivpu_dbg(vdev, FW_BOOT, "Image load offset: 0x%llx, size %u\n",
+ fw->image_load_offset, fw->image_size);
ivpu_dbg(vdev, FW_BOOT, "Read-only section: address 0x%llx, size %u\n",
fw->read_only_addr, fw->read_only_size);
+ ivpu_dbg(vdev, FW_BOOT, "FW entry point: 0x%llx\n", fw->entry_point);
+ ivpu_dbg(vdev, FW_BOOT, "SHAVE NN size: %u\n", fw->shave_nn_size);
return 0;
}
@@ -291,39 +366,33 @@ ivpu_fw_init_wa(struct ivpu_device *vdev)
IVPU_PRINT_WA(disable_d0i3_msg);
}
-static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
-{
- struct ivpu_fw_info *fw = vdev->fw;
- u64 start = ALIGN(fw->runtime_addr + fw->runtime_size, FW_SHARED_MEM_ALIGNMENT);
- u64 size = FW_SHARED_MEM_SIZE;
-
- if (start + size > FW_GLOBAL_MEM_END) {
- ivpu_err(vdev, "No space for shared region, start %lld, size %lld\n", start, size);
- return -EINVAL;
- }
-
- ivpu_hw_range_init(&vdev->hw->ranges.global, start, size);
- return 0;
-}
-
static int ivpu_fw_mem_init(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
- struct ivpu_addr_range fw_range;
int log_verb_size;
int ret;
- ret = ivpu_fw_update_global_range(vdev);
- if (ret)
- return ret;
+ fw->mem_bp = ivpu_bo_create_runtime(vdev, fw->boot_params_addr, fw->boot_params_size,
+ DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
+ if (!fw->mem_bp) {
+ ivpu_err(vdev, "Failed to create firmware boot params memory buffer\n");
+ return -ENOMEM;
+ }
- fw_range.start = fw->runtime_addr;
- fw_range.end = fw->runtime_addr + fw->runtime_size;
- fw->mem = ivpu_bo_create(vdev, &vdev->gctx, &fw_range, fw->runtime_size,
- DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
+ fw->mem_fw_ver = ivpu_bo_create_runtime(vdev, fw->fw_version_addr, fw->fw_version_size,
+ DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
+ if (!fw->mem_fw_ver) {
+ ivpu_err(vdev, "Failed to create firmware version memory buffer\n");
+ ret = -ENOMEM;
+ goto err_free_bp;
+ }
+
+ fw->mem = ivpu_bo_create_runtime(vdev, fw->runtime_addr, fw->runtime_size,
+ DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!fw->mem) {
ivpu_err(vdev, "Failed to create firmware runtime memory buffer\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free_fw_ver;
}
ret = ivpu_mmu_context_set_pages_ro(vdev, &vdev->gctx, fw->read_only_addr,
@@ -372,6 +441,10 @@ err_free_log_crit:
ivpu_bo_free(fw->mem_log_crit);
err_free_fw_mem:
ivpu_bo_free(fw->mem);
+err_free_fw_ver:
+ ivpu_bo_free(fw->mem_fw_ver);
+err_free_bp:
+ ivpu_bo_free(fw->mem_bp);
return ret;
}
@@ -387,10 +460,14 @@ static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
ivpu_bo_free(fw->mem_log_verb);
ivpu_bo_free(fw->mem_log_crit);
ivpu_bo_free(fw->mem);
+ ivpu_bo_free(fw->mem_fw_ver);
+ ivpu_bo_free(fw->mem_bp);
fw->mem_log_verb = NULL;
fw->mem_log_crit = NULL;
fw->mem = NULL;
+ fw->mem_fw_ver = NULL;
+ fw->mem_bp = NULL;
}
int ivpu_fw_init(struct ivpu_device *vdev)
@@ -483,11 +560,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = 0x%x\n",
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_base = 0x%llx\n",
- boot_params->global_memory_allocator_base);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_size = 0x%x\n",
- boot_params->global_memory_allocator_size);
-
ivpu_dbg(vdev, FW_BOOT, "boot_params.shave_nn_fw_base = 0x%llx\n",
boot_params->shave_nn_fw_base);
@@ -495,10 +567,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->watchdog_irq_mss);
ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_nce = 0x%x\n",
boot_params->watchdog_irq_nce);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.host_to_vpu_irq = 0x%x\n",
- boot_params->host_to_vpu_irq);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.job_done_irq = 0x%x\n",
- boot_params->job_done_irq);
ivpu_dbg(vdev, FW_BOOT, "boot_params.host_version_id = 0x%x\n",
boot_params->host_version_id);
@@ -546,6 +614,8 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->system_time_us);
ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = 0x%x\n",
boot_params->power_profile);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_uses_ecc_mca_signal = 0x%x\n",
+ boot_params->vpu_uses_ecc_mca_signal);
}
void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
@@ -572,6 +642,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
return;
}
+ memset(boot_params, 0, sizeof(*boot_params));
vdev->pm->is_warmboot = false;
boot_params->magic = VPU_BOOT_PARAMS_MAGIC;
@@ -647,6 +718,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->d0i3_entry_vpu_ts = 0;
if (IVPU_WA(disable_d0i2))
boot_params->power_profile |= BIT(1);
+ boot_params->vpu_uses_ecc_mca_signal =
+ ivpu_hw_uses_ecc_mca_signal(vdev) ? VPU_BOOT_MCA_ECC_BOTH : 0;
boot_params->system_time_us = ktime_to_us(ktime_get_real());
wmb(); /* Flush WC buffers after writing bootparams */
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
index 9a3935be1c05..00945892b55e 100644
--- a/drivers/accel/ivpu/ivpu_fw.h
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_FW_H__
@@ -19,10 +19,16 @@ struct ivpu_fw_info {
const struct firmware *file;
const char *name;
char version[FW_VERSION_STR_SIZE];
+ struct ivpu_bo *mem_bp;
+ struct ivpu_bo *mem_fw_ver;
struct ivpu_bo *mem;
struct ivpu_bo *mem_shave_nn;
struct ivpu_bo *mem_log_crit;
struct ivpu_bo *mem_log_verb;
+ u64 boot_params_addr;
+ u64 boot_params_size;
+ u64 fw_version_addr;
+ u64 fw_version_size;
u64 runtime_addr;
u32 runtime_size;
u64 image_load_offset;
@@ -42,14 +48,20 @@ struct ivpu_fw_info {
u64 last_heartbeat;
};
+bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range);
int ivpu_fw_init(struct ivpu_device *vdev);
void ivpu_fw_fini(struct ivpu_device *vdev);
void ivpu_fw_load(struct ivpu_device *vdev);
-void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *bp);
+void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params);
static inline bool ivpu_fw_is_cold_boot(struct ivpu_device *vdev)
{
return vdev->fw->entry_point == vdev->fw->cold_boot_entry_point;
}
+static inline u32 ivpu_fw_preempt_buf_size(struct ivpu_device *vdev)
+{
+ return vdev->fw->primary_preempt_buf_size + vdev->fw->secondary_preempt_buf_size;
+}
+
#endif /* __IVPU_FW_H__ */
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 59cfcf3eaded..ece68f570b7e 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -15,6 +15,7 @@
#include <drm/drm_utils.h>
#include "ivpu_drv.h"
+#include "ivpu_fw.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
@@ -27,8 +28,8 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs;
static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action)
{
ivpu_dbg(vdev, BO,
- "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
- action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx_id,
+ "%6s: bo %8p size %9zu ctx %d vpu_addr %9llx pages %d sgt %d mmu_mapped %d wc %d imported %d\n",
+ action, bo, ivpu_bo_size(bo), bo->ctx_id, bo->vpu_addr,
(bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
(bool)drm_gem_is_imported(&bo->base.base));
}
@@ -43,22 +44,47 @@ static inline void ivpu_bo_unlock(struct ivpu_bo *bo)
dma_resv_unlock(bo->base.base.resv);
}
+static struct sg_table *ivpu_bo_map_attachment(struct ivpu_device *vdev, struct ivpu_bo *bo)
+{
+ struct sg_table *sgt;
+
+ drm_WARN_ON(&vdev->drm, !bo->base.base.import_attach);
+
+ ivpu_bo_lock(bo);
+
+ sgt = bo->base.sgt;
+ if (!sgt) {
+ sgt = dma_buf_map_attachment(bo->base.base.import_attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ ivpu_err(vdev, "Failed to map BO in IOMMU: %ld\n", PTR_ERR(sgt));
+ else
+ bo->base.sgt = sgt;
+ }
+
+ ivpu_bo_unlock(bo);
+
+ return sgt;
+}
+
/*
- * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
+ * ivpu_bo_bind() - pin the backing physical pages and map them to VPU.
*
* This function pins physical memory pages, then maps the physical pages
* to IOMMU address space and finally updates the VPU MMU page tables
* to allow the VPU to translate VPU address to IOMMU address.
*/
-int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
+int __must_check ivpu_bo_bind(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
struct sg_table *sgt;
int ret = 0;
- ivpu_dbg_bo(vdev, bo, "pin");
+ ivpu_dbg_bo(vdev, bo, "bind");
- sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+ if (bo->base.base.import_attach)
+ sgt = ivpu_bo_map_attachment(vdev, bo);
+ else
+ sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
@@ -70,7 +96,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
if (!bo->mmu_mapped) {
drm_WARN_ON(&vdev->drm, !bo->ctx);
ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
- ivpu_bo_is_snooped(bo));
+ ivpu_bo_is_snooped(bo), ivpu_bo_is_read_only(bo));
if (ret) {
ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
goto unlock;
@@ -99,9 +125,9 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
if (!ret) {
bo->ctx = ctx;
+ bo->ctx_id = ctx->id;
bo->vpu_addr = bo->mm_node.start;
- } else {
- ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
+ ivpu_dbg_bo(vdev, bo, "vaddr");
}
ivpu_bo_unlock(bo);
@@ -115,7 +141,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount));
+ dma_resv_assert_held(bo->base.base.resv);
if (bo->mmu_mapped) {
drm_WARN_ON(&vdev->drm, !bo->ctx);
@@ -130,13 +156,15 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
bo->ctx = NULL;
}
- if (drm_gem_is_imported(&bo->base.base))
- return;
-
if (bo->base.sgt) {
- dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
- sg_free_table(bo->base.sgt);
- kfree(bo->base.sgt);
+ if (bo->base.base.import_attach) {
+ dma_buf_unmap_attachment(bo->base.base.import_attach,
+ bo->base.sgt, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
+ sg_free_table(bo->base.sgt);
+ kfree(bo->base.sgt);
+ }
bo->base.sgt = NULL;
}
}
@@ -182,10 +210,11 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
+ struct ivpu_device *vdev = to_ivpu_device(dev);
struct device *attach_dev = dev->dev;
struct dma_buf_attachment *attach;
- struct sg_table *sgt;
struct drm_gem_object *obj;
+ struct ivpu_bo *bo;
int ret;
attach = dma_buf_attach(dma_buf, attach_dev);
@@ -194,25 +223,25 @@ struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
get_dma_buf(dma_buf);
- sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sgt)) {
- ret = PTR_ERR(sgt);
- goto fail_detach;
- }
-
- obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ obj = drm_gem_shmem_prime_import_sg_table(dev, attach, NULL);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
- goto fail_unmap;
+ goto fail_detach;
}
obj->import_attach = attach;
obj->resv = dma_buf->resv;
+ bo = to_ivpu_bo(obj);
+
+ mutex_lock(&vdev->bo_list_lock);
+ list_add_tail(&bo->bo_list_node, &vdev->bo_list);
+ mutex_unlock(&vdev->bo_list_lock);
+
+ ivpu_dbg(vdev, BO, "import: bo %8p size %9zu\n", bo, ivpu_bo_size(bo));
+
return obj;
-fail_unmap:
- dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
@@ -220,7 +249,7 @@ fail_detach:
return ERR_PTR(ret);
}
-static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, u32 ctx_id)
+static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
{
struct drm_gem_shmem_object *shmem;
struct ivpu_bo *bo;
@@ -238,7 +267,6 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
return ERR_CAST(shmem);
bo = to_ivpu_bo(&shmem->base);
- bo->ctx_id = ctx_id;
bo->base.map_wc = flags & DRM_IVPU_BO_WC;
bo->flags = flags;
@@ -246,7 +274,7 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
list_add_tail(&bo->bo_list_node, &vdev->bo_list);
mutex_unlock(&vdev->bo_list_lock);
- ivpu_dbg_bo(vdev, bo, "alloc");
+ ivpu_dbg(vdev, BO, " alloc: bo %8p size %9llu\n", bo, size);
return bo;
}
@@ -259,8 +287,8 @@ static int ivpu_gem_bo_open(struct drm_gem_object *obj, struct drm_file *file)
struct ivpu_addr_range *range;
if (bo->ctx) {
- ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n",
- file_priv->ctx.id, bo->ctx->id);
+ ivpu_dbg(vdev, IOCTL, "Can't add BO %pe to ctx %u: already in ctx %u\n",
+ bo, file_priv->ctx.id, bo->ctx->id);
return -EALREADY;
}
@@ -281,23 +309,41 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
ivpu_dbg_bo(vdev, bo, "free");
+ drm_WARN_ON(&vdev->drm, list_empty(&bo->bo_list_node));
+
mutex_lock(&vdev->bo_list_lock);
list_del(&bo->bo_list_node);
- mutex_unlock(&vdev->bo_list_lock);
drm_WARN_ON(&vdev->drm, !drm_gem_is_imported(&bo->base.base) &&
!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0);
drm_WARN_ON(&vdev->drm, bo->base.vaddr);
+ ivpu_bo_lock(bo);
ivpu_bo_unbind_locked(bo);
+ ivpu_bo_unlock(bo);
+
+ mutex_unlock(&vdev->bo_list_lock);
+
drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
drm_WARN_ON(&vdev->drm, bo->ctx);
drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
+ drm_WARN_ON(obj->dev, bo->base.base.vma_node.vm_files.rb_node);
drm_gem_shmem_free(&bo->base);
}
+static enum drm_gem_object_status ivpu_gem_status(struct drm_gem_object *obj)
+{
+ struct ivpu_bo *bo = to_ivpu_bo(obj);
+ enum drm_gem_object_status status = 0;
+
+ if (ivpu_bo_is_resident(bo))
+ status |= DRM_GEM_OBJECT_RESIDENT;
+
+ return status;
+}
+
static const struct drm_gem_object_funcs ivpu_gem_funcs = {
.free = ivpu_gem_bo_free,
.open = ivpu_gem_bo_open,
@@ -308,6 +354,7 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs = {
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = drm_gem_shmem_object_mmap,
+ .status = ivpu_gem_status,
.vm_ops = &drm_gem_shmem_vm_ops,
};
@@ -320,25 +367,33 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
struct ivpu_bo *bo;
int ret;
- if (args->flags & ~DRM_IVPU_BO_FLAGS)
+ if (args->flags & ~DRM_IVPU_BO_FLAGS) {
+ ivpu_dbg(vdev, IOCTL, "Invalid BO flags 0x%x\n", args->flags);
return -EINVAL;
+ }
- if (size == 0)
+ if (size == 0) {
+ ivpu_dbg(vdev, IOCTL, "Invalid BO size %llu\n", args->size);
return -EINVAL;
+ }
- bo = ivpu_bo_alloc(vdev, size, args->flags, file_priv->ctx.id);
+ bo = ivpu_bo_alloc(vdev, size, args->flags);
if (IS_ERR(bo)) {
- ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)",
+ ivpu_dbg(vdev, IOCTL, "Failed to allocate BO: %pe ctx %u size %llu flags 0x%x\n",
bo, file_priv->ctx.id, args->size, args->flags);
return PTR_ERR(bo);
}
+ drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 0);
+
ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
- if (ret)
- ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)",
+ if (ret) {
+ ivpu_dbg(vdev, IOCTL, "Failed to create handle for BO: %pe ctx %u size %llu flags 0x%x\n",
bo, file_priv->ctx.id, args->size, args->flags);
- else
+ } else {
args->vpu_addr = bo->vpu_addr;
+ drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 1);
+ }
drm_gem_object_put(&bo->base.base);
@@ -360,18 +415,21 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end));
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
- bo = ivpu_bo_alloc(vdev, size, flags, IVPU_GLOBAL_CONTEXT_MMU_SSID);
+ bo = ivpu_bo_alloc(vdev, size, flags);
if (IS_ERR(bo)) {
- ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
+ ivpu_err(vdev, "Failed to allocate BO: %pe vpu_addr 0x%llx size %llu flags 0x%x\n",
bo, range->start, size, flags);
return NULL;
}
ret = ivpu_bo_alloc_vpu_addr(bo, ctx, range);
- if (ret)
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate NPU address for BO: %pe ctx %u size %llu: %d\n",
+ bo, ctx->id, size, ret);
goto err_put;
+ }
- ret = ivpu_bo_pin(bo);
+ ret = ivpu_bo_bind(bo);
if (ret)
goto err_put;
@@ -391,6 +449,21 @@ err_put:
return NULL;
}
+struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags)
+{
+ struct ivpu_addr_range range;
+
+ if (!ivpu_is_within_range(addr, size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid runtime BO address 0x%llx size %llu\n", addr, size);
+ return NULL;
+ }
+
+ if (ivpu_hw_range_init(vdev, &range, addr, size))
+ return NULL;
+
+ return ivpu_bo_create(vdev, &vdev->gctx, &range, size, flags);
+}
+
struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags)
{
return ivpu_bo_create(vdev, &vdev->gctx, &vdev->hw->ranges.global, size, flags);
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index aa8ff14f7aae..0c3350f22b55 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_GEM_H__
#define __IVPU_GEM_H__
@@ -24,19 +24,22 @@ struct ivpu_bo {
bool mmu_mapped;
};
-int ivpu_bo_pin(struct ivpu_bo *bo);
+int ivpu_bo_bind(struct ivpu_bo *bo);
void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
struct ivpu_bo *ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
struct ivpu_addr_range *range, u64 size, u32 flags);
+struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags);
struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags);
void ivpu_bo_free(struct ivpu_bo *bo);
int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_bo_create_from_userptr_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p);
void ivpu_bo_list_print(struct drm_device *dev);
@@ -74,6 +77,16 @@ static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
}
+static inline bool ivpu_bo_is_read_only(struct ivpu_bo *bo)
+{
+ return bo->flags & DRM_IVPU_BO_READ_ONLY;
+}
+
+static inline bool ivpu_bo_is_resident(struct ivpu_bo *bo)
+{
+ return !!bo->base.pages;
+}
+
static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr)
{
if (vpu_addr < bo->vpu_addr)
@@ -96,4 +109,9 @@ static inline u32 cpu_to_vpu_addr(struct ivpu_bo *bo, void *cpu_addr)
return bo->vpu_addr + (cpu_addr - ivpu_bo_vaddr(bo));
}
+static inline bool ivpu_bo_is_mappable(struct ivpu_bo *bo)
+{
+ return bo->flags & DRM_IVPU_BO_MAPPABLE;
+}
+
#endif /* __IVPU_GEM_H__ */
diff --git a/drivers/accel/ivpu/ivpu_gem_userptr.c b/drivers/accel/ivpu/ivpu_gem_userptr.c
new file mode 100644
index 000000000000..25ba606164c0
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_gem_userptr.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2025 Intel Corporation
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/capability.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_gem.h"
+
+static struct sg_table *
+ivpu_gem_userptr_dmabuf_map(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct sg_table *sgt = attachment->dmabuf->priv;
+ int ret;
+
+ ret = dma_map_sgtable(attachment->dev, sgt, direction, DMA_ATTR_SKIP_CPU_SYNC);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return sgt;
+}
+
+static void ivpu_gem_userptr_dmabuf_unmap(struct dma_buf_attachment *attachment,
+ struct sg_table *sgt,
+ enum dma_data_direction direction)
+{
+ dma_unmap_sgtable(attachment->dev, sgt, direction, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static void ivpu_gem_userptr_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct sg_table *sgt = dma_buf->priv;
+ struct sg_page_iter page_iter;
+ struct page *page;
+
+ for_each_sgtable_page(sgt, &page_iter, 0) {
+ page = sg_page_iter_page(&page_iter);
+ unpin_user_page(page);
+ }
+
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+static const struct dma_buf_ops ivpu_gem_userptr_dmabuf_ops = {
+ .map_dma_buf = ivpu_gem_userptr_dmabuf_map,
+ .unmap_dma_buf = ivpu_gem_userptr_dmabuf_unmap,
+ .release = ivpu_gem_userptr_dmabuf_release,
+};
+
+static struct dma_buf *
+ivpu_create_userptr_dmabuf(struct ivpu_device *vdev, void __user *user_ptr,
+ size_t size, uint32_t flags)
+{
+ struct dma_buf_export_info exp_info = {};
+ struct dma_buf *dma_buf;
+ struct sg_table *sgt;
+ struct page **pages;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ unsigned int gup_flags = FOLL_LONGTERM;
+ int ret, i, pinned;
+
+ /* Add FOLL_WRITE only if the BO is not read-only */
+ if (!(flags & DRM_IVPU_BO_READ_ONLY))
+ gup_flags |= FOLL_WRITE;
+
+ pages = kvmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+
+ pinned = pin_user_pages_fast((unsigned long)user_ptr, nr_pages, gup_flags, pages);
+ if (pinned < 0) {
+ ret = pinned;
+ ivpu_dbg(vdev, IOCTL, "Failed to pin user pages: %d\n", ret);
+ goto free_pages_array;
+ }
+
+ if (pinned != nr_pages) {
+ ivpu_dbg(vdev, IOCTL, "Pinned %d pages, expected %lu\n", pinned, nr_pages);
+ ret = -EFAULT;
+ goto unpin_pages;
+ }
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ ret = -ENOMEM;
+ goto unpin_pages;
+ }
+
+ ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0, size, GFP_KERNEL);
+ if (ret) {
+ ivpu_dbg(vdev, IOCTL, "Failed to create sg table: %d\n", ret);
+ goto free_sgt;
+ }
+
+ exp_info.exp_name = "ivpu_userptr_dmabuf";
+ exp_info.owner = THIS_MODULE;
+ exp_info.ops = &ivpu_gem_userptr_dmabuf_ops;
+ exp_info.size = size;
+ exp_info.flags = O_RDWR | O_CLOEXEC;
+ exp_info.priv = sgt;
+
+ dma_buf = dma_buf_export(&exp_info);
+ if (IS_ERR(dma_buf)) {
+ ret = PTR_ERR(dma_buf);
+ ivpu_dbg(vdev, IOCTL, "Failed to export userptr dma-buf: %d\n", ret);
+ goto free_sg_table;
+ }
+
+ kvfree(pages);
+ return dma_buf;
+
+free_sg_table:
+ sg_free_table(sgt);
+free_sgt:
+ kfree(sgt);
+unpin_pages:
+ for (i = 0; i < pinned; i++)
+ unpin_user_page(pages[i]);
+free_pages_array:
+ kvfree(pages);
+ return ERR_PTR(ret);
+}
+
+static struct ivpu_bo *
+ivpu_bo_create_from_userptr(struct ivpu_device *vdev, void __user *user_ptr,
+ size_t size, uint32_t flags)
+{
+ struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
+ struct ivpu_bo *bo;
+
+ dma_buf = ivpu_create_userptr_dmabuf(vdev, user_ptr, size, flags);
+ if (IS_ERR(dma_buf))
+ return ERR_CAST(dma_buf);
+
+ obj = ivpu_gem_prime_import(&vdev->drm, dma_buf);
+ if (IS_ERR(obj)) {
+ dma_buf_put(dma_buf);
+ return ERR_CAST(obj);
+ }
+
+ dma_buf_put(dma_buf);
+
+ bo = to_ivpu_bo(obj);
+ bo->flags = flags;
+
+ return bo;
+}
+
+int ivpu_bo_create_from_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_ivpu_bo_create_from_userptr *args = data;
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = to_ivpu_device(dev);
+ void __user *user_ptr = u64_to_user_ptr(args->user_ptr);
+ struct ivpu_bo *bo;
+ int ret;
+
+ if (args->flags & ~(DRM_IVPU_BO_HIGH_MEM | DRM_IVPU_BO_DMA_MEM | DRM_IVPU_BO_READ_ONLY)) {
+ ivpu_dbg(vdev, IOCTL, "Invalid BO flags: 0x%x\n", args->flags);
+ return -EINVAL;
+ }
+
+ if (!args->user_ptr || !args->size) {
+ ivpu_dbg(vdev, IOCTL, "Userptr or size are zero: ptr %llx size %llu\n",
+ args->user_ptr, args->size);
+ return -EINVAL;
+ }
+
+ if (!PAGE_ALIGNED(args->user_ptr) || !PAGE_ALIGNED(args->size)) {
+ ivpu_dbg(vdev, IOCTL, "Userptr or size not page aligned: ptr %llx size %llu\n",
+ args->user_ptr, args->size);
+ return -EINVAL;
+ }
+
+ if (!access_ok(user_ptr, args->size)) {
+ ivpu_dbg(vdev, IOCTL, "Userptr is not accessible: ptr %llx size %llu\n",
+ args->user_ptr, args->size);
+ return -EFAULT;
+ }
+
+ bo = ivpu_bo_create_from_userptr(vdev, user_ptr, args->size, args->flags);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
+ if (ret) {
+ ivpu_dbg(vdev, IOCTL, "Failed to create handle for BO: %pe ctx %u size %llu flags 0x%x\n",
+ bo, file_priv->ctx.id, args->size, args->flags);
+ } else {
+ ivpu_dbg(vdev, BO, "Created userptr BO: handle=%u vpu_addr=0x%llx size=%llu flags=0x%x\n",
+ args->handle, bo->vpu_addr, args->size, bo->flags);
+ args->vpu_addr = bo->vpu_addr;
+ }
+
+ drm_gem_object_put(&bo->base.base);
+
+ return ret;
+}
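/*
 * Editorial note -- hypothetical userspace usage sketch for the ioctl
 * added above. The struct layout and the DRM_IOCTL_IVPU_BO_CREATE_FROM_USERPTR
 * macro are assumptions here; consult the uapi header (drm/ivpu_accel.h)
 * for the real definitions. Needs <stdint.h>, <stdlib.h>, <sys/ioctl.h>.
 */
static int sketch_create_userptr_bo(int drm_fd)
{
	struct drm_ivpu_bo_create_from_userptr args = {0};
	void *buf;

	/* both the pointer and the size must be page-aligned, see checks above */
	if (posix_memalign(&buf, 4096, 2 * 4096))
		return -1;

	args.user_ptr = (uint64_t)(uintptr_t)buf;
	args.size = 2 * 4096;
	args.flags = 0;	/* optionally DRM_IVPU_BO_READ_ONLY for input-only data */

	if (ioctl(drm_fd, DRM_IOCTL_IVPU_BO_CREATE_FROM_USERPTR, &args))
		return -1;	/* -EINVAL on misalignment, -EFAULT on bad pointer */

	/* args.handle and args.vpu_addr are now populated by the driver */
	return 0;
}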
diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
index 633160470c93..d69cd0d93569 100644
--- a/drivers/accel/ivpu/ivpu_hw.c
+++ b/drivers/accel/ivpu/ivpu_hw.c
@@ -8,6 +8,8 @@
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_ip.h"
+#include <asm/msr-index.h>
+#include <asm/msr.h>
#include <linux/dmi.h>
#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>
@@ -20,6 +22,10 @@ module_param_named_unsafe(fail_hw, ivpu_fail_hw, charp, 0444);
MODULE_PARM_DESC(fail_hw, "<interval>,<probability>,<space>,<times>");
#endif
+#define FW_SHARED_MEM_ALIGNMENT SZ_512K /* VPU MTRR limitation */
+
+#define ECC_MCA_SIGNAL_ENABLE_MASK 0xff
+
static char *platform_to_str(u32 platform)
{
switch (platform) {
@@ -94,12 +100,14 @@ static void timeouts_init(struct ivpu_device *vdev)
vdev->timeout.boot = -1;
vdev->timeout.jsm = -1;
vdev->timeout.tdr = -1;
+ vdev->timeout.inference = -1;
vdev->timeout.autosuspend = -1;
vdev->timeout.d0i3_entry_msg = -1;
} else if (ivpu_is_fpga(vdev)) {
vdev->timeout.boot = 50;
vdev->timeout.jsm = 15000;
vdev->timeout.tdr = 30000;
+ vdev->timeout.inference = 900000;
vdev->timeout.autosuspend = -1;
vdev->timeout.d0i3_entry_msg = 500;
vdev->timeout.state_dump_msg = 10000;
@@ -107,6 +115,7 @@ static void timeouts_init(struct ivpu_device *vdev)
vdev->timeout.boot = 50;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 10000;
+ vdev->timeout.inference = 300000;
vdev->timeout.autosuspend = 100;
vdev->timeout.d0i3_entry_msg = 100;
vdev->timeout.state_dump_msg = 10;
@@ -114,6 +123,7 @@ static void timeouts_init(struct ivpu_device *vdev)
vdev->timeout.boot = 1000;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 2000;
+ vdev->timeout.inference = 60000;
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
vdev->timeout.autosuspend = 10;
else
@@ -143,19 +153,39 @@ static void priority_bands_init(struct ivpu_device *vdev)
vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000;
}
+int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start, u64 size)
+{
+ u64 end;
+
+ if (!range || check_add_overflow(start, size, &end)) {
+ ivpu_err(vdev, "Invalid range: start 0x%llx size %llu\n", start, size);
+ return -EINVAL;
+ }
+
+ range->start = start;
+ range->end = end;
+
+ return 0;
+}
+
static void memory_ranges_init(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
- ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
- ivpu_hw_range_init(&vdev->hw->ranges.user, 0x88000000, 511 * SZ_1M);
- ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, SZ_2G);
- ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_128G);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x84800000, SZ_64M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0xa0000000, 511 * SZ_1M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x180000000, SZ_2G);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.dma, 0x200000000, SZ_128G);
} else {
- ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
- ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000, SZ_2G);
- ivpu_hw_range_init(&vdev->hw->ranges.user, 0x100000000, SZ_256G);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x80000000, SZ_64M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x80000000, SZ_2G);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0x100000000, SZ_256G);
vdev->hw->ranges.dma = vdev->hw->ranges.user;
}
+
+ drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vdev->hw->ranges.global.start,
+ FW_SHARED_MEM_ALIGNMENT));
}
static int wp_enable(struct ivpu_device *vdev)
@@ -369,3 +399,22 @@ irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr)
pm_runtime_mark_last_busy(vdev->drm.dev);
return IRQ_HANDLED;
}
+
+bool ivpu_hw_uses_ecc_mca_signal(struct ivpu_device *vdev)
+{
+ unsigned long long msr_integrity_caps;
+ int ret;
+
+ if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX)
+ return false;
+
+ ret = rdmsrq_safe(MSR_INTEGRITY_CAPS, &msr_integrity_caps);
+ if (ret) {
+ ivpu_warn(vdev, "Error reading MSR_INTEGRITY_CAPS: %d", ret);
+ return false;
+ }
+
+ ivpu_dbg(vdev, MISC, "MSR_INTEGRITY_CAPS: 0x%llx\n", msr_integrity_caps);
+
+ return msr_integrity_caps & ECC_MCA_SIGNAL_ENABLE_MASK;
+}
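/*
 * Editorial note -- userspace analogue of the probe above, via the msr
 * driver (needs root and `modprobe msr`). The 0x2d9 register offset for
 * MSR_INTEGRITY_CAPS is an assumption here; the authoritative value
 * lives in arch/x86/include/asm/msr-index.h.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t caps;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &caps, sizeof(caps), 0x2d9) != sizeof(caps)) {
		perror("MSR_INTEGRITY_CAPS");
		return 1;
	}
	printf("MSR_INTEGRITY_CAPS: 0x%llx (low-byte ECC/MCA bits: 0x%llx)\n",
	       (unsigned long long)caps, (unsigned long long)(caps & 0xff));
	close(fd);
	return 0;
}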
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index d79668fe1609..b6d0f0d0dccc 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -21,6 +21,7 @@ struct ivpu_hw_info {
bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq);
} irq;
struct {
+ struct ivpu_addr_range runtime;
struct ivpu_addr_range global;
struct ivpu_addr_range user;
struct ivpu_addr_range shave;
@@ -51,6 +52,8 @@ struct ivpu_hw_info {
};
int ivpu_hw_init(struct ivpu_device *vdev);
+int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start,
+ u64 size);
int ivpu_hw_power_up(struct ivpu_device *vdev);
int ivpu_hw_power_down(struct ivpu_device *vdev);
int ivpu_hw_reset(struct ivpu_device *vdev);
@@ -60,6 +63,7 @@ void ivpu_irq_handlers_init(struct ivpu_device *vdev);
void ivpu_hw_irq_enable(struct ivpu_device *vdev);
void ivpu_hw_irq_disable(struct ivpu_device *vdev);
irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr);
+bool ivpu_hw_uses_ecc_mca_signal(struct ivpu_device *vdev);
static inline u32 ivpu_hw_btrs_irq_handler(struct ivpu_device *vdev, int irq)
{
@@ -71,12 +75,6 @@ static inline u32 ivpu_hw_ip_irq_handler(struct ivpu_device *vdev, int irq)
return vdev->hw->irq.ip_irq_handler(vdev, irq);
}
-static inline void ivpu_hw_range_init(struct ivpu_addr_range *range, u64 start, u64 size)
-{
- range->start = start;
- range->end = start + size;
-}
-
static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range)
{
return range->end - range->start;
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c
index b236c7234daa..06e65c592618 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.c
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.c
@@ -33,7 +33,6 @@
#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
-#define PLL_CONFIG_DEFAULT 0x0
#define PLL_REF_CLK_FREQ 50000000ull
#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
@@ -303,7 +302,7 @@ static void prepare_wp_request(struct ivpu_device *vdev, struct wp_request *wp,
wp->epp = 0;
} else {
wp->target = hw->pll.pn_ratio;
- wp->cfg = enable ? PLL_CONFIG_DEFAULT : 0;
+ wp->cfg = 0;
wp->cdyn = enable ? PLL_CDYN_DEFAULT : 0;
wp->epp = enable ? PLL_EPP_DEFAULT : 0;
}
@@ -322,6 +321,14 @@ static int wait_for_pll_lock(struct ivpu_device *vdev, bool enable)
return REGB_POLL_FLD(VPU_HW_BTRS_MTL_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
}
+static int wait_for_cdyn_deassert(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return 0;
+
+ return REGB_POLL_FLD(VPU_HW_BTRS_LNL_CDYN, CDYN, 0, PLL_TIMEOUT_US);
+}
+
int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable)
{
struct wp_request wp;
@@ -355,6 +362,14 @@ int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable)
return ret;
}
+ if (!enable) {
+ ret = wait_for_cdyn_deassert(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for CDYN deassert\n");
+ return ret;
+ }
+ }
+
return 0;
}
@@ -674,7 +689,7 @@ bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq)
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) {
ivpu_dbg(vdev, IRQ, "Survivability IRQ\n");
- queue_work(system_wq, &vdev->irq_dct_work);
+ queue_work(system_percpu_wq, &vdev->irq_dct_work);
}
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
@@ -753,7 +768,7 @@ int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable)
}
}
-void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent)
+void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u8 active_percent)
{
u32 val = 0;
u32 cmd = enable ? DCT_ENABLE : DCT_DISABLE;
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h
index d2d82651976d..c4c10e22f30f 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.h
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.h
@@ -36,7 +36,7 @@ u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev);
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq);
bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq);
int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable);
-void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 dct_percent);
+void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u8 active_percent);
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h b/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h
index fff2ef2cada6..a81a9ba540fa 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h
+++ b/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h
@@ -74,6 +74,9 @@
#define VPU_HW_BTRS_LNL_PLL_FREQ 0x00000148u
#define VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK GENMASK(15, 0)
+#define VPU_HW_BTRS_LNL_CDYN 0x0000014cu
+#define VPU_HW_BTRS_LNL_CDYN_CDYN_MASK GENMASK(15, 0)
+
#define VPU_HW_BTRS_LNL_TILE_FUSE 0x00000150u
#define VPU_HW_BTRS_LNL_TILE_FUSE_VALID_MASK BIT_MASK(0)
#define VPU_HW_BTRS_LNL_TILE_FUSE_CONFIG_MASK GENMASK(6, 1)
diff --git a/drivers/accel/ivpu/ivpu_hw_ip.c b/drivers/accel/ivpu/ivpu_hw_ip.c
index 823f6a57dc54..06aa1e7dc50b 100644
--- a/drivers/accel/ivpu/ivpu_hw_ip.c
+++ b/drivers/accel/ivpu/ivpu_hw_ip.c
@@ -683,6 +683,7 @@ static void pwr_island_delay_set(struct ivpu_device *vdev)
return;
switch (ivpu_device_id(vdev)) {
+ case PCI_DEVICE_ID_WCL:
case PCI_DEVICE_ID_PTL_P:
post = high ? 18 : 0;
post1 = 0;
@@ -690,6 +691,13 @@ static void pwr_island_delay_set(struct ivpu_device *vdev)
status = high ? 46 : 3;
break;
+ case PCI_DEVICE_ID_NVL:
+ post = high ? 198 : 17;
+ post1 = 0;
+ post2 = high ? 198 : 17;
+ status = 0;
+ break;
+
default:
dump_stack();
ivpu_err(vdev, "Unknown device ID\n");
@@ -888,6 +896,9 @@ static int soc_cpu_drive_40xx(struct ivpu_device *vdev, bool enable)
static int soc_cpu_enable(struct ivpu_device *vdev)
{
+ if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_60XX)
+ return 0;
+
return soc_cpu_drive_40xx(vdev, true);
}
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index 39f83225c181..1f13bf95b2b3 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -141,7 +141,6 @@ ivpu_ipc_rx_msg_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct ivpu_ipc_rx_msg *rx_msg;
lockdep_assert_held(&ipc->cons_lock);
- lockdep_assert_irqs_disabled();
rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
if (!rx_msg) {
@@ -460,7 +459,7 @@ void ivpu_ipc_irq_handler(struct ivpu_device *vdev)
}
}
- queue_work(system_wq, &vdev->irq_ipc_work);
+ queue_work(system_percpu_wq, &vdev->irq_ipc_work);
}
void ivpu_ipc_irq_work_fn(struct work_struct *work)
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index fae8351aa330..4f8564e2878a 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#include <drm/drm_file.h>
@@ -34,22 +34,20 @@ static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
- u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE);
- u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE);
-
- if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW ||
- ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE)
+ if (ivpu_fw_preempt_buf_size(vdev) == 0)
return 0;
cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user,
- primary_size, DRM_IVPU_BO_WC);
+ vdev->fw->primary_preempt_buf_size,
+ DRM_IVPU_BO_WC);
if (!cmdq->primary_preempt_buf) {
ivpu_err(vdev, "Failed to create primary preemption buffer\n");
return -ENOMEM;
}
cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma,
- secondary_size, DRM_IVPU_BO_WC);
+ vdev->fw->secondary_preempt_buf_size,
+ DRM_IVPU_BO_WC);
if (!cmdq->secondary_preempt_buf) {
ivpu_err(vdev, "Failed to create secondary preemption buffer\n");
goto err_free_primary;
@@ -66,20 +64,39 @@ err_free_primary:
static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
- if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
- return;
-
if (cmdq->primary_preempt_buf)
ivpu_bo_free(cmdq->primary_preempt_buf);
if (cmdq->secondary_preempt_buf)
ivpu_bo_free(cmdq->secondary_preempt_buf);
}
+static int ivpu_preemption_job_init(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv,
+ struct ivpu_cmdq *cmdq, struct ivpu_job *job)
+{
+ int ret;
+
+ /* Use the preemption buffer provided by user space */
+ if (job->primary_preempt_buf)
+ return 0;
+
+ if (!cmdq->primary_preempt_buf) {
+ /* Allocate per command queue preemption buffers */
+ ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
+ if (ret)
+ return ret;
+ }
+
+ /* Use preemption buffers allocated by the kernel */
+ job->primary_preempt_buf = cmdq->primary_preempt_buf;
+ job->secondary_preempt_buf = cmdq->secondary_preempt_buf;
+
+ return 0;
+}
+
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_cmdq *cmdq;
- int ret;
cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
if (!cmdq)
@@ -89,10 +106,6 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
if (!cmdq->mem)
goto err_free_cmdq;
- ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
- if (ret)
- ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n");
-
return cmdq;
err_free_cmdq:
@@ -100,6 +113,43 @@ err_free_cmdq:
return NULL;
}
+/**
+ * ivpu_cmdq_get_entry_count - Calculate the number of entries in the command queue.
+ * @cmdq: Pointer to the command queue structure.
+ *
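+ * Example with illustrative sizes only: a 4 KiB queue buffer holding a
+ * 64-byte header and 64-byte entries would yield (4096 - 64) / 64 = 63
+ * entries.
+ *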
+ * Return: the number of entries that fit in the command queue memory.
+ */
+static inline u32 ivpu_cmdq_get_entry_count(struct ivpu_cmdq *cmdq)
+{
+ size_t size = ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header);
+
+ return size / sizeof(struct vpu_job_queue_entry);
+}
+
+/**
+ * ivpu_cmdq_get_flags - Get command queue flags based on input flags and test mode.
+ * @vdev: Pointer to the ivpu device structure.
+ * @flags: Input flags to determine the command queue flags.
+ *
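+ * E.g. DRM_IVPU_CMDQ_FLAG_TURBO on a 40xx-or-later IP sets
+ * VPU_JOB_QUEUE_FLAGS_TURBO_MODE, unless IVPU_TEST_MODE_TURBO_DISABLE
+ * clears it again.
+ *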
+ * Return: the calculated command queue flags, considering both the input
+ * flags and the current test mode settings.
+ */
+static u32 ivpu_cmdq_get_flags(struct ivpu_device *vdev, u32 flags)
+{
+ u32 cmdq_flags = 0;
+
+ if ((flags & DRM_IVPU_CMDQ_FLAG_TURBO) && (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX))
+ cmdq_flags |= VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
+
+ /* Test mode can override the TURBO flag coming from the application */
+ if (ivpu_test_mode & IVPU_TEST_MODE_TURBO_ENABLE)
+ cmdq_flags |= VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
+ if (ivpu_test_mode & IVPU_TEST_MODE_TURBO_DISABLE)
+ cmdq_flags &= ~VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
+
+ return cmdq_flags;
+}
+
static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
ivpu_preemption_buffers_free(file_priv->vdev, file_priv, cmdq);
@@ -107,8 +157,7 @@ static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *c
kfree(cmdq);
}
-static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 priority,
- bool is_legacy)
+static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 priority, u32 flags)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_cmdq *cmdq = NULL;
@@ -121,10 +170,6 @@ static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 p
ivpu_err(vdev, "Failed to allocate command queue\n");
return NULL;
}
-
- cmdq->priority = priority;
- cmdq->is_legacy = is_legacy;
-
ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
&file_priv->cmdq_id_next, GFP_KERNEL);
if (ret < 0) {
@@ -132,7 +177,15 @@ static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 p
goto err_free_cmdq;
}
- ivpu_dbg(vdev, JOB, "Command queue %d created, ctx %d\n", cmdq->id, file_priv->ctx.id);
+ cmdq->entry_count = ivpu_cmdq_get_entry_count(cmdq);
+ cmdq->priority = priority;
+
+ cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
+ cmdq->jobq->header.engine_idx = VPU_ENGINE_COMPUTE;
+ cmdq->jobq->header.flags = ivpu_cmdq_get_flags(vdev, flags);
+
+ ivpu_dbg(vdev, JOB, "Command queue %d created, ctx %d, flags 0x%08x\n",
+ cmdq->id, file_priv->ctx.id, cmdq->jobq->header.flags);
return cmdq;
err_free_cmdq:
@@ -179,36 +232,25 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
- if (!ret)
+ if (!ret) {
ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority);
- else
+ } else {
xa_erase(&vdev->db_xa, cmdq->db_id);
+ cmdq->db_id = 0;
+ }
return ret;
}
-static void ivpu_cmdq_jobq_init(struct ivpu_device *vdev, struct vpu_job_queue *jobq)
+static void ivpu_cmdq_jobq_reset(struct ivpu_device *vdev, struct vpu_job_queue *jobq)
{
- jobq->header.engine_idx = VPU_ENGINE_COMPUTE;
jobq->header.head = 0;
jobq->header.tail = 0;
- if (ivpu_test_mode & IVPU_TEST_MODE_TURBO) {
- ivpu_dbg(vdev, JOB, "Turbo mode enabled");
- jobq->header.flags = VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
- }
-
wmb(); /* Flush WC buffer for jobq->header */
}
-static inline u32 ivpu_cmdq_get_entry_count(struct ivpu_cmdq *cmdq)
-{
- size_t size = ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header);
-
- return size / sizeof(struct vpu_job_queue_entry);
-}
-
static int ivpu_cmdq_register(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
struct ivpu_device *vdev = file_priv->vdev;
@@ -219,10 +261,7 @@ static int ivpu_cmdq_register(struct ivpu_file_priv *file_priv, struct ivpu_cmdq
if (cmdq->db_id)
return 0;
- cmdq->entry_count = ivpu_cmdq_get_entry_count(cmdq);
- cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
-
- ivpu_cmdq_jobq_init(vdev, cmdq->jobq);
+ ivpu_cmdq_jobq_reset(vdev, cmdq->jobq);
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, cmdq->priority);
@@ -291,9 +330,10 @@ static struct ivpu_cmdq *ivpu_cmdq_acquire_legacy(struct ivpu_file_priv *file_pr
break;
if (!cmdq) {
- cmdq = ivpu_cmdq_create(file_priv, priority, true);
+ cmdq = ivpu_cmdq_create(file_priv, priority, 0);
if (!cmdq)
return NULL;
+ cmdq->is_legacy = true;
}
return cmdq;
@@ -308,7 +348,7 @@ static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u32
cmdq = xa_load(&file_priv->cmdq_xa, cmdq_id);
if (!cmdq) {
- ivpu_warn_ratelimited(vdev, "Failed to find command queue with ID: %u\n", cmdq_id);
+ ivpu_dbg(vdev, IOCTL, "Failed to find command queue with ID: %u\n", cmdq_id);
return NULL;
}
@@ -402,17 +442,14 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
- if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
- if (cmdq->primary_preempt_buf) {
- entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
- entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
- }
+ if (job->primary_preempt_buf) {
+ entry->primary_preempt_buf_addr = job->primary_preempt_buf->vpu_addr;
+ entry->primary_preempt_buf_size = ivpu_bo_size(job->primary_preempt_buf);
+ }
- if (cmdq->secondary_preempt_buf) {
- entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
- entry->secondary_preempt_buf_size =
- ivpu_bo_size(cmdq->secondary_preempt_buf);
- }
+ if (job->secondary_preempt_buf) {
+ entry->secondary_preempt_buf_addr = job->secondary_preempt_buf->vpu_addr;
+ entry->secondary_preempt_buf_size = ivpu_bo_size(job->secondary_preempt_buf);
}
wmb(); /* Ensure that tail is updated after filling entry */
@@ -497,7 +534,7 @@ ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
job->bo_count = bo_count;
job->done_fence = ivpu_fence_create(vdev);
if (!job->done_fence) {
- ivpu_warn_ratelimited(vdev, "Failed to create a fence\n");
+ ivpu_err(vdev, "Failed to create a fence\n");
goto err_free_job;
}
@@ -527,21 +564,26 @@ static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *
return job;
}
-static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
+bool ivpu_job_handle_engine_error(struct ivpu_device *vdev, u32 job_id, u32 job_status)
{
- struct ivpu_job *job;
-
lockdep_assert_held(&vdev->submitted_jobs_lock);
- job = xa_load(&vdev->submitted_jobs_xa, job_id);
- if (!job)
- return -ENOENT;
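+ /*
+ * VPU_JSM_STATUS_PROCESSING_ERR and the ENGINE_RESET_REQUIRED range
+ * (0xE-0x1F, see vpu_jsm_api.h) trigger an engine reset; any other
+ * status completes the job without one.
+ */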
+ switch (job_status) {
+ case VPU_JSM_STATUS_PROCESSING_ERR:
+ case VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MIN ... VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MAX:
+ {
+ struct ivpu_job *job = xa_load(&vdev->submitted_jobs_xa, job_id);
- if (job_status == VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW) {
+ if (!job)
+ return false;
+
+ /* Trigger an engine reset */
guard(mutex)(&job->file_priv->lock);
+ job->job_status = job_status;
+
if (job->file_priv->has_mmu_faults)
- return 0;
+ return false;
/*
* Mark context as faulty and defer destruction of the job to jobs abort thread
@@ -549,23 +591,43 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
* status and ensure both are handled in the same way
*/
job->file_priv->has_mmu_faults = true;
- queue_work(system_wq, &vdev->context_abort_work);
- return 0;
+ queue_work(system_percpu_wq, &vdev->context_abort_work);
+ return true;
+ }
+ default:
+ /* Complete job with error status, engine reset not required */
+ break;
}
- job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
+ return false;
+}
+
+static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
+{
+ struct ivpu_job *job;
+
+ lockdep_assert_held(&vdev->submitted_jobs_lock);
+
+ job = xa_load(&vdev->submitted_jobs_xa, job_id);
if (!job)
return -ENOENT;
- if (job->file_priv->has_mmu_faults)
- job_status = DRM_IVPU_JOB_STATUS_ABORTED;
+ ivpu_job_remove_from_submitted_jobs(vdev, job_id);
- job->bos[CMD_BUF_IDX]->job_status = job_status;
+ if (job->job_status == VPU_JSM_STATUS_SUCCESS) {
+ if (job->file_priv->has_mmu_faults)
+ job->job_status = DRM_IVPU_JOB_STATUS_ABORTED;
+ else
+ job->job_status = job_status;
+ }
+
+ job->bos[CMD_BUF_IDX]->job_status = job->job_status;
dma_fence_signal(job->done_fence);
trace_job("done", job);
ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d cmdq_id %u engine %d status 0x%x\n",
- job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx, job_status);
+ job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx,
+ job->job_status);
ivpu_job_destroy(job);
ivpu_stop_job_timeout_detection(vdev);
@@ -625,7 +687,6 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
else
cmdq = ivpu_cmdq_acquire(file_priv, cmdq_id);
if (!cmdq) {
- ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d\n", file_priv->ctx.id);
ret = -EINVAL;
goto err_unlock;
}
@@ -636,6 +697,13 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
goto err_unlock;
}
+ ret = ivpu_preemption_job_init(vdev, file_priv, cmdq, job);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize preemption buffers for job %d: %d\n",
+ job->job_id, ret);
+ goto err_unlock;
+ }
+
job->cmdq_id = cmdq->id;
is_first_job = xa_empty(&vdev->submitted_jobs_xa);
@@ -689,7 +757,7 @@ err_unlock:
static int
ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
- u32 buf_count, u32 commands_offset)
+ u32 buf_count, u32 commands_offset, u32 preempt_buffer_index)
{
struct ivpu_file_priv *file_priv = job->file_priv;
struct ivpu_device *vdev = file_priv->vdev;
@@ -702,40 +770,58 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
for (i = 0; i < buf_count; i++) {
struct drm_gem_object *obj = drm_gem_object_lookup(file, buf_handles[i]);
- if (!obj)
+ if (!obj) {
+ ivpu_dbg(vdev, IOCTL, "Failed to lookup GEM object with handle %u\n",
+ buf_handles[i]);
return -ENOENT;
+ }
job->bos[i] = to_ivpu_bo(obj);
- ret = ivpu_bo_pin(job->bos[i]);
+ ret = ivpu_bo_bind(job->bos[i]);
if (ret)
return ret;
}
bo = job->bos[CMD_BUF_IDX];
if (!dma_resv_test_signaled(bo->base.base.resv, DMA_RESV_USAGE_READ)) {
- ivpu_warn(vdev, "Buffer is already in use\n");
+ ivpu_dbg(vdev, IOCTL, "Buffer is already in use by another job\n");
return -EBUSY;
}
if (commands_offset >= ivpu_bo_size(bo)) {
- ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset);
+ ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u for buffer size %zu\n",
+ commands_offset, ivpu_bo_size(bo));
return -EINVAL;
}
job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;
+ if (preempt_buffer_index) {
+ struct ivpu_bo *preempt_bo = job->bos[preempt_buffer_index];
+
+ if (ivpu_bo_size(preempt_bo) < ivpu_fw_preempt_buf_size(vdev)) {
+ ivpu_dbg(vdev, IOCTL, "Preemption buffer is too small\n");
+ return -EINVAL;
+ }
+ if (ivpu_bo_is_mappable(preempt_bo)) {
+ ivpu_dbg(vdev, IOCTL, "Preemption buffer cannot be mappable\n");
+ return -EINVAL;
+ }
+ job->primary_preempt_buf = preempt_bo;
+ }
+
ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
&acquire_ctx);
if (ret) {
- ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
+ ivpu_warn_ratelimited(vdev, "Failed to lock reservations: %d\n", ret);
return ret;
}
for (i = 0; i < buf_count; i++) {
ret = dma_resv_reserve_fences(job->bos[i]->base.base.resv, 1);
if (ret) {
- ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
+ ivpu_warn_ratelimited(vdev, "Failed to reserve fences: %d\n", ret);
goto unlock_reservations;
}
}
@@ -755,7 +841,7 @@ unlock_reservations:
static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, u32 cmdq_id,
u32 buffer_count, u32 engine, void __user *buffers_ptr, u32 cmds_offset,
- u8 priority)
+ u32 preempt_buffer_index, u8 priority)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_job *job;
@@ -782,16 +868,14 @@ static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv,
job = ivpu_job_create(file_priv, engine, buffer_count);
if (!job) {
- ivpu_err(vdev, "Failed to create job\n");
ret = -ENOMEM;
goto err_exit_dev;
}
- ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset);
- if (ret) {
- ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
+ ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset,
+ preempt_buffer_index);
+ if (ret)
goto err_destroy_job;
- }
down_read(&vdev->pm->reset_lock);
ret = ivpu_job_submit(job, priority, cmdq_id);
@@ -817,58 +901,91 @@ err_free_handles:
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
struct drm_ivpu_submit *args = data;
u8 priority;
- if (args->engine != DRM_IVPU_ENGINE_COMPUTE)
+ if (args->engine != DRM_IVPU_ENGINE_COMPUTE) {
+ ivpu_dbg(vdev, IOCTL, "Invalid engine %d\n", args->engine);
return -EINVAL;
+ }
- if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
+ if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) {
+ ivpu_dbg(vdev, IOCTL, "Invalid priority %d\n", args->priority);
return -EINVAL;
+ }
- if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT)
+ if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) {
+ ivpu_dbg(vdev, IOCTL, "Invalid buffer count %u\n", args->buffer_count);
return -EINVAL;
+ }
- if (!IS_ALIGNED(args->commands_offset, 8))
+ if (!IS_ALIGNED(args->commands_offset, 8)) {
+ ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u\n", args->commands_offset);
return -EINVAL;
+ }
- if (!file_priv->ctx.id)
+ if (!file_priv->ctx.id) {
+ ivpu_dbg(vdev, IOCTL, "Context not initialized\n");
return -EINVAL;
+ }
- if (file_priv->has_mmu_faults)
+ if (file_priv->has_mmu_faults) {
+ ivpu_dbg(vdev, IOCTL, "Context %u has MMU faults\n", file_priv->ctx.id);
return -EBADFD;
+ }
priority = ivpu_job_to_jsm_priority(args->priority);
return ivpu_submit(file, file_priv, 0, args->buffer_count, args->engine,
- (void __user *)args->buffers_ptr, args->commands_offset, priority);
+ (void __user *)args->buffers_ptr, args->commands_offset, 0, priority);
}
int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
struct drm_ivpu_cmdq_submit *args = data;
- if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) {
+ ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n");
return -ENODEV;
+ }
- if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID)
+ if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID) {
+ ivpu_dbg(vdev, IOCTL, "Invalid command queue ID %u\n", args->cmdq_id);
return -EINVAL;
+ }
- if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT)
+ if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) {
+ ivpu_dbg(vdev, IOCTL, "Invalid buffer count %u\n", args->buffer_count);
return -EINVAL;
+ }
- if (!IS_ALIGNED(args->commands_offset, 8))
+ if (args->preempt_buffer_index >= args->buffer_count) {
+ ivpu_dbg(vdev, IOCTL, "Invalid preemption buffer index %u\n",
+ args->preempt_buffer_index);
return -EINVAL;
+ }
- if (!file_priv->ctx.id)
+ if (!IS_ALIGNED(args->commands_offset, 8)) {
+ ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u\n", args->commands_offset);
return -EINVAL;
+ }
- if (file_priv->has_mmu_faults)
+ if (!file_priv->ctx.id) {
+ ivpu_dbg(vdev, IOCTL, "Context not initialized\n");
+ return -EINVAL;
+ }
+
+ if (file_priv->has_mmu_faults) {
+ ivpu_dbg(vdev, IOCTL, "Context %u has MMU faults\n", file_priv->ctx.id);
return -EBADFD;
+ }
return ivpu_submit(file, file_priv, args->cmdq_id, args->buffer_count, VPU_ENGINE_COMPUTE,
- (void __user *)args->buffers_ptr, args->commands_offset, 0);
+ (void __user *)args->buffers_ptr, args->commands_offset,
+ args->preempt_buffer_index, 0);
}
int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -879,11 +996,15 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
struct ivpu_cmdq *cmdq;
int ret;
- if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) {
+ ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n");
return -ENODEV;
+ }
- if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
+ if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) {
+ ivpu_dbg(vdev, IOCTL, "Invalid priority %d\n", args->priority);
return -EINVAL;
+ }
ret = ivpu_rpm_get(vdev);
if (ret < 0)
@@ -891,7 +1012,7 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
mutex_lock(&file_priv->lock);
- cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), false);
+ cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), args->flags);
if (cmdq)
args->cmdq_id = cmdq->id;
@@ -911,8 +1032,10 @@ int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file
u32 cmdq_id = 0;
int ret;
- if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) {
+ ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n");
return -ENODEV;
+ }
ret = ivpu_rpm_get(vdev);
if (ret < 0)
@@ -959,7 +1082,9 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
mutex_lock(&vdev->submitted_jobs_lock);
- ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
+ if (!ivpu_job_handle_engine_error(vdev, payload->job_id, payload->job_status))
+ /* No engine error, complete the job normally */
+ ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
mutex_unlock(&vdev->submitted_jobs_lock);
}
@@ -987,7 +1112,7 @@ void ivpu_context_abort_work_fn(struct work_struct *work)
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
if (ivpu_jsm_reset_engine(vdev, 0))
- return;
+ goto runtime_put;
mutex_lock(&vdev->context_list_lock);
xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
@@ -1011,7 +1136,7 @@ void ivpu_context_abort_work_fn(struct work_struct *work)
goto runtime_put;
if (ivpu_jsm_hws_resume_engine(vdev, 0))
- return;
+ goto runtime_put;
/*
* In hardware scheduling mode NPU already has stopped processing jobs
* and won't send us any further notifications, thus we have to free job related resources
@@ -1024,6 +1149,5 @@ void ivpu_context_abort_work_fn(struct work_struct *work)
mutex_unlock(&vdev->submitted_jobs_lock);
runtime_put:
- pm_runtime_mark_last_busy(vdev->drm.dev);
pm_runtime_put_autosuspend(vdev->drm.dev);
}
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
index 2e301c2eea7b..3ab61e6a5616 100644
--- a/drivers/accel/ivpu/ivpu_job.h
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_JOB_H__
@@ -15,12 +15,17 @@ struct ivpu_device;
struct ivpu_file_priv;
/**
- * struct ivpu_cmdq - Object representing device queue used to send jobs.
- * @jobq: Pointer to job queue memory shared with the device
- * @mem: Memory allocated for the job queue, shared with device
- * @entry_count Number of job entries in the queue
- * @db_id: Doorbell assigned to this job queue
- * @db_registered: True if doorbell is registered in device
+ * struct ivpu_cmdq - Represents a command queue for submitting jobs to the VPU.
+ * Tracks queue memory, preemption buffers, and metadata for job management.
+ * @jobq: Pointer to job queue memory shared with the device
+ * @primary_preempt_buf: Primary preemption buffer for this queue (optional)
+ * @secondary_preempt_buf: Secondary preemption buffer for this queue (optional)
+ * @mem: Memory allocated for the job queue, shared with device
+ * @entry_count: Number of job entries in the queue
+ * @id: Unique command queue ID
+ * @db_id: Doorbell ID assigned to this job queue
+ * @priority: Priority level of the command queue
+ * @is_legacy: True if this is a legacy command queue
*/
struct ivpu_cmdq {
struct vpu_job_queue *jobq;
@@ -35,16 +40,22 @@ struct ivpu_cmdq {
};
/**
- * struct ivpu_job - KMD object that represents batchbuffer / DMA buffer.
- * Each batch / DMA buffer is a job to be submitted and executed by the VPU FW.
- * This is a unit of execution, and be tracked by the job_id for
- * any status reporting from VPU FW through IPC JOB RET/DONE message.
- * @file_priv: The client that submitted this job
- * @job_id: Job ID for KMD tracking and job status reporting from VPU FW
- * @status: Status of the Job from IPC JOB RET/DONE message
- * @batch_buffer: CPU vaddr points to the batch buffer memory allocated for the job
- * @submit_status_offset: Offset within batch buffer where job completion handler
- will update the job status
+ * struct ivpu_job - Represents a batch or DMA buffer submitted to the VPU.
+ * Each job is a unit of execution, tracked by job_id for status reporting from VPU FW.
+ * The structure holds all resources and metadata needed for job submission, execution,
+ * and completion handling.
+ * @vdev: Pointer to the VPU device
+ * @file_priv: The client context that submitted this job
+ * @done_fence: Fence signaled when job completes
+ * @cmd_buf_vpu_addr: VPU address of the command buffer for this job
+ * @cmdq_id: Command queue ID used for submission
+ * @job_id: Unique job ID for tracking and status reporting
+ * @engine_idx: Engine index for job execution
+ * @job_status: Status reported by firmware for this job
+ * @primary_preempt_buf: Primary preemption buffer for the job
+ * @secondary_preempt_buf: Secondary preemption buffer for the job (optional)
+ * @bo_count: Number of buffer objects associated with this job
+ * @bos: Array of buffer objects used by the job (batch buffer is at index 0)
*/
struct ivpu_job {
struct ivpu_device *vdev;
@@ -54,6 +65,9 @@ struct ivpu_job {
u32 cmdq_id;
u32 job_id;
u32 engine_idx;
+ u32 job_status;
+ struct ivpu_bo *primary_preempt_buf;
+ struct ivpu_bo *secondary_preempt_buf;
size_t bo_count;
struct ivpu_bo *bos[] __counted_by(bo_count);
};
@@ -71,6 +85,7 @@ void ivpu_cmdq_abort_all_jobs(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
void ivpu_job_done_consumer_fini(struct ivpu_device *vdev);
+bool ivpu_job_handle_engine_error(struct ivpu_device *vdev, u32 job_id, u32 job_status);
void ivpu_context_abort_work_fn(struct work_struct *work);
void ivpu_jobs_abort_all(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index 5ea010568faa..e1baf6b64935 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -970,7 +970,7 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
}
}
- queue_work(system_wq, &vdev->context_abort_work);
+ queue_work(system_percpu_wq, &vdev->context_abort_work);
}
void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index f0267efa55aa..87ad593ef47d 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -430,7 +430,7 @@ static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_a
int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
- u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
+ u64 vpu_addr, struct sg_table *sgt, bool llc_coherent, bool read_only)
{
size_t start_vpu_addr = vpu_addr;
struct scatterlist *sg;
@@ -450,6 +450,8 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
prot = IVPU_MMU_ENTRY_MAPPED;
if (llc_coherent)
prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;
+ if (read_only)
+ prot |= IVPU_MMU_ENTRY_FLAG_RO;
mutex_lock(&ctx->lock);
@@ -527,7 +529,8 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
if (ret)
- ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
+ ivpu_warn_ratelimited(vdev, "Failed to invalidate TLB for ctx %u: %d\n",
+ ctx->id, ret);
}
int
@@ -568,7 +571,7 @@ void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
mutex_init(&ctx->lock);
if (!context_id) {
- start = vdev->hw->ranges.global.start;
+ start = vdev->hw->ranges.runtime.start;
end = vdev->hw->ranges.shave.end;
} else {
start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start);
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h
index f255310968cf..663a11a9db11 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.h
+++ b/drivers/accel/ivpu/ivpu_mmu_context.h
@@ -42,7 +42,7 @@ int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu
void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node);
int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
- u64 vpu_addr, struct sg_table *sgt, bool llc_coherent);
+ u64 vpu_addr, struct sg_table *sgt, bool llc_coherent, bool read_only);
void ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, struct sg_table *sgt);
int ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
diff --git a/drivers/accel/ivpu/ivpu_ms.c b/drivers/accel/ivpu/ivpu_ms.c
index 2a043baf10ca..1d9c1cb17924 100644
--- a/drivers/accel/ivpu/ivpu_ms.c
+++ b/drivers/accel/ivpu/ivpu_ms.c
@@ -8,6 +8,7 @@
#include "ivpu_drv.h"
#include "ivpu_gem.h"
+#include "ivpu_hw.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_ms.h"
#include "ivpu_pm.h"
@@ -37,8 +38,8 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
struct drm_ivpu_metric_streamer_start *args = data;
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_ms_instance *ms;
- u64 single_buff_size;
u32 sample_size;
+ u64 buf_size;
int ret;
if (!args->metric_group_mask || !args->read_period_samples ||
@@ -52,7 +53,8 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
mutex_lock(&file_priv->ms_lock);
if (get_instance_by_mask(file_priv, args->metric_group_mask)) {
- ivpu_err(vdev, "Instance already exists (mask %#llx)\n", args->metric_group_mask);
+ ivpu_dbg(vdev, IOCTL, "Instance already exists (mask %#llx)\n",
+ args->metric_group_mask);
ret = -EALREADY;
goto unlock;
}
@@ -69,12 +71,18 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
if (ret)
goto err_free_ms;
- single_buff_size = sample_size *
- ((u64)args->read_period_samples * MS_READ_PERIOD_MULTIPLIER);
- ms->bo = ivpu_bo_create_global(vdev, PAGE_ALIGN(single_buff_size * MS_NUM_BUFFERS),
- DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
+ buf_size = PAGE_ALIGN((u64)args->read_period_samples * sample_size *
+ MS_READ_PERIOD_MULTIPLIER * MS_NUM_BUFFERS);
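+ /*
+ * Illustrative numbers only: 1024 samples x 64 B sample x multiplier 2
+ * x 2 buffers = 256 KiB; the real factors come from the sample size
+ * reported by FW, MS_READ_PERIOD_MULTIPLIER and MS_NUM_BUFFERS.
+ */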
+ if (buf_size > ivpu_hw_range_size(&vdev->hw->ranges.global)) {
+ ivpu_dbg(vdev, IOCTL, "Requested MS buffer size %llu exceeds range size %llu\n",
+ buf_size, ivpu_hw_range_size(&vdev->hw->ranges.global));
+ ret = -EINVAL;
+ goto err_free_ms;
+ }
+
+ ms->bo = ivpu_bo_create_global(vdev, buf_size, DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
if (!ms->bo) {
- ivpu_err(vdev, "Failed to allocate MS buffer (size %llu)\n", single_buff_size);
+ ivpu_dbg(vdev, IOCTL, "Failed to allocate MS buffer (size %llu)\n", buf_size);
ret = -ENOMEM;
goto err_free_ms;
}
@@ -175,7 +183,8 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *
ms = get_instance_by_mask(file_priv, args->metric_group_mask);
if (!ms) {
- ivpu_err(vdev, "Instance doesn't exist for mask: %#llx\n", args->metric_group_mask);
+ ivpu_dbg(vdev, IOCTL, "Instance doesn't exist for mask: %#llx\n",
+ args->metric_group_mask);
ret = -EINVAL;
goto unlock;
}
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index ea30db181cd7..480c075d87f6 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -33,8 +33,11 @@ static unsigned long ivpu_tdr_timeout_ms;
module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");
+static unsigned long ivpu_inference_timeout_ms;
+module_param_named(inference_timeout_ms, ivpu_inference_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(inference_timeout_ms, "Maximum inference duration, in milliseconds, 0 - default");
+
#define PM_RESCHEDULE_LIMIT 5
-#define PM_TDR_HEARTBEAT_LIMIT 30
static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
{
@@ -51,7 +54,7 @@ static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
- struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem);
+ struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem_bp);
if (!bp->save_restore_ret_address) {
ivpu_pm_prepare_cold_boot(vdev);
@@ -183,7 +186,7 @@ void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) {
ivpu_hw_diagnose_failure(vdev);
ivpu_hw_irq_disable(vdev); /* Disable IRQ early to protect from IRQ storm */
- queue_work(system_unbound_wq, &vdev->pm->recovery_work);
+ queue_work(system_dfl_wq, &vdev->pm->recovery_work);
}
}
@@ -191,6 +194,10 @@ static void ivpu_job_timeout_work(struct work_struct *work)
{
struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
struct ivpu_device *vdev = pm->vdev;
+ unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
+ unsigned long inference_timeout_ms = ivpu_inference_timeout_ms ? ivpu_inference_timeout_ms :
+ vdev->timeout.inference;
+ u64 inference_max_retries;
u64 heartbeat;
if (ivpu_jsm_get_heartbeat(vdev, 0, &heartbeat) || heartbeat <= vdev->fw->last_heartbeat) {
@@ -198,8 +205,10 @@ static void ivpu_job_timeout_work(struct work_struct *work)
goto recovery;
}
- if (atomic_fetch_inc(&vdev->job_timeout_counter) > PM_TDR_HEARTBEAT_LIMIT) {
- ivpu_err(vdev, "Job timeout detected, heartbeat limit exceeded\n");
+ inference_max_retries = DIV_ROUND_UP(inference_timeout_ms, timeout_ms);
+ if (atomic_fetch_inc(&vdev->job_timeout_counter) >= inference_max_retries) {
+ ivpu_err(vdev, "Job timeout detected, heartbeat limit (%lld) exceeded\n",
+ inference_max_retries);
goto recovery;
}
@@ -217,7 +226,8 @@ void ivpu_start_job_timeout_detection(struct ivpu_device *vdev)
unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
/* No-op if already queued */
- queue_delayed_work(system_wq, &vdev->pm->job_timeout_work, msecs_to_jiffies(timeout_ms));
+ queue_delayed_work(system_percpu_wq, &vdev->pm->job_timeout_work,
+ msecs_to_jiffies(timeout_ms));
}
void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev)
@@ -350,7 +360,6 @@ int ivpu_rpm_get(struct ivpu_device *vdev)
void ivpu_rpm_put(struct ivpu_device *vdev)
{
- pm_runtime_mark_last_busy(vdev->drm.dev);
pm_runtime_put_autosuspend(vdev->drm.dev);
}
@@ -408,10 +417,10 @@ void ivpu_pm_init(struct ivpu_device *vdev)
ivpu_dbg(vdev, PM, "Autosuspend delay = %d\n", delay);
}
-void ivpu_pm_cancel_recovery(struct ivpu_device *vdev)
+void ivpu_pm_disable_recovery(struct ivpu_device *vdev)
{
drm_WARN_ON(&vdev->drm, delayed_work_pending(&vdev->pm->job_timeout_work));
- cancel_work_sync(&vdev->pm->recovery_work);
+ disable_work_sync(&vdev->pm->recovery_work);
}
void ivpu_pm_enable(struct ivpu_device *vdev)
@@ -419,7 +428,6 @@ void ivpu_pm_enable(struct ivpu_device *vdev)
struct device *dev = vdev->drm.dev;
pm_runtime_allow(dev);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
@@ -493,6 +501,11 @@ void ivpu_pm_irq_dct_work_fn(struct work_struct *work)
else
ret = ivpu_pm_dct_disable(vdev);
- if (!ret)
- ivpu_hw_btrs_dct_set_status(vdev, enable, vdev->pm->dct_active_percent);
+ if (!ret) {
+ /* Convert percent to U1.7 format */
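+ /* e.g. 100% maps to 128 (1.0), 50% to 64, 25% to 32 */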
+ u8 val = DIV_ROUND_CLOSEST(vdev->pm->dct_active_percent * 128, 100);
+
+ ivpu_hw_btrs_dct_set_status(vdev, enable, val);
+ }
}
diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h
index 89b264cc0e3e..a2aa7a27f32e 100644
--- a/drivers/accel/ivpu/ivpu_pm.h
+++ b/drivers/accel/ivpu/ivpu_pm.h
@@ -25,7 +25,7 @@ struct ivpu_pm_info {
void ivpu_pm_init(struct ivpu_device *vdev);
void ivpu_pm_enable(struct ivpu_device *vdev);
void ivpu_pm_disable(struct ivpu_device *vdev);
-void ivpu_pm_cancel_recovery(struct ivpu_device *vdev);
+void ivpu_pm_disable_recovery(struct ivpu_device *vdev);
int ivpu_pm_suspend_cb(struct device *dev);
int ivpu_pm_resume_cb(struct device *dev);
diff --git a/drivers/accel/ivpu/ivpu_sysfs.c b/drivers/accel/ivpu/ivpu_sysfs.c
index 268ab7744a8b..d250a10caca9 100644
--- a/drivers/accel/ivpu/ivpu_sysfs.c
+++ b/drivers/accel/ivpu/ivpu_sysfs.c
@@ -63,7 +63,8 @@ npu_memory_utilization_show(struct device *dev, struct device_attribute *attr, c
mutex_lock(&vdev->bo_list_lock);
list_for_each_entry(bo, &vdev->bo_list, bo_list_node)
- total_npu_memory += bo->base.base.size;
+ if (ivpu_bo_is_resident(bo))
+ total_npu_memory += ivpu_bo_size(bo);
mutex_unlock(&vdev->bo_list_lock);
return sysfs_emit(buf, "%lld\n", total_npu_memory);
diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h
index 4b6b2b3d2583..bca6a44dc041 100644
--- a/drivers/accel/ivpu/vpu_jsm_api.h
+++ b/drivers/accel/ivpu/vpu_jsm_api.h
@@ -1,15 +1,16 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright (c) 2020-2024, Intel Corporation.
+ * Copyright (c) 2020-2025, Intel Corporation.
+ */
+
+/**
+ * @addtogroup Jsm
+ * @{
*/
/**
* @file
* @brief JSM shared definitions
- *
- * @ingroup Jsm
- * @brief JSM shared definitions
- * @{
*/
#ifndef VPU_JSM_API_H
#define VPU_JSM_API_H
@@ -22,7 +23,7 @@
/*
* Minor version changes when API backward compatibility is preserved.
*/
-#define VPU_JSM_API_VER_MINOR 29
+#define VPU_JSM_API_VER_MINOR 33
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
@@ -71,9 +72,15 @@
#define VPU_JSM_STATUS_MVNCI_OUT_OF_RESOURCES 0xAU
#define VPU_JSM_STATUS_MVNCI_NOT_IMPLEMENTED 0xBU
#define VPU_JSM_STATUS_MVNCI_INTERNAL_ERROR 0xCU
-/* Job status returned when the job was preempted mid-inference */
+/* @deprecated (use VPU_JSM_STATUS_PREEMPTED_MID_COMMAND instead) */
#define VPU_JSM_STATUS_PREEMPTED_MID_INFERENCE 0xDU
+/* Job status returned when the job was preempted mid-command */
+#define VPU_JSM_STATUS_PREEMPTED_MID_COMMAND 0xDU
+/* Range of status codes that require engine reset */
+#define VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MIN 0xEU
#define VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW 0xEU
+#define VPU_JSM_STATUS_MVNCI_PREEMPTION_TIMED_OUT 0xFU
+#define VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MAX 0x1FU
/*
* Host <-> VPU IPC channels.
@@ -134,11 +141,21 @@ enum {
* 2. Native fence queues are only supported on VPU 40xx onwards.
*/
VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U),
-
/*
* Enable turbo mode for testing NPU performance; not recommended for regular usage.
*/
- VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U)
+ VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U),
+ /*
+ * Queue error detection mode flag.
+ * For 'interactive' queues (this bit not set), the FW marks a queue as in
+ * error, as part of the engine reset sequence, if it has not completed a
+ * job within the TDR timeout.
+ * For 'non-interactive' queues (this bit set), the FW marks a queue as in
+ * error if it has not progressed its heartbeat within the non-interactive
+ * no-progress timeout. Additionally, an upper limit applies to these
+ * queues: even if the heartbeat keeps progressing, a queue that runs
+ * longer than the non-interactive timeout is also marked as in error.
+ */
+ VPU_JOB_QUEUE_FLAGS_NON_INTERACTIVE = (1 << 3U)
};
/*
@@ -209,7 +226,7 @@ enum {
*/
#define VPU_INLINE_CMD_TYPE_FENCE_SIGNAL 0x2
-/*
+/**
* Job scheduling priority bands for both hardware scheduling and OS scheduling.
*/
enum vpu_job_scheduling_priority_band {
@@ -220,16 +237,16 @@ enum vpu_job_scheduling_priority_band {
VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT = 4,
};
-/*
+/**
* Job format.
* Jobs defines the actual workloads to be executed by a given engine.
*/
struct vpu_job_queue_entry {
- /**< Address of VPU commands batch buffer */
+ /** Address of VPU commands batch buffer */
u64 batch_buf_addr;
- /**< Job ID */
+ /** Job ID */
u32 job_id;
- /**< Flags bit field, see VPU_JOB_FLAGS_* above */
+ /** Flags bit field, see VPU_JOB_FLAGS_* above */
u32 flags;
/**
* Doorbell ring timestamp taken by KMD from SoC's global system clock, in
@@ -237,20 +254,20 @@ struct vpu_job_queue_entry {
* to match other profiling timestamps.
*/
u64 doorbell_timestamp;
- /**< Extra id for job tracking, used only in the firmware perf traces */
+ /** Extra id for job tracking, used only in the firmware perf traces */
u64 host_tracking_id;
- /**< Address of the primary preemption buffer to use for this job */
+ /** Address of the primary preemption buffer to use for this job */
u64 primary_preempt_buf_addr;
- /**< Size of the primary preemption buffer to use for this job */
+ /** Size of the primary preemption buffer to use for this job */
u32 primary_preempt_buf_size;
- /**< Size of secondary preemption buffer to use for this job */
+ /** Size of secondary preemption buffer to use for this job */
u32 secondary_preempt_buf_size;
- /**< Address of secondary preemption buffer to use for this job */
+ /** Address of secondary preemption buffer to use for this job */
u64 secondary_preempt_buf_addr;
u64 reserved_0;
};
-/*
+/**
* Inline command format.
* Inline commands are the commands executed at scheduler level (typically,
* synchronization directives). Inline command and job objects must be of
@@ -258,34 +275,36 @@ struct vpu_job_queue_entry {
*/
struct vpu_inline_cmd {
u64 reserved_0;
- /* Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */
+ /** Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */
u32 type;
- /* Flags bit field, see VPU_JOB_FLAGS_* above. */
+ /** Flags bit field, see VPU_JOB_FLAGS_* above. */
u32 flags;
- /* Inline command payload. Depends on inline command type. */
- union {
- /* Fence (wait and signal) commands' payload. */
- struct {
- /* Fence object handle. */
+ /** Inline command payload. Depends on inline command type. */
+ union payload {
+ /** Fence (wait and signal) commands' payload. */
+ struct fence {
+ /** Fence object handle. */
u64 fence_handle;
- /* User VA of the current fence value. */
+ /** User VA of the current fence value. */
u64 current_value_va;
- /* User VA of the monitored fence value (read-only). */
+ /** User VA of the monitored fence value (read-only). */
u64 monitored_value_va;
- /* Value to wait for or write in fence location. */
+ /** Value to wait for or write in fence location. */
u64 value;
- /* User VA of the log buffer in which to add log entry on completion. */
+ /** User VA of the log buffer in which to add log entry on completion. */
u64 log_buffer_va;
- /* NPU private data. */
+ /** NPU private data. */
u64 npu_private_data;
} fence;
- /* Other commands do not have a payload. */
- /* Payload definition for future inline commands can be inserted here. */
+ /**
+ * Other commands do not have a payload.
+ * Payload definitions for future inline commands can be inserted here.
+ */
u64 reserved_1[6];
} payload;
};
-/*
+/**
* Job queue slots can be populated either with job objects or inline command objects.
*/
union vpu_jobq_slot {
@@ -293,7 +312,7 @@ union vpu_jobq_slot {
struct vpu_inline_cmd inline_cmd;
};
-/*
+/**
* Job queue control registers.
*/
struct vpu_job_queue_header {
@@ -301,18 +320,18 @@ struct vpu_job_queue_header {
u32 head;
u32 tail;
u32 flags;
- /* Set to 1 to indicate priority_band field is valid */
+ /** Set to 1 to indicate priority_band field is valid */
u32 priority_band_valid;
- /*
+ /**
* Priority for the work of this job queue, valid only if the HWS is NOT used
- * and the `priority_band_valid` is set to 1. It is applied only during
- * the VPU_JSM_MSG_REGISTER_DB message processing.
- * The device firmware might use the `priority_band` to optimize the power
+ * and the @ref priority_band_valid is set to 1. It is applied only during
+ * the @ref VPU_JSM_MSG_REGISTER_DB message processing.
+ * The device firmware might use the priority_band to optimize the power
* management logic, but it will not affect the order of jobs.
* Available priority bands: @see enum vpu_job_scheduling_priority_band
*/
u32 priority_band;
- /* Inside realtime band assigns a further priority, limited to 0..31 range */
+ /** Inside realtime band assigns a further priority, limited to 0..31 range */
u32 realtime_priority_level;
u32 reserved_0[9];
};
@@ -337,16 +356,16 @@ enum vpu_trace_entity_type {
VPU_TRACE_ENTITY_TYPE_HW_COMPONENT = 2,
};
-/*
+/**
* HWS specific log buffer header details.
* Total size is 32 bytes.
*/
struct vpu_hws_log_buffer_header {
- /* Written by VPU after adding a log entry. Initialised by host to 0. */
+ /** Written by VPU after adding a log entry. Initialised by host to 0. */
u32 first_free_entry_index;
- /* Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */
+ /** Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */
u32 wraparound_count;
- /*
+ /**
* This is the number of buffers that can be stored in the log buffer provided by the host.
* It is written by host before passing buffer to VPU. VPU should consider it read-only.
*/
@@ -354,14 +373,14 @@ struct vpu_hws_log_buffer_header {
u64 reserved[2];
};
-/*
+/**
* HWS specific log buffer entry details.
* Total size is 32 bytes.
*/
struct vpu_hws_log_buffer_entry {
- /* VPU timestamp must be an invariant timer tick (not impacted by DVFS) */
+ /** VPU timestamp must be an invariant timer tick (not impacted by DVFS) */
u64 vpu_timestamp;
- /*
+ /**
* Operation type:
* 0 - context state change
* 1 - queue new work
@@ -371,7 +390,7 @@ struct vpu_hws_log_buffer_entry {
*/
u32 operation_type;
u32 reserved;
- /* Operation data depends on operation type */
+ /** Operation data depends on operation type */
u64 operation_data[2];
};
@@ -381,51 +400,54 @@ enum vpu_hws_native_fence_log_type {
VPU_HWS_NATIVE_FENCE_LOG_TYPE_SIGNALS = 2
};
-/* HWS native fence log buffer header. */
+/** HWS native fence log buffer header. */
struct vpu_hws_native_fence_log_header {
union {
struct {
- /* Index of the first free entry in buffer. */
+ /** Index of the first free entry in buffer. */
u32 first_free_entry_idx;
- /* Incremented each time NPU wraps around the buffer to write next entry. */
+ /**
+ * Incremented whenever the NPU wraps around the buffer and writes
+ * to the first entry again.
+ */
u32 wraparound_count;
};
- /* Field allowing atomic update of both fields above. */
+ /** Field allowing atomic update of both fields above. */
u64 atomic_wraparound_and_entry_idx;
};
- /* Log buffer type, see enum vpu_hws_native_fence_log_type. */
+ /** Log buffer type, see enum vpu_hws_native_fence_log_type. */
u64 type;
- /* Allocated number of entries in the log buffer. */
+ /** Allocated number of entries in the log buffer. */
u64 entry_nb;
u64 reserved[2];
};
-/* Native fence log operation types. */
+/** Native fence log operation types. */
enum vpu_hws_native_fence_log_op {
VPU_HWS_NATIVE_FENCE_LOG_OP_SIGNAL_EXECUTED = 0,
VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED = 1
};
-/* HWS native fence log entry. */
+/** HWS native fence log entry. */
struct vpu_hws_native_fence_log_entry {
- /* Newly signaled/unblocked fence value. */
+ /** Newly signaled/unblocked fence value. */
u64 fence_value;
- /* Native fence object handle to which this operation belongs. */
+ /** Native fence object handle to which this operation belongs. */
u64 fence_handle;
- /* Operation type, see enum vpu_hws_native_fence_log_op. */
+ /** Operation type, see enum vpu_hws_native_fence_log_op. */
u64 op_type;
u64 reserved_0;
- /*
+ /**
* VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED only: Timestamp at which fence
* wait was started (in NPU SysTime).
*/
u64 fence_wait_start_ts;
u64 reserved_1;
- /* Timestamp at which fence operation was completed (in NPU SysTime). */
+ /** Timestamp at which fence operation was completed (in NPU SysTime). */
u64 fence_end_ts;
};
-/* Native fence log buffer. */
+/** Native fence log buffer. */
struct vpu_hws_native_fence_log_buffer {
struct vpu_hws_native_fence_log_header header;
struct vpu_hws_native_fence_log_entry entry[];
@@ -435,10 +457,17 @@ struct vpu_hws_native_fence_log_buffer {
* Host <-> VPU IPC messages types.
*/
enum vpu_ipc_msg_type {
+ /** Unsupported command */
VPU_JSM_MSG_UNKNOWN = 0xFFFFFFFF,
- /* IPC Host -> Device, Async commands */
+ /** IPC Host -> Device, base id for async commands */
VPU_JSM_MSG_ASYNC_CMD = 0x1100,
+ /**
+ * Reset engine. The NPU cancels all jobs currently executing on the target
+ * engine, lets the engine go idle, and then performs a HW reset before
+ * returning to the host.
+ * @see struct vpu_ipc_msg_payload_engine_reset
+ */
VPU_JSM_MSG_ENGINE_RESET = VPU_JSM_MSG_ASYNC_CMD,
/**
* Preempt engine. The NPU stops (preempts) all the jobs currently
@@ -448,10 +477,24 @@ enum vpu_ipc_msg_type {
* the target engine, but it stops processing them (until the queue doorbell
* is rung again); the host is responsible to reset the job queue, either
* after preemption or when resubmitting jobs to the queue.
+ * @see vpu_ipc_msg_payload_engine_preempt
*/
VPU_JSM_MSG_ENGINE_PREEMPT = 0x1101,
+ /**
+ * OS scheduling doorbell register command
+ * @see vpu_ipc_msg_payload_register_db
+ */
VPU_JSM_MSG_REGISTER_DB = 0x1102,
+ /**
+ * OS scheduling doorbell unregister command
+ * @see vpu_ipc_msg_payload_unregister_db
+ */
VPU_JSM_MSG_UNREGISTER_DB = 0x1103,
+ /**
+ * Query engine heartbeat. The heartbeat is expected to increase
+ * monotonically while the NPU is making progress on submitted work.
+ * @see vpu_ipc_msg_payload_query_engine_hb
+ */
VPU_JSM_MSG_QUERY_ENGINE_HB = 0x1104,
VPU_JSM_MSG_GET_POWER_LEVEL_COUNT = 0x1105,
VPU_JSM_MSG_GET_POWER_LEVEL = 0x1106,
@@ -477,6 +520,7 @@ enum vpu_ipc_msg_type {
* aborted and removed from internal scheduling queues. All doorbells assigned
* to the host_ssid are unregistered and any internal FW resources belonging to
* the host_ssid are released.
+ * @see vpu_ipc_msg_payload_ssid_release
*/
VPU_JSM_MSG_SSID_RELEASE = 0x110e,
/**
@@ -504,43 +548,78 @@ enum vpu_ipc_msg_type {
* @see vpu_jsm_metric_streamer_start
*/
VPU_JSM_MSG_METRIC_STREAMER_INFO = 0x1112,
- /** Control command: Priority band setup */
+ /**
+ * Control command: Priority band setup
+ * @see vpu_ipc_msg_payload_hws_priority_band_setup
+ */
VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP = 0x1113,
- /** Control command: Create command queue */
+ /**
+ * Control command: Create command queue
+ * @see vpu_ipc_msg_payload_hws_create_cmdq
+ */
VPU_JSM_MSG_CREATE_CMD_QUEUE = 0x1114,
- /** Control command: Destroy command queue */
+ /**
+ * Control command: Destroy command queue
+ * @see vpu_ipc_msg_payload_hws_destroy_cmdq
+ */
VPU_JSM_MSG_DESTROY_CMD_QUEUE = 0x1115,
- /** Control command: Set context scheduling properties */
+ /**
+ * Control command: Set context scheduling properties
+ * @see vpu_ipc_msg_payload_hws_set_context_sched_properties
+ */
VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES = 0x1116,
- /*
+ /**
* Register a doorbell to notify VPU of new work. The doorbell may later be
* deallocated or reassigned to another context.
+ * @see vpu_jsm_hws_register_db
*/
VPU_JSM_MSG_HWS_REGISTER_DB = 0x1117,
- /** Control command: Log buffer setting */
+ /**
+ * Control command: Log buffer setting
+ * @see vpu_ipc_msg_payload_hws_set_scheduling_log
+ */
VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG = 0x1118,
- /* Control command: Suspend command queue. */
+ /**
+ * Control command: Suspend command queue.
+ * @see vpu_ipc_msg_payload_hws_suspend_cmdq
+ */
VPU_JSM_MSG_HWS_SUSPEND_CMDQ = 0x1119,
- /* Control command: Resume command queue */
+ /**
+ * Control command: Resume command queue
+ * @see vpu_ipc_msg_payload_hws_resume_cmdq
+ */
VPU_JSM_MSG_HWS_RESUME_CMDQ = 0x111a,
- /* Control command: Resume engine after reset */
+ /**
+ * Control command: Resume engine after reset
+ * @see vpu_ipc_msg_payload_hws_resume_engine
+ */
VPU_JSM_MSG_HWS_ENGINE_RESUME = 0x111b,
- /* Control command: Enable survivability/DCT mode */
+ /**
+ * Control command: Enable survivability/DCT mode
+ * @see vpu_ipc_msg_payload_pwr_dct_control
+ */
VPU_JSM_MSG_DCT_ENABLE = 0x111c,
- /* Control command: Disable survivability/DCT mode */
+ /**
+ * Control command: Disable survivability/DCT mode
+ * This command has no payload
+ */
VPU_JSM_MSG_DCT_DISABLE = 0x111d,
/**
* Dump VPU state. To be used for debug purposes only.
- * NOTE: Please introduce new ASYNC commands before this one. *
+ * This command has no payload.
+ * NOTE: Please introduce new ASYNC commands before this one.
*/
VPU_JSM_MSG_STATE_DUMP = 0x11FF,
- /* IPC Host -> Device, General commands */
+ /** IPC Host -> Device, base id for general commands */
VPU_JSM_MSG_GENERAL_CMD = 0x1200,
+ /** Unsupported command */
VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED = VPU_JSM_MSG_GENERAL_CMD,
/**
* Control dyndbg behavior by executing a dyndbg command; equivalent to
- * Linux command: `echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control`.
+ * Linux command:
+ * @verbatim echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control @endverbatim
+ * @see vpu_ipc_msg_payload_dyndbg_control
*/
VPU_JSM_MSG_DYNDBG_CONTROL = 0x1201,
/**
@@ -548,17 +627,35 @@ enum vpu_ipc_msg_type {
*/
VPU_JSM_MSG_PWR_D0I3_ENTER = 0x1202,
- /* IPC Device -> Host, Job completion */
+ /**
+ * IPC Device -> Host, Job completion
+ * @see struct vpu_ipc_msg_payload_job_done
+ */
VPU_JSM_MSG_JOB_DONE = 0x2100,
- /* IPC Device -> Host, Fence signalled */
+ /**
+ * IPC Device -> Host, Fence signalled
+ * @see vpu_ipc_msg_payload_native_fence_signalled
+ */
VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED = 0x2101,
/* IPC Device -> Host, Async command completion */
VPU_JSM_MSG_ASYNC_CMD_DONE = 0x2200,
+ /**
+ * IPC Device -> Host, engine reset complete
+ * @see vpu_ipc_msg_payload_engine_reset_done
+ */
VPU_JSM_MSG_ENGINE_RESET_DONE = VPU_JSM_MSG_ASYNC_CMD_DONE,
+ /**
+ * Preempt complete message
+ * @see vpu_ipc_msg_payload_engine_preempt_done
+ */
VPU_JSM_MSG_ENGINE_PREEMPT_DONE = 0x2201,
VPU_JSM_MSG_REGISTER_DB_DONE = 0x2202,
VPU_JSM_MSG_UNREGISTER_DB_DONE = 0x2203,
+ /**
+ * Response to query engine heartbeat.
+ * @see vpu_ipc_msg_payload_query_engine_hb_done
+ */
VPU_JSM_MSG_QUERY_ENGINE_HB_DONE = 0x2204,
VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE = 0x2205,
VPU_JSM_MSG_GET_POWER_LEVEL_DONE = 0x2206,
@@ -575,7 +672,10 @@ enum vpu_ipc_msg_type {
VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP = 0x220c,
/** Response to VPU_JSM_MSG_TRACE_GET_NAME. */
VPU_JSM_MSG_TRACE_GET_NAME_RSP = 0x220d,
- /** Response to VPU_JSM_MSG_SSID_RELEASE. */
+ /**
+ * Response to VPU_JSM_MSG_SSID_RELEASE.
+ * @see vpu_ipc_msg_payload_ssid_release
+ */
VPU_JSM_MSG_SSID_RELEASE_DONE = 0x220e,
/**
* Response to VPU_JSM_MSG_METRIC_STREAMER_START.
@@ -605,37 +705,71 @@ enum vpu_ipc_msg_type {
/**
* Asynchronous event sent from the VPU to the host either when the current
* metric buffer is full or when the VPU has collected a multiple of
- * @notify_sample_count samples as indicated through the start command
- * (VPU_JSM_MSG_METRIC_STREAMER_START). Returns information about collected
- * metric data.
+ * @ref vpu_jsm_metric_streamer_start::notify_sample_count samples as indicated
+ * through the start command (VPU_JSM_MSG_METRIC_STREAMER_START). Returns
+ * information about collected metric data.
* @see vpu_jsm_metric_streamer_done
*/
VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION = 0x2213,
- /** Response to control command: Priority band setup */
+ /**
+ * Response to control command: Priority band setup
+ * @see vpu_ipc_msg_payload_hws_priority_band_setup
+ */
VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP = 0x2214,
- /** Response to control command: Create command queue */
+ /**
+ * Response to control command: Create command queue
+ * @see vpu_ipc_msg_payload_hws_create_cmdq_rsp
+ */
VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP = 0x2215,
- /** Response to control command: Destroy command queue */
+ /**
+ * Response to control command: Destroy command queue
+ * @see vpu_ipc_msg_payload_hws_destroy_cmdq
+ */
VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP = 0x2216,
- /** Response to control command: Set context scheduling properties */
+ /**
+ * Response to control command: Set context scheduling properties
+ * @see vpu_ipc_msg_payload_hws_set_context_sched_properties
+ */
VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP = 0x2217,
- /** Response to control command: Log buffer setting */
+ /**
+ * Response to control command: Log buffer setting
+ * @see vpu_ipc_msg_payload_hws_set_scheduling_log
+ */
VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP = 0x2218,
- /* IPC Device -> Host, HWS notify index entry of log buffer written */
+ /**
+ * IPC Device -> Host, HWS notify index entry of log buffer written
+ * @see vpu_ipc_msg_payload_hws_scheduling_log_notification
+ */
VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION = 0x2219,
- /* IPC Device -> Host, HWS completion of a context suspend request */
+ /**
+ * IPC Device -> Host, HWS completion of a context suspend request
+ * @see vpu_ipc_msg_payload_hws_suspend_cmdq
+ */
VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE = 0x221a,
- /* Response to control command: Resume command queue */
+ /**
+ * Response to control command: Resume command queue
+ * @see vpu_ipc_msg_payload_hws_resume_cmdq
+ */
VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP = 0x221b,
- /* Response to control command: Resume engine command response */
+ /**
+ * Response to control command: Resume engine command response
+ * @see vpu_ipc_msg_payload_hws_resume_engine
+ */
VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE = 0x221c,
- /* Response to control command: Enable survivability/DCT mode */
+ /**
+ * Response to control command: Enable survivability/DCT mode
+ * This command has no payload.
+ */
VPU_JSM_MSG_DCT_ENABLE_DONE = 0x221d,
- /* Response to control command: Disable survivability/DCT mode */
+ /**
+ * Response to control command: Disable survivability/DCT mode
+ * This command has no payload.
+ */
VPU_JSM_MSG_DCT_DISABLE_DONE = 0x221e,
/**
* Response to state dump control command.
- * NOTE: Please introduce new ASYNC responses before this one. *
+ * This command has no payload.
+ * NOTE: Please introduce new ASYNC responses before this one.
*/
VPU_JSM_MSG_STATE_DUMP_RSP = 0x22FF,
@@ -653,57 +787,66 @@ enum vpu_ipc_msg_type {
enum vpu_ipc_msg_status { VPU_JSM_MSG_FREE, VPU_JSM_MSG_ALLOCATED };
-/*
- * Host <-> LRT IPC message payload definitions
+/**
+ * Engine reset request payload
+ * @see VPU_JSM_MSG_ENGINE_RESET
*/
struct vpu_ipc_msg_payload_engine_reset {
- /* Engine to be reset. */
+ /** Engine to be reset. */
u32 engine_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
+/**
+ * Engine preemption request struct
+ * @see VPU_JSM_MSG_ENGINE_PREEMPT
+ */
struct vpu_ipc_msg_payload_engine_preempt {
- /* Engine to be preempted. */
+ /** Engine to be preempted. */
u32 engine_idx;
- /* ID of the preemption request. */
+ /** ID of the preemption request. */
u32 preempt_id;
};
-/*
- * @brief Register doorbell command structure.
+/**
+ * Register doorbell command structure.
* This structure supports doorbell registration for only OS scheduling.
* @see VPU_JSM_MSG_REGISTER_DB
*/
struct vpu_ipc_msg_payload_register_db {
- /* Index of the doorbell to register. */
+ /** Index of the doorbell to register. */
u32 db_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
- /* Virtual address in Global GTT pointing to the start of job queue. */
+ /** Virtual address in Global GTT pointing to the start of job queue. */
u64 jobq_base;
- /* Size of the job queue in bytes. */
+ /** Size of the job queue in bytes. */
u32 jobq_size;
- /* Host sub-stream ID for the context assigned to the doorbell. */
+ /** Host sub-stream ID for the context assigned to the doorbell. */
u32 host_ssid;
};
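/*
 * Illustrative sketch (not part of this patch): filling an OS-scheduling
 * doorbell registration request from the payload above. The helper name is
 * hypothetical, and the union member name (payload.register_db) is assumed
 * from the vpu_ipc_msg_payload union at the end of this header.
 */
static void example_fill_register_db(struct vpu_jsm_msg *req, u32 db_idx,
				     u32 host_ssid, u64 jobq_base, u32 jobq_size)
{
	req->type = VPU_JSM_MSG_REGISTER_DB;
	req->payload.register_db.db_idx = db_idx;
	req->payload.register_db.host_ssid = host_ssid;
	/* Global GTT virtual address of the start of the job queue. */
	req->payload.register_db.jobq_base = jobq_base;
	req->payload.register_db.jobq_size = jobq_size;
}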
/**
- * @brief Unregister doorbell command structure.
+ * Unregister doorbell command structure.
* Request structure to unregister a doorbell for both HW and OS scheduling.
* @see VPU_JSM_MSG_UNREGISTER_DB
*/
struct vpu_ipc_msg_payload_unregister_db {
- /* Index of the doorbell to unregister. */
+ /** Index of the doorbell to unregister. */
u32 db_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
+/**
+ * Heartbeat request structure
+ * @see VPU_JSM_MSG_QUERY_ENGINE_HB
+ */
struct vpu_ipc_msg_payload_query_engine_hb {
- /* Engine to return heartbeat value. */
+ /** Engine to return heartbeat value. */
u32 engine_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
@@ -723,10 +866,14 @@ struct vpu_ipc_msg_payload_power_level {
u32 reserved_0;
};
+/**
+ * Structure for requesting ssid release
+ * @see VPU_JSM_MSG_SSID_RELEASE
+ */
struct vpu_ipc_msg_payload_ssid_release {
- /* Host sub-stream ID for the context to be released. */
+ /** Host sub-stream ID for the context to be released. */
u32 host_ssid;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
@@ -752,7 +899,7 @@ struct vpu_jsm_metric_streamer_start {
u64 sampling_rate;
/**
* If > 0 the VPU will send a VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION message
- * after every @notify_sample_count samples is collected or dropped by the VPU.
+ * after every @ref notify_sample_count samples is collected or dropped by the VPU.
* If set to UINT_MAX the VPU will only generate a notification when the metric
* buffer is full. If set to 0 the VPU will never generate a notification.
*/
@@ -762,9 +909,9 @@ struct vpu_jsm_metric_streamer_start {
* Address and size of the buffer where the VPU will write metric data. The
* VPU writes all counters from enabled metric groups one after another. If
* there is no space left to write data at the next sample period the VPU
- * will switch to the next buffer (@see next_buffer_addr) and will optionally
- * send a notification to the host driver if @notify_sample_count is non-zero.
- * If @next_buffer_addr is NULL the VPU will stop collecting metric data.
+ * will switch to the next buffer (@ref next_buffer_addr) and will optionally
+ * send a notification to the host driver if @ref notify_sample_count is non-zero.
+ * If @ref next_buffer_addr is NULL the VPU will stop collecting metric data.
*/
u64 buffer_addr;
u64 buffer_size;
@@ -827,63 +974,80 @@ struct vpu_jsm_metric_streamer_update {
u64 next_buffer_size;
};
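/*
 * Illustrative sketch (not part of this patch): configuring the metric
 * streamer start payload discussed above. Only the fields visible here are
 * set; the remaining fields of vpu_jsm_metric_streamer_start (such as the
 * metric group selection) would be filled the same way. Values are examples.
 */
static void example_fill_streamer_start(struct vpu_jsm_metric_streamer_start *start,
					u64 buf_addr, u64 buf_size, u64 period)
{
	start->sampling_rate = period;	   /* units per this field's documentation */
	start->notify_sample_count = 1024; /* 0 = never notify, UINT_MAX = only when full */
	start->buffer_addr = buf_addr;	   /* VPU-visible address of the metric buffer */
	start->buffer_size = buf_size;
}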
+/**
+ * Device -> host job completion message.
+ * @see VPU_JSM_MSG_JOB_DONE
+ */
struct vpu_ipc_msg_payload_job_done {
- /* Engine to which the job was submitted. */
+ /** Engine to which the job was submitted. */
u32 engine_idx;
- /* Index of the doorbell to which the job was submitted */
+ /** Index of the doorbell to which the job was submitted */
u32 db_idx;
- /* ID of the completed job */
+ /** ID of the completed job */
u32 job_id;
- /* Status of the completed job */
+ /** Status of the completed job */
u32 job_status;
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
};
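/*
 * Illustrative sketch (not part of this patch): a host-side handler logging
 * the fields of a VPU_JSM_MSG_JOB_DONE payload defined above.
 */
static void example_handle_job_done(const struct vpu_ipc_msg_payload_job_done *done)
{
	pr_debug("job %u on engine %u (db %u, cmdq %llu, ssid %u) done, status 0x%x\n",
		 done->job_id, done->engine_idx, done->db_idx,
		 done->cmdq_id, done->host_ssid, done->job_status);
}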
-/*
+/**
* Notification message upon native fence signalling.
* @see VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED
*/
struct vpu_ipc_msg_payload_native_fence_signalled {
- /* Engine ID. */
+ /** Engine ID. */
u32 engine_idx;
- /* Host SSID. */
+ /** Host SSID. */
u32 host_ssid;
- /* CMDQ ID */
+ /** CMDQ ID */
u64 cmdq_id;
- /* Fence object handle. */
+ /** Fence object handle. */
u64 fence_handle;
};
+/**
+ * vpu_ipc_msg_payload_engine_reset_done contains an array of this structure,
+ * identifying the queues that caused the reset when the FW was able to detect an error.
+ * @see vpu_ipc_msg_payload_engine_reset_done
+ */
struct vpu_jsm_engine_reset_context {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
- /* See VPU_ENGINE_RESET_CONTEXT_* defines */
+ /** See VPU_ENGINE_RESET_CONTEXT_* defines */
u64 flags;
};
+/**
+ * Engine reset response.
+ * @see VPU_JSM_MSG_ENGINE_RESET_DONE
+ */
struct vpu_ipc_msg_payload_engine_reset_done {
- /* Engine ordinal */
+ /** Engine ordinal */
u32 engine_idx;
- /* Number of impacted contexts */
+ /** Number of impacted contexts */
u32 num_impacted_contexts;
- /* Array of impacted command queue ids and their flags */
+ /** Array of impacted command queue ids and their flags */
struct vpu_jsm_engine_reset_context
impacted_contexts[VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS];
};
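/*
 * Illustrative sketch (not part of this patch): walking the impacted-context
 * array delivered with VPU_JSM_MSG_ENGINE_RESET_DONE. The reported count is
 * clamped to the array size before iterating, as a defensive measure.
 */
static void example_handle_engine_reset(const struct vpu_ipc_msg_payload_engine_reset_done *rst)
{
	u32 i, n = min_t(u32, rst->num_impacted_contexts,
			 VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS);

	for (i = 0; i < n; i++)
		pr_debug("engine %u reset: ssid %u cmdq %llu flags 0x%llx\n",
			 rst->engine_idx, rst->impacted_contexts[i].host_ssid,
			 rst->impacted_contexts[i].cmdq_id,
			 rst->impacted_contexts[i].flags);
}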
+/**
+ * Preemption response struct
+ * @see VPU_JSM_MSG_ENGINE_PREEMPT_DONE
+ */
struct vpu_ipc_msg_payload_engine_preempt_done {
- /* Engine preempted. */
+ /** Engine preempted. */
u32 engine_idx;
- /* ID of the preemption request. */
+ /** ID of the preemption request. */
u32 preempt_id;
};
@@ -912,12 +1076,16 @@ struct vpu_ipc_msg_payload_unregister_db_done {
u32 reserved_0;
};
+/**
+ * Structure for heartbeat response
+ * @see VPU_JSM_MSG_QUERY_ENGINE_HB_DONE
+ */
struct vpu_ipc_msg_payload_query_engine_hb_done {
- /* Engine returning heartbeat value. */
+ /** Engine returning heartbeat value. */
u32 engine_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
- /* Heartbeat value. */
+ /** Heartbeat value. */
u64 heartbeat;
};
@@ -937,7 +1105,10 @@ struct vpu_ipc_msg_payload_get_power_level_count_done {
u8 power_limit[16];
};
-/* HWS priority band setup request / response */
+/**
+ * HWS priority band setup request / response
+ * @see VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP
+ */
struct vpu_ipc_msg_payload_hws_priority_band_setup {
/*
* Grace period in 100ns units when preempting another priority band for
@@ -964,15 +1135,23 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup {
* TDR timeout value in milliseconds. Default value of 0 meaning no timeout.
*/
u32 tdr_timeout;
+ /*
+ * Non-interactive queue timeout in milliseconds for no heartbeat progress.
+ * Default value of 0 meaning no timeout.
+ */
+ u32 non_interactive_no_progress_timeout;
+ /*
+ * Non-interactive queue upper limit timeout value in milliseconds. Default
+ * value of 0 meaning no timeout.
+ */
+ u32 non_interactive_timeout;
};
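/*
 * Illustrative sketch (not part of this patch): the two new non-interactive
 * timeouts follow the same convention as tdr_timeout, so disabling all
 * timeout-based policing is just a matter of writing zeroes.
 */
static void example_disable_hws_timeouts(struct vpu_ipc_msg_payload_hws_priority_band_setup *setup)
{
	setup->tdr_timeout = 0;				/* no TDR timeout */
	setup->non_interactive_no_progress_timeout = 0;	/* no heartbeat-progress timeout */
	setup->non_interactive_timeout = 0;		/* no upper-limit timeout */
}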
-/*
+/**
* @brief HWS create command queue request.
* Host will create a command queue via this command.
* Note: Cmdq group is a handle of an object which
* may contain one or more command queues.
* @see VPU_JSM_MSG_CREATE_CMD_QUEUE
- * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP
*/
struct vpu_ipc_msg_payload_hws_create_cmdq {
/* Process id */
@@ -993,66 +1172,73 @@ struct vpu_ipc_msg_payload_hws_create_cmdq {
u32 reserved_0;
};
-/*
- * @brief HWS create command queue response.
- * @see VPU_JSM_MSG_CREATE_CMD_QUEUE
+/**
+ * HWS create command queue response.
* @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP
*/
struct vpu_ipc_msg_payload_hws_create_cmdq_rsp {
- /* Process id */
+ /** Process id */
u64 process_id;
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Engine for which queue is being created */
+ /** Engine for which queue is being created */
u32 engine_idx;
- /* Command queue group */
+ /** Command queue group */
u64 cmdq_group;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
};
-/* HWS destroy command queue request / response */
+/**
+ * HWS destroy command queue request / response
+ * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE
+ * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP
+ */
struct vpu_ipc_msg_payload_hws_destroy_cmdq {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
};
-/* HWS set context scheduling properties request / response */
+/**
+ * HWS set context scheduling properties request / response
+ * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES
+ * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP
+ */
struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
- /*
+ /**
* Priority band to assign to work of this context.
* Available priority bands: @see enum vpu_job_scheduling_priority_band
*/
u32 priority_band;
- /* Inside realtime band assigns a further priority */
+ /** Inside realtime band assigns a further priority */
u32 realtime_priority_level;
- /* Priority relative to other contexts in the same process */
+ /** Priority relative to other contexts in the same process */
s32 in_process_priority;
- /* Zero padding / Reserved */
+ /** Zero padding / Reserved */
u32 reserved_1;
- /*
+ /**
* Context quantum relative to other contexts of same priority in the same process
* Minimum value supported by NPU is 1ms (10000 in 100ns units).
*/
u64 context_quantum;
- /* Grace period when preempting context of the same priority within the same process */
+ /** Grace period when preempting context of the same priority within the same process */
u64 grace_period_same_priority;
- /* Grace period when preempting context of a lower priority within the same process */
+ /** Grace period when preempting context of a lower priority within the same process */
u64 grace_period_lower_priority;
};
-/*
- * @brief Register doorbell command structure.
+/**
+ * Register doorbell command structure.
* This structure supports doorbell registration for both HW and OS scheduling.
* Note: Queue base and size are added here so that the same structure can be used for
* OS scheduling and HW scheduling. For OS scheduling, cmdq_id will be ignored
@@ -1061,27 +1247,27 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
* @see VPU_JSM_MSG_HWS_REGISTER_DB
*/
struct vpu_jsm_hws_register_db {
- /* Index of the doorbell to register. */
+ /** Index of the doorbell to register. */
u32 db_id;
- /* Host sub-stream ID for the context assigned to the doorbell. */
+ /** Host sub-stream ID for the context assigned to the doorbell. */
u32 host_ssid;
- /* ID of the command queue associated with the doorbell. */
+ /** ID of the command queue associated with the doorbell. */
u64 cmdq_id;
- /* Virtual address pointing to the start of command queue. */
+ /** Virtual address pointing to the start of command queue. */
u64 cmdq_base;
- /* Size of the command queue in bytes. */
+ /** Size of the command queue in bytes. */
u64 cmdq_size;
};
-/*
- * @brief Structure to set another buffer to be used for scheduling-related logging.
+/**
+ * Structure to set another buffer to be used for scheduling-related logging.
* The size of the logging buffer and the number of entries is defined as part of the
* buffer itself as described next.
 * The log buffer received from the host is made up of:
- * - header: 32 bytes in size, as shown in 'struct vpu_hws_log_buffer_header'.
+ * - header: 32 bytes in size, as shown in @ref vpu_hws_log_buffer_header.
* The header contains the number of log entries in the buffer.
* - log entry: 0 to n-1, each log entry is 32 bytes in size, as shown in
- * 'struct vpu_hws_log_buffer_entry'.
+ * @ref vpu_hws_log_buffer_entry.
* The entry contains the VPU timestamp, operation type and data.
* The host should provide the notify index value of log buffer to VPU. This is a
* value defined within the log buffer and when written to will generate the
@@ -1095,30 +1281,30 @@ struct vpu_jsm_hws_register_db {
* @see VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION
*/
struct vpu_ipc_msg_payload_hws_set_scheduling_log {
- /* Engine ordinal */
+ /** Engine ordinal */
u32 engine_idx;
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /*
+ /**
* VPU log buffer virtual address.
* Set to 0 to disable logging for this engine.
*/
u64 vpu_log_buffer_va;
- /*
+ /**
* Notify index of log buffer. VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION
* is generated when an event log is written to this index.
*/
u64 notify_index;
- /*
+ /**
* Field is now deprecated, will be removed when KMD is updated to support removal
*/
u32 enable_extra_events;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
};
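/*
 * Illustrative sketch (not part of this patch): enabling the scheduling log
 * for one engine. vpu_log_va is assumed to be the VPU-visible address of an
 * already allocated buffer laid out as described above (32-byte header
 * followed by 32-byte entries).
 */
static void example_enable_sched_log(struct vpu_ipc_msg_payload_hws_set_scheduling_log *log,
				     u32 engine_idx, u32 host_ssid,
				     u64 vpu_log_va, u64 notify_index)
{
	log->engine_idx = engine_idx;
	log->host_ssid = host_ssid;
	log->vpu_log_buffer_va = vpu_log_va;	/* 0 would disable logging */
	/* The VPU raises VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION at this index. */
	log->notify_index = notify_index;
}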
-/*
- * @brief The scheduling log notification is generated by VPU when it writes
+/**
+ * The scheduling log notification is generated by VPU when it writes
* an event into the log buffer at the notify_index. VPU notifies host with
* VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION. This is an asynchronous
* message from VPU to host.
@@ -1126,14 +1312,14 @@ struct vpu_ipc_msg_payload_hws_set_scheduling_log {
* @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG
*/
struct vpu_ipc_msg_payload_hws_scheduling_log_notification {
- /* Engine ordinal */
+ /** Engine ordinal */
u32 engine_idx;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
};
-/*
- * @brief HWS suspend command queue request and done structure.
+/**
+ * HWS suspend command queue request and done structure.
 * Host will request the suspend of contexts and VPU will:
* - Suspend all work on this context
* - Preempt any running work
@@ -1152,21 +1338,21 @@ struct vpu_ipc_msg_payload_hws_scheduling_log_notification {
* @see VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE
*/
struct vpu_ipc_msg_payload_hws_suspend_cmdq {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
- /*
+ /**
* Suspend fence value - reported by the VPU suspend context
* completed once suspend is complete.
*/
u64 suspend_fence_value;
};
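/*
 * Illustrative sketch (not part of this patch): requesting suspension of a
 * single command queue. The fence value is reported back by the VPU once the
 * suspend completes, which lets the host pair requests with completions.
 */
static void example_fill_suspend_cmdq(struct vpu_ipc_msg_payload_hws_suspend_cmdq *susp,
				      u32 host_ssid, u64 cmdq_id, u64 fence_value)
{
	susp->host_ssid = host_ssid;
	susp->cmdq_id = cmdq_id;
	susp->suspend_fence_value = fence_value;
}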
-/*
- * @brief HWS Resume command queue request / response structure.
+/**
+ * HWS Resume command queue request / response structure.
 * Host will request the resume of a context:
* - VPU will resume all work on this context
* - Scheduler will allow this context to be scheduled
@@ -1174,25 +1360,25 @@ struct vpu_ipc_msg_payload_hws_suspend_cmdq {
* @see VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP
*/
struct vpu_ipc_msg_payload_hws_resume_cmdq {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
};
-/*
- * @brief HWS Resume engine request / response structure.
- * After a HWS engine reset, all scheduling is stopped on VPU until a engine resume.
+/**
+ * HWS Resume engine request / response structure.
+ * After a HWS engine reset, all scheduling is stopped on VPU until an engine resume.
* Host shall send this command to resume scheduling of any valid queue.
- * @see VPU_JSM_MSG_HWS_RESUME_ENGINE
+ * @see VPU_JSM_MSG_HWS_ENGINE_RESUME
* @see VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE
*/
struct vpu_ipc_msg_payload_hws_resume_engine {
- /* Engine to be resumed */
+ /** Engine to be resumed */
u32 engine_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
@@ -1326,7 +1512,7 @@ struct vpu_jsm_metric_streamer_done {
/**
* Metric group description placed in the metric buffer after successful completion
* of the VPU_JSM_MSG_METRIC_STREAMER_INFO command. This is followed by one or more
- * @vpu_jsm_metric_counter_descriptor records.
+ * @ref vpu_jsm_metric_counter_descriptor records.
* @see VPU_JSM_MSG_METRIC_STREAMER_INFO
*/
struct vpu_jsm_metric_group_descriptor {
@@ -1413,29 +1599,24 @@ struct vpu_jsm_metric_counter_descriptor {
};
/**
- * Payload for VPU_JSM_MSG_DYNDBG_CONTROL requests.
+ * Payload for @ref VPU_JSM_MSG_DYNDBG_CONTROL requests.
*
- * VPU_JSM_MSG_DYNDBG_CONTROL are used to control the VPU FW Dynamic Debug
- * feature, which allows developers to selectively enable / disable MVLOG_DEBUG
- * messages. This is equivalent to the Dynamic Debug functionality provided by
- * Linux
- * (https://www.kernel.org/doc/html/latest/admin-guide/dynamic-debug-howto.html)
- * The host can control Dynamic Debug behavior by sending dyndbg commands, which
- * have the same syntax as Linux
- * dyndbg commands.
+ * VPU_JSM_MSG_DYNDBG_CONTROL requests are used to control the VPU FW dynamic debug
+ * feature, which allows developers to selectively enable/disable code to obtain
+ * additional FW information. This is equivalent to the dynamic debug functionality
+ * provided by Linux. The host can control dynamic debug behavior by sending dyndbg
+ * commands, using the same syntax as for Linux dynamic debug commands.
*
- * NOTE: in order for MVLOG_DEBUG messages to be actually printed, the host
- * still has to set the logging level to MVLOG_DEBUG, using the
- * VPU_JSM_MSG_TRACE_SET_CONFIG command.
+ * @see https://www.kernel.org/doc/html/latest/admin-guide/dynamic-debug-howto.html.
*
- * The host can see the current dynamic debug configuration by executing a
- * special 'show' command. The dyndbg configuration will be printed to the
- * configured logging destination using MVLOG_INFO logging level.
+ * NOTE:
+ * As the dynamic debug feature uses MVLOG messages to provide information, the host
+ * must first set the logging level to MVLOG_DEBUG, using the @ref VPU_JSM_MSG_TRACE_SET_CONFIG
+ * command.
*/
struct vpu_ipc_msg_payload_dyndbg_control {
/**
- * Dyndbg command (same format as Linux dyndbg); must be a NULL-terminated
- * string.
+ * Dyndbg command to be executed.
*/
char dyndbg_cmd[VPU_DYNDBG_CMD_MAX_LEN];
};
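/*
 * Illustrative sketch (not part of this patch): composing a dyndbg request.
 * The command uses the Linux dynamic-debug syntax and must fit within
 * VPU_DYNDBG_CMD_MAX_LEN, including the terminating NUL.
 */
static void example_fill_dyndbg(struct vpu_ipc_msg_payload_dyndbg_control *ctl,
				const char *cmd)
{
	strscpy(ctl->dyndbg_cmd, cmd, sizeof(ctl->dyndbg_cmd));
}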
@@ -1456,7 +1637,7 @@ struct vpu_ipc_msg_payload_pwr_d0i3_enter {
};
/**
- * Payload for VPU_JSM_MSG_DCT_ENABLE message.
+ * Payload for @ref VPU_JSM_MSG_DCT_ENABLE message.
*
* Default values for DCT active/inactive times are 5.3ms and 30ms respectively,
* corresponding to a 85% duty cycle. This payload allows the host to tune these
@@ -1513,28 +1694,28 @@ union vpu_ipc_msg_payload {
struct vpu_ipc_msg_payload_pwr_dct_control pwr_dct_control;
};
-/*
- * Host <-> LRT IPC message base structure.
+/**
+ * Host <-> NPU IPC message base structure.
*
* NOTE: All instances of this object must be aligned on a 64B boundary
* to allow proper handling of VPU cache operations.
*/
struct vpu_jsm_msg {
- /* Reserved */
+ /** Reserved */
u64 reserved_0;
- /* Message type, see vpu_ipc_msg_type enum. */
+ /** Message type, see @ref vpu_ipc_msg_type. */
u32 type;
- /* Buffer status, see vpu_ipc_msg_status enum. */
+ /** Buffer status, see @ref vpu_ipc_msg_status. */
u32 status;
- /*
+ /**
* Request ID, provided by the host in a request message and passed
* back by VPU in the response message.
*/
u32 request_id;
- /* Request return code set by the VPU, see VPU_JSM_STATUS_* defines. */
+ /** Request return code set by the VPU, see VPU_JSM_STATUS_* defines. */
u32 result;
u64 reserved_1;
- /* Message payload depending on message type, see vpu_ipc_msg_payload union. */
+ /** Message payload depending on message type, see vpu_ipc_msg_payload union. */
union vpu_ipc_msg_payload payload;
};
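/*
 * Illustrative sketch (not part of this patch): honouring the 64-byte
 * alignment requirement stated above when a message lives on the stack.
 */
static void example_aligned_msg(void)
{
	struct vpu_jsm_msg msg __aligned(64) = {
		.type = VPU_JSM_MSG_STATE_DUMP,
	};

	(void)msg;	/* hand the message to the IPC layer here */
}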
diff --git a/drivers/accel/qaic/Kconfig b/drivers/accel/qaic/Kconfig
index 5e405a19c157..116e42d152ca 100644
--- a/drivers/accel/qaic/Kconfig
+++ b/drivers/accel/qaic/Kconfig
@@ -9,6 +9,7 @@ config DRM_ACCEL_QAIC
depends on PCI && HAS_IOMEM
depends on MHI_BUS
select CRC32
+ select WANT_DEV_COREDUMP
help
Enables driver for Qualcomm's Cloud AI accelerator PCIe cards that are
designed to accelerate Deep Learning inference workloads.
diff --git a/drivers/accel/qaic/Makefile b/drivers/accel/qaic/Makefile
index 35e883515629..71f727b74da3 100644
--- a/drivers/accel/qaic/Makefile
+++ b/drivers/accel/qaic/Makefile
@@ -10,6 +10,9 @@ qaic-y := \
qaic_control.o \
qaic_data.o \
qaic_drv.o \
+ qaic_ras.o \
+ qaic_ssr.o \
+ qaic_sysfs.o \
qaic_timesync.o \
sahara.o
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index 0dbb8e32e4b9..fa7a8155658c 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -21,6 +21,7 @@
#define QAIC_DBC_BASE SZ_128K
#define QAIC_DBC_SIZE SZ_4K
+#define QAIC_SSR_DBC_SENTINEL U32_MAX /* No ongoing SSR sentinel */
#define QAIC_NO_PARTITION -1
@@ -47,6 +48,22 @@ enum __packed dev_states {
QAIC_ONLINE,
};
+enum dbc_states {
+ /* DBC is free and can be activated */
+ DBC_STATE_IDLE,
+ /* DBC is activated and a workload is running on device */
+ DBC_STATE_ASSIGNED,
+ /* Sub-system associated with this workload has crashed and will shut down soon */
+ DBC_STATE_BEFORE_SHUTDOWN,
+ /* Sub-system associated with this workload has crashed and has shut down */
+ DBC_STATE_AFTER_SHUTDOWN,
+ /* Sub-system associated with this workload has shut down and will be powered up soon */
+ DBC_STATE_BEFORE_POWER_UP,
+ /* Sub-system associated with this workload is now powered up */
+ DBC_STATE_AFTER_POWER_UP,
+ DBC_STATE_MAX,
+};
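/*
 * Illustrative sketch (not part of this patch): a hypothetical table mapping
 * the DBC states above to names, e.g. for the sysfs attribute that reports
 * SSR progress to userspace.
 */
static const char * const example_dbc_state_str[DBC_STATE_MAX] = {
	[DBC_STATE_IDLE]		= "idle",
	[DBC_STATE_ASSIGNED]		= "assigned",
	[DBC_STATE_BEFORE_SHUTDOWN]	= "before_shutdown",
	[DBC_STATE_AFTER_SHUTDOWN]	= "after_shutdown",
	[DBC_STATE_BEFORE_POWER_UP]	= "before_power_up",
	[DBC_STATE_AFTER_POWER_UP]	= "after_power_up",
};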
+
extern bool datapath_polling;
struct qaic_user {
@@ -97,6 +114,8 @@ struct dma_bridge_chan {
* response queue's head and tail pointer of this DBC.
*/
void __iomem *dbc_base;
+ /* Synchronizes access to the request queue's head and tail pointers */
+ struct mutex req_lock;
/* Head of list where each node is a memory handle queued in request queue */
struct list_head xfer_list;
/* Synchronizes DBC readers during cleanup */
@@ -112,6 +131,8 @@ struct dma_bridge_chan {
unsigned int irq;
/* Polling work item to simulate interrupts */
struct work_struct poll_work;
+ /* Represents various states of this DBC from enum dbc_states */
+ unsigned int state;
};
struct qaic_device {
@@ -159,6 +180,8 @@ struct qaic_device {
struct mhi_device *qts_ch;
/* Work queue for tasks related to MHI "QAIC_TIMESYNC" channel */
struct workqueue_struct *qts_wq;
+ /* MHI "QAIC_TIMESYNC_PERIODIC" channel device */
+ struct mhi_device *mqts_ch;
/* Head of list of page allocated by MHI bootlog device */
struct list_head bootlog;
/* MHI bootlog channel device */
@@ -167,6 +190,22 @@ struct qaic_device {
struct workqueue_struct *bootlog_wq;
/* Synchronizes access of pages in MHI bootlog device */
struct mutex bootlog_mutex;
+ /* MHI RAS channel device */
+ struct mhi_device *ras_ch;
+ /* Correctable error count */
+ unsigned int ce_count;
+ /* Uncorrectable error count */
+ unsigned int ue_count;
+ /* Uncorrectable non-fatal error count */
+ unsigned int ue_nf_count;
+ /* MHI SSR channel device */
+ struct mhi_device *ssr_ch;
+ /* Work queue for tasks related to MHI SSR device */
+ struct workqueue_struct *ssr_wq;
+ /* Buffer to collect SSR crashdump via SSR MHI channel */
+ void *ssr_mhi_buf;
+ /* DBC which is under SSR. Sentinel U32_MAX means that no SSR is in progress */
+ u32 ssr_dbc;
};
struct qaic_drm_device {
@@ -185,6 +224,8 @@ struct qaic_drm_device {
struct list_head users;
/* Synchronizes access to users list */
struct mutex users_mutex;
+ /* Pointer to array of DBC sysfs attributes */
+ void *sysfs_attrs;
};
struct qaic_bo {
@@ -213,8 +254,6 @@ struct qaic_bo {
bool sliced;
/* Request ID of this BO if it is queued for execution */
u16 req_id;
- /* Handle assigned to this BO */
- u32 handle;
/* Wait on this for completion of DMA transfer of this BO */
struct completion xfer_done;
/*
@@ -309,6 +348,13 @@ int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm
int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-void irq_polling_work(struct work_struct *work);
+void qaic_irq_polling_work(struct work_struct *work);
+void qaic_dbc_enter_ssr(struct qaic_device *qdev, u32 dbc_id);
+void qaic_dbc_exit_ssr(struct qaic_device *qdev);
+
+/* qaic_sysfs.c */
+int qaic_sysfs_init(struct qaic_drm_device *qddev);
+void qaic_sysfs_remove(struct qaic_drm_device *qddev);
+void set_dbc_state(struct qaic_device *qdev, u32 dbc_id, unsigned int state);
#endif /* _QAIC_H_ */
diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
index d8bdab69f800..428d8f65bff3 100644
--- a/drivers/accel/qaic/qaic_control.c
+++ b/drivers/accel/qaic/qaic_control.c
@@ -17,6 +17,7 @@
#include <linux/overflow.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
+#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
@@ -30,7 +31,7 @@
#define MANAGE_MAGIC_NUMBER ((__force __le32)0x43494151) /* "QAIC" in little endian */
#define QAIC_DBC_Q_GAP SZ_256
#define QAIC_DBC_Q_BUF_ALIGN SZ_4K
-#define QAIC_MANAGE_EXT_MSG_LENGTH SZ_64K /* Max DMA message length */
+#define QAIC_MANAGE_WIRE_MSG_LENGTH SZ_64K /* Max DMA message length */
#define QAIC_WRAPPER_MAX_SIZE SZ_4K
#define QAIC_MHI_RETRY_WAIT_MS 100
#define QAIC_MHI_RETRY_MAX 20
@@ -309,6 +310,7 @@ static void save_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resou
enable_dbc(qdev, dbc_id, usr);
qdev->dbc[dbc_id].in_use = true;
resources->buf = NULL;
+ set_dbc_state(qdev, dbc_id, DBC_STATE_ASSIGNED);
}
}
@@ -367,7 +369,7 @@ static int encode_passthrough(struct qaic_device *qdev, void *trans, struct wrap
if (in_trans->hdr.len % 8 != 0)
return -EINVAL;
- if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_EXT_MSG_LENGTH)
+ if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_WIRE_MSG_LENGTH)
return -ENOSPC;
trans_wrapper = add_wrapper(wrappers,
@@ -407,7 +409,7 @@ static int find_and_map_user_pages(struct qaic_device *qdev,
return -EINVAL;
remaining = in_trans->size - resources->xferred_dma_size;
if (remaining == 0)
- return 0;
+ return -EINVAL;
if (check_add_overflow(xfer_start_addr, remaining, &end))
return -EINVAL;
@@ -495,7 +497,7 @@ static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wr
nents = sgt->nents;
nents_dma = nents;
- *size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
+ *size = QAIC_MANAGE_WIRE_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
for_each_sgtable_dma_sg(sgt, sg, i) {
*size -= sizeof(*asp);
/* Save 1K for possible follow-up transactions. */
@@ -576,7 +578,7 @@ static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list
/* There should be enough space to hold at least one ASP entry. */
if (size_add(msg_hdr_len, sizeof(*out_trans) + sizeof(struct wire_addr_size_pair)) >
- QAIC_MANAGE_EXT_MSG_LENGTH)
+ QAIC_MANAGE_WIRE_MSG_LENGTH)
return -ENOMEM;
xfer = kmalloc(sizeof(*xfer), GFP_KERNEL);
@@ -645,7 +647,7 @@ static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper
msg = &wrapper->msg;
msg_hdr_len = le32_to_cpu(msg->hdr.len);
- if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_MAX_MSG_LENGTH)
+ if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_WIRE_MSG_LENGTH)
return -ENOSPC;
if (!in_trans->queue_size)
@@ -655,8 +657,9 @@ static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper
return -EINVAL;
nelem = in_trans->queue_size;
- size = (get_dbc_req_elem_size() + get_dbc_rsp_elem_size()) * nelem;
- if (size / nelem != get_dbc_req_elem_size() + get_dbc_rsp_elem_size())
+ if (check_mul_overflow((u32)(get_dbc_req_elem_size() + get_dbc_rsp_elem_size()),
+ nelem,
+ &size))
return -EINVAL;
if (size + QAIC_DBC_Q_GAP + QAIC_DBC_Q_BUF_ALIGN < size)
@@ -729,7 +732,7 @@ static int encode_status(struct qaic_device *qdev, void *trans, struct wrapper_l
msg = &wrapper->msg;
msg_hdr_len = le32_to_cpu(msg->hdr.len);
- if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_MAX_MSG_LENGTH)
+ if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_WIRE_MSG_LENGTH)
return -ENOSPC;
trans_wrapper = add_wrapper(wrappers, sizeof(*trans_wrapper));
@@ -810,7 +813,7 @@ static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
}
if (ret)
- break;
+ goto out;
}
if (user_len != user_msg->len)
@@ -921,6 +924,7 @@ static int decode_deactivate(struct qaic_device *qdev, void *trans, u32 *msg_len
}
release_dbc(qdev, dbc_id);
+ set_dbc_state(qdev, dbc_id, DBC_STATE_IDLE);
*msg_len += sizeof(*in_trans);
return 0;
@@ -1052,7 +1056,7 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u
init_completion(&elem.xfer_done);
if (likely(!qdev->cntl_lost_buf)) {
/*
- * The max size of request to device is QAIC_MANAGE_EXT_MSG_LENGTH.
+ * The max size of request to device is QAIC_MANAGE_WIRE_MSG_LENGTH.
* The max size of response from device is QAIC_MANAGE_MAX_MSG_LENGTH.
*/
out_buf = kmalloc(QAIC_MANAGE_MAX_MSG_LENGTH, GFP_KERNEL);
@@ -1079,7 +1083,6 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u
list_for_each_entry(w, &wrappers->list, list) {
kref_get(&w->ref_count);
- retry_count = 0;
ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len,
list_is_last(&w->list, &wrappers->list) ? MHI_EOT : MHI_CHAIN);
if (ret) {
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 1bce1af7c72c..60cb4d65d48e 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -18,6 +18,7 @@
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
+#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
@@ -165,7 +166,7 @@ static void free_slice(struct kref *kref)
drm_gem_object_put(&slice->bo->base);
sg_free_table(slice->sgt);
kfree(slice->sgt);
- kfree(slice->reqs);
+ kvfree(slice->reqs);
kfree(slice);
}
@@ -404,7 +405,7 @@ static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo,
goto free_sgt;
}
- slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL);
+ slice->reqs = kvcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL);
if (!slice->reqs) {
ret = -ENOMEM;
goto free_slice;
@@ -430,7 +431,7 @@ static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo,
return 0;
free_req:
- kfree(slice->reqs);
+ kvfree(slice->reqs);
free_slice:
kfree(slice);
free_sgt:
@@ -643,8 +644,36 @@ static void qaic_free_object(struct drm_gem_object *obj)
kfree(bo);
}
+static struct sg_table *qaic_get_sg_table(struct drm_gem_object *obj)
+{
+ struct qaic_bo *bo = to_qaic_bo(obj);
+ struct scatterlist *sg, *sg_in;
+ struct sg_table *sgt, *sgt_in;
+ int i;
+
+ sgt_in = bo->sgt;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ if (sg_alloc_table(sgt, sgt_in->orig_nents, GFP_KERNEL)) {
+ kfree(sgt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sg = sgt->sgl;
+ for_each_sgtable_sg(sgt_in, sg_in, i) {
+ memcpy(sg, sg_in, sizeof(*sg));
+ sg = sg_next(sg);
+ }
+
+ return sgt;
+}
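/*
 * Editorial note (not part of this patch): the clone above copies only the
 * page links of the BO's sg_table, not DMA addresses; the DRM PRIME core
 * maps the returned table for the importing device. A minimal usage sketch,
 * assuming a GEM object backed by this driver:
 */
static struct sg_table *example_export_sgt(struct drm_gem_object *obj)
{
	/* Dispatches to qaic_get_sg_table() via qaic_gem_funcs below. */
	return obj->funcs->get_sg_table(obj);
}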
+
static const struct drm_gem_object_funcs qaic_gem_funcs = {
.free = qaic_free_object,
+ .get_sg_table = qaic_get_sg_table,
.print_info = qaic_gem_print_info,
.mmap = qaic_gem_object_mmap,
.vm_ops = &drm_vm_ops,
@@ -731,7 +760,6 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
if (ret)
goto free_bo;
- bo->handle = args->handle;
drm_gem_object_put(obj);
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
@@ -954,8 +982,9 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
if (args->hdr.count == 0)
return -EINVAL;
- arg_size = args->hdr.count * sizeof(*slice_ent);
- if (arg_size / args->hdr.count != sizeof(*slice_ent))
+ if (check_mul_overflow((unsigned long)args->hdr.count,
+ (unsigned long)sizeof(*slice_ent),
+ &arg_size))
return -EINVAL;
if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE))
@@ -985,18 +1014,12 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
user_data = u64_to_user_ptr(args->data);
- slice_ent = kzalloc(arg_size, GFP_KERNEL);
- if (!slice_ent) {
- ret = -EINVAL;
+ slice_ent = memdup_user(user_data, arg_size);
+ if (IS_ERR(slice_ent)) {
+ ret = PTR_ERR(slice_ent);
goto unlock_dev_srcu;
}
- ret = copy_from_user(slice_ent, user_data, arg_size);
- if (ret) {
- ret = -EFAULT;
- goto free_slice_ent;
- }
-
obj = drm_gem_object_lookup(file_priv, args->hdr.handle);
if (!obj) {
ret = -ENOENT;
@@ -1024,6 +1047,11 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
goto unlock_ch_srcu;
}
+ if (dbc->id == qdev->ssr_dbc) {
+ ret = -EPIPE;
+ goto unlock_ch_srcu;
+ }
+
ret = qaic_prepare_bo(qdev, bo, &args->hdr);
if (ret)
goto unlock_ch_srcu;
@@ -1301,8 +1329,6 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
int usr_rcu_id, qdev_rcu_id;
struct qaic_device *qdev;
struct qaic_user *usr;
- u8 __user *user_data;
- unsigned long n;
u64 received_ts;
u32 queue_level;
u64 submit_ts;
@@ -1315,20 +1341,12 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
received_ts = ktime_get_ns();
size = is_partial ? sizeof(struct qaic_partial_execute_entry) : sizeof(*exec);
- n = (unsigned long)size * args->hdr.count;
- if (args->hdr.count == 0 || n / args->hdr.count != size)
+ if (args->hdr.count == 0)
return -EINVAL;
- user_data = u64_to_user_ptr(args->data);
-
- exec = kcalloc(args->hdr.count, size, GFP_KERNEL);
- if (!exec)
- return -ENOMEM;
-
- if (copy_from_user(exec, user_data, n)) {
- ret = -EFAULT;
- goto free_exec;
- }
+ exec = memdup_array_user(u64_to_user_ptr(args->data), args->hdr.count, size);
+ if (IS_ERR(exec))
+ return PTR_ERR(exec);
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
@@ -1357,13 +1375,22 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
goto release_ch_rcu;
}
+ if (dbc->id == qdev->ssr_dbc) {
+ ret = -EPIPE;
+ goto release_ch_rcu;
+ }
+
+ ret = mutex_lock_interruptible(&dbc->req_lock);
+ if (ret)
+ goto release_ch_rcu;
+
head = readl(dbc->dbc_base + REQHP_OFF);
tail = readl(dbc->dbc_base + REQTP_OFF);
if (head == U32_MAX || tail == U32_MAX) {
/* PCI link error */
ret = -ENODEV;
- goto release_ch_rcu;
+ goto unlock_req_lock;
}
queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail);
@@ -1371,11 +1398,12 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc,
head, &tail);
if (ret)
- goto release_ch_rcu;
+ goto unlock_req_lock;
/* Finalize commit to hardware */
submit_ts = ktime_get_ns();
writel(tail, dbc->dbc_base + REQTP_OFF);
+ mutex_unlock(&dbc->req_lock);
update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts,
submit_ts, queue_level);
@@ -1383,13 +1411,15 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
if (datapath_polling)
schedule_work(&dbc->poll_work);
+unlock_req_lock:
+ if (ret)
+ mutex_unlock(&dbc->req_lock);
release_ch_rcu:
srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_dev_srcu:
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
-free_exec:
kfree(exec);
return ret;
}
@@ -1484,7 +1514,7 @@ irqreturn_t dbc_irq_handler(int irq, void *data)
return IRQ_WAKE_THREAD;
}
-void irq_polling_work(struct work_struct *work)
+void qaic_irq_polling_work(struct work_struct *work)
{
struct dma_bridge_chan *dbc = container_of(work, struct dma_bridge_chan, poll_work);
unsigned long flags;
@@ -1702,6 +1732,11 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
goto unlock_ch_srcu;
}
+ if (dbc->id == qdev->ssr_dbc) {
+ ret = -EPIPE;
+ goto unlock_ch_srcu;
+ }
+
obj = drm_gem_object_lookup(file_priv, args->handle);
if (!obj) {
ret = -ENOENT;
@@ -1722,6 +1757,9 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
if (!dbc->usr)
ret = -EPERM;
+ if (dbc->id == qdev->ssr_dbc)
+ ret = -EPIPE;
+
put_obj:
drm_gem_object_put(obj);
unlock_ch_srcu:
@@ -1742,7 +1780,8 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file
struct qaic_device *qdev;
struct qaic_user *usr;
struct qaic_bo *bo;
- int ret, i;
+ int ret = 0;
+ int i;
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
@@ -1763,18 +1802,12 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file
goto unlock_dev_srcu;
}
- ent = kcalloc(args->hdr.count, sizeof(*ent), GFP_KERNEL);
- if (!ent) {
- ret = -EINVAL;
+ ent = memdup_array_user(u64_to_user_ptr(args->data), args->hdr.count, sizeof(*ent));
+ if (IS_ERR(ent)) {
+ ret = PTR_ERR(ent);
goto unlock_dev_srcu;
}
- ret = copy_from_user(ent, u64_to_user_ptr(args->data), args->hdr.count * sizeof(*ent));
- if (ret) {
- ret = -EFAULT;
- goto free_ent;
- }
-
for (i = 0; i < args->hdr.count; i++) {
obj = drm_gem_object_lookup(file_priv, ent[i].handle);
if (!obj) {
@@ -1782,6 +1815,16 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file
goto free_ent;
}
bo = to_qaic_bo(obj);
+ if (!bo->sliced) {
+ drm_gem_object_put(obj);
+ ret = -EINVAL;
+ goto free_ent;
+ }
+ if (bo->dbc->id != args->hdr.dbc_id) {
+ drm_gem_object_put(obj);
+ ret = -EINVAL;
+ goto free_ent;
+ }
/*
* perf stats ioctl is called before wait ioctl is complete then
* the latency information is invalid.
@@ -1920,6 +1963,17 @@ static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *db
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
}
+static void sync_empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc)
+{
+ empty_xfer_list(qdev, dbc);
+ synchronize_srcu(&dbc->ch_lock);
+ /*
+ * Threads holding the channel lock may add more elements to the xfer_list,
+ * so flush those elements out as well.
+ */
+ empty_xfer_list(qdev, dbc);
+}
+
int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
{
if (!qdev->dbc[dbc_id].usr || qdev->dbc[dbc_id].usr->handle != usr->handle)
@@ -1934,7 +1988,7 @@ int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
* enable_dbc - Enable the DBC. DBCs are disabled by removing the context of
* user. Add user context back to DBC to enable it. This function trusts the
* DBC ID passed and expects the DBC to be disabled.
- * @qdev: Qranium device handle
+ * @qdev: qaic device handle
* @dbc_id: ID of the DBC
* @usr: User context
*/
@@ -1948,13 +2002,7 @@ void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id)
struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
dbc->usr = NULL;
- empty_xfer_list(qdev, dbc);
- synchronize_srcu(&dbc->ch_lock);
- /*
- * Threads holding channel lock, may add more elements in the xfer_list.
- * Flush out these elements from xfer_list.
- */
- empty_xfer_list(qdev, dbc);
+ sync_empty_xfer_list(qdev, dbc);
}
void release_dbc(struct qaic_device *qdev, u32 dbc_id)
@@ -1995,3 +2043,30 @@ void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail)
*head = readl(dbc->dbc_base + REQHP_OFF);
*tail = readl(dbc->dbc_base + REQTP_OFF);
}
+
+/*
+ * qaic_dbc_enter_ssr - Prepare to enter sub-system reset (SSR) for the given DBC ID.
+ * @qdev: qaic device handle
+ * @dbc_id: ID of the DBC which will enter SSR
+ *
+ * The device will automatically deactivate the workload as not
+ * all errors can be silently recovered. The user will be
+ * notified and will need to decide the required recovery
+ * action to take.
+ */
+void qaic_dbc_enter_ssr(struct qaic_device *qdev, u32 dbc_id)
+{
+ qdev->ssr_dbc = dbc_id;
+ release_dbc(qdev, dbc_id);
+}
+
+/*
+ * qaic_dbc_exit_ssr - Exit sub-system reset (SSR).
+ * @qdev: qaic device handle
+ *
+ * The DBC returns to an operational state and begins accepting work after exiting SSR.
+ */
+void qaic_dbc_exit_ssr(struct qaic_device *qdev)
+{
+ qdev->ssr_dbc = QAIC_SSR_DBC_SENTINEL;
+}
diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c
index a991b8198dc4..8dc4fe5bb560 100644
--- a/drivers/accel/qaic/qaic_debugfs.c
+++ b/drivers/accel/qaic/qaic_debugfs.c
@@ -218,6 +218,9 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d
if (ret)
goto destroy_workqueue;
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+ qdev->bootlog_ch = mhi_dev;
+
for (i = 0; i < BOOTLOG_POOL_SIZE; i++) {
msg = devm_kzalloc(&qdev->pdev->dev, sizeof(*msg), GFP_KERNEL);
if (!msg) {
@@ -233,8 +236,6 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d
goto mhi_unprepare;
}
- dev_set_drvdata(&mhi_dev->dev, qdev);
- qdev->bootlog_ch = mhi_dev;
return 0;
mhi_unprepare:
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 3b415e2c9431..4c70bd949d53 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -29,6 +29,8 @@
#include "mhi_controller.h"
#include "qaic.h"
#include "qaic_debugfs.h"
+#include "qaic_ras.h"
+#include "qaic_ssr.h"
#include "qaic_timesync.h"
#include "sahara.h"
@@ -269,6 +271,13 @@ static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
return ret;
}
+ ret = qaic_sysfs_init(qddev);
+ if (ret) {
+ drm_dev_unregister(drm);
+ pci_dbg(qdev->pdev, "qaic_sysfs_init failed %d\n", ret);
+ return ret;
+ }
+
qaic_debugfs_init(qddev);
return ret;
@@ -280,6 +289,7 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
struct drm_device *drm = to_drm(qddev);
struct qaic_user *usr;
+ qaic_sysfs_remove(qddev);
drm_dev_unregister(drm);
qddev->partition_id = 0;
/*
@@ -381,6 +391,7 @@ void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
qaic_notify_reset(qdev);
/* start tearing things down */
+ qaic_clean_up_ssr(qdev);
for (i = 0; i < qdev->num_dbc; ++i)
release_dbc(qdev, i);
}
@@ -430,11 +441,18 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev,
qdev->qts_wq = qaicm_wq_init(drm, "qaic_ts");
if (IS_ERR(qdev->qts_wq))
return NULL;
+ qdev->ssr_wq = qaicm_wq_init(drm, "qaic_ssr");
+ if (IS_ERR(qdev->ssr_wq))
+ return NULL;
ret = qaicm_srcu_init(drm, &qdev->dev_lock);
if (ret)
return NULL;
+ ret = qaic_ssr_init(qdev, drm);
+ if (ret)
+ pci_info(pdev, "QAIC SSR crashdump collection not supported.\n");
+
qdev->qddev = qddev;
qdev->pdev = pdev;
qddev->qdev = qdev;
@@ -453,6 +471,9 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev,
return NULL;
init_waitqueue_head(&qdev->dbc[i].dbc_release);
INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
+ ret = drmm_mutex_init(drm, &qdev->dbc[i].req_lock);
+ if (ret)
+ return NULL;
}
return qdev;
@@ -541,7 +562,7 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
qdev->dbc[i].irq = pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1);
if (!qdev->single_msi)
disable_irq_nosync(qdev->dbc[i].irq);
- INIT_WORK(&qdev->dbc[i].poll_work, irq_polling_work);
+ INIT_WORK(&qdev->dbc[i].poll_work, qaic_irq_polling_work);
}
}
@@ -656,6 +677,92 @@ static const struct pci_error_handlers qaic_pci_err_handler = {
.reset_done = qaic_pci_reset_done,
};
+static bool qaic_is_under_reset(struct qaic_device *qdev)
+{
+ int rcu_id;
+ bool ret;
+
+ rcu_id = srcu_read_lock(&qdev->dev_lock);
+ ret = qdev->dev_state != QAIC_ONLINE;
+ srcu_read_unlock(&qdev->dev_lock, rcu_id);
+ return ret;
+}
+
+static bool qaic_data_path_busy(struct qaic_device *qdev)
+{
+ bool ret = false;
+ int dev_rcu_id;
+ int i;
+
+ dev_rcu_id = srcu_read_lock(&qdev->dev_lock);
+ if (qdev->dev_state != QAIC_ONLINE) {
+ srcu_read_unlock(&qdev->dev_lock, dev_rcu_id);
+ return false;
+ }
+ for (i = 0; i < qdev->num_dbc; i++) {
+ struct dma_bridge_chan *dbc = &qdev->dbc[i];
+ unsigned long flags;
+ int ch_rcu_id;
+
+ ch_rcu_id = srcu_read_lock(&dbc->ch_lock);
+ if (!dbc->usr || !dbc->in_use) {
+ srcu_read_unlock(&dbc->ch_lock, ch_rcu_id);
+ continue;
+ }
+ spin_lock_irqsave(&dbc->xfer_lock, flags);
+ ret = !list_empty(&dbc->xfer_list);
+ spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+ srcu_read_unlock(&dbc->ch_lock, ch_rcu_id);
+ if (ret)
+ break;
+ }
+ srcu_read_unlock(&qdev->dev_lock, dev_rcu_id);
+ return ret;
+}
+
+static int qaic_pm_suspend(struct device *dev)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+
+ dev_dbg(dev, "Suspending..\n");
+ if (qaic_data_path_busy(qdev)) {
+ dev_dbg(dev, "Device's datapath is busy. Aborting suspend..\n");
+ return -EBUSY;
+ }
+ if (qaic_is_under_reset(qdev)) {
+ dev_dbg(dev, "Device is under reset. Aborting suspend..\n");
+ return -EBUSY;
+ }
+ qaic_mqts_ch_stop_timer(qdev->mqts_ch);
+ qaic_pci_reset_prepare(qdev->pdev);
+ pci_save_state(qdev->pdev);
+ pci_disable_device(qdev->pdev);
+ pci_set_power_state(qdev->pdev, PCI_D3hot);
+ return 0;
+}
+
+static int qaic_pm_resume(struct device *dev)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+ int ret;
+
+ dev_dbg(dev, "Resuming..\n");
+ pci_set_power_state(qdev->pdev, PCI_D0);
+ pci_restore_state(qdev->pdev);
+ ret = pci_enable_device(qdev->pdev);
+ if (ret) {
+ dev_err(dev, "pci_enable_device failed on resume %d\n", ret);
+ return ret;
+ }
+ pci_set_master(qdev->pdev);
+ qaic_pci_reset_done(qdev->pdev);
+ return 0;
+}
+
+static const struct dev_pm_ops qaic_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(qaic_pm_suspend, qaic_pm_resume)
+};
+
static struct pci_driver qaic_pci_driver = {
.name = QAIC_NAME,
.id_table = qaic_ids,
@@ -663,6 +770,9 @@ static struct pci_driver qaic_pci_driver = {
.remove = qaic_pci_remove,
.shutdown = qaic_pci_shutdown,
.err_handler = &qaic_pci_err_handler,
+ .driver = {
+ .pm = pm_sleep_ptr(&qaic_pm_ops),
+ },
};
static int __init qaic_init(void)
@@ -695,8 +805,19 @@ static int __init qaic_init(void)
if (ret)
pr_debug("qaic: qaic_bootlog_register failed %d\n", ret);
+ ret = qaic_ras_register();
+ if (ret)
+ pr_debug("qaic: qaic_ras_register failed %d\n", ret);
+ ret = qaic_ssr_register();
+ if (ret) {
+ pr_debug("qaic: qaic_ssr_register failed %d\n", ret);
+ goto free_bootlog;
+ }
+
return 0;
+free_bootlog:
+ qaic_bootlog_unregister();
free_mhi:
mhi_driver_unregister(&qaic_mhi_driver);
free_pci:
@@ -722,6 +843,8 @@ static void __exit qaic_exit(void)
* reinitializing the link_up state after the cleanup is done.
*/
link_up = true;
+ qaic_ssr_unregister();
+ qaic_ras_unregister();
qaic_bootlog_unregister();
qaic_timesync_deinit();
sahara_unregister();
diff --git a/drivers/accel/qaic/qaic_ras.c b/drivers/accel/qaic/qaic_ras.c
new file mode 100644
index 000000000000..f1d52a710136
--- /dev/null
+++ b/drivers/accel/qaic/qaic_ras.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+/* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */
+
+#include <asm/byteorder.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mhi.h>
+
+#include "qaic.h"
+#include "qaic_ras.h"
+
+#define MAGIC 0x55AA
+#define VERSION 0x2
+#define HDR_SZ 12
+#define NUM_TEMP_LVL 3
+#define POWER_BREAK BIT(0)
+
+enum msg_type {
+ MSG_PUSH, /* async push from device */
+ MSG_REQ, /* sync request to device */
+ MSG_RESP, /* sync response from device */
+};
+
+enum err_type {
+ CE, /* correctable error */
+ UE, /* uncorrectable error */
+ UE_NF, /* uncorrectable error that is non-fatal, expect a disruption */
+ ERR_TYPE_MAX,
+};
+
+static const char * const err_type_str[] = {
+ [CE] = "Correctable",
+ [UE] = "Uncorrectable",
+ [UE_NF] = "Uncorrectable Non-Fatal",
+};
+
+static const char * const err_class_str[] = {
+ [CE] = "Warning",
+ [UE] = "Fatal",
+ [UE_NF] = "Warning",
+};
+
+enum err_source {
+ SOC_MEM,
+ PCIE,
+ DDR,
+ SYS_BUS1,
+ SYS_BUS2,
+ NSP_MEM,
+ TSENS,
+};
+
+static const char * const err_src_str[TSENS + 1] = {
+ [SOC_MEM] = "SoC Memory",
+ [PCIE] = "PCIE",
+ [DDR] = "DDR",
+ [SYS_BUS1] = "System Bus source 1",
+ [SYS_BUS2] = "System Bus source 2",
+ [NSP_MEM] = "NSP Memory",
+ [TSENS] = "Temperature Sensors",
+};
+
+struct ras_data {
+ /* header start */
+ /* Magic number to validate the message */
+ u16 magic;
+ /* RAS version number */
+ u16 ver;
+ u32 seq_num;
+ /* RAS message type */
+ u8 type;
+ u8 id;
+ /* Size of the RAS message, excluding the header, in bytes */
+ u16 len;
+ /* header end */
+ s32 result;
+ /*
+ * Error source
+ * 0 : SoC Memory
+ * 1 : PCIE
+ * 2 : DDR
+ * 3 : System Bus source 1
+ * 4 : System Bus source 2
+ * 5 : NSP Memory
+ * 6 : Temperature Sensors
+ */
+ u32 source;
+ /*
+ * Stores the error type, there are three types of error in RAS
+ * 0 : correctable error (CE)
+ * 1 : uncorrectable error (UE)
+ * 2 : uncorrectable error that is non-fatal (UE_NF)
+ */
+ u32 err_type;
+ u32 err_threshold;
+ u32 ce_count;
+ u32 ue_count;
+ u32 intr_num;
+ /* Data specific to error source */
+ u8 syndrome[64];
+} __packed;
+
+struct soc_mem_syndrome {
+ u64 error_address[8];
+} __packed;
+
+struct nsp_mem_syndrome {
+ u32 error_address[8];
+ u8 nsp_id;
+} __packed;
+
+struct ddr_syndrome {
+ u32 count;
+ u32 irq_status;
+ u32 data_31_0[2];
+ u32 data_63_32[2];
+ u32 data_95_64[2];
+ u32 data_127_96[2];
+ u32 addr_lsb;
+ u16 addr_msb;
+ u16 parity_bits;
+ u16 instance;
+ u16 err_type;
+} __packed;
+
+struct tsens_syndrome {
+ u32 threshold_type;
+ s32 temp;
+} __packed;
+
+struct sysbus1_syndrome {
+ u32 slave;
+ u32 err_type;
+ u16 addr[8];
+ u8 instance;
+} __packed;
+
+struct sysbus2_syndrome {
+ u32 lsb3;
+ u32 msb3;
+ u32 lsb2;
+ u32 msb2;
+ u32 ext_id;
+ u16 path;
+ u16 op_type;
+ u16 len;
+ u16 redirect;
+ u8 valid;
+ u8 word_error;
+ u8 non_secure;
+ u8 opc;
+ u8 error_code;
+ u8 trans_type;
+ u8 addr_space;
+ u8 instance;
+} __packed;
+
+struct pcie_syndrome {
+ /* CE info */
+ u32 bad_tlp;
+ u32 bad_dllp;
+ u32 replay_rollover;
+ u32 replay_timeout;
+ u32 rx_err;
+ u32 internal_ce_count;
+ /* UE_NF info */
+ u32 fc_timeout;
+ u32 poison_tlp;
+ u32 ecrc_err;
+ u32 unsupported_req;
+ u32 completer_abort;
+ u32 completion_timeout;
+ /* UE info */
+ u32 addr;
+ u8 index;
+ /*
+ * Flag to indicate specific event of PCIe
+ * BIT(0): Power break (low power)
+ * BIT(1) to BIT(7): Reserved
+ */
+ u8 flag;
+} __packed;
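/*
 * Illustrative sketch (not part of this patch): testing the power-break
 * event bit carried in the PCIe syndrome flag field defined above.
 */
static bool example_pcie_power_break(const struct pcie_syndrome *pcie)
{
	return pcie->flag & POWER_BREAK;	/* BIT(0): low-power power break */
}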
+
+static const char * const threshold_type_str[NUM_TEMP_LVL] = {
+ [0] = "lower",
+ [1] = "upper",
+ [2] = "critical",
+};
+
+static void ras_msg_to_cpu(struct ras_data *msg)
+{
+ struct sysbus1_syndrome *sysbus1_syndrome = (struct sysbus1_syndrome *)&msg->syndrome[0];
+ struct sysbus2_syndrome *sysbus2_syndrome = (struct sysbus2_syndrome *)&msg->syndrome[0];
+ struct soc_mem_syndrome *soc_syndrome = (struct soc_mem_syndrome *)&msg->syndrome[0];
+ struct nsp_mem_syndrome *nsp_syndrome = (struct nsp_mem_syndrome *)&msg->syndrome[0];
+ struct tsens_syndrome *tsens_syndrome = (struct tsens_syndrome *)&msg->syndrome[0];
+ struct pcie_syndrome *pcie_syndrome = (struct pcie_syndrome *)&msg->syndrome[0];
+ struct ddr_syndrome *ddr_syndrome = (struct ddr_syndrome *)&msg->syndrome[0];
+ int i;
+
+ le16_to_cpus(&msg->magic);
+ le16_to_cpus(&msg->ver);
+ le32_to_cpus(&msg->seq_num);
+ le16_to_cpus(&msg->len);
+ le32_to_cpus(&msg->result);
+ le32_to_cpus(&msg->source);
+ le32_to_cpus(&msg->err_type);
+ le32_to_cpus(&msg->err_threshold);
+ le32_to_cpus(&msg->ce_count);
+ le32_to_cpus(&msg->ue_count);
+ le32_to_cpus(&msg->intr_num);
+
+ switch (msg->source) {
+ case SOC_MEM:
+ for (i = 0; i < 8; i++)
+ le64_to_cpus(&soc_syndrome->error_address[i]);
+ break;
+ case PCIE:
+ le32_to_cpus(&pcie_syndrome->bad_tlp);
+ le32_to_cpus(&pcie_syndrome->bad_dllp);
+ le32_to_cpus(&pcie_syndrome->replay_rollover);
+ le32_to_cpus(&pcie_syndrome->replay_timeout);
+ le32_to_cpus(&pcie_syndrome->rx_err);
+ le32_to_cpus(&pcie_syndrome->internal_ce_count);
+ le32_to_cpus(&pcie_syndrome->fc_timeout);
+ le32_to_cpus(&pcie_syndrome->poison_tlp);
+ le32_to_cpus(&pcie_syndrome->ecrc_err);
+ le32_to_cpus(&pcie_syndrome->unsupported_req);
+ le32_to_cpus(&pcie_syndrome->completer_abort);
+ le32_to_cpus(&pcie_syndrome->completion_timeout);
+ le32_to_cpus(&pcie_syndrome->addr);
+ break;
+ case DDR:
+ le16_to_cpus(&ddr_syndrome->instance);
+ le16_to_cpus(&ddr_syndrome->err_type);
+ le32_to_cpus(&ddr_syndrome->count);
+ le32_to_cpus(&ddr_syndrome->irq_status);
+ le32_to_cpus(&ddr_syndrome->data_31_0[0]);
+ le32_to_cpus(&ddr_syndrome->data_31_0[1]);
+ le32_to_cpus(&ddr_syndrome->data_63_32[0]);
+ le32_to_cpus(&ddr_syndrome->data_63_32[1]);
+ le32_to_cpus(&ddr_syndrome->data_95_64[0]);
+ le32_to_cpus(&ddr_syndrome->data_95_64[1]);
+ le32_to_cpus(&ddr_syndrome->data_127_96[0]);
+ le32_to_cpus(&ddr_syndrome->data_127_96[1]);
+ le16_to_cpus(&ddr_syndrome->parity_bits);
+ le16_to_cpus(&ddr_syndrome->addr_msb);
+ le32_to_cpus(&ddr_syndrome->addr_lsb);
+ break;
+ case SYS_BUS1:
+ le32_to_cpus(&sysbus1_syndrome->slave);
+ le32_to_cpus(&sysbus1_syndrome->err_type);
+ for (i = 0; i < 8; i++)
+ le16_to_cpus(&sysbus1_syndrome->addr[i]);
+ break;
+ case SYS_BUS2:
+ le16_to_cpus(&sysbus2_syndrome->op_type);
+ le16_to_cpus(&sysbus2_syndrome->len);
+ le16_to_cpus(&sysbus2_syndrome->redirect);
+ le16_to_cpus(&sysbus2_syndrome->path);
+ le32_to_cpus(&sysbus2_syndrome->ext_id);
+ le32_to_cpus(&sysbus2_syndrome->lsb2);
+ le32_to_cpus(&sysbus2_syndrome->msb2);
+ le32_to_cpus(&sysbus2_syndrome->lsb3);
+ le32_to_cpus(&sysbus2_syndrome->msb3);
+ break;
+ case NSP_MEM:
+ for (i = 0; i < 8; i++)
+ le32_to_cpus(&nsp_syndrome->error_address[i]);
+ break;
+ case TSENS:
+ le32_to_cpus(&tsens_syndrome->threshold_type);
+ le32_to_cpus(&tsens_syndrome->temp);
+ break;
+ }
+}
+
+static void decode_ras_msg(struct qaic_device *qdev, struct ras_data *msg)
+{
+ struct sysbus1_syndrome *sysbus1_syndrome = (struct sysbus1_syndrome *)&msg->syndrome[0];
+ struct sysbus2_syndrome *sysbus2_syndrome = (struct sysbus2_syndrome *)&msg->syndrome[0];
+ struct soc_mem_syndrome *soc_syndrome = (struct soc_mem_syndrome *)&msg->syndrome[0];
+ struct nsp_mem_syndrome *nsp_syndrome = (struct nsp_mem_syndrome *)&msg->syndrome[0];
+ struct tsens_syndrome *tsens_syndrome = (struct tsens_syndrome *)&msg->syndrome[0];
+ struct pcie_syndrome *pcie_syndrome = (struct pcie_syndrome *)&msg->syndrome[0];
+ struct ddr_syndrome *ddr_syndrome = (struct ddr_syndrome *)&msg->syndrome[0];
+ char *class;
+ char *level;
+
+ if (msg->magic != MAGIC) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid magic %x\n", msg->magic);
+ return;
+ }
+
+ if (!msg->ver || msg->ver > VERSION) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid version %d\n", msg->ver);
+ return;
+ }
+
+ if (msg->type != MSG_PUSH) {
+ pci_warn(qdev->pdev, "Dropping non-PUSH RAS message\n");
+ return;
+ }
+
+ if (msg->len != sizeof(*msg) - HDR_SZ) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid len %d\n", msg->len);
+ return;
+ }
+
+ if (msg->err_type >= ERR_TYPE_MAX) {
+ pci_warn(qdev->pdev, "Dropping RAS message with err type %d\n", msg->err_type);
+ return;
+ }
+
+ if (msg->err_type == UE)
+ level = KERN_ERR;
+ else
+ level = KERN_WARNING;
+
+ switch (msg->source) {
+ case SOC_MEM:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ soc_syndrome->error_address[0],
+ soc_syndrome->error_address[1],
+ soc_syndrome->error_address[2],
+ soc_syndrome->error_address[3],
+ soc_syndrome->error_address[4],
+ soc_syndrome->error_address[5],
+ soc_syndrome->error_address[6],
+ soc_syndrome->error_address[7]);
+ break;
+ case PCIE:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold);
+
+ switch (msg->err_type) {
+ case CE:
+ /*
+ * Modeled after AER prints. This continues the dev_printk() from a few
+ * lines up. We reduce duplication of code, but also avoid re-printing the
+ * PCI device info so that the end result looks uniform to the log user.
+ */
+ printk(KERN_WARNING pr_fmt("Syndrome:\n Bad TLP count %d\n Bad DLLP count %d\n Replay Rollover count %d\n Replay Timeout count %d\n Recv Error count %d\n Internal CE count %d\n"),
+ pcie_syndrome->bad_tlp,
+ pcie_syndrome->bad_dllp,
+ pcie_syndrome->replay_rollover,
+ pcie_syndrome->replay_timeout,
+ pcie_syndrome->rx_err,
+ pcie_syndrome->internal_ce_count);
+ if (msg->ver > 0x1)
+ pr_warn(" Power break %s\n",
+ pcie_syndrome->flag & POWER_BREAK ? "ON" : "OFF");
+ break;
+ case UE:
+ printk(KERN_ERR pr_fmt("Syndrome:\n Index %d\n Address 0x%x\n"),
+ pcie_syndrome->index, pcie_syndrome->addr);
+ break;
+ case UE_NF:
+ printk(KERN_WARNING pr_fmt("Syndrome:\n FC timeout count %d\n Poisoned TLP count %d\n ECRC error count %d\n Unsupported request count %d\n Completer abort count %d\n Completion timeout count %d\n"),
+ pcie_syndrome->fc_timeout,
+ pcie_syndrome->poison_tlp,
+ pcie_syndrome->ecrc_err,
+ pcie_syndrome->unsupported_req,
+ pcie_syndrome->completer_abort,
+ pcie_syndrome->completion_timeout);
+ break;
+ default:
+ break;
+ }
+ break;
+ case DDR:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n Instance %d\n Count %d\n Data 31_0 0x%x 0x%x\n Data 63_32 0x%x 0x%x\n Data 95_64 0x%x 0x%x\n Data 127_96 0x%x 0x%x\n Parity bits 0x%x\n Address msb 0x%x\n Address lsb 0x%x\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ ddr_syndrome->instance,
+ ddr_syndrome->count,
+ ddr_syndrome->data_31_0[1],
+ ddr_syndrome->data_31_0[0],
+ ddr_syndrome->data_63_32[1],
+ ddr_syndrome->data_63_32[0],
+ ddr_syndrome->data_95_64[1],
+ ddr_syndrome->data_95_64[0],
+ ddr_syndrome->data_127_96[1],
+ ddr_syndrome->data_127_96[0],
+ ddr_syndrome->parity_bits,
+ ddr_syndrome->addr_msb,
+ ddr_syndrome->addr_lsb);
+ break;
+ case SYS_BUS1:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n instance %d\n %s\n err_type %d\n address0 0x%x\n address1 0x%x\n address2 0x%x\n address3 0x%x\n address4 0x%x\n address5 0x%x\n address6 0x%x\n address7 0x%x\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ sysbus1_syndrome->instance,
+ sysbus1_syndrome->slave ? "Slave" : "Master",
+ sysbus1_syndrome->err_type,
+ sysbus1_syndrome->addr[0],
+ sysbus1_syndrome->addr[1],
+ sysbus1_syndrome->addr[2],
+ sysbus1_syndrome->addr[3],
+ sysbus1_syndrome->addr[4],
+ sysbus1_syndrome->addr[5],
+ sysbus1_syndrome->addr[6],
+ sysbus1_syndrome->addr[7]);
+ break;
+ case SYS_BUS2:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n instance %d\n valid %d\n word error %d\n non-secure %d\n opc %d\n error code %d\n transaction type %d\n address space %d\n operation type %d\n len %d\n redirect %d\n path %d\n ext_id %d\n lsb2 %d\n msb2 %d\n lsb3 %d\n msb3 %d\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ sysbus2_syndrome->instance,
+ sysbus2_syndrome->valid,
+ sysbus2_syndrome->word_error,
+ sysbus2_syndrome->non_secure,
+ sysbus2_syndrome->opc,
+ sysbus2_syndrome->error_code,
+ sysbus2_syndrome->trans_type,
+ sysbus2_syndrome->addr_space,
+ sysbus2_syndrome->op_type,
+ sysbus2_syndrome->len,
+ sysbus2_syndrome->redirect,
+ sysbus2_syndrome->path,
+ sysbus2_syndrome->ext_id,
+ sysbus2_syndrome->lsb2,
+ sysbus2_syndrome->msb2,
+ sysbus2_syndrome->lsb3,
+ sysbus2_syndrome->msb3);
+ break;
+ case NSP_MEM:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n NSP ID %d\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ nsp_syndrome->nsp_id,
+ nsp_syndrome->error_address[0],
+ nsp_syndrome->error_address[1],
+ nsp_syndrome->error_address[2],
+ nsp_syndrome->error_address[3],
+ nsp_syndrome->error_address[4],
+ nsp_syndrome->error_address[5],
+ nsp_syndrome->error_address[6],
+ nsp_syndrome->error_address[7]);
+ break;
+ case TSENS:
+ if (tsens_syndrome->threshold_type >= NUM_TEMP_LVL) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid temp threshold %d\n",
+ tsens_syndrome->threshold_type);
+ break;
+ }
+
+ if (msg->err_type)
+ class = "Fatal";
+ else if (tsens_syndrome->threshold_type)
+ class = "Critical";
+ else
+ class = "Warning";
+
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n %s threshold\n %d deg C\n",
+ class,
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ threshold_type_str[tsens_syndrome->threshold_type],
+ tsens_syndrome->temp);
+ break;
+ }
+
+ /* Uncorrectable errors are fatal */
+ if (msg->err_type == UE)
+ mhi_soc_reset(qdev->mhi_cntrl);
+
+ switch (msg->err_type) {
+ case CE:
+ if (qdev->ce_count != UINT_MAX)
+ qdev->ce_count++;
+ break;
+ case UE:
+ if (qdev->ue_count != UINT_MAX)
+ qdev->ue_count++;
+ break;
+ case UE_NF:
+ if (qdev->ue_nf_count != UINT_MAX)
+ qdev->ue_nf_count++;
+ break;
+ default:
+ /* not possible */
+ break;
+ }
+}
+
+static ssize_t ce_count_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+
+ return sysfs_emit(buf, "%d\n", qdev->ce_count);
+}
+
+static ssize_t ue_count_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+
+ return sysfs_emit(buf, "%d\n", qdev->ue_count);
+}
+
+static ssize_t ue_nonfatal_count_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+
+ return sysfs_emit(buf, "%d\n", qdev->ue_nf_count);
+}
+
+static DEVICE_ATTR_RO(ce_count);
+static DEVICE_ATTR_RO(ue_count);
+static DEVICE_ATTR_RO(ue_nonfatal_count);
+
+static struct attribute *ras_attrs[] = {
+ &dev_attr_ce_count.attr,
+ &dev_attr_ue_count.attr,
+ &dev_attr_ue_nonfatal_count.attr,
+ NULL,
+};
+
+static struct attribute_group ras_group = {
+ .attrs = ras_attrs,
+};
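+
+/*
+ * Usage note (illustrative, not driver code): the group above attaches the
+ * counters to the PCI device, so userspace can poll them via sysfs, e.g.
+ * (the PCI address is hypothetical):
+ *
+ * cat /sys/bus/pci/devices/0000:01:00.0/ce_count
+ * cat /sys/bus/pci/devices/0000:01:00.0/ue_count
+ * cat /sys/bus/pci/devices/0000:01:00.0/ue_nonfatal_count
+ */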
+
+static int qaic_ras_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
+ struct ras_data *resp;
+ int ret;
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ return ret;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ mhi_unprepare_from_transfer(mhi_dev);
+ return -ENOMEM;
+ }
+
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, resp, sizeof(*resp), MHI_EOT);
+ if (ret) {
+ kfree(resp);
+ mhi_unprepare_from_transfer(mhi_dev);
+ return ret;
+ }
+
+ ret = device_add_group(&qdev->pdev->dev, &ras_group);
+ if (ret) {
+ mhi_unprepare_from_transfer(mhi_dev);
+ pci_dbg(qdev->pdev, "ras add sysfs failed %d\n", ret);
+ return ret;
+ }
+
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+ qdev->ras_ch = mhi_dev;
+
+ return ret;
+}
+
+static void qaic_ras_mhi_remove(struct mhi_device *mhi_dev)
+{
+ struct qaic_device *qdev;
+
+ qdev = dev_get_drvdata(&mhi_dev->dev);
+ qdev->ras_ch = NULL;
+ device_remove_group(&qdev->pdev->dev, &ras_group);
+ mhi_unprepare_from_transfer(mhi_dev);
+}
+
+static void qaic_ras_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) {}
+
+static void qaic_ras_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
+ struct ras_data *msg = mhi_result->buf_addr;
+ int ret;
+
+ if (mhi_result->transaction_status) {
+ kfree(msg);
+ return;
+ }
+
+ ras_msg_to_cpu(msg);
+ decode_ras_msg(qdev, msg);
+
+ ret = mhi_queue_buf(qdev->ras_ch, DMA_FROM_DEVICE, msg, sizeof(*msg), MHI_EOT);
+ if (ret) {
+ dev_err(&mhi_dev->dev, "Cannot requeue RAS recv buf %d\n", ret);
+ kfree(msg);
+ }
+}
+
+static const struct mhi_device_id qaic_ras_mhi_match_table[] = {
+ { .chan = "QAIC_STATUS", },
+ {},
+};
+
+static struct mhi_driver qaic_ras_mhi_driver = {
+ .id_table = qaic_ras_mhi_match_table,
+ .remove = qaic_ras_mhi_remove,
+ .probe = qaic_ras_mhi_probe,
+ .ul_xfer_cb = qaic_ras_mhi_ul_xfer_cb,
+ .dl_xfer_cb = qaic_ras_mhi_dl_xfer_cb,
+ .driver = {
+ .name = "qaic_ras",
+ },
+};
+
+int qaic_ras_register(void)
+{
+ return mhi_driver_register(&qaic_ras_mhi_driver);
+}
+
+void qaic_ras_unregister(void)
+{
+ mhi_driver_unregister(&qaic_ras_mhi_driver);
+}
diff --git a/drivers/accel/qaic/qaic_ras.h b/drivers/accel/qaic/qaic_ras.h
new file mode 100644
index 000000000000..d44a4eeeb060
--- /dev/null
+++ b/drivers/accel/qaic/qaic_ras.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020, The Linux Foundation. All rights reserved. */
+
+#ifndef __QAIC_RAS_H__
+#define __QAIC_RAS_H__
+
+int qaic_ras_register(void);
+void qaic_ras_unregister(void);
+
+#endif /* __QAIC_RAS_H__ */
diff --git a/drivers/accel/qaic/qaic_ssr.c b/drivers/accel/qaic/qaic_ssr.c
new file mode 100644
index 000000000000..9b662d690371
--- /dev/null
+++ b/drivers/accel/qaic/qaic_ssr.c
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#include <asm/byteorder.h>
+#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
+#include <linux/devcoredump.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mhi.h>
+#include <linux/workqueue.h>
+
+#include "qaic.h"
+#include "qaic_ssr.h"
+
+#define SSR_RESP_MSG_SZ 32
+#define SSR_MHI_BUF_SIZE SZ_64K
+#define SSR_MEM_READ_DATA_SIZE ((u64)SSR_MHI_BUF_SIZE - sizeof(struct ssr_crashdump))
+#define SSR_MEM_READ_CHUNK_SIZE ((u64)SSR_MEM_READ_DATA_SIZE - sizeof(struct ssr_memory_read_rsp))
+
+#define DEBUG_TRANSFER_INFO BIT(0)
+#define DEBUG_TRANSFER_INFO_RSP BIT(1)
+#define MEMORY_READ BIT(2)
+#define MEMORY_READ_RSP BIT(3)
+#define DEBUG_TRANSFER_DONE BIT(4)
+#define DEBUG_TRANSFER_DONE_RSP BIT(5)
+#define SSR_EVENT BIT(8)
+#define SSR_EVENT_RSP BIT(9)
+
+#define SSR_EVENT_NACK BIT(0)
+#define BEFORE_SHUTDOWN BIT(1)
+#define AFTER_SHUTDOWN BIT(2)
+#define BEFORE_POWER_UP BIT(3)
+#define AFTER_POWER_UP BIT(4)
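+
+/*
+ * Sketch of a complete SSR exchange as handled below (device messages on
+ * the left, host actions on the right; derived from this file's handlers,
+ * not a normative protocol description):
+ *
+ * SSR_EVENT(BEFORE_SHUTDOWN)  -> set DBC state, enter SSR, send SSR_EVENT_RSP
+ * SSR_EVENT(AFTER_SHUTDOWN)   -> set DBC state, send SSR_EVENT_RSP
+ * SSR_EVENT(BEFORE_POWER_UP)  -> set DBC state, send SSR_EVENT_RSP
+ * DEBUG_TRANSFER_INFO         -> allocate dump bookkeeping, send
+ *                                DEBUG_TRANSFER_INFO_RSP (ACK/NACK), then
+ *                                issue MEMORY_READ for the table
+ * MEMORY_READ_RSP             -> copy chunk, issue next MEMORY_READ until
+ *                                the table and all regions are downloaded,
+ *                                then send DEBUG_TRANSFER_DONE
+ * DEBUG_TRANSFER_DONE_RSP     -> publish the dump via dev_coredumpv()
+ * SSR_EVENT(AFTER_POWER_UP)   -> set DBC state, exit SSR, send SSR_EVENT_RSP
+ */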
+
+struct debug_info_table {
+ /* Save preferences. Default is mandatory */
+ u64 save_perf;
+ /* Base address of the debug region */
+ u64 mem_base;
+ /* Size of debug region in bytes */
+ u64 len;
+ /* Description */
+ char desc[20];
+ /* Filename of debug region */
+ char filename[20];
+};
+
+struct _ssr_hdr {
+ __le32 cmd;
+ __le32 len;
+ __le32 dbc_id;
+};
+
+struct ssr_hdr {
+ u32 cmd;
+ u32 len;
+ u32 dbc_id;
+};
+
+struct ssr_debug_transfer_info {
+ struct ssr_hdr hdr;
+ u32 resv;
+ u64 tbl_addr;
+ u64 tbl_len;
+} __packed;
+
+struct ssr_debug_transfer_info_rsp {
+ struct _ssr_hdr hdr;
+ __le32 ret;
+} __packed;
+
+struct ssr_memory_read {
+ struct _ssr_hdr hdr;
+ __le32 resv;
+ __le64 addr;
+ __le64 len;
+} __packed;
+
+struct ssr_memory_read_rsp {
+ struct _ssr_hdr hdr;
+ __le32 resv;
+ u8 data[];
+} __packed;
+
+struct ssr_debug_transfer_done {
+ struct _ssr_hdr hdr;
+ __le32 resv;
+} __packed;
+
+struct ssr_debug_transfer_done_rsp {
+ struct _ssr_hdr hdr;
+ __le32 ret;
+} __packed;
+
+struct ssr_event {
+ struct ssr_hdr hdr;
+ u32 event;
+} __packed;
+
+struct ssr_event_rsp {
+ struct _ssr_hdr hdr;
+ __le32 event;
+} __packed;
+
+struct ssr_resp {
+ /* Work struct to schedule work coming on QAIC_SSR channel */
+ struct work_struct work;
+ /* Root struct of device, used to access device resources */
+ struct qaic_device *qdev;
+ /* Buffer used by MHI for transfer requests */
+ u8 data[] __aligned(8);
+};
+
+/* SSR crashdump bookkeeping structure */
+struct ssr_dump_info {
+ /* DBC associated with this SSR crashdump */
+ struct dma_bridge_chan *dbc;
+ /*
+ * Used once the crashdump download completes and we switch back to
+ * waiting on SSR events
+ */
+ struct ssr_resp *resp;
+ /* MEMORY READ request MHI buffer. */
+ struct ssr_memory_read *read_buf_req;
+ /* TRUE: ->read_buf_req is queued for MHI transaction. FALSE: Otherwise */
+ bool read_buf_req_queued;
+ /* Address of table in host */
+ void *tbl_addr;
+ /* Total size of table */
+ u64 tbl_len;
+ /* Offset within the table (->tbl_addr) where the next chunk will be written */
+ u64 tbl_off;
+ /* Address of table in device/target */
+ u64 tbl_addr_dev;
+ /* Ptr to the entire dump */
+ void *dump_addr;
+ /* Entire crashdump size */
+ u64 dump_sz;
+ /* Offset within the crashdump (->dump_addr) where the next chunk will be written */
+ u64 dump_off;
+ /* Points to the table entry we are currently downloading */
+ struct debug_info_table *tbl_ent;
+ /* Offset in the current table entry (->tbl_ent) for the next chunk */
+ u64 tbl_ent_off;
+};
+
+struct ssr_crashdump {
+ /*
+ * Points to a bookkeeping struct maintained by the MHI SSR device
+ * while downloading an SSR crashdump. It is NULL when no crashdump
+ * download is in progress.
+ */
+ struct ssr_dump_info *dump_info;
+ /* Work struct to schedule work coming on QAIC_SSR channel */
+ struct work_struct work;
+ /* Root struct of device, used to access device resources */
+ struct qaic_device *qdev;
+ /* Buffer used by MHI for transfer requests */
+ u8 data[];
+};
+
+#define QAIC_SSR_DUMP_V1_MAGIC 0x1234567890abcdef
+#define QAIC_SSR_DUMP_V1_VER 1
+struct dump_file_meta {
+ u64 magic;
+ u64 version;
+ u64 size; /* Total size of the entire dump */
+ u64 tbl_len; /* Length of the table in bytes */
+};
+
+/*
+ * Layout of crashdump
+ * +------------------------------------------+
+ * | Crashdump Meta structure |
+ * | type: struct dump_file_meta |
+ * +------------------------------------------+
+ * | Crashdump Table |
+ * | type: array of struct debug_info_table |
+ * | |
+ * | |
+ * | |
+ * +------------------------------------------+
+ * | Crashdump |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +------------------------------------------+
+ */
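+
+/*
+ * Illustrative sketch (not driver code): given the layout above, a
+ * userspace consumer of the resulting devcoredump could walk it roughly
+ * like this, reusing the dump_file_meta and debug_info_table definitions
+ * from this file (save_region() is a hypothetical helper):
+ *
+ *	struct dump_file_meta *meta = dump;
+ *	struct debug_info_table *tbl = dump + sizeof(*meta);
+ *	u64 nents = meta->tbl_len / sizeof(*tbl);
+ *	void *data = dump + sizeof(*meta) + meta->tbl_len;
+ *	u64 i;
+ *
+ *	for (i = 0; i < nents; i++) {
+ *		save_region(tbl[i].filename, data, tbl[i].len);
+ *		data += tbl[i].len;
+ *	}
+ */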
+
+static void free_ssr_dump_info(struct ssr_crashdump *ssr_crash)
+{
+ struct ssr_dump_info *dump_info = ssr_crash->dump_info;
+
+ ssr_crash->dump_info = NULL;
+ if (!dump_info)
+ return;
+ if (!dump_info->read_buf_req_queued)
+ kfree(dump_info->read_buf_req);
+ vfree(dump_info->tbl_addr);
+ vfree(dump_info->dump_addr);
+ kfree(dump_info);
+}
+
+void qaic_clean_up_ssr(struct qaic_device *qdev)
+{
+ struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf;
+
+ if (!ssr_crash)
+ return;
+
+ qaic_dbc_exit_ssr(qdev);
+ free_ssr_dump_info(ssr_crash);
+}
+
+static int alloc_dump(struct ssr_dump_info *dump_info)
+{
+ struct debug_info_table *tbl_ent = dump_info->tbl_addr;
+ struct dump_file_meta *dump_meta;
+ u64 tbl_sz_lp = 0;
+ u64 dump_size = 0;
+
+ while (tbl_sz_lp < dump_info->tbl_len) {
+ le64_to_cpus(&tbl_ent->save_perf);
+ le64_to_cpus(&tbl_ent->mem_base);
+ le64_to_cpus(&tbl_ent->len);
+
+ if (tbl_ent->len == 0)
+ return -EINVAL;
+
+ dump_size += tbl_ent->len;
+ tbl_ent++;
+ tbl_sz_lp += sizeof(*tbl_ent);
+ }
+
+ dump_info->dump_sz = dump_size + dump_info->tbl_len + sizeof(*dump_meta);
+ dump_info->dump_addr = vzalloc(dump_info->dump_sz);
+ if (!dump_info->dump_addr)
+ return -ENOMEM;
+
+ /* Copy crashdump meta and table */
+ dump_meta = dump_info->dump_addr;
+ dump_meta->magic = QAIC_SSR_DUMP_V1_MAGIC;
+ dump_meta->version = QAIC_SSR_DUMP_V1_VER;
+ dump_meta->size = dump_info->dump_sz;
+ dump_meta->tbl_len = dump_info->tbl_len;
+ memcpy(dump_info->dump_addr + sizeof(*dump_meta), dump_info->tbl_addr, dump_info->tbl_len);
+ /* Offset by crashdump meta and table (copied above) */
+ dump_info->dump_off = dump_info->tbl_len + sizeof(*dump_meta);
+
+ return 0;
+}
+
+static int send_xfer_done(struct qaic_device *qdev, void *resp, u32 dbc_id)
+{
+ struct ssr_debug_transfer_done *xfer_done;
+ int ret;
+
+ xfer_done = kmalloc(sizeof(*xfer_done), GFP_KERNEL);
+ if (!xfer_done) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = mhi_queue_buf(qdev->ssr_ch, DMA_FROM_DEVICE, resp, SSR_RESP_MSG_SZ, MHI_EOT);
+ if (ret)
+ goto free_xfer_done;
+
+ xfer_done->hdr.cmd = cpu_to_le32(DEBUG_TRANSFER_DONE);
+ xfer_done->hdr.len = cpu_to_le32(sizeof(*xfer_done));
+ xfer_done->hdr.dbc_id = cpu_to_le32(dbc_id);
+
+ ret = mhi_queue_buf(qdev->ssr_ch, DMA_TO_DEVICE, xfer_done, sizeof(*xfer_done), MHI_EOT);
+ if (ret)
+ goto free_xfer_done;
+
+ return 0;
+
+free_xfer_done:
+ kfree(xfer_done);
+out:
+ return ret;
+}
+
+static int mem_read_req(struct qaic_device *qdev, u64 dest_addr, u64 dest_len)
+{
+ struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf;
+ struct ssr_memory_read *read_buf_req;
+ struct ssr_dump_info *dump_info;
+ int ret;
+
+ dump_info = ssr_crash->dump_info;
+ ret = mhi_queue_buf(qdev->ssr_ch, DMA_FROM_DEVICE, ssr_crash->data, SSR_MEM_READ_DATA_SIZE,
+ MHI_EOT);
+ if (ret)
+ goto out;
+
+ read_buf_req = dump_info->read_buf_req;
+ read_buf_req->hdr.cmd = cpu_to_le32(MEMORY_READ);
+ read_buf_req->hdr.len = cpu_to_le32(sizeof(*read_buf_req));
+ read_buf_req->hdr.dbc_id = cpu_to_le32(qdev->ssr_dbc);
+ read_buf_req->addr = cpu_to_le64(dest_addr);
+ read_buf_req->len = cpu_to_le64(dest_len);
+
+ ret = mhi_queue_buf(qdev->ssr_ch, DMA_TO_DEVICE, read_buf_req, sizeof(*read_buf_req),
+ MHI_EOT);
+ if (!ret)
+ dump_info->read_buf_req_queued = true;
+
+out:
+ return ret;
+}
+
+static int ssr_copy_table(struct ssr_dump_info *dump_info, void *data, u64 len)
+{
+ if (len > dump_info->tbl_len - dump_info->tbl_off)
+ return -EINVAL;
+
+ memcpy(dump_info->tbl_addr + dump_info->tbl_off, data, len);
+ dump_info->tbl_off += len;
+
+ /* Entire table has been downloaded, alloc dump memory */
+ if (dump_info->tbl_off == dump_info->tbl_len) {
+ dump_info->tbl_ent = dump_info->tbl_addr;
+ return alloc_dump(dump_info);
+ }
+
+ return 0;
+}
+
+static int ssr_copy_dump(struct ssr_dump_info *dump_info, void *data, u64 len)
+{
+ struct debug_info_table *tbl_ent;
+
+ tbl_ent = dump_info->tbl_ent;
+
+ if (len > tbl_ent->len - dump_info->tbl_ent_off)
+ return -EINVAL;
+
+ memcpy(dump_info->dump_addr + dump_info->dump_off, data, len);
+ dump_info->dump_off += len;
+ dump_info->tbl_ent_off += len;
+
+ /*
+ * The current segment (an entry in the table) of the crashdump is
+ * complete, move on to the next one
+ */
+ if (tbl_ent->len == dump_info->tbl_ent_off) {
+ dump_info->tbl_ent++;
+ dump_info->tbl_ent_off = 0;
+ }
+
+ return 0;
+}
+
+static void ssr_dump_worker(struct work_struct *work)
+{
+ struct ssr_crashdump *ssr_crash = container_of(work, struct ssr_crashdump, work);
+ struct qaic_device *qdev = ssr_crash->qdev;
+ struct ssr_memory_read_rsp *mem_rd_resp;
+ struct debug_info_table *tbl_ent;
+ struct ssr_dump_info *dump_info;
+ u64 dest_addr, dest_len;
+ struct _ssr_hdr *_hdr;
+ struct ssr_hdr hdr;
+ u64 data_len;
+ int ret;
+
+ mem_rd_resp = (struct ssr_memory_read_rsp *)ssr_crash->data;
+ _hdr = &mem_rd_resp->hdr;
+ hdr.cmd = le32_to_cpu(_hdr->cmd);
+ hdr.len = le32_to_cpu(_hdr->len);
+ hdr.dbc_id = le32_to_cpu(_hdr->dbc_id);
+
+ if (hdr.dbc_id != qdev->ssr_dbc)
+ goto reset_device;
+
+ dump_info = ssr_crash->dump_info;
+ if (!dump_info)
+ goto reset_device;
+
+ if (hdr.cmd != MEMORY_READ_RSP)
+ goto free_dump_info;
+
+ if (hdr.len > SSR_MEM_READ_DATA_SIZE)
+ goto free_dump_info;
+
+ data_len = hdr.len - sizeof(*mem_rd_resp);
+
+ if (dump_info->tbl_off < dump_info->tbl_len) /* Chunk belongs to table */
+ ret = ssr_copy_table(dump_info, mem_rd_resp->data, data_len);
+ else /* Chunk belongs to crashdump */
+ ret = ssr_copy_dump(dump_info, mem_rd_resp->data, data_len);
+
+ if (ret)
+ goto free_dump_info;
+
+ if (dump_info->tbl_off < dump_info->tbl_len) {
+ /* Continue downloading table */
+ dest_addr = dump_info->tbl_addr_dev + dump_info->tbl_off;
+ dest_len = min(SSR_MEM_READ_CHUNK_SIZE, dump_info->tbl_len - dump_info->tbl_off);
+ ret = mem_read_req(qdev, dest_addr, dest_len);
+ } else if (dump_info->dump_off < dump_info->dump_sz) {
+ /* Continue downloading crashdump */
+ tbl_ent = dump_info->tbl_ent;
+ dest_addr = tbl_ent->mem_base + dump_info->tbl_ent_off;
+ dest_len = min(SSR_MEM_READ_CHUNK_SIZE, tbl_ent->len - dump_info->tbl_ent_off);
+ ret = mem_read_req(qdev, dest_addr, dest_len);
+ } else {
+ /* Crashdump download complete */
+ ret = send_xfer_done(qdev, dump_info->resp->data, hdr.dbc_id);
+ }
+
+ /* Most likely a MHI xfer has failed */
+ if (ret)
+ goto free_dump_info;
+
+ return;
+
+free_dump_info:
+ /* Free the allocated memory */
+ free_ssr_dump_info(ssr_crash);
+reset_device:
+ /*
+ * Crashdump collection began after the subsystem crash, but something
+ * went wrong along the way. Rather than handling the error, just reset
+ * the device; the best effort has been made.
+ */
+ mhi_soc_reset(qdev->mhi_cntrl);
+}
+
+static struct ssr_dump_info *alloc_dump_info(struct qaic_device *qdev,
+ struct ssr_debug_transfer_info *debug_info)
+{
+ struct ssr_dump_info *dump_info;
+ int ret;
+
+ le64_to_cpus(&debug_info->tbl_len);
+ le64_to_cpus(&debug_info->tbl_addr);
+
+ if (debug_info->tbl_len == 0 ||
+ debug_info->tbl_len % sizeof(struct debug_info_table) != 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Allocate SSR crashdump book keeping structure */
+ dump_info = kzalloc(sizeof(*dump_info), GFP_KERNEL);
+ if (!dump_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Buffer used to send MEMORY READ request to device via MHI */
+ dump_info->read_buf_req = kzalloc(sizeof(*dump_info->read_buf_req), GFP_KERNEL);
+ if (!dump_info->read_buf_req) {
+ ret = -ENOMEM;
+ goto free_dump_info;
+ }
+
+ /* Crashdump meta table buffer */
+ dump_info->tbl_addr = vzalloc(debug_info->tbl_len);
+ if (!dump_info->tbl_addr) {
+ ret = -ENOMEM;
+ goto free_read_buf_req;
+ }
+
+ dump_info->tbl_addr_dev = debug_info->tbl_addr;
+ dump_info->tbl_len = debug_info->tbl_len;
+
+ return dump_info;
+
+free_read_buf_req:
+ kfree(dump_info->read_buf_req);
+free_dump_info:
+ kfree(dump_info);
+out:
+ return ERR_PTR(ret);
+}
+
+static int dbg_xfer_info_rsp(struct qaic_device *qdev, struct dma_bridge_chan *dbc,
+ struct ssr_debug_transfer_info *debug_info)
+{
+ struct ssr_debug_transfer_info_rsp *debug_rsp;
+ struct ssr_crashdump *ssr_crash = NULL;
+ int ret = 0, ret2;
+
+ debug_rsp = kmalloc(sizeof(*debug_rsp), GFP_KERNEL);
+ if (!debug_rsp)
+ return -ENOMEM;
+
+ if (!qdev->ssr_mhi_buf) {
+ ret = -ENOMEM;
+ goto send_rsp;
+ }
+
+ if (dbc->state != DBC_STATE_BEFORE_POWER_UP) {
+ ret = -EINVAL;
+ goto send_rsp;
+ }
+
+ ssr_crash = qdev->ssr_mhi_buf;
+ ssr_crash->dump_info = alloc_dump_info(qdev, debug_info);
+ if (IS_ERR(ssr_crash->dump_info)) {
+ ret = PTR_ERR(ssr_crash->dump_info);
+ ssr_crash->dump_info = NULL;
+ }
+
+send_rsp:
+ debug_rsp->hdr.cmd = cpu_to_le32(DEBUG_TRANSFER_INFO_RSP);
+ debug_rsp->hdr.len = cpu_to_le32(sizeof(*debug_rsp));
+ debug_rsp->hdr.dbc_id = cpu_to_le32(dbc->id);
+ /*
+ * 0 = Return an ACK confirming the host is ready to download crashdump
+ * 1 = Return a NACK confirming the host is not ready to download crashdump
+ */
+ debug_rsp->ret = cpu_to_le32(ret ? 1 : 0);
+
+ ret2 = mhi_queue_buf(qdev->ssr_ch, DMA_TO_DEVICE, debug_rsp, sizeof(*debug_rsp), MHI_EOT);
+ if (ret2) {
+ free_ssr_dump_info(ssr_crash);
+ kfree(debug_rsp);
+ return ret2;
+ }
+
+ return ret;
+}
+
+static void dbg_xfer_done_rsp(struct qaic_device *qdev, struct dma_bridge_chan *dbc,
+ struct ssr_debug_transfer_done_rsp *xfer_rsp)
+{
+ struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf;
+ u32 status = le32_to_cpu(xfer_rsp->ret);
+ struct device *dev = &qdev->pdev->dev;
+ struct ssr_dump_info *dump_info;
+
+ dump_info = ssr_crash->dump_info;
+ if (!dump_info)
+ return;
+
+ if (status) {
+ free_ssr_dump_info(ssr_crash);
+ return;
+ }
+
+ dev_coredumpv(dev, dump_info->dump_addr, dump_info->dump_sz, GFP_KERNEL);
+ /* dev_coredumpv will free dump_info->dump_addr */
+ dump_info->dump_addr = NULL;
+ free_ssr_dump_info(ssr_crash);
+}
+
+static void ssr_worker(struct work_struct *work)
+{
+ struct ssr_resp *resp = container_of(work, struct ssr_resp, work);
+ struct ssr_hdr *hdr = (struct ssr_hdr *)resp->data;
+ struct ssr_dump_info *dump_info = NULL;
+ struct qaic_device *qdev = resp->qdev;
+ struct ssr_crashdump *ssr_crash;
+ struct ssr_event_rsp *event_rsp;
+ struct dma_bridge_chan *dbc;
+ struct ssr_event *event;
+ u32 ssr_event_ack;
+ int ret;
+
+ le32_to_cpus(&hdr->cmd);
+ le32_to_cpus(&hdr->len);
+ le32_to_cpus(&hdr->dbc_id);
+
+ if (hdr->len > SSR_RESP_MSG_SZ)
+ goto out;
+
+ if (hdr->dbc_id >= qdev->num_dbc)
+ goto out;
+
+ dbc = &qdev->dbc[hdr->dbc_id];
+
+ switch (hdr->cmd) {
+ case DEBUG_TRANSFER_INFO:
+ ret = dbg_xfer_info_rsp(qdev, dbc, (struct ssr_debug_transfer_info *)resp->data);
+ if (ret)
+ break;
+
+ ssr_crash = qdev->ssr_mhi_buf;
+ dump_info = ssr_crash->dump_info;
+ dump_info->dbc = dbc;
+ dump_info->resp = resp;
+
+ /* Start by downloading debug table */
+ ret = mem_read_req(qdev, dump_info->tbl_addr_dev,
+ min(dump_info->tbl_len, SSR_MEM_READ_CHUNK_SIZE));
+ if (ret) {
+ free_ssr_dump_info(ssr_crash);
+ break;
+ }
+
+ /*
+ * Everything has gone fine so far, so the crashdump will now be
+ * collected chunk by chunk. Do not queue a response buffer for
+ * SSR cmds until the crashdump is complete.
+ */
+ return;
+ case SSR_EVENT:
+ event = (struct ssr_event *)hdr;
+ le32_to_cpus(&event->event);
+ ssr_event_ack = event->event;
+ ssr_crash = qdev->ssr_mhi_buf;
+
+ switch (event->event) {
+ case BEFORE_SHUTDOWN:
+ set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_BEFORE_SHUTDOWN);
+ qaic_dbc_enter_ssr(qdev, hdr->dbc_id);
+ break;
+ case AFTER_SHUTDOWN:
+ set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_AFTER_SHUTDOWN);
+ break;
+ case BEFORE_POWER_UP:
+ set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_BEFORE_POWER_UP);
+ break;
+ case AFTER_POWER_UP:
+ /*
+ * A non-NULL dump info means this SSR event arrived
+ * while a crashdump download for this DBC is still in
+ * progress. NACK the SSR event
+ */
+ if (ssr_crash && ssr_crash->dump_info) {
+ free_ssr_dump_info(ssr_crash);
+ ssr_event_ack = SSR_EVENT_NACK;
+ break;
+ }
+
+ set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_AFTER_POWER_UP);
+ break;
+ default:
+ break;
+ }
+
+ event_rsp = kmalloc(sizeof(*event_rsp), GFP_KERNEL);
+ if (!event_rsp)
+ break;
+
+ event_rsp->hdr.cmd = cpu_to_le32(SSR_EVENT_RSP);
+ event_rsp->hdr.len = cpu_to_le32(sizeof(*event_rsp));
+ event_rsp->hdr.dbc_id = cpu_to_le32(hdr->dbc_id);
+ event_rsp->event = cpu_to_le32(ssr_event_ack);
+
+ ret = mhi_queue_buf(qdev->ssr_ch, DMA_TO_DEVICE, event_rsp, sizeof(*event_rsp),
+ MHI_EOT);
+ if (ret)
+ kfree(event_rsp);
+
+ if (event->event == AFTER_POWER_UP && ssr_event_ack != SSR_EVENT_NACK) {
+ qaic_dbc_exit_ssr(qdev);
+ set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_IDLE);
+ }
+
+ break;
+ case DEBUG_TRANSFER_DONE_RSP:
+ dbg_xfer_done_rsp(qdev, dbc, (struct ssr_debug_transfer_done_rsp *)hdr);
+ break;
+ default:
+ break;
+ }
+
+out:
+ ret = mhi_queue_buf(qdev->ssr_ch, DMA_FROM_DEVICE, resp->data, SSR_RESP_MSG_SZ, MHI_EOT);
+ if (ret)
+ kfree(resp);
+}
+
+static int qaic_ssr_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
+ struct ssr_resp *resp;
+ int ret;
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ return ret;
+
+ resp = kzalloc(sizeof(*resp) + SSR_RESP_MSG_SZ, GFP_KERNEL);
+ if (!resp) {
+ mhi_unprepare_from_transfer(mhi_dev);
+ return -ENOMEM;
+ }
+
+ resp->qdev = qdev;
+ INIT_WORK(&resp->work, ssr_worker);
+
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, resp->data, SSR_RESP_MSG_SZ, MHI_EOT);
+ if (ret) {
+ kfree(resp);
+ mhi_unprepare_from_transfer(mhi_dev);
+ return ret;
+ }
+
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+ qdev->ssr_ch = mhi_dev;
+
+ return 0;
+}
+
+static void qaic_ssr_mhi_remove(struct mhi_device *mhi_dev)
+{
+ struct qaic_device *qdev;
+
+ qdev = dev_get_drvdata(&mhi_dev->dev);
+ mhi_unprepare_from_transfer(qdev->ssr_ch);
+ qdev->ssr_ch = NULL;
+}
+
+static void qaic_ssr_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
+ struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf;
+ struct _ssr_hdr *hdr = mhi_result->buf_addr;
+ struct ssr_dump_info *dump_info;
+
+ if (mhi_result->transaction_status) {
+ kfree(mhi_result->buf_addr);
+ return;
+ }
+
+ /*
+ * MEMORY READ is used to download the crashdump, chunk by chunk, in a
+ * series of MEMORY READ SSR commands. To avoid repeated kmalloc() and
+ * kfree() of the same MEMORY READ request buffer, allocate only one
+ * such buffer and free it only once.
+ */
+ if (le32_to_cpu(hdr->cmd) == MEMORY_READ) {
+ dump_info = ssr_crash->dump_info;
+ if (dump_info) {
+ dump_info->read_buf_req_queued = false;
+ return;
+ }
+ }
+
+ kfree(mhi_result->buf_addr);
+}
+
+static void qaic_ssr_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct ssr_resp *resp = container_of(mhi_result->buf_addr, struct ssr_resp, data);
+ struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
+ struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf;
+ bool memory_read_rsp = false;
+
+ if (ssr_crash && ssr_crash->data == mhi_result->buf_addr)
+ memory_read_rsp = true;
+
+ if (mhi_result->transaction_status) {
+ /* Do not free the SSR crashdump buffer as it is allocated via managed APIs */
+ if (!memory_read_rsp)
+ kfree(resp);
+ return;
+ }
+
+ if (memory_read_rsp)
+ queue_work(qdev->ssr_wq, &ssr_crash->work);
+ else
+ queue_work(qdev->ssr_wq, &resp->work);
+}
+
+static const struct mhi_device_id qaic_ssr_mhi_match_table[] = {
+ { .chan = "QAIC_SSR", },
+ {},
+};
+
+static struct mhi_driver qaic_ssr_mhi_driver = {
+ .id_table = qaic_ssr_mhi_match_table,
+ .remove = qaic_ssr_mhi_remove,
+ .probe = qaic_ssr_mhi_probe,
+ .ul_xfer_cb = qaic_ssr_mhi_ul_xfer_cb,
+ .dl_xfer_cb = qaic_ssr_mhi_dl_xfer_cb,
+ .driver = {
+ .name = "qaic_ssr",
+ },
+};
+
+int qaic_ssr_init(struct qaic_device *qdev, struct drm_device *drm)
+{
+ struct ssr_crashdump *ssr_crash;
+
+ qdev->ssr_dbc = QAIC_SSR_DBC_SENTINEL;
+
+ /*
+ * The device reports only one SSR at a time, so a single buffer for
+ * downloading the crashdump is enough.
+ */
+ ssr_crash = drmm_kzalloc(drm, SSR_MHI_BUF_SIZE, GFP_KERNEL);
+ if (!ssr_crash)
+ return -ENOMEM;
+
+ ssr_crash->qdev = qdev;
+ INIT_WORK(&ssr_crash->work, ssr_dump_worker);
+ qdev->ssr_mhi_buf = ssr_crash;
+
+ return 0;
+}
+
+int qaic_ssr_register(void)
+{
+ return mhi_driver_register(&qaic_ssr_mhi_driver);
+}
+
+void qaic_ssr_unregister(void)
+{
+ mhi_driver_unregister(&qaic_ssr_mhi_driver);
+}
diff --git a/drivers/accel/qaic/qaic_ssr.h b/drivers/accel/qaic/qaic_ssr.h
new file mode 100644
index 000000000000..97ccff305750
--- /dev/null
+++ b/drivers/accel/qaic/qaic_ssr.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __QAIC_SSR_H__
+#define __QAIC_SSR_H__
+
+struct drm_device;
+struct qaic_device;
+
+int qaic_ssr_register(void);
+void qaic_ssr_unregister(void);
+void qaic_clean_up_ssr(struct qaic_device *qdev);
+int qaic_ssr_init(struct qaic_device *qdev, struct drm_device *drm);
+#endif /* __QAIC_SSR_H__ */
diff --git a/drivers/accel/qaic/qaic_sysfs.c b/drivers/accel/qaic/qaic_sysfs.c
new file mode 100644
index 000000000000..e0afb0ffb589
--- /dev/null
+++ b/drivers/accel/qaic/qaic_sysfs.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2020-2025, The Linux Foundation. All rights reserved. */
+
+#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+
+#include "qaic.h"
+
+#define NAME_LEN 14
+
+struct dbc_attribute {
+ struct device_attribute dev_attr;
+ u32 dbc_id;
+ char name[NAME_LEN];
+};
+
+static ssize_t dbc_state_show(struct device *dev, struct device_attribute *a, char *buf)
+{
+ struct dbc_attribute *dbc_attr = container_of(a, struct dbc_attribute, dev_attr);
+ struct drm_minor *minor = dev_get_drvdata(dev);
+ struct qaic_device *qdev;
+
+ qdev = to_qaic_device(minor->dev);
+ return sysfs_emit(buf, "%d\n", qdev->dbc[dbc_attr->dbc_id].state);
+}
+
+void set_dbc_state(struct qaic_device *qdev, u32 dbc_id, unsigned int state)
+{
+ struct device *kdev = to_accel_kdev(qdev->qddev);
+ char *envp[3] = {};
+ char state_str[16];
+ char id_str[12];
+
+ envp[0] = id_str;
+ envp[1] = state_str;
+
+ if (state >= DBC_STATE_MAX)
+ return;
+ if (dbc_id >= qdev->num_dbc)
+ return;
+ if (state == qdev->dbc[dbc_id].state)
+ return;
+
+ scnprintf(id_str, ARRAY_SIZE(id_str), "DBC_ID=%d", dbc_id);
+ scnprintf(state_str, ARRAY_SIZE(state_str), "DBC_STATE=%d", state);
+
+ qdev->dbc[dbc_id].state = state;
+ kobject_uevent_env(&kdev->kobj, KOBJ_CHANGE, envp);
+}
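+
+/*
+ * Illustrative example: moving DBC 2 to state 3 emits a KOBJ_CHANGE uevent
+ * on the accel device whose environment contains (values hypothetical):
+ *
+ * DBC_ID=2
+ * DBC_STATE=3
+ */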
+
+int qaic_sysfs_init(struct qaic_drm_device *qddev)
+{
+ struct device *kdev = to_accel_kdev(qddev);
+ struct drm_device *drm = to_drm(qddev);
+ u32 num_dbc = qddev->qdev->num_dbc;
+ struct dbc_attribute *dbc_attrs;
+ int i, ret;
+
+ dbc_attrs = drmm_kcalloc(drm, num_dbc, sizeof(*dbc_attrs), GFP_KERNEL);
+ if (!dbc_attrs)
+ return -ENOMEM;
+
+ for (i = 0; i < num_dbc; ++i) {
+ struct dbc_attribute *dbc_attr = &dbc_attrs[i];
+
+ sysfs_attr_init(&dbc_attr->dev_attr.attr);
+ dbc_attr->dbc_id = i;
+ scnprintf(dbc_attr->name, NAME_LEN, "dbc%d_state", i);
+ dbc_attr->dev_attr.attr.name = dbc_attr->name;
+ dbc_attr->dev_attr.attr.mode = 0444;
+ dbc_attr->dev_attr.show = dbc_state_show;
+ ret = sysfs_create_file(&kdev->kobj, &dbc_attr->dev_attr.attr);
+ if (ret) {
+ int j;
+
+ for (j = 0; j < i; ++j) {
+ dbc_attr = &dbc_attrs[j];
+ sysfs_remove_file(&kdev->kobj, &dbc_attr->dev_attr.attr);
+ }
+ drmm_kfree(drm, dbc_attrs);
+ return ret;
+ }
+ }
+
+ qddev->sysfs_attrs = dbc_attrs;
+ return 0;
+}
+
+void qaic_sysfs_remove(struct qaic_drm_device *qddev)
+{
+ struct dbc_attribute *dbc_attrs = qddev->sysfs_attrs;
+ struct device *kdev = to_accel_kdev(qddev);
+ u32 num_dbc = qddev->qdev->num_dbc;
+ int i;
+
+ if (!dbc_attrs)
+ return;
+
+ qddev->sysfs_attrs = NULL;
+ for (i = 0; i < num_dbc; ++i)
+ sysfs_remove_file(&kdev->kobj, &dbc_attrs[i].dev_attr.attr);
+ drmm_kfree(to_drm(qddev), dbc_attrs);
+}
diff --git a/drivers/accel/qaic/qaic_timesync.c b/drivers/accel/qaic/qaic_timesync.c
index 3fac540f8e03..8af2475f4f36 100644
--- a/drivers/accel/qaic/qaic_timesync.c
+++ b/drivers/accel/qaic/qaic_timesync.c
@@ -171,6 +171,13 @@ mod_timer:
dev_err(mqtsdev->dev, "%s mod_timer error:%d\n", __func__, ret);
}
+void qaic_mqts_ch_stop_timer(struct mhi_device *mhi_dev)
+{
+ struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);
+
+ timer_delete_sync(&mqtsdev->timer);
+}
+
static int qaic_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
@@ -206,6 +213,7 @@ static int qaic_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_devi
timer->expires = jiffies + msecs_to_jiffies(timesync_delay_ms);
add_timer(timer);
dev_set_drvdata(&mhi_dev->dev, mqtsdev);
+ qdev->mqts_ch = mhi_dev;
return 0;
@@ -221,6 +229,7 @@ static void qaic_timesync_remove(struct mhi_device *mhi_dev)
{
struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);
+ mqtsdev->qdev->mqts_ch = NULL;
timer_delete_sync(&mqtsdev->timer);
mhi_unprepare_from_transfer(mqtsdev->mhi_dev);
kfree(mqtsdev->sync_msg);
diff --git a/drivers/accel/qaic/qaic_timesync.h b/drivers/accel/qaic/qaic_timesync.h
index 851b7acd43bb..77b9c2b55057 100644
--- a/drivers/accel/qaic/qaic_timesync.h
+++ b/drivers/accel/qaic/qaic_timesync.h
@@ -6,6 +6,9 @@
#ifndef __QAIC_TIMESYNC_H__
#define __QAIC_TIMESYNC_H__
+#include <linux/mhi.h>
+
int qaic_timesync_init(void);
void qaic_timesync_deinit(void);
+void qaic_mqts_ch_stop_timer(struct mhi_device *mhi_dev);
#endif /* __QAIC_TIMESYNC_H__ */
diff --git a/drivers/accel/qaic/sahara.c b/drivers/accel/qaic/sahara.c
index 3ebcc1f7ff58..fd3c3b2d1fd3 100644
--- a/drivers/accel/qaic/sahara.c
+++ b/drivers/accel/qaic/sahara.c
@@ -159,6 +159,7 @@ struct sahara_context {
struct sahara_packet *rx;
struct work_struct fw_work;
struct work_struct dump_work;
+ struct work_struct read_data_work;
struct mhi_device *mhi_dev;
const char * const *image_table;
u32 table_size;
@@ -174,7 +175,10 @@ struct sahara_context {
u64 dump_image_offset;
void *mem_dump_freespace;
u64 dump_images_left;
+ u32 read_data_offset;
+ u32 read_data_length;
bool is_mem_dump_mode;
+ bool non_streaming;
};
static const char * const aic100_image_table[] = {
@@ -194,6 +198,7 @@ static const char * const aic200_image_table[] = {
[23] = "qcom/aic200/aop.mbn",
[32] = "qcom/aic200/tz.mbn",
[33] = "qcom/aic200/hypvm.mbn",
+ [38] = "qcom/aic200/xbl_config.elf",
[39] = "qcom/aic200/aic200_abl.elf",
[40] = "qcom/aic200/apdp.mbn",
[41] = "qcom/aic200/devcfg.mbn",
@@ -202,6 +207,7 @@ static const char * const aic200_image_table[] = {
[49] = "qcom/aic200/shrm.elf",
[50] = "qcom/aic200/cpucp.elf",
[51] = "qcom/aic200/aop_devcfg.mbn",
+ [54] = "qcom/aic200/qupv3fw.elf",
[57] = "qcom/aic200/cpucp_dtbs.elf",
[62] = "qcom/aic200/uefi_dtbs.elf",
[63] = "qcom/aic200/xbl_ac_config.mbn",
@@ -213,9 +219,15 @@ static const char * const aic200_image_table[] = {
[69] = "qcom/aic200/dcd.mbn",
[73] = "qcom/aic200/gearvm.mbn",
[74] = "qcom/aic200/sti.bin",
- [75] = "qcom/aic200/pvs.bin",
+ [76] = "qcom/aic200/tz_qti_config.mbn",
+ [78] = "qcom/aic200/pvs.bin",
};
+static bool is_streaming(struct sahara_context *context)
+{
+ return !context->non_streaming;
+}
+
static int sahara_find_image(struct sahara_context *context, u32 image_id)
{
int ret;
@@ -265,6 +277,8 @@ static void sahara_send_reset(struct sahara_context *context)
int ret;
context->is_mem_dump_mode = false;
+ context->read_data_offset = 0;
+ context->read_data_length = 0;
context->tx[0]->cmd = cpu_to_le32(SAHARA_RESET_CMD);
context->tx[0]->length = cpu_to_le32(SAHARA_RESET_LENGTH);
@@ -319,9 +333,39 @@ static void sahara_hello(struct sahara_context *context)
dev_err(&context->mhi_dev->dev, "Unable to send hello response %d\n", ret);
}
+static int read_data_helper(struct sahara_context *context, int buf_index)
+{
+ enum mhi_flags mhi_flag;
+ u32 pkt_data_len;
+ int ret;
+
+ pkt_data_len = min(context->read_data_length, SAHARA_PACKET_MAX_SIZE);
+
+ memcpy(context->tx[buf_index],
+ &context->firmware->data[context->read_data_offset],
+ pkt_data_len);
+
+ context->read_data_offset += pkt_data_len;
+ context->read_data_length -= pkt_data_len;
+
+ if (is_streaming(context) || !context->read_data_length)
+ mhi_flag = MHI_EOT;
+ else
+ mhi_flag = MHI_CHAIN;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE,
+ context->tx[buf_index], pkt_data_len, mhi_flag);
+ if (ret) {
+ dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static void sahara_read_data(struct sahara_context *context)
{
- u32 image_id, data_offset, data_len, pkt_data_len;
+ u32 image_id, data_offset, data_len;
int ret;
int i;
@@ -357,7 +401,7 @@ static void sahara_read_data(struct sahara_context *context)
* and is not needed here on error.
*/
- if (data_len > SAHARA_TRANSFER_MAX_SIZE) {
+ if (context->non_streaming && data_len > SAHARA_TRANSFER_MAX_SIZE) {
dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data len %d exceeds max xfer size %d\n",
data_len, SAHARA_TRANSFER_MAX_SIZE);
sahara_send_reset(context);
@@ -378,22 +422,18 @@ static void sahara_read_data(struct sahara_context *context)
return;
}
- for (i = 0; i < SAHARA_NUM_TX_BUF && data_len; ++i) {
- pkt_data_len = min(data_len, SAHARA_PACKET_MAX_SIZE);
-
- memcpy(context->tx[i], &context->firmware->data[data_offset], pkt_data_len);
+ context->read_data_offset = data_offset;
+ context->read_data_length = data_len;
- data_offset += pkt_data_len;
- data_len -= pkt_data_len;
+ if (is_streaming(context)) {
+ schedule_work(&context->read_data_work);
+ return;
+ }
- ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE,
- context->tx[i], pkt_data_len,
- !data_len ? MHI_EOT : MHI_CHAIN);
- if (ret) {
- dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n",
- ret);
- return;
- }
+ for (i = 0; i < SAHARA_NUM_TX_BUF && context->read_data_length; ++i) {
+ ret = read_data_helper(context, i);
+ if (ret)
+ break;
}
}
@@ -538,6 +578,7 @@ static void sahara_parse_dump_table(struct sahara_context *context)
struct sahara_memory_dump_meta_v1 *dump_meta;
u64 table_nents;
u64 dump_length;
+ u64 mul_bytes;
int ret;
u64 i;
@@ -551,8 +592,9 @@ static void sahara_parse_dump_table(struct sahara_context *context)
dev_table[i].description[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;
dev_table[i].filename[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;
- dump_length = size_add(dump_length, le64_to_cpu(dev_table[i].length));
- if (dump_length == SIZE_MAX) {
+ if (check_add_overflow(dump_length,
+ le64_to_cpu(dev_table[i].length),
+ &dump_length)) {
/* Discard the dump */
sahara_send_reset(context);
return;
@@ -568,14 +610,17 @@ static void sahara_parse_dump_table(struct sahara_context *context)
dev_table[i].filename);
}
- dump_length = size_add(dump_length, sizeof(*dump_meta));
- if (dump_length == SIZE_MAX) {
+ if (check_add_overflow(dump_length, (u64)sizeof(*dump_meta), &dump_length)) {
/* Discard the dump */
sahara_send_reset(context);
return;
}
- dump_length = size_add(dump_length, size_mul(sizeof(*image_out_table), table_nents));
- if (dump_length == SIZE_MAX) {
+ if (check_mul_overflow((u64)sizeof(*image_out_table), table_nents, &mul_bytes)) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+ if (check_add_overflow(dump_length, mul_bytes, &dump_length)) {
/* Discard the dump */
sahara_send_reset(context);
return;
@@ -615,7 +660,7 @@ static void sahara_parse_dump_table(struct sahara_context *context)
/* Request the first chunk of the first image */
context->dump_image = &image_out_table[0];
- dump_length = min(context->dump_image->length, SAHARA_READ_MAX_SIZE);
+ dump_length = min_t(u64, context->dump_image->length, SAHARA_READ_MAX_SIZE);
/* Avoid requesting EOI sized data so that we can identify errors */
if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;
@@ -663,7 +708,7 @@ static void sahara_parse_dump_image(struct sahara_context *context)
/* Get next image chunk */
dump_length = context->dump_image->length - context->dump_image_offset;
- dump_length = min(dump_length, SAHARA_READ_MAX_SIZE);
+ dump_length = min_t(u64, dump_length, SAHARA_READ_MAX_SIZE);
/* Avoid requesting EOI sized data so that we can identify errors */
if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;
@@ -742,6 +787,13 @@ error:
sahara_send_reset(context);
}
+static void sahara_read_data_processing(struct work_struct *work)
+{
+ struct sahara_context *context = container_of(work, struct sahara_context, read_data_work);
+
+ read_data_helper(context, 0);
+}
+
static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
struct sahara_context *context;
@@ -756,34 +808,56 @@ static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_
if (!context->rx)
return -ENOMEM;
+ if (!strcmp(mhi_dev->mhi_cntrl->name, "AIC200")) {
+ context->image_table = aic200_image_table;
+ context->table_size = ARRAY_SIZE(aic200_image_table);
+ } else {
+ context->image_table = aic100_image_table;
+ context->table_size = ARRAY_SIZE(aic100_image_table);
+ context->non_streaming = true;
+ }
+
/*
- * AIC100 defines SAHARA_TRANSFER_MAX_SIZE as the largest value it
- * will request for READ_DATA. This is larger than
- * SAHARA_PACKET_MAX_SIZE, and we need 9x SAHARA_PACKET_MAX_SIZE to
- * cover SAHARA_TRANSFER_MAX_SIZE. When the remote side issues a
- * READ_DATA, it requires a transfer of the exact size requested. We
- * can use MHI_CHAIN to link multiple buffers into a single transfer
- * but the remote side will not consume the buffers until it sees an
- * EOT, thus we need to allocate enough buffers to put in the tx fifo
- * to cover an entire READ_DATA request of the max size.
+ * There are two firmware implementations for READ_DATA handling.
+ * The older "SBL" implementation defines a Sahara transfer size, and
+ * expects that the response is a single transport transfer. If the
+ * FW wants to transfer a file that is larger than the transfer size,
+ * the FW will issue multiple READ_DATA commands. For this
+ * implementation, we need to allocate enough buffers to contain the
+ * entire Sahara transfer size.
+ *
+ * The newer "XBL" implementation does not define a maximum transfer
+ * size and instead expects the data to be streamed over using the
+ * transport level MTU. The FW will issue a single READ_DATA command
+ * of whatever size, and consume multiple transport level transfers
+ * until the expected amount of data is consumed. For this
+ * implementation we only need a single buffer of the transport MTU
+ * but we'll need to be able to use it multiple times for a single
+ * READ_DATA request.
+ *
+ * AIC100 is the SBL implementation and defines SAHARA_TRANSFER_MAX_SIZE
+ * and we need 9x SAHARA_PACKET_MAX_SIZE to cover that. We can use
+ * MHI_CHAIN to link multiple buffers into a single transfer but the
+ * remote side will not consume the buffers until it sees an EOT, thus
+ * we need to allocate enough buffers to put in the tx fifo to cover an
+ * entire READ_DATA request of the max size.
+ *
+ * AIC200 is the XBL implementation, and so a single buffer will work.
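+ *
+ * Concretely (a sketch of the code below, not firmware documentation): in
+ * the SBL case a max-size READ_DATA is answered by queueing up to
+ * SAHARA_NUM_TX_BUF buffers back to back, all but the last flagged
+ * MHI_CHAIN and the last MHI_EOT. In the XBL case read_data_helper()
+ * sends one MHI_EOT buffer at a time and the ul_xfer_cb schedules the
+ * next chunk until read_data_length reaches zero.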
*/
for (i = 0; i < SAHARA_NUM_TX_BUF; ++i) {
- context->tx[i] = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL);
+ context->tx[i] = devm_kzalloc(&mhi_dev->dev,
+ SAHARA_PACKET_MAX_SIZE,
+ GFP_KERNEL);
if (!context->tx[i])
return -ENOMEM;
+ if (is_streaming(context))
+ break;
}
context->mhi_dev = mhi_dev;
INIT_WORK(&context->fw_work, sahara_processing);
INIT_WORK(&context->dump_work, sahara_dump_processing);
-
- if (!strcmp(mhi_dev->mhi_cntrl->name, "AIC200")) {
- context->image_table = aic200_image_table;
- context->table_size = ARRAY_SIZE(aic200_image_table);
- } else {
- context->image_table = aic100_image_table;
- context->table_size = ARRAY_SIZE(aic100_image_table);
- }
+ INIT_WORK(&context->read_data_work, sahara_read_data_processing);
context->active_image_id = SAHARA_IMAGE_ID_NONE;
dev_set_drvdata(&mhi_dev->dev, context);
@@ -814,6 +888,10 @@ static void sahara_mhi_remove(struct mhi_device *mhi_dev)
static void sahara_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
+ struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
+
+ if (!mhi_result->transaction_status && context->read_data_length && is_streaming(context))
+ schedule_work(&context->read_data_work);
}
static void sahara_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
diff --git a/drivers/accel/rocket/Kconfig b/drivers/accel/rocket/Kconfig
new file mode 100644
index 000000000000..16465abe0660
--- /dev/null
+++ b/drivers/accel/rocket/Kconfig
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_ACCEL_ROCKET
+ tristate "Rocket (support for Rockchip NPUs)"
+ depends on DRM_ACCEL
+ depends on (ARCH_ROCKCHIP && ARM64) || COMPILE_TEST
+ depends on ROCKCHIP_IOMMU || COMPILE_TEST
+ depends on MMU
+ select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
+ help
+ Choose this option if you have a Rockchip SoC that contains a
+ compatible Neural Processing Unit (NPU), such as the RK3588. Called by
+ Rockchip either RKNN or RKNPU, it accelerates inference of neural
+ networks.
+
+ The interface exposed to userspace is described in
+ include/uapi/drm/rocket_accel.h and is used by the Rocket userspace
+ driver in Mesa3D.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rocket.
diff --git a/drivers/accel/rocket/Makefile b/drivers/accel/rocket/Makefile
new file mode 100644
index 000000000000..3713dfe223d6
--- /dev/null
+++ b/drivers/accel/rocket/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_DRM_ACCEL_ROCKET) := rocket.o
+
+rocket-y := \
+ rocket_core.o \
+ rocket_device.o \
+ rocket_drv.o \
+ rocket_gem.o \
+ rocket_job.o
diff --git a/drivers/accel/rocket/rocket_core.c b/drivers/accel/rocket/rocket_core.c
new file mode 100644
index 000000000000..abe7719c1db4
--- /dev/null
+++ b/drivers/accel/rocket/rocket_core.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "rocket_core.h"
+#include "rocket_job.h"
+
+int rocket_core_init(struct rocket_core *core)
+{
+ struct device *dev = core->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ u32 version;
+ int err = 0;
+
+ core->resets[0].id = "srst_a";
+ core->resets[1].id = "srst_h";
+ err = devm_reset_control_bulk_get_exclusive(&pdev->dev, ARRAY_SIZE(core->resets),
+ core->resets);
+ if (err)
+ return dev_err_probe(dev, err, "failed to get resets for core %d\n", core->index);
+
+ err = devm_clk_bulk_get(dev, ARRAY_SIZE(core->clks), core->clks);
+ if (err)
+ return dev_err_probe(dev, err, "failed to get clocks for core %d\n", core->index);
+
+ core->pc_iomem = devm_platform_ioremap_resource_byname(pdev, "pc");
+ if (IS_ERR(core->pc_iomem)) {
+ dev_err(dev, "couldn't find PC registers %ld\n", PTR_ERR(core->pc_iomem));
+ return PTR_ERR(core->pc_iomem);
+ }
+
+ core->cna_iomem = devm_platform_ioremap_resource_byname(pdev, "cna");
+ if (IS_ERR(core->cna_iomem)) {
+ dev_err(dev, "couldn't find CNA registers %ld\n", PTR_ERR(core->cna_iomem));
+ return PTR_ERR(core->cna_iomem);
+ }
+
+ core->core_iomem = devm_platform_ioremap_resource_byname(pdev, "core");
+ if (IS_ERR(core->core_iomem)) {
+ dev_err(dev, "couldn't find CORE registers %ld\n", PTR_ERR(core->core_iomem));
+ return PTR_ERR(core->core_iomem);
+ }
+
+ dma_set_max_seg_size(dev, UINT_MAX);
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (err)
+ return err;
+
+ core->iommu_group = iommu_group_get(dev);
+
+ err = rocket_job_init(core);
+ if (err)
+ return err;
+
+ pm_runtime_use_autosuspend(dev);
+
+ /*
+ * As this NPU will most often be used as part of a media pipeline
+ * that ends with presentation on a display, choose 50 ms (~3 frames
+ * at 60 Hz) as the autosuspend delay, which keeps the device powered
+ * up while the pipeline is running.
+ */
+ pm_runtime_set_autosuspend_delay(dev, 50);
+
+ pm_runtime_enable(dev);
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err) {
+ rocket_job_fini(core);
+ return err;
+ }
+
+ version = rocket_pc_readl(core, VERSION);
+ version += rocket_pc_readl(core, VERSION_NUM) & 0xffff;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ dev_info(dev, "Rockchip NPU core %d version: %d\n", core->index, version);
+
+ return 0;
+}
+
+void rocket_core_fini(struct rocket_core *core)
+{
+ pm_runtime_dont_use_autosuspend(core->dev);
+ pm_runtime_disable(core->dev);
+ iommu_group_put(core->iommu_group);
+ core->iommu_group = NULL;
+ rocket_job_fini(core);
+}
+
+void rocket_core_reset(struct rocket_core *core)
+{
+ reset_control_bulk_assert(ARRAY_SIZE(core->resets), core->resets);
+
+ udelay(10);
+
+ reset_control_bulk_deassert(ARRAY_SIZE(core->resets), core->resets);
+}
diff --git a/drivers/accel/rocket/rocket_core.h b/drivers/accel/rocket/rocket_core.h
new file mode 100644
index 000000000000..f6d7382854ca
--- /dev/null
+++ b/drivers/accel/rocket/rocket_core.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_CORE_H__
+#define __ROCKET_CORE_H__
+
+#include <drm/gpu_scheduler.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mutex_types.h>
+#include <linux/reset.h>
+
+#include "rocket_registers.h"
+
+#define rocket_pc_readl(core, reg) \
+ readl((core)->pc_iomem + (REG_PC_##reg))
+#define rocket_pc_writel(core, reg, value) \
+ writel(value, (core)->pc_iomem + (REG_PC_##reg))
+
+#define rocket_cna_readl(core, reg) \
+ readl((core)->cna_iomem + (REG_CNA_##reg) - REG_CNA_S_STATUS)
+#define rocket_cna_writel(core, reg, value) \
+ writel(value, (core)->cna_iomem + (REG_CNA_##reg) - REG_CNA_S_STATUS)
+
+#define rocket_core_readl(core, reg) \
+ readl((core)->core_iomem + (REG_CORE_##reg) - REG_CORE_S_STATUS)
+#define rocket_core_writel(core, reg, value) \
+ writel(value, (core)->core_iomem + (REG_CORE_##reg) - REG_CORE_S_STATUS)
+
+struct rocket_core {
+ struct device *dev;
+ struct rocket_device *rdev;
+ unsigned int index;
+
+ int irq;
+ void __iomem *pc_iomem;
+ void __iomem *cna_iomem;
+ void __iomem *core_iomem;
+ struct clk_bulk_data clks[4];
+ struct reset_control_bulk_data resets[2];
+
+ struct iommu_group *iommu_group;
+
+ struct mutex job_lock;
+ struct rocket_job *in_flight_job;
+
+ spinlock_t fence_lock;
+
+ struct {
+ struct workqueue_struct *wq;
+ struct work_struct work;
+ atomic_t pending;
+ } reset;
+
+ struct drm_gpu_scheduler sched;
+ u64 fence_context;
+ u64 emit_seqno;
+};
+
+int rocket_core_init(struct rocket_core *core);
+void rocket_core_fini(struct rocket_core *core);
+void rocket_core_reset(struct rocket_core *core);
+
+#endif
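
The accessors above subtract the block's first register because the "cna" and
"core" MMIO resources are mapped starting at their block base, while
rocket_registers.h encodes absolute offsets within the NPU address map; the
"pc" resource starts at offset zero, so it needs no correction. A compile-time
sketch of the arithmetic, using the offsets defined in rocket_registers.h
later in this series:

#include <linux/build_bug.h>
#include "rocket_registers.h"

/*
 * rocket_cna_readl(core, S_POINTER) expands to
 * readl(core->cna_iomem + REG_CNA_S_POINTER - REG_CNA_S_STATUS),
 * i.e. a read at byte offset 4 of the "cna" resource.
 */
static_assert(REG_CNA_S_POINTER - REG_CNA_S_STATUS == 0x4);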
diff --git a/drivers/accel/rocket/rocket_device.c b/drivers/accel/rocket/rocket_device.c
new file mode 100644
index 000000000000..46e6ee1e72c5
--- /dev/null
+++ b/drivers/accel/rocket/rocket_device.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_drv.h>
+#include <linux/array_size.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include "rocket_device.h"
+
+struct rocket_device *rocket_device_init(struct platform_device *pdev,
+ const struct drm_driver *rocket_drm_driver)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *core_node;
+ struct rocket_device *rdev;
+ struct drm_device *ddev;
+ unsigned int num_cores = 0;
+ int err;
+
+ rdev = devm_drm_dev_alloc(dev, rocket_drm_driver, struct rocket_device, ddev);
+ if (IS_ERR(rdev))
+ return rdev;
+
+ ddev = &rdev->ddev;
+ dev_set_drvdata(dev, rdev);
+
+ for_each_compatible_node(core_node, NULL, "rockchip,rk3588-rknn-core")
+ if (of_device_is_available(core_node))
+ num_cores++;
+
+ rdev->cores = devm_kcalloc(dev, num_cores, sizeof(*rdev->cores), GFP_KERNEL);
+ if (!rdev->cores)
+ return ERR_PTR(-ENOMEM);
+
+ dma_set_max_seg_size(dev, UINT_MAX);
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (err)
+ return ERR_PTR(err);
+
+ err = devm_mutex_init(dev, &rdev->sched_lock);
+ if (err)
+ return ERR_PTR(err);
+
+ err = drm_dev_register(ddev, 0);
+ if (err)
+ return ERR_PTR(err);
+
+ return rdev;
+}
+
+void rocket_device_fini(struct rocket_device *rdev)
+{
+ WARN_ON(rdev->num_cores > 0);
+
+ drm_dev_unregister(&rdev->ddev);
+}
diff --git a/drivers/accel/rocket/rocket_device.h b/drivers/accel/rocket/rocket_device.h
new file mode 100644
index 000000000000..ce662abc01d3
--- /dev/null
+++ b/drivers/accel/rocket/rocket_device.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_DEVICE_H__
+#define __ROCKET_DEVICE_H__
+
+#include <drm/drm_device.h>
+#include <linux/clk.h>
+#include <linux/container_of.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+
+#include "rocket_core.h"
+
+struct rocket_device {
+ struct drm_device ddev;
+
+ struct mutex sched_lock;
+
+ struct rocket_core *cores;
+ unsigned int num_cores;
+};
+
+struct rocket_device *rocket_device_init(struct platform_device *pdev,
+ const struct drm_driver *rocket_drm_driver);
+void rocket_device_fini(struct rocket_device *rdev);
+#define to_rocket_device(drm_dev) \
+ ((struct rocket_device *)(container_of((drm_dev), struct rocket_device, ddev)))
+
+#endif /* __ROCKET_DEVICE_H__ */
diff --git a/drivers/accel/rocket/rocket_drv.c b/drivers/accel/rocket/rocket_drv.c
new file mode 100644
index 000000000000..5c0b63f0a8f0
--- /dev/null
+++ b/drivers/accel/rocket/rocket_drv.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_accel.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
+#include <drm/rocket_accel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "rocket_drv.h"
+#include "rocket_gem.h"
+#include "rocket_job.h"
+
+/*
+ * Facade device used to expose a single DRM device to userspace; it
+ * schedules jobs to any of the RKNN cores in the system.
+ */
+static struct platform_device *drm_dev;
+static struct rocket_device *rdev;
+
+static void
+rocket_iommu_domain_destroy(struct kref *kref)
+{
+ struct rocket_iommu_domain *domain = container_of(kref, struct rocket_iommu_domain, kref);
+
+ iommu_domain_free(domain->domain);
+ domain->domain = NULL;
+ kfree(domain);
+}
+
+static struct rocket_iommu_domain*
+rocket_iommu_domain_create(struct device *dev)
+{
+ struct rocket_iommu_domain *domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+ void *err;
+
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
+
+ domain->domain = iommu_paging_domain_alloc(dev);
+ if (IS_ERR(domain->domain)) {
+ err = ERR_CAST(domain->domain);
+ kfree(domain);
+ return err;
+ }
+ kref_init(&domain->kref);
+
+ return domain;
+}
+
+struct rocket_iommu_domain *
+rocket_iommu_domain_get(struct rocket_file_priv *rocket_priv)
+{
+ kref_get(&rocket_priv->domain->kref);
+ return rocket_priv->domain;
+}
+
+void
+rocket_iommu_domain_put(struct rocket_iommu_domain *domain)
+{
+ kref_put(&domain->kref, rocket_iommu_domain_destroy);
+}
+
+static int
+rocket_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct rocket_device *rdev = to_rocket_device(dev);
+ struct rocket_file_priv *rocket_priv;
+ u64 start, end;
+ int ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -EINVAL;
+
+ rocket_priv = kzalloc(sizeof(*rocket_priv), GFP_KERNEL);
+ if (!rocket_priv) {
+ ret = -ENOMEM;
+ goto err_put_mod;
+ }
+
+ rocket_priv->rdev = rdev;
+ rocket_priv->domain = rocket_iommu_domain_create(rdev->cores[0].dev);
+ if (IS_ERR(rocket_priv->domain)) {
+ ret = PTR_ERR(rocket_priv->domain);
+ goto err_free;
+ }
+
+ file->driver_priv = rocket_priv;
+
+ start = rocket_priv->domain->domain->geometry.aperture_start;
+ end = rocket_priv->domain->domain->geometry.aperture_end;
+ drm_mm_init(&rocket_priv->mm, start, end - start + 1);
+ mutex_init(&rocket_priv->mm_lock);
+
+ ret = rocket_job_open(rocket_priv);
+ if (ret)
+ goto err_mm_takedown;
+
+ return 0;
+
+err_mm_takedown:
+ mutex_destroy(&rocket_priv->mm_lock);
+ drm_mm_takedown(&rocket_priv->mm);
+ rocket_iommu_domain_put(rocket_priv->domain);
+err_free:
+ kfree(rocket_priv);
+err_put_mod:
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static void
+rocket_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct rocket_file_priv *rocket_priv = file->driver_priv;
+
+ rocket_job_close(rocket_priv);
+ mutex_destroy(&rocket_priv->mm_lock);
+ drm_mm_takedown(&rocket_priv->mm);
+ rocket_iommu_domain_put(rocket_priv->domain);
+ kfree(rocket_priv);
+ module_put(THIS_MODULE);
+}
+
+static const struct drm_ioctl_desc rocket_drm_driver_ioctls[] = {
+#define ROCKET_IOCTL(n, func) \
+ DRM_IOCTL_DEF_DRV(ROCKET_##n, rocket_ioctl_##func, 0)
+
+ ROCKET_IOCTL(CREATE_BO, create_bo),
+ ROCKET_IOCTL(SUBMIT, submit),
+ ROCKET_IOCTL(PREP_BO, prep_bo),
+ ROCKET_IOCTL(FINI_BO, fini_bo),
+};
+
+DEFINE_DRM_ACCEL_FOPS(rocket_accel_driver_fops);
+
+/*
+ * Rocket driver version:
+ * - 1.0 - initial interface
+ */
+static const struct drm_driver rocket_drm_driver = {
+ .driver_features = DRIVER_COMPUTE_ACCEL | DRIVER_GEM,
+ .open = rocket_open,
+ .postclose = rocket_postclose,
+ .gem_create_object = rocket_gem_create_object,
+ .ioctls = rocket_drm_driver_ioctls,
+ .num_ioctls = ARRAY_SIZE(rocket_drm_driver_ioctls),
+ .fops = &rocket_accel_driver_fops,
+ .name = "rocket",
+ .desc = "rocket DRM",
+};
+
+static int rocket_probe(struct platform_device *pdev)
+{
+ if (rdev == NULL) {
+ /* First core probing, initialize DRM device. */
+ rdev = rocket_device_init(drm_dev, &rocket_drm_driver);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to initialize rocket device\n");
+ return PTR_ERR(rdev);
+ }
+ }
+
+ unsigned int core = rdev->num_cores;
+
+ dev_set_drvdata(&pdev->dev, rdev);
+
+ rdev->cores[core].rdev = rdev;
+ rdev->cores[core].dev = &pdev->dev;
+ rdev->cores[core].index = core;
+
+ rdev->num_cores++;
+
+ return rocket_core_init(&rdev->cores[core]);
+}
+
+static void rocket_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ for (unsigned int core = 0; core < rdev->num_cores; core++) {
+ if (rdev->cores[core].dev == dev) {
+ rocket_core_fini(&rdev->cores[core]);
+ rdev->num_cores--;
+ break;
+ }
+ }
+
+ if (rdev->num_cores == 0) {
+ /* Last core removed, deinitialize DRM device. */
+ rocket_device_fini(rdev);
+ rdev = NULL;
+ }
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "rockchip,rk3588-rknn-core" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static int find_core_for_dev(struct device *dev)
+{
+ struct rocket_device *rdev = dev_get_drvdata(dev);
+
+ for (unsigned int core = 0; core < rdev->num_cores; core++) {
+ if (dev == rdev->cores[core].dev)
+ return core;
+ }
+
+ return -1;
+}
+
+static int rocket_device_runtime_resume(struct device *dev)
+{
+ struct rocket_device *rdev = dev_get_drvdata(dev);
+ int core = find_core_for_dev(dev);
+ int err = 0;
+
+ if (core < 0)
+ return -ENODEV;
+
+ err = clk_bulk_prepare_enable(ARRAY_SIZE(rdev->cores[core].clks), rdev->cores[core].clks);
+ if (err) {
+ dev_err(dev, "failed to enable (%d) clocks for core %d\n", err, core);
+ return err;
+ }
+
+ return 0;
+}
+
+static int rocket_device_runtime_suspend(struct device *dev)
+{
+ struct rocket_device *rdev = dev_get_drvdata(dev);
+ int core = find_core_for_dev(dev);
+
+ if (core < 0)
+ return -ENODEV;
+
+ if (!rocket_job_is_idle(&rdev->cores[core]))
+ return -EBUSY;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(rdev->cores[core].clks), rdev->cores[core].clks);
+
+ return 0;
+}
+
+EXPORT_GPL_DEV_PM_OPS(rocket_pm_ops) = {
+ RUNTIME_PM_OPS(rocket_device_runtime_suspend, rocket_device_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
+
+static struct platform_driver rocket_driver = {
+ .probe = rocket_probe,
+ .remove = rocket_remove,
+ .driver = {
+ .name = "rocket",
+ .pm = pm_ptr(&rocket_pm_ops),
+ .of_match_table = dt_match,
+ },
+};
+
+static int __init rocket_register(void)
+{
+ drm_dev = platform_device_register_simple("rknn", -1, NULL, 0);
+ if (IS_ERR(drm_dev))
+ return PTR_ERR(drm_dev);
+
+ return platform_driver_register(&rocket_driver);
+}
+
+static void __exit rocket_unregister(void)
+{
+ platform_driver_unregister(&rocket_driver);
+
+ platform_device_unregister(drm_dev);
+}
+
+module_init(rocket_register);
+module_exit(rocket_unregister);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DRM driver for the Rockchip NPU IP");
+MODULE_AUTHOR("Tomeu Vizoso");
diff --git a/drivers/accel/rocket/rocket_drv.h b/drivers/accel/rocket/rocket_drv.h
new file mode 100644
index 000000000000..2c673bb99ccc
--- /dev/null
+++ b/drivers/accel/rocket/rocket_drv.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_DRV_H__
+#define __ROCKET_DRV_H__
+
+#include <drm/drm_mm.h>
+#include <drm/gpu_scheduler.h>
+
+#include "rocket_device.h"
+
+extern const struct dev_pm_ops rocket_pm_ops;
+
+struct rocket_iommu_domain {
+ struct iommu_domain *domain;
+ struct kref kref;
+};
+
+struct rocket_file_priv {
+ struct rocket_device *rdev;
+
+ struct rocket_iommu_domain *domain;
+ struct drm_mm mm;
+ struct mutex mm_lock;
+
+ struct drm_sched_entity sched_entity;
+};
+
+struct rocket_iommu_domain *rocket_iommu_domain_get(struct rocket_file_priv *rocket_priv);
+void rocket_iommu_domain_put(struct rocket_iommu_domain *domain);
+
+#endif
diff --git a/drivers/accel/rocket/rocket_gem.c b/drivers/accel/rocket/rocket_gem.c
new file mode 100644
index 000000000000..624c4ecf5a34
--- /dev/null
+++ b/drivers/accel/rocket/rocket_gem.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/drm_utils.h>
+#include <drm/rocket_accel.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+
+#include "rocket_drv.h"
+#include "rocket_gem.h"
+
+static void rocket_gem_bo_free(struct drm_gem_object *obj)
+{
+ struct rocket_gem_object *bo = to_rocket_bo(obj);
+ struct rocket_file_priv *rocket_priv = bo->driver_priv;
+ size_t unmapped;
+
+ drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
+
+ unmapped = iommu_unmap(bo->domain->domain, bo->mm.start, bo->size);
+ drm_WARN_ON(obj->dev, unmapped != bo->size);
+
+ mutex_lock(&rocket_priv->mm_lock);
+ drm_mm_remove_node(&bo->mm);
+ mutex_unlock(&rocket_priv->mm_lock);
+
+ rocket_iommu_domain_put(bo->domain);
+ bo->domain = NULL;
+
+ drm_gem_shmem_free(&bo->base);
+}
+
+static const struct drm_gem_object_funcs rocket_gem_funcs = {
+ .free = rocket_gem_bo_free,
+ .print_info = drm_gem_shmem_object_print_info,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = drm_gem_shmem_object_mmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+struct drm_gem_object *rocket_gem_create_object(struct drm_device *dev, size_t size)
+{
+ struct rocket_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ obj->base.base.funcs = &rocket_gem_funcs;
+
+ return &obj->base.base;
+}
+
+int rocket_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct rocket_file_priv *rocket_priv = file->driver_priv;
+ struct drm_rocket_create_bo *args = data;
+ struct drm_gem_shmem_object *shmem_obj;
+ struct rocket_gem_object *rkt_obj;
+ struct drm_gem_object *gem_obj;
+ struct sg_table *sgt;
+ int ret;
+
+ shmem_obj = drm_gem_shmem_create(dev, args->size);
+ if (IS_ERR(shmem_obj))
+ return PTR_ERR(shmem_obj);
+
+ gem_obj = &shmem_obj->base;
+ rkt_obj = to_rocket_bo(gem_obj);
+
+ rkt_obj->driver_priv = rocket_priv;
+ rkt_obj->domain = rocket_iommu_domain_get(rocket_priv);
+ rkt_obj->size = args->size;
+ rkt_obj->offset = 0;
+
+ ret = drm_gem_handle_create(file, gem_obj, &args->handle);
+ drm_gem_object_put(gem_obj);
+ if (ret)
+ goto err;
+
+ sgt = drm_gem_shmem_get_pages_sgt(shmem_obj);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err;
+ }
+
+ mutex_lock(&rocket_priv->mm_lock);
+ ret = drm_mm_insert_node_generic(&rocket_priv->mm, &rkt_obj->mm,
+ rkt_obj->size, PAGE_SIZE,
+ 0, 0);
+ mutex_unlock(&rocket_priv->mm_lock);
+ if (ret)
+ goto err;
+
+ ret = iommu_map_sgtable(rocket_priv->domain->domain,
+ rkt_obj->mm.start,
+ shmem_obj->sgt,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret < 0 || ret < args->size) {
+ drm_err(dev, "failed to map buffer: size=%d request_size=%u\n",
+ ret, args->size);
+ ret = -ENOMEM;
+ goto err_remove_node;
+ }
+
+ /* iommu_map_sgtable might have aligned the size */
+ rkt_obj->size = ret;
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+ args->dma_address = rkt_obj->mm.start;
+
+ return 0;
+
+err_remove_node:
+ mutex_lock(&rocket_priv->mm_lock);
+ drm_mm_remove_node(&rkt_obj->mm);
+ mutex_unlock(&rocket_priv->mm_lock);
+
+err:
+ drm_gem_shmem_object_free(gem_obj);
+
+ return ret;
+}
+
+int rocket_ioctl_prep_bo(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_rocket_prep_bo *args = data;
+ unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
+ struct drm_gem_object *gem_obj;
+ struct drm_gem_shmem_object *shmem_obj;
+ long ret = 0;
+
+ if (args->reserved != 0) {
+ drm_dbg(dev, "Reserved field in drm_rocket_prep_bo struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ gem_obj = drm_gem_object_lookup(file, args->handle);
+ if (!gem_obj)
+ return -ENOENT;
+
+ ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_WRITE, true, timeout);
+ if (!ret)
+ ret = timeout ? -ETIMEDOUT : -EBUSY;
+ else if (ret > 0)
+ ret = 0; /* don't leak the remaining jiffies to userspace */
+
+ shmem_obj = &to_rocket_bo(gem_obj)->base;
+
+ dma_sync_sgtable_for_cpu(dev->dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+
+ drm_gem_object_put(gem_obj);
+
+ return ret;
+}
+
+int rocket_ioctl_fini_bo(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_rocket_fini_bo *args = data;
+ struct drm_gem_shmem_object *shmem_obj;
+ struct rocket_gem_object *rkt_obj;
+ struct drm_gem_object *gem_obj;
+
+ if (args->reserved != 0) {
+ drm_dbg(dev, "Reserved field in drm_rocket_fini_bo struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ gem_obj = drm_gem_object_lookup(file, args->handle);
+ if (!gem_obj)
+ return -ENOENT;
+
+ rkt_obj = to_rocket_bo(gem_obj);
+ shmem_obj = &rkt_obj->base;
+
+ dma_sync_sgtable_for_device(dev->dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+
+ drm_gem_object_put(gem_obj);
+
+ return 0;
+}
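
rocket_ioctl_prep_bo() above interprets timeout_ns as an absolute
CLOCK_MONOTONIC deadline (via drm_timeout_abs_to_jiffies()) and syncs the
buffer for CPU access once the NPU's write fences have signaled, while
rocket_ioctl_fini_bo() hands the buffer back to the device. A hedged
userspace sketch of that bracketing (struct layouts assumed from
include/uapi/drm/rocket_accel.h; fd and handle come from the CREATE_BO
example earlier):

#include <stdint.h>
#include <time.h>
#include <sys/ioctl.h>
#include <drm/rocket_accel.h>

static int read_results(int fd, uint32_t handle)
{
	struct drm_rocket_prep_bo prep = { .handle = handle };
	struct drm_rocket_fini_bo fini = { .handle = handle };
	struct timespec now;
	int ret;

	/* timeout_ns is an absolute CLOCK_MONOTONIC deadline: wait up to 1s */
	clock_gettime(CLOCK_MONOTONIC, &now);
	prep.timeout_ns = (now.tv_sec + 1) * 1000000000ull + now.tv_nsec;

	ret = ioctl(fd, DRM_IOCTL_ROCKET_PREP_BO, &prep);	/* wait + sync for CPU */
	if (ret)
		return ret;

	/* ... inspect or update the CPU mapping here ... */

	return ioctl(fd, DRM_IOCTL_ROCKET_FINI_BO, &fini);	/* sync back for NPU */
}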
diff --git a/drivers/accel/rocket/rocket_gem.h b/drivers/accel/rocket/rocket_gem.h
new file mode 100644
index 000000000000..240430334509
--- /dev/null
+++ b/drivers/accel/rocket/rocket_gem.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_GEM_H__
+#define __ROCKET_GEM_H__
+
+#include <drm/drm_gem_shmem_helper.h>
+
+struct rocket_gem_object {
+ struct drm_gem_shmem_object base;
+
+ struct rocket_file_priv *driver_priv;
+
+ struct rocket_iommu_domain *domain;
+ struct drm_mm_node mm;
+ size_t size;
+ u32 offset;
+};
+
+struct drm_gem_object *rocket_gem_create_object(struct drm_device *dev, size_t size);
+
+int rocket_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file);
+
+int rocket_ioctl_prep_bo(struct drm_device *dev, void *data, struct drm_file *file);
+
+int rocket_ioctl_fini_bo(struct drm_device *dev, void *data, struct drm_file *file);
+
+static inline
+struct rocket_gem_object *to_rocket_bo(struct drm_gem_object *obj)
+{
+ return container_of(to_drm_gem_shmem_obj(obj), struct rocket_gem_object, base);
+}
+
+#endif
diff --git a/drivers/accel/rocket/rocket_job.c b/drivers/accel/rocket/rocket_job.c
new file mode 100644
index 000000000000..acd606160dc9
--- /dev/null
+++ b/drivers/accel/rocket/rocket_job.c
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2019 Collabora ltd. */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_print.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/rocket_accel.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "rocket_core.h"
+#include "rocket_device.h"
+#include "rocket_drv.h"
+#include "rocket_job.h"
+#include "rocket_registers.h"
+
+#define JOB_TIMEOUT_MS 500
+
+static struct rocket_job *
+to_rocket_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct rocket_job, base);
+}
+
+static const char *rocket_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "rocket";
+}
+
+static const char *rocket_fence_get_timeline_name(struct dma_fence *fence)
+{
+ return "rockchip-npu";
+}
+
+static const struct dma_fence_ops rocket_fence_ops = {
+ .get_driver_name = rocket_fence_get_driver_name,
+ .get_timeline_name = rocket_fence_get_timeline_name,
+};
+
+static struct dma_fence *rocket_fence_create(struct rocket_core *core)
+{
+ struct dma_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return ERR_PTR(-ENOMEM);
+
+ dma_fence_init(fence, &rocket_fence_ops, &core->fence_lock,
+ core->fence_context, ++core->emit_seqno);
+
+ return fence;
+}
+
+static int
+rocket_copy_tasks(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_rocket_job *job,
+ struct rocket_job *rjob)
+{
+ int ret = 0;
+
+ if (job->task_struct_size < sizeof(struct drm_rocket_task))
+ return -EINVAL;
+
+ rjob->task_count = job->task_count;
+
+ if (!rjob->task_count)
+ return 0;
+
+ rjob->tasks = kvmalloc_array(job->task_count, sizeof(*rjob->tasks), GFP_KERNEL);
+ if (!rjob->tasks) {
+ drm_dbg(dev, "Failed to allocate task array\n");
+ return -ENOMEM;
+ }
+
+ for (int i = 0; i < rjob->task_count; i++) {
+ struct drm_rocket_task task = {0};
+
+ if (copy_from_user(&task,
+ u64_to_user_ptr(job->tasks) + i * job->task_struct_size,
+ sizeof(task))) {
+ drm_dbg(dev, "Failed to copy incoming tasks\n");
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ if (task.regcmd_count == 0) {
+ drm_dbg(dev, "regcmd_count field in drm_rocket_task should be > 0.\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ rjob->tasks[i].regcmd = task.regcmd;
+ rjob->tasks[i].regcmd_count = task.regcmd_count;
+ }
+
+ return 0;
+
+fail:
+ kvfree(rjob->tasks);
+ return ret;
+}
+
+static void rocket_job_hw_submit(struct rocket_core *core, struct rocket_job *job)
+{
+ struct rocket_task *task;
+ unsigned int extra_bit;
+
+ /* Don't queue the job if a reset is in progress */
+ if (atomic_read(&core->reset.pending))
+ return;
+
+ /* GO ! */
+
+ task = &job->tasks[job->next_task_idx];
+ job->next_task_idx++;
+
+ rocket_pc_writel(core, BASE_ADDRESS, 0x1);
+
+ /* Taken from the downstream rknpu driver; the TRM marks this bit as reserved */
+ extra_bit = 0x10000000 * core->index;
+ rocket_cna_writel(core, S_POINTER, CNA_S_POINTER_POINTER_PP_EN(1) |
+ CNA_S_POINTER_EXECUTER_PP_EN(1) |
+ CNA_S_POINTER_POINTER_PP_MODE(1) |
+ extra_bit);
+
+ rocket_core_writel(core, S_POINTER, CORE_S_POINTER_POINTER_PP_EN(1) |
+ CORE_S_POINTER_EXECUTER_PP_EN(1) |
+ CORE_S_POINTER_POINTER_PP_MODE(1) |
+ extra_bit);
+
+ rocket_pc_writel(core, BASE_ADDRESS, task->regcmd);
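+ /*
+ * Note: judging by the arithmetic below (not stated in the TRM),
+ * PC_DATA_AMOUNT appears to count pairs of 32-bit register commands
+ * minus one; the "+ 1" rounds an odd regcmd_count up to a full pair.
+ */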
+ rocket_pc_writel(core, REGISTER_AMOUNTS,
+ PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT((task->regcmd_count + 1) / 2 - 1));
+
+ rocket_pc_writel(core, INTERRUPT_MASK, PC_INTERRUPT_MASK_DPU_0 | PC_INTERRUPT_MASK_DPU_1);
+ rocket_pc_writel(core, INTERRUPT_CLEAR, PC_INTERRUPT_CLEAR_DPU_0 | PC_INTERRUPT_CLEAR_DPU_1);
+
+ rocket_pc_writel(core, TASK_CON, PC_TASK_CON_RESERVED_0(1) |
+ PC_TASK_CON_TASK_COUNT_CLEAR(1) |
+ PC_TASK_CON_TASK_NUMBER(1) |
+ PC_TASK_CON_TASK_PP_EN(1));
+
+ rocket_pc_writel(core, TASK_DMA_BASE_ADDR, PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR(0x0));
+
+ rocket_pc_writel(core, OPERATION_ENABLE, PC_OPERATION_ENABLE_OP_EN(1));
+
+ dev_dbg(core->dev, "Submitted regcmd at 0x%llx to core %d\n", task->regcmd, core->index);
+}
+
+static int rocket_acquire_object_fences(struct drm_gem_object **bos,
+ int bo_count,
+ struct drm_sched_job *job,
+ bool is_write)
+{
+ int i, ret;
+
+ for (i = 0; i < bo_count; i++) {
+ ret = dma_resv_reserve_fences(bos[i]->resv, 1);
+ if (ret)
+ return ret;
+
+ ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
+ is_write);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rocket_attach_object_fences(struct drm_gem_object **bos,
+ int bo_count,
+ struct dma_fence *fence)
+{
+ int i;
+
+ for (i = 0; i < bo_count; i++)
+ dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE);
+}
+
+static int rocket_job_push(struct rocket_job *job)
+{
+ struct rocket_device *rdev = job->rdev;
+ struct drm_gem_object **bos;
+ struct ww_acquire_ctx acquire_ctx;
+ int ret = 0;
+
+ bos = kvmalloc_array(job->in_bo_count + job->out_bo_count, sizeof(void *),
+ GFP_KERNEL);
+ if (!bos)
+ return -ENOMEM;
+
+ memcpy(bos, job->in_bos, job->in_bo_count * sizeof(void *));
+ memcpy(&bos[job->in_bo_count], job->out_bos, job->out_bo_count * sizeof(void *));
+
+ ret = drm_gem_lock_reservations(bos, job->in_bo_count + job->out_bo_count, &acquire_ctx);
+ if (ret)
+ goto err;
+
+ scoped_guard(mutex, &rdev->sched_lock) {
+ drm_sched_job_arm(&job->base);
+
+ job->inference_done_fence = dma_fence_get(&job->base.s_fence->finished);
+
+ ret = rocket_acquire_object_fences(job->in_bos, job->in_bo_count, &job->base, false);
+ if (ret)
+ goto err_unlock;
+
+ ret = rocket_acquire_object_fences(job->out_bos, job->out_bo_count, &job->base, true);
+ if (ret)
+ goto err_unlock;
+
+ kref_get(&job->refcount); /* put by scheduler job completion */
+
+ drm_sched_entity_push_job(&job->base);
+ }
+
+ rocket_attach_object_fences(job->out_bos, job->out_bo_count, job->inference_done_fence);
+
+err_unlock:
+ drm_gem_unlock_reservations(bos, job->in_bo_count + job->out_bo_count, &acquire_ctx);
+err:
+ kvfree(bos);
+
+ return ret;
+}
+
+static void rocket_job_cleanup(struct kref *ref)
+{
+ struct rocket_job *job = container_of(ref, struct rocket_job,
+ refcount);
+ unsigned int i;
+
+ rocket_iommu_domain_put(job->domain);
+
+ dma_fence_put(job->done_fence);
+ dma_fence_put(job->inference_done_fence);
+
+ if (job->in_bos) {
+ for (i = 0; i < job->in_bo_count; i++)
+ drm_gem_object_put(job->in_bos[i]);
+
+ kvfree(job->in_bos);
+ }
+
+ if (job->out_bos) {
+ for (i = 0; i < job->out_bo_count; i++)
+ drm_gem_object_put(job->out_bos[i]);
+
+ kvfree(job->out_bos);
+ }
+
+ kvfree(job->tasks);
+
+ kfree(job);
+}
+
+static void rocket_job_put(struct rocket_job *job)
+{
+ kref_put(&job->refcount, rocket_job_cleanup);
+}
+
+static void rocket_job_free(struct drm_sched_job *sched_job)
+{
+ struct rocket_job *job = to_rocket_job(sched_job);
+
+ drm_sched_job_cleanup(sched_job);
+
+ rocket_job_put(job);
+}
+
+static struct rocket_core *sched_to_core(struct rocket_device *rdev,
+ struct drm_gpu_scheduler *sched)
+{
+ unsigned int core;
+
+ for (core = 0; core < rdev->num_cores; core++) {
+ if (&rdev->cores[core].sched == sched)
+ return &rdev->cores[core];
+ }
+
+ return NULL;
+}
+
+static struct dma_fence *rocket_job_run(struct drm_sched_job *sched_job)
+{
+ struct rocket_job *job = to_rocket_job(sched_job);
+ struct rocket_device *rdev = job->rdev;
+ struct rocket_core *core = sched_to_core(rdev, sched_job->sched);
+ struct dma_fence *fence = NULL;
+ int ret;
+
+ if (unlikely(job->base.s_fence->finished.error))
+ return NULL;
+
+ /*
+ * Nothing to execute: can happen if the job has finished while
+ * we were resetting the NPU.
+ */
+ if (job->next_task_idx == job->task_count)
+ return NULL;
+
+ fence = rocket_fence_create(core);
+ if (IS_ERR(fence))
+ return fence;
+
+ if (job->done_fence)
+ dma_fence_put(job->done_fence);
+ job->done_fence = dma_fence_get(fence);
+
+ ret = pm_runtime_get_sync(core->dev);
+ if (ret < 0) {
+ /* get_sync() bumps the usage count even on failure */
+ pm_runtime_put_noidle(core->dev);
+ return fence;
+ }
+
+ ret = iommu_attach_group(job->domain->domain, core->iommu_group);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(core->dev);
+ return fence;
+ }
+
+ scoped_guard(mutex, &core->job_lock) {
+ core->in_flight_job = job;
+ rocket_job_hw_submit(core, job);
+ }
+
+ return fence;
+}
+
+static void rocket_job_handle_irq(struct rocket_core *core)
+{
+ pm_runtime_mark_last_busy(core->dev);
+
+ rocket_pc_writel(core, OPERATION_ENABLE, 0x0);
+ rocket_pc_writel(core, INTERRUPT_CLEAR, 0x1ffff);
+
+ scoped_guard(mutex, &core->job_lock)
+ if (core->in_flight_job) {
+ if (core->in_flight_job->next_task_idx < core->in_flight_job->task_count) {
+ rocket_job_hw_submit(core, core->in_flight_job);
+ return;
+ }
+
+ iommu_detach_group(core->in_flight_job->domain->domain,
+ core->iommu_group);
+ dma_fence_signal(core->in_flight_job->done_fence);
+ pm_runtime_put_autosuspend(core->dev);
+ core->in_flight_job = NULL;
+ }
+}
+
+static void
+rocket_reset(struct rocket_core *core, struct drm_sched_job *bad)
+{
+ if (!atomic_read(&core->reset.pending))
+ return;
+
+ drm_sched_stop(&core->sched, bad);
+
+ /*
+ * Remaining interrupts have been handled, but we might still have
+ * stuck jobs. Let's make sure the PM counters stay balanced by
+ * manually calling pm_runtime_put_noidle().
+ */
+ scoped_guard(mutex, &core->job_lock) {
+ if (core->in_flight_job) {
+ pm_runtime_put_noidle(core->dev);
+ iommu_detach_group(core->in_flight_job->domain->domain,
+ core->iommu_group);
+ }
+
+ core->in_flight_job = NULL;
+ }
+
+ /* Proceed with reset now. */
+ rocket_core_reset(core);
+
+ /* NPU has been reset, we can clear the reset pending bit. */
+ atomic_set(&core->reset.pending, 0);
+
+ /* Restart the scheduler */
+ drm_sched_start(&core->sched, 0);
+}
+
+static enum drm_gpu_sched_stat rocket_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct rocket_job *job = to_rocket_job(sched_job);
+ struct rocket_device *rdev = job->rdev;
+ struct rocket_core *core = sched_to_core(rdev, sched_job->sched);
+
+ dev_err(core->dev, "NPU job timed out");
+
+ atomic_set(&core->reset.pending, 1);
+ rocket_reset(core, sched_job);
+
+ return DRM_GPU_SCHED_STAT_RESET;
+}
+
+static void rocket_reset_work(struct work_struct *work)
+{
+ struct rocket_core *core;
+
+ core = container_of(work, struct rocket_core, reset.work);
+ rocket_reset(core, NULL);
+}
+
+static const struct drm_sched_backend_ops rocket_sched_ops = {
+ .run_job = rocket_job_run,
+ .timedout_job = rocket_job_timedout,
+ .free_job = rocket_job_free,
+};
+
+static irqreturn_t rocket_job_irq_handler_thread(int irq, void *data)
+{
+ struct rocket_core *core = data;
+
+ rocket_job_handle_irq(core);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rocket_job_irq_handler(int irq, void *data)
+{
+ struct rocket_core *core = data;
+ u32 raw_status = rocket_pc_readl(core, INTERRUPT_RAW_STATUS);
+
+ WARN_ON(raw_status & PC_INTERRUPT_RAW_STATUS_DMA_READ_ERROR);
+ WARN_ON(raw_status & PC_INTERRUPT_RAW_STATUS_DMA_WRITE_ERROR);
+
+ if (!(raw_status & PC_INTERRUPT_RAW_STATUS_DPU_0 ||
+ raw_status & PC_INTERRUPT_RAW_STATUS_DPU_1))
+ return IRQ_NONE;
+
+ rocket_pc_writel(core, INTERRUPT_MASK, 0x0);
+
+ return IRQ_WAKE_THREAD;
+}
+
+int rocket_job_init(struct rocket_core *core)
+{
+ struct drm_sched_init_args args = {
+ .ops = &rocket_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = 1,
+ .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
+ .name = dev_name(core->dev),
+ .dev = core->dev,
+ };
+ int ret;
+
+ INIT_WORK(&core->reset.work, rocket_reset_work);
+ spin_lock_init(&core->fence_lock);
+ mutex_init(&core->job_lock);
+
+ core->irq = platform_get_irq(to_platform_device(core->dev), 0);
+ if (core->irq < 0)
+ return core->irq;
+
+ ret = devm_request_threaded_irq(core->dev, core->irq,
+ rocket_job_irq_handler,
+ rocket_job_irq_handler_thread,
+ IRQF_SHARED, dev_name(core->dev),
+ core);
+ if (ret) {
+ dev_err(core->dev, "failed to request job irq");
+ return ret;
+ }
+
+ core->reset.wq = alloc_ordered_workqueue("rocket-reset-%d", 0, core->index);
+ if (!core->reset.wq)
+ return -ENOMEM;
+
+ core->fence_context = dma_fence_context_alloc(1);
+
+ args.timeout_wq = core->reset.wq;
+ ret = drm_sched_init(&core->sched, &args);
+ if (ret) {
+ dev_err(core->dev, "Failed to create scheduler: %d.", ret);
+ goto err_sched;
+ }
+
+ return 0;
+
+err_sched:
+ /* drm_sched_init() failed, so only the workqueue needs tearing down */
+ destroy_workqueue(core->reset.wq);
+ return ret;
+}
+
+void rocket_job_fini(struct rocket_core *core)
+{
+ drm_sched_fini(&core->sched);
+
+ cancel_work_sync(&core->reset.work);
+ destroy_workqueue(core->reset.wq);
+}
+
+int rocket_job_open(struct rocket_file_priv *rocket_priv)
+{
+ struct rocket_device *rdev = rocket_priv->rdev;
+ struct drm_gpu_scheduler **scheds = kmalloc_array(rdev->num_cores,
+ sizeof(*scheds),
+ GFP_KERNEL);
+ unsigned int core;
+ int ret;
+
+ if (!scheds)
+ return -ENOMEM;
+
+ for (core = 0; core < rdev->num_cores; core++)
+ scheds[core] = &rdev->cores[core].sched;
+
+ ret = drm_sched_entity_init(&rocket_priv->sched_entity,
+ DRM_SCHED_PRIORITY_NORMAL,
+ scheds,
+ rdev->num_cores, NULL);
+ if (WARN_ON(ret))
+ return ret;
+
+ return 0;
+}
+
+void rocket_job_close(struct rocket_file_priv *rocket_priv)
+{
+ struct drm_sched_entity *entity = &rocket_priv->sched_entity;
+
+ kfree(entity->sched_list);
+ drm_sched_entity_destroy(entity);
+}
+
+int rocket_job_is_idle(struct rocket_core *core)
+{
+ /* If there are any jobs in this HW queue, we're not idle */
+ if (atomic_read(&core->sched.credit_count))
+ return false;
+
+ return true;
+}
+
+static int rocket_ioctl_submit_job(struct drm_device *dev, struct drm_file *file,
+ struct drm_rocket_job *job)
+{
+ struct rocket_device *rdev = to_rocket_device(dev);
+ struct rocket_file_priv *file_priv = file->driver_priv;
+ struct rocket_job *rjob = NULL;
+ int ret = 0;
+
+ if (job->task_count == 0)
+ return -EINVAL;
+
+ rjob = kzalloc(sizeof(*rjob), GFP_KERNEL);
+ if (!rjob)
+ return -ENOMEM;
+
+ kref_init(&rjob->refcount);
+
+ rjob->rdev = rdev;
+
+ ret = drm_sched_job_init(&rjob->base,
+ &file_priv->sched_entity,
+ 1, NULL, file->client_id);
+ if (ret)
+ goto out_put_job;
+
+ ret = rocket_copy_tasks(dev, file, job, rjob);
+ if (ret)
+ goto out_cleanup_job;
+
+ ret = drm_gem_objects_lookup(file, u64_to_user_ptr(job->in_bo_handles),
+ job->in_bo_handle_count, &rjob->in_bos);
+ if (ret)
+ goto out_cleanup_job;
+
+ rjob->in_bo_count = job->in_bo_handle_count;
+
+ ret = drm_gem_objects_lookup(file, u64_to_user_ptr(job->out_bo_handles),
+ job->out_bo_handle_count, &rjob->out_bos);
+ if (ret)
+ goto out_cleanup_job;
+
+ rjob->out_bo_count = job->out_bo_handle_count;
+
+ rjob->domain = rocket_iommu_domain_get(file_priv);
+
+ ret = rocket_job_push(rjob);
+ if (ret)
+ goto out_cleanup_job;
+
+out_cleanup_job:
+ if (ret)
+ drm_sched_job_cleanup(&rjob->base);
+out_put_job:
+ rocket_job_put(rjob);
+
+ return ret;
+}
+
+int rocket_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_rocket_submit *args = data;
+ struct drm_rocket_job *jobs;
+ int ret = 0;
+ unsigned int i = 0;
+
+ if (args->job_count == 0)
+ return 0;
+
+ if (args->job_struct_size < sizeof(struct drm_rocket_job)) {
+ drm_dbg(dev, "job_struct_size field in drm_rocket_submit struct is too small.\n");
+ return -EINVAL;
+ }
+
+ if (args->reserved != 0) {
+ drm_dbg(dev, "Reserved field in drm_rocket_submit struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ jobs = kvmalloc_array(args->job_count, sizeof(*jobs), GFP_KERNEL);
+ if (!jobs) {
+ drm_dbg(dev, "Failed to allocate incoming job array\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < args->job_count; i++) {
+ if (copy_from_user(&jobs[i],
+ u64_to_user_ptr(args->jobs) + i * args->job_struct_size,
+ sizeof(*jobs))) {
+ ret = -EFAULT;
+ drm_dbg(dev, "Failed to copy incoming job array\n");
+ goto exit;
+ }
+ }
+
+ for (i = 0; i < args->job_count; i++) {
+ ret = rocket_ioctl_submit_job(dev, file, &jobs[i]);
+ if (ret)
+ goto exit;
+ }
+
+exit:
+ kvfree(jobs);
+
+ return ret;
+}
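
Tying the submit path together from userspace: a job carries an array of
tasks (each the NPU virtual address of a regcmd stream plus its length) and
the input/output BO handles whose implicit fences the scheduler respects. A
minimal hedged sketch (struct layouts and request macro assumed from
include/uapi/drm/rocket_accel.h; regcmd_dma is the dma_address that
CREATE_BO returned for a buffer previously filled with register commands):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/rocket_accel.h>

static int submit_one(int fd, uint64_t regcmd_dma, uint32_t regcmd_count,
		      uint32_t in_handle, uint32_t out_handle)
{
	struct drm_rocket_task task = {
		.regcmd = regcmd_dma,		/* NPU VA of the command stream */
		.regcmd_count = regcmd_count,	/* assumed: number of 32-bit commands */
	};
	struct drm_rocket_job job = {
		.tasks = (uint64_t)(uintptr_t)&task,
		.task_count = 1,
		.task_struct_size = sizeof(task),
		.in_bo_handles = (uint64_t)(uintptr_t)&in_handle,
		.in_bo_handle_count = 1,
		.out_bo_handles = (uint64_t)(uintptr_t)&out_handle,
		.out_bo_handle_count = 1,
	};
	struct drm_rocket_submit submit = {
		.jobs = (uint64_t)(uintptr_t)&job,
		.job_count = 1,
		.job_struct_size = sizeof(job),
	};

	/* Completion is observed with PREP_BO on the output buffer. */
	return ioctl(fd, DRM_IOCTL_ROCKET_SUBMIT, &submit);
}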
diff --git a/drivers/accel/rocket/rocket_job.h b/drivers/accel/rocket/rocket_job.h
new file mode 100644
index 000000000000..4ae00feec3b9
--- /dev/null
+++ b/drivers/accel/rocket/rocket_job.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_JOB_H__
+#define __ROCKET_JOB_H__
+
+#include <drm/drm_drv.h>
+#include <drm/gpu_scheduler.h>
+
+#include "rocket_core.h"
+#include "rocket_drv.h"
+
+struct rocket_task {
+ u64 regcmd;
+ u32 regcmd_count;
+};
+
+struct rocket_job {
+ struct drm_sched_job base;
+
+ struct rocket_device *rdev;
+
+ struct drm_gem_object **in_bos;
+ struct drm_gem_object **out_bos;
+
+ u32 in_bo_count;
+ u32 out_bo_count;
+
+ struct rocket_task *tasks;
+ u32 task_count;
+ u32 next_task_idx;
+
+ /* Fence to be signaled by drm-sched once it's done with the job */
+ struct dma_fence *inference_done_fence;
+
+ /* Fence to be signaled by IRQ handler when the job is complete. */
+ struct dma_fence *done_fence;
+
+ struct rocket_iommu_domain *domain;
+
+ struct kref refcount;
+};
+
+int rocket_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file);
+
+int rocket_job_init(struct rocket_core *core);
+void rocket_job_fini(struct rocket_core *core);
+int rocket_job_open(struct rocket_file_priv *rocket_priv);
+void rocket_job_close(struct rocket_file_priv *rocket_priv);
+int rocket_job_is_idle(struct rocket_core *core);
+
+#endif
diff --git a/drivers/accel/rocket/rocket_registers.h b/drivers/accel/rocket/rocket_registers.h
new file mode 100644
index 000000000000..9aef614c3470
--- /dev/null
+++ b/drivers/accel/rocket/rocket_registers.h
@@ -0,0 +1,4404 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+
+#ifndef __ROCKET_REGISTERS_XML__
+#define __ROCKET_REGISTERS_XML__
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
+http://gitlab.freedesktop.org/mesa/mesa/
+git clone https://gitlab.freedesktop.org/mesa/mesa.git
+
+The rules-ng-ng source files this header was generated from are:
+
+- /home/tomeu/src/mesa/src/gallium/drivers/rocket/registers.xml ( 60076 bytes, from Wed Jun 12 10:02:25 2024)
+
+Copyright (C) 2024-2025 by the following authors:
+- Tomeu Vizoso <tomeu@tomeuvizoso.net>
+*/
+
+#define REG_PC_VERSION 0x00000000
+#define PC_VERSION_VERSION__MASK 0xffffffff
+#define PC_VERSION_VERSION__SHIFT 0
+static inline uint32_t PC_VERSION_VERSION(uint32_t val)
+{
+ return ((val) << PC_VERSION_VERSION__SHIFT) & PC_VERSION_VERSION__MASK;
+}
+
+#define REG_PC_VERSION_NUM 0x00000004
+#define PC_VERSION_NUM_VERSION_NUM__MASK 0xffffffff
+#define PC_VERSION_NUM_VERSION_NUM__SHIFT 0
+static inline uint32_t PC_VERSION_NUM_VERSION_NUM(uint32_t val)
+{
+ return ((val) << PC_VERSION_NUM_VERSION_NUM__SHIFT) & PC_VERSION_NUM_VERSION_NUM__MASK;
+}
+
+#define REG_PC_OPERATION_ENABLE 0x00000008
+#define PC_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define PC_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t PC_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_OPERATION_ENABLE_RESERVED_0__SHIFT) & PC_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define PC_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define PC_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t PC_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << PC_OPERATION_ENABLE_OP_EN__SHIFT) & PC_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_PC_BASE_ADDRESS 0x00000010
+#define PC_BASE_ADDRESS_PC_SOURCE_ADDR__MASK 0xfffffff0
+#define PC_BASE_ADDRESS_PC_SOURCE_ADDR__SHIFT 4
+static inline uint32_t PC_BASE_ADDRESS_PC_SOURCE_ADDR(uint32_t val)
+{
+ return ((val) << PC_BASE_ADDRESS_PC_SOURCE_ADDR__SHIFT) & PC_BASE_ADDRESS_PC_SOURCE_ADDR__MASK;
+}
+#define PC_BASE_ADDRESS_RESERVED_0__MASK 0x0000000e
+#define PC_BASE_ADDRESS_RESERVED_0__SHIFT 1
+static inline uint32_t PC_BASE_ADDRESS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_BASE_ADDRESS_RESERVED_0__SHIFT) & PC_BASE_ADDRESS_RESERVED_0__MASK;
+}
+#define PC_BASE_ADDRESS_PC_SEL__MASK 0x00000001
+#define PC_BASE_ADDRESS_PC_SEL__SHIFT 0
+static inline uint32_t PC_BASE_ADDRESS_PC_SEL(uint32_t val)
+{
+ return ((val) << PC_BASE_ADDRESS_PC_SEL__SHIFT) & PC_BASE_ADDRESS_PC_SEL__MASK;
+}
+
+#define REG_PC_REGISTER_AMOUNTS 0x00000014
+#define PC_REGISTER_AMOUNTS_RESERVED_0__MASK 0xffff0000
+#define PC_REGISTER_AMOUNTS_RESERVED_0__SHIFT 16
+static inline uint32_t PC_REGISTER_AMOUNTS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_REGISTER_AMOUNTS_RESERVED_0__SHIFT) & PC_REGISTER_AMOUNTS_RESERVED_0__MASK;
+}
+#define PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__MASK 0x0000ffff
+#define PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__SHIFT 0
+static inline uint32_t PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT(uint32_t val)
+{
+ return ((val) << PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__SHIFT) & PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__MASK;
+}
+
+#define REG_PC_INTERRUPT_MASK 0x00000020
+#define PC_INTERRUPT_MASK_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_MASK_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_MASK_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_MASK_RESERVED_0__SHIFT) & PC_INTERRUPT_MASK_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_MASK_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_MASK_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_MASK_PPU_1 0x00000800
+#define PC_INTERRUPT_MASK_PPU_0 0x00000400
+#define PC_INTERRUPT_MASK_DPU_1 0x00000200
+#define PC_INTERRUPT_MASK_DPU_0 0x00000100
+#define PC_INTERRUPT_MASK_CORE_1 0x00000080
+#define PC_INTERRUPT_MASK_CORE_0 0x00000040
+#define PC_INTERRUPT_MASK_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_MASK_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_MASK_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_MASK_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_MASK_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_MASK_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_INTERRUPT_CLEAR 0x00000024
+#define PC_INTERRUPT_CLEAR_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_CLEAR_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_CLEAR_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_CLEAR_RESERVED_0__SHIFT) & PC_INTERRUPT_CLEAR_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_CLEAR_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_CLEAR_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_CLEAR_PPU_1 0x00000800
+#define PC_INTERRUPT_CLEAR_PPU_0 0x00000400
+#define PC_INTERRUPT_CLEAR_DPU_1 0x00000200
+#define PC_INTERRUPT_CLEAR_DPU_0 0x00000100
+#define PC_INTERRUPT_CLEAR_CORE_1 0x00000080
+#define PC_INTERRUPT_CLEAR_CORE_0 0x00000040
+#define PC_INTERRUPT_CLEAR_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_CLEAR_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_CLEAR_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_CLEAR_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_CLEAR_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_CLEAR_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_INTERRUPT_STATUS 0x00000028
+#define PC_INTERRUPT_STATUS_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_STATUS_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_STATUS_RESERVED_0__SHIFT) & PC_INTERRUPT_STATUS_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_STATUS_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_STATUS_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_STATUS_PPU_1 0x00000800
+#define PC_INTERRUPT_STATUS_PPU_0 0x00000400
+#define PC_INTERRUPT_STATUS_DPU_1 0x00000200
+#define PC_INTERRUPT_STATUS_DPU_0 0x00000100
+#define PC_INTERRUPT_STATUS_CORE_1 0x00000080
+#define PC_INTERRUPT_STATUS_CORE_0 0x00000040
+#define PC_INTERRUPT_STATUS_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_STATUS_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_STATUS_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_STATUS_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_STATUS_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_STATUS_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_INTERRUPT_RAW_STATUS 0x0000002c
+#define PC_INTERRUPT_RAW_STATUS_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_RAW_STATUS_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_RAW_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_RAW_STATUS_RESERVED_0__SHIFT) & PC_INTERRUPT_RAW_STATUS_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_RAW_STATUS_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_RAW_STATUS_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_RAW_STATUS_PPU_1 0x00000800
+#define PC_INTERRUPT_RAW_STATUS_PPU_0 0x00000400
+#define PC_INTERRUPT_RAW_STATUS_DPU_1 0x00000200
+#define PC_INTERRUPT_RAW_STATUS_DPU_0 0x00000100
+#define PC_INTERRUPT_RAW_STATUS_CORE_1 0x00000080
+#define PC_INTERRUPT_RAW_STATUS_CORE_0 0x00000040
+#define PC_INTERRUPT_RAW_STATUS_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_RAW_STATUS_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_RAW_STATUS_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_RAW_STATUS_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_RAW_STATUS_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_RAW_STATUS_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_TASK_CON 0x00000030
+#define PC_TASK_CON_RESERVED_0__MASK 0xffffc000
+#define PC_TASK_CON_RESERVED_0__SHIFT 14
+static inline uint32_t PC_TASK_CON_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_RESERVED_0__SHIFT) & PC_TASK_CON_RESERVED_0__MASK;
+}
+#define PC_TASK_CON_TASK_COUNT_CLEAR__MASK 0x00002000
+#define PC_TASK_CON_TASK_COUNT_CLEAR__SHIFT 13
+static inline uint32_t PC_TASK_CON_TASK_COUNT_CLEAR(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_TASK_COUNT_CLEAR__SHIFT) & PC_TASK_CON_TASK_COUNT_CLEAR__MASK;
+}
+#define PC_TASK_CON_TASK_PP_EN__MASK 0x00001000
+#define PC_TASK_CON_TASK_PP_EN__SHIFT 12
+static inline uint32_t PC_TASK_CON_TASK_PP_EN(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_TASK_PP_EN__SHIFT) & PC_TASK_CON_TASK_PP_EN__MASK;
+}
+#define PC_TASK_CON_TASK_NUMBER__MASK 0x00000fff
+#define PC_TASK_CON_TASK_NUMBER__SHIFT 0
+static inline uint32_t PC_TASK_CON_TASK_NUMBER(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_TASK_NUMBER__SHIFT) & PC_TASK_CON_TASK_NUMBER__MASK;
+}
+
+#define REG_PC_TASK_DMA_BASE_ADDR 0x00000034
+#define PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__MASK 0xfffffff0
+#define PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__SHIFT 4
+static inline uint32_t PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR(uint32_t val)
+{
+ return ((val) << PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__SHIFT) & PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__MASK;
+}
+#define PC_TASK_DMA_BASE_ADDR_RESERVED_0__MASK 0x0000000f
+#define PC_TASK_DMA_BASE_ADDR_RESERVED_0__SHIFT 0
+static inline uint32_t PC_TASK_DMA_BASE_ADDR_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_TASK_DMA_BASE_ADDR_RESERVED_0__SHIFT) & PC_TASK_DMA_BASE_ADDR_RESERVED_0__MASK;
+}
+
+#define REG_PC_TASK_STATUS 0x0000003c
+#define PC_TASK_STATUS_RESERVED_0__MASK 0xf0000000
+#define PC_TASK_STATUS_RESERVED_0__SHIFT 28
+static inline uint32_t PC_TASK_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_TASK_STATUS_RESERVED_0__SHIFT) & PC_TASK_STATUS_RESERVED_0__MASK;
+}
+#define PC_TASK_STATUS_TASK_STATUS__MASK 0x0fffffff
+#define PC_TASK_STATUS_TASK_STATUS__SHIFT 0
+static inline uint32_t PC_TASK_STATUS_TASK_STATUS(uint32_t val)
+{
+ return ((val) << PC_TASK_STATUS_TASK_STATUS__SHIFT) & PC_TASK_STATUS_TASK_STATUS__MASK;
+}
+
+#define REG_CNA_S_STATUS 0x00001000
+#define CNA_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define CNA_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t CNA_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_RESERVED_0__SHIFT) & CNA_S_STATUS_RESERVED_0__MASK;
+}
+#define CNA_S_STATUS_STATUS_1__MASK 0x00030000
+#define CNA_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t CNA_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_STATUS_1__SHIFT) & CNA_S_STATUS_STATUS_1__MASK;
+}
+#define CNA_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define CNA_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t CNA_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_RESERVED_1__SHIFT) & CNA_S_STATUS_RESERVED_1__MASK;
+}
+#define CNA_S_STATUS_STATUS_0__MASK 0x00000003
+#define CNA_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t CNA_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_STATUS_0__SHIFT) & CNA_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_CNA_S_POINTER 0x00001004
+#define CNA_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define CNA_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t CNA_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_RESERVED_0__SHIFT) & CNA_S_POINTER_RESERVED_0__MASK;
+}
+#define CNA_S_POINTER_EXECUTER__MASK 0x00010000
+#define CNA_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t CNA_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_EXECUTER__SHIFT) & CNA_S_POINTER_EXECUTER__MASK;
+}
+#define CNA_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define CNA_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t CNA_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_RESERVED_1__SHIFT) & CNA_S_POINTER_RESERVED_1__MASK;
+}
+#define CNA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define CNA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t CNA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & CNA_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define CNA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define CNA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t CNA_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & CNA_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define CNA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define CNA_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t CNA_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER_PP_MODE__SHIFT) & CNA_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define CNA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define CNA_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t CNA_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_EXECUTER_PP_EN__SHIFT) & CNA_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define CNA_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define CNA_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t CNA_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER_PP_EN__SHIFT) & CNA_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define CNA_S_POINTER_POINTER__MASK 0x00000001
+#define CNA_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t CNA_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER__SHIFT) & CNA_S_POINTER_POINTER__MASK;
+}
+
+#define REG_CNA_OPERATION_ENABLE 0x00001008
+#define CNA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define CNA_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t CNA_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_OPERATION_ENABLE_RESERVED_0__SHIFT) & CNA_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define CNA_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define CNA_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t CNA_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << CNA_OPERATION_ENABLE_OP_EN__SHIFT) & CNA_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_CNA_CONV_CON1 0x0000100c
+#define CNA_CONV_CON1_RESERVED_0__MASK 0x80000000
+#define CNA_CONV_CON1_RESERVED_0__SHIFT 31
+static inline uint32_t CNA_CONV_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_RESERVED_0__SHIFT) & CNA_CONV_CON1_RESERVED_0__MASK;
+}
+#define CNA_CONV_CON1_NONALIGN_DMA__MASK 0x40000000
+#define CNA_CONV_CON1_NONALIGN_DMA__SHIFT 30
+static inline uint32_t CNA_CONV_CON1_NONALIGN_DMA(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_NONALIGN_DMA__SHIFT) & CNA_CONV_CON1_NONALIGN_DMA__MASK;
+}
+#define CNA_CONV_CON1_GROUP_LINE_OFF__MASK 0x20000000
+#define CNA_CONV_CON1_GROUP_LINE_OFF__SHIFT 29
+static inline uint32_t CNA_CONV_CON1_GROUP_LINE_OFF(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_GROUP_LINE_OFF__SHIFT) & CNA_CONV_CON1_GROUP_LINE_OFF__MASK;
+}
+#define CNA_CONV_CON1_RESERVED_1__MASK 0x1ffe0000
+#define CNA_CONV_CON1_RESERVED_1__SHIFT 17
+static inline uint32_t CNA_CONV_CON1_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_RESERVED_1__SHIFT) & CNA_CONV_CON1_RESERVED_1__MASK;
+}
+#define CNA_CONV_CON1_DECONV__MASK 0x00010000
+#define CNA_CONV_CON1_DECONV__SHIFT 16
+static inline uint32_t CNA_CONV_CON1_DECONV(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_DECONV__SHIFT) & CNA_CONV_CON1_DECONV__MASK;
+}
+#define CNA_CONV_CON1_ARGB_IN__MASK 0x0000f000
+#define CNA_CONV_CON1_ARGB_IN__SHIFT 12
+static inline uint32_t CNA_CONV_CON1_ARGB_IN(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_ARGB_IN__SHIFT) & CNA_CONV_CON1_ARGB_IN__MASK;
+}
+#define CNA_CONV_CON1_RESERVED_2__MASK 0x00000c00
+#define CNA_CONV_CON1_RESERVED_2__SHIFT 10
+static inline uint32_t CNA_CONV_CON1_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_RESERVED_2__SHIFT) & CNA_CONV_CON1_RESERVED_2__MASK;
+}
+#define CNA_CONV_CON1_PROC_PRECISION__MASK 0x00000380
+#define CNA_CONV_CON1_PROC_PRECISION__SHIFT 7
+static inline uint32_t CNA_CONV_CON1_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_PROC_PRECISION__SHIFT) & CNA_CONV_CON1_PROC_PRECISION__MASK;
+}
+#define CNA_CONV_CON1_IN_PRECISION__MASK 0x00000070
+#define CNA_CONV_CON1_IN_PRECISION__SHIFT 4
+static inline uint32_t CNA_CONV_CON1_IN_PRECISION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_IN_PRECISION__SHIFT) & CNA_CONV_CON1_IN_PRECISION__MASK;
+}
+#define CNA_CONV_CON1_CONV_MODE__MASK 0x0000000f
+#define CNA_CONV_CON1_CONV_MODE__SHIFT 0
+static inline uint32_t CNA_CONV_CON1_CONV_MODE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_CONV_MODE__SHIFT) & CNA_CONV_CON1_CONV_MODE__MASK;
+}
+
+#define REG_CNA_CONV_CON2 0x00001010
+#define CNA_CONV_CON2_RESERVED_0__MASK 0xff000000
+#define CNA_CONV_CON2_RESERVED_0__SHIFT 24
+static inline uint32_t CNA_CONV_CON2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_RESERVED_0__SHIFT) & CNA_CONV_CON2_RESERVED_0__MASK;
+}
+#define CNA_CONV_CON2_KERNEL_GROUP__MASK 0x00ff0000
+#define CNA_CONV_CON2_KERNEL_GROUP__SHIFT 16
+static inline uint32_t CNA_CONV_CON2_KERNEL_GROUP(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_KERNEL_GROUP__SHIFT) & CNA_CONV_CON2_KERNEL_GROUP__MASK;
+}
+#define CNA_CONV_CON2_RESERVED_1__MASK 0x0000c000
+#define CNA_CONV_CON2_RESERVED_1__SHIFT 14
+static inline uint32_t CNA_CONV_CON2_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_RESERVED_1__SHIFT) & CNA_CONV_CON2_RESERVED_1__MASK;
+}
+#define CNA_CONV_CON2_FEATURE_GRAINS__MASK 0x00003ff0
+#define CNA_CONV_CON2_FEATURE_GRAINS__SHIFT 4
+static inline uint32_t CNA_CONV_CON2_FEATURE_GRAINS(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_FEATURE_GRAINS__SHIFT) & CNA_CONV_CON2_FEATURE_GRAINS__MASK;
+}
+#define CNA_CONV_CON2_RESERVED_2__MASK 0x00000008
+#define CNA_CONV_CON2_RESERVED_2__SHIFT 3
+static inline uint32_t CNA_CONV_CON2_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_RESERVED_2__SHIFT) & CNA_CONV_CON2_RESERVED_2__MASK;
+}
+#define CNA_CONV_CON2_CSC_WO_EN__MASK 0x00000004
+#define CNA_CONV_CON2_CSC_WO_EN__SHIFT 2
+static inline uint32_t CNA_CONV_CON2_CSC_WO_EN(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_CSC_WO_EN__SHIFT) & CNA_CONV_CON2_CSC_WO_EN__MASK;
+}
+#define CNA_CONV_CON2_CSC_DO_EN__MASK 0x00000002
+#define CNA_CONV_CON2_CSC_DO_EN__SHIFT 1
+static inline uint32_t CNA_CONV_CON2_CSC_DO_EN(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_CSC_DO_EN__SHIFT) & CNA_CONV_CON2_CSC_DO_EN__MASK;
+}
+#define CNA_CONV_CON2_CMD_FIFO_SRST__MASK 0x00000001
+#define CNA_CONV_CON2_CMD_FIFO_SRST__SHIFT 0
+static inline uint32_t CNA_CONV_CON2_CMD_FIFO_SRST(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_CMD_FIFO_SRST__SHIFT) & CNA_CONV_CON2_CMD_FIFO_SRST__MASK;
+}
+
+#define REG_CNA_CONV_CON3 0x00001014
+#define CNA_CONV_CON3_RESERVED_0__MASK 0x80000000
+#define CNA_CONV_CON3_RESERVED_0__SHIFT 31
+static inline uint32_t CNA_CONV_CON3_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_0__SHIFT) & CNA_CONV_CON3_RESERVED_0__MASK;
+}
+#define CNA_CONV_CON3_NN_MODE__MASK 0x70000000
+#define CNA_CONV_CON3_NN_MODE__SHIFT 28
+static inline uint32_t CNA_CONV_CON3_NN_MODE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_NN_MODE__SHIFT) & CNA_CONV_CON3_NN_MODE__MASK;
+}
+#define CNA_CONV_CON3_RESERVED_1__MASK 0x0c000000
+#define CNA_CONV_CON3_RESERVED_1__SHIFT 26
+static inline uint32_t CNA_CONV_CON3_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_1__SHIFT) & CNA_CONV_CON3_RESERVED_1__MASK;
+}
+#define CNA_CONV_CON3_ATROUS_Y_DILATION__MASK 0x03e00000
+#define CNA_CONV_CON3_ATROUS_Y_DILATION__SHIFT 21
+static inline uint32_t CNA_CONV_CON3_ATROUS_Y_DILATION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_ATROUS_Y_DILATION__SHIFT) & CNA_CONV_CON3_ATROUS_Y_DILATION__MASK;
+}
+#define CNA_CONV_CON3_ATROUS_X_DILATION__MASK 0x001f0000
+#define CNA_CONV_CON3_ATROUS_X_DILATION__SHIFT 16
+static inline uint32_t CNA_CONV_CON3_ATROUS_X_DILATION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_ATROUS_X_DILATION__SHIFT) & CNA_CONV_CON3_ATROUS_X_DILATION__MASK;
+}
+#define CNA_CONV_CON3_RESERVED_2__MASK 0x0000c000
+#define CNA_CONV_CON3_RESERVED_2__SHIFT 14
+static inline uint32_t CNA_CONV_CON3_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_2__SHIFT) & CNA_CONV_CON3_RESERVED_2__MASK;
+}
+#define CNA_CONV_CON3_DECONV_Y_STRIDE__MASK 0x00003800
+#define CNA_CONV_CON3_DECONV_Y_STRIDE__SHIFT 11
+static inline uint32_t CNA_CONV_CON3_DECONV_Y_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_DECONV_Y_STRIDE__SHIFT) & CNA_CONV_CON3_DECONV_Y_STRIDE__MASK;
+}
+#define CNA_CONV_CON3_DECONV_X_STRIDE__MASK 0x00000700
+#define CNA_CONV_CON3_DECONV_X_STRIDE__SHIFT 8
+static inline uint32_t CNA_CONV_CON3_DECONV_X_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_DECONV_X_STRIDE__SHIFT) & CNA_CONV_CON3_DECONV_X_STRIDE__MASK;
+}
+#define CNA_CONV_CON3_RESERVED_3__MASK 0x000000c0
+#define CNA_CONV_CON3_RESERVED_3__SHIFT 6
+static inline uint32_t CNA_CONV_CON3_RESERVED_3(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_3__SHIFT) & CNA_CONV_CON3_RESERVED_3__MASK;
+}
+#define CNA_CONV_CON3_CONV_Y_STRIDE__MASK 0x00000038
+#define CNA_CONV_CON3_CONV_Y_STRIDE__SHIFT 3
+static inline uint32_t CNA_CONV_CON3_CONV_Y_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_CONV_Y_STRIDE__SHIFT) & CNA_CONV_CON3_CONV_Y_STRIDE__MASK;
+}
+#define CNA_CONV_CON3_CONV_X_STRIDE__MASK 0x00000007
+#define CNA_CONV_CON3_CONV_X_STRIDE__SHIFT 0
+static inline uint32_t CNA_CONV_CON3_CONV_X_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_CONV_X_STRIDE__SHIFT) & CNA_CONV_CON3_CONV_X_STRIDE__MASK;
+}
+
+#define REG_CNA_DATA_SIZE0 0x00001020
+#define CNA_DATA_SIZE0_RESERVED_0__MASK 0xf8000000
+#define CNA_DATA_SIZE0_RESERVED_0__SHIFT 27
+static inline uint32_t CNA_DATA_SIZE0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_RESERVED_0__SHIFT) & CNA_DATA_SIZE0_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE0_DATAIN_WIDTH__MASK 0x07ff0000
+#define CNA_DATA_SIZE0_DATAIN_WIDTH__SHIFT 16
+static inline uint32_t CNA_DATA_SIZE0_DATAIN_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_DATAIN_WIDTH__SHIFT) & CNA_DATA_SIZE0_DATAIN_WIDTH__MASK;
+}
+#define CNA_DATA_SIZE0_RESERVED_1__MASK 0x0000f800
+#define CNA_DATA_SIZE0_RESERVED_1__SHIFT 11
+static inline uint32_t CNA_DATA_SIZE0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_RESERVED_1__SHIFT) & CNA_DATA_SIZE0_RESERVED_1__MASK;
+}
+#define CNA_DATA_SIZE0_DATAIN_HEIGHT__MASK 0x000007ff
+#define CNA_DATA_SIZE0_DATAIN_HEIGHT__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE0_DATAIN_HEIGHT(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_DATAIN_HEIGHT__SHIFT) & CNA_DATA_SIZE0_DATAIN_HEIGHT__MASK;
+}
+
+#define REG_CNA_DATA_SIZE1 0x00001024
+#define CNA_DATA_SIZE1_RESERVED_0__MASK 0xc0000000
+#define CNA_DATA_SIZE1_RESERVED_0__SHIFT 30
+static inline uint32_t CNA_DATA_SIZE1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE1_RESERVED_0__SHIFT) & CNA_DATA_SIZE1_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__MASK 0x3fff0000
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__SHIFT 16
+static inline uint32_t CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__SHIFT) & CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__MASK;
+}
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL__MASK 0x0000ffff
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE1_DATAIN_CHANNEL(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE1_DATAIN_CHANNEL__SHIFT) & CNA_DATA_SIZE1_DATAIN_CHANNEL__MASK;
+}
+
+#define REG_CNA_DATA_SIZE2 0x00001028
+#define CNA_DATA_SIZE2_RESERVED_0__MASK 0xfffff800
+#define CNA_DATA_SIZE2_RESERVED_0__SHIFT 11
+static inline uint32_t CNA_DATA_SIZE2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE2_RESERVED_0__SHIFT) & CNA_DATA_SIZE2_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE2_DATAOUT_WIDTH__MASK 0x000007ff
+#define CNA_DATA_SIZE2_DATAOUT_WIDTH__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE2_DATAOUT_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE2_DATAOUT_WIDTH__SHIFT) & CNA_DATA_SIZE2_DATAOUT_WIDTH__MASK;
+}
+
+#define REG_CNA_DATA_SIZE3 0x0000102c
+#define CNA_DATA_SIZE3_RESERVED_0__MASK 0xff000000
+#define CNA_DATA_SIZE3_RESERVED_0__SHIFT 24
+static inline uint32_t CNA_DATA_SIZE3_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE3_RESERVED_0__SHIFT) & CNA_DATA_SIZE3_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE3_SURF_MODE__MASK 0x00c00000
+#define CNA_DATA_SIZE3_SURF_MODE__SHIFT 22
+static inline uint32_t CNA_DATA_SIZE3_SURF_MODE(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE3_SURF_MODE__SHIFT) & CNA_DATA_SIZE3_SURF_MODE__MASK;
+}
+#define CNA_DATA_SIZE3_DATAOUT_ATOMICS__MASK 0x003fffff
+#define CNA_DATA_SIZE3_DATAOUT_ATOMICS__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE3_DATAOUT_ATOMICS(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE3_DATAOUT_ATOMICS__SHIFT) & CNA_DATA_SIZE3_DATAOUT_ATOMICS__MASK;
+}
+
+#define REG_CNA_WEIGHT_SIZE0 0x00001030
+#define CNA_WEIGHT_SIZE0_WEIGHT_BYTES__MASK 0xffffffff
+#define CNA_WEIGHT_SIZE0_WEIGHT_BYTES__SHIFT 0
+static inline uint32_t CNA_WEIGHT_SIZE0_WEIGHT_BYTES(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE0_WEIGHT_BYTES__SHIFT) & CNA_WEIGHT_SIZE0_WEIGHT_BYTES__MASK;
+}
+
+#define REG_CNA_WEIGHT_SIZE1 0x00001034
+#define CNA_WEIGHT_SIZE1_RESERVED_0__MASK 0xfff80000
+#define CNA_WEIGHT_SIZE1_RESERVED_0__SHIFT 19
+static inline uint32_t CNA_WEIGHT_SIZE1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE1_RESERVED_0__SHIFT) & CNA_WEIGHT_SIZE1_RESERVED_0__MASK;
+}
+#define CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__MASK 0x0007ffff
+#define CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__SHIFT 0
+static inline uint32_t CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__SHIFT) & CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__MASK;
+}
+
+#define REG_CNA_WEIGHT_SIZE2 0x00001038
+#define CNA_WEIGHT_SIZE2_RESERVED_0__MASK 0xe0000000
+#define CNA_WEIGHT_SIZE2_RESERVED_0__SHIFT 29
+static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_RESERVED_0__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_0__MASK;
+}
+#define CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__MASK 0x1f000000
+#define CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__SHIFT 24
+static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__MASK;
+}
+#define CNA_WEIGHT_SIZE2_RESERVED_1__MASK 0x00e00000
+#define CNA_WEIGHT_SIZE2_RESERVED_1__SHIFT 21
+static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_RESERVED_1__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_1__MASK;
+}
+#define CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__MASK 0x001f0000
+#define CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__SHIFT 16
+static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__MASK;
+}
+#define CNA_WEIGHT_SIZE2_RESERVED_2__MASK 0x0000c000
+#define CNA_WEIGHT_SIZE2_RESERVED_2__SHIFT 14
+static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_RESERVED_2__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_2__MASK;
+}
+#define CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__MASK 0x00003fff
+#define CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__SHIFT 0
+static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_KERNELS(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__MASK;
+}
+
+#define REG_CNA_CBUF_CON0 0x00001040
+#define CNA_CBUF_CON0_RESERVED_0__MASK 0xffffc000
+#define CNA_CBUF_CON0_RESERVED_0__SHIFT 14
+static inline uint32_t CNA_CBUF_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_RESERVED_0__SHIFT) & CNA_CBUF_CON0_RESERVED_0__MASK;
+}
+#define CNA_CBUF_CON0_WEIGHT_REUSE__MASK 0x00002000
+#define CNA_CBUF_CON0_WEIGHT_REUSE__SHIFT 13
+static inline uint32_t CNA_CBUF_CON0_WEIGHT_REUSE(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_WEIGHT_REUSE__SHIFT) & CNA_CBUF_CON0_WEIGHT_REUSE__MASK;
+}
+#define CNA_CBUF_CON0_DATA_REUSE__MASK 0x00001000
+#define CNA_CBUF_CON0_DATA_REUSE__SHIFT 12
+static inline uint32_t CNA_CBUF_CON0_DATA_REUSE(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_DATA_REUSE__SHIFT) & CNA_CBUF_CON0_DATA_REUSE__MASK;
+}
+#define CNA_CBUF_CON0_RESERVED_1__MASK 0x00000800
+#define CNA_CBUF_CON0_RESERVED_1__SHIFT 11
+static inline uint32_t CNA_CBUF_CON0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_RESERVED_1__SHIFT) & CNA_CBUF_CON0_RESERVED_1__MASK;
+}
+#define CNA_CBUF_CON0_FC_DATA_BANK__MASK 0x00000700
+#define CNA_CBUF_CON0_FC_DATA_BANK__SHIFT 8
+static inline uint32_t CNA_CBUF_CON0_FC_DATA_BANK(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_FC_DATA_BANK__SHIFT) & CNA_CBUF_CON0_FC_DATA_BANK__MASK;
+}
+#define CNA_CBUF_CON0_WEIGHT_BANK__MASK 0x000000f0
+#define CNA_CBUF_CON0_WEIGHT_BANK__SHIFT 4
+static inline uint32_t CNA_CBUF_CON0_WEIGHT_BANK(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_WEIGHT_BANK__SHIFT) & CNA_CBUF_CON0_WEIGHT_BANK__MASK;
+}
+#define CNA_CBUF_CON0_DATA_BANK__MASK 0x0000000f
+#define CNA_CBUF_CON0_DATA_BANK__SHIFT 0
+static inline uint32_t CNA_CBUF_CON0_DATA_BANK(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_DATA_BANK__SHIFT) & CNA_CBUF_CON0_DATA_BANK__MASK;
+}
+
+#define REG_CNA_CBUF_CON1 0x00001044
+#define CNA_CBUF_CON1_RESERVED_0__MASK 0xffffc000
+#define CNA_CBUF_CON1_RESERVED_0__SHIFT 14
+static inline uint32_t CNA_CBUF_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON1_RESERVED_0__SHIFT) & CNA_CBUF_CON1_RESERVED_0__MASK;
+}
+#define CNA_CBUF_CON1_DATA_ENTRIES__MASK 0x00003fff
+#define CNA_CBUF_CON1_DATA_ENTRIES__SHIFT 0
+static inline uint32_t CNA_CBUF_CON1_DATA_ENTRIES(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON1_DATA_ENTRIES__SHIFT) & CNA_CBUF_CON1_DATA_ENTRIES__MASK;
+}
+
+#define REG_CNA_CVT_CON0 0x0000104c
+#define CNA_CVT_CON0_RESERVED_0__MASK 0xf0000000
+#define CNA_CVT_CON0_RESERVED_0__SHIFT 28
+static inline uint32_t CNA_CVT_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_RESERVED_0__SHIFT) & CNA_CVT_CON0_RESERVED_0__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_3__MASK 0x0fc00000
+#define CNA_CVT_CON0_CVT_TRUNCATE_3__SHIFT 22
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_3(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_3__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_3__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_2__MASK 0x003f0000
+#define CNA_CVT_CON0_CVT_TRUNCATE_2__SHIFT 16
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_2(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_2__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_2__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_1__MASK 0x0000fc00
+#define CNA_CVT_CON0_CVT_TRUNCATE_1__SHIFT 10
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_1(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_1__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_1__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_0__MASK 0x000003f0
+#define CNA_CVT_CON0_CVT_TRUNCATE_0__SHIFT 4
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_0__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_0__MASK;
+}
+#define CNA_CVT_CON0_DATA_SIGN__MASK 0x00000008
+#define CNA_CVT_CON0_DATA_SIGN__SHIFT 3
+static inline uint32_t CNA_CVT_CON0_DATA_SIGN(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_DATA_SIGN__SHIFT) & CNA_CVT_CON0_DATA_SIGN__MASK;
+}
+#define CNA_CVT_CON0_ROUND_TYPE__MASK 0x00000004
+#define CNA_CVT_CON0_ROUND_TYPE__SHIFT 2
+static inline uint32_t CNA_CVT_CON0_ROUND_TYPE(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_ROUND_TYPE__SHIFT) & CNA_CVT_CON0_ROUND_TYPE__MASK;
+}
+#define CNA_CVT_CON0_CVT_TYPE__MASK 0x00000002
+#define CNA_CVT_CON0_CVT_TYPE__SHIFT 1
+static inline uint32_t CNA_CVT_CON0_CVT_TYPE(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TYPE__SHIFT) & CNA_CVT_CON0_CVT_TYPE__MASK;
+}
+#define CNA_CVT_CON0_CVT_BYPASS__MASK 0x00000001
+#define CNA_CVT_CON0_CVT_BYPASS__SHIFT 0
+static inline uint32_t CNA_CVT_CON0_CVT_BYPASS(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_BYPASS__SHIFT) & CNA_CVT_CON0_CVT_BYPASS__MASK;
+}
+
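+/*
+ * CVT_CON1..CVT_CON4 below each pack one 16-bit scale with one 16-bit
+ * offset, pairing up with CVT_TRUNCATE_0..3 in CVT_CON0 above; they
+ * appear to describe four independent input-conversion channels (a
+ * naming-based reading, not documented in this patch).
+ */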
+#define REG_CNA_CVT_CON1 0x00001050
+#define CNA_CVT_CON1_CVT_SCALE0__MASK 0xffff0000
+#define CNA_CVT_CON1_CVT_SCALE0__SHIFT 16
+static inline uint32_t CNA_CVT_CON1_CVT_SCALE0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON1_CVT_SCALE0__SHIFT) & CNA_CVT_CON1_CVT_SCALE0__MASK;
+}
+#define CNA_CVT_CON1_CVT_OFFSET0__MASK 0x0000ffff
+#define CNA_CVT_CON1_CVT_OFFSET0__SHIFT 0
+static inline uint32_t CNA_CVT_CON1_CVT_OFFSET0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON1_CVT_OFFSET0__SHIFT) & CNA_CVT_CON1_CVT_OFFSET0__MASK;
+}
+
+#define REG_CNA_CVT_CON2 0x00001054
+#define CNA_CVT_CON2_CVT_SCALE1__MASK 0xffff0000
+#define CNA_CVT_CON2_CVT_SCALE1__SHIFT 16
+static inline uint32_t CNA_CVT_CON2_CVT_SCALE1(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON2_CVT_SCALE1__SHIFT) & CNA_CVT_CON2_CVT_SCALE1__MASK;
+}
+#define CNA_CVT_CON2_CVT_OFFSET1__MASK 0x0000ffff
+#define CNA_CVT_CON2_CVT_OFFSET1__SHIFT 0
+static inline uint32_t CNA_CVT_CON2_CVT_OFFSET1(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON2_CVT_OFFSET1__SHIFT) & CNA_CVT_CON2_CVT_OFFSET1__MASK;
+}
+
+#define REG_CNA_CVT_CON3 0x00001058
+#define CNA_CVT_CON3_CVT_SCALE2__MASK 0xffff0000
+#define CNA_CVT_CON3_CVT_SCALE2__SHIFT 16
+static inline uint32_t CNA_CVT_CON3_CVT_SCALE2(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON3_CVT_SCALE2__SHIFT) & CNA_CVT_CON3_CVT_SCALE2__MASK;
+}
+#define CNA_CVT_CON3_CVT_OFFSET2__MASK 0x0000ffff
+#define CNA_CVT_CON3_CVT_OFFSET2__SHIFT 0
+static inline uint32_t CNA_CVT_CON3_CVT_OFFSET2(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON3_CVT_OFFSET2__SHIFT) & CNA_CVT_CON3_CVT_OFFSET2__MASK;
+}
+
+#define REG_CNA_CVT_CON4 0x0000105c
+#define CNA_CVT_CON4_CVT_SCALE3__MASK 0xffff0000
+#define CNA_CVT_CON4_CVT_SCALE3__SHIFT 16
+static inline uint32_t CNA_CVT_CON4_CVT_SCALE3(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON4_CVT_SCALE3__SHIFT) & CNA_CVT_CON4_CVT_SCALE3__MASK;
+}
+#define CNA_CVT_CON4_CVT_OFFSET3__MASK 0x0000ffff
+#define CNA_CVT_CON4_CVT_OFFSET3__SHIFT 0
+static inline uint32_t CNA_CVT_CON4_CVT_OFFSET3(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON4_CVT_OFFSET3__SHIFT) & CNA_CVT_CON4_CVT_OFFSET3__MASK;
+}
+
+#define REG_CNA_FC_CON0 0x00001060
+#define CNA_FC_CON0_FC_SKIP_DATA__MASK 0xffff0000
+#define CNA_FC_CON0_FC_SKIP_DATA__SHIFT 16
+static inline uint32_t CNA_FC_CON0_FC_SKIP_DATA(uint32_t val)
+{
+ return ((val) << CNA_FC_CON0_FC_SKIP_DATA__SHIFT) & CNA_FC_CON0_FC_SKIP_DATA__MASK;
+}
+#define CNA_FC_CON0_RESERVED_0__MASK 0x0000fffe
+#define CNA_FC_CON0_RESERVED_0__SHIFT 1
+static inline uint32_t CNA_FC_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_CON0_RESERVED_0__SHIFT) & CNA_FC_CON0_RESERVED_0__MASK;
+}
+#define CNA_FC_CON0_FC_SKIP_EN__MASK 0x00000001
+#define CNA_FC_CON0_FC_SKIP_EN__SHIFT 0
+static inline uint32_t CNA_FC_CON0_FC_SKIP_EN(uint32_t val)
+{
+ return ((val) << CNA_FC_CON0_FC_SKIP_EN__SHIFT) & CNA_FC_CON0_FC_SKIP_EN__MASK;
+}
+
+#define REG_CNA_FC_CON1 0x00001064
+#define CNA_FC_CON1_RESERVED_0__MASK 0xfffe0000
+#define CNA_FC_CON1_RESERVED_0__SHIFT 17
+static inline uint32_t CNA_FC_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_CON1_RESERVED_0__SHIFT) & CNA_FC_CON1_RESERVED_0__MASK;
+}
+#define CNA_FC_CON1_DATA_OFFSET__MASK 0x0001ffff
+#define CNA_FC_CON1_DATA_OFFSET__SHIFT 0
+static inline uint32_t CNA_FC_CON1_DATA_OFFSET(uint32_t val)
+{
+ return ((val) << CNA_FC_CON1_DATA_OFFSET__SHIFT) & CNA_FC_CON1_DATA_OFFSET__MASK;
+}
+
+#define REG_CNA_PAD_CON0 0x00001068
+#define CNA_PAD_CON0_RESERVED_0__MASK 0xffffff00
+#define CNA_PAD_CON0_RESERVED_0__SHIFT 8
+static inline uint32_t CNA_PAD_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON0_RESERVED_0__SHIFT) & CNA_PAD_CON0_RESERVED_0__MASK;
+}
+#define CNA_PAD_CON0_PAD_LEFT__MASK 0x000000f0
+#define CNA_PAD_CON0_PAD_LEFT__SHIFT 4
+static inline uint32_t CNA_PAD_CON0_PAD_LEFT(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON0_PAD_LEFT__SHIFT) & CNA_PAD_CON0_PAD_LEFT__MASK;
+}
+#define CNA_PAD_CON0_PAD_TOP__MASK 0x0000000f
+#define CNA_PAD_CON0_PAD_TOP__SHIFT 0
+static inline uint32_t CNA_PAD_CON0_PAD_TOP(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON0_PAD_TOP__SHIFT) & CNA_PAD_CON0_PAD_TOP__MASK;
+}
+
+#define REG_CNA_FEATURE_DATA_ADDR 0x00001070
+#define CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__MASK 0xffffffff
+#define CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__SHIFT 0
+static inline uint32_t CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR(uint32_t val)
+{
+ return ((val) << CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__SHIFT) & CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__MASK;
+}
+
+#define REG_CNA_FC_CON2 0x00001074
+#define CNA_FC_CON2_RESERVED_0__MASK 0xfffe0000
+#define CNA_FC_CON2_RESERVED_0__SHIFT 17
+static inline uint32_t CNA_FC_CON2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_CON2_RESERVED_0__SHIFT) & CNA_FC_CON2_RESERVED_0__MASK;
+}
+#define CNA_FC_CON2_WEIGHT_OFFSET__MASK 0x0001ffff
+#define CNA_FC_CON2_WEIGHT_OFFSET__SHIFT 0
+static inline uint32_t CNA_FC_CON2_WEIGHT_OFFSET(uint32_t val)
+{
+ return ((val) << CNA_FC_CON2_WEIGHT_OFFSET__SHIFT) & CNA_FC_CON2_WEIGHT_OFFSET__MASK;
+}
+
+#define REG_CNA_DMA_CON0 0x00001078
+#define CNA_DMA_CON0_OV4K_BYPASS__MASK 0x80000000
+#define CNA_DMA_CON0_OV4K_BYPASS__SHIFT 31
+static inline uint32_t CNA_DMA_CON0_OV4K_BYPASS(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_OV4K_BYPASS__SHIFT) & CNA_DMA_CON0_OV4K_BYPASS__MASK;
+}
+#define CNA_DMA_CON0_RESERVED_0__MASK 0x7ff00000
+#define CNA_DMA_CON0_RESERVED_0__SHIFT 20
+static inline uint32_t CNA_DMA_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_RESERVED_0__SHIFT) & CNA_DMA_CON0_RESERVED_0__MASK;
+}
+#define CNA_DMA_CON0_WEIGHT_BURST_LEN__MASK 0x000f0000
+#define CNA_DMA_CON0_WEIGHT_BURST_LEN__SHIFT 16
+static inline uint32_t CNA_DMA_CON0_WEIGHT_BURST_LEN(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_WEIGHT_BURST_LEN__SHIFT) & CNA_DMA_CON0_WEIGHT_BURST_LEN__MASK;
+}
+#define CNA_DMA_CON0_RESERVED_1__MASK 0x0000fff0
+#define CNA_DMA_CON0_RESERVED_1__SHIFT 4
+static inline uint32_t CNA_DMA_CON0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_RESERVED_1__SHIFT) & CNA_DMA_CON0_RESERVED_1__MASK;
+}
+#define CNA_DMA_CON0_DATA_BURST_LEN__MASK 0x0000000f
+#define CNA_DMA_CON0_DATA_BURST_LEN__SHIFT 0
+static inline uint32_t CNA_DMA_CON0_DATA_BURST_LEN(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_DATA_BURST_LEN__SHIFT) & CNA_DMA_CON0_DATA_BURST_LEN__MASK;
+}
+
+#define REG_CNA_DMA_CON1 0x0000107c
+#define CNA_DMA_CON1_RESERVED_0__MASK 0xf0000000
+#define CNA_DMA_CON1_RESERVED_0__SHIFT 28
+static inline uint32_t CNA_DMA_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON1_RESERVED_0__SHIFT) & CNA_DMA_CON1_RESERVED_0__MASK;
+}
+#define CNA_DMA_CON1_LINE_STRIDE__MASK 0x0fffffff
+#define CNA_DMA_CON1_LINE_STRIDE__SHIFT 0
+static inline uint32_t CNA_DMA_CON1_LINE_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON1_LINE_STRIDE__SHIFT) & CNA_DMA_CON1_LINE_STRIDE__MASK;
+}
+
+#define REG_CNA_DMA_CON2 0x00001080
+#define CNA_DMA_CON2_RESERVED_0__MASK 0xf0000000
+#define CNA_DMA_CON2_RESERVED_0__SHIFT 28
+static inline uint32_t CNA_DMA_CON2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON2_RESERVED_0__SHIFT) & CNA_DMA_CON2_RESERVED_0__MASK;
+}
+#define CNA_DMA_CON2_SURF_STRIDE__MASK 0x0fffffff
+#define CNA_DMA_CON2_SURF_STRIDE__SHIFT 0
+static inline uint32_t CNA_DMA_CON2_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON2_SURF_STRIDE__SHIFT) & CNA_DMA_CON2_SURF_STRIDE__MASK;
+}
+
+#define REG_CNA_FC_DATA_SIZE0 0x00001084
+#define CNA_FC_DATA_SIZE0_RESERVED_0__MASK 0xc0000000
+#define CNA_FC_DATA_SIZE0_RESERVED_0__SHIFT 30
+static inline uint32_t CNA_FC_DATA_SIZE0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_RESERVED_0__SHIFT) & CNA_FC_DATA_SIZE0_RESERVED_0__MASK;
+}
+#define CNA_FC_DATA_SIZE0_DMA_WIDTH__MASK 0x3fff0000
+#define CNA_FC_DATA_SIZE0_DMA_WIDTH__SHIFT 16
+static inline uint32_t CNA_FC_DATA_SIZE0_DMA_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_DMA_WIDTH__SHIFT) & CNA_FC_DATA_SIZE0_DMA_WIDTH__MASK;
+}
+#define CNA_FC_DATA_SIZE0_RESERVED_1__MASK 0x0000f800
+#define CNA_FC_DATA_SIZE0_RESERVED_1__SHIFT 11
+static inline uint32_t CNA_FC_DATA_SIZE0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_RESERVED_1__SHIFT) & CNA_FC_DATA_SIZE0_RESERVED_1__MASK;
+}
+#define CNA_FC_DATA_SIZE0_DMA_HEIGHT__MASK 0x000007ff
+#define CNA_FC_DATA_SIZE0_DMA_HEIGHT__SHIFT 0
+static inline uint32_t CNA_FC_DATA_SIZE0_DMA_HEIGHT(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_DMA_HEIGHT__SHIFT) & CNA_FC_DATA_SIZE0_DMA_HEIGHT__MASK;
+}
+
+#define REG_CNA_FC_DATA_SIZE1 0x00001088
+#define CNA_FC_DATA_SIZE1_RESERVED_0__MASK 0xffff0000
+#define CNA_FC_DATA_SIZE1_RESERVED_0__SHIFT 16
+static inline uint32_t CNA_FC_DATA_SIZE1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE1_RESERVED_0__SHIFT) & CNA_FC_DATA_SIZE1_RESERVED_0__MASK;
+}
+#define CNA_FC_DATA_SIZE1_DMA_CHANNEL__MASK 0x0000ffff
+#define CNA_FC_DATA_SIZE1_DMA_CHANNEL__SHIFT 0
+static inline uint32_t CNA_FC_DATA_SIZE1_DMA_CHANNEL(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE1_DMA_CHANNEL__SHIFT) & CNA_FC_DATA_SIZE1_DMA_CHANNEL__MASK;
+}
+
+#define REG_CNA_CLK_GATE 0x00001090
+#define CNA_CLK_GATE_RESERVED_0__MASK 0xffffffe0
+#define CNA_CLK_GATE_RESERVED_0__SHIFT 5
+static inline uint32_t CNA_CLK_GATE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_RESERVED_0__SHIFT) & CNA_CLK_GATE_RESERVED_0__MASK;
+}
+#define CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__MASK 0x00000010
+#define CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__SHIFT 4
+static inline uint32_t CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__MASK;
+}
+#define CNA_CLK_GATE_RESERVED_1__MASK 0x00000008
+#define CNA_CLK_GATE_RESERVED_1__SHIFT 3
+static inline uint32_t CNA_CLK_GATE_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_RESERVED_1__SHIFT) & CNA_CLK_GATE_RESERVED_1__MASK;
+}
+#define CNA_CLK_GATE_CSC_DISABLE_CLKGATE__MASK 0x00000004
+#define CNA_CLK_GATE_CSC_DISABLE_CLKGATE__SHIFT 2
+static inline uint32_t CNA_CLK_GATE_CSC_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CSC_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CSC_DISABLE_CLKGATE__MASK;
+}
+#define CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__MASK 0x00000002
+#define CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__SHIFT 1
+static inline uint32_t CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__MASK;
+}
+#define CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__MASK 0x00000001
+#define CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__SHIFT 0
+static inline uint32_t CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__MASK;
+}
+
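+/*
+ * The CNA_DCOMP_* registers below appear to drive the weight
+ * decompressor: DCOMP_CTRL can bypass it (WT_DEC_BYPASS), DCOMP_REGNUM
+ * holds a register count, and the ADDR/AMOUNT slots describe the
+ * compressed streams. Inferred from field names only.
+ */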
+#define REG_CNA_DCOMP_CTRL 0x00001100
+#define CNA_DCOMP_CTRL_RESERVED_0__MASK 0xfffffff0
+#define CNA_DCOMP_CTRL_RESERVED_0__SHIFT 4
+static inline uint32_t CNA_DCOMP_CTRL_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_CTRL_RESERVED_0__SHIFT) & CNA_DCOMP_CTRL_RESERVED_0__MASK;
+}
+#define CNA_DCOMP_CTRL_WT_DEC_BYPASS__MASK 0x00000008
+#define CNA_DCOMP_CTRL_WT_DEC_BYPASS__SHIFT 3
+static inline uint32_t CNA_DCOMP_CTRL_WT_DEC_BYPASS(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_CTRL_WT_DEC_BYPASS__SHIFT) & CNA_DCOMP_CTRL_WT_DEC_BYPASS__MASK;
+}
+#define CNA_DCOMP_CTRL_DECOMP_CONTROL__MASK 0x00000007
+#define CNA_DCOMP_CTRL_DECOMP_CONTROL__SHIFT 0
+static inline uint32_t CNA_DCOMP_CTRL_DECOMP_CONTROL(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_CTRL_DECOMP_CONTROL__SHIFT) & CNA_DCOMP_CTRL_DECOMP_CONTROL__MASK;
+}
+
+#define REG_CNA_DCOMP_REGNUM 0x00001104
+#define CNA_DCOMP_REGNUM_DCOMP_REGNUM__MASK 0xffffffff
+#define CNA_DCOMP_REGNUM_DCOMP_REGNUM__SHIFT 0
+static inline uint32_t CNA_DCOMP_REGNUM_DCOMP_REGNUM(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_REGNUM_DCOMP_REGNUM__SHIFT) & CNA_DCOMP_REGNUM_DCOMP_REGNUM__MASK;
+}
+
+#define REG_CNA_DCOMP_ADDR0 0x00001110
+#define CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__MASK 0xffffffff
+#define CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__SHIFT 0
+static inline uint32_t CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__SHIFT) & CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__MASK;
+}
+
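+/*
+ * DCOMP_AMOUNT0..DCOMP_AMOUNT15 are sixteen identical full-width
+ * registers at stride 4 from 0x1140, presumably one amount per
+ * decompression slot.
+ */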
+#define REG_CNA_DCOMP_AMOUNT0 0x00001140
+#define CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__SHIFT) & CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT1 0x00001144
+#define CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__SHIFT) & CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT2 0x00001148
+#define CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__SHIFT) & CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT3 0x0000114c
+#define CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__SHIFT) & CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT4 0x00001150
+#define CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__SHIFT) & CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT5 0x00001154
+#define CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__SHIFT) & CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT6 0x00001158
+#define CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__SHIFT) & CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT7 0x0000115c
+#define CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__SHIFT) & CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT8 0x00001160
+#define CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__SHIFT) & CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT9 0x00001164
+#define CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__SHIFT) & CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT10 0x00001168
+#define CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__SHIFT) & CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT11 0x0000116c
+#define CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__SHIFT) & CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT12 0x00001170
+#define CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__SHIFT) & CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT13 0x00001174
+#define CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__SHIFT) & CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT14 0x00001178
+#define CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__SHIFT) & CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT15 0x0000117c
+#define CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__SHIFT) & CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__MASK;
+}
+
+#define REG_CNA_CVT_CON5 0x00001180
+#define CNA_CVT_CON5_PER_CHANNEL_CVT_EN__MASK 0xffffffff
+#define CNA_CVT_CON5_PER_CHANNEL_CVT_EN__SHIFT 0
+static inline uint32_t CNA_CVT_CON5_PER_CHANNEL_CVT_EN(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON5_PER_CHANNEL_CVT_EN__SHIFT) & CNA_CVT_CON5_PER_CHANNEL_CVT_EN__MASK;
+}
+
+#define REG_CNA_PAD_CON1 0x00001184
+#define CNA_PAD_CON1_PAD_VALUE__MASK 0xffffffff
+#define CNA_PAD_CON1_PAD_VALUE__SHIFT 0
+static inline uint32_t CNA_PAD_CON1_PAD_VALUE(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON1_PAD_VALUE__SHIFT) & CNA_PAD_CON1_PAD_VALUE__MASK;
+}
+
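+/*
+ * 0x3000..: the CORE_* block (the MAC/convolution core, judging by
+ * MAC_GATING and PROC_PRECISION). Its leading S_STATUS/S_POINTER/
+ * OPERATION_ENABLE job-control triplet is repeated verbatim by the DPU
+ * block at 0x4000 below.
+ */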
+#define REG_CORE_S_STATUS 0x00003000
+#define CORE_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define CORE_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t CORE_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_RESERVED_0__SHIFT) & CORE_S_STATUS_RESERVED_0__MASK;
+}
+#define CORE_S_STATUS_STATUS_1__MASK 0x00030000
+#define CORE_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t CORE_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_STATUS_1__SHIFT) & CORE_S_STATUS_STATUS_1__MASK;
+}
+#define CORE_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define CORE_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t CORE_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_RESERVED_1__SHIFT) & CORE_S_STATUS_RESERVED_1__MASK;
+}
+#define CORE_S_STATUS_STATUS_0__MASK 0x00000003
+#define CORE_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t CORE_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_STATUS_0__SHIFT) & CORE_S_STATUS_STATUS_0__MASK;
+}
+
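+/*
+ * No unpack helpers are generated; reading a field back is done by
+ * hand. Illustrative sketch, assuming an ioremapped register base:
+ *
+ *	status0 = (readl(base + REG_CORE_S_STATUS) &
+ *		   CORE_S_STATUS_STATUS_0__MASK) >>
+ *		  CORE_S_STATUS_STATUS_0__SHIFT;
+ */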
+#define REG_CORE_S_POINTER 0x00003004
+#define CORE_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define CORE_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t CORE_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_RESERVED_0__SHIFT) & CORE_S_POINTER_RESERVED_0__MASK;
+}
+#define CORE_S_POINTER_EXECUTER__MASK 0x00010000
+#define CORE_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t CORE_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_EXECUTER__SHIFT) & CORE_S_POINTER_EXECUTER__MASK;
+}
+#define CORE_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define CORE_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t CORE_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_RESERVED_1__SHIFT) & CORE_S_POINTER_RESERVED_1__MASK;
+}
+#define CORE_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define CORE_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t CORE_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & CORE_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define CORE_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define CORE_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t CORE_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER_PP_CLEAR__SHIFT) & CORE_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define CORE_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define CORE_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t CORE_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER_PP_MODE__SHIFT) & CORE_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define CORE_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define CORE_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t CORE_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_EXECUTER_PP_EN__SHIFT) & CORE_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define CORE_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define CORE_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t CORE_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER_PP_EN__SHIFT) & CORE_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define CORE_S_POINTER_POINTER__MASK 0x00000001
+#define CORE_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t CORE_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER__SHIFT) & CORE_S_POINTER_POINTER__MASK;
+}
+
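+/*
+ * The *_PP_* fields in S_POINTER suggest double-buffered ("ping-pong")
+ * task registers: POINTER selects the shadow set being programmed while
+ * EXECUTER reports the set the hardware is running. Inferred from the
+ * field names; this patch carries no documentation.
+ */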
+#define REG_CORE_OPERATION_ENABLE 0x00003008
+#define CORE_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define CORE_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t CORE_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_OPERATION_ENABLE_RESERVED_0__SHIFT) & CORE_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define CORE_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define CORE_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t CORE_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << CORE_OPERATION_ENABLE_OP_EN__SHIFT) & CORE_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_CORE_MAC_GATING 0x0000300c
+#define CORE_MAC_GATING_RESERVED_0__MASK 0xf8000000
+#define CORE_MAC_GATING_RESERVED_0__SHIFT 27
+static inline uint32_t CORE_MAC_GATING_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_MAC_GATING_RESERVED_0__SHIFT) & CORE_MAC_GATING_RESERVED_0__MASK;
+}
+#define CORE_MAC_GATING_SLCG_OP_EN__MASK 0x07ffffff
+#define CORE_MAC_GATING_SLCG_OP_EN__SHIFT 0
+static inline uint32_t CORE_MAC_GATING_SLCG_OP_EN(uint32_t val)
+{
+ return ((val) << CORE_MAC_GATING_SLCG_OP_EN__SHIFT) & CORE_MAC_GATING_SLCG_OP_EN__MASK;
+}
+
+#define REG_CORE_MISC_CFG 0x00003010
+#define CORE_MISC_CFG_RESERVED_0__MASK 0xfff00000
+#define CORE_MISC_CFG_RESERVED_0__SHIFT 20
+static inline uint32_t CORE_MISC_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_RESERVED_0__SHIFT) & CORE_MISC_CFG_RESERVED_0__MASK;
+}
+#define CORE_MISC_CFG_SOFT_GATING__MASK 0x000fc000
+#define CORE_MISC_CFG_SOFT_GATING__SHIFT 14
+static inline uint32_t CORE_MISC_CFG_SOFT_GATING(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_SOFT_GATING__SHIFT) & CORE_MISC_CFG_SOFT_GATING__MASK;
+}
+#define CORE_MISC_CFG_RESERVED_1__MASK 0x00003800
+#define CORE_MISC_CFG_RESERVED_1__SHIFT 11
+static inline uint32_t CORE_MISC_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_RESERVED_1__SHIFT) & CORE_MISC_CFG_RESERVED_1__MASK;
+}
+#define CORE_MISC_CFG_PROC_PRECISION__MASK 0x00000700
+#define CORE_MISC_CFG_PROC_PRECISION__SHIFT 8
+static inline uint32_t CORE_MISC_CFG_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_PROC_PRECISION__SHIFT) & CORE_MISC_CFG_PROC_PRECISION__MASK;
+}
+#define CORE_MISC_CFG_RESERVED_2__MASK 0x000000fc
+#define CORE_MISC_CFG_RESERVED_2__SHIFT 2
+static inline uint32_t CORE_MISC_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_RESERVED_2__SHIFT) & CORE_MISC_CFG_RESERVED_2__MASK;
+}
+#define CORE_MISC_CFG_DW_EN__MASK 0x00000002
+#define CORE_MISC_CFG_DW_EN__SHIFT 1
+static inline uint32_t CORE_MISC_CFG_DW_EN(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_DW_EN__SHIFT) & CORE_MISC_CFG_DW_EN__MASK;
+}
+#define CORE_MISC_CFG_QD_EN__MASK 0x00000001
+#define CORE_MISC_CFG_QD_EN__SHIFT 0
+static inline uint32_t CORE_MISC_CFG_QD_EN(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_QD_EN__SHIFT) & CORE_MISC_CFG_QD_EN__MASK;
+}
+
+#define REG_CORE_DATAOUT_SIZE_0 0x00003014
+#define CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__MASK 0xffff0000
+#define CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__SHIFT 16
+static inline uint32_t CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__SHIFT) & CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__MASK;
+}
+#define CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__MASK 0x0000ffff
+#define CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__SHIFT 0
+static inline uint32_t CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__SHIFT) & CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__MASK;
+}
+
+#define REG_CORE_DATAOUT_SIZE_1 0x00003018
+#define CORE_DATAOUT_SIZE_1_RESERVED_0__MASK 0xffff0000
+#define CORE_DATAOUT_SIZE_1_RESERVED_0__SHIFT 16
+static inline uint32_t CORE_DATAOUT_SIZE_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_1_RESERVED_0__SHIFT) & CORE_DATAOUT_SIZE_1_RESERVED_0__MASK;
+}
+#define CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__MASK 0x0000ffff
+#define CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__SHIFT 0
+static inline uint32_t CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__SHIFT) & CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__MASK;
+}
+
+#define REG_CORE_CLIP_TRUNCATE 0x0000301c
+#define CORE_CLIP_TRUNCATE_RESERVED_0__MASK 0xffffff80
+#define CORE_CLIP_TRUNCATE_RESERVED_0__SHIFT 7
+static inline uint32_t CORE_CLIP_TRUNCATE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_RESERVED_0__SHIFT) & CORE_CLIP_TRUNCATE_RESERVED_0__MASK;
+}
+#define CORE_CLIP_TRUNCATE_ROUND_TYPE__MASK 0x00000040
+#define CORE_CLIP_TRUNCATE_ROUND_TYPE__SHIFT 6
+static inline uint32_t CORE_CLIP_TRUNCATE_ROUND_TYPE(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_ROUND_TYPE__SHIFT) & CORE_CLIP_TRUNCATE_ROUND_TYPE__MASK;
+}
+#define CORE_CLIP_TRUNCATE_RESERVED_1__MASK 0x00000020
+#define CORE_CLIP_TRUNCATE_RESERVED_1__SHIFT 5
+static inline uint32_t CORE_CLIP_TRUNCATE_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_RESERVED_1__SHIFT) & CORE_CLIP_TRUNCATE_RESERVED_1__MASK;
+}
+#define CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__MASK 0x0000001f
+#define CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__SHIFT 0
+static inline uint32_t CORE_CLIP_TRUNCATE_CLIP_TRUNCATE(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__SHIFT) & CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__MASK;
+}
+
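+/*
+ * 0x4000..: the DPU_* block. It reuses the S_STATUS/S_POINTER/
+ * OPERATION_ENABLE control layout of the CORE block above, then adds
+ * its own data-path configuration.
+ */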
+#define REG_DPU_S_STATUS 0x00004000
+#define DPU_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define DPU_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_RESERVED_0__SHIFT) & DPU_S_STATUS_RESERVED_0__MASK;
+}
+#define DPU_S_STATUS_STATUS_1__MASK 0x00030000
+#define DPU_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t DPU_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_STATUS_1__SHIFT) & DPU_S_STATUS_STATUS_1__MASK;
+}
+#define DPU_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define DPU_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_RESERVED_1__SHIFT) & DPU_S_STATUS_RESERVED_1__MASK;
+}
+#define DPU_S_STATUS_STATUS_0__MASK 0x00000003
+#define DPU_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t DPU_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_STATUS_0__SHIFT) & DPU_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_DPU_S_POINTER 0x00004004
+#define DPU_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define DPU_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t DPU_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_RESERVED_0__SHIFT) & DPU_S_POINTER_RESERVED_0__MASK;
+}
+#define DPU_S_POINTER_EXECUTER__MASK 0x00010000
+#define DPU_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t DPU_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_EXECUTER__SHIFT) & DPU_S_POINTER_EXECUTER__MASK;
+}
+#define DPU_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define DPU_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t DPU_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_RESERVED_1__SHIFT) & DPU_S_POINTER_RESERVED_1__MASK;
+}
+#define DPU_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define DPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t DPU_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & DPU_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define DPU_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define DPU_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t DPU_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER_PP_CLEAR__SHIFT) & DPU_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define DPU_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define DPU_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t DPU_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER_PP_MODE__SHIFT) & DPU_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define DPU_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define DPU_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t DPU_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_EXECUTER_PP_EN__SHIFT) & DPU_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define DPU_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define DPU_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t DPU_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER_PP_EN__SHIFT) & DPU_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define DPU_S_POINTER_POINTER__MASK 0x00000001
+#define DPU_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t DPU_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER__SHIFT) & DPU_S_POINTER_POINTER__MASK;
+}
+
+#define REG_DPU_OPERATION_ENABLE 0x00004008
+#define DPU_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define DPU_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t DPU_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OPERATION_ENABLE_RESERVED_0__SHIFT) & DPU_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define DPU_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define DPU_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t DPU_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << DPU_OPERATION_ENABLE_OP_EN__SHIFT) & DPU_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_DPU_FEATURE_MODE_CFG 0x0000400c
+#define DPU_FEATURE_MODE_CFG_COMB_USE__MASK 0x80000000
+#define DPU_FEATURE_MODE_CFG_COMB_USE__SHIFT 31
+static inline uint32_t DPU_FEATURE_MODE_CFG_COMB_USE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_COMB_USE__SHIFT) & DPU_FEATURE_MODE_CFG_COMB_USE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_TP_EN__MASK 0x40000000
+#define DPU_FEATURE_MODE_CFG_TP_EN__SHIFT 30
+static inline uint32_t DPU_FEATURE_MODE_CFG_TP_EN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_TP_EN__SHIFT) & DPU_FEATURE_MODE_CFG_TP_EN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_RGP_TYPE__MASK 0x3c000000
+#define DPU_FEATURE_MODE_CFG_RGP_TYPE__SHIFT 26
+static inline uint32_t DPU_FEATURE_MODE_CFG_RGP_TYPE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_RGP_TYPE__SHIFT) & DPU_FEATURE_MODE_CFG_RGP_TYPE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_NONALIGN__MASK 0x02000000
+#define DPU_FEATURE_MODE_CFG_NONALIGN__SHIFT 25
+static inline uint32_t DPU_FEATURE_MODE_CFG_NONALIGN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_NONALIGN__SHIFT) & DPU_FEATURE_MODE_CFG_NONALIGN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_SURF_LEN__MASK 0x01fffe00
+#define DPU_FEATURE_MODE_CFG_SURF_LEN__SHIFT 9
+static inline uint32_t DPU_FEATURE_MODE_CFG_SURF_LEN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_SURF_LEN__SHIFT) & DPU_FEATURE_MODE_CFG_SURF_LEN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_BURST_LEN__MASK 0x000001e0
+#define DPU_FEATURE_MODE_CFG_BURST_LEN__SHIFT 5
+static inline uint32_t DPU_FEATURE_MODE_CFG_BURST_LEN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_BURST_LEN__SHIFT) & DPU_FEATURE_MODE_CFG_BURST_LEN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_CONV_MODE__MASK 0x00000018
+#define DPU_FEATURE_MODE_CFG_CONV_MODE__SHIFT 3
+static inline uint32_t DPU_FEATURE_MODE_CFG_CONV_MODE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_CONV_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_CONV_MODE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_OUTPUT_MODE__MASK 0x00000006
+#define DPU_FEATURE_MODE_CFG_OUTPUT_MODE__SHIFT 1
+static inline uint32_t DPU_FEATURE_MODE_CFG_OUTPUT_MODE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_OUTPUT_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_OUTPUT_MODE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_FLYING_MODE__MASK 0x00000001
+#define DPU_FEATURE_MODE_CFG_FLYING_MODE__SHIFT 0
+static inline uint32_t DPU_FEATURE_MODE_CFG_FLYING_MODE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_FLYING_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_FLYING_MODE__MASK;
+}
+
+#define REG_DPU_DATA_FORMAT 0x00004010
+#define DPU_DATA_FORMAT_OUT_PRECISION__MASK 0xe0000000
+#define DPU_DATA_FORMAT_OUT_PRECISION__SHIFT 29
+static inline uint32_t DPU_DATA_FORMAT_OUT_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_OUT_PRECISION__SHIFT) & DPU_DATA_FORMAT_OUT_PRECISION__MASK;
+}
+#define DPU_DATA_FORMAT_IN_PRECISION__MASK 0x1c000000
+#define DPU_DATA_FORMAT_IN_PRECISION__SHIFT 26
+static inline uint32_t DPU_DATA_FORMAT_IN_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_IN_PRECISION__SHIFT) & DPU_DATA_FORMAT_IN_PRECISION__MASK;
+}
+#define DPU_DATA_FORMAT_EW_TRUNCATE_NEG__MASK 0x03ff0000
+#define DPU_DATA_FORMAT_EW_TRUNCATE_NEG__SHIFT 16
+static inline uint32_t DPU_DATA_FORMAT_EW_TRUNCATE_NEG(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_EW_TRUNCATE_NEG__SHIFT) & DPU_DATA_FORMAT_EW_TRUNCATE_NEG__MASK;
+}
+#define DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__MASK 0x0000fc00
+#define DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__SHIFT 10
+static inline uint32_t DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__SHIFT) & DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__MASK;
+}
+#define DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__MASK 0x000003f0
+#define DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__SHIFT 4
+static inline uint32_t DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__SHIFT) & DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__MASK;
+}
+#define DPU_DATA_FORMAT_MC_SURF_OUT__MASK 0x00000008
+#define DPU_DATA_FORMAT_MC_SURF_OUT__SHIFT 3
+static inline uint32_t DPU_DATA_FORMAT_MC_SURF_OUT(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_MC_SURF_OUT__SHIFT) & DPU_DATA_FORMAT_MC_SURF_OUT__MASK;
+}
+#define DPU_DATA_FORMAT_PROC_PRECISION__MASK 0x00000007
+#define DPU_DATA_FORMAT_PROC_PRECISION__SHIFT 0
+static inline uint32_t DPU_DATA_FORMAT_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_PROC_PRECISION__SHIFT) & DPU_DATA_FORMAT_PROC_PRECISION__MASK;
+}
+
+#define REG_DPU_OFFSET_PEND 0x00004014
+#define DPU_OFFSET_PEND_RESERVED_0__MASK 0xffff0000
+#define DPU_OFFSET_PEND_RESERVED_0__SHIFT 16
+static inline uint32_t DPU_OFFSET_PEND_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OFFSET_PEND_RESERVED_0__SHIFT) & DPU_OFFSET_PEND_RESERVED_0__MASK;
+}
+#define DPU_OFFSET_PEND_OFFSET_PEND__MASK 0x0000ffff
+#define DPU_OFFSET_PEND_OFFSET_PEND__SHIFT 0
+static inline uint32_t DPU_OFFSET_PEND_OFFSET_PEND(uint32_t val)
+{
+ return ((val) << DPU_OFFSET_PEND_OFFSET_PEND__SHIFT) & DPU_OFFSET_PEND_OFFSET_PEND__MASK;
+}
+
+#define REG_DPU_DST_BASE_ADDR 0x00004020
+#define DPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK 0xffffffff
+#define DPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_DST_BASE_ADDR_DST_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT) & DPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_DST_SURF_STRIDE 0x00004024
+#define DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK 0xfffffff0
+#define DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT 4
+static inline uint32_t DPU_DST_SURF_STRIDE_DST_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT) & DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK;
+}
+#define DPU_DST_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define DPU_DST_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_DST_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DST_SURF_STRIDE_RESERVED_0__SHIFT) & DPU_DST_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_WIDTH 0x00004030
+#define DPU_DATA_CUBE_WIDTH_RESERVED_0__MASK 0xffffe000
+#define DPU_DATA_CUBE_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_WIDTH_RESERVED_0__SHIFT) & DPU_DATA_CUBE_WIDTH_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_WIDTH_WIDTH__MASK 0x00001fff
+#define DPU_DATA_CUBE_WIDTH_WIDTH__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_WIDTH_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_WIDTH_WIDTH__SHIFT) & DPU_DATA_CUBE_WIDTH_WIDTH__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_HEIGHT 0x00004034
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_0__MASK 0xfe000000
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT 25
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT) & DPU_DATA_CUBE_HEIGHT_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__MASK 0x01c00000
+#define DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__SHIFT 22
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_MINMAX_CTL(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__SHIFT) & DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__MASK;
+}
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_1__MASK 0x003fe000
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT) & DPU_DATA_CUBE_HEIGHT_RESERVED_1__MASK;
+}
+#define DPU_DATA_CUBE_HEIGHT_HEIGHT__MASK 0x00001fff
+#define DPU_DATA_CUBE_HEIGHT_HEIGHT__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_HEIGHT__SHIFT) & DPU_DATA_CUBE_HEIGHT_HEIGHT__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_NOTCH_ADDR 0x00004038
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__MASK 0xe0000000
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__MASK 0x1fff0000
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__SHIFT 16
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__MASK;
+}
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__MASK 0x0000e000
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__MASK;
+}
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__MASK 0x00001fff
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_CHANNEL 0x0000403c
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_0__MASK 0xe0000000
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT) & DPU_DATA_CUBE_CHANNEL_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__MASK 0x1fff0000
+#define DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__SHIFT 16
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__SHIFT) & DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__MASK;
+}
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_1__MASK 0x0000e000
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_RESERVED_1__SHIFT) & DPU_DATA_CUBE_CHANNEL_RESERVED_1__MASK;
+}
+#define DPU_DATA_CUBE_CHANNEL_CHANNEL__MASK 0x00001fff
+#define DPU_DATA_CUBE_CHANNEL_CHANNEL__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_CHANNEL(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_CHANNEL__SHIFT) & DPU_DATA_CUBE_CHANNEL_CHANNEL__MASK;
+}
+
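+/*
+ * The DPU_BS_* group starting below (ALU operand, multiplier, ReLU
+ * clamp, output writeback) reads like a bias/scale post-processing
+ * stage, with per-step bypass bits in BS_CFG. Naming-based reading
+ * only.
+ */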
+#define REG_DPU_BS_CFG 0x00004040
+#define DPU_BS_CFG_RESERVED_0__MASK 0xfff00000
+#define DPU_BS_CFG_RESERVED_0__SHIFT 20
+static inline uint32_t DPU_BS_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_RESERVED_0__SHIFT) & DPU_BS_CFG_RESERVED_0__MASK;
+}
+#define DPU_BS_CFG_BS_ALU_ALGO__MASK 0x000f0000
+#define DPU_BS_CFG_BS_ALU_ALGO__SHIFT 16
+static inline uint32_t DPU_BS_CFG_BS_ALU_ALGO(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_ALU_ALGO__SHIFT) & DPU_BS_CFG_BS_ALU_ALGO__MASK;
+}
+#define DPU_BS_CFG_RESERVED_1__MASK 0x0000fe00
+#define DPU_BS_CFG_RESERVED_1__SHIFT 9
+static inline uint32_t DPU_BS_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_RESERVED_1__SHIFT) & DPU_BS_CFG_RESERVED_1__MASK;
+}
+#define DPU_BS_CFG_BS_ALU_SRC__MASK 0x00000100
+#define DPU_BS_CFG_BS_ALU_SRC__SHIFT 8
+static inline uint32_t DPU_BS_CFG_BS_ALU_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_ALU_SRC__SHIFT) & DPU_BS_CFG_BS_ALU_SRC__MASK;
+}
+#define DPU_BS_CFG_BS_RELUX_EN__MASK 0x00000080
+#define DPU_BS_CFG_BS_RELUX_EN__SHIFT 7
+static inline uint32_t DPU_BS_CFG_BS_RELUX_EN(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_RELUX_EN__SHIFT) & DPU_BS_CFG_BS_RELUX_EN__MASK;
+}
+#define DPU_BS_CFG_BS_RELU_BYPASS__MASK 0x00000040
+#define DPU_BS_CFG_BS_RELU_BYPASS__SHIFT 6
+static inline uint32_t DPU_BS_CFG_BS_RELU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_RELU_BYPASS__SHIFT) & DPU_BS_CFG_BS_RELU_BYPASS__MASK;
+}
+#define DPU_BS_CFG_BS_MUL_PRELU__MASK 0x00000020
+#define DPU_BS_CFG_BS_MUL_PRELU__SHIFT 5
+static inline uint32_t DPU_BS_CFG_BS_MUL_PRELU(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_MUL_PRELU__SHIFT) & DPU_BS_CFG_BS_MUL_PRELU__MASK;
+}
+#define DPU_BS_CFG_BS_MUL_BYPASS__MASK 0x00000010
+#define DPU_BS_CFG_BS_MUL_BYPASS__SHIFT 4
+static inline uint32_t DPU_BS_CFG_BS_MUL_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_MUL_BYPASS__SHIFT) & DPU_BS_CFG_BS_MUL_BYPASS__MASK;
+}
+#define DPU_BS_CFG_RESERVED_2__MASK 0x0000000c
+#define DPU_BS_CFG_RESERVED_2__SHIFT 2
+static inline uint32_t DPU_BS_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_RESERVED_2__SHIFT) & DPU_BS_CFG_RESERVED_2__MASK;
+}
+#define DPU_BS_CFG_BS_ALU_BYPASS__MASK 0x00000002
+#define DPU_BS_CFG_BS_ALU_BYPASS__SHIFT 1
+static inline uint32_t DPU_BS_CFG_BS_ALU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_ALU_BYPASS__SHIFT) & DPU_BS_CFG_BS_ALU_BYPASS__MASK;
+}
+#define DPU_BS_CFG_BS_BYPASS__MASK 0x00000001
+#define DPU_BS_CFG_BS_BYPASS__SHIFT 0
+static inline uint32_t DPU_BS_CFG_BS_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_BYPASS__SHIFT) & DPU_BS_CFG_BS_BYPASS__MASK;
+}
+
+#define REG_DPU_BS_ALU_CFG 0x00004044
+#define DPU_BS_ALU_CFG_BS_ALU_OPERAND__MASK 0xffffffff
+#define DPU_BS_ALU_CFG_BS_ALU_OPERAND__SHIFT 0
+static inline uint32_t DPU_BS_ALU_CFG_BS_ALU_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BS_ALU_CFG_BS_ALU_OPERAND__SHIFT) & DPU_BS_ALU_CFG_BS_ALU_OPERAND__MASK;
+}
+
+#define REG_DPU_BS_MUL_CFG 0x00004048
+#define DPU_BS_MUL_CFG_BS_MUL_OPERAND__MASK 0xffff0000
+#define DPU_BS_MUL_CFG_BS_MUL_OPERAND__SHIFT 16
+static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_MUL_OPERAND__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_OPERAND__MASK;
+}
+#define DPU_BS_MUL_CFG_RESERVED_0__MASK 0x0000c000
+#define DPU_BS_MUL_CFG_RESERVED_0__SHIFT 14
+static inline uint32_t DPU_BS_MUL_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_RESERVED_0__SHIFT) & DPU_BS_MUL_CFG_RESERVED_0__MASK;
+}
+#define DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__MASK 0x00003f00
+#define DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__SHIFT 8
+static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__MASK;
+}
+#define DPU_BS_MUL_CFG_RESERVED_1__MASK 0x000000fc
+#define DPU_BS_MUL_CFG_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_BS_MUL_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_RESERVED_1__SHIFT) & DPU_BS_MUL_CFG_RESERVED_1__MASK;
+}
+#define DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__MASK 0x00000002
+#define DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__SHIFT 1
+static inline uint32_t DPU_BS_MUL_CFG_BS_TRUNCATE_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__SHIFT) & DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__MASK;
+}
+#define DPU_BS_MUL_CFG_BS_MUL_SRC__MASK 0x00000001
+#define DPU_BS_MUL_CFG_BS_MUL_SRC__SHIFT 0
+static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_MUL_SRC__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_SRC__MASK;
+}
+
+#define REG_DPU_BS_RELUX_CMP_VALUE 0x0000404c
+#define DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__MASK 0xffffffff
+#define DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__SHIFT 0
+static inline uint32_t DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT(uint32_t val)
+{
+ return ((val) << DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__SHIFT) & DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__MASK;
+}
+
+#define REG_DPU_BS_OW_CFG 0x00004050
+#define DPU_BS_OW_CFG_RGP_CNTER__MASK 0xf0000000
+#define DPU_BS_OW_CFG_RGP_CNTER__SHIFT 28
+static inline uint32_t DPU_BS_OW_CFG_RGP_CNTER(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_RGP_CNTER__SHIFT) & DPU_BS_OW_CFG_RGP_CNTER__MASK;
+}
+#define DPU_BS_OW_CFG_TP_ORG_EN__MASK 0x08000000
+#define DPU_BS_OW_CFG_TP_ORG_EN__SHIFT 27
+static inline uint32_t DPU_BS_OW_CFG_TP_ORG_EN(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_TP_ORG_EN__SHIFT) & DPU_BS_OW_CFG_TP_ORG_EN__MASK;
+}
+#define DPU_BS_OW_CFG_RESERVED_0__MASK 0x07fff800
+#define DPU_BS_OW_CFG_RESERVED_0__SHIFT 11
+static inline uint32_t DPU_BS_OW_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_RESERVED_0__SHIFT) & DPU_BS_OW_CFG_RESERVED_0__MASK;
+}
+#define DPU_BS_OW_CFG_SIZE_E_2__MASK 0x00000700
+#define DPU_BS_OW_CFG_SIZE_E_2__SHIFT 8
+static inline uint32_t DPU_BS_OW_CFG_SIZE_E_2(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_SIZE_E_2__SHIFT) & DPU_BS_OW_CFG_SIZE_E_2__MASK;
+}
+#define DPU_BS_OW_CFG_SIZE_E_1__MASK 0x000000e0
+#define DPU_BS_OW_CFG_SIZE_E_1__SHIFT 5
+static inline uint32_t DPU_BS_OW_CFG_SIZE_E_1(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_SIZE_E_1__SHIFT) & DPU_BS_OW_CFG_SIZE_E_1__MASK;
+}
+#define DPU_BS_OW_CFG_SIZE_E_0__MASK 0x0000001c
+#define DPU_BS_OW_CFG_SIZE_E_0__SHIFT 2
+static inline uint32_t DPU_BS_OW_CFG_SIZE_E_0(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_SIZE_E_0__SHIFT) & DPU_BS_OW_CFG_SIZE_E_0__MASK;
+}
+#define DPU_BS_OW_CFG_OD_BYPASS__MASK 0x00000002
+#define DPU_BS_OW_CFG_OD_BYPASS__SHIFT 1
+static inline uint32_t DPU_BS_OW_CFG_OD_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_OD_BYPASS__SHIFT) & DPU_BS_OW_CFG_OD_BYPASS__MASK;
+}
+#define DPU_BS_OW_CFG_OW_SRC__MASK 0x00000001
+#define DPU_BS_OW_CFG_OW_SRC__SHIFT 0
+static inline uint32_t DPU_BS_OW_CFG_OW_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_OW_SRC__SHIFT) & DPU_BS_OW_CFG_OW_SRC__MASK;
+}
+
+#define REG_DPU_BS_OW_OP 0x00004054
+#define DPU_BS_OW_OP_RESERVED_0__MASK 0xffff0000
+#define DPU_BS_OW_OP_RESERVED_0__SHIFT 16
+static inline uint32_t DPU_BS_OW_OP_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_OP_RESERVED_0__SHIFT) & DPU_BS_OW_OP_RESERVED_0__MASK;
+}
+#define DPU_BS_OW_OP_OW_OP__MASK 0x0000ffff
+#define DPU_BS_OW_OP_OW_OP__SHIFT 0
+static inline uint32_t DPU_BS_OW_OP_OW_OP(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_OP_OW_OP__SHIFT) & DPU_BS_OW_OP_OW_OP__MASK;
+}
+
+#define REG_DPU_WDMA_SIZE_0 0x00004058
+#define DPU_WDMA_SIZE_0_RESERVED_0__MASK 0xf0000000
+#define DPU_WDMA_SIZE_0_RESERVED_0__SHIFT 28
+static inline uint32_t DPU_WDMA_SIZE_0_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_RESERVED_0__SHIFT) & DPU_WDMA_SIZE_0_RESERVED_0__MASK;
+}
+#define DPU_WDMA_SIZE_0_TP_PRECISION__MASK 0x08000000
+#define DPU_WDMA_SIZE_0_TP_PRECISION__SHIFT 27
+static inline uint32_t DPU_WDMA_SIZE_0_TP_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_TP_PRECISION__SHIFT) & DPU_WDMA_SIZE_0_TP_PRECISION__MASK;
+}
+#define DPU_WDMA_SIZE_0_SIZE_C_WDMA__MASK 0x07ff0000
+#define DPU_WDMA_SIZE_0_SIZE_C_WDMA__SHIFT 16
+static inline uint32_t DPU_WDMA_SIZE_0_SIZE_C_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_SIZE_C_WDMA__SHIFT) & DPU_WDMA_SIZE_0_SIZE_C_WDMA__MASK;
+}
+#define DPU_WDMA_SIZE_0_RESERVED_1__MASK 0x0000e000
+#define DPU_WDMA_SIZE_0_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_WDMA_SIZE_0_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_RESERVED_1__SHIFT) & DPU_WDMA_SIZE_0_RESERVED_1__MASK;
+}
+#define DPU_WDMA_SIZE_0_CHANNEL_WDMA__MASK 0x00001fff
+#define DPU_WDMA_SIZE_0_CHANNEL_WDMA__SHIFT 0
+static inline uint32_t DPU_WDMA_SIZE_0_CHANNEL_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_CHANNEL_WDMA__SHIFT) & DPU_WDMA_SIZE_0_CHANNEL_WDMA__MASK;
+}
+
+#define REG_DPU_WDMA_SIZE_1 0x0000405c
+#define DPU_WDMA_SIZE_1_RESERVED_0__MASK 0xe0000000
+#define DPU_WDMA_SIZE_1_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_WDMA_SIZE_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_RESERVED_0__SHIFT) & DPU_WDMA_SIZE_1_RESERVED_0__MASK;
+}
+#define DPU_WDMA_SIZE_1_HEIGHT_WDMA__MASK 0x1fff0000
+#define DPU_WDMA_SIZE_1_HEIGHT_WDMA__SHIFT 16
+static inline uint32_t DPU_WDMA_SIZE_1_HEIGHT_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_HEIGHT_WDMA__SHIFT) & DPU_WDMA_SIZE_1_HEIGHT_WDMA__MASK;
+}
+#define DPU_WDMA_SIZE_1_RESERVED_1__MASK 0x0000e000
+#define DPU_WDMA_SIZE_1_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_WDMA_SIZE_1_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_RESERVED_1__SHIFT) & DPU_WDMA_SIZE_1_RESERVED_1__MASK;
+}
+#define DPU_WDMA_SIZE_1_WIDTH_WDMA__MASK 0x00001fff
+#define DPU_WDMA_SIZE_1_WIDTH_WDMA__SHIFT 0
+static inline uint32_t DPU_WDMA_SIZE_1_WIDTH_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_WIDTH_WDMA__SHIFT) & DPU_WDMA_SIZE_1_WIDTH_WDMA__MASK;
+}
+
+#define REG_DPU_BN_CFG 0x00004060
+#define DPU_BN_CFG_RESERVED_0__MASK 0xfff00000
+#define DPU_BN_CFG_RESERVED_0__SHIFT 20
+static inline uint32_t DPU_BN_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_RESERVED_0__SHIFT) & DPU_BN_CFG_RESERVED_0__MASK;
+}
+#define DPU_BN_CFG_BN_ALU_ALGO__MASK 0x000f0000
+#define DPU_BN_CFG_BN_ALU_ALGO__SHIFT 16
+static inline uint32_t DPU_BN_CFG_BN_ALU_ALGO(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_ALU_ALGO__SHIFT) & DPU_BN_CFG_BN_ALU_ALGO__MASK;
+}
+#define DPU_BN_CFG_RESERVED_1__MASK 0x0000fe00
+#define DPU_BN_CFG_RESERVED_1__SHIFT 9
+static inline uint32_t DPU_BN_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_RESERVED_1__SHIFT) & DPU_BN_CFG_RESERVED_1__MASK;
+}
+#define DPU_BN_CFG_BN_ALU_SRC__MASK 0x00000100
+#define DPU_BN_CFG_BN_ALU_SRC__SHIFT 8
+static inline uint32_t DPU_BN_CFG_BN_ALU_SRC(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_ALU_SRC__SHIFT) & DPU_BN_CFG_BN_ALU_SRC__MASK;
+}
+#define DPU_BN_CFG_BN_RELUX_EN__MASK 0x00000080
+#define DPU_BN_CFG_BN_RELUX_EN__SHIFT 7
+static inline uint32_t DPU_BN_CFG_BN_RELUX_EN(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_RELUX_EN__SHIFT) & DPU_BN_CFG_BN_RELUX_EN__MASK;
+}
+#define DPU_BN_CFG_BN_RELU_BYPASS__MASK 0x00000040
+#define DPU_BN_CFG_BN_RELU_BYPASS__SHIFT 6
+static inline uint32_t DPU_BN_CFG_BN_RELU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_RELU_BYPASS__SHIFT) & DPU_BN_CFG_BN_RELU_BYPASS__MASK;
+}
+#define DPU_BN_CFG_BN_MUL_PRELU__MASK 0x00000020
+#define DPU_BN_CFG_BN_MUL_PRELU__SHIFT 5
+static inline uint32_t DPU_BN_CFG_BN_MUL_PRELU(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_MUL_PRELU__SHIFT) & DPU_BN_CFG_BN_MUL_PRELU__MASK;
+}
+#define DPU_BN_CFG_BN_MUL_BYPASS__MASK 0x00000010
+#define DPU_BN_CFG_BN_MUL_BYPASS__SHIFT 4
+static inline uint32_t DPU_BN_CFG_BN_MUL_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_MUL_BYPASS__SHIFT) & DPU_BN_CFG_BN_MUL_BYPASS__MASK;
+}
+#define DPU_BN_CFG_RESERVED_2__MASK 0x0000000c
+#define DPU_BN_CFG_RESERVED_2__SHIFT 2
+static inline uint32_t DPU_BN_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_RESERVED_2__SHIFT) & DPU_BN_CFG_RESERVED_2__MASK;
+}
+#define DPU_BN_CFG_BN_ALU_BYPASS__MASK 0x00000002
+#define DPU_BN_CFG_BN_ALU_BYPASS__SHIFT 1
+static inline uint32_t DPU_BN_CFG_BN_ALU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_ALU_BYPASS__SHIFT) & DPU_BN_CFG_BN_ALU_BYPASS__MASK;
+}
+#define DPU_BN_CFG_BN_BYPASS__MASK 0x00000001
+#define DPU_BN_CFG_BN_BYPASS__SHIFT 0
+static inline uint32_t DPU_BN_CFG_BN_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_BYPASS__SHIFT) & DPU_BN_CFG_BN_BYPASS__MASK;
+}
+
+#define REG_DPU_BN_ALU_CFG 0x00004064
+#define DPU_BN_ALU_CFG_BN_ALU_OPERAND__MASK 0xffffffff
+#define DPU_BN_ALU_CFG_BN_ALU_OPERAND__SHIFT 0
+static inline uint32_t DPU_BN_ALU_CFG_BN_ALU_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BN_ALU_CFG_BN_ALU_OPERAND__SHIFT) & DPU_BN_ALU_CFG_BN_ALU_OPERAND__MASK;
+}
+
+#define REG_DPU_BN_MUL_CFG 0x00004068
+#define DPU_BN_MUL_CFG_BN_MUL_OPERAND__MASK 0xffff0000
+#define DPU_BN_MUL_CFG_BN_MUL_OPERAND__SHIFT 16
+static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_MUL_OPERAND__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_OPERAND__MASK;
+}
+#define DPU_BN_MUL_CFG_RESERVED_0__MASK 0x0000c000
+#define DPU_BN_MUL_CFG_RESERVED_0__SHIFT 14
+static inline uint32_t DPU_BN_MUL_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_RESERVED_0__SHIFT) & DPU_BN_MUL_CFG_RESERVED_0__MASK;
+}
+#define DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__MASK 0x00003f00
+#define DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__SHIFT 8
+static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__MASK;
+}
+#define DPU_BN_MUL_CFG_RESERVED_1__MASK 0x000000fc
+#define DPU_BN_MUL_CFG_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_BN_MUL_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_RESERVED_1__SHIFT) & DPU_BN_MUL_CFG_RESERVED_1__MASK;
+}
+#define DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__MASK 0x00000002
+#define DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__SHIFT 1
+static inline uint32_t DPU_BN_MUL_CFG_BN_TRUNCATE_SRC(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__SHIFT) & DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__MASK;
+}
+#define DPU_BN_MUL_CFG_BN_MUL_SRC__MASK 0x00000001
+#define DPU_BN_MUL_CFG_BN_MUL_SRC__SHIFT 0
+static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_SRC(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_MUL_SRC__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_SRC__MASK;
+}
+
+#define REG_DPU_BN_RELUX_CMP_VALUE 0x0000406c
+#define DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__MASK 0xffffffff
+#define DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__SHIFT 0
+static inline uint32_t DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT(uint32_t val)
+{
+ return ((val) << DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__SHIFT) & DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__MASK;
+}
+
+#define REG_DPU_EW_CFG 0x00004070
+#define DPU_EW_CFG_EW_CVT_TYPE__MASK 0x80000000
+#define DPU_EW_CFG_EW_CVT_TYPE__SHIFT 31
+static inline uint32_t DPU_EW_CFG_EW_CVT_TYPE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_CVT_TYPE__SHIFT) & DPU_EW_CFG_EW_CVT_TYPE__MASK;
+}
+#define DPU_EW_CFG_EW_CVT_ROUND__MASK 0x40000000
+#define DPU_EW_CFG_EW_CVT_ROUND__SHIFT 30
+static inline uint32_t DPU_EW_CFG_EW_CVT_ROUND(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_CVT_ROUND__SHIFT) & DPU_EW_CFG_EW_CVT_ROUND__MASK;
+}
+#define DPU_EW_CFG_EW_DATA_MODE__MASK 0x30000000
+#define DPU_EW_CFG_EW_DATA_MODE__SHIFT 28
+static inline uint32_t DPU_EW_CFG_EW_DATA_MODE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_DATA_MODE__SHIFT) & DPU_EW_CFG_EW_DATA_MODE__MASK;
+}
+#define DPU_EW_CFG_RESERVED_0__MASK 0x0f000000
+#define DPU_EW_CFG_RESERVED_0__SHIFT 24
+static inline uint32_t DPU_EW_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_RESERVED_0__SHIFT) & DPU_EW_CFG_RESERVED_0__MASK;
+}
+#define DPU_EW_CFG_EDATA_SIZE__MASK 0x00c00000
+#define DPU_EW_CFG_EDATA_SIZE__SHIFT 22
+static inline uint32_t DPU_EW_CFG_EDATA_SIZE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EDATA_SIZE__SHIFT) & DPU_EW_CFG_EDATA_SIZE__MASK;
+}
+#define DPU_EW_CFG_EW_EQUAL_EN__MASK 0x00200000
+#define DPU_EW_CFG_EW_EQUAL_EN__SHIFT 21
+static inline uint32_t DPU_EW_CFG_EW_EQUAL_EN(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_EQUAL_EN__SHIFT) & DPU_EW_CFG_EW_EQUAL_EN__MASK;
+}
+#define DPU_EW_CFG_EW_BINARY_EN__MASK 0x00100000
+#define DPU_EW_CFG_EW_BINARY_EN__SHIFT 20
+static inline uint32_t DPU_EW_CFG_EW_BINARY_EN(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_BINARY_EN__SHIFT) & DPU_EW_CFG_EW_BINARY_EN__MASK;
+}
+#define DPU_EW_CFG_EW_ALU_ALGO__MASK 0x000f0000
+#define DPU_EW_CFG_EW_ALU_ALGO__SHIFT 16
+static inline uint32_t DPU_EW_CFG_EW_ALU_ALGO(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_ALU_ALGO__SHIFT) & DPU_EW_CFG_EW_ALU_ALGO__MASK;
+}
+#define DPU_EW_CFG_RESERVED_1__MASK 0x0000f800
+#define DPU_EW_CFG_RESERVED_1__SHIFT 11
+static inline uint32_t DPU_EW_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_RESERVED_1__SHIFT) & DPU_EW_CFG_RESERVED_1__MASK;
+}
+#define DPU_EW_CFG_EW_RELUX_EN__MASK 0x00000400
+#define DPU_EW_CFG_EW_RELUX_EN__SHIFT 10
+static inline uint32_t DPU_EW_CFG_EW_RELUX_EN(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_RELUX_EN__SHIFT) & DPU_EW_CFG_EW_RELUX_EN__MASK;
+}
+#define DPU_EW_CFG_EW_RELU_BYPASS__MASK 0x00000200
+#define DPU_EW_CFG_EW_RELU_BYPASS__SHIFT 9
+static inline uint32_t DPU_EW_CFG_EW_RELU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_RELU_BYPASS__SHIFT) & DPU_EW_CFG_EW_RELU_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_OP_CVT_BYPASS__MASK 0x00000100
+#define DPU_EW_CFG_EW_OP_CVT_BYPASS__SHIFT 8
+static inline uint32_t DPU_EW_CFG_EW_OP_CVT_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_CVT_BYPASS__SHIFT) & DPU_EW_CFG_EW_OP_CVT_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_LUT_BYPASS__MASK 0x00000080
+#define DPU_EW_CFG_EW_LUT_BYPASS__SHIFT 7
+static inline uint32_t DPU_EW_CFG_EW_LUT_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_LUT_BYPASS__SHIFT) & DPU_EW_CFG_EW_LUT_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_OP_SRC__MASK 0x00000040
+#define DPU_EW_CFG_EW_OP_SRC__SHIFT 6
+static inline uint32_t DPU_EW_CFG_EW_OP_SRC(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_SRC__SHIFT) & DPU_EW_CFG_EW_OP_SRC__MASK;
+}
+#define DPU_EW_CFG_EW_MUL_PRELU__MASK 0x00000020
+#define DPU_EW_CFG_EW_MUL_PRELU__SHIFT 5
+static inline uint32_t DPU_EW_CFG_EW_MUL_PRELU(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_MUL_PRELU__SHIFT) & DPU_EW_CFG_EW_MUL_PRELU__MASK;
+}
+#define DPU_EW_CFG_RESERVED_2__MASK 0x00000018
+#define DPU_EW_CFG_RESERVED_2__SHIFT 3
+static inline uint32_t DPU_EW_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_RESERVED_2__SHIFT) & DPU_EW_CFG_RESERVED_2__MASK;
+}
+#define DPU_EW_CFG_EW_OP_TYPE__MASK 0x00000004
+#define DPU_EW_CFG_EW_OP_TYPE__SHIFT 2
+static inline uint32_t DPU_EW_CFG_EW_OP_TYPE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_TYPE__SHIFT) & DPU_EW_CFG_EW_OP_TYPE__MASK;
+}
+#define DPU_EW_CFG_EW_OP_BYPASS__MASK 0x00000002
+#define DPU_EW_CFG_EW_OP_BYPASS__SHIFT 1
+static inline uint32_t DPU_EW_CFG_EW_OP_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_BYPASS__SHIFT) & DPU_EW_CFG_EW_OP_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_BYPASS__MASK 0x00000001
+#define DPU_EW_CFG_EW_BYPASS__SHIFT 0
+static inline uint32_t DPU_EW_CFG_EW_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_BYPASS__SHIFT) & DPU_EW_CFG_EW_BYPASS__MASK;
+}
+
+#define REG_DPU_EW_CVT_OFFSET_VALUE 0x00004074
+#define DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__MASK 0xffffffff
+#define DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__SHIFT 0
+static inline uint32_t DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__SHIFT) & DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__MASK;
+}
+
+#define REG_DPU_EW_CVT_SCALE_VALUE 0x00004078
+#define DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__MASK 0xffc00000
+#define DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__SHIFT 22
+static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__MASK;
+}
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__MASK 0x003f0000
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__SHIFT 16
+static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__MASK;
+}
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__MASK 0x0000ffff
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__SHIFT 0
+static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__MASK;
+}
+
+#define REG_DPU_EW_RELUX_CMP_VALUE 0x0000407c
+#define DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__MASK 0xffffffff
+#define DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__SHIFT 0
+static inline uint32_t DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT(uint32_t val)
+{
+ return ((val) << DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__SHIFT) & DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__MASK;
+}
+
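+/*
+ * Output converter registers: OUT_CVT_OFFSET, OUT_CVT_SCALE and
+ * OUT_CVT_SHIFT (0x4080-0x4088) carry the offset/scale/shift triple,
+ * with FP32TOFP16_EN, CVT_TYPE and CVT_ROUND selecting format and
+ * rounding. Summary inferred from the field names below, not from a
+ * datasheet.
+ */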
+#define REG_DPU_OUT_CVT_OFFSET 0x00004080
+#define DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__MASK 0xffffffff
+#define DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__SHIFT 0
+static inline uint32_t DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__SHIFT) & DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__MASK;
+}
+
+#define REG_DPU_OUT_CVT_SCALE 0x00004084
+#define DPU_OUT_CVT_SCALE_RESERVED_0__MASK 0xfffe0000
+#define DPU_OUT_CVT_SCALE_RESERVED_0__SHIFT 17
+static inline uint32_t DPU_OUT_CVT_SCALE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SCALE_RESERVED_0__SHIFT) & DPU_OUT_CVT_SCALE_RESERVED_0__MASK;
+}
+#define DPU_OUT_CVT_SCALE_FP32TOFP16_EN__MASK 0x00010000
+#define DPU_OUT_CVT_SCALE_FP32TOFP16_EN__SHIFT 16
+static inline uint32_t DPU_OUT_CVT_SCALE_FP32TOFP16_EN(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SCALE_FP32TOFP16_EN__SHIFT) & DPU_OUT_CVT_SCALE_FP32TOFP16_EN__MASK;
+}
+#define DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__MASK 0x0000ffff
+#define DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__SHIFT 0
+static inline uint32_t DPU_OUT_CVT_SCALE_OUT_CVT_SCALE(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__SHIFT) & DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__MASK;
+}
+
+#define REG_DPU_OUT_CVT_SHIFT 0x00004088
+#define DPU_OUT_CVT_SHIFT_CVT_TYPE__MASK 0x80000000
+#define DPU_OUT_CVT_SHIFT_CVT_TYPE__SHIFT 31
+static inline uint32_t DPU_OUT_CVT_SHIFT_CVT_TYPE(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_CVT_TYPE__SHIFT) & DPU_OUT_CVT_SHIFT_CVT_TYPE__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_CVT_ROUND__MASK 0x40000000
+#define DPU_OUT_CVT_SHIFT_CVT_ROUND__SHIFT 30
+static inline uint32_t DPU_OUT_CVT_SHIFT_CVT_ROUND(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_CVT_ROUND__SHIFT) & DPU_OUT_CVT_SHIFT_CVT_ROUND__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_RESERVED_0__MASK 0x3ff00000
+#define DPU_OUT_CVT_SHIFT_RESERVED_0__SHIFT 20
+static inline uint32_t DPU_OUT_CVT_SHIFT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_RESERVED_0__SHIFT) & DPU_OUT_CVT_SHIFT_RESERVED_0__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_MINUS_EXP__MASK 0x000ff000
+#define DPU_OUT_CVT_SHIFT_MINUS_EXP__SHIFT 12
+static inline uint32_t DPU_OUT_CVT_SHIFT_MINUS_EXP(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_MINUS_EXP__SHIFT) & DPU_OUT_CVT_SHIFT_MINUS_EXP__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__MASK 0x00000fff
+#define DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__SHIFT 0
+static inline uint32_t DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__SHIFT) & DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__MASK;
+}
+
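+/*
+ * EW_OP_VALUE_0..7 (0x4090-0x40ac) are eight full-width 32-bit
+ * element-wise operand registers; which of them are consumed
+ * presumably depends on the EW_CFG operand-source fields above
+ * (naming-based note only).
+ */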
+#define REG_DPU_EW_OP_VALUE_0 0x00004090
+#define DPU_EW_OP_VALUE_0_EW_OPERAND_0__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_0_EW_OPERAND_0__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_0_EW_OPERAND_0(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_0_EW_OPERAND_0__SHIFT) & DPU_EW_OP_VALUE_0_EW_OPERAND_0__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_1 0x00004094
+#define DPU_EW_OP_VALUE_1_EW_OPERAND_1__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_1_EW_OPERAND_1__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_1_EW_OPERAND_1(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_1_EW_OPERAND_1__SHIFT) & DPU_EW_OP_VALUE_1_EW_OPERAND_1__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_2 0x00004098
+#define DPU_EW_OP_VALUE_2_EW_OPERAND_2__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_2_EW_OPERAND_2__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_2_EW_OPERAND_2(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_2_EW_OPERAND_2__SHIFT) & DPU_EW_OP_VALUE_2_EW_OPERAND_2__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_3 0x0000409c
+#define DPU_EW_OP_VALUE_3_EW_OPERAND_3__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_3_EW_OPERAND_3__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_3_EW_OPERAND_3(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_3_EW_OPERAND_3__SHIFT) & DPU_EW_OP_VALUE_3_EW_OPERAND_3__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_4 0x000040a0
+#define DPU_EW_OP_VALUE_4_EW_OPERAND_4__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_4_EW_OPERAND_4__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_4_EW_OPERAND_4(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_4_EW_OPERAND_4__SHIFT) & DPU_EW_OP_VALUE_4_EW_OPERAND_4__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_5 0x000040a4
+#define DPU_EW_OP_VALUE_5_EW_OPERAND_5__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_5_EW_OPERAND_5__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_5_EW_OPERAND_5(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_5_EW_OPERAND_5__SHIFT) & DPU_EW_OP_VALUE_5_EW_OPERAND_5__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_6 0x000040a8
+#define DPU_EW_OP_VALUE_6_EW_OPERAND_6__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_6_EW_OPERAND_6__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_6_EW_OPERAND_6(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_6_EW_OPERAND_6__SHIFT) & DPU_EW_OP_VALUE_6_EW_OPERAND_6__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_7 0x000040ac
+#define DPU_EW_OP_VALUE_7_EW_OPERAND_7__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_7_EW_OPERAND_7__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_7_EW_OPERAND_7(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_7_EW_OPERAND_7__SHIFT) & DPU_EW_OP_VALUE_7_EW_OPERAND_7__MASK;
+}
+
+#define REG_DPU_SURFACE_ADD 0x000040c0
+#define DPU_SURFACE_ADD_SURF_ADD__MASK 0xfffffff0
+#define DPU_SURFACE_ADD_SURF_ADD__SHIFT 4
+static inline uint32_t DPU_SURFACE_ADD_SURF_ADD(uint32_t val)
+{
+ return ((val) << DPU_SURFACE_ADD_SURF_ADD__SHIFT) & DPU_SURFACE_ADD_SURF_ADD__MASK;
+}
+#define DPU_SURFACE_ADD_RESERVED_0__MASK 0x0000000f
+#define DPU_SURFACE_ADD_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_SURFACE_ADD_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_SURFACE_ADD_RESERVED_0__SHIFT) & DPU_SURFACE_ADD_RESERVED_0__MASK;
+}
+
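+/*
+ * The LUT is accessed indirectly: LUT_ACCESS_CFG selects the table
+ * (LUT_TABLE_ID), entry index (LUT_ADDR) and access direction
+ * (LUT_ACCESS_TYPE), while LUT_ACCESS_DATA carries the 16-bit entry.
+ * A plausible write sequence, assuming ACCESS_TYPE 1 means "write"
+ * (illustrative sketch, not taken from the driver):
+ *
+ *	writel(DPU_LUT_ACCESS_CFG_LUT_TABLE_ID(0) |
+ *	       DPU_LUT_ACCESS_CFG_LUT_ADDR(i) |
+ *	       DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE(1),
+ *	       base + REG_DPU_LUT_ACCESS_CFG);
+ *	writel(DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA(lut[i]),
+ *	       base + REG_DPU_LUT_ACCESS_DATA);
+ */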
+#define REG_DPU_LUT_ACCESS_CFG 0x00004100
+#define DPU_LUT_ACCESS_CFG_RESERVED_0__MASK 0xfffc0000
+#define DPU_LUT_ACCESS_CFG_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_LUT_ACCESS_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_RESERVED_0__SHIFT) & DPU_LUT_ACCESS_CFG_RESERVED_0__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__MASK 0x00020000
+#define DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__SHIFT 17
+static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__MASK 0x00010000
+#define DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__SHIFT 16
+static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_TABLE_ID(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_RESERVED_1__MASK 0x0000fc00
+#define DPU_LUT_ACCESS_CFG_RESERVED_1__SHIFT 10
+static inline uint32_t DPU_LUT_ACCESS_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_RESERVED_1__SHIFT) & DPU_LUT_ACCESS_CFG_RESERVED_1__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_LUT_ADDR__MASK 0x000003ff
+#define DPU_LUT_ACCESS_CFG_LUT_ADDR__SHIFT 0
+static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_ADDR(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_LUT_ADDR__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_ADDR__MASK;
+}
+
+#define REG_DPU_LUT_ACCESS_DATA 0x00004104
+#define DPU_LUT_ACCESS_DATA_RESERVED_0__MASK 0xffff0000
+#define DPU_LUT_ACCESS_DATA_RESERVED_0__SHIFT 16
+static inline uint32_t DPU_LUT_ACCESS_DATA_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_DATA_RESERVED_0__SHIFT) & DPU_LUT_ACCESS_DATA_RESERVED_0__MASK;
+}
+#define DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__MASK 0x0000ffff
+#define DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__SHIFT 0
+static inline uint32_t DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__SHIFT) & DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__MASK;
+}
+
+#define REG_DPU_LUT_CFG 0x00004108
+#define DPU_LUT_CFG_RESERVED_0__MASK 0xffffff00
+#define DPU_LUT_CFG_RESERVED_0__SHIFT 8
+static inline uint32_t DPU_LUT_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_RESERVED_0__SHIFT) & DPU_LUT_CFG_RESERVED_0__MASK;
+}
+#define DPU_LUT_CFG_LUT_CAL_SEL__MASK 0x00000080
+#define DPU_LUT_CFG_LUT_CAL_SEL__SHIFT 7
+static inline uint32_t DPU_LUT_CFG_LUT_CAL_SEL(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_CAL_SEL__SHIFT) & DPU_LUT_CFG_LUT_CAL_SEL__MASK;
+}
+#define DPU_LUT_CFG_LUT_HYBRID_PRIORITY__MASK 0x00000040
+#define DPU_LUT_CFG_LUT_HYBRID_PRIORITY__SHIFT 6
+static inline uint32_t DPU_LUT_CFG_LUT_HYBRID_PRIORITY(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_HYBRID_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_HYBRID_PRIORITY__MASK;
+}
+#define DPU_LUT_CFG_LUT_OFLOW_PRIORITY__MASK 0x00000020
+#define DPU_LUT_CFG_LUT_OFLOW_PRIORITY__SHIFT 5
+static inline uint32_t DPU_LUT_CFG_LUT_OFLOW_PRIORITY(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_OFLOW_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_OFLOW_PRIORITY__MASK;
+}
+#define DPU_LUT_CFG_LUT_UFLOW_PRIORITY__MASK 0x00000010
+#define DPU_LUT_CFG_LUT_UFLOW_PRIORITY__SHIFT 4
+static inline uint32_t DPU_LUT_CFG_LUT_UFLOW_PRIORITY(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_UFLOW_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_UFLOW_PRIORITY__MASK;
+}
+#define DPU_LUT_CFG_LUT_LO_LE_MUX__MASK 0x0000000c
+#define DPU_LUT_CFG_LUT_LO_LE_MUX__SHIFT 2
+static inline uint32_t DPU_LUT_CFG_LUT_LO_LE_MUX(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_LO_LE_MUX__SHIFT) & DPU_LUT_CFG_LUT_LO_LE_MUX__MASK;
+}
+#define DPU_LUT_CFG_LUT_EXPAND_EN__MASK 0x00000002
+#define DPU_LUT_CFG_LUT_EXPAND_EN__SHIFT 1
+static inline uint32_t DPU_LUT_CFG_LUT_EXPAND_EN(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_EXPAND_EN__SHIFT) & DPU_LUT_CFG_LUT_EXPAND_EN__MASK;
+}
+#define DPU_LUT_CFG_LUT_ROAD_SEL__MASK 0x00000001
+#define DPU_LUT_CFG_LUT_ROAD_SEL__SHIFT 0
+static inline uint32_t DPU_LUT_CFG_LUT_ROAD_SEL(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_ROAD_SEL__SHIFT) & DPU_LUT_CFG_LUT_ROAD_SEL__MASK;
+}
+
+#define REG_DPU_LUT_INFO 0x0000410c
+#define DPU_LUT_INFO_RESERVED_0__MASK 0xff000000
+#define DPU_LUT_INFO_RESERVED_0__SHIFT 24
+static inline uint32_t DPU_LUT_INFO_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_RESERVED_0__SHIFT) & DPU_LUT_INFO_RESERVED_0__MASK;
+}
+#define DPU_LUT_INFO_LUT_LO_INDEX_SELECT__MASK 0x00ff0000
+#define DPU_LUT_INFO_LUT_LO_INDEX_SELECT__SHIFT 16
+static inline uint32_t DPU_LUT_INFO_LUT_LO_INDEX_SELECT(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_LUT_LO_INDEX_SELECT__SHIFT) & DPU_LUT_INFO_LUT_LO_INDEX_SELECT__MASK;
+}
+#define DPU_LUT_INFO_LUT_LE_INDEX_SELECT__MASK 0x0000ff00
+#define DPU_LUT_INFO_LUT_LE_INDEX_SELECT__SHIFT 8
+static inline uint32_t DPU_LUT_INFO_LUT_LE_INDEX_SELECT(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_LUT_LE_INDEX_SELECT__SHIFT) & DPU_LUT_INFO_LUT_LE_INDEX_SELECT__MASK;
+}
+#define DPU_LUT_INFO_RESERVED_1__MASK 0x000000ff
+#define DPU_LUT_INFO_RESERVED_1__SHIFT 0
+static inline uint32_t DPU_LUT_INFO_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_RESERVED_1__SHIFT) & DPU_LUT_INFO_RESERVED_1__MASK;
+}
+
+#define REG_DPU_LUT_LE_START 0x00004110
+#define DPU_LUT_LE_START_LUT_LE_START__MASK 0xffffffff
+#define DPU_LUT_LE_START_LUT_LE_START__SHIFT 0
+static inline uint32_t DPU_LUT_LE_START_LUT_LE_START(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_START_LUT_LE_START__SHIFT) & DPU_LUT_LE_START_LUT_LE_START__MASK;
+}
+
+#define REG_DPU_LUT_LE_END 0x00004114
+#define DPU_LUT_LE_END_LUT_LE_END__MASK 0xffffffff
+#define DPU_LUT_LE_END_LUT_LE_END__SHIFT 0
+static inline uint32_t DPU_LUT_LE_END_LUT_LE_END(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_END_LUT_LE_END__SHIFT) & DPU_LUT_LE_END_LUT_LE_END__MASK;
+}
+
+#define REG_DPU_LUT_LO_START 0x00004118
+#define DPU_LUT_LO_START_LUT_LO_START__MASK 0xffffffff
+#define DPU_LUT_LO_START_LUT_LO_START__SHIFT 0
+static inline uint32_t DPU_LUT_LO_START_LUT_LO_START(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_START_LUT_LO_START__SHIFT) & DPU_LUT_LO_START_LUT_LO_START__MASK;
+}
+
+#define REG_DPU_LUT_LO_END 0x0000411c
+#define DPU_LUT_LO_END_LUT_LO_END__MASK 0xffffffff
+#define DPU_LUT_LO_END_LUT_LO_END__SHIFT 0
+static inline uint32_t DPU_LUT_LO_END_LUT_LO_END(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_END_LUT_LO_END__SHIFT) & DPU_LUT_LO_END_LUT_LO_END__MASK;
+}
+
+#define REG_DPU_LUT_LE_SLOPE_SCALE 0x00004120
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__MASK 0xffff0000
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__SHIFT 16
+static inline uint32_t DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__SHIFT) & DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__MASK;
+}
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__MASK 0x0000ffff
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__SHIFT 0
+static inline uint32_t DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__SHIFT) & DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__MASK;
+}
+
+#define REG_DPU_LUT_LE_SLOPE_SHIFT 0x00004124
+#define DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__MASK 0xfffffc00
+#define DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__SHIFT 10
+static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__MASK;
+}
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__MASK 0x000003e0
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__SHIFT 5
+static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__MASK;
+}
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__MASK 0x0000001f
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__SHIFT 0
+static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__MASK;
+}
+
+#define REG_DPU_LUT_LO_SLOPE_SCALE 0x00004128
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__MASK 0xffff0000
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__SHIFT 16
+static inline uint32_t DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__SHIFT) & DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__MASK;
+}
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__MASK 0x0000ffff
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__SHIFT 0
+static inline uint32_t DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__SHIFT) & DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__MASK;
+}
+
+#define REG_DPU_LUT_LO_SLOPE_SHIFT 0x0000412c
+#define DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__MASK 0xfffffc00
+#define DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__SHIFT 10
+static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__MASK;
+}
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__MASK 0x000003e0
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__SHIFT 5
+static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__MASK;
+}
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__MASK 0x0000001f
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__SHIFT 0
+static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__MASK;
+}
+
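+/*
+ * DPU_RDMA block: register offsets move from the DPU's 0x4000 range
+ * into the 0x5000 range; each functional unit in this header sits in
+ * its own 0x1000-aligned window.
+ */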
+#define REG_DPU_RDMA_RDMA_S_STATUS 0x00005000
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK 0x00030000
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT) & DPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK;
+}
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK 0x00000003
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT) & DPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_S_POINTER 0x00005004
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK 0x00010000
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER__MASK 0x00000001
+#define DPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_OPERATION_ENABLE 0x00005008
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT) & DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_DATA_CUBE_WIDTH 0x0000500c
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__MASK 0xffffe000
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__MASK 0x00001fff
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_DATA_CUBE_HEIGHT 0x00005010
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__MASK 0xe0000000
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__MASK 0x1fff0000
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__MASK 0x0000e000
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__MASK 0x00001fff
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_DATA_CUBE_CHANNEL 0x00005014
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__MASK 0x00001fff
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_SRC_BASE_ADDR 0x00005018
+#define DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_BRDMA_CFG 0x0000501c
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__MASK 0xffffffe0
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__MASK 0x0000001e
+#define DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__MASK;
+}
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__MASK 0x00000001
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_BS_BASE_ADDR 0x00005020
+#define DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_NRDMA_CFG 0x00005028
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__MASK 0xffffffe0
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__MASK 0x0000001e
+#define DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__MASK;
+}
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__MASK 0x00000001
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_BN_BASE_ADDR 0x0000502c
+#define DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_ERDMA_CFG 0x00005034
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__MASK 0xc0000000
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__SHIFT 30
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__MASK 0x20000000
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__SHIFT 29
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__MASK 0x10000000
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__SHIFT 28
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__MASK 0x0ffffff0
+#define DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__MASK 0x0000000c
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__SHIFT 2
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__MASK 0x00000002
+#define DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__MASK 0x00000001
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_EW_BASE_ADDR 0x00005038
+#define DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_EW_SURF_STRIDE 0x00005040
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__MASK 0xfffffff0
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__SHIFT) & DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__MASK;
+}
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_FEATURE_MODE_CFG 0x00005044
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__MASK 0xfffc0000
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__MASK 0x00038000
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__SHIFT 15
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__MASK 0x00007800
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__SHIFT 11
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__MASK 0x00000700
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__SHIFT 8
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__MASK 0x000000e0
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__MASK 0x00000010
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__MASK 0x00000008
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__MASK 0x00000006
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__MASK 0x00000001
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_SRC_DMA_CFG 0x00005048
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__MASK 0xfff80000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__SHIFT 19
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__MASK 0x0007c000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__SHIFT 14
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__MASK 0x00002000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__MASK 0x00001000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__SHIFT 12
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__MASK 0x00000e00
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__SHIFT 9
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__MASK 0x000001c0
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__SHIFT 6
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__MASK 0x00000038
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__MASK 0x00000007
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_SURF_NOTCH 0x0000504c
+#define DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__MASK 0xfffffff0
+#define DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__MASK;
+}
+#define DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__MASK 0x0000000f
+#define DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_PAD_CFG 0x00005064
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__MASK 0xffff0000
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__MASK 0x0000ff80
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__SHIFT 7
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__MASK 0x00000070
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_TOP(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__MASK 0x00000008
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__MASK 0x00000007
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_WEIGHT 0x00005068
+#define DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__MASK 0xff000000
+#define DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__SHIFT 24
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_E_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__MASK 0x00ff0000
+#define DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_N_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__MASK 0x0000ff00
+#define DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__SHIFT 8
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_B_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__MASK 0x000000ff
+#define DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_M_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_EW_SURF_NOTCH 0x0000506c
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__MASK 0xfffffff0
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__SHIFT) & DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__MASK;
+}
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__MASK 0x0000000f
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__MASK;
+}
+
+#define REG_PPU_S_STATUS 0x00006000
+#define PPU_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define PPU_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t PPU_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_RESERVED_0__SHIFT) & PPU_S_STATUS_RESERVED_0__MASK;
+}
+#define PPU_S_STATUS_STATUS_1__MASK 0x00030000
+#define PPU_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t PPU_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_STATUS_1__SHIFT) & PPU_S_STATUS_STATUS_1__MASK;
+}
+#define PPU_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define PPU_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t PPU_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_RESERVED_1__SHIFT) & PPU_S_STATUS_RESERVED_1__MASK;
+}
+#define PPU_S_STATUS_STATUS_0__MASK 0x00000003
+#define PPU_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t PPU_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_STATUS_0__SHIFT) & PPU_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_PPU_S_POINTER 0x00006004
+#define PPU_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define PPU_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_RESERVED_0__SHIFT) & PPU_S_POINTER_RESERVED_0__MASK;
+}
+#define PPU_S_POINTER_EXECUTER__MASK 0x00010000
+#define PPU_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t PPU_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_EXECUTER__SHIFT) & PPU_S_POINTER_EXECUTER__MASK;
+}
+#define PPU_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define PPU_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t PPU_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_RESERVED_1__SHIFT) & PPU_S_POINTER_RESERVED_1__MASK;
+}
+#define PPU_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define PPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t PPU_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & PPU_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define PPU_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define PPU_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t PPU_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER_PP_CLEAR__SHIFT) & PPU_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define PPU_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define PPU_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t PPU_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER_PP_MODE__SHIFT) & PPU_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define PPU_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define PPU_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t PPU_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_EXECUTER_PP_EN__SHIFT) & PPU_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define PPU_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define PPU_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t PPU_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER_PP_EN__SHIFT) & PPU_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define PPU_S_POINTER_POINTER__MASK 0x00000001
+#define PPU_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t PPU_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER__SHIFT) & PPU_S_POINTER_POINTER__MASK;
+}
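+
+/*
+ * Note (assumption): the POINTER/EXECUTER and PP_EN/PP_MODE/PP_CLEAR fields
+ * look like the usual ping-pong (double-buffered) register-group controls,
+ * with POINTER selecting the group the host programs next and EXECUTER
+ * reporting the group the hardware is executing.  A purely illustrative
+ * arming of group 0, assuming a hypothetical ioremap()ed register base:
+ *
+ *	writel(PPU_S_POINTER_POINTER_PP_EN(1) | PPU_S_POINTER_POINTER(0),
+ *	       base + REG_PPU_S_POINTER);
+ */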
+
+#define REG_PPU_OPERATION_ENABLE 0x00006008
+#define PPU_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define PPU_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t PPU_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_ENABLE_RESERVED_0__SHIFT) & PPU_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define PPU_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define PPU_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t PPU_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_ENABLE_OP_EN__SHIFT) & PPU_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_IN_WIDTH 0x0000600c
+#define PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_IN_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK 0x00001fff
+#define PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT) & PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_IN_HEIGHT 0x00006010
+#define PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK 0x00001fff
+#define PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT) & PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_IN_CHANNEL 0x00006014
+#define PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK 0x00001fff
+#define PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT) & PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_OUT_WIDTH 0x00006018
+#define PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__MASK 0x00001fff
+#define PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__SHIFT) & PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_OUT_HEIGHT 0x0000601c
+#define PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__MASK 0x00001fff
+#define PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__SHIFT) & PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_OUT_CHANNEL 0x00006020
+#define PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__MASK 0x00001fff
+#define PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__SHIFT) & PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__MASK;
+}
+
+#define REG_PPU_OPERATION_MODE_CFG 0x00006024
+#define PPU_OPERATION_MODE_CFG_RESERVED_0__MASK 0x80000000
+#define PPU_OPERATION_MODE_CFG_RESERVED_0__SHIFT 31
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_0__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_0__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_INDEX_EN__MASK 0x40000000
+#define PPU_OPERATION_MODE_CFG_INDEX_EN__SHIFT 30
+static inline uint32_t PPU_OPERATION_MODE_CFG_INDEX_EN(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_INDEX_EN__SHIFT) & PPU_OPERATION_MODE_CFG_INDEX_EN__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_RESERVED_1__MASK 0x20000000
+#define PPU_OPERATION_MODE_CFG_RESERVED_1__SHIFT 29
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_1__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_1__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_NOTCH_ADDR__MASK 0x1fff0000
+#define PPU_OPERATION_MODE_CFG_NOTCH_ADDR__SHIFT 16
+static inline uint32_t PPU_OPERATION_MODE_CFG_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_NOTCH_ADDR__SHIFT) & PPU_OPERATION_MODE_CFG_NOTCH_ADDR__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_RESERVED_2__MASK 0x0000ff00
+#define PPU_OPERATION_MODE_CFG_RESERVED_2__SHIFT 8
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_2__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_2__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_USE_CNT__MASK 0x000000e0
+#define PPU_OPERATION_MODE_CFG_USE_CNT__SHIFT 5
+static inline uint32_t PPU_OPERATION_MODE_CFG_USE_CNT(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_USE_CNT__SHIFT) & PPU_OPERATION_MODE_CFG_USE_CNT__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_FLYING_MODE__MASK 0x00000010
+#define PPU_OPERATION_MODE_CFG_FLYING_MODE__SHIFT 4
+static inline uint32_t PPU_OPERATION_MODE_CFG_FLYING_MODE(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_FLYING_MODE__SHIFT) & PPU_OPERATION_MODE_CFG_FLYING_MODE__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_RESERVED_3__MASK 0x0000000c
+#define PPU_OPERATION_MODE_CFG_RESERVED_3__SHIFT 2
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_3(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_3__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_3__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_POOLING_METHOD__MASK 0x00000003
+#define PPU_OPERATION_MODE_CFG_POOLING_METHOD__SHIFT 0
+static inline uint32_t PPU_OPERATION_MODE_CFG_POOLING_METHOD(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_POOLING_METHOD__SHIFT) & PPU_OPERATION_MODE_CFG_POOLING_METHOD__MASK;
+}
+
+#define REG_PPU_POOLING_KERNEL_CFG 0x00006034
+#define PPU_POOLING_KERNEL_CFG_RESERVED_0__MASK 0xff000000
+#define PPU_POOLING_KERNEL_CFG_RESERVED_0__SHIFT 24
+static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_0__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_0__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__MASK 0x00f00000
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__SHIFT 20
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__MASK 0x000f0000
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__SHIFT 16
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_RESERVED_1__MASK 0x0000f000
+#define PPU_POOLING_KERNEL_CFG_RESERVED_1__SHIFT 12
+static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_1__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_1__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__MASK 0x00000f00
+#define PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__SHIFT 8
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_RESERVED_2__MASK 0x000000f0
+#define PPU_POOLING_KERNEL_CFG_RESERVED_2__SHIFT 4
+static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_2__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_2__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__MASK 0x0000000f
+#define PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__SHIFT 0
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__MASK;
+}
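+
+/*
+ * Usage sketch (illustrative values, not generated content): composing a
+ * 2x2 pooling window with stride 2 from the field helpers above.
+ */
+static inline uint32_t example_pooling_kernel_cfg_2x2_s2(void)
+{
+	return PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT(2) |
+	       PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH(2) |
+	       PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT(2) |
+	       PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH(2);
+}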
+
+#define REG_PPU_RECIP_KERNEL_WIDTH 0x00006038
+#define PPU_RECIP_KERNEL_WIDTH_RESERVED_0__MASK 0xfffe0000
+#define PPU_RECIP_KERNEL_WIDTH_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_RECIP_KERNEL_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_WIDTH_RESERVED_0__SHIFT) & PPU_RECIP_KERNEL_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__MASK 0x0001ffff
+#define PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__SHIFT 0
+static inline uint32_t PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__SHIFT) & PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__MASK;
+}
+
+#define REG_PPU_RECIP_KERNEL_HEIGHT 0x0000603c
+#define PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__MASK 0xfffe0000
+#define PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_RECIP_KERNEL_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__SHIFT) & PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__MASK 0x0001ffff
+#define PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__SHIFT 0
+static inline uint32_t PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__SHIFT) & PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__MASK;
+}
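+
+/*
+ * Note (assumption): in comparable NPU pooling units the RECIP_KERNEL_*
+ * registers hold a fixed-point reciprocal of the kernel dimension so that
+ * average pooling can multiply instead of divide; with the 17-bit fields
+ * above a plausible encoding would be recip = (1 << 16) / kernel_dim, but
+ * the exact fixed-point format is not confirmed by this header.
+ */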
+
+#define REG_PPU_POOLING_PADDING_CFG 0x00006040
+#define PPU_POOLING_PADDING_CFG_RESERVED_0__MASK 0xffff8000
+#define PPU_POOLING_PADDING_CFG_RESERVED_0__SHIFT 15
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_0__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_0__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_BOTTOM__MASK 0x00007000
+#define PPU_POOLING_PADDING_CFG_PAD_BOTTOM__SHIFT 12
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_BOTTOM(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_BOTTOM__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_BOTTOM__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_RESERVED_1__MASK 0x00000800
+#define PPU_POOLING_PADDING_CFG_RESERVED_1__SHIFT 11
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_1__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_1__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_RIGHT__MASK 0x00000700
+#define PPU_POOLING_PADDING_CFG_PAD_RIGHT__SHIFT 8
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_RIGHT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_RIGHT__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_RIGHT__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_RESERVED_2__MASK 0x00000080
+#define PPU_POOLING_PADDING_CFG_RESERVED_2__SHIFT 7
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_2__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_2__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_TOP__MASK 0x00000070
+#define PPU_POOLING_PADDING_CFG_PAD_TOP__SHIFT 4
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_TOP(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_TOP__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_TOP__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_RESERVED_3__MASK 0x00000008
+#define PPU_POOLING_PADDING_CFG_RESERVED_3__SHIFT 3
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_3(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_3__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_3__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_LEFT__MASK 0x00000007
+#define PPU_POOLING_PADDING_CFG_PAD_LEFT__SHIFT 0
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_LEFT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_LEFT__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_LEFT__MASK;
+}
+
+#define REG_PPU_PADDING_VALUE_1_CFG 0x00006044
+#define PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__MASK 0xffffffff
+#define PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__SHIFT 0
+static inline uint32_t PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0(uint32_t val)
+{
+ return ((val) << PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__SHIFT) & PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__MASK;
+}
+
+#define REG_PPU_PADDING_VALUE_2_CFG 0x00006048
+#define PPU_PADDING_VALUE_2_CFG_RESERVED_0__MASK 0xfffffff8
+#define PPU_PADDING_VALUE_2_CFG_RESERVED_0__SHIFT 3
+static inline uint32_t PPU_PADDING_VALUE_2_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_PADDING_VALUE_2_CFG_RESERVED_0__SHIFT) & PPU_PADDING_VALUE_2_CFG_RESERVED_0__MASK;
+}
+#define PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__MASK 0x00000007
+#define PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__SHIFT 0
+static inline uint32_t PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1(uint32_t val)
+{
+ return ((val) << PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__SHIFT) & PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__MASK;
+}
+
+#define REG_PPU_DST_BASE_ADDR 0x00006070
+#define PPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK 0xfffffff0
+#define PPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT 4
+static inline uint32_t PPU_DST_BASE_ADDR_DST_BASE_ADDR(uint32_t val)
+{
+ return ((val) << PPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT) & PPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK;
+}
+#define PPU_DST_BASE_ADDR_RESERVED_0__MASK 0x0000000f
+#define PPU_DST_BASE_ADDR_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_DST_BASE_ADDR_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DST_BASE_ADDR_RESERVED_0__SHIFT) & PPU_DST_BASE_ADDR_RESERVED_0__MASK;
+}
+
+#define REG_PPU_DST_SURF_STRIDE 0x0000607c
+#define PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK 0xfffffff0
+#define PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT 4
+static inline uint32_t PPU_DST_SURF_STRIDE_DST_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT) & PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK;
+}
+#define PPU_DST_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define PPU_DST_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_DST_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DST_SURF_STRIDE_RESERVED_0__SHIFT) & PPU_DST_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_PPU_DATA_FORMAT 0x00006084
+#define PPU_DATA_FORMAT_INDEX_ADD__MASK 0xfffffff0
+#define PPU_DATA_FORMAT_INDEX_ADD__SHIFT 4
+static inline uint32_t PPU_DATA_FORMAT_INDEX_ADD(uint32_t val)
+{
+ return ((val) << PPU_DATA_FORMAT_INDEX_ADD__SHIFT) & PPU_DATA_FORMAT_INDEX_ADD__MASK;
+}
+#define PPU_DATA_FORMAT_DPU_FLYIN__MASK 0x00000008
+#define PPU_DATA_FORMAT_DPU_FLYIN__SHIFT 3
+static inline uint32_t PPU_DATA_FORMAT_DPU_FLYIN(uint32_t val)
+{
+ return ((val) << PPU_DATA_FORMAT_DPU_FLYIN__SHIFT) & PPU_DATA_FORMAT_DPU_FLYIN__MASK;
+}
+#define PPU_DATA_FORMAT_PROC_PRECISION__MASK 0x00000007
+#define PPU_DATA_FORMAT_PROC_PRECISION__SHIFT 0
+static inline uint32_t PPU_DATA_FORMAT_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << PPU_DATA_FORMAT_PROC_PRECISION__SHIFT) & PPU_DATA_FORMAT_PROC_PRECISION__MASK;
+}
+
+#define REG_PPU_MISC_CTRL 0x000060dc
+#define PPU_MISC_CTRL_SURF_LEN__MASK 0xffff0000
+#define PPU_MISC_CTRL_SURF_LEN__SHIFT 16
+static inline uint32_t PPU_MISC_CTRL_SURF_LEN(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_SURF_LEN__SHIFT) & PPU_MISC_CTRL_SURF_LEN__MASK;
+}
+#define PPU_MISC_CTRL_RESERVED_0__MASK 0x0000fe00
+#define PPU_MISC_CTRL_RESERVED_0__SHIFT 9
+static inline uint32_t PPU_MISC_CTRL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_RESERVED_0__SHIFT) & PPU_MISC_CTRL_RESERVED_0__MASK;
+}
+#define PPU_MISC_CTRL_MC_SURF_OUT__MASK 0x00000100
+#define PPU_MISC_CTRL_MC_SURF_OUT__SHIFT 8
+static inline uint32_t PPU_MISC_CTRL_MC_SURF_OUT(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_MC_SURF_OUT__SHIFT) & PPU_MISC_CTRL_MC_SURF_OUT__MASK;
+}
+#define PPU_MISC_CTRL_NONALIGN__MASK 0x00000080
+#define PPU_MISC_CTRL_NONALIGN__SHIFT 7
+static inline uint32_t PPU_MISC_CTRL_NONALIGN(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_NONALIGN__SHIFT) & PPU_MISC_CTRL_NONALIGN__MASK;
+}
+#define PPU_MISC_CTRL_RESERVED_1__MASK 0x00000070
+#define PPU_MISC_CTRL_RESERVED_1__SHIFT 4
+static inline uint32_t PPU_MISC_CTRL_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_RESERVED_1__SHIFT) & PPU_MISC_CTRL_RESERVED_1__MASK;
+}
+#define PPU_MISC_CTRL_BURST_LEN__MASK 0x0000000f
+#define PPU_MISC_CTRL_BURST_LEN__SHIFT 0
+static inline uint32_t PPU_MISC_CTRL_BURST_LEN(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_BURST_LEN__SHIFT) & PPU_MISC_CTRL_BURST_LEN__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_S_STATUS 0x00007000
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK 0x00030000
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT) & PPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK;
+}
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT) & PPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK;
+}
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK 0x00000003
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT) & PPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_S_POINTER 0x00007004
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK 0x00010000
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT) & PPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER__MASK 0x00000001
+#define PPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_OPERATION_ENABLE 0x00007008
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT) & PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_CUBE_IN_WIDTH 0x0000700c
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__MASK 0xffffe000
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK 0x00001fff
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_CUBE_IN_HEIGHT 0x00007010
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__MASK 0xffffe000
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK 0x00001fff
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_CUBE_IN_CHANNEL 0x00007014
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK 0x00001fff
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_SRC_BASE_ADDR 0x0000701c
+#define PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK 0xffffffff
+#define PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT) & PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_SRC_LINE_STRIDE 0x00007024
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__MASK 0xfffffff0
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__SHIFT 4
+static inline uint32_t PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__SHIFT) & PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__MASK;
+}
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__MASK 0x0000000f
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_SRC_SURF_STRIDE 0x00007028
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__MASK 0xfffffff0
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__SHIFT 4
+static inline uint32_t PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__SHIFT) & PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__MASK;
+}
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_DATA_FORMAT 0x00007030
+#define PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__MASK 0xfffffffc
+#define PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__SHIFT 2
+static inline uint32_t PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__MASK 0x00000003
+#define PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__SHIFT) & PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__MASK;
+}
+
+#define REG_DDMA_CFG_OUTSTANDING 0x00008000
+#define DDMA_CFG_OUTSTANDING_RESERVED_0__MASK 0xffff0000
+#define DDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT 16
+static inline uint32_t DDMA_CFG_OUTSTANDING_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT) & DDMA_CFG_OUTSTANDING_RESERVED_0__MASK;
+}
+#define DDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK 0x0000ff00
+#define DDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT 8
+static inline uint32_t DDMA_CFG_OUTSTANDING_WR_OS_CNT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT) & DDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK;
+}
+#define DDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK 0x000000ff
+#define DDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT 0
+static inline uint32_t DDMA_CFG_OUTSTANDING_RD_OS_CNT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT) & DDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK;
+}
+
+#define REG_DDMA_RD_WEIGHT_0 0x00008004
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK 0xff000000
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT 24
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK;
+}
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK 0x00ff0000
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT 16
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK;
+}
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK 0x0000ff00
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT 8
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK;
+}
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK 0x000000ff
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT 0
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK;
+}
+
+#define REG_DDMA_WR_WEIGHT_0 0x00008008
+#define DDMA_WR_WEIGHT_0_RESERVED_0__MASK 0xffff0000
+#define DDMA_WR_WEIGHT_0_RESERVED_0__SHIFT 16
+static inline uint32_t DDMA_WR_WEIGHT_0_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_WR_WEIGHT_0_RESERVED_0__SHIFT) & DDMA_WR_WEIGHT_0_RESERVED_0__MASK;
+}
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK 0x0000ff00
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT 8
+static inline uint32_t DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT) & DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK;
+}
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK 0x000000ff
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT 0
+static inline uint32_t DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT) & DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK;
+}
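+
+/*
+ * Note: the RD_WEIGHT and WR_WEIGHT fields assign per-client arbitration
+ * weights (feature, kernel, DPU, PDP and, below, the program counter) for
+ * the shared DMA; a larger weight presumably grants that client a bigger
+ * share of bandwidth, though the exact arbitration policy is not described
+ * here.
+ */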
+
+#define REG_DDMA_CFG_ID_ERROR 0x0000800c
+#define DDMA_CFG_ID_ERROR_RESERVED_0__MASK 0xfffffc00
+#define DDMA_CFG_ID_ERROR_RESERVED_0__SHIFT 10
+static inline uint32_t DDMA_CFG_ID_ERROR_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_RESERVED_0__SHIFT) & DDMA_CFG_ID_ERROR_RESERVED_0__MASK;
+}
+#define DDMA_CFG_ID_ERROR_WR_RESP_ID__MASK 0x000003c0
+#define DDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT 6
+static inline uint32_t DDMA_CFG_ID_ERROR_WR_RESP_ID(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT) & DDMA_CFG_ID_ERROR_WR_RESP_ID__MASK;
+}
+#define DDMA_CFG_ID_ERROR_RESERVED_1__MASK 0x00000020
+#define DDMA_CFG_ID_ERROR_RESERVED_1__SHIFT 5
+static inline uint32_t DDMA_CFG_ID_ERROR_RESERVED_1(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_RESERVED_1__SHIFT) & DDMA_CFG_ID_ERROR_RESERVED_1__MASK;
+}
+#define DDMA_CFG_ID_ERROR_RD_RESP_ID__MASK 0x0000001f
+#define DDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT 0
+static inline uint32_t DDMA_CFG_ID_ERROR_RD_RESP_ID(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT) & DDMA_CFG_ID_ERROR_RD_RESP_ID__MASK;
+}
+
+#define REG_DDMA_RD_WEIGHT_1 0x00008010
+#define DDMA_RD_WEIGHT_1_RESERVED_0__MASK 0xffffff00
+#define DDMA_RD_WEIGHT_1_RESERVED_0__SHIFT 8
+static inline uint32_t DDMA_RD_WEIGHT_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_1_RESERVED_0__SHIFT) & DDMA_RD_WEIGHT_1_RESERVED_0__MASK;
+}
+#define DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK 0x000000ff
+#define DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT 0
+static inline uint32_t DDMA_RD_WEIGHT_1_RD_WEIGHT_PC(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT) & DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_FIFO_CLR 0x00008014
+#define DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK 0xfffffffe
+#define DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT 1
+static inline uint32_t DDMA_CFG_DMA_FIFO_CLR_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT) & DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK 0x00000001
+#define DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT) & DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_ARB 0x00008018
+#define DDMA_CFG_DMA_ARB_RESERVED_0__MASK 0xfffffc00
+#define DDMA_CFG_DMA_ARB_RESERVED_0__SHIFT 10
+static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RESERVED_0__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK 0x00000200
+#define DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT 9
+static inline uint32_t DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT) & DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK 0x00000100
+#define DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT) & DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RESERVED_1__MASK 0x00000080
+#define DDMA_CFG_DMA_ARB_RESERVED_1__SHIFT 7
+static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_1(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RESERVED_1__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_1__MASK;
+}
+#define DDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK 0x00000070
+#define DDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT 4
+static inline uint32_t DDMA_CFG_DMA_ARB_WR_FIX_ARB(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT) & DDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RESERVED_2__MASK 0x00000008
+#define DDMA_CFG_DMA_ARB_RESERVED_2__SHIFT 3
+static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_2(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RESERVED_2__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_2__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK 0x00000007
+#define DDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_ARB_RD_FIX_ARB(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT) & DDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_RD_QOS 0x00008020
+#define DDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK 0xfffffc00
+#define DDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT 10
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT) & DDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK 0x00000300
+#define DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_PC_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK 0x000000c0
+#define DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT 6
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK 0x00000030
+#define DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT 4
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK 0x0000000c
+#define DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT 2
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK 0x00000003
+#define DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_RD_CFG 0x00008024
+#define DDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK 0xffffe000
+#define DDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT) & DDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK 0x00001000
+#define DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT 12
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARLOCK(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK 0x00000f00
+#define DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARCACHE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK 0x000000e0
+#define DDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT 5
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARPROT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK 0x00000018
+#define DDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT 3
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARBURST(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK 0x00000007
+#define DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARSIZE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_WR_CFG 0x00008028
+#define DDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK 0xffffe000
+#define DDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT) & DDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK 0x00001000
+#define DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT 12
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWLOCK(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK 0x00000f00
+#define DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWCACHE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK 0x000000e0
+#define DDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT 5
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWPROT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK 0x00000018
+#define DDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT 3
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWBURST(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK 0x00000007
+#define DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWSIZE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK;
+}
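+
+/*
+ * Note: the RD_AR* and WR_AW* fields mirror the AMBA AXI read- and
+ * write-address channel attributes of the same names (ARSIZE, ARBURST,
+ * ARPROT, ARCACHE, ARLOCK and their AW counterparts).  A purely
+ * illustrative read setup for 8-byte (ARSIZE = 3) INCR (ARBURST = 1)
+ * bursts, with `base` a hypothetical register base:
+ *
+ *	writel(DDMA_CFG_DMA_RD_CFG_RD_ARSIZE(3) |
+ *	       DDMA_CFG_DMA_RD_CFG_RD_ARBURST(1),
+ *	       base + REG_DDMA_CFG_DMA_RD_CFG);
+ */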
+
+#define REG_DDMA_CFG_DMA_WSTRB 0x0000802c
+#define DDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK 0xffffffff
+#define DDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_WSTRB_WR_WSTRB(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT) & DDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK;
+}
+
+#define REG_DDMA_CFG_STATUS 0x00008030
+#define DDMA_CFG_STATUS_RESERVED_0__MASK 0xfffffe00
+#define DDMA_CFG_STATUS_RESERVED_0__SHIFT 9
+static inline uint32_t DDMA_CFG_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_STATUS_RESERVED_0__SHIFT) & DDMA_CFG_STATUS_RESERVED_0__MASK;
+}
+#define DDMA_CFG_STATUS_IDEL__MASK 0x00000100
+#define DDMA_CFG_STATUS_IDEL__SHIFT 8
+static inline uint32_t DDMA_CFG_STATUS_IDEL(uint32_t val)
+{
+ return ((val) << DDMA_CFG_STATUS_IDEL__SHIFT) & DDMA_CFG_STATUS_IDEL__MASK;
+}
+#define DDMA_CFG_STATUS_RESERVED_1__MASK 0x000000ff
+#define DDMA_CFG_STATUS_RESERVED_1__SHIFT 0
+static inline uint32_t DDMA_CFG_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << DDMA_CFG_STATUS_RESERVED_1__SHIFT) & DDMA_CFG_STATUS_RESERVED_1__MASK;
+}
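+
+/*
+ * Note: the IDEL spelling is preserved from the register description and
+ * presumably means "idle" (set once the DMA has no outstanding
+ * transactions).  The SDMA block that follows at 0x9000 mirrors this DDMA
+ * layout field for field.
+ */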
+
+#define REG_SDMA_CFG_OUTSTANDING 0x00009000
+#define SDMA_CFG_OUTSTANDING_RESERVED_0__MASK 0xffff0000
+#define SDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT 16
+static inline uint32_t SDMA_CFG_OUTSTANDING_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT) & SDMA_CFG_OUTSTANDING_RESERVED_0__MASK;
+}
+#define SDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK 0x0000ff00
+#define SDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT 8
+static inline uint32_t SDMA_CFG_OUTSTANDING_WR_OS_CNT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT) & SDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK;
+}
+#define SDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK 0x000000ff
+#define SDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT 0
+static inline uint32_t SDMA_CFG_OUTSTANDING_RD_OS_CNT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT) & SDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK;
+}
+
+#define REG_SDMA_RD_WEIGHT_0 0x00009004
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK 0xff000000
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT 24
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK;
+}
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK 0x00ff0000
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT 16
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK;
+}
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK 0x0000ff00
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT 8
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK;
+}
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK 0x000000ff
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT 0
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK;
+}
+
+#define REG_SDMA_WR_WEIGHT_0 0x00009008
+#define SDMA_WR_WEIGHT_0_RESERVED_0__MASK 0xffff0000
+#define SDMA_WR_WEIGHT_0_RESERVED_0__SHIFT 16
+static inline uint32_t SDMA_WR_WEIGHT_0_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_WR_WEIGHT_0_RESERVED_0__SHIFT) & SDMA_WR_WEIGHT_0_RESERVED_0__MASK;
+}
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK 0x0000ff00
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT 8
+static inline uint32_t SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT) & SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK;
+}
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK 0x000000ff
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT 0
+static inline uint32_t SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT) & SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK;
+}
+
+#define REG_SDMA_CFG_ID_ERROR 0x0000900c
+#define SDMA_CFG_ID_ERROR_RESERVED_0__MASK 0xfffffc00
+#define SDMA_CFG_ID_ERROR_RESERVED_0__SHIFT 10
+static inline uint32_t SDMA_CFG_ID_ERROR_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_RESERVED_0__SHIFT) & SDMA_CFG_ID_ERROR_RESERVED_0__MASK;
+}
+#define SDMA_CFG_ID_ERROR_WR_RESP_ID__MASK 0x000003c0
+#define SDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT 6
+static inline uint32_t SDMA_CFG_ID_ERROR_WR_RESP_ID(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT) & SDMA_CFG_ID_ERROR_WR_RESP_ID__MASK;
+}
+#define SDMA_CFG_ID_ERROR_RESERVED_1__MASK 0x00000020
+#define SDMA_CFG_ID_ERROR_RESERVED_1__SHIFT 5
+static inline uint32_t SDMA_CFG_ID_ERROR_RESERVED_1(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_RESERVED_1__SHIFT) & SDMA_CFG_ID_ERROR_RESERVED_1__MASK;
+}
+#define SDMA_CFG_ID_ERROR_RD_RESP_ID__MASK 0x0000001f
+#define SDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT 0
+static inline uint32_t SDMA_CFG_ID_ERROR_RD_RESP_ID(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT) & SDMA_CFG_ID_ERROR_RD_RESP_ID__MASK;
+}
+
+#define REG_SDMA_RD_WEIGHT_1 0x00009010
+#define SDMA_RD_WEIGHT_1_RESERVED_0__MASK 0xffffff00
+#define SDMA_RD_WEIGHT_1_RESERVED_0__SHIFT 8
+static inline uint32_t SDMA_RD_WEIGHT_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_1_RESERVED_0__SHIFT) & SDMA_RD_WEIGHT_1_RESERVED_0__MASK;
+}
+#define SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK 0x000000ff
+#define SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT 0
+static inline uint32_t SDMA_RD_WEIGHT_1_RD_WEIGHT_PC(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT) & SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_FIFO_CLR 0x00009014
+#define SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK 0xfffffffe
+#define SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT 1
+static inline uint32_t SDMA_CFG_DMA_FIFO_CLR_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT) & SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK 0x00000001
+#define SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT) & SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_ARB 0x00009018
+#define SDMA_CFG_DMA_ARB_RESERVED_0__MASK 0xfffffc00
+#define SDMA_CFG_DMA_ARB_RESERVED_0__SHIFT 10
+static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RESERVED_0__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK 0x00000200
+#define SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT 9
+static inline uint32_t SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT) & SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK 0x00000100
+#define SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT) & SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RESERVED_1__MASK 0x00000080
+#define SDMA_CFG_DMA_ARB_RESERVED_1__SHIFT 7
+static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_1(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RESERVED_1__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_1__MASK;
+}
+#define SDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK 0x00000070
+#define SDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT 4
+static inline uint32_t SDMA_CFG_DMA_ARB_WR_FIX_ARB(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT) & SDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RESERVED_2__MASK 0x00000008
+#define SDMA_CFG_DMA_ARB_RESERVED_2__SHIFT 3
+static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_2(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RESERVED_2__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_2__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK 0x00000007
+#define SDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_ARB_RD_FIX_ARB(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT) & SDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_RD_QOS 0x00009020
+#define SDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK 0xfffffc00
+#define SDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT 10
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT) & SDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK 0x00000300
+#define SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_PC_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK 0x000000c0
+#define SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT 6
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK 0x00000030
+#define SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT 4
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK 0x0000000c
+#define SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT 2
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK 0x00000003
+#define SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_RD_CFG 0x00009024
+#define SDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK 0xffffe000
+#define SDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT) & SDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK 0x00001000
+#define SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT 12
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARLOCK(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK 0x00000f00
+#define SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARCACHE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK 0x000000e0
+#define SDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT 5
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARPROT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK 0x00000018
+#define SDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT 3
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARBURST(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK 0x00000007
+#define SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARSIZE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_WR_CFG 0x00009028
+#define SDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK 0xffffe000
+#define SDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT) & SDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK 0x00001000
+#define SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT 12
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWLOCK(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK 0x00000f00
+#define SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWCACHE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK 0x000000e0
+#define SDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT 5
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWPROT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK 0x00000018
+#define SDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT 3
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWBURST(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK 0x00000007
+#define SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWSIZE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_WSTRB 0x0000902c
+#define SDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK 0xffffffff
+#define SDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_WSTRB_WR_WSTRB(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT) & SDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK;
+}
+
+#define REG_SDMA_CFG_STATUS 0x00009030
+#define SDMA_CFG_STATUS_RESERVED_0__MASK 0xfffffe00
+#define SDMA_CFG_STATUS_RESERVED_0__SHIFT 9
+static inline uint32_t SDMA_CFG_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_STATUS_RESERVED_0__SHIFT) & SDMA_CFG_STATUS_RESERVED_0__MASK;
+}
+#define SDMA_CFG_STATUS_IDEL__MASK 0x00000100
+#define SDMA_CFG_STATUS_IDEL__SHIFT 8
+static inline uint32_t SDMA_CFG_STATUS_IDEL(uint32_t val)
+{
+ return ((val) << SDMA_CFG_STATUS_IDEL__SHIFT) & SDMA_CFG_STATUS_IDEL__MASK;
+}
+#define SDMA_CFG_STATUS_RESERVED_1__MASK 0x000000ff
+#define SDMA_CFG_STATUS_RESERVED_1__SHIFT 0
+static inline uint32_t SDMA_CFG_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << SDMA_CFG_STATUS_RESERVED_1__SHIFT) & SDMA_CFG_STATUS_RESERVED_1__MASK;
+}
+
+#define REG_GLOBAL_OPERATION_ENABLE 0x0000f008
+#define GLOBAL_OPERATION_ENABLE_RESERVED_0__MASK 0xffffff80
+#define GLOBAL_OPERATION_ENABLE_RESERVED_0__SHIFT 7
+static inline uint32_t GLOBAL_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_RESERVED_0__SHIFT) & GLOBAL_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__MASK 0x00000040
+#define GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__SHIFT 6
+static inline uint32_t GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_PPU_OP_EN__MASK 0x00000020
+#define GLOBAL_OPERATION_ENABLE_PPU_OP_EN__SHIFT 5
+static inline uint32_t GLOBAL_OPERATION_ENABLE_PPU_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_PPU_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_PPU_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__MASK 0x00000010
+#define GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__SHIFT 4
+static inline uint32_t GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_DPU_OP_EN__MASK 0x00000008
+#define GLOBAL_OPERATION_ENABLE_DPU_OP_EN__SHIFT 3
+static inline uint32_t GLOBAL_OPERATION_ENABLE_DPU_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_DPU_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_DPU_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_CORE_OP_EN__MASK 0x00000004
+#define GLOBAL_OPERATION_ENABLE_CORE_OP_EN__SHIFT 2
+static inline uint32_t GLOBAL_OPERATION_ENABLE_CORE_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_CORE_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_CORE_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_RESERVED_1__MASK 0x00000002
+#define GLOBAL_OPERATION_ENABLE_RESERVED_1__SHIFT 1
+static inline uint32_t GLOBAL_OPERATION_ENABLE_RESERVED_1(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_RESERVED_1__SHIFT) & GLOBAL_OPERATION_ENABLE_RESERVED_1__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_CNA_OP_EN__MASK 0x00000001
+#define GLOBAL_OPERATION_ENABLE_CNA_OP_EN__SHIFT 0
+static inline uint32_t GLOBAL_OPERATION_ENABLE_CNA_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_CNA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_CNA_OP_EN__MASK;
+}
+
+#endif /* __ROCKET_REGISTERS_XML__ */
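
A minimal sketch of how the generated shift-and-mask helpers above are
meant to be combined when programming a register. The rocket-specific
MMIO plumbing is not shown in this header, so the base pointer and the
field values below are hypothetical:

    /* Compose REG_SDMA_CFG_DMA_RD_CFG from its fields; each helper
     * shifts its value into position and masks off any overflow. */
    static void sdma_rd_cfg_example(void __iomem *base)
    {
        u32 val = SDMA_CFG_DMA_RD_CFG_RD_ARCACHE(0xf) |
                  SDMA_CFG_DMA_RD_CFG_RD_ARPROT(0x2) |
                  SDMA_CFG_DMA_RD_CFG_RD_ARBURST(0x1) |
                  SDMA_CFG_DMA_RD_CFG_RD_ARSIZE(0x3);

        writel(val, base + REG_SDMA_CFG_DMA_RD_CFG);
    }
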
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7bc40c2735ac..ca00a5dbcf75 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -394,6 +394,7 @@ config ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD
config ACPI_DEBUG
bool "Debug Statements"
+ default y
help
The ACPI subsystem can produce debug output. Saying Y enables this
output and increases the kernel size by around 50K.
@@ -460,7 +461,7 @@ config ACPI_HED
config ACPI_BGRT
bool "Boottime Graphics Resource Table support"
- depends on EFI && (X86 || ARM64 || LOONGARCH)
+ depends on EFI
help
This driver adds support for exposing the ACPI Boottime Graphics
Resource Table, which allows the operating system to obtain
@@ -546,6 +547,10 @@ if ARM64
source "drivers/acpi/arm64/Kconfig"
endif
+if RISCV
+source "drivers/acpi/riscv/Kconfig"
+endif
+
config ACPI_PPTT
bool
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index d50261d05f3a..515b20d0b698 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -569,11 +569,11 @@ static int acpi_aml_release(struct inode *inode, struct file *file)
return 0;
}
-static int acpi_aml_read_user(char __user *buf, int len)
+static ssize_t acpi_aml_read_user(char __user *buf, size_t len)
{
- int ret;
struct circ_buf *crc = &acpi_aml_io.out_crc;
- int n;
+ ssize_t ret;
+ size_t n;
char *p;
ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
@@ -582,7 +582,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
/* sync head before removing logs */
smp_rmb();
p = &crc->buf[crc->tail];
- n = min(len, circ_count_to_end(crc));
+ n = min_t(size_t, len, circ_count_to_end(crc));
if (copy_to_user(buf, p, n)) {
ret = -EFAULT;
goto out;
@@ -599,8 +599,8 @@ out:
static ssize_t acpi_aml_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- int ret = 0;
- int size = 0;
+ ssize_t ret = 0;
+ ssize_t size = 0;
if (!count)
return 0;
@@ -639,11 +639,11 @@ again:
return size > 0 ? size : ret;
}
-static int acpi_aml_write_user(const char __user *buf, int len)
+static ssize_t acpi_aml_write_user(const char __user *buf, size_t len)
{
- int ret;
struct circ_buf *crc = &acpi_aml_io.in_crc;
- int n;
+ ssize_t ret;
+ size_t n;
char *p;
ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
@@ -652,7 +652,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
/* sync tail before inserting cmds */
smp_mb();
p = &crc->buf[crc->head];
- n = min(len, circ_space_to_end(crc));
+ n = min_t(size_t, len, circ_space_to_end(crc));
if (copy_from_user(p, buf, n)) {
ret = -EFAULT;
goto out;
@@ -663,14 +663,14 @@ static int acpi_aml_write_user(const char __user *buf, int len)
ret = n;
out:
acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
- return n;
+ return ret;
}
static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- int ret = 0;
- int size = 0;
+ ssize_t ret = 0;
+ ssize_t size = 0;
if (!count)
return 0;
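
The int-to-size_t conversion above is also why the min() calls become
min_t(): the kernel's min() rejects mismatched operand types at build
time, and circ_count_to_end()/circ_space_to_end() return int while the
length is now a size_t. A reduced sketch:

    /* min(len, avail) would trip the kernel's build-time type check;
     * min_t() casts both operands to the named type first. */
    static size_t bytes_to_copy(size_t len, int avail)
    {
        return min_t(size_t, len, avail);
    }
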
diff --git a/drivers/acpi/acpi_mrrm.c b/drivers/acpi/acpi_mrrm.c
index 47ea3ccc2142..6d69554c940e 100644
--- a/drivers/acpi/acpi_mrrm.c
+++ b/drivers/acpi/acpi_mrrm.c
@@ -63,6 +63,9 @@ static __init int acpi_parse_mrrm(struct acpi_table_header *table)
if (!mrrm)
return -ENODEV;
+ if (mrrm->header.revision != 1)
+ return -EINVAL;
+
if (mrrm->flags & ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS)
return -EOPNOTSUPP;
@@ -149,26 +152,49 @@ ATTRIBUTE_GROUPS(memory_range);
static __init int add_boot_memory_ranges(void)
{
- struct kobject *pkobj, *kobj;
+ struct kobject *pkobj, *kobj, **kobjs;
int ret = -EINVAL;
- char *name;
+ char name[16];
+ int i;
pkobj = kobject_create_and_add("memory_ranges", acpi_kobj);
+ if (!pkobj)
+ return -ENOMEM;
- for (int i = 0; i < mrrm_mem_entry_num; i++) {
- name = kasprintf(GFP_KERNEL, "range%d", i);
- if (!name) {
- ret = -ENOMEM;
- break;
- }
+ kobjs = kcalloc(mrrm_mem_entry_num, sizeof(*kobjs), GFP_KERNEL);
+ if (!kobjs) {
+ kobject_put(pkobj);
+ return -ENOMEM;
+ }
+ for (i = 0; i < mrrm_mem_entry_num; i++) {
+ scnprintf(name, sizeof(name), "range%d", i);
kobj = kobject_create_and_add(name, pkobj);
+ if (!kobj) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
ret = sysfs_create_groups(kobj, memory_range_groups);
- if (ret)
- return ret;
+ if (ret) {
+ kobject_put(kobj);
+ goto cleanup;
+ }
+ kobjs[i] = kobj;
}
+ kfree(kobjs);
+ return 0;
+
+cleanup:
+ for (int j = 0; j < i; j++) {
+ if (kobjs[j]) {
+ sysfs_remove_groups(kobjs[j], memory_range_groups);
+ kobject_put(kobjs[j]);
+ }
+ }
+ kfree(kobjs);
+ kobject_put(pkobj);
return ret;
}
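
The reworked error path above follows the usual create/unwind idiom:
remember every object created so far, and on the first failure tear
down exactly those. A skeleton with hypothetical create_one() and
destroy_one() helpers:

    static int create_all(int count)
    {
        int i, ret;

        for (i = 0; i < count; i++) {
            ret = create_one(i);
            if (ret)
                goto unwind;
        }
        return 0;

    unwind:
        while (--i >= 0)        /* undo entries 0..i-1 only */
            destroy_one(i);
        return ret;
    }
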
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 6f8bbe1247a5..c9a0bcaba2e4 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -33,7 +33,7 @@
static DEFINE_MUTEX(isolated_cpus_lock);
static DEFINE_MUTEX(round_robin_lock);
-static unsigned long power_saving_mwait_eax;
+static unsigned int power_saving_mwait_eax;
static unsigned char tsc_detected_unstable;
static unsigned char tsc_marked_unstable;
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 7cf6101cb4c7..7ec1dc04fd11 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -275,7 +275,7 @@ static inline int acpi_processor_hotadd_init(struct acpi_processor *pr,
static int acpi_processor_get_info(struct acpi_device *device)
{
- union acpi_object object = { 0 };
+ union acpi_object object = { .processor = { 0 } };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
struct acpi_processor *pr = acpi_driver_data(device);
int device_declaration = 0;
@@ -815,7 +815,7 @@ bool acpi_processor_claim_cst_control(void)
cst_control_claimed = true;
return true;
}
-EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control);
+EXPORT_SYMBOL_NS_GPL(acpi_processor_claim_cst_control, "ACPI_PROCESSOR_IDLE");
/**
* acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
@@ -994,5 +994,5 @@ end:
return ret;
}
-EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst);
+EXPORT_SYMBOL_NS_GPL(acpi_processor_evaluate_cst, "ACPI_PROCESSOR_IDLE");
#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
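
Moving these exports into a symbol namespace means any module calling
them must now import it explicitly; assuming the string-literal
namespace form used by recent kernels (matching the quoted name in the
hunk above), that is a one-line addition on the consumer side:

    #include <linux/module.h>

    MODULE_IMPORT_NS("ACPI_PROCESSOR_IDLE");
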
diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c
index 825c2a8acea4..6d870d97ada6 100644
--- a/drivers/acpi/acpi_tad.c
+++ b/drivers/acpi/acpi_tad.c
@@ -90,19 +90,18 @@ static int acpi_tad_set_real_time(struct device *dev, struct acpi_tad_rt *rt)
args[0].buffer.pointer = (u8 *)rt;
args[0].buffer.length = sizeof(*rt);
- pm_runtime_get_sync(dev);
+ PM_RUNTIME_ACQUIRE(dev, pm);
+ if (PM_RUNTIME_ACQUIRE_ERR(&pm))
+ return -ENXIO;
status = acpi_evaluate_integer(handle, "_SRT", &arg_list, &retval);
-
- pm_runtime_put_sync(dev);
-
if (ACPI_FAILURE(status) || retval)
return -EIO;
return 0;
}
-static int acpi_tad_get_real_time(struct device *dev, struct acpi_tad_rt *rt)
+static int acpi_tad_evaluate_grt(struct device *dev, struct acpi_tad_rt *rt)
{
acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER };
@@ -111,12 +110,7 @@ static int acpi_tad_get_real_time(struct device *dev, struct acpi_tad_rt *rt)
acpi_status status;
int ret = -EIO;
- pm_runtime_get_sync(dev);
-
status = acpi_evaluate_object(handle, "_GRT", NULL, &output);
-
- pm_runtime_put_sync(dev);
-
if (ACPI_FAILURE(status))
goto out_free;
@@ -139,6 +133,21 @@ out_free:
return ret;
}
+static int acpi_tad_get_real_time(struct device *dev, struct acpi_tad_rt *rt)
+{
+ int ret;
+
+ PM_RUNTIME_ACQUIRE(dev, pm);
+ if (PM_RUNTIME_ACQUIRE_ERR(&pm))
+ return -ENXIO;
+
+ ret = acpi_tad_evaluate_grt(dev, rt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static char *acpi_tad_rt_next_field(char *s, int *val)
{
char *p;
@@ -233,7 +242,7 @@ static ssize_t time_show(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- return sprintf(buf, "%u:%u:%u:%u:%u:%u:%d:%u\n",
+ return sysfs_emit(buf, "%u:%u:%u:%u:%u:%u:%d:%u\n",
rt.year, rt.month, rt.day, rt.hour, rt.minute, rt.second,
rt.tz, rt.daylight);
}
@@ -266,12 +275,11 @@ static int acpi_tad_wake_set(struct device *dev, char *method, u32 timer_id,
args[0].integer.value = timer_id;
args[1].integer.value = value;
- pm_runtime_get_sync(dev);
+ PM_RUNTIME_ACQUIRE(dev, pm);
+ if (PM_RUNTIME_ACQUIRE_ERR(&pm))
+ return -ENXIO;
status = acpi_evaluate_integer(handle, method, &arg_list, &retval);
-
- pm_runtime_put_sync(dev);
-
if (ACPI_FAILURE(status) || retval)
return -EIO;
@@ -314,12 +322,11 @@ static ssize_t acpi_tad_wake_read(struct device *dev, char *buf, char *method,
args[0].integer.value = timer_id;
- pm_runtime_get_sync(dev);
+ PM_RUNTIME_ACQUIRE(dev, pm);
+ if (PM_RUNTIME_ACQUIRE_ERR(&pm))
+ return -ENXIO;
status = acpi_evaluate_integer(handle, method, &arg_list, &retval);
-
- pm_runtime_put_sync(dev);
-
if (ACPI_FAILURE(status))
return -EIO;
@@ -370,12 +377,11 @@ static int acpi_tad_clear_status(struct device *dev, u32 timer_id)
args[0].integer.value = timer_id;
- pm_runtime_get_sync(dev);
+ PM_RUNTIME_ACQUIRE(dev, pm);
+ if (PM_RUNTIME_ACQUIRE_ERR(&pm))
+ return -ENXIO;
status = acpi_evaluate_integer(handle, "_CWS", &arg_list, &retval);
-
- pm_runtime_put_sync(dev);
-
if (ACPI_FAILURE(status) || retval)
return -EIO;
@@ -411,12 +417,11 @@ static ssize_t acpi_tad_status_read(struct device *dev, char *buf, u32 timer_id)
args[0].integer.value = timer_id;
- pm_runtime_get_sync(dev);
+ PM_RUNTIME_ACQUIRE(dev, pm);
+ if (PM_RUNTIME_ACQUIRE_ERR(&pm))
+ return -ENXIO;
status = acpi_evaluate_integer(handle, "_GWS", &arg_list, &retval);
-
- pm_runtime_put_sync(dev);
-
if (ACPI_FAILURE(status))
return -EIO;
@@ -428,7 +433,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
{
struct acpi_tad_driver_data *dd = dev_get_drvdata(dev);
- return sprintf(buf, "0x%02X\n", dd->capabilities);
+ return sysfs_emit(buf, "0x%02X\n", dd->capabilities);
}
static DEVICE_ATTR_RO(caps);
@@ -563,21 +568,24 @@ static void acpi_tad_remove(struct platform_device *pdev)
device_init_wakeup(dev, false);
- pm_runtime_get_sync(dev);
+ if (dd->capabilities & ACPI_TAD_RT)
+ sysfs_remove_group(&dev->kobj, &acpi_tad_time_attr_group);
if (dd->capabilities & ACPI_TAD_DC_WAKE)
sysfs_remove_group(&dev->kobj, &acpi_tad_dc_attr_group);
sysfs_remove_group(&dev->kobj, &acpi_tad_attr_group);
- acpi_tad_disable_timer(dev, ACPI_TAD_AC_TIMER);
- acpi_tad_clear_status(dev, ACPI_TAD_AC_TIMER);
- if (dd->capabilities & ACPI_TAD_DC_WAKE) {
- acpi_tad_disable_timer(dev, ACPI_TAD_DC_TIMER);
- acpi_tad_clear_status(dev, ACPI_TAD_DC_TIMER);
+ scoped_guard(pm_runtime_noresume, dev) {
+ acpi_tad_disable_timer(dev, ACPI_TAD_AC_TIMER);
+ acpi_tad_clear_status(dev, ACPI_TAD_AC_TIMER);
+ if (dd->capabilities & ACPI_TAD_DC_WAKE) {
+ acpi_tad_disable_timer(dev, ACPI_TAD_DC_TIMER);
+ acpi_tad_clear_status(dev, ACPI_TAD_DC_TIMER);
+ }
}
- pm_runtime_put_sync(dev);
+ pm_runtime_suspend(dev);
pm_runtime_disable(dev);
acpi_remove_cmos_rtc_space_handler(handle);
}
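
The PM_RUNTIME_ACQUIRE()/PM_RUNTIME_ACQUIRE_ERR() pair is defined
outside this hunk; assuming it follows the kernel's scope-based guard
pattern (resume the device on entry, drop the runtime-PM reference
automatically when "pm" leaves scope), each converted helper above
collapses to the same shape:

    /* Hypothetical condensed form of the converted helpers. */
    static int acpi_tad_call(struct device *dev, char *method,
                             struct acpi_object_list *args)
    {
        unsigned long long retval;
        acpi_status status;

        PM_RUNTIME_ACQUIRE(dev, pm);
        if (PM_RUNTIME_ACQUIRE_ERR(&pm))
            return -ENXIO;

        status = acpi_evaluate_integer(ACPI_HANDLE(dev), method,
                                       args, &retval);
        if (ACPI_FAILURE(status) || retval)
            return -EIO;

        return 0;       /* reference released at end of scope */
    }
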
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 103f29661576..be8e7e18abca 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -1959,8 +1959,10 @@ static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video)
struct acpi_video_device *dev;
mutex_lock(&video->device_list_lock);
- list_for_each_entry(dev, &video->video_device_list, entry)
+ list_for_each_entry(dev, &video->video_device_list, entry) {
acpi_video_dev_remove_notify_handler(dev);
+ cancel_delayed_work_sync(&dev->switch_brightness_work);
+ }
mutex_unlock(&video->device_list_lock);
acpi_video_bus_stop_devices(video);
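
The ordering matters in the hunk above: the notify handler is removed
first and the delayed work cancelled second, so a late event cannot
re-arm the work after it has been flushed. In skeleton form, with
handler_remove() standing in for the unregistration call:

    static void teardown(struct my_dev *dev)
    {
        handler_remove(dev);                    /* stop new events */
        cancel_delayed_work_sync(&dev->work);   /* flush in-flight work */
    }
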
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index fe6d38b43c9a..91241bd6917a 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -37,7 +37,7 @@ struct acpi_db_argument_info {
struct acpi_db_execute_walk {
u32 count;
u32 max_count;
- char name_seg[ACPI_NAMESEG_SIZE + 1] ACPI_NONSTRING;
+ char name_seg[ACPI_NAMESEG_SIZE + 1];
};
#define PARAM_LIST(pl) pl
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 0c41f0097e8d..f98640086f4e 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -1141,7 +1141,7 @@ struct acpi_port_info {
#define ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION 0x91
#define ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG 0x92
#define ACPI_RESOURCE_NAME_CLOCK_INPUT 0x93
-#define ACPI_RESOURCE_NAME_LARGE_MAX 0x94
+#define ACPI_RESOURCE_NAME_LARGE_MAX 0x93
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 76c5ed02e916..da2c45880cc7 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -450,7 +450,8 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_DSM",
METHOD_4ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
- ACPI_TYPE_ANY) | ARG_COUNT_IS_MINIMUM,
+ ACPI_TYPE_ANY | ACPI_TYPE_PACKAGE) |
+ ARG_COUNT_IS_MINIMUM,
METHOD_RETURNS(ACPI_RTYPE_ALL)}}, /* Must return a value, but it can be of any type */
{{"_DSS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index c8f37f4e6626..45ec32e81903 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -462,7 +462,6 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
struct acpi_walk_state *next_walk_state = NULL;
union acpi_operand_object *obj_desc;
struct acpi_evaluate_info *info;
- u32 i;
ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
@@ -483,6 +482,20 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(AE_NULL_OBJECT);
}
+ if (this_walk_state->num_operands < obj_desc->method.param_count) {
+ ACPI_ERROR((AE_INFO, "Missing argument(s) for method [%4.4s]",
+ acpi_ut_get_node_name(method_node)));
+
+ return_ACPI_STATUS(AE_AML_TOO_FEW_ARGUMENTS);
+ }
+
+ else if (this_walk_state->num_operands > obj_desc->method.param_count) {
+ ACPI_ERROR((AE_INFO, "Too many arguments for method [%4.4s]",
+ acpi_ut_get_node_name(method_node)));
+
+ return_ACPI_STATUS(AE_AML_TOO_MANY_ARGUMENTS);
+ }
+
/* Init for new method, possibly wait on method mutex */
status =
@@ -539,14 +552,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
* Delete the operands on the previous walkstate operand stack
* (they were copied to new objects)
*/
- for (i = 0; i < obj_desc->method.param_count; i++) {
- acpi_ut_remove_reference(this_walk_state->operands[i]);
- this_walk_state->operands[i] = NULL;
- }
-
- /* Clear the operand stack */
-
- this_walk_state->num_operands = 0;
+ acpi_ds_clear_operands(this_walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index fa3e0d00d1ca..df2a4ab0e0da 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -42,6 +42,10 @@ acpi_status acpi_ev_init_global_lock_handler(void)
return_ACPI_STATUS(AE_OK);
}
+ if (!acpi_gbl_use_global_lock) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Attempt installation of the global lock handler */
status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c
index d34497f3576a..36934d4f26fb 100644
--- a/drivers/acpi/acpica/extrace.c
+++ b/drivers/acpi/acpica/extrace.c
@@ -136,9 +136,9 @@ acpi_ex_trace_point(acpi_trace_event_type type,
if (pathname) {
ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
- "%s %s [0x%p:%s] execution.\n",
+ "%s %s [%s] execution.\n",
acpi_ex_get_trace_event_name(type),
- begin ? "Begin" : "End", aml, pathname));
+ begin ? "Begin" : "End", pathname));
} else {
ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
"%s %s [0x%p] execution.\n",
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index a2ac06a26e92..5670ff5a43cd 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -169,9 +169,12 @@ acpi_ns_walk_namespace(acpi_object_type type,
if (start_node == ACPI_ROOT_OBJECT) {
start_node = acpi_gbl_root_node;
- if (!start_node) {
- return_ACPI_STATUS(AE_NO_NAMESPACE);
- }
+ }
+
+ /* Avoid walking the namespace if the StartNode is NULL */
+
+ if (!start_node) {
+ return_ACPI_STATUS(AE_NO_NAMESPACE);
}
/* Null child means "get first node" */
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index 1c8044ffcb97..532ea307a675 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -34,7 +34,7 @@ static const u8 acpi_gbl_argument_count[] =
const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
{
-#ifdef ACPI_DEBUG_OUTPUT
+#if defined ACPI_ASL_COMPILER && defined ACPI_DEBUG_OUTPUT
const char *opcode_name = "Unknown AML opcode";
#endif
@@ -102,11 +102,11 @@ const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
default:
break;
}
-#endif
/* Unknown AML opcode */
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%4.4X]\n", opcode_name, opcode));
+#endif
return (&acpi_gbl_aml_op_info[_UNK]);
}
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index fd64460a2e26..e5631027f7f1 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -95,6 +95,11 @@ acpi_tb_print_table_header(acpi_physical_address address,
{
struct acpi_table_header local_header;
+#pragma GCC diagnostic push
+#if defined(__GNUC__) && __GNUC__ >= 11
+#pragma GCC diagnostic ignored "-Wstringop-overread"
+#endif
+
if (ACPI_COMPARE_NAMESEG(header->signature, ACPI_SIG_FACS)) {
/* FACS only has signature and length fields */
@@ -121,6 +126,14 @@ acpi_tb_print_table_header(acpi_physical_address address,
ACPI_CAST_PTR(struct acpi_table_rsdp,
header)->revision,
local_header.oem_id));
+ } else if (acpi_gbl_CDAT && !acpi_ut_valid_nameseg(header->signature)) {
+
+ /* CDAT does not use the common ACPI table header */
+
+ ACPI_INFO(("%-4.4s 0x%8.8X%8.8X %06X",
+ ACPI_SIG_CDAT, ACPI_FORMAT_UINT64(address),
+ ACPI_CAST_PTR(struct acpi_table_cdat,
+ header)->length));
} else {
/* Standard ACPI table with full common header */
@@ -135,4 +148,5 @@ acpi_tb_print_table_header(acpi_physical_address address,
local_header.asl_compiler_id,
local_header.asl_compiler_revision));
}
+#pragma GCC diagnostic pop
}
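
The push/ignored/pop sandwich above scopes the warning suppression to
this one function. A standalone illustration of the same pattern,
using a made-up header type:

    #include <string.h>

    struct full_header { char sig[4]; unsigned int len; char oem[6]; };

    /* GCC 11+ warns when it can prove the source object is smaller
     * than the access; the pragma sandwich silences it only here. */
    #pragma GCC diagnostic push
    #if defined(__GNUC__) && __GNUC__ >= 11
    #pragma GCC diagnostic ignored "-Wstringop-overread"
    #endif
    static void copy_header(struct full_header *dst, const char *raw)
    {
        memcpy(dst, raw, sizeof(*dst)); /* raw may be a shorter table */
    }
    #pragma GCC diagnostic pop
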
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index cd2766c69d78..77c10a7a7a9f 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -131,7 +131,7 @@ static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus)
int apei_osc_setup(void);
-int einj_get_available_error_type(u32 *type);
+int einj_get_available_error_type(u32 *type, int einj_action);
int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
u64 param4);
int einj_cxl_rch_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index fea11a35eea3..305c240a303f 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -33,6 +33,8 @@
#define SLEEP_UNIT_MAX 5000 /* 5ms */
/* Firmware should respond within 1 seconds */
#define FIRMWARE_TIMEOUT (1 * USEC_PER_SEC)
+#define COMPONENT_LEN 16
+#define ACPI65_EINJV2_SUPP BIT(30)
#define ACPI5_VENDOR_BIT BIT(31)
#define MEM_ERROR_MASK (ACPI_EINJ_MEMORY_CORRECTABLE | \
ACPI_EINJ_MEMORY_UNCORRECTABLE | \
@@ -49,6 +51,28 @@
*/
static int acpi5;
+struct syndrome_array {
+ union {
+ u8 acpi_id[COMPONENT_LEN];
+ u8 device_id[COMPONENT_LEN];
+ u8 pcie_sbdf[COMPONENT_LEN];
+ u8 vendor_id[COMPONENT_LEN];
+ } comp_id;
+ union {
+ u8 proc_synd[COMPONENT_LEN];
+ u8 mem_synd[COMPONENT_LEN];
+ u8 pcie_synd[COMPONENT_LEN];
+ u8 vendor_synd[COMPONENT_LEN];
+ } comp_synd;
+};
+
+struct einjv2_extension_struct {
+ u32 length;
+ u16 revision;
+ u16 component_arr_count;
+ struct syndrome_array component_arr[] __counted_by(component_arr_count);
+};
+
struct set_error_type_with_address {
u32 type;
u32 vendor_extension;
@@ -57,11 +81,13 @@ struct set_error_type_with_address {
u64 memory_address;
u64 memory_address_range;
u32 pcie_sbdf;
+ struct einjv2_extension_struct einjv2_struct;
};
enum {
SETWA_FLAGS_APICID = 1,
SETWA_FLAGS_MEM = 2,
SETWA_FLAGS_PCIE_SBDF = 4,
+ SETWA_FLAGS_EINJV2 = 8,
};
/*
@@ -83,7 +109,10 @@ static struct debugfs_blob_wrapper vendor_blob;
static struct debugfs_blob_wrapper vendor_errors;
static char vendor_dev[64];
+static u32 max_nr_components;
static u32 available_error_type;
+static u32 available_error_type_v2;
+static struct syndrome_array *syndrome_data;
/*
* Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
@@ -151,7 +180,10 @@ static DEFINE_MUTEX(einj_mutex);
*/
bool einj_initialized __ro_after_init;
-static void *einj_param;
+static void __iomem *einj_param;
+static u32 v5param_size;
+static u32 v66param_size;
+static bool is_v2;
static void einj_exec_ctx_init(struct apei_exec_context *ctx)
{
@@ -159,13 +191,13 @@ static void einj_exec_ctx_init(struct apei_exec_context *ctx)
EINJ_TAB_ENTRY(einj_tab), einj_tab->entries);
}
-static int __einj_get_available_error_type(u32 *type)
+static int __einj_get_available_error_type(u32 *type, int einj_action)
{
struct apei_exec_context ctx;
int rc;
einj_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE);
+ rc = apei_exec_run(&ctx, einj_action);
if (rc)
return rc;
*type = apei_exec_ctx_get_output(&ctx);
@@ -174,17 +206,34 @@ static int __einj_get_available_error_type(u32 *type)
}
/* Get error injection capabilities of the platform */
-int einj_get_available_error_type(u32 *type)
+int einj_get_available_error_type(u32 *type, int einj_action)
{
int rc;
mutex_lock(&einj_mutex);
- rc = __einj_get_available_error_type(type);
+ rc = __einj_get_available_error_type(type, einj_action);
mutex_unlock(&einj_mutex);
return rc;
}
+static int einj_get_available_error_types(u32 *type1, u32 *type2)
+{
+ int rc;
+
+ rc = einj_get_available_error_type(type1, ACPI_EINJ_GET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ if (*type1 & ACPI65_EINJV2_SUPP) {
+ rc = einj_get_available_error_type(type2,
+ ACPI_EINJV2_GET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
static int einj_timedout(u64 *t)
{
if ((s64)*t < SLEEP_UNIT_MIN) {
@@ -216,24 +265,44 @@ static void check_vendor_extension(u64 paddr,
struct set_error_type_with_address *v5param)
{
int offset = v5param->vendor_extension;
- struct vendor_error_type_extension *v;
+ struct vendor_error_type_extension v;
+ struct vendor_error_type_extension __iomem *p;
u32 sbdf;
if (!offset)
return;
- v = acpi_os_map_iomem(paddr + offset, sizeof(*v));
- if (!v)
+ p = acpi_os_map_iomem(paddr + offset, sizeof(*p));
+ if (!p)
return;
- get_oem_vendor_struct(paddr, offset, v);
- sbdf = v->pcie_sbdf;
+ memcpy_fromio(&v, p, sizeof(v));
+ get_oem_vendor_struct(paddr, offset, &v);
+ sbdf = v.pcie_sbdf;
sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n",
sbdf >> 24, (sbdf >> 16) & 0xff,
(sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7,
- v->vendor_id, v->device_id, v->rev_id);
- acpi_os_unmap_iomem(v, sizeof(*v));
+ v.vendor_id, v.device_id, v.rev_id);
+ acpi_os_unmap_iomem(p, sizeof(v));
}
-static void *einj_get_parameter_address(void)
+static u32 einjv2_init(struct einjv2_extension_struct *e)
+{
+ if (e->revision != 1) {
+ pr_info("Unknown v2 extension revision %u\n", e->revision);
+ return 0;
+ }
+ if (e->length < sizeof(*e) || e->length > PAGE_SIZE) {
+ pr_info(FW_BUG "Bad1 v2 extension length %u\n", e->length);
+ return 0;
+ }
+ if ((e->length - sizeof(*e)) % sizeof(e->component_arr[0])) {
+ pr_info(FW_BUG "Bad2 v2 extension length %u\n", e->length);
+ return 0;
+ }
+
+ return (e->length - sizeof(*e)) / sizeof(e->component_arr[0]);
+}
+
+static void __iomem *einj_get_parameter_address(void)
{
int i;
u64 pa_v4 = 0, pa_v5 = 0;
@@ -254,26 +323,43 @@ static void *einj_get_parameter_address(void)
entry++;
}
if (pa_v5) {
- struct set_error_type_with_address *v5param;
+ struct set_error_type_with_address v5param;
+ struct set_error_type_with_address __iomem *p;
- v5param = acpi_os_map_iomem(pa_v5, sizeof(*v5param));
- if (v5param) {
+ v5param_size = sizeof(v5param);
+ p = acpi_os_map_iomem(pa_v5, sizeof(*p));
+ if (p) {
+ memcpy_fromio(&v5param, p, v5param_size);
acpi5 = 1;
- check_vendor_extension(pa_v5, v5param);
- return v5param;
+ check_vendor_extension(pa_v5, &v5param);
+ if (available_error_type & ACPI65_EINJV2_SUPP) {
+ struct einjv2_extension_struct *e;
+
+ e = &v5param.einjv2_struct;
+ max_nr_components = einjv2_init(e);
+
+ /* remap including einjv2_extension_struct */
+ acpi_os_unmap_iomem(p, v5param_size);
+ v66param_size = v5param_size - sizeof(*e) + e->length;
+ p = acpi_os_map_iomem(pa_v5, v66param_size);
+ }
+
+ return p;
}
}
if (param_extension && pa_v4) {
- struct einj_parameter *v4param;
+ struct einj_parameter v4param;
+ struct einj_parameter __iomem *p;
- v4param = acpi_os_map_iomem(pa_v4, sizeof(*v4param));
- if (!v4param)
+ p = acpi_os_map_iomem(pa_v4, sizeof(*p));
+ if (!p)
return NULL;
- if (v4param->reserved1 || v4param->reserved2) {
- acpi_os_unmap_iomem(v4param, sizeof(*v4param));
+ memcpy_fromio(&v4param, p, sizeof(v4param));
+ if (v4param.reserved1 || v4param.reserved2) {
+ acpi_os_unmap_iomem(p, sizeof(v4param));
return NULL;
}
- return v4param;
+ return p;
}
return NULL;
@@ -319,7 +405,8 @@ static struct acpi_generic_address *einj_get_trigger_parameter_region(
static int __einj_error_trigger(u64 trigger_paddr, u32 type,
u64 param1, u64 param2)
{
- struct acpi_einj_trigger *trigger_tab = NULL;
+ struct acpi_einj_trigger trigger_tab;
+ struct acpi_einj_trigger *full_trigger_tab;
struct apei_exec_context trigger_ctx;
struct apei_resources trigger_resources;
struct acpi_whea_header *trigger_entry;
@@ -327,54 +414,60 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
u32 table_size;
int rc = -EIO;
struct acpi_generic_address *trigger_param_region = NULL;
+ struct acpi_einj_trigger __iomem *p = NULL;
- r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
+ r = request_mem_region(trigger_paddr, sizeof(trigger_tab),
"APEI EINJ Trigger Table");
if (!r) {
pr_err("Can not request [mem %#010llx-%#010llx] for Trigger table\n",
(unsigned long long)trigger_paddr,
(unsigned long long)trigger_paddr +
- sizeof(*trigger_tab) - 1);
+ sizeof(trigger_tab) - 1);
goto out;
}
- trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
- if (!trigger_tab) {
+ p = ioremap_cache(trigger_paddr, sizeof(*p));
+ if (!p) {
pr_err("Failed to map trigger table!\n");
goto out_rel_header;
}
- rc = einj_check_trigger_header(trigger_tab);
+ memcpy_fromio(&trigger_tab, p, sizeof(trigger_tab));
+ rc = einj_check_trigger_header(&trigger_tab);
if (rc) {
pr_warn(FW_BUG "Invalid trigger error action table.\n");
goto out_rel_header;
}
/* No action structures in the TRIGGER_ERROR table, nothing to do */
- if (!trigger_tab->entry_count)
+ if (!trigger_tab.entry_count)
goto out_rel_header;
rc = -EIO;
- table_size = trigger_tab->table_size;
- r = request_mem_region(trigger_paddr + sizeof(*trigger_tab),
- table_size - sizeof(*trigger_tab),
+ table_size = trigger_tab.table_size;
+ full_trigger_tab = kmalloc(table_size, GFP_KERNEL);
+ if (!full_trigger_tab)
+ goto out_rel_header;
+ r = request_mem_region(trigger_paddr + sizeof(trigger_tab),
+ table_size - sizeof(trigger_tab),
"APEI EINJ Trigger Table");
if (!r) {
pr_err("Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n",
- (unsigned long long)trigger_paddr + sizeof(*trigger_tab),
+ (unsigned long long)trigger_paddr + sizeof(trigger_tab),
(unsigned long long)trigger_paddr + table_size - 1);
- goto out_rel_header;
+ goto out_free_trigger_tab;
}
- iounmap(trigger_tab);
- trigger_tab = ioremap_cache(trigger_paddr, table_size);
- if (!trigger_tab) {
+ iounmap(p);
+ p = ioremap_cache(trigger_paddr, table_size);
+ if (!p) {
pr_err("Failed to map trigger table!\n");
goto out_rel_entry;
}
+ memcpy_fromio(full_trigger_tab, p, table_size);
trigger_entry = (struct acpi_whea_header *)
- ((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
+ ((char *)full_trigger_tab + sizeof(struct acpi_einj_trigger));
apei_resources_init(&trigger_resources);
apei_exec_ctx_init(&trigger_ctx, einj_ins_type,
ARRAY_SIZE(einj_ins_type),
- trigger_entry, trigger_tab->entry_count);
+ trigger_entry, trigger_tab.entry_count);
rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources);
if (rc)
goto out_fini;
@@ -392,7 +485,7 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
apei_resources_init(&addr_resources);
trigger_param_region = einj_get_trigger_parameter_region(
- trigger_tab, param1, param2);
+ full_trigger_tab, param1, param2);
if (trigger_param_region) {
rc = apei_resources_add(&addr_resources,
trigger_param_region->address,
@@ -421,23 +514,34 @@ out_release:
out_fini:
apei_resources_fini(&trigger_resources);
out_rel_entry:
- release_mem_region(trigger_paddr + sizeof(*trigger_tab),
- table_size - sizeof(*trigger_tab));
+ release_mem_region(trigger_paddr + sizeof(trigger_tab),
+ table_size - sizeof(trigger_tab));
+out_free_trigger_tab:
+ kfree(full_trigger_tab);
out_rel_header:
- release_mem_region(trigger_paddr, sizeof(*trigger_tab));
+ release_mem_region(trigger_paddr, sizeof(trigger_tab));
out:
- if (trigger_tab)
- iounmap(trigger_tab);
+ if (p)
+ iounmap(p);
return rc;
}
+static bool is_end_of_list(u8 *val)
+{
+ for (int i = 0; i < COMPONENT_LEN; ++i) {
+ if (val[i] != 0xFF)
+ return false;
+ }
+ return true;
+}
static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
u64 param3, u64 param4)
{
struct apei_exec_context ctx;
+ u32 param_size = is_v2 ? v66param_size : v5param_size;
u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
- int rc;
+ int i, rc;
einj_exec_ctx_init(&ctx);
@@ -446,8 +550,13 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
return rc;
apei_exec_ctx_set_input(&ctx, type);
if (acpi5) {
- struct set_error_type_with_address *v5param = einj_param;
+ struct set_error_type_with_address *v5param;
+ v5param = kmalloc(param_size, GFP_KERNEL);
+ if (!v5param)
+ return -ENOMEM;
+
+ memcpy_fromio(v5param, einj_param, param_size);
v5param->type = type;
if (type & ACPI5_VENDOR_BIT) {
switch (vendor_flags) {
@@ -467,8 +576,21 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
v5param->flags = flags;
v5param->memory_address = param1;
v5param->memory_address_range = param2;
- v5param->apicid = param3;
- v5param->pcie_sbdf = param4;
+
+ if (is_v2) {
+ for (i = 0; i < max_nr_components; i++) {
+ if (is_end_of_list(syndrome_data[i].comp_id.acpi_id))
+ break;
+ v5param->einjv2_struct.component_arr[i].comp_id =
+ syndrome_data[i].comp_id;
+ v5param->einjv2_struct.component_arr[i].comp_synd =
+ syndrome_data[i].comp_synd;
+ }
+ v5param->einjv2_struct.component_arr_count = i;
+ } else {
+ v5param->apicid = param3;
+ v5param->pcie_sbdf = param4;
+ }
} else {
switch (type) {
case ACPI_EINJ_PROCESSOR_CORRECTABLE:
@@ -492,15 +614,19 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
break;
}
}
+ memcpy_toio(einj_param, v5param, param_size);
+ kfree(v5param);
} else {
rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
if (rc)
return rc;
if (einj_param) {
- struct einj_parameter *v4param = einj_param;
+ struct einj_parameter v4param;
- v4param->param1 = param1;
- v4param->param2 = param2;
+ memcpy_fromio(&v4param, einj_param, sizeof(v4param));
+ v4param.param1 = param1;
+ v4param.param2 = param2;
+ memcpy_toio(einj_param, &v4param, sizeof(v4param));
}
}
rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
@@ -543,6 +669,43 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
return rc;
}
+/* Allow almost all types of address except MMIO. */
+static bool is_allowed_range(u64 base_addr, u64 size)
+{
+ int i;
+ /*
+ * An MMIO region is usually claimed with IORESOURCE_MEM + IORES_DESC_NONE.
+ * However, IORES_DESC_NONE is treated like a wildcard when we check
+ * whether a region intersects with a known resource, so do an allow-list
+ * check for IORES_DESCs that are definitely or most likely not MMIO.
+ */
+ int non_mmio_desc[] = {
+ IORES_DESC_CRASH_KERNEL,
+ IORES_DESC_ACPI_TABLES,
+ IORES_DESC_ACPI_NV_STORAGE,
+ IORES_DESC_PERSISTENT_MEMORY,
+ IORES_DESC_PERSISTENT_MEMORY_LEGACY,
+ /* Treat IORES_DESC_DEVICE_PRIVATE_MEMORY as MMIO. */
+ IORES_DESC_RESERVED,
+ IORES_DESC_SOFT_RESERVED,
+ };
+
+ if (region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
+ == REGION_INTERSECTS)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(non_mmio_desc); ++i) {
+ if (region_intersects(base_addr, size, IORESOURCE_MEM, non_mmio_desc[i])
+ == REGION_INTERSECTS)
+ return true;
+ }
+
+ if (arch_is_platform_page(base_addr))
+ return true;
+
+ return false;
+}
+
/* Inject the specified hardware error */
int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
u64 param4)
@@ -551,10 +714,15 @@ int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
u64 base_addr, size;
/* If user manually set "flags", make sure it is legal */
- if (flags && (flags &
- ~(SETWA_FLAGS_APICID|SETWA_FLAGS_MEM|SETWA_FLAGS_PCIE_SBDF)))
+ if (flags && (flags & ~(SETWA_FLAGS_APICID | SETWA_FLAGS_MEM |
+ SETWA_FLAGS_PCIE_SBDF | SETWA_FLAGS_EINJV2)))
return -EINVAL;
+ /* check if type is a valid EINJv2 error type */
+ if (is_v2) {
+ if (!(type & available_error_type_v2))
+ return -EINVAL;
+ }
/*
* We need extra sanity checks for memory errors.
* Other types leap directly to injection.
@@ -584,19 +752,15 @@ int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
* Disallow crazy address masks that give BIOS leeway to pick
* injection address almost anywhere. Insist on page or
* better granularity and that target address is normal RAM or
- * NVDIMM.
+ * anything that is not MMIO.
*/
base_addr = param1 & param2;
size = ~param2 + 1;
- if (((param2 & PAGE_MASK) != PAGE_MASK) ||
- ((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
- != REGION_INTERSECTS) &&
- (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY)
- != REGION_INTERSECTS) &&
- (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_SOFT_RESERVED)
- != REGION_INTERSECTS) &&
- !arch_is_platform_page(base_addr)))
+ if ((param2 & PAGE_MASK) != PAGE_MASK)
+ return -EINVAL;
+
+ if (!is_allowed_range(base_addr, size))
return -EINVAL;
if (is_zero_pfn(base_addr >> PAGE_SHIFT))
@@ -632,6 +796,8 @@ static u64 error_param2;
static u64 error_param3;
static u64 error_param4;
static struct dentry *einj_debug_dir;
+static char einj_buf[32];
+static bool einj_v2_enabled;
static struct { u32 mask; const char *str; } const einj_error_type_string[] = {
{ BIT(0), "Processor Correctable" },
{ BIT(1), "Processor Uncorrectable non-fatal" },
@@ -648,6 +814,12 @@ static struct { u32 mask; const char *str; } const einj_error_type_string[] = {
{ BIT(31), "Vendor Defined Error Types" },
};
+static struct { u32 mask; const char *str; } const einjv2_error_type_string[] = {
+ { BIT(0), "EINJV2 Processor Error" },
+ { BIT(1), "EINJV2 Memory Error" },
+ { BIT(2), "EINJV2 PCI Express Error" },
+};
+
static int available_error_type_show(struct seq_file *m, void *v)
{
@@ -655,17 +827,22 @@ static int available_error_type_show(struct seq_file *m, void *v)
if (available_error_type & einj_error_type_string[pos].mask)
seq_printf(m, "0x%08x\t%s\n", einj_error_type_string[pos].mask,
einj_error_type_string[pos].str);
-
+ if ((available_error_type & ACPI65_EINJV2_SUPP) && einj_v2_enabled) {
+ for (int pos = 0; pos < ARRAY_SIZE(einjv2_error_type_string); pos++) {
+ if (available_error_type_v2 & einjv2_error_type_string[pos].mask)
+ seq_printf(m, "V2_0x%08x\t%s\n", einjv2_error_type_string[pos].mask,
+ einjv2_error_type_string[pos].str);
+ }
+ }
return 0;
}
DEFINE_SHOW_ATTRIBUTE(available_error_type);
-static int error_type_get(void *data, u64 *val)
+static ssize_t error_type_get(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
- *val = error_type;
-
- return 0;
+ return simple_read_from_buffer(buf, count, ppos, einj_buf, strlen(einj_buf));
}
bool einj_is_cxl_error_type(u64 type)
@@ -692,15 +869,35 @@ int einj_validate_error_type(u64 type)
if (tval & (tval - 1))
return -EINVAL;
if (!vendor)
- if (!(type & available_error_type))
+ if (!(type & (available_error_type | available_error_type_v2)))
return -EINVAL;
return 0;
}
-static int error_type_set(void *data, u64 val)
+static ssize_t error_type_set(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
{
int rc;
+ u64 val;
+
+ /* Leave the last character for the NUL terminator */
+ if (count > sizeof(einj_buf) - 1)
+ return -EINVAL;
+
+ memset(einj_buf, 0, sizeof(einj_buf));
+ if (copy_from_user(einj_buf, buf, count))
+ return -EFAULT;
+
+ if (strncmp(einj_buf, "V2_", 3) == 0) {
+ if (!sscanf(einj_buf, "V2_%llx", &val))
+ return -EINVAL;
+ is_v2 = true;
+ } else {
+ if (!sscanf(einj_buf, "%llx", &val))
+ return -EINVAL;
+ is_v2 = false;
+ }
rc = einj_validate_error_type(val);
if (rc)
@@ -708,17 +905,24 @@ static int error_type_set(void *data, u64 val)
error_type = val;
- return 0;
+ return count;
}
-DEFINE_DEBUGFS_ATTRIBUTE(error_type_fops, error_type_get, error_type_set,
- "0x%llx\n");
+static const struct file_operations error_type_fops = {
+ .read = error_type_get,
+ .write = error_type_set,
+};
static int error_inject_set(void *data, u64 val)
{
if (!error_type)
return -EINVAL;
+ if (is_v2)
+ error_flags |= SETWA_FLAGS_EINJV2;
+ else
+ error_flags &= ~SETWA_FLAGS_EINJV2;
+
return einj_error_inject(error_type, error_flags, error_param1, error_param2,
error_param3, error_param4);
}
@@ -741,6 +945,98 @@ static int einj_check_table(struct acpi_table_einj *einj_tab)
return 0;
}
+static ssize_t u128_read(struct file *f, char __user *buf, size_t count, loff_t *off)
+{
+ char output[2 * COMPONENT_LEN + 1];
+ u8 *data = f->f_inode->i_private;
+ int i;
+
+ if (*off >= sizeof(output))
+ return 0;
+
+ for (i = 0; i < COMPONENT_LEN; i++)
+ sprintf(output + 2 * i, "%.02x", data[COMPONENT_LEN - i - 1]);
+ output[2 * COMPONENT_LEN] = '\n';
+
+ return simple_read_from_buffer(buf, count, off, output, sizeof(output));
+}
+
+static ssize_t u128_write(struct file *f, const char __user *buf, size_t count, loff_t *off)
+{
+ char input[2 + 2 * COMPONENT_LEN + 2];
+ u8 *save = f->f_inode->i_private;
+ u8 tmp[COMPONENT_LEN];
+ char byte[3] = {};
+ char *s, *e;
+ ssize_t c;
+ long val;
+ int i;
+
+ /* Require that user supply whole input line in one write(2) syscall */
+ if (*off)
+ return -EINVAL;
+
+ c = simple_write_to_buffer(input, sizeof(input), off, buf, count);
+ if (c < 0)
+ return c;
+
+ if (c < 1 || input[c - 1] != '\n')
+ return -EINVAL;
+
+ /* Empty line means invalidate this entry */
+ if (c == 1) {
+ memset(save, 0xff, COMPONENT_LEN);
+ return c;
+ }
+
+ if (input[0] == '0' && (input[1] == 'x' || input[1] == 'X'))
+ s = input + 2;
+ else
+ s = input;
+ e = input + c - 1;
+
+ for (i = 0; i < COMPONENT_LEN; i++) {
+ byte[1] = *--e;
+ byte[0] = e > s ? *--e : '0';
+ if (kstrtol(byte, 16, &val))
+ return -EINVAL;
+ tmp[i] = val;
+ if (e <= s)
+ break;
+ }
+ while (++i < COMPONENT_LEN)
+ tmp[i] = 0;
+
+ memcpy(save, tmp, COMPONENT_LEN);
+
+ return c;
+}
+
+static const struct file_operations u128_fops = {
+ .read = u128_read,
+ .write = u128_write,
+};
+
+static bool setup_einjv2_component_files(void)
+{
+ char name[32];
+
+ syndrome_data = kcalloc(max_nr_components, sizeof(syndrome_data[0]), GFP_KERNEL);
+ if (!syndrome_data)
+ return false;
+
+ for (int i = 0; i < max_nr_components; i++) {
+ sprintf(name, "component_id%d", i);
+ debugfs_create_file(name, 0600, einj_debug_dir,
+ &syndrome_data[i].comp_id, &u128_fops);
+ sprintf(name, "component_syndrome%d", i);
+ debugfs_create_file(name, 0600, einj_debug_dir,
+ &syndrome_data[i].comp_synd, &u128_fops);
+ }
+
+ return true;
+}
+
static int __init einj_probe(struct faux_device *fdev)
{
int rc;
@@ -764,7 +1060,7 @@ static int __init einj_probe(struct faux_device *fdev)
goto err_put_table;
}
- rc = einj_get_available_error_type(&available_error_type);
+ rc = einj_get_available_error_types(&available_error_type, &available_error_type_v2);
if (rc)
goto err_put_table;
@@ -812,6 +1108,8 @@ static int __init einj_probe(struct faux_device *fdev)
&error_param4);
debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR,
einj_debug_dir, &notrigger);
+ if (available_error_type & ACPI65_EINJV2_SUPP)
+ einj_v2_enabled = setup_einjv2_component_files();
}
if (vendor_dev[0]) {
@@ -842,14 +1140,19 @@ err_put_table:
return rc;
}
-static void __exit einj_remove(struct faux_device *fdev)
+static void einj_remove(struct faux_device *fdev)
{
struct apei_exec_context ctx;
if (einj_param) {
- acpi_size size = (acpi5) ?
- sizeof(struct set_error_type_with_address) :
- sizeof(struct einj_parameter);
+ acpi_size size;
+
+ if (v66param_size)
+ size = v66param_size;
+ else if (acpi5)
+ size = v5param_size;
+ else
+ size = sizeof(struct einj_parameter);
acpi_os_unmap_iomem(einj_param, size);
if (vendor_errors.size)
@@ -860,19 +1163,14 @@ static void __exit einj_remove(struct faux_device *fdev)
apei_resources_release(&einj_resources);
apei_resources_fini(&einj_resources);
debugfs_remove_recursive(einj_debug_dir);
+ kfree(syndrome_data);
acpi_put_table((struct acpi_table_header *)einj_tab);
}
static struct faux_device *einj_dev;
-/*
- * einj_remove() lives in .exit.text. For drivers registered via
- * platform_driver_probe() this is ok because they cannot get unbound at
- * runtime. So mark the driver struct with __refdata to prevent modpost
- * triggering a section mismatch warning.
- */
-static struct faux_device_ops einj_device_ops __refdata = {
+static struct faux_device_ops einj_device_ops = {
.probe = einj_probe,
- .remove = __exit_p(einj_remove),
+ .remove = einj_remove,
};
static int __init einj_init(void)
@@ -883,19 +1181,16 @@ static int __init einj_init(void)
}
einj_dev = faux_device_create("acpi-einj", NULL, &einj_device_ops);
- if (!einj_dev)
- return -ENODEV;
- einj_initialized = true;
+ if (einj_dev)
+ einj_initialized = true;
return 0;
}
static void __exit einj_exit(void)
{
- if (einj_initialized)
- faux_device_destroy(einj_dev);
-
+ faux_device_destroy(einj_dev);
}
module_init(einj_init);
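
The custom file_operations replace the old x64 debugfs attribute so
that a "V2_" prefix can switch the interface into EINJv2 mode. The
parse reduces to the sketch below; writing, say, "V2_0x2" to the
error_type file (typically /sys/kernel/debug/apei/einj/error_type)
would then select the EINJv2 memory error type:

    /* Sketch of the prefix handling in error_type_set() above. */
    static int parse_error_type(const char *buf, u64 *val, bool *v2)
    {
        *v2 = (strncmp(buf, "V2_", 3) == 0);
        if (*v2)
            return sscanf(buf, "V2_%llx", val) == 1 ? 0 : -EINVAL;
        return sscanf(buf, "%llx", val) == 1 ? 0 : -EINVAL;
    }
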
diff --git a/drivers/acpi/apei/einj-cxl.c b/drivers/acpi/apei/einj-cxl.c
index 78da9ae543a2..e70a416ec925 100644
--- a/drivers/acpi/apei/einj-cxl.c
+++ b/drivers/acpi/apei/einj-cxl.c
@@ -30,7 +30,7 @@ int einj_cxl_available_error_type_show(struct seq_file *m, void *v)
int cxl_err, rc;
u32 available_error_type = 0;
- rc = einj_get_available_error_type(&available_error_type);
+ rc = einj_get_available_error_type(&available_error_type, ACPI_EINJ_GET_ERROR_TYPE);
if (rc)
return rc;
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index 246076341e8c..ff0e8bf8e97a 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -60,9 +60,8 @@ static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
switch (cmd) {
case APEI_ERST_CLEAR_RECORD:
- rc = copy_from_user(&record_id, (void __user *)arg,
- sizeof(record_id));
- if (rc)
+ if (copy_from_user(&record_id, (void __user *)arg,
+ sizeof(record_id)))
return -EFAULT;
return erst_clear(record_id);
case APEI_ERST_GET_RECORD_COUNT:
@@ -175,8 +174,7 @@ static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf,
erst_dbg_buf = p;
erst_dbg_buf_len = usize;
}
- rc = copy_from_user(erst_dbg_buf, ubuf, usize);
- if (rc) {
+ if (copy_from_user(erst_dbg_buf, ubuf, usize)) {
rc = -EFAULT;
goto out;
}
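
Both hunks above fix the same idiom: copy_from_user() returns the
number of bytes it could not copy, not an errno, so the only correct
translation of a nonzero result is -EFAULT rather than the leftover
byte count:

    if (copy_from_user(dst, ubuf, len))
        return -EFAULT;     /* never "return rc" with the byte count */
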
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index f0584ccad451..0dc767392a6c 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -22,6 +22,7 @@
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
@@ -43,6 +44,7 @@
#include <linux/uuid.h>
#include <linux/ras.h>
#include <linux/task_work.h>
+#include <linux/vmcore_info.h>
#include <acpi/actbl1.h>
#include <acpi/ghes.h>
@@ -464,39 +466,58 @@ static void ghes_clear_estatus(struct ghes *ghes,
ghes_ack_error(ghes->generic_v2);
}
-/*
- * Called as task_work before returning to user-space.
- * Ensure any queued work has been done before we return to the context that
- * triggered the notification.
+/**
+ * struct ghes_task_work - for synchronous RAS event
+ *
+ * @twork: callback_head for task work
+ * @pfn: page frame number of corrupted page
+ * @flags: work control flags
+ *
+ * Structure to pass task work to be handled before
+ * returning to user-space via task_work_add().
*/
-static void ghes_kick_task_work(struct callback_head *head)
+struct ghes_task_work {
+ struct callback_head twork;
+ u64 pfn;
+ int flags;
+};
+
+static void memory_failure_cb(struct callback_head *twork)
{
- struct acpi_hest_generic_status *estatus;
- struct ghes_estatus_node *estatus_node;
- u32 node_len;
+ struct ghes_task_work *twcb = container_of(twork, struct ghes_task_work, twork);
+ int ret;
- estatus_node = container_of(head, struct ghes_estatus_node, task_work);
- if (IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
- memory_failure_queue_kick(estatus_node->task_work_cpu);
+ ret = memory_failure(twcb->pfn, twcb->flags);
+ gen_pool_free(ghes_estatus_pool, (unsigned long)twcb, sizeof(*twcb));
- estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
- node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus));
- gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
+ if (!ret || ret == -EHWPOISON || ret == -EOPNOTSUPP)
+ return;
+
+ pr_err("%#llx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
+ twcb->pfn, current->comm, task_pid_nr(current));
+ force_sig(SIGBUS);
}
static bool ghes_do_memory_failure(u64 physical_addr, int flags)
{
+ struct ghes_task_work *twcb;
unsigned long pfn;
if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
return false;
pfn = PHYS_PFN(physical_addr);
- if (!pfn_valid(pfn) && !arch_is_platform_page(physical_addr)) {
- pr_warn_ratelimited(FW_WARN GHES_PFX
- "Invalid address in generic error data: %#llx\n",
- physical_addr);
- return false;
+
+ if (flags == MF_ACTION_REQUIRED && current->mm) {
+ twcb = (void *)gen_pool_alloc(ghes_estatus_pool, sizeof(*twcb));
+ if (!twcb)
+ return false;
+
+ twcb->pfn = pfn;
+ twcb->flags = flags;
+ init_task_work(&twcb->twork, memory_failure_cb);
+ task_work_add(current, &twcb->twork, TWA_RESUME);
+ return true;
}
memory_failure_queue(pfn, flags);
@@ -527,26 +548,25 @@ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
}
static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
- int sev, bool sync)
+ int sev, bool sync)
{
struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
int flags = sync ? MF_ACTION_REQUIRED : 0;
+ char error_type[120];
bool queued = false;
int sec_sev, i;
char *p;
- log_arm_hw_error(err);
-
sec_sev = ghes_severity(gdata->error_severity);
+ log_arm_hw_error(err, sec_sev);
if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE)
return false;
p = (char *)(err + 1);
for (i = 0; i < err->err_info_num; i++) {
struct cper_arm_err_info *err_info = (struct cper_arm_err_info *)p;
- bool is_cache = (err_info->type == CPER_ARM_CACHE_ERROR);
+ bool is_cache = err_info->type & CPER_ARM_CACHE_ERROR;
bool has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR);
- const char *error_type = "unknown error";
/*
* The field (err_info->error_info & BIT(26)) is fixed to set to
@@ -560,12 +580,15 @@ static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
continue;
}
- if (err_info->type < ARRAY_SIZE(cper_proc_error_type_strs))
- error_type = cper_proc_error_type_strs[err_info->type];
+ cper_bits_to_str(error_type, sizeof(error_type),
+ FIELD_GET(CPER_ARM_ERR_TYPE_MASK, err_info->type),
+ cper_proc_error_type_strs,
+ ARRAY_SIZE(cper_proc_error_type_strs));
pr_warn_ratelimited(FW_WARN GHES_PFX
- "Unhandled processor error type: %s\n",
- error_type);
+ "Unhandled processor error type 0x%02x: %s%s\n",
+ err_info->type, error_type,
+ (err_info->type & ~CPER_ARM_ERR_TYPE_MASK) ? " with reserved bit(s)" : "");
p += err_info->length;
}
@@ -842,7 +865,41 @@ int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
}
EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, "CXL");
-static bool ghes_do_proc(struct ghes *ghes,
+static void ghes_log_hwerr(int sev, guid_t *sec_type)
+{
+ if (sev != CPER_SEV_RECOVERABLE)
+ return;
+
+ if (guid_equal(sec_type, &CPER_SEC_PROC_ARM) ||
+ guid_equal(sec_type, &CPER_SEC_PROC_GENERIC) ||
+ guid_equal(sec_type, &CPER_SEC_PROC_IA)) {
+ hwerr_log_error_type(HWERR_RECOV_CPU);
+ return;
+ }
+
+ if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR) ||
+ guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID) ||
+ guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID) ||
+ guid_equal(sec_type, &CPER_SEC_CXL_MEM_MODULE_GUID)) {
+ hwerr_log_error_type(HWERR_RECOV_CXL);
+ return;
+ }
+
+ if (guid_equal(sec_type, &CPER_SEC_PCIE) ||
+ guid_equal(sec_type, &CPER_SEC_PCI_X_BUS)) {
+ hwerr_log_error_type(HWERR_RECOV_PCI);
+ return;
+ }
+
+ if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
+ hwerr_log_error_type(HWERR_RECOV_MEMORY);
+ return;
+ }
+
+ hwerr_log_error_type(HWERR_RECOV_OTHERS);
+}
+
+static void ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
int sev, sec_sev;
@@ -863,6 +920,7 @@ static bool ghes_do_proc(struct ghes *ghes,
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
fru_text = gdata->fru_text;
+ ghes_log_hwerr(sev, sec_type);
if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
@@ -870,11 +928,9 @@ static bool ghes_do_proc(struct ghes *ghes,
arch_apei_report_mem_error(sev, mem_err);
queued = ghes_handle_memory_failure(gdata, sev, sync);
- }
- else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
+ } else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
ghes_handle_aer(gdata);
- }
- else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
+ } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
queued = ghes_handle_arm_hw_error(gdata, sev, sync);
} else if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR)) {
struct cxl_cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
@@ -902,7 +958,16 @@ static bool ghes_do_proc(struct ghes *ghes,
}
}
- return queued;
+ /*
+ * If no memory failure work is queued for abnormal synchronous
+ * errors, do a force kill.
+ */
+ if (sync && !queued) {
+ dev_err(ghes->dev,
+ HW_ERR GHES_PFX "%s:%d: synchronous unrecoverable error (SIGBUS)\n",
+ current->comm, task_pid_nr(current));
+ force_sig(SIGBUS);
+ }
}
static void __ghes_print_estatus(const char *pfx,
@@ -1088,6 +1153,8 @@ static void __ghes_panic(struct ghes *ghes,
__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
+
ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
if (!panic_timeout)
@@ -1171,12 +1238,10 @@ static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
int ret = NOTIFY_DONE;
spin_lock_irqsave(&ghes_notify_lock_irq, flags);
- rcu_read_lock();
list_for_each_entry_rcu(ghes, &ghes_hed, list) {
if (!ghes_proc(ghes))
ret = NOTIFY_OK;
}
- rcu_read_unlock();
spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
return ret;
@@ -1206,9 +1271,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
struct acpi_hest_generic_status *estatus;
- bool task_work_pending;
u32 len, node_len;
- int ret;
llnode = llist_del_all(&ghes_estatus_llist);
/*
@@ -1223,25 +1286,16 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
len = cper_estatus_len(estatus);
node_len = GHES_ESTATUS_NODE_LEN(len);
- task_work_pending = ghes_do_proc(estatus_node->ghes, estatus);
+
+ ghes_do_proc(estatus_node->ghes, estatus);
+
if (!ghes_estatus_cached(estatus)) {
generic = estatus_node->generic;
if (ghes_print_estatus(NULL, generic, estatus))
ghes_estatus_cache_add(generic, estatus);
}
-
- if (task_work_pending && current->mm) {
- estatus_node->task_work.func = ghes_kick_task_work;
- estatus_node->task_work_cpu = smp_processor_id();
- ret = task_work_add(current, &estatus_node->task_work,
- TWA_RESUME);
- if (ret)
- estatus_node->task_work.func = NULL;
- }
-
- if (!estatus_node->task_work.func)
- gen_pool_free(ghes_estatus_pool,
- (unsigned long)estatus_node, node_len);
+ gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
+ node_len);
llnode = next;
}
@@ -1302,7 +1356,6 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
estatus_node->ghes = ghes;
estatus_node->generic = ghes->generic;
- estatus_node->task_work.func = NULL;
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) {
diff --git a/drivers/acpi/arm64/Kconfig b/drivers/acpi/arm64/Kconfig
index b3ed6212244c..f2fd79f22e7d 100644
--- a/drivers/acpi/arm64/Kconfig
+++ b/drivers/acpi/arm64/Kconfig
@@ -21,3 +21,6 @@ config ACPI_AGDI
config ACPI_APMT
bool
+
+config ACPI_MPAM
+ bool
diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile
index 05ecde9eaabe..9390b57cb564 100644
--- a/drivers/acpi/arm64/Makefile
+++ b/drivers/acpi/arm64/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_ACPI_APMT) += apmt.o
obj-$(CONFIG_ACPI_FFH) += ffh.o
obj-$(CONFIG_ACPI_GTDT) += gtdt.o
obj-$(CONFIG_ACPI_IORT) += iort.o
+obj-$(CONFIG_ACPI_MPAM) += mpam.o
obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o
obj-$(CONFIG_ARM_AMBA) += amba.o
obj-y += dma.o init.o
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index 70f8290b659d..ffc867bac2d6 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -303,40 +303,6 @@ error:
return -EINVAL;
}
-/**
- * acpi_arch_timer_mem_init() - Get the info of all GT blocks in GTDT table.
- * @timer_mem: The pointer to the array of struct arch_timer_mem for returning
- * the result of parsing. The element number of this array should
- * be platform_timer_count(the total number of platform timers).
- * @timer_count: It points to a integer variable which is used for storing the
- * number of GT blocks we have parsed.
- *
- * Return: 0 if success, -EINVAL/-ENODEV if error.
- */
-int __init acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem,
- int *timer_count)
-{
- int ret;
- void *platform_timer;
-
- *timer_count = 0;
- for_each_platform_timer(platform_timer) {
- if (is_timer_block(platform_timer)) {
- ret = gtdt_parse_timer_block(platform_timer, timer_mem);
- if (ret)
- return ret;
- timer_mem++;
- (*timer_count)++;
- }
- }
-
- if (*timer_count)
- pr_info("found %d memory-mapped timer block(s).\n",
- *timer_count);
-
- return 0;
-}
-
/*
* Initialize a SBSA generic Watchdog platform device info from GTDT
*/
@@ -388,11 +354,11 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
return 0;
}
-static int __init gtdt_sbsa_gwdt_init(void)
+static int __init gtdt_platform_timer_init(void)
{
void *platform_timer;
struct acpi_table_header *table;
- int ret, timer_count, gwdt_count = 0;
+ int ret, timer_count, gwdt_count = 0, mmio_timer_count = 0;
if (acpi_disabled)
return 0;
@@ -414,20 +380,41 @@ static int __init gtdt_sbsa_gwdt_init(void)
goto out_put_gtdt;
for_each_platform_timer(platform_timer) {
+ ret = 0;
+
if (is_non_secure_watchdog(platform_timer)) {
ret = gtdt_import_sbsa_gwdt(platform_timer, gwdt_count);
if (ret)
- break;
+ continue;
gwdt_count++;
+ } else if (is_timer_block(platform_timer)) {
+ struct arch_timer_mem atm = {};
+ struct platform_device *pdev;
+
+ ret = gtdt_parse_timer_block(platform_timer, &atm);
+ if (ret)
+ continue;
+
+ pdev = platform_device_register_data(NULL, "gtdt-arm-mmio-timer",
+ mmio_timer_count, &atm,
+ sizeof(atm));
+ if (IS_ERR(pdev)) {
+ pr_err("Can't register timer %d\n", mmio_timer_count);
+ continue;
+ }
+
+ mmio_timer_count++;
}
}
if (gwdt_count)
pr_info("found %d SBSA generic Watchdog(s).\n", gwdt_count);
+ if (mmio_timer_count)
+ pr_info("found %d Generic MMIO timer(s).\n", mmio_timer_count);
out_put_gtdt:
acpi_put_table(table);
return ret;
}
-device_initcall(gtdt_sbsa_gwdt_init);
+device_initcall(gtdt_platform_timer_init);
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 98759d6199d3..65f0f56ad753 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -937,8 +937,10 @@ static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
new_sids = krealloc_array(sids, count + new_count,
sizeof(*new_sids), GFP_KERNEL);
- if (!new_sids)
+ if (!new_sids) {
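+		/* krealloc_array() leaves the old buffer intact on failure */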
+ kfree(sids);
return NULL;
+ }
for (i = count; i < total_count; i++)
new_sids[i] = id_start++;
diff --git a/drivers/acpi/arm64/mpam.c b/drivers/acpi/arm64/mpam.c
new file mode 100644
index 000000000000..84963a20c3e7
--- /dev/null
+++ b/drivers/acpi/arm64/mpam.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2025 Arm Ltd.
+
+/* Parse the MPAM ACPI table, feeding the discovered nodes into the driver */
+
+#define pr_fmt(fmt) "ACPI MPAM: " fmt
+
+#include <linux/acpi.h>
+#include <linux/arm_mpam.h>
+#include <linux/bits.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/platform_device.h>
+
+#include <acpi/processor.h>
+
+/*
+ * Flags for acpi_mpam_msc_node.*_interrupt_flags.
+ * See 2.1.1 Interrupt Flags, Table 5, of DEN0065B_MPAM_ACPI_3.0-bet.
+ */
+#define ACPI_MPAM_MSC_IRQ_MODE BIT(0)
+#define ACPI_MPAM_MSC_IRQ_TYPE_MASK GENMASK(2, 1)
+#define ACPI_MPAM_MSC_IRQ_TYPE_WIRED 0
+#define ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_MASK BIT(3)
+#define ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_PROCESSOR 0
+#define ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_PROCESSOR_CONTAINER 1
+#define ACPI_MPAM_MSC_IRQ_AFFINITY_VALID BIT(4)
+
+/*
+ * Encodings for the MSC node body interface type field.
+ * See 2.1 MPAM MSC node, Table 4 of DEN0065B_MPAM_ACPI_3.0-bet.
+ */
+#define ACPI_MPAM_MSC_IFACE_MMIO 0x00
+#define ACPI_MPAM_MSC_IFACE_PCC 0x0a
+
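+/*
+ * An interrupt whose affinity target is a processor container describes a
+ * PPI partitioned across a group of CPUs, which this driver does not
+ * support; such entries are rejected.
+ */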
+static bool _is_ppi_partition(u32 flags)
+{
+ u32 aff_type, is_ppi;
+ bool ret;
+
+ is_ppi = FIELD_GET(ACPI_MPAM_MSC_IRQ_AFFINITY_VALID, flags);
+ if (!is_ppi)
+ return false;
+
+ aff_type = FIELD_GET(ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_MASK, flags);
+ ret = (aff_type == ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_PROCESSOR_CONTAINER);
+ if (ret)
+ pr_err_once("Partitioned interrupts not supported\n");
+
+ return ret;
+}
+
+static int acpi_mpam_register_irq(struct platform_device *pdev,
+ u32 intid, u32 flags)
+{
+ int irq;
+ u32 int_type;
+ int trigger;
+
+ if (!intid)
+ return -EINVAL;
+
+ if (_is_ppi_partition(flags))
+ return -EINVAL;
+
+ trigger = FIELD_GET(ACPI_MPAM_MSC_IRQ_MODE, flags);
+ int_type = FIELD_GET(ACPI_MPAM_MSC_IRQ_TYPE_MASK, flags);
+ if (int_type != ACPI_MPAM_MSC_IRQ_TYPE_WIRED)
+ return -EINVAL;
+
+ irq = acpi_register_gsi(&pdev->dev, intid, trigger, ACPI_ACTIVE_HIGH);
+ if (irq < 0)
+ pr_err_once("Failed to register interrupt 0x%x with ACPI\n", intid);
+
+ return irq;
+}
+
+static void acpi_mpam_parse_irqs(struct platform_device *pdev,
+ struct acpi_mpam_msc_node *tbl_msc,
+ struct resource *res, int *res_idx)
+{
+ u32 flags, intid;
+ int irq;
+
+ intid = tbl_msc->overflow_interrupt;
+ flags = tbl_msc->overflow_interrupt_flags;
+ irq = acpi_mpam_register_irq(pdev, intid, flags);
+ if (irq > 0)
+ res[(*res_idx)++] = DEFINE_RES_IRQ_NAMED(irq, "overflow");
+
+ intid = tbl_msc->error_interrupt;
+ flags = tbl_msc->error_interrupt_flags;
+ irq = acpi_mpam_register_irq(pdev, intid, flags);
+ if (irq > 0)
+ res[(*res_idx)++] = DEFINE_RES_IRQ_NAMED(irq, "error");
+}
+
+static int acpi_mpam_parse_resource(struct mpam_msc *msc,
+ struct acpi_mpam_resource_node *res)
+{
+ int level, nid;
+ u32 cache_id;
+
+ switch (res->locator_type) {
+ case ACPI_MPAM_LOCATION_TYPE_PROCESSOR_CACHE:
+ cache_id = res->locator.cache_locator.cache_reference;
+ level = find_acpi_cache_level_from_id(cache_id);
+ if (level <= 0) {
+ pr_err_once("Bad level (%d) for cache with id %u\n", level, cache_id);
+ return -EINVAL;
+ }
+ return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_CACHE,
+ level, cache_id);
+ case ACPI_MPAM_LOCATION_TYPE_MEMORY:
+ nid = pxm_to_node(res->locator.memory_locator.proximity_domain);
+ if (nid == NUMA_NO_NODE) {
+ pr_debug("Bad proximity domain %lld, using node 0 instead\n",
+ res->locator.memory_locator.proximity_domain);
+ nid = 0;
+ }
+ return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_MEMORY,
+ MPAM_CLASS_ID_DEFAULT, nid);
+ default:
+ /* These get discovered later and are treated as unknown */
+ return 0;
+ }
+}
+
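+/*
+ * Each resource node is immediately followed by its functional-dependency
+ * descriptors, so the walk below advances by sizeof(*resource) plus
+ * num_functional_deps entries; the max_deps check rejects a dependency
+ * count that would run past the end of the table.
+ */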
+int acpi_mpam_parse_resources(struct mpam_msc *msc,
+ struct acpi_mpam_msc_node *tbl_msc)
+{
+ int i, err;
+ char *ptr, *table_end;
+ struct acpi_mpam_resource_node *resource;
+
+ table_end = (char *)tbl_msc + tbl_msc->length;
+ ptr = (char *)(tbl_msc + 1);
+ for (i = 0; i < tbl_msc->num_resource_nodes; i++) {
+ u64 max_deps, remaining_table;
+
+ if (ptr + sizeof(*resource) > table_end)
+ return -EINVAL;
+
+ resource = (struct acpi_mpam_resource_node *)ptr;
+
+ remaining_table = table_end - ptr;
+ max_deps = remaining_table / sizeof(struct acpi_mpam_func_deps);
+ if (resource->num_functional_deps > max_deps) {
+			pr_debug("MSC has an impossible number of functional dependencies\n");
+ return -EINVAL;
+ }
+
+ err = acpi_mpam_parse_resource(msc, resource);
+ if (err)
+ return err;
+
+ ptr += sizeof(*resource);
+ ptr += resource->num_functional_deps * sizeof(struct acpi_mpam_func_deps);
+ }
+
+ return 0;
+}
+
+/*
+ * Creates the device power management link and returns true if the
+ * acpi id is valid and usable for cpu affinity. This is the case
+ * when the linked device is a processor or a processor container.
+ */
+static bool __init parse_msc_pm_link(struct acpi_mpam_msc_node *tbl_msc,
+ struct platform_device *pdev,
+ u32 *acpi_id)
+{
+ char hid[sizeof(tbl_msc->hardware_id_linked_device) + 1] = { 0 };
+ bool acpi_id_valid = false;
+ struct acpi_device *buddy;
+ char uid[11];
+ int len;
+
+ memcpy(hid, &tbl_msc->hardware_id_linked_device,
+ sizeof(tbl_msc->hardware_id_linked_device));
+
+ if (!strcmp(hid, ACPI_PROCESSOR_CONTAINER_HID)) {
+ *acpi_id = tbl_msc->instance_id_linked_device;
+ acpi_id_valid = true;
+ }
+
+ len = snprintf(uid, sizeof(uid), "%u",
+ tbl_msc->instance_id_linked_device);
+ if (len >= sizeof(uid)) {
+		pr_debug("Failed to convert uid of device for power management\n");
+ return acpi_id_valid;
+ }
+
+ buddy = acpi_dev_get_first_match_dev(hid, uid, -1);
+ if (buddy) {
+ device_link_add(&pdev->dev, &buddy->dev, DL_FLAG_STATELESS);
+ acpi_dev_put(buddy);
+ }
+
+ return acpi_id_valid;
+}
+
+static int decode_interface_type(struct acpi_mpam_msc_node *tbl_msc,
+ enum mpam_msc_iface *iface)
+{
+ switch (tbl_msc->interface_type) {
+ case ACPI_MPAM_MSC_IFACE_MMIO:
+ *iface = MPAM_IFACE_MMIO;
+ return 0;
+ case ACPI_MPAM_MSC_IFACE_PCC:
+ *iface = MPAM_IFACE_PCC;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct platform_device * __init acpi_mpam_parse_msc(struct acpi_mpam_msc_node *tbl_msc)
+{
+ struct platform_device *pdev __free(platform_device_put) =
+ platform_device_alloc("mpam_msc", tbl_msc->identifier);
+ int next_res = 0, next_prop = 0, err;
+ /* pcc, nrdy, affinity and a sentinel */
+ struct property_entry props[4] = { 0 };
+ /* mmio, 2xirq, no sentinel. */
+ struct resource res[3] = { 0 };
+ struct acpi_device *companion;
+ enum mpam_msc_iface iface;
+ char uid[16];
+ u32 acpi_id;
+
+ if (!pdev)
+ return ERR_PTR(-ENOMEM);
+
+ /* Some power management is described in the namespace: */
+ err = snprintf(uid, sizeof(uid), "%u", tbl_msc->identifier);
+ if (err > 0 && err < sizeof(uid)) {
+ companion = acpi_dev_get_first_match_dev("ARMHAA5C", uid, -1);
+ if (companion) {
+ ACPI_COMPANION_SET(&pdev->dev, companion);
+ acpi_dev_put(companion);
+ } else {
+ pr_debug("MSC.%u: missing namespace entry\n", tbl_msc->identifier);
+ }
+ }
+
+ if (decode_interface_type(tbl_msc, &iface)) {
+ pr_debug("MSC.%u: unknown interface type\n", tbl_msc->identifier);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (iface == MPAM_IFACE_MMIO) {
+ res[next_res++] = DEFINE_RES_MEM_NAMED(tbl_msc->base_address,
+ tbl_msc->mmio_size,
+ "MPAM:MSC");
+ } else if (iface == MPAM_IFACE_PCC) {
+ props[next_prop++] = PROPERTY_ENTRY_U32("pcc-channel",
+ tbl_msc->base_address);
+ }
+
+ acpi_mpam_parse_irqs(pdev, tbl_msc, res, &next_res);
+
+ WARN_ON_ONCE(next_res > ARRAY_SIZE(res));
+ err = platform_device_add_resources(pdev, res, next_res);
+ if (err)
+ return ERR_PTR(err);
+
+ props[next_prop++] = PROPERTY_ENTRY_U32("arm,not-ready-us",
+ tbl_msc->max_nrdy_usec);
+
+ /*
+ * The MSC's CPU affinity is described via its linked power
+ * management device, but only if it points at a Processor or
+ * Processor Container.
+ */
+ if (parse_msc_pm_link(tbl_msc, pdev, &acpi_id))
+ props[next_prop++] = PROPERTY_ENTRY_U32("cpu_affinity", acpi_id);
+
+ WARN_ON_ONCE(next_prop > ARRAY_SIZE(props) - 1);
+ err = device_create_managed_software_node(&pdev->dev, props, NULL);
+ if (err)
+ return ERR_PTR(err);
+
+ /*
+ * Stash the table entry for acpi_mpam_parse_resources() to discover
+ * what this MSC controls.
+ */
+ err = platform_device_add_data(pdev, tbl_msc, tbl_msc->length);
+ if (err)
+ return ERR_PTR(err);
+
+ err = platform_device_add(pdev);
+ if (err)
+ return ERR_PTR(err);
+
+ return_ptr(pdev);
+}
+
+static int __init acpi_mpam_parse(void)
+{
+ char *table_end, *table_offset;
+ struct acpi_mpam_msc_node *tbl_msc;
+ struct platform_device *pdev;
+
+ if (acpi_disabled || !system_supports_mpam())
+ return 0;
+
+ struct acpi_table_header *table __free(acpi_put_table) =
+ acpi_get_table_pointer(ACPI_SIG_MPAM, 0);
+
+ if (IS_ERR(table))
+ return 0;
+
+ if (table->revision < 1) {
+ pr_debug("MPAM ACPI table revision %d not supported\n", table->revision);
+ return 0;
+ }
+
+ table_offset = (char *)(table + 1);
+ table_end = (char *)table + table->length;
+
+ while (table_offset < table_end) {
+ tbl_msc = (struct acpi_mpam_msc_node *)table_offset;
+ if (table_offset + sizeof(*tbl_msc) > table_end ||
+ table_offset + tbl_msc->length > table_end) {
+ pr_err("MSC entry overlaps end of ACPI table\n");
+ return -EINVAL;
+ }
+ table_offset += tbl_msc->length;
+
+ /*
+ * If any of the reserved fields are set, make no attempt to
+ * parse the MSC structure. This MSC will still be counted by
+ * acpi_mpam_count_msc(), meaning the MPAM driver can't probe
+ * against all MSC, and will never be enabled. There is no way
+ * to enable it safely, because we cannot determine safe
+ * system-wide partid and pmg ranges in this situation.
+ */
+ if (tbl_msc->reserved || tbl_msc->reserved1 || tbl_msc->reserved2) {
+ pr_err_once("Unrecognised MSC, MPAM not usable\n");
+ pr_debug("MSC.%u: reserved field set\n", tbl_msc->identifier);
+ continue;
+ }
+
+ if (!tbl_msc->mmio_size) {
+ pr_debug("MSC.%u: marked as disabled\n", tbl_msc->identifier);
+ continue;
+ }
+
+ pdev = acpi_mpam_parse_msc(tbl_msc);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
+
+/**
+ * acpi_mpam_count_msc() - Count the number of MSCs described by firmware.
+ *
+ * Returns the number of MSCs, zero if the table is absent or MPAM is
+ * unsupported, or a negative error code if an MSC entry is malformed.
+ *
+ * This can be called before or in parallel with acpi_mpam_parse().
+ */
+int acpi_mpam_count_msc(void)
+{
+ char *table_end, *table_offset;
+ struct acpi_mpam_msc_node *tbl_msc;
+ int count = 0;
+
+ if (acpi_disabled || !system_supports_mpam())
+ return 0;
+
+ struct acpi_table_header *table __free(acpi_put_table) =
+ acpi_get_table_pointer(ACPI_SIG_MPAM, 0);
+
+ if (IS_ERR(table))
+ return 0;
+
+ if (table->revision < 1)
+ return 0;
+
+ table_offset = (char *)(table + 1);
+ table_end = (char *)table + table->length;
+
+ while (table_offset < table_end) {
+ tbl_msc = (struct acpi_mpam_msc_node *)table_offset;
+
+ if (table_offset + sizeof(*tbl_msc) > table_end)
+ return -EINVAL;
+ if (tbl_msc->length < sizeof(*tbl_msc))
+ return -EINVAL;
+ if (tbl_msc->length > table_end - table_offset)
+ return -EINVAL;
+ table_offset += tbl_msc->length;
+
+ if (!tbl_msc->mmio_size)
+ continue;
+
+ count++;
+ }
+
+ return count;
+}
+
+/*
+ * Call this after ACPI devices have been created, which happens in
+ * acpi_scan_init(), called from subsys_initcall(). PCC requires the mailbox
+ * initialised from postcore_initcall().
+ */
+subsys_initcall_sync(acpi_mpam_parse);
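The table entry stashed with platform_device_add_data() above is how the eventual mpam_msc driver learns what its MSC controls. A minimal sketch of that consumer side, assuming a hypothetical probe function and eliding the driver's real setup (struct mpam_msc comes from <linux/arm_mpam.h>; the allocation shown is illustrative, not from this patch):

	/* Sketch only: not the real mpam_msc driver probe. */
	static int mpam_msc_probe_sketch(struct platform_device *pdev)
	{
		/* Copy of the ACPI MSC node made by acpi_mpam_parse_msc(). */
		struct acpi_mpam_msc_node *tbl_msc = dev_get_platdata(&pdev->dev);
		struct mpam_msc *msc;

		if (!tbl_msc)
			return -ENODEV;

		msc = devm_kzalloc(&pdev->dev, sizeof(*msc), GFP_KERNEL);
		if (!msc)
			return -ENOMEM;

		/* Walk the stashed resource nodes into mpam_ris_create(). */
		return acpi_mpam_parse_resources(msc, tbl_msc);
	}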
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 45593612a4db..34181fa52e93 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -91,8 +91,7 @@ enum {
};
struct acpi_battery {
- struct mutex lock;
- struct mutex sysfs_lock;
+ struct mutex update_lock;
struct power_supply *bat;
struct power_supply_desc bat_desc;
struct acpi_device *device;
@@ -243,23 +242,10 @@ static int acpi_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_POWER_NOW:
- if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) {
+ if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
- break;
- }
-
- val->intval = battery->rate_now * 1000;
- /*
- * When discharging, the current should be reported as a
- * negative number as per the power supply class interface
- * definition.
- */
- if (psp == POWER_SUPPLY_PROP_CURRENT_NOW &&
- (battery->state & ACPI_BATTERY_STATE_DISCHARGING) &&
- acpi_battery_handle_discharging(battery)
- == POWER_SUPPLY_STATUS_DISCHARGING)
- val->intval = -val->intval;
-
+ else
+ val->intval = battery->rate_now * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
@@ -548,11 +534,9 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status = AE_ERROR;
- mutex_lock(&battery->lock);
status = acpi_evaluate_object(battery->device->handle,
use_bix ? "_BIX":"_BIF",
NULL, &buffer);
- mutex_unlock(&battery->lock);
if (ACPI_FAILURE(status)) {
acpi_handle_info(battery->device->handle,
@@ -589,11 +573,8 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
msecs_to_jiffies(cache_time)))
return 0;
- mutex_lock(&battery->lock);
status = acpi_evaluate_object(battery->device->handle, "_BST",
NULL, &buffer);
- mutex_unlock(&battery->lock);
-
if (ACPI_FAILURE(status)) {
acpi_handle_info(battery->device->handle,
"_BST evaluation failed: %s",
@@ -641,11 +622,8 @@ static int acpi_battery_set_alarm(struct acpi_battery *battery)
!test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags))
return -ENODEV;
- mutex_lock(&battery->lock);
status = acpi_execute_simple_method(battery->device->handle, "_BTP",
battery->alarm);
- mutex_unlock(&battery->lock);
-
if (ACPI_FAILURE(status))
return -ENODEV;
@@ -917,15 +895,12 @@ static int sysfs_add_battery(struct acpi_battery *battery)
static void sysfs_remove_battery(struct acpi_battery *battery)
{
- mutex_lock(&battery->sysfs_lock);
- if (!battery->bat) {
- mutex_unlock(&battery->sysfs_lock);
+ if (!battery->bat)
return;
- }
+
battery_hook_remove_battery(battery);
power_supply_unregister(battery->bat);
battery->bat = NULL;
- mutex_unlock(&battery->sysfs_lock);
}
static void find_battery(const struct dmi_header *dm, void *private)
@@ -1085,6 +1060,9 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
if (!battery)
return;
+
+ guard(mutex)(&battery->update_lock);
+
old = battery->bat;
/*
* On Acer Aspire V5-573G notifications are sometimes triggered too
@@ -1107,21 +1085,22 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
}
static int battery_notify(struct notifier_block *nb,
- unsigned long mode, void *_unused)
+ unsigned long mode, void *_unused)
{
struct acpi_battery *battery = container_of(nb, struct acpi_battery,
pm_nb);
- int result;
- switch (mode) {
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
+ if (mode == PM_POST_SUSPEND || mode == PM_POST_HIBERNATION) {
+ guard(mutex)(&battery->update_lock);
+
if (!acpi_battery_present(battery))
return 0;
if (battery->bat) {
acpi_battery_refresh(battery);
} else {
+ int result;
+
result = acpi_battery_get_info(battery);
if (result)
return result;
@@ -1133,7 +1112,6 @@ static int battery_notify(struct notifier_block *nb,
acpi_battery_init_alarm(battery);
acpi_battery_get_state(battery);
- break;
}
return 0;
@@ -1211,6 +1189,8 @@ static int acpi_battery_update_retry(struct acpi_battery *battery)
{
int retry, ret;
+ guard(mutex)(&battery->update_lock);
+
for (retry = 5; retry; retry--) {
ret = acpi_battery_update(battery, false);
if (!ret)
@@ -1221,6 +1201,13 @@ static int acpi_battery_update_retry(struct acpi_battery *battery)
return ret;
}
+static void sysfs_battery_cleanup(struct acpi_battery *battery)
+{
+ guard(mutex)(&battery->update_lock);
+
+ sysfs_remove_battery(battery);
+}
+
static int acpi_battery_add(struct acpi_device *device)
{
int result = 0;
@@ -1239,11 +1226,8 @@ static int acpi_battery_add(struct acpi_device *device)
strscpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME);
strscpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
device->driver_data = battery;
- result = devm_mutex_init(&device->dev, &battery->lock);
- if (result)
- return result;
- result = devm_mutex_init(&device->dev, &battery->sysfs_lock);
+ result = devm_mutex_init(&device->dev, &battery->update_lock);
if (result)
return result;
@@ -1275,7 +1259,7 @@ fail_pm:
device_init_wakeup(&device->dev, 0);
unregister_pm_notifier(&battery->pm_nb);
fail:
- sysfs_remove_battery(battery);
+ sysfs_battery_cleanup(battery);
return result;
}
@@ -1294,6 +1278,9 @@ static void acpi_battery_remove(struct acpi_device *device)
device_init_wakeup(&device->dev, 0);
unregister_pm_notifier(&battery->pm_nb);
+
+ guard(mutex)(&battery->update_lock);
+
sysfs_remove_battery(battery);
}
@@ -1310,6 +1297,9 @@ static int acpi_battery_resume(struct device *dev)
return -EINVAL;
battery->update_time = 0;
+
+ guard(mutex)(&battery->update_lock);
+
acpi_battery_update(battery, true);
return 0;
}
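One note on the pattern used throughout this conversion: guard(mutex)(&battery->update_lock), from <linux/cleanup.h>, acquires the mutex and drops it automatically when the enclosing scope exits, which is why the explicit unlock calls and the unlock-before-early-return dance in the old sysfs_remove_battery() disappear here.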
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index 35ece8e9f15d..0fdd581ef96f 100644
--- a/drivers/acpi/bgrt.c
+++ b/drivers/acpi/bgrt.c
@@ -47,7 +47,7 @@ static const struct bin_attribute *const bgrt_bin_attributes[] = {
static const struct attribute_group bgrt_attribute_group = {
.attrs = bgrt_attributes,
- .bin_attrs_new = bgrt_bin_attributes,
+ .bin_attrs = bgrt_bin_attributes,
};
int __init acpi_parse_bgrt(struct acpi_table_header *table)
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index c2ab2783303f..a984ccd4a2a0 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1406,7 +1406,7 @@ static int __init acpi_bus_init(void)
goto error1;
/*
- * Register the for all standard device notifications.
+ * Register for all standard device notifications.
*/
status =
acpi_install_notify_handler(ACPI_ROOT_OBJECT, ACPI_SYSTEM_NOTIFY,
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 0a7026040188..3c6dd9b4ba0a 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -619,8 +619,10 @@ static int acpi_button_add(struct acpi_device *device)
input_set_drvdata(input, device);
error = input_register_device(input);
- if (error)
+ if (error) {
+ input_free_device(input);
goto err_remove_fs;
+ }
switch (device->device_type) {
case ACPI_BUS_TYPE_POWER_BUTTON:
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index a9ae2fd62863..3bdeeee3414e 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -460,7 +460,7 @@ bool acpi_cpc_valid(void)
if (acpi_disabled)
return false;
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
if (!cpc_ptr)
return false;
@@ -476,7 +476,7 @@ bool cppc_allow_fast_switch(void)
struct cpc_desc *cpc_ptr;
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_online_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
@@ -750,7 +750,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
}
/*
- * Disregard _CPC if the number of entries in the return pachage is not
+ * Disregard _CPC if the number of entries in the return package is not
* as expected, but support future revisions being proper supersets of
* the v3 and only causing more entries to be returned by _CPC.
*/
@@ -1435,7 +1435,7 @@ bool cppc_perf_ctrs_in_pcc(void)
{
int cpu;
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
struct cpc_register_resource *ref_perf_reg;
struct cpc_desc *cpc_desc;
@@ -1876,7 +1876,7 @@ EXPORT_SYMBOL_GPL(cppc_set_perf);
* If desired_reg is in the SystemMemory or SystemIo ACPI address space,
* then assume there is no latency.
*/
-unsigned int cppc_get_transition_latency(int cpu_num)
+int cppc_get_transition_latency(int cpu_num)
{
/*
* Expected transition latency is based on the PCCT timing values
@@ -1889,31 +1889,29 @@ unsigned int cppc_get_transition_latency(int cpu_num)
* completion of a command before issuing the next command,
* in microseconds.
*/
- unsigned int latency_ns = 0;
struct cpc_desc *cpc_desc;
struct cpc_register_resource *desired_reg;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
struct cppc_pcc_data *pcc_ss_data;
+ int latency_ns = 0;
cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
if (!cpc_desc)
- return CPUFREQ_ETERNAL;
+ return -ENODATA;
desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
return 0;
- else if (!CPC_IN_PCC(desired_reg))
- return CPUFREQ_ETERNAL;
- if (pcc_ss_id < 0)
- return CPUFREQ_ETERNAL;
+ if (!CPC_IN_PCC(desired_reg) || pcc_ss_id < 0)
+ return -ENODATA;
pcc_ss_data = pcc_data[pcc_ss_id];
if (pcc_ss_data->pcc_mpar)
latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
- latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
- latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
+ latency_ns = max_t(int, latency_ns, pcc_ss_data->pcc_nominal * 1000);
+ latency_ns = max_t(int, latency_ns, pcc_ss_data->pcc_mrtt * 1000);
return latency_ns;
}
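An illustrative back-of-the-envelope for the PCC path (numbers invented, not from any real PCCT): with pcc_mpar limiting the platform to 6000 commands per minute, the computed bound is 60 * (10^9 / 6000) ≈ 10 ms per command; pcc_nominal and pcc_mrtt are carried in microseconds, hence the * 1000 conversions before taking the maximum.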
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index dbd4446025ec..4e0583274b8f 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1119,6 +1119,8 @@ int acpi_subsys_prepare(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
+ dev_pm_set_strict_midlayer(dev, true);
+
if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) {
int ret = dev->driver->pm->prepare(dev);
@@ -1147,6 +1149,8 @@ void acpi_subsys_complete(struct device *dev)
*/
if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
pm_request_resume(dev);
+
+ dev_pm_set_strict_midlayer(dev, false);
}
EXPORT_SYMBOL_GPL(acpi_subsys_complete);
@@ -1362,6 +1366,8 @@ static int acpi_subsys_poweroff_noirq(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
+
static struct dev_pm_domain acpi_general_pm_domain = {
.ops = {
.runtime_suspend = acpi_subsys_runtime_suspend,
@@ -1382,6 +1388,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
.restore_early = acpi_subsys_restore_early,
#endif
},
+ .detach = acpi_dev_pm_detach,
};
/**
@@ -1465,7 +1472,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
acpi_device_wakeup_disable(adev);
}
- dev->pm_domain->detach = acpi_dev_pm_detach;
return 1;
}
EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 3961fc47152c..cd199fbe4dc9 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -464,7 +464,7 @@ static ssize_t description_show(struct device *dev,
buf[result++] = '\n';
- kfree(str_obj);
+ ACPI_FREE(str_obj);
return result;
}
diff --git a/drivers/acpi/dptf/Makefile b/drivers/acpi/dptf/Makefile
index 297340682f66..e912a3be1d28 100644
--- a/drivers/acpi/dptf/Makefile
+++ b/drivers/acpi/dptf/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_ACPI) += int340x_thermal.o
obj-$(CONFIG_DPTF_POWER) += dptf_power.o
obj-$(CONFIG_DPTF_PCH_FIVR) += dptf_pch_fivr.o
diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c
index 952216c67d58..8d7e555929d3 100644
--- a/drivers/acpi/dptf/dptf_pch_fivr.c
+++ b/drivers/acpi/dptf/dptf_pch_fivr.c
@@ -41,7 +41,7 @@ static int pch_fivr_read(acpi_handle handle, char *method, struct pch_fivr_resp
ret = 0;
release_buffer:
- kfree(buffer.pointer);
+ ACPI_FREE(buffer.pointer);
return ret;
}
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index e8caf4106ff9..55ccbb8ddbe3 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -238,6 +238,10 @@ static const struct acpi_device_id int3407_device_ids[] = {
{"INTC10A5", 0},
{"INTC10D8", 0},
{"INTC10D9", 0},
+ {"INTC1100", 0},
+ {"INTC1101", 0},
+ {"INTC10F7", 0},
+ {"INTC10F8", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
deleted file mode 100644
index aef7aca2161d..000000000000
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ /dev/null
@@ -1,87 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ACPI support for int340x thermal drivers
- *
- * Copyright (C) 2014, Intel Corporation
- * Authors: Zhang Rui <rui.zhang@intel.com>
- */
-
-#include <linux/acpi.h>
-#include <linux/module.h>
-
-#include "../internal.h"
-
-#define INT3401_DEVICE 0X01
-static const struct acpi_device_id int340x_thermal_device_ids[] = {
- {"INT3400"},
- {"INT3401", INT3401_DEVICE},
- {"INT3402"},
- {"INT3403"},
- {"INT3404"},
- {"INT3406"},
- {"INT3407"},
- {"INT3408"},
- {"INT3409"},
- {"INT340A"},
- {"INT340B"},
- {"INT3532"},
- {"INTC1040"},
- {"INTC1041"},
- {"INTC1042"},
- {"INTC1043"},
- {"INTC1044"},
- {"INTC1045"},
- {"INTC1046"},
- {"INTC1047"},
- {"INTC1048"},
- {"INTC1049"},
- {"INTC1050"},
- {"INTC1060"},
- {"INTC1061"},
- {"INTC1062"},
- {"INTC1063"},
- {"INTC1064"},
- {"INTC1065"},
- {"INTC1066"},
- {"INTC1068"},
- {"INTC1069"},
- {"INTC106A"},
- {"INTC106B"},
- {"INTC106C"},
- {"INTC106D"},
- {"INTC10A0"},
- {"INTC10A1"},
- {"INTC10A2"},
- {"INTC10A3"},
- {"INTC10A4"},
- {"INTC10A5"},
- {"INTC10D4"},
- {"INTC10D5"},
- {"INTC10D6"},
- {"INTC10D7"},
- {"INTC10D8"},
- {"INTC10D9"},
- {""},
-};
-
-static int int340x_thermal_handler_attach(struct acpi_device *adev,
- const struct acpi_device_id *id)
-{
- if (IS_ENABLED(CONFIG_INT340X_THERMAL))
- acpi_create_platform_device(adev, NULL);
- /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
- else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) &&
- id->driver_data == INT3401_DEVICE)
- acpi_create_platform_device(adev, NULL);
- return 1;
-}
-
-static struct acpi_scan_handler int340x_thermal_handler = {
- .ids = int340x_thermal_device_ids,
- .attach = int340x_thermal_handler_attach,
-};
-
-void __init acpi_int340x_thermal_init(void)
-{
- acpi_scan_add_handler(&int340x_thermal_handler);
-}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 6f4203716b53..59b3d50ff01e 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -23,8 +23,10 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
@@ -2031,6 +2033,25 @@ void __init acpi_ec_ecdt_probe(void)
goto out;
}
+ if (!strlen(ecdt_ptr->id)) {
+ /*
+ * The ECDT table on some MSI notebooks contains invalid data, together
+ * with an empty ID string ("").
+ *
+ * Section 5.2.15 of the ACPI specification requires the ID string to be
+ * a "fully qualified reference to the (...) embedded controller device",
+ * so this string always has to start with a backslash.
+ *
+	 * However, some ThinkBook machines have an ECDT table with a valid EC
+	 * description but an invalid ID string ("_SB.PC00.LPCB.EC0").
+	 *
+	 * Because of this, we only check whether the ID string is empty, so
+	 * that only the obviously broken cases are rejected.
+ */
+ pr_err(FW_BUG "Ignoring ECDT due to empty ID string\n");
+ goto out;
+ }
+
ec = acpi_ec_alloc();
if (!ec)
goto out;
@@ -2273,7 +2294,8 @@ static int acpi_ec_init_workqueues(void)
ec_wq = alloc_ordered_workqueue("kec", 0);
if (!ec_query_wq)
- ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries);
+ ec_query_wq = alloc_workqueue("kec_query", WQ_PERCPU,
+ ec_max_queries);
if (!ec_wq || !ec_query_wq) {
acpi_ec_destroy_workqueues();
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index 15eba1c70e66..97ce3212edf3 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -11,6 +11,7 @@
#define _ACPI_FAN_H_
#include <linux/kconfig.h>
+#include <linux/limits.h>
#define ACPI_FAN_DEVICE_IDS \
{"INT3404", }, /* Fan */ \
@@ -20,6 +21,8 @@
{"INTC106A", }, /* Fan for Lunar Lake generation */ \
{"INTC10A2", }, /* Fan for Raptor Lake generation */ \
{"INTC10D6", }, /* Fan for Panther Lake generation */ \
+ {"INTC10FE", }, /* Fan for Wildcat Lake generation */ \
+ {"INTC10F5", }, /* Fan for Nova Lake generation */ \
{"PNP0C0B", } /* Generic ACPI fan */
#define ACPI_FPS_NAME_LEN 20
@@ -48,24 +51,64 @@ struct acpi_fan_fst {
};
struct acpi_fan {
+ acpi_handle handle;
bool acpi4;
bool has_fst;
struct acpi_fan_fif fif;
struct acpi_fan_fps *fps;
int fps_count;
+	/* A value of 0 means that trip-point-related functions are not supported */
+ u32 fan_trip_granularity;
+#if IS_REACHABLE(CONFIG_HWMON)
+ struct device *hdev;
+#endif
struct thermal_cooling_device *cdev;
struct device_attribute fst_speed;
struct device_attribute fine_grain_control;
};
-int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst);
+/**
+ * acpi_fan_speed_valid - Check if fan speed value is valid
+ * @speeed: Speed value returned by the ACPI firmware
+ *
+ * Check if the fan speed value returned by the ACPI firmware is valid. This function is
+ * necessary as ACPI firmware implementations can return 0xFFFFFFFF to signal that the
+ * ACPI fan does not support speed reporting. Additionally, some buggy ACPI firmware
+ * implementations return a value larger than the 32-bit integer value defined by
+ * the ACPI specification when using placeholder values. Such invalid values are also
+ * detected by this function.
+ *
+ * Returns: True if the fan speed value is valid, false otherwise.
+ */
+static inline bool acpi_fan_speed_valid(u64 speed)
+{
+ return speed < U32_MAX;
+}
+
+/**
+ * acpi_fan_power_valid - Check if fan power value is valid
+ * @power: Power value returned by the ACPI firmware
+ *
+ * Check if the fan power value returned by the ACPI firmware is valid.
+ * See acpi_fan_speed_valid() for details.
+ *
+ * Returns: True if the fan power value is valid, false otherwise.
+ */
+static inline bool acpi_fan_power_valid(u64 power)
+{
+ return power < U32_MAX;
+}
+
+int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst);
int acpi_fan_create_attributes(struct acpi_device *device);
void acpi_fan_delete_attributes(struct acpi_device *device);
#if IS_REACHABLE(CONFIG_HWMON)
-int devm_acpi_fan_create_hwmon(struct acpi_device *device);
+int devm_acpi_fan_create_hwmon(struct device *dev);
+void acpi_fan_notify_hwmon(struct device *dev);
#else
-static inline int devm_acpi_fan_create_hwmon(struct acpi_device *device) { return 0; };
+static inline int devm_acpi_fan_create_hwmon(struct device *dev) { return 0; };
+static inline void acpi_fan_notify_hwmon(struct device *dev) { };
#endif
#endif
diff --git a/drivers/acpi/fan_attr.c b/drivers/acpi/fan_attr.c
index 22d29ac2447c..9b7fa52f3c2a 100644
--- a/drivers/acpi/fan_attr.c
+++ b/drivers/acpi/fan_attr.c
@@ -22,9 +22,9 @@ static ssize_t show_state(struct device *dev, struct device_attribute *attr, cha
int count;
if (fps->control == 0xFFFFFFFF || fps->control > 100)
- count = scnprintf(buf, PAGE_SIZE, "not-defined:");
+ count = sysfs_emit(buf, "not-defined:");
else
- count = scnprintf(buf, PAGE_SIZE, "%lld:", fps->control);
+ count = sysfs_emit(buf, "%lld:", fps->control);
if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9)
count += sysfs_emit_at(buf, count, "not-defined:");
@@ -55,11 +55,11 @@ static ssize_t show_fan_speed(struct device *dev, struct device_attribute *attr,
struct acpi_fan_fst fst;
int status;
- status = acpi_fan_get_fst(acpi_dev, &fst);
+ status = acpi_fan_get_fst(acpi_dev->handle, &fst);
if (status)
return status;
- return sprintf(buf, "%lld\n", fst.speed);
+ return sysfs_emit(buf, "%lld\n", fst.speed);
}
static ssize_t show_fine_grain_control(struct device *dev, struct device_attribute *attr, char *buf)
@@ -67,7 +67,7 @@ static ssize_t show_fine_grain_control(struct device *dev, struct device_attribu
struct acpi_device *acpi_dev = container_of(dev, struct acpi_device, dev);
struct acpi_fan *fan = acpi_driver_data(acpi_dev);
- return sprintf(buf, "%d\n", fan->fif.fine_grain_ctrl);
+ return sysfs_emit(buf, "%d\n", fan->fif.fine_grain_ctrl);
}
int acpi_fan_create_attributes(struct acpi_device *device)
diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
index 8ad12ad3aaaf..fb08b8549ed7 100644
--- a/drivers/acpi/fan_core.c
+++ b/drivers/acpi/fan_core.c
@@ -7,11 +7,16 @@
* Copyright (C) 2022 Intel Corporation. All rights reserved.
*/
+#include <linux/bits.h>
#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/uaccess.h>
+#include <linux/uuid.h>
#include <linux/thermal.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
@@ -19,6 +24,26 @@
#include "fan.h"
+#define ACPI_FAN_NOTIFY_STATE_CHANGED 0x80
+
+/*
+ * Defined inside the "Fan Noise Signal" section at
+ * https://learn.microsoft.com/en-us/windows-hardware/design/device-experiences/design-guide.
+ */
+static const guid_t acpi_fan_microsoft_guid = GUID_INIT(0xA7611840, 0x99FE, 0x41AE, 0xA4, 0x88,
+ 0x35, 0xC7, 0x59, 0x26, 0xC8, 0xEB);
+#define ACPI_FAN_DSM_GET_TRIP_POINT_GRANULARITY 1
+#define ACPI_FAN_DSM_SET_TRIP_POINTS 2
+#define ACPI_FAN_DSM_GET_OPERATING_RANGES 3
+
+/*
+ * Ensures that fans with a very low trip point granularity
+ * do not send too many notifications.
+ */
+static uint min_trip_distance = 100;
+module_param(min_trip_distance, uint, 0);
+MODULE_PARM_DESC(min_trip_distance, "Minimum distance between fan speed trip points in RPM");
+
static const struct acpi_device_id fan_device_ids[] = {
ACPI_FAN_DEVICE_IDS,
{"", 0},
@@ -44,25 +69,30 @@ static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long
return 0;
}
-int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst)
+int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
int ret = 0;
- status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- dev_err(&device->dev, "Get fan state failed\n");
- return -ENODEV;
- }
+ status = acpi_evaluate_object(handle, "_FST", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return -EIO;
obj = buffer.pointer;
- if (!obj || obj->type != ACPI_TYPE_PACKAGE ||
- obj->package.count != 3 ||
- obj->package.elements[1].type != ACPI_TYPE_INTEGER) {
- dev_err(&device->dev, "Invalid _FST data\n");
- ret = -EINVAL;
+ if (!obj)
+ return -ENODATA;
+
+ if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3) {
+ ret = -EPROTO;
+ goto err;
+ }
+
+ if (obj->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ obj->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ obj->package.elements[2].type != ACPI_TYPE_INTEGER) {
+ ret = -EPROTO;
goto err;
}
@@ -81,7 +111,7 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
struct acpi_fan_fst fst;
int status, i;
- status = acpi_fan_get_fst(device, &fst);
+ status = acpi_fan_get_fst(device->handle, &fst);
if (status)
return status;
@@ -102,7 +132,7 @@ match_fps:
break;
}
if (i == fan->fps_count) {
- dev_dbg(&device->dev, "Invalid control value returned\n");
+ dev_dbg(&device->dev, "No matching fps control value\n");
return -EINVAL;
}
@@ -203,18 +233,6 @@ static const struct thermal_cooling_device_ops fan_cooling_ops = {
* --------------------------------------------------------------------------
*/
-static bool acpi_fan_has_fst(struct acpi_device *device)
-{
- return acpi_has_method(device->handle, "_FST");
-}
-
-static bool acpi_fan_is_acpi4(struct acpi_device *device)
-{
- return acpi_has_method(device->handle, "_FIF") &&
- acpi_has_method(device->handle, "_FPS") &&
- acpi_has_method(device->handle, "_FSL");
-}
-
static int acpi_fan_get_fif(struct acpi_device *device)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -315,6 +333,182 @@ err:
return status;
}
+static int acpi_fan_dsm_init(struct device *dev)
+{
+ union acpi_object dummy = {
+ .package = {
+ .type = ACPI_TYPE_PACKAGE,
+ .count = 0,
+ .elements = NULL,
+ },
+ };
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+ union acpi_object *obj;
+ int ret = 0;
+
+ if (!acpi_check_dsm(fan->handle, &acpi_fan_microsoft_guid, 0,
+ BIT(ACPI_FAN_DSM_GET_TRIP_POINT_GRANULARITY) |
+ BIT(ACPI_FAN_DSM_SET_TRIP_POINTS)))
+ return 0;
+
+ dev_info(dev, "Using Microsoft fan extensions\n");
+
+ obj = acpi_evaluate_dsm_typed(fan->handle, &acpi_fan_microsoft_guid, 0,
+ ACPI_FAN_DSM_GET_TRIP_POINT_GRANULARITY, &dummy,
+ ACPI_TYPE_INTEGER);
+ if (!obj)
+ return -EIO;
+
+ if (obj->integer.value > U32_MAX)
+ ret = -EOVERFLOW;
+ else
+ fan->fan_trip_granularity = obj->integer.value;
+
+ kfree(obj);
+
+ return ret;
+}
+
+static int acpi_fan_dsm_set_trip_points(struct device *dev, u64 upper, u64 lower)
+{
+ union acpi_object args[2] = {
+ {
+ .integer = {
+ .type = ACPI_TYPE_INTEGER,
+ .value = lower,
+ },
+ },
+ {
+ .integer = {
+ .type = ACPI_TYPE_INTEGER,
+ .value = upper,
+ },
+ },
+ };
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+ union acpi_object in = {
+ .package = {
+ .type = ACPI_TYPE_PACKAGE,
+ .count = ARRAY_SIZE(args),
+ .elements = args,
+ },
+ };
+ union acpi_object *obj;
+
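+	/*
+	 * The result package is not needed here; evaluate the _DSM,
+	 * free whatever it returned and treat the call as best-effort.
+	 */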
+ obj = acpi_evaluate_dsm(fan->handle, &acpi_fan_microsoft_guid, 0,
+ ACPI_FAN_DSM_SET_TRIP_POINTS, &in);
+ kfree(obj);
+
+ return 0;
+}
+
+static int acpi_fan_dsm_start(struct device *dev)
+{
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+ int ret;
+
+ if (!fan->fan_trip_granularity)
+ return 0;
+
+ /*
+ * Some firmware implementations only update the values returned by the
+ * _FST control method when a notification is received. This usually
+ * works with Microsoft Windows as setting up trip points will keep
+ * triggering said notifications, but will cause issues when using _FST
+ * without the Microsoft-specific trip point extension.
+ *
+ * Because of this, an initial notification needs to be triggered to
+	 * start the cycle of trip point updates. This is achieved by setting
+	 * the trip points sequentially to two separate ranges, since per the
+	 * Microsoft specification the firmware should trigger a notification
+	 * immediately if the fan speed is outside the trip point range. This
+	 * _should_ result in at least one notification, as both ranges do not
+ * overlap, meaning that the current fan speed needs to be outside at
+ * least one range.
+ */
+ ret = acpi_fan_dsm_set_trip_points(dev, fan->fan_trip_granularity, 0);
+ if (ret < 0)
+ return ret;
+
+ return acpi_fan_dsm_set_trip_points(dev, fan->fan_trip_granularity * 3,
+ fan->fan_trip_granularity * 2);
+}
+
+static int acpi_fan_dsm_update_trips_points(struct device *dev, struct acpi_fan_fst *fst)
+{
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+ u64 upper, lower;
+
+ if (!fan->fan_trip_granularity)
+ return 0;
+
+ if (!acpi_fan_speed_valid(fst->speed))
+ return -EINVAL;
+
+ upper = roundup_u64(fst->speed + min_trip_distance, fan->fan_trip_granularity);
+ if (fst->speed <= min_trip_distance) {
+ lower = 0;
+ } else {
+ /*
+ * Valid fan speed values cannot be larger than 32 bit, so
+ * we can safely assume that no overflow will happen here.
+ */
+ lower = rounddown((u32)fst->speed - min_trip_distance, fan->fan_trip_granularity);
+ }
+
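+	/*
+	 * Illustrative numbers only: with fan_trip_granularity = 200,
+	 * min_trip_distance = 100 and a current speed of 1234 RPM this
+	 * yields upper = roundup(1334, 200) = 1400 and
+	 * lower = rounddown(1134, 200) = 1000, so the firmware notifies
+	 * once the speed leaves the 1000..1400 RPM window.
+	 */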
+ return acpi_fan_dsm_set_trip_points(dev, upper, lower);
+}
+
+static void acpi_fan_notify_handler(acpi_handle handle, u32 event, void *context)
+{
+ struct device *dev = context;
+ struct acpi_fan_fst fst;
+ int ret;
+
+ switch (event) {
+ case ACPI_FAN_NOTIFY_STATE_CHANGED:
+ /*
+ * The ACPI specification says that we must evaluate _FST when we
+ * receive an ACPI event indicating that the fan state has changed.
+ */
+ ret = acpi_fan_get_fst(handle, &fst);
+ if (ret < 0) {
+ dev_err(dev, "Error retrieving current fan status: %d\n", ret);
+ } else {
+ ret = acpi_fan_dsm_update_trips_points(dev, &fst);
+ if (ret < 0)
+ dev_err(dev, "Failed to update trip points: %d\n", ret);
+ }
+
+ acpi_fan_notify_hwmon(dev);
+ acpi_bus_generate_netlink_event("fan", dev_name(dev), event, 0);
+ break;
+ default:
+ dev_dbg(dev, "Unsupported ACPI notification 0x%x\n", event);
+ break;
+ }
+}
+
+static void acpi_fan_notify_remove(void *data)
+{
+ struct acpi_fan *fan = data;
+
+ acpi_remove_notify_handler(fan->handle, ACPI_DEVICE_NOTIFY, acpi_fan_notify_handler);
+}
+
+static int devm_acpi_fan_notify_init(struct device *dev)
+{
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+ acpi_status status;
+
+ status = acpi_install_notify_handler(fan->handle, ACPI_DEVICE_NOTIFY,
+ acpi_fan_notify_handler, dev);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return devm_add_action_or_reset(dev, acpi_fan_notify_remove, fan);
+}
+
static int acpi_fan_probe(struct platform_device *pdev)
{
int result = 0;
@@ -323,17 +517,24 @@ static int acpi_fan_probe(struct platform_device *pdev)
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
char *name;
+ if (!device)
+ return -ENODEV;
+
fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
if (!fan) {
dev_err(&device->dev, "No memory for fan\n");
return -ENOMEM;
}
+
+ fan->handle = device->handle;
device->driver_data = fan;
platform_set_drvdata(pdev, fan);
- if (acpi_fan_has_fst(device)) {
+ if (acpi_has_method(device->handle, "_FST")) {
fan->has_fst = true;
- fan->acpi4 = acpi_fan_is_acpi4(device);
+ fan->acpi4 = acpi_has_method(device->handle, "_FIF") &&
+ acpi_has_method(device->handle, "_FPS") &&
+ acpi_has_method(device->handle, "_FSL");
}
if (fan->acpi4) {
@@ -347,10 +548,24 @@ static int acpi_fan_probe(struct platform_device *pdev)
}
if (fan->has_fst) {
- result = devm_acpi_fan_create_hwmon(device);
+ result = acpi_fan_dsm_init(&pdev->dev);
+ if (result)
+ return result;
+
+ result = devm_acpi_fan_create_hwmon(&pdev->dev);
if (result)
return result;
+ result = devm_acpi_fan_notify_init(&pdev->dev);
+ if (result)
+ return result;
+
+ result = acpi_fan_dsm_start(&pdev->dev);
+ if (result) {
+ dev_err(&pdev->dev, "Failed to start Microsoft fan extensions\n");
+ return result;
+ }
+
result = acpi_fan_create_attributes(device);
if (result)
return result;
@@ -436,8 +651,14 @@ static int acpi_fan_suspend(struct device *dev)
static int acpi_fan_resume(struct device *dev)
{
- int result;
struct acpi_fan *fan = dev_get_drvdata(dev);
+ int result;
+
+ if (fan->has_fst) {
+ result = acpi_fan_dsm_start(dev);
+ if (result)
+ dev_err(dev, "Failed to start Microsoft fan extensions: %d\n", result);
+ }
if (fan->acpi4)
return 0;
diff --git a/drivers/acpi/fan_hwmon.c b/drivers/acpi/fan_hwmon.c
index e8d90605106e..d3374f8f524b 100644
--- a/drivers/acpi/fan_hwmon.c
+++ b/drivers/acpi/fan_hwmon.c
@@ -15,10 +15,6 @@
#include "fan.h"
-/* Returned when the ACPI fan does not support speed reporting */
-#define FAN_SPEED_UNAVAILABLE U32_MAX
-#define FAN_POWER_UNAVAILABLE U32_MAX
-
static struct acpi_fan_fps *acpi_fan_get_current_fps(struct acpi_fan *fan, u64 control)
{
unsigned int i;
@@ -77,7 +73,7 @@ static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_
* when the associated attribute should not be created.
*/
for (i = 0; i < fan->fps_count; i++) {
- if (fan->fps[i].power != FAN_POWER_UNAVAILABLE)
+ if (acpi_fan_power_valid(fan->fps[i].power))
return 0444;
}
@@ -93,13 +89,12 @@ static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_
static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
{
- struct acpi_device *adev = to_acpi_device(dev->parent);
struct acpi_fan *fan = dev_get_drvdata(dev);
struct acpi_fan_fps *fps;
struct acpi_fan_fst fst;
int ret;
- ret = acpi_fan_get_fst(adev, &fst);
+ ret = acpi_fan_get_fst(fan->handle, &fst);
if (ret < 0)
return ret;
@@ -107,7 +102,7 @@ static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_fan:
switch (attr) {
case hwmon_fan_input:
- if (fst.speed == FAN_SPEED_UNAVAILABLE)
+ if (!acpi_fan_speed_valid(fst.speed))
return -ENODEV;
if (fst.speed > LONG_MAX)
@@ -135,7 +130,7 @@ static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
if (!fps)
return -EIO;
- if (fps->power == FAN_POWER_UNAVAILABLE)
+ if (!acpi_fan_power_valid(fps->power))
return -ENODEV;
if (fps->power > LONG_MAX / MICROWATT_PER_MILLIWATT)
@@ -167,12 +162,19 @@ static const struct hwmon_chip_info acpi_fan_hwmon_chip_info = {
.info = acpi_fan_hwmon_info,
};
-int devm_acpi_fan_create_hwmon(struct acpi_device *device)
+void acpi_fan_notify_hwmon(struct device *dev)
+{
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+
+ hwmon_notify_event(fan->hdev, hwmon_fan, hwmon_fan_input, 0);
+}
+
+int devm_acpi_fan_create_hwmon(struct device *dev)
{
- struct acpi_fan *fan = acpi_driver_data(device);
- struct device *hdev;
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+
+ fan->hdev = devm_hwmon_device_register_with_info(dev, "acpi_fan", fan,
+ &acpi_fan_hwmon_chip_info, NULL);
- hdev = devm_hwmon_device_register_with_info(&device->dev, "acpi_fan", fan,
- &acpi_fan_hwmon_chip_info, NULL);
- return PTR_ERR_OR_ZERO(hdev);
+ return PTR_ERR_OR_ZERO(fan->hdev);
}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 00910ccd7eda..40f875b265a9 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -27,7 +27,6 @@ static inline void acpi_pci_link_init(void) {}
void acpi_processor_init(void);
void acpi_platform_init(void);
void acpi_pnp_init(void);
-void acpi_int340x_thermal_init(void);
int acpi_sysfs_init(void);
void acpi_gpe_apply_masked_gpes(void);
void acpi_container_init(void);
@@ -140,6 +139,7 @@ int __acpi_device_uevent_modalias(const struct acpi_device *adev,
/* --------------------------------------------------------------------------
Power Resource
-------------------------------------------------------------------------- */
+void acpi_power_resources_init(void);
void acpi_power_resources_list_free(struct list_head *list);
int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
struct list_head *list);
@@ -175,6 +175,12 @@ bool processor_physically_present(acpi_handle handle);
static inline void acpi_early_processor_control_setup(void) {}
#endif
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+void acpi_idle_rescan_dead_smt_siblings(void);
+#else
+static inline void acpi_idle_rescan_dead_smt_siblings(void) {}
+#endif
+
/* --------------------------------------------------------------------------
Embedded Controller
-------------------------------------------------------------------------- */
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index 76a856c32c4d..d1595156c86a 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -300,6 +300,25 @@ int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
}
EXPORT_SYMBOL_GPL(acpi_irq_get);
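+/**
+ * acpi_irq_get_affinity - Look up the CPU affinity of an ACPI interrupt
+ * @handle: ACPI handle of the device owning the interrupt resource
+ * @index: Index of the interrupt resource within the device
+ *
+ * Return: the cpumask the interrupt is affine to, or NULL if the resource
+ * cannot be parsed or no valid affinity information is available.
+ */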
+const struct cpumask *acpi_irq_get_affinity(acpi_handle handle,
+ unsigned int index)
+{
+ struct irq_fwspec_info info;
+ struct irq_fwspec fwspec;
+ unsigned long flags;
+
+ if (acpi_irq_parse_one(handle, index, &fwspec, &flags))
+ return NULL;
+
+ if (irq_populate_fwspec_info(&fwspec, &info))
+ return NULL;
+
+ if (!(info.flags & IRQ_FWSPEC_INFO_AFFINITY_VALID))
+ return NULL;
+
+ return info.affinity;
+}
+
/**
* acpi_set_irq_model - Setup the GSI irqdomain information
* @model: the value assigned to acpi_irq_model
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index ae035b93da08..3eb56b77cb6d 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2637,7 +2637,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
if (ndr_desc->target_node == NUMA_NO_NODE) {
ndr_desc->target_node = phys_to_target_node(spa->address);
dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
- NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+ NUMA_NO_NODE, ndr_desc->target_node, &res.start, &res.end);
}
/*
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
index 3902759abcba..bce6f6a18426 100644
--- a/drivers/acpi/nfit/intel.c
+++ b/drivers/acpi/nfit/intel.c
@@ -55,10 +55,9 @@ static unsigned long intel_security_flags(struct nvdimm *nvdimm,
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
unsigned long security_flags = 0;
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_get_security_state cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_GET_SECURITY_STATE,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -120,10 +119,9 @@ static unsigned long intel_security_flags(struct nvdimm *nvdimm,
static int intel_security_freeze(struct nvdimm *nvdimm)
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_freeze_lock cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_FREEZE_LOCK,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -153,10 +151,9 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
unsigned int cmd = ptype == NVDIMM_MASTER ?
NVDIMM_INTEL_SET_MASTER_PASSPHRASE :
NVDIMM_INTEL_SET_PASSPHRASE;
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_set_passphrase cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_family = NVDIMM_FAMILY_INTEL,
.nd_size_in = ND_INTEL_PASSPHRASE_SIZE * 2,
@@ -195,10 +192,9 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data)
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_unlock_unit cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_UNLOCK_UNIT,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -234,10 +230,9 @@ static int intel_security_disable(struct nvdimm *nvdimm,
{
int rc;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_disable_passphrase cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_DISABLE_PASSPHRASE,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -277,10 +272,9 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
unsigned int cmd = ptype == NVDIMM_MASTER ?
NVDIMM_INTEL_MASTER_SECURE_ERASE : NVDIMM_INTEL_SECURE_ERASE;
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_secure_erase cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_family = NVDIMM_FAMILY_INTEL,
.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
@@ -318,10 +312,9 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
{
int rc;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_query_overwrite cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_QUERY_OVERWRITE,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -354,10 +347,9 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
{
int rc;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_overwrite cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_OVERWRITE,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -407,10 +399,9 @@ const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;
static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc,
struct nd_intel_bus_fw_activate_businfo *info)
{
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_bus_fw_activate_businfo cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
.nd_family = NVDIMM_BUS_FAMILY_INTEL,
@@ -518,33 +509,31 @@ static enum nvdimm_fwa_capability intel_bus_fwa_capability(
static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc)
{
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_bus_fw_activate cmd;
- } nd_cmd = {
- .pkg = {
- .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
- .nd_family = NVDIMM_BUS_FAMILY_INTEL,
- .nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
- .nd_size_out =
- sizeof(struct nd_intel_bus_fw_activate),
- .nd_fw_size =
- sizeof(struct nd_intel_bus_fw_activate),
- },
+ ) nd_cmd;
+ int rc;
+
+ nd_cmd.pkg = (struct nd_cmd_pkg) {
+ .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
+ .nd_family = NVDIMM_BUS_FAMILY_INTEL,
+ .nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
+ .nd_size_out =
+ sizeof(struct nd_intel_bus_fw_activate),
+ .nd_fw_size =
+ sizeof(struct nd_intel_bus_fw_activate),
+ };
+ nd_cmd.cmd = (struct nd_intel_bus_fw_activate) {
/*
* Even though activate is run from a suspended context,
* for safety, still ask platform firmware to force
* quiesce devices by default. Let a module
* parameter override that policy.
*/
- .cmd = {
- .iodev_state = acpi_desc->fwa_noidle
- ? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
- : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
- },
+ .iodev_state = acpi_desc->fwa_noidle
+ ? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
+ : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
};
- int rc;
-
switch (intel_bus_fwa_state(nd_desc)) {
case NVDIMM_FWA_ARMED:
case NVDIMM_FWA_ARM_OVERFLOW:
@@ -582,10 +571,9 @@ const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops;
static int intel_fwa_dimminfo(struct nvdimm *nvdimm,
struct nd_intel_fw_activate_dimminfo *info)
{
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_fw_activate_dimminfo cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -688,27 +676,24 @@ static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm)
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_fw_activate_arm cmd;
- } nd_cmd = {
- .pkg = {
- .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
- .nd_family = NVDIMM_FAMILY_INTEL,
- .nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
- .nd_size_out =
- sizeof(struct nd_intel_fw_activate_arm),
- .nd_fw_size =
- sizeof(struct nd_intel_fw_activate_arm),
- },
- .cmd = {
- .activate_arm = arm == NVDIMM_FWA_ARM
- ? ND_INTEL_DIMM_FWA_ARM
- : ND_INTEL_DIMM_FWA_DISARM,
- },
- };
+ ) nd_cmd;
int rc;
+ nd_cmd.pkg = (struct nd_cmd_pkg) {
+ .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
+ .nd_family = NVDIMM_FAMILY_INTEL,
+ .nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
+ .nd_size_out = sizeof(struct nd_intel_fw_activate_arm),
+ .nd_fw_size = sizeof(struct nd_intel_fw_activate_arm),
+ };
+ nd_cmd.cmd = (struct nd_intel_fw_activate_arm) {
+ .activate_arm = arm == NVDIMM_FWA_ARM ?
+ ND_INTEL_DIMM_FWA_ARM :
+ ND_INTEL_DIMM_FWA_DISARM,
+ };
+
switch (intel_fwa_state(nvdimm)) {
case NVDIMM_FWA_INVALID:
return -ENXIO;
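/*
 * Illustrative sketch, not part of the patch: one plausible shape for
 * the TRAILING_OVERLAP() helper used in the hunks above (the real
 * definition lives in include/linux/stddef.h and may differ). It
 * overlays MEMBERS on the flexible array member FAM of TYPE, so the
 * command payload no longer sits "after" a flexible array the way the
 * removed open-coded struct { pkg; cmd; } pattern placed it.
 */
#define TRAILING_OVERLAP(TYPE, NAME, FAM, MEMBERS)			\
	union {								\
		TYPE NAME;						\
		struct {						\
			unsigned char __offset_to_##FAM[offsetof(TYPE, FAM)]; \
			MEMBERS						\
		};							\
	}

/* Usage mirrors the conversions above: nd_cmd.pkg and nd_cmd.cmd overlap. */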
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index 9d9052258e92..77a81627aaef 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -74,7 +74,6 @@ struct memory_target {
struct node_cache_attrs cache_attrs;
u8 gen_port_device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE];
bool registered;
- bool ext_updated; /* externally updated */
};
struct memory_initiator {
@@ -368,35 +367,6 @@ static void hmat_update_target_access(struct memory_target *target,
}
}
-int hmat_update_target_coordinates(int nid, struct access_coordinate *coord,
- enum access_coordinate_class access)
-{
- struct memory_target *target;
- int pxm;
-
- if (nid == NUMA_NO_NODE)
- return -EINVAL;
-
- pxm = node_to_pxm(nid);
- guard(mutex)(&target_lock);
- target = find_mem_target(pxm);
- if (!target)
- return -ENODEV;
-
- hmat_update_target_access(target, ACPI_HMAT_READ_LATENCY,
- coord->read_latency, access);
- hmat_update_target_access(target, ACPI_HMAT_WRITE_LATENCY,
- coord->write_latency, access);
- hmat_update_target_access(target, ACPI_HMAT_READ_BANDWIDTH,
- coord->read_bandwidth, access);
- hmat_update_target_access(target, ACPI_HMAT_WRITE_BANDWIDTH,
- coord->write_bandwidth, access);
- target->ext_updated = true;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(hmat_update_target_coordinates);
-
static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
{
struct memory_locality *loc;
@@ -773,10 +743,6 @@ static void hmat_update_target_attrs(struct memory_target *target,
u32 best = 0;
int i;
- /* Don't update if an external agent has changed the data. */
- if (target->ext_updated)
- return;
-
/* Don't update for generic port if there's no device handle */
if ((access == NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL ||
access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
@@ -908,11 +874,33 @@ static void hmat_register_target_devices(struct memory_target *target)
}
}
-static void hmat_register_target(struct memory_target *target)
+static void hmat_hotplug_target(struct memory_target *target)
{
int nid = pxm_to_node(target->memory_pxm);
/*
+ * Skip offline nodes. This can happen when memory marked EFI_MEMORY_SP,
+ * "specific purpose", is applied to all the memory in a proximity
+ * domain leading to the node being marked offline / unplugged, or if a
+ * memory-only "hotplug" node is offline.
+ */
+ if (nid == NUMA_NO_NODE || !node_online(nid))
+ return;
+
+ guard(mutex)(&target_lock);
+ if (target->registered)
+ return;
+
+ hmat_register_target_initiators(target);
+ hmat_register_target_cache(target);
+ hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
+ hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
+ target->registered = true;
+}
+
+static void hmat_register_target(struct memory_target *target)
+{
+ /*
* Devices may belong to either an offline or online
* node, so unconditionally add them.
*/
@@ -922,32 +910,15 @@ static void hmat_register_target(struct memory_target *target)
* Register generic port perf numbers. The nid may not be
* initialized and is still NUMA_NO_NODE.
*/
- mutex_lock(&target_lock);
- if (*(u16 *)target->gen_port_device_handle) {
- hmat_update_generic_target(target);
- target->registered = true;
+ scoped_guard(mutex, &target_lock) {
+ if (*(u16 *)target->gen_port_device_handle) {
+ hmat_update_generic_target(target);
+ target->registered = true;
+ return;
+ }
}
- mutex_unlock(&target_lock);
-
- /*
- * Skip offline nodes. This can happen when memory
- * marked EFI_MEMORY_SP, "specific purpose", is applied
- * to all the memory in a proximity domain leading to
- * the node being marked offline / unplugged, or if
- * memory-only "hotplug" node is offline.
- */
- if (nid == NUMA_NO_NODE || !node_online(nid))
- return;
- mutex_lock(&target_lock);
- if (!target->registered) {
- hmat_register_target_initiators(target);
- hmat_register_target_cache(target);
- hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
- hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
- target->registered = true;
- }
- mutex_unlock(&target_lock);
+ hmat_hotplug_target(target);
}
static void hmat_register_targets(void)
@@ -962,10 +933,10 @@ static int hmat_callback(struct notifier_block *self,
unsigned long action, void *arg)
{
struct memory_target *target;
- struct memory_notify *mnb = arg;
- int pxm, nid = mnb->status_change_nid;
+ struct node_notify *nn = arg;
+ int pxm, nid = nn->nid;
- if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
+ if (action != NODE_ADDED_FIRST_MEMORY)
return NOTIFY_OK;
pxm = node_to_pxm(nid);
@@ -973,7 +944,7 @@ static int hmat_callback(struct notifier_block *self,
if (!target)
return NOTIFY_OK;
- hmat_register_target(target);
+ hmat_hotplug_target(target);
return NOTIFY_OK;
}
@@ -1118,7 +1089,7 @@ static __init int hmat_init(void)
hmat_register_targets();
/* Keep the table and structures if the notifier may use them */
- if (hotplug_memory_notifier(hmat_callback, HMAT_CALLBACK_PRI))
+ if (hotplug_node_notifier(hmat_callback, HMAT_CALLBACK_PRI))
goto out_put;
if (!hmat_set_default_dram_perf())
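/*
 * Illustrative sketch, not part of the patch: the minimal shape of a
 * callback after the memory-notifier -> node-notifier conversion above.
 * struct node_notify, NODE_ADDED_FIRST_MEMORY and hotplug_node_notifier()
 * are taken from the hunks; their exact declarations are assumed.
 */
static int example_node_cb(struct notifier_block *self,
			   unsigned long action, void *arg)
{
	struct node_notify *nn = arg;

	if (action != NODE_ADDED_FIRST_MEMORY)
		return NOTIFY_OK;

	pr_debug("node %d gained its first memory\n", nn->nid);
	return NOTIFY_OK;
}

/* Registered once at init, as hmat_init() now does:
 * hotplug_node_notifier(example_node_cb, HMAT_CALLBACK_PRI);
 */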
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 53816dfab645..aa87ee1583a4 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -237,7 +237,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
struct acpi_srat_generic_affinity *p =
(struct acpi_srat_generic_affinity *)header;
- if (p->device_handle_type == 0) {
+ if (p->device_handle_type == 1) {
/*
* For pci devices this may be the only place they
* are assigned a proximity domain
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 5ff343096ece..05393a7315fe 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -398,7 +398,7 @@ static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
list_del_rcu(&map->list);
INIT_RCU_WORK(&map->track.rwork, acpi_os_map_remove);
- queue_rcu_work(system_wq, &map->track.rwork);
+ queue_rcu_work(system_percpu_wq, &map->track.rwork);
}
/**
@@ -1694,8 +1694,8 @@ acpi_status __init acpi_os_initialize(void)
acpi_status __init acpi_os_initialize1(void)
{
- kacpid_wq = alloc_workqueue("kacpid", 0, 1);
- kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 0);
+ kacpid_wq = alloc_workqueue("kacpid", WQ_PERCPU, 1);
+ kacpi_notify_wq = alloc_workqueue("kacpi_notify", WQ_PERCPU, 0);
kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
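/*
 * Illustrative sketch, not part of the patch: with WQ_PERCPU the
 * per-CPU nature of these workqueues is spelled out instead of being
 * the implicit default of flags == 0; behavior is assumed unchanged.
 */
static struct workqueue_struct *example_wq;

static int __init example_wq_init(void)
{
	example_wq = alloc_workqueue("example", WQ_PERCPU, 0);
	return example_wq ? 0 : -ENOMEM;
}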
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 630fe0a34bc6..ad81aa03fe2f 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -22,6 +22,7 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/string_choices.h>
struct acpi_prt_entry {
struct acpi_pci_id id;
@@ -468,7 +469,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
dev_dbg(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n",
pin_name(pin), link_desc, gsi,
(triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge",
- (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq);
+ str_low_high(polarity == ACPI_ACTIVE_LOW), dev->irq);
kfree(entry);
return 0;
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 08e10b6226dc..bed7dc85612e 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -268,7 +268,7 @@ static int acpi_pci_link_get_current(struct acpi_pci_link *link)
link->irq.active = irq;
- acpi_handle_debug(handle, "Link at IRQ %d \n", link->irq.active);
+ acpi_handle_debug(handle, "Link at IRQ %d\n", link->irq.active);
end:
return result;
@@ -761,7 +761,7 @@ static int acpi_pci_link_resume(struct acpi_pci_link *link)
return 0;
}
-static void irqrouter_resume(void)
+static void irqrouter_resume(void *data)
{
struct acpi_pci_link *link;
@@ -888,10 +888,14 @@ static int __init acpi_irq_balance_set(char *str)
__setup("acpi_irq_balance", acpi_irq_balance_set);
-static struct syscore_ops irqrouter_syscore_ops = {
+static const struct syscore_ops irqrouter_syscore_ops = {
.resume = irqrouter_resume,
};
+static struct syscore irqrouter_syscore = {
+ .ops = &irqrouter_syscore_ops,
+};
+
void __init acpi_pci_link_init(void)
{
if (acpi_noirq)
@@ -904,6 +908,6 @@ void __init acpi_pci_link_init(void)
else
acpi_irq_balance = 0;
}
- register_syscore_ops(&irqrouter_syscore_ops);
+ register_syscore(&irqrouter_syscore);
acpi_scan_add_handler(&pci_link_handler);
}
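/*
 * Illustrative sketch, not part of the patch: the registration pattern
 * the hunk above converts to. The ops table becomes const and is
 * wrapped in a struct syscore instance, and the resume callback now
 * takes a data pointer. struct syscore and register_syscore() are
 * assumed per this diff.
 */
static void example_resume(void *data)
{
}

static const struct syscore_ops example_syscore_ops = {
	.resume = example_resume,
};

static struct syscore example_syscore = {
	.ops = &example_syscore_ops,
};

static void __init example_syscore_init(void)
{
	register_syscore(&example_syscore);
}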
diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c
index 031d1ba81b86..11b1c2828005 100644
--- a/drivers/acpi/pfr_update.c
+++ b/drivers/acpi/pfr_update.c
@@ -127,8 +127,11 @@ static int query_capability(struct pfru_update_cap_info *cap_hdr,
pfru_dev->rev_id,
PFRU_FUNC_QUERY_UPDATE_CAP,
NULL, ACPI_TYPE_PACKAGE);
- if (!out_obj)
+ if (!out_obj) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Query cap failed with no object\n");
return ret;
+ }
if (out_obj->package.count < CAP_NR_IDX ||
out_obj->package.elements[CAP_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
@@ -141,13 +144,17 @@ static int query_capability(struct pfru_update_cap_info *cap_hdr,
out_obj->package.elements[CAP_DRV_SVN_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[CAP_PLAT_ID_IDX].type != ACPI_TYPE_BUFFER ||
out_obj->package.elements[CAP_OEM_ID_IDX].type != ACPI_TYPE_BUFFER ||
- out_obj->package.elements[CAP_OEM_INFO_IDX].type != ACPI_TYPE_BUFFER)
+ out_obj->package.elements[CAP_OEM_INFO_IDX].type != ACPI_TYPE_BUFFER) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Query cap failed with invalid package count/type\n");
goto free_acpi_buffer;
+ }
cap_hdr->status = out_obj->package.elements[CAP_STATUS_IDX].integer.value;
if (cap_hdr->status != DSM_SUCCEED) {
ret = -EBUSY;
- dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", cap_hdr->status);
+ dev_dbg(pfru_dev->parent_dev, "Query cap Error Status:%d\n",
+ cap_hdr->status);
goto free_acpi_buffer;
}
@@ -193,24 +200,32 @@ static int query_buffer(struct pfru_com_buf_info *info,
out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid,
pfru_dev->rev_id, PFRU_FUNC_QUERY_BUF,
NULL, ACPI_TYPE_PACKAGE);
- if (!out_obj)
+ if (!out_obj) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Query buf failed with no object\n");
return ret;
+ }
if (out_obj->package.count < BUF_NR_IDX ||
out_obj->package.elements[BUF_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[BUF_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[BUF_ADDR_LOW_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[BUF_ADDR_HI_IDX].type != ACPI_TYPE_INTEGER ||
- out_obj->package.elements[BUF_SIZE_IDX].type != ACPI_TYPE_INTEGER)
+ out_obj->package.elements[BUF_SIZE_IDX].type != ACPI_TYPE_INTEGER) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Query buf failed with invalid package count/type\n");
goto free_acpi_buffer;
+ }
info->status = out_obj->package.elements[BUF_STATUS_IDX].integer.value;
info->ext_status =
out_obj->package.elements[BUF_EXT_STATUS_IDX].integer.value;
if (info->status != DSM_SUCCEED) {
ret = -EBUSY;
- dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", info->status);
- dev_dbg(pfru_dev->parent_dev, "Error Extended Status:%d\n", info->ext_status);
+ dev_dbg(pfru_dev->parent_dev,
+ "Query buf failed with Error Status:%d\n", info->status);
+ dev_dbg(pfru_dev->parent_dev,
+ "Query buf failed with Error Extended Status:%d\n", info->ext_status);
goto free_acpi_buffer;
}
@@ -295,12 +310,16 @@ static bool applicable_image(const void *data, struct pfru_update_cap_info *cap,
m_img_hdr = data + size;
type = get_image_type(m_img_hdr, pfru_dev);
- if (type < 0)
+ if (type < 0) {
+ dev_dbg(pfru_dev->parent_dev, "Invalid image type\n");
return false;
+ }
size = adjust_efi_size(m_img_hdr, size);
- if (size < 0)
+ if (size < 0) {
+ dev_dbg(pfru_dev->parent_dev, "Invalid image size\n");
return false;
+ }
auth = data + size;
size += sizeof(u64) + auth->auth_info.hdr.len;
@@ -310,7 +329,7 @@ static bool applicable_image(const void *data, struct pfru_update_cap_info *cap,
if (type == PFRU_CODE_INJECT_TYPE)
return payload_hdr->rt_ver >= cap->code_rt_version;
- return payload_hdr->rt_ver >= cap->drv_rt_version;
+ return payload_hdr->svn_ver >= cap->drv_svn;
}
static void print_update_debug_info(struct pfru_updated_result *result,
@@ -346,8 +365,11 @@ static int start_update(int action, struct pfru_device *pfru_dev)
out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid,
pfru_dev->rev_id, PFRU_FUNC_START,
&in_obj, ACPI_TYPE_PACKAGE);
- if (!out_obj)
+ if (!out_obj) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Update failed to start with no object\n");
return ret;
+ }
if (out_obj->package.count < UPDATE_NR_IDX ||
out_obj->package.elements[UPDATE_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
@@ -355,8 +377,11 @@ static int start_update(int action, struct pfru_device *pfru_dev)
out_obj->package.elements[UPDATE_AUTH_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[UPDATE_AUTH_TIME_HI_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[UPDATE_EXEC_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER ||
- out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].type != ACPI_TYPE_INTEGER)
+ out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].type != ACPI_TYPE_INTEGER) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Update failed with invalid package count/type\n");
goto free_acpi_buffer;
+ }
update_result.status =
out_obj->package.elements[UPDATE_STATUS_IDX].integer.value;
@@ -365,8 +390,10 @@ static int start_update(int action, struct pfru_device *pfru_dev)
if (update_result.status != DSM_SUCCEED) {
ret = -EBUSY;
- dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", update_result.status);
- dev_dbg(pfru_dev->parent_dev, "Error Extended Status:%d\n",
+ dev_dbg(pfru_dev->parent_dev,
+ "Update failed with Error Status:%d\n", update_result.status);
+ dev_dbg(pfru_dev->parent_dev,
+ "Update failed with Error Extended Status:%d\n",
update_result.ext_status);
goto free_acpi_buffer;
@@ -450,8 +477,10 @@ static ssize_t pfru_write(struct file *file, const char __user *buf,
if (ret)
return ret;
- if (len > buf_info.buf_size)
+ if (len > buf_info.buf_size) {
+ dev_dbg(pfru_dev->parent_dev, "Capsule image size too large\n");
return -EINVAL;
+ }
iov.iov_base = (void __user *)buf;
iov.iov_len = len;
@@ -460,10 +489,14 @@ static ssize_t pfru_write(struct file *file, const char __user *buf,
/* map the communication buffer */
phy_addr = (phys_addr_t)((buf_info.addr_hi << 32) | buf_info.addr_lo);
buf_ptr = memremap(phy_addr, buf_info.buf_size, MEMREMAP_WB);
- if (!buf_ptr)
+ if (!buf_ptr) {
+ dev_dbg(pfru_dev->parent_dev, "Failed to remap the buffer\n");
return -ENOMEM;
+ }
if (!copy_from_iter_full(buf_ptr, len, &iter)) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Failed to copy the data from the user space buffer\n");
ret = -EINVAL;
goto unmap;
}
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index b43f4459a4f6..ea04a8c69215 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -37,6 +37,7 @@ static const char * const profile_names[] = {
[PLATFORM_PROFILE_BALANCED] = "balanced",
[PLATFORM_PROFILE_BALANCED_PERFORMANCE] = "balanced-performance",
[PLATFORM_PROFILE_PERFORMANCE] = "performance",
+ [PLATFORM_PROFILE_MAX_POWER] = "max-power",
[PLATFORM_PROFILE_CUSTOM] = "custom",
};
static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST);
@@ -506,7 +507,8 @@ int platform_profile_cycle(void)
if (err)
return err;
- if (profile == PLATFORM_PROFILE_CUSTOM ||
+ if (profile == PLATFORM_PROFILE_MAX_POWER ||
+ profile == PLATFORM_PROFILE_CUSTOM ||
profile == PLATFORM_PROFILE_LAST)
return -EINVAL;
@@ -515,7 +517,8 @@ int platform_profile_cycle(void)
if (err)
return err;
- /* never iterate into a custom if all drivers supported it */
+ /* never iterate into a custom or max power if all drivers supported it */
+ clear_bit(PLATFORM_PROFILE_MAX_POWER, data.aggregate);
clear_bit(PLATFORM_PROFILE_CUSTOM, data.aggregate);
next = find_next_bit_wrap(data.aggregate,
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index b7243d7563b1..361a7721a6a8 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -23,6 +23,7 @@
#define pr_fmt(fmt) "ACPI: PM: " fmt
+#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -63,6 +64,9 @@ struct acpi_power_resource_entry {
struct acpi_power_resource *resource;
};
+static bool hp_eb_gp12pxp_quirk;
+static bool unused_power_resources_quirk;
+
static LIST_HEAD(acpi_power_resource_list);
static DEFINE_MUTEX(power_resource_list_lock);
@@ -992,6 +996,38 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
}
#ifdef CONFIG_ACPI_SLEEP
+static bool resource_is_gp12pxp(acpi_handle handle)
+{
+ const char *path;
+ bool ret;
+
+ path = acpi_handle_path(handle);
+ ret = path && strcmp(path, "\\_SB_.PCI0.GP12.PXP_") == 0;
+ kfree(path);
+
+ return ret;
+}
+
+static void acpi_resume_on_eb_gp12pxp(struct acpi_power_resource *resource)
+{
+ acpi_handle_notice(resource->device.handle,
+ "HP EB quirk - turning OFF then ON\n");
+
+ __acpi_power_off(resource);
+ __acpi_power_on(resource);
+
+ /*
+ * Use the same delay as DSDT uses in modem _RST method.
+ *
+ * Otherwise we get "Unable to change power state from unknown to D0,
+ * device inaccessible" error for the modem PCI device after thaw.
+ *
+ * This power resource is normally being enabled only during thaw (once)
+ * so this wait is not a performance issue.
+ */
+ msleep(200);
+}
+
void acpi_resume_power_resources(void)
{
struct acpi_power_resource *resource;
@@ -1013,8 +1049,14 @@ void acpi_resume_power_resources(void)
if (state == ACPI_POWER_RESOURCE_STATE_OFF
&& resource->ref_count) {
- acpi_handle_debug(resource->device.handle, "Turning ON\n");
- __acpi_power_on(resource);
+ if (hp_eb_gp12pxp_quirk &&
+ resource_is_gp12pxp(resource->device.handle)) {
+ acpi_resume_on_eb_gp12pxp(resource);
+ } else {
+ acpi_handle_debug(resource->device.handle,
+ "Turning ON\n");
+ __acpi_power_on(resource);
+ }
}
mutex_unlock(&resource->resource_lock);
@@ -1024,6 +1066,41 @@ void acpi_resume_power_resources(void)
}
#endif
+static const struct dmi_system_id dmi_hp_elitebook_gp12pxp_quirk[] = {
+/*
+ * This laptop (and possibly similar models) has a power resource called
+ * "GP12.PXP_" for its WWAN modem.
+ *
+ * For this power resource to turn ON power for the modem, a certain
+ * internal flag called "ONEN" needs to be set.
+ * This flag only gets set from this power resource's "_OFF" method, while
+ * the actual modem power gets turned off during suspend by the "GP12.PTS"
+ * method called from the global "_PTS" (Prepare To Sleep) method.
+ * On the other hand, this power resource's "_OFF" method implementation
+ * just sets the aforementioned flag without actually doing anything else
+ * (it doesn't contain any code to actually turn off power).
+ *
+ * The above means that when, at the end of hibernation, we try to turn
+ * this power resource back ON because its "_STA" method returns 0 (while
+ * the resource is still considered in use), its "_ON" method won't do
+ * anything since the "ONEN" flag is not set.
+ * Overall, this means the modem is dead until the laptop is rebooted:
+ * its power has been cut by "_PTS", and its PCI configuration was lost
+ * and cannot be restored.
+ *
+ * The easiest way to work around the issue is to call this power
+ * resource's "_OFF" method before calling its "_ON" method, to make sure
+ * the "ONEN" flag gets properly set.
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 855 G7 Notebook PC"),
+ },
+ },
+ {}
+};
+
static const struct dmi_system_id dmi_leave_unused_power_resources_on[] = {
{
/*
@@ -1046,7 +1123,7 @@ void acpi_turn_off_unused_power_resources(void)
{
struct acpi_power_resource *resource;
- if (dmi_check_system(dmi_leave_unused_power_resources_on))
+ if (unused_power_resources_quirk)
return;
mutex_lock(&power_resource_list_lock);
@@ -1065,3 +1142,10 @@ void acpi_turn_off_unused_power_resources(void)
mutex_unlock(&power_resource_list_lock);
}
+
+void __init acpi_power_resources_init(void)
+{
+ hp_eb_gp12pxp_quirk = dmi_check_system(dmi_hp_elitebook_gp12pxp_quirk);
+ unused_power_resources_quirk =
+ dmi_check_system(dmi_leave_unused_power_resources_on);
+}
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index 54676e3d82dd..de5f8c018333 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -21,6 +21,25 @@
#include <linux/cacheinfo.h>
#include <acpi/processor.h>
+/*
+ * The acpi_pptt_cache_v1 in actbl2.h, which is imported from acpica,
+ * only contains the cache_id field rather than all the fields of the
+ * Cache Type Structure. Use this alternative structure until it is
+ * resolved in acpica.
+ */
+struct acpi_pptt_cache_v1_full {
+ struct acpi_subtable_header header;
+ u16 reserved;
+ u32 flags;
+ u32 next_level_of_cache;
+ u32 size;
+ u32 number_of_sets;
+ u8 associativity;
+ u8 attributes;
+ u16 line_size;
+ u32 cache_id;
+} __packed;
+
static struct acpi_subtable_header *fetch_pptt_subtable(struct acpi_table_header *table_hdr,
u32 pptt_ref)
{
@@ -56,6 +75,18 @@ static struct acpi_pptt_cache *fetch_pptt_cache(struct acpi_table_header *table_
return (struct acpi_pptt_cache *)fetch_pptt_subtable(table_hdr, pptt_ref);
}
+static struct acpi_pptt_cache_v1_full *upgrade_pptt_cache(struct acpi_pptt_cache *cache)
+{
+ if (cache->header.length < sizeof(struct acpi_pptt_cache_v1_full))
+ return NULL;
+
+ /* No use for v1 if the only additional field is invalid */
+ if (!(cache->flags & ACPI_PPTT_CACHE_ID_VALID))
+ return NULL;
+
+ return (struct acpi_pptt_cache_v1_full *)cache;
+}
+
static struct acpi_subtable_header *acpi_get_pptt_resource(struct acpi_table_header *table_hdr,
struct acpi_pptt_processor *node,
int resource)
@@ -177,14 +208,14 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr,
}
/**
- * acpi_count_levels() - Given a PPTT table, and a CPU node, count the cache
- * levels and split cache levels (data/instruction).
+ * acpi_count_levels() - Given a PPTT table, and a CPU node, count the
+ * total number of levels and split cache levels (data/instruction).
* @table_hdr: Pointer to the head of the PPTT table
* @cpu_node: processor node we wish to count caches for
- * @levels: Number of levels if success.
* @split_levels: Number of split cache levels (data/instruction) if
* success. Can be NULL.
*
+ * Return: number of levels.
* Given a processor node containing a processing unit, walk into it and count
* how many levels exist solely for it, and then walk up each level until we hit
* the root node (ignore the package level because it may be possible to have
@@ -192,14 +223,18 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr,
* split cache levels (data/instruction) that exist at each level on the way
* up.
*/
-static void acpi_count_levels(struct acpi_table_header *table_hdr,
- struct acpi_pptt_processor *cpu_node,
- unsigned int *levels, unsigned int *split_levels)
+static int acpi_count_levels(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *cpu_node,
+ unsigned int *split_levels)
{
+ int current_level = 0;
+
do {
- acpi_find_cache_level(table_hdr, cpu_node, levels, split_levels, 0, 0);
+ acpi_find_cache_level(table_hdr, cpu_node, &current_level, split_levels, 0, 0);
cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent);
} while (cpu_node);
+
+ return current_level;
}
/**
@@ -351,7 +386,6 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
* @this_leaf: Kernel cache info structure being updated
* @found_cache: The PPTT node describing this cache instance
* @cpu_node: A unique reference to describe this cache instance
- * @revision: The revision of the PPTT table
*
* The ACPI spec implies that the fields in the cache structures are used to
* extend and correct the information probed from the hardware. Lets only
@@ -361,10 +395,9 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
*/
static void update_cache_properties(struct cacheinfo *this_leaf,
struct acpi_pptt_cache *found_cache,
- struct acpi_pptt_processor *cpu_node,
- u8 revision)
+ struct acpi_pptt_processor *cpu_node)
{
- struct acpi_pptt_cache_v1* found_cache_v1;
+ struct acpi_pptt_cache_v1_full *found_cache_v1;
this_leaf->fw_token = cpu_node;
if (found_cache->flags & ACPI_PPTT_SIZE_PROPERTY_VALID)
@@ -414,9 +447,8 @@ static void update_cache_properties(struct cacheinfo *this_leaf,
found_cache->flags & ACPI_PPTT_CACHE_TYPE_VALID)
this_leaf->type = CACHE_TYPE_UNIFIED;
- if (revision >= 3 && (found_cache->flags & ACPI_PPTT_CACHE_ID_VALID)) {
- found_cache_v1 = ACPI_ADD_PTR(struct acpi_pptt_cache_v1,
- found_cache, sizeof(struct acpi_pptt_cache));
+ found_cache_v1 = upgrade_pptt_cache(found_cache);
+ if (found_cache_v1) {
this_leaf->id = found_cache_v1->cache_id;
this_leaf->attributes |= CACHE_ID;
}
@@ -441,8 +473,7 @@ static void cache_setup_acpi_cpu(struct acpi_table_header *table,
pr_debug("found = %p %p\n", found_cache, cpu_node);
if (found_cache)
update_cache_properties(this_leaf, found_cache,
- ACPI_TO_POINTER(ACPI_PTR_DIFF(cpu_node, table)),
- table->revision);
+ ACPI_TO_POINTER(ACPI_PTR_DIFF(cpu_node, table)));
index++;
}
@@ -645,7 +676,7 @@ int acpi_get_cache_info(unsigned int cpu, unsigned int *levels,
if (!cpu_node)
return -ENOENT;
- acpi_count_levels(table, cpu_node, levels, split_levels);
+ *levels = acpi_count_levels(table, cpu_node, split_levels);
pr_debug("Cache Setup: last_level=%d split_levels=%d\n",
*levels, split_levels ? *split_levels : -1);
@@ -817,3 +848,218 @@ int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE,
ACPI_PPTT_ACPI_IDENTICAL);
}
+
+/**
+ * acpi_pptt_get_child_cpus() - Find all the CPUs below a PPTT
+ * processor hierarchy node
+ *
+ * @table_hdr: A reference to the PPTT table
+ * @parent_node: A pointer to the processor hierarchy node in the
+ * table_hdr
+ * @cpus: A cpumask to fill with the CPUs below @parent_node
+ *
+ * Walks up the PPTT from every possible CPU to check whether the
+ * provided @parent_node is one of that CPU's ancestors.
+ */
+static void acpi_pptt_get_child_cpus(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *parent_node,
+ cpumask_t *cpus)
+{
+ struct acpi_pptt_processor *cpu_node;
+ u32 acpi_id;
+ int cpu;
+
+ cpumask_clear(cpus);
+
+ for_each_possible_cpu(cpu) {
+ acpi_id = get_acpi_id_for_cpu(cpu);
+ cpu_node = acpi_find_processor_node(table_hdr, acpi_id);
+
+ while (cpu_node) {
+ if (cpu_node == parent_node) {
+ cpumask_set_cpu(cpu, cpus);
+ break;
+ }
+ cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent);
+ }
+ }
+}
+
+/**
+ * acpi_pptt_get_cpus_from_container() - Populate a cpumask with all CPUs in a
+ * processor container
+ * @acpi_cpu_id: The UID of the processor container
+ * @cpus: The resulting CPU mask
+ *
+ * Find the specified Processor Container, and fill @cpus with all the cpus
+ * below it.
+ *
+ * Not every 'Processor Hierarchy' entry in the PPTT is a CPU or a
+ * Processor Container; some exist purely to describe a private
+ * resource. CPUs have to be leaves, so a Processor Container
+ * is a non-leaf that has the 'ACPI Processor ID valid' flag set.
+ */
+void acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus)
+{
+ struct acpi_table_header *table_hdr;
+ struct acpi_subtable_header *entry;
+ unsigned long table_end;
+ u32 proc_sz;
+
+ cpumask_clear(cpus);
+
+ table_hdr = acpi_get_pptt();
+ if (!table_hdr)
+ return;
+
+ table_end = (unsigned long)table_hdr + table_hdr->length;
+ entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
+ sizeof(struct acpi_table_pptt));
+ proc_sz = sizeof(struct acpi_pptt_processor);
+ while ((unsigned long)entry + proc_sz <= table_end) {
+ if (entry->type == ACPI_PPTT_TYPE_PROCESSOR) {
+ struct acpi_pptt_processor *cpu_node;
+
+ cpu_node = (struct acpi_pptt_processor *)entry;
+ if (cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID &&
+ !acpi_pptt_leaf_node(table_hdr, cpu_node) &&
+ cpu_node->acpi_processor_id == acpi_cpu_id) {
+ acpi_pptt_get_child_cpus(table_hdr, cpu_node, cpus);
+ break;
+ }
+ }
+ entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry,
+ entry->length);
+ }
+}
+
+/**
+ * find_acpi_cache_level_from_id() - Get the level of the specified cache
+ * @cache_id: The id field of the cache
+ *
+ * Determine the level relative to any CPU for the cache identified by
+ * cache_id. This allows the property to be found even if the CPUs are offline.
+ *
+ * The returned level can be used to group caches that are peers.
+ *
+ * The PPTT table must be rev 3 or later.
+ *
+ * If one CPU's L2 is shared with another CPU as L3, this function will return
+ * an unpredictable value.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, the revision isn't supported or
+ * the cache cannot be found.
+ * Otherwise returns a value which represents the level of the specified cache.
+ */
+int find_acpi_cache_level_from_id(u32 cache_id)
+{
+ int cpu;
+ struct acpi_table_header *table;
+
+ table = acpi_get_pptt();
+ if (!table)
+ return -ENOENT;
+
+ if (table->revision < 3)
+ return -ENOENT;
+
+ for_each_possible_cpu(cpu) {
+ bool empty;
+ int level = 1;
+ u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+ struct acpi_pptt_cache *cache;
+ struct acpi_pptt_processor *cpu_node;
+
+ cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+ if (!cpu_node)
+ continue;
+
+ do {
+ int cache_type[] = {CACHE_TYPE_INST, CACHE_TYPE_DATA, CACHE_TYPE_UNIFIED};
+
+ empty = true;
+ for (int i = 0; i < ARRAY_SIZE(cache_type); i++) {
+ struct acpi_pptt_cache_v1_full *cache_v1;
+
+ cache = acpi_find_cache_node(table, acpi_cpu_id, cache_type[i],
+ level, &cpu_node);
+ if (!cache)
+ continue;
+
+ empty = false;
+
+ cache_v1 = upgrade_pptt_cache(cache);
+ if (cache_v1 && cache_v1->cache_id == cache_id)
+ return level;
+ }
+ level++;
+ } while (!empty);
+ }
+
+ return -ENOENT;
+}
+
+/**
+ * acpi_pptt_get_cpumask_from_cache_id() - Get the cpus associated with the
+ * specified cache
+ * @cache_id: The id field of the cache
+ * @cpus: Where to build the cpumask
+ *
+ * Determine which CPUs are below this cache in the PPTT. This allows the property
+ * to be found even if the CPUs are offline.
+ *
+ * The PPTT table must be rev 3 or later.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, or the cache cannot be found.
+ * Otherwise returns 0 and sets the cpus in the provided cpumask.
+ */
+int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus)
+{
+ int cpu;
+ struct acpi_table_header *table;
+
+ cpumask_clear(cpus);
+
+ table = acpi_get_pptt();
+ if (!table)
+ return -ENOENT;
+
+ if (table->revision < 3)
+ return -ENOENT;
+
+ for_each_possible_cpu(cpu) {
+ bool empty;
+ int level = 1;
+ u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+ struct acpi_pptt_cache *cache;
+ struct acpi_pptt_processor *cpu_node;
+
+ cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+ if (!cpu_node)
+ continue;
+
+ do {
+ int cache_type[] = {CACHE_TYPE_INST, CACHE_TYPE_DATA, CACHE_TYPE_UNIFIED};
+
+ empty = true;
+ for (int i = 0; i < ARRAY_SIZE(cache_type); i++) {
+ struct acpi_pptt_cache_v1_full *cache_v1;
+
+ cache = acpi_find_cache_node(table, acpi_cpu_id, cache_type[i],
+ level, &cpu_node);
+
+ if (!cache)
+ continue;
+
+ empty = false;
+
+ cache_v1 = upgrade_pptt_cache(cache);
+ if (cache_v1 && cache_v1->cache_id == cache_id)
+ cpumask_set_cpu(cpu, cpus);
+ }
+ level++;
+ } while (!empty);
+ }
+
+ return 0;
+}
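/*
 * Illustrative sketch, not part of the patch: how a caller might combine
 * the two new helpers above to resolve a rev-3 PPTT cache_id into a
 * cache level and a CPU set, even while those CPUs are offline.
 */
static void example_describe_cache(u32 cache_id)
{
	cpumask_var_t cpus;
	int level;

	if (!zalloc_cpumask_var(&cpus, GFP_KERNEL))
		return;

	level = find_acpi_cache_level_from_id(cache_id);
	if (level > 0 &&
	    !acpi_pptt_get_cpumask_from_cache_id(cache_id, cpus))
		pr_debug("cache %u: L%d, cpus %*pbl\n",
			 cache_id, level, cpumask_pr_args(cpus));

	free_cpumask_var(cpus);
}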
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index e549914a636c..7b8b5d2015ec 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -85,8 +85,6 @@ static u64 efi_pa_va_lookup(efi_guid_t *guid, u64 pa)
}
}
- pr_warn("Failed to find VA for GUID: %pUL, PA: 0x%llx", guid, pa);
-
return 0;
}
@@ -152,15 +150,52 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
th = &tm->handlers[cur_handler];
guid_copy(&th->guid, (guid_t *)handler_info->handler_guid);
+
+ /*
+	 * If handler_address is NULL, log it and skip the VA lookup for
+	 * this handler.
+ */
+ if (unlikely(!handler_info->handler_address)) {
+ pr_info("Skipping handler with NULL address for GUID: %pUL",
+ (guid_t *)handler_info->handler_guid);
+ continue;
+ }
+
th->handler_addr =
(void *)efi_pa_va_lookup(&th->guid, handler_info->handler_address);
+ /*
+	 * Warn and skip this handler if the VA lookup failed, which is
+	 * not expected to ever happen.
+ */
+ if (unlikely(!th->handler_addr)) {
+ pr_warn("Failed to find VA of handler for GUID: %pUL, PA: 0x%llx",
+ &th->guid, handler_info->handler_address);
+ continue;
+ }
th->static_data_buffer_addr =
efi_pa_va_lookup(&th->guid, handler_info->static_data_buffer_address);
+ /*
+ * According to the PRM specification, static_data_buffer_address can be zero,
+ * so avoid printing a warning message in that case. Otherwise, if the
+ * return value of efi_pa_va_lookup() is zero, print the message.
+ */
+ if (unlikely(!th->static_data_buffer_addr && handler_info->static_data_buffer_address))
+ pr_warn("Failed to find VA of static data buffer for GUID: %pUL, PA: 0x%llx",
+ &th->guid, handler_info->static_data_buffer_address);
th->acpi_param_buffer_addr =
efi_pa_va_lookup(&th->guid, handler_info->acpi_param_buffer_address);
+ /*
+ * According to the PRM specification, acpi_param_buffer_address can be zero,
+ * so avoid printing a warning message in that case. Otherwise, if the
+ * return value of efi_pa_va_lookup() is zero, print the message.
+ */
+ if (unlikely(!th->acpi_param_buffer_addr && handler_info->acpi_param_buffer_address))
+ pr_warn("Failed to find VA of acpi param buffer for GUID: %pUL, PA: 0x%llx",
+ &th->guid, handler_info->acpi_param_buffer_address);
+
} while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
return 0;
@@ -209,6 +244,12 @@ static struct prm_handler_info *find_prm_handler(const guid_t *guid)
return (struct prm_handler_info *) find_guid_info(guid, GET_HANDLER);
}
+bool acpi_prm_handler_available(const guid_t *guid)
+{
+ return find_prm_handler(guid) && find_prm_module(guid);
+}
+EXPORT_SYMBOL_GPL(acpi_prm_handler_available);
+
/* In-coming PRM commands */
#define PRM_CMD_RUN_SERVICE 0
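/*
 * Illustrative sketch, not part of the patch: with the export above, a
 * caller can probe for a PRM handler before depending on it. The GUID
 * below is a placeholder, not a real PRM service.
 */
static const guid_t example_prm_guid =
	GUID_INIT(0x00000000, 0x0000, 0x0000,
		  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);

static bool example_prm_usable(void)
{
	return acpi_prm_handler_available(&example_prm_guid);
}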
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 4322f2da6d10..c08ead07252b 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/export.h>
+#include <linux/string_choices.h>
#include <linux/suspend.h>
#include <linux/bcd.h>
#include <linux/acpi.h>
@@ -30,17 +30,16 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
if (!dev->wakeup.flags.valid)
continue;
- seq_printf(seq, "%s\t S%d\t",
+ seq_printf(seq, "%s\t S%llu\t",
dev->pnp.bus_id,
- (u32) dev->wakeup.sleep_state);
+ dev->wakeup.sleep_state);
mutex_lock(&dev->physical_node_lock);
if (!dev->physical_node_count) {
seq_printf(seq, "%c%-8s\n",
dev->wakeup.flags.valid ? '*' : ' ',
- device_may_wakeup(&dev->dev) ?
- "enabled" : "disabled");
+ str_enabled_disabled(device_may_wakeup(&dev->dev)));
} else {
struct device *ldev;
list_for_each_entry(entry, &dev->physical_node_list,
@@ -55,9 +54,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
seq_printf(seq, "%c%-8s %s:%s\n",
dev->wakeup.flags.valid ? '*' : ' ',
- (device_may_wakeup(&dev->dev) ||
- device_may_wakeup(ldev)) ?
- "enabled" : "disabled",
+ str_enabled_disabled(device_may_wakeup(ldev) ||
+ device_may_wakeup(&dev->dev)),
ldev->bus ? ldev->bus->name :
"no-bus", dev_name(ldev));
put_device(ldev);
@@ -141,6 +139,5 @@ static const struct proc_ops acpi_system_wakeup_device_proc_ops = {
void __init acpi_sleep_proc_init(void)
{
/* 'wakeup device' [R/W] */
- proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
- acpi_root_dir, &acpi_system_wakeup_device_proc_ops);
+ proc_create("wakeup", 0644, acpi_root_dir, &acpi_system_wakeup_device_proc_ops);
}
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 9b6b71a2ffb5..a4498357bd16 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -54,7 +54,7 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
return -ENODEV;
- if (device_declaration && (apic->uid == acpi_id)) {
+ if (apic->uid == acpi_id && (device_declaration || acpi_id < 255)) {
*apic_id = apic->local_apic_id;
return 0;
}
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 3b281bc1e73c..65e779be64ff 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -279,6 +279,9 @@ static int __init acpi_processor_driver_init(void)
* after acpi_cppc_processor_probe() has been called for all online CPUs
*/
acpi_processor_init_invariance_cppc();
+
+ acpi_idle_rescan_dead_smt_siblings();
+
return 0;
err:
driver_unregister(&acpi_processor_driver);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index e2febca2ec13..89f2f08b2554 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -24,6 +24,8 @@
#include <acpi/processor.h>
#include <linux/context_tracking.h>
+#include "internal.h"
+
/*
* Include the apic definitions for x86 to have the APIC timer related defines
* available also for UP (on SMP it gets magically included via linux/smp.h).
@@ -55,6 +57,12 @@ struct cpuidle_driver acpi_idle_driver = {
};
#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+void acpi_idle_rescan_dead_smt_siblings(void)
+{
+ if (cpuidle_get_driver() == &acpi_idle_driver)
+ arch_cpu_rescan_dead_smt_siblings();
+}
+
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
@@ -724,18 +732,16 @@ static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev,
return 0;
}
-static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
- struct cpuidle_device *dev)
+static void acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
+ struct cpuidle_device *dev)
{
int i, count = ACPI_IDLE_STATE_START;
struct acpi_processor_cx *cx;
- struct cpuidle_state *state;
if (max_cstate == 0)
max_cstate = 1;
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
- state = &acpi_idle_driver.states[count];
cx = &pr->power.states[i];
if (!cx->valid)
@@ -743,27 +749,13 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
per_cpu(acpi_cstate[count], dev->cpu) = cx;
- if (lapic_timer_needs_broadcast(pr, cx))
- state->flags |= CPUIDLE_FLAG_TIMER_STOP;
-
- if (cx->type == ACPI_STATE_C3) {
- state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
- if (pr->flags.bm_check)
- state->flags |= CPUIDLE_FLAG_RCU_IDLE;
- }
-
count++;
if (count == CPUIDLE_STATE_MAX)
break;
}
-
- if (!count)
- return -EINVAL;
-
- return 0;
}
-static int acpi_processor_setup_cstates(struct acpi_processor *pr)
+static void acpi_processor_setup_cstates(struct acpi_processor *pr)
{
int i, count;
struct acpi_processor_cx *cx;
@@ -810,17 +802,21 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
state->enter_s2idle = acpi_idle_enter_s2idle;
+ if (lapic_timer_needs_broadcast(pr, cx))
+ state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+
+ if (cx->type == ACPI_STATE_C3) {
+ state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
+ if (pr->flags.bm_check)
+ state->flags |= CPUIDLE_FLAG_RCU_IDLE;
+ }
+
count++;
if (count == CPUIDLE_STATE_MAX)
break;
}
drv->state_count = count;
-
- if (!count)
- return -EINVAL;
-
- return 0;
}
static inline void acpi_processor_cstate_first_run_checks(void)
@@ -990,11 +986,6 @@ end:
return ret;
}
-/*
- * flat_state_cnt - the number of composite LPI states after the process of flattening
- */
-static int flat_state_cnt;
-
/**
* combine_lpi_states - combine local and parent LPI states to form a composite LPI state
*
@@ -1037,9 +1028,10 @@ static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
curr_level->composite_states[curr_level->composite_states_size++] = t;
}
-static int flatten_lpi_states(struct acpi_processor *pr,
- struct acpi_lpi_states_array *curr_level,
- struct acpi_lpi_states_array *prev_level)
+static unsigned int flatten_lpi_states(struct acpi_processor *pr,
+ unsigned int flat_state_cnt,
+ struct acpi_lpi_states_array *curr_level,
+ struct acpi_lpi_states_array *prev_level)
{
int i, j, state_count = curr_level->size;
struct acpi_lpi_state *p, *t = curr_level->entries;
@@ -1079,7 +1071,7 @@ static int flatten_lpi_states(struct acpi_processor *pr,
}
kfree(curr_level->entries);
- return 0;
+ return flat_state_cnt;
}
int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
@@ -1094,6 +1086,7 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
acpi_handle handle = pr->handle, pr_ahandle;
struct acpi_device *d = NULL;
struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
+ unsigned int state_count;
/* make sure our architecture has support */
ret = acpi_processor_ffh_lpi_probe(pr->id);
@@ -1106,14 +1099,13 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
if (!acpi_has_method(handle, "_LPI"))
return -EINVAL;
- flat_state_cnt = 0;
prev = &info[0];
curr = &info[1];
handle = pr->handle;
ret = acpi_processor_evaluate_lpi(handle, prev);
if (ret)
return ret;
- flatten_lpi_states(pr, prev, NULL);
+ state_count = flatten_lpi_states(pr, 0, prev, NULL);
status = acpi_get_parent(handle, &pr_ahandle);
while (ACPI_SUCCESS(status)) {
@@ -1135,18 +1127,19 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
break;
/* flatten all the LPI states in this level of hierarchy */
- flatten_lpi_states(pr, curr, prev);
+ state_count = flatten_lpi_states(pr, state_count, curr, prev);
tmp = prev, prev = curr, curr = tmp;
status = acpi_get_parent(handle, &pr_ahandle);
}
- pr->power.count = flat_state_cnt;
/* reset the index after flattening */
- for (i = 0; i < pr->power.count; i++)
+ for (i = 0; i < state_count; i++)
pr->power.lpi_states[i].index = i;
+ pr->power.count = state_count;
+
/* Tell driver that _LPI is supported. */
pr->flags.has_lpi = 1;
pr->flags.power = 1;
@@ -1238,7 +1231,8 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
if (pr->flags.has_lpi)
return acpi_processor_setup_lpi_states(pr);
- return acpi_processor_setup_cstates(pr);
+ acpi_processor_setup_cstates(pr);
+ return 0;
}
/**
@@ -1258,7 +1252,8 @@ static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
if (pr->flags.has_lpi)
return acpi_processor_ffh_lpi_probe(pr->id);
- return acpi_processor_setup_cpuidle_cx(pr, dev);
+ acpi_processor_setup_cpuidle_cx(pr, dev);
+ return 0;
}
static int acpi_processor_get_power_info(struct acpi_processor *pr)
@@ -1397,6 +1392,9 @@ int acpi_processor_power_init(struct acpi_processor *pr)
if (retval) {
if (acpi_processor_registered == 0)
cpuidle_unregister_driver(&acpi_idle_driver);
+
+ per_cpu(acpi_cpuidle_device, pr->id) = NULL;
+ kfree(dev);
return retval;
}
acpi_processor_registered++;
@@ -1423,3 +1421,5 @@ int acpi_processor_power_exit(struct acpi_processor *pr)
pr->flags.power_setup_done = 0;
return 0;
}
+
+MODULE_IMPORT_NS("ACPI_PROCESSOR_IDLE");
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 64b8d1e19594..8972446b7162 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -173,6 +173,9 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{
unsigned int cpu;
+ if (ignore_ppc == 1)
+ return;
+
for_each_cpu(cpu, policy->related_cpus) {
struct acpi_processor *pr = per_cpu(processors, cpu);
int ret;
@@ -193,6 +196,14 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
if (ret < 0)
pr_err("Failed to add freq constraint for CPU%d (%d)\n",
cpu, ret);
+
+ if (!pr->performance)
+ continue;
+
+ ret = acpi_processor_get_platform_limit(pr);
+ if (ret)
+ pr_err("Failed to update freq constraint for CPU%d (%d)\n",
+ cpu, ret);
}
}
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 1219adb11ab9..c7b1dc5687ec 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -62,19 +62,14 @@ static int phys_package_first_cpu(int cpu)
return 0;
}
-static int cpu_has_cpufreq(unsigned int cpu)
+static bool cpu_has_cpufreq(unsigned int cpu)
{
- struct cpufreq_policy *policy;
-
if (!acpi_processor_cpufreq_init)
return 0;
- policy = cpufreq_cpu_get(cpu);
- if (policy) {
- cpufreq_cpu_put(policy);
- return 1;
- }
- return 0;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+
+ return policy != NULL;
}
static int cpufreq_get_max_state(unsigned int cpu)
@@ -93,12 +88,31 @@ static int cpufreq_get_cur_state(unsigned int cpu)
return reduction_step(cpu);
}
+static bool cpufreq_update_thermal_limit(unsigned int cpu, struct acpi_processor *pr)
+{
+ unsigned long max_freq;
+ int ret;
+
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return false;
+
+ max_freq = (policy->cpuinfo.max_freq *
+ (100 - reduction_step(cpu) * cpufreq_thermal_reduction_pctg)) / 100;
+
+ ret = freq_qos_update_request(&pr->thermal_req, max_freq);
+ if (ret < 0) {
+ pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
+ pr->id, ret);
+ }
+
+ return true;
+}
+
static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
- struct cpufreq_policy *policy;
struct acpi_processor *pr;
- unsigned long max_freq;
- int i, ret;
+ int i;
if (!cpu_has_cpufreq(cpu))
return 0;
@@ -120,20 +134,8 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
continue;
- policy = cpufreq_cpu_get(i);
- if (!policy)
+ if (!cpufreq_update_thermal_limit(i, pr))
return -EINVAL;
-
- max_freq = (policy->cpuinfo.max_freq *
- (100 - reduction_step(i) * cpufreq_thermal_reduction_pctg)) / 100;
-
- cpufreq_cpu_put(policy);
-
- ret = freq_qos_update_request(&pr->thermal_req, max_freq);
- if (ret < 0) {
- pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
- pr->id, ret);
- }
}
return 0;
}
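/*
 * Illustrative sketch, not part of the patch: the scope-based cleanup
 * pattern both converted helpers above rely on. __free(put_cpufreq_policy)
 * (from <linux/cleanup.h> via <linux/cpufreq.h>) drops the policy
 * reference automatically when the pointer goes out of scope, removing
 * the manual cpufreq_cpu_put() calls on every path.
 */
static unsigned int example_cpuinfo_max(unsigned int cpu)
{
	struct cpufreq_policy *policy __free(put_cpufreq_policy) =
		cpufreq_cpu_get(cpu);

	if (!policy)
		return 0;

	return policy->cpuinfo.max_freq;	/* reference dropped on return */
}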
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index d1541a386fbc..f9c2bc1d4a3a 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -235,7 +235,7 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data)
if (pr->throttling_platform_limit > target_state)
target_state = pr->throttling_platform_limit;
if (target_state >= p_throttling->state_count) {
- pr_warn("Exceed the limit of T-state \n");
+ pr_warn("Exceed the limit of T-state\n");
target_state = p_throttling->state_count - 1;
}
p_tstate->target_state = target_state;
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 436019d96027..18e90067d567 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -83,6 +83,7 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc,
struct fwnode_handle *parent)
{
struct acpi_data_node *dn;
+ acpi_handle scope = NULL;
bool result;
if (acpi_graph_ignore_port(handle))
@@ -98,59 +99,45 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc,
INIT_LIST_HEAD(&dn->data.properties);
INIT_LIST_HEAD(&dn->data.subnodes);
- result = acpi_extract_properties(handle, desc, &dn->data);
-
- if (handle) {
- acpi_handle scope;
- acpi_status status;
+ /*
+ * The scope for the completion of relative pathname segments and
+ * subnode object lookup is the one of the namespace node (device)
+ * containing the object that has returned the package. That is, it's
+ * the scope of that object's parent device.
+ */
+ if (handle)
+ acpi_get_parent(handle, &scope);
- /*
- * The scope for the subnode object lookup is the one of the
- * namespace node (device) containing the object that has
- * returned the package. That is, it's the scope of that
- * object's parent.
- */
- status = acpi_get_parent(handle, &scope);
- if (ACPI_SUCCESS(status)
- && acpi_enumerate_nondev_subnodes(scope, desc, &dn->data,
- &dn->fwnode))
- result = true;
- } else if (acpi_enumerate_nondev_subnodes(NULL, desc, &dn->data,
- &dn->fwnode)) {
+ /*
+ * Extract properties from the _DSD-equivalent package pointed to by
+ * desc and use scope (if not NULL) for the completion of relative
+ * pathname segments.
+ *
+ * The extracted properties will be held in the new data node dn.
+ */
+ result = acpi_extract_properties(scope, desc, &dn->data);
+ /*
+ * Look for subnodes in the _DSD-equivalent package pointed to by desc
+ * and create child nodes of dn if there are any.
+ */
+ if (acpi_enumerate_nondev_subnodes(scope, desc, &dn->data, &dn->fwnode))
result = true;
- }
-
- if (result) {
- dn->handle = handle;
- dn->data.pointer = desc;
- list_add_tail(&dn->sibling, list);
- return true;
- }
-
- kfree(dn);
- acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n");
- return false;
-}
-
-static bool acpi_nondev_subnode_data_ok(acpi_handle handle,
- const union acpi_object *link,
- struct list_head *list,
- struct fwnode_handle *parent)
-{
- struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
- acpi_status status;
- status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
- ACPI_TYPE_PACKAGE);
- if (ACPI_FAILURE(status))
+ if (!result) {
+ kfree(dn);
+ acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n");
return false;
+ }
- if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
- parent))
- return true;
+ /*
+ * This will be NULL if the desc package is embedded in an outer
+ * _DSD-equivalent package and its scope cannot be determined.
+ */
+ dn->handle = handle;
+ dn->data.pointer = desc;
+ list_add_tail(&dn->sibling, list);
- ACPI_FREE(buf.pointer);
- return false;
+ return true;
}
static bool acpi_nondev_subnode_ok(acpi_handle scope,
@@ -158,9 +145,16 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
struct list_head *list,
struct fwnode_handle *parent)
{
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
acpi_handle handle;
acpi_status status;
+ /*
+ * If the scope is unknown, the _DSD-equivalent package being parsed
+ * was embedded in an outer _DSD-equivalent package as a result of
+ * direct evaluation of an object pointed to by a reference. In that
+ * case, using a pathname as the target object pointer is invalid.
+ */
if (!scope)
return false;
@@ -169,7 +163,17 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
if (ACPI_FAILURE(status))
return false;
- return acpi_nondev_subnode_data_ok(handle, link, list, parent);
+ status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
+ ACPI_TYPE_PACKAGE);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
+ parent))
+ return true;
+
+ ACPI_FREE(buf.pointer);
+ return false;
}
static bool acpi_add_nondev_subnodes(acpi_handle scope,
@@ -180,9 +184,12 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope,
bool ret = false;
int i;
+ /*
+ * Every element in the links package is expected to represent a link
+ * to a non-device node in a tree containing device-specific data.
+ */
for (i = 0; i < links->package.count; i++) {
union acpi_object *link, *desc;
- acpi_handle handle;
bool result;
link = &links->package.elements[i];
@@ -190,26 +197,53 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope,
if (link->package.count != 2)
continue;
- /* The first one must be a string. */
+ /* The first one (the key) must be a string. */
if (link->package.elements[0].type != ACPI_TYPE_STRING)
continue;
- /* The second one may be a string, a reference or a package. */
+ /* The second one (the target) may be a string or a package. */
switch (link->package.elements[1].type) {
case ACPI_TYPE_STRING:
+ /*
+ * The string is expected to be a full pathname or a
+ * pathname segment relative to the given scope. That
+ * pathname is expected to point to an object returning
+ * a package that contains _DSD-equivalent information.
+ */
result = acpi_nondev_subnode_ok(scope, link, list,
parent);
break;
- case ACPI_TYPE_LOCAL_REFERENCE:
- handle = link->package.elements[1].reference.handle;
- result = acpi_nondev_subnode_data_ok(handle, link, list,
- parent);
- break;
case ACPI_TYPE_PACKAGE:
+ /*
+ * This happens when a reference is used in AML to
+ * point to the target. Since the target is expected
+ * to be a named object, a reference to it will cause it
+ * to be evaluated in place and its return package will
+ * be embedded in the links package at the location of
+ * the reference.
+ *
+ * The target package is expected to contain _DSD-
+ * equivalent information, but the scope in which it
+ * is located in the original AML is unknown. Thus
+ * it cannot contain pathname segments represented as
+ * strings because there is no way to build full
+ * pathnames out of them.
+ */
+ acpi_handle_debug(scope, "subnode %s: Unknown scope\n",
+ link->package.elements[0].string.pointer);
desc = &link->package.elements[1];
result = acpi_nondev_subnode_extract(desc, NULL, link,
list, parent);
break;
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ /*
+ * It is not expected to see any local references in
+ * the links package because referencing a named object
+ * should cause it to be evaluated in place.
+ */
+ acpi_handle_info(scope, "subnode %s: Unexpected reference\n",
+ link->package.elements[0].string.pointer);
+ fallthrough;
default:
result = false;
break;
@@ -369,6 +403,9 @@ static void acpi_untie_nondev_subnodes(struct acpi_device_data *data)
struct acpi_data_node *dn;
list_for_each_entry(dn, &data->subnodes, sibling) {
+ if (!dn->handle)
+ continue;
+
acpi_detach_data(dn->handle, acpi_nondev_subnode_tag);
acpi_untie_nondev_subnodes(&dn->data);
@@ -383,6 +420,9 @@ static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data)
acpi_status status;
bool ret;
+ if (!dn->handle)
+ continue;
+
status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn);
if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
acpi_handle_err(dn->handle, "Can't tag data node\n");
@@ -804,13 +844,35 @@ acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
return NULL;
}
+static unsigned int acpi_fwnode_get_args_count(struct fwnode_handle *fwnode,
+ const char *nargs_prop)
+{
+ const struct acpi_device_data *data;
+ const union acpi_object *obj;
+ int ret;
+
+ data = acpi_device_data_of_node(fwnode);
+ if (!data)
+ return 0;
+
+ ret = acpi_data_get_property(data, nargs_prop, ACPI_TYPE_INTEGER, &obj);
+ if (ret)
+ return 0;
+
+ return obj->integer.value;
+}
+
static int acpi_get_ref_args(struct fwnode_reference_args *args,
struct fwnode_handle *ref_fwnode,
+ const char *nargs_prop,
const union acpi_object **element,
const union acpi_object *end, size_t num_args)
{
u32 nargs = 0, i;
+ if (nargs_prop)
+ num_args = acpi_fwnode_get_args_count(ref_fwnode, nargs_prop);
+
/*
* Assume the following integer elements are all args. Stop counting on
* the first reference (possibly represented as a string) or end of the
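A minimal user-space model of this counting loop (simplified: no string-reference case, names invented). In the kernel, when @nargs_prop is set, the cap itself is read from an integer property on the referenced node via acpi_fwnode_get_args_count().

#include <stdio.h>

enum type { T_INT, T_REF };
struct elem { enum type type; };

/* Mirror of the counting loop in acpi_get_ref_args(): integers after a
 * reference are its args, capped at num_args; a following reference (or
 * the end of the package) terminates the count. */
static unsigned int count_args(const struct elem *e, unsigned int n,
			       unsigned int num_args)
{
	unsigned int nargs = 0;

	while (nargs < n && nargs < num_args && e[nargs].type == T_INT)
		nargs++;
	return nargs;
}

int main(void)
{
	struct elem pkg[] = { {T_INT}, {T_INT}, {T_INT}, {T_REF} };

	/* 3 integer args follow the reference; a cap of 2 truncates. */
	printf("%u %u\n", count_args(pkg, 4, 8), count_args(pkg, 4, 2));
	return 0;
}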
@@ -882,45 +944,10 @@ static struct fwnode_handle *acpi_parse_string_ref(const struct fwnode_handle *f
return &dn->fwnode;
}
-/**
- * __acpi_node_get_property_reference - returns handle to the referenced object
- * @fwnode: Firmware node to get the property from
- * @propname: Name of the property
- * @index: Index of the reference to return
- * @num_args: Maximum number of arguments after each reference
- * @args: Location to store the returned reference with optional arguments
- * (may be NULL)
- *
- * Find property with @name, verifify that it is a package containing at least
- * one object reference and if so, store the ACPI device object pointer to the
- * target object in @args->adev. If the reference includes arguments, store
- * them in the @args->args[] array.
- *
- * If there's more than one reference in the property value package, @index is
- * used to select the one to return.
- *
- * It is possible to leave holes in the property value set like in the
- * example below:
- *
- * Package () {
- * "cs-gpios",
- * Package () {
- * ^GPIO, 19, 0, 0,
- * ^GPIO, 20, 0, 0,
- * 0,
- * ^GPIO, 21, 0, 0,
- * }
- * }
- *
- * Calling this function with index %2 or index %3 return %-ENOENT. If the
- * property does not contain any more values %-ENOENT is returned. The NULL
- * entry must be single integer and preferably contain value %0.
- *
- * Return: %0 on success, negative error code on failure.
- */
-int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
- const char *propname, size_t index, size_t num_args,
- struct fwnode_reference_args *args)
+static int acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
+ const char *propname, const char *nargs_prop,
+ unsigned int args_count, unsigned int index,
+ struct fwnode_reference_args *args)
{
const union acpi_object *element, *end;
const union acpi_object *obj;
@@ -996,10 +1023,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -EINVAL;
element++;
-
ret = acpi_get_ref_args(idx == index ? args : NULL,
acpi_fwnode_handle(device),
- &element, end, num_args);
+ nargs_prop, &element, end,
+ args_count);
if (ret < 0)
return ret;
@@ -1014,10 +1041,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -EINVAL;
element++;
-
ret = acpi_get_ref_args(idx == index ? args : NULL,
- ref_fwnode, &element, end,
- num_args);
+ ref_fwnode, nargs_prop, &element, end,
+ args_count);
if (ret < 0)
return ret;
@@ -1039,6 +1065,50 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -ENOENT;
}
+
+/**
+ * __acpi_node_get_property_reference - returns handle to the referenced object
+ * @fwnode: Firmware node to get the property from
+ * @propname: Name of the property
+ * @index: Index of the reference to return
+ * @num_args: Maximum number of arguments after each reference
+ * @args: Location to store the returned reference with optional arguments
+ * (may be NULL)
+ *
+ * Find property with @propname, verify that it is a package containing at least
+ * one object reference and if so, store the ACPI device object pointer to the
+ * target object in @args->adev. If the reference includes arguments, store
+ * them in the @args->args[] array.
+ *
+ * If there's more than one reference in the property value package, @index is
+ * used to select the one to return.
+ *
+ * It is possible to leave holes in the property value set like in the
+ * example below:
+ *
+ * Package () {
+ * "cs-gpios",
+ * Package () {
+ * ^GPIO, 19, 0, 0,
+ * ^GPIO, 20, 0, 0,
+ * 0,
+ * ^GPIO, 21, 0, 0,
+ * }
+ * }
+ *
+ * Calling this function with index %2 or index %3 returns %-ENOENT. If the
+ * property does not contain any more values, %-ENOENT is returned. The NULL
+ * entry must be a single integer and preferably contain the value %0.
+ *
+ * Return: %0 on success, negative error code on failure.
+ */
+int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
+ const char *propname, size_t index,
+ size_t num_args,
+ struct fwnode_reference_args *args)
+{
+ return acpi_fwnode_get_reference_args(fwnode, propname, NULL, num_args, index, args);
+}
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
static int acpi_data_prop_read_single(const struct acpi_device_data *data,
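As a consumer-side sketch matching the kernel-doc example above (assuming a device whose _DSD carries that "cs-gpios" package), the generic property API reaches this code through the .get_reference_args fwnode op:

	struct fwnode_reference_args args;
	int ret;

	/* index 1 selects the second reference: args.args[0..2] = { 20, 0, 0 };
	 * index 2 would hit the NULL hole and return -ENOENT. */
	ret = fwnode_property_get_reference_args(dev_fwnode(dev), "cs-gpios",
						 NULL, 3, 1, &args);
	if (!ret)
		fwnode_handle_put(args.fwnode);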
@@ -1210,7 +1280,7 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
ret = acpi_copy_property_array_uint(items, (u64 *)val, nval);
break;
case DEV_PROP_STRING:
- nval = min_t(u32, nval, obj->package.count);
+ nval = min(nval, obj->package.count);
if (nval == 0)
return -ENODATA;
@@ -1259,13 +1329,14 @@ static int stop_on_next(struct acpi_device *adev, void *data)
return 0;
}
-/**
+/*
* acpi_get_next_subnode - Return the next child node handle for a fwnode
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the device's child nodes or a null handle.
*/
-struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
- struct fwnode_handle *child)
+static struct fwnode_handle *
+acpi_get_next_subnode(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
{
struct acpi_device *adev = to_acpi_device_node(fwnode);
@@ -1318,6 +1389,28 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
return NULL;
}
+/*
+ * acpi_get_next_present_subnode - Return the next present child node handle
+ * @fwnode: Firmware node to find the next child node for.
+ * @child: Handle to one of the device's child nodes or a null handle.
+ *
+ * Like acpi_get_next_subnode(), but the device nodes returned by
+ * acpi_get_next_present_subnode() are guaranteed to be present.
+ *
+ * Returns: The fwnode handle of the next present sub-node, or %NULL if none is left.
+ */
+static struct fwnode_handle *
+acpi_get_next_present_subnode(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
+{
+ do {
+ child = acpi_get_next_subnode(fwnode, child);
+ } while (is_acpi_device_node(child) &&
+ !acpi_device_is_present(to_acpi_device_node(child)));
+
+ return child;
+}
+
/**
* acpi_node_get_parent - Return parent fwnode of this fwnode
* @fwnode: Firmware node whose parent to get
@@ -1380,7 +1473,7 @@ static struct fwnode_handle *acpi_graph_get_next_endpoint(
if (!prev) {
do {
- port = fwnode_get_next_child_node(fwnode, port);
+ port = acpi_get_next_subnode(fwnode, port);
/*
* The names of the port nodes begin with "port@"
* followed by the number of the port node and they also
@@ -1398,14 +1491,17 @@ static struct fwnode_handle *acpi_graph_get_next_endpoint(
if (!port)
return NULL;
- endpoint = fwnode_get_next_child_node(port, prev);
- while (!endpoint) {
- port = fwnode_get_next_child_node(fwnode, port);
- if (!port)
+ do {
+ endpoint = acpi_get_next_subnode(port, prev);
+ if (endpoint)
break;
- if (is_acpi_graph_node(port, "port"))
- endpoint = fwnode_get_next_child_node(port, NULL);
- }
+
+ prev = NULL;
+
+ do {
+ port = acpi_get_next_subnode(fwnode, port);
+ } while (port && !is_acpi_graph_node(port, "port"));
+ } while (port);
/*
* The names of the endpoint nodes begin with "endpoint@" followed by
@@ -1558,16 +1654,6 @@ acpi_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
val, nval);
}
-static int
-acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
- const char *prop, const char *nargs_prop,
- unsigned int args_count, unsigned int index,
- struct fwnode_reference_args *args)
-{
- return __acpi_node_get_property_reference(fwnode, prop, index,
- args_count, args);
-}
-
static const char *acpi_fwnode_get_name(const struct fwnode_handle *fwnode)
{
const struct acpi_device *adev;
@@ -1632,6 +1718,7 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
if (fwnode_property_read_u32(fwnode, "reg", &endpoint->id))
fwnode_property_read_u32(fwnode, "endpoint", &endpoint->id);
+ fwnode_handle_put(port_fwnode);
return 0;
}
@@ -1662,7 +1749,7 @@ static int acpi_fwnode_irq_get(const struct fwnode_handle *fwnode,
.property_read_string_array = \
acpi_fwnode_property_read_string_array, \
.get_parent = acpi_node_get_parent, \
- .get_next_child_node = acpi_get_next_subnode, \
+ .get_next_child_node = acpi_get_next_present_subnode, \
.get_named_child_node = acpi_fwnode_get_named_child_node, \
.get_name = acpi_fwnode_get_name, \
.get_name_prefix = acpi_fwnode_get_name_prefix, \
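With .get_next_child_node now pointing at acpi_get_next_present_subnode(), a generic child walk such as the sketch below (nothing ACPI-specific in the caller) silently skips device nodes whose _STA reports them absent:

	struct fwnode_handle *child;

	fwnode_for_each_child_node(dev_fwnode(dev), child) {
		/* visits data nodes and only *present* device nodes */
	}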
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 7d59c6c9185f..d16906f46484 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/dmi.h>
+#include <linux/string_choices.h>
#ifdef CONFIG_X86
#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
@@ -511,6 +512,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus Vivobook Pro N6506CU* */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "N6506CU"),
+ },
+ },
+ {
/* LG Electronics 17U70P */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
@@ -667,6 +675,13 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
},
},
{
+ /* MACHENIKE L16P/L16P */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MACHENIKE"),
+ DMI_MATCH(DMI_BOARD_NAME, "L16P"),
+ },
+ },
+ {
/*
* TongFang GM5HG0A in case of the SKIKK Vanaheim relabel the
* board-name is changed, so check OEM strings instead. Note
@@ -766,7 +781,7 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
pr_warn("ACPI: IRQ %d override to %s%s, %s%s\n", gsi,
t ? "level" : "edge",
trig == triggering ? "" : "(!)",
- p ? "low" : "high",
+ str_low_high(p),
pol == polarity ? "" : "(!)");
triggering = trig;
polarity = pol;
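str_low_high() comes from <linux/string_choices.h> and is behaviorally just the ternary it replaces; a sketch of the helper's contract (not necessarily its exact kernel definition):

	static inline const char *str_low_high(bool v)
	{
		return v ? "low" : "high";
	}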
diff --git a/drivers/acpi/riscv/Kconfig b/drivers/acpi/riscv/Kconfig
new file mode 100644
index 000000000000..046296a18d00
--- /dev/null
+++ b/drivers/acpi/riscv/Kconfig
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# ACPI Configuration for RISC-V
+#
+
+config ACPI_RIMT
+ bool
diff --git a/drivers/acpi/riscv/Makefile b/drivers/acpi/riscv/Makefile
index a96fdf1e2cb8..1284a076fa88 100644
--- a/drivers/acpi/riscv/Makefile
+++ b/drivers/acpi/riscv/Makefile
@@ -2,3 +2,4 @@
obj-y += rhct.o init.o irq.o
obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o
obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o
+obj-$(CONFIG_ACPI_RIMT) += rimt.o
diff --git a/drivers/acpi/riscv/cppc.c b/drivers/acpi/riscv/cppc.c
index 4cdff387deff..42c1a9052470 100644
--- a/drivers/acpi/riscv/cppc.c
+++ b/drivers/acpi/riscv/cppc.c
@@ -37,10 +37,8 @@ static int __init sbi_cppc_init(void)
{
if (sbi_spec_version >= sbi_mk_version(2, 0) &&
sbi_probe_extension(SBI_EXT_CPPC) > 0) {
- pr_info("SBI CPPC extension detected\n");
cppc_ext_present = true;
} else {
- pr_info("SBI CPPC extension NOT detected!!\n");
cppc_ext_present = false;
}
@@ -121,7 +119,7 @@ int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
*val = data.ret.value;
- return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ return data.ret.error;
}
return -EINVAL;
@@ -150,7 +148,7 @@ int cpc_write_ffh(int cpu, struct cpc_reg *reg, u64 val)
smp_call_function_single(cpu, cppc_ffh_csr_write, &data, 1);
- return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ return data.ret.error;
}
return -EINVAL;
diff --git a/drivers/acpi/riscv/init.c b/drivers/acpi/riscv/init.c
index 673e4d5dd752..7c00f7995e86 100644
--- a/drivers/acpi/riscv/init.c
+++ b/drivers/acpi/riscv/init.c
@@ -10,4 +10,6 @@
void __init acpi_arch_init(void)
{
riscv_acpi_init_gsi_mapping();
+ if (IS_ENABLED(CONFIG_ACPI_RIMT))
+ riscv_acpi_rimt_init();
}
diff --git a/drivers/acpi/riscv/init.h b/drivers/acpi/riscv/init.h
index 0b9a07e4031f..1680aa2aaf23 100644
--- a/drivers/acpi/riscv/init.h
+++ b/drivers/acpi/riscv/init.h
@@ -2,3 +2,4 @@
#include <linux/init.h>
void __init riscv_acpi_init_gsi_mapping(void);
+void __init riscv_acpi_rimt_init(void);
diff --git a/drivers/acpi/riscv/irq.c b/drivers/acpi/riscv/irq.c
index cced960c2aef..d9a2154d6c6a 100644
--- a/drivers/acpi/riscv/irq.c
+++ b/drivers/acpi/riscv/irq.c
@@ -10,6 +10,8 @@
#include "init.h"
+#define RISCV_ACPI_INTC_FLAG_PENDING BIT(0)
+
struct riscv_ext_intc_list {
acpi_handle handle;
u32 gsi_base;
@@ -17,6 +19,7 @@ struct riscv_ext_intc_list {
u32 nr_idcs;
u32 id;
u32 type;
+ u32 flag;
struct list_head list;
};
@@ -69,6 +72,22 @@ static acpi_status riscv_acpi_update_gsi_handle(u32 gsi_base, acpi_handle handle
return AE_NOT_FOUND;
}
+int riscv_acpi_update_gsi_range(u32 gsi_base, u32 nr_irqs)
+{
+ struct riscv_ext_intc_list *ext_intc_element;
+
+ list_for_each_entry(ext_intc_element, &ext_intc_list, list) {
+ if (gsi_base == ext_intc_element->gsi_base &&
+ (ext_intc_element->flag & RISCV_ACPI_INTC_FLAG_PENDING)) {
+ ext_intc_element->nr_irqs = nr_irqs;
+ ext_intc_element->flag &= ~RISCV_ACPI_INTC_FLAG_PENDING;
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
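A hypothetical caller — an irqchip probe that only learns its vector count from hardware — would finalize a pending range registered with nr_irqs == 0 roughly like this (register name and surrounding code invented):

	u32 nr_vecs = readl(base + MY_NUM_VECS_REG);	/* hypothetical register */

	if (riscv_acpi_update_gsi_range(gsi_base, nr_vecs))
		pr_warn("no pending GSI range at base %u\n", gsi_base);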
int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base,
u32 *id, u32 *nr_irqs, u32 *nr_idcs)
{
@@ -115,20 +134,67 @@ struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi)
static int __init riscv_acpi_register_ext_intc(u32 gsi_base, u32 nr_irqs, u32 nr_idcs,
u32 id, u32 type)
{
- struct riscv_ext_intc_list *ext_intc_element;
+ struct riscv_ext_intc_list *ext_intc_element, *node, *prev;
ext_intc_element = kzalloc(sizeof(*ext_intc_element), GFP_KERNEL);
if (!ext_intc_element)
return -ENOMEM;
ext_intc_element->gsi_base = gsi_base;
- ext_intc_element->nr_irqs = nr_irqs;
+
+ /* If nr_irqs is zero, mark the entry as pending and use the maximum possible range */
+ if (nr_irqs) {
+ ext_intc_element->nr_irqs = nr_irqs;
+ } else {
+ ext_intc_element->flag |= RISCV_ACPI_INTC_FLAG_PENDING;
+ ext_intc_element->nr_irqs = U32_MAX - ext_intc_element->gsi_base;
+ }
+
ext_intc_element->nr_idcs = nr_idcs;
ext_intc_element->id = id;
- list_add_tail(&ext_intc_element->list, &ext_intc_list);
+ list_for_each_entry(node, &ext_intc_list, list) {
+ if (node->gsi_base < ext_intc_element->gsi_base)
+ break;
+ }
+
+ /* Adjust the previous node's GSI range if it has a pending registration */
+ prev = list_prev_entry(node, list);
+ if (!list_entry_is_head(prev, &ext_intc_list, list)) {
+ if (prev->flag & RISCV_ACPI_INTC_FLAG_PENDING)
+ prev->nr_irqs = ext_intc_element->gsi_base - prev->gsi_base;
+ }
+
+ list_add_tail(&ext_intc_element->list, &node->list);
return 0;
}
+static acpi_status __init riscv_acpi_create_gsi_map_smsi(acpi_handle handle, u32 level,
+ void *context, void **return_value)
+{
+ acpi_status status;
+ u64 gbase;
+
+ if (!acpi_has_method(handle, "_GSB")) {
+ acpi_handle_err(handle, "_GSB method not found\n");
+ return AE_ERROR;
+ }
+
+ status = acpi_evaluate_integer(handle, "_GSB", NULL, &gbase);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "failed to evaluate _GSB method\n");
+ return status;
+ }
+
+ riscv_acpi_register_ext_intc(gbase, 0, 0, 0, ACPI_RISCV_IRQCHIP_SMSI);
+ status = riscv_acpi_update_gsi_handle((u32)gbase, handle);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "failed to find the GSI mapping entry\n");
+ return status;
+ }
+
+ return AE_OK;
+}
+
static acpi_status __init riscv_acpi_create_gsi_map(acpi_handle handle, u32 level,
void *context, void **return_value)
{
@@ -183,6 +249,9 @@ void __init riscv_acpi_init_gsi_mapping(void)
if (acpi_table_parse_madt(ACPI_MADT_TYPE_APLIC, riscv_acpi_aplic_parse_madt, 0) > 0)
acpi_get_devices("RSCV0002", riscv_acpi_create_gsi_map, NULL, NULL);
+
+ /* Unlike the PLIC/APLIC, the SYSMSI controller has no MADT entry */
+ acpi_get_devices("RSCV0006", riscv_acpi_create_gsi_map_smsi, NULL, NULL);
}
static acpi_handle riscv_acpi_get_gsi_handle(u32 gsi)
diff --git a/drivers/acpi/riscv/rimt.c b/drivers/acpi/riscv/rimt.c
new file mode 100644
index 000000000000..7f423405e5ef
--- /dev/null
+++ b/drivers/acpi/riscv/rimt.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024-2025, Ventana Micro Systems Inc
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ *
+ */
+
+#define pr_fmt(fmt) "ACPI: RIMT: " fmt
+
+#include <linux/acpi.h>
+#include <linux/acpi_rimt.h>
+#include <linux/iommu.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include "init.h"
+
+struct rimt_fwnode {
+ struct list_head list;
+ struct acpi_rimt_node *rimt_node;
+ struct fwnode_handle *fwnode;
+};
+
+static LIST_HEAD(rimt_fwnode_list);
+static DEFINE_SPINLOCK(rimt_fwnode_lock);
+
+#define RIMT_TYPE_MASK(type) (1 << (type))
+#define RIMT_IOMMU_TYPE BIT(0)
+
+/* Root pointer to the mapped RIMT table */
+static struct acpi_table_header *rimt_table;
+
+/**
+ * rimt_set_fwnode() - Create rimt_fwnode and use it to register
+ * iommu data in the rimt_fwnode_list
+ *
+ * @rimt_node: RIMT table node associated with the IOMMU
+ * @fwnode: fwnode associated with the RIMT node
+ *
+ * Returns: 0 on success
+ * <0 on failure
+ */
+static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node,
+ struct fwnode_handle *fwnode)
+{
+ struct rimt_fwnode *np;
+
+ np = kzalloc(sizeof(*np), GFP_ATOMIC);
+
+ if (WARN_ON(!np))
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&np->list);
+ np->rimt_node = rimt_node;
+ np->fwnode = fwnode;
+
+ spin_lock(&rimt_fwnode_lock);
+ list_add_tail(&np->list, &rimt_fwnode_list);
+ spin_unlock(&rimt_fwnode_lock);
+
+ return 0;
+}
+
+static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node,
+ void *context)
+{
+ acpi_status status = AE_NOT_FOUND;
+ struct device *dev = context;
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_IOMMU) {
+ struct acpi_rimt_iommu *iommu_node = (struct acpi_rimt_iommu *)&node->node_data;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev;
+ u16 bdf;
+
+ pdev = to_pci_dev(dev);
+ bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ if ((pci_domain_nr(pdev->bus) == iommu_node->pcie_segment_number) &&
+ bdf == iommu_node->pcie_bdf) {
+ status = AE_OK;
+ } else {
+ status = AE_NOT_FOUND;
+ }
+ } else {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res && res->start == iommu_node->base_address)
+ status = AE_OK;
+ else
+ status = AE_NOT_FOUND;
+ }
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ struct acpi_rimt_pcie_rc *pci_rc;
+ struct pci_bus *bus;
+
+ bus = to_pci_bus(dev);
+ pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;
+
+ /*
+ * It is assumed that PCI segment numbers map one-to-one
+ * with root complexes. Each segment number can represent only
+ * one root complex.
+ */
+ status = pci_rc->pcie_segment_number == pci_domain_nr(bus) ?
+ AE_OK : AE_NOT_FOUND;
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_rimt_platform_device *ncomp;
+ struct device *plat_dev = dev;
+ struct acpi_device *adev;
+
+ /*
+ * Walk the device tree to find a device with an
+ * ACPI companion; there is no point in scanning
+ * RIMT for a device matching a platform device if
+ * the device does not have an ACPI companion to
+ * start with.
+ */
+ do {
+ adev = ACPI_COMPANION(plat_dev);
+ if (adev)
+ break;
+
+ plat_dev = plat_dev->parent;
+ } while (plat_dev);
+
+ if (!adev)
+ return status;
+
+ status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(plat_dev, "Can't get device full path name\n");
+ return status;
+ }
+
+ ncomp = (struct acpi_rimt_platform_device *)node->node_data;
+ status = !strcmp(ncomp->device_name, buf.pointer) ?
+ AE_OK : AE_NOT_FOUND;
+ acpi_os_free(buf.pointer);
+ }
+
+ return status;
+}
+
+static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type,
+ void *context)
+{
+ struct acpi_rimt_node *rimt_node, *rimt_end;
+ struct acpi_table_rimt *rimt;
+ int i;
+
+ if (!rimt_table)
+ return NULL;
+
+ /* Get the first RIMT node */
+ rimt = (struct acpi_table_rimt *)rimt_table;
+ rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt,
+ rimt->node_offset);
+ rimt_end = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table,
+ rimt_table->length);
+
+ for (i = 0; i < rimt->num_nodes; i++) {
+ if (WARN_TAINT(rimt_node >= rimt_end, TAINT_FIRMWARE_WORKAROUND,
+ "RIMT node pointer overflows, bad table!\n"))
+ return NULL;
+
+ if (rimt_node->type == type &&
+ ACPI_SUCCESS(rimt_match_node_callback(rimt_node, context)))
+ return rimt_node;
+
+ rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_node,
+ rimt_node->length);
+ }
+
+ return NULL;
+}
+
+/*
+ * RISC-V supports IOMMU as a PCI device or a platform device.
+ * When it is a platform device, there should be a namespace device as
+ * well along with RIMT. To create the link between RIMT information and
+ * the platform device, the IOMMU driver should register itself with the
+ * RIMT module. This is true for PCI based IOMMU as well.
+ */
+int rimt_iommu_register(struct device *dev)
+{
+ struct fwnode_handle *rimt_fwnode;
+ struct acpi_rimt_node *node;
+
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
+ if (!node) {
+ pr_err("Could not find IOMMU node in RIMT\n");
+ return -ENODEV;
+ }
+
+ if (dev_is_pci(dev)) {
+ rimt_fwnode = acpi_alloc_fwnode_static();
+ if (!rimt_fwnode)
+ return -ENOMEM;
+
+ rimt_fwnode->dev = dev;
+ if (!dev->fwnode)
+ dev->fwnode = rimt_fwnode;
+
+ rimt_set_fwnode(node, rimt_fwnode);
+ } else {
+ rimt_set_fwnode(node, dev->fwnode);
+ }
+
+ return 0;
+}
+
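A minimal sketch of the expected call site, assuming a platform-device IOMMU driver (names invented):

	static int my_riscv_iommu_probe(struct platform_device *pdev)
	{
		int ret;

		/* Let the RIMT code map its IOMMU node to this device's fwnode. */
		ret = rimt_iommu_register(&pdev->dev);
		if (ret)
			return ret;

		/* ... rest of probe ... */
		return 0;
	}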
+#ifdef CONFIG_IOMMU_API
+
+/**
+ * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
+ *
+ * @node: RIMT table node to be looked-up
+ *
+ * Returns: fwnode_handle pointer on success, NULL on failure
+ */
+static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
+{
+ struct fwnode_handle *fwnode = NULL;
+ struct rimt_fwnode *curr;
+
+ spin_lock(&rimt_fwnode_lock);
+ list_for_each_entry(curr, &rimt_fwnode_list, list) {
+ if (curr->rimt_node == node) {
+ fwnode = curr->fwnode;
+ break;
+ }
+ }
+ spin_unlock(&rimt_fwnode_lock);
+
+ return fwnode;
+}
+
+static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node)
+{
+ struct acpi_rimt_pcie_rc *pci_rc;
+
+ pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;
+ return pci_rc->flags & ACPI_RIMT_PCIE_ATS_SUPPORTED;
+}
+
+static int rimt_iommu_xlate(struct device *dev, struct acpi_rimt_node *node, u32 deviceid)
+{
+ struct fwnode_handle *rimt_fwnode;
+
+ if (!node)
+ return -ENODEV;
+
+ rimt_fwnode = rimt_get_fwnode(node);
+
+ /*
+ * The IOMMU driver may not be probed yet.
+ * Defer the IOMMU configuration in that case.
+ */
+ if (!rimt_fwnode)
+ return -EPROBE_DEFER;
+
+ return acpi_iommu_fwspec_init(dev, deviceid, rimt_fwnode);
+}
+
+struct rimt_pci_alias_info {
+ struct device *dev;
+ struct acpi_rimt_node *node;
+ const struct iommu_ops *ops;
+};
+
+static int rimt_id_map(struct acpi_rimt_id_mapping *map, u8 type, u32 rid_in, u32 *rid_out)
+{
+ if (rid_in < map->source_id_base ||
+ (rid_in > map->source_id_base + map->num_ids))
+ return -ENXIO;
+
+ *rid_out = map->dest_id_base + (rid_in - map->source_id_base);
+ return 0;
+}
+
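The remap itself is plain interval arithmetic; a self-contained user-space model of rimt_id_map() (types simplified):

#include <stdint.h>
#include <stdio.h>

struct id_mapping { uint32_t source_id_base, num_ids, dest_id_base; };

/* Same arithmetic as rimt_id_map(): linear remap within one window. */
static int id_map(const struct id_mapping *m, uint32_t rid_in,
		  uint32_t *rid_out)
{
	if (rid_in < m->source_id_base ||
	    rid_in > m->source_id_base + m->num_ids)
		return -1;
	*rid_out = m->dest_id_base + (rid_in - m->source_id_base);
	return 0;
}

int main(void)
{
	struct id_mapping m = { 0x0, 0xff, 0x1000 };
	uint32_t out;

	if (!id_map(&m, 0x42, &out))
		printf("0x42 -> 0x%x\n", out);	/* 0x42 -> 0x1042 */
	return 0;
}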
+static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node,
+ u32 *id_out, int index)
+{
+ struct acpi_rimt_platform_device *plat_node;
+ u32 id_mapping_offset, num_id_mapping;
+ struct acpi_rimt_pcie_rc *pci_node;
+ struct acpi_rimt_id_mapping *map;
+ struct acpi_rimt_node *parent;
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
+ id_mapping_offset = pci_node->id_mapping_offset;
+ num_id_mapping = pci_node->num_id_mappings;
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
+ plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
+ id_mapping_offset = plat_node->id_mapping_offset;
+ num_id_mapping = plat_node->num_id_mappings;
+ } else {
+ return NULL;
+ }
+
+ if (!id_mapping_offset || !num_id_mapping || index >= num_id_mapping)
+ return NULL;
+
+ map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
+ id_mapping_offset + index * sizeof(*map));
+
+ /* Firmware bug! */
+ if (!map->dest_offset) {
+ pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
+ node, node->type);
+ return NULL;
+ }
+
+ parent = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table, map->dest_offset);
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE ||
+ node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ *id_out = map->dest_id_base;
+ return parent;
+ }
+
+ return NULL;
+}
+
+static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node,
+ u32 id_in, u32 *id_out,
+ u8 type_mask)
+{
+ struct acpi_rimt_platform_device *plat_node;
+ u32 id_mapping_offset, num_id_mapping;
+ struct acpi_rimt_pcie_rc *pci_node;
+ u32 id = id_in;
+
+ /* Parse the ID mapping tree to find the specified node type */
+ while (node) {
+ struct acpi_rimt_id_mapping *map;
+ int i, rc = 0;
+ u32 map_id = id;
+
+ if (RIMT_TYPE_MASK(node->type) & type_mask) {
+ if (id_out)
+ *id_out = id;
+ return node;
+ }
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
+ id_mapping_offset = pci_node->id_mapping_offset;
+ num_id_mapping = pci_node->num_id_mappings;
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
+ plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
+ id_mapping_offset = plat_node->id_mapping_offset;
+ num_id_mapping = plat_node->num_id_mappings;
+ } else {
+ goto fail_map;
+ }
+
+ if (!id_mapping_offset || !num_id_mapping)
+ goto fail_map;
+
+ map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
+ id_mapping_offset);
+
+ /* Firmware bug! */
+ if (!map->dest_offset) {
+ pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
+ node, node->type);
+ goto fail_map;
+ }
+
+ /* Do the ID translation */
+ for (i = 0; i < num_id_mapping; i++, map++) {
+ rc = rimt_id_map(map, node->type, map_id, &id);
+ if (!rc)
+ break;
+ }
+
+ if (i == num_id_mapping)
+ goto fail_map;
+
+ node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table,
+ rc ? 0 : map->dest_offset);
+ }
+
+fail_map:
+ /* Map input ID to output ID unchanged on mapping failure */
+ if (id_out)
+ *id_out = id_in;
+
+ return NULL;
+}
+
+static struct acpi_rimt_node *rimt_node_map_platform_id(struct acpi_rimt_node *node, u32 *id_out,
+ u8 type_mask, int index)
+{
+ struct acpi_rimt_node *parent;
+ u32 id;
+
+ parent = rimt_node_get_id(node, &id, index);
+ if (!parent)
+ return NULL;
+
+ if (!(RIMT_TYPE_MASK(parent->type) & type_mask))
+ parent = rimt_node_map_id(parent, id, id_out, type_mask);
+ else
+ if (id_out)
+ *id_out = id;
+
+ return parent;
+}
+
+static int rimt_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct rimt_pci_alias_info *info = data;
+ struct acpi_rimt_node *parent;
+ u32 deviceid;
+
+ parent = rimt_node_map_id(info->node, alias, &deviceid, RIMT_IOMMU_TYPE);
+ return rimt_iommu_xlate(info->dev, parent, deviceid);
+}
+
+static int rimt_plat_iommu_map(struct device *dev, struct acpi_rimt_node *node)
+{
+ struct acpi_rimt_node *parent;
+ int err = -ENODEV, i = 0;
+ u32 deviceid = 0;
+
+ do {
+ parent = rimt_node_map_platform_id(node, &deviceid,
+ RIMT_IOMMU_TYPE,
+ i++);
+
+ if (parent)
+ err = rimt_iommu_xlate(dev, parent, deviceid);
+ } while (parent && !err);
+
+ return err;
+}
+
+static int rimt_plat_iommu_map_id(struct device *dev,
+ struct acpi_rimt_node *node,
+ const u32 *in_id)
+{
+ struct acpi_rimt_node *parent;
+ u32 deviceid;
+
+ parent = rimt_node_map_id(node, *in_id, &deviceid, RIMT_IOMMU_TYPE);
+ if (parent)
+ return rimt_iommu_xlate(dev, parent, deviceid);
+
+ return -ENODEV;
+}
+
+/**
+ * rimt_iommu_configure_id - Set-up IOMMU configuration for a device.
+ *
+ * @dev: device to configure
+ * @id_in: optional input id const value pointer
+ *
+ * Returns: 0 on success, <0 on failure
+ */
+int rimt_iommu_configure_id(struct device *dev, const u32 *id_in)
+{
+ struct acpi_rimt_node *node;
+ int err = -ENODEV;
+
+ if (dev_is_pci(dev)) {
+ struct iommu_fwspec *fwspec;
+ struct pci_bus *bus = to_pci_dev(dev)->bus;
+ struct rimt_pci_alias_info info = { .dev = dev };
+
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX, &bus->dev);
+ if (!node)
+ return -ENODEV;
+
+ info.node = node;
+ err = pci_for_each_dma_alias(to_pci_dev(dev),
+ rimt_pci_iommu_init, &info);
+
+ fwspec = dev_iommu_fwspec_get(dev);
+ if (fwspec && rimt_pcie_rc_supports_ats(node))
+ fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
+ } else {
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PLAT_DEVICE, dev);
+ if (!node)
+ return -ENODEV;
+
+ err = id_in ? rimt_plat_iommu_map_id(dev, node, id_in) :
+ rimt_plat_iommu_map(dev, node);
+ }
+
+ return err;
+}
+
+#endif
+
+void __init riscv_acpi_rimt_init(void)
+{
+ acpi_status status;
+
+ /*
+ * rimt_table will be used at runtime after the RIMT init, so there is
+ * no need to call acpi_put_table() to release the table mapping.
+ */
+ status = acpi_get_table(ACPI_SIG_RIMT, 0, &rimt_table);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_err("Failed to get table, %s\n", msg);
+ }
+
+ return;
+ }
+}
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index a3f95a3fffde..d3edc3bcbf01 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -487,7 +487,7 @@ static int acpi_battery_read(struct acpi_battery *battery)
if (result)
return result;
- battery->present = state & (1 << battery->id);
+ battery->present = !!(state & (1 << battery->id));
if (!battery->present)
return 0;
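The `!!` change only matters when the destination is wider than a bool; a two-line demonstration:

#include <stdio.h>

int main(void)
{
	int state = 0x2;	/* bit 1 set */
	int id = 1;

	/* In a wider field, the raw mask would store 2; "!!" normalizes
	 * any non-zero result to exactly 1, matching bool semantics. */
	printf("%d vs %d\n", state & (1 << id), !!(state & (1 << id)));
	return 0;
}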
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fb1fe9f3b1a3..416d87f9bd10 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
+#include <linux/acpi_rimt.h>
#include <linux/acpi_viot.h>
#include <linux/iommu.h>
#include <linux/signal.h>
@@ -845,6 +846,8 @@ static bool acpi_info_matches_ids(struct acpi_device_info *info,
static const char * const acpi_ignore_dep_ids[] = {
"PNP0D80", /* Windows-compatible System Power Management Controller */
"INT33BD", /* Intel Baytrail Mailbox Device */
+ "INTC10DE", /* Intel CVS LNL */
+ "INTC10E0", /* Intel CVS ARL */
"LATT2021", /* Lattice FW Update Client Driver */
NULL
};
@@ -858,6 +861,8 @@ static const char * const acpi_honor_dep_ids[] = {
"INTC10CF", /* IVSC (MTL) driver must be loaded to allow i2c access to camera sensors */
"RSCV0001", /* RISC-V PLIC */
"RSCV0002", /* RISC-V APLIC */
+ "RSCV0005", /* RISC-V SBI MPXY MBOX */
+ "RSCV0006", /* RISC-V RPMI SYSMSI */
"PNP0C0F", /* PCI Link Device */
NULL
};
@@ -1629,7 +1634,10 @@ static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in)
err = iort_iommu_configure_id(dev, id_in);
if (err && err != -EPROBE_DEFER)
+ err = rimt_iommu_configure_id(dev, id_in);
+ if (err && err != -EPROBE_DEFER)
err = viot_iommu_configure(dev);
+
mutex_unlock(&iommu_probe_device_lock);
return err;
@@ -2389,7 +2397,7 @@ static bool acpi_scan_clear_dep_queue(struct acpi_device *adev)
* initial enumeration of devices is complete, put it into the unbound
* workqueue.
*/
- queue_work(system_unbound_wq, &cdw->work);
+ queue_work(system_dfl_wq, &cdw->work);
return true;
}
@@ -2702,7 +2710,7 @@ void __init acpi_scan_init(void)
acpi_memory_hotplug_init();
acpi_watchdog_init();
acpi_pnp_init();
- acpi_int340x_thermal_init();
+ acpi_power_resources_init();
acpi_init_lpit();
acpi_scan_add_handler(&generic_device_handler);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index c8ee8e42b0f6..66ec81e306d4 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -642,7 +642,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
/*
* Disable all GPE and clear their status bits before interrupts are
* enabled. Some GPEs (like wakeup GPEs) have no handlers and this can
- * prevent them from producing spurious interrups.
+ * prevent them from producing spurious interrupts.
*
* acpi_leave_sleep_state() will reenable specific GPEs later.
*
@@ -884,13 +884,13 @@ bool acpi_s2idle_wakeup(void)
#ifdef CONFIG_PM_SLEEP
static u32 saved_bm_rld;
-static int acpi_save_bm_rld(void)
+static int acpi_save_bm_rld(void *data)
{
acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
return 0;
}
-static void acpi_restore_bm_rld(void)
+static void acpi_restore_bm_rld(void *data)
{
u32 resumed_bm_rld = 0;
@@ -901,14 +901,18 @@ static void acpi_restore_bm_rld(void)
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}
-static struct syscore_ops acpi_sleep_syscore_ops = {
+static const struct syscore_ops acpi_sleep_syscore_ops = {
.suspend = acpi_save_bm_rld,
.resume = acpi_restore_bm_rld,
};
+static struct syscore acpi_sleep_syscore = {
+ .ops = &acpi_sleep_syscore_ops,
+};
+
static void acpi_sleep_syscore_init(void)
{
- register_syscore_ops(&acpi_sleep_syscore_ops);
+ register_syscore(&acpi_sleep_syscore);
}
#else
static inline void acpi_sleep_syscore_init(void) {}
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index d960a238be4e..9c3cb109c5d2 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -17,10 +17,7 @@ static inline acpi_status acpi_set_waking_vector(u32 wakeup_address)
extern int acpi_s2idle_begin(void);
extern int acpi_s2idle_prepare(void);
-extern int acpi_s2idle_prepare_late(void);
-extern void acpi_s2idle_check(void);
extern bool acpi_s2idle_wake(void);
-extern void acpi_s2idle_restore_early(void);
extern void acpi_s2idle_restore(void);
extern void acpi_s2idle_end(void);
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index cd36a97b0ea2..73cb933fdc89 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -141,12 +141,23 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
case ACPI_DBG2_16550_NVIDIA:
uart = "uart";
break;
+ case ACPI_DBG2_RISCV_SBI_CON:
+ uart = "sbi";
+ break;
default:
err = -ENOENT;
goto done;
}
- switch (table->baud_rate) {
+ /*
+ * SPCR 1.09 defines the Precise Baud Rate field, which contains a
+ * specific non-zero baud rate that overrides the value of the
+ * Configured Baud Rate field. If this field is zero or not present,
+ * the Configured Baud Rate is used.
+ */
+ if (table->header.revision >= 4 && table->precise_baudrate)
+ baud_rate = table->precise_baudrate;
+ else switch (table->baud_rate) {
case 0:
/*
* SPCR 1.04 defines 0 as a preconfigured state of UART.
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index a48ebbf768f9..e596224302f4 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -372,7 +372,7 @@ static int acpi_table_attr_init(struct kobject *tables_obj,
}
table_attr->attr.size = table_header->length;
- table_attr->attr.read_new = acpi_table_show;
+ table_attr->attr.read = acpi_table_show;
table_attr->attr.attr.name = table_attr->filename;
table_attr->attr.attr.mode = 0400;
@@ -495,7 +495,7 @@ static int acpi_table_data_init(struct acpi_table_header *th)
if (!data_attr)
return -ENOMEM;
sysfs_attr_init(&data_attr->attr.attr);
- data_attr->attr.read_new = acpi_data_show;
+ data_attr->attr.read = acpi_data_show;
data_attr->attr.attr.mode = 0400;
return acpi_data_objs[i].fn(th, data_attr);
}
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index fa9bb8c8ce95..4286e4af1092 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -408,7 +408,7 @@ static const char table_sigs[][ACPI_NAMESEG_SIZE] __nonstring_array __initconst
ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT,
ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT,
ACPI_SIG_NHLT, ACPI_SIG_AEST, ACPI_SIG_CEDT, ACPI_SIG_AGDI,
- ACPI_SIG_NBFT };
+ ACPI_SIG_NBFT, ACPI_SIG_SWFT, ACPI_SIG_MPAM };
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 5c2defe55898..a511f9ea0267 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -924,7 +924,7 @@ static int acpi_thermal_suspend(struct device *dev)
static int acpi_thermal_resume(struct device *dev)
{
struct acpi_thermal *tz;
- int i, j, power_state;
+ int i, j;
if (!dev)
return -EINVAL;
@@ -939,10 +939,8 @@ static int acpi_thermal_resume(struct device *dev)
if (!acpi_thermal_trip_valid(acpi_trip))
break;
- for (j = 0; j < acpi_trip->devices.count; j++) {
- acpi_bus_update_power(acpi_trip->devices.handles[j],
- &power_state);
- }
+ for (j = 0; j < acpi_trip->devices.count; j++)
+ acpi_bus_update_power(acpi_trip->devices.handles[j], NULL);
}
acpi_queue_thermal_check(tz);
@@ -1062,7 +1060,8 @@ static int __init acpi_thermal_init(void)
}
acpi_thermal_pm_queue = alloc_workqueue("acpi_thermal_pm",
- WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
+ WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (!acpi_thermal_pm_queue)
return -ENODEV;
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index d507d5e08435..4cf74f173c78 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -948,6 +948,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
},
},
+ /* https://gitlab.freedesktop.org/drm/amd/-/issues/4512 */
+ {
+ .callback = video_detect_force_native,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82K8"),
+ },
+ },
{ },
};
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index b02bf770aead..ff6dc957bc11 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -42,7 +42,7 @@ void acpi_enable_wakeup_devices(u8 sleep_state)
list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list,
wakeup_list) {
if (!dev->wakeup.flags.valid
- || sleep_state > (u32) dev->wakeup.sleep_state
+ || sleep_state > dev->wakeup.sleep_state
|| !(device_may_wakeup(&dev->dev)
|| dev->wakeup.prepare_count))
continue;
@@ -67,7 +67,7 @@ void acpi_disable_wakeup_devices(u8 sleep_state)
list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list,
wakeup_list) {
if (!dev->wakeup.flags.valid
- || sleep_state > (u32) dev->wakeup.sleep_state
+ || sleep_state > dev->wakeup.sleep_state
|| !(device_may_wakeup(&dev->dev)
|| dev->wakeup.prepare_count))
continue;
diff --git a/drivers/acpi/x86/lpss.c b/drivers/acpi/x86/lpss.c
index 258440b899a9..1dcb80ab0d23 100644
--- a/drivers/acpi/x86/lpss.c
+++ b/drivers/acpi/x86/lpss.c
@@ -181,7 +181,7 @@ static void byt_i2c_setup(struct lpss_private_data *pdata)
acpi_status status;
u64 uid;
- /* Expected to always be successfull, but better safe then sorry */
+ /* Expected to always be successful, but better safe than sorry */
if (!acpi_dev_uid_to_integer(pdata->adev, &uid) && uid) {
/* Detect I2C bus shared with PUNIT and ignore its d3 status */
status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
@@ -387,9 +387,6 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
{ "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
- /* Wildcat Point LPSS devices */
- { "INT3438", LPSS_ADDR(lpt_spi_dev_desc) },
-
{ }
};
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index dd0b40b9bbe8..6d4d06236f61 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -299,34 +299,13 @@ free_acpi_buffer:
ACPI_FREE(out_obj);
}
-/**
- * acpi_get_lps0_constraint - Get the LPS0 constraint for a device.
- * @adev: Device to get the constraint for.
- *
- * The LPS0 constraint is the shallowest (minimum) power state in which the
- * device can be so as to allow the platform as a whole to achieve additional
- * energy conservation by utilizing a system-wide low-power state.
- *
- * Returns:
- * - ACPI power state value of the constraint for @adev on success.
- * - Otherwise, ACPI_STATE_UNKNOWN.
- */
-int acpi_get_lps0_constraint(struct acpi_device *adev)
-{
- struct lpi_constraints *entry;
-
- for_each_lpi_constraint(entry) {
- if (adev->handle == entry->handle)
- return entry->min_dstate;
- }
-
- return ACPI_STATE_UNKNOWN;
-}
-
static void lpi_check_constraints(void)
{
struct lpi_constraints *entry;
+ if (IS_ERR_OR_NULL(lpi_constraints_table))
+ return;
+
for_each_lpi_constraint(entry) {
struct acpi_device *adev = acpi_fetch_acpi_dev(entry->handle);
@@ -508,11 +487,6 @@ static int lps0_device_attach(struct acpi_device *adev,
lps0_device_handle = adev->handle;
- if (acpi_s2idle_vendor_amd())
- lpi_device_get_constraints_amd();
- else
- lpi_device_get_constraints();
-
/*
* Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set in
* the FADT and the default suspend mode was not set from the command
@@ -539,7 +513,26 @@ static struct acpi_scan_handler lps0_handler = {
.attach = lps0_device_attach,
};
-int acpi_s2idle_prepare_late(void)
+static int acpi_s2idle_begin_lps0(void)
+{
+ if (pm_debug_messages_on && !lpi_constraints_table) {
+ if (acpi_s2idle_vendor_amd())
+ lpi_device_get_constraints_amd();
+ else
+ lpi_device_get_constraints();
+
+ /*
+ * Try to retrieve the constraints only once because failures
+ * to do so are usually sticky.
+ */
+ if (!lpi_constraints_table)
+ lpi_constraints_table = ERR_PTR(-ENODATA);
+ }
+
+ return acpi_s2idle_begin();
+}
+
+static int acpi_s2idle_prepare_late_lps0(void)
{
struct acpi_s2idle_dev_ops *handler;
@@ -585,7 +578,7 @@ int acpi_s2idle_prepare_late(void)
return 0;
}
-void acpi_s2idle_check(void)
+static void acpi_s2idle_check_lps0(void)
{
struct acpi_s2idle_dev_ops *handler;
@@ -598,7 +591,7 @@ void acpi_s2idle_check(void)
}
}
-void acpi_s2idle_restore_early(void)
+static void acpi_s2idle_restore_early_lps0(void)
{
struct acpi_s2idle_dev_ops *handler;
@@ -636,12 +629,12 @@ void acpi_s2idle_restore_early(void)
}
static const struct platform_s2idle_ops acpi_s2idle_ops_lps0 = {
- .begin = acpi_s2idle_begin,
+ .begin = acpi_s2idle_begin_lps0,
.prepare = acpi_s2idle_prepare,
- .prepare_late = acpi_s2idle_prepare_late,
- .check = acpi_s2idle_check,
+ .prepare_late = acpi_s2idle_prepare_late_lps0,
+ .check = acpi_s2idle_check_lps0,
.wake = acpi_s2idle_wake,
- .restore_early = acpi_s2idle_restore_early,
+ .restore_early = acpi_s2idle_restore_early_lps0,
.restore = acpi_s2idle_restore,
.end = acpi_s2idle_end,
};
diff --git a/drivers/amba/Kconfig b/drivers/amba/Kconfig
index fb6c7e0b4cce..14bb61ff801e 100644
--- a/drivers/amba/Kconfig
+++ b/drivers/amba/Kconfig
@@ -5,7 +5,7 @@ config ARM_AMBA
if ARM_AMBA
config TEGRA_AHB
- bool
+ bool "Enable AHB driver for NVIDIA Tegra SoCs" if COMPILE_TEST
default y if ARCH_TEGRA
help
Adds AHB configuration functionality for NVIDIA Tegra SoCs,
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 71482d639a6d..952c45ca6e48 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -138,7 +138,7 @@ static int amba_read_periphid(struct amba_device *dev)
void __iomem *tmp;
int i, ret;
- ret = dev_pm_domain_attach(&dev->dev, true);
+ ret = dev_pm_domain_attach(&dev->dev, PD_FLAG_ATTACH_POWER_ON);
if (ret) {
dev_dbg(&dev->dev, "can't get PM domain: %d\n", ret);
goto err_out;
@@ -291,15 +291,14 @@ static int amba_probe(struct device *dev)
if (ret < 0)
break;
- ret = dev_pm_domain_attach(dev, true);
+ ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
+ PD_FLAG_DETACH_POWER_OFF);
if (ret)
break;
ret = amba_get_enable_pclk(pcdev);
- if (ret) {
- dev_pm_domain_detach(dev, true);
+ if (ret)
break;
- }
pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
@@ -314,7 +313,6 @@ static int amba_probe(struct device *dev)
pm_runtime_put_noidle(dev);
amba_put_disable_pclk(pcdev);
- dev_pm_domain_detach(dev, true);
} while (0);
return ret;
@@ -336,7 +334,6 @@ static void amba_remove(struct device *dev)
pm_runtime_put_noidle(dev);
amba_put_disable_pclk(pcdev);
- dev_pm_domain_detach(dev, true);
}
static void amba_shutdown(struct device *dev)
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index c0e8b765522d..f23c3ed01810 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -144,6 +144,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn)
if (!dev)
return -EPROBE_DEFER;
ahb = dev_get_drvdata(dev);
+ put_device(dev);
val = gizmo_readl(ahb, AHB_ARBITRATION_XBAR_CTRL);
val |= AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE;
gizmo_writel(ahb, val, AHB_ARBITRATION_XBAR_CTRL);
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 07aa8ae0a058..e2e402c9d175 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -4,6 +4,7 @@ menu "Android"
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
depends on MMU
+ depends on NET
default n
help
Binder is used in Android for both communication between processes,
@@ -13,6 +14,19 @@ config ANDROID_BINDER_IPC
Android process, using Binder to identify, invoke and pass arguments
between said processes.
+config ANDROID_BINDER_IPC_RUST
+ bool "Rust version of Android Binder IPC Driver"
+ depends on RUST && MMU && !ANDROID_BINDER_IPC
+ help
+ This enables the Rust implementation of the Binder driver.
+
+ Binder is used in Android for both communication between processes,
+ and remote method invocation.
+
+ This means one Android process can call a method/routine in another
+ Android process, using Binder to identify, invoke and pass arguments
+ between said processes.
+
config ANDROID_BINDERFS
bool "Android Binderfs filesystem"
depends on ANDROID_BINDER_IPC
@@ -27,7 +41,7 @@ config ANDROID_BINDERFS
config ANDROID_BINDER_DEVICES
string "Android Binder devices"
- depends on ANDROID_BINDER_IPC
+ depends on ANDROID_BINDER_IPC || ANDROID_BINDER_IPC_RUST
default "binder,hwbinder,vndbinder"
help
Default value for the binder.devices parameter.
@@ -37,14 +51,15 @@ config ANDROID_BINDER_DEVICES
created. Each binder device has its own context manager, and is
therefore logically separated from the other devices.
-config ANDROID_BINDER_IPC_SELFTEST
- bool "Android Binder IPC Driver Selftest"
- depends on ANDROID_BINDER_IPC
+config ANDROID_BINDER_ALLOC_KUNIT_TEST
+ tristate "KUnit Tests for Android Binder Alloc" if !KUNIT_ALL_TESTS
+ depends on ANDROID_BINDER_IPC && KUNIT
+ default KUNIT_ALL_TESTS
help
- This feature allows binder selftest to run.
+ This feature builds the binder alloc KUnit tests.
- Binder selftest checks the allocation and free of binder buffers
- exhaustively with combinations of various buffer sizes and
- alignments.
+ Each test case runs using a pared-down binder_alloc struct and
+ test-specific freelist, which allows this KUnit module to be loaded
+ for testing without interfering with a running system.
endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index c9d3d0c99c25..e0c650d3898e 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -2,5 +2,6 @@
ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
-obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
-obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o binder_netlink.o
+obj-$(CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST) += tests/
+obj-$(CONFIG_ANDROID_BINDER_IPC_RUST) += binder/
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index c463ca4a8fff..535fc881c8da 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -68,10 +68,13 @@
#include <linux/sizes.h>
#include <linux/ktime.h>
+#include <kunit/visibility.h>
+
#include <uapi/linux/android/binder.h>
#include <linux/cacheflush.h>
+#include "binder_netlink.h"
#include "binder_internal.h"
#include "binder_trace.h"
@@ -848,17 +851,8 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
} else {
if (!internal)
node->local_weak_refs++;
- if (!node->has_weak_ref && list_empty(&node->work.entry)) {
- if (target_list == NULL) {
- pr_err("invalid inc weak node for %d\n",
- node->debug_id);
- return -EINVAL;
- }
- /*
- * See comment above
- */
+ if (!node->has_weak_ref && target_list && list_empty(&node->work.entry))
binder_enqueue_work_ilocked(&node->work, target_list);
- }
}
return 0;
}
@@ -1585,11 +1579,10 @@ static struct binder_thread *binder_get_txn_from(
{
struct binder_thread *from;
- spin_lock(&t->lock);
+ guard(spinlock)(&t->lock);
from = t->from;
if (from)
atomic_inc(&from->tmp_ref);
- spin_unlock(&t->lock);
return from;
}
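guard(spinlock)() is the scope-based lock helper from <linux/cleanup.h>: the lock is taken at the declaration and dropped automatically when the scope is left, so every return path is covered. A sketch assuming those headers:

	#include <linux/cleanup.h>
	#include <linux/spinlock.h>

	static int read_counter(spinlock_t *lock, const int *counter)
	{
		guard(spinlock)(lock);	/* spin_lock() now, spin_unlock() at '}' */
		return *counter;	/* unlock also runs on early returns */
	}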
@@ -2416,10 +2409,10 @@ err_fd_not_accepted:
/**
* struct binder_ptr_fixup - data to be fixed-up in target buffer
- * @offset offset in target buffer to fixup
- * @skip_size bytes to skip in copy (fixup will be written later)
- * @fixup_data data to write at fixup offset
- * @node list node
+ * @offset: offset in target buffer to fixup
+ * @skip_size: bytes to skip in copy (fixup will be written later)
+ * @fixup_data: data to write at fixup offset
+ * @node: list node
*
* This is used for the pointer fixup list (pf) which is created and consumed
* during binder_transaction() and is only accessed locally. No
@@ -2436,10 +2429,10 @@ struct binder_ptr_fixup {
/**
* struct binder_sg_copy - scatter-gather data to be copied
- * @offset offset in target buffer
- * @sender_uaddr user address in source buffer
- * @length bytes to copy
- * @node list node
+ * @offset: offset in target buffer
+ * @sender_uaddr: user address in source buffer
+ * @length: bytes to copy
+ * @node: list node
*
* This is used for the sg copy list (sgc) which is created and consumed
* during binder_transaction() and is only accessed locally. No
@@ -2992,6 +2985,69 @@ static void binder_set_txn_from_error(struct binder_transaction *t, int id,
binder_thread_dec_tmpref(from);
}
+/**
+ * binder_netlink_report() - report a transaction failure via netlink
+ * @proc: the binder proc sending the transaction
+ * @t: the binder transaction that failed
+ * @data_size: the user provided data size for the transaction
+ * @error: enum binder_driver_return_protocol returned to sender
+ */
+static void binder_netlink_report(struct binder_proc *proc,
+ struct binder_transaction *t,
+ u32 data_size,
+ u32 error)
+{
+ const char *context = proc->context->name;
+ struct sk_buff *skb;
+ void *hdr;
+
+ if (!genl_has_listeners(&binder_nl_family, &init_net,
+ BINDER_NLGRP_REPORT))
+ return;
+
+ trace_binder_netlink_report(context, t, data_size, error);
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ hdr = genlmsg_put(skb, 0, 0, &binder_nl_family, 0, BINDER_CMD_REPORT);
+ if (!hdr)
+ goto free_skb;
+
+ if (nla_put_u32(skb, BINDER_A_REPORT_ERROR, error) ||
+ nla_put_string(skb, BINDER_A_REPORT_CONTEXT, context) ||
+ nla_put_u32(skb, BINDER_A_REPORT_FROM_PID, t->from_pid) ||
+ nla_put_u32(skb, BINDER_A_REPORT_FROM_TID, t->from_tid))
+ goto cancel_skb;
+
+ if (t->to_proc &&
+ nla_put_u32(skb, BINDER_A_REPORT_TO_PID, t->to_proc->pid))
+ goto cancel_skb;
+
+ if (t->to_thread &&
+ nla_put_u32(skb, BINDER_A_REPORT_TO_TID, t->to_thread->pid))
+ goto cancel_skb;
+
+ if (t->is_reply && nla_put_flag(skb, BINDER_A_REPORT_IS_REPLY))
+ goto cancel_skb;
+
+ if (nla_put_u32(skb, BINDER_A_REPORT_FLAGS, t->flags) ||
+ nla_put_u32(skb, BINDER_A_REPORT_CODE, t->code) ||
+ nla_put_u32(skb, BINDER_A_REPORT_DATA_SIZE, data_size))
+ goto cancel_skb;
+
+ genlmsg_end(skb, hdr);
+ genlmsg_multicast(&binder_nl_family, skb, 0, BINDER_NLGRP_REPORT,
+ GFP_KERNEL);
+ return;
+
+cancel_skb:
+ genlmsg_cancel(skb, hdr);
+free_skb:
+ nlmsg_free(skb);
+}
+
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
@@ -3041,6 +3097,32 @@ static void binder_transaction(struct binder_proc *proc,
binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
binder_inner_proc_unlock(proc);
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t) {
+ binder_txn_error("%d:%d cannot allocate transaction\n",
+ thread->pid, proc->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
+ goto err_alloc_t_failed;
+ }
+ INIT_LIST_HEAD(&t->fd_fixups);
+ binder_stats_created(BINDER_STAT_TRANSACTION);
+ spin_lock_init(&t->lock);
+ t->debug_id = t_debug_id;
+ t->start_time = t_start_time;
+ t->from_pid = proc->pid;
+ t->from_tid = thread->pid;
+ t->sender_euid = task_euid(proc->tsk);
+ t->code = tr->code;
+ t->flags = tr->flags;
+ t->priority = task_nice(current);
+ t->work.type = BINDER_WORK_TRANSACTION;
+ t->is_async = !reply && (tr->flags & TF_ONE_WAY);
+ t->is_reply = reply;
+ if (!reply && !(tr->flags & TF_ONE_WAY))
+ t->from = thread;
+
if (reply) {
binder_inner_proc_lock(proc);
in_reply_to = thread->transaction_stack;
@@ -3144,10 +3226,8 @@ static void binder_transaction(struct binder_proc *proc,
}
if (!target_node) {
binder_txn_error("%d:%d cannot find target node\n",
- thread->pid, proc->pid);
- /*
- * return_error is set above
- */
+ proc->pid, thread->pid);
+ /* return_error is set above */
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_dead_binder;
@@ -3229,24 +3309,13 @@ static void binder_transaction(struct binder_proc *proc,
}
binder_inner_proc_unlock(proc);
}
+
+ t->to_proc = target_proc;
+ t->to_thread = target_thread;
if (target_thread)
e->to_thread = target_thread->pid;
e->to_proc = target_proc->pid;
- /* TODO: reuse incoming transaction for reply */
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (t == NULL) {
- binder_txn_error("%d:%d cannot allocate transaction\n",
- thread->pid, proc->pid);
- return_error = BR_FAILED_REPLY;
- return_error_param = -ENOMEM;
- return_error_line = __LINE__;
- goto err_alloc_t_failed;
- }
- INIT_LIST_HEAD(&t->fd_fixups);
- binder_stats_created(BINDER_STAT_TRANSACTION);
- spin_lock_init(&t->lock);
-
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
binder_txn_error("%d:%d cannot allocate work for transaction\n",
@@ -3258,9 +3327,6 @@ static void binder_transaction(struct binder_proc *proc,
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
- t->debug_id = t_debug_id;
- t->start_time = t_start_time;
-
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n",
@@ -3276,19 +3342,6 @@ static void binder_transaction(struct binder_proc *proc,
(u64)tr->data_size, (u64)tr->offsets_size,
(u64)extra_buffers_size);
- if (!reply && !(tr->flags & TF_ONE_WAY))
- t->from = thread;
- else
- t->from = NULL;
- t->from_pid = proc->pid;
- t->from_tid = thread->pid;
- t->sender_euid = task_euid(proc->tsk);
- t->to_proc = target_proc;
- t->to_thread = target_thread;
- t->code = tr->code;
- t->flags = tr->flags;
- t->priority = task_nice(current);
-
if (target_node && target_node->txn_security_ctx) {
u32 secid;
size_t added_size;
@@ -3681,11 +3734,13 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_copy_data_failed;
}
- if (t->buffer->oneway_spam_suspect)
+ if (t->buffer->oneway_spam_suspect) {
tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
- else
+ binder_netlink_report(proc, t, tr->data_size,
+ BR_ONEWAY_SPAM_SUSPECT);
+ } else {
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- t->work.type = BINDER_WORK_TRANSACTION;
+ }
if (reply) {
binder_enqueue_thread_work(thread, tcomplete);
@@ -3713,7 +3768,6 @@ static void binder_transaction(struct binder_proc *proc,
* the target replies (or there is an error).
*/
binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
- t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
binder_inner_proc_unlock(proc);
@@ -3734,8 +3788,11 @@ static void binder_transaction(struct binder_proc *proc,
* process and is put in a pending queue, waiting for the target
* process to be unfrozen.
*/
- if (return_error == BR_TRANSACTION_PENDING_FROZEN)
+ if (return_error == BR_TRANSACTION_PENDING_FROZEN) {
tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
+ binder_netlink_report(proc, t, tr->data_size,
+ return_error);
+ }
binder_enqueue_thread_work(thread, tcomplete);
if (return_error &&
return_error != BR_TRANSACTION_PENDING_FROZEN)
@@ -3784,9 +3841,6 @@ err_get_secctx_failed:
err_alloc_tcomplete_failed:
if (trace_binder_txn_latency_free_enabled())
binder_txn_latency_free(t);
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
-err_alloc_t_failed:
err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
@@ -3797,6 +3851,11 @@ err_invalid_target_handle:
binder_dec_node_tmpref(target_node);
}
+ binder_netlink_report(proc, t, tr->data_size, return_error);
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+err_alloc_t_failed:
+
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
proc->pid, thread->pid, reply ? "reply" :
@@ -3995,14 +4054,15 @@ binder_freeze_notification_done(struct binder_proc *proc,
/**
* binder_free_buf() - free the specified buffer
- * @proc: binder proc that owns buffer
- * @buffer: buffer to be freed
- * @is_failure: failed to send transaction
+ * @proc: binder proc that owns buffer
+ * @thread: binder thread performing the buffer release
+ * @buffer: buffer to be freed
+ * @is_failure: failed to send transaction
*
- * If buffer for an async transaction, enqueue the next async
+ * If the buffer is for an async transaction, enqueue the next async
* transaction from the node.
*
- * Cleanup buffer and free it.
+ * Cleanup the buffer and free it.
*/
static void
binder_free_buf(struct binder_proc *proc,
@@ -4609,6 +4669,8 @@ static int binder_wait_for_work(struct binder_thread *thread,
*
* If we fail to allocate an fd, skip the install and release
* any fds that have already been allocated.
+ *
+ * Return: 0 on success, a negative errno code on failure.
*/
static int binder_apply_fd_fixups(struct binder_proc *proc,
struct binder_transaction *t)
@@ -5384,10 +5446,9 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
- if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
- ret = -EFAULT;
- goto out;
- }
+ if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
+ return -EFAULT;
+
binder_debug(BINDER_DEBUG_READ_WRITE,
"%d:%d write %lld at %016llx, read %lld at %016llx\n",
proc->pid, thread->pid,
@@ -5402,8 +5463,6 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
- if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
- ret = -EFAULT;
goto out;
}
}
@@ -5417,22 +5476,17 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
if (!binder_worklist_empty_ilocked(&proc->todo))
binder_wakeup_proc_ilocked(proc);
binder_inner_proc_unlock(proc);
- if (ret < 0) {
- if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
- ret = -EFAULT;
+ if (ret < 0)
goto out;
- }
}
binder_debug(BINDER_DEBUG_READ_WRITE,
"%d:%d wrote %lld of %lld, read return %lld of %lld\n",
proc->pid, thread->pid,
(u64)bwr.write_consumed, (u64)bwr.write_size,
(u64)bwr.read_consumed, (u64)bwr.read_size);
- if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
- ret = -EFAULT;
- goto out;
- }
out:
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+ ret = -EFAULT;
return ret;
}
@@ -5445,32 +5499,28 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp,
struct binder_node *new_node;
kuid_t curr_euid = current_euid();
- mutex_lock(&context->context_mgr_node_lock);
+ guard(mutex)(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
- ret = -EBUSY;
- goto out;
+ return -EBUSY;
}
ret = security_binder_set_context_mgr(proc->cred);
if (ret < 0)
- goto out;
+ return ret;
if (uid_valid(context->binder_context_mgr_uid)) {
if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
from_kuid(&init_user_ns, curr_euid),
from_kuid(&init_user_ns,
context->binder_context_mgr_uid));
- ret = -EPERM;
- goto out;
+ return -EPERM;
}
} else {
context->binder_context_mgr_uid = curr_euid;
}
new_node = binder_new_node(proc, fbo);
- if (!new_node) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!new_node)
+ return -ENOMEM;
binder_node_lock(new_node);
new_node->local_weak_refs++;
new_node->local_strong_refs++;
@@ -5479,8 +5529,6 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp,
context->binder_context_mgr_node = new_node;
binder_node_unlock(new_node);
binder_put_node(new_node);
-out:
- mutex_unlock(&context->context_mgr_node_lock);
return ret;
}
@@ -5716,11 +5764,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct binder_thread *thread;
void __user *ubuf = (void __user *)arg;
- /*pr_info("binder_ioctl: %d:%d %x %lx\n",
- proc->pid, current->pid, cmd, arg);*/
-
- binder_selftest_alloc(&proc->alloc);
-
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@@ -5956,10 +5999,11 @@ static void binder_vma_close(struct vm_area_struct *vma)
binder_alloc_vma_close(&proc->alloc);
}
-static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
+VISIBLE_IF_KUNIT vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}
+EXPORT_SYMBOL_IF_KUNIT(binder_vm_fault);
static const struct vm_operations_struct binder_vm_ops = {
.open = binder_vma_open,
@@ -6128,7 +6172,7 @@ static int binder_release(struct inode *nodp, struct file *filp)
debugfs_remove(proc->debugfs_entry);
if (proc->binderfs_entry) {
- binderfs_remove_file(proc->binderfs_entry);
+ simple_recursive_removal(proc->binderfs_entry, NULL);
proc->binderfs_entry = NULL;
}
@@ -6322,14 +6366,13 @@ static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
- mutex_lock(&binder_deferred_lock);
+ guard(mutex)(&binder_deferred_lock);
proc->deferred_work |= defer;
if (hlist_unhashed(&proc->deferred_work_node)) {
hlist_add_head(&proc->deferred_work_node,
&binder_deferred_list);
schedule_work(&binder_deferred_work);
}
- mutex_unlock(&binder_deferred_lock);
}
static void print_binder_transaction_ilocked(struct seq_file *m,
@@ -6344,13 +6387,13 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
spin_lock(&t->lock);
to_proc = t->to_proc;
seq_printf(m,
- "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
+ "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld a%d r%d elapsed %lldms",
prefix, t->debug_id, t,
t->from_pid,
t->from_tid,
to_proc ? to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
- t->code, t->flags, t->priority, t->need_reply,
+ t->code, t->flags, t->priority, t->is_async, t->is_reply,
ktime_ms_delta(current_time, t->start_time));
spin_unlock(&t->lock);
@@ -6871,14 +6914,13 @@ static int proc_show(struct seq_file *m, void *unused)
struct binder_proc *itr;
int pid = (unsigned long)m->private;
- mutex_lock(&binder_procs_lock);
+ guard(mutex)(&binder_procs_lock);
hlist_for_each_entry(itr, &binder_procs, proc_node) {
if (itr->pid == pid) {
seq_puts(m, "binder proc state:\n");
print_binder_proc(m, itr, true, false);
}
}
- mutex_unlock(&binder_procs_lock);
return 0;
}
@@ -6996,16 +7038,14 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
void binder_add_device(struct binder_device *device)
{
- spin_lock(&binder_devices_lock);
+ guard(spinlock)(&binder_devices_lock);
hlist_add_head(&device->hlist, &binder_devices);
- spin_unlock(&binder_devices_lock);
}
void binder_remove_device(struct binder_device *device)
{
- spin_lock(&binder_devices_lock);
+ guard(spinlock)(&binder_devices_lock);
hlist_del_init(&device->hlist);
- spin_unlock(&binder_devices_lock);
}
static int __init init_binder_device(const char *name)
@@ -7085,12 +7125,19 @@ static int __init binder_init(void)
}
}
- ret = init_binderfs();
+ ret = genl_register_family(&binder_nl_family);
if (ret)
goto err_init_binder_device_failed;
+ ret = init_binderfs();
+ if (ret)
+ goto err_init_binderfs_failed;
+
return ret;
+err_init_binderfs_failed:
+ genl_unregister_family(&binder_nl_family);
+
err_init_binder_device_failed:
hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
misc_deregister(&device->miscdev);
@@ -7111,5 +7158,3 @@ device_initcall(binder_init);
#define CREATE_TRACE_POINTS
#include "binder_trace.h"
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/android/binder/Makefile b/drivers/android/binder/Makefile
new file mode 100644
index 000000000000..09eabb527fa0
--- /dev/null
+++ b/drivers/android/binder/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y += -I$(src) # needed for trace events
+
+obj-$(CONFIG_ANDROID_BINDER_IPC_RUST) += rust_binder.o
+rust_binder-y := \
+ rust_binder_main.o \
+ rust_binderfs.o \
+ rust_binder_events.o \
+ page_range_helper.o
diff --git a/drivers/android/binder/allocation.rs b/drivers/android/binder/allocation.rs
new file mode 100644
index 000000000000..7f65a9c3a0e5
--- /dev/null
+++ b/drivers/android/binder/allocation.rs
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use core::mem::{size_of, size_of_val, MaybeUninit};
+use core::ops::Range;
+
+use kernel::{
+ bindings,
+ fs::file::{File, FileDescriptorReservation},
+ prelude::*,
+ sync::{aref::ARef, Arc},
+ transmute::{AsBytes, FromBytes},
+ uaccess::UserSliceReader,
+ uapi,
+};
+
+use crate::{
+ deferred_close::DeferredFdCloser,
+ defs::*,
+ node::{Node, NodeRef},
+ process::Process,
+ DArc,
+};
+
+#[derive(Default)]
+pub(crate) struct AllocationInfo {
+ /// Range within the allocation where we can find the offsets to the object descriptors.
+ pub(crate) offsets: Option<Range<usize>>,
+    /// The target node of the transaction this allocation is associated with.
+ /// Not set for replies.
+ pub(crate) target_node: Option<NodeRef>,
+ /// When this allocation is dropped, call `pending_oneway_finished` on the node.
+ ///
+    /// This is used to serialize oneway transactions on the same node. Binder guarantees that
+ /// oneway transactions to the same node are delivered sequentially in the order they are sent.
+ pub(crate) oneway_node: Option<DArc<Node>>,
+ /// Zero the data in the buffer on free.
+ pub(crate) clear_on_free: bool,
+ /// List of files embedded in this transaction.
+ file_list: FileList,
+}
+
+/// Represents an allocation that the kernel is currently using.
+///
+/// When allocations are idle, the range allocator holds the data related to them.
+///
+/// # Invariants
+///
+/// This allocation corresponds to an allocation in the range allocator, so the relevant pages are
+/// marked in use in the page range.
+pub(crate) struct Allocation {
+ pub(crate) offset: usize,
+ size: usize,
+ pub(crate) ptr: usize,
+ pub(crate) process: Arc<Process>,
+ allocation_info: Option<AllocationInfo>,
+ free_on_drop: bool,
+ pub(crate) oneway_spam_detected: bool,
+ #[allow(dead_code)]
+ pub(crate) debug_id: usize,
+}
+
+impl Allocation {
+ pub(crate) fn new(
+ process: Arc<Process>,
+ debug_id: usize,
+ offset: usize,
+ size: usize,
+ ptr: usize,
+ oneway_spam_detected: bool,
+ ) -> Self {
+ Self {
+ process,
+ offset,
+ size,
+ ptr,
+ debug_id,
+ oneway_spam_detected,
+ allocation_info: None,
+ free_on_drop: true,
+ }
+ }
+
+ fn size_check(&self, offset: usize, size: usize) -> Result {
+ let overflow_fail = offset.checked_add(size).is_none();
+ let cmp_size_fail = offset.wrapping_add(size) > self.size;
+ if overflow_fail || cmp_size_fail {
+ return Err(EFAULT);
+ }
+ Ok(())
+ }
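+
+    // For example, with `self.size == 128`, `size_check` behaves as follows:
+    //
+    //   size_check(0, 128)        -> Ok(())      (exactly the whole buffer)
+    //   size_check(120, 16)       -> Err(EFAULT) (120 + 16 > 128)
+    //   size_check(usize::MAX, 2) -> Err(EFAULT) (the checked_add overflows)
+    //
+    // The `wrapping_add` in the second comparison cannot produce a wrong
+    // answer, because the overflowing case was already rejected by `checked_add`.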
+
+ pub(crate) fn copy_into(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ self.size_check(offset, size)?;
+
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe {
+ self.process
+ .pages
+ .copy_from_user_slice(reader, self.offset + offset, size)
+ }
+ }
+
+ pub(crate) fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+ self.size_check(offset, size_of::<T>())?;
+
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe { self.process.pages.read(self.offset + offset) }
+ }
+
+ pub(crate) fn write<T: ?Sized>(&self, offset: usize, obj: &T) -> Result {
+ self.size_check(offset, size_of_val::<T>(obj))?;
+
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe { self.process.pages.write(self.offset + offset, obj) }
+ }
+
+ pub(crate) fn fill_zero(&self) -> Result {
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe { self.process.pages.fill_zero(self.offset, self.size) }
+ }
+
+ pub(crate) fn keep_alive(mut self) {
+ self.process
+ .buffer_make_freeable(self.offset, self.allocation_info.take());
+ self.free_on_drop = false;
+ }
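+
+    // After `keep_alive` returns, dropping the `Allocation` no longer frees the
+    // buffer: `free_on_drop` is cleared and the cleanup metadata is handed over
+    // to the owning process via `buffer_make_freeable`, so the buffer stays
+    // live until the recipient process releases it.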
+
+ pub(crate) fn set_info(&mut self, info: AllocationInfo) {
+ self.allocation_info = Some(info);
+ }
+
+ pub(crate) fn get_or_init_info(&mut self) -> &mut AllocationInfo {
+ self.allocation_info.get_or_insert_with(Default::default)
+ }
+
+ pub(crate) fn set_info_offsets(&mut self, offsets: Range<usize>) {
+ self.get_or_init_info().offsets = Some(offsets);
+ }
+
+ pub(crate) fn set_info_oneway_node(&mut self, oneway_node: DArc<Node>) {
+ self.get_or_init_info().oneway_node = Some(oneway_node);
+ }
+
+ pub(crate) fn set_info_clear_on_drop(&mut self) {
+ self.get_or_init_info().clear_on_free = true;
+ }
+
+ pub(crate) fn set_info_target_node(&mut self, target_node: NodeRef) {
+ self.get_or_init_info().target_node = Some(target_node);
+ }
+
+ /// Reserve enough space to push at least `num_fds` fds.
+ pub(crate) fn info_add_fd_reserve(&mut self, num_fds: usize) -> Result {
+ self.get_or_init_info()
+ .file_list
+ .files_to_translate
+ .reserve(num_fds, GFP_KERNEL)?;
+
+ Ok(())
+ }
+
+ pub(crate) fn info_add_fd(
+ &mut self,
+ file: ARef<File>,
+ buffer_offset: usize,
+ close_on_free: bool,
+ ) -> Result {
+ self.get_or_init_info().file_list.files_to_translate.push(
+ FileEntry {
+ file,
+ buffer_offset,
+ close_on_free,
+ },
+ GFP_KERNEL,
+ )?;
+
+ Ok(())
+ }
+
+ pub(crate) fn set_info_close_on_free(&mut self, cof: FdsCloseOnFree) {
+ self.get_or_init_info().file_list.close_on_free = cof.0;
+ }
+
+ pub(crate) fn translate_fds(&mut self) -> Result<TranslatedFds> {
+ let file_list = match self.allocation_info.as_mut() {
+ Some(info) => &mut info.file_list,
+ None => return Ok(TranslatedFds::new()),
+ };
+
+ let files = core::mem::take(&mut file_list.files_to_translate);
+
+ let num_close_on_free = files.iter().filter(|entry| entry.close_on_free).count();
+ let mut close_on_free = KVec::with_capacity(num_close_on_free, GFP_KERNEL)?;
+
+ let mut reservations = KVec::with_capacity(files.len(), GFP_KERNEL)?;
+ for file_info in files {
+ let res = FileDescriptorReservation::get_unused_fd_flags(bindings::O_CLOEXEC)?;
+ let fd = res.reserved_fd();
+ self.write::<u32>(file_info.buffer_offset, &fd)?;
+
+ reservations.push(
+ Reservation {
+ res,
+ file: file_info.file,
+ },
+ GFP_KERNEL,
+ )?;
+ if file_info.close_on_free {
+ close_on_free.push(fd, GFP_KERNEL)?;
+ }
+ }
+
+ Ok(TranslatedFds {
+ reservations,
+ close_on_free: FdsCloseOnFree(close_on_free),
+ })
+ }
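+
+    // Fd translation is deliberately two-phase: `translate_fds` only reserves
+    // fd numbers and writes them into the buffer, while the files themselves
+    // are not installed until `TranslatedFds::commit` runs. If the transaction
+    // fails before `commit`, the unconsumed reservations are simply dropped and
+    // the files are never exposed to the target process.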
+
+ /// Should the looper return to userspace when freeing this allocation?
+ pub(crate) fn looper_need_return_on_free(&self) -> bool {
+ // Closing fds involves pushing task_work for execution when we return to userspace. Hence,
+ // we should return to userspace asap if we are closing fds.
+ match self.allocation_info {
+ Some(ref info) => !info.file_list.close_on_free.is_empty(),
+ None => false,
+ }
+ }
+}
+
+impl Drop for Allocation {
+ fn drop(&mut self) {
+ if !self.free_on_drop {
+ return;
+ }
+
+ if let Some(mut info) = self.allocation_info.take() {
+ if let Some(oneway_node) = info.oneway_node.as_ref() {
+ oneway_node.pending_oneway_finished();
+ }
+
+ info.target_node = None;
+
+ if let Some(offsets) = info.offsets.clone() {
+ let view = AllocationView::new(self, offsets.start);
+ for i in offsets.step_by(size_of::<usize>()) {
+ if view.cleanup_object(i).is_err() {
+ pr_warn!("Error cleaning up object at offset {}\n", i)
+ }
+ }
+ }
+
+ for &fd in &info.file_list.close_on_free {
+ let closer = match DeferredFdCloser::new(GFP_KERNEL) {
+ Ok(closer) => closer,
+ Err(kernel::alloc::AllocError) => {
+ // Ignore allocation failures.
+ break;
+ }
+ };
+
+ // Here, we ignore errors. The operation can fail if the fd is not valid, or if the
+ // method is called from a kthread. However, this is always called from a syscall,
+ // so the latter case cannot happen, and we don't care about the first case.
+ let _ = closer.close_fd(fd);
+ }
+
+ if info.clear_on_free {
+ if let Err(e) = self.fill_zero() {
+ pr_warn!("Failed to clear data on free: {:?}", e);
+ }
+ }
+ }
+
+ self.process.buffer_raw_free(self.ptr);
+ }
+}
+
+/// A wrapper around `Allocation` that is being created.
+///
+/// If the allocation is destroyed while wrapped in this wrapper, then the allocation will be
+/// considered to be part of a failed transaction. Successful transactions avoid that by calling
+/// `success`, which skips the destructor.
+#[repr(transparent)]
+pub(crate) struct NewAllocation(pub(crate) Allocation);
+
+impl NewAllocation {
+ pub(crate) fn success(self) -> Allocation {
+ // This skips the destructor.
+ //
+ // SAFETY: This type is `#[repr(transparent)]`, so the layout matches.
+ unsafe { core::mem::transmute(self) }
+ }
+}
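+
+// A minimal standalone sketch of the idiom used by `success` above (purely
+// illustrative, not driver code): a `#[repr(transparent)]` wrapper has the
+// same layout as its single field, so `transmute` can move the inner value
+// out without running the wrapper's destructor.
+//
+//     #[repr(transparent)]
+//     struct Armed(Resource);          // dropping `Armed` would run cleanup
+//     impl Armed {
+//         fn defuse(self) -> Resource {
+//             // SAFETY: `#[repr(transparent)]` guarantees identical layout.
+//             unsafe { core::mem::transmute(self) }
+//         }
+//     }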
+
+impl core::ops::Deref for NewAllocation {
+ type Target = Allocation;
+ fn deref(&self) -> &Allocation {
+ &self.0
+ }
+}
+
+impl core::ops::DerefMut for NewAllocation {
+ fn deref_mut(&mut self) -> &mut Allocation {
+ &mut self.0
+ }
+}
+
+/// A view into the beginning of an allocation.
+///
+/// All attempts to read or write outside of the view will fail. To intentionally access outside of
+/// this view, use the `alloc` field of this struct directly.
+pub(crate) struct AllocationView<'a> {
+ pub(crate) alloc: &'a mut Allocation,
+ limit: usize,
+}
+
+impl<'a> AllocationView<'a> {
+ pub(crate) fn new(alloc: &'a mut Allocation, limit: usize) -> Self {
+ AllocationView { alloc, limit }
+ }
+
+ pub(crate) fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+ if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.read(offset)
+ }
+
+ pub(crate) fn write<T: AsBytes>(&self, offset: usize, obj: &T) -> Result {
+ if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.write(offset, obj)
+ }
+
+ pub(crate) fn copy_into(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ if offset.checked_add(size).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.copy_into(reader, offset, size)
+ }
+
+ pub(crate) fn transfer_binder_object(
+ &self,
+ offset: usize,
+ obj: &uapi::flat_binder_object,
+ strong: bool,
+ node_ref: NodeRef,
+ ) -> Result {
+ let mut newobj = FlatBinderObject::default();
+ let node = node_ref.node.clone();
+ if Arc::ptr_eq(&node_ref.node.owner, &self.alloc.process) {
+ // The receiving process is the owner of the node, so send it a binder object (instead
+ // of a handle).
+ let (ptr, cookie) = node.get_id();
+ newobj.hdr.type_ = if strong {
+ BINDER_TYPE_BINDER
+ } else {
+ BINDER_TYPE_WEAK_BINDER
+ };
+ newobj.flags = obj.flags;
+ newobj.__bindgen_anon_1.binder = ptr as _;
+ newobj.cookie = cookie as _;
+ self.write(offset, &newobj)?;
+ // Increment the user ref count on the node. It will be decremented as part of the
+ // destruction of the buffer, when we see a binder or weak-binder object.
+ node.update_refcount(true, 1, strong);
+ } else {
+ // The receiving process is different from the owner, so we need to insert a handle to
+ // the binder object.
+ let handle = self
+ .alloc
+ .process
+ .as_arc_borrow()
+ .insert_or_update_handle(node_ref, false)?;
+ newobj.hdr.type_ = if strong {
+ BINDER_TYPE_HANDLE
+ } else {
+ BINDER_TYPE_WEAK_HANDLE
+ };
+ newobj.flags = obj.flags;
+ newobj.__bindgen_anon_1.handle = handle;
+ if self.write(offset, &newobj).is_err() {
+ // Decrement ref count on the handle we just created.
+ let _ = self
+ .alloc
+ .process
+ .as_arc_borrow()
+ .update_ref(handle, false, strong);
+ return Err(EINVAL);
+ }
+ }
+
+ Ok(())
+ }
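+
+    // In other words, `transfer_binder_object` picks the wire representation
+    // based on where the object lands: the process that owns the node receives
+    // its original (ptr, cookie) pair as a BINDER_TYPE_{WEAK_}BINDER, while
+    // every other process receives a BINDER_TYPE_{WEAK_}HANDLE resolved
+    // through its own handle table.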
+
+ fn cleanup_object(&self, index_offset: usize) -> Result {
+ let offset = self.alloc.read(index_offset)?;
+ let header = self.read::<BinderObjectHeader>(offset)?;
+ match header.type_ {
+ BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => {
+ let obj = self.read::<FlatBinderObject>(offset)?;
+ let strong = header.type_ == BINDER_TYPE_BINDER;
+ // SAFETY: The type is `BINDER_TYPE_{WEAK_}BINDER`, so the `binder` field is
+ // populated.
+ let ptr = unsafe { obj.__bindgen_anon_1.binder };
+ let cookie = obj.cookie;
+ self.alloc.process.update_node(ptr, cookie, strong);
+ Ok(())
+ }
+ BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => {
+ let obj = self.read::<FlatBinderObject>(offset)?;
+ let strong = header.type_ == BINDER_TYPE_HANDLE;
+ // SAFETY: The type is `BINDER_TYPE_{WEAK_}HANDLE`, so the `handle` field is
+ // populated.
+ let handle = unsafe { obj.__bindgen_anon_1.handle };
+ self.alloc
+ .process
+ .as_arc_borrow()
+ .update_ref(handle, false, strong)
+ }
+ _ => Ok(()),
+ }
+ }
+}
+
+/// A binder object as it is serialized.
+///
+/// # Invariants
+///
+/// All bytes must be initialized, and the value of `self.hdr.type_` must be one of the allowed
+/// types.
+#[repr(C)]
+pub(crate) union BinderObject {
+ hdr: uapi::binder_object_header,
+ fbo: uapi::flat_binder_object,
+ fdo: uapi::binder_fd_object,
+ bbo: uapi::binder_buffer_object,
+ fdao: uapi::binder_fd_array_object,
+}
+
+/// A view into a `BinderObject` that can be used in a match statement.
+pub(crate) enum BinderObjectRef<'a> {
+ Binder(&'a mut uapi::flat_binder_object),
+ Handle(&'a mut uapi::flat_binder_object),
+ Fd(&'a mut uapi::binder_fd_object),
+ Ptr(&'a mut uapi::binder_buffer_object),
+ Fda(&'a mut uapi::binder_fd_array_object),
+}
+
+impl BinderObject {
+ pub(crate) fn read_from(reader: &mut UserSliceReader) -> Result<BinderObject> {
+ let object = Self::read_from_inner(|slice| {
+ let read_len = usize::min(slice.len(), reader.len());
+ reader.clone_reader().read_slice(&mut slice[..read_len])?;
+ Ok(())
+ })?;
+
+        // If the object type is smaller than the largest object size, then we've read more
+        // bytes than we needed to. However, we used `.clone_reader()` to avoid advancing the
+ // original reader. Now, we call `skip` so that the caller's reader is advanced by the
+ // right amount.
+ //
+ // The `skip` call fails if the reader doesn't have `size` bytes available. This could
+ // happen if the type header corresponds to an object type that is larger than the rest of
+ // the reader.
+ //
+ // Any extra bytes beyond the size of the object are inaccessible after this call, so
+ // reading them again from the `reader` later does not result in TOCTOU bugs.
+ reader.skip(object.size())?;
+
+ Ok(object)
+ }
+
+ /// Use the provided reader closure to construct a `BinderObject`.
+ ///
+ /// The closure should write the bytes for the object into the provided slice.
+ pub(crate) fn read_from_inner<R>(reader: R) -> Result<BinderObject>
+ where
+ R: FnOnce(&mut [u8; size_of::<BinderObject>()]) -> Result<()>,
+ {
+ let mut obj = MaybeUninit::<BinderObject>::zeroed();
+
+ // SAFETY: The lengths of `BinderObject` and `[u8; size_of::<BinderObject>()]` are equal,
+ // and the byte array has an alignment requirement of one, so the pointer cast is okay.
+ // Additionally, `obj` was initialized to zeros, so the byte array will not be
+ // uninitialized.
+ (reader)(unsafe { &mut *obj.as_mut_ptr().cast() })?;
+
+ // SAFETY: The entire object is initialized, so accessing this field is safe.
+ let type_ = unsafe { obj.assume_init_ref().hdr.type_ };
+ if Self::type_to_size(type_).is_none() {
+            // The value of `obj.hdr.type_` was invalid.
+ return Err(EINVAL);
+ }
+
+ // SAFETY: All bytes are initialized (since we zeroed them at the start) and we checked
+ // that `self.hdr.type_` is one of the allowed types, so the type invariants are satisfied.
+ unsafe { Ok(obj.assume_init()) }
+ }
+
+ pub(crate) fn as_ref(&mut self) -> BinderObjectRef<'_> {
+ use BinderObjectRef::*;
+ // SAFETY: The constructor ensures that all bytes of `self` are initialized, and all
+ // variants of this union accept all initialized bit patterns.
+ unsafe {
+ match self.hdr.type_ {
+ BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => Binder(&mut self.fbo),
+ BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => Handle(&mut self.fbo),
+ BINDER_TYPE_FD => Fd(&mut self.fdo),
+ BINDER_TYPE_PTR => Ptr(&mut self.bbo),
+ BINDER_TYPE_FDA => Fda(&mut self.fdao),
+ // SAFETY: By the type invariant, the value of `self.hdr.type_` cannot have any
+ // other value than the ones checked above.
+ _ => core::hint::unreachable_unchecked(),
+ }
+ }
+ }
+
+ pub(crate) fn size(&self) -> usize {
+ // SAFETY: The entire object is initialized, so accessing this field is safe.
+ let type_ = unsafe { self.hdr.type_ };
+
+ // SAFETY: The type invariants guarantee that the type field is correct.
+ unsafe { Self::type_to_size(type_).unwrap_unchecked() }
+ }
+
+ fn type_to_size(type_: u32) -> Option<usize> {
+ match type_ {
+ BINDER_TYPE_WEAK_BINDER => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_BINDER => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_WEAK_HANDLE => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_HANDLE => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_FD => Some(size_of::<uapi::binder_fd_object>()),
+ BINDER_TYPE_PTR => Some(size_of::<uapi::binder_buffer_object>()),
+ BINDER_TYPE_FDA => Some(size_of::<uapi::binder_fd_array_object>()),
+ _ => None,
+ }
+ }
+}
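+
+// Reading through `BinderObject::read_from` therefore always consumes exactly
+// `object.size()` bytes from the caller's reader, even though up to
+// `size_of::<BinderObject>()` bytes are peeked through the cloned reader. For
+// instance, a BINDER_TYPE_FD object advances the reader by only
+// `size_of::<uapi::binder_fd_object>()` bytes.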
+
+#[derive(Default)]
+struct FileList {
+ files_to_translate: KVec<FileEntry>,
+ close_on_free: KVec<u32>,
+}
+
+struct FileEntry {
+ /// The file for which a descriptor will be created in the recipient process.
+ file: ARef<File>,
+ /// The offset in the buffer where the file descriptor is stored.
+ buffer_offset: usize,
+ /// Whether this fd should be closed when the allocation is freed.
+ close_on_free: bool,
+}
+
+pub(crate) struct TranslatedFds {
+ reservations: KVec<Reservation>,
+ /// If commit is called, then these fds should be closed. (If commit is not called, then they
+ /// shouldn't be closed.)
+ close_on_free: FdsCloseOnFree,
+}
+
+struct Reservation {
+ res: FileDescriptorReservation,
+ file: ARef<File>,
+}
+
+impl TranslatedFds {
+ pub(crate) fn new() -> Self {
+ Self {
+ reservations: KVec::new(),
+ close_on_free: FdsCloseOnFree(KVec::new()),
+ }
+ }
+
+ pub(crate) fn commit(self) -> FdsCloseOnFree {
+ for entry in self.reservations {
+ entry.res.fd_install(entry.file);
+ }
+
+ self.close_on_free
+ }
+}
+
+pub(crate) struct FdsCloseOnFree(KVec<u32>);
diff --git a/drivers/android/binder/context.rs b/drivers/android/binder/context.rs
new file mode 100644
index 000000000000..3d135ec03ca7
--- /dev/null
+++ b/drivers/android/binder/context.rs
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ error::Error,
+ list::{List, ListArc, ListLinks},
+ prelude::*,
+ security,
+ str::{CStr, CString},
+ sync::{Arc, Mutex},
+ task::Kuid,
+};
+
+use crate::{error::BinderError, node::NodeRef, process::Process};
+
+kernel::sync::global_lock! {
+ // SAFETY: We call `init` in the module initializer, so it's initialized before first use.
+ pub(crate) unsafe(uninit) static CONTEXTS: Mutex<ContextList> = ContextList {
+ list: List::new(),
+ };
+}
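+
+// `CONTEXTS` is the global registry of binder contexts: `Context::new` pushes
+// each newly created context onto this list, and `Context::deregister` removes
+// it again when the corresponding device file is unlinked.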
+
+pub(crate) struct ContextList {
+ list: List<Context>,
+}
+
+pub(crate) fn get_all_contexts() -> Result<KVec<Arc<Context>>> {
+ let lock = CONTEXTS.lock();
+
+ let count = lock.list.iter().count();
+
+ let mut ctxs = KVec::with_capacity(count, GFP_KERNEL)?;
+ for ctx in &lock.list {
+ ctxs.push(Arc::from(ctx), GFP_KERNEL)?;
+ }
+ Ok(ctxs)
+}
+
+/// This struct keeps track of the processes using this context, and which process is the context
+/// manager.
+struct Manager {
+ node: Option<NodeRef>,
+ uid: Option<Kuid>,
+ all_procs: List<Process>,
+}
+
+/// There is one context per binder file (/dev/binder, /dev/hwbinder, etc.).
+#[pin_data]
+pub(crate) struct Context {
+ #[pin]
+ manager: Mutex<Manager>,
+ pub(crate) name: CString,
+ #[pin]
+ links: ListLinks,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Context { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for Context {
+ using ListLinks { self.links };
+ }
+}
+
+impl Context {
+ pub(crate) fn new(name: &CStr) -> Result<Arc<Self>> {
+ let name = CString::try_from(name)?;
+ let list_ctx = ListArc::pin_init::<Error>(
+ try_pin_init!(Context {
+ name,
+ links <- ListLinks::new(),
+ manager <- kernel::new_mutex!(Manager {
+ all_procs: List::new(),
+ node: None,
+ uid: None,
+ }, "Context::manager"),
+ }),
+ GFP_KERNEL,
+ )?;
+
+ let ctx = list_ctx.clone_arc();
+ CONTEXTS.lock().list.push_back(list_ctx);
+
+ Ok(ctx)
+ }
+
+ /// Called when the file for this context is unlinked.
+ ///
+ /// No-op if called twice.
+ pub(crate) fn deregister(&self) {
+ // SAFETY: We never add the context to any other linked list than this one, so it is either
+ // in this list, or not in any list.
+ unsafe { CONTEXTS.lock().list.remove(self) };
+ }
+
+ pub(crate) fn register_process(self: &Arc<Self>, proc: ListArc<Process>) {
+ if !Arc::ptr_eq(self, &proc.ctx) {
+ pr_err!("Context::register_process called on the wrong context.");
+ return;
+ }
+ self.manager.lock().all_procs.push_back(proc);
+ }
+
+ pub(crate) fn deregister_process(self: &Arc<Self>, proc: &Process) {
+ if !Arc::ptr_eq(self, &proc.ctx) {
+ pr_err!("Context::deregister_process called on the wrong context.");
+ return;
+ }
+ // SAFETY: We just checked that this is the right list.
+ unsafe { self.manager.lock().all_procs.remove(proc) };
+ }
+
+ pub(crate) fn set_manager_node(&self, node_ref: NodeRef) -> Result {
+ let mut manager = self.manager.lock();
+ if manager.node.is_some() {
+ pr_warn!("BINDER_SET_CONTEXT_MGR already set");
+ return Err(EBUSY);
+ }
+ security::binder_set_context_mgr(&node_ref.node.owner.cred)?;
+
+ // If the context manager has been set before, ensure that we use the same euid.
+ let caller_uid = Kuid::current_euid();
+ if let Some(ref uid) = manager.uid {
+ if *uid != caller_uid {
+ return Err(EPERM);
+ }
+ }
+
+ manager.node = Some(node_ref);
+ manager.uid = Some(caller_uid);
+ Ok(())
+ }
+
+ pub(crate) fn unset_manager_node(&self) {
+ let node_ref = self.manager.lock().node.take();
+ drop(node_ref);
+ }
+
+ pub(crate) fn get_manager_node(&self, strong: bool) -> Result<NodeRef, BinderError> {
+ self.manager
+ .lock()
+ .node
+ .as_ref()
+ .ok_or_else(BinderError::new_dead)?
+ .clone(strong)
+ .map_err(BinderError::from)
+ }
+
+ pub(crate) fn for_each_proc<F>(&self, mut func: F)
+ where
+ F: FnMut(&Process),
+ {
+ let lock = self.manager.lock();
+ for proc in &lock.all_procs {
+ func(&proc);
+ }
+ }
+
+ pub(crate) fn get_all_procs(&self) -> Result<KVec<Arc<Process>>> {
+ let lock = self.manager.lock();
+ let count = lock.all_procs.iter().count();
+
+ let mut procs = KVec::with_capacity(count, GFP_KERNEL)?;
+ for proc in &lock.all_procs {
+ procs.push(Arc::from(proc), GFP_KERNEL)?;
+ }
+ Ok(procs)
+ }
+
+ pub(crate) fn get_procs_with_pid(&self, pid: i32) -> Result<KVec<Arc<Process>>> {
+ let orig = self.get_all_procs()?;
+ let mut backing = KVec::with_capacity(orig.len(), GFP_KERNEL)?;
+ for proc in orig.into_iter().filter(|proc| proc.task.pid() == pid) {
+ backing.push(proc, GFP_KERNEL)?;
+ }
+ Ok(backing)
+ }
+}
diff --git a/drivers/android/binder/deferred_close.rs b/drivers/android/binder/deferred_close.rs
new file mode 100644
index 000000000000..ac895c04d0cb
--- /dev/null
+++ b/drivers/android/binder/deferred_close.rs
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Logic for closing files in a deferred manner.
+//!
+//! This logic could make sense to live in `kernel::fs`, but it was rejected there for being
+//! too Binder-specific.
+
+use core::mem::MaybeUninit;
+use kernel::{
+ alloc::{AllocError, Flags},
+ bindings,
+ prelude::*,
+};
+
+/// Helper used for closing file descriptors in a way that is safe even if the file is currently
+/// held using `fdget`.
+///
+/// Additional motivation can be found in commit 80cd795630d6 ("binder: fix use-after-free due to
+/// ksys_close() during fdget()") and in the comments on `binder_do_fd_close`.
+pub(crate) struct DeferredFdCloser {
+ inner: KBox<DeferredFdCloserInner>,
+}
+
+/// SAFETY: This just holds an allocation with no real content, so there's no safety issue with
+/// moving it across threads.
+unsafe impl Send for DeferredFdCloser {}
+/// SAFETY: This just holds an allocation with no real content, so there's no safety issue with
+/// sharing it across threads.
+unsafe impl Sync for DeferredFdCloser {}
+
+/// # Invariants
+///
+/// If the `file` pointer is non-null, then it points at a `struct file` and owns a refcount to
+/// that file.
+#[repr(C)]
+struct DeferredFdCloserInner {
+ twork: MaybeUninit<bindings::callback_head>,
+ file: *mut bindings::file,
+}
+
+impl DeferredFdCloser {
+ /// Create a new [`DeferredFdCloser`].
+ pub(crate) fn new(flags: Flags) -> Result<Self, AllocError> {
+ Ok(Self {
+ // INVARIANT: The `file` pointer is null, so the type invariant does not apply.
+ inner: KBox::new(
+ DeferredFdCloserInner {
+ twork: MaybeUninit::uninit(),
+ file: core::ptr::null_mut(),
+ },
+ flags,
+ )?,
+ })
+ }
+
+ /// Schedule a task work that closes the file descriptor when this task returns to userspace.
+ ///
+ /// Fails if this is called from a context where we cannot run work when returning to
+ /// userspace. (E.g., from a kthread.)
+ pub(crate) fn close_fd(self, fd: u32) -> Result<(), DeferredFdCloseError> {
+ use bindings::task_work_notify_mode_TWA_RESUME as TWA_RESUME;
+
+ // In this method, we schedule the task work before closing the file. This is because
+ // scheduling a task work is fallible, and we need to know whether it will fail before we
+ // attempt to close the file.
+
+ // Task works are not available on kthreads.
+ let current = kernel::current!();
+
+ // Check if this is a kthread.
+ // SAFETY: Reading `flags` from a task is always okay.
+ if unsafe { ((*current.as_ptr()).flags & bindings::PF_KTHREAD) != 0 } {
+ return Err(DeferredFdCloseError::TaskWorkUnavailable);
+ }
+
+ // Transfer ownership of the box's allocation to a raw pointer. This disables the
+ // destructor, so we must manually convert it back to a KBox to drop it.
+ //
+ // Until we convert it back to a `KBox`, there are no aliasing requirements on this
+ // pointer.
+ let inner = KBox::into_raw(self.inner);
+
+ // The `callback_head` field is first in the struct, so this cast correctly gives us a
+ // pointer to the field.
+ let callback_head = inner.cast::<bindings::callback_head>();
+ // SAFETY: This pointer offset operation does not go out-of-bounds.
+ let file_field = unsafe { core::ptr::addr_of_mut!((*inner).file) };
+
+ let current = current.as_ptr();
+
+ // SAFETY: This function currently has exclusive access to the `DeferredFdCloserInner`, so
+ // it is okay for us to perform unsynchronized writes to its `callback_head` field.
+ unsafe { bindings::init_task_work(callback_head, Some(Self::do_close_fd)) };
+
+ // SAFETY: This inserts the `DeferredFdCloserInner` into the task workqueue for the current
+ // task. If this operation is successful, then this transfers exclusive ownership of the
+ // `callback_head` field to the C side until it calls `do_close_fd`, and we don't touch or
+ // invalidate the field during that time.
+ //
+ // When the C side calls `do_close_fd`, the safety requirements of that method are
+ // satisfied because when a task work is executed, the callback is given ownership of the
+ // pointer.
+ //
+ // The file pointer is currently null. If it is changed to be non-null before `do_close_fd`
+ // is called, then that change happens due to the write at the end of this function, and
+ // that write has a safety comment that explains why the refcount can be dropped when
+ // `do_close_fd` runs.
+ let res = unsafe { bindings::task_work_add(current, callback_head, TWA_RESUME) };
+
+ if res != 0 {
+ // SAFETY: Scheduling the task work failed, so we still have ownership of the box, so
+ // we may destroy it.
+ unsafe { drop(KBox::from_raw(inner)) };
+
+ return Err(DeferredFdCloseError::TaskWorkUnavailable);
+ }
+
+ // This removes the fd from the fd table in `current`. The file is not fully closed until
+ // `filp_close` is called. We are given ownership of one refcount to the file.
+ //
+ // SAFETY: This is safe no matter what `fd` is. If the `fd` is valid (that is, if the
+ // pointer is non-null), then we call `filp_close` on the returned pointer as required by
+ // `file_close_fd`.
+ let file = unsafe { bindings::file_close_fd(fd) };
+ if file.is_null() {
+ // We don't clean up the task work since that might be expensive if the task work queue
+ // is long. Just let it execute and let it clean up for itself.
+ return Err(DeferredFdCloseError::BadFd);
+ }
+
+ // Acquire a second refcount to the file.
+ //
+ // SAFETY: The `file` pointer points at a file with a non-zero refcount.
+ unsafe { bindings::get_file(file) };
+
+ // This method closes the fd, consuming one of our two refcounts. There could be active
+ // light refcounts created from that fd, so we must ensure that the file has a positive
+ // refcount for the duration of those active light refcounts. We do that by holding on to
+ // the second refcount until the current task returns to userspace.
+ //
+ // SAFETY: The `file` pointer is valid. Passing `current->files` as the file table to close
+ // it in is correct, since we just got the `fd` from `file_close_fd` which also uses
+ // `current->files`.
+ //
+ // Note: fl_owner_t is currently a void pointer.
+ unsafe { bindings::filp_close(file, (*current).files as bindings::fl_owner_t) };
+
+ // We update the file pointer that the task work is supposed to fput. This transfers
+ // ownership of our last refcount.
+ //
+ // INVARIANT: This changes the `file` field of a `DeferredFdCloserInner` from null to
+ // non-null. This doesn't break the type invariant for `DeferredFdCloserInner` because we
+ // still own a refcount to the file, so we can pass ownership of that refcount to the
+ // `DeferredFdCloserInner`.
+ //
+ // When `do_close_fd` runs, it must be safe for it to `fput` the refcount. However, this is
+ // the case because all light refcounts that are associated with the fd we closed
+        // previously must be dropped when `do_close_fd` runs, since light refcounts must be dropped
+ // before returning to userspace.
+ //
+ // SAFETY: Task works are executed on the current thread right before we return to
+ // userspace, so this write is guaranteed to happen before `do_close_fd` is called, which
+ // means that a race is not possible here.
+ unsafe { *file_field = file };
+
+ Ok(())
+ }
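+
+    // To summarize, `close_fd` performs the following sequence:
+    //  1. schedule the task work (fallible, so it must happen first),
+    //  2. detach the fd from the fd table via `file_close_fd`, receiving
+    //     ownership of one refcount,
+    //  3. acquire a second refcount with `get_file`,
+    //  4. close the file with `filp_close`, consuming the first refcount,
+    //  5. hand the second refcount to the task work, which `fput`s it on the
+    //     way back to userspace, after all light refcounts are gone.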
+
+ /// # Safety
+ ///
+ /// The provided pointer must point at the `twork` field of a `DeferredFdCloserInner` stored in
+ /// a `KBox`, and the caller must pass exclusive ownership of that `KBox`. Furthermore, if the
+ /// file pointer is non-null, then it must be okay to release the refcount by calling `fput`.
+ unsafe extern "C" fn do_close_fd(inner: *mut bindings::callback_head) {
+ // SAFETY: The caller just passed us ownership of this box.
+ let inner = unsafe { KBox::from_raw(inner.cast::<DeferredFdCloserInner>()) };
+ if !inner.file.is_null() {
+ // SAFETY: By the type invariants, we own a refcount to this file, and the caller
+ // guarantees that dropping the refcount now is okay.
+ unsafe { bindings::fput(inner.file) };
+ }
+ // The allocation is freed when `inner` goes out of scope.
+ }
+}
+
+/// Represents a failure to close an fd in a deferred manner.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub(crate) enum DeferredFdCloseError {
+ /// Closing the fd failed because we were unable to schedule a task work.
+ TaskWorkUnavailable,
+ /// Closing the fd failed because the fd does not exist.
+ BadFd,
+}
+
+impl From<DeferredFdCloseError> for Error {
+ fn from(err: DeferredFdCloseError) -> Error {
+ match err {
+ DeferredFdCloseError::TaskWorkUnavailable => ESRCH,
+ DeferredFdCloseError::BadFd => EBADF,
+ }
+ }
+}
diff --git a/drivers/android/binder/defs.rs b/drivers/android/binder/defs.rs
new file mode 100644
index 000000000000..33f51b4139c7
--- /dev/null
+++ b/drivers/android/binder/defs.rs
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use core::mem::MaybeUninit;
+use core::ops::{Deref, DerefMut};
+use kernel::{
+ transmute::{AsBytes, FromBytes},
+ uapi::{self, *},
+};
+
+macro_rules! pub_no_prefix {
+ ($prefix:ident, $($newname:ident),+ $(,)?) => {
+ $(pub(crate) const $newname: u32 = kernel::macros::concat_idents!($prefix, $newname);)+
+ };
+}
+
+pub_no_prefix!(
+ binder_driver_return_protocol_,
+ BR_TRANSACTION,
+ BR_TRANSACTION_SEC_CTX,
+ BR_REPLY,
+ BR_DEAD_REPLY,
+ BR_FAILED_REPLY,
+ BR_FROZEN_REPLY,
+ BR_NOOP,
+ BR_SPAWN_LOOPER,
+ BR_TRANSACTION_COMPLETE,
+ BR_TRANSACTION_PENDING_FROZEN,
+ BR_ONEWAY_SPAM_SUSPECT,
+ BR_OK,
+ BR_ERROR,
+ BR_INCREFS,
+ BR_ACQUIRE,
+ BR_RELEASE,
+ BR_DECREFS,
+ BR_DEAD_BINDER,
+ BR_CLEAR_DEATH_NOTIFICATION_DONE,
+ BR_FROZEN_BINDER,
+ BR_CLEAR_FREEZE_NOTIFICATION_DONE,
+);
+
+pub_no_prefix!(
+ binder_driver_command_protocol_,
+ BC_TRANSACTION,
+ BC_TRANSACTION_SG,
+ BC_REPLY,
+ BC_REPLY_SG,
+ BC_FREE_BUFFER,
+ BC_ENTER_LOOPER,
+ BC_EXIT_LOOPER,
+ BC_REGISTER_LOOPER,
+ BC_INCREFS,
+ BC_ACQUIRE,
+ BC_RELEASE,
+ BC_DECREFS,
+ BC_INCREFS_DONE,
+ BC_ACQUIRE_DONE,
+ BC_REQUEST_DEATH_NOTIFICATION,
+ BC_CLEAR_DEATH_NOTIFICATION,
+ BC_DEAD_BINDER_DONE,
+ BC_REQUEST_FREEZE_NOTIFICATION,
+ BC_CLEAR_FREEZE_NOTIFICATION,
+ BC_FREEZE_NOTIFICATION_DONE,
+);
+
+pub_no_prefix!(
+ flat_binder_object_flags_,
+ FLAT_BINDER_FLAG_ACCEPTS_FDS,
+ FLAT_BINDER_FLAG_TXN_SECURITY_CTX
+);
+
+pub_no_prefix!(
+ transaction_flags_,
+ TF_ONE_WAY,
+ TF_ACCEPT_FDS,
+ TF_CLEAR_BUF,
+ TF_UPDATE_TXN
+);
+
+pub(crate) use uapi::{
+ BINDER_TYPE_BINDER, BINDER_TYPE_FD, BINDER_TYPE_FDA, BINDER_TYPE_HANDLE, BINDER_TYPE_PTR,
+ BINDER_TYPE_WEAK_BINDER, BINDER_TYPE_WEAK_HANDLE,
+};
+
+macro_rules! decl_wrapper {
+ ($newname:ident, $wrapped:ty) => {
+ // Define a wrapper around the C type. Use `MaybeUninit` to enforce that the value of
+ // padding bytes must be preserved.
+ #[derive(Copy, Clone)]
+ #[repr(transparent)]
+ pub(crate) struct $newname(MaybeUninit<$wrapped>);
+
+ // SAFETY: This macro is only used with types where this is ok.
+ unsafe impl FromBytes for $newname {}
+ // SAFETY: This macro is only used with types where this is ok.
+ unsafe impl AsBytes for $newname {}
+
+ impl Deref for $newname {
+ type Target = $wrapped;
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: We use `MaybeUninit` only to preserve padding. The value must still
+ // always be valid.
+ unsafe { self.0.assume_init_ref() }
+ }
+ }
+
+ impl DerefMut for $newname {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // SAFETY: We use `MaybeUninit` only to preserve padding. The value must still
+ // always be valid.
+ unsafe { self.0.assume_init_mut() }
+ }
+ }
+
+ impl Default for $newname {
+ fn default() -> Self {
+ // Create a new value of this type where all bytes (including padding) are zeroed.
+ Self(MaybeUninit::zeroed())
+ }
+ }
+ };
+}
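+
+// As a sketch of how these wrappers are used (illustrative only, assuming a
+// `UserSliceReader`/`UserSliceWriter` pair): a `BinderWriteRead` can be read
+// from and written back to userspace with its padding bytes round-tripped
+// untouched, while `Deref`/`DerefMut` give access to the C struct fields.
+//
+//     let mut bwr = reader.read::<BinderWriteRead>()?; // uses `FromBytes`
+//     bwr.read_consumed = 0;                           // via `DerefMut`
+//     writer.write(&bwr)?;                             // uses `AsBytes`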
+
+decl_wrapper!(BinderNodeDebugInfo, uapi::binder_node_debug_info);
+decl_wrapper!(BinderNodeInfoForRef, uapi::binder_node_info_for_ref);
+decl_wrapper!(FlatBinderObject, uapi::flat_binder_object);
+decl_wrapper!(BinderFdObject, uapi::binder_fd_object);
+decl_wrapper!(BinderFdArrayObject, uapi::binder_fd_array_object);
+decl_wrapper!(BinderObjectHeader, uapi::binder_object_header);
+decl_wrapper!(BinderBufferObject, uapi::binder_buffer_object);
+decl_wrapper!(BinderTransactionData, uapi::binder_transaction_data);
+decl_wrapper!(
+ BinderTransactionDataSecctx,
+ uapi::binder_transaction_data_secctx
+);
+decl_wrapper!(BinderTransactionDataSg, uapi::binder_transaction_data_sg);
+decl_wrapper!(BinderWriteRead, uapi::binder_write_read);
+decl_wrapper!(BinderVersion, uapi::binder_version);
+decl_wrapper!(BinderFrozenStatusInfo, uapi::binder_frozen_status_info);
+decl_wrapper!(BinderFreezeInfo, uapi::binder_freeze_info);
+decl_wrapper!(BinderFrozenStateInfo, uapi::binder_frozen_state_info);
+decl_wrapper!(BinderHandleCookie, uapi::binder_handle_cookie);
+decl_wrapper!(ExtendedError, uapi::binder_extended_error);
+
+impl BinderVersion {
+ pub(crate) fn current() -> Self {
+ Self(MaybeUninit::new(uapi::binder_version {
+ protocol_version: BINDER_CURRENT_PROTOCOL_VERSION as _,
+ }))
+ }
+}
+
+impl BinderTransactionData {
+ pub(crate) fn with_buffers_size(self, buffers_size: u64) -> BinderTransactionDataSg {
+ BinderTransactionDataSg(MaybeUninit::new(uapi::binder_transaction_data_sg {
+ transaction_data: *self,
+ buffers_size,
+ }))
+ }
+}
+
+impl BinderTransactionDataSecctx {
+ /// View the inner data as wrapped in `BinderTransactionData`.
+ pub(crate) fn tr_data(&mut self) -> &mut BinderTransactionData {
+ // SAFETY: Transparent wrapper is safe to transmute.
+ unsafe {
+ &mut *(&mut self.transaction_data as *mut uapi::binder_transaction_data
+ as *mut BinderTransactionData)
+ }
+ }
+}
+
+impl ExtendedError {
+ pub(crate) fn new(id: u32, command: u32, param: i32) -> Self {
+ Self(MaybeUninit::new(uapi::binder_extended_error {
+ id,
+ command,
+ param,
+ }))
+ }
+}
diff --git a/drivers/android/binder/error.rs b/drivers/android/binder/error.rs
new file mode 100644
index 000000000000..b24497cfa292
--- /dev/null
+++ b/drivers/android/binder/error.rs
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::fmt;
+use kernel::prelude::*;
+
+use crate::defs::*;
+
+pub(crate) type BinderResult<T = ()> = core::result::Result<T, BinderError>;
+
+/// An error that will be returned to userspace via the `BINDER_WRITE_READ` ioctl rather than via
+/// errno.
+pub(crate) struct BinderError {
+ pub(crate) reply: u32,
+ source: Option<Error>,
+}
+
+impl BinderError {
+ pub(crate) fn new_dead() -> Self {
+ Self {
+ reply: BR_DEAD_REPLY,
+ source: None,
+ }
+ }
+
+ pub(crate) fn new_frozen() -> Self {
+ Self {
+ reply: BR_FROZEN_REPLY,
+ source: None,
+ }
+ }
+
+ pub(crate) fn new_frozen_oneway() -> Self {
+ Self {
+ reply: BR_TRANSACTION_PENDING_FROZEN,
+ source: None,
+ }
+ }
+
+ pub(crate) fn is_dead(&self) -> bool {
+ self.reply == BR_DEAD_REPLY
+ }
+
+ pub(crate) fn as_errno(&self) -> kernel::ffi::c_int {
+ self.source.unwrap_or(EINVAL).to_errno()
+ }
+
+ pub(crate) fn should_pr_warn(&self) -> bool {
+ self.source.is_some()
+ }
+}
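+
+// A `BinderError` thus carries two channels of information: `reply` is the
+// BR_* code delivered through the read buffer, and `source`, when present, is
+// stored as the thread's extended error (retrievable by userspace via the
+// BINDER_GET_EXTENDED_ERROR ioctl).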
+
+/// Convert an errno into a `BinderError` and store the errno used to construct it. The errno
+/// should be stored as the thread's extended error when given to userspace.
+impl From<Error> for BinderError {
+ fn from(source: Error) -> Self {
+ Self {
+ reply: BR_FAILED_REPLY,
+ source: Some(source),
+ }
+ }
+}
+
+impl From<kernel::fs::file::BadFdError> for BinderError {
+ fn from(source: kernel::fs::file::BadFdError) -> Self {
+ BinderError::from(Error::from(source))
+ }
+}
+
+impl From<kernel::alloc::AllocError> for BinderError {
+ fn from(_: kernel::alloc::AllocError) -> Self {
+ Self {
+ reply: BR_FAILED_REPLY,
+ source: Some(ENOMEM),
+ }
+ }
+}
+
+impl fmt::Debug for BinderError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.reply {
+ BR_FAILED_REPLY => match self.source.as_ref() {
+ Some(source) => f
+ .debug_struct("BR_FAILED_REPLY")
+ .field("source", source)
+ .finish(),
+ None => f.pad("BR_FAILED_REPLY"),
+ },
+ BR_DEAD_REPLY => f.pad("BR_DEAD_REPLY"),
+ BR_FROZEN_REPLY => f.pad("BR_FROZEN_REPLY"),
+ BR_TRANSACTION_PENDING_FROZEN => f.pad("BR_TRANSACTION_PENDING_FROZEN"),
+ BR_TRANSACTION_COMPLETE => f.pad("BR_TRANSACTION_COMPLETE"),
+ _ => f
+ .debug_struct("BinderError")
+ .field("reply", &self.reply)
+ .finish(),
+ }
+ }
+}
diff --git a/drivers/android/binder/freeze.rs b/drivers/android/binder/freeze.rs
new file mode 100644
index 000000000000..53b60035639a
--- /dev/null
+++ b/drivers/android/binder/freeze.rs
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ alloc::AllocError,
+ list::ListArc,
+ prelude::*,
+ rbtree::{self, RBTreeNodeReservation},
+ seq_file::SeqFile,
+ seq_print,
+ sync::{Arc, UniqueArc},
+ uaccess::UserSliceReader,
+};
+
+use crate::{
+ defs::*, node::Node, process::Process, thread::Thread, BinderReturnWriter, DArc, DLArc,
+ DTRWrap, DeliverToRead,
+};
+
+#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd)]
+pub(crate) struct FreezeCookie(u64);
+
+/// Represents a listener for changes to the frozen state of a process.
+pub(crate) struct FreezeListener {
+ /// The node we are listening for.
+ pub(crate) node: DArc<Node>,
+ /// The cookie of this freeze listener.
+ cookie: FreezeCookie,
+ /// What value of `is_frozen` did we most recently tell userspace about?
+ last_is_frozen: Option<bool>,
+ /// We sent a `BR_FROZEN_BINDER` and we are waiting for `BC_FREEZE_NOTIFICATION_DONE` before
+ /// sending any other commands.
+ is_pending: bool,
+ /// Userspace sent `BC_CLEAR_FREEZE_NOTIFICATION` and we need to reply with
+ /// `BR_CLEAR_FREEZE_NOTIFICATION_DONE` as soon as possible. If `is_pending` is set, then we
+ /// must wait for it to be unset before we can reply.
+ is_clearing: bool,
+ /// Number of cleared duplicates that can't be deleted until userspace sends
+ /// `BC_FREEZE_NOTIFICATION_DONE`.
+ num_pending_duplicates: u64,
+ /// Number of cleared duplicates that can be deleted.
+ num_cleared_duplicates: u64,
+}
+
+impl FreezeListener {
+ /// Is it okay to create a new listener with the same cookie as this one for the provided node?
+ ///
+ /// Under some scenarios, userspace may delete a freeze listener and immediately recreate it
+ /// with the same cookie. This results in duplicate listeners. To avoid issues with ambiguity,
+ /// we allow this only if the new listener is for the same node, and we also require that the
+ /// old listener has already been cleared.
+ fn allow_duplicate(&self, node: &DArc<Node>) -> bool {
+ Arc::ptr_eq(&self.node, node) && self.is_clearing
+ }
+}
+
+type UninitFM = UniqueArc<core::mem::MaybeUninit<DTRWrap<FreezeMessage>>>;
+
+/// Represents a notification that the freeze state has changed.
+pub(crate) struct FreezeMessage {
+ cookie: FreezeCookie,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for FreezeMessage {
+ untracked;
+ }
+}
+
+impl FreezeMessage {
+ fn new(flags: kernel::alloc::Flags) -> Result<UninitFM, AllocError> {
+ UniqueArc::new_uninit(flags)
+ }
+
+ fn init(ua: UninitFM, cookie: FreezeCookie) -> DLArc<FreezeMessage> {
+ match ua.pin_init_with(DTRWrap::new(FreezeMessage { cookie })) {
+ Ok(msg) => ListArc::from(msg),
+ Err(err) => match err {},
+ }
+ }
+}
+
+impl DeliverToRead for FreezeMessage {
+ fn do_work(
+ self: DArc<Self>,
+ thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let _removed_listener;
+ let mut node_refs = thread.process.node_refs.lock();
+ let Some(mut freeze_entry) = node_refs.freeze_listeners.find_mut(&self.cookie) else {
+ return Ok(true);
+ };
+ let freeze = freeze_entry.get_mut();
+
+ if freeze.num_cleared_duplicates > 0 {
+ freeze.num_cleared_duplicates -= 1;
+ drop(node_refs);
+ writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?;
+ writer.write_payload(&self.cookie.0)?;
+ return Ok(true);
+ }
+
+ if freeze.is_pending {
+ return Ok(true);
+ }
+ if freeze.is_clearing {
+ kernel::warn_on!(freeze.num_cleared_duplicates != 0);
+ if freeze.num_pending_duplicates > 0 {
+ // The primary freeze listener was deleted, so convert a pending duplicate back
+ // into the primary one.
+ freeze.num_pending_duplicates -= 1;
+ freeze.is_pending = true;
+ freeze.is_clearing = true;
+ } else {
+ _removed_listener = freeze_entry.remove_node();
+ }
+ drop(node_refs);
+ writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?;
+ writer.write_payload(&self.cookie.0)?;
+ Ok(true)
+ } else {
+ let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
+ if freeze.last_is_frozen == Some(is_frozen) {
+ return Ok(true);
+ }
+
+ let mut state_info = BinderFrozenStateInfo::default();
+ state_info.is_frozen = is_frozen as u32;
+ state_info.cookie = freeze.cookie.0;
+ freeze.is_pending = true;
+ freeze.last_is_frozen = Some(is_frozen);
+ drop(node_refs);
+
+ writer.write_code(BR_FROZEN_BINDER)?;
+ writer.write_payload(&state_info)?;
+            // BR_FROZEN_BINDER notifications can cause transactions, so stop
+            // processing further work items after delivering this one.
+ Ok(false)
+ }
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(m, "{}has frozen binder\n", prefix);
+ Ok(())
+ }
+}
+
+impl FreezeListener {
+ pub(crate) fn on_process_exit(&self, proc: &Arc<Process>) {
+ if !self.is_clearing {
+ self.node.remove_freeze_listener(proc);
+ }
+ }
+}
+
+impl Process {
+ pub(crate) fn request_freeze_notif(
+ self: &Arc<Self>,
+ reader: &mut UserSliceReader,
+ ) -> Result<()> {
+ let hc = reader.read::<BinderHandleCookie>()?;
+ let handle = hc.handle;
+ let cookie = FreezeCookie(hc.cookie);
+
+ let msg = FreezeMessage::new(GFP_KERNEL)?;
+ let alloc = RBTreeNodeReservation::new(GFP_KERNEL)?;
+
+ let mut node_refs_guard = self.node_refs.lock();
+ let node_refs = &mut *node_refs_guard;
+ let Some(info) = node_refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_REQUEST_FREEZE_NOTIFICATION invalid ref {}\n", handle);
+ return Err(EINVAL);
+ };
+ if info.freeze().is_some() {
+ pr_warn!("BC_REQUEST_FREEZE_NOTIFICATION already set\n");
+ return Err(EINVAL);
+ }
+ let node_ref = info.node_ref();
+ let freeze_entry = node_refs.freeze_listeners.entry(cookie);
+
+ if let rbtree::Entry::Occupied(ref dupe) = freeze_entry {
+ if !dupe.get().allow_duplicate(&node_ref.node) {
+ pr_warn!("BC_REQUEST_FREEZE_NOTIFICATION duplicate cookie\n");
+ return Err(EINVAL);
+ }
+ }
+
+ // All failure paths must come before this call, and all modifications must come after this
+ // call.
+ node_ref.node.add_freeze_listener(self, GFP_KERNEL)?;
+
+ match freeze_entry {
+ rbtree::Entry::Vacant(entry) => {
+ entry.insert(
+ FreezeListener {
+ cookie,
+ node: node_ref.node.clone(),
+ last_is_frozen: None,
+ is_pending: false,
+ is_clearing: false,
+ num_pending_duplicates: 0,
+ num_cleared_duplicates: 0,
+ },
+ alloc,
+ );
+ }
+ rbtree::Entry::Occupied(mut dupe) => {
+ let dupe = dupe.get_mut();
+ if dupe.is_pending {
+ dupe.num_pending_duplicates += 1;
+ } else {
+ dupe.num_cleared_duplicates += 1;
+ }
+ dupe.last_is_frozen = None;
+ dupe.is_pending = false;
+ dupe.is_clearing = false;
+ }
+ }
+
+ *info.freeze() = Some(cookie);
+ let msg = FreezeMessage::init(msg, cookie);
+ drop(node_refs_guard);
+ let _ = self.push_work(msg);
+ Ok(())
+ }
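+
+    // Note the allocate-then-commit shape above: the `FreezeMessage` and the
+    // rbtree node reservation are allocated before `node_refs` is taken, and
+    // every fallible check precedes `add_freeze_listener`, so the data
+    // structures are only mutated once failure is no longer possible.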
+
+ pub(crate) fn freeze_notif_done(self: &Arc<Self>, reader: &mut UserSliceReader) -> Result<()> {
+ let cookie = FreezeCookie(reader.read()?);
+ let alloc = FreezeMessage::new(GFP_KERNEL)?;
+ let mut node_refs_guard = self.node_refs.lock();
+ let node_refs = &mut *node_refs_guard;
+ let Some(freeze) = node_refs.freeze_listeners.get_mut(&cookie) else {
+ pr_warn!("BC_FREEZE_NOTIFICATION_DONE {:016x} not found\n", cookie.0);
+ return Err(EINVAL);
+ };
+ let mut clear_msg = None;
+ if freeze.num_pending_duplicates > 0 {
+ clear_msg = Some(FreezeMessage::init(alloc, cookie));
+ freeze.num_pending_duplicates -= 1;
+ freeze.num_cleared_duplicates += 1;
+ } else {
+ if !freeze.is_pending {
+ pr_warn!(
+ "BC_FREEZE_NOTIFICATION_DONE {:016x} not pending\n",
+ cookie.0
+ );
+ return Err(EINVAL);
+ }
+ let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
+ if freeze.is_clearing || freeze.last_is_frozen != Some(is_frozen) {
+ // Immediately send another FreezeMessage.
+ clear_msg = Some(FreezeMessage::init(alloc, cookie));
+ }
+ freeze.is_pending = false;
+ }
+ drop(node_refs_guard);
+ if let Some(clear_msg) = clear_msg {
+ let _ = self.push_work(clear_msg);
+ }
+ Ok(())
+ }
+
+ pub(crate) fn clear_freeze_notif(self: &Arc<Self>, reader: &mut UserSliceReader) -> Result<()> {
+ let hc = reader.read::<BinderHandleCookie>()?;
+ let handle = hc.handle;
+ let cookie = FreezeCookie(hc.cookie);
+
+ let alloc = FreezeMessage::new(GFP_KERNEL)?;
+ let mut node_refs_guard = self.node_refs.lock();
+ let node_refs = &mut *node_refs_guard;
+ let Some(info) = node_refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION invalid ref {}\n", handle);
+ return Err(EINVAL);
+ };
+ let Some(info_cookie) = info.freeze() else {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n");
+ return Err(EINVAL);
+ };
+ if *info_cookie != cookie {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch\n");
+ return Err(EINVAL);
+ }
+ let Some(listener) = node_refs.freeze_listeners.get_mut(&cookie) else {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION invalid cookie {}\n", handle);
+ return Err(EINVAL);
+ };
+ listener.is_clearing = true;
+ listener.node.remove_freeze_listener(self);
+ *info.freeze() = None;
+ let mut msg = None;
+ if !listener.is_pending {
+ msg = Some(FreezeMessage::init(alloc, cookie));
+ }
+ drop(node_refs_guard);
+
+ if let Some(msg) = msg {
+ let _ = self.push_work(msg);
+ }
+ Ok(())
+ }
+
+ fn get_freeze_cookie(&self, node: &DArc<Node>) -> Option<FreezeCookie> {
+ let node_refs = &mut *self.node_refs.lock();
+ let handle = node_refs.by_node.get(&node.global_id())?;
+ let node_ref = node_refs.by_handle.get_mut(handle)?;
+ *node_ref.freeze()
+ }
+
+ /// Creates a vector of every freeze listener on this process.
+ ///
+ /// Returns pairs of the remote process listening for notifications and the local node it is
+ /// listening on.
+ #[expect(clippy::type_complexity)]
+ fn find_freeze_recipients(&self) -> Result<KVVec<(DArc<Node>, Arc<Process>)>, AllocError> {
+ // Defined before `inner` to drop after releasing spinlock if `push_within_capacity` fails.
+ let mut node_proc_pair;
+
+ // We pre-allocate space for up to 8 recipients before we take the spinlock. However, if
+ // the allocation fails, use a vector with a capacity of zero instead of failing. After
+ // all, there might not be any freeze listeners, in which case this operation could still
+ // succeed.
+ let mut recipients =
+ KVVec::with_capacity(8, GFP_KERNEL).unwrap_or_else(|_err| KVVec::new());
+
+ let mut inner = self.lock_with_nodes();
+ let mut curr = inner.nodes.cursor_front_mut();
+ while let Some(cursor) = curr {
+ let (key, node) = cursor.current();
+ let key = *key;
+ let list = node.freeze_list(&inner.inner);
+ let len = list.len();
+
+ if recipients.spare_capacity_mut().len() < len {
+ drop(inner);
+ recipients.reserve(len, GFP_KERNEL)?;
+ inner = self.lock_with_nodes();
+ // Find the node we were looking at and try again. If the set of nodes was changed,
+ // then just proceed to the next node. This is ok because we don't guarantee the
+ // inclusion of nodes that are added or removed in parallel with this operation.
+ curr = inner.nodes.cursor_lower_bound_mut(&key);
+ continue;
+ }
+
+ for proc in list {
+ node_proc_pair = (node.clone(), proc.clone());
+ recipients
+ .push_within_capacity(node_proc_pair)
+ .map_err(|_| {
+ pr_err!(
+ "push_within_capacity failed even though we checked the capacity\n"
+ );
+ AllocError
+ })?;
+ }
+
+ curr = cursor.move_next();
+ }
+ Ok(recipients)
+ }
+
+ /// Prepare allocations for sending freeze messages.
+ pub(crate) fn prepare_freeze_messages(&self) -> Result<FreezeMessages, AllocError> {
+ let recipients = self.find_freeze_recipients()?;
+ let mut batch = KVVec::with_capacity(recipients.len(), GFP_KERNEL)?;
+ for (node, proc) in recipients {
+ let Some(cookie) = proc.get_freeze_cookie(&node) else {
+ // If the freeze listener was removed in the meantime, just discard the
+ // notification.
+ continue;
+ };
+ let msg_alloc = FreezeMessage::new(GFP_KERNEL)?;
+ let msg = FreezeMessage::init(msg_alloc, cookie);
+ batch.push((proc, msg), GFP_KERNEL)?;
+ }
+
+ Ok(FreezeMessages { batch })
+ }
+}
+
+pub(crate) struct FreezeMessages {
+ batch: KVVec<(Arc<Process>, DLArc<FreezeMessage>)>,
+}
+
+impl FreezeMessages {
+ pub(crate) fn send_messages(self) {
+ for (proc, msg) in self.batch {
+ let _ = proc.push_work(msg);
+ }
+ }
+}
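+
+// Illustrative two-phase usage (a sketch, not a call site from this file; the locking around
+// the state change is an assumption):
+//
+// // Phase 1: allocate every message while failure is still an option.
+// let msgs = proc.prepare_freeze_messages()?;
+// // ... update the frozen state ...
+// // Phase 2: deliver the messages; this step cannot fail.
+// msgs.send_messages();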
diff --git a/drivers/android/binder/node.rs b/drivers/android/binder/node.rs
new file mode 100644
index 000000000000..c26d113ede96
--- /dev/null
+++ b/drivers/android/binder/node.rs
@@ -0,0 +1,1131 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ sync::lock::{spinlock::SpinLockBackend, Guard},
+ sync::{Arc, LockedBy, SpinLock},
+};
+
+use crate::{
+ defs::*,
+ error::BinderError,
+ process::{NodeRefInfo, Process, ProcessInner},
+ thread::Thread,
+ transaction::Transaction,
+ BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead,
+};
+
+use core::mem;
+
+mod wrapper;
+pub(crate) use self::wrapper::CritIncrWrapper;
+
+#[derive(Debug)]
+pub(crate) struct CouldNotDeliverCriticalIncrement;
+
+/// Keeps track of how this node is scheduled.
+///
+/// There are two ways to schedule a node to a work list. Just schedule the node itself, or
+/// allocate a wrapper that references the node and schedule the wrapper. These wrappers exist to
+/// make it possible to "move" a node from one list to another - when `do_work` is called directly
+/// on the `Node`, then it's a no-op if there's also a pending wrapper.
+///
+/// Wrappers are generally only needed for zero-to-one refcount increments, and there are two cases
+/// of this: weak increments and strong increments. We call such increments "critical" because it
+/// is critical that they are delivered to the thread doing the increment. Some examples:
+///
+/// * One thread makes a zero-to-one strong increment, and another thread makes a zero-to-one weak
+/// increment. Delivering the node to the thread doing the weak increment is wrong, since the
+/// thread doing the strong increment may have ended a long time ago when the command is actually
+/// processed by userspace.
+///
+/// * We have a weak reference and are about to drop it on one thread. But then another thread does
+/// a zero-to-one strong increment. If the strong increment gets sent to the thread that was
+/// about to drop the weak reference, then the strong increment could be processed after the
+/// other thread has already exited, which would be too late.
+///
+/// Note that trying to create a `ListArc` to the node can succeed even if `has_normal_push` is
+/// set. This is because another thread might just have popped the node from a todo list, but not
+/// yet called `do_work`. However, if `has_normal_push` is false, then creating a `ListArc` should
+/// always succeed.
+///
+/// Like the other fields in `NodeInner`, the delivery state is protected by the process lock.
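+///
+/// An illustrative sequence (a sketch of the accessors below, not an exhaustive state machine):
+///
+/// ```text
+/// should_normal_push()                // true: nothing is scheduled yet
+/// did_normal_push()                   // has_pushed_node = true
+/// should_push_strong_zero2one()       // true: no strong zero2one yet
+/// can_push_strong_zero2one_normally() // false: the node is already pushed
+/// did_push_strong_zero2one_wrapper()  // has_pushed_wrapper = true
+/// ```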
+struct DeliveryState {
+ /// Is the `Node` currently scheduled?
+ has_pushed_node: bool,
+
+ /// Is a wrapper currently scheduled?
+ ///
+ /// The wrapper is used only for strong zero2one increments.
+ has_pushed_wrapper: bool,
+
+ /// Is the currently scheduled `Node` scheduled due to a weak zero2one increment?
+ ///
+ /// Weak zero2one operations are always scheduled using the `Node`.
+ has_weak_zero2one: bool,
+
+ /// Is the currently scheduled wrapper/`Node` scheduled due to a strong zero2one increment?
+ ///
+ /// If `has_pushed_wrapper` is set, then the strong zero2one increment was scheduled using the
+ /// wrapper. Otherwise, `has_pushed_node` must be set and it was scheduled using the `Node`.
+ has_strong_zero2one: bool,
+}
+
+impl DeliveryState {
+ fn should_normal_push(&self) -> bool {
+ !self.has_pushed_node && !self.has_pushed_wrapper
+ }
+
+ fn did_normal_push(&mut self) {
+ assert!(self.should_normal_push());
+ self.has_pushed_node = true;
+ }
+
+ fn should_push_weak_zero2one(&self) -> bool {
+ !self.has_weak_zero2one && !self.has_strong_zero2one
+ }
+
+ fn can_push_weak_zero2one_normally(&self) -> bool {
+ !self.has_pushed_node
+ }
+
+ fn did_push_weak_zero2one(&mut self) {
+ assert!(self.should_push_weak_zero2one());
+ assert!(self.can_push_weak_zero2one_normally());
+ self.has_pushed_node = true;
+ self.has_weak_zero2one = true;
+ }
+
+ fn should_push_strong_zero2one(&self) -> bool {
+ !self.has_strong_zero2one
+ }
+
+ fn can_push_strong_zero2one_normally(&self) -> bool {
+ !self.has_pushed_node
+ }
+
+ fn did_push_strong_zero2one(&mut self) {
+ assert!(self.should_push_strong_zero2one());
+ assert!(self.can_push_strong_zero2one_normally());
+ self.has_pushed_node = true;
+ self.has_strong_zero2one = true;
+ }
+
+ fn did_push_strong_zero2one_wrapper(&mut self) {
+ assert!(self.should_push_strong_zero2one());
+ assert!(!self.can_push_strong_zero2one_normally());
+ self.has_pushed_wrapper = true;
+ self.has_strong_zero2one = true;
+ }
+}
+
+struct CountState {
+ /// The reference count.
+ count: usize,
+ /// Whether the process that owns this node thinks that we hold a refcount on it. (Note that
+ /// even if count is greater than one, we only increment it once in the owning process.)
+ has_count: bool,
+}
+
+impl CountState {
+ fn new() -> Self {
+ Self {
+ count: 0,
+ has_count: false,
+ }
+ }
+}
+
+struct NodeInner {
+ /// Strong refcounts held on this node by `NodeRef` objects.
+ strong: CountState,
+ /// Weak refcounts held on this node by `NodeRef` objects.
+ weak: CountState,
+ delivery_state: DeliveryState,
+ /// The binder driver guarantees that oneway transactions sent to the same node are serialized,
+ /// that is, userspace will not be given the next one until it has finished processing the
+ /// previous oneway transaction. This is done to avoid the case where two oneway transactions
+ /// arrive in opposite order from the order in which they were sent. (E.g., they could be
+ /// delivered to two different threads, which could appear as-if they were sent in opposite
+ /// order.)
+ ///
+ /// To fix that, we store pending oneway transactions in a separate list in the node, and don't
+ /// deliver the next oneway transaction until userspace signals that it has finished processing
+ /// the previous oneway transaction by calling the `BC_FREE_BUFFER` ioctl. (An illustrative
+ /// flow is sketched after this struct.)
+ oneway_todo: List<DTRWrap<Transaction>>,
+ /// Keeps track of whether this node has a pending oneway transaction.
+ ///
+ /// When this is true, incoming oneway transactions are stored in `oneway_todo`, instead of
+ /// being delivered directly to the process.
+ has_oneway_transaction: bool,
+ /// List of processes to deliver a notification to when this node is destroyed (usually due to
+ /// the process dying).
+ death_list: List<DTRWrap<NodeDeath>, 1>,
+ /// List of processes to deliver freeze notifications to.
+ freeze_list: KVVec<Arc<Process>>,
+ /// The number of active BR_INCREFS or BR_ACQUIRE operations. (There should be at most two.)
+ ///
+ /// If this is non-zero, then we postpone any BR_RELEASE or BR_DECREFS notifications until the
+ /// active operations have ended. This avoids the situation where an increment and a decrement
+ /// reordered from userspace's perspective.
+ active_inc_refs: u8,
+ /// List of `NodeRefInfo` objects that reference this node.
+ refs: List<NodeRefInfo, { NodeRefInfo::LIST_NODE }>,
+}
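+
+// An illustrative oneway flow (a sketch; `submit_oneway` and `pending_oneway_finished` below
+// implement the real logic):
+//
+// 1. t1 arrives: `has_oneway_transaction` is false, so t1 is delivered directly and the flag
+//    is set.
+// 2. t2 arrives: the flag is set, so t2 is queued on `oneway_todo`.
+// 3. Userspace frees t1's buffer (`BC_FREE_BUFFER`): t2 is popped from `oneway_todo` and
+//    delivered; the flag remains set until the queue is drained.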
+
+#[pin_data]
+pub(crate) struct Node {
+ pub(crate) debug_id: usize,
+ ptr: u64,
+ pub(crate) cookie: u64,
+ pub(crate) flags: u32,
+ pub(crate) owner: Arc<Process>,
+ inner: LockedBy<NodeInner, ProcessInner>,
+ #[pin]
+ links_track: AtomicTracker,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Node {
+ tracked_by links_track: AtomicTracker;
+ }
+}
+
+// Make `oneway_todo` work.
+kernel::list::impl_list_item! {
+ impl ListItem<0> for DTRWrap<Transaction> {
+ using ListLinks { self.links.inner };
+ }
+}
+
+impl Node {
+ pub(crate) fn new(
+ ptr: u64,
+ cookie: u64,
+ flags: u32,
+ owner: Arc<Process>,
+ ) -> impl PinInit<Self> {
+ pin_init!(Self {
+ inner: LockedBy::new(
+ &owner.inner,
+ NodeInner {
+ strong: CountState::new(),
+ weak: CountState::new(),
+ delivery_state: DeliveryState {
+ has_pushed_node: false,
+ has_pushed_wrapper: false,
+ has_weak_zero2one: false,
+ has_strong_zero2one: false,
+ },
+ death_list: List::new(),
+ oneway_todo: List::new(),
+ freeze_list: KVVec::new(),
+ has_oneway_transaction: false,
+ active_inc_refs: 0,
+ refs: List::new(),
+ },
+ ),
+ debug_id: super::next_debug_id(),
+ ptr,
+ cookie,
+ flags,
+ owner,
+ links_track <- AtomicTracker::new(),
+ })
+ }
+
+ pub(crate) fn has_oneway_transaction(&self, owner_inner: &mut ProcessInner) -> bool {
+ let inner = self.inner.access_mut(owner_inner);
+ inner.has_oneway_transaction
+ }
+
+ #[inline(never)]
+ pub(crate) fn full_debug_print(
+ &self,
+ m: &SeqFile,
+ owner_inner: &mut ProcessInner,
+ ) -> Result<()> {
+ let inner = self.inner.access_mut(owner_inner);
+ seq_print!(
+ m,
+ " node {}: u{:016x} c{:016x} hs {} hw {} cs {} cw {}",
+ self.debug_id,
+ self.ptr,
+ self.cookie,
+ inner.strong.has_count,
+ inner.weak.has_count,
+ inner.strong.count,
+ inner.weak.count,
+ );
+ if !inner.refs.is_empty() {
+ seq_print!(m, " proc");
+ for node_ref in &inner.refs {
+ seq_print!(m, " {}", node_ref.process.task.pid());
+ }
+ }
+ seq_print!(m, "\n");
+ for t in &inner.oneway_todo {
+ t.debug_print_inner(m, " pending async transaction ");
+ }
+ Ok(())
+ }
+
+ /// Insert the `NodeRef` into this `refs` list.
+ ///
+ /// # Safety
+ ///
+ /// It must be the case that `info.node_ref.node` is this node.
+ pub(crate) unsafe fn insert_node_info(
+ &self,
+ info: ListArc<NodeRefInfo, { NodeRefInfo::LIST_NODE }>,
+ ) {
+ self.inner
+ .access_mut(&mut self.owner.inner.lock())
+ .refs
+ .push_front(info);
+ }
+
+ /// Remove the `NodeRef` from this `refs` list.
+ ///
+ /// # Safety
+ ///
+ /// It must be the case that `info.node_ref.node` is this node.
+ pub(crate) unsafe fn remove_node_info(
+ &self,
+ info: &NodeRefInfo,
+ ) -> Option<ListArc<NodeRefInfo, { NodeRefInfo::LIST_NODE }>> {
+ // SAFETY: We always insert `NodeRefInfo` objects into the `refs` list of the node that it
+ // references in `info.node_ref.node`. That is this node, so `info` cannot possibly be in
+ // the `refs` list of another node.
+ unsafe {
+ self.inner
+ .access_mut(&mut self.owner.inner.lock())
+ .refs
+ .remove(info)
+ }
+ }
+
+ /// An id that is unique across all binder nodes on the system. Used as the key in the
+ /// `by_node` map.
+ pub(crate) fn global_id(&self) -> usize {
+ self as *const Node as usize
+ }
+
+ pub(crate) fn get_id(&self) -> (u64, u64) {
+ (self.ptr, self.cookie)
+ }
+
+ pub(crate) fn add_death(
+ &self,
+ death: ListArc<DTRWrap<NodeDeath>, 1>,
+ guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+ ) {
+ self.inner.access_mut(guard).death_list.push_back(death);
+ }
+
+ pub(crate) fn inc_ref_done_locked(
+ self: &DArc<Node>,
+ _strong: bool,
+ owner_inner: &mut ProcessInner,
+ ) -> Option<DLArc<Node>> {
+ let inner = self.inner.access_mut(owner_inner);
+ if inner.active_inc_refs == 0 {
+ pr_err!("inc_ref_done called when no active inc_refs");
+ return None;
+ }
+
+ inner.active_inc_refs -= 1;
+ if inner.active_inc_refs == 0 {
+ // Having active inc_refs can inhibit dropping of ref-counts. Calculate whether we
+ // would send a refcount decrement, and if so, tell the caller to schedule us.
+ let strong = inner.strong.count > 0;
+ let has_strong = inner.strong.has_count;
+ let weak = strong || inner.weak.count > 0;
+ let has_weak = inner.weak.has_count;
+
+ let should_drop_weak = !weak && has_weak;
+ let should_drop_strong = !strong && has_strong;
+
+ // If we want to drop the ref-count again, tell the caller to schedule a work node for
+ // that.
+ let need_push = should_drop_weak || should_drop_strong;
+
+ if need_push && inner.delivery_state.should_normal_push() {
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_normal_push();
+ Some(list_arc)
+ } else {
+ None
+ }
+ } else {
+ None
+ }
+ }
+
+ pub(crate) fn update_refcount_locked(
+ self: &DArc<Node>,
+ inc: bool,
+ strong: bool,
+ count: usize,
+ owner_inner: &mut ProcessInner,
+ ) -> Option<DLArc<Node>> {
+ let is_dead = owner_inner.is_dead;
+ let inner = self.inner.access_mut(owner_inner);
+
+ // Get a reference to the state we'll update.
+ let state = if strong {
+ &mut inner.strong
+ } else {
+ &mut inner.weak
+ };
+
+ // Update the count and determine whether we need to push work.
+ let need_push = if inc {
+ state.count += count;
+ // TODO: This method shouldn't be used for zero-to-one increments.
+ !is_dead && !state.has_count
+ } else {
+ if state.count < count {
+ pr_err!("Failure: refcount underflow!");
+ return None;
+ }
+ state.count -= count;
+ !is_dead && state.count == 0 && state.has_count
+ };
+
+ if need_push && inner.delivery_state.should_normal_push() {
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_normal_push();
+ Some(list_arc)
+ } else {
+ None
+ }
+ }
+
+ pub(crate) fn incr_refcount_allow_zero2one(
+ self: &DArc<Self>,
+ strong: bool,
+ owner_inner: &mut ProcessInner,
+ ) -> Result<Option<DLArc<Node>>, CouldNotDeliverCriticalIncrement> {
+ let is_dead = owner_inner.is_dead;
+ let inner = self.inner.access_mut(owner_inner);
+
+ // Get a reference to the state we'll update.
+ let state = if strong {
+ &mut inner.strong
+ } else {
+ &mut inner.weak
+ };
+
+ // Update the count and determine whether we need to push work.
+ state.count += 1;
+ if is_dead || state.has_count {
+ return Ok(None);
+ }
+
+ // Userspace needs to be notified of this.
+ if !strong && inner.delivery_state.should_push_weak_zero2one() {
+ assert!(inner.delivery_state.can_push_weak_zero2one_normally());
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_push_weak_zero2one();
+ Ok(Some(list_arc))
+ } else if strong && inner.delivery_state.should_push_strong_zero2one() {
+ if inner.delivery_state.can_push_strong_zero2one_normally() {
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_push_strong_zero2one();
+ Ok(Some(list_arc))
+ } else {
+ state.count -= 1;
+ Err(CouldNotDeliverCriticalIncrement)
+ }
+ } else {
+ // Work is already pushed, and we don't need to push again.
+ Ok(None)
+ }
+ }
+
+ pub(crate) fn incr_refcount_allow_zero2one_with_wrapper(
+ self: &DArc<Self>,
+ strong: bool,
+ wrapper: CritIncrWrapper,
+ owner_inner: &mut ProcessInner,
+ ) -> Option<DLArc<dyn DeliverToRead>> {
+ match self.incr_refcount_allow_zero2one(strong, owner_inner) {
+ Ok(Some(node)) => Some(node as _),
+ Ok(None) => None,
+ Err(CouldNotDeliverCriticalIncrement) => {
+ assert!(strong);
+ let inner = self.inner.access_mut(owner_inner);
+ inner.strong.count += 1;
+ inner.delivery_state.did_push_strong_zero2one_wrapper();
+ Some(wrapper.init(self.clone()))
+ }
+ }
+ }
+
+ pub(crate) fn update_refcount(self: &DArc<Self>, inc: bool, count: usize, strong: bool) {
+ self.owner
+ .inner
+ .lock()
+ .update_node_refcount(self, inc, strong, count, None);
+ }
+
+ pub(crate) fn populate_counts(
+ &self,
+ out: &mut BinderNodeInfoForRef,
+ guard: &Guard<'_, ProcessInner, SpinLockBackend>,
+ ) {
+ let inner = self.inner.access(guard);
+ out.strong_count = inner.strong.count as _;
+ out.weak_count = inner.weak.count as _;
+ }
+
+ pub(crate) fn populate_debug_info(
+ &self,
+ out: &mut BinderNodeDebugInfo,
+ guard: &Guard<'_, ProcessInner, SpinLockBackend>,
+ ) {
+ out.ptr = self.ptr as _;
+ out.cookie = self.cookie as _;
+ let inner = self.inner.access(guard);
+ if inner.strong.has_count {
+ out.has_strong_ref = 1;
+ }
+ if inner.weak.has_count {
+ out.has_weak_ref = 1;
+ }
+ }
+
+ pub(crate) fn force_has_count(&self, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) {
+ let inner = self.inner.access_mut(guard);
+ inner.strong.has_count = true;
+ inner.weak.has_count = true;
+ }
+
+ fn write(&self, writer: &mut BinderReturnWriter<'_>, code: u32) -> Result {
+ writer.write_code(code)?;
+ writer.write_payload(&self.ptr)?;
+ writer.write_payload(&self.cookie)?;
+ Ok(())
+ }
+
+ pub(crate) fn submit_oneway(
+ &self,
+ transaction: DLArc<Transaction>,
+ guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+ ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
+ if guard.is_dead {
+ return Err((BinderError::new_dead(), transaction));
+ }
+
+ let inner = self.inner.access_mut(guard);
+ if inner.has_oneway_transaction {
+ inner.oneway_todo.push_back(transaction);
+ } else {
+ inner.has_oneway_transaction = true;
+ guard.push_work(transaction)?;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn release(&self) {
+ let mut guard = self.owner.inner.lock();
+ while let Some(work) = self.inner.access_mut(&mut guard).oneway_todo.pop_front() {
+ drop(guard);
+ work.into_arc().cancel();
+ guard = self.owner.inner.lock();
+ }
+
+ while let Some(death) = self.inner.access_mut(&mut guard).death_list.pop_front() {
+ drop(guard);
+ death.into_arc().set_dead();
+ guard = self.owner.inner.lock();
+ }
+ }
+
+ pub(crate) fn pending_oneway_finished(&self) {
+ let mut guard = self.owner.inner.lock();
+ if guard.is_dead {
+ // Cleanup will happen in `Process::deferred_release`.
+ return;
+ }
+
+ let inner = self.inner.access_mut(&mut guard);
+
+ let transaction = inner.oneway_todo.pop_front();
+ inner.has_oneway_transaction = transaction.is_some();
+ if let Some(transaction) = transaction {
+ match guard.push_work(transaction) {
+ Ok(()) => {}
+ Err((_err, work)) => {
+ // Process is dead.
+ // This shouldn't happen due to the `is_dead` check, but if it does, just drop
+ // the transaction and return.
+ drop(guard);
+ drop(work);
+ }
+ }
+ }
+ }
+
+ /// Finds an outdated transaction that the given transaction can replace.
+ ///
+ /// If one is found, it is removed from the list and returned.
+ pub(crate) fn take_outdated_transaction(
+ &self,
+ new: &Transaction,
+ guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+ ) -> Option<DLArc<Transaction>> {
+ let inner = self.inner.access_mut(guard);
+ let mut cursor = inner.oneway_todo.cursor_front();
+ while let Some(next) = cursor.peek_next() {
+ if new.can_replace(&next) {
+ return Some(next.remove());
+ }
+ cursor.move_next();
+ }
+ None
+ }
+
+ /// This is split into a separate function since it's called by both `Node::do_work` and
+ /// `NodeWrapper::do_work`.
+ fn do_work_locked(
+ &self,
+ writer: &mut BinderReturnWriter<'_>,
+ mut guard: Guard<'_, ProcessInner, SpinLockBackend>,
+ ) -> Result<bool> {
+ let inner = self.inner.access_mut(&mut guard);
+ let strong = inner.strong.count > 0;
+ let has_strong = inner.strong.has_count;
+ let weak = strong || inner.weak.count > 0;
+ let has_weak = inner.weak.has_count;
+
+ if weak && !has_weak {
+ inner.weak.has_count = true;
+ inner.active_inc_refs += 1;
+ }
+
+ if strong && !has_strong {
+ inner.strong.has_count = true;
+ inner.active_inc_refs += 1;
+ }
+
+ let no_active_inc_refs = inner.active_inc_refs == 0;
+ let should_drop_weak = no_active_inc_refs && (!weak && has_weak);
+ let should_drop_strong = no_active_inc_refs && (!strong && has_strong);
+ if should_drop_weak {
+ inner.weak.has_count = false;
+ }
+ if should_drop_strong {
+ inner.strong.has_count = false;
+ }
+ if no_active_inc_refs && !weak {
+ // Remove the node if there are no references to it.
+ guard.remove_node(self.ptr);
+ }
+ drop(guard);
+
+ if weak && !has_weak {
+ self.write(writer, BR_INCREFS)?;
+ }
+ if strong && !has_strong {
+ self.write(writer, BR_ACQUIRE)?;
+ }
+ if should_drop_strong {
+ self.write(writer, BR_RELEASE)?;
+ }
+ if should_drop_weak {
+ self.write(writer, BR_DECREFS)?;
+ }
+
+ Ok(true)
+ }
+
+ pub(crate) fn add_freeze_listener(
+ &self,
+ process: &Arc<Process>,
+ flags: kernel::alloc::Flags,
+ ) -> Result {
+ let mut vec_alloc = KVVec::<Arc<Process>>::new();
+ loop {
+ let mut guard = self.owner.inner.lock();
+ // Do not check for `guard.is_dead`. The `is_dead` flag that matters here is that of the
+ // owner of the listener, not the target.
+ let inner = self.inner.access_mut(&mut guard);
+ let len = inner.freeze_list.len();
+ if len >= inner.freeze_list.capacity() {
+ if len >= vec_alloc.capacity() {
+ drop(guard);
+ vec_alloc = KVVec::with_capacity((1 + len).next_power_of_two(), flags)?;
+ continue;
+ }
+ mem::swap(&mut inner.freeze_list, &mut vec_alloc);
+ for elem in vec_alloc.drain_all() {
+ inner.freeze_list.push_within_capacity(elem)?;
+ }
+ }
+ inner.freeze_list.push_within_capacity(process.clone())?;
+ return Ok(());
+ }
+ }
+
+ pub(crate) fn remove_freeze_listener(&self, p: &Arc<Process>) {
+ let _unused_capacity;
+ let mut guard = self.owner.inner.lock();
+ let inner = self.inner.access_mut(&mut guard);
+ let len = inner.freeze_list.len();
+ inner.freeze_list.retain(|proc| !Arc::ptr_eq(proc, p));
+ if len == inner.freeze_list.len() {
+ pr_warn!(
+ "Could not remove freeze listener for {}\n",
+ p.pid_in_current_ns()
+ );
+ }
+ if inner.freeze_list.is_empty() {
+ _unused_capacity = mem::take(&mut inner.freeze_list);
+ }
+ }
+
+ pub(crate) fn freeze_list<'a>(&'a self, guard: &'a ProcessInner) -> &'a [Arc<Process>] {
+ &self.inner.access(guard).freeze_list
+ }
+}
+
+impl DeliverToRead for Node {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let mut owner_inner = self.owner.inner.lock();
+ let inner = self.inner.access_mut(&mut owner_inner);
+
+ assert!(inner.delivery_state.has_pushed_node);
+ if inner.delivery_state.has_pushed_wrapper {
+ // If the wrapper is scheduled, then we are either a normal push or weak zero2one
+ // increment, and the wrapper is a strong zero2one increment, so the wrapper always
+ // takes precedence over us.
+ assert!(inner.delivery_state.has_strong_zero2one);
+ inner.delivery_state.has_pushed_node = false;
+ inner.delivery_state.has_weak_zero2one = false;
+ return Ok(true);
+ }
+
+ inner.delivery_state.has_pushed_node = false;
+ inner.delivery_state.has_weak_zero2one = false;
+ inner.delivery_state.has_strong_zero2one = false;
+
+ self.do_work_locked(writer, owner_inner)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(
+ m,
+ "{}node work {}: u{:016x} c{:016x}\n",
+ prefix,
+ self.debug_id,
+ self.ptr,
+ self.cookie,
+ );
+ Ok(())
+ }
+}
+
+/// Represents something that holds one or more ref-counts to a `Node`.
+///
+/// Whenever process A holds a refcount to a node owned by a different process B, then process A
+/// will store a `NodeRef` that refers to the `Node` in process B. When process A releases the
+/// refcount, we destroy the NodeRef, which decrements the ref-count in process A.
+///
+/// This type is also used for some other cases. For example, a transaction allocation holds a
+/// refcount on the target node, and this is implemented by storing a `NodeRef` in the allocation
+/// so that the destructor of the allocation will drop a refcount of the `Node`.
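+///
+/// For example (illustrative): if userspace acquires the same handle three times, then
+/// `strong_count` becomes 3 while `strong_node_count` can remain 1, since a single refcount on
+/// the `Node` is enough to keep it alive in the owning process.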
+pub(crate) struct NodeRef {
+ pub(crate) node: DArc<Node>,
+ /// How many times does this NodeRef hold a refcount on the Node?
+ strong_node_count: usize,
+ weak_node_count: usize,
+ /// How many times does userspace hold a refcount on this NodeRef?
+ strong_count: usize,
+ weak_count: usize,
+}
+
+impl NodeRef {
+ pub(crate) fn new(node: DArc<Node>, strong_count: usize, weak_count: usize) -> Self {
+ Self {
+ node,
+ strong_node_count: strong_count,
+ weak_node_count: weak_count,
+ strong_count,
+ weak_count,
+ }
+ }
+
+ pub(crate) fn absorb(&mut self, mut other: Self) {
+ assert!(
+ Arc::ptr_eq(&self.node, &other.node),
+ "absorb called with differing nodes"
+ );
+ self.strong_node_count += other.strong_node_count;
+ self.weak_node_count += other.weak_node_count;
+ self.strong_count += other.strong_count;
+ self.weak_count += other.weak_count;
+ other.strong_count = 0;
+ other.weak_count = 0;
+ other.strong_node_count = 0;
+ other.weak_node_count = 0;
+
+ if self.strong_node_count >= 2 || self.weak_node_count >= 2 {
+ let mut guard = self.node.owner.inner.lock();
+ let inner = self.node.inner.access_mut(&mut guard);
+
+ if self.strong_node_count >= 2 {
+ inner.strong.count -= self.strong_node_count - 1;
+ self.strong_node_count = 1;
+ assert_ne!(inner.strong.count, 0);
+ }
+ if self.weak_node_count >= 2 {
+ inner.weak.count -= self.weak_node_count - 1;
+ self.weak_node_count = 1;
+ assert_ne!(inner.weak.count, 0);
+ }
+ }
+ }
+
+ pub(crate) fn get_count(&self) -> (usize, usize) {
+ (self.strong_count, self.weak_count)
+ }
+
+ pub(crate) fn clone(&self, strong: bool) -> Result<NodeRef> {
+ if strong && self.strong_count == 0 {
+ return Err(EINVAL);
+ }
+ Ok(self
+ .node
+ .owner
+ .inner
+ .lock()
+ .new_node_ref(self.node.clone(), strong, None))
+ }
+
+ /// Updates (increments or decrements) the number of references held against the node. If the
+ /// count being updated transitions from 0 to 1 or from 1 to 0, the node is notified by having
+ /// its `update_refcount` function called.
+ ///
+ /// Returns whether `self` should be removed (when both counts are zero).
+ pub(crate) fn update(&mut self, inc: bool, strong: bool) -> bool {
+ if strong && self.strong_count == 0 {
+ return false;
+ }
+ let (count, node_count, other_count) = if strong {
+ (
+ &mut self.strong_count,
+ &mut self.strong_node_count,
+ self.weak_count,
+ )
+ } else {
+ (
+ &mut self.weak_count,
+ &mut self.weak_node_count,
+ self.strong_count,
+ )
+ };
+ if inc {
+ if *count == 0 {
+ *node_count = 1;
+ self.node.update_refcount(true, 1, strong);
+ }
+ *count += 1;
+ } else {
+ if *count == 0 {
+ pr_warn!(
+ "pid {} performed invalid decrement on ref\n",
+ kernel::current!().pid()
+ );
+ return false;
+ }
+ *count -= 1;
+ if *count == 0 {
+ self.node.update_refcount(false, *node_count, strong);
+ *node_count = 0;
+ return other_count == 0;
+ }
+ }
+ false
+ }
+}
+
+impl Drop for NodeRef {
+ // This destructor is called conditionally from `Allocation::drop`. That branch is often
+ // mispredicted. Inlining this method call reduces the cost of those branch mispredictions.
+ #[inline(always)]
+ fn drop(&mut self) {
+ if self.strong_node_count > 0 {
+ self.node
+ .update_refcount(false, self.strong_node_count, true);
+ }
+ if self.weak_node_count > 0 {
+ self.node
+ .update_refcount(false, self.weak_node_count, false);
+ }
+ }
+}
+
+struct NodeDeathInner {
+ dead: bool,
+ cleared: bool,
+ notification_done: bool,
+ /// Indicates whether the normal flow was interrupted by removing the handle. In this case, we
+ /// need to behave as if the death notification didn't exist (i.e., we don't deliver anything to
+ /// the user).
+ aborted: bool,
+}
+
+/// Used to deliver notifications when a process dies.
+///
+/// A process can request to be notified when a process dies using `BC_REQUEST_DEATH_NOTIFICATION`.
+/// This will make the driver send a `BR_DEAD_BINDER` to userspace when the process dies (or
+/// immediately if it is already dead). Userspace is supposed to respond with `BC_DEAD_BINDER_DONE`
+/// once it has processed the notification.
+///
+/// Userspace can unregister from death notifications using the `BC_CLEAR_DEATH_NOTIFICATION`
+/// command. In this case, the kernel will respond with `BR_CLEAR_DEATH_NOTIFICATION_DONE` once the
+/// notification has been removed. Note that if the remote process dies before the kernel has
+/// responded with `BR_CLEAR_DEATH_NOTIFICATION_DONE`, then the kernel will still send a
+/// `BR_DEAD_BINDER`, which userspace must be able to process. In this case, the kernel will wait
+/// for the `BC_DEAD_BINDER_DONE` command before it sends `BR_CLEAR_DEATH_NOTIFICATION_DONE`.
+///
+/// Note that even if the kernel sends a `BR_DEAD_BINDER`, this does not remove the death
+/// notification. Userspace must still remove it manually using `BC_CLEAR_DEATH_NOTIFICATION`.
+///
+/// If a process uses `BC_RELEASE` to destroy its last refcount on a node that has an active death
+/// registration, then the death registration is immediately deleted (we implement this using the
+/// `aborted` field). However, userspace is not supposed to delete a `NodeRef` without first
+/// deregistering death notifications, so this codepath is not executed under normal circumstances.
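+///
+/// A typical exchange (illustrative only; it follows the commands described above):
+///
+/// ```text
+/// userspace                               kernel
+/// BC_REQUEST_DEATH_NOTIFICATION  ----->   registers the NodeDeath
+///                                         ... remote process dies ...
+///                                <-----   BR_DEAD_BINDER
+/// BC_DEAD_BINDER_DONE            ----->   marks the notification done
+/// BC_CLEAR_DEATH_NOTIFICATION    ----->
+///                                <-----   BR_CLEAR_DEATH_NOTIFICATION_DONE
+/// ```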
+#[pin_data]
+pub(crate) struct NodeDeath {
+ node: DArc<Node>,
+ process: Arc<Process>,
+ pub(crate) cookie: u64,
+ #[pin]
+ links_track: AtomicTracker<0>,
+ /// Used by the owner `Node` to store a list of registered death notifications.
+ ///
+ /// # Invariants
+ ///
+ /// Only ever used with the `death_list` list of `self.node`.
+ #[pin]
+ death_links: ListLinks<1>,
+ /// Used by the process to keep track of the death notifications for which we have sent a
+ /// `BR_DEAD_BINDER` but not yet received a `BC_DEAD_BINDER_DONE`.
+ ///
+ /// # Invariants
+ ///
+ /// Only ever used with the `delivered_deaths` list of `self.process`.
+ #[pin]
+ delivered_links: ListLinks<2>,
+ #[pin]
+ delivered_links_track: AtomicTracker<2>,
+ #[pin]
+ inner: SpinLock<NodeDeathInner>,
+}
+
+impl NodeDeath {
+ /// Constructs a new node death notification object.
+ pub(crate) fn new(
+ node: DArc<Node>,
+ process: Arc<Process>,
+ cookie: u64,
+ ) -> impl PinInit<DTRWrap<Self>> {
+ DTRWrap::new(pin_init!(
+ Self {
+ node,
+ process,
+ cookie,
+ links_track <- AtomicTracker::new(),
+ death_links <- ListLinks::new(),
+ delivered_links <- ListLinks::new(),
+ delivered_links_track <- AtomicTracker::new(),
+ inner <- kernel::new_spinlock!(NodeDeathInner {
+ dead: false,
+ cleared: false,
+ notification_done: false,
+ aborted: false,
+ }, "NodeDeath::inner"),
+ }
+ ))
+ }
+
+ /// Sets the cleared flag to `true`.
+ ///
+ /// It removes `self` from the node's death notification list if needed.
+ ///
+ /// Returns whether it needs to be queued.
+ pub(crate) fn set_cleared(self: &DArc<Self>, abort: bool) -> bool {
+ let (needs_removal, needs_queueing) = {
+ // Update state and determine if we need to queue a work item. We only need to do it
+ // when the node is not dead or if the user already completed the death notification.
+ let mut inner = self.inner.lock();
+ if abort {
+ inner.aborted = true;
+ }
+ if inner.cleared {
+ // Already cleared.
+ return false;
+ }
+ inner.cleared = true;
+ (!inner.dead, !inner.dead || inner.notification_done)
+ };
+
+ // Remove death notification from node.
+ if needs_removal {
+ let mut owner_inner = self.node.owner.inner.lock();
+ let node_inner = self.node.inner.access_mut(&mut owner_inner);
+ // SAFETY: A `NodeDeath` is never inserted into the death list of any node other than
+ // its owner, so it is either in this death list or in no death list.
+ unsafe { node_inner.death_list.remove(self) };
+ }
+ needs_queueing
+ }
+
+ /// Sets the 'notification done' flag to `true`.
+ pub(crate) fn set_notification_done(self: DArc<Self>, thread: &Thread) {
+ let needs_queueing = {
+ let mut inner = self.inner.lock();
+ inner.notification_done = true;
+ inner.cleared
+ };
+ if needs_queueing {
+ if let Some(death) = ListArc::try_from_arc_or_drop(self) {
+ let _ = thread.push_work_if_looper(death);
+ }
+ }
+ }
+
+ /// Sets the 'dead' flag to `true` and queues work item if needed.
+ pub(crate) fn set_dead(self: DArc<Self>) {
+ let needs_queueing = {
+ let mut inner = self.inner.lock();
+ if inner.cleared {
+ false
+ } else {
+ inner.dead = true;
+ true
+ }
+ };
+ if needs_queueing {
+ // Push the death notification to the target process. There is nothing else to do if
+ // it's already dead.
+ if let Some(death) = ListArc::try_from_arc_or_drop(self) {
+ let process = death.process.clone();
+ let _ = process.push_work(death);
+ }
+ }
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for NodeDeath {
+ tracked_by links_track: AtomicTracker;
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<1> for DTRWrap<NodeDeath> { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<1> for DTRWrap<NodeDeath> {
+ using ListLinks { self.wrapped.death_links };
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<2> for DTRWrap<NodeDeath> {
+ tracked_by wrapped: NodeDeath;
+ }
+}
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<2> for NodeDeath {
+ tracked_by delivered_links_track: AtomicTracker<2>;
+ }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<2> for DTRWrap<NodeDeath> {
+ using ListLinks { self.wrapped.delivered_links };
+ }
+}
+
+impl DeliverToRead for NodeDeath {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let done = {
+ let inner = self.inner.lock();
+ if inner.aborted {
+ return Ok(true);
+ }
+ inner.cleared && (!inner.dead || inner.notification_done)
+ };
+
+ let cookie = self.cookie;
+ let cmd = if done {
+ BR_CLEAR_DEATH_NOTIFICATION_DONE
+ } else {
+ let process = self.process.clone();
+ let mut process_inner = process.inner.lock();
+ let inner = self.inner.lock();
+ if inner.aborted {
+ return Ok(true);
+ }
+ // We're still holding the inner lock, so it cannot be aborted while we insert it into
+ // the delivered list.
+ process_inner.death_delivered(self.clone());
+ BR_DEAD_BINDER
+ };
+
+ writer.write_code(cmd)?;
+ writer.write_payload(&cookie)?;
+ // DEAD_BINDER notifications can cause transactions, so stop processing work items when we
+ // get to a death notification.
+ Ok(cmd != BR_DEAD_BINDER)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ let inner = self.inner.lock();
+
+ let dead_binder = inner.dead && !inner.notification_done;
+
+ if dead_binder {
+ if inner.cleared {
+ seq_print!(m, "{}has cleared dead binder\n", prefix);
+ } else {
+ seq_print!(m, "{}has dead binder\n", prefix);
+ }
+ } else {
+ seq_print!(m, "{}has cleared death notification\n", prefix);
+ }
+
+ Ok(())
+ }
+}
diff --git a/drivers/android/binder/node/wrapper.rs b/drivers/android/binder/node/wrapper.rs
new file mode 100644
index 000000000000..43294c050502
--- /dev/null
+++ b/drivers/android/binder/node/wrapper.rs
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{list::ListArc, prelude::*, seq_file::SeqFile, seq_print, sync::UniqueArc};
+
+use crate::{node::Node, thread::Thread, BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead};
+
+use core::mem::MaybeUninit;
+
+pub(crate) struct CritIncrWrapper {
+ inner: UniqueArc<MaybeUninit<DTRWrap<NodeWrapper>>>,
+}
+
+impl CritIncrWrapper {
+ pub(crate) fn new() -> Result<Self> {
+ Ok(CritIncrWrapper {
+ inner: UniqueArc::new_uninit(GFP_KERNEL)?,
+ })
+ }
+
+ pub(super) fn init(self, node: DArc<Node>) -> DLArc<dyn DeliverToRead> {
+ match self.inner.pin_init_with(DTRWrap::new(NodeWrapper { node })) {
+ Ok(initialized) => ListArc::from(initialized) as _,
+ Err(err) => match err {},
+ }
+ }
+}
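+
+// Illustrative caller pattern (a sketch; real call sites live in the process code, and
+// `strong` and the final work push are placeholders):
+//
+// // The wrapper is allocated up front, before taking the process spinlock.
+// let wrapper = CritIncrWrapper::new()?;
+// let mut inner = node.owner.inner.lock();
+// if let Some(work) = node.incr_refcount_allow_zero2one_with_wrapper(strong, wrapper, &mut inner) {
+//     // push `work` to the appropriate todo list
+// }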
+
+struct NodeWrapper {
+ node: DArc<Node>,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for NodeWrapper {
+ untracked;
+ }
+}
+
+impl DeliverToRead for NodeWrapper {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let node = &self.node;
+ let mut owner_inner = node.owner.inner.lock();
+ let inner = node.inner.access_mut(&mut owner_inner);
+
+ let ds = &mut inner.delivery_state;
+
+ assert!(ds.has_pushed_wrapper);
+ assert!(ds.has_strong_zero2one);
+ ds.has_pushed_wrapper = false;
+ ds.has_strong_zero2one = false;
+
+ node.do_work_locked(writer, owner_inner)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(
+ m,
+ "{}node work {}: u{:016x} c{:016x}\n",
+ prefix,
+ self.node.debug_id,
+ self.node.ptr,
+ self.node.cookie,
+ );
+ Ok(())
+ }
+}
diff --git a/drivers/android/binder/page_range.rs b/drivers/android/binder/page_range.rs
new file mode 100644
index 000000000000..9379038f61f5
--- /dev/null
+++ b/drivers/android/binder/page_range.rs
@@ -0,0 +1,734 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! This module has utilities for managing a page range where unused pages may be reclaimed by a
+//! vma shrinker.
+
+// To avoid deadlocks, locks are taken in the order:
+//
+// 1. mmap lock
+// 2. spinlock
+// 3. lru spinlock
+//
+// The shrinker will use trylock methods because it locks them in a different order.
+
+use core::{
+ marker::PhantomPinned,
+ mem::{size_of, size_of_val, MaybeUninit},
+ ptr,
+};
+
+use kernel::{
+ bindings,
+ error::Result,
+ ffi::{c_ulong, c_void},
+ mm::{virt, Mm, MmWithUser},
+ new_mutex, new_spinlock,
+ page::{Page, PAGE_SHIFT, PAGE_SIZE},
+ prelude::*,
+ str::CStr,
+ sync::{aref::ARef, Mutex, SpinLock},
+ task::Pid,
+ transmute::FromBytes,
+ types::Opaque,
+ uaccess::UserSliceReader,
+};
+
+/// Represents a shrinker that can be registered with the kernel.
+///
+/// Each shrinker can be used by many `ShrinkablePageRange` objects.
+#[repr(C)]
+pub(crate) struct Shrinker {
+ inner: Opaque<*mut bindings::shrinker>,
+ list_lru: Opaque<bindings::list_lru>,
+}
+
+// SAFETY: The shrinker and list_lru are thread safe.
+unsafe impl Send for Shrinker {}
+// SAFETY: The shrinker and list_lru are thread safe.
+unsafe impl Sync for Shrinker {}
+
+impl Shrinker {
+ /// Create a new shrinker.
+ ///
+ /// # Safety
+ ///
+ /// Before using this shrinker with a `ShrinkablePageRange`, the `register` method must have
+ /// been called exactly once, and it must not have returned an error.
+ pub(crate) const unsafe fn new() -> Self {
+ Self {
+ inner: Opaque::uninit(),
+ list_lru: Opaque::uninit(),
+ }
+ }
+
+ /// Register this shrinker with the kernel.
+ pub(crate) fn register(&'static self, name: &CStr) -> Result<()> {
+ // SAFETY: These fields are not yet used, so it's okay to zero them.
+ unsafe {
+ self.inner.get().write(ptr::null_mut());
+ self.list_lru.get().write_bytes(0, 1);
+ }
+
+ // SAFETY: The field is not yet used, so we can initialize it.
+ let ret = unsafe { bindings::__list_lru_init(self.list_lru.get(), false, ptr::null_mut()) };
+ if ret != 0 {
+ return Err(Error::from_errno(ret));
+ }
+
+ // SAFETY: The `name` points at a valid c string.
+ let shrinker = unsafe { bindings::shrinker_alloc(0, name.as_char_ptr()) };
+ if shrinker.is_null() {
+ // SAFETY: We initialized it, so it's okay to destroy it.
+ unsafe { bindings::list_lru_destroy(self.list_lru.get()) };
+ // `ret` is zero at this point, so it cannot be used as the errno; an
+ // allocation failure maps to ENOMEM.
+ return Err(ENOMEM);
+ }
+
+ // SAFETY: We're about to register the shrinker, and these are the fields we need to
+ // initialize. (All other fields are already zeroed.)
+ unsafe {
+ (&raw mut (*shrinker).count_objects).write(Some(rust_shrink_count));
+ (&raw mut (*shrinker).scan_objects).write(Some(rust_shrink_scan));
+ (&raw mut (*shrinker).private_data).write(self.list_lru.get().cast());
+ }
+
+ // SAFETY: The new shrinker has been fully initialized, so we can register it.
+ unsafe { bindings::shrinker_register(shrinker) };
+
+ // SAFETY: This initializes the pointer to the shrinker so that we can use it.
+ unsafe { self.inner.get().write(shrinker) };
+
+ Ok(())
+ }
+}
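+
+// Illustrative registration (a sketch; the static's name and the init call site are
+// assumptions, not part of this file):
+//
+// static BINDER_SHRINKER: Shrinker = unsafe { Shrinker::new() };
+//
+// // In the module initializer, exactly once, before any `ShrinkablePageRange` is created:
+// BINDER_SHRINKER.register(kernel::c_str!("binder"))?;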
+
+/// A container that manages a page range in a vma.
+///
+/// The pages can be thought of as an array of booleans indicating whether each page is usable. The
+/// methods `use_range` and `stop_using_range` set all booleans in a range to true or false
+/// respectively. Initially, no pages are allocated. When a page is not used, it is not freed
+/// immediately. Instead, it is made available to the memory shrinker to free it if the device is
+/// under memory pressure.
+///
+/// It's okay for `use_range` and `stop_using_range` to race with each other, although there's no
+/// way to know whether an index ends up with true or false if a call to `use_range` races with
+/// another call to `stop_using_range` on a given index.
+///
+/// It's also okay for the two methods to race with themselves, e.g. if two threads call
+/// `use_range` on the same index, then that's fine and neither call will return until the page is
+/// allocated and mapped.
+///
+/// The methods that read or write to a range require that the page is marked as in use. So it is
+/// _not_ okay to call `stop_using_range` on a page that is in use by the methods that read or
+/// write to the page.
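+///
+/// # Example
+///
+/// An illustrative sketch (error handling and the mmap plumbing are elided; `SHRINKER` is an
+/// assumed, already-registered static):
+///
+/// ```ignore
+/// let range = KBox::pin_init(ShrinkablePageRange::new(&SHRINKER), GFP_KERNEL)?;
+/// // `register_with_vma` must already have been called from the mmap handler.
+/// range.use_range(0, 4)?;
+/// // SAFETY: Pages 0..4 remain in use for the duration of the call.
+/// unsafe { range.write(0, &42u32)? };
+/// range.stop_using_range(0, 4); // the shrinker may reclaim them again
+/// ```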
+#[pin_data(PinnedDrop)]
+pub(crate) struct ShrinkablePageRange {
+ /// Shrinker object registered with the kernel.
+ shrinker: &'static Shrinker,
+ /// Pid using this page range. Only used as debugging information.
+ pid: Pid,
+ /// The mm for the relevant process.
+ mm: ARef<Mm>,
+ /// Used to synchronize calls to `vm_insert_page` and `zap_page_range_single`.
+ #[pin]
+ mm_lock: Mutex<()>,
+ /// Spinlock protecting changes to pages.
+ #[pin]
+ lock: SpinLock<Inner>,
+
+ /// Must not move, since page info has pointers back.
+ #[pin]
+ _pin: PhantomPinned,
+}
+
+struct Inner {
+ /// Array of pages.
+ ///
+ /// Since this is also accessed by the shrinker, we can't use a `Box`, which asserts exclusive
+ /// ownership. To deal with that, we manage it using raw pointers.
+ pages: *mut PageInfo,
+ /// Length of the `pages` array.
+ size: usize,
+ /// The address of the vma to insert the pages into.
+ vma_addr: usize,
+}
+
+// SAFETY: proper locking is in place for `Inner`
+unsafe impl Send for Inner {}
+
+type StableMmGuard =
+ kernel::sync::lock::Guard<'static, (), kernel::sync::lock::mutex::MutexBackend>;
+
+/// An array element that describes the current state of a page.
+///
+/// There are three states:
+///
+/// * Free. The page is None. The `lru` element is not queued.
+/// * Available. The page is Some. The `lru` element is queued to the shrinker's lru.
+/// * Used. The page is Some. The `lru` element is not queued.
+///
+/// When an element is available, the shrinker is able to free the page.
+#[repr(C)]
+struct PageInfo {
+ lru: bindings::list_head,
+ page: Option<Page>,
+ range: *const ShrinkablePageRange,
+}
+
+impl PageInfo {
+ /// # Safety
+ ///
+ /// The caller ensures that writing to `me.page` is ok, and that the page is not currently set.
+ unsafe fn set_page(me: *mut PageInfo, page: Page) {
+ // SAFETY: This pointer offset is in bounds.
+ let ptr = unsafe { &raw mut (*me).page };
+
+ // SAFETY: The pointer is valid for writing, so also valid for reading.
+ if unsafe { (*ptr).is_some() } {
+ pr_err!("set_page called when there is already a page");
+ // SAFETY: We will initialize the page again below.
+ unsafe { ptr::drop_in_place(ptr) };
+ }
+
+ // SAFETY: The pointer is valid for writing.
+ unsafe { ptr::write(ptr, Some(page)) };
+ }
+
+ /// # Safety
+ ///
+ /// The caller ensures that reading from `me.page` is ok for the duration of 'a.
+ unsafe fn get_page<'a>(me: *const PageInfo) -> Option<&'a Page> {
+ // SAFETY: This pointer offset is in bounds.
+ let ptr = unsafe { &raw const (*me).page };
+
+ // SAFETY: The pointer is valid for reading.
+ unsafe { (*ptr).as_ref() }
+ }
+
+ /// # Safety
+ ///
+ /// The caller ensures that writing to `me.page` is ok for the duration of 'a.
+ unsafe fn take_page(me: *mut PageInfo) -> Option<Page> {
+ // SAFETY: This pointer offset is in bounds.
+ let ptr = unsafe { &raw mut (*me).page };
+
+ // SAFETY: The pointer is valid for reading.
+ unsafe { (*ptr).take() }
+ }
+
+ /// Add this page to the lru list, if not already in the list.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must be valid, and it must be the right shrinker and nid.
+ unsafe fn list_lru_add(me: *mut PageInfo, nid: i32, shrinker: &'static Shrinker) {
+ // SAFETY: This pointer offset is in bounds.
+ let lru_ptr = unsafe { &raw mut (*me).lru };
+ // SAFETY: The lru pointer is valid, and we're not using it with any other lru list.
+ unsafe { bindings::list_lru_add(shrinker.list_lru.get(), lru_ptr, nid, ptr::null_mut()) };
+ }
+
+ /// Remove this page from the lru list, if it is in the list.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must be valid, and it must be the right shrinker and nid.
+ unsafe fn list_lru_del(me: *mut PageInfo, nid: i32, shrinker: &'static Shrinker) {
+ // SAFETY: This pointer offset is in bounds.
+ let lru_ptr = unsafe { &raw mut (*me).lru };
+ // SAFETY: The lru pointer is valid, and we're not using it with any other lru list.
+ unsafe { bindings::list_lru_del(shrinker.list_lru.get(), lru_ptr, nid, ptr::null_mut()) };
+ }
+}
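+
+// Illustrative state transitions for a `PageInfo` (a sketch; see the methods above and the
+// range methods below):
+//
+// Free      -> Used:      `use_page_slow` allocates a page and calls `set_page`.
+// Used      -> Available: `stop_using_range` calls `list_lru_add`.
+// Available -> Used:      `use_range` calls `list_lru_del`.
+// Available -> Free:      the shrinker scan takes the page via `take_page`.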
+
+impl ShrinkablePageRange {
+ /// Create a new `ShrinkablePageRange` using the given shrinker.
+ pub(crate) fn new(shrinker: &'static Shrinker) -> impl PinInit<Self, Error> {
+ try_pin_init!(Self {
+ shrinker,
+ pid: kernel::current!().pid(),
+ mm: ARef::from(&**kernel::current!().mm().ok_or(ESRCH)?),
+ mm_lock <- new_mutex!((), "ShrinkablePageRange::mm"),
+ lock <- new_spinlock!(Inner {
+ pages: ptr::null_mut(),
+ size: 0,
+ vma_addr: 0,
+ }, "ShrinkablePageRange"),
+ _pin: PhantomPinned,
+ })
+ }
+
+ pub(crate) fn stable_trylock_mm(&self) -> Option<StableMmGuard> {
+ // SAFETY: This extends the duration of the reference. Since this call happens before
+ // `mm_lock` is taken in the destructor of `ShrinkablePageRange`, the destructor will block
+ // until the returned guard is dropped. This ensures that the guard is valid until dropped.
+ let mm_lock = unsafe { &*ptr::from_ref(&self.mm_lock) };
+
+ mm_lock.try_lock()
+ }
+
+ /// Register a vma with this page range. Returns the size of the region.
+ pub(crate) fn register_with_vma(&self, vma: &virt::VmaNew) -> Result<usize> {
+ let num_bytes = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
+ let num_pages = num_bytes >> PAGE_SHIFT;
+
+ if !ptr::eq::<Mm>(&*self.mm, &**vma.mm()) {
+ pr_debug!("Failed to register with vma: invalid vma->vm_mm");
+ return Err(EINVAL);
+ }
+ if num_pages == 0 {
+ pr_debug!("Failed to register with vma: size zero");
+ return Err(EINVAL);
+ }
+
+ let mut pages = KVVec::<PageInfo>::with_capacity(num_pages, GFP_KERNEL)?;
+
+ // SAFETY: This just initializes the pages array.
+ unsafe {
+ let self_ptr = self as *const ShrinkablePageRange;
+ for i in 0..num_pages {
+ let info = pages.as_mut_ptr().add(i);
+ (&raw mut (*info).range).write(self_ptr);
+ (&raw mut (*info).page).write(None);
+ let lru = &raw mut (*info).lru;
+ (&raw mut (*lru).next).write(lru);
+ (&raw mut (*lru).prev).write(lru);
+ }
+ }
+
+ let mut inner = self.lock.lock();
+ if inner.size > 0 {
+ pr_debug!("Failed to register with vma: already registered");
+ drop(inner);
+ return Err(EBUSY);
+ }
+
+ inner.pages = pages.into_raw_parts().0;
+ inner.size = num_pages;
+ inner.vma_addr = vma.start();
+
+ Ok(num_pages)
+ }
+
+ /// Make sure that the given pages are allocated and mapped.
+ ///
+ /// Must not be called from an atomic context.
+ pub(crate) fn use_range(&self, start: usize, end: usize) -> Result<()> {
+ if start >= end {
+ return Ok(());
+ }
+ let mut inner = self.lock.lock();
+ assert!(end <= inner.size);
+
+ for i in start..end {
+ // SAFETY: This pointer offset is in bounds.
+ let page_info = unsafe { inner.pages.add(i) };
+
+ // SAFETY: The pointer is valid, and we hold the lock so reading from the page is okay.
+ if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
+ // Since we're going to use the page, we should remove it from the lru list so that
+ // the shrinker will not free it.
+ //
+ // SAFETY: The pointer is valid, and this is the right shrinker.
+ //
+ // The shrinker can't free the page between the check and this call to
+ // `list_lru_del` because we hold the lock.
+ unsafe { PageInfo::list_lru_del(page_info, page.nid(), self.shrinker) };
+ } else {
+ // We have to allocate a new page. Use the slow path.
+ drop(inner);
+ // SAFETY: `i < end <= inner.size` so `i` is in bounds.
+ match unsafe { self.use_page_slow(i) } {
+ Ok(()) => {}
+ Err(err) => {
+ pr_warn!("Error in use_page_slow: {:?}", err);
+ return Err(err);
+ }
+ }
+ inner = self.lock.lock();
+ }
+ }
+ Ok(())
+ }
+
+ /// Mark the given page as in use, slow path.
+ ///
+ /// Must not be called from an atomic context.
+ ///
+ /// # Safety
+ ///
+ /// Assumes that `i` is in bounds.
+ #[cold]
+ unsafe fn use_page_slow(&self, i: usize) -> Result<()> {
+ let new_page = Page::alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO)?;
+
+ let mm_mutex = self.mm_lock.lock();
+ let inner = self.lock.lock();
+
+ // SAFETY: This pointer offset is in bounds.
+ let page_info = unsafe { inner.pages.add(i) };
+
+ // SAFETY: The pointer is valid, and we hold the lock so reading from the page is okay.
+ if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
+ // The page was already there, or someone else added the page while we didn't hold the
+ // spinlock.
+ //
+ // SAFETY: The pointer is valid, and this is the right shrinker.
+ //
+ // The shrinker can't free the page between the check and this call to
+ // `list_lru_del` because we hold the lock.
+ unsafe { PageInfo::list_lru_del(page_info, page.nid(), self.shrinker) };
+ return Ok(());
+ }
+
+ let vma_addr = inner.vma_addr;
+ // Release the spinlock while we insert the page into the vma.
+ drop(inner);
+
+ // No overflow since we stay in bounds of the vma.
+ let user_page_addr = vma_addr + (i << PAGE_SHIFT);
+
+ // We use `mmput_async` when dropping the `mm` because `use_page_slow` is usually used from
+ // a remote process. If the call to `mmput` races with the process shutting down, then the
+ // caller of `use_page_slow` becomes responsible for cleaning up the `mm`, which doesn't
+ // happen until it returns to userspace. However, the caller might instead go to sleep and
+ // wait for the owner of the `mm` to wake it up, which doesn't happen because it's in the
+ // middle of a shutdown process that won't complete until the `mm` is dropped. This can
+ // amount to a deadlock.
+ //
+ // Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a
+ // workqueue.
+ MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
+ .mmap_read_lock()
+ .vma_lookup(vma_addr)
+ .ok_or(ESRCH)?
+ .as_mixedmap_vma()
+ .ok_or(ESRCH)?
+ .vm_insert_page(user_page_addr, &new_page)
+ .inspect_err(|err| {
+ pr_warn!(
+ "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
+ user_page_addr,
+ vma_addr,
+ i,
+ err
+ )
+ })?;
+
+ let inner = self.lock.lock();
+
+ // SAFETY: The `page_info` pointer is valid and currently does not have a page. The page
+ // can be written to since we hold the lock.
+ //
+ // We released and reacquired the spinlock since we checked that the page is null, but we
+ // always hold the mm_lock mutex when setting the page to a non-null value, so it's not
+ // possible for someone else to have changed it since our check.
+ unsafe { PageInfo::set_page(page_info, new_page) };
+
+ drop(inner);
+ drop(mm_mutex);
+
+ Ok(())
+ }
+
+ /// If the given page is in use, then mark it as available so that the shrinker can free it.
+ ///
+ /// May be called from an atomic context.
+ pub(crate) fn stop_using_range(&self, start: usize, end: usize) {
+ if start >= end {
+ return;
+ }
+ let inner = self.lock.lock();
+ assert!(end <= inner.size);
+
+ for i in (start..end).rev() {
+ // SAFETY: The pointer is in bounds.
+ let page_info = unsafe { inner.pages.add(i) };
+
+ // SAFETY: Okay for reading since we have the lock.
+ if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
+ // SAFETY: The pointer is valid, and it's the right shrinker.
+ unsafe { PageInfo::list_lru_add(page_info, page.nid(), self.shrinker) };
+ }
+ }
+ }
+
+ /// Helper for reading or writing to a range of bytes that may overlap with several pages.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ unsafe fn iterate<T>(&self, mut offset: usize, mut size: usize, mut cb: T) -> Result
+ where
+ T: FnMut(&Page, usize, usize) -> Result,
+ {
+ if size == 0 {
+ return Ok(());
+ }
+
+ let (pages, num_pages) = {
+ let inner = self.lock.lock();
+ (inner.pages, inner.size)
+ };
+ let num_bytes = num_pages << PAGE_SHIFT;
+
+ // Check that the request is within the buffer.
+ if offset.checked_add(size).ok_or(EFAULT)? > num_bytes {
+ return Err(EFAULT);
+ }
+
+ let mut page_index = offset >> PAGE_SHIFT;
+ offset &= PAGE_SIZE - 1;
+ while size > 0 {
+ let available = usize::min(size, PAGE_SIZE - offset);
+ // SAFETY: The pointer is in bounds.
+ let page_info = unsafe { pages.add(page_index) };
+ // SAFETY: The caller guarantees that this page is in the "in use" state for the
+ // duration of this call to `iterate`, so nobody will change the page.
+ let page = unsafe { PageInfo::get_page(page_info) };
+ if page.is_none() {
+ pr_warn!("Page is null!");
+ }
+ let page = page.ok_or(EFAULT)?;
+ cb(page, offset, available)?;
+ size -= available;
+ page_index += 1;
+ offset = 0;
+ }
+ Ok(())
+ }
+
+ /// Copy from userspace into this page range.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn copy_from_user_slice(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ // SAFETY: `self.iterate` has the same safety requirements as `copy_from_user_slice`.
+ unsafe {
+ self.iterate(offset, size, |page, offset, to_copy| {
+ page.copy_from_user_slice_raw(reader, offset, to_copy)
+ })
+ }
+ }
+
+ /// Copy from this page range into kernel space.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+ let mut out = MaybeUninit::<T>::uninit();
+ let mut out_offset = 0;
+ // SAFETY: `self.iterate` has the same safety requirements as `read`.
+ unsafe {
+ self.iterate(offset, size_of::<T>(), |page, offset, to_copy| {
+ // SAFETY: The sum of `offset` and `to_copy` is bounded by the size of T.
+ let obj_ptr = (out.as_mut_ptr() as *mut u8).add(out_offset);
+                // SAFETY: The pointer is in-bounds of the `out` variable, so it is valid.
+ page.read_raw(obj_ptr, offset, to_copy)?;
+ out_offset += to_copy;
+ Ok(())
+ })?;
+ }
+ // SAFETY: We just initialised the data.
+ Ok(unsafe { out.assume_init() })
+ }
+
+ /// Copy from kernel space into this page range.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn write<T: ?Sized>(&self, offset: usize, obj: &T) -> Result {
+ let mut obj_offset = 0;
+ // SAFETY: `self.iterate` has the same safety requirements as `write`.
+ unsafe {
+ self.iterate(offset, size_of_val(obj), |page, offset, to_copy| {
+                // SAFETY: The sum of `obj_offset` and `to_copy` is bounded by the size of the object.
+ let obj_ptr = (obj as *const T as *const u8).add(obj_offset);
+ // SAFETY: We have a reference to the object, so the pointer is valid.
+ page.write_raw(obj_ptr, offset, to_copy)?;
+ obj_offset += to_copy;
+ Ok(())
+ })
+ }
+ }
+
+ /// Write zeroes to the given range.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn fill_zero(&self, offset: usize, size: usize) -> Result {
+        // SAFETY: `self.iterate` has the same safety requirements as `fill_zero`.
+ unsafe {
+ self.iterate(offset, size, |page, offset, len| {
+ page.fill_zero_raw(offset, len)
+ })
+ }
+ }
+}
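+
+// Illustrative caller contract for the unsafe accessors above (a sketch, not
+// compiled; `first_page`, `last_page` and `MyHeader` are hypothetical names):
+//
+//     range.use_range(first_page, last_page)?;        // pages pinned
+//     let hdr = unsafe { range.read::<MyHeader>(offset)? };
+//     range.stop_using_range(first_page, last_page);  // shrinker may reclaim
+//
+// The binder allocator follows this pattern: `use_range` is called from
+// `buffer_alloc` and `stop_using_range` from `buffer_raw_free` in process.rs.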
+
+#[pinned_drop]
+impl PinnedDrop for ShrinkablePageRange {
+ fn drop(self: Pin<&mut Self>) {
+ let (pages, size) = {
+ let lock = self.lock.lock();
+ (lock.pages, lock.size)
+ };
+
+ if size == 0 {
+ return;
+ }
+
+ // Note: This call is also necessary for the safety of `stable_trylock_mm`.
+ let mm_lock = self.mm_lock.lock();
+
+        // This is the destructor, so unlike the other methods, the shrinker is the only thing we
+        // could race with here. Holding `mm_lock` prevents that race, and after this loop the
+        // shrinker will not access any of our pages at all, since we removed them from the lru
+        // list.
+ for i in 0..size {
+ // SAFETY: Loop is in-bounds of the size.
+ let p_ptr = unsafe { pages.add(i) };
+ // SAFETY: No other readers, so we can read.
+ if let Some(p) = unsafe { PageInfo::get_page(p_ptr) } {
+ // SAFETY: The pointer is valid and it's the right shrinker.
+ unsafe { PageInfo::list_lru_del(p_ptr, p.nid(), self.shrinker) };
+ }
+ }
+
+ drop(mm_lock);
+
+        // SAFETY: `pages` was allocated as a `KVVec<PageInfo>` with capacity `size`. Furthermore,
+ // all `size` elements are initialized. Also, the array is no longer shared with the
+ // shrinker due to the above loop.
+ drop(unsafe { KVVec::from_raw_parts(pages, size, size) });
+ }
+}
+
+/// # Safety
+/// Called by the shrinker.
+#[no_mangle]
+unsafe extern "C" fn rust_shrink_count(
+ shrink: *mut bindings::shrinker,
+ _sc: *mut bindings::shrink_control,
+) -> c_ulong {
+ // SAFETY: We can access our own private data.
+ let list_lru = unsafe { (*shrink).private_data.cast::<bindings::list_lru>() };
+ // SAFETY: Accessing the lru list is okay. Just an FFI call.
+ unsafe { bindings::list_lru_count(list_lru) }
+}
+
+/// # Safety
+/// Called by the shrinker.
+#[no_mangle]
+unsafe extern "C" fn rust_shrink_scan(
+ shrink: *mut bindings::shrinker,
+ sc: *mut bindings::shrink_control,
+) -> c_ulong {
+ // SAFETY: We can access our own private data.
+ let list_lru = unsafe { (*shrink).private_data.cast::<bindings::list_lru>() };
+ // SAFETY: Caller guarantees that it is safe to read this field.
+ let nr_to_scan = unsafe { (*sc).nr_to_scan };
+ // SAFETY: Accessing the lru list is okay. Just an FFI call.
+ unsafe {
+ bindings::list_lru_walk(
+ list_lru,
+ Some(bindings::rust_shrink_free_page_wrap),
+ ptr::null_mut(),
+ nr_to_scan,
+ )
+ }
+}
+
+const LRU_SKIP: bindings::lru_status = bindings::lru_status_LRU_SKIP;
+const LRU_REMOVED_ENTRY: bindings::lru_status = bindings::lru_status_LRU_REMOVED_RETRY;
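+
+// Lock ordering in `rust_shrink_free_page` below (informational): the lru lock
+// is held on entry, so every other lock is only try-locked: `mmget_not_zero`
+// pins the mm, then `stable_trylock_mm`, the mmap read lock and the range
+// spinlock follow, bailing out with `LRU_SKIP` on any failure. The lru lock
+// itself is dropped around the call to `zap_page_range_single` and re-taken
+// before returning, which is why the success path reports `LRU_REMOVED_RETRY`.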
+
+/// # Safety
+/// Called by the shrinker.
+#[no_mangle]
+unsafe extern "C" fn rust_shrink_free_page(
+ item: *mut bindings::list_head,
+ lru: *mut bindings::list_lru_one,
+ _cb_arg: *mut c_void,
+) -> bindings::lru_status {
+ // Fields that should survive after unlocking the lru lock.
+ let page;
+ let page_index;
+ let mm;
+ let mmap_read;
+ let mm_mutex;
+ let vma_addr;
+
+ {
+ // CAST: The `list_head` field is first in `PageInfo`.
+ let info = item as *mut PageInfo;
+ // SAFETY: The `range` field of `PageInfo` is immutable.
+ let range = unsafe { &*((*info).range) };
+
+ mm = match range.mm.mmget_not_zero() {
+ Some(mm) => MmWithUser::into_mmput_async(mm),
+ None => return LRU_SKIP,
+ };
+
+ mm_mutex = match range.stable_trylock_mm() {
+ Some(guard) => guard,
+ None => return LRU_SKIP,
+ };
+
+ mmap_read = match mm.mmap_read_trylock() {
+ Some(guard) => guard,
+ None => return LRU_SKIP,
+ };
+
+ // We can't lock it normally here, since we hold the lru lock.
+ let inner = match range.lock.try_lock() {
+ Some(inner) => inner,
+ None => return LRU_SKIP,
+ };
+
+ // SAFETY: The item is in this lru list, so it's okay to remove it.
+ unsafe { bindings::list_lru_isolate(lru, item) };
+
+ // SAFETY: Both pointers are in bounds of the same allocation.
+ page_index = unsafe { info.offset_from(inner.pages) } as usize;
+
+ // SAFETY: We hold the spinlock, so we can take the page.
+ //
+        // This sets the page pointer to zero before we unmap it from the vma. However, we call
+        // `zap_page_range_single` before we release the mmap lock, so `use_page_slow` will not be
+        // able to insert a new page until after our call to `zap_page_range_single`.
+ page = unsafe { PageInfo::take_page(info) };
+ vma_addr = inner.vma_addr;
+
+        // From this point on, we don't access this PageInfo or ShrinkablePageRange again, because
+        // they can be freed at any point after we unlock `lru_lock`. The exception is `mm_mutex`,
+        // which is kept alive for as long as we hold its guard.
+ }
+
+ // SAFETY: The lru lock is locked when this method is called.
+ unsafe { bindings::spin_unlock(&raw mut (*lru).lock) };
+
+ if let Some(vma) = mmap_read.vma_lookup(vma_addr) {
+ let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
+ vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
+ }
+
+ drop(mmap_read);
+ drop(mm_mutex);
+ drop(mm);
+ drop(page);
+
+ // SAFETY: We just unlocked the lru lock, but it should be locked when we return.
+ unsafe { bindings::spin_lock(&raw mut (*lru).lock) };
+
+ LRU_REMOVED_ENTRY
+}
diff --git a/drivers/android/binder/page_range_helper.c b/drivers/android/binder/page_range_helper.c
new file mode 100644
index 000000000000..496887723ee0
--- /dev/null
+++ b/drivers/android/binder/page_range_helper.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* C helper for page_range.rs to work around a CFI violation.
+ *
+ * Bindgen currently pretends that `enum lru_status` is the same as an integer.
+ * This assumption is fine ABI-wise, but once you add CFI to the mix, it
+ * triggers a CFI violation because `enum lru_status` gets a different CFI tag.
+ *
+ * This file contains a workaround until bindgen can be fixed.
+ *
+ * Copyright (C) 2025 Google LLC.
+ */
+#include "page_range_helper.h"
+
+unsigned int rust_shrink_free_page(struct list_head *item,
+ struct list_lru_one *list,
+ void *cb_arg);
+
+enum lru_status
+rust_shrink_free_page_wrap(struct list_head *item, struct list_lru_one *list,
+ void *cb_arg)
+{
+ return rust_shrink_free_page(item, list, cb_arg);
+}
diff --git a/drivers/android/binder/page_range_helper.h b/drivers/android/binder/page_range_helper.h
new file mode 100644
index 000000000000..18dd2dd117b2
--- /dev/null
+++ b/drivers/android/binder/page_range_helper.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 Google, Inc.
+ */
+
+#ifndef _LINUX_PAGE_RANGE_HELPER_H
+#define _LINUX_PAGE_RANGE_HELPER_H
+
+#include <linux/list_lru.h>
+
+enum lru_status
+rust_shrink_free_page_wrap(struct list_head *item, struct list_lru_one *list,
+ void *cb_arg);
+
+#endif /* _LINUX_PAGE_RANGE_HELPER_H */
diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
new file mode 100644
index 000000000000..132055b4790f
--- /dev/null
+++ b/drivers/android/binder/process.rs
@@ -0,0 +1,1745 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! This module defines the `Process` type, which represents a process using a particular binder
+//! context.
+//!
+//! The `Process` object keeps track of all of the resources that this process owns in the binder
+//! context.
+//!
+//! There is one `Process` object for each binder fd that a process has opened, so processes using
+//! several binder contexts have several `Process` objects. This ensures that the contexts are
+//! fully separated.
+
+use core::mem::take;
+
+use kernel::{
+ bindings,
+ cred::Credential,
+ error::Error,
+ fs::file::{self, File},
+ id_pool::IdPool,
+ list::{List, ListArc, ListArcField, ListLinks},
+ mm,
+ prelude::*,
+ rbtree::{self, RBTree, RBTreeNode, RBTreeNodeReservation},
+ seq_file::SeqFile,
+ seq_print,
+ sync::poll::PollTable,
+ sync::{
+ lock::{spinlock::SpinLockBackend, Guard},
+ Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
+ },
+ task::Task,
+ types::ARef,
+ uaccess::{UserSlice, UserSliceReader},
+ uapi,
+ workqueue::{self, Work},
+};
+
+use crate::{
+ allocation::{Allocation, AllocationInfo, NewAllocation},
+ context::Context,
+ defs::*,
+ error::{BinderError, BinderResult},
+ node::{CouldNotDeliverCriticalIncrement, CritIncrWrapper, Node, NodeDeath, NodeRef},
+ page_range::ShrinkablePageRange,
+ range_alloc::{RangeAllocator, ReserveNew, ReserveNewArgs},
+ stats::BinderStats,
+ thread::{PushWorkRes, Thread},
+ BinderfsProcFile, DArc, DLArc, DTRWrap, DeliverToRead,
+};
+
+#[path = "freeze.rs"]
+mod freeze;
+use self::freeze::{FreezeCookie, FreezeListener};
+
+struct Mapping {
+ address: usize,
+ alloc: RangeAllocator<AllocationInfo>,
+}
+
+impl Mapping {
+ fn new(address: usize, size: usize) -> Self {
+ Self {
+ address,
+ alloc: RangeAllocator::new(size),
+ }
+ }
+}
+
+// bitflags for defer_work.
+const PROC_DEFER_FLUSH: u8 = 1;
+const PROC_DEFER_RELEASE: u8 = 2;
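+
+// These flags accumulate in `ProcessInner::defer_work`; the workqueue item
+// (see `WorkItem::run` below) snapshots and clears the mask under the lock,
+// then runs each deferred action with the lock released.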
+
+#[derive(Copy, Clone)]
+pub(crate) enum IsFrozen {
+ Yes,
+ No,
+ InProgress,
+}
+
+impl IsFrozen {
+ /// Whether incoming transactions should be rejected due to freeze.
+ pub(crate) fn is_frozen(self) -> bool {
+ match self {
+ IsFrozen::Yes => true,
+ IsFrozen::No => false,
+ IsFrozen::InProgress => true,
+ }
+ }
+
+ /// Whether freeze notifications consider this process frozen.
+ pub(crate) fn is_fully_frozen(self) -> bool {
+ match self {
+ IsFrozen::Yes => true,
+ IsFrozen::No => false,
+ IsFrozen::InProgress => false,
+ }
+ }
+}
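+
+// Summary of the two predicates (informational):
+//
+//                 is_frozen()   is_fully_frozen()
+//   No            false         false
+//   InProgress    true          false
+//   Yes           true          true
+//
+// `InProgress` already rejects incoming sync transactions while the freeze
+// ioctl drains outstanding ones, but freeze notifications only fire once the
+// state reaches `Yes`.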
+
+/// The fields of `Process` protected by the spinlock.
+pub(crate) struct ProcessInner {
+ is_manager: bool,
+ pub(crate) is_dead: bool,
+ threads: RBTree<i32, Arc<Thread>>,
+ /// INVARIANT: Threads pushed to this list must be owned by this process.
+ ready_threads: List<Thread>,
+ nodes: RBTree<u64, DArc<Node>>,
+ mapping: Option<Mapping>,
+ work: List<DTRWrap<dyn DeliverToRead>>,
+ delivered_deaths: List<DTRWrap<NodeDeath>, 2>,
+
+ /// The number of requested threads that haven't registered yet.
+ requested_thread_count: u32,
+ /// The maximum number of threads used by the process thread pool.
+ max_threads: u32,
+    /// The number of threads that have started and registered with the thread pool.
+ started_thread_count: u32,
+
+ /// Bitmap of deferred work to do.
+ defer_work: u8,
+
+ /// Number of transactions to be transmitted before processes in freeze_wait
+ /// are woken up.
+ outstanding_txns: u32,
+ /// Process is frozen and unable to service binder transactions.
+ pub(crate) is_frozen: IsFrozen,
+ /// Process received sync transactions since last frozen.
+ pub(crate) sync_recv: bool,
+ /// Process received async transactions since last frozen.
+ pub(crate) async_recv: bool,
+ pub(crate) binderfs_file: Option<BinderfsProcFile>,
+    /// Whether oneway spam detection is enabled.
+ oneway_spam_detection_enabled: bool,
+}
+
+impl ProcessInner {
+ fn new() -> Self {
+ Self {
+ is_manager: false,
+ is_dead: false,
+ threads: RBTree::new(),
+ ready_threads: List::new(),
+ mapping: None,
+ nodes: RBTree::new(),
+ work: List::new(),
+ delivered_deaths: List::new(),
+ requested_thread_count: 0,
+ max_threads: 0,
+ started_thread_count: 0,
+ defer_work: 0,
+ outstanding_txns: 0,
+ is_frozen: IsFrozen::No,
+ sync_recv: false,
+ async_recv: false,
+ binderfs_file: None,
+ oneway_spam_detection_enabled: false,
+ }
+ }
+
+ /// Schedule the work item for execution on this process.
+ ///
+ /// If any threads are ready for work, then the work item is given directly to that thread and
+ /// it is woken up. Otherwise, it is pushed to the process work list.
+ ///
+ /// This call can fail only if the process is dead. In this case, the work item is returned to
+ /// the caller so that the caller can drop it after releasing the inner process lock. This is
+ /// necessary since the destructor of `Transaction` will take locks that can't necessarily be
+ /// taken while holding the inner process lock.
+ pub(crate) fn push_work(
+ &mut self,
+ work: DLArc<dyn DeliverToRead>,
+ ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
+ // Try to find a ready thread to which to push the work.
+ if let Some(thread) = self.ready_threads.pop_front() {
+ // Push to thread while holding state lock. This prevents the thread from giving up
+ // (for example, because of a signal) when we're about to deliver work.
+ match thread.push_work(work) {
+ PushWorkRes::Ok => Ok(()),
+ PushWorkRes::FailedDead(work) => Err((BinderError::new_dead(), work)),
+ }
+ } else if self.is_dead {
+ Err((BinderError::new_dead(), work))
+ } else {
+ let sync = work.should_sync_wakeup();
+
+ // Didn't find a thread waiting for proc work; this can happen
+ // in two scenarios:
+ // 1. All threads are busy handling transactions
+ // In that case, one of those threads should call back into
+ // the kernel driver soon and pick up this work.
+ // 2. Threads are using the (e)poll interface, in which case
+ // they may be blocked on the waitqueue without having been
+ // added to waiting_threads. For this case, we just iterate
+ // over all threads not handling transaction work, and
+ // wake them all up. We wake all because we don't know whether
+ // a thread that called into (e)poll is handling non-binder
+ // work currently.
+ self.work.push_back(work);
+
+ // Wake up polling threads, if any.
+ for thread in self.threads.values() {
+ thread.notify_if_poll_ready(sync);
+ }
+
+ Ok(())
+ }
+ }
+
+ pub(crate) fn remove_node(&mut self, ptr: u64) {
+ self.nodes.remove(&ptr);
+ }
+
+ /// Updates the reference count on the given node.
+ pub(crate) fn update_node_refcount(
+ &mut self,
+ node: &DArc<Node>,
+ inc: bool,
+ strong: bool,
+ count: usize,
+ othread: Option<&Thread>,
+ ) {
+ let push = node.update_refcount_locked(inc, strong, count, self);
+
+ // If we decided that we need to push work, push either to the process or to a thread if
+ // one is specified.
+ if let Some(node) = push {
+ if let Some(thread) = othread {
+ thread.push_work_deferred(node);
+ } else {
+                // Ignore the result: `push_work` can only fail if the process is dead, and in
+                // that case the process no longer cares about the notification.
+                let _ = self.push_work(node);
+ }
+ }
+ }
+
+ pub(crate) fn new_node_ref(
+ &mut self,
+ node: DArc<Node>,
+ strong: bool,
+ thread: Option<&Thread>,
+ ) -> NodeRef {
+ self.update_node_refcount(&node, true, strong, 1, thread);
+ let strong_count = if strong { 1 } else { 0 };
+ NodeRef::new(node, strong_count, 1 - strong_count)
+ }
+
+ pub(crate) fn new_node_ref_with_thread(
+ &mut self,
+ node: DArc<Node>,
+ strong: bool,
+ thread: &Thread,
+ wrapper: Option<CritIncrWrapper>,
+ ) -> Result<NodeRef, CouldNotDeliverCriticalIncrement> {
+ let push = match wrapper {
+ None => node
+ .incr_refcount_allow_zero2one(strong, self)?
+ .map(|node| node as _),
+ Some(wrapper) => node.incr_refcount_allow_zero2one_with_wrapper(strong, wrapper, self),
+ };
+ if let Some(node) = push {
+ thread.push_work_deferred(node);
+ }
+ let strong_count = if strong { 1 } else { 0 };
+ Ok(NodeRef::new(node, strong_count, 1 - strong_count))
+ }
+
+ /// Returns an existing node with the given pointer and cookie, if one exists.
+ ///
+ /// Returns an error if a node with the given pointer but a different cookie exists.
+ fn get_existing_node(&self, ptr: u64, cookie: u64) -> Result<Option<DArc<Node>>> {
+ match self.nodes.get(&ptr) {
+ None => Ok(None),
+ Some(node) => {
+ let (_, node_cookie) = node.get_id();
+ if node_cookie == cookie {
+ Ok(Some(node.clone()))
+ } else {
+ Err(EINVAL)
+ }
+ }
+ }
+ }
+
+ fn register_thread(&mut self) -> bool {
+ if self.requested_thread_count == 0 {
+ return false;
+ }
+
+ self.requested_thread_count -= 1;
+ self.started_thread_count += 1;
+ true
+ }
+
+    /// Finds a delivered death notification with the given cookie, removes it from the process's
+    /// delivered list, and returns it.
+ fn pull_delivered_death(&mut self, cookie: u64) -> Option<DArc<NodeDeath>> {
+ let mut cursor = self.delivered_deaths.cursor_front();
+ while let Some(next) = cursor.peek_next() {
+ if next.cookie == cookie {
+ return Some(next.remove().into_arc());
+ }
+ cursor.move_next();
+ }
+ None
+ }
+
+ pub(crate) fn death_delivered(&mut self, death: DArc<NodeDeath>) {
+ if let Some(death) = ListArc::try_from_arc_or_drop(death) {
+ self.delivered_deaths.push_back(death);
+ } else {
+ pr_warn!("Notification added to `delivered_deaths` twice.");
+ }
+ }
+
+ pub(crate) fn add_outstanding_txn(&mut self) {
+ self.outstanding_txns += 1;
+ }
+
+ fn txns_pending_locked(&self) -> bool {
+ if self.outstanding_txns > 0 {
+ return true;
+ }
+ for thread in self.threads.values() {
+ if thread.has_current_transaction() {
+ return true;
+ }
+ }
+ false
+ }
+}
+
+/// Used to keep track of a node that this process has a handle to.
+#[pin_data]
+pub(crate) struct NodeRefInfo {
+ debug_id: usize,
+ /// The refcount that this process owns to the node.
+ node_ref: ListArcField<NodeRef, { Self::LIST_PROC }>,
+ death: ListArcField<Option<DArc<NodeDeath>>, { Self::LIST_PROC }>,
+ /// Cookie of the active freeze listener for this node.
+ freeze: ListArcField<Option<FreezeCookie>, { Self::LIST_PROC }>,
+ /// Used to store this `NodeRefInfo` in the node's `refs` list.
+ #[pin]
+ links: ListLinks<{ Self::LIST_NODE }>,
+ /// The handle for this `NodeRefInfo`.
+ handle: u32,
+ /// The process that has a handle to the node.
+ pub(crate) process: Arc<Process>,
+}
+
+impl NodeRefInfo {
+ /// The id used for the `Node::refs` list.
+ pub(crate) const LIST_NODE: u64 = 0x2da16350fb724a10;
+ /// The id used for the `ListArc` in `ProcessNodeRefs`.
+ const LIST_PROC: u64 = 0xd703a5263dcc8650;
+
+ fn new(node_ref: NodeRef, handle: u32, process: Arc<Process>) -> impl PinInit<Self> {
+ pin_init!(Self {
+ debug_id: super::next_debug_id(),
+ node_ref: ListArcField::new(node_ref),
+ death: ListArcField::new(None),
+ freeze: ListArcField::new(None),
+ links <- ListLinks::new(),
+ handle,
+ process,
+ })
+ }
+
+ kernel::list::define_list_arc_field_getter! {
+ pub(crate) fn death(&mut self<{Self::LIST_PROC}>) -> &mut Option<DArc<NodeDeath>> { death }
+ pub(crate) fn freeze(&mut self<{Self::LIST_PROC}>) -> &mut Option<FreezeCookie> { freeze }
+ pub(crate) fn node_ref(&mut self<{Self::LIST_PROC}>) -> &mut NodeRef { node_ref }
+ pub(crate) fn node_ref2(&self<{Self::LIST_PROC}>) -> &NodeRef { node_ref }
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<{Self::LIST_NODE}> for NodeRefInfo { untracked; }
+ impl ListArcSafe<{Self::LIST_PROC}> for NodeRefInfo { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<{Self::LIST_NODE}> for NodeRefInfo {
+ using ListLinks { self.links };
+ }
+}
+
+/// Keeps track of references this process has to nodes owned by other processes.
+///
+/// TODO: Currently, the rbtree requires two allocations per node reference, and two tree
+/// traversals to look up a node by `Node::global_id`. Once the rbtree is more powerful, these
+/// extra costs should be eliminated.
+struct ProcessNodeRefs {
+ /// Used to look up nodes using the 32-bit id that this process knows it by.
+ by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
+ /// Used to quickly find unused ids in `by_handle`.
+ handle_is_present: IdPool,
+ /// Used to look up nodes without knowing their local 32-bit id. The usize is the address of
+ /// the underlying `Node` struct as returned by `Node::global_id`.
+ by_node: RBTree<usize, u32>,
+ /// Used to look up a `FreezeListener` by cookie.
+ ///
+ /// There might be multiple freeze listeners for the same node, but at most one of them is
+ /// active.
+ freeze_listeners: RBTree<FreezeCookie, FreezeListener>,
+}
+
+impl ProcessNodeRefs {
+ fn new() -> Self {
+ Self {
+ by_handle: RBTree::new(),
+ handle_is_present: IdPool::new(),
+ by_node: RBTree::new(),
+ freeze_listeners: RBTree::new(),
+ }
+ }
+}
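+
+// Invariant tying the three containers together (informational): every handle
+// in `by_handle` is marked as used in `handle_is_present`, and `by_node` maps
+// the node's `global_id` back to that same handle. `insert_or_update_handle`
+// establishes all three entries together, and `update_ref` removes them
+// together.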
+
+/// A process using binder.
+///
+/// Strictly speaking, there can be multiple of these per process. There is one for each binder fd
+/// that a process has opened, so processes using several binder contexts have several `Process`
+/// objects. This ensures that the contexts are fully separated.
+#[pin_data]
+pub(crate) struct Process {
+ pub(crate) ctx: Arc<Context>,
+
+ // The task leader (process).
+ pub(crate) task: ARef<Task>,
+
+ // Credential associated with file when `Process` is created.
+ pub(crate) cred: ARef<Credential>,
+
+ #[pin]
+ pub(crate) inner: SpinLock<ProcessInner>,
+
+ #[pin]
+ pub(crate) pages: ShrinkablePageRange,
+
+ // Waitqueue of processes waiting for all outstanding transactions to be
+ // processed.
+ #[pin]
+ freeze_wait: CondVar,
+
+    // Node references are kept behind a separate lock to avoid recursive lock acquisition when
+    // incrementing/decrementing a node in another process.
+ #[pin]
+ node_refs: Mutex<ProcessNodeRefs>,
+
+ // Work node for deferred work item.
+ #[pin]
+ defer_work: Work<Process>,
+
+ // Links for process list in Context.
+ #[pin]
+ links: ListLinks,
+
+ pub(crate) stats: BinderStats,
+}
+
+kernel::impl_has_work! {
+ impl HasWork<Process> for Process { self.defer_work }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Process { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for Process {
+ using ListLinks { self.links };
+ }
+}
+
+impl workqueue::WorkItem for Process {
+ type Pointer = Arc<Process>;
+
+ fn run(me: Arc<Self>) {
+ let defer;
+ {
+ let mut inner = me.inner.lock();
+ defer = inner.defer_work;
+ inner.defer_work = 0;
+ }
+
+ if defer & PROC_DEFER_FLUSH != 0 {
+ me.deferred_flush();
+ }
+ if defer & PROC_DEFER_RELEASE != 0 {
+ me.deferred_release();
+ }
+ }
+}
+
+impl Process {
+ fn new(ctx: Arc<Context>, cred: ARef<Credential>) -> Result<Arc<Self>> {
+ let current = kernel::current!();
+ let list_process = ListArc::pin_init::<Error>(
+ try_pin_init!(Process {
+ ctx,
+ cred,
+ inner <- kernel::new_spinlock!(ProcessInner::new(), "Process::inner"),
+ pages <- ShrinkablePageRange::new(&super::BINDER_SHRINKER),
+ node_refs <- kernel::new_mutex!(ProcessNodeRefs::new(), "Process::node_refs"),
+ freeze_wait <- kernel::new_condvar!("Process::freeze_wait"),
+ task: current.group_leader().into(),
+ defer_work <- kernel::new_work!("Process::defer_work"),
+ links <- ListLinks::new(),
+ stats: BinderStats::new(),
+ }),
+ GFP_KERNEL,
+ )?;
+
+ let process = list_process.clone_arc();
+ process.ctx.register_process(list_process);
+
+ Ok(process)
+ }
+
+ pub(crate) fn pid_in_current_ns(&self) -> kernel::task::Pid {
+ self.task.tgid_nr_ns(None)
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print_stats(&self, m: &SeqFile, ctx: &Context) -> Result<()> {
+ seq_print!(m, "proc {}\n", self.pid_in_current_ns());
+ seq_print!(m, "context {}\n", &*ctx.name);
+
+ let inner = self.inner.lock();
+ seq_print!(m, " threads: {}\n", inner.threads.iter().count());
+ seq_print!(
+ m,
+ " requested threads: {}+{}/{}\n",
+ inner.requested_thread_count,
+ inner.started_thread_count,
+ inner.max_threads,
+ );
+ if let Some(mapping) = &inner.mapping {
+ seq_print!(
+ m,
+ " free oneway space: {}\n",
+ mapping.alloc.free_oneway_space()
+ );
+ seq_print!(m, " buffers: {}\n", mapping.alloc.count_buffers());
+ }
+ seq_print!(
+ m,
+ " outstanding transactions: {}\n",
+ inner.outstanding_txns
+ );
+ seq_print!(m, " nodes: {}\n", inner.nodes.iter().count());
+ drop(inner);
+
+ {
+ let mut refs = self.node_refs.lock();
+ let (mut count, mut weak, mut strong) = (0, 0, 0);
+ for r in refs.by_handle.values_mut() {
+ let node_ref = r.node_ref();
+ let (nstrong, nweak) = node_ref.get_count();
+ count += 1;
+ weak += nweak;
+ strong += nstrong;
+ }
+ seq_print!(m, " refs: {count} s {strong} w {weak}\n");
+ }
+
+ self.stats.debug_print(" ", m);
+
+ Ok(())
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print(&self, m: &SeqFile, ctx: &Context, print_all: bool) -> Result<()> {
+ seq_print!(m, "proc {}\n", self.pid_in_current_ns());
+ seq_print!(m, "context {}\n", &*ctx.name);
+
+ let mut all_threads = KVec::new();
+ let mut all_nodes = KVec::new();
+ loop {
+ let inner = self.inner.lock();
+ let num_threads = inner.threads.iter().count();
+ let num_nodes = inner.nodes.iter().count();
+
+ if all_threads.capacity() < num_threads || all_nodes.capacity() < num_nodes {
+ drop(inner);
+ all_threads.reserve(num_threads, GFP_KERNEL)?;
+ all_nodes.reserve(num_nodes, GFP_KERNEL)?;
+ continue;
+ }
+
+ for thread in inner.threads.values() {
+ assert!(all_threads.len() < all_threads.capacity());
+ let _ = all_threads.push(thread.clone(), GFP_ATOMIC);
+ }
+
+ for node in inner.nodes.values() {
+ assert!(all_nodes.len() < all_nodes.capacity());
+ let _ = all_nodes.push(node.clone(), GFP_ATOMIC);
+ }
+
+ break;
+ }
+
+ for thread in all_threads {
+ thread.debug_print(m, print_all)?;
+ }
+
+ let mut inner = self.inner.lock();
+ for node in all_nodes {
+ if print_all || node.has_oneway_transaction(&mut inner) {
+ node.full_debug_print(m, &mut inner)?;
+ }
+ }
+ drop(inner);
+
+ if print_all {
+ let mut refs = self.node_refs.lock();
+ for r in refs.by_handle.values_mut() {
+ let node_ref = r.node_ref();
+ let dead = node_ref.node.owner.inner.lock().is_dead;
+ let (strong, weak) = node_ref.get_count();
+ let debug_id = node_ref.node.debug_id;
+
+ seq_print!(
+ m,
+ " ref {}: desc {} {}node {debug_id} s {strong} w {weak}",
+ r.debug_id,
+ r.handle,
+ if dead { "dead " } else { "" }
+ );
+ }
+ }
+
+ let inner = self.inner.lock();
+ for work in &inner.work {
+ work.debug_print(m, " ", " pending transaction ")?;
+ }
+ for _death in &inner.delivered_deaths {
+ seq_print!(m, " has delivered dead binder\n");
+ }
+ if let Some(mapping) = &inner.mapping {
+ mapping.alloc.debug_print(m)?;
+ }
+ drop(inner);
+
+ Ok(())
+ }
+
+ /// Attempts to fetch a work item from the process queue.
+ pub(crate) fn get_work(&self) -> Option<DLArc<dyn DeliverToRead>> {
+ self.inner.lock().work.pop_front()
+ }
+
+ /// Attempts to fetch a work item from the process queue. If none is available, it registers the
+ /// given thread as ready to receive work directly.
+ ///
+ /// This must only be called when the thread is not participating in a transaction chain; when
+ /// it is, work will always be delivered directly to the thread (and not through the process
+ /// queue).
+ pub(crate) fn get_work_or_register<'a>(
+ &'a self,
+ thread: &'a Arc<Thread>,
+ ) -> GetWorkOrRegister<'a> {
+ let mut inner = self.inner.lock();
+ // Try to get work from the process queue.
+ if let Some(work) = inner.work.pop_front() {
+ return GetWorkOrRegister::Work(work);
+ }
+
+ // Register the thread as ready.
+ GetWorkOrRegister::Register(Registration::new(thread, &mut inner))
+ }
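+
+    // Illustrative use of the return value (a sketch; `deliver` and
+    // `wait_for_wakeup` are hypothetical stand-ins for the thread read loop):
+    //
+    //     match proc.get_work_or_register(&thread) {
+    //         GetWorkOrRegister::Work(work) => deliver(work),
+    //         GetWorkOrRegister::Register(registration) => {
+    //             wait_for_wakeup();
+    //             drop(registration); // deregisters from `ready_threads`
+    //         }
+    //     }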
+
+ fn get_current_thread(self: ArcBorrow<'_, Self>) -> Result<Arc<Thread>> {
+ let id = {
+ let current = kernel::current!();
+ if !core::ptr::eq(current.group_leader(), &*self.task) {
+ pr_err!("get_current_thread was called from the wrong process.");
+ return Err(EINVAL);
+ }
+ current.pid()
+ };
+
+ {
+ let inner = self.inner.lock();
+ if let Some(thread) = inner.threads.get(&id) {
+ return Ok(thread.clone());
+ }
+ }
+
+ // Allocate a new `Thread` without holding any locks.
+ let reservation = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let ta: Arc<Thread> = Thread::new(id, self.into())?;
+
+ let mut inner = self.inner.lock();
+ match inner.threads.entry(id) {
+ rbtree::Entry::Vacant(entry) => {
+ entry.insert(ta.clone(), reservation);
+ Ok(ta)
+ }
+ rbtree::Entry::Occupied(_entry) => {
+ pr_err!("Cannot create two threads with the same id.");
+ Err(EINVAL)
+ }
+ }
+ }
+
+ pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
+ // If push_work fails, drop the work item outside the lock.
+ let res = self.inner.lock().push_work(work);
+ match res {
+ Ok(()) => Ok(()),
+ Err((err, work)) => {
+ drop(work);
+ Err(err)
+ }
+ }
+ }
+
+ fn set_as_manager(
+ self: ArcBorrow<'_, Self>,
+ info: Option<FlatBinderObject>,
+ thread: &Thread,
+ ) -> Result {
+ let (ptr, cookie, flags) = if let Some(obj) = info {
+ (
+ // SAFETY: The object type for this ioctl is implicitly `BINDER_TYPE_BINDER`, so it
+ // is safe to access the `binder` field.
+ unsafe { obj.__bindgen_anon_1.binder },
+ obj.cookie,
+ obj.flags,
+ )
+ } else {
+ (0, 0, 0)
+ };
+ let node_ref = self.get_node(ptr, cookie, flags as _, true, thread)?;
+ let node = node_ref.node.clone();
+ self.ctx.set_manager_node(node_ref)?;
+ self.inner.lock().is_manager = true;
+
+ // Force the state of the node to prevent the delivery of acquire/increfs.
+ let mut owner_inner = node.owner.inner.lock();
+ node.force_has_count(&mut owner_inner);
+ Ok(())
+ }
+
+ fn get_node_inner(
+ self: ArcBorrow<'_, Self>,
+ ptr: u64,
+ cookie: u64,
+ flags: u32,
+ strong: bool,
+ thread: &Thread,
+ wrapper: Option<CritIncrWrapper>,
+ ) -> Result<Result<NodeRef, CouldNotDeliverCriticalIncrement>> {
+ // Try to find an existing node.
+ {
+ let mut inner = self.inner.lock();
+ if let Some(node) = inner.get_existing_node(ptr, cookie)? {
+ return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
+ }
+ }
+
+ // Allocate the node before reacquiring the lock.
+ let node = DTRWrap::arc_pin_init(Node::new(ptr, cookie, flags, self.into()))?.into_arc();
+ let rbnode = RBTreeNode::new(ptr, node.clone(), GFP_KERNEL)?;
+ let mut inner = self.inner.lock();
+ if let Some(node) = inner.get_existing_node(ptr, cookie)? {
+ return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
+ }
+
+ inner.nodes.insert(rbnode);
+ // This can only fail if someone has already pushed the node to a list, but we just created
+ // it and still hold the lock, so it can't fail right now.
+ let node_ref = inner
+ .new_node_ref_with_thread(node, strong, thread, wrapper)
+ .unwrap();
+
+ Ok(Ok(node_ref))
+ }
+
+ pub(crate) fn get_node(
+ self: ArcBorrow<'_, Self>,
+ ptr: u64,
+ cookie: u64,
+ flags: u32,
+ strong: bool,
+ thread: &Thread,
+ ) -> Result<NodeRef> {
+ let mut wrapper = None;
+ for _ in 0..2 {
+ match self.get_node_inner(ptr, cookie, flags, strong, thread, wrapper) {
+ Err(err) => return Err(err),
+ Ok(Ok(node_ref)) => return Ok(node_ref),
+ Ok(Err(CouldNotDeliverCriticalIncrement)) => {
+ wrapper = Some(CritIncrWrapper::new()?);
+ }
+ }
+ }
+ // We only get a `CouldNotDeliverCriticalIncrement` error if `wrapper` is `None`, so the
+ // loop should run at most twice.
+ unreachable!()
+ }
+
+ pub(crate) fn insert_or_update_handle(
+ self: ArcBorrow<'_, Process>,
+ node_ref: NodeRef,
+ is_manager: bool,
+ ) -> Result<u32> {
+ {
+ let mut refs = self.node_refs.lock();
+
+ // Do a lookup before inserting.
+ if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
+ let handle = *handle_ref;
+ let info = refs.by_handle.get_mut(&handle).unwrap();
+ info.node_ref().absorb(node_ref);
+ return Ok(handle);
+ }
+ }
+
+ // Reserve memory for tree nodes.
+ let reserve1 = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let info = UniqueArc::new_uninit(GFP_KERNEL)?;
+
+ let mut refs_lock = self.node_refs.lock();
+ let mut refs = &mut *refs_lock;
+
+ let (unused_id, by_handle_slot) = loop {
+ // ID 0 may only be used by the manager.
+ let start = if is_manager { 0 } else { 1 };
+
+ if let Some(res) = refs.handle_is_present.find_unused_id(start) {
+ match refs.by_handle.entry(res.as_u32()) {
+ rbtree::Entry::Vacant(entry) => break (res, entry),
+ rbtree::Entry::Occupied(_) => {
+ pr_err!("Detected mismatch between handle_is_present and by_handle");
+ res.acquire();
+ kernel::warn_on!(true);
+ return Err(EINVAL);
+ }
+ }
+ }
+
+ let grow_request = refs.handle_is_present.grow_request().ok_or(ENOMEM)?;
+ drop(refs_lock);
+ let resizer = grow_request.realloc(GFP_KERNEL)?;
+ refs_lock = self.node_refs.lock();
+ refs = &mut *refs_lock;
+ refs.handle_is_present.grow(resizer);
+ };
+ let handle = unused_id.as_u32();
+
+        // Do the lookup again, as the node may have been inserted while the lock was released.
+ if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
+ let handle = *handle_ref;
+ let info = refs.by_handle.get_mut(&handle).unwrap();
+ info.node_ref().absorb(node_ref);
+ return Ok(handle);
+ }
+
+ let gid = node_ref.node.global_id();
+ let (info_proc, info_node) = {
+ let info_init = NodeRefInfo::new(node_ref, handle, self.into());
+ match info.pin_init_with(info_init) {
+ Ok(info) => ListArc::pair_from_pin_unique(info),
+ // error is infallible
+ Err(err) => match err {},
+ }
+ };
+
+ // Ensure the process is still alive while we insert a new reference.
+ //
+ // This releases the lock before inserting the nodes, but since `is_dead` is set as the
+ // first thing in `deferred_release`, process cleanup will not miss the items inserted into
+ // `refs` below.
+ if self.inner.lock().is_dead {
+ return Err(ESRCH);
+ }
+
+ // SAFETY: `info_proc` and `info_node` reference the same node, so we are inserting
+ // `info_node` into the right node's `refs` list.
+ unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };
+
+ refs.by_node.insert(reserve1.into_node(gid, handle));
+ by_handle_slot.insert(info_proc, reserve2);
+ unused_id.acquire();
+ Ok(handle)
+ }
+
+ pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
+ // When handle is zero, try to get the context manager.
+ if handle == 0 {
+ Ok(self.ctx.get_manager_node(true)?)
+ } else {
+ Ok(self.get_node_from_handle(handle, true)?)
+ }
+ }
+
+ pub(crate) fn get_node_from_handle(&self, handle: u32, strong: bool) -> Result<NodeRef> {
+ self.node_refs
+ .lock()
+ .by_handle
+ .get_mut(&handle)
+ .ok_or(ENOENT)?
+ .node_ref()
+ .clone(strong)
+ }
+
+ pub(crate) fn remove_from_delivered_deaths(&self, death: &DArc<NodeDeath>) {
+ let mut inner = self.inner.lock();
+ // SAFETY: By the invariant on the `delivered_links` field, this is the right linked list.
+ let removed = unsafe { inner.delivered_deaths.remove(death) };
+ drop(inner);
+ drop(removed);
+ }
+
+ pub(crate) fn update_ref(
+ self: ArcBorrow<'_, Process>,
+ handle: u32,
+ inc: bool,
+ strong: bool,
+ ) -> Result {
+ if inc && handle == 0 {
+ if let Ok(node_ref) = self.ctx.get_manager_node(strong) {
+ if core::ptr::eq(&*self, &*node_ref.node.owner) {
+ return Err(EINVAL);
+ }
+ let _ = self.insert_or_update_handle(node_ref, true);
+ return Ok(());
+ }
+ }
+
+ // To preserve original binder behaviour, we only fail requests where the manager tries to
+ // increment references on itself.
+ let mut refs = self.node_refs.lock();
+ if let Some(info) = refs.by_handle.get_mut(&handle) {
+ if info.node_ref().update(inc, strong) {
+ // Clean up death if there is one attached to this node reference.
+ if let Some(death) = info.death().take() {
+ death.set_cleared(true);
+ self.remove_from_delivered_deaths(&death);
+ }
+
+ // Remove reference from process tables, and from the node's `refs` list.
+
+ // SAFETY: We are removing the `NodeRefInfo` from the right node.
+ unsafe { info.node_ref2().node.remove_node_info(info) };
+
+ let id = info.node_ref().node.global_id();
+ refs.by_handle.remove(&handle);
+ refs.by_node.remove(&id);
+ refs.handle_is_present.release_id(handle as usize);
+
+ if let Some(shrink) = refs.handle_is_present.shrink_request() {
+ drop(refs);
+ // This intentionally ignores allocation failures.
+ if let Ok(new_bitmap) = shrink.realloc(GFP_KERNEL) {
+ refs = self.node_refs.lock();
+ refs.handle_is_present.shrink(new_bitmap);
+ }
+ }
+ }
+ } else {
+            // All refs are cleared on process exit, so a missing ref is expected in that case
+            // and the warning is skipped.
+ if !self.inner.lock().is_dead {
+ pr_warn!("{}: no such ref {handle}\n", self.pid_in_current_ns());
+ }
+ }
+ Ok(())
+ }
+
+ /// Decrements the refcount of the given node, if one exists.
+ pub(crate) fn update_node(&self, ptr: u64, cookie: u64, strong: bool) {
+ let mut inner = self.inner.lock();
+ if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
+ inner.update_node_refcount(&node, false, strong, 1, None);
+ }
+ }
+
+ pub(crate) fn inc_ref_done(&self, reader: &mut UserSliceReader, strong: bool) -> Result {
+ let ptr = reader.read::<u64>()?;
+ let cookie = reader.read::<u64>()?;
+ let mut inner = self.inner.lock();
+ if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
+ if let Some(node) = node.inc_ref_done_locked(strong, &mut inner) {
+ // This only fails if the process is dead.
+ let _ = inner.push_work(node);
+ }
+ }
+ Ok(())
+ }
+
+ pub(crate) fn buffer_alloc(
+ self: &Arc<Self>,
+ debug_id: usize,
+ size: usize,
+ is_oneway: bool,
+ from_pid: i32,
+ ) -> BinderResult<NewAllocation> {
+ use kernel::page::PAGE_SIZE;
+
+ let mut reserve_new_args = ReserveNewArgs {
+ debug_id,
+ size,
+ is_oneway,
+ pid: from_pid,
+ ..ReserveNewArgs::default()
+ };
+
+ let (new_alloc, addr) = loop {
+ let mut inner = self.inner.lock();
+ let mapping = inner.mapping.as_mut().ok_or_else(BinderError::new_dead)?;
+ let alloc_request = match mapping.alloc.reserve_new(reserve_new_args)? {
+ ReserveNew::Success(new_alloc) => break (new_alloc, mapping.address),
+ ReserveNew::NeedAlloc(request) => request,
+ };
+ drop(inner);
+ // We need to allocate memory and then call `reserve_new` again.
+ reserve_new_args = alloc_request.make_alloc()?;
+ };
+
+ let res = Allocation::new(
+ self.clone(),
+ debug_id,
+ new_alloc.offset,
+ size,
+ addr + new_alloc.offset,
+ new_alloc.oneway_spam_detected,
+ );
+
+ // This allocation will be marked as in use until the `Allocation` is used to free it.
+ //
+ // This method can't be called while holding a lock, so we release the lock first. It's
+ // okay for several threads to use the method on the same index at the same time. In that
+ // case, one of the calls will allocate the given page (if missing), and the other call
+ // will wait for the other call to finish allocating the page.
+ //
+ // We will not call `stop_using_range` in parallel with this on the same page, because the
+ // allocation can only be removed via the destructor of the `Allocation` object that we
+ // currently own.
+ match self.pages.use_range(
+ new_alloc.offset / PAGE_SIZE,
+ (new_alloc.offset + size).div_ceil(PAGE_SIZE),
+ ) {
+ Ok(()) => {}
+ Err(err) => {
+ pr_warn!("use_range failure {:?}", err);
+ return Err(err.into());
+ }
+ }
+
+ Ok(NewAllocation(res))
+ }
+
+ pub(crate) fn buffer_get(self: &Arc<Self>, ptr: usize) -> Option<Allocation> {
+ let mut inner = self.inner.lock();
+ let mapping = inner.mapping.as_mut()?;
+ let offset = ptr.checked_sub(mapping.address)?;
+ let (size, debug_id, odata) = mapping.alloc.reserve_existing(offset).ok()?;
+ let mut alloc = Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
+ if let Some(data) = odata {
+ alloc.set_info(data);
+ }
+ Some(alloc)
+ }
+
+ pub(crate) fn buffer_raw_free(&self, ptr: usize) {
+ let mut inner = self.inner.lock();
+ if let Some(ref mut mapping) = &mut inner.mapping {
+ let offset = match ptr.checked_sub(mapping.address) {
+ Some(offset) => offset,
+ None => return,
+ };
+
+ let freed_range = match mapping.alloc.reservation_abort(offset) {
+ Ok(freed_range) => freed_range,
+ Err(_) => {
+ pr_warn!(
+ "Pointer {:x} failed to free, base = {:x}\n",
+ ptr,
+ mapping.address
+ );
+ return;
+ }
+ };
+
+ // No more allocations in this range. Mark them as not in use.
+ //
+ // Must be done before we release the lock so that `use_range` is not used on these
+ // indices until `stop_using_range` returns.
+ self.pages
+ .stop_using_range(freed_range.start_page_idx, freed_range.end_page_idx);
+ }
+ }
+
+ pub(crate) fn buffer_make_freeable(&self, offset: usize, mut data: Option<AllocationInfo>) {
+ let mut inner = self.inner.lock();
+ if let Some(ref mut mapping) = &mut inner.mapping {
+ if mapping.alloc.reservation_commit(offset, &mut data).is_err() {
+ pr_warn!("Offset {} failed to be marked freeable\n", offset);
+ }
+ }
+ }
+
+ fn create_mapping(&self, vma: &mm::virt::VmaNew) -> Result {
+ use kernel::page::PAGE_SIZE;
+ let size = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
+ let mapping = Mapping::new(vma.start(), size);
+ let page_count = self.pages.register_with_vma(vma)?;
+ if page_count * PAGE_SIZE != size {
+ return Err(EINVAL);
+ }
+
+ // Save range allocator for later.
+ self.inner.lock().mapping = Some(mapping);
+
+ Ok(())
+ }
+
+ fn version(&self, data: UserSlice) -> Result {
+ data.writer().write(&BinderVersion::current())
+ }
+
+ pub(crate) fn register_thread(&self) -> bool {
+ self.inner.lock().register_thread()
+ }
+
+ fn remove_thread(&self, thread: Arc<Thread>) {
+ self.inner.lock().threads.remove(&thread.id);
+ thread.release();
+ }
+
+ fn set_max_threads(&self, max: u32) {
+ self.inner.lock().max_threads = max;
+ }
+
+ fn set_oneway_spam_detection_enabled(&self, enabled: u32) {
+ self.inner.lock().oneway_spam_detection_enabled = enabled != 0;
+ }
+
+ pub(crate) fn is_oneway_spam_detection_enabled(&self) -> bool {
+ self.inner.lock().oneway_spam_detection_enabled
+ }
+
+ fn get_node_debug_info(&self, data: UserSlice) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+
+ // Read the starting point.
+ let ptr = reader.read::<BinderNodeDebugInfo>()?.ptr;
+ let mut out = BinderNodeDebugInfo::default();
+
+ {
+ let inner = self.inner.lock();
+ for (node_ptr, node) in &inner.nodes {
+ if *node_ptr > ptr {
+ node.populate_debug_info(&mut out, &inner);
+ break;
+ }
+ }
+ }
+
+ writer.write(&out)
+ }
+
+ fn get_node_info_from_ref(&self, data: UserSlice) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+ let mut out = reader.read::<BinderNodeInfoForRef>()?;
+
+ if out.strong_count != 0
+ || out.weak_count != 0
+ || out.reserved1 != 0
+ || out.reserved2 != 0
+ || out.reserved3 != 0
+ {
+ return Err(EINVAL);
+ }
+
+ // Only the context manager is allowed to use this ioctl.
+ if !self.inner.lock().is_manager {
+ return Err(EPERM);
+ }
+
+ {
+ let mut node_refs = self.node_refs.lock();
+ let node_info = node_refs.by_handle.get_mut(&out.handle).ok_or(ENOENT)?;
+ let node_ref = node_info.node_ref();
+ let owner_inner = node_ref.node.owner.inner.lock();
+ node_ref.node.populate_counts(&mut out, &owner_inner);
+ }
+
+ // Write the result back.
+ writer.write(&out)
+ }
+
+ pub(crate) fn needs_thread(&self) -> bool {
+ let mut inner = self.inner.lock();
+ let ret = inner.requested_thread_count == 0
+ && inner.ready_threads.is_empty()
+ && inner.started_thread_count < inner.max_threads;
+ if ret {
+ inner.requested_thread_count += 1
+ }
+ ret
+ }
+
+ pub(crate) fn request_death(
+ self: &Arc<Self>,
+ reader: &mut UserSliceReader,
+ thread: &Thread,
+ ) -> Result {
+ let handle: u32 = reader.read()?;
+ let cookie: u64 = reader.read()?;
+
+ // Queue BR_ERROR if we can't allocate memory for the death notification.
+ let death = UniqueArc::new_uninit(GFP_KERNEL).inspect_err(|_| {
+ thread.push_return_work(BR_ERROR);
+ })?;
+ let mut refs = self.node_refs.lock();
+ let Some(info) = refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_REQUEST_DEATH_NOTIFICATION invalid ref {handle}\n");
+ return Ok(());
+ };
+
+ // Nothing to do if there is already a death notification request for this handle.
+ if info.death().is_some() {
+ pr_warn!("BC_REQUEST_DEATH_NOTIFICATION death notification already set\n");
+ return Ok(());
+ }
+
+ let death = {
+ let death_init = NodeDeath::new(info.node_ref().node.clone(), self.clone(), cookie);
+ match death.pin_init_with(death_init) {
+ Ok(death) => death,
+ // error is infallible
+ Err(err) => match err {},
+ }
+ };
+
+ // Register the death notification.
+ {
+ let owner = info.node_ref2().node.owner.clone();
+ let mut owner_inner = owner.inner.lock();
+ if owner_inner.is_dead {
+ let death = Arc::from(death);
+ *info.death() = Some(death.clone());
+ drop(owner_inner);
+ death.set_dead();
+ } else {
+ let death = ListArc::from(death);
+ *info.death() = Some(death.clone_arc());
+ info.node_ref().node.add_death(death, &mut owner_inner);
+ }
+ }
+ Ok(())
+ }
+
+ pub(crate) fn clear_death(&self, reader: &mut UserSliceReader, thread: &Thread) -> Result {
+ let handle: u32 = reader.read()?;
+ let cookie: u64 = reader.read()?;
+
+ let mut refs = self.node_refs.lock();
+ let Some(info) = refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_CLEAR_DEATH_NOTIFICATION invalid ref {handle}\n");
+ return Ok(());
+ };
+
+ let Some(death) = info.death().take() else {
+ pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification not active\n");
+ return Ok(());
+ };
+ if death.cookie != cookie {
+ *info.death() = Some(death);
+ pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch\n");
+ return Ok(());
+ }
+
+ // Update state and determine if we need to queue a work item. We only need to do it when
+ // the node is not dead or if the user already completed the death notification.
+ if death.set_cleared(false) {
+ if let Some(death) = ListArc::try_from_arc_or_drop(death) {
+ let _ = thread.push_work_if_looper(death);
+ }
+ }
+
+ Ok(())
+ }
+
+ pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) {
+ if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
+ death.set_notification_done(thread);
+ }
+ }
+
+    /// Locks the spinlock and moves the `nodes` rbtree out.
+ ///
+ /// This allows you to iterate through `nodes` while also allowing you to give other parts of
+ /// the codebase exclusive access to `ProcessInner`.
+ pub(crate) fn lock_with_nodes(&self) -> WithNodes<'_> {
+ let mut inner = self.inner.lock();
+ WithNodes {
+ nodes: take(&mut inner.nodes),
+ inner,
+ }
+ }
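+
+    // Illustrative usage (sketch): the split borrow lets callers walk the tree
+    // while still handing the guard to methods that need `ProcessInner`:
+    //
+    //     let mut with = process.lock_with_nodes();
+    //     for node in with.nodes.values() {
+    //         node.full_debug_print(m, &mut with.inner)?;
+    //     }
+    //     // dropping `with` moves the tree back and releases the lock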
+
+ fn deferred_flush(&self) {
+ let inner = self.inner.lock();
+ for thread in inner.threads.values() {
+ thread.exit_looper();
+ }
+ }
+
+ fn deferred_release(self: Arc<Self>) {
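+        // Teardown proceeds in phases, mirroring the code below: mark the
+        // process dead, deregister from the context, release threads, then
+        // nodes, then references to foreign nodes (clearing death and freeze
+        // listeners along the way), cancel pending work, flush delivered death
+        // notifications, and finally drain the mapping's allocations.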
+ let is_manager = {
+ let mut inner = self.inner.lock();
+ inner.is_dead = true;
+ inner.is_frozen = IsFrozen::No;
+ inner.sync_recv = false;
+ inner.async_recv = false;
+ inner.is_manager
+ };
+
+ if is_manager {
+ self.ctx.unset_manager_node();
+ }
+
+ self.ctx.deregister_process(&self);
+
+ let binderfs_file = self.inner.lock().binderfs_file.take();
+ drop(binderfs_file);
+
+ // Release threads.
+ let threads = {
+ let mut inner = self.inner.lock();
+ let threads = take(&mut inner.threads);
+ let ready = take(&mut inner.ready_threads);
+ drop(inner);
+ drop(ready);
+
+ for thread in threads.values() {
+ thread.release();
+ }
+ threads
+ };
+
+ // Release nodes.
+ {
+ while let Some(node) = {
+ let mut lock = self.inner.lock();
+ lock.nodes.cursor_front_mut().map(|c| c.remove_current().1)
+ } {
+ node.to_key_value().1.release();
+ }
+ }
+
+ // Clean up death listeners and remove nodes from external node info lists.
+ for info in self.node_refs.lock().by_handle.values_mut() {
+ // SAFETY: We are removing the `NodeRefInfo` from the right node.
+ unsafe { info.node_ref2().node.remove_node_info(info) };
+
+ // Remove all death notifications from the nodes (that belong to a different process).
+ let death = if let Some(existing) = info.death().take() {
+ existing
+ } else {
+ continue;
+ };
+ death.set_cleared(false);
+ }
+
+ // Clean up freeze listeners.
+ let freeze_listeners = take(&mut self.node_refs.lock().freeze_listeners);
+ for listener in freeze_listeners.values() {
+ listener.on_process_exit(&self);
+ }
+ drop(freeze_listeners);
+
+ // Release refs on foreign nodes.
+ {
+ let mut refs = self.node_refs.lock();
+ let by_handle = take(&mut refs.by_handle);
+ let by_node = take(&mut refs.by_node);
+ drop(refs);
+ drop(by_node);
+ drop(by_handle);
+ }
+
+ // Cancel all pending work items.
+ while let Some(work) = self.get_work() {
+ work.into_arc().cancel();
+ }
+
+ // Clear delivered_deaths list.
+ //
+        // The block expression ensures that the lock guard is dropped before the loop body
+        // executes.
+ while let Some(delivered_death) = { self.inner.lock().delivered_deaths.pop_front() } {
+ drop(delivered_death);
+ }
+
+ // Free any resources kept alive by allocated buffers.
+ let omapping = self.inner.lock().mapping.take();
+ if let Some(mut mapping) = omapping {
+ let address = mapping.address;
+ mapping
+ .alloc
+ .take_for_each(|offset, size, debug_id, odata| {
+ let ptr = offset + address;
+ let mut alloc =
+ Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
+ if let Some(data) = odata {
+ alloc.set_info(data);
+ }
+ drop(alloc)
+ });
+ }
+
+        // Calls to synchronize_rcu() in the `Thread` destructor happen here.
+ drop(threads);
+ }
+
+ pub(crate) fn drop_outstanding_txn(&self) {
+ let wake = {
+ let mut inner = self.inner.lock();
+ if inner.outstanding_txns == 0 {
+ pr_err!("outstanding_txns underflow");
+ return;
+ }
+ inner.outstanding_txns -= 1;
+ inner.is_frozen.is_frozen() && inner.outstanding_txns == 0
+ };
+
+ if wake {
+ self.freeze_wait.notify_all();
+ }
+ }
+
+ pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
+ if info.enable == 0 {
+ let msgs = self.prepare_freeze_messages()?;
+ let mut inner = self.inner.lock();
+ inner.sync_recv = false;
+ inner.async_recv = false;
+ inner.is_frozen = IsFrozen::No;
+ drop(inner);
+ msgs.send_messages();
+ return Ok(());
+ }
+
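+        // Freezing is two-phase: `InProgress` makes incoming sync transactions
+        // fail while we wait (up to `timeout_ms`) for outstanding transactions
+        // to drain; only then do we commit to `Yes` and send the freeze
+        // notifications, rolling back to `No` on a signal, on pending
+        // transactions, or on allocation failure.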
+ let mut inner = self.inner.lock();
+ inner.sync_recv = false;
+ inner.async_recv = false;
+ inner.is_frozen = IsFrozen::InProgress;
+
+ if info.timeout_ms > 0 {
+ let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);
+ while jiffies > 0 {
+ if inner.outstanding_txns == 0 {
+ break;
+ }
+
+ match self
+ .freeze_wait
+ .wait_interruptible_timeout(&mut inner, jiffies)
+ {
+ CondVarTimeoutResult::Signal { .. } => {
+ inner.is_frozen = IsFrozen::No;
+ return Err(ERESTARTSYS);
+ }
+ CondVarTimeoutResult::Woken { jiffies: remaining } => {
+ jiffies = remaining;
+ }
+ CondVarTimeoutResult::Timeout => {
+ jiffies = 0;
+ }
+ }
+ }
+ }
+
+ if inner.txns_pending_locked() {
+ inner.is_frozen = IsFrozen::No;
+ Err(EAGAIN)
+ } else {
+ drop(inner);
+ match self.prepare_freeze_messages() {
+ Ok(batch) => {
+ self.inner.lock().is_frozen = IsFrozen::Yes;
+ batch.send_messages();
+ Ok(())
+ }
+ Err(kernel::alloc::AllocError) => {
+ self.inner.lock().is_frozen = IsFrozen::No;
+ Err(ENOMEM)
+ }
+ }
+ }
+ }
+}
+
+fn get_frozen_status(data: UserSlice) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+
+ let mut info = reader.read::<BinderFrozenStatusInfo>()?;
+ info.sync_recv = 0;
+ info.async_recv = 0;
+ let mut found = false;
+
+ for ctx in crate::context::get_all_contexts()? {
+ ctx.for_each_proc(|proc| {
+ if proc.task.pid() == info.pid as _ {
+ found = true;
+ let inner = proc.inner.lock();
+ let txns_pending = inner.txns_pending_locked();
+ info.async_recv |= inner.async_recv as u32;
+ info.sync_recv |= inner.sync_recv as u32;
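+                // Bit 1 of `sync_recv` encodes whether transactions are still pending.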
+ info.sync_recv |= (txns_pending as u32) << 1;
+ }
+ });
+ }
+
+ if found {
+ writer.write(&info)?;
+ Ok(())
+ } else {
+ Err(EINVAL)
+ }
+}
+
+fn ioctl_freeze(reader: &mut UserSliceReader) -> Result {
+ let info = reader.read::<BinderFreezeInfo>()?;
+
+    // It's very unlikely for there to be more than 3 contexts, since a process normally uses at
+    // most binder and hwbinder.
+ let mut procs = KVec::with_capacity(3, GFP_KERNEL)?;
+
+ let ctxs = crate::context::get_all_contexts()?;
+ for ctx in ctxs {
+ for proc in ctx.get_procs_with_pid(info.pid as i32)? {
+ procs.push(proc, GFP_KERNEL)?;
+ }
+ }
+
+ for proc in procs {
+ proc.ioctl_freeze(&info)?;
+ }
+ Ok(())
+}
+
+/// The ioctl handler.
+impl Process {
+ /// Ioctls that are write-only from the perspective of userspace.
+ ///
+ /// The kernel will only read from the pointer that userspace provided to us.
+ fn ioctl_write_only(
+ this: ArcBorrow<'_, Process>,
+ _file: &File,
+ cmd: u32,
+ reader: &mut UserSliceReader,
+ ) -> Result {
+ let thread = this.get_current_thread()?;
+ match cmd {
+ uapi::BINDER_SET_MAX_THREADS => this.set_max_threads(reader.read()?),
+ uapi::BINDER_THREAD_EXIT => this.remove_thread(thread),
+ uapi::BINDER_SET_CONTEXT_MGR => this.set_as_manager(None, &thread)?,
+ uapi::BINDER_SET_CONTEXT_MGR_EXT => {
+ this.set_as_manager(Some(reader.read()?), &thread)?
+ }
+ uapi::BINDER_ENABLE_ONEWAY_SPAM_DETECTION => {
+ this.set_oneway_spam_detection_enabled(reader.read()?)
+ }
+ uapi::BINDER_FREEZE => ioctl_freeze(reader)?,
+ _ => return Err(EINVAL),
+ }
+ Ok(())
+ }
+
+ /// Ioctls that are read/write from the perspective of userspace.
+ ///
+ /// The kernel will both read from and write to the pointer that userspace provided to us.
+ fn ioctl_write_read(
+ this: ArcBorrow<'_, Process>,
+ file: &File,
+ cmd: u32,
+ data: UserSlice,
+ ) -> Result {
+ let thread = this.get_current_thread()?;
+ let blocking = (file.flags() & file::flags::O_NONBLOCK) == 0;
+ match cmd {
+ uapi::BINDER_WRITE_READ => thread.write_read(data, blocking)?,
+ uapi::BINDER_GET_NODE_DEBUG_INFO => this.get_node_debug_info(data)?,
+ uapi::BINDER_GET_NODE_INFO_FOR_REF => this.get_node_info_from_ref(data)?,
+ uapi::BINDER_VERSION => this.version(data)?,
+ uapi::BINDER_GET_FROZEN_INFO => get_frozen_status(data)?,
+ uapi::BINDER_GET_EXTENDED_ERROR => thread.get_extended_error(data)?,
+ _ => return Err(EINVAL),
+ }
+ Ok(())
+ }
+}
+
+/// The file operations supported by `Process`.
+impl Process {
+ pub(crate) fn open(ctx: ArcBorrow<'_, Context>, file: &File) -> Result<Arc<Process>> {
+ Self::new(ctx.into(), ARef::from(file.cred()))
+ }
+
+ pub(crate) fn release(this: Arc<Process>, _file: &File) {
+ let binderfs_file;
+ let should_schedule;
+ {
+ let mut inner = this.inner.lock();
+ should_schedule = inner.defer_work == 0;
+ inner.defer_work |= PROC_DEFER_RELEASE;
+ binderfs_file = inner.binderfs_file.take();
+ }
+
+ if should_schedule {
+ // Ignore failures to schedule to the workqueue. Those just mean that we're already
+ // scheduled for execution.
+ let _ = workqueue::system().enqueue(this);
+ }
+
+ drop(binderfs_file);
+ }
+
+ pub(crate) fn flush(this: ArcBorrow<'_, Process>) -> Result {
+ let should_schedule;
+ {
+ let mut inner = this.inner.lock();
+ should_schedule = inner.defer_work == 0;
+ inner.defer_work |= PROC_DEFER_FLUSH;
+ }
+
+ if should_schedule {
+ // Ignore failures to schedule to the workqueue. Those just mean that we're already
+ // scheduled for execution.
+ let _ = workqueue::system().enqueue(Arc::from(this));
+ }
+ Ok(())
+ }
+
+ pub(crate) fn ioctl(this: ArcBorrow<'_, Process>, file: &File, cmd: u32, arg: usize) -> Result {
+ use kernel::ioctl::{_IOC_DIR, _IOC_SIZE};
+ use kernel::uapi::{_IOC_READ, _IOC_WRITE};
+
+ crate::trace::trace_ioctl(cmd, arg);
+
+ let user_slice = UserSlice::new(UserPtr::from_addr(arg), _IOC_SIZE(cmd));
+
+ const _IOC_READ_WRITE: u32 = _IOC_READ | _IOC_WRITE;
+
+ match _IOC_DIR(cmd) {
+ _IOC_WRITE => Self::ioctl_write_only(this, file, cmd, &mut user_slice.reader()),
+ _IOC_READ_WRITE => Self::ioctl_write_read(this, file, cmd, user_slice),
+ _ => Err(EINVAL),
+ }
+ }
+
+ pub(crate) fn mmap(
+ this: ArcBorrow<'_, Process>,
+ _file: &File,
+ vma: &mm::virt::VmaNew,
+ ) -> Result {
+ // We don't allow mmap to be used in a different process.
+ if !core::ptr::eq(kernel::current!().group_leader(), &*this.task) {
+ return Err(EINVAL);
+ }
+ if vma.start() == 0 {
+ return Err(EINVAL);
+ }
+
+ vma.try_clear_maywrite().map_err(|_| EPERM)?;
+ vma.set_dontcopy();
+ vma.set_mixedmap();
+
+ // TODO: Set ops. We need to learn when the user unmaps so that we can stop using it.
+ this.create_mapping(vma)
+ }
+
+ pub(crate) fn poll(
+ this: ArcBorrow<'_, Process>,
+ file: &File,
+ table: PollTable<'_>,
+ ) -> Result<u32> {
+ let thread = this.get_current_thread()?;
+ let (from_proc, mut mask) = thread.poll(file, table);
+ if mask == 0 && from_proc && !this.inner.lock().work.is_empty() {
+ mask |= bindings::POLLIN;
+ }
+ Ok(mask)
+ }
+}
+
+/// Represents that a thread has registered with the `ready_threads` list of its process.
+///
+/// The destructor of this type will unregister the thread from the list of ready threads.
+pub(crate) struct Registration<'a> {
+ thread: &'a Arc<Thread>,
+}
+
+impl<'a> Registration<'a> {
+ fn new(thread: &'a Arc<Thread>, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) -> Self {
+ assert!(core::ptr::eq(&thread.process.inner, guard.lock_ref()));
+ // INVARIANT: We are pushing this thread to the right `ready_threads` list.
+ if let Ok(list_arc) = ListArc::try_from_arc(thread.clone()) {
+ guard.ready_threads.push_front(list_arc);
+ } else {
+ // It is an error to hit this branch, and it should not be reachable. We try to do
+ // something reasonable when the failure path happens. Most likely, the thread in
+ // question will sleep forever.
+ pr_err!("Same thread registered with `ready_threads` twice.");
+ }
+ Self { thread }
+ }
+}
+
+impl Drop for Registration<'_> {
+ fn drop(&mut self) {
+ let mut inner = self.thread.process.inner.lock();
+ // SAFETY: The thread has the invariant that we never push it to any other linked list than
+ // the `ready_threads` list of its parent process. Therefore, the thread is either in that
+ // list, or in no list.
+ unsafe { inner.ready_threads.remove(self.thread) };
+ }
+}
+
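+/// A guard that temporarily takes the `nodes` tree out of a locked `ProcessInner`.
+///
+/// On drop, the tree is swapped back into place. If the (then empty) tree in
+/// `ProcessInner` gained entries while the guard was live, those entries are
+/// dropped and an error is logged, since that indicates a misuse of
+/// `lock_with_nodes`.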
+pub(crate) struct WithNodes<'a> {
+ pub(crate) inner: Guard<'a, ProcessInner, SpinLockBackend>,
+ pub(crate) nodes: RBTree<u64, DArc<Node>>,
+}
+
+impl Drop for WithNodes<'_> {
+ fn drop(&mut self) {
+ core::mem::swap(&mut self.nodes, &mut self.inner.nodes);
+ if self.nodes.iter().next().is_some() {
+ pr_err!("nodes array was modified while using lock_with_nodes\n");
+ }
+ }
+}
+
+pub(crate) enum GetWorkOrRegister<'a> {
+ Work(DLArc<dyn DeliverToRead>),
+ Register(Registration<'a>),
+}
diff --git a/drivers/android/binder/range_alloc/array.rs b/drivers/android/binder/range_alloc/array.rs
new file mode 100644
index 000000000000..07e1dec2ce63
--- /dev/null
+++ b/drivers/android/binder/range_alloc/array.rs
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ page::{PAGE_MASK, PAGE_SIZE},
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ task::Pid,
+};
+
+use crate::range_alloc::{DescriptorState, FreedRange, Range};
+
+/// Keeps track of allocations in a process' mmap.
+///
+/// Each process has an mmap where the data for incoming transactions will be placed. This struct
+/// keeps track of allocations made in the mmap. For each allocation, we store a descriptor that
+/// has metadata related to the allocation. We also keep track of available free space.
+pub(super) struct ArrayRangeAllocator<T> {
+ /// This stores all ranges that are allocated. Unlike the tree based allocator, we do *not*
+ /// store the free ranges.
+ ///
+ /// Sorted by offset.
+ pub(super) ranges: KVec<Range<T>>,
+ size: usize,
+ free_oneway_space: usize,
+}
+
+struct FindEmptyRes {
+ /// Which index in `ranges` should we insert the new range at?
+ ///
+ /// Inserting the new range at this index keeps `ranges` sorted.
+ insert_at_idx: usize,
+ /// Which offset should we insert the new range at?
+ insert_at_offset: usize,
+}
+
+impl<T> ArrayRangeAllocator<T> {
+ pub(crate) fn new(size: usize, alloc: EmptyArrayAlloc<T>) -> Self {
+ Self {
+ ranges: alloc.ranges,
+ size,
+ free_oneway_space: size / 2,
+ }
+ }
+
+ pub(crate) fn free_oneway_space(&self) -> usize {
+ self.free_oneway_space
+ }
+
+ pub(crate) fn count_buffers(&self) -> usize {
+ self.ranges.len()
+ }
+
+ pub(crate) fn total_size(&self) -> usize {
+ self.size
+ }
+
+ pub(crate) fn is_full(&self) -> bool {
+ self.ranges.len() == self.ranges.capacity()
+ }
+
+ pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
+ for range in &self.ranges {
+ seq_print!(
+ m,
+ " buffer {}: {} size {} pid {} oneway {}",
+ 0,
+ range.offset,
+ range.size,
+ range.state.pid(),
+ range.state.is_oneway(),
+ );
+ if let DescriptorState::Reserved(_) = range.state {
+ seq_print!(m, " reserved\n");
+ } else {
+ seq_print!(m, " allocated\n");
+ }
+ }
+ Ok(())
+ }
+
+ /// Find somewhere to put a new range.
+ ///
+ /// Unlike the tree implementation, we do not bother to find the smallest gap. The idea is that
+ /// fragmentation isn't a big issue when we don't have many ranges.
+ ///
+ /// Returns the index that the new range should have in `self.ranges` after insertion.
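+    ///
+    /// For example (illustrative): with `total_size = 100` and existing ranges
+    /// `[0, 10)` and `[40, 95)`, a request of size 20 does not fit in the 5
+    /// bytes after the last range, so the scan returns the 30-byte gap at
+    /// offset 10, with `insert_at_idx = 1`.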
+ fn find_empty_range(&self, size: usize) -> Option<FindEmptyRes> {
+ let after_last_range = self.ranges.last().map(Range::endpoint).unwrap_or(0);
+
+ if size <= self.total_size() - after_last_range {
+ // We can put the range at the end, so just do that.
+ Some(FindEmptyRes {
+ insert_at_idx: self.ranges.len(),
+ insert_at_offset: after_last_range,
+ })
+ } else {
+ let mut end_of_prev = 0;
+ for (i, range) in self.ranges.iter().enumerate() {
+ // Does it fit before the i'th range?
+ if size <= range.offset - end_of_prev {
+ return Some(FindEmptyRes {
+ insert_at_idx: i,
+ insert_at_offset: end_of_prev,
+ });
+ }
+ end_of_prev = range.endpoint();
+ }
+ None
+ }
+ }
+
+ pub(crate) fn reserve_new(
+ &mut self,
+ debug_id: usize,
+ size: usize,
+ is_oneway: bool,
+ pid: Pid,
+ ) -> Result<usize> {
+ // Compute new value of free_oneway_space, which is set only on success.
+ let new_oneway_space = if is_oneway {
+ match self.free_oneway_space.checked_sub(size) {
+ Some(new_oneway_space) => new_oneway_space,
+ None => return Err(ENOSPC),
+ }
+ } else {
+ self.free_oneway_space
+ };
+
+ let FindEmptyRes {
+ insert_at_idx,
+ insert_at_offset,
+ } = self.find_empty_range(size).ok_or(ENOSPC)?;
+ self.free_oneway_space = new_oneway_space;
+
+ let new_range = Range {
+ offset: insert_at_offset,
+ size,
+ state: DescriptorState::new(is_oneway, debug_id, pid),
+ };
+ // Insert the value at the given index to keep the array sorted.
+ self.ranges
+ .insert_within_capacity(insert_at_idx, new_range)
+ .ok()
+ .unwrap();
+
+ Ok(insert_at_offset)
+ }
+
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+ // This could use a binary search, but linear scans are usually faster for small arrays.
+ let i = self
+ .ranges
+ .iter()
+ .position(|range| range.offset == offset)
+ .ok_or(EINVAL)?;
+ let range = &self.ranges[i];
+
+ if let DescriptorState::Allocated(_) = range.state {
+ return Err(EPERM);
+ }
+
+ let size = range.size;
+ let offset = range.offset;
+
+ if range.state.is_oneway() {
+ self.free_oneway_space += size;
+ }
+
+ // This computes the range of pages that are no longer used by *any* allocated range. The
+ // caller will mark them as unused, which means that they can be freed if the system comes
+ // under memory pressure.
+ let mut freed_range = FreedRange::interior_pages(offset, size);
+ #[expect(clippy::collapsible_if)] // reads better like this
+ if offset % PAGE_SIZE != 0 {
+ if i == 0 || self.ranges[i - 1].endpoint() <= (offset & PAGE_MASK) {
+ freed_range.start_page_idx -= 1;
+ }
+ }
+ if range.endpoint() % PAGE_SIZE != 0 {
+ let page_after = (range.endpoint() & PAGE_MASK) + PAGE_SIZE;
+ if i + 1 == self.ranges.len() || page_after <= self.ranges[i + 1].offset {
+ freed_range.end_page_idx += 1;
+ }
+ }
+
+ self.ranges.remove(i)?;
+ Ok(freed_range)
+ }
+
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
+ // This could use a binary search, but linear scans are usually faster for small arrays.
+ let range = self
+ .ranges
+ .iter_mut()
+ .find(|range| range.offset == offset)
+ .ok_or(ENOENT)?;
+
+ let DescriptorState::Reserved(reservation) = &range.state else {
+ return Err(ENOENT);
+ };
+
+ range.state = DescriptorState::Allocated(reservation.clone().allocate(data.take()));
+ Ok(())
+ }
+
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
+ // This could use a binary search, but linear scans are usually faster for small arrays.
+ let range = self
+ .ranges
+ .iter_mut()
+ .find(|range| range.offset == offset)
+ .ok_or(ENOENT)?;
+
+ let DescriptorState::Allocated(allocation) = &mut range.state else {
+ return Err(ENOENT);
+ };
+
+ let data = allocation.take();
+ let debug_id = allocation.reservation.debug_id;
+ range.state = DescriptorState::Reserved(allocation.reservation.clone());
+ Ok((range.size, debug_id, data))
+ }
+
+ pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
+ for range in self.ranges.iter_mut() {
+ if let DescriptorState::Allocated(allocation) = &mut range.state {
+ callback(
+ range.offset,
+ range.size,
+ allocation.reservation.debug_id,
+ allocation.data.take(),
+ );
+ }
+ }
+ }
+}
+
+pub(crate) struct EmptyArrayAlloc<T> {
+ ranges: KVec<Range<T>>,
+}
+
+impl<T> EmptyArrayAlloc<T> {
+ pub(crate) fn try_new(capacity: usize) -> Result<Self> {
+ Ok(Self {
+ ranges: KVec::with_capacity(capacity, GFP_KERNEL)?,
+ })
+ }
+}
diff --git a/drivers/android/binder/range_alloc/mod.rs b/drivers/android/binder/range_alloc/mod.rs
new file mode 100644
index 000000000000..2301e2bc1a1f
--- /dev/null
+++ b/drivers/android/binder/range_alloc/mod.rs
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{page::PAGE_SIZE, prelude::*, seq_file::SeqFile, task::Pid};
+
+mod tree;
+use self::tree::{FromArrayAllocs, ReserveNewTreeAlloc, TreeRangeAllocator};
+
+mod array;
+use self::array::{ArrayRangeAllocator, EmptyArrayAlloc};
+
+enum DescriptorState<T> {
+ Reserved(Reservation),
+ Allocated(Allocation<T>),
+}
+
+impl<T> DescriptorState<T> {
+ fn new(is_oneway: bool, debug_id: usize, pid: Pid) -> Self {
+ DescriptorState::Reserved(Reservation {
+ debug_id,
+ is_oneway,
+ pid,
+ })
+ }
+
+ fn pid(&self) -> Pid {
+ match self {
+ DescriptorState::Reserved(inner) => inner.pid,
+ DescriptorState::Allocated(inner) => inner.reservation.pid,
+ }
+ }
+
+ fn is_oneway(&self) -> bool {
+ match self {
+ DescriptorState::Reserved(inner) => inner.is_oneway,
+ DescriptorState::Allocated(inner) => inner.reservation.is_oneway,
+ }
+ }
+}
+
+#[derive(Clone)]
+struct Reservation {
+ debug_id: usize,
+ is_oneway: bool,
+ pid: Pid,
+}
+
+impl Reservation {
+ fn allocate<T>(self, data: Option<T>) -> Allocation<T> {
+ Allocation {
+ data,
+ reservation: self,
+ }
+ }
+}
+
+struct Allocation<T> {
+ reservation: Reservation,
+ data: Option<T>,
+}
+
+impl<T> Allocation<T> {
+ fn deallocate(self) -> (Reservation, Option<T>) {
+ (self.reservation, self.data)
+ }
+
+ fn debug_id(&self) -> usize {
+ self.reservation.debug_id
+ }
+
+ fn take(&mut self) -> Option<T> {
+ self.data.take()
+ }
+}
+
+/// The array implementation must switch to the tree if it wants to go beyond this number of
+/// ranges.
+const TREE_THRESHOLD: usize = 8;
+
+/// Represents a range of pages that have just become completely free.
+#[derive(Copy, Clone)]
+pub(crate) struct FreedRange {
+ pub(crate) start_page_idx: usize,
+ pub(crate) end_page_idx: usize,
+}
+
+impl FreedRange {
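+    /// Computes the range of pages that lie entirely within `[offset, offset + size)`.
+    ///
+    /// For example (assuming `PAGE_SIZE == 0x1000`), `interior_pages(0x800, 0x2000)`
+    /// spans the bytes `[0x800, 0x2800)`, so only page 1 is completely covered:
+    /// `start_page_idx` is 1 and `end_page_idx` is 2.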
+ fn interior_pages(offset: usize, size: usize) -> FreedRange {
+ FreedRange {
+            // Divide, rounding up.
+ start_page_idx: offset.div_ceil(PAGE_SIZE),
+            // Divide, rounding down.
+ end_page_idx: (offset + size) / PAGE_SIZE,
+ }
+ }
+}
+
+struct Range<T> {
+ offset: usize,
+ size: usize,
+ state: DescriptorState<T>,
+}
+
+impl<T> Range<T> {
+ fn endpoint(&self) -> usize {
+ self.offset + self.size
+ }
+}
+
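+/// Keeps track of allocations in a process' mmap, adaptively choosing a
+/// backing implementation.
+///
+/// A new allocator starts out `Empty`. The first reservation switches it to a
+/// fixed-capacity array, and once the array would exceed `TREE_THRESHOLD`
+/// entries it is converted to the red-black tree implementation. When the
+/// tree becomes empty again, the allocator reverts to `Empty`.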
+pub(crate) struct RangeAllocator<T> {
+ inner: Impl<T>,
+}
+
+enum Impl<T> {
+ Empty(usize),
+ Array(ArrayRangeAllocator<T>),
+ Tree(TreeRangeAllocator<T>),
+}
+
+impl<T> RangeAllocator<T> {
+ pub(crate) fn new(size: usize) -> Self {
+ Self {
+ inner: Impl::Empty(size),
+ }
+ }
+
+ pub(crate) fn free_oneway_space(&self) -> usize {
+ match &self.inner {
+ Impl::Empty(size) => size / 2,
+ Impl::Array(array) => array.free_oneway_space(),
+ Impl::Tree(tree) => tree.free_oneway_space(),
+ }
+ }
+
+ pub(crate) fn count_buffers(&self) -> usize {
+ match &self.inner {
+ Impl::Empty(_size) => 0,
+ Impl::Array(array) => array.count_buffers(),
+ Impl::Tree(tree) => tree.count_buffers(),
+ }
+ }
+
+ pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
+ match &self.inner {
+ Impl::Empty(_size) => Ok(()),
+ Impl::Array(array) => array.debug_print(m),
+ Impl::Tree(tree) => tree.debug_print(m),
+ }
+ }
+
+ /// Try to reserve a new buffer, using the provided allocation if necessary.
+ pub(crate) fn reserve_new(&mut self, mut args: ReserveNewArgs<T>) -> Result<ReserveNew<T>> {
+ match &mut self.inner {
+ Impl::Empty(size) => {
+ let empty_array = match args.empty_array_alloc.take() {
+ Some(empty_array) => ArrayRangeAllocator::new(*size, empty_array),
+ None => {
+ return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
+ args,
+ need_empty_array_alloc: true,
+ need_new_tree_alloc: false,
+ need_tree_alloc: false,
+ }))
+ }
+ };
+
+ self.inner = Impl::Array(empty_array);
+ self.reserve_new(args)
+ }
+ Impl::Array(array) if array.is_full() => {
+ let allocs = match args.new_tree_alloc {
+ Some(ref mut allocs) => allocs,
+ None => {
+ return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
+ args,
+ need_empty_array_alloc: false,
+ need_new_tree_alloc: true,
+ need_tree_alloc: true,
+ }))
+ }
+ };
+
+ let new_tree =
+ TreeRangeAllocator::from_array(array.total_size(), &mut array.ranges, allocs);
+
+ self.inner = Impl::Tree(new_tree);
+ self.reserve_new(args)
+ }
+ Impl::Array(array) => {
+ let offset =
+ array.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid)?;
+ Ok(ReserveNew::Success(ReserveNewSuccess {
+ offset,
+ oneway_spam_detected: false,
+ _empty_array_alloc: args.empty_array_alloc,
+ _new_tree_alloc: args.new_tree_alloc,
+ _tree_alloc: args.tree_alloc,
+ }))
+ }
+ Impl::Tree(tree) => {
+ let alloc = match args.tree_alloc {
+ Some(alloc) => alloc,
+ None => {
+ return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
+ args,
+ need_empty_array_alloc: false,
+ need_new_tree_alloc: false,
+ need_tree_alloc: true,
+ }));
+ }
+ };
+ let (offset, oneway_spam_detected) =
+ tree.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid, alloc)?;
+ Ok(ReserveNew::Success(ReserveNewSuccess {
+ offset,
+ oneway_spam_detected,
+ _empty_array_alloc: args.empty_array_alloc,
+ _new_tree_alloc: args.new_tree_alloc,
+ _tree_alloc: None,
+ }))
+ }
+ }
+ }
+
+ /// Deletes the allocations at `offset`.
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+ match &mut self.inner {
+ Impl::Empty(_size) => Err(EINVAL),
+ Impl::Array(array) => array.reservation_abort(offset),
+ Impl::Tree(tree) => {
+ let freed_range = tree.reservation_abort(offset)?;
+ if tree.is_empty() {
+ self.inner = Impl::Empty(tree.total_size());
+ }
+ Ok(freed_range)
+ }
+ }
+ }
+
+ /// Called when an allocation is no longer in use by the kernel.
+ ///
+ /// The value in `data` will be stored, if any. A mutable reference is used to avoid dropping
+ /// the `T` when an error is returned.
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
+ match &mut self.inner {
+ Impl::Empty(_size) => Err(EINVAL),
+ Impl::Array(array) => array.reservation_commit(offset, data),
+ Impl::Tree(tree) => tree.reservation_commit(offset, data),
+ }
+ }
+
+ /// Called when the kernel starts using an allocation.
+ ///
+ /// Returns the size of the existing entry and the data associated with it.
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
+ match &mut self.inner {
+ Impl::Empty(_size) => Err(EINVAL),
+ Impl::Array(array) => array.reserve_existing(offset),
+ Impl::Tree(tree) => tree.reserve_existing(offset),
+ }
+ }
+
+ /// Call the provided callback at every allocated region.
+ ///
+ /// This destroys the range allocator. Used only during shutdown.
+ pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
+ match &mut self.inner {
+ Impl::Empty(_size) => {}
+ Impl::Array(array) => array.take_for_each(callback),
+ Impl::Tree(tree) => tree.take_for_each(callback),
+ }
+ }
+}
+
+/// The arguments for `reserve_new`.
+#[derive(Default)]
+pub(crate) struct ReserveNewArgs<T> {
+ pub(crate) size: usize,
+ pub(crate) is_oneway: bool,
+ pub(crate) debug_id: usize,
+ pub(crate) pid: Pid,
+ pub(crate) empty_array_alloc: Option<EmptyArrayAlloc<T>>,
+ pub(crate) new_tree_alloc: Option<FromArrayAllocs<T>>,
+ pub(crate) tree_alloc: Option<ReserveNewTreeAlloc<T>>,
+}
+
+/// The return type of `ReserveNew`.
+pub(crate) enum ReserveNew<T> {
+ Success(ReserveNewSuccess<T>),
+ NeedAlloc(ReserveNewNeedAlloc<T>),
+}
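+
+// A minimal sketch of the intended retry loop (illustrative only; the real
+// caller holds the process lock while calling `reserve_new` and must drop it
+// around `make_alloc`, since the allocations there may sleep):
+//
+//     let mut args = ReserveNewArgs { size, is_oneway, debug_id, pid, ..Default::default() };
+//     let offset = loop {
+//         match range_alloc.reserve_new(args)? {
+//             ReserveNew::Success(ok) => break ok.offset,
+//             ReserveNew::NeedAlloc(need) => args = need.make_alloc()?,
+//         }
+//     };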
+
+/// Returned by `reserve_new` when the reservation was successful.
+pub(crate) struct ReserveNewSuccess<T> {
+ pub(crate) offset: usize,
+ pub(crate) oneway_spam_detected: bool,
+
+ // If the user supplied an allocation that we did not end up using, then we return it here.
+ // The caller will kfree it outside of the lock.
+ _empty_array_alloc: Option<EmptyArrayAlloc<T>>,
+ _new_tree_alloc: Option<FromArrayAllocs<T>>,
+ _tree_alloc: Option<ReserveNewTreeAlloc<T>>,
+}
+
+/// Returned by `reserve_new` to request the caller to make an allocation before calling the method
+/// again.
+pub(crate) struct ReserveNewNeedAlloc<T> {
+ args: ReserveNewArgs<T>,
+ need_empty_array_alloc: bool,
+ need_new_tree_alloc: bool,
+ need_tree_alloc: bool,
+}
+
+impl<T> ReserveNewNeedAlloc<T> {
+ /// Make the necessary allocations for another call to `reserve_new`.
+ pub(crate) fn make_alloc(mut self) -> Result<ReserveNewArgs<T>> {
+ if self.need_empty_array_alloc && self.args.empty_array_alloc.is_none() {
+ self.args.empty_array_alloc = Some(EmptyArrayAlloc::try_new(TREE_THRESHOLD)?);
+ }
+ if self.need_new_tree_alloc && self.args.new_tree_alloc.is_none() {
+ self.args.new_tree_alloc = Some(FromArrayAllocs::try_new(TREE_THRESHOLD)?);
+ }
+ if self.need_tree_alloc && self.args.tree_alloc.is_none() {
+ self.args.tree_alloc = Some(ReserveNewTreeAlloc::try_new()?);
+ }
+ Ok(self.args)
+ }
+}
diff --git a/drivers/android/binder/range_alloc/tree.rs b/drivers/android/binder/range_alloc/tree.rs
new file mode 100644
index 000000000000..838fdd2b47ea
--- /dev/null
+++ b/drivers/android/binder/range_alloc/tree.rs
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ page::PAGE_SIZE,
+ prelude::*,
+ rbtree::{RBTree, RBTreeNode, RBTreeNodeReservation},
+ seq_file::SeqFile,
+ seq_print,
+ task::Pid,
+};
+
+use crate::range_alloc::{DescriptorState, FreedRange, Range};
+
+/// Keeps track of allocations in a process' mmap.
+///
+/// Each process has an mmap where the data for incoming transactions will be placed. This struct
+/// keeps track of allocations made in the mmap. For each allocation, we store a descriptor that
+/// has metadata related to the allocation. We also keep track of available free space.
+pub(super) struct TreeRangeAllocator<T> {
+ /// This collection contains descriptors for *both* ranges containing an allocation, *and* free
+ /// ranges between allocations. The free ranges get merged, so there are never two free ranges
+ /// next to each other.
+ tree: RBTree<usize, Descriptor<T>>,
+ /// Contains an entry for every free range in `self.tree`. This tree sorts the ranges by size,
+ /// letting us look up the smallest range whose size is at least some lower bound.
+ free_tree: RBTree<FreeKey, ()>,
+ size: usize,
+ free_oneway_space: usize,
+}
+
+impl<T> TreeRangeAllocator<T> {
+ pub(crate) fn from_array(
+ size: usize,
+ ranges: &mut KVec<Range<T>>,
+ alloc: &mut FromArrayAllocs<T>,
+ ) -> Self {
+ let mut tree = TreeRangeAllocator {
+ tree: RBTree::new(),
+ free_tree: RBTree::new(),
+ size,
+ free_oneway_space: size / 2,
+ };
+
+ let mut free_offset = 0;
+ for range in ranges.drain_all() {
+ let free_size = range.offset - free_offset;
+ if free_size > 0 {
+ let free_node = alloc.free_tree.pop().unwrap();
+ tree.free_tree
+ .insert(free_node.into_node((free_size, free_offset), ()));
+ let tree_node = alloc.tree.pop().unwrap();
+ tree.tree.insert(
+ tree_node.into_node(free_offset, Descriptor::new(free_offset, free_size)),
+ );
+ }
+ free_offset = range.endpoint();
+
+ if range.state.is_oneway() {
+ tree.free_oneway_space = tree.free_oneway_space.saturating_sub(range.size);
+ }
+
+ let free_res = alloc.free_tree.pop().unwrap();
+ let tree_node = alloc.tree.pop().unwrap();
+ let mut desc = Descriptor::new(range.offset, range.size);
+ desc.state = Some((range.state, free_res));
+ tree.tree.insert(tree_node.into_node(range.offset, desc));
+ }
+
+ // After the last range, we may need a free range.
+ if free_offset < size {
+ let free_size = size - free_offset;
+ let free_node = alloc.free_tree.pop().unwrap();
+ tree.free_tree
+ .insert(free_node.into_node((free_size, free_offset), ()));
+ let tree_node = alloc.tree.pop().unwrap();
+ tree.tree
+ .insert(tree_node.into_node(free_offset, Descriptor::new(free_offset, free_size)));
+ }
+
+ tree
+ }
+
+ pub(crate) fn is_empty(&self) -> bool {
+ let mut tree_iter = self.tree.values();
+        // There's always at least one descriptor, because offset zero is the start of either a
+        // free or an allocated range.
+ let first_value = tree_iter.next().unwrap();
+ if tree_iter.next().is_some() {
+ // There are never two free ranges next to each other, so if there is more than one
+ // descriptor, then at least one of them must hold an allocated range.
+ return false;
+ }
+ // There is only one descriptor. Return true if it is for a free range.
+ first_value.state.is_none()
+ }
+
+ pub(crate) fn total_size(&self) -> usize {
+ self.size
+ }
+
+ pub(crate) fn free_oneway_space(&self) -> usize {
+ self.free_oneway_space
+ }
+
+ pub(crate) fn count_buffers(&self) -> usize {
+ self.tree
+ .values()
+ .filter(|desc| desc.state.is_some())
+ .count()
+ }
+
+ pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
+ for desc in self.tree.values() {
+ let state = match &desc.state {
+ Some(state) => &state.0,
+ None => continue,
+ };
+ seq_print!(
+ m,
+ " buffer: {} size {} pid {}",
+ desc.offset,
+ desc.size,
+ state.pid(),
+ );
+ if state.is_oneway() {
+ seq_print!(m, " oneway");
+ }
+ match state {
+ DescriptorState::Reserved(_res) => {
+ seq_print!(m, " reserved\n");
+ }
+ DescriptorState::Allocated(_alloc) => {
+ seq_print!(m, " allocated\n");
+ }
+ }
+ }
+ Ok(())
+ }
+
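+    /// Best-fit lookup: `free_tree` is keyed by `(size, offset)`, so the lower
+    /// bound of `(size, 0)` is the smallest free range that can still fit
+    /// `size` bytes.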
+ fn find_best_match(&mut self, size: usize) -> Option<&mut Descriptor<T>> {
+ let free_cursor = self.free_tree.cursor_lower_bound(&(size, 0))?;
+ let ((_, offset), ()) = free_cursor.current();
+ self.tree.get_mut(offset)
+ }
+
+ /// Try to reserve a new buffer, using the provided allocation if necessary.
+ pub(crate) fn reserve_new(
+ &mut self,
+ debug_id: usize,
+ size: usize,
+ is_oneway: bool,
+ pid: Pid,
+ alloc: ReserveNewTreeAlloc<T>,
+ ) -> Result<(usize, bool)> {
+ // Compute new value of free_oneway_space, which is set only on success.
+ let new_oneway_space = if is_oneway {
+ match self.free_oneway_space.checked_sub(size) {
+ Some(new_oneway_space) => new_oneway_space,
+ None => return Err(ENOSPC),
+ }
+ } else {
+ self.free_oneway_space
+ };
+
+ // Start detecting spammers once we have less than 20%
+ // of async space left (which is less than 10% of total
+ // buffer size).
+ //
+        // (This will short-circuit, so `low_oneway_space` is
+ // only called when necessary.)
+ let oneway_spam_detected =
+ is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
+
+ let (found_size, found_off, tree_node, free_tree_node) = match self.find_best_match(size) {
+ None => {
+ pr_warn!("ENOSPC from range_alloc.reserve_new - size: {}", size);
+ return Err(ENOSPC);
+ }
+ Some(desc) => {
+ let found_size = desc.size;
+ let found_offset = desc.offset;
+
+ // In case we need to break up the descriptor
+ let new_desc = Descriptor::new(found_offset + size, found_size - size);
+ let (tree_node, free_tree_node, desc_node_res) = alloc.initialize(new_desc);
+
+ desc.state = Some((
+ DescriptorState::new(is_oneway, debug_id, pid),
+ desc_node_res,
+ ));
+ desc.size = size;
+
+ (found_size, found_offset, tree_node, free_tree_node)
+ }
+ };
+ self.free_oneway_space = new_oneway_space;
+ self.free_tree.remove(&(found_size, found_off));
+
+ if found_size != size {
+ self.tree.insert(tree_node);
+ self.free_tree.insert(free_tree_node);
+ }
+
+ Ok((found_off, oneway_spam_detected))
+ }
+
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+ let mut cursor = self.tree.cursor_lower_bound_mut(&offset).ok_or_else(|| {
+ pr_warn!(
+ "EINVAL from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ EINVAL
+ })?;
+
+ let (_, desc) = cursor.current_mut();
+
+ if desc.offset != offset {
+ pr_warn!(
+ "EINVAL from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ return Err(EINVAL);
+ }
+
+ let (reservation, free_node_res) = desc.try_change_state(|state| match state {
+ Some((DescriptorState::Reserved(reservation), free_node_res)) => {
+ (None, Ok((reservation, free_node_res)))
+ }
+ None => {
+ pr_warn!(
+ "EINVAL from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ (None, Err(EINVAL))
+ }
+ allocated => {
+ pr_warn!(
+ "EPERM from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ (allocated, Err(EPERM))
+ }
+ })?;
+
+ let mut size = desc.size;
+ let mut offset = desc.offset;
+ let free_oneway_space_add = if reservation.is_oneway { size } else { 0 };
+
+ self.free_oneway_space += free_oneway_space_add;
+
+ let mut freed_range = FreedRange::interior_pages(offset, size);
+ // Compute how large the next free region needs to be to include one more page in
+ // the newly freed range.
+ let add_next_page_needed = match (offset + size) % PAGE_SIZE {
+ 0 => usize::MAX,
+ unalign => PAGE_SIZE - unalign,
+ };
+ // Compute how large the previous free region needs to be to include one more page
+ // in the newly freed range.
+ let add_prev_page_needed = match offset % PAGE_SIZE {
+ 0 => usize::MAX,
+ unalign => unalign,
+ };
+
+ // Merge next into current if next is free
+ let remove_next = match cursor.peek_next() {
+ Some((_, next)) if next.state.is_none() => {
+ if next.size >= add_next_page_needed {
+ freed_range.end_page_idx += 1;
+ }
+ self.free_tree.remove(&(next.size, next.offset));
+ size += next.size;
+ true
+ }
+ _ => false,
+ };
+
+ if remove_next {
+ let (_, desc) = cursor.current_mut();
+ desc.size = size;
+ cursor.remove_next();
+ }
+
+ // Merge current into prev if prev is free
+ match cursor.peek_prev_mut() {
+ Some((_, prev)) if prev.state.is_none() => {
+ if prev.size >= add_prev_page_needed {
+ freed_range.start_page_idx -= 1;
+ }
+ // merge previous with current, remove current
+ self.free_tree.remove(&(prev.size, prev.offset));
+ offset = prev.offset;
+ size += prev.size;
+ prev.size = size;
+ cursor.remove_current();
+ }
+ _ => {}
+ };
+
+ self.free_tree
+ .insert(free_node_res.into_node((size, offset), ()));
+
+ Ok(freed_range)
+ }
+
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
+ let desc = self.tree.get_mut(&offset).ok_or(ENOENT)?;
+
+ desc.try_change_state(|state| match state {
+ Some((DescriptorState::Reserved(reservation), free_node_res)) => (
+ Some((
+ DescriptorState::Allocated(reservation.allocate(data.take())),
+ free_node_res,
+ )),
+ Ok(()),
+ ),
+ other => (other, Err(ENOENT)),
+ })
+ }
+
+ /// Takes an entry at the given offset from [`DescriptorState::Allocated`] to
+ /// [`DescriptorState::Reserved`].
+ ///
+ /// Returns the size of the existing entry and the data associated with it.
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
+ let desc = self.tree.get_mut(&offset).ok_or_else(|| {
+ pr_warn!(
+ "ENOENT from range_alloc.reserve_existing - offset: {}",
+ offset
+ );
+ ENOENT
+ })?;
+
+ let (debug_id, data) = desc.try_change_state(|state| match state {
+ Some((DescriptorState::Allocated(allocation), free_node_res)) => {
+ let (reservation, data) = allocation.deallocate();
+ let debug_id = reservation.debug_id;
+ (
+ Some((DescriptorState::Reserved(reservation), free_node_res)),
+ Ok((debug_id, data)),
+ )
+ }
+ other => {
+ pr_warn!(
+ "ENOENT from range_alloc.reserve_existing - offset: {}",
+ offset
+ );
+ (other, Err(ENOENT))
+ }
+ })?;
+
+ Ok((desc.size, debug_id, data))
+ }
+
+ /// Call the provided callback at every allocated region.
+ ///
+ /// This destroys the range allocator. Used only during shutdown.
+ pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
+ for (_, desc) in self.tree.iter_mut() {
+ if let Some((DescriptorState::Allocated(allocation), _)) = &mut desc.state {
+ callback(
+ desc.offset,
+ desc.size,
+ allocation.debug_id(),
+ allocation.take(),
+ );
+ }
+ }
+ }
+
+ /// Find the amount and size of buffers allocated by the current caller.
+ ///
+ /// The idea is that once we cross the threshold, whoever is responsible
+ /// for the low async space is likely to try to send another async transaction,
+ /// and at some point we'll catch them in the act. This is more efficient
+ /// than keeping a map per pid.
+ fn low_oneway_space(&self, calling_pid: Pid) -> bool {
+ let mut total_alloc_size = 0;
+ let mut num_buffers = 0;
+ for (_, desc) in self.tree.iter() {
+ if let Some((state, _)) = &desc.state {
+ if state.is_oneway() && state.pid() == calling_pid {
+ total_alloc_size += desc.size;
+ num_buffers += 1;
+ }
+ }
+ }
+
+ // Warn if this pid has more than 50 transactions, or more than 50% of
+ // async space (which is 25% of total buffer size). Oneway spam is only
+ // detected when the threshold is exceeded.
+ num_buffers > 50 || total_alloc_size > self.size / 4
+ }
+}
+
+type TreeDescriptorState<T> = (DescriptorState<T>, FreeNodeRes);
+struct Descriptor<T> {
+ size: usize,
+ offset: usize,
+ state: Option<TreeDescriptorState<T>>,
+}
+
+impl<T> Descriptor<T> {
+ fn new(offset: usize, size: usize) -> Self {
+ Self {
+ size,
+ offset,
+ state: None,
+ }
+ }
+
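+    /// Takes the state out of `self`, passes it to `f`, and stores the state
+    /// that `f` returns, forwarding `f`'s result to the caller. This keeps the
+    /// "take, inspect, put back" pattern in one place so that no caller can
+    /// forget to restore a state on the error path.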
+ fn try_change_state<F, Data>(&mut self, f: F) -> Result<Data>
+ where
+ F: FnOnce(Option<TreeDescriptorState<T>>) -> (Option<TreeDescriptorState<T>>, Result<Data>),
+ {
+ let (new_state, result) = f(self.state.take());
+ self.state = new_state;
+ result
+ }
+}
+
+// (Descriptor.size, Descriptor.offset)
+type FreeKey = (usize, usize);
+type FreeNodeRes = RBTreeNodeReservation<FreeKey, ()>;
+
+/// An allocation for use by `reserve_new`.
+pub(crate) struct ReserveNewTreeAlloc<T> {
+ tree_node_res: RBTreeNodeReservation<usize, Descriptor<T>>,
+ free_tree_node_res: FreeNodeRes,
+ desc_node_res: FreeNodeRes,
+}
+
+impl<T> ReserveNewTreeAlloc<T> {
+ pub(crate) fn try_new() -> Result<Self> {
+ let tree_node_res = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let free_tree_node_res = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let desc_node_res = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ Ok(Self {
+ tree_node_res,
+ free_tree_node_res,
+ desc_node_res,
+ })
+ }
+
+ fn initialize(
+ self,
+ desc: Descriptor<T>,
+ ) -> (
+ RBTreeNode<usize, Descriptor<T>>,
+ RBTreeNode<FreeKey, ()>,
+ FreeNodeRes,
+ ) {
+ let size = desc.size;
+ let offset = desc.offset;
+ (
+ self.tree_node_res.into_node(offset, desc),
+ self.free_tree_node_res.into_node((size, offset), ()),
+ self.desc_node_res,
+ )
+ }
+}
+
+/// An allocation for creating a tree from an `ArrayRangeAllocator`.
+pub(crate) struct FromArrayAllocs<T> {
+ tree: KVec<RBTreeNodeReservation<usize, Descriptor<T>>>,
+ free_tree: KVec<RBTreeNodeReservation<FreeKey, ()>>,
+}
+
+impl<T> FromArrayAllocs<T> {
+ pub(crate) fn try_new(len: usize) -> Result<Self> {
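+        // An array of `len` allocated ranges converts into at most `len`
+        // descriptors for the allocations, interleaved with up to `len + 1`
+        // free ranges (one gap before each allocation plus one after the
+        // last).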
+ let num_descriptors = 2 * len + 1;
+
+ let mut tree = KVec::with_capacity(num_descriptors, GFP_KERNEL)?;
+ for _ in 0..num_descriptors {
+ tree.push(RBTreeNodeReservation::new(GFP_KERNEL)?, GFP_KERNEL)?;
+ }
+
+ let mut free_tree = KVec::with_capacity(num_descriptors, GFP_KERNEL)?;
+ for _ in 0..num_descriptors {
+ free_tree.push(RBTreeNodeReservation::new(GFP_KERNEL)?, GFP_KERNEL)?;
+ }
+
+ Ok(Self { tree, free_tree })
+ }
+}
diff --git a/drivers/android/binder/rust_binder.h b/drivers/android/binder/rust_binder.h
new file mode 100644
index 000000000000..31806890ed1a
--- /dev/null
+++ b/drivers/android/binder/rust_binder.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 Google, Inc.
+ */
+
+#ifndef _LINUX_RUST_BINDER_H
+#define _LINUX_RUST_BINDER_H
+
+#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
+
+/*
+ * These symbols are exposed by `rust_binderfs.c` and exist here so that Rust
+ * Binder can call them.
+ */
+int init_rust_binderfs(void);
+
+struct dentry;
+struct inode;
+struct dentry *rust_binderfs_create_proc_file(struct inode *nodp, int pid);
+void rust_binderfs_remove_file(struct dentry *dentry);
+
+#endif
diff --git a/drivers/android/binder/rust_binder_events.c b/drivers/android/binder/rust_binder_events.c
new file mode 100644
index 000000000000..488b1470060c
--- /dev/null
+++ b/drivers/android/binder/rust_binder_events.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* rust_binder_events.c
+ *
+ * Rust Binder tracepoints.
+ *
+ * Copyright 2025 Google LLC
+ */
+
+#include "rust_binder.h"
+
+const char * const binder_command_strings[] = {
+ "BC_TRANSACTION",
+ "BC_REPLY",
+ "BC_ACQUIRE_RESULT",
+ "BC_FREE_BUFFER",
+ "BC_INCREFS",
+ "BC_ACQUIRE",
+ "BC_RELEASE",
+ "BC_DECREFS",
+ "BC_INCREFS_DONE",
+ "BC_ACQUIRE_DONE",
+ "BC_ATTEMPT_ACQUIRE",
+ "BC_REGISTER_LOOPER",
+ "BC_ENTER_LOOPER",
+ "BC_EXIT_LOOPER",
+ "BC_REQUEST_DEATH_NOTIFICATION",
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ "BC_DEAD_BINDER_DONE",
+ "BC_TRANSACTION_SG",
+ "BC_REPLY_SG",
+};
+
+const char * const binder_return_strings[] = {
+ "BR_ERROR",
+ "BR_OK",
+ "BR_TRANSACTION",
+ "BR_REPLY",
+ "BR_ACQUIRE_RESULT",
+ "BR_DEAD_REPLY",
+ "BR_TRANSACTION_COMPLETE",
+ "BR_INCREFS",
+ "BR_ACQUIRE",
+ "BR_RELEASE",
+ "BR_DECREFS",
+ "BR_ATTEMPT_ACQUIRE",
+ "BR_NOOP",
+ "BR_SPAWN_LOOPER",
+ "BR_FINISHED",
+ "BR_DEAD_BINDER",
+ "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+ "BR_FAILED_REPLY",
+ "BR_FROZEN_REPLY",
+ "BR_ONEWAY_SPAM_SUSPECT",
+ "BR_TRANSACTION_PENDING_FROZEN"
+};
+
+#define CREATE_TRACE_POINTS
+#define CREATE_RUST_TRACE_POINTS
+#include "rust_binder_events.h"
diff --git a/drivers/android/binder/rust_binder_events.h b/drivers/android/binder/rust_binder_events.h
new file mode 100644
index 000000000000..2f3efbf9dba6
--- /dev/null
+++ b/drivers/android/binder/rust_binder_events.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 Google, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#undef TRACE_INCLUDE_FILE
+#undef TRACE_INCLUDE_PATH
+#define TRACE_SYSTEM rust_binder
+#define TRACE_INCLUDE_FILE rust_binder_events
+#define TRACE_INCLUDE_PATH ../drivers/android/binder
+
+#if !defined(_RUST_BINDER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RUST_BINDER_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(rust_binder_ioctl,
+ TP_PROTO(unsigned int cmd, unsigned long arg),
+ TP_ARGS(cmd, arg),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cmd)
+ __field(unsigned long, arg)
+ ),
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->arg = arg;
+ ),
+ TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg)
+);
+
+#endif /* _RUST_BINDER_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/android/binder/rust_binder_internal.h b/drivers/android/binder/rust_binder_internal.h
new file mode 100644
index 000000000000..78288fe7964d
--- /dev/null
+++ b/drivers/android/binder/rust_binder_internal.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* rust_binder_internal.h
+ *
+ * This file contains internal data structures used by Rust Binder. Mostly,
+ * these are type definitions used only by binderfs or things that Rust Binder
+ * defines and exports to binderfs.
+ *
+ * It does not include things exported by binderfs to Rust Binder since this
+ * file is not included as input to bindgen.
+ *
+ * Copyright (C) 2025 Google LLC.
+ */
+
+#ifndef _LINUX_RUST_BINDER_INTERNAL_H
+#define _LINUX_RUST_BINDER_INTERNAL_H
+
+#define RUST_BINDERFS_SUPER_MAGIC 0x6c6f6f71
+
+#include <linux/seq_file.h>
+#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
+
+/*
+ * The internal data types in the Rust Binder driver are opaque to C, so we use
+ * void pointer typedefs for these types.
+ */
+typedef void *rust_binder_context;
+
+/**
+ * struct binder_device - information about a binder device node
+ * @minor: the minor number used by this device
+ * @ctx: the Rust Context used by this device, or null for binder-control
+ *
+ * This is used as the private data for files directly in binderfs, but not
+ * files in the binder_logs subdirectory. This struct owns a refcount on `ctx`
+ * and the entry for `minor` in `binderfs_minors`. For binder-control `ctx` is
+ * null.
+ */
+struct binder_device {
+ int minor;
+ rust_binder_context ctx;
+};
+
+int rust_binder_stats_show(struct seq_file *m, void *unused);
+int rust_binder_state_show(struct seq_file *m, void *unused);
+int rust_binder_transactions_show(struct seq_file *m, void *unused);
+int rust_binder_proc_show(struct seq_file *m, void *pid);
+
+extern const struct file_operations rust_binder_fops;
+rust_binder_context rust_binder_new_context(char *name);
+void rust_binder_remove_context(rust_binder_context device);
+
+/**
+ * binderfs_mount_opts - mount options for binderfs
+ * @max: maximum number of allocatable binderfs binder devices
+ * @stats_mode: enable binder stats in binderfs.
+ */
+struct binderfs_mount_opts {
+ int max;
+ int stats_mode;
+};
+
+/**
+ * binderfs_info - information about a binderfs mount
+ * @ipc_ns: The ipc namespace the binderfs mount belongs to.
+ * @control_dentry: This records the dentry of this binderfs mount
+ * binder-control device.
+ * @root_uid: uid that needs to be used when a new binder device is
+ * created.
+ * @root_gid: gid that needs to be used when a new binder device is
+ * created.
+ * @mount_opts: The mount options in use.
+ * @device_count: The current number of allocated binder devices.
+ * @proc_log_dir: Pointer to the directory dentry containing process-specific
+ * logs.
+ */
+struct binderfs_info {
+ struct ipc_namespace *ipc_ns;
+ struct dentry *control_dentry;
+ kuid_t root_uid;
+ kgid_t root_gid;
+ struct binderfs_mount_opts mount_opts;
+ int device_count;
+ struct dentry *proc_log_dir;
+};
+
+#endif /* _LINUX_RUST_BINDER_INTERNAL_H */
diff --git a/drivers/android/binder/rust_binder_main.rs b/drivers/android/binder/rust_binder_main.rs
new file mode 100644
index 000000000000..c79a9e742240
--- /dev/null
+++ b/drivers/android/binder/rust_binder_main.rs
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Binder -- the Android IPC mechanism.
+#![recursion_limit = "256"]
+#![allow(
+ clippy::as_underscore,
+ clippy::ref_as_ptr,
+ clippy::ptr_as_ptr,
+ clippy::cast_lossless
+)]
+
+use kernel::{
+ bindings::{self, seq_file},
+ fs::File,
+ list::{ListArc, ListArcSafe, ListLinksSelfPtr, TryNewListArc},
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ sync::poll::PollTable,
+ sync::Arc,
+ task::Pid,
+ transmute::AsBytes,
+ types::ForeignOwnable,
+ uaccess::UserSliceWriter,
+};
+
+use crate::{context::Context, page_range::Shrinker, process::Process, thread::Thread};
+
+use core::{
+ ptr::NonNull,
+ sync::atomic::{AtomicBool, AtomicUsize, Ordering},
+};
+
+mod allocation;
+mod context;
+mod deferred_close;
+mod defs;
+mod error;
+mod node;
+mod page_range;
+mod process;
+mod range_alloc;
+mod stats;
+mod thread;
+mod trace;
+mod transaction;
+
+#[allow(warnings)] // generated bindgen code
+mod binderfs {
+ use kernel::bindings::{dentry, inode};
+
+ extern "C" {
+ pub fn init_rust_binderfs() -> kernel::ffi::c_int;
+ }
+ extern "C" {
+ pub fn rust_binderfs_create_proc_file(
+ nodp: *mut inode,
+ pid: kernel::ffi::c_int,
+ ) -> *mut dentry;
+ }
+ extern "C" {
+ pub fn rust_binderfs_remove_file(dentry: *mut dentry);
+ }
+ pub type rust_binder_context = *mut kernel::ffi::c_void;
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct binder_device {
+ pub minor: kernel::ffi::c_int,
+ pub ctx: rust_binder_context,
+ }
+ impl Default for binder_device {
+ fn default() -> Self {
+ let mut s = ::core::mem::MaybeUninit::<Self>::uninit();
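+            // SAFETY: All zeroes is a valid bit pattern for `binder_device`,
+            // which only contains an integer and a raw pointer.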
+ unsafe {
+ ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
+ s.assume_init()
+ }
+ }
+ }
+}
+
+module! {
+ type: BinderModule,
+ name: "rust_binder",
+ authors: ["Wedson Almeida Filho", "Alice Ryhl"],
+ description: "Android Binder",
+ license: "GPL",
+}
+
+fn next_debug_id() -> usize {
+ static NEXT_DEBUG_ID: AtomicUsize = AtomicUsize::new(0);
+
+ NEXT_DEBUG_ID.fetch_add(1, Ordering::Relaxed)
+}
+
+/// Provides a single place to write Binder return values via the
+/// supplied `UserSliceWriter`.
+pub(crate) struct BinderReturnWriter<'a> {
+ writer: UserSliceWriter,
+ thread: &'a Thread,
+}
+
+impl<'a> BinderReturnWriter<'a> {
+ fn new(writer: UserSliceWriter, thread: &'a Thread) -> Self {
+ BinderReturnWriter { writer, thread }
+ }
+
+ /// Write a return code back to user space.
+ /// Should be a `BR_` constant from [`defs`] e.g. [`defs::BR_TRANSACTION_COMPLETE`].
+ fn write_code(&mut self, code: u32) -> Result {
+ stats::GLOBAL_STATS.inc_br(code);
+ self.thread.process.stats.inc_br(code);
+ self.writer.write(&code)
+ }
+
+ /// Write something *other than* a return code to user space.
+ fn write_payload<T: AsBytes>(&mut self, payload: &T) -> Result {
+ self.writer.write(payload)
+ }
+
+ fn len(&self) -> usize {
+ self.writer.len()
+ }
+}
+
+/// Specifies how a type should be delivered to the read part of a BINDER_WRITE_READ ioctl.
+///
+/// When a value is pushed to the todo list for a process or thread, it is stored as a trait object
+/// with the type `Arc<dyn DeliverToRead>`. Trait objects provide dynamic dispatch, which lets us
+/// store work items of many different concrete types in the same todo list.
+trait DeliverToRead: ListArcSafe + Send + Sync {
+ /// Performs work. Returns true if remaining work items in the queue should be processed
+    /// immediately, or false if it should return to the caller before processing additional work
+ /// items.
+ fn do_work(
+ self: DArc<Self>,
+ thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool>;
+
+ /// Cancels the given work item. This is called instead of [`DeliverToRead::do_work`] when work
+ /// won't be delivered.
+ fn cancel(self: DArc<Self>);
+
+ /// Should we use `wake_up_interruptible_sync` or `wake_up_interruptible` when scheduling this
+ /// work item?
+ ///
+ /// Generally only set to true for non-oneway transactions.
+ fn should_sync_wakeup(&self) -> bool;
+
+ fn debug_print(&self, m: &SeqFile, prefix: &str, transaction_prefix: &str) -> Result<()>;
+}
+
+// Wrapper around a `DeliverToRead` with linked list links.
+#[pin_data]
+struct DTRWrap<T: ?Sized> {
+ #[pin]
+ links: ListLinksSelfPtr<DTRWrap<dyn DeliverToRead>>,
+ #[pin]
+ wrapped: T,
+}
+kernel::list::impl_list_arc_safe! {
+ impl{T: ListArcSafe + ?Sized} ListArcSafe<0> for DTRWrap<T> {
+ tracked_by wrapped: T;
+ }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for DTRWrap<dyn DeliverToRead> {
+ using ListLinksSelfPtr { self.links };
+ }
+}
+
+impl<T: ?Sized> core::ops::Deref for DTRWrap<T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ &self.wrapped
+ }
+}
+
+type DArc<T> = kernel::sync::Arc<DTRWrap<T>>;
+type DLArc<T> = kernel::list::ListArc<DTRWrap<T>>;
+
+impl<T: ListArcSafe> DTRWrap<T> {
+ fn new(val: impl PinInit<T>) -> impl PinInit<Self> {
+ pin_init!(Self {
+ links <- ListLinksSelfPtr::new(),
+ wrapped <- val,
+ })
+ }
+
+ fn arc_try_new(val: T) -> Result<DLArc<T>, kernel::alloc::AllocError> {
+ ListArc::pin_init(
+ try_pin_init!(Self {
+ links <- ListLinksSelfPtr::new(),
+ wrapped: val,
+ }),
+ GFP_KERNEL,
+ )
+ .map_err(|_| kernel::alloc::AllocError)
+ }
+
+ fn arc_pin_init(init: impl PinInit<T>) -> Result<DLArc<T>, kernel::error::Error> {
+ ListArc::pin_init(
+ try_pin_init!(Self {
+ links <- ListLinksSelfPtr::new(),
+ wrapped <- init,
+ }),
+ GFP_KERNEL,
+ )
+ }
+}
+
+struct DeliverCode {
+ code: u32,
+ skip: AtomicBool,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for DeliverCode { untracked; }
+}
+
+impl DeliverCode {
+ fn new(code: u32) -> Self {
+ Self {
+ code,
+ skip: AtomicBool::new(false),
+ }
+ }
+
+ /// Disable this DeliverCode and make it do nothing.
+ ///
+ /// This is used instead of removing it from the work list, since `LinkedList::remove` is
+ /// unsafe, whereas this method is not.
+ fn skip(&self) {
+ self.skip.store(true, Ordering::Relaxed);
+ }
+}
+
+impl DeliverToRead for DeliverCode {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ if !self.skip.load(Ordering::Relaxed) {
+ writer.write_code(self.code)?;
+ }
+ Ok(true)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(m, "{}", prefix);
+ if self.skip.load(Ordering::Relaxed) {
+ seq_print!(m, "(skipped) ");
+ }
+ if self.code == defs::BR_TRANSACTION_COMPLETE {
+ seq_print!(m, "transaction complete\n");
+ } else {
+ seq_print!(m, "transaction error: {}\n", self.code);
+ }
+ Ok(())
+ }
+}
+
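+/// Rounds `value` up to the next multiple of the pointer size, returning
+/// `None` on overflow. For example, on a 64-bit target, `ptr_align(13)` and
+/// `ptr_align(16)` both evaluate to `Some(16)`.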
+fn ptr_align(value: usize) -> Option<usize> {
+ let size = core::mem::size_of::<usize>() - 1;
+ Some(value.checked_add(size)? & !size)
+}
+
+// SAFETY: We call register in `init`.
+static BINDER_SHRINKER: Shrinker = unsafe { Shrinker::new() };
+
+struct BinderModule {}
+
+impl kernel::Module for BinderModule {
+ fn init(_module: &'static kernel::ThisModule) -> Result<Self> {
+ // SAFETY: The module initializer never runs twice, so we only call this once.
+ unsafe { crate::context::CONTEXTS.init() };
+
+        pr_warn!("Loaded Rust Binder.\n");
+
+ BINDER_SHRINKER.register(kernel::c_str!("android-binder"))?;
+
+ // SAFETY: The module is being loaded, so we can initialize binderfs.
+ unsafe { kernel::error::to_result(binderfs::init_rust_binderfs())? };
+
+ Ok(Self {})
+ }
+}
+
+/// Makes the inner type Sync.
+#[repr(transparent)]
+pub struct AssertSync<T>(T);
+// SAFETY: Used only to insert `file_operations` into a global, which is safe.
+unsafe impl<T> Sync for AssertSync<T> {}
+
+/// File operations that rust_binderfs.c can use.
+#[no_mangle]
+#[used]
+pub static rust_binder_fops: AssertSync<kernel::bindings::file_operations> = {
+ // SAFETY: All zeroes is safe for the `file_operations` type.
+ let zeroed_ops = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
+
+ let ops = kernel::bindings::file_operations {
+ owner: THIS_MODULE.as_ptr(),
+ poll: Some(rust_binder_poll),
+ unlocked_ioctl: Some(rust_binder_ioctl),
+ compat_ioctl: Some(bindings::compat_ptr_ioctl),
+ mmap: Some(rust_binder_mmap),
+ open: Some(rust_binder_open),
+ release: Some(rust_binder_release),
+ flush: Some(rust_binder_flush),
+ ..zeroed_ops
+ };
+ AssertSync(ops)
+};
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_new_context(
+ name: *const kernel::ffi::c_char,
+) -> *mut kernel::ffi::c_void {
+ // SAFETY: The caller will always provide a valid c string here.
+ let name = unsafe { kernel::str::CStr::from_char_ptr(name) };
+ match Context::new(name) {
+ Ok(ctx) => Arc::into_foreign(ctx),
+ Err(_err) => core::ptr::null_mut(),
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_remove_context(device: *mut kernel::ffi::c_void) {
+ if !device.is_null() {
+ // SAFETY: The caller ensures that the `device` pointer came from a previous call to
+        // `rust_binder_new_context`.
+ let ctx = unsafe { Arc::<Context>::from_foreign(device) };
+ ctx.deregister();
+ drop(ctx);
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_open(
+ inode: *mut bindings::inode,
+ file_ptr: *mut bindings::file,
+) -> kernel::ffi::c_int {
+ // SAFETY: The `rust_binderfs.c` file ensures that `i_private` is set to a
+ // `struct binder_device`.
+ let device = unsafe { (*inode).i_private } as *const binderfs::binder_device;
+
+ assert!(!device.is_null());
+
+ // SAFETY: The `rust_binderfs.c` file ensures that `device->ctx` holds a binder context when
+ // using the rust binder fops.
+ let ctx = unsafe { Arc::<Context>::borrow((*device).ctx) };
+
+ // SAFETY: The caller provides a valid file pointer to a new `struct file`.
+ let file = unsafe { File::from_raw_file(file_ptr) };
+ let process = match Process::open(ctx, file) {
+ Ok(process) => process,
+ Err(err) => return err.to_errno(),
+ };
+
+ // SAFETY: This is an `inode` for a newly created binder file.
+ match unsafe { BinderfsProcFile::new(inode, process.task.pid()) } {
+ Ok(Some(file)) => process.inner.lock().binderfs_file = Some(file),
+ Ok(None) => { /* pid already exists */ }
+ Err(err) => return err.to_errno(),
+ }
+
+ // SAFETY: This file is associated with Rust binder, so we own the `private_data` field.
+ unsafe { (*file_ptr).private_data = process.into_foreign() };
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_release(
+ _inode: *mut bindings::inode,
+ file: *mut bindings::file,
+) -> kernel::ffi::c_int {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let process = unsafe { Arc::<Process>::from_foreign((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ let file = unsafe { File::from_raw_file(file) };
+ Process::release(process, file);
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_ioctl(
+ file: *mut bindings::file,
+ cmd: kernel::ffi::c_uint,
+ arg: kernel::ffi::c_ulong,
+) -> kernel::ffi::c_long {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ match Process::ioctl(f, unsafe { File::from_raw_file(file) }, cmd as _, arg as _) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno() as isize,
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_mmap(
+ file: *mut bindings::file,
+ vma: *mut bindings::vm_area_struct,
+) -> kernel::ffi::c_int {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the vma is valid.
+ let area = unsafe { kernel::mm::virt::VmaNew::from_raw(vma) };
+ // SAFETY: The caller ensures that the file is valid.
+ match Process::mmap(f, unsafe { File::from_raw_file(file) }, area) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno(),
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_poll(
+ file: *mut bindings::file,
+ wait: *mut bindings::poll_table_struct,
+) -> bindings::__poll_t {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ let fileref = unsafe { File::from_raw_file(file) };
+ // SAFETY: The caller ensures that the `PollTable` is valid.
+ match Process::poll(f, fileref, unsafe { PollTable::from_raw(wait) }) {
+ Ok(v) => v,
+ Err(_) => bindings::POLLERR,
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_flush(
+ file: *mut bindings::file,
+ _id: bindings::fl_owner_t,
+) -> kernel::ffi::c_int {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ match Process::flush(f) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno(),
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_stats_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_stats_show_impl(m) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_state_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_state_show_impl(m) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_proc_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: Accessing the private field of `seq_file` is okay.
+ let pid = (unsafe { (*ptr).private }) as usize as Pid;
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_proc_show_impl(m, pid) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_transactions_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_transactions_show_impl(m) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+fn rust_binder_transactions_show_impl(m: &SeqFile) -> Result<()> {
+ seq_print!(m, "binder transactions:\n");
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_all_procs()?;
+ for proc in procs {
+ proc.debug_print(m, &ctx, false)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+fn rust_binder_stats_show_impl(m: &SeqFile) -> Result<()> {
+ seq_print!(m, "binder stats:\n");
+ stats::GLOBAL_STATS.debug_print("", m);
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_all_procs()?;
+ for proc in procs {
+ proc.debug_print_stats(m, &ctx)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+fn rust_binder_state_show_impl(m: &SeqFile) -> Result<()> {
+ seq_print!(m, "binder state:\n");
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_all_procs()?;
+ for proc in procs {
+ proc.debug_print(m, &ctx, true)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+fn rust_binder_proc_show_impl(m: &SeqFile, pid: Pid) -> Result<()> {
+ seq_print!(m, "binder proc state:\n");
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_procs_with_pid(pid)?;
+ for proc in procs {
+ proc.debug_print(m, &ctx, true)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+struct BinderfsProcFile(NonNull<bindings::dentry>);
+
+// SAFETY: Safe to drop any thread.
+unsafe impl Send for BinderfsProcFile {}
+
+impl BinderfsProcFile {
+ /// # Safety
+ ///
+ /// Takes an inode from a newly created binder file.
+ unsafe fn new(nodp: *mut bindings::inode, pid: i32) -> Result<Option<Self>> {
+ // SAFETY: The caller passes an `inode` for a newly created binder file.
+ let dentry = unsafe { binderfs::rust_binderfs_create_proc_file(nodp, pid) };
+ match kernel::error::from_err_ptr(dentry) {
+ Ok(dentry) => Ok(NonNull::new(dentry).map(Self)),
+ Err(err) if err == EEXIST => Ok(None),
+ Err(err) => Err(err),
+ }
+ }
+}
+
+impl Drop for BinderfsProcFile {
+ fn drop(&mut self) {
+        // SAFETY: This is a dentry returned by `rust_binderfs_create_proc_file` that has not been removed yet.
+ unsafe { binderfs::rust_binderfs_remove_file(self.0.as_ptr()) };
+ }
+}
diff --git a/drivers/android/binder/rust_binderfs.c b/drivers/android/binder/rust_binderfs.c
new file mode 100644
index 000000000000..c69026df775c
--- /dev/null
+++ b/drivers/android/binder/rust_binderfs.c
@@ -0,0 +1,795 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/compiler_types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/fsnotify.h>
+#include <linux/gfp.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/ipc_namespace.h>
+#include <linux/kdev_t.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/namei.h>
+#include <linux/magic.h>
+#include <linux/major.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mount.h>
+#include <linux/fs_parser.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/user_namespace.h>
+#include <linux/xarray.h>
+#include <uapi/asm-generic/errno-base.h>
+#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
+
+#include "rust_binder.h"
+#include "rust_binder_internal.h"
+
+#define FIRST_INODE 1
+#define SECOND_INODE 2
+#define INODE_OFFSET 3
+#define BINDERFS_MAX_MINOR (1U << MINORBITS)
+/* Ensure that the initial ipc namespace always has devices available. */
+#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4)
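+/*
+ * With MINORBITS == 20 this permits up to 1048576 minors per major; the
+ * capped limit keeps four minors in reserve for the initial ipc namespace.
+ */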
+
+DEFINE_SHOW_ATTRIBUTE(rust_binder_stats);
+DEFINE_SHOW_ATTRIBUTE(rust_binder_state);
+DEFINE_SHOW_ATTRIBUTE(rust_binder_transactions);
+DEFINE_SHOW_ATTRIBUTE(rust_binder_proc);
+
+char *rust_binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
+module_param_named(rust_devices, rust_binder_devices_param, charp, 0444);
+
+static dev_t binderfs_dev;
+static DEFINE_MUTEX(binderfs_minors_mutex);
+static DEFINE_IDA(binderfs_minors);
+
+enum binderfs_param {
+ Opt_max,
+ Opt_stats_mode,
+};
+
+enum binderfs_stats_mode {
+ binderfs_stats_mode_unset,
+ binderfs_stats_mode_global,
+};
+
+struct binder_features {
+ bool oneway_spam_detection;
+ bool extended_error;
+ bool freeze_notification;
+};
+
+static const struct constant_table binderfs_param_stats[] = {
+ { "global", binderfs_stats_mode_global },
+ {}
+};
+
+static const struct fs_parameter_spec binderfs_fs_parameters[] = {
+ fsparam_u32("max", Opt_max),
+ fsparam_enum("stats", Opt_stats_mode, binderfs_param_stats),
+ {}
+};
+
+static struct binder_features binder_features = {
+ .oneway_spam_detection = true,
+ .extended_error = true,
+ .freeze_notification = true,
+};
+
+static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
+/**
+ * binderfs_binder_device_create - allocate inode from super block of a
+ * binderfs mount
+ * @ref_inode: inode from which the super block will be taken
+ * @userp: buffer to copy information about new device for userspace to
+ * @req: struct binderfs_device as copied from userspace
+ *
+ * This function allocates a new binder_device and reserves a new minor
+ * number for it.
+ * Minor numbers are limited and tracked globally in binderfs_minors. The
+ * function will stash a struct binder_device for the specific binder
+ * device in i_private of the inode.
+ * It will go on to allocate a new inode from the super block of the
+ * filesystem mount, stash a struct binder_device in its i_private field
+ * and attach a dentry to that inode.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int binderfs_binder_device_create(struct inode *ref_inode,
+ struct binderfs_device __user *userp,
+ struct binderfs_device *req)
+{
+ int minor, ret;
+ struct dentry *dentry, *root;
+ struct binder_device *device = NULL;
+ rust_binder_context ctx = NULL;
+ struct inode *inode = NULL;
+ struct super_block *sb = ref_inode->i_sb;
+ struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+ bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+ bool use_reserve = true;
+#endif
+
+ /* Reserve new minor number for the new device. */
+ mutex_lock(&binderfs_minors_mutex);
+ if (++info->device_count <= info->mount_opts.max)
+ minor = ida_alloc_max(&binderfs_minors,
+ use_reserve ? BINDERFS_MAX_MINOR :
+ BINDERFS_MAX_MINOR_CAPPED,
+ GFP_KERNEL);
+ else
+ minor = -ENOSPC;
+ if (minor < 0) {
+ --info->device_count;
+ mutex_unlock(&binderfs_minors_mutex);
+ return minor;
+ }
+ mutex_unlock(&binderfs_minors_mutex);
+
+ ret = -ENOMEM;
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ goto err;
+
+ req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
+
+ ctx = rust_binder_new_context(req->name);
+ if (!ctx)
+ goto err;
+
+ inode = new_inode(sb);
+ if (!inode)
+ goto err;
+
+ inode->i_ino = minor + INODE_OFFSET;
+ simple_inode_init_ts(inode);
+ init_special_inode(inode, S_IFCHR | 0600,
+ MKDEV(MAJOR(binderfs_dev), minor));
+ inode->i_fop = &rust_binder_fops;
+ inode->i_uid = info->root_uid;
+ inode->i_gid = info->root_gid;
+
+ req->major = MAJOR(binderfs_dev);
+ req->minor = minor;
+ device->ctx = ctx;
+ device->minor = minor;
+
+ if (userp && copy_to_user(userp, req, sizeof(*req))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ root = sb->s_root;
+ dentry = simple_start_creating(root, req->name);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto err;
+ }
+
+ inode->i_private = device;
+ d_make_persistent(dentry, inode);
+
+ fsnotify_create(root->d_inode, dentry);
+ simple_done_creating(dentry);
+
+ return 0;
+
+err:
+ kfree(device);
+ rust_binder_remove_context(ctx);
+ mutex_lock(&binderfs_minors_mutex);
+ --info->device_count;
+ ida_free(&binderfs_minors, minor);
+ mutex_unlock(&binderfs_minors_mutex);
+ iput(inode);
+
+ return ret;
+}
+
+/**
+ * binder_ctl_ioctl - handle binder device node allocation requests
+ *
+ * The request handler for the binder-control device. All requests operate on
+ * the binderfs mount the binder-control device resides in:
+ * - BINDER_CTL_ADD
+ * Allocate a new binder device.
+ *
+ * Return: %0 on success, negative errno on failure.
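+ *
+ * Illustrative userspace usage (a sketch; the mount point shown and the
+ * omitted error handling are assumptions):
+ *
+ *	struct binderfs_device req = { .name = "my-binder" };
+ *	int fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
+ *
+ *	ioctl(fd, BINDER_CTL_ADD, &req);
+ *
+ * On success, req.major and req.minor describe the new device node.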
+ */
+static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = -EINVAL;
+ struct inode *inode = file_inode(file);
+ struct binderfs_device __user *device = (struct binderfs_device __user *)arg;
+ struct binderfs_device device_req;
+
+ switch (cmd) {
+ case BINDER_CTL_ADD:
+ ret = copy_from_user(&device_req, device, sizeof(device_req));
+ if (ret) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = binderfs_binder_device_create(inode, device, &device_req);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void binderfs_evict_inode(struct inode *inode)
+{
+ struct binder_device *device = inode->i_private;
+ struct binderfs_info *info = BINDERFS_SB(inode->i_sb);
+
+ clear_inode(inode);
+
+ if (!S_ISCHR(inode->i_mode) || !device)
+ return;
+
+ mutex_lock(&binderfs_minors_mutex);
+ --info->device_count;
+ ida_free(&binderfs_minors, device->minor);
+ mutex_unlock(&binderfs_minors_mutex);
+
+ /* ctx is null for binder-control, but this function ignores null pointers */
+ rust_binder_remove_context(device->ctx);
+
+ kfree(device);
+}
+
+static int binderfs_fs_context_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ int opt;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct fs_parse_result result;
+
+ opt = fs_parse(fc, binderfs_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_max:
+ if (result.uint_32 > BINDERFS_MAX_MINOR)
+ return invalfc(fc, "Bad value for '%s'", param->key);
+
+ ctx->max = result.uint_32;
+ break;
+ case Opt_stats_mode:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ctx->stats_mode = result.uint_32;
+ break;
+ default:
+ return invalfc(fc, "Unsupported parameter '%s'", param->key);
+ }
+
+ return 0;
+}
+
+static int binderfs_fs_context_reconfigure(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct binderfs_info *info = BINDERFS_SB(fc->root->d_sb);
+
+ if (info->mount_opts.stats_mode != ctx->stats_mode)
+ return invalfc(fc, "Binderfs stats mode cannot be changed during a remount");
+
+ info->mount_opts.stats_mode = ctx->stats_mode;
+ info->mount_opts.max = ctx->max;
+ return 0;
+}
+
+static int binderfs_show_options(struct seq_file *seq, struct dentry *root)
+{
+ struct binderfs_info *info = BINDERFS_SB(root->d_sb);
+
+ if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
+ seq_printf(seq, ",max=%d", info->mount_opts.max);
+
+ switch (info->mount_opts.stats_mode) {
+ case binderfs_stats_mode_unset:
+ break;
+ case binderfs_stats_mode_global:
+ seq_puts(seq, ",stats=global");
+ break;
+ }
+
+ return 0;
+}
+
+static const struct super_operations binderfs_super_ops = {
+ .evict_inode = binderfs_evict_inode,
+ .show_options = binderfs_show_options,
+ .statfs = simple_statfs,
+};
+
+static inline bool is_binderfs_control_device(const struct dentry *dentry)
+{
+ struct binderfs_info *info = dentry->d_sb->s_fs_info;
+
+ return info->control_dentry == dentry;
+}
+
+static int binderfs_rename(struct mnt_idmap *idmap,
+ struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ if (is_binderfs_control_device(old_dentry) ||
+ is_binderfs_control_device(new_dentry))
+ return -EPERM;
+
+ return simple_rename(idmap, old_dir, old_dentry, new_dir,
+ new_dentry, flags);
+}
+
+static int binderfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ if (is_binderfs_control_device(dentry))
+ return -EPERM;
+
+ return simple_unlink(dir, dentry);
+}
+
+static const struct file_operations binder_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = nonseekable_open,
+ .unlocked_ioctl = binder_ctl_ioctl,
+ .compat_ioctl = binder_ctl_ioctl,
+ .llseek = noop_llseek,
+};
+
+/**
+ * binderfs_binder_ctl_create - create a new binder-control device
+ * @sb: super block of the binderfs mount
+ *
+ * This function creates a new binder-control device node in the binderfs mount
+ * referred to by @sb.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int binderfs_binder_ctl_create(struct super_block *sb)
+{
+ int minor, ret;
+ struct dentry *dentry;
+ struct binder_device *device;
+ struct inode *inode = NULL;
+ struct dentry *root = sb->s_root;
+ struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+ bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+ bool use_reserve = true;
+#endif
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+ /* If we have already created a binder-control node, return. */
+ if (info->control_dentry) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = -ENOMEM;
+ inode = new_inode(sb);
+ if (!inode)
+ goto out;
+
+ /* Reserve a new minor number for the new device. */
+ mutex_lock(&binderfs_minors_mutex);
+ minor = ida_alloc_max(&binderfs_minors,
+ use_reserve ? BINDERFS_MAX_MINOR :
+ BINDERFS_MAX_MINOR_CAPPED,
+ GFP_KERNEL);
+ mutex_unlock(&binderfs_minors_mutex);
+ if (minor < 0) {
+ ret = minor;
+ goto out;
+ }
+
+ inode->i_ino = SECOND_INODE;
+ simple_inode_init_ts(inode);
+ init_special_inode(inode, S_IFCHR | 0600,
+ MKDEV(MAJOR(binderfs_dev), minor));
+ inode->i_fop = &binder_ctl_fops;
+ inode->i_uid = info->root_uid;
+ inode->i_gid = info->root_gid;
+
+ device->minor = minor;
+ device->ctx = NULL;
+
+ dentry = d_alloc_name(root, "binder-control");
+ if (!dentry)
+ goto out;
+
+ inode->i_private = device;
+ info->control_dentry = dentry;
+ d_add(dentry, inode);
+
+ return 0;
+
+out:
+ kfree(device);
+ iput(inode);
+
+ return ret;
+}
+
+static const struct inode_operations binderfs_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .rename = binderfs_rename,
+ .unlink = binderfs_unlink,
+};
+
+static struct inode *binderfs_make_inode(struct super_block *sb, int mode)
+{
+ struct inode *ret;
+
+ ret = new_inode(sb);
+ if (ret) {
+ ret->i_ino = iunique(sb, BINDERFS_MAX_MINOR + INODE_OFFSET);
+ ret->i_mode = mode;
+ simple_inode_init_ts(ret);
+ }
+ return ret;
+}
+
+void rust_binderfs_remove_file(struct dentry *dentry)
+{
+ simple_recursive_removal(dentry, NULL);
+}
+
+static struct dentry *rust_binderfs_create_file(struct dentry *parent, const char *name,
+ const struct file_operations *fops,
+ void *data)
+{
+ struct dentry *dentry;
+ struct inode *new_inode;
+
+ new_inode = binderfs_make_inode(parent->d_sb, S_IFREG | 0444);
+ if (!new_inode)
+ return ERR_PTR(-ENOMEM);
+ new_inode->i_fop = fops;
+ new_inode->i_private = data;
+
+ dentry = simple_start_creating(parent, name);
+ if (IS_ERR(dentry)) {
+ iput(new_inode);
+ return dentry;
+ }
+
+ d_make_persistent(dentry, new_inode);
+ fsnotify_create(parent->d_inode, dentry);
+ simple_done_creating(dentry);
+ return dentry;
+}
+
+struct dentry *rust_binderfs_create_proc_file(struct inode *nodp, int pid)
+{
+ struct binderfs_info *info = nodp->i_sb->s_fs_info;
+ struct dentry *dir = info->proc_log_dir;
+ char strbuf[20 + 1];
+ void *data = (void *)(unsigned long) pid;
+
+ if (!dir)
+ return NULL;
+
+ snprintf(strbuf, sizeof(strbuf), "%u", pid);
+ return rust_binderfs_create_file(dir, strbuf, &rust_binder_proc_fops, data);
+}
+
+static struct dentry *binderfs_create_dir(struct dentry *parent,
+ const char *name)
+{
+ struct dentry *dentry;
+ struct inode *new_inode;
+
+ new_inode = binderfs_make_inode(parent->d_sb, S_IFDIR | 0755);
+ if (!new_inode)
+ return ERR_PTR(-ENOMEM);
+
+ new_inode->i_fop = &simple_dir_operations;
+ new_inode->i_op = &simple_dir_inode_operations;
+
+ dentry = simple_start_creating(parent, name);
+ if (IS_ERR(dentry)) {
+ iput(new_inode);
+ return dentry;
+ }
+
+ inc_nlink(parent->d_inode);
+ set_nlink(new_inode, 2);
+ d_make_persistent(dentry, new_inode);
+ fsnotify_mkdir(parent->d_inode, dentry);
+ simple_done_creating(dentry);
+ return dentry;
+}
+
+static int binder_features_show(struct seq_file *m, void *unused)
+{
+ bool *feature = m->private;
+
+ seq_printf(m, "%d\n", *feature);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(binder_features);
+
+static int init_binder_features(struct super_block *sb)
+{
+ struct dentry *dentry, *dir;
+
+ dir = binderfs_create_dir(sb->s_root, "features");
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ dentry = rust_binderfs_create_file(dir, "oneway_spam_detection",
+ &binder_features_fops,
+ &binder_features.oneway_spam_detection);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ dentry = rust_binderfs_create_file(dir, "extended_error",
+ &binder_features_fops,
+ &binder_features.extended_error);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ dentry = rust_binderfs_create_file(dir, "freeze_notification",
+ &binder_features_fops,
+ &binder_features.freeze_notification);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ return 0;
+}
+
+static int init_binder_logs(struct super_block *sb)
+{
+ struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir;
+ struct binderfs_info *info;
+ int ret = 0;
+
+ binder_logs_root_dir = binderfs_create_dir(sb->s_root,
+ "binder_logs");
+ if (IS_ERR(binder_logs_root_dir)) {
+ ret = PTR_ERR(binder_logs_root_dir);
+ goto out;
+ }
+
+ dentry = rust_binderfs_create_file(binder_logs_root_dir, "stats",
+ &rust_binder_stats_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = rust_binderfs_create_file(binder_logs_root_dir, "state",
+ &rust_binder_state_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = rust_binderfs_create_file(binder_logs_root_dir, "transactions",
+ &rust_binder_transactions_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ proc_log_dir = binderfs_create_dir(binder_logs_root_dir, "proc");
+ if (IS_ERR(proc_log_dir)) {
+ ret = PTR_ERR(proc_log_dir);
+ goto out;
+ }
+ info = sb->s_fs_info;
+ info->proc_log_dir = proc_log_dir;
+
+out:
+ return ret;
+}
+
+static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ int ret;
+ struct binderfs_info *info;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct inode *inode = NULL;
+ struct binderfs_device device_info = {};
+ const char *name;
+ size_t len;
+
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
+
+ /*
+ * The binderfs filesystem can be mounted by userns root in a
+ * non-initial userns. By default such mounts have the SB_I_NODEV flag
+ * set in s_iflags to prevent security issues where userns root can
+ * just create random device nodes via mknod() since it owns the
+ * filesystem mount. But binderfs does not allow creating any files,
+ * including device nodes. The only way to create binder device nodes
+ * is through the binder-control device which userns root is explicitly
+ * allowed to do. So removing the SB_I_NODEV flag from s_iflags is both
+ * necessary and safe.
+ */
+ sb->s_iflags &= ~SB_I_NODEV;
+ sb->s_iflags |= SB_I_NOEXEC;
+ sb->s_magic = RUST_BINDERFS_SUPER_MAGIC;
+ sb->s_op = &binderfs_super_ops;
+ sb->s_time_gran = 1;
+
+ sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
+ if (!sb->s_fs_info)
+ return -ENOMEM;
+ info = sb->s_fs_info;
+
+ info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
+
+ info->root_gid = make_kgid(sb->s_user_ns, 0);
+ if (!gid_valid(info->root_gid))
+ info->root_gid = GLOBAL_ROOT_GID;
+ info->root_uid = make_kuid(sb->s_user_ns, 0);
+ if (!uid_valid(info->root_uid))
+ info->root_uid = GLOBAL_ROOT_UID;
+ info->mount_opts.max = ctx->max;
+ info->mount_opts.stats_mode = ctx->stats_mode;
+
+ inode = new_inode(sb);
+ if (!inode)
+ return -ENOMEM;
+
+ inode->i_ino = FIRST_INODE;
+ inode->i_fop = &simple_dir_operations;
+ inode->i_mode = S_IFDIR | 0755;
+ simple_inode_init_ts(inode);
+ inode->i_op = &binderfs_dir_inode_operations;
+ set_nlink(inode, 2);
+
+ sb->s_root = d_make_root(inode);
+ if (!sb->s_root)
+ return -ENOMEM;
+
+ ret = binderfs_binder_ctl_create(sb);
+ if (ret)
+ return ret;
+
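+	/*
+	 * Create the compiled-in default devices; for example, with
+	 * rust_devices="binder,hwbinder,vndbinder" this loop creates three
+	 * binder device nodes in the new mount (names shown are illustrative).
+	 */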
+ name = rust_binder_devices_param;
+ for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
+ strscpy(device_info.name, name, len + 1);
+ ret = binderfs_binder_device_create(inode, NULL, &device_info);
+ if (ret)
+ return ret;
+ name += len;
+ if (*name == ',')
+ name++;
+ }
+
+ ret = init_binder_features(sb);
+ if (ret)
+ return ret;
+
+ if (info->mount_opts.stats_mode == binderfs_stats_mode_global)
+ return init_binder_logs(sb);
+
+ return 0;
+}
+
+static int binderfs_fs_context_get_tree(struct fs_context *fc)
+{
+ return get_tree_nodev(fc, binderfs_fill_super);
+}
+
+static void binderfs_fs_context_free(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+
+ kfree(ctx);
+}
+
+static const struct fs_context_operations binderfs_fs_context_ops = {
+ .free = binderfs_fs_context_free,
+ .get_tree = binderfs_fs_context_get_tree,
+ .parse_param = binderfs_fs_context_parse_param,
+ .reconfigure = binderfs_fs_context_reconfigure,
+};
+
+static int binderfs_init_fs_context(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx;
+
+ ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->max = BINDERFS_MAX_MINOR;
+ ctx->stats_mode = binderfs_stats_mode_unset;
+
+ fc->fs_private = ctx;
+ fc->ops = &binderfs_fs_context_ops;
+
+ return 0;
+}
+
+static void binderfs_kill_super(struct super_block *sb)
+{
+ struct binderfs_info *info = sb->s_fs_info;
+
+ /*
+ * During inode eviction struct binderfs_info is needed.
+ * So first wipe the super_block then free struct binderfs_info.
+ */
+ kill_anon_super(sb);
+
+ if (info && info->ipc_ns)
+ put_ipc_ns(info->ipc_ns);
+
+ kfree(info);
+}
+
+static struct file_system_type binder_fs_type = {
+ .name = "binder",
+ .init_fs_context = binderfs_init_fs_context,
+ .parameters = binderfs_fs_parameters,
+ .kill_sb = binderfs_kill_super,
+ .fs_flags = FS_USERNS_MOUNT,
+};
+
+int init_rust_binderfs(void)
+{
+ int ret;
+ const char *name;
+ size_t len;
+
+ /* Verify that the default binderfs device names are valid. */
+ name = rust_binder_devices_param;
+ for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
+ if (len > BINDERFS_MAX_NAME)
+ return -E2BIG;
+ name += len;
+ if (*name == ',')
+ name++;
+ }
+
+ /* Allocate new major number for binderfs. */
+ ret = alloc_chrdev_region(&binderfs_dev, 0, BINDERFS_MAX_MINOR,
+ "rust_binder");
+ if (ret)
+ return ret;
+
+ ret = register_filesystem(&binder_fs_type);
+ if (ret) {
+ unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
+ return ret;
+ }
+
+ return ret;
+}
diff --git a/drivers/android/binder/stats.rs b/drivers/android/binder/stats.rs
new file mode 100644
index 000000000000..037002651941
--- /dev/null
+++ b/drivers/android/binder/stats.rs
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Keep track of statistics for binder_logs.
+
+use crate::defs::*;
+use core::sync::atomic::{AtomicU32, Ordering::Relaxed};
+use kernel::{ioctl::_IOC_NR, seq_file::SeqFile, seq_print};
+
+const BC_COUNT: usize = _IOC_NR(BC_REPLY_SG) as usize + 1;
+const BR_COUNT: usize = _IOC_NR(BR_TRANSACTION_PENDING_FROZEN) as usize + 1;
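+
+// The counter arrays in `BinderStats` are indexed by ioctl command number,
+// which `_IOC_NR` extracts from the full command encoding (for example,
+// `_IOC_NR(BC_TRANSACTION)` is 0, as BC_TRANSACTION is the first command).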
+
+pub(crate) static GLOBAL_STATS: BinderStats = BinderStats::new();
+
+pub(crate) struct BinderStats {
+ bc: [AtomicU32; BC_COUNT],
+ br: [AtomicU32; BR_COUNT],
+}
+
+impl BinderStats {
+ pub(crate) const fn new() -> Self {
+ #[expect(clippy::declare_interior_mutable_const)]
+ const ZERO: AtomicU32 = AtomicU32::new(0);
+
+ Self {
+ bc: [ZERO; BC_COUNT],
+ br: [ZERO; BR_COUNT],
+ }
+ }
+
+ pub(crate) fn inc_bc(&self, bc: u32) {
+ let idx = _IOC_NR(bc) as usize;
+ if let Some(bc_ref) = self.bc.get(idx) {
+ bc_ref.fetch_add(1, Relaxed);
+ }
+ }
+
+ pub(crate) fn inc_br(&self, br: u32) {
+ let idx = _IOC_NR(br) as usize;
+ if let Some(br_ref) = self.br.get(idx) {
+ br_ref.fetch_add(1, Relaxed);
+ }
+ }
+
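+ /// Prints one `<prefix><name>: <count>` line for each nonzero counter; an
+ /// illustrative output line would be `BC_TRANSACTION: 42`.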
+ pub(crate) fn debug_print(&self, prefix: &str, m: &SeqFile) {
+ for (i, cnt) in self.bc.iter().enumerate() {
+ let cnt = cnt.load(Relaxed);
+ if cnt > 0 {
+ seq_print!(m, "{}{}: {}\n", prefix, command_string(i), cnt);
+ }
+ }
+ for (i, cnt) in self.br.iter().enumerate() {
+ let cnt = cnt.load(Relaxed);
+ if cnt > 0 {
+ seq_print!(m, "{}{}: {}\n", prefix, return_string(i), cnt);
+ }
+ }
+ }
+}
+
+mod strings {
+ use core::str::from_utf8_unchecked;
+ use kernel::str::{CStr, CStrExt as _};
+
+ extern "C" {
+ static binder_command_strings: [*const u8; super::BC_COUNT];
+ static binder_return_strings: [*const u8; super::BR_COUNT];
+ }
+
+ pub(super) fn command_string(i: usize) -> &'static str {
+ // SAFETY: Accessing `binder_command_strings` is always safe.
+ let c_str_ptr = unsafe { binder_command_strings[i] };
+ // SAFETY: The `binder_command_strings` array only contains nul-terminated strings.
+ let bytes = unsafe { CStr::from_char_ptr(c_str_ptr) }.to_bytes();
+ // SAFETY: The `binder_command_strings` array only contains strings with ascii-chars.
+ unsafe { from_utf8_unchecked(bytes) }
+ }
+
+ pub(super) fn return_string(i: usize) -> &'static str {
+ // SAFETY: Accessing `binder_return_strings` is always safe.
+ let c_str_ptr = unsafe { binder_return_strings[i] };
+ // SAFETY: The `binder_return_strings` array only contains nul-terminated strings.
+ let bytes = unsafe { CStr::from_char_ptr(c_str_ptr) }.to_bytes();
+ // SAFETY: The `binder_return_strings` array only contains strings with ascii-chars.
+ unsafe { from_utf8_unchecked(bytes) }
+ }
+}
+use strings::{command_string, return_string};
diff --git a/drivers/android/binder/thread.rs b/drivers/android/binder/thread.rs
new file mode 100644
index 000000000000..1a8e6fdc0dc4
--- /dev/null
+++ b/drivers/android/binder/thread.rs
@@ -0,0 +1,1596 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! This module defines the `Thread` type, which represents a userspace thread that is using
+//! binder.
+//!
+//! The `Process` object stores all of the threads in an rb tree.
+
+use kernel::{
+ bindings,
+ fs::{File, LocalFile},
+ list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
+ prelude::*,
+ security,
+ seq_file::SeqFile,
+ seq_print,
+ sync::poll::{PollCondVar, PollTable},
+ sync::{Arc, SpinLock},
+ task::Task,
+ types::ARef,
+ uaccess::UserSlice,
+ uapi,
+};
+
+use crate::{
+ allocation::{Allocation, AllocationView, BinderObject, BinderObjectRef, NewAllocation},
+ defs::*,
+ error::BinderResult,
+ process::{GetWorkOrRegister, Process},
+ ptr_align,
+ stats::GLOBAL_STATS,
+ transaction::Transaction,
+ BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverCode, DeliverToRead,
+};
+
+use core::{
+ mem::size_of,
+ sync::atomic::{AtomicU32, Ordering},
+};
+
+/// Stores the layout of the scatter-gather entries. This is used during the `translate_object`
+/// calls in `copy_transaction_data` and is discarded when the copy returns.
+struct ScatterGatherState {
+ /// A struct that tracks the amount of unused buffer space.
+ unused_buffer_space: UnusedBufferSpace,
+ /// Scatter-gather entries to copy.
+ sg_entries: KVec<ScatterGatherEntry>,
+ /// Indexes into `sg_entries` corresponding to the last binder_buffer_object that
+ /// was processed and all of its ancestors. The array is in sorted order.
+ ancestors: KVec<usize>,
+}
+
+/// This entry specifies an additional buffer that should be copied using the scatter-gather
+/// mechanism.
+struct ScatterGatherEntry {
+ /// The index in the offset array of the BINDER_TYPE_PTR that this entry originates from.
+ obj_index: usize,
+ /// Offset in target buffer.
+ offset: usize,
+ /// User address in source buffer.
+ sender_uaddr: usize,
+ /// Number of bytes to copy.
+ length: usize,
+ /// The minimum offset of the next fixup in this buffer.
+ fixup_min_offset: usize,
+ /// The offsets within this buffer that contain pointers which should be translated.
+ pointer_fixups: KVec<PointerFixupEntry>,
+}
+
+/// This entry specifies that a fixup should happen at `target_offset` of the
+/// buffer. If `skip` is nonzero, then the fixup is a `binder_fd_array_object`
+/// and is applied later. Otherwise if `skip` is zero, then the size of the
+/// fixup is `size_of::<u64>()` and `pointer_value` is written to the buffer.
+struct PointerFixupEntry {
+ /// The number of bytes to skip, or zero for a `binder_buffer_object` fixup.
+ skip: usize,
+ /// The translated pointer to write when `skip` is zero.
+ pointer_value: u64,
+ /// The offset at which the value should be written. The offset is relative
+ /// to the original buffer.
+ target_offset: usize,
+}
+
+/// Return type of `apply_and_validate_fixup_in_parent`.
+struct ParentFixupInfo {
+ /// The index of the parent buffer in `sg_entries`.
+ parent_sg_index: usize,
+ /// The number of ancestors of the buffer.
+ ///
+ /// The buffer is considered an ancestor of itself, so this is always at
+ /// least one.
+ num_ancestors: usize,
+ /// New value of `fixup_min_offset` if this fixup is applied.
+ new_min_offset: usize,
+ /// The offset of the fixup in the target buffer.
+ target_offset: usize,
+}
+
+impl ScatterGatherState {
+ /// Called when a `binder_buffer_object` or `binder_fd_array_object` tries
+ /// to access a region in its parent buffer. These accesses have various
+ /// restrictions, which this method verifies.
+ ///
+ /// The `parent_offset` and `length` arguments describe the offset and
+ /// length of the access in the parent buffer.
+ ///
+ /// # Detailed restrictions
+ ///
+ /// Obviously the fixup must be in-bounds for the parent buffer.
+ ///
+ /// For safety reasons, we only allow fixups inside a buffer to happen
+ /// at increasing offsets; additionally, we only allow fixup on the last
+ /// buffer object that was verified, or one of its parents.
+ ///
+ /// Example of what is allowed:
+ ///
+ /// A
+ /// B (parent = A, offset = 0)
+ /// C (parent = A, offset = 16)
+ /// D (parent = C, offset = 0)
+ /// E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
+ ///
+ /// Examples of what is not allowed:
+ ///
+ /// Decreasing offsets within the same parent:
+ /// A
+ /// C (parent = A, offset = 16)
+ /// B (parent = A, offset = 0) // decreasing offset within A
+ ///
+/// Referring to a parent that wasn't the last object or any of its parents:
+ /// A
+ /// B (parent = A, offset = 0)
+ /// C (parent = A, offset = 0)
+ /// C (parent = A, offset = 16)
+ /// D (parent = B, offset = 0) // B is not A or any of A's parents
+ fn validate_parent_fixup(
+ &self,
+ parent: usize,
+ parent_offset: usize,
+ length: usize,
+ ) -> Result<ParentFixupInfo> {
+ // Using `position` would also be correct, but `rposition` avoids
+ // quadratic running times.
+ let ancestors_i = self
+ .ancestors
+ .iter()
+ .copied()
+ .rposition(|sg_idx| self.sg_entries[sg_idx].obj_index == parent)
+ .ok_or(EINVAL)?;
+ let sg_idx = self.ancestors[ancestors_i];
+ let sg_entry = match self.sg_entries.get(sg_idx) {
+ Some(sg_entry) => sg_entry,
+ None => {
+ pr_err!(
+ "self.ancestors[{}] is {}, but self.sg_entries.len() is {}",
+ ancestors_i,
+ sg_idx,
+ self.sg_entries.len()
+ );
+ return Err(EINVAL);
+ }
+ };
+ if sg_entry.fixup_min_offset > parent_offset {
+ pr_warn!(
+ "validate_parent_fixup: fixup_min_offset={}, parent_offset={}",
+ sg_entry.fixup_min_offset,
+ parent_offset
+ );
+ return Err(EINVAL);
+ }
+ let new_min_offset = parent_offset.checked_add(length).ok_or(EINVAL)?;
+ if new_min_offset > sg_entry.length {
+ pr_warn!(
+ "validate_parent_fixup: new_min_offset={}, sg_entry.length={}",
+ new_min_offset,
+ sg_entry.length
+ );
+ return Err(EINVAL);
+ }
+ let target_offset = sg_entry.offset.checked_add(parent_offset).ok_or(EINVAL)?;
+ // The `ancestors_i + 1` operation can't overflow since the output of the addition is at
+ // most `self.ancestors.len()`, which also fits in a usize.
+ Ok(ParentFixupInfo {
+ parent_sg_index: sg_idx,
+ num_ancestors: ancestors_i + 1,
+ new_min_offset,
+ target_offset,
+ })
+ }
+}
+
+/// Keeps track of how much unused buffer space is left. The initial amount is the number of bytes
+/// requested by the user using the `buffers_size` field of `binder_transaction_data_sg`. Each time
+/// we translate an object of type `BINDER_TYPE_PTR`, some of the unused buffer space is consumed.
+struct UnusedBufferSpace {
+ /// The start of the remaining space.
+ offset: usize,
+ /// The end of the remaining space.
+ limit: usize,
+}
+impl UnusedBufferSpace {
+ /// Claim the next `size` bytes from the unused buffer space. The offset for the claimed chunk
+ /// into the buffer is returned.
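+ ///
+ /// For example (illustrative, assuming 8-byte pointer alignment): with
+ /// `offset == 0` and `limit == 24`, `claim_next(5)` returns offset 0 and
+ /// advances `offset` to 8; a subsequent `claim_next(24)` then fails with
+ /// `EINVAL` because `8 + 24 > 24`.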
+ fn claim_next(&mut self, size: usize) -> Result<usize> {
+ // We require every chunk to be aligned.
+ let size = ptr_align(size).ok_or(EINVAL)?;
+ let new_offset = self.offset.checked_add(size).ok_or(EINVAL)?;
+
+ if new_offset <= self.limit {
+ let offset = self.offset;
+ self.offset = new_offset;
+ Ok(offset)
+ } else {
+ Err(EINVAL)
+ }
+ }
+}
+
+pub(crate) enum PushWorkRes {
+ Ok,
+ FailedDead(DLArc<dyn DeliverToRead>),
+}
+
+impl PushWorkRes {
+ fn is_ok(&self) -> bool {
+ match self {
+ PushWorkRes::Ok => true,
+ PushWorkRes::FailedDead(_) => false,
+ }
+ }
+}
+
+/// The fields of `Thread` protected by the spinlock.
+struct InnerThread {
+ /// Determines the looper state of the thread. It is a bit-wise combination of the constants
+ /// prefixed with `LOOPER_`.
+ looper_flags: u32,
+
+ /// Determines whether the looper should return.
+ looper_need_return: bool,
+
+ /// Determines if thread is dead.
+ is_dead: bool,
+
+ /// Work item used to deliver error codes to the thread that started a transaction. Stored here
+ /// so that it can be reused.
+ reply_work: DArc<ThreadError>,
+
+ /// Work item used to deliver error codes to the current thread. Stored here so that it can be
+ /// reused.
+ return_work: DArc<ThreadError>,
+
+ /// Determines whether the work list below should be processed. When set to false, `work_list`
+ /// is treated as if it were empty.
+ process_work_list: bool,
+ /// List of work items to deliver to userspace.
+ work_list: List<DTRWrap<dyn DeliverToRead>>,
+ current_transaction: Option<DArc<Transaction>>,
+
+ /// Extended error information for this thread.
+ extended_error: ExtendedError,
+}
+
+const LOOPER_REGISTERED: u32 = 0x01;
+const LOOPER_ENTERED: u32 = 0x02;
+const LOOPER_EXITED: u32 = 0x04;
+const LOOPER_INVALID: u32 = 0x08;
+const LOOPER_WAITING: u32 = 0x10;
+const LOOPER_WAITING_PROC: u32 = 0x20;
+const LOOPER_POLL: u32 = 0x40;
+
+impl InnerThread {
+ fn new() -> Result<Self> {
+ fn next_err_id() -> u32 {
+ static EE_ID: AtomicU32 = AtomicU32::new(0);
+ EE_ID.fetch_add(1, Ordering::Relaxed)
+ }
+
+ Ok(Self {
+ looper_flags: 0,
+ looper_need_return: false,
+ is_dead: false,
+ process_work_list: false,
+ reply_work: ThreadError::try_new()?,
+ return_work: ThreadError::try_new()?,
+ work_list: List::new(),
+ current_transaction: None,
+ extended_error: ExtendedError::new(next_err_id(), BR_OK, 0),
+ })
+ }
+
+ fn pop_work(&mut self) -> Option<DLArc<dyn DeliverToRead>> {
+ if !self.process_work_list {
+ return None;
+ }
+
+ let ret = self.work_list.pop_front();
+ self.process_work_list = !self.work_list.is_empty();
+ ret
+ }
+
+ fn push_work(&mut self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
+ if self.is_dead {
+ PushWorkRes::FailedDead(work)
+ } else {
+ self.work_list.push_back(work);
+ self.process_work_list = true;
+ PushWorkRes::Ok
+ }
+ }
+
+ fn push_reply_work(&mut self, code: u32) {
+ if let Ok(work) = ListArc::try_from_arc(self.reply_work.clone()) {
+ work.set_error_code(code);
+ self.push_work(work);
+ } else {
+ pr_warn!("Thread reply work is already in use.");
+ }
+ }
+
+ fn push_return_work(&mut self, reply: u32) {
+ if let Ok(work) = ListArc::try_from_arc(self.return_work.clone()) {
+ work.set_error_code(reply);
+ self.push_work(work);
+ } else {
+ pr_warn!("Thread return work is already in use.");
+ }
+ }
+
+ /// Used to push work items that do not need to be processed immediately and can wait until the
+ /// thread gets another work item.
+ fn push_work_deferred(&mut self, work: DLArc<dyn DeliverToRead>) {
+ self.work_list.push_back(work);
+ }
+
+ /// Fetches the transaction this thread can reply to. If the thread has a pending transaction
+ /// (that it could respond to) but it has also issued a transaction, it must first wait for the
+ /// previously-issued transaction to complete.
+ ///
+ /// The `thread` parameter should be the thread containing this `InnerThread`.
+ fn pop_transaction_to_reply(&mut self, thread: &Thread) -> Result<DArc<Transaction>> {
+ let transaction = self.current_transaction.take().ok_or(EINVAL)?;
+ if core::ptr::eq(thread, transaction.from.as_ref()) {
+ self.current_transaction = Some(transaction);
+ return Err(EINVAL);
+ }
+ // Find a new current transaction for this thread.
+ self.current_transaction = transaction.find_from(thread).cloned();
+ Ok(transaction)
+ }
+
+ fn pop_transaction_replied(&mut self, transaction: &DArc<Transaction>) -> bool {
+ match self.current_transaction.take() {
+ None => false,
+ Some(old) => {
+ if !Arc::ptr_eq(transaction, &old) {
+ self.current_transaction = Some(old);
+ return false;
+ }
+ self.current_transaction = old.clone_next();
+ true
+ }
+ }
+ }
+
+ fn looper_enter(&mut self) {
+ self.looper_flags |= LOOPER_ENTERED;
+ if self.looper_flags & LOOPER_REGISTERED != 0 {
+ self.looper_flags |= LOOPER_INVALID;
+ }
+ }
+
+ fn looper_register(&mut self, valid: bool) {
+ self.looper_flags |= LOOPER_REGISTERED;
+ if !valid || self.looper_flags & LOOPER_ENTERED != 0 {
+ self.looper_flags |= LOOPER_INVALID;
+ }
+ }
+
+ fn looper_exit(&mut self) {
+ self.looper_flags |= LOOPER_EXITED;
+ }
+
+ /// Determines whether the thread is part of a pool, i.e., if it is a looper.
+ fn is_looper(&self) -> bool {
+ self.looper_flags & (LOOPER_ENTERED | LOOPER_REGISTERED) != 0
+ }
+
+ /// Determines whether the thread should attempt to fetch work items from the process queue.
+ /// This is generally the case when the thread is registered as a looper and not part of a
+ /// transaction stack. But if there is local work, we want to return to userspace before we
+ /// deliver any remote work.
+ fn should_use_process_work_queue(&self) -> bool {
+ self.current_transaction.is_none() && !self.process_work_list && self.is_looper()
+ }
+
+ fn poll(&mut self) -> u32 {
+ self.looper_flags |= LOOPER_POLL;
+ if self.process_work_list || self.looper_need_return {
+ bindings::POLLIN
+ } else {
+ 0
+ }
+ }
+}
+
+/// This represents a thread that's used with binder.
+#[pin_data]
+pub(crate) struct Thread {
+ pub(crate) id: i32,
+ pub(crate) process: Arc<Process>,
+ pub(crate) task: ARef<Task>,
+ #[pin]
+ inner: SpinLock<InnerThread>,
+ #[pin]
+ work_condvar: PollCondVar,
+ /// Used to insert this thread into the process' `ready_threads` list.
+ ///
+ /// INVARIANT: May never be used for any other list than the `self.process.ready_threads`.
+ #[pin]
+ links: ListLinks,
+ #[pin]
+ links_track: AtomicTracker,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Thread {
+ tracked_by links_track: AtomicTracker;
+ }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for Thread {
+ using ListLinks { self.links };
+ }
+}
+
+impl Thread {
+ pub(crate) fn new(id: i32, process: Arc<Process>) -> Result<Arc<Self>> {
+ let inner = InnerThread::new()?;
+
+ Arc::pin_init(
+ try_pin_init!(Thread {
+ id,
+ process,
+ task: ARef::from(&**kernel::current!()),
+ inner <- kernel::new_spinlock!(inner, "Thread::inner"),
+ work_condvar <- kernel::new_poll_condvar!("Thread::work_condvar"),
+ links <- ListLinks::new(),
+ links_track <- AtomicTracker::new(),
+ }),
+ GFP_KERNEL,
+ )
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print(self: &Arc<Self>, m: &SeqFile, print_all: bool) -> Result<()> {
+ let inner = self.inner.lock();
+
+ if print_all || inner.current_transaction.is_some() || !inner.work_list.is_empty() {
+ seq_print!(
+ m,
+ " thread {}: l {:02x} need_return {}\n",
+ self.id,
+ inner.looper_flags,
+ inner.looper_need_return,
+ );
+ }
+
+ let mut t_opt = inner.current_transaction.as_ref();
+ while let Some(t) = t_opt {
+ if Arc::ptr_eq(&t.from, self) {
+ t.debug_print_inner(m, " outgoing transaction ");
+ t_opt = t.from_parent.as_ref();
+ } else if Arc::ptr_eq(&t.to, &self.process) {
+ t.debug_print_inner(m, " incoming transaction ");
+ t_opt = t.find_from(self);
+ } else {
+ t.debug_print_inner(m, " bad transaction ");
+ t_opt = None;
+ }
+ }
+
+ for work in &inner.work_list {
+ work.debug_print(m, " ", " pending transaction ")?;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn get_extended_error(&self, data: UserSlice) -> Result {
+ let mut writer = data.writer();
+ let ee = self.inner.lock().extended_error;
+ writer.write(&ee)?;
+ Ok(())
+ }
+
+ pub(crate) fn set_current_transaction(&self, transaction: DArc<Transaction>) {
+ self.inner.lock().current_transaction = Some(transaction);
+ }
+
+ pub(crate) fn has_current_transaction(&self) -> bool {
+ self.inner.lock().current_transaction.is_some()
+ }
+
+ /// Attempts to fetch a work item from the thread-local queue. The behaviour if the queue is
+ /// empty depends on `wait`: if it is true, the function waits for some work to be queued (or a
+ /// signal); otherwise it returns indicating that none is available.
+ fn get_work_local(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
+ {
+ let mut inner = self.inner.lock();
+ if inner.looper_need_return {
+ return Ok(inner.pop_work());
+ }
+ }
+
+ // Try once if the caller does not want to wait.
+ if !wait {
+ return self.inner.lock().pop_work().ok_or(EAGAIN).map(Some);
+ }
+
+ // Loop waiting only on the local queue (i.e., not registering with the process queue).
+ let mut inner = self.inner.lock();
+ loop {
+ if let Some(work) = inner.pop_work() {
+ return Ok(Some(work));
+ }
+
+ inner.looper_flags |= LOOPER_WAITING;
+ let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
+ inner.looper_flags &= !LOOPER_WAITING;
+
+ if signal_pending {
+ return Err(EINTR);
+ }
+ if inner.looper_need_return {
+ return Ok(None);
+ }
+ }
+ }
+
+ /// Attempts to fetch a work item from the thread-local queue, falling back to the process-wide
+ /// queue if none is available locally.
+ ///
+ /// This must only be called when the thread is not participating in a transaction chain. If it
+ /// is, the local version (`get_work_local`) should be used instead.
+ fn get_work(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
+ // Try to get work from the thread's work queue, using only a local lock.
+ {
+ let mut inner = self.inner.lock();
+ if let Some(work) = inner.pop_work() {
+ return Ok(Some(work));
+ }
+ if inner.looper_need_return {
+ drop(inner);
+ return Ok(self.process.get_work());
+ }
+ }
+
+ // If the caller doesn't want to wait, try to grab work from the process queue.
+ //
+ // We know nothing will have been queued directly to the thread queue because it is not in
+ // a transaction and it is not in the process' ready list.
+ if !wait {
+ return self.process.get_work().ok_or(EAGAIN).map(Some);
+ }
+
+ // Get work from the process queue. If none is available, atomically register as ready.
+ let reg = match self.process.get_work_or_register(self) {
+ GetWorkOrRegister::Work(work) => return Ok(Some(work)),
+ GetWorkOrRegister::Register(reg) => reg,
+ };
+
+ let mut inner = self.inner.lock();
+ loop {
+ if let Some(work) = inner.pop_work() {
+ return Ok(Some(work));
+ }
+
+ inner.looper_flags |= LOOPER_WAITING | LOOPER_WAITING_PROC;
+ let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
+ inner.looper_flags &= !(LOOPER_WAITING | LOOPER_WAITING_PROC);
+
+ if signal_pending || inner.looper_need_return {
+ // We need to return now. We need to pull the thread off the list of ready threads
+ // (by dropping `reg`), then check the state again after it's off the list to
+ // ensure that something was not queued in the meantime. If something has been
+ // queued, we just return it (instead of the error).
+ drop(inner);
+ drop(reg);
+
+ let res = match self.inner.lock().pop_work() {
+ Some(work) => Ok(Some(work)),
+ None if signal_pending => Err(EINTR),
+ None => Ok(None),
+ };
+ return res;
+ }
+ }
+ }
+
+ /// Push the provided work item to be delivered to user space via this thread.
+ ///
+ /// Returns whether the item was successfully pushed. This can only fail if the thread is dead.
+ pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
+ let sync = work.should_sync_wakeup();
+
+ let res = self.inner.lock().push_work(work);
+
+ if res.is_ok() {
+ if sync {
+ self.work_condvar.notify_sync();
+ } else {
+ self.work_condvar.notify_one();
+ }
+ }
+
+ res
+ }
+
+ /// Attempts to push the given work item to the thread if it's a looper thread (i.e., if it's
+ /// part of a thread pool) and is alive. Otherwise, push the work item to the process instead.
+ pub(crate) fn push_work_if_looper(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
+ let mut inner = self.inner.lock();
+ if inner.is_looper() && !inner.is_dead {
+ inner.push_work(work);
+ Ok(())
+ } else {
+ drop(inner);
+ self.process.push_work(work)
+ }
+ }
+
+ pub(crate) fn push_work_deferred(&self, work: DLArc<dyn DeliverToRead>) {
+ self.inner.lock().push_work_deferred(work);
+ }
+
+ pub(crate) fn push_return_work(&self, reply: u32) {
+ self.inner.lock().push_return_work(reply);
+ }
+
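+ /// Translates a single object embedded in the transaction payload so that it
+ /// makes sense to the target process: node and handle objects are re-resolved
+ /// for the target, files are recorded so that fresh fds can be installed on
+ /// delivery, and `BINDER_TYPE_PTR` buffers are queued in `sg_state` for the
+ /// later scatter-gather copy (a summary of the match arms below).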
+ fn translate_object(
+ &self,
+ obj_index: usize,
+ offset: usize,
+ object: BinderObjectRef<'_>,
+ view: &mut AllocationView<'_>,
+ allow_fds: bool,
+ sg_state: &mut ScatterGatherState,
+ ) -> BinderResult {
+ match object {
+ BinderObjectRef::Binder(obj) => {
+ let strong = obj.hdr.type_ == BINDER_TYPE_BINDER;
+ // SAFETY: `binder` is a `binder_uintptr_t`; any bit pattern is a valid
+ // representation.
+ let ptr = unsafe { obj.__bindgen_anon_1.binder } as _;
+ let cookie = obj.cookie as _;
+ let flags = obj.flags as _;
+ let node = self
+ .process
+ .as_arc_borrow()
+ .get_node(ptr, cookie, flags, strong, self)?;
+ security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
+ view.transfer_binder_object(offset, obj, strong, node)?;
+ }
+ BinderObjectRef::Handle(obj) => {
+ let strong = obj.hdr.type_ == BINDER_TYPE_HANDLE;
+ // SAFETY: `handle` is a `u32`; any bit pattern is a valid representation.
+ let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
+ let node = self.process.get_node_from_handle(handle, strong)?;
+ security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
+ view.transfer_binder_object(offset, obj, strong, node)?;
+ }
+ BinderObjectRef::Fd(obj) => {
+ if !allow_fds {
+ return Err(EPERM.into());
+ }
+
+ // SAFETY: `fd` is a `u32`; any bit pattern is a valid representation.
+ let fd = unsafe { obj.__bindgen_anon_1.fd };
+ let file = LocalFile::fget(fd)?;
+ // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
+ // ioctl, so there are no active calls to `fdget_pos` on this thread.
+ let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
+ security::binder_transfer_file(
+ &self.process.cred,
+ &view.alloc.process.cred,
+ &file,
+ )?;
+
+ let mut obj_write = BinderFdObject::default();
+ obj_write.hdr.type_ = BINDER_TYPE_FD;
+ // This will be overwritten with the actual fd when the transaction is received.
+ obj_write.__bindgen_anon_1.fd = u32::MAX;
+ obj_write.cookie = obj.cookie;
+ view.write::<BinderFdObject>(offset, &obj_write)?;
+
+ const FD_FIELD_OFFSET: usize =
+ core::mem::offset_of!(uapi::binder_fd_object, __bindgen_anon_1.fd);
+
+ let field_offset = offset + FD_FIELD_OFFSET;
+
+ view.alloc.info_add_fd(file, field_offset, false)?;
+ }
+ BinderObjectRef::Ptr(obj) => {
+ let obj_length = obj.length.try_into().map_err(|_| EINVAL)?;
+ let alloc_offset = match sg_state.unused_buffer_space.claim_next(obj_length) {
+ Ok(alloc_offset) => alloc_offset,
+ Err(err) => {
+ pr_warn!(
+ "Failed to claim space for a BINDER_TYPE_PTR. (offset: {}, limit: {}, size: {})",
+ sg_state.unused_buffer_space.offset,
+ sg_state.unused_buffer_space.limit,
+ obj_length,
+ );
+ return Err(err.into());
+ }
+ };
+
+ let sg_state_idx = sg_state.sg_entries.len();
+ sg_state.sg_entries.push(
+ ScatterGatherEntry {
+ obj_index,
+ offset: alloc_offset,
+ sender_uaddr: obj.buffer as _,
+ length: obj_length,
+ pointer_fixups: KVec::new(),
+ fixup_min_offset: 0,
+ },
+ GFP_KERNEL,
+ )?;
+
+ let buffer_ptr_in_user_space = (view.alloc.ptr + alloc_offset) as u64;
+
+ if obj.flags & uapi::BINDER_BUFFER_FLAG_HAS_PARENT == 0 {
+ sg_state.ancestors.clear();
+ sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
+ } else {
+ // Another buffer also has a pointer to this buffer, and we need to fixup that
+ // pointer too.
+
+ let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
+ let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
+
+ let info = sg_state.validate_parent_fixup(
+ parent_index,
+ parent_offset,
+ size_of::<u64>(),
+ )?;
+
+ sg_state.ancestors.truncate(info.num_ancestors);
+ sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
+
+ let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
+ Some(parent_entry) => parent_entry,
+ None => {
+ pr_err!(
+ "validate_parent_fixup returned index out of bounds for sg.entries"
+ );
+ return Err(EINVAL.into());
+ }
+ };
+
+ parent_entry.fixup_min_offset = info.new_min_offset;
+ parent_entry.pointer_fixups.push(
+ PointerFixupEntry {
+ skip: 0,
+ pointer_value: buffer_ptr_in_user_space,
+ target_offset: info.target_offset,
+ },
+ GFP_KERNEL,
+ )?;
+ }
+
+ let mut obj_write = BinderBufferObject::default();
+ obj_write.hdr.type_ = BINDER_TYPE_PTR;
+ obj_write.flags = obj.flags;
+ obj_write.buffer = buffer_ptr_in_user_space;
+ obj_write.length = obj.length;
+ obj_write.parent = obj.parent;
+ obj_write.parent_offset = obj.parent_offset;
+ view.write::<BinderBufferObject>(offset, &obj_write)?;
+ }
+ BinderObjectRef::Fda(obj) => {
+ if !allow_fds {
+ return Err(EPERM.into());
+ }
+ let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
+ let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
+ let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
+ let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;
+
+ let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
+ view.alloc.info_add_fd_reserve(num_fds)?;
+
+ sg_state.ancestors.truncate(info.num_ancestors);
+ let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
+ Some(parent_entry) => parent_entry,
+ None => {
+ pr_err!(
+ "validate_parent_fixup returned index out of bounds for sg.entries"
+ );
+ return Err(EINVAL.into());
+ }
+ };
+
+ parent_entry.fixup_min_offset = info.new_min_offset;
+ parent_entry
+ .pointer_fixups
+ .push(
+ PointerFixupEntry {
+ skip: fds_len,
+ pointer_value: 0,
+ target_offset: info.target_offset,
+ },
+ GFP_KERNEL,
+ )
+ .map_err(|_| ENOMEM)?;
+
+ let fda_uaddr = parent_entry
+ .sender_uaddr
+ .checked_add(parent_offset)
+ .ok_or(EINVAL)?;
+ let mut fda_bytes = KVec::new();
+ UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len)
+ .read_all(&mut fda_bytes, GFP_KERNEL)?;
+
+ if fds_len != fda_bytes.len() {
+ pr_err!("UserSlice::read_all returned wrong length in BINDER_TYPE_FDA");
+ return Err(EINVAL.into());
+ }
+
+ for i in (0..fds_len).step_by(size_of::<u32>()) {
+ let fd = {
+ let mut fd_bytes = [0u8; size_of::<u32>()];
+ fd_bytes.copy_from_slice(&fda_bytes[i..i + size_of::<u32>()]);
+ u32::from_ne_bytes(fd_bytes)
+ };
+
+ let file = LocalFile::fget(fd)?;
+ // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
+ // ioctl, so there are no active calls to `fdget_pos` on this thread.
+ let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
+ security::binder_transfer_file(
+ &self.process.cred,
+ &view.alloc.process.cred,
+ &file,
+ )?;
+
+ // The `validate_parent_fixup` call ensures that this addition will not
+ // overflow.
+ view.alloc.info_add_fd(file, info.target_offset + i, true)?;
+ }
+ drop(fda_bytes);
+
+ let mut obj_write = BinderFdArrayObject::default();
+ obj_write.hdr.type_ = BINDER_TYPE_FDA;
+ obj_write.num_fds = obj.num_fds;
+ obj_write.parent = obj.parent;
+ obj_write.parent_offset = obj.parent_offset;
+ view.write::<BinderFdArrayObject>(offset, &obj_write)?;
+ }
+ }
+ Ok(())
+ }
+
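+ /// Copies the scatter-gather buffers recorded in `sg_state` into the target
+ /// allocation, applying each recorded pointer fixup along the way. Regions
+ /// with `skip != 0` (fd arrays) are not copied here; they are patched when
+ /// the fds are installed on delivery.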
+ fn apply_sg(&self, alloc: &mut Allocation, sg_state: &mut ScatterGatherState) -> BinderResult {
+ for sg_entry in &mut sg_state.sg_entries {
+ let mut end_of_previous_fixup = sg_entry.offset;
+ let offset_end = sg_entry.offset.checked_add(sg_entry.length).ok_or(EINVAL)?;
+
+ let mut reader =
+ UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader();
+ for fixup in &mut sg_entry.pointer_fixups {
+ let fixup_len = if fixup.skip == 0 {
+ size_of::<u64>()
+ } else {
+ fixup.skip
+ };
+
+ let target_offset_end = fixup.target_offset.checked_add(fixup_len).ok_or(EINVAL)?;
+ if fixup.target_offset < end_of_previous_fixup || offset_end < target_offset_end {
+ pr_warn!(
+ "Fixups oob {} {} {} {}",
+ fixup.target_offset,
+ end_of_previous_fixup,
+ offset_end,
+ target_offset_end
+ );
+ return Err(EINVAL.into());
+ }
+
+ let copy_off = end_of_previous_fixup;
+ let copy_len = fixup.target_offset - end_of_previous_fixup;
+ if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
+ pr_warn!("Failed copying into alloc: {:?}", err);
+ return Err(err.into());
+ }
+ if fixup.skip == 0 {
+ let res = alloc.write::<u64>(fixup.target_offset, &fixup.pointer_value);
+ if let Err(err) = res {
+ pr_warn!("Failed copying ptr into alloc: {:?}", err);
+ return Err(err.into());
+ }
+ }
+ if let Err(err) = reader.skip(fixup_len) {
+ pr_warn!("Failed skipping {} from reader: {:?}", fixup_len, err);
+ return Err(err.into());
+ }
+ end_of_previous_fixup = target_offset_end;
+ }
+ let copy_off = end_of_previous_fixup;
+ let copy_len = offset_end - end_of_previous_fixup;
+ if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
+ pr_warn!("Failed copying remainder into alloc: {:?}", err);
+ return Err(err.into());
+ }
+ }
+ Ok(())
+ }
+
+ /// This method copies the payload of a transaction into the target process.
+ ///
+ /// The resulting payload will have several different components, which will be stored next to
+ /// each other in the allocation. Furthermore, various objects can be embedded in the payload,
+ /// and those objects have to be translated so that they make sense to the target transaction.
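+ ///
+ /// The resulting allocation is laid out roughly as follows (a sketch; every
+ /// component is pointer-aligned):
+ ///
+ /// ```text
+ /// [ data | offsets | scatter-gather buffers | security context ]
+ /// ```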
+ pub(crate) fn copy_transaction_data(
+ &self,
+ to_process: Arc<Process>,
+ tr: &BinderTransactionDataSg,
+ debug_id: usize,
+ allow_fds: bool,
+ txn_security_ctx_offset: Option<&mut usize>,
+ ) -> BinderResult<NewAllocation> {
+ let trd = &tr.transaction_data;
+ let is_oneway = trd.flags & TF_ONE_WAY != 0;
+ let mut secctx = if let Some(offset) = txn_security_ctx_offset {
+ let secid = self.process.cred.get_secid();
+ let ctx = match security::SecurityCtx::from_secid(secid) {
+ Ok(ctx) => ctx,
+ Err(err) => {
+ pr_warn!("Failed to get security ctx for id {}: {:?}", secid, err);
+ return Err(err.into());
+ }
+ };
+ Some((offset, ctx))
+ } else {
+ None
+ };
+
+ let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
+ let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
+ let offsets_size = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
+ let aligned_offsets_size = ptr_align(offsets_size).ok_or(EINVAL)?;
+ let buffers_size = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
+ let aligned_buffers_size = ptr_align(buffers_size).ok_or(EINVAL)?;
+ let aligned_secctx_size = match secctx.as_ref() {
+ Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
+ None => 0,
+ };
+
+ // This guarantees that at least `size_of::<usize>()` bytes will be allocated.
+ let len = usize::max(
+ aligned_data_size
+ .checked_add(aligned_offsets_size)
+ .and_then(|sum| sum.checked_add(aligned_buffers_size))
+ .and_then(|sum| sum.checked_add(aligned_secctx_size))
+ .ok_or(ENOMEM)?,
+ size_of::<usize>(),
+ );
+ let secctx_off = aligned_data_size + aligned_offsets_size + aligned_buffers_size;
+ let mut alloc =
+ match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) {
+ Ok(alloc) => alloc,
+ Err(err) => {
+ pr_warn!(
+ "Failed to allocate buffer. len:{}, is_oneway:{}",
+ len,
+ is_oneway
+ );
+ return Err(err);
+ }
+ };
+
+ // SAFETY: This accesses a union field, but it's okay because the field's type is valid for
+ // all bit-patterns.
+ let trd_data_ptr = unsafe { &trd.data.ptr };
+ let mut buffer_reader =
+ UserSlice::new(UserPtr::from_addr(trd_data_ptr.buffer as _), data_size).reader();
+ let mut end_of_previous_object = 0;
+ let mut sg_state = None;
+
+ // Copy offsets if there are any.
+ if offsets_size > 0 {
+ {
+ let mut reader =
+ UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
+ .reader();
+ alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?;
+ }
+
+ let offsets_start = aligned_data_size;
+ let offsets_end = aligned_data_size + aligned_offsets_size;
+
+ // This state is used for BINDER_TYPE_PTR objects.
+ let sg_state = sg_state.insert(ScatterGatherState {
+ unused_buffer_space: UnusedBufferSpace {
+ offset: offsets_end,
+ limit: len,
+ },
+ sg_entries: KVec::new(),
+ ancestors: KVec::new(),
+ });
+
+ // Traverse the objects specified.
+ let mut view = AllocationView::new(&mut alloc, data_size);
+ for (index, index_offset) in (offsets_start..offsets_end)
+ .step_by(size_of::<usize>())
+ .enumerate()
+ {
+ let offset = view.alloc.read(index_offset)?;
+
+ if offset < end_of_previous_object {
+ pr_warn!("Got transaction with invalid offset.");
+ return Err(EINVAL.into());
+ }
+
+ // Copy data between two objects.
+ if end_of_previous_object < offset {
+ view.copy_into(
+ &mut buffer_reader,
+ end_of_previous_object,
+ offset - end_of_previous_object,
+ )?;
+ }
+
+ let mut object = BinderObject::read_from(&mut buffer_reader)?;
+
+ match self.translate_object(
+ index,
+ offset,
+ object.as_ref(),
+ &mut view,
+ allow_fds,
+ sg_state,
+ ) {
+ Ok(()) => end_of_previous_object = offset + object.size(),
+ Err(err) => {
+ pr_warn!("Error while translating object.");
+ return Err(err);
+ }
+ }
+
+ // Update the indexes containing objects to clean up.
+ let offset_after_object = index_offset + size_of::<usize>();
+ view.alloc
+ .set_info_offsets(offsets_start..offset_after_object);
+ }
+ }
+
+ // Copy remaining raw data.
+ alloc.copy_into(
+ &mut buffer_reader,
+ end_of_previous_object,
+ data_size - end_of_previous_object,
+ )?;
+
+ if let Some(sg_state) = sg_state.as_mut() {
+ if let Err(err) = self.apply_sg(&mut alloc, sg_state) {
+ pr_warn!("Failure in apply_sg: {:?}", err);
+ return Err(err);
+ }
+ }
+
+ if let Some((off_out, secctx)) = secctx.as_mut() {
+ if let Err(err) = alloc.write(secctx_off, secctx.as_bytes()) {
+ pr_warn!("Failed to write security context: {:?}", err);
+ return Err(err.into());
+ }
+ **off_out = secctx_off;
+ }
+ Ok(alloc)
+ }
+
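+ /// Walks up the transaction stack of this thread, delivering `BR_DEAD_REPLY`
+ /// to the sender of each transaction that can no longer be replied to.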
+ fn unwind_transaction_stack(self: &Arc<Self>) {
+ let mut thread = self.clone();
+ while let Ok(transaction) = {
+ let mut inner = thread.inner.lock();
+ inner.pop_transaction_to_reply(thread.as_ref())
+ } {
+ let reply = Err(BR_DEAD_REPLY);
+ if !transaction.from.deliver_single_reply(reply, &transaction) {
+ break;
+ }
+
+ thread = transaction.from.clone();
+ }
+ }
+
+ pub(crate) fn deliver_reply(
+ &self,
+ reply: Result<DLArc<Transaction>, u32>,
+ transaction: &DArc<Transaction>,
+ ) {
+ if self.deliver_single_reply(reply, transaction) {
+ transaction.from.unwind_transaction_stack();
+ }
+ }
+
+ /// Delivers a reply to the thread that started a transaction. The reply can either be a
+ /// reply-transaction or an error code to be delivered instead.
+ ///
+ /// Returns whether the thread is dead. If it is, the caller is expected to unwind the
+ /// transaction stack by completing transactions for threads that are dead.
+ fn deliver_single_reply(
+ &self,
+ reply: Result<DLArc<Transaction>, u32>,
+ transaction: &DArc<Transaction>,
+ ) -> bool {
+ if let Ok(transaction) = &reply {
+ transaction.set_outstanding(&mut self.process.inner.lock());
+ }
+
+ {
+ let mut inner = self.inner.lock();
+ if !inner.pop_transaction_replied(transaction) {
+ return false;
+ }
+
+ if inner.is_dead {
+ return true;
+ }
+
+ match reply {
+ Ok(work) => {
+ inner.push_work(work);
+ }
+ Err(code) => inner.push_reply_work(code),
+ }
+ }
+
+ // Notify the thread now that we've released the inner lock.
+ self.work_condvar.notify_sync();
+ false
+ }
+
+ /// Determines if the given transaction is the current transaction for this thread.
+ fn is_current_transaction(&self, transaction: &DArc<Transaction>) -> bool {
+ let inner = self.inner.lock();
+ match &inner.current_transaction {
+ None => false,
+ Some(current) => Arc::ptr_eq(current, transaction),
+ }
+ }
+
+ /// Determines the current top of the transaction stack. It fails if the top is in another
+ /// thread (i.e., this thread belongs to a stack but it has called another thread). The top is
+ /// [`None`] if the thread is not currently participating in a transaction stack.
+ fn top_of_transaction_stack(&self) -> Result<Option<DArc<Transaction>>> {
+ let inner = self.inner.lock();
+ if let Some(cur) = &inner.current_transaction {
+ if core::ptr::eq(self, cur.from.as_ref()) {
+ pr_warn!("got new transaction with bad transaction stack");
+ return Err(EINVAL);
+ }
+ Ok(Some(cur.clone()))
+ } else {
+ Ok(None)
+ }
+ }
+
+ fn transaction<T>(self: &Arc<Self>, tr: &BinderTransactionDataSg, inner: T)
+ where
+ T: FnOnce(&Arc<Self>, &BinderTransactionDataSg) -> BinderResult,
+ {
+ if let Err(err) = inner(self, tr) {
+ if err.should_pr_warn() {
+ let mut ee = self.inner.lock().extended_error;
+ ee.command = err.reply;
+ ee.param = err.as_errno();
+ pr_warn!(
+ "Transaction failed: {:?} my_pid:{}",
+ err,
+ self.process.pid_in_current_ns()
+ );
+ }
+
+ self.push_return_work(err.reply);
+ }
+ }
+
+ fn transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
+ // SAFETY: Handle's type has no invalid bit patterns.
+ let handle = unsafe { tr.transaction_data.target.handle };
+ let node_ref = self.process.get_transaction_node(handle)?;
+ security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
+ // TODO: We need to ensure that there isn't a pending transaction in the work queue. How
+ // could this happen?
+ let top = self.top_of_transaction_stack()?;
+ let list_completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
+ let completion = list_completion.clone_arc();
+ let transaction = Transaction::new(node_ref, top, self, tr)?;
+
+ // Check that the transaction stack hasn't changed while the lock was released, then update
+ // it with the new transaction.
+ {
+ let mut inner = self.inner.lock();
+ if !transaction.is_stacked_on(&inner.current_transaction) {
+ pr_warn!("Transaction stack changed during transaction!");
+ return Err(EINVAL.into());
+ }
+ inner.current_transaction = Some(transaction.clone_arc());
+ // We push the completion as a deferred work so that we wait for the reply before
+ // returning to userland.
+ inner.push_work_deferred(list_completion);
+ }
+
+ if let Err(e) = transaction.submit() {
+ completion.skip();
+ // Define `transaction` first to drop it after `inner`.
+ let transaction;
+ let mut inner = self.inner.lock();
+ transaction = inner.current_transaction.take().unwrap();
+ inner.current_transaction = transaction.clone_next();
+ Err(e)
+ } else {
+ Ok(())
+ }
+ }
+
+ fn reply_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
+ let orig = self.inner.lock().pop_transaction_to_reply(self)?;
+ if !orig.from.is_current_transaction(&orig) {
+ return Err(EINVAL.into());
+ }
+
+ // We need to complete the transaction even if we cannot complete building the reply.
+ let out = (|| -> BinderResult<_> {
+ let completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
+ let process = orig.from.process.clone();
+ let allow_fds = orig.flags & TF_ACCEPT_FDS != 0;
+ let reply = Transaction::new_reply(self, process, tr, allow_fds)?;
+ self.inner.lock().push_work(completion);
+ orig.from.deliver_reply(Ok(reply), &orig);
+ Ok(())
+ })()
+ .map_err(|mut err| {
+ // At this point we only return `BR_TRANSACTION_COMPLETE` to the caller, and we must let
+ // the sender know that the transaction has completed (with an error in this case).
+ pr_warn!(
+ "Failure {:?} during reply - delivering BR_FAILED_REPLY to sender.",
+ err
+ );
+ let reply = Err(BR_FAILED_REPLY);
+ orig.from.deliver_reply(reply, &orig);
+ err.reply = BR_TRANSACTION_COMPLETE;
+ err
+ });
+
+ out
+ }
+
+ fn oneway_transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
+ // SAFETY: The `handle` field is valid for all possible byte values, so reading from the
+ // union is okay.
+ let handle = unsafe { tr.transaction_data.target.handle };
+ let node_ref = self.process.get_transaction_node(handle)?;
+ security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
+ let transaction = Transaction::new(node_ref, None, self, tr)?;
+ let code = if self.process.is_oneway_spam_detection_enabled()
+ && transaction.oneway_spam_detected
+ {
+ BR_ONEWAY_SPAM_SUSPECT
+ } else {
+ BR_TRANSACTION_COMPLETE
+ };
+ let list_completion = DTRWrap::arc_try_new(DeliverCode::new(code))?;
+ let completion = list_completion.clone_arc();
+ self.inner.lock().push_work(list_completion);
+ match transaction.submit() {
+ Ok(()) => Ok(()),
+ Err(err) => {
+ completion.skip();
+ Err(err)
+ }
+ }
+ }
+
+ fn write(self: &Arc<Self>, req: &mut BinderWriteRead) -> Result {
+ let write_start = req.write_buffer.wrapping_add(req.write_consumed);
+ let write_len = req.write_size.saturating_sub(req.write_consumed);
+ let mut reader =
+ UserSlice::new(UserPtr::from_addr(write_start as _), write_len as _).reader();
+
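+ // Process one command at a time, stopping early if an error return is already
+ // pending for this thread.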
+ while reader.len() >= size_of::<u32>() && self.inner.lock().return_work.is_unused() {
+ let before = reader.len();
+ let cmd = reader.read::<u32>()?;
+ GLOBAL_STATS.inc_bc(cmd);
+ self.process.stats.inc_bc(cmd);
+ match cmd {
+ BC_TRANSACTION => {
+ let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
+ if tr.transaction_data.flags & TF_ONE_WAY != 0 {
+ self.transaction(&tr, Self::oneway_transaction_inner);
+ } else {
+ self.transaction(&tr, Self::transaction_inner);
+ }
+ }
+ BC_TRANSACTION_SG => {
+ let tr = reader.read::<BinderTransactionDataSg>()?;
+ if tr.transaction_data.flags & TF_ONE_WAY != 0 {
+ self.transaction(&tr, Self::oneway_transaction_inner);
+ } else {
+ self.transaction(&tr, Self::transaction_inner);
+ }
+ }
+ BC_REPLY => {
+ let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
+ self.transaction(&tr, Self::reply_inner)
+ }
+ BC_REPLY_SG => {
+ let tr = reader.read::<BinderTransactionDataSg>()?;
+ self.transaction(&tr, Self::reply_inner)
+ }
+ BC_FREE_BUFFER => {
+ let buffer = self.process.buffer_get(reader.read()?);
+ if let Some(buffer) = buffer {
+ if buffer.looper_need_return_on_free() {
+ self.inner.lock().looper_need_return = true;
+ }
+ drop(buffer);
+ }
+ }
+ BC_INCREFS => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, true, false)?
+ }
+ BC_ACQUIRE => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, true, true)?
+ }
+ BC_RELEASE => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, false, true)?
+ }
+ BC_DECREFS => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, false, false)?
+ }
+ BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
+ BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
+ BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
+ BC_CLEAR_DEATH_NOTIFICATION => self.process.clear_death(&mut reader, self)?,
+ BC_DEAD_BINDER_DONE => self.process.dead_binder_done(reader.read()?, self),
+ BC_REGISTER_LOOPER => {
+ let valid = self.process.register_thread();
+ self.inner.lock().looper_register(valid);
+ }
+ BC_ENTER_LOOPER => self.inner.lock().looper_enter(),
+ BC_EXIT_LOOPER => self.inner.lock().looper_exit(),
+ BC_REQUEST_FREEZE_NOTIFICATION => self.process.request_freeze_notif(&mut reader)?,
+ BC_CLEAR_FREEZE_NOTIFICATION => self.process.clear_freeze_notif(&mut reader)?,
+ BC_FREEZE_NOTIFICATION_DONE => self.process.freeze_notif_done(&mut reader)?,
+
+ // Fail if given an unknown command.
+ // BC_ATTEMPT_ACQUIRE and BC_ACQUIRE_RESULT are no longer supported.
+ _ => return Err(EINVAL),
+ }
+ // Update the number of write bytes consumed.
+ req.write_consumed += (before - reader.len()) as u64;
+ }
+
+ Ok(())
+ }
+
+ fn read(self: &Arc<Self>, req: &mut BinderWriteRead, wait: bool) -> Result {
+ let read_start = req.read_buffer.wrapping_add(req.read_consumed);
+ let read_len = req.read_size.saturating_sub(req.read_consumed);
+ let mut writer = BinderReturnWriter::new(
+ UserSlice::new(UserPtr::from_addr(read_start as _), read_len as _).writer(),
+ self,
+ );
+ let (in_pool, use_proc_queue) = {
+ let inner = self.inner.lock();
+ (inner.is_looper(), inner.should_use_process_work_queue())
+ };
+
+ let getter = if use_proc_queue {
+ Self::get_work
+ } else {
+ Self::get_work_local
+ };
+
+ // Reserve some room at the beginning of the read buffer so that we can send a
+ // BR_SPAWN_LOOPER if we need to.
+ let mut has_noop_placeholder = false;
+ if req.read_consumed == 0 {
+ if let Err(err) = writer.write_code(BR_NOOP) {
+ pr_warn!("Failure when writing BR_NOOP at beginning of buffer.");
+ return Err(err);
+ }
+ has_noop_placeholder = true;
+ }
+
+ // Loop doing work while there is room in the buffer.
+ let initial_len = writer.len();
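+ // The extra 4 bytes make room for the u32 return code that precedes the payload;
+ // binder_transaction_data_secctx is the largest payload that may be written.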
+ while writer.len() >= size_of::<uapi::binder_transaction_data_secctx>() + 4 {
+ match getter(self, wait && initial_len == writer.len()) {
+ Ok(Some(work)) => match work.into_arc().do_work(self, &mut writer) {
+ Ok(true) => {}
+ Ok(false) => break,
+ Err(err) => {
+ return Err(err);
+ }
+ },
+ Ok(None) => {
+ break;
+ }
+ Err(err) => {
+ // Propagate the error if we haven't written anything else.
+ if err != EINTR && err != EAGAIN {
+ pr_warn!("Failure in work getter: {:?}", err);
+ }
+ if initial_len == writer.len() {
+ return Err(err);
+ } else {
+ break;
+ }
+ }
+ }
+ }
+
+ req.read_consumed += read_len - writer.len() as u64;
+
+ // Write BR_SPAWN_LOOPER if the process needs more threads for its pool.
+ if has_noop_placeholder && in_pool && self.process.needs_thread() {
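+ // Overwrite the BR_NOOP placeholder at the start of the read buffer.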
+ let mut writer =
+ UserSlice::new(UserPtr::from_addr(req.read_buffer as _), req.read_size as _)
+ .writer();
+ writer.write(&BR_SPAWN_LOOPER)?;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn write_read(self: &Arc<Self>, data: UserSlice, wait: bool) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+ let mut req = reader.read::<BinderWriteRead>()?;
+
+ // Go through the write buffer.
+ let mut ret = Ok(());
+ if req.write_size > 0 {
+ ret = self.write(&mut req);
+ if let Err(err) = ret {
+ pr_warn!(
+ "Write failure {:?} in pid:{}",
+ err,
+ self.process.pid_in_current_ns()
+ );
+ req.read_consumed = 0;
+ writer.write(&req)?;
+ self.inner.lock().looper_need_return = false;
+ return ret;
+ }
+ }
+
+ // Go through the work queue.
+ if req.read_size > 0 {
+ ret = self.read(&mut req, wait);
+ if ret.is_err() && ret != Err(EINTR) {
+ pr_warn!(
+ "Read failure {:?} in pid:{}",
+ ret,
+ self.process.pid_in_current_ns()
+ );
+ }
+ }
+
+ // Write the request back so that the consumed fields are visible to the caller.
+ writer.write(&req)?;
+
+ self.inner.lock().looper_need_return = false;
+
+ ret
+ }
+
+ pub(crate) fn poll(&self, file: &File, table: PollTable<'_>) -> (bool, u32) {
+ table.register_wait(file, &self.work_condvar);
+ let mut inner = self.inner.lock();
+ (inner.should_use_process_work_queue(), inner.poll())
+ }
+
+ /// Makes any in-progress call to `get_work` or `get_work_local` return immediately.
+ pub(crate) fn exit_looper(&self) {
+ let mut inner = self.inner.lock();
+ let should_notify = inner.looper_flags & LOOPER_WAITING != 0;
+ if should_notify {
+ inner.looper_need_return = true;
+ }
+ drop(inner);
+
+ if should_notify {
+ self.work_condvar.notify_one();
+ }
+ }
+
+ pub(crate) fn notify_if_poll_ready(&self, sync: bool) {
+ // Determine if we need to notify. This requires the lock.
+ let inner = self.inner.lock();
+ let notify = inner.looper_flags & LOOPER_POLL != 0 && inner.should_use_process_work_queue();
+ drop(inner);
+
+ // Now that the lock is no longer held, notify the waiters if we have to.
+ if notify {
+ if sync {
+ self.work_condvar.notify_sync();
+ } else {
+ self.work_condvar.notify_one();
+ }
+ }
+ }
+
+ pub(crate) fn release(self: &Arc<Self>) {
+ self.inner.lock().is_dead = true;
+
+ self.unwind_transaction_stack();
+
+ // Cancel all pending work items.
+ while let Ok(Some(work)) = self.get_work_local(false) {
+ work.into_arc().cancel();
+ }
+ }
+}
+
+#[pin_data]
+struct ThreadError {
+ error_code: AtomicU32,
+ #[pin]
+ links_track: AtomicTracker,
+}
+
+impl ThreadError {
+ fn try_new() -> Result<DArc<Self>> {
+ DTRWrap::arc_pin_init(pin_init!(Self {
+ error_code: AtomicU32::new(BR_OK),
+ links_track <- AtomicTracker::new(),
+ }))
+ .map(ListArc::into_arc)
+ }
+
+ fn set_error_code(&self, code: u32) {
+ self.error_code.store(code, Ordering::Relaxed);
+ }
+
+ fn is_unused(&self) -> bool {
+ self.error_code.load(Ordering::Relaxed) == BR_OK
+ }
+}
+
+impl DeliverToRead for ThreadError {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
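+ // Deliver the pending error code and reset the slot to BR_OK so it can be reused.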
+ let code = self.error_code.load(Ordering::Relaxed);
+ self.error_code.store(BR_OK, Ordering::Relaxed);
+ writer.write_code(code)?;
+ Ok(true)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(
+ m,
+ "{}transaction error: {}\n",
+ prefix,
+ self.error_code.load(Ordering::Relaxed)
+ );
+ Ok(())
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for ThreadError {
+ tracked_by links_track: AtomicTracker;
+ }
+}
diff --git a/drivers/android/binder/trace.rs b/drivers/android/binder/trace.rs
new file mode 100644
index 000000000000..af0e4392805e
--- /dev/null
+++ b/drivers/android/binder/trace.rs
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::ffi::{c_uint, c_ulong};
+use kernel::tracepoint::declare_trace;
+
+declare_trace! {
+ unsafe fn rust_binder_ioctl(cmd: c_uint, arg: c_ulong);
+}
+
+#[inline]
+pub(crate) fn trace_ioctl(cmd: u32, arg: usize) {
+ // SAFETY: Always safe to call.
+ unsafe { rust_binder_ioctl(cmd, arg as c_ulong) }
+}
diff --git a/drivers/android/binder/transaction.rs b/drivers/android/binder/transaction.rs
new file mode 100644
index 000000000000..4bd3c0e417eb
--- /dev/null
+++ b/drivers/android/binder/transaction.rs
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use core::sync::atomic::{AtomicBool, Ordering};
+use kernel::{
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ sync::{Arc, SpinLock},
+ task::Kuid,
+ time::{Instant, Monotonic},
+ types::ScopeGuard,
+};
+
+use crate::{
+ allocation::{Allocation, TranslatedFds},
+ defs::*,
+ error::{BinderError, BinderResult},
+ node::{Node, NodeRef},
+ process::{Process, ProcessInner},
+ ptr_align,
+ thread::{PushWorkRes, Thread},
+ BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead,
+};
+
+#[pin_data(PinnedDrop)]
+pub(crate) struct Transaction {
+ pub(crate) debug_id: usize,
+ target_node: Option<DArc<Node>>,
+ pub(crate) from_parent: Option<DArc<Transaction>>,
+ pub(crate) from: Arc<Thread>,
+ pub(crate) to: Arc<Process>,
+ #[pin]
+ allocation: SpinLock<Option<Allocation>>,
+ is_outstanding: AtomicBool,
+ code: u32,
+ pub(crate) flags: u32,
+ data_size: usize,
+ offsets_size: usize,
+ data_address: usize,
+ sender_euid: Kuid,
+ txn_security_ctx_off: Option<usize>,
+ pub(crate) oneway_spam_detected: bool,
+ start_time: Instant<Monotonic>,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Transaction { untracked; }
+}
+
+impl Transaction {
+ pub(crate) fn new(
+ node_ref: NodeRef,
+ from_parent: Option<DArc<Transaction>>,
+ from: &Arc<Thread>,
+ tr: &BinderTransactionDataSg,
+ ) -> BinderResult<DLArc<Self>> {
+ let debug_id = super::next_debug_id();
+ let trd = &tr.transaction_data;
+ let allow_fds = node_ref.node.flags & FLAT_BINDER_FLAG_ACCEPTS_FDS != 0;
+ let txn_security_ctx = node_ref.node.flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX != 0;
+ let mut txn_security_ctx_off = if txn_security_ctx { Some(0) } else { None };
+ let to = node_ref.node.owner.clone();
+ let mut alloc = match from.copy_transaction_data(
+ to.clone(),
+ tr,
+ debug_id,
+ allow_fds,
+ txn_security_ctx_off.as_mut(),
+ ) {
+ Ok(alloc) => alloc,
+ Err(err) => {
+ if !err.is_dead() {
+ pr_warn!("Failure in copy_transaction_data: {:?}", err);
+ }
+ return Err(err);
+ }
+ };
+ let oneway_spam_detected = alloc.oneway_spam_detected;
+ if trd.flags & TF_ONE_WAY != 0 {
+ if from_parent.is_some() {
+ pr_warn!("Oneway transaction should not be in a transaction stack.");
+ return Err(EINVAL.into());
+ }
+ alloc.set_info_oneway_node(node_ref.node.clone());
+ }
+ if trd.flags & TF_CLEAR_BUF != 0 {
+ alloc.set_info_clear_on_drop();
+ }
+ let target_node = node_ref.node.clone();
+ alloc.set_info_target_node(node_ref);
+ let data_address = alloc.ptr;
+
+ Ok(DTRWrap::arc_pin_init(pin_init!(Transaction {
+ debug_id,
+ target_node: Some(target_node),
+ from_parent,
+ sender_euid: from.process.task.euid(),
+ from: from.clone(),
+ to,
+ code: trd.code,
+ flags: trd.flags,
+ data_size: trd.data_size as _,
+ offsets_size: trd.offsets_size as _,
+ data_address,
+ allocation <- kernel::new_spinlock!(Some(alloc.success()), "Transaction::new"),
+ is_outstanding: AtomicBool::new(false),
+ txn_security_ctx_off,
+ oneway_spam_detected,
+ start_time: Instant::now(),
+ }))?)
+ }
+
+ pub(crate) fn new_reply(
+ from: &Arc<Thread>,
+ to: Arc<Process>,
+ tr: &BinderTransactionDataSg,
+ allow_fds: bool,
+ ) -> BinderResult<DLArc<Self>> {
+ let debug_id = super::next_debug_id();
+ let trd = &tr.transaction_data;
+ let mut alloc = match from.copy_transaction_data(to.clone(), tr, debug_id, allow_fds, None)
+ {
+ Ok(alloc) => alloc,
+ Err(err) => {
+ pr_warn!("Failure in copy_transaction_data: {:?}", err);
+ return Err(err);
+ }
+ };
+ let oneway_spam_detected = alloc.oneway_spam_detected;
+ if trd.flags & TF_CLEAR_BUF != 0 {
+ alloc.set_info_clear_on_drop();
+ }
+ Ok(DTRWrap::arc_pin_init(pin_init!(Transaction {
+ debug_id,
+ target_node: None,
+ from_parent: None,
+ sender_euid: from.process.task.euid(),
+ from: from.clone(),
+ to,
+ code: trd.code,
+ flags: trd.flags,
+ data_size: trd.data_size as _,
+ offsets_size: trd.offsets_size as _,
+ data_address: alloc.ptr,
+ allocation <- kernel::new_spinlock!(Some(alloc.success()), "Transaction::new"),
+ is_outstanding: AtomicBool::new(false),
+ txn_security_ctx_off: None,
+ oneway_spam_detected,
+ start_time: Instant::now(),
+ }))?)
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print_inner(&self, m: &SeqFile, prefix: &str) {
+ seq_print!(
+ m,
+ "{}{}: from {}:{} to {} code {:x} flags {:x} elapsed {}ms",
+ prefix,
+ self.debug_id,
+ self.from.process.task.pid(),
+ self.from.id,
+ self.to.task.pid(),
+ self.code,
+ self.flags,
+ self.start_time.elapsed().as_millis(),
+ );
+ if let Some(target_node) = &self.target_node {
+ seq_print!(m, " node {}", target_node.debug_id);
+ }
+ seq_print!(m, " size {}:{}\n", self.data_size, self.offsets_size);
+ }
+
+ /// Determines if the transaction is stacked on top of the given transaction.
+ pub(crate) fn is_stacked_on(&self, onext: &Option<DArc<Self>>) -> bool {
+ match (&self.from_parent, onext) {
+ (None, None) => true,
+ (Some(from_parent), Some(next)) => Arc::ptr_eq(from_parent, next),
+ _ => false,
+ }
+ }
+
+ /// Returns a pointer to the next transaction on the transaction stack, if there is one.
+ pub(crate) fn clone_next(&self) -> Option<DArc<Self>> {
+ Some(self.from_parent.as_ref()?.clone())
+ }
+
+ /// Searches in the transaction stack for a thread that belongs to the target process. This is
+ /// useful when finding a target for a new transaction: if the node belongs to a process that
+ /// is already part of the transaction stack, we reuse the thread.
+ fn find_target_thread(&self) -> Option<Arc<Thread>> {
+ let mut it = &self.from_parent;
+ while let Some(transaction) = it {
+ if Arc::ptr_eq(&transaction.from.process, &self.to) {
+ return Some(transaction.from.clone());
+ }
+ it = &transaction.from_parent;
+ }
+ None
+ }
+
+ /// Searches in the transaction stack for a transaction originating at the given thread.
+ pub(crate) fn find_from(&self, thread: &Thread) -> Option<&DArc<Transaction>> {
+ let mut it = &self.from_parent;
+ while let Some(transaction) = it {
+ if core::ptr::eq(thread, transaction.from.as_ref()) {
+ return Some(transaction);
+ }
+
+ it = &transaction.from_parent;
+ }
+ None
+ }
+
+ pub(crate) fn set_outstanding(&self, to_process: &mut ProcessInner) {
+ // No race because this method is only called once.
+ if !self.is_outstanding.load(Ordering::Relaxed) {
+ self.is_outstanding.store(true, Ordering::Relaxed);
+ to_process.add_outstanding_txn();
+ }
+ }
+
+ /// Decrement `outstanding_txns` in `to` if it hasn't already been decremented.
+ fn drop_outstanding_txn(&self) {
+ // No race because this is called at most twice, and one of the calls is in the
+ // destructor, which is guaranteed to not race with any other operations on the
+ // transaction. It also cannot race with `set_outstanding`, since submission happens
+ // before delivery.
+ if self.is_outstanding.load(Ordering::Relaxed) {
+ self.is_outstanding.store(false, Ordering::Relaxed);
+ self.to.drop_outstanding_txn();
+ }
+ }
+
+ /// Submits the transaction to a work queue. Uses a thread if there is one in the transaction
+ /// stack, otherwise uses the destination process.
+ ///
+ /// Not used for replies.
+ pub(crate) fn submit(self: DLArc<Self>) -> BinderResult {
+ // Defined before `process_inner` so that the destructor runs after releasing the lock.
+ let mut _t_outdated;
+
+ let oneway = self.flags & TF_ONE_WAY != 0;
+ let process = self.to.clone();
+ let mut process_inner = process.inner.lock();
+
+ self.set_outstanding(&mut process_inner);
+
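+ // Oneway transactions are queued on the target node; synchronous ones go to a
+ // thread from the transaction stack if possible, or to the process work queue.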
+ if oneway {
+ if let Some(target_node) = self.target_node.clone() {
+ if process_inner.is_frozen.is_frozen() {
+ process_inner.async_recv = true;
+ if self.flags & TF_UPDATE_TXN != 0 {
+ if let Some(t_outdated) =
+ target_node.take_outdated_transaction(&self, &mut process_inner)
+ {
+ // Save the transaction to be dropped after locks are released.
+ _t_outdated = t_outdated;
+ }
+ }
+ }
+ match target_node.submit_oneway(self, &mut process_inner) {
+ Ok(()) => {}
+ Err((err, work)) => {
+ drop(process_inner);
+ // Drop work after releasing process lock.
+ drop(work);
+ return Err(err);
+ }
+ }
+
+ if process_inner.is_frozen.is_frozen() {
+ return Err(BinderError::new_frozen_oneway());
+ } else {
+ return Ok(());
+ }
+ } else {
+ pr_err!("Failed to submit oneway transaction to node.");
+ }
+ }
+
+ if process_inner.is_frozen.is_frozen() {
+ process_inner.sync_recv = true;
+ return Err(BinderError::new_frozen());
+ }
+
+ let res = if let Some(thread) = self.find_target_thread() {
+ match thread.push_work(self) {
+ PushWorkRes::Ok => Ok(()),
+ PushWorkRes::FailedDead(me) => Err((BinderError::new_dead(), me)),
+ }
+ } else {
+ process_inner.push_work(self)
+ };
+ drop(process_inner);
+
+ match res {
+ Ok(()) => Ok(()),
+ Err((err, work)) => {
+ // Drop work after releasing process lock.
+ drop(work);
+ Err(err)
+ }
+ }
+ }
+
+ /// Check whether one oneway transaction can supersede another.
+ pub(crate) fn can_replace(&self, old: &Transaction) -> bool {
+ if self.from.process.task.pid() != old.from.process.task.pid() {
+ return false;
+ }
+
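+ // A replacement is only possible when both transactions have TF_ONE_WAY and
+ // TF_UPDATE_TXN set.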
+ if self.flags & old.flags & (TF_ONE_WAY | TF_UPDATE_TXN) != (TF_ONE_WAY | TF_UPDATE_TXN) {
+ return false;
+ }
+
+ let target_node_match = match (self.target_node.as_ref(), old.target_node.as_ref()) {
+ (None, None) => true,
+ (Some(tn1), Some(tn2)) => Arc::ptr_eq(tn1, tn2),
+ _ => false,
+ };
+
+ self.code == old.code && self.flags == old.flags && target_node_match
+ }
+
+ fn prepare_file_list(&self) -> Result<TranslatedFds> {
+ let mut alloc = self.allocation.lock().take().ok_or(ESRCH)?;
+
+ match alloc.translate_fds() {
+ Ok(translated) => {
+ *self.allocation.lock() = Some(alloc);
+ Ok(translated)
+ }
+ Err(err) => {
+ // Free the allocation eagerly.
+ drop(alloc);
+ Err(err)
+ }
+ }
+ }
+}
+
+impl DeliverToRead for Transaction {
+ fn do_work(
+ self: DArc<Self>,
+ thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let send_failed_reply = ScopeGuard::new(|| {
+ if self.target_node.is_some() && self.flags & TF_ONE_WAY == 0 {
+ let reply = Err(BR_FAILED_REPLY);
+ self.from.deliver_reply(reply, &self);
+ }
+ self.drop_outstanding_txn();
+ });
+
+ let files = if let Ok(list) = self.prepare_file_list() {
+ list
+ } else {
+ // On failure to process the list, we send a reply back to the sender and ignore the
+ // transaction on the recipient.
+ return Ok(true);
+ };
+
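+ // Fill in the transaction data that will be copied into the recipient's read buffer.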
+ let mut tr_sec = BinderTransactionDataSecctx::default();
+ let tr = tr_sec.tr_data();
+ if let Some(target_node) = &self.target_node {
+ let (ptr, cookie) = target_node.get_id();
+ tr.target.ptr = ptr as _;
+ tr.cookie = cookie as _;
+ }
+ tr.code = self.code;
+ tr.flags = self.flags;
+ tr.data_size = self.data_size as _;
+ tr.data.ptr.buffer = self.data_address as _;
+ tr.offsets_size = self.offsets_size as _;
+ if tr.offsets_size > 0 {
+ tr.data.ptr.offsets = (self.data_address + ptr_align(self.data_size).unwrap()) as _;
+ }
+ tr.sender_euid = self.sender_euid.into_uid_in_current_ns();
+ tr.sender_pid = 0;
+ if self.target_node.is_some() && self.flags & TF_ONE_WAY == 0 {
+ // Not a reply and not one-way.
+ tr.sender_pid = self.from.process.pid_in_current_ns();
+ }
+ let code = if self.target_node.is_none() {
+ BR_REPLY
+ } else if self.txn_security_ctx_off.is_some() {
+ BR_TRANSACTION_SEC_CTX
+ } else {
+ BR_TRANSACTION
+ };
+
+ // Write the transaction code and data to the user buffer.
+ writer.write_code(code)?;
+ if let Some(off) = self.txn_security_ctx_off {
+ tr_sec.secctx = (self.data_address + off) as u64;
+ writer.write_payload(&tr_sec)?;
+ } else {
+ writer.write_payload(&*tr)?;
+ }
+
+ let mut alloc = self.allocation.lock().take().ok_or(ESRCH)?;
+
+ // Dismiss the guard that would send a failed reply to the sender. No failure paths
+ // are allowed from here on out.
+ send_failed_reply.dismiss();
+
+ // Commit files, and set FDs in FDA to be closed on buffer free.
+ let close_on_free = files.commit();
+ alloc.set_info_close_on_free(close_on_free);
+
+ // It is now the user's responsibility to clear the allocation.
+ alloc.keep_alive();
+
+ self.drop_outstanding_txn();
+
+ // When this is not a reply and not a oneway transaction, update `current_transaction`. If
+ // it's a reply, `current_transaction` has already been updated appropriately.
+ if self.target_node.is_some() && tr_sec.transaction_data.flags & TF_ONE_WAY == 0 {
+ thread.set_current_transaction(self);
+ }
+
+ Ok(false)
+ }
+
+ fn cancel(self: DArc<Self>) {
+ let allocation = self.allocation.lock().take();
+ drop(allocation);
+
+ // If this is not a reply or oneway transaction, then send a dead reply.
+ if self.target_node.is_some() && self.flags & TF_ONE_WAY == 0 {
+ let reply = Err(BR_DEAD_REPLY);
+ self.from.deliver_reply(reply, &self);
+ }
+
+ self.drop_outstanding_txn();
+ }
+
+ fn should_sync_wakeup(&self) -> bool {
+ self.flags & TF_ONE_WAY == 0
+ }
+
+ fn debug_print(&self, m: &SeqFile, _prefix: &str, tprefix: &str) -> Result<()> {
+ self.debug_print_inner(m, tprefix);
+ Ok(())
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for Transaction {
+ fn drop(self: Pin<&mut Self>) {
+ self.drop_outstanding_txn();
+ }
+}
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index fcfaf1b899c8..979c96b74cad 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -23,10 +23,11 @@
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
+#include <kunit/visibility.h>
#include "binder_alloc.h"
#include "binder_trace.h"
-struct list_lru binder_freelist;
+static struct list_lru binder_freelist;
static DEFINE_MUTEX(binder_alloc_mmap_lock);
@@ -57,13 +58,14 @@ static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}
-static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
- struct binder_buffer *buffer)
+VISIBLE_IF_KUNIT size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
return alloc->vm_start + alloc->buffer_size - buffer->user_data;
return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_buffer_size);
static void binder_insert_free_buffer(struct binder_alloc *alloc,
struct binder_buffer *new_buffer)
@@ -167,12 +169,8 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
unsigned long user_ptr)
{
- struct binder_buffer *buffer;
-
- mutex_lock(&alloc->mutex);
- buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
- mutex_unlock(&alloc->mutex);
- return buffer;
+ guard(mutex)(&alloc->mutex);
+ return binder_alloc_prepare_to_free_locked(alloc, user_ptr);
}
static inline void
@@ -210,7 +208,7 @@ static void binder_lru_freelist_add(struct binder_alloc *alloc,
trace_binder_free_lru_start(alloc, index);
- ret = list_lru_add(&binder_freelist,
+ ret = list_lru_add(alloc->freelist,
page_to_lru(page),
page_to_nid(page),
NULL);
@@ -409,7 +407,7 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
if (page) {
trace_binder_alloc_lru_start(alloc, index);
- on_lru = list_lru_del(&binder_freelist,
+ on_lru = list_lru_del(alloc->freelist,
page_to_lru(page),
page_to_nid(page),
NULL);
@@ -699,6 +697,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
out:
return buffer;
}
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_new_buf);
static unsigned long buffer_start_page(struct binder_buffer *buffer)
{
@@ -877,6 +876,7 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
binder_free_buf_locked(alloc, buffer);
mutex_unlock(&alloc->mutex);
}
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_free_buf);
/**
* binder_alloc_mmap_handler() - map virtual address space for proc
@@ -959,7 +959,7 @@ err_invalid_mm:
failure_string, ret);
return ret;
}
-
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_mmap_handler);
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
@@ -1007,7 +1007,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
if (!page)
continue;
- on_lru = list_lru_del(&binder_freelist,
+ on_lru = list_lru_del(alloc->freelist,
page_to_lru(page),
page_to_nid(page),
NULL);
@@ -1028,6 +1028,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
"%s: %d buffers %d, pages %d\n",
__func__, alloc->pid, buffers, page_count);
}
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_deferred_release);
/**
* binder_alloc_print_allocated() - print buffer info
@@ -1043,7 +1044,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
struct binder_buffer *buffer;
struct rb_node *n;
- mutex_lock(&alloc->mutex);
+ guard(mutex)(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n",
@@ -1053,7 +1054,6 @@ void binder_alloc_print_allocated(struct seq_file *m,
buffer->extra_buffers_size,
buffer->transaction ? "active" : "delivered");
}
- mutex_unlock(&alloc->mutex);
}
/**
@@ -1102,10 +1102,9 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
struct rb_node *n;
int count = 0;
- mutex_lock(&alloc->mutex);
+ guard(mutex)(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
count++;
- mutex_unlock(&alloc->mutex);
return count;
}
@@ -1122,6 +1121,7 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
{
binder_alloc_set_mapped(alloc, false);
}
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_vma_close);
/**
* binder_alloc_free_page() - shrinker callback to free pages
@@ -1213,6 +1213,7 @@ err_mmap_read_lock_failed:
err_mmget:
return LRU_SKIP;
}
+EXPORT_SYMBOL_IF_KUNIT(binder_alloc_free_page);
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
@@ -1229,6 +1230,18 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
static struct shrinker *binder_shrinker;
+VISIBLE_IF_KUNIT void __binder_alloc_init(struct binder_alloc *alloc,
+ struct list_lru *freelist)
+{
+ alloc->pid = current->group_leader->pid;
+ alloc->mm = current->mm;
+ mmgrab(alloc->mm);
+ mutex_init(&alloc->mutex);
+ INIT_LIST_HEAD(&alloc->buffers);
+ alloc->freelist = freelist;
+}
+EXPORT_SYMBOL_IF_KUNIT(__binder_alloc_init);
+
/**
* binder_alloc_init() - called by binder_open() for per-proc initialization
* @alloc: binder_alloc for this proc
@@ -1238,11 +1251,7 @@ static struct shrinker *binder_shrinker;
*/
void binder_alloc_init(struct binder_alloc *alloc)
{
- alloc->pid = current->group_leader->pid;
- alloc->mm = current->mm;
- mmgrab(alloc->mm);
- mutex_init(&alloc->mutex);
- INIT_LIST_HEAD(&alloc->buffers);
+ __binder_alloc_init(alloc, &binder_freelist);
}
int binder_alloc_shrinker_init(void)
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index feecd7414241..d6f1f6f2d00e 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -15,7 +15,6 @@
#include <linux/list_lru.h>
#include <uapi/linux/android/binder.h>
-extern struct list_lru binder_freelist;
struct binder_transaction;
/**
@@ -91,6 +90,7 @@ static inline struct list_head *page_to_lru(struct page *p)
* @free_async_space: VA space available for async buffers. This is
* initialized at mmap time to 1/2 the full VA space
* @pages: array of struct page *
+ * @freelist: lru list to use for free pages (invariant after init)
* @buffer_size: size of address space specified via mmap
* @pid: pid for associated binder_proc (invariant after init)
* @pages_high: high watermark of offset in @pages
@@ -113,6 +113,7 @@ struct binder_alloc {
struct rb_root allocated_buffers;
size_t free_async_space;
struct page **pages;
+ struct list_lru *freelist;
size_t buffer_size;
int pid;
size_t pages_high;
@@ -120,11 +121,6 @@ struct binder_alloc {
bool oneway_spam_detected;
};
-#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
-void binder_selftest_alloc(struct binder_alloc *alloc);
-#else
-static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
-#endif
enum lru_status binder_alloc_free_page(struct list_head *item,
struct list_lru_one *lru,
void *cb_arg);
@@ -160,12 +156,8 @@ void binder_alloc_print_pages(struct seq_file *m,
static inline size_t
binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
- size_t free_async_space;
-
- mutex_lock(&alloc->mutex);
- free_async_space = alloc->free_async_space;
- mutex_unlock(&alloc->mutex);
- return free_async_space;
+ guard(mutex)(&alloc->mutex);
+ return alloc->free_async_space;
}
unsigned long
@@ -187,5 +179,11 @@ int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
binder_size_t buffer_offset,
size_t bytes);
+#if IS_ENABLED(CONFIG_KUNIT)
+void __binder_alloc_init(struct binder_alloc *alloc, struct list_lru *freelist);
+size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
+ struct binder_buffer *buffer);
+#endif
+
#endif /* _LINUX_BINDER_ALLOC_H */
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
deleted file mode 100644
index c88735c54848..000000000000
--- a/drivers/android/binder_alloc_selftest.c
+++ /dev/null
@@ -1,306 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* binder_alloc_selftest.c
- *
- * Android IPC Subsystem
- *
- * Copyright (C) 2017 Google, Inc.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/mm_types.h>
-#include <linux/err.h>
-#include "binder_alloc.h"
-
-#define BUFFER_NUM 5
-#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
-
-static bool binder_selftest_run = true;
-static int binder_selftest_failures;
-static DEFINE_MUTEX(binder_selftest_lock);
-
-/**
- * enum buf_end_align_type - Page alignment of a buffer
- * end with regard to the end of the previous buffer.
- *
- * In the pictures below, buf2 refers to the buffer we
- * are aligning. buf1 refers to previous buffer by addr.
- * Symbol [ means the start of a buffer, ] means the end
- * of a buffer, and | means page boundaries.
- */
-enum buf_end_align_type {
- /**
- * @SAME_PAGE_UNALIGNED: The end of this buffer is on
- * the same page as the end of the previous buffer and
- * is not page aligned. Examples:
- * buf1 ][ buf2 ][ ...
- * buf1 ]|[ buf2 ][ ...
- */
- SAME_PAGE_UNALIGNED = 0,
- /**
- * @SAME_PAGE_ALIGNED: When the end of the previous buffer
- * is not page aligned, the end of this buffer is on the
- * same page as the end of the previous buffer and is page
- * aligned. When the previous buffer is page aligned, the
- * end of this buffer is aligned to the next page boundary.
- * Examples:
- * buf1 ][ buf2 ]| ...
- * buf1 ]|[ buf2 ]| ...
- */
- SAME_PAGE_ALIGNED,
- /**
- * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
- * the page next to the end of the previous buffer and
- * is not page aligned. Examples:
- * buf1 ][ buf2 | buf2 ][ ...
- * buf1 ]|[ buf2 | buf2 ][ ...
- */
- NEXT_PAGE_UNALIGNED,
- /**
- * @NEXT_PAGE_ALIGNED: The end of this buffer is on
- * the page next to the end of the previous buffer and
- * is page aligned. Examples:
- * buf1 ][ buf2 | buf2 ]| ...
- * buf1 ]|[ buf2 | buf2 ]| ...
- */
- NEXT_PAGE_ALIGNED,
- /**
- * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
- * the page that follows the page after the end of the
- * previous buffer and is not page aligned. Examples:
- * buf1 ][ buf2 | buf2 | buf2 ][ ...
- * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
- */
- NEXT_NEXT_UNALIGNED,
- /**
- * @LOOP_END: The number of enum values in &buf_end_align_type.
- * It is used for controlling loop termination.
- */
- LOOP_END,
-};
-
-static void pr_err_size_seq(size_t *sizes, int *seq)
-{
- int i;
-
- pr_err("alloc sizes: ");
- for (i = 0; i < BUFFER_NUM; i++)
- pr_cont("[%zu]", sizes[i]);
- pr_cont("\n");
- pr_err("free seq: ");
- for (i = 0; i < BUFFER_NUM; i++)
- pr_cont("[%d]", seq[i]);
- pr_cont("\n");
-}
-
-static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
- struct binder_buffer *buffer,
- size_t size)
-{
- unsigned long page_addr;
- unsigned long end;
- int page_index;
-
- end = PAGE_ALIGN(buffer->user_data + size);
- page_addr = buffer->user_data;
- for (; page_addr < end; page_addr += PAGE_SIZE) {
- page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
- if (!alloc->pages[page_index] ||
- !list_empty(page_to_lru(alloc->pages[page_index]))) {
- pr_err("expect alloc but is %s at page index %d\n",
- alloc->pages[page_index] ?
- "lru" : "free", page_index);
- return false;
- }
- }
- return true;
-}
-
-static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
- struct binder_buffer *buffers[],
- size_t *sizes, int *seq)
-{
- int i;
-
- for (i = 0; i < BUFFER_NUM; i++) {
- buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
- if (IS_ERR(buffers[i]) ||
- !check_buffer_pages_allocated(alloc, buffers[i],
- sizes[i])) {
- pr_err_size_seq(sizes, seq);
- binder_selftest_failures++;
- }
- }
-}
-
-static void binder_selftest_free_buf(struct binder_alloc *alloc,
- struct binder_buffer *buffers[],
- size_t *sizes, int *seq, size_t end)
-{
- int i;
-
- for (i = 0; i < BUFFER_NUM; i++)
- binder_alloc_free_buf(alloc, buffers[seq[i]]);
-
- for (i = 0; i < end / PAGE_SIZE; i++) {
- /**
- * Error message on a free page can be false positive
- * if binder shrinker ran during binder_alloc_free_buf
- * calls above.
- */
- if (list_empty(page_to_lru(alloc->pages[i]))) {
- pr_err_size_seq(sizes, seq);
- pr_err("expect lru but is %s at page index %d\n",
- alloc->pages[i] ? "alloc" : "free", i);
- binder_selftest_failures++;
- }
- }
-}
-
-static void binder_selftest_free_page(struct binder_alloc *alloc)
-{
- int i;
- unsigned long count;
-
- while ((count = list_lru_count(&binder_freelist))) {
- list_lru_walk(&binder_freelist, binder_alloc_free_page,
- NULL, count);
- }
-
- for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
- if (alloc->pages[i]) {
- pr_err("expect free but is %s at page index %d\n",
- list_empty(page_to_lru(alloc->pages[i])) ?
- "alloc" : "lru", i);
- binder_selftest_failures++;
- }
- }
-}
-
-static void binder_selftest_alloc_free(struct binder_alloc *alloc,
- size_t *sizes, int *seq, size_t end)
-{
- struct binder_buffer *buffers[BUFFER_NUM];
-
- binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
- binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
-
- /* Allocate from lru. */
- binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
- if (list_lru_count(&binder_freelist))
- pr_err("lru list should be empty but is not\n");
-
- binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
- binder_selftest_free_page(alloc);
-}
-
-static bool is_dup(int *seq, int index, int val)
-{
- int i;
-
- for (i = 0; i < index; i++) {
- if (seq[i] == val)
- return true;
- }
- return false;
-}
-
-/* Generate BUFFER_NUM factorial free orders. */
-static void binder_selftest_free_seq(struct binder_alloc *alloc,
- size_t *sizes, int *seq,
- int index, size_t end)
-{
- int i;
-
- if (index == BUFFER_NUM) {
- binder_selftest_alloc_free(alloc, sizes, seq, end);
- return;
- }
- for (i = 0; i < BUFFER_NUM; i++) {
- if (is_dup(seq, index, i))
- continue;
- seq[index] = i;
- binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
- }
-}
-
-static void binder_selftest_alloc_size(struct binder_alloc *alloc,
- size_t *end_offset)
-{
- int i;
- int seq[BUFFER_NUM] = {0};
- size_t front_sizes[BUFFER_NUM];
- size_t back_sizes[BUFFER_NUM];
- size_t last_offset, offset = 0;
-
- for (i = 0; i < BUFFER_NUM; i++) {
- last_offset = offset;
- offset = end_offset[i];
- front_sizes[i] = offset - last_offset;
- back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
- }
- /*
- * Buffers share the first or last few pages.
- * Only BUFFER_NUM - 1 buffer sizes are adjustable since
- * we need one giant buffer before getting to the last page.
- */
- back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
- binder_selftest_free_seq(alloc, front_sizes, seq, 0,
- end_offset[BUFFER_NUM - 1]);
- binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
-}
-
-static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
- size_t *end_offset, int index)
-{
- int align;
- size_t end, prev;
-
- if (index == BUFFER_NUM) {
- binder_selftest_alloc_size(alloc, end_offset);
- return;
- }
- prev = index == 0 ? 0 : end_offset[index - 1];
- end = prev;
-
- BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
-
- for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
- if (align % 2)
- end = ALIGN(end, PAGE_SIZE);
- else
- end += BUFFER_MIN_SIZE;
- end_offset[index] = end;
- binder_selftest_alloc_offset(alloc, end_offset, index + 1);
- }
-}
-
-/**
- * binder_selftest_alloc() - Test alloc and free of buffer pages.
- * @alloc: Pointer to alloc struct.
- *
- * Allocate BUFFER_NUM buffers to cover all page alignment cases,
- * then free them in all orders possible. Check that pages are
- * correctly allocated, put onto lru when buffers are freed, and
- * are freed when binder_alloc_free_page is called.
- */
-void binder_selftest_alloc(struct binder_alloc *alloc)
-{
- size_t end_offset[BUFFER_NUM];
-
- if (!binder_selftest_run)
- return;
- mutex_lock(&binder_selftest_lock);
- if (!binder_selftest_run || !alloc->mapped)
- goto done;
- pr_info("STARTED\n");
- binder_selftest_alloc_offset(alloc, end_offset, 0);
- binder_selftest_run = false;
- if (binder_selftest_failures > 0)
- pr_info("%d tests FAILED\n", binder_selftest_failures);
- else
- pr_info("PASSED\n");
-
-done:
- mutex_unlock(&binder_selftest_lock);
-}
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 1ba5caf1d88d..342574bfd28a 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -81,7 +81,6 @@ extern bool is_binderfs_device(const struct inode *inode);
extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
const struct file_operations *fops,
void *data);
-extern void binderfs_remove_file(struct dentry *dentry);
#else
static inline bool is_binderfs_device(const struct inode *inode)
{
@@ -94,7 +93,6 @@ static inline struct dentry *binderfs_create_file(struct dentry *dir,
{
return NULL;
}
-static inline void binderfs_remove_file(struct dentry *dentry) {}
#endif
#ifdef CONFIG_ANDROID_BINDERFS
@@ -539,8 +537,8 @@ struct binder_transaction {
struct binder_proc *to_proc;
struct binder_thread *to_thread;
struct binder_transaction *to_parent;
- unsigned need_reply:1;
- /* unsigned is_dead:1; */ /* not used at the moment */
+ unsigned is_async:1;
+ unsigned is_reply:1;
struct binder_buffer *buffer;
unsigned int code;
@@ -592,4 +590,8 @@ void binder_add_device(struct binder_device *device);
*/
void binder_remove_device(struct binder_device *device);
+#if IS_ENABLED(CONFIG_KUNIT)
+vm_fault_t binder_vm_fault(struct vm_fault *vmf);
+#endif
+
#endif /* _LINUX_BINDER_INTERNAL_H */
diff --git a/drivers/android/binder_netlink.c b/drivers/android/binder_netlink.c
new file mode 100644
index 000000000000..81e8432b5904
--- /dev/null
+++ b/drivers/android/binder_netlink.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/binder.yaml */
+/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "binder_netlink.h"
+
+#include <uapi/linux/android/binder_netlink.h>
+
+/* Ops table for binder */
+static const struct genl_split_ops binder_nl_ops[] = {
+};
+
+static const struct genl_multicast_group binder_nl_mcgrps[] = {
+ [BINDER_NLGRP_REPORT] = { "report", },
+};
+
+struct genl_family binder_nl_family __ro_after_init = {
+ .name = BINDER_FAMILY_NAME,
+ .version = BINDER_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = binder_nl_ops,
+ .n_split_ops = ARRAY_SIZE(binder_nl_ops),
+ .mcgrps = binder_nl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(binder_nl_mcgrps),
+};
diff --git a/drivers/android/binder_netlink.h b/drivers/android/binder_netlink.h
new file mode 100644
index 000000000000..57399942a5e3
--- /dev/null
+++ b/drivers/android/binder_netlink.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/binder.yaml */
+/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
+
+#ifndef _LINUX_BINDER_GEN_H
+#define _LINUX_BINDER_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/android/binder_netlink.h>
+
+enum {
+ BINDER_NLGRP_REPORT,
+};
+
+extern struct genl_family binder_nl_family;
+
+#endif /* _LINUX_BINDER_GEN_H */
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 16de1b9e72f7..fa5eb61cf580 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -34,27 +34,6 @@ TRACE_EVENT(binder_ioctl,
TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg)
);
-DECLARE_EVENT_CLASS(binder_lock_class,
- TP_PROTO(const char *tag),
- TP_ARGS(tag),
- TP_STRUCT__entry(
- __field(const char *, tag)
- ),
- TP_fast_assign(
- __entry->tag = tag;
- ),
- TP_printk("tag=%s", __entry->tag)
-);
-
-#define DEFINE_BINDER_LOCK_EVENT(name) \
-DEFINE_EVENT(binder_lock_class, name, \
- TP_PROTO(const char *func), \
- TP_ARGS(func))
-
-DEFINE_BINDER_LOCK_EVENT(binder_lock);
-DEFINE_BINDER_LOCK_EVENT(binder_locked);
-DEFINE_BINDER_LOCK_EVENT(binder_unlock);
-
DECLARE_EVENT_CLASS(binder_function_return_class,
TP_PROTO(int ret),
TP_ARGS(ret),
@@ -423,6 +402,43 @@ TRACE_EVENT(binder_return,
"unknown")
);
+TRACE_EVENT(binder_netlink_report,
+ TP_PROTO(const char *context,
+ struct binder_transaction *t,
+ u32 data_size,
+ u32 error),
+ TP_ARGS(context, t, data_size, error),
+ TP_STRUCT__entry(
+ __field(const char *, context)
+ __field(u32, error)
+ __field(int, from_pid)
+ __field(int, from_tid)
+ __field(int, to_pid)
+ __field(int, to_tid)
+ __field(bool, is_reply)
+ __field(unsigned int, flags)
+ __field(unsigned int, code)
+ __field(size_t, data_size)
+ ),
+ TP_fast_assign(
+ __entry->context = context;
+ __entry->error = error;
+ __entry->from_pid = t->from_pid;
+ __entry->from_tid = t->from_tid;
+ __entry->to_pid = t->to_proc ? t->to_proc->pid : 0;
+ __entry->to_tid = t->to_thread ? t->to_thread->pid : 0;
+ __entry->is_reply = t->is_reply;
+ __entry->flags = t->flags;
+ __entry->code = t->code;
+ __entry->data_size = data_size;
+ ),
+ TP_printk("from %d:%d to %d:%d context=%s error=%d is_reply=%d flags=0x%x code=0x%x size=%zu",
+ __entry->from_pid, __entry->from_tid,
+ __entry->to_pid, __entry->to_tid,
+ __entry->context, __entry->error, __entry->is_reply,
+ __entry->flags, __entry->code, __entry->data_size)
+);
+
#endif /* _BINDER_TRACE_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 024275dbfdd8..b46bcb91072d 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -59,6 +59,7 @@ struct binder_features {
bool oneway_spam_detection;
bool extended_error;
bool freeze_notification;
+ bool transaction_report;
};
static const struct constant_table binderfs_param_stats[] = {
@@ -76,6 +77,7 @@ static struct binder_features binder_features = {
.oneway_spam_detection = true,
.extended_error = true,
.freeze_notification = true,
+ .transaction_report = true,
};
static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
@@ -117,7 +119,6 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
struct dentry *dentry, *root;
struct binder_device *device;
char *name = NULL;
- size_t name_len;
struct inode *inode = NULL;
struct super_block *sb = ref_inode->i_sb;
struct binderfs_info *info = sb->s_fs_info;
@@ -161,9 +162,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
inode->i_gid = info->root_gid;
req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
- name_len = strlen(req->name);
- /* Make sure to include terminating NUL byte */
- name = kmemdup(req->name, name_len + 1, GFP_KERNEL);
+ name = kstrdup(req->name, GFP_KERNEL);
if (!name)
goto err;
@@ -184,28 +183,15 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
}
root = sb->s_root;
- inode_lock(d_inode(root));
-
- /* look it up */
- dentry = lookup_noperm(&QSTR(name), root);
+ dentry = simple_start_creating(root, name);
if (IS_ERR(dentry)) {
- inode_unlock(d_inode(root));
ret = PTR_ERR(dentry);
goto err;
}
-
- if (d_really_is_positive(dentry)) {
- /* already exists */
- dput(dentry);
- inode_unlock(d_inode(root));
- ret = -EEXIST;
- goto err;
- }
-
inode->i_private = device;
- d_instantiate(dentry, inode);
+ d_make_persistent(dentry, inode);
fsnotify_create(root->d_inode, dentry);
- inode_unlock(d_inode(root));
+ simple_done_creating(dentry);
binder_add_device(device);
@@ -225,6 +211,9 @@ err:
/**
* binder_ctl_ioctl - handle binder device node allocation requests
+ * @file: The file pointer for the binder-control device node.
+ * @cmd: The ioctl command.
+ * @arg: The ioctl argument.
*
* The request handler for the binder-control device. All requests operate on
* the binderfs mount the binder-control device resides in:
@@ -411,12 +400,6 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
if (!device)
return -ENOMEM;
- /* If we have already created a binder-control node, return. */
- if (info->control_dentry) {
- ret = 0;
- goto out;
- }
-
ret = -ENOMEM;
inode = new_inode(sb);
if (!inode)
@@ -452,7 +435,8 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
inode->i_private = device;
info->control_dentry = dentry;
- d_add(dentry, inode);
+ d_make_persistent(dentry, inode);
+ dput(dentry);
return 0;
@@ -482,39 +466,6 @@ static struct inode *binderfs_make_inode(struct super_block *sb, int mode)
return ret;
}
-static struct dentry *binderfs_create_dentry(struct dentry *parent,
- const char *name)
-{
- struct dentry *dentry;
-
- dentry = lookup_noperm(&QSTR(name), parent);
- if (IS_ERR(dentry))
- return dentry;
-
- /* Return error if the file/dir already exists. */
- if (d_really_is_positive(dentry)) {
- dput(dentry);
- return ERR_PTR(-EEXIST);
- }
-
- return dentry;
-}
-
-void binderfs_remove_file(struct dentry *dentry)
-{
- struct inode *parent_inode;
-
- parent_inode = d_inode(dentry->d_parent);
- inode_lock(parent_inode);
- if (simple_positive(dentry)) {
- dget(dentry);
- simple_unlink(parent_inode, dentry);
- d_delete(dentry);
- dput(dentry);
- }
- inode_unlock(parent_inode);
-}
-
struct dentry *binderfs_create_file(struct dentry *parent, const char *name,
const struct file_operations *fops,
void *data)
@@ -524,28 +475,24 @@ struct dentry *binderfs_create_file(struct dentry *parent, const char *name,
struct super_block *sb;
parent_inode = d_inode(parent);
- inode_lock(parent_inode);
- dentry = binderfs_create_dentry(parent, name);
+ dentry = simple_start_creating(parent, name);
if (IS_ERR(dentry))
- goto out;
+ return dentry;
sb = parent_inode->i_sb;
new_inode = binderfs_make_inode(sb, S_IFREG | 0444);
if (!new_inode) {
- dput(dentry);
- dentry = ERR_PTR(-ENOMEM);
- goto out;
+ simple_done_creating(dentry);
+ return ERR_PTR(-ENOMEM);
}
new_inode->i_fop = fops;
new_inode->i_private = data;
- d_instantiate(dentry, new_inode);
+ d_make_persistent(dentry, new_inode);
fsnotify_create(parent_inode, dentry);
-
-out:
- inode_unlock(parent_inode);
- return dentry;
+ simple_done_creating(dentry);
+ return dentry; // borrowed
}
static struct dentry *binderfs_create_dir(struct dentry *parent,
@@ -556,30 +503,26 @@ static struct dentry *binderfs_create_dir(struct dentry *parent,
struct super_block *sb;
parent_inode = d_inode(parent);
- inode_lock(parent_inode);
- dentry = binderfs_create_dentry(parent, name);
+ dentry = simple_start_creating(parent, name);
if (IS_ERR(dentry))
- goto out;
+ return dentry;
sb = parent_inode->i_sb;
new_inode = binderfs_make_inode(sb, S_IFDIR | 0755);
if (!new_inode) {
- dput(dentry);
- dentry = ERR_PTR(-ENOMEM);
- goto out;
+ simple_done_creating(dentry);
+ return ERR_PTR(-ENOMEM);
}
new_inode->i_fop = &simple_dir_operations;
new_inode->i_op = &simple_dir_inode_operations;
set_nlink(new_inode, 2);
- d_instantiate(dentry, new_inode);
+ d_make_persistent(dentry, new_inode);
inc_nlink(parent_inode);
fsnotify_mkdir(parent_inode, dentry);
-
-out:
- inode_unlock(parent_inode);
+ simple_done_creating(dentry);
return dentry;
}
@@ -619,6 +562,12 @@ static int init_binder_features(struct super_block *sb)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
+ dentry = binderfs_create_file(dir, "transaction_report",
+ &binder_features_fops,
+ &binder_features.transaction_report);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
return 0;
}
@@ -789,7 +738,7 @@ static void binderfs_kill_super(struct super_block *sb)
* During inode eviction struct binderfs_info is needed.
* So first wipe the super_block then free struct binderfs_info.
*/
- kill_litter_super(sb);
+ kill_anon_super(sb);
if (info && info->ipc_ns)
put_ipc_ns(info->ipc_ns);
diff --git a/drivers/android/dbitmap.h b/drivers/android/dbitmap.h
index 956f1bd087d1..c7299ce8b374 100644
--- a/drivers/android/dbitmap.h
+++ b/drivers/android/dbitmap.h
@@ -37,6 +37,7 @@ static inline void dbitmap_free(struct dbitmap *dmap)
{
dmap->nbits = 0;
kfree(dmap->map);
+ dmap->map = NULL;
}
/* Returns the nbits that a dbitmap can shrink to, 0 if not possible. */
diff --git a/drivers/android/tests/.kunitconfig b/drivers/android/tests/.kunitconfig
new file mode 100644
index 000000000000..39b76bab9d9a
--- /dev/null
+++ b/drivers/android/tests/.kunitconfig
@@ -0,0 +1,7 @@
+#
+# Copyright 2025 Google LLC.
+#
+
+CONFIG_KUNIT=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST=y
diff --git a/drivers/android/tests/Makefile b/drivers/android/tests/Makefile
new file mode 100644
index 000000000000..27268418eb03
--- /dev/null
+++ b/drivers/android/tests/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright 2025 Google LLC.
+#
+
+obj-$(CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST) += binder_alloc_kunit.o
diff --git a/drivers/android/tests/binder_alloc_kunit.c b/drivers/android/tests/binder_alloc_kunit.c
new file mode 100644
index 000000000000..7f9cc003bbe3
--- /dev/null
+++ b/drivers/android/tests/binder_alloc_kunit.c
@@ -0,0 +1,572 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for binder allocator code.
+ *
+ * Copyright 2025 Google LLC.
+ * Author: Tiffany Yang <ynaffit@google.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/anon_inodes.h>
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/seq_buf.h>
+#include <linux/sizes.h>
+
+#include "../binder_alloc.h"
+#include "../binder_internal.h"
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+#define BINDER_MMAP_SIZE SZ_128K
+
+#define BUFFER_NUM 5
+#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
+
+#define FREESEQ_BUFLEN ((3 * BUFFER_NUM) + 1)
+
+#define ALIGN_TYPE_STRLEN (12)
+
+#define ALIGNMENTS_BUFLEN (((ALIGN_TYPE_STRLEN + 6) * BUFFER_NUM) + 1)
+
+#define PRINT_ALL_CASES (0)
+
+/* 5^5 alignment combinations * 2 places to share pages * 5! free sequences */
+#define TOTAL_EXHAUSTIVE_CASES (3125 * 2 * 120)
+
+/**
+ * enum buf_end_align_type - Page alignment of a buffer
+ * end with regard to the end of the previous buffer.
+ *
+ * In the pictures below, buf2 refers to the buffer we
+ * are aligning. buf1 refers to previous buffer by addr.
+ * Symbol [ means the start of a buffer, ] means the end
+ * of a buffer, and | means page boundaries.
+ */
+enum buf_end_align_type {
+ /**
+ * @SAME_PAGE_UNALIGNED: The end of this buffer is on
+ * the same page as the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 ][ ...
+ * buf1 ]|[ buf2 ][ ...
+ */
+ SAME_PAGE_UNALIGNED = 0,
+ /**
+ * @SAME_PAGE_ALIGNED: When the end of the previous buffer
+ * is not page aligned, the end of this buffer is on the
+ * same page as the end of the previous buffer and is page
+ * aligned. When the previous buffer is page aligned, the
+ * end of this buffer is aligned to the next page boundary.
+ * Examples:
+ * buf1 ][ buf2 ]| ...
+ * buf1 ]|[ buf2 ]| ...
+ */
+ SAME_PAGE_ALIGNED,
+ /**
+ * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 ][ ...
+ */
+ NEXT_PAGE_UNALIGNED,
+ /**
+ * @NEXT_PAGE_ALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ]| ...
+ * buf1 ]|[ buf2 | buf2 ]| ...
+ */
+ NEXT_PAGE_ALIGNED,
+ /**
+ * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
+ * the page that follows the page after the end of the
+ * previous buffer and is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
+ */
+ NEXT_NEXT_UNALIGNED,
+ /**
+ * @LOOP_END: The number of enum values in &buf_end_align_type.
+ * It is used for controlling loop termination.
+ */
+ LOOP_END,
+};
+
+static const char *const buf_end_align_type_strs[LOOP_END] = {
+ [SAME_PAGE_UNALIGNED] = "SP_UNALIGNED",
+ [SAME_PAGE_ALIGNED] = " SP_ALIGNED ",
+ [NEXT_PAGE_UNALIGNED] = "NP_UNALIGNED",
+ [NEXT_PAGE_ALIGNED] = " NP_ALIGNED ",
+ [NEXT_NEXT_UNALIGNED] = "NN_UNALIGNED",
+};
+
+struct binder_alloc_test_case_info {
+ char alignments[ALIGNMENTS_BUFLEN];
+ struct seq_buf alignments_sb;
+ size_t *buffer_sizes;
+ int *free_sequence;
+ bool front_pages;
+};
+
+static void stringify_free_seq(struct kunit *test, int *seq, struct seq_buf *sb)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++)
+ seq_buf_printf(sb, "[%d]", seq[i]);
+
+ KUNIT_EXPECT_FALSE(test, seq_buf_has_overflowed(sb));
+}
+
+static void stringify_alignments(struct kunit *test, int *alignments,
+ struct seq_buf *sb)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++)
+ seq_buf_printf(sb, "[ %d:%s ]", i,
+ buf_end_align_type_strs[alignments[i]]);
+
+ KUNIT_EXPECT_FALSE(test, seq_buf_has_overflowed(sb));
+}
+
+static bool check_buffer_pages_allocated(struct kunit *test,
+ struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ size_t size)
+{
+ unsigned long page_addr;
+ unsigned long end;
+ int page_index;
+
+ end = PAGE_ALIGN(buffer->user_data + size);
+ page_addr = buffer->user_data;
+ for (; page_addr < end; page_addr += PAGE_SIZE) {
+ page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ if (!alloc->pages[page_index] ||
+ !list_empty(page_to_lru(alloc->pages[page_index]))) {
+ kunit_err(test, "expect alloc but is %s at page index %d\n",
+ alloc->pages[page_index] ?
+ "lru" : "free", page_index);
+ return false;
+ }
+ }
+ return true;
+}
+
+static unsigned long binder_alloc_test_alloc_buf(struct kunit *test,
+ struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq)
+{
+ unsigned long failures = 0;
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+ if (IS_ERR(buffers[i]) ||
+ !check_buffer_pages_allocated(test, alloc, buffers[i], sizes[i]))
+ failures++;
+ }
+
+ return failures;
+}
+
+static unsigned long binder_alloc_test_free_buf(struct kunit *test,
+ struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq, size_t end)
+{
+ unsigned long failures = 0;
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++)
+ binder_alloc_free_buf(alloc, buffers[seq[i]]);
+
+ for (i = 0; i <= (end - 1) / PAGE_SIZE; i++) {
+ if (list_empty(page_to_lru(alloc->pages[i]))) {
+ kunit_err(test, "expect lru but is %s at page index %d\n",
+ alloc->pages[i] ? "alloc" : "free", i);
+ failures++;
+ }
+ }
+
+ return failures;
+}
+
+static unsigned long binder_alloc_test_free_page(struct kunit *test,
+ struct binder_alloc *alloc)
+{
+ unsigned long failures = 0;
+ unsigned long count;
+ int i;
+
+ while ((count = list_lru_count(alloc->freelist))) {
+ list_lru_walk(alloc->freelist, binder_alloc_free_page,
+ NULL, count);
+ }
+
+ for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
+ if (alloc->pages[i]) {
+ kunit_err(test, "expect free but is %s at page index %d\n",
+ list_empty(page_to_lru(alloc->pages[i])) ?
+ "alloc" : "lru", i);
+ failures++;
+ }
+ }
+
+ return failures;
+}
+
+/* Executes one full test run for the given test case. */
+static bool binder_alloc_test_alloc_free(struct kunit *test,
+ struct binder_alloc *alloc,
+ struct binder_alloc_test_case_info *tc,
+ size_t end)
+{
+ unsigned long pages = PAGE_ALIGN(end) / PAGE_SIZE;
+ struct binder_buffer *buffers[BUFFER_NUM];
+ unsigned long failures;
+ bool failed = false;
+
+ failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
+ tc->buffer_sizes,
+ tc->free_sequence);
+ failed = failed || failures;
+ KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+ "Initial allocation failed: %lu/%u buffers with errors",
+ failures, BUFFER_NUM);
+
+ failures = binder_alloc_test_free_buf(test, alloc, buffers,
+ tc->buffer_sizes,
+ tc->free_sequence, end);
+ failed = failed || failures;
+ KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+ "Initial buffers not freed correctly: %lu/%lu pages not on lru list",
+ failures, pages);
+
+ /* Allocate from lru. */
+ failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
+ tc->buffer_sizes,
+ tc->free_sequence);
+ failed = failed || failures;
+ KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+ "Reallocation failed: %lu/%u buffers with errors",
+ failures, BUFFER_NUM);
+
+ failures = list_lru_count(alloc->freelist);
+ failed = failed || failures;
+ KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+ "lru list should be empty after reallocation but still has %lu pages",
+ failures);
+
+ failures = binder_alloc_test_free_buf(test, alloc, buffers,
+ tc->buffer_sizes,
+ tc->free_sequence, end);
+ failed = failed || failures;
+ KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+ "Reallocated buffers not freed correctly: %lu/%lu pages not on lru list",
+ failures, pages);
+
+ failures = binder_alloc_test_free_page(test, alloc);
+ failed = failed || failures;
+ KUNIT_EXPECT_EQ_MSG(test, failures, 0,
+ "Failed to clean up allocated pages: %lu/%lu pages still installed",
+ failures, (alloc->buffer_size / PAGE_SIZE));
+
+ return failed;
+}
+
+static bool is_dup(int *seq, int index, int val)
+{
+ int i;
+
+ for (i = 0; i < index; i++) {
+ if (seq[i] == val)
+ return true;
+ }
+ return false;
+}
+
+/* Generate BUFFER_NUM factorial free orders. */
+static void permute_frees(struct kunit *test, struct binder_alloc *alloc,
+ struct binder_alloc_test_case_info *tc,
+ unsigned long *runs, unsigned long *failures,
+ int index, size_t end)
+{
+ bool case_failed;
+ int i;
+
+ if (index == BUFFER_NUM) {
+ DECLARE_SEQ_BUF(freeseq_sb, FREESEQ_BUFLEN);
+
+ case_failed = binder_alloc_test_alloc_free(test, alloc, tc, end);
+ *runs += 1;
+ *failures += case_failed;
+
+ if (case_failed || PRINT_ALL_CASES) {
+ stringify_free_seq(test, tc->free_sequence,
+ &freeseq_sb);
+ kunit_err(test, "case %lu: [%s] | %s - %s - %s", *runs,
+ case_failed ? "FAILED" : "PASSED",
+ tc->front_pages ? "front" : "back ",
+ seq_buf_str(&tc->alignments_sb),
+ seq_buf_str(&freeseq_sb));
+ }
+
+ return;
+ }
+ for (i = 0; i < BUFFER_NUM; i++) {
+ if (is_dup(tc->free_sequence, index, i))
+ continue;
+ tc->free_sequence[index] = i;
+ permute_frees(test, alloc, tc, runs, failures, index + 1, end);
+ }
+}
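permute_frees() is a classic recursive permutation generator: each recursion level fixes one more slot of free_sequence, is_dup() skips indices already placed, and a complete test case runs once all BUFFER_NUM slots are filled, giving BUFFER_NUM! (120) free orders per size combination. A standalone userspace sketch of the same recursion, stripped of the test harness:

    /* Prints all N! orderings of 0..N-1; mirrors permute_frees() above. */
    #include <stdbool.h>
    #include <stdio.h>

    #define N 3

    static bool used(const int *seq, int index, int val)
    {
            for (int i = 0; i < index; i++)
                    if (seq[i] == val)
                            return true;
            return false;
    }

    static void permute(int *seq, int index)
    {
            if (index == N) {
                    for (int i = 0; i < N; i++)
                            printf("[%d]", seq[i]);
                    printf("\n");
                    return;
            }
            for (int i = 0; i < N; i++) {
                    if (used(seq, index, i))
                            continue;
                    seq[index] = i;
                    permute(seq, index + 1);
            }
    }

    int main(void)
    {
            int seq[N];

            permute(seq, 0);        /* emits 3! = 6 sequences */
            return 0;
    }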
+
+static void gen_buf_sizes(struct kunit *test,
+ struct binder_alloc *alloc,
+ struct binder_alloc_test_case_info *tc,
+ size_t *end_offset, unsigned long *runs,
+ unsigned long *failures)
+{
+ size_t last_offset, offset = 0;
+ size_t front_sizes[BUFFER_NUM];
+ size_t back_sizes[BUFFER_NUM];
+ int seq[BUFFER_NUM] = {0};
+ int i;
+
+ tc->free_sequence = seq;
+ for (i = 0; i < BUFFER_NUM; i++) {
+ last_offset = offset;
+ offset = end_offset[i];
+ front_sizes[i] = offset - last_offset;
+ back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
+ }
+ back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
+
+ /*
+ * Buffers share the first or last few pages.
+ * Only BUFFER_NUM - 1 buffer sizes are adjustable since
+ * we need one giant buffer before getting to the last page.
+ */
+ tc->front_pages = true;
+ tc->buffer_sizes = front_sizes;
+ permute_frees(test, alloc, tc, runs, failures, 0,
+ end_offset[BUFFER_NUM - 1]);
+
+ tc->front_pages = false;
+ tc->buffer_sizes = back_sizes;
+ permute_frees(test, alloc, tc, runs, failures, 0, alloc->buffer_size);
+}
+
+static void gen_buf_offsets(struct kunit *test, struct binder_alloc *alloc,
+ size_t *end_offset, int *alignments,
+ unsigned long *runs, unsigned long *failures,
+ int index)
+{
+ size_t end, prev;
+ int align;
+
+ if (index == BUFFER_NUM) {
+ struct binder_alloc_test_case_info tc = {0};
+
+ seq_buf_init(&tc.alignments_sb, tc.alignments,
+ ALIGNMENTS_BUFLEN);
+ stringify_alignments(test, alignments, &tc.alignments_sb);
+
+ gen_buf_sizes(test, alloc, &tc, end_offset, runs, failures);
+ return;
+ }
+ prev = index == 0 ? 0 : end_offset[index - 1];
+ end = prev;
+
+ BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
+
+ for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
+ if (align % 2)
+ end = ALIGN(end, PAGE_SIZE);
+ else
+ end += BUFFER_MIN_SIZE;
+ end_offset[index] = end;
+ alignments[index] = align;
+ gen_buf_offsets(test, alloc, end_offset, alignments, runs,
+ failures, index + 1);
+ }
+}
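The trick in the loop above is that SAME_PAGE_ALIGNED and NEXT_PAGE_ALIGNED are the odd enum values, so `align % 2` selects page alignment, and `end` carries over between iterations so each alignment type builds on the previous candidate offset. A worked pass with PAGE_SIZE = 4096 (hence BUFFER_MIN_SIZE = 512) and prev = 0:

    align = SAME_PAGE_UNALIGNED:  end = 0 + 512           = 512
    align = SAME_PAGE_ALIGNED:    end = ALIGN(512, 4096)  = 4096
    align = NEXT_PAGE_UNALIGNED:  end = 4096 + 512        = 4608
    align = NEXT_PAGE_ALIGNED:    end = ALIGN(4608, 4096) = 8192
    align = NEXT_NEXT_UNALIGNED:  end = 8192 + 512        = 8704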
+
+struct binder_alloc_test {
+ struct binder_alloc alloc;
+ struct list_lru binder_test_freelist;
+ struct file *filp;
+ unsigned long mmap_uaddr;
+};
+
+static void binder_alloc_test_init_freelist(struct kunit *test)
+{
+ struct binder_alloc_test *priv = test->priv;
+
+ KUNIT_EXPECT_PTR_EQ(test, priv->alloc.freelist,
+ &priv->binder_test_freelist);
+}
+
+static void binder_alloc_test_mmap(struct kunit *test)
+{
+ struct binder_alloc_test *priv = test->priv;
+ struct binder_alloc *alloc = &priv->alloc;
+ struct binder_buffer *buf;
+ struct rb_node *n;
+
+ KUNIT_EXPECT_EQ(test, alloc->mapped, true);
+ KUNIT_EXPECT_EQ(test, alloc->buffer_size, BINDER_MMAP_SIZE);
+
+ n = rb_first(&alloc->allocated_buffers);
+ KUNIT_EXPECT_PTR_EQ(test, n, NULL);
+
+ n = rb_first(&alloc->free_buffers);
+ buf = rb_entry(n, struct binder_buffer, rb_node);
+ KUNIT_EXPECT_EQ(test, binder_alloc_buffer_size(alloc, buf),
+ BINDER_MMAP_SIZE);
+ KUNIT_EXPECT_TRUE(test, list_is_last(&buf->entry, &alloc->buffers));
+}
+
+/**
+ * binder_alloc_exhaustive_test() - Exhaustively test alloc and free of buffer pages.
+ * @test: The test context object.
+ *
+ * Allocate BUFFER_NUM buffers to cover all page alignment cases,
+ * then free them in all orders possible. Check that pages are
+ * correctly allocated, put onto lru when buffers are freed, and
+ * are freed when binder_alloc_free_page() is called.
+ */
+static void binder_alloc_exhaustive_test(struct kunit *test)
+{
+ struct binder_alloc_test *priv = test->priv;
+ size_t end_offset[BUFFER_NUM];
+ int alignments[BUFFER_NUM];
+ unsigned long failures = 0;
+ unsigned long runs = 0;
+
+ gen_buf_offsets(test, &priv->alloc, end_offset, alignments, &runs,
+ &failures, 0);
+
+ KUNIT_EXPECT_EQ(test, runs, TOTAL_EXHAUSTIVE_CASES);
+ KUNIT_EXPECT_EQ(test, failures, 0);
+}
+
+/* ===== End test cases ===== */
+
+static void binder_alloc_test_vma_close(struct vm_area_struct *vma)
+{
+ struct binder_alloc *alloc = vma->vm_private_data;
+
+ binder_alloc_vma_close(alloc);
+}
+
+static const struct vm_operations_struct binder_alloc_test_vm_ops = {
+ .close = binder_alloc_test_vma_close,
+ .fault = binder_vm_fault,
+};
+
+static int binder_alloc_test_mmap_handler(struct file *filp,
+ struct vm_area_struct *vma)
+{
+ struct binder_alloc *alloc = filp->private_data;
+
+ vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
+
+ vma->vm_ops = &binder_alloc_test_vm_ops;
+ vma->vm_private_data = alloc;
+
+ return binder_alloc_mmap_handler(alloc, vma);
+}
+
+static const struct file_operations binder_alloc_test_fops = {
+ .mmap = binder_alloc_test_mmap_handler,
+};
+
+static int binder_alloc_test_init(struct kunit *test)
+{
+ struct binder_alloc_test *priv;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ test->priv = priv;
+
+ ret = list_lru_init(&priv->binder_test_freelist);
+ if (ret) {
+ kunit_err(test, "Failed to initialize test freelist\n");
+ return ret;
+ }
+
+ /* __binder_alloc_init requires mm to be attached */
+ ret = kunit_attach_mm();
+ if (ret) {
+ kunit_err(test, "Failed to attach mm\n");
+ return ret;
+ }
+ __binder_alloc_init(&priv->alloc, &priv->binder_test_freelist);
+
+ priv->filp = anon_inode_getfile("binder_alloc_kunit",
+ &binder_alloc_test_fops, &priv->alloc,
+ O_RDWR | O_CLOEXEC);
+ if (IS_ERR_OR_NULL(priv->filp)) {
+ kunit_err(test, "Failed to open binder alloc test driver file\n");
+ return priv->filp ? PTR_ERR(priv->filp) : -ENOMEM;
+ }
+
+ priv->mmap_uaddr = kunit_vm_mmap(test, priv->filp, 0, BINDER_MMAP_SIZE,
+ PROT_READ, MAP_PRIVATE | MAP_NORESERVE,
+ 0);
+ if (!priv->mmap_uaddr) {
+ kunit_err(test, "Could not map the test's transaction memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void binder_alloc_test_exit(struct kunit *test)
+{
+ struct binder_alloc_test *priv = test->priv;
+
+ /* Close the backing file to make sure binder_alloc_vma_close runs */
+ if (!IS_ERR_OR_NULL(priv->filp))
+ fput(priv->filp);
+
+ if (priv->alloc.mm)
+ binder_alloc_deferred_release(&priv->alloc);
+
+ /* Make sure freelist is empty */
+ KUNIT_EXPECT_EQ(test, list_lru_count(&priv->binder_test_freelist), 0);
+ list_lru_destroy(&priv->binder_test_freelist);
+}
+
+static struct kunit_case binder_alloc_test_cases[] = {
+ KUNIT_CASE(binder_alloc_test_init_freelist),
+ KUNIT_CASE(binder_alloc_test_mmap),
+ KUNIT_CASE_SLOW(binder_alloc_exhaustive_test),
+ {}
+};
+
+static struct kunit_suite binder_alloc_test_suite = {
+ .name = "binder_alloc",
+ .test_cases = binder_alloc_test_cases,
+ .init = binder_alloc_test_init,
+ .exit = binder_alloc_test_exit,
+};
+
+kunit_test_suite(binder_alloc_test_suite);
+
+MODULE_AUTHOR("Tiffany Yang <ynaffit@google.com>");
+MODULE_DESCRIPTION("Binder Alloc KUnit tests");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index e00536b49552..120a2b7067fc 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -117,23 +117,39 @@ config SATA_AHCI
config SATA_MOBILE_LPM_POLICY
int "Default SATA Link Power Management policy"
- range 0 4
+ range 0 5
default 3
depends on SATA_AHCI
help
Select the Default SATA Link Power Management (LPM) policy to use
for chipsets / "South Bridges" supporting low-power modes. Such
chipsets are ubiquitous across laptops, desktops and servers.
-
- The value set has the following meanings:
+ Each policy combines power saving states and features:
+ - Partial: The Phy logic is powered but is in a reduced power
+ state. The exit latency from this state is no longer than
+		      10us.
+ - Slumber: The Phy logic is powered but is in an even lower power
+ state. The exit latency from this state is potentially
+ longer, but no longer than 10ms.
+ - DevSleep: The Phy logic may be powered down. The exit latency from
+ this state is no longer than 20 ms, unless otherwise
+ specified by DETO in the device Identify Device Data log.
+ - HIPM: Host Initiated Power Management (host automatically
+ transitions to partial and slumber).
+ - DIPM: Device Initiated Power Management (device automatically
+ transitions to partial and slumber).
+
+ The possible values for the default SATA link power management
+ policies are:
0 => Keep firmware settings
- 1 => Maximum performance
- 2 => Medium power
- 3 => Medium power with Device Initiated PM enabled
- 4 => Minimum power
-
- Note "Minimum power" is known to cause issues, including disk
- corruption, with some disks and should not be used.
+ 1 => No power savings (maximum performance)
+ 2 => HIPM (Partial)
+ 3 => HIPM (Partial) and DIPM (Partial and Slumber)
+ 4 => HIPM (Partial and DevSleep) and DIPM (Partial and Slumber)
+ 5 => HIPM (Slumber and DevSleep) and DIPM (Partial and Slumber)
+
+ Excluding the value 0, higher values represent policies with higher
+ power savings.
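This Kconfig setting only chooses the boot-time default; the policy remains adjustable per host at runtime via the standard libata sysfs attribute, e.g.:

    # cat /sys/class/scsi_host/host0/link_power_management_policy
    med_power_with_dipm
    # echo min_power > /sys/class/scsi_host/host0/link_power_management_policy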
config SATA_AHCI_PLATFORM
tristate "Platform AHCI SATA support"
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 163ac909bd06..7a7f88b3fa2b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -110,17 +110,17 @@ static const struct scsi_host_template ahci_sht = {
static struct ata_port_operations ahci_vt8251_ops = {
.inherits = &ahci_ops,
- .hardreset = ahci_vt8251_hardreset,
+ .reset.hardreset = ahci_vt8251_hardreset,
};
static struct ata_port_operations ahci_p5wdh_ops = {
.inherits = &ahci_ops,
- .hardreset = ahci_p5wdh_hardreset,
+ .reset.hardreset = ahci_p5wdh_hardreset,
};
static struct ata_port_operations ahci_avn_ops = {
.inherits = &ahci_ops,
- .hardreset = ahci_avn_hardreset,
+ .reset.hardreset = ahci_avn_hardreset,
};
static const struct ata_port_info ahci_port_info[] = {
@@ -674,7 +674,9 @@ MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
static int mobile_lpm_policy = -1;
module_param(mobile_lpm_policy, int, 0644);
-MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
+MODULE_PARM_DESC(mobile_lpm_policy,
+ "Default LPM policy. Despite its name, this parameter applies "
+ "to all chipsets, including desktop and server chipsets");
static char *ahci_mask_port_map;
module_param_named(mask_port_map, ahci_mask_port_map, charp, 0444);
@@ -687,40 +689,50 @@ MODULE_PARM_DESC(mask_port_map,
"where <pci_dev> is the PCI ID of an AHCI controller in the "
"form \"domain:bus:dev.func\"");
-static void ahci_apply_port_map_mask(struct device *dev,
- struct ahci_host_priv *hpriv, char *mask_s)
+static char *ahci_mask_port_ext;
+module_param_named(mask_port_ext, ahci_mask_port_ext, charp, 0444);
+MODULE_PARM_DESC(mask_port_ext,
+ "32-bits mask to ignore the external/hotplug capability of ports. "
+ "Valid values are: "
+ "\"<mask>\" to apply the same mask to all AHCI controller "
+ "devices, and \"<pci_dev>=<mask>,<pci_dev>=<mask>,...\" to "
+ "specify different masks for the controllers specified, "
+ "where <pci_dev> is the PCI ID of an AHCI controller in the "
+ "form \"domain:bus:dev.func\"");
+
+static u32 ahci_port_mask(struct device *dev, char *mask_s)
{
unsigned int mask;
if (kstrtouint(mask_s, 0, &mask)) {
dev_err(dev, "Invalid port map mask\n");
- return;
+ return 0;
}
- hpriv->mask_port_map = mask;
+ return mask;
}
-static void ahci_get_port_map_mask(struct device *dev,
- struct ahci_host_priv *hpriv)
+static u32 ahci_get_port_mask(struct device *dev, char *mask_p)
{
char *param, *end, *str, *mask_s;
char *name;
+ u32 mask = 0;
- if (!strlen(ahci_mask_port_map))
- return;
+ if (!mask_p || !strlen(mask_p))
+ return 0;
- str = kstrdup(ahci_mask_port_map, GFP_KERNEL);
+ str = kstrdup(mask_p, GFP_KERNEL);
if (!str)
- return;
+ return 0;
/* Handle single mask case */
if (!strchr(str, '=')) {
- ahci_apply_port_map_mask(dev, hpriv, str);
+ mask = ahci_port_mask(dev, str);
goto free;
}
/*
- * Mask list case: parse the parameter to apply the mask only if
+ * Mask list case: parse the parameter to get the mask only if
* the device name matches.
*/
param = str;
@@ -750,11 +762,13 @@ static void ahci_get_port_map_mask(struct device *dev,
param++;
}
- ahci_apply_port_map_mask(dev, hpriv, mask_s);
+ mask = ahci_port_mask(dev, mask_s);
}
free:
kfree(str);
+
+ return mask;
}
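As parsed above, both mask_port_map and the new mask_port_ext accept either one global mask or a per-controller list. Hypothetical boot-time examples (the PCI ID below is a placeholder):

    # Keep only ports 0 and 1 on every AHCI controller:
    ahci.mask_port_map=0x3

    # Ignore the external/hotplug capability of port 0 on one controller:
    ahci.mask_port_ext=0000:00:17.0=0x1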
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
@@ -780,8 +794,10 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
}
/* Handle port map masks passed as module parameter. */
- if (ahci_mask_port_map)
- ahci_get_port_map_mask(&pdev->dev, hpriv);
+ hpriv->mask_port_map =
+ ahci_get_port_mask(&pdev->dev, ahci_mask_port_map);
+ hpriv->mask_port_ext =
+ ahci_get_port_mask(&pdev->dev, ahci_mask_port_ext);
ahci_save_initial_config(&pdev->dev, hpriv);
}
@@ -1410,8 +1426,15 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
static bool ahci_broken_lpm(struct pci_dev *pdev)
{
+ /*
+ * Platforms with LPM problems.
+ * If driver_data is NULL, there is no existing BIOS version with
+ * functioning LPM.
+ * If driver_data is non-NULL, then driver_data contains the DMI BIOS
+ * build date of the first BIOS version with functioning LPM (i.e. older
+ * BIOS versions have broken LPM).
+ */
static const struct dmi_system_id sysids[] = {
- /* Various Lenovo 50 series have LPM issues with older BIOSen */
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -1438,13 +1461,30 @@ static bool ahci_broken_lpm(struct pci_dev *pdev)
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
},
+ .driver_data = "20180409", /* 2.35 */
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASUSPRO D840MB_M840SA"),
+ },
+ /* 320 is broken, there is no known good version. */
+ },
+ {
/*
- * Note date based on release notes, 2.35 has been
- * reported to be good, but I've been unable to get
- * a hold of the reporter to get the DMI BIOS date.
- * TODO: fix this.
+ * AMD 500 Series Chipset SATA Controller [1022:43eb]
+ * on this motherboard timeouts on ports 5 and 6 when
+ * LPM is enabled, at least with WDC WD20EFAX-68FB5N0
+ * hard drives. LPM with the same drive works fine on
+ * all other ports on the same controller.
*/
- .driver_data = "20180310", /* 2.35 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR,
+ "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME,
+ "ROG STRIX B550-F GAMING (WI-FI)"),
+ },
+ /* 3621 is broken, there is no known good version. */
},
{ } /* terminate list */
};
@@ -1455,6 +1495,9 @@ static bool ahci_broken_lpm(struct pci_dev *pdev)
if (!dmi)
return false;
+ if (!dmi->driver_data)
+ return true;
+
dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
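Storing the BIOS build date as a "YYYYMMDD" string means a plain strcmp() orders dates chronologically, so the check following the snprintf() (unchanged and outside this hunk) stays a one-liner; in sketch form:

    /* LPM is broken if the installed BIOS predates the first good one. */
    if (strcmp(buf, dmi->driver_data) < 0)
            return true;
    return false;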
@@ -1728,11 +1771,20 @@ static void ahci_mark_external_port(struct ata_port *ap)
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
- /* mark external ports (hotplug-capable, eSATA) */
+ /*
+ * Mark external ports (hotplug-capable, eSATA), unless we were asked to
+ * ignore this feature.
+ */
tmp = readl(port_mmio + PORT_CMD);
if (((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)) ||
- (tmp & PORT_CMD_HPCP))
+ (tmp & PORT_CMD_HPCP)) {
+ if (hpriv->mask_port_ext & (1U << ap->port_no)) {
+ ata_port_info(ap,
+ "Ignoring external/hotplug capability\n");
+ return;
+ }
ap->pflags |= ATA_PFLAG_EXTERNAL;
+ }
}
static void ahci_update_initial_lpm_policy(struct ata_port *ap)
@@ -1747,15 +1799,26 @@ static void ahci_update_initial_lpm_policy(struct ata_port *ap)
* LPM if the port advertises itself as an external port.
*/
if (ap->pflags & ATA_PFLAG_EXTERNAL) {
- ata_port_dbg(ap, "external port, not enabling LPM\n");
+ ap->flags |= ATA_FLAG_NO_LPM;
+ ap->target_lpm_policy = ATA_LPM_MAX_POWER;
return;
}
+ /* If no Partial or no Slumber, we cannot support DIPM. */
+ if ((ap->host->flags & ATA_HOST_NO_PART) ||
+ (ap->host->flags & ATA_HOST_NO_SSC)) {
+ ata_port_dbg(ap, "Host does not support DIPM\n");
+ ap->flags |= ATA_FLAG_NO_DIPM;
+ }
+
/* If no LPM states are supported by the HBA, do not bother with LPM */
if ((ap->host->flags & ATA_HOST_NO_PART) &&
(ap->host->flags & ATA_HOST_NO_SSC) &&
(ap->host->flags & ATA_HOST_NO_DEVSLP)) {
- ata_port_dbg(ap, "no LPM states supported, not enabling LPM\n");
+ ata_port_dbg(ap,
+ "No LPM states supported, forcing LPM max_power\n");
+ ap->flags |= ATA_FLAG_NO_LPM;
+ ap->target_lpm_policy = ATA_LPM_MAX_POWER;
return;
}
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 2c10c8f440d1..293b7fb216b5 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -330,6 +330,7 @@ struct ahci_host_priv {
/* Input fields */
unsigned int flags; /* AHCI_HFLAG_* */
u32 mask_port_map; /* Mask of valid ports */
+ u32 mask_port_ext; /* Mask of ports ext capability */
void __iomem * mmio; /* bus-independent mem map */
u32 cap; /* cap to use */
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index ca0924dc5bd2..f97566c420f8 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -137,13 +137,13 @@ static int ahci_da850_hardreset(struct ata_link *link,
static struct ata_port_operations ahci_da850_port_ops = {
.inherits = &ahci_platform_ops,
- .softreset = ahci_da850_softreset,
+ .reset.softreset = ahci_da850_softreset,
/*
* No need to override .pmp_softreset - it's only used for actual
* PMP-enabled ports.
*/
- .hardreset = ahci_da850_hardreset,
- .pmp_hardreset = ahci_da850_hardreset,
+ .reset.hardreset = ahci_da850_hardreset,
+ .pmp_reset.hardreset = ahci_da850_hardreset,
};
static const struct ata_port_info ahci_da850_port_info = {
diff --git a/drivers/ata/ahci_dm816.c b/drivers/ata/ahci_dm816.c
index b08547b877a1..93faed2cfeb6 100644
--- a/drivers/ata/ahci_dm816.c
+++ b/drivers/ata/ahci_dm816.c
@@ -124,7 +124,7 @@ static int ahci_dm816_softreset(struct ata_link *link,
static struct ata_port_operations ahci_dm816_port_ops = {
.inherits = &ahci_platform_ops,
- .softreset = ahci_dm816_softreset,
+ .reset.softreset = ahci_dm816_softreset,
};
static const struct ata_port_info ahci_dm816_port_info = {
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index f01f08048f97..86aedd5923ac 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -642,18 +642,19 @@ static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
int ret;
if (imxpriv->type == AHCI_IMX53)
- ret = ahci_pmp_retry_srst_ops.softreset(link, class, deadline);
+ ret = ahci_pmp_retry_srst_ops.reset.softreset(link, class,
+ deadline);
else
- ret = ahci_ops.softreset(link, class, deadline);
+ ret = ahci_ops.reset.softreset(link, class, deadline);
return ret;
}
static struct ata_port_operations ahci_imx_ops = {
- .inherits = &ahci_ops,
- .host_stop = ahci_imx_host_stop,
- .error_handler = ahci_imx_error_handler,
- .softreset = ahci_imx_softreset,
+ .inherits = &ahci_ops,
+ .host_stop = ahci_imx_host_stop,
+ .error_handler = ahci_imx_error_handler,
+ .reset.softreset = ahci_imx_softreset,
};
static const struct ata_port_info ahci_imx_port_info = {
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 30e39885b64e..0dec1a17e5b1 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -146,8 +146,8 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
}
static struct ata_port_operations ahci_qoriq_ops = {
- .inherits = &ahci_ops,
- .hardreset = ahci_qoriq_hardreset,
+ .inherits = &ahci_ops,
+ .reset.hardreset = ahci_qoriq_hardreset,
};
static const struct ata_port_info ahci_qoriq_port_info = {
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index dfbd8c53abcb..6b8844646fcd 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -450,7 +450,6 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
{
int pmp = sata_srst_pmp(link);
struct ata_port *ap = link->ap;
- u32 rc;
void __iomem *port_mmio = ahci_port_base(ap);
u32 port_fbs;
@@ -463,9 +462,7 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
writel(port_fbs, port_mmio + PORT_FBS);
- rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
-
- return rc;
+ return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
}
/**
@@ -500,7 +497,7 @@ static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
u32 port_fbs;
u32 port_fbs_save;
u32 retry = 1;
- u32 rc;
+ int rc;
port_fbs_save = readl(port_mmio + PORT_FBS);
@@ -613,11 +610,11 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
static struct ata_port_operations xgene_ahci_v1_ops = {
.inherits = &ahci_ops,
.host_stop = xgene_ahci_host_stop,
- .hardreset = xgene_ahci_hardreset,
+ .reset.hardreset = xgene_ahci_hardreset,
+ .reset.softreset = xgene_ahci_softreset,
+ .pmp_reset.softreset = xgene_ahci_pmp_softreset,
.read_id = xgene_ahci_read_id,
.qc_issue = xgene_ahci_qc_issue,
- .softreset = xgene_ahci_softreset,
- .pmp_softreset = xgene_ahci_pmp_softreset
};
static const struct ata_port_info xgene_ahci_v1_port_info = {
@@ -630,7 +627,7 @@ static const struct ata_port_info xgene_ahci_v1_port_info = {
static struct ata_port_operations xgene_ahci_v2_ops = {
.inherits = &ahci_ops,
.host_stop = xgene_ahci_host_stop,
- .hardreset = xgene_ahci_hardreset,
+ .reset.hardreset = xgene_ahci_hardreset,
.read_id = xgene_ahci_read_id,
};
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index d441246fa357..495fa096dd65 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1074,7 +1074,7 @@ static struct ata_port_operations piix_pata_ops = {
.cable_detect = ata_cable_40wire,
.set_piomode = piix_set_piomode,
.set_dmamode = piix_set_dmamode,
- .prereset = piix_pata_prereset,
+ .reset.prereset = piix_pata_prereset,
};
static struct ata_port_operations piix_vmw_ops = {
@@ -1089,6 +1089,7 @@ static struct ata_port_operations ich_pata_ops = {
};
static struct attribute *piix_sidpr_shost_attrs[] = {
+ &dev_attr_link_power_management_supported.attr,
&dev_attr_link_power_management_policy.attr,
NULL
};
@@ -1102,7 +1103,7 @@ static const struct scsi_host_template piix_sidpr_sht = {
static struct ata_port_operations piix_sidpr_sata_ops = {
.inherits = &piix_sata_ops,
- .hardreset = sata_std_hardreset,
+ .reset.hardreset = sata_std_hardreset,
.scr_read = piix_sidpr_scr_read,
.scr_write = piix_sidpr_scr_write,
.set_lpm = piix_sidpr_set_lpm,
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 4e9c82f36df1..c79abdfcd7a9 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -111,6 +111,7 @@ static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL);
static struct attribute *ahci_shost_attrs[] = {
+ &dev_attr_link_power_management_supported.attr,
&dev_attr_link_power_management_policy.attr,
&dev_attr_em_message_type.attr,
&dev_attr_em_message.attr,
@@ -162,10 +163,10 @@ struct ata_port_operations ahci_ops = {
.freeze = ahci_freeze,
.thaw = ahci_thaw,
- .softreset = ahci_softreset,
- .hardreset = ahci_hardreset,
- .postreset = ahci_postreset,
- .pmp_softreset = ahci_softreset,
+ .reset.softreset = ahci_softreset,
+ .reset.hardreset = ahci_hardreset,
+ .reset.postreset = ahci_postreset,
+ .pmp_reset.softreset = ahci_softreset,
.error_handler = ahci_error_handler,
.post_internal_cmd = ahci_post_internal_cmd,
.dev_config = ahci_dev_config,
@@ -192,7 +193,7 @@ EXPORT_SYMBOL_GPL(ahci_ops);
struct ata_port_operations ahci_pmp_retry_srst_ops = {
.inherits = &ahci_ops,
- .softreset = ahci_pmp_retry_softreset,
+ .reset.softreset = ahci_pmp_retry_softreset,
};
EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops);
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index b7f0bf795521..15e18d50dcc6 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -246,6 +246,73 @@ void ata_acpi_bind_dev(struct ata_device *dev)
}
/**
+ * ata_acpi_dev_manage_restart - check if the disk should be stopped (spun
+ * down) on system restart
+ * @dev: target ATA device
+ *
+ * RETURNS:
+ * true if the disk should be stopped, otherwise false.
+ */
+bool ata_acpi_dev_manage_restart(struct ata_device *dev)
+{
+ struct device *tdev;
+
+ /*
+ * If ATA_FLAG_ACPI_SATA is set, the acpi fwnode is attached to the
+ * ata_device instead of the ata_port.
+ */
+ if (dev->link->ap->flags & ATA_FLAG_ACPI_SATA)
+ tdev = &dev->tdev;
+ else
+ tdev = &dev->link->ap->tdev;
+
+ if (!is_acpi_device_node(tdev->fwnode))
+ return false;
+ return acpi_bus_power_manageable(ACPI_HANDLE(tdev));
+}
+
+/**
+ * ata_acpi_port_power_on - set the power state of the ata port to D0
+ * @ap: target ATA port
+ *
+ * This function is called at the beginning of ata_port_probe().
+ */
+void ata_acpi_port_power_on(struct ata_port *ap)
+{
+ acpi_handle handle;
+ int i;
+
+ /*
+ * If ATA_FLAG_ACPI_SATA is set, the acpi fwnode is attached to the
+ * ata_device instead of the ata_port.
+ */
+ if (ap->flags & ATA_FLAG_ACPI_SATA) {
+ for (i = 0; i < ATA_MAX_DEVICES; i++) {
+ struct ata_device *dev = &ap->link.device[i];
+
+ if (!is_acpi_device_node(dev->tdev.fwnode))
+ continue;
+ handle = ACPI_HANDLE(&dev->tdev);
+ if (!acpi_bus_power_manageable(handle))
+ continue;
+ if (acpi_bus_set_power(handle, ACPI_STATE_D0))
+ ata_dev_err(dev,
+ "acpi: failed to set power state to D0\n");
+ }
+ return;
+ }
+
+ if (!is_acpi_device_node(ap->tdev.fwnode))
+ return;
+ handle = ACPI_HANDLE(&ap->tdev);
+ if (!acpi_bus_power_manageable(handle))
+ return;
+
+ if (acpi_bus_set_power(handle, ACPI_STATE_D0))
+ ata_port_err(ap, "acpi: failed to set power state to D0\n");
+}
+
+/**
* ata_acpi_dissociate - dissociate ATA host from ACPI objects
* @host: target ATA host
*
@@ -514,15 +581,19 @@ unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask);
/**
- * ata_acpi_cbl_80wire - Check for 80 wire cable
+ * ata_acpi_cbl_pata_type - Return PATA cable type
* @ap: Port to check
- * @gtm: GTM data to use
*
- * Return 1 if the @gtm indicates the BIOS selected an 80wire mode.
+ * Return ATA_CBL_PATA* according to the transfer mode selected by BIOS
*/
-int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
+int ata_acpi_cbl_pata_type(struct ata_port *ap)
{
struct ata_device *dev;
+ int ret = ATA_CBL_PATA_UNK;
+ const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
+
+ if (!gtm)
+ return ATA_CBL_PATA40;
ata_for_each_dev(dev, &ap->link, ENABLED) {
unsigned int xfer_mask, udma_mask;
@@ -530,13 +601,17 @@ int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
xfer_mask = ata_acpi_gtm_xfermask(dev, gtm);
ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask);
- if (udma_mask & ~ATA_UDMA_MASK_40C)
- return 1;
+ ret = ATA_CBL_PATA40;
+
+ if (udma_mask & ~ATA_UDMA_MASK_40C) {
+ ret = ATA_CBL_PATA80;
+ break;
+ }
}
- return 0;
+ return ret;
}
-EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire);
+EXPORT_SYMBOL_GPL(ata_acpi_cbl_pata_type);
static void ata_acpi_gtf_to_tf(struct ata_device *dev,
const struct ata_acpi_gtf *gtf,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 79b20da0a256..0b24bd169d61 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -65,8 +65,8 @@
#include "libata-transport.h"
const struct ata_port_operations ata_base_port_ops = {
- .prereset = ata_std_prereset,
- .postreset = ata_std_postreset,
+ .reset.prereset = ata_std_prereset,
+ .reset.postreset = ata_std_postreset,
.error_handler = ata_std_error_handler,
.sched_eh = ata_std_sched_eh,
.end_eh = ata_std_end_eh,
@@ -2154,14 +2154,43 @@ retry:
return err_mask;
}
+static inline void ata_clear_log_directory(struct ata_device *dev)
+{
+ memset(dev->gp_log_dir, 0, ATA_SECT_SIZE);
+}
+
+static int ata_read_log_directory(struct ata_device *dev)
+{
+ u16 version;
+
+ /* If the log page is already cached, do nothing. */
+ version = get_unaligned_le16(&dev->gp_log_dir[0]);
+ if (version == 0x0001)
+ return 0;
+
+ if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, dev->gp_log_dir, 1)) {
+ ata_clear_log_directory(dev);
+ return -EIO;
+ }
+
+ version = get_unaligned_le16(&dev->gp_log_dir[0]);
+ if (version != 0x0001)
+ ata_dev_warn_once(dev,
+ "Invalid log directory version 0x%04x\n",
+ version);
+
+ return 0;
+}
+
static int ata_log_supported(struct ata_device *dev, u8 log)
{
if (dev->quirks & ATA_QUIRK_NO_LOG_DIR)
return 0;
- if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, dev->sector_buf, 1))
+ if (ata_read_log_directory(dev))
return 0;
- return get_unaligned_le16(&dev->sector_buf[log * 2]);
+
+ return get_unaligned_le16(&dev->gp_log_dir[log * 2]);
}
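The General Purpose Log Directory is a single 512-byte page laid out as 256 little-endian 16-bit words: word 0 holds the directory version (0001h) and word N holds the number of pages the device supports for log address N, with 0 meaning unsupported, which is exactly what ata_log_supported() returns. A helper-style sketch of the lookup:

    #include <linux/unaligned.h>    /* assumed include for get_unaligned_le16() */

    /* Pages supported for log address @log in a cached directory page @dir. */
    static u16 log_dir_pages(const u8 *dir, u8 log)
    {
            return get_unaligned_le16(&dir[log * 2]);       /* 0 => unsupported */
    }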
static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
@@ -2421,18 +2450,7 @@ static void ata_dev_config_zac(struct ata_device *dev)
dev->zac_zones_optimal_nonseq = U32_MAX;
dev->zac_zones_max_open = U32_MAX;
- /*
- * Always set the 'ZAC' flag for Host-managed devices.
- */
- if (dev->class == ATA_DEV_ZAC)
- dev->flags |= ATA_DFLAG_ZAC;
- else if (ata_id_zoned_cap(dev->id) == 0x01)
- /*
- * Check for host-aware devices.
- */
- dev->flags |= ATA_DFLAG_ZAC;
-
- if (!(dev->flags & ATA_DFLAG_ZAC))
+ if (!ata_dev_is_zac(dev))
return;
if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
@@ -2495,7 +2513,7 @@ static void ata_dev_config_trusted(struct ata_device *dev)
dev->flags |= ATA_DFLAG_TRUSTED;
}
-void ata_dev_cleanup_cdl_resources(struct ata_device *dev)
+static void ata_dev_cleanup_cdl_resources(struct ata_device *dev)
{
kfree(dev->cdl);
dev->cdl = NULL;
@@ -2801,17 +2819,70 @@ out:
kfree(buf);
}
+/*
+ * Configure features related to link power management.
+ */
+static void ata_dev_config_lpm(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+ unsigned int err_mask;
+
+ if (ap->flags & ATA_FLAG_NO_LPM) {
+ /*
+ * When the port does not support LPM, we cannot support it on
+ * the device either.
+ */
+ dev->quirks |= ATA_QUIRK_NOLPM;
+ } else {
+ /*
+ * Some WD SATA-1 drives have issues with LPM, turn on NOLPM for
+ * them.
+ */
+ if ((dev->quirks & ATA_QUIRK_WD_BROKEN_LPM) &&
+ (dev->id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
+ dev->quirks |= ATA_QUIRK_NOLPM;
+
+ /* ATI specific quirk */
+ if ((dev->quirks & ATA_QUIRK_NO_LPM_ON_ATI) &&
+ ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI))
+ dev->quirks |= ATA_QUIRK_NOLPM;
+ }
+
+ if (dev->quirks & ATA_QUIRK_NOLPM &&
+ ap->target_lpm_policy != ATA_LPM_MAX_POWER) {
+ ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
+ ap->target_lpm_policy = ATA_LPM_MAX_POWER;
+ }
+
+ /*
+ * Device Initiated Power Management (DIPM) is normally disabled by
+ * default on a device. However, DIPM may have been enabled and that
+ * setting kept even after COMRESET because of the Software Settings
+ * Preservation feature. So if the port does not support DIPM and the
+ * device does, disable DIPM on the device.
+ */
+ if (ap->flags & ATA_FLAG_NO_DIPM && ata_id_has_dipm(dev->id)) {
+ err_mask = ata_dev_set_feature(dev,
+ SETFEATURES_SATA_DISABLE, SATA_DIPM);
+ if (err_mask && err_mask != AC_ERR_DEV)
+ ata_dev_err(dev, "Disable DIPM failed, Emask 0x%x\n",
+ err_mask);
+ }
+}
+
static void ata_dev_print_features(struct ata_device *dev)
{
if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
return;
ata_dev_info(dev,
- "Features:%s%s%s%s%s%s%s%s\n",
+ "Features:%s%s%s%s%s%s%s%s%s%s\n",
dev->flags & ATA_DFLAG_FUA ? " FUA" : "",
dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
+ ata_id_has_hipm(dev->id) ? " HIPM" : "",
+ ata_id_has_dipm(dev->id) ? " DIPM" : "",
dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
dev->flags & ATA_DFLAG_CDL ? " CDL" : "",
@@ -2848,6 +2919,9 @@ int ata_dev_configure(struct ata_device *dev)
return 0;
}
+ /* Clear the general purpose log directory cache. */
+ ata_clear_log_directory(dev);
+
/* Set quirks */
dev->quirks |= ata_dev_quirks(dev);
ata_force_quirks(dev);
@@ -2871,23 +2945,6 @@ int ata_dev_configure(struct ata_device *dev)
if (rc)
return rc;
- /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
- if ((dev->quirks & ATA_QUIRK_WD_BROKEN_LPM) &&
- (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
- dev->quirks |= ATA_QUIRK_NOLPM;
-
- if (dev->quirks & ATA_QUIRK_NO_LPM_ON_ATI &&
- ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI))
- dev->quirks |= ATA_QUIRK_NOLPM;
-
- if (ap->flags & ATA_FLAG_NO_LPM)
- dev->quirks |= ATA_QUIRK_NOLPM;
-
- if (dev->quirks & ATA_QUIRK_NOLPM) {
- ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
- dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
- }
-
/* let ACPI work its magic */
rc = ata_acpi_on_devcfg(dev);
if (rc)
@@ -2949,6 +3006,16 @@ int ata_dev_configure(struct ata_device *dev)
}
dev->n_sectors = ata_id_n_sectors(id);
+ if (ata_id_is_locked(id)) {
+ /*
+ * If Security locked, set capacity to zero to prevent
+ * any I/O, e.g. partition scanning, as any I/O to a
+ * locked drive will result in user visible errors.
+ */
+ ata_dev_info(dev,
+ "Security locked, setting capacity to zero\n");
+ dev->n_sectors = 0;
+ }
/* get current R/W Multiple count setting */
if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
@@ -2974,6 +3041,7 @@ int ata_dev_configure(struct ata_device *dev)
ata_dev_config_chs(dev);
}
+ ata_dev_config_lpm(dev);
ata_dev_config_fua(dev);
ata_dev_config_devslp(dev);
ata_dev_config_sense_reporting(dev);
@@ -3078,6 +3146,10 @@ int ata_dev_configure(struct ata_device *dev)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
dev->max_sectors);
+ if (dev->quirks & ATA_QUIRK_MAX_SEC_8191)
+ dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_8191,
+ dev->max_sectors);
+
if (dev->quirks & ATA_QUIRK_MAX_SEC_LBA48)
dev->max_sectors = ATA_MAX_SECTORS_LBA48;
@@ -3449,7 +3521,7 @@ static int ata_dev_set_mode(struct ata_device *dev)
}
/**
- * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
+ * ata_set_mode - Program timings and issue SET FEATURES - XFER
* @link: link on which timings will be programmed
* @r_failed_dev: out parameter for failed device
*
@@ -3465,7 +3537,7 @@ static int ata_dev_set_mode(struct ata_device *dev)
* 0 on success, negative errno otherwise
*/
-int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
struct ata_port *ap = link->ap;
struct ata_device *dev;
@@ -3546,7 +3618,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
*r_failed_dev = dev;
return rc;
}
-EXPORT_SYMBOL_GPL(ata_do_set_mode);
+EXPORT_SYMBOL_GPL(ata_set_mode);
/**
* ata_wait_ready - wait for link to become ready
@@ -3930,6 +4002,7 @@ static const char * const ata_quirk_names[] = {
[__ATA_QUIRK_NO_DMA_LOG] = "nodmalog",
[__ATA_QUIRK_NOTRIM] = "notrim",
[__ATA_QUIRK_MAX_SEC_1024] = "maxsec1024",
+ [__ATA_QUIRK_MAX_SEC_8191] = "maxsec8191",
[__ATA_QUIRK_MAX_TRIM_128M] = "maxtrim128m",
[__ATA_QUIRK_NO_NCQ_ON_ATI] = "noncqonati",
[__ATA_QUIRK_NO_LPM_ON_ATI] = "nolpmonati",
@@ -4036,6 +4109,12 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
{ "LITEON CX1-JB*-HP", NULL, ATA_QUIRK_MAX_SEC_1024 },
{ "LITEON EP1-*", NULL, ATA_QUIRK_MAX_SEC_1024 },
+ /*
+ * These devices time out with higher max sects.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=220693
+ */
+ { "DELLBOSS VD", "MV.R00-0", ATA_QUIRK_MAX_SEC_8191 },
+
/* Devices we expect to fail diagnostics */
/* Devices where NCQ should be avoided */
@@ -4148,6 +4227,10 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
/* Apacer models with LPM issues */
{ "Apacer AS340*", NULL, ATA_QUIRK_NOLPM },
+ /* Silicon Motion models with LPM issues */
+ { "MD619HXCLDE3TC", "TCVAID", ATA_QUIRK_NOLPM },
+ { "MD619GXCLDE3TC", "TCV35D", ATA_QUIRK_NOLPM },
+
/* These specific Samsung models/firmware-revs do not handle LPM well */
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_QUIRK_NOLPM },
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_QUIRK_NOLPM },
@@ -4541,7 +4624,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
return AC_ERR_INVALID;
/* set up init dev params taskfile */
- ata_dev_dbg(dev, "init dev params \n");
+ ata_dev_dbg(dev, "init dev params\n");
ata_tf_init(dev, &tf);
tf.command = ATA_CMD_INIT_DEV_PARAMS;
@@ -5843,6 +5926,8 @@ void ata_port_probe(struct ata_port *ap)
struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned long flags;
+ ata_acpi_port_power_on(ap);
+
/* kick EH for boot probing */
spin_lock_irqsave(ap->lock, flags);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 1f90fcd2b3c4..2586e77ebf45 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -153,8 +153,6 @@ ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
#undef CMDS
static void __ata_port_freeze(struct ata_port *ap);
-static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
- struct ata_device **r_failed_dev);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
@@ -825,7 +823,7 @@ void ata_port_wait_eh(struct ata_port *ap)
retry:
spin_lock_irqsave(ap->lock, flags);
- while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
+ while (ata_port_eh_scheduled(ap)) {
prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_irqrestore(ap->lock, flags);
schedule();
@@ -909,7 +907,7 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
+static void ata_eh_set_pending(struct ata_port *ap, bool fastdrain)
{
unsigned int cnt;
@@ -949,7 +947,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
qc->flags |= ATA_QCFLAG_EH;
- ata_eh_set_pending(ap, 1);
+ ata_eh_set_pending(ap, true);
/* The following will fail if timeout has already expired.
* ata_scsi_error() takes care of such scmds on EH entry.
@@ -971,7 +969,7 @@ void ata_std_sched_eh(struct ata_port *ap)
if (ap->pflags & ATA_PFLAG_INITIALIZING)
return;
- ata_eh_set_pending(ap, 1);
+ ata_eh_set_pending(ap, true);
scsi_schedule_eh(ap->scsi_host);
trace_ata_std_sched_eh(ap);
@@ -1022,7 +1020,7 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
int tag, nr_aborted = 0;
/* we're gonna abort all commands, no need for fast drain */
- ata_eh_set_pending(ap, 0);
+ ata_eh_set_pending(ap, false);
/* include internal tag in iteration */
ata_qc_for_each_with_internal(ap, qc, tag) {
@@ -2073,6 +2071,188 @@ out:
ata_eh_done(link, dev, ATA_EH_GET_SUCCESS_SENSE);
}
+/*
+ * Check if a link is established. This is a relaxed version of
+ * ata_phys_link_online() which accounts for the fact that this is potentially
+ * called after changing the link power management policy, which may not be
+ * reflected immediately in the SStatus register (e.g., we may still be seeing
+ * the PHY in the partial, slumber or devsleep power management state).
+ * So check that:
+ * - A device is still present, that is, DET is 1h (Device presence detected
+ * but Phy communication not established) or 3h (Device presence detected and
+ * Phy communication established)
+ * - Communication is established, that is, IPM is not 0h, indicating that the
+ *   PHY is online or in a low power state.
+ */
+static bool ata_eh_link_established(struct ata_link *link)
+{
+ u32 sstatus;
+ u8 det, ipm;
+
+ /*
+ * For old IDE/PATA adapters that do not have a valid scr_read method,
+ * or if reading the SStatus register fails, assume that the device is
+ * present. Device probe will determine if that is really the case.
+ */
+ if (sata_scr_read(link, SCR_STATUS, &sstatus))
+ return true;
+
+ det = sstatus & 0x0f;
+ ipm = (sstatus >> 8) & 0x0f;
+
+ return (det & 0x01) && ipm;
+}
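The shifts above follow the SStatus register layout: DET lives in bits 3:0 and IPM in bits 11:8. A self-contained sketch of the same decode with a worked value (0x613 is illustrative; IPM encodings per the SATA spec):

    #include <linux/types.h>

    /* Relaxed "link established" check, as in ata_eh_link_established(). */
    static bool link_established(u32 sstatus)
    {
            u8 det = sstatus & 0x0f;        /* DET: bits 3:0  */
            u8 ipm = (sstatus >> 8) & 0x0f; /* IPM: bits 11:8 */

            return (det & 0x01) && ipm;
    }

    /*
     * sstatus == 0x613: det == 3h (present, Phy up), ipm == 6h (slumber)
     * => established. sstatus == 0x000 (no device) => not established.
     */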
+
+/**
+ * ata_eh_link_set_lpm - configure SATA interface power management
+ * @link: link to configure
+ * @policy: the link power management policy
+ * @r_failed_dev: out parameter for failed device
+ *
+ * Enable SATA Interface power management. This will enable
+ * Device Interface Power Management (DIPM) for min_power and
+ * medium_power_with_dipm policies, and then call driver specific
+ * callbacks for enabling Host Initiated Power management.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int ata_eh_link_set_lpm(struct ata_link *link,
+ enum ata_lpm_policy policy,
+ struct ata_device **r_failed_dev)
+{
+ struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
+ struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
+ enum ata_lpm_policy old_policy = link->lpm_policy;
+ bool host_has_dipm = !(link->ap->flags & ATA_FLAG_NO_DIPM);
+ unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
+ unsigned int err_mask;
+ int rc;
+
+ /* if the link or host doesn't do LPM, noop */
+ if (!IS_ENABLED(CONFIG_SATA_HOST) ||
+ (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
+ return 0;
+
+ /*
+ * This function currently assumes that it will never be supplied policy
+ * ATA_LPM_UNKNOWN.
+ */
+ if (WARN_ON_ONCE(policy == ATA_LPM_UNKNOWN))
+ return 0;
+
+ ata_link_dbg(link, "Set LPM policy: %d -> %d\n", old_policy, policy);
+
+ /*
+ * DIPM is enabled only for ATA_LPM_MIN_POWER,
+ * ATA_LPM_MIN_POWER_WITH_PARTIAL, and ATA_LPM_MED_POWER_WITH_DIPM, as
+ * some devices misbehave when the host NACKs transition to SLUMBER.
+ */
+ ata_for_each_dev(dev, link, ENABLED) {
+ bool dev_has_hipm = ata_id_has_hipm(dev->id);
+ bool dev_has_dipm = ata_id_has_dipm(dev->id);
+
+ /* find the first enabled and LPM enabled devices */
+ if (!link_dev)
+ link_dev = dev;
+
+ if (!lpm_dev &&
+ (dev_has_hipm || (dev_has_dipm && host_has_dipm)))
+ lpm_dev = dev;
+
+ hints &= ~ATA_LPM_EMPTY;
+ if (!dev_has_hipm)
+ hints &= ~ATA_LPM_HIPM;
+
+ /* disable DIPM before changing link config */
+ if (dev_has_dipm) {
+ err_mask = ata_dev_set_feature(dev,
+ SETFEATURES_SATA_DISABLE, SATA_DIPM);
+ if (err_mask && err_mask != AC_ERR_DEV) {
+ ata_dev_warn(dev,
+ "failed to disable DIPM, Emask 0x%x\n",
+ err_mask);
+ rc = -EIO;
+ goto fail;
+ }
+ }
+ }
+
+ if (ap) {
+ rc = ap->ops->set_lpm(link, policy, hints);
+ if (!rc && ap->slave_link)
+ rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
+ } else
+ rc = sata_pmp_set_lpm(link, policy, hints);
+
+ /*
+ * Attribute link config failure to the first (LPM) enabled
+ * device on the link.
+ */
+ if (rc) {
+ if (rc == -EOPNOTSUPP) {
+ link->flags |= ATA_LFLAG_NO_LPM;
+ return 0;
+ }
+ dev = lpm_dev ? lpm_dev : link_dev;
+ goto fail;
+ }
+
+ /*
+ * Low level driver acked the transition. Issue DIPM command
+ * with the new policy set.
+ */
+ link->lpm_policy = policy;
+ if (ap && ap->slave_link)
+ ap->slave_link->lpm_policy = policy;
+
+ /*
+ * Host config updated, enable DIPM if transitioning to
+ * ATA_LPM_MIN_POWER, ATA_LPM_MIN_POWER_WITH_PARTIAL, or
+ * ATA_LPM_MED_POWER_WITH_DIPM.
+ */
+ ata_for_each_dev(dev, link, ENABLED) {
+ bool dev_has_dipm = ata_id_has_dipm(dev->id);
+
+ if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && host_has_dipm &&
+ dev_has_dipm) {
+ err_mask = ata_dev_set_feature(dev,
+ SETFEATURES_SATA_ENABLE, SATA_DIPM);
+ if (err_mask && err_mask != AC_ERR_DEV) {
+ ata_dev_warn(dev,
+ "failed to enable DIPM, Emask 0x%x\n",
+ err_mask);
+ rc = -EIO;
+ goto fail;
+ }
+ }
+ }
+
+ link->last_lpm_change = jiffies;
+ link->flags |= ATA_LFLAG_CHANGED;
+
+ return 0;
+
+fail:
+ /* restore the old policy */
+ link->lpm_policy = old_policy;
+ if (ap && ap->slave_link)
+ ap->slave_link->lpm_policy = old_policy;
+
+ /* if no device or only one more chance is left, disable LPM */
+ if (!dev || ehc->tries[dev->devno] <= 2) {
+ ata_link_warn(link, "disabling LPM on the link\n");
+ link->flags |= ATA_LFLAG_NO_LPM;
+ }
+ if (r_failed_dev)
+ *r_failed_dev = dev;
+ return rc;
+}
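The `policy >= ATA_LPM_MED_POWER_WITH_DIPM` comparisons above lean on the ordering of enum ata_lpm_policy, in which the three DIPM-enabling policies sort last (names as in current include/linux/libata.h, comments elided):

    enum ata_lpm_policy {
            ATA_LPM_UNKNOWN,
            ATA_LPM_MAX_POWER,
            ATA_LPM_MED_POWER,
            ATA_LPM_MED_POWER_WITH_DIPM,
            ATA_LPM_MIN_POWER_WITH_PARTIAL,
            ATA_LPM_MIN_POWER,
    };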
+
/**
* ata_eh_link_autopsy - analyze error and determine recovery action
* @link: host link to perform autopsy on
@@ -2606,25 +2786,28 @@ static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
return reset(link, classes, deadline);
}
-static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
+static bool ata_eh_followup_srst_needed(struct ata_link *link, int rc)
{
if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
- return 0;
+ return false;
if (rc == -EAGAIN)
- return 1;
+ return true;
if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
- return 1;
- return 0;
+ return true;
+ return false;
}
int ata_eh_reset(struct ata_link *link, int classify,
- ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
- ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
+ struct ata_reset_operations *reset_ops)
{
struct ata_port *ap = link->ap;
struct ata_link *slave = ap->slave_link;
struct ata_eh_context *ehc = &link->eh_context;
struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
+ ata_reset_fn_t hardreset = reset_ops->hardreset;
+ ata_reset_fn_t softreset = reset_ops->softreset;
+ ata_prereset_fn_t prereset = reset_ops->prereset;
+ ata_postreset_fn_t postreset = reset_ops->postreset;
unsigned int *classes = ehc->classes;
unsigned int lflags = link->flags;
int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
@@ -3123,13 +3306,13 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
* to ap->target_lpm_policy after revalidation is done.
*/
if (link->lpm_policy > ATA_LPM_MAX_POWER) {
- rc = ata_eh_set_lpm(link, ATA_LPM_MAX_POWER,
- r_failed_dev);
+ rc = ata_eh_link_set_lpm(link, ATA_LPM_MAX_POWER,
+ r_failed_dev);
if (rc)
goto err;
}
- if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
+ if (!ata_eh_link_established(ata_dev_phys_link(dev))) {
rc = -EIO;
goto err;
}
@@ -3233,12 +3416,12 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
}
/**
- * ata_set_mode - Program timings and issue SET FEATURES - XFER
+ * ata_eh_set_mode - Program timings and issue SET FEATURES - XFER
* @link: link on which timings will be programmed
* @r_failed_dev: out parameter for failed device
*
* Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
- * ata_set_mode() fails, pointer to the failing device is
+ * ata_eh_set_mode() fails, pointer to the failing device is
* returned in @r_failed_dev.
*
* LOCKING:
@@ -3247,7 +3430,8 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
* RETURNS:
* 0 on success, negative errno otherwise
*/
-int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+static int ata_eh_set_mode(struct ata_link *link,
+ struct ata_device **r_failed_dev)
{
struct ata_port *ap = link->ap;
struct ata_device *dev;
@@ -3268,7 +3452,7 @@ int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
if (ap->ops->set_mode)
rc = ap->ops->set_mode(link, r_failed_dev);
else
- rc = ata_do_set_mode(link, r_failed_dev);
+ rc = ata_set_mode(link, r_failed_dev);
/* if transfer mode has changed, set DUBIOUS_XFER on device */
ata_for_each_dev(dev, link, ENABLED) {
@@ -3408,153 +3592,6 @@ static int ata_eh_maybe_retry_flush(struct ata_device *dev)
return rc;
}
-/**
- * ata_eh_set_lpm - configure SATA interface power management
- * @link: link to configure power management
- * @policy: the link power management policy
- * @r_failed_dev: out parameter for failed device
- *
- * Enable SATA Interface power management. This will enable
- * Device Interface Power Management (DIPM) for min_power and
- * medium_power_with_dipm policies, and then call driver specific
- * callbacks for enabling Host Initiated Power management.
- *
- * LOCKING:
- * EH context.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
- struct ata_device **r_failed_dev)
-{
- struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
- struct ata_eh_context *ehc = &link->eh_context;
- struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
- enum ata_lpm_policy old_policy = link->lpm_policy;
- bool host_has_dipm = !(link->ap->flags & ATA_FLAG_NO_DIPM);
- unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
- unsigned int err_mask;
- int rc;
-
- /* if the link or host doesn't do LPM, noop */
- if (!IS_ENABLED(CONFIG_SATA_HOST) ||
- (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
- return 0;
-
- /*
- * This function currently assumes that it will never be supplied policy
- * ATA_LPM_UNKNOWN.
- */
- if (WARN_ON_ONCE(policy == ATA_LPM_UNKNOWN))
- return 0;
-
- /*
- * DIPM is enabled only for ATA_LPM_MIN_POWER,
- * ATA_LPM_MIN_POWER_WITH_PARTIAL, and ATA_LPM_MED_POWER_WITH_DIPM, as
- * some devices misbehave when the host NACKs transition to SLUMBER.
- */
- ata_for_each_dev(dev, link, ENABLED) {
- bool dev_has_hipm = ata_id_has_hipm(dev->id);
- bool dev_has_dipm = ata_id_has_dipm(dev->id);
-
- /* find the first enabled and LPM enabled devices */
- if (!link_dev)
- link_dev = dev;
-
- if (!lpm_dev &&
- (dev_has_hipm || (dev_has_dipm && host_has_dipm)))
- lpm_dev = dev;
-
- hints &= ~ATA_LPM_EMPTY;
- if (!dev_has_hipm)
- hints &= ~ATA_LPM_HIPM;
-
- /* disable DIPM before changing link config */
- if (dev_has_dipm) {
- err_mask = ata_dev_set_feature(dev,
- SETFEATURES_SATA_DISABLE, SATA_DIPM);
- if (err_mask && err_mask != AC_ERR_DEV) {
- ata_dev_warn(dev,
- "failed to disable DIPM, Emask 0x%x\n",
- err_mask);
- rc = -EIO;
- goto fail;
- }
- }
- }
-
- if (ap) {
- rc = ap->ops->set_lpm(link, policy, hints);
- if (!rc && ap->slave_link)
- rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
- } else
- rc = sata_pmp_set_lpm(link, policy, hints);
-
- /*
- * Attribute link config failure to the first (LPM) enabled
- * device on the link.
- */
- if (rc) {
- if (rc == -EOPNOTSUPP) {
- link->flags |= ATA_LFLAG_NO_LPM;
- return 0;
- }
- dev = lpm_dev ? lpm_dev : link_dev;
- goto fail;
- }
-
- /*
- * Low level driver acked the transition. Issue DIPM command
- * with the new policy set.
- */
- link->lpm_policy = policy;
- if (ap && ap->slave_link)
- ap->slave_link->lpm_policy = policy;
-
- /*
- * Host config updated, enable DIPM if transitioning to
- * ATA_LPM_MIN_POWER, ATA_LPM_MIN_POWER_WITH_PARTIAL, or
- * ATA_LPM_MED_POWER_WITH_DIPM.
- */
- ata_for_each_dev(dev, link, ENABLED) {
- bool dev_has_dipm = ata_id_has_dipm(dev->id);
-
- if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && host_has_dipm &&
- dev_has_dipm) {
- err_mask = ata_dev_set_feature(dev,
- SETFEATURES_SATA_ENABLE, SATA_DIPM);
- if (err_mask && err_mask != AC_ERR_DEV) {
- ata_dev_warn(dev,
- "failed to enable DIPM, Emask 0x%x\n",
- err_mask);
- rc = -EIO;
- goto fail;
- }
- }
- }
-
- link->last_lpm_change = jiffies;
- link->flags |= ATA_LFLAG_CHANGED;
-
- return 0;
-
-fail:
- /* restore the old policy */
- link->lpm_policy = old_policy;
- if (ap && ap->slave_link)
- ap->slave_link->lpm_policy = old_policy;
-
- /* if no device or only one more chance is left, disable LPM */
- if (!dev || ehc->tries[dev->devno] <= 2) {
- ata_link_warn(link, "disabling LPM on the link\n");
- link->flags |= ATA_LFLAG_NO_LPM;
- }
- if (r_failed_dev)
- *r_failed_dev = dev;
- return rc;
-}
-
int ata_link_nr_enabled(struct ata_link *link)
{
struct ata_device *dev;
@@ -3727,10 +3764,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
/**
* ata_eh_recover - recover host port after error
* @ap: host port to recover
- * @prereset: prereset method (can be NULL)
- * @softreset: softreset method (can be NULL)
- * @hardreset: hardreset method (can be NULL)
- * @postreset: postreset method (can be NULL)
+ * @reset_ops: The set of reset operations to use
* @r_failed_link: out parameter for failed link
*
 * This is the alpha and omega, yin and yang, heart and soul of
@@ -3746,9 +3780,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
* RETURNS:
* 0 on success, -errno on failure.
*/
-int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
- ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
- ata_postreset_fn_t postreset,
+int ata_eh_recover(struct ata_port *ap, struct ata_reset_operations *reset_ops,
struct ata_link **r_failed_link)
{
struct ata_link *link;
@@ -3816,8 +3848,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
if (!(ehc->i.action & ATA_EH_RESET))
continue;
- rc = ata_eh_reset(link, ata_link_nr_vacant(link),
- prereset, softreset, hardreset, postreset);
+ rc = ata_eh_reset(link, ata_link_nr_vacant(link), reset_ops);
if (rc) {
ata_link_err(link, "reset failed, giving up\n");
goto out;
@@ -3898,7 +3929,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
/* configure transfer mode if necessary */
if (ehc->i.flags & ATA_EHI_SETMODE) {
- rc = ata_set_mode(link, &dev);
+ rc = ata_eh_set_mode(link, &dev);
if (rc)
goto rest_fail;
ehc->i.flags &= ~ATA_EHI_SETMODE;
@@ -3943,7 +3974,8 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
config_lpm:
/* configure link power saving */
if (link->lpm_policy != ap->target_lpm_policy) {
- rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
+ rc = ata_eh_link_set_lpm(link, ap->target_lpm_policy,
+ &dev);
if (rc)
goto rest_fail;
}
@@ -4037,59 +4069,39 @@ void ata_eh_finish(struct ata_port *ap)
}
/**
- * ata_do_eh - do standard error handling
+ * ata_std_error_handler - standard error handler
* @ap: host port to handle error for
*
- * @prereset: prereset method (can be NULL)
- * @softreset: softreset method (can be NULL)
- * @hardreset: hardreset method (can be NULL)
- * @postreset: postreset method (can be NULL)
- *
* Perform standard error handling sequence.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
-void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
- ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
- ata_postreset_fn_t postreset)
+void ata_std_error_handler(struct ata_port *ap)
{
- struct ata_device *dev;
+ struct ata_reset_operations *reset_ops = &ap->ops->reset;
+ struct ata_link *link = &ap->link;
int rc;
+ /* Ignore built-in hardresets if SCR access is not available */
+ if ((reset_ops->hardreset == sata_std_hardreset ||
+ reset_ops->hardreset == sata_sff_hardreset) &&
+ !sata_scr_valid(link))
+ link->flags |= ATA_LFLAG_NO_HRST;
+
ata_eh_autopsy(ap);
ata_eh_report(ap);
- rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
- NULL);
+ rc = ata_eh_recover(ap, reset_ops, NULL);
if (rc) {
- ata_for_each_dev(dev, &ap->link, ALL)
+ struct ata_device *dev;
+
+ ata_for_each_dev(dev, link, ALL)
ata_dev_disable(dev);
}
ata_eh_finish(ap);
}
-
-/**
- * ata_std_error_handler - standard error handler
- * @ap: host port to handle error for
- *
- * Standard error handler
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- */
-void ata_std_error_handler(struct ata_port *ap)
-{
- struct ata_port_operations *ops = ap->ops;
- ata_reset_fn_t hardreset = ops->hardreset;
-
- /* ignore built-in hardreset if SCR access is not available */
- if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
- hardreset = NULL;
-
- ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
-}
EXPORT_SYMBOL_GPL(ata_std_error_handler);
#ifdef CONFIG_PM
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index d5d189328ae6..57023324a56f 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -15,9 +15,9 @@
const struct ata_port_operations sata_pmp_port_ops = {
.inherits = &sata_port_ops,
- .pmp_prereset = ata_std_prereset,
- .pmp_hardreset = sata_std_hardreset,
- .pmp_postreset = ata_std_postreset,
+ .pmp_reset.prereset = ata_std_prereset,
+ .pmp_reset.hardreset = sata_std_hardreset,
+ .pmp_reset.postreset = ata_std_postreset,
.error_handler = sata_pmp_error_handler,
};
@@ -727,10 +727,7 @@ static int sata_pmp_revalidate_quick(struct ata_device *dev)
/**
* sata_pmp_eh_recover_pmp - recover PMP
* @ap: ATA port PMP is attached to
- * @prereset: prereset method (can be NULL)
- * @softreset: softreset method
- * @hardreset: hardreset method
- * @postreset: postreset method (can be NULL)
+ * @reset_ops: The set of reset operations to use
*
* Recover PMP attached to @ap. Recovery procedure is somewhat
* similar to that of ata_eh_recover() except that reset should
@@ -744,8 +741,7 @@ static int sata_pmp_revalidate_quick(struct ata_device *dev)
* 0 on success, -errno on failure.
*/
static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
- ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
- ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
+ struct ata_reset_operations *reset_ops)
{
struct ata_link *link = &ap->link;
struct ata_eh_context *ehc = &link->eh_context;
@@ -767,8 +763,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
struct ata_link *tlink;
/* reset */
- rc = ata_eh_reset(link, 0, prereset, softreset, hardreset,
- postreset);
+ rc = ata_eh_reset(link, 0, reset_ops);
if (rc) {
ata_link_err(link, "failed to reset PMP, giving up\n");
goto fail;
@@ -932,8 +927,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
retry:
/* PMP attached? */
if (!sata_pmp_attached(ap)) {
- rc = ata_eh_recover(ap, ops->prereset, ops->softreset,
- ops->hardreset, ops->postreset, NULL);
+ rc = ata_eh_recover(ap, &ops->reset, NULL);
if (rc) {
ata_for_each_dev(dev, &ap->link, ALL)
ata_dev_disable(dev);
@@ -951,8 +945,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
}
/* recover pmp */
- rc = sata_pmp_eh_recover_pmp(ap, ops->prereset, ops->softreset,
- ops->hardreset, ops->postreset);
+ rc = sata_pmp_eh_recover_pmp(ap, &ops->reset);
if (rc)
goto pmp_fail;
@@ -978,8 +971,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
goto pmp_fail;
/* recover links */
- rc = ata_eh_recover(ap, ops->pmp_prereset, ops->pmp_softreset,
- ops->pmp_hardreset, ops->pmp_postreset, &link);
+ rc = ata_eh_recover(ap, &ops->pmp_reset, &link);
if (rc)
goto link_fail;
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index cb46ce276bb1..b2817a2995d6 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -900,14 +900,52 @@ static const char *ata_lpm_policy_names[] = {
[ATA_LPM_MIN_POWER] = "min_power",
};
+/*
+ * Check if a port supports link power management.
+ * Must be called with the port locked.
+ */
+static bool ata_scsi_lpm_supported(struct ata_port *ap)
+{
+ struct ata_link *link;
+ struct ata_device *dev;
+
+ if (ap->flags & ATA_FLAG_NO_LPM)
+ return false;
+
+ ata_for_each_link(link, ap, EDGE) {
+ ata_for_each_dev(dev, link, ENABLED) {
+ if (dev->quirks & ATA_QUIRK_NOLPM)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static ssize_t ata_scsi_lpm_supported_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ unsigned long flags;
+ bool supported;
+
+ spin_lock_irqsave(ap->lock, flags);
+ supported = ata_scsi_lpm_supported(ap);
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return sysfs_emit(buf, "%d\n", supported);
+}
+DEVICE_ATTR(link_power_management_supported, 0444,
+ ata_scsi_lpm_supported_show, NULL);
+EXPORT_SYMBOL_GPL(dev_attr_link_power_management_supported);
+
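A minimal userspace sketch of consuming the new read-only attribute; the host0 path is an assumption (the host number is system-dependent), while its placement under the SCSI host class follows from the Scsi_Host wiring above:

	#include <stdio.h>

	int main(void)
	{
		char buf[8];
		FILE *f = fopen("/sys/class/scsi_host/host0/"
				"link_power_management_supported", "r");

		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("LPM supported: %s", buf);	/* "1" or "0" */
		fclose(f);
		return 0;
	}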
static ssize_t ata_scsi_lpm_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(device);
struct ata_port *ap = ata_shost_to_port(shost);
- struct ata_link *link;
- struct ata_device *dev;
enum ata_lpm_policy policy;
unsigned long flags;
@@ -924,13 +962,9 @@ static ssize_t ata_scsi_lpm_store(struct device *device,
spin_lock_irqsave(ap->lock, flags);
- ata_for_each_link(link, ap, EDGE) {
- ata_for_each_dev(dev, &ap->link, ENABLED) {
- if (dev->quirks & ATA_QUIRK_NOLPM) {
- count = -EOPNOTSUPP;
- goto out_unlock;
- }
- }
+ if (!ata_scsi_lpm_supported(ap)) {
+ count = -EOPNOTSUPP;
+ goto out_unlock;
}
ap->target_lpm_policy = policy;
@@ -1699,6 +1733,6 @@ const struct ata_port_operations sata_port_ops = {
.inherits = &ata_base_port_ops,
.qc_defer = ata_std_qc_defer,
- .hardreset = sata_std_hardreset,
+ .reset.hardreset = sata_std_hardreset,
};
EXPORT_SYMBOL_GPL(sata_port_ops);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index a21c9895408d..721d3f270c8e 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -351,7 +351,7 @@ EXPORT_SYMBOL_GPL(ata_common_sdev_groups);
/**
* ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
* @sdev: SCSI device for which BIOS geometry is to be determined
- * @bdev: block device associated with @sdev
+ * @unused: gendisk associated with @sdev
* @capacity: capacity of SCSI device
* @geom: location to which geometry will be output
*
@@ -366,7 +366,7 @@ EXPORT_SYMBOL_GPL(ata_common_sdev_groups);
* RETURNS:
* Zero.
*/
-int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+int ata_std_bios_param(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
geom[0] = 255;
@@ -859,18 +859,14 @@ static void ata_to_sense_error(u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
};
static const unsigned char stat_table[][4] = {
- /* Must be first because BUSY means no other bits valid */
- {0x80, ABORTED_COMMAND, 0x47, 0x00},
- // Busy, fake parity for now
- {0x40, ILLEGAL_REQUEST, 0x21, 0x04},
- // Device ready, unaligned write command
- {0x20, HARDWARE_ERROR, 0x44, 0x00},
- // Device fault, internal target failure
- {0x08, ABORTED_COMMAND, 0x47, 0x00},
- // Timed out in xfer, fake parity for now
- {0x04, RECOVERED_ERROR, 0x11, 0x00},
- // Recovered ECC error Medium error, recovered
- {0xFF, 0xFF, 0xFF, 0xFF}, // END mark
+ /* Busy: must be first because BUSY means no other bits valid */
+ { ATA_BUSY, ABORTED_COMMAND, 0x00, 0x00 },
+ /* Device fault: INTERNAL TARGET FAILURE */
+ { ATA_DF, HARDWARE_ERROR, 0x44, 0x00 },
+ /* Corrected data error */
+ { ATA_CORR, RECOVERED_ERROR, 0x00, 0x00 },
+
+ { 0xFF, 0xFF, 0xFF, 0xFF }, /* END mark */
};
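The table is scanned in order and the first matching status bit wins, which is why BUSY must stay first; a sketch of the lookup, assuming the consuming loop in ata_to_sense_error() is unchanged by this patch (only the rows are):

	static void stat_to_sense(u8 drv_stat, u8 *sk, u8 *asc, u8 *ascq)
	{
		int i;

		/*
		 * e.g. drv_stat 0xA0 (BSY | DF): BUSY matches first, so this
		 * yields ABORTED_COMMAND rather than the device-fault entry
		 * (HARDWARE_ERROR / ASC 0x44, INTERNAL TARGET FAILURE).
		 */
		for (i = 0; stat_table[i][0] != 0xFF; i++) {
			if (stat_table[i][0] & drv_stat) {
				*sk = stat_table[i][1];
				*asc = stat_table[i][2];
				*ascq = stat_table[i][3];
				return;
			}
		}
	}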
/*
@@ -942,6 +938,8 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
ata_dev_dbg(dev,
"missing result TF: can't generate ATA PT sense data\n");
+ if (qc->err_mask)
+ ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
return;
}
@@ -994,10 +992,17 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
return;
}
+ if (ata_id_is_locked(dev->id)) {
+ /* Security locked */
+ /* LOGICAL UNIT ACCESS NOT AUTHORIZED */
+ ata_scsi_set_sense(dev, cmd, DATA_PROTECT, 0x74, 0x71);
+ return;
+ }
+
if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
ata_dev_dbg(dev,
- "missing result TF: can't generate sense data\n");
- return;
+ "Missing result TF: reporting aborted command\n");
+ goto aborted;
}
/* Use ata_to_sense_error() to map status register bits
@@ -1008,13 +1013,15 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
ata_to_sense_error(tf->status, tf->error,
&sense_key, &asc, &ascq);
ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq);
- } else {
- /* Could not decode error */
- ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n",
- tf->status, qc->err_mask);
- ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
return;
}
+
+ /* Could not decode error */
+ ata_dev_warn(dev,
+ "Could not decode error 0x%x, status 0x%x (err_mask=0x%x)\n",
+ tf->error, tf->status, qc->err_mask);
+aborted:
+ ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
}
void ata_scsi_sdev_config(struct scsi_device *sdev)
@@ -1095,6 +1102,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
*/
sdev->manage_runtime_start_stop = 1;
sdev->manage_shutdown = 1;
+ sdev->manage_restart = ata_acpi_dev_manage_restart(dev);
sdev->force_runtime_start_on_system_start = 1;
}
@@ -1923,8 +1931,7 @@ static unsigned int ata_scsiop_inq_00(struct ata_device *dev,
};
for (i = 0; i < sizeof(pages); i++) {
- if (pages[i] == 0xb6 &&
- !(dev->flags & ATA_DFLAG_ZAC))
+ if (pages[i] == 0xb6 && !ata_dev_is_zac(dev))
continue;
rbuf[num_pages + 4] = pages[i];
num_pages++;
@@ -2181,7 +2188,7 @@ static unsigned int ata_scsiop_inq_b2(struct ata_device *dev,
static unsigned int ata_scsiop_inq_b6(struct ata_device *dev,
struct scsi_cmnd *cmd, u8 *rbuf)
{
- if (!(dev->flags & ATA_DFLAG_ZAC)) {
+ if (!ata_dev_is_zac(dev)) {
ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
return 0;
}
@@ -3905,21 +3912,16 @@ static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
/* Check cdl_ctrl */
switch (buf[0] & 0x03) {
case 0:
- /* Disable CDL if it is enabled */
- if (!(dev->flags & ATA_DFLAG_CDL_ENABLED))
- return 0;
+ /* Disable CDL */
ata_dev_dbg(dev, "Disabling CDL\n");
cdl_action = 0;
dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
break;
case 0x02:
/*
- * Enable CDL if not already enabled. Since this is mutually
- * exclusive with NCQ priority, allow this only if NCQ priority
- * is disabled.
+ * Enable CDL. Since CDL is mutually exclusive with NCQ
+ * priority, allow this only if NCQ priority is disabled.
*/
- if (dev->flags & ATA_DFLAG_CDL_ENABLED)
- return 0;
if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
ata_dev_err(dev,
"NCQ priority must be disabled to enable CDL\n");
@@ -4317,9 +4319,10 @@ int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
* scsi_queue_rq() will defer commands if scsi_host_in_recovery().
* However, this check is done without holding the ap->lock (a libata
* specific lock), so we can have received an error irq since then,
- * therefore we must check if EH is pending, while holding ap->lock.
+ * therefore we must check if EH is pending or running, while holding
+ * ap->lock.
*/
- if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS))
+ if (ata_port_eh_scheduled(ap))
return SCSI_MLQUEUE_DEVICE_BUSY;
if (unlikely(!scmd->cmd_len))
@@ -4634,24 +4637,23 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
* ata_scsi_offline_dev - offline attached SCSI device
* @dev: ATA device to offline attached SCSI device for
*
- * This function is called from ata_eh_hotplug() and responsible
- * for taking the SCSI device attached to @dev offline. This
- * function is called with host lock which protects dev->sdev
- * against clearing.
+ * This function is called from ata_eh_detach_dev() and is responsible for
+ * taking the SCSI device attached to @dev offline. This function is
+ * called with host lock which protects dev->sdev against clearing.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
- * 1 if attached SCSI device exists, 0 otherwise.
+ * true if attached SCSI device exists, false otherwise.
*/
-int ata_scsi_offline_dev(struct ata_device *dev)
+bool ata_scsi_offline_dev(struct ata_device *dev)
{
if (dev->sdev) {
scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/**
@@ -4900,8 +4902,10 @@ void ata_scsi_dev_rescan(struct work_struct *work)
spin_unlock_irqrestore(ap->lock, flags);
if (do_resume) {
ret = scsi_resume_device(sdev);
- if (ret == -EWOULDBLOCK)
+ if (ret == -EWOULDBLOCK) {
+ scsi_device_put(sdev);
goto unlock_scan;
+ }
dev->flags &= ~ATA_DFLAG_RESUMING;
}
ret = scsi_rescan_device(sdev);
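The added scsi_device_put() plugs a reference leak on the -EWOULDBLOCK path: ata_scsi_dev_rescan() presumably takes a reference on sdev (via scsi_device_get()) before dropping ap->lock, so every early exit must drop it. A sketch of the invariant, with illustrative names:

	static int rescan_one(struct scsi_device *sdev, bool do_resume)
	{
		int ret;

		if (scsi_device_get(sdev))
			return 0;		/* device going away */

		if (do_resume) {
			ret = scsi_resume_device(sdev);
			if (ret == -EWOULDBLOCK) {
				/* early exit must still drop the reference */
				scsi_device_put(sdev);
				return ret;
			}
		}

		ret = scsi_rescan_device(sdev);
		scsi_device_put(sdev);
		return ret;
	}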
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 5a46c066abc3..785b6e371abf 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -31,10 +31,10 @@ const struct ata_port_operations ata_sff_port_ops = {
.freeze = ata_sff_freeze,
.thaw = ata_sff_thaw,
- .prereset = ata_sff_prereset,
- .softreset = ata_sff_softreset,
- .hardreset = sata_sff_hardreset,
- .postreset = ata_sff_postreset,
+ .reset.prereset = ata_sff_prereset,
+ .reset.softreset = ata_sff_softreset,
+ .reset.hardreset = sata_sff_hardreset,
+ .reset.postreset = ata_sff_postreset,
.error_handler = ata_sff_error_handler,
.sff_dev_select = ata_sff_dev_select,
@@ -614,7 +614,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
offset = qc->cursg->offset + qc->cursg_ofs;
/* get the current page and offset */
- page = nth_page(page, (offset >> PAGE_SHIFT));
+ page += offset >> PAGE_SHIFT;
offset %= PAGE_SIZE;
/* don't overrun current sg */
@@ -631,7 +631,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
unsigned int split_len = PAGE_SIZE - offset;
ata_pio_xfer(qc, page, offset, split_len);
- ata_pio_xfer(qc, nth_page(page, 1), 0, count - split_len);
+ ata_pio_xfer(qc, page + 1, 0, count - split_len);
} else {
ata_pio_xfer(qc, page, offset, count);
}
@@ -751,7 +751,7 @@ next_sg:
offset = sg->offset + qc->cursg_ofs;
/* get the current page and offset */
- page = nth_page(page, (offset >> PAGE_SHIFT));
+ page += offset >> PAGE_SHIFT;
offset %= PAGE_SIZE;
/* don't overrun current sg */
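Dropping nth_page() in these two hunks relies on the pages of a single scatterlist segment being contiguous in the memmap, so plain pointer arithmetic reaches the same struct page; presumably part of the tree-wide nth_page() retirement. The equivalence being used:

	/* was: page = nth_page(page, offset >> PAGE_SHIFT);        */
	/* now: page += offset >> PAGE_SHIFT;                       */
	/* both select the page backing 'offset' within the segment */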
@@ -2054,8 +2054,6 @@ EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
*/
void ata_sff_error_handler(struct ata_port *ap)
{
- ata_reset_fn_t softreset = ap->ops->softreset;
- ata_reset_fn_t hardreset = ap->ops->hardreset;
struct ata_queued_cmd *qc;
unsigned long flags;
@@ -2077,13 +2075,7 @@ void ata_sff_error_handler(struct ata_port *ap)
spin_unlock_irqrestore(ap->lock, flags);
- /* ignore built-in hardresets if SCR access is not available */
- if ((hardreset == sata_std_hardreset ||
- hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
- hardreset = NULL;
-
- ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
- ap->ops->postreset);
+ ata_std_error_handler(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_error_handler);
@@ -3199,7 +3191,8 @@ void ata_sff_port_init(struct ata_port *ap)
int __init ata_sff_init(void)
{
- ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
+ ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM | WQ_PERCPU,
+ WQ_MAX_ACTIVE);
if (!ata_sff_wq)
return -ENOMEM;
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index e898be49df6b..62415fe67a11 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -202,7 +202,7 @@ show_ata_port_##name(struct device *dev, \
{ \
struct ata_port *ap = transport_class_to_port(dev); \
\
- return scnprintf(buf, 20, format_string, cast ap->field); \
+ return sysfs_emit(buf, format_string, cast ap->field); \
}
#define ata_port_simple_attr(field, name, format_string, type) \
@@ -389,7 +389,7 @@ show_ata_dev_##field(struct device *dev, \
{ \
struct ata_device *ata_dev = transport_class_to_dev(dev); \
\
- return scnprintf(buf, 20, format_string, cast ata_dev->field); \
+ return sysfs_emit(buf, format_string, cast ata_dev->field); \
}
#define ata_dev_simple_attr(field, format_string, type) \
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index ce5c628fa6fd..0e7ecac73680 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -44,6 +44,18 @@ static inline bool ata_sstatus_online(u32 sstatus)
return (sstatus & 0xf) == 0x3;
}
+static inline bool ata_dev_is_zac(struct ata_device *dev)
+{
+ /* Host managed device or host aware device */
+ return dev->class == ATA_DEV_ZAC ||
+ ata_id_zoned_cap(dev->id) == 0x01;
+}
+
+static inline bool ata_port_eh_scheduled(struct ata_port *ap)
+{
+ return ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS);
+}
+
#ifdef CONFIG_ATA_FORCE
extern void ata_force_cbl(struct ata_port *ap);
#else
@@ -90,7 +102,6 @@ extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
extern const char *sata_spd_string(unsigned int spd);
extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
u8 page, void *buf, unsigned int sectors);
-void ata_dev_cleanup_cdl_resources(struct ata_device *dev);
#define to_ata_port(d) container_of(d, struct ata_port, tdev)
@@ -119,6 +130,8 @@ extern void ata_acpi_on_disable(struct ata_device *dev);
extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
extern void ata_acpi_bind_port(struct ata_port *ap);
extern void ata_acpi_bind_dev(struct ata_device *dev);
+extern void ata_acpi_port_power_on(struct ata_port *ap);
+extern bool ata_acpi_dev_manage_restart(struct ata_device *dev);
extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
#else
static inline void ata_acpi_dissociate(struct ata_host *host) { }
@@ -129,6 +142,8 @@ static inline void ata_acpi_set_state(struct ata_port *ap,
pm_message_t state) { }
static inline void ata_acpi_bind_port(struct ata_port *ap) {}
static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
+static inline void ata_acpi_port_power_on(struct ata_port *ap) {}
+static inline bool ata_acpi_dev_manage_restart(struct ata_device *dev) { return false; }
#endif
/* libata-scsi.c */
@@ -137,7 +152,7 @@ extern struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
extern int ata_scsi_add_hosts(struct ata_host *host,
const struct scsi_host_template *sht);
extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
-extern int ata_scsi_offline_dev(struct ata_device *dev);
+extern bool ata_scsi_offline_dev(struct ata_device *dev);
extern bool ata_scsi_sense_is_valid(u8 sk, u8 asc, u8 ascq);
extern void ata_scsi_set_sense(struct ata_device *dev,
struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
@@ -169,12 +184,9 @@ extern void ata_eh_autopsy(struct ata_port *ap);
const char *ata_get_cmd_name(u8 command);
extern void ata_eh_report(struct ata_port *ap);
extern int ata_eh_reset(struct ata_link *link, int classify,
- ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
- ata_reset_fn_t hardreset, ata_postreset_fn_t postreset);
-extern int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
-extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
- ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
- ata_postreset_fn_t postreset,
+ struct ata_reset_operations *reset_ops);
+extern int ata_eh_recover(struct ata_port *ap,
+ struct ata_reset_operations *reset_ops,
struct ata_link **r_failed_disk);
extern void ata_eh_finish(struct ata_port *ap);
extern int ata_ering_map(struct ata_ering *ering,
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index ab38871b5e00..23fff10af2ac 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -216,7 +216,7 @@ static struct ata_port_operations pacpi_ops = {
.mode_filter = pacpi_mode_filter,
.set_piomode = pacpi_set_piomode,
.set_dmamode = pacpi_set_dmamode,
- .prereset = pacpi_pre_reset,
+ .reset.prereset = pacpi_pre_reset,
.port_start = pacpi_port_start,
};
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index bb790edd6036..9d5cb9c34c52 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -392,11 +392,11 @@ static struct ata_port_operations ali_20_port_ops = {
* Port operations for DMA capable ALi with cable detect
*/
static struct ata_port_operations ali_c2_port_ops = {
- .inherits = &ali_dma_base_ops,
- .check_atapi_dma = ali_check_atapi_dma,
- .cable_detect = ali_c2_cable_detect,
- .dev_config = ali_lock_sectors,
- .postreset = ali_c2_c3_postreset,
+ .inherits = &ali_dma_base_ops,
+ .check_atapi_dma = ali_check_atapi_dma,
+ .cable_detect = ali_c2_cable_detect,
+ .dev_config = ali_lock_sectors,
+ .reset.postreset = ali_c2_c3_postreset,
};
/*
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 5b02b89748b7..a2fecadc927d 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -394,7 +394,7 @@ static const struct scsi_host_template amd_sht = {
static const struct ata_port_operations amd_base_port_ops = {
.inherits = &ata_bmdma32_port_ops,
- .prereset = amd_pre_reset,
+ .reset.prereset = amd_pre_reset,
};
static struct ata_port_operations amd33_port_ops = {
@@ -429,7 +429,7 @@ static const struct ata_port_operations nv_base_port_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = ata_cable_ignore,
.mode_filter = nv_mode_filter,
- .prereset = nv_pre_reset,
+ .reset.prereset = nv_pre_reset,
.host_stop = nv_host_stop,
};
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 40544282f455..6160414172a3 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -301,7 +301,7 @@ static struct ata_port_operations artop6210_ops = {
.cable_detect = ata_cable_40wire,
.set_piomode = artop6210_set_piomode,
.set_dmamode = artop6210_set_dmamode,
- .prereset = artop62x0_pre_reset,
+ .reset.prereset = artop62x0_pre_reset,
.qc_defer = artop6210_qc_defer,
};
@@ -310,7 +310,7 @@ static struct ata_port_operations artop6260_ops = {
.cable_detect = artop6260_cable_detect,
.set_piomode = artop6260_set_piomode,
.set_dmamode = artop6260_set_dmamode,
- .prereset = artop62x0_pre_reset,
+ .reset.prereset = artop62x0_pre_reset,
};
static void atp8xx_fixup(struct pci_dev *pdev)
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 8c5cc803aab3..4c612f9543f6 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -264,7 +264,7 @@ static struct ata_port_operations atiixp_port_ops = {
.bmdma_start = atiixp_bmdma_start,
.bmdma_stop = atiixp_bmdma_stop,
- .prereset = atiixp_prereset,
+ .reset.prereset = atiixp_prereset,
.cable_detect = atiixp_cable_detect,
.set_piomode = atiixp_set_piomode,
.set_dmamode = atiixp_set_dmamode,
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index b811efd2cc34..73e81e160c91 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -27,7 +27,7 @@
#include <scsi/scsi_host.h>
#include <linux/dmi.h>
-#ifdef CONFIG_X86_32
+#if defined(CONFIG_X86) && defined(CONFIG_X86_32)
#include <asm/msr.h>
static int use_msr;
module_param_named(msr, use_msr, int, 0644);
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 2e6eccf2902f..6fe49b303fee 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -243,7 +243,7 @@ static struct ata_port_operations efar_ops = {
.cable_detect = efar_cable_detect,
.set_piomode = efar_set_piomode,
.set_dmamode = efar_set_dmamode,
- .prereset = efar_pre_reset,
+ .reset.prereset = efar_pre_reset,
};
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index e8cda988feb5..b2b9e0058333 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -879,8 +879,8 @@ static const struct scsi_host_template ep93xx_pata_sht = {
static struct ata_port_operations ep93xx_pata_port_ops = {
.inherits = &ata_bmdma_port_ops,
- .softreset = ep93xx_pata_softreset,
- .hardreset = ATA_OP_NULL,
+ .reset.softreset = ep93xx_pata_softreset,
+ .reset.hardreset = ATA_OP_NULL,
.sff_dev_select = ep93xx_pata_dev_select,
.sff_set_devctl = ep93xx_pata_set_devctl,
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 5280e9960025..b96e8bd2a3f8 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -322,7 +322,7 @@ static const struct scsi_host_template hpt36x_sht = {
static struct ata_port_operations hpt366_port_ops = {
.inherits = &ata_bmdma_port_ops,
- .prereset = hpt366_prereset,
+ .reset.prereset = hpt366_prereset,
.cable_detect = hpt36x_cable_detect,
.mode_filter = hpt366_filter,
.set_piomode = hpt366_set_piomode,
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 4af22b819416..07e3a984cbb1 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -543,7 +543,7 @@ static struct ata_port_operations hpt370_port_ops = {
.cable_detect = hpt37x_cable_detect,
.set_piomode = hpt37x_set_piomode,
.set_dmamode = hpt37x_set_dmamode,
- .prereset = hpt37x_pre_reset,
+ .reset.prereset = hpt37x_pre_reset,
};
/*
@@ -567,7 +567,7 @@ static struct ata_port_operations hpt302_port_ops = {
.cable_detect = hpt37x_cable_detect,
.set_piomode = hpt37x_set_piomode,
.set_dmamode = hpt37x_set_dmamode,
- .prereset = hpt37x_pre_reset,
+ .reset.prereset = hpt37x_pre_reset,
};
/*
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 5b1ecccf3c83..2cc57fcf2c46 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -356,7 +356,7 @@ static struct ata_port_operations hpt3xxn_port_ops = {
.cable_detect = hpt3x2n_cable_detect,
.set_piomode = hpt3x2n_set_piomode,
.set_dmamode = hpt3x2n_set_dmamode,
- .prereset = hpt3x2n_pre_reset,
+ .reset.prereset = hpt3x2n_pre_reset,
};
/*
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 61d8760f09d9..70f056e47e6b 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -336,7 +336,7 @@ static struct ata_port_operations pata_icside_port_ops = {
.cable_detect = ata_cable_40wire,
.set_dmamode = pata_icside_set_dmamode,
- .postreset = pata_icside_postreset,
+ .reset.postreset = pata_icside_postreset,
.port_start = ATA_OP_NULL, /* don't need PRD table */
};
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index 9cbe2132ce59..a6f2cfc1602e 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -238,7 +238,7 @@ static struct ata_port_operations it8213_ops = {
.cable_detect = it8213_cable_detect,
.set_piomode = it8213_set_piomode,
.set_dmamode = it8213_set_dmamode,
- .prereset = it8213_pre_reset,
+ .reset.prereset = it8213_pre_reset,
};
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 042f6ad1f7c6..fc762dcc61bf 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -75,6 +75,7 @@
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
@@ -632,9 +633,9 @@ static void it821x_display_disk(struct ata_port *ap, int n, u8 *buf)
cbl = "";
if (mode)
- snprintf(mbuf, 8, "%5s%d", mtype, mode - 1);
+ snprintf(mbuf, sizeof(mbuf), "%5s%d", mtype, mode - 1);
else
- strcpy(mbuf, "PIO");
+ strscpy(mbuf, "PIO");
if (buf[52] == 4)
ata_port_info(ap, "%d: %-6s %-8s %s %s\n",
n, mbuf, types[buf[52]], id, cbl);
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index f51fb8219762..b885f33e8980 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -113,7 +113,7 @@ static const struct scsi_host_template jmicron_sht = {
static struct ata_port_operations jmicron_ops = {
.inherits = &ata_bmdma_port_ops,
- .prereset = jmicron_pre_reset,
+ .reset.prereset = jmicron_pre_reset,
};
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index fbf5f07ea357..9eefdc5df5df 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -758,7 +758,7 @@ static void pata_macio_irq_clear(struct ata_port *ap)
static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
{
- dev_dbg(priv->dev, "Enabling & resetting... \n");
+ dev_dbg(priv->dev, "Enabling & resetting...\n");
if (priv->mediabay)
return;
@@ -1298,7 +1298,7 @@ static int pata_macio_pci_attach(struct pci_dev *pdev,
priv->dev = &pdev->dev;
/* Get MMIO regions */
- if (pci_request_regions(pdev, "pata-macio")) {
+ if (pcim_request_all_regions(pdev, "pata-macio")) {
dev_err(&pdev->dev,
"Cannot obtain PCI resources\n");
return -EBUSY;
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 8119caaad605..deab67328388 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -99,7 +99,7 @@ static const struct scsi_host_template marvell_sht = {
static struct ata_port_operations marvell_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = marvell_cable_detect,
- .prereset = marvell_pre_reset,
+ .reset.prereset = marvell_pre_reset,
};
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index 69e4baf27d72..ce310ae7c93a 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -145,7 +145,7 @@ static struct ata_port_operations mpiix_port_ops = {
.qc_issue = mpiix_qc_issue,
.cable_detect = ata_cable_40wire,
.set_piomode = mpiix_set_piomode,
- .prereset = mpiix_pre_reset,
+ .reset.prereset = mpiix_pre_reset,
.sff_data_xfer = ata_sff_data_xfer32,
};
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index 44cc24d21d5f..bdb55c1a3280 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -123,7 +123,7 @@ static struct ata_port_operations ns87410_port_ops = {
.qc_issue = ns87410_qc_issue,
.cable_detect = ata_cable_40wire,
.set_piomode = ns87410_set_piomode,
- .prereset = ns87410_pre_reset,
+ .reset.prereset = ns87410_pre_reset,
};
static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 2d32125c16fd..df42ebe98db7 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -941,7 +941,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
/* 16 bit but not True IDE */
base = cs0 + 0x800;
octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
- octeon_cf_ops.softreset = octeon_cf_softreset16;
+ octeon_cf_ops.reset.softreset = octeon_cf_softreset16;
octeon_cf_ops.sff_check_status = octeon_cf_check_status16;
octeon_cf_ops.sff_tf_read = octeon_cf_tf_read16;
octeon_cf_ops.sff_tf_load = octeon_cf_tf_load16;
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index 3d01b7000e41..81a7f3eb5654 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -214,7 +214,7 @@ static struct ata_port_operations oldpiix_pata_ops = {
.cable_detect = ata_cable_40wire,
.set_piomode = oldpiix_set_piomode,
.set_dmamode = oldpiix_set_dmamode,
- .prereset = oldpiix_pre_reset,
+ .reset.prereset = oldpiix_pre_reset,
};
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index 3d23f57eb128..3db1b95d1404 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -156,7 +156,7 @@ static struct ata_port_operations opti_port_ops = {
.inherits = &ata_sff_port_ops,
.cable_detect = ata_cable_40wire,
.set_piomode = opti_set_piomode,
- .prereset = opti_pre_reset,
+ .reset.prereset = opti_pre_reset,
};
static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index dfc36b4ec9c6..b42dba5f4e05 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -322,7 +322,9 @@ static int optidma_set_mode(struct ata_link *link, struct ata_device **r_failed)
u8 r;
int nybble = 4 * ap->port_no;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- int rc = ata_do_set_mode(link, r_failed);
+ int rc;
+
+ rc = ata_set_mode(link, r_failed);
if (rc == 0) {
pci_read_config_byte(pdev, 0x43, &r);
@@ -344,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
.set_piomode = optidma_set_pio_mode,
.set_dmamode = optidma_set_dma_mode,
.set_mode = optidma_set_mode,
- .prereset = optidma_pre_reset,
+ .reset.prereset = optidma_pre_reset,
};
static struct ata_port_operations optiplus_port_ops = {
diff --git a/drivers/ata/pata_parport/pata_parport.c b/drivers/ata/pata_parport/pata_parport.c
index 93ebf566b54e..22bd3ff6b7ae 100644
--- a/drivers/ata/pata_parport/pata_parport.c
+++ b/drivers/ata/pata_parport/pata_parport.c
@@ -321,8 +321,8 @@ static void pata_parport_drain_fifo(struct ata_queued_cmd *qc)
static struct ata_port_operations pata_parport_port_ops = {
.inherits = &ata_sff_port_ops,
- .softreset = pata_parport_softreset,
- .hardreset = NULL,
+ .reset.softreset = pata_parport_softreset,
+ .reset.hardreset = NULL,
.sff_dev_select = pata_parport_dev_select,
.sff_set_devctl = pata_parport_set_devctl,
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 5b602206c522..caefcd8c4b3c 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -46,7 +46,7 @@ static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_d
struct ata_device *slave = &link->device[1];
if (!ata_dev_enabled(master) || !ata_dev_enabled(slave))
- return ata_do_set_mode(link, r_failed_dev);
+ return ata_set_mode(link, r_failed_dev);
if (memcmp(master->id + ATA_ID_FW_REV, slave->id + ATA_ID_FW_REV,
ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0) {
@@ -58,7 +58,7 @@ static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_d
ata_dev_disable(slave);
}
}
- return ata_do_set_mode(link, r_failed_dev);
+ return ata_set_mode(link, r_failed_dev);
}
/**
@@ -344,6 +344,7 @@ static const struct pcmcia_device_id pcmcia_devices[] = {
PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728),
+ PCMCIA_DEVICE_PROD_ID2("PCMCIA ATA/ATAPI Adapter", 0x888d7b73),
PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1),
PCMCIA_DEVICE_PROD_ID12("SEAGATE", "ST1", 0x87c1b330, 0xe1f30883),
PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d),
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 6820c5597b14..ae914dcb0c83 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -130,7 +130,7 @@ static struct ata_port_operations pdc2027x_pata100_ops = {
.inherits = &ata_bmdma_port_ops,
.check_atapi_dma = pdc2027x_check_atapi_dma,
.cable_detect = pdc2027x_cable_detect,
- .prereset = pdc2027x_prereset,
+ .reset.prereset = pdc2027x_prereset,
};
static struct ata_port_operations pdc2027x_pata133_ops = {
@@ -295,7 +295,7 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
}
/* Set the PIO timing registers using value table for 133MHz */
- ata_port_dbg(ap, "Set pio regs... \n");
+ ata_port_dbg(ap, "Set PIO regs...\n");
ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
ctcr0 &= 0xffff0000;
@@ -308,7 +308,7 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24);
iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
- ata_port_dbg(ap, "Set to pio mode[%u] \n", pio);
+ ata_port_dbg(ap, "Set to PIO mode[%u]\n", pio);
}
/**
@@ -341,7 +341,7 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
iowrite32(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1));
}
- ata_port_dbg(ap, "Set udma regs... \n");
+ ata_port_dbg(ap, "Set UDMA regs...\n");
ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
ctcr1 &= 0xff000000;
@@ -350,14 +350,14 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
(pdc2027x_udma_timing_tbl[udma_mode].value2 << 16);
iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
- ata_port_dbg(ap, "Set to udma mode[%u] \n", udma_mode);
+ ata_port_dbg(ap, "Set to UDMA mode[%u]\n", udma_mode);
} else if ((dma_mode >= XFER_MW_DMA_0) &&
(dma_mode <= XFER_MW_DMA_2)) {
/* Set the MDMA timing registers with value table for 133MHz */
unsigned int mdma_mode = dma_mode & 0x07;
- ata_port_dbg(ap, "Set mdma regs... \n");
+ ata_port_dbg(ap, "Set MDMA regs...\n");
ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
ctcr0 &= 0x0000ffff;
@@ -366,7 +366,7 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
- ata_port_dbg(ap, "Set to mdma mode[%u] \n", mdma_mode);
+ ata_port_dbg(ap, "Set to MDMA mode[%u]\n", mdma_mode);
} else {
ata_port_err(ap, "Unknown dma mode [%u] ignored\n", dma_mode);
}
@@ -387,7 +387,7 @@ static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed
struct ata_device *dev;
int rc;
- rc = ata_do_set_mode(link, r_failed);
+ rc = ata_set_mode(link, r_failed);
if (rc < 0)
return rc;
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 09792aac7f9d..6ff4c11e937d 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -276,7 +276,7 @@ static struct ata_port_operations rdc_pata_ops = {
.cable_detect = rdc_pata_cable_detect,
.set_piomode = rdc_set_piomode,
.set_dmamode = rdc_set_dmamode,
- .prereset = rdc_pata_prereset,
+ .reset.prereset = rdc_pata_prereset,
};
static const struct ata_port_info rdc_port_info = {
@@ -359,8 +359,8 @@ static void rdc_remove_one(struct pci_dev *pdev)
}
static const struct pci_device_id rdc_pci_tbl[] = {
- { PCI_DEVICE(0x17F3, 0x1011), },
- { PCI_DEVICE(0x17F3, 0x1012), },
+ { PCI_VDEVICE(RDC, 0x1011) },
+ { PCI_VDEVICE(RDC, 0x1012) },
{ } /* terminate list */
};
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 31de06b66221..2b751e393771 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -552,7 +552,7 @@ static struct ata_port_operations sis_133_for_sata_ops = {
static struct ata_port_operations sis_base_ops = {
.inherits = &ata_bmdma_port_ops,
- .prereset = sis_pre_reset,
+ .reset.prereset = sis_pre_reset,
};
static struct ata_port_operations sis_133_ops = {
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 93882e976ede..2d24c6b3e9d9 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -248,7 +248,7 @@ static struct ata_port_operations sl82c105_port_ops = {
.bmdma_stop = sl82c105_bmdma_stop,
.cable_detect = ata_cable_40wire,
.set_piomode = sl82c105_set_piomode,
- .prereset = sl82c105_pre_reset,
+ .reset.prereset = sl82c105_pre_reset,
.sff_irq_check = sl82c105_sff_irq_check,
};
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 26d448a869e2..596e86a031b3 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -170,7 +170,7 @@ static struct ata_port_operations triflex_port_ops = {
.bmdma_stop = triflex_bmdma_stop,
.cable_detect = ata_cable_40wire,
.set_piomode = triflex_set_piomode,
- .prereset = triflex_prereset,
+ .reset.prereset = triflex_prereset,
};
static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 696b99720dcb..a8c9cf685b4b 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -201,11 +201,9 @@ static int via_cable_detect(struct ata_port *ap) {
two drives */
if (ata66 & (0x10100000 >> (16 * ap->port_no)))
return ATA_CBL_PATA80;
+
/* Check with ACPI so we can spot BIOS reported SATA bridges */
- if (ata_acpi_init_gtm(ap) &&
- ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap)))
- return ATA_CBL_PATA80;
- return ATA_CBL_PATA40;
+ return ata_acpi_cbl_pata_type(ap);
}
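ata_acpi_cbl_pata_type() is new in this series; judging from the open-coded check it replaces, its logic is presumably equivalent to the following (whether it also distinguishes an "unknown" cable from 40-wire is an assumption):

	static int acpi_cbl_pata_type_sketch(struct ata_port *ap)
	{
		const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);

		/* BIOS-reported SATA bridges show up as 80-wire via _GTM */
		if (gtm && ata_acpi_cbl_80wire(ap, gtm))
			return ATA_CBL_PATA80;
		return ATA_CBL_PATA40;
	}

Note that this also avoids calling ata_acpi_init_gtm() twice, which the removed lines did.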
static int via_pre_reset(struct ata_link *link, unsigned long deadline)
@@ -368,7 +366,8 @@ static unsigned int via_mode_filter(struct ata_device *dev, unsigned int mask)
}
if (dev->class == ATA_DEV_ATAPI &&
- dmi_check_system(no_atapi_dma_dmi_table)) {
+ (dmi_check_system(no_atapi_dma_dmi_table) ||
+ config->id == PCI_DEVICE_ID_VIA_6415)) {
ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n");
mask &= ATA_MASK_PIO;
}
@@ -452,7 +451,7 @@ static struct ata_port_operations via_port_ops = {
.cable_detect = via_cable_detect,
.set_piomode = via_set_piomode,
.set_dmamode = via_set_dmamode,
- .prereset = via_pre_reset,
+ .reset.prereset = via_pre_reset,
.sff_tf_load = via_tf_load,
.port_start = via_port_start,
.mode_filter = via_mode_filter,
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 8e6b2599f0d5..17a5a59861c3 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -140,7 +140,7 @@ static struct ata_port_operations adma_ata_ops = {
.freeze = adma_freeze,
.thaw = adma_thaw,
- .prereset = adma_prereset,
+ .reset.prereset = adma_prereset,
.port_start = adma_port_start,
.port_stop = adma_port_stop,
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 6e1dd0d9c035..7a4f59202156 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1097,7 +1097,7 @@ static struct ata_port_operations sata_dwc_ops = {
.inherits = &ata_sff_port_ops,
.error_handler = sata_dwc_error_handler,
- .hardreset = sata_dwc_hardreset,
+ .reset.hardreset = sata_dwc_hardreset,
.qc_issue = sata_dwc_qc_issue,
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 87e91a937a44..84da8d6ef28e 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1395,9 +1395,9 @@ static struct ata_port_operations sata_fsl_ops = {
.freeze = sata_fsl_freeze,
.thaw = sata_fsl_thaw,
- .softreset = sata_fsl_softreset,
- .hardreset = sata_fsl_hardreset,
- .pmp_softreset = sata_fsl_softreset,
+ .reset.softreset = sata_fsl_softreset,
+ .reset.hardreset = sata_fsl_hardreset,
+ .pmp_reset.softreset = sata_fsl_softreset,
.error_handler = sata_fsl_error_handler,
.post_internal_cmd = sata_fsl_post_internal_cmd,
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index c8c817c51230..3421039f4bae 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -428,7 +428,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
static struct ata_port_operations ahci_highbank_ops = {
.inherits = &ahci_ops,
- .hardreset = ahci_highbank_hardreset,
+ .reset.hardreset = ahci_highbank_hardreset,
.transmit_led_message = ecx_transmit_led_message,
};
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index db9c255dc9f2..46a8c20daf18 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -730,7 +730,7 @@ static struct ata_port_operations inic_port_ops = {
.freeze = inic_freeze,
.thaw = inic_thaw,
- .hardreset = inic_hardreset,
+ .reset.hardreset = inic_hardreset,
.error_handler = inic_error_handler,
.post_internal_cmd = inic_post_internal_cmd,
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index bcbf96867f89..ffb396f61731 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -687,7 +687,7 @@ static struct ata_port_operations mv5_ops = {
.freeze = mv_eh_freeze,
.thaw = mv_eh_thaw,
- .hardreset = mv_hardreset,
+ .reset.hardreset = mv_hardreset,
.scr_read = mv5_scr_read,
.scr_write = mv5_scr_write,
@@ -709,10 +709,10 @@ static struct ata_port_operations mv6_ops = {
.freeze = mv_eh_freeze,
.thaw = mv_eh_thaw,
- .hardreset = mv_hardreset,
- .softreset = mv_softreset,
- .pmp_hardreset = mv_pmp_hardreset,
- .pmp_softreset = mv_softreset,
+ .reset.hardreset = mv_hardreset,
+ .reset.softreset = mv_softreset,
+ .pmp_reset.hardreset = mv_pmp_hardreset,
+ .pmp_reset.softreset = mv_softreset,
.error_handler = mv_pmp_error_handler,
.scr_read = mv_scr_read,
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index f36e2915ccf1..841e7de2bba6 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -462,7 +462,7 @@ static struct ata_port_operations nv_generic_ops = {
.lost_interrupt = ATA_OP_NULL,
.scr_read = nv_scr_read,
.scr_write = nv_scr_write,
- .hardreset = nv_hardreset,
+ .reset.hardreset = nv_hardreset,
};
static struct ata_port_operations nv_nf2_ops = {
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 2df1a070b25a..2a005aede123 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -188,7 +188,7 @@ static struct ata_port_operations pdc_sata_ops = {
.scr_read = pdc_sata_scr_read,
.scr_write = pdc_sata_scr_write,
.port_start = pdc_sata_port_start,
- .hardreset = pdc_sata_hardreset,
+ .reset.hardreset = pdc_sata_hardreset,
};
/* First-generation chips need a more restrictive ->check_atapi_dma op,
@@ -206,7 +206,7 @@ static struct ata_port_operations pdc_pata_ops = {
.freeze = pdc_freeze,
.thaw = pdc_thaw,
.port_start = pdc_common_port_start,
- .softreset = pdc_pata_softreset,
+ .reset.softreset = pdc_pata_softreset,
};
static const struct ata_port_info pdc_port_info[] = {
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 8a6286159044..cfb9b5b61cd7 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -123,8 +123,8 @@ static struct ata_port_operations qs_ata_ops = {
.freeze = qs_freeze,
.thaw = qs_thaw,
- .prereset = qs_prereset,
- .softreset = ATA_OP_NULL,
+ .reset.prereset = qs_prereset,
+ .reset.softreset = ATA_OP_NULL,
.error_handler = qs_error_handler,
.lost_interrupt = ATA_OP_NULL,
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 22820a02d740..487eadd4073f 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -624,7 +624,7 @@ static struct ata_port_operations sata_rcar_port_ops = {
.freeze = sata_rcar_freeze,
.thaw = sata_rcar_thaw,
- .softreset = sata_rcar_softreset,
+ .reset.softreset = sata_rcar_softreset,
.scr_read = sata_rcar_scr_read,
.scr_write = sata_rcar_scr_write,
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 3a99f66198a9..1b6dc950a42a 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -351,7 +351,7 @@ static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
u32 tmp, dev_mode[2] = { };
int rc;
- rc = ata_do_set_mode(link, r_failed);
+ rc = ata_set_mode(link, r_failed);
if (rc)
return rc;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 87f4cde6a686..d642ece9f07a 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -393,10 +393,10 @@ static struct ata_port_operations sil24_ops = {
.freeze = sil24_freeze,
.thaw = sil24_thaw,
- .softreset = sil24_softreset,
- .hardreset = sil24_hardreset,
- .pmp_softreset = sil24_softreset,
- .pmp_hardreset = sil24_pmp_hardreset,
+ .reset.softreset = sil24_softreset,
+ .reset.hardreset = sil24_hardreset,
+ .pmp_reset.softreset = sil24_softreset,
+ .pmp_reset.hardreset = sil24_pmp_hardreset,
.error_handler = sil24_error_handler,
.post_internal_cmd = sil24_post_internal_cmd,
.dev_config = sil24_dev_config,
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 598a872f6a08..c5d6aa36c9c3 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -340,8 +340,8 @@ static const struct scsi_host_template k2_sata_sht = {
static struct ata_port_operations k2_sata_ops = {
.inherits = &ata_bmdma_port_ops,
- .softreset = k2_sata_softreset,
- .hardreset = k2_sata_hardreset,
+ .reset.softreset = k2_sata_softreset,
+ .reset.hardreset = k2_sata_hardreset,
.sff_tf_load = k2_sata_tf_load,
.sff_tf_read = k2_sata_tf_read,
.sff_check_status = k2_stat_check_status,
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index f7f5131af937..0986ebd1eb4e 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -241,7 +241,7 @@ static struct ata_port_operations pdc_20621_ops = {
.freeze = pdc_freeze,
.thaw = pdc_thaw,
- .softreset = pdc_softreset,
+ .reset.softreset = pdc_softreset,
.error_handler = pdc_error_handler,
.lost_interrupt = ATA_OP_NULL,
.post_internal_cmd = pdc_post_internal_cmd,
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 52894ff49dcb..44985796cc47 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -67,7 +67,7 @@ static struct ata_port_operations uli_ops = {
.inherits = &ata_bmdma_port_ops,
.scr_read = uli_scr_read,
.scr_write = uli_scr_write,
- .hardreset = ATA_OP_NULL,
+ .reset.hardreset = ATA_OP_NULL,
};
static const struct ata_port_info uli_port_info = {
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 4ecd8f33b082..68e9003ec2d4 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -120,7 +120,7 @@ static struct ata_port_operations svia_base_ops = {
static struct ata_port_operations vt6420_sata_ops = {
.inherits = &svia_base_ops,
.freeze = svia_noop_freeze,
- .prereset = vt6420_prereset,
+ .reset.prereset = vt6420_prereset,
.bmdma_start = vt6420_bmdma_start,
};
@@ -140,7 +140,7 @@ static struct ata_port_operations vt6421_sata_ops = {
static struct ata_port_operations vt8251_ops = {
.inherits = &svia_base_ops,
- .hardreset = sata_std_hardreset,
+ .reset.hardreset = sata_std_hardreset,
.scr_read = vt8251_scr_read,
.scr_write = vt8251_scr_write,
};
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index d4aa0f353b6c..fa3c76a2b49d 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -279,6 +279,19 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
return NULL;
}
+static int atmtcp_c_pre_send(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ struct atmtcp_hdr *hdr;
+
+ if (skb->len < sizeof(struct atmtcp_hdr))
+ return -EINVAL;
+
+ hdr = (struct atmtcp_hdr *)skb->data;
+ if (hdr->length == ATMTCP_HDR_MAGIC)
+ return -EINVAL;
+
+ return 0;
+}
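The new .pre_send callback gives the ATM core a chance to reject a malformed skb before .send() ever parses it, which is the point of this fix: atmtcp_c_send() used to read the header of a too-short skb. A sketch of the presumed call site in the core (names and error handling are assumptions; only the .pre_send member itself is shown by this patch):

	if (vcc->dev->ops->pre_send) {
		error = vcc->dev->ops->pre_send(vcc, skb);
		if (error) {
			kfree_skb(skb);
			return error;
		}
	}
	return vcc->dev->ops->send(vcc, skb);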
static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
{
@@ -288,7 +301,6 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
struct sk_buff *new_skb;
int result = 0;
- if (!skb->len) return 0;
dev = vcc->dev_data;
hdr = (struct atmtcp_hdr *) skb->data;
if (hdr->length == ATMTCP_HDR_MAGIC) {
@@ -345,6 +357,7 @@ static const struct atmdev_ops atmtcp_v_dev_ops = {
static const struct atmdev_ops atmtcp_c_dev_ops = {
.close = atmtcp_c_close,
+ .pre_send = atmtcp_c_pre_send,
.send = atmtcp_c_send
};
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 4fea1149e003..f62e38571440 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -1374,7 +1374,9 @@ fore200e_open(struct atm_vcc *vcc)
vcc->dev_data = NULL;
+ mutex_lock(&fore200e->rate_mtx);
fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
+ mutex_unlock(&fore200e->rate_mtx);
kfree(fore200e_vcc);
return -EINVAL;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 1206ab764ba9..f2e91b7d79f0 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -852,6 +852,8 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc,
IDT77252_PRV_PADDR(skb) = dma_map_single(&card->pcidev->dev, skb->data,
skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&card->pcidev->dev, IDT77252_PRV_PADDR(skb)))
+ return -ENOMEM;
error = -EINVAL;
@@ -1857,6 +1859,8 @@ add_rx_skb(struct idt77252_dev *card, int queue,
paddr = dma_map_single(&card->pcidev->dev, skb->data,
skb_end_pointer(skb) - skb->data,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&card->pcidev->dev, paddr))
+ goto outpoolrm;
IDT77252_PRV_PADDR(skb) = paddr;
if (push_rx_skb(card, skb, queue)) {
@@ -1871,6 +1875,7 @@ outunmap:
dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb_end_pointer(skb) - skb->data, DMA_FROM_DEVICE);
+outpoolrm:
handle = IDT77252_PRV_POOL(skb);
card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;
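
Both idt77252 hunks add the mandatory success check for streaming DMA mappings: dma_map_single() can fail (for example when bounce-buffer space is exhausted), and the returned handle must be tested with dma_mapping_error() before it reaches hardware. The pattern in isolation:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Map an skb for TX; returns 0 on success or a negative errno. */
static int example_map_tx(struct device *dev, struct sk_buff *skb,
			  dma_addr_t *paddr)
{
	*paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *paddr))
		return -ENOMEM;	/* never hand a bad handle to the device */
	return 0;
}
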
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 2a1fe3080712..0dfa2cdc897c 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -755,7 +755,7 @@ static void lanai_shutdown_rx_vci(const struct lanai_vcc *lvcc)
/* Shutdown transmitting on card.
* Unfortunately the lanai needs us to wait until all the data
* drains out of the buffer before we can dealloc it, so this
- * can take awhile -- up to 370ms for a full 128KB buffer
+ * can take a while -- up to 370ms for a full 128KB buffer
* assuming everyone else is quiet. In theory the time is
* boundless if there's a CBR VCC holding things up.
*/
diff --git a/drivers/auxdisplay/line-display.c b/drivers/auxdisplay/line-display.c
index 8590a4cd21e0..4e22373fcc1a 100644
--- a/drivers/auxdisplay/line-display.c
+++ b/drivers/auxdisplay/line-display.c
@@ -6,20 +6,23 @@
* Author: Paul Burton <paul.burton@mips.com>
*
* Copyright (C) 2021 Glider bv
+ * Copyright (C) 2025 Jean-François Lessard
*/
#ifndef CONFIG_PANEL_BOOT_MESSAGE
#include <generated/utsrelease.h>
#endif
-#include <linux/container_of.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kstrtox.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/timer.h>
@@ -32,6 +35,87 @@
#define DEFAULT_SCROLL_RATE (HZ / 2)
/**
+ * struct linedisp_attachment - Holds the device-to-linedisp mapping
+ * @list: List entry for the linedisp_attachments list
+ * @device: Pointer to the device where linedisp attributes are added
+ * @linedisp: Pointer to the linedisp mapped to the device
+ * @direct: true for directly attached device using linedisp_attach(),
+ * false for child registered device using linedisp_register()
+ */
+struct linedisp_attachment {
+ struct list_head list;
+ struct device *device;
+ struct linedisp *linedisp;
+ bool direct;
+};
+
+static LIST_HEAD(linedisp_attachments);
+static DEFINE_SPINLOCK(linedisp_attachments_lock);
+
+static int create_attachment(struct device *dev, struct linedisp *linedisp, bool direct)
+{
+ struct linedisp_attachment *attachment;
+
+ attachment = kzalloc(sizeof(*attachment), GFP_KERNEL);
+ if (!attachment)
+ return -ENOMEM;
+
+ attachment->device = dev;
+ attachment->linedisp = linedisp;
+ attachment->direct = direct;
+
+ guard(spinlock)(&linedisp_attachments_lock);
+ list_add(&attachment->list, &linedisp_attachments);
+
+ return 0;
+}
+
+static struct linedisp *delete_attachment(struct device *dev, bool direct)
+{
+ struct linedisp_attachment *attachment;
+ struct linedisp *linedisp;
+
+ guard(spinlock)(&linedisp_attachments_lock);
+
+ list_for_each_entry(attachment, &linedisp_attachments, list) {
+ if (attachment->device == dev &&
+ attachment->direct == direct)
+ break;
+ }
+
+ if (list_entry_is_head(attachment, &linedisp_attachments, list))
+ return NULL;
+
+ linedisp = attachment->linedisp;
+ list_del(&attachment->list);
+ kfree(attachment);
+
+ return linedisp;
+}
+
+static struct linedisp *to_linedisp(struct device *dev)
+{
+ struct linedisp_attachment *attachment;
+
+ guard(spinlock)(&linedisp_attachments_lock);
+
+ list_for_each_entry(attachment, &linedisp_attachments, list) {
+ if (attachment->device == dev)
+ break;
+ }
+
+ if (list_entry_is_head(attachment, &linedisp_attachments, list))
+ return NULL;
+
+ return attachment->linedisp;
+}
+
+static inline bool should_scroll(struct linedisp *linedisp)
+{
+ return linedisp->message_len > linedisp->num_chars && linedisp->scroll_rate;
+}
+
+/**
* linedisp_scroll() - scroll the display by a character
* @t: really a pointer to the private data structure
*
@@ -62,8 +146,7 @@ static void linedisp_scroll(struct timer_list *t)
linedisp->scroll_pos %= linedisp->message_len;
/* rearm the timer */
- if (linedisp->message_len > num_chars && linedisp->scroll_rate)
- mod_timer(&linedisp->timer, jiffies + linedisp->scroll_rate);
+ mod_timer(&linedisp->timer, jiffies + linedisp->scroll_rate);
}
/**
@@ -113,8 +196,16 @@ static int linedisp_display(struct linedisp *linedisp, const char *msg,
linedisp->message_len = count;
linedisp->scroll_pos = 0;
- /* update the display */
- linedisp_scroll(&linedisp->timer);
+ if (should_scroll(linedisp)) {
+ /* display scrolling message */
+ linedisp_scroll(&linedisp->timer);
+ } else {
+ /* display static message */
+ memset(linedisp->buf, ' ', linedisp->num_chars);
+ memcpy(linedisp->buf, linedisp->message,
+ umin(linedisp->num_chars, linedisp->message_len));
+ linedisp->ops->update(linedisp);
+ }
return 0;
}
@@ -133,7 +224,7 @@ static int linedisp_display(struct linedisp *linedisp, const char *msg,
static ssize_t message_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
return sysfs_emit(buf, "%s\n", linedisp->message);
}
@@ -152,7 +243,7 @@ static ssize_t message_show(struct device *dev, struct device_attribute *attr,
static ssize_t message_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
int err;
err = linedisp_display(linedisp, buf, count);
@@ -161,10 +252,20 @@ static ssize_t message_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(message);
+static ssize_t num_chars_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct linedisp *linedisp = to_linedisp(dev);
+
+ return sysfs_emit(buf, "%u\n", linedisp->num_chars);
+}
+
+static DEVICE_ATTR_RO(num_chars);
+
static ssize_t scroll_step_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
return sysfs_emit(buf, "%u\n", jiffies_to_msecs(linedisp->scroll_rate));
}
@@ -173,7 +274,7 @@ static ssize_t scroll_step_ms_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
unsigned int ms;
int err;
@@ -181,12 +282,12 @@ static ssize_t scroll_step_ms_store(struct device *dev,
if (err)
return err;
+ timer_delete_sync(&linedisp->timer);
+
linedisp->scroll_rate = msecs_to_jiffies(ms);
- if (linedisp->message && linedisp->message_len > linedisp->num_chars) {
- timer_delete_sync(&linedisp->timer);
- if (linedisp->scroll_rate)
- linedisp_scroll(&linedisp->timer);
- }
+
+ if (should_scroll(linedisp))
+ linedisp_scroll(&linedisp->timer);
return count;
}
@@ -195,7 +296,7 @@ static DEVICE_ATTR_RW(scroll_step_ms);
static ssize_t map_seg_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
struct linedisp_map *map = linedisp->map;
memcpy(buf, &map->map, map->size);
@@ -205,7 +306,7 @@ static ssize_t map_seg_show(struct device *dev, struct device_attribute *attr, c
static ssize_t map_seg_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
struct linedisp_map *map = linedisp->map;
if (count != map->size)
@@ -223,6 +324,7 @@ static DEVICE_ATTR(map_seg14, 0644, map_seg_show, map_seg_store);
static struct attribute *linedisp_attrs[] = {
&dev_attr_message.attr,
+ &dev_attr_num_chars.attr,
&dev_attr_scroll_step_ms.attr,
&dev_attr_map_seg7.attr,
&dev_attr_map_seg14.attr,
@@ -232,7 +334,7 @@ static struct attribute *linedisp_attrs[] = {
static umode_t linedisp_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
struct linedisp_map *map = linedisp->map;
umode_t mode = attr->mode;
@@ -263,7 +365,7 @@ static DEFINE_IDA(linedisp_id);
static void linedisp_release(struct device *dev)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
kfree(linedisp->map);
kfree(linedisp->message);
@@ -321,12 +423,101 @@ static int linedisp_init_map(struct linedisp *linedisp)
#endif
/**
+ * linedisp_attach - attach a character line display
+ * @linedisp: pointer to character line display structure
+ * @dev: pointer of the device to attach to
+ * @num_chars: the number of characters that can be displayed
+ * @ops: character line display operations
+ *
+ * Directly attach the line-display sysfs attributes to the passed device.
+ * The caller is responsible for calling linedisp_detach() to release resources
+ * after use.
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int linedisp_attach(struct linedisp *linedisp, struct device *dev,
+ unsigned int num_chars, const struct linedisp_ops *ops)
+{
+ int err;
+
+ memset(linedisp, 0, sizeof(*linedisp));
+ linedisp->ops = ops;
+ linedisp->num_chars = num_chars;
+ linedisp->scroll_rate = DEFAULT_SCROLL_RATE;
+
+ linedisp->buf = kzalloc(linedisp->num_chars, GFP_KERNEL);
+ if (!linedisp->buf)
+ return -ENOMEM;
+
+ /* initialise a character mapping, if required */
+ err = linedisp_init_map(linedisp);
+ if (err)
+ goto out_free_buf;
+
+ /* initialise a timer for scrolling the message */
+ timer_setup(&linedisp->timer, linedisp_scroll, 0);
+
+ err = create_attachment(dev, linedisp, true);
+ if (err)
+ goto out_del_timer;
+
+ /* display a default message */
+ err = linedisp_display(linedisp, LINEDISP_INIT_TEXT, -1);
+ if (err)
+ goto out_del_attach;
+
+ /* add attribute groups to target device */
+ err = device_add_groups(dev, linedisp_groups);
+ if (err)
+ goto out_del_attach;
+
+ return 0;
+
+out_del_attach:
+ delete_attachment(dev, true);
+out_del_timer:
+ timer_delete_sync(&linedisp->timer);
+out_free_buf:
+ kfree(linedisp->buf);
+ return err;
+}
+EXPORT_SYMBOL_NS_GPL(linedisp_attach, "LINEDISP");
+
+/**
+ * linedisp_detach - detach a character line display
+ * @dev: pointer of the device to detach from, that was previously
+ * attached with linedisp_attach()
+ */
+void linedisp_detach(struct device *dev)
+{
+ struct linedisp *linedisp;
+
+ linedisp = delete_attachment(dev, true);
+ if (!linedisp)
+ return;
+
+ timer_delete_sync(&linedisp->timer);
+
+ device_remove_groups(dev, linedisp_groups);
+
+ kfree(linedisp->map);
+ kfree(linedisp->message);
+ kfree(linedisp->buf);
+}
+EXPORT_SYMBOL_NS_GPL(linedisp_detach, "LINEDISP");
+
+/**
* linedisp_register - register a character line display
* @linedisp: pointer to character line display structure
* @parent: parent device
* @num_chars: the number of characters that can be displayed
* @ops: character line display operations
*
+ * Register the line-display sysfs attributes to a new device named
+ * "linedisp.N" added to the passed parent device.
+ * The caller is responsible for calling linedisp_unregister() to release
+ * resources after use.
+ *
* Return: zero on success, else a negative error code.
*/
int linedisp_register(struct linedisp *linedisp, struct device *parent,
@@ -362,19 +553,23 @@ int linedisp_register(struct linedisp *linedisp, struct device *parent,
/* initialise a timer for scrolling the message */
timer_setup(&linedisp->timer, linedisp_scroll, 0);
- err = device_add(&linedisp->dev);
+ err = create_attachment(&linedisp->dev, linedisp, false);
if (err)
goto out_del_timer;
/* display a default message */
err = linedisp_display(linedisp, LINEDISP_INIT_TEXT, -1);
if (err)
- goto out_del_dev;
+ goto out_del_attach;
+
+ err = device_add(&linedisp->dev);
+ if (err)
+ goto out_del_attach;
return 0;
-out_del_dev:
- device_del(&linedisp->dev);
+out_del_attach:
+ delete_attachment(&linedisp->dev, false);
out_del_timer:
timer_delete_sync(&linedisp->timer);
out_put_device:
@@ -391,6 +586,7 @@ EXPORT_SYMBOL_NS_GPL(linedisp_register, "LINEDISP");
void linedisp_unregister(struct linedisp *linedisp)
{
device_del(&linedisp->dev);
+ delete_attachment(&linedisp->dev, false);
timer_delete_sync(&linedisp->timer);
put_device(&linedisp->dev);
}
diff --git a/drivers/auxdisplay/line-display.h b/drivers/auxdisplay/line-display.h
index 4348d7a2f69a..36853b639711 100644
--- a/drivers/auxdisplay/line-display.h
+++ b/drivers/auxdisplay/line-display.h
@@ -6,6 +6,7 @@
* Author: Paul Burton <paul.burton@mips.com>
*
* Copyright (C) 2021 Glider bv
+ * Copyright (C) 2025 Jean-François Lessard
*/
#ifndef _LINEDISP_H
@@ -81,6 +82,9 @@ struct linedisp {
unsigned int id;
};
+int linedisp_attach(struct linedisp *linedisp, struct device *dev,
+ unsigned int num_chars, const struct linedisp_ops *ops);
+void linedisp_detach(struct device *dev);
int linedisp_register(struct linedisp *linedisp, struct device *parent,
unsigned int num_chars, const struct linedisp_ops *ops);
void linedisp_unregister(struct linedisp *linedisp);
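
With the new prototypes in place, a driver can bolt the line-display attributes onto its own device instead of spawning a "linedisp.N" child. A hedged usage sketch; my_priv and my_ops are illustrative names, not part of this patch:

/* Hypothetical platform driver using the direct-attach API. */
struct my_priv {
	struct linedisp linedisp;
};

static int my_probe(struct platform_device *pdev)
{
	struct my_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* 16 visible characters; my_ops supplies the ->update() hook */
	return linedisp_attach(&priv->linedisp, &pdev->dev, 16, &my_ops);
}

static void my_remove(struct platform_device *pdev)
{
	linedisp_detach(&pdev->dev);	/* frees buf, map and message */
}
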
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 064eb52ff7e2..1786d87b29e2 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -167,6 +167,12 @@ config PM_QOS_KUNIT_TEST
depends on KUNIT=y
default KUNIT_ALL_TESTS
+config PM_RUNTIME_KUNIT_TEST
+ tristate "KUnit Tests for runtime PM" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on PM
+ default KUNIT_ALL_TESTS
+
config HMEM_REPORTING
bool
default n
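
The Kconfig entry only gates the build; the test module itself lives elsewhere. For orientation, a skeleton of what a KUnit suite guarded by such an option typically looks like (all names here are illustrative):

#include <kunit/test.h>

static void pm_runtime_example_case(struct kunit *test)
{
	/* a real suite would exercise pm_runtime_* state transitions */
	KUNIT_EXPECT_EQ(test, 1 + 1, 2);
}

static struct kunit_case pm_runtime_example_cases[] = {
	KUNIT_CASE(pm_runtime_example_case),
	{}
};

static struct kunit_suite pm_runtime_example_suite = {
	.name = "pm_runtime_example",
	.test_cases = pm_runtime_example_cases,
};
kunit_test_suite(pm_runtime_example_suite);
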
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1037169abb45..84ec92bff642 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -292,7 +292,7 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
* frequency (by keeping the initial capacity_freq_ref value).
*/
cpu_clk = of_clk_get(cpu_node, 0);
- if (!PTR_ERR_OR_ZERO(cpu_clk)) {
+ if (!IS_ERR_OR_NULL(cpu_clk)) {
per_cpu(capacity_freq_ref, cpu) =
clk_get_rate(cpu_clk) / HZ_PER_KHZ;
clk_put(cpu_clk);
@@ -823,12 +823,106 @@ void remove_cpu_topology(unsigned int cpu)
clear_cpu_topology(cpu);
}
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+struct cpu_smt_info {
+ unsigned int thread_num;
+ int core_id;
+};
+
+static bool __init acpi_cpu_is_threaded(int cpu)
+{
+ int is_threaded = acpi_pptt_cpu_is_thread(cpu);
+
+ /*
+ * If the PPTT doesn't have thread information, check for an
+ * architecture-specific fallback, if available.
+ */
+ if (is_threaded < 0)
+ is_threaded = arch_cpu_is_threaded();
+
+ return !!is_threaded;
+}
+
+/*
+ * Propagate the topology information of the processor_topology_node tree to the
+ * cpu_topology array.
+ */
__weak int __init parse_acpi_topology(void)
{
+ unsigned int max_smt_thread_num = 1;
+ struct cpu_smt_info *entry;
+ struct xarray hetero_cpu;
+ unsigned long hetero_id;
+ int cpu, topology_id;
+
+ if (acpi_disabled)
+ return 0;
+
+ xa_init(&hetero_cpu);
+
+ for_each_possible_cpu(cpu) {
+ topology_id = find_acpi_cpu_topology(cpu, 0);
+ if (topology_id < 0)
+ return topology_id;
+
+ if (acpi_cpu_is_threaded(cpu)) {
+ cpu_topology[cpu].thread_id = topology_id;
+ topology_id = find_acpi_cpu_topology(cpu, 1);
+ cpu_topology[cpu].core_id = topology_id;
+
+ /*
+ * In the PPTT, CPUs below a node with the 'identical
+ * implementation' flag have the same number of threads.
+ * Count the number of threads for only one CPU (i.e.
+ * one core_id) among those with the same hetero_id.
+ * See the comment of find_acpi_cpu_topology_hetero_id()
+ * for more details.
+ *
+ * One entry is created for each node having:
+ * - the 'identical implementation' flag
+ * - its parent not having the flag
+ */
+ hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
+ entry = xa_load(&hetero_cpu, hetero_id);
+ if (!entry) {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ WARN_ON_ONCE(!entry);
+
+ if (entry) {
+ entry->core_id = topology_id;
+ entry->thread_num = 1;
+ xa_store(&hetero_cpu, hetero_id,
+ entry, GFP_KERNEL);
+ }
+ } else if (entry->core_id == topology_id) {
+ entry->thread_num++;
+ }
+ } else {
+ cpu_topology[cpu].thread_id = -1;
+ cpu_topology[cpu].core_id = topology_id;
+ }
+ topology_id = find_acpi_cpu_topology_cluster(cpu);
+ cpu_topology[cpu].cluster_id = topology_id;
+ topology_id = find_acpi_cpu_topology_package(cpu);
+ cpu_topology[cpu].package_id = topology_id;
+ }
+
+ /*
+ * This is a short loop since the number of XArray elements is the
+ * number of heterogeneous CPU clusters. On a homogeneous system
+ * there's only one entry in the XArray.
+ */
+ xa_for_each(&hetero_cpu, hetero_id, entry) {
+ max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
+ xa_erase(&hetero_cpu, hetero_id);
+ kfree(entry);
+ }
+
+ cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
+ xa_destroy(&hetero_cpu);
return 0;
}
-#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
int cpu, ret;
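
parse_acpi_topology() counts SMT threads once per "identical implementation" subtree by keying an XArray on the hetero ID, then drains the array to find the maximum. The drain step relies on xa_for_each() tolerating removal of the current entry; reduced to its essentials:

#include <linux/minmax.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct count_entry {
	unsigned int count;
};

/* Return the largest per-key count and free all entries (illustrative). */
static unsigned int drain_max(struct xarray *xa)
{
	struct count_entry *e;
	unsigned long key;
	unsigned int best = 0;

	xa_for_each(xa, key, e) {
		best = max(best, e->count);
		xa_erase(xa, key);	/* safe during xa_for_each() */
		kfree(e);
	}
	return best;
}
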
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index dba7c8e13a53..04bdbff4dbe5 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -171,17 +171,18 @@
static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
const struct auxiliary_device *auxdev)
{
- for (; id->name[0]; id++) {
- const char *p = strrchr(dev_name(&auxdev->dev), '.');
- int match_size;
+ const char *auxdev_name = dev_name(&auxdev->dev);
+ const char *p = strrchr(auxdev_name, '.');
+ int match_size;
- if (!p)
- continue;
- match_size = p - dev_name(&auxdev->dev);
+ if (!p)
+ return NULL;
+ match_size = p - auxdev_name;
+ for (; id->name[0]; id++) {
/* use dev_name(&auxdev->dev) prefix before last '.' char to match to */
if (strlen(id->name) == match_size &&
- !strncmp(dev_name(&auxdev->dev), id->name, match_size))
+ !strncmp(auxdev_name, id->name, match_size))
return id;
}
return NULL;
@@ -217,17 +218,14 @@ static int auxiliary_bus_probe(struct device *dev)
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
int ret;
- ret = dev_pm_domain_attach(dev, true);
+ ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
+ PD_FLAG_DETACH_POWER_OFF);
if (ret) {
dev_warn(dev, "Failed to attach to PM Domain : %d\n", ret);
return ret;
}
- ret = auxdrv->probe(auxdev, auxiliary_match_id(auxdrv->id_table, auxdev));
- if (ret)
- dev_pm_domain_detach(dev, true);
-
- return ret;
+ return auxdrv->probe(auxdev, auxiliary_match_id(auxdrv->id_table, auxdev));
}
static void auxiliary_bus_remove(struct device *dev)
@@ -237,7 +235,6 @@ static void auxiliary_bus_remove(struct device *dev)
if (auxdrv->remove)
auxdrv->remove(auxdev);
- dev_pm_domain_detach(dev, true);
}
static void auxiliary_bus_shutdown(struct device *dev)
@@ -399,6 +396,7 @@ static void auxiliary_device_release(struct device *dev)
{
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+ of_node_put(dev->of_node);
kfree(auxdev);
}
@@ -435,6 +433,7 @@ struct auxiliary_device *auxiliary_device_create(struct device *dev,
ret = auxiliary_device_init(auxdev);
if (ret) {
+ of_node_put(auxdev->dev.of_node);
kfree(auxdev);
return NULL;
}
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 123031a757d9..430cbefbc97f 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -85,6 +85,18 @@ struct driver_private {
};
#define to_driver(obj) container_of(obj, struct driver_private, kobj)
+#ifdef CONFIG_RUST
+/**
+ * struct driver_type - Representation of a Rust driver type.
+ */
+struct driver_type {
+ /**
+ * @id: Representation of core::any::TypeId.
+ */
+ u8 id[16];
+} __packed;
+#endif
+
/**
* struct device_private - structure to hold the private to the driver core portions of the device structure.
*
@@ -100,6 +112,7 @@ struct driver_private {
* @async_driver - pointer to device driver awaiting probe via async_probe
* @device - pointer back to the struct device that this structure is
* associated with.
+ * @driver_type - The type of the bound Rust driver.
* @dead - This device is currently either in the process of or has been
* removed from the system. Any asynchronous events scheduled for this
* device should exit without taking any action.
@@ -116,6 +129,9 @@ struct device_private {
const struct device_driver *async_driver;
char *deferred_probe_reason;
struct device *device;
+#ifdef CONFIG_RUST
+ struct driver_type driver_type;
+#endif
u8 dead:1;
};
#define to_device_private_parent(obj) \
@@ -248,9 +264,18 @@ void device_links_driver_cleanup(struct device *dev);
void device_links_no_driver(struct device *dev);
bool device_links_busy(struct device *dev);
void device_links_unbind_consumers(struct device *dev);
+bool device_link_flag_is_sync_state_only(u32 flags);
void fw_devlink_drivers_done(void);
void fw_devlink_probing_done(void);
+#define dev_for_each_link_to_supplier(__link, __dev) \
+ list_for_each_entry_srcu(__link, &(__dev)->links.suppliers, c_node, \
+ device_links_read_lock_held())
+
+#define dev_for_each_link_to_consumer(__link, __dev) \
+ list_for_each_entry_srcu(__link, &(__dev)->links.consumers, s_node, \
+ device_links_read_lock_held())
+
/* device pm support */
void device_pm_move_to_tail(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 5e75e1bce551..9eb7771706f0 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -334,6 +334,19 @@ static struct device *next_device(struct klist_iter *i)
return dev;
}
+static struct device *prev_device(struct klist_iter *i)
+{
+ struct klist_node *n = klist_prev(i);
+ struct device *dev = NULL;
+ struct device_private *dev_prv;
+
+ if (n) {
+ dev_prv = to_device_private_bus(n);
+ dev = dev_prv->device;
+ }
+ return dev;
+}
+
/**
* bus_for_each_dev - device iterator.
* @bus: bus type.
@@ -414,6 +427,31 @@ struct device *bus_find_device(const struct bus_type *bus,
}
EXPORT_SYMBOL_GPL(bus_find_device);
+struct device *bus_find_device_reverse(const struct bus_type *bus,
+ struct device *start, const void *data,
+ device_match_t match)
+{
+ struct subsys_private *sp = bus_to_subsys(bus);
+ struct klist_iter i;
+ struct device *dev;
+
+ if (!sp)
+ return NULL;
+
+ klist_iter_init_node(&sp->klist_devices, &i,
+ (start ? &start->p->knode_bus : NULL));
+ while ((dev = prev_device(&i))) {
+ if (match(dev, data)) {
+ get_device(dev);
+ break;
+ }
+ }
+ klist_iter_exit(&i);
+ subsys_put(sp);
+ return dev;
+}
+EXPORT_SYMBOL_GPL(bus_find_device_reverse);
+
static struct device_driver *next_driver(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
@@ -533,8 +571,7 @@ void bus_probe_device(struct device *dev)
if (!sp)
return;
- if (sp->drivers_autoprobe)
- device_initial_probe(dev);
+ device_initial_probe(dev);
mutex_lock(&sp->mutex);
list_for_each_entry(sif, &sp->interfaces, node)
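
bus_find_device_reverse() walks the bus klist backwards via the new prev_device() helper, so the most recently registered match is found first; like bus_find_device(), it returns the device with a reference held. A short usage sketch:

/* Find the newest device on a bus with a given name (illustrative). */
static struct device *find_newest_by_name(const struct bus_type *bus,
					  const char *name)
{
	/*
	 * The returned reference must be dropped with put_device()
	 * by the caller once it is done with the device.
	 */
	return bus_find_device_reverse(bus, NULL, name, device_match_name);
}
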
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index cf0d455209d7..613410705a47 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
@@ -183,6 +184,54 @@ static bool cache_node_is_unified(struct cacheinfo *this_leaf,
return of_property_read_bool(np, "cache-unified");
}
+static bool match_cache_node(struct device_node *cpu,
+ const struct device_node *cache_node)
+{
+ struct device_node *prev, *cache = of_find_next_cache_node(cpu);
+
+ while (cache) {
+ if (cache == cache_node) {
+ of_node_put(cache);
+ return true;
+ }
+
+ prev = cache;
+ cache = of_find_next_cache_node(cache);
+ of_node_put(prev);
+ }
+
+ return false;
+}
+
+#ifndef arch_compact_of_hwid
+#define arch_compact_of_hwid(_x) (_x)
+#endif
+
+static void cache_of_set_id(struct cacheinfo *this_leaf,
+ struct device_node *cache_node)
+{
+ struct device_node *cpu;
+ u32 min_id = ~0;
+
+ for_each_of_cpu_node(cpu) {
+ u64 id = of_get_cpu_hwid(cpu, 0);
+
+ id = arch_compact_of_hwid(id);
+ if (FIELD_GET(GENMASK_ULL(63, 32), id)) {
+ of_node_put(cpu);
+ return;
+ }
+
+ if (match_cache_node(cpu, cache_node))
+ min_id = min(min_id, id);
+ }
+
+ if (min_id != ~0) {
+ this_leaf->id = min_id;
+ this_leaf->attributes |= CACHE_ID;
+ }
+}
+
static void cache_of_set_props(struct cacheinfo *this_leaf,
struct device_node *np)
{
@@ -198,6 +247,7 @@ static void cache_of_set_props(struct cacheinfo *this_leaf,
cache_get_line_size(this_leaf, np);
cache_nr_sets(this_leaf, np);
cache_associativity(this_leaf);
+ cache_of_set_id(this_leaf, np);
}
static int cache_setup_of_node(unsigned int cpu)
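
cache_of_set_id() assigns each cache the smallest hardware ID among the CPUs sharing it, but only if every hwid fits in 32 bits; the guard uses FIELD_GET() over the upper half of the value. That bit test in isolation:

#include <linux/bitfield.h>
#include <linux/bits.h>

/* True if the CPU hardware ID fits in the low 32 bits. */
static bool hwid_fits_u32(u64 id)
{
	return FIELD_GET(GENMASK_ULL(63, 32), id) == 0;
}
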
diff --git a/drivers/base/core.c b/drivers/base/core.c
index cbc0099d8ef2..40de2f51a1b1 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -287,7 +287,7 @@ static bool device_is_ancestor(struct device *dev, struct device *target)
#define DL_MARKER_FLAGS (DL_FLAG_INFERRED | \
DL_FLAG_CYCLE | \
DL_FLAG_MANAGED)
-static inline bool device_link_flag_is_sync_state_only(u32 flags)
+bool device_link_flag_is_sync_state_only(u32 flags)
{
return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY;
}
@@ -460,9 +460,9 @@ static ssize_t auto_remove_on_show(struct device *dev,
struct device_link *link = to_devlink(dev);
const char *output;
- if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+ if (device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER))
output = "supplier unbind";
- else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
+ else if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER))
output = "consumer unbind";
else
output = "never";
@@ -476,7 +476,7 @@ static ssize_t runtime_pm_show(struct device *dev,
{
struct device_link *link = to_devlink(dev);
- return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
+ return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_PM_RUNTIME));
}
static DEVICE_ATTR_RO(runtime_pm);
@@ -485,8 +485,7 @@ static ssize_t sync_state_only_show(struct device *dev,
{
struct device_link *link = to_devlink(dev);
- return sysfs_emit(buf, "%d\n",
- !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ return sysfs_emit(buf, "%d\n", device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
}
static DEVICE_ATTR_RO(sync_state_only);
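
The conversions throughout this file swap open-coded "link->flags & DL_FLAG_*" tests for device_link_test(), which also normalizes the result to bool (note the dropped "!!" in the sysfs show functions above). The helper itself is defined outside this diff; a sketch consistent with every use here:

/* Assumed shape of the helper these hunks rely on. */
static inline bool device_link_test(const struct device_link *link,
				    u32 flags)
{
	return !!(link->flags & flags);
}
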
@@ -792,12 +791,12 @@ struct device_link *device_link_add(struct device *consumer,
if (link->consumer != consumer)
continue;
- if (link->flags & DL_FLAG_INFERRED &&
+ if (device_link_test(link, DL_FLAG_INFERRED) &&
!(flags & DL_FLAG_INFERRED))
link->flags &= ~DL_FLAG_INFERRED;
if (flags & DL_FLAG_PM_RUNTIME) {
- if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME)) {
pm_runtime_new_link(consumer);
link->flags |= DL_FLAG_PM_RUNTIME;
}
@@ -807,8 +806,8 @@ struct device_link *device_link_add(struct device *consumer,
if (flags & DL_FLAG_STATELESS) {
kref_get(&link->kref);
- if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
- !(link->flags & DL_FLAG_STATELESS)) {
+ if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) &&
+ !device_link_test(link, DL_FLAG_STATELESS)) {
link->flags |= DL_FLAG_STATELESS;
goto reorder;
} else {
@@ -823,7 +822,7 @@ struct device_link *device_link_add(struct device *consumer,
* update the existing link to stay around longer.
*/
if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
- if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
+ if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) {
link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
}
@@ -831,12 +830,12 @@ struct device_link *device_link_add(struct device *consumer,
link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER);
}
- if (!(link->flags & DL_FLAG_MANAGED)) {
+ if (!device_link_test(link, DL_FLAG_MANAGED)) {
kref_get(&link->kref);
link->flags |= DL_FLAG_MANAGED;
device_link_init_status(link, consumer, supplier);
}
- if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
+ if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY) &&
!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
goto reorder;
@@ -940,7 +939,7 @@ static void __device_link_del(struct kref *kref)
static void device_link_put_kref(struct device_link *link)
{
- if (link->flags & DL_FLAG_STATELESS)
+ if (device_link_test(link, DL_FLAG_STATELESS))
kref_put(&link->kref, __device_link_del);
else if (!device_is_registered(link->consumer))
__device_link_del(&link->kref);
@@ -1004,7 +1003,7 @@ static void device_links_missing_supplier(struct device *dev)
if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
} else {
- WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
}
@@ -1072,14 +1071,14 @@ int device_links_check_suppliers(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_AVAILABLE &&
- !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
+ !device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) {
if (dev_is_best_effort(dev) &&
- link->flags & DL_FLAG_INFERRED &&
+ device_link_test(link, DL_FLAG_INFERRED) &&
!link->supplier->can_match) {
ret = -EAGAIN;
continue;
@@ -1128,7 +1127,7 @@ static void __device_links_queue_sync_state(struct device *dev,
return;
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_ACTIVE)
return;
@@ -1268,7 +1267,7 @@ void device_links_force_bind(struct device *dev)
device_links_write_lock();
list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status != DL_STATE_AVAILABLE) {
@@ -1329,7 +1328,7 @@ void device_links_driver_bound(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
/*
@@ -1345,7 +1344,7 @@ void device_links_driver_bound(struct device *dev)
WARN_ON(link->status != DL_STATE_DORMANT);
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
- if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
+ if (device_link_test(link, DL_FLAG_AUTOPROBE_CONSUMER))
driver_deferred_probe_add(link->consumer);
}
@@ -1357,11 +1356,11 @@ void device_links_driver_bound(struct device *dev)
list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
struct device *supplier;
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
supplier = link->supplier;
- if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
+ if (device_link_test(link, DL_FLAG_SYNC_STATE_ONLY)) {
/*
* When DL_FLAG_SYNC_STATE_ONLY is set, it means no
* other DL_MANAGED_LINK_FLAGS have been set. So, it's
@@ -1369,7 +1368,7 @@ void device_links_driver_bound(struct device *dev)
*/
device_link_drop_managed(link);
} else if (dev_is_best_effort(dev) &&
- link->flags & DL_FLAG_INFERRED &&
+ device_link_test(link, DL_FLAG_INFERRED) &&
link->status != DL_STATE_CONSUMER_PROBE &&
!link->supplier->can_match) {
/*
@@ -1421,10 +1420,10 @@ static void __device_links_no_driver(struct device *dev)
struct device_link *link, *ln;
list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
- if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
+ if (device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER)) {
device_link_drop_managed(link);
continue;
}
@@ -1436,7 +1435,7 @@ static void __device_links_no_driver(struct device *dev)
if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
} else {
- WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ WARN_ON(!device_link_test(link, DL_FLAG_SYNC_STATE_ONLY));
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
}
@@ -1461,7 +1460,7 @@ void device_links_no_driver(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
/*
@@ -1498,10 +1497,10 @@ void device_links_driver_cleanup(struct device *dev)
device_links_write_lock();
list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
- WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
+ WARN_ON(device_link_test(link, DL_FLAG_AUTOREMOVE_CONSUMER));
WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
/*
@@ -1510,7 +1509,7 @@ void device_links_driver_cleanup(struct device *dev)
* has moved to DL_STATE_SUPPLIER_UNBIND.
*/
if (link->status == DL_STATE_SUPPLIER_UNBIND &&
- link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+ device_link_test(link, DL_FLAG_AUTOREMOVE_SUPPLIER))
device_link_drop_managed(link);
WRITE_ONCE(link->status, DL_STATE_DORMANT);
@@ -1544,7 +1543,7 @@ bool device_links_busy(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!device_link_test(link, DL_FLAG_MANAGED))
continue;
if (link->status == DL_STATE_CONSUMER_PROBE
@@ -1586,8 +1585,8 @@ void device_links_unbind_consumers(struct device *dev)
list_for_each_entry(link, &dev->links.consumers, s_node) {
enum device_link_state status;
- if (!(link->flags & DL_FLAG_MANAGED) ||
- link->flags & DL_FLAG_SYNC_STATE_ONLY)
+ if (!device_link_test(link, DL_FLAG_MANAGED) ||
+ device_link_test(link, DL_FLAG_SYNC_STATE_ONLY))
continue;
status = link->status;
@@ -1743,7 +1742,7 @@ static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
static void fw_devlink_relax_link(struct device_link *link)
{
- if (!(link->flags & DL_FLAG_INFERRED))
+ if (!device_link_test(link, DL_FLAG_INFERRED))
return;
if (device_link_flag_is_sync_state_only(link->flags))
@@ -1779,13 +1778,13 @@ static int fw_devlink_dev_sync_state(struct device *dev, void *data)
struct device_link *link = to_devlink(dev);
struct device *sup = link->supplier;
- if (!(link->flags & DL_FLAG_MANAGED) ||
+ if (!device_link_test(link, DL_FLAG_MANAGED) ||
link->status == DL_STATE_ACTIVE || sup->state_synced ||
!dev_has_sync_state(sup))
return 0;
if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) {
- dev_warn(sup, "sync_state() pending due to %s\n",
+ dev_info(sup, "sync_state() pending due to %s\n",
dev_name(link->consumer));
return 0;
}
@@ -1881,8 +1880,6 @@ static void fw_devlink_unblock_consumers(struct device *dev)
device_links_write_unlock();
}
-#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev)
-
static bool fwnode_init_without_drv(struct fwnode_handle *fwnode)
{
struct device *dev;
@@ -2063,7 +2060,7 @@ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
* such due to a cycle.
*/
if (device_link_flag_is_sync_state_only(dev_link->flags) &&
- !(dev_link->flags & DL_FLAG_CYCLE))
+ !device_link_test(dev_link, DL_FLAG_CYCLE))
continue;
if (__fw_devlink_relax_cycles(con_handle,
@@ -3997,8 +3994,8 @@ const char *device_get_devnode(const struct device *dev,
/**
* device_for_each_child - device child iterator.
* @parent: parent struct device.
- * @fn: function to be called for each device.
* @data: data for the callback.
+ * @fn: function to be called for each device.
*
* Iterate over @parent's child devices, and call @fn for each,
* passing it @data.
@@ -4027,8 +4024,8 @@ EXPORT_SYMBOL_GPL(device_for_each_child);
/**
* device_for_each_child_reverse - device child iterator in reversed order.
* @parent: parent struct device.
- * @fn: function to be called for each device.
* @data: data for the callback.
+ * @fn: function to be called for each device.
*
* Iterate over @parent's child devices, and call @fn for each,
* passing it @data.
@@ -4058,8 +4055,8 @@ EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
* device_for_each_child_reverse_from - device child iterator in reversed order.
* @parent: parent struct device.
* @from: optional starting point in child list
- * @fn: function to be called for each device.
* @data: data for the callback.
+ * @fn: function to be called for each device.
*
* Iterate over @parent's child devices, starting at @from, and call @fn
* for each, passing it @data. This helper is identical to
@@ -4092,8 +4089,8 @@ EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from);
/**
* device_find_child - device iterator for locating a particular device.
* @parent: parent struct device
- * @match: Callback function to check device
* @data: Data to pass to match function
+ * @match: Callback function to check device
*
* This is similar to the device_for_each_child() function above, but it
* returns a reference to a device that is 'found' for later use, as
@@ -4141,7 +4138,7 @@ int __init devices_init(void)
sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
if (!sysfs_dev_char_kobj)
goto char_kobj_err;
- device_link_wq = alloc_workqueue("device_link_wq", 0, 0);
+ device_link_wq = alloc_workqueue("device_link_wq", WQ_PERCPU, 0);
if (!device_link_wq)
goto wq_err;
@@ -5281,6 +5278,31 @@ void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
}
EXPORT_SYMBOL_GPL(device_set_node);
+/**
+ * get_dev_from_fwnode - Obtain a reference count of the struct device the
+ * struct fwnode_handle is associated with.
+ * @fwnode: The pointer to the struct fwnode_handle to obtain the struct device
+ * reference count of.
+ *
+ * This function takes a reference on the device that the device pointer
+ * embedded in the struct fwnode_handle points to.
+ *
+ * Note that the struct device pointer embedded in struct fwnode_handle does
+ * *not* have a reference count of the struct device itself.
+ *
+ * Hence, it is a UAF (and thus a bug) to call this function if the caller
+ * can't guarantee that the last reference to the corresponding struct device
+ * is not dropped concurrently.
+ *
+ * This is possible since struct fwnode_handle has its own reference count and
+ * hence can outlive the struct device it is associated with.
+ */
+struct device *get_dev_from_fwnode(struct fwnode_handle *fwnode)
+{
+ return get_device((fwnode)->dev);
+}
+EXPORT_SYMBOL_GPL(get_dev_from_fwnode);
+
int device_match_name(struct device *dev, const void *name)
{
return sysfs_streq(dev_name(dev), name);
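
Since fwnode->dev is itself unrefcounted, get_dev_from_fwnode() is only safe when the caller can rule out a concurrent final put; when it is safe, the usual borrow/release pattern applies. An illustrative caller:

/* Borrow the device behind a fwnode, if any (illustrative). */
static void example_use_fwnode_dev(struct fwnode_handle *fwnode)
{
	struct device *dev = get_dev_from_fwnode(fwnode);

	if (!dev)
		return;			/* fwnode not bound to a device */

	dev_info(dev, "fwnode is attached to this device\n");
	put_device(dev);		/* balance the reference taken above */
}
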
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 7779ab0ca7ce..c6c57b6f61c6 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -300,13 +300,30 @@ static ssize_t print_cpus_isolated(struct device *dev,
}
static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);
+static ssize_t housekeeping_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct cpumask *hk_mask;
+
+ hk_mask = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE);
+
+ if (housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
+ return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(hk_mask));
+ return sysfs_emit(buf, "\n");
+}
+static DEVICE_ATTR_RO(housekeeping);
+
#ifdef CONFIG_NO_HZ_FULL
-static ssize_t print_cpus_nohz_full(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t nohz_full_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
+ if (cpumask_available(tick_nohz_full_mask))
+ return sysfs_emit(buf, "%*pbl\n",
+ cpumask_pr_args(tick_nohz_full_mask));
+ return sysfs_emit(buf, "\n");
}
-static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
+static DEVICE_ATTR_RO(nohz_full);
#endif
#ifdef CONFIG_CRASH_HOTPLUG
@@ -325,7 +342,7 @@ static void cpu_device_release(struct device *dev)
* This is an empty function to prevent the driver core from spitting a
* warning at us. Yes, I know this is directly opposite of what the
* documentation for the driver core and kobjects say, and the author
- * of this code has already been publically ridiculed for doing
+ * of this code has already been publicly ridiculed for doing
* something as foolish as this. However, at this point in time, it is
* the only way to handle the issue of statically allocated cpu
* devices. The different architectures will have their cpu device
@@ -505,6 +522,7 @@ static struct attribute *cpu_root_attrs[] = {
&dev_attr_offline.attr,
&dev_attr_enabled.attr,
&dev_attr_isolated.attr,
+ &dev_attr_housekeeping.attr,
#ifdef CONFIG_NO_HZ_FULL
&dev_attr_nohz_full.attr,
#endif
@@ -602,6 +620,8 @@ CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
CPU_SHOW_VULN_FALLBACK(ghostwrite);
CPU_SHOW_VULN_FALLBACK(old_microcode);
CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
+CPU_SHOW_VULN_FALLBACK(tsa);
+CPU_SHOW_VULN_FALLBACK(vmscape);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -620,6 +640,8 @@ static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling
static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL);
static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
+static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
+static DEVICE_ATTR(vmscape, 0444, cpu_show_vmscape, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -639,6 +661,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_ghostwrite.attr,
&dev_attr_old_microcode.attr,
&dev_attr_indirect_target_selection.attr,
+ &dev_attr_tsa.attr,
+ &dev_attr_vmscape.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index b526e0e0f52d..349f31bedfa1 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -25,6 +25,7 @@
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/async.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/slab.h>
@@ -192,7 +193,7 @@ void driver_deferred_probe_trigger(void)
* Kick the re-probe thread. It may already be scheduled, but it is
* safe to kick it again.
*/
- queue_work(system_unbound_wq, &deferred_probe_work);
+ queue_work(system_dfl_wq, &deferred_probe_work);
}
/**
@@ -552,6 +553,7 @@ static void device_unbind_cleanup(struct device *dev)
dev->dma_range_map = NULL;
device_set_driver(dev, NULL);
dev_set_drvdata(dev, NULL);
+ dev_pm_domain_detach(dev, dev->power.detach_power_off);
if (dev->pm_domain && dev->pm_domain->dismiss)
dev->pm_domain->dismiss(dev);
pm_runtime_reinit(dev);
@@ -1075,7 +1077,15 @@ EXPORT_SYMBOL_GPL(device_attach);
void device_initial_probe(struct device *dev)
{
- __device_attach(dev, true);
+ struct subsys_private *sp = bus_to_subsys(dev->bus);
+
+ if (!sp)
+ return;
+
+ if (sp->drivers_autoprobe)
+ __device_attach(dev, true);
+
+ subsys_put(sp);
}
/*
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 03a39c417dc4..55bdc7f5e59d 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -23,50 +23,46 @@ struct devcd_entry {
void *data;
size_t datalen;
/*
- * Here, mutex is required to serialize the calls to del_wk work between
- * user/kernel space which happens when devcd is added with device_add()
- * and that sends uevent to user space. User space reads the uevents,
- * and calls to devcd_data_write() which try to modify the work which is
- * not even initialized/queued from devcoredump.
+ * There are 2 races for which mutex is required.
*
+ * The first race is between device creation and userspace writing to
+ * the coredump to schedule immediate destruction.
*
+ * This race is handled by arming the timer before device creation, but
+ * when device creation fails the timer still exists.
*
- * cpu0(X) cpu1(Y)
+ * To solve this, hold the mutex during device_add(), and set
+ * init_completed on success before releasing the mutex.
*
- * dev_coredump() uevent sent to user space
- * device_add() ======================> user space process Y reads the
- * uevents writes to devcd fd
- * which results into writes to
+ * That way the timer cannot destroy the device before device_add()
+ * has finished: it does nothing if init_completed is not set, and it
+ * is also cancelled in that case.
*
- * devcd_data_write()
- * mod_delayed_work()
- * try_to_grab_pending()
- * timer_delete()
- * debug_assert_init()
- * INIT_DELAYED_WORK()
- * schedule_delayed_work()
- *
- *
- * Also, mutex alone would not be enough to avoid scheduling of
- * del_wk work after it get flush from a call to devcd_free()
- * mentioned as below.
- *
- * disabled_store()
- * devcd_free()
- * mutex_lock() devcd_data_write()
- * flush_delayed_work()
- * mutex_unlock()
- * mutex_lock()
- * mod_delayed_work()
- * mutex_unlock()
- * So, delete_work flag is required.
+ * The second race involves multiple parallel invocations of devcd_free();
+ * a "deleted" flag ensures that only one of them calls the destructor.
*/
struct mutex mutex;
- bool delete_work;
+ bool init_completed, deleted;
struct module *owner;
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen);
void (*free)(void *data);
+ /*
+ * If nothing interferes and device_add() returns success,
+ * del_wk will destroy the device after the timer fires.
+ *
+ * Multiple userspace processes can interfere with the timer:
+ * - Writing to the coredump will reschedule the timer to run immediately,
+ * if still armed.
+ *
+ * This is handled by using "if (cancel_delayed_work())
+ * schedule_delayed_work()", which prevents re-arming the work
+ * after the timer has already fired.
+ * - Writing to /sys/class/devcoredump/disabled will destroy the
+ * coredump synchronously.
+ * This is handled by using disable_delayed_work_sync(), and then
+ * checking if deleted flag is set with &devcd->mutex held.
+ */
struct delayed_work del_wk;
struct device *failing_dev;
};
@@ -95,14 +91,27 @@ static void devcd_dev_release(struct device *dev)
kfree(devcd);
}
+static void __devcd_del(struct devcd_entry *devcd)
+{
+ devcd->deleted = true;
+ device_del(&devcd->devcd_dev);
+ put_device(&devcd->devcd_dev);
+}
+
static void devcd_del(struct work_struct *wk)
{
struct devcd_entry *devcd;
+ bool init_completed;
devcd = container_of(wk, struct devcd_entry, del_wk.work);
- device_del(&devcd->devcd_dev);
- put_device(&devcd->devcd_dev);
+ /* devcd->mutex serializes against dev_coredumpm_timeout */
+ mutex_lock(&devcd->mutex);
+ init_completed = devcd->init_completed;
+ mutex_unlock(&devcd->mutex);
+
+ if (init_completed)
+ __devcd_del(devcd);
}
static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
@@ -122,12 +131,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct devcd_entry *devcd = dev_to_devcd(dev);
- mutex_lock(&devcd->mutex);
- if (!devcd->delete_work) {
- devcd->delete_work = true;
- mod_delayed_work(system_wq, &devcd->del_wk, 0);
- }
- mutex_unlock(&devcd->mutex);
+ /*
+ * Although it's tempting to use mod_delayed_work() here,
+ * that will cause a reschedule if the timer already fired.
+ */
+ if (cancel_delayed_work(&devcd->del_wk))
+ schedule_delayed_work(&devcd->del_wk, 0);
return count;
}
@@ -140,7 +149,7 @@ static const struct bin_attribute *const devcd_dev_bin_attrs[] = {
};
static const struct attribute_group devcd_dev_group = {
- .bin_attrs_new = devcd_dev_bin_attrs,
+ .bin_attrs = devcd_dev_bin_attrs,
};
static const struct attribute_group *devcd_dev_groups[] = {
@@ -151,11 +160,21 @@ static int devcd_free(struct device *dev, void *data)
{
struct devcd_entry *devcd = dev_to_devcd(dev);
+ /*
+ * To prevent a race with devcd_data_write(), disable work and
+ * complete manually instead.
+ *
+ * We cannot rely on the return value of
+ * disable_delayed_work_sync() here, because it might be in the
+ * middle of a cancel_delayed_work + schedule_delayed_work pair.
+ *
+ * devcd->mutex here guards against multiple parallel invocations
+ * of devcd_free().
+ */
+ disable_delayed_work_sync(&devcd->del_wk);
mutex_lock(&devcd->mutex);
- if (!devcd->delete_work)
- devcd->delete_work = true;
-
- flush_delayed_work(&devcd->del_wk);
+ if (!devcd->deleted)
+ __devcd_del(devcd);
mutex_unlock(&devcd->mutex);
return 0;
}
@@ -179,12 +198,10 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri
* put_device() <- last reference
* error = fn(dev, data) devcd_dev_release()
* devcd_free(dev, data) kfree(devcd)
- * mutex_lock(&devcd->mutex);
*
*
* In the above diagram, it looks like disabled_store() would be racing with a concurrently
- * running devcd_del() and result in memory abort while acquiring devcd->mutex which
- * is called after kfree of devcd memory after dropping its last reference with
+ * running devcd_del() and result in a memory abort after dropping its last reference with
* put_device(). However, this does not happen, as fn(dev, data) runs
* with its own reference to the device via klist_node, so it is not the last reference;
* hence, the above situation would not occur.
@@ -374,7 +391,7 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
devcd->read = read;
devcd->free = free;
devcd->failing_dev = get_device(dev);
- devcd->delete_work = false;
+ devcd->deleted = false;
mutex_init(&devcd->mutex);
device_initialize(&devcd->devcd_dev);
@@ -383,8 +400,14 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
atomic_inc_return(&devcd_count));
devcd->devcd_dev.class = &devcd_class;
- mutex_lock(&devcd->mutex);
dev_set_uevent_suppress(&devcd->devcd_dev, true);
+
+ /* devcd->mutex prevents devcd_del() completing until init finishes */
+ mutex_lock(&devcd->mutex);
+ devcd->init_completed = false;
+ INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+ schedule_delayed_work(&devcd->del_wk, timeout);
+
if (device_add(&devcd->devcd_dev))
goto put_device;
@@ -401,13 +424,20 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
dev_set_uevent_suppress(&devcd->devcd_dev, false);
kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
- INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
- schedule_delayed_work(&devcd->del_wk, timeout);
+
+ /*
+ * Safe to run devcd_del() now that we are done with devcd_dev.
+ * Alternatively we could have taken a ref on devcd_dev before
+ * dropping the lock.
+ */
+ devcd->init_completed = true;
mutex_unlock(&devcd->mutex);
return;
put_device:
- put_device(&devcd->devcd_dev);
mutex_unlock(&devcd->mutex);
+ cancel_delayed_work_sync(&devcd->del_wk);
+ put_device(&devcd->devcd_dev);
+
put_module:
module_put(owner);
free:
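
The devcd_data_write() fix deliberately avoids mod_delayed_work(): that call would re-arm the work even after the timer has fired and teardown has begun. cancel_delayed_work() succeeds only while the work is still pending, so the pair below runs the destruction immediately at most once. The idiom in isolation:

#include <linux/workqueue.h>

/* Run pending delayed work now, but never re-arm it once it has fired. */
static void run_now_if_still_armed(struct delayed_work *dwork)
{
	if (cancel_delayed_work(dwork))		/* false once the timer fired */
		schedule_delayed_work(dwork, 0);
}
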
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index ff55e1bcfa30..f54db6d138ab 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -1117,6 +1117,27 @@ void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
}
EXPORT_SYMBOL_GPL(devm_kmemdup);
+/**
+ * devm_kmemdup_const - conditionally duplicate and manage a region of memory
+ *
+ * @dev: Device this memory belongs to
+ * @src: memory region to duplicate
+ * @len: memory region length
+ * @gfp: GFP mask to use
+ *
+ * Return: the source address if it is in .rodata, otherwise the return value
+ * of devm_kmemdup(), to which the function falls back.
+ */
+const void *
+devm_kmemdup_const(struct device *dev, const void *src, size_t len, gfp_t gfp)
+{
+ if (is_kernel_rodata((unsigned long)src))
+ return src;
+
+ return devm_kmemdup(dev, src, len, gfp);
+}
+EXPORT_SYMBOL_GPL(devm_kmemdup_const);
+
struct pages_devres {
unsigned long addr;
unsigned int order;
@@ -1201,13 +1222,6 @@ static void devm_percpu_release(struct device *dev, void *pdata)
free_percpu(p);
}
-static int devm_percpu_match(struct device *dev, void *data, void *p)
-{
- struct devres *devr = container_of(data, struct devres, data);
-
- return *(void **)devr->data == p;
-}
-
/**
* __devm_alloc_percpu - Resource-managed alloc_percpu
* @dev: Device to allocate per-cpu memory for
@@ -1243,21 +1257,3 @@ void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
-
-/**
- * devm_free_percpu - Resource-managed free_percpu
- * @dev: Device this memory belongs to
- * @pdata: Per-cpu memory to free
- *
- * Free memory allocated with devm_alloc_percpu().
- */
-void devm_free_percpu(struct device *dev, void __percpu *pdata)
-{
- /*
- * Use devres_release() to prevent memory leakage as
- * devm_free_pages() does.
- */
- WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match,
- (void *)(__force unsigned long)pdata));
-}
-EXPORT_SYMBOL_GPL(devm_free_percpu);
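
devm_kmemdup_const() mirrors kstrdup_const(): when the source already sits in kernel .rodata there is nothing to manage and the pointer is returned as-is; otherwise it falls back to a managed copy. A hedged usage sketch (for built-in code the table below would live in .rodata, so no copy is made):

/* Illustrative: duplicate a config table only when necessary. */
static const u32 default_cfg[] = { 0x1, 0x2, 0x3 };

static int example_setup(struct device *dev, const u32 **out)
{
	const u32 *cfg;

	cfg = devm_kmemdup_const(dev, default_cfg, sizeof(default_cfg),
				 GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	*out = cfg;	/* may be default_cfg itself, or a devm-managed copy */
	return 0;
}
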
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 31bfb3194b4c..194b44075ac7 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -70,7 +70,7 @@ static struct file_system_type internal_fs_type = {
#else
.init_fs_context = ramfs_init_fs_context,
#endif
- .kill_sb = kill_litter_super,
+ .kill_sb = kill_anon_super,
};
/* Simply take a ref on the existing mount */
@@ -176,15 +176,15 @@ static int dev_mkdir(const char *name, umode_t mode)
struct dentry *dentry;
struct path path;
- dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
+ dentry = start_creating_path(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- dentry = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
+ dentry = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode, NULL);
if (!IS_ERR(dentry))
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
- done_path_create(&path, dentry);
+ end_creating_path(&path, dentry);
return PTR_ERR_OR_ZERO(dentry);
}
@@ -222,16 +222,16 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
struct path path;
int err;
- dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
+ dentry = start_creating_path(AT_FDCWD, nodename, &path, 0);
if (dentry == ERR_PTR(-ENOENT)) {
create_path(nodename);
- dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
+ dentry = start_creating_path(AT_FDCWD, nodename, &path, 0);
}
if (IS_ERR(dentry))
return PTR_ERR(dentry);
err = vfs_mknod(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode,
- dev->devt);
+ dev->devt, NULL);
if (!err) {
struct iattr newattrs;
@@ -246,7 +246,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
}
- done_path_create(&path, dentry);
+ end_creating_path(&path, dentry);
return err;
}
@@ -256,18 +256,16 @@ static int dev_rmdir(const char *name)
struct dentry *dentry;
int err;
- dentry = kern_path_locked(name, &parent);
+ dentry = start_removing_path(name, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
if (d_inode(dentry)->i_private == &thread)
err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
- dentry);
+ dentry, NULL);
else
err = -EPERM;
- dput(dentry);
- inode_unlock(d_inode(parent.dentry));
- path_put(&parent);
+ end_removing_path(&parent, dentry);
return err;
}
@@ -325,7 +323,7 @@ static int handle_remove(const char *nodename, struct device *dev)
int deleted = 0;
int err = 0;
- dentry = kern_path_locked(nodename, &parent);
+ dentry = start_removing_path(nodename, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
@@ -349,10 +347,8 @@ static int handle_remove(const char *nodename, struct device *dev)
if (!err || err == -ENOENT)
deleted = 1;
}
- dput(dentry);
- inode_unlock(d_inode(parent.dentry));
+ end_removing_path(&parent, dentry);
- path_put(&parent);
if (deleted && strchr(nodename, '/'))
delete_path(nodename);
return err;
diff --git a/drivers/base/faux.c b/drivers/base/faux.c
index 9054d346bd7f..21dd02124231 100644
--- a/drivers/base/faux.c
+++ b/drivers/base/faux.c
@@ -86,6 +86,7 @@ static struct device_driver faux_driver = {
.name = "faux_driver",
.bus = &faux_bus_type,
.probe_type = PROBE_FORCE_SYNCHRONOUS,
+ .suppress_bind_attrs = true,
};
static void faux_device_release(struct device *dev)
@@ -154,6 +155,7 @@ struct faux_device *faux_device_create_with_groups(const char *name,
dev->parent = &faux_bus_root;
dev->bus = &faux_bus_type;
dev_set_name(dev, "%s", name);
+ device_set_pm_not_required(dev);
ret = device_add(dev);
if (ret) {
@@ -169,7 +171,7 @@ struct faux_device *faux_device_create_with_groups(const char *name,
* successful is almost impossible to determine by the caller.
*/
if (!dev->driver) {
- dev_err(dev, "probe did not succeed, tearing down the device\n");
+ dev_dbg(dev, "probe did not succeed, tearing down the device\n");
faux_device_destroy(faux_dev);
faux_dev = NULL;
}
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
index 752b9a9bea03..15eff8a4b505 100644
--- a/drivers/base/firmware_loader/Kconfig
+++ b/drivers/base/firmware_loader/Kconfig
@@ -38,7 +38,7 @@ config FW_LOADER_DEBUG
config RUST_FW_LOADER_ABSTRACTIONS
bool "Rust Firmware Loader abstractions"
depends on RUST
- depends on FW_LOADER=y
+ select FW_LOADER
help
This enables the Rust abstractions for the firmware loader API.
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 44486b2c7172..4ebdca9e4da4 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -822,26 +822,6 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name,
{}
#endif
-/*
- * Reject firmware file names with ".." path components.
- * There are drivers that construct firmware file names from device-supplied
- * strings, and we don't want some device to be able to tell us "I would like to
- * be sent my firmware from ../../../etc/shadow, please".
- *
- * Search for ".." surrounded by either '/' or start/end of string.
- *
- * This intentionally only looks at the firmware name, not at the firmware base
- * directory or at symlink contents.
- */
-static bool name_contains_dotdot(const char *name)
-{
- size_t name_len = strlen(name);
-
- return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 ||
- strstr(name, "/../") != NULL ||
- (name_len >= 3 && strcmp(name+name_len-3, "/..") == 0);
-}
-
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
@@ -849,8 +829,6 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
size_t offset, u32 opt_flags)
{
struct firmware *fw = NULL;
- struct cred *kern_cred = NULL;
- const struct cred *old_cred;
bool nondirect = false;
int ret;
@@ -862,6 +840,17 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
goto out;
}
+
+ /*
+ * Reject firmware file names with ".." path components.
+ * There are drivers that construct firmware file names from
+ * device-supplied strings, and we don't want some device to be
+ * able to tell us "I would like to be sent my firmware from
+ * ../../../etc/shadow, please".
+ *
+ * This intentionally only looks at the firmware name, not at
+ * the firmware base directory or at symlink contents.
+ */
if (name_contains_dotdot(name)) {
dev_warn(device,
"Firmware load for '%s' refused, path contains '..' component\n",
@@ -880,45 +869,38 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
* called by a driver when serving an unrelated request from userland, we use
* the kernel credentials to read the file.
*/
- kern_cred = prepare_kernel_cred(&init_task);
- if (!kern_cred) {
- ret = -ENOMEM;
- goto out;
- }
- old_cred = override_creds(kern_cred);
+ scoped_with_kernel_creds() {
+ ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
- ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
-
- /* Only full reads can support decompression, platform, and sysfs. */
- if (!(opt_flags & FW_OPT_PARTIAL))
- nondirect = true;
+ /* Only full reads can support decompression, platform, and sysfs. */
+ if (!(opt_flags & FW_OPT_PARTIAL))
+ nondirect = true;
#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
- if (ret == -ENOENT && nondirect)
- ret = fw_get_filesystem_firmware(device, fw->priv, ".zst",
- fw_decompress_zstd);
+ if (ret == -ENOENT && nondirect)
+ ret = fw_get_filesystem_firmware(device, fw->priv, ".zst",
+ fw_decompress_zstd);
#endif
#ifdef CONFIG_FW_LOADER_COMPRESS_XZ
- if (ret == -ENOENT && nondirect)
- ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
- fw_decompress_xz);
+ if (ret == -ENOENT && nondirect)
+ ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
+ fw_decompress_xz);
#endif
- if (ret == -ENOENT && nondirect)
- ret = firmware_fallback_platform(fw->priv);
+ if (ret == -ENOENT && nondirect)
+ ret = firmware_fallback_platform(fw->priv);
- if (ret) {
- if (!(opt_flags & FW_OPT_NO_WARN))
- dev_warn(device,
- "Direct firmware load for %s failed with error %d\n",
- name, ret);
- if (nondirect)
- ret = firmware_fallback_sysfs(fw, name, device,
- opt_flags, ret);
- } else
- ret = assign_fw(fw, device);
-
- revert_creds(old_cred);
- put_cred(kern_cred);
+ if (ret) {
+ if (!(opt_flags & FW_OPT_NO_WARN))
+ dev_warn(device,
+ "Direct firmware load for %s failed with error %d\n",
+ name, ret);
+ if (nondirect)
+ ret = firmware_fallback_sysfs(fw, name, device,
+ opt_flags, ret);
+ } else {
+ ret = assign_fw(fw, device);
+ }
+ }
out:
if (ret < 0) {
@@ -1594,16 +1576,20 @@ static int fw_pm_notify(struct notifier_block *notify_block,
}
/* stop caching firmware once syscore_suspend is reached */
-static int fw_suspend(void)
+static int fw_suspend(void *data)
{
fw_cache.state = FW_LOADER_NO_CACHE;
return 0;
}
-static struct syscore_ops fw_syscore_ops = {
+static const struct syscore_ops fw_syscore_ops = {
.suspend = fw_suspend,
};
+static struct syscore fw_syscore = {
+ .ops = &fw_syscore_ops,
+};
+
static int __init register_fw_pm_ops(void)
{
int ret;
@@ -1619,14 +1605,14 @@ static int __init register_fw_pm_ops(void)
if (ret)
return ret;
- register_syscore_ops(&fw_syscore_ops);
+ register_syscore(&fw_syscore);
return ret;
}
static inline void unregister_fw_pm_ops(void)
{
- unregister_syscore_ops(&fw_syscore_ops);
+ unregister_syscore(&fw_syscore);
unregister_pm_notifier(&fw_cache.pm_notify);
}
#else
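
The name_contains_dotdot() helper disappears from main.c while its caller remains, which suggests it moved to a shared header; its behavior is unchanged. For reference, a sketch matching the removed body and the names it rejects:

	/* Rejected: "..", "../fw.bin", "a/../fw.bin", "a/.." */
	static bool name_contains_dotdot(const char *name)
	{
		size_t len = strlen(name);

		return strcmp(name, "..") == 0 ||
		       strncmp(name, "../", 3) == 0 ||
		       strstr(name, "/../") != NULL ||
		       (len >= 3 && strcmp(name + len - 3, "/..") == 0);
	}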
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
index d254ceb56d84..92e91050f96a 100644
--- a/drivers/base/firmware_loader/sysfs.c
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -47,7 +47,10 @@ static ssize_t timeout_show(const struct class *class, const struct class_attrib
static ssize_t timeout_store(const struct class *class, const struct class_attribute *attr,
const char *buf, size_t count)
{
- int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
+ int tmp_loading_timeout;
+
+ if (kstrtoint(buf, 10, &tmp_loading_timeout))
+ return -EINVAL;
if (tmp_loading_timeout < 0)
tmp_loading_timeout = 0;
@@ -157,7 +160,10 @@ static ssize_t firmware_loading_store(struct device *dev,
struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
struct fw_priv *fw_priv;
ssize_t written = count;
- int loading = simple_strtol(buf, NULL, 10);
+ int loading;
+
+ if (kstrtoint(buf, 10, &loading))
+ return -EINVAL;
mutex_lock(&fw_lock);
fw_priv = fw_sysfs->fw_priv;
@@ -359,8 +365,8 @@ out:
static const struct bin_attribute firmware_attr_data = {
.attr = { .name = "data", .mode = 0644 },
.size = 0,
- .read_new = firmware_data_read,
- .write_new = firmware_data_write,
+ .read = firmware_data_read,
+ .write = firmware_data_write,
};
static struct attribute *fw_dev_attrs[] = {
@@ -381,7 +387,7 @@ static const struct bin_attribute *const fw_dev_bin_attrs[] = {
static const struct attribute_group fw_dev_attr_group = {
.attrs = fw_dev_attrs,
- .bin_attrs_new = fw_dev_bin_attrs,
+ .bin_attrs = fw_dev_bin_attrs,
#ifdef CONFIG_FW_UPLOAD
.is_visible = fw_upload_is_visible,
#endif
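
Unlike simple_strtol(), which parses leading digits and silently yields 0 for entirely malformed input, kstrtoint() fails on non-numeric text, trailing junk and values that do not fit in an int, so both stores above now return -EINVAL for bad input instead of acting on a bogus zero. The pattern in isolation:

	int val;

	/* Fails on "abc", "12junk" and out-of-range values alike. */
	if (kstrtoint(buf, 10, &val))
		return -EINVAL;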
diff --git a/drivers/base/firmware_loader/sysfs_upload.c b/drivers/base/firmware_loader/sysfs_upload.c
index 829270067d16..c3797b93c5f5 100644
--- a/drivers/base/firmware_loader/sysfs_upload.c
+++ b/drivers/base/firmware_loader/sysfs_upload.c
@@ -100,8 +100,10 @@ static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&fwlp->lock);
- if (fwlp->progress == FW_UPLOAD_PROG_IDLE)
- ret = -ENODEV;
+ if (fwlp->progress == FW_UPLOAD_PROG_IDLE) {
+ mutex_unlock(&fwlp->lock);
+ return -ENODEV;
+ }
fwlp->ops->cancel(fwlp->fw_upload);
mutex_unlock(&fwlp->lock);
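
Previously the idle case set ret = -ENODEV but execution still fell through to fwlp->ops->cancel(), cancelling an upload that was never in progress; the fix bails out early, dropping the lock first. An equivalent shape using the kernel's scoped lock guard, shown only as a sketch of the same control flow, not what the patch uses:

	scoped_guard(mutex, &fwlp->lock) {
		if (fwlp->progress == FW_UPLOAD_PROG_IDLE)
			return -ENODEV;	/* the guard releases the lock */
		fwlp->ops->cancel(fwlp->fw_upload);
	}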
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index ed3e69dc785c..751f248ca4a8 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -22,6 +22,7 @@
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/xarray.h>
+#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
@@ -48,22 +49,8 @@ int mhp_online_type_from_str(const char *str)
#define to_memory_block(dev) container_of(dev, struct memory_block, dev)
-static int sections_per_block;
-
-static inline unsigned long memory_block_id(unsigned long section_nr)
-{
- return section_nr / sections_per_block;
-}
-
-static inline unsigned long pfn_to_block_id(unsigned long pfn)
-{
- return memory_block_id(pfn_to_section_nr(pfn));
-}
-
-static inline unsigned long phys_to_block_id(unsigned long phys)
-{
- return pfn_to_block_id(PFN_DOWN(phys));
-}
+int sections_per_block;
+EXPORT_SYMBOL(sections_per_block);
static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);
@@ -211,15 +198,15 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
break;
default:
WARN_ON(1);
- return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
+ return sysfs_emit(buf, "ERROR-UNKNOWN-%d\n", mem->state);
}
return sysfs_emit(buf, "%s\n", output);
}
-int memory_notify(unsigned long val, void *v)
+int memory_notify(enum memory_block_state state, void *v)
{
- return blocking_notifier_call_chain(&memory_chain, val, v);
+ return blocking_notifier_call_chain(&memory_chain, state, v);
}
#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
@@ -239,7 +226,6 @@ static int memory_block_online(struct memory_block *mem)
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long nr_vmemmap_pages = 0;
- struct memory_notify arg;
struct zone *zone;
int ret;
@@ -259,19 +245,9 @@ static int memory_block_online(struct memory_block *mem)
if (mem->altmap)
nr_vmemmap_pages = mem->altmap->free;
- arg.altmap_start_pfn = start_pfn;
- arg.altmap_nr_pages = nr_vmemmap_pages;
- arg.start_pfn = start_pfn + nr_vmemmap_pages;
- arg.nr_pages = nr_pages - nr_vmemmap_pages;
mem_hotplug_begin();
- ret = memory_notify(MEM_PREPARE_ONLINE, &arg);
- ret = notifier_to_errno(ret);
- if (ret)
- goto out_notifier;
-
if (nr_vmemmap_pages) {
- ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages,
- zone, mem->altmap->inaccessible);
+ ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
if (ret)
goto out;
}
@@ -293,11 +269,7 @@ static int memory_block_online(struct memory_block *mem)
nr_vmemmap_pages);
mem->zone = zone;
- mem_hotplug_done();
- return ret;
out:
- memory_notify(MEM_FINISH_OFFLINE, &arg);
-out_notifier:
mem_hotplug_done();
return ret;
}
@@ -310,7 +282,6 @@ static int memory_block_offline(struct memory_block *mem)
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long nr_vmemmap_pages = 0;
- struct memory_notify arg;
int ret;
if (!mem->zone)
@@ -342,11 +313,6 @@ static int memory_block_offline(struct memory_block *mem)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
mem->zone = NULL;
- arg.altmap_start_pfn = start_pfn;
- arg.altmap_nr_pages = nr_vmemmap_pages;
- arg.start_pfn = start_pfn + nr_vmemmap_pages;
- arg.nr_pages = nr_pages - nr_vmemmap_pages;
- memory_notify(MEM_FINISH_OFFLINE, &arg);
out:
mem_hotplug_done();
return ret;
@@ -683,7 +649,7 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
*
* Called under device_hotplug_lock.
*/
-static struct memory_block *find_memory_block_by_id(unsigned long block_id)
+struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
struct memory_block *mem;
@@ -782,21 +748,22 @@ static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
#ifdef CONFIG_NUMA
/**
- * memory_block_add_nid() - Indicate that system RAM falling into this memory
- * block device (partially) belongs to the given node.
+ * memory_block_add_nid_early() - Indicate that early system RAM falling into
+ * this memory block device (partially) belongs
+ * to the given node.
* @mem: The memory block device.
* @nid: The node id.
- * @context: The memory initialization context.
*
- * Indicate that system RAM falling into this memory block (partially) belongs
- * to the given node. If the context indicates ("early") that we are adding the
- * node during node device subsystem initialization, this will also properly
- * set/adjust mem->zone based on the zone ranges of the given node.
+ * Indicate that early system RAM falling into this memory block (partially)
+ * belongs to the given node. This will also properly set/adjust mem->zone based
+ * on the zone ranges of the given node.
+ *
+ * Memory hotplug handles this on memory block creation, where only a single
+ * nid can span a memory block.
*/
-void memory_block_add_nid(struct memory_block *mem, int nid,
- enum meminit_context context)
+void memory_block_add_nid_early(struct memory_block *mem, int nid)
{
- if (context == MEMINIT_EARLY && mem->nid != nid) {
+ if (mem->nid != nid) {
/*
* For early memory we have to determine the zone when setting
* the node id and handle multiple nodes spanning a single
@@ -810,19 +777,18 @@ void memory_block_add_nid(struct memory_block *mem, int nid,
mem->zone = early_node_zone_for_memory_block(mem, nid);
else
mem->zone = NULL;
+ /*
+ * If this memory block spans multiple nodes, we only indicate
+ * the last processed node. If we span multiple nodes (not applicable
+ * to hotplugged memory), zone == NULL will prohibit memory offlining
+ * and consequently unplug.
+ */
+ mem->nid = nid;
}
-
- /*
- * If this memory block spans multiple nodes, we only indicate
- * the last processed node. If we span multiple nodes (not applicable
- * to hotplugged memory), zone == NULL will prohibit memory offlining
- * and consequently unplug.
- */
- mem->nid = nid;
}
#endif
-static int add_memory_block(unsigned long block_id, unsigned long state,
+static int add_memory_block(unsigned long block_id, int nid, unsigned long state,
struct vmem_altmap *altmap,
struct memory_group *group)
{
@@ -840,7 +806,7 @@ static int add_memory_block(unsigned long block_id, unsigned long state,
mem->start_section_nr = block_id * sections_per_block;
mem->state = state;
- mem->nid = NUMA_NO_NODE;
+ mem->nid = nid;
mem->altmap = altmap;
INIT_LIST_HEAD(&mem->group_next);
@@ -867,13 +833,6 @@ static int add_memory_block(unsigned long block_id, unsigned long state,
return 0;
}
-static int add_hotplug_memory_block(unsigned long block_id,
- struct vmem_altmap *altmap,
- struct memory_group *group)
-{
- return add_memory_block(block_id, MEM_OFFLINE, altmap, group);
-}
-
static void remove_memory_block(struct memory_block *memory)
{
if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
@@ -899,7 +858,7 @@ static void remove_memory_block(struct memory_block *memory)
* Called under device_hotplug_lock.
*/
int create_memory_block_devices(unsigned long start, unsigned long size,
- struct vmem_altmap *altmap,
+ int nid, struct vmem_altmap *altmap,
struct memory_group *group)
{
const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
@@ -913,7 +872,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size,
return -EINVAL;
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
- ret = add_hotplug_memory_block(block_id, altmap, group);
+ ret = add_memory_block(block_id, nid, MEM_OFFLINE, altmap, group);
if (ret)
break;
}
@@ -1018,7 +977,7 @@ void __init memory_dev_init(void)
continue;
block_id = memory_block_id(nr);
- ret = add_memory_block(block_id, MEM_ONLINE, NULL, NULL);
+ ret = add_memory_block(block_id, NUMA_NO_NODE, MEM_ONLINE, NULL, NULL);
if (ret) {
panic("%s() failed to add memory block: %d\n",
__func__, ret);
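
Exporting sections_per_block lets the block-id helpers removed above live in a header where other files can use them (node.c below now calls phys_to_block_id() directly). Their arithmetic, reconstructed from the removed lines:

	static inline unsigned long memory_block_id(unsigned long section_nr)
	{
		return section_nr / sections_per_block;
	}

	static inline unsigned long pfn_to_block_id(unsigned long pfn)
	{
		return memory_block_id(pfn_to_section_nr(pfn));
	}

	static inline unsigned long phys_to_block_id(unsigned long phys)
	{
		return pfn_to_block_id(PFN_DOWN(phys));
	}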
diff --git a/drivers/base/node.c b/drivers/base/node.c
index c19094481630..00cf4532f121 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -21,6 +21,7 @@
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>
+#include <linux/memblock.h>
static const struct bus_type node_subsys = {
.name = "node",
@@ -111,6 +112,27 @@ static const struct attribute_group *node_access_node_groups[] = {
NULL,
};
+#ifdef CONFIG_MEMORY_HOTPLUG
+static BLOCKING_NOTIFIER_HEAD(node_chain);
+
+int register_node_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&node_chain, nb);
+}
+EXPORT_SYMBOL(register_node_notifier);
+
+void unregister_node_notifier(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&node_chain, nb);
+}
+EXPORT_SYMBOL(unregister_node_notifier);
+
+int node_notify(unsigned long val, void *v)
+{
+ return blocking_notifier_call_chain(&node_chain, val, v);
+}
+#endif
+
static void node_remove_accesses(struct node *node)
{
struct node_access_nodes *c, *cnext;
@@ -227,6 +249,44 @@ void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
EXPORT_SYMBOL_GPL(node_set_perf_attrs);
/**
+ * node_update_perf_attrs - Update the performance values for given access class
+ * @nid: Node identifier to be updated
+ * @coord: Heterogeneous memory performance coordinates
+ * @access: The access class for the given attributes
+ */
+void node_update_perf_attrs(unsigned int nid, struct access_coordinate *coord,
+ enum access_coordinate_class access)
+{
+ struct node_access_nodes *access_node;
+ struct node *node;
+ int i;
+
+ if (WARN_ON_ONCE(!node_online(nid)))
+ return;
+
+ node = node_devices[nid];
+ list_for_each_entry(access_node, &node->access_list, list_node) {
+ if (access_node->access != access)
+ continue;
+
+ access_node->coord = *coord;
+ for (i = 0; access_attrs[i]; i++) {
+ sysfs_notify(&access_node->dev.kobj,
+ NULL, access_attrs[i]->name);
+ }
+ break;
+ }
+
+ /* When setting CPU access coordinates, update mempolicy */
+ if (access != ACCESS_COORDINATE_CPU)
+ return;
+
+ if (mempolicy_set_node_perf(nid, coord))
+ pr_info("failed to set mempolicy attrs for node %d\n", nid);
+}
+EXPORT_SYMBOL_GPL(node_update_perf_attrs);
+
+/**
* struct node_cache_info - Internal tracking for memory node caches
* @dev: Device representing the cache level
* @node: List element for tracking in the node
@@ -478,7 +538,7 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
nid, 0UL,
nid, 0UL,
- nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
+ nid, 0UL,
nid, K(sreclaimable +
node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
nid, K(sreclaimable + sunreclaimable),
@@ -597,7 +657,7 @@ static const struct bin_attribute *node_dev_bin_attrs[] = {
static const struct attribute_group node_dev_group = {
.attrs = node_dev_attrs,
- .bin_attrs_new = node_dev_bin_attrs,
+ .bin_attrs = node_dev_bin_attrs,
};
static const struct attribute_group *node_dev_groups[] = {
@@ -616,48 +676,6 @@ static void node_device_release(struct device *dev)
kfree(to_node(dev));
}
-/*
- * register_node - Setup a sysfs device for a node.
- * @num - Node number to use when creating the device.
- *
- * Initialize and register the node device.
- */
-static int register_node(struct node *node, int num)
-{
- int error;
-
- node->dev.id = num;
- node->dev.bus = &node_subsys;
- node->dev.release = node_device_release;
- node->dev.groups = node_dev_groups;
- error = device_register(&node->dev);
-
- if (error) {
- put_device(&node->dev);
- } else {
- hugetlb_register_node(node);
- compaction_register_node(node);
- }
-
- return error;
-}
-
-/**
- * unregister_node - unregister a node device
- * @node: node going away
- *
- * Unregisters a node device @node. All the devices on the node must be
- * unregistered before calling this function.
- */
-void unregister_node(struct node *node)
-{
- hugetlb_unregister_node(node);
- compaction_unregister_node(node);
- node_remove_accesses(node);
- node_remove_caches(node);
- device_unregister(&node->dev);
-}
-
struct node *node_devices[MAX_NUMNODES];
/*
@@ -756,23 +774,11 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-static int __ref get_nid_for_pfn(unsigned long pfn)
-{
-#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
- if (system_state < SYSTEM_RUNNING)
- return early_pfn_to_nid(pfn);
-#endif
- return pfn_to_nid(pfn);
-}
-
static void do_register_memory_block_under_node(int nid,
- struct memory_block *mem_blk,
- enum meminit_context context)
+ struct memory_block *mem_blk)
{
int ret;
- memory_block_add_nid(mem_blk, nid, context);
-
ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
&mem_blk->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
@@ -791,46 +797,6 @@ static void do_register_memory_block_under_node(int nid,
ret);
}
-/* register memory section under specified node if it spans that node */
-static int register_mem_block_under_node_early(struct memory_block *mem_blk,
- void *arg)
-{
- unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
- unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
- unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
- int nid = *(int *)arg;
- unsigned long pfn;
-
- for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
- int page_nid;
-
- /*
- * memory block could have several absent sections from start.
- * skip pfn range from absent section
- */
- if (!pfn_in_present_section(pfn)) {
- pfn = round_down(pfn + PAGES_PER_SECTION,
- PAGES_PER_SECTION) - 1;
- continue;
- }
-
- /*
- * We need to check if page belongs to nid only at the boot
- * case because node's ranges can be interleaved.
- */
- page_nid = get_nid_for_pfn(pfn);
- if (page_nid < 0)
- continue;
- if (page_nid != nid)
- continue;
-
- do_register_memory_block_under_node(nid, mem_blk, MEMINIT_EARLY);
- return 0;
- }
- /* mem section does not span the specified node */
- return 0;
-}
-
/*
* During hotplug we know that all pages in the memory block belong to the same
* node.
@@ -840,7 +806,7 @@ static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
{
int nid = *(int *)arg;
- do_register_memory_block_under_node(nid, mem_blk, MEMINIT_HOTPLUG);
+ do_register_memory_block_under_node(nid, mem_blk);
return 0;
}
@@ -859,24 +825,51 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}
-void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
- unsigned long end_pfn,
- enum meminit_context context)
+/* register all memory blocks under the corresponding nodes */
+static void register_memory_blocks_under_nodes(void)
{
- walk_memory_blocks_func_t func;
+ struct memblock_region *r;
+
+ for_each_mem_region(r) {
+ const unsigned long start_block_id = phys_to_block_id(r->base);
+ const unsigned long end_block_id = phys_to_block_id(r->base + r->size - 1);
+ const int nid = memblock_get_region_node(r);
+ unsigned long block_id;
+
+ if (!node_online(nid))
+ continue;
+
+ for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
+ struct memory_block *mem;
+
+ mem = find_memory_block_by_id(block_id);
+ if (!mem)
+ continue;
+
+ memory_block_add_nid_early(mem, nid);
+ do_register_memory_block_under_node(nid, mem);
+ put_device(&mem->dev);
+ }
- if (context == MEMINIT_HOTPLUG)
- func = register_mem_block_under_node_hotplug;
- else
- func = register_mem_block_under_node_early;
+ }
+}
+void register_memory_blocks_under_node_hotplug(int nid, unsigned long start_pfn,
+ unsigned long end_pfn)
+{
walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
- (void *)&nid, func);
+ (void *)&nid, register_mem_block_under_node_hotplug);
return;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
-int __register_one_node(int nid)
+/**
+ * register_node - Initialize and register the node device.
+ * @nid: Node number to use when creating the device.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+int register_node(int nid)
{
int error;
int cpu;
@@ -887,9 +880,22 @@ int __register_one_node(int nid)
return -ENOMEM;
INIT_LIST_HEAD(&node->access_list);
- node_devices[nid] = node;
- error = register_node(node_devices[nid], nid);
+ node->dev.id = nid;
+ node->dev.bus = &node_subsys;
+ node->dev.release = node_device_release;
+ node->dev.groups = node_dev_groups;
+
+ error = device_register(&node->dev);
+ if (error) {
+ put_device(&node->dev);
+ return error;
+ }
+
+ node_devices[nid] = node;
+ hugetlb_register_node(node);
+ compaction_register_node(node);
+ reclaim_register_node(node);
/* link cpu under this node */
for_each_present_cpu(cpu) {
@@ -901,13 +907,26 @@ int __register_one_node(int nid)
return error;
}
-
-void unregister_one_node(int nid)
+/**
+ * unregister_node - unregister a node device
+ * @nid: nid of the node going away
+ *
+ * Unregisters the node device at node id @nid. All the devices on the
+ * node must be unregistered before calling this function.
+ */
+void unregister_node(int nid)
{
- if (!node_devices[nid])
+ struct node *node = node_devices[nid];
+
+ if (!node)
return;
- unregister_node(node_devices[nid]);
+ hugetlb_unregister_node(node);
+ compaction_unregister_node(node);
+ reclaim_unregister_node(node);
+ node_remove_accesses(node);
+ node_remove_caches(node);
+ device_unregister(&node->dev);
node_devices[nid] = NULL;
}
@@ -980,11 +999,13 @@ void __init node_dev_init(void)
/*
* Create all node devices, which will properly link the node
- * to applicable memory block devices and already created cpu devices.
+ * to already created cpu devices.
*/
for_each_online_node(i) {
- ret = register_one_node(i);
+ ret = register_node(i);
if (ret)
panic("%s() failed to add node: %d\n", __func__, ret);
}
+
+ register_memory_blocks_under_nodes();
}
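
The new node notifier chain mirrors the memory notifier: a subsystem registers a notifier_block and is called back through node_notify() on node state changes. A minimal registration sketch (names with a my_ prefix are placeholders, and the event values passed through the chain are defined elsewhere and not shown in this hunk, so the callback is deliberately generic):

	static int my_node_callback(struct notifier_block *nb,
				    unsigned long action, void *data)
	{
		/* React to node transitions; action carries the event. */
		return NOTIFY_OK;
	}

	static struct notifier_block my_node_nb = {
		.notifier_call = my_node_callback,
	};

	/* Somewhere in subsystem init: */
	register_node_notifier(&my_node_nb);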
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 075ec1d1b73a..b45d41b018ca 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -150,25 +150,37 @@ devm_platform_ioremap_resource_byname(struct platform_device *pdev,
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
#endif /* CONFIG_HAS_IOMEM */
+static const struct cpumask *get_irq_affinity(struct platform_device *dev,
+ unsigned int num)
+{
+ const struct cpumask *mask = NULL;
+#ifndef CONFIG_SPARC
+ struct fwnode_handle *fwnode = dev_fwnode(&dev->dev);
+
+ if (is_of_node(fwnode))
+ mask = of_irq_get_affinity(to_of_node(fwnode), num);
+ else if (is_acpi_device_node(fwnode))
+ mask = acpi_irq_get_affinity(ACPI_HANDLE_FWNODE(fwnode), num);
+#endif
+
+ return mask ?: cpu_possible_mask;
+}
+
/**
- * platform_get_irq_optional - get an optional IRQ for a device
- * @dev: platform device
- * @num: IRQ number index
+ * platform_get_irq_affinity - get an optional IRQ and its affinity for a device
+ * @dev: platform device
+ * @num: interrupt number index
+ * @affinity: optional cpumask pointer to get the affinity of a per-cpu interrupt
*
- * Gets an IRQ for a platform device. Device drivers should check the return
- * value for errors so as to not pass a negative integer value to the
- * request_irq() APIs. This is the same as platform_get_irq(), except that it
- * does not print an error message if an IRQ can not be obtained.
- *
- * For example::
+ * Gets an interrupt for a platform device. Device drivers should check the
+ * return value for errors so as to not pass a negative integer value to
+ * the request_irq() APIs. Optional affinity information is provided in the
+ * affinity pointer if available, and NULL otherwise.
*
- * int irq = platform_get_irq_optional(pdev, 0);
- * if (irq < 0)
- * return irq;
- *
- * Return: non-zero IRQ number on success, negative error number on failure.
+ * Return: non-zero interrupt number on success, negative error number on failure.
*/
-int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
+int platform_get_irq_affinity(struct platform_device *dev, unsigned int num,
+ const struct cpumask **affinity)
{
int ret;
#ifdef CONFIG_SPARC
@@ -236,8 +248,37 @@ out_not_found:
out:
if (WARN(!ret, "0 is an invalid IRQ number\n"))
return -EINVAL;
+
+ if (ret > 0 && affinity)
+ *affinity = get_irq_affinity(dev, num);
+
return ret;
}
+EXPORT_SYMBOL_GPL(platform_get_irq_affinity);
+
+/**
+ * platform_get_irq_optional - get an optional interrupt for a device
+ * @dev: platform device
+ * @num: interrupt number index
+ *
+ * Gets an interrupt for a platform device. Device drivers should check the
+ * return value for errors so as to not pass a negative integer value to
+ * the request_irq() APIs. This is the same as platform_get_irq(), except
+ * that it does not print an error message if an interrupt can not be
+ * obtained.
+ *
+ * For example::
+ *
+ * int irq = platform_get_irq_optional(pdev, 0);
+ * if (irq < 0)
+ * return irq;
+ *
+ * Return: non-zero interrupt number on success, negative error number on failure.
+ */
+int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
+{
+ return platform_get_irq_affinity(dev, num, NULL);
+}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);
/**
@@ -1396,15 +1437,13 @@ static int platform_probe(struct device *_dev)
if (ret < 0)
return ret;
- ret = dev_pm_domain_attach(_dev, true);
+ ret = dev_pm_domain_attach(_dev, PD_FLAG_ATTACH_POWER_ON |
+ PD_FLAG_DETACH_POWER_OFF);
if (ret)
goto out;
- if (drv->probe) {
+ if (drv->probe)
ret = drv->probe(dev);
- if (ret)
- dev_pm_domain_detach(_dev, true);
- }
out:
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
@@ -1422,7 +1461,6 @@ static void platform_remove(struct device *_dev)
if (drv->remove)
drv->remove(dev);
- dev_pm_domain_detach(_dev, true);
}
static void platform_shutdown(struct device *_dev)
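
platform_get_irq_optional() is now a trivial wrapper around platform_get_irq_affinity(), so existing callers keep their behavior while drivers that place per-CPU interrupts can also retrieve the firmware-described mask. A usage sketch based on the signatures above:

	const struct cpumask *affinity;
	int irq;

	irq = platform_get_irq_affinity(pdev, 0, &affinity);
	if (irq < 0)
		return irq;
	/* Per get_irq_affinity() above, the mask comes from DT or ACPI
	 * and falls back to cpu_possible_mask when none is described. */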
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 01f11629d241..2989e42d0161 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -4,5 +4,6 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o wakeup_stats.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
obj-$(CONFIG_PM_QOS_KUNIT_TEST) += qos-test.o
+obj-$(CONFIG_PM_RUNTIME_KUNIT_TEST) += runtime-test.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 781968a128ff..6ecf9ce4a4e6 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -83,7 +83,7 @@ EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
/**
* dev_pm_domain_attach - Attach a device to its PM domain.
* @dev: Device to attach.
- * @power_on: Used to indicate whether we should power on the device.
+ * @flags: indicate whether we should power on/off the device on attach/detach
*
* The @dev may only be attached to a single PM domain. By iterating through
* the available alternatives we try to find a valid PM domain for the device.
@@ -100,17 +100,20 @@ EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
* Returns 0 on successfully attached PM domain, or when it is found that the
* device doesn't need a PM domain, else a negative error code.
*/
-int dev_pm_domain_attach(struct device *dev, bool power_on)
+int dev_pm_domain_attach(struct device *dev, u32 flags)
{
int ret;
if (dev->pm_domain)
return 0;
- ret = acpi_dev_pm_attach(dev, power_on);
+ ret = acpi_dev_pm_attach(dev, !!(flags & PD_FLAG_ATTACH_POWER_ON));
if (!ret)
ret = genpd_dev_pm_attach(dev);
+ if (dev->pm_domain)
+ dev->power.detach_power_off = !!(flags & PD_FLAG_DETACH_POWER_OFF);
+
return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(dev_pm_domain_attach);
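
With the bool replaced by a flags word, callers can request both power-on at attach and an automatic power-off at detach in one call, as the platform bus conversion above does; the detach behavior is latched into dev->power.detach_power_off at attach time, which is why platform_probe() no longer needs an explicit dev_pm_domain_detach() on its error path. The converted call shape:

	ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
					PD_FLAG_DETACH_POWER_OFF);
	if (ret)
		return ret;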
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 6502720bb564..af99bbcf281c 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -8,6 +8,13 @@
#include <linux/pm_runtime.h>
#include <linux/export.h>
+#define CALL_PM_OP(dev, op) \
+({ \
+ struct device *_dev = (dev); \
+ const struct dev_pm_ops *pm = _dev->driver ? _dev->driver->pm : NULL; \
+ pm && pm->op ? pm->op(_dev) : 0; \
+})
+
#ifdef CONFIG_PM
/**
* pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
@@ -19,12 +26,7 @@
*/
int pm_generic_runtime_suspend(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int ret;
-
- ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
-
- return ret;
+ return CALL_PM_OP(dev, runtime_suspend);
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
@@ -38,12 +40,7 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
*/
int pm_generic_runtime_resume(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int ret;
-
- ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
-
- return ret;
+ return CALL_PM_OP(dev, runtime_resume);
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
#endif /* CONFIG_PM */
@@ -72,9 +69,7 @@ int pm_generic_prepare(struct device *dev)
*/
int pm_generic_suspend_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
+ return CALL_PM_OP(dev, suspend_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
@@ -84,9 +79,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
*/
int pm_generic_suspend_late(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
+ return CALL_PM_OP(dev, suspend_late);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
@@ -96,9 +89,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
*/
int pm_generic_suspend(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->suspend ? pm->suspend(dev) : 0;
+ return CALL_PM_OP(dev, suspend);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);
@@ -108,9 +99,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend);
*/
int pm_generic_freeze_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
+ return CALL_PM_OP(dev, freeze_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
@@ -120,9 +109,7 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
*/
int pm_generic_freeze(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->freeze ? pm->freeze(dev) : 0;
+ return CALL_PM_OP(dev, freeze);
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);
@@ -132,9 +119,7 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze);
*/
int pm_generic_poweroff_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
+ return CALL_PM_OP(dev, poweroff_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
@@ -144,9 +129,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
*/
int pm_generic_poweroff_late(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
+ return CALL_PM_OP(dev, poweroff_late);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
@@ -156,9 +139,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
*/
int pm_generic_poweroff(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->poweroff ? pm->poweroff(dev) : 0;
+ return CALL_PM_OP(dev, poweroff);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);
@@ -168,9 +149,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff);
*/
int pm_generic_thaw_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
+ return CALL_PM_OP(dev, thaw_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
@@ -180,9 +159,7 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
*/
int pm_generic_thaw(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->thaw ? pm->thaw(dev) : 0;
+ return CALL_PM_OP(dev, thaw);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);
@@ -192,9 +169,7 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
*/
int pm_generic_resume_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
+ return CALL_PM_OP(dev, resume_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
@@ -204,9 +179,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
*/
int pm_generic_resume_early(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->resume_early ? pm->resume_early(dev) : 0;
+ return CALL_PM_OP(dev, resume_early);
}
EXPORT_SYMBOL_GPL(pm_generic_resume_early);
@@ -216,9 +189,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_early);
*/
int pm_generic_resume(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->resume ? pm->resume(dev) : 0;
+ return CALL_PM_OP(dev, resume);
}
EXPORT_SYMBOL_GPL(pm_generic_resume);
@@ -228,9 +199,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
*/
int pm_generic_restore_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
+ return CALL_PM_OP(dev, restore_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
@@ -240,9 +209,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
*/
int pm_generic_restore_early(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->restore_early ? pm->restore_early(dev) : 0;
+ return CALL_PM_OP(dev, restore_early);
}
EXPORT_SYMBOL_GPL(pm_generic_restore_early);
@@ -252,9 +219,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_early);
*/
int pm_generic_restore(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->restore ? pm->restore(dev) : 0;
+ return CALL_PM_OP(dev, restore);
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
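
CALL_PM_OP() is a GNU statement expression that evaluates its dev argument once, looks up the driver's dev_pm_ops, and yields the callback's return value or 0 when no callback is set, which is exactly the boilerplate every pm_generic_*() helper used to repeat. Roughly, for one caller:

	/* CALL_PM_OP(dev, suspend) expands to the equivalent of: */
	({
		struct device *_dev = (dev);
		const struct dev_pm_ops *pm =
			_dev->driver ? _dev->driver->pm : NULL;
		pm && pm->suspend ? pm->suspend(_dev) : 0;
	})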
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index eebe699fdf4f..97a8b4fcf471 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -34,16 +34,13 @@
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/timer.h>
+#include <linux/nmi.h>
#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);
-#define list_for_each_entry_rcu_locked(pos, head, member) \
- list_for_each_entry_rcu(pos, head, member, \
- device_links_read_lock_held())
-
/*
* The entries in the dpm_list list are in a depth first order, simply
* because children are guaranteed to be discovered after parents, and
@@ -66,6 +63,20 @@ static pm_message_t pm_transition;
static DEFINE_MUTEX(async_wip_mtx);
static int async_error;
+/**
+ * pm_hibernate_is_recovering - check if recovering from hibernation due to an error.
+ *
+ * Used to query whether dev_pm_ops.thaw() is being called for the normal
+ * hibernation case or while recovering from an error.
+ *
+ * Return: true for error case, false for normal case.
+ */
+bool pm_hibernate_is_recovering(void)
+{
+ return pm_transition.event == PM_EVENT_RECOVER;
+}
+EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering);
+
static const char *pm_verb(int event)
{
switch (event) {
@@ -85,6 +96,8 @@ static const char *pm_verb(int event)
return "restore";
case PM_EVENT_RECOVER:
return "recover";
+ case PM_EVENT_POWEROFF:
+ return "poweroff";
default:
return "(unknown PM event)";
}
@@ -267,8 +280,9 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
* callbacks freeing the link objects for the links in the list we're
* walking.
*/
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
- if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dev_for_each_link_to_supplier(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
+ !device_link_flag_is_sync_state_only(link->flags))
dpm_wait(link->supplier, async);
device_links_read_unlock(idx);
@@ -324,8 +338,9 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
* continue instead of trying to continue in parallel with its
* unregistration).
*/
- list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
- if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dev_for_each_link_to_consumer(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
+ !device_link_flag_is_sync_state_only(link->flags))
dpm_wait(link->consumer, async);
device_links_read_unlock(idx);
@@ -355,6 +370,7 @@ static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze;
+ case PM_EVENT_POWEROFF:
case PM_EVENT_HIBERNATE:
return ops->poweroff;
case PM_EVENT_THAW:
@@ -389,6 +405,7 @@ static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze_late;
+ case PM_EVENT_POWEROFF:
case PM_EVENT_HIBERNATE:
return ops->poweroff_late;
case PM_EVENT_THAW:
@@ -423,6 +440,7 @@ static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t stat
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze_noirq;
+ case PM_EVENT_POWEROFF:
case PM_EVENT_HIBERNATE:
return ops->poweroff_noirq;
case PM_EVENT_THAW:
@@ -503,6 +521,11 @@ struct dpm_watchdog {
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
struct dpm_watchdog wd
+static bool __read_mostly dpm_watchdog_all_cpu_backtrace;
+module_param(dpm_watchdog_all_cpu_backtrace, bool, 0644);
+MODULE_PARM_DESC(dpm_watchdog_all_cpu_backtrace,
+ "Backtrace all CPUs on DPM watchdog timeout");
+
/**
* dpm_watchdog_handler - Driver suspend / resume watchdog handler.
* @t: The timer that PM watchdog depends on.
@@ -518,8 +541,12 @@ static void dpm_watchdog_handler(struct timer_list *t)
unsigned int time_left;
if (wd->fatal) {
+ unsigned int this_cpu = smp_processor_id();
+
dev_emerg(wd->dev, "**** DPM device timeout ****\n");
show_stack(wd->tsk, NULL, KERN_EMERG);
+ if (dpm_watchdog_all_cpu_backtrace)
+ trigger_allbutcpu_cpu_backtrace(this_cpu);
panic("%s %s: unrecoverable failure\n",
dev_driver_string(wd->dev), dev_name(wd->dev));
}
@@ -647,14 +674,27 @@ static void dpm_async_resume_children(struct device *dev, async_func_t func)
/*
* Start processing "async" children of the device unless it's been
* started already for them.
- *
- * This could have been done for the device's "async" consumers too, but
- * they either need to wait for their parents or the processing has
- * already started for them after their parents were processed.
*/
device_for_each_child(dev, func, dpm_async_with_cleanup);
}
+static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
+{
+ struct device_link *link;
+ int idx;
+
+ dpm_async_resume_children(dev, func);
+
+ idx = device_links_read_lock();
+
+ /* Start processing the device's "async" consumers. */
+ dev_for_each_link_to_consumer(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_async_with_cleanup(link->consumer, func);
+
+ device_links_read_unlock(idx);
+}
+
static void dpm_clear_async_state(struct device *dev)
{
reinit_completion(&dev->power.completion);
@@ -663,7 +703,14 @@ static void dpm_clear_async_state(struct device *dev)
static bool dpm_root_device(struct device *dev)
{
- return !dev->parent;
+ lockdep_assert_held(&dpm_list_mtx);
+
+ /*
+ * Since this function is required to run under dpm_list_mtx, the
+ * list_empty() below will only return true if the device's list of
+ * suppliers is actually empty before calling it.
+ */
+ return !dev->parent && list_empty(&dev->links.suppliers);
}
static void async_resume_noirq(void *data, async_cookie_t cookie);
@@ -690,8 +737,20 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
if (dev->power.syscore || dev->power.direct_complete)
goto Out;
- if (!dev->power.is_noirq_suspended)
+ if (!dev->power.is_noirq_suspended) {
+ /*
+ * This means that system suspend has been aborted in the noirq
+ * phase before invoking the noirq suspend callback for the
+ * device, so if device_suspend_late() has left it in suspend,
+ * device_resume_early() should leave it in suspend too, in
+ * case its early resume depends on the noirq resume that
+ * has not run.
+ */
+ if (dev_pm_skip_suspend(dev))
+ dev->power.must_resume = false;
+
goto Out;
+ }
if (!dpm_wait_for_superior(dev, async))
goto Out;
@@ -747,12 +806,12 @@ Out:
TRACE_RESUME(error);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
}
- dpm_async_resume_children(dev, async_resume_noirq);
+ dpm_async_resume_subordinate(dev, async_resume_noirq);
}
static void async_resume_noirq(void *data, async_cookie_t cookie)
@@ -804,7 +863,7 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "noirq");
- if (async_error)
+ if (READ_ONCE(async_error))
dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
@@ -844,12 +903,15 @@ static void device_resume_early(struct device *dev, pm_message_t state, bool asy
TRACE_DEVICE(dev);
TRACE_RESUME(0);
- if (dev->power.syscore || dev->power.direct_complete)
+ if (dev->power.direct_complete)
goto Out;
if (!dev->power.is_late_suspended)
goto Out;
+ if (dev->power.syscore)
+ goto Skip;
+
if (!dpm_wait_for_superior(dev, async))
goto Out;
@@ -882,20 +944,20 @@ Run:
Skip:
dev->power.is_late_suspended = false;
+ pm_runtime_enable(dev);
Out:
TRACE_RESUME(error);
- pm_runtime_enable(dev);
complete_all(&dev->power.completion);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async early" : " early", error);
}
- dpm_async_resume_children(dev, async_resume_early);
+ dpm_async_resume_subordinate(dev, async_resume_early);
}
static void async_resume_early(void *data, async_cookie_t cookie)
@@ -951,7 +1013,7 @@ void dpm_resume_early(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "early");
- if (async_error)
+ if (READ_ONCE(async_error))
dpm_save_failed_step(SUSPEND_RESUME_EARLY);
trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
@@ -1066,12 +1128,12 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
TRACE_RESUME(error);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
- dpm_async_resume_children(dev, async_resume);
+ dpm_async_resume_subordinate(dev, async_resume);
}
static void async_resume(void *data, async_cookie_t cookie)
@@ -1095,7 +1157,6 @@ void dpm_resume(pm_message_t state)
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume"), state.event, true);
- might_sleep();
pm_transition = state;
async_error = 0;
@@ -1131,7 +1192,7 @@ void dpm_resume(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, NULL);
- if (async_error)
+ if (READ_ONCE(async_error))
dpm_save_failed_step(SUSPEND_RESUME);
cpufreq_resume();
@@ -1198,7 +1259,6 @@ void dpm_complete(pm_message_t state)
struct list_head list;
trace_suspend_resume(TPS("dpm_complete"), state.event, true);
- might_sleep();
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
@@ -1237,6 +1297,7 @@ void dpm_complete(pm_message_t state)
void dpm_resume_end(pm_message_t state)
{
dpm_resume(state);
+ pm_restore_gfp_mask();
dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
@@ -1257,10 +1318,15 @@ static bool dpm_leaf_device(struct device *dev)
return false;
}
- return true;
+ /*
+ * Since this function is required to run under dpm_list_mtx, the
+ * list_empty() below will only return true if the device's list of
+ * consumers is actually empty before calling it.
+ */
+ return list_empty(&dev->links.consumers);
}
-static void dpm_async_suspend_parent(struct device *dev, async_func_t func)
+static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
{
guard(mutex)(&dpm_list_mtx);
@@ -1272,11 +1338,47 @@ static void dpm_async_suspend_parent(struct device *dev, async_func_t func)
* deleted before it.
*/
if (!device_pm_initialized(dev))
- return;
+ return false;
/* Start processing the device's parent if it is "async". */
if (dev->parent)
dpm_async_with_cleanup(dev->parent, func);
+
+ return true;
+}
+
+static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
+{
+ struct device_link *link;
+ int idx;
+
+ if (!dpm_async_suspend_parent(dev, func))
+ return;
+
+ idx = device_links_read_lock();
+
+ /* Start processing the device's "async" suppliers. */
+ dev_for_each_link_to_supplier(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_async_with_cleanup(link->supplier, func);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_async_suspend_complete_all(struct list_head *device_list)
+{
+ struct device *dev;
+
+ guard(mutex)(&async_wip_mtx);
+
+ list_for_each_entry_reverse(dev, device_list, power.entry) {
+ /*
+ * In case the device is being waited for and async processing
+ * has not started for it yet, let the waiters make progress.
+ */
+ if (!dev->power.work_in_progress)
+ complete_all(&dev->power.completion);
+ }
}
/**
@@ -1310,7 +1412,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
link->supplier->power.must_resume = true;
device_links_read_unlock(idx);
@@ -1327,7 +1429,7 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie);
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
+static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1338,7 +1440,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool asy
dpm_wait_for_subordinate(dev, async);
- if (async_error)
+ if (READ_ONCE(async_error))
goto Complete;
if (dev->power.syscore || dev->power.direct_complete)
@@ -1371,7 +1473,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool asy
Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
goto Complete;
@@ -1397,12 +1499,10 @@ Complete:
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
- if (error || async_error)
- return error;
-
- dpm_async_suspend_parent(dev, async_suspend_noirq);
+ if (error || READ_ONCE(async_error))
+ return;
- return 0;
+ dpm_async_suspend_superior(dev, async_suspend_noirq);
}
static void async_suspend_noirq(void *data, async_cookie_t cookie)
@@ -1417,7 +1517,7 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
struct device *dev;
- int error = 0;
+ int error;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
@@ -1448,13 +1548,14 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_noirq(dev, state, false);
+ device_suspend_noirq(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
- if (error || async_error) {
+ if (READ_ONCE(async_error)) {
+ dpm_async_suspend_complete_all(&dpm_late_early_list);
/*
* Move all devices to the target list to resume them
* properly.
@@ -1467,9 +1568,8 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- if (!error)
- error = async_error;
+ error = READ_ONCE(async_error);
if (error)
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
@@ -1524,7 +1624,7 @@ static void async_suspend_late(void *data, async_cookie_t cookie);
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
+static void device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1533,25 +1633,28 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
- /*
- * Disable runtime PM for the device without checking if there is a
- * pending resume request for it.
- */
- __pm_runtime_disable(dev, false);
-
dpm_wait_for_subordinate(dev, async);
- if (async_error)
+ if (READ_ONCE(async_error))
goto Complete;
if (pm_wakeup_pending()) {
- async_error = -EBUSY;
+ WRITE_ONCE(async_error, -EBUSY);
goto Complete;
}
- if (dev->power.syscore || dev->power.direct_complete)
+ if (dev->power.direct_complete)
goto Complete;
+ /*
+ * Disable runtime PM for the device without checking if there is a
+ * pending resume request for it.
+ */
+ __pm_runtime_disable(dev, false);
+
+ if (dev->power.syscore)
+ goto Skip;
+
if (dev->pm_domain) {
info = "late power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -1579,9 +1682,10 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn
Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async late" : " late", error);
+ pm_runtime_enable(dev);
goto Complete;
}
dpm_propagate_wakeup_to_parent(dev);
@@ -1593,12 +1697,10 @@ Complete:
TRACE_SUSPEND(error);
complete_all(&dev->power.completion);
- if (error || async_error)
- return error;
-
- dpm_async_suspend_parent(dev, async_suspend_late);
+ if (error || READ_ONCE(async_error))
+ return;
- return 0;
+ dpm_async_suspend_superior(dev, async_suspend_late);
}
static void async_suspend_late(void *data, async_cookie_t cookie)
@@ -1617,7 +1719,7 @@ int dpm_suspend_late(pm_message_t state)
{
ktime_t starttime = ktime_get();
struct device *dev;
- int error = 0;
+ int error;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
@@ -1650,13 +1752,14 @@ int dpm_suspend_late(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_late(dev, state, false);
+ device_suspend_late(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
- if (error || async_error) {
+ if (READ_ONCE(async_error)) {
+ dpm_async_suspend_complete_all(&dpm_suspended_list);
/*
* Move all devices to the target list to resume them
* properly.
@@ -1669,9 +1772,8 @@ int dpm_suspend_late(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- if (!error)
- error = async_error;
+ error = READ_ONCE(async_error);
if (error) {
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
@@ -1743,7 +1845,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ dev_for_each_link_to_supplier(link, dev) {
spin_lock_irq(&link->supplier->power.lock);
link->supplier->power.direct_complete = false;
spin_unlock_irq(&link->supplier->power.lock);
@@ -1760,7 +1862,7 @@ static void async_suspend(void *data, async_cookie_t cookie);
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*/
-static int device_suspend(struct device *dev, pm_message_t state, bool async)
+static void device_suspend(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1772,7 +1874,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_wait_for_subordinate(dev, async);
- if (async_error) {
+ if (READ_ONCE(async_error)) {
dev->power.direct_complete = false;
goto Complete;
}
@@ -1792,7 +1894,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
if (pm_wakeup_pending()) {
dev->power.direct_complete = false;
- async_error = -EBUSY;
+ WRITE_ONCE(async_error, -EBUSY);
goto Complete;
}
@@ -1876,7 +1978,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
Complete:
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
@@ -1884,12 +1986,10 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
- if (error || async_error)
- return error;
-
- dpm_async_suspend_parent(dev, async_suspend);
+ if (error || READ_ONCE(async_error))
+ return;
- return 0;
+ dpm_async_suspend_superior(dev, async_suspend);
}
static void async_suspend(void *data, async_cookie_t cookie)
@@ -1908,7 +2008,7 @@ int dpm_suspend(pm_message_t state)
{
ktime_t starttime = ktime_get();
struct device *dev;
- int error = 0;
+ int error;
trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
might_sleep();
@@ -1943,13 +2043,14 @@ int dpm_suspend(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
- error = device_suspend(dev, state, false);
+ device_suspend(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
- if (error || async_error) {
+ if (READ_ONCE(async_error)) {
+ dpm_async_suspend_complete_all(&dpm_prepared_list);
/*
* Move all devices to the target list to resume them
* properly.
@@ -1962,9 +2063,8 @@ int dpm_suspend(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- if (!error)
- error = async_error;
+ error = READ_ONCE(async_error);
if (error)
dpm_save_failed_step(SUSPEND_SUSPEND);
@@ -1997,8 +2097,8 @@ static bool device_prepare_smart_suspend(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ dev_for_each_link_to_supplier(link, dev) {
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
continue;
if (!dev_pm_smart_suspend(link->supplier) &&
@@ -2048,6 +2148,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
device_lock(dev);
dev->power.wakeup_path = false;
+ dev->power.out_band_wakeup = false;
if (dev->power.no_pm_callbacks)
goto unlock;
@@ -2109,7 +2210,6 @@ int dpm_prepare(pm_message_t state)
int error = 0;
trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
- might_sleep();
/*
* Give a chance for the known devices to complete their probes, before
@@ -2176,8 +2276,10 @@ int dpm_suspend_start(pm_message_t state)
error = dpm_prepare(state);
if (error)
dpm_save_failed_step(SUSPEND_PREPARE);
- else
+ else {
+ pm_restrict_gfp_mask();
error = dpm_suspend(state);
+ }
dpm_show_time(starttime, state, error, "start");
return error;
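
Note: the READ_ONCE()/WRITE_ONCE() conversions in the hunks above exist because device_suspend() and device_suspend_late() can now run concurrently from async work while latching the same async_error. A minimal sketch of the idiom, with hypothetical names, assuming only that the flag is a go/no-go hint rather than ordered state:

	/* One shared error latch for a pool of concurrent suspend workers. */
	static int async_error;

	static void suspend_one(struct device *dev)
	{
		int err;

		/* Bail out cheaply if any peer has already failed. */
		if (READ_ONCE(async_error))
			return;

		err = run_suspend_callback(dev);	/* hypothetical helper */
		if (err)
			WRITE_ONCE(async_error, err);	/* any non-zero value aborts peers */
	}

READ_ONCE()/WRITE_ONCE() stop the compiler from tearing or caching the flag; they impose no ordering on other memory, which is acceptable here because a stale read only delays the abort by one device.
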
diff --git a/drivers/base/power/runtime-test.c b/drivers/base/power/runtime-test.c
new file mode 100644
index 000000000000..1535ad2b0264
--- /dev/null
+++ b/drivers/base/power/runtime-test.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 Google, Inc.
+ */
+
+#include <linux/cleanup.h>
+#include <linux/pm_runtime.h>
+#include <kunit/device.h>
+#include <kunit/test.h>
+
+#define DEVICE_NAME "pm_runtime_test_device"
+
+static void pm_runtime_depth_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_get_sync(dev)); /* "already active" */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+/* Test pm_runtime_put() and friends when already suspended. */
+static void pm_runtime_already_suspended_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ pm_runtime_get_noresume(dev);
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_request_autosuspend(dev));
+
+ pm_runtime_get_noresume(dev);
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync_autosuspend(dev));
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ /* Grab 2 refcounts */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_get_noresume(dev);
+ /* The first put() sees usage_count 1 */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync_autosuspend(dev));
+ /* The second put() sees usage_count 0 but tells us "already suspended". */
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync_autosuspend(dev));
+
+ /* Should have remained suspended the whole time. */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+static void pm_runtime_idle_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ pm_runtime_put_noidle(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_idle(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_idle(dev));
+}
+
+static void pm_runtime_disabled_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ /* Never called pm_runtime_enable() */
+ KUNIT_EXPECT_FALSE(test, pm_runtime_enabled(dev));
+
+ /* "disabled" is treated as "active" */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_FALSE(test, pm_runtime_suspended(dev));
+
+ /*
+ * Note: these "fail", but they still acquire/release refcounts, so
+ * keep them balanced.
+ */
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get(dev));
+ pm_runtime_put(dev);
+
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get(dev));
+ pm_runtime_put_autosuspend(dev);
+
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_resume_and_get(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_request_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_request_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_request_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_autosuspend(dev));
+
+ /* Still disabled */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_FALSE(test, pm_runtime_enabled(dev));
+}
+
+static void pm_runtime_error_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ /* Fake a .runtime_resume() error */
+ dev->power.runtime_error = -EIO;
+
+ /*
+ * Note: these "fail", but they still acquire/release refcounts, so
+ * keep them balanced.
+ */
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+ pm_runtime_put(dev);
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+ pm_runtime_put_autosuspend(dev);
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_put_sync_autosuspend(dev));
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_resume_and_get(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_autosuspend(dev));
+
+ /* Error is still pending */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, -EIO, dev->power.runtime_error);
+ /* Clear error */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_set_suspended(dev));
+ KUNIT_EXPECT_EQ(test, 0, dev->power.runtime_error);
+ /* Still suspended */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get(dev));
+ pm_runtime_barrier(dev);
+ pm_runtime_put(dev);
+ pm_runtime_suspend(dev); /* flush the put(), to suspend */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ pm_runtime_put_autosuspend(dev);
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_resume_and_get(dev));
+
+ /*
+ * The following should all return -EAGAIN (usage is non-zero) or 1
+ * (already resumed).
+ */
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_idle(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_request_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_autosuspend(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+
+ /* Suspended again */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+/*
+ * Explore a typical probe() sequence in which a device marks itself powered,
+ * but doesn't hold any runtime PM reference, so it suspends as soon as it goes
+ * idle.
+ */
+static void pm_runtime_probe_active_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ KUNIT_EXPECT_TRUE(test, pm_runtime_status_suspended(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_set_active(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+ pm_runtime_enable(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+ /* Nothing to flush. We stay active. */
+ pm_runtime_barrier(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+ /* Ask for idle? Now we suspend. */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_idle(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+static struct kunit_case pm_runtime_test_cases[] = {
+ KUNIT_CASE(pm_runtime_depth_test),
+ KUNIT_CASE(pm_runtime_already_suspended_test),
+ KUNIT_CASE(pm_runtime_idle_test),
+ KUNIT_CASE(pm_runtime_disabled_test),
+ KUNIT_CASE(pm_runtime_error_test),
+ KUNIT_CASE(pm_runtime_probe_active_test),
+ {}
+};
+
+static struct kunit_suite pm_runtime_test_suite = {
+ .name = "pm_runtime_test_cases",
+ .test_cases = pm_runtime_test_cases,
+};
+
+kunit_test_suite(pm_runtime_test_suite);
+MODULE_DESCRIPTION("Runtime power management unit test suite");
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index c55a7c70bc1a..84676cc24221 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -19,10 +19,24 @@
typedef int (*pm_callback_t)(struct device *);
+static inline pm_callback_t get_callback_ptr(const void *start, size_t offset)
+{
+ return *(pm_callback_t *)(start + offset);
+}
+
+static pm_callback_t __rpm_get_driver_callback(struct device *dev,
+ size_t cb_offset)
+{
+ if (dev->driver && dev->driver->pm)
+ return get_callback_ptr(dev->driver->pm, cb_offset);
+
+ return NULL;
+}
+
static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
- pm_callback_t cb;
const struct dev_pm_ops *ops;
+ pm_callback_t cb = NULL;
if (dev->pm_domain)
ops = &dev->pm_domain->ops;
@@ -36,12 +50,10 @@ static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
ops = NULL;
if (ops)
- cb = *(pm_callback_t *)((void *)ops + cb_offset);
- else
- cb = NULL;
+ cb = get_callback_ptr(ops, cb_offset);
- if (!cb && dev->driver && dev->driver->pm)
- cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
+ if (!cb)
+ cb = __rpm_get_driver_callback(dev, cb_offset);
return cb;
}
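
Note: both helpers above rely on the offsetof()-based lookup: the caller passes the byte offset of a member of struct dev_pm_ops and the helper reinterprets the ops block at that offset as a function pointer. A condensed, standalone illustration of the idiom (types simplified, not the kernel's actual structures):

	struct ops {
		int (*suspend)(struct device *dev);
		int (*resume)(struct device *dev);
	};

	typedef int (*cb_t)(struct device *);

	static cb_t lookup(const void *ops, size_t offset)
	{
		/* Read the function pointer stored 'offset' bytes into *ops. */
		return *(cb_t *)(ops + offset);
	}

	/* lookup(ops, offsetof(struct ops, resume)) yields ops->resume. */
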
@@ -78,7 +90,7 @@ static void update_pm_runtime_accounting(struct device *dev)
/*
* Because ktime_get_mono_fast_ns() is not monotonic during
* timekeeping updates, ensure that 'now' is after the last saved
- * timesptamp.
+ * timestamp.
*/
if (now < last)
return;
@@ -205,7 +217,7 @@ static int dev_memalloc_noio(struct device *dev, void *data)
* resume/suspend callback of any one of its ancestors(or the
* block device itself), the deadlock may be triggered inside the
* memory allocation since it might not complete until the block
- * device becomes active and the involed page I/O finishes. The
+ * device becomes active and the involved page I/O finishes. The
* situation is pointed out first by Alan Stern. Network device
* are involved in iSCSI kind of situation.
*
@@ -290,7 +302,7 @@ static int rpm_get_suppliers(struct device *dev)
device_links_read_lock_held()) {
int retval;
- if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
continue;
retval = pm_runtime_get_sync(link->supplier);
@@ -486,6 +498,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
if (retval < 0)
; /* Conditions are wrong. */
+ else if ((rpmflags & RPM_GET_PUT) && retval == 1)
+ ; /* put() is allowed in RPM_SUSPENDED */
+
/* Idle notifications are allowed only in the RPM_ACTIVE state. */
else if (dev->power.runtime_status != RPM_ACTIVE)
retval = -EAGAIN;
@@ -784,6 +799,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
if (dev->power.runtime_status == RPM_ACTIVE &&
dev->power.last_status == RPM_ACTIVE)
retval = 1;
+ else if (rpmflags & RPM_TRANSPARENT)
+ goto out;
else
retval = -EACCES;
}
@@ -1191,10 +1208,12 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
*
* Return -EINVAL if runtime PM is disabled for @dev.
*
- * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
- * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
- * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
- * without changing the usage counter.
+ * Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count
+ * is set, or (2) @dev is not ignoring children and its active child count is
+ * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment
+ * the usage counter of @dev and return 1.
+ *
+ * Otherwise, return 0 without changing the usage counter.
*
* If @ign_usage_count is %true, this function can be used to prevent suspending
* the device when its runtime PM status is %RPM_ACTIVE.
@@ -1216,7 +1235,8 @@ static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
retval = -EINVAL;
} else if (dev->power.runtime_status != RPM_ACTIVE) {
retval = 0;
- } else if (ign_usage_count) {
+ } else if (ign_usage_count || (!dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count) > 0)) {
retval = 1;
atomic_inc(&dev->power.usage_count);
} else {
@@ -1249,10 +1269,16 @@ EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
* @dev: Target device.
*
* Increment the runtime PM usage counter of @dev if its runtime PM status is
- * %RPM_ACTIVE and its runtime PM usage counter is greater than 0, in which case
- * it returns 1. If the device is in a different state or its usage_count is 0,
- * 0 is returned. -EINVAL is returned if runtime PM is disabled for the device,
- * in which case also the usage_count will remain unmodified.
+ * %RPM_ACTIVE and either its runtime PM usage counter is greater than 0 or
+ * it is not ignoring children and its active child count is nonzero. 1 is
+ * returned in this case.
+ *
+ * If @dev is in a different state or it is not in use (that is, its usage
+ * counter is 0 and it is either ignoring children or its active child count
+ * is 0), 0 is returned.
+ *
+ * -EINVAL is returned if runtime PM is disabled for the device, in which case
+ * also the usage counter of @dev is not updated.
*/
int pm_runtime_get_if_in_use(struct device *dev)
{
@@ -1441,30 +1467,20 @@ static void __pm_runtime_barrier(struct device *dev)
* Next, make sure that all pending requests for the device have been flushed
* from pm_wq and wait for all runtime PM operations involving the device in
* progress to complete.
- *
- * Return value:
- * 1, if there was a resume request pending and the device had to be woken up,
- * 0, otherwise
*/
-int pm_runtime_barrier(struct device *dev)
+void pm_runtime_barrier(struct device *dev)
{
- int retval = 0;
-
pm_runtime_get_noresume(dev);
spin_lock_irq(&dev->power.lock);
if (dev->power.request_pending
- && dev->power.request == RPM_REQ_RESUME) {
+ && dev->power.request == RPM_REQ_RESUME)
rpm_resume(dev, 0);
- retval = 1;
- }
__pm_runtime_barrier(dev);
spin_unlock_irq(&dev->power.lock);
pm_runtime_put_noidle(dev);
-
- return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
@@ -1638,9 +1654,12 @@ EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
* pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle.
*
- * Increase the device's usage count and clear its power.runtime_auto flag,
- * so that it cannot be suspended at run time until pm_runtime_allow() is called
- * for it.
+ * Resume @dev if already suspended and block runtime suspend of @dev in such
+ * a way that it can be unblocked via the /sys/devices/.../power/control
+ * interface, or otherwise by calling pm_runtime_allow().
+ *
+ * Calling this function many times in a row has the same effect as calling it
+ * once.
*/
void pm_runtime_forbid(struct device *dev)
{
@@ -1661,7 +1680,13 @@ EXPORT_SYMBOL_GPL(pm_runtime_forbid);
* pm_runtime_allow - Unblock runtime PM of a device.
* @dev: Device to handle.
*
- * Decrease the device's usage count and set its power.runtime_auto flag.
+ * Unblock runtime suspend of @dev after it has been blocked by
+ * pm_runtime_forbid() (for instance, if it has been blocked via the
+ * /sys/devices/.../power/control interface), check if @dev can be
+ * suspended and suspend it in that case.
+ *
+ * Calling this function many times in a row has the same effect as calling it
+ * once.
*/
void pm_runtime_allow(struct device *dev)
{
@@ -1827,7 +1852,7 @@ void pm_runtime_init(struct device *dev)
dev->power.request_pending = false;
dev->power.request = RPM_REQ_NONE;
dev->power.deferred_resume = false;
- dev->power.needs_force_resume = 0;
+ dev->power.needs_force_resume = false;
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
@@ -1854,6 +1879,11 @@ void pm_runtime_reinit(struct device *dev)
pm_runtime_put(dev->parent);
}
}
+ /*
+ * Clear power.needs_force_resume in case it has been set by
+ * pm_runtime_force_suspend() invoked from a driver remove callback.
+ */
+ dev->power.needs_force_resume = false;
}
/**
@@ -1877,9 +1907,8 @@ void pm_runtime_get_suppliers(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
- device_links_read_lock_held())
- if (link->flags & DL_FLAG_PM_RUNTIME) {
+ dev_for_each_link_to_supplier(link, dev)
+ if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
link->supplier_preactivated = true;
pm_runtime_get_sync(link->supplier);
}
@@ -1933,7 +1962,7 @@ static void pm_runtime_drop_link_count(struct device *dev)
*/
void pm_runtime_drop_link(struct device_link *link)
{
- if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
return;
pm_runtime_drop_link_count(link->consumer);
@@ -1941,13 +1970,23 @@ void pm_runtime_drop_link(struct device_link *link)
pm_request_idle(link->supplier);
}
-bool pm_runtime_need_not_resume(struct device *dev)
+static pm_callback_t get_callback(struct device *dev, size_t cb_offset)
{
- return atomic_read(&dev->power.usage_count) <= 1 &&
- (atomic_read(&dev->power.child_count) == 0 ||
- dev->power.ignore_children);
+ /*
+ * Setting power.strict_midlayer means that the middle layer
+ * code does not want its runtime PM callbacks to be invoked via
+ * pm_runtime_force_suspend() and pm_runtime_force_resume(), so
+ * return a direct pointer to the driver callback in that case.
+ */
+ if (dev_pm_strict_midlayer_is_set(dev))
+ return __rpm_get_driver_callback(dev, cb_offset);
+
+ return __rpm_get_callback(dev, cb_offset);
}
+#define GET_CALLBACK(dev, callback) \
+ get_callback(dev, offsetof(struct dev_pm_ops, callback))
+
/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.
@@ -1964,10 +2003,6 @@ bool pm_runtime_need_not_resume(struct device *dev)
* sure the device is put into low power state and it should only be used during
* system-wide PM transitions to sleep states. It assumes that the analogous
* pm_runtime_force_resume() will be used to resume the device.
- *
- * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent
- * state where this function has called the ->runtime_suspend callback but the
- * PM core marks the driver as runtime active.
*/
int pm_runtime_force_suspend(struct device *dev)
{
@@ -1975,10 +2010,10 @@ int pm_runtime_force_suspend(struct device *dev)
int ret;
pm_runtime_disable(dev);
- if (pm_runtime_status_suspended(dev))
+ if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume)
return 0;
- callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+ callback = GET_CALLBACK(dev, runtime_suspend);
dev_pm_enable_wake_irq_check(dev, true);
ret = callback ? callback(dev) : 0;
@@ -1990,15 +2025,16 @@ int pm_runtime_force_suspend(struct device *dev)
/*
* If the device can stay in suspend after the system-wide transition
* to the working state that will follow, drop the children counter of
- * its parent, but set its status to RPM_SUSPENDED anyway in case this
- * function will be called again for it in the meantime.
+ * its parent and the usage counters of its suppliers. Otherwise, set
+ * power.needs_force_resume to let pm_runtime_force_resume() know that
+ * the device needs to be taken care of, and to prevent this function
+ * from handling the device again should it be passed in once more.
*/
- if (pm_runtime_need_not_resume(dev)) {
+ if (pm_runtime_need_not_resume(dev))
pm_runtime_set_suspended(dev);
- } else {
- __update_runtime_status(dev, RPM_SUSPENDED);
- dev->power.needs_force_resume = 1;
- }
+ else
+ dev->power.needs_force_resume = true;
return 0;
@@ -2009,33 +2045,37 @@ err:
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
+#ifdef CONFIG_PM_SLEEP
+
/**
* pm_runtime_force_resume - Force a device into resume state if needed.
* @dev: Device to resume.
*
- * Prior invoking this function we expect the user to have brought the device
- * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and bring the device into full power, if it is expected to be
- * used on system resume. In the other case, we defer the resume to be managed
- * via runtime PM.
+ * This function expects that either pm_runtime_force_suspend() has put the
+ * device into a low-power state prior to calling it, or the device had been
+ * runtime-suspended before the preceding system-wide suspend transition and it
+ * was left in suspend during that transition.
+ *
+ * The actions carried out by pm_runtime_force_suspend(), or by a runtime
+ * suspend in general, are reversed and the device is brought back into full
+ * power if it is expected to be used on system resume, which is the case when
+ * its needs_force_resume flag is set or when its smart_suspend flag is set and
+ * its runtime PM status is "active".
+ *
+ * In other cases, the resume is deferred to be managed via runtime PM.
*
- * Typically this function may be invoked from a system resume callback.
+ * Typically, this function may be invoked from a system resume callback.
*/
int pm_runtime_force_resume(struct device *dev)
{
int (*callback)(struct device *);
int ret = 0;
- if (!dev->power.needs_force_resume)
+ if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) ||
+ pm_runtime_status_suspended(dev)))
goto out;
- /*
- * The value of the parent's children counter is correct already, so
- * just update the status of the device.
- */
- __update_runtime_status(dev, RPM_ACTIVE);
-
- callback = RPM_GET_CALLBACK(dev, runtime_resume);
+ callback = GET_CALLBACK(dev, runtime_resume);
dev_pm_disable_wake_irq_check(dev, false);
ret = callback ? callback(dev) : 0;
@@ -2046,9 +2086,30 @@ int pm_runtime_force_resume(struct device *dev)
}
pm_runtime_mark_last_busy(dev);
+
out:
- dev->power.needs_force_resume = 0;
+ /*
+ * The smart_suspend flag can be cleared here because it is not going
+ * to be necessary until the next system-wide suspend transition that
+ * will update it again.
+ */
+ dev->power.smart_suspend = false;
+ /*
+ * Also clear needs_force_resume so that this function skips devices it
+ * has already handled once.
+ */
+ dev->power.needs_force_resume = false;
+
pm_runtime_enable(dev);
return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
+
+bool pm_runtime_need_not_resume(struct device *dev)
+{
+ return atomic_read(&dev->power.usage_count) <= 1 &&
+ (atomic_read(&dev->power.child_count) == 0 ||
+ dev->power.ignore_children);
+}
+
+#endif /* CONFIG_PM_SLEEP */
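
Note: the needs_force_resume/smart_suspend rework above still targets the usual consumer, a driver that reuses its runtime PM callbacks for system-wide sleep. A hedged sketch of that wiring (the foo_* callbacks are hypothetical; the macros are the standard dev_pm_ops helpers):

	static int foo_runtime_suspend(struct device *dev)
	{
		/* gate clocks, quiesce the device */
		return 0;
	}

	static int foo_runtime_resume(struct device *dev)
	{
		/* ungate clocks, restore context */
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
		/* Route system sleep through the runtime PM path. */
		SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
	};
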
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index cd6e559648b2..d8da7195bb00 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -238,10 +238,8 @@ int show_trace_dev_match(char *buf, size_t size)
unsigned int hash = hash_string(DEVSEED, dev_name(dev),
DEVHASH);
if (hash == value) {
- int len = snprintf(buf, size, "%s\n",
+ int len = scnprintf(buf, size, "%s\n",
dev_driver_string(dev));
- if (len > size)
- len = size;
buf += len;
ret += len;
size -= len;
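
Note: the snprintf() to scnprintf() switch above removes the manual clamp because the two differ only in what they return. A minimal illustration:

	char buf[8];

	/*
	 * snprintf() returns the length that would have been written (10),
	 * which corrupts remaining-space bookkeeping if used unchecked.
	 */
	int want = snprintf(buf, sizeof(buf), "0123456789");	/* want == 10 */

	/* scnprintf() returns what was actually stored, excluding the NUL. */
	int got = scnprintf(buf, sizeof(buf), "0123456789");	/* got == 7 */
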
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index d1283ff1080b..1e1a0e7eeac5 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -189,17 +189,16 @@ static void wakeup_source_remove(struct wakeup_source *ws)
if (WARN_ON(!ws))
return;
+ /*
+ * After shutting down the timer, wakeup_source_activate() will warn if
+ * the given wakeup source is passed to it.
+ */
+ timer_shutdown_sync(&ws->timer);
+
raw_spin_lock_irqsave(&events_lock, flags);
list_del_rcu(&ws->entry);
raw_spin_unlock_irqrestore(&events_lock, flags);
synchronize_srcu(&wakeup_srcu);
-
- timer_delete_sync(&ws->timer);
- /*
- * Clear timer.function to make wakeup_source_not_registered() treat
- * this wakeup source as not registered.
- */
- ws->timer.function = NULL;
}
/**
@@ -506,14 +505,14 @@ int device_set_wakeup_enable(struct device *dev, bool enable)
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
/**
- * wakeup_source_not_registered - validate the given wakeup source.
+ * wakeup_source_not_usable - validate the given wakeup source.
* @ws: Wakeup source to be validated.
*/
-static bool wakeup_source_not_registered(struct wakeup_source *ws)
+static bool wakeup_source_not_usable(struct wakeup_source *ws)
{
/*
- * Use timer struct to check if the given source is initialized
- * by wakeup_source_add.
+ * Use the timer struct to check if the given wakeup source has been
+ * initialized by wakeup_source_add() and is not going away.
*/
return ws->timer.function != pm_wakeup_timer_fn;
}
@@ -558,8 +557,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
{
unsigned int cec;
- if (WARN_ONCE(wakeup_source_not_registered(ws),
- "unregistered wakeup source\n"))
+ if (WARN_ONCE(wakeup_source_not_usable(ws), "unusable wakeup source\n"))
return;
ws->active = true;
diff --git a/drivers/base/property.c b/drivers/base/property.c
index f626d5bbe806..6a63860579dd 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -578,7 +578,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_property_string);
* @prop: The name of the property
* @nargs_prop: The name of the property telling the number of
* arguments in the referred node. NULL if @nargs is known,
- * otherwise @nargs is ignored. Only relevant on OF.
+ * otherwise @nargs is ignored.
* @nargs: Number of arguments. Ignored if @nargs_prop is non-NULL.
* @index: Index of the reference, from zero onwards.
* @args: Result structure with reference and integer arguments.
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 6f31240ee4a9..1477329410ec 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -186,6 +186,7 @@ struct regcache_ops {
enum regcache_type type;
int (*init)(struct regmap *map);
int (*exit)(struct regmap *map);
+ int (*populate)(struct regmap *map);
#ifdef CONFIG_DEBUG_FS
void (*debugfs_init)(struct regmap *map);
#endif
@@ -288,6 +289,7 @@ enum regmap_endian regmap_get_val_endian(struct device *dev,
const struct regmap_bus *bus,
const struct regmap_config *config);
+extern struct regcache_ops regcache_flat_sparse_ops;
extern struct regcache_ops regcache_rbtree_ops;
extern struct regcache_ops regcache_maple_ops;
extern struct regcache_ops regcache_flat_ops;
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index f36d3618b67c..53cc59c84e2f 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -6,7 +6,11 @@
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
#include <linux/device.h>
+#include <linux/limits.h>
+#include <linux/overflow.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -18,46 +22,92 @@ static inline unsigned int regcache_flat_get_index(const struct regmap *map,
return regcache_get_index_by_order(map, reg);
}
+struct regcache_flat_data {
+ unsigned long *valid;
+ unsigned int data[];
+};
+
static int regcache_flat_init(struct regmap *map)
{
- int i;
- unsigned int *cache;
+ unsigned int cache_size;
+ struct regcache_flat_data *cache;
if (!map || map->reg_stride_order < 0 || !map->max_register_is_set)
return -EINVAL;
- map->cache = kcalloc(regcache_flat_get_index(map, map->max_register)
- + 1, sizeof(unsigned int), map->alloc_flags);
- if (!map->cache)
+ cache_size = regcache_flat_get_index(map, map->max_register) + 1;
+ cache = kzalloc(struct_size(cache, data, cache_size), map->alloc_flags);
+ if (!cache)
return -ENOMEM;
- cache = map->cache;
+ cache->valid = bitmap_zalloc(cache_size, map->alloc_flags);
+ if (!cache->valid)
+ goto err_free;
+
+ map->cache = cache;
+
+ return 0;
+
+err_free:
+ kfree(cache);
+ return -ENOMEM;
+}
+
+static int regcache_flat_exit(struct regmap *map)
+{
+ struct regcache_flat_data *cache = map->cache;
+
+ if (cache)
+ bitmap_free(cache->valid);
+
+ kfree(cache);
+ map->cache = NULL;
+
+ return 0;
+}
+
+static int regcache_flat_populate(struct regmap *map)
+{
+ struct regcache_flat_data *cache = map->cache;
+ unsigned int i;
for (i = 0; i < map->num_reg_defaults; i++) {
unsigned int reg = map->reg_defaults[i].reg;
unsigned int index = regcache_flat_get_index(map, reg);
- cache[index] = map->reg_defaults[i].def;
+ cache->data[index] = map->reg_defaults[i].def;
+ __set_bit(index, cache->valid);
}
return 0;
}
-static int regcache_flat_exit(struct regmap *map)
+static int regcache_flat_read(struct regmap *map,
+ unsigned int reg, unsigned int *value)
{
- kfree(map->cache);
- map->cache = NULL;
+ struct regcache_flat_data *cache = map->cache;
+ unsigned int index = regcache_flat_get_index(map, reg);
+
+ /* legacy behavior: ignore validity, but warn the user */
+ if (unlikely(!test_bit(index, cache->valid)))
+ dev_warn_once(map->dev,
+ "using zero-initialized flat cache, this may cause unexpected behavior");
+
+ *value = cache->data[index];
return 0;
}
-static int regcache_flat_read(struct regmap *map,
- unsigned int reg, unsigned int *value)
+static int regcache_flat_sparse_read(struct regmap *map,
+ unsigned int reg, unsigned int *value)
{
- unsigned int *cache = map->cache;
+ struct regcache_flat_data *cache = map->cache;
unsigned int index = regcache_flat_get_index(map, reg);
- *value = cache[index];
+ if (unlikely(!test_bit(index, cache->valid)))
+ return -ENOENT;
+
+ *value = cache->data[index];
return 0;
}
@@ -65,10 +115,23 @@ static int regcache_flat_read(struct regmap *map,
static int regcache_flat_write(struct regmap *map, unsigned int reg,
unsigned int value)
{
- unsigned int *cache = map->cache;
+ struct regcache_flat_data *cache = map->cache;
unsigned int index = regcache_flat_get_index(map, reg);
- cache[index] = value;
+ cache->data[index] = value;
+ __set_bit(index, cache->valid);
+
+ return 0;
+}
+
+static int regcache_flat_drop(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ struct regcache_flat_data *cache = map->cache;
+ unsigned int bitmap_min = regcache_flat_get_index(map, min);
+ unsigned int bitmap_max = regcache_flat_get_index(map, max);
+
+ bitmap_clear(cache->valid, bitmap_min, bitmap_max + 1 - bitmap_min);
return 0;
}
@@ -78,6 +141,18 @@ struct regcache_ops regcache_flat_ops = {
.name = "flat",
.init = regcache_flat_init,
.exit = regcache_flat_exit,
+ .populate = regcache_flat_populate,
.read = regcache_flat_read,
.write = regcache_flat_write,
};
+
+struct regcache_ops regcache_flat_sparse_ops = {
+ .type = REGCACHE_FLAT_S,
+ .name = "flat-sparse",
+ .init = regcache_flat_init,
+ .exit = regcache_flat_exit,
+ .populate = regcache_flat_populate,
+ .read = regcache_flat_sparse_read,
+ .write = regcache_flat_write,
+ .drop = regcache_flat_drop,
+};
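
Note: the flat-sparse layout pairs a flexible-array value store with a validity bitmap, and struct_size() keeps the allocation overflow-safe. A condensed sketch of the allocation pattern (names simplified):

	struct cache {
		unsigned long *valid;	/* one validity bit per slot */
		unsigned int data[];	/* flexible array member */
	};

	static struct cache *cache_alloc(unsigned int nslots, gfp_t gfp)
	{
		/*
		 * struct_size() computes sizeof(*c) + nslots * sizeof(c->data[0])
		 * with overflow checking folded in.
		 */
		struct cache *c = kzalloc(struct_size(c, data, nslots), gfp);

		if (!c)
			return NULL;

		c->valid = bitmap_zalloc(nslots, gfp);
		if (!c->valid) {
			kfree(c);
			return NULL;
		}
		return c;
	}
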
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index 2319c30283a6..ca1c72b68f31 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -289,6 +289,23 @@ out:
return ret;
}
+static int regcache_maple_init(struct regmap *map)
+{
+ struct maple_tree *mt;
+
+ mt = kmalloc(sizeof(*mt), map->alloc_flags);
+ if (!mt)
+ return -ENOMEM;
+ map->cache = mt;
+
+ mt_init(mt);
+
+ if (!mt_external_lock(mt) && map->lock_key)
+ lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);
+
+ return 0;
+}
+
static int regcache_maple_exit(struct regmap *map)
{
struct maple_tree *mt = map->cache;
@@ -340,26 +357,12 @@ static int regcache_maple_insert_block(struct regmap *map, int first,
return ret;
}
-static int regcache_maple_init(struct regmap *map)
+static int regcache_maple_populate(struct regmap *map)
{
- struct maple_tree *mt;
int i;
int ret;
int range_start;
- mt = kmalloc(sizeof(*mt), map->alloc_flags);
- if (!mt)
- return -ENOMEM;
- map->cache = mt;
-
- mt_init(mt);
-
- if (!mt_external_lock(mt) && map->lock_key)
- lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);
-
- if (!map->num_reg_defaults)
- return 0;
-
range_start = 0;
/* Scan for ranges of contiguous registers */
@@ -369,23 +372,14 @@ static int regcache_maple_init(struct regmap *map)
ret = regcache_maple_insert_block(map, range_start,
i - 1);
if (ret != 0)
- goto err;
+ return ret;
range_start = i;
}
}
/* Add the last block */
- ret = regcache_maple_insert_block(map, range_start,
- map->num_reg_defaults - 1);
- if (ret != 0)
- goto err;
-
- return 0;
-
-err:
- regcache_maple_exit(map);
- return ret;
+ return regcache_maple_insert_block(map, range_start, map->num_reg_defaults - 1);
}
struct regcache_ops regcache_maple_ops = {
@@ -393,6 +387,7 @@ struct regcache_ops regcache_maple_ops = {
.name = "maple",
.init = regcache_maple_init,
.exit = regcache_maple_exit,
+ .populate = regcache_maple_populate,
.read = regcache_maple_read,
.write = regcache_maple_write,
.drop = regcache_maple_drop,
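
Note: regcache_maple_populate() above assumes the register defaults are sorted by register number: it scans for runs of consecutive registers and inserts each run as one block. A stripped-down sketch of that scan (insert_block() is a hypothetical stand-in for regcache_maple_insert_block()):

	int range_start = 0, i, ret;

	for (i = 1; i < num_defaults; i++) {
		/* A gap in register numbers ends the current contiguous run. */
		if (defaults[i].reg != defaults[i - 1].reg + 1) {
			ret = insert_block(range_start, i - 1);
			if (ret)
				return ret;
			range_start = i;
		}
	}

	/* The final run always extends to the last default. */
	return insert_block(range_start, num_defaults - 1);
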
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index a9d17f316e55..3344b82c3799 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -184,8 +184,6 @@ static void rbtree_debugfs_init(struct regmap *map)
static int regcache_rbtree_init(struct regmap *map)
{
struct regcache_rbtree_ctx *rbtree_ctx;
- int i;
- int ret;
map->cache = kmalloc(sizeof *rbtree_ctx, map->alloc_flags);
if (!map->cache)
@@ -195,19 +193,7 @@ static int regcache_rbtree_init(struct regmap *map)
rbtree_ctx->root = RB_ROOT;
rbtree_ctx->cached_rbnode = NULL;
- for (i = 0; i < map->num_reg_defaults; i++) {
- ret = regcache_rbtree_write(map,
- map->reg_defaults[i].reg,
- map->reg_defaults[i].def);
- if (ret)
- goto err;
- }
-
return 0;
-
-err:
- regcache_rbtree_exit(map);
- return ret;
}
static int regcache_rbtree_exit(struct regmap *map)
@@ -239,6 +225,22 @@ static int regcache_rbtree_exit(struct regmap *map)
return 0;
}
+static int regcache_rbtree_populate(struct regmap *map)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < map->num_reg_defaults; i++) {
+ ret = regcache_rbtree_write(map,
+ map->reg_defaults[i].reg,
+ map->reg_defaults[i].def);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int regcache_rbtree_read(struct regmap *map,
unsigned int reg, unsigned int *value)
{
@@ -546,6 +548,7 @@ struct regcache_ops regcache_rbtree_ops = {
.name = "rbtree",
.init = regcache_rbtree_init,
.exit = regcache_rbtree_exit,
+ .populate = regcache_rbtree_populate,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = rbtree_debugfs_init,
#endif
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index c7650fa434ad..319c342bf5a0 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -16,6 +16,7 @@
#include "internal.h"
static const struct regcache_ops *cache_types[] = {
+ &regcache_flat_sparse_ops,
&regcache_rbtree_ops,
&regcache_maple_ops,
&regcache_flat_ops,
@@ -221,8 +222,24 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
if (ret)
goto err_free;
}
+
+ if (map->num_reg_defaults && map->cache_ops->populate) {
+ dev_dbg(map->dev, "Populating %s cache\n", map->cache_ops->name);
+ map->lock(map->lock_arg);
+ ret = map->cache_ops->populate(map);
+ map->unlock(map->lock_arg);
+ if (ret)
+ goto err_exit;
+ }
return 0;
+err_exit:
+ if (map->cache_ops->exit) {
+ dev_dbg(map->dev, "Destroying %s cache\n", map->cache_ops->name);
+ map->lock(map->lock_arg);
+ ret = map->cache_ops->exit(map);
+ map->unlock(map->lock_arg);
+ }
err_free:
kfree(map->reg_defaults);
if (map->cache_free)
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index fb84cda92a75..c9b4c04b1cf6 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -470,10 +470,6 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
if (err)
return count;
- err = debugfs_file_get(file->f_path.dentry);
- if (err)
- return err;
-
map->lock(map->lock_arg);
if (new_val && !map->cache_only) {
@@ -486,7 +482,6 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
map->cache_only = new_val;
map->unlock(map->lock_arg);
- debugfs_file_put(file->f_path.dentry);
if (require_sync) {
err = regcache_sync(map);
@@ -517,10 +512,6 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file,
if (err)
return count;
- err = debugfs_file_get(file->f_path.dentry);
- if (err)
- return err;
-
map->lock(map->lock_arg);
if (new_val && !map->cache_bypass) {
@@ -532,7 +523,6 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file,
map->cache_bypass = new_val;
map->unlock(map->lock_arg);
- debugfs_file_put(file->f_path.dentry);
return count;
}
diff --git a/drivers/base/regmap/regmap-i3c.c b/drivers/base/regmap/regmap-i3c.c
index b5300b7c477e..863b348704dc 100644
--- a/drivers/base/regmap/regmap-i3c.c
+++ b/drivers/base/regmap/regmap-i3c.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+#include <linux/array_size.h>
#include <linux/regmap.h>
#include <linux/i3c/device.h>
#include <linux/i3c/master.h>
@@ -10,7 +11,7 @@ static int regmap_i3c_write(void *context, const void *data, size_t count)
{
struct device *dev = context;
struct i3c_device *i3c = dev_to_i3cdev(dev);
- struct i3c_priv_xfer xfers[] = {
+ struct i3c_xfer xfers[] = {
{
.rnw = false,
.len = count,
@@ -18,7 +19,7 @@ static int regmap_i3c_write(void *context, const void *data, size_t count)
},
};
- return i3c_device_do_priv_xfers(i3c, xfers, 1);
+ return i3c_device_do_xfers(i3c, xfers, ARRAY_SIZE(xfers), I3C_SDR);
}
static int regmap_i3c_read(void *context,
@@ -27,7 +28,7 @@ static int regmap_i3c_read(void *context,
{
struct device *dev = context;
struct i3c_device *i3c = dev_to_i3cdev(dev);
- struct i3c_priv_xfer xfers[2];
+ struct i3c_xfer xfers[2];
xfers[0].rnw = false;
xfers[0].len = reg_size;
@@ -37,7 +38,7 @@ static int regmap_i3c_read(void *context,
xfers[1].len = val_size;
xfers[1].data.in = val;
- return i3c_device_do_priv_xfers(i3c, xfers, 2);
+ return i3c_device_do_xfers(i3c, xfers, ARRAY_SIZE(xfers), I3C_SDR);
}
static const struct regmap_bus regmap_i3c = {
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index d1585f073776..6112d942499b 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -21,6 +21,7 @@
struct regmap_irq_chip_data {
struct mutex lock;
+ struct lock_class_key lock_key;
struct irq_chip irq_chip;
struct regmap *map;
@@ -801,7 +802,13 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
goto err_alloc;
}
- mutex_init(&d->lock);
+ /*
+ * If one regmap-irq is the parent of another, we will try to lock
+ * the child with the parent already locked; use an explicit
+ * lock_key so lockdep can tell the two lock classes apart.
+ */
+ lockdep_register_key(&d->lock_key);
+ mutex_init_with_key(&d->lock, &d->lock_key);
for (i = 0; i < chip->num_irqs; i++)
d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
@@ -816,7 +823,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
d->mask_buf[i],
chip->irq_drv_data);
if (ret)
- goto err_alloc;
+ goto err_mutex;
}
if (chip->mask_base && !chip->handle_mask_sync) {
@@ -827,7 +834,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
@@ -838,7 +845,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
@@ -855,7 +862,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret != 0) {
dev_err(map->dev, "Failed to read IRQ status: %d\n",
ret);
- goto err_alloc;
+ goto err_mutex;
}
}
@@ -879,7 +886,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret != 0) {
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
}
@@ -901,7 +908,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret != 0) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
}
@@ -910,7 +917,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (chip->status_is_level) {
ret = read_irq_data(d);
if (ret < 0)
- goto err_alloc;
+ goto err_mutex;
memcpy(d->prev_status_buf, d->status_buf,
array_size(d->chip->num_regs, sizeof(d->prev_status_buf[0])));
@@ -918,7 +925,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
ret = regmap_irq_create_domain(fwnode, irq_base, chip, d);
if (ret)
- goto err_alloc;
+ goto err_mutex;
ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
irq_flags | IRQF_ONESHOT,
@@ -935,6 +942,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
err_domain:
/* Should really dispose of the domain but... */
+err_mutex:
+ mutex_destroy(&d->lock);
+ lockdep_unregister_key(&d->lock_key);
err_alloc:
kfree(d->type_buf);
kfree(d->type_buf_def);
@@ -1027,6 +1037,8 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
kfree(d->config_buf[i]);
kfree(d->config_buf);
}
+ mutex_destroy(&d->lock);
+ lockdep_unregister_key(&d->lock_key);
kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
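
Note: giving each regmap-irq instance its own dynamically registered lock class lets lockdep tell a parent chip's mutex apart from a child's, so legitimate nested locking is not flagged as a self-deadlock. A hedged sketch of the key lifecycle (struct chip is hypothetical):

	struct chip {
		struct mutex lock;
		struct lock_class_key key;
	};

	static void chip_init(struct chip *c)
	{
		lockdep_register_key(&c->key);		/* per-instance class */
		mutex_init_with_key(&c->lock, &c->key);
	}

	static void chip_exit(struct chip *c)
	{
		mutex_destroy(&c->lock);
		lockdep_unregister_key(&c->key);	/* key must outlive the mutex */
	}

This is also why the error paths above were retargeted from err_alloc to the new err_mutex label: the key must be unregistered on every failure after mutex_init_with_key().
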
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index 64ea340950b6..f6fc5ed016da 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -54,6 +54,8 @@ static const char *regcache_type_name(enum regcache_type type)
return "none";
case REGCACHE_FLAT:
return "flat";
+ case REGCACHE_FLAT_S:
+ return "flat-sparse";
case REGCACHE_RBTREE:
return "rbtree";
case REGCACHE_MAPLE:
@@ -93,6 +95,8 @@ static const struct regmap_test_param regcache_types_list[] = {
{ .cache = REGCACHE_NONE, .fast_io = true },
{ .cache = REGCACHE_FLAT },
{ .cache = REGCACHE_FLAT, .fast_io = true },
+ { .cache = REGCACHE_FLAT_S },
+ { .cache = REGCACHE_FLAT_S, .fast_io = true },
{ .cache = REGCACHE_RBTREE },
{ .cache = REGCACHE_RBTREE, .fast_io = true },
{ .cache = REGCACHE_MAPLE },
@@ -104,6 +108,8 @@ KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc);
static const struct regmap_test_param real_cache_types_only_list[] = {
{ .cache = REGCACHE_FLAT },
{ .cache = REGCACHE_FLAT, .fast_io = true },
+ { .cache = REGCACHE_FLAT_S },
+ { .cache = REGCACHE_FLAT_S, .fast_io = true },
{ .cache = REGCACHE_RBTREE },
{ .cache = REGCACHE_RBTREE, .fast_io = true },
{ .cache = REGCACHE_MAPLE },
@@ -119,6 +125,12 @@ static const struct regmap_test_param real_cache_types_list[] = {
{ .cache = REGCACHE_FLAT, .from_reg = 0x2002 },
{ .cache = REGCACHE_FLAT, .from_reg = 0x2003 },
{ .cache = REGCACHE_FLAT, .from_reg = 0x2004 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0, .fast_io = true },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2001 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2002 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2003 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2004 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
@@ -136,6 +148,12 @@ static const struct regmap_test_param real_cache_types_list[] = {
KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc);
static const struct regmap_test_param sparse_cache_types_list[] = {
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0, .fast_io = true },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2001 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2002 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2003 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2004 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
@@ -736,7 +754,7 @@ static void stride(struct kunit *test)
}
}
-static struct regmap_range_cfg test_range = {
+static const struct regmap_range_cfg test_range = {
.selector_reg = 1,
.selector_mask = 0xff,
@@ -1597,6 +1615,8 @@ static const struct regmap_test_param raw_types_list[] = {
{ .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_BIG },
{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_BIG },
{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
@@ -1608,6 +1628,8 @@ KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc);
static const struct regmap_test_param raw_cache_types_list[] = {
{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_BIG },
{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 99d7fd85ca7d..29e5f3175301 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -609,4 +609,5 @@ void regmap_mmio_detach_clk(struct regmap *map)
}
EXPORT_SYMBOL_GPL(regmap_mmio_detach_clk);
+MODULE_DESCRIPTION("regmap MMIO Module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c
index 86644bbd0710..6a61629f5f89 100644
--- a/drivers/base/regmap/regmap-sdw-mbq.c
+++ b/drivers/base/regmap/regmap-sdw-mbq.c
@@ -15,11 +15,13 @@
struct regmap_mbq_context {
struct device *dev;
+ struct sdw_slave *sdw;
+
+ bool (*readable_reg)(struct device *dev, unsigned int reg);
struct regmap_sdw_mbq_cfg cfg;
int val_size;
- bool (*readable_reg)(struct device *dev, unsigned int reg);
};
static int regmap_sdw_mbq_size(struct regmap_mbq_context *ctx, unsigned int reg)
@@ -46,7 +48,7 @@ static bool regmap_sdw_mbq_deferrable(struct regmap_mbq_context *ctx, unsigned i
static int regmap_sdw_mbq_poll_busy(struct sdw_slave *slave, unsigned int reg,
struct regmap_mbq_context *ctx)
{
- struct device *dev = &slave->dev;
+ struct device *dev = ctx->dev;
int val, ret = 0;
dev_dbg(dev, "Deferring transaction for 0x%x\n", reg);
@@ -96,8 +98,7 @@ static int regmap_sdw_mbq_write_impl(struct sdw_slave *slave,
static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int val)
{
struct regmap_mbq_context *ctx = context;
- struct device *dev = ctx->dev;
- struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ struct sdw_slave *slave = ctx->sdw;
bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
int mbq_size = regmap_sdw_mbq_size(ctx, reg);
int ret;
@@ -156,8 +157,7 @@ static int regmap_sdw_mbq_read_impl(struct sdw_slave *slave,
static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
{
struct regmap_mbq_context *ctx = context;
- struct device *dev = ctx->dev;
- struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ struct sdw_slave *slave = ctx->sdw;
bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
int mbq_size = regmap_sdw_mbq_size(ctx, reg);
int ret;
@@ -208,6 +208,7 @@ static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
static struct regmap_mbq_context *
regmap_sdw_mbq_gen_context(struct device *dev,
+ struct sdw_slave *sdw,
const struct regmap_config *config,
const struct regmap_sdw_mbq_cfg *mbq_config)
{
@@ -218,6 +219,7 @@ regmap_sdw_mbq_gen_context(struct device *dev,
return ERR_PTR(-ENOMEM);
ctx->dev = dev;
+ ctx->sdw = sdw;
if (mbq_config)
ctx->cfg = *mbq_config;
@@ -228,7 +230,7 @@ regmap_sdw_mbq_gen_context(struct device *dev,
return ctx;
}
-struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
+struct regmap *__regmap_init_sdw_mbq(struct device *dev, struct sdw_slave *sdw,
const struct regmap_config *config,
const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
@@ -241,16 +243,16 @@ struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
if (ret)
return ERR_PTR(ret);
- ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config);
+ ctx = regmap_sdw_mbq_gen_context(dev, sdw, config, mbq_config);
if (IS_ERR(ctx))
return ERR_CAST(ctx);
- return __regmap_init(&sdw->dev, &regmap_sdw_mbq, ctx,
+ return __regmap_init(dev, &regmap_sdw_mbq, ctx,
config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_sdw_mbq);
-struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
+struct regmap *__devm_regmap_init_sdw_mbq(struct device *dev, struct sdw_slave *sdw,
const struct regmap_config *config,
const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
@@ -263,11 +265,11 @@ struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
if (ret)
return ERR_PTR(ret);
- ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config);
+ ctx = regmap_sdw_mbq_gen_context(dev, sdw, config, mbq_config);
if (IS_ERR(ctx))
return ERR_CAST(ctx);
- return __devm_regmap_init(&sdw->dev, &regmap_sdw_mbq, ctx,
+ return __devm_regmap_init(dev, &regmap_sdw_mbq, ctx,
config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c
index 54eb7d227cf4..e523fae73004 100644
--- a/drivers/base/regmap/regmap-slimbus.c
+++ b/drivers/base/regmap/regmap-slimbus.c
@@ -48,8 +48,7 @@ struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
if (IS_ERR(bus))
return ERR_CAST(bus);
- return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config,
- lock_key, lock_name);
+ return __regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_slimbus);
@@ -63,8 +62,7 @@ struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
if (IS_ERR(bus))
return ERR_CAST(bus);
- return __devm_regmap_init(&slimbus->dev, bus, &slimbus, config,
- lock_key, lock_name);
+ return __devm_regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index f2843f814675..ce9be3989a21 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -827,7 +827,7 @@ struct regmap *__regmap_init(struct device *dev,
map->read_flag_mask = bus->read_flag_mask;
}
- if (config && config->read && config->write) {
+ if (config->read && config->write) {
map->reg_read = _regmap_bus_read;
if (config->reg_update_bits)
map->reg_update_bits = config->reg_update_bits;
@@ -1173,6 +1173,8 @@ err_name:
err_map:
kfree(map);
err:
+ if (bus && bus->free_on_exit)
+ kfree(bus);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);
@@ -2256,12 +2258,14 @@ EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
* @field: Register field to operate on
* @bits: Bits to test
*
- * Returns -1 if the underlying regmap_field_read() fails, 0 if at least one of the
- * tested bits is not set and 1 if all tested bits are set.
+ * Returns negative errno if the underlying regmap_field_read() fails,
+ * 0 if at least one of the tested bits is not set and 1 if all tested
+ * bits are set.
*/
int regmap_field_test_bits(struct regmap_field *field, unsigned int bits)
{
- unsigned int val, ret;
+ unsigned int val;
+ int ret;
ret = regmap_field_read(field, &val);
if (ret)
@@ -3307,7 +3311,8 @@ EXPORT_SYMBOL_GPL(regmap_update_bits_base);
*/
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
- unsigned int val, ret;
+ unsigned int val;
+ int ret;
ret = regmap_read(map, reg, &val);
if (ret)
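
Note: the two regmap.c hunks above fix the classic errno-in-unsigned bug: a negative errno stored in an unsigned int survives a plain truthiness test, but any signed comparison downstream silently misbehaves. A condensed illustration:

	unsigned int uret = -EINVAL;	/* wraps to a huge positive value */

	if (uret < 0)			/* never true: unsigned comparison */
		return uret;

	int ret = -EINVAL;		/* errno values belong in a signed type */

	if (ret < 0)			/* behaves as intended */
		return ret;
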
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index deda7f35a059..16a8301c25d6 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -535,14 +535,29 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
ref_array = prop->pointer;
ref = &ref_array[index];
- refnode = software_node_fwnode(ref->node);
+ /*
+ * A software node can reference other software nodes or firmware
+ * nodes (which are the abstraction layer sitting on top of them).
+ * This is done to ensure we can create references to static software
+ * nodes before they're registered with the firmware node framework.
+ * At the time the reference is being resolved, we expect the swnodes
+ * in question to already have been registered and to be backed by
+ * a firmware node. This is why we use the fwnode API below to read the
+ * relevant properties and bump the reference count.
+ */
+
+ if (ref->swnode)
+ refnode = software_node_fwnode(ref->swnode);
+ else if (ref->fwnode)
+ refnode = ref->fwnode;
+ else
+ return -EINVAL;
+
if (!refnode)
return -ENOENT;
if (nargs_prop) {
- error = property_entry_read_int_array(ref->node->properties,
- nargs_prop, sizeof(u32),
- &nargs_prop_val, 1);
+ error = fwnode_property_read_u32(refnode, nargs_prop, &nargs_prop_val);
if (error)
return error;
@@ -555,7 +570,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
if (!args)
return 0;
- args->fwnode = software_node_get(refnode);
+ args->fwnode = fwnode_handle_get(refnode);
args->nargs = nargs;
for (i = 0; i < nargs; i++)
@@ -635,7 +650,10 @@ software_node_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
ref = prop->pointer;
- return software_node_get(software_node_fwnode(ref[0].node));
+ if (!ref->swnode)
+ return NULL;
+
+ return software_node_get(software_node_fwnode(ref->swnode));
}
static struct fwnode_handle *
@@ -844,7 +862,7 @@ swnode_register(const struct software_node *node, struct swnode *parent,
* of this function or by ordering the array such that parent comes before
* child.
*/
-int software_node_register_node_group(const struct software_node **node_group)
+int software_node_register_node_group(const struct software_node * const *node_group)
{
unsigned int i;
int ret;
@@ -877,8 +895,7 @@ EXPORT_SYMBOL_GPL(software_node_register_node_group);
* remove the nodes individually, in the correct order (child before
* parent).
*/
-void software_node_unregister_node_group(
- const struct software_node **node_group)
+void software_node_unregister_node_group(const struct software_node * const *node_group)
{
unsigned int i = 0;
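
For context, a sketch of the pattern the comment above describes: a static software node referencing another static node before either is registered. Node and property names are invented, and this assumes PROPERTY_ENTRY_REF() and the group helpers keep the forms shown in this patch:

static const struct software_node example_supplier = {
        .name = "example-supplier",
};

static const struct property_entry example_consumer_props[] = {
        PROPERTY_ENTRY_REF("example-link", &example_supplier),
        { }
};

static const struct software_node example_consumer = {
        .name = "example-consumer",
        .properties = example_consumer_props,
};

/* NULL-terminated, parent/supplier first; matches the new const-correct type */
static const struct software_node * const example_group[] = {
        &example_supplier, &example_consumer, NULL
};

/* software_node_register_node_group(example_group); */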
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index 13db1f78d2ce..483adb796654 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -11,32 +11,32 @@
#include <linux/suspend.h>
#include <trace/events/power.h>
-static LIST_HEAD(syscore_ops_list);
-static DEFINE_MUTEX(syscore_ops_lock);
+static LIST_HEAD(syscore_list);
+static DEFINE_MUTEX(syscore_lock);
/**
- * register_syscore_ops - Register a set of system core operations.
- * @ops: System core operations to register.
+ * register_syscore - Register a set of system core operations.
+ * @syscore: System core operations to register.
*/
-void register_syscore_ops(struct syscore_ops *ops)
+void register_syscore(struct syscore *syscore)
{
- mutex_lock(&syscore_ops_lock);
- list_add_tail(&ops->node, &syscore_ops_list);
- mutex_unlock(&syscore_ops_lock);
+ mutex_lock(&syscore_lock);
+ list_add_tail(&syscore->node, &syscore_list);
+ mutex_unlock(&syscore_lock);
}
-EXPORT_SYMBOL_GPL(register_syscore_ops);
+EXPORT_SYMBOL_GPL(register_syscore);
/**
- * unregister_syscore_ops - Unregister a set of system core operations.
- * @ops: System core operations to unregister.
+ * unregister_syscore - Unregister a set of system core operations.
+ * @syscore: System core operations to unregister.
*/
-void unregister_syscore_ops(struct syscore_ops *ops)
+void unregister_syscore(struct syscore *syscore)
{
- mutex_lock(&syscore_ops_lock);
- list_del(&ops->node);
- mutex_unlock(&syscore_ops_lock);
+ mutex_lock(&syscore_lock);
+ list_del(&syscore->node);
+ mutex_unlock(&syscore_lock);
}
-EXPORT_SYMBOL_GPL(unregister_syscore_ops);
+EXPORT_SYMBOL_GPL(unregister_syscore);
#ifdef CONFIG_PM_SLEEP
/**
@@ -46,7 +46,7 @@ EXPORT_SYMBOL_GPL(unregister_syscore_ops);
*/
int syscore_suspend(void)
{
- struct syscore_ops *ops;
+ struct syscore *syscore;
int ret = 0;
trace_suspend_resume(TPS("syscore_suspend"), 0, true);
@@ -59,25 +59,27 @@ int syscore_suspend(void)
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled before system core suspend.\n");
- list_for_each_entry_reverse(ops, &syscore_ops_list, node)
- if (ops->suspend) {
- pm_pr_dbg("Calling %pS\n", ops->suspend);
- ret = ops->suspend();
+ list_for_each_entry_reverse(syscore, &syscore_list, node)
+ if (syscore->ops->suspend) {
+ pm_pr_dbg("Calling %pS\n", syscore->ops->suspend);
+ ret = syscore->ops->suspend(syscore->data);
if (ret)
goto err_out;
WARN_ONCE(!irqs_disabled(),
- "Interrupts enabled after %pS\n", ops->suspend);
+ "Interrupts enabled after %pS\n",
+ syscore->ops->suspend);
}
trace_suspend_resume(TPS("syscore_suspend"), 0, false);
return 0;
err_out:
- pr_err("PM: System core suspend callback %pS failed.\n", ops->suspend);
+ pr_err("PM: System core suspend callback %pS failed.\n",
+ syscore->ops->suspend);
- list_for_each_entry_continue(ops, &syscore_ops_list, node)
- if (ops->resume)
- ops->resume();
+ list_for_each_entry_continue(syscore, &syscore_list, node)
+ if (syscore->ops->resume)
+ syscore->ops->resume(syscore->data);
return ret;
}
@@ -90,18 +92,19 @@ EXPORT_SYMBOL_GPL(syscore_suspend);
*/
void syscore_resume(void)
{
- struct syscore_ops *ops;
+ struct syscore *syscore;
trace_suspend_resume(TPS("syscore_resume"), 0, true);
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled before system core resume.\n");
- list_for_each_entry(ops, &syscore_ops_list, node)
- if (ops->resume) {
- pm_pr_dbg("Calling %pS\n", ops->resume);
- ops->resume();
+ list_for_each_entry(syscore, &syscore_list, node)
+ if (syscore->ops->resume) {
+ pm_pr_dbg("Calling %pS\n", syscore->ops->resume);
+ syscore->ops->resume(syscore->data);
WARN_ONCE(!irqs_disabled(),
- "Interrupts enabled after %pS\n", ops->resume);
+ "Interrupts enabled after %pS\n",
+ syscore->ops->resume);
}
trace_suspend_resume(TPS("syscore_resume"), 0, false);
}
@@ -113,16 +116,17 @@ EXPORT_SYMBOL_GPL(syscore_resume);
*/
void syscore_shutdown(void)
{
- struct syscore_ops *ops;
+ struct syscore *syscore;
- mutex_lock(&syscore_ops_lock);
+ mutex_lock(&syscore_lock);
- list_for_each_entry_reverse(ops, &syscore_ops_list, node)
- if (ops->shutdown) {
+ list_for_each_entry_reverse(syscore, &syscore_list, node)
+ if (syscore->ops->shutdown) {
if (initcall_debug)
- pr_info("PM: Calling %pS\n", ops->shutdown);
- ops->shutdown();
+ pr_info("PM: Calling %pS\n",
+ syscore->ops->shutdown);
+ syscore->ops->shutdown(syscore->data);
}
- mutex_unlock(&syscore_ops_lock);
+ mutex_unlock(&syscore_lock);
}
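
A hypothetical driver-side view of the reworked registration API. Only what the hunks above show is relied on (the .node/.ops/.data members and callbacks taking a data pointer); the ops structure's name and anything beyond that is an assumption:

static int example_syscore_suspend(void *data)
{
        /* quiesce the device; runs late, with interrupts disabled */
        return 0;
}

static void example_syscore_resume(void *data)
{
        /* restore state; runs early, with interrupts still disabled */
}

static const struct syscore_ops example_ops = {
        .suspend = example_syscore_suspend,
        .resume  = example_syscore_resume,
};

static struct syscore example_syscore = {
        .ops  = &example_ops,
        .data = NULL,   /* driver context, passed back to the callbacks */
};

/* register_syscore(&example_syscore); ... unregister_syscore(&example_syscore); */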
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 8b42df05feff..c890e2a5b428 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -179,7 +179,7 @@ static umode_t topology_is_visible(struct kobject *kobj,
static const struct attribute_group topology_attr_group = {
.attrs = default_attrs,
- .bin_attrs_new = bin_attrs,
+ .bin_attrs = bin_attrs,
.is_visible = topology_is_visible,
.name = "topology"
};
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index f021e27644e0..658c7e2ac8bf 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -186,7 +186,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->request = bcma_gpio_request;
chip->free = bcma_gpio_free;
chip->get = bcma_gpio_get_value;
- chip->set_rv = bcma_gpio_set_value;
+ chip->set = bcma_gpio_set_value;
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
chip->parent = bus->dev;
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 6ecfc821cf83..72f045e6ed51 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -294,6 +294,8 @@ static int bcma_register_devices(struct bcma_bus *bus)
int err;
list_for_each_entry(core, &bus->cores, list) {
+ struct device_node *np;
+
/* We support that core ourselves */
switch (core->id.id) {
case BCMA_CORE_4706_CHIPCOMMON:
@@ -311,6 +313,10 @@ static int bcma_register_devices(struct bcma_bus *bus)
if (bcma_is_core_needed_early(core->id.id))
continue;
+ np = core->dev.of_node;
+ if (np && !of_device_is_available(np))
+ continue;
+
/* Only first GMAC core on BCM4706 is connected and working */
if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
core->core_unit > 0)
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 0f70e2374e7f..77d694448990 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -17,6 +17,7 @@ menuconfig BLK_DEV
if BLK_DEV
source "drivers/block/null_blk/Kconfig"
+source "drivers/block/rnull/Kconfig"
config BLK_DEV_FD
tristate "Normal floppy disk support"
@@ -256,49 +257,6 @@ config BLK_DEV_RAM_SIZE
The default value is 4096 kilobytes. Only change this if you know
what you are doing.
-config CDROM_PKTCDVD
- tristate "Packet writing on CD/DVD media (DEPRECATED)"
- depends on !UML
- depends on SCSI
- select CDROM
- help
- Note: This driver is deprecated and will be removed from the
- kernel in the near future!
-
- If you have a CDROM/DVD drive that supports packet writing, say
- Y to include support. It should work with any MMC/Mt Fuji
- compliant ATAPI or SCSI drive, which is just about any newer
- DVD/CD writer.
-
- Currently only writing to CD-RW, DVD-RW, DVD+RW and DVDRAM discs
- is possible.
- DVD-RW disks must be in restricted overwrite mode.
-
- See the file <file:Documentation/cdrom/packet-writing.rst>
- for further information on the use of this driver.
-
- To compile this driver as a module, choose M here: the
- module will be called pktcdvd.
-
-config CDROM_PKTCDVD_BUFFERS
- int "Free buffers for data gathering"
- depends on CDROM_PKTCDVD
- default "8"
- help
- This controls the maximum number of active concurrent packets. More
- concurrent packets can increase write performance, but also require
- more memory. Each concurrent packet will require approximately 64Kb
- of non-swappable kernel memory, memory which will be allocated when
- a disc is opened for writing.
-
-config CDROM_PKTCDVD_WCACHE
- bool "Enable write caching"
- depends on CDROM_PKTCDVD
- help
- If enabled, write caching will be set for the CD-R/W device. For now
- this option is dangerous unless the CD-RW media is known good, as we
- don't do deferred write error handling yet.
-
config ATA_OVER_ETH
tristate "ATA over Ethernet support"
depends on NET
@@ -354,15 +312,6 @@ config VIRTIO_BLK
This is the virtual block driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen). Say Y or M.
-config BLK_DEV_RUST_NULL
- tristate "Rust null block driver (Experimental)"
- depends on RUST
- help
- This is the Rust implementation of the null block driver. For now it
- is only a minimal stub.
-
- If unsure, say N.
-
config BLK_DEV_RBD
tristate "Rados block device (RBD)"
depends on INET && BLOCK
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 097707aca725..2d8096eb8cdf 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -9,9 +9,6 @@
# needed for trace events
ccflags-y += -I$(src)
-obj-$(CONFIG_BLK_DEV_RUST_NULL) += rnull_mod.o
-rnull_mod-y := rnull.o
-
obj-$(CONFIG_MAC_FLOPPY) += swim3.o
obj-$(CONFIG_BLK_DEV_SWIM) += swim_mod.o
obj-$(CONFIG_BLK_DEV_FD) += floppy.o
@@ -23,7 +20,6 @@ obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
obj-$(CONFIG_N64CART) += n64cart.o
obj-$(CONFIG_BLK_DEV_RAM) += brd.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
-obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
@@ -39,6 +35,7 @@ obj-$(CONFIG_ZRAM) += zram/
obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk/
+obj-$(CONFIG_BLK_DEV_RUST_NULL) += rnull/
obj-$(CONFIG_BLK_DEV_UBLK) += ublk_drv.o
obj-$(CONFIG_BLK_DEV_ZONED_LOOP) += zloop.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 6357d86eafdc..2932b6653b6f 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1523,13 +1523,13 @@ static blk_status_t amiflop_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
-static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int fd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- int drive = MINOR(bdev->bd_dev) & 3;
+ struct amiga_floppy_struct *p = disk->private_data;
- geo->heads = unit[drive].type->heads;
- geo->sectors = unit[drive].dtype->sects * unit[drive].type->sect_mult;
- geo->cylinders = unit[drive].type->tracks;
+ geo->heads = p->type->heads;
+ geo->sectors = p->dtype->sects * p->type->sect_mult;
+ geo->cylinders = p->type->tracks;
return 0;
}
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 749ae1246f4c..d35caa3c69e1 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -80,6 +80,7 @@ enum {
DEVFL_NEWSIZE = (1<<6), /* need to update dev size in block layer */
DEVFL_FREEING = (1<<7), /* set when device is being cleaned up */
DEVFL_FREED = (1<<8), /* device has been cleaned up */
+ DEVFL_DEAD = (1<<9), /* device has timed out of aoe_deadsecs */
};
enum {
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 00b74a845328..34ead75e7e02 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -269,9 +269,9 @@ static blk_status_t aoeblk_queue_rq(struct blk_mq_hw_ctx *hctx,
}
static int
-aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+aoeblk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct aoedev *d = bdev->bd_disk->private_data;
+ struct aoedev *d = disk->private_data;
if ((d->flags & DEVFL_UP) == 0) {
printk(KERN_ERR "aoe: disk not up\n");
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 50cc90f6ab35..a9affb7c264d 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -754,7 +754,7 @@ rexmit_timer(struct timer_list *timer)
utgts = count_targets(d, NULL);
- if (d->flags & DEVFL_TKILL) {
+ if (d->flags & (DEVFL_TKILL | DEVFL_DEAD)) {
spin_unlock_irqrestore(&d->lock, flags);
return;
}
@@ -786,7 +786,8 @@ rexmit_timer(struct timer_list *timer)
* to clean up.
*/
list_splice(&flist, &d->factive[0]);
- aoedev_downdev(d);
+ d->flags |= DEVFL_DEAD;
+ queue_work(aoe_wq, &d->work);
goto out;
}
@@ -898,6 +899,9 @@ aoecmd_sleepwork(struct work_struct *work)
{
struct aoedev *d = container_of(work, struct aoedev, work);
+ if (d->flags & DEVFL_DEAD)
+ aoedev_downdev(d);
+
if (d->flags & DEVFL_GDALLOC)
aoeblk_gdalloc(d);
@@ -1757,6 +1761,6 @@ aoecmd_exit(void)
kfree(kts);
kfree(ktiowq);
- free_page((unsigned long) page_address(empty_page));
+ __free_page(empty_page);
empty_page = NULL;
}
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index bba05f0c5bbd..3a240755045b 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -198,9 +198,13 @@ aoedev_downdev(struct aoedev *d)
{
struct aoetgt *t, **tt, **te;
struct list_head *head, *pos, *nx;
+ struct request *rq, *rqnext;
int i;
+ unsigned long flags;
- d->flags &= ~DEVFL_UP;
+ spin_lock_irqsave(&d->lock, flags);
+ d->flags &= ~(DEVFL_UP | DEVFL_DEAD);
+ spin_unlock_irqrestore(&d->lock, flags);
/* clean out active and to-be-retransmitted buffers */
for (i = 0; i < NFACTIVE; i++) {
@@ -223,6 +227,13 @@ aoedev_downdev(struct aoedev *d)
/* clean out the in-process request (if any) */
aoe_failip(d);
+ /* clean out any queued block requests */
+ list_for_each_entry_safe(rq, rqnext, &d->rq_list, queuelist) {
+ list_del_init(&rq->queuelist);
+ blk_mq_start_request(rq);
+ blk_mq_end_request(rq, BLK_STS_IOERR);
+ }
+
/* fast fail all pending I/O */
if (d->blkq) {
/* UP is cleared, freeze+quiesce to insure all are errored */
diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c
index cdf6e4041bb9..3b21750038ee 100644
--- a/drivers/block/aoe/aoemain.c
+++ b/drivers/block/aoe/aoemain.c
@@ -44,7 +44,7 @@ aoe_init(void)
{
int ret;
- aoe_wq = alloc_workqueue("aoe_wq", 0, 0);
+ aoe_wq = alloc_workqueue("aoe_wq", WQ_PERCPU, 0);
if (!aoe_wq)
return -ENOMEM;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index b1be6c510372..9778259b30d4 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -44,43 +44,74 @@ struct brd_device {
};
/*
- * Look up and return a brd's page for a given sector.
+ * Look up and return a brd's page, with a reference held, for a given sector.
*/
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
- return xa_load(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT);
+ struct page *page;
+ XA_STATE(xas, &brd->brd_pages, sector >> PAGE_SECTORS_SHIFT);
+
+ rcu_read_lock();
+repeat:
+ page = xas_load(&xas);
+ if (xas_retry(&xas, page)) {
+ xas_reset(&xas);
+ goto repeat;
+ }
+
+ if (!page)
+ goto out;
+
+ if (!get_page_unless_zero(page)) {
+ xas_reset(&xas);
+ goto repeat;
+ }
+
+ if (unlikely(page != xas_reload(&xas))) {
+ put_page(page);
+ xas_reset(&xas);
+ goto repeat;
+ }
+out:
+ rcu_read_unlock();
+
+ return page;
}
/*
* Insert a new page for a given sector, if one does not already exist.
+ * The returned page has a reference held on it.
*/
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector,
blk_opf_t opf)
- __releases(rcu)
- __acquires(rcu)
{
gfp_t gfp = (opf & REQ_NOWAIT) ? GFP_NOWAIT : GFP_NOIO;
struct page *page, *ret;
- rcu_read_unlock();
page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
- rcu_read_lock();
if (!page)
return ERR_PTR(-ENOMEM);
xa_lock(&brd->brd_pages);
ret = __xa_cmpxchg(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT, NULL,
page, gfp);
- if (ret) {
+ if (!ret) {
+ brd->brd_nr_pages++;
+ get_page(page);
+ xa_unlock(&brd->brd_pages);
+ return page;
+ }
+
+ if (!xa_is_err(ret)) {
+ get_page(ret);
xa_unlock(&brd->brd_pages);
- __free_page(page);
- if (xa_is_err(ret))
- return ERR_PTR(xa_err(ret));
+ put_page(page);
return ret;
}
- brd->brd_nr_pages++;
+
xa_unlock(&brd->brd_pages);
- return page;
+ put_page(page);
+ return ERR_PTR(xa_err(ret));
}
/*
@@ -93,7 +124,7 @@ static void brd_free_pages(struct brd_device *brd)
pgoff_t idx;
xa_for_each(&brd->brd_pages, idx, page) {
- __free_page(page);
+ put_page(page);
cond_resched();
}
@@ -115,7 +146,6 @@ static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
- rcu_read_lock();
page = brd_lookup_page(brd, sector);
if (!page && op_is_write(opf)) {
page = brd_insert_page(brd, sector, opf);
@@ -133,13 +163,13 @@ static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
memset(kaddr, 0, bv.bv_len);
}
kunmap_local(kaddr);
- rcu_read_unlock();
bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len);
+ if (page)
+ put_page(page);
return true;
out_error:
- rcu_read_unlock();
if (PTR_ERR(page) == -ENOMEM && (opf & REQ_NOWAIT))
bio_wouldblock_error(bio);
else
@@ -147,13 +177,6 @@ out_error:
return false;
}
-static void brd_free_one_page(struct rcu_head *head)
-{
- struct page *page = container_of(head, struct page, rcu_head);
-
- __free_page(page);
-}
-
static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
{
sector_t aligned_sector = round_up(sector, PAGE_SECTORS);
@@ -168,7 +191,7 @@ static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
while (aligned_sector < aligned_end && aligned_sector < rd_size * 2) {
page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT);
if (page) {
- call_rcu(&page->rcu_head, brd_free_one_page);
+ put_page(page);
brd->brd_nr_pages--;
}
aligned_sector += PAGE_SECTORS;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 85ca000a0564..d90fa3e7f4cf 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1210,7 +1210,7 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned
return err;
}
-/**
+/*
* drbd_bm_read() - Read the whole bitmap from its on disk location.
* @device: DRBD device.
*/
@@ -1221,7 +1221,7 @@ int drbd_bm_read(struct drbd_device *device,
return bm_rw(device, BM_AIO_READ, 0);
}
-/**
+/*
* drbd_bm_write() - Write the whole bitmap to its on disk location.
* @device: DRBD device.
*
@@ -1233,7 +1233,7 @@ int drbd_bm_write(struct drbd_device *device,
return bm_rw(device, 0, 0);
}
-/**
+/*
* drbd_bm_write_all() - Write the whole bitmap to its on disk location.
* @device: DRBD device.
*
@@ -1255,7 +1255,7 @@ int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_ho
return bm_rw(device, BM_AIO_COPY_PAGES, upper_idx);
}
-/**
+/*
* drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
* @device: DRBD device.
*
@@ -1272,7 +1272,7 @@ int drbd_bm_write_copy_pages(struct drbd_device *device,
return bm_rw(device, BM_AIO_COPY_PAGES, 0);
}
-/**
+/*
* drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
* @device: DRBD device.
*/
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index e21492981f7d..f6d6276974ee 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -380,6 +380,9 @@ enum {
/* this is/was a write request */
__EE_WRITE,
+ /* hand back using mempool_free(e, drbd_buffer_page_pool) */
+ __EE_RELEASE_TO_MEMPOOL,
+
/* this is/was a write same request */
__EE_WRITE_SAME,
@@ -402,6 +405,7 @@ enum {
#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED (1<<__EE_SUBMITTED)
#define EE_WRITE (1<<__EE_WRITE)
+#define EE_RELEASE_TO_MEMPOOL (1<<__EE_RELEASE_TO_MEMPOOL)
#define EE_WRITE_SAME (1<<__EE_WRITE_SAME)
#define EE_APPLICATION (1<<__EE_APPLICATION)
#define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ)
@@ -858,7 +862,6 @@ struct drbd_device {
struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
struct list_head done_ee; /* need to send P_WRITE_ACK */
struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */
- struct list_head net_ee; /* zero-copy network send in progress */
struct list_head resync_reads;
atomic_t pp_in_use; /* allocated from page pool */
@@ -1329,24 +1332,6 @@ extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
extern mempool_t drbd_request_mempool;
extern mempool_t drbd_ee_mempool;
-/* drbd's page pool, used to buffer data received from the peer,
- * or data requested by the peer.
- *
- * This does not have an emergency reserve.
- *
- * When allocating from this pool, it first takes pages from the pool.
- * Only if the pool is depleted will try to allocate from the system.
- *
- * The assumption is that pages taken from this pool will be processed,
- * and given back, "quickly", and then can be recycled, so we can avoid
- * frequent calls to alloc_page(), and still will be able to make progress even
- * under memory pressure.
- */
-extern struct page *drbd_pp_pool;
-extern spinlock_t drbd_pp_lock;
-extern int drbd_pp_vacant;
-extern wait_queue_head_t drbd_pp_wait;
-
/* We also need a standard (emergency-reserve backed) page pool
* for meta data IO (activity log, bitmap).
* We can keep it global, as long as it is used as "N pages at a time".
@@ -1354,6 +1339,7 @@ extern wait_queue_head_t drbd_pp_wait;
*/
#define DRBD_MIN_POOL_PAGES 128
extern mempool_t drbd_md_io_page_pool;
+extern mempool_t drbd_buffer_page_pool;
/* We also need to make sure we get a bio
* when we need it for housekeeping purposes */
@@ -1488,10 +1474,7 @@ extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *,
sector_t, unsigned int,
unsigned int,
gfp_t) __must_hold(local);
-extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
- int);
-#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
-#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
+extern void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *req);
extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
extern int drbd_connected(struct drbd_peer_device *);
@@ -1610,16 +1593,6 @@ static inline struct page *page_chain_next(struct page *page)
for (; page && ({ n = page_chain_next(page); 1; }); page = n)
-static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
-{
- struct page *page = peer_req->pages;
- page_chain_for_each(page) {
- if (page_count(page) > 1)
- return 1;
- }
- return 0;
-}
-
static inline union drbd_state drbd_read_state(struct drbd_device *device)
{
struct drbd_resource *resource = device->resource;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 52724b79be30..c73376886e7a 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -114,20 +114,10 @@ struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
mempool_t drbd_request_mempool;
mempool_t drbd_ee_mempool;
mempool_t drbd_md_io_page_pool;
+mempool_t drbd_buffer_page_pool;
struct bio_set drbd_md_io_bio_set;
struct bio_set drbd_io_bio_set;
-/* I do not use a standard mempool, because:
- 1) I want to hand out the pre-allocated objects first.
- 2) I want to be able to interrupt sleeping allocation with a signal.
- Note: This is a single linked list, the next pointer is the private
- member of struct page.
- */
-struct page *drbd_pp_pool;
-DEFINE_SPINLOCK(drbd_pp_lock);
-int drbd_pp_vacant;
-wait_queue_head_t drbd_pp_wait;
-
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
static const struct block_device_operations drbd_ops = {
@@ -1611,6 +1601,7 @@ static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *b
static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
struct drbd_peer_request *peer_req)
{
+ bool use_sendpage = !(peer_req->flags & EE_RELEASE_TO_MEMPOOL);
struct page *page = peer_req->pages;
unsigned len = peer_req->i.size;
int err;
@@ -1619,8 +1610,13 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
page_chain_for_each(page) {
unsigned l = min_t(unsigned, len, PAGE_SIZE);
- err = _drbd_send_page(peer_device, page, 0, l,
- page_chain_next(page) ? MSG_MORE : 0);
+ if (likely(use_sendpage))
+ err = _drbd_send_page(peer_device, page, 0, l,
+ page_chain_next(page) ? MSG_MORE : 0);
+ else
+ err = _drbd_no_send_page(peer_device, page, 0, l,
+ page_chain_next(page) ? MSG_MORE : 0);
+
if (err)
return err;
len -= l;
@@ -1962,7 +1958,6 @@ void drbd_init_set_defaults(struct drbd_device *device)
INIT_LIST_HEAD(&device->sync_ee);
INIT_LIST_HEAD(&device->done_ee);
INIT_LIST_HEAD(&device->read_ee);
- INIT_LIST_HEAD(&device->net_ee);
INIT_LIST_HEAD(&device->resync_reads);
INIT_LIST_HEAD(&device->resync_work.list);
INIT_LIST_HEAD(&device->unplug_work.list);
@@ -2043,7 +2038,6 @@ void drbd_device_cleanup(struct drbd_device *device)
D_ASSERT(device, list_empty(&device->sync_ee));
D_ASSERT(device, list_empty(&device->done_ee));
D_ASSERT(device, list_empty(&device->read_ee));
- D_ASSERT(device, list_empty(&device->net_ee));
D_ASSERT(device, list_empty(&device->resync_reads));
D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
D_ASSERT(device, list_empty(&device->resync_work.list));
@@ -2055,19 +2049,11 @@ void drbd_device_cleanup(struct drbd_device *device)
static void drbd_destroy_mempools(void)
{
- struct page *page;
-
- while (drbd_pp_pool) {
- page = drbd_pp_pool;
- drbd_pp_pool = (struct page *)page_private(page);
- __free_page(page);
- drbd_pp_vacant--;
- }
-
/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
bioset_exit(&drbd_io_bio_set);
bioset_exit(&drbd_md_io_bio_set);
+ mempool_exit(&drbd_buffer_page_pool);
mempool_exit(&drbd_md_io_page_pool);
mempool_exit(&drbd_ee_mempool);
mempool_exit(&drbd_request_mempool);
@@ -2086,9 +2072,8 @@ static void drbd_destroy_mempools(void)
static int drbd_create_mempools(void)
{
- struct page *page;
const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
- int i, ret;
+ int ret;
/* caches */
drbd_request_cache = kmem_cache_create(
@@ -2125,6 +2110,10 @@ static int drbd_create_mempools(void)
if (ret)
goto Enomem;
+ ret = mempool_init_page_pool(&drbd_buffer_page_pool, number, 0);
+ if (ret)
+ goto Enomem;
+
ret = mempool_init_slab_pool(&drbd_request_mempool, number,
drbd_request_cache);
if (ret)
@@ -2134,15 +2123,6 @@ static int drbd_create_mempools(void)
if (ret)
goto Enomem;
- for (i = 0; i < number; i++) {
- page = alloc_page(GFP_HIGHUSER);
- if (!page)
- goto Enomem;
- set_page_private(page, (unsigned long)drbd_pp_pool);
- drbd_pp_pool = page;
- }
- drbd_pp_vacant = number;
-
return 0;
Enomem:
@@ -2169,10 +2149,6 @@ static void drbd_release_all_peer_reqs(struct drbd_device *device)
rr = drbd_free_peer_reqs(device, &device->done_ee);
if (rr)
drbd_err(device, "%d EEs in done list found!\n", rr);
-
- rr = drbd_free_peer_reqs(device, &device->net_ee);
- if (rr)
- drbd_err(device, "%d EEs in net list found!\n", rr);
}
/* caution. no locking. */
@@ -2863,11 +2839,6 @@ static int __init drbd_init(void)
return err;
}
- /*
- * allocate all necessary structs
- */
- init_waitqueue_head(&drbd_pp_wait);
-
drbd_proc = NULL; /* play safe for drbd_cleanup */
idr_init(&drbd_devices);
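
The page pool conversion above reduces to the generic mempool page API. A minimal sketch of the pattern; the pool name and sizing are illustrative:

static mempool_t example_page_pool;

static int example_init(void)
{
        /* keep 128 order-0 pages in reserve; falls back to alloc_page() */
        return mempool_init_page_pool(&example_page_pool, 128, 0);
}

static void example_io(void)
{
        struct page *page = mempool_alloc(&example_page_pool, GFP_NOIO);

        if (!page)
                return;
        /* ... fill the page, hand it to the I/O path ... */
        mempool_free(page, &example_page_pool);
}

static void example_exit(void)
{
        mempool_exit(&example_page_pool);
}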
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index e09930c2b226..91f3b8afb63c 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1330,6 +1330,7 @@ void drbd_reconsider_queue_parameters(struct drbd_device *device,
lim.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
else
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
if ((lim.discard_granularity >> SECTOR_SHIFT) >
lim.max_hw_discard_sectors) {
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index e5a2e5f7887b..3de919b6f0e1 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -33,6 +33,7 @@
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
+#include <linux/mempool.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
@@ -63,182 +64,31 @@ static int e_end_block(struct drbd_work *, int);
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
-/*
- * some helper functions to deal with single linked page lists,
- * page->private being our "next" pointer.
- */
-
-/* If at least n pages are linked at head, get n pages off.
- * Otherwise, don't modify head, and return NULL.
- * Locking is the responsibility of the caller.
- */
-static struct page *page_chain_del(struct page **head, int n)
-{
- struct page *page;
- struct page *tmp;
-
- BUG_ON(!n);
- BUG_ON(!head);
-
- page = *head;
-
- if (!page)
- return NULL;
-
- while (page) {
- tmp = page_chain_next(page);
- if (--n == 0)
- break; /* found sufficient pages */
- if (tmp == NULL)
- /* insufficient pages, don't use any of them. */
- return NULL;
- page = tmp;
- }
-
- /* add end of list marker for the returned list */
- set_page_private(page, 0);
- /* actual return value, and adjustment of head */
- page = *head;
- *head = tmp;
- return page;
-}
-
-/* may be used outside of locks to find the tail of a (usually short)
- * "private" page chain, before adding it back to a global chain head
- * with page_chain_add() under a spinlock. */
-static struct page *page_chain_tail(struct page *page, int *len)
-{
- struct page *tmp;
- int i = 1;
- while ((tmp = page_chain_next(page))) {
- ++i;
- page = tmp;
- }
- if (len)
- *len = i;
- return page;
-}
-
-static int page_chain_free(struct page *page)
-{
- struct page *tmp;
- int i = 0;
- page_chain_for_each_safe(page, tmp) {
- put_page(page);
- ++i;
- }
- return i;
-}
-
-static void page_chain_add(struct page **head,
- struct page *chain_first, struct page *chain_last)
-{
-#if 1
- struct page *tmp;
- tmp = page_chain_tail(chain_first, NULL);
- BUG_ON(tmp != chain_last);
-#endif
-
- /* add chain to head */
- set_page_private(chain_last, (unsigned long)*head);
- *head = chain_first;
-}
-
-static struct page *__drbd_alloc_pages(struct drbd_device *device,
- unsigned int number)
+static struct page *__drbd_alloc_pages(unsigned int number)
{
struct page *page = NULL;
struct page *tmp = NULL;
unsigned int i = 0;
- /* Yes, testing drbd_pp_vacant outside the lock is racy.
- * So what. It saves a spin_lock. */
- if (drbd_pp_vacant >= number) {
- spin_lock(&drbd_pp_lock);
- page = page_chain_del(&drbd_pp_pool, number);
- if (page)
- drbd_pp_vacant -= number;
- spin_unlock(&drbd_pp_lock);
- if (page)
- return page;
- }
-
/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
for (i = 0; i < number; i++) {
- tmp = alloc_page(GFP_TRY);
+ tmp = mempool_alloc(&drbd_buffer_page_pool, GFP_TRY);
if (!tmp)
- break;
+ goto fail;
set_page_private(tmp, (unsigned long)page);
page = tmp;
}
-
- if (i == number)
- return page;
-
- /* Not enough pages immediately available this time.
- * No need to jump around here, drbd_alloc_pages will retry this
- * function "soon". */
- if (page) {
- tmp = page_chain_tail(page, NULL);
- spin_lock(&drbd_pp_lock);
- page_chain_add(&drbd_pp_pool, page, tmp);
- drbd_pp_vacant += i;
- spin_unlock(&drbd_pp_lock);
+ return page;
+fail:
+ page_chain_for_each_safe(page, tmp) {
+ set_page_private(page, 0);
+ mempool_free(page, &drbd_buffer_page_pool);
}
return NULL;
}
-static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
- struct list_head *to_be_freed)
-{
- struct drbd_peer_request *peer_req, *tmp;
-
- /* The EEs are always appended to the end of the list. Since
- they are sent in order over the wire, they have to finish
- in order. As soon as we see the first not finished we can
- stop to examine the list... */
-
- list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
- if (drbd_peer_req_has_active_page(peer_req))
- break;
- list_move(&peer_req->w.list, to_be_freed);
- }
-}
-
-static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
-{
- LIST_HEAD(reclaimed);
- struct drbd_peer_request *peer_req, *t;
-
- spin_lock_irq(&device->resource->req_lock);
- reclaim_finished_net_peer_reqs(device, &reclaimed);
- spin_unlock_irq(&device->resource->req_lock);
- list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
- drbd_free_net_peer_req(device, peer_req);
-}
-
-static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
-{
- struct drbd_peer_device *peer_device;
- int vnr;
-
- rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
- struct drbd_device *device = peer_device->device;
- if (!atomic_read(&device->pp_in_use_by_net))
- continue;
-
- kref_get(&device->kref);
- rcu_read_unlock();
- drbd_reclaim_net_peer_reqs(device);
- kref_put(&device->kref, drbd_destroy_device);
- rcu_read_lock();
- }
- rcu_read_unlock();
-}
-
/**
* drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
* @peer_device: DRBD device.
@@ -263,9 +113,8 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
bool retry)
{
struct drbd_device *device = peer_device->device;
- struct page *page = NULL;
+ struct page *page;
struct net_conf *nc;
- DEFINE_WAIT(wait);
unsigned int mxb;
rcu_read_lock();
@@ -273,37 +122,9 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
mxb = nc ? nc->max_buffers : 1000000;
rcu_read_unlock();
- if (atomic_read(&device->pp_in_use) < mxb)
- page = __drbd_alloc_pages(device, number);
-
- /* Try to keep the fast path fast, but occasionally we need
- * to reclaim the pages we lended to the network stack. */
- if (page && atomic_read(&device->pp_in_use_by_net) > 512)
- drbd_reclaim_net_peer_reqs(device);
-
- while (page == NULL) {
- prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
-
- drbd_reclaim_net_peer_reqs(device);
-
- if (atomic_read(&device->pp_in_use) < mxb) {
- page = __drbd_alloc_pages(device, number);
- if (page)
- break;
- }
-
- if (!retry)
- break;
-
- if (signal_pending(current)) {
- drbd_warn(device, "drbd_alloc_pages interrupted!\n");
- break;
- }
-
- if (schedule_timeout(HZ/10) == 0)
- mxb = UINT_MAX;
- }
- finish_wait(&drbd_pp_wait, &wait);
+ if (atomic_read(&device->pp_in_use) >= mxb)
+ schedule_timeout_interruptible(HZ / 10);
+ page = __drbd_alloc_pages(number);
if (page)
atomic_add(number, &device->pp_in_use);
@@ -314,29 +135,25 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
* Is also used from inside an other spin_lock_irq(&resource->req_lock);
* Either links the page chain back to the global pool,
* or returns all pages to the system. */
-static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
+static void drbd_free_pages(struct drbd_device *device, struct page *page)
{
- atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
- int i;
+ struct page *tmp;
+ int i = 0;
if (page == NULL)
return;
- if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
- i = page_chain_free(page);
- else {
- struct page *tmp;
- tmp = page_chain_tail(page, &i);
- spin_lock(&drbd_pp_lock);
- page_chain_add(&drbd_pp_pool, page, tmp);
- drbd_pp_vacant += i;
- spin_unlock(&drbd_pp_lock);
- }
- i = atomic_sub_return(i, a);
+ page_chain_for_each_safe(page, tmp) {
+ set_page_private(page, 0);
+ if (page_count(page) == 1)
+ mempool_free(page, &drbd_buffer_page_pool);
+ else
+ put_page(page);
+ i++;
+ }
+ i = atomic_sub_return(i, &device->pp_in_use);
if (i < 0)
- drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
- is_net ? "pp_in_use_by_net" : "pp_in_use", i);
- wake_up(&drbd_pp_wait);
+ drbd_warn(device, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);
}
/*
@@ -380,6 +197,8 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
gfpflags_allow_blocking(gfp_mask));
if (!page)
goto fail;
+ if (!mempool_is_saturated(&drbd_buffer_page_pool))
+ peer_req->flags |= EE_RELEASE_TO_MEMPOOL;
}
memset(peer_req, 0, sizeof(*peer_req));
@@ -403,13 +222,12 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
return NULL;
}
-void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
- int is_net)
+void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
might_sleep();
if (peer_req->flags & EE_HAS_DIGEST)
kfree(peer_req->digest);
- drbd_free_pages(device, peer_req->pages, is_net);
+ drbd_free_pages(device, peer_req->pages);
D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
D_ASSERT(device, drbd_interval_empty(&peer_req->i));
if (!expect(device, !(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
@@ -424,14 +242,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
LIST_HEAD(work_list);
struct drbd_peer_request *peer_req, *t;
int count = 0;
- int is_net = list == &device->net_ee;
spin_lock_irq(&device->resource->req_lock);
list_splice_init(list, &work_list);
spin_unlock_irq(&device->resource->req_lock);
list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
- __drbd_free_peer_req(device, peer_req, is_net);
+ drbd_free_peer_req(device, peer_req);
count++;
}
return count;
@@ -443,18 +260,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
static int drbd_finish_peer_reqs(struct drbd_device *device)
{
LIST_HEAD(work_list);
- LIST_HEAD(reclaimed);
struct drbd_peer_request *peer_req, *t;
int err = 0;
spin_lock_irq(&device->resource->req_lock);
- reclaim_finished_net_peer_reqs(device, &reclaimed);
list_splice_init(&device->done_ee, &work_list);
spin_unlock_irq(&device->resource->req_lock);
- list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
- drbd_free_net_peer_req(device, peer_req);
-
/* possible callbacks here:
* e_end_block, and e_end_resync_block, e_send_superseded.
* all ignore the last argument.
@@ -638,7 +450,7 @@ static struct socket *drbd_try_connect(struct drbd_connection *connection)
* a free one dynamically.
*/
what = "bind before connect";
- err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
+ err = sock->ops->bind(sock, (struct sockaddr_unsized *) &src_in6, my_addr_len);
if (err < 0)
goto out;
@@ -646,7 +458,7 @@ static struct socket *drbd_try_connect(struct drbd_connection *connection)
* stay C_WF_CONNECTION, don't go Disconnecting! */
disconnect_on_error = 0;
what = "connect";
- err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);
+ err = sock->ops->connect(sock, (struct sockaddr_unsized *) &peer_in6, peer_addr_len, 0);
out:
if (err < 0) {
@@ -725,7 +537,7 @@ static int prepare_listen_socket(struct drbd_connection *connection, struct acce
drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);
what = "bind before listen";
- err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
+ err = s_listen->ops->bind(s_listen, (struct sockaddr_unsized *)&my_addr, my_addr_len);
if (err < 0)
goto out;
@@ -1924,13 +1736,13 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
page = peer_req->pages;
page_chain_for_each(page) {
unsigned len = min_t(int, ds, PAGE_SIZE);
- data = kmap(page);
+ data = kmap_local_page(page);
err = drbd_recv_all_warn(peer_device->connection, data, len);
if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
drbd_err(device, "Fault injection: Corrupting data on receive\n");
data[0] = data[0] ^ (unsigned long)-1;
}
- kunmap(page);
+ kunmap_local(data);
if (err) {
drbd_free_peer_req(device, peer_req);
return NULL;
@@ -1965,7 +1777,7 @@ static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
page = drbd_alloc_pages(peer_device, 1, 1);
- data = kmap(page);
+ data = kmap_local_page(page);
while (data_size) {
unsigned int len = min_t(int, data_size, PAGE_SIZE);
@@ -1974,8 +1786,8 @@ static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
break;
data_size -= len;
}
- kunmap(page);
- drbd_free_pages(peer_device->device, page, 0);
+ kunmap_local(data);
+ drbd_free_pages(peer_device->device, page);
return err;
}
@@ -2500,7 +2312,11 @@ static int handle_write_conflicts(struct drbd_device *device,
peer_req->w.cb = superseded ? e_send_superseded :
e_send_retry_write;
list_add_tail(&peer_req->w.list, &device->done_ee);
- queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work);
+ /* the matching kref_put() is in drbd_send_acks_wf() */
+ kref_get(&device->kref);
+ if (!queue_work(connection->ack_sender,
+ &peer_req->peer_device->send_acks_work))
+ kref_put(&device->kref, drbd_destroy_device);
err = -ENOENT;
goto out;
@@ -5220,16 +5036,6 @@ static int drbd_disconnected(struct drbd_peer_device *peer_device)
put_ldev(device);
}
- /* tcp_close and release of sendpage pages can be deferred. I don't
- * want to use SO_LINGER, because apparently it can be deferred for
- * more than 20 seconds (longest time I checked).
- *
- * Actually we don't care for exactly when the network stack does its
- * put_page(), but release our reference on these pages right here.
- */
- i = drbd_free_peer_reqs(device, &device->net_ee);
- if (i)
- drbd_info(device, "net_ee not empty, killed %u entries\n", i);
i = atomic_read(&device->pp_in_use_by_net);
if (i)
drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
@@ -5976,8 +5782,6 @@ int drbd_ack_receiver(struct drbd_thread *thi)
while (get_t_state(thi) == RUNNING) {
drbd_thread_current_set_cpu(thi);
- conn_reclaim_net_peer_reqs(connection);
-
if (test_and_clear_bit(SEND_PING, &connection->flags)) {
if (drbd_send_ping(connection)) {
drbd_err(connection, "drbd_send_ping has failed\n");
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index a6ea737b3b71..dea3e79d044f 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -1030,22 +1030,6 @@ out:
return 1;
}
-/* helper */
-static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)
-{
- if (drbd_peer_req_has_active_page(peer_req)) {
- /* This might happen if sendpage() has not finished */
- int i = PFN_UP(peer_req->i.size);
- atomic_add(i, &device->pp_in_use_by_net);
- atomic_sub(i, &device->pp_in_use);
- spin_lock_irq(&device->resource->req_lock);
- list_add_tail(&peer_req->w.list, &device->net_ee);
- spin_unlock_irq(&device->resource->req_lock);
- wake_up(&drbd_pp_wait);
- } else
- drbd_free_peer_req(device, peer_req);
-}
-
/**
* w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
* @w: work object.
@@ -1059,9 +1043,8 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
int err;
if (unlikely(cancel)) {
- drbd_free_peer_req(device, peer_req);
- dec_unacked(device);
- return 0;
+ err = 0;
+ goto out;
}
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
@@ -1074,12 +1057,12 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req);
}
- dec_unacked(device);
-
- move_to_net_ee_or_free(device, peer_req);
-
if (unlikely(err))
drbd_err(device, "drbd_send_block() failed\n");
+out:
+ dec_unacked(device);
+ drbd_free_peer_req(device, peer_req);
+
return err;
}
@@ -1120,9 +1103,8 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
int err;
if (unlikely(cancel)) {
- drbd_free_peer_req(device, peer_req);
- dec_unacked(device);
- return 0;
+ err = 0;
+ goto out;
}
if (get_ldev_if_state(device, D_FAILED)) {
@@ -1155,13 +1137,12 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
/* update resync data with failure */
drbd_rs_failed_io(peer_device, peer_req->i.sector, peer_req->i.size);
}
-
- dec_unacked(device);
-
- move_to_net_ee_or_free(device, peer_req);
-
if (unlikely(err))
drbd_err(device, "drbd_send_block() failed\n");
+out:
+ dec_unacked(device);
+ drbd_free_peer_req(device, peer_req);
+
return err;
}
@@ -1176,9 +1157,8 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
int err, eq = 0;
if (unlikely(cancel)) {
- drbd_free_peer_req(device, peer_req);
- dec_unacked(device);
- return 0;
+ err = 0;
+ goto out;
}
if (get_ldev(device)) {
@@ -1220,12 +1200,12 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
if (drbd_ratelimit())
drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");
}
-
- dec_unacked(device);
- move_to_net_ee_or_free(device, peer_req);
-
if (unlikely(err))
drbd_err(device, "drbd_send_block/ack() failed\n");
+out:
+ dec_unacked(device);
+ drbd_free_peer_req(device, peer_req);
+
return err;
}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index e97432032f01..c28786e0fe1c 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -163,35 +163,35 @@
/* do print messages for unexpected interrupts */
static int print_unex = 1;
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
-#include <linux/fdreg.h>
-#include <linux/fd.h>
-#include <linux/hdreg.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
+#include <linux/async.h>
#include <linux/bio.h>
-#include <linux/string.h>
-#include <linux/jiffies.h>
-#include <linux/fcntl.h>
+#include <linux/compat.h>
#include <linux/delay.h>
-#include <linux/mc146818rtc.h> /* CMOS defines */
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/fd.h>
+#include <linux/fdreg.h>
+#include <linux/fs.h>
+#include <linux/hdreg.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
#include <linux/major.h>
-#include <linux/platform_device.h>
+#include <linux/mc146818rtc.h> /* CMOS defines */
+#include <linux/mm.h>
#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
#include <linux/uaccess.h>
-#include <linux/async.h>
-#include <linux/compat.h>
+#include <linux/workqueue.h>
/*
* PS/2 floppies have much slower step rates than regular floppies.
@@ -233,8 +233,6 @@ static unsigned short virtual_dma_port = 0x3f0;
irqreturn_t floppy_interrupt(int irq, void *dev_id);
static int set_dor(int fdc, char mask, char data);
-#define K_64 0x10000 /* 64KB */
-
/* the following is the mask of allowed drives. By default units 2 and
* 3 of both floppy controllers are disabled, because switching on the
* motor of these drives causes system hangs on some PCI computers. drive
@@ -331,7 +329,7 @@ static bool initialized;
* This default is used whenever the current disk size is unknown.
* [Now it is rather a minimum]
*/
-#define MAX_DISK_SIZE 4 /* 3984 */
+#define MAX_DISK_SIZE (PAGE_SIZE / 1024)
/*
* globals used by 'result()'
@@ -3092,16 +3090,13 @@ static int raw_cmd_copyin(int cmd, void __user *param,
*rcmd = NULL;
loop:
- ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
+ ptr = memdup_user(param, sizeof(*ptr));
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
*rcmd = ptr;
- ret = copy_from_user(ptr, param, sizeof(*ptr));
ptr->next = NULL;
ptr->buffer_length = 0;
ptr->kernel_data = NULL;
- if (ret)
- return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
if (ptr->cmd_count > FD_RAW_CMD_FULLSIZE)
return -EINVAL;
@@ -3363,9 +3358,9 @@ static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
return 0;
}
-static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int fd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- int drive = (long)bdev->bd_disk->private_data;
+ int drive = (long)disk->private_data;
int type = ITYPE(drive_state[drive].fd_device);
struct floppy_struct *g;
int ret;
@@ -3411,7 +3406,7 @@ static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
struct floppy_max_errors max_errors;
struct floppy_drive_params dp;
} inparam; /* parameters coming from user space */
- const void *outparam; /* parameters passed back to user space */
+ const void *outparam = NULL; /* parameters passed back to user space */
/* convert compatibility eject ioctls into floppy eject ioctl.
* We do this in order to provide a means to eject floppy disks before
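
The raw_cmd_copyin() change above is the standard memdup_user() conversion: the helper allocates and copies from user space in one step and returns an ERR_PTR on failure. Generic sketch, with invented types:

struct example_cmd {
        int op;
        int arg;
};

static int example_copyin(void __user *uptr, struct example_cmd **out)
{
        struct example_cmd *cmd = memdup_user(uptr, sizeof(*cmd));

        if (IS_ERR(cmd))
                return PTR_ERR(cmd);    /* -EFAULT or -ENOMEM */

        *out = cmd;                     /* caller frees with kfree() */
        return 0;
}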
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f8d136684109..272bc608e528 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -137,20 +137,35 @@ static void loop_global_unlock(struct loop_device *lo, bool global)
static int max_part;
static int part_shift;
-static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
+static loff_t lo_calculate_size(struct loop_device *lo, struct file *file)
{
loff_t loopsize;
+ int ret;
+
+ if (S_ISBLK(file_inode(file)->i_mode)) {
+ loopsize = i_size_read(file->f_mapping->host);
+ } else {
+ struct kstat stat;
- /* Compute loopsize in bytes */
- loopsize = i_size_read(file->f_mapping->host);
- if (offset > 0)
- loopsize -= offset;
+ /*
+ * Get the accurate file size. This provides better results than
+ * cached inode data, particularly for network filesystems where
+ * metadata may be stale.
+ */
+ ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0);
+ if (ret)
+ return 0;
+
+ loopsize = stat.size;
+ }
+
+ if (lo->lo_offset > 0)
+ loopsize -= lo->lo_offset;
/* offset is beyond i_size, weird but possible */
if (loopsize < 0)
return 0;
-
- if (sizelimit > 0 && sizelimit < loopsize)
- loopsize = sizelimit;
+ if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
+ loopsize = lo->lo_sizelimit;
/*
* Unfortunately, if we want to do I/O on the device,
* the number of 512-byte sectors has to fit into a sector_t.
@@ -158,11 +173,6 @@ static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
return loopsize >> 9;
}
-static loff_t get_loop_size(struct loop_device *lo, struct file *file)
-{
- return get_size(lo->lo_offset, lo->lo_sizelimit, file);
-}
-
/*
* We support direct I/O only if lo_offset is aligned with the logical I/O size
* of backing device, and the logical block size of loop is bigger than that of
@@ -308,14 +318,13 @@ end_io:
static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
{
struct request *rq = blk_mq_rq_from_pdu(cmd);
- struct loop_device *lo = rq->q->queuedata;
if (!atomic_dec_and_test(&cmd->ref))
return;
kfree(cmd->bvec);
cmd->bvec = NULL;
if (req_op(rq) == REQ_OP_WRITE)
- file_end_write(lo->lo_backing_file);
+ kiocb_end_write(&cmd->iocb);
if (likely(!blk_should_fake_timeout(rq->q)))
blk_mq_complete_request(rq);
}
@@ -339,11 +348,10 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
struct file *file = lo->lo_backing_file;
struct bio_vec tmp;
unsigned int offset;
- int nr_bvec = 0;
+ unsigned int nr_bvec;
int ret;
- rq_for_each_bvec(tmp, rq, rq_iter)
- nr_bvec++;
+ nr_bvec = blk_rq_nr_bvec(rq);
if (rq->bio != rq->biotail) {
@@ -391,7 +399,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
}
if (rw == ITER_SOURCE) {
- file_start_write(lo->lo_backing_file);
+ kiocb_start_write(&cmd->iocb);
ret = file->f_op->write_iter(&cmd->iocb, &iter);
} else
ret = file->f_op->read_iter(&cmd->iocb, &iter);
@@ -542,8 +550,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
return -EBADF;
error = loop_check_backing_file(file);
- if (error)
+ if (error) {
+ fput(file);
return error;
+ }
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
@@ -570,7 +580,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
error = -EINVAL;
/* size of the new backing store needs to be the same */
- if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
+ if (lo_calculate_size(lo, file) != lo_calculate_size(lo, old_file))
goto out_err;
/*
@@ -813,7 +823,7 @@ static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
if (worker)
goto queue_work;
- worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT | __GFP_NOWARN);
+ worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT);
/*
* In the event we cannot allocate a worker, just queue on the
* rootcg worker and issue the I/O as the rootcg
@@ -984,8 +994,10 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
return -EBADF;
error = loop_check_backing_file(file);
- if (error)
+ if (error) {
+ fput(file);
return error;
+ }
is_loop = is_loop_device(file);
@@ -1064,7 +1076,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
loop_update_dio(lo);
loop_sysfs_init(lo);
- size = get_loop_size(lo, file);
+ size = lo_calculate_size(lo, file);
loop_set_size(lo, size);
/* Order wrt reading lo_state in loop_validate_file(). */
@@ -1248,12 +1260,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
lo->lo_flags &= ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
lo->lo_flags |= (info->lo_flags & LOOP_SET_STATUS_SETTABLE_FLAGS);
- if (size_changed) {
- loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
- lo->lo_backing_file);
- loop_set_size(lo, new_size);
- }
-
/* update the direct I/O flag if lo_offset changed */
loop_update_dio(lo);
@@ -1261,6 +1267,10 @@ out_unfreeze:
blk_mq_unfreeze_queue(lo->lo_queue, memflags);
if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
+ if (!err && size_changed) {
+ loff_t new_size = lo_calculate_size(lo, lo->lo_backing_file);
+ loop_set_size(lo, new_size);
+ }
out_unlock:
mutex_unlock(&lo->lo_mutex);
if (partscan)
@@ -1401,7 +1411,7 @@ static int loop_set_capacity(struct loop_device *lo)
if (unlikely(lo->lo_state != Lo_bound))
return -ENXIO;
- size = get_loop_size(lo, lo->lo_backing_file);
+ size = lo_calculate_size(lo, lo->lo_backing_file);
loop_set_size(lo, size);
return 0;
@@ -1433,17 +1443,34 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
return 0;
}
-static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
+static int loop_set_block_size(struct loop_device *lo, blk_mode_t mode,
+ struct block_device *bdev, unsigned long arg)
{
struct queue_limits lim;
unsigned int memflags;
int err = 0;
- if (lo->lo_state != Lo_bound)
- return -ENXIO;
+ /*
+ * If we don't hold an exclusive handle for the device, upgrade to one
+ * here so the device is not changed under its exclusive owner.
+ */
+ if (!(mode & BLK_OPEN_EXCL)) {
+ err = bd_prepare_to_claim(bdev, loop_set_block_size, NULL);
+ if (err)
+ return err;
+ }
+
+ err = mutex_lock_killable(&lo->lo_mutex);
+ if (err)
+ goto abort_claim;
+
+ if (lo->lo_state != Lo_bound) {
+ err = -ENXIO;
+ goto unlock;
+ }
if (lo->lo_queue->limits.logical_block_size == arg)
- return 0;
+ goto unlock;
sync_blockdev(lo->lo_device);
invalidate_bdev(lo->lo_device);
@@ -1456,6 +1483,11 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
loop_update_dio(lo);
blk_mq_unfreeze_queue(lo->lo_queue, memflags);
+unlock:
+ mutex_unlock(&lo->lo_mutex);
+abort_claim:
+ if (!(mode & BLK_OPEN_EXCL))
+ bd_abort_claiming(bdev, loop_set_block_size);
return err;
}
@@ -1474,9 +1506,6 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
case LOOP_SET_DIRECT_IO:
err = loop_set_dio(lo, arg);
break;
- case LOOP_SET_BLOCK_SIZE:
- err = loop_set_block_size(lo, arg);
- break;
default:
err = -EINVAL;
}
@@ -1531,9 +1560,12 @@ static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
break;
case LOOP_GET_STATUS64:
return loop_get_status64(lo, argp);
+ case LOOP_SET_BLOCK_SIZE:
+ if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return loop_set_block_size(lo, mode, bdev, arg);
case LOOP_SET_CAPACITY:
case LOOP_SET_DIRECT_IO:
- case LOOP_SET_BLOCK_SIZE:
if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
return -EPERM;
fallthrough;
@@ -1875,6 +1907,10 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
goto failed;
}
+ /* We can block in this context, so ignore REQ_NOWAIT. */
+ if (rq->cmd_flags & REQ_NOWAIT)
+ rq->cmd_flags &= ~REQ_NOWAIT;
+
if (cmd_blkcg_css)
kthread_associate_blkcg(cmd_blkcg_css);
if (cmd_memcg_css)
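
The LOOP_SET_BLOCK_SIZE rework above follows the temporary exclusive-claim pattern: if the caller did not open the device exclusively, claim it for the duration of the change. Reduced to its shape, using the function itself as the holder token as the patch does:

static int example_reconfigure(struct block_device *bdev, blk_mode_t mode)
{
        int err = 0;

        if (!(mode & BLK_OPEN_EXCL)) {
                err = bd_prepare_to_claim(bdev, example_reconfigure, NULL);
                if (err)
                        return err;
        }

        /* ... safely change device parameters here ... */

        if (!(mode & BLK_OPEN_EXCL))
                bd_abort_claiming(bdev, example_reconfigure);
        return err;
}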
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 66ce6b81c7d9..567192e371a8 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2040,11 +2040,12 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
* @dir Direction (read or write)
*
* return value
- * None
+ * 0 The IO was submitted successfully.
+ * -ENOMEM The DMA mapping failed.
*/
-static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
- struct mtip_cmd *command,
- struct blk_mq_hw_ctx *hctx)
+static int mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
+ struct mtip_cmd *command,
+ struct blk_mq_hw_ctx *hctx)
{
struct mtip_cmd_hdr *hdr =
dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
@@ -2056,12 +2057,14 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
unsigned int nents;
/* Map the scatter list for DMA access */
- nents = blk_rq_map_sg(rq, command->sg);
- nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
+ command->scatter_ents = blk_rq_map_sg(rq, command->sg);
+ nents = dma_map_sg(&dd->pdev->dev, command->sg,
+ command->scatter_ents, dma_dir);
+ if (!nents)
+ return -ENOMEM;
- prefetch(&port->flags);
- command->scatter_ents = nents;
+ prefetch(&port->flags);
/*
* The number of retries for this command before it is
@@ -2112,11 +2115,13 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) {
set_bit(rq->tag, port->cmds_to_issue);
set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
- return;
+ return 0;
}
/* Issue the command to the hardware */
mtip_issue_ncq_command(port, rq->tag);
+
+ return 0;
}
/*
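
One detail worth noting in the hunk above: command->scatter_ents now records the blk_rq_map_sg() count rather than the dma_map_sg() return value, because the DMA API requires dma_unmap_sg() to be called with the same nents that was originally passed to dma_map_sg(). A sketch of the matching completion-side unmap (illustrative, not part of the patch):

    	/* Completion path: unmap with the count saved at submit time. */
    	dma_unmap_sg(&dd->pdev->dev, command->sg,
    		     command->scatter_ents, dma_dir);
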
@@ -3143,17 +3148,17 @@ static int mtip_block_compat_ioctl(struct block_device *dev,
* that each partition is also 4KB aligned. Non-aligned partitions adversely
* affects performance.
*
- * @dev Pointer to the block_device strucutre.
+ * @disk Pointer to the gendisk structure.
* @geo Pointer to a hd_geometry structure.
*
* return value
* 0 Operation completed successfully.
* -ENOTTY An error occurred while reading the drive capacity.
*/
-static int mtip_block_getgeo(struct block_device *dev,
+static int mtip_block_getgeo(struct gendisk *disk,
struct hd_geometry *geo)
{
- struct driver_data *dd = dev->bd_disk->private_data;
+ struct driver_data *dd = disk->private_data;
sector_t capacity;
if (!dd)
@@ -3315,7 +3320,9 @@ static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(rq);
- mtip_hw_submit_io(dd, rq, cmd, hctx);
+ if (mtip_hw_submit_io(dd, rq, cmd, hctx))
+ return BLK_STS_IOERR;
+
return BLK_STS_OK;
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 7bdc7eb808ea..f6c33b21f69e 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -311,7 +311,7 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
if (args) {
INIT_WORK(&args->work, nbd_dead_link_work);
args->index = nbd->index;
- queue_work(system_wq, &args->work);
+ queue_work(system_percpu_wq, &args->work);
}
}
if (!nsock->dead) {
@@ -565,24 +565,27 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
msg.msg_iter = *iter;
noreclaim_flag = memalloc_noreclaim_save();
- do {
- sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
- sock->sk->sk_use_task_frag = false;
- msg.msg_flags = msg_flags | MSG_NOSIGNAL;
-
- if (send)
- result = sock_sendmsg(sock, &msg);
- else
- result = sock_recvmsg(sock, &msg, msg.msg_flags);
-
- if (result <= 0) {
- if (result == 0)
- result = -EPIPE; /* short read */
- break;
- }
- if (sent)
- *sent += result;
- } while (msg_data_left(&msg));
+
+ scoped_with_kernel_creds() {
+ do {
+ sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
+ sock->sk->sk_use_task_frag = false;
+ msg.msg_flags = msg_flags | MSG_NOSIGNAL;
+
+ if (send)
+ result = sock_sendmsg(sock, &msg);
+ else
+ result = sock_recvmsg(sock, &msg, msg.msg_flags);
+
+ if (result <= 0) {
+ if (result == 0)
+ result = -EPIPE; /* short read */
+ break;
+ }
+ if (sent)
+ *sent += result;
+ } while (msg_data_left(&msg));
+ }
memalloc_noreclaim_restore(noreclaim_flag);
@@ -1018,9 +1021,9 @@ static void recv_work(struct work_struct *work)
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
- nbd_config_put(nbd);
atomic_dec(&config->recv_threads);
wake_up(&config->recv_wq);
+ nbd_config_put(nbd);
kfree(args);
}
@@ -1217,6 +1220,14 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
if (!sock)
return NULL;
+ if (!sk_is_tcp(sock->sk) &&
+ !sk_is_stream_unix(sock->sk)) {
+ dev_err(disk_to_dev(nbd->disk), "Unsupported socket: should be TCP or UNIX.\n");
+ *err = -EINVAL;
+ sockfd_put(sock);
+ return NULL;
+ }
+
if (sock->ops->shutdown == sock_no_shutdown) {
dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
*err = -EINVAL;
@@ -1473,7 +1484,17 @@ static int nbd_start_device(struct nbd_device *nbd)
return -EINVAL;
}
- blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
+retry:
+ mutex_unlock(&nbd->config_lock);
+ blk_mq_update_nr_hw_queues(&nbd->tag_set, num_connections);
+ mutex_lock(&nbd->config_lock);
+
+	/* if another code path updated nr_hw_queues, retry until the count is stable */
+ if (num_connections != config->num_connections) {
+ num_connections = config->num_connections;
+ goto retry;
+ }
+
nbd->pid = task_pid_nr(current);
nbd_parse_flags(nbd);
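
The retry above exists because blk_mq_update_nr_hw_queues() may sleep and must be called with config_lock dropped, so config->num_connections can change underneath. Condensed to its essentials, the pattern is (a sketch, not the patch itself):

    	unsigned int want;

    	do {
    		want = config->num_connections;
    		mutex_unlock(&nbd->config_lock);
    		blk_mq_update_nr_hw_queues(&nbd->tag_set, want);
    		mutex_lock(&nbd->config_lock);
    	} while (want != config->num_connections);
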
@@ -2198,9 +2219,7 @@ again:
goto out;
}
}
- ret = nbd_start_device(nbd);
- if (ret)
- goto out;
+
if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
GFP_KERNEL);
@@ -2216,13 +2235,16 @@ again:
goto out;
}
set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags);
+
+ ret = nbd_start_device(nbd);
out:
- mutex_unlock(&nbd->config_lock);
if (!ret) {
set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
refcount_inc(&nbd->config_refs);
nbd_connect_reply(info, nbd->index);
}
+ mutex_unlock(&nbd->config_lock);
+
nbd_config_put(nbd);
if (put_dev)
nbd_put(nbd);
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index aa163ae9b2aa..c7c0fb79a6bf 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -223,7 +223,7 @@ MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed nu
static unsigned long g_cache_size;
module_param_named(cache_size, g_cache_size, ulong, 0444);
-MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. Default: 0 (none)");
+MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)");
static bool g_fua = true;
module_param_named(fua, g_fua, bool, 0444);
@@ -1129,26 +1129,28 @@ again:
return 0;
}
-static int copy_to_nullb(struct nullb *nullb, struct page *source,
- unsigned int off, sector_t sector, size_t n, bool is_fua)
+static blk_status_t copy_to_nullb(struct nullb *nullb, void *source,
+ loff_t pos, size_t n, bool is_fua)
{
size_t temp, count = 0;
- unsigned int offset;
struct nullb_page *t_page;
+ sector_t sector;
while (count < n) {
- temp = min_t(size_t, nullb->dev->blocksize, n - count);
+ temp = min3(nullb->dev->blocksize, n - count,
+ PAGE_SIZE - offset_in_page(pos));
+ sector = pos >> SECTOR_SHIFT;
if (null_cache_active(nullb) && !is_fua)
null_make_cache_space(nullb, PAGE_SIZE);
- offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
t_page = null_insert_page(nullb, sector,
!null_cache_active(nullb) || is_fua);
if (!t_page)
- return -ENOSPC;
+ return BLK_STS_NOSPC;
- memcpy_page(t_page->page, offset, source, off + count, temp);
+ memcpy_to_page(t_page->page, offset_in_page(pos),
+ source + count, temp);
__set_bit(sector & SECTOR_MASK, t_page->bitmap);
@@ -1156,41 +1158,34 @@ static int copy_to_nullb(struct nullb *nullb, struct page *source,
null_free_sector(nullb, sector, true);
count += temp;
- sector += temp >> SECTOR_SHIFT;
+ pos += temp;
}
- return 0;
+ return BLK_STS_OK;
}
-static int copy_from_nullb(struct nullb *nullb, struct page *dest,
- unsigned int off, sector_t sector, size_t n)
+static void copy_from_nullb(struct nullb *nullb, void *dest, loff_t pos,
+ size_t n)
{
size_t temp, count = 0;
- unsigned int offset;
struct nullb_page *t_page;
+ sector_t sector;
while (count < n) {
- temp = min_t(size_t, nullb->dev->blocksize, n - count);
+ temp = min3(nullb->dev->blocksize, n - count,
+ PAGE_SIZE - offset_in_page(pos));
+ sector = pos >> SECTOR_SHIFT;
- offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
t_page = null_lookup_page(nullb, sector, false,
!null_cache_active(nullb));
-
if (t_page)
- memcpy_page(dest, off + count, t_page->page, offset,
- temp);
+ memcpy_from_page(dest + count, t_page->page,
+ offset_in_page(pos), temp);
else
- zero_user(dest, off + count, temp);
+ memset(dest + count, 0, temp);
count += temp;
- sector += temp >> SECTOR_SHIFT;
+ pos += temp;
}
- return 0;
-}
-
-static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
- unsigned int len, unsigned int off)
-{
- memset_page(page, off, 0xff, len);
}
blk_status_t null_handle_discard(struct nullb_device *dev,
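
A worked example of the min3() clamp both copy helpers now share (hypothetical values: blocksize = 4096, pos = 0x1ffe, n - count = 4096):

    	temp = min3(blocksize, n - count, PAGE_SIZE - offset_in_page(pos))
    	     = min3(4096, 4096, 4096 - 0xffe)
    	     = 2

Only the two bytes left in the current page are copied; pos then lands on a page boundary and the next iteration continues, so no single memcpy ever crosses a page.
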
@@ -1234,34 +1229,39 @@ static blk_status_t null_handle_flush(struct nullb *nullb)
return errno_to_blk_status(err);
}
-static int null_transfer(struct nullb *nullb, struct page *page,
- unsigned int len, unsigned int off, bool is_write, sector_t sector,
+static blk_status_t null_transfer(struct nullb *nullb, struct page *page,
+ unsigned int len, unsigned int off, bool is_write, loff_t pos,
bool is_fua)
{
struct nullb_device *dev = nullb->dev;
+ blk_status_t err = BLK_STS_OK;
unsigned int valid_len = len;
- int err = 0;
+ void *p;
+ p = kmap_local_page(page) + off;
if (!is_write) {
- if (dev->zoned)
+ if (dev->zoned) {
valid_len = null_zone_valid_read_len(nullb,
- sector, len);
+ pos >> SECTOR_SHIFT, len);
+ if (valid_len && valid_len != len)
+ valid_len -= pos & (SECTOR_SIZE - 1);
+ }
if (valid_len) {
- err = copy_from_nullb(nullb, page, off,
- sector, valid_len);
+ copy_from_nullb(nullb, p, pos, valid_len);
off += valid_len;
len -= valid_len;
}
if (len)
- nullb_fill_pattern(nullb, page, len, off);
+ memset(p + valid_len, 0xff, len);
flush_dcache_page(page);
} else {
flush_dcache_page(page);
- err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
+ err = copy_to_nullb(nullb, p, pos, len, is_fua);
}
+ kunmap_local(p);
return err;
}
@@ -1274,9 +1274,9 @@ static blk_status_t null_handle_data_transfer(struct nullb_cmd *cmd,
{
struct request *rq = blk_mq_rq_from_pdu(cmd);
struct nullb *nullb = cmd->nq->dev->nullb;
- int err = 0;
+ blk_status_t err = BLK_STS_OK;
unsigned int len;
- sector_t sector = blk_rq_pos(rq);
+ loff_t pos = blk_rq_pos(rq) << SECTOR_SHIFT;
unsigned int max_bytes = nr_sectors << SECTOR_SHIFT;
unsigned int transferred_bytes = 0;
struct req_iterator iter;
@@ -1288,18 +1288,18 @@ static blk_status_t null_handle_data_transfer(struct nullb_cmd *cmd,
if (transferred_bytes + len > max_bytes)
len = max_bytes - transferred_bytes;
err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
- op_is_write(req_op(rq)), sector,
+ op_is_write(req_op(rq)), pos,
rq->cmd_flags & REQ_FUA);
if (err)
break;
- sector += len >> SECTOR_SHIFT;
+ pos += len;
transferred_bytes += len;
if (transferred_bytes >= max_bytes)
break;
}
spin_unlock_irq(&nullb->lock);
- return errno_to_blk_status(err);
+ return err;
}
static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
@@ -1949,6 +1949,7 @@ static int null_add_dev(struct nullb_device *dev)
.logical_block_size = dev->blocksize,
.physical_block_size = dev->blocksize,
.max_hw_sectors = dev->max_sectors,
+ .dma_alignment = 1,
};
struct nullb *nullb;
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 7bb6128dbaaf..6c4c4bbe7dad 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -143,7 +143,8 @@ int null_init_zoned_dev(struct nullb_device *dev, struct queue_limits *lim);
int null_register_zoned_dev(struct nullb *nullb);
void null_free_zoned_dev(struct nullb_device *dev);
int null_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data);
+ unsigned int nr_zones,
+ struct blk_report_zones_args *args);
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, sector_t nr_sectors);
size_t null_zone_valid_read_len(struct nullb *nullb,
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 4e5728f45989..0ada35dc0989 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -191,7 +191,7 @@ void null_free_zoned_dev(struct nullb_device *dev)
}
int null_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data)
+ unsigned int nr_zones, struct blk_report_zones_args *args)
{
struct nullb *nullb = disk->private_data;
struct nullb_device *dev = nullb->dev;
@@ -225,7 +225,7 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
blkz.capacity = zone->capacity;
null_unlock_zone(dev, zone);
- error = cb(&blkz, i, data);
+ error = disk_report_zone(disk, &blkz, i, args);
if (error)
return error;
}
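
Assuming the struct blk_report_zones_args / disk_report_zone() interface this hunk converts to (the helper replaces the old report_zones_cb callback plus opaque data pointer), the driver-side loop reduces to the following sketch:

    	for (i = 0; i < nr_zones; i++) {
    		struct blk_zone blkz = {};

    		/* ... fill blkz from the driver's per-zone state ... */
    		error = disk_report_zone(disk, &blkz, i, args);
    		if (error)
    			return error;
    	}
    	return nr_zones;
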
@@ -242,7 +242,7 @@ size_t null_zone_valid_read_len(struct nullb *nullb,
{
struct nullb_device *dev = nullb->dev;
struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
- unsigned int nr_sectors = len >> SECTOR_SHIFT;
+ unsigned int nr_sectors = DIV_ROUND_UP(len, SECTOR_SIZE);
/* Read must be below the write pointer position */
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
deleted file mode 100644
index d5cc7bd2875c..000000000000
--- a/drivers/block/pktcdvd.c
+++ /dev/null
@@ -1,2916 +0,0 @@
-/*
- * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
- * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
- * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
- *
- * May be copied or modified under the terms of the GNU General Public
- * License. See linux/COPYING for more information.
- *
- * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
- * DVD-RAM devices.
- *
- * Theory of operation:
- *
- * At the lowest level, there is the standard driver for the CD/DVD device,
- * such as drivers/scsi/sr.c. This driver can handle read and write requests,
- * but it doesn't know anything about the special restrictions that apply to
- * packet writing. One restriction is that write requests must be aligned to
- * packet boundaries on the physical media, and the size of a write request
- * must be equal to the packet size. Another restriction is that a
- * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
- * command, if the previous command was a write.
- *
- * The purpose of the packet writing driver is to hide these restrictions from
- * higher layers, such as file systems, and present a block device that can be
- * randomly read and written using 2kB-sized blocks.
- *
- * The lowest layer in the packet writing driver is the packet I/O scheduler.
- * Its data is defined by the struct packet_iosched and includes two bio
- * queues with pending read and write requests. These queues are processed
- * by the pkt_iosched_process_queue() function. The write requests in this
- * queue are already properly aligned and sized. This layer is responsible for
- * issuing the flush cache commands and scheduling the I/O in a good order.
- *
- * The next layer transforms unaligned write requests to aligned writes. This
- * transformation requires reading missing pieces of data from the underlying
- * block device, assembling the pieces to full packets and queuing them to the
- * packet I/O scheduler.
- *
- * At the top layer there is a custom ->submit_bio function that forwards
- * read requests directly to the iosched queue and puts write requests in the
- * unaligned write queue. A kernel thread performs the necessary read
- * gathering to convert the unaligned writes to aligned writes and then feeds
- * them to the packet I/O scheduler.
- *
- *************************************************************************/
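
Condensed into code, the routing described above looks roughly like this (a sketch with the hypothetical helper pkt_queue_unaligned_write; the real driver does the equivalent in its ->submit_bio path and the kcdrwd thread):

    static void pkt_route_bio(struct pktcdvd_device *pd, struct bio *bio)
    {
    	if (bio_data_dir(bio) == READ)
    		pkt_queue_bio(pd, bio);			/* straight to the iosched */
    	else
    		pkt_queue_unaligned_write(pd, bio);	/* read-gathered into full packets */
    }
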
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/backing-dev.h>
-#include <linux/compat.h>
-#include <linux/debugfs.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/file.h>
-#include <linux/freezer.h>
-#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/nospec.h>
-#include <linux/pktcdvd.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_ioctl.h>
-
-#include <linux/unaligned.h>
-
-#define DRIVER_NAME "pktcdvd"
-
-#define MAX_SPEED 0xffff
-
-static DEFINE_MUTEX(pktcdvd_mutex);
-static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
-static struct proc_dir_entry *pkt_proc;
-static int pktdev_major;
-static int write_congestion_on = PKT_WRITE_CONGESTION_ON;
-static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
-static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */
-static mempool_t psd_pool;
-static struct bio_set pkt_bio_set;
-
-/* /sys/class/pktcdvd */
-static struct class class_pktcdvd;
-static struct dentry *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */
-
-/* forward declaration */
-static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
-static int pkt_remove_dev(dev_t pkt_dev);
-
-static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
-{
- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
-}
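
For illustration (hypothetical values): with pd->offset = 0 and a fixed packet size of 128 sectors,

    	get_zone(200, pd) = (200 + 0) & ~(sector_t)(128 - 1) = 128

i.e. the sector is rounded down to its packet boundary. The mask form works only because the packet size is a power of two.
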
-
-/**********************************************************
- * sysfs interface for pktcdvd
- * by (C) 2006 Thomas Maier <balagi@justmail.de>
-
- /sys/class/pktcdvd/pktcdvd[0-7]/
- stat/reset
- stat/packets_started
- stat/packets_finished
- stat/kb_written
- stat/kb_read
- stat/kb_read_gather
- write_queue/size
- write_queue/congestion_off
- write_queue/congestion_on
- **********************************************************/
-
-static ssize_t packets_started_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started);
-}
-static DEVICE_ATTR_RO(packets_started);
-
-static ssize_t packets_finished_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended);
-}
-static DEVICE_ATTR_RO(packets_finished);
-
-static ssize_t kb_written_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1);
-}
-static DEVICE_ATTR_RO(kb_written);
-
-static ssize_t kb_read_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1);
-}
-static DEVICE_ATTR_RO(kb_read);
-
-static ssize_t kb_read_gather_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1);
-}
-static DEVICE_ATTR_RO(kb_read_gather);
-
-static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- if (len > 0) {
- pd->stats.pkt_started = 0;
- pd->stats.pkt_ended = 0;
- pd->stats.secs_w = 0;
- pd->stats.secs_rg = 0;
- pd->stats.secs_r = 0;
- }
- return len;
-}
-static DEVICE_ATTR_WO(reset);
-
-static struct attribute *pkt_stat_attrs[] = {
- &dev_attr_packets_finished.attr,
- &dev_attr_packets_started.attr,
- &dev_attr_kb_read.attr,
- &dev_attr_kb_written.attr,
- &dev_attr_kb_read_gather.attr,
- &dev_attr_reset.attr,
- NULL,
-};
-
-static const struct attribute_group pkt_stat_group = {
- .name = "stat",
- .attrs = pkt_stat_attrs,
-};
-
-static ssize_t size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int n;
-
- spin_lock(&pd->lock);
- n = sysfs_emit(buf, "%d\n", pd->bio_queue_size);
- spin_unlock(&pd->lock);
- return n;
-}
-static DEVICE_ATTR_RO(size);
-
-static void init_write_congestion_marks(int* lo, int* hi)
-{
- if (*hi > 0) {
- *hi = max(*hi, 500);
- *hi = min(*hi, 1000000);
- if (*lo <= 0)
- *lo = *hi - 100;
- else {
- *lo = min(*lo, *hi - 100);
- *lo = max(*lo, 100);
- }
- } else {
- *hi = -1;
- *lo = -1;
- }
-}
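
A worked pass through the clamping above (hypothetical inputs):

    	/*
    	 * e.g. hi = 2000, lo = 0:
    	 *   hi = clamp(2000, 500, 1000000) = 2000
    	 *   lo <= 0, so lo = hi - 100 = 1900
    	 * and any hi <= 0 disables both marks (hi = lo = -1).
    	 */
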
-
-static ssize_t congestion_off_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int n;
-
- spin_lock(&pd->lock);
- n = sysfs_emit(buf, "%d\n", pd->write_congestion_off);
- spin_unlock(&pd->lock);
- return n;
-}
-
-static ssize_t congestion_off_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int val, ret;
-
- ret = kstrtoint(buf, 10, &val);
- if (ret)
- return ret;
-
- spin_lock(&pd->lock);
- pd->write_congestion_off = val;
- init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
- spin_unlock(&pd->lock);
- return len;
-}
-static DEVICE_ATTR_RW(congestion_off);
-
-static ssize_t congestion_on_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int n;
-
- spin_lock(&pd->lock);
- n = sysfs_emit(buf, "%d\n", pd->write_congestion_on);
- spin_unlock(&pd->lock);
- return n;
-}
-
-static ssize_t congestion_on_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int val, ret;
-
- ret = kstrtoint(buf, 10, &val);
- if (ret)
- return ret;
-
- spin_lock(&pd->lock);
- pd->write_congestion_on = val;
- init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
- spin_unlock(&pd->lock);
- return len;
-}
-static DEVICE_ATTR_RW(congestion_on);
-
-static struct attribute *pkt_wq_attrs[] = {
- &dev_attr_congestion_on.attr,
- &dev_attr_congestion_off.attr,
- &dev_attr_size.attr,
- NULL,
-};
-
-static const struct attribute_group pkt_wq_group = {
- .name = "write_queue",
- .attrs = pkt_wq_attrs,
-};
-
-static const struct attribute_group *pkt_groups[] = {
- &pkt_stat_group,
- &pkt_wq_group,
- NULL,
-};
-
-static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
-{
- if (class_is_registered(&class_pktcdvd)) {
- pd->dev = device_create_with_groups(&class_pktcdvd, NULL,
- MKDEV(0, 0), pd, pkt_groups,
- "%s", pd->disk->disk_name);
- if (IS_ERR(pd->dev))
- pd->dev = NULL;
- }
-}
-
-static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
-{
- if (class_is_registered(&class_pktcdvd))
- device_unregister(pd->dev);
-}
-
-
-/********************************************************************
- /sys/class/pktcdvd/
- add map block device
- remove unmap packet dev
- device_map show mappings
- *******************************************************************/
-
-static ssize_t device_map_show(const struct class *c, const struct class_attribute *attr,
- char *data)
-{
- int n = 0;
- int idx;
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- for (idx = 0; idx < MAX_WRITERS; idx++) {
- struct pktcdvd_device *pd = pkt_devs[idx];
- if (!pd)
- continue;
- n += sysfs_emit_at(data, n, "%s %u:%u %u:%u\n",
- pd->disk->disk_name,
- MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
- MAJOR(file_bdev(pd->bdev_file)->bd_dev),
- MINOR(file_bdev(pd->bdev_file)->bd_dev));
- }
- mutex_unlock(&ctl_mutex);
- return n;
-}
-static CLASS_ATTR_RO(device_map);
-
-static ssize_t add_store(const struct class *c, const struct class_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int major, minor;
-
- if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
- /* pkt_setup_dev() expects caller to hold reference to self */
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
- pkt_setup_dev(MKDEV(major, minor), NULL);
-
- module_put(THIS_MODULE);
-
- return count;
- }
-
- return -EINVAL;
-}
-static CLASS_ATTR_WO(add);
-
-static ssize_t remove_store(const struct class *c, const struct class_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int major, minor;
- if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
- pkt_remove_dev(MKDEV(major, minor));
- return count;
- }
- return -EINVAL;
-}
-static CLASS_ATTR_WO(remove);
-
-static struct attribute *class_pktcdvd_attrs[] = {
- &class_attr_add.attr,
- &class_attr_remove.attr,
- &class_attr_device_map.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(class_pktcdvd);
-
-static struct class class_pktcdvd = {
- .name = DRIVER_NAME,
- .class_groups = class_pktcdvd_groups,
-};
-
-static int pkt_sysfs_init(void)
-{
- /*
- * create control files in sysfs
- * /sys/class/pktcdvd/...
- */
- return class_register(&class_pktcdvd);
-}
-
-static void pkt_sysfs_cleanup(void)
-{
- class_unregister(&class_pktcdvd);
-}
-
-/********************************************************************
- entries in debugfs
-
- /sys/kernel/debug/pktcdvd[0-7]/
- info
-
- *******************************************************************/
-
-static void pkt_count_states(struct pktcdvd_device *pd, int *states)
-{
- struct packet_data *pkt;
- int i;
-
- for (i = 0; i < PACKET_NUM_STATES; i++)
- states[i] = 0;
-
- spin_lock(&pd->cdrw.active_list_lock);
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- states[pkt->state]++;
- }
- spin_unlock(&pd->cdrw.active_list_lock);
-}
-
-static int pkt_seq_show(struct seq_file *m, void *p)
-{
- struct pktcdvd_device *pd = m->private;
- char *msg;
- int states[PACKET_NUM_STATES];
-
- seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name,
- file_bdev(pd->bdev_file));
-
- seq_printf(m, "\nSettings:\n");
- seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
-
- if (pd->settings.write_type == 0)
- msg = "Packet";
- else
- msg = "Unknown";
- seq_printf(m, "\twrite type:\t\t%s\n", msg);
-
- seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
- seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
-
- seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
-
- if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
- msg = "Mode 1";
- else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
- msg = "Mode 2";
- else
- msg = "Unknown";
- seq_printf(m, "\tblock mode:\t\t%s\n", msg);
-
- seq_printf(m, "\nStatistics:\n");
- seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
- seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
- seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
- seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
- seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
-
- seq_printf(m, "\nMisc:\n");
- seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
- seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
- seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
- seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
- seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
- seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
-
- seq_printf(m, "\nQueue state:\n");
- seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
- seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
- seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", pd->current_sector);
-
- pkt_count_states(pd, states);
- seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
- states[0], states[1], states[2], states[3], states[4], states[5]);
-
- seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
- pd->write_congestion_off,
- pd->write_congestion_on);
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(pkt_seq);
-
-static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
-{
- if (!pkt_debugfs_root)
- return;
- pd->dfs_d_root = debugfs_create_dir(pd->disk->disk_name, pkt_debugfs_root);
-
- pd->dfs_f_info = debugfs_create_file("info", 0444, pd->dfs_d_root,
- pd, &pkt_seq_fops);
-}
-
-static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
-{
- if (!pkt_debugfs_root)
- return;
- debugfs_remove(pd->dfs_f_info);
- debugfs_remove(pd->dfs_d_root);
- pd->dfs_f_info = NULL;
- pd->dfs_d_root = NULL;
-}
-
-static void pkt_debugfs_init(void)
-{
- pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
-}
-
-static void pkt_debugfs_cleanup(void)
-{
- debugfs_remove(pkt_debugfs_root);
- pkt_debugfs_root = NULL;
-}
-
-/* ----------------------------------------------------------*/
-
-
-static void pkt_bio_finished(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
- if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
- dev_dbg(ddev, "queue empty\n");
- atomic_set(&pd->iosched.attention, 1);
- wake_up(&pd->wqueue);
- }
-}
-
-/*
- * Allocate a packet_data struct
- */
-static struct packet_data *pkt_alloc_packet_data(int frames)
-{
- int i;
- struct packet_data *pkt;
-
- pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
- if (!pkt)
- goto no_pkt;
-
- pkt->frames = frames;
- pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL);
- if (!pkt->w_bio)
- goto no_bio;
-
- for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
- pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
- if (!pkt->pages[i])
- goto no_page;
- }
-
- spin_lock_init(&pkt->lock);
- bio_list_init(&pkt->orig_bios);
-
- for (i = 0; i < frames; i++) {
- pkt->r_bios[i] = bio_kmalloc(1, GFP_KERNEL);
- if (!pkt->r_bios[i])
- goto no_rd_bio;
- }
-
- return pkt;
-
-no_rd_bio:
- for (i = 0; i < frames; i++)
- kfree(pkt->r_bios[i]);
-no_page:
- for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
- if (pkt->pages[i])
- __free_page(pkt->pages[i]);
- kfree(pkt->w_bio);
-no_bio:
- kfree(pkt);
-no_pkt:
- return NULL;
-}
-
-/*
- * Free a packet_data struct
- */
-static void pkt_free_packet_data(struct packet_data *pkt)
-{
- int i;
-
- for (i = 0; i < pkt->frames; i++)
- kfree(pkt->r_bios[i]);
- for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
- __free_page(pkt->pages[i]);
- kfree(pkt->w_bio);
- kfree(pkt);
-}
-
-static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
-{
- struct packet_data *pkt, *next;
-
- BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));
-
- list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
- pkt_free_packet_data(pkt);
- }
- INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
-}
-
-static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
-{
- struct packet_data *pkt;
-
- BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
-
- while (nr_packets > 0) {
- pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
- if (!pkt) {
- pkt_shrink_pktlist(pd);
- return 0;
- }
- pkt->id = nr_packets;
- pkt->pd = pd;
- list_add(&pkt->list, &pd->cdrw.pkt_free_list);
- nr_packets--;
- }
- return 1;
-}
-
-static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
-{
- struct rb_node *n = rb_next(&node->rb_node);
- if (!n)
- return NULL;
- return rb_entry(n, struct pkt_rb_node, rb_node);
-}
-
-static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
-{
- rb_erase(&node->rb_node, &pd->bio_queue);
- mempool_free(node, &pd->rb_pool);
- pd->bio_queue_size--;
- BUG_ON(pd->bio_queue_size < 0);
-}
-
-/*
- * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
- */
-static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
-{
- struct rb_node *n = pd->bio_queue.rb_node;
- struct rb_node *next;
- struct pkt_rb_node *tmp;
-
- if (!n) {
- BUG_ON(pd->bio_queue_size > 0);
- return NULL;
- }
-
- for (;;) {
- tmp = rb_entry(n, struct pkt_rb_node, rb_node);
- if (s <= tmp->bio->bi_iter.bi_sector)
- next = n->rb_left;
- else
- next = n->rb_right;
- if (!next)
- break;
- n = next;
- }
-
- if (s > tmp->bio->bi_iter.bi_sector) {
- tmp = pkt_rbtree_next(tmp);
- if (!tmp)
- return NULL;
- }
- BUG_ON(s > tmp->bio->bi_iter.bi_sector);
- return tmp;
-}
-
-/*
- * Insert a node into the pd->bio_queue rb tree.
- */
-static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
-{
- struct rb_node **p = &pd->bio_queue.rb_node;
- struct rb_node *parent = NULL;
- sector_t s = node->bio->bi_iter.bi_sector;
- struct pkt_rb_node *tmp;
-
- while (*p) {
- parent = *p;
- tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
- if (s < tmp->bio->bi_iter.bi_sector)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
- rb_link_node(&node->rb_node, parent, p);
- rb_insert_color(&node->rb_node, &pd->bio_queue);
- pd->bio_queue_size++;
-}
-
-/*
- * Send a packet_command to the underlying block device and
- * wait for completion.
- */
-static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
-{
- struct request_queue *q = bdev_get_queue(file_bdev(pd->bdev_file));
- struct scsi_cmnd *scmd;
- struct request *rq;
- int ret = 0;
-
- rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
- REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
- scmd = blk_mq_rq_to_pdu(rq);
-
- if (cgc->buflen) {
- ret = blk_rq_map_kern(rq, cgc->buffer, cgc->buflen,
- GFP_NOIO);
- if (ret)
- goto out;
- }
-
- scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
- memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE);
-
- rq->timeout = 60*HZ;
- if (cgc->quiet)
- rq->rq_flags |= RQF_QUIET;
-
- blk_execute_rq(rq, false);
- if (scmd->result)
- ret = -EIO;
-out:
- blk_mq_free_request(rq);
- return ret;
-}
-
-static const char *sense_key_string(__u8 index)
-{
- static const char * const info[] = {
- "No sense", "Recovered error", "Not ready",
- "Medium error", "Hardware error", "Illegal request",
- "Unit attention", "Data protect", "Blank check",
- };
-
- return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
-}
-
-/*
- * A generic sense dump / resolve mechanism should be implemented across
- * all ATAPI + SCSI devices.
- */
-static void pkt_dump_sense(struct pktcdvd_device *pd,
- struct packet_command *cgc)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct scsi_sense_hdr *sshdr = cgc->sshdr;
-
- if (sshdr)
- dev_err(ddev, "%*ph - sense %02x.%02x.%02x (%s)\n",
- CDROM_PACKET_SIZE, cgc->cmd,
- sshdr->sense_key, sshdr->asc, sshdr->ascq,
- sense_key_string(sshdr->sense_key));
- else
- dev_err(ddev, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
-}
-
-/*
- * flush the drive cache to media
- */
-static int pkt_flush_cache(struct pktcdvd_device *pd)
-{
- struct packet_command cgc;
-
- init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.cmd[0] = GPCMD_FLUSH_CACHE;
- cgc.quiet = 1;
-
- /*
- * the IMMED bit -- we default to not setting it, although that
- * would allow a much faster close, this is safer
- */
-#if 0
- cgc.cmd[1] = 1 << 1;
-#endif
- return pkt_generic_packet(pd, &cgc);
-}
-
-/*
- * speed is given as the normal factor, e.g. 4 for 4x
- */
-static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
- unsigned write_speed, unsigned read_speed)
-{
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- int ret;
-
- init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.sshdr = &sshdr;
- cgc.cmd[0] = GPCMD_SET_SPEED;
- put_unaligned_be16(read_speed, &cgc.cmd[2]);
- put_unaligned_be16(write_speed, &cgc.cmd[4]);
-
- ret = pkt_generic_packet(pd, &cgc);
- if (ret)
- pkt_dump_sense(pd, &cgc);
-
- return ret;
-}
-
-/*
- * Queue a bio for processing by the low-level CD device. Must be called
- * from process context.
- */
-static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
-{
- /*
- * Some CDRW drives can not handle writes larger than one packet,
- * even if the size is a multiple of the packet size.
- */
- bio->bi_opf |= REQ_NOMERGE;
-
- spin_lock(&pd->iosched.lock);
- if (bio_data_dir(bio) == READ)
- bio_list_add(&pd->iosched.read_queue, bio);
- else
- bio_list_add(&pd->iosched.write_queue, bio);
- spin_unlock(&pd->iosched.lock);
-
- atomic_set(&pd->iosched.attention, 1);
- wake_up(&pd->wqueue);
-}
-
-/*
- * Process the queued read/write requests. This function handles special
- * requirements for CDRW drives:
- * - A cache flush command must be inserted before a read request if the
- * previous request was a write.
- * - Switching between reading and writing is slow, so don't do it more often
- * than necessary.
- * - Optimize for throughput at the expense of latency. This means that streaming
- * writes will never be interrupted by a read, but if the drive has to seek
- * before the next write, switch to reading instead if there are any pending
- * read requests.
- * - Set the read speed according to current usage pattern. When only reading
- * from the device, it's best to use the highest possible read speed, but
- * when switching often between reading and writing, it's better to have the
- * same read and write speeds.
- */
-static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- if (atomic_read(&pd->iosched.attention) == 0)
- return;
- atomic_set(&pd->iosched.attention, 0);
-
- for (;;) {
- struct bio *bio;
- int reads_queued, writes_queued;
-
- spin_lock(&pd->iosched.lock);
- reads_queued = !bio_list_empty(&pd->iosched.read_queue);
- writes_queued = !bio_list_empty(&pd->iosched.write_queue);
- spin_unlock(&pd->iosched.lock);
-
- if (!reads_queued && !writes_queued)
- break;
-
- if (pd->iosched.writing) {
- int need_write_seek = 1;
- spin_lock(&pd->iosched.lock);
- bio = bio_list_peek(&pd->iosched.write_queue);
- spin_unlock(&pd->iosched.lock);
- if (bio && (bio->bi_iter.bi_sector ==
- pd->iosched.last_write))
- need_write_seek = 0;
- if (need_write_seek && reads_queued) {
- if (atomic_read(&pd->cdrw.pending_bios) > 0) {
- dev_dbg(ddev, "write, waiting\n");
- break;
- }
- pkt_flush_cache(pd);
- pd->iosched.writing = 0;
- }
- } else {
- if (!reads_queued && writes_queued) {
- if (atomic_read(&pd->cdrw.pending_bios) > 0) {
- dev_dbg(ddev, "read, waiting\n");
- break;
- }
- pd->iosched.writing = 1;
- }
- }
-
- spin_lock(&pd->iosched.lock);
- if (pd->iosched.writing)
- bio = bio_list_pop(&pd->iosched.write_queue);
- else
- bio = bio_list_pop(&pd->iosched.read_queue);
- spin_unlock(&pd->iosched.lock);
-
- if (!bio)
- continue;
-
- if (bio_data_dir(bio) == READ)
- pd->iosched.successive_reads +=
- bio->bi_iter.bi_size >> 10;
- else {
- pd->iosched.successive_reads = 0;
- pd->iosched.last_write = bio_end_sector(bio);
- }
- if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
- if (pd->read_speed == pd->write_speed) {
- pd->read_speed = MAX_SPEED;
- pkt_set_speed(pd, pd->write_speed, pd->read_speed);
- }
- } else {
- if (pd->read_speed != pd->write_speed) {
- pd->read_speed = pd->write_speed;
- pkt_set_speed(pd, pd->write_speed, pd->read_speed);
- }
- }
-
- atomic_inc(&pd->cdrw.pending_bios);
- submit_bio_noacct(bio);
- }
-}
-
-/*
- * Special care is needed if the underlying block device has a small
- * max_phys_segments value.
- */
-static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- if ((pd->settings.size << 9) / CD_FRAMESIZE <= queue_max_segments(q)) {
- /*
- * The cdrom device can handle one segment/frame
- */
- clear_bit(PACKET_MERGE_SEGS, &pd->flags);
- return 0;
- }
-
- if ((pd->settings.size << 9) / PAGE_SIZE <= queue_max_segments(q)) {
- /*
- * We can handle this case at the expense of some extra memory
- * copies during write operations
- */
- set_bit(PACKET_MERGE_SEGS, &pd->flags);
- return 0;
- }
-
- dev_err(ddev, "cdrom max_phys_segments too small\n");
- return -EIO;
-}
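
Worked numbers for the two thresholds above (hypothetical packet size of 128 sectors):

    	(128 << 9) / CD_FRAMESIZE = 65536 / 2048 = 32	/* segments, one per frame */
    	(128 << 9) / PAGE_SIZE    = 65536 / 4096 = 16	/* with 4 KiB pages */

A queue allowing at least 32 segments takes one segment per 2 KiB frame; one allowing at least 16 gets PACKET_MERGE_SEGS set and pays extra copies on writes; anything smaller is rejected with -EIO.
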
-
-static void pkt_end_io_read(struct bio *bio)
-{
- struct packet_data *pkt = bio->bi_private;
- struct pktcdvd_device *pd = pkt->pd;
- BUG_ON(!pd);
-
- dev_dbg(disk_to_dev(pd->disk), "bio=%p sec0=%llx sec=%llx err=%d\n",
- bio, pkt->sector, bio->bi_iter.bi_sector, bio->bi_status);
-
- if (bio->bi_status)
- atomic_inc(&pkt->io_errors);
- bio_uninit(bio);
- if (atomic_dec_and_test(&pkt->io_wait)) {
- atomic_inc(&pkt->run_sm);
- wake_up(&pd->wqueue);
- }
- pkt_bio_finished(pd);
-}
-
-static void pkt_end_io_packet_write(struct bio *bio)
-{
- struct packet_data *pkt = bio->bi_private;
- struct pktcdvd_device *pd = pkt->pd;
- BUG_ON(!pd);
-
- dev_dbg(disk_to_dev(pd->disk), "id=%d, err=%d\n", pkt->id, bio->bi_status);
-
- pd->stats.pkt_ended++;
-
- bio_uninit(bio);
- pkt_bio_finished(pd);
- atomic_dec(&pkt->io_wait);
- atomic_inc(&pkt->run_sm);
- wake_up(&pd->wqueue);
-}
-
-/*
- * Schedule reads for the holes in a packet
- */
-static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int frames_read = 0;
- struct bio *bio;
- int f;
- char written[PACKET_MAX_SIZE];
-
- BUG_ON(bio_list_empty(&pkt->orig_bios));
-
- atomic_set(&pkt->io_wait, 0);
- atomic_set(&pkt->io_errors, 0);
-
- /*
- * Figure out which frames we need to read before we can write.
- */
- memset(written, 0, sizeof(written));
- spin_lock(&pkt->lock);
- bio_list_for_each(bio, &pkt->orig_bios) {
- int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
- (CD_FRAMESIZE >> 9);
- int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
- pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
- BUG_ON(first_frame < 0);
- BUG_ON(first_frame + num_frames > pkt->frames);
- for (f = first_frame; f < first_frame + num_frames; f++)
- written[f] = 1;
- }
- spin_unlock(&pkt->lock);
-
- if (pkt->cache_valid) {
- dev_dbg(ddev, "zone %llx cached\n", pkt->sector);
- goto out_account;
- }
-
- /*
- * Schedule reads for missing parts of the packet.
- */
- for (f = 0; f < pkt->frames; f++) {
- int p, offset;
-
- if (written[f])
- continue;
-
- bio = pkt->r_bios[f];
- bio_init(bio, file_bdev(pd->bdev_file), bio->bi_inline_vecs, 1,
- REQ_OP_READ);
- bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
- bio->bi_end_io = pkt_end_io_read;
- bio->bi_private = pkt;
-
- p = (f * CD_FRAMESIZE) / PAGE_SIZE;
- offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
- dev_dbg(ddev, "Adding frame %d, page:%p offs:%d\n", f,
- pkt->pages[p], offset);
- if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
- BUG();
-
- atomic_inc(&pkt->io_wait);
- pkt_queue_bio(pd, bio);
- frames_read++;
- }
-
-out_account:
- dev_dbg(ddev, "need %d frames for zone %llx\n", frames_read, pkt->sector);
- pd->stats.pkt_started++;
- pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
-}
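
A worked pass over the written[] bookkeeping above (hypothetical numbers): for a packet at pkt->sector = 0x1000 and one queued 8 KiB bio at sector 0x1008,

    	first_frame = (0x1008 - 0x1000) / (CD_FRAMESIZE >> 9) = 8 / 4 = 2
    	num_frames  = 8192 / CD_FRAMESIZE = 8192 / 2048 = 4

so frames 2..5 are marked written and the read loop only schedules reads for the remaining holes.
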
-
-/*
- * Find a packet matching zone, or the least recently used packet if
- * there is no match.
- */
-static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
-{
- struct packet_data *pkt;
-
- list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
- if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
- list_del_init(&pkt->list);
- if (pkt->sector != zone)
- pkt->cache_valid = 0;
- return pkt;
- }
- }
- BUG();
- return NULL;
-}
-
-static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
-{
- if (pkt->cache_valid) {
- list_add(&pkt->list, &pd->cdrw.pkt_free_list);
- } else {
- list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
- }
-}
-
-static inline void pkt_set_state(struct device *ddev, struct packet_data *pkt,
- enum packet_data_state state)
-{
- static const char *state_name[] = {
- "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
- };
- enum packet_data_state old_state = pkt->state;
-
- dev_dbg(ddev, "pkt %2d : s=%6llx %s -> %s\n",
- pkt->id, pkt->sector, state_name[old_state], state_name[state]);
-
- pkt->state = state;
-}
-
-/*
- * Scan the work queue to see if we can start a new packet.
- * returns non-zero if any work was done.
- */
-static int pkt_handle_queue(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_data *pkt, *p;
- struct bio *bio = NULL;
- sector_t zone = 0; /* Suppress gcc warning */
- struct pkt_rb_node *node, *first_node;
- struct rb_node *n;
-
- atomic_set(&pd->scan_queue, 0);
-
- if (list_empty(&pd->cdrw.pkt_free_list)) {
- dev_dbg(ddev, "no pkt\n");
- return 0;
- }
-
- /*
- * Try to find a zone we are not already working on.
- */
- spin_lock(&pd->lock);
- first_node = pkt_rbtree_find(pd, pd->current_sector);
- if (!first_node) {
- n = rb_first(&pd->bio_queue);
- if (n)
- first_node = rb_entry(n, struct pkt_rb_node, rb_node);
- }
- node = first_node;
- while (node) {
- bio = node->bio;
- zone = get_zone(bio->bi_iter.bi_sector, pd);
- list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
- if (p->sector == zone) {
- bio = NULL;
- goto try_next_bio;
- }
- }
- break;
-try_next_bio:
- node = pkt_rbtree_next(node);
- if (!node) {
- n = rb_first(&pd->bio_queue);
- if (n)
- node = rb_entry(n, struct pkt_rb_node, rb_node);
- }
- if (node == first_node)
- node = NULL;
- }
- spin_unlock(&pd->lock);
- if (!bio) {
- dev_dbg(ddev, "no bio\n");
- return 0;
- }
-
- pkt = pkt_get_packet_data(pd, zone);
-
- pd->current_sector = zone + pd->settings.size;
- pkt->sector = zone;
- BUG_ON(pkt->frames != pd->settings.size >> 2);
- pkt->write_size = 0;
-
- /*
- * Scan work queue for bios in the same zone and link them
- * to this packet.
- */
- spin_lock(&pd->lock);
- dev_dbg(ddev, "looking for zone %llx\n", zone);
- while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
- sector_t tmp = get_zone(node->bio->bi_iter.bi_sector, pd);
-
- bio = node->bio;
- dev_dbg(ddev, "found zone=%llx\n", tmp);
- if (tmp != zone)
- break;
- pkt_rbtree_erase(pd, node);
- spin_lock(&pkt->lock);
- bio_list_add(&pkt->orig_bios, bio);
- pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
- spin_unlock(&pkt->lock);
- }
- /* check write congestion marks, and if bio_queue_size is
- * below, wake up any waiters
- */
- if (pd->congested &&
- pd->bio_queue_size <= pd->write_congestion_off) {
- pd->congested = false;
- wake_up_var(&pd->congested);
- }
- spin_unlock(&pd->lock);
-
- pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
- pkt_set_state(ddev, pkt, PACKET_WAITING_STATE);
- atomic_set(&pkt->run_sm, 1);
-
- spin_lock(&pd->cdrw.active_list_lock);
- list_add(&pkt->list, &pd->cdrw.pkt_active_list);
- spin_unlock(&pd->cdrw.active_list_lock);
-
- return 1;
-}
-
-/**
- * bio_list_copy_data - copy contents of data buffers from one chain of bios to
- * another
- * @src: source bio list
- * @dst: destination bio list
- *
- * Stops when it reaches the end of either the @src list or @dst list - that is,
- * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
- * bios).
- */
-static void bio_list_copy_data(struct bio *dst, struct bio *src)
-{
- struct bvec_iter src_iter = src->bi_iter;
- struct bvec_iter dst_iter = dst->bi_iter;
-
- while (1) {
- if (!src_iter.bi_size) {
- src = src->bi_next;
- if (!src)
- break;
-
- src_iter = src->bi_iter;
- }
-
- if (!dst_iter.bi_size) {
- dst = dst->bi_next;
- if (!dst)
- break;
-
- dst_iter = dst->bi_iter;
- }
-
- bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
- }
-}
-
-/*
- * Assemble a bio to write one packet and queue the bio for processing
- * by the underlying block device.
- */
-static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int f;
-
- bio_init(pkt->w_bio, file_bdev(pd->bdev_file), pkt->w_bio->bi_inline_vecs,
- pkt->frames, REQ_OP_WRITE);
- pkt->w_bio->bi_iter.bi_sector = pkt->sector;
- pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
- pkt->w_bio->bi_private = pkt;
-
- /* XXX: locking? */
- for (f = 0; f < pkt->frames; f++) {
- struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
- unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
-
- if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
- BUG();
- }
- dev_dbg(ddev, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
-
- /*
- * Fill-in bvec with data from orig_bios.
- */
- spin_lock(&pkt->lock);
- bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);
-
- pkt_set_state(ddev, pkt, PACKET_WRITE_WAIT_STATE);
- spin_unlock(&pkt->lock);
-
- dev_dbg(ddev, "Writing %d frames for zone %llx\n", pkt->write_size, pkt->sector);
-
- if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
- pkt->cache_valid = 1;
- else
- pkt->cache_valid = 0;
-
- /* Start the write request */
- atomic_set(&pkt->io_wait, 1);
- pkt_queue_bio(pd, pkt->w_bio);
-}
-
-static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
-{
- struct bio *bio;
-
- if (status)
- pkt->cache_valid = 0;
-
- /* Finish all bios corresponding to this packet */
- while ((bio = bio_list_pop(&pkt->orig_bios))) {
- bio->bi_status = status;
- bio_endio(bio);
- }
-}
-
-static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- dev_dbg(ddev, "pkt %d\n", pkt->id);
-
- for (;;) {
- switch (pkt->state) {
- case PACKET_WAITING_STATE:
- if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
- return;
-
- pkt->sleep_time = 0;
- pkt_gather_data(pd, pkt);
- pkt_set_state(ddev, pkt, PACKET_READ_WAIT_STATE);
- break;
-
- case PACKET_READ_WAIT_STATE:
- if (atomic_read(&pkt->io_wait) > 0)
- return;
-
- if (atomic_read(&pkt->io_errors) > 0) {
- pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE);
- } else {
- pkt_start_write(pd, pkt);
- }
- break;
-
- case PACKET_WRITE_WAIT_STATE:
- if (atomic_read(&pkt->io_wait) > 0)
- return;
-
- if (!pkt->w_bio->bi_status) {
- pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE);
- } else {
- pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE);
- }
- break;
-
- case PACKET_RECOVERY_STATE:
- dev_dbg(ddev, "No recovery possible\n");
- pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE);
- break;
-
- case PACKET_FINISHED_STATE:
- pkt_finish_packet(pkt, pkt->w_bio->bi_status);
- return;
-
- default:
- BUG();
- break;
- }
- }
-}
-
-static void pkt_handle_packets(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_data *pkt, *next;
-
- /*
- * Run state machine for active packets
- */
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (atomic_read(&pkt->run_sm) > 0) {
- atomic_set(&pkt->run_sm, 0);
- pkt_run_state_machine(pd, pkt);
- }
- }
-
- /*
- * Move no longer active packets to the free list
- */
- spin_lock(&pd->cdrw.active_list_lock);
- list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
- if (pkt->state == PACKET_FINISHED_STATE) {
- list_del(&pkt->list);
- pkt_put_packet_data(pd, pkt);
- pkt_set_state(ddev, pkt, PACKET_IDLE_STATE);
- atomic_set(&pd->scan_queue, 1);
- }
- }
- spin_unlock(&pd->cdrw.active_list_lock);
-}
-
-/*
- * kcdrwd is woken up when writes have been queued for one of our
- * registered devices
- */
-static int kcdrwd(void *foobar)
-{
- struct pktcdvd_device *pd = foobar;
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_data *pkt;
- int states[PACKET_NUM_STATES];
- long min_sleep_time, residue;
-
- set_user_nice(current, MIN_NICE);
- set_freezable();
-
- for (;;) {
- DECLARE_WAITQUEUE(wait, current);
-
- /*
- * Wait until there is something to do
- */
- add_wait_queue(&pd->wqueue, &wait);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- /* Check if we need to run pkt_handle_queue */
- if (atomic_read(&pd->scan_queue) > 0)
- goto work_to_do;
-
- /* Check if we need to run the state machine for some packet */
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (atomic_read(&pkt->run_sm) > 0)
- goto work_to_do;
- }
-
- /* Check if we need to process the iosched queues */
- if (atomic_read(&pd->iosched.attention) != 0)
- goto work_to_do;
-
- /* Otherwise, go to sleep */
- pkt_count_states(pd, states);
- dev_dbg(ddev, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
- states[0], states[1], states[2], states[3], states[4], states[5]);
-
- min_sleep_time = MAX_SCHEDULE_TIMEOUT;
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
- min_sleep_time = pkt->sleep_time;
- }
-
- dev_dbg(ddev, "sleeping\n");
- residue = schedule_timeout(min_sleep_time);
- dev_dbg(ddev, "wake up\n");
-
- /* make swsusp happy with our thread */
- try_to_freeze();
-
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (!pkt->sleep_time)
- continue;
- pkt->sleep_time -= min_sleep_time - residue;
- if (pkt->sleep_time <= 0) {
- pkt->sleep_time = 0;
- atomic_inc(&pkt->run_sm);
- }
- }
-
- if (kthread_should_stop())
- break;
- }
-work_to_do:
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&pd->wqueue, &wait);
-
- if (kthread_should_stop())
- break;
-
- /*
- * if pkt_handle_queue returns true, we can queue
- * another request.
- */
- while (pkt_handle_queue(pd))
- ;
-
- /*
- * Handle packet state machine
- */
- pkt_handle_packets(pd);
-
- /*
- * Handle iosched queues
- */
- pkt_iosched_process_queue(pd);
- }
-
- return 0;
-}
-
-static void pkt_print_settings(struct pktcdvd_device *pd)
-{
- dev_info(disk_to_dev(pd->disk), "%s packets, %u blocks, Mode-%c disc\n",
- pd->settings.fp ? "Fixed" : "Variable",
- pd->settings.size >> 2,
- pd->settings.block_mode == 8 ? '1' : '2');
-}
-
-static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
-{
- memset(cgc->cmd, 0, sizeof(cgc->cmd));
-
- cgc->cmd[0] = GPCMD_MODE_SENSE_10;
- cgc->cmd[2] = page_code | (page_control << 6);
- put_unaligned_be16(cgc->buflen, &cgc->cmd[7]);
- cgc->data_direction = CGC_DATA_READ;
- return pkt_generic_packet(pd, cgc);
-}
-
-static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
-{
- memset(cgc->cmd, 0, sizeof(cgc->cmd));
- memset(cgc->buffer, 0, 2);
- cgc->cmd[0] = GPCMD_MODE_SELECT_10;
- cgc->cmd[1] = 0x10; /* PF */
- put_unaligned_be16(cgc->buflen, &cgc->cmd[7]);
- cgc->data_direction = CGC_DATA_WRITE;
- return pkt_generic_packet(pd, cgc);
-}
-
-static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
-{
- struct packet_command cgc;
- int ret;
-
- /* set up command and get the disc info */
- init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_READ_DISC_INFO;
- cgc.cmd[8] = cgc.buflen = 2;
- cgc.quiet = 1;
-
- ret = pkt_generic_packet(pd, &cgc);
- if (ret)
- return ret;
-
- /* not all drives have the same disc_info length, so requeue
- * packet with the length the drive tells us it can supply
- */
- cgc.buflen = be16_to_cpu(di->disc_information_length) +
- sizeof(di->disc_information_length);
-
- if (cgc.buflen > sizeof(disc_information))
- cgc.buflen = sizeof(disc_information);
-
- cgc.cmd[8] = cgc.buflen;
- return pkt_generic_packet(pd, &cgc);
-}
-
-static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
-{
- struct packet_command cgc;
- int ret;
-
- init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
- cgc.cmd[1] = type & 3;
- put_unaligned_be16(track, &cgc.cmd[4]);
- cgc.cmd[8] = 8;
- cgc.quiet = 1;
-
- ret = pkt_generic_packet(pd, &cgc);
- if (ret)
- return ret;
-
- cgc.buflen = be16_to_cpu(ti->track_information_length) +
- sizeof(ti->track_information_length);
-
- if (cgc.buflen > sizeof(track_information))
- cgc.buflen = sizeof(track_information);
-
- cgc.cmd[8] = cgc.buflen;
- return pkt_generic_packet(pd, &cgc);
-}
-
-static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
- long *last_written)
-{
- disc_information di;
- track_information ti;
- __u32 last_track;
- int ret;
-
- ret = pkt_get_disc_info(pd, &di);
- if (ret)
- return ret;
-
- last_track = (di.last_track_msb << 8) | di.last_track_lsb;
- ret = pkt_get_track_info(pd, last_track, 1, &ti);
- if (ret)
- return ret;
-
- /* if this track is blank, try the previous. */
- if (ti.blank) {
- last_track--;
- ret = pkt_get_track_info(pd, last_track, 1, &ti);
- if (ret)
- return ret;
- }
-
- /* if last recorded field is valid, return it. */
- if (ti.lra_v) {
- *last_written = be32_to_cpu(ti.last_rec_address);
- } else {
- /* make it up instead */
- *last_written = be32_to_cpu(ti.track_start) +
- be32_to_cpu(ti.track_size);
- if (ti.free_blocks)
- *last_written -= (be32_to_cpu(ti.free_blocks) + 7);
- }
- return 0;
-}
-
-/*
- * write mode select package based on pd->settings
- */
-static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- write_param_page *wp;
- char buffer[128];
- int ret, size;
-
- /* doesn't apply to DVD+RW or DVD-RAM */
- if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
- return 0;
-
- memset(buffer, 0, sizeof(buffer));
- init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
-
- size = 2 + get_unaligned_be16(&buffer[0]);
- pd->mode_offset = get_unaligned_be16(&buffer[6]);
- if (size > sizeof(buffer))
- size = sizeof(buffer);
-
- /*
- * now get it all
- */
- init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
-
- /*
- * write page is offset header + block descriptor length
- */
- wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
-
- wp->fp = pd->settings.fp;
- wp->track_mode = pd->settings.track_mode;
- wp->write_type = pd->settings.write_type;
- wp->data_block_type = pd->settings.block_mode;
-
- wp->multi_session = 0;
-
-#ifdef PACKET_USE_LS
- wp->link_size = 7;
- wp->ls_v = 1;
-#endif
-
- if (wp->data_block_type == PACKET_BLOCK_MODE1) {
- wp->session_format = 0;
- wp->subhdr2 = 0x20;
- } else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
- wp->session_format = 0x20;
- wp->subhdr2 = 8;
-#if 0
- wp->mcn[0] = 0x80;
- memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
-#endif
- } else {
- /*
- * paranoia
- */
- dev_err(ddev, "write mode wrong %d\n", wp->data_block_type);
- return 1;
- }
- wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
-
- cgc.buflen = cgc.cmd[8] = size;
- ret = pkt_mode_select(pd, &cgc);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
-
- pkt_print_settings(pd);
- return 0;
-}
-
-/*
- * 1 -- we can write to this track, 0 -- we can't
- */
-static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- switch (pd->mmc3_profile) {
- case 0x1a: /* DVD+RW */
- case 0x12: /* DVD-RAM */
- /* The track is always writable on DVD+RW/DVD-RAM */
- return 1;
- default:
- break;
- }
-
- if (!ti->packet || !ti->fp)
- return 0;
-
- /*
- * "good" settings as per Mt Fuji.
- */
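-	/* the only combination rejected below is rt == 1 && blank == 1 */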
- if (ti->rt == 0 && ti->blank == 0)
- return 1;
-
- if (ti->rt == 0 && ti->blank == 1)
- return 1;
-
- if (ti->rt == 1 && ti->blank == 0)
- return 1;
-
- dev_err(ddev, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
- return 0;
-}
-
-/*
- * 1 -- we can write to this disc, 0 -- we can't
- */
-static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- switch (pd->mmc3_profile) {
- case 0x0a: /* CD-RW */
- case 0xffff: /* MMC3 not supported */
- break;
- case 0x1a: /* DVD+RW */
- case 0x13: /* DVD-RW */
- case 0x12: /* DVD-RAM */
- return 1;
- default:
- dev_dbg(ddev, "Wrong disc profile (%x)\n", pd->mmc3_profile);
- return 0;
- }
-
- /*
-	 * for disc type 0xff we should probably reserve a new track,
-	 * but I'm not sure; should we leave this to user apps? Probably.
- */
- if (di->disc_type == 0xff) {
- dev_notice(ddev, "unknown disc - no track?\n");
- return 0;
- }
-
- if (di->disc_type != 0x20 && di->disc_type != 0) {
- dev_err(ddev, "wrong disc type (%x)\n", di->disc_type);
- return 0;
- }
-
- if (di->erasable == 0) {
- dev_err(ddev, "disc not erasable\n");
- return 0;
- }
-
- if (di->border_status == PACKET_SESSION_RESERVED) {
- dev_err(ddev, "can't write to last track (reserved)\n");
- return 0;
- }
-
- return 1;
-}
-
-static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- unsigned char buf[12];
- disc_information di;
- track_information ti;
- int ret, track;
-
- init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
- cgc.cmd[8] = 8;
- ret = pkt_generic_packet(pd, &cgc);
- pd->mmc3_profile = ret ? 0xffff : get_unaligned_be16(&buf[6]);
-
- memset(&di, 0, sizeof(disc_information));
- memset(&ti, 0, sizeof(track_information));
-
- ret = pkt_get_disc_info(pd, &di);
- if (ret) {
- dev_err(ddev, "failed get_disc\n");
- return ret;
- }
-
- if (!pkt_writable_disc(pd, &di))
- return -EROFS;
-
- pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
-
- track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
- ret = pkt_get_track_info(pd, track, 1, &ti);
- if (ret) {
- dev_err(ddev, "failed get_track\n");
- return ret;
- }
-
- if (!pkt_writable_track(pd, &ti)) {
- dev_err(ddev, "can't write to this track\n");
- return -EROFS;
- }
-
- /*
- * we keep packet size in 512 byte units, makes it easier to
- * deal with request calculations.
- */
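-	/* fixed_packet_size counts 2048-byte frames, i.e. 4 sectors each */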
- pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
- if (pd->settings.size == 0) {
- dev_notice(ddev, "detected zero packet size!\n");
- return -ENXIO;
- }
- if (pd->settings.size > PACKET_MAX_SECTORS) {
- dev_err(ddev, "packet size is too big\n");
- return -EROFS;
- }
- pd->settings.fp = ti.fp;
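-	/* the mask below assumes the packet size is a power of two */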
- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
-
- if (ti.nwa_v) {
- pd->nwa = be32_to_cpu(ti.next_writable);
- set_bit(PACKET_NWA_VALID, &pd->flags);
- }
-
- /*
- * in theory we could use lra on -RW media as well and just zero
- * blocks that haven't been written yet, but in practice that
- * is just a no-go. we'll use that for -R, naturally.
- */
- if (ti.lra_v) {
- pd->lra = be32_to_cpu(ti.last_rec_address);
- set_bit(PACKET_LRA_VALID, &pd->flags);
- } else {
- pd->lra = 0xffffffff;
- set_bit(PACKET_LRA_VALID, &pd->flags);
- }
-
- /*
- * fine for now
- */
- pd->settings.link_loss = 7;
- pd->settings.write_type = 0; /* packet */
- pd->settings.track_mode = ti.track_mode;
-
- /*
- * mode1 or mode2 disc
- */
- switch (ti.data_mode) {
- case PACKET_MODE1:
- pd->settings.block_mode = PACKET_BLOCK_MODE1;
- break;
- case PACKET_MODE2:
- pd->settings.block_mode = PACKET_BLOCK_MODE2;
- break;
- default:
- dev_err(ddev, "unknown data mode\n");
- return -EROFS;
- }
- return 0;
-}
-
-/*
- * enable/disable write caching on drive
- */
-static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- unsigned char buf[64];
- bool set = IS_ENABLED(CONFIG_CDROM_PKTCDVD_WCACHE);
- int ret;
-
- init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- cgc.buflen = pd->mode_offset + 12;
-
- /*
- * caching mode page might not be there, so quiet this command
- */
- cgc.quiet = 1;
-
- ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
- if (ret)
- return ret;
-
- /*
- * use drive write caching -- we need deferred error handling to be
- * able to successfully recover with this option (drive will return good
- * status as soon as the cdb is validated).
- */
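-	/* WCE is bit 2 of byte 2 of the caching mode page */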
- buf[pd->mode_offset + 10] |= (set << 2);
-
- cgc.buflen = cgc.cmd[8] = 2 + get_unaligned_be16(&buf[0]);
- ret = pkt_mode_select(pd, &cgc);
- if (ret) {
- dev_err(ddev, "write caching control failed\n");
- pkt_dump_sense(pd, &cgc);
-	} else if (set)
- dev_notice(ddev, "enabled write caching\n");
- return ret;
-}
-
-static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
-{
- struct packet_command cgc;
-
- init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
- cgc.cmd[4] = lockflag ? 1 : 0;
- return pkt_generic_packet(pd, &cgc);
-}
-
-/*
- * Returns drive maximum write speed
- */
-static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
- unsigned *write_speed)
-{
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- unsigned char buf[256+18];
- unsigned char *cap_buf;
- int ret, offset;
-
- cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
- init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
- cgc.sshdr = &sshdr;
-
- ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
- if (ret) {
- cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
- sizeof(struct mode_page_header);
- ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
- }
-
- offset = 20; /* Obsoleted field, used by older drives */
- if (cap_buf[1] >= 28)
- offset = 28; /* Current write speed selected */
- if (cap_buf[1] >= 30) {
- /* If the drive reports at least one "Logical Unit Write
- * Speed Performance Descriptor Block", use the information
- * in the first block. (contains the highest speed)
- */
- int num_spdb = get_unaligned_be16(&cap_buf[30]);
- if (num_spdb > 0)
- offset = 34;
- }
-
- *write_speed = get_unaligned_be16(&cap_buf[offset]);
- return 0;
-}
-
-/* These tables from cdrecord - I don't have orange book */
-/* standard speed CD-RW (1-4x) */
-static char clv_to_speed[16] = {
- /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
- 0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-};
-/* high speed CD-RW (-10x) */
-static char hs_clv_to_speed[16] = {
- /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
- 0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-};
-/* ultra high speed CD-RW */
-static char us_clv_to_speed[16] = {
- /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
- 0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
-};
-
-/*
- * reads the maximum media speed from ATIP
- */
-static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
- unsigned *speed)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- unsigned char buf[64];
- unsigned int size, st, sp;
- int ret;
-
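-	/* fetch just the 2-byte ATIP length first, then reissue for the rest */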
- init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
- cgc.cmd[1] = 2;
- cgc.cmd[2] = 4; /* READ ATIP */
- cgc.cmd[8] = 2;
- ret = pkt_generic_packet(pd, &cgc);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
- size = 2 + get_unaligned_be16(&buf[0]);
- if (size > sizeof(buf))
- size = sizeof(buf);
-
- init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
- cgc.cmd[1] = 2;
- cgc.cmd[2] = 4;
- cgc.cmd[8] = size;
- ret = pkt_generic_packet(pd, &cgc);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
-
- if (!(buf[6] & 0x40)) {
- dev_notice(ddev, "disc type is not CD-RW\n");
- return 1;
- }
- if (!(buf[6] & 0x4)) {
- dev_notice(ddev, "A1 values on media are not valid, maybe not CDRW?\n");
- return 1;
- }
-
- st = (buf[6] >> 3) & 0x7; /* disc sub-type */
-
- sp = buf[16] & 0xf; /* max speed from ATIP A1 field */
-
- /* Info from cdrecord */
- switch (st) {
- case 0: /* standard speed */
- *speed = clv_to_speed[sp];
- break;
- case 1: /* high speed */
- *speed = hs_clv_to_speed[sp];
- break;
- case 2: /* ultra high speed */
- *speed = us_clv_to_speed[sp];
- break;
- default:
- dev_notice(ddev, "unknown disc sub-type %d\n", st);
- return 1;
- }
- if (*speed) {
- dev_info(ddev, "maximum media speed: %d\n", *speed);
- return 0;
- } else {
- dev_notice(ddev, "unknown speed %d for sub-type %d\n", sp, st);
- return 1;
- }
-}
-
-static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- int ret;
-
- dev_dbg(ddev, "Performing OPC\n");
-
- init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.sshdr = &sshdr;
- cgc.timeout = 60*HZ;
- cgc.cmd[0] = GPCMD_SEND_OPC;
- cgc.cmd[1] = 1;
- ret = pkt_generic_packet(pd, &cgc);
- if (ret)
- pkt_dump_sense(pd, &cgc);
- return ret;
-}
-
-static int pkt_open_write(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int ret;
- unsigned int write_speed, media_write_speed, read_speed;
-
- ret = pkt_probe_settings(pd);
- if (ret) {
- dev_dbg(ddev, "failed probe\n");
- return ret;
- }
-
- ret = pkt_set_write_settings(pd);
- if (ret) {
- dev_notice(ddev, "failed saving write settings\n");
- return -EIO;
- }
-
- pkt_write_caching(pd);
-
- ret = pkt_get_max_speed(pd, &write_speed);
- if (ret)
- write_speed = 16 * 177;
- switch (pd->mmc3_profile) {
- case 0x13: /* DVD-RW */
- case 0x1a: /* DVD+RW */
- case 0x12: /* DVD-RAM */
- dev_notice(ddev, "write speed %ukB/s\n", write_speed);
- break;
- default:
- ret = pkt_media_speed(pd, &media_write_speed);
- if (ret)
- media_write_speed = 16;
- write_speed = min(write_speed, media_write_speed * 177);
- dev_notice(ddev, "write speed %ux\n", write_speed / 176);
- break;
- }
- read_speed = write_speed;
-
- ret = pkt_set_speed(pd, write_speed, read_speed);
- if (ret) {
- dev_notice(ddev, "couldn't set write speed\n");
- return -EIO;
- }
- pd->write_speed = write_speed;
- pd->read_speed = read_speed;
-
- ret = pkt_perform_opc(pd);
- if (ret)
- dev_notice(ddev, "Optimum Power Calibration failed\n");
-
- return 0;
-}
-
-/*
- * called at open time.
- */
-static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int ret;
- long lba;
- struct request_queue *q;
- struct file *bdev_file;
-
- /*
- * We need to re-open the cdrom device without O_NONBLOCK to be able
- * to read/write from/to it. It is already opened in O_NONBLOCK mode
- * so open should not fail.
- */
- bdev_file = bdev_file_open_by_dev(file_bdev(pd->bdev_file)->bd_dev,
- BLK_OPEN_READ, pd, NULL);
- if (IS_ERR(bdev_file)) {
- ret = PTR_ERR(bdev_file);
- goto out;
- }
- pd->f_open_bdev = bdev_file;
-
- ret = pkt_get_last_written(pd, &lba);
- if (ret) {
- dev_err(ddev, "pkt_get_last_written failed\n");
- goto out_putdev;
- }
-
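-	/* lba counts 2048-byte CD frames; disk capacity is in 512-byte sectors */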
- set_capacity(pd->disk, lba << 2);
- set_capacity_and_notify(file_bdev(pd->bdev_file)->bd_disk, lba << 2);
-
- q = bdev_get_queue(file_bdev(pd->bdev_file));
- if (write) {
- ret = pkt_open_write(pd);
- if (ret)
- goto out_putdev;
- set_bit(PACKET_WRITABLE, &pd->flags);
- } else {
- pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
- clear_bit(PACKET_WRITABLE, &pd->flags);
- }
-
- ret = pkt_set_segment_merging(pd, q);
- if (ret)
- goto out_putdev;
-
- if (write) {
- if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
- dev_err(ddev, "not enough memory for buffers\n");
- ret = -ENOMEM;
- goto out_putdev;
- }
- dev_info(ddev, "%lukB available on disc\n", lba << 1);
- }
- set_blocksize(bdev_file, CD_FRAMESIZE);
-
- return 0;
-
-out_putdev:
- fput(bdev_file);
-out:
- return ret;
-}
-
-/*
- * called when the device is closed. makes sure that the device flushes
- * the internal cache before we close.
- */
-static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- if (flush && pkt_flush_cache(pd))
- dev_notice(ddev, "not flushing cache\n");
-
- pkt_lock_door(pd, 0);
-
- pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
- fput(pd->f_open_bdev);
- pd->f_open_bdev = NULL;
-
- pkt_shrink_pktlist(pd);
-}
-
-static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
-{
- if (dev_minor >= MAX_WRITERS)
- return NULL;
-
- dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
- return pkt_devs[dev_minor];
-}
-
-static int pkt_open(struct gendisk *disk, blk_mode_t mode)
-{
- struct pktcdvd_device *pd = NULL;
- int ret;
-
- mutex_lock(&pktcdvd_mutex);
- mutex_lock(&ctl_mutex);
- pd = pkt_find_dev_from_minor(disk->first_minor);
- if (!pd) {
- ret = -ENODEV;
- goto out;
- }
- BUG_ON(pd->refcnt < 0);
-
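-	/*
-	 * Only the first opener sets the device up; a later open for write
-	 * is refused unless the device was made writable by the first one.
-	 */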
- pd->refcnt++;
- if (pd->refcnt > 1) {
- if ((mode & BLK_OPEN_WRITE) &&
- !test_bit(PACKET_WRITABLE, &pd->flags)) {
- ret = -EBUSY;
- goto out_dec;
- }
- } else {
- ret = pkt_open_dev(pd, mode & BLK_OPEN_WRITE);
- if (ret)
- goto out_dec;
- }
- mutex_unlock(&ctl_mutex);
- mutex_unlock(&pktcdvd_mutex);
- return 0;
-
-out_dec:
- pd->refcnt--;
-out:
- mutex_unlock(&ctl_mutex);
- mutex_unlock(&pktcdvd_mutex);
- return ret;
-}
-
-static void pkt_release(struct gendisk *disk)
-{
- struct pktcdvd_device *pd = disk->private_data;
-
- mutex_lock(&pktcdvd_mutex);
- mutex_lock(&ctl_mutex);
- pd->refcnt--;
- BUG_ON(pd->refcnt < 0);
- if (pd->refcnt == 0) {
- int flush = test_bit(PACKET_WRITABLE, &pd->flags);
- pkt_release_dev(pd, flush);
- }
- mutex_unlock(&ctl_mutex);
- mutex_unlock(&pktcdvd_mutex);
-}
-
-
-static void pkt_end_io_read_cloned(struct bio *bio)
-{
- struct packet_stacked_data *psd = bio->bi_private;
- struct pktcdvd_device *pd = psd->pd;
-
- psd->bio->bi_status = bio->bi_status;
- bio_put(bio);
- bio_endio(psd->bio);
- mempool_free(psd, &psd_pool);
- pkt_bio_finished(pd);
-}
-
-static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
-{
- struct bio *cloned_bio = bio_alloc_clone(file_bdev(pd->bdev_file), bio,
- GFP_NOIO, &pkt_bio_set);
- struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
-
- psd->pd = pd;
- psd->bio = bio;
- cloned_bio->bi_private = psd;
- cloned_bio->bi_end_io = pkt_end_io_read_cloned;
- pd->stats.secs_r += bio_sectors(bio);
- pkt_queue_bio(pd, cloned_bio);
-}
-
-static void pkt_make_request_write(struct bio *bio)
-{
- struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data;
- sector_t zone;
- struct packet_data *pkt;
- int was_empty, blocked_bio;
- struct pkt_rb_node *node;
-
- zone = get_zone(bio->bi_iter.bi_sector, pd);
-
- /*
- * If we find a matching packet in state WAITING or READ_WAIT, we can
- * just append this bio to that packet.
- */
- spin_lock(&pd->cdrw.active_list_lock);
- blocked_bio = 0;
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (pkt->sector == zone) {
- spin_lock(&pkt->lock);
- if ((pkt->state == PACKET_WAITING_STATE) ||
- (pkt->state == PACKET_READ_WAIT_STATE)) {
- bio_list_add(&pkt->orig_bios, bio);
- pkt->write_size +=
- bio->bi_iter.bi_size / CD_FRAMESIZE;
- if ((pkt->write_size >= pkt->frames) &&
- (pkt->state == PACKET_WAITING_STATE)) {
- atomic_inc(&pkt->run_sm);
- wake_up(&pd->wqueue);
- }
- spin_unlock(&pkt->lock);
- spin_unlock(&pd->cdrw.active_list_lock);
- return;
- } else {
- blocked_bio = 1;
- }
- spin_unlock(&pkt->lock);
- }
- }
- spin_unlock(&pd->cdrw.active_list_lock);
-
-	/*
-	 * If the bio work queue has grown to the congestion-on mark, wait
-	 * until it has drained below the congestion-off mark before adding
-	 * more work.
-	 */
- spin_lock(&pd->lock);
- if (pd->write_congestion_on > 0
- && pd->bio_queue_size >= pd->write_congestion_on) {
- struct wait_bit_queue_entry wqe;
-
- init_wait_var_entry(&wqe, &pd->congested, 0);
- for (;;) {
- prepare_to_wait_event(__var_waitqueue(&pd->congested),
- &wqe.wq_entry,
- TASK_UNINTERRUPTIBLE);
- if (pd->bio_queue_size <= pd->write_congestion_off)
- break;
- pd->congested = true;
- spin_unlock(&pd->lock);
- schedule();
- spin_lock(&pd->lock);
- }
- }
- spin_unlock(&pd->lock);
-
- /*
- * No matching packet found. Store the bio in the work queue.
- */
- node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
- node->bio = bio;
- spin_lock(&pd->lock);
- BUG_ON(pd->bio_queue_size < 0);
- was_empty = (pd->bio_queue_size == 0);
- pkt_rbtree_insert(pd, node);
- spin_unlock(&pd->lock);
-
- /*
- * Wake up the worker thread.
- */
- atomic_set(&pd->scan_queue, 1);
- if (was_empty) {
- /* This wake_up is required for correct operation */
- wake_up(&pd->wqueue);
- } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
- /*
- * This wake up is not required for correct operation,
- * but improves performance in some cases.
- */
- wake_up(&pd->wqueue);
- }
-}
-
-static void pkt_submit_bio(struct bio *bio)
-{
- struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data;
- struct device *ddev = disk_to_dev(pd->disk);
- struct bio *split;
-
- bio = bio_split_to_limits(bio);
- if (!bio)
- return;
-
- dev_dbg(ddev, "start = %6llx stop = %6llx\n",
- bio->bi_iter.bi_sector, bio_end_sector(bio));
-
- /*
- * Clone READ bios so we can have our own bi_end_io callback.
- */
- if (bio_data_dir(bio) == READ) {
- pkt_make_request_read(pd, bio);
- return;
- }
-
- if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
- dev_notice(ddev, "WRITE for ro device (%llu)\n", bio->bi_iter.bi_sector);
- goto end_io;
- }
-
- if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
- dev_err(ddev, "wrong bio size\n");
- goto end_io;
- }
-
- do {
- sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
- sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
-
- if (last_zone != zone) {
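-			/*
-			 * A bio crossing a zone boundary must end in the
-			 * very next zone; the split below peels off the
-			 * part belonging to the first zone.
-			 */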
- BUG_ON(last_zone != zone + pd->settings.size);
-
- split = bio_split(bio, last_zone -
- bio->bi_iter.bi_sector,
- GFP_NOIO, &pkt_bio_set);
- bio_chain(split, bio);
- } else {
- split = bio;
- }
-
- pkt_make_request_write(split);
- } while (split != bio);
-
- return;
-end_io:
- bio_io_error(bio);
-}
-
-static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int i;
- struct file *bdev_file;
- struct scsi_device *sdev;
-
- if (pd->pkt_dev == dev) {
- dev_err(ddev, "recursive setup not allowed\n");
- return -EBUSY;
- }
- for (i = 0; i < MAX_WRITERS; i++) {
- struct pktcdvd_device *pd2 = pkt_devs[i];
- if (!pd2)
- continue;
- if (file_bdev(pd2->bdev_file)->bd_dev == dev) {
- dev_err(ddev, "%pg already setup\n",
- file_bdev(pd2->bdev_file));
- return -EBUSY;
- }
- if (pd2->pkt_dev == dev) {
- dev_err(ddev, "can't chain pktcdvd devices\n");
- return -EBUSY;
- }
- }
-
- bdev_file = bdev_file_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY,
- NULL, NULL);
- if (IS_ERR(bdev_file))
- return PTR_ERR(bdev_file);
- sdev = scsi_device_from_queue(file_bdev(bdev_file)->bd_disk->queue);
- if (!sdev) {
- fput(bdev_file);
- return -EINVAL;
- }
- put_device(&sdev->sdev_gendev);
-
- /* This is safe, since we have a reference from open(). */
- __module_get(THIS_MODULE);
-
- pd->bdev_file = bdev_file;
-
- atomic_set(&pd->cdrw.pending_bios, 0);
- pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name);
- if (IS_ERR(pd->cdrw.thread)) {
- dev_err(ddev, "can't start kernel thread\n");
- goto out_mem;
- }
-
- proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd);
- dev_notice(ddev, "writer mapped to %pg\n", file_bdev(bdev_file));
- return 0;
-
-out_mem:
- fput(bdev_file);
- /* This is safe: open() is still holding a reference. */
- module_put(THIS_MODULE);
- return -ENOMEM;
-}
-
-static int pkt_ioctl(struct block_device *bdev, blk_mode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct pktcdvd_device *pd = bdev->bd_disk->private_data;
- struct device *ddev = disk_to_dev(pd->disk);
- int ret;
-
- dev_dbg(ddev, "cmd %x, dev %d:%d\n", cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
-
- mutex_lock(&pktcdvd_mutex);
- switch (cmd) {
- case CDROMEJECT:
- /*
- * The door gets locked when the device is opened, so we
- * have to unlock it or else the eject command fails.
- */
- if (pd->refcnt == 1)
- pkt_lock_door(pd, 0);
- fallthrough;
- /*
- * forward selected CDROM ioctls to CD-ROM, for UDF
- */
- case CDROMMULTISESSION:
- case CDROMREADTOCENTRY:
- case CDROM_LAST_WRITTEN:
- case CDROM_SEND_PACKET:
- case SCSI_IOCTL_SEND_COMMAND:
- if (!bdev->bd_disk->fops->ioctl)
- ret = -ENOTTY;
- else
- ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
- break;
- default:
- dev_dbg(ddev, "Unknown ioctl (%x)\n", cmd);
- ret = -ENOTTY;
- }
- mutex_unlock(&pktcdvd_mutex);
-
- return ret;
-}
-
-static unsigned int pkt_check_events(struct gendisk *disk,
- unsigned int clearing)
-{
- struct pktcdvd_device *pd = disk->private_data;
- struct gendisk *attached_disk;
-
- if (!pd)
- return 0;
- if (!pd->bdev_file)
- return 0;
- attached_disk = file_bdev(pd->bdev_file)->bd_disk;
- if (!attached_disk || !attached_disk->fops->check_events)
- return 0;
- return attached_disk->fops->check_events(attached_disk, clearing);
-}
-
-static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
-}
-
-static const struct block_device_operations pktcdvd_ops = {
- .owner = THIS_MODULE,
- .submit_bio = pkt_submit_bio,
- .open = pkt_open,
- .release = pkt_release,
- .ioctl = pkt_ioctl,
- .compat_ioctl = blkdev_compat_ptr_ioctl,
- .check_events = pkt_check_events,
- .devnode = pkt_devnode,
-};
-
-/*
- * Set up mapping from pktcdvd device to CD-ROM device.
- */
-static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
-{
- struct queue_limits lim = {
- .max_hw_sectors = PACKET_MAX_SECTORS,
- .logical_block_size = CD_FRAMESIZE,
- .features = BLK_FEAT_ROTATIONAL,
- };
- int idx;
- int ret = -ENOMEM;
- struct pktcdvd_device *pd;
- struct gendisk *disk;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- for (idx = 0; idx < MAX_WRITERS; idx++)
- if (!pkt_devs[idx])
- break;
- if (idx == MAX_WRITERS) {
- pr_err("max %d writers supported\n", MAX_WRITERS);
- ret = -EBUSY;
- goto out_mutex;
- }
-
- pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
- if (!pd)
- goto out_mutex;
-
- ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
- sizeof(struct pkt_rb_node));
- if (ret)
- goto out_mem;
-
- INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
- INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
- spin_lock_init(&pd->cdrw.active_list_lock);
-
- spin_lock_init(&pd->lock);
- spin_lock_init(&pd->iosched.lock);
- bio_list_init(&pd->iosched.read_queue);
- bio_list_init(&pd->iosched.write_queue);
- init_waitqueue_head(&pd->wqueue);
- pd->bio_queue = RB_ROOT;
-
- pd->write_congestion_on = write_congestion_on;
- pd->write_congestion_off = write_congestion_off;
-
- disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
- if (IS_ERR(disk)) {
- ret = PTR_ERR(disk);
- goto out_mem;
- }
- pd->disk = disk;
- disk->major = pktdev_major;
- disk->first_minor = idx;
- disk->minors = 1;
- disk->fops = &pktcdvd_ops;
- disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
- snprintf(disk->disk_name, sizeof(disk->disk_name), DRIVER_NAME"%d", idx);
- disk->private_data = pd;
-
- pd->pkt_dev = MKDEV(pktdev_major, idx);
- ret = pkt_new_dev(pd, dev);
- if (ret)
- goto out_mem2;
-
- /* inherit events of the host device */
- disk->events = file_bdev(pd->bdev_file)->bd_disk->events;
-
- ret = add_disk(disk);
- if (ret)
- goto out_mem2;
-
- pkt_sysfs_dev_new(pd);
- pkt_debugfs_dev_new(pd);
-
- pkt_devs[idx] = pd;
- if (pkt_dev)
- *pkt_dev = pd->pkt_dev;
-
- mutex_unlock(&ctl_mutex);
- return 0;
-
-out_mem2:
- put_disk(disk);
-out_mem:
- mempool_exit(&pd->rb_pool);
- kfree(pd);
-out_mutex:
- mutex_unlock(&ctl_mutex);
- pr_err("setup of pktcdvd device failed\n");
- return ret;
-}
-
-/*
- * Tear down mapping from pktcdvd device to CD-ROM device.
- */
-static int pkt_remove_dev(dev_t pkt_dev)
-{
- struct pktcdvd_device *pd;
- struct device *ddev;
- int idx;
- int ret = 0;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- for (idx = 0; idx < MAX_WRITERS; idx++) {
- pd = pkt_devs[idx];
- if (pd && (pd->pkt_dev == pkt_dev))
- break;
- }
- if (idx == MAX_WRITERS) {
- pr_debug("dev not setup\n");
- ret = -ENXIO;
- goto out;
- }
-
- if (pd->refcnt > 0) {
- ret = -EBUSY;
- goto out;
- }
-
- ddev = disk_to_dev(pd->disk);
-
- if (!IS_ERR(pd->cdrw.thread))
- kthread_stop(pd->cdrw.thread);
-
- pkt_devs[idx] = NULL;
-
- pkt_debugfs_dev_remove(pd);
- pkt_sysfs_dev_remove(pd);
-
- fput(pd->bdev_file);
-
- remove_proc_entry(pd->disk->disk_name, pkt_proc);
- dev_notice(ddev, "writer unmapped\n");
-
- del_gendisk(pd->disk);
- put_disk(pd->disk);
-
- mempool_exit(&pd->rb_pool);
- kfree(pd);
-
- /* This is safe: open() is still holding a reference. */
- module_put(THIS_MODULE);
-
-out:
- mutex_unlock(&ctl_mutex);
- return ret;
-}
-
-static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
-{
- struct pktcdvd_device *pd;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
- if (pd) {
- ctrl_cmd->dev = new_encode_dev(file_bdev(pd->bdev_file)->bd_dev);
- ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
- } else {
- ctrl_cmd->dev = 0;
- ctrl_cmd->pkt_dev = 0;
- }
- ctrl_cmd->num_devices = MAX_WRITERS;
-
- mutex_unlock(&ctl_mutex);
-}
-
-static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- struct pkt_ctrl_command ctrl_cmd;
- int ret = 0;
- dev_t pkt_dev = 0;
-
- if (cmd != PACKET_CTRL_CMD)
- return -ENOTTY;
-
- if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
- return -EFAULT;
-
- switch (ctrl_cmd.command) {
- case PKT_CTRL_CMD_SETUP:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
- ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
- break;
- case PKT_CTRL_CMD_TEARDOWN:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
- break;
- case PKT_CTRL_CMD_STATUS:
- pkt_get_status(&ctrl_cmd);
- break;
- default:
- return -ENOTTY;
- }
-
- if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
- return -EFAULT;
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
-static const struct file_operations pkt_ctl_fops = {
- .open = nonseekable_open,
- .unlocked_ioctl = pkt_ctl_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = pkt_ctl_compat_ioctl,
-#endif
- .owner = THIS_MODULE,
-};
-
-static struct miscdevice pkt_misc = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = DRIVER_NAME,
- .nodename = "pktcdvd/control",
- .fops = &pkt_ctl_fops
-};
-
-static int __init pkt_init(void)
-{
- int ret;
-
- mutex_init(&ctl_mutex);
-
- ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE,
- sizeof(struct packet_stacked_data));
- if (ret)
- return ret;
- ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0);
- if (ret) {
- mempool_exit(&psd_pool);
- return ret;
- }
-
- ret = register_blkdev(pktdev_major, DRIVER_NAME);
- if (ret < 0) {
- pr_err("unable to register block device\n");
- goto out2;
- }
- if (!pktdev_major)
- pktdev_major = ret;
-
- ret = pkt_sysfs_init();
- if (ret)
- goto out;
-
- pkt_debugfs_init();
-
- ret = misc_register(&pkt_misc);
- if (ret) {
- pr_err("unable to register misc device\n");
- goto out_misc;
- }
-
- pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);
-
- return 0;
-
-out_misc:
- pkt_debugfs_cleanup();
- pkt_sysfs_cleanup();
-out:
- unregister_blkdev(pktdev_major, DRIVER_NAME);
-out2:
- mempool_exit(&psd_pool);
- bioset_exit(&pkt_bio_set);
- return ret;
-}
-
-static void __exit pkt_exit(void)
-{
- remove_proc_entry("driver/"DRIVER_NAME, NULL);
- misc_deregister(&pkt_misc);
-
- pkt_debugfs_cleanup();
- pkt_sysfs_cleanup();
-
- unregister_blkdev(pktdev_major, DRIVER_NAME);
- mempool_exit(&psd_pool);
- bioset_exit(&pkt_bio_set);
-}
-
-MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
-MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
-MODULE_LICENSE("GPL");
-
-module_init(pkt_init);
-module_exit(pkt_exit);
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index dc9e4a14b885..8892f218a814 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -85,10 +85,14 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
struct bio_vec bvec;
rq_for_each_segment(bvec, req, iter) {
+ dev_dbg(&dev->sbd.core, "%s:%u: %u sectors from %llu\n",
+ __func__, __LINE__, bio_sectors(iter.bio),
+ iter.bio->bi_iter.bi_sector);
if (gather)
memcpy_from_bvec(dev->bounce_buf + offset, &bvec);
else
memcpy_to_bvec(&bvec, dev->bounce_buf + offset);
+ offset += bvec.bv_len;
}
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index faafd7ff43d6..af0e21149dbc 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -7389,7 +7389,7 @@ static int __init rbd_init(void)
* The number of active work items is limited by the number of
* rbd devices * queue depth, so leave @max_active at default.
*/
- rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
+ rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!rbd_wq) {
rc = -ENOMEM;
goto err_out_slab;
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 15627417f12e..f1409e54010a 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -942,11 +942,11 @@ static void rnbd_client_release(struct gendisk *gen)
rnbd_clt_put_dev(dev);
}
-static int rnbd_client_getgeo(struct block_device *block_device,
+static int rnbd_client_getgeo(struct gendisk *disk,
struct hd_geometry *geo)
{
u64 size;
- struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;
+ struct rnbd_clt_dev *dev = disk->private_data;
struct queue_limits *limit = &dev->queue->limits;
size = dev->size * (limit->logical_block_size / SECTOR_SIZE);
@@ -1809,7 +1809,7 @@ static int __init rnbd_client_init(void)
unregister_blkdev(rnbd_client_major, "rnbd");
return err;
}
- rnbd_clt_wq = alloc_workqueue("rnbd_clt_wq", 0, 0);
+ rnbd_clt_wq = alloc_workqueue("rnbd_clt_wq", WQ_PERCPU, 0);
if (!rnbd_clt_wq) {
pr_err("Failed to load module, alloc_workqueue failed.\n");
rnbd_clt_destroy_sysfs_files();
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
index f35be51d213c..77360c2a6069 100644
--- a/drivers/block/rnbd/rnbd-proto.h
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -24,7 +24,7 @@
#define RTRS_PORT 1234
/**
- * enum rnbd_msg_types - RNBD message types
+ * enum rnbd_msg_type - RNBD message types
* @RNBD_MSG_SESS_INFO: initial session info from client to server
* @RNBD_MSG_SESS_INFO_RSP: initial session info from server to client
* @RNBD_MSG_OPEN: open (map) device request
@@ -47,10 +47,11 @@ enum rnbd_msg_type {
*/
struct rnbd_msg_hdr {
__le16 type;
+ /* private: */
__le16 __padding;
};
-/**
+/*
 * We allow mapping RO many times and RW only once. Mapping RW one more time
 * is allowed if MIGRATION is provided (a second RW export can be required,
 * for example, for VM migration).
@@ -78,6 +79,7 @@ static const __maybe_unused struct {
struct rnbd_msg_sess_info {
struct rnbd_msg_hdr hdr;
u8 ver;
+ /* private: */
u8 reserved[31];
};
@@ -89,6 +91,7 @@ struct rnbd_msg_sess_info {
struct rnbd_msg_sess_info_rsp {
struct rnbd_msg_hdr hdr;
u8 ver;
+ /* private: */
u8 reserved[31];
};
@@ -97,13 +100,16 @@ struct rnbd_msg_sess_info_rsp {
* @hdr: message header
* @access_mode: the mode to open remote device, valid values see:
* enum rnbd_access_mode
- * @device_name: device path on remote side
+ * @dev_name: device path on remote side
*/
struct rnbd_msg_open {
struct rnbd_msg_hdr hdr;
u8 access_mode;
+ /* private: */
u8 resv1;
+ /* public: */
s8 dev_name[NAME_MAX];
+ /* private: */
u8 reserved[3];
};
@@ -155,6 +161,7 @@ struct rnbd_msg_open_rsp {
__le16 secure_discard;
u8 obsolete_rotational;
u8 cache_policy;
+ /* private: */
u8 reserved[10];
};
@@ -187,7 +194,7 @@ struct rnbd_msg_io {
* @RNBD_OP_DISCARD: discard sectors
* @RNBD_OP_SECURE_ERASE: securely erase sectors
* @RNBD_OP_WRITE_ZEROES: write zeroes sectors
-
+ *
* @RNBD_F_SYNC: request is sync (sync write or read)
* @RNBD_F_FUA: forced unit access
*/
diff --git a/drivers/block/rnull.rs b/drivers/block/rnull.rs
deleted file mode 100644
index d07e76ae2c13..000000000000
--- a/drivers/block/rnull.rs
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-//! This is a Rust implementation of the C null block driver.
-//!
-//! Supported features:
-//!
-//! - blk-mq interface
-//! - direct completion
-//! - block size 4k
-//!
-//! The driver is not configurable.
-
-use kernel::{
- alloc::flags,
- block::mq::{
- self,
- gen_disk::{self, GenDisk},
- Operations, TagSet,
- },
- error::Result,
- new_mutex, pr_info,
- prelude::*,
- sync::{Arc, Mutex},
- types::ARef,
-};
-
-module! {
- type: NullBlkModule,
- name: "rnull_mod",
- authors: ["Andreas Hindborg"],
- description: "Rust implementation of the C null block driver",
- license: "GPL v2",
-}
-
-#[pin_data]
-struct NullBlkModule {
- #[pin]
- _disk: Mutex<GenDisk<NullBlkDevice>>,
-}
-
-impl kernel::InPlaceModule for NullBlkModule {
- fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> {
- pr_info!("Rust null_blk loaded\n");
-
-        // Use an immediately-called closure as a stable `try` block
- let disk = /* try */ (|| {
- let tagset = Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?;
-
- gen_disk::GenDiskBuilder::new()
- .capacity_sectors(4096 << 11)
- .logical_block_size(4096)?
- .physical_block_size(4096)?
- .rotational(false)
- .build(format_args!("rnullb{}", 0), tagset)
- })();
-
- try_pin_init!(Self {
- _disk <- new_mutex!(disk?, "nullb:disk"),
- })
- }
-}
-
-struct NullBlkDevice;
-
-#[vtable]
-impl Operations for NullBlkDevice {
- #[inline(always)]
- fn queue_rq(rq: ARef<mq::Request<Self>>, _is_last: bool) -> Result {
- mq::Request::end_ok(rq)
- .map_err(|_e| kernel::error::code::EIO)
- // We take no refcounts on the request, so we expect to be able to
- // end the request. The request reference must be unique at this
- // point, and so `end_ok` cannot fail.
- .expect("Fatal error - expected to be able to end request");
-
- Ok(())
- }
-
- fn commit_rqs() {}
-}
diff --git a/drivers/block/rnull/Kconfig b/drivers/block/rnull/Kconfig
new file mode 100644
index 000000000000..7bc5b376c128
--- /dev/null
+++ b/drivers/block/rnull/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Rust null block device driver configuration
+
+config BLK_DEV_RUST_NULL
+ tristate "Rust null block driver (Experimental)"
+ depends on RUST && CONFIGFS_FS
+ help
+ This is the Rust implementation of the null block driver. Like
+ the C version, the driver allows the user to create virutal block
+	  the C version, the driver allows the user to create virtual block
+
+ If unsure, say N.
diff --git a/drivers/block/rnull/Makefile b/drivers/block/rnull/Makefile
new file mode 100644
index 000000000000..11cfa5e615dc
--- /dev/null
+++ b/drivers/block/rnull/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_BLK_DEV_RUST_NULL) += rnull_mod.o
+rnull_mod-y := rnull.o
diff --git a/drivers/block/rnull/configfs.rs b/drivers/block/rnull/configfs.rs
new file mode 100644
index 000000000000..6713a6d92391
--- /dev/null
+++ b/drivers/block/rnull/configfs.rs
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use super::{NullBlkDevice, THIS_MODULE};
+use kernel::{
+ block::mq::gen_disk::{GenDisk, GenDiskBuilder},
+ c_str,
+ configfs::{self, AttributeOperations},
+ configfs_attrs,
+ fmt::{self, Write as _},
+ new_mutex,
+ page::PAGE_SIZE,
+ prelude::*,
+ str::{kstrtobool_bytes, CString},
+ sync::Mutex,
+};
+use pin_init::PinInit;
+
+pub(crate) fn subsystem() -> impl PinInit<kernel::configfs::Subsystem<Config>, Error> {
+ let item_type = configfs_attrs! {
+ container: configfs::Subsystem<Config>,
+ data: Config,
+ child: DeviceConfig,
+ attributes: [
+ features: 0,
+ ],
+ };
+
+ kernel::configfs::Subsystem::new(c_str!("rnull"), item_type, try_pin_init!(Config {}))
+}
+
+#[pin_data]
+pub(crate) struct Config {}
+
+#[vtable]
+impl AttributeOperations<0> for Config {
+ type Data = Config;
+
+ fn show(_this: &Config, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_str("blocksize,size,rotational,irqmode\n")?;
+ Ok(writer.bytes_written())
+ }
+}
+
+#[vtable]
+impl configfs::GroupOperations for Config {
+ type Child = DeviceConfig;
+
+ fn make_group(
+ &self,
+ name: &CStr,
+ ) -> Result<impl PinInit<configfs::Group<DeviceConfig>, Error>> {
+ let item_type = configfs_attrs! {
+ container: configfs::Group<DeviceConfig>,
+ data: DeviceConfig,
+ attributes: [
+ // Named for compatibility with C null_blk
+ power: 0,
+ blocksize: 1,
+ rotational: 2,
+ size: 3,
+ irqmode: 4,
+ ],
+ };
+
+ Ok(configfs::Group::new(
+ name.try_into()?,
+ item_type,
+ // TODO: cannot coerce new_mutex!() to impl PinInit<_, Error>, so put mutex inside
+ try_pin_init!( DeviceConfig {
+ data <- new_mutex!(DeviceConfigInner {
+ powered: false,
+ block_size: 4096,
+ rotational: false,
+ disk: None,
+ capacity_mib: 4096,
+ irq_mode: IRQMode::None,
+ name: name.try_into()?,
+ }),
+ }),
+ ))
+ }
+}
+
+#[derive(Debug, Clone, Copy)]
+pub(crate) enum IRQMode {
+ None,
+ Soft,
+}
+
+impl TryFrom<u8> for IRQMode {
+ type Error = kernel::error::Error;
+
+ fn try_from(value: u8) -> Result<Self> {
+ match value {
+ 0 => Ok(Self::None),
+ 1 => Ok(Self::Soft),
+ _ => Err(EINVAL),
+ }
+ }
+}
+
+impl fmt::Display for IRQMode {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::None => f.write_str("0")?,
+ Self::Soft => f.write_str("1")?,
+ }
+ Ok(())
+ }
+}
+
+#[pin_data]
+pub(crate) struct DeviceConfig {
+ #[pin]
+ data: Mutex<DeviceConfigInner>,
+}
+
+#[pin_data]
+struct DeviceConfigInner {
+ powered: bool,
+ name: CString,
+ block_size: u32,
+ rotational: bool,
+ capacity_mib: u64,
+ irq_mode: IRQMode,
+ disk: Option<GenDisk<NullBlkDevice>>,
+}
+
+#[vtable]
+impl configfs::AttributeOperations<0> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+
+ if this.data.lock().powered {
+ writer.write_str("1\n")?;
+ } else {
+ writer.write_str("0\n")?;
+ }
+
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ let power_op = kstrtobool_bytes(page)?;
+ let mut guard = this.data.lock();
+
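+        // Powering on builds and registers the GenDisk; powering off
+        // drops it, which tears the block device down again.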
+ if !guard.powered && power_op {
+ guard.disk = Some(NullBlkDevice::new(
+ &guard.name,
+ guard.block_size,
+ guard.rotational,
+ guard.capacity_mib,
+ guard.irq_mode,
+ )?);
+ guard.powered = true;
+ } else if guard.powered && !power_op {
+ drop(guard.disk.take());
+ guard.powered = false;
+ }
+
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<1> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_fmt(fmt!("{}\n", this.data.lock().block_size))?;
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ let text = core::str::from_utf8(page)?.trim();
+ let value = text.parse::<u32>().map_err(|_| EINVAL)?;
+
+ GenDiskBuilder::validate_block_size(value)?;
+ this.data.lock().block_size = value;
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<2> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+
+ if this.data.lock().rotational {
+ writer.write_str("1\n")?;
+ } else {
+ writer.write_str("0\n")?;
+ }
+
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ this.data.lock().rotational = kstrtobool_bytes(page)?;
+
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<3> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_fmt(fmt!("{}\n", this.data.lock().capacity_mib))?;
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ let text = core::str::from_utf8(page)?.trim();
+ let value = text.parse::<u64>().map_err(|_| EINVAL)?;
+
+ this.data.lock().capacity_mib = value;
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<4> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_fmt(fmt!("{}\n", this.data.lock().irq_mode))?;
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ let text = core::str::from_utf8(page)?.trim();
+ let value = text.parse::<u8>().map_err(|_| EINVAL)?;
+
+ this.data.lock().irq_mode = IRQMode::try_from(value)?;
+ Ok(())
+ }
+}
diff --git a/drivers/block/rnull/rnull.rs b/drivers/block/rnull/rnull.rs
new file mode 100644
index 000000000000..a9d5e575a2c4
--- /dev/null
+++ b/drivers/block/rnull/rnull.rs
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! This is a Rust implementation of the C null block driver.
+
+mod configfs;
+
+use configfs::IRQMode;
+use kernel::{
+ block::{
+ self,
+ mq::{
+ self,
+ gen_disk::{self, GenDisk},
+ Operations, TagSet,
+ },
+ },
+ error::Result,
+ pr_info,
+ prelude::*,
+ sync::{aref::ARef, Arc},
+};
+use pin_init::PinInit;
+
+module! {
+ type: NullBlkModule,
+ name: "rnull_mod",
+ authors: ["Andreas Hindborg"],
+ description: "Rust implementation of the C null block driver",
+ license: "GPL v2",
+}
+
+#[pin_data]
+struct NullBlkModule {
+ #[pin]
+ configfs_subsystem: kernel::configfs::Subsystem<configfs::Config>,
+}
+
+impl kernel::InPlaceModule for NullBlkModule {
+ fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> {
+ pr_info!("Rust null_blk loaded\n");
+
+ try_pin_init!(Self {
+ configfs_subsystem <- configfs::subsystem(),
+ })
+ }
+}
+
+struct NullBlkDevice;
+
+impl NullBlkDevice {
+ fn new(
+ name: &CStr,
+ block_size: u32,
+ rotational: bool,
+ capacity_mib: u64,
+ irq_mode: IRQMode,
+ ) -> Result<GenDisk<Self>> {
+ let tagset = Arc::pin_init(TagSet::new(1, 256, 1), GFP_KERNEL)?;
+
+ let queue_data = Box::new(QueueData { irq_mode }, GFP_KERNEL)?;
+
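+        // capacity_mib is in MiB: 1 MiB = 1 << (20 - SECTOR_SHIFT)
+        // 512-byte sectors.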
+ gen_disk::GenDiskBuilder::new()
+ .capacity_sectors(capacity_mib << (20 - block::SECTOR_SHIFT))
+ .logical_block_size(block_size)?
+ .physical_block_size(block_size)?
+ .rotational(rotational)
+ .build(fmt!("{}", name.to_str()?), tagset, queue_data)
+ }
+}
+
+struct QueueData {
+ irq_mode: IRQMode,
+}
+
+#[vtable]
+impl Operations for NullBlkDevice {
+ type QueueData = KBox<QueueData>;
+
+ #[inline(always)]
+ fn queue_rq(queue_data: &QueueData, rq: ARef<mq::Request<Self>>, _is_last: bool) -> Result {
+ match queue_data.irq_mode {
+ IRQMode::None => mq::Request::end_ok(rq)
+ .map_err(|_e| kernel::error::code::EIO)
+ // We take no refcounts on the request, so we expect to be able to
+ // end the request. The request reference must be unique at this
+ // point, and so `end_ok` cannot fail.
+ .expect("Fatal error - expected to be able to end request"),
+ IRQMode::Soft => mq::Request::complete(rq),
+ }
+ Ok(())
+ }
+
+ fn commit_rqs(_queue_data: &QueueData) {}
+
+ fn complete(rq: ARef<mq::Request<Self>>) {
+ mq::Request::end_ok(rq)
+ .map_err(|_e| kernel::error::code::EIO)
+ // We take no refcounts on the request, so we expect to be able to
+ // end the request. The request reference must be unique at this
+ // point, and so `end_ok` cannot fail.
+ .expect("Fatal error - expected to be able to end request");
+ }
+}
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index b5727dea15bd..db1fe9772a4d 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -119,9 +119,8 @@ static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}
-static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int vdc_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct gendisk *disk = bdev->bd_disk;
sector_t nsect = get_capacity(disk);
sector_t cylinders = nsect;
@@ -957,8 +956,10 @@ static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
dev = device_find_child(vdev->dev.parent, &port_data,
vdc_device_probed);
- if (dev)
+ if (dev) {
+ put_device(dev);
return true;
+ }
return false;
}
@@ -1187,7 +1188,7 @@ static void vdc_ldc_reset(struct vdc_port *port)
}
if (port->ldc_timeout)
- mod_delayed_work(system_wq, &port->ldc_reset_timer_work,
+ mod_delayed_work(system_percpu_wq, &port->ldc_reset_timer_work,
round_jiffies(jiffies + HZ * port->ldc_timeout));
mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
return;
@@ -1215,7 +1216,7 @@ static int __init vdc_init(void)
{
int err;
- sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
+ sunvdc_wq = alloc_workqueue("sunvdc", WQ_PERCPU, 0);
if (!sunvdc_wq)
return -ENOMEM;
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index eda33c5eb5e2..416015947ae6 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -711,9 +711,9 @@ static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode,
return -ENOTTY;
}
-static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int floppy_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct floppy_state *fs = bdev->bd_disk->private_data;
+ struct floppy_state *fs = disk->private_data;
struct floppy_struct *g;
int ret;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index c637ea010d34..2c715df63f23 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -48,6 +48,8 @@
#define UBLK_MINORS (1U << MINORBITS)
+#define UBLK_INVALID_BUF_IDX ((u16)-1)
+
/* private ioctl command mirror */
#define UBLK_CMD_DEL_DEV_ASYNC _IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC)
#define UBLK_CMD_UPDATE_SIZE _IOC_NR(UBLK_U_CMD_UPDATE_SIZE)
@@ -70,7 +72,8 @@
| UBLK_F_UPDATE_SIZE \
| UBLK_F_AUTO_BUF_REG \
| UBLK_F_QUIESCE \
- | UBLK_F_PER_IO_DAEMON)
+ | UBLK_F_PER_IO_DAEMON \
+ | UBLK_F_BUF_REG_OFF_DAEMON)
#define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \
| UBLK_F_USER_RECOVERY_REISSUE \
@@ -82,14 +85,6 @@
UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED | \
UBLK_PARAM_TYPE_DMA_ALIGN | UBLK_PARAM_TYPE_SEGMENT)
-struct ublk_rq_data {
- refcount_t ref;
-
- /* for auto-unregister buffer in case of UBLK_F_AUTO_BUF_REG */
- u16 buf_index;
- void *buf_ctx_handle;
-};
-
struct ublk_uring_cmd_pdu {
/*
* Store requests in same batch temporarily for queuing them to
@@ -110,8 +105,6 @@ struct ublk_uring_cmd_pdu {
*/
struct ublk_queue *ubq;
- struct ublk_auto_buf_reg buf;
-
u16 tag;
};
@@ -155,9 +148,20 @@ struct ublk_uring_cmd_pdu {
/* atomic RW with ubq->cancel_lock */
#define UBLK_IO_FLAG_CANCELED 0x80000000
-struct ublk_io {
- /* userspace buffer address from io cmd */
+/*
+ * Initialize refcount to a large number to include any registered buffers.
+ * UBLK_IO_COMMIT_AND_FETCH_REQ will release these references minus those for
+ * any buffers registered on the io daemon task.
+ */
+#define UBLK_REFCOUNT_INIT (REFCOUNT_MAX / 2)
+
+union ublk_io_buf {
__u64 addr;
+ struct ublk_auto_buf_reg auto_reg;
+};
+
+struct ublk_io {
+ union ublk_io_buf buf;
unsigned int flags;
int res;
@@ -169,7 +173,24 @@ struct ublk_io {
};
struct task_struct *task;
-};
+
+ /*
+ * The number of uses of this I/O by the ublk server
+	 * if user copy or zero copy is enabled:
+ * - UBLK_REFCOUNT_INIT from dispatch to the server
+ * until UBLK_IO_COMMIT_AND_FETCH_REQ
+ * - 1 for each inflight ublk_ch_{read,write}_iter() call
+ * - 1 for each io_uring registered buffer not registered on task
+ * The I/O can only be completed once all references are dropped.
+ * User copy and buffer registration operations are only permitted
+ * if the reference count is nonzero.
+ */
+ refcount_t ref;
+ /* Count of buffers registered on task and not yet unregistered */
+ unsigned task_registered_buffers;
+
+ void *buf_ctx_handle;
+} ____cacheline_aligned_in_smp;
struct ublk_queue {
int q_id;
@@ -181,18 +202,14 @@ struct ublk_queue {
bool force_abort;
bool canceling;
bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
- unsigned short nr_io_ready; /* how many ios setup */
spinlock_t cancel_lock;
struct ublk_device *dev;
- struct ublk_io ios[];
+ struct ublk_io ios[] __counted_by(q_depth);
};
struct ublk_device {
struct gendisk *ub_disk;
- char *__queues;
-
- unsigned int queue_size;
struct ublksrv_ctrl_dev_info dev_info;
struct blk_mq_tag_set tag_set;
@@ -214,8 +231,14 @@ struct ublk_device {
struct ublk_params params;
struct completion completion;
- unsigned int nr_queues_ready;
- unsigned int nr_privileged_daemon;
+ u32 nr_io_ready;
+ bool unprivileged_daemons;
+ struct mutex cancel_mutex;
+ bool canceling;
+ pid_t ublksrv_tgid;
+ struct delayed_work exit_work;
+
+ struct ublk_queue *queues[];
};
/* header of ublk_params */
@@ -228,7 +251,7 @@ static void ublk_io_release(void *priv);
static void ublk_stop_dev_unlocked(struct ublk_device *ub);
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- const struct ublk_queue *ubq, int tag, size_t offset);
+ u16 q_id, u16 tag, struct ublk_io *io, size_t offset);
static inline unsigned int ublk_req_build_flags(struct request *req);
static inline struct ublksrv_io_desc *
@@ -242,7 +265,7 @@ static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
return ub->dev_info.flags & UBLK_F_ZONED;
}
-static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq)
+static inline bool ublk_queue_is_zoned(const struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_ZONED;
}
@@ -345,7 +368,7 @@ static void *ublk_alloc_report_buffer(struct ublk_device *ublk,
}
static int ublk_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data)
+ unsigned int nr_zones, struct blk_report_zones_args *args)
{
struct ublk_device *ub = disk->private_data;
unsigned int zone_size_sectors = disk->queue->limits.chunk_sectors;
@@ -408,7 +431,7 @@ free_req:
if (!zone->len)
break;
- ret = cb(zone, i, data);
+ ret = disk_report_zone(disk, zone, i, args);
if (ret)
goto out;
@@ -476,7 +499,7 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
iod->op_flags = ublk_op | ublk_req_build_flags(req);
iod->nr_sectors = blk_rq_sectors(req);
iod->start_sector = blk_rq_pos(req);
- iod->addr = io->addr;
+ iod->addr = io->buf.addr;
return BLK_STS_OK;
}
@@ -507,7 +530,8 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
#endif
-static inline void __ublk_complete_rq(struct request *req);
+static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
+ bool need_map);
static dev_t ublk_chr_devt;
static const struct class ublk_chr_class = {
@@ -639,22 +663,44 @@ static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq)
return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY;
}
+static inline bool ublk_dev_support_zero_copy(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY;
+}
+
static inline bool ublk_support_auto_buf_reg(const struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_AUTO_BUF_REG;
}
+static inline bool ublk_dev_support_auto_buf_reg(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_AUTO_BUF_REG;
+}
+
static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_USER_COPY;
}
+static inline bool ublk_dev_support_user_copy(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_USER_COPY;
+}
+
static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
{
return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq) &&
!ublk_support_auto_buf_reg(ubq);
}
+static inline bool ublk_dev_need_map_io(const struct ublk_device *ub)
+{
+ return !ublk_dev_support_user_copy(ub) &&
+ !ublk_dev_support_zero_copy(ub) &&
+ !ublk_dev_support_auto_buf_reg(ub);
+}
+
static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
{
/*
@@ -672,39 +718,40 @@ static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
ublk_support_auto_buf_reg(ubq);
}
-static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
- struct request *req)
+static inline bool ublk_dev_need_req_ref(const struct ublk_device *ub)
{
- if (ublk_need_req_ref(ubq)) {
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+ return ublk_dev_support_user_copy(ub) ||
+ ublk_dev_support_zero_copy(ub) ||
+ ublk_dev_support_auto_buf_reg(ub);
+}
- refcount_set(&data->ref, 1);
- }
+static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
+ struct ublk_io *io)
+{
+ if (ublk_need_req_ref(ubq))
+ refcount_set(&io->ref, UBLK_REFCOUNT_INIT);
}
-static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
- struct request *req)
+static inline bool ublk_get_req_ref(struct ublk_io *io)
{
- if (ublk_need_req_ref(ubq)) {
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+ return refcount_inc_not_zero(&io->ref);
+}
- return refcount_inc_not_zero(&data->ref);
- }
+static inline void ublk_put_req_ref(struct ublk_io *io, struct request *req)
+{
+ if (!refcount_dec_and_test(&io->ref))
+ return;
- return true;
+ /* ublk_need_map_io() and ublk_need_req_ref() are mutually exclusive */
+ __ublk_complete_rq(req, io, false);
}
-static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
- struct request *req)
+static inline bool ublk_sub_req_ref(struct ublk_io *io)
{
- if (ublk_need_req_ref(ubq)) {
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+ unsigned sub_refs = UBLK_REFCOUNT_INIT - io->task_registered_buffers;
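+	/* keeps one reference per buffer still registered on the daemon task */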
- if (refcount_dec_and_test(&data->ref))
- __ublk_complete_rq(req);
- } else {
- __ublk_complete_rq(req);
- }
+ io->task_registered_buffers = 0;
+ return refcount_sub_and_test(sub_refs, &io->ref);
}
static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
@@ -712,6 +759,11 @@ static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
return ubq->flags & UBLK_F_NEED_GET_DATA;
}
+static inline bool ublk_dev_need_get_data(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_NEED_GET_DATA;
+}
+
/* Called in slow path only, keep it noinline for trace purpose */
static noinline struct ublk_device *ublk_get_device(struct ublk_device *ub)
{
@@ -729,7 +781,7 @@ static noinline void ublk_put_device(struct ublk_device *ub)
static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
int qid)
{
- return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
+ return dev->queues[qid];
}
static inline bool ublk_rq_has_data(const struct request *rq)
@@ -748,11 +800,9 @@ static inline int __ublk_queue_cmd_buf_size(int depth)
return round_up(depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE);
}
-static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
+static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub)
{
- struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
-
- return __ublk_queue_cmd_buf_size(ubq->q_depth);
+ return __ublk_queue_cmd_buf_size(ub->dev_info.queue_depth);
}
static int ublk_max_cmd_buf_size(void)
@@ -864,73 +914,6 @@ static const struct block_device_operations ub_fops = {
.report_zones = ublk_report_zones,
};
-#define UBLK_MAX_PIN_PAGES 32
-
-struct ublk_io_iter {
- struct page *pages[UBLK_MAX_PIN_PAGES];
- struct bio *bio;
- struct bvec_iter iter;
-};
-
-/* return how many pages are copied */
-static void ublk_copy_io_pages(struct ublk_io_iter *data,
- size_t total, size_t pg_off, int dir)
-{
- unsigned done = 0;
- unsigned pg_idx = 0;
-
- while (done < total) {
- struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
- unsigned int bytes = min3(bv.bv_len, (unsigned)total - done,
- (unsigned)(PAGE_SIZE - pg_off));
- void *bv_buf = bvec_kmap_local(&bv);
- void *pg_buf = kmap_local_page(data->pages[pg_idx]);
-
- if (dir == ITER_DEST)
- memcpy(pg_buf + pg_off, bv_buf, bytes);
- else
- memcpy(bv_buf, pg_buf + pg_off, bytes);
-
- kunmap_local(pg_buf);
- kunmap_local(bv_buf);
-
- /* advance page array */
- pg_off += bytes;
- if (pg_off == PAGE_SIZE) {
- pg_idx += 1;
- pg_off = 0;
- }
-
- done += bytes;
-
- /* advance bio */
- bio_advance_iter_single(data->bio, &data->iter, bytes);
- if (!data->iter.bi_size) {
- data->bio = data->bio->bi_next;
- if (data->bio == NULL)
- break;
- data->iter = data->bio->bi_iter;
- }
- }
-}
-
-static bool ublk_advance_io_iter(const struct request *req,
- struct ublk_io_iter *iter, unsigned int offset)
-{
- struct bio *bio = req->bio;
-
- for_each_bio(bio) {
- if (bio->bi_iter.bi_size > offset) {
- iter->bio = bio;
- iter->iter = bio->bi_iter;
- bio_advance_iter(iter->bio, &iter->iter, offset);
- return true;
- }
- offset -= bio->bi_iter.bi_size;
- }
- return false;
-}
-
/*
* Copy data between request pages and io_iter, where 'offset' is the
* starting linear offset within the request.
@@ -938,34 +921,35 @@ static bool ublk_advance_io_iter(const struct request *req,
static size_t ublk_copy_user_pages(const struct request *req,
unsigned offset, struct iov_iter *uiter, int dir)
{
- struct ublk_io_iter iter;
+ struct req_iterator iter;
+ struct bio_vec bv;
size_t done = 0;
- if (!ublk_advance_io_iter(req, &iter, offset))
- return 0;
-
- while (iov_iter_count(uiter) && iter.bio) {
- unsigned nr_pages;
- ssize_t len;
- size_t off;
- int i;
+ rq_for_each_segment(bv, req, iter) {
+ void *bv_buf;
+ size_t copied;
- len = iov_iter_get_pages2(uiter, iter.pages,
- iov_iter_count(uiter),
- UBLK_MAX_PIN_PAGES, &off);
- if (len <= 0)
- return done;
-
- ublk_copy_io_pages(&iter, len, off, dir);
- nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE);
- for (i = 0; i < nr_pages; i++) {
- if (dir == ITER_DEST)
- set_page_dirty(iter.pages[i]);
- put_page(iter.pages[i]);
+ if (offset >= bv.bv_len) {
+ offset -= bv.bv_len;
+ continue;
}
- done += len;
- }
+ bv.bv_offset += offset;
+ bv.bv_len -= offset;
+ bv_buf = bvec_kmap_local(&bv);
+ if (dir == ITER_DEST)
+ copied = copy_to_iter(bv_buf, bv.bv_len, uiter);
+ else
+ copied = copy_from_iter(bv_buf, bv.bv_len, uiter);
+
+ kunmap_local(bv_buf);
+
+ done += copied;
+ if (copied < bv.bv_len)
+ break;
+
+ offset = 0;
+ }
return done;
}
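
The rewritten loop iterates the request's segments with rq_for_each_segment() instead of pinning user pages: whole bio_vecs are skipped until 'offset' is consumed, the first copied segment may be partial, and every later segment starts at offset 0. A user-space style analogue of the skip-then-copy pattern, with hypothetical names, to make the offset handling explicit:

/* Sketch (hypothetical helper): the same skip-then-copy offset handling. */
static size_t demo_copy_with_offset(const struct bio_vec *segs, int nr_segs,
				    size_t offset, void *dst, size_t len)
{
	size_t done = 0;
	int i;

	for (i = 0; i < nr_segs && done < len; i++) {
		size_t seg_off = segs[i].bv_offset;
		size_t seg_len = segs[i].bv_len;

		if (offset >= seg_len) {
			offset -= seg_len;	/* segment lies before 'offset' */
			continue;
		}
		seg_off += offset;		/* first copied segment is partial */
		seg_len -= offset;
		seg_len = min(seg_len, len - done);
		/* assumes lowmem pages; the driver uses bvec_kmap_local() */
		memcpy(dst + done,
		       page_address(segs[i].bv_page) + seg_off, seg_len);
		done += seg_len;
		offset = 0;
	}
	return done;
}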
@@ -980,8 +964,9 @@ static inline bool ublk_need_unmap_req(const struct request *req)
(req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN);
}
-static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
- struct ublk_io *io)
+static unsigned int ublk_map_io(const struct ublk_queue *ubq,
+ const struct request *req,
+ const struct ublk_io *io)
{
const unsigned int rq_bytes = blk_rq_bytes(req);
@@ -997,19 +982,19 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
struct iov_iter iter;
const int dir = ITER_DEST;
- import_ubuf(dir, u64_to_user_ptr(io->addr), rq_bytes, &iter);
+ import_ubuf(dir, u64_to_user_ptr(io->buf.addr), rq_bytes, &iter);
return ublk_copy_user_pages(req, 0, &iter, dir);
}
return rq_bytes;
}
-static int ublk_unmap_io(const struct ublk_queue *ubq,
+static unsigned int ublk_unmap_io(bool need_map,
const struct request *req,
- struct ublk_io *io)
+ const struct ublk_io *io)
{
const unsigned int rq_bytes = blk_rq_bytes(req);
- if (!ublk_need_map_io(ubq))
+ if (!need_map)
return rq_bytes;
if (ublk_need_unmap_req(req)) {
@@ -1018,7 +1003,7 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
WARN_ON_ONCE(io->res > rq_bytes);
- import_ubuf(dir, u64_to_user_ptr(io->addr), io->res, &iter);
+ import_ubuf(dir, u64_to_user_ptr(io->buf.addr), io->res, &iter);
return ublk_copy_user_pages(req, 0, &iter, dir);
}
return rq_bytes;
@@ -1056,13 +1041,8 @@ static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
{
struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
struct ublk_io *io = &ubq->ios[req->tag];
- enum req_op op = req_op(req);
u32 ublk_op;
- if (!ublk_queue_is_zoned(ubq) &&
- (op_is_zone_mgmt(op) || op == REQ_OP_ZONE_APPEND))
- return BLK_STS_IOERR;
-
switch (req_op(req)) {
case REQ_OP_READ:
ublk_op = UBLK_IO_OP_READ;
@@ -1089,7 +1069,7 @@ static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
iod->op_flags = ublk_op | ublk_req_build_flags(req);
iod->nr_sectors = blk_rq_sectors(req);
iod->start_sector = blk_rq_pos(req);
- iod->addr = io->addr;
+ iod->addr = io->buf.addr;
return BLK_STS_OK;
}
@@ -1101,10 +1081,9 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
}
/* todo: handle partial completion */
-static inline void __ublk_complete_rq(struct request *req)
+static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
+ bool need_map)
{
- struct ublk_queue *ubq = req->mq_hctx->driver_data;
- struct ublk_io *io = &ubq->ios[req->tag];
unsigned int unmapped_bytes;
blk_status_t res = BLK_STS_OK;
@@ -1128,7 +1107,7 @@ static inline void __ublk_complete_rq(struct request *req)
goto exit;
/* for READ request, writing data in iod->addr to rq buffers */
- unmapped_bytes = ublk_unmap_io(ubq, req, io);
+ unmapped_bytes = ublk_unmap_io(need_map, req, io);
/*
* Should be impossible, since the data was filled in just before
@@ -1140,7 +1119,7 @@ static inline void __ublk_complete_rq(struct request *req)
if (blk_update_request(req, BLK_STS_OK, io->res))
blk_mq_requeue_request(req, true);
- else
+ else if (likely(!blk_should_fake_timeout(req->q)))
__blk_mq_end_request(req, BLK_STS_OK);
return;
@@ -1148,8 +1127,8 @@ exit:
blk_mq_end_request(req, res);
}
-static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
- int res, unsigned issue_flags)
+static struct io_uring_cmd *__ublk_prep_compl_io_cmd(struct ublk_io *io,
+ struct request *req)
{
/* read cmd first because req will overwrite it */
struct io_uring_cmd *cmd = io->cmd;
@@ -1164,9 +1143,16 @@ static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
io->flags &= ~UBLK_IO_FLAG_ACTIVE;
io->req = req;
+ return cmd;
+}
+
+static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
+ int res, unsigned issue_flags)
+{
+ struct io_uring_cmd *cmd = __ublk_prep_compl_io_cmd(io, req);
/* tell ublksrv one io request is coming */
- io_uring_cmd_done(cmd, res, 0, issue_flags);
+ io_uring_cmd_done(cmd, res, issue_flags);
}
#define UBLK_REQUEUE_DELAY_MS 3
@@ -1181,52 +1167,66 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
blk_mq_end_request(rq, BLK_STS_IOERR);
}
-static void ublk_auto_buf_reg_fallback(struct request *req)
+static void
+ublk_auto_buf_reg_fallback(const struct ublk_queue *ubq, unsigned tag)
{
- const struct ublk_queue *ubq = req->mq_hctx->driver_data;
- struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+ struct ublksrv_io_desc *iod = ublk_get_iod(ubq, tag);
iod->op_flags |= UBLK_IO_F_NEED_REG_BUF;
- refcount_set(&data->ref, 1);
}
-static bool ublk_auto_buf_reg(struct request *req, struct ublk_io *io,
- unsigned int issue_flags)
+enum auto_buf_reg_res {
+ AUTO_BUF_REG_FAIL,
+ AUTO_BUF_REG_FALLBACK,
+ AUTO_BUF_REG_OK,
+};
+
+static void ublk_prep_auto_buf_reg_io(const struct ublk_queue *ubq,
+ struct request *req, struct ublk_io *io,
+ struct io_uring_cmd *cmd,
+ enum auto_buf_reg_res res)
+{
+ if (res == AUTO_BUF_REG_OK) {
+ io->task_registered_buffers = 1;
+ io->buf_ctx_handle = io_uring_cmd_ctx_handle(cmd);
+ io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
+ }
+ ublk_init_req_ref(ubq, io);
+ __ublk_prep_compl_io_cmd(io, req);
+}
+
+static enum auto_buf_reg_res
+__ublk_do_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
+ struct ublk_io *io, struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
{
- struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(io->cmd);
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
int ret;
- ret = io_buffer_register_bvec(io->cmd, req, ublk_io_release,
- pdu->buf.index, issue_flags);
+ ret = io_buffer_register_bvec(cmd, req, ublk_io_release,
+ io->buf.auto_reg.index, issue_flags);
if (ret) {
- if (pdu->buf.flags & UBLK_AUTO_BUF_REG_FALLBACK) {
- ublk_auto_buf_reg_fallback(req);
- return true;
+ if (io->buf.auto_reg.flags & UBLK_AUTO_BUF_REG_FALLBACK) {
+ ublk_auto_buf_reg_fallback(ubq, req->tag);
+ return AUTO_BUF_REG_FALLBACK;
}
blk_mq_end_request(req, BLK_STS_IOERR);
- return false;
+ return AUTO_BUF_REG_FAIL;
}
- /* one extra reference is dropped by ublk_io_release */
- refcount_set(&data->ref, 2);
- data->buf_ctx_handle = io_uring_cmd_ctx_handle(io->cmd);
- /* store buffer index in request payload */
- data->buf_index = pdu->buf.index;
- io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
- return true;
+ return AUTO_BUF_REG_OK;
}
-static bool ublk_prep_auto_buf_reg(struct ublk_queue *ubq,
- struct request *req, struct ublk_io *io,
- unsigned int issue_flags)
+static void ublk_do_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
+ struct ublk_io *io, struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
{
- if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req))
- return ublk_auto_buf_reg(req, io, issue_flags);
+ enum auto_buf_reg_res res = __ublk_do_auto_buf_reg(ubq, req, io, cmd,
+ issue_flags);
- ublk_init_req_ref(ubq, req);
- return true;
+ if (res != AUTO_BUF_REG_FAIL) {
+ ublk_prep_auto_buf_reg_io(ubq, req, io, cmd, res);
+ io_uring_cmd_done(cmd, UBLK_IO_RES_OK, issue_flags);
+ }
}
static bool ublk_start_io(const struct ublk_queue *ubq, struct request *req,
@@ -1257,10 +1257,9 @@ static bool ublk_start_io(const struct ublk_queue *ubq, struct request *req,
return true;
}
-static void ublk_dispatch_req(struct ublk_queue *ubq,
- struct request *req,
- unsigned int issue_flags)
+static void ublk_dispatch_req(struct ublk_queue *ubq, struct request *req)
{
+ unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS;
int tag = req->tag;
struct ublk_io *io = &ubq->ios[tag];
@@ -1299,17 +1298,21 @@ static void ublk_dispatch_req(struct ublk_queue *ubq,
if (!ublk_start_io(ubq, req, io))
return;
- if (ublk_prep_auto_buf_reg(ubq, req, io, issue_flags))
+ if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req)) {
+ ublk_do_auto_buf_reg(ubq, req, io, io->cmd, issue_flags);
+ } else {
+ ublk_init_req_ref(ubq, io);
ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags);
+ }
}
-static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd,
- unsigned int issue_flags)
+static void ublk_cmd_tw_cb(struct io_tw_req tw_req, io_tw_token_t tw)
{
+ struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req);
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct ublk_queue *ubq = pdu->ubq;
- ublk_dispatch_req(ubq, pdu->req, issue_flags);
+ ublk_dispatch_req(ubq, pdu->req);
}
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
@@ -1321,9 +1324,9 @@ static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
io_uring_cmd_complete_in_task(cmd, ublk_cmd_tw_cb);
}
-static void ublk_cmd_list_tw_cb(struct io_uring_cmd *cmd,
- unsigned int issue_flags)
+static void ublk_cmd_list_tw_cb(struct io_tw_req tw_req, io_tw_token_t tw)
{
+ struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req);
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct request *rq = pdu->req_list;
struct request *next;
@@ -1331,7 +1334,7 @@ static void ublk_cmd_list_tw_cb(struct io_uring_cmd *cmd,
do {
next = rq->rq_next;
rq->rq_next = NULL;
- ublk_dispatch_req(rq->mq_hctx->driver_data, rq, issue_flags);
+ ublk_dispatch_req(rq->mq_hctx->driver_data, rq);
rq = next;
} while (rq);
}
@@ -1349,14 +1352,23 @@ static void ublk_queue_cmd_list(struct ublk_io *io, struct rq_list *l)
static enum blk_eh_timer_return ublk_timeout(struct request *rq)
{
struct ublk_queue *ubq = rq->mq_hctx->driver_data;
- struct ublk_io *io = &ubq->ios[rq->tag];
+ pid_t tgid = ubq->dev->ublksrv_tgid;
+ struct task_struct *p;
+ struct pid *pid;
- if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
- send_sig(SIGKILL, io->task, 0);
- return BLK_EH_DONE;
- }
+ if (!(ubq->flags & UBLK_F_UNPRIVILEGED_DEV))
+ return BLK_EH_RESET_TIMER;
+
+ if (unlikely(!tgid))
+ return BLK_EH_RESET_TIMER;
- return BLK_EH_RESET_TIMER;
+ rcu_read_lock();
+ pid = find_vpid(tgid);
+ p = pid_task(pid, PIDTYPE_PID);
+ if (p)
+ send_sig(SIGKILL, p, 0);
+ rcu_read_unlock();
+ return BLK_EH_DONE;
}
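
The timeout path now resolves the stored tgid to a task under RCU instead of keeping a cached task_struct per io; the looked-up task pointer is only valid inside the read-side section. A minimal sketch of that lookup pattern, factored into a hypothetical helper:

/* Sketch: signal a process by numeric tgid without holding a task ref. */
static void demo_kill_by_tgid(pid_t tgid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = pid_task(find_vpid(tgid), PIDTYPE_PID);
	if (p)
		send_sig(SIGKILL, p, 0);	/* p valid only in this RCU section */
	rcu_read_unlock();
}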
static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
@@ -1364,7 +1376,7 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
{
blk_status_t res;
- if (unlikely(ubq->fail_io))
+ if (unlikely(READ_ONCE(ubq->fail_io)))
return BLK_STS_TARGET;
/* With recovery feature enabled, force_abort is set in
@@ -1376,7 +1388,8 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
* Note: force_abort is guaranteed to be seen because it is set
* before the request queue is unquiesced.
*/
- if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
+ if (ublk_nosrv_should_queue_io(ubq) &&
+ unlikely(READ_ONCE(ubq->force_abort)))
return BLK_STS_IOERR;
if (check_cancel && unlikely(ubq->canceling))
@@ -1416,6 +1429,14 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
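
In ublk_prep_req() above, fail_io and force_abort are now flipped with WRITE_ONCE() while the queue stays unquiesced, so the lockless readers in the submission path must pair with READ_ONCE(). A hedged sketch of the pairing, assuming the field layout shown in this patch:

/* Sketch: marked accesses pair the release-path writer with ->queue_rq(). */
static void demo_mark_failing(struct ublk_queue *ubq)
{
	WRITE_ONCE(ubq->fail_io, true);		/* writer: char-dev release */
}

static bool demo_should_fail(const struct ublk_queue *ubq)
{
	return unlikely(READ_ONCE(ubq->fail_io));	/* reader: ->queue_rq() */
}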
+static inline bool ublk_belong_to_same_batch(const struct ublk_io *io,
+ const struct ublk_io *io2)
+{
+ return (io_uring_cmd_ctx_handle(io->cmd) ==
+ io_uring_cmd_ctx_handle(io2->cmd)) &&
+ (io->task == io2->task);
+}
+
static void ublk_queue_rqs(struct rq_list *rqlist)
{
struct rq_list requeue_list = { };
@@ -1427,14 +1448,16 @@ static void ublk_queue_rqs(struct rq_list *rqlist)
struct ublk_queue *this_q = req->mq_hctx->driver_data;
struct ublk_io *this_io = &this_q->ios[req->tag];
- if (io && io->task != this_io->task && !rq_list_empty(&submit_list))
+ if (ublk_prep_req(this_q, req, true) != BLK_STS_OK) {
+ rq_list_add_tail(&requeue_list, req);
+ continue;
+ }
+
+ if (io && !ublk_belong_to_same_batch(io, this_io) &&
+ !rq_list_empty(&submit_list))
ublk_queue_cmd_list(io, &submit_list);
io = this_io;
-
- if (ublk_prep_req(this_q, req, true) == BLK_STS_OK)
- rq_list_add_tail(&submit_list, req);
- else
- rq_list_add_tail(&requeue_list, req);
+ rq_list_add_tail(&submit_list, req);
}
if (!rq_list_empty(&submit_list))
@@ -1463,9 +1486,6 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
{
int i;
- /* All old ioucmds have to be completed */
- ubq->nr_io_ready = 0;
-
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
@@ -1475,7 +1495,7 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
*/
io->flags &= UBLK_IO_FLAG_CANCELED;
io->cmd = NULL;
- io->addr = 0;
+ io->buf.addr = 0;
/*
* old task is PF_EXITING, put it now
@@ -1487,6 +1507,9 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
put_task_struct(io->task);
io->task = NULL;
}
+
+ WARN_ON_ONCE(refcount_read(&io->ref));
+ WARN_ON_ONCE(io->task_registered_buffers);
}
}
@@ -1498,6 +1521,7 @@ static int ublk_ch_open(struct inode *inode, struct file *filp)
if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
return -EBUSY;
filp->private_data = ub;
+ ub->ublksrv_tgid = current->tgid;
return 0;
}
@@ -1510,8 +1534,9 @@ static void ublk_reset_ch_dev(struct ublk_device *ub)
/* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */
ub->mm = NULL;
- ub->nr_queues_ready = 0;
- ub->nr_privileged_daemon = 0;
+ ub->nr_io_ready = 0;
+ ub->unprivileged_daemons = false;
+ ub->ublksrv_tgid = -1;
}
static struct gendisk *ublk_get_disk(struct ublk_device *ub)
@@ -1533,13 +1558,84 @@ static void ublk_put_disk(struct gendisk *disk)
put_device(disk_to_dev(disk));
}
-static int ublk_ch_release(struct inode *inode, struct file *filp)
+/*
+ * Use this function to ensure that ->canceling is consistently set for
+ * the device and all queues. Do not set these flags directly.
+ *
+ * Caller must ensure that:
+ * - cancel_mutex is held. This ensures that there is no concurrent
+ * access to ub->canceling and no concurrent writes to ubq->canceling.
+ * - there are no concurrent reads of ubq->canceling from the queue_rq
+ * path. This can be done by quiescing the queue, or through other
+ * means.
+ */
+static void ublk_set_canceling(struct ublk_device *ub, bool canceling)
+ __must_hold(&ub->cancel_mutex)
{
- struct ublk_device *ub = filp->private_data;
+ int i;
+
+ ub->canceling = canceling;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_get_queue(ub, i)->canceling = canceling;
+}
+
+static bool ublk_check_and_reset_active_ref(struct ublk_device *ub)
+{
+ int i, j;
+
+ if (!(ub->dev_info.flags & (UBLK_F_SUPPORT_ZERO_COPY |
+ UBLK_F_AUTO_BUF_REG)))
+ return false;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ for (j = 0; j < ubq->q_depth; j++) {
+ struct ublk_io *io = &ubq->ios[j];
+ unsigned int refs = refcount_read(&io->ref) +
+ io->task_registered_buffers;
+
+ /*
+ * UBLK_REFCOUNT_INIT or zero means no active
+ * reference
+ */
+ if (refs != UBLK_REFCOUNT_INIT && refs != 0)
+ return true;
+
+ /* reset to zero if the io has no active references */
+ refcount_set(&io->ref, 0);
+ io->task_registered_buffers = 0;
+ }
+ }
+ return false;
+}
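
The check above relies on an invariant of the batched scheme: for a quiesced io, refcount_read(&io->ref) plus task_registered_buffers is either exactly UBLK_REFCOUNT_INIT (initialized, no external holders) or 0 (never started); any other total means an io_uring-registered buffer still pins the request. A hedged helper expressing just that invariant:

/* Sketch: the idle-io invariant that the reset loop above depends on. */
static bool demo_io_is_idle(const struct ublk_io *io)
{
	unsigned int total = refcount_read(&io->ref) +
			     io->task_registered_buffers;

	return total == UBLK_REFCOUNT_INIT || total == 0;
}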
+
+static void ublk_ch_release_work_fn(struct work_struct *work)
+{
+ struct ublk_device *ub =
+ container_of(work, struct ublk_device, exit_work.work);
struct gendisk *disk;
int i;
/*
+ * For zero-copy and auto buffer register modes, I/O references
+ * might not be dropped naturally when the daemon is killed, but
+ * io_uring guarantees that registered bvec kernel buffers are
+ * finally unregistered when the io_uring context is freed, at
+ * which point the active references are dropped.
+ *
+ * Wait until the active references are dropped to avoid
+ * use-after-free.
+ *
+ * A registered buffer may be unregistered in io_uring's release
+ * handler, so wait by rescheduling this work function to avoid a
+ * release dependency between the two files.
+ */
+ if (ublk_check_and_reset_active_ref(ub)) {
+ schedule_delayed_work(&ub->exit_work, 1);
+ return;
+ }
+
+ /*
* disk isn't attached yet: either the device isn't live, or it has
* already been removed, so we need not do anything
*/
@@ -1561,12 +1657,11 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
* All requests may be inflight, so ->canceling may not be set, set
* it now.
*/
- for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
- struct ublk_queue *ubq = ublk_get_queue(ub, i);
-
- ubq->canceling = true;
- ublk_abort_queue(ub, ubq);
- }
+ mutex_lock(&ub->cancel_mutex);
+ ublk_set_canceling(ub, true);
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_abort_queue(ub, ublk_get_queue(ub, i));
+ mutex_unlock(&ub->cancel_mutex);
blk_mq_kick_requeue_list(disk->queue);
/*
@@ -1584,7 +1679,6 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
* Transition the device to the nosrv state. What exactly this
* means depends on the recovery flags
*/
- blk_mq_quiesce_queue(disk->queue);
if (ublk_nosrv_should_stop_dev(ub)) {
/*
* Allow any pending/future I/O to pass through quickly
@@ -1592,8 +1686,7 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
* waits for all pending I/O to complete
*/
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
- ublk_get_queue(ub, i)->force_abort = true;
- blk_mq_unquiesce_queue(disk->queue);
+ WRITE_ONCE(ublk_get_queue(ub, i)->force_abort, true);
ublk_stop_dev_unlocked(ub);
} else {
@@ -1603,9 +1696,8 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
} else {
ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
- ublk_get_queue(ub, i)->fail_io = true;
+ WRITE_ONCE(ublk_get_queue(ub, i)->fail_io, true);
}
- blk_mq_unquiesce_queue(disk->queue);
}
unlock:
mutex_unlock(&ub->mutex);
@@ -1615,6 +1707,23 @@ unlock:
ublk_reset_ch_dev(ub);
out:
clear_bit(UB_STATE_OPEN, &ub->state);
+
+ /* put the reference grabbed in ublk_ch_release() */
+ ublk_put_device(ub);
+}
+
+static int ublk_ch_release(struct inode *inode, struct file *filp)
+{
+ struct ublk_device *ub = filp->private_data;
+
+ /*
+ * Grab ublk device reference, so it won't be gone until we are
+ * really released from work function.
+ */
+ ublk_get_device(ub);
+
+ INIT_DELAYED_WORK(&ub->exit_work, ublk_ch_release_work_fn);
+ schedule_delayed_work(&ub->exit_work, 0);
return 0;
}
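
Deferring the teardown to a delayed work item breaks the release-order dependency between the ublk char device and the io_uring file: the work simply re-arms itself until io_uring's own ->release() has dropped the buffer references. The general shape of the pattern, as a hedged sketch with hypothetical names:

/* Sketch of the self-rearming release work used above (names hypothetical). */
struct demo_dev {
	struct delayed_work exit_work;
	refcount_t io_refs;
};

static void demo_release_work_fn(struct work_struct *work)
{
	struct demo_dev *d = container_of(work, struct demo_dev,
					  exit_work.work);

	if (refcount_read(&d->io_refs)) {	/* buffers still registered */
		/* io_uring may drop them from its own ->release(); retry */
		schedule_delayed_work(&d->exit_work, 1);
		return;
	}
	/* ... the real teardown would run here ... */
}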
@@ -1649,23 +1758,23 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
__func__, q_id, current->pid, vma->vm_start,
phys_off, (unsigned long)sz);
- if (sz != ublk_queue_cmd_buf_size(ub, q_id))
+ if (sz != ublk_queue_cmd_buf_size(ub))
return -EINVAL;
pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}
-static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
+static void __ublk_fail_req(struct ublk_device *ub, struct ublk_io *io,
struct request *req)
{
WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
- if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
+ if (ublk_nosrv_should_reissue_outstanding(ub))
blk_mq_requeue_request(req, false);
else {
io->res = -EIO;
- __ublk_complete_rq(req);
+ __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
}
}
@@ -1685,27 +1794,21 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
struct ublk_io *io = &ubq->ios[i];
if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
- __ublk_fail_req(ubq, io, io->req);
+ __ublk_fail_req(ub, io, io->req);
}
}
-/* Must be called when queue is frozen */
-static void ublk_mark_queue_canceling(struct ublk_queue *ubq)
+static void ublk_start_cancel(struct ublk_device *ub)
{
- spin_lock(&ubq->cancel_lock);
- if (!ubq->canceling)
- ubq->canceling = true;
- spin_unlock(&ubq->cancel_lock);
-}
-
-static void ublk_start_cancel(struct ublk_queue *ubq)
-{
- struct ublk_device *ub = ubq->dev;
struct gendisk *disk = ublk_get_disk(ub);
/* Our disk is already dead */
if (!disk)
return;
+
+ mutex_lock(&ub->cancel_mutex);
+ if (ub->canceling)
+ goto out;
/*
* Now we are serialized with ublk_queue_rq()
*
@@ -1714,8 +1817,10 @@ static void ublk_start_cancel(struct ublk_queue *ubq)
* touch completed uring_cmd
*/
blk_mq_quiesce_queue(disk->queue);
- ublk_mark_queue_canceling(ubq);
+ ublk_set_canceling(ub, true);
blk_mq_unquiesce_queue(disk->queue);
+out:
+ mutex_unlock(&ub->cancel_mutex);
ublk_put_disk(disk);
}
@@ -1751,7 +1856,7 @@ static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag,
spin_unlock(&ubq->cancel_lock);
if (!done)
- io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
+ io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, issue_flags);
}
/*
@@ -1788,16 +1893,17 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
if (WARN_ON_ONCE(task && task != io->task))
return;
- if (!ubq->canceling)
- ublk_start_cancel(ubq);
+ ublk_start_cancel(ubq->dev);
WARN_ON_ONCE(io->cmd != cmd);
ublk_cancel_cmd(ubq, pdu->tag, issue_flags);
}
-static inline bool ublk_queue_ready(struct ublk_queue *ubq)
+static inline bool ublk_dev_ready(const struct ublk_device *ub)
{
- return ubq->nr_io_ready == ubq->q_depth;
+ u32 total = (u32)ub->dev_info.nr_hw_queues * ub->dev_info.queue_depth;
+
+ return ub->nr_io_ready == total;
}
static void ublk_cancel_queue(struct ublk_queue *ubq)
@@ -1913,24 +2019,22 @@ static void ublk_reset_io_flags(struct ublk_device *ub)
for (j = 0; j < ubq->q_depth; j++)
ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED;
spin_unlock(&ubq->cancel_lock);
- ubq->canceling = false;
ubq->fail_io = false;
}
+ mutex_lock(&ub->cancel_mutex);
+ ublk_set_canceling(ub, false);
+ mutex_unlock(&ub->cancel_mutex);
}
/* device can only be started after all IOs are ready */
-static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
+static void ublk_mark_io_ready(struct ublk_device *ub)
__must_hold(&ub->mutex)
{
- ubq->nr_io_ready++;
- if (ublk_queue_ready(ubq)) {
- ub->nr_queues_ready++;
-
- if (capable(CAP_SYS_ADMIN))
- ub->nr_privileged_daemon++;
- }
+ if (!ub->unprivileged_daemons && !capable(CAP_SYS_ADMIN))
+ ub->unprivileged_daemons = true;
- if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) {
+ ub->nr_io_ready++;
+ if (ublk_dev_ready(ub)) {
/* now we are ready for handling ublk io request */
ublk_reset_io_flags(ub);
complete_all(&ub->completion);
@@ -1950,12 +2054,69 @@ static inline int ublk_check_cmd_op(u32 cmd_op)
return 0;
}
-static inline void ublk_fill_io_cmd(struct ublk_io *io,
- struct io_uring_cmd *cmd, unsigned long buf_addr)
+static inline int ublk_set_auto_buf_reg(struct ublk_io *io, struct io_uring_cmd *cmd)
+{
+ struct ublk_auto_buf_reg buf;
+
+ buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
+
+ if (buf.reserved0 || buf.reserved1)
+ return -EINVAL;
+
+ if (buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK)
+ return -EINVAL;
+ io->buf.auto_reg = buf;
+ return 0;
+}
+
+static int ublk_handle_auto_buf_reg(struct ublk_io *io,
+ struct io_uring_cmd *cmd,
+ u16 *buf_idx)
{
+ if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) {
+ io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG;
+
+ /*
+ * `UBLK_F_AUTO_BUF_REG` only works iff `UBLK_IO_FETCH_REQ`
+ * and `UBLK_IO_COMMIT_AND_FETCH_REQ` are issued from the same
+ * `io_ring_ctx`.
+ *
+ * If this uring_cmd's io_ring_ctx isn't the same as the one
+ * used to register the buffer, it is the ublk server's
+ * responsibility to unregister the buffer; otherwise this
+ * ublk request gets stuck.
+ */
+ if (io->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd))
+ *buf_idx = io->buf.auto_reg.index;
+ }
+
+ return ublk_set_auto_buf_reg(io, cmd);
+}
+
+/* Once we return, `io->req` can't be used any more */
+static inline struct request *
+ublk_fill_io_cmd(struct ublk_io *io, struct io_uring_cmd *cmd)
+{
+ struct request *req = io->req;
+
io->cmd = cmd;
io->flags |= UBLK_IO_FLAG_ACTIVE;
- io->addr = buf_addr;
+ /* now this cmd slot is owned by ublk driver */
+ io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
+
+ return req;
+}
+
+static inline int
+ublk_config_io_buf(const struct ublk_device *ub, struct ublk_io *io,
+ struct io_uring_cmd *cmd, unsigned long buf_addr,
+ u16 *buf_idx)
+{
+ if (ublk_dev_support_auto_buf_reg(ub))
+ return ublk_handle_auto_buf_reg(io, cmd, buf_idx);
+
+ io->buf.addr = buf_addr;
+ return 0;
}
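
ublk_config_io_buf() writes either io->buf.addr or io->buf.auto_reg, which suggests the per-io buffer state is a union selected by the device flags (the struct ublk_io definition itself is outside this hunk). A hedged sketch of such a layout:

/* Sketch: assumed layout of the per-io buffer state, a union by dev flags. */
struct demo_io_buf {
	union {
		__u64 addr;				/* copy/map modes: user address */
		struct ublk_auto_buf_reg auto_reg;	/* UBLK_F_AUTO_BUF_REG:
							 * buffer index + flags */
	};
};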
static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
@@ -1973,132 +2134,158 @@ static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
io_uring_cmd_mark_cancelable(cmd, issue_flags);
}
-static inline int ublk_set_auto_buf_reg(struct io_uring_cmd *cmd)
-{
- struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
-
- pdu->buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
-
- if (pdu->buf.reserved0 || pdu->buf.reserved1)
- return -EINVAL;
-
- if (pdu->buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK)
- return -EINVAL;
- return 0;
-}
-
static void ublk_io_release(void *priv)
{
struct request *rq = priv;
struct ublk_queue *ubq = rq->mq_hctx->driver_data;
+ struct ublk_io *io = &ubq->ios[rq->tag];
- ublk_put_req_ref(ubq, rq);
+ /*
+ * task_registered_buffers may be 0 if buffers were registered off task
+ * but unregistered on task, or after UBLK_IO_COMMIT_AND_FETCH_REQ.
+ */
+ if (current == io->task && io->task_registered_buffers)
+ io->task_registered_buffers--;
+ else
+ ublk_put_req_ref(io, rq);
}
static int ublk_register_io_buf(struct io_uring_cmd *cmd,
- const struct ublk_queue *ubq, unsigned int tag,
+ struct ublk_device *ub,
+ u16 q_id, u16 tag,
+ struct ublk_io *io,
unsigned int index, unsigned int issue_flags)
{
- struct ublk_device *ub = cmd->file->private_data;
struct request *req;
int ret;
- if (!ublk_support_zero_copy(ubq))
+ if (!ublk_dev_support_zero_copy(ub))
return -EINVAL;
- req = __ublk_check_and_get_req(ub, ubq, tag, 0);
+ req = __ublk_check_and_get_req(ub, q_id, tag, io, 0);
if (!req)
return -EINVAL;
ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index,
issue_flags);
if (ret) {
- ublk_put_req_ref(ubq, req);
+ ublk_put_req_ref(io, req);
return ret;
}
return 0;
}
+static int
+ublk_daemon_register_io_buf(struct io_uring_cmd *cmd,
+ struct ublk_device *ub,
+ u16 q_id, u16 tag, struct ublk_io *io,
+ unsigned index, unsigned issue_flags)
+{
+ unsigned new_registered_buffers;
+ struct request *req = io->req;
+ int ret;
+
+ /*
+ * Ensure there are still references for ublk_sub_req_ref() to release.
+ * If not, fall back on the thread-safe buffer registration.
+ */
+ new_registered_buffers = io->task_registered_buffers + 1;
+ if (unlikely(new_registered_buffers >= UBLK_REFCOUNT_INIT))
+ return ublk_register_io_buf(cmd, ub, q_id, tag, io, index,
+ issue_flags);
+
+ if (!ublk_dev_support_zero_copy(ub) || !ublk_rq_has_data(req))
+ return -EINVAL;
+
+ ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index,
+ issue_flags);
+ if (ret)
+ return ret;
+
+ io->task_registered_buffers = new_registered_buffers;
+ return 0;
+}
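
task_registered_buffers is a plain integer precisely because this path only runs on io->task; registration from any other task must fall back to ublk_register_io_buf(), which takes a real atomic reference. A condensed sketch of the two accounting paths (hypothetical helper):

/* Sketch: task-local vs cross-task buffer registration accounting. */
static int demo_account_buf_reg(struct ublk_io *io)
{
	if (current == io->task) {
		io->task_registered_buffers++;	/* single writer, no atomics */
		return 0;
	}
	/* other tasks may race with completion: take a real reference */
	return refcount_inc_not_zero(&io->ref) ? 0 : -EINVAL;
}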
+
static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
- const struct ublk_queue *ubq,
+ const struct ublk_device *ub,
unsigned int index, unsigned int issue_flags)
{
- if (!ublk_support_zero_copy(ubq))
+ if (!(ub->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY))
return -EINVAL;
return io_buffer_unregister_bvec(cmd, index, issue_flags);
}
-static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
- struct ublk_io *io, __u64 buf_addr)
+static int ublk_check_fetch_buf(const struct ublk_device *ub, __u64 buf_addr)
{
- struct ublk_device *ub = ubq->dev;
- int ret = 0;
-
- /*
- * When handling FETCH command for setting up ublk uring queue,
- * ub->mutex is the innermost lock, and we won't block for handling
- * FETCH, so it is fine even for IO_URING_F_NONBLOCK.
- */
- mutex_lock(&ub->mutex);
- /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
- if (ublk_queue_ready(ubq)) {
- ret = -EBUSY;
- goto out;
- }
-
- /* allow each command to be FETCHed at most once */
- if (io->flags & UBLK_IO_FLAG_ACTIVE) {
- ret = -EINVAL;
- goto out;
- }
-
- WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV);
-
- if (ublk_need_map_io(ubq)) {
+ if (ublk_dev_need_map_io(ub)) {
/*
* FETCH_RQ has to provide IO buffer if NEED GET
* DATA is not enabled
*/
- if (!buf_addr && !ublk_need_get_data(ubq))
- goto out;
+ if (!buf_addr && !ublk_dev_need_get_data(ub))
+ return -EINVAL;
} else if (buf_addr) {
/* User copy requires addr to be unset */
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
+ return 0;
+}
- if (ublk_support_auto_buf_reg(ubq)) {
- ret = ublk_set_auto_buf_reg(cmd);
- if (ret)
- goto out;
- }
+static int __ublk_fetch(struct io_uring_cmd *cmd, struct ublk_device *ub,
+ struct ublk_io *io)
+{
+ /* UBLK_IO_FETCH_REQ is only allowed before the dev is set up */
+ if (ublk_dev_ready(ub))
+ return -EBUSY;
+
+ /* allow each command to be FETCHed at most once */
+ if (io->flags & UBLK_IO_FLAG_ACTIVE)
+ return -EINVAL;
+
+ WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV);
+
+ ublk_fill_io_cmd(io, cmd);
- ublk_fill_io_cmd(io, cmd, buf_addr);
WRITE_ONCE(io->task, get_task_struct(current));
- ublk_mark_io_ready(ub, ubq);
-out:
+ ublk_mark_io_ready(ub);
+
+ return 0;
+}
+
+static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_device *ub,
+ struct ublk_io *io, __u64 buf_addr)
+{
+ int ret;
+
+ /*
+ * When handling FETCH command for setting up ublk uring queue,
+ * ub->mutex is the innermost lock, and we won't block for handling
+ * FETCH, so it is fine even for IO_URING_F_NONBLOCK.
+ */
+ mutex_lock(&ub->mutex);
+ ret = __ublk_fetch(cmd, ub, io);
+ if (!ret)
+ ret = ublk_config_io_buf(ub, io, cmd, buf_addr, NULL);
mutex_unlock(&ub->mutex);
return ret;
}
-static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
- struct ublk_io *io, struct io_uring_cmd *cmd,
- const struct ublksrv_io_cmd *ub_cmd,
- unsigned int issue_flags)
+static int ublk_check_commit_and_fetch(const struct ublk_device *ub,
+ struct ublk_io *io, __u64 buf_addr)
{
struct request *req = io->req;
- if (ublk_need_map_io(ubq)) {
+ if (ublk_dev_need_map_io(ub)) {
/*
* COMMIT_AND_FETCH_REQ has to provide IO buffer if
* NEED GET DATA is not enabled or it is Read IO.
*/
- if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
+ if (!buf_addr && (!ublk_dev_need_get_data(ub) ||
req_op(req) == REQ_OP_READ))
return -EINVAL;
- } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) {
+ } else if (req_op(req) != REQ_OP_ZONE_APPEND && buf_addr) {
/*
* User copy requires addr to be unset when command is
* not zone append
@@ -2106,52 +2293,20 @@ static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
return -EINVAL;
}
- if (ublk_support_auto_buf_reg(ubq)) {
- int ret;
-
- /*
- * `UBLK_F_AUTO_BUF_REG` only works iff `UBLK_IO_FETCH_REQ`
- * and `UBLK_IO_COMMIT_AND_FETCH_REQ` are issued from same
- * `io_ring_ctx`.
- *
- * If this uring_cmd's io_ring_ctx isn't same with the
- * one for registering the buffer, it is ublk server's
- * responsibility for unregistering the buffer, otherwise
- * this ublk request gets stuck.
- */
- if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) {
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
-
- if (data->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd))
- io_buffer_unregister_bvec(cmd, data->buf_index,
- issue_flags);
- io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG;
- }
-
- ret = ublk_set_auto_buf_reg(cmd);
- if (ret)
- return ret;
- }
-
- ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
-
- /* now this cmd slot is owned by ublk driver */
- io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
- io->res = ub_cmd->result;
-
- if (req_op(req) == REQ_OP_ZONE_APPEND)
- req->__sector = ub_cmd->zone_append_lba;
-
- if (likely(!blk_should_fake_timeout(req->q)))
- ublk_put_req_ref(ubq, req);
-
return 0;
}
-static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io)
+static bool ublk_need_complete_req(const struct ublk_device *ub,
+ struct ublk_io *io)
{
- struct request *req = io->req;
+ if (ublk_dev_need_req_ref(ub))
+ return ublk_sub_req_ref(io);
+ return true;
+}
+static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io,
+ struct request *req)
+{
/*
* We have handled UBLK_IO_NEED_GET_DATA command,
* so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
@@ -2159,7 +2314,7 @@ static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io)
*/
io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
/* update iod->addr because ublksrv may have passed a new io buffer */
- ublk_get_iod(ubq, req->tag)->addr = io->addr;
+ ublk_get_iod(ubq, req->tag)->addr = io->buf.addr;
pr_devel("%s: update iod->addr: qid %d tag %d io_flags %x addr %llx\n",
__func__, ubq->q_id, req->tag, io->flags,
ublk_get_iod(ubq, req->tag)->addr);
@@ -2167,46 +2322,81 @@ static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io)
return ublk_start_io(ubq, req, io);
}
-static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
- unsigned int issue_flags,
- const struct ublksrv_io_cmd *ub_cmd)
+static int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
{
+ /* May point to userspace-mapped memory */
+ const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
+ u16 buf_idx = UBLK_INVALID_BUF_IDX;
struct ublk_device *ub = cmd->file->private_data;
- struct task_struct *task;
struct ublk_queue *ubq;
- struct ublk_io *io;
+ struct ublk_io *io = NULL;
u32 cmd_op = cmd->cmd_op;
- unsigned tag = ub_cmd->tag;
- int ret = -EINVAL;
+ u16 q_id = READ_ONCE(ub_src->q_id);
+ u16 tag = READ_ONCE(ub_src->tag);
+ s32 result = READ_ONCE(ub_src->result);
+ u64 addr = READ_ONCE(ub_src->addr); /* unioned with zone_append_lba */
+ struct request *req;
+ int ret;
+ bool compl;
+
+ WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED);
pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
- __func__, cmd->cmd_op, ub_cmd->q_id, tag,
- ub_cmd->result);
+ __func__, cmd->cmd_op, q_id, tag, result);
- if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
+ ret = ublk_check_cmd_op(cmd_op);
+ if (ret)
goto out;
- ubq = ublk_get_queue(ub, ub_cmd->q_id);
+ /*
+ * io_buffer_unregister_bvec() doesn't access the ubq or io,
+ * so no need to validate the q_id, tag, or task
+ */
+ if (_IOC_NR(cmd_op) == UBLK_IO_UNREGISTER_IO_BUF)
+ return ublk_unregister_io_buf(cmd, ub, addr, issue_flags);
- if (tag >= ubq->q_depth)
+ ret = -EINVAL;
+ if (q_id >= ub->dev_info.nr_hw_queues)
+ goto out;
+
+ ubq = ublk_get_queue(ub, q_id);
+
+ if (tag >= ub->dev_info.queue_depth)
goto out;
io = &ubq->ios[tag];
- task = READ_ONCE(io->task);
- if (task && task != current)
+ /* UBLK_IO_FETCH_REQ can be handled on any task, which sets io->task */
+ if (unlikely(_IOC_NR(cmd_op) == UBLK_IO_FETCH_REQ)) {
+ ret = ublk_check_fetch_buf(ub, addr);
+ if (ret)
+ goto out;
+ ret = ublk_fetch(cmd, ub, io, addr);
+ if (ret)
+ goto out;
+
+ ublk_prep_cancel(cmd, issue_flags, ubq, tag);
+ return -EIOCBQUEUED;
+ }
+
+ if (READ_ONCE(io->task) != current) {
+ /*
+ * ublk_register_io_buf() accesses only the io's refcount,
+ * so can be handled on any task
+ */
+ if (_IOC_NR(cmd_op) == UBLK_IO_REGISTER_IO_BUF)
+ return ublk_register_io_buf(cmd, ub, q_id, tag, io,
+ addr, issue_flags);
+
goto out;
+ }
/* there is pending io cmd, something must be wrong */
- if (io->flags & UBLK_IO_FLAG_ACTIVE) {
+ if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) {
ret = -EBUSY;
goto out;
}
- /* only UBLK_IO_FETCH_REQ is allowed if io is not OWNED_BY_SRV */
- if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV) &&
- _IOC_NR(cmd_op) != UBLK_IO_FETCH_REQ)
- goto out;
-
/*
* ensure that the user issues UBLK_IO_NEED_GET_DATA
* iff the driver has set the UBLK_IO_FLAG_NEED_GET_DATA.
@@ -2215,32 +2405,44 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA))
goto out;
- ret = ublk_check_cmd_op(cmd_op);
- if (ret)
- goto out;
-
- ret = -EINVAL;
switch (_IOC_NR(cmd_op)) {
case UBLK_IO_REGISTER_IO_BUF:
- return ublk_register_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags);
- case UBLK_IO_UNREGISTER_IO_BUF:
- return ublk_unregister_io_buf(cmd, ubq, ub_cmd->addr, issue_flags);
- case UBLK_IO_FETCH_REQ:
- ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr);
+ return ublk_daemon_register_io_buf(cmd, ub, q_id, tag, io, addr,
+ issue_flags);
+ case UBLK_IO_COMMIT_AND_FETCH_REQ:
+ ret = ublk_check_commit_and_fetch(ub, io, addr);
if (ret)
goto out;
- break;
- case UBLK_IO_COMMIT_AND_FETCH_REQ:
- ret = ublk_commit_and_fetch(ubq, io, cmd, ub_cmd, issue_flags);
+ io->res = result;
+ req = ublk_fill_io_cmd(io, cmd);
+ ret = ublk_config_io_buf(ub, io, cmd, addr, &buf_idx);
+ compl = ublk_need_complete_req(ub, io);
+
+ /* can't touch 'ublk_io' any more */
+ if (buf_idx != UBLK_INVALID_BUF_IDX)
+ io_buffer_unregister_bvec(cmd, buf_idx, issue_flags);
+ if (req_op(req) == REQ_OP_ZONE_APPEND)
+ req->__sector = addr;
+ if (compl)
+ __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
+
if (ret)
goto out;
break;
case UBLK_IO_NEED_GET_DATA:
- io->addr = ub_cmd->addr;
- if (!ublk_get_data(ubq, io))
- return -EIOCBQUEUED;
-
- return UBLK_IO_RES_OK;
+ /*
+ * ublk_get_data() may fail and fall back to requeueing, so keep
+ * the uring_cmd active first and prepare to handle a newly
+ * requeued request
+ */
+ req = ublk_fill_io_cmd(io, cmd);
+ ret = ublk_config_io_buf(ub, io, cmd, addr, NULL);
+ WARN_ON_ONCE(ret);
+ if (likely(ublk_get_data(ubq, io, req))) {
+ __ublk_prep_compl_io_cmd(io, req);
+ return UBLK_IO_RES_OK;
+ }
+ break;
default:
goto out;
}
@@ -2249,20 +2451,24 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
out:
pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
- __func__, cmd_op, tag, ret, io->flags);
+ __func__, cmd_op, tag, ret, io ? io->flags : 0);
return ret;
}
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- const struct ublk_queue *ubq, int tag, size_t offset)
+ u16 q_id, u16 tag, struct ublk_io *io, size_t offset)
{
struct request *req;
- req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
+ /*
+ * can't use io->req in case of concurrent UBLK_IO_COMMIT_AND_FETCH_REQ,
+ * which would overwrite it with io->cmd
+ */
+ req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
if (!req)
return NULL;
- if (!ublk_get_req_ref(ubq, req))
+ if (!ublk_get_req_ref(io))
return NULL;
if (unlikely(!blk_mq_request_started(req) || req->tag != tag))
@@ -2276,37 +2482,18 @@ static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
return req;
fail_put:
- ublk_put_req_ref(ubq, req);
+ ublk_put_req_ref(io, req);
return NULL;
}
-static inline int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
- unsigned int issue_flags)
-{
- /*
- * Not necessary for async retry, but let's keep it simple and always
- * copy the values to avoid any potential reuse.
- */
- const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
- const struct ublksrv_io_cmd ub_cmd = {
- .q_id = READ_ONCE(ub_src->q_id),
- .tag = READ_ONCE(ub_src->tag),
- .result = READ_ONCE(ub_src->result),
- .addr = READ_ONCE(ub_src->addr)
- };
-
- WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED);
-
- return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
-}
-
-static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd,
- unsigned int issue_flags)
+static void ublk_ch_uring_cmd_cb(struct io_tw_req tw_req, io_tw_token_t tw)
{
+ unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS;
+ struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req);
int ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
if (ret != -EIOCBQUEUED)
- io_uring_cmd_done(cmd, ret, 0, issue_flags);
+ io_uring_cmd_done(cmd, ret, issue_flags);
}
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
@@ -2343,7 +2530,8 @@ static inline bool ublk_check_ubuf_dir(const struct request *req,
}
static struct request *ublk_check_and_get_req(struct kiocb *iocb,
- struct iov_iter *iter, size_t *off, int dir)
+ struct iov_iter *iter, size_t *off, int dir,
+ struct ublk_io **io)
{
struct ublk_device *ub = iocb->ki_filp->private_data;
struct ublk_queue *ubq;
@@ -2351,9 +2539,6 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb,
size_t buf_off;
u16 tag, q_id;
- if (!ub)
- return ERR_PTR(-EACCES);
-
if (!user_backed_iter(iter))
return ERR_PTR(-EACCES);
@@ -2368,64 +2553,57 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb,
return ERR_PTR(-EINVAL);
ubq = ublk_get_queue(ub, q_id);
- if (!ubq)
- return ERR_PTR(-EINVAL);
-
- if (!ublk_support_user_copy(ubq))
+ if (!ublk_dev_support_user_copy(ub))
return ERR_PTR(-EACCES);
- if (tag >= ubq->q_depth)
+ if (tag >= ub->dev_info.queue_depth)
return ERR_PTR(-EINVAL);
- req = __ublk_check_and_get_req(ub, ubq, tag, buf_off);
+ *io = &ubq->ios[tag];
+ req = __ublk_check_and_get_req(ub, q_id, tag, *io, buf_off);
if (!req)
return ERR_PTR(-EINVAL);
- if (!req->mq_hctx || !req->mq_hctx->driver_data)
- goto fail;
-
if (!ublk_check_ubuf_dir(req, dir))
goto fail;
*off = buf_off;
return req;
fail:
- ublk_put_req_ref(ubq, req);
+ ublk_put_req_ref(*io, req);
return ERR_PTR(-EACCES);
}
static ssize_t ublk_ch_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- struct ublk_queue *ubq;
struct request *req;
+ struct ublk_io *io;
size_t buf_off;
size_t ret;
- req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST);
+ req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST, &io);
if (IS_ERR(req))
return PTR_ERR(req);
ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST);
- ubq = req->mq_hctx->driver_data;
- ublk_put_req_ref(ubq, req);
+ ublk_put_req_ref(io, req);
return ret;
}
static ssize_t ublk_ch_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
- struct ublk_queue *ubq;
struct request *req;
+ struct ublk_io *io;
size_t buf_off;
size_t ret;
- req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE);
+ req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE, &io);
if (IS_ERR(req))
return PTR_ERR(req);
ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE);
- ubq = req->mq_hctx->driver_data;
- ublk_put_req_ref(ubq, req);
+ ublk_put_req_ref(io, req);
return ret;
}
@@ -2442,69 +2620,94 @@ static const struct file_operations ublk_ch_fops = {
static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
{
- int size = ublk_queue_cmd_buf_size(ub, q_id);
- struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
- int i;
+ struct ublk_queue *ubq = ub->queues[q_id];
+ int size, i;
+
+ if (!ubq)
+ return;
+
+ size = ublk_queue_cmd_buf_size(ub);
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
if (io->task)
put_task_struct(io->task);
+ WARN_ON_ONCE(refcount_read(&io->ref));
+ WARN_ON_ONCE(io->task_registered_buffers);
}
if (ubq->io_cmd_buf)
free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
+
+ kvfree(ubq);
+ ub->queues[q_id] = NULL;
+}
+
+static int ublk_get_queue_numa_node(struct ublk_device *ub, int q_id)
+{
+ unsigned int cpu;
+
+ /* Find first CPU mapped to this queue */
+ for_each_possible_cpu(cpu) {
+ if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[cpu] == q_id)
+ return cpu_to_node(cpu);
+ }
+
+ return NUMA_NO_NODE;
}
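
Per-queue allocations now land on the NUMA node of the CPUs that blk-mq mapped to the queue. A condensed sketch combining the node lookup with a node-aware allocation (hypothetical wrapper around the calls used above):

/* Sketch: allocate a per-queue object near the CPUs that service it. */
static void *demo_alloc_on_queue_node(struct blk_mq_tag_set *set, int q_id,
				      size_t size)
{
	int node = NUMA_NO_NODE;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		if (set->map[HCTX_TYPE_DEFAULT].mq_map[cpu] == q_id) {
			node = cpu_to_node(cpu);	/* first mapped CPU wins */
			break;
		}
	}
	return kvzalloc_node(size, GFP_KERNEL, node);
}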
static int ublk_init_queue(struct ublk_device *ub, int q_id)
{
- struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+ int depth = ub->dev_info.queue_depth;
gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
- void *ptr;
+ struct ublk_queue *ubq;
+ struct page *page;
+ int numa_node;
int size;
+ /* Determine NUMA node based on queue's CPU affinity */
+ numa_node = ublk_get_queue_numa_node(ub, q_id);
+
+ /* Allocate queue structure on local NUMA node */
+ ubq = kvzalloc_node(struct_size(ubq, ios, depth), GFP_KERNEL,
+ numa_node);
+ if (!ubq)
+ return -ENOMEM;
+
spin_lock_init(&ubq->cancel_lock);
ubq->flags = ub->dev_info.flags;
ubq->q_id = q_id;
- ubq->q_depth = ub->dev_info.queue_depth;
- size = ublk_queue_cmd_buf_size(ub, q_id);
+ ubq->q_depth = depth;
+ size = ublk_queue_cmd_buf_size(ub);
- ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
- if (!ptr)
+ /* Allocate I/O command buffer on local NUMA node */
+ page = alloc_pages_node(numa_node, gfp_flags, get_order(size));
+ if (!page) {
+ kvfree(ubq);
return -ENOMEM;
+ }
+ ubq->io_cmd_buf = page_address(page);
- ubq->io_cmd_buf = ptr;
+ ub->queues[q_id] = ubq;
ubq->dev = ub;
return 0;
}
static void ublk_deinit_queues(struct ublk_device *ub)
{
- int nr_queues = ub->dev_info.nr_hw_queues;
int i;
- if (!ub->__queues)
- return;
-
- for (i = 0; i < nr_queues; i++)
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
ublk_deinit_queue(ub, i);
- kfree(ub->__queues);
}
static int ublk_init_queues(struct ublk_device *ub)
{
- int nr_queues = ub->dev_info.nr_hw_queues;
- int depth = ub->dev_info.queue_depth;
- int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
- int i, ret = -ENOMEM;
-
- ub->queue_size = ubq_size;
- ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
- if (!ub->__queues)
- return ret;
+ int i, ret;
- for (i = 0; i < nr_queues; i++) {
- if (ublk_init_queue(ub, i))
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ ret = ublk_init_queue(ub, i);
+ if (ret)
goto fail;
}
@@ -2555,6 +2758,7 @@ static void ublk_cdev_rel(struct device *dev)
ublk_deinit_queues(ub);
ublk_free_dev_number(ub);
mutex_destroy(&ub->mutex);
+ mutex_destroy(&ub->cancel_mutex);
kfree(ub);
}
@@ -2602,7 +2806,6 @@ static int ublk_add_tag_set(struct ublk_device *ub)
ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
ub->tag_set.queue_depth = ub->dev_info.queue_depth;
ub->tag_set.numa_node = NUMA_NO_NODE;
- ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
ub->tag_set.driver_data = ub;
return blk_mq_alloc_tag_set(&ub->tag_set);
}
@@ -2704,6 +2907,9 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,
if (wait_for_completion_interruptible(&ub->completion) != 0)
return -EINTR;
+ if (ub->ublksrv_tgid != ublksrv_pid)
+ return -EINVAL;
+
mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
test_bit(UB_STATE_USED, &ub->state)) {
@@ -2725,8 +2931,8 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,
ublk_apply_params(ub);
- /* don't probe partitions if any one ubq daemon is un-trusted */
- if (ub->nr_privileged_daemon != ub->nr_queues_ready)
+ /* don't probe partitions if any daemon task is untrusted */
+ if (ub->unprivileged_daemons)
set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
ublk_get_device(ub);
@@ -2825,6 +3031,10 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
if (copy_from_user(&info, argp, sizeof(info)))
return -EFAULT;
+ if (info.queue_depth > UBLK_MAX_QUEUE_DEPTH || !info.queue_depth ||
+ info.nr_hw_queues > UBLK_MAX_NR_QUEUES || !info.nr_hw_queues)
+ return -EINVAL;
+
if (capable(CAP_SYS_ADMIN))
info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
@@ -2899,11 +3109,12 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
goto out_unlock;
ret = -ENOMEM;
- ub = kzalloc(sizeof(*ub), GFP_KERNEL);
+ ub = kzalloc(struct_size(ub, queues, info.nr_hw_queues), GFP_KERNEL);
if (!ub)
goto out_unlock;
mutex_init(&ub->mutex);
spin_lock_init(&ub->lock);
+ mutex_init(&ub->cancel_mutex);
ret = ublk_alloc_dev_number(ub, header->dev_id);
if (ret < 0)
@@ -2924,7 +3135,8 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
UBLK_F_URING_CMD_COMP_IN_TASK |
- UBLK_F_PER_IO_DAEMON;
+ UBLK_F_PER_IO_DAEMON |
+ UBLK_F_BUF_REG_OFF_DAEMON;
/* GET_DATA isn't needed any more with USER_COPY or ZERO COPY */
if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY |
@@ -2947,17 +3159,17 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
ub->dev_info.nr_hw_queues, nr_cpu_ids);
ublk_align_max_io_size(ub);
- ret = ublk_init_queues(ub);
+ ret = ublk_add_tag_set(ub);
if (ret)
goto out_free_dev_number;
- ret = ublk_add_tag_set(ub);
+ ret = ublk_init_queues(ub);
if (ret)
- goto out_deinit_queues;
+ goto out_free_tag_set;
ret = -EFAULT;
if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
- goto out_free_tag_set;
+ goto out_deinit_queues;
/*
* Add the char dev so that ublksrv daemon can be setup.
@@ -2966,14 +3178,15 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
ret = ublk_add_chdev(ub);
goto out_unlock;
-out_free_tag_set:
- blk_mq_free_tag_set(&ub->tag_set);
out_deinit_queues:
ublk_deinit_queues(ub);
+out_free_tag_set:
+ blk_mq_free_tag_set(&ub->tag_set);
out_free_dev_number:
ublk_free_dev_number(ub);
out_free_ub:
mutex_destroy(&ub->mutex);
+ mutex_destroy(&ub->cancel_mutex);
kfree(ub);
out_unlock:
mutex_unlock(&ublk_ctl_mutex);
@@ -3198,6 +3411,9 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
pr_devel("%s: All FETCH_REQs received, dev id %d\n", __func__,
header->dev_id);
+ if (ub->ublksrv_tgid != ublksrv_pid)
+ return -EINVAL;
+
mutex_lock(&ub->mutex);
if (ublk_nosrv_should_stop_dev(ub))
goto out_unlock;
@@ -3311,7 +3527,7 @@ static int ublk_ctrl_quiesce_dev(struct ublk_device *ub,
/* zero means wait forever */
u64 timeout_ms = header->data[0];
struct gendisk *disk;
- int i, ret = -ENODEV;
+ int ret = -ENODEV;
if (!(ub->dev_info.flags & UBLK_F_QUIESCE))
return -EOPNOTSUPP;
@@ -3328,14 +3544,12 @@ static int ublk_ctrl_quiesce_dev(struct ublk_device *ub,
if (ub->dev_info.state != UBLK_S_DEV_LIVE)
goto put_disk;
- /* Mark all queues as canceling */
+ /* Mark the device as canceling */
+ mutex_lock(&ub->cancel_mutex);
blk_mq_quiesce_queue(disk->queue);
- for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
- struct ublk_queue *ubq = ublk_get_queue(ub, i);
-
- ubq->canceling = true;
- }
+ ublk_set_canceling(ub, true);
blk_mq_unquiesce_queue(disk->queue);
+ mutex_unlock(&ub->cancel_mutex);
if (!timeout_ms)
timeout_ms = UINT_MAX;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 30bca8cb7106..357434bdae99 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -584,7 +584,8 @@ out:
static int virtblk_parse_zone(struct virtio_blk *vblk,
struct virtio_blk_zone_descriptor *entry,
- unsigned int idx, report_zones_cb cb, void *data)
+ unsigned int idx,
+ struct blk_report_zones_args *args)
{
struct blk_zone zone = { };
@@ -650,12 +651,12 @@ static int virtblk_parse_zone(struct virtio_blk *vblk,
* The callback below checks the validity of the reported
* entry data, no need to further validate it here.
*/
- return cb(&zone, idx, data);
+ return disk_report_zone(vblk->disk, &zone, idx, args);
}
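
The (cb, data) callback pair is replaced by an opaque struct blk_report_zones_args that drivers pass through to disk_report_zone(). A hedged sketch of what a minimal ->report_zones() looks like under the new signature introduced by this series, with made-up geometry:

/* Sketch: minimal ->report_zones() under the args-based API (made-up zone). */
static int demo_report_zones(struct gendisk *disk, sector_t sector,
			     unsigned int nr_zones,
			     struct blk_report_zones_args *args)
{
	struct blk_zone zone = {
		.start = sector,
		.len = 1 << 16,			/* hypothetical 32M zones */
		.capacity = 1 << 16,
		.type = BLK_ZONE_TYPE_CONVENTIONAL,
		.cond = BLK_ZONE_COND_NOT_WP,
	};

	/* hand the filled descriptor back to the block layer */
	return disk_report_zone(disk, &zone, 0, args);
}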
static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb,
- void *data)
+ unsigned int nr_zones,
+ struct blk_report_zones_args *args)
{
struct virtio_blk *vblk = disk->private_data;
struct virtio_blk_zone_report *report;
@@ -693,7 +694,7 @@ static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
for (i = 0; i < nz && zone_idx < nr_zones; i++) {
ret = virtblk_parse_zone(vblk, &report->zones[i],
- zone_idx, cb, data);
+ zone_idx, args);
if (ret)
goto fail_report;
@@ -829,9 +830,9 @@ out:
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
-static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
+static int virtblk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct virtio_blk *vblk = bd->bd_disk->private_data;
+ struct virtio_blk *vblk = disk->private_data;
int ret = 0;
mutex_lock(&vblk->vdev_mutex);
@@ -853,7 +854,7 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
/* some standard values, similar to sd */
geo->heads = 1 << 6;
geo->sectors = 1 << 5;
- geo->cylinders = get_capacity(bd->bd_disk) >> 11;
+ geo->cylinders = get_capacity(disk) >> 11;
}
out:
mutex_unlock(&vblk->vdev_mutex);
@@ -976,9 +977,8 @@ static int init_vq(struct virtio_blk *vblk)
return -EINVAL;
}
- num_vqs = min_t(unsigned int,
- min_not_zero(num_request_queues, nr_cpu_ids),
- num_vqs);
+ num_vqs = blk_mq_num_possible_queues(
+ min_not_zero(num_request_queues, num_vqs));
num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1);
@@ -1027,8 +1027,13 @@ static int init_vq(struct virtio_blk *vblk)
out:
kfree(vqs);
kfree(vqs_info);
- if (err)
+ if (err) {
kfree(vblk->vqs);
+ /*
+ * Set to NULL to prevent freeing vqs again during freezing.
+ */
+ vblk->vqs = NULL;
+ }
return err;
}
@@ -1599,6 +1604,12 @@ static int virtblk_freeze_priv(struct virtio_device *vdev)
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);
+ /*
+ * Set to NULL to prevent freeing vqs again after a failed vqs
+ * allocation during resume. Note that kfree() already handles NULL
+ * pointers safely.
+ */
+ vblk->vqs = NULL;
return 0;
}
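
Nulling vblk->vqs after kfree() makes the freeze/resume paths idempotent: if resume fails after freeing, a later teardown sees NULL and kfree(NULL) is a no-op. The idiom in isolation, as a small sketch:

/* Sketch: free-and-NULL so a repeated teardown cannot double-free. */
static void demo_free_vqs(struct virtio_blk *vblk)
{
	kfree(vblk->vqs);
	vblk->vqs = NULL;	/* kfree(NULL) is a no-op on any later pass */
}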
@@ -1683,7 +1694,7 @@ static int __init virtio_blk_init(void)
{
int error;
- virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
+ virtblk_wq = alloc_workqueue("virtio-blk", WQ_PERCPU, 0);
if (!virtblk_wq)
return -ENOMEM;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 5babe575c288..04fc6b552c04 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -493,11 +493,11 @@ static void blkif_restart_queue_callback(void *arg)
schedule_work(&rinfo->work);
}
-static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
+static int blkif_getgeo(struct gendisk *disk, struct hd_geometry *hg)
{
/* We don't have real geometry info, but let's at least return
values consistent with the size of the device */
- sector_t nsect = get_capacity(bd->bd_disk);
+ sector_t nsect = get_capacity(disk);
sector_t cylinders = nsect;
hg->heads = 0xff;
diff --git a/drivers/block/zloop.c b/drivers/block/zloop.c
index 553b1a713ab9..77bd6081b244 100644
--- a/drivers/block/zloop.c
+++ b/drivers/block/zloop.c
@@ -32,6 +32,8 @@ enum {
ZLOOP_OPT_NR_QUEUES = (1 << 6),
ZLOOP_OPT_QUEUE_DEPTH = (1 << 7),
ZLOOP_OPT_BUFFERED_IO = (1 << 8),
+ ZLOOP_OPT_ZONE_APPEND = (1 << 9),
+ ZLOOP_OPT_ORDERED_ZONE_APPEND = (1 << 10),
};
static const match_table_t zloop_opt_tokens = {
@@ -44,6 +46,8 @@ static const match_table_t zloop_opt_tokens = {
{ ZLOOP_OPT_NR_QUEUES, "nr_queues=%u" },
{ ZLOOP_OPT_QUEUE_DEPTH, "queue_depth=%u" },
{ ZLOOP_OPT_BUFFERED_IO, "buffered_io" },
+ { ZLOOP_OPT_ZONE_APPEND, "zone_append=%u" },
+ { ZLOOP_OPT_ORDERED_ZONE_APPEND, "ordered_zone_append" },
{ ZLOOP_OPT_ERR, NULL }
};
@@ -56,6 +60,8 @@ static const match_table_t zloop_opt_tokens = {
#define ZLOOP_DEF_NR_QUEUES 1
#define ZLOOP_DEF_QUEUE_DEPTH 128
#define ZLOOP_DEF_BUFFERED_IO false
+#define ZLOOP_DEF_ZONE_APPEND true
+#define ZLOOP_DEF_ORDERED_ZONE_APPEND false
/* Arbitrary limit on the zone size (16GB). */
#define ZLOOP_MAX_ZONE_SIZE_MB 16384
@@ -71,6 +77,8 @@ struct zloop_options {
unsigned int nr_queues;
unsigned int queue_depth;
bool buffered_io;
+ bool zone_append;
+ bool ordered_zone_append;
};
/*
@@ -92,6 +100,7 @@ struct zloop_zone {
unsigned long flags;
struct mutex lock;
+ spinlock_t wp_lock;
enum blk_zone_cond cond;
sector_t start;
sector_t wp;
@@ -108,6 +117,8 @@ struct zloop_device {
struct workqueue_struct *workqueue;
bool buffered_io;
+ bool zone_append;
+ bool ordered_zone_append;
const char *base_dir;
struct file *data_dir;
@@ -147,6 +158,7 @@ static int zloop_update_seq_zone(struct zloop_device *zlo, unsigned int zone_no)
struct zloop_zone *zone = &zlo->zones[zone_no];
struct kstat stat;
sector_t file_sectors;
+ unsigned long flags;
int ret;
lockdep_assert_held(&zone->lock);
@@ -172,16 +184,18 @@ static int zloop_update_seq_zone(struct zloop_device *zlo, unsigned int zone_no)
return -EINVAL;
}
+ spin_lock_irqsave(&zone->wp_lock, flags);
if (!file_sectors) {
zone->cond = BLK_ZONE_COND_EMPTY;
zone->wp = zone->start;
} else if (file_sectors == zlo->zone_capacity) {
zone->cond = BLK_ZONE_COND_FULL;
- zone->wp = zone->start + zlo->zone_size;
+ zone->wp = ULLONG_MAX;
} else {
zone->cond = BLK_ZONE_COND_CLOSED;
zone->wp = zone->start + file_sectors;
}
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
return 0;
}
@@ -225,6 +239,7 @@ unlock:
static int zloop_close_zone(struct zloop_device *zlo, unsigned int zone_no)
{
struct zloop_zone *zone = &zlo->zones[zone_no];
+ unsigned long flags;
int ret = 0;
if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
@@ -243,10 +258,12 @@ static int zloop_close_zone(struct zloop_device *zlo, unsigned int zone_no)
break;
case BLK_ZONE_COND_IMP_OPEN:
case BLK_ZONE_COND_EXP_OPEN:
+ spin_lock_irqsave(&zone->wp_lock, flags);
if (zone->wp == zone->start)
zone->cond = BLK_ZONE_COND_EMPTY;
else
zone->cond = BLK_ZONE_COND_CLOSED;
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
break;
case BLK_ZONE_COND_EMPTY:
case BLK_ZONE_COND_FULL:
@@ -264,6 +281,7 @@ unlock:
static int zloop_reset_zone(struct zloop_device *zlo, unsigned int zone_no)
{
struct zloop_zone *zone = &zlo->zones[zone_no];
+ unsigned long flags;
int ret = 0;
if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
@@ -281,9 +299,11 @@ static int zloop_reset_zone(struct zloop_device *zlo, unsigned int zone_no)
goto unlock;
}
+ spin_lock_irqsave(&zone->wp_lock, flags);
zone->cond = BLK_ZONE_COND_EMPTY;
zone->wp = zone->start;
clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
unlock:
mutex_unlock(&zone->lock);
@@ -308,6 +328,7 @@ static int zloop_reset_all_zones(struct zloop_device *zlo)
static int zloop_finish_zone(struct zloop_device *zlo, unsigned int zone_no)
{
struct zloop_zone *zone = &zlo->zones[zone_no];
+ unsigned long flags;
int ret = 0;
if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
@@ -325,9 +346,11 @@ static int zloop_finish_zone(struct zloop_device *zlo, unsigned int zone_no)
goto unlock;
}
+ spin_lock_irqsave(&zone->wp_lock, flags);
zone->cond = BLK_ZONE_COND_FULL;
- zone->wp = zone->start + zlo->zone_size;
+ zone->wp = ULLONG_MAX;
clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
unlock:
mutex_unlock(&zone->lock);
@@ -369,8 +392,9 @@ static void zloop_rw(struct zloop_cmd *cmd)
struct zloop_zone *zone;
struct iov_iter iter;
struct bio_vec tmp;
+ unsigned long flags;
sector_t zone_end;
- int nr_bvec = 0;
+ unsigned int nr_bvec;
int ret;
atomic_set(&cmd->ref, 2);
@@ -378,6 +402,11 @@ static void zloop_rw(struct zloop_cmd *cmd)
cmd->nr_sectors = nr_sectors;
cmd->ret = 0;
+ if (WARN_ON_ONCE(is_append && !zlo->zone_append)) {
+ ret = -EIO;
+ goto out;
+ }
+
/* We should never get an I/O beyond the device capacity. */
if (WARN_ON_ONCE(zone_no >= zlo->nr_zones)) {
ret = -EIO;
@@ -406,16 +435,31 @@ static void zloop_rw(struct zloop_cmd *cmd)
if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write) {
mutex_lock(&zone->lock);
- if (is_append) {
- sector = zone->wp;
- cmd->sector = sector;
- }
+ spin_lock_irqsave(&zone->wp_lock, flags);
/*
- * Write operations must be aligned to the write pointer and
- * fully contained within the zone capacity.
+ * Zone append operations always go at the current write
+ * pointer, but regular write operations must already be
+ * aligned to the write pointer when submitted.
*/
- if (sector != zone->wp || zone->wp + nr_sectors > zone_end) {
+ if (is_append) {
+ /*
+ * If ordered zone append is in use, we already checked
+ * and set the target sector in zloop_queue_rq().
+ */
+ if (!zlo->ordered_zone_append) {
+ if (zone->cond == BLK_ZONE_COND_FULL ||
+ zone->wp + nr_sectors > zone_end) {
+ spin_unlock_irqrestore(&zone->wp_lock,
+ flags);
+ ret = -EIO;
+ goto unlock;
+ }
+ sector = zone->wp;
+ }
+ cmd->sector = sector;
+ } else if (sector != zone->wp) {
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
pr_err("Zone %u: unaligned write: sect %llu, wp %llu\n",
zone_no, sector, zone->wp);
ret = -EIO;
@@ -428,17 +472,22 @@ static void zloop_rw(struct zloop_cmd *cmd)
zone->cond = BLK_ZONE_COND_IMP_OPEN;
/*
- * Advance the write pointer of sequential zones. If the write
- * fails, the wp position will be corrected when the next I/O
- * copmpletes.
+ * Advance the write pointer, unless ordered zone append is in
+ * use. If the write fails, the write pointer position will be
+ * corrected when the next I/O starts execution.
*/
- zone->wp += nr_sectors;
- if (zone->wp == zone_end)
- zone->cond = BLK_ZONE_COND_FULL;
+ if (!is_append || !zlo->ordered_zone_append) {
+ zone->wp += nr_sectors;
+ if (zone->wp == zone_end) {
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = ULLONG_MAX;
+ }
+ }
+
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
}
- rq_for_each_bvec(tmp, rq, rq_iter)
- nr_bvec++;
+ nr_bvec = blk_rq_nr_bvec(rq);
if (rq->bio != rq->biotail) {
struct bio_vec *bvec;
@@ -498,6 +547,10 @@ static void zloop_handle_cmd(struct zloop_cmd *cmd)
struct request *rq = blk_mq_rq_from_pdu(cmd);
struct zloop_device *zlo = rq->q->queuedata;
+ /* We can block in this context, so ignore REQ_NOWAIT. */
+ if (rq->cmd_flags & REQ_NOWAIT)
+ rq->cmd_flags &= ~REQ_NOWAIT;
+
switch (req_op(rq)) {
case REQ_OP_READ:
case REQ_OP_WRITE:
@@ -608,6 +661,35 @@ static void zloop_complete_rq(struct request *rq)
blk_mq_end_request(rq, sts);
}
+static bool zloop_set_zone_append_sector(struct request *rq)
+{
+ struct zloop_device *zlo = rq->q->queuedata;
+ unsigned int zone_no = rq_zone_no(rq);
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ sector_t zone_end = zone->start + zlo->zone_capacity;
+ sector_t nr_sectors = blk_rq_sectors(rq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&zone->wp_lock, flags);
+
+ if (zone->cond == BLK_ZONE_COND_FULL ||
+ zone->wp + nr_sectors > zone_end) {
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
+ return false;
+ }
+
+ rq->__sector = zone->wp;
+ zone->wp += blk_rq_sectors(rq);
+ if (zone->wp >= zone_end) {
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = ULLONG_MAX;
+ }
+
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
+
+ return true;
+}
+
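The helper above is the heart of ordered zone append: the write pointer is
reserved under wp_lock at submission time, so each append's target sector is
fixed in dispatch order even if the backing I/O completes out of order. A
userspace model of that reservation step, with pthreads in place of the
kernel spinlock and illustrative types throughout:

#include <pthread.h>
#include <stdbool.h>

typedef unsigned long long sector_t;

struct zone_model {
	pthread_spinlock_t wp_lock;
	sector_t start, capacity, wp;
};

/* Reserve nr sectors at the write pointer; fails if the zone is full. */
static bool reserve_append(struct zone_model *z, sector_t nr, sector_t *sector)
{
	bool ok = false;

	pthread_spin_lock(&z->wp_lock);
	if (z->wp + nr <= z->start + z->capacity) {
		*sector = z->wp;	/* the append's final on-disk position */
		z->wp += nr;		/* advance before the I/O is issued */
		ok = true;
	}
	pthread_spin_unlock(&z->wp_lock);
	return ok;
}

int main(void)
{
	struct zone_model z = { .start = 0, .capacity = 1024 };
	sector_t s;

	pthread_spin_init(&z.wp_lock, PTHREAD_PROCESS_PRIVATE);
	z.wp = z.start;
	return reserve_append(&z, 8, &s) ? 0 : 1;
}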
static blk_status_t zloop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -618,6 +700,16 @@ static blk_status_t zloop_queue_rq(struct blk_mq_hw_ctx *hctx,
if (zlo->state == Zlo_deleting)
return BLK_STS_IOERR;
+ /*
+ * If we need to strongly order zone append operations, set the request
+ * sector to the zone write pointer location now instead of when the
+ * command work runs.
+ */
+ if (zlo->ordered_zone_append && req_op(rq) == REQ_OP_ZONE_APPEND) {
+ if (!zloop_set_zone_append_sector(rq))
+ return BLK_STS_IOERR;
+ }
+
blk_mq_start_request(rq);
INIT_WORK(&cmd->work, zloop_cmd_workfn);
@@ -647,11 +739,12 @@ static int zloop_open(struct gendisk *disk, blk_mode_t mode)
}
static int zloop_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data)
+ unsigned int nr_zones, struct blk_report_zones_args *args)
{
struct zloop_device *zlo = disk->private_data;
struct blk_zone blkz = {};
unsigned int first, i;
+ unsigned long flags;
int ret;
first = disk_zone_no(disk, sector);
@@ -675,7 +768,9 @@ static int zloop_report_zones(struct gendisk *disk, sector_t sector,
blkz.start = zone->start;
blkz.len = zlo->zone_size;
+ spin_lock_irqsave(&zone->wp_lock, flags);
blkz.wp = zone->wp;
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
blkz.cond = zone->cond;
if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) {
blkz.type = BLK_ZONE_TYPE_CONVENTIONAL;
@@ -687,7 +782,7 @@ static int zloop_report_zones(struct gendisk *disk, sector_t sector,
mutex_unlock(&zone->lock);
- ret = cb(&blkz, i, data);
+ ret = disk_report_zone(disk, &blkz, i, args);
if (ret)
return ret;
}
@@ -700,6 +795,8 @@ static void zloop_free_disk(struct gendisk *disk)
struct zloop_device *zlo = disk->private_data;
unsigned int i;
+ blk_mq_free_tag_set(&zlo->tag_set);
+
for (i = 0; i < zlo->nr_zones; i++) {
struct zloop_zone *zone = &zlo->zones[i];
@@ -781,6 +878,7 @@ static int zloop_init_zone(struct zloop_device *zlo, struct zloop_options *opts,
int ret;
mutex_init(&zone->lock);
+ spin_lock_init(&zone->wp_lock);
zone->start = (sector_t)zone_no << zlo->zone_shift;
if (!restore)
@@ -882,7 +980,6 @@ static int zloop_ctl_add(struct zloop_options *opts)
{
struct queue_limits lim = {
.max_hw_sectors = SZ_1M >> SECTOR_SHIFT,
- .max_hw_zone_append_sectors = SZ_1M >> SECTOR_SHIFT,
.chunk_sectors = opts->zone_size,
.features = BLK_FEAT_ZONED,
};
@@ -934,6 +1031,9 @@ static int zloop_ctl_add(struct zloop_options *opts)
zlo->nr_zones = nr_zones;
zlo->nr_conv_zones = opts->nr_conv_zones;
zlo->buffered_io = opts->buffered_io;
+ zlo->zone_append = opts->zone_append;
+ if (zlo->zone_append)
+ zlo->ordered_zone_append = opts->ordered_zone_append;
zlo->workqueue = alloc_workqueue("zloop%d", WQ_UNBOUND | WQ_FREEZABLE,
opts->nr_queues * opts->queue_depth, zlo->id);
@@ -974,6 +1074,8 @@ static int zloop_ctl_add(struct zloop_options *opts)
lim.physical_block_size = zlo->block_size;
lim.logical_block_size = zlo->block_size;
+ if (zlo->zone_append)
+ lim.max_hw_zone_append_sectors = lim.max_hw_sectors;
zlo->tag_set.ops = &zloop_mq_ops;
zlo->tag_set.nr_hw_queues = opts->nr_queues;
@@ -1014,10 +1116,14 @@ static int zloop_ctl_add(struct zloop_options *opts)
zlo->state = Zlo_live;
mutex_unlock(&zloop_ctl_mutex);
- pr_info("Added device %d: %u zones of %llu MB, %u B block size\n",
+ pr_info("zloop: device %d, %u zones of %llu MiB, %u B block size\n",
zlo->id, zlo->nr_zones,
((sector_t)zlo->zone_size << SECTOR_SHIFT) >> 20,
zlo->block_size);
+ pr_info("zloop%d: using %s%s zone append\n",
+ zlo->id,
+ zlo->ordered_zone_append ? "ordered " : "",
+ zlo->zone_append ? "native" : "emulated");
return 0;
@@ -1080,7 +1186,6 @@ static int zloop_ctl_remove(struct zloop_options *opts)
del_gendisk(zlo->disk);
put_disk(zlo->disk);
- blk_mq_free_tag_set(&zlo->tag_set);
pr_info("Removed device %d\n", opts->id);
@@ -1105,6 +1210,8 @@ static int zloop_parse_options(struct zloop_options *opts, const char *buf)
opts->nr_queues = ZLOOP_DEF_NR_QUEUES;
opts->queue_depth = ZLOOP_DEF_QUEUE_DEPTH;
opts->buffered_io = ZLOOP_DEF_BUFFERED_IO;
+ opts->zone_append = ZLOOP_DEF_ZONE_APPEND;
+ opts->ordered_zone_append = ZLOOP_DEF_ORDERED_ZONE_APPEND;
if (!buf)
return 0;
@@ -1214,6 +1321,21 @@ static int zloop_parse_options(struct zloop_options *opts, const char *buf)
case ZLOOP_OPT_BUFFERED_IO:
opts->buffered_io = true;
break;
+ case ZLOOP_OPT_ZONE_APPEND:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token != 0 && token != 1) {
+ pr_err("Invalid zone_append value\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->zone_append = token;
+ break;
+ case ZLOOP_OPT_ORDERED_ZONE_APPEND:
+ opts->ordered_zone_append = true;
+ break;
case ZLOOP_OPT_ERR:
default:
pr_warn("unknown parameter or missing value '%s'\n", p);
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index d26a58c67e95..b1bd1daa0060 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -8,6 +8,7 @@
#include <linux/sched.h>
#include <linux/cpuhotplug.h>
#include <linux/vmalloc.h>
+#include <linux/sysfs.h>
#include "zcomp.h"
@@ -89,23 +90,21 @@ bool zcomp_available_algorithm(const char *comp)
}
/* show available compressors */
-ssize_t zcomp_available_show(const char *comp, char *buf)
+ssize_t zcomp_available_show(const char *comp, char *buf, ssize_t at)
{
- ssize_t sz = 0;
int i;
for (i = 0; i < ARRAY_SIZE(backends) - 1; i++) {
if (!strcmp(comp, backends[i]->name)) {
- sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
- "[%s] ", backends[i]->name);
+ at += sysfs_emit_at(buf, at, "[%s] ",
+ backends[i]->name);
} else {
- sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
- "%s ", backends[i]->name);
+ at += sysfs_emit_at(buf, at, "%s ", backends[i]->name);
}
}
- sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
- return sz;
+ at += sysfs_emit_at(buf, at, "\n");
+ return at;
}
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
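The conversion above moves zcomp from open-coded scnprintf() offsets to
sysfs_emit_at(), which appends at a caller-supplied offset into the sysfs
PAGE_SIZE buffer and returns the number of bytes written, so callers chain
"at += ...". A simplified userspace stand-in for that contract (not the
kernel API itself; on truncation snprintf() behaves differently):

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 4096	/* stands in for PAGE_SIZE */

static int emit_at(char *buf, int at, const char *fmt, const char *arg)
{
	if (at < 0 || at >= BUF_SIZE)
		return 0;
	return snprintf(buf + at, BUF_SIZE - at, fmt, arg);
}

int main(void)
{
	const char *algs[] = { "lzo", "lz4", "zstd" };
	const char *cur = "zstd";	/* the currently selected algorithm */
	char buf[BUF_SIZE];
	int at = 0;

	for (unsigned int i = 0; i < sizeof(algs) / sizeof(algs[0]); i++)
		at += emit_at(buf, at, strcmp(algs[i], cur) ? "%s " : "[%s] ",
			      algs[i]);
	at += emit_at(buf, at, "%s", "\n");
	return at == (int)strlen(buf) ? 0 : 1;	/* "lzo lz4 [zstd] \n" */
}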
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index 4acffe671a5e..eacfd3f7d61d 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -79,7 +79,7 @@ struct zcomp {
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node);
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node);
-ssize_t zcomp_available_show(const char *comp, char *buf);
+ssize_t zcomp_available_show(const char *comp, char *buf, ssize_t at);
bool zcomp_available_algorithm(const char *comp);
struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 54c57103715f..5759823d6314 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -373,7 +373,7 @@ static ssize_t initstate_show(struct device *dev,
val = init_done(zram);
up_read(&zram->init_lock);
- return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+ return sysfs_emit(buf, "%u\n", val);
}
static ssize_t disksize_show(struct device *dev,
@@ -381,7 +381,7 @@ static ssize_t disksize_show(struct device *dev,
{
struct zram *zram = dev_to_zram(dev);
- return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
+ return sysfs_emit(buf, "%llu\n", zram->disksize);
}
static ssize_t mem_limit_store(struct device *dev,
@@ -500,8 +500,31 @@ out:
}
#ifdef CONFIG_ZRAM_WRITEBACK
+#define INVALID_BDEV_BLOCK (~0UL)
+
+struct zram_wb_ctl {
+ /* idle list is accessed only by the writeback task, no concurrency */
+ struct list_head idle_reqs;
+ /* done list is accessed concurrently, protected by done_lock */
+ /* done list is accessed concurrently, protect by done_lock */
+ struct list_head done_reqs;
+ wait_queue_head_t done_wait;
+ spinlock_t done_lock;
+ atomic_t num_inflight;
+};
+
+struct zram_wb_req {
+ unsigned long blk_idx;
+ struct page *page;
+ struct zram_pp_slot *pps;
+ struct bio_vec bio_vec;
+ struct bio bio;
+
+ struct list_head entry;
+};
+
static ssize_t writeback_limit_enable_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
u64 val;
@@ -510,33 +533,31 @@ static ssize_t writeback_limit_enable_store(struct device *dev,
if (kstrtoull(buf, 10, &val))
return ret;
- down_read(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
+ down_write(&zram->init_lock);
zram->wb_limit_enable = val;
- spin_unlock(&zram->wb_limit_lock);
- up_read(&zram->init_lock);
+ up_write(&zram->init_lock);
ret = len;
return ret;
}
static ssize_t writeback_limit_enable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
bool val;
struct zram *zram = dev_to_zram(dev);
down_read(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
val = zram->wb_limit_enable;
- spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
- return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+ return sysfs_emit(buf, "%d\n", val);
}
static ssize_t writeback_limit_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
u64 val;
@@ -545,29 +566,69 @@ static ssize_t writeback_limit_store(struct device *dev,
if (kstrtoull(buf, 10, &val))
return ret;
- down_read(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
+ /*
+ * When the page size is greater than 4KB, a bd_wb_limit that is not
+ * page-size aligned causes the value to wrap. For example, with a
+ * 16KB page size and bd_wb_limit set to 3, a single writeback
+ * operation takes bd_wb_limit to -1, and since bd_wb_limit is an
+ * unsigned number it wraps to a huge value.
+ */
+ val = rounddown(val, PAGE_SIZE / 4096);
+
+ down_write(&zram->init_lock);
zram->bd_wb_limit = val;
- spin_unlock(&zram->wb_limit_lock);
- up_read(&zram->init_lock);
+ up_write(&zram->init_lock);
ret = len;
return ret;
}
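bd_wb_limit is accounted in 4KiB units: writing back one page costs
PAGE_SIZE / 4096 units (see the 1UL << (PAGE_SHIFT - 12) adjustments later
in this patch), which is why the store path rounds the user's value down
to a multiple of that per-page cost. A quick worked check for 16KiB pages:

#include <assert.h>

int main(void)
{
	unsigned long page_size = 16384;	/* 16KiB pages */
	unsigned long cost = page_size / 4096;	/* 4 units per written page */
	unsigned long val = 3;

	val -= val % cost;	/* rounddown(val, cost) */
	assert(val == 0);	/* 3 units cannot fund even one page */
	return 0;
}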
static ssize_t writeback_limit_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
u64 val;
struct zram *zram = dev_to_zram(dev);
down_read(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
val = zram->bd_wb_limit;
- spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
- return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
+ return sysfs_emit(buf, "%llu\n", val);
+}
+
+static ssize_t writeback_batch_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ u32 val;
+
+ if (kstrtouint(buf, 10, &val))
+ return -EINVAL;
+
+ if (!val)
+ return -EINVAL;
+
+ down_write(&zram->init_lock);
+ zram->wb_batch_size = val;
+ up_write(&zram->init_lock);
+
+ return len;
+}
+
+static ssize_t writeback_batch_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 val;
+ struct zram *zram = dev_to_zram(dev);
+
+ down_read(&zram->init_lock);
+ val = zram->wb_batch_size;
+ up_read(&zram->init_lock);
+
+ return sysfs_emit(buf, "%u\n", val);
}
static void reset_bdev(struct zram *zram)
@@ -697,23 +758,20 @@ out:
return err;
}
-static unsigned long alloc_block_bdev(struct zram *zram)
+static unsigned long zram_reserve_bdev_block(struct zram *zram)
{
- unsigned long blk_idx = 1;
-retry:
- /* skip 0 bit to confuse zram.handle = 0 */
- blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
- if (blk_idx == zram->nr_pages)
- return 0;
+ unsigned long blk_idx;
- if (test_and_set_bit(blk_idx, zram->bitmap))
- goto retry;
+ blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, 0);
+ if (blk_idx == zram->nr_pages)
+ return INVALID_BDEV_BLOCK;
+ set_bit(blk_idx, zram->bitmap);
atomic64_inc(&zram->stats.bd_count);
return blk_idx;
}
-static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
+static void zram_release_bdev_block(struct zram *zram, unsigned long blk_idx)
{
int was_set;
@@ -734,32 +792,249 @@ static void read_from_bdev_async(struct zram *zram, struct page *page,
submit_bio(bio);
}
-static int zram_writeback_slots(struct zram *zram, struct zram_pp_ctl *ctl)
+static void release_wb_req(struct zram_wb_req *req)
{
- unsigned long blk_idx = 0;
- struct page *page = NULL;
- struct zram_pp_slot *pps;
- struct bio_vec bio_vec;
- struct bio bio;
+ __free_page(req->page);
+ kfree(req);
+}
+
+static void release_wb_ctl(struct zram_wb_ctl *wb_ctl)
+{
+ if (!wb_ctl)
+ return;
+
+ /* We should never have inflight requests at this point */
+ WARN_ON(atomic_read(&wb_ctl->num_inflight));
+ WARN_ON(!list_empty(&wb_ctl->done_reqs));
+
+ while (!list_empty(&wb_ctl->idle_reqs)) {
+ struct zram_wb_req *req;
+
+ req = list_first_entry(&wb_ctl->idle_reqs,
+ struct zram_wb_req, entry);
+ list_del(&req->entry);
+ release_wb_req(req);
+ }
+
+ kfree(wb_ctl);
+}
+
+static struct zram_wb_ctl *init_wb_ctl(struct zram *zram)
+{
+ struct zram_wb_ctl *wb_ctl;
+ int i;
+
+ wb_ctl = kmalloc(sizeof(*wb_ctl), GFP_KERNEL);
+ if (!wb_ctl)
+ return NULL;
+
+ INIT_LIST_HEAD(&wb_ctl->idle_reqs);
+ INIT_LIST_HEAD(&wb_ctl->done_reqs);
+ atomic_set(&wb_ctl->num_inflight, 0);
+ init_waitqueue_head(&wb_ctl->done_wait);
+ spin_lock_init(&wb_ctl->done_lock);
+
+ for (i = 0; i < zram->wb_batch_size; i++) {
+ struct zram_wb_req *req;
+
+ /*
+ * This is a fatal condition only if we couldn't allocate
+ * any requests at all. Otherwise we just work with the
+ * requests that we have successfully allocated, so that
+ * writeback can still proceed, even if there is only one
+ * request on the idle list.
+ */
+ req = kzalloc(sizeof(*req), GFP_KERNEL | __GFP_NOWARN);
+ if (!req)
+ break;
+
+ req->page = alloc_page(GFP_KERNEL | __GFP_NOWARN);
+ if (!req->page) {
+ kfree(req);
+ break;
+ }
+
+ list_add(&req->entry, &wb_ctl->idle_reqs);
+ }
+
+ /* We couldn't allocate any requests, so writeback is not possible */
+ if (list_empty(&wb_ctl->idle_reqs))
+ goto release_wb_ctl;
+
+ return wb_ctl;
+
+release_wb_ctl:
+ release_wb_ctl(wb_ctl);
+ return NULL;
+}
+
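Note the allocation policy in init_wb_ctl(): the pool is filled
best-effort, and only a completely empty idle list is fatal. A compact
illustration of that policy (sizes and names are made up):

#include <stdlib.h>

static size_t alloc_pool(void **slots, size_t want)
{
	size_t got = 0;

	while (got < want) {
		void *p = malloc(4096);

		if (!p)
			break;		/* keep whatever we already have */
		slots[got++] = p;
	}
	return got;			/* 0 means the caller must fail */
}

int main(void)
{
	void *slots[32];

	return alloc_pool(slots, 32) ? 0 : 1;
}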
+static void zram_account_writeback_rollback(struct zram *zram)
+{
+ lockdep_assert_held_read(&zram->init_lock);
+
+ if (zram->wb_limit_enable)
+ zram->bd_wb_limit += 1UL << (PAGE_SHIFT - 12);
+}
+
+static void zram_account_writeback_submit(struct zram *zram)
+{
+ lockdep_assert_held_read(&zram->init_lock);
+
+ if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
+ zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
+}
+
+static int zram_writeback_complete(struct zram *zram, struct zram_wb_req *req)
+{
+ u32 index = req->pps->index;
+ int err;
+
+ err = blk_status_to_errno(req->bio.bi_status);
+ if (err) {
+ /*
+ * Failed wb requests should not be accounted in wb_limit
+ * (if enabled).
+ */
+ zram_account_writeback_rollback(zram);
+ zram_release_bdev_block(zram, req->blk_idx);
+ return err;
+ }
+
+ atomic64_inc(&zram->stats.bd_writes);
+ zram_slot_lock(zram, index);
+ /*
+ * We release the slot lock during writeback, so the slot can change
+ * under us: slot_free(), or slot_free() followed by reallocation
+ * (zram_write_page()). In both cases the slot loses the ZRAM_PP_SLOT
+ * flag. No concurrent post-processing can
+ * set ZRAM_PP_SLOT on such slots until current post-processing
+ * finishes.
+ */
+ if (!zram_test_flag(zram, index, ZRAM_PP_SLOT)) {
+ zram_release_bdev_block(zram, req->blk_idx);
+ goto out;
+ }
+
+ zram_free_page(zram, index);
+ zram_set_flag(zram, index, ZRAM_WB);
+ zram_set_handle(zram, index, req->blk_idx);
+ atomic64_inc(&zram->stats.pages_stored);
+
+out:
+ zram_slot_unlock(zram, index);
+ return 0;
+}
+
+static void zram_writeback_endio(struct bio *bio)
+{
+ struct zram_wb_req *req = container_of(bio, struct zram_wb_req, bio);
+ struct zram_wb_ctl *wb_ctl = bio->bi_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wb_ctl->done_lock, flags);
+ list_add(&req->entry, &wb_ctl->done_reqs);
+ spin_unlock_irqrestore(&wb_ctl->done_lock, flags);
+
+ wake_up(&wb_ctl->done_wait);
+}
+
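zram_writeback_endio() runs in completion context: it only moves the
finished request onto the done list under done_lock and wakes the
writeback loop, which drains the list in process context. A userspace
model of that handoff, with a pthread condvar standing in for the kernel
waitqueue (all names illustrative):

#include <pthread.h>
#include <stdio.h>

struct req_model { struct req_model *next; int id; };

static struct req_model *done_list;
static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_wait = PTHREAD_COND_INITIALIZER;

static void endio_model(struct req_model *r)	/* "interrupt" side */
{
	pthread_mutex_lock(&done_lock);
	r->next = done_list;
	done_list = r;
	pthread_mutex_unlock(&done_lock);
	pthread_cond_signal(&done_wait);
}

static struct req_model *wait_done(void)	/* writeback loop side */
{
	struct req_model *r;

	pthread_mutex_lock(&done_lock);
	while (!done_list)
		pthread_cond_wait(&done_wait, &done_lock);
	r = done_list;
	done_list = r->next;
	pthread_mutex_unlock(&done_lock);
	return r;
}

int main(void)
{
	struct req_model r = { .next = NULL, .id = 1 };

	endio_model(&r);
	printf("completed req %d\n", wait_done()->id);
	return 0;
}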
+static void zram_submit_wb_request(struct zram *zram,
+ struct zram_wb_ctl *wb_ctl,
+ struct zram_wb_req *req)
+{
+ /*
+ * wb_limit (if enabled) should be adjusted before submission,
+ * so that we don't over-submit.
+ */
+ zram_account_writeback_submit(zram);
+ atomic_inc(&wb_ctl->num_inflight);
+ req->bio.bi_private = wb_ctl;
+ submit_bio(&req->bio);
+}
+
+static int zram_complete_done_reqs(struct zram *zram,
+ struct zram_wb_ctl *wb_ctl)
+{
+ struct zram_wb_req *req;
+ unsigned long flags;
int ret = 0, err;
- u32 index;
- page = alloc_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
+ while (atomic_read(&wb_ctl->num_inflight) > 0) {
+ spin_lock_irqsave(&wb_ctl->done_lock, flags);
+ req = list_first_entry_or_null(&wb_ctl->done_reqs,
+ struct zram_wb_req, entry);
+ if (req)
+ list_del(&req->entry);
+ spin_unlock_irqrestore(&wb_ctl->done_lock, flags);
+
+ /* ->num_inflight > 0 doesn't mean we have done requests */
+ if (!req)
+ break;
+
+ err = zram_writeback_complete(zram, req);
+ if (err)
+ ret = err;
+
+ atomic_dec(&wb_ctl->num_inflight);
+ release_pp_slot(zram, req->pps);
+ req->pps = NULL;
+
+ list_add(&req->entry, &wb_ctl->idle_reqs);
+ }
+
+ return ret;
+}
+
+static struct zram_wb_req *zram_select_idle_req(struct zram_wb_ctl *wb_ctl)
+{
+ struct zram_wb_req *req;
+
+ req = list_first_entry_or_null(&wb_ctl->idle_reqs,
+ struct zram_wb_req, entry);
+ if (req)
+ list_del(&req->entry);
+ return req;
+}
+
+static int zram_writeback_slots(struct zram *zram,
+ struct zram_pp_ctl *ctl,
+ struct zram_wb_ctl *wb_ctl)
+{
+ unsigned long blk_idx = INVALID_BDEV_BLOCK;
+ struct zram_wb_req *req = NULL;
+ struct zram_pp_slot *pps;
+ int ret = 0, err = 0;
+ u32 index = 0;
while ((pps = select_pp_slot(ctl))) {
- spin_lock(&zram->wb_limit_lock);
if (zram->wb_limit_enable && !zram->bd_wb_limit) {
- spin_unlock(&zram->wb_limit_lock);
ret = -EIO;
break;
}
- spin_unlock(&zram->wb_limit_lock);
- if (!blk_idx) {
- blk_idx = alloc_block_bdev(zram);
- if (!blk_idx) {
+ while (!req) {
+ req = zram_select_idle_req(wb_ctl);
+ if (req)
+ break;
+
+ wait_event(wb_ctl->done_wait,
+ !list_empty(&wb_ctl->done_reqs));
+
+ err = zram_complete_done_reqs(zram, wb_ctl);
+ /*
+ * BIO errors are not fatal, we continue and simply
+ * attempt to writeback the remaining objects (pages).
+ * At the same time we need to signal user-space that
+ * some writes (at least one, but also could be all of
+ * them) were not successful and we do so by returning
+ * the most recent BIO error.
+ */
+ if (err)
+ ret = err;
+ }
+
+ if (blk_idx == INVALID_BDEV_BLOCK) {
+ blk_idx = zram_reserve_bdev_block(zram);
+ if (blk_idx == INVALID_BDEV_BLOCK) {
ret = -ENOSPC;
break;
}
@@ -768,74 +1043,54 @@ static int zram_writeback_slots(struct zram *zram, struct zram_pp_ctl *ctl)
index = pps->index;
zram_slot_lock(zram, index);
/*
- * scan_slots() sets ZRAM_PP_SLOT and relases slot lock, so
+ * scan_slots() sets ZRAM_PP_SLOT and releases slot lock, so
* slots can change in the meantime. If slots are accessed or
* freed they lose ZRAM_PP_SLOT flag and hence we don't
* post-process them.
*/
if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
goto next;
- if (zram_read_from_zspool(zram, page, index))
+ if (zram_read_from_zspool(zram, req->page, index))
goto next;
zram_slot_unlock(zram, index);
- bio_init(&bio, zram->bdev, &bio_vec, 1,
- REQ_OP_WRITE | REQ_SYNC);
- bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
- __bio_add_page(&bio, page, PAGE_SIZE, 0);
-
- /*
- * XXX: A single page IO would be inefficient for write
- * but it would be not bad as starter.
- */
- err = submit_bio_wait(&bio);
- if (err) {
- release_pp_slot(zram, pps);
- /*
- * BIO errors are not fatal, we continue and simply
- * attempt to writeback the remaining objects (pages).
- * At the same time we need to signal user-space that
- * some writes (at least one, but also could be all of
- * them) were not successful and we do so by returning
- * the most recent BIO error.
- */
- ret = err;
- continue;
- }
-
- atomic64_inc(&zram->stats.bd_writes);
- zram_slot_lock(zram, index);
/*
- * Same as above, we release slot lock during writeback so
- * slot can change under us: slot_free() or slot_free() and
- * reallocation (zram_write_page()). In both cases slot loses
- * ZRAM_PP_SLOT flag. No concurrent post-processing can set
- * ZRAM_PP_SLOT on such slots until current post-processing
- * finishes.
+ * From now on the pp-slot is owned by the req; remove it from
+ * its pp bucket.
*/
- if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
- goto next;
+ list_del_init(&pps->entry);
+
+ req->blk_idx = blk_idx;
+ req->pps = pps;
+ bio_init(&req->bio, zram->bdev, &req->bio_vec, 1, REQ_OP_WRITE);
+ req->bio.bi_iter.bi_sector = req->blk_idx * (PAGE_SIZE >> 9);
+ req->bio.bi_end_io = zram_writeback_endio;
+ __bio_add_page(&req->bio, req->page, PAGE_SIZE, 0);
+
+ zram_submit_wb_request(zram, wb_ctl, req);
+ blk_idx = INVALID_BDEV_BLOCK;
+ req = NULL;
+ cond_resched();
+ continue;
- zram_free_page(zram, index);
- zram_set_flag(zram, index, ZRAM_WB);
- zram_set_handle(zram, index, blk_idx);
- blk_idx = 0;
- atomic64_inc(&zram->stats.pages_stored);
- spin_lock(&zram->wb_limit_lock);
- if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
- zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
- spin_unlock(&zram->wb_limit_lock);
next:
zram_slot_unlock(zram, index);
release_pp_slot(zram, pps);
-
- cond_resched();
}
- if (blk_idx)
- free_block_bdev(zram, blk_idx);
- if (page)
- __free_page(page);
+ /*
+ * We selected an idle req but never submitted it, due to an error
+ * or the wb limit.
+ */
+ if (req)
+ release_wb_req(req);
+
+ while (atomic_read(&wb_ctl->num_inflight) > 0) {
+ wait_event(wb_ctl->done_wait, !list_empty(&wb_ctl->done_reqs));
+ err = zram_complete_done_reqs(zram, wb_ctl);
+ if (err)
+ ret = err;
+ }
return ret;
}
@@ -948,7 +1203,8 @@ static ssize_t writeback_store(struct device *dev,
struct zram *zram = dev_to_zram(dev);
u64 nr_pages = zram->disksize >> PAGE_SHIFT;
unsigned long lo = 0, hi = nr_pages;
- struct zram_pp_ctl *ctl = NULL;
+ struct zram_pp_ctl *pp_ctl = NULL;
+ struct zram_wb_ctl *wb_ctl = NULL;
char *args, *param, *val;
ssize_t ret = len;
int err, mode = 0;
@@ -970,8 +1226,14 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- ctl = init_pp_ctl();
- if (!ctl) {
+ pp_ctl = init_pp_ctl();
+ if (!pp_ctl) {
+ ret = -ENOMEM;
+ goto release_init_lock;
+ }
+
+ wb_ctl = init_wb_ctl(zram);
+ if (!wb_ctl) {
ret = -ENOMEM;
goto release_init_lock;
}
@@ -1000,7 +1262,7 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
break;
}
@@ -1011,7 +1273,7 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
break;
}
@@ -1022,7 +1284,7 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
continue;
}
@@ -1033,17 +1295,18 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
continue;
}
}
- err = zram_writeback_slots(zram, ctl);
+ err = zram_writeback_slots(zram, pp_ctl, wb_ctl);
if (err)
ret = err;
release_init_lock:
- release_pp_ctl(zram, ctl);
+ release_pp_ctl(zram, pp_ctl);
+ release_wb_ctl(wb_ctl);
atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
@@ -1085,7 +1348,7 @@ static int read_from_bdev_sync(struct zram *zram, struct page *page,
work.entry = entry;
INIT_WORK_ONSTACK(&work.work, zram_sync_read);
- queue_work(system_unbound_wq, &work.work);
+ queue_work(system_dfl_wq, &work.work);
flush_work(&work.work);
destroy_work_on_stack(&work.work);
@@ -1112,7 +1375,9 @@ static int read_from_bdev(struct zram *zram, struct page *page,
return -EIO;
}
-static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
+static void zram_release_bdev_block(struct zram *zram, unsigned long blk_idx)
+{
+}
#endif
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
@@ -1225,17 +1490,6 @@ static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
zram->comp_algs[prio] = alg;
}
-static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, char *buf)
-{
- ssize_t sz;
-
- down_read(&zram->init_lock);
- sz = zcomp_available_show(zram->comp_algs[prio], buf);
- up_read(&zram->init_lock);
-
- return sz;
-}
-
static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
{
char *compressor;
@@ -1386,8 +1640,12 @@ static ssize_t comp_algorithm_show(struct device *dev,
char *buf)
{
struct zram *zram = dev_to_zram(dev);
+ ssize_t sz;
- return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf);
+ down_read(&zram->init_lock);
+ sz = zcomp_available_show(zram->comp_algs[ZRAM_PRIMARY_COMP], buf, 0);
+ up_read(&zram->init_lock);
+ return sz;
}
static ssize_t comp_algorithm_store(struct device *dev,
@@ -1411,14 +1669,15 @@ static ssize_t recomp_algorithm_show(struct device *dev,
ssize_t sz = 0;
u32 prio;
+ down_read(&zram->init_lock);
for (prio = ZRAM_SECONDARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
if (!zram->comp_algs[prio])
continue;
- sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, "#%d: ", prio);
- sz += __comp_algorithm_show(zram, prio, buf + sz);
+ sz += sysfs_emit_at(buf, sz, "#%d: ", prio);
+ sz += zcomp_available_show(zram->comp_algs[prio], buf, sz);
}
-
+ up_read(&zram->init_lock);
return sz;
}
@@ -1488,7 +1747,7 @@ static ssize_t io_stat_show(struct device *dev,
ssize_t ret;
down_read(&zram->init_lock);
- ret = scnprintf(buf, PAGE_SIZE,
+ ret = sysfs_emit(buf,
"%8llu %8llu 0 %8llu\n",
(u64)atomic64_read(&zram->stats.failed_reads),
(u64)atomic64_read(&zram->stats.failed_writes),
@@ -1518,7 +1777,7 @@ static ssize_t mm_stat_show(struct device *dev,
orig_size = atomic64_read(&zram->stats.pages_stored);
max_used = atomic_long_read(&zram->stats.max_used_pages);
- ret = scnprintf(buf, PAGE_SIZE,
+ ret = sysfs_emit(buf,
"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu %8llu\n",
orig_size << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.compr_data_size),
@@ -1543,8 +1802,8 @@ static ssize_t bd_stat_show(struct device *dev,
ssize_t ret;
down_read(&zram->init_lock);
- ret = scnprintf(buf, PAGE_SIZE,
- "%8llu %8llu %8llu\n",
+ ret = sysfs_emit(buf,
+ "%8llu %8llu %8llu\n",
FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
@@ -1562,7 +1821,7 @@ static ssize_t debug_stat_show(struct device *dev,
ssize_t ret;
down_read(&zram->init_lock);
- ret = scnprintf(buf, PAGE_SIZE,
+ ret = sysfs_emit(buf,
"version: %d\n0 %8llu\n",
version,
(u64)atomic64_read(&zram->stats.miss_free));
@@ -1640,7 +1899,7 @@ static void zram_free_page(struct zram *zram, size_t index)
if (zram_test_flag(zram, index, ZRAM_WB)) {
zram_clear_flag(zram, index, ZRAM_WB);
- free_block_bdev(zram, zram_get_handle(zram, index));
+ zram_release_bdev_block(zram, zram_get_handle(zram, index));
goto out;
}
@@ -1746,14 +2005,14 @@ static int zram_read_page(struct zram *zram, struct page *page, u32 index,
ret = zram_read_from_zspool(zram, page, index);
zram_slot_unlock(zram, index);
} else {
+ unsigned long blk_idx = zram_get_handle(zram, index);
+
/*
* The slot should be unlocked before reading from the backing
* device.
*/
zram_slot_unlock(zram, index);
-
- ret = read_from_bdev(zram, page, zram_get_handle(zram, index),
- parent);
+ ret = read_from_bdev(zram, page, blk_idx, parent);
}
/* Should NEVER happen. Return bio error if it does. */
@@ -1794,6 +2053,7 @@ static int write_same_filled_page(struct zram *zram, unsigned long fill,
u32 index)
{
zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
zram_set_flag(zram, index, ZRAM_SAME);
zram_set_handle(zram, index, fill);
zram_slot_unlock(zram, index);
@@ -1831,6 +2091,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
kunmap_local(src);
zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
zram_set_flag(zram, index, ZRAM_HUGE);
zram_set_handle(zram, index, handle);
zram_set_obj_size(zram, index, PAGE_SIZE);
@@ -1854,11 +2115,6 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
unsigned long element;
bool same_filled;
- /* First, free memory allocated to this slot (if any) */
- zram_slot_lock(zram, index);
- zram_free_page(zram, index);
- zram_slot_unlock(zram, index);
-
mem = kmap_local_page(page);
same_filled = page_same_filled(mem, &element);
kunmap_local(mem);
@@ -1900,6 +2156,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
zcomp_stream_put(zstrm);
zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
zram_set_handle(zram, index, handle);
zram_set_obj_size(zram, index, comp_len);
zram_slot_unlock(zram, index);
@@ -2618,6 +2875,7 @@ static DEVICE_ATTR_RW(backing_dev);
static DEVICE_ATTR_WO(writeback);
static DEVICE_ATTR_RW(writeback_limit);
static DEVICE_ATTR_RW(writeback_limit_enable);
+static DEVICE_ATTR_RW(writeback_batch_size);
#endif
#ifdef CONFIG_ZRAM_MULTI_COMP
static DEVICE_ATTR_RW(recomp_algorithm);
@@ -2639,6 +2897,7 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_writeback.attr,
&dev_attr_writeback_limit.attr,
&dev_attr_writeback_limit_enable.attr,
+ &dev_attr_writeback_batch_size.attr,
#endif
&dev_attr_io_stat.attr,
&dev_attr_mm_stat.attr,
@@ -2700,7 +2959,7 @@ static int zram_add(void)
init_rwsem(&zram->init_lock);
#ifdef CONFIG_ZRAM_WRITEBACK
- spin_lock_init(&zram->wb_limit_lock);
+ zram->wb_batch_size = 32;
#endif
/* gendisk structure */
@@ -2810,7 +3069,7 @@ static ssize_t hot_add_show(const struct class *class,
if (ret < 0)
return ret;
- return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", ret);
}
/* This attribute must be set to 0400, so CLASS_ATTR_RO() can not be used */
static struct class_attribute class_attr_hot_add =
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 6cee93f9c0d0..c6d94501376c 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -127,8 +127,8 @@ struct zram {
bool claim; /* Protected by disk->open_mutex */
#ifdef CONFIG_ZRAM_WRITEBACK
struct file *backing_dev;
- spinlock_t wb_limit_lock;
bool wb_limit_enable;
+ u32 wb_batch_size;
u64 bd_wb_limit;
struct block_device *bdev;
unsigned long *bitmap;
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 4ab32abf0f48..c5d45cf91f88 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -188,6 +188,7 @@ config BT_HCIUART_3WIRE
bool "Three-wire UART (H5) protocol support"
depends on BT_HCIUART
depends on BT_HCIUART_SERDEV
+ select CRC_CCITT
help
The HCI Three-wire UART Transport Layer makes it possible to
use the Bluetooth HCI over a serial port interface. The HCI
@@ -312,7 +313,9 @@ config BT_HCIBCM4377
config BT_HCIBPA10X
tristate "HCI BPA10x USB driver"
+ depends on BT_HCIUART
depends on USB
+ select BT_HCIUART_H4
help
Bluetooth HCI BPA10x USB driver.
This driver provides support for the Digianswer BPA 100/105 Bluetooth
@@ -437,8 +440,10 @@ config BT_MTKSDIO
config BT_MTKUART
tristate "MediaTek HCI UART driver"
+ depends on BT_HCIUART
depends on SERIAL_DEV_BUS
depends on USB || !BT_HCIBTUSB_MTK
+ select BT_HCIUART_H4
select BT_MTK
help
MediaTek Bluetooth HCI UART driver.
@@ -483,7 +488,9 @@ config BT_VIRTIO
config BT_NXPUART
tristate "NXP protocol support"
+ depends on BT_HCIUART
depends on SERIAL_DEV_BUS
+ select BT_HCIUART_H4
select CRC32
select CRC8
help
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 0d6ad50da046..8df310983bf6 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -670,7 +670,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
hdev->flush = bfusb_flush;
hdev->send = bfusb_send_frame;
- set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LOCAL_COMMANDS);
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 1fa58c059cbf..e305d04aac9d 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -20,7 +20,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#define VERSION "0.11"
@@ -41,6 +41,7 @@ struct bpa10x_data {
struct usb_anchor rx_anchor;
struct sk_buff *rx_skb[2];
+ struct hci_uart hu;
};
static void bpa10x_tx_complete(struct urb *urb)
@@ -96,7 +97,7 @@ static void bpa10x_rx_complete(struct urb *urb)
if (urb->status == 0) {
bool idx = usb_pipebulk(urb->pipe);
- data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx],
+ data->rx_skb[idx] = h4_recv_buf(&data->hu, data->rx_skb[idx],
urb->transfer_buffer,
urb->actual_length,
bpa10x_recv_pkts,
@@ -388,6 +389,7 @@ static int bpa10x_probe(struct usb_interface *intf,
hci_set_drvdata(hdev, data);
data->hdev = hdev;
+ data->hu.hdev = hdev;
SET_HCIDEV_DEV(hdev, &intf->dev);
@@ -398,7 +400,7 @@ static int bpa10x_probe(struct usb_interface *intf,
hdev->send = bpa10x_send_frame;
hdev->set_diag = bpa10x_set_diag;
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
err = hci_register_dev(hdev);
if (err < 0) {
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 0a60660fc8ce..d33cc70eec66 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -135,7 +135,7 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
if (btbcm_set_bdaddr_from_efi(hdev) != 0) {
bt_dev_info(hdev, "BCM: Using default device address (%pMR)",
&bda->bdaddr);
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
}
@@ -467,7 +467,7 @@ static int btbcm_print_controller_features(struct hci_dev *hdev)
/* Read DMI and disable broken Read LE Min/Max Tx Power */
if (dmi_first_match(disable_broken_read_transmit_power))
- set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER);
return 0;
}
@@ -642,7 +642,9 @@ int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud
snprintf(postfix, sizeof(postfix), "-%4.4x-%4.4x", vid, pid);
}
- fw_name = kmalloc(BCM_FW_NAME_COUNT_MAX * BCM_FW_NAME_LEN, GFP_KERNEL);
+ fw_name = kmalloc_array(BCM_FW_NAME_COUNT_MAX,
+ sizeof(*fw_name),
+ GFP_KERNEL);
if (!fw_name)
return -ENOMEM;
@@ -706,7 +708,7 @@ int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud_m
btbcm_check_bdaddr(hdev);
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
return 0;
}
@@ -769,7 +771,7 @@ int btbcm_setup_apple(struct hci_dev *hdev)
kfree_skb(skb);
}
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
return 0;
}
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 55cc1652bfe4..9d29ab811f80 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -88,7 +88,7 @@ int btintel_check_bdaddr(struct hci_dev *hdev)
if (!bacmp(&bda->bdaddr, BDADDR_INTEL)) {
bt_dev_err(hdev, "Found Intel default device address (%pMR)",
&bda->bdaddr);
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
kfree_skb(skb);
@@ -484,6 +484,7 @@ int btintel_version_info_tlv(struct hci_dev *hdev,
case 0x1d: /* BlazarU (BzrU) */
case 0x1e: /* BlazarI (Bzr) */
case 0x1f: /* Scorpious Peak */
+ case 0x22: /* BlazarIW (BzrIW) */
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)",
@@ -555,7 +556,7 @@ int btintel_parse_version_tlv(struct hci_dev *hdev,
/* Consume Command Complete Status field */
skb_pull(skb, 1);
- /* Event parameters contatin multiple TLVs. Read each of them
+ /* Event parameters contain multiple TLVs. Read each of them
* and only keep the required data. Also, it uses existing legacy
* version field like hw_platform, hw_variant, and fw_variant
* to keep the existing setup flow
@@ -889,7 +890,7 @@ int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param)
params.boot_param = cpu_to_le32(boot_param);
- skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params), &params,
+ skb = __hci_cmd_sync(hdev, BTINTEL_HCI_OP_RESET, sizeof(params), &params,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Failed to send Intel Reset command");
@@ -1287,7 +1288,7 @@ static void btintel_reset_to_bootloader(struct hci_dev *hdev)
params.boot_option = 0x00;
params.boot_param = cpu_to_le32(0x00000000);
- skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params),
+ skb = __hci_cmd_sync(hdev, BTINTEL_HCI_OP_RESET, sizeof(params),
&params, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "FW download error recovery failed (%ld)",
@@ -2027,7 +2028,7 @@ static int btintel_download_fw(struct hci_dev *hdev,
*/
if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
bt_dev_info(hdev, "No device address configured");
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
download:
@@ -2295,7 +2296,7 @@ static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev,
*/
if (!bacmp(&ver->otp_bd_addr, BDADDR_ANY)) {
bt_dev_info(hdev, "No device address configured");
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
}
@@ -2670,7 +2671,7 @@ static u8 btintel_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
* Distinguish ISO data packets form ACL data packets
* based on their connection handle value range.
*/
- if (hci_skb_pkt_type(skb) == HCI_ACLDATA_PKT) {
+ if (iso_capable(hdev) && hci_skb_pkt_type(skb) == HCI_ACLDATA_PKT) {
__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
if (hci_handle(handle) >= BTINTEL_ISODATA_HANDLE_BASE)
@@ -3253,6 +3254,7 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
case 0x1d:
case 0x1e:
case 0x1f:
+ case 0x22:
hci_set_msft_opcode(hdev, 0xFC1E);
break;
default:
@@ -3435,9 +3437,9 @@ static int btintel_setup_combined(struct hci_dev *hdev)
}
/* Apply the common HCI quirks for Intel device */
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG);
/* Set up the quality report callback for Intel devices */
hdev->set_quality_report = btintel_set_quality_report;
@@ -3475,8 +3477,8 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*/
if (!btintel_test_flag(hdev,
INTEL_ROM_LEGACY_NO_WBS_SUPPORT))
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
- &hdev->quirks);
+ hci_set_quirk(hdev,
+ HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
err = btintel_legacy_rom_setup(hdev, &ver);
break;
@@ -3491,11 +3493,11 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*
* All Legacy bootloader devices support WBS
*/
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
- &hdev->quirks);
+ hci_set_quirk(hdev,
+ HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* These variants don't seem to support LE Coded PHY */
- set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_CODED);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev, ver.hw_variant);
@@ -3571,10 +3573,10 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*
* All Legacy bootloader devices support WBS
*/
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* These variants don't seem to support LE Coded PHY */
- set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_CODED);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev, ver.hw_variant);
@@ -3593,6 +3595,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
case 0x1d:
case 0x1e:
case 0x1f:
+ case 0x22:
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
@@ -3600,7 +3603,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*
* All TLV based devices support WBS
*/
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev,
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 1d12c4113c66..431998049e68 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -52,6 +52,8 @@ struct intel_tlv {
u8 val[];
} __packed;
+#define BTINTEL_HCI_OP_RESET 0xfc01
+
#define BTINTEL_CNVI_BLAZARI 0x900
#define BTINTEL_CNVI_BLAZARIW 0x901
#define BTINTEL_CNVI_GAP 0x910
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 50fe17f1e1d1..2936b535479f 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -15,9 +15,11 @@
#include <linux/interrupt.h>
#include <linux/unaligned.h>
+#include <linux/devcoredump.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci_drv.h>
#include "btintel.h"
#include "btintel_pcie.h"
@@ -35,12 +37,25 @@
/* Intel Bluetooth PCIe device id table */
static const struct pci_device_id btintel_pcie_table[] = {
+ /* BlazarI, Wildcat Lake */
+ { BTINTEL_PCI_DEVICE(0x4D76, PCI_ANY_ID) },
+ /* BlazarI, Lunar Lake */
{ BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
+ /* Scorpious, Panther Lake-H484 */
+ { BTINTEL_PCI_DEVICE(0xE376, PCI_ANY_ID) },
+ /* Scorpious, Panther Lake-H404 */
{ BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
+struct btintel_pcie_dev_recovery {
+ struct list_head list;
+ u8 count;
+ time64_t last_error;
+ char name[];
+};
+
/* Intel PCIe uses 4 bytes of HCI type instead of 1 byte BT SIG HCI type */
#define BTINTEL_PCIE_HCI_TYPE_LEN 4
#define BTINTEL_PCIE_HCI_CMD_PKT 0x00000001
@@ -62,6 +77,9 @@ MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
#define BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER 0x17A2
#define BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT 0x1E61
+#define BTINTEL_PCIE_RESET_WINDOW_SECS 5
+#define BTINTEL_PCIE_FLR_MAX_RETRY 1
+
/* Alive interrupt context */
enum {
BTINTEL_PCIE_ROM,
@@ -99,6 +117,36 @@ struct btintel_pcie_dbgc_ctxt {
struct btintel_pcie_dbgc_ctxt_buf bufs[BTINTEL_PCIE_DBGC_BUFFER_COUNT];
};
+struct btintel_pcie_removal {
+ struct pci_dev *pdev;
+ struct work_struct work;
+};
+
+static LIST_HEAD(btintel_pcie_recovery_list);
+static DEFINE_SPINLOCK(btintel_pcie_recovery_lock);
+
+static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
+{
+ switch (alive_intr_ctxt) {
+ case BTINTEL_PCIE_ROM:
+ return "rom";
+ case BTINTEL_PCIE_FW_DL:
+ return "fw_dl";
+ case BTINTEL_PCIE_D0:
+ return "d0";
+ case BTINTEL_PCIE_D3:
+ return "d3";
+ case BTINTEL_PCIE_HCI_RESET:
+ return "hci_reset";
+ case BTINTEL_PCIE_INTEL_HCI_RESET1:
+ return "intel_reset1";
+ case BTINTEL_PCIE_INTEL_HCI_RESET2:
+ return "intel_reset2";
+ default:
+ return "unknown";
+ }
+}
+
/* This function initializes the memory for DBGC buffers and formats the
* DBGC fragment which consists of header info and DBGC buffer's LSB, MSB and
* size as the payload
@@ -299,10 +347,14 @@ static inline void btintel_pcie_dump_debug_registers(struct hci_dev *hdev)
}
static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 pkt_type, u16 opcode)
{
int ret;
u16 tfd_index;
+ u32 old_ctxt;
+ bool wait_on_alive = false;
+ struct hci_dev *hdev = data->hdev;
+
struct txq *txq = &data->txq;
tfd_index = data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM];
@@ -310,6 +362,26 @@ static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
if (tfd_index > txq->count)
return -ERANGE;
+ /* Firmware raises alive interrupt on HCI_OP_RESET or
+ * BTINTEL_HCI_OP_RESET
+ */
+ wait_on_alive = (pkt_type == BTINTEL_PCIE_HCI_CMD_PKT &&
+ (opcode == BTINTEL_HCI_OP_RESET || opcode == HCI_OP_RESET));
+
+ if (wait_on_alive) {
+ data->gp0_received = false;
+ old_ctxt = data->alive_intr_ctxt;
+ data->alive_intr_ctxt =
+ (opcode == BTINTEL_HCI_OP_RESET ? BTINTEL_PCIE_INTEL_HCI_RESET1 :
+ BTINTEL_PCIE_HCI_RESET);
+ bt_dev_dbg(data->hdev, "sending cmd: 0x%4.4x alive context changed: %s -> %s",
+ opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
+ btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
+ }
+
+ memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &pkt_type,
+ BTINTEL_PCIE_HCI_TYPE_LEN);
+
/* Prepare for TX. It updates the TFD with the length of data and
* address of the DMA buffer, and copies the data to the DMA buffer
*/
@@ -328,11 +400,24 @@ static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done,
msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS));
if (!ret) {
- bt_dev_err(data->hdev, "tx completion timeout");
+ bt_dev_err(data->hdev, "Timeout (%u ms) on tx completion",
+ BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS);
btintel_pcie_dump_debug_registers(data->hdev);
return -ETIME;
}
+ if (wait_on_alive) {
+ ret = wait_event_timeout(data->gp0_wait_q,
+ data->gp0_received,
+ msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
+ if (!ret) {
+ hdev->stat.err_tx++;
+ bt_dev_err(hdev, "Timeout (%u ms) on alive interrupt, alive context: %s",
+ BTINTEL_DEFAULT_INTR_TIMEOUT_MS,
+ btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
+ return -ETIME;
+ }
+ }
return 0;
}
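btintel_pcie_send_sync() now blocks twice: first on TX completion, then,
for the two reset opcodes, on the firmware's "alive" (gp0) interrupt. A
compressed userspace model of the flag-plus-timed-wait shape that
wait_event_timeout() gives the driver (pthreads only, names made up):

#include <pthread.h>
#include <time.h>
#include <errno.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool gp0_received;

/* Returns 0 if the flag was raised within ms milliseconds, -ETIME if not. */
static int wait_flag_timeout(unsigned int ms)
{
	struct timespec ts;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += ms / 1000;
	ts.tv_nsec += (ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	while (!gp0_received && err == 0)
		err = pthread_cond_timedwait(&cond, &lock, &ts);
	pthread_mutex_unlock(&lock);
	return gp0_received ? 0 : -ETIME;
}

int main(void)
{
	gp0_received = true;	/* pretend the interrupt already fired */
	return wait_flag_timeout(10);
}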
@@ -396,8 +481,13 @@ static int btintel_pcie_submit_rx(struct btintel_pcie_data *data)
static int btintel_pcie_start_rx(struct btintel_pcie_data *data)
{
int i, ret;
+ struct rxq *rxq = &data->rxq;
- for (i = 0; i < BTINTEL_PCIE_RX_MAX_QUEUE; i++) {
+ /* Post (BTINTEL_PCIE_RX_DESCS_COUNT - 3) buffers to work around a
+ * hardware issue that leads to a race condition in the firmware.
+ */
+
+ for (i = 0; i < rxq->count - 3; i++) {
ret = btintel_pcie_submit_rx(data);
if (ret)
return ret;
@@ -471,25 +561,6 @@ static void btintel_pcie_mac_init(struct btintel_pcie_data *data)
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
}
-static int btintel_pcie_add_dmp_data(struct hci_dev *hdev, const void *data, int size)
-{
- struct sk_buff *skb;
- int err;
-
- skb = alloc_skb(size, GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
-
- skb_put_data(skb, data, size);
- err = hci_devcd_append(hdev, skb);
- if (err) {
- bt_dev_err(hdev, "Failed to append data in the coredump");
- return err;
- }
-
- return 0;
-}
-
static int btintel_pcie_get_mac_access(struct btintel_pcie_data *data)
{
u32 reg;
@@ -534,30 +605,35 @@ static void btintel_pcie_release_mac_access(struct btintel_pcie_data *data)
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
}
-static void btintel_pcie_copy_tlv(struct sk_buff *skb, enum btintel_pcie_tlv_type type,
- void *data, int size)
+static void *btintel_pcie_copy_tlv(void *dest, enum btintel_pcie_tlv_type type,
+ void *data, size_t size)
{
struct intel_tlv *tlv;
- tlv = skb_put(skb, sizeof(*tlv) + size);
+ tlv = dest;
tlv->type = type;
tlv->len = size;
memcpy(tlv->val, data, tlv->len);
+ return dest + sizeof(*tlv) + size;
}
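The reworked copy_tlv() above returns the position just past the record it
wrote, so the dump header is built by pointer chaining: p = copy_tlv(p, ...).
A standalone sketch of that layout under the same { type, len, value }
framing (the type codes and payloads here are invented):

#include <stdint.h>
#include <string.h>
#include <assert.h>

struct tlv_model {
	uint8_t type;
	uint8_t len;
	uint8_t val[];
} __attribute__((packed));

static void *copy_tlv(void *dest, uint8_t type, const void *data, size_t size)
{
	struct tlv_model *tlv = dest;

	tlv->type = type;
	tlv->len = size;
	memcpy(tlv->val, data, size);
	return (uint8_t *)dest + sizeof(*tlv) + size;
}

int main(void)
{
	uint8_t buf[64], *p = buf;
	uint32_t cnvi = 0x900;

	p = copy_tlv(p, 0x10, &cnvi, sizeof(cnvi));	/* invented type code */
	p = copy_tlv(p, 0x11, "fw", 2);			/* invented payload */
	assert(p - buf == (2 + 4) + (2 + 2));		/* header is 2 bytes */
	return 0;
}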
static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
{
- u32 offset, prev_size, wr_ptr_status, dump_size, i;
+ u32 offset, prev_size, wr_ptr_status, dump_size, data_len;
struct btintel_pcie_dbgc *dbgc = &data->dbgc;
- u8 buf_idx, dump_time_len, fw_build;
struct hci_dev *hdev = data->hdev;
+ u8 *pdata, *p, buf_idx;
struct intel_tlv *tlv;
struct timespec64 now;
- struct sk_buff *skb;
struct tm tm_now;
- char buf[256];
- u16 hdr_len;
- int ret;
+ char fw_build[128];
+ char ts[128];
+ char vendor[64];
+ char driver[64];
+
+ if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
+ return -EOPNOTSUPP;
+
wr_ptr_status = btintel_pcie_rd_dev_mem(data, BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS);
offset = wr_ptr_status & BTINTEL_PCIE_DBG_OFFSET_BIT_MASK;
@@ -574,88 +650,84 @@ static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
else
return -EINVAL;
+ snprintf(vendor, sizeof(vendor), "Vendor: Intel\n");
+ snprintf(driver, sizeof(driver), "Driver: %s\n",
+ data->dmp_hdr.driver_name);
+
ktime_get_real_ts64(&now);
time64_to_tm(now.tv_sec, 0, &tm_now);
- dump_time_len = snprintf(buf, sizeof(buf), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
+ snprintf(ts, sizeof(ts), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
tm_now.tm_mday, tm_now.tm_mon + 1, tm_now.tm_year + 1900,
tm_now.tm_hour, tm_now.tm_min, tm_now.tm_sec);
- fw_build = snprintf(buf + dump_time_len, sizeof(buf) - dump_time_len,
+ snprintf(fw_build, sizeof(fw_build),
"Firmware Timestamp: Year %u WW %02u buildtype %u build %u",
2000 + (data->dmp_hdr.fw_timestamp >> 8),
data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type,
data->dmp_hdr.fw_build_num);
- hdr_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
- sizeof(*tlv) + dump_time_len +
- sizeof(*tlv) + fw_build;
+ data_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
+ sizeof(*tlv) + strlen(ts) +
+ sizeof(*tlv) + strlen(fw_build) +
+ sizeof(*tlv) + strlen(vendor) +
+ sizeof(*tlv) + strlen(driver);
- dump_size = hdr_len + sizeof(hdr_len);
+ /*
+ * sizeof(u32) - signature
+ * sizeof(data_len) - stores the TLV data size
+ * data_len - the TLV data itself
+ */
+ dump_size = sizeof(u32) + sizeof(data_len) + data_len;
- skb = alloc_skb(dump_size, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
/* Add debug buffers data length to dump size */
dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count;
- ret = hci_devcd_init(hdev, dump_size);
- if (ret) {
- bt_dev_err(hdev, "Failed to init devcoredump, err %d", ret);
- kfree_skb(skb);
- return ret;
- }
+ pdata = vmalloc(dump_size);
+ if (!pdata)
+ return -ENOMEM;
+ p = pdata;
+
+ *(u32 *)p = BTINTEL_PCIE_MAGIC_NUM;
+ p += sizeof(u32);
- skb_put_data(skb, &hdr_len, sizeof(hdr_len));
+ *(u32 *)p = data_len;
+ p += sizeof(u32);
- btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
- sizeof(data->dmp_hdr.cnvi_bt));
- btintel_pcie_copy_tlv(skb, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
- sizeof(data->dmp_hdr.write_ptr));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_VENDOR, vendor, strlen(vendor));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_DRIVER, driver, strlen(driver));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_DUMP_TIME, ts, strlen(ts));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_FW_BUILD, fw_build,
+ strlen(fw_build));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
+ sizeof(data->dmp_hdr.cnvi_bt));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
+ sizeof(data->dmp_hdr.write_ptr));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
+ sizeof(data->dmp_hdr.wrap_ctr));
data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data,
BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND);
- btintel_pcie_copy_tlv(skb, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
- sizeof(data->dmp_hdr.wrap_ctr));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
- sizeof(data->dmp_hdr.trigger_reason));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
- sizeof(data->dmp_hdr.fw_git_sha1));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
- sizeof(data->dmp_hdr.cnvr_top));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
- sizeof(data->dmp_hdr.cnvi_top));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_DUMP_TIME, buf, dump_time_len);
-
- btintel_pcie_copy_tlv(skb, BTINTEL_FW_BUILD, buf + dump_time_len, fw_build);
-
- ret = hci_devcd_append(hdev, skb);
- if (ret)
- goto exit_err;
-
- for (i = 0; i < dbgc->count; i++) {
- ret = btintel_pcie_add_dmp_data(hdev, dbgc->bufs[i].data,
- BTINTEL_PCIE_DBGC_BUFFER_SIZE);
- if (ret)
- break;
- }
-
-exit_err:
- hci_devcd_complete(hdev);
- return ret;
+ p = btintel_pcie_copy_tlv(p, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
+ sizeof(data->dmp_hdr.trigger_reason));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
+ sizeof(data->dmp_hdr.fw_git_sha1));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
+ sizeof(data->dmp_hdr.cnvr_top));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
+ sizeof(data->dmp_hdr.cnvi_top));
+
+ memcpy(p, dbgc->bufs[0].data, dbgc->count * BTINTEL_PCIE_DBGC_BUFFER_SIZE);
+ dev_coredumpv(&hdev->dev, pdata, dump_size, GFP_KERNEL);
+ return 0;
}
static void btintel_pcie_dump_traces(struct hci_dev *hdev)
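
As rewritten above, the whole coredump is assembled in one vmalloc'ed blob: a u32 signature, a u32 TLV-payload length, the TLV records, then the raw DBGC buffers, handed off in a single dev_coredumpv() call. Note the trailing memcpy relies on the DBGC buffers being contiguous starting at bufs[0].data. A consumer-side sketch of walking that layout; the magic value and TLV shape here are illustrative, not the driver's constants:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv {
	uint8_t type;
	uint8_t len;
	uint8_t val[];
} __attribute__((packed));

/* Walk a blob laid out as [u32 magic][u32 data_len][TLVs][raw buffers];
 * returns the offset where the raw debug buffers begin, or 0 on error. */
static size_t parse_dump(const uint8_t *blob, size_t size, uint32_t magic)
{
	uint32_t sig, data_len;
	size_t off = 2 * sizeof(uint32_t), pos;

	if (size < off)
		return 0;
	memcpy(&sig, blob, sizeof(sig));
	memcpy(&data_len, blob + sizeof(sig), sizeof(data_len));
	if (sig != magic || off + data_len > size)
		return 0;

	for (pos = off; pos < off + data_len; ) {
		const struct tlv *t = (const void *)(blob + pos);

		printf("tlv type %u, len %u\n", t->type, t->len);
		pos += sizeof(*t) + t->len;
	}
	return pos;	/* raw DBGC buffers follow from here */
}

int main(void)
{
	uint8_t blob[32] = {0};
	uint32_t magic = 0xA55A5AA5, len = 6;	/* made-up magic */

	memcpy(blob, &magic, sizeof(magic));
	memcpy(blob + 4, &len, sizeof(len));
	blob[8] = 1;	/* one TLV: type 1, len 4 */
	blob[9] = 4;
	printf("payload ends at offset %zu\n",
	       parse_dump(blob, sizeof(blob), magic));
	return 0;
}
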
@@ -677,51 +749,6 @@ static void btintel_pcie_dump_traces(struct hci_dev *hdev)
bt_dev_err(hdev, "Failed to dump traces: (%d)", ret);
}
-static void btintel_pcie_dump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct btintel_pcie_data *data = hci_get_drvdata(hdev);
- u16 len = skb->len;
- u16 *hdrlen_ptr;
- char buf[80];
-
- hdrlen_ptr = skb_put_zero(skb, sizeof(len));
-
- snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n",
- INTEL_HW_VARIANT(data->dmp_hdr.cnvi_bt));
- skb_put_data(skb, buf, strlen(buf));
-
- snprintf(buf, sizeof(buf), "Firmware Build Number: %u\n",
- data->dmp_hdr.fw_build_num);
- skb_put_data(skb, buf, strlen(buf));
-
- snprintf(buf, sizeof(buf), "Driver: %s\n", data->dmp_hdr.driver_name);
- skb_put_data(skb, buf, strlen(buf));
-
- snprintf(buf, sizeof(buf), "Vendor: Intel\n");
- skb_put_data(skb, buf, strlen(buf));
-
- *hdrlen_ptr = skb->len - len;
-}
-
-static void btintel_pcie_dump_notify(struct hci_dev *hdev, int state)
-{
- struct btintel_pcie_data *data = hci_get_drvdata(hdev);
-
- switch (state) {
- case HCI_DEVCOREDUMP_IDLE:
- data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
- break;
- case HCI_DEVCOREDUMP_ACTIVE:
- data->dmp_hdr.state = HCI_DEVCOREDUMP_ACTIVE;
- break;
- case HCI_DEVCOREDUMP_TIMEOUT:
- case HCI_DEVCOREDUMP_ABORT:
- case HCI_DEVCOREDUMP_DONE:
- data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
- break;
- }
-}
-
/* This function enables BT function by setting BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in
* BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and wait for MSI-X with
* BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0.
@@ -799,6 +826,11 @@ static inline bool btintel_pcie_in_d0(struct btintel_pcie_data *data)
return !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY);
}
+static inline bool btintel_pcie_in_device_halt(struct btintel_pcie_data *data)
+{
+ return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_HALTED;
+}
+
static void btintel_pcie_wr_sleep_cntrl(struct btintel_pcie_data *data,
u32 dxstate)
{
@@ -806,28 +838,6 @@ static void btintel_pcie_wr_sleep_cntrl(struct btintel_pcie_data *data,
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG, dxstate);
}
-static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
-{
- switch (alive_intr_ctxt) {
- case BTINTEL_PCIE_ROM:
- return "rom";
- case BTINTEL_PCIE_FW_DL:
- return "fw_dl";
- case BTINTEL_PCIE_D0:
- return "d0";
- case BTINTEL_PCIE_D3:
- return "d3";
- case BTINTEL_PCIE_HCI_RESET:
- return "hci_reset";
- case BTINTEL_PCIE_INTEL_HCI_RESET1:
- return "intel_reset1";
- case BTINTEL_PCIE_INTEL_HCI_RESET2:
- return "intel_reset2";
- default:
- return "unknown";
- }
-}
-
static int btintel_pcie_read_device_mem(struct btintel_pcie_data *data,
void *buf, u32 dev_addr, int len)
{
@@ -923,11 +933,13 @@ static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
case BTINTEL_PCIE_INTEL_HCI_RESET1:
if (btintel_pcie_in_op(data)) {
submit_rx = true;
+ signal_waitq = true;
break;
}
if (btintel_pcie_in_iml(data)) {
submit_rx = true;
+ signal_waitq = true;
data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
break;
}
@@ -1315,6 +1327,11 @@ static void btintel_pcie_rx_work(struct work_struct *work)
struct btintel_pcie_data, rx_work);
struct sk_buff *skb;
+ if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
+ btintel_pcie_dump_traces(data->hdev);
+ clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
+ }
+
if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
 /* Unlike USB products, the controller will not send a hardware
 * exception event on exception. Instead, the controller writes the
@@ -1327,11 +1344,6 @@ static void btintel_pcie_rx_work(struct work_struct *work)
clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags);
}
- if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
- btintel_pcie_dump_traces(data->hdev);
- clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
- }
-
/* Process the sk_buf in queue and send to the HCI layer */
while ((skb = skb_dequeue(&data->rx_skb_q))) {
btintel_pcie_recv_frame(data, skb);
@@ -1461,11 +1473,6 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
btintel_pcie_msix_gp1_handler(data);
- /* This interrupt is triggered by the firmware after updating
- * boot_stage register and image_response register
- */
- if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
- btintel_pcie_msix_gp0_handler(data);
/* For TX */
if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
@@ -1481,6 +1488,12 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
btintel_pcie_msix_tx_handle(data);
}
+ /* This interrupt is triggered by the firmware after updating
+ * boot_stage register and image_response register
+ */
+ if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
+ btintel_pcie_msix_gp0_handler(data);
+
/*
* Before sending the interrupt the HW disables it to prevent a nested
* interrupt. This is done by writing 1 to the corresponding bit in
@@ -1782,8 +1795,8 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
* + size of index * Number of queues(2) * type of index array(4)
* + size of context information
*/
- total = (sizeof(struct tfd) + sizeof(struct urbd0) + sizeof(struct frbd)
- + sizeof(struct urbd1)) * BTINTEL_DESCS_COUNT;
+ total = (sizeof(struct tfd) + sizeof(struct urbd0)) * BTINTEL_PCIE_TX_DESCS_COUNT;
+ total += (sizeof(struct frbd) + sizeof(struct urbd1)) * BTINTEL_PCIE_RX_DESCS_COUNT;
/* Add the sum of size of index array and size of ci struct */
total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info);
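
With the split counts (32 TX, 64 RX; see the header hunk near the end of this patch), the descriptor area is no longer one multiplier times four struct sizes. A quick sanity-check sketch, using made-up sizes since the real descriptor structs are hardware-defined:

#include <stdio.h>

/* Hypothetical sizes for illustration only; struct tfd, urbd0, frbd and
 * urbd1 are hardware-defined in the driver. */
#define TFD_SZ    8
#define URBD0_SZ  8
#define FRBD_SZ   8
#define URBD1_SZ 16

int main(void)
{
	/* Mirrors the patched computation: TX and RX rings are now
	 * sized independently (32 TX vs 64 RX descriptors). */
	size_t total = (TFD_SZ + URBD0_SZ) * 32
		     + (FRBD_SZ + URBD1_SZ) * 64;
	printf("descriptor area: %zu bytes\n", total);
	return 0;
}
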
@@ -1808,36 +1821,36 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
data->dma_v_addr = v_addr;
/* Setup descriptor count */
- data->txq.count = BTINTEL_DESCS_COUNT;
- data->rxq.count = BTINTEL_DESCS_COUNT;
+ data->txq.count = BTINTEL_PCIE_TX_DESCS_COUNT;
+ data->rxq.count = BTINTEL_PCIE_RX_DESCS_COUNT;
/* Setup tfds */
data->txq.tfds_p_addr = p_addr;
data->txq.tfds = v_addr;
- p_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
+ v_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
/* Setup urbd0 */
data->txq.urbd0s_p_addr = p_addr;
data->txq.urbd0s = v_addr;
- p_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
+ v_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
/* Setup FRBD*/
data->rxq.frbds_p_addr = p_addr;
data->rxq.frbds = v_addr;
- p_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
+ v_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
/* Setup urbd1 */
data->rxq.urbd1s_p_addr = p_addr;
data->rxq.urbd1s = v_addr;
- p_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
+ v_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
/* Setup data buffers for txq */
err = btintel_pcie_setup_txq_bufs(data, &data->txq);
@@ -1925,7 +1938,9 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
__u16 opcode = ~0;
int ret;
u32 type;
- u32 old_ctxt;
+
+ if (test_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
+ return -ENODEV;
 /* Due to a firmware limitation, the type header of the packet must be
 * 4 bytes, unlike 1 byte for UART. In UART, the firmware can read
@@ -1950,17 +1965,14 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
struct hci_command_hdr *cmd = (void *)skb->data;
__u16 opcode = le16_to_cpu(cmd->opcode);
- /* When the 0xfc01 command is issued to boot into
- * the operational firmware, it will actually not
- * send a command complete event. To keep the flow
+ /* When the BTINTEL_HCI_OP_RESET command is issued to
+ * boot into the operational firmware, it will actually
+ * not send a command complete event. To keep the flow
* control working inject that event here.
*/
- if (opcode == 0xfc01)
+ if (opcode == BTINTEL_HCI_OP_RESET)
btintel_pcie_inject_cmd_complete(hdev, opcode);
}
- /* Firmware raises alive interrupt on HCI_OP_RESET */
- if (opcode == HCI_OP_RESET)
- data->gp0_received = false;
hdev->stat.cmd_tx++;
break;
@@ -1979,38 +1991,14 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
bt_dev_err(hdev, "Unknown HCI packet type");
return -EILSEQ;
}
- memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &type,
- BTINTEL_PCIE_HCI_TYPE_LEN);
- ret = btintel_pcie_send_sync(data, skb);
+ ret = btintel_pcie_send_sync(data, skb, type, opcode);
if (ret) {
hdev->stat.err_tx++;
bt_dev_err(hdev, "Failed to send frame (%d)", ret);
goto exit_error;
}
- if (type == BTINTEL_PCIE_HCI_CMD_PKT &&
- (opcode == HCI_OP_RESET || opcode == 0xfc01)) {
- old_ctxt = data->alive_intr_ctxt;
- data->alive_intr_ctxt =
- (opcode == 0xfc01 ? BTINTEL_PCIE_INTEL_HCI_RESET1 :
- BTINTEL_PCIE_HCI_RESET);
- bt_dev_dbg(data->hdev, "sent cmd: 0x%4.4x alive context changed: %s -> %s",
- opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
- btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
- if (opcode == HCI_OP_RESET) {
- ret = wait_event_timeout(data->gp0_wait_q,
- data->gp0_received,
- msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
- if (!ret) {
- hdev->stat.err_tx++;
- bt_dev_err(hdev, "No alive interrupt received for %s",
- btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
- ret = -ETIME;
- goto exit_error;
- }
- }
- }
hdev->stat.byte_tx += skb->len;
kfree_skb(skb);
@@ -2028,6 +2016,28 @@ static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
data->hdev = NULL;
}
+static void btintel_pcie_disable_interrupts(struct btintel_pcie_data *data)
+{
+ spin_lock(&data->irq_lock);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, data->fh_init_mask);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, data->hw_init_mask);
+ spin_unlock(&data->irq_lock);
+}
+
+static void btintel_pcie_enable_interrupts(struct btintel_pcie_data *data)
+{
+ spin_lock(&data->irq_lock);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, ~data->fh_init_mask);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, ~data->hw_init_mask);
+ spin_unlock(&data->irq_lock);
+}
+
+static void btintel_pcie_synchronize_irqs(struct btintel_pcie_data *data)
+{
+ for (int i = 0; i < data->alloc_vecs; i++)
+ synchronize_irq(data->msix_entries[i].vector);
+}
+
static int btintel_pcie_setup_internal(struct hci_dev *hdev)
{
struct btintel_pcie_data *data = hci_get_drvdata(hdev);
@@ -2054,9 +2064,9 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev)
}
/* Apply the common HCI quirks for Intel device */
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG);
/* Set up the quality report callback for Intel devices */
hdev->set_quality_report = btintel_set_quality_report;
@@ -2089,6 +2099,7 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev)
switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
case 0x1e: /* BzrI */
case 0x1f: /* ScP */
+ case 0x22: /* BzrIW */
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
@@ -2096,7 +2107,7 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev)
*
* All TLV based devices support WBS
*/
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev,
@@ -2124,13 +2135,6 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev)
if (ver_tlv.img_type == 0x02 || ver_tlv.img_type == 0x03)
data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1;
- err = hci_devcd_register(hdev, btintel_pcie_dump_traces, btintel_pcie_dump_hdr,
- btintel_pcie_dump_notify);
- if (err) {
- bt_dev_err(hdev, "Failed to register coredump (%d)", err);
- goto exit_error;
- }
-
btintel_print_fseq_info(hdev);
exit_error:
kfree_skb(skb);
@@ -2147,6 +2151,8 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
bt_dev_err(hdev, "Firmware download retry count: %d",
fw_dl_retry);
btintel_pcie_dump_debug_registers(hdev);
+ btintel_pcie_disable_interrupts(data);
+ btintel_pcie_synchronize_irqs(data);
err = btintel_pcie_reset_bt(data);
if (err) {
bt_dev_err(hdev, "Failed to do shr reset: %d", err);
@@ -2154,6 +2160,7 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
}
usleep_range(10000, 12000);
btintel_pcie_reset_ia(data);
+ btintel_pcie_enable_interrupts(data);
btintel_pcie_config_msix(data);
err = btintel_pcie_enable_bt(data);
if (err) {
@@ -2162,9 +2169,255 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
}
btintel_pcie_start_rx(data);
}
+
+ if (!err)
+ set_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags);
return err;
}
+static struct btintel_pcie_dev_recovery *
+btintel_pcie_get_recovery(struct pci_dev *pdev, struct device *dev)
+{
+ struct btintel_pcie_dev_recovery *tmp, *data = NULL;
+ const char *name = pci_name(pdev);
+ const size_t name_len = strlen(name) + 1;
+ struct hci_dev *hdev = to_hci_dev(dev);
+
+ spin_lock(&btintel_pcie_recovery_lock);
+ list_for_each_entry(tmp, &btintel_pcie_recovery_list, list) {
+ if (strcmp(tmp->name, name))
+ continue;
+ data = tmp;
+ break;
+ }
+ spin_unlock(&btintel_pcie_recovery_lock);
+
+ if (data) {
+ bt_dev_dbg(hdev, "Found restart data for BDF: %s", data->name);
+ return data;
+ }
+
+ data = kzalloc(struct_size(data, name, name_len), GFP_ATOMIC);
+ if (!data)
+ return NULL;
+
+ strscpy(data->name, name, name_len);
+ spin_lock(&btintel_pcie_recovery_lock);
+ list_add_tail(&data->list, &btintel_pcie_recovery_list);
+ spin_unlock(&btintel_pcie_recovery_lock);
+
+ return data;
+}
+
+static void btintel_pcie_free_restart_list(void)
+{
+ struct btintel_pcie_dev_recovery *tmp;
+
+ while ((tmp = list_first_entry_or_null(&btintel_pcie_recovery_list,
+ typeof(*tmp), list))) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+}
+
+static void btintel_pcie_inc_recovery_count(struct pci_dev *pdev,
+ struct device *dev)
+{
+ struct btintel_pcie_dev_recovery *data;
+ time64_t retry_window;
+
+ data = btintel_pcie_get_recovery(pdev, dev);
+ if (!data)
+ return;
+
+ retry_window = ktime_get_boottime_seconds() - data->last_error;
+ if (data->count == 0) {
+ data->last_error = ktime_get_boottime_seconds();
+ data->count++;
+ } else if (retry_window < BTINTEL_PCIE_RESET_WINDOW_SECS &&
+ data->count <= BTINTEL_PCIE_FLR_MAX_RETRY) {
+ data->count++;
+ } else if (retry_window > BTINTEL_PCIE_RESET_WINDOW_SECS) {
+ data->last_error = 0;
+ data->count = 0;
+ }
+}
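
The accounting opens a time window at the first error and counts recoveries inside it; once the window has elapsed, the counter resets so a fresh burst of errors starts a new window. A standalone sketch with hypothetical window and retry constants, and a plain integer clock in place of ktime_get_boottime_seconds():

#include <stdio.h>

#define RESET_WINDOW_SECS 300	/* hypothetical; mirrors BTINTEL_PCIE_RESET_WINDOW_SECS */
#define FLR_MAX_RETRY	  5	/* hypothetical; mirrors BTINTEL_PCIE_FLR_MAX_RETRY */

struct recovery { long long last_error; int count; };

/* Same shape as btintel_pcie_inc_recovery_count(): start a window on
 * the first error, count within it, reset once it has elapsed. */
static void inc_recovery_count(struct recovery *r, long long now)
{
	long long window = now - r->last_error;

	if (r->count == 0) {
		r->last_error = now;
		r->count++;
	} else if (window < RESET_WINDOW_SECS && r->count <= FLR_MAX_RETRY) {
		r->count++;
	} else if (window > RESET_WINDOW_SECS) {
		r->last_error = 0;
		r->count = 0;
	}
}

int main(void)
{
	struct recovery r = {0};

	for (long long t = 0; t <= 600; t += 100) {
		inc_recovery_count(&r, t);
		printf("t=%lld count=%d\n", t, r.count);
	}
	return 0;
}

Note that a window of exactly RESET_WINDOW_SECS matches neither branch, so the count is left as-is; that mirrors the strict comparisons in the driver code.
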
+
+static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data);
+
+static void btintel_pcie_removal_work(struct work_struct *wk)
+{
+ struct btintel_pcie_removal *removal =
+ container_of(wk, struct btintel_pcie_removal, work);
+ struct pci_dev *pdev = removal->pdev;
+ struct btintel_pcie_data *data;
+ int err;
+
+ pci_lock_rescan_remove();
+
+ if (!pdev->bus)
+ goto error;
+
+ data = pci_get_drvdata(pdev);
+
+ btintel_pcie_disable_interrupts(data);
+ btintel_pcie_synchronize_irqs(data);
+
+ flush_work(&data->rx_work);
+
+ bt_dev_dbg(data->hdev, "Release bluetooth interface");
+ btintel_pcie_release_hdev(data);
+
+ err = pci_reset_function(pdev);
+ if (err) {
+ BT_ERR("Failed resetting the pcie device (%d)", err);
+ goto error;
+ }
+
+ btintel_pcie_enable_interrupts(data);
+ btintel_pcie_config_msix(data);
+
+ err = btintel_pcie_enable_bt(data);
+ if (err) {
+ BT_ERR("Failed to enable bluetooth hardware after reset (%d)",
+ err);
+ goto error;
+ }
+
+ btintel_pcie_reset_ia(data);
+ btintel_pcie_start_rx(data);
+ data->flags = 0;
+
+ err = btintel_pcie_setup_hdev(data);
+ if (err) {
+ BT_ERR("Failed registering hdev (%d)", err);
+ goto error;
+ }
+error:
+ pci_dev_put(pdev);
+ pci_unlock_rescan_remove();
+ kfree(removal);
+}
+
+static void btintel_pcie_reset(struct hci_dev *hdev)
+{
+ struct btintel_pcie_removal *removal;
+ struct btintel_pcie_data *data;
+
+ data = hci_get_drvdata(hdev);
+
+ if (!test_bit(BTINTEL_PCIE_SETUP_DONE, &data->flags))
+ return;
+
+ if (test_and_set_bit(BTINTEL_PCIE_RECOVERY_IN_PROGRESS, &data->flags))
+ return;
+
+ removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
+ if (!removal)
+ return;
+
+ removal->pdev = data->pdev;
+ INIT_WORK(&removal->work, btintel_pcie_removal_work);
+ pci_dev_get(removal->pdev);
+ schedule_work(&removal->work);
+}
+
+static void btintel_pcie_hw_error(struct hci_dev *hdev, u8 code)
+{
+ struct btintel_pcie_dev_recovery *data;
+ struct btintel_pcie_data *dev_data = hci_get_drvdata(hdev);
+ struct pci_dev *pdev = dev_data->pdev;
+ time64_t retry_window;
+
+ if (code == 0x13) {
+ bt_dev_err(hdev, "Encountered top exception");
+ return;
+ }
+
+ data = btintel_pcie_get_recovery(pdev, &hdev->dev);
+ if (!data)
+ return;
+
+ retry_window = ktime_get_boottime_seconds() - data->last_error;
+
+ if (retry_window < BTINTEL_PCIE_RESET_WINDOW_SECS &&
+ data->count >= BTINTEL_PCIE_FLR_MAX_RETRY) {
+ bt_dev_err(hdev, "Exhausted maximum: %d recovery attempts: %d",
+ BTINTEL_PCIE_FLR_MAX_RETRY, data->count);
+ bt_dev_dbg(hdev, "Boot time: %lld seconds",
+ ktime_get_boottime_seconds());
+ bt_dev_dbg(hdev, "last error at: %lld seconds",
+ data->last_error);
+ return;
+ }
+ btintel_pcie_inc_recovery_count(pdev, &hdev->dev);
+ btintel_pcie_reset(hdev);
+}
+
+static bool btintel_pcie_wakeup(struct hci_dev *hdev)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+
+ return device_may_wakeup(&data->pdev->dev);
+}
+
+static const struct {
+ u16 opcode;
+ const char *desc;
+} btintel_pcie_hci_drv_supported_commands[] = {
+ /* Common commands */
+ { HCI_DRV_OP_READ_INFO, "Read Info" },
+};
+
+static int btintel_pcie_hci_drv_read_info(struct hci_dev *hdev, void *data,
+ u16 data_len)
+{
+ struct hci_drv_rp_read_info *rp;
+ size_t rp_size;
+ int err, i;
+ u16 opcode, num_supported_commands =
+ ARRAY_SIZE(btintel_pcie_hci_drv_supported_commands);
+
+ rp_size = sizeof(*rp) + num_supported_commands * 2;
+
+ rp = kmalloc(rp_size, GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
+
+ strscpy_pad(rp->driver_name, KBUILD_MODNAME);
+
+ rp->num_supported_commands = cpu_to_le16(num_supported_commands);
+ for (i = 0; i < num_supported_commands; i++) {
+ opcode = btintel_pcie_hci_drv_supported_commands[i].opcode;
+ bt_dev_dbg(hdev,
+ "Supported HCI Drv command (0x%02x|0x%04x): %s",
+ hci_opcode_ogf(opcode),
+ hci_opcode_ocf(opcode),
+ btintel_pcie_hci_drv_supported_commands[i].desc);
+ rp->supported_commands[i] = cpu_to_le16(opcode);
+ }
+
+ err = hci_drv_cmd_complete(hdev, HCI_DRV_OP_READ_INFO,
+ HCI_DRV_STATUS_SUCCESS,
+ rp, rp_size);
+
+ kfree(rp);
+ return err;
+}
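
The reply is a fixed header followed by a flexible array of little-endian opcodes, which is why rp_size adds two bytes per supported command. A consumer-side sketch of that shape; the struct layout, field widths and opcode value below are inferred for illustration, and the real definitions live in the hci_drv header:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed layout mirroring struct hci_drv_rp_read_info as used above:
 * a driver-name header plus a flexible array of __le16 opcodes. */
struct rp_read_info {
	char driver_name[32];
	uint16_t num_supported_commands;	/* little-endian on the wire */
	uint16_t supported_commands[];
} __attribute__((packed));

int main(void)
{
	uint8_t buf[sizeof(struct rp_read_info) + 1 * sizeof(uint16_t)];
	struct rp_read_info *rp = (void *)buf;

	memset(buf, 0, sizeof(buf));
	strncpy(rp->driver_name, "btintel_pcie", sizeof(rp->driver_name) - 1);
	rp->num_supported_commands = 1;
	rp->supported_commands[0] = 0x0001;	/* placeholder opcode */

	printf("%s supports %u drv command(s), first 0x%04x\n",
	       rp->driver_name, rp->num_supported_commands,
	       rp->supported_commands[0]);
	return 0;
}
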
+
+static const struct hci_drv_handler btintel_pcie_hci_drv_common_handlers[] = {
+ { btintel_pcie_hci_drv_read_info, HCI_DRV_READ_INFO_SIZE },
+};
+
+static const struct hci_drv_handler btintel_pcie_hci_drv_specific_handlers[] = {};
+
+static struct hci_drv btintel_pcie_hci_drv = {
+ .common_handler_count = ARRAY_SIZE(btintel_pcie_hci_drv_common_handlers),
+ .common_handlers = btintel_pcie_hci_drv_common_handlers,
+ .specific_handler_count = ARRAY_SIZE(btintel_pcie_hci_drv_specific_handlers),
+ .specific_handlers = btintel_pcie_hci_drv_specific_handlers,
+};
+
static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
{
int err;
@@ -2186,9 +2439,12 @@ static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
hdev->send = btintel_pcie_send_frame;
hdev->setup = btintel_pcie_setup;
hdev->shutdown = btintel_shutdown_combined;
- hdev->hw_error = btintel_hw_error;
+ hdev->hw_error = btintel_pcie_hw_error;
hdev->set_diag = btintel_set_diag;
hdev->set_bdaddr = btintel_set_bdaddr;
+ hdev->reset = btintel_pcie_reset;
+ hdev->wakeup = btintel_pcie_wakeup;
+ hdev->hci_drv = &btintel_pcie_hci_drv;
err = hci_register_dev(hdev);
if (err < 0) {
@@ -2286,6 +2542,12 @@ static void btintel_pcie_remove(struct pci_dev *pdev)
data = pci_get_drvdata(pdev);
+ btintel_pcie_disable_interrupts(data);
+
+ btintel_pcie_synchronize_irqs(data);
+
+ flush_work(&data->rx_work);
+
btintel_pcie_reset_bt(data);
for (int i = 0; i < data->alloc_vecs; i++) {
struct msix_entry *msix_entry;
@@ -2298,8 +2560,6 @@ static void btintel_pcie_remove(struct pci_dev *pdev)
btintel_pcie_release_hdev(data);
- flush_work(&data->rx_work);
-
destroy_workqueue(data->workqueue);
btintel_pcie_free(data);
@@ -2323,16 +2583,183 @@ static void btintel_pcie_coredump(struct device *dev)
}
#endif
+static int btintel_pcie_set_dxstate(struct btintel_pcie_data *data, u32 dxstate)
+{
+ int retry = 0, status;
+ u32 dx_intr_timeout_ms = 200;
+
+ do {
+ data->gp0_received = false;
+
+ btintel_pcie_wr_sleep_cntrl(data, dxstate);
+
+ status = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
+ msecs_to_jiffies(dx_intr_timeout_ms));
+
+ if (status)
+ return 0;
+
+ bt_dev_warn(data->hdev,
+ "Timeout (%u ms) on alive interrupt for D%d entry, retry count %d",
+ dx_intr_timeout_ms, dxstate, retry);
+
+ /* clear gp0 cause */
+ btintel_pcie_clr_reg_bits(data,
+ BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES,
+ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0);
+
+ /* A hardware bug may cause the alive interrupt to be missed.
+ * Check if the controller reached the expected state and retry
+ * the operation only if it hasn't.
+ */
+ if (dxstate == BTINTEL_PCIE_STATE_D0) {
+ if (btintel_pcie_in_d0(data))
+ return 0;
+ } else {
+ if (btintel_pcie_in_d3(data))
+ return 0;
+ }
+
+ } while (++retry < BTINTEL_PCIE_DX_TRANSITION_MAX_RETRIES);
+
+ return -EBUSY;
+}
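
A compact simulation of the loop's two exit paths: the normal alive-interrupt wakeup, and the fallback where the interrupt is lost but the boot-stage check shows the transition already happened. The wait and state check are toy stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define MAX_RETRIES 3

static int attempts;

/* Toy stand-ins: the first wait "misses" the interrupt, but the state
 * check then shows the controller already reached the target state. */
static bool wait_for_alive(void) { return ++attempts > 1; }
static bool state_reached(void)  { return attempts >= 1; }

/* Same shape as btintel_pcie_set_dxstate(): retry the doorbell write,
 * but bail out early if the state changed even though the alive
 * interrupt was lost (the hardware bug the comment above describes). */
static int set_dxstate(void)
{
	for (int retry = 0; retry < MAX_RETRIES; retry++) {
		if (wait_for_alive())
			return 0;		/* interrupt received */
		if (state_reached())
			return 0;		/* interrupt missed, state OK */
	}
	return -1;				/* -EBUSY in the driver */
}

int main(void)
{
	printf("set_dxstate() = %d after %d attempt(s)\n",
	       set_dxstate(), attempts);
	return 0;
}
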
+
+static int btintel_pcie_suspend_late(struct device *dev, pm_message_t mesg)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct btintel_pcie_data *data;
+ ktime_t start;
+ u32 dxstate;
+ int err;
+
+ data = pci_get_drvdata(pdev);
+
+ dxstate = (mesg.event == PM_EVENT_SUSPEND ?
+ BTINTEL_PCIE_STATE_D3_HOT : BTINTEL_PCIE_STATE_D3_COLD);
+
+ data->pm_sx_event = mesg.event;
+
+ start = ktime_get();
+
+ /* Refer: 6.4.11.7 -> Platform power management */
+ err = btintel_pcie_set_dxstate(data, dxstate);
+
+ if (err)
+ return err;
+
+ bt_dev_dbg(data->hdev,
+ "device entered into d3 state from d0 in %lld us",
+ ktime_to_us(ktime_get() - start));
+ return err;
+}
+
+static int btintel_pcie_suspend(struct device *dev)
+{
+ return btintel_pcie_suspend_late(dev, PMSG_SUSPEND);
+}
+
+static int btintel_pcie_hibernate(struct device *dev)
+{
+ return btintel_pcie_suspend_late(dev, PMSG_HIBERNATE);
+}
+
+static int btintel_pcie_freeze(struct device *dev)
+{
+ return btintel_pcie_suspend_late(dev, PMSG_FREEZE);
+}
+
+static int btintel_pcie_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct btintel_pcie_data *data;
+ ktime_t start;
+ int err;
+
+ data = pci_get_drvdata(pdev);
+ data->gp0_received = false;
+
+ start = ktime_get();
+
+ /* When the system enters S4 (hibernate), the Bluetooth device loses
+ * power, which erases its loaded firmware. Consequently, a function
+ * level reset (FLR) is required on system resume to bring the
+ * controller back to an operational state by initiating a new
+ * firmware download.
+ */
+
+ if (data->pm_sx_event == PM_EVENT_FREEZE ||
+ data->pm_sx_event == PM_EVENT_HIBERNATE) {
+ set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags);
+ btintel_pcie_reset(data->hdev);
+ return 0;
+ }
+
+ /* Refer: 6.4.11.7 -> Platform power management */
+ err = btintel_pcie_set_dxstate(data, BTINTEL_PCIE_STATE_D0);
+
+ if (err == 0) {
+ bt_dev_dbg(data->hdev,
+ "device entered into d0 state from d3 in %lld us",
+ ktime_to_us(ktime_get() - start));
+ return err;
+ }
+
+ /* Trigger a function level reset if the controller is in an error
+ * state during resume() to bring it back to operational mode.
+ */
+
+ data->boot_stage_cache = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
+ if (btintel_pcie_in_error(data) ||
+ btintel_pcie_in_device_halt(data)) {
+ bt_dev_err(data->hdev, "Controller in error state for D0 entry");
+ if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS,
+ &data->flags)) {
+ data->dmp_hdr.trigger_reason =
+ BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT;
+ queue_work(data->workqueue, &data->rx_work);
+ }
+ set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags);
+ btintel_pcie_reset(data->hdev);
+ }
+ return err;
+}
+
+static const struct dev_pm_ops btintel_pcie_pm_ops = {
+ .suspend = btintel_pcie_suspend,
+ .resume = btintel_pcie_resume,
+ .freeze = btintel_pcie_freeze,
+ .thaw = btintel_pcie_resume,
+ .poweroff = btintel_pcie_hibernate,
+ .restore = btintel_pcie_resume,
+};
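
Under the standard dev_pm_ops semantics, .suspend covers S3 entry, .freeze runs when the hibernation image is being prepared, and .poweroff runs just before S4 power-off; all three resume-side callbacks funnel into btintel_pcie_resume(). A small sketch of the resulting decision table, with stand-in constants for the <linux/pm.h> event values:

#include <stdio.h>

/* Hypothetical stand-ins for the PM event and device-state constants;
 * the real values live in <linux/pm.h> and the driver header. */
enum { PM_EVENT_SUSPEND = 1, PM_EVENT_FREEZE = 2, PM_EVENT_HIBERNATE = 3 };
enum { STATE_D3_HOT, STATE_D3_COLD };

int main(void)
{
	int events[] = { PM_EVENT_SUSPEND, PM_EVENT_FREEZE, PM_EVENT_HIBERNATE };

	for (int i = 0; i < 3; i++) {
		int ev = events[i];
		/* Mirrors btintel_pcie_suspend_late(): only plain suspend
		 * (S3) uses D3hot; freeze/hibernate power the device down. */
		int dx = (ev == PM_EVENT_SUSPEND) ? STATE_D3_HOT : STATE_D3_COLD;
		/* Mirrors btintel_pcie_resume(): after freeze/hibernate the
		 * firmware is gone, so resume schedules an FLR instead of a
		 * D0 transition. */
		int needs_flr = (ev == PM_EVENT_FREEZE || ev == PM_EVENT_HIBERNATE);

		printf("event %d -> D3%s, resume via %s\n", ev,
		       dx == STATE_D3_HOT ? "hot" : "cold",
		       needs_flr ? "FLR + firmware re-download" : "D0 entry");
	}
	return 0;
}
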
+
static struct pci_driver btintel_pcie_driver = {
.name = KBUILD_MODNAME,
.id_table = btintel_pcie_table,
.probe = btintel_pcie_probe,
.remove = btintel_pcie_remove,
+ .driver.pm = pm_sleep_ptr(&btintel_pcie_pm_ops),
#ifdef CONFIG_DEV_COREDUMP
.driver.coredump = btintel_pcie_coredump
#endif
};
-module_pci_driver(btintel_pcie_driver);
+
+static int __init btintel_pcie_init(void)
+{
+ return pci_register_driver(&btintel_pcie_driver);
+}
+
+static void __exit btintel_pcie_exit(void)
+{
+ pci_unregister_driver(&btintel_pcie_driver);
+ btintel_pcie_free_restart_list();
+}
+
+module_init(btintel_pcie_init);
+module_exit(btintel_pcie_exit);
MODULE_AUTHOR("Tedd Ho-Jeong An <tedd.an@intel.com>");
MODULE_DESCRIPTION("Intel Bluetooth PCIe transport driver ver " VERSION);
diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
index 21b964b15c1c..e3d941ffef4a 100644
--- a/drivers/bluetooth/btintel_pcie.h
+++ b/drivers/bluetooth/btintel_pcie.h
@@ -117,7 +117,9 @@ enum {
enum {
BTINTEL_PCIE_CORE_HALTED,
BTINTEL_PCIE_HWEXP_INPROGRESS,
- BTINTEL_PCIE_COREDUMP_INPROGRESS
+ BTINTEL_PCIE_COREDUMP_INPROGRESS,
+ BTINTEL_PCIE_RECOVERY_IN_PROGRESS,
+ BTINTEL_PCIE_SETUP_DONE
};
enum btintel_pcie_tlv_type {
@@ -130,6 +132,8 @@ enum btintel_pcie_tlv_type {
BTINTEL_CNVI_TOP,
BTINTEL_DUMP_TIME,
BTINTEL_FW_BUILD,
+ BTINTEL_VENDOR,
+ BTINTEL_DRIVER
};
/* causes for the MBOX interrupts */
@@ -154,8 +158,13 @@ enum msix_mbox_int_causes {
/* Default interrupt timeout in msec */
#define BTINTEL_DEFAULT_INTR_TIMEOUT_MS 3000
-/* The number of descriptors in TX/RX queues */
-#define BTINTEL_DESCS_COUNT 16
+#define BTINTEL_PCIE_DX_TRANSITION_MAX_RETRIES 3
+
+/* The number of descriptors in TX queues */
+#define BTINTEL_PCIE_TX_DESCS_COUNT 32
+
+/* The number of descriptors in RX queues */
+#define BTINTEL_PCIE_RX_DESCS_COUNT 64
/* Number of Queues for TX and RX
 * It indicates the index of the IA (Index Array)
@@ -177,9 +186,6 @@ enum {
/* Doorbell vector for TFD */
#define BTINTEL_PCIE_TX_DB_VEC 0
-/* Number of pending RX requests for downlink */
-#define BTINTEL_PCIE_RX_MAX_QUEUE 6
-
/* Doorbell vector for FRBD */
#define BTINTEL_PCIE_RX_DB_VEC 513
@@ -460,6 +466,7 @@ struct btintel_pcie_dump_header {
* @txq: TX Queue struct
* @rxq: RX Queue struct
* @alive_intr_ctxt: Alive interrupt context
* @pm_sx_event: PM event that triggered the system suspend
*/
struct btintel_pcie_data {
struct pci_dev *pdev;
@@ -509,6 +516,7 @@ struct btintel_pcie_data {
u32 alive_intr_ctxt;
struct btintel_pcie_dbgc dbgc;
struct btintel_pcie_dump_header dmp_hdr;
+ u8 pm_sx_event;
};
static inline u32 btintel_pcie_rd_reg32(struct btintel_pcie_data *data,
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index 4390fd571dbd..a8c520dc09e1 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -642,12 +642,7 @@ static int btmtk_usb_hci_wmt_sync(struct hci_dev *hdev,
* WMT command.
*/
err = wait_on_bit_timeout(&data->flags, BTMTK_TX_WAIT_VND_EVT,
- TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
- if (err == -EINTR) {
- bt_dev_err(hdev, "Execution of wmt command interrupted");
- clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags);
- goto err_free_wc;
- }
+ TASK_UNINTERRUPTIBLE, HCI_INIT_TIMEOUT);
if (err) {
bt_dev_err(hdev, "Execution of wmt command timed out");
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index c16a3518b8ff..fba3ab6d30a5 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -29,7 +29,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#include "btmtk.h"
#define VERSION "0.1"
@@ -615,7 +615,6 @@ static void btmtksdio_txrx_work(struct work_struct *work)
sdio_release_host(bdev->func);
- pm_runtime_mark_last_busy(bdev->dev);
pm_runtime_put_autosuspend(bdev->dev);
}
@@ -1141,7 +1140,7 @@ static int btmtksdio_setup(struct hci_dev *hdev)
}
/* Enable WBS with mSBC codec */
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* Enable GPIO reset mechanism */
if (bdev->reset) {
@@ -1270,6 +1269,12 @@ static void btmtksdio_reset(struct hci_dev *hdev)
sdio_claim_host(bdev->func);
+ /* set drv_pmctrl if BT is closed before doing reset */
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) {
+ sdio_enable_func(bdev->func);
+ btmtksdio_drv_pmctrl(bdev);
+ }
+
sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
skb_queue_purge(&bdev->txq);
cancel_work_sync(&bdev->txrx_work);
@@ -1285,6 +1290,12 @@ static void btmtksdio_reset(struct hci_dev *hdev)
goto err;
}
+ /* set fw_pmctrl back if BT is closed after doing reset */
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) {
+ btmtksdio_fw_pmctrl(bdev);
+ sdio_disable_func(bdev->func);
+ }
+
clear_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state);
err:
sdio_release_host(bdev->func);
@@ -1384,7 +1395,7 @@ static int btmtksdio_probe(struct sdio_func *func,
SET_HCIDEV_DEV(hdev, &func->dev);
hdev->manufacturer = 70;
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
sdio_set_drvdata(func, bdev);
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index c97e260fcb0c..27aa48ff3ac2 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -27,7 +27,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#include "btmtk.h"
#define VERSION "0.2"
@@ -79,6 +79,7 @@ struct btmtkuart_dev {
u16 stp_dlen;
const struct btmtkuart_data *data;
+ struct hci_uart hu;
};
#define btmtkuart_is_standalone(bdev) \
@@ -316,7 +317,7 @@ mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
/* Resync STP when unexpected data is being read */
if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) {
- bt_dev_err(bdev->hdev, "stp format unexpect (%d, %d)",
+ bt_dev_err(bdev->hdev, "stp format unexpected (%d, %d)",
shdr->prefix, bdev->stp_dlen);
bdev->stp_cursor = 2;
bdev->stp_dlen = 0;
@@ -368,7 +369,7 @@ static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
sz_left -= adv;
p_left += adv;
- bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
+ bdev->rx_skb = h4_recv_buf(&bdev->hu, bdev->rx_skb, p_h4,
sz_h4, mtk_recv_pkts,
ARRAY_SIZE(mtk_recv_pkts));
if (IS_ERR(bdev->rx_skb)) {
@@ -858,6 +859,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)
}
bdev->hdev = hdev;
+ bdev->hu.hdev = hdev;
hdev->bus = HCI_UART;
hci_set_drvdata(hdev, bdev);
@@ -872,7 +874,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)
SET_HCIDEV_DEV(hdev, &serdev->dev);
hdev->manufacturer = 70;
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
if (btmtkuart_is_standalone(bdev)) {
err = clk_prepare_enable(bdev->osc);
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 1088db6056a4..3b1e9224e965 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -18,11 +18,13 @@
#include <linux/string_helpers.h>
#include <linux/gpio/consumer.h>
#include <linux/of_irq.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#define MANUFACTURER_NXP 37
@@ -73,7 +75,8 @@
#define FW_AUTH_ENC 0xc0
#define HCI_NXP_PRI_BAUDRATE 115200
-#define HCI_NXP_SEC_BAUDRATE 3000000
+#define HCI_NXP_SEC_BAUDRATE_3M 3000000
+#define HCI_NXP_SEC_BAUDRATE_4M 4000000
#define MAX_FW_FILE_NAME_LEN 50
@@ -201,12 +204,15 @@ struct btnxpuart_dev {
u32 new_baudrate;
u32 current_baudrate;
u32 fw_init_baudrate;
+ u32 secondary_baudrate;
enum bootloader_param_change timeout_changed;
enum bootloader_param_change baudrate_changed;
bool helper_downloaded;
struct ps_data psdata;
struct btnxpuart_data *nxp_data;
+ struct reset_control *pdn;
+ struct hci_uart hu;
};
#define NXP_V1_FW_REQ_PKT 0xa5
@@ -365,17 +371,26 @@ static u8 crc8_table[CRC8_TABLE_SIZE];
static struct sk_buff *nxp_drv_send_cmd(struct hci_dev *hdev, u16 opcode,
u32 plen,
- void *param)
+ void *param,
+ bool resp)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct ps_data *psdata = &nxpdev->psdata;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
/* set flag to prevent nxp_enqueue from parsing values from this command and
* calling hci_cmd_sync_queue() again.
*/
psdata->driver_sent_cmd = true;
- skb = __hci_cmd_sync(hdev, opcode, plen, param, HCI_CMD_TIMEOUT);
+ if (resp) {
+ skb = __hci_cmd_sync(hdev, opcode, plen, param, HCI_CMD_TIMEOUT);
+ } else {
+ __hci_cmd_send(hdev, opcode, plen, param);
+ /* Allow command to be sent before tx_work is cancelled
+ * by btnxpuart_flush()
+ */
+ msleep(20);
+ }
psdata->driver_sent_cmd = false;
return skb;
@@ -529,10 +544,10 @@ static int ps_setup(struct hci_dev *hdev)
}
if (psdata->wakeup_source) {
- ret = devm_request_irq(&serdev->dev, psdata->irq_handler,
- ps_host_wakeup_irq_handler,
- IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
- dev_name(&serdev->dev), nxpdev);
+ ret = devm_request_threaded_irq(&serdev->dev, psdata->irq_handler,
+ NULL, ps_host_wakeup_irq_handler,
+ IRQF_ONESHOT,
+ dev_name(&serdev->dev), nxpdev);
if (ret)
bt_dev_info(hdev, "error setting wakeup IRQ handler, ignoring\n");
disable_irq(psdata->irq_handler);
@@ -595,7 +610,8 @@ static int send_ps_cmd(struct hci_dev *hdev, void *data)
pcmd.ps_cmd = BT_PS_DISABLE;
pcmd.c2h_ps_interval = __cpu_to_le16(psdata->c2h_ps_interval);
- skb = nxp_drv_send_cmd(hdev, HCI_NXP_AUTO_SLEEP_MODE, sizeof(pcmd), &pcmd);
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_AUTO_SLEEP_MODE, sizeof(pcmd),
+ &pcmd, true);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Setting Power Save mode failed (%ld)", PTR_ERR(skb));
return PTR_ERR(skb);
@@ -644,7 +660,8 @@ static int send_wakeup_method_cmd(struct hci_dev *hdev, void *data)
break;
}
- skb = nxp_drv_send_cmd(hdev, HCI_NXP_WAKEUP_METHOD, sizeof(pcmd), &pcmd);
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_WAKEUP_METHOD, sizeof(pcmd),
+ &pcmd, true);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Setting wake-up method failed (%ld)", PTR_ERR(skb));
return PTR_ERR(skb);
@@ -802,7 +819,10 @@ static bool nxp_fw_change_baudrate(struct hci_dev *hdev, u16 req_len)
nxpdev->fw_v3_offset_correction += req_len;
} else if (req_len == sizeof(uart_config)) {
uart_config.clkdiv.address = __cpu_to_le32(clkdivaddr);
- uart_config.clkdiv.value = __cpu_to_le32(0x00c00000);
+ if (nxpdev->new_baudrate == HCI_NXP_SEC_BAUDRATE_4M)
+ uart_config.clkdiv.value = __cpu_to_le32(0x01000000);
+ else
+ uart_config.clkdiv.value = __cpu_to_le32(0x00c00000);
uart_config.uartdiv.address = __cpu_to_le32(uartdivaddr);
uart_config.uartdiv.value = __cpu_to_le32(1);
uart_config.mcr.address = __cpu_to_le32(uartmcraddr);
@@ -966,12 +986,13 @@ static int nxp_recv_fw_req_v1(struct hci_dev *hdev, struct sk_buff *skb)
goto free_skb;
}
if (nxpdev->baudrate_changed != changed) {
+ nxpdev->new_baudrate = nxpdev->secondary_baudrate;
if (nxp_fw_change_baudrate(hdev, len)) {
nxpdev->baudrate_changed = changed;
serdev_device_set_baudrate(nxpdev->serdev,
- HCI_NXP_SEC_BAUDRATE);
+ nxpdev->secondary_baudrate);
serdev_device_set_flow_control(nxpdev->serdev, true);
- nxpdev->current_baudrate = HCI_NXP_SEC_BAUDRATE;
+ nxpdev->current_baudrate = nxpdev->secondary_baudrate;
}
goto free_skb;
}
@@ -992,7 +1013,7 @@ static int nxp_recv_fw_req_v1(struct hci_dev *hdev, struct sk_buff *skb)
nxpdev->helper_downloaded = true;
serdev_device_wait_until_sent(nxpdev->serdev, 0);
serdev_device_set_baudrate(nxpdev->serdev,
- HCI_NXP_SEC_BAUDRATE);
+ HCI_NXP_SEC_BAUDRATE_3M);
serdev_device_set_flow_control(nxpdev->serdev, true);
} else {
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
@@ -1216,12 +1237,13 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
}
if (nxpdev->baudrate_changed != changed) {
+ nxpdev->new_baudrate = nxpdev->secondary_baudrate;
if (nxp_fw_change_baudrate(hdev, len)) {
nxpdev->baudrate_changed = cmd_sent;
serdev_device_set_baudrate(nxpdev->serdev,
- HCI_NXP_SEC_BAUDRATE);
+ nxpdev->secondary_baudrate);
serdev_device_set_flow_control(nxpdev->serdev, true);
- nxpdev->current_baudrate = HCI_NXP_SEC_BAUDRATE;
+ nxpdev->current_baudrate = nxpdev->secondary_baudrate;
}
goto free_skb;
}
@@ -1265,7 +1287,8 @@ static int nxp_set_baudrate_cmd(struct hci_dev *hdev, void *data)
if (!psdata)
return 0;
- skb = nxp_drv_send_cmd(hdev, HCI_NXP_SET_OPER_SPEED, 4, (u8 *)&new_baudrate);
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_SET_OPER_SPEED, 4,
+ (u8 *)&new_baudrate, true);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Setting baudrate failed (%ld)", PTR_ERR(skb));
return PTR_ERR(skb);
@@ -1323,7 +1346,7 @@ static void nxp_coredump(struct hci_dev *hdev)
struct sk_buff *skb;
u8 pcmd = 2;
- skb = nxp_drv_send_cmd(hdev, HCI_NXP_TRIGGER_DUMP, 1, &pcmd);
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_TRIGGER_DUMP, 1, &pcmd, true);
if (IS_ERR(skb))
bt_dev_err(hdev, "Failed to trigger FW Dump. (%ld)", PTR_ERR(skb));
else
@@ -1365,7 +1388,6 @@ static int nxp_process_fw_dump(struct hci_dev *hdev, struct sk_buff *skb)
if (buf_len == 0) {
bt_dev_warn(hdev, "==== FW dump complete ===");
- clear_bit(BTNXPUART_FW_DUMP_IN_PROGRESS, &nxpdev->tx_state);
hci_devcd_complete(hdev);
nxp_set_ind_reset(hdev, NULL);
}
@@ -1419,6 +1441,10 @@ static int nxp_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
static int nxp_setup(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct serdev_device *serdev = nxpdev->serdev;
+ char device_string[30];
+ char event_string[50];
+ char *envp[] = {device_string, event_string, NULL};
int err = 0;
if (nxp_check_boot_sign(nxpdev)) {
@@ -1431,6 +1457,12 @@ static int nxp_setup(struct hci_dev *hdev)
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
}
+ snprintf(device_string, 30, "BTNXPUART_DEV=%s", dev_name(&serdev->dev));
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_READY");
+ bt_dev_dbg(hdev, "==== Send uevent: %s:%s ===", device_string,
+ event_string);
+ kobject_uevent_env(&serdev->dev.kobj, KOBJ_CHANGE, envp);
+
serdev_device_set_baudrate(nxpdev->serdev, nxpdev->fw_init_baudrate);
nxpdev->current_baudrate = nxpdev->fw_init_baudrate;
@@ -1447,8 +1479,8 @@ static int nxp_post_init(struct hci_dev *hdev)
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct ps_data *psdata = &nxpdev->psdata;
- if (nxpdev->current_baudrate != HCI_NXP_SEC_BAUDRATE) {
- nxpdev->new_baudrate = HCI_NXP_SEC_BAUDRATE;
+ if (nxpdev->current_baudrate != nxpdev->secondary_baudrate) {
+ nxpdev->new_baudrate = nxpdev->secondary_baudrate;
nxp_set_baudrate_cmd(hdev, NULL);
}
if (psdata->cur_h2c_wakeupmode != psdata->h2c_wakeupmode)
@@ -1479,7 +1511,13 @@ static int nxp_shutdown(struct hci_dev *hdev)
u8 pcmd = 0;
if (ind_reset_in_progress(nxpdev)) {
- skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1, &pcmd);
+ if (test_and_clear_bit(BTNXPUART_FW_DUMP_IN_PROGRESS,
+ &nxpdev->tx_state))
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1,
+ &pcmd, false);
+ else
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1,
+ &pcmd, true);
serdev_device_set_flow_control(nxpdev->serdev, false);
set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
 /* HCI_NXP_IND_RESET command may not return any response */
@@ -1719,7 +1757,7 @@ static size_t btnxpuart_receive_buf(struct serdev_device *serdev,
ps_start_timer(nxpdev);
- nxpdev->rx_skb = h4_recv_buf(nxpdev->hdev, nxpdev->rx_skb, data, count,
+ nxpdev->rx_skb = h4_recv_buf(&nxpdev->hu, nxpdev->rx_skb, data, count,
nxp_recv_pkts, ARRAY_SIZE(nxp_recv_pkts));
if (IS_ERR(nxpdev->rx_skb)) {
int err = PTR_ERR(nxpdev->rx_skb);
@@ -1745,11 +1783,41 @@ static const struct serdev_device_ops btnxpuart_client_ops = {
.write_wakeup = btnxpuart_write_wakeup,
};
+static void nxp_coredump_notify(struct hci_dev *hdev, int state)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct serdev_device *serdev = nxpdev->serdev;
+ char device_string[30];
+ char event_string[50];
+ char *envp[] = {device_string, event_string, NULL};
+
+ snprintf(device_string, 30, "BTNXPUART_DEV=%s", dev_name(&serdev->dev));
+ switch (state) {
+ case HCI_DEVCOREDUMP_ACTIVE:
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_DUMP_ACTIVE");
+ break;
+ case HCI_DEVCOREDUMP_DONE:
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_DUMP_DONE");
+ break;
+ case HCI_DEVCOREDUMP_TIMEOUT:
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_DUMP_TIMEOUT");
+ break;
+ default:
+ snprintf(event_string, 50, "BTNXPUART_STATE=FW_DUMP_STATE_%d",
+ state);
+ break;
+ }
+ bt_dev_dbg(hdev, "==== Send uevent: %s:%s ===", device_string,
+ event_string);
+ kobject_uevent_env(&serdev->dev.kobj, KOBJ_CHANGE, envp);
+}
+
static int nxp_serdev_probe(struct serdev_device *serdev)
{
struct hci_dev *hdev;
struct btnxpuart_dev *nxpdev;
bdaddr_t ba = {0};
+ int err;
nxpdev = devm_kzalloc(&serdev->dev, sizeof(*nxpdev), GFP_KERNEL);
if (!nxpdev)
@@ -1773,10 +1841,31 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
if (!nxpdev->fw_init_baudrate)
nxpdev->fw_init_baudrate = FW_INIT_BAUDRATE;
+ device_property_read_u32(&nxpdev->serdev->dev, "max-speed",
+ &nxpdev->secondary_baudrate);
+ if (!nxpdev->secondary_baudrate ||
+ (nxpdev->secondary_baudrate != HCI_NXP_SEC_BAUDRATE_3M &&
+ nxpdev->secondary_baudrate != HCI_NXP_SEC_BAUDRATE_4M)) {
+ if (nxpdev->secondary_baudrate)
+ dev_err(&serdev->dev,
+ "Invalid max-speed. Using default 3000000.");
+ nxpdev->secondary_baudrate = HCI_NXP_SEC_BAUDRATE_3M;
+ }
+
set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
crc8_populate_msb(crc8_table, POLYNOMIAL8);
+ nxpdev->pdn = devm_reset_control_get_optional_shared(&serdev->dev, NULL);
+ if (IS_ERR(nxpdev->pdn))
+ return PTR_ERR(nxpdev->pdn);
+
+ err = devm_regulator_get_enable(&serdev->dev, "vcc");
+ if (err) {
+ dev_err(&serdev->dev, "Failed to enable vcc regulator\n");
+ return err;
+ }
+
/* Initialize and register HCI device */
hdev = hci_alloc_dev();
if (!hdev) {
@@ -1784,7 +1873,10 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
return -ENOMEM;
}
+ reset_control_deassert(nxpdev->pdn);
+
nxpdev->hdev = hdev;
+ nxpdev->hu.hdev = hdev;
hdev->bus = HCI_UART;
hci_set_drvdata(hdev, nxpdev);
@@ -1807,7 +1899,7 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
"local-bd-address",
(u8 *)&ba, sizeof(ba));
if (bacmp(&ba, BDADDR_ANY))
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
if (hci_register_dev(hdev) < 0) {
dev_err(&serdev->dev, "Can't register HCI device\n");
@@ -1817,11 +1909,13 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
if (ps_setup(hdev))
goto probe_fail;
- hci_devcd_register(hdev, nxp_coredump, nxp_coredump_hdr, NULL);
+ hci_devcd_register(hdev, nxp_coredump, nxp_coredump_hdr,
+ nxp_coredump_notify);
return 0;
probe_fail:
+ reset_control_assert(nxpdev->pdn);
hci_free_dev(hdev);
return -ENODEV;
}
@@ -1849,6 +1943,7 @@ static void nxp_serdev_remove(struct serdev_device *serdev)
ps_cleanup(nxpdev);
hci_unregister_dev(hdev);
+ reset_control_assert(nxpdev->pdn);
hci_free_dev(hdev);
}
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index edefb9dc76aa..7c958d6065be 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -739,7 +739,7 @@ static int qca_check_bdaddr(struct hci_dev *hdev, const struct qca_fw_config *co
bda = (struct hci_rp_read_bd_addr *)skb->data;
if (!bacmp(&bda->bdaddr, &config->bdaddr))
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
kfree_skb(skb);
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index c0eb71d6ffd3..d2e13fcb6bab 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -117,7 +117,7 @@ static int btqcomsmd_setup(struct hci_dev *hdev)
/* Devices do not have persistent storage for BD address. Retrieve
* it from the firmware node property.
*/
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
return 0;
}
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 7838c89e529e..5603b282f9bc 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -50,7 +50,7 @@
#define RTL_CHIP_SUBVER (&(struct rtl_vendor_cmd) {{0x10, 0x38, 0x04, 0x28, 0x80}})
#define RTL_CHIP_REV (&(struct rtl_vendor_cmd) {{0x10, 0x3A, 0x04, 0x28, 0x80}})
-#define RTL_SEC_PROJ (&(struct rtl_vendor_cmd) {{0x10, 0xA4, 0x0D, 0x00, 0xb0}})
+#define RTL_SEC_PROJ (&(struct rtl_vendor_cmd) {{0x10, 0xA4, 0xAD, 0x00, 0xb0}})
#define RTL_PATCH_SNIPPETS 0x01
#define RTL_PATCH_DUMMY_HEADER 0x02
@@ -72,6 +72,7 @@ enum btrtl_chip_id {
CHIP_ID_8851B = 36,
CHIP_ID_8922A = 44,
CHIP_ID_8852BT = 47,
+ CHIP_ID_8761C = 51,
};
struct id_table {
@@ -230,6 +231,14 @@ static const struct id_table ic_id_table[] = {
.cfg_name = "rtl_bt/rtl8761bu_config",
.hw_info = "rtl8761bu" },
+ /* 8761CU */
+ { IC_INFO(RTL_ROM_LMP_8761A, 0x0e, 0, HCI_USB),
+ .config_needed = false,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8761cu_fw",
+ .cfg_name = "rtl_bt/rtl8761cu_config",
+ .hw_info = "rtl8761cu" },
+
/* 8822C with UART interface */
{ IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0x8, HCI_UART),
.config_needed = true,
@@ -344,7 +353,8 @@ static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev,
(ic_id_table[i].hci_rev != hci_rev))
continue;
if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIVER) &&
- (ic_id_table[i].hci_ver != hci_ver))
+ (ic_id_table[i].hci_ver != hci_ver) &&
+ (ic_id_table[i].hci_ver != 0))
continue;
if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIBUS) &&
(ic_id_table[i].hci_bus != hci_bus))
@@ -534,7 +544,6 @@ static int rtlbt_parse_firmware_v2(struct hci_dev *hdev,
{
struct rtl_epatch_header_v2 *hdr;
int rc;
- u8 reg_val[2];
u8 key_id;
u32 num_sections;
struct rtl_section *section;
@@ -549,14 +558,7 @@ static int rtlbt_parse_firmware_v2(struct hci_dev *hdev,
.len = btrtl_dev->fw_len - 7, /* Cut the tail */
};
- rc = btrtl_vendor_read_reg16(hdev, RTL_SEC_PROJ, reg_val);
- if (rc < 0)
- return -EIO;
- key_id = reg_val[0];
-
- rtl_dev_dbg(hdev, "%s: key id %u", __func__, key_id);
-
- btrtl_dev->key_id = key_id;
+ key_id = btrtl_dev->key_id;
hdr = rtl_iov_pull_data(&iov, sizeof(*hdr));
if (!hdr)
@@ -625,8 +627,10 @@ static int rtlbt_parse_firmware_v2(struct hci_dev *hdev,
len += entry->len;
}
- if (!len)
+ if (!len) {
+ kvfree(ptr);
return -EPERM;
+ }
*_buf = ptr;
return len;
@@ -668,6 +672,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
{ RTL_ROM_LMP_8851B, 36 }, /* 8851B */
{ RTL_ROM_LMP_8922A, 44 }, /* 8922A */
{ RTL_ROM_LMP_8852A, 47 }, /* 8852BT */
+ { RTL_ROM_LMP_8761A, 51 }, /* 8761C */
};
if (btrtl_dev->fw_len <= 8)
@@ -693,7 +698,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
/* Loop from the end of the firmware parsing instructions, until
* we find an instruction that identifies the "project ID" for the
- * hardware supported by this firwmare file.
+ * hardware supported by this firmware file.
* Once we have that, we double-check that project_id is suitable
* for the hardware we are working with.
*/
@@ -1068,6 +1073,8 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
u16 hci_rev, lmp_subver;
u8 hci_ver, lmp_ver, chip_type = 0;
int ret;
+ int rc;
+ u8 key_id;
u8 reg_val[2];
btrtl_dev = kzalloc(sizeof(*btrtl_dev), GFP_KERNEL);
@@ -1178,6 +1185,14 @@ next:
goto err_free;
}
+ rc = btrtl_vendor_read_reg16(hdev, RTL_SEC_PROJ, reg_val);
+ if (rc < 0)
+ goto err_free;
+
+ key_id = reg_val[0];
+ btrtl_dev->key_id = key_id;
+ rtl_dev_info(hdev, "%s: key id %u", __func__, key_id);
+
btrtl_dev->fw_len = -EIO;
if (lmp_subver == RTL_ROM_LMP_8852A && hci_rev == 0x000c) {
snprintf(fw_name, sizeof(fw_name), "%s_v2.bin",
@@ -1200,7 +1215,7 @@ next:
goto err_free;
}
- if (btrtl_dev->ic_info->cfg_name) {
+ if (btrtl_dev->ic_info->cfg_name && !btrtl_dev->key_id) {
if (postfix) {
snprintf(cfg_name, sizeof(cfg_name), "%s-%s.bin",
btrtl_dev->ic_info->cfg_name, postfix);
@@ -1287,7 +1302,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
/* Enable controller to do both LE scan and BR/EDR inquiry
* simultaneously.
*/
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
/* Enable central-peripheral role (able to create new connections with
* an existing connection in slave role).
@@ -1301,7 +1316,8 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
case CHIP_ID_8851B:
case CHIP_ID_8922A:
case CHIP_ID_8852BT:
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ case CHIP_ID_8761C:
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* RTL8852C needs to transmit mSBC data continuously without
* the zero length of USB packets for the ALT 6 supported chips
@@ -1312,7 +1328,8 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
if (btrtl_dev->project_id == CHIP_ID_8852A ||
btrtl_dev->project_id == CHIP_ID_8852B ||
btrtl_dev->project_id == CHIP_ID_8852C)
- set_bit(HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER, &hdev->quirks);
+ hci_set_quirk(hdev,
+ HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER);
hci_set_aosp_capable(hdev);
break;
@@ -1331,8 +1348,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
* but it doesn't support any features from page 2 -
* it either responds with garbage or with error status
*/
- set_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
- &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2);
break;
default:
break;
@@ -1520,6 +1536,8 @@ MODULE_FIRMWARE("rtl_bt/rtl8761b_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8761b_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8761bu_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8761bu_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8761cu_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8761cu_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8821a_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8821a_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8821c_fw.bin");
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index a69feb08486a..8325655ce6aa 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -327,7 +327,7 @@ static int btsdio_probe(struct sdio_func *func,
hdev->send = btsdio_send_frame;
if (func->vendor == 0x0104 && func->device == 0x00c5)
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
err = hci_register_dev(hdev);
if (err < 0) {
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 9ab661d2d1e6..8ed3883ab8ee 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -66,6 +66,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25)
#define BTUSB_INTEL_NO_WBS_SUPPORT BIT(26)
#define BTUSB_ACTIONS_SEMI BIT(27)
+#define BTUSB_BARROT BIT(28)
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -298,75 +299,77 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
/* QCA WCN6855 chipset */
- { USB_DEVICE(0x0cf3, 0xe600), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0c7), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0cc), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0c9), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0d6), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0ca), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0e3), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0cb), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9309), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0cc), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9409), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0ce), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0d0), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9108), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0d6), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9109), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0de), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9208), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0df), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9209), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0e1), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9308), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0e3), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9408), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0ea), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9508), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0489, 0xe0ec), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9509), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x04ca, 0x3022), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9608), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x04ca, 0x3023), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9609), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x04ca, 0x3024), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x10ab, 0x9f09), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x04ca, 0x3a22), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3022), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x04ca, 0x3a24), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0c7), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x04ca, 0x3a26), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0c9), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x04ca, 0x3a27), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0ca), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x0cf3, 0xe600), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0cb), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9108), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0ce), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9109), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0de), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9208), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0df), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9209), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0e1), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9308), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0ea), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9309), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x0489, 0xe0ec), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9408), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3023), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9409), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3024), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9508), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3a22), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9509), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3a24), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9608), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3a26), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9609), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
- { USB_DEVICE(0x04ca, 0x3a27), .driver_info = BTUSB_QCA_WCN6855 |
+ { USB_DEVICE(0x10ab, 0x9f09), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x28de, 0x1401), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
/* QCA WCN785x chipset */
@@ -501,6 +504,8 @@ static const struct usb_device_id quirks_table[] = {
/* Realtek 8821CE Bluetooth devices */
{ USB_DEVICE(0x13d3, 0x3529), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3533), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8822CE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK |
@@ -515,6 +520,13 @@ static const struct usb_device_id quirks_table[] = {
/* Realtek 8851BE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xb850), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3600), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3601), .driver_info = BTUSB_REALTEK },
+
+ /* Realtek 8851BU Bluetooth devices */
+ { USB_DEVICE(0x3625, 0x010b), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2001, 0x332a), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK |
@@ -565,6 +577,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3618), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe123), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK |
@@ -573,6 +587,12 @@ static const struct usb_device_id quirks_table[] = {
/* Realtek 8852BT/8852BE-VT Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x8520), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe12f), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3618), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3619), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8922AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x8922), .driver_info = BTUSB_REALTEK |
@@ -609,6 +629,8 @@ static const struct usb_device_id quirks_table[] = {
/* Additional MediaTek MT7920 Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe134), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe135), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3620), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3621), .driver_info = BTUSB_MEDIATEK |
@@ -673,6 +695,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe153), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe170), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x38e4), .driver_info = BTUSB_MEDIATEK |
@@ -689,6 +713,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3615), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3633), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x35f5, 0x7922), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
@@ -705,6 +731,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe139), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe14e), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe14f), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe150), .driver_info = BTUSB_MEDIATEK |
@@ -721,10 +749,14 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3613), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3627), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3628), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3630), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2c7c, 0x7009), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
@@ -761,6 +793,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x2b89, 0x8761), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2b89, 0x6275), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional Realtek 8821AE Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
@@ -797,6 +831,10 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x0cb5, 0xc547), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Barrot Technology Bluetooth devices */
+ { USB_DEVICE(0x33fa, 0x0010), .driver_info = BTUSB_BARROT },
+ { USB_DEVICE(0x33fa, 0x0012), .driver_info = BTUSB_BARROT },
+
/* Actions Semiconductor ATS2851 based devices */
{ USB_DEVICE(0x10d7, 0xb012), .driver_info = BTUSB_ACTIONS_SEMI },
@@ -1107,6 +1145,24 @@ static void btusb_qca_reset(struct hci_dev *hdev)
btusb_reset(hdev);
}
+static u8 btusb_classify_qca_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ /* Some Qualcomm controllers, e.g., QCNFA765 with WCN6855 chip, send debug
+ * packets as ACL frames with connection handle 0x2EDC. These are not real
+ * ACL packets and should be reclassified as HCI_DIAG_PKT to prevent
+ * "ACL packet for unknown connection handle 3804" errors.
+ */
+ if (skb->len >= 2) {
+ u16 handle = get_unaligned_le16(skb->data);
+
+ if (handle == 0x2EDC)
+ return HCI_DIAG_PKT;
+ }
+
+ /* Use default packet type for other packets */
+ return hci_skb_pkt_type(skb);
+}
+
static inline void btusb_free_frags(struct btusb_data *data)
{
unsigned long flags;
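
For reference, HCI connection handles occupy only the low 12 bits of the 16-bit field, which is why the 0x2EDC marker above surfaces as handle 3804 in the error message. A small illustrative fragment (not part of the patch):

	u16 raw = get_unaligned_le16(skb->data);	/* 0x2EDC on affected controllers */
	u16 handle = raw & 0x0fff;			/* 12-bit handle: 0x0EDC == 3804 */
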
@@ -1179,6 +1235,18 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
}
if (!hci_skb_expect(skb)) {
+			/* Each chunk should correspond to one or more complete
+			 * events, so any leftover bytes too short to start a
+			 * new event likely indicate a bug in the controller.
+ */
+ if (count && count < HCI_EVENT_HDR_SIZE) {
+ bt_dev_warn(data->hdev,
+ "Unexpected continuation: %d bytes",
+ count);
+ count = 0;
+ }
+
/* Complete frame */
btusb_recv_event(data, skb);
skb = NULL;
@@ -2472,18 +2540,18 @@ static int btusb_setup_csr(struct hci_dev *hdev)
* Probably will need to be expanded in the future;
* without these the controller will lock up.
*/
- set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks);
- set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_READ_VOICE_SETTING, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_ERR_DATA_REPORTING);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL);
+ hci_set_quirk(hdev, HCI_QUIRK_NO_SUSPEND_NOTIFIER);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_VOICE_SETTING);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE);
/* Clear the reset quirk since this is not an actual
* early Bluetooth 1.1 device from CSR.
*/
- clear_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
- clear_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_clear_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
+ hci_clear_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
/*
* Special workaround for these BT 4.0 chip clones, and potentially more:
@@ -2594,12 +2662,12 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
else
urb = alloc_ctrl_urb(hdev, skb);
- /* When the 0xfc01 command is issued to boot into
- * the operational firmware, it will actually not
- * send a command complete event. To keep the flow
+ /* When the BTINTEL_HCI_OP_RESET command is issued to
+ * boot into the operational firmware, it will actually
+ * not send a command complete event. To keep the flow
	 * control working, inject that event here.
*/
- if (opcode == 0xfc01)
+ if (opcode == BTINTEL_HCI_OP_RESET)
inject_cmd_complete(hdev, opcode);
} else {
urb = alloc_ctrl_urb(hdev, skb);
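
inject_cmd_complete() itself is outside this hunk; a hedged sketch of how such an injection helper is typically built from the standard HCI event structures (the exact btusb implementation may differ):

	static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
	{
		struct sk_buff *skb;
		struct hci_event_hdr *hdr;
		struct hci_ev_cmd_complete *evt;

		skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;

		hdr = skb_put(skb, sizeof(*hdr));
		hdr->evt = HCI_EV_CMD_COMPLETE;
		hdr->plen = sizeof(*evt) + 1;

		evt = skb_put(skb, sizeof(*evt));
		evt->ncmd = 0x01;
		evt->opcode = cpu_to_le16(opcode);

		skb_put_u8(skb, 0x00);	/* status: success */

		hci_skb_pkt_type(skb) = HCI_EVENT_PKT;

		return hci_recv_frame(hdev, skb);
	}
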
@@ -2675,9 +2743,21 @@ static int btusb_recv_event_realtek(struct hci_dev *hdev, struct sk_buff *skb)
static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
{
- struct btmtk_data *btmtk_data = hci_get_priv(data->hdev);
+ struct btmtk_data *btmtk_data;
int err;
+ if (!data->hdev)
+ return;
+
+ btmtk_data = hci_get_priv(data->hdev);
+ if (!btmtk_data)
+ return;
+
+ if (!btmtk_data->isopkt_intf) {
+ bt_dev_err(data->hdev, "Can't claim NULL iso interface");
+ return;
+ }
+
/*
* The function usb_driver_claim_interface() is documented to need
* locks held if it's not called from a probe routine. The code here
@@ -2699,17 +2779,30 @@ static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
static void btusb_mtk_release_iso_intf(struct hci_dev *hdev)
{
- struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ struct btmtk_data *btmtk_data;
+
+ if (!hdev)
+ return;
+
+ btmtk_data = hci_get_priv(hdev);
+ if (!btmtk_data)
+ return;
if (test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) {
usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
clear_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags);
- dev_kfree_skb_irq(btmtk_data->isopkt_skb);
- btmtk_data->isopkt_skb = NULL;
- usb_set_intfdata(btmtk_data->isopkt_intf, NULL);
- usb_driver_release_interface(&btusb_driver,
- btmtk_data->isopkt_intf);
+ if (btmtk_data->isopkt_skb) {
+ dev_kfree_skb_irq(btmtk_data->isopkt_skb);
+ btmtk_data->isopkt_skb = NULL;
+ }
+
+ if (btmtk_data->isopkt_intf) {
+ usb_set_intfdata(btmtk_data->isopkt_intf, NULL);
+ usb_driver_release_interface(&btusb_driver,
+ btmtk_data->isopkt_intf);
+ btmtk_data->isopkt_intf = NULL;
+ }
}
clear_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags);
@@ -2747,6 +2840,19 @@ static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data)
btusb_stop_traffic(data);
usb_kill_anchored_urbs(&data->tx_anchor);
+	/* Toggle the hard reset line. The MediaTek device will drop off
+	 * the USB bus and then replug. The cleanup is handled correctly
+	 * on the way out (standard USB disconnect), and the new device
+	 * is detected cleanly and bound to the driver again.
+ */
+ if (data->reset_gpio) {
+ gpiod_set_value_cansleep(data->reset_gpio, 1);
+ msleep(200);
+ gpiod_set_value_cansleep(data->reset_gpio, 0);
+ return 0;
+ }
+
err = btmtk_usb_subsys_reset(hdev, btmtk_data->dev_id);
usb_queue_reset_device(data->intf);
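
The reset_gpio toggled above is acquired at probe time; a hedged sketch of the acquisition (the exact call site and flags in btusb_probe may differ):

	/* In probe, before hci_register_dev(); GPIOD_OUT_LOW leaves the
	 * device out of reset. Released with gpiod_put() on disconnect,
	 * as seen later in this patch.
	 */
	data->reset_gpio = gpiod_get_optional(&data->udev->dev, "reset",
					      GPIOD_OUT_LOW);
	if (IS_ERR(data->reset_gpio))
		return PTR_ERR(data->reset_gpio);
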
@@ -3179,6 +3285,12 @@ struct qca_device_info {
u8 ver_offset; /* offset of version structure in rampatch */
};
+struct qca_custom_firmware {
+ u32 rom_version;
+ u16 board_id;
+ const char *subdirectory;
+};
+
static const struct qca_device_info qca_devices_table[] = {
{ 0x00000100, 20, 4, 8 }, /* Rome 1.0 */
{ 0x00000101, 20, 4, 8 }, /* Rome 1.1 */
@@ -3192,6 +3304,58 @@ static const struct qca_device_info qca_devices_table[] = {
{ 0x00190200, 40, 4, 16 }, /* WCN785x 2.0 */
};
+static const struct qca_custom_firmware qca_custom_btfws[] = {
+ { 0x00130201, 0x030A, "QCA2066" },
+ { 0x00130201, 0x030B, "QCA2066" },
+ { },
+};
+
+static u16 qca_extract_board_id(const struct qca_version *ver)
+{
+ u16 flag = le16_to_cpu(ver->flag);
+ u16 board_id = 0;
+
+ if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
+		/* The board_id is split into two bytes: the first byte is
+		 * the chip ID and the second byte is the platform ID. For
+		 * example, in board ID 0x010A, 0x01 is the chip ID and
+		 * 0x0A is the platform ID. We have several platforms, and
+		 * platform IDs are continuously added.
+ * Platform ID:
+ * 0x00 is for Mobile
+ * 0x01 is for X86
+ * 0x02 is for Automotive
+ * 0x03 is for Consumer electronic
+ */
+ board_id = (ver->chip_id << 8) + ver->platform_id;
+ }
+
+ /* Take 0xffff as invalid board ID */
+ if (board_id == 0xffff)
+ board_id = 0;
+
+ return board_id;
+}
+
+static const char *qca_get_fw_subdirectory(const struct qca_version *ver)
+{
+ const struct qca_custom_firmware *ptr;
+ u32 rom_ver;
+ u16 board_id;
+
+ rom_ver = le32_to_cpu(ver->rom_version);
+ board_id = qca_extract_board_id(ver);
+ if (!board_id)
+ return NULL;
+
+ for (ptr = qca_custom_btfws; ptr->rom_version; ptr++) {
+ if (ptr->rom_version == rom_ver &&
+ ptr->board_id == board_id)
+ return ptr->subdirectory;
+ }
+
+ return NULL;
+}
+
static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request,
void *data, u16 size)
{
@@ -3296,15 +3460,22 @@ static int btusb_setup_qca_load_rampatch(struct hci_dev *hdev,
{
struct qca_rampatch_version *rver;
const struct firmware *fw;
+ const char *fw_subdir;
u32 ver_rom, ver_patch, rver_rom;
u16 rver_rom_low, rver_rom_high, rver_patch;
- char fwname[64];
+ char fwname[80];
int err;
ver_rom = le32_to_cpu(ver->rom_version);
ver_patch = le32_to_cpu(ver->patch_version);
- snprintf(fwname, sizeof(fwname), "qca/rampatch_usb_%08x.bin", ver_rom);
+ fw_subdir = qca_get_fw_subdirectory(ver);
+ if (fw_subdir)
+ snprintf(fwname, sizeof(fwname), "qca/%s/rampatch_usb_%08x.bin",
+ fw_subdir, ver_rom);
+ else
+ snprintf(fwname, sizeof(fwname), "qca/rampatch_usb_%08x.bin",
+ ver_rom);
err = request_firmware(&fw, fwname, &hdev->dev);
if (err) {
@@ -3348,44 +3519,34 @@ static void btusb_generate_qca_nvm_name(char *fwname, size_t max_size,
const struct qca_version *ver)
{
u32 rom_version = le32_to_cpu(ver->rom_version);
- u16 flag = le16_to_cpu(ver->flag);
+ const char *variant, *fw_subdir;
+ int len;
+ u16 board_id;
- if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
- /* The board_id should be split into two bytes
- * The 1st byte is chip ID, and the 2nd byte is platform ID
- * For example, board ID 0x010A, 0x01 is platform ID. 0x0A is chip ID
- * we have several platforms, and platform IDs are continuously added
- * Platform ID:
- * 0x00 is for Mobile
- * 0x01 is for X86
- * 0x02 is for Automotive
- * 0x03 is for Consumer electronic
- */
- u16 board_id = (ver->chip_id << 8) + ver->platform_id;
- const char *variant;
+ fw_subdir = qca_get_fw_subdirectory(ver);
+ board_id = qca_extract_board_id(ver);
- switch (le32_to_cpu(ver->ram_version)) {
- case WCN6855_2_0_RAM_VERSION_GF:
- case WCN6855_2_1_RAM_VERSION_GF:
- variant = "_gf";
- break;
- default:
- variant = "";
- break;
- }
-
- if (board_id == 0) {
- snprintf(fwname, max_size, "qca/nvm_usb_%08x%s.bin",
- rom_version, variant);
- } else {
- snprintf(fwname, max_size, "qca/nvm_usb_%08x%s_%04x.bin",
- rom_version, variant, board_id);
- }
- } else {
- snprintf(fwname, max_size, "qca/nvm_usb_%08x.bin",
- rom_version);
+ switch (le32_to_cpu(ver->ram_version)) {
+ case WCN6855_2_0_RAM_VERSION_GF:
+ case WCN6855_2_1_RAM_VERSION_GF:
+ variant = "_gf";
+ break;
+ default:
+ variant = NULL;
+ break;
}
+ if (fw_subdir)
+ len = snprintf(fwname, max_size, "qca/%s/nvm_usb_%08x",
+ fw_subdir, rom_version);
+ else
+ len = snprintf(fwname, max_size, "qca/nvm_usb_%08x",
+ rom_version);
+ if (variant)
+ len += snprintf(fwname + len, max_size - len, "%s", variant);
+ if (board_id)
+ len += snprintf(fwname + len, max_size - len, "_%04x", board_id);
+ len += snprintf(fwname + len, max_size - len, ".bin");
}
static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
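
A worked example of the resulting lookup paths, assuming a QCA2066 part with ROM version 0x00130201 and board ID 0x030A (values from qca_custom_btfws above; whether such a part also reports a GF RAM version is an assumption here):

	char fwname[80];

	/* rampatch: subdirectory plus ROM version only */
	snprintf(fwname, sizeof(fwname), "qca/%s/rampatch_usb_%08x.bin",
		 "QCA2066", 0x00130201);
	/* -> "qca/QCA2066/rampatch_usb_00130201.bin" */

	/* nvm: subdirectory, ROM version, variant, board ID */
	snprintf(fwname, sizeof(fwname), "qca/%s/nvm_usb_%08x%s_%04x.bin",
		 "QCA2066", 0x00130201, "_gf", 0x030a);
	/* -> "qca/QCA2066/nvm_usb_00130201_gf_030a.bin" */
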
@@ -3393,7 +3554,7 @@ static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
const struct qca_device_info *info)
{
const struct firmware *fw;
- char fwname[64];
+ char fwname[80];
int err;
btusb_generate_qca_nvm_name(fwname, sizeof(fwname), ver);
@@ -3494,7 +3655,7 @@ static int btusb_setup_qca(struct hci_dev *hdev)
/* Mark HCI_OP_ENHANCED_SETUP_SYNC_CONN as broken as it doesn't seem to
* work with the likes of HSP/HFP mSBC.
*/
- set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN);
return 0;
}
@@ -3792,6 +3953,8 @@ static int btusb_hci_drv_supported_altsettings(struct hci_dev *hdev, void *data,
/* There are at most 7 alt (0 - 6) */
rp = kmalloc(sizeof(*rp) + 7, GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
rp->num = 0;
if (!drvdata->isoc)
@@ -4008,10 +4171,10 @@ static int btusb_probe(struct usb_interface *intf,
}
#endif
if (id->driver_info & BTUSB_CW6622)
- set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY);
if (id->driver_info & BTUSB_BCM2045)
- set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY);
if (id->driver_info & BTUSB_BCM92035)
hdev->setup = btusb_setup_bcm92035;
@@ -4068,8 +4231,8 @@ static int btusb_probe(struct usb_interface *intf,
hdev->reset = btmtk_reset_sync;
hdev->set_bdaddr = btmtk_set_bdaddr;
hdev->send = btusb_send_frame_mtk;
- set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks);
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
data->recv_acl = btmtk_usb_recv_acl;
data->suspend = btmtk_usb_suspend;
data->resume = btmtk_usb_resume;
@@ -4077,20 +4240,20 @@ static int btusb_probe(struct usb_interface *intf,
}
if (id->driver_info & BTUSB_SWAVE) {
- set_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_FIXUP_INQUIRY_MODE);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LOCAL_COMMANDS);
}
if (id->driver_info & BTUSB_INTEL_BOOT) {
hdev->manufacturer = 2;
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
}
if (id->driver_info & BTUSB_ATH3012) {
data->setup_on_usb = btusb_setup_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
}
if (id->driver_info & BTUSB_QCA_ROME) {
@@ -4098,7 +4261,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
hdev->reset = btusb_qca_reset;
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
btusb_check_needs_reset_resume(intf);
}
@@ -4109,10 +4272,11 @@ static int btusb_probe(struct usb_interface *intf,
data->recv_acl = btusb_recv_acl_qca;
hci_devcd_register(hdev, btusb_coredump_qca, btusb_dump_hdr_qca, NULL);
data->setup_on_usb = btusb_setup_qca;
+ hdev->classify_pkt_type = btusb_classify_qca_pkt_type;
hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_wcn6855;
hdev->reset = btusb_qca_reset;
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
hci_set_msft_opcode(hdev, 0xFD70);
}
@@ -4140,35 +4304,35 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_ACTIONS_SEMI) {
/* Support is advertised, but not implemented */
- set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_ERR_DATA_REPORTING);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_EXT_SCAN);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_EXT_CREATE_CONN);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT);
}
if (!reset)
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) {
if (!disable_scofix)
- set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_FIXUP_BUFFER_SIZE);
}
if (id->driver_info & BTUSB_BROKEN_ISOC)
data->isoc = NULL;
if (id->driver_info & BTUSB_WIDEBAND_SPEECH)
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
if (id->driver_info & BTUSB_INVALID_LE_STATES)
- set_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES);
if (id->driver_info & BTUSB_DIGIANSWER) {
data->cmdreq_type = USB_TYPE_VENDOR;
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
}
if (id->driver_info & BTUSB_CSR) {
@@ -4177,10 +4341,10 @@ static int btusb_probe(struct usb_interface *intf,
/* Old firmware would otherwise execute USB reset */
if (bcdDevice < 0x117)
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
/* This must be set first in case we disable it for fakes */
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
/* Fake CSR devices with broken commands */
if (le16_to_cpu(udev->descriptor.idVendor) == 0x0a12 &&
@@ -4193,7 +4357,7 @@ static int btusb_probe(struct usb_interface *intf,
/* New sniffer firmware has crippled HCI interface */
if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997)
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
}
if (id->driver_info & BTUSB_INTEL_BOOT) {
@@ -4269,6 +4433,11 @@ static void btusb_disconnect(struct usb_interface *intf)
hci_unregister_dev(hdev);
+ if (data->oob_wake_irq)
+ device_init_wakeup(&data->udev->dev, false);
+ if (data->reset_gpio)
+ gpiod_put(data->reset_gpio);
+
if (intf == data->intf) {
if (data->isoc)
usb_driver_release_interface(&btusb_driver, data->isoc);
@@ -4279,17 +4448,11 @@ static void btusb_disconnect(struct usb_interface *intf)
usb_driver_release_interface(&btusb_driver, data->diag);
usb_driver_release_interface(&btusb_driver, data->intf);
} else if (intf == data->diag) {
- usb_driver_release_interface(&btusb_driver, data->intf);
if (data->isoc)
usb_driver_release_interface(&btusb_driver, data->isoc);
+ usb_driver_release_interface(&btusb_driver, data->intf);
}
- if (data->oob_wake_irq)
- device_init_wakeup(&data->udev->dev, false);
-
- if (data->reset_gpio)
- gpiod_put(data->reset_gpio);
-
hci_free_dev(hdev);
}
diff --git a/drivers/bluetooth/h4_recv.h b/drivers/bluetooth/h4_recv.h
deleted file mode 100644
index 28cf2d8c2d48..000000000000
--- a/drivers/bluetooth/h4_recv.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- *
- * Generic Bluetooth HCI UART driver
- *
- * Copyright (C) 2015-2018 Intel Corporation
- */
-
-#include <linux/unaligned.h>
-
-struct h4_recv_pkt {
- u8 type; /* Packet type */
- u8 hlen; /* Header length */
- u8 loff; /* Data length offset in header */
- u8 lsize; /* Data length field size */
- u16 maxlen; /* Max overall packet length */
- int (*recv)(struct hci_dev *hdev, struct sk_buff *skb);
-};
-
-#define H4_RECV_ACL \
- .type = HCI_ACLDATA_PKT, \
- .hlen = HCI_ACL_HDR_SIZE, \
- .loff = 2, \
- .lsize = 2, \
- .maxlen = HCI_MAX_FRAME_SIZE \
-
-#define H4_RECV_SCO \
- .type = HCI_SCODATA_PKT, \
- .hlen = HCI_SCO_HDR_SIZE, \
- .loff = 2, \
- .lsize = 1, \
- .maxlen = HCI_MAX_SCO_SIZE
-
-#define H4_RECV_EVENT \
- .type = HCI_EVENT_PKT, \
- .hlen = HCI_EVENT_HDR_SIZE, \
- .loff = 1, \
- .lsize = 1, \
- .maxlen = HCI_MAX_EVENT_SIZE
-
-#define H4_RECV_ISO \
- .type = HCI_ISODATA_PKT, \
- .hlen = HCI_ISO_HDR_SIZE, \
- .loff = 2, \
- .lsize = 2, \
- .maxlen = HCI_MAX_FRAME_SIZE
-
-static inline struct sk_buff *h4_recv_buf(struct hci_dev *hdev,
- struct sk_buff *skb,
- const unsigned char *buffer,
- int count,
- const struct h4_recv_pkt *pkts,
- int pkts_count)
-{
- /* Check for error from previous call */
- if (IS_ERR(skb))
- skb = NULL;
-
- while (count) {
- int i, len;
-
- if (!skb) {
- for (i = 0; i < pkts_count; i++) {
- if (buffer[0] != (&pkts[i])->type)
- continue;
-
- skb = bt_skb_alloc((&pkts[i])->maxlen,
- GFP_ATOMIC);
- if (!skb)
- return ERR_PTR(-ENOMEM);
-
- hci_skb_pkt_type(skb) = (&pkts[i])->type;
- hci_skb_expect(skb) = (&pkts[i])->hlen;
- break;
- }
-
- /* Check for invalid packet type */
- if (!skb)
- return ERR_PTR(-EILSEQ);
-
- count -= 1;
- buffer += 1;
- }
-
- len = min_t(uint, hci_skb_expect(skb) - skb->len, count);
- skb_put_data(skb, buffer, len);
-
- count -= len;
- buffer += len;
-
- /* Check for partial packet */
- if (skb->len < hci_skb_expect(skb))
- continue;
-
- for (i = 0; i < pkts_count; i++) {
- if (hci_skb_pkt_type(skb) == (&pkts[i])->type)
- break;
- }
-
- if (i >= pkts_count) {
- kfree_skb(skb);
- return ERR_PTR(-EILSEQ);
- }
-
- if (skb->len == (&pkts[i])->hlen) {
- u16 dlen;
-
- switch ((&pkts[i])->lsize) {
- case 0:
- /* No variable data length */
- dlen = 0;
- break;
- case 1:
- /* Single octet variable length */
- dlen = skb->data[(&pkts[i])->loff];
- hci_skb_expect(skb) += dlen;
-
- if (skb_tailroom(skb) < dlen) {
- kfree_skb(skb);
- return ERR_PTR(-EMSGSIZE);
- }
- break;
- case 2:
- /* Double octet variable length */
- dlen = get_unaligned_le16(skb->data +
- (&pkts[i])->loff);
- hci_skb_expect(skb) += dlen;
-
- if (skb_tailroom(skb) < dlen) {
- kfree_skb(skb);
- return ERR_PTR(-EMSGSIZE);
- }
- break;
- default:
- /* Unsupported variable length */
- kfree_skb(skb);
- return ERR_PTR(-EILSEQ);
- }
-
- if (!dlen) {
- /* No more data, complete frame */
- (&pkts[i])->recv(hdev, skb);
- skb = NULL;
- }
- } else {
- /* Complete frame */
- (&pkts[i])->recv(hdev, skb);
- skb = NULL;
- }
- }
-
- return skb;
-}
diff --git a/drivers/bluetooth/hci_ag6xx.c b/drivers/bluetooth/hci_ag6xx.c
index 2d40302409ff..94588676510f 100644
--- a/drivers/bluetooth/hci_ag6xx.c
+++ b/drivers/bluetooth/hci_ag6xx.c
@@ -105,7 +105,7 @@ static int ag6xx_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- ag6xx->rx_skb = h4_recv_buf(hu->hdev, ag6xx->rx_skb, data, count,
+ ag6xx->rx_skb = h4_recv_buf(hu, ag6xx->rx_skb, data, count,
ag6xx_recv_pkts,
ARRAY_SIZE(ag6xx_recv_pkts));
if (IS_ERR(ag6xx->rx_skb)) {
diff --git a/drivers/bluetooth/hci_aml.c b/drivers/bluetooth/hci_aml.c
index 1394c575aa6d..b1f32c5a8a3f 100644
--- a/drivers/bluetooth/hci_aml.c
+++ b/drivers/bluetooth/hci_aml.c
@@ -424,7 +424,7 @@ static int aml_check_bdaddr(struct hci_dev *hdev)
if (!bacmp(&paddr->bdaddr, AML_BDADDR_DEFAULT)) {
bt_dev_info(hdev, "amlbt using default bdaddr (%pM)", &paddr->bdaddr);
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
exit:
@@ -650,7 +650,7 @@ static int aml_recv(struct hci_uart *hu, const void *data, int count)
struct aml_data *aml_data = hu->priv;
int err;
- aml_data->rx_skb = h4_recv_buf(hu->hdev, aml_data->rx_skb, data, count,
+ aml_data->rx_skb = h4_recv_buf(hu, aml_data->rx_skb, data, count,
aml_recv_pkts,
ARRAY_SIZE(aml_recv_pkts));
if (IS_ERR(aml_data->rx_skb)) {
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index dbfe34664633..8d2b5e7f0d6a 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -191,7 +191,7 @@ static int ath_recv(struct hci_uart *hu, const void *data, int count)
{
struct ath_struct *ath = hu->priv;
- ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count,
+ ath->rx_skb = h4_recv_buf(hu, ath->rx_skb, data, count,
ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
if (IS_ERR(ath->rx_skb)) {
int err = PTR_ERR(ath->rx_skb);
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 9684eb16059b..9286a5f40f55 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -326,7 +326,6 @@ static irqreturn_t bcm_host_wake(int irq, void *data)
bt_dev_dbg(bdev, "Host wake IRQ");
pm_runtime_get(bdev->dev);
- pm_runtime_mark_last_busy(bdev->dev);
pm_runtime_put_autosuspend(bdev->dev);
return IRQ_HANDLED;
@@ -643,8 +642,8 @@ static int bcm_setup(struct hci_uart *hu)
* Allow the bootloader to set a valid address through the
* device tree.
*/
- if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks))
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hu->hdev->quirks);
+ if (hci_test_quirk(hu->hdev, HCI_QUIRK_INVALID_BDADDR))
+ hci_set_quirk(hu->hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
if (!bcm_request_irq(bcm))
err = bcm_setup_sleep(hu);
@@ -698,7 +697,7 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- bcm->rx_skb = h4_recv_buf(hu->hdev, bcm->rx_skb, data, count,
+ bcm->rx_skb = h4_recv_buf(hu, bcm->rx_skb, data, count,
bcm_recv_pkts, ARRAY_SIZE(bcm_recv_pkts));
if (IS_ERR(bcm->rx_skb)) {
int err = PTR_ERR(bcm->rx_skb);
@@ -710,7 +709,6 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
mutex_lock(&bcm_device_lock);
if (bcm->dev && bcm_device_exists(bcm->dev)) {
pm_runtime_get(bcm->dev->dev);
- pm_runtime_mark_last_busy(bcm->dev->dev);
pm_runtime_put_autosuspend(bcm->dev->dev);
}
mutex_unlock(&bcm_device_lock);
@@ -748,10 +746,8 @@ static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
skb = skb_dequeue(&bcm->txq);
- if (bdev) {
- pm_runtime_mark_last_busy(bdev->dev);
+ if (bdev)
pm_runtime_put_autosuspend(bdev->dev);
- }
mutex_unlock(&bcm_device_lock);
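
The pm_runtime_mark_last_busy() deletions in this file (and in hci_h5.c and hci_intel.c below) rely on pm_runtime_put_autosuspend() updating the last-busy timestamp itself; that helper semantics comes from the runtime-PM core rather than this patch, so treat this sketch as an assumption:

	/* before: mark last-busy, then drop the reference */
	pm_runtime_get(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	/* after: put_autosuspend marks last-busy internally */
	pm_runtime_get(dev);
	pm_runtime_put_autosuspend(dev);
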
diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
index 9bce53e49cfa..45e6d84224ee 100644
--- a/drivers/bluetooth/hci_bcm4377.c
+++ b/drivers/bluetooth/hci_bcm4377.c
@@ -420,7 +420,7 @@ struct bcm4377_ring_state {
* payloads_dma:DMA address for payload buffer
* events: pointer to array of completions if waiting is allowed
* msgids: bitmap to keep track of used message ids
- * lock: Spinlock to protect access to ring structurs used in the irq handler
+ * lock: Spinlock to protect access to ring structures used in the irq handler
*/
struct bcm4377_transfer_ring {
enum bcm4377_transfer_ring_id ring_id;
@@ -1435,7 +1435,7 @@ static int bcm4377_check_bdaddr(struct bcm4377_data *bcm4377)
bda = (struct hci_rp_read_bd_addr *)skb->data;
if (!bcm4377_is_valid_bdaddr(bcm4377, &bda->bdaddr))
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &bcm4377->hdev->quirks);
+ hci_set_quirk(bcm4377->hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
kfree_skb(skb);
return 0;
@@ -2389,13 +2389,13 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hdev->setup = bcm4377_hci_setup;
if (bcm4377->hw->broken_mws_transport_config)
- set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG);
if (bcm4377->hw->broken_ext_scan)
- set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_EXT_SCAN);
if (bcm4377->hw->broken_le_coded)
- set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_CODED);
if (bcm4377->hw->broken_le_ext_adv_report_phy)
- set_bit(HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY);
pci_set_drvdata(pdev, bcm4377);
hci_set_drvdata(hdev, bcm4377);
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 664d82d1e613..591abe6d63dd 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -582,6 +582,9 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
struct bcsp_struct *bcsp = hu->priv;
const unsigned char *ptr;
+ if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+ return -EUNATCH;
+
BT_DBG("hu %p count %d rx_state %d rx_count %ld",
hu, count, bcsp->rx_state, bcsp->rx_count);
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 9070e31a68bf..ec017df8572c 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -112,7 +112,7 @@ static int h4_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- h4->rx_skb = h4_recv_buf(hu->hdev, h4->rx_skb, data, count,
+ h4->rx_skb = h4_recv_buf(hu, h4->rx_skb, data, count,
h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts));
if (IS_ERR(h4->rx_skb)) {
int err = PTR_ERR(h4->rx_skb);
@@ -151,12 +151,12 @@ int __exit h4_deinit(void)
return hci_uart_unregister_proto(&h4p);
}
-struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count)
{
- struct hci_uart *hu = hci_get_drvdata(hdev);
u8 alignment = hu->alignment ? hu->alignment : 1;
+ struct hci_dev *hdev = hu->hdev;
/* Check for error from previous call */
if (IS_ERR(skb))
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index d0d4420c1a0f..96e20a66ecd1 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -7,6 +7,8 @@
*/
#include <linux/acpi.h>
+#include <linux/bitrev.h>
+#include <linux/crc-ccitt.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
@@ -58,6 +60,7 @@ enum {
H5_TX_ACK_REQ, /* Pending ack to send */
H5_WAKEUP_DISABLE, /* Device cannot wake host */
H5_HW_FLOW_CONTROL, /* Use HW flow control */
+ H5_CRC, /* Use CRC */
};
struct h5 {
@@ -141,8 +144,8 @@ static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
static u8 h5_cfg_field(struct h5 *h5)
{
- /* Sliding window size (first 3 bits) */
- return h5->tx_win & 0x07;
+	/* Sliding window size (bits 0-2) and CRC request (bit 4). */
+ return (h5->tx_win & 0x07) | 0x10;
}
static void h5_timed_event(struct timer_list *t)
@@ -213,7 +216,6 @@ static void h5_peer_reset(struct hci_uart *hu)
static int h5_open(struct hci_uart *hu)
{
struct h5 *h5;
- const unsigned char sync[] = { 0x01, 0x7e };
BT_DBG("hu %p", hu);
@@ -243,9 +245,11 @@ static int h5_open(struct hci_uart *hu)
set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);
- /* Send initial sync request */
- h5_link_control(hu, sync, sizeof(sync));
- mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
+ /*
+	 * Wait one jiffy: the UART layer does not set HCI_UART_PROTO_READY,
+	 * which we need before we can send link packets, until this
+	 * function returns.
+ */
+ mod_timer(&h5->timer, jiffies + 1);
return 0;
}
@@ -360,8 +364,10 @@ static void h5_handle_internal_rx(struct hci_uart *hu)
h5_link_control(hu, conf_rsp, 2);
h5_link_control(hu, conf_req, 3);
} else if (memcmp(data, conf_rsp, 2) == 0) {
- if (H5_HDR_LEN(hdr) > 2)
+ if (H5_HDR_LEN(hdr) > 2) {
h5->tx_win = (data[2] & 0x07);
+ assign_bit(H5_CRC, &h5->flags, data[2] & 0x10);
+ }
BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
h5->state = H5_ACTIVE;
hci_uart_init_ready(hu);
@@ -425,7 +431,24 @@ static void h5_complete_rx_pkt(struct hci_uart *hu)
static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
- h5_complete_rx_pkt(hu);
+ struct h5 *h5 = hu->priv;
+ const unsigned char *hdr = h5->rx_skb->data;
+ u16 crc;
+ __be16 crc_be;
+
+ crc = crc_ccitt(0xffff, hdr, 4 + H5_HDR_LEN(hdr));
+ crc = bitrev16(crc);
+
+ crc_be = cpu_to_be16(crc);
+
+ if (memcmp(&crc_be, hdr + 4 + H5_HDR_LEN(hdr), 2) != 0) {
+ bt_dev_err(hu->hdev, "Received packet with invalid CRC");
+ h5_reset_rx(h5);
+ } else {
+ /* Remove CRC bytes */
+ skb_trim(h5->rx_skb, 4 + H5_HDR_LEN(hdr));
+ h5_complete_rx_pkt(hu);
+ }
return 0;
}
@@ -556,6 +579,7 @@ static void h5_reset_rx(struct h5 *h5)
h5->rx_func = h5_rx_delimiter;
h5->rx_pending = 0;
clear_bit(H5_RX_ESC, &h5->flags);
+ clear_bit(H5_CRC, &h5->flags);
}
static int h5_recv(struct hci_uart *hu, const void *data, int count)
@@ -592,7 +616,6 @@ static int h5_recv(struct hci_uart *hu, const void *data, int count)
if (hu->serdev) {
pm_runtime_get(&hu->serdev->dev);
- pm_runtime_mark_last_busy(&hu->serdev->dev);
pm_runtime_put_autosuspend(&hu->serdev->dev);
}
@@ -634,7 +657,6 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
if (hu->serdev) {
pm_runtime_get_sync(&hu->serdev->dev);
- pm_runtime_mark_last_busy(&hu->serdev->dev);
pm_runtime_put_autosuspend(&hu->serdev->dev);
}
@@ -686,6 +708,7 @@ static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
struct h5 *h5 = hu->priv;
struct sk_buff *nskb;
u8 hdr[4];
+ u16 crc;
int i;
if (!valid_packet_type(pkt_type)) {
@@ -713,6 +736,7 @@ static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
/* Reliable packet? */
if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
hdr[0] |= 1 << 7;
+ hdr[0] |= (test_bit(H5_CRC, &h5->flags) && 1) << 6;
hdr[0] |= h5->tx_seq;
h5->tx_seq = (h5->tx_seq + 1) % 8;
}
@@ -732,6 +756,15 @@ static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
for (i = 0; i < len; i++)
h5_slip_one_byte(nskb, data[i]);
+ if (H5_HDR_CRC(hdr)) {
+ crc = crc_ccitt(0xffff, hdr, 4);
+ crc = crc_ccitt(crc, data, len);
+ crc = bitrev16(crc);
+
+ h5_slip_one_byte(nskb, (crc >> 8) & 0xff);
+ h5_slip_one_byte(nskb, crc & 0xff);
+ }
+
h5_slip_delim(nskb);
return nskb;
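
Both directions use the same CRC definition: CRC-CCITT seeded with 0xffff, bit-reversed (the three-wire spec transmits the CRC LSB first), and appended big-endian after the payload. A condensed sketch of the receive-side check with a hypothetical helper name (needs linux/crc-ccitt.h and linux/bitrev.h):

	static bool h5_crc_ok(const u8 *pkt, size_t payload_len)
	{
		/* the CRC covers the 4-byte header plus the payload */
		u16 crc = bitrev16(crc_ccitt(0xffff, pkt, 4 + payload_len));

		/* the trailer carries the high byte first */
		return pkt[4 + payload_len] == ((crc >> 8) & 0xff) &&
		       pkt[5 + payload_len] == (crc & 0xff);
	}
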
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 811f33701f84..20baf2895dec 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -280,7 +280,6 @@ static irqreturn_t intel_irq(int irq, void *dev_id)
/* Host/Controller are now LPM resumed, trigger a new delayed suspend */
pm_runtime_get(&idev->pdev->dev);
- pm_runtime_mark_last_busy(&idev->pdev->dev);
pm_runtime_put_autosuspend(&idev->pdev->dev);
return IRQ_HANDLED;
@@ -371,7 +370,6 @@ static void intel_busy_work(struct work_struct *work)
list_for_each_entry(idev, &intel_device_list, list) {
if (intel->hu->tty->dev->parent == idev->pdev->dev.parent) {
pm_runtime_get(&idev->pdev->dev);
- pm_runtime_mark_last_busy(&idev->pdev->dev);
pm_runtime_put_autosuspend(&idev->pdev->dev);
break;
}
@@ -660,7 +658,7 @@ static int intel_setup(struct hci_uart *hu)
*/
if (!bacmp(&params.otp_bdaddr, BDADDR_ANY)) {
bt_dev_info(hdev, "No device address configured");
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
/* With this Intel bootloader only the hardware variant and device
@@ -972,7 +970,7 @@ static int intel_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count,
+ intel->rx_skb = h4_recv_buf(hu, intel->rx_skb, data, count,
intel_recv_pkts,
ARRAY_SIZE(intel_recv_pkts));
if (IS_ERR(intel->rx_skb)) {
@@ -1003,7 +1001,6 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
list_for_each_entry(idev, &intel_device_list, list) {
if (hu->tty->dev->parent == idev->pdev->dev.parent) {
pm_runtime_get_sync(&idev->pdev->dev);
- pm_runtime_mark_last_busy(&idev->pdev->dev);
pm_runtime_put_autosuspend(&idev->pdev->dev);
break;
}
@@ -1029,12 +1026,12 @@ static struct sk_buff *intel_dequeue(struct hci_uart *hu)
struct hci_command_hdr *cmd = (void *)skb->data;
__u16 opcode = le16_to_cpu(cmd->opcode);
- /* When the 0xfc01 command is issued to boot into
- * the operational firmware, it will actually not
- * send a command complete event. To keep the flow
- * control working inject that event here.
+ /* When the BTINTEL_HCI_OP_RESET command is issued to boot into
+ * the operational firmware, it will actually not send a command
+	 * complete event. To keep the flow control working, inject that
+	 * event here.
*/
- if (opcode == 0xfc01)
+ if (opcode == BTINTEL_HCI_OP_RESET)
inject_cmd_complete(hu->hdev, opcode);
}
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index acba83156de9..d0adae3267b4 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -667,13 +667,13 @@ static int hci_uart_register_dev(struct hci_uart *hu)
SET_HCIDEV_DEV(hdev, hu->tty->dev);
if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags))
- set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG);
if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
/* Only call open() for the protocol after hdev is fully initialized as
* open() (or a timer/workqueue it starts) may attempt to reference it.
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index e19e9bd49555..6f4e25917b86 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -429,7 +429,7 @@ static int ll_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- ll->rx_skb = h4_recv_buf(hu->hdev, ll->rx_skb, data, count,
+ ll->rx_skb = h4_recv_buf(hu, ll->rx_skb, data, count,
ll_recv_pkts, ARRAY_SIZE(ll_recv_pkts));
if (IS_ERR(ll->rx_skb)) {
int err = PTR_ERR(ll->rx_skb);
@@ -649,11 +649,11 @@ static int ll_setup(struct hci_uart *hu)
/* This means that there was an error getting the BD address
* during probe, so mark the device as having a bad address.
*/
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+ hci_set_quirk(hu->hdev, HCI_QUIRK_INVALID_BDADDR);
} else if (bacmp(&lldev->bdaddr, BDADDR_ANY)) {
err = ll_set_bdaddr(hu->hdev, &lldev->bdaddr);
if (err)
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+ hci_set_quirk(hu->hdev, HCI_QUIRK_INVALID_BDADDR);
}
/* Operational speed if any */
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
index e08222395772..8767522ec4c6 100644
--- a/drivers/bluetooth/hci_mrvl.c
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -264,9 +264,9 @@ static int mrvl_recv(struct hci_uart *hu, const void *data, int count)
!test_bit(STATE_FW_LOADED, &mrvl->flags))
return count;
- mrvl->rx_skb = h4_recv_buf(hu->hdev, mrvl->rx_skb, data, count,
- mrvl_recv_pkts,
- ARRAY_SIZE(mrvl_recv_pkts));
+ mrvl->rx_skb = h4_recv_buf(hu, mrvl->rx_skb, data, count,
+ mrvl_recv_pkts,
+ ARRAY_SIZE(mrvl_recv_pkts));
if (IS_ERR(mrvl->rx_skb)) {
int err = PTR_ERR(mrvl->rx_skb);
bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index 9fc10a16fd96..1e65b541f8ad 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -439,7 +439,7 @@ static int nokia_setup(struct hci_uart *hu)
if (btdev->man_id == NOKIA_ID_BCM2048) {
hu->hdev->set_bdaddr = btbcm_set_bdaddr;
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+ hci_set_quirk(hu->hdev, HCI_QUIRK_INVALID_BDADDR);
dev_dbg(dev, "bcm2048 has invalid bluetooth address!");
}
@@ -624,8 +624,8 @@ static int nokia_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- btdev->rx_skb = h4_recv_buf(hu->hdev, btdev->rx_skb, data, count,
- nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts));
+ btdev->rx_skb = h4_recv_buf(hu, btdev->rx_skb, data, count,
+ nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts));
if (IS_ERR(btdev->rx_skb)) {
err = PTR_ERR(btdev->rx_skb);
dev_err(dev, "Frame reassembly failed (%d)", err);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 5fe5879881f5..888176b0faa9 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1264,6 +1264,7 @@ static const struct h4_recv_pkt qca_recv_pkts[] = {
{ H4_RECV_ACL, .recv = qca_recv_acl_data },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = qca_recv_event },
+ { H4_RECV_ISO, .recv = hci_recv_frame },
{ QCA_IBS_WAKE_IND_EVENT, .recv = qca_ibs_wake_ind },
{ QCA_IBS_WAKE_ACK_EVENT, .recv = qca_ibs_wake_ack },
{ QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
@@ -1276,7 +1277,7 @@ static int qca_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
+ qca->rx_skb = h4_recv_buf(hu, qca->rx_skb, data, count,
qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
if (IS_ERR(qca->rx_skb)) {
int err = PTR_ERR(qca->rx_skb);
@@ -1892,7 +1893,7 @@ static int qca_setup(struct hci_uart *hu)
/* Enable controller to do both LE scan and BR/EDR inquiry
* simultaneously.
*/
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
switch (soc_type) {
case QCA_QCA2066:
@@ -1944,7 +1945,7 @@ retry:
case QCA_WCN7850:
qcadev = serdev_device_get_drvdata(hu->serdev);
if (qcadev->bdaddr_property_broken)
- set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BDADDR_PROPERTY_BROKEN);
hci_set_aosp_capable(hdev);
@@ -2392,10 +2393,17 @@ static int qca_serdev_probe(struct serdev_device *serdev)
*/
qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev,
"bluetooth");
- if (IS_ERR(qcadev->bt_power->pwrseq))
- return PTR_ERR(qcadev->bt_power->pwrseq);
- break;
+ /*
+ * Some modules have BT_EN enabled via a hardware pull-up,
+ * meaning it is not defined in the DTS and is not controlled
+ * through the power sequence. In such cases, fall through
+ * to follow the legacy flow.
+ */
+ if (IS_ERR(qcadev->bt_power->pwrseq))
+ qcadev->bt_power->pwrseq = NULL;
+ else
+ break;
}
fallthrough;
case QCA_WCN3950:
@@ -2480,7 +2488,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
hdev = qcadev->serdev_hu.hdev;
if (power_ctrl_enabled) {
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
hdev->shutdown = qca_power_off;
}
@@ -2489,11 +2497,11 @@ static int qca_serdev_probe(struct serdev_device *serdev)
* be queried via hci. Same with the valid le states quirk.
*/
if (data->capabilities & QCA_CAP_WIDEBAND_SPEECH)
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
- &hdev->quirks);
+ hci_set_quirk(hdev,
+ HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
if (!(data->capabilities & QCA_CAP_VALID_LE_STATES))
- set_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES);
}
return 0;
@@ -2543,7 +2551,7 @@ static void qca_serdev_shutdown(struct device *dev)
* invoked and the SOC is already in the initial state, so
* don't also need to send the VSC.
*/
- if (test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks) ||
+ if (hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP) ||
hci_dev_test_flag(hdev, HCI_SETUP))
return;
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 89a22e9b3253..593d9cefbbf9 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -152,7 +152,7 @@ static int hci_uart_close(struct hci_dev *hdev)
* BT SOC is completely powered OFF during BT OFF, holding port
* open may drain the battery.
*/
- if (test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
+ if (hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP)) {
clear_bit(HCI_UART_PROTO_READY, &hu->flags);
serdev_device_close(hu->serdev);
}
@@ -358,13 +358,13 @@ int hci_uart_register_device_priv(struct hci_uart *hu,
SET_HCIDEV_DEV(hdev, &hu->serdev->dev);
if (test_bit(HCI_UART_NO_SUSPEND_NOTIFIER, &hu->flags))
- set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NO_SUSPEND_NOTIFIER);
if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags))
- set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG);
if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
return 0;
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 5ea5dd80e297..48ac7ca9334e 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -121,10 +121,6 @@ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
unsigned int oper_speed);
-#ifdef CONFIG_BT_HCIUART_H4
-int h4_init(void);
-int h4_deinit(void);
-
struct h4_recv_pkt {
u8 type; /* Packet type */
u8 hlen; /* Header length */
@@ -162,7 +158,11 @@ struct h4_recv_pkt {
.lsize = 2, \
.maxlen = HCI_MAX_FRAME_SIZE \
-struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+#ifdef CONFIG_BT_HCIUART_H4
+int h4_init(void);
+int h4_deinit(void);
+
+struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count);
#endif
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 59f4d7bdffdc..2fef08254d78 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -380,6 +380,28 @@ static const struct file_operations force_devcoredump_fops = {
.write = force_devcd_write,
};
+static void vhci_debugfs_init(struct vhci_data *data)
+{
+ struct hci_dev *hdev = data->hdev;
+
+ debugfs_create_file("force_suspend", 0644, hdev->debugfs, data,
+ &force_suspend_fops);
+
+ debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
+ &force_wakeup_fops);
+
+ if (IS_ENABLED(CONFIG_BT_MSFTEXT))
+ debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data,
+ &msft_opcode_fops);
+
+ if (IS_ENABLED(CONFIG_BT_AOSPEXT))
+ debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data,
+ &aosp_capable_fops);
+
+ debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data,
+ &force_devcoredump_fops);
+}
+
static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
{
struct hci_dev *hdev;
@@ -415,16 +437,16 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
hdev->get_codec_config_data = vhci_get_codec_config_data;
hdev->wakeup = vhci_wakeup;
hdev->setup = vhci_setup;
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
- set_bit(HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
+ hci_set_quirk(hdev, HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED);
/* bit 6 is for external configuration */
if (opcode & 0x40)
- set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG);
/* bit 7 is for raw device */
if (opcode & 0x80)
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
@@ -434,22 +456,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
return -EBUSY;
}
- debugfs_create_file("force_suspend", 0644, hdev->debugfs, data,
- &force_suspend_fops);
-
- debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
- &force_wakeup_fops);
-
- if (IS_ENABLED(CONFIG_BT_MSFTEXT))
- debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data,
- &msft_opcode_fops);
-
- if (IS_ENABLED(CONFIG_BT_AOSPEXT))
- debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data,
- &aosp_capable_fops);
-
- debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data,
- &force_devcoredump_fops);
+ if (!IS_ERR_OR_NULL(hdev->debugfs))
+ vhci_debugfs_init(data);
hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;
@@ -651,6 +659,21 @@ static int vhci_open(struct inode *inode, struct file *file)
return 0;
}
+static void vhci_debugfs_remove(struct hci_dev *hdev)
+{
+ debugfs_lookup_and_remove("force_suspend", hdev->debugfs);
+
+ debugfs_lookup_and_remove("force_wakeup", hdev->debugfs);
+
+ if (IS_ENABLED(CONFIG_BT_MSFTEXT))
+ debugfs_lookup_and_remove("msft_opcode", hdev->debugfs);
+
+ if (IS_ENABLED(CONFIG_BT_AOSPEXT))
+ debugfs_lookup_and_remove("aosp_capable", hdev->debugfs);
+
+ debugfs_lookup_and_remove("force_devcoredump", hdev->debugfs);
+}
+
static int vhci_release(struct inode *inode, struct file *file)
{
struct vhci_data *data = file->private_data;
@@ -662,6 +685,8 @@ static int vhci_release(struct inode *inode, struct file *file)
hdev = data->hdev;
if (hdev) {
+ if (!IS_ERR_OR_NULL(hdev->debugfs))
+ vhci_debugfs_remove(hdev);
hci_unregister_dev(hdev);
hci_free_dev(hdev);
}
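
The removal helper relies on debugfs_lookup_and_remove(), which resolves a named entry under a parent and drops it in one step, so the driver does not need to cache a dentry per file. A minimal illustration (example_teardown is a made-up name):

static void example_teardown(struct dentry *parent)
{
	/* Finds "force_suspend" under parent and removes it; a miss is
	 * harmless, so teardown need not track what was created.
	 */
	debugfs_lookup_and_remove("force_suspend", parent);
}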
diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c
index 756f292df9e8..6f1a37e85c6a 100644
--- a/drivers/bluetooth/virtio_bt.c
+++ b/drivers/bluetooth/virtio_bt.c
@@ -327,17 +327,17 @@ static int virtbt_probe(struct virtio_device *vdev)
hdev->setup = virtbt_setup_intel;
hdev->shutdown = virtbt_shutdown_generic;
hdev->set_bdaddr = virtbt_set_bdaddr_intel;
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
break;
case VIRTIO_BT_CONFIG_VENDOR_REALTEK:
hdev->manufacturer = 93;
hdev->setup = virtbt_setup_realtek;
hdev->shutdown = virtbt_shutdown_generic;
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
break;
}
}
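
hci_set_quirk() replaces the open-coded set_bit() calls on the quirk storage. A hedged sketch of the idea only; the real helper is defined by the Bluetooth core headers and may store quirks differently:

static inline void example_set_quirk(struct hci_dev *hdev, int nr)
{
	/* Centralizing the bit op lets the core change the underlying
	 * storage without touching every driver call site.
	 */
	set_bit(nr, &hdev->quirks);
}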
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 7671bd158545..25845c04e562 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -176,8 +176,8 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
- mc_dev->obj_desc.type);
+ return sysfs_emit(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
+ mc_dev->obj_desc.type);
}
static DEVICE_ATTR_RO(modalias);
@@ -203,7 +203,7 @@ static ssize_t driver_override_show(struct device *dev,
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override);
+ return sysfs_emit(buf, "%s\n", mc_dev->driver_override);
}
static DEVICE_ATTR_RW(driver_override);
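
sysfs_emit() is the preferred way to fill a sysfs show() buffer: it verifies that buf is the page-aligned PAGE_SIZE buffer sysfs hands in and bounds the write, where sprintf()/snprintf() rely on the caller getting that right. A minimal sketch with made-up names:

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* Returns the number of bytes written, capped at PAGE_SIZE. */
	return sysfs_emit(buf, "%s\n", dev_name(dev));
}
static DEVICE_ATTR_RO(example);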
@@ -943,6 +943,7 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
struct fsl_mc_obj_desc endpoint_desc = {{ 0 }};
struct dprc_endpoint endpoint1 = {{ 0 }};
struct dprc_endpoint endpoint2 = {{ 0 }};
+ struct fsl_mc_bus *mc_bus;
int state, err;
mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
@@ -966,6 +967,8 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
strcpy(endpoint_desc.type, endpoint2.type);
endpoint_desc.id = endpoint2.id;
endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+ if (endpoint)
+ return endpoint;
/*
* We know that the device has an endpoint because we verified by
@@ -973,17 +976,13 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
* yet discovered by the fsl-mc bus, thus the lookup returned NULL.
* Force a rescan of the devices in this container and retry the lookup.
*/
- if (!endpoint) {
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
-
- if (mutex_trylock(&mc_bus->scan_mutex)) {
- err = dprc_scan_objects(mc_bus_dev, true);
- mutex_unlock(&mc_bus->scan_mutex);
- }
-
- if (err < 0)
- return ERR_PTR(err);
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ if (mutex_trylock(&mc_bus->scan_mutex)) {
+ err = dprc_scan_objects(mc_bus_dev, true);
+ mutex_unlock(&mc_bus->scan_mutex);
}
+ if (err < 0)
+ return ERR_PTR(err);
endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
/*
@@ -1105,6 +1104,9 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
* Get physical address of MC portal for the root DPRC:
*/
plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!plat_res)
+ return -EINVAL;
+
mc_portal_phys_addr = plat_res->start;
mc_portal_size = resource_size(plat_res);
mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff;
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
index b22c59d57c8f..31037f41893e 100644
--- a/drivers/bus/fsl-mc/mc-sys.c
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -248,7 +248,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd)
enum mc_cmd_status status;
unsigned long irq_flags = 0;
- if (in_irq() && !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
+ if (in_hardirq() && !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
return -EINVAL;
if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h
index 577965f95fda..512da7482acc 100644
--- a/drivers/bus/mhi/ep/internal.h
+++ b/drivers/bus/mhi/ep/internal.h
@@ -11,7 +11,7 @@
#include "../common.h"
-extern struct bus_type mhi_ep_bus_type;
+extern const struct bus_type mhi_ep_bus_type;
#define MHI_REG_OFFSET 0x100
#define BHI_REG_OFFSET 0x200
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index b3eafcf2a2c5..3c208b5c8446 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -403,17 +403,13 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
{
struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
struct device *dev = &mhi_cntrl->mhi_dev->dev;
- size_t tr_len, read_offset, write_offset;
+ size_t tr_len, read_offset;
struct mhi_ep_buf_info buf_info = {};
u32 len = MHI_EP_DEFAULT_MTU;
struct mhi_ring_element *el;
- bool tr_done = false;
void *buf_addr;
- u32 buf_left;
int ret;
- buf_left = len;
-
do {
/* Don't process the transfer ring if the channel is not in RUNNING state */
if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
@@ -426,24 +422,23 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
/* Check if there is data pending to be read from previous read operation */
if (mhi_chan->tre_bytes_left) {
dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
- tr_len = min(buf_left, mhi_chan->tre_bytes_left);
+ tr_len = min(len, mhi_chan->tre_bytes_left);
} else {
mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
mhi_chan->tre_bytes_left = mhi_chan->tre_size;
- tr_len = min(buf_left, mhi_chan->tre_size);
+ tr_len = min(len, mhi_chan->tre_size);
}
read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
- write_offset = len - buf_left;
buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL);
if (!buf_addr)
return -ENOMEM;
buf_info.host_addr = mhi_chan->tre_loc + read_offset;
- buf_info.dev_addr = buf_addr + write_offset;
+ buf_info.dev_addr = buf_addr;
buf_info.size = tr_len;
buf_info.cb = mhi_ep_read_completion;
buf_info.cb_buf = buf_addr;
@@ -459,16 +454,12 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
goto err_free_buf_addr;
}
- buf_left -= tr_len;
mhi_chan->tre_bytes_left -= tr_len;
- if (!mhi_chan->tre_bytes_left) {
- if (MHI_TRE_DATA_GET_IEOT(el))
- tr_done = true;
-
+ if (!mhi_chan->tre_bytes_left)
mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
- }
- } while (buf_left && !tr_done);
+		/* Read until the transfer ring becomes empty */
+ } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
return 0;
@@ -502,15 +493,11 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
} else {
/* UL channel */
- do {
- ret = mhi_ep_read_channel(mhi_cntrl, ring);
- if (ret < 0) {
- dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
- return ret;
- }
-
- /* Read until the ring becomes empty */
- } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
+ ret = mhi_ep_read_channel(mhi_cntrl, ring);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
+ return ret;
+ }
}
return 0;
@@ -1507,7 +1494,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker);
- mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
+ mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", WQ_PERCPU, 0);
if (!mhi_cntrl->wq) {
ret = -ENOMEM;
goto err_destroy_ring_item_cache;
@@ -1716,7 +1703,7 @@ static int mhi_ep_match(struct device *dev, const struct device_driver *drv)
return 0;
};
-struct bus_type mhi_ep_bus_type = {
+const struct bus_type mhi_ep_bus_type = {
.name = "mhi_ep",
.dev_name = "mhi_ep",
.match = mhi_ep_match,
diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
index efa3b6dddf4d..205d83ac069f 100644
--- a/drivers/bus/mhi/host/boot.c
+++ b/drivers/bus/mhi/host/boot.c
@@ -31,8 +31,8 @@ int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
int ret;
for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
- bhi_vec->dma_addr = mhi_buf->dma_addr;
- bhi_vec->size = mhi_buf->len;
+ bhi_vec->dma_addr = cpu_to_le64(mhi_buf->dma_addr);
+ bhi_vec->size = cpu_to_le64(mhi_buf->len);
}
dev_dbg(dev, "BHIe programming for RDDM\n");
@@ -431,8 +431,8 @@ static void mhi_firmware_copy_bhie(struct mhi_controller *mhi_cntrl,
while (remainder) {
to_cpy = min(remainder, mhi_buf->len);
memcpy(mhi_buf->buf, buf, to_cpy);
- bhi_vec->dma_addr = mhi_buf->dma_addr;
- bhi_vec->size = to_cpy;
+ bhi_vec->dma_addr = cpu_to_le64(mhi_buf->dma_addr);
+ bhi_vec->size = cpu_to_le64(to_cpy);
buf += to_cpy;
remainder -= to_cpy;
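
Declaring the device-visible BHI vector fields as __le64 and converting with cpu_to_le64() makes the byte order explicit and lets sparse flag missed conversions on big-endian hosts. Illustrative pattern (example_vec_entry is not from the patch):

struct example_vec_entry {
	__le64 dma_addr;
	__le64 size;
};

static void example_fill(struct example_vec_entry *vec,
			 dma_addr_t addr, size_t len)
{
	vec->dma_addr = cpu_to_le64(addr);	/* host order -> LE */
	vec->size = cpu_to_le64(len);
}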
diff --git a/drivers/bus/mhi/host/debugfs.c b/drivers/bus/mhi/host/debugfs.c
index cfec7811dfbb..39e45748a24c 100644
--- a/drivers/bus/mhi/host/debugfs.c
+++ b/drivers/bus/mhi/host/debugfs.c
@@ -10,6 +10,7 @@
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
+#include <linux/string_choices.h>
#include "internal.h"
static int mhi_debugfs_states_show(struct seq_file *m, void *d)
@@ -22,7 +23,7 @@ static int mhi_debugfs_states_show(struct seq_file *m, void *d)
mhi_is_active(mhi_cntrl) ? "Active" : "Inactive",
mhi_state_str(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee),
- mhi_cntrl->wake_set ? "true" : "false");
+ str_true_false(mhi_cntrl->wake_set));
/* counters */
seq_printf(m, "M0: %u M2: %u M3: %u", mhi_cntrl->M0, mhi_cntrl->M2,
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index 13e7a55f54ff..099be8dd1900 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -176,7 +176,7 @@ static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
return 0;
}
-void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
+static void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
int i;
struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
@@ -191,10 +191,9 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}
-int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
+static int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
int i, ret;
@@ -221,7 +220,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
continue;
if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
- dev_err(dev, "irq %d not available for event ring\n",
+ dev_err(mhi_cntrl->cntrl_dev, "irq %d not available for event ring\n",
mhi_event->irq);
ret = -EINVAL;
goto error_request;
@@ -232,7 +231,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
irq_flags,
"mhi", mhi_event);
if (ret) {
- dev_err(dev, "Error requesting irq:%d for ev:%d\n",
+ dev_err(mhi_cntrl->cntrl_dev, "Error requesting irq:%d for ev:%d\n",
mhi_cntrl->irq[mhi_event->irq], i);
goto error_request;
}
@@ -254,7 +253,7 @@ error_request:
return ret;
}
-void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
+static void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
int i;
struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
@@ -299,7 +298,7 @@ void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
mhi_cntrl->mhi_ctxt = NULL;
}
-int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
+static int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
struct mhi_ctxt *mhi_ctxt;
struct mhi_chan_ctxt *chan_ctxt;
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
index ce566f7d2e92..7937bb1f742c 100644
--- a/drivers/bus/mhi/host/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -25,8 +25,8 @@ struct mhi_ctxt {
};
struct bhi_vec_entry {
- u64 dma_addr;
- u64 size;
+ __le64 dma_addr;
+ __le64 size;
};
enum mhi_fw_load_type {
@@ -170,6 +170,8 @@ enum mhi_pm_state {
MHI_PM_IN_ERROR_STATE(pm_state))
#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \
(MHI_PM_M3_ENTER | MHI_PM_M3))
+#define MHI_PM_FATAL_ERROR(pm_state) ((pm_state == MHI_PM_FW_DL_ERR) || \
+ (pm_state >= MHI_PM_SYS_ERR_FAIL))
#define NR_OF_CMD_RINGS 1
#define CMD_EL_PER_RING 128
@@ -383,19 +385,12 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
/* Initialization methods */
int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
-int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
-void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
-int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
-void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info);
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
/* Automatically allocate and queue inbound buffers */
#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)
-int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
- struct mhi_chan *mhi_chan, unsigned int flags);
-
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
@@ -410,6 +405,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
+void mhi_uevent_notify(struct mhi_controller *mhi_cntrl, enum mhi_ee_type ee);
/* ISR handlers */
irqreturn_t mhi_irq_handler(int irq_number, void *dev);
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index 9bb0df43ceef..861551274319 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -512,6 +512,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
mhi_cntrl->ee = ee;
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
wake_up_all(&mhi_cntrl->state_event);
}
break;
@@ -602,7 +603,7 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
{
dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
struct mhi_ring_element *local_rp, *ev_tre;
- void *dev_rp;
+ void *dev_rp, *next_rp;
struct mhi_buf_info *buf_info;
u16 xfer_len;
@@ -621,6 +622,16 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
result.dir = mhi_chan->dir;
local_rp = tre_ring->rp;
+
+ next_rp = local_rp + 1;
+ if (next_rp >= tre_ring->base + tre_ring->len)
+ next_rp = tre_ring->base;
+ if (dev_rp != next_rp && !MHI_TRE_DATA_GET_CHAIN(local_rp)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev,
+ "Event element points to an unexpected TRE\n");
+ break;
+ }
+
while (local_rp != dev_rp) {
buf_info = buf_ring->rp;
/* If it's the last TRE, get length from the event */
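
The new check advances local_rp by one element and wraps at the end of the ring before comparing against the device pointer. The wraparound idiom, as a standalone sketch (ring_next is a made-up helper, not part of the patch):

static inline void *ring_next(void *rp, void *base, size_t len,
			      size_t el_size)
{
	rp += el_size;
	if (rp >= base + len)
		rp = base;	/* wrap to the start of the ring buffer */

	return rp;
}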
@@ -1435,7 +1446,7 @@ exit_unprepare_channel:
mutex_unlock(&mhi_chan->mutex);
}
-int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+static int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan, unsigned int flags)
{
int ret = 0;
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 589cb6722316..e3bc737313a2 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -34,26 +34,34 @@
/**
* struct mhi_pci_dev_info - MHI PCI device specific information
* @config: MHI controller configuration
+ * @vf_config: MHI controller configuration for virtual functions (optional)
* @name: name of the PCI module
* @fw: firmware path (if any)
* @edl: emergency download mode firmware path (if any)
* @edl_trigger: capable of triggering EDL mode in the device (if supported)
* @bar_num: PCI base address register to use for MHI MMIO register space
* @dma_data_width: DMA transfer word size (32 or 64 bits)
+ * @vf_dma_data_width: DMA transfer word size for VFs (optional)
* @mru_default: default MRU size for MBIM network packets
* @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead
* of inband wake support (such as sdx24)
+ * @no_m3: M3 not supported
+ * @reset_on_remove: Set true for devices that require a SoC reset during driver removal
*/
struct mhi_pci_dev_info {
const struct mhi_controller_config *config;
+ const struct mhi_controller_config *vf_config;
const char *name;
const char *fw;
const char *edl;
bool edl_trigger;
unsigned int bar_num;
unsigned int dma_data_width;
+ unsigned int vf_dma_data_width;
unsigned int mru_default;
bool sideband_wake;
+ bool no_m3;
+ bool reset_on_remove;
};
#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
@@ -294,7 +302,10 @@ static const struct mhi_pci_dev_info mhi_qcom_qdu100_info = {
.config = &mhi_qcom_qdu100_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
+ .vf_dma_data_width = 40,
.sideband_wake = false,
+ .no_m3 = true,
+ .reset_on_remove = true,
};
static const struct mhi_channel_config mhi_qcom_sa8775p_channels[] = {
@@ -490,6 +501,23 @@ static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};
+static const struct mhi_channel_config mhi_foxconn_sdx61_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(50, "NMEA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(51, "NMEA", 32, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
+};
+
static struct mhi_event_config mhi_foxconn_sdx55_events[] = {
MHI_EVENT_CONFIG_CTRL(0, 128),
MHI_EVENT_CONFIG_DATA(1, 128),
@@ -506,6 +534,15 @@ static const struct mhi_controller_config modem_foxconn_sdx55_config = {
.event_cfg = mhi_foxconn_sdx55_events,
};
+static const struct mhi_controller_config modem_foxconn_sdx61_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .num_channels = ARRAY_SIZE(mhi_foxconn_sdx61_channels),
+ .ch_cfg = mhi_foxconn_sdx61_channels,
+ .num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
+ .event_cfg = mhi_foxconn_sdx55_events,
+};
+
static const struct mhi_controller_config modem_foxconn_sdx72_config = {
.max_channels = 128,
.timeout_ms = 20000,
@@ -593,8 +630,8 @@ static const struct mhi_pci_dev_info mhi_foxconn_dw5932e_info = {
.sideband_wake = false,
};
-static const struct mhi_pci_dev_info mhi_foxconn_t99w515_info = {
- .name = "foxconn-t99w515",
+static const struct mhi_pci_dev_info mhi_foxconn_t99w640_info = {
+ .name = "foxconn-t99w640",
.edl = "qcom/sdx72m/foxconn/edl.mbn",
.edl_trigger = true,
.config = &modem_foxconn_sdx72_config,
@@ -615,6 +652,28 @@ static const struct mhi_pci_dev_info mhi_foxconn_dw5934e_info = {
.sideband_wake = false,
};
+static const struct mhi_pci_dev_info mhi_foxconn_t99w696_info = {
+ .name = "foxconn-t99w696",
+ .edl = "qcom/sdx61/foxconn/prog_firehose_lite.elf",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx61_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_t99w760_info = {
+ .name = "foxconn-t99w760",
+ .edl = "qcom/sdx35/foxconn/xbl_s_devprg_ns.melf",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx61_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
static const struct mhi_channel_config mhi_mv3x_channels[] = {
MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
@@ -695,6 +754,7 @@ static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
.config = &modem_sierra_em919x_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
+ .mru_default = 32768,
.sideband_wake = false,
};
@@ -818,6 +878,26 @@ static const struct mhi_pci_dev_info mhi_telit_fn920c04_info = {
.edl_trigger = true,
};
+static const struct mhi_pci_dev_info mhi_telit_fn990b40_info = {
+ .name = "telit-fn990b40",
+ .config = &modem_telit_fn920c04_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+ .mru_default = 32768,
+ .edl_trigger = true,
+};
+
+static const struct mhi_pci_dev_info mhi_telit_fe990b40_info = {
+ .name = "telit-fe990b40",
+ .config = &modem_telit_fn920c04_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+ .mru_default = 32768,
+ .edl_trigger = true,
+};
+
static const struct mhi_pci_dev_info mhi_netprisma_lcur57_info = {
.name = "netprisma-lcur57",
.edl = "qcom/prog_firehose_sdx24.mbn",
@@ -852,6 +932,9 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
.driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
+ /* EM929x (sdx65), use the same configuration as EM919x */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x18d7, 0x0301),
+ .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
/* Telit FN980 hardware revision v1 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
.driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
@@ -863,8 +946,17 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* Telit FE990A */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
.driver_data = (kernel_ulong_t) &mhi_telit_fe990a_info },
+ /* Foxconn T99W696, all variants */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, PCI_ANY_ID),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w696_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
+ /* Telit FN990B40 (sdx72) */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0309, 0x1c5d, 0x201a),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fn990b40_info },
+ /* Telit FE990B40 (sdx72) */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0309, 0x1c5d, 0x2025),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fe990b40_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info },
/* QDU100, x100-DU */
@@ -920,15 +1012,17 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* DW5932e (sdx62), Non-eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f9),
.driver_data = (kernel_ulong_t) &mhi_foxconn_dw5932e_info },
- /* T99W515 (sdx72) */
+ /* T99W640 (sdx72) */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe118),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w515_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w640_info },
/* DW5934e(sdx72), With eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe11d),
.driver_data = (kernel_ulong_t) &mhi_foxconn_dw5934e_info },
/* DW5934e(sdx72), Non-eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe11e),
.driver_data = (kernel_ulong_t) &mhi_foxconn_dw5934e_info },
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe123),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w760_info },
/* MV31-W (Cinterion) */
{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b3),
.driver_data = (kernel_ulong_t) &mhi_mv31_info },
@@ -965,6 +1059,7 @@ struct mhi_pci_device {
struct work_struct recovery_work;
struct timer_list health_check_timer;
unsigned long status;
+ bool reset_on_remove;
};
static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
@@ -1020,7 +1115,7 @@ static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
u16 vendor = 0;
- if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
+ if (pci_read_config_word(pci_physfn(pdev), PCI_VENDOR_ID, &vendor))
return false;
if (vendor == (u16) ~0 || vendor == 0)
@@ -1131,7 +1226,9 @@ static void mhi_pci_recovery_work(struct work_struct *work)
dev_warn(&pdev->dev, "device recovery started\n");
- timer_delete(&mhi_pdev->health_check_timer);
+ if (pdev->is_physfn)
+ timer_delete(&mhi_pdev->health_check_timer);
+
pm_runtime_forbid(&pdev->dev);
/* Clean up MHI state */
@@ -1158,7 +1255,10 @@ static void mhi_pci_recovery_work(struct work_struct *work)
dev_dbg(&pdev->dev, "Recovery completed\n");
set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
return;
err_unprepare:
@@ -1229,6 +1329,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
const struct mhi_controller_config *mhi_cntrl_config;
struct mhi_pci_device *mhi_pdev;
struct mhi_controller *mhi_cntrl;
+ unsigned int dma_data_width;
int err;
dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);
@@ -1239,14 +1340,24 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
- timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
- mhi_cntrl_config = info->config;
+ if (pdev->is_virtfn && info->vf_config)
+ mhi_cntrl_config = info->vf_config;
+ else
+ mhi_cntrl_config = info->config;
+
+	/* Initialize the health check monitor only for physical functions */
+ if (pdev->is_physfn)
+ timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
+
mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ dma_data_width = (pdev->is_virtfn && info->vf_dma_data_width) ?
+ info->vf_dma_data_width : info->dma_data_width;
+
mhi_cntrl->cntrl_dev = &pdev->dev;
mhi_cntrl->iova_start = 0;
- mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
+ mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(dma_data_width);
mhi_cntrl->fw_image = info->fw;
mhi_cntrl->edl_image = info->edl;
@@ -1258,6 +1369,9 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_cntrl->mru = info->mru_default;
mhi_cntrl->name = info->name;
+ if (pdev->is_physfn)
+ mhi_pdev->reset_on_remove = info->reset_on_remove;
+
if (info->edl_trigger)
mhi_cntrl->edl_trigger = mhi_pci_generic_edl_trigger;
@@ -1267,7 +1381,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
}
- err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
+ err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(dma_data_width));
if (err)
return err;
@@ -1304,10 +1418,11 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
/* start health check */
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
- /* Only allow runtime-suspend if PME capable (for wakeup) */
- if (pci_pme_capable(pdev, PCI_D3hot)) {
+ /* Allow runtime suspend only if both PME from D3Hot and M3 are supported */
+ if (pci_pme_capable(pdev, PCI_D3hot) && !(info->no_m3)) {
pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_mark_last_busy(&pdev->dev);
@@ -1329,7 +1444,10 @@ static void mhi_pci_remove(struct pci_dev *pdev)
struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
- timer_delete_sync(&mhi_pdev->health_check_timer);
+ pci_disable_sriov(pdev);
+
+ if (pdev->is_physfn)
+ timer_delete_sync(&mhi_pdev->health_check_timer);
cancel_work_sync(&mhi_pdev->recovery_work);
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -1341,6 +1459,9 @@ static void mhi_pci_remove(struct pci_dev *pdev)
if (pci_pme_capable(pdev, PCI_D3hot))
pm_runtime_get_noresume(&pdev->dev);
+ if (mhi_pdev->reset_on_remove)
+ mhi_soc_reset(mhi_cntrl);
+
mhi_unregister_controller(mhi_cntrl);
}
@@ -1357,7 +1478,8 @@ static void mhi_pci_reset_prepare(struct pci_dev *pdev)
dev_info(&pdev->dev, "reset\n");
- timer_delete(&mhi_pdev->health_check_timer);
+ if (pdev->is_physfn)
+ timer_delete(&mhi_pdev->health_check_timer);
/* Clean up MHI state */
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -1402,7 +1524,8 @@ static void mhi_pci_reset_done(struct pci_dev *pdev)
}
set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
}
static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
@@ -1467,7 +1590,9 @@ static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
return 0;
- timer_delete(&mhi_pdev->health_check_timer);
+ if (pdev->is_physfn)
+ timer_delete(&mhi_pdev->health_check_timer);
+
cancel_work_sync(&mhi_pdev->recovery_work);
if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
@@ -1518,7 +1643,8 @@ static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
}
/* Resume health check */
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
/* It can be a remote wakeup (no mhi runtime_get), update access time */
pm_runtime_mark_last_busy(dev);
@@ -1604,7 +1730,8 @@ static struct pci_driver mhi_pci_driver = {
.remove = mhi_pci_remove,
.shutdown = mhi_pci_shutdown,
.err_handler = &mhi_pci_err_handler,
- .driver.pm = &mhi_pci_pm_ops
+ .driver.pm = &mhi_pci_pm_ops,
+ .sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(mhi_pci_driver);
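
Wiring .sriov_configure to pci_sriov_configure_simple() means VFs are enabled and disabled purely through the standard sriov_numvfs sysfs knob, and each VF re-enters the same probe path with pdev->is_virtfn set. A sketch of the selection pattern (example_probe is illustrative only):

static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	const struct mhi_pci_dev_info *info =
		(const struct mhi_pci_dev_info *)id->driver_data;
	const struct mhi_controller_config *cfg;

	/* VFs fall back to the PF configuration when none is given. */
	cfg = (pdev->is_virtfn && info->vf_config) ? info->vf_config
						   : info->config;

	return cfg ? 0 : -EINVAL;
}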
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
index 33d92bf2fc3e..b4ef115189b5 100644
--- a/drivers/bus/mhi/host/pm.c
+++ b/drivers/bus/mhi/host/pm.c
@@ -418,6 +418,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
mhi_destroy_device);
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
/* Force MHI to be in M0 state before continuing */
ret = __mhi_device_get_sync(mhi_cntrl);
@@ -631,6 +632,8 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
/* Wake up threads waiting for state transition */
wake_up_all(&mhi_cntrl->state_event);
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
+
if (MHI_REG_ACCESS_VALID(prev_state)) {
/*
* If the device is in PBL or SBL, it will only respond to
@@ -829,6 +832,8 @@ void mhi_pm_st_worker(struct work_struct *work)
mhi_create_devices(mhi_cntrl);
if (mhi_cntrl->fbc_download)
mhi_download_amss_image(mhi_cntrl);
+
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
break;
case DEV_ST_TRANSITION_MISSION_MODE:
mhi_pm_mission_mode_transition(mhi_cntrl);
@@ -838,6 +843,7 @@ void mhi_pm_st_worker(struct work_struct *work)
mhi_cntrl->ee = MHI_EE_FP;
write_unlock_irq(&mhi_cntrl->pm_lock);
mhi_create_devices(mhi_cntrl);
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
break;
case DEV_ST_TRANSITION_READY:
mhi_ready_state_transition(mhi_cntrl);
@@ -1240,6 +1246,8 @@ static void __mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful,
write_unlock_irq(&mhi_cntrl->pm_lock);
mutex_unlock(&mhi_cntrl->pm_mutex);
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
+
if (destroy_device)
mhi_queue_state_transition(mhi_cntrl,
DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE);
@@ -1279,7 +1287,7 @@ int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
wait_event_timeout(mhi_cntrl->state_event,
MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ MHI_PM_FATAL_ERROR(mhi_cntrl->pm_state),
msecs_to_jiffies(timeout_ms));
ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
@@ -1338,3 +1346,22 @@ void mhi_device_put(struct mhi_device *mhi_dev)
read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);
+
+void mhi_uevent_notify(struct mhi_controller *mhi_cntrl, enum mhi_ee_type ee)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ char *buf[2];
+ int ret;
+
+ buf[0] = kasprintf(GFP_KERNEL, "EXEC_ENV=%s", TO_MHI_EXEC_STR(ee));
+ buf[1] = NULL;
+
+ if (!buf[0])
+ return;
+
+ ret = kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, buf);
+ if (ret)
+ dev_err(dev, "Failed to send %s uevent\n", TO_MHI_EXEC_STR(ee));
+
+ kfree(buf[0]);
+}
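
kobject_uevent_env() takes a NULL-terminated array of "KEY=value" strings, so further variables could be appended alongside EXEC_ENV if ever needed. Hedged sketch; the MHI_STATE variable is hypothetical and not part of this patch:

static void example_notify(struct device *dev)
{
	char *envp[] = {
		"EXEC_ENV=AMSS",		/* as built by mhi_uevent_notify() */
		"MHI_STATE=MISSION_MODE",	/* hypothetical extra variable */
		NULL,
	};

	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}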
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index 6c3e5c5dae10..7ce61d629a87 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -737,8 +737,7 @@ static int moxtet_irq_setup(struct moxtet *moxtet)
{
int i, ret;
- moxtet->irq.domain = irq_domain_create_simple(of_fwnode_handle(moxtet->dev->of_node),
- MOXTET_NIRQS, 0,
+ moxtet->irq.domain = irq_domain_create_simple(dev_fwnode(moxtet->dev), MOXTET_NIRQS, 0,
&moxtet_irq_domain, moxtet);
if (moxtet->irq.domain == NULL) {
dev_err(moxtet->dev, "Could not add IRQ domain\n");
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 00cb792bda18..dd94145c9b22 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -1006,7 +1006,7 @@ static __init int mvebu_mbus_debugfs_init(void)
}
fs_initcall(mvebu_mbus_debugfs_init);
-static int mvebu_mbus_suspend(void)
+static int mvebu_mbus_suspend(void *data)
{
struct mvebu_mbus_state *s = &mbus_state;
int win;
@@ -1040,7 +1040,7 @@ static int mvebu_mbus_suspend(void)
return 0;
}
-static void mvebu_mbus_resume(void)
+static void mvebu_mbus_resume(void *data)
{
struct mvebu_mbus_state *s = &mbus_state;
int win;
@@ -1069,9 +1069,13 @@ static void mvebu_mbus_resume(void)
}
}
-static struct syscore_ops mvebu_mbus_syscore_ops = {
- .suspend = mvebu_mbus_suspend,
- .resume = mvebu_mbus_resume,
+static const struct syscore_ops mvebu_mbus_syscore_ops = {
+ .suspend = mvebu_mbus_suspend,
+ .resume = mvebu_mbus_resume,
+};
+
+static struct syscore mvebu_mbus_syscore = {
+ .ops = &mvebu_mbus_syscore_ops,
};
static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
@@ -1118,7 +1122,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
writel(UNIT_SYNC_BARRIER_ALL,
mbus->mbuswins_base + UNIT_SYNC_BARRIER_OFF);
- register_syscore_ops(&mvebu_mbus_syscore_ops);
+ register_syscore(&mvebu_mbus_syscore);
return 0;
}
diff --git a/drivers/bus/stm32_rifsc.c b/drivers/bus/stm32_rifsc.c
index 4cf1b60014b7..debeaf8ea1bd 100644
--- a/drivers/bus/stm32_rifsc.c
+++ b/drivers/bus/stm32_rifsc.c
@@ -5,6 +5,7 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -25,6 +26,8 @@
#define RIFSC_RISC_PRIVCFGR0 0x30
#define RIFSC_RISC_PER0_CIDCFGR 0x100
#define RIFSC_RISC_PER0_SEMCR 0x104
+#define RIFSC_RISC_REG0_ACFGR 0x900
+#define RIFSC_RISC_REG3_AADDR 0x924
#define RIFSC_RISC_HWCFGR2 0xFEC
/*
@@ -70,6 +73,565 @@
#define RIF_CID0 0x0
#define RIF_CID1 0x1
+#if defined(CONFIG_DEBUG_FS)
+#define RIFSC_RISUP_ENTRIES 128
+#define RIFSC_RIMU_ENTRIES 16
+#define RIFSC_RISAL_SUBREGIONS 2
+#define RIFSC_RISAL_GRANULARITY 8
+
+#define RIFSC_RIMC_ATTR0 0xC10
+
+#define RIFSC_RIMC_CIDSEL BIT(2)
+#define RIFSC_RIMC_MCID_MASK GENMASK(6, 4)
+#define RIFSC_RIMC_MSEC BIT(8)
+#define RIFSC_RIMC_MPRIV BIT(9)
+
+#define RIFSC_RISC_SRCID_MASK GENMASK(6, 4)
+#define RIFSC_RISC_SRPRIV BIT(9)
+#define RIFSC_RISC_SRSEC BIT(8)
+#define RIFSC_RISC_SRRLOCK BIT(1)
+#define RIFSC_RISC_SREN BIT(0)
+#define RIFSC_RISC_SRLENGTH_MASK GENMASK(27, 16)
+#define RIFSC_RISC_SRSTART_MASK GENMASK(10, 0)
+
+static const char *stm32mp21_rifsc_rimu_names[RIFSC_RIMU_ENTRIES] = {
+ "ETR",
+ "SDMMC1",
+ "SDMMC2",
+ "SDMMC3",
+ "OTG_HS",
+ "USBH",
+ "ETH1",
+ "ETH2",
+ "RESERVED",
+ "RESERVED",
+ "DCMIPP",
+ "LTDC_L1/L2",
+ "LTDC_L3",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+};
+
+static const char *stm32mp25_rifsc_rimu_names[RIFSC_RIMU_ENTRIES] = {
+ "ETR",
+ "SDMMC1",
+ "SDMMC2",
+ "SDMMC3",
+ "USB3DR",
+ "USBH",
+ "ETH1",
+ "ETH2",
+ "PCIE",
+ "GPU",
+ "DMCIPP",
+ "LTDC_L0/L1",
+ "LTDC_L2",
+ "LTDC_ROT",
+ "VDEC",
+ "VENC"
+};
+
+static const char *stm32mp21_rifsc_risup_names[RIFSC_RISUP_ENTRIES] = {
+ "TIM1",
+ "TIM2",
+ "TIM3",
+ "TIM4",
+ "TIM5",
+ "TIM6",
+ "TIM7",
+ "TIM8",
+ "TIM10",
+ "TIM11",
+ "TIM12",
+ "TIM13",
+ "TIM14",
+ "TIM15",
+ "TIM16",
+ "TIM17",
+ "RESERVED",
+ "LPTIM1",
+ "LPTIM2",
+ "LPTIM3",
+ "LPTIM4",
+ "LPTIM5",
+ "SPI1",
+ "SPI2",
+ "SPI3",
+ "SPI4",
+ "SPI5",
+ "SPI6",
+ "RESERVED",
+ "RESERVED",
+ "SPDIFRX",
+ "USART1",
+ "USART2",
+ "USART3",
+ "UART4",
+ "UART5",
+ "USART6",
+ "UART7",
+ "RESERVED",
+ "RESERVED",
+ "LPUART1",
+ "I2C1",
+ "I2C2",
+ "I2C3",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "SAI1",
+ "SAI2",
+ "SAI3",
+ "SAI4",
+ "RESERVED",
+ "MDF1",
+ "RESERVED",
+ "FDCAN",
+ "HDP",
+ "ADC1",
+ "ADC2",
+ "ETH1",
+ "ETH2",
+ "RESERVED",
+ "USBH",
+ "RESERVED",
+ "RESERVED",
+ "OTG_HS",
+ "DDRPERFM",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "STGEN",
+ "OCTOSPI1",
+ "RESERVED",
+ "SDMMC1",
+ "SDMMC2",
+ "SDMMC3",
+ "RESERVED",
+ "LTDC_CMN",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "CSI",
+ "DCMIPP",
+ "DCMI_PSSI",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RNG1",
+ "RNG2",
+ "PKA",
+ "SAES",
+ "HASH1",
+ "HASH2",
+ "CRYP1",
+ "CRYP2",
+ "IWDG1",
+ "IWDG2",
+ "IWDG3",
+ "IWDG4",
+ "WWDG1",
+ "RESERVED",
+ "VREFBUF",
+ "DTS",
+ "RAMCFG",
+ "CRC",
+ "SERC",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "I3C1",
+ "I3C2",
+ "I3C3",
+ "RESERVED",
+ "ICACHE_DCACHE",
+ "LTDC_L1L2",
+ "LTDC_L3",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "OTFDEC1",
+ "RESERVED",
+ "IAC",
+};
+
+static const char *stm32mp25_rifsc_risup_names[RIFSC_RISUP_ENTRIES] = {
+ "TIM1",
+ "TIM2",
+ "TIM3",
+ "TIM4",
+ "TIM5",
+ "TIM6",
+ "TIM7",
+ "TIM8",
+ "TIM10",
+ "TIM11",
+ "TIM12",
+ "TIM13",
+ "TIM14",
+ "TIM15",
+ "TIM16",
+ "TIM17",
+ "TIM20",
+ "LPTIM1",
+ "LPTIM2",
+ "LPTIM3",
+ "LPTIM4",
+ "LPTIM5",
+ "SPI1",
+ "SPI2",
+ "SPI3",
+ "SPI4",
+ "SPI5",
+ "SPI6",
+ "SPI7",
+ "SPI8",
+ "SPDIFRX",
+ "USART1",
+ "USART2",
+ "USART3",
+ "UART4",
+ "UART5",
+ "USART6",
+ "UART7",
+ "UART8",
+ "UART9",
+ "LPUART1",
+ "I2C1",
+ "I2C2",
+ "I2C3",
+ "I2C4",
+ "I2C5",
+ "I2C6",
+ "I2C7",
+ "I2C8",
+ "SAI1",
+ "SAI2",
+ "SAI3",
+ "SAI4",
+ "RESERVED",
+ "MDF1",
+ "ADF1",
+ "FDCAN",
+ "HDP",
+ "ADC12",
+ "ADC3",
+ "ETH1",
+ "ETH2",
+ "RESERVED",
+ "USBH",
+ "RESERVED",
+ "RESERVED",
+ "USB3DR",
+ "COMBOPHY",
+ "PCIE",
+ "UCPD1",
+ "ETHSW_DEIP",
+ "ETHSW_ACM_CF",
+ "ETHSW_ACM_MSGBU",
+ "STGEN",
+ "OCTOSPI1",
+ "OCTOSPI2",
+ "SDMMC1",
+ "SDMMC2",
+ "SDMMC3",
+ "GPU",
+ "LTDC_CMN",
+ "DSI_CMN",
+ "RESERVED",
+ "RESERVED",
+ "LVDS",
+ "RESERVED",
+ "CSI",
+ "DCMIPP",
+ "DCMI_PSSI",
+ "VDEC",
+ "VENC",
+ "RESERVED",
+ "RNG",
+ "PKA",
+ "SAES",
+ "HASH",
+ "CRYP1",
+ "CRYP2",
+ "IWDG1",
+ "IWDG2",
+ "IWDG3",
+ "IWDG4",
+ "IWDG5",
+ "WWDG1",
+ "WWDG2",
+ "RESERVED",
+ "VREFBUF",
+ "DTS",
+ "RAMCFG",
+ "CRC",
+ "SERC",
+ "OCTOSPIM",
+ "GICV2M",
+ "RESERVED",
+ "I3C1",
+ "I3C2",
+ "I3C3",
+ "I3C4",
+ "ICACHE_DCACHE",
+ "LTDC_L0L1",
+ "LTDC_L2",
+ "LTDC_ROT",
+ "DSI_TRIG",
+ "DSI_RDFIFO",
+ "RESERVED",
+ "OTFDEC1",
+ "OTFDEC2",
+ "IAC",
+};
+struct rifsc_risup_debug_data {
+ char dev_name[15];
+ u8 dev_cid;
+ u8 dev_sem_cids;
+ u8 dev_id;
+ bool dev_cid_filt_en;
+ bool dev_sem_en;
+ bool dev_priv;
+ bool dev_sec;
+};
+
+struct rifsc_rimu_debug_data {
+ char m_name[11];
+ u8 m_cid;
+ bool cidsel;
+ bool m_sec;
+ bool m_priv;
+};
+
+struct rifsc_subreg_debug_data {
+ bool sr_sec;
+ bool sr_priv;
+ u8 sr_cid;
+ bool sr_rlock;
+ bool sr_enable;
+ u16 sr_start;
+ u16 sr_length;
+};
+
+struct stm32_rifsc_resources_names {
+ const char **device_names;
+ const char **initiator_names;
+};
+struct rifsc_dbg_private {
+ const struct stm32_rifsc_resources_names *res_names;
+ void __iomem *mmio;
+ unsigned int nb_risup;
+ unsigned int nb_rimu;
+ unsigned int nb_risal;
+};
+
+static const struct stm32_rifsc_resources_names rifsc_mp21_res_names = {
+ .device_names = stm32mp21_rifsc_risup_names,
+ .initiator_names = stm32mp21_rifsc_rimu_names,
+};
+
+static const struct stm32_rifsc_resources_names rifsc_mp25_res_names = {
+ .device_names = stm32mp25_rifsc_risup_names,
+ .initiator_names = stm32mp25_rifsc_rimu_names,
+};
+
+static void stm32_rifsc_fill_rimu_dbg_entry(struct rifsc_dbg_private *rifsc,
+ struct rifsc_rimu_debug_data *dbg_entry, int i)
+{
+ const struct stm32_rifsc_resources_names *dbg_names = rifsc->res_names;
+ u32 rimc_attr = readl_relaxed(rifsc->mmio + RIFSC_RIMC_ATTR0 + 0x4 * i);
+
+ snprintf(dbg_entry->m_name, sizeof(dbg_entry->m_name), "%s", dbg_names->initiator_names[i]);
+ dbg_entry->m_cid = FIELD_GET(RIFSC_RIMC_MCID_MASK, rimc_attr);
+ dbg_entry->cidsel = rimc_attr & RIFSC_RIMC_CIDSEL;
+ dbg_entry->m_sec = rimc_attr & RIFSC_RIMC_MSEC;
+ dbg_entry->m_priv = rimc_attr & RIFSC_RIMC_MPRIV;
+}
+
+static void stm32_rifsc_fill_dev_dbg_entry(struct rifsc_dbg_private *rifsc,
+ struct rifsc_risup_debug_data *dbg_entry, int i)
+{
+ const struct stm32_rifsc_resources_names *dbg_names = rifsc->res_names;
+ u32 cid_cfgr, sec_cfgr, priv_cfgr;
+ u8 reg_id = i / IDS_PER_RISC_SEC_PRIV_REGS;
+ u8 reg_offset = i % IDS_PER_RISC_SEC_PRIV_REGS;
+
+ cid_cfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_PER0_CIDCFGR + 0x8 * i);
+ sec_cfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_SECCFGR0 + 0x4 * reg_id);
+ priv_cfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_PRIVCFGR0 + 0x4 * reg_id);
+
+ snprintf(dbg_entry->dev_name, sizeof(dbg_entry->dev_name), "%s",
+ dbg_names->device_names[i]);
+ dbg_entry->dev_id = i;
+ dbg_entry->dev_cid_filt_en = cid_cfgr & CIDCFGR_CFEN;
+ dbg_entry->dev_sem_en = cid_cfgr & CIDCFGR_SEMEN;
+ dbg_entry->dev_cid = FIELD_GET(RIFSC_RISC_SCID_MASK, cid_cfgr);
+ dbg_entry->dev_sem_cids = FIELD_GET(RIFSC_RISC_SEMWL_MASK, cid_cfgr);
+ dbg_entry->dev_sec = sec_cfgr & BIT(reg_offset) ? true : false;
+ dbg_entry->dev_priv = priv_cfgr & BIT(reg_offset) ? true : false;
+}
+
+static void stm32_rifsc_fill_subreg_dbg_entry(struct rifsc_dbg_private *rifsc,
+ struct rifsc_subreg_debug_data *dbg_entry, int i,
+ int j)
+{
+ u32 risc_xcfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_REG0_ACFGR + 0x10 * i + 0x8 * j);
+ u32 risc_xaddr;
+
+ dbg_entry->sr_sec = risc_xcfgr & RIFSC_RISC_SRSEC;
+ dbg_entry->sr_priv = risc_xcfgr & RIFSC_RISC_SRPRIV;
+ dbg_entry->sr_cid = FIELD_GET(RIFSC_RISC_SRCID_MASK, risc_xcfgr);
+ dbg_entry->sr_rlock = risc_xcfgr & RIFSC_RISC_SRRLOCK;
+ dbg_entry->sr_enable = risc_xcfgr & RIFSC_RISC_SREN;
+ if (i == 2) {
+ risc_xaddr = readl_relaxed(rifsc->mmio + RIFSC_RISC_REG3_AADDR + 0x8 * j);
+ dbg_entry->sr_length = FIELD_GET(RIFSC_RISC_SRLENGTH_MASK, risc_xaddr);
+ dbg_entry->sr_start = FIELD_GET(RIFSC_RISC_SRSTART_MASK, risc_xaddr);
+ } else {
+ dbg_entry->sr_start = 0;
+ dbg_entry->sr_length = U16_MAX;
+ }
+}
+
+static int stm32_rifsc_conf_dump_show(struct seq_file *s, void *data)
+{
+ struct rifsc_dbg_private *rifsc = (struct rifsc_dbg_private *)s->private;
+ int i, j;
+
+ seq_puts(s, "\n=============================================\n");
+ seq_puts(s, " RIFSC dump\n");
+ seq_puts(s, "=============================================\n\n");
+
+ seq_puts(s, "\n=============================================\n");
+ seq_puts(s, " RISUP dump\n");
+ seq_puts(s, "=============================================\n");
+
+ seq_printf(s, "\n| %-15s |", "Peripheral name");
+ seq_puts(s, "| Firewall ID |");
+ seq_puts(s, "| N/SECURE |");
+ seq_puts(s, "| N/PRIVILEGED |");
+ seq_puts(s, "| CID filtering |");
+ seq_puts(s, "| Semaphore mode |");
+ seq_puts(s, "| SCID |");
+ seq_printf(s, "| %7s |\n", "SEMWL");
+
+ for (i = 0; i < RIFSC_RISUP_ENTRIES && i < rifsc->nb_risup; i++) {
+ struct rifsc_risup_debug_data d_dbg_entry;
+
+ stm32_rifsc_fill_dev_dbg_entry(rifsc, &d_dbg_entry, i);
+
+ seq_printf(s, "| %-15s |", d_dbg_entry.dev_name);
+ seq_printf(s, "| %-11d |", d_dbg_entry.dev_id);
+ seq_printf(s, "| %-8s |", d_dbg_entry.dev_sec ? "SEC" : "NSEC");
+ seq_printf(s, "| %-12s |", d_dbg_entry.dev_priv ? "PRIV" : "NPRIV");
+ seq_printf(s, "| %-13s |", str_enabled_disabled(d_dbg_entry.dev_cid_filt_en));
+ seq_printf(s, "| %-14s |", str_enabled_disabled(d_dbg_entry.dev_sem_en));
+ seq_printf(s, "| %-4d |", d_dbg_entry.dev_cid);
+ seq_printf(s, "| %#-7x |\n", d_dbg_entry.dev_sem_cids);
+ }
+
+ seq_puts(s, "\n=============================================\n");
+ seq_puts(s, " RIMU dump\n");
+ seq_puts(s, "=============================================\n");
+
+ seq_puts(s, "| RIMU's name |");
+ seq_puts(s, "| CIDSEL |");
+ seq_puts(s, "| MCID |");
+ seq_puts(s, "| N/SECURE |");
+ seq_puts(s, "| N/PRIVILEGED |\n");
+
+	for (i = 0; i < RIFSC_RIMU_ENTRIES && i < rifsc->nb_rimu; i++) {
+ struct rifsc_rimu_debug_data m_dbg_entry;
+
+ stm32_rifsc_fill_rimu_dbg_entry(rifsc, &m_dbg_entry, i);
+
+ seq_printf(s, "| %-11s |", m_dbg_entry.m_name);
+ seq_printf(s, "| %-6s |", m_dbg_entry.cidsel ? "CIDSEL" : "");
+ seq_printf(s, "| %-4d |", m_dbg_entry.m_cid);
+ seq_printf(s, "| %-8s |", m_dbg_entry.m_sec ? "SEC" : "NSEC");
+ seq_printf(s, "| %-12s |\n", m_dbg_entry.m_priv ? "PRIV" : "NPRIV");
+ }
+
+ if (rifsc->nb_risal > 0) {
+ seq_puts(s, "\n=============================================\n");
+ seq_puts(s, " RISAL dump\n");
+ seq_puts(s, "=============================================\n");
+
+ seq_puts(s, "| Memory |");
+ seq_puts(s, "| Subreg. |");
+ seq_puts(s, "| N/SECURE |");
+ seq_puts(s, "| N/PRIVILEGED |");
+ seq_puts(s, "| Subreg. CID |");
+ seq_puts(s, "| Resource lock |");
+ seq_puts(s, "| Subreg. enable |");
+ seq_puts(s, "| Subreg. start |");
+ seq_puts(s, "| Subreg. end |\n");
+
+ for (i = 0; i < rifsc->nb_risal; i++) {
+ for (j = 0; j < RIFSC_RISAL_SUBREGIONS; j++) {
+ struct rifsc_subreg_debug_data sr_dbg_entry;
+
+ stm32_rifsc_fill_subreg_dbg_entry(rifsc, &sr_dbg_entry, i, j);
+
+ seq_printf(s, "| LPSRAM%1d |", i + 1);
+ seq_printf(s, "| %1s |", (j == 0) ? "A" : "B");
+ seq_printf(s, "| %-8s |", sr_dbg_entry.sr_sec ? "SEC" : "NSEC");
+ seq_printf(s, "| %-12s |", sr_dbg_entry.sr_priv ? "PRIV" : "NPRIV");
+ seq_printf(s, "| 0x%-9x |", sr_dbg_entry.sr_cid);
+ seq_printf(s, "| %-13s |",
+ sr_dbg_entry.sr_rlock ? "locked (1)" : "unlocked (0)");
+ seq_printf(s, "| %-14s |",
+ str_enabled_disabled(sr_dbg_entry.sr_enable));
+ seq_printf(s, "| 0x%-11x |", sr_dbg_entry.sr_start);
+ seq_printf(s, "| 0x%-11x |\n", sr_dbg_entry.sr_start +
+ sr_dbg_entry.sr_length - 1);
+ }
+ }
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(stm32_rifsc_conf_dump);
+
+static int stm32_rifsc_register_debugfs(struct stm32_firewall_controller *rifsc_controller,
+ u32 nb_risup, u32 nb_rimu, u32 nb_risal)
+{
+ struct rifsc_dbg_private *rifsc_priv;
+ struct dentry *root = NULL;
+
+ rifsc_priv = devm_kzalloc(rifsc_controller->dev, sizeof(*rifsc_priv), GFP_KERNEL);
+ if (!rifsc_priv)
+ return -ENOMEM;
+
+ rifsc_priv->mmio = rifsc_controller->mmio;
+ rifsc_priv->nb_risup = nb_risup;
+ rifsc_priv->nb_rimu = nb_rimu;
+ rifsc_priv->nb_risal = nb_risal;
+ rifsc_priv->res_names = of_device_get_match_data(rifsc_controller->dev);
+
+ root = debugfs_lookup("stm32_firewall", NULL);
+ if (!root)
+ root = debugfs_create_dir("stm32_firewall", NULL);
+
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ debugfs_create_file("rifsc", 0444, root, rifsc_priv, &stm32_rifsc_conf_dump_fops);
+
+ return 0;
+}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
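
DEFINE_SHOW_ATTRIBUTE(stm32_rifsc_conf_dump) is what generates the stm32_rifsc_conf_dump_fops referenced from the probe path: it wraps the _show routine in single_open(). A rough equivalent, spelled out for illustration:

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, stm32_rifsc_conf_dump_show,
			   inode->i_private);
}

static const struct file_operations example_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};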
static bool stm32_rifsc_is_semaphore_available(void __iomem *addr)
{
return !(readl(addr) & SEMCR_MUTEX);
@@ -207,9 +769,19 @@ static int stm32_rifsc_probe(struct platform_device *pdev)
rifsc_controller->release_access = stm32_rifsc_release_access;
/* Get number of RIFSC entries*/
- nb_risup = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF1_MASK;
- nb_rimu = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF2_MASK;
- nb_risal = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF3_MASK;
+ nb_risup = FIELD_GET(HWCFGR2_CONF1_MASK,
+ readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2));
+ nb_rimu = FIELD_GET(HWCFGR2_CONF2_MASK,
+ readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2));
+ nb_risal = FIELD_GET(HWCFGR2_CONF3_MASK,
+ readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2));
+ /*
+ * On STM32MP21, RIFSC_RISC_HWCFGR2 shows an incorrect number of RISAL (NUM_RISAL is 3
+ * instead of 0). A software workaround is implemented using the st,mem-map property in the
+ * device tree. This property is absent or left empty if there is no RISAL.
+ */
+ if (of_device_is_compatible(np, "st,stm32mp21-rifsc"))
+ nb_risal = 0;
rifsc_controller->max_entries = nb_risup + nb_rimu + nb_risal;
platform_set_drvdata(pdev, rifsc_controller);
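
The conversion to FIELD_GET() also fixes the extraction: a bare AND leaves the field at its original bit position, while FIELD_GET() masks and shifts it down. Worked example, assuming for illustration that HWCFGR2_CONF2_MASK were GENMASK(15, 8):

static void example_extract(void __iomem *base)
{
	u32 reg = readl(base);				/* say 0x00001f00 */
	u32 masked = reg & GENMASK(15, 8);		/* 0x1f00: still shifted */
	u32 field = FIELD_GET(GENMASK(15, 8), reg);	/* 0x001f: the raw count */
}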
@@ -228,12 +800,29 @@ static int stm32_rifsc_probe(struct platform_device *pdev)
return rc;
}
+#if defined(CONFIG_DEBUG_FS)
+ rc = stm32_rifsc_register_debugfs(rifsc_controller, nb_risup, nb_rimu, nb_risal);
+ if (rc)
+ return dev_err_probe(rifsc_controller->dev, rc, "Failed creating debugfs entry\n");
+#endif
+
/* Populate all allowed nodes */
return of_platform_populate(np, NULL, NULL, &pdev->dev);
}
static const struct of_device_id stm32_rifsc_of_match[] = {
- { .compatible = "st,stm32mp25-rifsc" },
+ {
+ .compatible = "st,stm32mp25-rifsc",
+#if defined(CONFIG_DEBUG_FS)
+ .data = &rifsc_mp25_res_names,
+#endif
+ },
+ {
+ .compatible = "st,stm32mp21-rifsc",
+#if defined(CONFIG_DEBUG_FS)
+ .data = &rifsc_mp21_res_names,
+#endif
+ },
{}
};
MODULE_DEVICE_TABLE(of, stm32_rifsc_of_match);
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 7a33c3b31d1e..82735c58be11 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -373,7 +373,6 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
unlock:
mutex_unlock(&rsb->lock);
- pm_runtime_mark_last_busy(rsb->dev);
pm_runtime_put_autosuspend(rsb->dev);
return ret;
@@ -417,7 +416,6 @@ static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
mutex_unlock(&rsb->lock);
- pm_runtime_mark_last_busy(rsb->dev);
pm_runtime_put_autosuspend(rsb->dev);
return ret;
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 9f624e5da991..610354ce7f8f 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -48,6 +48,7 @@ enum sysc_soc {
SOC_UNKNOWN,
SOC_2420,
SOC_2430,
+ SOC_AM33,
SOC_3430,
SOC_AM35,
SOC_3630,
@@ -2170,9 +2171,8 @@ static int sysc_reset(struct sysc *ddata)
static int sysc_init_module(struct sysc *ddata)
{
bool rstctrl_deasserted = false;
- int error = 0;
+ int error = sysc_clockdomain_init(ddata);
- error = sysc_clockdomain_init(ddata);
if (error)
return error;
@@ -2913,6 +2913,7 @@ static void ti_sysc_idle(struct work_struct *work)
static const struct soc_device_attribute sysc_soc_match[] = {
SOC_FLAG("OMAP242*", SOC_2420),
SOC_FLAG("OMAP243*", SOC_2430),
+ SOC_FLAG("AM33*", SOC_AM33),
SOC_FLAG("AM35*", SOC_AM35),
SOC_FLAG("OMAP3[45]*", SOC_3430),
SOC_FLAG("OMAP3[67]*", SOC_3630),
@@ -3118,10 +3119,15 @@ static int sysc_check_active_timer(struct sysc *ddata)
* can be dropped if we stop supporting old beagleboard revisions
* A to B4 at some point.
*/
- if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35)
+ switch (sysc_soc->soc) {
+ case SOC_AM33:
+ case SOC_3430:
+ case SOC_AM35:
error = -ENXIO;
- else
+ break;
+ default:
error = -EBUSY;
+ }
if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
(ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
diff --git a/drivers/cache/Kconfig b/drivers/cache/Kconfig
index db51386c663a..1518449d47b5 100644
--- a/drivers/cache/Kconfig
+++ b/drivers/cache/Kconfig
@@ -1,9 +1,17 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Cache Drivers"
+
+menuconfig CACHEMAINT_FOR_DMA
+ bool "Cache management for noncoherent DMA"
+ depends on RISCV
+ default y
+ help
+ These drivers implement support for noncoherent DMA master devices
+ on platforms that lack the standard CPU interfaces for this.
+
+if CACHEMAINT_FOR_DMA
config AX45MP_L2_CACHE
bool "Andes Technology AX45MP L2 Cache controller"
- depends on RISCV
select RISCV_NONSTANDARD_CACHE_OPS
help
Support for the L2 cache controller on Andes Technology AX45MP platforms.
@@ -16,7 +24,6 @@ config SIFIVE_CCACHE
config STARFIVE_STARLINK_CACHE
bool "StarFive StarLink Cache controller"
- depends on RISCV
depends on ARCH_STARFIVE
depends on 64BIT
select RISCV_DMA_NONCOHERENT
@@ -24,4 +31,26 @@ config STARFIVE_STARLINK_CACHE
help
Support for the StarLink cache controller IP from StarFive.
-endmenu
+endif #CACHEMAINT_FOR_DMA
+
+menuconfig CACHEMAINT_FOR_HOTPLUG
+ bool "Cache management for memory hot plug like operations"
+ depends on GENERIC_CPU_CACHE_MAINTENANCE
+ help
+ These drivers implement cache management for flows where it is necessary
+ to flush data from all host caches.
+
+if CACHEMAINT_FOR_HOTPLUG
+
+config HISI_SOC_HHA
+ tristate "HiSilicon Hydra Home Agent (HHA) device driver"
+ depends on (ARM64 && ACPI) || COMPILE_TEST
+ help
+ The Hydra Home Agent (HHA) is responsible for cache coherency
+	  on the SoC. This driver enables the cache maintenance functions of
+ the HHA.
+
+ This driver can be built as a module. If so, the module will be
+ called hisi_soc_hha.
+
+endif #CACHEMAINT_FOR_HOTPLUG
diff --git a/drivers/cache/Makefile b/drivers/cache/Makefile
index 55c5e851034d..b3362b15d6c1 100644
--- a/drivers/cache/Makefile
+++ b/drivers/cache/Makefile
@@ -3,3 +3,5 @@
obj-$(CONFIG_AX45MP_L2_CACHE) += ax45mp_cache.o
obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o
obj-$(CONFIG_STARFIVE_STARLINK_CACHE) += starfive_starlink_cache.o
+
+obj-$(CONFIG_HISI_SOC_HHA) += hisi_soc_hha.o
diff --git a/drivers/cache/hisi_soc_hha.c b/drivers/cache/hisi_soc_hha.c
new file mode 100644
index 000000000000..25ff0f5ae79b
--- /dev/null
+++ b/drivers/cache/hisi_soc_hha.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for HiSilicon Hydra Home Agent (HHA).
+ *
+ * Copyright (c) 2025 HiSilicon Technologies Co., Ltd.
+ * Author: Yicong Yang <yangyicong@hisilicon.com>
+ * Yushan Wang <wangyushan12@huawei.com>
+ *
+ * A system typically contains multiple HHAs. Each is responsible for a subset
+ * of the physical addresses in the system, but interleave can make the mapping
+ * from a particular cache line to a responsible HHA complex. As such no
+ * filtering is done in the driver, with the hardware being responsible for
+ * responding with success even if it was not responsible for any addresses
+ * in the range on which the operation was requested.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cache_coherency.h>
+#include <linux/dev_printk.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/memregion.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#define HISI_HHA_CTRL 0x5004
+#define HISI_HHA_CTRL_EN BIT(0)
+#define HISI_HHA_CTRL_RANGE BIT(1)
+#define HISI_HHA_CTRL_TYPE GENMASK(3, 2)
+#define HISI_HHA_START_L 0x5008
+#define HISI_HHA_START_H 0x500c
+#define HISI_HHA_LEN_L 0x5010
+#define HISI_HHA_LEN_H 0x5014
+
+/* Maintenance operations are performed at 128-byte granularity */
+#define HISI_HHA_MAINT_ALIGN 128
+
+#define HISI_HHA_POLL_GAP_US 10
+#define HISI_HHA_POLL_TIMEOUT_US 50000
+
+struct hisi_soc_hha {
+ /* Must be first element */
+ struct cache_coherency_ops_inst cci;
+ /* Locks HHA instance to forbid overlapping access. */
+ struct mutex lock;
+ void __iomem *base;
+};
+
+static bool hisi_hha_cache_maintain_wait_finished(struct hisi_soc_hha *soc_hha)
+{
+ u32 val;
+
+ return !readl_poll_timeout_atomic(soc_hha->base + HISI_HHA_CTRL, val,
+ !(val & HISI_HHA_CTRL_EN),
+ HISI_HHA_POLL_GAP_US,
+ HISI_HHA_POLL_TIMEOUT_US);
+}
+
+static int hisi_soc_hha_wbinv(struct cache_coherency_ops_inst *cci,
+ struct cc_inval_params *invp)
+{
+ struct hisi_soc_hha *soc_hha =
+ container_of(cci, struct hisi_soc_hha, cci);
+ phys_addr_t top, addr = invp->addr;
+ size_t size = invp->size;
+ u32 reg;
+
+ if (!size)
+ return -EINVAL;
+
+ addr = ALIGN_DOWN(addr, HISI_HHA_MAINT_ALIGN);
+ top = ALIGN(addr + size, HISI_HHA_MAINT_ALIGN);
+ size = top - addr;
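+ /*
+ * Example (hypothetical values): addr = 0x1070 and size = 0x20 cross
+ * the granule boundary at 0x1080, so the request is widened to
+ * [0x1000, 0x1100) and covers two 128 byte granules.
+ */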
+
+ guard(mutex)(&soc_hha->lock);
+
+ if (!hisi_hha_cache_maintain_wait_finished(soc_hha))
+ return -EBUSY;
+
+ /*
+ * Hardware will search for addresses in the range [addr, addr + size - 1],
+ * last byte included, and perform maintenance in 128 byte granules
+ * on those cachelines which contain the addresses. If a given instance
+ * is either not responsible for a cacheline or that cacheline is not
+ * currently present then the search will fail, no operation will be
+ * necessary and the device will report success.
+ */
+ size -= 1;
+
+ writel(lower_32_bits(addr), soc_hha->base + HISI_HHA_START_L);
+ writel(upper_32_bits(addr), soc_hha->base + HISI_HHA_START_H);
+ writel(lower_32_bits(size), soc_hha->base + HISI_HHA_LEN_L);
+ writel(upper_32_bits(size), soc_hha->base + HISI_HHA_LEN_H);
+
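+ /*
+ * The CTRL write below only starts the operation; completion is
+ * detected by polling for HISI_HHA_CTRL_EN to clear, either from the
+ * ->done() callback or before the next operation is issued.
+ */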
+ reg = FIELD_PREP(HISI_HHA_CTRL_TYPE, 1); /* Clean Invalid */
+ reg |= HISI_HHA_CTRL_RANGE | HISI_HHA_CTRL_EN;
+ writel(reg, soc_hha->base + HISI_HHA_CTRL);
+
+ return 0;
+}
+
+static int hisi_soc_hha_done(struct cache_coherency_ops_inst *cci)
+{
+ struct hisi_soc_hha *soc_hha =
+ container_of(cci, struct hisi_soc_hha, cci);
+
+ guard(mutex)(&soc_hha->lock);
+ if (!hisi_hha_cache_maintain_wait_finished(soc_hha))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static const struct cache_coherency_ops hha_ops = {
+ .wbinv = hisi_soc_hha_wbinv,
+ .done = hisi_soc_hha_done,
+};
+
+static int hisi_soc_hha_probe(struct platform_device *pdev)
+{
+ struct hisi_soc_hha *soc_hha;
+ struct resource *mem;
+ int ret;
+
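+ /*
+ * The ops instance embeds the driver state and is refcounted by the
+ * cache coherency core, so error paths release it with
+ * cache_coherency_ops_instance_put() instead of a plain kfree().
+ */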
+ soc_hha = cache_coherency_ops_instance_alloc(&hha_ops,
+ struct hisi_soc_hha, cci);
+ if (!soc_hha)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, soc_hha);
+
+ mutex_init(&soc_hha->lock);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ ret = -ENOMEM;
+ goto err_free_cci;
+ }
+
+ soc_hha->base = ioremap(mem->start, resource_size(mem));
+ if (!soc_hha->base) {
+ ret = dev_err_probe(&pdev->dev, -ENOMEM,
+ "failed to remap io memory");
+ goto err_free_cci;
+ }
+
+ ret = cache_coherency_ops_instance_register(&soc_hha->cci);
+ if (ret)
+ goto err_iounmap;
+
+ return 0;
+
+err_iounmap:
+ iounmap(soc_hha->base);
+err_free_cci:
+ cache_coherency_ops_instance_put(&soc_hha->cci);
+ return ret;
+}
+
+static void hisi_soc_hha_remove(struct platform_device *pdev)
+{
+ struct hisi_soc_hha *soc_hha = platform_get_drvdata(pdev);
+
+ cache_coherency_ops_instance_unregister(&soc_hha->cci);
+ iounmap(soc_hha->base);
+ cache_coherency_ops_instance_put(&soc_hha->cci);
+}
+
+static const struct acpi_device_id hisi_soc_hha_ids[] = {
+ { "HISI0511", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_soc_hha_ids);
+
+static struct platform_driver hisi_soc_hha_driver = {
+ .driver = {
+ .name = "hisi_soc_hha",
+ .acpi_match_table = hisi_soc_hha_ids,
+ },
+ .probe = hisi_soc_hha_probe,
+ .remove = hisi_soc_hha_remove,
+};
+
+module_platform_driver(hisi_soc_hha_driver);
+
+MODULE_IMPORT_NS("CACHE_COHERENCY");
+MODULE_DESCRIPTION("HiSilicon Hydra Home Agent driver supporting cache maintenance");
+MODULE_AUTHOR("Yicong Yang <yangyicong@hisilicon.com>");
+MODULE_AUTHOR("Yushan Wang <wangyushan12@huawei.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cache/sifive_ccache.c b/drivers/cache/sifive_ccache.c
index e1a283805ea7..a86800b123b9 100644
--- a/drivers/cache/sifive_ccache.c
+++ b/drivers/cache/sifive_ccache.c
@@ -151,16 +151,16 @@ static void ccache_flush_range(phys_addr_t start, size_t len)
if (!len)
return;
- mb();
+ mb(); /* complete earlier memory accesses before the cache flush */
for (line = ALIGN_DOWN(start, SIFIVE_CCACHE_LINE_SIZE); line < end;
line += SIFIVE_CCACHE_LINE_SIZE) {
#ifdef CONFIG_32BIT
- writel(line >> 4, ccache_base + SIFIVE_CCACHE_FLUSH32);
+ writel_relaxed(line >> 4, ccache_base + SIFIVE_CCACHE_FLUSH32);
#else
- writeq(line, ccache_base + SIFIVE_CCACHE_FLUSH64);
+ writeq_relaxed(line, ccache_base + SIFIVE_CCACHE_FLUSH64);
#endif
- mb();
}
+ mb(); /* issue later memory accesses after the cache flush */
}
static const struct riscv_nonstd_cache_ops ccache_mgmt_ops __initconst = {
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 21a10552da61..31ba1f8c1f78 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -624,9 +624,6 @@ int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi)
if (check_media_type == 1)
cdi->options |= (int) CDO_CHECK_TYPE;
- if (CDROM_CAN(CDC_MRW_W))
- cdi->exit = cdrom_mrw_exit;
-
if (cdi->ops->read_cdda_bpc)
cdi->cdda_method = CDDA_BPC_FULL;
else
@@ -651,9 +648,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
list_del(&cdi->list);
mutex_unlock(&cdrom_mutex);
- if (cdi->exit)
- cdi->exit(cdi);
-
cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
}
EXPORT_SYMBOL(unregister_cdrom);
@@ -1264,6 +1258,8 @@ void cdrom_release(struct cdrom_device_info *cdi)
cd_dbg(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n",
cdi->name);
cdrom_dvd_rw_close_write(cdi);
+ if (CDROM_CAN(CDC_MRW_W))
+ cdrom_mrw_exit(cdi);
if ((cdo->capability & CDC_LOCK) && !cdi->keeplocked) {
cd_dbg(CD_CLOSE, "Unlocking door!\n");
diff --git a/drivers/cdx/Kconfig b/drivers/cdx/Kconfig
index a08958485e31..1f1e360507d7 100644
--- a/drivers/cdx/Kconfig
+++ b/drivers/cdx/Kconfig
@@ -7,7 +7,7 @@
config CDX_BUS
bool "CDX Bus driver"
- depends on OF && ARM64
+ depends on OF && ARM64 || COMPILE_TEST
help
Driver to enable Composable DMA Transfer(CDX) Bus. CDX bus
exposes Fabric devices which uses composable DMA IP to the
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
index 092306ca2541..b39af2f1937f 100644
--- a/drivers/cdx/cdx.c
+++ b/drivers/cdx/cdx.c
@@ -170,7 +170,7 @@ static int cdx_unregister_device(struct device *dev,
return 0;
}
-static void cdx_unregister_devices(struct bus_type *bus)
+static void cdx_unregister_devices(const struct bus_type *bus)
{
/* Reset all the devices attached to cdx bus */
bus_for_each_dev(bus, NULL, NULL, cdx_unregister_device);
@@ -310,7 +310,7 @@ static int cdx_probe(struct device *dev)
* Setup MSI device data so that generic MSI alloc/free can
* be used by the device driver.
*/
- if (cdx->msi_domain) {
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ) && cdx->msi_domain) {
error = msi_setup_device_data(&cdx_dev->dev);
if (error)
return error;
@@ -651,7 +651,7 @@ static struct attribute *cdx_bus_attrs[] = {
};
ATTRIBUTE_GROUPS(cdx_bus);
-struct bus_type cdx_bus_type = {
+const struct bus_type cdx_bus_type = {
.name = "cdx",
.match = cdx_bus_match,
.probe = cdx_probe,
@@ -833,7 +833,7 @@ int cdx_device_add(struct cdx_dev_params *dev_params)
((cdx->id << CDX_CONTROLLER_ID_SHIFT) | (cdx_dev->bus_num & CDX_BUS_NUM_MASK)),
cdx_dev->dev_num);
- if (cdx->msi_domain) {
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ) && cdx->msi_domain) {
cdx_dev->num_msi = dev_params->num_msi;
dev_set_msi_domain(&cdx_dev->dev, cdx->msi_domain);
}
diff --git a/drivers/cdx/cdx_msi.c b/drivers/cdx/cdx_msi.c
index 3388a5d1462c..91b95422b263 100644
--- a/drivers/cdx/cdx_msi.c
+++ b/drivers/cdx/cdx_msi.c
@@ -174,6 +174,7 @@ struct irq_domain *cdx_msi_domain_init(struct device *dev)
}
parent = irq_find_matching_fwnode(of_fwnode_handle(parent_node), DOMAIN_BUS_NEXUS);
+ of_node_put(parent_node);
if (!parent || !msi_get_domain_info(parent)) {
dev_err(dev, "unable to locate ITS domain\n");
return NULL;
diff --git a/drivers/cdx/controller/Kconfig b/drivers/cdx/controller/Kconfig
index f8e729761aee..a480b62cbd1f 100644
--- a/drivers/cdx/controller/Kconfig
+++ b/drivers/cdx/controller/Kconfig
@@ -9,7 +9,7 @@ if CDX_BUS
config CDX_CONTROLLER
tristate "CDX bus controller"
- select GENERIC_MSI_IRQ
+ depends on HAS_DMA
select REMOTEPROC
select RPMSG
help
diff --git a/drivers/cdx/controller/bitfield.h b/drivers/cdx/controller/bitfield.h
deleted file mode 100644
index 567f8ec47582..000000000000
--- a/drivers/cdx/controller/bitfield.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2013 Solarflare Communications Inc.
- * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
- */
-
-#ifndef CDX_BITFIELD_H
-#define CDX_BITFIELD_H
-
-#include <linux/bitfield.h>
-
-/* Lowest bit numbers and widths */
-#define CDX_DWORD_LBN 0
-#define CDX_DWORD_WIDTH 32
-
-/* Specified attribute (e.g. LBN) of the specified field */
-#define CDX_VAL(field, attribute) field ## _ ## attribute
-/* Low bit number of the specified field */
-#define CDX_LOW_BIT(field) CDX_VAL(field, LBN)
-/* Bit width of the specified field */
-#define CDX_WIDTH(field) CDX_VAL(field, WIDTH)
-/* High bit number of the specified field */
-#define CDX_HIGH_BIT(field) (CDX_LOW_BIT(field) + CDX_WIDTH(field) - 1)
-
-/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
-struct cdx_dword {
- __le32 cdx_u32;
-};
-
-/* Value expanders for printk */
-#define CDX_DWORD_VAL(dword) \
- ((unsigned int)le32_to_cpu((dword).cdx_u32))
-
-/*
- * Extract bit field portion [low,high) from the 32-bit little-endian
- * element which contains bits [min,max)
- */
-#define CDX_DWORD_FIELD(dword, field) \
- (FIELD_GET(GENMASK(CDX_HIGH_BIT(field), CDX_LOW_BIT(field)), \
- le32_to_cpu((dword).cdx_u32)))
-
-/*
- * Creates the portion of the named bit field that lies within the
- * range [min,max).
- */
-#define CDX_INSERT_FIELD(field, value) \
- (FIELD_PREP(GENMASK(CDX_HIGH_BIT(field), \
- CDX_LOW_BIT(field)), value))
-
-/*
- * Creates the portion of the named bit fields that lie within the
- * range [min,max).
- */
-#define CDX_INSERT_FIELDS(field1, value1, \
- field2, value2, \
- field3, value3, \
- field4, value4, \
- field5, value5, \
- field6, value6, \
- field7, value7) \
- (CDX_INSERT_FIELD(field1, (value1)) | \
- CDX_INSERT_FIELD(field2, (value2)) | \
- CDX_INSERT_FIELD(field3, (value3)) | \
- CDX_INSERT_FIELD(field4, (value4)) | \
- CDX_INSERT_FIELD(field5, (value5)) | \
- CDX_INSERT_FIELD(field6, (value6)) | \
- CDX_INSERT_FIELD(field7, (value7)))
-
-#define CDX_POPULATE_DWORD(dword, ...) \
- (dword).cdx_u32 = cpu_to_le32(CDX_INSERT_FIELDS(__VA_ARGS__))
-
-/* Populate a dword field with various numbers of arguments */
-#define CDX_POPULATE_DWORD_7 CDX_POPULATE_DWORD
-#define CDX_POPULATE_DWORD_6(dword, ...) \
- CDX_POPULATE_DWORD_7(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_5(dword, ...) \
- CDX_POPULATE_DWORD_6(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_4(dword, ...) \
- CDX_POPULATE_DWORD_5(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_3(dword, ...) \
- CDX_POPULATE_DWORD_4(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_2(dword, ...) \
- CDX_POPULATE_DWORD_3(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_1(dword, ...) \
- CDX_POPULATE_DWORD_2(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_SET_DWORD(dword) \
- CDX_POPULATE_DWORD_1(dword, CDX_DWORD, 0xffffffff)
-
-#endif /* CDX_BITFIELD_H */
diff --git a/drivers/cdx/controller/cdx_controller.c b/drivers/cdx/controller/cdx_controller.c
index d623f9c7517a..280f207735da 100644
--- a/drivers/cdx/controller/cdx_controller.c
+++ b/drivers/cdx/controller/cdx_controller.c
@@ -14,7 +14,7 @@
#include "cdx_controller.h"
#include "../cdx.h"
#include "mcdi_functions.h"
-#include "mcdi.h"
+#include "mcdid.h"
static unsigned int cdx_mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd)
{
@@ -193,21 +193,19 @@ static int xlnx_cdx_probe(struct platform_device *pdev)
cdx->ops = &cdx_ops;
/* Create MSI domain */
- cdx->msi_domain = cdx_msi_domain_init(&pdev->dev);
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ))
+ cdx->msi_domain = cdx_msi_domain_init(&pdev->dev);
if (!cdx->msi_domain) {
- dev_err(&pdev->dev, "cdx_msi_domain_init() failed");
- ret = -ENODEV;
+ ret = dev_err_probe(&pdev->dev, -ENODEV, "cdx_msi_domain_init() failed");
goto cdx_msi_fail;
}
ret = cdx_setup_rpmsg(pdev);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to register CDX RPMsg transport\n");
+ dev_err_probe(&pdev->dev, ret, "Failed to register CDX RPMsg transport\n");
goto cdx_rpmsg_fail;
}
- dev_info(&pdev->dev, "Successfully registered CDX controller with RPMsg as transport\n");
return 0;
cdx_rpmsg_fail:
@@ -246,31 +244,13 @@ MODULE_DEVICE_TABLE(of, cdx_match_table);
static struct platform_driver cdx_pdriver = {
.driver = {
.name = "cdx-controller",
- .pm = NULL,
.of_match_table = cdx_match_table,
},
.probe = xlnx_cdx_probe,
.remove = xlnx_cdx_remove,
};
-static int __init cdx_controller_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&cdx_pdriver);
- if (ret)
- pr_err("platform_driver_register() failed: %d\n", ret);
-
- return ret;
-}
-
-static void __exit cdx_controller_exit(void)
-{
- platform_driver_unregister(&cdx_pdriver);
-}
-
-module_init(cdx_controller_init);
-module_exit(cdx_controller_exit);
+module_platform_driver(cdx_pdriver);
MODULE_AUTHOR("AMD Inc.");
MODULE_DESCRIPTION("CDX controller for AMD devices");
diff --git a/drivers/cdx/controller/cdx_rpmsg.c b/drivers/cdx/controller/cdx_rpmsg.c
index 04b578a0be17..59aabd99fa8f 100644
--- a/drivers/cdx/controller/cdx_rpmsg.c
+++ b/drivers/cdx/controller/cdx_rpmsg.c
@@ -15,7 +15,7 @@
#include "../cdx.h"
#include "cdx_controller.h"
#include "mcdi_functions.h"
-#include "mcdi.h"
+#include "mcdid.h"
static struct rpmsg_device_id cdx_rpmsg_id_table[] = {
{ .name = "mcdi_ipc" },
@@ -129,8 +129,7 @@ static int cdx_rpmsg_probe(struct rpmsg_device *rpdev)
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = rpdev->dst;
- strscpy(chinfo.name, cdx_rpmsg_id_table[0].name,
- strlen(cdx_rpmsg_id_table[0].name));
+ strscpy(chinfo.name, cdx_rpmsg_id_table[0].name, sizeof(chinfo.name));
cdx_mcdi->ept = rpmsg_create_ept(rpdev, cdx_rpmsg_cb, NULL, chinfo);
if (!cdx_mcdi->ept) {
diff --git a/drivers/cdx/controller/mcdi.c b/drivers/cdx/controller/mcdi.c
index e760f8d347cc..2e82ffc18d89 100644
--- a/drivers/cdx/controller/mcdi.c
+++ b/drivers/cdx/controller/mcdi.c
@@ -23,9 +23,10 @@
#include <linux/log2.h>
#include <linux/net_tstamp.h>
#include <linux/wait.h>
+#include <linux/cdx/bitfield.h>
-#include "bitfield.h"
-#include "mcdi.h"
+#include <linux/cdx/mcdi.h>
+#include "mcdid.h"
static void cdx_mcdi_cancel_cmd(struct cdx_mcdi *cdx, struct cdx_mcdi_cmd *cmd);
static void cdx_mcdi_wait_for_cleanup(struct cdx_mcdi *cdx);
@@ -99,6 +100,19 @@ static unsigned long cdx_mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd
return cdx->mcdi_ops->mcdi_rpc_timeout(cdx, cmd);
}
+/**
+ * cdx_mcdi_init - Initialize MCDI (Management Controller Driver Interface) state
+ * @cdx: Handle to the CDX MCDI structure
+ *
+ * This function allocates and initializes internal MCDI structures and resources
+ * for the CDX device, including the workqueue, locking primitives, and command
+ * tracking mechanisms. It sets the initial operating mode and prepares the device
+ * for MCDI operations.
+ *
+ * Return:
+ * * 0 - on success
+ * * -ENOMEM - if memory allocation or workqueue creation fails
+ */
int cdx_mcdi_init(struct cdx_mcdi *cdx)
{
struct cdx_mcdi_iface *mcdi;
@@ -128,7 +142,16 @@ fail2:
fail:
return rc;
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_init);
+/**
+ * cdx_mcdi_finish - Clean up MCDI (Management Controller Driver Interface) state
+ * @cdx: Handle to the CDX MCDI structure
+ *
+ * This function cleans up the MCDI resources associated with a cdx_mcdi
+ * structure and destroys the MCDI workqueue.
+ */
void cdx_mcdi_finish(struct cdx_mcdi *cdx)
{
struct cdx_mcdi_iface *mcdi;
@@ -143,6 +166,7 @@ void cdx_mcdi_finish(struct cdx_mcdi *cdx)
kfree(cdx->mcdi);
cdx->mcdi = NULL;
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_finish);
static bool cdx_mcdi_flushed(struct cdx_mcdi_iface *mcdi, bool ignore_cleanups)
{
@@ -553,6 +577,19 @@ static void cdx_mcdi_start_or_queue(struct cdx_mcdi_iface *mcdi,
cdx_mcdi_cmd_start_or_queue(mcdi, cmd);
}
+/**
+ * cdx_mcdi_process_cmd - Process an incoming MCDI response
+ * @cdx: Handle to the CDX MCDI structure
+ * @outbuf: Pointer to the response buffer received from the management controller
+ * @len: Length of the response buffer in bytes
+ *
+ * This function handles a response from the management controller. It locates the
+ * corresponding command using the sequence number embedded in the header,
+ * completes the command if it is still pending, and initiates any necessary cleanup.
+ *
+ * The function assumes that the response buffer is well-formed and at least one
+ * dword in size.
+ */
void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len)
{
struct cdx_mcdi_iface *mcdi;
@@ -590,6 +627,7 @@ void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int le
cdx_mcdi_process_cleanup_list(mcdi->cdx, &cleanup_list);
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_process_cmd);
static void cdx_mcdi_cmd_work(struct work_struct *context)
{
@@ -757,6 +795,7 @@ int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
return cdx_mcdi_rpc_sync(cdx, cmd, inbuf, inlen, outbuf, outlen,
outlen_actual, false);
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_rpc);
/**
* cdx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
diff --git a/drivers/cdx/controller/mcdi.h b/drivers/cdx/controller/mcdi.h
deleted file mode 100644
index 54a65e9760ae..000000000000
--- a/drivers/cdx/controller/mcdi.h
+++ /dev/null
@@ -1,242 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright 2008-2013 Solarflare Communications Inc.
- * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
- */
-
-#ifndef CDX_MCDI_H
-#define CDX_MCDI_H
-
-#include <linux/mutex.h>
-#include <linux/kref.h>
-#include <linux/rpmsg.h>
-
-#include "bitfield.h"
-#include "mc_cdx_pcol.h"
-
-#ifdef DEBUG
-#define CDX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
-#define CDX_WARN_ON_PARANOID(x) WARN_ON(x)
-#else
-#define CDX_WARN_ON_ONCE_PARANOID(x) do {} while (0)
-#define CDX_WARN_ON_PARANOID(x) do {} while (0)
-#endif
-
-/**
- * enum cdx_mcdi_mode - MCDI transaction mode
- * @MCDI_MODE_EVENTS: wait for an mcdi response callback.
- * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
- */
-enum cdx_mcdi_mode {
- MCDI_MODE_EVENTS,
- MCDI_MODE_FAIL,
-};
-
-#define MCDI_RPC_TIMEOUT (10 * HZ)
-#define MCDI_RPC_LONG_TIMEOU (60 * HZ)
-#define MCDI_RPC_POST_RST_TIME (10 * HZ)
-
-#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
-
-/**
- * enum cdx_mcdi_cmd_state - State for an individual MCDI command
- * @MCDI_STATE_QUEUED: Command not started and is waiting to run.
- * @MCDI_STATE_RETRY: Command was submitted and MC rejected with no resources,
- * as MC have too many outstanding commands. Command will be retried once
- * another command returns.
- * @MCDI_STATE_RUNNING: Command was accepted and is running.
- * @MCDI_STATE_RUNNING_CANCELLED: Command is running but the issuer cancelled
- * the command.
- * @MCDI_STATE_FINISHED: Processing of this command has completed.
- */
-
-enum cdx_mcdi_cmd_state {
- MCDI_STATE_QUEUED,
- MCDI_STATE_RETRY,
- MCDI_STATE_RUNNING,
- MCDI_STATE_RUNNING_CANCELLED,
- MCDI_STATE_FINISHED,
-};
-
-/**
- * struct cdx_mcdi - CDX MCDI Firmware interface, to interact
- * with CDX controller.
- * @mcdi: MCDI interface
- * @mcdi_ops: MCDI operations
- * @r5_rproc : R5 Remoteproc device handle
- * @rpdev: RPMsg device
- * @ept: RPMsg endpoint
- * @work: Post probe work
- */
-struct cdx_mcdi {
- /* MCDI interface */
- struct cdx_mcdi_data *mcdi;
- const struct cdx_mcdi_ops *mcdi_ops;
-
- struct rproc *r5_rproc;
- struct rpmsg_device *rpdev;
- struct rpmsg_endpoint *ept;
- struct work_struct work;
-};
-
-struct cdx_mcdi_ops {
- void (*mcdi_request)(struct cdx_mcdi *cdx,
- const struct cdx_dword *hdr, size_t hdr_len,
- const struct cdx_dword *sdu, size_t sdu_len);
- unsigned int (*mcdi_rpc_timeout)(struct cdx_mcdi *cdx, unsigned int cmd);
-};
-
-typedef void cdx_mcdi_async_completer(struct cdx_mcdi *cdx,
- unsigned long cookie, int rc,
- struct cdx_dword *outbuf,
- size_t outlen_actual);
-
-/**
- * struct cdx_mcdi_cmd - An outstanding MCDI command
- * @ref: Reference count. There will be one reference if the command is
- * in the mcdi_iface cmd_list, another if it's on a cleanup list,
- * and a third if it's queued in the work queue.
- * @list: The data for this entry in mcdi->cmd_list
- * @cleanup_list: The data for this entry in a cleanup list
- * @work: The work item for this command, queued in mcdi->workqueue
- * @mcdi: The mcdi_iface for this command
- * @state: The state of this command
- * @inlen: inbuf length
- * @inbuf: Input buffer
- * @quiet: Whether to silence errors
- * @reboot_seen: Whether a reboot has been seen during this command,
- * to prevent duplicates
- * @seq: Sequence number
- * @started: Jiffies this command was started at
- * @cookie: Context for completion function
- * @completer: Completion function
- * @handle: Command handle
- * @cmd: Command number
- * @rc: Return code
- * @outlen: Length of output buffer
- * @outbuf: Output buffer
- */
-struct cdx_mcdi_cmd {
- struct kref ref;
- struct list_head list;
- struct list_head cleanup_list;
- struct work_struct work;
- struct cdx_mcdi_iface *mcdi;
- enum cdx_mcdi_cmd_state state;
- size_t inlen;
- const struct cdx_dword *inbuf;
- bool quiet;
- bool reboot_seen;
- u8 seq;
- unsigned long started;
- unsigned long cookie;
- cdx_mcdi_async_completer *completer;
- unsigned int handle;
- unsigned int cmd;
- int rc;
- size_t outlen;
- struct cdx_dword *outbuf;
- /* followed by inbuf data if necessary */
-};
-
-/**
- * struct cdx_mcdi_iface - MCDI protocol context
- * @cdx: The associated NIC
- * @iface_lock: Serialise access to this structure
- * @outstanding_cleanups: Count of cleanups
- * @cmd_list: List of outstanding and running commands
- * @workqueue: Workqueue used for delayed processing
- * @cmd_complete_wq: Waitqueue for command completion
- * @db_held_by: Command the MC doorbell is in use by
- * @seq_held_by: Command each sequence number is in use by
- * @prev_handle: The last used command handle
- * @mode: Poll for mcdi completion, or wait for an mcdi_event
- * @prev_seq: The last used sequence number
- * @new_epoch: Indicates start of day or start of MC reboot recovery
- */
-struct cdx_mcdi_iface {
- struct cdx_mcdi *cdx;
- /* Serialise access */
- struct mutex iface_lock;
- unsigned int outstanding_cleanups;
- struct list_head cmd_list;
- struct workqueue_struct *workqueue;
- wait_queue_head_t cmd_complete_wq;
- struct cdx_mcdi_cmd *db_held_by;
- struct cdx_mcdi_cmd *seq_held_by[16];
- unsigned int prev_handle;
- enum cdx_mcdi_mode mode;
- u8 prev_seq;
- bool new_epoch;
-};
-
-/**
- * struct cdx_mcdi_data - extra state for NICs that implement MCDI
- * @iface: Interface/protocol state
- * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
- */
-struct cdx_mcdi_data {
- struct cdx_mcdi_iface iface;
- u32 fn_flags;
-};
-
-static inline struct cdx_mcdi_iface *cdx_mcdi_if(struct cdx_mcdi *cdx)
-{
- return cdx->mcdi ? &cdx->mcdi->iface : NULL;
-}
-
-int cdx_mcdi_init(struct cdx_mcdi *cdx);
-void cdx_mcdi_finish(struct cdx_mcdi *cdx);
-
-void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len);
-int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
- const struct cdx_dword *inbuf, size_t inlen,
- struct cdx_dword *outbuf, size_t outlen, size_t *outlen_actual);
-int cdx_mcdi_rpc_async(struct cdx_mcdi *cdx, unsigned int cmd,
- const struct cdx_dword *inbuf, size_t inlen,
- cdx_mcdi_async_completer *complete,
- unsigned long cookie);
-int cdx_mcdi_wait_for_quiescence(struct cdx_mcdi *cdx,
- unsigned int timeout_jiffies);
-
-/*
- * We expect that 16- and 32-bit fields in MCDI requests and responses
- * are appropriately aligned, but 64-bit fields are only
- * 32-bit-aligned.
- */
-#define MCDI_DECLARE_BUF(_name, _len) struct cdx_dword _name[DIV_ROUND_UP(_len, 4)] = {{0}}
-#define _MCDI_PTR(_buf, _offset) \
- ((u8 *)(_buf) + (_offset))
-#define MCDI_PTR(_buf, _field) \
- _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
-#define _MCDI_CHECK_ALIGN(_ofst, _align) \
- ((void)BUILD_BUG_ON_ZERO((_ofst) & ((_align) - 1)), \
- (_ofst))
-#define _MCDI_DWORD(_buf, _field) \
- ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
-
-#define MCDI_BYTE(_buf, _field) \
- ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
- *MCDI_PTR(_buf, _field))
-#define MCDI_WORD(_buf, _field) \
- ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2), \
- le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
-#define MCDI_SET_DWORD(_buf, _field, _value) \
- CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), CDX_DWORD, _value)
-#define MCDI_DWORD(_buf, _field) \
- CDX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), CDX_DWORD)
-#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
- CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
- MC_CMD_ ## _name1, _value1)
-#define MCDI_SET_QWORD(_buf, _field, _value) \
- do { \
- CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
- CDX_DWORD, (u32)(_value)); \
- CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
- CDX_DWORD, (u64)(_value) >> 32); \
- } while (0)
-#define MCDI_QWORD(_buf, _field) \
- (CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], CDX_DWORD) | \
- (u64)CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], CDX_DWORD) << 32)
-
-#endif /* CDX_MCDI_H */
diff --git a/drivers/cdx/controller/mcdi_functions.c b/drivers/cdx/controller/mcdi_functions.c
index 885c69e6ebe5..8ae2d99be81e 100644
--- a/drivers/cdx/controller/mcdi_functions.c
+++ b/drivers/cdx/controller/mcdi_functions.c
@@ -5,7 +5,6 @@
#include <linux/module.h>
-#include "mcdi.h"
#include "mcdi_functions.h"
int cdx_mcdi_get_num_buses(struct cdx_mcdi *cdx)
diff --git a/drivers/cdx/controller/mcdi_functions.h b/drivers/cdx/controller/mcdi_functions.h
index b9942affdc6b..57fd1bae706b 100644
--- a/drivers/cdx/controller/mcdi_functions.h
+++ b/drivers/cdx/controller/mcdi_functions.h
@@ -8,7 +8,8 @@
#ifndef CDX_MCDI_FUNCTIONS_H
#define CDX_MCDI_FUNCTIONS_H
-#include "mcdi.h"
+#include <linux/cdx/mcdi.h>
+#include "mcdid.h"
#include "../cdx.h"
/**
diff --git a/drivers/cdx/controller/mcdid.h b/drivers/cdx/controller/mcdid.h
new file mode 100644
index 000000000000..7fc29f099265
--- /dev/null
+++ b/drivers/cdx/controller/mcdid.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2025, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_MCDID_H
+#define CDX_MCDID_H
+
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <linux/rpmsg.h>
+
+#include "mc_cdx_pcol.h"
+
+#ifdef DEBUG
+#define CDX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
+#define CDX_WARN_ON_PARANOID(x) WARN_ON(x)
+#else
+#define CDX_WARN_ON_ONCE_PARANOID(x) do {} while (0)
+#define CDX_WARN_ON_PARANOID(x) do {} while (0)
+#endif
+
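+/* Presumably 8 bytes of MCDI header plus the largest possible SDU. */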
+#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
+
+static inline struct cdx_mcdi_iface *cdx_mcdi_if(struct cdx_mcdi *cdx)
+{
+ return cdx->mcdi ? &cdx->mcdi->iface : NULL;
+}
+
+int cdx_mcdi_rpc_async(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ cdx_mcdi_async_completer *complete,
+ unsigned long cookie);
+int cdx_mcdi_wait_for_quiescence(struct cdx_mcdi *cdx,
+ unsigned int timeout_jiffies);
+
+/*
+ * We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned.
+ */
+#define MCDI_BYTE(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
+ *MCDI_PTR(_buf, _field))
+#define MCDI_WORD(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2), \
+ le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
+#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
+ CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1)
+#define MCDI_SET_QWORD(_buf, _field, _value) \
+ do { \
+ CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
+ CDX_DWORD, (u32)(_value)); \
+ CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
+ CDX_DWORD, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_QWORD(_buf, _field) \
+ (CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], CDX_DWORD) | \
+ (u64)CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], CDX_DWORD) << 32)
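+
+/*
+ * Usage sketch (MC_CMD_FOO and its fields are hypothetical): with a
+ * request buffer sized for MC_CMD_FOO_IN_LEN, a 64-bit field is written
+ * with MCDI_SET_QWORD(inbuf, FOO_IN_ADDR, addr) and read back from a
+ * response with MCDI_QWORD(outbuf, FOO_OUT_ADDR).
+ */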
+
+#endif /* CDX_MCDID_H */
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index ae6196760556..d2cfc584e202 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -237,7 +237,7 @@ config APPLICOM
config SONYPI
tristate "Sony Vaio Programmable I/O Control Device support"
- depends on X86_32 && PCI && INPUT
+ depends on X86_32 && PCI && INPUT && HAS_IOPORT
depends on ACPI_EC || !ACPI
help
This driver enables access to the Sony Programmable I/O Control
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index e9b360cdc99a..1291369b9126 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -6,6 +6,7 @@
obj-y += mem.o random.o
obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o
obj-y += misc.o
+obj-$(CONFIG_TEST_MISC_MINOR) += misc_minor_kunit.o
obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
diff --git a/drivers/char/adi.c b/drivers/char/adi.c
index f9bec10a6064..0849d933a2d5 100644
--- a/drivers/char/adi.c
+++ b/drivers/char/adi.c
@@ -80,8 +80,8 @@ static ssize_t adi_read(struct file *file, char __user *buf,
bytes_read += ver_buf_sz;
ver_buf_idx = 0;
- ver_buf_sz = min(count - bytes_read,
- (size_t)MAX_BUF_SZ);
+ ver_buf_sz = min_t(size_t, count - bytes_read,
+ MAX_BUF_SZ);
}
}
@@ -131,7 +131,7 @@ static ssize_t adi_write(struct file *file, const char __user *buf,
ssize_t ret;
int i;
- if (count <= 0)
+ if (count == 0)
return -EINVAL;
ver_buf_sz = min_t(size_t, count, MAX_BUF_SZ);
@@ -157,7 +157,7 @@ static ssize_t adi_write(struct file *file, const char __user *buf,
}
bytes_written += ver_buf_sz;
- ver_buf_sz = min(count - bytes_written, (size_t)MAX_BUF_SZ);
+ ver_buf_sz = min_t(size_t, count - bytes_written, MAX_BUF_SZ);
} while (bytes_written < count);
(*offp) += bytes_written;
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index bf490967241a..2505df1f4e69 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -720,11 +720,6 @@ static const struct pci_device_id agp_amd64_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
-static const struct pci_device_id agp_amd64_pci_promisc_table[] = {
- { PCI_DEVICE_CLASS(0, 0) },
- { }
-};
-
static DEFINE_SIMPLE_DEV_PM_OPS(agp_amd64_pm_ops, NULL, agp_amd64_resume);
static struct pci_driver agp_amd64_pci_driver = {
@@ -739,6 +734,7 @@ static struct pci_driver agp_amd64_pci_driver = {
/* Not static due to IOMMU code calling it early. */
int __init agp_amd64_init(void)
{
+ struct pci_dev *pdev = NULL;
int err = 0;
if (agp_off)
@@ -767,9 +763,13 @@ int __init agp_amd64_init(void)
}
/* Look for any AGP bridge */
- agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
- err = driver_attach(&agp_amd64_pci_driver.driver);
- if (err == 0 && agp_bridges_found == 0) {
+ for_each_pci_dev(pdev)
+ if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
+ pci_add_dynid(&agp_amd64_pci_driver,
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor,
+ pdev->subsystem_device, 0, 0, 0);
+ if (agp_bridges_found == 0) {
pci_unregister_driver(&agp_amd64_pci_driver);
err = -ENODEV;
}
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 53ce352f7197..4aa5d1c76f83 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -143,17 +143,9 @@ static DEFINE_MUTEX(state_lock);
/*
- * Compatibility cruft until the IPAQ people move over to the new
- * interface.
- */
-static void __apm_get_power_status(struct apm_power_info *info)
-{
-}
-
-/*
* This allows machines to provide their own "apm get power status" function.
*/
-void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
+void (*apm_get_power_status)(struct apm_power_info *);
EXPORT_SYMBOL(apm_get_power_status);
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 9fed9706d9cd..c138c468f3a4 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -835,7 +835,10 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ret = -ENOTTY;
break;
}
- Dummy = readb(apbs[IndexCard].RamIO + VERS);
+
+ if (cmd != 6)
+ Dummy = readb(apbs[IndexCard].RamIO + VERS);
+
kfree(adgl);
mutex_unlock(&ac_mutex);
return ret;
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index 497fc167cb8c..231cbf7b300f 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -69,7 +69,8 @@ MODULE_VERSION(VERSION_STR);
static int __init hangcheck_parse_tick(char *str)
{
int par;
- if (get_option(&str,&par))
+
+ if (get_option(&str, &par))
hangcheck_tick = par;
return 1;
}
@@ -77,7 +78,8 @@ static int __init hangcheck_parse_tick(char *str)
static int __init hangcheck_parse_margin(char *str)
{
int par;
- if (get_option(&str,&par))
+
+ if (get_option(&str, &par))
hangcheck_margin = par;
return 1;
}
@@ -85,7 +87,8 @@ static int __init hangcheck_parse_margin(char *str)
static int __init hangcheck_parse_reboot(char *str)
{
int par;
- if (get_option(&str,&par))
+
+ if (get_option(&str, &par))
hangcheck_reboot = par;
return 1;
}
@@ -93,7 +96,8 @@ static int __init hangcheck_parse_reboot(char *str)
static int __init hangcheck_parse_dump_tasks(char *str)
{
int par;
- if (get_option(&str,&par))
+
+ if (get_option(&str, &par))
hangcheck_dump_tasks = par;
return 1;
}
@@ -126,23 +130,23 @@ static void hangcheck_fire(struct timer_list *unused)
if (tsc_diff > hangcheck_tsc_margin) {
if (hangcheck_dump_tasks) {
- printk(KERN_CRIT "Hangcheck: Task state:\n");
+ pr_crit("Hangcheck: Task state:\n");
#ifdef CONFIG_MAGIC_SYSRQ
handle_sysrq('t');
#endif /* CONFIG_MAGIC_SYSRQ */
}
if (hangcheck_reboot) {
- printk(KERN_CRIT "Hangcheck: hangcheck is restarting the machine.\n");
+ pr_crit("Hangcheck: hangcheck is restarting the machine.\n");
emergency_restart();
} else {
- printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n");
+ pr_crit("Hangcheck: hangcheck value past margin!\n");
}
}
#if 0
/*
* Enable to investigate delays in detail
*/
- printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n",
+ pr_debug("Hangcheck: called %lld ns since last time (%lld ns overshoot)\n",
tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ);
#endif
mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
@@ -152,7 +156,7 @@ static void hangcheck_fire(struct timer_list *unused)
static int __init hangcheck_init(void)
{
- printk("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n",
+ pr_debug("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n",
VERSION_STR, hangcheck_tick, hangcheck_margin);
hangcheck_tsc_margin =
(unsigned long long)hangcheck_margin + hangcheck_tick;
@@ -168,7 +172,7 @@ static int __init hangcheck_init(void)
static void __exit hangcheck_exit(void)
{
timer_delete_sync(&hangcheck_ticktock);
- printk("Hangcheck: Stopped hangcheck timer.\n");
+ pr_debug("Hangcheck: Stopped hangcheck timer.\n");
}
module_init(hangcheck_init);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 0713ea2b2a51..4f5ccd3a1f56 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -867,7 +867,7 @@ int hpet_alloc(struct hpet_data *hdp)
printk(KERN_INFO "hpet%u: at MMIO 0x%lx, IRQ%s",
hpetp->hp_which, hdp->hd_phys_address,
- hpetp->hp_ntimer > 1 ? "s" : "");
+ str_plural(hpetp->hp_ntimer));
for (i = 0; i < hpetp->hp_ntimer; i++)
printk(KERN_CONT "%s %u", i > 0 ? "," : "", hdp->hd_irq[i]);
printk(KERN_CONT "\n");
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index c85827843447..492a2a61a65b 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -77,7 +77,7 @@ config HW_RANDOM_AIROHA
config HW_RANDOM_ATMEL
tristate "Atmel Random Number Generator support"
- depends on (ARCH_AT91 || COMPILE_TEST)
+ depends on (ARCH_MICROCHIP || COMPILE_TEST)
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -312,6 +312,7 @@ config HW_RANDOM_INGENIC_TRNG
config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
depends on ARCH_NOMADIK || COMPILE_TEST
+ depends on ARM_AMBA
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index d2b00458761e..6ed24be3481d 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -80,7 +80,6 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
ret = 4;
out:
- pm_runtime_mark_last_busy(trng->dev);
pm_runtime_put_sync_autosuspend(trng->dev);
return ret;
}
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index aa2b135e3ee2..6d6ac409efcf 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -138,12 +138,11 @@ static const struct of_device_id bcm2835_rng_of_match[] = {
{ .compatible = "brcm,bcm6368-rng"},
{},
};
+MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
static int bcm2835_rng_probe(struct platform_device *pdev)
{
- const struct bcm2835_rng_of_data *of_data;
struct device *dev = &pdev->dev;
- const struct of_device_id *rng_id;
struct bcm2835_rng_priv *priv;
int err;
@@ -171,12 +170,10 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
priv->rng.cleanup = bcm2835_rng_cleanup;
if (dev_of_node(dev)) {
- rng_id = of_match_node(bcm2835_rng_of_match, dev->of_node);
- if (!rng_id)
- return -EINVAL;
+ const struct bcm2835_rng_of_data *of_data;
/* Check for rng init function, execute it */
- of_data = rng_id->data;
+ of_data = of_device_get_match_data(dev);
if (of_data)
priv->mask_interrupts = of_data->mask_interrupts;
}
@@ -191,8 +188,6 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
return err;
}
-MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
-
static const struct platform_device_id bcm2835_rng_devtype[] = {
{ .name = "bcm2835-rng" },
{ .name = "bcm63xx-rng" },
diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c
index 4db198849695..a5be9258037f 100644
--- a/drivers/char/hw_random/cctrng.c
+++ b/drivers/char/hw_random/cctrng.c
@@ -98,7 +98,6 @@ static void cc_trng_pm_put_suspend(struct device *dev)
{
int rc = 0;
- pm_runtime_mark_last_busy(dev);
rc = pm_runtime_put_autosuspend(dev);
if (rc)
dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", rc);
diff --git a/drivers/char/hw_random/cn10k-rng.c b/drivers/char/hw_random/cn10k-rng.c
index 31935316a160..3b4e78182e14 100644
--- a/drivers/char/hw_random/cn10k-rng.c
+++ b/drivers/char/hw_random/cn10k-rng.c
@@ -188,7 +188,7 @@ static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rng->reg_base = pcim_iomap(pdev, 0, 0);
if (!rng->reg_base)
- return dev_err_probe(&pdev->dev, -ENOMEM, "Error while mapping CSRs, exiting\n");
+ return -ENOMEM;
rng->ops.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"cn10k-rng-%s", dev_name(&pdev->dev));
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 018316f54621..96d7fe41b373 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -341,6 +341,9 @@ static ssize_t rng_current_store(struct device *dev,
if (sysfs_streq(buf, "")) {
err = enable_best_rng();
+ } else if (sysfs_streq(buf, "none")) {
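+ /* Writing "none" drops the current rng and pins that user choice. */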
+ cur_rng_set_by_user = 1;
+ drop_current_rng();
} else {
list_for_each_entry(rng, &rng_list, list) {
if (sysfs_streq(rng->name, buf)) {
@@ -392,7 +395,7 @@ static ssize_t rng_available_show(struct device *dev,
strlcat(buf, rng->name, PAGE_SIZE);
strlcat(buf, " ", PAGE_SIZE);
}
- strlcat(buf, "\n", PAGE_SIZE);
+ strlcat(buf, "none\n", PAGE_SIZE);
mutex_unlock(&rng_mutex);
return strlen(buf);
@@ -542,10 +545,10 @@ int hwrng_register(struct hwrng *rng)
init_completion(&rng->dying);
/* Adjust quality field to always have a proper value */
- rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024);
+ rng->quality = min3(default_quality, 1024, rng->quality ?: 1024);
- if (!current_rng ||
- (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
+ if (!cur_rng_set_by_user &&
+ (!current_rng || rng->quality > current_rng->quality)) {
/*
* Set new rng as current as the new rng source
* provides better entropy quality and was not
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index d8fd8a354482..9e408144a10c 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -231,6 +231,10 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
if (IS_ERR(ks_sa_rng->regmap_cfg))
return dev_err_probe(dev, -EINVAL, "syscon_node_to_regmap failed\n");
+ ks_sa_rng->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(ks_sa_rng->clk))
+ return dev_err_probe(dev, PTR_ERR(ks_sa_rng->clk), "Failed to get clock\n");
+
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
index b7fa1bc1122b..5808d09d12c4 100644
--- a/drivers/char/hw_random/mtk-rng.c
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -98,7 +98,6 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
max -= sizeof(u32);
}
- pm_runtime_mark_last_busy(priv->dev);
pm_runtime_put_sync_autosuspend(priv->dev);
return retval || !wait ? retval : -EIO;
@@ -143,7 +142,9 @@ static int mtk_rng_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, priv);
pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
- devm_pm_runtime_enable(&pdev->dev);
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
dev_info(&pdev->dev, "registered RNG driver\n");
diff --git a/drivers/char/hw_random/n2rng.h b/drivers/char/hw_random/n2rng.h
index 9a870f5dc371..7612f15a261f 100644
--- a/drivers/char/hw_random/n2rng.h
+++ b/drivers/char/hw_random/n2rng.h
@@ -48,7 +48,7 @@
#define HV_RNG_NUM_CONTROL 4
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern unsigned long sun4v_rng_get_diag_ctl(void);
extern unsigned long sun4v_rng_ctl_read_v1(unsigned long ctl_regs_ra,
unsigned long *state,
@@ -147,6 +147,6 @@ struct n2rng {
#define N2RNG_BUSY_LIMIT 100
#define N2RNG_HCHECK_LIMIT 100
-#endif /* !(__ASSEMBLY__) */
+#endif /* !(__ASSEMBLER__) */
#endif /* _N2RNG_H */
diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c
index 3e308c890bd2..40d6e29dea03 100644
--- a/drivers/char/hw_random/npcm-rng.c
+++ b/drivers/char/hw_random/npcm-rng.c
@@ -80,7 +80,6 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
max--;
}
- pm_runtime_mark_last_busy(priv->dev);
pm_runtime_put_sync_autosuspend(priv->dev);
return retval || !wait ? retval : -EIO;
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index 8064c792caf0..aa71f61c3dc9 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -56,7 +56,6 @@ static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w)
else
r = 4;
- pm_runtime_mark_last_busy(ddata->dev);
pm_runtime_put_autosuspend(ddata->dev);
return r;
diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c
index fb4a30b95507..6e3ed4b85605 100644
--- a/drivers/char/hw_random/rockchip-rng.c
+++ b/drivers/char/hw_random/rockchip-rng.c
@@ -223,7 +223,6 @@ static int rk3568_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
/* Read random data stored in the registers */
memcpy_fromio(buf, rk_rng->base + TRNG_RNG_DOUT, to_read);
out:
- pm_runtime_mark_last_busy(rk_rng->dev);
pm_runtime_put_sync_autosuspend(rk_rng->dev);
return (ret < 0) ? ret : to_read;
@@ -263,7 +262,6 @@ static int rk3576_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
memcpy_fromio(buf, rk_rng->base + RKRNG_TRNG_DATA0, to_read);
out:
- pm_runtime_mark_last_busy(rk_rng->dev);
pm_runtime_put_sync_autosuspend(rk_rng->dev);
return (ret < 0) ? ret : to_read;
@@ -355,7 +353,6 @@ out:
/* close the TRNG */
rk_rng_writel(rk_rng, TRNG_V1_CTRL_NOP, TRNG_V1_CTRL);
- pm_runtime_mark_last_busy(rk_rng->dev);
pm_runtime_put_sync_autosuspend(rk_rng->dev);
return (ret < 0) ? ret : to_read;
diff --git a/drivers/char/hw_random/s390-trng.c b/drivers/char/hw_random/s390-trng.c
index d27e32e9bfee..3024d5e9fd61 100644
--- a/drivers/char/hw_random/s390-trng.c
+++ b/drivers/char/hw_random/s390-trng.c
@@ -9,8 +9,7 @@
* Author(s): Harald Freudenberger <freude@de.ibm.com>
*/
-#define KMSG_COMPONENT "trng"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "trng: " fmt
#include <linux/hw_random.h>
#include <linux/kernel.h>
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 98edbe796bc5..9a8c00586ab0 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -255,7 +255,6 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
}
exit_rpm:
- pm_runtime_mark_last_busy(priv->dev);
pm_runtime_put_sync_autosuspend(priv->dev);
return retval || !wait ? retval : -EIO;
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index b95f6d0f17ed..e61f06393209 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -150,7 +150,7 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
priv->rng_ops.quality = pdata->quality;
}
- priv->period = ns_to_ktime(period * NSEC_PER_USEC);
+ priv->period = us_to_ktime(period);
init_completion(&priv->completion);
hrtimer_setup(&priv->timer, timeriomem_rng_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index f4adc6feb3b2..92bed266d07c 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -84,6 +84,13 @@ config IPMI_IPMB
bus, and it also supports direct messaging on the bus using
IPMB direct messages. This module requires I2C support.
+config IPMI_LS2K
+ bool 'Loongson-2K IPMI interface'
+ depends on LOONGARCH
+ select MFD_LS2K_BMC_CORE
+ help
+ Provides a driver for Loongson-2K IPMI interfaces.
+
config IPMI_POWERNV
depends on PPC_POWERNV
tristate 'POWERNV (OPAL firmware) IPMI interface'
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index e0944547c9d0..4ea450a82242 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -8,6 +8,7 @@ ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o \
ipmi_si_mem_io.o
ipmi_si-$(CONFIG_HAS_IOPORT) += ipmi_si_port_io.o
ipmi_si-$(CONFIG_PCI) += ipmi_si_pci.o
+ipmi_si-$(CONFIG_IPMI_LS2K) += ipmi_si_ls2k.o
ipmi_si-$(CONFIG_PARISC) += ipmi_si_parisc.o
obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
index 6a4f279c7c1f..3a51e58b2487 100644
--- a/drivers/char/ipmi/ipmi_ipmb.c
+++ b/drivers/char/ipmi/ipmi_ipmb.c
@@ -404,8 +404,7 @@ static void ipmi_ipmb_shutdown(void *send_info)
ipmi_ipmb_stop_thread(iidev);
}
-static void ipmi_ipmb_sender(void *send_info,
- struct ipmi_smi_msg *msg)
+static int ipmi_ipmb_sender(void *send_info, struct ipmi_smi_msg *msg)
{
struct ipmi_ipmb_dev *iidev = send_info;
unsigned long flags;
@@ -417,6 +416,7 @@ static void ipmi_ipmb_sender(void *send_info,
spin_unlock_irqrestore(&iidev->lock, flags);
up(&iidev->wake_thread);
+ return IPMI_CC_NO_ERROR;
}
static void ipmi_ipmb_request_events(void *send_info)
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index ecfcb50302f6..efda90dcf5b3 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -122,10 +122,10 @@ struct si_sm_data {
unsigned long error0_timeout;
};
-static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs,
- struct si_sm_io *io, enum kcs_states state)
+static unsigned int init_kcs_data(struct si_sm_data *kcs,
+ struct si_sm_io *io)
{
- kcs->state = state;
+ kcs->state = KCS_IDLE;
kcs->io = io;
kcs->write_pos = 0;
kcs->write_count = 0;
@@ -140,12 +140,6 @@ static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs,
return 2;
}
-static unsigned int init_kcs_data(struct si_sm_data *kcs,
- struct si_sm_io *io)
-{
- return init_kcs_data_with_state(kcs, io, KCS_IDLE);
-}
-
static inline unsigned char read_status(struct si_sm_data *kcs)
{
return kcs->io->inputb(kcs->io, 1);
@@ -276,7 +270,7 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
if (size > MAX_KCS_WRITE_SIZE)
return IPMI_REQ_LEN_EXCEEDED_ERR;
- if (kcs->state != KCS_IDLE) {
+ if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) {
dev_warn(kcs->io->dev, "KCS in invalid state %d\n", kcs->state);
return IPMI_NOT_IN_MY_STATE_ERR;
}
@@ -501,7 +495,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
}
if (kcs->state == KCS_HOSED) {
- init_kcs_data_with_state(kcs, kcs->io, KCS_ERROR0);
+ init_kcs_data(kcs, kcs->io);
return SI_SM_HOSED;
}
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 064944ae9fdc..3f48fc6ab596 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -38,7 +38,9 @@
#define IPMI_DRIVER_VERSION "39.2"
-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user);
+static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
+ struct ipmi_user *user);
static int ipmi_init_msghandler(void);
static void smi_work(struct work_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
@@ -50,6 +52,8 @@ static void intf_free(struct kref *ref);
static bool initialized;
static bool drvregistered;
+static struct timer_list ipmi_timer;
+
/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
enum ipmi_panic_event_op {
IPMI_SEND_PANIC_EVENT_NONE,
@@ -432,6 +436,7 @@ struct ipmi_smi {
atomic_t nr_users;
struct device_attribute nr_users_devattr;
struct device_attribute nr_msgs_devattr;
+ struct device_attribute maintenance_mode_devattr;
/* Used for wake ups at startup. */
@@ -464,7 +469,7 @@ struct ipmi_smi {
* interface to match them up with their responses. A routine
* is called periodically to time the items in this list.
*/
- spinlock_t seq_lock;
+ struct mutex seq_lock;
struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
int curr_seq;
@@ -539,7 +544,11 @@ struct ipmi_smi {
/* For handling of maintenance mode. */
int maintenance_mode;
- bool maintenance_mode_enable;
+
+#define IPMI_MAINTENANCE_MODE_STATE_OFF 0
+#define IPMI_MAINTENANCE_MODE_STATE_FIRMWARE 1
+#define IPMI_MAINTENANCE_MODE_STATE_RESET 2
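+/*
+ * The state values are ordered by increasing severity; request handling
+ * only moves maintenance_mode_state to a higher state (see
+ * i_ipmi_req_sysintf()).
+ */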
+ int maintenance_mode_state;
int auto_maintenance_timeout;
spinlock_t maintenance_mode_lock; /* Used in a timer... */
@@ -590,7 +599,8 @@ static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
struct ipmi_device_id *id,
bool guid_set, guid_t *guid, int intf_num);
-static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
+static int __scan_channels(struct ipmi_smi *intf,
+ struct ipmi_device_id *id, bool rescan);
static void free_ipmi_user(struct kref *ref)
{
@@ -955,7 +965,6 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
* risk. At this moment, simply skip it in that case.
*/
ipmi_free_recv_msg(msg);
- atomic_dec(&msg->user->nr_msgs);
} else {
/*
* Deliver it in smi_work. The message will hold a
@@ -1116,12 +1125,11 @@ static int intf_find_seq(struct ipmi_smi *intf,
struct ipmi_recv_msg **recv_msg)
{
int rv = -ENODEV;
- unsigned long flags;
if (seq >= IPMI_IPMB_NUM_SEQ)
return -EINVAL;
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
if (intf->seq_table[seq].inuse) {
struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
@@ -1134,7 +1142,7 @@ static int intf_find_seq(struct ipmi_smi *intf,
rv = 0;
}
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
return rv;
}
@@ -1145,14 +1153,13 @@ static int intf_start_seq_timer(struct ipmi_smi *intf,
long msgid)
{
int rv = -ENODEV;
- unsigned long flags;
unsigned char seq;
unsigned long seqid;
GET_SEQ_FROM_MSGID(msgid, seq, seqid);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
/*
* We do this verification because the user can be deleted
* while a message is outstanding.
@@ -1163,7 +1170,7 @@ static int intf_start_seq_timer(struct ipmi_smi *intf,
ent->timeout = ent->orig_timeout;
rv = 0;
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
return rv;
}
@@ -1174,7 +1181,6 @@ static int intf_err_seq(struct ipmi_smi *intf,
unsigned int err)
{
int rv = -ENODEV;
- unsigned long flags;
unsigned char seq;
unsigned long seqid;
struct ipmi_recv_msg *msg = NULL;
@@ -1182,7 +1188,7 @@ static int intf_err_seq(struct ipmi_smi *intf,
GET_SEQ_FROM_MSGID(msgid, seq, seqid);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
/*
* We do this verification because the user can be deleted
* while a message is outstanding.
@@ -1196,7 +1202,7 @@ static int intf_err_seq(struct ipmi_smi *intf,
msg = ent->recv_msg;
rv = 0;
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
if (msg)
deliver_err_response(intf, msg, err);
@@ -1209,7 +1215,6 @@ int ipmi_create_user(unsigned int if_num,
void *handler_data,
struct ipmi_user **user)
{
- unsigned long flags;
struct ipmi_user *new_user = NULL;
int rv = 0;
struct ipmi_smi *intf;
@@ -1277,9 +1282,9 @@ int ipmi_create_user(unsigned int if_num,
new_user->gets_events = false;
mutex_lock(&intf->users_mutex);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
list_add(&new_user->link, &intf->users);
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
mutex_unlock(&intf->users_mutex);
if (handler->ipmi_watchdog_pretimeout)
@@ -1325,7 +1330,6 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
{
struct ipmi_smi *intf = user->intf;
int i;
- unsigned long flags;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
struct ipmi_recv_msg *msg, *msg2;
@@ -1346,7 +1350,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
list_del(&user->link);
atomic_dec(&intf->nr_users);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
if (intf->seq_table[i].inuse
&& (intf->seq_table[i].recv_msg->user == user)) {
@@ -1355,7 +1359,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
}
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
/*
* Remove the user from the command receiver's table. First
@@ -1534,8 +1538,15 @@ EXPORT_SYMBOL(ipmi_get_maintenance_mode);
static void maintenance_mode_update(struct ipmi_smi *intf)
{
if (intf->handlers->set_maintenance_mode)
+ /*
+ * Lower level drivers only care about firmware mode
+ * as it affects their timing. They don't care about
+ * reset, which disables all commands for a while.
+ */
intf->handlers->set_maintenance_mode(
- intf->send_info, intf->maintenance_mode_enable);
+ intf->send_info,
+ (intf->maintenance_mode_state ==
+ IPMI_MAINTENANCE_MODE_STATE_FIRMWARE));
}
int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
@@ -1552,16 +1563,17 @@ int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
if (intf->maintenance_mode != mode) {
switch (mode) {
case IPMI_MAINTENANCE_MODE_AUTO:
- intf->maintenance_mode_enable
- = (intf->auto_maintenance_timeout > 0);
+ /* Just leave it alone. */
break;
case IPMI_MAINTENANCE_MODE_OFF:
- intf->maintenance_mode_enable = false;
+ intf->maintenance_mode_state =
+ IPMI_MAINTENANCE_MODE_STATE_OFF;
break;
case IPMI_MAINTENANCE_MODE_ON:
- intf->maintenance_mode_enable = true;
+ intf->maintenance_mode_state =
+ IPMI_MAINTENANCE_MODE_STATE_FIRMWARE;
break;
default:
@@ -1616,8 +1628,7 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
}
list_for_each_entry_safe(msg, msg2, &msgs, link) {
- msg->user = user;
- kref_get(&user->refcount);
+ ipmi_set_recv_msg_user(msg, user);
deliver_local_response(intf, msg);
}
}
@@ -1922,14 +1933,20 @@ static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
if (is_maintenance_mode_cmd(msg)) {
unsigned long flags;
+ int newst;
+
+ if (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)
+ newst = IPMI_MAINTENANCE_MODE_STATE_FIRMWARE;
+ else
+ newst = IPMI_MAINTENANCE_MODE_STATE_RESET;
spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
- intf->auto_maintenance_timeout
- = maintenance_mode_timeout_ms;
+ intf->auto_maintenance_timeout = maintenance_mode_timeout_ms;
if (!intf->maintenance_mode
- && !intf->maintenance_mode_enable) {
- intf->maintenance_mode_enable = true;
+ && intf->maintenance_mode_state < newst) {
+ intf->maintenance_mode_state = newst;
maintenance_mode_update(intf);
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
spin_unlock_irqrestore(&intf->maintenance_mode_lock,
flags);
@@ -1943,7 +1960,7 @@ static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
smi_msg->data[1] = msg->cmd;
smi_msg->msgid = msgid;
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
if (msg->data_len > 0)
memcpy(&smi_msg->data[2], msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 2;
@@ -2024,12 +2041,9 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
* Save the receive message so we can use it
* to deliver the response.
*/
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
} else {
- /* It's a command, so get a sequence for it. */
- unsigned long flags;
-
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
if (is_maintenance_mode_cmd(msg))
intf->ipmb_maintenance_mode_timeout =
@@ -2087,7 +2101,7 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
* to be correct.
*/
out_err:
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
}
return rv;
@@ -2140,7 +2154,7 @@ static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf,
memcpy(smi_msg->data + 4, msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 4;
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
return 0;
}
@@ -2203,12 +2217,9 @@ static int i_ipmi_req_lan(struct ipmi_smi *intf,
* Save the receive message so we can use it
* to deliver the response.
*/
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
} else {
- /* It's a command, so get a sequence for it. */
- unsigned long flags;
-
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
/*
* Create a sequence number with a 1 second
@@ -2257,7 +2268,7 @@ static int i_ipmi_req_lan(struct ipmi_smi *intf,
* to be correct.
*/
out_err:
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
}
return rv;
@@ -2288,22 +2299,18 @@ static int i_ipmi_request(struct ipmi_user *user,
int run_to_completion = READ_ONCE(intf->run_to_completion);
int rv = 0;
- if (user) {
- if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
- /* Decrement will happen at the end of the routine. */
- rv = -EBUSY;
- goto out;
- }
- }
-
- if (supplied_recv)
+ if (supplied_recv) {
recv_msg = supplied_recv;
- else {
- recv_msg = ipmi_alloc_recv_msg();
- if (recv_msg == NULL) {
- rv = -ENOMEM;
- goto out;
+ recv_msg->user = user;
+ if (user) {
+ atomic_inc(&user->nr_msgs);
+ /* The put happens when the message is freed. */
+ kref_get(&user->refcount);
}
+ } else {
+ recv_msg = ipmi_alloc_recv_msg(user);
+ if (IS_ERR(recv_msg))
+ return PTR_ERR(recv_msg);
}
recv_msg->user_msg_data = user_msg_data;
@@ -2314,22 +2321,22 @@ static int i_ipmi_request(struct ipmi_user *user,
if (smi_msg == NULL) {
if (!supplied_recv)
ipmi_free_recv_msg(recv_msg);
- rv = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
}
if (!run_to_completion)
mutex_lock(&intf->users_mutex);
+ if (intf->maintenance_mode_state == IPMI_MAINTENANCE_MODE_STATE_RESET) {
+ /* No messages while the BMC is in reset. */
+ rv = -EBUSY;
+ goto out_err;
+ }
if (intf->in_shutdown) {
rv = -ENODEV;
goto out_err;
}
- recv_msg->user = user;
- if (user)
- /* The put happens when the message is freed. */
- kref_get(&user->refcount);
recv_msg->msgid = msgid;
/*
* Store the message to send in the receive message so timeout
@@ -2358,8 +2365,10 @@ static int i_ipmi_request(struct ipmi_user *user,
if (rv) {
out_err:
- ipmi_free_smi_msg(smi_msg);
- ipmi_free_recv_msg(recv_msg);
+ if (!supplied_smi)
+ ipmi_free_smi_msg(smi_msg);
+ if (!supplied_recv)
+ ipmi_free_recv_msg(recv_msg);
} else {
dev_dbg(intf->si_dev, "Send: %*ph\n",
smi_msg->data_size, smi_msg->data);
@@ -2369,9 +2378,6 @@ out_err:
if (!run_to_completion)
mutex_unlock(&intf->users_mutex);
-out:
- if (rv && user)
- atomic_dec(&user->nr_msgs);
return rv;
}
@@ -2622,6 +2628,12 @@ retry_bmc_lock:
(bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
goto out_noprocessing;
+ /* Don't allow sysfs access when in maintenance mode. */
+ if (intf->maintenance_mode_state) {
+ rv = -EBUSY;
+ goto out_noprocessing;
+ }
+
prev_guid_set = bmc->dyn_guid_set;
__get_guid(intf);
@@ -2657,7 +2669,7 @@ retry_bmc_lock:
if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
need_waiter(intf); /* Retry later on an error. */
else
- __scan_channels(intf, &id);
+ __scan_channels(intf, &id, false);
if (!intf_set) {
@@ -2677,7 +2689,7 @@ retry_bmc_lock:
goto out_noprocessing;
} else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
/* Version info changes, scan the channels again. */
- __scan_channels(intf, &bmc->fetch_id);
+ __scan_channels(intf, &bmc->fetch_id, true);
bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
@@ -3406,8 +3418,6 @@ channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
intf->channels_ready = true;
wake_up(&intf->waitq);
} else {
- intf->channel_list = intf->wchannels + set;
- intf->channels_ready = true;
rv = send_channel_info_cmd(intf, intf->curr_channel);
}
@@ -3429,10 +3439,21 @@ channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
/*
* Must be holding intf->bmc_reg_mutex to call this.
*/
-static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
+static int __scan_channels(struct ipmi_smi *intf,
+ struct ipmi_device_id *id,
+ bool rescan)
{
int rv;
+ if (rescan) {
+ /* Clear channels_ready to force channels rescan. */
+ intf->channels_ready = false;
+ }
+
+ /* Skip channel scan if channels are already marked ready */
+ if (intf->channels_ready)
+ return 0;
+
if (ipmi_version_major(id) > 1
|| (ipmi_version_major(id) == 1
&& ipmi_version_minor(id) >= 5)) {
@@ -3517,6 +3538,19 @@ static ssize_t nr_msgs_show(struct device *dev,
}
static DEVICE_ATTR_RO(nr_msgs);
+static ssize_t maintenance_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipmi_smi *intf = container_of(attr,
+ struct ipmi_smi,
+ maintenance_mode_devattr);
+
+ return sysfs_emit(buf, "%u %d\n", intf->maintenance_mode_state,
+ intf->auto_maintenance_timeout);
+}
+static DEVICE_ATTR_RO(maintenance_mode);
+
static void redo_bmc_reg(struct work_struct *work)
{
struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
@@ -3575,7 +3609,7 @@ int ipmi_add_smi(struct module *owner,
atomic_set(&intf->nr_users, 0);
intf->handlers = handlers;
intf->send_info = send_info;
- spin_lock_init(&intf->seq_lock);
+ mutex_init(&intf->seq_lock);
for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
intf->seq_table[j].inuse = 0;
intf->seq_table[j].seqid = 0;
@@ -3634,7 +3668,7 @@ int ipmi_add_smi(struct module *owner,
}
mutex_lock(&intf->bmc_reg_mutex);
- rv = __scan_channels(intf, &id);
+ rv = __scan_channels(intf, &id, false);
mutex_unlock(&intf->bmc_reg_mutex);
if (rv)
goto out_err_bmc_reg;
@@ -3653,6 +3687,14 @@ int ipmi_add_smi(struct module *owner,
goto out_err_bmc_reg;
}
+ intf->maintenance_mode_devattr = dev_attr_maintenance_mode;
+ sysfs_attr_init(&intf->maintenance_mode_devattr.attr);
+ rv = device_create_file(intf->si_dev, &intf->maintenance_mode_devattr);
+ if (rv) {
+ device_remove_file(intf->si_dev, &intf->nr_users_devattr);
+ goto out_err_bmc_reg;
+ }
+
intf->intf_num = i;
mutex_unlock(&ipmi_interfaces_mutex);
@@ -3760,6 +3802,7 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
if (intf->handlers->shutdown)
intf->handlers->shutdown(intf->send_info);
+ device_remove_file(intf->si_dev, &intf->maintenance_mode_devattr);
device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
device_remove_file(intf->si_dev, &intf->nr_users_devattr);
@@ -3862,7 +3905,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_ipmb_addr *ipmb_addr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
if (msg->rsp_size < 10) {
/* Message not big enough, just ignore it. */
@@ -3883,9 +3926,8 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -3915,47 +3957,41 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
* causes it to not be freed or queued.
*/
rv = -1;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling
- * later.
- */
- rv = 1;
- kref_put(&user->refcount, free_ipmi_user);
- } else {
- /* Extract the source address from the data. */
- ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
- ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
- ipmb_addr->slave_addr = msg->rsp[6];
- ipmb_addr->lun = msg->rsp[7] & 3;
- ipmb_addr->channel = msg->rsp[3] & 0xf;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
+ ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb_addr->slave_addr = msg->rsp[6];
+ ipmb_addr->lun = msg->rsp[7] & 3;
+ ipmb_addr->channel = msg->rsp[3] & 0xf;
- /*
- * Extract the rest of the message information
- * from the IPMB header.
- */
- recv_msg->user = user;
- recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
- recv_msg->msgid = msg->rsp[7] >> 2;
- recv_msg->msg.netfn = msg->rsp[4] >> 2;
- recv_msg->msg.cmd = msg->rsp[8];
- recv_msg->msg.data = recv_msg->msg_data;
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[7] >> 2;
+ recv_msg->msg.netfn = msg->rsp[4] >> 2;
+ recv_msg->msg.cmd = msg->rsp[8];
+ recv_msg->msg.data = recv_msg->msg_data;
- /*
- * We chop off 10, not 9 bytes because the checksum
- * at the end also needs to be removed.
- */
- recv_msg->msg.data_len = msg->rsp_size - 10;
- memcpy(recv_msg->msg_data, &msg->rsp[9],
- msg->rsp_size - 10);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * We chop off 10, not 9 bytes because the checksum
+ * at the end also needs to be removed.
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 10;
+ memcpy(recv_msg->msg_data, &msg->rsp[9],
+ msg->rsp_size - 10);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate memory for the message, so
+ * requeue it for handling later.
+ */
+ rv = 1;
}
return rv;
@@ -3968,7 +4004,7 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
int rv = 0;
struct ipmi_user *user = NULL;
struct ipmi_ipmb_direct_addr *daddr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
unsigned char netfn = msg->rsp[0] >> 2;
unsigned char cmd = msg->rsp[3];
@@ -3977,9 +4013,8 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -4001,44 +4036,38 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
* causes it to not be freed or queued.
*/
rv = -1;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling
- * later.
- */
- rv = 1;
- kref_put(&user->refcount, free_ipmi_user);
- } else {
- /* Extract the source address from the data. */
- daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
- daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
- daddr->channel = 0;
- daddr->slave_addr = msg->rsp[1];
- daddr->rs_lun = msg->rsp[0] & 3;
- daddr->rq_lun = msg->rsp[2] & 3;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
+ daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+ daddr->channel = 0;
+ daddr->slave_addr = msg->rsp[1];
+ daddr->rs_lun = msg->rsp[0] & 3;
+ daddr->rq_lun = msg->rsp[2] & 3;
- /*
- * Extract the rest of the message information
- * from the IPMB header.
- */
- recv_msg->user = user;
- recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
- recv_msg->msgid = (msg->rsp[2] >> 2);
- recv_msg->msg.netfn = msg->rsp[0] >> 2;
- recv_msg->msg.cmd = msg->rsp[3];
- recv_msg->msg.data = recv_msg->msg_data;
-
- recv_msg->msg.data_len = msg->rsp_size - 4;
- memcpy(recv_msg->msg_data, msg->rsp + 4,
- msg->rsp_size - 4);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = (msg->rsp[2] >> 2);
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[3];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data, msg->rsp + 4,
+ msg->rsp_size - 4);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate memory for the message, so
+ * requeue it for handling later.
+ */
+ rv = 1;
}
return rv;
@@ -4050,7 +4079,7 @@ static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg;
struct ipmi_ipmb_direct_addr *daddr;
- recv_msg = msg->user_data;
+ recv_msg = msg->recv_msg;
if (recv_msg == NULL) {
dev_warn(intf->si_dev,
"IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
@@ -4152,7 +4181,7 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_lan_addr *lan_addr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
if (msg->rsp_size < 12) {
/* Message not big enough, just ignore it. */
@@ -4173,9 +4202,8 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -4206,49 +4234,44 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
* causes it to not be freed or queued.
*/
rv = -1;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling later.
- */
- rv = 1;
- kref_put(&user->refcount, free_ipmi_user);
- } else {
- /* Extract the source address from the data. */
- lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
- lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
- lan_addr->session_handle = msg->rsp[4];
- lan_addr->remote_SWID = msg->rsp[8];
- lan_addr->local_SWID = msg->rsp[5];
- lan_addr->lun = msg->rsp[9] & 3;
- lan_addr->channel = msg->rsp[3] & 0xf;
- lan_addr->privilege = msg->rsp[3] >> 4;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
+ lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
+ lan_addr->session_handle = msg->rsp[4];
+ lan_addr->remote_SWID = msg->rsp[8];
+ lan_addr->local_SWID = msg->rsp[5];
+ lan_addr->lun = msg->rsp[9] & 3;
+ lan_addr->channel = msg->rsp[3] & 0xf;
+ lan_addr->privilege = msg->rsp[3] >> 4;
- /*
- * Extract the rest of the message information
- * from the IPMB header.
- */
- recv_msg->user = user;
- recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
- recv_msg->msgid = msg->rsp[9] >> 2;
- recv_msg->msg.netfn = msg->rsp[6] >> 2;
- recv_msg->msg.cmd = msg->rsp[10];
- recv_msg->msg.data = recv_msg->msg_data;
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[9] >> 2;
+ recv_msg->msg.netfn = msg->rsp[6] >> 2;
+ recv_msg->msg.cmd = msg->rsp[10];
+ recv_msg->msg.data = recv_msg->msg_data;
- /*
- * We chop off 12, not 11 bytes because the checksum
- * at the end also needs to be removed.
- */
- recv_msg->msg.data_len = msg->rsp_size - 12;
- memcpy(recv_msg->msg_data, &msg->rsp[11],
- msg->rsp_size - 12);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * We chop off 12, not 11 bytes because the checksum
+ * at the end also needs to be removed.
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 12;
+ memcpy(recv_msg->msg_data, &msg->rsp[11],
+ msg->rsp_size - 12);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate memory for the message, so
+ * requeue it for handling later.
+ */
+ rv = 1;
}
return rv;
@@ -4270,7 +4293,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_system_interface_addr *smi_addr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
/*
* We expect the OEM SW to perform error checking
@@ -4299,9 +4322,8 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -4314,48 +4336,42 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
*/
rv = 0;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling
- * later.
- */
- rv = 1;
- kref_put(&user->refcount, free_ipmi_user);
- } else {
- /*
- * OEM Messages are expected to be delivered via
- * the system interface to SMS software. We might
- * need to visit this again depending on OEM
- * requirements
- */
- smi_addr = ((struct ipmi_system_interface_addr *)
- &recv_msg->addr);
- smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
- smi_addr->channel = IPMI_BMC_CHANNEL;
- smi_addr->lun = msg->rsp[0] & 3;
-
- recv_msg->user = user;
- recv_msg->user_msg_data = NULL;
- recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
- recv_msg->msg.netfn = msg->rsp[0] >> 2;
- recv_msg->msg.cmd = msg->rsp[1];
- recv_msg->msg.data = recv_msg->msg_data;
+ } else if (!IS_ERR(recv_msg)) {
+ /*
+ * OEM Messages are expected to be delivered via
+ * the system interface to SMS software. We might
+ * need to visit this again depending on OEM
+ * requirements
+ */
+ smi_addr = ((struct ipmi_system_interface_addr *)
+ &recv_msg->addr);
+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr->channel = IPMI_BMC_CHANNEL;
+ smi_addr->lun = msg->rsp[0] & 3;
+
+ recv_msg->user_msg_data = NULL;
+ recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[1];
+ recv_msg->msg.data = recv_msg->msg_data;
- /*
- * The message starts at byte 4 which follows the
- * Channel Byte in the "GET MESSAGE" command
- */
- recv_msg->msg.data_len = msg->rsp_size - 4;
- memcpy(recv_msg->msg_data, &msg->rsp[4],
- msg->rsp_size - 4);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * The message starts at byte 4 which follows the
+ * Channel Byte in the "GET MESSAGE" command
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data, &msg->rsp[4],
+ msg->rsp_size - 4);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate memory for the message, so
+ * requeue it for handling later.
+ */
+ rv = 1;
}
return rv;
@@ -4413,8 +4429,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
if (!user->gets_events)
continue;
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
+ recv_msg = ipmi_alloc_recv_msg(user);
+ if (IS_ERR(recv_msg)) {
mutex_unlock(&intf->users_mutex);
list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
link) {
@@ -4435,8 +4451,6 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
deliver_count++;
copy_event_into_recv_msg(recv_msg, msg);
- recv_msg->user = user;
- kref_get(&user->refcount);
list_add_tail(&recv_msg->link, &msgs);
}
mutex_unlock(&intf->users_mutex);
@@ -4452,8 +4466,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
* No one to receive the message, put it in queue if there's
* not already too many things in the queue.
*/
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
+ recv_msg = ipmi_alloc_recv_msg(NULL);
+ if (IS_ERR(recv_msg)) {
/*
* We couldn't allocate memory for the
* message, so requeue it for handling
@@ -4488,7 +4502,7 @@ static int handle_bmc_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg;
struct ipmi_system_interface_addr *smi_addr;
- recv_msg = msg->user_data;
+ recv_msg = msg->recv_msg;
if (recv_msg == NULL) {
dev_warn(intf->si_dev,
"IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
@@ -4529,9 +4543,10 @@ static int handle_one_recv_msg(struct ipmi_smi *intf,
if (msg->rsp_size < 2) {
/* Message is too small to be correct. */
- dev_warn(intf->si_dev,
- "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
- (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
+ dev_warn_ratelimited(intf->si_dev,
+ "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
+ (msg->data[0] >> 2) | 1,
+ msg->data[1], msg->rsp_size);
return_unspecified:
/* Generate an error response for the message. */
@@ -4561,14 +4576,14 @@ return_unspecified:
} else if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
- && (msg->user_data == NULL)) {
+ && (msg->recv_msg == NULL)) {
if (intf->in_shutdown || intf->run_to_completion)
goto out;
/*
* This is the local response to a command send, start
- * the timer for these. The user_data will not be
+ * the timer for these. The recv_msg will not be
* NULL if this is a response send, and we will let
* response sends just go through.
*/
@@ -4607,10 +4622,10 @@ return_unspecified:
* The NetFN and Command in the response is not even
* marginally correct.
*/
- dev_warn(intf->si_dev,
- "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
- (msg->data[0] >> 2) | 1, msg->data[1],
- msg->rsp[0] >> 2, msg->rsp[1]);
+ dev_warn_ratelimited(intf->si_dev,
+ "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
+ (msg->data[0] >> 2) | 1, msg->data[1],
+ msg->rsp[0] >> 2, msg->rsp[1]);
goto return_unspecified;
}
@@ -4628,7 +4643,7 @@ return_unspecified:
requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_SEND_MSG_CMD)
- && (msg->user_data != NULL)) {
+ && (msg->recv_msg != NULL)) {
/*
* It's a response to a response we sent. For this we
* deliver a send message response to the user.
@@ -4645,7 +4660,7 @@ return_unspecified:
cc = msg->rsp[2];
process_response_response:
- recv_msg = msg->user_data;
+ recv_msg = msg->recv_msg;
requeue = 0;
if (!recv_msg)
@@ -4801,6 +4816,7 @@ static void smi_work(struct work_struct *t)
int run_to_completion = READ_ONCE(intf->run_to_completion);
struct ipmi_smi_msg *newmsg = NULL;
struct ipmi_recv_msg *msg, *msg2;
+ int cc;
/*
* Start the next message if available.
@@ -4809,7 +4825,7 @@ static void smi_work(struct work_struct *t)
* because the lower layer is allowed to hold locks while calling
* message delivery.
*/
-
+restart:
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
if (intf->curr_msg == NULL && !intf->in_shutdown) {
@@ -4830,8 +4846,17 @@ static void smi_work(struct work_struct *t)
if (!run_to_completion)
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
- if (newmsg)
- intf->handlers->sender(intf->send_info, newmsg);
+ if (newmsg) {
+ cc = intf->handlers->sender(intf->send_info, newmsg);
+ if (cc) {
+ if (newmsg->recv_msg)
+ deliver_err_response(intf,
+ newmsg->recv_msg, cc);
+ else
+ ipmi_free_smi_msg(newmsg);
+ goto restart;
+ }
+ }
handle_new_recv_msgs(intf);
@@ -4868,12 +4893,10 @@ static void smi_work(struct work_struct *t)
list_del(&msg->link);
- if (refcount_read(&user->destroyed) == 0) {
+ if (refcount_read(&user->destroyed) == 0)
ipmi_free_recv_msg(msg);
- } else {
- atomic_dec(&user->nr_msgs);
+ else
user->handler->ipmi_recv_hndl(msg, user->handler_data);
- }
}
mutex_unlock(&intf->user_msgs_mutex);
@@ -4951,8 +4974,7 @@ smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
struct list_head *timeouts,
unsigned long timeout_period,
- int slot, unsigned long *flags,
- bool *need_timer)
+ int slot, bool *need_timer)
{
struct ipmi_recv_msg *msg;
@@ -5004,7 +5026,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
return;
}
- spin_unlock_irqrestore(&intf->seq_lock, *flags);
+ mutex_unlock(&intf->seq_lock);
/*
* Send the new message. We send with a zero
@@ -5025,7 +5047,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
} else
ipmi_free_smi_msg(smi_msg);
- spin_lock_irqsave(&intf->seq_lock, *flags);
+ mutex_lock(&intf->seq_lock);
}
}
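
Converting seq_lock from a spinlock to a mutex is what lets check_msg_timeout() shed the irqsave flags plumbing above: the retransmit path can simply release and retake the lock around a send that may now sleep. The pattern, sketched with a hypothetical resend helper:

    mutex_unlock(&intf->seq_lock);
    resend_msg(intf, smi_msg);      /* hypothetical helper; may sleep safely */
    mutex_lock(&intf->seq_lock);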
@@ -5052,7 +5074,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
* list.
*/
INIT_LIST_HEAD(&timeouts);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
if (intf->ipmb_maintenance_mode_timeout) {
if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
intf->ipmb_maintenance_mode_timeout = 0;
@@ -5062,8 +5084,8 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
check_msg_timeout(intf, &intf->seq_table[i],
&timeouts, timeout_period, i,
- &flags, &need_timer);
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ &need_timer);
+ mutex_unlock(&intf->seq_lock);
list_for_each_entry_safe(msg, msg2, &timeouts, link)
deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
@@ -5083,7 +5105,9 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
-= timeout_period;
if (!intf->maintenance_mode
&& (intf->auto_maintenance_timeout <= 0)) {
- intf->maintenance_mode_enable = false;
+ intf->maintenance_mode_state =
+ IPMI_MAINTENANCE_MODE_STATE_OFF;
+ intf->auto_maintenance_timeout = 0;
maintenance_mode_update(intf);
}
}
@@ -5099,15 +5123,13 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
static void ipmi_request_event(struct ipmi_smi *intf)
{
/* No event requests when in maintenance mode. */
- if (intf->maintenance_mode_enable)
+ if (intf->maintenance_mode_state)
return;
if (!intf->in_shutdown)
intf->handlers->request_events(intf->send_info);
}
-static struct timer_list ipmi_timer;
-
static atomic_t stop_operation;
static void ipmi_timeout_work(struct work_struct *work)
@@ -5131,6 +5153,8 @@ static void ipmi_timeout_work(struct work_struct *work)
}
need_timer = true;
}
+ if (intf->maintenance_mode_state)
+ need_timer = true;
need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
}
@@ -5174,7 +5198,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
if (rv) {
rv->done = free_smi_msg;
- rv->user_data = NULL;
+ rv->recv_msg = NULL;
rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
atomic_inc(&smi_msg_inuse_count);
}
@@ -5190,27 +5214,51 @@ static void free_recv_msg(struct ipmi_recv_msg *msg)
kfree(msg);
}
-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user)
{
struct ipmi_recv_msg *rv;
+ if (user) {
+ if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
+ atomic_dec(&user->nr_msgs);
+ return ERR_PTR(-EBUSY);
+ }
+ }
+
rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
- if (rv) {
- rv->user = NULL;
- rv->done = free_recv_msg;
- atomic_inc(&recv_msg_inuse_count);
+ if (!rv) {
+ if (user)
+ atomic_dec(&user->nr_msgs);
+ return ERR_PTR(-ENOMEM);
}
+
+ rv->user = user;
+ rv->done = free_recv_msg;
+ if (user)
+ kref_get(&user->refcount);
+ atomic_inc(&recv_msg_inuse_count);
return rv;
}
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
- if (msg->user && !oops_in_progress)
+ if (msg->user && !oops_in_progress) {
+ atomic_dec(&msg->user->nr_msgs);
kref_put(&msg->user->refcount, free_ipmi_user);
+ }
msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
+static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
+ struct ipmi_user *user)
+{
+ WARN_ON_ONCE(msg->user); /* User should not be set. */
+ msg->user = user;
+ atomic_inc(&user->nr_msgs);
+ kref_get(&user->refcount);
+}
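
Taken together, these helpers centralize the per-user accounting: ipmi_alloc_recv_msg() charges nr_msgs and takes the user kref, ipmi_free_recv_msg() releases both, and ipmi_set_recv_msg_user() covers messages that were allocated without an owner. The resulting lifecycle, as a sketch built only from the helpers above:

    struct ipmi_recv_msg *msg = ipmi_alloc_recv_msg(user);  /* +nr_msgs, +kref */
    if (IS_ERR(msg))
            return PTR_ERR(msg);        /* -EBUSY (quota) or -ENOMEM */
    /* ... fill in and deliver, or let it time out ... */
    ipmi_free_recv_msg(msg);            /* -nr_msgs, -kref */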
+
static atomic_t panic_done_count = ATOMIC_INIT(0);
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
index 4a2efafcd1f8..52a1130defe5 100644
--- a/drivers/char/ipmi/ipmi_powernv.c
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -51,7 +51,7 @@ static void send_error_reply(struct ipmi_smi_powernv *smi,
ipmi_smi_msg_received(smi->intf, msg);
}
-static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
+static int ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
{
struct ipmi_smi_powernv *smi = send_info;
struct opal_ipmi_msg *opal_msg;
@@ -93,18 +93,19 @@ static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
smi->interface_id, opal_msg, size);
rc = opal_ipmi_send(smi->interface_id, opal_msg, size);
pr_devel("%s: -> %d\n", __func__, rc);
-
- if (!rc) {
- smi->cur_msg = msg;
- spin_unlock_irqrestore(&smi->msg_lock, flags);
- return;
+ if (rc) {
+ comp = IPMI_ERR_UNSPECIFIED;
+ goto err_unlock;
}
- comp = IPMI_ERR_UNSPECIFIED;
+ smi->cur_msg = msg;
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ return IPMI_CC_NO_ERROR;
+
err_unlock:
spin_unlock_irqrestore(&smi->msg_lock, flags);
err:
- send_error_reply(smi, msg, comp);
+ return comp;
}
static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi)
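
With this conversion, a ->sender() handler returns an IPMI completion code instead of synthesizing its own error reply; the core's smi_work() (see the restart loop earlier in this patch) calls deliver_err_response() or frees the message on a nonzero return. A minimal conforming handler, sketched with hypothetical names:

    static int example_sender(void *send_info, struct ipmi_smi_msg *msg)
    {
            struct example_smi *smi = send_info;    /* hypothetical type */

            if (smi->dead)                          /* hypothetical condition */
                    return IPMI_ERR_UNSPECIFIED;    /* core delivers the error */
            example_enqueue(smi, msg);              /* hypothetical enqueue */
            return IPMI_CC_NO_ERROR;
    }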
diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
index 508c3fd45877..687835b53da5 100644
--- a/drivers/char/ipmi/ipmi_si.h
+++ b/drivers/char/ipmi/ipmi_si.h
@@ -101,6 +101,13 @@ void ipmi_si_pci_shutdown(void);
static inline void ipmi_si_pci_init(void) { }
static inline void ipmi_si_pci_shutdown(void) { }
#endif
+#ifdef CONFIG_IPMI_LS2K
+void ipmi_si_ls2k_init(void);
+void ipmi_si_ls2k_shutdown(void);
+#else
+static inline void ipmi_si_ls2k_init(void) { }
+static inline void ipmi_si_ls2k_shutdown(void) { }
+#endif
#ifdef CONFIG_PARISC
void ipmi_si_parisc_init(void);
void ipmi_si_parisc_shutdown(void);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index bb42dfe1c6a8..5459ffdde8dc 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -53,6 +53,7 @@
#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a
short timeout */
+#define SI_TIMEOUT_HOSED (HZ) /* 1 second when in hosed state. */
enum si_intf_state {
SI_NORMAL,
@@ -61,7 +62,8 @@ enum si_intf_state {
SI_CLEARING_FLAGS,
SI_GETTING_MESSAGES,
SI_CHECKING_ENABLES,
- SI_SETTING_ENABLES
+ SI_SETTING_ENABLES,
+ SI_HOSED
/* FIXME - add watchdog stuff. */
};
@@ -273,8 +275,7 @@ void debug_timestamp(struct smi_info *smi_info, char *msg)
struct timespec64 t;
ktime_get_ts64(&t);
- dev_dbg(smi_info->io.dev, "**%s: %lld.%9.9ld\n",
- msg, t.tv_sec, t.tv_nsec);
+ dev_dbg(smi_info->io.dev, "**%s: %ptSp\n", msg, &t);
}
#else
#define debug_timestamp(smi_info, x)
@@ -313,7 +314,7 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode)
static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
- int rv;
+ int rv;
if (!smi_info->waiting_msg) {
smi_info->curr_msg = NULL;
@@ -390,6 +391,17 @@ static void start_clear_flags(struct smi_info *smi_info)
smi_info->si_state = SI_CLEARING_FLAGS;
}
+static void start_get_flags(struct smi_info *smi_info)
+{
+ unsigned char msg[2];
+
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+
+ start_new_msg(smi_info, msg, 2);
+ smi_info->si_state = SI_GETTING_FLAGS;
+}
+
static void start_getting_msg_queue(struct smi_info *smi_info)
{
smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -742,6 +754,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
}
break;
}
+ case SI_HOSED: /* Shouldn't happen. */
+ break;
}
}
@@ -756,6 +770,10 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
enum si_sm_result si_sm_result;
restart:
+ if (smi_info->si_state == SI_HOSED)
+ /* Just in case; the hosed state is only exited from the timeout handler. */
+ return SI_SM_HOSED;
+
/*
* There used to be a loop here that waited a little while
* (around 25us) before giving up. That turned out to be
@@ -779,18 +797,20 @@ restart:
/*
- * Do the before return_hosed_msg, because that
- * releases the lock.
+ * Do this before return_hosed_msg(), because that
+ * releases the lock. We just disable operations for
+ * a while and retry in hosed state.
*/
- smi_info->si_state = SI_NORMAL;
+ smi_info->si_state = SI_HOSED;
if (smi_info->curr_msg != NULL) {
/*
* If we were handling a user message, format
* a response to send to the upper layer to
* tell it about the error.
*/
- return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
+ return_hosed_msg(smi_info, IPMI_BUS_ERR);
}
- goto restart;
+ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_HOSED);
+ goto out;
}
/*
@@ -798,8 +818,6 @@ restart:
* this if there is not yet an upper layer to handle anything.
*/
if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
- unsigned char msg[2];
-
if (smi_info->si_state != SI_NORMAL) {
/*
* We got an ATTN, but we are doing something else.
@@ -817,11 +835,7 @@ restart:
* interrupts work with the SMI, that's not really
* possible.
*/
- msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
- msg[1] = IPMI_GET_MSG_FLAGS_CMD;
-
- start_new_msg(smi_info, msg, 2);
- smi_info->si_state = SI_GETTING_FLAGS;
+ start_get_flags(smi_info);
goto restart;
}
}
@@ -894,27 +908,29 @@ static void flush_messages(void *send_info)
* mode. This means we are single-threaded, no need for locks.
*/
result = smi_event_handler(smi_info, 0);
- while (result != SI_SM_IDLE) {
+ while (result != SI_SM_IDLE && result != SI_SM_HOSED) {
udelay(SI_SHORT_TIMEOUT_USEC);
result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
}
}
-static void sender(void *send_info,
- struct ipmi_smi_msg *msg)
+static int sender(void *send_info, struct ipmi_smi_msg *msg)
{
struct smi_info *smi_info = send_info;
unsigned long flags;
debug_timestamp(smi_info, "Enqueue");
+ if (smi_info->si_state == SI_HOSED)
+ return IPMI_BUS_ERR;
+
if (smi_info->run_to_completion) {
/*
* If we are running to completion, start it. Upper
* layer will call flush_messages to clear it out.
*/
smi_info->waiting_msg = msg;
- return;
+ return IPMI_CC_NO_ERROR;
}
spin_lock_irqsave(&smi_info->si_lock, flags);
@@ -929,6 +945,7 @@ static void sender(void *send_info,
smi_info->waiting_msg = msg;
check_start_timer_thread(smi_info);
spin_unlock_irqrestore(&smi_info->si_lock, flags);
+ return IPMI_CC_NO_ERROR;
}
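
The SI_HOSED lifecycle introduced here, summarized from the hunks above and below:

    /* smi_event_handler(): on SI_SM_HOSED, set si_state = SI_HOSED, complete
     *     any current message with IPMI_BUS_ERR, arm the timer for 1 second.
     * sender(): rejects new messages with IPMI_BUS_ERR while hosed.
     * smi_timeout(): issues start_get_flags(), which moves the state to
     *     SI_GETTING_FLAGS and thereby probes whether the BMC recovered.
     */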
static void set_run_to_completion(void *send_info, bool i_run_to_completion)
@@ -1087,6 +1104,10 @@ static void smi_timeout(struct timer_list *t)
spin_lock_irqsave(&(smi_info->si_lock), flags);
debug_timestamp(smi_info, "Timer");
+ if (smi_info->si_state == SI_HOSED)
+ /* Try something to see if the BMC is now operational. */
+ start_get_flags(smi_info);
+
jiffies_now = jiffies;
time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
* SI_USEC_PER_JIFFY);
@@ -1096,14 +1117,11 @@ static void smi_timeout(struct timer_list *t)
/* Running with interrupts, only do long timeouts. */
timeout = jiffies + SI_TIMEOUT_JIFFIES;
smi_inc_stat(smi_info, long_timeouts);
- goto do_mod_timer;
- }
-
- /*
- * If the state machine asks for a short delay, then shorten
- * the timer timeout.
- */
- if (smi_result == SI_SM_CALL_WITH_DELAY) {
+ } else if (smi_result == SI_SM_CALL_WITH_DELAY) {
+ /*
+ * If the state machine asks for a short delay, then shorten
+ * the timer timeout.
+ */
smi_inc_stat(smi_info, short_timeouts);
timeout = jiffies + 1;
} else {
@@ -1111,7 +1129,6 @@ static void smi_timeout(struct timer_list *t)
timeout = jiffies + SI_TIMEOUT_JIFFIES;
}
-do_mod_timer:
if (smi_result != SI_SM_IDLE)
smi_mod_timer(smi_info, timeout);
else
@@ -2108,7 +2125,6 @@ static bool __init ipmi_smi_info_same(struct smi_info *e1, struct smi_info *e2)
static int __init init_ipmi_si(void)
{
struct smi_info *e, *e2;
- enum ipmi_addr_src type = SI_INVALID;
if (initialized)
return 0;
@@ -2121,6 +2137,8 @@ static int __init init_ipmi_si(void)
ipmi_si_pci_init();
+ ipmi_si_ls2k_init();
+
ipmi_si_parisc_init();
mutex_lock(&smi_infos_lock);
@@ -2190,9 +2208,6 @@ static int __init init_ipmi_si(void)
initialized = true;
mutex_unlock(&smi_infos_lock);
- if (type)
- return 0;
-
mutex_lock(&smi_infos_lock);
if (unload_when_empty && list_empty(&smi_infos)) {
mutex_unlock(&smi_infos_lock);
@@ -2335,6 +2350,8 @@ static void cleanup_ipmi_si(void)
ipmi_si_pci_shutdown();
+ ipmi_si_ls2k_shutdown();
+
ipmi_si_parisc_shutdown();
ipmi_si_platform_shutdown();
diff --git a/drivers/char/ipmi/ipmi_si_ls2k.c b/drivers/char/ipmi/ipmi_si_ls2k.c
new file mode 100644
index 000000000000..45442c257efd
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_ls2k.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Loongson-2K BMC IPMI interface
+ *
+ * Copyright (C) 2024-2025 Loongson Technology Corporation Limited.
+ *
+ * Authors:
+ * Chong Qiao <qiaochong@loongson.cn>
+ * Binbin Zhou <zhoubinbin@loongson.cn>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include "ipmi_si.h"
+
+#define LS2K_KCS_FIFO_IBFH 0x0
+#define LS2K_KCS_FIFO_IBFT 0x1
+#define LS2K_KCS_FIFO_OBFH 0x2
+#define LS2K_KCS_FIFO_OBFT 0x3
+
+/* KCS registers */
+#define LS2K_KCS_REG_STS 0x4
+#define LS2K_KCS_REG_DATA_OUT 0x5
+#define LS2K_KCS_REG_DATA_IN 0x6
+#define LS2K_KCS_REG_CMD 0x8
+
+#define LS2K_KCS_CMD_DATA 0xa
+#define LS2K_KCS_VERSION 0xb
+#define LS2K_KCS_WR_REQ 0xc
+#define LS2K_KCS_WR_ACK 0x10
+
+#define LS2K_KCS_STS_OBF BIT(0)
+#define LS2K_KCS_STS_IBF BIT(1)
+#define LS2K_KCS_STS_SMS_ATN BIT(2)
+#define LS2K_KCS_STS_CMD BIT(3)
+
+#define LS2K_KCS_DATA_MASK (LS2K_KCS_STS_OBF | LS2K_KCS_STS_IBF | LS2K_KCS_STS_CMD)
+
+static bool ls2k_registered;
+
+static unsigned char ls2k_mem_inb_v0(const struct si_sm_io *io, unsigned int offset)
+{
+ void __iomem *addr = io->addr;
+ int reg_offset;
+
+ if (offset & BIT(0)) {
+ reg_offset = LS2K_KCS_REG_STS;
+ } else {
+ writeb(readb(addr + LS2K_KCS_REG_STS) & ~LS2K_KCS_STS_OBF, addr + LS2K_KCS_REG_STS);
+ reg_offset = LS2K_KCS_REG_DATA_OUT;
+ }
+
+ return readb(addr + reg_offset);
+}
+
+static unsigned char ls2k_mem_inb_v1(const struct si_sm_io *io, unsigned int offset)
+{
+ void __iomem *addr = io->addr;
+ unsigned char inb = 0, cmd;
+ bool obf, ibf;
+
+ obf = readb(addr + LS2K_KCS_FIFO_OBFH) ^ readb(addr + LS2K_KCS_FIFO_OBFT);
+ ibf = readb(addr + LS2K_KCS_FIFO_IBFH) ^ readb(addr + LS2K_KCS_FIFO_IBFT);
+ cmd = readb(addr + LS2K_KCS_CMD_DATA);
+
+ if (offset & BIT(0)) {
+ inb = readb(addr + LS2K_KCS_REG_STS) & ~LS2K_KCS_DATA_MASK;
+ inb |= FIELD_PREP(LS2K_KCS_STS_OBF, obf)
+ | FIELD_PREP(LS2K_KCS_STS_IBF, ibf)
+ | FIELD_PREP(LS2K_KCS_STS_CMD, cmd);
+ } else {
+ inb = readb(addr + LS2K_KCS_REG_DATA_OUT);
+ writeb(readb(addr + LS2K_KCS_FIFO_OBFH), addr + LS2K_KCS_FIFO_OBFT);
+ }
+
+ return inb;
+}
+
+static void ls2k_mem_outb_v0(const struct si_sm_io *io, unsigned int offset,
+ unsigned char val)
+{
+ void __iomem *addr = io->addr;
+ unsigned char sts = readb(addr + LS2K_KCS_REG_STS);
+ int reg_offset;
+
+ if (sts & LS2K_KCS_STS_IBF)
+ return;
+
+ if (offset & BIT(0)) {
+ reg_offset = LS2K_KCS_REG_CMD;
+ sts |= LS2K_KCS_STS_CMD;
+ } else {
+ reg_offset = LS2K_KCS_REG_DATA_IN;
+ sts &= ~LS2K_KCS_STS_CMD;
+ }
+
+ writew(val, addr + reg_offset);
+ writeb(sts | LS2K_KCS_STS_IBF, addr + LS2K_KCS_REG_STS);
+ writel(readl(addr + LS2K_KCS_WR_REQ) + 1, addr + LS2K_KCS_WR_REQ);
+}
+
+static void ls2k_mem_outb_v1(const struct si_sm_io *io, unsigned int offset,
+ unsigned char val)
+{
+ void __iomem *addr = io->addr;
+ unsigned char ibfh, ibft;
+ int reg_offset;
+
+ ibfh = readb(addr + LS2K_KCS_FIFO_IBFH);
+ ibft = readb(addr + LS2K_KCS_FIFO_IBFT);
+
+ if (ibfh ^ ibft)
+ return;
+
+ reg_offset = (offset & BIT(0)) ? LS2K_KCS_REG_CMD : LS2K_KCS_REG_DATA_IN;
+ writew(val, addr + reg_offset);
+
+ writeb(offset & BIT(0), addr + LS2K_KCS_CMD_DATA);
+ writeb(!ibft, addr + LS2K_KCS_FIFO_IBFH);
+ writel(readl(addr + LS2K_KCS_WR_REQ) + 1, addr + LS2K_KCS_WR_REQ);
+}
+
+static void ls2k_mem_cleanup(struct si_sm_io *io)
+{
+ if (io->addr)
+ iounmap(io->addr);
+}
+
+static int ipmi_ls2k_mem_setup(struct si_sm_io *io)
+{
+ unsigned char version;
+
+ io->addr = ioremap(io->addr_data, io->regspacing);
+ if (!io->addr)
+ return -EIO;
+
+ version = readb(io->addr + LS2K_KCS_VERSION);
+
+ io->inputb = version ? ls2k_mem_inb_v1 : ls2k_mem_inb_v0;
+ io->outputb = version ? ls2k_mem_outb_v1 : ls2k_mem_outb_v0;
+ io->io_cleanup = ls2k_mem_cleanup;
+
+ return 0;
+}
+
+static int ipmi_ls2k_probe(struct platform_device *pdev)
+{
+ struct si_sm_io io;
+
+ memset(&io, 0, sizeof(io));
+
+ io.si_info = &ipmi_kcs_si_info;
+ io.io_setup = ipmi_ls2k_mem_setup;
+ io.addr_data = pdev->resource[0].start;
+ io.regspacing = resource_size(&pdev->resource[0]);
+ io.dev = &pdev->dev;
+
+ dev_dbg(&pdev->dev, "addr 0x%lx, spacing %d.\n", io.addr_data, io.regspacing);
+
+ return ipmi_si_add_smi(&io);
+}
+
+static void ipmi_ls2k_remove(struct platform_device *pdev)
+{
+ ipmi_si_remove_by_dev(&pdev->dev);
+}
+
+struct platform_driver ipmi_ls2k_platform_driver = {
+ .driver = {
+ .name = "ls2k-ipmi-si",
+ },
+ .probe = ipmi_ls2k_probe,
+ .remove = ipmi_ls2k_remove,
+};
+
+void ipmi_si_ls2k_init(void)
+{
+ platform_driver_register(&ipmi_ls2k_platform_driver);
+ ls2k_registered = true;
+}
+
+void ipmi_si_ls2k_shutdown(void)
+{
+ if (ls2k_registered)
+ platform_driver_unregister(&ipmi_ls2k_platform_driver);
+}
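
The v1 interface replaces the v0 status-bit handshake with one-byte FIFO head/tail flags: a buffer holds a byte exactly when its head and tail flags differ, and the consumer acknowledges by copying head into tail. Reading one output byte, restated from ls2k_mem_inb_v1() above:

    bool obf = readb(addr + LS2K_KCS_FIFO_OBFH) ^
               readb(addr + LS2K_KCS_FIFO_OBFT);
    if (obf) {                           /* BMC produced an output byte */
            val = readb(addr + LS2K_KCS_REG_DATA_OUT);
            /* ack: copy head to tail so head == tail again */
            writeb(readb(addr + LS2K_KCS_FIFO_OBFH),
                   addr + LS2K_KCS_FIFO_OBFT);
    }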
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 1bc42830444d..ef1582a029f4 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1068,8 +1068,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
}
}
-static void sender(void *send_info,
- struct ipmi_smi_msg *msg)
+static int sender(void *send_info, struct ipmi_smi_msg *msg)
{
struct ssif_info *ssif_info = send_info;
unsigned long oflags, *flags;
@@ -1084,11 +1083,10 @@ static void sender(void *send_info,
struct timespec64 t;
ktime_get_real_ts64(&t);
- dev_dbg(&ssif_info->client->dev,
- "**Enqueue %02x %02x: %lld.%6.6ld\n",
- msg->data[0], msg->data[1],
- (long long)t.tv_sec, (long)t.tv_nsec / NSEC_PER_USEC);
+ dev_dbg(&ssif_info->client->dev, "**Enqueue %02x %02x: %ptSp\n",
+ msg->data[0], msg->data[1], &t);
}
+ return IPMI_CC_NO_ERROR;
}
static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index ab759b492fdd..a013ddbf1466 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1146,14 +1146,8 @@ static struct ipmi_smi_watcher smi_watcher = {
.smi_gone = ipmi_smi_gone
};
-static int action_op(const char *inval, char *outval)
+static int action_op_set_val(const char *inval)
{
- if (outval)
- strcpy(outval, action);
-
- if (!inval)
- return 0;
-
if (strcmp(inval, "reset") == 0)
action_val = WDOG_TIMEOUT_RESET;
else if (strcmp(inval, "none") == 0)
@@ -1164,18 +1158,26 @@ static int action_op(const char *inval, char *outval)
action_val = WDOG_TIMEOUT_POWER_DOWN;
else
return -EINVAL;
- strcpy(action, inval);
return 0;
}
-static int preaction_op(const char *inval, char *outval)
+static int action_op(const char *inval, char *outval)
{
+ int rv;
+
if (outval)
- strcpy(outval, preaction);
+ strcpy(outval, action);
if (!inval)
return 0;
+ rv = action_op_set_val(inval);
+ if (!rv)
+ strcpy(action, inval);
+ return rv;
+}
+static int preaction_op_set_val(const char *inval)
+{
if (strcmp(inval, "pre_none") == 0)
preaction_val = WDOG_PRETIMEOUT_NONE;
else if (strcmp(inval, "pre_smi") == 0)
@@ -1188,18 +1190,26 @@ static int preaction_op(const char *inval, char *outval)
preaction_val = WDOG_PRETIMEOUT_MSG_INT;
else
return -EINVAL;
- strcpy(preaction, inval);
return 0;
}
-static int preop_op(const char *inval, char *outval)
+static int preaction_op(const char *inval, char *outval)
{
+ int rv;
+
if (outval)
- strcpy(outval, preop);
+ strcpy(outval, preaction);
if (!inval)
return 0;
+ rv = preaction_op_set_val(inval);
+ if (!rv)
+ strcpy(preaction, inval);
+ return rv;
+}
+static int preop_op_set_val(const char *inval)
+{
if (strcmp(inval, "preop_none") == 0)
preop_val = WDOG_PREOP_NONE;
else if (strcmp(inval, "preop_panic") == 0)
@@ -1208,7 +1218,22 @@ static int preop_op(const char *inval, char *outval)
preop_val = WDOG_PREOP_GIVE_DATA;
else
return -EINVAL;
- strcpy(preop, inval);
+ return 0;
+}
+
+static int preop_op(const char *inval, char *outval)
+{
+ int rv;
+
+ if (outval)
+ strcpy(outval, preop);
+
+ if (!inval)
+ return 0;
+
+ rv = preop_op_set_val(inval);
+ if (!rv)
+ strcpy(preop, inval);
- return 0;
+ return rv;
}
@@ -1245,18 +1270,18 @@ static int __init ipmi_wdog_init(void)
{
int rv;
- if (action_op(action, NULL)) {
+ if (action_op_set_val(action)) {
action_op("reset", NULL);
pr_info("Unknown action '%s', defaulting to reset\n", action);
}
- if (preaction_op(preaction, NULL)) {
+ if (preaction_op_set_val(preaction)) {
preaction_op("pre_none", NULL);
pr_info("Unknown preaction '%s', defaulting to none\n",
preaction);
}
- if (preop_op(preop, NULL)) {
+ if (preop_op_set_val(preop)) {
preop_op("preop_none", NULL);
pr_info("Unknown preop '%s', defaulting to none\n", preop);
}
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 48839958b0b1..52039fae1594 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -304,13 +304,13 @@ static unsigned zero_mmap_capabilities(struct file *file)
}
/* can't do an in-place private mapping if there's no MMU */
-static inline int private_mapping_ok(struct vm_area_struct *vma)
+static inline int private_mapping_ok(struct vm_area_desc *desc)
{
- return is_nommu_shared_mapping(vma->vm_flags);
+ return is_nommu_shared_mapping(desc->vm_flags);
}
#else
-static inline int private_mapping_ok(struct vm_area_struct *vma)
+static inline int private_mapping_ok(struct vm_area_desc *desc)
{
return 1;
}
@@ -322,46 +322,49 @@ static const struct vm_operations_struct mmap_mem_ops = {
#endif
};
-static int mmap_mem(struct file *file, struct vm_area_struct *vma)
+static int mmap_filter_error(int err)
{
- size_t size = vma->vm_end - vma->vm_start;
- phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+ return -EAGAIN;
+}
+
+static int mmap_mem_prepare(struct vm_area_desc *desc)
+{
+ struct file *file = desc->file;
+ const size_t size = vma_desc_size(desc);
+ const phys_addr_t offset = (phys_addr_t)desc->pgoff << PAGE_SHIFT;
/* Does it even fit in phys_addr_t? */
- if (offset >> PAGE_SHIFT != vma->vm_pgoff)
+ if (offset >> PAGE_SHIFT != desc->pgoff)
return -EINVAL;
/* It's illegal to wrap around the end of the physical address space. */
if (offset + (phys_addr_t)size - 1 < offset)
return -EINVAL;
- if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
+ if (!valid_mmap_phys_addr_range(desc->pgoff, size))
return -EINVAL;
- if (!private_mapping_ok(vma))
+ if (!private_mapping_ok(desc))
return -ENOSYS;
- if (!range_is_allowed(vma->vm_pgoff, size))
+ if (!range_is_allowed(desc->pgoff, size))
return -EPERM;
- if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
- &vma->vm_page_prot))
+ if (!phys_mem_access_prot_allowed(file, desc->pgoff, size,
+ &desc->page_prot))
return -EINVAL;
- vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
- size,
- vma->vm_page_prot);
+ desc->page_prot = phys_mem_access_prot(file, desc->pgoff,
+ size,
+ desc->page_prot);
- vma->vm_ops = &mmap_mem_ops;
+ desc->vm_ops = &mmap_mem_ops;
+
+ /* Remap-pfn-range will mark the range VM_IO. */
+ mmap_action_remap_full(desc, desc->pgoff);
+ /* We filter remap errors to -EAGAIN. */
+ desc->action.error_hook = mmap_filter_error;
- /* Remap-pfn-range will mark the range VM_IO */
- if (remap_pfn_range(vma,
- vma->vm_start,
- vma->vm_pgoff,
- size,
- vma->vm_page_prot)) {
- return -EAGAIN;
- }
return 0;
}
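
This converts /dev/mem from the .mmap hook, which operates on a live vm_area_struct, to .mmap_prepare, which validates and configures a vm_area_desc before the VMA exists; the PFN remap itself is deferred through the action set up by mmap_action_remap_full(), and error_hook preserves the old -EAGAIN return. A trimmed-down prepare hook under the same assumed API, with hypothetical helper names:

    static int mydrv_mmap_prepare(struct vm_area_desc *desc)
    {
            if (!mydrv_range_ok(desc->pgoff, vma_desc_size(desc)))
                    return -EINVAL;             /* hypothetical range check */
            desc->vm_ops = &mydrv_vm_ops;       /* hypothetical ops table */
            mmap_action_remap_full(desc, desc->pgoff);  /* core does the remap */
            return 0;
    }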
@@ -501,38 +504,64 @@ static ssize_t read_zero(struct file *file, char __user *buf,
return cleared;
}
-static int mmap_zero(struct file *file, struct vm_area_struct *vma)
+static int mmap_zero_private_success(const struct vm_area_struct *vma)
+{
+ /*
+ * This is a highly unique situation where we mark a MAP_PRIVATE mapping
+ * of /dev/zero anonymous, despite it not being.
+ */
+ vma_set_anonymous((struct vm_area_struct *)vma);
+
+ return 0;
+}
+
+static int mmap_zero_prepare(struct vm_area_desc *desc)
{
#ifndef CONFIG_MMU
return -ENOSYS;
#endif
- if (vma->vm_flags & VM_SHARED)
- return shmem_zero_setup(vma);
- vma_set_anonymous(vma);
+ if (desc->vm_flags & VM_SHARED)
+ return shmem_zero_setup_desc(desc);
+
+ desc->action.success_hook = mmap_zero_private_success;
return 0;
}
+#ifndef CONFIG_MMU
+static unsigned long get_unmapped_area_zero(struct file *file,
+ unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
+{
+ return -ENOSYS;
+}
+#else
static unsigned long get_unmapped_area_zero(struct file *file,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
-#ifdef CONFIG_MMU
if (flags & MAP_SHARED) {
/*
- * mmap_zero() will call shmem_zero_setup() to create a file,
- * so use shmem's get_unmapped_area in case it can be huge;
- * and pass NULL for file as in mmap.c's get_unmapped_area(),
- * so as not to confuse shmem with our handle on "/dev/zero".
+ * mmap_zero_prepare() will call shmem_zero_setup() to create a
+ * file, so use shmem's get_unmapped_area in case it can be
+ * huge; and pass NULL for file as in mmap.c's
+ * get_unmapped_area(), so as not to confuse shmem with our
+ * handle on "/dev/zero".
*/
return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
}
- /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
- return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
+ /*
+ * Otherwise flags & MAP_PRIVATE: with no shmem object beneath it,
+ * attempt to map aligned to huge page size if possible, otherwise we
+ * fall back to system page size mappings.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return thp_get_unmapped_area(file, addr, len, pgoff, flags);
#else
- return -ENOSYS;
+ return mm_get_unmapped_area(file, addr, len, pgoff, flags);
#endif
}
+#endif /* CONFIG_MMU */
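
The practical effect of the MAP_PRIVATE branch: with CONFIG_TRANSPARENT_HUGEPAGE, thp_get_unmapped_area() can hand back a huge-page-aligned address, making large private /dev/zero mappings THP-eligible. Illustratively, from userspace:

    #include <fcntl.h>
    #include <sys/mman.h>

    int main(void)
    {
            int fd = open("/dev/zero", O_RDWR);
            char *p = mmap(NULL, 16UL << 20, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE, fd, 0);  /* may now be huge-page aligned */
            return p == MAP_FAILED;
    }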
static ssize_t write_full(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
@@ -619,7 +648,7 @@ static const struct file_operations __maybe_unused mem_fops = {
.llseek = memory_lseek,
.read = read_mem,
.write = write_mem,
- .mmap = mmap_mem,
+ .mmap_prepare = mmap_mem_prepare,
.open = open_mem,
#ifndef CONFIG_MMU
.get_unmapped_area = get_unmapped_area_mem,
@@ -655,7 +684,7 @@ static const struct file_operations zero_fops = {
.write_iter = write_iter_zero,
.splice_read = copy_splice_read,
.splice_write = splice_write_zero,
- .mmap = mmap_zero,
+ .mmap_prepare = mmap_zero_prepare,
.get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
.mmap_capabilities = zero_mmap_capabilities,
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index d5accc10a110..726516fb0a3b 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -132,7 +132,8 @@ static int misc_open(struct inode *inode, struct file *file)
break;
}
- if (!new_fops) {
+ /* Only request a module for a fixed minor number. */
+ if (!new_fops && minor < MISC_DYNAMIC_MINOR) {
mutex_unlock(&misc_mtx);
request_module("char-major-%d-%d", MISC_MAJOR, minor);
mutex_lock(&misc_mtx);
@@ -144,10 +145,11 @@ static int misc_open(struct inode *inode, struct file *file)
new_fops = fops_get(iter->fops);
break;
}
- if (!new_fops)
- goto fail;
}
+ if (!new_fops)
+ goto fail;
+
/*
* Place the miscdevice in the file's
* private_data so it can be used by the
@@ -210,6 +212,12 @@ int misc_register(struct miscdevice *misc)
int err = 0;
bool is_dynamic = (misc->minor == MISC_DYNAMIC_MINOR);
+ if (misc->minor > MISC_DYNAMIC_MINOR) {
+ pr_err("Invalid fixed minor %d for miscdevice '%s'\n",
+ misc->minor, misc->name);
+ return -EINVAL;
+ }
+
INIT_LIST_HEAD(&misc->list);
mutex_lock(&misc_mtx);
@@ -275,13 +283,12 @@ EXPORT_SYMBOL(misc_register);
void misc_deregister(struct miscdevice *misc)
{
- if (WARN_ON(list_empty(&misc->list)))
- return;
-
mutex_lock(&misc_mtx);
- list_del(&misc->list);
+ list_del_init(&misc->list);
device_destroy(&misc_class, MKDEV(MISC_MAJOR, misc->minor));
misc_minor_free(misc->minor);
+ if (misc->minor > MISC_DYNAMIC_MINOR)
+ misc->minor = MISC_DYNAMIC_MINOR;
mutex_unlock(&misc_mtx);
}
EXPORT_SYMBOL(misc_deregister);
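
Because deregistration now uses list_del_init() and resets a dynamically allocated minor back to MISC_DYNAMIC_MINOR, a miscdevice object can be registered again after deregistration. The pattern, which the kunit tests added below also exercise for fixed minors:

    static struct miscdevice mdev = {
            .minor = MISC_DYNAMIC_MINOR,
            .name  = "mdev_example",        /* hypothetical device */
            .fops  = &mdev_fops,            /* hypothetical fops */
    };

    misc_register(&mdev);    /* assigns a dynamic minor > MISC_DYNAMIC_MINOR */
    misc_deregister(&mdev);  /* minor reset to MISC_DYNAMIC_MINOR */
    misc_register(&mdev);    /* registering the same object again now works */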
@@ -289,15 +296,15 @@ EXPORT_SYMBOL(misc_deregister);
static int __init misc_init(void)
{
int err;
- struct proc_dir_entry *ret;
+ struct proc_dir_entry *misc_proc_file;
- ret = proc_create_seq("misc", 0, NULL, &misc_seq_ops);
+ misc_proc_file = proc_create_seq("misc", 0, NULL, &misc_seq_ops);
err = class_register(&misc_class);
if (err)
goto fail_remove;
- err = -EIO;
- if (__register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops))
+ err = __register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops);
+ if (err < 0)
goto fail_printk;
return 0;
@@ -305,7 +312,7 @@ fail_printk:
pr_err("unable to get major %d for misc devices\n", MISC_MAJOR);
class_unregister(&misc_class);
fail_remove:
- if (ret)
+ if (misc_proc_file)
remove_proc_entry("misc", NULL);
return err;
}
diff --git a/drivers/char/misc_minor_kunit.c b/drivers/char/misc_minor_kunit.c
new file mode 100644
index 000000000000..6fc8b05169c5
--- /dev/null
+++ b/drivers/char/misc_minor_kunit.c
@@ -0,0 +1,689 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/init_syscalls.h>
+
+/* static minor (LCD_MINOR) */
+static struct miscdevice dev_static_minor = {
+ .minor = LCD_MINOR,
+ .name = "dev_static_minor",
+};
+
+/* misc dynamic minor */
+static struct miscdevice dev_misc_dynamic_minor = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "dev_misc_dynamic_minor",
+};
+
+static void kunit_static_minor(struct kunit *test)
+{
+ int ret;
+
+ ret = misc_register(&dev_static_minor);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, LCD_MINOR, dev_static_minor.minor);
+ misc_deregister(&dev_static_minor);
+}
+
+static void kunit_misc_dynamic_minor(struct kunit *test)
+{
+ int ret;
+
+ ret = misc_register(&dev_misc_dynamic_minor);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ misc_deregister(&dev_misc_dynamic_minor);
+}
+
+struct miscdev_test_case {
+ const char *str;
+ int minor;
+};
+
+static struct miscdev_test_case miscdev_test_ranges[] = {
+ {
+ .str = "lower static range, top",
+ .minor = 15,
+ },
+ {
+ .str = "upper static range, bottom",
+ .minor = 130,
+ },
+ {
+ .str = "lower static range, bottom",
+ .minor = 0,
+ },
+ {
+ .str = "upper static range, top",
+ .minor = MISC_DYNAMIC_MINOR - 1,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(miscdev, miscdev_test_ranges, str);
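+
+/*
+ * KUNIT_ARRAY_PARAM_DESC() generates a miscdev_gen_params() helper that
+ * iterates miscdev_test_ranges and labels each run with the .str field.
+ * A suite wires the parameterized cases up along these lines (the suite
+ * definition itself falls outside this excerpt, so this is an assumed
+ * sketch):
+ *
+ *   static struct kunit_case miscdev_test_cases[] = {
+ *           KUNIT_CASE_PARAM(miscdev_test_twice, miscdev_gen_params),
+ *           KUNIT_CASE_PARAM(miscdev_test_duplicate_minor, miscdev_gen_params),
+ *           {}
+ *   };
+ */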
+
+static int miscdev_find_minors(struct kunit_suite *suite)
+{
+ int ret;
+ struct miscdevice miscstat = {
+ .name = "miscstat",
+ };
+ int i;
+
+ for (i = 15; i >= 0; i--) {
+ miscstat.minor = i;
+ ret = misc_register(&miscstat);
+ if (ret == 0)
+ break;
+ }
+
+ if (ret == 0) {
+ kunit_info(suite, "found misc device minor %d available\n",
+ miscstat.minor);
+ miscdev_test_ranges[0].minor = miscstat.minor;
+ misc_deregister(&miscstat);
+ } else {
+ return ret;
+ }
+
+ for (i = 128; i < MISC_DYNAMIC_MINOR; i++) {
+ miscstat.minor = i;
+ ret = misc_register(&miscstat);
+ if (ret == 0)
+ break;
+ }
+
+ if (ret == 0) {
+ kunit_info(suite, "found misc device minor %d available\n",
+ miscstat.minor);
+ miscdev_test_ranges[1].minor = miscstat.minor;
+ misc_deregister(&miscstat);
+ } else {
+ return ret;
+ }
+
+ for (i = 0; i < miscdev_test_ranges[0].minor; i++) {
+ miscstat.minor = i;
+ ret = misc_register(&miscstat);
+ if (ret == 0)
+ break;
+ }
+
+ if (ret == 0) {
+ kunit_info(suite, "found misc device minor %d available\n",
+ miscstat.minor);
+ miscdev_test_ranges[2].minor = miscstat.minor;
+ misc_deregister(&miscstat);
+ } else {
+ return ret;
+ }
+
+ for (i = MISC_DYNAMIC_MINOR - 1; i > miscdev_test_ranges[1].minor; i--) {
+ miscstat.minor = i;
+ ret = misc_register(&miscstat);
+ if (ret == 0)
+ break;
+ }
+
+ if (ret == 0) {
+ kunit_info(suite, "found misc device minor %d available\n",
+ miscstat.minor);
+ miscdev_test_ranges[3].minor = miscstat.minor;
+ misc_deregister(&miscstat);
+ }
+
+ return ret;
+}
+
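+/* Dynamic minors are handed out strictly above MISC_DYNAMIC_MINOR. */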
+static bool is_valid_dynamic_minor(int minor)
+{
+ if (minor < 0)
+ return false;
+ return minor > MISC_DYNAMIC_MINOR;
+}
+
+static int miscdev_test_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations miscdev_test_fops = {
+ .open = miscdev_test_open,
+};
+
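+/*
+ * Create a throwaway node for @misc under /dev, open and close it, then
+ * unlink it again. init_mknod()/init_unlink() are only usable from early
+ * init code, which is why the callers live in the init-section suite.
+ */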
+static void __init miscdev_test_can_open(struct kunit *test, struct miscdevice *misc)
+{
+ int ret;
+ struct file *filp;
+ char *devname;
+
+ devname = kasprintf(GFP_KERNEL, "/dev/%s", misc->name);
+ ret = init_mknod(devname, S_IFCHR | 0600,
+ new_encode_dev(MKDEV(MISC_MAJOR, misc->minor)));
+ if (ret != 0)
+ KUNIT_FAIL(test, "failed to create node\n");
+
+ filp = filp_open(devname, O_RDONLY, 0);
+ if (IS_ERR_OR_NULL(filp))
+ KUNIT_FAIL(test, "failed to open misc device: %ld\n", PTR_ERR(filp));
+ else
+ fput(filp);
+
+ init_unlink(devname);
+ kfree(devname);
+}
+
+static void __init miscdev_test_static_basic(struct kunit *test)
+{
+ struct miscdevice misc_test = {
+ .name = "misc_test",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+ const struct miscdev_test_case *params = test->param_value;
+
+ misc_test.minor = params->minor;
+
+ ret = misc_register(&misc_test);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, misc_test.minor, params->minor);
+
+ if (ret == 0) {
+ miscdev_test_can_open(test, &misc_test);
+ misc_deregister(&misc_test);
+ }
+}
+
+static void __init miscdev_test_dynamic_basic(struct kunit *test)
+{
+ struct miscdevice misc_test = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "misc_test",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+
+ ret = misc_register(&misc_test);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc_test.minor));
+
+ if (ret == 0) {
+ miscdev_test_can_open(test, &misc_test);
+ misc_deregister(&misc_test);
+ }
+}
+
+static void miscdev_test_twice(struct kunit *test)
+{
+ struct miscdevice misc_test = {
+ .name = "misc_test",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+ const struct miscdev_test_case *params = test->param_value;
+
+ misc_test.minor = params->minor;
+
+ ret = misc_register(&misc_test);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, misc_test.minor, params->minor);
+ if (ret == 0)
+ misc_deregister(&misc_test);
+
+ ret = misc_register(&misc_test);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, misc_test.minor, params->minor);
+ if (ret == 0)
+ misc_deregister(&misc_test);
+}
+
+static void miscdev_test_duplicate_minor(struct kunit *test)
+{
+ struct miscdevice misc1 = {
+ .name = "misc1",
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice misc2 = {
+ .name = "misc2",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+ const struct miscdev_test_case *params = test->param_value;
+
+ misc1.minor = params->minor;
+ misc2.minor = params->minor;
+
+ ret = misc_register(&misc1);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, misc1.minor, params->minor);
+
+ ret = misc_register(&misc2);
+ KUNIT_EXPECT_EQ(test, ret, -EBUSY);
+ if (ret == 0)
+ misc_deregister(&misc2);
+
+ misc_deregister(&misc1);
+}
+
+static void miscdev_test_duplicate_name(struct kunit *test)
+{
+ struct miscdevice misc1 = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "misc1",
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice misc2 = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "misc1",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+
+ ret = misc_register(&misc1);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc1.minor));
+
+ ret = misc_register(&misc2);
+ KUNIT_EXPECT_EQ(test, ret, -EEXIST);
+ if (ret == 0)
+ misc_deregister(&misc2);
+
+ misc_deregister(&misc1);
+}
+
+/*
+ * Test that after a duplicate-name failure, the minor reserved for the
+ * failed registration is released and becomes the next one allocated.
+ */
+static void miscdev_test_duplicate_name_leak(struct kunit *test)
+{
+ struct miscdevice misc1 = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "misc1",
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice misc2 = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "misc1",
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice misc3 = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "misc3",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+ int dyn_minor;
+
+ ret = misc_register(&misc1);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc1.minor));
+
+	/* Find out which minor number will be allocated next. */
+ ret = misc_register(&misc3);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc3.minor));
+ dyn_minor = misc3.minor;
+ misc_deregister(&misc3);
+ misc3.minor = MISC_DYNAMIC_MINOR;
+
+ ret = misc_register(&misc2);
+ KUNIT_EXPECT_EQ(test, ret, -EEXIST);
+ if (ret == 0)
+ misc_deregister(&misc2);
+
+ /*
+ * Now check that we can still get the same minor we found before.
+ */
+ ret = misc_register(&misc3);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(misc3.minor));
+ KUNIT_EXPECT_EQ(test, misc3.minor, dyn_minor);
+ misc_deregister(&misc3);
+
+ misc_deregister(&misc1);
+}
+
+/*
+ * Try to register a static minor under a duplicate name. The failed
+ * registration must still release the minor; otherwise it could never be
+ * used again.
+ */
+static void miscdev_test_duplicate_error(struct kunit *test)
+{
+ struct miscdevice miscdyn = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "name1",
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice miscstat = {
+ .name = "name1",
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice miscnew = {
+ .name = "name2",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+ const struct miscdev_test_case *params = test->param_value;
+
+ miscstat.minor = params->minor;
+ miscnew.minor = params->minor;
+
+ ret = misc_register(&miscdyn);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn.minor));
+
+ ret = misc_register(&miscstat);
+ KUNIT_EXPECT_EQ(test, ret, -EEXIST);
+ if (ret == 0)
+ misc_deregister(&miscstat);
+
+ ret = misc_register(&miscnew);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, miscnew.minor, params->minor);
+ if (ret == 0)
+ misc_deregister(&miscnew);
+
+ misc_deregister(&miscdyn);
+}
+
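+/*
+ * Exhaust dynamic allocation and check that a dynamic request is never
+ * satisfied from the static 0..15 range.
+ */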
+static void __init miscdev_test_dynamic_only_range(struct kunit *test)
+{
+ int ret;
+ struct miscdevice *miscdev;
+ const int dynamic_minors = 256;
+ int i;
+
+	miscdev = kunit_kmalloc_array(test, dynamic_minors,
+				      sizeof(struct miscdevice),
+				      GFP_KERNEL | __GFP_ZERO);
+	KUNIT_ASSERT_NOT_NULL(test, miscdev);
+
+ for (i = 0; i < dynamic_minors; i++) {
+ miscdev[i].minor = MISC_DYNAMIC_MINOR;
+ miscdev[i].name = kasprintf(GFP_KERNEL, "misc_test%d", i);
+ miscdev[i].fops = &miscdev_test_fops;
+ ret = misc_register(&miscdev[i]);
+ if (ret != 0)
+ break;
+		/*
+		 * This is the bug we are looking for: we asked for a
+		 * dynamic minor and got one from the static range.
+		 */
+		if (miscdev[i].minor >= 0 && miscdev[i].minor <= 15) {
+			KUNIT_FAIL(test, "misc_register allocated static minor %d for a dynamic request\n",
+				   miscdev[i].minor);
+ i++;
+ break;
+ }
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdev[i].minor));
+ }
+
+ for (i--; i >= 0; i--) {
+ miscdev_test_can_open(test, &miscdev[i]);
+ misc_deregister(&miscdev[i]);
+ kfree_const(miscdev[i].name);
+ }
+
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
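+/*
+ * Hold a static minor while exhausting dynamic allocation: none of the
+ * dynamic minors handed out may collide with the registered static one.
+ */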
+static void __init miscdev_test_collision(struct kunit *test)
+{
+ int ret;
+ struct miscdevice *miscdev;
+ struct miscdevice miscstat = {
+ .name = "miscstat",
+ .fops = &miscdev_test_fops,
+ };
+ const int dynamic_minors = 256;
+ int i;
+
+	miscdev = kunit_kmalloc_array(test, dynamic_minors,
+				      sizeof(struct miscdevice),
+				      GFP_KERNEL | __GFP_ZERO);
+	KUNIT_ASSERT_NOT_NULL(test, miscdev);
+
+ miscstat.minor = miscdev_test_ranges[0].minor;
+ ret = misc_register(&miscstat);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, miscstat.minor, miscdev_test_ranges[0].minor);
+
+ for (i = 0; i < dynamic_minors; i++) {
+ miscdev[i].minor = MISC_DYNAMIC_MINOR;
+ miscdev[i].name = kasprintf(GFP_KERNEL, "misc_test%d", i);
+ miscdev[i].fops = &miscdev_test_fops;
+ ret = misc_register(&miscdev[i]);
+ if (ret != 0)
+ break;
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdev[i].minor));
+ }
+
+ for (i--; i >= 0; i--) {
+ miscdev_test_can_open(test, &miscdev[i]);
+ misc_deregister(&miscdev[i]);
+ kfree_const(miscdev[i].name);
+ }
+
+ misc_deregister(&miscstat);
+
+ KUNIT_EXPECT_EQ(test, ret, 0);
+}
+
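+/*
+ * Like miscdev_test_collision(), but grab the dynamic minors first and
+ * register the static one afterwards.
+ */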
+static void __init miscdev_test_collision_reverse(struct kunit *test)
+{
+ int ret;
+ struct miscdevice *miscdev;
+ struct miscdevice miscstat = {
+ .name = "miscstat",
+ .fops = &miscdev_test_fops,
+ };
+ const int dynamic_minors = 256;
+ int i;
+
+	miscdev = kunit_kmalloc_array(test, dynamic_minors,
+				      sizeof(struct miscdevice),
+				      GFP_KERNEL | __GFP_ZERO);
+	KUNIT_ASSERT_NOT_NULL(test, miscdev);
+
+ for (i = 0; i < dynamic_minors; i++) {
+ miscdev[i].minor = MISC_DYNAMIC_MINOR;
+ miscdev[i].name = kasprintf(GFP_KERNEL, "misc_test%d", i);
+ miscdev[i].fops = &miscdev_test_fops;
+ ret = misc_register(&miscdev[i]);
+ if (ret != 0)
+ break;
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdev[i].minor));
+ }
+
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ miscstat.minor = miscdev_test_ranges[0].minor;
+ ret = misc_register(&miscstat);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, miscstat.minor, miscdev_test_ranges[0].minor);
+ if (ret == 0)
+ misc_deregister(&miscstat);
+
+ for (i--; i >= 0; i--) {
+ miscdev_test_can_open(test, &miscdev[i]);
+ misc_deregister(&miscdev[i]);
+ kfree_const(miscdev[i].name);
+ }
+}
+
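+/*
+ * Requesting, as a static minor, a minor that a dynamic device already
+ * holds must fail without disturbing the dynamic device.
+ */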
+static void __init miscdev_test_conflict(struct kunit *test)
+{
+ int ret;
+ struct miscdevice miscdyn = {
+ .name = "miscdyn",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice miscstat = {
+ .name = "miscstat",
+ .fops = &miscdev_test_fops,
+ };
+
+ ret = misc_register(&miscdyn);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn.minor));
+
+	/*
+	 * Try to register a static device with the same minor the
+	 * dynamic one already holds.
+	 */
+ miscstat.minor = miscdyn.minor;
+ ret = misc_register(&miscstat);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+ if (ret == 0)
+ misc_deregister(&miscstat);
+
+ miscdev_test_can_open(test, &miscdyn);
+
+ misc_deregister(&miscdyn);
+}
+
+static void __init miscdev_test_conflict_reverse(struct kunit *test)
+{
+ int ret;
+ struct miscdevice miscdyn = {
+ .name = "miscdyn",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice miscstat = {
+ .name = "miscstat",
+ .fops = &miscdev_test_fops,
+ };
+
+	/*
+	 * Find the first available dynamic minor so it can be requested
+	 * as a static minor later on.
+	 */
+ ret = misc_register(&miscdyn);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn.minor));
+ miscstat.minor = miscdyn.minor;
+ misc_deregister(&miscdyn);
+
+ ret = misc_register(&miscstat);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+ if (ret == 0)
+ misc_deregister(&miscstat);
+
+	/*
+	 * Try to register a dynamic minor after the failed static
+	 * registration within the dynamic range. It should work and get
+	 * the same minor back, since the failed attempt must not leak it.
+	 */
+ miscdyn.minor = MISC_DYNAMIC_MINOR;
+ ret = misc_register(&miscdyn);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, miscdyn.minor, miscstat.minor);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn.minor));
+ if (ret == 0)
+ misc_deregister(&miscdyn);
+}
+
+/* A minor greater than MISC_DYNAMIC_MINOR is rejected as invalid at registration */
+static void miscdev_test_invalid_input(struct kunit *test)
+{
+ struct miscdevice misc_test = {
+ .minor = MISC_DYNAMIC_MINOR + 1,
+ .name = "misc_test",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
+
+ ret = misc_register(&misc_test);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+ if (ret == 0)
+ misc_deregister(&misc_test);
+}
+
+/*
+ * Verify that @miscdyn_a can be registered again without reinitialization
+ * even after the minor it previously owned has been handed out to another
+ * miscdevice such as @miscdyn_b.
+ */
+static void miscdev_test_dynamic_reentry(struct kunit *test)
+{
+ struct miscdevice miscdyn_a = {
+ .name = "miscdyn_a",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice miscdyn_b = {
+ .name = "miscdyn_b",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &miscdev_test_fops,
+ };
+ int ret, minor_a;
+
+ ret = misc_register(&miscdyn_a);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn_a.minor));
+ minor_a = miscdyn_a.minor;
+ if (ret != 0)
+ return;
+ misc_deregister(&miscdyn_a);
+
+ ret = misc_register(&miscdyn_b);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, miscdyn_b.minor, minor_a);
+ if (ret != 0)
+ return;
+
+ ret = misc_register(&miscdyn_a);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn_a.minor));
+ KUNIT_EXPECT_NE(test, miscdyn_a.minor, miscdyn_b.minor);
+ if (ret == 0)
+ misc_deregister(&miscdyn_a);
+
+ misc_deregister(&miscdyn_b);
+}
+
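+/* Cases that need no init-section helpers and can run at any time. */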
+static struct kunit_case test_cases[] = {
+ KUNIT_CASE(kunit_static_minor),
+ KUNIT_CASE(kunit_misc_dynamic_minor),
+ KUNIT_CASE(miscdev_test_invalid_input),
+ KUNIT_CASE_PARAM(miscdev_test_twice, miscdev_gen_params),
+ KUNIT_CASE_PARAM(miscdev_test_duplicate_minor, miscdev_gen_params),
+ KUNIT_CASE(miscdev_test_duplicate_name),
+ KUNIT_CASE(miscdev_test_duplicate_name_leak),
+ KUNIT_CASE_PARAM(miscdev_test_duplicate_error, miscdev_gen_params),
+ KUNIT_CASE(miscdev_test_dynamic_reentry),
+ {}
+};
+
+static struct kunit_suite test_suite = {
+ .name = "miscdev",
+ .suite_init = miscdev_find_minors,
+ .test_cases = test_cases,
+};
+kunit_test_suite(test_suite);
+
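+/*
+ * These cases create device nodes via init_mknod() and therefore must run
+ * from the init section; __refdata keeps modpost quiet about this table
+ * referencing __init functions.
+ */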
+static struct kunit_case __refdata test_init_cases[] = {
+ KUNIT_CASE_PARAM(miscdev_test_static_basic, miscdev_gen_params),
+ KUNIT_CASE(miscdev_test_dynamic_basic),
+ KUNIT_CASE(miscdev_test_dynamic_only_range),
+ KUNIT_CASE(miscdev_test_collision),
+ KUNIT_CASE(miscdev_test_collision_reverse),
+ KUNIT_CASE(miscdev_test_conflict),
+ KUNIT_CASE(miscdev_test_conflict_reverse),
+ {}
+};
+
+static struct kunit_suite test_init_suite = {
+ .name = "miscdev_init",
+ .suite_init = miscdev_find_minors,
+ .test_cases = test_init_cases,
+};
+kunit_test_init_section_suite(test_init_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vimal Agrawal");
+MODULE_AUTHOR("Thadeu Lima de Souza Cascardo <cascardo@igalia.com>");
+MODULE_DESCRIPTION("Test module for misc character devices");
diff --git a/drivers/char/mwave/3780i.c b/drivers/char/mwave/3780i.c
index 4a8937f80570..90f93cefb21c 100644
--- a/drivers/char/mwave/3780i.c
+++ b/drivers/char/mwave/3780i.c
@@ -46,6 +46,8 @@
* First release to the public
*/
+#define pr_fmt(fmt) "3780i: " fmt
+
#include <linux/kernel.h>
#include <linux/unistd.h>
#include <linux/delay.h>
@@ -75,18 +77,12 @@ unsigned short dsp3780I_ReadMsaCfg(unsigned short usDspBaseIO,
unsigned long flags;
unsigned short val;
- PRINTK_3(TRACE_3780I,
- "3780i::dsp3780I_ReadMsaCfg entry usDspBaseIO %x ulMsaAddr %lx\n",
- usDspBaseIO, ulMsaAddr);
-
spin_lock_irqsave(&dsp_lock, flags);
OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulMsaAddr);
OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulMsaAddr >> 16));
val = InWordDsp(DSP_MsaDataDSISHigh);
spin_unlock_irqrestore(&dsp_lock, flags);
- PRINTK_2(TRACE_3780I, "3780i::dsp3780I_ReadMsaCfg exit val %x\n", val);
-
return val;
}
@@ -95,10 +91,6 @@ void dsp3780I_WriteMsaCfg(unsigned short usDspBaseIO,
{
unsigned long flags;
- PRINTK_4(TRACE_3780I,
- "3780i::dsp3780i_WriteMsaCfg entry usDspBaseIO %x ulMsaAddr %lx usValue %x\n",
- usDspBaseIO, ulMsaAddr, usValue);
-
spin_lock_irqsave(&dsp_lock, flags);
OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulMsaAddr);
OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulMsaAddr >> 16));
@@ -112,64 +104,18 @@ static void dsp3780I_WriteGenCfg(unsigned short usDspBaseIO, unsigned uIndex,
DSP_ISA_SLAVE_CONTROL rSlaveControl;
DSP_ISA_SLAVE_CONTROL rSlaveControl_Save;
-
- PRINTK_4(TRACE_3780I,
- "3780i::dsp3780i_WriteGenCfg entry usDspBaseIO %x uIndex %x ucValue %x\n",
- usDspBaseIO, uIndex, ucValue);
-
MKBYTE(rSlaveControl) = InByteDsp(DSP_IsaSlaveControl);
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_WriteGenCfg rSlaveControl %x\n",
- MKBYTE(rSlaveControl));
-
rSlaveControl_Save = rSlaveControl;
rSlaveControl.ConfigMode = true;
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_WriteGenCfg entry rSlaveControl+ConfigMode %x\n",
- MKBYTE(rSlaveControl));
-
OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl));
OutByteDsp(DSP_ConfigAddress, (unsigned char) uIndex);
OutByteDsp(DSP_ConfigData, ucValue);
OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl_Save));
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_WriteGenCfg exit\n");
-
-
}
-#if 0
-unsigned char dsp3780I_ReadGenCfg(unsigned short usDspBaseIO,
- unsigned uIndex)
-{
- DSP_ISA_SLAVE_CONTROL rSlaveControl;
- DSP_ISA_SLAVE_CONTROL rSlaveControl_Save;
- unsigned char ucValue;
-
-
- PRINTK_3(TRACE_3780I,
- "3780i::dsp3780i_ReadGenCfg entry usDspBaseIO %x uIndex %x\n",
- usDspBaseIO, uIndex);
-
- MKBYTE(rSlaveControl) = InByteDsp(DSP_IsaSlaveControl);
- rSlaveControl_Save = rSlaveControl;
- rSlaveControl.ConfigMode = true;
- OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl));
- OutByteDsp(DSP_ConfigAddress, (unsigned char) uIndex);
- ucValue = InByteDsp(DSP_ConfigData);
- OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl_Save));
-
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_ReadGenCfg exit ucValue %x\n", ucValue);
-
-
- return ucValue;
-}
-#endif /* 0 */
-
-int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
+int dsp3780I_EnableDSP(struct dsp_3780i_config_settings *pSettings,
unsigned short *pIrqMap,
unsigned short *pDmaMap)
{
@@ -191,25 +137,13 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
DSP_CLOCK_CONTROL_2 rClockControl2;
DSP_ISA_SLAVE_CONTROL rSlaveControl;
DSP_HBRIDGE_CONTROL rHBridgeControl;
- unsigned short ChipID = 0;
unsigned short tval;
-
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780I_EnableDSP entry pSettings->bDSPEnabled %x\n",
- pSettings->bDSPEnabled);
-
-
if (!pSettings->bDSPEnabled) {
- PRINTK_ERROR( KERN_ERR "3780i::dsp3780I_EnableDSP: Error: DSP not enabled. Aborting.\n" );
+ pr_err("%s: Error: DSP not enabled. Aborting.\n", __func__);
return -EIO;
}
-
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_EnableDSP entry pSettings->bModemEnabled %x\n",
- pSettings->bModemEnabled);
-
if (pSettings->bModemEnabled) {
rUartCfg1.Reserved = rUartCfg2.Reserved = 0;
rUartCfg1.IrqActiveLow = pSettings->bUartIrqActiveLow;
@@ -282,23 +216,10 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
rSlaveControl.ConfigMode = false;
rSlaveControl.Reserved = 0;
- PRINTK_4(TRACE_3780I,
- "3780i::dsp3780i_EnableDSP usDspBaseIO %x index %x taddr %x\n",
- usDspBaseIO, DSP_IsaSlaveControl,
- usDspBaseIO + DSP_IsaSlaveControl);
-
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_EnableDSP rSlaveContrl %x\n",
- MKWORD(rSlaveControl));
-
spin_lock_irqsave(&dsp_lock, flags);
OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl));
MKWORD(tval) = InWordDsp(DSP_IsaSlaveControl);
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_EnableDSP rSlaveControl 2 %x\n", tval);
-
-
for (i = 0; i < 11; i++)
udelay(2000);
@@ -307,10 +228,6 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
MKWORD(tval) = InWordDsp(DSP_IsaSlaveControl);
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_EnableDSP rSlaveControl 3 %x\n", tval);
-
-
/* Program our general configuration registers */
WriteGenCfg(DSP_HBridgeCfg1Index, MKBYTE(rHBridgeCfg1));
WriteGenCfg(DSP_HBridgeCfg2Index, MKBYTE(rHBridgeCfg2));
@@ -331,10 +248,6 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
rHBridgeControl.IoAutoInc = false;
rHBridgeControl.DiagnosticMode = false;
- PRINTK_3(TRACE_3780I,
- "3780i::dsp3780i_EnableDSP DSP_HBridgeControl %x rHBridgeControl %x\n",
- DSP_HBridgeControl, MKWORD(rHBridgeControl));
-
OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
spin_unlock_irqrestore(&dsp_lock, flags);
WriteMsaCfg(DSP_LBusTimeoutDisable, MKWORD(rLBusTimeoutDisable));
@@ -342,24 +255,17 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
WriteMsaCfg(DSP_ClockControl_2, MKWORD(rClockControl2));
WriteMsaCfg(DSP_ChipReset, MKWORD(rChipReset));
- ChipID = ReadMsaCfg(DSP_ChipID);
-
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780I_EnableDSP exiting bRC=true, ChipID %x\n",
- ChipID);
+ ReadMsaCfg(DSP_ChipID);
return 0;
}
-int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings)
+int dsp3780I_DisableDSP(struct dsp_3780i_config_settings *pSettings)
{
unsigned long flags;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
DSP_ISA_SLAVE_CONTROL rSlaveControl;
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_DisableDSP entry\n");
-
rSlaveControl.ClockControl = 0;
rSlaveControl.SoftReset = true;
rSlaveControl.ConfigMode = false;
@@ -375,29 +281,20 @@ int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings)
udelay(5);
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_DisableDSP exit\n");
-
return 0;
}
-int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings)
+int dsp3780I_Reset(struct dsp_3780i_config_settings *pSettings)
{
unsigned long flags;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
DSP_BOOT_DOMAIN rBootDomain;
DSP_HBRIDGE_CONTROL rHBridgeControl;
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Reset entry\n");
-
spin_lock_irqsave(&dsp_lock, flags);
/* Mask DSP to PC interrupt */
MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
- PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rHBridgeControl %x\n",
- MKWORD(rHBridgeControl));
-
rHBridgeControl.EnableDspInt = false;
OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
spin_unlock_irqrestore(&dsp_lock, flags);
@@ -408,9 +305,6 @@ int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings)
rBootDomain.NMI = true;
rBootDomain.Reserved = 0;
- PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rBootDomain %x\n",
- MKWORD(rBootDomain));
-
WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain));
/* Reset all the chiplets and then reactivate them */
@@ -419,24 +313,17 @@ int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings)
WriteMsaCfg(DSP_ChipReset,
(unsigned short) (~pSettings->usChipletEnable));
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Reset exit bRC=0\n");
-
return 0;
}
-int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings)
+int dsp3780I_Run(struct dsp_3780i_config_settings *pSettings)
{
unsigned long flags;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
DSP_BOOT_DOMAIN rBootDomain;
DSP_HBRIDGE_CONTROL rHBridgeControl;
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run entry\n");
-
-
/* Transition the core to a running state */
rBootDomain.ResetCore = true;
rBootDomain.Halt = false;
@@ -459,15 +346,9 @@ int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings)
MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
rHBridgeControl.EnableDspInt = true;
- PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Run rHBridgeControl %x\n",
- MKWORD(rHBridgeControl));
-
OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
spin_unlock_irqrestore(&dsp_lock, flags);
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run exit bRC=true\n");
-
return 0;
}
@@ -479,12 +360,6 @@ int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
unsigned short __user *pusBuffer = pvBuffer;
unsigned short val;
-
- PRINTK_5(TRACE_3780I,
- "3780i::dsp3780I_ReadDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
- usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
-
-
/* Set the initial MSA address. No adjustments need to be made to data store addresses */
spin_lock_irqsave(&dsp_lock, flags);
OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
@@ -499,17 +374,9 @@ int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
if(put_user(val, pusBuffer++))
return -EFAULT;
- PRINTK_3(TRACE_3780I,
- "3780I::dsp3780I_ReadDStore uCount %x val %x\n",
- uCount, val);
-
PaceMsaAccess(usDspBaseIO);
}
-
- PRINTK_1(TRACE_3780I,
- "3780I::dsp3780I_ReadDStore exit bRC=true\n");
-
return 0;
}
@@ -521,12 +388,6 @@ int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO,
unsigned short __user *pusBuffer = pvBuffer;
unsigned short val;
-
- PRINTK_5(TRACE_3780I,
- "3780i::dsp3780I_ReadAndDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
- usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
-
-
/* Set the initial MSA address. No adjustments need to be made to data store addresses */
spin_lock_irqsave(&dsp_lock, flags);
OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
@@ -541,17 +402,9 @@ int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO,
if(put_user(val, pusBuffer++))
return -EFAULT;
- PRINTK_3(TRACE_3780I,
- "3780I::dsp3780I_ReadAndCleanDStore uCount %x val %x\n",
- uCount, val);
-
PaceMsaAccess(usDspBaseIO);
}
-
- PRINTK_1(TRACE_3780I,
- "3780I::dsp3780I_ReadAndClearDStore exit bRC=true\n");
-
return 0;
}
@@ -562,12 +415,6 @@ int dsp3780I_WriteDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
unsigned long flags;
unsigned short __user *pusBuffer = pvBuffer;
-
- PRINTK_5(TRACE_3780I,
- "3780i::dsp3780D_WriteDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
- usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
-
-
/* Set the initial MSA address. No adjustments need to be made to data store addresses */
spin_lock_irqsave(&dsp_lock, flags);
OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
@@ -583,17 +430,9 @@ int dsp3780I_WriteDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
OutWordDsp(DSP_MsaDataDSISHigh, val);
spin_unlock_irqrestore(&dsp_lock, flags);
- PRINTK_3(TRACE_3780I,
- "3780I::dsp3780I_WriteDStore uCount %x val %x\n",
- uCount, val);
-
PaceMsaAccess(usDspBaseIO);
}
-
- PRINTK_1(TRACE_3780I,
- "3780I::dsp3780D_WriteDStore exit bRC=true\n");
-
return 0;
}
@@ -604,10 +443,6 @@ int dsp3780I_ReadIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
unsigned long flags;
unsigned short __user *pusBuffer = pvBuffer;
- PRINTK_5(TRACE_3780I,
- "3780i::dsp3780I_ReadIStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
- usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
-
/*
* Set the initial MSA address. To convert from an instruction store
* address to an MSA address
@@ -631,17 +466,10 @@ int dsp3780I_ReadIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
if(put_user(val_hi, pusBuffer++))
return -EFAULT;
- PRINTK_4(TRACE_3780I,
- "3780I::dsp3780I_ReadIStore uCount %x val_lo %x val_hi %x\n",
- uCount, val_lo, val_hi);
-
PaceMsaAccess(usDspBaseIO);
}
- PRINTK_1(TRACE_3780I,
- "3780I::dsp3780I_ReadIStore exit bRC=true\n");
-
return 0;
}
@@ -652,11 +480,6 @@ int dsp3780I_WriteIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
unsigned long flags;
unsigned short __user *pusBuffer = pvBuffer;
- PRINTK_5(TRACE_3780I,
- "3780i::dsp3780I_WriteIStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
- usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
-
-
/*
* Set the initial MSA address. To convert from an instruction store
* address to an MSA address
@@ -680,17 +503,9 @@ int dsp3780I_WriteIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
OutWordDsp(DSP_MsaDataDSISHigh, val_hi);
spin_unlock_irqrestore(&dsp_lock, flags);
- PRINTK_4(TRACE_3780I,
- "3780I::dsp3780I_WriteIStore uCount %x val_lo %x val_hi %x\n",
- uCount, val_lo, val_hi);
-
PaceMsaAccess(usDspBaseIO);
-
}
- PRINTK_1(TRACE_3780I,
- "3780I::dsp3780I_WriteIStore exit bRC=true\n");
-
return 0;
}
@@ -700,12 +515,6 @@ int dsp3780I_GetIPCSource(unsigned short usDspBaseIO,
{
unsigned long flags;
DSP_HBRIDGE_CONTROL rHBridgeControl;
- unsigned short temp;
-
-
- PRINTK_3(TRACE_3780I,
- "3780i::dsp3780I_GetIPCSource entry usDspBaseIO %x pusIPCSource %p\n",
- usDspBaseIO, pusIPCSource);
/*
* Disable DSP to PC interrupts, read the interrupt register,
@@ -717,22 +526,11 @@ int dsp3780I_GetIPCSource(unsigned short usDspBaseIO,
OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
*pusIPCSource = InWordDsp(DSP_Interrupt);
- temp = (unsigned short) ~(*pusIPCSource);
-
- PRINTK_3(TRACE_3780I,
- "3780i::dsp3780I_GetIPCSource, usIPCSource %x ~ %x\n",
- *pusIPCSource, temp);
-
OutWordDsp(DSP_Interrupt, (unsigned short) ~(*pusIPCSource));
rHBridgeControl.EnableDspInt = true;
OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
spin_unlock_irqrestore(&dsp_lock, flags);
-
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780I_GetIPCSource exit usIPCSource %x\n",
- *pusIPCSource);
-
return 0;
}
diff --git a/drivers/char/mwave/3780i.h b/drivers/char/mwave/3780i.h
index 95164246afd1..53dafceb20e0 100644
--- a/drivers/char/mwave/3780i.h
+++ b/drivers/char/mwave/3780i.h
@@ -261,7 +261,7 @@ typedef struct {
* the only values maintained by the 3780i support layer are the saved UART
* registers.
*/
-typedef struct _DSP_3780I_CONFIG_SETTINGS {
+struct dsp_3780i_config_settings {
/* Location of base configuration register */
unsigned short usBaseConfigIO;
@@ -313,16 +313,16 @@ typedef struct _DSP_3780I_CONFIG_SETTINGS {
unsigned char ucSCR; /* Scratch register */
unsigned char ucDLL; /* Divisor latch, low byte */
unsigned char ucDLM; /* Divisor latch, high byte */
-} DSP_3780I_CONFIG_SETTINGS;
+};
/* 3780i support functions */
-int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
+int dsp3780I_EnableDSP(struct dsp_3780i_config_settings *pSettings,
unsigned short *pIrqMap,
unsigned short *pDmaMap);
-int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings);
-int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings);
-int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings);
+int dsp3780I_DisableDSP(struct dsp_3780i_config_settings *pSettings);
+int dsp3780I_Reset(struct dsp_3780i_config_settings *pSettings);
+int dsp3780I_Run(struct dsp_3780i_config_settings *pSettings);
int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
unsigned uCount, unsigned long ulDSPAddr);
int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO,
diff --git a/drivers/char/mwave/Makefile b/drivers/char/mwave/Makefile
index a24fe96e3c96..e56c1a375535 100644
--- a/drivers/char/mwave/Makefile
+++ b/drivers/char/mwave/Makefile
@@ -8,9 +8,3 @@
obj-$(CONFIG_MWAVE) += mwave.o
mwave-y := mwavedd.o smapi.o tp3780i.o 3780i.o
-
-# To have the mwave driver disable other uarts if necessary
-# ccflags-y := -DMWAVE_FUTZ_WITH_OTHER_DEVICES
-
-# To compile in lots (~20 KiB) of run-time enablable printk()s for debugging:
-ccflags-y += -DMW_TRACE
diff --git a/drivers/char/mwave/README b/drivers/char/mwave/README
index c2a58f428bc8..6224aa814c62 100644
--- a/drivers/char/mwave/README
+++ b/drivers/char/mwave/README
@@ -4,16 +4,6 @@ Module options
The mwave module takes the following options. Note that these options
are not saved by the BIOS and so do not persist after unload and reload.
- mwave_debug=value, where value is bitwise OR of trace flags:
- 0x0001 mwavedd api tracing
- 0x0002 smapi api tracing
- 0x0004 3780i tracing
- 0x0008 tp3780i tracing
-
- Tracing only occurs if the driver has been compiled with the
- MW_TRACE macro #defined (i.e. let ccflags-y := -DMW_TRACE
- in the Makefile).
-
mwave_3780i_irq=5/7/10/11/15
If the dsp irq has not been setup and stored in bios by the
thinkpad configuration utility then this parameter allows the
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index 11272d605ecd..640a9cb0dd8d 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -46,6 +46,8 @@
* First release to the public
*/
+#define pr_fmt(fmt) "mwavedd: " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
@@ -75,131 +77,62 @@ MODULE_LICENSE("GPL");
* We'll depend on users using the tpctl utility to do that for now
*/
static DEFINE_MUTEX(mwave_mutex);
-int mwave_debug = 0;
int mwave_3780i_irq = 0;
int mwave_3780i_io = 0;
int mwave_uart_irq = 0;
int mwave_uart_io = 0;
-module_param(mwave_debug, int, 0);
module_param_hw(mwave_3780i_irq, int, irq, 0);
module_param_hw(mwave_3780i_io, int, ioport, 0);
module_param_hw(mwave_uart_irq, int, irq, 0);
module_param_hw(mwave_uart_io, int, ioport, 0);
-static int mwave_open(struct inode *inode, struct file *file);
-static int mwave_close(struct inode *inode, struct file *file);
-static long mwave_ioctl(struct file *filp, unsigned int iocmd,
- unsigned long ioarg);
-
-MWAVE_DEVICE_DATA mwave_s_mdd;
-
-static int mwave_open(struct inode *inode, struct file *file)
-{
- unsigned int retval = 0;
-
- PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_open, entry inode %p file %p\n",
- inode, file);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_open, exit return retval %x\n", retval);
-
- return retval;
-}
-
-static int mwave_close(struct inode *inode, struct file *file)
-{
- unsigned int retval = 0;
-
- PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_close, entry inode %p file %p\n",
- inode, file);
-
- PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_close, exit retval %x\n",
- retval);
-
- return retval;
-}
+struct mwave_device_data mwave_s_mdd;
static long mwave_ioctl(struct file *file, unsigned int iocmd,
unsigned long ioarg)
{
unsigned int retval = 0;
- pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
+ struct mwave_device_data *pDrvData = &mwave_s_mdd;
void __user *arg = (void __user *)ioarg;
- PRINTK_4(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, entry file %p cmd %x arg %x\n",
- file, iocmd, (int) ioarg);
-
switch (iocmd) {
case IOCTL_MW_RESET:
- PRINTK_1(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, IOCTL_MW_RESET"
- " calling tp3780I_ResetDSP\n");
mutex_lock(&mwave_mutex);
retval = tp3780I_ResetDSP(&pDrvData->rBDData);
mutex_unlock(&mwave_mutex);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, IOCTL_MW_RESET"
- " retval %x from tp3780I_ResetDSP\n",
- retval);
break;
case IOCTL_MW_RUN:
- PRINTK_1(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, IOCTL_MW_RUN"
- " calling tp3780I_StartDSP\n");
mutex_lock(&mwave_mutex);
retval = tp3780I_StartDSP(&pDrvData->rBDData);
mutex_unlock(&mwave_mutex);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, IOCTL_MW_RUN"
- " retval %x from tp3780I_StartDSP\n",
- retval);
break;
case IOCTL_MW_DSP_ABILITIES: {
- MW_ABILITIES rAbilities;
+ struct mw_abilities rAbilities;
- PRINTK_1(TRACE_MWAVE,
- "mwavedd::mwave_ioctl,"
- " IOCTL_MW_DSP_ABILITIES calling"
- " tp3780I_QueryAbilities\n");
mutex_lock(&mwave_mutex);
retval = tp3780I_QueryAbilities(&pDrvData->rBDData,
&rAbilities);
mutex_unlock(&mwave_mutex);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
- " retval %x from tp3780I_QueryAbilities\n",
- retval);
if (retval == 0) {
- if( copy_to_user(arg, &rAbilities,
- sizeof(MW_ABILITIES)) )
+ if (copy_to_user(arg, &rAbilities, sizeof(rAbilities)))
return -EFAULT;
}
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
- " exit retval %x\n",
- retval);
}
break;
case IOCTL_MW_READ_DATA:
case IOCTL_MW_READCLEAR_DATA: {
- MW_READWRITE rReadData;
+ struct mw_readwrite rReadData;
unsigned short __user *pusBuffer = NULL;
if( copy_from_user(&rReadData, arg,
- sizeof(MW_READWRITE)) )
+ sizeof(struct mw_readwrite)) )
return -EFAULT;
pusBuffer = (unsigned short __user *) (rReadData.pBuf);
- PRINTK_4(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_READ_DATA,"
- " size %lx, ioarg %lx pusBuffer %p\n",
- rReadData.ulDataLength, ioarg, pusBuffer);
mutex_lock(&mwave_mutex);
retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
iocmd,
@@ -211,19 +144,13 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
break;
case IOCTL_MW_READ_INST: {
- MW_READWRITE rReadData;
+ struct mw_readwrite rReadData;
unsigned short __user *pusBuffer = NULL;
- if( copy_from_user(&rReadData, arg,
- sizeof(MW_READWRITE)) )
+ if (copy_from_user(&rReadData, arg, sizeof(rReadData)))
return -EFAULT;
pusBuffer = (unsigned short __user *) (rReadData.pBuf);
- PRINTK_4(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_READ_INST,"
- " size %lx, ioarg %lx pusBuffer %p\n",
- rReadData.ulDataLength / 2, ioarg,
- pusBuffer);
mutex_lock(&mwave_mutex);
retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
iocmd, pusBuffer,
@@ -234,19 +161,13 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
break;
case IOCTL_MW_WRITE_DATA: {
- MW_READWRITE rWriteData;
+ struct mw_readwrite rWriteData;
unsigned short __user *pusBuffer = NULL;
- if( copy_from_user(&rWriteData, arg,
- sizeof(MW_READWRITE)) )
+ if (copy_from_user(&rWriteData, arg, sizeof(rWriteData)))
return -EFAULT;
pusBuffer = (unsigned short __user *) (rWriteData.pBuf);
- PRINTK_4(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_WRITE_DATA,"
- " size %lx, ioarg %lx pusBuffer %p\n",
- rWriteData.ulDataLength, ioarg,
- pusBuffer);
mutex_lock(&mwave_mutex);
retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
iocmd, pusBuffer,
@@ -257,19 +178,13 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
break;
case IOCTL_MW_WRITE_INST: {
- MW_READWRITE rWriteData;
+ struct mw_readwrite rWriteData;
unsigned short __user *pusBuffer = NULL;
- if( copy_from_user(&rWriteData, arg,
- sizeof(MW_READWRITE)) )
+ if (copy_from_user(&rWriteData, arg, sizeof(rWriteData)))
return -EFAULT;
pusBuffer = (unsigned short __user *)(rWriteData.pBuf);
- PRINTK_4(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_WRITE_INST,"
- " size %lx, ioarg %lx pusBuffer %p\n",
- rWriteData.ulDataLength, ioarg,
- pusBuffer);
mutex_lock(&mwave_mutex);
retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData,
iocmd, pusBuffer,
@@ -283,30 +198,17 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
unsigned int ipcnum = (unsigned int) ioarg;
if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::mwave_ioctl:"
- " IOCTL_MW_REGISTER_IPC:"
- " Error: Invalid ipcnum %x\n",
- ipcnum);
+ pr_err("%s: IOCTL_MW_REGISTER_IPC: Error: Invalid ipcnum %x\n",
+ __func__, ipcnum);
return -EINVAL;
}
ipcnum = array_index_nospec(ipcnum,
ARRAY_SIZE(pDrvData->IPCs));
- PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
- " ipcnum %x entry usIntCount %x\n",
- ipcnum,
- pDrvData->IPCs[ipcnum].usIntCount);
mutex_lock(&mwave_mutex);
pDrvData->IPCs[ipcnum].bIsHere = false;
pDrvData->IPCs[ipcnum].bIsEnabled = true;
mutex_unlock(&mwave_mutex);
-
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
- " ipcnum %x exit\n",
- ipcnum);
}
break;
@@ -314,28 +216,17 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
unsigned int ipcnum = (unsigned int) ioarg;
if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::mwave_ioctl:"
- " IOCTL_MW_GET_IPC: Error:"
- " Invalid ipcnum %x\n", ipcnum);
+ pr_err("%s: IOCTL_MW_GET_IPC: Error: Invalid ipcnum %x\n", __func__,
+ ipcnum);
return -EINVAL;
}
ipcnum = array_index_nospec(ipcnum,
ARRAY_SIZE(pDrvData->IPCs));
- PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
- " ipcnum %x, usIntCount %x\n",
- ipcnum,
- pDrvData->IPCs[ipcnum].usIntCount);
-
+
mutex_lock(&mwave_mutex);
if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
DECLARE_WAITQUEUE(wait, current);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, thread for"
- " ipc %x going to sleep\n",
- ipcnum);
add_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
pDrvData->IPCs[ipcnum].bIsHere = true;
set_current_state(TASK_INTERRUPTIBLE);
@@ -343,31 +234,15 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
/* the interrupt handler while we were gone */
if (pDrvData->IPCs[ipcnum].usIntCount == 1) { /* first int has occurred (race condition) */
pDrvData->IPCs[ipcnum].usIntCount = 2; /* first int has been handled */
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl"
- " IOCTL_MW_GET_IPC ipcnum %x"
- " handling first int\n",
- ipcnum);
} else { /* either 1st int has not yet occurred, or we have already handled the first int */
schedule();
if (pDrvData->IPCs[ipcnum].usIntCount == 1) {
pDrvData->IPCs[ipcnum].usIntCount = 2;
}
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl"
- " IOCTL_MW_GET_IPC ipcnum %x"
- " woke up and returning to"
- " application\n",
- ipcnum);
}
pDrvData->IPCs[ipcnum].bIsHere = false;
remove_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
set_current_state(TASK_RUNNING);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC,"
- " returning thread for ipc %x"
- " processing\n",
- ipcnum);
}
mutex_unlock(&mwave_mutex);
}
@@ -376,16 +251,9 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
case IOCTL_MW_UNREGISTER_IPC: {
unsigned int ipcnum = (unsigned int) ioarg;
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_UNREGISTER_IPC"
- " ipcnum %x\n",
- ipcnum);
if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::mwave_ioctl:"
- " IOCTL_MW_UNREGISTER_IPC:"
- " Error: Invalid ipcnum %x\n",
- ipcnum);
+ pr_err("%s: IOCTL_MW_UNREGISTER_IPC: Error: Invalid ipcnum %x\n",
+ __func__, ipcnum);
return -EINVAL;
}
ipcnum = array_index_nospec(ipcnum,
@@ -405,35 +273,9 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
return -ENOTTY;
} /* switch */
- PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, exit retval %x\n", retval);
-
return retval;
}
-
-static ssize_t mwave_read(struct file *file, char __user *buf, size_t count,
- loff_t * ppos)
-{
- PRINTK_5(TRACE_MWAVE,
- "mwavedd::mwave_read entry file %p, buf %p, count %zx ppos %p\n",
- file, buf, count, ppos);
-
- return -EINVAL;
-}
-
-
-static ssize_t mwave_write(struct file *file, const char __user *buf,
- size_t count, loff_t * ppos)
-{
- PRINTK_5(TRACE_MWAVE,
- "mwavedd::mwave_write entry file %p, buf %p,"
- " count %zx ppos %p\n",
- file, buf, count, ppos);
-
- return -EINVAL;
-}
-
-
static int register_serial_portandirq(unsigned int port, int irq)
{
struct uart_8250_port uart;
@@ -446,9 +288,7 @@ static int register_serial_portandirq(unsigned int port, int irq)
/* OK */
break;
default:
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::register_serial_portandirq:"
- " Error: Illegal port %x\n", port );
+ pr_err("%s: Error: Illegal port %x\n", __func__, port);
return -1;
} /* switch */
/* port is okay */
@@ -461,9 +301,7 @@ static int register_serial_portandirq(unsigned int port, int irq)
/* OK */
break;
default:
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::register_serial_portandirq:"
- " Error: Illegal irq %x\n", irq );
+ pr_err("%s: Error: Illegal irq %x\n", __func__, irq);
return -1;
} /* switch */
/* irq is okay */
@@ -478,56 +316,14 @@ static int register_serial_portandirq(unsigned int port, int irq)
return serial8250_register_8250_port(&uart);
}
-
static const struct file_operations mwave_fops = {
.owner = THIS_MODULE,
- .read = mwave_read,
- .write = mwave_write,
.unlocked_ioctl = mwave_ioctl,
- .open = mwave_open,
- .release = mwave_close,
.llseek = default_llseek,
};
-
static struct miscdevice mwave_misc_dev = { MWAVE_MINOR, "mwave", &mwave_fops };
-#if 0 /* totally b0rked */
-/*
- * sysfs support <paulsch@us.ibm.com>
- */
-
-struct device mwave_device;
-
-/* Prevent code redundancy, create a macro for mwave_show_* functions. */
-#define mwave_show_function(attr_name, format_string, field) \
-static ssize_t mwave_show_##attr_name(struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- DSP_3780I_CONFIG_SETTINGS *pSettings = \
- &mwave_s_mdd.rBDData.rDspSettings; \
- return sprintf(buf, format_string, pSettings->field); \
-}
-
-/* All of our attributes are read attributes. */
-#define mwave_dev_rd_attr(attr_name, format_string, field) \
- mwave_show_function(attr_name, format_string, field) \
-static DEVICE_ATTR(attr_name, S_IRUGO, mwave_show_##attr_name, NULL)
-
-mwave_dev_rd_attr (3780i_dma, "%i\n", usDspDma);
-mwave_dev_rd_attr (3780i_irq, "%i\n", usDspIrq);
-mwave_dev_rd_attr (3780i_io, "%#.4x\n", usDspBaseIO);
-mwave_dev_rd_attr (uart_irq, "%i\n", usUartIrq);
-mwave_dev_rd_attr (uart_io, "%#.4x\n", usUartBaseIO);
-
-static struct device_attribute * const mwave_dev_attrs[] = {
- &dev_attr_3780i_dma,
- &dev_attr_3780i_irq,
- &dev_attr_3780i_io,
- &dev_attr_uart_irq,
- &dev_attr_uart_io,
-};
-#endif
-
/*
* mwave_init is called on module load
*
@@ -536,20 +332,7 @@ static struct device_attribute * const mwave_dev_attrs[] = {
*/
static void mwave_exit(void)
{
- pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
-
- PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit entry\n");
-
-#if 0
- for (i = 0; i < pDrvData->nr_registered_attrs; i++)
- device_remove_file(&mwave_device, mwave_dev_attrs[i]);
- pDrvData->nr_registered_attrs = 0;
-
- if (pDrvData->device_registered) {
- device_unregister(&mwave_device);
- pDrvData->device_registered = false;
- }
-#endif
+ struct mwave_device_data *pDrvData = &mwave_s_mdd;
if ( pDrvData->sLine >= 0 ) {
serial8250_unregister_port(pDrvData->sLine);
@@ -566,8 +349,6 @@ static void mwave_exit(void)
if (pDrvData->bBDInitialized) {
tp3780I_Cleanup(&pDrvData->rBDData);
}
-
- PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit exit\n");
}
module_exit(mwave_exit);
@@ -576,11 +357,9 @@ static int __init mwave_init(void)
{
int i;
int retval = 0;
- pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
+ struct mwave_device_data *pDrvData = &mwave_s_mdd;
- PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_init entry\n");
-
- memset(&mwave_s_mdd, 0, sizeof(MWAVE_DEVICE_DATA));
+ memset(&mwave_s_mdd, 0, sizeof(mwave_s_mdd));
pDrvData->bBDInitialized = false;
pDrvData->bResourcesClaimed = false;
@@ -597,60 +376,34 @@ static int __init mwave_init(void)
}
retval = tp3780I_InitializeBoardData(&pDrvData->rBDData);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_init, return from tp3780I_InitializeBoardData"
- " retval %x\n",
- retval);
if (retval) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::mwave_init: Error:"
- " Failed to initialize board data\n");
+ pr_err("%s: Error: Failed to initialize board data\n", __func__);
goto cleanup_error;
}
pDrvData->bBDInitialized = true;
retval = tp3780I_CalcResources(&pDrvData->rBDData);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_init, return from tp3780I_CalcResources"
- " retval %x\n",
- retval);
if (retval) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd:mwave_init: Error:"
- " Failed to calculate resources\n");
+ pr_err("%s: Error: Failed to calculate resources\n", __func__);
goto cleanup_error;
}
retval = tp3780I_ClaimResources(&pDrvData->rBDData);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_init, return from tp3780I_ClaimResources"
- " retval %x\n",
- retval);
if (retval) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd:mwave_init: Error:"
- " Failed to claim resources\n");
+ pr_err("%s: Error: Failed to claim resources\n", __func__);
goto cleanup_error;
}
pDrvData->bResourcesClaimed = true;
retval = tp3780I_EnableDSP(&pDrvData->rBDData);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_init, return from tp3780I_EnableDSP"
- " retval %x\n",
- retval);
if (retval) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd:mwave_init: Error:"
- " Failed to enable DSP\n");
+ pr_err("%s: Error: Failed to enable DSP\n", __func__);
goto cleanup_error;
}
pDrvData->bDSPEnabled = true;
if (misc_register(&mwave_misc_dev) < 0) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd:mwave_init: Error:"
- " Failed to register misc device\n");
+ pr_err("%s: Error: Failed to register misc device\n", __func__);
goto cleanup_error;
}
pDrvData->bMwaveDevRegistered = true;
@@ -660,40 +413,16 @@ static int __init mwave_init(void)
pDrvData->rBDData.rDspSettings.usUartIrq
);
if (pDrvData->sLine < 0) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd:mwave_init: Error:"
- " Failed to register serial driver\n");
+ pr_err("%s: Error: Failed to register serial driver\n", __func__);
goto cleanup_error;
}
/* uart is registered */
-#if 0
- /* sysfs */
- memset(&mwave_device, 0, sizeof (struct device));
- dev_set_name(&mwave_device, "mwave");
-
- if (device_register(&mwave_device))
- goto cleanup_error;
- pDrvData->device_registered = true;
- for (i = 0; i < ARRAY_SIZE(mwave_dev_attrs); i++) {
- if(device_create_file(&mwave_device, mwave_dev_attrs[i])) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd:mwave_init: Error:"
- " Failed to create sysfs file %s\n",
- mwave_dev_attrs[i]->attr.name);
- goto cleanup_error;
- }
- pDrvData->nr_registered_attrs++;
- }
-#endif
-
/* SUCCESS! */
return 0;
cleanup_error:
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::mwave_init: Error:"
- " Failed to initialize\n");
+ pr_err("%s: Error: Failed to initialize\n", __func__);
mwave_exit(); /* clean up */
return -EIO;
diff --git a/drivers/char/mwave/mwavedd.h b/drivers/char/mwave/mwavedd.h
index 21cb09c7bed7..e1da1493eec5 100644
--- a/drivers/char/mwave/mwavedd.h
+++ b/drivers/char/mwave/mwavedd.h
@@ -56,97 +56,35 @@
#include <linux/uaccess.h>
#include <linux/wait.h>
-extern int mwave_debug;
extern int mwave_3780i_irq;
extern int mwave_3780i_io;
extern int mwave_uart_irq;
extern int mwave_uart_io;
-#define PRINTK_ERROR printk
-#define KERN_ERR_MWAVE KERN_ERR "mwave: "
-
-#define TRACE_MWAVE 0x0001
-#define TRACE_SMAPI 0x0002
-#define TRACE_3780I 0x0004
-#define TRACE_TP3780I 0x0008
-
-#ifdef MW_TRACE
-#define PRINTK_1(f,s) \
- if (f & (mwave_debug)) { \
- printk(s); \
- }
-
-#define PRINTK_2(f,s,v1) \
- if (f & (mwave_debug)) { \
- printk(s,v1); \
- }
-
-#define PRINTK_3(f,s,v1,v2) \
- if (f & (mwave_debug)) { \
- printk(s,v1,v2); \
- }
-
-#define PRINTK_4(f,s,v1,v2,v3) \
- if (f & (mwave_debug)) { \
- printk(s,v1,v2,v3); \
- }
-
-#define PRINTK_5(f,s,v1,v2,v3,v4) \
- if (f & (mwave_debug)) { \
- printk(s,v1,v2,v3,v4); \
- }
-
-#define PRINTK_6(f,s,v1,v2,v3,v4,v5) \
- if (f & (mwave_debug)) { \
- printk(s,v1,v2,v3,v4,v5); \
- }
-
-#define PRINTK_7(f,s,v1,v2,v3,v4,v5,v6) \
- if (f & (mwave_debug)) { \
- printk(s,v1,v2,v3,v4,v5,v6); \
- }
-
-#define PRINTK_8(f,s,v1,v2,v3,v4,v5,v6,v7) \
- if (f & (mwave_debug)) { \
- printk(s,v1,v2,v3,v4,v5,v6,v7); \
- }
-
-#else
-#define PRINTK_1(f,s)
-#define PRINTK_2(f,s,v1)
-#define PRINTK_3(f,s,v1,v2)
-#define PRINTK_4(f,s,v1,v2,v3)
-#define PRINTK_5(f,s,v1,v2,v3,v4)
-#define PRINTK_6(f,s,v1,v2,v3,v4,v5)
-#define PRINTK_7(f,s,v1,v2,v3,v4,v5,v6)
-#define PRINTK_8(f,s,v1,v2,v3,v4,v5,v6,v7)
-#endif
-
-
-typedef struct _MWAVE_IPC {
+struct mwave_ipc {
unsigned short usIntCount; /* 0=none, 1=first, 2=greater than 1st */
bool bIsEnabled;
bool bIsHere;
/* entry spin lock */
wait_queue_head_t ipc_wait_queue;
-} MWAVE_IPC;
+};
-typedef struct _MWAVE_DEVICE_DATA {
- THINKPAD_BD_DATA rBDData; /* board driver's data area */
+struct mwave_device_data {
+ struct thinkpad_bd_data rBDData; /* board driver's data area */
unsigned long ulIPCSource_ISR; /* IPC source bits for recently processed intr, set during ISR processing */
unsigned long ulIPCSource_DPC; /* IPC source bits for recently processed intr, set during DPC processing */
bool bBDInitialized;
bool bResourcesClaimed;
bool bDSPEnabled;
bool bDSPReset;
- MWAVE_IPC IPCs[16];
+ struct mwave_ipc IPCs[16];
bool bMwaveDevRegistered;
short sLine;
int nr_registered_attrs;
int device_registered;
-} MWAVE_DEVICE_DATA, *pMWAVE_DEVICE_DATA;
+};
-extern MWAVE_DEVICE_DATA mwave_s_mdd;
+extern struct mwave_device_data mwave_s_mdd;
#endif
diff --git a/drivers/char/mwave/mwavepub.h b/drivers/char/mwave/mwavepub.h
index 60c961ae23b4..280327bdaa38 100644
--- a/drivers/char/mwave/mwavepub.h
+++ b/drivers/char/mwave/mwavepub.h
@@ -53,7 +53,7 @@
#include <linux/miscdevice.h>
-typedef struct _MW_ABILITIES {
+struct mw_abilities {
unsigned long instr_per_sec;
unsigned long data_size;
unsigned long inst_size;
@@ -63,27 +63,27 @@ typedef struct _MW_ABILITIES {
unsigned long component_list[7];
char mwave_os_name[16];
char bios_task_name[16];
-} MW_ABILITIES, *pMW_ABILITIES;
+};
-typedef struct _MW_READWRITE {
+struct mw_readwrite {
unsigned short usDspAddress; /* The dsp address */
unsigned long ulDataLength; /* The size in bytes of the data or user buffer */
void __user *pBuf; /* Input:variable sized buffer */
-} MW_READWRITE, *pMW_READWRITE;
+};
#define IOCTL_MW_RESET _IO(MWAVE_MINOR,1)
#define IOCTL_MW_RUN _IO(MWAVE_MINOR,2)
-#define IOCTL_MW_DSP_ABILITIES _IOR(MWAVE_MINOR,3,MW_ABILITIES)
-#define IOCTL_MW_READ_DATA _IOR(MWAVE_MINOR,4,MW_READWRITE)
-#define IOCTL_MW_READCLEAR_DATA _IOR(MWAVE_MINOR,5,MW_READWRITE)
-#define IOCTL_MW_READ_INST _IOR(MWAVE_MINOR,6,MW_READWRITE)
-#define IOCTL_MW_WRITE_DATA _IOW(MWAVE_MINOR,7,MW_READWRITE)
-#define IOCTL_MW_WRITE_INST _IOW(MWAVE_MINOR,8,MW_READWRITE)
+#define IOCTL_MW_DSP_ABILITIES _IOR(MWAVE_MINOR,3,struct mw_abilities)
+#define IOCTL_MW_READ_DATA _IOR(MWAVE_MINOR,4,struct mw_readwrite)
+#define IOCTL_MW_READCLEAR_DATA _IOR(MWAVE_MINOR,5,struct mw_readwrite)
+#define IOCTL_MW_READ_INST _IOR(MWAVE_MINOR,6,struct mw_readwrite)
+#define IOCTL_MW_WRITE_DATA _IOW(MWAVE_MINOR,7,struct mw_readwrite)
+#define IOCTL_MW_WRITE_INST _IOW(MWAVE_MINOR,8,struct mw_readwrite)
#define IOCTL_MW_REGISTER_IPC _IOW(MWAVE_MINOR,9,int)
#define IOCTL_MW_UNREGISTER_IPC _IOW(MWAVE_MINOR,10,int)
#define IOCTL_MW_GET_IPC _IOW(MWAVE_MINOR,11,int)
-#define IOCTL_MW_TRACE _IOR(MWAVE_MINOR,12,MW_READWRITE)
+#define IOCTL_MW_TRACE _IOR(MWAVE_MINOR,12,struct mw_readwrite)
#endif
diff --git a/drivers/char/mwave/smapi.c b/drivers/char/mwave/smapi.c
index f8d79d393b69..df6354b24339 100644
--- a/drivers/char/mwave/smapi.c
+++ b/drivers/char/mwave/smapi.c
@@ -46,6 +46,8 @@
* First release to the public
*/
+#define pr_fmt(fmt) "smapi: " fmt
+
#include <linux/kernel.h>
#include <linux/mc146818rtc.h> /* CMOS defines */
#include "smapi.h"
@@ -69,10 +71,6 @@ static int smapi_request(unsigned short inBX, unsigned short inCX,
unsigned short usSmapiOK = -EIO, *pusSmapiOK = &usSmapiOK;
unsigned int inBXCX = (inBX << 16) | inCX;
unsigned int inDISI = (inDI << 16) | inSI;
- int retval = 0;
-
- PRINTK_5(TRACE_SMAPI, "inBX %x inCX %x inDI %x inSI %x\n",
- inBX, inCX, inDI, inSI);
__asm__ __volatile__("movw $0x5380,%%ax\n\t"
"movl %7,%%ebx\n\t"
@@ -107,10 +105,6 @@ static int smapi_request(unsigned short inBX, unsigned short inCX,
:"%eax", "%ebx", "%ecx", "%edx", "%edi",
"%esi");
- PRINTK_8(TRACE_SMAPI,
- "myoutAX %x myoutBX %x myoutCX %x myoutDX %x myoutDI %x myoutSI %x usSmapiOK %x\n",
- myoutAX, myoutBX, myoutCX, myoutDX, myoutDI, myoutSI,
- usSmapiOK);
*outAX = myoutAX;
*outBX = myoutBX;
*outCX = myoutCX;
@@ -118,13 +112,11 @@ static int smapi_request(unsigned short inBX, unsigned short inCX,
*outDI = myoutDI;
*outSI = myoutSI;
- retval = (usSmapiOK == 1) ? 0 : -EIO;
- PRINTK_2(TRACE_SMAPI, "smapi::smapi_request exit retval %x\n", retval);
- return retval;
+ return usSmapiOK == 1 ? 0 : -EIO;
}
-int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
+int smapi_query_DSP_cfg(struct smapi_dsp_settings *pSettings)
{
int bRC;
unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
@@ -134,17 +126,13 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
static const unsigned short ausUartBases[] = {
0x03F8, 0x02F8, 0x03E8, 0x02E8 };
- PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg entry\n");
-
bRC = smapi_request(0x1802, 0x0000, 0, 0,
&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
if (bRC) {
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Error: Could not get DSP Settings. Aborting.\n");
+ pr_err("%s: Error: Could not get DSP Settings. Aborting.\n", __func__);
return bRC;
}
- PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n");
-
pSettings->bDSPPresent = ((usBX & 0x0100) != 0);
pSettings->bDSPEnabled = ((usCX & 0x0001) != 0);
pSettings->usDspIRQ = usSI & 0x00FF;
@@ -154,27 +142,20 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
} else {
pSettings->usDspBaseIO = 0;
}
- PRINTK_6(TRACE_SMAPI,
- "smapi::smapi_query_DSP_cfg get DSP Settings bDSPPresent %x bDSPEnabled %x usDspIRQ %x usDspDMA %x usDspBaseIO %x\n",
- pSettings->bDSPPresent, pSettings->bDSPEnabled,
- pSettings->usDspIRQ, pSettings->usDspDMA,
- pSettings->usDspBaseIO);
/* check for illegal values */
if ( pSettings->usDspBaseIO == 0 )
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP base I/O address is 0\n");
+ pr_err("%s: Worry: DSP base I/O address is 0\n", __func__);
if ( pSettings->usDspIRQ == 0 )
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP IRQ line is 0\n");
+ pr_err("%s: Worry: DSP IRQ line is 0\n", __func__);
bRC = smapi_request(0x1804, 0x0000, 0, 0,
&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
if (bRC) {
- PRINTK_ERROR("smapi::smapi_query_DSP_cfg: Error: Could not get DSP modem settings. Aborting.\n");
+ pr_err("%s: Error: Could not get DSP modem settings. Aborting.\n", __func__);
return bRC;
}
- PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n");
-
pSettings->bModemEnabled = ((usCX & 0x0001) != 0);
pSettings->usUartIRQ = usSI & 0x000F;
if (((usSI & 0xFF00) >> 8) < ARRAY_SIZE(ausUartBases)) {
@@ -183,19 +164,11 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
pSettings->usUartBaseIO = 0;
}
- PRINTK_4(TRACE_SMAPI,
- "smapi::smapi_query_DSP_cfg get DSP modem settings bModemEnabled %x usUartIRQ %x usUartBaseIO %x\n",
- pSettings->bModemEnabled,
- pSettings->usUartIRQ,
- pSettings->usUartBaseIO);
-
/* check for illegal values */
if ( pSettings->usUartBaseIO == 0 )
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART base I/O address is 0\n");
+ pr_err("%s: Worry: UART base I/O address is 0\n", __func__);
if ( pSettings->usUartIRQ == 0 )
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART IRQ line is 0\n");
-
- PRINTK_2(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg exit bRC %x\n", bRC);
+ pr_err("%s: Worry: UART IRQ line is 0\n", __func__);
return bRC;
}
@@ -218,17 +191,14 @@ int smapi_set_DSP_cfg(void)
unsigned short dspio_index = 0, uartio_index = 0;
- PRINTK_5(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg entry mwave_3780i_irq %x mwave_3780i_io %x mwave_uart_irq %x mwave_uart_io %x\n",
- mwave_3780i_irq, mwave_3780i_io, mwave_uart_irq, mwave_uart_io);
-
if (mwave_3780i_io) {
for (i = 0; i < ARRAY_SIZE(ausDspBases); i++) {
if (mwave_3780i_io == ausDspBases[i])
break;
}
if (i == ARRAY_SIZE(ausDspBases)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_io address %x. Aborting.\n", mwave_3780i_io);
+ pr_err("%s: Error: Invalid mwave_3780i_io address %x. Aborting.\n",
+ __func__, mwave_3780i_io);
return bRC;
}
dspio_index = i;
@@ -240,7 +210,8 @@ int smapi_set_DSP_cfg(void)
break;
}
if (i == ARRAY_SIZE(ausDspIrqs)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_irq %x. Aborting.\n", mwave_3780i_irq);
+ pr_err("%s: Error: Invalid mwave_3780i_irq %x. Aborting.\n", __func__,
+ mwave_3780i_irq);
return bRC;
}
}
@@ -251,7 +222,8 @@ int smapi_set_DSP_cfg(void)
break;
}
if (i == ARRAY_SIZE(ausUartBases)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_io address %x. Aborting.\n", mwave_uart_io);
+ pr_err("%s: Error: Invalid mwave_uart_io address %x. Aborting.\n", __func__,
+ mwave_uart_io);
return bRC;
}
uartio_index = i;
@@ -264,7 +236,8 @@ int smapi_set_DSP_cfg(void)
break;
}
if (i == ARRAY_SIZE(ausUartIrqs)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_irq %x. Aborting.\n", mwave_uart_irq);
+ pr_err("%s: Error: Invalid mwave_uart_irq %x. Aborting.\n", __func__,
+ mwave_uart_irq);
return bRC;
}
}
@@ -279,46 +252,15 @@ int smapi_set_DSP_cfg(void)
if (usBX & 0x0100) { /* serial port A is present */
if (usCX & 1) { /* serial port is enabled */
if ((usSI & 0xFF) == mwave_uart_irq) {
-#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_ERROR(KERN_ERR_MWAVE
- "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
-#else
- PRINTK_3(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
-#endif
-#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_1(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg Disabling conflicting serial port\n");
- bRC = smapi_request(0x1403, 0x0100, 0, usSI,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1402, 0x0000, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
-#else
+ pr_err("%s: Serial port A irq %x conflicts with mwave_uart_irq %x\n",
+ __func__, usSI & 0xFF, mwave_uart_irq);
goto exit_conflict;
-#endif
} else {
if ((usSI >> 8) == uartio_index) {
-#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_ERROR(KERN_ERR_MWAVE
- "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
-#else
- PRINTK_3(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
-#endif
-#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_1(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg Disabling conflicting serial port A\n");
- bRC = smapi_request (0x1403, 0x0100, 0, usSI,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request (0x1402, 0x0000, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
-#else
+ pr_err("%s: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n",
+ __func__, ausUartBases[usSI >> 8],
+ ausUartBases[uartio_index]);
goto exit_conflict;
-#endif
}
}
}
@@ -332,46 +274,15 @@ int smapi_set_DSP_cfg(void)
if (usBX & 0x0100) { /* serial port B is present */
if (usCX & 1) { /* serial port is enabled */
if ((usSI & 0xFF) == mwave_uart_irq) {
-#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_ERROR(KERN_ERR_MWAVE
- "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
-#else
- PRINTK_3(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
-#endif
-#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_1(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n");
- bRC = smapi_request(0x1405, 0x0100, 0, usSI,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1404, 0x0000, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
-#else
+ pr_err("%s: Serial port B irq %x conflicts with mwave_uart_irq %x\n",
+ __func__, usSI & 0xFF, mwave_uart_irq);
goto exit_conflict;
-#endif
} else {
if ((usSI >> 8) == uartio_index) {
-#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_ERROR(KERN_ERR_MWAVE
- "smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
-#else
- PRINTK_3(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
-#endif
-#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_1 (TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n");
- bRC = smapi_request (0x1405, 0x0100, 0, usSI,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request (0x1404, 0x0000, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
-#else
+ pr_err("%s: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n",
+ __func__, ausUartBases[usSI >> 8],
+ ausUartBases[uartio_index]);
goto exit_conflict;
-#endif
}
}
}
@@ -387,58 +298,15 @@ int smapi_set_DSP_cfg(void)
/* bRC == 0 */
if ((usCX & 0xff) != 0xff) { /* IR port not disabled */
if ((usCX & 0xff) == mwave_uart_irq) {
-#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_ERROR(KERN_ERR_MWAVE
- "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq);
-#else
- PRINTK_3(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq);
-#endif
-#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_1(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n");
- bRC = smapi_request(0x1701, 0x0100, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1700, 0, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1705, 0x01ff, 0, usSI,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1704, 0x0000, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
-#else
+ pr_err("%s: IR port irq %x conflicts with mwave_uart_irq %x\n",
+ __func__, usCX & 0xff, mwave_uart_irq);
goto exit_conflict;
-#endif
} else {
if ((usSI & 0xff) == uartio_index) {
-#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_ERROR(KERN_ERR_MWAVE
- "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]);
-#else
- PRINTK_3(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]);
-#endif
-#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_1(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n");
- bRC = smapi_request(0x1701, 0x0100, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1700, 0, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1705, 0x01ff, 0, usSI,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1704, 0x0000, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
-#else
+ pr_err("%s: IR port base I/O address %x conflicts with mwave uart I/O %x\n",
+ __func__, ausUartBases[usSI & 0xff],
+ ausUartBases[uartio_index]);
goto exit_conflict;
-#endif
}
}
}
@@ -482,7 +350,6 @@ int smapi_set_DSP_cfg(void)
if (bRC) goto exit_smapi_request_error;
/* normal exit: */
- PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg exit\n");
return 0;
exit_conflict:
@@ -490,64 +357,32 @@ exit_conflict:
return -EIO;
exit_smapi_request_error:
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg exit on smapi_request error bRC %x\n", bRC);
+ pr_err("%s: exit on smapi_request error bRC %x\n", __func__, bRC);
return bRC;
}
int smapi_set_DSP_power_state(bool bOn)
{
- int bRC;
unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
unsigned short usPowerFunction;
- PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state entry bOn %x\n", bOn);
-
usPowerFunction = (bOn) ? 1 : 0;
- bRC = smapi_request(0x4901, 0x0000, 0, usPowerFunction,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
-
- PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state exit bRC %x\n", bRC);
-
- return bRC;
+ return smapi_request(0x4901, 0x0000, 0, usPowerFunction, &usAX, &usBX, &usCX, &usDX, &usDI,
+ &usSI);
}
-#if 0
-static int SmapiQuerySystemID(void)
-{
- int bRC = -EIO;
- unsigned short usAX = 0xffff, usBX = 0xffff, usCX = 0xffff,
- usDX = 0xffff, usDI = 0xffff, usSI = 0xffff;
-
- printk("smapi::SmapiQUerySystemID entry\n");
- bRC = smapi_request(0x0000, 0, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
-
- if (bRC == 0) {
- printk("AX=%x, BX=%x, CX=%x, DX=%x, DI=%x, SI=%x\n",
- usAX, usBX, usCX, usDX, usDI, usSI);
- } else {
- printk("smapi::SmapiQuerySystemID smapi_request error\n");
- }
-
- return bRC;
-}
-#endif /* 0 */
-
int smapi_init(void)
{
int retval = -EIO;
unsigned short usSmapiID = 0;
unsigned long flags;
- PRINTK_1(TRACE_SMAPI, "smapi::smapi_init entry\n");
-
spin_lock_irqsave(&rtc_lock, flags);
usSmapiID = CMOS_READ(0x7C);
usSmapiID |= (CMOS_READ(0x7D) << 8);
spin_unlock_irqrestore(&rtc_lock, flags);
- PRINTK_2(TRACE_SMAPI, "smapi::smapi_init usSmapiID %x\n", usSmapiID);
if (usSmapiID == 0x5349) {
spin_lock_irqsave(&rtc_lock, flags);
@@ -555,16 +390,13 @@ int smapi_init(void)
g_usSmapiPort |= (CMOS_READ(0x7F) << 8);
spin_unlock_irqrestore(&rtc_lock, flags);
if (g_usSmapiPort == 0) {
- PRINTK_ERROR("smapi::smapi_init, ERROR unable to read from SMAPI port\n");
+ pr_err("%s: ERROR unable to read from SMAPI port\n", __func__);
} else {
- PRINTK_2(TRACE_SMAPI,
- "smapi::smapi_init, exit true g_usSmapiPort %x\n",
- g_usSmapiPort);
retval = 0;
//SmapiQuerySystemID();
}
} else {
- PRINTK_ERROR("smapi::smapi_init, ERROR invalid usSmapiID\n");
+ pr_err("%s: ERROR invalid usSmapiID\n", __func__);
retval = -ENXIO;
}
diff --git a/drivers/char/mwave/smapi.h b/drivers/char/mwave/smapi.h
index ebc206b000b9..e605b16ed23c 100644
--- a/drivers/char/mwave/smapi.h
+++ b/drivers/char/mwave/smapi.h
@@ -49,7 +49,7 @@
#ifndef _LINUX_SMAPI_H
#define _LINUX_SMAPI_H
-typedef struct {
+struct smapi_dsp_settings {
int bDSPPresent;
int bDSPEnabled;
int bModemEnabled;
@@ -65,10 +65,10 @@ typedef struct {
unsigned short usSndblstIRQ;
unsigned short usSndblstDMA;
unsigned short usSndblstBaseIO;
-} SMAPI_DSP_SETTINGS;
+};
int smapi_init(void);
-int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings);
+int smapi_query_DSP_cfg(struct smapi_dsp_settings *pSettings);
int smapi_set_DSP_cfg(void);
int smapi_set_DSP_power_state(bool bOn);
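
[Editor's note: the header change follows Documentation/process/coding-style.rst, which discourages typedefs for plain aggregate types — a tagged struct keeps the `struct` keyword visible at every use site. The same idea in miniature, with a hypothetical field and users:]

	/* Before: the typedef hides what the type is. */
	typedef struct {
		int bDSPPresent;
	} SMAPI_DSP_SETTINGS;

	void old_user(SMAPI_DSP_SETTINGS *p);

	/* After: a tagged struct, so every caller sees "struct" spelled out. */
	struct smapi_dsp_settings {
		int bDSPPresent;
	};

	void new_user(struct smapi_dsp_settings *p);
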
diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
index 83eaffeb22c8..7363b0f764e0 100644
--- a/drivers/char/mwave/tp3780i.c
+++ b/drivers/char/mwave/tp3780i.c
@@ -46,6 +46,8 @@
* First release to the public
*/
+#define pr_fmt(fmt) "tp3780i: " fmt
+
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
@@ -65,16 +67,14 @@ static unsigned short s_ausThinkpadDmaToField[8] =
static unsigned short s_numIrqs = 16, s_numDmas = 8;
-static void EnableSRAM(THINKPAD_BD_DATA * pBDData)
+static void EnableSRAM(struct thinkpad_bd_data *pBDData)
{
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
DSP_GPIO_OUTPUT_DATA_15_8 rGpioOutputData;
DSP_GPIO_DRIVER_ENABLE_15_8 rGpioDriverEnable;
DSP_GPIO_MODE_15_8 rGpioMode;
- PRINTK_1(TRACE_TP3780I, "tp3780i::EnableSRAM, entry\n");
-
MKWORD(rGpioMode) = ReadMsaCfg(DSP_GpioModeControl_15_8);
rGpioMode.GpioMode10 = 0;
WriteMsaCfg(DSP_GpioModeControl_15_8, MKWORD(rGpioMode));
@@ -88,54 +88,31 @@ static void EnableSRAM(THINKPAD_BD_DATA * pBDData)
rGpioOutputData.Latch10 = 0;
rGpioOutputData.Mask10 = true;
WriteMsaCfg(DSP_GpioOutputData_15_8, MKWORD(rGpioOutputData));
-
- PRINTK_1(TRACE_TP3780I, "tp3780i::EnableSRAM exit\n");
}
static irqreturn_t UartInterrupt(int irq, void *dev_id)
{
- PRINTK_3(TRACE_TP3780I,
- "tp3780i::UartInterrupt entry irq %x dev_id %p\n", irq, dev_id);
return IRQ_HANDLED;
}
static irqreturn_t DspInterrupt(int irq, void *dev_id)
{
- pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pDrvData->rBDData.rDspSettings;
+ struct mwave_device_data *pDrvData = &mwave_s_mdd;
+ struct dsp_3780i_config_settings *pSettings = &pDrvData->rBDData.rDspSettings;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
unsigned short usIPCSource = 0, usIsolationMask, usPCNum;
- PRINTK_3(TRACE_TP3780I,
- "tp3780i::DspInterrupt entry irq %x dev_id %p\n", irq, dev_id);
-
if (dsp3780I_GetIPCSource(usDspBaseIO, &usIPCSource) == 0) {
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::DspInterrupt, return from dsp3780i_GetIPCSource, usIPCSource %x\n",
- usIPCSource);
usIsolationMask = 1;
for (usPCNum = 1; usPCNum <= 16; usPCNum++) {
if (usIPCSource & usIsolationMask) {
usIPCSource &= ~usIsolationMask;
- PRINTK_3(TRACE_TP3780I,
- "tp3780i::DspInterrupt usPCNum %x usIPCSource %x\n",
- usPCNum, usIPCSource);
if (pDrvData->IPCs[usPCNum - 1].usIntCount == 0) {
pDrvData->IPCs[usPCNum - 1].usIntCount = 1;
}
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::DspInterrupt usIntCount %x\n",
- pDrvData->IPCs[usPCNum - 1].usIntCount);
if (pDrvData->IPCs[usPCNum - 1].bIsEnabled == true) {
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::DspInterrupt, waking up usPCNum %x\n",
- usPCNum - 1);
wake_up_interruptible(&pDrvData->IPCs[usPCNum - 1].ipc_wait_queue);
- } else {
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::DspInterrupt, no one waiting for IPC %x\n",
- usPCNum - 1);
}
}
if (usIPCSource == 0)
@@ -143,56 +120,42 @@ static irqreturn_t DspInterrupt(int irq, void *dev_id)
/* try next IPC */
usIsolationMask = usIsolationMask << 1;
}
- } else {
- PRINTK_1(TRACE_TP3780I,
- "tp3780i::DspInterrupt, return false from dsp3780i_GetIPCSource\n");
}
- PRINTK_1(TRACE_TP3780I, "tp3780i::DspInterrupt exit\n");
return IRQ_HANDLED;
}
-int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData)
+int tp3780I_InitializeBoardData(struct thinkpad_bd_data *pBDData)
{
int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
-
-
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_InitializeBoardData entry pBDData %p\n", pBDData);
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
pBDData->bDSPEnabled = false;
pSettings->bInterruptClaimed = false;
retval = smapi_init();
if (retval) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_InitializeBoardData: Error: SMAPI is not available on this machine\n");
+ pr_err("%s: Error: SMAPI is not available on this machine\n", __func__);
} else {
if (mwave_3780i_irq || mwave_3780i_io || mwave_uart_irq || mwave_uart_io) {
retval = smapi_set_DSP_cfg();
}
}
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_InitializeBoardData exit retval %x\n", retval);
-
return retval;
}
-void tp3780I_Cleanup(THINKPAD_BD_DATA *pBDData)
+void tp3780I_Cleanup(struct thinkpad_bd_data *pBDData)
{
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_Cleanup entry and exit pBDData %p\n", pBDData);
}
-int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData)
+int tp3780I_CalcResources(struct thinkpad_bd_data *pBDData)
{
- SMAPI_DSP_SETTINGS rSmapiInfo;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
-
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_CalcResources entry pBDData %p\n", pBDData);
+ struct smapi_dsp_settings rSmapiInfo;
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
if (smapi_query_DSP_cfg(&rSmapiInfo)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_CalcResources: Error: Could not query DSP config. Aborting.\n");
+ pr_err("%s: Error: Could not query DSP config. Aborting.\n", __func__);
return -EIO;
}
@@ -203,7 +166,7 @@ int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData)
|| ( rSmapiInfo.usUartIRQ == 0 )
|| ( rSmapiInfo.usUartBaseIO == 0 )
) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_CalcResources: Error: Illegal resource setting. Aborting.\n");
+ pr_err("%s: Error: Illegal resource setting. Aborting.\n", __func__);
return -EIO;
}
@@ -225,41 +188,31 @@ int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData)
pBDData->bShareDspIrq = pBDData->bShareUartIrq = 0;
}
- PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_CalcResources exit\n");
-
return 0;
}
-int tp3780I_ClaimResources(THINKPAD_BD_DATA * pBDData)
+int tp3780I_ClaimResources(struct thinkpad_bd_data *pBDData)
{
int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
struct resource *pres;
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_ClaimResources entry pBDData %p\n", pBDData);
-
pres = request_region(pSettings->usDspBaseIO, 16, "mwave_3780i");
if ( pres == NULL ) retval = -EIO;
if (retval) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_ClaimResources: Error: Could not claim I/O region starting at %x\n", pSettings->usDspBaseIO);
- retval = -EIO;
+ pr_err("%s: Error: Could not claim I/O region starting at %x\n", __func__,
+ pSettings->usDspBaseIO);
+ return -EIO;
}
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ClaimResources exit retval %x\n", retval);
-
return retval;
}
-int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData)
+int tp3780I_ReleaseResources(struct thinkpad_bd_data *pBDData)
{
- int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
-
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_ReleaseResources entry pBDData %p\n", pBDData);
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
release_region(pSettings->usDspBaseIO & (~3), 16);
@@ -268,28 +221,23 @@ int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData)
pSettings->bInterruptClaimed = false;
}
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_ReleaseResources exit retval %x\n", retval);
-
- return retval;
+ return 0;
}
-int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
+int tp3780I_EnableDSP(struct thinkpad_bd_data *pBDData)
{
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
bool bDSPPoweredUp = false, bInterruptAllocated = false;
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP entry pBDData %p\n", pBDData);
-
if (pBDData->bDSPEnabled) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: DSP already enabled!\n");
+ pr_err("%s: Error: DSP already enabled!\n", __func__);
goto exit_cleanup;
}
if (!pSettings->bDSPEnabled) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780::tp3780I_EnableDSP: Error: pSettings->bDSPEnabled not set\n");
+ pr_err("%s: Error: pSettings->bDSPEnabled not set\n", __func__);
goto exit_cleanup;
}
@@ -299,7 +247,7 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
|| (s_ausThinkpadIrqToField[pSettings->usDspIrq] == 0xFFFF)
|| (s_ausThinkpadDmaToField[pSettings->usDspDma] == 0xFFFF)
) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: invalid irq %x\n", pSettings->usDspIrq);
+ pr_err("%s: Error: invalid irq %x\n", __func__, pSettings->usDspIrq);
goto exit_cleanup;
}
@@ -307,7 +255,8 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
((pSettings->usDspBaseIO & 0xF00F) != 0)
|| (pSettings->usDspBaseIO & 0x0FF0) == 0
) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Invalid DSP base I/O address %x\n", pSettings->usDspBaseIO);
+ pr_err("%s: Error: Invalid DSP base I/O address %x\n", __func__,
+ pSettings->usDspBaseIO);
goto exit_cleanup;
}
@@ -316,7 +265,7 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
pSettings->usUartIrq >= s_numIrqs
|| s_ausThinkpadIrqToField[pSettings->usUartIrq] == 0xFFFF
) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Invalid UART IRQ %x\n", pSettings->usUartIrq);
+ pr_err("%s: Error: Invalid UART IRQ %x\n", __func__, pSettings->usUartIrq);
goto exit_cleanup;
}
switch (pSettings->usUartBaseIO) {
@@ -327,7 +276,8 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
break;
default:
- PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: Invalid UART base I/O address %x\n", pSettings->usUartBaseIO);
+ pr_err("%s: Error: Invalid UART base I/O address %x\n", __func__,
+ pSettings->usUartBaseIO);
goto exit_cleanup;
}
}
@@ -356,33 +306,30 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
pSettings->usChipletEnable = TP_CFG_ChipletEnable;
if (request_irq(pSettings->usUartIrq, &UartInterrupt, 0, "mwave_uart", NULL)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Could not get UART IRQ %x\n", pSettings->usUartIrq);
+ pr_err("%s: Error: Could not get UART IRQ %x\n", __func__, pSettings->usUartIrq);
goto exit_cleanup;
} else { /* no conflict just release */
free_irq(pSettings->usUartIrq, NULL);
}
if (request_irq(pSettings->usDspIrq, &DspInterrupt, 0, "mwave_3780i", NULL)) {
- PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: Could not get 3780i IRQ %x\n", pSettings->usDspIrq);
+ pr_err("%s: Error: Could not get 3780i IRQ %x\n", __func__, pSettings->usDspIrq);
goto exit_cleanup;
} else {
- PRINTK_3(TRACE_TP3780I,
- "tp3780i::tp3780I_EnableDSP, got interrupt %x bShareDspIrq %x\n",
- pSettings->usDspIrq, pBDData->bShareDspIrq);
bInterruptAllocated = true;
pSettings->bInterruptClaimed = true;
}
smapi_set_DSP_power_state(false);
if (smapi_set_DSP_power_state(true)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: smapi_set_DSP_power_state(true) failed\n");
+ pr_err("%s: Error: smapi_set_DSP_power_state(true) failed\n", __func__);
goto exit_cleanup;
} else {
bDSPPoweredUp = true;
}
if (dsp3780I_EnableDSP(pSettings, s_ausThinkpadIrqToField, s_ausThinkpadDmaToField)) {
- PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: dsp7880I_EnableDSP() failed\n");
+ pr_err("%s: Error: dsp7880I_EnableDSP() failed\n", __func__);
goto exit_cleanup;
}
@@ -390,12 +337,10 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
pBDData->bDSPEnabled = true;
- PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP exit\n");
-
return 0;
exit_cleanup:
- PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Cleaning up\n");
+ pr_err("%s: Cleaning up\n", __func__);
if (bDSPPoweredUp)
smapi_set_DSP_power_state(false);
if (bInterruptAllocated) {
@@ -406,12 +351,9 @@ exit_cleanup:
}
-int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData)
+int tp3780I_DisableDSP(struct thinkpad_bd_data *pBDData)
{
- int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
-
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_DisableDSP entry pBDData %p\n", pBDData);
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
if (pBDData->bDSPEnabled) {
dsp3780I_DisableDSP(&pBDData->rDspSettings);
@@ -423,56 +365,38 @@ int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData)
pBDData->bDSPEnabled = false;
}
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_DisableDSP exit retval %x\n", retval);
-
- return retval;
+ return 0;
}
-int tp3780I_ResetDSP(THINKPAD_BD_DATA * pBDData)
+int tp3780I_ResetDSP(struct thinkpad_bd_data *pBDData)
{
- int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
-
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ResetDSP entry pBDData %p\n",
- pBDData);
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
if (dsp3780I_Reset(pSettings) == 0) {
EnableSRAM(pBDData);
- } else {
- retval = -EIO;
+ return 0;
}
-
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ResetDSP exit retval %x\n", retval);
-
- return retval;
+ return -EIO;
}
-int tp3780I_StartDSP(THINKPAD_BD_DATA * pBDData)
+int tp3780I_StartDSP(struct thinkpad_bd_data *pBDData)
{
- int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
-
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_StartDSP entry pBDData %p\n", pBDData);
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
if (dsp3780I_Run(pSettings) == 0) {
// @BUG @TBD EnableSRAM(pBDData);
} else {
- retval = -EIO;
+ return -EIO;
}
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_StartDSP exit retval %x\n", retval);
-
- return retval;
+ return 0;
}
-int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities)
+int tp3780I_QueryAbilities(struct thinkpad_bd_data *pBDData, struct mw_abilities *pAbilities)
{
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData);
-
memset(pAbilities, 0, sizeof(*pAbilities));
/* fill out standard constant fields */
pAbilities->instr_per_sec = pBDData->rDspSettings.uIps;
@@ -497,25 +421,17 @@ int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities
memcpy(pAbilities->bios_task_name, TP_ABILITIES_BIOSTASK_NAME,
sizeof(TP_ABILITIES_BIOSTASK_NAME));
- PRINTK_1(TRACE_TP3780I,
- "tp3780i::tp3780I_QueryAbilities exit retval=SUCCESSFUL\n");
-
return 0;
}
-int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+int tp3780I_ReadWriteDspDStore(struct thinkpad_bd_data *pBDData, unsigned int uOpcode,
void __user *pvBuffer, unsigned int uCount,
unsigned long ulDSPAddr)
{
- int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
bool bRC = 0;
- PRINTK_6(TRACE_TP3780I,
- "tp3780i::tp3780I_ReadWriteDspDStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n",
- pBDData, uOpcode, pvBuffer, uCount, ulDSPAddr);
-
if (pBDData->bDSPEnabled) {
switch (uOpcode) {
case IOCTL_MW_READ_DATA:
@@ -532,26 +448,18 @@ int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
}
}
- retval = (bRC) ? -EIO : 0;
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ReadWriteDspDStore exit retval %x\n", retval);
-
- return retval;
+ return bRC ? -EIO : 0;
}
-int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+int tp3780I_ReadWriteDspIStore(struct thinkpad_bd_data *pBDData, unsigned int uOpcode,
void __user *pvBuffer, unsigned int uCount,
unsigned long ulDSPAddr)
{
- int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
bool bRC = 0;
- PRINTK_6(TRACE_TP3780I,
- "tp3780i::tp3780I_ReadWriteDspIStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n",
- pBDData, uOpcode, pvBuffer, uCount, ulDSPAddr);
-
if (pBDData->bDSPEnabled) {
switch (uOpcode) {
case IOCTL_MW_READ_INST:
@@ -564,11 +472,6 @@ int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
}
}
- retval = (bRC) ? -EIO : 0;
-
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_ReadWriteDspIStore exit retval %x\n", retval);
-
- return retval;
+ return bRC ? -EIO : 0;
}
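
[Editor's note: a recurring simplification in tp3780i.c above is dropping a write-once retval variable in favor of returning at the point the outcome is known, which also lets the exit-trace PRINTKs disappear. The shape of the refactor, with hypothetical hw_reset()/enable_sram() standing in for the driver's dsp3780I_* calls:]

	/* Before: the result is parked in retval until the single return. */
	static int reset_dsp_old(struct dsp_ctx *ctx)
	{
		int retval = 0;

		if (hw_reset(ctx) == 0)
			enable_sram(ctx);
		else
			retval = -EIO;

		return retval;
	}

	/* After: return as soon as the outcome is decided. */
	static int reset_dsp_new(struct dsp_ctx *ctx)
	{
		if (hw_reset(ctx) == 0) {
			enable_sram(ctx);
			return 0;
		}
		return -EIO;
	}
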
diff --git a/drivers/char/mwave/tp3780i.h b/drivers/char/mwave/tp3780i.h
index 8bd976d42fae..c0001a344741 100644
--- a/drivers/char/mwave/tp3780i.h
+++ b/drivers/char/mwave/tp3780i.h
@@ -75,27 +75,27 @@
#define TP_CFG_PllBypass 0 /* don't bypass */
#define TP_CFG_ChipletEnable 0xFFFF /* Enable all chiplets */
-typedef struct {
+struct thinkpad_bd_data {
int bDSPEnabled;
int bShareDspIrq;
int bShareUartIrq;
- DSP_3780I_CONFIG_SETTINGS rDspSettings;
-} THINKPAD_BD_DATA;
+ struct dsp_3780i_config_settings rDspSettings;
+};
-int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData);
-int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData);
-int tp3780I_ClaimResources(THINKPAD_BD_DATA * pBDData);
-int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData);
-int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData);
-int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData);
-int tp3780I_ResetDSP(THINKPAD_BD_DATA * pBDData);
-int tp3780I_StartDSP(THINKPAD_BD_DATA * pBDData);
-int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities);
-void tp3780I_Cleanup(THINKPAD_BD_DATA *pBDData);
-int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+int tp3780I_InitializeBoardData(struct thinkpad_bd_data *pBDData);
+int tp3780I_CalcResources(struct thinkpad_bd_data *pBDData);
+int tp3780I_ClaimResources(struct thinkpad_bd_data *pBDData);
+int tp3780I_ReleaseResources(struct thinkpad_bd_data *pBDData);
+int tp3780I_EnableDSP(struct thinkpad_bd_data *pBDData);
+int tp3780I_DisableDSP(struct thinkpad_bd_data *pBDData);
+int tp3780I_ResetDSP(struct thinkpad_bd_data *pBDData);
+int tp3780I_StartDSP(struct thinkpad_bd_data *pBDData);
+int tp3780I_QueryAbilities(struct thinkpad_bd_data *pBDData, struct mw_abilities *pAbilities);
+void tp3780I_Cleanup(struct thinkpad_bd_data *pBDData);
+int tp3780I_ReadWriteDspDStore(struct thinkpad_bd_data *pBDData, unsigned int uOpcode,
void __user *pvBuffer, unsigned int uCount,
unsigned long ulDSPAddr);
-int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+int tp3780I_ReadWriteDspIStore(struct thinkpad_bd_data *pBDData, unsigned int uOpcode,
void __user *pvBuffer, unsigned int uCount,
unsigned long ulDSPAddr);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b8b24b6ed3fe..bab03c7c4194 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -259,8 +259,8 @@ static void crng_reseed(struct work_struct *work)
u8 key[CHACHA_KEY_SIZE];
/* Immediately schedule the next reseeding, so that it fires sooner rather than later. */
- if (likely(system_unbound_wq))
- queue_delayed_work(system_unbound_wq, &next_reseed, crng_reseed_interval());
+ if (likely(system_dfl_wq))
+ queue_delayed_work(system_dfl_wq, &next_reseed, crng_reseed_interval());
extract_entropy(key, sizeof(key));
@@ -427,7 +427,7 @@ static void _get_random_bytes(void *buf, size_t len)
/*
* This returns random bytes in arbitrary quantities. The quality of the
- * random bytes is good as /dev/urandom. In order to ensure that the
+ * random bytes is as good as /dev/urandom. In order to ensure that the
* randomness provided by this function is okay, the function
* wait_for_random_bytes() should be called and return 0 at least once
* at any point prior.
@@ -491,7 +491,7 @@ out_zero_chacha:
/*
* Batched entropy returns random integers. The quality of the random
- * number is good as /dev/urandom. In order to ensure that the randomness
+ * number is as good as /dev/urandom. In order to ensure that the randomness
* provided by this function is okay, the function wait_for_random_bytes()
* should be called and return 0 at least once at any point prior.
*/
@@ -636,7 +636,7 @@ enum {
};
static struct {
- struct blake2s_state hash;
+ struct blake2s_ctx hash;
spinlock_t lock;
unsigned int init_bits;
} input_pool = {
@@ -701,7 +701,7 @@ static void extract_entropy(void *buf, size_t len)
/* next_key = HASHPRF(seed, RDSEED || 0) */
block.counter = 0;
- blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
+ blake2s(seed, sizeof(seed), (const u8 *)&block, sizeof(block), next_key, sizeof(next_key));
blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
spin_unlock_irqrestore(&input_pool.lock, flags);
@@ -711,7 +711,7 @@ static void extract_entropy(void *buf, size_t len)
i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
/* output = HASHPRF(seed, RDSEED || ++counter) */
++block.counter;
- blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
+ blake2s(seed, sizeof(seed), (const u8 *)&block, sizeof(block), buf, i);
len -= i;
buf += i;
}
@@ -741,8 +741,8 @@ static void __cold _credit_init_bits(size_t bits)
if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
- if (static_key_initialized && system_unbound_wq)
- queue_work(system_unbound_wq, &set_ready);
+ if (system_dfl_wq)
+ queue_work(system_dfl_wq, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
#ifdef CONFIG_VDSO_GETRANDOM
WRITE_ONCE(vdso_k_rng_data->is_ready, true);
@@ -794,7 +794,7 @@ static void __cold _credit_init_bits(size_t bits)
*
* add_bootloader_randomness() is called by bootloader drivers, such as EFI
* and device tree, and credits its input depending on whether or not the
- * command line option 'random.trust_bootloader'.
+ * command line option 'random.trust_bootloader' is set.
*
* add_vmfork_randomness() adds a unique (but not necessarily secret) ID
* representing the current instance of a VM to the pool, without crediting,
@@ -915,9 +915,8 @@ void __init random_init(void)
add_latent_entropy();
/*
- * If we were initialized by the cpu or bootloader before jump labels
- * or workqueues are initialized, then we should enable the static
- * branch here, where it's guaranteed that these have been initialized.
+ * If we were initialized by the cpu or bootloader before workqueues
+ * are initialized, then we should enable the static branch here.
*/
if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
crng_set_ready(NULL);
@@ -1296,6 +1295,7 @@ static void __cold try_to_generate_entropy(void)
struct entropy_timer_state *stack = PTR_ALIGN((void *)stack_bytes, SMP_CACHE_BYTES);
unsigned int i, num_different = 0;
unsigned long last = random_get_entropy();
+ cpumask_var_t timer_cpus;
int cpu = -1;
for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
@@ -1310,13 +1310,15 @@ static void __cold try_to_generate_entropy(void)
atomic_set(&stack->samples, 0);
timer_setup_on_stack(&stack->timer, entropy_timer, 0);
+ if (!alloc_cpumask_var(&timer_cpus, GFP_KERNEL))
+ goto out;
+
while (!crng_ready() && !signal_pending(current)) {
/*
* Check !timer_pending() and then ensure that any previous callback has finished
* executing by checking timer_delete_sync_try(), before queueing the next one.
*/
if (!timer_pending(&stack->timer) && timer_delete_sync_try(&stack->timer) >= 0) {
- struct cpumask timer_cpus;
unsigned int num_cpus;
/*
@@ -1326,19 +1328,19 @@ static void __cold try_to_generate_entropy(void)
preempt_disable();
/* Only schedule callbacks on timer CPUs that are online. */
- cpumask_and(&timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
- num_cpus = cpumask_weight(&timer_cpus);
+ cpumask_and(timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
+ num_cpus = cpumask_weight(timer_cpus);
/* In very bizarre case of misconfiguration, fallback to all online. */
if (unlikely(num_cpus == 0)) {
- timer_cpus = *cpu_online_mask;
- num_cpus = cpumask_weight(&timer_cpus);
+ *timer_cpus = *cpu_online_mask;
+ num_cpus = cpumask_weight(timer_cpus);
}
/* Basic CPU round-robin, which avoids the current CPU. */
do {
- cpu = cpumask_next(cpu, &timer_cpus);
+ cpu = cpumask_next(cpu, timer_cpus);
if (cpu >= nr_cpu_ids)
- cpu = cpumask_first(&timer_cpus);
+ cpu = cpumask_first(timer_cpus);
} while (cpu == smp_processor_id() && num_cpus > 1);
/* Expiring the timer at `jiffies` means it's the next tick. */
@@ -1354,6 +1356,8 @@ static void __cold try_to_generate_entropy(void)
}
mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
+ free_cpumask_var(timer_cpus);
+out:
timer_delete_sync(&stack->timer);
timer_destroy_on_stack(&stack->timer);
}
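
[Editor's note: the try_to_generate_entropy() change swaps an on-stack struct cpumask for cpumask_var_t — with large CONFIG_NR_CPUS the mask can run to kilobytes, and alloc_cpumask_var() moves it off the stack when CONFIG_CPUMASK_OFFSTACK=y, degenerating to a plain local variable otherwise. A self-contained sketch of the alloc/use/free pattern; pick_timer_cpu() is illustrative:]

	#include <linux/cpumask.h>
	#include <linux/gfp.h>
	#include <linux/sched/isolation.h>

	static int pick_timer_cpu(void)
	{
		cpumask_var_t mask;
		int cpu;

		/* Heap-backed when CONFIG_CPUMASK_OFFSTACK=y, stack otherwise. */
		if (!alloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;

		cpumask_and(mask, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
		if (cpumask_empty(mask))		/* misconfigured: fall back */
			cpumask_copy(mask, cpu_online_mask);

		cpu = cpumask_first(mask);
		free_cpumask_var(mask);
		return cpu;
	}
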
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index dddd702b2454..8a8f692b6088 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -29,10 +29,11 @@ if TCG_TPM
config TCG_TPM2_HMAC
bool "Use HMAC and encrypted transactions on the TPM bus"
- default X86_64
+ default n
select CRYPTO_ECDH
select CRYPTO_LIB_AESCFB
select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_UTILS
help
Setting this causes us to deploy a scheme which uses request
and response HMACs in addition to encryption for
@@ -189,6 +190,15 @@ config TCG_IBMVTPM
will be accessible from within Linux. To compile this driver
as a module, choose M here; the module will be called tpm_ibmvtpm.
+config TCG_LOONGSON
+ tristate "Loongson TPM Interface"
+ depends on MFD_LOONGSON_SE
+ help
+ If you want to make Loongson TPM support available, say Yes and
+ it will be accessible from within Linux. To compile this
+ driver as a module, choose M here; the module will be called
+ tpm_loongson.
+
config TCG_XEN
tristate "XEN TPM Interface"
depends on TCG_TPM && XEN
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 9de1b3ea34a9..5b5cdc0d32e4 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -46,3 +46,4 @@ obj-$(CONFIG_TCG_ARM_CRB_FFA) += tpm_crb_ffa.o
obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o
obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o
obj-$(CONFIG_TCG_SVSM) += tpm_svsm.o
+obj-$(CONFIG_TCG_LOONGSON) += tpm_loongson.o
diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
index 4c0bbba64ee5..691813d2a5a2 100644
--- a/drivers/char/tpm/eventlog/common.c
+++ b/drivers/char/tpm/eventlog/common.c
@@ -32,7 +32,7 @@ static int tpm_bios_measurements_open(struct inode *inode,
struct tpm_chip *chip;
inode_lock(inode);
- if (!inode->i_private) {
+ if (!inode->i_nlink) {
inode_unlock(inode);
return -ENODEV;
}
@@ -105,7 +105,7 @@ static int tpm_read_log(struct tpm_chip *chip)
void tpm_bios_log_setup(struct tpm_chip *chip)
{
const char *name = dev_name(&chip->dev);
- unsigned int cnt;
+ struct dentry *dentry;
int log_version;
int rc = 0;
@@ -117,14 +117,12 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
return;
log_version = rc;
- cnt = 0;
- chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
+ chip->bios_dir = securityfs_create_dir(name, NULL);
/* NOTE: securityfs_create_dir can return ENODEV if securityfs is
* compiled out. The caller should ignore the ENODEV return code.
*/
- if (IS_ERR(chip->bios_dir[cnt]))
- goto err;
- cnt++;
+ if (IS_ERR(chip->bios_dir))
+ return;
chip->bin_log_seqops.chip = chip;
if (log_version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
@@ -135,14 +133,13 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
&tpm1_binary_b_measurements_seqops;
- chip->bios_dir[cnt] =
+ dentry =
securityfs_create_file("binary_bios_measurements",
- 0440, chip->bios_dir[0],
+ 0440, chip->bios_dir,
(void *)&chip->bin_log_seqops,
&tpm_bios_measurements_ops);
- if (IS_ERR(chip->bios_dir[cnt]))
+ if (IS_ERR(dentry))
goto err;
- cnt++;
if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
@@ -150,42 +147,23 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
chip->ascii_log_seqops.seqops =
&tpm1_ascii_b_measurements_seqops;
- chip->bios_dir[cnt] =
+ dentry =
securityfs_create_file("ascii_bios_measurements",
- 0440, chip->bios_dir[0],
+ 0440, chip->bios_dir,
(void *)&chip->ascii_log_seqops,
&tpm_bios_measurements_ops);
- if (IS_ERR(chip->bios_dir[cnt]))
+ if (IS_ERR(dentry))
goto err;
- cnt++;
}
return;
err:
- chip->bios_dir[cnt] = NULL;
tpm_bios_log_teardown(chip);
return;
}
void tpm_bios_log_teardown(struct tpm_chip *chip)
{
- int i;
- struct inode *inode;
-
- /* securityfs_remove currently doesn't take care of handling sync
- * between removal and opening of pseudo files. To handle this, a
- * workaround is added by making i_private = NULL here during removal
- * and to check it during open(), both within inode_lock()/unlock().
- * This design ensures that open() either safely gets kref or fails.
- */
- for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) {
- if (chip->bios_dir[i]) {
- inode = d_inode(chip->bios_dir[i]);
- inode_lock(inode);
- inode->i_private = NULL;
- inode_unlock(inode);
- securityfs_remove(chip->bios_dir[i]);
- }
- }
+ securityfs_remove(chip->bios_dir);
}
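
[Editor's note: the eventlog teardown shrinks to one call because the chip now tracks only the top-level directory dentry. This assumes securityfs_remove() tears down a populated directory recursively, and — per the open() change above — that a removed file is detected via i_nlink rather than the old i_private handshake. Roughly, the new setup/teardown pairing looks like this sketch with placeholder names:]

	#include <linux/security.h>

	static struct dentry *log_dir;

	static int log_setup(const struct file_operations *fops)
	{
		struct dentry *file;

		log_dir = securityfs_create_dir("tpm0", NULL);
		if (IS_ERR(log_dir))
			return PTR_ERR(log_dir);

		file = securityfs_create_file("binary_bios_measurements", 0440,
					      log_dir, NULL, fops);
		if (IS_ERR(file)) {
			/* Removing the dir is assumed to sweep its contents too. */
			securityfs_remove(log_dir);
			return PTR_ERR(file);
		}
		return 0;
	}

	static void log_teardown(void)
	{
		securityfs_remove(log_dir);	/* one call for the whole tree */
	}
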
diff --git a/drivers/char/tpm/eventlog/of.c b/drivers/char/tpm/eventlog/of.c
index 930fe43d5daf..92cec9722ee4 100644
--- a/drivers/char/tpm/eventlog/of.c
+++ b/drivers/char/tpm/eventlog/of.c
@@ -24,16 +24,10 @@
static int tpm_read_log_memory_region(struct tpm_chip *chip)
{
- struct device_node *node;
struct resource res;
int rc;
- node = of_parse_phandle(chip->dev.parent->of_node, "memory-region", 0);
- if (!node)
- return -ENODEV;
-
- rc = of_address_to_resource(node, 0, &res);
- of_node_put(node);
+ rc = of_reserved_mem_region_to_resource(chip->dev.parent->of_node, 0, &res);
if (rc)
return rc;
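
[Editor's note: of_reserved_mem_region_to_resource() collapses the phandle lookup, the address-to-resource translation, and the of_node_put() into one call, so there is no node reference left to leak on an error path. The before/after in isolation; the wrapper function names are illustrative:]

	#include <linux/of.h>
	#include <linux/of_address.h>
	#include <linux/of_reserved_mem.h>

	/* Before: three steps and a refcount to remember. */
	static int log_region_old(struct device_node *parent, struct resource *res)
	{
		struct device_node *node;
		int rc;

		node = of_parse_phandle(parent, "memory-region", 0);
		if (!node)
			return -ENODEV;
		rc = of_address_to_resource(node, 0, res);
		of_node_put(node);
		return rc;
	}

	/* After: one helper, nothing to put. */
	static int log_region_new(struct device_node *parent, struct resource *res)
	{
		return of_reserved_mem_region_to_resource(parent, 0, res);
	}
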
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index c0771980bc2f..2ed7815e4899 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -300,7 +300,7 @@ static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
* send TPM commands through the I2C bus.
*/
static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
- size_t len)
+ size_t bufsiz, size_t len)
{
struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
u32 status, i, size, ordinal;
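
[Editor's note: the ->send() callback gains a bufsiz argument alongside the command length. As the tpm-interface.c hunk further down shows, this supports synchronous devices (TPM_CHIP_FLAG_SYNC) that write the response back into the same buffer from send() and return its length, so a driver must know the buffer's capacity, not just the command size. A hypothetical driver stub for the new shape:]

	static int example_send(struct tpm_chip *chip, unsigned char *buf,
				size_t bufsiz, size_t len)
	{
		if (len > bufsiz)
			return -E2BIG;

		example_hw_write(buf, len);	/* hypothetical MMIO/I2C write */

		/*
		 * Asynchronous devices return 0 here and deliver the reply
		 * through ->recv(); a TPM_CHIP_FLAG_SYNC device would instead
		 * place the reply in buf (up to bufsiz bytes) and return its
		 * length.
		 */
		return 0;
	}
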
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index e25daf2396d3..082b910ddf0d 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -231,42 +231,6 @@ struct tpm_chip *tpm_default_chip(void)
EXPORT_SYMBOL_GPL(tpm_default_chip);
/**
- * tpm_find_get_ops() - find and reserve a TPM chip
- * @chip: a &struct tpm_chip instance, %NULL for the default chip
- *
- * Finds a TPM chip and reserves its class device and operations. The chip must
- * be released with tpm_put_ops() after use.
- * This function is for internal use only. It supports existing TPM callers
- * by accepting NULL, but those callers should be converted to pass in a chip
- * directly.
- *
- * Return:
- * A reserved &struct tpm_chip instance.
- * %NULL if a chip is not found.
- * %NULL if the chip is not available.
- */
-struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip)
-{
- int rc;
-
- if (chip) {
- if (!tpm_try_get_ops(chip))
- return chip;
- return NULL;
- }
-
- chip = tpm_default_chip();
- if (!chip)
- return NULL;
- rc = tpm_try_get_ops(chip);
- /* release additional reference we got from tpm_default_chip() */
- put_device(&chip->dev);
- if (rc)
- return NULL;
- return chip;
-}
-
-/**
* tpm_dev_release() - free chip memory and the device number
* @dev: the character device for the TPM chip
*
@@ -282,7 +246,6 @@ static void tpm_dev_release(struct device *dev)
kfree(chip->work_space.context_buf);
kfree(chip->work_space.session_buf);
- kfree(chip->allocated_banks);
#ifdef CONFIG_TCG_TPM2_HMAC
kfree(chip->auth);
#endif
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index f2a5e09257dd..f942c0c8e402 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -275,7 +275,8 @@ void tpm_common_release(struct file *file, struct file_priv *priv)
int __init tpm_dev_common_init(void)
{
- tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);
+ tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
return !tpm_dev_wq ? -ENOMEM : 0;
}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 8d7e4da6ed53..f745a098908b 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(suspend_pcr,
unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
{
if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return tpm2_calc_ordinal_duration(chip, ordinal);
+ return tpm2_calc_ordinal_duration(ordinal);
else
return tpm1_calc_ordinal_duration(chip, ordinal);
}
@@ -82,6 +82,13 @@ static bool tpm_chip_req_canceled(struct tpm_chip *chip, u8 status)
return chip->ops->req_canceled(chip, status);
}
+static bool tpm_transmit_completed(u8 status, struct tpm_chip *chip)
+{
+ u8 status_masked = status & chip->ops->req_complete_mask;
+
+ return status_masked == chip->ops->req_complete_val;
+}
+
static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
{
struct tpm_header *header = buf;
@@ -106,7 +113,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
return -E2BIG;
}
- rc = chip->ops->send(chip, buf, count);
+ rc = chip->ops->send(chip, buf, bufsiz, count);
if (rc < 0) {
if (rc != -EPIPE)
dev_err(&chip->dev,
@@ -114,8 +121,19 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
return rc;
}
- /* A sanity check. send() should just return zero on success e.g.
- * not the command length.
+ /*
+ * Synchronous devices return the response directly during the send()
+ * call in the same buffer.
+ */
+ if (chip->flags & TPM_CHIP_FLAG_SYNC) {
+ len = rc;
+ rc = 0;
+ goto out_sync;
+ }
+
+ /*
+ * A sanity check. send() of asynchronous devices should just return
+	 * zero on success, i.e. not the command length.
*/
if (rc > 0) {
dev_warn(&chip->dev,
@@ -129,8 +147,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal);
do {
u8 status = tpm_chip_status(chip);
- if ((status & chip->ops->req_complete_mask) ==
- chip->ops->req_complete_val)
+ if (tpm_transmit_completed(status, chip))
goto out_recv;
if (tpm_chip_req_canceled(chip, status)) {
@@ -142,6 +159,13 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
rmb();
} while (time_before(jiffies, stop));
+ /*
+ * Check for completion one more time, just in case the device reported
+ * it while the driver was sleeping in the busy loop above.
+ */
+ if (tpm_transmit_completed(tpm_chip_status(chip), chip))
+ goto out_recv;
+
tpm_chip_cancel(chip);
dev_err(&chip->dev, "Operation Timed out\n");
return -ETIME;
@@ -151,7 +175,10 @@ out_recv:
if (len < 0) {
rc = len;
dev_err(&chip->dev, "tpm_transmit: tpm_recv: error %d\n", rc);
- } else if (len < TPM_HEADER_SIZE || len != be32_to_cpu(header->length))
+ return rc;
+ }
+out_sync:
+ if (len < TPM_HEADER_SIZE || len != be32_to_cpu(header->length))
rc = -EFAULT;
return rc ? rc : len;
@@ -286,10 +313,13 @@ int tpm_is_tpm2(struct tpm_chip *chip)
{
int rc;
- chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
+ rc = tpm_try_get_ops(chip);
+ if (rc)
+ return rc;
+
rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0;
tpm_put_ops(chip);
@@ -311,10 +341,13 @@ int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
{
int rc;
- chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
+ rc = tpm_try_get_ops(chip);
+ if (rc)
+ return rc;
+
if (chip->flags & TPM_CHIP_FLAG_TPM2)
rc = tpm2_pcr_read(chip, pcr_idx, digest, NULL);
else
@@ -342,10 +375,13 @@ int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
int rc;
int i;
- chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
+ rc = tpm_try_get_ops(chip);
+ if (rc)
+ return rc;
+
for (i = 0; i < chip->nr_allocated_banks; i++) {
if (digests[i].alg_id != chip->allocated_banks[i].alg_id) {
rc = -EINVAL;
@@ -465,10 +501,13 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
if (!out || max > TPM_MAX_RNG_DATA)
return -EINVAL;
- chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
+ rc = tpm_try_get_ops(chip);
+ if (rc)
+ return rc;
+
if (chip->flags & TPM_CHIP_FLAG_TPM2)
rc = tpm2_get_random(chip, out, max);
else
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 7bb87fa5f7a1..02c07fef41ba 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -267,7 +267,6 @@ static inline void tpm_msleep(unsigned int delay_msec)
int tpm_chip_bootstrap(struct tpm_chip *chip);
int tpm_chip_start(struct tpm_chip *chip);
void tpm_chip_stop(struct tpm_chip *chip);
-struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip);
struct tpm_chip *tpm_chip_alloc(struct device *dev,
const struct tpm_class_ops *ops);
@@ -299,7 +298,7 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip);
int tpm2_auto_startup(struct tpm_chip *chip);
void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
-unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+unsigned long tpm2_calc_ordinal_duration(u32 ordinal);
int tpm2_probe(struct tpm_chip *chip);
int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip);
int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
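
[Editor's note: with tpm_find_get_ops() deleted from tpm-chip.c and its declaration dropped here, exported helpers no longer fall back to the default chip — callers must pass a chip, and the helpers take and drop the ops reference themselves. The caller pattern now repeated in tpm_is_tpm2(), tpm_pcr_read(), tpm_pcr_extend() and tpm_get_random(), in outline:]

	int example_tpm_helper(struct tpm_chip *chip)
	{
		int rc;

		if (!chip)
			return -ENODEV;		/* no default-chip fallback anymore */

		rc = tpm_try_get_ops(chip);	/* fails if the chip is unregistering */
		if (rc)
			return rc;

		/* ... issue TPM commands against chip ... */

		tpm_put_ops(chip);
		return 0;
	}
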
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index cf64c7385105..b49a790f1bd5 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -799,11 +799,6 @@ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr)
*/
int tpm1_get_pcr_allocation(struct tpm_chip *chip)
{
- chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
- GFP_KERNEL);
- if (!chip->allocated_banks)
- return -ENOMEM;
-
chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 524d802ede26..3a77be7ebf4a 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -11,14 +11,17 @@
* used by the kernel internally.
*/
+#include "linux/dev_printk.h"
+#include "linux/tpm.h"
#include "tpm.h"
#include <crypto/hash_info.h>
+#include <linux/unaligned.h>
static bool disable_pcr_integrity;
module_param(disable_pcr_integrity, bool, 0444);
MODULE_PARM_DESC(disable_pcr_integrity, "Disable integrity protection of TPM2_PCR_Extend");
-static struct tpm2_hash tpm2_hash_map[] = {
+struct tpm2_hash tpm2_hash_map[] = {
{HASH_ALGO_SHA1, TPM_ALG_SHA1},
{HASH_ALGO_SHA256, TPM_ALG_SHA256},
{HASH_ALGO_SHA384, TPM_ALG_SHA384},
@@ -26,122 +29,71 @@ static struct tpm2_hash tpm2_hash_map[] = {
{HASH_ALGO_SM3_256, TPM_ALG_SM3_256},
};
+int tpm2_find_hash_alg(unsigned int crypto_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++)
+ if (crypto_id == tpm2_hash_map[i].crypto_id)
+ return tpm2_hash_map[i].tpm_id;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(tpm2_find_hash_alg);
+
int tpm2_get_timeouts(struct tpm_chip *chip)
{
- /* Fixed timeouts for TPM2 */
chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
chip->timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
chip->timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
chip->timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
-
- /* PTP spec timeouts */
- chip->duration[TPM_SHORT] = msecs_to_jiffies(TPM2_DURATION_SHORT);
- chip->duration[TPM_MEDIUM] = msecs_to_jiffies(TPM2_DURATION_MEDIUM);
- chip->duration[TPM_LONG] = msecs_to_jiffies(TPM2_DURATION_LONG);
-
- /* Key creation commands long timeouts */
- chip->duration[TPM_LONG_LONG] =
- msecs_to_jiffies(TPM2_DURATION_LONG_LONG);
-
chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
-
return 0;
}
-/**
- * tpm2_ordinal_duration_index() - returns an index to the chip duration table
- * @ordinal: TPM command ordinal.
- *
- * The function returns an index to the chip duration table
- * (enum tpm_duration), that describes the maximum amount of
- * time the chip could take to return the result for a particular ordinal.
- *
- * The values of the MEDIUM, and LONG durations are taken
- * from the PC Client Profile (PTP) specification (750, 2000 msec)
- *
- * LONG_LONG is for commands that generates keys which empirically takes
- * a longer time on some systems.
- *
- * Return:
- * * TPM_MEDIUM
- * * TPM_LONG
- * * TPM_LONG_LONG
- * * TPM_UNDEFINED
+/*
+ * Contains the maximum durations in milliseconds for TPM2 commands.
*/
-static u8 tpm2_ordinal_duration_index(u32 ordinal)
-{
- switch (ordinal) {
- /* Startup */
- case TPM2_CC_STARTUP: /* 144 */
- return TPM_MEDIUM;
-
- case TPM2_CC_SELF_TEST: /* 143 */
- return TPM_LONG;
-
- case TPM2_CC_GET_RANDOM: /* 17B */
- return TPM_LONG;
-
- case TPM2_CC_SEQUENCE_UPDATE: /* 15C */
- return TPM_MEDIUM;
- case TPM2_CC_SEQUENCE_COMPLETE: /* 13E */
- return TPM_MEDIUM;
- case TPM2_CC_EVENT_SEQUENCE_COMPLETE: /* 185 */
- return TPM_MEDIUM;
- case TPM2_CC_HASH_SEQUENCE_START: /* 186 */
- return TPM_MEDIUM;
-
- case TPM2_CC_VERIFY_SIGNATURE: /* 177 */
- return TPM_LONG_LONG;
-
- case TPM2_CC_PCR_EXTEND: /* 182 */
- return TPM_MEDIUM;
-
- case TPM2_CC_HIERARCHY_CONTROL: /* 121 */
- return TPM_LONG;
- case TPM2_CC_HIERARCHY_CHANGE_AUTH: /* 129 */
- return TPM_LONG;
-
- case TPM2_CC_GET_CAPABILITY: /* 17A */
- return TPM_MEDIUM;
-
- case TPM2_CC_NV_READ: /* 14E */
- return TPM_LONG;
-
- case TPM2_CC_CREATE_PRIMARY: /* 131 */
- return TPM_LONG_LONG;
- case TPM2_CC_CREATE: /* 153 */
- return TPM_LONG_LONG;
- case TPM2_CC_CREATE_LOADED: /* 191 */
- return TPM_LONG_LONG;
-
- default:
- return TPM_UNDEFINED;
- }
-}
+static const struct {
+ unsigned long ordinal;
+ unsigned long duration;
+} tpm2_ordinal_duration_map[] = {
+ {TPM2_CC_STARTUP, 750},
+ {TPM2_CC_SELF_TEST, 3000},
+ {TPM2_CC_GET_RANDOM, 2000},
+ {TPM2_CC_SEQUENCE_UPDATE, 750},
+ {TPM2_CC_SEQUENCE_COMPLETE, 750},
+ {TPM2_CC_EVENT_SEQUENCE_COMPLETE, 750},
+ {TPM2_CC_HASH_SEQUENCE_START, 750},
+ {TPM2_CC_VERIFY_SIGNATURE, 30000},
+ {TPM2_CC_PCR_EXTEND, 750},
+ {TPM2_CC_HIERARCHY_CONTROL, 2000},
+ {TPM2_CC_HIERARCHY_CHANGE_AUTH, 2000},
+ {TPM2_CC_GET_CAPABILITY, 750},
+ {TPM2_CC_NV_READ, 2000},
+ {TPM2_CC_CREATE_PRIMARY, 30000},
+ {TPM2_CC_CREATE, 30000},
+ {TPM2_CC_CREATE_LOADED, 30000},
+};
/**
- * tpm2_calc_ordinal_duration() - calculate the maximum command duration
- * @chip: TPM chip to use.
+ * tpm2_calc_ordinal_duration() - Calculate the maximum command duration
* @ordinal: TPM command ordinal.
*
- * The function returns the maximum amount of time the chip could take
- * to return the result for a particular ordinal in jiffies.
- *
- * Return: A maximal duration time for an ordinal in jiffies.
+ * Returns the maximum amount of time the chip is expected by the kernel to
+ * take, in jiffies.
*/
-unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
+unsigned long tpm2_calc_ordinal_duration(u32 ordinal)
{
- unsigned int index;
+ int i;
- index = tpm2_ordinal_duration_index(ordinal);
+ for (i = 0; i < ARRAY_SIZE(tpm2_ordinal_duration_map); i++)
+ if (ordinal == tpm2_ordinal_duration_map[i].ordinal)
+ return msecs_to_jiffies(tpm2_ordinal_duration_map[i].duration);
- if (index != TPM_UNDEFINED)
- return chip->duration[index];
- else
- return msecs_to_jiffies(TPM2_DURATION_DEFAULT);
+ return msecs_to_jiffies(TPM2_DURATION_DEFAULT);
}
-
struct tpm2_pcr_read_out {
__be32 update_cnt;
__be32 pcr_selects_cnt;
@@ -250,11 +202,15 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
}
if (!disable_pcr_integrity) {
- tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
+ rc = tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
+ if (rc) {
+ tpm_buf_destroy(&buf);
+ return rc;
+ }
tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0);
} else {
tpm_buf_append_handle(chip, &buf, pcr_idx);
- tpm_buf_append_auth(chip, &buf, 0, NULL, 0);
+ tpm_buf_append_auth(chip, &buf, NULL, 0);
}
tpm_buf_append_u32(&buf, chip->nr_allocated_banks);
@@ -265,8 +221,14 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
chip->allocated_banks[i].digest_size);
}
- if (!disable_pcr_integrity)
- tpm_buf_fill_hmac_session(chip, &buf);
+ if (!disable_pcr_integrity) {
+ rc = tpm_buf_fill_hmac_session(chip, &buf);
+ if (rc) {
+ tpm_buf_destroy(&buf);
+ return rc;
+ }
+ }
+
rc = tpm_transmit_cmd(chip, &buf, 0, "attempting extend a PCR value");
if (!disable_pcr_integrity)
rc = tpm_buf_check_hmac_response(chip, &buf, rc);
@@ -320,11 +282,24 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
do {
tpm_buf_reset(&buf, TPM2_ST_SESSIONS, TPM2_CC_GET_RANDOM);
- tpm_buf_append_hmac_session_opt(chip, &buf, TPM2_SA_ENCRYPT
- | TPM2_SA_CONTINUE_SESSION,
- NULL, 0);
+ if (tpm2_chip_auth(chip)) {
+ tpm_buf_append_hmac_session(chip, &buf,
+ TPM2_SA_ENCRYPT |
+ TPM2_SA_CONTINUE_SESSION,
+ NULL, 0);
+ } else {
+ offset = buf.handles * 4 + TPM_HEADER_SIZE;
+ head = (struct tpm_header *)buf.data;
+ if (tpm_buf_length(&buf) == offset)
+ head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
+ }
tpm_buf_append_u16(&buf, num_bytes);
- tpm_buf_fill_hmac_session(chip, &buf);
+ err = tpm_buf_fill_hmac_session(chip, &buf);
+ if (err) {
+ tpm_buf_destroy(&buf);
+ return err;
+ }
+
err = tpm_transmit_cmd(chip, &buf,
offsetof(struct tpm2_get_random_out,
buffer),
@@ -601,11 +576,9 @@ ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
nr_possible_banks = be32_to_cpup(
(__be32 *)&buf.data[TPM_HEADER_SIZE + 5]);
-
- chip->allocated_banks = kcalloc(nr_possible_banks,
- sizeof(*chip->allocated_banks),
- GFP_KERNEL);
- if (!chip->allocated_banks) {
+ if (nr_possible_banks > TPM2_MAX_PCR_BANKS) {
+ pr_err("tpm: out of bank capacity: %u > %u\n",
+ nr_possible_banks, TPM2_MAX_PCR_BANKS);
rc = -ENOMEM;
goto out;
}
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index 7b5049b3d476..4149379665c4 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -69,8 +69,8 @@
#include <linux/unaligned.h>
#include <crypto/kpp.h>
#include <crypto/ecdh.h>
-#include <crypto/hash.h>
-#include <crypto/hmac.h>
+#include <crypto/sha2.h>
+#include <crypto/utils.h>
/* maximum number of names the TPM must remember for authorization */
#define AUTH_MAX_NAMES 3
@@ -144,59 +144,80 @@ struct tpm2_auth {
/*
* Name Size based on TPM algorithm (assumes no hash bigger than 255)
*/
-static u8 name_size(const u8 *name)
+static int name_size(const u8 *name)
{
- static u8 size_map[] = {
- [TPM_ALG_SHA1] = SHA1_DIGEST_SIZE,
- [TPM_ALG_SHA256] = SHA256_DIGEST_SIZE,
- [TPM_ALG_SHA384] = SHA384_DIGEST_SIZE,
- [TPM_ALG_SHA512] = SHA512_DIGEST_SIZE,
- };
- u16 alg = get_unaligned_be16(name);
- return size_map[alg] + 2;
-}
-
-static int tpm2_parse_read_public(char *name, struct tpm_buf *buf)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
- off_t offset = TPM_HEADER_SIZE;
- u32 tot_len = be32_to_cpu(head->length);
- u32 val;
-
- /* we're starting after the header so adjust the length */
- tot_len -= TPM_HEADER_SIZE;
-
- /* skip public */
- val = tpm_buf_read_u16(buf, &offset);
- if (val > tot_len)
- return -EINVAL;
- offset += val;
- /* name */
- val = tpm_buf_read_u16(buf, &offset);
- if (val != name_size(&buf->data[offset]))
+ u16 hash_alg = get_unaligned_be16(name);
+
+ switch (hash_alg) {
+ case TPM_ALG_SHA1:
+ return SHA1_DIGEST_SIZE + 2;
+ case TPM_ALG_SHA256:
+ return SHA256_DIGEST_SIZE + 2;
+ case TPM_ALG_SHA384:
+ return SHA384_DIGEST_SIZE + 2;
+ case TPM_ALG_SHA512:
+ return SHA512_DIGEST_SIZE + 2;
+ default:
+ pr_warn("tpm: unsupported name algorithm: 0x%04x\n", hash_alg);
return -EINVAL;
- memcpy(name, &buf->data[offset], val);
- /* forget the rest */
- return 0;
+ }
}
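A TPM2B_NAME for an object is a two-byte algorithm identifier followed by the digest, which is where the "+ 2" in each branch comes from; a SHA-256 name is therefore 34 bytes. A quick self-contained sketch with the TCG algorithm registry IDs:

#include <stdio.h>

/* TCG algorithm registry IDs (TPM_ALG_*) */
enum { ALG_SHA1 = 0x0004, ALG_SHA256 = 0x000b,
       ALG_SHA384 = 0x000c, ALG_SHA512 = 0x000d };

static int name_size_for(unsigned int alg)
{
	switch (alg) {
	case ALG_SHA1:   return 20 + 2;
	case ALG_SHA256: return 32 + 2;
	case ALG_SHA384: return 48 + 2;
	case ALG_SHA512: return 64 + 2;
	default:         return -1; /* unsupported name algorithm */
	}
}

int main(void)
{
	printf("%d\n", name_size_for(ALG_SHA256)); /* 34 */
	return 0;
}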
-static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name)
+static int tpm2_read_public(struct tpm_chip *chip, u32 handle, void *name)
{
+ u32 mso = tpm2_handle_mso(handle);
+ off_t offset = TPM_HEADER_SIZE;
+ int rc, name_size_alg;
struct tpm_buf buf;
- int rc;
+
+ if (mso != TPM2_MSO_PERSISTENT && mso != TPM2_MSO_VOLATILE &&
+ mso != TPM2_MSO_NVRAM) {
+ memcpy(name, &handle, sizeof(u32));
+ return sizeof(u32);
+ }
rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC);
if (rc)
return rc;
tpm_buf_append_u32(&buf, handle);
- rc = tpm_transmit_cmd(chip, &buf, 0, "read public");
- if (rc == TPM2_RC_SUCCESS)
- rc = tpm2_parse_read_public(name, &buf);
- tpm_buf_destroy(&buf);
+ rc = tpm_transmit_cmd(chip, &buf, 0, "TPM2_ReadPublic");
+ if (rc) {
+ tpm_buf_destroy(&buf);
+ return tpm_ret_to_err(rc);
+ }
- return rc;
+ /* Skip TPMT_PUBLIC: */
+ offset += tpm_buf_read_u16(&buf, &offset);
+
+ /*
+ * Ensure space for the length field of TPM2B_NAME and hashAlg field of
+ * TPMT_HA (the extra four bytes).
+ */
+ if (offset + 4 > tpm_buf_length(&buf)) {
+ tpm_buf_destroy(&buf);
+ return -EIO;
+ }
+
+ rc = tpm_buf_read_u16(&buf, &offset);
+ name_size_alg = name_size(&buf.data[offset]);
+
+ if (name_size_alg < 0) {
+ tpm_buf_destroy(&buf);
+ return name_size_alg;
+ }
+
+ if (rc != name_size_alg) {
+ tpm_buf_destroy(&buf);
+ return -EIO;
+ }
+
+ if (offset + rc > tpm_buf_length(&buf)) {
+ tpm_buf_destroy(&buf);
+ return -EIO;
+ }
+
+ memcpy(name, &buf.data[offset], rc);
+ tpm_buf_destroy(&buf);
+ return name_size_alg;
}
#endif /* CONFIG_TCG_TPM2_HMAC */
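The early return at the top of tpm2_read_public() encodes the TPM 2.0 rule that handles without a public area (PCRs, sessions, permanent handles) serve as their own name, so no TPM2_ReadPublic round trip is needed. A hedged sketch of that most-significant-octet test (the 0x80/0x81/0x01 values mirror the driver's TPM2_MSO_* constants and are assumptions here):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* assumed: TPM2_MSO_VOLATILE=0x80, TPM2_MSO_PERSISTENT=0x81,
 * TPM2_MSO_NVRAM=0x01 (TPM 2.0 handle ranges) */
static int handle_self_names(uint32_t handle, uint8_t name[4])
{
	uint8_t mso = handle >> 24;

	if (mso == 0x80 || mso == 0x81 || mso == 0x01)
		return 0; /* object handle: needs TPM2_ReadPublic */

	memcpy(name, &handle, sizeof(handle)); /* the handle is the name */
	return (int)sizeof(handle);
}

int main(void)
{
	uint8_t name[4];

	/* 0x40... is a permanent handle (e.g. TPM_RH_NULL = 0x40000007) */
	printf("%d\n", handle_self_names(0x40000007, name)); /* 4 */
	return 0;
}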
@@ -221,52 +242,76 @@ static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name)
* As with most tpm_buf operations, success is assumed because failure
* will be caused by an incorrect programming model and indicated by a
* kernel message.
+ *
+ * Ends the authorization session on failure.
*/
-void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
- u32 handle, u8 *name)
+int tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
+ u32 handle, u8 *name)
{
#ifdef CONFIG_TCG_TPM2_HMAC
enum tpm2_mso_type mso = tpm2_handle_mso(handle);
struct tpm2_auth *auth;
int slot;
+ int ret;
#endif
if (!tpm2_chip_auth(chip)) {
tpm_buf_append_handle(chip, buf, handle);
- return;
+ return 0;
}
#ifdef CONFIG_TCG_TPM2_HMAC
slot = (tpm_buf_length(buf) - TPM_HEADER_SIZE) / 4;
if (slot >= AUTH_MAX_NAMES) {
- dev_err(&chip->dev, "TPM: too many handles\n");
- return;
+ dev_err(&chip->dev, "too many handles\n");
+ ret = -EIO;
+ goto err;
}
auth = chip->auth;
- WARN(auth->session != tpm_buf_length(buf),
- "name added in wrong place\n");
+ if (auth->session != tpm_buf_length(buf)) {
+ dev_err(&chip->dev, "session state malformed\n");
+ ret = -EIO;
+ goto err;
+ }
tpm_buf_append_u32(buf, handle);
auth->session += 4;
if (mso == TPM2_MSO_PERSISTENT ||
mso == TPM2_MSO_VOLATILE ||
mso == TPM2_MSO_NVRAM) {
- if (!name)
- tpm2_read_public(chip, handle, auth->name[slot]);
+ if (!name) {
+ ret = tpm2_read_public(chip, handle, auth->name[slot]);
+ if (ret < 0)
+ goto err;
+ }
} else {
- if (name)
- dev_err(&chip->dev, "TPM: Handle does not require name but one is specified\n");
+ if (name) {
+ dev_err(&chip->dev, "handle 0x%08x does not use a name\n",
+ handle);
+ ret = -EIO;
+ goto err;
+ }
}
auth->name_h[slot] = handle;
- if (name)
- memcpy(auth->name[slot], name, name_size(name));
+ if (name) {
+ ret = name_size(name);
+ if (ret < 0)
+ goto err;
+ memcpy(auth->name[slot], name, ret);
+ }
+#endif
+ return 0;
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+err:
+ tpm2_end_auth_session(chip);
+ return tpm_ret_to_err(ret);
#endif
}
EXPORT_SYMBOL_GPL(tpm_buf_append_name);
void tpm_buf_append_auth(struct tpm_chip *chip, struct tpm_buf *buf,
- u8 attributes, u8 *passphrase, int passphrase_len)
+ u8 *passphrase, int passphrase_len)
{
/* offset tells us where the sessions area begins */
int offset = buf->handles * 4 + TPM_HEADER_SIZE;
@@ -327,8 +372,7 @@ void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
#endif
if (!tpm2_chip_auth(chip)) {
- tpm_buf_append_auth(chip, buf, attributes, passphrase,
- passphrase_len);
+ tpm_buf_append_auth(chip, buf, passphrase, passphrase_len);
return;
}
@@ -385,51 +429,6 @@ static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
u32 *handle, u8 *name);
/*
- * It turns out the crypto hmac(sha256) is hard for us to consume
- * because it assumes a fixed key and the TPM seems to change the key
- * on every operation, so we weld the hmac init and final functions in
- * here to give it the same usage characteristics as a regular hash
- */
-static void tpm2_hmac_init(struct sha256_state *sctx, u8 *key, u32 key_len)
-{
- u8 pad[SHA256_BLOCK_SIZE];
- int i;
-
- sha256_init(sctx);
- for (i = 0; i < sizeof(pad); i++) {
- if (i < key_len)
- pad[i] = key[i];
- else
- pad[i] = 0;
- pad[i] ^= HMAC_IPAD_VALUE;
- }
- sha256_update(sctx, pad, sizeof(pad));
-}
-
-static void tpm2_hmac_final(struct sha256_state *sctx, u8 *key, u32 key_len,
- u8 *out)
-{
- u8 pad[SHA256_BLOCK_SIZE];
- int i;
-
- for (i = 0; i < sizeof(pad); i++) {
- if (i < key_len)
- pad[i] = key[i];
- else
- pad[i] = 0;
- pad[i] ^= HMAC_OPAD_VALUE;
- }
-
- /* collect the final hash; use out as temporary storage */
- sha256_final(sctx, out);
-
- sha256_init(sctx);
- sha256_update(sctx, pad, sizeof(pad));
- sha256_update(sctx, out, SHA256_DIGEST_SIZE);
- sha256_final(sctx, out);
-}
-
-/*
* assume hash sha256 and nonces u, v of size SHA256_DIGEST_SIZE but
* otherwise standard tpm2_KDFa. Note output is in bytes not bits.
*/
@@ -440,16 +439,16 @@ static void tpm2_KDFa(u8 *key, u32 key_len, const char *label, u8 *u,
const __be32 bits = cpu_to_be32(bytes * 8);
while (bytes > 0) {
- struct sha256_state sctx;
+ struct hmac_sha256_ctx hctx;
__be32 c = cpu_to_be32(counter);
- tpm2_hmac_init(&sctx, key, key_len);
- sha256_update(&sctx, (u8 *)&c, sizeof(c));
- sha256_update(&sctx, label, strlen(label)+1);
- sha256_update(&sctx, u, SHA256_DIGEST_SIZE);
- sha256_update(&sctx, v, SHA256_DIGEST_SIZE);
- sha256_update(&sctx, (u8 *)&bits, sizeof(bits));
- tpm2_hmac_final(&sctx, key, key_len, out);
+ hmac_sha256_init_usingrawkey(&hctx, key, key_len);
+ hmac_sha256_update(&hctx, (u8 *)&c, sizeof(c));
+ hmac_sha256_update(&hctx, label, strlen(label) + 1);
+ hmac_sha256_update(&hctx, u, SHA256_DIGEST_SIZE);
+ hmac_sha256_update(&hctx, v, SHA256_DIGEST_SIZE);
+ hmac_sha256_update(&hctx, (u8 *)&bits, sizeof(bits));
+ hmac_sha256_final(&hctx, out);
bytes -= SHA256_DIGEST_SIZE;
counter++;
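tpm2_KDFa() is the SP800-108 KDF in counter mode with HMAC-SHA256 as the PRF; each block hashes BE32(counter) || label || 0x00 || u || v || BE32(bits), the label's terminating NUL being included just as strlen(label) + 1 does above. A userspace sketch of one block, assuming OpenSSL's one-shot HMAC() is available (label assumed shorter than 32 characters):

#include <arpa/inet.h>    /* htonl */
#include <openssl/evp.h>  /* assumes OpenSSL */
#include <openssl/hmac.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DIGEST 32

/* One KDFa(SHA-256) block: HMAC(key, BE32(1) || label || '\0' || u || v || BE32(bits)) */
static void kdfa_block(const uint8_t *key, size_t key_len, const char *label,
		       const uint8_t u[DIGEST], const uint8_t v[DIGEST],
		       uint8_t out[DIGEST])
{
	uint8_t msg[4 + 32 + DIGEST + DIGEST + 4];
	uint32_t c = htonl(1), bits = htonl(DIGEST * 8);
	size_t off = 0, ll = strlen(label) + 1; /* include the NUL, as the kernel does */
	unsigned int olen = DIGEST;

	memcpy(msg + off, &c, 4); off += 4;
	memcpy(msg + off, label, ll); off += ll;
	memcpy(msg + off, u, DIGEST); off += DIGEST;
	memcpy(msg + off, v, DIGEST); off += DIGEST;
	memcpy(msg + off, &bits, 4); off += 4;

	HMAC(EVP_sha256(), key, (int)key_len, msg, off, out, &olen);
}

int main(void)
{
	uint8_t key[DIGEST] = { 0 }, u[DIGEST] = { 0 }, v[DIGEST] = { 0 }, out[DIGEST];

	kdfa_block(key, sizeof(key), "ATH", u, v, out);
	printf("%02x\n", out[0]);
	return 0;
}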
@@ -467,7 +466,7 @@ static void tpm2_KDFa(u8 *key, u32 key_len, const char *label, u8 *u,
static void tpm2_KDFe(u8 z[EC_PT_SZ], const char *str, u8 *pt_u, u8 *pt_v,
u8 *out)
{
- struct sha256_state sctx;
+ struct sha256_ctx sctx;
/*
* this should be an iterative counter, but because we know
* we're only taking 32 bytes for the point using a sha256
@@ -578,11 +577,9 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip,
* encryption key and encrypts the first parameter of the command
* buffer with it.
*
- * As with most tpm_buf operations, success is assumed because failure
- * will be caused by an incorrect programming model and indicated by a
- * kernel message.
+ * Ends the authorization session on failure.
*/
-void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
+int tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
{
u32 cc, handles, val;
struct tpm2_auth *auth = chip->auth;
@@ -592,10 +589,14 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
u8 *hmac = NULL;
u32 attrs;
u8 cphash[SHA256_DIGEST_SIZE];
- struct sha256_state sctx;
+ struct sha256_ctx sctx;
+ struct hmac_sha256_ctx hctx;
+ int ret;
- if (!auth)
- return;
+ if (!auth) {
+ ret = -EIO;
+ goto err;
+ }
/* save the command code in BE format */
auth->ordinal = head->ordinal;
@@ -604,9 +605,11 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
i = tpm2_find_cc(chip, cc);
if (i < 0) {
- dev_err(&chip->dev, "Command 0x%x not found in TPM\n", cc);
- return;
+ dev_err(&chip->dev, "command 0x%08x not found\n", cc);
+ ret = -EIO;
+ goto err;
}
+
attrs = chip->cc_attrs_tbl[i];
handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0);
@@ -620,9 +623,9 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
u32 handle = tpm_buf_read_u32(buf, &offset_s);
if (auth->name_h[i] != handle) {
- dev_err(&chip->dev, "TPM: handle %d wrong for name\n",
- i);
- return;
+ dev_err(&chip->dev, "invalid handle 0x%08x\n", handle);
+ ret = -EIO;
+ goto err;
}
}
/* point offset_s to the start of the sessions */
@@ -653,12 +656,14 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
offset_s += len;
}
if (offset_s != offset_p) {
- dev_err(&chip->dev, "TPM session length is incorrect\n");
- return;
+ dev_err(&chip->dev, "session length is incorrect\n");
+ ret = -EIO;
+ goto err;
}
if (!hmac) {
- dev_err(&chip->dev, "TPM could not find HMAC session\n");
- return;
+ dev_err(&chip->dev, "could not find HMAC session\n");
+ ret = -EIO;
+ goto err;
}
/* encrypt before HMAC */
@@ -690,8 +695,11 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
if (mso == TPM2_MSO_PERSISTENT ||
mso == TPM2_MSO_VOLATILE ||
mso == TPM2_MSO_NVRAM) {
- sha256_update(&sctx, auth->name[i],
- name_size(auth->name[i]));
+ ret = name_size(auth->name[i]);
+ if (ret < 0)
+ goto err;
+
+ sha256_update(&sctx, auth->name[i], ret);
} else {
__be32 h = cpu_to_be32(auth->name_h[i]);
@@ -704,14 +712,19 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
sha256_final(&sctx, cphash);
/* now calculate the hmac */
- tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len);
- sha256_update(&sctx, cphash, sizeof(cphash));
- sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce));
- sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
- sha256_update(&sctx, &auth->attrs, 1);
- tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len, hmac);
+ hmac_sha256_init_usingrawkey(&hctx, auth->session_key,
+ sizeof(auth->session_key) +
+ auth->passphrase_len);
+ hmac_sha256_update(&hctx, cphash, sizeof(cphash));
+ hmac_sha256_update(&hctx, auth->our_nonce, sizeof(auth->our_nonce));
+ hmac_sha256_update(&hctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
+ hmac_sha256_update(&hctx, &auth->attrs, 1);
+ hmac_sha256_final(&hctx, hmac);
+ return 0;
+
+err:
+ tpm2_end_auth_session(chip);
+ return ret;
}
EXPORT_SYMBOL(tpm_buf_fill_hmac_session);
@@ -750,7 +763,8 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
off_t offset_s, offset_p;
u8 rphash[SHA256_DIGEST_SIZE];
u32 attrs, cc;
- struct sha256_state sctx;
+ struct sha256_ctx sctx;
+ struct hmac_sha256_ctx hctx;
u16 tag = be16_to_cpu(head->tag);
int parm_len, len, i, handles;
@@ -820,21 +834,20 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
sha256_final(&sctx, rphash);
/* now calculate the hmac */
- tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len);
- sha256_update(&sctx, rphash, sizeof(rphash));
- sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
- sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce));
- sha256_update(&sctx, &auth->attrs, 1);
+ hmac_sha256_init_usingrawkey(&hctx, auth->session_key,
+ sizeof(auth->session_key) +
+ auth->passphrase_len);
+ hmac_sha256_update(&hctx, rphash, sizeof(rphash));
+ hmac_sha256_update(&hctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
+ hmac_sha256_update(&hctx, auth->our_nonce, sizeof(auth->our_nonce));
+ hmac_sha256_update(&hctx, &auth->attrs, 1);
/* we're done with the rphash, so put our idea of the hmac there */
- tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len, rphash);
- if (memcmp(rphash, &buf->data[offset_s], SHA256_DIGEST_SIZE) == 0) {
- rc = 0;
- } else {
+ hmac_sha256_final(&hctx, rphash);
+ if (crypto_memneq(rphash, &buf->data[offset_s], SHA256_DIGEST_SIZE)) {
dev_err(&chip->dev, "TPM: HMAC check failed\n");
goto out;
}
+ rc = 0;
/* now do response decryption */
if (auth->attrs & TPM2_SA_ENCRYPT) {
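The memcmp() to crypto_memneq() change above is not cosmetic: memcmp() returns at the first differing byte, so an attacker submitting guessed HMACs could learn the mismatch position from response timing. The constant-time idiom accumulates the XOR of every byte pair instead; a minimal sketch of the same idea:

#include <stddef.h>
#include <stdio.h>

/* Constant-time inequality test in the spirit of crypto_memneq():
 * no data-dependent branches, no early exit. */
static int ct_memneq(const void *a, const void *b, size_t n)
{
	const unsigned char *pa = a, *pb = b;
	unsigned char diff = 0;

	while (n--)
		diff |= *pa++ ^ *pb++;

	return diff != 0; /* 1 if the buffers differ, 0 if equal */
}

int main(void)
{
	printf("%d %d\n", ct_memneq("abcd", "abcd", 4), ct_memneq("abcd", "abce", 4));
	return 0;
}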
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index 54a0360a3c95..f25faf468bba 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -148,7 +148,8 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
return size;
}
-static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
+static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
int i;
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 876edf2705ab..6c25305c256e 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -133,8 +133,7 @@ static inline bool tpm_crb_has_idle(u32 start_method)
{
return !(start_method == ACPI_TPM2_START_METHOD ||
start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD ||
- start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC ||
- start_method == ACPI_TPM2_CRB_WITH_ARM_FFA);
+ start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC);
}
static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
@@ -180,6 +179,7 @@ static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete
*
* @dev: crb device
* @priv: crb private data
+ * @loc: locality
*
* Write CRB_CTRL_REQ_GO_IDLE to TPM_CRB_CTRL_REQ
* The device should respond within TIMEOUT_C by clearing the bit.
@@ -191,7 +191,7 @@ static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete
*
* Return: 0 always
*/
-static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
+static int __crb_go_idle(struct device *dev, struct crb_priv *priv, int loc)
{
int rc;
@@ -200,6 +200,12 @@ static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, loc);
+ if (rc)
+ return rc;
+ }
+
rc = crb_try_pluton_doorbell(priv, true);
if (rc)
return rc;
@@ -220,7 +226,7 @@ static int crb_go_idle(struct tpm_chip *chip)
struct device *dev = &chip->dev;
struct crb_priv *priv = dev_get_drvdata(dev);
- return __crb_go_idle(dev, priv);
+ return __crb_go_idle(dev, priv, chip->locality);
}
/**
@@ -228,6 +234,7 @@ static int crb_go_idle(struct tpm_chip *chip)
*
* @dev: crb device
* @priv: crb private data
+ * @loc: locality
*
* Write CRB_CTRL_REQ_CMD_READY to TPM_CRB_CTRL_REQ
* and poll till the device acknowledge it by clearing the bit.
@@ -238,7 +245,7 @@ static int crb_go_idle(struct tpm_chip *chip)
*
* Return: 0 on success -ETIME on timeout;
*/
-static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
+static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv, int loc)
{
int rc;
@@ -247,6 +254,12 @@ static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, loc);
+ if (rc)
+ return rc;
+ }
+
rc = crb_try_pluton_doorbell(priv, true);
if (rc)
return rc;
@@ -267,7 +280,7 @@ static int crb_cmd_ready(struct tpm_chip *chip)
struct device *dev = &chip->dev;
struct crb_priv *priv = dev_get_drvdata(dev);
- return __crb_cmd_ready(dev, priv);
+ return __crb_cmd_ready(dev, priv, chip->locality);
}
static int __crb_request_locality(struct device *dev,
@@ -401,7 +414,7 @@ static int crb_do_acpi_start(struct tpm_chip *chip)
#ifdef CONFIG_ARM64
/*
* This is a TPM Command Response Buffer start method that invokes a
- * Secure Monitor Call to requrest the firmware to execute or cancel
+ * Secure Monitor Call to request the firmware to execute or cancel
* a TPM 2.0 command.
*/
static int tpm_crb_smc_start(struct device *dev, unsigned long func_id)
@@ -426,7 +439,7 @@ static int tpm_crb_smc_start(struct device *dev, unsigned long func_id)
}
#endif
-static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int crb_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t len)
{
struct crb_priv *priv = dev_get_drvdata(&chip->dev);
int rc = 0;
@@ -444,7 +457,7 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
/* Seems to be necessary for every command */
if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
- __crb_cmd_ready(&chip->dev, priv);
+ __crb_cmd_ready(&chip->dev, priv, chip->locality);
memcpy_toio(priv->cmd, buf, len);
@@ -672,7 +685,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
* PTT HW bug w/a: wake up the device to access
* possibly not retained registers.
*/
- ret = __crb_cmd_ready(dev, priv);
+ ret = __crb_cmd_ready(dev, priv, 0);
if (ret)
goto out_relinquish_locality;
@@ -744,7 +757,7 @@ out:
if (!ret)
priv->cmd_size = cmd_size;
- __crb_go_idle(dev, priv);
+ __crb_go_idle(dev, priv, 0);
out_relinquish_locality:
diff --git a/drivers/char/tpm/tpm_crb_ffa.c b/drivers/char/tpm/tpm_crb_ffa.c
index 4ead61f01299..755b77b32ea4 100644
--- a/drivers/char/tpm/tpm_crb_ffa.c
+++ b/drivers/char/tpm/tpm_crb_ffa.c
@@ -10,8 +10,16 @@
#define pr_fmt(fmt) "CRB_FFA: " fmt
#include <linux/arm_ffa.h>
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
#include "tpm_crb_ffa.h"
+static unsigned int busy_timeout_ms = 2000;
+
+module_param(busy_timeout_ms, uint, 0644);
+MODULE_PARM_DESC(busy_timeout_ms,
+ "Maximum time in ms to retry before giving up on busy");
+
/* TPM service function status codes */
#define CRB_FFA_OK 0x05000001
#define CRB_FFA_OK_RESULTS_RETURNED 0x05000002
@@ -115,6 +123,7 @@ struct tpm_crb_ffa {
};
static struct tpm_crb_ffa *tpm_crb_ffa;
+static struct ffa_driver tpm_crb_ffa_driver;
static int tpm_crb_ffa_to_linux_errno(int errno)
{
@@ -168,50 +177,51 @@ static int tpm_crb_ffa_to_linux_errno(int errno)
*/
int tpm_crb_ffa_init(void)
{
+ int ret = 0;
+
+ if (!IS_MODULE(CONFIG_TCG_ARM_CRB_FFA)) {
+ ret = ffa_register(&tpm_crb_ffa_driver);
+ if (ret) {
+ tpm_crb_ffa = ERR_PTR(-ENODEV);
+ return ret;
+ }
+ }
+
if (!tpm_crb_ffa)
- return -ENOENT;
+ ret = -ENOENT;
if (IS_ERR_VALUE(tpm_crb_ffa))
- return -ENODEV;
+ ret = -ENODEV;
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(tpm_crb_ffa_init);
-static int __tpm_crb_ffa_send_recieve(unsigned long func_id,
- unsigned long a0,
- unsigned long a1,
- unsigned long a2)
+static int __tpm_crb_ffa_try_send_receive(unsigned long func_id,
+ unsigned long a0, unsigned long a1,
+ unsigned long a2)
{
const struct ffa_msg_ops *msg_ops;
int ret;
- if (!tpm_crb_ffa)
- return -ENOENT;
-
msg_ops = tpm_crb_ffa->ffa_dev->ops->msg_ops;
if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) {
- memset(&tpm_crb_ffa->direct_msg_data2, 0x00,
- sizeof(struct ffa_send_direct_data2));
-
- tpm_crb_ffa->direct_msg_data2.data[0] = func_id;
- tpm_crb_ffa->direct_msg_data2.data[1] = a0;
- tpm_crb_ffa->direct_msg_data2.data[2] = a1;
- tpm_crb_ffa->direct_msg_data2.data[3] = a2;
+ tpm_crb_ffa->direct_msg_data2 = (struct ffa_send_direct_data2){
+ .data = { func_id, a0, a1, a2 },
+ };
ret = msg_ops->sync_send_receive2(tpm_crb_ffa->ffa_dev,
&tpm_crb_ffa->direct_msg_data2);
if (!ret)
ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data2.data[0]);
} else {
- memset(&tpm_crb_ffa->direct_msg_data, 0x00,
- sizeof(struct ffa_send_direct_data));
-
- tpm_crb_ffa->direct_msg_data.data1 = func_id;
- tpm_crb_ffa->direct_msg_data.data2 = a0;
- tpm_crb_ffa->direct_msg_data.data3 = a1;
- tpm_crb_ffa->direct_msg_data.data4 = a2;
+ tpm_crb_ffa->direct_msg_data = (struct ffa_send_direct_data){
+ .data1 = func_id,
+ .data2 = a0,
+ .data3 = a1,
+ .data4 = a2,
+ };
ret = msg_ops->sync_send_receive(tpm_crb_ffa->ffa_dev,
&tpm_crb_ffa->direct_msg_data);
@@ -219,6 +229,33 @@ static int __tpm_crb_ffa_send_recieve(unsigned long func_id,
ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data.data1);
}
+ return ret;
+}
+
+static int __tpm_crb_ffa_send_receive(unsigned long func_id, unsigned long a0,
+ unsigned long a1, unsigned long a2)
+{
+ ktime_t start, stop;
+ int ret;
+
+ if (!tpm_crb_ffa)
+ return -ENOENT;
+
+ start = ktime_get();
+ stop = ktime_add(start, ms_to_ktime(busy_timeout_ms));
+
+ for (;;) {
+ ret = __tpm_crb_ffa_try_send_receive(func_id, a0, a1, a2);
+ if (ret != -EBUSY)
+ break;
+
+ usleep_range(50, 100);
+ if (ktime_after(ktime_get(), stop)) {
+ dev_warn(&tpm_crb_ffa->ffa_dev->dev,
+ "Busy retry timed out\n");
+ break;
+ }
+ }
return ret;
}
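The wrapper budgets wall-clock time rather than a retry count, so busy_timeout_ms maps directly onto how long the secure world may stay busy. A userspace analogue using CLOCK_MONOTONIC (POSIX environment assumed):

#include <errno.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Retry try_send() on -EBUSY until timeout_ms of wall clock has passed. */
static int send_with_retry(int (*try_send)(void), unsigned int timeout_ms)
{
	long long stop = now_ms() + timeout_ms;
	int ret;

	for (;;) {
		ret = try_send();
		if (ret != -EBUSY)
			break;

		usleep(75); /* mirrors usleep_range(50, 100) */
		if (now_ms() > stop)
			break; /* give up, surface the last -EBUSY */
	}
	return ret;
}

static int always_busy(void) { return -EBUSY; }

int main(void)
{
	printf("%d\n", send_with_retry(always_busy, 10)); /* -16 after ~10 ms */
	return 0;
}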
@@ -236,7 +273,7 @@ static int __tpm_crb_ffa_send_recieve(unsigned long func_id,
*
* Return: 0 on success, negative error code on failure.
*/
-int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor)
+static int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor)
{
int rc;
@@ -251,7 +288,7 @@ int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor)
guard(mutex)(&tpm_crb_ffa->msg_data_lock);
- rc = __tpm_crb_ffa_send_recieve(CRB_FFA_GET_INTERFACE_VERSION, 0x00, 0x00, 0x00);
+ rc = __tpm_crb_ffa_send_receive(CRB_FFA_GET_INTERFACE_VERSION, 0x00, 0x00, 0x00);
if (!rc) {
if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) {
*major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]);
@@ -264,7 +301,6 @@ int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor)
return rc;
}
-EXPORT_SYMBOL_GPL(tpm_crb_ffa_get_interface_version);
/**
* tpm_crb_ffa_start() - signals the TPM that a field has changed in the CRB
@@ -289,7 +325,7 @@ int tpm_crb_ffa_start(int request_type, int locality)
guard(mutex)(&tpm_crb_ffa->msg_data_lock);
- return __tpm_crb_ffa_send_recieve(CRB_FFA_START, request_type, locality, 0x00);
+ return __tpm_crb_ffa_send_receive(CRB_FFA_START, request_type, locality, 0x00);
}
EXPORT_SYMBOL_GPL(tpm_crb_ffa_start);
@@ -369,7 +405,9 @@ static struct ffa_driver tpm_crb_ffa_driver = {
.id_table = tpm_crb_ffa_device_id,
};
+#ifdef MODULE
module_ffa_driver(tpm_crb_ffa_driver);
+#endif
MODULE_AUTHOR("Arm");
MODULE_DESCRIPTION("TPM CRB FFA driver");
diff --git a/drivers/char/tpm/tpm_crb_ffa.h b/drivers/char/tpm/tpm_crb_ffa.h
index 645c41ede10e..d7e1344ea003 100644
--- a/drivers/char/tpm/tpm_crb_ffa.h
+++ b/drivers/char/tpm/tpm_crb_ffa.h
@@ -11,11 +11,9 @@
#if IS_REACHABLE(CONFIG_TCG_ARM_CRB_FFA)
int tpm_crb_ffa_init(void);
-int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor);
int tpm_crb_ffa_start(int request_type, int locality);
#else
static inline int tpm_crb_ffa_init(void) { return 0; }
-static inline int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor) { return 0; }
static inline int tpm_crb_ffa_start(int request_type, int locality) { return 0; }
#endif
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
index 53ba28ccd5d3..4e63c30aeaf1 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.c
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -31,45 +31,19 @@ static const uuid_t ftpm_ta_uuid =
0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96);
/**
- * ftpm_tee_tpm_op_recv() - retrieve fTPM response.
- * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h.
- * @buf: the buffer to store data.
- * @count: the number of bytes to read.
- *
- * Return:
- * In case of success the number of bytes received.
- * On failure, -errno.
- */
-static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
-{
- struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
- size_t len;
-
- len = pvt_data->resp_len;
- if (count < len) {
- dev_err(&chip->dev,
- "%s: Invalid size in recv: count=%zd, resp_len=%zd\n",
- __func__, count, len);
- return -EIO;
- }
-
- memcpy(buf, pvt_data->resp_buf, len);
- pvt_data->resp_len = 0;
-
- return len;
-}
-
-/**
- * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory.
+ * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory
+ * and retrieve the response.
* @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h
- * @buf: the buffer to send.
- * @len: the number of bytes to send.
+ * @buf: the buffer to send and to store the response.
+ * @bufsiz: the size of the buffer.
+ * @cmd_len: the number of bytes to send.
*
* Return:
- * In case of success, returns 0.
+ * In case of success, returns the number of bytes received.
* On failure, -errno
*/
-static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t cmd_len)
{
struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
size_t resp_len;
@@ -80,16 +54,15 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
struct tee_param command_params[4];
struct tee_shm *shm = pvt_data->shm;
- if (len > MAX_COMMAND_SIZE) {
+ if (cmd_len > MAX_COMMAND_SIZE) {
dev_err(&chip->dev,
"%s: len=%zd exceeds MAX_COMMAND_SIZE supported by fTPM TA\n",
- __func__, len);
+ __func__, cmd_len);
return -EIO;
}
memset(&transceive_args, 0, sizeof(transceive_args));
memset(command_params, 0, sizeof(command_params));
- pvt_data->resp_len = 0;
/* Invoke FTPM_OPTEE_TA_SUBMIT_COMMAND function of fTPM TA */
transceive_args = (struct tee_ioctl_invoke_arg) {
@@ -103,7 +76,7 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
.attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT,
.u.memref = {
.shm = shm,
- .size = len,
+ .size = cmd_len,
.shm_offs = 0,
},
};
@@ -115,7 +88,7 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
return PTR_ERR(temp_buf);
}
memset(temp_buf, 0, (MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE));
- memcpy(temp_buf, buf, len);
+ memcpy(temp_buf, buf, cmd_len);
command_params[1] = (struct tee_param) {
.attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT,
@@ -156,17 +129,20 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
__func__, resp_len);
return -EIO;
}
+ if (resp_len > bufsiz) {
+ dev_err(&chip->dev,
+ "%s: resp_len=%zd exceeds bufsiz=%zd\n",
+ __func__, resp_len, bufsiz);
+ return -EIO;
+ }
- /* sanity checks look good, cache the response */
- memcpy(pvt_data->resp_buf, temp_buf, resp_len);
- pvt_data->resp_len = resp_len;
+ memcpy(buf, temp_buf, resp_len);
- return 0;
+ return resp_len;
}
static const struct tpm_class_ops ftpm_tee_tpm_ops = {
.flags = TPM_OPS_AUTO_STARTUP,
- .recv = ftpm_tee_tpm_op_recv,
.send = ftpm_tee_tpm_op_send,
};
@@ -251,7 +227,7 @@ static int ftpm_tee_probe(struct device *dev)
}
pvt_data->chip = chip;
- pvt_data->chip->flags |= TPM_CHIP_FLAG_TPM2;
+ pvt_data->chip->flags |= TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_SYNC;
/* Create a character device for the fTPM */
rc = tpm_chip_register(pvt_data->chip);
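With the cached response and the recv() op removed, the whole exchange happens in one synchronous send(): cmd_len bytes are consumed from buf, the response overwrites the same buffer bounded by bufsiz, and the return value is the response length; TPM_CHIP_FLAG_SYNC tells the core not to call recv(). A userspace sketch of that contract, using a canned 10-byte TPM2 success header (tag 0x8001, size 10, rc 0):

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int sync_send(unsigned char *buf, size_t bufsiz, size_t cmd_len)
{
	static const unsigned char resp[10] = { 0x80, 0x01, 0, 0, 0, 10, 0, 0, 0, 0 };

	if (cmd_len > bufsiz)
		return -EINVAL;
	if (sizeof(resp) > bufsiz)
		return -EIO; /* the response must fit the caller's buffer */

	memcpy(buf, resp, sizeof(resp)); /* overwrite the command in place */
	return (int)sizeof(resp);       /* SYNC drivers return the response length */
}

int main(void)
{
	unsigned char buf[32] = { 0 };
	int n = sync_send(buf, sizeof(buf), 12);

	printf("len=%d tag=%02x%02x\n", n, buf[0], buf[1]);
	return 0;
}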
diff --git a/drivers/char/tpm/tpm_ftpm_tee.h b/drivers/char/tpm/tpm_ftpm_tee.h
index e39903b7ea07..8d5c3f0d2879 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.h
+++ b/drivers/char/tpm/tpm_ftpm_tee.h
@@ -22,16 +22,12 @@
* struct ftpm_tee_private - fTPM's private data
* @chip: struct tpm_chip instance registered with tpm framework.
* @session: fTPM TA session identifier.
- * @resp_len: cached response buffer length.
- * @resp_buf: cached response buffer.
* @ctx: TEE context handler.
* @shm: Memory pool shared with fTPM TA in TEE.
*/
struct ftpm_tee_private {
struct tpm_chip *chip;
u32 session;
- size_t resp_len;
- u8 resp_buf[MAX_RESPONSE_SIZE];
struct tee_context *ctx;
struct tee_shm *shm;
};
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index d1d27fdfe523..4f229656a8e2 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -37,7 +37,8 @@ struct priv_data {
u8 buffer[sizeof(struct tpm_header) + 25];
};
-static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
struct priv_data *priv = dev_get_drvdata(&chip->dev);
struct i2c_client *client = to_i2c_client(chip->dev.parent);
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 81d8a78dc655..bdf1f329a679 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -514,7 +514,8 @@ out:
return size;
}
-static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
int rc, status;
ssize_t burstcnt;
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index 3c3ee5f551db..d44903b29929 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -350,7 +350,8 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
struct priv_data *priv = dev_get_drvdata(&chip->dev);
struct device *dev = chip->dev.parent;
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 76d048f63d55..4734a69406ce 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -191,13 +191,15 @@ static int tpm_ibmvtpm_resume(struct device *dev)
* tpm_ibmvtpm_send() - Send a TPM command
* @chip: tpm chip struct
* @buf: buffer contains data to send
- * @count: size of buffer
+ * @bufsiz: size of the buffer
+ * @count: length of the command
*
* Return:
* 0 on success,
* -errno on error
*/
-static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
bool retry = true;
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 2d2ae37153ba..7638b65b851b 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -312,7 +312,8 @@ recv_begin:
return -EIO;
}
-static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
+static int tpm_inf_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
int i;
int ret;
diff --git a/drivers/char/tpm/tpm_loongson.c b/drivers/char/tpm/tpm_loongson.c
new file mode 100644
index 000000000000..9e50250763d1
--- /dev/null
+++ b/drivers/char/tpm/tpm_loongson.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Loongson Technology Corporation Limited. */
+
+#include <linux/device.h>
+#include <linux/mfd/loongson-se.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+
+#include "tpm.h"
+
+struct tpm_loongson_cmd {
+ u32 cmd_id;
+ u32 data_off;
+ u32 data_len;
+ u32 pad[5];
+};
+
+static int tpm_loongson_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct loongson_se_engine *tpm_engine = dev_get_drvdata(&chip->dev);
+ struct tpm_loongson_cmd *cmd_ret = tpm_engine->command_ret;
+
+ if (cmd_ret->data_len > count)
+ return -EIO;
+
+ memcpy(buf, tpm_engine->data_buffer, cmd_ret->data_len);
+
+ return cmd_ret->data_len;
+}
+
+static int tpm_loongson_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t count)
+{
+ struct loongson_se_engine *tpm_engine = dev_get_drvdata(&chip->dev);
+ struct tpm_loongson_cmd *cmd = tpm_engine->command;
+
+ if (count > tpm_engine->buffer_size)
+ return -E2BIG;
+
+ cmd->data_len = count;
+ memcpy(tpm_engine->data_buffer, buf, count);
+
+ return loongson_se_send_engine_cmd(tpm_engine);
+}
+
+static const struct tpm_class_ops tpm_loongson_ops = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .recv = tpm_loongson_recv,
+ .send = tpm_loongson_send,
+};
+
+static int tpm_loongson_probe(struct platform_device *pdev)
+{
+ struct loongson_se_engine *tpm_engine;
+ struct device *dev = &pdev->dev;
+ struct tpm_loongson_cmd *cmd;
+ struct tpm_chip *chip;
+
+ tpm_engine = loongson_se_init_engine(dev->parent, SE_ENGINE_TPM);
+ if (!tpm_engine)
+ return -ENODEV;
+ cmd = tpm_engine->command;
+ cmd->cmd_id = SE_CMD_TPM;
+ cmd->data_off = tpm_engine->buffer_off;
+
+ chip = tpmm_chip_alloc(dev, &tpm_loongson_ops);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ chip->flags = TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_IRQ;
+ dev_set_drvdata(&chip->dev, tpm_engine);
+
+ return tpm_chip_register(chip);
+}
+
+static struct platform_driver tpm_loongson = {
+ .probe = tpm_loongson_probe,
+ .driver = {
+ .name = "tpm_loongson",
+ },
+};
+module_platform_driver(tpm_loongson);
+
+MODULE_ALIAS("platform:tpm_loongson");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Loongson TPM driver");
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 0f62bbc940da..879ac88f5783 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -178,7 +178,8 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
return size;
}
-static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
+static int tpm_nsc_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
u8 data;
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index bc7b1b4501b3..c9793a3d986d 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -33,6 +33,20 @@ static const guid_t tpm_ppi_guid =
GUID_INIT(0x3DDDFAA6, 0x361B, 0x4EB4,
0xA4, 0x24, 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53);
+static const char * const tpm_ppi_info[] = {
+ "Not implemented",
+ "BIOS only",
+ "Blocked for OS by system firmware",
+ "User required",
+ "User not required",
+};
+
+/* A mutex to protect access to the cache from concurrent readers */
+static DEFINE_MUTEX(tpm_ppi_lock);
+
+static u32 ppi_operations_cache[PPI_VS_REQ_END + 1];
+static bool ppi_cache_populated;
+
static bool tpm_ppi_req_has_parameter(u64 req)
{
return req == 23;
@@ -52,7 +66,7 @@ static ssize_t tpm_show_ppi_version(struct device *dev,
{
struct tpm_chip *chip = to_tpm_chip(dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", chip->ppi_version);
+ return sysfs_emit(buf, "%s\n", chip->ppi_version);
}
static ssize_t tpm_show_ppi_request(struct device *dev,
@@ -87,12 +101,10 @@ static ssize_t tpm_show_ppi_request(struct device *dev,
else {
req = obj->package.elements[1].integer.value;
if (tpm_ppi_req_has_parameter(req))
- size = scnprintf(buf, PAGE_SIZE,
- "%llu %llu\n", req,
- obj->package.elements[2].integer.value);
+ size = sysfs_emit(buf, "%llu %llu\n", req,
+ obj->package.elements[2].integer.value);
else
- size = scnprintf(buf, PAGE_SIZE,
- "%llu\n", req);
+ size = sysfs_emit(buf, "%llu\n", req);
}
} else if (obj->package.count == 2 &&
obj->package.elements[0].type == ACPI_TYPE_INTEGER &&
@@ -100,8 +112,8 @@ static ssize_t tpm_show_ppi_request(struct device *dev,
if (obj->package.elements[0].integer.value)
size = -EFAULT;
else
- size = scnprintf(buf, PAGE_SIZE, "%llu\n",
- obj->package.elements[1].integer.value);
+ size = sysfs_emit(buf, "%llu\n",
+ obj->package.elements[1].integer.value);
}
ACPI_FREE(obj);
@@ -211,10 +223,10 @@ static ssize_t tpm_show_ppi_transition_action(struct device *dev,
}
if (ret < ARRAY_SIZE(info) - 1)
- status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret, info[ret]);
+ status = sysfs_emit(buf, "%d: %s\n", ret, info[ret]);
else
- status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret,
- info[ARRAY_SIZE(info)-1]);
+ status = sysfs_emit(buf, "%d: %s\n", ret,
+ info[ARRAY_SIZE(info) - 1]);
return status;
}
@@ -255,23 +267,23 @@ static ssize_t tpm_show_ppi_response(struct device *dev,
res = ret_obj[2].integer.value;
if (req) {
if (res == 0)
- status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
- "0: Success");
+ status = sysfs_emit(buf, "%llu %s\n", req,
+ "0: Success");
else if (res == 0xFFFFFFF0)
- status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
- "0xFFFFFFF0: User Abort");
+ status = sysfs_emit(buf, "%llu %s\n", req,
+ "0xFFFFFFF0: User Abort");
else if (res == 0xFFFFFFF1)
- status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
- "0xFFFFFFF1: BIOS Failure");
+ status = sysfs_emit(buf, "%llu %s\n", req,
+ "0xFFFFFFF1: BIOS Failure");
else if (res >= 1 && res <= 0x00000FFF)
- status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
- req, res, "Corresponding TPM error");
+ status = sysfs_emit(buf, "%llu %llu: %s\n",
+ req, res, "Corresponding TPM error");
else
- status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
- req, res, "Error");
+ status = sysfs_emit(buf, "%llu %llu: %s\n",
+ req, res, "Error");
} else {
- status = scnprintf(buf, PAGE_SIZE, "%llu: %s\n",
- req, "No Recent Request");
+ status = sysfs_emit(buf, "%llu: %s\n",
+ req, "No Recent Request");
}
cleanup:
@@ -279,46 +291,33 @@ cleanup:
return status;
}
-static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
- u32 end)
+static ssize_t cache_ppi_operations(acpi_handle dev_handle, char *buf)
{
int i;
u32 ret;
- char *str = buf;
union acpi_object *obj, tmp;
union acpi_object argv = ACPI_INIT_DSM_ARGV4(1, &tmp);
- static char *info[] = {
- "Not implemented",
- "BIOS only",
- "Blocked for OS by BIOS",
- "User required",
- "User not required",
- };
-
if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID_1,
1 << TPM_PPI_FN_GETOPR))
return -EPERM;
tmp.integer.type = ACPI_TYPE_INTEGER;
- for (i = start; i <= end; i++) {
+ for (i = 0; i <= PPI_VS_REQ_END; i++) {
tmp.integer.value = i;
obj = tpm_eval_dsm(dev_handle, TPM_PPI_FN_GETOPR,
ACPI_TYPE_INTEGER, &argv,
TPM_PPI_REVISION_ID_1);
- if (!obj) {
+ if (!obj)
return -ENOMEM;
- } else {
- ret = obj->integer.value;
- ACPI_FREE(obj);
- }
- if (ret > 0 && ret < ARRAY_SIZE(info))
- str += scnprintf(str, PAGE_SIZE, "%d %d: %s\n",
- i, ret, info[ret]);
+ ret = obj->integer.value;
+ ppi_operations_cache[i] = ret;
+ ACPI_FREE(obj);
}
- return str - buf;
+ return 0;
}
static ssize_t tpm_show_ppi_tcg_operations(struct device *dev,
@@ -326,9 +325,30 @@ static ssize_t tpm_show_ppi_tcg_operations(struct device *dev,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t len = 0;
+ u32 ret;
+ int i;
+
+ mutex_lock(&tpm_ppi_lock);
+ if (!ppi_cache_populated) {
+ len = cache_ppi_operations(chip->acpi_dev_handle, buf);
+ if (len < 0) {
+ mutex_unlock(&tpm_ppi_lock);
+ return len;
+ }
+
+ ppi_cache_populated = true;
+ }
- return show_ppi_operations(chip->acpi_dev_handle, buf, 0,
- PPI_TPM_REQ_MAX);
+ for (i = 0; i <= PPI_TPM_REQ_MAX; i++) {
+ ret = ppi_operations_cache[i];
+ if (ret >= 0 && ret < ARRAY_SIZE(tpm_ppi_info))
+ len += sysfs_emit_at(buf, len, "%d %d: %s\n",
+ i, ret, tpm_ppi_info[ret]);
+ }
+ mutex_unlock(&tpm_ppi_lock);
+
+ return len;
}
static ssize_t tpm_show_ppi_vs_operations(struct device *dev,
@@ -336,9 +356,30 @@ static ssize_t tpm_show_ppi_vs_operations(struct device *dev,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t len = 0;
+ u32 ret;
+ int i;
+
+ mutex_lock(&tpm_ppi_lock);
+ if (!ppi_cache_populated) {
+ len = cache_ppi_operations(chip->acpi_dev_handle, buf);
+ if (len < 0) {
+ mutex_unlock(&tpm_ppi_lock);
+ return len;
+ }
+
+ ppi_cache_populated = true;
+ }
+
+ for (i = PPI_VS_REQ_START; i <= PPI_VS_REQ_END; i++) {
+ ret = ppi_operations_cache[i];
+ if (ret >= 0 && ret < ARRAY_SIZE(tpm_ppi_info))
+ len += sysfs_emit_at(buf, len, "%d %d: %s\n",
+ i, ret, tpm_ppi_info[ret]);
+ }
+ mutex_unlock(&tpm_ppi_lock);
- return show_ppi_operations(chip->acpi_dev_handle, buf, PPI_VS_REQ_START,
- PPI_VS_REQ_END);
+ return len;
}
static DEVICE_ATTR(version, S_IRUGO, tpm_show_ppi_version, NULL);
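Both show routines now share a populate-once shape: take the mutex, fill the whole cache on first use, then format from the cached values. A self-contained pthread analogue of the same shape (the fill function stands in for the per-index ACPI _DSM evaluations):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NENTRIES 28 /* stands in for PPI_VS_REQ_END + 1 */

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static bool cache_populated;
static unsigned int cache[NENTRIES];

/* Stand-in for the per-index firmware queries. */
static int fill_cache(void)
{
	for (size_t i = 0; i < NENTRIES; i++)
		cache[i] = i % 5;
	return 0;
}

static int read_cached(size_t idx, unsigned int *out)
{
	int ret = 0;

	pthread_mutex_lock(&cache_lock);
	if (!cache_populated) {
		ret = fill_cache();
		if (!ret)
			cache_populated = true;
	}
	if (!ret)
		*out = cache[idx];
	pthread_mutex_unlock(&cache_lock);

	return ret;
}

int main(void)
{
	unsigned int v;

	read_cached(7, &v);
	printf("%u\n", v); /* 2 */
	return 0;
}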
diff --git a/drivers/char/tpm/tpm_svsm.c b/drivers/char/tpm/tpm_svsm.c
index 4280edf427d6..f5ba0f64850b 100644
--- a/drivers/char/tpm/tpm_svsm.c
+++ b/drivers/char/tpm/tpm_svsm.c
@@ -25,37 +25,32 @@ struct tpm_svsm_priv {
void *buffer;
};
-static int tpm_svsm_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_svsm_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t cmd_len)
{
struct tpm_svsm_priv *priv = dev_get_drvdata(&chip->dev);
int ret;
- ret = svsm_vtpm_cmd_request_fill(priv->buffer, 0, buf, len);
+ ret = svsm_vtpm_cmd_request_fill(priv->buffer, 0, buf, cmd_len);
if (ret)
return ret;
/*
* The SVSM call uses the same buffer for the command and for the
- * response, so after this call, the buffer will contain the response
- * that can be used by .recv() op.
+ * response, so after this call, the buffer will contain the response.
+ *
+ * Note: we have to use an internal buffer because the device in SVSM
+ * expects the svsm_vtpm header + data to be physically contiguous.
*/
- return snp_svsm_vtpm_send_command(priv->buffer);
-}
-
-static int tpm_svsm_recv(struct tpm_chip *chip, u8 *buf, size_t len)
-{
- struct tpm_svsm_priv *priv = dev_get_drvdata(&chip->dev);
+ ret = snp_svsm_vtpm_send_command(priv->buffer);
+ if (ret)
+ return ret;
- /*
- * The internal buffer contains the response after we send the command
- * to SVSM.
- */
- return svsm_vtpm_cmd_response_parse(priv->buffer, buf, len);
+ return svsm_vtpm_cmd_response_parse(priv->buffer, buf, bufsiz);
}
static struct tpm_class_ops tpm_chip_ops = {
.flags = TPM_OPS_AUTO_STARTUP,
- .recv = tpm_svsm_recv,
.send = tpm_svsm_send,
};
@@ -84,6 +79,7 @@ static int __init tpm_svsm_probe(struct platform_device *pdev)
dev_set_drvdata(&chip->dev, priv);
+ chip->flags |= TPM_CHIP_FLAG_SYNC;
err = tpm2_probe(chip);
if (err)
return err;
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index ed0d3d8449b3..e2a1769081b1 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -265,8 +265,7 @@ static u8 tpm_tis_status(struct tpm_chip *chip)
/*
* Dump stack for forensics, as invalid TPM_STS.x could be
- * potentially triggered by impaired tpm_try_get_ops() or
- * tpm_find_get_ops().
+ * potentially triggered by impaired tpm_try_get_ops().
*/
dump_stack();
}
@@ -580,7 +579,8 @@ out_err:
return rc;
}
-static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
int rc, irq;
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
@@ -977,8 +977,8 @@ restore_irqs:
* will call disable_irq which undoes all of the above.
*/
if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
- tpm_tis_write8(priv, original_int_vec,
- TPM_INT_VECTOR(priv->locality));
+ tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality),
+ original_int_vec);
rc = -1;
}
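The hunk above is a pure argument-swap fix: the old call passed the saved vector where the register offset belongs, so the restore path wrote to the wrong location. One way to make that class of bug uncompilable is a distinct type for register offsets; a hedged sketch (the 0x00c offset is illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Wrap offsets in a struct so value/offset swaps fail to compile. */
struct reg_off { uint32_t off; };

static void write8(struct reg_off r, uint8_t val)
{
	printf("write 0x%02x to +0x%03x\n", val, (unsigned)r.off);
}

int main(void)
{
	struct reg_off int_vector = { 0x00c }; /* hypothetical register offset */
	uint8_t saved_vec = 0x05;

	write8(int_vector, saved_vec);    /* ok */
	/* write8(saved_vec, int_vector); would not compile */
	return 0;
}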
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
index 3b55a7b05c46..fc6891a0b693 100644
--- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -546,13 +546,15 @@ out_err:
* tpm_cr50_i2c_tis_send() - TPM transmission callback.
* @chip: A TPM chip.
* @buf: Buffer to send.
- * @len: Buffer length.
+ * @bufsiz: Buffer size.
+ * @len: Command length.
*
* Return:
* - 0: Success.
* - -errno: A POSIX error code.
*/
-static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
size_t burstcnt, limit, sent = 0;
u8 tpm_go[4] = { TPM_STS_GO };
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 8fe4a01eea12..0818bb517805 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -321,12 +321,14 @@ static int vtpm_proxy_is_driver_command(struct tpm_chip *chip,
*
* @chip: tpm chip to use
* @buf: send buffer
+ * @bufsiz: size of the buffer
* @count: bytes to send
*
* Return:
* 0 in case of success, negative error value otherwise.
*/
-static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
+static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 80cca3b83b22..556bf2256716 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -131,7 +131,8 @@ static size_t shr_data_offset(struct vtpm_shared_page *shr)
return struct_size(shr, extra_pages, shr->nr_extra_pages);
}
-static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct tpm_private *priv = dev_get_drvdata(&chip->dev);
struct vtpm_shared_page *shr = priv->shr;
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c
index efb1ae834265..fc4e69b5cb6a 100644
--- a/drivers/char/xillybus/xillybus_core.c
+++ b/drivers/char/xillybus/xillybus_core.c
@@ -1973,7 +1973,7 @@ EXPORT_SYMBOL(xillybus_endpoint_remove);
static int __init xillybus_init(void)
{
- xillybus_wq = alloc_workqueue(xillyname, 0, 0);
+ xillybus_wq = alloc_workqueue(xillyname, WQ_UNBOUND, 0);
if (!xillybus_wq)
return -ENOMEM;
diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c
index 45771b1a3716..386531474213 100644
--- a/drivers/char/xillybus/xillyusb.c
+++ b/drivers/char/xillybus/xillyusb.c
@@ -2163,7 +2163,7 @@ static int xillyusb_probe(struct usb_interface *interface,
spin_lock_init(&xdev->error_lock);
xdev->in_counter = 0;
xdev->in_bytes_left = 0;
- xdev->workq = alloc_workqueue(xillyname, WQ_HIGHPRI, 0);
+ xdev->workq = alloc_workqueue(xillyname, WQ_HIGHPRI | WQ_UNBOUND, 0);
if (!xdev->workq) {
dev_err(&interface->dev, "Failed to allocate work queue\n");
@@ -2275,7 +2275,7 @@ static int __init xillyusb_init(void)
{
int rc = 0;
- wakeup_wq = alloc_workqueue(xillyname, 0, 0);
+ wakeup_wq = alloc_workqueue(xillyname, WQ_UNBOUND, 0);
if (!wakeup_wq)
return -ENOMEM;
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 19c1ed280fd7..3a1611008e48 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -61,7 +61,6 @@ config LMK04832
config COMMON_CLK_APPLE_NCO
tristate "Clock driver for Apple SoC NCOs"
depends on ARCH_APPLE || COMPILE_TEST
- default ARCH_APPLE
help
This driver supports NCO (Numerically Controlled Oscillator) blocks
found on Apple SoCs such as t8103 (M1). The blocks are typically
@@ -88,6 +87,15 @@ config COMMON_CLK_RK808
These multi-function devices have two fixed-rate oscillators, clocked at 32KHz each.
Clkout1 is always on, Clkout2 can off by control register.
+config COMMON_CLK_RP1
+ tristate "Raspberry Pi RP1-based clock support"
+ depends on MISC_RP1 || COMPILE_TEST
+ default MISC_RP1
+ help
+ Enable common clock framework support for Raspberry Pi RP1.
+ This multi-function device has 3 main PLLs and several clock
+ generators to drive the internal sub-peripherals.
+
config COMMON_CLK_HI655X
tristate "Clock driver for Hi655x" if EXPERT
depends on (MFD_HI655X_PMIC || COMPILE_TEST)
@@ -356,6 +364,7 @@ config COMMON_CLK_LOCHNAGAR
config COMMON_CLK_NPCM8XX
tristate "Clock driver for the NPCM8XX SoC Family"
depends on ARCH_NPCM || COMPILE_TEST
+ select AUXILIARY_BUS
help
This driver supports the clocks on the Nuvoton BMC NPCM8XX SoC Family,
all the clocks are initialized by the bootloader, so this driver
@@ -493,6 +502,15 @@ config COMMON_CLK_SP7021
Not all features of the PLL are currently supported
by the driver.
+config COMMON_CLK_RPMI
+ tristate "Clock driver based on RISC-V RPMI"
+ depends on RISCV || COMPILE_TEST
+ depends on MAILBOX
+ default RISCV
+ help
+ Support for clocks based on the clock service group defined by
+ the RISC-V platform management interface (RPMI) specification.
+
source "drivers/clk/actions/Kconfig"
source "drivers/clk/analogbits/Kconfig"
source "drivers/clk/baikal-t1/Kconfig"
@@ -503,6 +521,7 @@ source "drivers/clk/imx/Kconfig"
source "drivers/clk/ingenic/Kconfig"
source "drivers/clk/keystone/Kconfig"
source "drivers/clk/mediatek/Kconfig"
+source "drivers/clk/mmp/Kconfig"
source "drivers/clk/meson/Kconfig"
source "drivers/clk/mstar/Kconfig"
source "drivers/clk/microchip/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 42867cd37c33..61ec08404442 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -18,6 +18,7 @@ clk-test-y := clk_test.o \
kunit_clk_assigned_rates_without_consumer.dtbo.o \
kunit_clk_assigned_rates_zero.dtbo.o \
kunit_clk_assigned_rates_zero_consumer.dtbo.o \
+ kunit_clk_hw_get_dev_of_node.dtbo.o \
kunit_clk_parent_data_test.dtbo.o
obj-$(CONFIG_COMMON_CLK) += clk-divider.o
obj-$(CONFIG_COMMON_CLK) += clk-fixed-factor.o
@@ -84,6 +85,8 @@ obj-$(CONFIG_CLK_LS1028A_PLLDIG) += clk-plldig.o
obj-$(CONFIG_COMMON_CLK_PWM) += clk-pwm.o
obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o
obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o
+obj-$(CONFIG_COMMON_CLK_RP1) += clk-rp1.o
+obj-$(CONFIG_COMMON_CLK_RPMI) += clk-rpmi.o
obj-$(CONFIG_COMMON_CLK_HI655X) += clk-hi655x.o
obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
obj-$(CONFIG_COMMON_CLK_SCMI) += clk-scmi.o
@@ -122,8 +125,7 @@ obj-$(CONFIG_ARCH_HISI) += hisilicon/
obj-y += imgtec/
obj-y += imx/
obj-y += ingenic/
-obj-$(CONFIG_ARCH_K3) += keystone/
-obj-$(CONFIG_ARCH_KEYSTONE) += keystone/
+obj-y += keystone/
obj-y += mediatek/
obj-$(CONFIG_ARCH_MESON) += meson/
obj-y += microchip/
diff --git a/drivers/clk/actions/owl-common.c b/drivers/clk/actions/owl-common.c
index c62024b7c737..b3dded204dc5 100644
--- a/drivers/clk/actions/owl-common.c
+++ b/drivers/clk/actions/owl-common.c
@@ -18,7 +18,6 @@ static const struct regmap_config owl_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x00cc,
- .fast_io = true,
};
static void owl_clk_set_regmap(const struct owl_clk_desc *desc,
diff --git a/drivers/clk/actions/owl-common.h b/drivers/clk/actions/owl-common.h
index 8fb65f3e82d7..5768a2e0f6a0 100644
--- a/drivers/clk/actions/owl-common.h
+++ b/drivers/clk/actions/owl-common.h
@@ -32,7 +32,7 @@ struct owl_clk_desc {
};
static inline struct owl_clk_common *
- hw_to_owl_clk_common(const struct clk_hw *hw)
+ hw_to_owl_clk_common(struct clk_hw *hw)
{
return container_of(hw, struct owl_clk_common, hw);
}
diff --git a/drivers/clk/actions/owl-composite.c b/drivers/clk/actions/owl-composite.c
index 48f177f6ce9c..00b74f8bc437 100644
--- a/drivers/clk/actions/owl-composite.c
+++ b/drivers/clk/actions/owl-composite.c
@@ -122,13 +122,13 @@ static int owl_comp_fact_set_rate(struct clk_hw *hw, unsigned long rate,
rate, parent_rate);
}
-static long owl_comp_fix_fact_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_comp_fix_fact_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_composite *comp = hw_to_owl_comp(hw);
struct clk_fixed_factor *fix_fact_hw = &comp->rate.fix_fact_hw;
- return comp->fix_fact_ops->round_rate(&fix_fact_hw->hw, rate, parent_rate);
+ return comp->fix_fact_ops->determine_rate(&fix_fact_hw->hw, req);
}
static unsigned long owl_comp_fix_fact_recalc_rate(struct clk_hw *hw,
@@ -193,7 +193,7 @@ const struct clk_ops owl_comp_fix_fact_ops = {
.is_enabled = owl_comp_is_enabled,
/* fix_fact_ops */
- .round_rate = owl_comp_fix_fact_round_rate,
+ .determine_rate = owl_comp_fix_fact_determine_rate,
.recalc_rate = owl_comp_fix_fact_recalc_rate,
.set_rate = owl_comp_fix_fact_set_rate,
};
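Every owl conversion in this series is the same mechanical move: the old round_rate callback returned the chosen rate through a long and could adjust *parent_rate, while determine_rate fills a request structure and returns a status code. A userspace analogue of the migration:

#include <stdio.h>

struct rate_request {
	unsigned long rate;             /* in: target; out: chosen */
	unsigned long best_parent_rate; /* callbacks may adjust this */
};

/* Legacy-style helper: pick the closest divided-down rate. */
static long round_rate_helper(unsigned long rate, unsigned long *parent_rate)
{
	unsigned long div;

	if (!rate)
		return -1;
	div = (*parent_rate + rate / 2) / rate;
	return *parent_rate / (div ? div : 1);
}

/* New-style callback: same math, expressed through the request. */
static int determine_rate(struct rate_request *req)
{
	long rate = round_rate_helper(req->rate, &req->best_parent_rate);

	if (rate < 0)
		return (int)rate;
	req->rate = rate;
	return 0;
}

int main(void)
{
	struct rate_request req = { .rate = 3000000, .best_parent_rate = 24000000 };

	determine_rate(&req);
	printf("%lu\n", req.rate); /* 3000000 */
	return 0;
}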
diff --git a/drivers/clk/actions/owl-composite.h b/drivers/clk/actions/owl-composite.h
index bca38bf8f218..6d7c6f0c47c8 100644
--- a/drivers/clk/actions/owl-composite.h
+++ b/drivers/clk/actions/owl-composite.h
@@ -108,7 +108,7 @@ struct owl_composite {
}, \
}
-static inline struct owl_composite *hw_to_owl_comp(const struct clk_hw *hw)
+static inline struct owl_composite *hw_to_owl_comp(struct clk_hw *hw)
{
struct owl_clk_common *common = hw_to_owl_clk_common(hw);
diff --git a/drivers/clk/actions/owl-divider.c b/drivers/clk/actions/owl-divider.c
index cddac00fe324..118f1393c678 100644
--- a/drivers/clk/actions/owl-divider.c
+++ b/drivers/clk/actions/owl-divider.c
@@ -23,13 +23,16 @@ long owl_divider_helper_round_rate(struct owl_clk_common *common,
div_hw->div_flags);
}
-static long owl_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_divider *div = hw_to_owl_divider(hw);
- return owl_divider_helper_round_rate(&div->common, &div->div_hw,
- rate, parent_rate);
+ req->rate = owl_divider_helper_round_rate(&div->common, &div->div_hw,
+ req->rate,
+ &req->best_parent_rate);
+
+ return 0;
}
unsigned long owl_divider_helper_recalc_rate(struct owl_clk_common *common,
@@ -89,6 +92,6 @@ static int owl_divider_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops owl_divider_ops = {
.recalc_rate = owl_divider_recalc_rate,
- .round_rate = owl_divider_round_rate,
+ .determine_rate = owl_divider_determine_rate,
.set_rate = owl_divider_set_rate,
};
diff --git a/drivers/clk/actions/owl-divider.h b/drivers/clk/actions/owl-divider.h
index 083be6d80954..d76f58782c52 100644
--- a/drivers/clk/actions/owl-divider.h
+++ b/drivers/clk/actions/owl-divider.h
@@ -49,7 +49,7 @@ struct owl_divider {
}, \
}
-static inline struct owl_divider *hw_to_owl_divider(const struct clk_hw *hw)
+static inline struct owl_divider *hw_to_owl_divider(struct clk_hw *hw)
{
struct owl_clk_common *common = hw_to_owl_clk_common(hw);
diff --git a/drivers/clk/actions/owl-factor.c b/drivers/clk/actions/owl-factor.c
index 64f316cf7cfc..12f41f6bacd6 100644
--- a/drivers/clk/actions/owl-factor.c
+++ b/drivers/clk/actions/owl-factor.c
@@ -130,14 +130,16 @@ long owl_factor_helper_round_rate(struct owl_clk_common *common,
return *parent_rate * mul / div;
}
-static long owl_factor_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_factor_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_factor *factor = hw_to_owl_factor(hw);
struct owl_factor_hw *factor_hw = &factor->factor_hw;
- return owl_factor_helper_round_rate(&factor->common, factor_hw,
- rate, parent_rate);
+ req->rate = owl_factor_helper_round_rate(&factor->common, factor_hw,
+ req->rate, &req->best_parent_rate);
+
+ return 0;
}
unsigned long owl_factor_helper_recalc_rate(struct owl_clk_common *common,
@@ -214,7 +216,7 @@ static int owl_factor_set_rate(struct clk_hw *hw, unsigned long rate,
}
const struct clk_ops owl_factor_ops = {
- .round_rate = owl_factor_round_rate,
+ .determine_rate = owl_factor_determine_rate,
.recalc_rate = owl_factor_recalc_rate,
.set_rate = owl_factor_set_rate,
};
diff --git a/drivers/clk/actions/owl-factor.h b/drivers/clk/actions/owl-factor.h
index 04b89cbfdccb..24c704d40925 100644
--- a/drivers/clk/actions/owl-factor.h
+++ b/drivers/clk/actions/owl-factor.h
@@ -57,7 +57,7 @@ struct owl_factor {
#define div_mask(d) ((1 << ((d)->width)) - 1)
-static inline struct owl_factor *hw_to_owl_factor(const struct clk_hw *hw)
+static inline struct owl_factor *hw_to_owl_factor(struct clk_hw *hw)
{
struct owl_clk_common *common = hw_to_owl_clk_common(hw);
diff --git a/drivers/clk/actions/owl-gate.h b/drivers/clk/actions/owl-gate.h
index c2f161c93fda..ac458d4385ee 100644
--- a/drivers/clk/actions/owl-gate.h
+++ b/drivers/clk/actions/owl-gate.h
@@ -56,7 +56,7 @@ struct owl_gate {
}, \
} \
-static inline struct owl_gate *hw_to_owl_gate(const struct clk_hw *hw)
+static inline struct owl_gate *hw_to_owl_gate(struct clk_hw *hw)
{
struct owl_clk_common *common = hw_to_owl_clk_common(hw);
diff --git a/drivers/clk/actions/owl-mux.h b/drivers/clk/actions/owl-mux.h
index 53b9ab665294..dc0ecc2d5e10 100644
--- a/drivers/clk/actions/owl-mux.h
+++ b/drivers/clk/actions/owl-mux.h
@@ -44,7 +44,7 @@ struct owl_mux {
}, \
}
-static inline struct owl_mux *hw_to_owl_mux(const struct clk_hw *hw)
+static inline struct owl_mux *hw_to_owl_mux(struct clk_hw *hw)
{
struct owl_clk_common *common = hw_to_owl_clk_common(hw);
diff --git a/drivers/clk/actions/owl-pll.c b/drivers/clk/actions/owl-pll.c
index 155f313986b4..869690b79cc1 100644
--- a/drivers/clk/actions/owl-pll.c
+++ b/drivers/clk/actions/owl-pll.c
@@ -56,8 +56,8 @@ static const struct clk_pll_table *_get_pll_table(
return table;
}
-static long owl_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_pll *pll = hw_to_owl_pll(hw);
struct owl_pll_hw *pll_hw = &pll->pll_hw;
@@ -65,17 +65,24 @@ static long owl_pll_round_rate(struct clk_hw *hw, unsigned long rate,
u32 mul;
if (pll_hw->table) {
- clkt = _get_pll_table(pll_hw->table, rate);
- return clkt->rate;
+ clkt = _get_pll_table(pll_hw->table, req->rate);
+ req->rate = clkt->rate;
+
+ return 0;
}
/* fixed frequency */
- if (pll_hw->width == 0)
- return pll_hw->bfreq;
+ if (pll_hw->width == 0) {
+ req->rate = pll_hw->bfreq;
- mul = owl_pll_calculate_mul(pll_hw, rate);
+ return 0;
+ }
+
+ mul = owl_pll_calculate_mul(pll_hw, req->rate);
- return pll_hw->bfreq * mul;
+ req->rate = pll_hw->bfreq * mul;
+
+ return 0;
}
static unsigned long owl_pll_recalc_rate(struct clk_hw *hw,
@@ -188,7 +195,7 @@ const struct clk_ops owl_pll_ops = {
.enable = owl_pll_enable,
.disable = owl_pll_disable,
.is_enabled = owl_pll_is_enabled,
- .round_rate = owl_pll_round_rate,
+ .determine_rate = owl_pll_determine_rate,
.recalc_rate = owl_pll_recalc_rate,
.set_rate = owl_pll_set_rate,
};
diff --git a/drivers/clk/actions/owl-pll.h b/drivers/clk/actions/owl-pll.h
index 78e5fc360b03..58e19f1ade43 100644
--- a/drivers/clk/actions/owl-pll.h
+++ b/drivers/clk/actions/owl-pll.h
@@ -98,7 +98,7 @@ struct owl_pll {
#define mul_mask(m) ((1 << ((m)->width)) - 1)
-static inline struct owl_pll *hw_to_owl_pll(const struct clk_hw *hw)
+static inline struct owl_pll *hw_to_owl_pll(struct clk_hw *hw)
{
struct owl_clk_common *common = hw_to_owl_clk_common(hw);
diff --git a/drivers/clk/at91/clk-audio-pll.c b/drivers/clk/at91/clk-audio-pll.c
index a92da64c12e1..bf9b635ac9d6 100644
--- a/drivers/clk/at91/clk-audio-pll.c
+++ b/drivers/clk/at91/clk-audio-pll.c
@@ -270,8 +270,8 @@ static int clk_audio_pll_frac_determine_rate(struct clk_hw *hw,
return 0;
}
-static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_audio_pll_pad_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_hw *pclk = clk_hw_get_parent(hw);
long best_rate = -EINVAL;
@@ -283,7 +283,7 @@ static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
int best_diff = -1;
pr_debug("A PLL/PAD: %s, rate = %lu (parent_rate = %lu)\n", __func__,
- rate, *parent_rate);
+ req->rate, req->best_parent_rate);
/*
* Rate divisor is actually made of two different divisors, multiplied
@@ -304,12 +304,12 @@ static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
continue;
best_parent_rate = clk_hw_round_rate(pclk,
- rate * tmp_qd * div);
+ req->rate * tmp_qd * div);
tmp_rate = best_parent_rate / (div * tmp_qd);
- tmp_diff = abs(rate - tmp_rate);
+ tmp_diff = abs(req->rate - tmp_rate);
if (best_diff < 0 || best_diff > tmp_diff) {
- *parent_rate = best_parent_rate;
+ req->best_parent_rate = best_parent_rate;
best_rate = tmp_rate;
best_diff = tmp_diff;
}
@@ -318,11 +318,13 @@ static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
pr_debug("A PLL/PAD: %s, best_rate = %ld, best_parent_rate = %lu\n",
__func__, best_rate, best_parent_rate);
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
-static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_audio_pll_pmc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_hw *pclk = clk_hw_get_parent(hw);
long best_rate = -EINVAL;
@@ -333,20 +335,20 @@ static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
int best_diff = -1;
pr_debug("A PLL/PMC: %s, rate = %lu (parent_rate = %lu)\n", __func__,
- rate, *parent_rate);
+ req->rate, req->best_parent_rate);
- if (!rate)
+ if (!req->rate)
return 0;
best_parent_rate = clk_round_rate(pclk->clk, 1);
- div = max(best_parent_rate / rate, 1UL);
+ div = max(best_parent_rate / req->rate, 1UL);
for (; div <= AUDIO_PLL_QDPMC_MAX; div++) {
- best_parent_rate = clk_round_rate(pclk->clk, rate * div);
+ best_parent_rate = clk_round_rate(pclk->clk, req->rate * div);
tmp_rate = best_parent_rate / div;
- tmp_diff = abs(rate - tmp_rate);
+ tmp_diff = abs(req->rate - tmp_rate);
if (best_diff < 0 || best_diff > tmp_diff) {
- *parent_rate = best_parent_rate;
+ req->best_parent_rate = best_parent_rate;
best_rate = tmp_rate;
best_diff = tmp_diff;
tmp_qd = div;
@@ -356,9 +358,11 @@ static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
}
pr_debug("A PLL/PMC: %s, best_rate = %ld, best_parent_rate = %lu (qd = %d)\n",
- __func__, best_rate, *parent_rate, tmp_qd - 1);
+ __func__, best_rate, req->best_parent_rate, tmp_qd - 1);
+
+ req->rate = best_rate;
- return best_rate;
+ return 0;
}
static int clk_audio_pll_frac_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -436,7 +440,7 @@ static const struct clk_ops audio_pll_pad_ops = {
.enable = clk_audio_pll_pad_enable,
.disable = clk_audio_pll_pad_disable,
.recalc_rate = clk_audio_pll_pad_recalc_rate,
- .round_rate = clk_audio_pll_pad_round_rate,
+ .determine_rate = clk_audio_pll_pad_determine_rate,
.set_rate = clk_audio_pll_pad_set_rate,
};
@@ -444,7 +448,7 @@ static const struct clk_ops audio_pll_pmc_ops = {
.enable = clk_audio_pll_pmc_enable,
.disable = clk_audio_pll_pmc_disable,
.recalc_rate = clk_audio_pll_pmc_recalc_rate,
- .round_rate = clk_audio_pll_pmc_round_rate,
+ .determine_rate = clk_audio_pll_pmc_determine_rate,
.set_rate = clk_audio_pll_pmc_set_rate,
};
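
Note: determine_rate is also more expressive than round_rate in one way the audio PLL hunks above rely on: a driver can update req->best_parent_rate to ask the core for a parent reclock, which the core honours when rate propagation is allowed (CLK_SET_RATE_PARENT). A condensed sketch of that negotiation loop, under an assumed FOO_MAX_DIV divider limit (names are illustrative, not from the patch):

	#include <linux/clk-provider.h>
	#include <linux/math.h>

	#define FOO_MAX_DIV	8	/* hypothetical hardware limit */

	static int foo_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
	{
		struct clk_hw *parent = clk_hw_get_parent(hw);
		unsigned long best_rate = 0, best_parent = 0, best_diff = ULONG_MAX;
		unsigned long div;

		for (div = 1; div <= FOO_MAX_DIV; div++) {
			/* What could the parent deliver for this divisor? */
			unsigned long prate = clk_hw_round_rate(parent, req->rate * div);
			unsigned long rate = prate / div;

			if (abs_diff(rate, req->rate) < best_diff) {
				best_diff = abs_diff(rate, req->rate);
				best_rate = rate;
				best_parent = prate;
			}
		}

		req->rate = best_rate;
		req->best_parent_rate = best_parent;	/* request a parent reclock */

		return 0;
	}
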
diff --git a/drivers/clk/at91/clk-h32mx.c b/drivers/clk/at91/clk-h32mx.c
index 1e6c12eeda10..a9aa93b5a870 100644
--- a/drivers/clk/at91/clk-h32mx.c
+++ b/drivers/clk/at91/clk-h32mx.c
@@ -40,21 +40,32 @@ static unsigned long clk_sama5d4_h32mx_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long clk_sama5d4_h32mx_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_sama5d4_h32mx_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long div;
- if (rate > *parent_rate)
- return *parent_rate;
- div = *parent_rate / 2;
- if (rate < div)
- return div;
+ if (req->rate > req->best_parent_rate) {
+ req->rate = req->best_parent_rate;
- if (rate - div < *parent_rate - rate)
- return div;
+ return 0;
+ }
+ div = req->best_parent_rate / 2;
+ if (req->rate < div) {
+ req->rate = div;
+
+ return 0;
+ }
+
+ if (req->rate - div < req->best_parent_rate - req->rate) {
+ req->rate = div;
- return *parent_rate;
+ return 0;
+ }
+
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
static int clk_sama5d4_h32mx_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -77,7 +88,7 @@ static int clk_sama5d4_h32mx_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops h32mx_ops = {
.recalc_rate = clk_sama5d4_h32mx_recalc_rate,
- .round_rate = clk_sama5d4_h32mx_round_rate,
+ .determine_rate = clk_sama5d4_h32mx_determine_rate,
.set_rate = clk_sama5d4_h32mx_set_rate,
};
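
Note: the h32mx callback above (and the near-identical clk-plldiv.c conversion further down) can only ever produce parent_rate or parent_rate/2, whichever is nearer the request. The same selection, condensed into one sketch:

	/* Sketch: pick the nearer of p and p/2 (equivalent to the logic above). */
	static unsigned long h32mx_pick(unsigned long rate, unsigned long p)
	{
		unsigned long div = p / 2;

		if (rate > p)
			return p;
		if (rate < div)
			return div;
		return (rate - div < p - rate) ? div : p;
	}

For example, with a 166 MHz parent a 100 MHz request resolves to 83 MHz (17 MHz off) rather than 166 MHz (66 MHz off).
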
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index 7a544e429d34..d5ea2069ec83 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -580,6 +580,9 @@ clk_sama7g5_master_recalc_rate(struct clk_hw *hw,
{
struct clk_master *master = to_clk_master(hw);
+ if (master->div == MASTER_PRES_MAX)
+ return DIV_ROUND_CLOSEST_ULL(parent_rate, 3);
+
return DIV_ROUND_CLOSEST_ULL(parent_rate, (1 << master->div));
}
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index c173a44c800a..e7208c47268b 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -3,6 +3,7 @@
* Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
@@ -279,8 +280,11 @@ static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw,
long best_diff = LONG_MIN;
u32 shift;
- if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max)
- return parent_rate;
+ if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) {
+ req->rate = parent_rate;
+
+ return 0;
+ }
/* Fist step: check the available dividers. */
for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
@@ -332,50 +336,57 @@ end:
return 0;
}
-static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int clk_sam9x5_peripheral_no_parent_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int shift = 0;
unsigned long best_rate;
unsigned long best_diff;
- unsigned long cur_rate = *parent_rate;
+ unsigned long cur_rate = req->best_parent_rate;
unsigned long cur_diff;
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
- if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max)
- return *parent_rate;
+ if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
if (periph->range.max) {
for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
- cur_rate = *parent_rate >> shift;
+ cur_rate = req->best_parent_rate >> shift;
if (cur_rate <= periph->range.max)
break;
}
}
- if (rate >= cur_rate)
- return cur_rate;
+ if (req->rate >= cur_rate) {
+ req->rate = cur_rate;
+
+ return 0;
+ }
- best_diff = cur_rate - rate;
+ best_diff = cur_rate - req->rate;
best_rate = cur_rate;
for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
- cur_rate = *parent_rate >> shift;
- if (cur_rate < rate)
- cur_diff = rate - cur_rate;
+ cur_rate = req->best_parent_rate >> shift;
+ if (cur_rate < req->rate)
+ cur_diff = req->rate - cur_rate;
else
- cur_diff = cur_rate - rate;
+ cur_diff = cur_rate - req->rate;
if (cur_diff < best_diff) {
best_diff = cur_diff;
best_rate = cur_rate;
}
- if (!best_diff || cur_rate < rate)
+ if (!best_diff || cur_rate < req->rate)
break;
}
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
static int clk_sam9x5_peripheral_set_rate(struct clk_hw *hw,
@@ -427,7 +438,7 @@ static const struct clk_ops sam9x5_peripheral_ops = {
.disable = clk_sam9x5_peripheral_disable,
.is_enabled = clk_sam9x5_peripheral_is_enabled,
.recalc_rate = clk_sam9x5_peripheral_recalc_rate,
- .round_rate = clk_sam9x5_peripheral_round_rate,
+ .determine_rate = clk_sam9x5_peripheral_no_parent_determine_rate,
.set_rate = clk_sam9x5_peripheral_set_rate,
.save_context = clk_sam9x5_peripheral_save_context,
.restore_context = clk_sam9x5_peripheral_restore_context,
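
Note: the sam9x5 peripheral clock can only divide its parent by powers of two up to PERIPHERAL_MAX_SHIFT, and some SoCs additionally cap the output at periph->range.max; the loop above first shifts down until the rate is legal, then keeps the shift whose result is nearest the request. The same search, condensed (a sketch, with range_max == 0 meaning no cap):

	#include <linux/math.h>

	static unsigned long pick_shifted_rate(unsigned long rate,
					       unsigned long parent_rate,
					       unsigned long range_max,
					       unsigned int max_shift)
	{
		unsigned long best = parent_rate >> max_shift, best_diff = ULONG_MAX;
		unsigned int shift;

		for (shift = 0; shift <= max_shift; shift++) {
			unsigned long cur = parent_rate >> shift;

			if (range_max && cur > range_max)
				continue;	/* still above the hardware limit */
			if (abs_diff(cur, rate) < best_diff) {
				best_diff = abs_diff(cur, rate);
				best = cur;
			}
			if (cur <= rate)
				break;		/* further shifts only move away */
		}

		return best;
	}
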
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
index 249d6a53cedf..5c5f7398effe 100644
--- a/drivers/clk/at91/clk-pll.c
+++ b/drivers/clk/at91/clk-pll.c
@@ -231,13 +231,15 @@ static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
return bestrate;
}
-static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pll *pll = to_clk_pll(hw);
- return clk_pll_get_best_div_mul(pll, rate, *parent_rate,
- NULL, NULL, NULL);
+ req->rate = clk_pll_get_best_div_mul(pll, req->rate, req->best_parent_rate,
+ NULL, NULL, NULL);
+
+ return 0;
}
static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -302,7 +304,7 @@ static const struct clk_ops pll_ops = {
.unprepare = clk_pll_unprepare,
.is_prepared = clk_pll_is_prepared,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
.save_context = clk_pll_save_context,
.restore_context = clk_pll_restore_context,
diff --git a/drivers/clk/at91/clk-plldiv.c b/drivers/clk/at91/clk-plldiv.c
index ba3a1839a96d..3ac09fecc54e 100644
--- a/drivers/clk/at91/clk-plldiv.c
+++ b/drivers/clk/at91/clk-plldiv.c
@@ -33,21 +33,33 @@ static unsigned long clk_plldiv_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long clk_plldiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_plldiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long div;
- if (rate > *parent_rate)
- return *parent_rate;
- div = *parent_rate / 2;
- if (rate < div)
- return div;
+ if (req->rate > req->best_parent_rate) {
+ req->rate = req->best_parent_rate;
- if (rate - div < *parent_rate - rate)
- return div;
+ return 0;
+ }
+
+ div = req->best_parent_rate / 2;
+ if (req->rate < div) {
+ req->rate = div;
+
+ return 0;
+ }
+
+ if (req->rate - div < req->best_parent_rate - req->rate) {
+ req->rate = div;
- return *parent_rate;
+ return 0;
+ }
+
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
static int clk_plldiv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -66,7 +78,7 @@ static int clk_plldiv_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops plldiv_ops = {
.recalc_rate = clk_plldiv_recalc_rate,
- .round_rate = clk_plldiv_round_rate,
+ .determine_rate = clk_plldiv_determine_rate,
.set_rate = clk_plldiv_set_rate,
};
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index cefd9948e103..3b965057ba0d 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -93,8 +93,8 @@ static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val);
cmul = (val & core->layout->mul_mask) >> core->layout->mul_shift;
cfrac = (val & core->layout->frac_mask) >> core->layout->frac_shift;
@@ -103,11 +103,8 @@ static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
(cmul == frac->mul && cfrac == frac->frac))
goto unlock;
- /* Recommended value for PMC_PLL_ACR */
- if (core->characteristics->upll)
- val = AT91_PMC_PLL_ACR_DEFAULT_UPLL;
- else
- val = AT91_PMC_PLL_ACR_DEFAULT_PLLA;
+ /* Load recommended value for PMC_PLL_ACR */
+ val = core->characteristics->acr;
regmap_write(regmap, AT91_PMC_PLL_ACR, val);
regmap_write(regmap, AT91_PMC_PLL_CTRL1,
@@ -128,17 +125,17 @@ static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
udelay(10);
}
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL,
AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
while (!sam9x60_pll_ready(regmap, core->id))
cpu_relax();
@@ -164,8 +161,8 @@ static void sam9x60_frac_pll_unprepare(struct clk_hw *hw)
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0, AT91_PMC_PLL_CTRL0_ENPLL, 0);
@@ -173,9 +170,9 @@ static void sam9x60_frac_pll_unprepare(struct clk_hw *hw)
regmap_update_bits(regmap, AT91_PMC_PLL_ACR,
AT91_PMC_PLL_ACR_UTMIBG | AT91_PMC_PLL_ACR_UTMIVR, 0);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
spin_unlock_irqrestore(core->lock, flags);
}
@@ -230,12 +227,16 @@ static long sam9x60_frac_pll_compute_mul_frac(struct sam9x60_pll_core *core,
return tmprate;
}
-static long sam9x60_frac_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sam9x60_frac_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
- return sam9x60_frac_pll_compute_mul_frac(core, rate, *parent_rate, false);
+ req->rate = sam9x60_frac_pll_compute_mul_frac(core, req->rate,
+ req->best_parent_rate,
+ false);
+
+ return 0;
}
static int sam9x60_frac_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -262,8 +263,8 @@ static int sam9x60_frac_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
spin_lock_irqsave(core->lock, irqflags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
- core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val);
cmul = (val & core->layout->mul_mask) >> core->layout->mul_shift;
cfrac = (val & core->layout->frac_mask) >> core->layout->frac_shift;
@@ -275,18 +276,18 @@ static int sam9x60_frac_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
(frac->mul << core->layout->mul_shift) |
(frac->frac << core->layout->frac_shift));
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL,
AT91_PMC_PLL_CTRL0_ENLOCK |
AT91_PMC_PLL_CTRL0_ENPLL);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
while (!sam9x60_pll_ready(regmap, core->id))
cpu_relax();
@@ -321,7 +322,7 @@ static const struct clk_ops sam9x60_frac_pll_ops = {
.unprepare = sam9x60_frac_pll_unprepare,
.is_prepared = sam9x60_frac_pll_is_prepared,
.recalc_rate = sam9x60_frac_pll_recalc_rate,
- .round_rate = sam9x60_frac_pll_round_rate,
+ .determine_rate = sam9x60_frac_pll_determine_rate,
.set_rate = sam9x60_frac_pll_set_rate,
.save_context = sam9x60_frac_pll_save_context,
.restore_context = sam9x60_frac_pll_restore_context,
@@ -332,13 +333,16 @@ static const struct clk_ops sam9x60_frac_pll_ops_chg = {
.unprepare = sam9x60_frac_pll_unprepare,
.is_prepared = sam9x60_frac_pll_is_prepared,
.recalc_rate = sam9x60_frac_pll_recalc_rate,
- .round_rate = sam9x60_frac_pll_round_rate,
+ .determine_rate = sam9x60_frac_pll_determine_rate,
.set_rate = sam9x60_frac_pll_set_rate_chg,
.save_context = sam9x60_frac_pll_save_context,
.restore_context = sam9x60_frac_pll_restore_context,
};
-/* This function should be called with spinlock acquired. */
+/* This function should be called with spinlock acquired.
+ * Warning: this function must be called only if the same PLL ID was set in
+ * PLL_UPDT register previously.
+ */
static void sam9x60_div_pll_set_div(struct sam9x60_pll_core *core, u32 div,
bool enable)
{
@@ -350,9 +354,9 @@ static void sam9x60_div_pll_set_div(struct sam9x60_pll_core *core, u32 div,
core->layout->div_mask | ena_msk,
(div << core->layout->div_shift) | ena_val);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
while (!sam9x60_pll_ready(regmap, core->id))
cpu_relax();
@@ -366,8 +370,8 @@ static int sam9x60_div_pll_set(struct sam9x60_pll_core *core)
unsigned int val, cdiv;
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
cdiv = (val & core->layout->div_mask) >> core->layout->div_shift;
@@ -398,15 +402,15 @@ static void sam9x60_div_pll_unprepare(struct clk_hw *hw)
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
core->layout->endiv_mask, 0);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
spin_unlock_irqrestore(core->lock, flags);
}
@@ -487,12 +491,15 @@ static long sam9x60_div_pll_compute_div(struct sam9x60_pll_core *core,
return best_rate;
}
-static long sam9x60_div_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sam9x60_div_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
- return sam9x60_div_pll_compute_div(core, parent_rate, rate);
+ req->rate = sam9x60_div_pll_compute_div(core, &req->best_parent_rate,
+ req->rate);
+
+ return 0;
}
static int sam9x60_div_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -518,8 +525,8 @@ static int sam9x60_div_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
div->div = DIV_ROUND_CLOSEST(parent_rate, rate) - 1;
spin_lock_irqsave(core->lock, irqflags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
- core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
cdiv = (val & core->layout->div_mask) >> core->layout->div_shift;
@@ -574,8 +581,8 @@ static int sam9x60_div_pll_notifier_fn(struct notifier_block *notifier,
div->div = div->safe_div;
spin_lock_irqsave(core.lock, irqflags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
- core.id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core.id);
regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
cdiv = (val & core.layout->div_mask) >> core.layout->div_shift;
@@ -601,7 +608,7 @@ static const struct clk_ops sam9x60_div_pll_ops = {
.unprepare = sam9x60_div_pll_unprepare,
.is_prepared = sam9x60_div_pll_is_prepared,
.recalc_rate = sam9x60_div_pll_recalc_rate,
- .round_rate = sam9x60_div_pll_round_rate,
+ .determine_rate = sam9x60_div_pll_determine_rate,
.set_rate = sam9x60_div_pll_set_rate,
.save_context = sam9x60_div_pll_save_context,
.restore_context = sam9x60_div_pll_restore_context,
@@ -612,7 +619,7 @@ static const struct clk_ops sam9x60_div_pll_ops_chg = {
.unprepare = sam9x60_div_pll_unprepare,
.is_prepared = sam9x60_div_pll_is_prepared,
.recalc_rate = sam9x60_div_pll_recalc_rate,
- .round_rate = sam9x60_div_pll_round_rate,
+ .determine_rate = sam9x60_div_pll_determine_rate,
.set_rate = sam9x60_div_pll_set_rate_chg,
.save_context = sam9x60_div_pll_save_context,
.restore_context = sam9x60_div_pll_restore_context,
@@ -623,7 +630,7 @@ static const struct clk_ops sam9x60_fixed_div_pll_ops = {
.unprepare = sam9x60_div_pll_unprepare,
.is_prepared = sam9x60_div_pll_is_prepared,
.recalc_rate = sam9x60_fixed_div_pll_recalc_rate,
- .round_rate = sam9x60_div_pll_round_rate,
+ .determine_rate = sam9x60_div_pll_determine_rate,
.save_context = sam9x60_div_pll_save_context,
.restore_context = sam9x60_div_pll_restore_context,
};
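
Note: the regmap_update_bits() -> regmap_write_bits() swaps in this file are functional, not cosmetic. regmap_update_bits() is a read-modify-write that skips the bus write entirely when the masked value already matches what was requested; regmap_write_bits() performs the write unconditionally. AT91_PMC_PLL_UPDT behaves as a trigger register (select the PLL ID, then latch the new settings with the UPDATE bit), so the write must happen even when the readback already shows the same value. A sketch of the contrast on a hypothetical trigger register:

	#include <linux/bits.h>
	#include <linux/regmap.h>

	#define FOO_UPDT	0x0c		/* hypothetical register offset */
	#define FOO_UPDT_UPDATE	BIT(8)
	#define FOO_UPDT_ID_MSK	GENMASK(3, 0)

	static void foo_commit(struct regmap *map, unsigned int id)
	{
		/* May silently skip the write when the register already reads
		 * back with these bits set; wrong for a trigger: */
		regmap_update_bits(map, FOO_UPDT,
				   FOO_UPDT_UPDATE | FOO_UPDT_ID_MSK,
				   FOO_UPDT_UPDATE | id);

		/* Always issues the write, so the trigger fires every time: */
		regmap_write_bits(map, FOO_UPDT,
				  FOO_UPDT_UPDATE | FOO_UPDT_ID_MSK,
				  FOO_UPDT_UPDATE | id);
	}
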
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index b0696a928aa9..e906928cfbf0 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -319,8 +319,8 @@ static unsigned long at91rm9200_clk_usb_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int at91rm9200_clk_usb_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
struct clk_hw *parent = clk_hw_get_parent(hw);
@@ -336,25 +336,27 @@ static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
if (!usb->divisors[i])
continue;
- tmp_parent_rate = rate * usb->divisors[i];
+ tmp_parent_rate = req->rate * usb->divisors[i];
tmp_parent_rate = clk_hw_round_rate(parent, tmp_parent_rate);
tmprate = DIV_ROUND_CLOSEST(tmp_parent_rate, usb->divisors[i]);
- if (tmprate < rate)
- tmpdiff = rate - tmprate;
+ if (tmprate < req->rate)
+ tmpdiff = req->rate - tmprate;
else
- tmpdiff = tmprate - rate;
+ tmpdiff = tmprate - req->rate;
if (bestdiff < 0 || bestdiff > tmpdiff) {
bestrate = tmprate;
bestdiff = tmpdiff;
- *parent_rate = tmp_parent_rate;
+ req->best_parent_rate = tmp_parent_rate;
}
if (!bestdiff)
break;
}
- return bestrate;
+ req->rate = bestrate;
+
+ return 0;
}
static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -384,7 +386,7 @@ static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops at91rm9200_usb_ops = {
.recalc_rate = at91rm9200_clk_usb_recalc_rate,
- .round_rate = at91rm9200_clk_usb_round_rate,
+ .determine_rate = at91rm9200_clk_usb_determine_rate,
.set_rate = at91rm9200_clk_usb_set_rate,
};
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index acf780a81589..2310f6f73162 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -115,7 +115,7 @@ struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem,
/* Address in SECURAM that say if we suspend to backup mode. */
static void __iomem *at91_pmc_backup_suspend;
-static int at91_pmc_suspend(void)
+static int at91_pmc_suspend(void *data)
{
unsigned int backup;
@@ -129,7 +129,7 @@ static int at91_pmc_suspend(void)
return clk_save_context();
}
-static void at91_pmc_resume(void)
+static void at91_pmc_resume(void *data)
{
unsigned int backup;
@@ -143,11 +143,15 @@ static void at91_pmc_resume(void)
clk_restore_context();
}
-static struct syscore_ops pmc_syscore_ops = {
+static const struct syscore_ops pmc_syscore_ops = {
.suspend = at91_pmc_suspend,
.resume = at91_pmc_resume,
};
+static struct syscore pmc_syscore = {
+ .ops = &pmc_syscore_ops,
+};
+
static const struct of_device_id pmc_dt_ids[] = {
{ .compatible = "atmel,sama5d2-pmc" },
{ .compatible = "microchip,sama7g5-pmc", },
@@ -185,7 +189,7 @@ static int __init pmc_register_ops(void)
return -ENOMEM;
}
- register_syscore_ops(&pmc_syscore_ops);
+ register_syscore(&pmc_syscore);
return 0;
}
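
Note: pmc.c also moves to the instance-based syscore API: the ops table can now be const, the callbacks take a void *data cookie, and a struct syscore instance is registered instead of the ops themselves. A minimal sketch of the new shape, assuming the same register_syscore()/struct syscore interface the hunk above uses:

	static int foo_suspend(void *data)
	{
		/* save hardware state; data is whatever the instance carries */
		return 0;
	}

	static void foo_resume(void *data)
	{
		/* restore hardware state */
	}

	static const struct syscore_ops foo_syscore_ops = {
		.suspend = foo_suspend,
		.resume = foo_resume,
	};

	static struct syscore foo_syscore = {
		.ops = &foo_syscore_ops,
	};

		/* at init time: */
		register_syscore(&foo_syscore);
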
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 4fb29ca111f7..543d7aee8d24 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -80,6 +80,7 @@ struct clk_pll_characteristics {
u16 *icpll;
u8 *out;
u8 upll : 1;
+ u32 acr;
};
struct clk_programmable_layout {
@@ -116,9 +117,6 @@ struct at91_clk_pms {
unsigned int parent;
};
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
#define ndck(a, s) (a[s - 1].id + 1)
#define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1)
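
Note: the local field_get()/field_prep() copies removed above duplicate the non-constant-mask helpers that <linux/bitfield.h> provides under the same names, alongside the FIELD_GET()/FIELD_PREP() macros for compile-time-constant masks; hence the new include in clk-peripheral.c. Typical usage with a hypothetical mask:

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define FOO_MUL_MASK	GENMASK(7, 4)	/* hypothetical field */

	static u32 foo_bump_mul(u32 reg)
	{
		u32 mul = FIELD_GET(FOO_MUL_MASK, reg);	/* extract bits 7:4 */

		return (reg & ~FOO_MUL_MASK) | FIELD_PREP(FOO_MUL_MASK, mul + 1);
	}

For masks only known at run time (e.g. a layout->mul_mask loaded from a table, as in the PLL drivers above), the lowercase field_get()/field_prep() from the same header perform the shift computation the removed macros did.
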
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index db6db9e2073e..18baf4a256f4 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -36,6 +36,7 @@ static const struct clk_pll_characteristics plla_characteristics = {
.num_output = ARRAY_SIZE(plla_outputs),
.output = plla_outputs,
.core_output = core_outputs,
+ .acr = UL(0x00020010),
};
static const struct clk_range upll_outputs[] = {
@@ -48,6 +49,7 @@ static const struct clk_pll_characteristics upll_characteristics = {
.output = upll_outputs,
.core_output = core_outputs,
.upll = true,
+	.acr = UL(0x12023010), /* fIN = [18 MHz, 32 MHz] */

};
static const struct clk_pll_layout pll_frac_layout = {
diff --git a/drivers/clk/at91/sam9x7.c b/drivers/clk/at91/sam9x7.c
index cbb8b220f16b..89868a0aeaba 100644
--- a/drivers/clk/at91/sam9x7.c
+++ b/drivers/clk/at91/sam9x7.c
@@ -61,44 +61,44 @@ static const struct clk_master_layout sam9x7_master_layout = {
/* Fractional PLL core output range. */
static const struct clk_range plla_core_outputs[] = {
- { .min = 375000000, .max = 1600000000 },
+ { .min = 800000000, .max = 1600000000 },
};
static const struct clk_range upll_core_outputs[] = {
- { .min = 600000000, .max = 1200000000 },
+ { .min = 600000000, .max = 960000000 },
};
static const struct clk_range lvdspll_core_outputs[] = {
- { .min = 400000000, .max = 800000000 },
+ { .min = 600000000, .max = 1200000000 },
};
static const struct clk_range audiopll_core_outputs[] = {
- { .min = 400000000, .max = 800000000 },
+ { .min = 600000000, .max = 1200000000 },
};
static const struct clk_range plladiv2_core_outputs[] = {
- { .min = 375000000, .max = 1600000000 },
+ { .min = 800000000, .max = 1600000000 },
};
/* Fractional PLL output range. */
static const struct clk_range plla_outputs[] = {
- { .min = 732421, .max = 800000000 },
+ { .min = 400000000, .max = 800000000 },
};
static const struct clk_range upll_outputs[] = {
- { .min = 300000000, .max = 600000000 },
+ { .min = 300000000, .max = 480000000 },
};
static const struct clk_range lvdspll_outputs[] = {
- { .min = 10000000, .max = 800000000 },
+ { .min = 175000000, .max = 550000000 },
};
static const struct clk_range audiopll_outputs[] = {
- { .min = 10000000, .max = 800000000 },
+ { .min = 0, .max = 300000000 },
};
static const struct clk_range plladiv2_outputs[] = {
- { .min = 366210, .max = 400000000 },
+ { .min = 200000000, .max = 400000000 },
};
/* PLL characteristics. */
@@ -107,6 +107,7 @@ static const struct clk_pll_characteristics plla_characteristics = {
.num_output = ARRAY_SIZE(plla_outputs),
.output = plla_outputs,
.core_output = plla_core_outputs,
+ .acr = UL(0x00020010), /* Old ACR_DEFAULT_PLLA value */
};
static const struct clk_pll_characteristics upll_characteristics = {
@@ -115,6 +116,7 @@ static const struct clk_pll_characteristics upll_characteristics = {
.output = upll_outputs,
.core_output = upll_core_outputs,
.upll = true,
+ .acr = UL(0x12023010), /* fIN=[20 MHz, 32 MHz] */
};
static const struct clk_pll_characteristics lvdspll_characteristics = {
@@ -122,6 +124,7 @@ static const struct clk_pll_characteristics lvdspll_characteristics = {
.num_output = ARRAY_SIZE(lvdspll_outputs),
.output = lvdspll_outputs,
.core_output = lvdspll_core_outputs,
+ .acr = UL(0x12023010), /* fIN=[20 MHz, 32 MHz] */
};
static const struct clk_pll_characteristics audiopll_characteristics = {
@@ -129,6 +132,7 @@ static const struct clk_pll_characteristics audiopll_characteristics = {
.num_output = ARRAY_SIZE(audiopll_outputs),
.output = audiopll_outputs,
.core_output = audiopll_core_outputs,
+ .acr = UL(0x12023010), /* fIN=[20 MHz, 32 MHz] */
};
static const struct clk_pll_characteristics plladiv2_characteristics = {
@@ -136,6 +140,7 @@ static const struct clk_pll_characteristics plladiv2_characteristics = {
.num_output = ARRAY_SIZE(plladiv2_outputs),
.output = plladiv2_outputs,
.core_output = plladiv2_core_outputs,
+ .acr = UL(0x00020010), /* Old ACR_DEFAULT_PLLA value */
};
/* Layout for fractional PLL ID PLLA. */
@@ -403,6 +408,7 @@ static const struct {
{ .n = "pioD_clk", .id = 44, },
{ .n = "tcb1_clk", .id = 45, },
{ .n = "dbgu_clk", .id = 47, },
+ { .n = "pmecc_clk", .id = 48, },
/*
* mpddr_clk feeds DDR controller and is enabled by bootloader thus we
* need to keep it enabled in case there is no Linux consumer for it.
diff --git a/drivers/clk/at91/sama7d65.c b/drivers/clk/at91/sama7d65.c
index a5d40df8b2f2..7dee2b160ffb 100644
--- a/drivers/clk/at91/sama7d65.c
+++ b/drivers/clk/at91/sama7d65.c
@@ -138,6 +138,7 @@ static const struct clk_pll_characteristics cpu_pll_characteristics = {
.num_output = ARRAY_SIZE(cpu_pll_outputs),
.output = cpu_pll_outputs,
.core_output = core_outputs,
+ .acr = UL(0x00070010),
};
/* PLL characteristics. */
@@ -146,6 +147,7 @@ static const struct clk_pll_characteristics pll_characteristics = {
.num_output = ARRAY_SIZE(pll_outputs),
.output = pll_outputs,
.core_output = core_outputs,
+ .acr = UL(0x00070010),
};
static const struct clk_pll_characteristics lvdspll_characteristics = {
@@ -153,6 +155,7 @@ static const struct clk_pll_characteristics lvdspll_characteristics = {
.num_output = ARRAY_SIZE(lvdspll_outputs),
.output = lvdspll_outputs,
.core_output = lvdspll_core_outputs,
+ .acr = UL(0x00070010),
};
static const struct clk_pll_characteristics upll_characteristics = {
@@ -160,6 +163,7 @@ static const struct clk_pll_characteristics upll_characteristics = {
.num_output = ARRAY_SIZE(upll_outputs),
.output = upll_outputs,
.core_output = upll_core_outputs,
+ .acr = UL(0x12020010),
.upll = true,
};
diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c
index 8385badc1c70..1340c2b00619 100644
--- a/drivers/clk/at91/sama7g5.c
+++ b/drivers/clk/at91/sama7g5.c
@@ -113,6 +113,7 @@ static const struct clk_pll_characteristics cpu_pll_characteristics = {
.num_output = ARRAY_SIZE(cpu_pll_outputs),
.output = cpu_pll_outputs,
.core_output = core_outputs,
+ .acr = UL(0x00070010),
};
/* PLL characteristics. */
@@ -121,6 +122,7 @@ static const struct clk_pll_characteristics pll_characteristics = {
.num_output = ARRAY_SIZE(pll_outputs),
.output = pll_outputs,
.core_output = core_outputs,
+ .acr = UL(0x00070010),
};
/*
diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c
index 9667ce898428..6f3e1151b354 100644
--- a/drivers/clk/axs10x/i2s_pll_clock.c
+++ b/drivers/clk/axs10x/i2s_pll_clock.c
@@ -108,21 +108,21 @@ static unsigned long i2s_pll_recalc_rate(struct clk_hw *hw,
return ((parent_rate / idiv) * fbdiv) / odiv;
}
-static long i2s_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int i2s_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct i2s_pll_clk *clk = to_i2s_pll_clk(hw);
- const struct i2s_pll_cfg *pll_cfg = i2s_pll_get_cfg(*prate);
+ const struct i2s_pll_cfg *pll_cfg = i2s_pll_get_cfg(req->best_parent_rate);
int i;
if (!pll_cfg) {
- dev_err(clk->dev, "invalid parent rate=%ld\n", *prate);
+ dev_err(clk->dev, "invalid parent rate=%ld\n", req->best_parent_rate);
return -EINVAL;
}
for (i = 0; pll_cfg[i].rate != 0; i++)
- if (pll_cfg[i].rate == rate)
- return rate;
+ if (pll_cfg[i].rate == req->rate)
+ return 0;
return -EINVAL;
}
@@ -156,7 +156,7 @@ static int i2s_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops i2s_pll_ops = {
.recalc_rate = i2s_pll_recalc_rate,
- .round_rate = i2s_pll_round_rate,
+ .determine_rate = i2s_pll_determine_rate,
.set_rate = i2s_pll_set_rate,
};
diff --git a/drivers/clk/axs10x/pll_clock.c b/drivers/clk/axs10x/pll_clock.c
index 6c7a2b62b406..c7ca473ee76c 100644
--- a/drivers/clk/axs10x/pll_clock.c
+++ b/drivers/clk/axs10x/pll_clock.c
@@ -149,8 +149,8 @@ static unsigned long axs10x_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long axs10x_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int axs10x_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int i;
long best_rate;
@@ -163,11 +163,13 @@ static long axs10x_pll_round_rate(struct clk_hw *hw, unsigned long rate,
best_rate = pll_cfg[0].rate;
for (i = 1; pll_cfg[i].rate != 0; i++) {
- if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
+ if (abs(req->rate - pll_cfg[i].rate) < abs(req->rate - best_rate))
best_rate = pll_cfg[i].rate;
}
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
static int axs10x_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -208,7 +210,7 @@ static int axs10x_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops axs10x_pll_ops = {
.recalc_rate = axs10x_pll_recalc_rate,
- .round_rate = axs10x_pll_round_rate,
+ .determine_rate = axs10x_pll_determine_rate,
.set_rate = axs10x_pll_set_rate,
};
diff --git a/drivers/clk/baikal-t1/ccu-div.c b/drivers/clk/baikal-t1/ccu-div.c
index 8d5fc7158f33..849d1f55765f 100644
--- a/drivers/clk/baikal-t1/ccu-div.c
+++ b/drivers/clk/baikal-t1/ccu-div.c
@@ -228,15 +228,18 @@ static inline unsigned long ccu_div_var_calc_divider(unsigned long rate,
CCU_DIV_CLKDIV_MAX(mask));
}
-static long ccu_div_var_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_div_var_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_div *div = to_ccu_div(hw);
unsigned long divider;
- divider = ccu_div_var_calc_divider(rate, *parent_rate, div->mask);
+ divider = ccu_div_var_calc_divider(req->rate, req->best_parent_rate,
+ div->mask);
- return ccu_div_calc_freq(*parent_rate, divider);
+ req->rate = ccu_div_calc_freq(req->best_parent_rate, divider);
+
+ return 0;
}
/*
@@ -308,12 +311,14 @@ static unsigned long ccu_div_fixed_recalc_rate(struct clk_hw *hw,
return ccu_div_calc_freq(parent_rate, div->divider);
}
-static long ccu_div_fixed_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_div_fixed_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_div *div = to_ccu_div(hw);
- return ccu_div_calc_freq(*parent_rate, div->divider);
+ req->rate = ccu_div_calc_freq(req->best_parent_rate, div->divider);
+
+ return 0;
}
static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -534,14 +539,14 @@ static const struct clk_ops ccu_div_var_gate_to_set_ops = {
.disable = ccu_div_gate_disable,
.is_enabled = ccu_div_gate_is_enabled,
.recalc_rate = ccu_div_var_recalc_rate,
- .round_rate = ccu_div_var_round_rate,
+ .determine_rate = ccu_div_var_determine_rate,
.set_rate = ccu_div_var_set_rate_fast,
.debug_init = ccu_div_var_debug_init
};
static const struct clk_ops ccu_div_var_nogate_ops = {
.recalc_rate = ccu_div_var_recalc_rate,
- .round_rate = ccu_div_var_round_rate,
+ .determine_rate = ccu_div_var_determine_rate,
.set_rate = ccu_div_var_set_rate_slow,
.debug_init = ccu_div_var_debug_init
};
@@ -551,7 +556,7 @@ static const struct clk_ops ccu_div_gate_ops = {
.disable = ccu_div_gate_disable,
.is_enabled = ccu_div_gate_is_enabled,
.recalc_rate = ccu_div_fixed_recalc_rate,
- .round_rate = ccu_div_fixed_round_rate,
+ .determine_rate = ccu_div_fixed_determine_rate,
.set_rate = ccu_div_fixed_set_rate,
.debug_init = ccu_div_gate_debug_init
};
@@ -565,7 +570,7 @@ static const struct clk_ops ccu_div_buf_ops = {
static const struct clk_ops ccu_div_fixed_ops = {
.recalc_rate = ccu_div_fixed_recalc_rate,
- .round_rate = ccu_div_fixed_round_rate,
+ .determine_rate = ccu_div_fixed_determine_rate,
.set_rate = ccu_div_fixed_set_rate,
.debug_init = ccu_div_fixed_debug_init
};
diff --git a/drivers/clk/baikal-t1/ccu-pll.c b/drivers/clk/baikal-t1/ccu-pll.c
index 13ef28001439..357269f41cdc 100644
--- a/drivers/clk/baikal-t1/ccu-pll.c
+++ b/drivers/clk/baikal-t1/ccu-pll.c
@@ -228,14 +228,16 @@ static void ccu_pll_calc_factors(unsigned long rate, unsigned long parent_rate,
}
}
-static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long nr = 1, nf = 1, od = 1;
- ccu_pll_calc_factors(rate, *parent_rate, &nr, &nf, &od);
+ ccu_pll_calc_factors(req->rate, req->best_parent_rate, &nr, &nf, &od);
- return ccu_pll_calc_freq(*parent_rate, nr, nf, od);
+ req->rate = ccu_pll_calc_freq(req->best_parent_rate, nr, nf, od);
+
+ return 0;
}
/*
@@ -481,7 +483,7 @@ static const struct clk_ops ccu_pll_gate_to_set_ops = {
.disable = ccu_pll_disable,
.is_enabled = ccu_pll_is_enabled,
.recalc_rate = ccu_pll_recalc_rate,
- .round_rate = ccu_pll_round_rate,
+ .determine_rate = ccu_pll_determine_rate,
.set_rate = ccu_pll_set_rate_norst,
.debug_init = ccu_pll_debug_init
};
@@ -491,7 +493,7 @@ static const struct clk_ops ccu_pll_straight_set_ops = {
.disable = ccu_pll_disable,
.is_enabled = ccu_pll_is_enabled,
.recalc_rate = ccu_pll_recalc_rate,
- .round_rate = ccu_pll_round_rate,
+ .determine_rate = ccu_pll_determine_rate,
.set_rate = ccu_pll_set_rate_reset,
.debug_init = ccu_pll_debug_init
};
diff --git a/drivers/clk/baikal-t1/clk-ccu-div.c b/drivers/clk/baikal-t1/clk-ccu-div.c
index 84555a00f950..17d75e8e2e8f 100644
--- a/drivers/clk/baikal-t1/clk-ccu-div.c
+++ b/drivers/clk/baikal-t1/clk-ccu-div.c
@@ -405,7 +405,7 @@ static void ccu_div_clk_unregister(struct ccu_div_data *data, bool defer)
{
int idx;
- /* Uninstall only the clocks registered on the specfied stage */
+ /* Uninstall only the clocks registered on the specified stage */
for (idx = 0; idx < data->divs_num; ++idx) {
if (!!(data->divs_info[idx].features & CCU_DIV_BASIC) ^ defer)
continue;
diff --git a/drivers/clk/baikal-t1/clk-ccu-pll.c b/drivers/clk/baikal-t1/clk-ccu-pll.c
index fce02ce77347..921b87024feb 100644
--- a/drivers/clk/baikal-t1/clk-ccu-pll.c
+++ b/drivers/clk/baikal-t1/clk-ccu-pll.c
@@ -196,7 +196,7 @@ static void ccu_pll_clk_unregister(struct ccu_pll_data *data, bool defer)
{
int idx;
- /* Uninstall only the clocks registered on the specfied stage */
+ /* Uninstall only the clocks registered on the specified stage */
for (idx = 0; idx < CCU_PLL_NUM; ++idx) {
if (!!(pll_info[idx].features & CCU_PLL_BASIC) ^ defer)
continue;
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index fb04734afc80..02215ea79403 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -570,18 +570,23 @@ static long bcm2835_pll_rate_from_divisors(unsigned long parent_rate,
return rate >> A2W_PLL_FRAC_BITS;
}
-static long bcm2835_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int bcm2835_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
const struct bcm2835_pll_data *data = pll->data;
u32 ndiv, fdiv;
- rate = clamp(rate, data->min_rate, data->max_rate);
+ req->rate = clamp(req->rate, data->min_rate, data->max_rate);
- bcm2835_pll_choose_ndiv_and_fdiv(rate, *parent_rate, &ndiv, &fdiv);
+ bcm2835_pll_choose_ndiv_and_fdiv(req->rate, req->best_parent_rate,
+ &ndiv, &fdiv);
- return bcm2835_pll_rate_from_divisors(*parent_rate, ndiv, fdiv, 1);
+ req->rate = bcm2835_pll_rate_from_divisors(req->best_parent_rate,
+ ndiv, fdiv,
+ 1);
+
+ return 0;
}
static unsigned long bcm2835_pll_get_rate(struct clk_hw *hw,
@@ -783,7 +788,7 @@ static const struct clk_ops bcm2835_pll_clk_ops = {
.unprepare = bcm2835_pll_off,
.recalc_rate = bcm2835_pll_get_rate,
.set_rate = bcm2835_pll_set_rate,
- .round_rate = bcm2835_pll_round_rate,
+ .determine_rate = bcm2835_pll_determine_rate,
.debug_init = bcm2835_pll_debug_init,
};
@@ -1550,7 +1555,7 @@ static const char *const bcm2835_clock_osc_parents[] = {
.parents = bcm2835_clock_osc_parents, \
__VA_ARGS__)
-/* main peripherial parent mux */
+/* main peripheral parent mux */
static const char *const bcm2835_clock_per_parents[] = {
"gnd",
"xosc",
diff --git a/drivers/clk/bcm/clk-bcm53573-ilp.c b/drivers/clk/bcm/clk-bcm53573-ilp.c
index 83ef41d618be..b2fc05b60783 100644
--- a/drivers/clk/bcm/clk-bcm53573-ilp.c
+++ b/drivers/clk/bcm/clk-bcm53573-ilp.c
@@ -59,7 +59,7 @@ static unsigned long bcm53573_ilp_recalc_rate(struct clk_hw *hw,
/*
* At minimum we should loop for a bit to let hardware do the
* measurement. This isn't very accurate however, so for a better
- * precision lets try getting 20 different values for and use average.
+	 * precision let's try getting 20 different values and use the average.
*/
while (num < 20) {
regmap_read(regmap, PMU_XTAL_FREQ_RATIO, &cur_val);
diff --git a/drivers/clk/bcm/clk-iproc-asiu.c b/drivers/clk/bcm/clk-iproc-asiu.c
index dcacf55c55ae..83ec13da9b2e 100644
--- a/drivers/clk/bcm/clk-iproc-asiu.c
+++ b/drivers/clk/bcm/clk-iproc-asiu.c
@@ -98,22 +98,27 @@ static unsigned long iproc_asiu_clk_recalc_rate(struct clk_hw *hw,
return clk->rate;
}
-static long iproc_asiu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int iproc_asiu_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned int div;
- if (rate == 0 || *parent_rate == 0)
+ if (req->rate == 0 || req->best_parent_rate == 0)
return -EINVAL;
- if (rate == *parent_rate)
- return *parent_rate;
+ if (req->rate == req->best_parent_rate)
+ return 0;
- div = DIV_ROUND_CLOSEST(*parent_rate, rate);
- if (div < 2)
- return *parent_rate;
+ div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
+ if (div < 2) {
+ req->rate = req->best_parent_rate;
- return *parent_rate / div;
+ return 0;
+ }
+
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int iproc_asiu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -168,7 +173,7 @@ static const struct clk_ops iproc_asiu_ops = {
.enable = iproc_asiu_clk_enable,
.disable = iproc_asiu_clk_disable,
.recalc_rate = iproc_asiu_clk_recalc_rate,
- .round_rate = iproc_asiu_clk_round_rate,
+ .determine_rate = iproc_asiu_clk_determine_rate,
.set_rate = iproc_asiu_clk_set_rate,
};
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 8e4fde03ed23..1a9162f0ae31 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -68,6 +68,8 @@ struct raspberrypi_clk_variant {
char *clkdev;
unsigned long min_rate;
bool minimize;
+ bool maximize;
+ u32 flags;
};
static struct raspberrypi_clk_variant
@@ -75,6 +77,7 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
[RPI_FIRMWARE_ARM_CLK_ID] = {
.export = true,
.clkdev = "cpu0",
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_CORE_CLK_ID] = {
.export = true,
@@ -90,6 +93,12 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
* always use the minimum the drivers will let us.
*/
.minimize = true,
+
+ /*
+ * It should never be disabled as it drives the bus for
+ * everything else.
+ */
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_M2MC_CLK_ID] = {
.export = true,
@@ -115,18 +124,29 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
* drivers will let us.
*/
.minimize = true,
+
+ /*
+	 * As mentioned above, if this clock is disabled during boot,
+	 * the firmware will skip the HSM initialization, resulting
+	 * in a bus lockup. Therefore, make sure it's enabled during
+	 * boot; after that, it can be enabled/disabled by the driver.
+ */
+ .flags = CLK_IGNORE_UNUSED,
},
[RPI_FIRMWARE_V3D_CLK_ID] = {
.export = true,
- .minimize = true,
+ .maximize = true,
},
[RPI_FIRMWARE_PIXEL_CLK_ID] = {
.export = true,
.minimize = true,
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_HEVC_CLK_ID] = {
.export = true,
.minimize = true,
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_ISP_CLK_ID] = {
.export = true,
@@ -135,6 +155,7 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
[RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = {
.export = true,
.minimize = true,
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_VEC_CLK_ID] = {
.export = true,
@@ -194,8 +215,11 @@ static int raspberrypi_fw_is_prepared(struct clk_hw *hw)
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_GET_CLOCK_STATE, &val);
- if (ret)
+ if (ret) {
+ dev_err_ratelimited(rpi->dev, "Failed to get %s state: %d\n",
+ clk_hw_get_name(hw), ret);
return 0;
+ }
return !!(val & RPI_FIRMWARE_STATE_ENABLE_BIT);
}
@@ -211,8 +235,11 @@ static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_GET_CLOCK_RATE, &val);
- if (ret)
+ if (ret) {
+ dev_err_ratelimited(rpi->dev, "Failed to get %s frequency: %d\n",
+ clk_hw_get_name(hw), ret);
return 0;
+ }
return val;
}
@@ -259,7 +286,41 @@ static int raspberrypi_fw_dumb_determine_rate(struct clk_hw *hw,
return 0;
}
+static int raspberrypi_fw_prepare(struct clk_hw *hw)
+{
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
+ struct raspberrypi_clk *rpi = data->rpi;
+ u32 state = RPI_FIRMWARE_STATE_ENABLE_BIT;
+ int ret;
+
+ ret = raspberrypi_clock_property(rpi->firmware, data,
+ RPI_FIRMWARE_SET_CLOCK_STATE, &state);
+ if (ret)
+ dev_err_ratelimited(rpi->dev,
+ "Failed to set clock %s state to on: %d\n",
+ clk_hw_get_name(hw), ret);
+
+ return ret;
+}
+
+static void raspberrypi_fw_unprepare(struct clk_hw *hw)
+{
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
+ struct raspberrypi_clk *rpi = data->rpi;
+ u32 state = 0;
+ int ret;
+
+ ret = raspberrypi_clock_property(rpi->firmware, data,
+ RPI_FIRMWARE_SET_CLOCK_STATE, &state);
+ if (ret)
+ dev_err_ratelimited(rpi->dev,
+ "Failed to set clock %s state to off: %d\n",
+ clk_hw_get_name(hw), ret);
+}
+
static const struct clk_ops raspberrypi_firmware_clk_ops = {
+ .prepare = raspberrypi_fw_prepare,
+ .unprepare = raspberrypi_fw_unprepare,
.is_prepared = raspberrypi_fw_is_prepared,
.recalc_rate = raspberrypi_fw_get_rate,
.determine_rate = raspberrypi_fw_dumb_determine_rate,
@@ -289,7 +350,7 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
if (!init.name)
return ERR_PTR(-ENOMEM);
init.ops = &raspberrypi_firmware_clk_ops;
- init.flags = CLK_GET_RATE_NOCACHE;
+ init.flags = variant->flags | CLK_GET_RATE_NOCACHE;
data->hw.init = &init;
@@ -326,6 +387,9 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
}
}
+ if (variant->maximize)
+ variant->min_rate = max_rate;
+
if (variant->min_rate) {
unsigned long rate;
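
Note: the firmware clocks now carry per-variant framework flags, merged into init.flags at registration (see the hunk above). Of the flags used here, CLK_IS_CRITICAL makes the core enable the clock at registration and never gate it, while CLK_IGNORE_UNUSED only exempts the clock from the late-boot clk_disable_unused() sweep; a driver may still gate it afterwards, which is exactly the behaviour the HSM comment asks for. Condensed registration sketch (clock name is illustrative):

	struct clk_init_data init = { };

	init.name = "fw-clk-foo";	/* hypothetical */
	init.ops = &raspberrypi_firmware_clk_ops;
	/* variant->flags carries CLK_IS_CRITICAL or CLK_IGNORE_UNUSED;
	 * CLK_GET_RATE_NOCACHE stays, since the firmware owns the real rate. */
	init.flags = variant->flags | CLK_GET_RATE_NOCACHE;
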
diff --git a/drivers/clk/berlin/berlin2-avpll.c b/drivers/clk/berlin/berlin2-avpll.c
index aa89b4c9464e..79f3d37a0ee0 100644
--- a/drivers/clk/berlin/berlin2-avpll.c
+++ b/drivers/clk/berlin/berlin2-avpll.c
@@ -319,7 +319,7 @@ berlin2_avpll_channel_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
/*
* AV3 divider start at VCO_CTRL14, bit 7; each 4 bits wide.
- * AV2/AV3 form a fractional divider, where only specfic values for AV3
+ * AV2/AV3 form a fractional divider, where only specific values for AV3
* are allowed. AV3 != 0 divides by AV2/2, AV3=0 is bypass.
*/
if (ch->index < 6) {
diff --git a/drivers/clk/clk-apple-nco.c b/drivers/clk/clk-apple-nco.c
index 457a48d48941..d3ced4a0f029 100644
--- a/drivers/clk/clk-apple-nco.c
+++ b/drivers/clk/clk-apple-nco.c
@@ -212,13 +212,15 @@ static unsigned long applnco_recalc_rate(struct clk_hw *hw,
((u64) div) * incbase + inc1);
}
-static long applnco_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int applnco_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long lo = *parent_rate / (COARSE_DIV_OFFSET + LFSR_TBLSIZE) + 1;
- unsigned long hi = *parent_rate / COARSE_DIV_OFFSET;
+ unsigned long lo = req->best_parent_rate / (COARSE_DIV_OFFSET + LFSR_TBLSIZE) + 1;
+ unsigned long hi = req->best_parent_rate / COARSE_DIV_OFFSET;
- return clamp(rate, lo, hi);
+ req->rate = clamp(req->rate, lo, hi);
+
+ return 0;
}
static int applnco_enable(struct clk_hw *hw)
@@ -246,7 +248,7 @@ static void applnco_disable(struct clk_hw *hw)
static const struct clk_ops applnco_ops = {
.set_rate = applnco_set_rate,
.recalc_rate = applnco_recalc_rate,
- .round_rate = applnco_round_rate,
+ .determine_rate = applnco_determine_rate,
.enable = applnco_enable,
.disable = applnco_disable,
.is_enabled = applnco_is_enabled,
diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
index 3432c801f1bd..595cfa533fb9 100644
--- a/drivers/clk/clk-asm9260.c
+++ b/drivers/clk/clk-asm9260.c
@@ -92,8 +92,8 @@ static const struct asm9260_div_clk asm9260_div_clks[] __initconst = {
{ CLKID_SYS_CPU, "cpu_div", "main_gate", HW_CPUCLKDIV },
{ CLKID_SYS_AHB, "ahb_div", "cpu_div", HW_SYSAHBCLKDIV },
- /* i2s has two deviders: one for only external mclk and internal
- * devider for all clks. */
+ /* i2s has two dividers: one for only external mclk and internal
+ * divider for all clks. */
{ CLKID_SYS_I2S0M, "i2s0m_div", "i2s0_mclk", HW_I2S0MCLKDIV },
{ CLKID_SYS_I2S1M, "i2s1m_div", "i2s1_mclk", HW_I2S1MCLKDIV },
{ CLKID_SYS_I2S0S, "i2s0s_div", "i2s0_gate", HW_I2S0SCLKDIV },
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index faf88324f7b1..114afc13d640 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -92,7 +92,7 @@ static u8 soc_rev;
*
* There are some gates that do not have an associated reset; these are
* handled by using -1 as the index for the reset, and the consumer must
- * explictly assert/deassert reset lines as required.
+ * explicitly assert/deassert reset lines as required.
*
* Clocks marked with CLK_IS_CRITICAL:
*
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index 934e53a96ddd..fa5ccef73e60 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -6,14 +6,17 @@
* Author: Lars-Peter Clausen <lars@metafoo.de>
*/
-#include <linux/platform_device.h>
+#include <linux/adi-axi-common.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/slab.h>
+#include <linux/err.h>
#include <linux/io.h>
-#include <linux/of.h>
#include <linux/module.h>
-#include <linux/err.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
#define AXI_CLKGEN_V2_REG_RESET 0x40
#define AXI_CLKGEN_V2_REG_CLKSEL 0x44
@@ -28,6 +31,9 @@
#define AXI_CLKGEN_V2_DRP_STATUS_BUSY BIT(16)
+#define ADI_CLKGEN_REG_FPGA_VOLTAGE 0x0140
+#define ADI_CLKGEN_INFO_FPGA_VOLTAGE(val) ((val) & GENMASK(15, 0))
+
#define MMCM_REG_CLKOUT5_2 0x07
#define MMCM_REG_CLKOUT0_1 0x08
#define MMCM_REG_CLKOUT0_2 0x09
@@ -90,7 +96,7 @@ static uint32_t axi_clkgen_lookup_filter(unsigned int m)
}
}
-static const uint32_t axi_clkgen_lock_table[] = {
+static const u32 axi_clkgen_lock_table[] = {
0x060603e8, 0x060603e8, 0x080803e8, 0x0b0b03e8,
0x0e0e03e8, 0x111103e8, 0x131303e8, 0x161603e8,
0x191903e8, 0x1c1c03e8, 0x1f1f0384, 0x1f1f0339,
@@ -102,7 +108,7 @@ static const uint32_t axi_clkgen_lock_table[] = {
0x1f1f012c, 0x1f1f0113, 0x1f1f0113, 0x1f1f0113,
};
-static uint32_t axi_clkgen_lookup_lock(unsigned int m)
+static u32 axi_clkgen_lookup_lock(unsigned int m)
{
if (m < ARRAY_SIZE(axi_clkgen_lock_table))
return axi_clkgen_lock_table[m];
@@ -118,14 +124,15 @@ static const struct axi_clkgen_limits axi_clkgen_zynqmp_default_limits = {
static const struct axi_clkgen_limits axi_clkgen_zynq_default_limits = {
.fpfd_min = 10000,
- .fpfd_max = 300000,
+ .fpfd_max = 450000,
.fvco_min = 600000,
.fvco_max = 1200000,
};
static void axi_clkgen_calc_params(const struct axi_clkgen_limits *limits,
- unsigned long fin, unsigned long fout,
- unsigned int *best_d, unsigned int *best_m, unsigned int *best_dout)
+ unsigned long fin, unsigned long fout,
+ unsigned int *best_d, unsigned int *best_m,
+ unsigned int *best_dout)
{
unsigned long d, d_min, d_max, _d_min, _d_max;
unsigned long m, m_min, m_max;
@@ -141,15 +148,15 @@ static void axi_clkgen_calc_params(const struct axi_clkgen_limits *limits,
*best_m = 0;
*best_dout = 0;
- d_min = max_t(unsigned long, DIV_ROUND_UP(fin, limits->fpfd_max), 1);
- d_max = min_t(unsigned long, fin / limits->fpfd_min, 80);
+ d_min = max(DIV_ROUND_UP(fin, limits->fpfd_max), 1);
+ d_max = min(fin / limits->fpfd_min, 80);
again:
fvco_min_fract = limits->fvco_min << fract_shift;
fvco_max_fract = limits->fvco_max << fract_shift;
- m_min = max_t(unsigned long, DIV_ROUND_UP(fvco_min_fract, fin) * d_min, 1);
- m_max = min_t(unsigned long, fvco_max_fract * d_max / fin, 64 << fract_shift);
+ m_min = max(DIV_ROUND_UP(fvco_min_fract, fin) * d_min, 1);
+ m_max = min(fvco_max_fract * d_max / fin, 64 << fract_shift);
for (m = m_min; m <= m_max; m++) {
_d_min = max(d_min, DIV_ROUND_UP(fin * m, fvco_max_fract));
@@ -172,7 +179,7 @@ again:
}
}
- /* Lets see if we find a better setting in fractional mode */
+ /* Let's see if we find a better setting in fractional mode */
if (fract_shift == 0) {
fract_shift = 3;
goto again;
@@ -192,9 +199,9 @@ struct axi_clkgen_div_params {
};
static void axi_clkgen_calc_clk_params(unsigned int divider,
- unsigned int frac_divider, struct axi_clkgen_div_params *params)
+ unsigned int frac_divider,
+ struct axi_clkgen_div_params *params)
{
-
memset(params, 0x0, sizeof(*params));
if (divider == 1) {
@@ -222,7 +229,7 @@ static void axi_clkgen_calc_clk_params(unsigned int divider,
if (params->edge == 0 || frac_divider == 1)
params->low--;
if (((params->edge == 0) ^ (frac_divider == 1)) ||
- (divider == 2 && frac_divider == 1))
+ (divider == 2 && frac_divider == 1))
params->frac_wf_f = 1;
params->frac_phase = params->edge * 4 + frac_divider / 2;
@@ -230,13 +237,13 @@ static void axi_clkgen_calc_clk_params(unsigned int divider,
}
static void axi_clkgen_write(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int val)
+ unsigned int reg, unsigned int val)
{
writel(val, axi_clkgen->base + reg);
}
static void axi_clkgen_read(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int *val)
+ unsigned int reg, unsigned int *val)
{
*val = readl(axi_clkgen->base + reg);
}
@@ -257,7 +264,7 @@ static int axi_clkgen_wait_non_busy(struct axi_clkgen *axi_clkgen)
}
static int axi_clkgen_mmcm_read(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int *val)
+ unsigned int reg, unsigned int *val)
{
unsigned int reg_val;
int ret;
@@ -281,7 +288,8 @@ static int axi_clkgen_mmcm_read(struct axi_clkgen *axi_clkgen,
}
static int axi_clkgen_mmcm_write(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int val, unsigned int mask)
+ unsigned int reg, unsigned int val,
+ unsigned int mask)
{
unsigned int reg_val = 0;
int ret;
@@ -302,8 +310,7 @@ static int axi_clkgen_mmcm_write(struct axi_clkgen *axi_clkgen,
return 0;
}
-static void axi_clkgen_mmcm_enable(struct axi_clkgen *axi_clkgen,
- bool enable)
+static void axi_clkgen_mmcm_enable(struct axi_clkgen *axi_clkgen, bool enable)
{
unsigned int val = AXI_CLKGEN_V2_RESET_ENABLE;
@@ -319,31 +326,31 @@ static struct axi_clkgen *clk_hw_to_axi_clkgen(struct clk_hw *clk_hw)
}
static void axi_clkgen_set_div(struct axi_clkgen *axi_clkgen,
- unsigned int reg1, unsigned int reg2, unsigned int reg3,
- struct axi_clkgen_div_params *params)
+ unsigned int reg1, unsigned int reg2,
+ unsigned int reg3,
+ struct axi_clkgen_div_params *params)
{
axi_clkgen_mmcm_write(axi_clkgen, reg1,
- (params->high << 6) | params->low, 0xefff);
+ (params->high << 6) | params->low, 0xefff);
axi_clkgen_mmcm_write(axi_clkgen, reg2,
- (params->frac << 12) | (params->frac_en << 11) |
- (params->frac_wf_r << 10) | (params->edge << 7) |
- (params->nocount << 6), 0x7fff);
+ (params->frac << 12) | (params->frac_en << 11) |
+ (params->frac_wf_r << 10) | (params->edge << 7) |
+ (params->nocount << 6), 0x7fff);
if (reg3 != 0) {
axi_clkgen_mmcm_write(axi_clkgen, reg3,
- (params->frac_phase << 11) | (params->frac_wf_f << 10), 0x3c00);
+ (params->frac_phase << 11) | (params->frac_wf_f << 10),
+ 0x3c00);
}
}
-static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
- unsigned long rate, unsigned long parent_rate)
+static int axi_clkgen_set_rate(struct clk_hw *clk_hw, unsigned long rate,
+ unsigned long parent_rate)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
const struct axi_clkgen_limits *limits = &axi_clkgen->limits;
unsigned int d, m, dout;
struct axi_clkgen_div_params params;
- uint32_t power = 0;
- uint32_t filter;
- uint32_t lock;
+ u32 power = 0, filter, lock;
if (parent_rate == 0 || rate == 0)
return -EINVAL;
@@ -363,22 +370,22 @@ static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
axi_clkgen_calc_clk_params(dout >> 3, dout & 0x7, &params);
axi_clkgen_set_div(axi_clkgen, MMCM_REG_CLKOUT0_1, MMCM_REG_CLKOUT0_2,
- MMCM_REG_CLKOUT5_2, &params);
+ MMCM_REG_CLKOUT5_2, &params);
axi_clkgen_calc_clk_params(d, 0, &params);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_DIV,
- (params.edge << 13) | (params.nocount << 12) |
- (params.high << 6) | params.low, 0x3fff);
+ (params.edge << 13) | (params.nocount << 12) |
+ (params.high << 6) | params.low, 0x3fff);
axi_clkgen_calc_clk_params(m >> 3, m & 0x7, &params);
axi_clkgen_set_div(axi_clkgen, MMCM_REG_CLK_FB1, MMCM_REG_CLK_FB2,
- MMCM_REG_CLKOUT6_2, &params);
+ MMCM_REG_CLKOUT6_2, &params);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK1, lock & 0x3ff, 0x3ff);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK2,
- (((lock >> 16) & 0x1f) << 10) | 0x1, 0x7fff);
+ (((lock >> 16) & 0x1f) << 10) | 0x1, 0x7fff);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK3,
- (((lock >> 24) & 0x1f) << 10) | 0x3e9, 0x7fff);
+ (((lock >> 24) & 0x1f) << 10) | 0x3e9, 0x7fff);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_FILTER1, filter >> 16, 0x9900);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_FILTER2, filter, 0x9900);
@@ -407,7 +414,7 @@ static int axi_clkgen_determine_rate(struct clk_hw *hw,
}
static unsigned int axi_clkgen_get_div(struct axi_clkgen *axi_clkgen,
- unsigned int reg1, unsigned int reg2)
+ unsigned int reg1, unsigned int reg2)
{
unsigned int val1, val2;
unsigned int div;
@@ -434,7 +441,7 @@ static unsigned int axi_clkgen_get_div(struct axi_clkgen *axi_clkgen,
}
static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
- unsigned long parent_rate)
+ unsigned long parent_rate)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
unsigned int d, m, dout;
@@ -442,9 +449,9 @@ static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
unsigned int val;
dout = axi_clkgen_get_div(axi_clkgen, MMCM_REG_CLKOUT0_1,
- MMCM_REG_CLKOUT0_2);
+ MMCM_REG_CLKOUT0_2);
m = axi_clkgen_get_div(axi_clkgen, MMCM_REG_CLK_FB1,
- MMCM_REG_CLK_FB2);
+ MMCM_REG_CLK_FB2);
axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_DIV, &val);
if (val & MMCM_CLK_DIV_NOCOUNT)
@@ -496,6 +503,54 @@ static u8 axi_clkgen_get_parent(struct clk_hw *clk_hw)
return parent;
}
+static int axi_clkgen_setup_limits(struct axi_clkgen *axi_clkgen,
+ struct device *dev)
+{
+ unsigned int tech, family, speed_grade, reg_value;
+
+ axi_clkgen_read(axi_clkgen, ADI_AXI_REG_FPGA_INFO, &reg_value);
+ tech = ADI_AXI_INFO_FPGA_TECH(reg_value);
+ family = ADI_AXI_INFO_FPGA_FAMILY(reg_value);
+ speed_grade = ADI_AXI_INFO_FPGA_SPEED_GRADE(reg_value);
+
+ axi_clkgen->limits.fpfd_min = 10000;
+ axi_clkgen->limits.fvco_min = 600000;
+
+ switch (speed_grade) {
+ case ADI_AXI_FPGA_SPEED_1 ... ADI_AXI_FPGA_SPEED_1LV:
+ axi_clkgen->limits.fvco_max = 1200000;
+ axi_clkgen->limits.fpfd_max = 450000;
+ break;
+ case ADI_AXI_FPGA_SPEED_2 ... ADI_AXI_FPGA_SPEED_2LV:
+ axi_clkgen->limits.fvco_max = 1440000;
+ axi_clkgen->limits.fpfd_max = 500000;
+ if (family == ADI_AXI_FPGA_FAMILY_KINTEX || family == ADI_AXI_FPGA_FAMILY_ARTIX) {
+ axi_clkgen_read(axi_clkgen, ADI_CLKGEN_REG_FPGA_VOLTAGE,
+ &reg_value);
+ if (ADI_CLKGEN_INFO_FPGA_VOLTAGE(reg_value) < 950) {
+ axi_clkgen->limits.fvco_max = 1200000;
+ axi_clkgen->limits.fpfd_max = 450000;
+ }
+ }
+ break;
+ case ADI_AXI_FPGA_SPEED_3:
+ axi_clkgen->limits.fvco_max = 1600000;
+ axi_clkgen->limits.fpfd_max = 550000;
+ break;
+ default:
+ return dev_err_probe(dev, -ENODEV, "Unknown speed grade %d\n",
+ speed_grade);
+ }
+
+ /* Overwrite vco limits for ultrascale+ */
+ if (tech == ADI_AXI_FPGA_TECH_ULTRASCALE_PLUS) {
+ axi_clkgen->limits.fvco_max = 1600000;
+ axi_clkgen->limits.fvco_min = 800000;
+ }
+
+ return 0;
+}
+
static const struct clk_ops axi_clkgen_ops = {
.recalc_rate = axi_clkgen_recalc_rate,
.determine_rate = axi_clkgen_determine_rate,
@@ -510,6 +565,7 @@ static int axi_clkgen_probe(struct platform_device *pdev)
{
const struct axi_clkgen_limits *dflt_limits;
struct axi_clkgen *axi_clkgen;
+ unsigned int pcore_version;
struct clk_init_data init;
const char *parent_names[2];
const char *clk_name;
@@ -555,11 +611,20 @@ static int axi_clkgen_probe(struct platform_device *pdev)
return -EINVAL;
}
- memcpy(&axi_clkgen->limits, dflt_limits, sizeof(axi_clkgen->limits));
+ axi_clkgen_read(axi_clkgen, ADI_AXI_REG_VERSION, &pcore_version);
+
+ if (ADI_AXI_PCORE_VER_MAJOR(pcore_version) > 0x04) {
+ ret = axi_clkgen_setup_limits(axi_clkgen, &pdev->dev);
+ if (ret)
+ return ret;
+ } else {
+ memcpy(&axi_clkgen->limits, dflt_limits,
+ sizeof(axi_clkgen->limits));
+ }
clk_name = pdev->dev.of_node->name;
of_property_read_string(pdev->dev.of_node, "clock-output-names",
- &clk_name);
+ &clk_name);
init.name = clk_name;
init.ops = &axi_clkgen_ops;
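
A quick arithmetic check of the raised Zynq fpfd_max (300000 -> 450000 kHz): in axi_clkgen_calc_params() the lower bound of the input-divider search is d_min = DIV_ROUND_UP(fin, fpfd_max), so a faster PFD limit admits smaller dividers. A runnable sketch with a hypothetical 400 MHz input (kHz units, as in the driver):

#include <stdio.h>

int main(void)
{
	unsigned long fin = 400000;	/* kHz, hypothetical input */
	unsigned long old_max = 300000, new_max = 450000;

	/* DIV_ROUND_UP(fin, fpfd_max) */
	printf("d_min old: %lu\n", (fin + old_max - 1) / old_max);	/* 2 */
	printf("d_min new: %lu\n", (fin + new_max - 1) / new_max);	/* 1 */
	return 0;
}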
diff --git a/drivers/clk/clk-axm5516.c b/drivers/clk/clk-axm5516.c
index 4a3462ee8f3e..3823383f3fa6 100644
--- a/drivers/clk/clk-axm5516.c
+++ b/drivers/clk/clk-axm5516.c
@@ -529,7 +529,6 @@ static const struct regmap_config axmclk_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x1fffc,
- .fast_io = true,
};
static const struct of_device_id axmclk_match_table[] = {
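
For context on the dropped flag (general regmap behaviour, not part of this patch): fast_io makes regmap guard accesses with a spinlock instead of the default mutex, which is only needed when registers are touched from atomic context. A hedged sketch of a plain MMIO config without it:

#include <linux/regmap.h>

static const struct regmap_config example_regmap_cfg = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	/*
	 * No .fast_io: regmap falls back to a mutex, which is fine when
	 * nothing accesses the registers from atomic context.
	 */
};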
diff --git a/drivers/clk/clk-bm1880.c b/drivers/clk/clk-bm1880.c
index 002f7360b1c6..dac190bc6e19 100644
--- a/drivers/clk/clk-bm1880.c
+++ b/drivers/clk/clk-bm1880.c
@@ -608,8 +608,8 @@ static unsigned long bm1880_clk_div_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long bm1880_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int bm1880_clk_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw);
struct bm1880_div_clock *div = &div_hw->div;
@@ -621,13 +621,18 @@ static long bm1880_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
val = readl(reg_addr) >> div->shift;
val &= clk_div_mask(div->width);
- return divider_ro_round_rate(hw, rate, prate, div->table,
- div->width, div->flags,
- val);
+ req->rate = divider_ro_round_rate(hw, req->rate,
+ &req->best_parent_rate,
+ div->table,
+ div->width, div->flags, val);
+
+ return 0;
}
- return divider_round_rate(hw, rate, prate, div->table,
- div->width, div->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ div->table, div->width, div->flags);
+
+ return 0;
}
static int bm1880_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -665,7 +670,7 @@ static int bm1880_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops bm1880_clk_div_ops = {
.recalc_rate = bm1880_clk_div_recalc_rate,
- .round_rate = bm1880_clk_div_round_rate,
+ .determine_rate = bm1880_clk_div_determine_rate,
.set_rate = bm1880_clk_div_set_rate,
};
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
index d0705bb03a2a..a495d313b02f 100644
--- a/drivers/clk/clk-cdce706.c
+++ b/drivers/clk/clk-cdce706.c
@@ -183,8 +183,8 @@ static unsigned long cdce706_pll_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce706_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct cdce706_hw_data *hwd = to_hw_data(hw);
unsigned long mul, div;
@@ -192,9 +192,9 @@ static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
dev_dbg(&hwd->dev_data->client->dev,
"%s, rate: %lu, parent_rate: %lu\n",
- __func__, rate, *parent_rate);
+ __func__, req->rate, req->best_parent_rate);
- rational_best_approximation(rate, *parent_rate,
+ rational_best_approximation(req->rate, req->best_parent_rate,
CDCE706_PLL_N_MAX, CDCE706_PLL_M_MAX,
&mul, &div);
hwd->mul = mul;
@@ -204,9 +204,11 @@ static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
"%s, pll: %d, mul: %lu, div: %lu\n",
__func__, hwd->idx, mul, div);
- res = (u64)*parent_rate * hwd->mul;
+ res = (u64)req->best_parent_rate * hwd->mul;
do_div(res, hwd->div);
- return res;
+ req->rate = res;
+
+ return 0;
}
static int cdce706_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -251,7 +253,7 @@ static int cdce706_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops cdce706_pll_ops = {
.recalc_rate = cdce706_pll_recalc_rate,
- .round_rate = cdce706_pll_round_rate,
+ .determine_rate = cdce706_pll_determine_rate,
.set_rate = cdce706_pll_set_rate,
};
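
The converted hook leans on lib/math/rational.c to pick the multiplier/divider pair. A worked example of what rational_best_approximation() returns for the call above: with a 27 MHz parent and a 74.25 MHz target, the best ratio within typical PLL limits is 11/4, which hits the target exactly:

#include <linux/rational.h>

unsigned long mul, div;

/* 74.25 MHz / 27 MHz = 2.75, so 27 MHz * 11 / 4 reproduces it exactly */
rational_best_approximation(74250000, 27000000,
			    CDCE706_PLL_N_MAX, CDCE706_PLL_M_MAX,
			    &mul, &div);	/* -> mul = 11, div = 4 */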
diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
index c51818c1af98..0b2ad21e6e4d 100644
--- a/drivers/clk/clk-cdce925.c
+++ b/drivers/clk/clk-cdce925.c
@@ -128,13 +128,15 @@ static void cdce925_pll_find_rate(unsigned long rate,
}
}
-static long cdce925_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce925_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u16 n, m;
- cdce925_pll_find_rate(rate, *parent_rate, &n, &m);
- return (long)cdce925_pll_calculate_rate(*parent_rate, n, m);
+ cdce925_pll_find_rate(req->rate, req->best_parent_rate, &n, &m);
+ req->rate = (long)cdce925_pll_calculate_rate(req->best_parent_rate, n, m);
+
+ return 0;
}
static int cdce925_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -266,7 +268,7 @@ static const struct clk_ops cdce925_pll_ops = {
.prepare = cdce925_pll_prepare,
.unprepare = cdce925_pll_unprepare,
.recalc_rate = cdce925_pll_recalc_rate,
- .round_rate = cdce925_pll_round_rate,
+ .determine_rate = cdce925_pll_determine_rate,
.set_rate = cdce925_pll_set_rate,
};
@@ -420,20 +422,23 @@ static unsigned long cdce925_clk_best_parent_rate(
return rate * pdiv_best;
}
-static long cdce925_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce925_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long l_parent_rate = *parent_rate;
- u16 divider = cdce925_calc_divider(rate, l_parent_rate);
+ unsigned long l_parent_rate = req->best_parent_rate;
+ u16 divider = cdce925_calc_divider(req->rate, l_parent_rate);
- if (l_parent_rate / divider != rate) {
- l_parent_rate = cdce925_clk_best_parent_rate(hw, rate);
- divider = cdce925_calc_divider(rate, l_parent_rate);
- *parent_rate = l_parent_rate;
+ if (l_parent_rate / divider != req->rate) {
+ l_parent_rate = cdce925_clk_best_parent_rate(hw, req->rate);
+ divider = cdce925_calc_divider(req->rate, l_parent_rate);
+ req->best_parent_rate = l_parent_rate;
}
if (divider)
- return (long)(l_parent_rate / divider);
+ req->rate = (long)(l_parent_rate / divider);
+ else
+ req->rate = 0;
+
return 0;
}
@@ -451,7 +456,7 @@ static const struct clk_ops cdce925_clk_ops = {
.prepare = cdce925_clk_prepare,
.unprepare = cdce925_clk_unprepare,
.recalc_rate = cdce925_clk_recalc_rate,
- .round_rate = cdce925_clk_round_rate,
+ .determine_rate = cdce925_clk_determine_rate,
.set_rate = cdce925_clk_set_rate,
};
@@ -473,14 +478,17 @@ static u16 cdce925_y1_calc_divider(unsigned long rate,
return (u16)divider;
}
-static long cdce925_clk_y1_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce925_clk_y1_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long l_parent_rate = *parent_rate;
- u16 divider = cdce925_y1_calc_divider(rate, l_parent_rate);
+ unsigned long l_parent_rate = req->best_parent_rate;
+ u16 divider = cdce925_y1_calc_divider(req->rate, l_parent_rate);
if (divider)
- return (long)(l_parent_rate / divider);
+ req->rate = (long)(l_parent_rate / divider);
+ else
+ req->rate = 0;
+
return 0;
}
@@ -498,7 +506,7 @@ static const struct clk_ops cdce925_clk_y1_ops = {
.prepare = cdce925_clk_prepare,
.unprepare = cdce925_clk_unprepare,
.recalc_rate = cdce925_clk_recalc_rate,
- .round_rate = cdce925_clk_y1_round_rate,
+ .determine_rate = cdce925_clk_y1_determine_rate,
.set_rate = cdce925_clk_y1_set_rate,
};
diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c
index f8417ee2961a..402ab74d9bfb 100644
--- a/drivers/clk/clk-clps711x.c
+++ b/drivers/clk/clk-clps711x.c
@@ -99,7 +99,7 @@ static void __init clps711x_clk_init_dt(struct device_node *np)
*/
tmp &= ~(SYSCON1_TC1M | SYSCON1_TC1S);
/* Timer2 in prescale mode.
- * Value writen is automatically re-loaded when
+ * Value written is automatically re-loaded when
* the counter underflows.
*/
tmp |= SYSCON1_TC2M | SYSCON1_TC2S;
diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c
index 35cb93ad298a..8800472ba63f 100644
--- a/drivers/clk/clk-cs2000-cp.c
+++ b/drivers/clk/clk-cs2000-cp.c
@@ -305,15 +305,19 @@ static unsigned long cs2000_recalc_rate(struct clk_hw *hw,
return cs2000_ratio_to_rate(ratio, parent_rate, priv->lf_ratio);
}
-static long cs2000_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cs2000_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct cs2000_priv *priv = hw_to_priv(hw);
u32 ratio;
- ratio = cs2000_rate_to_ratio(*parent_rate, rate, priv->lf_ratio);
+ ratio = cs2000_rate_to_ratio(req->best_parent_rate, req->rate,
+ priv->lf_ratio);
- return cs2000_ratio_to_rate(ratio, *parent_rate, priv->lf_ratio);
+ req->rate = cs2000_ratio_to_rate(ratio, req->best_parent_rate,
+ priv->lf_ratio);
+
+ return 0;
}
static int cs2000_select_ratio_mode(struct cs2000_priv *priv,
@@ -430,7 +434,7 @@ static u8 cs2000_get_parent(struct clk_hw *hw)
static const struct clk_ops cs2000_ops = {
.get_parent = cs2000_get_parent,
.recalc_rate = cs2000_recalc_rate,
- .round_rate = cs2000_round_rate,
+ .determine_rate = cs2000_determine_rate,
.set_rate = cs2000_set_rate,
.prepare = cs2000_enable,
.unprepare = cs2000_disable,
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index c1f426b8a504..2601b6155afb 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -431,27 +431,6 @@ long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
}
EXPORT_SYMBOL_GPL(divider_ro_round_rate_parent);
-static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct clk_divider *divider = to_clk_divider(hw);
-
- /* if read only, just return current value */
- if (divider->flags & CLK_DIVIDER_READ_ONLY) {
- u32 val;
-
- val = clk_div_readl(divider) >> divider->shift;
- val &= clk_div_mask(divider->width);
-
- return divider_ro_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags,
- val);
- }
-
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
-}
-
static int clk_divider_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
@@ -527,7 +506,6 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_divider_ops = {
.recalc_rate = clk_divider_recalc_rate,
- .round_rate = clk_divider_round_rate,
.determine_rate = clk_divider_determine_rate,
.set_rate = clk_divider_set_rate,
};
@@ -535,7 +513,6 @@ EXPORT_SYMBOL_GPL(clk_divider_ops);
const struct clk_ops clk_divider_ro_ops = {
.recalc_rate = clk_divider_recalc_rate,
- .round_rate = clk_divider_round_rate,
.determine_rate = clk_divider_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
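
The deleted .round_rate was redundant: clk_divider_determine_rate already handles both the read-only and writable cases through the library's determine_rate-flavoured helpers. For driver-private dividers, a hedged sketch of the equivalent hook (simplified from clk-divider.c; the big-endian register-read variant is elided):

static int my_div_determine_rate(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	struct clk_divider *divider = to_clk_divider(hw);

	/* read-only dividers report the rate for the current setting */
	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
		u32 val = readl(divider->reg) >> divider->shift;

		val &= clk_div_mask(divider->width);
		return divider_ro_determine_rate(hw, req, divider->table,
						 divider->width,
						 divider->flags, val);
	}

	return divider_determine_rate(hw, req, divider->table,
				      divider->width, divider->flags);
}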
diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c
index 15bbdeb60b8e..08cc8e5acf43 100644
--- a/drivers/clk/clk-en7523.c
+++ b/drivers/clk/clk-en7523.c
@@ -9,6 +9,7 @@
#include <linux/regmap.h>
#include <linux/reset-controller.h>
#include <dt-bindings/clock/en7523-clk.h>
+#include <dt-bindings/reset/airoha,en7523-reset.h>
#include <dt-bindings/reset/airoha,en7581-reset.h>
#define RST_NR_PER_BANK 32
@@ -299,6 +300,53 @@ static const u16 en7581_rst_ofs[] = {
REG_RST_CTRL1,
};
+static const u16 en7523_rst_map[] = {
+ /* RST_CTRL2 */
+ [EN7523_XPON_PHY_RST] = 0,
+ [EN7523_XSI_MAC_RST] = 7,
+ [EN7523_XSI_PHY_RST] = 8,
+ [EN7523_NPU_RST] = 9,
+ [EN7523_I2S_RST] = 10,
+ [EN7523_TRNG_RST] = 11,
+ [EN7523_TRNG_MSTART_RST] = 12,
+ [EN7523_DUAL_HSI0_RST] = 13,
+ [EN7523_DUAL_HSI1_RST] = 14,
+ [EN7523_HSI_RST] = 15,
+ [EN7523_DUAL_HSI0_MAC_RST] = 16,
+ [EN7523_DUAL_HSI1_MAC_RST] = 17,
+ [EN7523_HSI_MAC_RST] = 18,
+ [EN7523_WDMA_RST] = 19,
+ [EN7523_WOE0_RST] = 20,
+ [EN7523_WOE1_RST] = 21,
+ [EN7523_HSDMA_RST] = 22,
+ [EN7523_I2C2RBUS_RST] = 23,
+ [EN7523_TDMA_RST] = 24,
+ /* RST_CTRL1 */
+ [EN7523_PCM1_ZSI_ISI_RST] = RST_NR_PER_BANK + 0,
+ [EN7523_FE_PDMA_RST] = RST_NR_PER_BANK + 1,
+ [EN7523_FE_QDMA_RST] = RST_NR_PER_BANK + 2,
+ [EN7523_PCM_SPIWP_RST] = RST_NR_PER_BANK + 4,
+ [EN7523_CRYPTO_RST] = RST_NR_PER_BANK + 6,
+ [EN7523_TIMER_RST] = RST_NR_PER_BANK + 8,
+ [EN7523_PCM1_RST] = RST_NR_PER_BANK + 11,
+ [EN7523_UART_RST] = RST_NR_PER_BANK + 12,
+ [EN7523_GPIO_RST] = RST_NR_PER_BANK + 13,
+ [EN7523_GDMA_RST] = RST_NR_PER_BANK + 14,
+ [EN7523_I2C_MASTER_RST] = RST_NR_PER_BANK + 16,
+ [EN7523_PCM2_ZSI_ISI_RST] = RST_NR_PER_BANK + 17,
+ [EN7523_SFC_RST] = RST_NR_PER_BANK + 18,
+ [EN7523_UART2_RST] = RST_NR_PER_BANK + 19,
+ [EN7523_GDMP_RST] = RST_NR_PER_BANK + 20,
+ [EN7523_FE_RST] = RST_NR_PER_BANK + 21,
+ [EN7523_USB_HOST_P0_RST] = RST_NR_PER_BANK + 22,
+ [EN7523_GSW_RST] = RST_NR_PER_BANK + 23,
+ [EN7523_SFC2_PCM_RST] = RST_NR_PER_BANK + 25,
+ [EN7523_PCIE0_RST] = RST_NR_PER_BANK + 26,
+ [EN7523_PCIE1_RST] = RST_NR_PER_BANK + 27,
+ [EN7523_PCIE_HB_RST] = RST_NR_PER_BANK + 29,
+ [EN7523_XPON_MAC_RST] = RST_NR_PER_BANK + 31,
+};
+
static const u16 en7581_rst_map[] = {
/* RST_CTRL2 */
[EN7581_XPON_PHY_RST] = 0,
@@ -357,6 +405,9 @@ static const u16 en7581_rst_map[] = {
[EN7581_XPON_MAC_RST] = RST_NR_PER_BANK + 31,
};
+static int en7581_reset_register(struct device *dev, void __iomem *base,
+ const u16 *rst_map, int nr_resets);
+
static u32 en7523_get_base_rate(const struct en_clk_desc *desc, u32 val)
{
if (!desc->base_bits)
@@ -552,7 +603,8 @@ static int en7523_clk_hw_init(struct platform_device *pdev,
en7523_register_clocks(&pdev->dev, clk_data, base, np_base);
- return 0;
+ return en7581_reset_register(&pdev->dev, np_base, en7523_rst_map,
+ ARRAY_SIZE(en7523_rst_map));
}
static void en7581_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
@@ -652,7 +704,8 @@ static const struct reset_control_ops en7581_reset_ops = {
.status = en7523_reset_status,
};
-static int en7581_reset_register(struct device *dev, void __iomem *base)
+static int en7581_reset_register(struct device *dev, void __iomem *base,
+ const u16 *rst_map, int nr_resets)
{
struct en_rst_data *rst_data;
@@ -661,10 +714,10 @@ static int en7581_reset_register(struct device *dev, void __iomem *base)
return -ENOMEM;
rst_data->bank_ofs = en7581_rst_ofs;
- rst_data->idx_map = en7581_rst_map;
+ rst_data->idx_map = rst_map;
rst_data->base = base;
- rst_data->rcdev.nr_resets = ARRAY_SIZE(en7581_rst_map);
+ rst_data->rcdev.nr_resets = nr_resets;
rst_data->rcdev.of_xlate = en7523_reset_xlate;
rst_data->rcdev.ops = &en7581_reset_ops;
rst_data->rcdev.of_node = dev->of_node;
@@ -698,7 +751,8 @@ static int en7581_clk_hw_init(struct platform_device *pdev,
val = readl(base + REG_NP_SCU_PCIC);
writel(val | 3, base + REG_NP_SCU_PCIC);
- return en7581_reset_register(&pdev->dev, base);
+ return en7581_reset_register(&pdev->dev, base, en7581_rst_map,
+ ARRAY_SIZE(en7581_rst_map));
}
static int en7523_clk_probe(struct platform_device *pdev)
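
For orientation, the flat map above is consumed by the shared reset ops the same way the existing en7581 map is: each entry packs bank * 32 + bit, and the update path recovers the bank register and bit mask with a divide and a modulo. A sketch of that lookup:

/* sketch of the lookup inside the shared en75xx reset ops */
u16 dest = rst_data->idx_map[id];	/* e.g. id = EN7523_PCIE0_RST */
void __iomem *addr = rst_data->base +
		     rst_data->bank_ofs[dest / RST_NR_PER_BANK];
u32 mask = BIT(dest % RST_NR_PER_BANK);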
diff --git a/drivers/clk/clk-ep93xx.c b/drivers/clk/clk-ep93xx.c
index 4bd8d6ecf6a2..972aadd11493 100644
--- a/drivers/clk/clk-ep93xx.c
+++ b/drivers/clk/clk-ep93xx.c
@@ -389,23 +389,25 @@ static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST(parent_rate, clk->div[index]);
}
-static long ep93xx_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ep93xx_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ep93xx_clk *clk = ep93xx_clk_from(hw);
unsigned long best = 0, now;
unsigned int i;
for (i = 0; i < clk->num_div; i++) {
- if ((rate * clk->div[i]) == *parent_rate)
- return rate;
+ if (req->rate * clk->div[i] == req->best_parent_rate)
+ return 0;
- now = DIV_ROUND_CLOSEST(*parent_rate, clk->div[i]);
- if (!best || is_best(rate, now, best))
+ now = DIV_ROUND_CLOSEST(req->best_parent_rate, clk->div[i]);
+ if (!best || is_best(req->rate, now, best))
best = now;
}
- return best;
+ req->rate = best;
+
+ return 0;
}
static int ep93xx_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -437,7 +439,7 @@ static const struct clk_ops ep93xx_div_ops = {
.disable = ep93xx_clk_disable,
.is_enabled = ep93xx_clk_is_enabled,
.recalc_rate = ep93xx_div_recalc_rate,
- .round_rate = ep93xx_div_round_rate,
+ .determine_rate = ep93xx_div_determine_rate,
.set_rate = ep93xx_div_set_rate,
};
@@ -486,9 +488,10 @@ static const struct ep93xx_gate ep93xx_uarts[] = {
static int ep93xx_uart_clock_init(struct ep93xx_clk_priv *priv)
{
struct clk_parent_data parent_data = { };
- unsigned int i, idx, ret, clk_uart_div;
+ unsigned int i, idx, clk_uart_div;
struct ep93xx_clk *clk;
u32 val;
+ int ret;
regmap_read(priv->map, EP93XX_SYSCON_PWRCNT, &val);
if (val & EP93XX_SYSCON_PWRCNT_UARTBAUD)
diff --git a/drivers/clk/clk-eyeq.c b/drivers/clk/clk-eyeq.c
index 640c25788487..ea1c3d78e7cd 100644
--- a/drivers/clk/clk-eyeq.c
+++ b/drivers/clk/clk-eyeq.c
@@ -131,7 +131,7 @@ struct eqc_early_match_data {
* Both factors (mult and div) must fit in 32 bits. When an operation overflows,
* this function throws away low bits so that factors still fit in 32 bits.
*
- * Precision loss depends on amplitude of mult and div. Worst theorical
+ * Precision loss depends on amplitude of mult and div. Worst theoretical
* loss is: (UINT_MAX+1) / UINT_MAX - 1 = 2.3e-10.
* This is 1Hz every 4.3GHz.
*/
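
The corrected comment's bound is easy to verify numerically; a runnable check of both figures it quotes:

#include <stdio.h>
#include <limits.h>

int main(void)
{
	/* (UINT_MAX + 1) / UINT_MAX - 1, the worst truncation loss */
	double loss = ((double)UINT_MAX + 1.0) / (double)UINT_MAX - 1.0;

	printf("relative loss: %.1e\n", loss);		/* ~2.3e-10 */
	printf("at 4.3 GHz: %.2f Hz\n", loss * 4.3e9);	/* ~1 Hz */
	return 0;
}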
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index e62ae8794d44..de658c9e4c53 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -30,19 +30,21 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
return (unsigned long)rate;
}
-static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_factor_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
unsigned long best_parent;
- best_parent = (rate / fix->mult) * fix->div;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ best_parent = (req->rate / fix->mult) * fix->div;
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
}
- return (*prate / fix->div) * fix->mult;
+ req->rate = (req->best_parent_rate / fix->div) * fix->mult;
+
+ return 0;
}
static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -50,7 +52,7 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
{
/*
* We must report success but we can do so unconditionally because
- * clk_factor_round_rate returns values that ensure this call is a
+ * clk_factor_determine_rate returns values that ensure this call is a
* nop.
*/
@@ -69,7 +71,7 @@ static unsigned long clk_factor_recalc_accuracy(struct clk_hw *hw,
}
const struct clk_ops clk_fixed_factor_ops = {
- .round_rate = clk_factor_round_rate,
+ .determine_rate = clk_factor_determine_rate,
.set_rate = clk_factor_set_rate,
.recalc_rate = clk_factor_recalc_rate,
.recalc_accuracy = clk_factor_recalc_accuracy,
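
The fixed-factor conversion keeps the old contract: when CLK_SET_RATE_PARENT is set, the request is translated into a parent rate of (rate / mult) * div so that dividing back reproduces the request (modulo the parent's own rounding). A runnable sketch of the integer math with a hypothetical 1/4 factor:

#include <stdio.h>

int main(void)
{
	unsigned long mult = 1, div = 4;	/* hypothetical factor */
	unsigned long rate = 25000000;		/* requested 25 MHz */
	unsigned long best_parent = rate / mult * div;

	/* parent asked for 100 MHz; reported rate is parent / 4 * 1 */
	printf("ask parent for %lu, report %lu\n",
	       best_parent, best_parent / div * mult);
	return 0;
}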
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index da057172cc90..cd36a6e27f25 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -151,25 +151,32 @@ void clk_fractional_divider_general_approximation(struct clk_hw *hw,
}
EXPORT_SYMBOL_GPL(clk_fractional_divider_general_approximation);
-static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_fd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fractional_divider *fd = to_clk_fd(hw);
unsigned long m, n;
u64 ret;
- if (!rate || (!clk_hw_can_set_rate_parent(hw) && rate >= *parent_rate))
- return *parent_rate;
+ if (!req->rate || (!clk_hw_can_set_rate_parent(hw) && req->rate >= req->best_parent_rate)) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
if (fd->approximation)
- fd->approximation(hw, rate, parent_rate, &m, &n);
+ fd->approximation(hw, req->rate, &req->best_parent_rate, &m, &n);
else
- clk_fractional_divider_general_approximation(hw, rate, parent_rate, &m, &n);
+ clk_fractional_divider_general_approximation(hw, req->rate,
+ &req->best_parent_rate,
+ &m, &n);
- ret = (u64)*parent_rate * m;
+ ret = (u64)req->best_parent_rate * m;
do_div(ret, n);
- return ret;
+ req->rate = ret;
+
+ return 0;
}
static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -250,7 +257,7 @@ static void clk_fd_debug_init(struct clk_hw *hw, struct dentry *dentry)
const struct clk_ops clk_fractional_divider_ops = {
.recalc_rate = clk_fd_recalc_rate,
- .round_rate = clk_fd_round_rate,
+ .determine_rate = clk_fd_determine_rate,
.set_rate = clk_fd_set_rate,
#ifdef CONFIG_DEBUG_FS
.debug_init = clk_fd_debug_init,
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 68e585a02fd9..4746f8219132 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -15,7 +15,7 @@
#include <linux/string.h>
/**
- * DOC: basic gatable clock which can gate and ungate its output
+ * DOC: basic gateable clock which can gate and ungate its output
*
* Traits of this clock:
* prepare - clk_(un)prepare only ensures parent is (un)prepared
diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c
index 856b008e07c6..e94589c38568 100644
--- a/drivers/clk/clk-gemini.c
+++ b/drivers/clk/clk-gemini.c
@@ -126,13 +126,16 @@ static unsigned long gemini_pci_recalc_rate(struct clk_hw *hw,
return 33000000;
}
-static long gemini_pci_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int gemini_pci_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/* We support 33 and 66 MHz */
- if (rate < 48000000)
- return 33000000;
- return 66000000;
+ if (req->rate < 48000000)
+ req->rate = 33000000;
+ else
+ req->rate = 66000000;
+
+ return 0;
}
static int gemini_pci_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -179,7 +182,7 @@ static int gemini_pci_is_enabled(struct clk_hw *hw)
static const struct clk_ops gemini_pci_clk_ops = {
.recalc_rate = gemini_pci_recalc_rate,
- .round_rate = gemini_pci_round_rate,
+ .determine_rate = gemini_pci_determine_rate,
.set_rate = gemini_pci_set_rate,
.enable = gemini_pci_enable,
.disable = gemini_pci_disable,
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 6e68a41a70a1..cc583934ecf2 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -130,15 +130,17 @@ static void clk_pll_calc(unsigned long rate, unsigned long ref_freq,
*pdivf = divf;
}
-static long clk_pll_round_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 divq, divf;
- unsigned long ref_freq = *parent_rate;
+ unsigned long ref_freq = req->best_parent_rate;
- clk_pll_calc(rate, ref_freq, &divq, &divf);
+ clk_pll_calc(req->rate, ref_freq, &divq, &divf);
- return (ref_freq * (divf + 1)) / (1 << divq);
+ req->rate = (ref_freq * (divf + 1)) / (1 << divq);
+
+ return 0;
}
static int clk_pll_set_rate(struct clk_hw *hwclk, unsigned long rate,
@@ -185,7 +187,7 @@ static const struct clk_ops clk_pll_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
};
@@ -227,16 +229,18 @@ static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
return parent_rate / div;
}
-static long clk_periclk_round_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_periclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 div;
- div = *parent_rate / rate;
+ div = req->best_parent_rate / req->rate;
div++;
div &= ~0x1;
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int clk_periclk_set_rate(struct clk_hw *hwclk, unsigned long rate,
@@ -255,7 +259,7 @@ static int clk_periclk_set_rate(struct clk_hw *hwclk, unsigned long rate,
static const struct clk_ops periclk_ops = {
.recalc_rate = clk_periclk_recalc_rate,
- .round_rate = clk_periclk_round_rate,
+ .determine_rate = clk_periclk_determine_rate,
.set_rate = clk_periclk_set_rate,
};
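
The periclk divider must be even, and the converted hook keeps the old rounding trick: take the integer quotient, add one, then clear the low bit. A runnable check of what that yields:

#include <stdio.h>

static unsigned long even_div(unsigned long parent, unsigned long rate)
{
	unsigned long div = parent / rate;

	div++;
	div &= ~0x1UL;	/* force an even divider */
	return div;
}

int main(void)
{
	unsigned long parent = 200000000;	/* hypothetical parent */

	printf("%lu\n", parent / even_div(parent, 66000000));	/* 50 MHz */
	printf("%lu\n", parent / even_div(parent, 100000000));	/* 100 MHz */
	return 0;
}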
diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c
index 5d2a90addf1a..7d56a47c2aa7 100644
--- a/drivers/clk/clk-hsdk-pll.c
+++ b/drivers/clk/clk-hsdk-pll.c
@@ -197,8 +197,8 @@ static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int hsdk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int i;
unsigned long best_rate;
@@ -211,13 +211,15 @@ static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
best_rate = pll_cfg[0].rate;
for (i = 1; pll_cfg[i].rate != 0; i++) {
- if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
+ if (abs(req->rate - pll_cfg[i].rate) < abs(req->rate - best_rate))
best_rate = pll_cfg[i].rate;
}
dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate);
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk,
@@ -265,7 +267,7 @@ static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *clk,
return -EINVAL;
/*
- * Program divider to div-by-1 if we succesfuly set core clock below
+ * Program divider to div-by-1 if we successfully set core clock below
* 500MHz threshold.
*/
if (rate <= CORE_IF_CLK_THRESHOLD_HZ)
@@ -296,7 +298,7 @@ static int hsdk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops hsdk_pll_ops = {
.recalc_rate = hsdk_pll_recalc_rate,
- .round_rate = hsdk_pll_round_rate,
+ .determine_rate = hsdk_pll_determine_rate,
.set_rate = hsdk_pll_set_rate,
};
diff --git a/drivers/clk/clk-lan966x.c b/drivers/clk/clk-lan966x.c
index 16e0405fe28b..3c7a48c616bb 100644
--- a/drivers/clk/clk-lan966x.c
+++ b/drivers/clk/clk-lan966x.c
@@ -16,8 +16,6 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <dt-bindings/clock/microchip,lan966x.h>
-
#define GCK_ENA BIT(0)
#define GCK_SRC_SEL GENMASK(9, 8)
#define GCK_PRESCALER GENMASK(23, 16)
diff --git a/drivers/clk/clk-lmk04832.c b/drivers/clk/clk-lmk04832.c
index 2bcf422f0b04..b2107b31efa2 100644
--- a/drivers/clk/clk-lmk04832.c
+++ b/drivers/clk/clk-lmk04832.c
@@ -491,28 +491,33 @@ static long lmk04832_calc_pll2_params(unsigned long prate, unsigned long rate,
return DIV_ROUND_CLOSEST(prate * 2 * pll2_p * pll2_n, pll2_r);
}
-static long lmk04832_vco_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lmk04832_vco_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, vco);
unsigned int n, p, r;
long vco_rate;
int ret;
- ret = lmk04832_check_vco_ranges(lmk, rate);
+ ret = lmk04832_check_vco_ranges(lmk, req->rate);
if (ret < 0)
return ret;
- vco_rate = lmk04832_calc_pll2_params(*prate, rate, &n, &p, &r);
+ vco_rate = lmk04832_calc_pll2_params(req->best_parent_rate, req->rate,
+ &n, &p, &r);
if (vco_rate < 0) {
dev_err(lmk->dev, "PLL2 parameters out of range\n");
- return vco_rate;
+ req->rate = vco_rate;
+
+ return 0;
}
- if (rate != vco_rate)
+ if (req->rate != vco_rate)
return -EINVAL;
- return vco_rate;
+ req->rate = vco_rate;
+
+ return 0;
}
static int lmk04832_vco_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -579,7 +584,7 @@ static const struct clk_ops lmk04832_vco_ops = {
.prepare = lmk04832_vco_prepare,
.unprepare = lmk04832_vco_unprepare,
.recalc_rate = lmk04832_vco_recalc_rate,
- .round_rate = lmk04832_vco_round_rate,
+ .determine_rate = lmk04832_vco_determine_rate,
.set_rate = lmk04832_vco_set_rate,
};
@@ -888,25 +893,27 @@ static unsigned long lmk04832_sclk_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST(prate, sysref_div);
}
-static long lmk04832_sclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lmk04832_sclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, sclk);
unsigned long sclk_rate;
unsigned int sysref_div;
- sysref_div = DIV_ROUND_CLOSEST(*prate, rate);
- sclk_rate = DIV_ROUND_CLOSEST(*prate, sysref_div);
+ sysref_div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
+ sclk_rate = DIV_ROUND_CLOSEST(req->best_parent_rate, sysref_div);
if (sysref_div < 0x07 || sysref_div > 0x1fff) {
dev_err(lmk->dev, "SYSREF divider out of range\n");
return -EINVAL;
}
- if (rate != sclk_rate)
+ if (req->rate != sclk_rate)
return -EINVAL;
- return sclk_rate;
+ req->rate = sclk_rate;
+
+ return 0;
}
static int lmk04832_sclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -945,7 +952,7 @@ static const struct clk_ops lmk04832_sclk_ops = {
.prepare = lmk04832_sclk_prepare,
.unprepare = lmk04832_sclk_unprepare,
.recalc_rate = lmk04832_sclk_recalc_rate,
- .round_rate = lmk04832_sclk_round_rate,
+ .determine_rate = lmk04832_sclk_determine_rate,
.set_rate = lmk04832_sclk_set_rate,
};
@@ -1069,26 +1076,28 @@ static unsigned long lmk04832_dclk_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long lmk04832_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lmk04832_dclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lmk_dclk *dclk = container_of(hw, struct lmk_dclk, hw);
struct lmk04832 *lmk = dclk->lmk;
unsigned long dclk_rate;
unsigned int dclk_div;
- dclk_div = DIV_ROUND_CLOSEST(*prate, rate);
- dclk_rate = DIV_ROUND_CLOSEST(*prate, dclk_div);
+ dclk_div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
+ dclk_rate = DIV_ROUND_CLOSEST(req->best_parent_rate, dclk_div);
if (dclk_div < 1 || dclk_div > 0x3ff) {
dev_err(lmk->dev, "%s_div out of range\n", clk_hw_get_name(hw));
return -EINVAL;
}
- if (rate != dclk_rate)
+ if (req->rate != dclk_rate)
return -EINVAL;
- return dclk_rate;
+ req->rate = dclk_rate;
+
+ return 0;
}
static int lmk04832_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -1158,7 +1167,7 @@ static const struct clk_ops lmk04832_dclk_ops = {
.prepare = lmk04832_dclk_prepare,
.unprepare = lmk04832_dclk_unprepare,
.recalc_rate = lmk04832_dclk_recalc_rate,
- .round_rate = lmk04832_dclk_round_rate,
+ .determine_rate = lmk04832_dclk_determine_rate,
.set_rate = lmk04832_dclk_set_rate,
};
diff --git a/drivers/clk/clk-loongson1.c b/drivers/clk/clk-loongson1.c
index a3467aa6790f..f9f060d08a5f 100644
--- a/drivers/clk/clk-loongson1.c
+++ b/drivers/clk/clk-loongson1.c
@@ -93,14 +93,16 @@ static unsigned long ls1x_divider_recalc_rate(struct clk_hw *hw,
d->flags, d->width);
}
-static long ls1x_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ls1x_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
const struct ls1x_clk_div_data *d = ls1x_clk->data;
- return divider_round_rate(hw, rate, prate, d->table,
- d->width, d->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ d->table, d->width, d->flags);
+
+ return 0;
}
static int ls1x_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -146,7 +148,7 @@ static int ls1x_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops ls1x_clk_divider_ops = {
.recalc_rate = ls1x_divider_recalc_rate,
- .round_rate = ls1x_divider_round_rate,
+ .determine_rate = ls1x_divider_determine_rate,
.set_rate = ls1x_divider_set_rate,
};
diff --git a/drivers/clk/clk-loongson2.c b/drivers/clk/clk-loongson2.c
index 27e632edd484..9c4c6c99db3e 100644
--- a/drivers/clk/clk-loongson2.c
+++ b/drivers/clk/clk-loongson2.c
@@ -13,10 +13,6 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <dt-bindings/clock/loongson,ls2k-clk.h>
-static const struct clk_parent_data pdata[] = {
- { .fw_name = "ref_100m", },
-};
-
enum loongson2_clk_type {
CLK_TYPE_PLL,
CLK_TYPE_SCALE,
@@ -42,6 +38,7 @@ struct loongson2_clk_data {
u8 div_width;
u8 mult_shift;
u8 mult_width;
+ u8 bit_idx;
};
struct loongson2_clk_board_info {
@@ -50,6 +47,7 @@ struct loongson2_clk_board_info {
const char *name;
const char *parent_name;
unsigned long fixed_rate;
+ unsigned long flags;
u8 reg_offset;
u8 div_shift;
u8 div_width;
@@ -95,6 +93,19 @@ struct loongson2_clk_board_info {
.div_width = _dwidth, \
}
+#define CLK_SCALE_MODE(_id, _name, _pname, _offset, \
+ _dshift, _dwidth, _midx) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_SCALE, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .reg_offset = _offset, \
+ .div_shift = _dshift, \
+ .div_width = _dwidth, \
+ .bit_idx = _midx + 1, \
+ }
+
#define CLK_GATE(_id, _name, _pname, _offset, _bidx) \
{ \
.id = _id, \
@@ -105,6 +116,18 @@ struct loongson2_clk_board_info {
.bit_idx = _bidx, \
}
+#define CLK_GATE_FLAGS(_id, _name, _pname, _offset, _bidx, \
+ _flags) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_GATE, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .reg_offset = _offset, \
+ .bit_idx = _bidx, \
+ .flags = _flags \
+ }
+
#define CLK_FIXED(_id, _name, _pname, _rate) \
{ \
.id = _id, \
@@ -114,6 +137,51 @@ struct loongson2_clk_board_info {
.fixed_rate = _rate, \
}
+static const struct loongson2_clk_board_info ls2k0300_clks[] = {
+ /* Reference Clock */
+ CLK_PLL(LS2K0300_NODE_PLL, "pll_node", 0x00, 15, 9, 8, 7),
+ CLK_PLL(LS2K0300_DDR_PLL, "pll_ddr", 0x08, 15, 9, 8, 7),
+ CLK_PLL(LS2K0300_PIX_PLL, "pll_pix", 0x10, 15, 9, 8, 7),
+ CLK_FIXED(LS2K0300_CLK_STABLE, "clk_stable", NULL, 100000000),
+ CLK_FIXED(LS2K0300_CLK_THSENS, "clk_thsens", NULL, 10000000),
+ /* Node PLL */
+ CLK_DIV(LS2K0300_CLK_NODE_DIV, "clk_node_div", "pll_node", 0x00, 24, 7),
+ CLK_DIV(LS2K0300_CLK_GMAC_DIV, "clk_gmac_div", "pll_node", 0x04, 0, 7),
+ CLK_DIV(LS2K0300_CLK_I2S_DIV, "clk_i2s_div", "pll_node", 0x04, 8, 7),
+ CLK_GATE(LS2K0300_CLK_NODE_PLL_GATE, "clk_node_pll_gate", "clk_node_div", 0x00, 0),
+ CLK_GATE(LS2K0300_CLK_GMAC_GATE, "clk_gmac_gate", "clk_gmac_div", 0x00, 1),
+ CLK_GATE(LS2K0300_CLK_I2S_GATE, "clk_i2s_gate", "clk_i2s_div", 0x00, 2),
+ CLK_GATE_FLAGS(LS2K0300_CLK_NODE_GATE, "clk_node_gate", "clk_node_scale", 0x24, 0,
+ CLK_IS_CRITICAL),
+ CLK_SCALE_MODE(LS2K0300_CLK_NODE_SCALE, "clk_node_scale", "clk_node_pll_gate", 0x20, 0, 3,
+ 3),
+ /* DDR PLL */
+ CLK_DIV(LS2K0300_CLK_DDR_DIV, "clk_ddr_div", "pll_ddr", 0x08, 24, 7),
+ CLK_DIV(LS2K0300_CLK_NET_DIV, "clk_net_div", "pll_ddr", 0x0c, 0, 7),
+ CLK_DIV(LS2K0300_CLK_DEV_DIV, "clk_dev_div", "pll_ddr", 0x0c, 8, 7),
+ CLK_GATE(LS2K0300_CLK_NET_GATE, "clk_net_gate", "clk_net_div", 0x08, 1),
+ CLK_GATE(LS2K0300_CLK_DEV_GATE, "clk_dev_gate", "clk_dev_div", 0x08, 2),
+ CLK_GATE_FLAGS(LS2K0300_CLK_DDR_GATE, "clk_ddr_gate", "clk_ddr_div", 0x08, 0,
+ CLK_IS_CRITICAL),
+ /* PIX PLL */
+ CLK_DIV(LS2K0300_CLK_PIX_DIV, "clk_pix_div", "pll_pix", 0x10, 24, 7),
+ CLK_DIV(LS2K0300_CLK_GMACBP_DIV, "clk_gmacbp_div", "pll_pix", 0x14, 0, 7),
+ CLK_GATE(LS2K0300_CLK_PIX_PLL_GATE, "clk_pix_pll_gate", "clk_pix_div", 0x10, 0),
+ CLK_GATE(LS2K0300_CLK_PIX_GATE, "clk_pix_gate", "clk_pix_scale", 0x24, 6),
+ CLK_GATE(LS2K0300_CLK_GMACBP_GATE, "clk_gmacbp_gate", "clk_gmacbp_div", 0x10, 1),
+ CLK_SCALE_MODE(LS2K0300_CLK_PIX_SCALE, "clk_pix_scale", "clk_pix_pll_gate", 0x20, 4, 3, 7),
+ /* clk_dev_gate */
+ CLK_DIV(LS2K0300_CLK_SDIO_SCALE, "clk_sdio_scale", "clk_dev_gate", 0x20, 24, 4),
+ CLK_GATE(LS2K0300_CLK_USB_GATE, "clk_usb_gate", "clk_usb_scale", 0x24, 2),
+ CLK_GATE(LS2K0300_CLK_SDIO_GATE, "clk_sdio_gate", "clk_sdio_scale", 0x24, 4),
+ CLK_GATE(LS2K0300_CLK_APB_GATE, "clk_apb_gate", "clk_apb_scale", 0x24, 3),
+ CLK_GATE_FLAGS(LS2K0300_CLK_BOOT_GATE, "clk_boot_gate", "clk_boot_scale", 0x24, 1,
+ CLK_IS_CRITICAL),
+ CLK_SCALE_MODE(LS2K0300_CLK_USB_SCALE, "clk_usb_scale", "clk_dev_gate", 0x20, 12, 3, 15),
+ CLK_SCALE_MODE(LS2K0300_CLK_APB_SCALE, "clk_apb_scale", "clk_dev_gate", 0x20, 16, 3, 19),
+ CLK_SCALE_MODE(LS2K0300_CLK_BOOT_SCALE, "clk_boot_scale", "clk_dev_gate", 0x20, 8, 3, 11),
+};
+
static const struct loongson2_clk_board_info ls2k0500_clks[] = {
CLK_PLL(LOONGSON2_NODE_PLL, "pll_node", 0, 16, 8, 8, 6),
CLK_PLL(LOONGSON2_DDR_PLL, "pll_ddr", 0x8, 16, 8, 8, 6),
@@ -230,20 +298,26 @@ static const struct clk_ops loongson2_pll_recalc_ops = {
static unsigned long loongson2_freqscale_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- u64 val, mult;
+ u64 val, scale;
+ u32 mode = 0;
struct loongson2_clk_data *clk = to_loongson2_clk(hw);
val = readq(clk->reg);
- mult = loongson2_rate_part(val, clk->div_shift, clk->div_width) + 1;
+ scale = loongson2_rate_part(val, clk->div_shift, clk->div_width) + 1;
+
+ if (clk->bit_idx)
+ mode = val & BIT(clk->bit_idx - 1);
- return div_u64((u64)parent_rate * mult, 8);
+ return mode == 0 ? div_u64((u64)parent_rate * scale, 8) :
+ div_u64((u64)parent_rate, scale);
}
static const struct clk_ops loongson2_freqscale_recalc_ops = {
.recalc_rate = loongson2_freqscale_recalc_rate,
};
-static struct clk_hw *loongson2_clk_register(struct loongson2_clk_provider *clp,
+static struct clk_hw *loongson2_clk_register(const char *parent,
+ struct loongson2_clk_provider *clp,
const struct loongson2_clk_board_info *cld,
const struct clk_ops *ops)
{
@@ -260,17 +334,14 @@ static struct clk_hw *loongson2_clk_register(struct loongson2_clk_provider *clp,
init.ops = ops;
init.flags = 0;
init.num_parents = 1;
-
- if (!cld->parent_name)
- init.parent_data = pdata;
- else
- init.parent_names = &cld->parent_name;
+ init.parent_names = &parent;
clk->reg = clp->base + cld->reg_offset;
clk->div_shift = cld->div_shift;
clk->div_width = cld->div_width;
clk->mult_shift = cld->mult_shift;
clk->mult_width = cld->mult_width;
+ clk->bit_idx = cld->bit_idx;
clk->hw.init = &init;
hw = &clk->hw;
@@ -288,11 +359,17 @@ static int loongson2_clk_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct loongson2_clk_provider *clp;
const struct loongson2_clk_board_info *p, *data;
+ const char *refclk_name, *parent_name;
data = device_get_match_data(dev);
if (!data)
return -EINVAL;
+ refclk_name = of_clk_get_parent_name(dev->of_node, 0);
+ if (IS_ERR(refclk_name))
+ return dev_err_probe(dev, PTR_ERR(refclk_name),
+ "failed to get refclk name\n");
+
for (p = data; p->name; p++)
clks_num = max(clks_num, p->id + 1);
@@ -314,32 +391,36 @@ static int loongson2_clk_probe(struct platform_device *pdev)
for (i = 0; i < clks_num; i++) {
p = &data[i];
+ parent_name = p->parent_name ? p->parent_name : refclk_name;
+
switch (p->type) {
case CLK_TYPE_PLL:
- hw = loongson2_clk_register(clp, p,
+ hw = loongson2_clk_register(parent_name, clp, p,
&loongson2_pll_recalc_ops);
break;
case CLK_TYPE_SCALE:
- hw = loongson2_clk_register(clp, p,
+ hw = loongson2_clk_register(parent_name, clp, p,
&loongson2_freqscale_recalc_ops);
break;
case CLK_TYPE_DIVIDER:
hw = devm_clk_hw_register_divider(dev, p->name,
- p->parent_name, 0,
+ parent_name, 0,
clp->base + p->reg_offset,
p->div_shift, p->div_width,
- CLK_DIVIDER_ONE_BASED,
+ CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO,
&clp->clk_lock);
break;
case CLK_TYPE_GATE:
- hw = devm_clk_hw_register_gate(dev, p->name, p->parent_name, 0,
+ hw = devm_clk_hw_register_gate(dev, p->name, parent_name,
+ p->flags,
clp->base + p->reg_offset,
p->bit_idx, 0,
&clp->clk_lock);
break;
case CLK_TYPE_FIXED:
- hw = devm_clk_hw_register_fixed_rate_parent_data(dev, p->name, pdata,
- 0, p->fixed_rate);
+ hw = devm_clk_hw_register_fixed_rate(dev, p->name, parent_name,
+ 0, p->fixed_rate);
break;
default:
return dev_err_probe(dev, -EINVAL, "Invalid clk type\n");
@@ -357,6 +438,7 @@ static int loongson2_clk_probe(struct platform_device *pdev)
}
static const struct of_device_id loongson2_clk_match_table[] = {
+ { .compatible = "loongson,ls2k0300-clk", .data = &ls2k0300_clks },
{ .compatible = "loongson,ls2k0500-clk", .data = &ls2k0500_clks },
{ .compatible = "loongson,ls2k-clk", .data = &ls2k1000_clks },
{ .compatible = "loongson,ls2k2000-clk", .data = &ls2k2000_clks },
diff --git a/drivers/clk/clk-max9485.c b/drivers/clk/clk-max9485.c
index be9020b6c789..0515e3e41162 100644
--- a/drivers/clk/clk-max9485.c
+++ b/drivers/clk/clk-max9485.c
@@ -159,29 +159,32 @@ static unsigned long max9485_clkout_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long max9485_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int max9485_clkout_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
const struct max9485_rate *curr, *prev = NULL;
for (curr = max9485_rates; curr->out != 0; curr++) {
/* Exact matches */
- if (curr->out == rate)
- return rate;
+ if (curr->out == req->rate)
+ return 0;
/*
* Find the first entry that has a frequency higher than the
* requested one.
*/
- if (curr->out > rate) {
+ if (curr->out > req->rate) {
unsigned int mid;
/*
* If this is the first entry, clamp the value to the
* lowest possible frequency.
*/
- if (!prev)
- return curr->out;
+ if (!prev) {
+ req->rate = curr->out;
+
+ return 0;
+ }
/*
* Otherwise, determine whether the previous entry or
@@ -189,14 +192,18 @@ static long max9485_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
*/
mid = prev->out + ((curr->out - prev->out) / 2);
- return (mid > rate) ? prev->out : curr->out;
+ req->rate = mid > req->rate ? prev->out : curr->out;
+
+ return 0;
}
prev = curr;
}
/* If the last entry was still too high, clamp the value */
- return prev->out;
+ req->rate = prev->out;
+
+ return 0;
}
struct max9485_clk {
@@ -221,7 +228,7 @@ static const struct max9485_clk max9485_clks[MAX9485_NUM_CLKS] = {
.parent_index = -1,
.ops = {
.set_rate = max9485_clkout_set_rate,
- .round_rate = max9485_clkout_round_rate,
+ .determine_rate = max9485_clkout_determine_rate,
.recalc_rate = max9485_clkout_recalc_rate,
},
},
diff --git a/drivers/clk/clk-milbeaut.c b/drivers/clk/clk-milbeaut.c
index 18c20aff45f7..b4f9b7143eaa 100644
--- a/drivers/clk/clk-milbeaut.c
+++ b/drivers/clk/clk-milbeaut.c
@@ -386,8 +386,8 @@ static unsigned long m10v_clk_divider_recalc_rate(struct clk_hw *hw,
divider->flags, divider->width);
}
-static long m10v_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int m10v_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct m10v_clk_divider *divider = to_m10v_div(hw);
@@ -398,13 +398,19 @@ static long m10v_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
val = readl(divider->reg) >> divider->shift;
val &= clk_div_mask(divider->width);
- return divider_ro_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags,
- val);
+ req->rate = divider_ro_round_rate(hw, req->rate,
+ &req->best_parent_rate,
+ divider->table,
+ divider->width,
+ divider->flags, val);
+
+ return 0;
}
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ divider->table, divider->width, divider->flags);
+
+ return 0;
}
static int m10v_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -450,7 +456,7 @@ static int m10v_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops m10v_clk_divider_ops = {
.recalc_rate = m10v_clk_divider_recalc_rate,
- .round_rate = m10v_clk_divider_round_rate,
+ .determine_rate = m10v_clk_divider_determine_rate,
.set_rate = m10v_clk_divider_set_rate,
};
diff --git a/drivers/clk/clk-multiplier.c b/drivers/clk/clk-multiplier.c
index e507aa958da9..6f2955d408b6 100644
--- a/drivers/clk/clk-multiplier.c
+++ b/drivers/clk/clk-multiplier.c
@@ -112,14 +112,16 @@ static unsigned long __bestmult(struct clk_hw *hw, unsigned long rate,
return bestmult;
}
-static long clk_multiplier_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_multiplier_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_multiplier *mult = to_clk_multiplier(hw);
- unsigned long factor = __bestmult(hw, rate, parent_rate,
+ unsigned long factor = __bestmult(hw, req->rate, &req->best_parent_rate,
mult->width, mult->flags);
- return *parent_rate * factor;
+ req->rate = req->best_parent_rate * factor;
+
+ return 0;
}
static int clk_multiplier_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -150,7 +152,7 @@ static int clk_multiplier_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_multiplier_ops = {
.recalc_rate = clk_multiplier_recalc_rate,
- .round_rate = clk_multiplier_round_rate,
+ .determine_rate = clk_multiplier_determine_rate,
.set_rate = clk_multiplier_set_rate,
};
EXPORT_SYMBOL_GPL(clk_multiplier_ops);
diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c
index bd4f21c22004..4709f0338e37 100644
--- a/drivers/clk/clk-pwm.c
+++ b/drivers/clk/clk-pwm.c
@@ -14,6 +14,7 @@
struct clk_pwm {
struct clk_hw hw;
struct pwm_device *pwm;
+ struct pwm_state state;
u32 fixed_rate;
};
@@ -22,11 +23,28 @@ static inline struct clk_pwm *to_clk_pwm(struct clk_hw *hw)
return container_of(hw, struct clk_pwm, hw);
}
+static int clk_pwm_enable(struct clk_hw *hw)
+{
+ struct clk_pwm *clk_pwm = to_clk_pwm(hw);
+
+ return pwm_apply_atomic(clk_pwm->pwm, &clk_pwm->state);
+}
+
+static void clk_pwm_disable(struct clk_hw *hw)
+{
+ struct clk_pwm *clk_pwm = to_clk_pwm(hw);
+ struct pwm_state state = clk_pwm->state;
+
+ state.enabled = false;
+
+ pwm_apply_atomic(clk_pwm->pwm, &state);
+}
+
static int clk_pwm_prepare(struct clk_hw *hw)
{
struct clk_pwm *clk_pwm = to_clk_pwm(hw);
- return pwm_enable(clk_pwm->pwm);
+ return pwm_apply_might_sleep(clk_pwm->pwm, &clk_pwm->state);
}
static void clk_pwm_unprepare(struct clk_hw *hw)
@@ -48,8 +66,11 @@ static int clk_pwm_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
struct clk_pwm *clk_pwm = to_clk_pwm(hw);
struct pwm_state state;
+ int ret;
- pwm_get_state(clk_pwm->pwm, &state);
+ ret = pwm_get_state_hw(clk_pwm->pwm, &state);
+ if (ret)
+ return ret;
duty->num = state.duty_cycle;
duty->den = state.period;
@@ -57,6 +78,13 @@ static int clk_pwm_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
return 0;
}
+static const struct clk_ops clk_pwm_ops_atomic = {
+ .enable = clk_pwm_enable,
+ .disable = clk_pwm_disable,
+ .recalc_rate = clk_pwm_recalc_rate,
+ .get_duty_cycle = clk_pwm_get_duty_cycle,
+};
+
static const struct clk_ops clk_pwm_ops = {
.prepare = clk_pwm_prepare,
.unprepare = clk_pwm_unprepare,
@@ -103,20 +131,19 @@ static int clk_pwm_probe(struct platform_device *pdev)
return -EINVAL;
}
- /*
- * FIXME: pwm_apply_args() should be removed when switching to the
- * atomic PWM API.
- */
- pwm_apply_args(pwm);
- ret = pwm_config(pwm, (pargs.period + 1) >> 1, pargs.period);
- if (ret < 0)
- return ret;
+ pwm_init_state(pwm, &clk_pwm->state);
+ pwm_set_relative_duty_cycle(&clk_pwm->state, 1, 2);
+ clk_pwm->state.enabled = true;
clk_name = node->name;
of_property_read_string(node, "clock-output-names", &clk_name);
init.name = clk_name;
- init.ops = &clk_pwm_ops;
+ if (pwm_might_sleep(pwm))
+ init.ops = &clk_pwm_ops;
+ else
+ init.ops = &clk_pwm_ops_atomic;
+
init.flags = 0;
init.num_parents = 0;
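
The probe-time ops choice above is the interesting part: a PWM that can be updated from atomic context lets the clock implement enable/disable (callable with spinlocks held) instead of only prepare/unprepare (which may sleep). A hedged sketch of the decision and the calls involved, all from the current PWM API:

struct pwm_state state;
int ret;

pwm_init_state(pwm, &state);			/* period from PWM args */
pwm_set_relative_duty_cycle(&state, 1, 2);	/* 50% duty cycle */
state.enabled = true;

if (pwm_might_sleep(pwm))
	ret = pwm_apply_might_sleep(pwm, &state);	/* prepare path */
else
	ret = pwm_apply_atomic(pwm, &state);		/* enable path */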
diff --git a/drivers/clk/clk-rp1.c b/drivers/clk/clk-rp1.c
new file mode 100644
index 000000000000..fd144755b879
--- /dev/null
+++ b/drivers/clk/clk-rp1.c
@@ -0,0 +1,2462 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Raspberry Pi Ltd.
+ *
+ * Clock driver for RP1 PCIe multifunction chip.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/units.h>
+
+#include <dt-bindings/clock/raspberrypi,rp1-clocks.h>
+
+#define PLL_SYS_OFFSET 0x08000
+#define PLL_SYS_CS (PLL_SYS_OFFSET + 0x00)
+#define PLL_SYS_PWR (PLL_SYS_OFFSET + 0x04)
+#define PLL_SYS_FBDIV_INT (PLL_SYS_OFFSET + 0x08)
+#define PLL_SYS_FBDIV_FRAC (PLL_SYS_OFFSET + 0x0c)
+#define PLL_SYS_PRIM (PLL_SYS_OFFSET + 0x10)
+#define PLL_SYS_SEC (PLL_SYS_OFFSET + 0x14)
+
+#define PLL_AUDIO_OFFSET 0x0c000
+#define PLL_AUDIO_CS (PLL_AUDIO_OFFSET + 0x00)
+#define PLL_AUDIO_PWR (PLL_AUDIO_OFFSET + 0x04)
+#define PLL_AUDIO_FBDIV_INT (PLL_AUDIO_OFFSET + 0x08)
+#define PLL_AUDIO_FBDIV_FRAC (PLL_AUDIO_OFFSET + 0x0c)
+#define PLL_AUDIO_PRIM (PLL_AUDIO_OFFSET + 0x10)
+#define PLL_AUDIO_SEC (PLL_AUDIO_OFFSET + 0x14)
+#define PLL_AUDIO_TERN (PLL_AUDIO_OFFSET + 0x18)
+
+#define PLL_VIDEO_OFFSET 0x10000
+#define PLL_VIDEO_CS (PLL_VIDEO_OFFSET + 0x00)
+#define PLL_VIDEO_PWR (PLL_VIDEO_OFFSET + 0x04)
+#define PLL_VIDEO_FBDIV_INT (PLL_VIDEO_OFFSET + 0x08)
+#define PLL_VIDEO_FBDIV_FRAC (PLL_VIDEO_OFFSET + 0x0c)
+#define PLL_VIDEO_PRIM (PLL_VIDEO_OFFSET + 0x10)
+#define PLL_VIDEO_SEC (PLL_VIDEO_OFFSET + 0x14)
+
+#define GPCLK_OE_CTRL 0x00000
+
+#define CLK_SYS_OFFSET 0x00014
+#define CLK_SYS_CTRL (CLK_SYS_OFFSET + 0x00)
+#define CLK_SYS_DIV_INT (CLK_SYS_OFFSET + 0x04)
+#define CLK_SYS_SEL (CLK_SYS_OFFSET + 0x0c)
+
+#define CLK_SLOW_OFFSET 0x00024
+#define CLK_SLOW_SYS_CTRL (CLK_SLOW_OFFSET + 0x00)
+#define CLK_SLOW_SYS_DIV_INT (CLK_SLOW_OFFSET + 0x04)
+#define CLK_SLOW_SYS_SEL (CLK_SLOW_OFFSET + 0x0c)
+
+#define CLK_DMA_OFFSET 0x00044
+#define CLK_DMA_CTRL (CLK_DMA_OFFSET + 0x00)
+#define CLK_DMA_DIV_INT (CLK_DMA_OFFSET + 0x04)
+#define CLK_DMA_SEL (CLK_DMA_OFFSET + 0x0c)
+
+#define CLK_UART_OFFSET 0x00054
+#define CLK_UART_CTRL (CLK_UART_OFFSET + 0x00)
+#define CLK_UART_DIV_INT (CLK_UART_OFFSET + 0x04)
+#define CLK_UART_SEL (CLK_UART_OFFSET + 0x0c)
+
+#define CLK_ETH_OFFSET 0x00064
+#define CLK_ETH_CTRL (CLK_ETH_OFFSET + 0x00)
+#define CLK_ETH_DIV_INT (CLK_ETH_OFFSET + 0x04)
+#define CLK_ETH_SEL (CLK_ETH_OFFSET + 0x0c)
+
+#define CLK_PWM0_OFFSET 0x00074
+#define CLK_PWM0_CTRL (CLK_PWM0_OFFSET + 0x00)
+#define CLK_PWM0_DIV_INT (CLK_PWM0_OFFSET + 0x04)
+#define CLK_PWM0_DIV_FRAC (CLK_PWM0_OFFSET + 0x08)
+#define CLK_PWM0_SEL (CLK_PWM0_OFFSET + 0x0c)
+
+#define CLK_PWM1_OFFSET 0x00084
+#define CLK_PWM1_CTRL (CLK_PWM1_OFFSET + 0x00)
+#define CLK_PWM1_DIV_INT (CLK_PWM1_OFFSET + 0x04)
+#define CLK_PWM1_DIV_FRAC (CLK_PWM1_OFFSET + 0x08)
+#define CLK_PWM1_SEL (CLK_PWM1_OFFSET + 0x0c)
+
+#define CLK_AUDIO_IN_OFFSET 0x00094
+#define CLK_AUDIO_IN_CTRL (CLK_AUDIO_IN_OFFSET + 0x00)
+#define CLK_AUDIO_IN_DIV_INT (CLK_AUDIO_IN_OFFSET + 0x04)
+#define CLK_AUDIO_IN_SEL (CLK_AUDIO_IN_OFFSET + 0x0c)
+
+#define CLK_AUDIO_OUT_OFFSET 0x000a4
+#define CLK_AUDIO_OUT_CTRL (CLK_AUDIO_OUT_OFFSET + 0x00)
+#define CLK_AUDIO_OUT_DIV_INT (CLK_AUDIO_OUT_OFFSET + 0x04)
+#define CLK_AUDIO_OUT_SEL (CLK_AUDIO_OUT_OFFSET + 0x0c)
+
+#define CLK_I2S_OFFSET 0x000b4
+#define CLK_I2S_CTRL (CLK_I2S_OFFSET + 0x00)
+#define CLK_I2S_DIV_INT (CLK_I2S_OFFSET + 0x04)
+#define CLK_I2S_SEL (CLK_I2S_OFFSET + 0x0c)
+
+#define CLK_MIPI0_CFG_OFFSET 0x000c4
+#define CLK_MIPI0_CFG_CTRL (CLK_MIPI0_CFG_OFFSET + 0x00)
+#define CLK_MIPI0_CFG_DIV_INT (CLK_MIPI0_CFG_OFFSET + 0x04)
+#define CLK_MIPI0_CFG_SEL (CLK_MIPI0_CFG_OFFSET + 0x0c)
+
+#define CLK_MIPI1_CFG_OFFSET 0x000d4
+#define CLK_MIPI1_CFG_CTRL (CLK_MIPI1_CFG_OFFSET + 0x00)
+#define CLK_MIPI1_CFG_DIV_INT (CLK_MIPI1_CFG_OFFSET + 0x04)
+#define CLK_MIPI1_CFG_SEL (CLK_MIPI1_CFG_OFFSET + 0x0c)
+
+#define CLK_PCIE_AUX_OFFSET 0x000e4
+#define CLK_PCIE_AUX_CTRL (CLK_PCIE_AUX_OFFSET + 0x00)
+#define CLK_PCIE_AUX_DIV_INT (CLK_PCIE_AUX_OFFSET + 0x04)
+#define CLK_PCIE_AUX_SEL (CLK_PCIE_AUX_OFFSET + 0x0c)
+
+#define CLK_USBH0_MICROFRAME_OFFSET 0x000f4
+#define CLK_USBH0_MICROFRAME_CTRL (CLK_USBH0_MICROFRAME_OFFSET + 0x00)
+#define CLK_USBH0_MICROFRAME_DIV_INT (CLK_USBH0_MICROFRAME_OFFSET + 0x04)
+#define CLK_USBH0_MICROFRAME_SEL (CLK_USBH0_MICROFRAME_OFFSET + 0x0c)
+
+#define CLK_USBH1_MICROFRAME_OFFSET 0x00104
+#define CLK_USBH1_MICROFRAME_CTRL (CLK_USBH1_MICROFRAME_OFFSET + 0x00)
+#define CLK_USBH1_MICROFRAME_DIV_INT (CLK_USBH1_MICROFRAME_OFFSET + 0x04)
+#define CLK_USBH1_MICROFRAME_SEL (CLK_USBH1_MICROFRAME_OFFSET + 0x0c)
+
+#define CLK_USBH0_SUSPEND_OFFSET 0x00114
+#define CLK_USBH0_SUSPEND_CTRL (CLK_USBH0_SUSPEND_OFFSET + 0x00)
+#define CLK_USBH0_SUSPEND_DIV_INT (CLK_USBH0_SUSPEND_OFFSET + 0x04)
+#define CLK_USBH0_SUSPEND_SEL (CLK_USBH0_SUSPEND_OFFSET + 0x0c)
+
+#define CLK_USBH1_SUSPEND_OFFSET 0x00124
+#define CLK_USBH1_SUSPEND_CTRL (CLK_USBH1_SUSPEND_OFFSET + 0x00)
+#define CLK_USBH1_SUSPEND_DIV_INT (CLK_USBH1_SUSPEND_OFFSET + 0x04)
+#define CLK_USBH1_SUSPEND_SEL (CLK_USBH1_SUSPEND_OFFSET + 0x0c)
+
+#define CLK_ETH_TSU_OFFSET 0x00134
+#define CLK_ETH_TSU_CTRL (CLK_ETH_TSU_OFFSET + 0x00)
+#define CLK_ETH_TSU_DIV_INT (CLK_ETH_TSU_OFFSET + 0x04)
+#define CLK_ETH_TSU_SEL (CLK_ETH_TSU_OFFSET + 0x0c)
+
+#define CLK_ADC_OFFSET 0x00144
+#define CLK_ADC_CTRL (CLK_ADC_OFFSET + 0x00)
+#define CLK_ADC_DIV_INT (CLK_ADC_OFFSET + 0x04)
+#define CLK_ADC_SEL (CLK_ADC_OFFSET + 0x0c)
+
+#define CLK_SDIO_TIMER_OFFSET 0x00154
+#define CLK_SDIO_TIMER_CTRL (CLK_SDIO_TIMER_OFFSET + 0x00)
+#define CLK_SDIO_TIMER_DIV_INT (CLK_SDIO_TIMER_OFFSET + 0x04)
+#define CLK_SDIO_TIMER_SEL (CLK_SDIO_TIMER_OFFSET + 0x0c)
+
+#define CLK_SDIO_ALT_SRC_OFFSET 0x00164
+#define CLK_SDIO_ALT_SRC_CTRL (CLK_SDIO_ALT_SRC_OFFSET + 0x00)
+#define CLK_SDIO_ALT_SRC_DIV_INT (CLK_SDIO_ALT_SRC_OFFSET + 0x04)
+#define CLK_SDIO_ALT_SRC_SEL (CLK_SDIO_ALT_SRC_OFFSET + 0x0c)
+
+#define CLK_GP0_OFFSET 0x00174
+#define CLK_GP0_CTRL (CLK_GP0_OFFSET + 0x00)
+#define CLK_GP0_DIV_INT (CLK_GP0_OFFSET + 0x04)
+#define CLK_GP0_DIV_FRAC (CLK_GP0_OFFSET + 0x08)
+#define CLK_GP0_SEL (CLK_GP0_OFFSET + 0x0c)
+
+#define CLK_GP1_OFFSET 0x00184
+#define CLK_GP1_CTRL (CLK_GP1_OFFSET + 0x00)
+#define CLK_GP1_DIV_INT (CLK_GP1_OFFSET + 0x04)
+#define CLK_GP1_DIV_FRAC (CLK_GP1_OFFSET + 0x08)
+#define CLK_GP1_SEL (CLK_GP1_OFFSET + 0x0c)
+
+#define CLK_GP2_OFFSET 0x00194
+#define CLK_GP2_CTRL (CLK_GP2_OFFSET + 0x00)
+#define CLK_GP2_DIV_INT (CLK_GP2_OFFSET + 0x04)
+#define CLK_GP2_DIV_FRAC (CLK_GP2_OFFSET + 0x08)
+#define CLK_GP2_SEL (CLK_GP2_OFFSET + 0x0c)
+
+#define CLK_GP3_OFFSET 0x001a4
+#define CLK_GP3_CTRL (CLK_GP3_OFFSET + 0x00)
+#define CLK_GP3_DIV_INT (CLK_GP3_OFFSET + 0x04)
+#define CLK_GP3_DIV_FRAC (CLK_GP3_OFFSET + 0x08)
+#define CLK_GP3_SEL (CLK_GP3_OFFSET + 0x0c)
+
+#define CLK_GP4_OFFSET 0x001b4
+#define CLK_GP4_CTRL (CLK_GP4_OFFSET + 0x00)
+#define CLK_GP4_DIV_INT (CLK_GP4_OFFSET + 0x04)
+#define CLK_GP4_DIV_FRAC (CLK_GP4_OFFSET + 0x08)
+#define CLK_GP4_SEL (CLK_GP4_OFFSET + 0x0c)
+
+#define CLK_GP5_OFFSET 0x001c4
+#define CLK_GP5_CTRL (CLK_GP5_OFFSET + 0x00)
+#define CLK_GP5_DIV_INT (CLK_GP5_OFFSET + 0x04)
+#define CLK_GP5_DIV_FRAC (CLK_GP5_OFFSET + 0x08)
+#define CLK_GP5_SEL (CLK_GP5_OFFSET + 0x0c)
+
+#define CLK_SYS_RESUS_CTRL 0x0020c
+
+#define CLK_SLOW_SYS_RESUS_CTRL 0x00214
+
+#define FC0_OFFSET 0x0021c
+#define FC0_REF_KHZ (FC0_OFFSET + 0x00)
+#define FC0_MIN_KHZ (FC0_OFFSET + 0x04)
+#define FC0_MAX_KHZ (FC0_OFFSET + 0x08)
+#define FC0_DELAY (FC0_OFFSET + 0x0c)
+#define FC0_INTERVAL (FC0_OFFSET + 0x10)
+#define FC0_SRC (FC0_OFFSET + 0x14)
+#define FC0_STATUS (FC0_OFFSET + 0x18)
+#define FC0_RESULT (FC0_OFFSET + 0x1c)
+#define FC_SIZE 0x20
+#define FC_COUNT 8
+#define FC_NUM(idx, off) ((idx) * 32 + (off))
+
+#define AUX_SEL 1
+
+#define VIDEO_CLOCKS_OFFSET 0x4000
+#define VIDEO_CLK_VEC_CTRL (VIDEO_CLOCKS_OFFSET + 0x0000)
+#define VIDEO_CLK_VEC_DIV_INT (VIDEO_CLOCKS_OFFSET + 0x0004)
+#define VIDEO_CLK_VEC_SEL (VIDEO_CLOCKS_OFFSET + 0x000c)
+#define VIDEO_CLK_DPI_CTRL (VIDEO_CLOCKS_OFFSET + 0x0010)
+#define VIDEO_CLK_DPI_DIV_INT (VIDEO_CLOCKS_OFFSET + 0x0014)
+#define VIDEO_CLK_DPI_SEL (VIDEO_CLOCKS_OFFSET + 0x001c)
+#define VIDEO_CLK_MIPI0_DPI_CTRL (VIDEO_CLOCKS_OFFSET + 0x0020)
+#define VIDEO_CLK_MIPI0_DPI_DIV_INT (VIDEO_CLOCKS_OFFSET + 0x0024)
+#define VIDEO_CLK_MIPI0_DPI_DIV_FRAC (VIDEO_CLOCKS_OFFSET + 0x0028)
+#define VIDEO_CLK_MIPI0_DPI_SEL (VIDEO_CLOCKS_OFFSET + 0x002c)
+#define VIDEO_CLK_MIPI1_DPI_CTRL (VIDEO_CLOCKS_OFFSET + 0x0030)
+#define VIDEO_CLK_MIPI1_DPI_DIV_INT (VIDEO_CLOCKS_OFFSET + 0x0034)
+#define VIDEO_CLK_MIPI1_DPI_DIV_FRAC (VIDEO_CLOCKS_OFFSET + 0x0038)
+#define VIDEO_CLK_MIPI1_DPI_SEL (VIDEO_CLOCKS_OFFSET + 0x003c)
+
+#define DIV_INT_8BIT_MAX GENMASK(7, 0) /* max divide for most clocks */
+#define DIV_INT_16BIT_MAX GENMASK(15, 0) /* max divide for GPx, PWM */
+#define DIV_INT_24BIT_MAX GENMASK(23, 0) /* max divide for CLK_SYS */
+
+#define FC0_STATUS_DONE BIT(4)
+#define FC0_STATUS_RUNNING BIT(8)
+#define FC0_RESULT_FRAC_SHIFT 5
+
+#define PLL_PRIM_DIV1_MASK GENMASK(18, 16)
+#define PLL_PRIM_DIV2_MASK GENMASK(14, 12)
+
+#define PLL_SEC_DIV_MASK GENMASK(12, 8)
+
+#define PLL_CS_LOCK BIT(31)
+#define PLL_CS_REFDIV_MASK BIT(1)
+
+#define PLL_PWR_PD BIT(0)
+#define PLL_PWR_DACPD BIT(1)
+#define PLL_PWR_DSMPD BIT(2)
+#define PLL_PWR_POSTDIVPD BIT(3)
+#define PLL_PWR_4PHASEPD BIT(4)
+#define PLL_PWR_VCOPD BIT(5)
+#define PLL_PWR_MASK GENMASK(5, 0)
+
+#define PLL_SEC_RST BIT(16)
+#define PLL_SEC_IMPL BIT(31)
+
+/* PLL phase output for both PRI and SEC */
+#define PLL_PH_EN BIT(4)
+#define PLL_PH_PHASE_SHIFT 0
+
+#define RP1_PLL_PHASE_0 0
+#define RP1_PLL_PHASE_90 1
+#define RP1_PLL_PHASE_180 2
+#define RP1_PLL_PHASE_270 3
+
+/* Clock fields for all clocks */
+#define CLK_CTRL_ENABLE BIT(11)
+#define CLK_CTRL_AUXSRC_MASK GENMASK(9, 5)
+#define CLK_CTRL_SRC_SHIFT 0
+#define CLK_DIV_FRAC_BITS 16
+
+#define LOCK_TIMEOUT_US 100000
+#define LOCK_POLL_DELAY_US 5
+
+#define MAX_CLK_PARENTS 16
+
+#define PLL_DIV_INVALID 19
+/*
+ * Secondary PLL channel output divider table.
+ * Valid divider codes are 8 to 18; all other register values map to
+ * PLL_DIV_INVALID (19).
+ */
+static const struct clk_div_table pll_sec_div_table[] = {
+ { 0x00, PLL_DIV_INVALID },
+ { 0x01, PLL_DIV_INVALID },
+ { 0x02, PLL_DIV_INVALID },
+ { 0x03, PLL_DIV_INVALID },
+ { 0x04, PLL_DIV_INVALID },
+ { 0x05, PLL_DIV_INVALID },
+ { 0x06, PLL_DIV_INVALID },
+ { 0x07, PLL_DIV_INVALID },
+ { 0x08, 8 },
+ { 0x09, 9 },
+ { 0x0a, 10 },
+ { 0x0b, 11 },
+ { 0x0c, 12 },
+ { 0x0d, 13 },
+ { 0x0e, 14 },
+ { 0x0f, 15 },
+ { 0x10, 16 },
+ { 0x11, 17 },
+ { 0x12, 18 },
+ { 0x13, PLL_DIV_INVALID },
+ { 0x14, PLL_DIV_INVALID },
+ { 0x15, PLL_DIV_INVALID },
+ { 0x16, PLL_DIV_INVALID },
+ { 0x17, PLL_DIV_INVALID },
+ { 0x18, PLL_DIV_INVALID },
+ { 0x19, PLL_DIV_INVALID },
+ { 0x1a, PLL_DIV_INVALID },
+ { 0x1b, PLL_DIV_INVALID },
+ { 0x1c, PLL_DIV_INVALID },
+ { 0x1d, PLL_DIV_INVALID },
+ { 0x1e, PLL_DIV_INVALID },
+ { 0x1f, PLL_DIV_INVALID },
+ { 0 }
+};
+
+struct rp1_clockman {
+ struct device *dev;
+ void __iomem *regs;
+ struct regmap *regmap;
+ spinlock_t regs_lock; /* spinlock for all clocks */
+
+ /* Must be last */
+ struct clk_hw_onecell_data onecell;
+};
+
+struct rp1_pll_core_data {
+ u32 cs_reg;
+ u32 pwr_reg;
+ u32 fbdiv_int_reg;
+ u32 fbdiv_frac_reg;
+ u32 fc0_src;
+};
+
+struct rp1_pll_data {
+ u32 ctrl_reg;
+ u32 fc0_src;
+};
+
+struct rp1_pll_ph_data {
+ unsigned int phase;
+ unsigned int fixed_divider;
+ u32 ph_reg;
+ u32 fc0_src;
+};
+
+struct rp1_pll_divider_data {
+ u32 sec_reg;
+ u32 fc0_src;
+};
+
+struct rp1_clock_data {
+ int num_std_parents;
+ int num_aux_parents;
+ u32 oe_mask;
+ u32 clk_src_mask;
+ u32 ctrl_reg;
+ u32 div_int_reg;
+ u32 div_frac_reg;
+ u32 sel_reg;
+ u32 div_int_max;
+ unsigned long max_freq;
+ u32 fc0_src;
+};
+
+struct rp1_clk_desc {
+ struct clk_hw *(*clk_register)(struct rp1_clockman *clockman,
+ struct rp1_clk_desc *desc);
+ const void *data;
+ struct clk_hw hw;
+ struct rp1_clockman *clockman;
+ unsigned long cached_rate;
+ struct clk_divider div;
+};
+
+static struct rp1_clk_desc *clk_audio_core;
+static struct rp1_clk_desc *clk_audio;
+static struct rp1_clk_desc *clk_i2s;
+static struct clk_hw *clk_xosc;
+
+static inline
+void clockman_write(struct rp1_clockman *clockman, u32 reg, u32 val)
+{
+ regmap_write(clockman->regmap, reg, val);
+}
+
+static inline u32 clockman_read(struct rp1_clockman *clockman, u32 reg)
+{
+ u32 val;
+
+ regmap_read(clockman->regmap, reg, &val);
+
+ return val;
+}
+
+static int rp1_pll_core_is_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *pll_core = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_core->clockman;
+ const struct rp1_pll_core_data *data = pll_core->data;
+ u32 pwr = clockman_read(clockman, data->pwr_reg);
+
+ return (pwr & PLL_PWR_PD) || (pwr & PLL_PWR_POSTDIVPD);
+}
+
+static int rp1_pll_core_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *pll_core = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_core->clockman;
+ const struct rp1_pll_core_data *data = pll_core->data;
+ u32 fbdiv_frac, val;
+ int ret;
+
+ spin_lock(&clockman->regs_lock);
+
+ if (!(clockman_read(clockman, data->cs_reg) & PLL_CS_LOCK)) {
+ /* Reset to a known state. */
+ clockman_write(clockman, data->pwr_reg, PLL_PWR_MASK);
+ clockman_write(clockman, data->fbdiv_int_reg, 20);
+ clockman_write(clockman, data->fbdiv_frac_reg, 0);
+ clockman_write(clockman, data->cs_reg, PLL_CS_REFDIV_MASK);
+ }
+
+ /* Come out of reset. */
+ fbdiv_frac = clockman_read(clockman, data->fbdiv_frac_reg);
+ clockman_write(clockman, data->pwr_reg, fbdiv_frac ? 0 : PLL_PWR_DSMPD);
+ spin_unlock(&clockman->regs_lock);
+
+ /* Wait for the PLL to lock. */
+ ret = regmap_read_poll_timeout(clockman->regmap, data->cs_reg, val,
+ val & PLL_CS_LOCK,
+ LOCK_POLL_DELAY_US, LOCK_TIMEOUT_US);
+ if (ret)
+ dev_err(clockman->dev, "%s: can't lock PLL\n",
+ clk_hw_get_name(hw));
+
+ return ret;
+}
+
+static void rp1_pll_core_off(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *pll_core = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_core->clockman;
+ const struct rp1_pll_core_data *data = pll_core->data;
+
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->pwr_reg, 0);
+ spin_unlock(&clockman->regs_lock);
+}
+
+static inline unsigned long get_pll_core_divider(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u32 *div_int, u32 *div_frac)
+{
+ u32 fbdiv_int, fbdiv_frac;
+ unsigned long calc_rate;
+ u64 shifted_fbdiv_int;
+ u64 div_fp64; /* 32.32 fixed point fraction. */
+
+ /* Factor of reference clock to VCO frequency. */
+ div_fp64 = (u64)(rate) << 32;
+ div_fp64 = DIV_ROUND_CLOSEST_ULL(div_fp64, parent_rate);
+
+ /* Round the fractional component at 24 bits. */
+ div_fp64 += 1 << (32 - 24 - 1);
+
+ fbdiv_int = div_fp64 >> 32;
+ fbdiv_frac = (div_fp64 >> (32 - 24)) & 0xffffff;
+
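+	/* Recompute the achievable rate from the integer + 24-bit fractional feedback divider, rounding to nearest. */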
+ shifted_fbdiv_int = (u64)fbdiv_int << 24;
+ calc_rate = (u64)parent_rate * (shifted_fbdiv_int + fbdiv_frac);
+ calc_rate += BIT(23);
+ calc_rate >>= 24;
+
+ *div_int = fbdiv_int;
+ *div_frac = fbdiv_frac;
+
+ return calc_rate;
+}
+
+static int rp1_pll_core_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct rp1_clk_desc *pll_core = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_core->clockman;
+ const struct rp1_pll_core_data *data = pll_core->data;
+ u32 fbdiv_int, fbdiv_frac;
+
+ /* Disable dividers to start with. */
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->fbdiv_int_reg, 0);
+ clockman_write(clockman, data->fbdiv_frac_reg, 0);
+ spin_unlock(&clockman->regs_lock);
+
+ get_pll_core_divider(hw, rate, parent_rate,
+ &fbdiv_int, &fbdiv_frac);
+
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->pwr_reg, fbdiv_frac ? 0 : PLL_PWR_DSMPD);
+ clockman_write(clockman, data->fbdiv_int_reg, fbdiv_int);
+ clockman_write(clockman, data->fbdiv_frac_reg, fbdiv_frac);
+ spin_unlock(&clockman->regs_lock);
+
+ /* Check that reference frequency is no greater than VCO / 16. */
+ if (WARN_ON_ONCE(parent_rate > (rate / 16)))
+ return -ERANGE;
+
+ spin_lock(&clockman->regs_lock);
+ /* Don't need to divide ref unless parent_rate > (output freq / 16) */
+ clockman_write(clockman, data->cs_reg,
+ clockman_read(clockman, data->cs_reg) |
+ PLL_CS_REFDIV_MASK);
+ spin_unlock(&clockman->regs_lock);
+
+ return 0;
+}
+
+static unsigned long rp1_pll_core_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *pll_core = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_core->clockman;
+ const struct rp1_pll_core_data *data = pll_core->data;
+ u32 fbdiv_int, fbdiv_frac;
+ unsigned long calc_rate;
+ u64 shifted_fbdiv_int;
+
+ fbdiv_int = clockman_read(clockman, data->fbdiv_int_reg);
+ fbdiv_frac = clockman_read(clockman, data->fbdiv_frac_reg);
+
+ shifted_fbdiv_int = (u64)fbdiv_int << 24;
+ calc_rate = (u64)parent_rate * (shifted_fbdiv_int + fbdiv_frac);
+ calc_rate += BIT(23);
+ calc_rate >>= 24;
+
+ return calc_rate;
+}
+
+static int rp1_pll_core_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ u32 fbdiv_int, fbdiv_frac;
+
+ req->rate = get_pll_core_divider(hw, req->rate, req->best_parent_rate,
+ &fbdiv_int,
+ &fbdiv_frac);
+
+ return 0;
+}
+
+static void get_pll_prim_dividers(unsigned long rate, unsigned long parent_rate,
+ u32 *divider1, u32 *divider2)
+{
+ unsigned int div1, div2;
+ unsigned int best_div1 = 7, best_div2 = 7;
+ unsigned long best_rate_diff =
+ abs_diff(DIV_ROUND_CLOSEST(parent_rate, best_div1 * best_div2), rate);
+ unsigned long rate_diff, calc_rate;
+
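+	/* Exhaustively search div1/div2 in 1..7 (div2 <= div1) for the closest achievable rate. */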
+ for (div1 = 1; div1 <= 7; div1++) {
+ for (div2 = 1; div2 <= div1; div2++) {
+ calc_rate = DIV_ROUND_CLOSEST(parent_rate, div1 * div2);
+ rate_diff = abs_diff(calc_rate, rate);
+
+ if (calc_rate == rate) {
+ best_div1 = div1;
+ best_div2 = div2;
+ goto done;
+ } else if (rate_diff < best_rate_diff) {
+ best_div1 = div1;
+ best_div2 = div2;
+ best_rate_diff = rate_diff;
+ }
+ }
+ }
+
+done:
+ *divider1 = best_div1;
+ *divider2 = best_div2;
+}
+
+static int rp1_pll_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct rp1_clk_desc *pll = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll->clockman;
+ const struct rp1_pll_data *data = pll->data;
+
+ u32 prim, prim_div1, prim_div2;
+
+ get_pll_prim_dividers(rate, parent_rate, &prim_div1, &prim_div2);
+
+ spin_lock(&clockman->regs_lock);
+ prim = clockman_read(clockman, data->ctrl_reg);
+ prim &= ~PLL_PRIM_DIV1_MASK;
+ prim |= FIELD_PREP(PLL_PRIM_DIV1_MASK, prim_div1);
+ prim &= ~PLL_PRIM_DIV2_MASK;
+ prim |= FIELD_PREP(PLL_PRIM_DIV2_MASK, prim_div2);
+ clockman_write(clockman, data->ctrl_reg, prim);
+ spin_unlock(&clockman->regs_lock);
+
+ return 0;
+}
+
+static unsigned long rp1_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *pll = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll->clockman;
+ const struct rp1_pll_data *data = pll->data;
+ u32 prim, prim_div1, prim_div2;
+
+ prim = clockman_read(clockman, data->ctrl_reg);
+ prim_div1 = FIELD_GET(PLL_PRIM_DIV1_MASK, prim);
+ prim_div2 = FIELD_GET(PLL_PRIM_DIV2_MASK, prim);
+
+ if (!prim_div1 || !prim_div2) {
+ dev_err(clockman->dev, "%s: (%s) zero divider value\n",
+ __func__, clk_hw_get_name(hw));
+ return 0;
+ }
+
+ return DIV_ROUND_CLOSEST(parent_rate, prim_div1 * prim_div2);
+}
+
+static int rp1_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw *clk_audio_hw = &clk_audio->hw;
+ u32 div1, div2;
+
+ if (hw == clk_audio_hw && clk_audio->cached_rate == req->rate)
+ req->best_parent_rate = clk_audio_core->cached_rate;
+
+ get_pll_prim_dividers(req->rate, req->best_parent_rate, &div1, &div2);
+
+ req->rate = DIV_ROUND_CLOSEST(req->best_parent_rate, div1 * div2);
+
+ return 0;
+}
+
+static int rp1_pll_ph_is_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *pll_ph = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_ph->clockman;
+ const struct rp1_pll_ph_data *data = pll_ph->data;
+
+ return !!(clockman_read(clockman, data->ph_reg) & PLL_PH_EN);
+}
+
+static int rp1_pll_ph_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *pll_ph = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_ph->clockman;
+ const struct rp1_pll_ph_data *data = pll_ph->data;
+ u32 ph_reg;
+
+ spin_lock(&clockman->regs_lock);
+ ph_reg = clockman_read(clockman, data->ph_reg);
+ ph_reg |= data->phase << PLL_PH_PHASE_SHIFT;
+ ph_reg |= PLL_PH_EN;
+ clockman_write(clockman, data->ph_reg, ph_reg);
+ spin_unlock(&clockman->regs_lock);
+
+ return 0;
+}
+
+static void rp1_pll_ph_off(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *pll_ph = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = pll_ph->clockman;
+ const struct rp1_pll_ph_data *data = pll_ph->data;
+
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->ph_reg,
+ clockman_read(clockman, data->ph_reg) & ~PLL_PH_EN);
+ spin_unlock(&clockman->regs_lock);
+}
+
+static unsigned long rp1_pll_ph_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *pll_ph = container_of(hw, struct rp1_clk_desc, hw);
+ const struct rp1_pll_ph_data *data = pll_ph->data;
+
+ return parent_rate / data->fixed_divider;
+}
+
+static int rp1_pll_ph_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct rp1_clk_desc *pll_ph = container_of(hw, struct rp1_clk_desc, hw);
+ const struct rp1_pll_ph_data *data = pll_ph->data;
+
+ req->rate = req->best_parent_rate / data->fixed_divider;
+
+ return 0;
+}
+
+static int rp1_pll_divider_is_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *divider = container_of(hw, struct rp1_clk_desc, div.hw);
+ struct rp1_clockman *clockman = divider->clockman;
+ const struct rp1_pll_data *data = divider->data;
+
+ return !(clockman_read(clockman, data->ctrl_reg) & PLL_SEC_RST);
+}
+
+static int rp1_pll_divider_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *divider = container_of(hw, struct rp1_clk_desc, div.hw);
+ struct rp1_clockman *clockman = divider->clockman;
+ const struct rp1_pll_data *data = divider->data;
+
+ spin_lock(&clockman->regs_lock);
+ /* Check the implementation bit is set! */
+ WARN_ON(!(clockman_read(clockman, data->ctrl_reg) & PLL_SEC_IMPL));
+ clockman_write(clockman, data->ctrl_reg,
+ clockman_read(clockman, data->ctrl_reg) & ~PLL_SEC_RST);
+ spin_unlock(&clockman->regs_lock);
+
+ return 0;
+}
+
+static void rp1_pll_divider_off(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *divider = container_of(hw, struct rp1_clk_desc, div.hw);
+ struct rp1_clockman *clockman = divider->clockman;
+ const struct rp1_pll_data *data = divider->data;
+
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->ctrl_reg,
+ clockman_read(clockman, data->ctrl_reg) | PLL_SEC_RST);
+ spin_unlock(&clockman->regs_lock);
+}
+
+static int rp1_pll_divider_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *divider = container_of(hw, struct rp1_clk_desc, div.hw);
+ struct rp1_clockman *clockman = divider->clockman;
+ const struct rp1_pll_data *data = divider->data;
+ u32 div, sec;
+
+ div = DIV_ROUND_UP_ULL(parent_rate, rate);
+ div = clamp(div, 8u, 19u);
+
+ spin_lock(&clockman->regs_lock);
+ sec = clockman_read(clockman, data->ctrl_reg);
+ sec &= ~PLL_SEC_DIV_MASK;
+ sec |= FIELD_PREP(PLL_SEC_DIV_MASK, div);
+
+ /* Must keep the divider in reset to change the value. */
+ sec |= PLL_SEC_RST;
+ clockman_write(clockman, data->ctrl_reg, sec);
+
+	/* Busy-wait for 10 PLL VCO cycles; we hold a spinlock, so no sleeping. */
+ ndelay(div64_ul(10ULL * div * NSEC_PER_SEC, parent_rate));
+
+ sec &= ~PLL_SEC_RST;
+ clockman_write(clockman, data->ctrl_reg, sec);
+ spin_unlock(&clockman->regs_lock);
+
+ return 0;
+}
+
+static unsigned long rp1_pll_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static int rp1_pll_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+	/* clk_divider_ops.determine_rate() fills in req->rate itself. */
+	return clk_divider_ops.determine_rate(hw, req);
+}
+
+static int rp1_clock_is_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+
+ return !!(clockman_read(clockman, data->ctrl_reg) & CLK_CTRL_ENABLE);
+}
+
+static unsigned long rp1_clock_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+ u64 calc_rate;
+ u64 div;
+ u32 frac;
+
+ div = clockman_read(clockman, data->div_int_reg);
+ frac = (data->div_frac_reg != 0) ?
+ clockman_read(clockman, data->div_frac_reg) : 0;
+
+ /* If the integer portion of the divider is 0, treat it as 2^16 */
+ if (!div)
+ div = 1 << 16;
+
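+	/* Combine the integer part and the top 16 bits of the fraction into one fixed-point divisor. */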
+ div = (div << CLK_DIV_FRAC_BITS) | (frac >> (32 - CLK_DIV_FRAC_BITS));
+
+ calc_rate = (u64)parent_rate << CLK_DIV_FRAC_BITS;
+ calc_rate = div64_u64(calc_rate, div);
+
+ return calc_rate;
+}
+
+static int rp1_clock_on(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->ctrl_reg,
+ clockman_read(clockman, data->ctrl_reg) | CLK_CTRL_ENABLE);
+ /* If this is a GPCLK, turn on the output-enable */
+ if (data->oe_mask)
+ clockman_write(clockman, GPCLK_OE_CTRL,
+ clockman_read(clockman, GPCLK_OE_CTRL) | data->oe_mask);
+ spin_unlock(&clockman->regs_lock);
+
+ return 0;
+}
+
+static void rp1_clock_off(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+
+ spin_lock(&clockman->regs_lock);
+ clockman_write(clockman, data->ctrl_reg,
+ clockman_read(clockman, data->ctrl_reg) & ~CLK_CTRL_ENABLE);
+ /* If this is a GPCLK, turn off the output-enable */
+ if (data->oe_mask)
+ clockman_write(clockman, GPCLK_OE_CTRL,
+ clockman_read(clockman, GPCLK_OE_CTRL) & ~data->oe_mask);
+ spin_unlock(&clockman->regs_lock);
+}
+
+static u32 rp1_clock_choose_div(unsigned long rate, unsigned long parent_rate,
+ const struct rp1_clock_data *data)
+{
+ u64 div;
+
+ /*
+ * Due to earlier rounding, calculated parent_rate may differ from
+ * expected value. Don't fail on a small discrepancy near unity divide.
+ */
+ if (!rate || rate > parent_rate + (parent_rate >> CLK_DIV_FRAC_BITS))
+ return 0;
+
+ /*
+ * Always express div in fixed-point format for fractional division;
+	 * if no fractional divider is present, the fraction part will be zero.
+ */
+ if (data->div_frac_reg) {
+ div = (u64)parent_rate << CLK_DIV_FRAC_BITS;
+ div = DIV_ROUND_CLOSEST_ULL(div, rate);
+ } else {
+ div = DIV_ROUND_CLOSEST_ULL(parent_rate, rate);
+ div <<= CLK_DIV_FRAC_BITS;
+ }
+
+ div = clamp(div,
+ 1ull << CLK_DIV_FRAC_BITS,
+ (u64)data->div_int_max << CLK_DIV_FRAC_BITS);
+
+ return div;
+}
+
+static u8 rp1_clock_get_parent(struct clk_hw *hw)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+ u32 sel, ctrl;
+ u8 parent;
+
+ /* Sel is one-hot, so find the first bit set */
+ sel = clockman_read(clockman, data->sel_reg);
+ parent = ffs(sel) - 1;
+
+ /* sel == 0 implies the parent clock is not enabled yet. */
+ if (!sel) {
+ /* Read the clock src from the CTRL register instead */
+ ctrl = clockman_read(clockman, data->ctrl_reg);
+ parent = (ctrl & data->clk_src_mask) >> CLK_CTRL_SRC_SHIFT;
+ }
+
+ if (parent >= data->num_std_parents)
+ parent = AUX_SEL;
+
+ if (parent == AUX_SEL) {
+ /*
+ * Clock parent is an auxiliary source, so get the parent from
+ * the AUXSRC register field.
+ */
+ ctrl = clockman_read(clockman, data->ctrl_reg);
+ parent = FIELD_GET(CLK_CTRL_AUXSRC_MASK, ctrl);
+ parent += data->num_std_parents;
+ }
+
+ return parent;
+}
+
+static int rp1_clock_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+ u32 ctrl, sel;
+
+ spin_lock(&clockman->regs_lock);
+ ctrl = clockman_read(clockman, data->ctrl_reg);
+
+ if (index >= data->num_std_parents) {
+ /* This is an aux source request */
+ if (index >= data->num_std_parents + data->num_aux_parents) {
+ spin_unlock(&clockman->regs_lock);
+ return -EINVAL;
+ }
+
+ /* Select parent from aux list */
+ ctrl &= ~CLK_CTRL_AUXSRC_MASK;
+ ctrl |= FIELD_PREP(CLK_CTRL_AUXSRC_MASK, index - data->num_std_parents);
+ /* Set src to aux list */
+ ctrl &= ~data->clk_src_mask;
+ ctrl |= (AUX_SEL << CLK_CTRL_SRC_SHIFT) & data->clk_src_mask;
+ } else {
+ ctrl &= ~data->clk_src_mask;
+ ctrl |= (index << CLK_CTRL_SRC_SHIFT) & data->clk_src_mask;
+ }
+
+ clockman_write(clockman, data->ctrl_reg, ctrl);
+ spin_unlock(&clockman->regs_lock);
+
+ sel = rp1_clock_get_parent(hw);
+ if (sel != index)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int rp1_clock_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u8 parent)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ struct rp1_clockman *clockman = clock->clockman;
+ const struct rp1_clock_data *data = clock->data;
+ u32 div = rp1_clock_choose_div(rate, parent_rate, data);
+
+ spin_lock(&clockman->regs_lock);
+
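+	/* div is 16.16 fixed point: integer part to DIV_INT, fraction (where implemented) to the top of DIV_FRAC. */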
+ clockman_write(clockman, data->div_int_reg, div >> CLK_DIV_FRAC_BITS);
+ if (data->div_frac_reg)
+ clockman_write(clockman, data->div_frac_reg, div << (32 - CLK_DIV_FRAC_BITS));
+
+ spin_unlock(&clockman->regs_lock);
+
+ if (parent != 0xff)
+ return rp1_clock_set_parent(hw, parent);
+
+ return 0;
+}
+
+static int rp1_clock_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return rp1_clock_set_rate_and_parent(hw, rate, parent_rate, 0xff);
+}
+
+static unsigned long calc_core_pll_rate(struct clk_hw *pll_hw,
+ unsigned long target_rate,
+ int *pdiv_prim, int *pdiv_clk)
+{
+ static const int prim_divs[] = {
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 15, 16,
+ 18, 20, 21, 24, 25, 28, 30, 35, 36, 42, 49,
+ };
+ const unsigned long xosc_rate = clk_hw_get_rate(clk_xosc);
+ const unsigned long core_min = xosc_rate * 16;
+ const unsigned long core_max = 2400000000;
+ int best_div_prim = 1, best_div_clk = 1;
+ unsigned long best_rate = core_max + 1;
+ unsigned long core_rate = 0;
+ int div_int, div_frac;
+ u64 div;
+ int i;
+
+ /* Given the target rate, choose a set of divisors/multipliers */
+ for (i = 0; i < ARRAY_SIZE(prim_divs); i++) {
+ int div_prim = prim_divs[i];
+ int div_clk;
+
+ for (div_clk = 1; div_clk <= 256; div_clk++) {
+ core_rate = target_rate * div_clk * div_prim;
+ if (core_rate >= core_min) {
+ if (core_rate < best_rate) {
+ best_rate = core_rate;
+ best_div_prim = div_prim;
+ best_div_clk = div_clk;
+ }
+ break;
+ }
+ }
+ }
+
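+	/* Convert the chosen VCO target into feedback-divider terms and recompute the exact achievable rate. */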
+ if (best_rate < core_max) {
+		/* Use 64-bit arithmetic so the 8.24 intermediates cannot overflow on 32-bit. */
+		div = div64_ul(((u64)best_rate << 24) + xosc_rate / 2, xosc_rate);
+		div_int = div >> 24;
+		div_frac = div % (1 << 24);
+		core_rate = ((u64)xosc_rate * (((u64)div_int << 24) + div_frac) + BIT(23)) >> 24;
+ } else {
+ core_rate = 0;
+ }
+
+ if (pdiv_prim)
+ *pdiv_prim = best_div_prim;
+ if (pdiv_clk)
+ *pdiv_clk = best_div_clk;
+
+ return core_rate;
+}
+
+static void rp1_clock_choose_div_and_prate(struct clk_hw *hw,
+ int parent_idx,
+ unsigned long rate,
+ unsigned long *prate,
+ unsigned long *calc_rate)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+ const struct rp1_clock_data *data = clock->data;
+ struct clk_hw *clk_audio_hw = &clk_audio->hw;
+ struct clk_hw *clk_i2s_hw = &clk_i2s->hw;
+ struct clk_hw *parent;
+ u32 div;
+ u64 tmp;
+
+ parent = clk_hw_get_parent_by_index(hw, parent_idx);
+
+ if (hw == clk_i2s_hw && clk_i2s->cached_rate == rate && parent == clk_audio_hw) {
+ *prate = clk_audio->cached_rate;
+ *calc_rate = rate;
+ return;
+ }
+
+ if (hw == clk_i2s_hw && parent == clk_audio_hw) {
+ unsigned long core_rate, audio_rate, i2s_rate;
+ int div_prim, div_clk;
+
+ core_rate = calc_core_pll_rate(parent, rate, &div_prim, &div_clk);
+ audio_rate = DIV_ROUND_CLOSEST(core_rate, div_prim);
+ i2s_rate = DIV_ROUND_CLOSEST(audio_rate, div_clk);
+ clk_audio_core->cached_rate = core_rate;
+ clk_audio->cached_rate = audio_rate;
+ clk_i2s->cached_rate = i2s_rate;
+ *prate = audio_rate;
+ *calc_rate = i2s_rate;
+ return;
+ }
+
+ *prate = clk_hw_get_rate(parent);
+ div = rp1_clock_choose_div(rate, *prate, data);
+
+ if (!div) {
+ *calc_rate = 0;
+ return;
+ }
+
+ /* Recalculate to account for rounding errors */
+ tmp = (u64)*prate << CLK_DIV_FRAC_BITS;
+ tmp = div_u64(tmp, div);
+
+ /*
+ * Prevent overclocks - if all parent choices result in
+ * a downstream clock in excess of the maximum, then the
+ * call to set the clock will fail.
+ */
+ if (tmp > data->max_freq)
+ *calc_rate = 0;
+ else
+ *calc_rate = tmp;
+}
+
+static int rp1_clock_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw *parent, *best_parent = NULL;
+ unsigned long best_rate = 0;
+ unsigned long best_prate = 0;
+ unsigned long best_rate_diff = ULONG_MAX;
+ unsigned long prate, calc_rate;
+ size_t i;
+
+ /*
+	 * If the NO_REPARENT flag is set, try to use the existing parent.
+ */
+ if ((clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT)) {
+ i = rp1_clock_get_parent(hw);
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (parent) {
+ rp1_clock_choose_div_and_prate(hw, i, req->rate, &prate,
+ &calc_rate);
+ if (calc_rate > 0) {
+ req->best_parent_hw = parent;
+ req->best_parent_rate = prate;
+ req->rate = calc_rate;
+ return 0;
+ }
+ }
+ }
+
+ /*
+ * Select parent clock that results in the closest rate (lower or
+ * higher)
+ */
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (!parent)
+ continue;
+
+ rp1_clock_choose_div_and_prate(hw, i, req->rate, &prate,
+ &calc_rate);
+
+ if (abs_diff(calc_rate, req->rate) < best_rate_diff) {
+ best_parent = parent;
+ best_prate = prate;
+ best_rate = calc_rate;
+ best_rate_diff = abs_diff(calc_rate, req->rate);
+
+ if (best_rate_diff == 0)
+ break;
+ }
+ }
+
+ if (best_rate == 0)
+ return -EINVAL;
+
+ req->best_parent_hw = best_parent;
+ req->best_parent_rate = best_prate;
+ req->rate = best_rate;
+
+ return 0;
+}
+
+static int rp1_varsrc_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+
+ /*
+ * "varsrc" exists purely to let clock dividers know the frequency
+ * of an externally-managed clock source (such as MIPI DSI byte-clock)
+	 * which may change at run-time as a side-effect of some other driver's
+	 * activity.
+ */
+ clock->cached_rate = rate;
+ return 0;
+}
+
+static unsigned long rp1_varsrc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+
+ return clock->cached_rate;
+}
+
+static int rp1_varsrc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
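+	/* The rate is dictated by an external source (see rp1_varsrc_set_rate), so accept the request unmodified. */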
+ return 0;
+}
+
+static const struct clk_ops rp1_pll_core_ops = {
+ .is_prepared = rp1_pll_core_is_on,
+ .prepare = rp1_pll_core_on,
+ .unprepare = rp1_pll_core_off,
+ .set_rate = rp1_pll_core_set_rate,
+ .recalc_rate = rp1_pll_core_recalc_rate,
+ .determine_rate = rp1_pll_core_determine_rate,
+};
+
+static const struct clk_ops rp1_pll_ops = {
+ .set_rate = rp1_pll_set_rate,
+ .recalc_rate = rp1_pll_recalc_rate,
+ .determine_rate = rp1_pll_determine_rate,
+};
+
+static const struct clk_ops rp1_pll_ph_ops = {
+ .is_prepared = rp1_pll_ph_is_on,
+ .prepare = rp1_pll_ph_on,
+ .unprepare = rp1_pll_ph_off,
+ .recalc_rate = rp1_pll_ph_recalc_rate,
+ .determine_rate = rp1_pll_ph_determine_rate,
+};
+
+static const struct clk_ops rp1_pll_divider_ops = {
+ .is_prepared = rp1_pll_divider_is_on,
+ .prepare = rp1_pll_divider_on,
+ .unprepare = rp1_pll_divider_off,
+ .set_rate = rp1_pll_divider_set_rate,
+ .recalc_rate = rp1_pll_divider_recalc_rate,
+ .determine_rate = rp1_pll_divider_determine_rate,
+};
+
+static const struct clk_ops rp1_clk_ops = {
+ .is_prepared = rp1_clock_is_on,
+ .prepare = rp1_clock_on,
+ .unprepare = rp1_clock_off,
+ .recalc_rate = rp1_clock_recalc_rate,
+ .get_parent = rp1_clock_get_parent,
+ .set_parent = rp1_clock_set_parent,
+ .set_rate_and_parent = rp1_clock_set_rate_and_parent,
+ .set_rate = rp1_clock_set_rate,
+ .determine_rate = rp1_clock_determine_rate,
+};
+
+static const struct clk_ops rp1_varsrc_ops = {
+ .set_rate = rp1_varsrc_set_rate,
+ .recalc_rate = rp1_varsrc_recalc_rate,
+ .determine_rate = rp1_varsrc_determine_rate,
+};
+
+static struct clk_hw *rp1_register_pll(struct rp1_clockman *clockman,
+ struct rp1_clk_desc *desc)
+{
+ int ret;
+
+ desc->clockman = clockman;
+
+ ret = devm_clk_hw_register(clockman->dev, &desc->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &desc->hw;
+}
+
+static struct clk_hw *rp1_register_pll_divider(struct rp1_clockman *clockman,
+ struct rp1_clk_desc *desc)
+{
+ const struct rp1_pll_data *divider_data = desc->data;
+ int ret;
+
+ desc->div.reg = clockman->regs + divider_data->ctrl_reg;
+ desc->div.shift = __ffs(PLL_SEC_DIV_MASK);
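+	/* Field width = number of contiguous mask bits once shifted down to bit 0. */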
+ desc->div.width = __ffs(~(PLL_SEC_DIV_MASK >> desc->div.shift));
+ desc->div.flags = CLK_DIVIDER_ROUND_CLOSEST;
+ desc->div.lock = &clockman->regs_lock;
+ desc->div.hw.init = desc->hw.init;
+ desc->div.table = pll_sec_div_table;
+
+ desc->clockman = clockman;
+
+ ret = devm_clk_hw_register(clockman->dev, &desc->div.hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &desc->div.hw;
+}
+
+static struct clk_hw *rp1_register_clock(struct rp1_clockman *clockman,
+ struct rp1_clk_desc *desc)
+{
+ const struct rp1_clock_data *clock_data = desc->data;
+ int ret;
+
+ if (WARN_ON_ONCE(MAX_CLK_PARENTS <
+ clock_data->num_std_parents + clock_data->num_aux_parents))
+ return ERR_PTR(-EINVAL);
+
+ /* There must be a gap for the AUX selector */
+ if (WARN_ON_ONCE(clock_data->num_std_parents > AUX_SEL &&
+ desc->hw.init->parent_data[AUX_SEL].index != -1))
+ return ERR_PTR(-EINVAL);
+
+ desc->clockman = clockman;
+
+ ret = devm_clk_hw_register(clockman->dev, &desc->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &desc->hw;
+}
+
+/* Assignment helper macros for different clock types. */
+#define _REGISTER(f, ...) { .clk_register = f, __VA_ARGS__ }
+
+#define CLK_DATA(type, ...) .data = &(struct type) { __VA_ARGS__ }
+
+#define REGISTER_PLL(...) _REGISTER(&rp1_register_pll, \
+ __VA_ARGS__)
+
+#define REGISTER_PLL_DIV(...) _REGISTER(&rp1_register_pll_divider, \
+ __VA_ARGS__)
+
+#define REGISTER_CLK(...) _REGISTER(&rp1_register_clock, \
+ __VA_ARGS__)
+
+static struct rp1_clk_desc pll_sys_core_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_sys_core",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_pll_core_ops,
+ CLK_IS_CRITICAL
+ ),
+ CLK_DATA(rp1_pll_core_data,
+ .cs_reg = PLL_SYS_CS,
+ .pwr_reg = PLL_SYS_PWR,
+ .fbdiv_int_reg = PLL_SYS_FBDIV_INT,
+ .fbdiv_frac_reg = PLL_SYS_FBDIV_FRAC,
+ )
+);
+
+static struct rp1_clk_desc pll_audio_core_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_core",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_pll_core_ops,
+ CLK_IS_CRITICAL
+ ),
+ CLK_DATA(rp1_pll_core_data,
+ .cs_reg = PLL_AUDIO_CS,
+ .pwr_reg = PLL_AUDIO_PWR,
+ .fbdiv_int_reg = PLL_AUDIO_FBDIV_INT,
+ .fbdiv_frac_reg = PLL_AUDIO_FBDIV_FRAC,
+ )
+);
+
+static struct rp1_clk_desc pll_video_core_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video_core",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_pll_core_ops,
+ CLK_IS_CRITICAL
+ ),
+ CLK_DATA(rp1_pll_core_data,
+ .cs_reg = PLL_VIDEO_CS,
+ .pwr_reg = PLL_VIDEO_PWR,
+ .fbdiv_int_reg = PLL_VIDEO_FBDIV_INT,
+ .fbdiv_frac_reg = PLL_VIDEO_FBDIV_FRAC,
+ )
+);
+
+static struct rp1_clk_desc pll_sys_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_sys",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_sys_core_desc.hw }
+ },
+ &rp1_pll_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_SYS_PRIM,
+ .fc0_src = FC_NUM(0, 2),
+ )
+);
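+	/* Most recently computed rate, used to coordinate the audio PLL chain and by variable sources. */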
+
+static struct rp1_clk_desc pll_audio_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_core_desc.hw }
+ },
+ &rp1_pll_ops,
+ CLK_SET_RATE_PARENT
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_AUDIO_PRIM,
+ .fc0_src = FC_NUM(4, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_video_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_video_core_desc.hw }
+ },
+ &rp1_pll_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_VIDEO_PRIM,
+ .fc0_src = FC_NUM(3, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_sys_sec_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_sys_sec",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_sys_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_SYS_SEC,
+ .fc0_src = FC_NUM(2, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_video_sec_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video_sec",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_video_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_VIDEO_SEC,
+ .fc0_src = FC_NUM(5, 3),
+ )
+);
+
+static const struct clk_parent_data clk_eth_tsu_parents[] = {
+ { .index = 0 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_eth_tsu_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_eth_tsu",
+ clk_eth_tsu_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = CLK_ETH_TSU_CTRL,
+ .div_int_reg = CLK_ETH_TSU_DIV_INT,
+ .sel_reg = CLK_ETH_TSU_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 7),
+ )
+);
+
+static const struct clk_parent_data clk_eth_parents[] = {
+ { .hw = &pll_sys_sec_desc.div.hw },
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+};
+
+static struct rp1_clk_desc clk_eth_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_eth",
+ clk_eth_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_ETH_CTRL,
+ .div_int_reg = CLK_ETH_DIV_INT,
+ .sel_reg = CLK_ETH_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 125 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 6),
+ )
+);
+
+static const struct clk_parent_data clk_sys_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .hw = &pll_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_sys_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_sys",
+ clk_sys_parents,
+ &rp1_clk_ops,
+ CLK_IS_CRITICAL
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 3,
+ .num_aux_parents = 0,
+ .ctrl_reg = CLK_SYS_CTRL,
+ .div_int_reg = CLK_SYS_DIV_INT,
+ .sel_reg = CLK_SYS_SEL,
+ .div_int_max = DIV_INT_24BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(0, 4),
+ .clk_src_mask = 0x3,
+ )
+);
+
+static struct rp1_clk_desc pll_sys_pri_ph_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_sys_pri_ph",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_sys_desc.hw }
+ },
+ &rp1_pll_ph_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_ph_data,
+ .ph_reg = PLL_SYS_PRIM,
+ .fixed_divider = 2,
+ .phase = RP1_PLL_PHASE_0,
+ .fc0_src = FC_NUM(1, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_audio_pri_ph_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_pri_ph",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_desc.hw }
+ },
+ &rp1_pll_ph_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_ph_data,
+ .ph_reg = PLL_AUDIO_PRIM,
+ .fixed_divider = 2,
+ .phase = RP1_PLL_PHASE_0,
+ .fc0_src = FC_NUM(5, 1),
+ )
+);
+
+static struct rp1_clk_desc pll_video_pri_ph_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video_pri_ph",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_video_desc.hw }
+ },
+ &rp1_pll_ph_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_ph_data,
+ .ph_reg = PLL_VIDEO_PRIM,
+ .fixed_divider = 2,
+ .phase = RP1_PLL_PHASE_0,
+ .fc0_src = FC_NUM(4, 3),
+ )
+);
+
+static struct rp1_clk_desc pll_audio_sec_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_sec",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_AUDIO_SEC,
+ .fc0_src = FC_NUM(6, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_audio_tern_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_tern",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_AUDIO_TERN,
+ .fc0_src = FC_NUM(6, 2),
+ )
+);
+
+static struct rp1_clk_desc clk_slow_sys_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_slow_sys",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ CLK_IS_CRITICAL
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 1,
+ .num_aux_parents = 0,
+ .ctrl_reg = CLK_SLOW_SYS_CTRL,
+ .div_int_reg = CLK_SLOW_SYS_DIV_INT,
+ .sel_reg = CLK_SLOW_SYS_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(1, 4),
+ .clk_src_mask = 0x1,
+ )
+);
+
+static const struct clk_parent_data clk_dma_parents[] = {
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_dma_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_dma",
+ clk_dma_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_DMA_CTRL,
+ .div_int_reg = CLK_DMA_DIV_INT,
+ .sel_reg = CLK_DMA_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(2, 2),
+ )
+);
+
+static const struct clk_parent_data clk_uart_parents[] = {
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_uart_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_uart",
+ clk_uart_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_UART_CTRL,
+ .div_int_reg = CLK_UART_DIV_INT,
+ .sel_reg = CLK_UART_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(6, 7),
+ )
+);
+
+static const struct clk_parent_data clk_pwm0_parents[] = {
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_pwm0_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_pwm0",
+ clk_pwm0_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_PWM0_CTRL,
+ .div_int_reg = CLK_PWM0_DIV_INT,
+ .div_frac_reg = CLK_PWM0_DIV_FRAC,
+ .sel_reg = CLK_PWM0_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 76800 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(0, 5),
+ )
+);
+
+static const struct clk_parent_data clk_pwm1_parents[] = {
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_pwm1_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_pwm1",
+ clk_pwm1_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_PWM1_CTRL,
+ .div_int_reg = CLK_PWM1_DIV_INT,
+ .div_frac_reg = CLK_PWM1_DIV_FRAC,
+ .sel_reg = CLK_PWM1_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 76800 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(1, 5),
+ )
+);
+
+static const struct clk_parent_data clk_audio_in_parents[] = {
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_audio_in_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_audio_in",
+ clk_audio_in_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 5,
+ .ctrl_reg = CLK_AUDIO_IN_CTRL,
+ .div_int_reg = CLK_AUDIO_IN_DIV_INT,
+ .sel_reg = CLK_AUDIO_IN_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 76800 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(2, 5),
+ )
+);
+
+static const struct clk_parent_data clk_audio_out_parents[] = {
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_audio_out_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_audio_out",
+ clk_audio_out_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 4,
+ .ctrl_reg = CLK_AUDIO_OUT_CTRL,
+ .div_int_reg = CLK_AUDIO_OUT_DIV_INT,
+ .sel_reg = CLK_AUDIO_OUT_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 153600 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(3, 5),
+ )
+);
+
+static const struct clk_parent_data clk_i2s_parents[] = {
+ { .index = 0 },
+ { .hw = &pll_audio_desc.hw },
+ { .hw = &pll_audio_sec_desc.hw },
+};
+
+static struct rp1_clk_desc clk_i2s_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_i2s",
+ clk_i2s_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_PARENT
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_I2S_CTRL,
+ .div_int_reg = CLK_I2S_DIV_INT,
+ .sel_reg = CLK_I2S_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 4),
+ )
+);
+
+static struct rp1_clk_desc clk_mipi0_cfg_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi0_cfg",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_MIPI0_CFG_CTRL,
+ .div_int_reg = CLK_MIPI0_CFG_DIV_INT,
+ .sel_reg = CLK_MIPI0_CFG_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 5),
+ )
+);
+
+static struct rp1_clk_desc clk_mipi1_cfg_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi1_cfg",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_MIPI1_CFG_CTRL,
+ .div_int_reg = CLK_MIPI1_CFG_DIV_INT,
+ .sel_reg = CLK_MIPI1_CFG_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 6),
+ .clk_src_mask = 0x1,
+ )
+);
+
+static struct rp1_clk_desc clk_adc_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_adc",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_ADC_CTRL,
+ .div_int_reg = CLK_ADC_DIV_INT,
+ .sel_reg = CLK_ADC_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 5),
+ )
+);
+
+static struct rp1_clk_desc clk_sdio_timer_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_sdio_timer",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_SDIO_TIMER_CTRL,
+ .div_int_reg = CLK_SDIO_TIMER_DIV_INT,
+ .sel_reg = CLK_SDIO_TIMER_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(3, 4),
+ )
+);
+
+static struct rp1_clk_desc clk_sdio_alt_src_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_sdio_alt_src",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_sys_desc.hw }
+ },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_SDIO_ALT_SRC_CTRL,
+ .div_int_reg = CLK_SDIO_ALT_SRC_DIV_INT,
+ .sel_reg = CLK_SDIO_ALT_SRC_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 4),
+ )
+);
+
+static const struct clk_parent_data clk_dpi_parents[] = {
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_dpi_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_dpi",
+ clk_dpi_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let DPI driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_DPI_CTRL,
+ .div_int_reg = VIDEO_CLK_DPI_DIV_INT,
+ .sel_reg = VIDEO_CLK_DPI_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(1, 6),
+ )
+);
+
+static const struct clk_parent_data clk_gp0_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_sys_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_i2s_desc.hw },
+ { .hw = &clk_adc_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_gp0_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp0",
+ clk_gp0_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(0),
+ .ctrl_reg = CLK_GP0_CTRL,
+ .div_int_reg = CLK_GP0_DIV_INT,
+ .div_frac_reg = CLK_GP0_DIV_FRAC,
+ .sel_reg = CLK_GP0_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(0, 1),
+ )
+);
+
+static const struct clk_parent_data clk_gp1_parents[] = {
+ { .hw = &clk_sdio_timer_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_adc_desc.hw },
+ { .hw = &clk_dpi_desc.hw },
+ { .hw = &clk_pwm0_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_gp1_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp1",
+ clk_gp1_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(1),
+ .ctrl_reg = CLK_GP1_CTRL,
+ .div_int_reg = CLK_GP1_DIV_INT,
+ .div_frac_reg = CLK_GP1_DIV_FRAC,
+ .sel_reg = CLK_GP1_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(1, 1),
+ )
+);
+
+static struct rp1_clk_desc clksrc_mipi0_dsi_byteclk_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clksrc_mipi0_dsi_byteclk",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_varsrc_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 1,
+ .num_aux_parents = 0,
+ )
+);
+
+static struct rp1_clk_desc clksrc_mipi1_dsi_byteclk_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clksrc_mipi1_dsi_byteclk",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_varsrc_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 1,
+ .num_aux_parents = 0,
+ )
+);
+
+static const struct clk_parent_data clk_mipi0_dpi_parents[] = {
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .hw = &clksrc_mipi0_dsi_byteclk_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_mipi0_dpi_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi0_dpi",
+ clk_mipi0_dpi_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let DSI driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_MIPI0_DPI_CTRL,
+ .div_int_reg = VIDEO_CLK_MIPI0_DPI_DIV_INT,
+ .div_frac_reg = VIDEO_CLK_MIPI0_DPI_DIV_FRAC,
+ .sel_reg = VIDEO_CLK_MIPI0_DPI_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(2, 6),
+ )
+);
+
+static const struct clk_parent_data clk_mipi1_dpi_parents[] = {
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .hw = &clksrc_mipi1_dsi_byteclk_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_mipi1_dpi_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi1_dpi",
+ clk_mipi1_dpi_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let DSI driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_MIPI1_DPI_CTRL,
+ .div_int_reg = VIDEO_CLK_MIPI1_DPI_DIV_INT,
+ .div_frac_reg = VIDEO_CLK_MIPI1_DPI_DIV_FRAC,
+ .sel_reg = VIDEO_CLK_MIPI1_DPI_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(3, 6),
+ )
+);
+
+static const struct clk_parent_data clk_gp2_parents[] = {
+ { .hw = &clk_sdio_alt_src_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_sys_sec_desc.hw },
+ { .index = -1 },
+ { .hw = &pll_video_desc.hw },
+ { .hw = &clk_audio_in_desc.hw },
+ { .hw = &clk_dpi_desc.hw },
+ { .hw = &clk_pwm0_desc.hw },
+ { .hw = &clk_pwm1_desc.hw },
+ { .hw = &clk_mipi0_dpi_desc.hw },
+ { .hw = &clk_mipi1_cfg_desc.hw },
+ { .hw = &clk_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_gp2_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp2",
+ clk_gp2_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(2),
+ .ctrl_reg = CLK_GP2_CTRL,
+ .div_int_reg = CLK_GP2_DIV_INT,
+ .div_frac_reg = CLK_GP2_DIV_FRAC,
+ .sel_reg = CLK_GP2_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(2, 1),
+ )
+);
+
+static const struct clk_parent_data clk_gp3_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_pri_ph_desc.hw },
+ { .hw = &clk_audio_out_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_mipi1_dpi_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_gp3_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp3",
+ clk_gp3_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(3),
+ .ctrl_reg = CLK_GP3_CTRL,
+ .div_int_reg = CLK_GP3_DIV_INT,
+ .div_frac_reg = CLK_GP3_DIV_FRAC,
+ .sel_reg = CLK_GP3_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(3, 1),
+ )
+);
+
+static const struct clk_parent_data clk_gp4_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_mipi0_cfg_desc.hw },
+ { .hw = &clk_uart_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_gp4_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp4",
+ clk_gp4_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(4),
+ .ctrl_reg = CLK_GP4_CTRL,
+ .div_int_reg = CLK_GP4_DIV_INT,
+ .div_frac_reg = CLK_GP4_DIV_FRAC,
+ .sel_reg = CLK_GP4_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 1),
+ )
+);
+
+static const struct clk_parent_data clk_vec_parents[] = {
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_vec_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_vec",
+ clk_vec_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let VEC driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_VEC_CTRL,
+ .div_int_reg = VIDEO_CLK_VEC_DIV_INT,
+ .sel_reg = VIDEO_CLK_VEC_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 108 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(0, 6),
+ )
+);
+
+static const struct clk_parent_data clk_gp5_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &clk_eth_tsu_desc.hw },
+ { .index = -1 },
+ { .hw = &clk_vec_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_gp5_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp5",
+ clk_gp5_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(5),
+ .ctrl_reg = CLK_GP5_CTRL,
+ .div_int_reg = CLK_GP5_DIV_INT,
+ .div_frac_reg = CLK_GP5_DIV_FRAC,
+ .sel_reg = CLK_GP5_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 1),
+ )
+);
+
+static struct rp1_clk_desc *const clk_desc_array[] = {
+ [RP1_PLL_SYS_CORE] = &pll_sys_core_desc,
+ [RP1_PLL_AUDIO_CORE] = &pll_audio_core_desc,
+ [RP1_PLL_VIDEO_CORE] = &pll_video_core_desc,
+ [RP1_PLL_SYS] = &pll_sys_desc,
+ [RP1_CLK_ETH_TSU] = &clk_eth_tsu_desc,
+ [RP1_CLK_ETH] = &clk_eth_desc,
+ [RP1_CLK_SYS] = &clk_sys_desc,
+ [RP1_PLL_SYS_PRI_PH] = &pll_sys_pri_ph_desc,
+ [RP1_PLL_SYS_SEC] = &pll_sys_sec_desc,
+ [RP1_PLL_AUDIO] = &pll_audio_desc,
+ [RP1_PLL_VIDEO] = &pll_video_desc,
+ [RP1_PLL_AUDIO_PRI_PH] = &pll_audio_pri_ph_desc,
+ [RP1_PLL_VIDEO_PRI_PH] = &pll_video_pri_ph_desc,
+ [RP1_PLL_AUDIO_SEC] = &pll_audio_sec_desc,
+ [RP1_PLL_VIDEO_SEC] = &pll_video_sec_desc,
+ [RP1_PLL_AUDIO_TERN] = &pll_audio_tern_desc,
+ [RP1_CLK_SLOW_SYS] = &clk_slow_sys_desc,
+ [RP1_CLK_DMA] = &clk_dma_desc,
+ [RP1_CLK_UART] = &clk_uart_desc,
+ [RP1_CLK_PWM0] = &clk_pwm0_desc,
+ [RP1_CLK_PWM1] = &clk_pwm1_desc,
+ [RP1_CLK_AUDIO_IN] = &clk_audio_in_desc,
+ [RP1_CLK_AUDIO_OUT] = &clk_audio_out_desc,
+ [RP1_CLK_I2S] = &clk_i2s_desc,
+ [RP1_CLK_MIPI0_CFG] = &clk_mipi0_cfg_desc,
+ [RP1_CLK_MIPI1_CFG] = &clk_mipi1_cfg_desc,
+ [RP1_CLK_ADC] = &clk_adc_desc,
+ [RP1_CLK_SDIO_TIMER] = &clk_sdio_timer_desc,
+ [RP1_CLK_SDIO_ALT_SRC] = &clk_sdio_alt_src_desc,
+ [RP1_CLK_GP0] = &clk_gp0_desc,
+ [RP1_CLK_GP1] = &clk_gp1_desc,
+ [RP1_CLK_GP2] = &clk_gp2_desc,
+ [RP1_CLK_GP3] = &clk_gp3_desc,
+ [RP1_CLK_GP4] = &clk_gp4_desc,
+ [RP1_CLK_GP5] = &clk_gp5_desc,
+ [RP1_CLK_VEC] = &clk_vec_desc,
+ [RP1_CLK_DPI] = &clk_dpi_desc,
+ [RP1_CLK_MIPI0_DPI] = &clk_mipi0_dpi_desc,
+ [RP1_CLK_MIPI1_DPI] = &clk_mipi1_dpi_desc,
+ [RP1_CLK_MIPI0_DSI_BYTECLOCK] = &clksrc_mipi0_dsi_byteclk_desc,
+ [RP1_CLK_MIPI1_DSI_BYTECLOCK] = &clksrc_mipi1_dsi_byteclk_desc,
+};
+
+static const struct regmap_range rp1_reg_ranges[] = {
+ regmap_reg_range(PLL_SYS_CS, PLL_SYS_SEC),
+ regmap_reg_range(PLL_AUDIO_CS, PLL_AUDIO_TERN),
+ regmap_reg_range(PLL_VIDEO_CS, PLL_VIDEO_SEC),
+ regmap_reg_range(GPCLK_OE_CTRL, GPCLK_OE_CTRL),
+ regmap_reg_range(CLK_SYS_CTRL, CLK_SYS_DIV_INT),
+ regmap_reg_range(CLK_SYS_SEL, CLK_SYS_SEL),
+ regmap_reg_range(CLK_SLOW_SYS_CTRL, CLK_SLOW_SYS_DIV_INT),
+ regmap_reg_range(CLK_SLOW_SYS_SEL, CLK_SLOW_SYS_SEL),
+ regmap_reg_range(CLK_DMA_CTRL, CLK_DMA_DIV_INT),
+ regmap_reg_range(CLK_DMA_SEL, CLK_DMA_SEL),
+ regmap_reg_range(CLK_UART_CTRL, CLK_UART_DIV_INT),
+ regmap_reg_range(CLK_UART_SEL, CLK_UART_SEL),
+ regmap_reg_range(CLK_ETH_CTRL, CLK_ETH_DIV_INT),
+ regmap_reg_range(CLK_ETH_SEL, CLK_ETH_SEL),
+ regmap_reg_range(CLK_PWM0_CTRL, CLK_PWM0_SEL),
+ regmap_reg_range(CLK_PWM1_CTRL, CLK_PWM1_SEL),
+ regmap_reg_range(CLK_AUDIO_IN_CTRL, CLK_AUDIO_IN_DIV_INT),
+ regmap_reg_range(CLK_AUDIO_IN_SEL, CLK_AUDIO_IN_SEL),
+ regmap_reg_range(CLK_AUDIO_OUT_CTRL, CLK_AUDIO_OUT_DIV_INT),
+ regmap_reg_range(CLK_AUDIO_OUT_SEL, CLK_AUDIO_OUT_SEL),
+ regmap_reg_range(CLK_I2S_CTRL, CLK_I2S_DIV_INT),
+ regmap_reg_range(CLK_I2S_SEL, CLK_I2S_SEL),
+ regmap_reg_range(CLK_MIPI0_CFG_CTRL, CLK_MIPI0_CFG_DIV_INT),
+ regmap_reg_range(CLK_MIPI0_CFG_SEL, CLK_MIPI0_CFG_SEL),
+ regmap_reg_range(CLK_MIPI1_CFG_CTRL, CLK_MIPI1_CFG_DIV_INT),
+ regmap_reg_range(CLK_MIPI1_CFG_SEL, CLK_MIPI1_CFG_SEL),
+ regmap_reg_range(CLK_PCIE_AUX_CTRL, CLK_PCIE_AUX_DIV_INT),
+ regmap_reg_range(CLK_PCIE_AUX_SEL, CLK_PCIE_AUX_SEL),
+ regmap_reg_range(CLK_USBH0_MICROFRAME_CTRL, CLK_USBH0_MICROFRAME_DIV_INT),
+ regmap_reg_range(CLK_USBH0_MICROFRAME_SEL, CLK_USBH0_MICROFRAME_SEL),
+ regmap_reg_range(CLK_USBH1_MICROFRAME_CTRL, CLK_USBH1_MICROFRAME_DIV_INT),
+ regmap_reg_range(CLK_USBH1_MICROFRAME_SEL, CLK_USBH1_MICROFRAME_SEL),
+ regmap_reg_range(CLK_USBH0_SUSPEND_CTRL, CLK_USBH0_SUSPEND_DIV_INT),
+ regmap_reg_range(CLK_USBH0_SUSPEND_SEL, CLK_USBH0_SUSPEND_SEL),
+ regmap_reg_range(CLK_USBH1_SUSPEND_CTRL, CLK_USBH1_SUSPEND_DIV_INT),
+ regmap_reg_range(CLK_USBH1_SUSPEND_SEL, CLK_USBH1_SUSPEND_SEL),
+ regmap_reg_range(CLK_ETH_TSU_CTRL, CLK_ETH_TSU_DIV_INT),
+ regmap_reg_range(CLK_ETH_TSU_SEL, CLK_ETH_TSU_SEL),
+ regmap_reg_range(CLK_ADC_CTRL, CLK_ADC_DIV_INT),
+ regmap_reg_range(CLK_ADC_SEL, CLK_ADC_SEL),
+ regmap_reg_range(CLK_SDIO_TIMER_CTRL, CLK_SDIO_TIMER_DIV_INT),
+ regmap_reg_range(CLK_SDIO_TIMER_SEL, CLK_SDIO_TIMER_SEL),
+ regmap_reg_range(CLK_SDIO_ALT_SRC_CTRL, CLK_SDIO_ALT_SRC_DIV_INT),
+ regmap_reg_range(CLK_SDIO_ALT_SRC_SEL, CLK_SDIO_ALT_SRC_SEL),
+ regmap_reg_range(CLK_GP0_CTRL, CLK_GP0_SEL),
+ regmap_reg_range(CLK_GP1_CTRL, CLK_GP1_SEL),
+ regmap_reg_range(CLK_GP2_CTRL, CLK_GP2_SEL),
+ regmap_reg_range(CLK_GP3_CTRL, CLK_GP3_SEL),
+ regmap_reg_range(CLK_GP4_CTRL, CLK_GP4_SEL),
+ regmap_reg_range(CLK_GP5_CTRL, CLK_GP5_SEL),
+ regmap_reg_range(CLK_SYS_RESUS_CTRL, CLK_SYS_RESUS_CTRL),
+ regmap_reg_range(CLK_SLOW_SYS_RESUS_CTRL, CLK_SLOW_SYS_RESUS_CTRL),
+ regmap_reg_range(FC0_REF_KHZ, FC0_RESULT),
+ regmap_reg_range(VIDEO_CLK_VEC_CTRL, VIDEO_CLK_VEC_DIV_INT),
+ regmap_reg_range(VIDEO_CLK_VEC_SEL, VIDEO_CLK_DPI_DIV_INT),
+ regmap_reg_range(VIDEO_CLK_DPI_SEL, VIDEO_CLK_MIPI1_DPI_SEL),
+};
+
+static const struct regmap_access_table rp1_reg_table = {
+ .yes_ranges = rp1_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(rp1_reg_ranges),
+};
+
+static const struct regmap_config rp1_clk_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = PLL_VIDEO_SEC,
+ .name = "rp1-clk",
+ .rd_table = &rp1_reg_table,
+ .disable_locking = true,
+};
+
+static int rp1_clk_probe(struct platform_device *pdev)
+{
+ const size_t asize = ARRAY_SIZE(clk_desc_array);
+ struct rp1_clk_desc *desc;
+ struct device *dev = &pdev->dev;
+ struct rp1_clockman *clockman;
+ struct clk_hw **hws;
+ unsigned int i;
+
+ clockman = devm_kzalloc(dev, struct_size(clockman, onecell.hws, asize),
+ GFP_KERNEL);
+ if (!clockman)
+ return -ENOMEM;
+
+ spin_lock_init(&clockman->regs_lock);
+ clockman->dev = dev;
+
+ clockman->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(clockman->regs))
+ return PTR_ERR(clockman->regs);
+
+ clockman->regmap = devm_regmap_init_mmio(dev, clockman->regs,
+ &rp1_clk_regmap_cfg);
+ if (IS_ERR(clockman->regmap)) {
+ dev_err_probe(dev, PTR_ERR(clockman->regmap),
+ "could not init clock regmap\n");
+ return PTR_ERR(clockman->regmap);
+ }
+
+ clockman->onecell.num = asize;
+ hws = clockman->onecell.hws;
+
+ for (i = 0; i < asize; i++) {
+ desc = clk_desc_array[i];
+ if (desc && desc->clk_register && desc->data)
+ hws[i] = desc->clk_register(clockman, desc);
+ }
+
+ clk_audio_core = &pll_audio_core_desc;
+ clk_audio = &pll_audio_desc;
+ clk_i2s = &clk_i2s_desc;
+ clk_xosc = clk_hw_get_parent_by_index(&clk_i2s->hw, 0);
+
+ platform_set_drvdata(pdev, clockman);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &clockman->onecell);
+}
+
+static const struct of_device_id rp1_clk_of_match[] = {
+ { .compatible = "raspberrypi,rp1-clocks" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rp1_clk_of_match);
+
+static struct platform_driver rp1_clk_driver = {
+ .driver = {
+ .name = "rp1-clk",
+ .of_match_table = rp1_clk_of_match,
+ },
+ .probe = rp1_clk_probe,
+};
+
+module_platform_driver(rp1_clk_driver);
+
+MODULE_AUTHOR("Naushir Patuck <naush@raspberrypi.com>");
+MODULE_AUTHOR("Andrea della Porta <andrea.porta@suse.com>");
+MODULE_DESCRIPTION("RP1 clock driver");
+MODULE_LICENSE("GPL");
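
For context, a minimal sketch of how a peripheral driver could consume one of the clocks exported above through the onecell provider. The foo_uart_probe name, the DT wiring (e.g. clocks = <&rp1_clocks RP1_CLK_UART>;) and the 48 MHz target are illustrative assumptions, not part of this patch:

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/units.h>

static int foo_uart_probe(struct platform_device *pdev)
{
	struct clk *uart_clk;

	/* resolved through the onecell provider registered above */
	uart_clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(uart_clk))
		return PTR_ERR(uart_clk);

	/* 48 MHz is an arbitrary example rate */
	return clk_set_rate(uart_clk, 48 * HZ_PER_MHZ);
}
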
diff --git a/drivers/clk/clk-rpmi.c b/drivers/clk/clk-rpmi.c
new file mode 100644
index 000000000000..921296aafa68
--- /dev/null
+++ b/drivers/clk/clk-rpmi.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RISC-V MPXY Based Clock Driver
+ *
+ * Copyright (C) 2025 Ventana Micro Systems Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox/riscv-rpmi-message.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/wordpart.h>
+
+#define RPMI_CLK_DISCRETE_MAX_NUM_RATES 16
+#define RPMI_CLK_NAME_LEN 16
+
+#define to_rpmi_clk(clk) container_of(clk, struct rpmi_clk, hw)
+
+enum rpmi_clk_config {
+ RPMI_CLK_DISABLE = 0,
+ RPMI_CLK_ENABLE = 1,
+ RPMI_CLK_CONFIG_MAX_IDX
+};
+
+#define RPMI_CLK_TYPE_MASK GENMASK(1, 0)
+enum rpmi_clk_type {
+ RPMI_CLK_DISCRETE = 0,
+ RPMI_CLK_LINEAR = 1,
+ RPMI_CLK_TYPE_MAX_IDX
+};
+
+struct rpmi_clk_context {
+ struct device *dev;
+ struct mbox_chan *chan;
+ struct mbox_client client;
+ u32 max_msg_data_size;
+};
+
+/*
+ * rpmi_clk_rates holds clock rates in the exact format specified
+ * by the RPMI specification. No other representation (e.g.
+ * struct linear_range) is used, which avoids converting back
+ * and forth between formats.
+ */
+union rpmi_clk_rates {
+ u64 discrete[RPMI_CLK_DISCRETE_MAX_NUM_RATES];
+ struct {
+ u64 min;
+ u64 max;
+ u64 step;
+ } linear;
+};
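+
+/*
+ * For example, a linear range of {.min = 100 MHz, .max = 200 MHz,
+ * .step = 25 MHz} describes the five rates 100, 125, 150, 175 and
+ * 200 MHz; supported rate i is min + i * step.
+ */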
+
+struct rpmi_clk {
+ struct rpmi_clk_context *context;
+ u32 id;
+ u32 num_rates;
+ u32 transition_latency;
+ enum rpmi_clk_type type;
+ union rpmi_clk_rates *rates;
+ char name[RPMI_CLK_NAME_LEN];
+ struct clk_hw hw;
+};
+
+struct rpmi_clk_rate_discrete {
+ __le32 lo;
+ __le32 hi;
+};
+
+struct rpmi_clk_rate_linear {
+ __le32 min_lo;
+ __le32 min_hi;
+ __le32 max_lo;
+ __le32 max_hi;
+ __le32 step_lo;
+ __le32 step_hi;
+};
+
+struct rpmi_get_num_clocks_rx {
+ __le32 status;
+ __le32 num_clocks;
+};
+
+struct rpmi_get_attrs_tx {
+ __le32 clkid;
+};
+
+struct rpmi_get_attrs_rx {
+ __le32 status;
+ __le32 flags;
+ __le32 num_rates;
+ __le32 transition_latency;
+ char name[RPMI_CLK_NAME_LEN];
+};
+
+struct rpmi_get_supp_rates_tx {
+ __le32 clkid;
+ __le32 clk_rate_idx;
+};
+
+struct rpmi_get_supp_rates_rx {
+ __le32 status;
+ __le32 flags;
+ __le32 remaining;
+ __le32 returned;
+ __le32 rates[];
+};
+
+struct rpmi_get_rate_tx {
+ __le32 clkid;
+};
+
+struct rpmi_get_rate_rx {
+ __le32 status;
+ __le32 lo;
+ __le32 hi;
+};
+
+struct rpmi_set_rate_tx {
+ __le32 clkid;
+ __le32 flags;
+ __le32 lo;
+ __le32 hi;
+};
+
+struct rpmi_set_rate_rx {
+ __le32 status;
+};
+
+struct rpmi_set_config_tx {
+ __le32 clkid;
+ __le32 config;
+};
+
+struct rpmi_set_config_rx {
+ __le32 status;
+};
+
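+/*
+ * Combine the two 32-bit halves of a rate as carried on the wire;
+ * e.g. __hi = 0x1, __lo = 0x2a05f200 yields 0x12a05f200, i.e. 5 GHz.
+ */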
+static inline u64 rpmi_clkrate_u64(u32 __hi, u32 __lo)
+{
+ return (((u64)(__hi) << 32) | (u32)(__lo));
+}
+
+static u32 rpmi_clk_get_num_clocks(struct rpmi_clk_context *context)
+{
+ struct rpmi_get_num_clocks_rx rx, *resp;
+ struct rpmi_mbox_message msg;
+ int ret;
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_NUM_CLOCKS,
+ NULL, 0, &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return 0;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp || resp->status)
+ return 0;
+
+ return le32_to_cpu(resp->num_clocks);
+}
+
+static int rpmi_clk_get_attrs(u32 clkid, struct rpmi_clk *rpmi_clk)
+{
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_get_attrs_tx tx;
+ struct rpmi_get_attrs_rx rx, *resp;
+ u8 format;
+ int ret;
+
+ tx.clkid = cpu_to_le32(clkid);
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_ATTRIBUTES,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+
+ rpmi_clk->id = clkid;
+ rpmi_clk->num_rates = le32_to_cpu(resp->num_rates);
+ rpmi_clk->transition_latency = le32_to_cpu(resp->transition_latency);
+ strscpy(rpmi_clk->name, resp->name, RPMI_CLK_NAME_LEN);
+
+ format = le32_to_cpu(resp->flags) & RPMI_CLK_TYPE_MASK;
+ if (format >= RPMI_CLK_TYPE_MAX_IDX)
+ return -EINVAL;
+
+ rpmi_clk->type = format;
+
+ return 0;
+}
+
+static int rpmi_clk_get_supported_rates(u32 clkid, struct rpmi_clk *rpmi_clk)
+{
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_clk_rate_discrete *rate_discrete;
+ struct rpmi_clk_rate_linear *rate_linear;
+ struct rpmi_get_supp_rates_tx tx;
+ struct rpmi_get_supp_rates_rx *resp;
+ struct rpmi_mbox_message msg;
+ size_t clk_rate_idx;
+ int ret, rateidx, j;
+
+ tx.clkid = cpu_to_le32(clkid);
+ tx.clk_rate_idx = 0;
+
+ /*
+ * Make sure the rx buffer we allocate is large enough to
+ * accommodate all the rates sent in one RPMI message.
+ */
+ struct rpmi_get_supp_rates_rx *rx __free(kfree) =
+ kzalloc(context->max_msg_data_size, GFP_KERNEL);
+ if (!rx)
+ return -ENOMEM;
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_SUPPORTED_RATES,
+ &tx, sizeof(tx), rx, context->max_msg_data_size);
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+ if (!le32_to_cpu(resp->returned))
+ return -EINVAL;
+
+ if (rpmi_clk->type == RPMI_CLK_DISCRETE) {
+ rate_discrete = (struct rpmi_clk_rate_discrete *)resp->rates;
+
+ for (rateidx = 0; rateidx < le32_to_cpu(resp->returned); rateidx++) {
+ rpmi_clk->rates->discrete[rateidx] =
+ rpmi_clkrate_u64(le32_to_cpu(rate_discrete[rateidx].hi),
+ le32_to_cpu(rate_discrete[rateidx].lo));
+ }
+
+ /*
+ * Keep sending the request message until all
+ * the rates are received.
+ */
+ clk_rate_idx = 0;
+ while (le32_to_cpu(resp->remaining)) {
+ clk_rate_idx += le32_to_cpu(resp->returned);
+ tx.clk_rate_idx = cpu_to_le32(clk_rate_idx);
+
+ rpmi_mbox_init_send_with_response(&msg,
+ RPMI_CLK_SRV_GET_SUPPORTED_RATES,
+ &tx, sizeof(tx),
+ rx, context->max_msg_data_size);
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+ if (!le32_to_cpu(resp->returned))
+ return -EINVAL;
+
+ for (j = 0; j < le32_to_cpu(resp->returned); j++) {
+ if (rateidx >= clk_rate_idx + le32_to_cpu(resp->returned))
+ break;
+ rpmi_clk->rates->discrete[rateidx++] =
+ rpmi_clkrate_u64(le32_to_cpu(rate_discrete[j].hi),
+ le32_to_cpu(rate_discrete[j].lo));
+ }
+ }
+ } else if (rpmi_clk->type == RPMI_CLK_LINEAR) {
+ rate_linear = (struct rpmi_clk_rate_linear *)resp->rates;
+
+ rpmi_clk->rates->linear.min = rpmi_clkrate_u64(le32_to_cpu(rate_linear->min_hi),
+ le32_to_cpu(rate_linear->min_lo));
+ rpmi_clk->rates->linear.max = rpmi_clkrate_u64(le32_to_cpu(rate_linear->max_hi),
+ le32_to_cpu(rate_linear->max_lo));
+ rpmi_clk->rates->linear.step = rpmi_clkrate_u64(le32_to_cpu(rate_linear->step_hi),
+ le32_to_cpu(rate_linear->step_lo));
+ }
+
+ return 0;
+}
+
+static unsigned long rpmi_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_get_rate_tx tx;
+ struct rpmi_get_rate_rx rx, *resp;
+ int ret;
+
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_RATE,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+
+ return rpmi_clkrate_u64(le32_to_cpu(resp->hi), le32_to_cpu(resp->lo));
+}
+
+static int rpmi_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ u64 fmin, fmax, ftmp;
+
+ /*
+ * Keep the requested rate as-is if the clock is of the
+ * discrete type, and let the platform that actually controls
+ * the clock handle it.
+ */
+ if (rpmi_clk->type == RPMI_CLK_DISCRETE)
+ return 0;
+
+ fmin = rpmi_clk->rates->linear.min;
+ fmax = rpmi_clk->rates->linear.max;
+
+ if (req->rate <= fmin) {
+ req->rate = fmin;
+ return 0;
+ } else if (req->rate >= fmax) {
+ req->rate = fmax;
+ return 0;
+ }
+
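+ /*
+ * Round the request up to the next step boundary above the
+ * minimum, e.g. with min = 100 MHz and step = 25 MHz a request
+ * for 110 MHz becomes 100 MHz + ceil(10 MHz / 25 MHz) * 25 MHz
+ * = 125 MHz.
+ */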
+ ftmp = req->rate - fmin;
+ ftmp += rpmi_clk->rates->linear.step - 1;
+ do_div(ftmp, rpmi_clk->rates->linear.step);
+
+ req->rate = ftmp * rpmi_clk->rates->linear.step + fmin;
+
+ return 0;
+}
+
+static int rpmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_set_rate_tx tx;
+ struct rpmi_set_rate_rx rx, *resp;
+ int ret;
+
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+ tx.lo = cpu_to_le32(lower_32_bits(rate));
+ tx.hi = cpu_to_le32(upper_32_bits(rate));
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_RATE,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+
+ return 0;
+}
+
+static int rpmi_clk_enable(struct clk_hw *hw)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_set_config_tx tx;
+ struct rpmi_set_config_rx rx, *resp;
+ int ret;
+
+ tx.config = cpu_to_le32(RPMI_CLK_ENABLE);
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_CONFIG,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+
+ return 0;
+}
+
+static void rpmi_clk_disable(struct clk_hw *hw)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_set_config_tx tx;
+ struct rpmi_set_config_rx rx;
+
+ tx.config = cpu_to_le32(RPMI_CLK_DISABLE);
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_CONFIG,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ rpmi_mbox_send_message(context->chan, &msg);
+}
+
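+/*
+ * The mailbox transport may sleep, so clock gating is wired to the
+ * sleepable .prepare/.unprepare callbacks rather than to the atomic
+ * .enable/.disable ones.
+ */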
+static const struct clk_ops rpmi_clk_ops = {
+ .recalc_rate = rpmi_clk_recalc_rate,
+ .determine_rate = rpmi_clk_determine_rate,
+ .set_rate = rpmi_clk_set_rate,
+ .prepare = rpmi_clk_enable,
+ .unprepare = rpmi_clk_disable,
+};
+
+static struct clk_hw *rpmi_clk_enumerate(struct rpmi_clk_context *context, u32 clkid)
+{
+ struct device *dev = context->dev;
+ unsigned long min_rate, max_rate;
+ union rpmi_clk_rates *rates;
+ struct rpmi_clk *rpmi_clk;
+ struct clk_init_data init = {};
+ struct clk_hw *clk_hw;
+ int ret;
+
+ rates = devm_kzalloc(dev, sizeof(*rates), GFP_KERNEL);
+ if (!rates)
+ return ERR_PTR(-ENOMEM);
+
+ rpmi_clk = devm_kzalloc(dev, sizeof(*rpmi_clk), GFP_KERNEL);
+ if (!rpmi_clk)
+ return ERR_PTR(-ENOMEM);
+
+ rpmi_clk->context = context;
+ rpmi_clk->rates = rates;
+
+ ret = rpmi_clk_get_attrs(clkid, rpmi_clk);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret,
+ "Failed to get clk-%u attributes\n",
+ clkid);
+
+ ret = rpmi_clk_get_supported_rates(clkid, rpmi_clk);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret,
+ "Get supported rates failed for clk-%u\n",
+ clkid);
+
+ init.flags = CLK_GET_RATE_NOCACHE;
+ init.num_parents = 0;
+ init.ops = &rpmi_clk_ops;
+ init.name = rpmi_clk->name;
+ clk_hw = &rpmi_clk->hw;
+ clk_hw->init = &init;
+
+ ret = devm_clk_hw_register(dev, clk_hw);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret,
+ "Unable to register clk-%u\n",
+ clkid);
+
+ if (rpmi_clk->type == RPMI_CLK_DISCRETE) {
+ min_rate = rpmi_clk->rates->discrete[0];
+ max_rate = rpmi_clk->rates->discrete[rpmi_clk->num_rates - 1];
+ } else {
+ min_rate = rpmi_clk->rates->linear.min;
+ max_rate = rpmi_clk->rates->linear.max;
+ }
+
+ clk_hw_set_rate_range(clk_hw, min_rate, max_rate);
+
+ return clk_hw;
+}
+
+static void rpmi_clk_mbox_chan_release(void *data)
+{
+ struct mbox_chan *chan = data;
+
+ mbox_free_channel(chan);
+}
+
+static int rpmi_clk_probe(struct platform_device *pdev)
+{
+ int ret;
+ unsigned int num_clocks, i;
+ struct clk_hw_onecell_data *clk_data;
+ struct rpmi_clk_context *context;
+ struct rpmi_mbox_message msg;
+ struct clk_hw *hw_ptr;
+ struct device *dev = &pdev->dev;
+
+ context = devm_kzalloc(dev, sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+ context->dev = dev;
+ platform_set_drvdata(pdev, context);
+
+ context->client.dev = context->dev;
+ context->client.rx_callback = NULL;
+ context->client.tx_block = false;
+ context->client.knows_txdone = true;
+ context->client.tx_tout = 0;
+
+ context->chan = mbox_request_channel(&context->client, 0);
+ if (IS_ERR(context->chan))
+ return PTR_ERR(context->chan);
+
+ ret = devm_add_action_or_reset(dev, rpmi_clk_mbox_chan_release, context->chan);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add rpmi mbox channel cleanup\n");
+
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SPEC_VERSION);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get spec version\n");
+ if (msg.attr.value < RPMI_MKVER(1, 0)) {
+ return dev_err_probe(dev, -EINVAL,
+ "msg protocol version mismatch, expected 0x%x, found 0x%x\n",
+ RPMI_MKVER(1, 0), msg.attr.value);
+ }
+
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SERVICEGROUP_ID);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get service group ID\n");
+ if (msg.attr.value != RPMI_SRVGRP_CLOCK) {
+ return dev_err_probe(dev, -EINVAL,
+ "service group match failed, expected 0x%x, found 0x%x\n",
+ RPMI_SRVGRP_CLOCK, msg.attr.value);
+ }
+
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SERVICEGROUP_VERSION);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get service group version\n");
+ if (msg.attr.value < RPMI_MKVER(1, 0)) {
+ return dev_err_probe(dev, -EINVAL,
+ "service group version failed, expected 0x%x, found 0x%x\n",
+ RPMI_MKVER(1, 0), msg.attr.value);
+ }
+
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get max message data size\n");
+
+ context->max_msg_data_size = msg.attr.value;
+ num_clocks = rpmi_clk_get_num_clocks(context);
+ if (!num_clocks)
+ return dev_err_probe(dev, -ENODEV, "No clocks found\n");
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_clocks),
+ GFP_KERNEL);
+ if (!clk_data)
+ return dev_err_probe(dev, -ENOMEM, "No memory for clock data\n");
+ clk_data->num = num_clocks;
+
+ for (i = 0; i < clk_data->num; i++) {
+ hw_ptr = rpmi_clk_enumerate(context, i);
+ if (IS_ERR(hw_ptr)) {
+ return dev_err_probe(dev, PTR_ERR(hw_ptr),
+ "Failed to register clk-%d\n", i);
+ }
+ clk_data->hws[i] = hw_ptr;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register clock HW provider\n");
+
+ return 0;
+}
+
+static const struct of_device_id rpmi_clk_of_match[] = {
+ { .compatible = "riscv,rpmi-clock" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpmi_clk_of_match);
+
+static struct platform_driver rpmi_clk_driver = {
+ .driver = {
+ .name = "riscv-rpmi-clock",
+ .of_match_table = rpmi_clk_of_match,
+ },
+ .probe = rpmi_clk_probe,
+};
+module_platform_driver(rpmi_clk_driver);
+
+MODULE_AUTHOR("Rahul Pathak <rpathak@ventanamicro.com>");
+MODULE_DESCRIPTION("Clock Driver based on RPMI message protocol");
+MODULE_LICENSE("GPL");
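
Every service call above repeats the same send/parse/check-status sequence. A possible consolidation, shown only as a sketch (the rpmi_clk_xfer name is hypothetical; it relies on every rx struct starting with its __le32 status word, as all of the ones above do):

static int rpmi_clk_xfer(struct rpmi_clk_context *context, u32 service,
			 void *tx, size_t tx_len, void *rx, size_t rx_len)
{
	struct rpmi_mbox_message msg;
	__le32 *status;
	int ret;

	rpmi_mbox_init_send_with_response(&msg, service, tx, tx_len, rx, rx_len);
	ret = rpmi_mbox_send_message(context->chan, &msg);
	if (ret)
		return ret;

	status = rpmi_mbox_get_msg_response(&msg);
	if (!status)
		return -EINVAL;

	return *status ? rpmi_to_linux_error(le32_to_cpu(*status)) : 0;
}
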
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 8ddf3a9a53df..ff7ce12a5da6 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -11,6 +11,7 @@
#include <linux/regmap.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
+#include <linux/mfd/samsung/s2mpg10.h>
#include <linux/mfd/samsung/s2mps11.h>
#include <linux/mfd/samsung/s2mps13.h>
#include <linux/mfd/samsung/s2mps14.h>
@@ -140,6 +141,9 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
clk_data->num = S2MPS11_CLKS_NUM;
switch (hwid) {
+ case S2MPG10:
+ s2mps11_reg = S2MPG10_PMIC_RTCBUF;
+ break;
case S2MPS11X:
s2mps11_reg = S2MPS11_REG_RTC_CTRL;
break;
@@ -221,6 +225,7 @@ static void s2mps11_clk_remove(struct platform_device *pdev)
}
static const struct platform_device_id s2mps11_clk_id[] = {
+ { "s2mpg10-clk", S2MPG10},
{ "s2mps11-clk", S2MPS11X},
{ "s2mps13-clk", S2MPS13X},
{ "s2mps14-clk", S2MPS14X},
@@ -235,12 +240,15 @@ MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
* through platform_device_id.
*
* However if device's DT node contains proper clock compatible and driver is
- * built as a module, then the *module* matching will be done trough DT aliases.
+ * built as a module, then the *module* matching will be done through DT aliases.
* This requires of_device_id table. In the same time this will not change the
* actual *device* matching so do not add .of_match_table.
*/
static const struct of_device_id s2mps11_dt_match[] __used = {
{
+ .compatible = "samsung,s2mpg10-clk",
+ .data = (void *)S2MPG10,
+ }, {
.compatible = "samsung,s2mps11-clk",
.data = (void *)S2MPS11X,
}, {
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index 15510c2ff21c..6b286ea6f121 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -54,8 +54,8 @@ static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int scmi_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u64 fmin, fmax, ftmp;
struct scmi_clk *clk = to_scmi_clk(hw);
@@ -67,20 +67,27 @@ static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
* running at then.
*/
if (clk->info->rate_discrete)
- return rate;
+ return 0;
fmin = clk->info->range.min_rate;
fmax = clk->info->range.max_rate;
- if (rate <= fmin)
- return fmin;
- else if (rate >= fmax)
- return fmax;
+ if (req->rate <= fmin) {
+ req->rate = fmin;
+
+ return 0;
+ } else if (req->rate >= fmax) {
+ req->rate = fmax;
- ftmp = rate - fmin;
+ return 0;
+ }
+
+ ftmp = req->rate - fmin;
ftmp += clk->info->range.step_size - 1; /* to round up */
do_div(ftmp, clk->info->range.step_size);
- return ftmp * clk->info->range.step_size + fmin;
+ req->rate = ftmp * clk->info->range.step_size + fmin;
+
+ return 0;
}
static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -119,15 +126,6 @@ static u8 scmi_clk_get_parent(struct clk_hw *hw)
return p_idx;
}
-static int scmi_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
-{
- /*
- * Suppose all the requested rates are supported, and let firmware
- * to handle the left work.
- */
- return 0;
-}
-
static int scmi_clk_enable(struct clk_hw *hw)
{
struct scmi_clk *clk = to_scmi_clk(hw);
@@ -300,7 +298,6 @@ scmi_clk_ops_alloc(struct device *dev, unsigned long feats_key)
/* Rate ops */
ops->recalc_rate = scmi_clk_recalc_rate;
- ops->round_rate = scmi_clk_round_rate;
ops->determine_rate = scmi_clk_determine_rate;
if (feats_key & BIT(SCMI_CLK_RATE_CTRL_SUPPORTED))
ops->set_rate = scmi_clk_set_rate;
@@ -349,6 +346,8 @@ scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
unsigned int atomic_threshold_us,
const struct clk_ops **clk_ops_db, size_t db_size)
{
+ int ret;
+ u32 val;
const struct scmi_clock_info *ci = sclk->info;
unsigned int feats_key = 0;
const struct clk_ops *ops;
@@ -370,8 +369,13 @@ scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
if (!ci->parent_ctrl_forbidden)
feats_key |= BIT(SCMI_CLK_PARENT_CTRL_SUPPORTED);
- if (ci->extended_config)
- feats_key |= BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED);
+ if (ci->extended_config) {
+ ret = scmi_proto_clk_ops->config_oem_get(sclk->ph, sclk->id,
+ SCMI_CLOCK_CFG_DUTY_CYCLE,
+ &val, NULL, false);
+ if (!ret)
+ feats_key |= BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED);
+ }
if (WARN_ON(feats_key >= db_size))
return NULL;
@@ -404,6 +408,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
const struct scmi_handle *handle = sdev->handle;
struct scmi_protocol_handle *ph;
const struct clk_ops *scmi_clk_ops_db[SCMI_MAX_CLK_OPS] = {};
+ struct scmi_clk *sclks;
if (!handle)
return -ENODEV;
@@ -430,18 +435,21 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
transport_is_atomic = handle->is_transport_atomic(handle,
&atomic_threshold_us);
+ sclks = devm_kcalloc(dev, count, sizeof(*sclks), GFP_KERNEL);
+ if (!sclks)
+ return -ENOMEM;
+
+ for (idx = 0; idx < count; idx++)
+ hws[idx] = &sclks[idx].hw;
+
for (idx = 0; idx < count; idx++) {
- struct scmi_clk *sclk;
+ struct scmi_clk *sclk = &sclks[idx];
const struct clk_ops *scmi_ops;
- sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
- if (!sclk)
- return -ENOMEM;
-
sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
if (!sclk->info) {
dev_dbg(dev, "invalid clock info for idx %d\n", idx);
- devm_kfree(dev, sclk);
+ hws[idx] = NULL;
continue;
}
@@ -451,7 +459,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
/*
* Note that the scmi_clk_ops_db is on the stack, not global,
- * because it cannot be shared between mulitple probe-sequences
+ * because it cannot be shared between multiple probe-sequences
* to avoid sharing the devm_ allocated clk_ops between multiple
* SCMI clk driver instances.
*/
@@ -479,13 +487,11 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
if (err) {
dev_err(dev, "failed to register clock %d\n", idx);
devm_kfree(dev, sclk->parent_data);
- devm_kfree(dev, sclk);
hws[idx] = NULL;
} else {
dev_dbg(dev, "Registered clock:%s%s\n",
sclk->info->name,
scmi_ops->enable ? " (atomic ops)" : "");
- hws[idx] = &sclk->hw;
}
}
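
The clk-scmi.c change above is the template for the remainder of this series: .round_rate returned the rounded rate as a long (or a negative errno), whereas .determine_rate reports the result through the request and returns a status. A minimal sketch of the mechanical conversion on a hypothetical driver, with made-up FOO_MIN/FOO_MAX bounds:

#define FOO_MIN 1000000UL
#define FOO_MAX 100000000UL

/* before: */
static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *parent_rate)
{
	return clamp(rate, FOO_MIN, FOO_MAX);
}

/* after: */
static int foo_determine_rate(struct clk_hw *hw,
			      struct clk_rate_request *req)
{
	req->rate = clamp(req->rate, FOO_MIN, FOO_MAX);

	return 0;
}
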
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 19d530d52e64..0b592de7bdb2 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -32,8 +32,8 @@ static unsigned long scpi_clk_recalc_rate(struct clk_hw *hw,
return clk->scpi_ops->clk_get_val(clk->id);
}
-static long scpi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int scpi_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/*
* We can't figure out what rate it will be, so just return the
@@ -41,7 +41,7 @@ static long scpi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
* after the rate is set and we'll know what rate the clock is
* running at then.
*/
- return rate;
+ return 0;
}
static int scpi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -54,7 +54,7 @@ static int scpi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops scpi_clk_ops = {
.recalc_rate = scpi_clk_recalc_rate,
- .round_rate = scpi_clk_round_rate,
+ .determine_rate = scpi_clk_determine_rate,
.set_rate = scpi_clk_set_rate,
};
@@ -92,12 +92,14 @@ static unsigned long scpi_dvfs_recalc_rate(struct clk_hw *hw,
return opp->freq;
}
-static long scpi_dvfs_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int scpi_dvfs_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct scpi_clk *clk = to_scpi_clk(hw);
- return __scpi_dvfs_round_rate(clk, rate);
+ req->rate = __scpi_dvfs_round_rate(clk, req->rate);
+
+ return 0;
}
static int __scpi_find_dvfs_index(struct scpi_clk *clk, unsigned long rate)
@@ -124,7 +126,7 @@ static int scpi_dvfs_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops scpi_dvfs_ops = {
.recalc_rate = scpi_dvfs_recalc_rate,
- .round_rate = scpi_dvfs_round_rate,
+ .determine_rate = scpi_dvfs_determine_rate,
.set_rate = scpi_dvfs_set_rate,
};
diff --git a/drivers/clk/clk-si514.c b/drivers/clk/clk-si514.c
index 1127c35ce57d..f61590d70575 100644
--- a/drivers/clk/clk-si514.c
+++ b/drivers/clk/clk-si514.c
@@ -227,20 +227,26 @@
return si514_calc_rate(&settings);
}
-static long si514_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si514_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_si514_muldiv settings;
int err;
- if (!rate)
+ if (!req->rate) {
+ req->rate = 0;
+
return 0;
+ }
- err = si514_calc_muldiv(&settings, rate);
- if (err)
- return err;
+ err = si514_calc_muldiv(&settings, req->rate);
+ if (err)
+ return err;
- return si514_calc_rate(&settings);
+
+ req->rate = si514_calc_rate(&settings);
+
+ return 0;
}
/*
@@ -289,7 +297,7 @@ static const struct clk_ops si514_clk_ops = {
.unprepare = si514_unprepare,
.is_prepared = si514_is_prepared,
.recalc_rate = si514_recalc_rate,
- .round_rate = si514_round_rate,
+ .determine_rate = si514_determine_rate,
.set_rate = si514_set_rate,
};
diff --git a/drivers/clk/clk-si521xx.c b/drivers/clk/clk-si521xx.c
index 4f7b74f889f1..4ed4e1a5f4f2 100644
--- a/drivers/clk/clk-si521xx.c
+++ b/drivers/clk/clk-si521xx.c
@@ -164,15 +164,17 @@ static unsigned long si521xx_diff_recalc_rate(struct clk_hw *hw,
return (unsigned long)rate;
}
-static long si521xx_diff_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int si521xx_diff_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long best_parent;
- best_parent = (rate / SI521XX_DIFF_MULT) * SI521XX_DIFF_DIV;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ best_parent = (req->rate / SI521XX_DIFF_MULT) * SI521XX_DIFF_DIV;
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
- return (*prate / SI521XX_DIFF_DIV) * SI521XX_DIFF_MULT;
+ req->rate = (req->best_parent_rate / SI521XX_DIFF_DIV) * SI521XX_DIFF_MULT;
+
+ return 0;
}
static int si521xx_diff_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -208,7 +210,7 @@ static void si521xx_diff_unprepare(struct clk_hw *hw)
}
static const struct clk_ops si521xx_diff_clk_ops = {
- .round_rate = si521xx_diff_round_rate,
+ .determine_rate = si521xx_diff_determine_rate,
.set_rate = si521xx_diff_set_rate,
.recalc_rate = si521xx_diff_recalc_rate,
.prepare = si521xx_diff_prepare,
diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
index 5004888c7eca..2499b771cd83 100644
--- a/drivers/clk/clk-si5341.c
+++ b/drivers/clk/clk-si5341.c
@@ -663,8 +663,8 @@ static unsigned long si5341_synth_clk_recalc_rate(struct clk_hw *hw,
return f;
}
-static long si5341_synth_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si5341_synth_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_si5341_synth *synth = to_clk_si5341_synth(hw);
u64 f;
@@ -672,15 +672,21 @@ static long si5341_synth_clk_round_rate(struct clk_hw *hw, unsigned long rate,
/* The synthesizer accuracy is such that anything in range will work */
f = synth->data->freq_vco;
do_div(f, SI5341_SYNTH_N_MAX);
- if (rate < f)
- return f;
+ if (req->rate < f) {
+ req->rate = f;
+
+ return 0;
+ }
f = synth->data->freq_vco;
do_div(f, SI5341_SYNTH_N_MIN);
- if (rate > f)
- return f;
+ if (req->rate > f) {
+ req->rate = f;
- return rate;
+ return 0;
+ }
+
+ return 0;
}
static int si5341_synth_program(struct clk_si5341_synth *synth,
@@ -741,7 +747,7 @@ static const struct clk_ops si5341_synth_clk_ops = {
.prepare = si5341_synth_clk_prepare,
.unprepare = si5341_synth_clk_unprepare,
.recalc_rate = si5341_synth_clk_recalc_rate,
- .round_rate = si5341_synth_clk_round_rate,
+ .determine_rate = si5341_synth_clk_determine_rate,
.set_rate = si5341_synth_clk_set_rate,
};
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index a4c92c5ef3ff..e755db545e2e 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -655,7 +655,7 @@ static int si5351_msynth_determine_rate(struct clk_hw *hw,
unsigned long a, b, c;
int divby4;
- /* multisync6-7 can only handle freqencies < 150MHz */
+ /* multisync6-7 can only handle frequencies < 150MHz */
if (hwdata->num >= 6 && rate > SI5351_MULTISYNTH67_MAX_FREQ)
rate = SI5351_MULTISYNTH67_MAX_FREQ;
@@ -1048,11 +1048,11 @@ static int si5351_clkout_determine_rate(struct clk_hw *hw,
unsigned long rate = req->rate;
unsigned char rdiv;
- /* clkout6/7 can only handle output freqencies < 150MHz */
+ /* clkout6/7 can only handle output frequencies < 150MHz */
if (hwdata->num >= 6 && rate > SI5351_CLKOUT67_MAX_FREQ)
rate = SI5351_CLKOUT67_MAX_FREQ;
- /* clkout freqency is 8kHz - 160MHz */
+ /* clkout frequency is 8kHz - 160MHz */
if (rate > SI5351_CLKOUT_MAX_FREQ)
rate = SI5351_CLKOUT_MAX_FREQ;
if (rate < SI5351_CLKOUT_MIN_FREQ)
diff --git a/drivers/clk/clk-si544.c b/drivers/clk/clk-si544.c
index c88650558f32..09c06ecec1a5 100644
--- a/drivers/clk/clk-si544.c
+++ b/drivers/clk/clk-si544.c
@@ -39,7 +39,7 @@
/* Max freq depends on speed grade */
#define SI544_MIN_FREQ 200000U
-/* Si544 Internal oscilator runs at 55.05 MHz */
+/* Si544 Internal oscillator runs at 55.05 MHz */
#define FXO 55050000U
/* VCO range is 10.8 .. 12.1 GHz, max depends on speed grade */
@@ -307,16 +307,16 @@ static unsigned long si544_recalc_rate(struct clk_hw *hw,
return si544_calc_rate(&settings);
}
-static long si544_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si544_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_si544 *data = to_clk_si544(hw);
- if (!is_valid_frequency(data, rate))
+ if (!is_valid_frequency(data, req->rate))
return -EINVAL;
/* The accuracy is less than 1 Hz, so any rate is possible */
- return rate;
+ return 0;
}
/* Calculates the maximum "small" change, 950 * rate / 1000000 */
@@ -408,7 +408,7 @@ static const struct clk_ops si544_clk_ops = {
.unprepare = si544_unprepare,
.is_prepared = si544_is_prepared,
.recalc_rate = si544_recalc_rate,
- .round_rate = si544_round_rate,
+ .determine_rate = si544_determine_rate,
.set_rate = si544_set_rate,
};
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
index a549ea13be20..b0b1830dd430 100644
--- a/drivers/clk/clk-si570.c
+++ b/drivers/clk/clk-si570.c
@@ -63,7 +63,7 @@ struct clk_si570_info {
* struct clk_si570:
* @hw: Clock hw struct
* @regmap: Device's regmap
- * @div_offset: Rgister offset for dividers
+ * @div_offset: Register offset for dividers
* @info: Device info
* @fxtal: Factory xtal frequency
* @n1: Clock divider N1
@@ -181,7 +181,7 @@ static int si570_update_rfreq(struct clk_si570 *data)
}
/**
- * si570_calc_divs() - Caluclate clock dividers
+ * si570_calc_divs() - Calculate clock dividers
* @frequency: Target frequency
* @data: Driver data structure
* @out_rfreq: RFREG fractional multiplier (output)
@@ -246,34 +246,40 @@ static unsigned long si570_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long si570_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si570_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int err;
u64 rfreq;
unsigned int n1, hs_div;
struct clk_si570 *data = to_clk_si570(hw);
- if (!rate)
+ if (!req->rate) {
+ req->rate = 0;
+
return 0;
+ }
- if (div64_u64(abs(rate - data->frequency) * 10000LL,
+ if (div64_u64(abs(req->rate - data->frequency) * 10000LL,
data->frequency) < 35) {
- rfreq = div64_u64((data->rfreq * rate) +
- div64_u64(data->frequency, 2), data->frequency);
+ rfreq = div64_u64((data->rfreq * req->rate) +
+ div64_u64(data->frequency, 2),
+ data->frequency);
n1 = data->n1;
hs_div = data->hs_div;
} else {
- err = si570_calc_divs(rate, data, &rfreq, &n1, &hs_div);
+ err = si570_calc_divs(req->rate, data, &rfreq, &n1, &hs_div);
if (err) {
dev_err(&data->i2c_client->dev,
"unable to round rate\n");
+ req->rate = 0;
+
return 0;
}
}
- return rate;
+ return 0;
}
/**
@@ -368,7 +374,7 @@ static int si570_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops si570_clk_ops = {
.recalc_rate = si570_recalc_rate,
- .round_rate = si570_round_rate,
+ .determine_rate = si570_determine_rate,
.set_rate = si570_set_rate,
};
diff --git a/drivers/clk/clk-sp7021.c b/drivers/clk/clk-sp7021.c
index 7cb7d501d7a6..36528a71a2e6 100644
--- a/drivers/clk/clk-sp7021.c
+++ b/drivers/clk/clk-sp7021.c
@@ -7,6 +7,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/bitfield.h>
+#include <linux/hw_bitfield.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
@@ -14,7 +15,7 @@
#include <dt-bindings/clock/sunplus,sp7021-clkc.h>
-/* speical div_width values for PLLTV/PLLA */
+/* special div_width values for PLLTV/PLLA */
#define DIV_TV 33
#define DIV_A 34
@@ -38,13 +39,6 @@ enum {
#define MASK_DIVN GENMASK(7, 0)
#define MASK_DIVM GENMASK(14, 8)
-/* HIWORD_MASK FIELD_PREP */
-#define HWM_FIELD_PREP(mask, value) \
-({ \
- u64 _m = mask; \
- (_m << 16) | FIELD_PREP(_m, value); \
-})
-
struct sp_pll {
struct clk_hw hw;
void __iomem *reg;
@@ -313,15 +307,15 @@ static int plltv_set_rate(struct sp_pll *clk)
u32 r0, r1, r2;
r0 = BIT(clk->bp_bit + 16);
- r0 |= HWM_FIELD_PREP(MASK_SEL_FRA, clk->p[SEL_FRA]);
- r0 |= HWM_FIELD_PREP(MASK_SDM_MOD, clk->p[SDM_MOD]);
- r0 |= HWM_FIELD_PREP(MASK_PH_SEL, clk->p[PH_SEL]);
- r0 |= HWM_FIELD_PREP(MASK_NFRA, clk->p[NFRA]);
+ r0 |= FIELD_PREP_WM16(MASK_SEL_FRA, clk->p[SEL_FRA]);
+ r0 |= FIELD_PREP_WM16(MASK_SDM_MOD, clk->p[SDM_MOD]);
+ r0 |= FIELD_PREP_WM16(MASK_PH_SEL, clk->p[PH_SEL]);
+ r0 |= FIELD_PREP_WM16(MASK_NFRA, clk->p[NFRA]);
- r1 = HWM_FIELD_PREP(MASK_DIVR, clk->p[DIVR]);
+ r1 = FIELD_PREP_WM16(MASK_DIVR, clk->p[DIVR]);
- r2 = HWM_FIELD_PREP(MASK_DIVN, clk->p[DIVN] - 1);
- r2 |= HWM_FIELD_PREP(MASK_DIVM, clk->p[DIVM] - 1);
+ r2 = FIELD_PREP_WM16(MASK_DIVN, clk->p[DIVN] - 1);
+ r2 |= FIELD_PREP_WM16(MASK_DIVM, clk->p[DIVM] - 1);
spin_lock_irqsave(&clk->lock, flags);
writel(r0, clk->reg);
@@ -412,25 +406,27 @@ static long sp_pll_calc_div(struct sp_pll *clk, unsigned long rate)
return fbdiv;
}
-static long sp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int sp_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sp_pll *clk = to_sp_pll(hw);
long ret;
- if (rate == *prate) {
- ret = *prate; /* bypass */
+ if (req->rate == req->best_parent_rate) {
+ ret = req->best_parent_rate; /* bypass */
} else if (clk->div_width == DIV_A) {
- ret = plla_round_rate(clk, rate);
+ ret = plla_round_rate(clk, req->rate);
} else if (clk->div_width == DIV_TV) {
- ret = plltv_div(clk, rate);
+ ret = plltv_div(clk, req->rate);
if (ret < 0)
- ret = *prate;
+ ret = req->best_parent_rate;
} else {
- ret = sp_pll_calc_div(clk, rate) * clk->brate;
+ ret = sp_pll_calc_div(clk, req->rate) * clk->brate;
}
- return ret;
+ req->rate = ret;
+
+ return 0;
}
static unsigned long sp_pll_recalc_rate(struct clk_hw *hw,
@@ -535,7 +531,7 @@ static const struct clk_ops sp_pll_ops = {
.enable = sp_pll_enable,
.disable = sp_pll_disable,
.is_enabled = sp_pll_is_enabled,
- .round_rate = sp_pll_round_rate,
+ .determine_rate = sp_pll_determine_rate,
.recalc_rate = sp_pll_recalc_rate,
.set_rate = sp_pll_set_rate
};
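
FIELD_PREP_WM16() from <linux/hw_bitfield.h> encodes the same hi-word write-mask idiom that the dropped HWM_FIELD_PREP macro open-coded: the field value sits in the low half-word while the field mask, shifted into the top 16 bits, tells the hardware which bits to latch. A worked example under that definition:

/* writing the value 0x12 into MASK_DIVM, i.e. GENMASK(14, 8): */
u32 r = FIELD_PREP_WM16(GENMASK(14, 8), 0x12);
/*
 * r == (GENMASK(14, 8) << 16) | (0x12 << 8) == 0x7f001200; bits 30..24
 * act as per-bit write enables, so the other fields of the register
 * keep their current values.
 */
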
diff --git a/drivers/clk/clk-sparx5.c b/drivers/clk/clk-sparx5.c
index 0fad0c1a0186..b2facc9c95d4 100644
--- a/drivers/clk/clk-sparx5.c
+++ b/drivers/clk/clk-sparx5.c
@@ -213,19 +213,21 @@ static unsigned long s5_pll_recalc_rate(struct clk_hw *hw,
return conf.freq;
}
-static long s5_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int s5_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct s5_pll_conf conf;
- return s5_calc_params(rate, *parent_rate, &conf);
+ req->rate = s5_calc_params(req->rate, req->best_parent_rate, &conf);
+
+ return 0;
}
static const struct clk_ops s5_pll_ops = {
.enable = s5_pll_enable,
.disable = s5_pll_disable,
.set_rate = s5_pll_set_rate,
- .round_rate = s5_pll_round_rate,
+ .determine_rate = s5_pll_determine_rate,
.recalc_rate = s5_pll_recalc_rate,
};
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index 85e23961ec34..b5d4d48432a0 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -19,7 +19,7 @@
#include <linux/mfd/syscon.h>
/*
- * Include list of clocks wich are not derived from system clock (SYSCLOCK)
+ * Include list of clocks which are not derived from system clock (SYSCLOCK)
* The index of these clocks is the secondary index of DT bindings
*
*/
@@ -443,8 +443,8 @@ static unsigned long clk_apb_mul_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long clk_apb_mul_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_apb_mul_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_apb_mul *am = to_clk_apb_mul(hw);
unsigned long mult = 1;
@@ -453,12 +453,14 @@ static long clk_apb_mul_round_rate(struct clk_hw *hw, unsigned long rate,
mult = 2;
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
- unsigned long best_parent = rate / mult;
+ unsigned long best_parent = req->rate / mult;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
}
- return *prate * mult;
+ req->rate = req->best_parent_rate * mult;
+
+ return 0;
}
static int clk_apb_mul_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -474,7 +476,7 @@ static int clk_apb_mul_set_rate(struct clk_hw *hw, unsigned long rate,
}
static const struct clk_ops clk_apb_mul_factor_ops = {
- .round_rate = clk_apb_mul_round_rate,
+ .determine_rate = clk_apb_mul_determine_rate,
.set_rate = clk_apb_mul_set_rate,
.recalc_rate = clk_apb_mul_recalc_rate,
};
@@ -670,21 +672,23 @@ static unsigned long stm32f4_pll_recalc(struct clk_hw *hw,
return parent_rate * n;
}
-static long stm32f4_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int stm32f4_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_gate *gate = to_clk_gate(hw);
struct stm32f4_pll *pll = to_stm32f4_pll(gate);
unsigned long n;
- n = rate / *prate;
+ n = req->rate / req->best_parent_rate;
if (n < pll->n_start)
n = pll->n_start;
else if (n > 432)
n = 432;
- return *prate * n;
+ req->rate = req->best_parent_rate * n;
+
+ return 0;
}
static void stm32f4_pll_set_ssc(struct clk_hw *hw, unsigned long parent_rate,
@@ -749,7 +753,7 @@ static const struct clk_ops stm32f4_pll_gate_ops = {
.disable = stm32f4_pll_disable,
.is_enabled = stm32f4_pll_is_enabled,
.recalc_rate = stm32f4_pll_recalc,
- .round_rate = stm32f4_pll_round_rate,
+ .determine_rate = stm32f4_pll_determine_rate,
.set_rate = stm32f4_pll_set_rate,
};
diff --git a/drivers/clk/clk-tps68470.c b/drivers/clk/clk-tps68470.c
index 38f44b5b9b1b..9511248c6bc9 100644
--- a/drivers/clk/clk-tps68470.c
+++ b/drivers/clk/clk-tps68470.c
@@ -146,12 +146,14 @@ static unsigned int tps68470_clk_cfg_lookup(unsigned long rate)
return best_idx;
}
-static long tps68470_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int tps68470_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned int idx = tps68470_clk_cfg_lookup(rate);
+ unsigned int idx = tps68470_clk_cfg_lookup(req->rate);
+
+ req->rate = clk_freqs[idx].freq;
- return clk_freqs[idx].freq;
+ return 0;
}
static int tps68470_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -186,7 +188,7 @@ static const struct clk_ops tps68470_clk_ops = {
.prepare = tps68470_clk_prepare,
.unprepare = tps68470_clk_unprepare,
.recalc_rate = tps68470_clk_recalc_rate,
- .round_rate = tps68470_clk_round_rate,
+ .determine_rate = tps68470_clk_determine_rate,
.set_rate = tps68470_clk_set_rate,
};
diff --git a/drivers/clk/clk-versaclock3.c b/drivers/clk/clk-versaclock3.c
index 9fe27dace111..1849863dbd67 100644
--- a/drivers/clk/clk-versaclock3.c
+++ b/drivers/clk/clk-versaclock3.c
@@ -289,22 +289,25 @@ static unsigned long vc3_pfd_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long vc3_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc3_pfd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_pfd_data *pfd = vc3->data;
unsigned long idiv;
/* PLL cannot operate with input clock above 50 MHz. */
- if (rate > 50000000)
+ if (req->rate > 50000000)
return -EINVAL;
/* CLKIN within range of PLL input, feed directly to PLL. */
- if (*parent_rate <= 50000000)
- return *parent_rate;
+ if (req->best_parent_rate <= 50000000) {
+ req->rate = req->best_parent_rate;
- idiv = DIV_ROUND_UP(*parent_rate, rate);
+ return 0;
+ }
+
+ idiv = DIV_ROUND_UP(req->best_parent_rate, req->rate);
if (pfd->num == VC3_PFD1 || pfd->num == VC3_PFD3) {
if (idiv > 63)
return -EINVAL;
@@ -313,7 +316,9 @@ static long vc3_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
}
- return *parent_rate / idiv;
+ req->rate = req->best_parent_rate / idiv;
+
+ return 0;
}
static int vc3_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -354,7 +359,7 @@ static int vc3_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc3_pfd_ops = {
.recalc_rate = vc3_pfd_recalc_rate,
- .round_rate = vc3_pfd_round_rate,
+ .determine_rate = vc3_pfd_determine_rate,
.set_rate = vc3_pfd_set_rate,
};
@@ -385,36 +390,38 @@ static unsigned long vc3_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long vc3_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc3_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_pll_data *pll = vc3->data;
u64 div_frc;
- if (rate < pll->vco.min)
- rate = pll->vco.min;
- if (rate > pll->vco.max)
- rate = pll->vco.max;
+ if (req->rate < pll->vco.min)
+ req->rate = pll->vco.min;
+ if (req->rate > pll->vco.max)
+ req->rate = pll->vco.max;
- vc3->div_int = rate / *parent_rate;
+ vc3->div_int = req->rate / req->best_parent_rate;
if (pll->num == VC3_PLL2) {
if (vc3->div_int > 0x7ff)
- rate = *parent_rate * 0x7ff;
+ req->rate = req->best_parent_rate * 0x7ff;
/* Determine best fractional part, which is 16 bit wide */
- div_frc = rate % *parent_rate;
+ div_frc = req->rate % req->best_parent_rate;
div_frc *= BIT(16) - 1;
- vc3->div_frc = min_t(u64, div64_ul(div_frc, *parent_rate), U16_MAX);
- rate = (*parent_rate *
- (vc3->div_int * VC3_2_POW_16 + vc3->div_frc) / VC3_2_POW_16);
+ vc3->div_frc = min_t(u64,
+ div64_ul(div_frc, req->best_parent_rate),
+ U16_MAX);
+ req->rate = (req->best_parent_rate *
+ (vc3->div_int * VC3_2_POW_16 + vc3->div_frc) / VC3_2_POW_16);
} else {
- rate = *parent_rate * vc3->div_int;
+ req->rate = req->best_parent_rate * vc3->div_int;
}
- return rate;
+ return 0;
}
static int vc3_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -441,7 +448,7 @@ static int vc3_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc3_pll_ops = {
.recalc_rate = vc3_pll_recalc_rate,
- .round_rate = vc3_pll_round_rate,
+ .determine_rate = vc3_pll_determine_rate,
.set_rate = vc3_pll_set_rate,
};
@@ -498,8 +505,8 @@ static unsigned long vc3_div_recalc_rate(struct clk_hw *hw,
div_data->flags, div_data->width);
}
-static long vc3_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc3_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_div_data *div_data = vc3->data;
@@ -511,11 +518,16 @@ static long vc3_div_round_rate(struct clk_hw *hw, unsigned long rate,
bestdiv >>= div_data->shift;
bestdiv &= VC3_DIV_MASK(div_data->width);
bestdiv = vc3_get_div(div_data->table, bestdiv, div_data->flags);
- return DIV_ROUND_UP(*parent_rate, bestdiv);
+ req->rate = DIV_ROUND_UP(req->best_parent_rate, bestdiv);
+
+ return 0;
}
- return divider_round_rate(hw, rate, parent_rate, div_data->table,
- div_data->width, div_data->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ div_data->table,
+ div_data->width, div_data->flags);
+
+ return 0;
}
static int vc3_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -534,7 +546,7 @@ static int vc3_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc3_div_ops = {
.recalc_rate = vc3_div_recalc_rate,
- .round_rate = vc3_div_round_rate,
+ .determine_rate = vc3_div_determine_rate,
.set_rate = vc3_div_set_rate,
};
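
Note: every hunk in this series is the same mechanical conversion. As a
standalone sketch of the pattern (the foo_* names are hypothetical, not
from the patch): .round_rate returned the rounded frequency as a long
(or a negative errno) and could adjust *parent_rate, while
.determine_rate reports its results through struct clk_rate_request and
returns only a status code.

static int foo_determine_rate(struct clk_hw *hw,
			      struct clk_rate_request *req)
{
	unsigned long rounded;

	/* inputs come from the request: req->rate, req->best_parent_rate */
	rounded = foo_round_to_hw_capability(req->rate, req->best_parent_rate);

	/* the result is written back into the request, not returned */
	req->rate = rounded;

	return 0;	/* or a negative errno if no rate fits */
}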
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index 6d31cd54d7cf..57228e88e81d 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -136,7 +136,7 @@
#define VC5_MAX_FOD_NUM 4
/* flags to describe chip features */
-/* chip has built-in oscilator */
+/* chip has built-in oscillator */
#define VC5_HAS_INTERNAL_XTAL BIT(0)
/* chip has PFD frequency doubler */
#define VC5_HAS_PFD_FREQ_DBL BIT(1)
@@ -304,11 +304,11 @@ static unsigned long vc5_dbl_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long vc5_dbl_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_dbl_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
-	if ((*parent_rate == rate) || ((*parent_rate * 2) == rate))
-		return rate;
+	if ((req->best_parent_rate == req->rate) ||
+	    ((req->best_parent_rate * 2) == req->rate))
+		return 0;
else
return -EINVAL;
}
@@ -332,7 +332,7 @@ static int vc5_dbl_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_dbl_ops = {
.recalc_rate = vc5_dbl_recalc_rate,
- .round_rate = vc5_dbl_round_rate,
+ .determine_rate = vc5_dbl_determine_rate,
.set_rate = vc5_dbl_set_rate,
};
@@ -363,24 +363,29 @@ static unsigned long vc5_pfd_recalc_rate(struct clk_hw *hw,
return parent_rate / VC5_REF_DIVIDER_REF_DIV(div);
}
-static long vc5_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_pfd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long idiv;
/* PLL cannot operate with input clock above 50 MHz. */
- if (rate > 50000000)
+ if (req->rate > 50000000)
return -EINVAL;
/* CLKIN within range of PLL input, feed directly to PLL. */
- if (*parent_rate <= 50000000)
- return *parent_rate;
+ if (req->best_parent_rate <= 50000000) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
- idiv = DIV_ROUND_UP(*parent_rate, rate);
+ idiv = DIV_ROUND_UP(req->best_parent_rate, req->rate);
if (idiv > 127)
return -EINVAL;
- return *parent_rate / idiv;
+ req->rate = req->best_parent_rate / idiv;
+
+ return 0;
}
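
Note: a quick standalone check of the input-divider math above. idiv is
rounded up, so the divided reference can only land at or below the
requested input rate, keeping it within the 50 MHz PLL input limit
(values illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long parent = 125000000;	/* 125 MHz CLKIN */
	unsigned long rate = 50000000;		/* requested PLL input */
	unsigned long idiv = DIV_ROUND_UP(parent, rate);	/* 2.5 -> 3 */

	printf("idiv=%lu -> %lu Hz\n", idiv, parent / idiv);	/* 41666666 */
	return 0;
}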
static int vc5_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -420,7 +425,7 @@ static int vc5_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_pfd_ops = {
.recalc_rate = vc5_pfd_recalc_rate,
- .round_rate = vc5_pfd_round_rate,
+ .determine_rate = vc5_pfd_determine_rate,
.set_rate = vc5_pfd_set_rate,
};
@@ -444,30 +449,32 @@ static unsigned long vc5_pll_recalc_rate(struct clk_hw *hw,
return (parent_rate * div_int) + ((parent_rate * div_frc) >> 24);
}
-static long vc5_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
u32 div_int;
u64 div_frc;
- rate = clamp(rate, VC5_PLL_VCO_MIN, vc5->chip_info->vco_max);
+ req->rate = clamp(req->rate, VC5_PLL_VCO_MIN, vc5->chip_info->vco_max);
/* Determine integer part, which is 12 bit wide */
- div_int = rate / *parent_rate;
+ div_int = req->rate / req->best_parent_rate;
if (div_int > 0xfff)
- rate = *parent_rate * 0xfff;
+ req->rate = req->best_parent_rate * 0xfff;
/* Determine best fractional part, which is 24 bit wide */
- div_frc = rate % *parent_rate;
+ div_frc = req->rate % req->best_parent_rate;
div_frc *= BIT(24) - 1;
- do_div(div_frc, *parent_rate);
+ do_div(div_frc, req->best_parent_rate);
hwdata->div_int = div_int;
hwdata->div_frc = (u32)div_frc;
- return (*parent_rate * div_int) + ((*parent_rate * div_frc) >> 24);
+ req->rate = (req->best_parent_rate * div_int) + ((req->best_parent_rate * div_frc) >> 24);
+
+ return 0;
}
static int vc5_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -488,7 +495,7 @@ static int vc5_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_pll_ops = {
.recalc_rate = vc5_pll_recalc_rate,
- .round_rate = vc5_pll_round_rate,
+ .determine_rate = vc5_pll_determine_rate,
.set_rate = vc5_pll_set_rate,
};
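
Note: the 24-bit fractional math above, restated as a runnable check
(rates illustrative). The shift-based reconstruction truncates, so the
reported rate can land a hair under the request:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t parent = 25000000;		/* 25 MHz reference */
	uint64_t rate = 2915000000ULL;		/* requested VCO rate */

	uint32_t div_int = rate / parent;			/* 116 */
	uint64_t div_frc = (rate % parent) * ((1 << 24) - 1) / parent;

	/* same reconstruction as vc5_pll_determine_rate() */
	uint64_t out = parent * div_int + ((parent * div_frc) >> 24);

	printf("div_int=%u div_frc=%llu out=%llu\n", div_int,
	       (unsigned long long)div_frc, (unsigned long long)out);
	return 0;	/* out = 2914999999, one Hz under the request */
}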
@@ -520,17 +527,17 @@ static unsigned long vc5_fod_recalc_rate(struct clk_hw *hw,
return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
}
-static long vc5_fod_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_fod_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
/* VCO frequency is divided by two before entering FOD */
- u32 f_in = *parent_rate / 2;
+ u32 f_in = req->best_parent_rate / 2;
u32 div_int;
u64 div_frc;
/* Determine integer part, which is 12 bit wide */
- div_int = f_in / rate;
+ div_int = f_in / req->rate;
/*
* WARNING: The clock chip does not output signal if the integer part
* of the divider is 0xfff and fractional part is non-zero.
@@ -538,18 +545,20 @@ static long vc5_fod_round_rate(struct clk_hw *hw, unsigned long rate,
*/
if (div_int > 0xffe) {
div_int = 0xffe;
- rate = f_in / div_int;
+ req->rate = f_in / div_int;
}
/* Determine best fractional part, which is 30 bit wide */
- div_frc = f_in % rate;
+ div_frc = f_in % req->rate;
div_frc <<= 24;
- do_div(div_frc, rate);
+ do_div(div_frc, req->rate);
hwdata->div_int = div_int;
hwdata->div_frc = (u32)div_frc;
- return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
+ req->rate = div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
+
+ return 0;
}
static int vc5_fod_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -589,7 +598,7 @@ static int vc5_fod_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_fod_ops = {
.recalc_rate = vc5_fod_recalc_rate,
- .round_rate = vc5_fod_round_rate,
+ .determine_rate = vc5_fod_determine_rate,
.set_rate = vc5_fod_set_rate,
};
diff --git a/drivers/clk/clk-versaclock7.c b/drivers/clk/clk-versaclock7.c
index f323263e32c3..adcc603e3259 100644
--- a/drivers/clk/clk-versaclock7.c
+++ b/drivers/clk/clk-versaclock7.c
@@ -900,17 +900,18 @@ static unsigned long vc7_fod_recalc_rate(struct clk_hw *hw, unsigned long parent
return fod_rate;
}
-static long vc7_fod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate)
+static int vc7_fod_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc7_fod_data *fod = container_of(hw, struct vc7_fod_data, hw);
unsigned long fod_rate;
pr_debug("%s - %s: requested rate: %lu, parent_rate: %lu\n",
- __func__, clk_hw_get_name(hw), rate, *parent_rate);
+ __func__, clk_hw_get_name(hw), req->rate, req->best_parent_rate);
- vc7_calc_fod_divider(rate, *parent_rate,
+ vc7_calc_fod_divider(req->rate, req->best_parent_rate,
&fod->fod_1st_int, &fod->fod_2nd_int, &fod->fod_frac);
- fod_rate = vc7_calc_fod_2nd_stage_rate(*parent_rate, fod->fod_1st_int,
+ fod_rate = vc7_calc_fod_2nd_stage_rate(req->best_parent_rate, fod->fod_1st_int,
fod->fod_2nd_int, fod->fod_frac);
pr_debug("%s - %s: fod_1st_int: %u, fod_2nd_int: %u, fod_frac: %llu\n",
@@ -918,7 +919,9 @@ static long vc7_fod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned l
fod->fod_1st_int, fod->fod_2nd_int, fod->fod_frac);
pr_debug("%s - %s rate: %lu\n", __func__, clk_hw_get_name(hw), fod_rate);
- return fod_rate;
+ req->rate = fod_rate;
+
+ return 0;
}
static int vc7_fod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
@@ -952,7 +955,7 @@ static int vc7_fod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long
static const struct clk_ops vc7_fod_ops = {
.recalc_rate = vc7_fod_recalc_rate,
- .round_rate = vc7_fod_round_rate,
+ .determine_rate = vc7_fod_determine_rate,
.set_rate = vc7_fod_set_rate,
};
@@ -978,21 +981,24 @@ static unsigned long vc7_iod_recalc_rate(struct clk_hw *hw, unsigned long parent
return iod_rate;
}
-static long vc7_iod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate)
+static int vc7_iod_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc7_iod_data *iod = container_of(hw, struct vc7_iod_data, hw);
unsigned long iod_rate;
pr_debug("%s - %s: requested rate: %lu, parent_rate: %lu\n",
- __func__, clk_hw_get_name(hw), rate, *parent_rate);
+ __func__, clk_hw_get_name(hw), req->rate, req->best_parent_rate);
- vc7_calc_iod_divider(rate, *parent_rate, &iod->iod_int);
- iod_rate = div64_u64(*parent_rate, iod->iod_int);
+ vc7_calc_iod_divider(req->rate, req->best_parent_rate, &iod->iod_int);
+ iod_rate = div64_u64(req->best_parent_rate, iod->iod_int);
pr_debug("%s - %s: iod_int: %u\n", __func__, clk_hw_get_name(hw), iod->iod_int);
pr_debug("%s - %s rate: %ld\n", __func__, clk_hw_get_name(hw), iod_rate);
- return iod_rate;
+ req->rate = iod_rate;
+
+ return 0;
}
static int vc7_iod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
@@ -1023,7 +1029,7 @@ static int vc7_iod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long
static const struct clk_ops vc7_iod_ops = {
.recalc_rate = vc7_iod_recalc_rate,
- .round_rate = vc7_iod_round_rate,
+ .determine_rate = vc7_iod_determine_rate,
.set_rate = vc7_iod_set_rate,
};
@@ -1257,7 +1263,7 @@ static const struct vc7_chip_info vc7_rc21008a_info = {
.num_outputs = 8,
};
-static struct regmap_range_cfg vc7_range_cfg[] = {
+static const struct regmap_range_cfg vc7_range_cfg[] = {
{
.range_min = 0,
.range_max = VC7_MAX_REG,
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index 2a74a713ad59..eae5b3fbfb82 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -128,30 +128,31 @@ static unsigned long vt8500_dclk_recalc_rate(struct clk_hw *hw,
return parent_rate / div;
}
-static long vt8500_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int vt8500_dclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_device *cdev = to_clk_device(hw);
u32 divisor;
- if (rate == 0)
+ if (req->rate == 0)
return 0;
- divisor = *prate / rate;
+ divisor = req->best_parent_rate / req->rate;
/* If prate / rate would be decimal, incr the divisor */
- if (rate * divisor < *prate)
+ if (req->rate * divisor < req->best_parent_rate)
divisor++;
/*
* If this is a request for SDMMC we have to adjust the divisor
* when >31 to use the fixed predivisor
*/
- if ((cdev->div_mask == 0x3F) && (divisor > 31)) {
+ if ((cdev->div_mask == 0x3F) && (divisor > 31))
divisor = 64 * ((divisor / 64) + 1);
- }
- return *prate / divisor;
+ req->rate = req->best_parent_rate / divisor;
+
+ return 0;
}
static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -202,7 +203,7 @@ static const struct clk_ops vt8500_gated_clk_ops = {
};
static const struct clk_ops vt8500_divisor_clk_ops = {
- .round_rate = vt8500_dclk_round_rate,
+ .determine_rate = vt8500_dclk_determine_rate,
.set_rate = vt8500_dclk_set_rate,
.recalc_rate = vt8500_dclk_recalc_rate,
};
@@ -211,7 +212,7 @@ static const struct clk_ops vt8500_gated_divisor_clk_ops = {
.enable = vt8500_dclk_enable,
.disable = vt8500_dclk_disable,
.is_enabled = vt8500_dclk_is_enabled,
- .round_rate = vt8500_dclk_round_rate,
+ .determine_rate = vt8500_dclk_determine_rate,
.set_rate = vt8500_dclk_set_rate,
.recalc_rate = vt8500_dclk_recalc_rate,
};
@@ -594,8 +595,8 @@ static int vtwm_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int vtwm_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pll *pll = to_clk_pll(hw);
u32 filter, mul, div1, div2;
@@ -604,33 +605,43 @@ static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
switch (pll->type) {
case PLL_TYPE_VT8500:
- ret = vt8500_find_pll_bits(rate, *prate, &mul, &div1);
+ ret = vt8500_find_pll_bits(req->rate, req->best_parent_rate,
+ &mul, &div1);
if (!ret)
- round_rate = VT8500_BITS_TO_FREQ(*prate, mul, div1);
+ round_rate = VT8500_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1);
break;
case PLL_TYPE_WM8650:
- ret = wm8650_find_pll_bits(rate, *prate, &mul, &div1, &div2);
+ ret = wm8650_find_pll_bits(req->rate, req->best_parent_rate,
+ &mul, &div1, &div2);
if (!ret)
- round_rate = WM8650_BITS_TO_FREQ(*prate, mul, div1, div2);
+ round_rate = WM8650_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1, div2);
break;
case PLL_TYPE_WM8750:
- ret = wm8750_find_pll_bits(rate, *prate, &filter, &mul, &div1, &div2);
+ ret = wm8750_find_pll_bits(req->rate, req->best_parent_rate,
+ &filter, &mul, &div1, &div2);
if (!ret)
- round_rate = WM8750_BITS_TO_FREQ(*prate, mul, div1, div2);
+ round_rate = WM8750_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1, div2);
break;
case PLL_TYPE_WM8850:
- ret = wm8850_find_pll_bits(rate, *prate, &mul, &div1, &div2);
+ ret = wm8850_find_pll_bits(req->rate, req->best_parent_rate,
+ &mul, &div1, &div2);
if (!ret)
- round_rate = WM8850_BITS_TO_FREQ(*prate, mul, div1, div2);
+ round_rate = WM8850_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1, div2);
break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
	if (ret)
		return ret;

-	return round_rate;
+	req->rate = round_rate;
+
+	return 0;
}
static unsigned long vtwm_pll_recalc_rate(struct clk_hw *hw,
@@ -665,7 +676,7 @@ static unsigned long vtwm_pll_recalc_rate(struct clk_hw *hw,
}
static const struct clk_ops vtwm_pll_ops = {
- .round_rate = vtwm_pll_round_rate,
+ .determine_rate = vtwm_pll_determine_rate,
.set_rate = vtwm_pll_set_rate,
.recalc_rate = vtwm_pll_recalc_rate,
};
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
index 34e9d4d541e2..263e927138c2 100644
--- a/drivers/clk/clk-wm831x.c
+++ b/drivers/clk/clk-wm831x.c
@@ -133,18 +133,20 @@ static unsigned long wm831x_fll_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long wm831x_fll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *unused)
+static int wm831x_fll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int best = 0;
int i;
for (i = 0; i < ARRAY_SIZE(wm831x_fll_auto_rates); i++)
- if (abs(wm831x_fll_auto_rates[i] - rate) <
- abs(wm831x_fll_auto_rates[best] - rate))
+ if (abs(wm831x_fll_auto_rates[i] - req->rate) <
+ abs(wm831x_fll_auto_rates[best] - req->rate))
best = i;
- return wm831x_fll_auto_rates[best];
+ req->rate = wm831x_fll_auto_rates[best];
+
+ return 0;
}
static int wm831x_fll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -214,7 +216,7 @@ static const struct clk_ops wm831x_fll_ops = {
.is_prepared = wm831x_fll_is_prepared,
.prepare = wm831x_fll_prepare,
.unprepare = wm831x_fll_unprepare,
- .round_rate = wm831x_fll_round_rate,
+ .determine_rate = wm831x_fll_determine_rate,
.recalc_rate = wm831x_fll_recalc_rate,
.set_rate = wm831x_fll_set_rate,
.get_parent = wm831x_fll_get_parent,
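
Note: a standalone rendering of the closest-match scan above (table
values illustrative, not the wm831x FLL table):

#include <stdio.h>
#include <stdlib.h>

static const long rates[] = { 2048000, 11289600, 12288000, 24576000 };

int main(void)
{
	long rate = 12000000;	/* requested rate */
	size_t best = 0;

	for (size_t i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		if (labs(rates[i] - rate) < labs(rates[best] - rate))
			best = i;

	printf("closest to %ld is %ld\n", rate, rates[best]);	/* 12288000 */
	return 0;
}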
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 96946a8e2854..92e39f3237c2 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -271,23 +271,28 @@ static unsigned long xgene_clk_pmd_recalc_rate(struct clk_hw *hw,
return ret;
}
-static long xgene_clk_pmd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int xgene_clk_pmd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
u64 ret, scale;
- if (!rate || rate >= *parent_rate)
- return *parent_rate;
+ if (!req->rate || req->rate >= req->best_parent_rate) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
/* freq = parent_rate * scaler / denom */
- ret = rate * fd->denom;
- scale = DIV_ROUND_UP_ULL(ret, *parent_rate);
+ ret = req->rate * fd->denom;
+ scale = DIV_ROUND_UP_ULL(ret, req->best_parent_rate);
- ret = (u64)*parent_rate * scale;
+ ret = (u64)req->best_parent_rate * scale;
do_div(ret, fd->denom);
- return ret;
+ req->rate = ret;
+
+ return 0;
}
static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -333,7 +338,7 @@ static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops xgene_clk_pmd_ops = {
.recalc_rate = xgene_clk_pmd_recalc_rate,
- .round_rate = xgene_clk_pmd_round_rate,
+ .determine_rate = xgene_clk_pmd_determine_rate,
.set_rate = xgene_clk_pmd_set_rate,
};
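
Note: standalone check of the rounding above. The scaler is rounded up
and the final division truncates, so the result never drops below the
request (denominator value illustrative):

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP_ULL(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t parent = 500000000, denom = 256;	/* denom: illustrative */
	uint64_t rate = 123456789;

	uint64_t scale = DIV_ROUND_UP_ULL(rate * denom, parent);	/* 64 */
	uint64_t out = parent * scale / denom;	/* freq = parent * scale / denom */

	printf("scale=%llu out=%llu\n",
	       (unsigned long long)scale, (unsigned long long)out);	/* 125000000 */
	return 0;
}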
@@ -593,23 +598,25 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return parent_rate / divider_save;
}
-static long xgene_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int xgene_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct xgene_clk *pclk = to_xgene_clk(hw);
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
u32 divider;
if (pclk->param.divider_reg) {
/* Let's compute the divider */
- if (rate > parent_rate)
- rate = parent_rate;
- divider = parent_rate / rate; /* Rounded down */
+ if (req->rate > parent_rate)
+ req->rate = parent_rate;
+ divider = parent_rate / req->rate; /* Rounded down */
} else {
divider = 1;
}
- return parent_rate / divider;
+ req->rate = parent_rate / divider;
+
+ return 0;
}
static const struct clk_ops xgene_clk_ops = {
@@ -618,7 +625,7 @@ static const struct clk_ops xgene_clk_ops = {
.is_enabled = xgene_clk_is_enabled,
.recalc_rate = xgene_clk_recalc_rate,
.set_rate = xgene_clk_set_rate,
- .round_rate = xgene_clk_round_rate,
+ .determine_rate = xgene_clk_determine_rate,
};
static struct clk *xgene_register_clk(struct device *dev,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 0565c87656cf..85d2f2481acf 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -6,21 +6,24 @@
* Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
*/
+#include <linux/clk/clk-conf.h>
+#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/clk/clk-conf.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
+#include <linux/device.h>
#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
#include <linux/list.h>
-#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/device.h>
-#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/stringhash.h>
#include "clk.h"
@@ -33,6 +36,9 @@ static struct task_struct *enable_owner;
static int prepare_refcnt;
static int enable_refcnt;
+#define CLK_HASH_BITS 9
+static DEFINE_HASHTABLE(clk_hashtable, CLK_HASH_BITS);
+
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
@@ -87,6 +93,7 @@ struct clk_core {
struct clk_duty duty;
struct hlist_head children;
struct hlist_node child_node;
+ struct hlist_node hashtable_node;
struct hlist_head clks;
unsigned int notifier_count;
#ifdef CONFIG_DEBUG_FS
@@ -365,6 +372,18 @@ const char *clk_hw_get_name(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);
+struct device *clk_hw_get_dev(const struct clk_hw *hw)
+{
+ return hw->core->dev;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_dev);
+
+struct device_node *clk_hw_get_of_node(const struct clk_hw *hw)
+{
+ return hw->core->of_node;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_of_node);
+
struct clk_hw *__clk_get_hw(struct clk *clk)
{
return !clk ? NULL : clk->core->hw;
@@ -383,45 +402,20 @@ struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);
-static struct clk_core *__clk_lookup_subtree(const char *name,
- struct clk_core *core)
-{
- struct clk_core *child;
- struct clk_core *ret;
-
- if (!strcmp(core->name, name))
- return core;
-
- hlist_for_each_entry(child, &core->children, child_node) {
- ret = __clk_lookup_subtree(name, child);
- if (ret)
- return ret;
- }
-
- return NULL;
-}
-
static struct clk_core *clk_core_lookup(const char *name)
{
- struct clk_core *root_clk;
- struct clk_core *ret;
+ struct clk_core *core;
+ u32 hash;
if (!name)
return NULL;
- /* search the 'proper' clk tree first */
- hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
- ret = __clk_lookup_subtree(name, root_clk);
- if (ret)
- return ret;
- }
+ hash = full_name_hash(NULL, name, strlen(name));
- /* if not found, then search the orphan tree */
- hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
- ret = __clk_lookup_subtree(name, root_clk);
- if (ret)
- return ret;
- }
+ /* search the hashtable */
+ hash_for_each_possible(clk_hashtable, core, hashtable_node, hash)
+ if (!strcmp(core->name, name))
+ return core;
return NULL;
}
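
Note: a standalone sketch of the lookup strategy above. The kernel uses
full_name_hash() into a 512-bucket DEFINE_HASHTABLE() and strcmp()s
only within the matching bucket; this toy version uses 8 buckets and a
trivial hash just to show the shape:

#include <stdio.h>
#include <string.h>

#define NBUCKETS 8

struct clk_entry {
	const char *name;
	struct clk_entry *next;	/* bucket chain, like hlist */
};

static struct clk_entry *buckets[NBUCKETS];

static unsigned int toy_hash(const char *s)
{
	unsigned int h = 0;

	while (*s)
		h = h * 31 + (unsigned char)*s++;
	return h % NBUCKETS;
}

static void add(struct clk_entry *e)
{
	unsigned int h = toy_hash(e->name);

	e->next = buckets[h];
	buckets[h] = e;
}

static struct clk_entry *lookup(const char *name)
{
	for (struct clk_entry *e = buckets[toy_hash(name)]; e; e = e->next)
		if (!strcmp(e->name, name))
			return e;
	return NULL;	/* hash hit but no name match, or empty bucket */
}

int main(void)
{
	struct clk_entry a = { .name = "osc24m" }, b = { .name = "pll1" };

	add(&a);
	add(&b);
	printf("%s\n", lookup("pll1") ? "found" : "missing");
	return 0;
}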
@@ -4001,6 +3995,8 @@ static int __clk_core_init(struct clk_core *core)
hlist_add_head(&core->child_node, &clk_orphan_list);
core->orphan = true;
}
+ hash_add(clk_hashtable, &core->hashtable_node,
+ full_name_hash(NULL, core->name, strlen(core->name)));
/*
* Set clk's accuracy. The preferred method is to use
@@ -4077,6 +4073,7 @@ out:
clk_pm_runtime_put(core);
unlock:
if (ret) {
+ hash_del(&core->hashtable_node);
hlist_del_init(&core->child_node);
core->hw->core = NULL;
}
@@ -4598,6 +4595,7 @@ void clk_unregister(struct clk *clk)
clk_core_evict_parent_cache(clk->core);
+ hash_del(&clk->core->hashtable_node);
hlist_del_init(&clk->core->child_node);
if (clk->core->prepare_count)
diff --git a/drivers/clk/clk_test.c b/drivers/clk/clk_test.c
index f08feeaa3750..a268d7b5d4cb 100644
--- a/drivers/clk/clk_test.c
+++ b/drivers/clk/clk_test.c
@@ -292,7 +292,7 @@ static void clk_test_set_set_get_rate(struct kunit *test)
}
/*
- * Test that clk_round_rate and clk_set_rate are consitent and will
+ * Test that clk_round_rate and clk_set_rate are consistent and will
* return the same frequency.
*/
static void clk_test_round_set_get_rate(struct kunit *test)
@@ -2794,49 +2794,49 @@ static struct kunit_suite clk_register_clk_parent_data_of_suite = {
};
/**
- * struct clk_register_clk_parent_data_device_ctx - Context for clk_parent_data device tests
- * @dev: device of clk under test
- * @hw: clk_hw for clk under test
+ * struct platform_driver_dev_ctx - Context to stash platform device
+ * @dev: device under test
* @pdrv: driver to attach to find @dev
*/
-struct clk_register_clk_parent_data_device_ctx {
+struct platform_driver_dev_ctx {
struct device *dev;
- struct clk_hw hw;
struct platform_driver pdrv;
};
-static inline struct clk_register_clk_parent_data_device_ctx *
-clk_register_clk_parent_data_driver_to_test_context(struct platform_device *pdev)
+static inline struct platform_driver_dev_ctx *
+pdev_to_platform_driver_dev_ctx(struct platform_device *pdev)
{
return container_of(to_platform_driver(pdev->dev.driver),
- struct clk_register_clk_parent_data_device_ctx, pdrv);
+ struct platform_driver_dev_ctx, pdrv);
}
-static int clk_register_clk_parent_data_device_probe(struct platform_device *pdev)
+static int kunit_platform_driver_dev_probe(struct platform_device *pdev)
{
- struct clk_register_clk_parent_data_device_ctx *ctx;
+ struct platform_driver_dev_ctx *ctx;
- ctx = clk_register_clk_parent_data_driver_to_test_context(pdev);
+ ctx = pdev_to_platform_driver_dev_ctx(pdev);
ctx->dev = &pdev->dev;
return 0;
}
-static void clk_register_clk_parent_data_device_driver(struct kunit *test)
+static struct device *
+kunit_of_platform_driver_dev(struct kunit *test, const struct of_device_id *match_table)
{
- struct clk_register_clk_parent_data_device_ctx *ctx = test->priv;
- static const struct of_device_id match_table[] = {
- { .compatible = "test,clk-parent-data" },
- { }
- };
+ struct platform_driver_dev_ctx *ctx;
- ctx->pdrv.probe = clk_register_clk_parent_data_device_probe;
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ ctx->pdrv.probe = kunit_platform_driver_dev_probe;
ctx->pdrv.driver.of_match_table = match_table;
ctx->pdrv.driver.name = __func__;
ctx->pdrv.driver.owner = THIS_MODULE;
KUNIT_ASSERT_EQ(test, 0, kunit_platform_driver_register(test, &ctx->pdrv));
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->dev);
+
+ return ctx->dev;
}
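+
+/*
+ * Reviewer note: with the refactor above, a test binds its device in
+ * one call; the match table is supplied by the caller, e.g.:
+ *
+ *	static const struct of_device_id match[] = {
+ *		{ .compatible = "test,clk-parent-data" },
+ *		{ }
+ *	};
+ *	struct device *dev = kunit_of_platform_driver_dev(test, match);
+ *
+ * kunit_platform_driver_register() is test-managed, so the driver is
+ * unregistered automatically when the test exits.
+ */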
static const struct clk_register_clk_parent_data_test_case
@@ -2909,30 +2909,34 @@ KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_test,
*/
static void clk_register_clk_parent_data_device_test(struct kunit *test)
{
- struct clk_register_clk_parent_data_device_ctx *ctx;
+ struct device *dev;
+ struct clk_hw *hw;
const struct clk_register_clk_parent_data_test_case *test_param;
struct clk_hw *parent_hw;
struct clk_init_data init = { };
struct clk *expected_parent, *actual_parent;
+ static const struct of_device_id match_table[] = {
+ { .compatible = "test,clk-parent-data" },
+ { }
+ };
- ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
- test->priv = ctx;
-
- clk_register_clk_parent_data_device_driver(test);
+ dev = kunit_of_platform_driver_dev(test, match_table);
- expected_parent = clk_get_kunit(test, ctx->dev, "50");
+ expected_parent = clk_get_kunit(test, dev, "50");
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_parent);
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
test_param = test->param_value;
init.parent_data = &test_param->pdata;
init.num_parents = 1;
init.name = "parent_data_device_test_clk";
init.ops = &clk_dummy_single_parent_ops;
- ctx->hw.init = &init;
- KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
+ hw->init = &init;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, hw));
- parent_hw = clk_hw_get_parent(&ctx->hw);
+ parent_hw = clk_hw_get_parent(hw);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent_hw);
actual_parent = clk_hw_get_clk_kunit(test, parent_hw, __func__);
@@ -3016,18 +3020,19 @@ KUNIT_ARRAY_PARAM(clk_register_clk_parent_data_device_hw_test,
*/
static void clk_register_clk_parent_data_device_hw_test(struct kunit *test)
{
- struct clk_register_clk_parent_data_device_ctx *ctx;
+ struct device *dev;
+ struct clk_hw *hw;
const struct clk_register_clk_parent_data_test_case *test_param;
struct clk_dummy_context *parent;
struct clk_hw *parent_hw;
struct clk_parent_data pdata = { };
struct clk_init_data init = { };
+ static const struct of_device_id match_table[] = {
+ { .compatible = "test,clk-parent-data" },
+ { }
+ };
- ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
- test->priv = ctx;
-
- clk_register_clk_parent_data_device_driver(test);
+ dev = kunit_of_platform_driver_dev(test, match_table);
parent = kunit_kzalloc(test, sizeof(*parent), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
@@ -3036,7 +3041,10 @@ static void clk_register_clk_parent_data_device_hw_test(struct kunit *test)
parent_hw->init = CLK_HW_INIT_NO_PARENT("parent-clk",
&clk_dummy_rate_ops, 0);
- KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, parent_hw));
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, parent_hw));
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
test_param = test->param_value;
memcpy(&pdata, &test_param->pdata, sizeof(pdata));
@@ -3045,10 +3053,10 @@ static void clk_register_clk_parent_data_device_hw_test(struct kunit *test)
init.num_parents = 1;
init.ops = &clk_dummy_single_parent_ops;
init.name = "parent_data_device_hw_test_clk";
- ctx->hw.init = &init;
- KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, ctx->dev, &ctx->hw));
+ hw->init = &init;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, hw));
- KUNIT_EXPECT_PTR_EQ(test, parent_hw, clk_hw_get_parent(&ctx->hw));
+ KUNIT_EXPECT_PTR_EQ(test, parent_hw, clk_hw_get_parent(hw));
}
static struct kunit_case clk_register_clk_parent_data_device_test_cases[] = {
@@ -3395,8 +3403,148 @@ static struct kunit_suite clk_assigned_rates_suite = {
.init = clk_assigned_rates_test_init,
};
+static const struct clk_init_data clk_hw_get_dev_of_node_init_data = {
+ .name = "clk_hw_get_dev_of_node",
+ .ops = &empty_clk_ops,
+};
+
+/*
+ * Test that a clk registered with a struct device returns the device from
+ * clk_hw_get_dev() and the node from clk_hw_get_of_node()
+ */
+static void clk_hw_register_dev_get_dev_returns_dev(struct kunit *test)
+{
+ struct device *dev;
+ struct clk_hw *hw;
+ static const struct of_device_id match_table[] = {
+ { .compatible = "test,clk-hw-get-dev-of-node" },
+ { }
+ };
+
+ KUNIT_ASSERT_EQ(test, 0, of_overlay_apply_kunit(test, kunit_clk_hw_get_dev_of_node));
+
+ dev = kunit_of_platform_driver_dev(test, match_table);
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, dev, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, dev_of_node(dev), clk_hw_get_of_node(hw));
+}
+
+/*
+ * Test that a clk registered with a struct device that's not associated with
+ * an OF node returns the device from clk_hw_get_dev() and NULL from
+ * clk_hw_get_of_node()
+ */
+static void clk_hw_register_dev_no_node_get_dev_returns_dev(struct kunit *test)
+{
+ struct platform_device *pdev;
+ struct device *dev;
+ struct clk_hw *hw;
+
+ pdev = kunit_platform_device_alloc(test, "clk_hw_register_dev_no_node", -1);
+ KUNIT_ASSERT_NOT_NULL(test, pdev);
+ KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+ dev = &pdev->dev;
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, dev, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, dev, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_of_node(hw));
+}
+
+/*
+ * Test that a clk registered without a struct device returns NULL from
+ * both clk_hw_get_dev() and clk_hw_get_of_node()
+ */
+static void clk_hw_register_NULL_get_dev_of_node_returns_NULL(struct kunit *test)
+{
+ struct clk_hw *hw;
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+
+ KUNIT_ASSERT_EQ(test, 0, clk_hw_register_kunit(test, NULL, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_of_node(hw));
+}
+
+/*
+ * Test that a clk registered with an of_node returns the node from
+ * clk_hw_get_of_node() and NULL from clk_hw_get_dev()
+ */
+static void of_clk_hw_register_node_get_of_node_returns_node(struct kunit *test)
+{
+ struct device_node *np;
+ struct clk_hw *hw;
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ KUNIT_ASSERT_EQ(test, 0, of_overlay_apply_kunit(test, kunit_clk_hw_get_dev_of_node));
+
+ np = of_find_compatible_node(NULL, NULL, "test,clk-hw-get-dev-of-node");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, np);
+ of_node_put_kunit(test, np);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+ KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, np, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, np, clk_hw_get_of_node(hw));
+}
+
+/*
+ * Test that a clk registered without an of_node returns NULL from both
+ * clk_hw_get_of_node() and clk_hw_get_dev()
+ */
+static void of_clk_hw_register_NULL_get_of_node_returns_NULL(struct kunit *test)
+{
+ struct clk_hw *hw;
+
+ hw = kunit_kzalloc(test, sizeof(*hw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hw);
+
+ hw->init = &clk_hw_get_dev_of_node_init_data;
+ KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, NULL, hw));
+
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_dev(hw));
+ KUNIT_EXPECT_PTR_EQ(test, NULL, clk_hw_get_of_node(hw));
+}
+
+static struct kunit_case clk_hw_get_dev_of_node_test_cases[] = {
+ KUNIT_CASE(clk_hw_register_dev_get_dev_returns_dev),
+ KUNIT_CASE(clk_hw_register_dev_no_node_get_dev_returns_dev),
+ KUNIT_CASE(clk_hw_register_NULL_get_dev_of_node_returns_NULL),
+ KUNIT_CASE(of_clk_hw_register_node_get_of_node_returns_node),
+ KUNIT_CASE(of_clk_hw_register_NULL_get_of_node_returns_NULL),
+ {}
+};
+
+/*
+ * Test suite to verify clk_hw_get_dev() and clk_hw_get_of_node() when clk
+ * registered with clk_hw_register() and of_clk_hw_register()
+ */
+static struct kunit_suite clk_hw_get_dev_of_node_test_suite = {
+ .name = "clk_hw_get_dev_of_node_test_suite",
+ .test_cases = clk_hw_get_dev_of_node_test_cases,
+};
+
kunit_test_suites(
&clk_assigned_rates_suite,
+ &clk_hw_get_dev_of_node_test_suite,
&clk_leaf_mux_set_rate_parent_test_suite,
&clk_test_suite,
&clk_multiple_parents_mux_test_suite,
diff --git a/drivers/clk/davinci/pll.h b/drivers/clk/davinci/pll.h
index 20bfcec2d3b5..ad286ba4ce0c 100644
--- a/drivers/clk/davinci/pll.h
+++ b/drivers/clk/davinci/pll.h
@@ -80,7 +80,7 @@ static const struct davinci_pll_sysclk_info n = { \
* @name: The name of the clock
* @parent_names: Array of names of the parent clocks
* @num_parents: Length of @parent_names
- * @table: Array of values to write to OCSEL[OCSRC] cooresponding to
+ * @table: Array of values to write to OCSEL[OCSRC] corresponding to
* @parent_names
* @ocsrc_mask: Bitmask for OCSEL[OCSRC]
*/
diff --git a/drivers/clk/davinci/psc-da850.c b/drivers/clk/davinci/psc-da850.c
index 5a18bca464cd..94081ab1e688 100644
--- a/drivers/clk/davinci/psc-da850.c
+++ b/drivers/clk/davinci/psc-da850.c
@@ -6,7 +6,6 @@
*/
#include <linux/clk-provider.h>
-#include <linux/reset-controller.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/init.h>
@@ -66,14 +65,8 @@ LPSC_CLKDEV3(ecap_clkdev, "fck", "ecap.0",
"fck", "ecap.1",
"fck", "ecap.2");
-static struct reset_control_lookup da850_psc0_reset_lookup_table[] = {
- RESET_LOOKUP("da850-psc0", 15, "davinci-rproc.0", NULL),
-};
-
static int da850_psc0_init(struct device *dev, void __iomem *base)
{
- reset_controller_add_lookup(da850_psc0_reset_lookup_table,
- ARRAY_SIZE(da850_psc0_reset_lookup_table));
return davinci_psc_register_clocks(dev, da850_psc0_info, 16, base);
}
diff --git a/drivers/clk/davinci/psc.c b/drivers/clk/davinci/psc.c
index b48322176c21..f3ee9397bb0c 100644
--- a/drivers/clk/davinci/psc.c
+++ b/drivers/clk/davinci/psc.c
@@ -277,6 +277,11 @@ davinci_lpsc_clk_register(struct device *dev, const char *name,
lpsc->pm_domain.name = devm_kasprintf(dev, GFP_KERNEL, "%s: %s",
best_dev_name(dev), name);
+ if (!lpsc->pm_domain.name) {
+ clk_hw_unregister(&lpsc->hw);
+ kfree(lpsc);
+ return ERR_PTR(-ENOMEM);
+ }
lpsc->pm_domain.attach_dev = davinci_psc_genpd_attach_dev;
lpsc->pm_domain.detach_dev = davinci_psc_genpd_detach_dev;
lpsc->pm_domain.flags = GENPD_FLAG_PM_CLK;
diff --git a/drivers/clk/hisilicon/clk-hi3660-stub.c b/drivers/clk/hisilicon/clk-hi3660-stub.c
index 3a653d54bee0..7c8b00ee6019 100644
--- a/drivers/clk/hisilicon/clk-hi3660-stub.c
+++ b/drivers/clk/hisilicon/clk-hi3660-stub.c
@@ -34,7 +34,7 @@
.num_parents = 0, \
.flags = CLK_GET_RATE_NOCACHE, \
}, \
- },
+ }
#define to_stub_clk(_hw) container_of(_hw, struct hi3660_stub_clk, hw)
@@ -67,14 +67,14 @@ static unsigned long hi3660_stub_clk_recalc_rate(struct clk_hw *hw,
return stub_clk->rate;
}
-static long hi3660_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int hi3660_stub_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/*
* LPM3 handles rate rounding so just return whatever
* rate is requested.
*/
- return rate;
+ return 0;
}
static int hi3660_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -97,15 +97,15 @@ static int hi3660_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops hi3660_stub_clk_ops = {
.recalc_rate = hi3660_stub_clk_recalc_rate,
- .round_rate = hi3660_stub_clk_round_rate,
+ .determine_rate = hi3660_stub_clk_determine_rate,
.set_rate = hi3660_stub_clk_set_rate,
};
static struct hi3660_stub_clk hi3660_stub_clks[HI3660_CLK_STUB_NUM] = {
- DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER0, 0x0001030A, "cpu-cluster.0")
- DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER1, 0x0002030A, "cpu-cluster.1")
- DEFINE_CLK_STUB(HI3660_CLK_STUB_GPU, 0x0003030A, "clk-g3d")
- DEFINE_CLK_STUB(HI3660_CLK_STUB_DDR, 0x00040309, "clk-ddrc")
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER0, 0x0001030A, "cpu-cluster.0"),
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER1, 0x0002030A, "cpu-cluster.1"),
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_GPU, 0x0003030A, "clk-g3d"),
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_DDR, 0x00040309, "clk-ddrc"),
};
static struct clk_hw *hi3660_stub_clk_hw_get(struct of_phandle_args *clkspec,
diff --git a/drivers/clk/hisilicon/clk-hi6220-stub.c b/drivers/clk/hisilicon/clk-hi6220-stub.c
index a8319795ed1c..bf99cfafafa0 100644
--- a/drivers/clk/hisilicon/clk-hi6220-stub.c
+++ b/drivers/clk/hisilicon/clk-hi6220-stub.c
@@ -161,11 +161,11 @@ static int hi6220_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static long hi6220_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int hi6220_stub_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct hi6220_stub_clk *stub_clk = to_stub_clk(hw);
- unsigned long new_rate = rate / 1000; /* kHz */
+ unsigned long new_rate = req->rate / 1000; /* kHz */
switch (stub_clk->id) {
case HI6220_STUB_ACPU0:
@@ -181,12 +181,14 @@ static long hi6220_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate,
break;
}
- return new_rate;
+ req->rate = new_rate;
+
+ return 0;
}
static const struct clk_ops hi6220_stub_clk_ops = {
.recalc_rate = hi6220_stub_clk_recalc_rate,
- .round_rate = hi6220_stub_clk_round_rate,
+ .determine_rate = hi6220_stub_clk_determine_rate,
.set_rate = hi6220_stub_clk_set_rate,
};
diff --git a/drivers/clk/hisilicon/clkdivider-hi6220.c b/drivers/clk/hisilicon/clkdivider-hi6220.c
index 5348bafe694f..6bae18a84cb6 100644
--- a/drivers/clk/hisilicon/clkdivider-hi6220.c
+++ b/drivers/clk/hisilicon/clkdivider-hi6220.c
@@ -55,13 +55,15 @@ static unsigned long hi6220_clkdiv_recalc_rate(struct clk_hw *hw,
CLK_DIVIDER_ROUND_CLOSEST, dclk->width);
}
-static long hi6220_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int hi6220_clkdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct hi6220_clk_divider *dclk = to_hi6220_clk_divider(hw);
- return divider_round_rate(hw, rate, prate, dclk->table,
- dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, dclk->table,
+ dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
}
static int hi6220_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -93,7 +95,7 @@ static int hi6220_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops hi6220_clkdiv_ops = {
.recalc_rate = hi6220_clkdiv_recalc_rate,
- .round_rate = hi6220_clkdiv_round_rate,
+ .determine_rate = hi6220_clkdiv_determine_rate,
.set_rate = hi6220_clkdiv_set_rate,
};
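
Note: a toy rendering of what CLK_DIVIDER_ROUND_CLOSEST asks
divider_round_rate() to do above (not the kernel implementation):
compare the two candidate dividers around the request and keep the
nearer output:

#include <stdio.h>

int main(void)
{
	unsigned long parent = 1200000000, rate = 90000000;
	unsigned long div = parent / rate;	/* 13, rounds the divider down */
	unsigned long hi = parent / div;	/* 92307692, overshoots */
	unsigned long lo = parent / (div + 1);	/* 85714285, undershoots */

	unsigned long best = (rate - lo < hi - rate) ? lo : hi;

	printf("closest to %lu Hz is %lu Hz\n", rate, best);	/* 92307692 */
	return 0;
}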
diff --git a/drivers/clk/hisilicon/clkgate-separated.c b/drivers/clk/hisilicon/clkgate-separated.c
index 90d858522967..21d4297f3225 100644
--- a/drivers/clk/hisilicon/clkgate-separated.c
+++ b/drivers/clk/hisilicon/clkgate-separated.c
@@ -17,9 +17,9 @@
#include "clk.h"
/* clock separated gate register offset */
-#define CLKGATE_SEPERATED_ENABLE 0x0
-#define CLKGATE_SEPERATED_DISABLE 0x4
-#define CLKGATE_SEPERATED_STATUS 0x8
+#define CLKGATE_SEPARATED_ENABLE 0x0
+#define CLKGATE_SEPARATED_DISABLE 0x4
+#define CLKGATE_SEPARATED_STATUS 0x8
struct clkgate_separated {
struct clk_hw hw;
@@ -40,7 +40,7 @@ static int clkgate_separated_enable(struct clk_hw *hw)
spin_lock_irqsave(sclk->lock, flags);
reg = BIT(sclk->bit_idx);
writel_relaxed(reg, sclk->enable);
- readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS);
+ readl_relaxed(sclk->enable + CLKGATE_SEPARATED_STATUS);
if (sclk->lock)
spin_unlock_irqrestore(sclk->lock, flags);
return 0;
@@ -56,8 +56,8 @@ static void clkgate_separated_disable(struct clk_hw *hw)
if (sclk->lock)
spin_lock_irqsave(sclk->lock, flags);
reg = BIT(sclk->bit_idx);
- writel_relaxed(reg, sclk->enable + CLKGATE_SEPERATED_DISABLE);
- readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS);
+ writel_relaxed(reg, sclk->enable + CLKGATE_SEPARATED_DISABLE);
+ readl_relaxed(sclk->enable + CLKGATE_SEPARATED_STATUS);
if (sclk->lock)
spin_unlock_irqrestore(sclk->lock, flags);
}
@@ -68,7 +68,7 @@ static int clkgate_separated_is_enabled(struct clk_hw *hw)
u32 reg;
sclk = container_of(hw, struct clkgate_separated, hw);
- reg = readl_relaxed(sclk->enable + CLKGATE_SEPERATED_STATUS);
+ reg = readl_relaxed(sclk->enable + CLKGATE_SEPARATED_STATUS);
reg &= BIT(sclk->bit_idx);
return reg ? 1 : 0;
@@ -100,7 +100,7 @@ struct clk *hisi_register_clkgate_sep(struct device *dev, const char *name,
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
- sclk->enable = reg + CLKGATE_SEPERATED_ENABLE;
+ sclk->enable = reg + CLKGATE_SEPARATED_ENABLE;
sclk->bit_idx = bit_idx;
sclk->flags = clk_gate_flags;
sclk->hw.init = &init;
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 6ff6d934848a..b292e7ca5c24 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -105,6 +105,7 @@ config CLK_IMX8ULP
tristate "IMX8ULP CCM Clock Driver"
depends on ARCH_MXC || COMPILE_TEST
select MXC_CLK
+ select AUXILIARY_BUS
help
Build the driver for i.MX8ULP CCM Clock Driver
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index 03f2b2a1ab63..208b46873a18 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -41,6 +41,7 @@ clk-imx-lpcg-scu-$(CONFIG_CLK_IMX8QXP) += clk-lpcg-scu.o clk-imx8qxp-lpcg.o
clk-imx-acm-$(CONFIG_CLK_IMX8QXP) = clk-imx8-acm.o
obj-$(CONFIG_CLK_IMX8ULP) += clk-imx8ulp.o
+obj-$(CONFIG_CLK_IMX8ULP) += clk-imx8ulp-sim-lpav.o
obj-$(CONFIG_CLK_IMX1) += clk-imx1.o
obj-$(CONFIG_CLK_IMX25) += clk-imx25.o
diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c
index f163df952ccc..eb27c6fee359 100644
--- a/drivers/clk/imx/clk-busy.c
+++ b/drivers/clk/imx/clk-busy.c
@@ -46,12 +46,12 @@ static unsigned long clk_busy_divider_recalc_rate(struct clk_hw *hw,
return busy->div_ops->recalc_rate(&busy->div.hw, parent_rate);
}
-static long clk_busy_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_busy_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_busy_divider *busy = to_clk_busy_divider(hw);
- return busy->div_ops->round_rate(&busy->div.hw, rate, prate);
+ return busy->div_ops->determine_rate(&busy->div.hw, req);
}
static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -69,7 +69,7 @@ static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_busy_divider_ops = {
.recalc_rate = clk_busy_divider_recalc_rate,
- .round_rate = clk_busy_divider_round_rate,
+ .determine_rate = clk_busy_divider_determine_rate,
.set_rate = clk_busy_divider_set_rate,
};
diff --git a/drivers/clk/imx/clk-composite-7ulp.c b/drivers/clk/imx/clk-composite-7ulp.c
index 8ed2e0ad2769..37d2fc197be6 100644
--- a/drivers/clk/imx/clk-composite-7ulp.c
+++ b/drivers/clk/imx/clk-composite-7ulp.c
@@ -7,6 +7,7 @@
#include <linux/bits.h>
#include <linux/clk-provider.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -36,6 +37,9 @@ static int pcc_gate_enable(struct clk_hw *hw)
if (ret)
return ret;
+	/* Make sure the IP's clock is ready before releasing the reset */
+ udelay(1);
+
spin_lock_irqsave(gate->lock, flags);
/*
* release the sw reset for peripherals associated with
@@ -47,6 +51,15 @@ static int pcc_gate_enable(struct clk_hw *hw)
spin_unlock_irqrestore(gate->lock, flags);
+ /*
+	 * Read back the register to make sure the previous write has
+	 * reached the target HW register. For IPs like the GPU, after
+	 * deasserting the reset we also need to wait a while for the
+	 * synchronous reset to complete.
+ */
+ readl(gate->reg);
+ udelay(1);
+
return 0;
}
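+
+/*
+ * Reviewer note: the readl() above is the usual "read back to post the
+ * write" idiom for write-buffered interconnects:
+ *
+ *	writel(val, reg);	// may still sit in a write buffer
+ *	readl(reg);		// forces the write out to the device
+ *	udelay(1);		// device-side settling, per IP requirement
+ */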
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index f187582ba491..1467d0a1b934 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -73,21 +73,6 @@ static int imx8m_clk_composite_compute_dividers(unsigned long rate,
return ret;
}
-static long imx8m_clk_composite_divider_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
-{
- int prediv_value;
- int div_value;
-
- imx8m_clk_composite_compute_dividers(rate, *prate,
- &prediv_value, &div_value);
- rate = DIV_ROUND_UP(*prate, prediv_value);
-
- return DIV_ROUND_UP(rate, div_value);
-
-}
-
static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long parent_rate)
@@ -153,7 +138,6 @@ static int imx8m_divider_determine_rate(struct clk_hw *hw,
static const struct clk_ops imx8m_clk_composite_divider_ops = {
.recalc_rate = imx8m_clk_composite_divider_recalc_rate,
- .round_rate = imx8m_clk_composite_divider_round_rate,
.set_rate = imx8m_clk_composite_divider_set_rate,
.determine_rate = imx8m_divider_determine_rate,
};
diff --git a/drivers/clk/imx/clk-composite-93.c b/drivers/clk/imx/clk-composite-93.c
index 6c6c5a30f328..513d74a39d3b 100644
--- a/drivers/clk/imx/clk-composite-93.c
+++ b/drivers/clk/imx/clk-composite-93.c
@@ -98,12 +98,6 @@ imx93_clk_composite_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_
return clk_divider_ops.recalc_rate(hw, parent_rate);
}
-static long
-imx93_clk_composite_divider_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
-{
- return clk_divider_ops.round_rate(hw, rate, prate);
-}
-
static int
imx93_clk_composite_divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
@@ -141,7 +135,6 @@ static int imx93_clk_composite_divider_set_rate(struct clk_hw *hw, unsigned long
static const struct clk_ops imx93_clk_composite_divider_ops = {
.recalc_rate = imx93_clk_composite_divider_recalc_rate,
- .round_rate = imx93_clk_composite_divider_round_rate,
.determine_rate = imx93_clk_composite_divider_determine_rate,
.set_rate = imx93_clk_composite_divider_set_rate,
};
diff --git a/drivers/clk/imx/clk-cpu.c b/drivers/clk/imx/clk-cpu.c
index cb6ca4cf0535..43637cb61693 100644
--- a/drivers/clk/imx/clk-cpu.c
+++ b/drivers/clk/imx/clk-cpu.c
@@ -30,12 +30,14 @@ static unsigned long clk_cpu_recalc_rate(struct clk_hw *hw,
return clk_get_rate(cpu->div);
}
-static long clk_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_cpu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_cpu *cpu = to_clk_cpu(hw);
- return clk_round_rate(cpu->pll, rate);
+ req->rate = clk_round_rate(cpu->pll, req->rate);
+
+ return 0;
}
static int clk_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -66,7 +68,7 @@ static int clk_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_cpu_ops = {
.recalc_rate = clk_cpu_recalc_rate,
- .round_rate = clk_cpu_round_rate,
+ .determine_rate = clk_cpu_determine_rate,
.set_rate = clk_cpu_set_rate,
};
diff --git a/drivers/clk/imx/clk-fixup-div.c b/drivers/clk/imx/clk-fixup-div.c
index 100ca828b052..aa6addbeb5a8 100644
--- a/drivers/clk/imx/clk-fixup-div.c
+++ b/drivers/clk/imx/clk-fixup-div.c
@@ -18,7 +18,7 @@
* @fixup: a hook to fixup the write value
*
* The imx fixup divider clock is a subclass of basic clk_divider
- * with an addtional fixup hook.
+ * with an additional fixup hook.
*/
struct clk_fixup_div {
struct clk_divider divider;
@@ -41,12 +41,12 @@ static unsigned long clk_fixup_div_recalc_rate(struct clk_hw *hw,
return fixup_div->ops->recalc_rate(&fixup_div->divider.hw, parent_rate);
}
-static long clk_fixup_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_fixup_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fixup_div *fixup_div = to_clk_fixup_div(hw);
- return fixup_div->ops->round_rate(&fixup_div->divider.hw, rate, prate);
+ return fixup_div->ops->determine_rate(&fixup_div->divider.hw, req);
}
static int clk_fixup_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -81,7 +81,7 @@ static int clk_fixup_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_fixup_div_ops = {
.recalc_rate = clk_fixup_div_recalc_rate,
- .round_rate = clk_fixup_div_round_rate,
+ .determine_rate = clk_fixup_div_determine_rate,
.set_rate = clk_fixup_div_set_rate,
};
diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c
index b48701864ef0..418ac9fe2c26 100644
--- a/drivers/clk/imx/clk-fixup-mux.c
+++ b/drivers/clk/imx/clk-fixup-mux.c
@@ -17,7 +17,7 @@
* @fixup: a hook to fixup the write value
*
* The imx fixup multiplexer clock is a subclass of basic clk_mux
- * with an addtional fixup hook.
+ * with an additional fixup hook.
*/
struct clk_fixup_mux {
struct clk_mux mux;
diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c
index c703056fae85..eb668faaa38f 100644
--- a/drivers/clk/imx/clk-frac-pll.c
+++ b/drivers/clk/imx/clk-frac-pll.c
@@ -119,19 +119,19 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- u64 parent_rate = *prate;
+ u64 parent_rate = req->best_parent_rate;
u32 divff, divfi;
u64 temp64;
parent_rate *= 8;
- rate *= 2;
- temp64 = rate;
+ req->rate *= 2;
+ temp64 = req->rate;
do_div(temp64, parent_rate);
divfi = temp64;
- temp64 = rate - divfi * parent_rate;
+ temp64 = req->rate - divfi * parent_rate;
temp64 *= PLL_FRAC_DENOM;
do_div(temp64, parent_rate);
divff = temp64;
@@ -140,9 +140,11 @@ static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
temp64 *= divff;
do_div(temp64, PLL_FRAC_DENOM);
- rate = parent_rate * divfi + temp64;
+ req->rate = parent_rate * divfi + temp64;
+
+ req->rate = req->rate / 2;
- return rate / 2;
+ return 0;
}
/*
@@ -198,7 +200,7 @@ static const struct clk_ops clk_frac_pll_ops = {
.unprepare = clk_pll_unprepare,
.is_prepared = clk_pll_is_prepared,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
};
diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c
index 85771afd4698..090d60867250 100644
--- a/drivers/clk/imx/clk-fracn-gppll.c
+++ b/drivers/clk/imx/clk-fracn-gppll.c
@@ -134,8 +134,8 @@ imx_get_pll_settings(struct clk_fracn_gppll *pll, unsigned long rate)
return NULL;
}
-static long clk_fracn_gppll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_fracn_gppll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fracn_gppll *pll = to_clk_fracn_gppll(hw);
const struct imx_fracn_gppll_rate_table *rate_table = pll->rate_table;
@@ -143,11 +143,16 @@ static long clk_fracn_gppll_round_rate(struct clk_hw *hw, unsigned long rate,
/* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++)
- if (rate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
/* return minimum supported value */
- return rate_table[pll->rate_count - 1].rate;
+ req->rate = rate_table[pll->rate_count - 1].rate;
+
+ return 0;
}
static unsigned long clk_fracn_gppll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
@@ -345,7 +350,7 @@ static const struct clk_ops clk_fracn_gppll_ops = {
.unprepare = clk_fracn_gppll_unprepare,
.is_prepared = clk_fracn_gppll_is_prepared,
.recalc_rate = clk_fracn_gppll_recalc_rate,
- .round_rate = clk_fracn_gppll_round_rate,
+ .determine_rate = clk_fracn_gppll_determine_rate,
.set_rate = clk_fracn_gppll_set_rate,
};
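
Note: standalone demo of the descending-table policy above: take the
first entry at or below the request, else fall back to the table
minimum (rates illustrative, not from the driver):

#include <stdio.h>

static const unsigned long table[] = { 1000000000, 650000000, 480000000 };

int main(void)
{
	unsigned long rate = 700000000;
	unsigned long pick = table[2];	/* default: table minimum */

	for (unsigned int i = 0; i < 3; i++)
		if (rate >= table[i]) {
			pick = table[i];
			break;
		}

	printf("request %lu -> %lu\n", rate, pick);	/* 650000000 */
	return 0;
}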
diff --git a/drivers/clk/imx/clk-gate-exclusive.c b/drivers/clk/imx/clk-gate-exclusive.c
index 77342893bb71..7017e9d4e188 100644
--- a/drivers/clk/imx/clk-gate-exclusive.c
+++ b/drivers/clk/imx/clk-gate-exclusive.c
@@ -18,7 +18,7 @@
* gate clock
*
* The imx exclusive gate clock is a subclass of basic clk_gate
- * with an addtional mask to indicate which other gate bits in the same
+ * with an additional mask to indicate which other gate bits in the same
* register is mutually exclusive to this gate clock.
*/
struct clk_gate_exclusive {
diff --git a/drivers/clk/imx/clk-imx5.c b/drivers/clk/imx/clk-imx5.c
index b82044911603..9c5f489b3975 100644
--- a/drivers/clk/imx/clk-imx5.c
+++ b/drivers/clk/imx/clk-imx5.c
@@ -454,7 +454,7 @@ static void __init mx51_clocks_init(struct device_node *np)
* longer supported. Set to one for better power saving.
*
* The effect of not setting these bits is that MIPI clocks can't be
- * enabled without the IPU clock being enabled aswell.
+ * enabled without the IPU clock being enabled as well.
*/
val = readl(MXC_CCM_CCDR);
val |= 1 << 18;
diff --git a/drivers/clk/imx/clk-imx8-acm.c b/drivers/clk/imx/clk-imx8-acm.c
index c169fe53a35f..790f7e44b11e 100644
--- a/drivers/clk/imx/clk-imx8-acm.c
+++ b/drivers/clk/imx/clk-imx8-acm.c
@@ -22,7 +22,7 @@
* struct clk_imx_acm_pm_domains - structure for multi power domain
* @pd_dev: power domain device
* @pd_dev_link: power domain device link
- * @num_domains: power domain nummber
+ * @num_domains: power domain number
*/
struct clk_imx_acm_pm_domains {
struct device **pd_dev;
diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c
index 775f62dddb11..131702f2c9ec 100644
--- a/drivers/clk/imx/clk-imx8mp-audiomix.c
+++ b/drivers/clk/imx/clk-imx8mp-audiomix.c
@@ -230,50 +230,19 @@ struct clk_imx8mp_audiomix_priv {
#if IS_ENABLED(CONFIG_RESET_CONTROLLER)
-static void clk_imx8mp_audiomix_reset_unregister_adev(void *_adev)
-{
- struct auxiliary_device *adev = _adev;
-
- auxiliary_device_delete(adev);
- auxiliary_device_uninit(adev);
-}
-
-static void clk_imx8mp_audiomix_reset_adev_release(struct device *dev)
-{
- struct auxiliary_device *adev = to_auxiliary_dev(dev);
-
- kfree(adev);
-}
-
static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
struct clk_imx8mp_audiomix_priv *priv)
{
- struct auxiliary_device *adev __free(kfree) = NULL;
- int ret;
+ struct auxiliary_device *adev;
if (!of_property_present(dev->of_node, "#reset-cells"))
return 0;
- adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ adev = devm_auxiliary_device_create(dev, "reset", NULL);
if (!adev)
- return -ENOMEM;
-
- adev->name = "reset";
- adev->dev.parent = dev;
- adev->dev.release = clk_imx8mp_audiomix_reset_adev_release;
-
- ret = auxiliary_device_init(adev);
- if (ret)
- return ret;
+ return -ENODEV;
- ret = auxiliary_device_add(adev);
- if (ret) {
- auxiliary_device_uninit(adev);
- return ret;
- }
-
- return devm_add_action_or_reset(dev, clk_imx8mp_audiomix_reset_unregister_adev,
- no_free_ptr(adev));
+ return 0;
}
#else /* !CONFIG_RESET_CONTROLLER */
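
/*
 * Reviewer note: devm_auxiliary_device_create() collapses the
 * alloc/init/add/uninit sequence deleted above and ties teardown to
 * the parent's devres, so the custom release callback and
 * devm_add_action_or_reset() are no longer needed. It returns NULL on
 * failure, hence the -ENODEV translation.
 */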
diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c
index d0ccaa040225..1dae3410ee99 100644
--- a/drivers/clk/imx/clk-imx8qxp-lpcg.c
+++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c
@@ -267,7 +267,6 @@ static int imx_lpcg_parse_clks_from_dt(struct platform_device *pdev,
if (ret)
goto unreg;
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
diff --git a/drivers/clk/imx/clk-imx8ulp-sim-lpav.c b/drivers/clk/imx/clk-imx8ulp-sim-lpav.c
new file mode 100644
index 000000000000..990c95b89b75
--- /dev/null
+++ b/drivers/clk/imx/clk-imx8ulp-sim-lpav.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <dt-bindings/clock/imx8ulp-clock.h>
+
+#include <linux/auxiliary_bus.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define SYSCTRL0 0x8
+
+#define IMX8ULP_HIFI_CLK_GATE(gname, cname, pname, bidx) \
+ { \
+ .name = gname "_cg", \
+ .id = IMX8ULP_CLK_SIM_LPAV_HIFI_##cname, \
+ .parent = { .fw_name = pname }, \
+ .bit = bidx, \
+ }
+
+struct clk_imx8ulp_sim_lpav_data {
+ spinlock_t lock; /* shared by MUX, clock gate and reset */
+ unsigned long flags; /* for spinlock usage */
+ struct clk_hw_onecell_data clk_data; /* keep last */
+};
+
+struct clk_imx8ulp_sim_lpav_gate {
+ const char *name;
+ int id;
+ const struct clk_parent_data parent;
+ u8 bit;
+};
+
+static struct clk_imx8ulp_sim_lpav_gate gates[] = {
+ IMX8ULP_HIFI_CLK_GATE("hifi_core", CORE, "core", 17),
+ IMX8ULP_HIFI_CLK_GATE("hifi_pbclk", PBCLK, "bus", 18),
+ IMX8ULP_HIFI_CLK_GATE("hifi_plat", PLAT, "plat", 19)
+};
+
+static void clk_imx8ulp_sim_lpav_lock(void *arg) __acquires(&data->lock)
+{
+ struct clk_imx8ulp_sim_lpav_data *data = dev_get_drvdata(arg);
+
+ spin_lock_irqsave(&data->lock, data->flags);
+}
+
+static void clk_imx8ulp_sim_lpav_unlock(void *arg) __releases(&data->lock)
+{
+ struct clk_imx8ulp_sim_lpav_data *data = dev_get_drvdata(arg);
+
+ spin_unlock_irqrestore(&data->lock, data->flags);
+}
+
+static int clk_imx8ulp_sim_lpav_probe(struct platform_device *pdev)
+{
+ const struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .lock = clk_imx8ulp_sim_lpav_lock,
+ .unlock = clk_imx8ulp_sim_lpav_unlock,
+ .lock_arg = &pdev->dev,
+ };
+ struct clk_imx8ulp_sim_lpav_data *data;
+ struct auxiliary_device *adev;
+ struct regmap *regmap;
+ void __iomem *base;
+ struct clk_hw *hw;
+ int i, ret;
+
+ data = devm_kzalloc(&pdev->dev,
+ struct_size(data, clk_data.hws, ARRAY_SIZE(gates)),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, data);
+
+	/*
+	 * This lock is used directly by the clock gates and indirectly,
+	 * via the regmap API, by the reset and mux controllers.
+	 */
+ spin_lock_init(&data->lock);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(base),
+ "failed to ioremap base\n");
+ /*
+ * although the clock gate doesn't use the regmap API to modify the
+ * registers, we still need the regmap because of the reset auxiliary
+ * driver and the MUX drivers, which use the parent device's regmap
+ */
+ regmap = devm_regmap_init_mmio(&pdev->dev, base, &regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
+ "failed to initialize regmap\n");
+
+ data->clk_data.num = ARRAY_SIZE(gates);
+
+ for (i = 0; i < ARRAY_SIZE(gates); i++) {
+ hw = devm_clk_hw_register_gate_parent_data(&pdev->dev,
+ gates[i].name,
+ &gates[i].parent,
+ CLK_SET_RATE_PARENT,
+ base + SYSCTRL0,
+ gates[i].bit,
+ 0x0, &data->lock);
+ if (IS_ERR(hw))
+ return dev_err_probe(&pdev->dev, PTR_ERR(hw),
+ "failed to register %s gate\n",
+ gates[i].name);
+
+ data->clk_data.hws[i] = hw;
+ }
+
+ adev = devm_auxiliary_device_create(&pdev->dev, "reset", NULL);
+ if (!adev)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "failed to register aux reset\n");
+
+ ret = devm_of_clk_add_hw_provider(&pdev->dev,
+ of_clk_hw_onecell_get,
+ &data->clk_data);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to register clk hw provider\n");
+
+ /* used to probe MUX child device */
+ return devm_of_platform_populate(&pdev->dev);
+}
+
+static const struct of_device_id clk_imx8ulp_sim_lpav_of_match[] = {
+ { .compatible = "fsl,imx8ulp-sim-lpav" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, clk_imx8ulp_sim_lpav_of_match);
+
+static struct platform_driver clk_imx8ulp_sim_lpav_driver = {
+ .probe = clk_imx8ulp_sim_lpav_probe,
+ .driver = {
+ .name = "clk-imx8ulp-sim-lpav",
+ .of_match_table = clk_imx8ulp_sim_lpav_of_match,
+ },
+};
+module_platform_driver(clk_imx8ulp_sim_lpav_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("i.MX8ULP LPAV System Integration Module (SIM) clock driver");
+MODULE_AUTHOR("Laurentiu Mihalcea <laurentiu.mihalcea@nxp.com>");
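
The distinctive part of this new driver is its locking: one spinlock both serializes the clock gates' direct readl/writel accesses and, through the .lock/.unlock/.lock_arg hooks of regmap_config, every regmap access made on behalf of the reset and mux children. A condensed sketch of that pattern, with illustrative names:

#include <linux/regmap.h>
#include <linux/spinlock.h>

/* Illustrative context shared by the clk gates and the regmap users */
struct shared_ctx {
	spinlock_t lock;
	unsigned long flags;
};

static void shared_regmap_lock(void *arg) __acquires(&ctx->lock)
{
	struct shared_ctx *ctx = arg;

	spin_lock_irqsave(&ctx->lock, ctx->flags);
}

static void shared_regmap_unlock(void *arg) __releases(&ctx->lock)
{
	struct shared_ctx *ctx = arg;

	spin_unlock_irqrestore(&ctx->lock, ctx->flags);
}

Passing the same spinlock to devm_clk_hw_register_gate_parent_data(), as the probe function does, then makes the MMIO gate path and the regmap path of the child drivers mutually exclusive.
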
diff --git a/drivers/clk/imx/clk-imx95-blk-ctl.c b/drivers/clk/imx/clk-imx95-blk-ctl.c
index 25974947ad0c..56bed4471995 100644
--- a/drivers/clk/imx/clk-imx95-blk-ctl.c
+++ b/drivers/clk/imx/clk-imx95-blk-ctl.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2024 NXP
+ * Copyright 2024-2025 NXP
*/
+#include <dt-bindings/clock/nxp,imx94-clock.h>
#include <dt-bindings/clock/nxp,imx95-clock.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
@@ -35,6 +36,7 @@ struct imx95_blk_ctl {
void __iomem *base;
/* clock gate register */
u32 clk_reg_restore;
+ const struct imx95_blk_ctl_dev_data *pdata;
};
struct imx95_blk_ctl_clk_dev_data {
@@ -156,7 +158,7 @@ static const struct imx95_blk_ctl_dev_data camblk_dev_data = {
.clk_reg_offset = 0,
};
-static const struct imx95_blk_ctl_clk_dev_data lvds_clk_dev_data[] = {
+static const struct imx95_blk_ctl_clk_dev_data imx95_lvds_clk_dev_data[] = {
[IMX95_CLK_DISPMIX_LVDS_PHY_DIV] = {
.name = "ldb_phy_div",
.parent_names = (const char *[]){ "ldbpll", },
@@ -213,17 +215,21 @@ static const struct imx95_blk_ctl_clk_dev_data lvds_clk_dev_data[] = {
},
};
-static const struct imx95_blk_ctl_dev_data lvds_csr_dev_data = {
- .num_clks = ARRAY_SIZE(lvds_clk_dev_data),
- .clk_dev_data = lvds_clk_dev_data,
+static const struct imx95_blk_ctl_dev_data imx95_lvds_csr_dev_data = {
+ .num_clks = ARRAY_SIZE(imx95_lvds_clk_dev_data),
+ .clk_dev_data = imx95_lvds_clk_dev_data,
.clk_reg_offset = 0,
};
-static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = {
+static const char * const imx95_disp_engine_parents[] = {
+ "videopll1", "dsi_pll", "ldb_pll_div7"
+};
+
+static const struct imx95_blk_ctl_clk_dev_data imx95_dispmix_csr_clk_dev_data[] = {
[IMX95_CLK_DISPMIX_ENG0_SEL] = {
.name = "disp_engine0_sel",
- .parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", },
- .num_parents = 4,
+ .parent_names = imx95_disp_engine_parents,
+ .num_parents = ARRAY_SIZE(imx95_disp_engine_parents),
.reg = 0,
.bit_idx = 0,
.bit_width = 2,
@@ -232,8 +238,8 @@ static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = {
},
[IMX95_CLK_DISPMIX_ENG1_SEL] = {
.name = "disp_engine1_sel",
- .parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", },
- .num_parents = 4,
+ .parent_names = imx95_disp_engine_parents,
+ .num_parents = ARRAY_SIZE(imx95_disp_engine_parents),
.reg = 0,
.bit_idx = 2,
.bit_width = 2,
@@ -242,9 +248,9 @@ static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = {
}
};
-static const struct imx95_blk_ctl_dev_data dispmix_csr_dev_data = {
- .num_clks = ARRAY_SIZE(dispmix_csr_clk_dev_data),
- .clk_dev_data = dispmix_csr_clk_dev_data,
+static const struct imx95_blk_ctl_dev_data imx95_dispmix_csr_dev_data = {
+ .num_clks = ARRAY_SIZE(imx95_dispmix_csr_clk_dev_data),
+ .clk_dev_data = imx95_dispmix_csr_clk_dev_data,
.clk_reg_offset = 0,
};
@@ -296,10 +302,54 @@ static const struct imx95_blk_ctl_dev_data hsio_blk_ctl_dev_data = {
.clk_reg_offset = 0,
};
+static const struct imx95_blk_ctl_clk_dev_data imx94_lvds_clk_dev_data[] = {
+ [IMX94_CLK_DISPMIX_LVDS_CLK_GATE] = {
+ .name = "lvds_clk_gate",
+ .parent_names = (const char *[]){ "ldbpll", },
+ .num_parents = 1,
+ .reg = 0,
+ .bit_idx = 1,
+ .bit_width = 1,
+ .type = CLK_GATE,
+ .flags = CLK_SET_RATE_PARENT,
+ .flags2 = CLK_GATE_SET_TO_DISABLE,
+ },
+};
+
+static const struct imx95_blk_ctl_dev_data imx94_lvds_csr_dev_data = {
+ .num_clks = ARRAY_SIZE(imx94_lvds_clk_dev_data),
+ .clk_dev_data = imx94_lvds_clk_dev_data,
+ .clk_reg_offset = 0,
+ .rpm_enabled = true,
+};
+
+static const char * const imx94_disp_engine_parents[] = {
+ "disppix", "ldb_pll_div7"
+};
+
+static const struct imx95_blk_ctl_clk_dev_data imx94_dispmix_csr_clk_dev_data[] = {
+ [IMX94_CLK_DISPMIX_CLK_SEL] = {
+ .name = "disp_clk_sel",
+ .parent_names = imx94_disp_engine_parents,
+ .num_parents = ARRAY_SIZE(imx94_disp_engine_parents),
+ .reg = 0,
+ .bit_idx = 1,
+ .bit_width = 1,
+ .type = CLK_MUX,
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct imx95_blk_ctl_dev_data imx94_dispmix_csr_dev_data = {
+ .num_clks = ARRAY_SIZE(imx94_dispmix_csr_clk_dev_data),
+ .clk_dev_data = imx94_dispmix_csr_clk_dev_data,
+ .clk_reg_offset = 0,
+ .rpm_enabled = true,
+};
+
static int imx95_bc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct imx95_blk_ctl_dev_data *bc_data;
struct imx95_blk_ctl *bc;
struct clk_hw_onecell_data *clk_hw_data;
struct clk_hw **hws;
@@ -329,23 +379,25 @@ static int imx95_bc_probe(struct platform_device *pdev)
return ret;
}
- bc_data = of_device_get_match_data(dev);
- if (!bc_data)
+ bc->pdata = of_device_get_match_data(dev);
+ if (!bc->pdata)
return devm_of_platform_populate(dev);
- clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, bc_data->num_clks),
+ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, bc->pdata->num_clks),
GFP_KERNEL);
if (!clk_hw_data)
return -ENOMEM;
- if (bc_data->rpm_enabled)
- pm_runtime_enable(&pdev->dev);
+ if (bc->pdata->rpm_enabled) {
+ devm_pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume_and_get(&pdev->dev);
+ }
- clk_hw_data->num = bc_data->num_clks;
+ clk_hw_data->num = bc->pdata->num_clks;
hws = clk_hw_data->hws;
- for (i = 0; i < bc_data->num_clks; i++) {
- const struct imx95_blk_ctl_clk_dev_data *data = &bc_data->clk_dev_data[i];
+ for (i = 0; i < bc->pdata->num_clks; i++) {
+ const struct imx95_blk_ctl_clk_dev_data *data = &bc->pdata->clk_dev_data[i];
void __iomem *reg = base + data->reg;
if (data->type == CLK_MUX) {
@@ -379,21 +431,20 @@ static int imx95_bc_probe(struct platform_device *pdev)
goto cleanup;
}
- if (pm_runtime_enabled(bc->dev))
+ if (pm_runtime_enabled(bc->dev)) {
+ pm_runtime_put_sync(&pdev->dev);
clk_disable_unprepare(bc->clk_apb);
+ }
return 0;
cleanup:
- for (i = 0; i < bc_data->num_clks; i++) {
+ for (i = 0; i < bc->pdata->num_clks; i++) {
if (IS_ERR_OR_NULL(hws[i]))
continue;
clk_hw_unregister(hws[i]);
}
- if (bc_data->rpm_enabled)
- pm_runtime_disable(&pdev->dev);
-
return ret;
}
@@ -402,15 +453,24 @@ static int imx95_bc_runtime_suspend(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
+ bc->clk_reg_restore = readl(bc->base + bc->pdata->clk_reg_offset);
clk_disable_unprepare(bc->clk_apb);
+
return 0;
}
static int imx95_bc_runtime_resume(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
+ int ret;
- return clk_prepare_enable(bc->clk_apb);
+ ret = clk_prepare_enable(bc->clk_apb);
+ if (ret)
+ return ret;
+
+ writel(bc->clk_reg_restore, bc->base + bc->pdata->clk_reg_offset);
+
+ return 0;
}
#endif
@@ -418,22 +478,12 @@ static int imx95_bc_runtime_resume(struct device *dev)
static int imx95_bc_suspend(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
- const struct imx95_blk_ctl_dev_data *bc_data;
- int ret;
- bc_data = of_device_get_match_data(dev);
- if (!bc_data)
+ if (pm_runtime_suspended(dev))
return 0;
- if (bc_data->rpm_enabled) {
- ret = pm_runtime_get_sync(bc->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(bc->dev);
- return ret;
- }
- }
-
- bc->clk_reg_restore = readl(bc->base + bc_data->clk_reg_offset);
+ bc->clk_reg_restore = readl(bc->base + bc->pdata->clk_reg_offset);
+ clk_disable_unprepare(bc->clk_apb);
return 0;
}
@@ -441,16 +491,16 @@ static int imx95_bc_suspend(struct device *dev)
static int imx95_bc_resume(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
- const struct imx95_blk_ctl_dev_data *bc_data;
+ int ret;
- bc_data = of_device_get_match_data(dev);
- if (!bc_data)
+ if (pm_runtime_suspended(dev))
return 0;
- writel(bc->clk_reg_restore, bc->base + bc_data->clk_reg_offset);
+ ret = clk_prepare_enable(bc->clk_apb);
+ if (ret)
+ return ret;
- if (bc_data->rpm_enabled)
- pm_runtime_put(bc->dev);
+ writel(bc->clk_reg_restore, bc->base + bc->pdata->clk_reg_offset);
return 0;
}
@@ -462,10 +512,12 @@ static const struct dev_pm_ops imx95_bc_pm_ops = {
};
static const struct of_device_id imx95_bc_of_match[] = {
+ { .compatible = "nxp,imx94-display-csr", .data = &imx94_dispmix_csr_dev_data },
+ { .compatible = "nxp,imx94-lvds-csr", .data = &imx94_lvds_csr_dev_data },
{ .compatible = "nxp,imx95-camera-csr", .data = &camblk_dev_data },
{ .compatible = "nxp,imx95-display-master-csr", },
- { .compatible = "nxp,imx95-lvds-csr", .data = &lvds_csr_dev_data },
- { .compatible = "nxp,imx95-display-csr", .data = &dispmix_csr_dev_data },
+ { .compatible = "nxp,imx95-display-csr", .data = &imx95_dispmix_csr_dev_data },
+ { .compatible = "nxp,imx95-lvds-csr", .data = &imx95_lvds_csr_dev_data },
{ .compatible = "nxp,imx95-hsio-blk-ctl", .data = &hsio_blk_ctl_dev_data },
{ .compatible = "nxp,imx95-vpu-csr", .data = &vpublk_dev_data },
{ .compatible = "nxp,imx95-netcmix-blk-ctrl", .data = &netcmix_dev_data},
diff --git a/drivers/clk/imx/clk-pfd.c b/drivers/clk/imx/clk-pfd.c
index 5cf0149dfa15..31220fa7882b 100644
--- a/drivers/clk/imx/clk-pfd.c
+++ b/drivers/clk/imx/clk-pfd.c
@@ -62,24 +62,26 @@ static unsigned long clk_pfd_recalc_rate(struct clk_hw *hw,
return tmp;
}
-static long clk_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pfd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- u64 tmp = *prate;
+ u64 tmp = req->best_parent_rate;
u8 frac;
- tmp = tmp * 18 + rate / 2;
- do_div(tmp, rate);
+ tmp = tmp * 18 + req->rate / 2;
+ do_div(tmp, req->rate);
frac = tmp;
if (frac < 12)
frac = 12;
else if (frac > 35)
frac = 35;
- tmp = *prate;
+ tmp = req->best_parent_rate;
tmp *= 18;
do_div(tmp, frac);
- return tmp;
+ req->rate = tmp;
+
+ return 0;
}
static int clk_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -117,7 +119,7 @@ static const struct clk_ops clk_pfd_ops = {
.enable = clk_pfd_enable,
.disable = clk_pfd_disable,
.recalc_rate = clk_pfd_recalc_rate,
- .round_rate = clk_pfd_round_rate,
+ .determine_rate = clk_pfd_determine_rate,
.set_rate = clk_pfd_set_rate,
.is_enabled = clk_pfd_is_enabled,
};
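
The clk-pfd change above is the first of many identical conversions in this section: the .round_rate(hw, rate, *prate) callback becomes .determine_rate(hw, req), the requested and parent rates are read from struct clk_rate_request, the rounded result is written back to req->rate, and success is signalled by returning 0. A generic sketch of the transform (not any one driver):

#include <linux/clk-provider.h>
#include <linux/minmax.h>

static int example_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	unsigned long parent_rate = req->best_parent_rate;

	/* the same arithmetic the old round_rate() performed, e.g.
	 * snapping the request to a multiple of the parent rate
	 */
	if (parent_rate)
		req->rate = parent_rate * max(1UL, req->rate / parent_rate);

	return 0;
}
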
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index f290981ea13b..36d0e80b55b8 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -216,8 +216,8 @@ found:
t->mdiv, t->kdiv);
}
-static long clk_pll1416x_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll1416x_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pll14xx *pll = to_clk_pll14xx(hw);
const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
@@ -225,22 +225,29 @@ static long clk_pll1416x_round_rate(struct clk_hw *hw, unsigned long rate,
/* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++)
- if (rate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
/* return minimum supported value */
- return rate_table[pll->rate_count - 1].rate;
+ req->rate = rate_table[pll->rate_count - 1].rate;
+
+ return 0;
}
-static long clk_pll1443x_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll1443x_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pll14xx *pll = to_clk_pll14xx(hw);
struct imx_pll14xx_rate_table t;
- imx_pll14xx_calc_settings(pll, rate, *prate, &t);
+ imx_pll14xx_calc_settings(pll, req->rate, req->best_parent_rate, &t);
+
+ req->rate = t.rate;
- return t.rate;
+ return 0;
}
static unsigned long clk_pll14xx_recalc_rate(struct clk_hw *hw,
@@ -470,7 +477,7 @@ static const struct clk_ops clk_pll1416x_ops = {
.unprepare = clk_pll14xx_unprepare,
.is_prepared = clk_pll14xx_is_prepared,
.recalc_rate = clk_pll14xx_recalc_rate,
- .round_rate = clk_pll1416x_round_rate,
+ .determine_rate = clk_pll1416x_determine_rate,
.set_rate = clk_pll1416x_set_rate,
};
@@ -483,7 +490,7 @@ static const struct clk_ops clk_pll1443x_ops = {
.unprepare = clk_pll14xx_unprepare,
.is_prepared = clk_pll14xx_is_prepared,
.recalc_rate = clk_pll14xx_recalc_rate,
- .round_rate = clk_pll1443x_round_rate,
+ .determine_rate = clk_pll1443x_determine_rate,
.set_rate = clk_pll1443x_set_rate,
};
diff --git a/drivers/clk/imx/clk-pllv2.c b/drivers/clk/imx/clk-pllv2.c
index ff17f0664faa..bb497ad5e0ae 100644
--- a/drivers/clk/imx/clk-pllv2.c
+++ b/drivers/clk/imx/clk-pllv2.c
@@ -178,18 +178,25 @@ static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long clk_pllv2_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 dp_op, dp_mfd, dp_mfn;
int ret;
- ret = __clk_pllv2_set_rate(rate, *prate, &dp_op, &dp_mfd, &dp_mfn);
- if (ret)
- return ret;
+ ret = __clk_pllv2_set_rate(req->rate, req->best_parent_rate, &dp_op,
+ &dp_mfd, &dp_mfn);
+ if (ret) {
+ req->rate = ret;
- return __clk_pllv2_recalc_rate(*prate, MXC_PLL_DP_CTL_DPDCK0_2_EN,
- dp_op, dp_mfd, dp_mfn);
+ return 0;
+ }
+
+ req->rate = __clk_pllv2_recalc_rate(req->best_parent_rate,
+ MXC_PLL_DP_CTL_DPDCK0_2_EN, dp_op,
+ dp_mfd, dp_mfn);
+
+ return 0;
}
static int clk_pllv2_prepare(struct clk_hw *hw)
@@ -235,7 +242,7 @@ static const struct clk_ops clk_pllv2_ops = {
.prepare = clk_pllv2_prepare,
.unprepare = clk_pllv2_unprepare,
.recalc_rate = clk_pllv2_recalc_rate,
- .round_rate = clk_pllv2_round_rate,
+ .determine_rate = clk_pllv2_determine_rate,
.set_rate = clk_pllv2_set_rate,
};
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index 11fb238ee8f0..b99508367bcb 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -117,13 +117,14 @@ static unsigned long clk_pllv3_recalc_rate(struct clk_hw *hw,
return (div == 1) ? parent_rate * 22 : parent_rate * 20;
}
-static long clk_pllv3_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv3_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
- return (rate >= parent_rate * 22) ? parent_rate * 22 :
- parent_rate * 20;
+ req->rate = (req->rate >= parent_rate * 22) ? parent_rate * 22 : parent_rate * 20;
+
+ return 0;
}
static int clk_pllv3_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -152,7 +153,7 @@ static const struct clk_ops clk_pllv3_ops = {
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_recalc_rate,
- .round_rate = clk_pllv3_round_rate,
+ .determine_rate = clk_pllv3_determine_rate,
.set_rate = clk_pllv3_set_rate,
};
@@ -165,21 +166,23 @@ static unsigned long clk_pllv3_sys_recalc_rate(struct clk_hw *hw,
return parent_rate * div / 2;
}
-static long clk_pllv3_sys_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv3_sys_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
unsigned long min_rate = parent_rate * 54 / 2;
unsigned long max_rate = parent_rate * 108 / 2;
u32 div;
- if (rate > max_rate)
- rate = max_rate;
- else if (rate < min_rate)
- rate = min_rate;
- div = rate * 2 / parent_rate;
+ if (req->rate > max_rate)
+ req->rate = max_rate;
+ else if (req->rate < min_rate)
+ req->rate = min_rate;
+ div = req->rate * 2 / parent_rate;
- return parent_rate * div / 2;
+ req->rate = parent_rate * div / 2;
+
+ return 0;
}
static int clk_pllv3_sys_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -207,7 +210,7 @@ static const struct clk_ops clk_pllv3_sys_ops = {
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_sys_recalc_rate,
- .round_rate = clk_pllv3_sys_round_rate,
+ .determine_rate = clk_pllv3_sys_determine_rate,
.set_rate = clk_pllv3_sys_set_rate,
};
@@ -226,10 +229,10 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
return parent_rate * div + (unsigned long)temp64;
}
-static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv3_av_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
unsigned long min_rate = parent_rate * 27;
unsigned long max_rate = parent_rate * 54;
u32 div;
@@ -237,16 +240,16 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
u32 max_mfd = 0x3FFFFFFF;
u64 temp64;
- if (rate > max_rate)
- rate = max_rate;
- else if (rate < min_rate)
- rate = min_rate;
+ if (req->rate > max_rate)
+ req->rate = max_rate;
+ else if (req->rate < min_rate)
+ req->rate = min_rate;
if (parent_rate <= max_mfd)
mfd = parent_rate;
- div = rate / parent_rate;
- temp64 = (u64) (rate - div * parent_rate);
+ div = req->rate / parent_rate;
+ temp64 = (u64) (req->rate - div * parent_rate);
temp64 *= mfd;
temp64 = div64_ul(temp64, parent_rate);
mfn = temp64;
@@ -255,7 +258,9 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
temp64 *= mfn;
do_div(temp64, mfd);
- return parent_rate * div + (unsigned long)temp64;
+ req->rate = parent_rate * div + (unsigned long)temp64;
+
+ return 0;
}
static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -296,7 +301,7 @@ static const struct clk_ops clk_pllv3_av_ops = {
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_av_recalc_rate,
- .round_rate = clk_pllv3_av_round_rate,
+ .determine_rate = clk_pllv3_av_determine_rate,
.set_rate = clk_pllv3_av_set_rate,
};
@@ -355,12 +360,15 @@ static unsigned long clk_pllv3_vf610_recalc_rate(struct clk_hw *hw,
return clk_pllv3_vf610_mf_to_rate(parent_rate, mf);
}
-static long clk_pllv3_vf610_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv3_vf610_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- struct clk_pllv3_vf610_mf mf = clk_pllv3_vf610_rate_to_mf(*prate, rate);
+ struct clk_pllv3_vf610_mf mf = clk_pllv3_vf610_rate_to_mf(req->best_parent_rate,
+ req->rate);
+
+ req->rate = clk_pllv3_vf610_mf_to_rate(req->best_parent_rate, mf);
- return clk_pllv3_vf610_mf_to_rate(*prate, mf);
+ return 0;
}
static int clk_pllv3_vf610_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -389,7 +397,7 @@ static const struct clk_ops clk_pllv3_vf610_ops = {
.unprepare = clk_pllv3_unprepare,
.is_prepared = clk_pllv3_is_prepared,
.recalc_rate = clk_pllv3_vf610_recalc_rate,
- .round_rate = clk_pllv3_vf610_round_rate,
+ .determine_rate = clk_pllv3_vf610_determine_rate,
.set_rate = clk_pllv3_vf610_set_rate,
};
diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c
index 9b136c951762..01d05b5d5438 100644
--- a/drivers/clk/imx/clk-pllv4.c
+++ b/drivers/clk/imx/clk-pllv4.c
@@ -95,11 +95,11 @@ static unsigned long clk_pllv4_recalc_rate(struct clk_hw *hw,
return (parent_rate * mult) + (u32)temp64;
}
-static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllv4_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pllv4 *pll = to_clk_pllv4(hw);
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
unsigned long round_rate, i;
u32 mfn, mfd = DEFAULT_MFD;
bool found = false;
@@ -107,7 +107,7 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
u32 mult;
if (pll->use_mult_range) {
- temp64 = (u64)rate;
+ temp64 = (u64) req->rate;
do_div(temp64, parent_rate);
mult = temp64;
if (mult >= pllv4_mult_range[1] &&
@@ -118,7 +118,7 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
} else {
for (i = 0; i < ARRAY_SIZE(pllv4_mult_table); i++) {
round_rate = parent_rate * pllv4_mult_table[i];
- if (rate >= round_rate) {
+ if (req->rate >= round_rate) {
found = true;
break;
}
@@ -127,14 +127,16 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
if (!found) {
pr_warn("%s: unable to round rate %lu, parent rate %lu\n",
- clk_hw_get_name(hw), rate, parent_rate);
+ clk_hw_get_name(hw), req->rate, parent_rate);
+ req->rate = 0;
+
return 0;
}
if (parent_rate <= MAX_MFD)
mfd = parent_rate;
- temp64 = (u64)(rate - round_rate);
+ temp64 = (u64)(req->rate - round_rate);
temp64 *= mfd;
do_div(temp64, parent_rate);
mfn = temp64;
@@ -145,14 +147,19 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
* pair of mfn/mfd, we simply return the round_rate without using
* the frac part.
*/
- if (mfn >= mfd)
- return round_rate;
+ if (mfn >= mfd) {
+ req->rate = round_rate;
+
+ return 0;
+ }
temp64 = (u64)parent_rate;
temp64 *= mfn;
do_div(temp64, mfd);
- return round_rate + (u32)temp64;
+ req->rate = round_rate + (u32)temp64;
+
+ return 0;
}
static bool clk_pllv4_is_valid_mult(struct clk_pllv4 *pll, unsigned int mult)
@@ -229,7 +236,7 @@ static void clk_pllv4_unprepare(struct clk_hw *hw)
static const struct clk_ops clk_pllv4_ops = {
.recalc_rate = clk_pllv4_recalc_rate,
- .round_rate = clk_pllv4_round_rate,
+ .determine_rate = clk_pllv4_determine_rate,
.set_rate = clk_pllv4_set_rate,
.prepare = clk_pllv4_prepare,
.unprepare = clk_pllv4_unprepare,
diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
index b27186aaf2a1..34c9dc1fb20e 100644
--- a/drivers/clk/imx/clk-scu.c
+++ b/drivers/clk/imx/clk-scu.c
@@ -269,24 +269,6 @@ static int clk_scu_determine_rate(struct clk_hw *hw,
return 0;
}
-/*
- * clk_scu_round_rate - Round clock rate for a SCU clock
- * @hw: clock to round rate for
- * @rate: rate to round
- * @parent_rate: parent rate provided by common clock framework, not used
- *
- * Returns the current clock rate, or zero in failure.
- */
-static long clk_scu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- /*
- * Assume we support all the requested rate and let the SCU firmware
- * to handle the left work
- */
- return rate;
-}
-
static int clk_scu_atf_set_cpu_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -454,7 +436,7 @@ static const struct clk_ops clk_scu_ops = {
static const struct clk_ops clk_scu_cpu_ops = {
.recalc_rate = clk_scu_recalc_rate,
- .round_rate = clk_scu_round_rate,
+ .determine_rate = clk_scu_determine_rate,
.set_rate = clk_scu_atf_set_cpu_rate,
.prepare = clk_scu_prepare,
.unprepare = clk_scu_unprepare,
@@ -462,7 +444,7 @@ static const struct clk_ops clk_scu_cpu_ops = {
static const struct clk_ops clk_scu_pi_ops = {
.recalc_rate = clk_scu_recalc_rate,
- .round_rate = clk_scu_round_rate,
+ .determine_rate = clk_scu_determine_rate,
.set_rate = clk_scu_set_rate,
};
@@ -567,7 +549,6 @@ static int imx_clk_scu_probe(struct platform_device *pdev)
if (!((clk->rsrc == IMX_SC_R_A35) || (clk->rsrc == IMX_SC_R_A53) ||
(clk->rsrc == IMX_SC_R_A72))) {
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
}
@@ -729,7 +710,7 @@ struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
if (ret)
goto put_device;
- /* For API backwards compatiblilty, simply return NULL for success */
+ /* For API backwards compatibility, simply return NULL for success */
return NULL;
put_device:
@@ -766,15 +747,15 @@ static unsigned long clk_gpr_div_scu_recalc_rate(struct clk_hw *hw,
return err ? 0 : rate;
}
-static long clk_gpr_div_scu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_gpr_div_scu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (rate < *prate)
- rate = *prate / 2;
+ if (req->rate < req->best_parent_rate)
+ req->rate = req->best_parent_rate / 2;
else
- rate = *prate;
+ req->rate = req->best_parent_rate;
- return rate;
+ return 0;
}
static int clk_gpr_div_scu_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -793,7 +774,7 @@ static int clk_gpr_div_scu_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_gpr_div_scu_ops = {
.recalc_rate = clk_gpr_div_scu_recalc_rate,
- .round_rate = clk_gpr_div_scu_round_rate,
+ .determine_rate = clk_gpr_div_scu_determine_rate,
.set_rate = clk_gpr_div_scu_set_rate,
};
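
The SCU conversion is a pure deletion: per the removed comment, round_rate() accepted every requested rate and left the real rounding to SCU firmware, so the cpu and pi ops can reuse the pre-existing clk_scu_determine_rate(). The equivalent pass-through shape of such a callback, as a sketch:

#include <linux/clk-provider.h>

static int example_passthrough_determine_rate(struct clk_hw *hw,
					      struct clk_rate_request *req)
{
	/* leave req->rate untouched; firmware does the rounding */
	return 0;
}
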
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
index 9e11f1c7c397..41eb38552a9c 100644
--- a/drivers/clk/imx/clk-vf610.c
+++ b/drivers/clk/imx/clk-vf610.c
@@ -139,7 +139,7 @@ static struct clk * __init vf610_get_fixed_clock(
return clk;
};
-static int vf610_clk_suspend(void)
+static int vf610_clk_suspend(void *data)
{
int i;
@@ -156,7 +156,7 @@ static int vf610_clk_suspend(void)
return 0;
}
-static void vf610_clk_resume(void)
+static void vf610_clk_resume(void *data)
{
int i;
@@ -171,11 +171,15 @@ static void vf610_clk_resume(void)
writel_relaxed(ccgr[i], CCM_CCGRx(i));
}
-static struct syscore_ops vf610_clk_syscore_ops = {
+static const struct syscore_ops vf610_clk_syscore_ops = {
.suspend = vf610_clk_suspend,
.resume = vf610_clk_resume,
};
+static struct syscore vf610_clk_syscore = {
+ .ops = &vf610_clk_syscore_ops,
+};
+
static void __init vf610_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
@@ -462,7 +466,7 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
clk_prepare_enable(clk[clks_init_on[i]]);
- register_syscore_ops(&vf610_clk_syscore_ops);
+ register_syscore(&vf610_clk_syscore);
/* Add the clocks to provider list */
clk_data.clks = clk;
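
The vf610 hunks (and the ingenic ones that follow) track a reworked syscore API: the suspend/resume callbacks gain a void *data context parameter, the ops table can be const, and registration goes through a struct syscore instance handed to register_syscore(). A sketch of the new shape, assuming that API as introduced elsewhere in this series; names are illustrative:

#include <linux/init.h>
#include <linux/syscore_ops.h>

static int example_suspend(void *data)
{
	/* save state */
	return 0;
}

static void example_resume(void *data)
{
	/* restore state */
}

static const struct syscore_ops example_syscore_ops = {
	.suspend = example_suspend,
	.resume = example_resume,
};

static struct syscore example_syscore = {
	.ops = &example_syscore_ops,
};

static void __init example_register(void)
{
	register_syscore(&example_syscore);
}
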
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index 0c9c8344ad11..91e7ac0cc334 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -174,14 +174,16 @@ ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
n * od);
}
-static long
-ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *prate)
+static int ingenic_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
- return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
+ req->rate = ingenic_pll_calc(clk_info, req->rate, req->best_parent_rate,
+ NULL, NULL, NULL);
+
+ return 0;
}
static inline int ingenic_pll_check_stable(struct ingenic_cgu *cgu,
@@ -317,7 +319,7 @@ static int ingenic_pll_is_enabled(struct clk_hw *hw)
static const struct clk_ops ingenic_pll_ops = {
.recalc_rate = ingenic_pll_recalc_rate,
- .round_rate = ingenic_pll_round_rate,
+ .determine_rate = ingenic_pll_determine_rate,
.set_rate = ingenic_pll_set_rate,
.enable = ingenic_pll_enable,
diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h
index 99da9bd86e63..0d417d69dab7 100644
--- a/drivers/clk/ingenic/cgu.h
+++ b/drivers/clk/ingenic/cgu.h
@@ -239,7 +239,7 @@ ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
*
* Register the clocks described by the CGU with the common clock framework.
*
- * Return: 0 on success or -errno if unsuccesful.
+ * Return: 0 on success or -errno if unsuccessful.
*/
int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu);
diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c
index 590e9c85cb25..94cee44c854f 100644
--- a/drivers/clk/ingenic/jz4725b-cgu.c
+++ b/drivers/clk/ingenic/jz4725b-cgu.c
@@ -268,6 +268,6 @@ static void __init jz4725b_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
CLK_OF_DECLARE_DRIVER(jz4725b_cgu, "ingenic,jz4725b-cgu", jz4725b_cgu_init);
diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
index 3e0a30574ebb..2def3aedc8dd 100644
--- a/drivers/clk/ingenic/jz4740-cgu.c
+++ b/drivers/clk/ingenic/jz4740-cgu.c
@@ -266,6 +266,6 @@ static void __init jz4740_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
CLK_OF_DECLARE_DRIVER(jz4740_cgu, "ingenic,jz4740-cgu", jz4740_cgu_init);
diff --git a/drivers/clk/ingenic/jz4755-cgu.c b/drivers/clk/ingenic/jz4755-cgu.c
index f2c2d848dab7..17cf5dcaece9 100644
--- a/drivers/clk/ingenic/jz4755-cgu.c
+++ b/drivers/clk/ingenic/jz4755-cgu.c
@@ -337,7 +337,7 @@ static void __init jz4755_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/*
* CGU has some children devices, this is useful for probing children devices
diff --git a/drivers/clk/ingenic/jz4760-cgu.c b/drivers/clk/ingenic/jz4760-cgu.c
index e407f00bd594..372fe4b07992 100644
--- a/drivers/clk/ingenic/jz4760-cgu.c
+++ b/drivers/clk/ingenic/jz4760-cgu.c
@@ -436,7 +436,7 @@ static void __init jz4760_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/* We only probe via devicetree, no need for a platform driver */
diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c
index 6ae1740367f9..58f1d3bad677 100644
--- a/drivers/clk/ingenic/jz4770-cgu.c
+++ b/drivers/clk/ingenic/jz4770-cgu.c
@@ -456,7 +456,7 @@ static void __init jz4770_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/* We only probe via devicetree, no need for a platform driver */
diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c
index b1dadc0a5e75..1e88aef7ac0f 100644
--- a/drivers/clk/ingenic/jz4780-cgu.c
+++ b/drivers/clk/ingenic/jz4780-cgu.c
@@ -128,19 +128,19 @@ static unsigned long jz4780_otg_phy_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long jz4780_otg_phy_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *parent_rate)
+static int jz4780_otg_phy_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (req_rate < 15600000)
- return 12000000;
-
- if (req_rate < 21600000)
- return 19200000;
+ if (req->rate < 15600000)
+ req->rate = 12000000;
+ else if (req->rate < 21600000)
+ req->rate = 19200000;
+ else if (req->rate < 36000000)
+ req->rate = 24000000;
+ else
+ req->rate = 48000000;
- if (req_rate < 36000000)
- return 24000000;
-
- return 48000000;
+ return 0;
}
static int jz4780_otg_phy_set_rate(struct clk_hw *hw, unsigned long req_rate,
@@ -212,7 +212,7 @@ static int jz4780_otg_phy_is_enabled(struct clk_hw *hw)
static const struct clk_ops jz4780_otg_phy_ops = {
.recalc_rate = jz4780_otg_phy_recalc_rate,
- .round_rate = jz4780_otg_phy_round_rate,
+ .determine_rate = jz4780_otg_phy_determine_rate,
.set_rate = jz4780_otg_phy_set_rate,
.enable = jz4780_otg_phy_enable,
@@ -803,6 +803,6 @@ static void __init jz4780_cgu_init(struct device_node *np)
return;
}
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
CLK_OF_DECLARE_DRIVER(jz4780_cgu, "ingenic,jz4780-cgu", jz4780_cgu_init);
diff --git a/drivers/clk/ingenic/pm.c b/drivers/clk/ingenic/pm.c
index 341752b640d2..206d5cf2872f 100644
--- a/drivers/clk/ingenic/pm.c
+++ b/drivers/clk/ingenic/pm.c
@@ -15,7 +15,7 @@
static void __iomem * __maybe_unused ingenic_cgu_base;
-static int __maybe_unused ingenic_cgu_pm_suspend(void)
+static int __maybe_unused ingenic_cgu_pm_suspend(void *data)
{
u32 val = readl(ingenic_cgu_base + CGU_REG_LCR);
@@ -24,22 +24,26 @@ static int __maybe_unused ingenic_cgu_pm_suspend(void)
return 0;
}
-static void __maybe_unused ingenic_cgu_pm_resume(void)
+static void __maybe_unused ingenic_cgu_pm_resume(void *data)
{
u32 val = readl(ingenic_cgu_base + CGU_REG_LCR);
writel(val & ~LCR_LOW_POWER_MODE, ingenic_cgu_base + CGU_REG_LCR);
}
-static struct syscore_ops __maybe_unused ingenic_cgu_pm_ops = {
+static const struct syscore_ops __maybe_unused ingenic_cgu_pm_ops = {
.suspend = ingenic_cgu_pm_suspend,
.resume = ingenic_cgu_pm_resume,
};
-void ingenic_cgu_register_syscore_ops(struct ingenic_cgu *cgu)
+static struct syscore __maybe_unused ingenic_cgu_pm = {
+ .ops = &ingenic_cgu_pm_ops,
+};
+
+void ingenic_cgu_register_syscore(struct ingenic_cgu *cgu)
{
if (IS_ENABLED(CONFIG_PM_SLEEP)) {
ingenic_cgu_base = cgu->base;
- register_syscore_ops(&ingenic_cgu_pm_ops);
+ register_syscore(&ingenic_cgu_pm);
}
}
diff --git a/drivers/clk/ingenic/pm.h b/drivers/clk/ingenic/pm.h
index fa7540407b6b..0dcb57dc64cb 100644
--- a/drivers/clk/ingenic/pm.h
+++ b/drivers/clk/ingenic/pm.h
@@ -7,6 +7,6 @@
struct ingenic_cgu;
-void ingenic_cgu_register_syscore_ops(struct ingenic_cgu *cgu);
+void ingenic_cgu_register_syscore(struct ingenic_cgu *cgu);
#endif /* DRIVERS_CLK_INGENIC_PM_H */
diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c
index 7d04ef40b7cf..bc6a51da2072 100644
--- a/drivers/clk/ingenic/tcu.c
+++ b/drivers/clk/ingenic/tcu.c
@@ -455,7 +455,7 @@ err_free_tcu:
return ret;
}
-static int __maybe_unused tcu_pm_suspend(void)
+static int __maybe_unused tcu_pm_suspend(void *data)
{
struct ingenic_tcu *tcu = ingenic_tcu;
@@ -465,7 +465,7 @@ static int __maybe_unused tcu_pm_suspend(void)
return 0;
}
-static void __maybe_unused tcu_pm_resume(void)
+static void __maybe_unused tcu_pm_resume(void *data)
{
struct ingenic_tcu *tcu = ingenic_tcu;
@@ -473,11 +473,15 @@ static void __maybe_unused tcu_pm_resume(void)
clk_enable(tcu->clk);
}
-static struct syscore_ops __maybe_unused tcu_pm_ops = {
+static const struct syscore_ops __maybe_unused tcu_pm_ops = {
.suspend = tcu_pm_suspend,
.resume = tcu_pm_resume,
};
+static struct syscore __maybe_unused tcu_pm = {
+ .ops = &tcu_pm_ops,
+};
+
static void __init ingenic_tcu_init(struct device_node *np)
{
int ret = ingenic_tcu_probe(np);
@@ -486,7 +490,7 @@ static void __init ingenic_tcu_init(struct device_node *np)
pr_crit("Failed to initialize TCU clocks: %d\n", ret);
if (IS_ENABLED(CONFIG_PM_SLEEP))
- register_syscore_ops(&tcu_pm_ops);
+ register_syscore(&tcu_pm);
}
CLK_OF_DECLARE_DRIVER(jz4740_cgu, "ingenic,jz4740-tcu", ingenic_tcu_init);
diff --git a/drivers/clk/ingenic/x1000-cgu.c b/drivers/clk/ingenic/x1000-cgu.c
index feb03eed4fe8..d89bdfb7c219 100644
--- a/drivers/clk/ingenic/x1000-cgu.c
+++ b/drivers/clk/ingenic/x1000-cgu.c
@@ -84,16 +84,17 @@ static unsigned long x1000_otg_phy_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long x1000_otg_phy_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *parent_rate)
+static int x1000_otg_phy_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (req_rate < 18000000)
- return 12000000;
-
- if (req_rate < 36000000)
- return 24000000;
+ if (req->rate < 18000000)
+ req->rate = 12000000;
+ else if (req->rate < 36000000)
+ req->rate = 24000000;
+ else
+ req->rate = 48000000;
- return 48000000;
+ return 0;
}
static int x1000_otg_phy_set_rate(struct clk_hw *hw, unsigned long req_rate,
@@ -161,7 +162,7 @@ static int x1000_usb_phy_is_enabled(struct clk_hw *hw)
static const struct clk_ops x1000_otg_phy_ops = {
.recalc_rate = x1000_otg_phy_recalc_rate,
- .round_rate = x1000_otg_phy_round_rate,
+ .determine_rate = x1000_otg_phy_determine_rate,
.set_rate = x1000_otg_phy_set_rate,
.enable = x1000_usb_phy_enable,
@@ -555,7 +556,7 @@ static void __init x1000_cgu_init(struct device_node *np)
return;
}
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/*
* CGU has some children devices, this is useful for probing children devices
diff --git a/drivers/clk/ingenic/x1830-cgu.c b/drivers/clk/ingenic/x1830-cgu.c
index 0fd46e50a513..acf856e5009e 100644
--- a/drivers/clk/ingenic/x1830-cgu.c
+++ b/drivers/clk/ingenic/x1830-cgu.c
@@ -463,7 +463,7 @@ static void __init x1830_cgu_init(struct device_node *np)
return;
}
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/*
* CGU has some children devices, this is useful for probing children devices
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
index c5894fc9395e..9d5071223f4c 100644
--- a/drivers/clk/keystone/sci-clk.c
+++ b/drivers/clk/keystone/sci-clk.c
@@ -480,13 +480,10 @@ static int ti_sci_scan_clocks_from_fw(struct sci_clk_provider *provider)
num_clks++;
}
- provider->clocks = devm_kmalloc_array(dev, num_clks, sizeof(sci_clk),
- GFP_KERNEL);
+ provider->clocks = devm_kmemdup_array(dev, clks, num_clks, sizeof(sci_clk), GFP_KERNEL);
if (!provider->clocks)
return -ENOMEM;
- memcpy(provider->clocks, clks, num_clks * sizeof(sci_clk));
-
provider->num_clocks = num_clks;
devm_kfree(dev, clks);
@@ -499,8 +496,8 @@ static int ti_sci_scan_clocks_from_fw(struct sci_clk_provider *provider)
static int _cmp_sci_clk_list(void *priv, const struct list_head *a,
const struct list_head *b)
{
- struct sci_clk *ca = container_of(a, struct sci_clk, node);
- struct sci_clk *cb = container_of(b, struct sci_clk, node);
+ const struct sci_clk *ca = container_of(a, struct sci_clk, node);
+ const struct sci_clk *cb = container_of(b, struct sci_clk, node);
return _cmp_sci_clk(ca, &cb);
}
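
The sci-clk change replaces a devm_kmalloc_array() plus memcpy() pair with devm_kmemdup_array(), which allocates and copies in a single managed call. A minimal sketch with an illustrative payload type:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/types.h>

struct item {
	u32 id;
};

/* devres-managed duplicate of src[0..count-1] */
static struct item *example_dup_items(struct device *dev,
				      const struct item *src, size_t count)
{
	return devm_kmemdup_array(dev, src, count, sizeof(*src), GFP_KERNEL);
}
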
diff --git a/drivers/clk/keystone/syscon-clk.c b/drivers/clk/keystone/syscon-clk.c
index c509929da854..ecf180a7949c 100644
--- a/drivers/clk/keystone/syscon-clk.c
+++ b/drivers/clk/keystone/syscon-clk.c
@@ -129,7 +129,7 @@ static int ti_syscon_gate_clk_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- regmap = regmap_init_mmio(dev, base, &ti_syscon_regmap_cfg);
+ regmap = devm_regmap_init_mmio(dev, base, &ti_syscon_regmap_cfg);
if (IS_ERR(regmap))
return dev_err_probe(dev, PTR_ERR(regmap),
"failed to get regmap\n");
diff --git a/drivers/clk/kunit_clk_hw_get_dev_of_node.dtso b/drivers/clk/kunit_clk_hw_get_dev_of_node.dtso
new file mode 100644
index 000000000000..760717da3235
--- /dev/null
+++ b/drivers/clk/kunit_clk_hw_get_dev_of_node.dtso
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ kunit-clock-controller {
+ compatible = "test,clk-hw-get-dev-of-node";
+ #clock-cells = <0>;
+ };
+};
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 5f8e6d68fa14..0e8dd82aa84e 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -1002,6 +1002,77 @@ config COMMON_CLK_MT8195_VENCSYS
help
This driver supports MediaTek MT8195 vencsys clocks.
+config COMMON_CLK_MT8196
+ tristate "Clock driver for MediaTek MT8196"
+ depends on ARM64 || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT8196 basic clocks.
+
+config COMMON_CLK_MT8196_IMP_IIC_WRAP
+ tristate "Clock driver for MediaTek MT8196 imp_iic_wrap"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+	  This driver supports MediaTek MT8196 I2C clocks.
+
+config COMMON_CLK_MT8196_MCUSYS
+ tristate "Clock driver for MediaTek MT8196 mcusys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 mcusys clocks.
+
+config COMMON_CLK_MT8196_MDPSYS
+ tristate "Clock driver for MediaTek MT8196 mdpsys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 mdpsys clocks.
+
+config COMMON_CLK_MT8196_MFGCFG
+ tristate "Clock driver for MediaTek MT8196 mfgcfg"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 mfgcfg clocks.
+
+config COMMON_CLK_MT8196_MMSYS
+ tristate "Clock driver for MediaTek MT8196 mmsys"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 mmsys clocks.
+
+config COMMON_CLK_MT8196_PEXTPSYS
+ tristate "Clock driver for MediaTek MT8196 pextpsys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 pextpsys clocks.
+
+config COMMON_CLK_MT8196_UFSSYS
+ tristate "Clock driver for MediaTek MT8196 ufssys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 ufssys clocks.
+
+config COMMON_CLK_MT8196_VDECSYS
+ tristate "Clock driver for MediaTek MT8196 vdecsys"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 vdecsys clocks.
+
+config COMMON_CLK_MT8196_VENCSYS
+ tristate "Clock driver for MediaTek MT8196 vencsys"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 vencsys clocks.
+
config COMMON_CLK_MT8365
tristate "Clock driver for MediaTek MT8365"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 6efec95406bd..d8736a060dbd 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -150,6 +150,19 @@ obj-$(CONFIG_COMMON_CLK_MT8195_VDOSYS) += clk-mt8195-vdo0.o clk-mt8195-vdo1.o
obj-$(CONFIG_COMMON_CLK_MT8195_VENCSYS) += clk-mt8195-venc.o
obj-$(CONFIG_COMMON_CLK_MT8195_VPPSYS) += clk-mt8195-vpp0.o clk-mt8195-vpp1.o
obj-$(CONFIG_COMMON_CLK_MT8195_WPESYS) += clk-mt8195-wpe.o
+obj-$(CONFIG_COMMON_CLK_MT8196) += clk-mt8196-apmixedsys.o clk-mt8196-topckgen.o \
+ clk-mt8196-topckgen2.o clk-mt8196-vlpckgen.o \
+ clk-mt8196-peri_ao.o
+obj-$(CONFIG_COMMON_CLK_MT8196_IMP_IIC_WRAP) += clk-mt8196-imp_iic_wrap.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MCUSYS) += clk-mt8196-mcu.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MDPSYS) += clk-mt8196-mdpsys.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MFGCFG) += clk-mt8196-mfg.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MMSYS) += clk-mt8196-disp0.o clk-mt8196-disp1.o clk-mt8196-vdisp_ao.o \
+ clk-mt8196-ovl0.o clk-mt8196-ovl1.o
+obj-$(CONFIG_COMMON_CLK_MT8196_PEXTPSYS) += clk-mt8196-pextp.o
+obj-$(CONFIG_COMMON_CLK_MT8196_UFSSYS) += clk-mt8196-ufs_ao.o
+obj-$(CONFIG_COMMON_CLK_MT8196_VDECSYS) += clk-mt8196-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT8196_VENCSYS) += clk-mt8196-venc.o
obj-$(CONFIG_COMMON_CLK_MT8365) += clk-mt8365-apmixedsys.o clk-mt8365.o
obj-$(CONFIG_COMMON_CLK_MT8365_APU) += clk-mt8365-apu.o
obj-$(CONFIG_COMMON_CLK_MT8365_CAM) += clk-mt8365-cam.o
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index 67d9e741c5e7..f6b1429ff757 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/dev_printk.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/printk.h>
@@ -12,15 +13,14 @@
#include <linux/slab.h>
#include <linux/types.h>
+#include "clk-mtk.h"
#include "clk-gate.h"
struct mtk_clk_gate {
struct clk_hw hw;
struct regmap *regmap;
- int set_ofs;
- int clr_ofs;
- int sta_ofs;
- u8 bit;
+ struct regmap *regmap_hwv;
+ const struct mtk_gate *gate;
};
static inline struct mtk_clk_gate *to_mtk_clk_gate(struct clk_hw *hw)
@@ -33,9 +33,9 @@ static u32 mtk_get_clockgating(struct clk_hw *hw)
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
u32 val;
- regmap_read(cg->regmap, cg->sta_ofs, &val);
+ regmap_read(cg->regmap, cg->gate->regs->sta_ofs, &val);
- return val & BIT(cg->bit);
+ return val & BIT(cg->gate->shift);
}
static int mtk_cg_bit_is_cleared(struct clk_hw *hw)
@@ -52,28 +52,30 @@ static void mtk_cg_set_bit(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_write(cg->regmap, cg->set_ofs, BIT(cg->bit));
+ regmap_write(cg->regmap, cg->gate->regs->set_ofs, BIT(cg->gate->shift));
}
static void mtk_cg_clr_bit(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_write(cg->regmap, cg->clr_ofs, BIT(cg->bit));
+ regmap_write(cg->regmap, cg->gate->regs->clr_ofs, BIT(cg->gate->shift));
}
static void mtk_cg_set_bit_no_setclr(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_set_bits(cg->regmap, cg->sta_ofs, BIT(cg->bit));
+ regmap_set_bits(cg->regmap, cg->gate->regs->sta_ofs,
+ BIT(cg->gate->shift));
}
static void mtk_cg_clr_bit_no_setclr(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_clear_bits(cg->regmap, cg->sta_ofs, BIT(cg->bit));
+ regmap_clear_bits(cg->regmap, cg->gate->regs->sta_ofs,
+ BIT(cg->gate->shift));
}
static int mtk_cg_enable(struct clk_hw *hw)
@@ -100,6 +102,32 @@ static void mtk_cg_disable_inv(struct clk_hw *hw)
mtk_cg_clr_bit(hw);
}
+static int mtk_cg_hwv_set_en(struct clk_hw *hw, bool enable)
+{
+ struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
+ u32 val;
+
+ regmap_write(cg->regmap_hwv,
+ enable ? cg->gate->hwv_regs->set_ofs :
+ cg->gate->hwv_regs->clr_ofs,
+ BIT(cg->gate->shift));
+
+ return regmap_read_poll_timeout_atomic(cg->regmap_hwv,
+ cg->gate->hwv_regs->sta_ofs, val,
+ val & BIT(cg->gate->shift), 0,
+ MTK_WAIT_HWV_DONE_US);
+}
+
+static int mtk_cg_hwv_enable(struct clk_hw *hw)
+{
+ return mtk_cg_hwv_set_en(hw, true);
+}
+
+static void mtk_cg_hwv_disable(struct clk_hw *hw)
+{
+ mtk_cg_hwv_set_en(hw, false);
+}
+
static int mtk_cg_enable_no_setclr(struct clk_hw *hw)
{
mtk_cg_clr_bit_no_setclr(hw);
@@ -124,6 +152,15 @@ static void mtk_cg_disable_inv_no_setclr(struct clk_hw *hw)
mtk_cg_clr_bit_no_setclr(hw);
}
+static bool mtk_cg_uses_hwv(const struct clk_ops *ops)
+{
+ if (ops == &mtk_clk_gate_hwv_ops_setclr ||
+ ops == &mtk_clk_gate_hwv_ops_setclr_inv)
+ return true;
+
+ return false;
+}
+
const struct clk_ops mtk_clk_gate_ops_setclr = {
.is_enabled = mtk_cg_bit_is_cleared,
.enable = mtk_cg_enable,
@@ -138,6 +175,20 @@ const struct clk_ops mtk_clk_gate_ops_setclr_inv = {
};
EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_setclr_inv);
+const struct clk_ops mtk_clk_gate_hwv_ops_setclr = {
+ .is_enabled = mtk_cg_bit_is_cleared,
+ .enable = mtk_cg_hwv_enable,
+ .disable = mtk_cg_hwv_disable,
+};
+EXPORT_SYMBOL_GPL(mtk_clk_gate_hwv_ops_setclr);
+
+const struct clk_ops mtk_clk_gate_hwv_ops_setclr_inv = {
+ .is_enabled = mtk_cg_bit_is_set,
+ .enable = mtk_cg_hwv_enable,
+ .disable = mtk_cg_hwv_disable,
+};
+EXPORT_SYMBOL_GPL(mtk_clk_gate_hwv_ops_setclr_inv);
+
const struct clk_ops mtk_clk_gate_ops_no_setclr = {
.is_enabled = mtk_cg_bit_is_cleared,
.enable = mtk_cg_enable_no_setclr,
@@ -152,12 +203,10 @@ const struct clk_ops mtk_clk_gate_ops_no_setclr_inv = {
};
EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_no_setclr_inv);
-static struct clk_hw *mtk_clk_register_gate(struct device *dev, const char *name,
- const char *parent_name,
- struct regmap *regmap, int set_ofs,
- int clr_ofs, int sta_ofs, u8 bit,
- const struct clk_ops *ops,
- unsigned long flags)
+static struct clk_hw *mtk_clk_register_gate(struct device *dev,
+ const struct mtk_gate *gate,
+ struct regmap *regmap,
+ struct regmap *regmap_hwv)
{
struct mtk_clk_gate *cg;
int ret;
@@ -167,18 +216,19 @@ static struct clk_hw *mtk_clk_register_gate(struct device *dev, const char *name
if (!cg)
return ERR_PTR(-ENOMEM);
- init.name = name;
- init.flags = flags | CLK_SET_RATE_PARENT;
- init.parent_names = parent_name ? &parent_name : NULL;
- init.num_parents = parent_name ? 1 : 0;
- init.ops = ops;
+ init.name = gate->name;
+ init.flags = gate->flags | CLK_SET_RATE_PARENT;
+ init.parent_names = gate->parent_name ? &gate->parent_name : NULL;
+ init.num_parents = gate->parent_name ? 1 : 0;
+ init.ops = gate->ops;
+ if (mtk_cg_uses_hwv(init.ops) && !regmap_hwv)
+ return dev_err_ptr_probe(
+ dev, -ENXIO,
+ "regmap not found for hardware voter clocks\n");
cg->regmap = regmap;
- cg->set_ofs = set_ofs;
- cg->clr_ofs = clr_ofs;
- cg->sta_ofs = sta_ofs;
- cg->bit = bit;
-
+ cg->regmap_hwv = regmap_hwv;
+ cg->gate = gate;
cg->hw.init = &init;
ret = clk_hw_register(dev, &cg->hw);
@@ -209,6 +259,7 @@ int mtk_clk_register_gates(struct device *dev, struct device_node *node,
int i;
struct clk_hw *hw;
struct regmap *regmap;
+ struct regmap *regmap_hwv;
if (!clk_data)
return -ENOMEM;
@@ -219,6 +270,12 @@ int mtk_clk_register_gates(struct device *dev, struct device_node *node,
return PTR_ERR(regmap);
}
+ regmap_hwv = mtk_clk_get_hwv_regmap(node);
+ if (IS_ERR(regmap_hwv))
+ return dev_err_probe(
+ dev, PTR_ERR(regmap_hwv),
+ "Cannot find hardware voter regmap for %pOF\n", node);
+
for (i = 0; i < num; i++) {
const struct mtk_gate *gate = &clks[i];
@@ -228,13 +285,7 @@ int mtk_clk_register_gates(struct device *dev, struct device_node *node,
continue;
}
- hw = mtk_clk_register_gate(dev, gate->name, gate->parent_name,
- regmap,
- gate->regs->set_ofs,
- gate->regs->clr_ofs,
- gate->regs->sta_ofs,
- gate->shift, gate->ops,
- gate->flags);
+ hw = mtk_clk_register_gate(dev, gate, regmap, regmap_hwv);
if (IS_ERR(hw)) {
pr_err("Failed to register clk %s: %pe\n", gate->name,
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
index 1a46b4c56fc5..4f05b9855dae 100644
--- a/drivers/clk/mediatek/clk-gate.h
+++ b/drivers/clk/mediatek/clk-gate.h
@@ -19,6 +19,8 @@ extern const struct clk_ops mtk_clk_gate_ops_setclr;
extern const struct clk_ops mtk_clk_gate_ops_setclr_inv;
extern const struct clk_ops mtk_clk_gate_ops_no_setclr;
extern const struct clk_ops mtk_clk_gate_ops_no_setclr_inv;
+extern const struct clk_ops mtk_clk_gate_hwv_ops_setclr;
+extern const struct clk_ops mtk_clk_gate_hwv_ops_setclr_inv;
struct mtk_gate_regs {
u32 sta_ofs;
@@ -31,6 +33,7 @@ struct mtk_gate {
const char *name;
const char *parent_name;
const struct mtk_gate_regs *regs;
+ const struct mtk_gate_regs *hwv_regs;
int shift;
const struct clk_ops *ops;
unsigned long flags;
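
The clk-gate.c rework above stores the whole mtk_gate descriptor (including the new hwv_regs field from clk-gate.h) in the runtime struct and adds hardware-voter (HWV) gate ops: enabling such a clock writes the vote bit to a set register on a second regmap, then polls the status register atomically until hardware acknowledges the vote within MTK_WAIT_HWV_DONE_US from clk-mtk.h. A condensed sketch of that handshake:

#include <linux/bits.h>
#include <linux/regmap.h>

#define EXAMPLE_HWV_TIMEOUT_US	1000	/* stand-in for MTK_WAIT_HWV_DONE_US */

static int example_hwv_vote(struct regmap *hwv, u32 set_ofs, u32 sta_ofs,
			    u8 shift)
{
	u32 val;

	/* cast the vote... */
	regmap_write(hwv, set_ofs, BIT(shift));

	/* ...and busy-wait until hardware confirms it */
	return regmap_read_poll_timeout_atomic(hwv, sta_ofs, val,
					       val & BIT(shift), 0,
					       EXAMPLE_HWV_TIMEOUT_US);
}
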
diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c
index 931a0598e598..a4ea5e20efa2 100644
--- a/drivers/clk/mediatek/clk-mt7622-aud.c
+++ b/drivers/clk/mediatek/clk-mt7622-aud.c
@@ -75,6 +75,7 @@ static const struct mtk_gate audio_clks[] = {
GATE_AUDIO1(CLK_AUDIO_A1SYS, "audio_a1sys", "a1sys_hp_sel", 21),
GATE_AUDIO1(CLK_AUDIO_A2SYS, "audio_a2sys", "a2sys_hp_sel", 22),
GATE_AUDIO1(CLK_AUDIO_AFE_CONN, "audio_afe_conn", "a1sys_hp_sel", 23),
+ GATE_AUDIO1(CLK_AUDIO_AFE_MRGIF, "audio_afe_mrgif", "aud_mux1_sel", 25),
/* AUDIO2 */
GATE_AUDIO2(CLK_AUDIO_UL1, "audio_ul1", "a1sys_hp_sel", 0),
GATE_AUDIO2(CLK_AUDIO_UL2, "audio_ul2", "a1sys_hp_sel", 1),
diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
index bb648a88e43a..ad47fdb23460 100644
--- a/drivers/clk/mediatek/clk-mt8195-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
@@ -103,7 +103,7 @@ static const struct mtk_gate infra_ao_clks[] = {
GATE_INFRA_AO0(CLK_INFRA_AO_CQ_DMA_FPC, "infra_ao_cq_dma_fpc", "fpc", 28),
GATE_INFRA_AO0(CLK_INFRA_AO_UART5, "infra_ao_uart5", "top_uart", 29),
/* INFRA_AO1 */
- GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "clk26m", 0),
+ GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "top_hdmi_xtal", 0),
GATE_INFRA_AO1(CLK_INFRA_AO_SPI0, "infra_ao_spi0", "top_spi", 1),
GATE_INFRA_AO1(CLK_INFRA_AO_MSDC0, "infra_ao_msdc0", "top_msdc50_0_hclk", 2),
GATE_INFRA_AO1(CLK_INFRA_AO_MSDC1, "infra_ao_msdc1", "top_axi", 4),
diff --git a/drivers/clk/mediatek/clk-mt8196-apmixedsys.c b/drivers/clk/mediatek/clk-mt8196-apmixedsys.c
new file mode 100644
index 000000000000..617f5449b88b
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-apmixedsys.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+/* APMIXEDSYS PLL control register offsets */
+#define MAINPLL_CON0 0x250
+#define MAINPLL_CON1 0x254
+#define UNIVPLL_CON0 0x264
+#define UNIVPLL_CON1 0x268
+#define MSDCPLL_CON0 0x278
+#define MSDCPLL_CON1 0x27c
+#define ADSPPLL_CON0 0x28c
+#define ADSPPLL_CON1 0x290
+#define EMIPLL_CON0 0x2a0
+#define EMIPLL_CON1 0x2a4
+#define EMIPLL2_CON0 0x2b4
+#define EMIPLL2_CON1 0x2b8
+#define NET1PLL_CON0 0x2c8
+#define NET1PLL_CON1 0x2cc
+#define SGMIIPLL_CON0 0x2dc
+#define SGMIIPLL_CON1 0x2e0
+
+/* APMIXEDSYS_GP2 PLL control register offsets */
+#define MAINPLL2_CON0 0x250
+#define MAINPLL2_CON1 0x254
+#define UNIVPLL2_CON0 0x264
+#define UNIVPLL2_CON1 0x268
+#define MMPLL2_CON0 0x278
+#define MMPLL2_CON1 0x27c
+#define IMGPLL_CON0 0x28c
+#define IMGPLL_CON1 0x290
+#define TVDPLL1_CON0 0x2a0
+#define TVDPLL1_CON1 0x2a4
+#define TVDPLL2_CON0 0x2b4
+#define TVDPLL2_CON1 0x2b8
+#define TVDPLL3_CON0 0x2c8
+#define TVDPLL3_CON1 0x2cc
+
+#define PLLEN_ALL 0x080
+#define PLLEN_ALL_SET 0x084
+#define PLLEN_ALL_CLR 0x088
+
+#define FENC_STATUS_CON0 0x03c
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
+#define PLL_FENC(_id, _name, _reg, _fenc_sta_ofs, _fenc_sta_bit,\
+ _flags, _pd_reg, _pd_shift, \
+ _pcw_reg, _pcw_shift, _pcwbits, \
+ _pll_en_bit) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .fenc_sta_ofs = _fenc_sta_ofs, \
+ .fenc_sta_bit = _fenc_sta_bit, \
+ .flags = _flags, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ .en_reg = PLLEN_ALL, \
+ .en_set_reg = PLLEN_ALL_SET, \
+ .en_clr_reg = PLLEN_ALL_CLR, \
+ .pll_en_bit = _pll_en_bit, \
+ .ops = &mtk_pll_fenc_clr_set_ops, \
+}
+
+struct mtk_pll_desc {
+ const struct mtk_pll_data *clks;
+ size_t num_clks;
+};
+
+static const struct mtk_pll_data apmixed_plls[] = {
+ PLL_FENC(CLK_APMIXED_MAINPLL, "mainpll", MAINPLL_CON0, FENC_STATUS_CON0,
+ 7, PLL_AO, MAINPLL_CON1, 24, MAINPLL_CON1, 0, 22, 0),
+ PLL_FENC(CLK_APMIXED_UNIVPLL, "univpll", UNIVPLL_CON0, FENC_STATUS_CON0,
+ 6, 0, UNIVPLL_CON1, 24, UNIVPLL_CON1, 0, 22, 1),
+ PLL_FENC(CLK_APMIXED_MSDCPLL, "msdcpll", MSDCPLL_CON0, FENC_STATUS_CON0,
+ 5, 0, MSDCPLL_CON1, 24, MSDCPLL_CON1, 0, 22, 2),
+ PLL_FENC(CLK_APMIXED_ADSPPLL, "adsppll", ADSPPLL_CON0, FENC_STATUS_CON0,
+ 4, 0, ADSPPLL_CON1, 24, ADSPPLL_CON1, 0, 22, 3),
+ PLL_FENC(CLK_APMIXED_EMIPLL, "emipll", EMIPLL_CON0, FENC_STATUS_CON0, 3,
+ PLL_AO, EMIPLL_CON1, 24, EMIPLL_CON1, 0, 22, 4),
+ PLL_FENC(CLK_APMIXED_EMIPLL2, "emipll2", EMIPLL2_CON0, FENC_STATUS_CON0,
+ 2, PLL_AO, EMIPLL2_CON1, 24, EMIPLL2_CON1, 0, 22, 5),
+ PLL_FENC(CLK_APMIXED_NET1PLL, "net1pll", NET1PLL_CON0, FENC_STATUS_CON0,
+ 1, 0, NET1PLL_CON1, 24, NET1PLL_CON1, 0, 22, 6),
+ PLL_FENC(CLK_APMIXED_SGMIIPLL, "sgmiipll", SGMIIPLL_CON0, FENC_STATUS_CON0,
+ 0, 0, SGMIIPLL_CON1, 24, SGMIIPLL_CON1, 0, 22, 7),
+};
+
+static const struct mtk_pll_desc apmixed_desc = {
+ .clks = apmixed_plls,
+ .num_clks = ARRAY_SIZE(apmixed_plls),
+};
+
+static const struct mtk_pll_data apmixed2_plls[] = {
+ PLL_FENC(CLK_APMIXED2_MAINPLL2, "mainpll2", MAINPLL2_CON0, FENC_STATUS_CON0,
+ 6, 0, MAINPLL2_CON1, 24, MAINPLL2_CON1, 0, 22, 0),
+ PLL_FENC(CLK_APMIXED2_UNIVPLL2, "univpll2", UNIVPLL2_CON0, FENC_STATUS_CON0,
+ 5, 0, UNIVPLL2_CON1, 24, UNIVPLL2_CON1, 0, 22, 1),
+ PLL_FENC(CLK_APMIXED2_MMPLL2, "mmpll2", MMPLL2_CON0, FENC_STATUS_CON0,
+ 4, 0, MMPLL2_CON1, 24, MMPLL2_CON1, 0, 22, 2),
+ PLL_FENC(CLK_APMIXED2_IMGPLL, "imgpll", IMGPLL_CON0, FENC_STATUS_CON0,
+ 3, 0, IMGPLL_CON1, 24, IMGPLL_CON1, 0, 22, 3),
+ PLL_FENC(CLK_APMIXED2_TVDPLL1, "tvdpll1", TVDPLL1_CON0, FENC_STATUS_CON0,
+ 2, 0, TVDPLL1_CON1, 24, TVDPLL1_CON1, 0, 22, 4),
+ PLL_FENC(CLK_APMIXED2_TVDPLL2, "tvdpll2", TVDPLL2_CON0, FENC_STATUS_CON0,
+ 1, 0, TVDPLL2_CON1, 24, TVDPLL2_CON1, 0, 22, 5),
+ PLL_FENC(CLK_APMIXED2_TVDPLL3, "tvdpll3", TVDPLL3_CON0, FENC_STATUS_CON0,
+ 0, 0, TVDPLL3_CON1, 24, TVDPLL3_CON1, 0, 22, 6),
+};
+
+static const struct mtk_pll_desc apmixed2_desc = {
+ .clks = apmixed2_plls,
+ .num_clks = ARRAY_SIZE(apmixed2_plls),
+};
+
+static int clk_mt8196_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ const struct mtk_pll_desc *mcd;
+ int r;
+
+ mcd = device_get_match_data(&pdev->dev);
+ if (!mcd)
+ return -EINVAL;
+
+ clk_data = mtk_alloc_clk_data(mcd->num_clks);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_plls(node, mcd->clks, mcd->num_clks, clk_data);
+ if (r)
+ goto free_apmixed_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(mcd->clks, mcd->num_clks, clk_data);
+free_apmixed_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static void clk_mt8196_apmixed_remove(struct platform_device *pdev)
+{
+ const struct mtk_pll_desc *mcd = device_get_match_data(&pdev->dev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(mcd->clks, mcd->num_clks, clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8196_apmixed[] = {
+ { .compatible = "mediatek,mt8196-apmixedsys", .data = &apmixed_desc },
+ { .compatible = "mediatek,mt8196-apmixedsys-gp2",
+ .data = &apmixed2_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_apmixed);
+
+static struct platform_driver clk_mt8196_apmixed_drv = {
+ .probe = clk_mt8196_apmixed_probe,
+ .remove = clk_mt8196_apmixed_remove,
+ .driver = {
+ .name = "clk-mt8196-apmixed",
+ .of_match_table = of_match_clk_mt8196_apmixed,
+ },
+};
+module_platform_driver(clk_mt8196_apmixed_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 apmixedsys clocks driver");
+MODULE_LICENSE("GPL");
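
Clocks registered by this provider are consumed through the standard clk API. A minimal consumer sketch, assuming a hypothetical device whose DT node references this provider with something like <&apmixedsys CLK_APMIXED_MSDCPLL> and a hypothetical "msdcpll" clock-names entry:

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *pll;
		int ret;

		/* "msdcpll" is a hypothetical clock-names entry in the consumer node */
		pll = devm_clk_get(&pdev->dev, "msdcpll");
		if (IS_ERR(pll))
			return PTR_ERR(pll);

		ret = clk_prepare_enable(pll);
		if (ret)
			return ret;

		dev_info(&pdev->dev, "msdcpll runs at %lu Hz\n", clk_get_rate(pll));

		clk_disable_unprepare(pll);
		return 0;
	}
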
diff --git a/drivers/clk/mediatek/clk-mt8196-disp0.c b/drivers/clk/mediatek/clk-mt8196-disp0.c
new file mode 100644
index 000000000000..9474aad26e92
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-disp0.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm0_hwv_regs = {
+ .set_ofs = 0x0020,
+ .clr_ofs = 0x0024,
+ .sta_ofs = 0x2c10,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs mm1_hwv_regs = {
+ .set_ofs = 0x0028,
+ .clr_ofs = 0x002c,
+ .sta_ofs = 0x2c14,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm0_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm0_cg_regs, \
+ .hwv_regs = &mm0_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE \
+ }
+
+#define GATE_MM1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm1_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm1_cg_regs, \
+ .hwv_regs = &mm1_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate mm_clks[] = {
+ /* MM0 */
+ GATE_HWV_MM0(CLK_MM_CONFIG, "mm_config", "disp", 0),
+ GATE_HWV_MM0(CLK_MM_DISP_MUTEX0, "mm_disp_mutex0", "disp", 1),
+ GATE_HWV_MM0(CLK_MM_DISP_AAL0, "mm_disp_aal0", "disp", 2),
+ GATE_HWV_MM0(CLK_MM_DISP_AAL1, "mm_disp_aal1", "disp", 3),
+ GATE_MM0(CLK_MM_DISP_C3D0, "mm_disp_c3d0", "disp", 4),
+ GATE_MM0(CLK_MM_DISP_C3D1, "mm_disp_c3d1", "disp", 5),
+ GATE_MM0(CLK_MM_DISP_C3D2, "mm_disp_c3d2", "disp", 6),
+ GATE_MM0(CLK_MM_DISP_C3D3, "mm_disp_c3d3", "disp", 7),
+ GATE_MM0(CLK_MM_DISP_CCORR0, "mm_disp_ccorr0", "disp", 8),
+ GATE_MM0(CLK_MM_DISP_CCORR1, "mm_disp_ccorr1", "disp", 9),
+ GATE_MM0(CLK_MM_DISP_CCORR2, "mm_disp_ccorr2", "disp", 10),
+ GATE_MM0(CLK_MM_DISP_CCORR3, "mm_disp_ccorr3", "disp", 11),
+ GATE_MM0(CLK_MM_DISP_CHIST0, "mm_disp_chist0", "disp", 12),
+ GATE_MM0(CLK_MM_DISP_CHIST1, "mm_disp_chist1", "disp", 13),
+ GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "disp", 14),
+ GATE_MM0(CLK_MM_DISP_COLOR1, "mm_disp_color1", "disp", 15),
+ GATE_MM0(CLK_MM_DISP_DITHER0, "mm_disp_dither0", "disp", 16),
+ GATE_MM0(CLK_MM_DISP_DITHER1, "mm_disp_dither1", "disp", 17),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC0, "mm_disp_dli_async0", "disp", 18),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC1, "mm_disp_dli_async1", "disp", 19),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC2, "mm_disp_dli_async2", "disp", 20),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC3, "mm_disp_dli_async3", "disp", 21),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC4, "mm_disp_dli_async4", "disp", 22),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC5, "mm_disp_dli_async5", "disp", 23),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC6, "mm_disp_dli_async6", "disp", 24),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC7, "mm_disp_dli_async7", "disp", 25),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC8, "mm_disp_dli_async8", "disp", 26),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC9, "mm_disp_dli_async9", "disp", 27),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC10, "mm_disp_dli_async10", "disp", 28),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC11, "mm_disp_dli_async11", "disp", 29),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC12, "mm_disp_dli_async12", "disp", 30),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC13, "mm_disp_dli_async13", "disp", 31),
+ /* MM1 */
+ GATE_HWV_MM1(CLK_MM_DISP_DLI_ASYNC14, "mm_disp_dli_async14", "disp", 0),
+ GATE_HWV_MM1(CLK_MM_DISP_DLI_ASYNC15, "mm_disp_dli_async15", "disp", 1),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC0, "mm_disp_dlo_async0", "disp", 2),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC1, "mm_disp_dlo_async1", "disp", 3),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC2, "mm_disp_dlo_async2", "disp", 4),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC3, "mm_disp_dlo_async3", "disp", 5),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC4, "mm_disp_dlo_async4", "disp", 6),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC5, "mm_disp_dlo_async5", "disp", 7),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC6, "mm_disp_dlo_async6", "disp", 8),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC7, "mm_disp_dlo_async7", "disp", 9),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC8, "mm_disp_dlo_async8", "disp", 10),
+ GATE_MM1(CLK_MM_DISP_GAMMA0, "mm_disp_gamma0", "disp", 11),
+ GATE_MM1(CLK_MM_DISP_GAMMA1, "mm_disp_gamma1", "disp", 12),
+ GATE_MM1(CLK_MM_MDP_AAL0, "mm_mdp_aal0", "disp", 13),
+ GATE_MM1(CLK_MM_MDP_AAL1, "mm_mdp_aal1", "disp", 14),
+ GATE_HWV_MM1(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "disp", 15),
+ GATE_HWV_MM1(CLK_MM_DISP_POSTMASK0, "mm_disp_postmask0", "disp", 16),
+ GATE_HWV_MM1(CLK_MM_DISP_POSTMASK1, "mm_disp_postmask1", "disp", 17),
+ GATE_HWV_MM1(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "disp", 18),
+ GATE_HWV_MM1(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "disp", 19),
+ GATE_HWV_MM1(CLK_MM_DISP_SPR0, "mm_disp_spr0", "disp", 20),
+ GATE_MM1(CLK_MM_DISP_TDSHP0, "mm_disp_tdshp0", "disp", 21),
+ GATE_MM1(CLK_MM_DISP_TDSHP1, "mm_disp_tdshp1", "disp", 22),
+ GATE_HWV_MM1(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "disp", 23),
+ GATE_HWV_MM1(CLK_MM_DISP_Y2R0, "mm_disp_y2r0", "disp", 24),
+ GATE_HWV_MM1(CLK_MM_SMI_SUB_COMM0, "mm_ssc", "disp", 25),
+ GATE_HWV_MM1(CLK_MM_DISP_FAKE_ENG0, "mm_disp_fake_eng0", "disp", 26),
+};
+
+static const struct mtk_clk_desc mm_mcd = {
+ .clks = mm_clks,
+ .num_clks = ARRAY_SIZE(mm_clks),
+};
+
+static const struct platform_device_id clk_mt8196_disp0_id_table[] = {
+ { .name = "clk-mt8196-disp0", .driver_data = (kernel_ulong_t)&mm_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_disp0_id_table);
+
+static struct platform_driver clk_mt8196_disp0_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-disp0",
+ },
+ .id_table = clk_mt8196_disp0_id_table,
+};
+module_platform_driver(clk_mt8196_disp0_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 disp0 clocks driver");
+MODULE_LICENSE("GPL");
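
Unlike the apmixedsys driver above, this one matches on a platform_device_id table rather than an OF compatible. The expectation (an assumption here, consistent with how other MediaTek multimedia clock drivers are instantiated) is that a parent mmsys/syscon driver spawns "clk-mt8196-disp0" as a child platform device on the same register space. A minimal sketch of that instantiation:

	#include <linux/err.h>
	#include <linux/platform_device.h>

	/* Hypothetical parent probe: spawn the clock driver as a child
	 * device that inherits the parent's register space and of_node.
	 */
	static int example_mmsys_probe(struct platform_device *pdev)
	{
		struct platform_device *clks;

		clks = platform_device_register_data(&pdev->dev,
						     "clk-mt8196-disp0",
						     PLATFORM_DEVID_AUTO,
						     NULL, 0);
		return PTR_ERR_OR_ZERO(clks);
	}
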
diff --git a/drivers/clk/mediatek/clk-mt8196-disp1.c b/drivers/clk/mediatek/clk-mt8196-disp1.c
new file mode 100644
index 000000000000..3bbec79a7010
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-disp1.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mm10_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm10_hwv_regs = {
+ .set_ofs = 0x0010,
+ .clr_ofs = 0x0014,
+ .sta_ofs = 0x2c08,
+};
+
+static const struct mtk_gate_regs mm11_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs mm11_hwv_regs = {
+ .set_ofs = 0x0018,
+ .clr_ofs = 0x001c,
+ .sta_ofs = 0x2c0c,
+};
+
+#define GATE_MM10(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm10_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm10_cg_regs, \
+ .hwv_regs = &mm10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_MM11(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm11_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm11_cg_regs, \
+ .hwv_regs = &mm11_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ }
+
+static const struct mtk_gate mm1_clks[] = {
+ /* MM10 */
+ GATE_HWV_MM10(CLK_MM1_DISPSYS1_CONFIG, "mm1_dispsys1_config", "disp", 0),
+ GATE_HWV_MM10(CLK_MM1_DISPSYS1_S_CONFIG, "mm1_dispsys1_s_config", "disp", 1),
+ GATE_HWV_MM10(CLK_MM1_DISP_MUTEX0, "mm1_disp_mutex0", "disp", 2),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC20, "mm1_disp_dli_async20", "disp", 3),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC21, "mm1_disp_dli_async21", "disp", 4),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC22, "mm1_disp_dli_async22", "disp", 5),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC23, "mm1_disp_dli_async23", "disp", 6),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC24, "mm1_disp_dli_async24", "disp", 7),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC25, "mm1_disp_dli_async25", "disp", 8),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC26, "mm1_disp_dli_async26", "disp", 9),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC27, "mm1_disp_dli_async27", "disp", 10),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC28, "mm1_disp_dli_async28", "disp", 11),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY0, "mm1_disp_relay0", "disp", 12),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY1, "mm1_disp_relay1", "disp", 13),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY2, "mm1_disp_relay2", "disp", 14),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY3, "mm1_disp_relay3", "disp", 15),
+ GATE_HWV_MM10(CLK_MM1_DISP_DP_INTF0, "mm1_DP_CLK", "disp", 16),
+ GATE_HWV_MM10(CLK_MM1_DISP_DP_INTF1, "mm1_disp_dp_intf1", "disp", 17),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP0, "mm1_disp_dsc_wrap0", "disp", 18),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP1, "mm1_disp_dsc_wrap1", "disp", 19),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP2, "mm1_disp_dsc_wrap2", "disp", 20),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP3, "mm1_disp_dsc_wrap3", "disp", 21),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSI0, "mm1_CLK0", "disp", 22),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSI1, "mm1_CLK1", "disp", 23),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSI2, "mm1_CLK2", "disp", 24),
+ GATE_HWV_MM10(CLK_MM1_DISP_DVO0, "mm1_disp_dvo0", "disp", 25),
+ GATE_HWV_MM10(CLK_MM1_DISP_GDMA0, "mm1_disp_gdma0", "disp", 26),
+ GATE_HWV_MM10(CLK_MM1_DISP_MERGE0, "mm1_disp_merge0", "disp", 27),
+ GATE_HWV_MM10(CLK_MM1_DISP_MERGE1, "mm1_disp_merge1", "disp", 28),
+ GATE_HWV_MM10(CLK_MM1_DISP_MERGE2, "mm1_disp_merge2", "disp", 29),
+ GATE_HWV_MM10(CLK_MM1_DISP_ODDMR0, "mm1_disp_oddmr0", "disp", 30),
+ GATE_HWV_MM10(CLK_MM1_DISP_POSTALIGN0, "mm1_disp_postalign0", "disp", 31),
+ /* MM11 */
+ GATE_HWV_MM11(CLK_MM1_DISP_DITHER2, "mm1_disp_dither2", "disp", 0),
+ GATE_HWV_MM11(CLK_MM1_DISP_R2Y0, "mm1_disp_r2y0", "disp", 1),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER0, "mm1_disp_splitter0", "disp", 2),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER1, "mm1_disp_splitter1", "disp", 3),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER2, "mm1_disp_splitter2", "disp", 4),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER3, "mm1_disp_splitter3", "disp", 5),
+ GATE_HWV_MM11(CLK_MM1_DISP_VDCM0, "mm1_disp_vdcm0", "disp", 6),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA1, "mm1_disp_wdma1", "disp", 7),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA2, "mm1_disp_wdma2", "disp", 8),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA3, "mm1_disp_wdma3", "disp", 9),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA4, "mm1_disp_wdma4", "disp", 10),
+ GATE_HWV_MM11(CLK_MM1_MDP_RDMA1, "mm1_mdp_rdma1", "disp", 11),
+ GATE_HWV_MM11(CLK_MM1_SMI_LARB0, "mm1_smi_larb0", "disp", 12),
+ GATE_HWV_MM11(CLK_MM1_MOD1, "mm1_mod1", "clk26m", 13),
+ GATE_HWV_MM11(CLK_MM1_MOD2, "mm1_mod2", "clk26m", 14),
+ GATE_HWV_MM11(CLK_MM1_MOD3, "mm1_mod3", "clk26m", 15),
+ GATE_HWV_MM11(CLK_MM1_MOD4, "mm1_mod4", "dp0", 16),
+ GATE_HWV_MM11(CLK_MM1_MOD5, "mm1_mod5", "dp1", 17),
+ GATE_HWV_MM11(CLK_MM1_MOD6, "mm1_mod6", "dp1", 18),
+ GATE_HWV_MM11(CLK_MM1_CG0, "mm1_cg0", "disp", 20),
+ GATE_HWV_MM11(CLK_MM1_CG1, "mm1_cg1", "disp", 21),
+ GATE_HWV_MM11(CLK_MM1_CG2, "mm1_cg2", "disp", 22),
+ GATE_HWV_MM11(CLK_MM1_CG3, "mm1_cg3", "disp", 23),
+ GATE_HWV_MM11(CLK_MM1_CG4, "mm1_cg4", "disp", 24),
+ GATE_HWV_MM11(CLK_MM1_CG5, "mm1_cg5", "disp", 25),
+ GATE_HWV_MM11(CLK_MM1_CG6, "mm1_cg6", "disp", 26),
+ GATE_HWV_MM11(CLK_MM1_CG7, "mm1_cg7", "disp", 27),
+ GATE_HWV_MM11(CLK_MM1_F26M, "mm1_f26m_ck", "clk26m", 28),
+};
+
+static const struct mtk_clk_desc mm1_mcd = {
+ .clks = mm1_clks,
+ .num_clks = ARRAY_SIZE(mm1_clks),
+};
+
+static const struct platform_device_id clk_mt8196_disp1_id_table[] = {
+ { .name = "clk-mt8196-disp1", .driver_data = (kernel_ulong_t)&mm1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_disp1_id_table);
+
+static struct platform_driver clk_mt8196_disp1_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-disp1",
+ },
+ .id_table = clk_mt8196_disp1_id_table,
+};
+module_platform_driver(clk_mt8196_disp1_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 disp1 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8196-imp_iic_wrap.c
new file mode 100644
index 000000000000..a63241671650
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-imp_iic_wrap.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs imp_cg_regs = {
+ .set_ofs = 0xe08,
+ .clr_ofs = 0xe04,
+ .sta_ofs = 0xe00,
+};
+
+#define GATE_IMP(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &imp_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate impc_clks[] = {
+ GATE_IMP(CLK_IMPC_I2C11, "impc_i2c11", "i2c_p", 0),
+ GATE_IMP(CLK_IMPC_I2C12, "impc_i2c12", "i2c_p", 1),
+ GATE_IMP(CLK_IMPC_I2C13, "impc_i2c13", "i2c_p", 2),
+ GATE_IMP(CLK_IMPC_I2C14, "impc_i2c14", "i2c_p", 3),
+};
+
+static const struct mtk_clk_desc impc_mcd = {
+ .clks = impc_clks,
+ .num_clks = ARRAY_SIZE(impc_clks),
+};
+
+static const struct mtk_gate impe_clks[] = {
+ GATE_IMP(CLK_IMPE_I2C5, "impe_i2c5", "i2c_east", 0),
+};
+
+static const struct mtk_clk_desc impe_mcd = {
+ .clks = impe_clks,
+ .num_clks = ARRAY_SIZE(impe_clks),
+};
+
+static const struct mtk_gate_regs impn_hwv_regs = {
+ .set_ofs = 0x0000,
+ .clr_ofs = 0x0004,
+ .sta_ofs = 0x2c00,
+};
+
+#define GATE_HWV_IMPN(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &imp_cg_regs, \
+ .hwv_regs = &impn_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate impn_clks[] = {
+ GATE_IMP(CLK_IMPN_I2C1, "impn_i2c1", "i2c_north", 0),
+ GATE_IMP(CLK_IMPN_I2C2, "impn_i2c2", "i2c_north", 1),
+ GATE_IMP(CLK_IMPN_I2C4, "impn_i2c4", "i2c_north", 2),
+ GATE_HWV_IMPN(CLK_IMPN_I2C7, "impn_i2c7", "i2c_north", 3),
+ GATE_IMP(CLK_IMPN_I2C8, "impn_i2c8", "i2c_north", 4),
+ GATE_IMP(CLK_IMPN_I2C9, "impn_i2c9", "i2c_north", 5),
+};
+
+static const struct mtk_clk_desc impn_mcd = {
+ .clks = impn_clks,
+ .num_clks = ARRAY_SIZE(impn_clks),
+};
+
+static const struct mtk_gate impw_clks[] = {
+ GATE_IMP(CLK_IMPW_I2C0, "impw_i2c0", "i2c_west", 0),
+ GATE_IMP(CLK_IMPW_I2C3, "impw_i2c3", "i2c_west", 1),
+ GATE_IMP(CLK_IMPW_I2C6, "impw_i2c6", "i2c_west", 2),
+ GATE_IMP(CLK_IMPW_I2C10, "impw_i2c10", "i2c_west", 3),
+};
+
+static const struct mtk_clk_desc impw_mcd = {
+ .clks = impw_clks,
+ .num_clks = ARRAY_SIZE(impw_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8196_imp_iic_wrap[] = {
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-c", .data = &impc_mcd },
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-e", .data = &impe_mcd },
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-n", .data = &impn_mcd },
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-w", .data = &impw_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_imp_iic_wrap);
+
+static struct platform_driver clk_mt8196_imp_iic_wrap_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-imp_iic_wrap",
+ .of_match_table = of_match_clk_mt8196_imp_iic_wrap,
+ },
+};
+module_platform_driver(clk_mt8196_imp_iic_wrap_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 I2C Wrapper clocks driver");
+MODULE_LICENSE("GPL");
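
All of the gates above use set/clear/status register triplets. In the MediaTek gate implementation the CG bit is a gating bit, so enabling a clock means clearing it: a write of BIT(shift) to clr_ofs ungates, a write to set_ofs gates, and sta_ofs reflects the current state. Roughly, as a simplified sketch of what mtk_clk_gate_ops_setclr does (ignoring regmap and locking), using the imp_cg_regs offsets from this file:

	#include <linux/bits.h>
	#include <linux/io.h>

	/* base points at the mapped imp_iic_wrap register space */
	static void example_gate_enable(void __iomem *base, u8 shift)
	{
		writel(BIT(shift), base + 0xe04);	/* clr_ofs: ungate */
	}

	static void example_gate_disable(void __iomem *base, u8 shift)
	{
		writel(BIT(shift), base + 0xe08);	/* set_ofs: gate */
	}

	static bool example_gate_is_enabled(void __iomem *base, u8 shift)
	{
		return !(readl(base + 0xe00) & BIT(shift));	/* sta_ofs */
	}
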
diff --git a/drivers/clk/mediatek/clk-mt8196-mcu.c b/drivers/clk/mediatek/clk-mt8196-mcu.c
new file mode 100644
index 000000000000..5cbcc411ae73
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-mcu.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#define ARMPLL_LL_CON0 0x008
+#define ARMPLL_LL_CON1 0x00c
+#define ARMPLL_LL_CON2 0x010
+#define ARMPLL_LL_CON3 0x014
+#define ARMPLL_BL_CON0 0x008
+#define ARMPLL_BL_CON1 0x00c
+#define ARMPLL_BL_CON2 0x010
+#define ARMPLL_BL_CON3 0x014
+#define ARMPLL_B_CON0 0x008
+#define ARMPLL_B_CON1 0x00c
+#define ARMPLL_B_CON2 0x010
+#define ARMPLL_B_CON3 0x014
+#define CCIPLL_CON0 0x008
+#define CCIPLL_CON1 0x00c
+#define CCIPLL_CON2 0x010
+#define CCIPLL_CON3 0x014
+#define PTPPLL_CON0 0x008
+#define PTPPLL_CON1 0x00c
+#define PTPPLL_CON2 0x010
+#define PTPPLL_CON3 0x014
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
+#define PLL(_id, _name, _reg, _en_reg, _en_mask, _pll_en_bit, \
+ _flags, _rst_bar_mask, \
+ _pd_reg, _pd_shift, _tuner_reg, \
+ _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, _pcwbits) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .en_reg = _en_reg, \
+ .en_mask = _en_mask, \
+ .pll_en_bit = _pll_en_bit, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ }
+
+static const struct mtk_pll_data cpu_bl_plls[] = {
+ PLL(CLK_CPBL_ARMPLL_BL, "armpll-bl", ARMPLL_BL_CON0, ARMPLL_BL_CON0, 0,
+ 0, PLL_AO, BIT(0), ARMPLL_BL_CON1, 24, 0, 0, 0, ARMPLL_BL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data cpu_b_plls[] = {
+ PLL(CLK_CPB_ARMPLL_B, "armpll-b", ARMPLL_B_CON0, ARMPLL_B_CON0, 0, 0,
+ PLL_AO, BIT(0), ARMPLL_B_CON1, 24, 0, 0, 0, ARMPLL_B_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data cpu_ll_plls[] = {
+ PLL(CLK_CPLL_ARMPLL_LL, "armpll-ll", ARMPLL_LL_CON0, ARMPLL_LL_CON0, 0,
+ 0, PLL_AO, BIT(0), ARMPLL_LL_CON1, 24, 0, 0, 0, ARMPLL_LL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data cci_plls[] = {
+ PLL(CLK_CCIPLL, "ccipll", CCIPLL_CON0, CCIPLL_CON0, 0, 0, PLL_AO,
+ BIT(0), CCIPLL_CON1, 24, 0, 0, 0, CCIPLL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data ptp_plls[] = {
+ PLL(CLK_PTPPLL, "ptppll", PTPPLL_CON0, PTPPLL_CON0, 0, 0, PLL_AO,
+ BIT(0), PTPPLL_CON1, 24, 0, 0, 0, PTPPLL_CON1, 0, 22),
+};
+
+static const struct of_device_id of_match_clk_mt8196_mcu[] = {
+ { .compatible = "mediatek,mt8196-armpll-bl-pll-ctrl",
+ .data = &cpu_bl_plls },
+ { .compatible = "mediatek,mt8196-armpll-b-pll-ctrl",
+ .data = &cpu_b_plls },
+ { .compatible = "mediatek,mt8196-armpll-ll-pll-ctrl",
+ .data = &cpu_ll_plls },
+ { .compatible = "mediatek,mt8196-ccipll-pll-ctrl", .data = &cci_plls },
+ { .compatible = "mediatek,mt8196-ptppll-pll-ctrl", .data = &ptp_plls },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_mcu);
+
+static int clk_mt8196_mcu_probe(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls;
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ const int num_plls = 1;
+ int r;
+
+ plls = of_device_get_match_data(&pdev->dev);
+ if (!plls)
+ return -EINVAL;
+
+ clk_data = mtk_alloc_clk_data(num_plls);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_plls(node, plls, num_plls, clk_data);
+ if (r)
+ goto free_clk_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(plls, num_plls, clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return r;
+}
+
+static void clk_mt8196_mcu_remove(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls = of_device_get_match_data(&pdev->dev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(plls, 1, clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static struct platform_driver clk_mt8196_mcu_drv = {
+ .probe = clk_mt8196_mcu_probe,
+ .remove = clk_mt8196_mcu_remove,
+ .driver = {
+ .name = "clk-mt8196-mcu",
+ .of_match_table = of_match_clk_mt8196_mcu,
+ },
+};
+module_platform_driver(clk_mt8196_mcu_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 mcusys clocks driver");
+MODULE_LICENSE("GPL");
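
For reference, the rate encoded by these fields follows the usual MediaTek integer/fractional PCW scheme: with pcwbits = 22 and pcwibits = 8, the low 14 bits of the PCW are fractional. A rough sketch of the recalc arithmetic (an assumption based on the common MediaTek PLL handling; rounding and post-divider read-out omitted):

	#include <linux/types.h>

	/* fin is the 26 MHz xtal; postdiv is the power-of-two divider exponent */
	static unsigned long example_pll_rate(u32 fin, u32 pcw, int postdiv)
	{
		const int pcwbits = 22, pcwibits = 8;
		u64 vco = (u64)fin * pcw;

		vco >>= pcwbits - pcwibits;	/* drop the fractional bits */
		return (unsigned long)(vco >> postdiv);
	}
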
diff --git a/drivers/clk/mediatek/clk-mt8196-mdpsys.c b/drivers/clk/mediatek/clk-mt8196-mdpsys.c
new file mode 100644
index 000000000000..7667d88f0eb0
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-mdpsys.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mdp0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mdp1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs mdp2_cg_regs = {
+ .set_ofs = 0x124,
+ .clr_ofs = 0x128,
+ .sta_ofs = 0x120,
+};
+
+#define GATE_MDP0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mdp0_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_MDP1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mdp1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_MDP2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mdp2_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate mdp1_clks[] = {
+ /* MDP1-0 */
+ GATE_MDP0(CLK_MDP1_MDP_MUTEX0, "mdp1_mdp_mutex0", "mdp", 0),
+ GATE_MDP0(CLK_MDP1_SMI0, "mdp1_smi0", "mdp", 1),
+ GATE_MDP0(CLK_MDP1_APB_BUS, "mdp1_apb_bus", "mdp", 2),
+ GATE_MDP0(CLK_MDP1_MDP_RDMA0, "mdp1_mdp_rdma0", "mdp", 3),
+ GATE_MDP0(CLK_MDP1_MDP_RDMA1, "mdp1_mdp_rdma1", "mdp", 4),
+ GATE_MDP0(CLK_MDP1_MDP_RDMA2, "mdp1_mdp_rdma2", "mdp", 5),
+ GATE_MDP0(CLK_MDP1_MDP_BIRSZ0, "mdp1_mdp_birsz0", "mdp", 6),
+ GATE_MDP0(CLK_MDP1_MDP_HDR0, "mdp1_mdp_hdr0", "mdp", 7),
+ GATE_MDP0(CLK_MDP1_MDP_AAL0, "mdp1_mdp_aal0", "mdp", 8),
+ GATE_MDP0(CLK_MDP1_MDP_RSZ0, "mdp1_mdp_rsz0", "mdp", 9),
+ GATE_MDP0(CLK_MDP1_MDP_RSZ2, "mdp1_mdp_rsz2", "mdp", 10),
+ GATE_MDP0(CLK_MDP1_MDP_TDSHP0, "mdp1_mdp_tdshp0", "mdp", 11),
+ GATE_MDP0(CLK_MDP1_MDP_COLOR0, "mdp1_mdp_color0", "mdp", 12),
+ GATE_MDP0(CLK_MDP1_MDP_WROT0, "mdp1_mdp_wrot0", "mdp", 13),
+ GATE_MDP0(CLK_MDP1_MDP_WROT1, "mdp1_mdp_wrot1", "mdp", 14),
+ GATE_MDP0(CLK_MDP1_MDP_WROT2, "mdp1_mdp_wrot2", "mdp", 15),
+ GATE_MDP0(CLK_MDP1_MDP_FAKE_ENG0, "mdp1_mdp_fake_eng0", "mdp", 16),
+ GATE_MDP0(CLK_MDP1_APB_DB, "mdp1_apb_db", "mdp", 17),
+ GATE_MDP0(CLK_MDP1_MDP_DLI_ASYNC0, "mdp1_mdp_dli_async0", "mdp", 18),
+ GATE_MDP0(CLK_MDP1_MDP_DLI_ASYNC1, "mdp1_mdp_dli_async1", "mdp", 19),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC0, "mdp1_mdp_dlo_async0", "mdp", 20),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC1, "mdp1_mdp_dlo_async1", "mdp", 21),
+ GATE_MDP0(CLK_MDP1_MDP_DLI_ASYNC2, "mdp1_mdp_dli_async2", "mdp", 22),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC2, "mdp1_mdp_dlo_async2", "mdp", 23),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC3, "mdp1_mdp_dlo_async3", "mdp", 24),
+ GATE_MDP0(CLK_MDP1_IMG_DL_ASYNC0, "mdp1_img_dl_async0", "mdp", 25),
+ GATE_MDP0(CLK_MDP1_MDP_RROT0, "mdp1_mdp_rrot0", "mdp", 26),
+ GATE_MDP0(CLK_MDP1_MDP_MERGE0, "mdp1_mdp_merge0", "mdp", 27),
+ GATE_MDP0(CLK_MDP1_MDP_C3D0, "mdp1_mdp_c3d0", "mdp", 28),
+ GATE_MDP0(CLK_MDP1_MDP_FG0, "mdp1_mdp_fg0", "mdp", 29),
+ GATE_MDP0(CLK_MDP1_MDP_CLA2, "mdp1_mdp_cla2", "mdp", 30),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC4, "mdp1_mdp_dlo_async4", "mdp", 31),
+ /* MDP1-1 */
+ GATE_MDP1(CLK_MDP1_VPP_RSZ0, "mdp1_vpp_rsz0", "mdp", 0),
+ GATE_MDP1(CLK_MDP1_VPP_RSZ1, "mdp1_vpp_rsz1", "mdp", 1),
+ GATE_MDP1(CLK_MDP1_MDP_DLO_ASYNC5, "mdp1_mdp_dlo_async5", "mdp", 2),
+ GATE_MDP1(CLK_MDP1_IMG0, "mdp1_img0", "mdp", 3),
+ GATE_MDP1(CLK_MDP1_F26M, "mdp1_f26m", "clk26m", 27),
+ /* MDP1-2 */
+ GATE_MDP2(CLK_MDP1_IMG_DL_RELAY0, "mdp1_img_dl_relay0", "mdp", 0),
+ GATE_MDP2(CLK_MDP1_IMG_DL_RELAY1, "mdp1_img_dl_relay1", "mdp", 8),
+};
+
+static const struct mtk_clk_desc mdp1_mcd = {
+ .clks = mdp1_clks,
+ .num_clks = ARRAY_SIZE(mdp1_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct mtk_gate mdp_clks[] = {
+ /* MDP0 */
+ GATE_MDP0(CLK_MDP_MDP_MUTEX0, "mdp_mdp_mutex0", "mdp", 0),
+ GATE_MDP0(CLK_MDP_SMI0, "mdp_smi0", "mdp", 1),
+ GATE_MDP0(CLK_MDP_APB_BUS, "mdp_apb_bus", "mdp", 2),
+ GATE_MDP0(CLK_MDP_MDP_RDMA0, "mdp_mdp_rdma0", "mdp", 3),
+ GATE_MDP0(CLK_MDP_MDP_RDMA1, "mdp_mdp_rdma1", "mdp", 4),
+ GATE_MDP0(CLK_MDP_MDP_RDMA2, "mdp_mdp_rdma2", "mdp", 5),
+ GATE_MDP0(CLK_MDP_MDP_BIRSZ0, "mdp_mdp_birsz0", "mdp", 6),
+ GATE_MDP0(CLK_MDP_MDP_HDR0, "mdp_mdp_hdr0", "mdp", 7),
+ GATE_MDP0(CLK_MDP_MDP_AAL0, "mdp_mdp_aal0", "mdp", 8),
+ GATE_MDP0(CLK_MDP_MDP_RSZ0, "mdp_mdp_rsz0", "mdp", 9),
+ GATE_MDP0(CLK_MDP_MDP_RSZ2, "mdp_mdp_rsz2", "mdp", 10),
+ GATE_MDP0(CLK_MDP_MDP_TDSHP0, "mdp_mdp_tdshp0", "mdp", 11),
+ GATE_MDP0(CLK_MDP_MDP_COLOR0, "mdp_mdp_color0", "mdp", 12),
+ GATE_MDP0(CLK_MDP_MDP_WROT0, "mdp_mdp_wrot0", "mdp", 13),
+ GATE_MDP0(CLK_MDP_MDP_WROT1, "mdp_mdp_wrot1", "mdp", 14),
+ GATE_MDP0(CLK_MDP_MDP_WROT2, "mdp_mdp_wrot2", "mdp", 15),
+ GATE_MDP0(CLK_MDP_MDP_FAKE_ENG0, "mdp_mdp_fake_eng0", "mdp", 16),
+ GATE_MDP0(CLK_MDP_APB_DB, "mdp_apb_db", "mdp", 17),
+ GATE_MDP0(CLK_MDP_MDP_DLI_ASYNC0, "mdp_mdp_dli_async0", "mdp", 18),
+ GATE_MDP0(CLK_MDP_MDP_DLI_ASYNC1, "mdp_mdp_dli_async1", "mdp", 19),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC0, "mdp_mdp_dlo_async0", "mdp", 20),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC1, "mdp_mdp_dlo_async1", "mdp", 21),
+ GATE_MDP0(CLK_MDP_MDP_DLI_ASYNC2, "mdp_mdp_dli_async2", "mdp", 22),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC2, "mdp_mdp_dlo_async2", "mdp", 23),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC3, "mdp_mdp_dlo_async3", "mdp", 24),
+ GATE_MDP0(CLK_MDP_IMG_DL_ASYNC0, "mdp_img_dl_async0", "mdp", 25),
+ GATE_MDP0(CLK_MDP_MDP_RROT0, "mdp_mdp_rrot0", "mdp", 26),
+ GATE_MDP0(CLK_MDP_MDP_MERGE0, "mdp_mdp_merge0", "mdp", 27),
+ GATE_MDP0(CLK_MDP_MDP_C3D0, "mdp_mdp_c3d0", "mdp", 28),
+ GATE_MDP0(CLK_MDP_MDP_FG0, "mdp_mdp_fg0", "mdp", 29),
+ GATE_MDP0(CLK_MDP_MDP_CLA2, "mdp_mdp_cla2", "mdp", 30),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC4, "mdp_mdp_dlo_async4", "mdp", 31),
+ /* MDP1 */
+ GATE_MDP1(CLK_MDP_VPP_RSZ0, "mdp_vpp_rsz0", "mdp", 0),
+ GATE_MDP1(CLK_MDP_VPP_RSZ1, "mdp_vpp_rsz1", "mdp", 1),
+ GATE_MDP1(CLK_MDP_MDP_DLO_ASYNC5, "mdp_mdp_dlo_async5", "mdp", 2),
+ GATE_MDP1(CLK_MDP_IMG0, "mdp_img0", "mdp", 3),
+ GATE_MDP1(CLK_MDP_F26M, "mdp_f26m", "clk26m", 27),
+ /* MDP2 */
+ GATE_MDP2(CLK_MDP_IMG_DL_RELAY0, "mdp_img_dl_relay0", "mdp", 0),
+ GATE_MDP2(CLK_MDP_IMG_DL_RELAY1, "mdp_img_dl_relay1", "mdp", 8),
+};
+
+static const struct mtk_clk_desc mdp_mcd = {
+ .clks = mdp_clks,
+ .num_clks = ARRAY_SIZE(mdp_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct of_device_id of_match_clk_mt8196_mdpsys[] = {
+ { .compatible = "mediatek,mt8196-mdpsys0", .data = &mdp_mcd },
+ { .compatible = "mediatek,mt8196-mdpsys1", .data = &mdp1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_mdpsys);
+
+static struct platform_driver clk_mt8196_mdpsys_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-mdpsys",
+ .of_match_table = of_match_clk_mt8196_mdpsys,
+ },
+};
+module_platform_driver(clk_mt8196_mdpsys_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 Multimedia Data Path clocks driver");
+MODULE_LICENSE("GPL");
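
Both descriptors set need_runtime_pm, since the MDP register space sits behind a power domain that must be up while the gates are touched. The assumed pattern (a sketch of the bracketing, not the exact mtk_clk_simple_probe code) looks like:

	#include <linux/pm_runtime.h>

	static int example_probe_with_rpm(struct device *dev)
	{
		int ret;

		ret = devm_pm_runtime_enable(dev);
		if (ret)
			return ret;

		ret = pm_runtime_resume_and_get(dev);	/* power up the domain */
		if (ret < 0)
			return ret;

		/* ... register the gates while the domain is powered ... */

		pm_runtime_put(dev);
		return 0;
	}
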
diff --git a/drivers/clk/mediatek/clk-mt8196-mfg.c b/drivers/clk/mediatek/clk-mt8196-mfg.c
new file mode 100644
index 000000000000..ae1eb9de79ae
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-mfg.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#define MFGPLL_CON0 0x008
+#define MFGPLL_CON1 0x00c
+#define MFGPLL_CON2 0x010
+#define MFGPLL_CON3 0x014
+#define MFGPLL_SC0_CON0 0x008
+#define MFGPLL_SC0_CON1 0x00c
+#define MFGPLL_SC0_CON2 0x010
+#define MFGPLL_SC0_CON3 0x014
+#define MFGPLL_SC1_CON0 0x008
+#define MFGPLL_SC1_CON1 0x00c
+#define MFGPLL_SC1_CON2 0x010
+#define MFGPLL_SC1_CON3 0x014
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
+#define PLL(_id, _name, _reg, _en_reg, _en_mask, _pll_en_bit, \
+ _flags, _rst_bar_mask, \
+ _pd_reg, _pd_shift, _tuner_reg, \
+ _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, _pcwbits) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .en_reg = _en_reg, \
+ .en_mask = _en_mask, \
+ .pll_en_bit = _pll_en_bit, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ }
+
+static const struct mtk_pll_data mfg_ao_plls[] = {
+ PLL(CLK_MFG_AO_MFGPLL, "mfgpll", MFGPLL_CON0, MFGPLL_CON0, 0, 0, 0,
+ BIT(0), MFGPLL_CON1, 24, 0, 0, 0,
+ MFGPLL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data mfgsc0_ao_plls[] = {
+ PLL(CLK_MFGSC0_AO_MFGPLL_SC0, "mfgpll-sc0", MFGPLL_SC0_CON0,
+ MFGPLL_SC0_CON0, 0, 0, 0, BIT(0), MFGPLL_SC0_CON1, 24, 0, 0, 0,
+ MFGPLL_SC0_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data mfgsc1_ao_plls[] = {
+ PLL(CLK_MFGSC1_AO_MFGPLL_SC1, "mfgpll-sc1", MFGPLL_SC1_CON0,
+ MFGPLL_SC1_CON0, 0, 0, 0, BIT(0), MFGPLL_SC1_CON1, 24, 0, 0, 0,
+ MFGPLL_SC1_CON1, 0, 22),
+};
+
+static const struct of_device_id of_match_clk_mt8196_mfg[] = {
+ { .compatible = "mediatek,mt8196-mfgpll-pll-ctrl",
+ .data = &mfg_ao_plls },
+ { .compatible = "mediatek,mt8196-mfgpll-sc0-pll-ctrl",
+ .data = &mfgsc0_ao_plls },
+ { .compatible = "mediatek,mt8196-mfgpll-sc1-pll-ctrl",
+ .data = &mfgsc1_ao_plls },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_mfg);
+
+static int clk_mt8196_mfg_probe(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls;
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ const int num_plls = 1;
+ int r;
+
+ plls = of_device_get_match_data(&pdev->dev);
+ if (!plls)
+ return -EINVAL;
+
+ clk_data = mtk_alloc_clk_data(num_plls);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_plls(node, plls, num_plls, clk_data);
+ if (r)
+ goto free_clk_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(plls, num_plls, clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return r;
+}
+
+static void clk_mt8196_mfg_remove(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls = of_device_get_match_data(&pdev->dev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(plls, 1, clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static struct platform_driver clk_mt8196_mfg_drv = {
+ .probe = clk_mt8196_mfg_probe,
+ .remove = clk_mt8196_mfg_remove,
+ .driver = {
+ .name = "clk-mt8196-mfg",
+ .of_match_table = of_match_clk_mt8196_mfg,
+ },
+};
+module_platform_driver(clk_mt8196_mfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 GPU mfg clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-ovl0.c b/drivers/clk/mediatek/clk-mt8196-ovl0.c
new file mode 100644
index 000000000000..d4affd14d2c4
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-ovl0.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs ovl0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs ovl0_hwv_regs = {
+ .set_ofs = 0x0060,
+ .clr_ofs = 0x0064,
+ .sta_ofs = 0x2c30,
+};
+
+static const struct mtk_gate_regs ovl1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs ovl1_hwv_regs = {
+ .set_ofs = 0x0068,
+ .clr_ofs = 0x006c,
+ .sta_ofs = 0x2c34,
+};
+
+#define GATE_HWV_OVL0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl0_cg_regs, \
+ .hwv_regs = &ovl0_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_OVL1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl1_cg_regs, \
+ .hwv_regs = &ovl1_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate ovl_clks[] = {
+ /* OVL0 */
+ GATE_HWV_OVL0(CLK_OVLSYS_CONFIG, "ovlsys_config", "disp", 0),
+ GATE_HWV_OVL0(CLK_OVL_FAKE_ENG0, "ovl_fake_eng0", "disp", 1),
+ GATE_HWV_OVL0(CLK_OVL_FAKE_ENG1, "ovl_fake_eng1", "disp", 2),
+ GATE_HWV_OVL0(CLK_OVL_MUTEX0, "ovl_mutex0", "disp", 3),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA0, "ovl_exdma0", "disp", 4),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA1, "ovl_exdma1", "disp", 5),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA2, "ovl_exdma2", "disp", 6),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA3, "ovl_exdma3", "disp", 7),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA4, "ovl_exdma4", "disp", 8),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA5, "ovl_exdma5", "disp", 9),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA6, "ovl_exdma6", "disp", 10),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA7, "ovl_exdma7", "disp", 11),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA8, "ovl_exdma8", "disp", 12),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA9, "ovl_exdma9", "disp", 13),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER0, "ovl_blender0", "disp", 14),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER1, "ovl_blender1", "disp", 15),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER2, "ovl_blender2", "disp", 16),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER3, "ovl_blender3", "disp", 17),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER4, "ovl_blender4", "disp", 18),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER5, "ovl_blender5", "disp", 19),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER6, "ovl_blender6", "disp", 20),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER7, "ovl_blender7", "disp", 21),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER8, "ovl_blender8", "disp", 22),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER9, "ovl_blender9", "disp", 23),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC0, "ovl_outproc0", "disp", 24),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC1, "ovl_outproc1", "disp", 25),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC2, "ovl_outproc2", "disp", 26),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC3, "ovl_outproc3", "disp", 27),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC4, "ovl_outproc4", "disp", 28),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC5, "ovl_outproc5", "disp", 29),
+ GATE_HWV_OVL0(CLK_OVL_MDP_RSZ0, "ovl_mdp_rsz0", "disp", 30),
+ GATE_HWV_OVL0(CLK_OVL_MDP_RSZ1, "ovl_mdp_rsz1", "disp", 31),
+ /* OVL1 */
+ GATE_HWV_OVL1(CLK_OVL_DISP_WDMA0, "ovl_disp_wdma0", "disp", 0),
+ GATE_HWV_OVL1(CLK_OVL_DISP_WDMA1, "ovl_disp_wdma1", "disp", 1),
+ GATE_HWV_OVL1(CLK_OVL_UFBC_WDMA0, "ovl_ufbc_wdma0", "disp", 2),
+ GATE_HWV_OVL1(CLK_OVL_MDP_RDMA0, "ovl_mdp_rdma0", "disp", 3),
+ GATE_HWV_OVL1(CLK_OVL_MDP_RDMA1, "ovl_mdp_rdma1", "disp", 4),
+ GATE_HWV_OVL1(CLK_OVL_BWM0, "ovl_bwm0", "disp", 5),
+ GATE_HWV_OVL1(CLK_OVL_DLI0, "ovl_dli0", "disp", 6),
+ GATE_HWV_OVL1(CLK_OVL_DLI1, "ovl_dli1", "disp", 7),
+ GATE_HWV_OVL1(CLK_OVL_DLI2, "ovl_dli2", "disp", 8),
+ GATE_HWV_OVL1(CLK_OVL_DLI3, "ovl_dli3", "disp", 9),
+ GATE_HWV_OVL1(CLK_OVL_DLI4, "ovl_dli4", "disp", 10),
+ GATE_HWV_OVL1(CLK_OVL_DLI5, "ovl_dli5", "disp", 11),
+ GATE_HWV_OVL1(CLK_OVL_DLI6, "ovl_dli6", "disp", 12),
+ GATE_HWV_OVL1(CLK_OVL_DLI7, "ovl_dli7", "disp", 13),
+ GATE_HWV_OVL1(CLK_OVL_DLI8, "ovl_dli8", "disp", 14),
+ GATE_HWV_OVL1(CLK_OVL_DLO0, "ovl_dlo0", "disp", 15),
+ GATE_HWV_OVL1(CLK_OVL_DLO1, "ovl_dlo1", "disp", 16),
+ GATE_HWV_OVL1(CLK_OVL_DLO2, "ovl_dlo2", "disp", 17),
+ GATE_HWV_OVL1(CLK_OVL_DLO3, "ovl_dlo3", "disp", 18),
+ GATE_HWV_OVL1(CLK_OVL_DLO4, "ovl_dlo4", "disp", 19),
+ GATE_HWV_OVL1(CLK_OVL_DLO5, "ovl_dlo5", "disp", 20),
+ GATE_HWV_OVL1(CLK_OVL_DLO6, "ovl_dlo6", "disp", 21),
+ GATE_HWV_OVL1(CLK_OVL_DLO7, "ovl_dlo7", "disp", 22),
+ GATE_HWV_OVL1(CLK_OVL_DLO8, "ovl_dlo8", "disp", 23),
+ GATE_HWV_OVL1(CLK_OVL_DLO9, "ovl_dlo9", "disp", 24),
+ GATE_HWV_OVL1(CLK_OVL_DLO10, "ovl_dlo10", "disp", 25),
+ GATE_HWV_OVL1(CLK_OVL_DLO11, "ovl_dlo11", "disp", 26),
+ GATE_HWV_OVL1(CLK_OVL_DLO12, "ovl_dlo12", "disp", 27),
+ GATE_HWV_OVL1(CLK_OVLSYS_RELAY0, "ovlsys_relay0", "disp", 28),
+ GATE_HWV_OVL1(CLK_OVL_INLINEROT0, "ovl_inlinerot0", "disp", 29),
+ GATE_HWV_OVL1(CLK_OVL_SMI, "ovl_smi", "disp", 30),
+};
+
+static const struct mtk_clk_desc ovl_mcd = {
+ .clks = ovl_clks,
+ .num_clks = ARRAY_SIZE(ovl_clks),
+};
+
+static const struct platform_device_id clk_mt8196_ovl0_id_table[] = {
+ { .name = "clk-mt8196-ovl0", .driver_data = (kernel_ulong_t)&ovl_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_ovl0_id_table);
+
+static struct platform_driver clk_mt8196_ovl0_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-ovl0",
+ },
+ .id_table = clk_mt8196_ovl0_id_table,
+};
+module_platform_driver(clk_mt8196_ovl0_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 ovl0 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-ovl1.c b/drivers/clk/mediatek/clk-mt8196-ovl1.c
new file mode 100644
index 000000000000..c8843d0d3ede
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-ovl1.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs ovl10_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs ovl10_hwv_regs = {
+ .set_ofs = 0x0050,
+ .clr_ofs = 0x0054,
+ .sta_ofs = 0x2c28,
+};
+
+static const struct mtk_gate_regs ovl11_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs ovl11_hwv_regs = {
+ .set_ofs = 0x0058,
+ .clr_ofs = 0x005c,
+ .sta_ofs = 0x2c2c,
+};
+
+#define GATE_HWV_OVL10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl10_cg_regs, \
+ .hwv_regs = &ovl10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_OVL11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl11_cg_regs, \
+ .hwv_regs = &ovl11_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate ovl1_clks[] = {
+ /* OVL10 */
+ GATE_HWV_OVL10(CLK_OVL1_OVLSYS_CONFIG, "ovl1_ovlsys_config", "disp", 0),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_FAKE_ENG0, "ovl1_ovl_fake_eng0", "disp", 1),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_FAKE_ENG1, "ovl1_ovl_fake_eng1", "disp", 2),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_MUTEX0, "ovl1_ovl_mutex0", "disp", 3),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA0, "ovl1_ovl_exdma0", "disp", 4),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA1, "ovl1_ovl_exdma1", "disp", 5),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA2, "ovl1_ovl_exdma2", "disp", 6),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA3, "ovl1_ovl_exdma3", "disp", 7),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA4, "ovl1_ovl_exdma4", "disp", 8),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA5, "ovl1_ovl_exdma5", "disp", 9),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA6, "ovl1_ovl_exdma6", "disp", 10),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA7, "ovl1_ovl_exdma7", "disp", 11),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA8, "ovl1_ovl_exdma8", "disp", 12),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA9, "ovl1_ovl_exdma9", "disp", 13),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER0, "ovl1_ovl_blender0", "disp", 14),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER1, "ovl1_ovl_blender1", "disp", 15),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER2, "ovl1_ovl_blender2", "disp", 16),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER3, "ovl1_ovl_blender3", "disp", 17),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER4, "ovl1_ovl_blender4", "disp", 18),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER5, "ovl1_ovl_blender5", "disp", 19),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER6, "ovl1_ovl_blender6", "disp", 20),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER7, "ovl1_ovl_blender7", "disp", 21),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER8, "ovl1_ovl_blender8", "disp", 22),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER9, "ovl1_ovl_blender9", "disp", 23),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC0, "ovl1_ovl_outproc0", "disp", 24),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC1, "ovl1_ovl_outproc1", "disp", 25),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC2, "ovl1_ovl_outproc2", "disp", 26),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC3, "ovl1_ovl_outproc3", "disp", 27),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC4, "ovl1_ovl_outproc4", "disp", 28),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC5, "ovl1_ovl_outproc5", "disp", 29),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_MDP_RSZ0, "ovl1_ovl_mdp_rsz0", "disp", 30),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_MDP_RSZ1, "ovl1_ovl_mdp_rsz1", "disp", 31),
+ /* OVL11 */
+ GATE_HWV_OVL11(CLK_OVL1_OVL_DISP_WDMA0, "ovl1_ovl_disp_wdma0", "disp", 0),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_DISP_WDMA1, "ovl1_ovl_disp_wdma1", "disp", 1),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_UFBC_WDMA0, "ovl1_ovl_ufbc_wdma0", "disp", 2),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_MDP_RDMA0, "ovl1_ovl_mdp_rdma0", "disp", 3),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_MDP_RDMA1, "ovl1_ovl_mdp_rdma1", "disp", 4),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_BWM0, "ovl1_ovl_bwm0", "disp", 5),
+ GATE_HWV_OVL11(CLK_OVL1_DLI0, "ovl1_dli0", "disp", 6),
+ GATE_HWV_OVL11(CLK_OVL1_DLI1, "ovl1_dli1", "disp", 7),
+ GATE_HWV_OVL11(CLK_OVL1_DLI2, "ovl1_dli2", "disp", 8),
+ GATE_HWV_OVL11(CLK_OVL1_DLI3, "ovl1_dli3", "disp", 9),
+ GATE_HWV_OVL11(CLK_OVL1_DLI4, "ovl1_dli4", "disp", 10),
+ GATE_HWV_OVL11(CLK_OVL1_DLI5, "ovl1_dli5", "disp", 11),
+ GATE_HWV_OVL11(CLK_OVL1_DLI6, "ovl1_dli6", "disp", 12),
+ GATE_HWV_OVL11(CLK_OVL1_DLI7, "ovl1_dli7", "disp", 13),
+ GATE_HWV_OVL11(CLK_OVL1_DLI8, "ovl1_dli8", "disp", 14),
+ GATE_HWV_OVL11(CLK_OVL1_DLO0, "ovl1_dlo0", "disp", 15),
+ GATE_HWV_OVL11(CLK_OVL1_DLO1, "ovl1_dlo1", "disp", 16),
+ GATE_HWV_OVL11(CLK_OVL1_DLO2, "ovl1_dlo2", "disp", 17),
+ GATE_HWV_OVL11(CLK_OVL1_DLO3, "ovl1_dlo3", "disp", 18),
+ GATE_HWV_OVL11(CLK_OVL1_DLO4, "ovl1_dlo4", "disp", 19),
+ GATE_HWV_OVL11(CLK_OVL1_DLO5, "ovl1_dlo5", "disp", 20),
+ GATE_HWV_OVL11(CLK_OVL1_DLO6, "ovl1_dlo6", "disp", 21),
+ GATE_HWV_OVL11(CLK_OVL1_DLO7, "ovl1_dlo7", "disp", 22),
+ GATE_HWV_OVL11(CLK_OVL1_DLO8, "ovl1_dlo8", "disp", 23),
+ GATE_HWV_OVL11(CLK_OVL1_DLO9, "ovl1_dlo9", "disp", 24),
+ GATE_HWV_OVL11(CLK_OVL1_DLO10, "ovl1_dlo10", "disp", 25),
+ GATE_HWV_OVL11(CLK_OVL1_DLO11, "ovl1_dlo11", "disp", 26),
+ GATE_HWV_OVL11(CLK_OVL1_DLO12, "ovl1_dlo12", "disp", 27),
+ GATE_HWV_OVL11(CLK_OVL1_OVLSYS_RELAY0, "ovl1_ovlsys_relay0", "disp", 28),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_INLINEROT0, "ovl1_ovl_inlinerot0", "disp", 29),
+ GATE_HWV_OVL11(CLK_OVL1_SMI, "ovl1_smi", "disp", 30),
+};
+
+static const struct mtk_clk_desc ovl1_mcd = {
+ .clks = ovl1_clks,
+ .num_clks = ARRAY_SIZE(ovl1_clks),
+};
+
+static const struct platform_device_id clk_mt8196_ovl1_id_table[] = {
+ { .name = "clk-mt8196-ovl1", .driver_data = (kernel_ulong_t)&ovl1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_ovl1_id_table);
+
+static struct platform_driver clk_mt8196_ovl1_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-ovl1",
+ },
+ .id_table = clk_mt8196_ovl1_id_table,
+};
+module_platform_driver(clk_mt8196_ovl1_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 ovl1 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-peri_ao.c b/drivers/clk/mediatek/clk-mt8196-peri_ao.c
new file mode 100644
index 000000000000..f227a86c5d60
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-peri_ao.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs peri_ao0_cg_regs = {
+ .set_ofs = 0x24,
+ .clr_ofs = 0x28,
+ .sta_ofs = 0x10,
+};
+
+static const struct mtk_gate_regs peri_ao1_cg_regs = {
+ .set_ofs = 0x2c,
+ .clr_ofs = 0x30,
+ .sta_ofs = 0x14,
+};
+
+static const struct mtk_gate_regs peri_ao1_hwv_regs = {
+ .set_ofs = 0x0008,
+ .clr_ofs = 0x000c,
+ .sta_ofs = 0x2c04,
+};
+
+static const struct mtk_gate_regs peri_ao2_cg_regs = {
+ .set_ofs = 0x34,
+ .clr_ofs = 0x38,
+ .sta_ofs = 0x18,
+};
+
+#define GATE_PERI_AO0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_PERI_AO1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_HWV_PERI_AO1(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao1_cg_regs, \
+ .hwv_regs = &peri_ao1_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ }
+
+#define GATE_PERI_AO2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate peri_ao_clks[] = {
+ /* PERI_AO0 */
+ GATE_PERI_AO0(CLK_PERI_AO_UART0_BCLK, "peri_ao_uart0_bclk", "uart", 0),
+ GATE_PERI_AO0(CLK_PERI_AO_UART1_BCLK, "peri_ao_uart1_bclk", "uart", 1),
+ GATE_PERI_AO0(CLK_PERI_AO_UART2_BCLK, "peri_ao_uart2_bclk", "uart", 2),
+ GATE_PERI_AO0(CLK_PERI_AO_UART3_BCLK, "peri_ao_uart3_bclk", "uart", 3),
+ GATE_PERI_AO0(CLK_PERI_AO_UART4_BCLK, "peri_ao_uart4_bclk", "uart", 4),
+ GATE_PERI_AO0(CLK_PERI_AO_UART5_BCLK, "peri_ao_uart5_bclk", "uart", 5),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_X16W_HCLK, "peri_ao_pwm_x16w", "p_axi", 12),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_X16W_BCLK, "peri_ao_pwm_x16w_bclk", "pwm", 13),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK0, "peri_ao_pwm_pwm_bclk0", "pwm", 14),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK1, "peri_ao_pwm_pwm_bclk1", "pwm", 15),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK2, "peri_ao_pwm_pwm_bclk2", "pwm", 16),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK3, "peri_ao_pwm_pwm_bclk3", "pwm", 17),
+ /* PERI_AO1 */
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI0_BCLK, "peri_ao_spi0_bclk", "spi0_b", 0),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI1_BCLK, "peri_ao_spi1_bclk", "spi1_b", 2),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI2_BCLK, "peri_ao_spi2_bclk", "spi2_b", 3),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI3_BCLK, "peri_ao_spi3_bclk", "spi3_b", 4),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI4_BCLK, "peri_ao_spi4_bclk", "spi4_b", 5),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI5_BCLK, "peri_ao_spi5_bclk", "spi5_b", 6),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI6_BCLK, "peri_ao_spi6_bclk", "spi6_b", 7),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI7_BCLK, "peri_ao_spi7_bclk", "spi7_b", 8),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_FLASH, "peri_ao_flashif_flash", "peri_ao_flashif_27m",
+ 18),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_27M, "peri_ao_flashif_27m", "sflash", 19),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_DRAM, "peri_ao_flashif_dram", "p_axi", 20),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_AXI, "peri_ao_flashif_axi", "peri_ao_flashif_dram", 21),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_BCLK, "peri_ao_flashif_bclk", "p_axi", 22),
+ GATE_PERI_AO1(CLK_PERI_AO_AP_DMA_X32W_BCLK, "peri_ao_ap_dma_x32w_bclk", "p_axi", 26),
+ /* PERI_AO2 */
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_MSDC_SRC, "peri_ao_msdc1_msdc_src", "msdc30_1", 1),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_HCLK, "peri_ao_msdc1", "peri_ao_msdc1_axi", 2),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_AXI, "peri_ao_msdc1_axi", "p_axi", 3),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_HCLK_WRAP, "peri_ao_msdc1_h_wrap", "peri_ao_msdc1", 4),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_MSDC_SRC, "peri_ao_msdc2_msdc_src", "msdc30_2", 10),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_HCLK, "peri_ao_msdc2", "peri_ao_msdc2_axi", 11),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_AXI, "peri_ao_msdc2_axi", "p_axi", 12),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_HCLK_WRAP, "peri_ao_msdc2_h_wrap", "peri_ao_msdc2", 13),
+};
+
+static const struct mtk_clk_desc peri_ao_mcd = {
+ .clks = peri_ao_clks,
+ .num_clks = ARRAY_SIZE(peri_ao_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8196_peri_ao[] = {
+ { .compatible = "mediatek,mt8196-pericfg-ao", .data = &peri_ao_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_peri_ao);
+
+static struct platform_driver clk_mt8196_peri_ao_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-peri-ao",
+ .of_match_table = of_match_clk_mt8196_peri_ao,
+ },
+};
+
+MODULE_DESCRIPTION("MediaTek MT8196 pericfg_ao clock controller driver");
+module_platform_driver(clk_mt8196_peri_ao_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-pextp.c b/drivers/clk/mediatek/clk-mt8196-pextp.c
new file mode 100644
index 000000000000..3e505ecc4b6e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-pextp.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+#include <dt-bindings/reset/mediatek,mt8196-resets.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "reset.h"
+
+#define MT8196_PEXTP_RST0_SET_OFFSET 0x8
+
+static const struct mtk_gate_regs pext_cg_regs = {
+ .set_ofs = 0x18,
+ .clr_ofs = 0x1c,
+ .sta_ofs = 0x14,
+};
+
+#define GATE_PEXT(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &pext_cg_regs, \
+ .shift = _shift, \
+	.ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate pext_clks[] = {
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_TL, "pext_pm0_tl", "tl", 0),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_REF, "pext_pm0_ref", "clk26m", 1),
+ GATE_PEXT(CLK_PEXT_PEXTP_PHY_P0_MCU_BUS, "pext_pp0_mcu_bus", "clk26m", 6),
+ GATE_PEXT(CLK_PEXT_PEXTP_PHY_P0_PEXTP_REF, "pext_pp0_pextp_ref", "clk26m", 7),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_AXI_250, "pext_pm0_axi_250", "ufs_pexpt0_mem_sub", 12),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_AHB_APB, "pext_pm0_ahb_apb", "ufs_pextp0_axi", 13),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_PL_P, "pext_pm0_pl_p", "clk26m", 14),
+ GATE_PEXT(CLK_PEXT_PEXTP_VLP_AO_P0_LP, "pext_pextp_vlp_ao_p0_lp", "clk26m", 19),
+};
+
+static u16 pext_rst_ofs[] = { MT8196_PEXTP_RST0_SET_OFFSET };
+
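+/* Map reset indices from the DT bindings to bit positions in RST0 */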
+static u16 pext_rst_idx_map[] = {
+ [MT8196_PEXTP0_RST0_PCIE0_MAC] = 0,
+ [MT8196_PEXTP0_RST0_PCIE0_PHY] = 1,
+};
+
+static const struct mtk_clk_rst_desc pext_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = pext_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pext_rst_ofs),
+ .rst_idx_map = pext_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(pext_rst_idx_map),
+};
+
+static const struct mtk_clk_desc pext_mcd = {
+ .clks = pext_clks,
+ .num_clks = ARRAY_SIZE(pext_clks),
+ .rst_desc = &pext_rst_desc,
+};
+
+static const struct mtk_gate pext1_clks[] = {
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_TL, "pext1_pm1_tl", "tl_p1", 0),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_REF, "pext1_pm1_ref", "clk26m", 1),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_TL, "pext1_pm2_tl", "tl_p2", 2),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_REF, "pext1_pm2_ref", "clk26m", 3),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P1_MCU_BUS, "pext1_pp1_mcu_bus", "clk26m", 8),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P1_PEXTP_REF, "pext1_pp1_pextp_ref", "clk26m", 9),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P2_MCU_BUS, "pext1_pp2_mcu_bus", "clk26m", 10),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P2_PEXTP_REF, "pext1_pp2_pextp_ref", "clk26m", 11),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_AXI_250, "pext1_pm1_axi_250",
+ "pextp1_usb_axi", 16),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_AHB_APB, "pext1_pm1_ahb_apb",
+ "pextp1_usb_mem_sub", 17),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_PL_P, "pext1_pm1_pl_p", "clk26m", 18),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_AXI_250, "pext1_pm2_axi_250",
+ "pextp1_usb_axi", 19),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_AHB_APB, "pext1_pm2_ahb_apb",
+ "pextp1_usb_mem_sub", 20),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_PL_P, "pext1_pm2_pl_p", "clk26m", 21),
+ GATE_PEXT(CLK_PEXT1_PEXTP_VLP_AO_P1_LP, "pext1_pextp_vlp_ao_p1_lp", "clk26m", 26),
+ GATE_PEXT(CLK_PEXT1_PEXTP_VLP_AO_P2_LP, "pext1_pextp_vlp_ao_p2_lp", "clk26m", 27),
+};
+
+static u16 pext1_rst_idx_map[] = {
+ [MT8196_PEXTP1_RST0_PCIE1_MAC] = 0,
+ [MT8196_PEXTP1_RST0_PCIE1_PHY] = 1,
+ [MT8196_PEXTP1_RST0_PCIE2_MAC] = 8,
+ [MT8196_PEXTP1_RST0_PCIE2_PHY] = 9,
+};
+
+static const struct mtk_clk_rst_desc pext1_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = pext_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pext_rst_ofs),
+ .rst_idx_map = pext1_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(pext1_rst_idx_map),
+};
+
+static const struct mtk_clk_desc pext1_mcd = {
+ .clks = pext1_clks,
+ .num_clks = ARRAY_SIZE(pext1_clks),
+ .rst_desc = &pext1_rst_desc,
+};
+
+static const struct of_device_id of_match_clk_mt8196_pextp[] = {
+ { .compatible = "mediatek,mt8196-pextp0cfg-ao", .data = &pext_mcd },
+ { .compatible = "mediatek,mt8196-pextp1cfg-ao", .data = &pext1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_pextp);
+
+static struct platform_driver clk_mt8196_pextp_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-pextp",
+ .of_match_table = of_match_clk_mt8196_pextp,
+ },
+};
+
+module_platform_driver(clk_mt8196_pextp_drv);
+MODULE_DESCRIPTION("MediaTek MT8196 PCIe transmit phy clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-topckgen.c b/drivers/clk/mediatek/clk-mt8196-topckgen.c
new file mode 100644
index 000000000000..6ace11ef6b69
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-topckgen.c
@@ -0,0 +1,985 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+/* MUX SEL REG */
+#define CLK_CFG_UPDATE 0x0004
+#define CLK_CFG_UPDATE1 0x0008
+#define CLK_CFG_UPDATE2 0x000c
+#define CLK_CFG_0 0x0010
+#define CLK_CFG_0_SET 0x0014
+#define CLK_CFG_0_CLR 0x0018
+#define CLK_CFG_1 0x0020
+#define CLK_CFG_1_SET 0x0024
+#define CLK_CFG_1_CLR 0x0028
+#define CLK_CFG_2 0x0030
+#define CLK_CFG_2_SET 0x0034
+#define CLK_CFG_2_CLR 0x0038
+#define CLK_CFG_3 0x0040
+#define CLK_CFG_3_SET 0x0044
+#define CLK_CFG_3_CLR 0x0048
+#define CLK_CFG_4 0x0050
+#define CLK_CFG_4_SET 0x0054
+#define CLK_CFG_4_CLR 0x0058
+#define CLK_CFG_5 0x0060
+#define CLK_CFG_5_SET 0x0064
+#define CLK_CFG_5_CLR 0x0068
+#define CLK_CFG_6 0x0070
+#define CLK_CFG_6_SET 0x0074
+#define CLK_CFG_6_CLR 0x0078
+#define CLK_CFG_7 0x0080
+#define CLK_CFG_7_SET 0x0084
+#define CLK_CFG_7_CLR 0x0088
+#define CLK_CFG_8 0x0090
+#define CLK_CFG_8_SET 0x0094
+#define CLK_CFG_8_CLR 0x0098
+#define CLK_CFG_9 0x00a0
+#define CLK_CFG_9_SET 0x00a4
+#define CLK_CFG_9_CLR 0x00a8
+#define CLK_CFG_10 0x00b0
+#define CLK_CFG_10_SET 0x00b4
+#define CLK_CFG_10_CLR 0x00b8
+#define CLK_CFG_11 0x00c0
+#define CLK_CFG_11_SET 0x00c4
+#define CLK_CFG_11_CLR 0x00c8
+#define CLK_CFG_12 0x00d0
+#define CLK_CFG_12_SET 0x00d4
+#define CLK_CFG_12_CLR 0x00d8
+#define CLK_CFG_13 0x00e0
+#define CLK_CFG_13_SET 0x00e4
+#define CLK_CFG_13_CLR 0x00e8
+#define CLK_CFG_14 0x00f0
+#define CLK_CFG_14_SET 0x00f4
+#define CLK_CFG_14_CLR 0x00f8
+#define CLK_CFG_15 0x0100
+#define CLK_CFG_15_SET 0x0104
+#define CLK_CFG_15_CLR 0x0108
+#define CLK_CFG_16 0x0110
+#define CLK_CFG_16_SET 0x0114
+#define CLK_CFG_16_CLR 0x0118
+#define CLK_CFG_17 0x0120
+#define CLK_CFG_17_SET 0x0124
+#define CLK_CFG_17_CLR 0x0128
+#define CLK_CFG_18 0x0130
+#define CLK_CFG_18_SET 0x0134
+#define CLK_CFG_18_CLR 0x0138
+#define CLK_CFG_19 0x0140
+#define CLK_CFG_19_SET 0x0144
+#define CLK_CFG_19_CLR 0x0148
+#define CLK_AUDDIV_0 0x020c
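+/* FENC (clock fence) status monitors, read back by the fenced mux ops */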
+#define CLK_FENC_STATUS_MON_0 0x0270
+#define CLK_FENC_STATUS_MON_1 0x0274
+#define CLK_FENC_STATUS_MON_2 0x0278
+
+/* MUX SHIFT */
+#define TOP_MUX_AXI_SHIFT 0
+#define TOP_MUX_MEM_SUB_SHIFT 1
+#define TOP_MUX_IO_NOC_SHIFT 2
+#define TOP_MUX_PERI_AXI_SHIFT 3
+#define TOP_MUX_UFS_PEXTP0_AXI_SHIFT 4
+#define TOP_MUX_PEXTP1_USB_AXI_SHIFT 5
+#define TOP_MUX_PERI_FMEM_SUB_SHIFT 6
+#define TOP_MUX_UFS_PEXPT0_MEM_SUB_SHIFT 7
+#define TOP_MUX_PEXTP1_USB_MEM_SUB_SHIFT 8
+#define TOP_MUX_PERI_NOC_SHIFT 9
+#define TOP_MUX_EMI_N_SHIFT 10
+#define TOP_MUX_EMI_S_SHIFT 11
+#define TOP_MUX_AP2CONN_HOST_SHIFT 14
+#define TOP_MUX_ATB_SHIFT 15
+#define TOP_MUX_CIRQ_SHIFT 16
+#define TOP_MUX_PBUS_156M_SHIFT 17
+#define TOP_MUX_EFUSE_SHIFT 20
+#define TOP_MUX_MCU_L3GIC_SHIFT 21
+#define TOP_MUX_MCU_INFRA_SHIFT 22
+#define TOP_MUX_DSP_SHIFT 23
+#define TOP_MUX_MFG_REF_SHIFT 24
+#define TOP_MUX_MFG_EB_SHIFT 26
+#define TOP_MUX_UART_SHIFT 27
+#define TOP_MUX_SPI0_BCLK_SHIFT 28
+#define TOP_MUX_SPI1_BCLK_SHIFT 29
+#define TOP_MUX_SPI2_BCLK_SHIFT 30
+#define TOP_MUX_SPI3_BCLK_SHIFT 0
+#define TOP_MUX_SPI4_BCLK_SHIFT 1
+#define TOP_MUX_SPI5_BCLK_SHIFT 2
+#define TOP_MUX_SPI6_BCLK_SHIFT 3
+#define TOP_MUX_SPI7_BCLK_SHIFT 4
+#define TOP_MUX_MSDC30_1_SHIFT 7
+#define TOP_MUX_MSDC30_2_SHIFT 8
+#define TOP_MUX_DISP_PWM_SHIFT 9
+#define TOP_MUX_USB_TOP_1P_SHIFT 10
+#define TOP_MUX_SSUSB_XHCI_1P_SHIFT 11
+#define TOP_MUX_SSUSB_FMCNT_P1_SHIFT 12
+#define TOP_MUX_I2C_PERI_SHIFT 13
+#define TOP_MUX_I2C_EAST_SHIFT 14
+#define TOP_MUX_I2C_WEST_SHIFT 15
+#define TOP_MUX_I2C_NORTH_SHIFT 16
+#define TOP_MUX_AES_UFSFDE_SHIFT 17
+#define TOP_MUX_UFS_SHIFT 18
+#define TOP_MUX_AUD_1_SHIFT 21
+#define TOP_MUX_AUD_2_SHIFT 22
+#define TOP_MUX_ADSP_SHIFT 23
+#define TOP_MUX_ADSP_UARTHUB_B_SHIFT 24
+#define TOP_MUX_DPMAIF_MAIN_SHIFT 25
+#define TOP_MUX_PWM_SHIFT 26
+#define TOP_MUX_MCUPM_SHIFT 27
+#define TOP_MUX_SFLASH_SHIFT 28
+#define TOP_MUX_IPSEAST_SHIFT 29
+#define TOP_MUX_TL_SHIFT 0
+#define TOP_MUX_TL_P1_SHIFT 1
+#define TOP_MUX_TL_P2_SHIFT 2
+#define TOP_MUX_EMI_INTERFACE_546_SHIFT 3
+#define TOP_MUX_SDF_SHIFT 4
+#define TOP_MUX_UARTHUB_BCLK_SHIFT 5
+#define TOP_MUX_DPSW_CMP_26M_SHIFT 6
+#define TOP_MUX_SMAPCK_SHIFT 7
+#define TOP_MUX_SSR_PKA_SHIFT 8
+#define TOP_MUX_SSR_DMA_SHIFT 9
+#define TOP_MUX_SSR_KDF_SHIFT 10
+#define TOP_MUX_SSR_RNG_SHIFT 11
+#define TOP_MUX_SPU0_SHIFT 12
+#define TOP_MUX_SPU1_SHIFT 13
+#define TOP_MUX_DXCC_SHIFT 14
+
+/* CKSTA REG */
+#define CKSTA_REG 0x01c8
+#define CKSTA_REG1 0x01cc
+#define CKSTA_REG2 0x01d0
+
+/* DIVIDER REG */
+#define CLK_AUDDIV_2 0x0214
+#define CLK_AUDDIV_3 0x0220
+#define CLK_AUDDIV_4 0x0224
+#define CLK_AUDDIV_5 0x0228
+
+/* HW Voter REG */
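+/*
+ * Writing the SET/CLR registers casts an enable vote rather than
+ * toggling the gate directly; the matching DONE register is polled to
+ * confirm the vote has taken effect.
+ */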
+#define HWV_CG_0_SET 0x0000
+#define HWV_CG_0_CLR 0x0004
+#define HWV_CG_0_DONE 0x2c00
+#define HWV_CG_1_SET 0x0008
+#define HWV_CG_1_CLR 0x000c
+#define HWV_CG_1_DONE 0x2c04
+#define HWV_CG_2_SET 0x0010
+#define HWV_CG_2_CLR 0x0014
+#define HWV_CG_2_DONE 0x2c08
+#define HWV_CG_3_SET 0x0018
+#define HWV_CG_3_CLR 0x001c
+#define HWV_CG_3_DONE 0x2c0c
+#define HWV_CG_4_SET 0x0020
+#define HWV_CG_4_CLR 0x0024
+#define HWV_CG_4_DONE 0x2c10
+#define HWV_CG_5_SET 0x0028
+#define HWV_CG_5_CLR 0x002c
+#define HWV_CG_5_DONE 0x2c14
+#define HWV_CG_6_SET 0x0030
+#define HWV_CG_6_CLR 0x0034
+#define HWV_CG_6_DONE 0x2c18
+#define HWV_CG_7_SET 0x0038
+#define HWV_CG_7_CLR 0x003c
+#define HWV_CG_7_DONE 0x2c1c
+#define HWV_CG_8_SET 0x0040
+#define HWV_CG_8_CLR 0x0044
+#define HWV_CG_8_DONE 0x2c20
+
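+/* Fixed-factor dividers of the PLL outputs: rate = parent * mult / div */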
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_MAINPLL_D4, "mainpll_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D4_D2, "mainpll_d4_d2", "mainpll", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D4_D4, "mainpll_d4_d4", "mainpll", 1, 16),
+ FACTOR(CLK_TOP_MAINPLL_D4_D8, "mainpll_d4_d8", "mainpll", 1, 32),
+ FACTOR(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_MAINPLL_D5_D2, "mainpll_d5_d2", "mainpll", 1, 10),
+ FACTOR(CLK_TOP_MAINPLL_D5_D4, "mainpll_d5_d4", "mainpll", 1, 20),
+ FACTOR(CLK_TOP_MAINPLL_D5_D8, "mainpll_d5_d8", "mainpll", 1, 40),
+ FACTOR(CLK_TOP_MAINPLL_D6, "mainpll_d6", "mainpll", 1, 6),
+ FACTOR(CLK_TOP_MAINPLL_D6_D2, "mainpll_d6_d2", "mainpll", 1, 12),
+ FACTOR(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_MAINPLL_D7_D2, "mainpll_d7_d2", "mainpll", 1, 14),
+ FACTOR(CLK_TOP_MAINPLL_D7_D4, "mainpll_d7_d4", "mainpll", 1, 28),
+ FACTOR(CLK_TOP_MAINPLL_D7_D8, "mainpll_d7_d8", "mainpll", 1, 56),
+ FACTOR(CLK_TOP_MAINPLL_D9, "mainpll_d9", "mainpll", 1, 9),
+ FACTOR(CLK_TOP_UNIVPLL_D4, "univpll_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D2, "univpll_d4_d2", "univpll", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D4, "univpll_d4_d4", "univpll", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D8, "univpll_d4_d8", "univpll", 1, 32),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D2, "univpll_d5_d2", "univpll", 1, 10),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D4, "univpll_d5_d4", "univpll", 1, 20),
+ FACTOR(CLK_TOP_UNIVPLL_D6, "univpll_d6", "univpll", 1, 6),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D2, "univpll_d6_d2", "univpll", 1, 12),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D4, "univpll_d6_d4", "univpll", 1, 24),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D8, "univpll_d6_d8", "univpll", 1, 48),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D16, "univpll_d6_d16", "univpll", 1, 96),
+ FACTOR(CLK_TOP_UNIVPLL_192M, "univpll_192m", "univpll", 1, 13),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D4, "univpll_192m_d4", "univpll", 1, 52),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D8, "univpll_192m_d8", "univpll", 1, 104),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D16, "univpll_192m_d16", "univpll", 1, 208),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D32, "univpll_192m_d32", "univpll", 1, 416),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D10, "univpll_192m_d10", "univpll", 1, 130),
+ FACTOR(CLK_TOP_TVDPLL1_D2, "tvdpll1_d2", "tvdpll1", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_OSC_D2, "osc_d2", "ulposc", 1, 2),
+ FACTOR(CLK_TOP_OSC_D3, "osc_d3", "ulposc", 1, 3),
+ FACTOR(CLK_TOP_OSC_D4, "osc_d4", "ulposc", 1, 4),
+ FACTOR(CLK_TOP_OSC_D5, "osc_d5", "ulposc", 1, 5),
+ FACTOR(CLK_TOP_OSC_D7, "osc_d7", "ulposc", 1, 7),
+ FACTOR(CLK_TOP_OSC_D8, "osc_d8", "ulposc", 1, 8),
+ FACTOR(CLK_TOP_OSC_D10, "osc_d10", "ulposc", 1, 10),
+ FACTOR(CLK_TOP_OSC_D14, "osc_d14", "ulposc", 1, 14),
+ FACTOR(CLK_TOP_OSC_D20, "osc_d20", "ulposc", 1, 20),
+ FACTOR(CLK_TOP_OSC_D32, "osc_d32", "ulposc", 1, 32),
+ FACTOR(CLK_TOP_OSC_D40, "osc_d40", "ulposc", 1, 40),
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d8",
+ "osc_d4",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const mem_sub_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d4",
+ "univpll_d4_d4",
+ "osc_d3",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d5",
+ "univpll_d5",
+ "mainpll_d4",
+ "mainpll_d3"
+};
+
+static const char * const io_noc_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d8",
+ "osc_d4",
+ "mainpll_d6_d2",
+ "mainpll_d9"
+};
+
+static const char * const shared_axi_parents[] = {
+ "clk26m",
+ "mainpll_d7_d8",
+ "mainpll_d5_d8",
+ "osc_d8",
+ "mainpll_d7_d4",
+ "mainpll_d5_d4",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const shared_sub_parents[] = {
+ "clk26m",
+ "mainpll_d5_d8",
+ "mainpll_d5_d4",
+ "osc_d4",
+ "univpll_d4_d4",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d5",
+ "univpll_d5",
+ "mainpll_d4"
+};
+
+static const char * const p_noc_parents[] = {
+ "clk26m",
+ "mainpll_d5_d8",
+ "mainpll_d5_d4",
+ "osc_d4",
+ "univpll_d4_d4",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d5",
+ "univpll_d5",
+ "mainpll_d4",
+ "mainpll_d3"
+};
+
+static const char * const emi_parents[] = {
+ "clk26m",
+ "osc_d4",
+ "mainpll_d5_d8",
+ "mainpll_d5_d4",
+ "mainpll_d4_d4",
+ "emipll1_ck"
+};
+
+static const char * const ap2conn_host_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4"
+};
+
+static const char * const atb_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6"
+};
+
+static const char * const cirq_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d4"
+};
+
+static const char * const pbus_156m_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "osc_d2",
+ "mainpll_d7"
+};
+
+static const char * const efuse_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const mcu_l3gic_parents[] = {
+ "clk26m",
+ "osc_d8",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const mcu_infra_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d2",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d9",
+ "mainpll_d6"
+};
+
+static const char * const dsp_parents[] = {
+ "clk26m",
+ "osc_d5",
+ "osc_d4",
+ "osc_d3",
+ "univpll_d6_d2",
+ "osc_d2",
+ "univpll_d5",
+ "osc"
+};
+
+static const char * const mfg_ref_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2"
+};
+
+static const char * const mfg_eb_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "mainpll_d6_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll_d6_d8",
+ "univpll_d6_d4",
+ "univpll_d6_d2"
+};
+
+static const char * const spi_b_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "univpll_d5_d4",
+ "mainpll_d4_d4",
+ "univpll_d4_d4",
+ "mainpll_d6_d2",
+ "univpll_192m",
+ "univpll_d6_d2"
+};
+
+static const char * const msdc30_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "mainpll_d6_d2",
+ "univpll_d6_d2",
+ "msdcpll_d2"
+};
+
+static const char * const disp_pwm_parents[] = {
+ "clk26m",
+ "osc_d32",
+ "osc_d8",
+ "univpll_d6_d4",
+ "univpll_d5_d4",
+ "osc_d4",
+ "mainpll_d4_d4"
+};
+
+static const char * const usb_1p_parents[] = {
+ "clk26m",
+ "univpll_d5_d4"
+};
+
+static const char * const usb_fmcnt_p1_parents[] = {
+ "clk26m",
+ "univpll_192m_d4"
+};
+
+static const char * const i2c_parents[] = {
+ "clk26m",
+ "mainpll_d4_d8",
+ "univpll_d5_d4",
+ "mainpll_d4_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const aes_ufsfde_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "univpll_d6_d2",
+ "mainpll_d4_d2",
+ "univpll_d6",
+ "mainpll_d4"
+};
+
+static const char * const ufs_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "univpll_d6_d2",
+ "mainpll_d4_d2",
+ "univpll_d6",
+ "mainpll_d5",
+ "univpll_d5"
+};
+
+static const char * const aud_1_parents[] = {
+ "clk26m",
+ "vlp_apll1"
+};
+
+static const char * const aud_2_parents[] = {
+ "clk26m",
+ "vlp_apll2"
+};
+
+static const char * const adsp_parents[] = {
+ "clk26m",
+ "adsppll"
+};
+
+static const char * const adsp_uarthub_b_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "univpll_d6_d2"
+};
+
+static const char * const dpmaif_main_parents[] = {
+ "clk26m",
+ "univpll_d4_d4",
+ "univpll_d5_d2",
+ "mainpll_d4_d2",
+ "univpll_d4_d2",
+ "mainpll_d6",
+ "univpll_d6",
+ "mainpll_d5",
+ "univpll_d5"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4",
+ "univpll_d4_d8"
+};
+
+static const char * const mcupm_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "mainpll_d6_d2",
+ "univpll_d6_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const ipseast_parents[] = {
+ "clk26m",
+ "mainpll_d6",
+ "mainpll_d5",
+ "mainpll_d4",
+ "mainpll_d3"
+};
+
+static const char * const tl_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4",
+ "mainpll_d4_d4",
+ "mainpll_d5_d2"
+};
+
+static const char * const md_emi_parents[] = {
+ "clk26m",
+ "mainpll_d4"
+};
+
+static const char * const sdf_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d4",
+ "univpll_d4"
+};
+
+static const char * const uarthub_b_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "univpll_d6_d2"
+};
+
+static const char * const dpsw_cmp_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const smap_parents[] = {
+ "clk26m",
+ "mainpll_d4_d8"
+};
+
+static const char * const ssr_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const char * const ssr_kdf_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7"
+};
+
+static const char * const ssr_rng_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2"
+};
+
+static const char * const spu_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const char * const dxcc_parents[] = {
+ "clk26m",
+ "mainpll_d4_d8",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2"
+};
+
+static const char * const apll_m_parents[] = {
+ "aud_1",
+ "aud_2"
+};
+
+static const char * const sflash_parents[] = {
+ "clk26m",
+ "mainpll_d7_d8",
+ "univpll_d6_d8"
+};
+
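+/*
+ * Clock muxes. Plain MUX_CLR_SET_UPD entries are set/clr/update muxes
+ * with no gate; the GATE variants add an enable bit, the FENC variants
+ * also monitor a fence status bit, and the HWV variants are enabled
+ * through the hardware voter.
+ */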
+static const struct mtk_mux top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_CLR_SET_UPD(CLK_TOP_AXI, "axi",
+ axi_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 0, 3,
+ CLK_CFG_UPDATE, TOP_MUX_AXI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_MEM_SUB, "mem_sub",
+ mem_sub_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 8, 4,
+ CLK_CFG_UPDATE, TOP_MUX_MEM_SUB_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_IO_NOC, "io_noc",
+ io_noc_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 16, 3,
+ CLK_CFG_UPDATE, TOP_MUX_IO_NOC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_P_AXI, "p_axi",
+ shared_axi_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 24, 3,
+ CLK_CFG_UPDATE, TOP_MUX_PERI_AXI_SHIFT),
+ /* CLK_CFG_1 */
+ MUX_CLR_SET_UPD(CLK_TOP_UFS_PEXTP0_AXI, "ufs_pextp0_axi",
+ shared_axi_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 0, 3,
+ CLK_CFG_UPDATE, TOP_MUX_UFS_PEXTP0_AXI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_PEXTP1_USB_AXI, "pextp1_usb_axi",
+ shared_axi_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 8, 3,
+ CLK_CFG_UPDATE, TOP_MUX_PEXTP1_USB_AXI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_P_FMEM_SUB, "p_fmem_sub",
+ shared_sub_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 16, 4,
+ CLK_CFG_UPDATE, TOP_MUX_PERI_FMEM_SUB_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_PEXPT0_MEM_SUB, "ufs_pexpt0_mem_sub",
+ shared_sub_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 24, 4,
+ CLK_CFG_UPDATE, TOP_MUX_UFS_PEXPT0_MEM_SUB_SHIFT),
+ /* CLK_CFG_2 */
+ MUX_CLR_SET_UPD(CLK_TOP_PEXTP1_USB_MEM_SUB, "pextp1_usb_mem_sub",
+ shared_sub_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 0, 4,
+ CLK_CFG_UPDATE, TOP_MUX_PEXTP1_USB_MEM_SUB_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_P_NOC, "p_noc",
+ p_noc_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 8, 4,
+ CLK_CFG_UPDATE, TOP_MUX_PERI_NOC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_EMI_N, "emi_n",
+ emi_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 16, 3,
+ CLK_CFG_UPDATE, TOP_MUX_EMI_N_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_EMI_S, "emi_s",
+ emi_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 24, 3,
+ CLK_CFG_UPDATE, TOP_MUX_EMI_S_SHIFT),
+ /* CLK_CFG_3 */
+ MUX_CLR_SET_UPD(CLK_TOP_AP2CONN_HOST, "ap2conn_host",
+ ap2conn_host_parents, CLK_CFG_3, CLK_CFG_3_SET,
+ CLK_CFG_3_CLR, 16, 1,
+ CLK_CFG_UPDATE, TOP_MUX_AP2CONN_HOST_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_ATB, "atb",
+ atb_parents, CLK_CFG_3, CLK_CFG_3_SET,
+ CLK_CFG_3_CLR, 24, 2,
+ CLK_CFG_UPDATE, TOP_MUX_ATB_SHIFT),
+ /* CLK_CFG_4 */
+ MUX_CLR_SET_UPD(CLK_TOP_CIRQ, "cirq",
+ cirq_parents, CLK_CFG_4, CLK_CFG_4_SET,
+ CLK_CFG_4_CLR, 0, 2,
+ CLK_CFG_UPDATE, TOP_MUX_CIRQ_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_PBUS_156M, "pbus_156m",
+ pbus_156m_parents, CLK_CFG_4, CLK_CFG_4_SET,
+ CLK_CFG_4_CLR, 8, 2,
+ CLK_CFG_UPDATE, TOP_MUX_PBUS_156M_SHIFT),
+ /* CLK_CFG_5 */
+ MUX_CLR_SET_UPD(CLK_TOP_EFUSE, "efuse",
+ efuse_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 0, 1,
+ CLK_CFG_UPDATE, TOP_MUX_EFUSE_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_MCL3GIC, "mcu_l3gic",
+ mcu_l3gic_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 8, 2,
+ CLK_CFG_UPDATE, TOP_MUX_MCU_L3GIC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_MCINFRA, "mcu_infra",
+ mcu_infra_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 16, 3,
+ CLK_CFG_UPDATE, TOP_MUX_MCU_INFRA_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_DSP, "dsp",
+ dsp_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 24, 3,
+ CLK_CFG_UPDATE, TOP_MUX_DSP_SHIFT),
+ /* CLK_CFG_6 */
+ MUX_GATE_FENC_CLR_SET_UPD_FLAGS(CLK_TOP_MFG_REF, "mfg_ref", mfg_ref_parents,
+ NULL, ARRAY_SIZE(mfg_ref_parents),
+ CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR,
+ 0, 1, 7, CLK_CFG_UPDATE, TOP_MUX_MFG_REF_SHIFT,
+ CLK_FENC_STATUS_MON_0, 7, CLK_IGNORE_UNUSED),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG_EB, "mfg_eb",
+ mfg_eb_parents, CLK_CFG_6, CLK_CFG_6_SET,
+ CLK_CFG_6_CLR, 16, 2,
+ 23, CLK_CFG_UPDATE, TOP_MUX_MFG_EB_SHIFT),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_UART, "uart", uart_parents,
+ CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR,
+ HWV_CG_3_DONE, HWV_CG_3_SET, HWV_CG_3_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE, TOP_MUX_UART_SHIFT,
+ CLK_FENC_STATUS_MON_0, 4),
+ /* CLK_CFG_7 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI0_BCLK, "spi0_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE, TOP_MUX_SPI0_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 3),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI1_BCLK, "spi1_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE, TOP_MUX_SPI1_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 2),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI2_BCLK, "spi2_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE, TOP_MUX_SPI2_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 1),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI3_BCLK, "spi3_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_SPI3_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 0),
+ /* CLK_CFG_8 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI4_BCLK, "spi4_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_SPI4_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 31),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI5_BCLK, "spi5_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE1, TOP_MUX_SPI5_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 30),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI6_BCLK, "spi6_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE1, TOP_MUX_SPI6_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 29),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI7_BCLK, "spi7_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_SPI7_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 28),
+ /* CLK_CFG_9 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_MSDC30_1, "msdc30_1", msdc30_parents,
+ CLK_CFG_9, CLK_CFG_9_SET, CLK_CFG_9_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE1, TOP_MUX_MSDC30_1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 25),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_MSDC30_2, "msdc30_2", msdc30_parents,
+ CLK_CFG_9, CLK_CFG_9_SET, CLK_CFG_9_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_MSDC30_2_SHIFT,
+ CLK_FENC_STATUS_MON_1, 24),
+ /* CLK_CFG_10 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_DISP_PWM, "disp_pwm", disp_pwm_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_DISP_PWM_SHIFT,
+ CLK_FENC_STATUS_MON_1, 23),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_USB_TOP_1P, "usb_1p", usb_1p_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 8, 1, 15, CLK_CFG_UPDATE1, TOP_MUX_USB_TOP_1P_SHIFT,
+ CLK_FENC_STATUS_MON_1, 22),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_USB_XHCI_1P, "usb_xhci_1p", usb_1p_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 16, 1, 23, CLK_CFG_UPDATE1, TOP_MUX_SSUSB_XHCI_1P_SHIFT,
+ CLK_FENC_STATUS_MON_1, 21),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_USB_FMCNT_P1, "usb_fmcnt_p1", usb_fmcnt_p1_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 24, 1, 31, CLK_CFG_UPDATE1, TOP_MUX_SSUSB_FMCNT_P1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 20),
+ /* CLK_CFG_11 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_I2C_P, "i2c_p", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_I2C_PERI_SHIFT,
+ CLK_FENC_STATUS_MON_1, 19),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_I2C_EAST, "i2c_east", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE1, TOP_MUX_I2C_EAST_SHIFT,
+ CLK_FENC_STATUS_MON_1, 18),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_I2C_WEST, "i2c_west", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE1, TOP_MUX_I2C_WEST_SHIFT,
+ CLK_FENC_STATUS_MON_1, 17),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_I2C_NORTH, "i2c_north", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ HWV_CG_6_DONE, HWV_CG_6_SET, HWV_CG_6_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_I2C_NORTH_SHIFT,
+ CLK_FENC_STATUS_MON_1, 16),
+ /* CLK_CFG_12 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_AES_UFSFDE, "aes_ufsfde", aes_ufsfde_parents,
+ CLK_CFG_12, CLK_CFG_12_SET, CLK_CFG_12_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_AES_UFSFDE_SHIFT,
+ CLK_FENC_STATUS_MON_1, 15),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_UFS, "ufs", ufs_parents,
+ CLK_CFG_12, CLK_CFG_12_SET, CLK_CFG_12_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE1, TOP_MUX_UFS_SHIFT,
+ CLK_FENC_STATUS_MON_1, 14),
+ /* CLK_CFG_13 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_AUD_1, "aud_1", aud_1_parents,
+ CLK_CFG_13, CLK_CFG_13_SET, CLK_CFG_13_CLR,
+ 0, 1, 7, CLK_CFG_UPDATE1, TOP_MUX_AUD_1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 11),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_AUD_2, "aud_2", aud_2_parents,
+ CLK_CFG_13, CLK_CFG_13_SET, CLK_CFG_13_CLR,
+ 8, 1, 15, CLK_CFG_UPDATE1, TOP_MUX_AUD_2_SHIFT,
+ CLK_FENC_STATUS_MON_1, 10),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_ADSP, "adsp", adsp_parents,
+ CLK_CFG_13, CLK_CFG_13_SET, CLK_CFG_13_CLR,
+ 16, 1, 23, CLK_CFG_UPDATE1, TOP_MUX_ADSP_SHIFT,
+ CLK_FENC_STATUS_MON_1, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ADSP_UARTHUB_B, "adsp_uarthub_b",
+ adsp_uarthub_b_parents, CLK_CFG_13, CLK_CFG_13_SET,
+ CLK_CFG_13_CLR, 24, 2, 31,
+ CLK_CFG_UPDATE1, TOP_MUX_ADSP_UARTHUB_B_SHIFT),
+ /* CLK_CFG_14 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_DPMAIF_MAIN, "dpmaif_main", dpmaif_main_parents,
+ CLK_CFG_14, CLK_CFG_14_SET, CLK_CFG_14_CLR,
+ 0, 4, 7, CLK_CFG_UPDATE1, TOP_MUX_DPMAIF_MAIN_SHIFT,
+ CLK_FENC_STATUS_MON_1, 7),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_PWM, "pwm", pwm_parents,
+ CLK_CFG_14, CLK_CFG_14_SET, CLK_CFG_14_CLR,
+ 8, 2, 15, CLK_CFG_UPDATE1, TOP_MUX_PWM_SHIFT,
+ CLK_FENC_STATUS_MON_1, 6),
+ MUX_CLR_SET_UPD(CLK_TOP_MCUPM, "mcupm",
+ mcupm_parents, CLK_CFG_14, CLK_CFG_14_SET,
+ CLK_CFG_14_CLR, 16, 3,
+ CLK_CFG_UPDATE1, TOP_MUX_MCUPM_SHIFT),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_SFLASH, "sflash", sflash_parents,
+ CLK_CFG_14, CLK_CFG_14_SET, CLK_CFG_14_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE1, TOP_MUX_SFLASH_SHIFT,
+ CLK_FENC_STATUS_MON_1, 4),
+ /* CLK_CFG_15 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_IPSEAST, "ipseast", ipseast_parents,
+ CLK_CFG_15, CLK_CFG_15_SET, CLK_CFG_15_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_IPSEAST_SHIFT,
+ CLK_FENC_STATUS_MON_1, 3),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_TL, "tl", tl_parents,
+ CLK_CFG_15, CLK_CFG_15_SET, CLK_CFG_15_CLR,
+ 16, 2, 23, CLK_CFG_UPDATE2, TOP_MUX_TL_SHIFT,
+ CLK_FENC_STATUS_MON_1, 1),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_TL_P1, "tl_p1", tl_parents,
+ CLK_CFG_15, CLK_CFG_15_SET, CLK_CFG_15_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE2, TOP_MUX_TL_P1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 0),
+ /* CLK_CFG_16 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_TL_P2, "tl_p2", tl_parents,
+ CLK_CFG_16, CLK_CFG_16_SET, CLK_CFG_16_CLR,
+ 0, 2, 7, CLK_CFG_UPDATE2, TOP_MUX_TL_P2_SHIFT,
+ CLK_FENC_STATUS_MON_2, 31),
+ MUX_CLR_SET_UPD(CLK_TOP_EMI_INTERFACE_546, "emi_interface_546",
+ md_emi_parents, CLK_CFG_16, CLK_CFG_16_SET,
+ CLK_CFG_16_CLR, 8, 1,
+ CLK_CFG_UPDATE2, TOP_MUX_EMI_INTERFACE_546_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SDF, "sdf",
+ sdf_parents, CLK_CFG_16, CLK_CFG_16_SET,
+ CLK_CFG_16_CLR, 16, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SDF_SHIFT),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_UARTHUB_BCLK, "uarthub_b", uarthub_b_parents,
+ CLK_CFG_16, CLK_CFG_16_SET, CLK_CFG_16_CLR,
+ HWV_CG_7_DONE, HWV_CG_7_SET, HWV_CG_7_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE2, TOP_MUX_UARTHUB_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_2, 28),
+ /* CLK_CFG_17 */
+ MUX_CLR_SET_UPD(CLK_TOP_DPSW_CMP_26M, "dpsw_cmp_26m",
+ dpsw_cmp_26m_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 0, 1,
+ CLK_CFG_UPDATE2, TOP_MUX_DPSW_CMP_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SMAP, "smap",
+			smap_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 8, 1,
+ CLK_CFG_UPDATE2, TOP_MUX_SMAPCK_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_PKA, "ssr_pka",
+ ssr_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 16, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_PKA_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_DMA, "ssr_dma",
+ ssr_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 24, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_DMA_SHIFT),
+ /* CLK_CFG_18 */
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_KDF, "ssr_kdf",
+ ssr_kdf_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 0, 2,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_KDF_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_RNG, "ssr_rng",
+ ssr_rng_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 8, 2,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_RNG_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SPU0, "spu0",
+ spu_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 16, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SPU0_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SPU1, "spu1",
+ spu_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 24, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SPU1_SHIFT),
+ /* CLK_CFG_19 */
+ MUX_CLR_SET_UPD(CLK_TOP_DXCC, "dxcc",
+ dxcc_parents, CLK_CFG_19, CLK_CFG_19_SET,
+ CLK_CFG_19_CLR, 0, 2,
+ CLK_CFG_UPDATE2, TOP_MUX_DXCC_SHIFT),
+};
+
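+/*
+ * Audio clock composites: a mux selecting one of the two audio PLLs,
+ * an 8-bit divider and a gate, all within the CLK_AUDDIV registers.
+ */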
+static const struct mtk_composite top_aud_divs[] = {
+ /* CLK_AUDDIV_2 */
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN0, "apll_i2sin0_m", apll_m_parents,
+ CLK_AUDDIV_0, 16, 1, CLK_AUDDIV_2, 0, 8, CLK_AUDDIV_0, 0),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN1, "apll_i2sin1_m", apll_m_parents,
+ CLK_AUDDIV_0, 17, 1, CLK_AUDDIV_2, 8, 8, CLK_AUDDIV_0, 1),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN2, "apll_i2sin2_m", apll_m_parents,
+ CLK_AUDDIV_0, 18, 1, CLK_AUDDIV_2, 16, 8, CLK_AUDDIV_0, 2),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN3, "apll_i2sin3_m", apll_m_parents,
+ CLK_AUDDIV_0, 19, 1, CLK_AUDDIV_2, 24, 8, CLK_AUDDIV_0, 3),
+ /* CLK_AUDDIV_3 */
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN4, "apll_i2sin4_m", apll_m_parents,
+ CLK_AUDDIV_0, 20, 1, CLK_AUDDIV_3, 0, 8, CLK_AUDDIV_0, 4),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN6, "apll_i2sin6_m", apll_m_parents,
+ CLK_AUDDIV_0, 21, 1, CLK_AUDDIV_3, 8, 8, CLK_AUDDIV_0, 5),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT0, "apll_i2sout0_m", apll_m_parents,
+ CLK_AUDDIV_0, 22, 1, CLK_AUDDIV_3, 16, 8, CLK_AUDDIV_0, 6),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT1, "apll_i2sout1_m", apll_m_parents,
+ CLK_AUDDIV_0, 23, 1, CLK_AUDDIV_3, 24, 8, CLK_AUDDIV_0, 7),
+ /* CLK_AUDDIV_4 */
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT2, "apll_i2sout2_m", apll_m_parents,
+ CLK_AUDDIV_0, 24, 1, CLK_AUDDIV_4, 0, 8, CLK_AUDDIV_0, 8),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT3, "apll_i2sout3_m", apll_m_parents,
+ CLK_AUDDIV_0, 25, 1, CLK_AUDDIV_4, 8, 8, CLK_AUDDIV_0, 9),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT4, "apll_i2sout4_m", apll_m_parents,
+ CLK_AUDDIV_0, 26, 1, CLK_AUDDIV_4, 16, 8, CLK_AUDDIV_0, 10),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT6, "apll_i2sout6_m", apll_m_parents,
+ CLK_AUDDIV_0, 27, 1, CLK_AUDDIV_4, 24, 8, CLK_AUDDIV_0, 11),
+ /* CLK_AUDDIV_5 */
+ MUX_DIV_GATE(CLK_TOP_APLL_FMI2S, "apll_fmi2s_m", apll_m_parents,
+ CLK_AUDDIV_0, 28, 1, CLK_AUDDIV_5, 0, 8, CLK_AUDDIV_0, 12),
+ MUX(CLK_TOP_APLL_TDMOUT, "apll_tdmout_m",
+ apll_m_parents, CLK_AUDDIV_0, 29, 1),
+ DIV_GATE(CLK_TOP_APLL12_DIV_TDMOUT_M, "apll12_div_tdmout_m",
+ "apll_tdmout_m", CLK_AUDDIV_0,
+ 13, CLK_AUDDIV_5, 8, 8),
+ DIV_GATE(CLK_TOP_APLL12_DIV_TDMOUT_B, "apll12_div_tdmout_b",
+ "apll_tdmout_m", CLK_AUDDIV_0,
+ 14, CLK_AUDDIV_5, 8, 16),
+};
+
+static const struct mtk_clk_desc topck_desc = {
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .mux_clks = top_muxes,
+ .num_mux_clks = ARRAY_SIZE(top_muxes),
+ .composite_clks = top_aud_divs,
+ .num_composite_clks = ARRAY_SIZE(top_aud_divs)
+};
+
+static const struct of_device_id of_match_clk_mt8196_ck[] = {
+ { .compatible = "mediatek,mt8196-topckgen", .data = &topck_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_ck);
+
+static struct platform_driver clk_mt8196_topck_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-topck",
+ .of_match_table = of_match_clk_mt8196_ck,
+ },
+};
+
+MODULE_DESCRIPTION("MediaTek MT8196 top clock generators driver");
+module_platform_driver(clk_mt8196_topck_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-topckgen2.c b/drivers/clk/mediatek/clk-mt8196-topckgen2.c
new file mode 100644
index 000000000000..6df93d7fbf91
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-topckgen2.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+/* MUX SEL REG */
+#define CKSYS2_CLK_CFG_UPDATE 0x0004
+#define CKSYS2_CLK_CFG_0 0x0010
+#define CKSYS2_CLK_CFG_0_SET 0x0014
+#define CKSYS2_CLK_CFG_0_CLR 0x0018
+#define CKSYS2_CLK_CFG_1 0x0020
+#define CKSYS2_CLK_CFG_1_SET 0x0024
+#define CKSYS2_CLK_CFG_1_CLR 0x0028
+#define CKSYS2_CLK_CFG_2 0x0030
+#define CKSYS2_CLK_CFG_2_SET 0x0034
+#define CKSYS2_CLK_CFG_2_CLR 0x0038
+#define CKSYS2_CLK_CFG_3 0x0040
+#define CKSYS2_CLK_CFG_3_SET 0x0044
+#define CKSYS2_CLK_CFG_3_CLR 0x0048
+#define CKSYS2_CLK_CFG_4 0x0050
+#define CKSYS2_CLK_CFG_4_SET 0x0054
+#define CKSYS2_CLK_CFG_4_CLR 0x0058
+#define CKSYS2_CLK_CFG_5 0x0060
+#define CKSYS2_CLK_CFG_5_SET 0x0064
+#define CKSYS2_CLK_CFG_5_CLR 0x0068
+#define CKSYS2_CLK_CFG_6 0x0070
+#define CKSYS2_CLK_CFG_6_SET 0x0074
+#define CKSYS2_CLK_CFG_6_CLR 0x0078
+#define CKSYS2_CLK_FENC_STATUS_MON_0 0x0174
+
+/* MUX SHIFT */
+#define TOP_MUX_SENINF0_SHIFT 0
+#define TOP_MUX_SENINF1_SHIFT 1
+#define TOP_MUX_SENINF2_SHIFT 2
+#define TOP_MUX_SENINF3_SHIFT 3
+#define TOP_MUX_SENINF4_SHIFT 4
+#define TOP_MUX_SENINF5_SHIFT 5
+#define TOP_MUX_IMG1_SHIFT 6
+#define TOP_MUX_IPE_SHIFT 7
+#define TOP_MUX_CAM_SHIFT 8
+#define TOP_MUX_CAMTM_SHIFT 9
+#define TOP_MUX_DPE_SHIFT 10
+#define TOP_MUX_VDEC_SHIFT 11
+#define TOP_MUX_CCUSYS_SHIFT 12
+#define TOP_MUX_CCUTM_SHIFT 13
+#define TOP_MUX_VENC_SHIFT 14
+#define TOP_MUX_DVO_SHIFT 15
+#define TOP_MUX_DVO_FAVT_SHIFT 16
+#define TOP_MUX_DP1_SHIFT 17
+#define TOP_MUX_DP0_SHIFT 18
+#define TOP_MUX_DISP_SHIFT 19
+#define TOP_MUX_MDP_SHIFT 20
+#define TOP_MUX_MMINFRA_SHIFT 21
+#define TOP_MUX_MMINFRA_SNOC_SHIFT 22
+#define TOP_MUX_MMUP_SHIFT 23
+#define TOP_MUX_MMINFRA_AO_SHIFT 26
+
+/* HW Voter REG */
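+/*
+ * Two voter register sets are defined below: HWV_CG_30_* and the
+ * MM_HWV_* range; the muxes in this file vote through the MM_HWV
+ * instance.
+ */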
+#define HWV_CG_30_SET 0x0058
+#define HWV_CG_30_CLR 0x005c
+#define HWV_CG_30_DONE 0x2c2c
+
+#define MM_HWV_CG_30_SET 0x00f0
+#define MM_HWV_CG_30_CLR 0x00f4
+#define MM_HWV_CG_30_DONE 0x2c78
+#define MM_HWV_CG_31_SET 0x00f8
+#define MM_HWV_CG_31_CLR 0x00fc
+#define MM_HWV_CG_31_DONE 0x2c7c
+#define MM_HWV_CG_32_SET 0x0100
+#define MM_HWV_CG_32_CLR 0x0104
+#define MM_HWV_CG_32_DONE 0x2c80
+#define MM_HWV_CG_33_SET 0x0108
+#define MM_HWV_CG_33_CLR 0x010c
+#define MM_HWV_CG_33_DONE 0x2c84
+#define MM_HWV_CG_34_SET 0x0110
+#define MM_HWV_CG_34_CLR 0x0114
+#define MM_HWV_CG_34_DONE 0x2c88
+#define MM_HWV_CG_35_SET 0x0118
+#define MM_HWV_CG_35_CLR 0x011c
+#define MM_HWV_CG_35_DONE 0x2c8c
+#define MM_HWV_CG_36_SET 0x0120
+#define MM_HWV_CG_36_CLR 0x0124
+#define MM_HWV_CG_36_DONE 0x2c90
+#define MM_HWV_MUX_UPDATE_31_0 0x0240
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP2_MAINPLL2_D2, "mainpll2_d2", "mainpll2", 1, 2),
+ FACTOR(CLK_TOP2_MAINPLL2_D3, "mainpll2_d3", "mainpll2", 1, 3),
+ FACTOR(CLK_TOP2_MAINPLL2_D4, "mainpll2_d4", "mainpll2", 1, 4),
+ FACTOR(CLK_TOP2_MAINPLL2_D4_D2, "mainpll2_d4_d2", "mainpll2", 1, 8),
+ FACTOR(CLK_TOP2_MAINPLL2_D4_D4, "mainpll2_d4_d4", "mainpll2", 1, 16),
+ FACTOR(CLK_TOP2_MAINPLL2_D5, "mainpll2_d5", "mainpll2", 1, 5),
+ FACTOR(CLK_TOP2_MAINPLL2_D5_D2, "mainpll2_d5_d2", "mainpll2", 1, 10),
+ FACTOR(CLK_TOP2_MAINPLL2_D6, "mainpll2_d6", "mainpll2", 1, 6),
+ FACTOR(CLK_TOP2_MAINPLL2_D6_D2, "mainpll2_d6_d2", "mainpll2", 1, 12),
+ FACTOR(CLK_TOP2_MAINPLL2_D7, "mainpll2_d7", "mainpll2", 1, 7),
+ FACTOR(CLK_TOP2_MAINPLL2_D7_D2, "mainpll2_d7_d2", "mainpll2", 1, 14),
+ FACTOR(CLK_TOP2_MAINPLL2_D9, "mainpll2_d9", "mainpll2", 1, 9),
+ FACTOR(CLK_TOP2_UNIVPLL2_D3, "univpll2_d3", "univpll2", 1, 3),
+ FACTOR(CLK_TOP2_UNIVPLL2_D4, "univpll2_d4", "univpll2", 1, 4),
+ FACTOR(CLK_TOP2_UNIVPLL2_D4_D2, "univpll2_d4_d2", "univpll2", 1, 8),
+ FACTOR(CLK_TOP2_UNIVPLL2_D5, "univpll2_d5", "univpll2", 1, 5),
+ FACTOR(CLK_TOP2_UNIVPLL2_D5_D2, "univpll2_d5_d2", "univpll2", 1, 10),
+ FACTOR(CLK_TOP2_UNIVPLL2_D6, "univpll2_d6", "univpll2", 1, 6),
+ FACTOR(CLK_TOP2_UNIVPLL2_D6_D2, "univpll2_d6_d2", "univpll2", 1, 12),
+ FACTOR(CLK_TOP2_UNIVPLL2_D6_D4, "univpll2_d6_d4", "univpll2", 1, 24),
+ FACTOR(CLK_TOP2_UNIVPLL2_D7, "univpll2_d7", "univpll2", 1, 7),
+ FACTOR(CLK_TOP2_IMGPLL_D2, "imgpll_d2", "imgpll", 1, 2),
+ FACTOR(CLK_TOP2_IMGPLL_D4, "imgpll_d4", "imgpll", 1, 4),
+ FACTOR(CLK_TOP2_IMGPLL_D5, "imgpll_d5", "imgpll", 1, 5),
+ FACTOR(CLK_TOP2_IMGPLL_D5_D2, "imgpll_d5_d2", "imgpll", 1, 10),
+ FACTOR(CLK_TOP2_MMPLL2_D3, "mmpll2_d3", "mmpll2", 1, 3),
+ FACTOR(CLK_TOP2_MMPLL2_D4, "mmpll2_d4", "mmpll2", 1, 4),
+ FACTOR(CLK_TOP2_MMPLL2_D4_D2, "mmpll2_d4_d2", "mmpll2", 1, 8),
+ FACTOR(CLK_TOP2_MMPLL2_D5, "mmpll2_d5", "mmpll2", 1, 5),
+ FACTOR(CLK_TOP2_MMPLL2_D5_D2, "mmpll2_d5_d2", "mmpll2", 1, 10),
+ FACTOR(CLK_TOP2_MMPLL2_D6, "mmpll2_d6", "mmpll2", 1, 6),
+ FACTOR(CLK_TOP2_MMPLL2_D6_D2, "mmpll2_d6_d2", "mmpll2", 1, 12),
+ FACTOR(CLK_TOP2_MMPLL2_D7, "mmpll2_d7", "mmpll2", 1, 7),
+ FACTOR(CLK_TOP2_MMPLL2_D9, "mmpll2_d9", "mmpll2", 1, 9),
+ FACTOR(CLK_TOP2_TVDPLL1_D4, "tvdpll1_d4", "tvdpll1", 1, 4),
+ FACTOR(CLK_TOP2_TVDPLL1_D8, "tvdpll1_d8", "tvdpll1", 1, 8),
+ FACTOR(CLK_TOP2_TVDPLL1_D16, "tvdpll1_d16", "tvdpll1", 1, 16),
+ FACTOR(CLK_TOP2_TVDPLL2_D2, "tvdpll2_d2", "tvdpll2", 1, 2),
+ FACTOR(CLK_TOP2_TVDPLL2_D4, "tvdpll2_d4", "tvdpll2", 1, 4),
+ FACTOR(CLK_TOP2_TVDPLL2_D8, "tvdpll2_d8", "tvdpll2", 1, 8),
+ FACTOR(CLK_TOP2_TVDPLL2_D16, "tvdpll2_d16", "tvdpll2", 92, 1473),
+ FACTOR(CLK_TOP2_TVDPLL3_D2, "tvdpll3_d2", "tvdpll3", 1, 2),
+ FACTOR(CLK_TOP2_TVDPLL3_D4, "tvdpll3_d4", "tvdpll3", 1, 4),
+ FACTOR(CLK_TOP2_TVDPLL3_D8, "tvdpll3_d8", "tvdpll3", 1, 8),
+ FACTOR(CLK_TOP2_TVDPLL3_D16, "tvdpll3_d16", "tvdpll3", 92, 1473),
+};
+
+static const char * const seninf_parents[] = {
+ "clk26m",
+ "ck_osc_d10",
+ "ck_osc_d8",
+ "ck_osc_d5",
+ "ck_osc_d4",
+ "univpll2_d6_d2",
+ "mainpll2_d9",
+ "ck_osc_d2",
+ "mainpll2_d4_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d4_d2",
+ "univpll2_d7",
+ "mainpll2_d6",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "univpll2_d5"
+};
+
+static const char * const img1_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "mmpll2_d6_d2",
+ "ck_osc_d2",
+ "imgpll_d5_d2",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d4_d2",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mmpll2_d5",
+ "univpll2_d4",
+ "imgpll_d4"
+};
+
+static const char * const ipe_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "ck_osc_d2",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "imgpll_d5",
+ "ck_mainpll_d4",
+ "mmpll2_d5",
+ "imgpll_d4"
+};
+
+static const char * const cam_parents[] = {
+ "clk26m",
+ "ck_osc_d10",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "ck_osc_d2",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "univpll2_d7",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mmpll2_d5",
+ "univpll2_d4",
+ "imgpll_d4",
+ "mmpll2_d4"
+};
+
+static const char * const camtm_parents[] = {
+ "clk26m",
+ "univpll2_d6_d4",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "univpll2_d6_d2"
+};
+
+static const char * const dpe_parents[] = {
+ "clk26m",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mmpll2_d5",
+ "imgpll_d4",
+ "mmpll2_d4"
+};
+
+static const char * const vdec_parents[] = {
+ "clk26m",
+ "ck_mainpll_d5_d2",
+ "mainpll2_d4_d4",
+ "mainpll2_d7_d2",
+ "mainpll2_d6_d2",
+ "mainpll2_d5_d2",
+ "mainpll2_d9",
+ "mainpll2_d4_d2",
+ "mainpll2_d7",
+ "mainpll2_d6",
+ "univpll2_d6",
+ "mainpll2_d5",
+ "mainpll2_d4",
+ "imgpll_d2"
+};
+
+static const char * const ccusys_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "ck_osc_d2",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mainpll2_d4",
+ "mainpll2_d3",
+ "univpll2_d3"
+};
+
+static const char * const ccutm_parents[] = {
+ "clk26m",
+ "univpll2_d6_d4",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "univpll2_d6_d2"
+};
+
+static const char * const venc_parents[] = {
+ "clk26m",
+ "mainpll2_d5_d2",
+ "univpll2_d5_d2",
+ "mainpll2_d4_d2",
+ "mmpll2_d9",
+ "univpll2_d4_d2",
+ "mmpll2_d4_d2",
+ "mainpll2_d6",
+ "univpll2_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "univpll2_d3"
+};
+
+static const char * const dp1_parents[] = {
+ "clk26m",
+ "tvdpll2_d16",
+ "tvdpll2_d8",
+ "tvdpll2_d4",
+ "tvdpll2_d2"
+};
+
+static const char * const dp0_parents[] = {
+ "clk26m",
+ "tvdpll1_d16",
+ "tvdpll1_d8",
+ "tvdpll1_d4",
+ "ck_tvdpll1_d2"
+};
+
+static const char * const disp_parents[] = {
+ "clk26m",
+ "ck_mainpll_d5_d2",
+ "ck_mainpll_d4_d2",
+ "ck_mainpll_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "mainpll2_d3"
+};
+
+static const char * const mdp_parents[] = {
+ "clk26m",
+ "ck_mainpll_d5_d2",
+ "mainpll2_d5_d2",
+ "mmpll2_d6_d2",
+ "mainpll2_d9",
+ "mainpll2_d4_d2",
+ "mainpll2_d7",
+ "mainpll2_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "mainpll2_d3"
+};
+
+static const char * const mminfra_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_mainpll_d7_d2",
+ "ck_mainpll_d5_d2",
+ "ck_mainpll_d9",
+ "mmpll2_d6_d2",
+ "mainpll2_d4_d2",
+ "ck_mainpll_d6",
+ "univpll2_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "mainpll2_d3",
+ "univpll2_d3"
+};
+
+static const char * const mminfra_snoc_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_mainpll_d7_d2",
+ "ck_mainpll_d9",
+ "ck_mainpll_d7",
+ "ck_mainpll_d6",
+ "mmpll2_d4_d2",
+ "ck_mainpll_d5",
+ "ck_mainpll_d4",
+ "univpll2_d4",
+ "mmpll2_d4",
+ "mainpll2_d3",
+ "univpll2_d3",
+ "mmpll2_d3",
+ "mainpll2_d2"
+};
+
+static const char * const mmup_parents[] = {
+ "clk26m",
+ "mainpll2_d6",
+ "mainpll2_d5",
+ "ck_osc_d2",
+ "ck_osc",
+ "ck_mainpll_d4",
+ "univpll2_d4",
+ "mainpll2_d3"
+};
+
+static const char * const mminfra_ao_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_mainpll_d3"
+};
+
+static const char * const dvo_parents[] = {
+ "clk26m",
+ "tvdpll3_d16",
+ "tvdpll3_d8",
+ "tvdpll3_d4",
+ "tvdpll3_d2"
+};
+
+static const char * const dvo_favt_parents[] = {
+ "clk26m",
+ "tvdpll3_d16",
+ "tvdpll3_d8",
+ "tvdpll3_d4",
+ "vlp_apll1",
+ "vlp_apll2",
+ "tvdpll3_d2"
+};
+
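+/*
+ * Multimedia clock muxes. Most vote through the MM hardware voter
+ * (MM_HWV_*); the dvo/dvo_favt/dp0/dp1/mmup muxes are software
+ * controlled but still expose FENC status bits.
+ */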
+static const struct mtk_mux top_muxes[] = {
+ /* CKSYS2_CLK_CFG_0 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF0, "seninf0", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF0_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 31),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF1, "seninf1", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 8, 4, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF1_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 30),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF2, "seninf2", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF2_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 29),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF3, "seninf3", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF3_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 28),
+ /* CKSYS2_CLK_CFG_1 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF4, "seninf4", seninf_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF4_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 27),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF5, "seninf5", seninf_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 8, 4, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF5_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 26),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_IMG1, "img1", img1_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_IMG1_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 25),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_IPE, "ipe", ipe_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_IPE_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 24),
+ /* CKSYS2_CLK_CFG_2 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CAM, "cam", cam_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CAM_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 23),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CAMTM, "camtm", camtm_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 8, 3, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CAMTM_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 22),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_DPE, "dpe", dpe_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DPE_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 21),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_VDEC, "vdec", vdec_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_VDEC_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 20),
+ /* CKSYS2_CLK_CFG_3 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CCUSYS, "ccusys", ccusys_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ MM_HWV_CG_33_DONE, MM_HWV_CG_33_SET, MM_HWV_CG_33_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CCUSYS_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 19),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CCUTM, "ccutm", ccutm_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ MM_HWV_CG_33_DONE, MM_HWV_CG_33_SET, MM_HWV_CG_33_CLR,
+ 8, 3, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CCUTM_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 18),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_VENC, "venc", venc_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ MM_HWV_CG_33_DONE, MM_HWV_CG_33_SET, MM_HWV_CG_33_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_VENC_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 17),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DVO, "dvo", dvo_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ 24, 3, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DVO_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 16),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DVO_FAVT, "dvo_favt", dvo_favt_parents,
+ CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ 0, 3, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DVO_FAVT_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 15),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DP1, "dp1", dp1_parents,
+ CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ 8, 3, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DP1_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 14),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DP0, "dp0", dp0_parents,
+ CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ 16, 3, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DP0_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 13),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_DISP, "disp", disp_parents,
+ CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ MM_HWV_CG_34_DONE, MM_HWV_CG_34_SET, MM_HWV_CG_34_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DISP_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 12),
+ /* CKSYS2_CLK_CFG_5 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MDP, "mdp", mdp_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ MM_HWV_CG_35_DONE, MM_HWV_CG_35_SET, MM_HWV_CG_35_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MDP_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 11),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MMINFRA, "mminfra", mminfra_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ MM_HWV_CG_35_DONE, MM_HWV_CG_35_SET, MM_HWV_CG_35_CLR,
+ 8, 4, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMINFRA_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 10),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MMINFRA_SNOC, "mminfra_snoc", mminfra_snoc_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ MM_HWV_CG_35_DONE, MM_HWV_CG_35_SET, MM_HWV_CG_35_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMINFRA_SNOC_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 9),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_MMUP, "mmup", mmup_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ 24, 3, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMUP_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 8),
+ /* CKSYS2_CLK_CFG_6 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MMINFRA_AO, "mminfra_ao", mminfra_ao_parents,
+ CKSYS2_CLK_CFG_6, CKSYS2_CLK_CFG_6_SET, CKSYS2_CLK_CFG_6_CLR,
+ MM_HWV_CG_36_DONE, MM_HWV_CG_36_SET, MM_HWV_CG_36_CLR,
+ 16, 2, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMINFRA_AO_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 5),
+};
+
+static const struct mtk_clk_desc topck_desc = {
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .mux_clks = top_muxes,
+ .num_mux_clks = ARRAY_SIZE(top_muxes),
+};
+
+static const struct of_device_id of_match_clk_mt8196_ck[] = {
+ { .compatible = "mediatek,mt8196-topckgen-gp2", .data = &topck_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_ck);
+
+static struct platform_driver clk_mt8196_topck_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-topck2",
+ .of_match_table = of_match_clk_mt8196_ck,
+ },
+};
+
+MODULE_DESCRIPTION("MediaTek MT8196 GP2 top clock generators driver");
+module_platform_driver(clk_mt8196_topck_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-ufs_ao.c b/drivers/clk/mediatek/clk-mt8196-ufs_ao.c
new file mode 100644
index 000000000000..0c04717b7b4b
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-ufs_ao.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+#include <dt-bindings/reset/mediatek,mt8196-resets.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#define MT8196_UFSAO_RST0_SET_OFFSET 0x48
+#define MT8196_UFSAO_RST1_SET_OFFSET 0x148
+
+static const struct mtk_gate_regs ufsao0_cg_regs = {
+ .set_ofs = 0x108,
+ .clr_ofs = 0x10c,
+ .sta_ofs = 0x104,
+};
+
+static const struct mtk_gate_regs ufsao1_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x4,
+};
+
+#define GATE_UFSAO0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ufsao0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_UFSAO1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ufsao1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate ufsao_clks[] = {
+ /* UFSAO0 */
+ GATE_UFSAO0(CLK_UFSAO_UFSHCI_UFS, "ufsao_ufshci_ufs", "ufs", 0),
+ GATE_UFSAO0(CLK_UFSAO_UFSHCI_AES, "ufsao_ufshci_aes", "aes_ufsfde", 1),
+ /* UFSAO1 */
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_TX_SYM, "ufsao_unipro_tx_sym", "clk26m", 0),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_RX_SYM0, "ufsao_unipro_rx_sym0", "clk26m", 1),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_RX_SYM1, "ufsao_unipro_rx_sym1", "clk26m", 2),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_SYS, "ufsao_unipro_sys", "ufs", 3),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_SAP, "ufsao_unipro_sap", "clk26m", 4),
+ GATE_UFSAO1(CLK_UFSAO_PHY_SAP, "ufsao_phy_sap", "clk26m", 8),
+};
+
+static u16 ufsao_rst_ofs[] = {
+ MT8196_UFSAO_RST0_SET_OFFSET,
+	MT8196_UFSAO_RST1_SET_OFFSET,
+};
+
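+/* Entries at 1 * RST_NR_PER_BANK + n map to bit n of the second reset bank. */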
+static u16 ufsao_rst_idx_map[] = {
+ [MT8196_UFSAO_RST0_UFS_MPHY] = 8,
+ [MT8196_UFSAO_RST1_UFS_UNIPRO] = 1 * RST_NR_PER_BANK + 0,
+ [MT8196_UFSAO_RST1_UFS_CRYPTO] = 1 * RST_NR_PER_BANK + 1,
+ [MT8196_UFSAO_RST1_UFSHCI] = 1 * RST_NR_PER_BANK + 2,
+};
+
+static const struct mtk_clk_rst_desc ufsao_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = ufsao_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(ufsao_rst_ofs),
+ .rst_idx_map = ufsao_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(ufsao_rst_idx_map),
+};
+
+static const struct mtk_clk_desc ufsao_mcd = {
+ .clks = ufsao_clks,
+ .num_clks = ARRAY_SIZE(ufsao_clks),
+ .rst_desc = &ufsao_rst_desc,
+};
+
+static const struct of_device_id of_match_clk_mt8196_ufs_ao[] = {
+ { .compatible = "mediatek,mt8196-ufscfg-ao", .data = &ufsao_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_ufs_ao);
+
+static struct platform_driver clk_mt8196_ufs_ao_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-ufs-ao",
+ .of_match_table = of_match_clk_mt8196_ufs_ao,
+ },
+};
+
+module_platform_driver(clk_mt8196_ufs_ao_drv);
+MODULE_DESCRIPTION("MediaTek MT8196 ufs_ao clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-vdec.c b/drivers/clk/mediatek/clk-mt8196-vdec.c
new file mode 100644
index 000000000000..f8dcd84a2b58
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-vdec.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs vde20_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs vde20_hwv_regs = {
+ .set_ofs = 0x0088,
+ .clr_ofs = 0x008c,
+ .sta_ofs = 0x2c44,
+};
+
+static const struct mtk_gate_regs vde21_cg_regs = {
+ .set_ofs = 0x200,
+ .clr_ofs = 0x204,
+ .sta_ofs = 0x200,
+};
+
+static const struct mtk_gate_regs vde21_hwv_regs = {
+ .set_ofs = 0x0080,
+ .clr_ofs = 0x0084,
+ .sta_ofs = 0x2c40,
+};
+
+static const struct mtk_gate_regs vde22_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x8,
+};
+
+static const struct mtk_gate_regs vde22_hwv_regs = {
+ .set_ofs = 0x0078,
+ .clr_ofs = 0x007c,
+ .sta_ofs = 0x2c3c,
+};
+
+#define GATE_HWV_VDE20(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde20_cg_regs, \
+ .hwv_regs = &vde20_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE21(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde21_cg_regs, \
+ .hwv_regs = &vde21_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE22(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde22_cg_regs, \
+ .hwv_regs = &vde22_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE | \
+ CLK_IGNORE_UNUSED, \
+ }
+
+static const struct mtk_gate vde2_clks[] = {
+ /* VDE20 */
+ GATE_HWV_VDE20(CLK_VDE2_VDEC_CKEN, "vde2_vdec_cken", "vdec", 0),
+ GATE_HWV_VDE20(CLK_VDE2_VDEC_ACTIVE, "vde2_vdec_active", "vdec", 4),
+ GATE_HWV_VDE20(CLK_VDE2_VDEC_CKEN_ENG, "vde2_vdec_cken_eng", "vdec", 8),
+ /* VDE21 */
+ GATE_HWV_VDE21(CLK_VDE2_LAT_CKEN, "vde2_lat_cken", "vdec", 0),
+ GATE_HWV_VDE21(CLK_VDE2_LAT_ACTIVE, "vde2_lat_active", "vdec", 4),
+ GATE_HWV_VDE21(CLK_VDE2_LAT_CKEN_ENG, "vde2_lat_cken_eng", "vdec", 8),
+ /* VDE22 */
+ GATE_HWV_VDE22(CLK_VDE2_LARB1_CKEN, "vde2_larb1_cken", "vdec", 0),
+};
+
+static const struct mtk_clk_desc vde2_mcd = {
+ .clks = vde2_clks,
+ .num_clks = ARRAY_SIZE(vde2_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct mtk_gate_regs vde10_hwv_regs = {
+ .set_ofs = 0x00a0,
+ .clr_ofs = 0x00a4,
+ .sta_ofs = 0x2c50,
+};
+
+static const struct mtk_gate_regs vde11_cg_regs = {
+ .set_ofs = 0x1e0,
+ .clr_ofs = 0x1e0,
+ .sta_ofs = 0x1e0,
+};
+
+static const struct mtk_gate_regs vde11_hwv_regs = {
+ .set_ofs = 0x00b0,
+ .clr_ofs = 0x00b4,
+ .sta_ofs = 0x2c58,
+};
+
+static const struct mtk_gate_regs vde12_cg_regs = {
+ .set_ofs = 0x1ec,
+ .clr_ofs = 0x1ec,
+ .sta_ofs = 0x1ec,
+};
+
+static const struct mtk_gate_regs vde12_hwv_regs = {
+ .set_ofs = 0x00a8,
+ .clr_ofs = 0x00ac,
+ .sta_ofs = 0x2c54,
+};
+
+static const struct mtk_gate_regs vde13_cg_regs = {
+ .set_ofs = 0x200,
+ .clr_ofs = 0x204,
+ .sta_ofs = 0x200,
+};
+
+static const struct mtk_gate_regs vde13_hwv_regs = {
+ .set_ofs = 0x0098,
+ .clr_ofs = 0x009c,
+ .sta_ofs = 0x2c4c,
+};
+
+static const struct mtk_gate_regs vde14_hwv_regs = {
+ .set_ofs = 0x0090,
+ .clr_ofs = 0x0094,
+ .sta_ofs = 0x2c48,
+};
+
+#define GATE_HWV_VDE10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde20_cg_regs, \
+ .hwv_regs = &vde10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde11_cg_regs, \
+ .hwv_regs = &vde11_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE12(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde12_cg_regs, \
+ .hwv_regs = &vde12_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv, \
+		.flags = CLK_OPS_PARENT_ENABLE,	\
+ }
+
+#define GATE_HWV_VDE13(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde13_cg_regs, \
+ .hwv_regs = &vde13_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE14(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde22_cg_regs, \
+ .hwv_regs = &vde14_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE | \
+ CLK_IGNORE_UNUSED, \
+ }
+
+static const struct mtk_gate vde1_clks[] = {
+ /* VDE10 */
+ GATE_HWV_VDE10(CLK_VDE1_VDEC_CKEN, "vde1_vdec_cken", "vdec", 0),
+ GATE_HWV_VDE10(CLK_VDE1_VDEC_ACTIVE, "vde1_vdec_active", "vdec", 4),
+ GATE_HWV_VDE10(CLK_VDE1_VDEC_CKEN_ENG, "vde1_vdec_cken_eng", "vdec", 8),
+ /* VDE11 */
+ GATE_HWV_VDE11(CLK_VDE1_VDEC_SOC_IPS_EN, "vde1_vdec_soc_ips_en", "vdec", 0),
+ /* VDE12 */
+ GATE_HWV_VDE12(CLK_VDE1_VDEC_SOC_APTV_EN, "vde1_aptv_en", "ck_tck_26m_mx9_ck", 0),
+ GATE_HWV_VDE12(CLK_VDE1_VDEC_SOC_APTV_TOP_EN, "vde1_aptv_topen", "ck_tck_26m_mx9_ck", 1),
+ /* VDE13 */
+ GATE_HWV_VDE13(CLK_VDE1_LAT_CKEN, "vde1_lat_cken", "vdec", 0),
+ GATE_HWV_VDE13(CLK_VDE1_LAT_ACTIVE, "vde1_lat_active", "vdec", 4),
+ GATE_HWV_VDE13(CLK_VDE1_LAT_CKEN_ENG, "vde1_lat_cken_eng", "vdec", 8),
+ /* VDE14 */
+ GATE_HWV_VDE14(CLK_VDE1_LARB1_CKEN, "vde1_larb1_cken", "vdec", 0),
+};
+
+static const struct mtk_clk_desc vde1_mcd = {
+ .clks = vde1_clks,
+ .num_clks = ARRAY_SIZE(vde1_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct of_device_id of_match_clk_mt8196_vdec[] = {
+ { .compatible = "mediatek,mt8196-vdecsys", .data = &vde2_mcd },
+ { .compatible = "mediatek,mt8196-vdecsys-soc", .data = &vde1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_vdec);
+
+static struct platform_driver clk_mt8196_vdec_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-vdec",
+ .of_match_table = of_match_clk_mt8196_vdec,
+ },
+};
+module_platform_driver(clk_mt8196_vdec_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 Video Decoders clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-vdisp_ao.c b/drivers/clk/mediatek/clk-mt8196-vdisp_ao.c
new file mode 100644
index 000000000000..fddb69d1c3eb
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-vdisp_ao.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mm_v_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm_v_hwv_regs = {
+ .set_ofs = 0x0030,
+ .clr_ofs = 0x0034,
+ .sta_ofs = 0x2c18,
+};
+
+#define GATE_MM_AO_V(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm_v_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE | \
+ CLK_IS_CRITICAL, \
+ }
+
+#define GATE_HWV_MM_V(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm_v_cg_regs, \
+ .hwv_regs = &mm_v_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate mm_v_clks[] = {
+ GATE_HWV_MM_V(CLK_MM_V_DISP_VDISP_AO_CONFIG, "mm_v_disp_vdisp_ao_config", "disp", 0),
+ GATE_HWV_MM_V(CLK_MM_V_DISP_DPC, "mm_v_disp_dpc", "disp", 16),
+ GATE_MM_AO_V(CLK_MM_V_SMI_SUB_SOMM0, "mm_v_smi_sub_somm0", "disp", 2),
+};
+
+static const struct mtk_clk_desc mm_v_mcd = {
+ .clks = mm_v_clks,
+ .num_clks = ARRAY_SIZE(mm_v_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8196_vdisp_ao[] = {
+ { .compatible = "mediatek,mt8196-vdisp-ao", .data = &mm_v_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_vdisp_ao);
+
+static struct platform_driver clk_mt8196_vdisp_ao_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-vdisp-ao",
+ .of_match_table = of_match_clk_mt8196_vdisp_ao,
+ },
+};
+module_platform_driver(clk_mt8196_vdisp_ao_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 vdisp_ao clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-venc.c b/drivers/clk/mediatek/clk-mt8196-venc.c
new file mode 100644
index 000000000000..13e2e36e945f
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-venc.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs ven10_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs ven10_hwv_regs = {
+ .set_ofs = 0x00b8,
+ .clr_ofs = 0x00bc,
+ .sta_ofs = 0x2c5c,
+};
+
+static const struct mtk_gate_regs ven11_cg_regs = {
+ .set_ofs = 0x10,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x10,
+};
+
+static const struct mtk_gate_regs ven11_hwv_regs = {
+ .set_ofs = 0x00c0,
+ .clr_ofs = 0x00c4,
+ .sta_ofs = 0x2c60,
+};
+
+#define GATE_VEN10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_HWV_VEN10_FLAGS(_id, _name, _parent, _shift, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .hwv_regs = &ven10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv, \
+ .flags = (_flags) | \
+ CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VEN10(_id, _name, _parent, _shift) \
+ GATE_HWV_VEN10_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_HWV_VEN11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven11_cg_regs, \
+ .hwv_regs = &ven11_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+		.flags = CLK_OPS_PARENT_ENABLE,	\
+ }
+
+static const struct mtk_gate ven1_clks[] = {
+ /* VEN10 */
+ GATE_HWV_VEN10(CLK_VEN1_CKE0_LARB, "ven1_larb", "venc", 0),
+ GATE_HWV_VEN10(CLK_VEN1_CKE1_VENC, "ven1_venc", "venc", 4),
+ GATE_VEN10(CLK_VEN1_CKE2_JPGENC, "ven1_jpgenc", "venc", 8),
+ GATE_VEN10(CLK_VEN1_CKE3_JPGDEC, "ven1_jpgdec", "venc", 12),
+ GATE_VEN10(CLK_VEN1_CKE4_JPGDEC_C1, "ven1_jpgdec_c1", "venc", 16),
+ GATE_HWV_VEN10(CLK_VEN1_CKE5_GALS, "ven1_gals", "venc", 28),
+ GATE_HWV_VEN10(CLK_VEN1_CKE29_VENC_ADAB_CTRL, "ven1_venc_adab_ctrl",
+ "venc", 29),
+ GATE_HWV_VEN10_FLAGS(CLK_VEN1_CKE29_VENC_XPC_CTRL,
+ "ven1_venc_xpc_ctrl", "venc", 30,
+ CLK_IGNORE_UNUSED),
+ GATE_HWV_VEN10(CLK_VEN1_CKE6_GALS_SRAM, "ven1_gals_sram", "venc", 31),
+ /* VEN11 */
+ GATE_HWV_VEN11(CLK_VEN1_RES_FLAT, "ven1_res_flat", "venc", 0),
+};
+
+static const struct mtk_clk_desc ven1_mcd = {
+ .clks = ven1_clks,
+ .num_clks = ARRAY_SIZE(ven1_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct mtk_gate_regs ven20_hwv_regs = {
+ .set_ofs = 0x00c8,
+ .clr_ofs = 0x00cc,
+ .sta_ofs = 0x2c64,
+};
+
+static const struct mtk_gate_regs ven21_hwv_regs = {
+ .set_ofs = 0x00d0,
+ .clr_ofs = 0x00d4,
+ .sta_ofs = 0x2c68,
+};
+
+#define GATE_VEN20(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_HWV_VEN20(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .hwv_regs = &ven20_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VEN21(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven11_cg_regs, \
+ .hwv_regs = &ven21_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+		.flags = CLK_OPS_PARENT_ENABLE,	\
+ }
+
+static const struct mtk_gate ven2_clks[] = {
+ /* VEN20 */
+ GATE_HWV_VEN20(CLK_VEN2_CKE0_LARB, "ven2_larb", "venc", 0),
+ GATE_HWV_VEN20(CLK_VEN2_CKE1_VENC, "ven2_venc", "venc", 4),
+ GATE_VEN20(CLK_VEN2_CKE2_JPGENC, "ven2_jpgenc", "venc", 8),
+ GATE_VEN20(CLK_VEN2_CKE3_JPGDEC, "ven2_jpgdec", "venc", 12),
+ GATE_HWV_VEN20(CLK_VEN2_CKE5_GALS, "ven2_gals", "venc", 28),
+ GATE_HWV_VEN20(CLK_VEN2_CKE29_VENC_XPC_CTRL, "ven2_venc_xpc_ctrl", "venc", 30),
+ GATE_HWV_VEN20(CLK_VEN2_CKE6_GALS_SRAM, "ven2_gals_sram", "venc", 31),
+ /* VEN21 */
+ GATE_HWV_VEN21(CLK_VEN2_RES_FLAT, "ven2_res_flat", "venc", 0),
+};
+
+static const struct mtk_clk_desc ven2_mcd = {
+ .clks = ven2_clks,
+ .num_clks = ARRAY_SIZE(ven2_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct mtk_gate_regs ven_c20_hwv_regs = {
+ .set_ofs = 0x00d8,
+ .clr_ofs = 0x00dc,
+ .sta_ofs = 0x2c6c,
+};
+
+static const struct mtk_gate_regs ven_c21_hwv_regs = {
+ .set_ofs = 0x00e0,
+ .clr_ofs = 0x00e4,
+ .sta_ofs = 0x2c70,
+};
+
+#define GATE_HWV_VEN_C20(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .hwv_regs = &ven_c20_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VEN_C21(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven11_cg_regs, \
+ .hwv_regs = &ven_c21_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate ven_c2_clks[] = {
+ /* VEN_C20 */
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE0_LARB, "ven_c2_larb", "venc", 0),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE1_VENC, "ven_c2_venc", "venc", 4),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE5_GALS, "ven_c2_gals", "venc", 28),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE29_VENC_XPC_CTRL, "ven_c2_venc_xpc_ctrl",
+ "venc", 30),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE6_GALS_SRAM, "ven_c2_gals_sram", "venc", 31),
+ /* VEN_C21 */
+ GATE_HWV_VEN_C21(CLK_VEN_C2_RES_FLAT, "ven_c2_res_flat", "venc", 0),
+};
+
+static const struct mtk_clk_desc ven_c2_mcd = {
+ .clks = ven_c2_clks,
+ .num_clks = ARRAY_SIZE(ven_c2_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct of_device_id of_match_clk_mt8196_venc[] = {
+ { .compatible = "mediatek,mt8196-vencsys", .data = &ven1_mcd },
+ { .compatible = "mediatek,mt8196-vencsys-c1", .data = &ven2_mcd },
+ { .compatible = "mediatek,mt8196-vencsys-c2", .data = &ven_c2_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_venc);
+
+static struct platform_driver clk_mt8196_venc_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-venc",
+ .of_match_table = of_match_clk_mt8196_venc,
+ },
+};
+module_platform_driver(clk_mt8196_venc_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 Video Encoders clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-vlpckgen.c b/drivers/clk/mediatek/clk-mt8196-vlpckgen.c
new file mode 100644
index 000000000000..d59a8a9d9855
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-vlpckgen.c
@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+#include "clk-pll.h"
+
+/* MUX SEL REG */
+#define VLP_CLK_CFG_UPDATE 0x0004
+#define VLP_CLK_CFG_UPDATE1 0x0008
+#define VLP_CLK_CFG_0 0x0010
+#define VLP_CLK_CFG_0_SET 0x0014
+#define VLP_CLK_CFG_0_CLR 0x0018
+#define VLP_CLK_CFG_1 0x0020
+#define VLP_CLK_CFG_1_SET 0x0024
+#define VLP_CLK_CFG_1_CLR 0x0028
+#define VLP_CLK_CFG_2 0x0030
+#define VLP_CLK_CFG_2_SET 0x0034
+#define VLP_CLK_CFG_2_CLR 0x0038
+#define VLP_CLK_CFG_3 0x0040
+#define VLP_CLK_CFG_3_SET 0x0044
+#define VLP_CLK_CFG_3_CLR 0x0048
+#define VLP_CLK_CFG_4 0x0050
+#define VLP_CLK_CFG_4_SET 0x0054
+#define VLP_CLK_CFG_4_CLR 0x0058
+#define VLP_CLK_CFG_5 0x0060
+#define VLP_CLK_CFG_5_SET 0x0064
+#define VLP_CLK_CFG_5_CLR 0x0068
+#define VLP_CLK_CFG_6 0x0070
+#define VLP_CLK_CFG_6_SET 0x0074
+#define VLP_CLK_CFG_6_CLR 0x0078
+#define VLP_CLK_CFG_7 0x0080
+#define VLP_CLK_CFG_7_SET 0x0084
+#define VLP_CLK_CFG_7_CLR 0x0088
+#define VLP_CLK_CFG_8 0x0090
+#define VLP_CLK_CFG_8_SET 0x0094
+#define VLP_CLK_CFG_8_CLR 0x0098
+#define VLP_CLK_CFG_9 0x00a0
+#define VLP_CLK_CFG_9_SET 0x00a4
+#define VLP_CLK_CFG_9_CLR 0x00a8
+#define VLP_CLK_CFG_10 0x00b0
+#define VLP_CLK_CFG_10_SET 0x00b4
+#define VLP_CLK_CFG_10_CLR 0x00b8
+#define VLP_OCIC_FENC_STATUS_MON_0 0x039c
+#define VLP_OCIC_FENC_STATUS_MON_1 0x03a0
+
+/* MUX SHIFT */
+#define TOP_MUX_SCP_SHIFT 0
+#define TOP_MUX_SCP_SPI_SHIFT 1
+#define TOP_MUX_SCP_IIC_SHIFT 2
+#define TOP_MUX_SCP_IIC_HS_SHIFT 3
+#define TOP_MUX_PWRAP_ULPOSC_SHIFT 4
+#define TOP_MUX_SPMI_M_TIA_32K_SHIFT 5
+#define TOP_MUX_APXGPT_26M_B_SHIFT 6
+#define TOP_MUX_DPSW_SHIFT 7
+#define TOP_MUX_DPSW_CENTRAL_SHIFT 8
+#define TOP_MUX_SPMI_M_MST_SHIFT 9
+#define TOP_MUX_DVFSRC_SHIFT 10
+#define TOP_MUX_PWM_VLP_SHIFT 11
+#define TOP_MUX_AXI_VLP_SHIFT 12
+#define TOP_MUX_SYSTIMER_26M_SHIFT 13
+#define TOP_MUX_SSPM_SHIFT 14
+#define TOP_MUX_SRCK_SHIFT 15
+#define TOP_MUX_CAMTG0_SHIFT 16
+#define TOP_MUX_CAMTG1_SHIFT 17
+#define TOP_MUX_CAMTG2_SHIFT 18
+#define TOP_MUX_CAMTG3_SHIFT 19
+#define TOP_MUX_CAMTG4_SHIFT 20
+#define TOP_MUX_CAMTG5_SHIFT 21
+#define TOP_MUX_CAMTG6_SHIFT 22
+#define TOP_MUX_CAMTG7_SHIFT 23
+#define TOP_MUX_SSPM_26M_SHIFT 25
+#define TOP_MUX_ULPOSC_SSPM_SHIFT 26
+#define TOP_MUX_VLP_PBUS_26M_SHIFT 27
+#define TOP_MUX_DEBUG_ERR_FLAG_VLP_26M_SHIFT 28
+#define TOP_MUX_DPMSRDMA_SHIFT 29
+#define TOP_MUX_VLP_PBUS_156M_SHIFT 30
+#define TOP_MUX_SPM_SHIFT 0
+#define TOP_MUX_MMINFRA_VLP_SHIFT 1
+#define TOP_MUX_USB_TOP_SHIFT 2
+#define TOP_MUX_SSUSB_XHCI_SHIFT 3
+#define TOP_MUX_NOC_VLP_SHIFT 4
+#define TOP_MUX_AUDIO_H_SHIFT 5
+#define TOP_MUX_AUD_ENGEN1_SHIFT 6
+#define TOP_MUX_AUD_ENGEN2_SHIFT 7
+#define TOP_MUX_AUD_INTBUS_SHIFT 8
+#define TOP_MUX_SPU_VLP_26M_SHIFT 9
+#define TOP_MUX_SPU0_VLP_SHIFT 10
+#define TOP_MUX_SPU1_VLP_SHIFT 11
+
+/* CKSTA REG */
+#define VLP_CKSTA_REG0 0x0250
+#define VLP_CKSTA_REG1 0x0254
+
+/* HW Voter REG */
+#define HWV_CG_9_SET 0x0048
+#define HWV_CG_9_CLR 0x004c
+#define HWV_CG_9_DONE 0x2c24
+#define HWV_CG_10_SET 0x0050
+#define HWV_CG_10_CLR 0x0054
+#define HWV_CG_10_DONE 0x2c28
+
+/* PLL REG */
+#define VLP_AP_PLL_CON3 0x264
+#define VLP_APLL1_TUNER_CON0 0x2a4
+#define VLP_APLL2_TUNER_CON0 0x2a8
+#define VLP_APLL1_CON0 0x274
+#define VLP_APLL1_CON1 0x278
+#define VLP_APLL1_CON2 0x27c
+#define VLP_APLL1_CON3 0x280
+#define VLP_APLL2_CON0 0x28c
+#define VLP_APLL2_CON1 0x290
+#define VLP_APLL2_CON2 0x294
+#define VLP_APLL2_CON3 0x298
+
+/* vlp apll1 tuner default value */
+#define VLP_APLL1_TUNER_CON0_VALUE 0x6f28bd4d
+/* vlp apll2 tuner default value + 1 */
+#define VLP_APLL2_TUNER_CON0_VALUE 0x78fd5265
+
+#define VLP_PLLEN_ALL 0x080
+#define VLP_PLLEN_ALL_SET 0x084
+#define VLP_PLLEN_ALL_CLR 0x088
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
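+/*
+ * FENC PLLs are enabled through the shared VLP_PLLEN_ALL set/clr registers
+ * and report their on/off state through a "fenc" status monitor bit.
+ */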
+#define PLL_FENC(_id, _name, _reg, _fenc_sta_ofs, _fenc_sta_bit,\
+ _flags, _pd_reg, _pd_shift, \
+ _pcw_reg, _pcw_shift, _pcwbits, \
+ _pll_en_bit) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .fenc_sta_ofs = _fenc_sta_ofs, \
+ .fenc_sta_bit = _fenc_sta_bit, \
+ .flags = _flags, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ .en_reg = VLP_PLLEN_ALL, \
+ .en_set_reg = VLP_PLLEN_ALL_SET, \
+ .en_clr_reg = VLP_PLLEN_ALL_CLR, \
+ .pll_en_bit = _pll_en_bit, \
+ .ops = &mtk_pll_fenc_clr_set_ops, \
+}
+
+static DEFINE_SPINLOCK(mt8196_clk_vlp_lock);
+
+static const struct mtk_fixed_factor vlp_divs[] = {
+ FACTOR(CLK_VLP_CLK26M, "vlp_clk26m", "clk26m", 1, 1),
+ FACTOR(CLK_VLP_APLL1_D4, "apll1_d4", "vlp_apll1", 1, 4),
+ FACTOR(CLK_VLP_APLL1_D8, "apll1_d8", "vlp_apll1", 1, 8),
+ FACTOR(CLK_VLP_APLL2_D4, "apll2_d4", "vlp_apll2", 1, 4),
+ FACTOR(CLK_VLP_APLL2_D8, "apll2_d8", "vlp_apll2", 1, 8),
+};
+
+static const char * const vlp_scp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d6",
+ "mainpll_d4",
+ "mainpll_d3",
+ "vlp_apll1"
+};
+
+static const char * const vlp_scp_spi_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const vlp_scp_iic_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d5_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const vlp_scp_iic_hs_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d5_d4",
+ "mainpll_d7_d2",
+ "mainpll_d7"
+};
+
+static const char * const vlp_pwrap_ulposc_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d14",
+ "osc_d10"
+};
+
+static const char * const vlp_spmi_32k_parents[] = {
+ "clk26m",
+ "clk32k",
+ "osc_d20",
+ "osc_d14",
+ "osc_d10"
+};
+
+static const char * const vlp_apxgpt_26m_b_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_dpsw_parents[] = {
+ "clk26m",
+ "osc_d10",
+ "osc_d7",
+ "mainpll_d7_d4"
+};
+
+static const char * const vlp_dpsw_central_parents[] = {
+ "clk26m",
+ "osc_d10",
+ "osc_d7",
+ "mainpll_d7_d4"
+};
+
+static const char * const vlp_spmi_m_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d14",
+ "osc_d10"
+};
+
+static const char * const vlp_dvfsrc_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_pwm_vlp_parents[] = {
+ "clk26m",
+ "clk32k",
+ "osc_d20",
+ "osc_d8",
+ "mainpll_d4_d8"
+};
+
+static const char * const vlp_axi_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d4",
+ "osc_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const vlp_systimer_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_sspm_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d5_d2",
+ "osc_d2",
+ "mainpll_d6"
+};
+
+static const char * const vlp_srck_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_camtg0_1_parents[] = {
+ "clk26m",
+ "univpll_192m_d32",
+ "univpll_192m_d16",
+ "clk13m",
+ "osc_d40",
+ "osc_d32",
+ "univpll_192m_d10",
+ "univpll_192m_d8",
+ "univpll_d6_d16",
+ "ulposc3",
+ "osc_d20",
+ "ck2_tvdpll1_d16",
+ "univpll_d6_d8"
+};
+
+static const char * const vlp_camtg2_7_parents[] = {
+ "clk26m",
+ "univpll_192m_d32",
+ "univpll_192m_d16",
+ "clk13m",
+ "osc_d40",
+ "osc_d32",
+ "univpll_192m_d10",
+ "univpll_192m_d8",
+ "univpll_d6_d16",
+ "osc_d20",
+ "ck2_tvdpll1_d16",
+ "univpll_d6_d8"
+};
+
+static const char * const vlp_sspm_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_ulposc_sspm_parents[] = {
+ "clk26m",
+ "osc_d2",
+ "mainpll_d4_d2"
+};
+
+static const char * const vlp_vlp_pbus_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_debug_err_flag_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_dpmsrdma_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2"
+};
+
+static const char * const vlp_vlp_pbus_156m_parents[] = {
+ "clk26m",
+ "osc_d2",
+ "mainpll_d7_d2",
+ "mainpll_d7"
+};
+
+static const char * const vlp_spm_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4"
+};
+
+static const char * const vlp_mminfra_parents[] = {
+ "clk26m",
+ "osc_d4",
+ "mainpll_d3"
+};
+
+static const char * const vlp_usb_parents[] = {
+ "clk26m",
+ "mainpll_d9"
+};
+
+static const char * const vlp_noc_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d9"
+};
+
+static const char * const vlp_audio_h_parents[] = {
+ "vlp_clk26m",
+ "vlp_apll1",
+ "vlp_apll2"
+};
+
+static const char * const vlp_aud_engen1_parents[] = {
+ "vlp_clk26m",
+ "apll1_d8",
+ "apll1_d4"
+};
+
+static const char * const vlp_aud_engen2_parents[] = {
+ "vlp_clk26m",
+ "apll2_d8",
+ "apll2_d4"
+};
+
+static const char * const vlp_aud_intbus_parents[] = {
+ "vlp_clk26m",
+ "mainpll_d7_d4",
+ "mainpll_d4_d4"
+};
+
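+/* The audio muxes map their parents onto hardware selector values 1..3. */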
+static const u8 vlp_aud_parent_index[] = { 1, 2, 3 };
+
+static const char * const vlp_spvlp_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_spu0_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const char * const vlp_spu1_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const struct mtk_mux vlp_muxes[] = {
+ /* VLP_CLK_CFG_0 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_SCP, "vlp_scp", vlp_scp_parents,
+ VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET, VLP_CLK_CFG_0_CLR,
+ 0, 3, 7, VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 31),
+ MUX_CLR_SET_UPD(CLK_VLP_SCP_SPI, "vlp_scp_spi",
+ vlp_scp_spi_parents, VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET,
+ VLP_CLK_CFG_0_CLR, 8, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_SPI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SCP_IIC, "vlp_scp_iic",
+ vlp_scp_iic_parents, VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET,
+ VLP_CLK_CFG_0_CLR, 16, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_IIC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SCP_IIC_HS, "vlp_scp_iic_hs",
+ vlp_scp_iic_hs_parents, VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET,
+ VLP_CLK_CFG_0_CLR, 24, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_IIC_HS_SHIFT),
+ /* VLP_CLK_CFG_1 */
+ MUX_CLR_SET_UPD(CLK_VLP_PWRAP_ULPOSC, "vlp_pwrap_ulposc",
+ vlp_pwrap_ulposc_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 0, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_PWRAP_ULPOSC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPMI_M_TIA_32K, "vlp_spmi_32k",
+ vlp_spmi_32k_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 8, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SPMI_M_TIA_32K_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_APXGPT_26M_B, "vlp_apxgpt_26m_b",
+ vlp_apxgpt_26m_b_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 16, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_APXGPT_26M_B_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_DPSW, "vlp_dpsw",
+ vlp_dpsw_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 24, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DPSW_SHIFT),
+ /* VLP_CLK_CFG_2 */
+ MUX_CLR_SET_UPD(CLK_VLP_DPSW_CENTRAL, "vlp_dpsw_central",
+ vlp_dpsw_central_parents, VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET,
+ VLP_CLK_CFG_2_CLR, 0, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DPSW_CENTRAL_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPMI_M_MST, "vlp_spmi_m",
+ vlp_spmi_m_parents, VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET,
+ VLP_CLK_CFG_2_CLR, 8, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SPMI_M_MST_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_DVFSRC, "vlp_dvfsrc",
+ vlp_dvfsrc_parents, VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET,
+ VLP_CLK_CFG_2_CLR, 16, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DVFSRC_SHIFT),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_PWM_VLP, "vlp_pwm_vlp", vlp_pwm_vlp_parents,
+ VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET, VLP_CLK_CFG_2_CLR,
+ 24, 3, 31, VLP_CLK_CFG_UPDATE, TOP_MUX_PWM_VLP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 20),
+ /* VLP_CLK_CFG_3 */
+ MUX_CLR_SET_UPD(CLK_VLP_AXI_VLP, "vlp_axi_vlp",
+ vlp_axi_vlp_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 0, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_AXI_VLP_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SYSTIMER_26M, "vlp_systimer_26m",
+ vlp_systimer_26m_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 8, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SYSTIMER_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SSPM, "vlp_sspm",
+ vlp_sspm_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 16, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SSPM_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SRCK, "vlp_srck",
+ vlp_srck_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 24, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SRCK_SHIFT),
+ /* VLP_CLK_CFG_4 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG0, "vlp_camtg0", vlp_camtg0_1_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 0, 4, 7, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG0_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 15),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG1, "vlp_camtg1", vlp_camtg0_1_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 8, 4, 15, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG1_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 14),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG2, "vlp_camtg2", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 16, 4, 23, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG2_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 13),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG3, "vlp_camtg3", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 24, 4, 31, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG3_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 12),
+ /* VLP_CLK_CFG_5 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG4, "vlp_camtg4", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 0, 4, 7, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG4_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 11),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG5, "vlp_camtg5", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 8, 4, 15, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG5_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 10),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG6, "vlp_camtg6", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 16, 4, 23, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG6_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 9),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG7, "vlp_camtg7", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 24, 4, 31, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG7_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 8),
+ /* VLP_CLK_CFG_6 */
+ MUX_CLR_SET_UPD(CLK_VLP_SSPM_26M, "vlp_sspm_26m",
+ vlp_sspm_26m_parents, VLP_CLK_CFG_6, VLP_CLK_CFG_6_SET,
+ VLP_CLK_CFG_6_CLR, 8, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SSPM_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_ULPOSC_SSPM, "vlp_ulposc_sspm",
+ vlp_ulposc_sspm_parents, VLP_CLK_CFG_6, VLP_CLK_CFG_6_SET,
+ VLP_CLK_CFG_6_CLR, 16, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_ULPOSC_SSPM_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_VLP_PBUS_26M, "vlp_vlp_pbus_26m",
+ vlp_vlp_pbus_26m_parents, VLP_CLK_CFG_6, VLP_CLK_CFG_6_SET,
+ VLP_CLK_CFG_6_CLR, 24, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_VLP_PBUS_26M_SHIFT),
+ /* VLP_CLK_CFG_7 */
+ MUX_CLR_SET_UPD(CLK_VLP_DEBUG_ERR_FLAG, "vlp_debug_err_flag",
+ vlp_debug_err_flag_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 0, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DEBUG_ERR_FLAG_VLP_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_DPMSRDMA, "vlp_dpmsrdma",
+ vlp_dpmsrdma_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 8, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DPMSRDMA_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_VLP_PBUS_156M, "vlp_vlp_pbus_156m",
+ vlp_vlp_pbus_156m_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 16, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_VLP_PBUS_156M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPM, "vlp_spm",
+ vlp_spm_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 24, 1,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPM_SHIFT),
+ /* VLP_CLK_CFG_8 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_MMINFRA, "vlp_mminfra", vlp_mminfra_parents,
+ VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET, VLP_CLK_CFG_8_CLR,
+ 0, 2, 7, VLP_CLK_CFG_UPDATE1, TOP_MUX_MMINFRA_VLP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 31),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_USB_TOP, "vlp_usb", vlp_usb_parents,
+ VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET, VLP_CLK_CFG_8_CLR,
+ 8, 1, 15, VLP_CLK_CFG_UPDATE1, TOP_MUX_USB_TOP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 30),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_USB_XHCI, "vlp_usb_xhci", vlp_usb_parents,
+ VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET, VLP_CLK_CFG_8_CLR,
+ 16, 1, 23, VLP_CLK_CFG_UPDATE1, TOP_MUX_SSUSB_XHCI_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 29),
+ MUX_CLR_SET_UPD(CLK_VLP_NOC_VLP, "vlp_noc_vlp",
+ vlp_noc_vlp_parents, VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET,
+ VLP_CLK_CFG_8_CLR, 24, 2,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_NOC_VLP_SHIFT),
+ /* VLP_CLK_CFG_9 */
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUDIO_H, "vlp_audio_h",
+ vlp_audio_h_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 0, 2, 7, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUDIO_H_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 27),
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUD_ENGEN1, "vlp_aud_engen1",
+ vlp_aud_engen1_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 8, 2, 15, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUD_ENGEN1_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 26),
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUD_ENGEN2, "vlp_aud_engen2",
+ vlp_aud_engen2_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 16, 2, 23, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUD_ENGEN2_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 25),
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUD_INTBUS, "vlp_aud_intbus",
+ vlp_aud_intbus_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 24, 2, 31, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUD_INTBUS_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 24),
+ /* VLP_CLK_CFG_10 */
+ MUX_CLR_SET_UPD(CLK_VLP_SPVLP_26M, "vlp_spvlp_26m",
+ vlp_spvlp_26m_parents, VLP_CLK_CFG_10, VLP_CLK_CFG_10_SET,
+ VLP_CLK_CFG_10_CLR, 0, 1,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPU_VLP_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPU0_VLP, "vlp_spu0_vlp",
+ vlp_spu0_vlp_parents, VLP_CLK_CFG_10, VLP_CLK_CFG_10_SET,
+ VLP_CLK_CFG_10_CLR, 8, 3,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPU0_VLP_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPU1_VLP, "vlp_spu1_vlp",
+ vlp_spu1_vlp_parents, VLP_CLK_CFG_10, VLP_CLK_CFG_10_SET,
+ VLP_CLK_CFG_10_CLR, 16, 3,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPU1_VLP_SHIFT),
+};
+
+static const struct mtk_pll_data vlp_plls[] = {
+ PLL_FENC(CLK_VLP_APLL1, "vlp_apll1", VLP_APLL1_CON0, 0x0358, 1, 0,
+ VLP_APLL1_CON1, 24, VLP_APLL1_CON2, 0, 32, 0),
+ PLL_FENC(CLK_VLP_APLL2, "vlp_apll2", VLP_APLL2_CON0, 0x0358, 0, 0,
+ VLP_APLL2_CON1, 24, VLP_APLL2_CON2, 0, 32, 1),
+};
+
+static const struct regmap_config vlpckgen_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x1000,
+ .fast_io = true,
+};
+
+static int clk_mt8196_vlp_probe(struct platform_device *pdev)
+{
+	void __iomem *base;
+ struct clk_hw_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+
+ clk_data = mtk_alloc_clk_data(ARRAY_SIZE(vlp_muxes) +
+ ARRAY_SIZE(vlp_plls) +
+ ARRAY_SIZE(vlp_divs));
+ if (!clk_data)
+ return -ENOMEM;
+
+	base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(base)) {
+		r = PTR_ERR(base);
+		goto free_clk_data;
+	}
+
+	regmap = devm_regmap_init_mmio(dev, base, &vlpckgen_regmap_config);
+	if (IS_ERR(regmap)) {
+		r = PTR_ERR(regmap);
+		goto free_clk_data;
+	}
+
+ r = mtk_clk_register_factors(vlp_divs, ARRAY_SIZE(vlp_divs), clk_data);
+ if (r)
+ goto free_clk_data;
+
+ r = mtk_clk_register_muxes(&pdev->dev, vlp_muxes, ARRAY_SIZE(vlp_muxes),
+ node, &mt8196_clk_vlp_lock, clk_data);
+ if (r)
+ goto unregister_factors;
+
+ r = mtk_clk_register_plls(node, vlp_plls, ARRAY_SIZE(vlp_plls),
+ clk_data);
+ if (r)
+ goto unregister_muxes;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ /* Initialize APLL tuner registers */
+ regmap_write(regmap, VLP_APLL1_TUNER_CON0, VLP_APLL1_TUNER_CON0_VALUE);
+ regmap_write(regmap, VLP_APLL2_TUNER_CON0, VLP_APLL2_TUNER_CON0_VALUE);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(vlp_plls, ARRAY_SIZE(vlp_plls), clk_data);
+unregister_muxes:
+ mtk_clk_unregister_muxes(vlp_muxes, ARRAY_SIZE(vlp_muxes), clk_data);
+unregister_factors:
+ mtk_clk_unregister_factors(vlp_divs, ARRAY_SIZE(vlp_divs), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return r;
+}
+
+static void clk_mt8196_vlp_remove(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(vlp_plls, ARRAY_SIZE(vlp_plls), clk_data);
+ mtk_clk_unregister_muxes(vlp_muxes, ARRAY_SIZE(vlp_muxes), clk_data);
+ mtk_clk_unregister_factors(vlp_divs, ARRAY_SIZE(vlp_divs), clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8196_vlp_ck[] = {
+ { .compatible = "mediatek,mt8196-vlpckgen" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_vlp_ck);
+
+static struct platform_driver clk_mt8196_vlp_drv = {
+ .probe = clk_mt8196_vlp_probe,
+ .remove = clk_mt8196_vlp_remove,
+ .driver = {
+ .name = "clk-mt8196-vlpck",
+ .of_match_table = of_match_clk_mt8196_vlp_ck,
+ },
+};
+
+module_platform_driver(clk_mt8196_vlp_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 VLP clock generator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index ba1d1c495bc2..19cd27941747 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -685,4 +685,20 @@ void mtk_clk_simple_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(mtk_clk_simple_remove);
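+
+/*
+ * Look up the regmap behind the optional "mediatek,hardware-voter"
+ * phandle. Returns NULL when the property is absent, so callers can treat
+ * hardware voter support as optional; lookup failures yield an ERR_PTR.
+ */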
+struct regmap *mtk_clk_get_hwv_regmap(struct device_node *node)
+{
+ struct device_node *hwv_node;
+ struct regmap *regmap_hwv;
+
+ hwv_node = of_parse_phandle(node, "mediatek,hardware-voter", 0);
+ if (!hwv_node)
+ return NULL;
+
+ regmap_hwv = device_node_to_regmap(hwv_node);
+ of_node_put(hwv_node);
+
+ return regmap_hwv;
+}
+EXPORT_SYMBOL_GPL(mtk_clk_get_hwv_regmap);
+
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index c17fe1c2d732..5417b9264e6d 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -20,6 +20,8 @@
#define MHZ (1000 * 1000)
+#define MTK_WAIT_HWV_DONE_US 30
+
struct platform_device;
/*
@@ -173,6 +175,25 @@ struct mtk_composite {
.flags = 0, \
}
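+
+/* Composite clock combining a parent mux, a rate divider and a gate. */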
+#define MUX_DIV_GATE(_id, _name, _parents, \
+ _mux_reg, _mux_shift, _mux_width, \
+ _div_reg, _div_shift, _div_width, \
+ _gate_reg, _gate_shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_names = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .mux_reg = _mux_reg, \
+ .mux_shift = _mux_shift, \
+ .mux_width = _mux_width, \
+ .divider_reg = _div_reg, \
+ .divider_shift = _div_shift, \
+ .divider_width = _div_width, \
+ .gate_reg = _gate_reg, \
+ .gate_shift = _gate_shift, \
+ .flags = CLK_SET_RATE_PARENT, \
+ }
+
int mtk_clk_register_composites(struct device *dev,
const struct mtk_composite *mcs, int num,
void __iomem *base, spinlock_t *lock,
@@ -245,5 +266,6 @@ int mtk_clk_pdev_probe(struct platform_device *pdev);
void mtk_clk_pdev_remove(struct platform_device *pdev);
int mtk_clk_simple_probe(struct platform_device *pdev);
void mtk_clk_simple_remove(struct platform_device *pdev);
+struct regmap *mtk_clk_get_hwv_regmap(struct device_node *node);
#endif /* __DRV_CLK_MTK_H */
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index 60990296450b..c5af6dc078a3 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -8,6 +8,7 @@
#include <linux/clk-provider.h>
#include <linux/compiler_types.h>
#include <linux/container_of.h>
+#include <linux/dev_printk.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -15,11 +16,15 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include "clk-mtk.h"
#include "clk-mux.h"
+#define MTK_WAIT_FENC_DONE_US 30
+
struct mtk_clk_mux {
struct clk_hw hw;
struct regmap *regmap;
+ struct regmap *regmap_hwv;
const struct mtk_mux *data;
spinlock_t *lock;
bool reparent;
@@ -30,6 +35,33 @@ static inline struct mtk_clk_mux *to_mtk_clk_mux(struct clk_hw *hw)
return container_of(hw, struct mtk_clk_mux, hw);
}
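+
+/*
+ * Ungate through the CLR register, then poll the fenc status monitor until
+ * the hardware reports the clock as enabled.
+ */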
+static int mtk_clk_mux_fenc_enable_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+ else
+ __acquire(mux->lock);
+
+ regmap_write(mux->regmap, mux->data->clr_ofs,
+ BIT(mux->data->gate_shift));
+
+ ret = regmap_read_poll_timeout_atomic(mux->regmap, mux->data->fenc_sta_mon_ofs,
+ val, val & BIT(mux->data->fenc_shift), 1,
+ MTK_WAIT_FENC_DONE_US);
+
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+ else
+ __release(mux->lock);
+
+ return ret;
+}
+
static int mtk_clk_mux_enable_setclr(struct clk_hw *hw)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
@@ -70,6 +102,16 @@ static void mtk_clk_mux_disable_setclr(struct clk_hw *hw)
BIT(mux->data->gate_shift));
}
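+
+/* Report the enable state from the fenc status monitor, not the gate bit. */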
+static int mtk_clk_mux_fenc_is_enabled(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 val;
+
+ regmap_read(mux->regmap, mux->data->fenc_sta_mon_ofs, &val);
+
+ return !!(val & BIT(mux->data->fenc_shift));
+}
+
static int mtk_clk_mux_is_enabled(struct clk_hw *hw)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
@@ -80,6 +122,41 @@ static int mtk_clk_mux_is_enabled(struct clk_hw *hw)
return (val & BIT(mux->data->gate_shift)) == 0;
}
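+
+/*
+ * Vote the clock on through the hardware voter, wait for the vote to be
+ * acknowledged, then poll the fenc status monitor until the clock runs.
+ */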
+static int mtk_clk_mux_hwv_fenc_enable(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 val;
+ int ret;
+
+ regmap_write(mux->regmap_hwv, mux->data->hwv_set_ofs,
+ BIT(mux->data->gate_shift));
+
+ ret = regmap_read_poll_timeout_atomic(mux->regmap_hwv, mux->data->hwv_sta_ofs,
+ val, val & BIT(mux->data->gate_shift), 0,
+ MTK_WAIT_HWV_DONE_US);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout_atomic(mux->regmap, mux->data->fenc_sta_mon_ofs,
+ val, val & BIT(mux->data->fenc_shift), 1,
+ MTK_WAIT_FENC_DONE_US);
+
+ return ret;
+}
+
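+/* Drop the HWV vote and wait for the voter to acknowledge the request. */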
+static void mtk_clk_mux_hwv_disable(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 val;
+
+ regmap_write(mux->regmap_hwv, mux->data->hwv_clr_ofs,
+ BIT(mux->data->gate_shift));
+
+ regmap_read_poll_timeout_atomic(mux->regmap_hwv, mux->data->hwv_sta_ofs,
+ val, (val & BIT(mux->data->gate_shift)),
+ 0, MTK_WAIT_HWV_DONE_US);
+}
+
static u8 mtk_clk_mux_get_parent(struct clk_hw *hw)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
@@ -146,9 +223,15 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
static int mtk_clk_mux_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ return clk_mux_determine_rate_flags(hw, req, 0);
+}
+
+static bool mtk_clk_mux_uses_hwv(const struct clk_ops *ops)
+{
+ if (ops == &mtk_mux_gate_hwv_fenc_clr_set_upd_ops)
+ return true;
- return clk_mux_determine_rate_flags(hw, req, mux->data->flags);
+ return false;
}
const struct clk_ops mtk_mux_clr_set_upd_ops = {
@@ -168,9 +251,30 @@ const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
};
EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops);
+const struct clk_ops mtk_mux_gate_fenc_clr_set_upd_ops = {
+ .enable = mtk_clk_mux_fenc_enable_setclr,
+ .disable = mtk_clk_mux_disable_setclr,
+ .is_enabled = mtk_clk_mux_fenc_is_enabled,
+ .get_parent = mtk_clk_mux_get_parent,
+ .set_parent = mtk_clk_mux_set_parent_setclr_lock,
+ .determine_rate = mtk_clk_mux_determine_rate,
+};
+EXPORT_SYMBOL_GPL(mtk_mux_gate_fenc_clr_set_upd_ops);
+
+const struct clk_ops mtk_mux_gate_hwv_fenc_clr_set_upd_ops = {
+ .enable = mtk_clk_mux_hwv_fenc_enable,
+ .disable = mtk_clk_mux_hwv_disable,
+ .is_enabled = mtk_clk_mux_fenc_is_enabled,
+ .get_parent = mtk_clk_mux_get_parent,
+ .set_parent = mtk_clk_mux_set_parent_setclr_lock,
+ .determine_rate = mtk_clk_mux_determine_rate,
+};
+EXPORT_SYMBOL_GPL(mtk_mux_gate_hwv_fenc_clr_set_upd_ops);
+
static struct clk_hw *mtk_clk_register_mux(struct device *dev,
const struct mtk_mux *mux,
struct regmap *regmap,
+ struct regmap *regmap_hwv,
spinlock_t *lock)
{
struct mtk_clk_mux *clk_mux;
@@ -186,8 +290,13 @@ static struct clk_hw *mtk_clk_register_mux(struct device *dev,
init.parent_names = mux->parent_names;
init.num_parents = mux->num_parents;
init.ops = mux->ops;
+ if (mtk_clk_mux_uses_hwv(init.ops) && !regmap_hwv)
+ return dev_err_ptr_probe(
+ dev, -ENXIO,
+ "regmap not found for hardware voter clocks\n");
clk_mux->regmap = regmap;
+ clk_mux->regmap_hwv = regmap_hwv;
clk_mux->data = mux;
clk_mux->lock = lock;
clk_mux->hw.init = &init;
@@ -220,6 +329,7 @@ int mtk_clk_register_muxes(struct device *dev,
struct clk_hw_onecell_data *clk_data)
{
struct regmap *regmap;
+ struct regmap *regmap_hwv;
struct clk_hw *hw;
int i;
@@ -229,6 +339,12 @@ int mtk_clk_register_muxes(struct device *dev,
return PTR_ERR(regmap);
}
+ regmap_hwv = mtk_clk_get_hwv_regmap(node);
+ if (IS_ERR(regmap_hwv))
+ return dev_err_probe(
+ dev, PTR_ERR(regmap_hwv),
+ "Cannot find hardware voter regmap for %pOF\n", node);
+
for (i = 0; i < num; i++) {
const struct mtk_mux *mux = &muxes[i];
@@ -238,7 +354,7 @@ int mtk_clk_register_muxes(struct device *dev,
continue;
}
- hw = mtk_clk_register_mux(dev, mux, regmap, lock);
+ hw = mtk_clk_register_mux(dev, mux, regmap, regmap_hwv, lock);
if (IS_ERR(hw)) {
pr_err("Failed to register clk %s: %pe\n", mux->name,
diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
index 943ad1d7ce4b..151e56dcf884 100644
--- a/drivers/clk/mediatek/clk-mux.h
+++ b/drivers/clk/mediatek/clk-mux.h
@@ -29,10 +29,16 @@ struct mtk_mux {
u32 clr_ofs;
u32 upd_ofs;
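+
+	/* optional hardware voter and fenc status monitor register offsets */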
+ u32 hwv_set_ofs;
+ u32 hwv_clr_ofs;
+ u32 hwv_sta_ofs;
+ u32 fenc_sta_mon_ofs;
+
u8 mux_shift;
u8 mux_width;
u8 gate_shift;
s8 upd_shift;
+ u8 fenc_shift;
const struct clk_ops *ops;
signed char num_parents;
@@ -77,6 +83,8 @@ struct mtk_mux {
extern const struct clk_ops mtk_mux_clr_set_upd_ops;
extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
+extern const struct clk_ops mtk_mux_gate_fenc_clr_set_upd_ops;
+extern const struct clk_ops mtk_mux_gate_hwv_fenc_clr_set_upd_ops;
#define MUX_GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
_mux_set_ofs, _mux_clr_ofs, _shift, _width, \
@@ -118,6 +126,85 @@ extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
0, _upd_ofs, _upd, CLK_SET_RATE_PARENT, \
mtk_mux_clr_set_upd_ops)
+#define MUX_GATE_HWV_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _hwv_sta_ofs, _hwv_set_ofs, _hwv_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .mux_ofs = _mux_ofs, \
+ .set_ofs = _mux_set_ofs, \
+ .clr_ofs = _mux_clr_ofs, \
+ .hwv_sta_ofs = _hwv_sta_ofs, \
+ .hwv_set_ofs = _hwv_set_ofs, \
+ .hwv_clr_ofs = _hwv_clr_ofs, \
+ .upd_ofs = _upd_ofs, \
+ .fenc_sta_mon_ofs = _fenc_sta_mon_ofs, \
+ .mux_shift = _shift, \
+ .mux_width = _width, \
+ .gate_shift = _gate, \
+ .upd_shift = _upd, \
+ .fenc_shift = _fenc, \
+ .parent_names = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .flags = _flags, \
+ .ops = &mtk_mux_gate_hwv_fenc_clr_set_upd_ops, \
+ }
+
+#define MUX_GATE_HWV_FENC_CLR_SET_UPD(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _hwv_sta_ofs, _hwv_set_ofs, _hwv_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc) \
+ MUX_GATE_HWV_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _hwv_sta_ofs, _hwv_set_ofs, _hwv_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, 0)
+
+#define MUX_GATE_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, _paridx, \
+ _num_parents, _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .mux_ofs = _mux_ofs, \
+ .set_ofs = _mux_set_ofs, \
+ .clr_ofs = _mux_clr_ofs, \
+ .upd_ofs = _upd_ofs, \
+ .fenc_sta_mon_ofs = _fenc_sta_mon_ofs, \
+ .mux_shift = _shift, \
+ .mux_width = _width, \
+ .gate_shift = _gate, \
+ .upd_shift = _upd, \
+ .fenc_shift = _fenc, \
+ .parent_names = _parents, \
+ .parent_index = _paridx, \
+ .num_parents = _num_parents, \
+ .flags = _flags, \
+ .ops = &mtk_mux_gate_fenc_clr_set_upd_ops, \
+ }
+
+#define MUX_GATE_FENC_CLR_SET_UPD(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc) \
+ MUX_GATE_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, \
+ NULL, ARRAY_SIZE(_parents), _mux_ofs, \
+ _mux_set_ofs, _mux_clr_ofs, _shift, \
+ _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, 0)
+
+#define MUX_GATE_FENC_CLR_SET_UPD_INDEXED(_id, _name, _parents, _paridx, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc) \
+ MUX_GATE_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, _paridx, \
+ ARRAY_SIZE(_paridx), _mux_ofs, _mux_set_ofs, \
+ _mux_clr_ofs, _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, 0)
+
int mtk_clk_register_muxes(struct device *dev,
const struct mtk_mux *muxes,
int num, struct device_node *node,
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index ce453e1718e5..cd2b6ce551c6 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -37,6 +37,13 @@ int mtk_pll_is_prepared(struct clk_hw *hw)
return (readl(pll->en_addr) & BIT(pll->data->pll_en_bit)) != 0;
}
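+
+/* The fenc status monitor bit reflects the actual PLL output enable state. */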
+static int mtk_pll_fenc_is_prepared(struct clk_hw *hw)
+{
+ struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+
+ return !!(readl(pll->fenc_addr) & BIT(pll->data->fenc_sta_bit));
+}
+
static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin,
u32 pcw, int postdiv)
{
@@ -200,16 +207,19 @@ unsigned long mtk_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
return __mtk_pll_recalc_rate(pll, parent_rate, pcw, postdiv);
}
-long mtk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+int mtk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
u32 pcw = 0;
int postdiv;
- mtk_pll_calc_values(pll, &pcw, &postdiv, rate, *prate);
+ mtk_pll_calc_values(pll, &pcw, &postdiv, req->rate,
+ req->best_parent_rate);
+
+ req->rate = __mtk_pll_recalc_rate(pll, req->best_parent_rate, pcw,
+ postdiv);
- return __mtk_pll_recalc_rate(pll, *prate, pcw, postdiv);
+ return 0;
}
int mtk_pll_prepare(struct clk_hw *hw)
@@ -274,14 +284,43 @@ void mtk_pll_unprepare(struct clk_hw *hw)
writel(r, pll->pwr_addr);
}
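+
+/* Enable/disable the PLL through the shared PLLEN set/clr registers. */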
+static int mtk_pll_prepare_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+
+ writel(BIT(pll->data->pll_en_bit), pll->en_set_addr);
+
+ /* Wait 20us after enable for the PLL to stabilize */
+ udelay(20);
+
+ return 0;
+}
+
+static void mtk_pll_unprepare_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+
+ writel(BIT(pll->data->pll_en_bit), pll->en_clr_addr);
+}
+
const struct clk_ops mtk_pll_ops = {
.is_prepared = mtk_pll_is_prepared,
.prepare = mtk_pll_prepare,
.unprepare = mtk_pll_unprepare,
.recalc_rate = mtk_pll_recalc_rate,
- .round_rate = mtk_pll_round_rate,
+ .determine_rate = mtk_pll_determine_rate,
+ .set_rate = mtk_pll_set_rate,
+};
+
+const struct clk_ops mtk_pll_fenc_clr_set_ops = {
+ .is_prepared = mtk_pll_fenc_is_prepared,
+ .prepare = mtk_pll_prepare_setclr,
+ .unprepare = mtk_pll_unprepare_setclr,
+ .recalc_rate = mtk_pll_recalc_rate,
+ .determine_rate = mtk_pll_determine_rate,
.set_rate = mtk_pll_set_rate,
};
+EXPORT_SYMBOL_GPL(mtk_pll_fenc_clr_set_ops);
struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll,
const struct mtk_pll_data *data,
@@ -308,9 +347,15 @@ struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll,
pll->en_addr = base + data->en_reg;
else
pll->en_addr = pll->base_addr + REG_CON0;
+ if (data->en_set_reg)
+ pll->en_set_addr = base + data->en_set_reg;
+ if (data->en_clr_reg)
+ pll->en_clr_addr = base + data->en_clr_reg;
pll->hw.init = &init;
pll->data = data;
+ pll->fenc_addr = base + data->fenc_sta_ofs;
+
init.name = data->name;
init.flags = (data->flags & PLL_AO) ? CLK_IS_CRITICAL : 0;
init.ops = pll_ops;
@@ -333,12 +378,13 @@ struct clk_hw *mtk_clk_register_pll(const struct mtk_pll_data *data,
{
struct mtk_clk_pll *pll;
struct clk_hw *hw;
+ const struct clk_ops *pll_ops = data->ops ? data->ops : &mtk_pll_ops;
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
if (!pll)
return ERR_PTR(-ENOMEM);
- hw = mtk_clk_register_pll_ops(pll, data, base, &mtk_pll_ops);
+ hw = mtk_clk_register_pll_ops(pll, data, base, pll_ops);
if (IS_ERR(hw))
kfree(pll);
diff --git a/drivers/clk/mediatek/clk-pll.h b/drivers/clk/mediatek/clk-pll.h
index 285c8db958b3..d71c150ce83e 100644
--- a/drivers/clk/mediatek/clk-pll.h
+++ b/drivers/clk/mediatek/clk-pll.h
@@ -29,6 +29,7 @@ struct mtk_pll_data {
u32 reg;
u32 pwr_reg;
u32 en_mask;
+ u32 fenc_sta_ofs;
u32 pd_reg;
u32 tuner_reg;
u32 tuner_en_reg;
@@ -47,8 +48,11 @@ struct mtk_pll_data {
const struct mtk_pll_div_table *div_table;
const char *parent_name;
u32 en_reg;
+ u32 en_set_reg;
+ u32 en_clr_reg;
u8 pll_en_bit; /* Assume 0, indicates BIT(0) by default */
u8 pcw_chg_bit;
+ u8 fenc_sta_bit;
};
/*
@@ -68,6 +72,9 @@ struct mtk_clk_pll {
void __iomem *pcw_addr;
void __iomem *pcw_chg_addr;
void __iomem *en_addr;
+ void __iomem *en_set_addr;
+ void __iomem *en_clr_addr;
+ void __iomem *fenc_addr;
const struct mtk_pll_data *data;
};
@@ -78,6 +85,7 @@ void mtk_clk_unregister_plls(const struct mtk_pll_data *plls, int num_plls,
struct clk_hw_onecell_data *clk_data);
extern const struct clk_ops mtk_pll_ops;
+extern const struct clk_ops mtk_pll_fenc_clr_set_ops;
static inline struct mtk_clk_pll *to_mtk_clk_pll(struct clk_hw *hw)
{
@@ -96,8 +104,7 @@ void mtk_pll_calc_values(struct mtk_clk_pll *pll, u32 *pcw, u32 *postdiv,
u32 freq, u32 fin);
int mtk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
-long mtk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate);
+int mtk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req);
struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll,
const struct mtk_pll_data *data,
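
Taken together, the new mtk_pll_data fields describe PLLs whose enable bit sits behind write-1-to-set / write-1-to-clear registers and whose status is reported through a separate FENC status register; a SoC driver opts in by pointing the table's .ops at mtk_pll_fenc_clr_set_ops. A hedged sketch of such an entry; every name, offset and bit below is illustrative, not taken from a datasheet:

/* Illustrative only: the PLL name, offsets and bits are made up.
 * A real SoC driver would take these from its register map. */
static const struct mtk_pll_data example_pll = {
	.id = 0,
	.name = "examplepll",
	.reg = 0x0204,			/* CON0 block base */
	.pwr_reg = 0x0214,
	.en_set_reg = 0x0208,		/* write 1 to set the enable bit */
	.en_clr_reg = 0x020c,		/* write 1 to clear the enable bit */
	.fenc_sta_ofs = 0x0f00,		/* FENC status register */
	.fenc_sta_bit = 3,		/* status bit for this PLL */
	.pcwbits = 22,
	.ops = &mtk_pll_fenc_clr_set_ops,
};
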
diff --git a/drivers/clk/mediatek/clk-pllfh.c b/drivers/clk/mediatek/clk-pllfh.c
index 094ec8a26d66..83630ee07ee9 100644
--- a/drivers/clk/mediatek/clk-pllfh.c
+++ b/drivers/clk/mediatek/clk-pllfh.c
@@ -42,7 +42,7 @@ static const struct clk_ops mtk_pllfh_ops = {
.prepare = mtk_pll_prepare,
.unprepare = mtk_pll_unprepare,
.recalc_rate = mtk_pll_recalc_rate,
- .round_rate = mtk_pll_round_rate,
+ .determine_rate = mtk_pll_determine_rate,
.set_rate = mtk_fhctl_set_rate,
};
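
Consumers are unaffected by the clk-pllfh.c conversion: clk_round_rate() remains the public API, and the clk core routes it through the provider's .determine_rate. A minimal consumer sketch; the function and clock name are illustrative:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Illustrative consumer: "examplepll" is a made-up clock name. */
static int example_set_600m(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, "examplepll");
	long rounded;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Routed through the provider's .determine_rate internally. */
	rounded = clk_round_rate(clk, 600 * 1000 * 1000);
	if (rounded <= 0)
		return rounded ? (int)rounded : -EINVAL;

	return clk_set_rate(clk, rounded);
}
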
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index ff003dc5ab20..71481607a6d5 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -5,6 +5,7 @@ menu "Clock support for Amlogic platforms"
config COMMON_CLK_MESON_REGMAP
tristate
select REGMAP
+ select MFD_SYSCON
config COMMON_CLK_MESON_DUALDIV
tristate
@@ -35,6 +36,8 @@ config COMMON_CLK_MESON_VCLK
select COMMON_CLK_MESON_REGMAP
config COMMON_CLK_MESON_CLKC_UTILS
+ select REGMAP
+ select MFD_SYSCON
tristate
config COMMON_CLK_MESON_AO_CLKC
@@ -43,11 +46,6 @@ config COMMON_CLK_MESON_AO_CLKC
select COMMON_CLK_MESON_CLKC_UTILS
select RESET_CONTROLLER
-config COMMON_CLK_MESON_EE_CLKC
- tristate
- select COMMON_CLK_MESON_REGMAP
- select COMMON_CLK_MESON_CLKC_UTILS
-
config COMMON_CLK_MESON_CPU_DYNDIV
tristate
select COMMON_CLK_MESON_REGMAP
@@ -72,12 +70,12 @@ config COMMON_CLK_GXBB
depends on ARM64
default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_VID_PLL_DIV
select COMMON_CLK_MESON_MPLL
select COMMON_CLK_MESON_PLL
select COMMON_CLK_MESON_AO_CLKC
- select COMMON_CLK_MESON_EE_CLKC
select MFD_SYSCON
help
Support for the clock controller on AmLogic S905 devices, aka gxbb.
@@ -88,11 +86,11 @@ config COMMON_CLK_AXG
depends on ARM64
default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_MPLL
select COMMON_CLK_MESON_PLL
select COMMON_CLK_MESON_AO_CLKC
- select COMMON_CLK_MESON_EE_CLKC
select MFD_SYSCON
help
Support for the clock controller on AmLogic A113D devices, aka axg.
@@ -106,7 +104,8 @@ config COMMON_CLK_AXG_AUDIO
select COMMON_CLK_MESON_SCLK_DIV
select COMMON_CLK_MESON_CLKC_UTILS
select REGMAP_MMIO
- select RESET_CONTROLLER
+ select AUXILIARY_BUS
+ imply RESET_MESON_AUX
help
Support for the audio clock controller on AmLogic A113D devices,
aka axg. Say Y if you want the audio subsystem to work.
@@ -165,11 +164,11 @@ config COMMON_CLK_G12A
depends on ARM64
default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_MPLL
select COMMON_CLK_MESON_PLL
select COMMON_CLK_MESON_AO_CLKC
- select COMMON_CLK_MESON_EE_CLKC
select COMMON_CLK_MESON_CPU_DYNDIV
select COMMON_CLK_MESON_VID_PLL_DIV
select COMMON_CLK_MESON_VCLK
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index bc56a47931c1..c6998e752c68 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -5,7 +5,6 @@ obj-$(CONFIG_COMMON_CLK_MESON_CLKC_UTILS) += meson-clkc-utils.o
obj-$(CONFIG_COMMON_CLK_MESON_AO_CLKC) += meson-aoclk.o
obj-$(CONFIG_COMMON_CLK_MESON_CPU_DYNDIV) += clk-cpu-dyndiv.o
obj-$(CONFIG_COMMON_CLK_MESON_DUALDIV) += clk-dualdiv.o
-obj-$(CONFIG_COMMON_CLK_MESON_EE_CLKC) += meson-eeclk.o
obj-$(CONFIG_COMMON_CLK_MESON_MPLL) += clk-mpll.o
obj-$(CONFIG_COMMON_CLK_MESON_PHASE) += clk-phase.o
obj-$(CONFIG_COMMON_CLK_MESON_PLL) += clk-pll.o
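
The a1-peripherals.c diff below namespaces every clock and table with an a1_ prefix, inlines the register offsets that previously came from a1-peripherals.h, and replaces the local MESON_GATE wrapper with an A1_PCLK macro that feeds a shared clk_parent_data and an explicit flags argument into MESON_PCLK. A hedged sketch of what the parameterized MESON_PCLK plausibly expands to; the real definition lives in the Meson clk headers and may differ in detail:

/* Illustrative expansion only -- not the actual header definition.
 * It shows the shape implied by the A1_PCLK call sites below: a
 * regmap gate with one clk_parent_data parent and caller flags. */
#define MESON_PCLK(_name, _reg, _bit, _pdata, _flags)			\
static struct clk_regmap _name = {					\
	.data = &(struct clk_regmap_gate_data) {			\
		.offset = (_reg),					\
		.bit_idx = (_bit),					\
	},								\
	.hw.init = &(struct clk_init_data) {				\
		.name = #_name,						\
		.ops = &clk_regmap_gate_ops,				\
		.parent_data = (_pdata),				\
		.num_parents = 1,					\
		.flags = (_flags),					\
	},								\
}
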
diff --git a/drivers/clk/meson/a1-peripherals.c b/drivers/clk/meson/a1-peripherals.c
index 36489e0f948a..5e0d58c01405 100644
--- a/drivers/clk/meson/a1-peripherals.c
+++ b/drivers/clk/meson/a1-peripherals.c
@@ -10,14 +10,43 @@
#include <linux/clk-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-#include "a1-peripherals.h"
#include "clk-dualdiv.h"
#include "clk-regmap.h"
#include "meson-clkc-utils.h"
#include <dt-bindings/clock/amlogic,a1-peripherals-clkc.h>
-static struct clk_regmap xtal_in = {
+#define SYS_OSCIN_CTRL 0x0
+#define RTC_BY_OSCIN_CTRL0 0x4
+#define RTC_BY_OSCIN_CTRL1 0x8
+#define RTC_CTRL 0xc
+#define SYS_CLK_CTRL0 0x10
+#define SYS_CLK_EN0 0x1c
+#define SYS_CLK_EN1 0x20
+#define AXI_CLK_EN 0x24
+#define DSPA_CLK_EN 0x28
+#define DSPB_CLK_EN 0x2c
+#define DSPA_CLK_CTRL0 0x30
+#define DSPB_CLK_CTRL0 0x34
+#define CLK12_24_CTRL 0x38
+#define GEN_CLK_CTRL 0x3c
+#define SAR_ADC_CLK_CTRL 0xc0
+#define PWM_CLK_AB_CTRL 0xc4
+#define PWM_CLK_CD_CTRL 0xc8
+#define PWM_CLK_EF_CTRL 0xcc
+#define SPICC_CLK_CTRL 0xd0
+#define TS_CLK_CTRL 0xd4
+#define SPIFC_CLK_CTRL 0xd8
+#define USB_BUSCLK_CTRL 0xdc
+#define SD_EMMC_CLK_CTRL 0xe0
+#define CECA_CLK_CTRL0 0xe4
+#define CECA_CLK_CTRL1 0xe8
+#define CECB_CLK_CTRL0 0xec
+#define CECB_CLK_CTRL1 0xf0
+#define PSRAM_CLK_CTRL 0xf4
+#define DMC_CLK_CTRL 0xf8
+
+static struct clk_regmap a1_xtal_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 0,
@@ -32,7 +61,7 @@ static struct clk_regmap xtal_in = {
},
};
-static struct clk_regmap fixpll_in = {
+static struct clk_regmap a1_fixpll_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 1,
@@ -47,7 +76,7 @@ static struct clk_regmap fixpll_in = {
},
};
-static struct clk_regmap usb_phy_in = {
+static struct clk_regmap a1_usb_phy_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 2,
@@ -62,7 +91,7 @@ static struct clk_regmap usb_phy_in = {
},
};
-static struct clk_regmap usb_ctrl_in = {
+static struct clk_regmap a1_usb_ctrl_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 3,
@@ -77,7 +106,7 @@ static struct clk_regmap usb_ctrl_in = {
},
};
-static struct clk_regmap hifipll_in = {
+static struct clk_regmap a1_hifipll_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 4,
@@ -92,7 +121,7 @@ static struct clk_regmap hifipll_in = {
},
};
-static struct clk_regmap syspll_in = {
+static struct clk_regmap a1_syspll_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 5,
@@ -107,7 +136,7 @@ static struct clk_regmap syspll_in = {
},
};
-static struct clk_regmap dds_in = {
+static struct clk_regmap a1_dds_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 6,
@@ -122,7 +151,7 @@ static struct clk_regmap dds_in = {
},
};
-static struct clk_regmap rtc_32k_in = {
+static struct clk_regmap a1_rtc_32k_in = {
.data = &(struct clk_regmap_gate_data){
.offset = RTC_BY_OSCIN_CTRL0,
.bit_idx = 31,
@@ -137,7 +166,7 @@ static struct clk_regmap rtc_32k_in = {
},
};
-static const struct meson_clk_dualdiv_param clk_32k_div_table[] = {
+static const struct meson_clk_dualdiv_param a1_32k_div_table[] = {
{
.dual = 1,
.n1 = 733,
@@ -148,7 +177,7 @@ static const struct meson_clk_dualdiv_param clk_32k_div_table[] = {
{}
};
-static struct clk_regmap rtc_32k_div = {
+static struct clk_regmap a1_rtc_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = RTC_BY_OSCIN_CTRL0,
@@ -175,19 +204,19 @@ static struct clk_regmap rtc_32k_div = {
.shift = 28,
.width = 1,
},
- .table = clk_32k_div_table,
+ .table = a1_32k_div_table,
},
.hw.init = &(struct clk_init_data){
.name = "rtc_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_in.hw
+ &a1_rtc_32k_in.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap rtc_32k_xtal = {
+static struct clk_regmap a1_rtc_32k_xtal = {
.data = &(struct clk_regmap_gate_data){
.offset = RTC_BY_OSCIN_CTRL1,
.bit_idx = 24,
@@ -196,13 +225,13 @@ static struct clk_regmap rtc_32k_xtal = {
.name = "rtc_32k_xtal",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_in.hw
+ &a1_rtc_32k_in.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap rtc_32k_sel = {
+static struct clk_regmap a1_rtc_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = RTC_CTRL,
.mask = 0x3,
@@ -213,15 +242,15 @@ static struct clk_regmap rtc_32k_sel = {
.name = "rtc_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_xtal.hw,
- &rtc_32k_div.hw,
+ &a1_rtc_32k_xtal.hw,
+ &a1_rtc_32k_div.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap rtc = {
+static struct clk_regmap a1_rtc = {
.data = &(struct clk_regmap_gate_data){
.offset = RTC_BY_OSCIN_CTRL0,
.bit_idx = 30,
@@ -230,38 +259,38 @@ static struct clk_regmap rtc = {
.name = "rtc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_sel.hw
+ &a1_rtc_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static u32 mux_table_sys[] = { 0, 1, 2, 3, 7 };
-static const struct clk_parent_data sys_parents[] = {
+static u32 a1_sys_parents_val_table[] = { 0, 1, 2, 3, 7 };
+static const struct clk_parent_data a1_sys_parents[] = {
{ .fw_name = "xtal" },
{ .fw_name = "fclk_div2" },
{ .fw_name = "fclk_div3" },
{ .fw_name = "fclk_div5" },
- { .hw = &rtc.hw },
+ { .hw = &a1_rtc.hw },
};
-static struct clk_regmap sys_b_sel = {
+static struct clk_regmap a1_sys_b_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SYS_CLK_CTRL0,
.mask = 0x7,
.shift = 26,
- .table = mux_table_sys,
+ .table = a1_sys_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "sys_b_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = sys_parents,
- .num_parents = ARRAY_SIZE(sys_parents),
+ .parent_data = a1_sys_parents,
+ .num_parents = ARRAY_SIZE(a1_sys_parents),
},
};
-static struct clk_regmap sys_b_div = {
+static struct clk_regmap a1_sys_b_div = {
.data = &(struct clk_regmap_div_data){
.offset = SYS_CLK_CTRL0,
.shift = 16,
@@ -271,14 +300,14 @@ static struct clk_regmap sys_b_div = {
.name = "sys_b_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_b_sel.hw
+ &a1_sys_b_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sys_b = {
+static struct clk_regmap a1_sys_b = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_CLK_CTRL0,
.bit_idx = 29,
@@ -287,29 +316,29 @@ static struct clk_regmap sys_b = {
.name = "sys_b",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_b_div.hw
+ &a1_sys_b_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sys_a_sel = {
+static struct clk_regmap a1_sys_a_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SYS_CLK_CTRL0,
.mask = 0x7,
.shift = 10,
- .table = mux_table_sys,
+ .table = a1_sys_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "sys_a_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = sys_parents,
- .num_parents = ARRAY_SIZE(sys_parents),
+ .parent_data = a1_sys_parents,
+ .num_parents = ARRAY_SIZE(a1_sys_parents),
},
};
-static struct clk_regmap sys_a_div = {
+static struct clk_regmap a1_sys_a_div = {
.data = &(struct clk_regmap_div_data){
.offset = SYS_CLK_CTRL0,
.shift = 0,
@@ -319,14 +348,14 @@ static struct clk_regmap sys_a_div = {
.name = "sys_a_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_a_sel.hw
+ &a1_sys_a_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sys_a = {
+static struct clk_regmap a1_sys_a = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_CLK_CTRL0,
.bit_idx = 13,
@@ -335,14 +364,14 @@ static struct clk_regmap sys_a = {
.name = "sys_a",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_a_div.hw
+ &a1_sys_a_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sys = {
+static struct clk_regmap a1_sys = {
.data = &(struct clk_regmap_mux_data){
.offset = SYS_CLK_CTRL0,
.mask = 0x1,
@@ -352,8 +381,8 @@ static struct clk_regmap sys = {
.name = "sys",
.ops = &clk_regmap_mux_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_a.hw,
- &sys_b.hw,
+ &a1_sys_a.hw,
+ &a1_sys_b.hw,
},
.num_parents = 2,
/*
@@ -369,32 +398,32 @@ static struct clk_regmap sys = {
},
};
-static u32 mux_table_dsp_ab[] = { 0, 1, 2, 3, 4, 7 };
-static const struct clk_parent_data dsp_ab_parent_data[] = {
+static u32 a1_dsp_parents_val_table[] = { 0, 1, 2, 3, 4, 7 };
+static const struct clk_parent_data a1_dsp_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div2", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
{ .fw_name = "hifi_pll", },
- { .hw = &rtc.hw },
+ { .hw = &a1_rtc.hw },
};
-static struct clk_regmap dspa_a_sel = {
+static struct clk_regmap a1_dspa_a_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPA_CLK_CTRL0,
.mask = 0x7,
.shift = 10,
- .table = mux_table_dsp_ab,
+ .table = a1_dsp_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "dspa_a_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = dsp_ab_parent_data,
- .num_parents = ARRAY_SIZE(dsp_ab_parent_data),
+ .parent_data = a1_dsp_parents,
+ .num_parents = ARRAY_SIZE(a1_dsp_parents),
},
};
-static struct clk_regmap dspa_a_div = {
+static struct clk_regmap a1_dspa_a_div = {
.data = &(struct clk_regmap_div_data){
.offset = DSPA_CLK_CTRL0,
.shift = 0,
@@ -404,14 +433,14 @@ static struct clk_regmap dspa_a_div = {
.name = "dspa_a_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_a_sel.hw
+ &a1_dspa_a_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_a = {
+static struct clk_regmap a1_dspa_a = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPA_CLK_CTRL0,
.bit_idx = 13,
@@ -420,29 +449,29 @@ static struct clk_regmap dspa_a = {
.name = "dspa_a",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_a_div.hw
+ &a1_dspa_a_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_b_sel = {
+static struct clk_regmap a1_dspa_b_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPA_CLK_CTRL0,
.mask = 0x7,
.shift = 26,
- .table = mux_table_dsp_ab,
+ .table = a1_dsp_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "dspa_b_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = dsp_ab_parent_data,
- .num_parents = ARRAY_SIZE(dsp_ab_parent_data),
+ .parent_data = a1_dsp_parents,
+ .num_parents = ARRAY_SIZE(a1_dsp_parents),
},
};
-static struct clk_regmap dspa_b_div = {
+static struct clk_regmap a1_dspa_b_div = {
.data = &(struct clk_regmap_div_data){
.offset = DSPA_CLK_CTRL0,
.shift = 16,
@@ -452,14 +481,14 @@ static struct clk_regmap dspa_b_div = {
.name = "dspa_b_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_b_sel.hw
+ &a1_dspa_b_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_b = {
+static struct clk_regmap a1_dspa_b = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPA_CLK_CTRL0,
.bit_idx = 29,
@@ -468,14 +497,14 @@ static struct clk_regmap dspa_b = {
.name = "dspa_b",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_b_div.hw
+ &a1_dspa_b_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_sel = {
+static struct clk_regmap a1_dspa_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPA_CLK_CTRL0,
.mask = 0x1,
@@ -485,15 +514,15 @@ static struct clk_regmap dspa_sel = {
.name = "dspa_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_a.hw,
- &dspa_b.hw,
+ &a1_dspa_a.hw,
+ &a1_dspa_b.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_en = {
+static struct clk_regmap a1_dspa_en = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPA_CLK_EN,
.bit_idx = 1,
@@ -502,14 +531,14 @@ static struct clk_regmap dspa_en = {
.name = "dspa_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_sel.hw
+ &a1_dspa_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_en_nic = {
+static struct clk_regmap a1_dspa_en_nic = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPA_CLK_EN,
.bit_idx = 0,
@@ -518,29 +547,29 @@ static struct clk_regmap dspa_en_nic = {
.name = "dspa_en_nic",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_sel.hw
+ &a1_dspa_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_a_sel = {
+static struct clk_regmap a1_dspb_a_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPB_CLK_CTRL0,
.mask = 0x7,
.shift = 10,
- .table = mux_table_dsp_ab,
+ .table = a1_dsp_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "dspb_a_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = dsp_ab_parent_data,
- .num_parents = ARRAY_SIZE(dsp_ab_parent_data),
+ .parent_data = a1_dsp_parents,
+ .num_parents = ARRAY_SIZE(a1_dsp_parents),
},
};
-static struct clk_regmap dspb_a_div = {
+static struct clk_regmap a1_dspb_a_div = {
.data = &(struct clk_regmap_div_data){
.offset = DSPB_CLK_CTRL0,
.shift = 0,
@@ -550,14 +579,14 @@ static struct clk_regmap dspb_a_div = {
.name = "dspb_a_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_a_sel.hw
+ &a1_dspb_a_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_a = {
+static struct clk_regmap a1_dspb_a = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPB_CLK_CTRL0,
.bit_idx = 13,
@@ -566,29 +595,29 @@ static struct clk_regmap dspb_a = {
.name = "dspb_a",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_a_div.hw
+ &a1_dspb_a_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_b_sel = {
+static struct clk_regmap a1_dspb_b_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPB_CLK_CTRL0,
.mask = 0x7,
.shift = 26,
- .table = mux_table_dsp_ab,
+ .table = a1_dsp_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "dspb_b_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = dsp_ab_parent_data,
- .num_parents = ARRAY_SIZE(dsp_ab_parent_data),
+ .parent_data = a1_dsp_parents,
+ .num_parents = ARRAY_SIZE(a1_dsp_parents),
},
};
-static struct clk_regmap dspb_b_div = {
+static struct clk_regmap a1_dspb_b_div = {
.data = &(struct clk_regmap_div_data){
.offset = DSPB_CLK_CTRL0,
.shift = 16,
@@ -598,14 +627,14 @@ static struct clk_regmap dspb_b_div = {
.name = "dspb_b_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_b_sel.hw
+ &a1_dspb_b_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_b = {
+static struct clk_regmap a1_dspb_b = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPB_CLK_CTRL0,
.bit_idx = 29,
@@ -614,14 +643,14 @@ static struct clk_regmap dspb_b = {
.name = "dspb_b",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_b_div.hw
+ &a1_dspb_b_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_sel = {
+static struct clk_regmap a1_dspb_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPB_CLK_CTRL0,
.mask = 0x1,
@@ -631,15 +660,15 @@ static struct clk_regmap dspb_sel = {
.name = "dspb_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_a.hw,
- &dspb_b.hw,
+ &a1_dspb_a.hw,
+ &a1_dspb_b.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_en = {
+static struct clk_regmap a1_dspb_en = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPB_CLK_EN,
.bit_idx = 1,
@@ -648,14 +677,14 @@ static struct clk_regmap dspb_en = {
.name = "dspb_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_sel.hw
+ &a1_dspb_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_en_nic = {
+static struct clk_regmap a1_dspb_en_nic = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPB_CLK_EN,
.bit_idx = 0,
@@ -664,14 +693,14 @@ static struct clk_regmap dspb_en_nic = {
.name = "dspb_en_nic",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_sel.hw
+ &a1_dspb_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap clk_24m = {
+static struct clk_regmap a1_24m = {
.data = &(struct clk_regmap_gate_data){
.offset = CLK12_24_CTRL,
.bit_idx = 11,
@@ -686,20 +715,20 @@ static struct clk_regmap clk_24m = {
},
};
-static struct clk_fixed_factor clk_24m_div2 = {
+static struct clk_fixed_factor a1_24m_div2 = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
.name = "24m_div2",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_24m.hw
+ &a1_24m.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap clk_12m = {
+static struct clk_regmap a1_12m = {
.data = &(struct clk_regmap_gate_data){
.offset = CLK12_24_CTRL,
.bit_idx = 10,
@@ -708,13 +737,13 @@ static struct clk_regmap clk_12m = {
.name = "12m",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_24m_div2.hw
+ &a1_24m_div2.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div2_divn_pre = {
+static struct clk_regmap a1_fclk_div2_divn_pre = {
.data = &(struct clk_regmap_div_data){
.offset = CLK12_24_CTRL,
.shift = 0,
@@ -730,7 +759,7 @@ static struct clk_regmap fclk_div2_divn_pre = {
},
};
-static struct clk_regmap fclk_div2_divn = {
+static struct clk_regmap a1_fclk_div2_divn = {
.data = &(struct clk_regmap_gate_data){
.offset = CLK12_24_CTRL,
.bit_idx = 12,
@@ -739,7 +768,7 @@ static struct clk_regmap fclk_div2_divn = {
.name = "fclk_div2_divn",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div2_divn_pre.hw
+ &a1_fclk_div2_divn_pre.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -750,10 +779,10 @@ static struct clk_regmap fclk_div2_divn = {
 * the index 2 is sys_pll_div16; it will be implemented in the CPU clock driver.
 * The index 4 is the clock measurement source; it's not supported yet.
*/
-static u32 gen_table[] = { 0, 1, 3, 5, 6, 7, 8 };
-static const struct clk_parent_data gen_parent_data[] = {
+static u32 a1_gen_parents_val_table[] = { 0, 1, 3, 5, 6, 7, 8 };
+static const struct clk_parent_data a1_gen_parents[] = {
{ .fw_name = "xtal", },
- { .hw = &rtc.hw },
+ { .hw = &a1_rtc.hw },
{ .fw_name = "hifi_pll", },
{ .fw_name = "fclk_div2", },
{ .fw_name = "fclk_div3", },
@@ -761,18 +790,18 @@ static const struct clk_parent_data gen_parent_data[] = {
{ .fw_name = "fclk_div7", },
};
-static struct clk_regmap gen_sel = {
+static struct clk_regmap a1_gen_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = GEN_CLK_CTRL,
.mask = 0xf,
.shift = 12,
- .table = gen_table,
+ .table = a1_gen_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "gen_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gen_parent_data,
- .num_parents = ARRAY_SIZE(gen_parent_data),
+ .parent_data = a1_gen_parents,
+ .num_parents = ARRAY_SIZE(a1_gen_parents),
/*
* The GEN clock can be connected to an external pad, so it
* may be set up directly from the device tree. Additionally,
@@ -784,7 +813,7 @@ static struct clk_regmap gen_sel = {
},
};
-static struct clk_regmap gen_div = {
+static struct clk_regmap a1_gen_div = {
.data = &(struct clk_regmap_div_data){
.offset = GEN_CLK_CTRL,
.shift = 0,
@@ -794,14 +823,14 @@ static struct clk_regmap gen_div = {
.name = "gen_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gen_sel.hw
+ &a1_gen_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap gen = {
+static struct clk_regmap a1_gen = {
.data = &(struct clk_regmap_gate_data){
.offset = GEN_CLK_CTRL,
.bit_idx = 11,
@@ -810,14 +839,14 @@ static struct clk_regmap gen = {
.name = "gen",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gen_div.hw
+ &a1_gen_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap saradc_sel = {
+static struct clk_regmap a1_saradc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SAR_ADC_CLK_CTRL,
.mask = 0x1,
@@ -828,13 +857,13 @@ static struct clk_regmap saradc_sel = {
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
- { .hw = &sys.hw, },
+ { .hw = &a1_sys.hw, },
},
.num_parents = 2,
},
};
-static struct clk_regmap saradc_div = {
+static struct clk_regmap a1_saradc_div = {
.data = &(struct clk_regmap_div_data){
.offset = SAR_ADC_CLK_CTRL,
.shift = 0,
@@ -844,14 +873,14 @@ static struct clk_regmap saradc_div = {
.name = "saradc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &saradc_sel.hw
+ &a1_saradc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap saradc = {
+static struct clk_regmap a1_saradc = {
.data = &(struct clk_regmap_gate_data){
.offset = SAR_ADC_CLK_CTRL,
.bit_idx = 8,
@@ -860,20 +889,20 @@ static struct clk_regmap saradc = {
.name = "saradc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &saradc_div.hw
+ &a1_saradc_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data pwm_abcd_parents[] = {
+static const struct clk_parent_data a1_pwm_abcd_parents[] = {
{ .fw_name = "xtal", },
- { .hw = &sys.hw },
- { .hw = &rtc.hw },
+ { .hw = &a1_sys.hw },
+ { .hw = &a1_rtc.hw },
};
-static struct clk_regmap pwm_a_sel = {
+static struct clk_regmap a1_pwm_a_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_AB_CTRL,
.mask = 0x1,
@@ -882,12 +911,12 @@ static struct clk_regmap pwm_a_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_a_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_abcd_parents,
- .num_parents = ARRAY_SIZE(pwm_abcd_parents),
+ .parent_data = a1_pwm_abcd_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_abcd_parents),
},
};
-static struct clk_regmap pwm_a_div = {
+static struct clk_regmap a1_pwm_a_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_AB_CTRL,
.shift = 0,
@@ -897,14 +926,14 @@ static struct clk_regmap pwm_a_div = {
.name = "pwm_a_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_a_sel.hw
+ &a1_pwm_a_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_a = {
+static struct clk_regmap a1_pwm_a = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_AB_CTRL,
.bit_idx = 8,
@@ -913,14 +942,14 @@ static struct clk_regmap pwm_a = {
.name = "pwm_a",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_a_div.hw
+ &a1_pwm_a_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_b_sel = {
+static struct clk_regmap a1_pwm_b_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_AB_CTRL,
.mask = 0x1,
@@ -929,12 +958,12 @@ static struct clk_regmap pwm_b_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_b_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_abcd_parents,
- .num_parents = ARRAY_SIZE(pwm_abcd_parents),
+ .parent_data = a1_pwm_abcd_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_abcd_parents),
},
};
-static struct clk_regmap pwm_b_div = {
+static struct clk_regmap a1_pwm_b_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_AB_CTRL,
.shift = 16,
@@ -944,14 +973,14 @@ static struct clk_regmap pwm_b_div = {
.name = "pwm_b_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_b_sel.hw
+ &a1_pwm_b_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_b = {
+static struct clk_regmap a1_pwm_b = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_AB_CTRL,
.bit_idx = 24,
@@ -960,14 +989,14 @@ static struct clk_regmap pwm_b = {
.name = "pwm_b",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_b_div.hw
+ &a1_pwm_b_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_c_sel = {
+static struct clk_regmap a1_pwm_c_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_CD_CTRL,
.mask = 0x1,
@@ -976,12 +1005,12 @@ static struct clk_regmap pwm_c_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_c_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_abcd_parents,
- .num_parents = ARRAY_SIZE(pwm_abcd_parents),
+ .parent_data = a1_pwm_abcd_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_abcd_parents),
},
};
-static struct clk_regmap pwm_c_div = {
+static struct clk_regmap a1_pwm_c_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_CD_CTRL,
.shift = 0,
@@ -991,14 +1020,14 @@ static struct clk_regmap pwm_c_div = {
.name = "pwm_c_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_c_sel.hw
+ &a1_pwm_c_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_c = {
+static struct clk_regmap a1_pwm_c = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_CD_CTRL,
.bit_idx = 8,
@@ -1007,14 +1036,14 @@ static struct clk_regmap pwm_c = {
.name = "pwm_c",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_c_div.hw
+ &a1_pwm_c_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_d_sel = {
+static struct clk_regmap a1_pwm_d_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_CD_CTRL,
.mask = 0x1,
@@ -1023,12 +1052,12 @@ static struct clk_regmap pwm_d_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_d_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_abcd_parents,
- .num_parents = ARRAY_SIZE(pwm_abcd_parents),
+ .parent_data = a1_pwm_abcd_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_abcd_parents),
},
};
-static struct clk_regmap pwm_d_div = {
+static struct clk_regmap a1_pwm_d_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_CD_CTRL,
.shift = 16,
@@ -1038,14 +1067,14 @@ static struct clk_regmap pwm_d_div = {
.name = "pwm_d_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_d_sel.hw
+ &a1_pwm_d_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_d = {
+static struct clk_regmap a1_pwm_d = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_CD_CTRL,
.bit_idx = 24,
@@ -1054,21 +1083,21 @@ static struct clk_regmap pwm_d = {
.name = "pwm_d",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_d_div.hw
+ &a1_pwm_d_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data pwm_ef_parents[] = {
+static const struct clk_parent_data a1_pwm_ef_parents[] = {
{ .fw_name = "xtal", },
- { .hw = &sys.hw },
+ { .hw = &a1_sys.hw },
{ .fw_name = "fclk_div5", },
- { .hw = &rtc.hw },
+ { .hw = &a1_rtc.hw },
};
-static struct clk_regmap pwm_e_sel = {
+static struct clk_regmap a1_pwm_e_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_EF_CTRL,
.mask = 0x3,
@@ -1077,12 +1106,12 @@ static struct clk_regmap pwm_e_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_e_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_ef_parents,
- .num_parents = ARRAY_SIZE(pwm_ef_parents),
+ .parent_data = a1_pwm_ef_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_ef_parents),
},
};
-static struct clk_regmap pwm_e_div = {
+static struct clk_regmap a1_pwm_e_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_EF_CTRL,
.shift = 0,
@@ -1092,14 +1121,14 @@ static struct clk_regmap pwm_e_div = {
.name = "pwm_e_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_e_sel.hw
+ &a1_pwm_e_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_e = {
+static struct clk_regmap a1_pwm_e = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_EF_CTRL,
.bit_idx = 8,
@@ -1108,14 +1137,14 @@ static struct clk_regmap pwm_e = {
.name = "pwm_e",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_e_div.hw
+ &a1_pwm_e_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_f_sel = {
+static struct clk_regmap a1_pwm_f_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_EF_CTRL,
.mask = 0x3,
@@ -1124,12 +1153,12 @@ static struct clk_regmap pwm_f_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_f_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_ef_parents,
- .num_parents = ARRAY_SIZE(pwm_ef_parents),
+ .parent_data = a1_pwm_ef_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_ef_parents),
},
};
-static struct clk_regmap pwm_f_div = {
+static struct clk_regmap a1_pwm_f_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_EF_CTRL,
.shift = 16,
@@ -1139,14 +1168,14 @@ static struct clk_regmap pwm_f_div = {
.name = "pwm_f_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_f_sel.hw
+ &a1_pwm_f_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_f = {
+static struct clk_regmap a1_pwm_f = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_EF_CTRL,
.bit_idx = 24,
@@ -1155,7 +1184,7 @@ static struct clk_regmap pwm_f = {
.name = "pwm_f",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_f_div.hw
+ &a1_pwm_f_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1171,14 +1200,14 @@ static struct clk_regmap pwm_f = {
* --------------------|/
* 24M
*/
-static const struct clk_parent_data spicc_spifc_parents[] = {
+static const struct clk_parent_data a1_spi_parents[] = {
{ .fw_name = "fclk_div2"},
{ .fw_name = "fclk_div3"},
{ .fw_name = "fclk_div5"},
{ .fw_name = "hifi_pll" },
};
-static struct clk_regmap spicc_sel = {
+static struct clk_regmap a1_spicc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SPICC_CLK_CTRL,
.mask = 0x3,
@@ -1187,12 +1216,12 @@ static struct clk_regmap spicc_sel = {
.hw.init = &(struct clk_init_data){
.name = "spicc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = spicc_spifc_parents,
- .num_parents = ARRAY_SIZE(spicc_spifc_parents),
+ .parent_data = a1_spi_parents,
+ .num_parents = ARRAY_SIZE(a1_spi_parents),
},
};
-static struct clk_regmap spicc_div = {
+static struct clk_regmap a1_spicc_div = {
.data = &(struct clk_regmap_div_data){
.offset = SPICC_CLK_CTRL,
.shift = 0,
@@ -1202,14 +1231,14 @@ static struct clk_regmap spicc_div = {
.name = "spicc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &spicc_sel.hw
+ &a1_spicc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap spicc_sel2 = {
+static struct clk_regmap a1_spicc_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = SPICC_CLK_CTRL,
.mask = 0x1,
@@ -1219,7 +1248,7 @@ static struct clk_regmap spicc_sel2 = {
.name = "spicc_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &spicc_div.hw },
+ { .hw = &a1_spicc_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1227,7 +1256,7 @@ static struct clk_regmap spicc_sel2 = {
},
};
-static struct clk_regmap spicc = {
+static struct clk_regmap a1_spicc = {
.data = &(struct clk_regmap_gate_data){
.offset = SPICC_CLK_CTRL,
.bit_idx = 8,
@@ -1236,14 +1265,14 @@ static struct clk_regmap spicc = {
.name = "spicc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &spicc_sel2.hw
+ &a1_spicc_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ts_div = {
+static struct clk_regmap a1_ts_div = {
.data = &(struct clk_regmap_div_data){
.offset = TS_CLK_CTRL,
.shift = 0,
@@ -1259,7 +1288,7 @@ static struct clk_regmap ts_div = {
},
};
-static struct clk_regmap ts = {
+static struct clk_regmap a1_ts = {
.data = &(struct clk_regmap_gate_data){
.offset = TS_CLK_CTRL,
.bit_idx = 8,
@@ -1268,14 +1297,14 @@ static struct clk_regmap ts = {
.name = "ts",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ts_div.hw
+ &a1_ts_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap spifc_sel = {
+static struct clk_regmap a1_spifc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SPIFC_CLK_CTRL,
.mask = 0x3,
@@ -1284,12 +1313,12 @@ static struct clk_regmap spifc_sel = {
.hw.init = &(struct clk_init_data){
.name = "spifc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = spicc_spifc_parents,
- .num_parents = ARRAY_SIZE(spicc_spifc_parents),
+ .parent_data = a1_spi_parents,
+ .num_parents = ARRAY_SIZE(a1_spi_parents),
},
};
-static struct clk_regmap spifc_div = {
+static struct clk_regmap a1_spifc_div = {
.data = &(struct clk_regmap_div_data){
.offset = SPIFC_CLK_CTRL,
.shift = 0,
@@ -1299,14 +1328,14 @@ static struct clk_regmap spifc_div = {
.name = "spifc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &spifc_sel.hw
+ &a1_spifc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap spifc_sel2 = {
+static struct clk_regmap a1_spifc_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = SPIFC_CLK_CTRL,
.mask = 0x1,
@@ -1316,7 +1345,7 @@ static struct clk_regmap spifc_sel2 = {
.name = "spifc_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &spifc_div.hw },
+ { .hw = &a1_spifc_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1324,7 +1353,7 @@ static struct clk_regmap spifc_sel2 = {
},
};
-static struct clk_regmap spifc = {
+static struct clk_regmap a1_spifc = {
.data = &(struct clk_regmap_gate_data){
.offset = SPIFC_CLK_CTRL,
.bit_idx = 8,
@@ -1333,21 +1362,21 @@ static struct clk_regmap spifc = {
.name = "spifc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &spifc_sel2.hw
+ &a1_spifc_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data usb_bus_parents[] = {
+static const struct clk_parent_data a1_usb_bus_parents[] = {
{ .fw_name = "xtal", },
- { .hw = &sys.hw },
+ { .hw = &a1_sys.hw },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
};
-static struct clk_regmap usb_bus_sel = {
+static struct clk_regmap a1_usb_bus_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = USB_BUSCLK_CTRL,
.mask = 0x3,
@@ -1356,13 +1385,13 @@ static struct clk_regmap usb_bus_sel = {
.hw.init = &(struct clk_init_data){
.name = "usb_bus_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = usb_bus_parents,
- .num_parents = ARRAY_SIZE(usb_bus_parents),
+ .parent_data = a1_usb_bus_parents,
+ .num_parents = ARRAY_SIZE(a1_usb_bus_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap usb_bus_div = {
+static struct clk_regmap a1_usb_bus_div = {
.data = &(struct clk_regmap_div_data){
.offset = USB_BUSCLK_CTRL,
.shift = 0,
@@ -1372,14 +1401,14 @@ static struct clk_regmap usb_bus_div = {
.name = "usb_bus_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &usb_bus_sel.hw
+ &a1_usb_bus_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap usb_bus = {
+static struct clk_regmap a1_usb_bus = {
.data = &(struct clk_regmap_gate_data){
.offset = USB_BUSCLK_CTRL,
.bit_idx = 8,
@@ -1388,21 +1417,21 @@ static struct clk_regmap usb_bus = {
.name = "usb_bus",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &usb_bus_div.hw
+ &a1_usb_bus_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data sd_emmc_psram_dmc_parents[] = {
+static const struct clk_parent_data a1_sd_emmc_parents[] = {
{ .fw_name = "fclk_div2", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
{ .fw_name = "hifi_pll", },
};
-static struct clk_regmap sd_emmc_sel = {
+static struct clk_regmap a1_sd_emmc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SD_EMMC_CLK_CTRL,
.mask = 0x3,
@@ -1411,12 +1440,12 @@ static struct clk_regmap sd_emmc_sel = {
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = sd_emmc_psram_dmc_parents,
- .num_parents = ARRAY_SIZE(sd_emmc_psram_dmc_parents),
+ .parent_data = a1_sd_emmc_parents,
+ .num_parents = ARRAY_SIZE(a1_sd_emmc_parents),
},
};
-static struct clk_regmap sd_emmc_div = {
+static struct clk_regmap a1_sd_emmc_div = {
.data = &(struct clk_regmap_div_data){
.offset = SD_EMMC_CLK_CTRL,
.shift = 0,
@@ -1426,14 +1455,14 @@ static struct clk_regmap sd_emmc_div = {
.name = "sd_emmc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_sel.hw
+ &a1_sd_emmc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sd_emmc_sel2 = {
+static struct clk_regmap a1_sd_emmc_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = SD_EMMC_CLK_CTRL,
.mask = 0x1,
@@ -1443,7 +1472,7 @@ static struct clk_regmap sd_emmc_sel2 = {
.name = "sd_emmc_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &sd_emmc_div.hw },
+ { .hw = &a1_sd_emmc_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1451,7 +1480,7 @@ static struct clk_regmap sd_emmc_sel2 = {
},
};
-static struct clk_regmap sd_emmc = {
+static struct clk_regmap a1_sd_emmc = {
.data = &(struct clk_regmap_gate_data){
.offset = SD_EMMC_CLK_CTRL,
.bit_idx = 8,
@@ -1460,14 +1489,14 @@ static struct clk_regmap sd_emmc = {
.name = "sd_emmc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_sel2.hw
+ &a1_sd_emmc_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap psram_sel = {
+static struct clk_regmap a1_psram_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PSRAM_CLK_CTRL,
.mask = 0x3,
@@ -1476,12 +1505,12 @@ static struct clk_regmap psram_sel = {
.hw.init = &(struct clk_init_data){
.name = "psram_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = sd_emmc_psram_dmc_parents,
- .num_parents = ARRAY_SIZE(sd_emmc_psram_dmc_parents),
+ .parent_data = a1_sd_emmc_parents,
+ .num_parents = ARRAY_SIZE(a1_sd_emmc_parents),
},
};
-static struct clk_regmap psram_div = {
+static struct clk_regmap a1_psram_div = {
.data = &(struct clk_regmap_div_data){
.offset = PSRAM_CLK_CTRL,
.shift = 0,
@@ -1491,14 +1520,14 @@ static struct clk_regmap psram_div = {
.name = "psram_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &psram_sel.hw
+ &a1_psram_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap psram_sel2 = {
+static struct clk_regmap a1_psram_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = PSRAM_CLK_CTRL,
.mask = 0x1,
@@ -1508,7 +1537,7 @@ static struct clk_regmap psram_sel2 = {
.name = "psram_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &psram_div.hw },
+ { .hw = &a1_psram_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1516,7 +1545,7 @@ static struct clk_regmap psram_sel2 = {
},
};
-static struct clk_regmap psram = {
+static struct clk_regmap a1_psram = {
.data = &(struct clk_regmap_gate_data){
.offset = PSRAM_CLK_CTRL,
.bit_idx = 8,
@@ -1525,14 +1554,14 @@ static struct clk_regmap psram = {
.name = "psram",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &psram_sel2.hw
+ &a1_psram_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dmc_sel = {
+static struct clk_regmap a1_dmc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DMC_CLK_CTRL,
.mask = 0x3,
@@ -1541,12 +1570,12 @@ static struct clk_regmap dmc_sel = {
.hw.init = &(struct clk_init_data){
.name = "dmc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = sd_emmc_psram_dmc_parents,
- .num_parents = ARRAY_SIZE(sd_emmc_psram_dmc_parents),
+ .parent_data = a1_sd_emmc_parents,
+ .num_parents = ARRAY_SIZE(a1_sd_emmc_parents),
},
};
-static struct clk_regmap dmc_div = {
+static struct clk_regmap a1_dmc_div = {
.data = &(struct clk_regmap_div_data){
.offset = DMC_CLK_CTRL,
.shift = 0,
@@ -1556,14 +1585,14 @@ static struct clk_regmap dmc_div = {
.name = "dmc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dmc_sel.hw
+ &a1_dmc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dmc_sel2 = {
+static struct clk_regmap a1_dmc_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = DMC_CLK_CTRL,
.mask = 0x1,
@@ -1573,7 +1602,7 @@ static struct clk_regmap dmc_sel2 = {
.name = "dmc_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &dmc_div.hw },
+ { .hw = &a1_dmc_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1581,7 +1610,7 @@ static struct clk_regmap dmc_sel2 = {
},
};
-static struct clk_regmap dmc = {
+static struct clk_regmap a1_dmc = {
.data = &(struct clk_regmap_gate_data){
.offset = DMC_CLK_CTRL,
.bit_idx = 8,
@@ -1590,14 +1619,14 @@ static struct clk_regmap dmc = {
.name = "dmc",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dmc_sel2.hw
+ &a1_dmc_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ceca_32k_in = {
+static struct clk_regmap a1_ceca_32k_in = {
.data = &(struct clk_regmap_gate_data){
.offset = CECA_CLK_CTRL0,
.bit_idx = 31,
@@ -1612,7 +1641,7 @@ static struct clk_regmap ceca_32k_in = {
},
};
-static struct clk_regmap ceca_32k_div = {
+static struct clk_regmap a1_ceca_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = CECA_CLK_CTRL0,
@@ -1639,19 +1668,19 @@ static struct clk_regmap ceca_32k_div = {
.shift = 28,
.width = 1,
},
- .table = clk_32k_div_table,
+ .table = a1_32k_div_table,
},
.hw.init = &(struct clk_init_data){
.name = "ceca_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ceca_32k_in.hw
+ &a1_ceca_32k_in.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap ceca_32k_sel_pre = {
+static struct clk_regmap a1_ceca_32k_sel_pre = {
.data = &(struct clk_regmap_mux_data) {
.offset = CECA_CLK_CTRL1,
.mask = 0x1,
@@ -1662,15 +1691,15 @@ static struct clk_regmap ceca_32k_sel_pre = {
.name = "ceca_32k_sel_pre",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ceca_32k_div.hw,
- &ceca_32k_in.hw,
+ &a1_ceca_32k_div.hw,
+ &a1_ceca_32k_in.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ceca_32k_sel = {
+static struct clk_regmap a1_ceca_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = CECA_CLK_CTRL1,
.mask = 0x1,
@@ -1681,14 +1710,14 @@ static struct clk_regmap ceca_32k_sel = {
.name = "ceca_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ceca_32k_sel_pre.hw,
- &rtc.hw,
+ &a1_ceca_32k_sel_pre.hw,
+ &a1_rtc.hw,
},
.num_parents = 2,
},
};
-static struct clk_regmap ceca_32k_out = {
+static struct clk_regmap a1_ceca_32k_out = {
.data = &(struct clk_regmap_gate_data){
.offset = CECA_CLK_CTRL0,
.bit_idx = 30,
@@ -1697,14 +1726,14 @@ static struct clk_regmap ceca_32k_out = {
.name = "ceca_32k_out",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ceca_32k_sel.hw
+ &a1_ceca_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap cecb_32k_in = {
+static struct clk_regmap a1_cecb_32k_in = {
.data = &(struct clk_regmap_gate_data){
.offset = CECB_CLK_CTRL0,
.bit_idx = 31,
@@ -1719,7 +1748,7 @@ static struct clk_regmap cecb_32k_in = {
},
};
-static struct clk_regmap cecb_32k_div = {
+static struct clk_regmap a1_cecb_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = CECB_CLK_CTRL0,
@@ -1746,19 +1775,19 @@ static struct clk_regmap cecb_32k_div = {
.shift = 28,
.width = 1,
},
- .table = clk_32k_div_table,
+ .table = a1_32k_div_table,
},
.hw.init = &(struct clk_init_data){
.name = "cecb_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &cecb_32k_in.hw
+ &a1_cecb_32k_in.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap cecb_32k_sel_pre = {
+static struct clk_regmap a1_cecb_32k_sel_pre = {
.data = &(struct clk_regmap_mux_data) {
.offset = CECB_CLK_CTRL1,
.mask = 0x1,
@@ -1769,15 +1798,15 @@ static struct clk_regmap cecb_32k_sel_pre = {
.name = "cecb_32k_sel_pre",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &cecb_32k_div.hw,
- &cecb_32k_in.hw,
+ &a1_cecb_32k_div.hw,
+ &a1_cecb_32k_in.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap cecb_32k_sel = {
+static struct clk_regmap a1_cecb_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = CECB_CLK_CTRL1,
.mask = 0x1,
@@ -1788,14 +1817,14 @@ static struct clk_regmap cecb_32k_sel = {
.name = "cecb_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &cecb_32k_sel_pre.hw,
- &rtc.hw,
+ &a1_cecb_32k_sel_pre.hw,
+ &a1_rtc.hw,
},
.num_parents = 2,
},
};
-static struct clk_regmap cecb_32k_out = {
+static struct clk_regmap a1_cecb_32k_out = {
.data = &(struct clk_regmap_gate_data){
.offset = CECB_CLK_CTRL0,
.bit_idx = 30,
@@ -1804,443 +1833,265 @@ static struct clk_regmap cecb_32k_out = {
.name = "cecb_32k_out",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &cecb_32k_sel.hw
+ &a1_cecb_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &sys.hw)
-
-static MESON_GATE(clktree, SYS_CLK_EN0, 0);
-static MESON_GATE(reset_ctrl, SYS_CLK_EN0, 1);
-static MESON_GATE(analog_ctrl, SYS_CLK_EN0, 2);
-static MESON_GATE(pwr_ctrl, SYS_CLK_EN0, 3);
-static MESON_GATE(pad_ctrl, SYS_CLK_EN0, 4);
-static MESON_GATE(sys_ctrl, SYS_CLK_EN0, 5);
-static MESON_GATE(temp_sensor, SYS_CLK_EN0, 6);
-static MESON_GATE(am2axi_dev, SYS_CLK_EN0, 7);
-static MESON_GATE(spicc_b, SYS_CLK_EN0, 8);
-static MESON_GATE(spicc_a, SYS_CLK_EN0, 9);
-static MESON_GATE(msr, SYS_CLK_EN0, 10);
-static MESON_GATE(audio, SYS_CLK_EN0, 11);
-static MESON_GATE(jtag_ctrl, SYS_CLK_EN0, 12);
-static MESON_GATE(saradc_en, SYS_CLK_EN0, 13);
-static MESON_GATE(pwm_ef, SYS_CLK_EN0, 14);
-static MESON_GATE(pwm_cd, SYS_CLK_EN0, 15);
-static MESON_GATE(pwm_ab, SYS_CLK_EN0, 16);
-static MESON_GATE(cec, SYS_CLK_EN0, 17);
-static MESON_GATE(i2c_s, SYS_CLK_EN0, 18);
-static MESON_GATE(ir_ctrl, SYS_CLK_EN0, 19);
-static MESON_GATE(i2c_m_d, SYS_CLK_EN0, 20);
-static MESON_GATE(i2c_m_c, SYS_CLK_EN0, 21);
-static MESON_GATE(i2c_m_b, SYS_CLK_EN0, 22);
-static MESON_GATE(i2c_m_a, SYS_CLK_EN0, 23);
-static MESON_GATE(acodec, SYS_CLK_EN0, 24);
-static MESON_GATE(otp, SYS_CLK_EN0, 25);
-static MESON_GATE(sd_emmc_a, SYS_CLK_EN0, 26);
-static MESON_GATE(usb_phy, SYS_CLK_EN0, 27);
-static MESON_GATE(usb_ctrl, SYS_CLK_EN0, 28);
-static MESON_GATE(sys_dspb, SYS_CLK_EN0, 29);
-static MESON_GATE(sys_dspa, SYS_CLK_EN0, 30);
-static MESON_GATE(dma, SYS_CLK_EN0, 31);
-static MESON_GATE(irq_ctrl, SYS_CLK_EN1, 0);
-static MESON_GATE(nic, SYS_CLK_EN1, 1);
-static MESON_GATE(gic, SYS_CLK_EN1, 2);
-static MESON_GATE(uart_c, SYS_CLK_EN1, 3);
-static MESON_GATE(uart_b, SYS_CLK_EN1, 4);
-static MESON_GATE(uart_a, SYS_CLK_EN1, 5);
-static MESON_GATE(sys_psram, SYS_CLK_EN1, 6);
-static MESON_GATE(rsa, SYS_CLK_EN1, 8);
-static MESON_GATE(coresight, SYS_CLK_EN1, 9);
-static MESON_GATE(am2axi_vad, AXI_CLK_EN, 0);
-static MESON_GATE(audio_vad, AXI_CLK_EN, 1);
-static MESON_GATE(axi_dmc, AXI_CLK_EN, 3);
-static MESON_GATE(axi_psram, AXI_CLK_EN, 4);
-static MESON_GATE(ramb, AXI_CLK_EN, 5);
-static MESON_GATE(rama, AXI_CLK_EN, 6);
-static MESON_GATE(axi_spifc, AXI_CLK_EN, 7);
-static MESON_GATE(axi_nic, AXI_CLK_EN, 8);
-static MESON_GATE(axi_dma, AXI_CLK_EN, 9);
-static MESON_GATE(cpu_ctrl, AXI_CLK_EN, 10);
-static MESON_GATE(rom, AXI_CLK_EN, 11);
-static MESON_GATE(prod_i2c, AXI_CLK_EN, 12);
+static const struct clk_parent_data a1_pclk_parents = { .hw = &a1_sys.hw };
+
+#define A1_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(a1_##_name, _reg, _bit, &a1_pclk_parents, _flags)
+
+/*
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historical
+ * reasons. Users are encouraged to test without it and submit changes to:
+ * - remove the flag if it is not necessary,
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ *   if appropriate, or
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ * for a particular clock.
+ */
+static A1_PCLK(clktree, SYS_CLK_EN0, 0, CLK_IGNORE_UNUSED);
+static A1_PCLK(reset_ctrl, SYS_CLK_EN0, 1, CLK_IGNORE_UNUSED);
+static A1_PCLK(analog_ctrl, SYS_CLK_EN0, 2, CLK_IGNORE_UNUSED);
+static A1_PCLK(pwr_ctrl, SYS_CLK_EN0, 3, CLK_IGNORE_UNUSED);
+static A1_PCLK(pad_ctrl, SYS_CLK_EN0, 4, CLK_IGNORE_UNUSED);
+static A1_PCLK(sys_ctrl, SYS_CLK_EN0, 5, CLK_IGNORE_UNUSED);
+static A1_PCLK(temp_sensor, SYS_CLK_EN0, 6, CLK_IGNORE_UNUSED);
+static A1_PCLK(am2axi_dev, SYS_CLK_EN0, 7, CLK_IGNORE_UNUSED);
+static A1_PCLK(spicc_b, SYS_CLK_EN0, 8, CLK_IGNORE_UNUSED);
+static A1_PCLK(spicc_a, SYS_CLK_EN0, 9, CLK_IGNORE_UNUSED);
+static A1_PCLK(msr, SYS_CLK_EN0, 10, CLK_IGNORE_UNUSED);
+static A1_PCLK(audio, SYS_CLK_EN0, 11, CLK_IGNORE_UNUSED);
+static A1_PCLK(jtag_ctrl, SYS_CLK_EN0, 12, CLK_IGNORE_UNUSED);
+static A1_PCLK(saradc_en, SYS_CLK_EN0, 13, CLK_IGNORE_UNUSED);
+static A1_PCLK(pwm_ef, SYS_CLK_EN0, 14, CLK_IGNORE_UNUSED);
+static A1_PCLK(pwm_cd, SYS_CLK_EN0, 15, CLK_IGNORE_UNUSED);
+static A1_PCLK(pwm_ab, SYS_CLK_EN0, 16, CLK_IGNORE_UNUSED);
+static A1_PCLK(cec, SYS_CLK_EN0, 17, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_s, SYS_CLK_EN0, 18, CLK_IGNORE_UNUSED);
+static A1_PCLK(ir_ctrl, SYS_CLK_EN0, 19, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_m_d, SYS_CLK_EN0, 20, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_m_c, SYS_CLK_EN0, 21, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_m_b, SYS_CLK_EN0, 22, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_m_a, SYS_CLK_EN0, 23, CLK_IGNORE_UNUSED);
+static A1_PCLK(acodec, SYS_CLK_EN0, 24, CLK_IGNORE_UNUSED);
+static A1_PCLK(otp, SYS_CLK_EN0, 25, CLK_IGNORE_UNUSED);
+static A1_PCLK(sd_emmc_a, SYS_CLK_EN0, 26, CLK_IGNORE_UNUSED);
+static A1_PCLK(usb_phy, SYS_CLK_EN0, 27, CLK_IGNORE_UNUSED);
+static A1_PCLK(usb_ctrl, SYS_CLK_EN0, 28, CLK_IGNORE_UNUSED);
+static A1_PCLK(sys_dspb, SYS_CLK_EN0, 29, CLK_IGNORE_UNUSED);
+static A1_PCLK(sys_dspa, SYS_CLK_EN0, 30, CLK_IGNORE_UNUSED);
+static A1_PCLK(dma, SYS_CLK_EN0, 31, CLK_IGNORE_UNUSED);
+
+static A1_PCLK(irq_ctrl, SYS_CLK_EN1, 0, CLK_IGNORE_UNUSED);
+static A1_PCLK(nic, SYS_CLK_EN1, 1, CLK_IGNORE_UNUSED);
+static A1_PCLK(gic, SYS_CLK_EN1, 2, CLK_IGNORE_UNUSED);
+static A1_PCLK(uart_c, SYS_CLK_EN1, 3, CLK_IGNORE_UNUSED);
+static A1_PCLK(uart_b, SYS_CLK_EN1, 4, CLK_IGNORE_UNUSED);
+static A1_PCLK(uart_a, SYS_CLK_EN1, 5, CLK_IGNORE_UNUSED);
+static A1_PCLK(sys_psram, SYS_CLK_EN1, 6, CLK_IGNORE_UNUSED);
+static A1_PCLK(rsa, SYS_CLK_EN1, 8, CLK_IGNORE_UNUSED);
+static A1_PCLK(coresight, SYS_CLK_EN1, 9, CLK_IGNORE_UNUSED);
+
+static A1_PCLK(am2axi_vad, AXI_CLK_EN, 0, CLK_IGNORE_UNUSED);
+static A1_PCLK(audio_vad, AXI_CLK_EN, 1, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_dmc, AXI_CLK_EN, 3, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_psram, AXI_CLK_EN, 4, CLK_IGNORE_UNUSED);
+static A1_PCLK(ramb, AXI_CLK_EN, 5, CLK_IGNORE_UNUSED);
+static A1_PCLK(rama, AXI_CLK_EN, 6, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_spifc, AXI_CLK_EN, 7, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_nic, AXI_CLK_EN, 8, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_dma, AXI_CLK_EN, 9, CLK_IGNORE_UNUSED);
+static A1_PCLK(cpu_ctrl, AXI_CLK_EN, 10, CLK_IGNORE_UNUSED);
+static A1_PCLK(rom, AXI_CLK_EN, 11, CLK_IGNORE_UNUSED);
+static A1_PCLK(prod_i2c, AXI_CLK_EN, 12, CLK_IGNORE_UNUSED);
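(Editor's illustration, not part of the patch.) Assuming MESON_PCLK() is the common pclk helper that expands to a plain clk_regmap gate, mirroring the open-coded AXG_AO_GATE() form removed from axg-aoclk.c later in this diff, a declaration such as A1_PCLK(uart_a, SYS_CLK_EN1, 5, CLK_IGNORE_UNUSED) would be roughly equivalent to:

static struct clk_regmap a1_uart_a = {
	.data = &(struct clk_regmap_gate_data) {
		.offset = SYS_CLK_EN1,		/* _reg: gate register */
		.bit_idx = 5,			/* _bit: enable bit */
	},
	.hw.init = &(struct clk_init_data) {
		.name = "a1_uart_a",
		.ops = &clk_regmap_gate_ops,
		.parent_data = &a1_pclk_parents,	/* shared parent data */
		.num_parents = 1,
		.flags = CLK_IGNORE_UNUSED,	/* _flags */
	},
};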
/* Array of all clocks registered by this provider */
-static struct clk_hw *a1_periphs_hw_clks[] = {
- [CLKID_XTAL_IN] = &xtal_in.hw,
- [CLKID_FIXPLL_IN] = &fixpll_in.hw,
- [CLKID_USB_PHY_IN] = &usb_phy_in.hw,
- [CLKID_USB_CTRL_IN] = &usb_ctrl_in.hw,
- [CLKID_HIFIPLL_IN] = &hifipll_in.hw,
- [CLKID_SYSPLL_IN] = &syspll_in.hw,
- [CLKID_DDS_IN] = &dds_in.hw,
- [CLKID_SYS] = &sys.hw,
- [CLKID_CLKTREE] = &clktree.hw,
- [CLKID_RESET_CTRL] = &reset_ctrl.hw,
- [CLKID_ANALOG_CTRL] = &analog_ctrl.hw,
- [CLKID_PWR_CTRL] = &pwr_ctrl.hw,
- [CLKID_PAD_CTRL] = &pad_ctrl.hw,
- [CLKID_SYS_CTRL] = &sys_ctrl.hw,
- [CLKID_TEMP_SENSOR] = &temp_sensor.hw,
- [CLKID_AM2AXI_DIV] = &am2axi_dev.hw,
- [CLKID_SPICC_B] = &spicc_b.hw,
- [CLKID_SPICC_A] = &spicc_a.hw,
- [CLKID_MSR] = &msr.hw,
- [CLKID_AUDIO] = &audio.hw,
- [CLKID_JTAG_CTRL] = &jtag_ctrl.hw,
- [CLKID_SARADC_EN] = &saradc_en.hw,
- [CLKID_PWM_EF] = &pwm_ef.hw,
- [CLKID_PWM_CD] = &pwm_cd.hw,
- [CLKID_PWM_AB] = &pwm_ab.hw,
- [CLKID_CEC] = &cec.hw,
- [CLKID_I2C_S] = &i2c_s.hw,
- [CLKID_IR_CTRL] = &ir_ctrl.hw,
- [CLKID_I2C_M_D] = &i2c_m_d.hw,
- [CLKID_I2C_M_C] = &i2c_m_c.hw,
- [CLKID_I2C_M_B] = &i2c_m_b.hw,
- [CLKID_I2C_M_A] = &i2c_m_a.hw,
- [CLKID_ACODEC] = &acodec.hw,
- [CLKID_OTP] = &otp.hw,
- [CLKID_SD_EMMC_A] = &sd_emmc_a.hw,
- [CLKID_USB_PHY] = &usb_phy.hw,
- [CLKID_USB_CTRL] = &usb_ctrl.hw,
- [CLKID_SYS_DSPB] = &sys_dspb.hw,
- [CLKID_SYS_DSPA] = &sys_dspa.hw,
- [CLKID_DMA] = &dma.hw,
- [CLKID_IRQ_CTRL] = &irq_ctrl.hw,
- [CLKID_NIC] = &nic.hw,
- [CLKID_GIC] = &gic.hw,
- [CLKID_UART_C] = &uart_c.hw,
- [CLKID_UART_B] = &uart_b.hw,
- [CLKID_UART_A] = &uart_a.hw,
- [CLKID_SYS_PSRAM] = &sys_psram.hw,
- [CLKID_RSA] = &rsa.hw,
- [CLKID_CORESIGHT] = &coresight.hw,
- [CLKID_AM2AXI_VAD] = &am2axi_vad.hw,
- [CLKID_AUDIO_VAD] = &audio_vad.hw,
- [CLKID_AXI_DMC] = &axi_dmc.hw,
- [CLKID_AXI_PSRAM] = &axi_psram.hw,
- [CLKID_RAMB] = &ramb.hw,
- [CLKID_RAMA] = &rama.hw,
- [CLKID_AXI_SPIFC] = &axi_spifc.hw,
- [CLKID_AXI_NIC] = &axi_nic.hw,
- [CLKID_AXI_DMA] = &axi_dma.hw,
- [CLKID_CPU_CTRL] = &cpu_ctrl.hw,
- [CLKID_ROM] = &rom.hw,
- [CLKID_PROC_I2C] = &prod_i2c.hw,
- [CLKID_DSPA_SEL] = &dspa_sel.hw,
- [CLKID_DSPB_SEL] = &dspb_sel.hw,
- [CLKID_DSPA_EN] = &dspa_en.hw,
- [CLKID_DSPA_EN_NIC] = &dspa_en_nic.hw,
- [CLKID_DSPB_EN] = &dspb_en.hw,
- [CLKID_DSPB_EN_NIC] = &dspb_en_nic.hw,
- [CLKID_RTC] = &rtc.hw,
- [CLKID_CECA_32K] = &ceca_32k_out.hw,
- [CLKID_CECB_32K] = &cecb_32k_out.hw,
- [CLKID_24M] = &clk_24m.hw,
- [CLKID_12M] = &clk_12m.hw,
- [CLKID_FCLK_DIV2_DIVN] = &fclk_div2_divn.hw,
- [CLKID_GEN] = &gen.hw,
- [CLKID_SARADC_SEL] = &saradc_sel.hw,
- [CLKID_SARADC] = &saradc.hw,
- [CLKID_PWM_A] = &pwm_a.hw,
- [CLKID_PWM_B] = &pwm_b.hw,
- [CLKID_PWM_C] = &pwm_c.hw,
- [CLKID_PWM_D] = &pwm_d.hw,
- [CLKID_PWM_E] = &pwm_e.hw,
- [CLKID_PWM_F] = &pwm_f.hw,
- [CLKID_SPICC] = &spicc.hw,
- [CLKID_TS] = &ts.hw,
- [CLKID_SPIFC] = &spifc.hw,
- [CLKID_USB_BUS] = &usb_bus.hw,
- [CLKID_SD_EMMC] = &sd_emmc.hw,
- [CLKID_PSRAM] = &psram.hw,
- [CLKID_DMC] = &dmc.hw,
- [CLKID_SYS_A_SEL] = &sys_a_sel.hw,
- [CLKID_SYS_A_DIV] = &sys_a_div.hw,
- [CLKID_SYS_A] = &sys_a.hw,
- [CLKID_SYS_B_SEL] = &sys_b_sel.hw,
- [CLKID_SYS_B_DIV] = &sys_b_div.hw,
- [CLKID_SYS_B] = &sys_b.hw,
- [CLKID_DSPA_A_SEL] = &dspa_a_sel.hw,
- [CLKID_DSPA_A_DIV] = &dspa_a_div.hw,
- [CLKID_DSPA_A] = &dspa_a.hw,
- [CLKID_DSPA_B_SEL] = &dspa_b_sel.hw,
- [CLKID_DSPA_B_DIV] = &dspa_b_div.hw,
- [CLKID_DSPA_B] = &dspa_b.hw,
- [CLKID_DSPB_A_SEL] = &dspb_a_sel.hw,
- [CLKID_DSPB_A_DIV] = &dspb_a_div.hw,
- [CLKID_DSPB_A] = &dspb_a.hw,
- [CLKID_DSPB_B_SEL] = &dspb_b_sel.hw,
- [CLKID_DSPB_B_DIV] = &dspb_b_div.hw,
- [CLKID_DSPB_B] = &dspb_b.hw,
- [CLKID_RTC_32K_IN] = &rtc_32k_in.hw,
- [CLKID_RTC_32K_DIV] = &rtc_32k_div.hw,
- [CLKID_RTC_32K_XTAL] = &rtc_32k_xtal.hw,
- [CLKID_RTC_32K_SEL] = &rtc_32k_sel.hw,
- [CLKID_CECB_32K_IN] = &cecb_32k_in.hw,
- [CLKID_CECB_32K_DIV] = &cecb_32k_div.hw,
- [CLKID_CECB_32K_SEL_PRE] = &cecb_32k_sel_pre.hw,
- [CLKID_CECB_32K_SEL] = &cecb_32k_sel.hw,
- [CLKID_CECA_32K_IN] = &ceca_32k_in.hw,
- [CLKID_CECA_32K_DIV] = &ceca_32k_div.hw,
- [CLKID_CECA_32K_SEL_PRE] = &ceca_32k_sel_pre.hw,
- [CLKID_CECA_32K_SEL] = &ceca_32k_sel.hw,
- [CLKID_DIV2_PRE] = &fclk_div2_divn_pre.hw,
- [CLKID_24M_DIV2] = &clk_24m_div2.hw,
- [CLKID_GEN_SEL] = &gen_sel.hw,
- [CLKID_GEN_DIV] = &gen_div.hw,
- [CLKID_SARADC_DIV] = &saradc_div.hw,
- [CLKID_PWM_A_SEL] = &pwm_a_sel.hw,
- [CLKID_PWM_A_DIV] = &pwm_a_div.hw,
- [CLKID_PWM_B_SEL] = &pwm_b_sel.hw,
- [CLKID_PWM_B_DIV] = &pwm_b_div.hw,
- [CLKID_PWM_C_SEL] = &pwm_c_sel.hw,
- [CLKID_PWM_C_DIV] = &pwm_c_div.hw,
- [CLKID_PWM_D_SEL] = &pwm_d_sel.hw,
- [CLKID_PWM_D_DIV] = &pwm_d_div.hw,
- [CLKID_PWM_E_SEL] = &pwm_e_sel.hw,
- [CLKID_PWM_E_DIV] = &pwm_e_div.hw,
- [CLKID_PWM_F_SEL] = &pwm_f_sel.hw,
- [CLKID_PWM_F_DIV] = &pwm_f_div.hw,
- [CLKID_SPICC_SEL] = &spicc_sel.hw,
- [CLKID_SPICC_DIV] = &spicc_div.hw,
- [CLKID_SPICC_SEL2] = &spicc_sel2.hw,
- [CLKID_TS_DIV] = &ts_div.hw,
- [CLKID_SPIFC_SEL] = &spifc_sel.hw,
- [CLKID_SPIFC_DIV] = &spifc_div.hw,
- [CLKID_SPIFC_SEL2] = &spifc_sel2.hw,
- [CLKID_USB_BUS_SEL] = &usb_bus_sel.hw,
- [CLKID_USB_BUS_DIV] = &usb_bus_div.hw,
- [CLKID_SD_EMMC_SEL] = &sd_emmc_sel.hw,
- [CLKID_SD_EMMC_DIV] = &sd_emmc_div.hw,
- [CLKID_SD_EMMC_SEL2] = &sd_emmc_sel2.hw,
- [CLKID_PSRAM_SEL] = &psram_sel.hw,
- [CLKID_PSRAM_DIV] = &psram_div.hw,
- [CLKID_PSRAM_SEL2] = &psram_sel2.hw,
- [CLKID_DMC_SEL] = &dmc_sel.hw,
- [CLKID_DMC_DIV] = &dmc_div.hw,
- [CLKID_DMC_SEL2] = &dmc_sel2.hw,
-};
-
-/* Convenience table to populate regmap in .probe */
-static struct clk_regmap *const a1_periphs_regmaps[] = {
- &xtal_in,
- &fixpll_in,
- &usb_phy_in,
- &usb_ctrl_in,
- &hifipll_in,
- &syspll_in,
- &dds_in,
- &sys,
- &clktree,
- &reset_ctrl,
- &analog_ctrl,
- &pwr_ctrl,
- &pad_ctrl,
- &sys_ctrl,
- &temp_sensor,
- &am2axi_dev,
- &spicc_b,
- &spicc_a,
- &msr,
- &audio,
- &jtag_ctrl,
- &saradc_en,
- &pwm_ef,
- &pwm_cd,
- &pwm_ab,
- &cec,
- &i2c_s,
- &ir_ctrl,
- &i2c_m_d,
- &i2c_m_c,
- &i2c_m_b,
- &i2c_m_a,
- &acodec,
- &otp,
- &sd_emmc_a,
- &usb_phy,
- &usb_ctrl,
- &sys_dspb,
- &sys_dspa,
- &dma,
- &irq_ctrl,
- &nic,
- &gic,
- &uart_c,
- &uart_b,
- &uart_a,
- &sys_psram,
- &rsa,
- &coresight,
- &am2axi_vad,
- &audio_vad,
- &axi_dmc,
- &axi_psram,
- &ramb,
- &rama,
- &axi_spifc,
- &axi_nic,
- &axi_dma,
- &cpu_ctrl,
- &rom,
- &prod_i2c,
- &dspa_sel,
- &dspb_sel,
- &dspa_en,
- &dspa_en_nic,
- &dspb_en,
- &dspb_en_nic,
- &rtc,
- &ceca_32k_out,
- &cecb_32k_out,
- &clk_24m,
- &clk_12m,
- &fclk_div2_divn,
- &gen,
- &saradc_sel,
- &saradc,
- &pwm_a,
- &pwm_b,
- &pwm_c,
- &pwm_d,
- &pwm_e,
- &pwm_f,
- &spicc,
- &ts,
- &spifc,
- &usb_bus,
- &sd_emmc,
- &psram,
- &dmc,
- &sys_a_sel,
- &sys_a_div,
- &sys_a,
- &sys_b_sel,
- &sys_b_div,
- &sys_b,
- &dspa_a_sel,
- &dspa_a_div,
- &dspa_a,
- &dspa_b_sel,
- &dspa_b_div,
- &dspa_b,
- &dspb_a_sel,
- &dspb_a_div,
- &dspb_a,
- &dspb_b_sel,
- &dspb_b_div,
- &dspb_b,
- &rtc_32k_in,
- &rtc_32k_div,
- &rtc_32k_xtal,
- &rtc_32k_sel,
- &cecb_32k_in,
- &cecb_32k_div,
- &cecb_32k_sel_pre,
- &cecb_32k_sel,
- &ceca_32k_in,
- &ceca_32k_div,
- &ceca_32k_sel_pre,
- &ceca_32k_sel,
- &fclk_div2_divn_pre,
- &gen_sel,
- &gen_div,
- &saradc_div,
- &pwm_a_sel,
- &pwm_a_div,
- &pwm_b_sel,
- &pwm_b_div,
- &pwm_c_sel,
- &pwm_c_div,
- &pwm_d_sel,
- &pwm_d_div,
- &pwm_e_sel,
- &pwm_e_div,
- &pwm_f_sel,
- &pwm_f_div,
- &spicc_sel,
- &spicc_div,
- &spicc_sel2,
- &ts_div,
- &spifc_sel,
- &spifc_div,
- &spifc_sel2,
- &usb_bus_sel,
- &usb_bus_div,
- &sd_emmc_sel,
- &sd_emmc_div,
- &sd_emmc_sel2,
- &psram_sel,
- &psram_div,
- &psram_sel2,
- &dmc_sel,
- &dmc_div,
- &dmc_sel2,
-};
-
-static const struct regmap_config a1_periphs_regmap_cfg = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = DMC_CLK_CTRL,
-};
-
-static struct meson_clk_hw_data a1_periphs_clks = {
- .hws = a1_periphs_hw_clks,
- .num = ARRAY_SIZE(a1_periphs_hw_clks),
-};
-
-static int meson_a1_periphs_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- void __iomem *base;
- struct regmap *map;
- int clkid, i, err;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return dev_err_probe(dev, PTR_ERR(base),
- "can't ioremap resource\n");
-
- map = devm_regmap_init_mmio(dev, base, &a1_periphs_regmap_cfg);
- if (IS_ERR(map))
- return dev_err_probe(dev, PTR_ERR(map),
- "can't init regmap mmio region\n");
-
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(a1_periphs_regmaps); i++)
- a1_periphs_regmaps[i]->map = map;
-
- for (clkid = 0; clkid < a1_periphs_clks.num; clkid++) {
- err = devm_clk_hw_register(dev, a1_periphs_clks.hws[clkid]);
- if (err)
- return dev_err_probe(dev, err,
- "clock[%d] registration failed\n",
- clkid);
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, &a1_periphs_clks);
-}
-
-static const struct of_device_id a1_periphs_clkc_match_table[] = {
- { .compatible = "amlogic,a1-peripherals-clkc", },
+static struct clk_hw *a1_peripherals_hw_clks[] = {
+ [CLKID_XTAL_IN] = &a1_xtal_in.hw,
+ [CLKID_FIXPLL_IN] = &a1_fixpll_in.hw,
+ [CLKID_USB_PHY_IN] = &a1_usb_phy_in.hw,
+ [CLKID_USB_CTRL_IN] = &a1_usb_ctrl_in.hw,
+ [CLKID_HIFIPLL_IN] = &a1_hifipll_in.hw,
+ [CLKID_SYSPLL_IN] = &a1_syspll_in.hw,
+ [CLKID_DDS_IN] = &a1_dds_in.hw,
+ [CLKID_SYS] = &a1_sys.hw,
+ [CLKID_CLKTREE] = &a1_clktree.hw,
+ [CLKID_RESET_CTRL] = &a1_reset_ctrl.hw,
+ [CLKID_ANALOG_CTRL] = &a1_analog_ctrl.hw,
+ [CLKID_PWR_CTRL] = &a1_pwr_ctrl.hw,
+ [CLKID_PAD_CTRL] = &a1_pad_ctrl.hw,
+ [CLKID_SYS_CTRL] = &a1_sys_ctrl.hw,
+ [CLKID_TEMP_SENSOR] = &a1_temp_sensor.hw,
+ [CLKID_AM2AXI_DIV] = &a1_am2axi_dev.hw,
+ [CLKID_SPICC_B] = &a1_spicc_b.hw,
+ [CLKID_SPICC_A] = &a1_spicc_a.hw,
+ [CLKID_MSR] = &a1_msr.hw,
+ [CLKID_AUDIO] = &a1_audio.hw,
+ [CLKID_JTAG_CTRL] = &a1_jtag_ctrl.hw,
+ [CLKID_SARADC_EN] = &a1_saradc_en.hw,
+ [CLKID_PWM_EF] = &a1_pwm_ef.hw,
+ [CLKID_PWM_CD] = &a1_pwm_cd.hw,
+ [CLKID_PWM_AB] = &a1_pwm_ab.hw,
+ [CLKID_CEC] = &a1_cec.hw,
+ [CLKID_I2C_S] = &a1_i2c_s.hw,
+ [CLKID_IR_CTRL] = &a1_ir_ctrl.hw,
+ [CLKID_I2C_M_D] = &a1_i2c_m_d.hw,
+ [CLKID_I2C_M_C] = &a1_i2c_m_c.hw,
+ [CLKID_I2C_M_B] = &a1_i2c_m_b.hw,
+ [CLKID_I2C_M_A] = &a1_i2c_m_a.hw,
+ [CLKID_ACODEC] = &a1_acodec.hw,
+ [CLKID_OTP] = &a1_otp.hw,
+ [CLKID_SD_EMMC_A] = &a1_sd_emmc_a.hw,
+ [CLKID_USB_PHY] = &a1_usb_phy.hw,
+ [CLKID_USB_CTRL] = &a1_usb_ctrl.hw,
+ [CLKID_SYS_DSPB] = &a1_sys_dspb.hw,
+ [CLKID_SYS_DSPA] = &a1_sys_dspa.hw,
+ [CLKID_DMA] = &a1_dma.hw,
+ [CLKID_IRQ_CTRL] = &a1_irq_ctrl.hw,
+ [CLKID_NIC] = &a1_nic.hw,
+ [CLKID_GIC] = &a1_gic.hw,
+ [CLKID_UART_C] = &a1_uart_c.hw,
+ [CLKID_UART_B] = &a1_uart_b.hw,
+ [CLKID_UART_A] = &a1_uart_a.hw,
+ [CLKID_SYS_PSRAM] = &a1_sys_psram.hw,
+ [CLKID_RSA] = &a1_rsa.hw,
+ [CLKID_CORESIGHT] = &a1_coresight.hw,
+ [CLKID_AM2AXI_VAD] = &a1_am2axi_vad.hw,
+ [CLKID_AUDIO_VAD] = &a1_audio_vad.hw,
+ [CLKID_AXI_DMC] = &a1_axi_dmc.hw,
+ [CLKID_AXI_PSRAM] = &a1_axi_psram.hw,
+ [CLKID_RAMB] = &a1_ramb.hw,
+ [CLKID_RAMA] = &a1_rama.hw,
+ [CLKID_AXI_SPIFC] = &a1_axi_spifc.hw,
+ [CLKID_AXI_NIC] = &a1_axi_nic.hw,
+ [CLKID_AXI_DMA] = &a1_axi_dma.hw,
+ [CLKID_CPU_CTRL] = &a1_cpu_ctrl.hw,
+ [CLKID_ROM] = &a1_rom.hw,
+ [CLKID_PROC_I2C] = &a1_prod_i2c.hw,
+ [CLKID_DSPA_SEL] = &a1_dspa_sel.hw,
+ [CLKID_DSPB_SEL] = &a1_dspb_sel.hw,
+ [CLKID_DSPA_EN] = &a1_dspa_en.hw,
+ [CLKID_DSPA_EN_NIC] = &a1_dspa_en_nic.hw,
+ [CLKID_DSPB_EN] = &a1_dspb_en.hw,
+ [CLKID_DSPB_EN_NIC] = &a1_dspb_en_nic.hw,
+ [CLKID_RTC] = &a1_rtc.hw,
+ [CLKID_CECA_32K] = &a1_ceca_32k_out.hw,
+ [CLKID_CECB_32K] = &a1_cecb_32k_out.hw,
+ [CLKID_24M] = &a1_24m.hw,
+ [CLKID_12M] = &a1_12m.hw,
+ [CLKID_FCLK_DIV2_DIVN] = &a1_fclk_div2_divn.hw,
+ [CLKID_GEN] = &a1_gen.hw,
+ [CLKID_SARADC_SEL] = &a1_saradc_sel.hw,
+ [CLKID_SARADC] = &a1_saradc.hw,
+ [CLKID_PWM_A] = &a1_pwm_a.hw,
+ [CLKID_PWM_B] = &a1_pwm_b.hw,
+ [CLKID_PWM_C] = &a1_pwm_c.hw,
+ [CLKID_PWM_D] = &a1_pwm_d.hw,
+ [CLKID_PWM_E] = &a1_pwm_e.hw,
+ [CLKID_PWM_F] = &a1_pwm_f.hw,
+ [CLKID_SPICC] = &a1_spicc.hw,
+ [CLKID_TS] = &a1_ts.hw,
+ [CLKID_SPIFC] = &a1_spifc.hw,
+ [CLKID_USB_BUS] = &a1_usb_bus.hw,
+ [CLKID_SD_EMMC] = &a1_sd_emmc.hw,
+ [CLKID_PSRAM] = &a1_psram.hw,
+ [CLKID_DMC] = &a1_dmc.hw,
+ [CLKID_SYS_A_SEL] = &a1_sys_a_sel.hw,
+ [CLKID_SYS_A_DIV] = &a1_sys_a_div.hw,
+ [CLKID_SYS_A] = &a1_sys_a.hw,
+ [CLKID_SYS_B_SEL] = &a1_sys_b_sel.hw,
+ [CLKID_SYS_B_DIV] = &a1_sys_b_div.hw,
+ [CLKID_SYS_B] = &a1_sys_b.hw,
+ [CLKID_DSPA_A_SEL] = &a1_dspa_a_sel.hw,
+ [CLKID_DSPA_A_DIV] = &a1_dspa_a_div.hw,
+ [CLKID_DSPA_A] = &a1_dspa_a.hw,
+ [CLKID_DSPA_B_SEL] = &a1_dspa_b_sel.hw,
+ [CLKID_DSPA_B_DIV] = &a1_dspa_b_div.hw,
+ [CLKID_DSPA_B] = &a1_dspa_b.hw,
+ [CLKID_DSPB_A_SEL] = &a1_dspb_a_sel.hw,
+ [CLKID_DSPB_A_DIV] = &a1_dspb_a_div.hw,
+ [CLKID_DSPB_A] = &a1_dspb_a.hw,
+ [CLKID_DSPB_B_SEL] = &a1_dspb_b_sel.hw,
+ [CLKID_DSPB_B_DIV] = &a1_dspb_b_div.hw,
+ [CLKID_DSPB_B] = &a1_dspb_b.hw,
+ [CLKID_RTC_32K_IN] = &a1_rtc_32k_in.hw,
+ [CLKID_RTC_32K_DIV] = &a1_rtc_32k_div.hw,
+ [CLKID_RTC_32K_XTAL] = &a1_rtc_32k_xtal.hw,
+ [CLKID_RTC_32K_SEL] = &a1_rtc_32k_sel.hw,
+ [CLKID_CECB_32K_IN] = &a1_cecb_32k_in.hw,
+ [CLKID_CECB_32K_DIV] = &a1_cecb_32k_div.hw,
+ [CLKID_CECB_32K_SEL_PRE] = &a1_cecb_32k_sel_pre.hw,
+ [CLKID_CECB_32K_SEL] = &a1_cecb_32k_sel.hw,
+ [CLKID_CECA_32K_IN] = &a1_ceca_32k_in.hw,
+ [CLKID_CECA_32K_DIV] = &a1_ceca_32k_div.hw,
+ [CLKID_CECA_32K_SEL_PRE] = &a1_ceca_32k_sel_pre.hw,
+ [CLKID_CECA_32K_SEL] = &a1_ceca_32k_sel.hw,
+ [CLKID_DIV2_PRE] = &a1_fclk_div2_divn_pre.hw,
+ [CLKID_24M_DIV2] = &a1_24m_div2.hw,
+ [CLKID_GEN_SEL] = &a1_gen_sel.hw,
+ [CLKID_GEN_DIV] = &a1_gen_div.hw,
+ [CLKID_SARADC_DIV] = &a1_saradc_div.hw,
+ [CLKID_PWM_A_SEL] = &a1_pwm_a_sel.hw,
+ [CLKID_PWM_A_DIV] = &a1_pwm_a_div.hw,
+ [CLKID_PWM_B_SEL] = &a1_pwm_b_sel.hw,
+ [CLKID_PWM_B_DIV] = &a1_pwm_b_div.hw,
+ [CLKID_PWM_C_SEL] = &a1_pwm_c_sel.hw,
+ [CLKID_PWM_C_DIV] = &a1_pwm_c_div.hw,
+ [CLKID_PWM_D_SEL] = &a1_pwm_d_sel.hw,
+ [CLKID_PWM_D_DIV] = &a1_pwm_d_div.hw,
+ [CLKID_PWM_E_SEL] = &a1_pwm_e_sel.hw,
+ [CLKID_PWM_E_DIV] = &a1_pwm_e_div.hw,
+ [CLKID_PWM_F_SEL] = &a1_pwm_f_sel.hw,
+ [CLKID_PWM_F_DIV] = &a1_pwm_f_div.hw,
+ [CLKID_SPICC_SEL] = &a1_spicc_sel.hw,
+ [CLKID_SPICC_DIV] = &a1_spicc_div.hw,
+ [CLKID_SPICC_SEL2] = &a1_spicc_sel2.hw,
+ [CLKID_TS_DIV] = &a1_ts_div.hw,
+ [CLKID_SPIFC_SEL] = &a1_spifc_sel.hw,
+ [CLKID_SPIFC_DIV] = &a1_spifc_div.hw,
+ [CLKID_SPIFC_SEL2] = &a1_spifc_sel2.hw,
+ [CLKID_USB_BUS_SEL] = &a1_usb_bus_sel.hw,
+ [CLKID_USB_BUS_DIV] = &a1_usb_bus_div.hw,
+ [CLKID_SD_EMMC_SEL] = &a1_sd_emmc_sel.hw,
+ [CLKID_SD_EMMC_DIV] = &a1_sd_emmc_div.hw,
+ [CLKID_SD_EMMC_SEL2] = &a1_sd_emmc_sel2.hw,
+ [CLKID_PSRAM_SEL] = &a1_psram_sel.hw,
+ [CLKID_PSRAM_DIV] = &a1_psram_div.hw,
+ [CLKID_PSRAM_SEL2] = &a1_psram_sel2.hw,
+ [CLKID_DMC_SEL] = &a1_dmc_sel.hw,
+ [CLKID_DMC_DIV] = &a1_dmc_div.hw,
+ [CLKID_DMC_SEL2] = &a1_dmc_sel2.hw,
+};
+
+static const struct meson_clkc_data a1_peripherals_clkc_data = {
+ .hw_clks = {
+ .hws = a1_peripherals_hw_clks,
+ .num = ARRAY_SIZE(a1_peripherals_hw_clks),
+ },
+};
+
+static const struct of_device_id a1_peripherals_clkc_match_table[] = {
+ {
+ .compatible = "amlogic,a1-peripherals-clkc",
+ .data = &a1_peripherals_clkc_data,
+ },
{}
};
-MODULE_DEVICE_TABLE(of, a1_periphs_clkc_match_table);
+MODULE_DEVICE_TABLE(of, a1_peripherals_clkc_match_table);
-static struct platform_driver a1_periphs_clkc_driver = {
- .probe = meson_a1_periphs_probe,
+static struct platform_driver a1_peripherals_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "a1-peripherals-clkc",
- .of_match_table = a1_periphs_clkc_match_table,
+ .of_match_table = a1_peripherals_clkc_match_table,
},
};
-module_platform_driver(a1_periphs_clkc_driver);
+module_platform_driver(a1_peripherals_clkc_driver);
MODULE_DESCRIPTION("Amlogic A1 Peripherals Clock Controller driver");
MODULE_AUTHOR("Jian Hu <jian.hu@amlogic.com>");
diff --git a/drivers/clk/meson/a1-peripherals.h b/drivers/clk/meson/a1-peripherals.h
deleted file mode 100644
index 26de8530184a..000000000000
--- a/drivers/clk/meson/a1-peripherals.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Amlogic A1 Peripherals Clock Controller internals
- *
- * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
- * Author: Jian Hu <jian.hu@amlogic.com>
- *
- * Copyright (c) 2023, SberDevices. All Rights Reserved.
- * Author: Dmitry Rokosov <ddrokosov@sberdevices.ru>
- */
-
-#ifndef __A1_PERIPHERALS_H
-#define __A1_PERIPHERALS_H
-
-/* peripherals clock controller register offset */
-#define SYS_OSCIN_CTRL 0x0
-#define RTC_BY_OSCIN_CTRL0 0x4
-#define RTC_BY_OSCIN_CTRL1 0x8
-#define RTC_CTRL 0xc
-#define SYS_CLK_CTRL0 0x10
-#define SYS_CLK_EN0 0x1c
-#define SYS_CLK_EN1 0x20
-#define AXI_CLK_EN 0x24
-#define DSPA_CLK_EN 0x28
-#define DSPB_CLK_EN 0x2c
-#define DSPA_CLK_CTRL0 0x30
-#define DSPB_CLK_CTRL0 0x34
-#define CLK12_24_CTRL 0x38
-#define GEN_CLK_CTRL 0x3c
-#define SAR_ADC_CLK_CTRL 0xc0
-#define PWM_CLK_AB_CTRL 0xc4
-#define PWM_CLK_CD_CTRL 0xc8
-#define PWM_CLK_EF_CTRL 0xcc
-#define SPICC_CLK_CTRL 0xd0
-#define TS_CLK_CTRL 0xd4
-#define SPIFC_CLK_CTRL 0xd8
-#define USB_BUSCLK_CTRL 0xdc
-#define SD_EMMC_CLK_CTRL 0xe0
-#define CECA_CLK_CTRL0 0xe4
-#define CECA_CLK_CTRL1 0xe8
-#define CECB_CLK_CTRL0 0xec
-#define CECB_CLK_CTRL1 0xf0
-#define PSRAM_CLK_CTRL 0xf4
-#define DMC_CLK_CTRL 0xf8
-
-#endif /* __A1_PERIPHERALS_H */
diff --git a/drivers/clk/meson/a1-pll.c b/drivers/clk/meson/a1-pll.c
index 86d8159f3319..1f82e9c7c14e 100644
--- a/drivers/clk/meson/a1-pll.c
+++ b/drivers/clk/meson/a1-pll.c
@@ -10,13 +10,23 @@
#include <linux/clk-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-#include "a1-pll.h"
+#include "clk-pll.h"
#include "clk-regmap.h"
#include "meson-clkc-utils.h"
+#define ANACTRL_FIXPLL_CTRL0 0x0
+#define ANACTRL_FIXPLL_CTRL1 0x4
+#define ANACTRL_FIXPLL_STS 0x14
+#define ANACTRL_HIFIPLL_CTRL0 0xc0
+#define ANACTRL_HIFIPLL_CTRL1 0xc4
+#define ANACTRL_HIFIPLL_CTRL2 0xc8
+#define ANACTRL_HIFIPLL_CTRL3 0xcc
+#define ANACTRL_HIFIPLL_CTRL4 0xd0
+#define ANACTRL_HIFIPLL_STS 0xd4
+
#include <dt-bindings/clock/amlogic,a1-pll-clkc.h>
-static struct clk_regmap fixed_pll_dco = {
+static struct clk_regmap a1_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
.en = {
.reg_off = ANACTRL_FIXPLL_CTRL0,
@@ -59,7 +69,7 @@ static struct clk_regmap fixed_pll_dco = {
},
};
-static struct clk_regmap fixed_pll = {
+static struct clk_regmap a1_fixed_pll = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 20,
@@ -68,18 +78,18 @@ static struct clk_regmap fixed_pll = {
.name = "fixed_pll",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll_dco.hw
+ &a1_fixed_pll_dco.hw
},
.num_parents = 1,
},
};
-static const struct pll_mult_range hifi_pll_mult_range = {
+static const struct pll_mult_range a1_hifi_pll_range = {
.min = 32,
.max = 64,
};
-static const struct reg_sequence hifi_init_regs[] = {
+static const struct reg_sequence a1_hifi_pll_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL1, .def = 0x01800000 },
{ .reg = ANACTRL_HIFIPLL_CTRL2, .def = 0x00001100 },
{ .reg = ANACTRL_HIFIPLL_CTRL3, .def = 0x100a1100 },
@@ -87,7 +97,7 @@ static const struct reg_sequence hifi_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL0, .def = 0x01f18000 },
};
-static struct clk_regmap hifi_pll = {
+static struct clk_regmap a1_hifi_pll = {
.data = &(struct meson_clk_pll_data){
.en = {
.reg_off = ANACTRL_HIFIPLL_CTRL0,
@@ -124,9 +134,9 @@ static struct clk_regmap hifi_pll = {
.shift = 6,
.width = 1,
},
- .range = &hifi_pll_mult_range,
- .init_regs = hifi_init_regs,
- .init_count = ARRAY_SIZE(hifi_init_regs),
+ .range = &a1_hifi_pll_range,
+ .init_regs = a1_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(a1_hifi_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "hifi_pll",
@@ -138,20 +148,20 @@ static struct clk_regmap hifi_pll = {
},
};
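(Editor's note, hedged illustration.) The a1_hifi_pll_init_regs table above is a standard regmap reg_sequence; the common PLL code presumably flushes it to the hardware in one call when the PLL is set up, along the lines of:

	/* writes each { .reg, .def } pair in order, honouring optional delays */
	ret = regmap_multi_reg_write(map, a1_hifi_pll_init_regs,
				     ARRAY_SIZE(a1_hifi_pll_init_regs));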
-static struct clk_fixed_factor fclk_div2_div = {
+static struct clk_fixed_factor a1_fclk_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
.name = "fclk_div2_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll.hw
+ &a1_fixed_pll.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div2 = {
+static struct clk_regmap a1_fclk_div2 = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 21,
@@ -160,7 +170,7 @@ static struct clk_regmap fclk_div2 = {
.name = "fclk_div2",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div2_div.hw
+ &a1_fclk_div2_div.hw
},
.num_parents = 1,
/*
@@ -176,20 +186,20 @@ static struct clk_regmap fclk_div2 = {
},
};
-static struct clk_fixed_factor fclk_div3_div = {
+static struct clk_fixed_factor a1_fclk_div3_div = {
.mult = 1,
.div = 3,
.hw.init = &(struct clk_init_data){
.name = "fclk_div3_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll.hw
+ &a1_fixed_pll.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div3 = {
+static struct clk_regmap a1_fclk_div3 = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 22,
@@ -198,7 +208,7 @@ static struct clk_regmap fclk_div3 = {
.name = "fclk_div3",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div3_div.hw
+ &a1_fclk_div3_div.hw
},
.num_parents = 1,
/*
@@ -209,20 +219,20 @@ static struct clk_regmap fclk_div3 = {
},
};
-static struct clk_fixed_factor fclk_div5_div = {
+static struct clk_fixed_factor a1_fclk_div5_div = {
.mult = 1,
.div = 5,
.hw.init = &(struct clk_init_data){
.name = "fclk_div5_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll.hw
+ &a1_fixed_pll.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div5 = {
+static struct clk_regmap a1_fclk_div5 = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 23,
@@ -231,7 +241,7 @@ static struct clk_regmap fclk_div5 = {
.name = "fclk_div5",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div5_div.hw
+ &a1_fclk_div5_div.hw
},
.num_parents = 1,
/*
@@ -242,20 +252,20 @@ static struct clk_regmap fclk_div5 = {
},
};
-static struct clk_fixed_factor fclk_div7_div = {
+static struct clk_fixed_factor a1_fclk_div7_div = {
.mult = 1,
.div = 7,
.hw.init = &(struct clk_init_data){
.name = "fclk_div7_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll.hw
+ &a1_fixed_pll.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div7 = {
+static struct clk_regmap a1_fclk_div7 = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 24,
@@ -264,7 +274,7 @@ static struct clk_regmap fclk_div7 = {
.name = "fclk_div7",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div7_div.hw
+ &a1_fclk_div7_div.hw
},
.num_parents = 1,
},
@@ -272,83 +282,37 @@ static struct clk_regmap fclk_div7 = {
/* Array of all clocks registered by this provider */
static struct clk_hw *a1_pll_hw_clks[] = {
- [CLKID_FIXED_PLL_DCO] = &fixed_pll_dco.hw,
- [CLKID_FIXED_PLL] = &fixed_pll.hw,
- [CLKID_FCLK_DIV2_DIV] = &fclk_div2_div.hw,
- [CLKID_FCLK_DIV3_DIV] = &fclk_div3_div.hw,
- [CLKID_FCLK_DIV5_DIV] = &fclk_div5_div.hw,
- [CLKID_FCLK_DIV7_DIV] = &fclk_div7_div.hw,
- [CLKID_FCLK_DIV2] = &fclk_div2.hw,
- [CLKID_FCLK_DIV3] = &fclk_div3.hw,
- [CLKID_FCLK_DIV5] = &fclk_div5.hw,
- [CLKID_FCLK_DIV7] = &fclk_div7.hw,
- [CLKID_HIFI_PLL] = &hifi_pll.hw,
-};
-
-static struct clk_regmap *const a1_pll_regmaps[] = {
- &fixed_pll_dco,
- &fixed_pll,
- &fclk_div2,
- &fclk_div3,
- &fclk_div5,
- &fclk_div7,
- &hifi_pll,
+ [CLKID_FIXED_PLL_DCO] = &a1_fixed_pll_dco.hw,
+ [CLKID_FIXED_PLL] = &a1_fixed_pll.hw,
+ [CLKID_FCLK_DIV2_DIV] = &a1_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV3_DIV] = &a1_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV5_DIV] = &a1_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV7_DIV] = &a1_fclk_div7_div.hw,
+ [CLKID_FCLK_DIV2] = &a1_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &a1_fclk_div3.hw,
+ [CLKID_FCLK_DIV5] = &a1_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &a1_fclk_div7.hw,
+ [CLKID_HIFI_PLL] = &a1_hifi_pll.hw,
};
-static const struct regmap_config a1_pll_regmap_cfg = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = ANACTRL_HIFIPLL_STS,
-};
-
-static struct meson_clk_hw_data a1_pll_clks = {
- .hws = a1_pll_hw_clks,
- .num = ARRAY_SIZE(a1_pll_hw_clks),
+static const struct meson_clkc_data a1_pll_clkc_data = {
+ .hw_clks = {
+ .hws = a1_pll_hw_clks,
+ .num = ARRAY_SIZE(a1_pll_hw_clks),
+ },
};
-static int meson_a1_pll_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- void __iomem *base;
- struct regmap *map;
- int clkid, i, err;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return dev_err_probe(dev, PTR_ERR(base),
- "can't ioremap resource\n");
-
- map = devm_regmap_init_mmio(dev, base, &a1_pll_regmap_cfg);
- if (IS_ERR(map))
- return dev_err_probe(dev, PTR_ERR(map),
- "can't init regmap mmio region\n");
-
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(a1_pll_regmaps); i++)
- a1_pll_regmaps[i]->map = map;
-
- /* Register clocks */
- for (clkid = 0; clkid < a1_pll_clks.num; clkid++) {
- err = devm_clk_hw_register(dev, a1_pll_clks.hws[clkid]);
- if (err)
- return dev_err_probe(dev, err,
- "clock[%d] registration failed\n",
- clkid);
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
- &a1_pll_clks);
-}
-
static const struct of_device_id a1_pll_clkc_match_table[] = {
- { .compatible = "amlogic,a1-pll-clkc", },
+ {
+ .compatible = "amlogic,a1-pll-clkc",
+ .data = &a1_pll_clkc_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, a1_pll_clkc_match_table);
static struct platform_driver a1_pll_clkc_driver = {
- .probe = meson_a1_pll_probe,
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "a1-pll-clkc",
.of_match_table = a1_pll_clkc_match_table,
diff --git a/drivers/clk/meson/a1-pll.h b/drivers/clk/meson/a1-pll.h
deleted file mode 100644
index 4be17b2bf383..000000000000
--- a/drivers/clk/meson/a1-pll.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Amlogic A1 PLL Clock Controller internals
- *
- * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
- * Author: Jian Hu <jian.hu@amlogic.com>
- *
- * Copyright (c) 2023, SberDevices. All Rights Reserved.
- * Author: Dmitry Rokosov <ddrokosov@sberdevices.ru>
- */
-
-#ifndef __A1_PLL_H
-#define __A1_PLL_H
-
-#include "clk-pll.h"
-
-/* PLL register offset */
-#define ANACTRL_FIXPLL_CTRL0 0x0
-#define ANACTRL_FIXPLL_CTRL1 0x4
-#define ANACTRL_FIXPLL_STS 0x14
-#define ANACTRL_HIFIPLL_CTRL0 0xc0
-#define ANACTRL_HIFIPLL_CTRL1 0xc4
-#define ANACTRL_HIFIPLL_CTRL2 0xc8
-#define ANACTRL_HIFIPLL_CTRL3 0xcc
-#define ANACTRL_HIFIPLL_CTRL4 0xd0
-#define ANACTRL_HIFIPLL_STS 0xd4
-
-#endif /* __A1_PLL_H */
diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c
index f44091ffb57d..902fbd34039c 100644
--- a/drivers/clk/meson/axg-aoclk.c
+++ b/drivers/clk/meson/axg-aoclk.c
@@ -34,32 +34,21 @@
#define AO_RTC_ALT_CLK_CNTL0 0x94
#define AO_RTC_ALT_CLK_CNTL1 0x98
-#define AXG_AO_GATE(_name, _bit) \
-static struct clk_regmap axg_aoclk_##_name = { \
- .data = &(struct clk_regmap_gate_data) { \
- .offset = (AO_RTI_GEN_CNTL_REG0), \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = "axg_ao_" #_name, \
- .ops = &clk_regmap_gate_ops, \
- .parent_data = &(const struct clk_parent_data) { \
- .fw_name = "mpeg-clk", \
- }, \
- .num_parents = 1, \
- .flags = CLK_IGNORE_UNUSED, \
- }, \
-}
+static const struct clk_parent_data axg_ao_pclk_parents = { .fw_name = "mpeg-clk" };
-AXG_AO_GATE(remote, 0);
-AXG_AO_GATE(i2c_master, 1);
-AXG_AO_GATE(i2c_slave, 2);
-AXG_AO_GATE(uart1, 3);
-AXG_AO_GATE(uart2, 5);
-AXG_AO_GATE(ir_blaster, 6);
-AXG_AO_GATE(saradc, 7);
+#define AXG_AO_GATE(_name, _bit, _flags) \
+ MESON_PCLK(axg_ao_##_name, AO_RTI_GEN_CNTL_REG0, _bit, \
+ &axg_ao_pclk_parents, _flags)
-static struct clk_regmap axg_aoclk_cts_oscin = {
+static AXG_AO_GATE(remote, 0, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(i2c_master, 1, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(i2c_slave, 2, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(uart1, 3, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(uart2, 5, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(ir_blaster, 6, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(saradc, 7, CLK_IGNORE_UNUSED);
+
+static struct clk_regmap axg_ao_cts_oscin = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTI_PWR_CNTL_REG0,
.bit_idx = 14,
@@ -74,7 +63,7 @@ static struct clk_regmap axg_aoclk_cts_oscin = {
},
};
-static struct clk_regmap axg_aoclk_32k_pre = {
+static struct clk_regmap axg_ao_32k_pre = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 31,
@@ -83,7 +72,7 @@ static struct clk_regmap axg_aoclk_32k_pre = {
.name = "axg_ao_32k_pre",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_cts_oscin.hw
+ &axg_ao_cts_oscin.hw
},
.num_parents = 1,
},
@@ -99,7 +88,7 @@ static const struct meson_clk_dualdiv_param axg_32k_div_table[] = {
}, {}
};
-static struct clk_regmap axg_aoclk_32k_div = {
+static struct clk_regmap axg_ao_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = AO_RTC_ALT_CLK_CNTL0,
@@ -132,13 +121,13 @@ static struct clk_regmap axg_aoclk_32k_div = {
.name = "axg_ao_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_32k_pre.hw
+ &axg_ao_32k_pre.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap axg_aoclk_32k_sel = {
+static struct clk_regmap axg_ao_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTC_ALT_CLK_CNTL1,
.mask = 0x1,
@@ -149,15 +138,15 @@ static struct clk_regmap axg_aoclk_32k_sel = {
.name = "axg_ao_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_32k_div.hw,
- &axg_aoclk_32k_pre.hw,
+ &axg_ao_32k_div.hw,
+ &axg_ao_32k_pre.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap axg_aoclk_32k = {
+static struct clk_regmap axg_ao_32k = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 30,
@@ -166,14 +155,14 @@ static struct clk_regmap axg_aoclk_32k = {
.name = "axg_ao_32k",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_32k_sel.hw
+ &axg_ao_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap axg_aoclk_cts_rtc_oscin = {
+static struct clk_regmap axg_ao_cts_rtc_oscin = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -184,7 +173,7 @@ static struct clk_regmap axg_aoclk_cts_rtc_oscin = {
.name = "axg_ao_cts_rtc_oscin",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &axg_aoclk_32k.hw },
+ { .hw = &axg_ao_32k.hw },
{ .fw_name = "ext_32k-0", },
},
.num_parents = 2,
@@ -192,7 +181,7 @@ static struct clk_regmap axg_aoclk_cts_rtc_oscin = {
},
};
-static struct clk_regmap axg_aoclk_clk81 = {
+static struct clk_regmap axg_ao_clk81 = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -200,68 +189,74 @@ static struct clk_regmap axg_aoclk_clk81 = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
+ /*
+	 * NOTE: this is one of the infamous clocks the pwm driver
+	 * can request directly by its global name. It's wrong, but
+	 * there is not much we can do about it until support
+	 * for the old pwm bindings is dropped.
+ */
.name = "axg_ao_clk81",
.ops = &clk_regmap_mux_ro_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "mpeg-clk", },
- { .hw = &axg_aoclk_cts_rtc_oscin.hw },
+ { .hw = &axg_ao_cts_rtc_oscin.hw },
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap axg_aoclk_saradc_mux = {
+static struct clk_regmap axg_ao_saradc_mux = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_SAR_CLK,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data){
- .name = "axg_ao_saradc_mux",
+ .name = "ao_saradc_mux",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
- { .hw = &axg_aoclk_clk81.hw },
+ { .hw = &axg_ao_clk81.hw },
},
.num_parents = 2,
},
};
-static struct clk_regmap axg_aoclk_saradc_div = {
+static struct clk_regmap axg_ao_saradc_div = {
.data = &(struct clk_regmap_div_data) {
.offset = AO_SAR_CLK,
.shift = 0,
.width = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "axg_ao_saradc_div",
+ .name = "ao_saradc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_saradc_mux.hw
+ &axg_ao_saradc_mux.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap axg_aoclk_saradc_gate = {
+static struct clk_regmap axg_ao_saradc_gate = {
.data = &(struct clk_regmap_gate_data) {
.offset = AO_SAR_CLK,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "axg_ao_saradc_gate",
+ .name = "ao_saradc_gate",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_saradc_div.hw
+ &axg_ao_saradc_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const unsigned int axg_aoclk_reset[] = {
+static const unsigned int axg_ao_reset[] = {
[RESET_AO_REMOTE] = 16,
[RESET_AO_I2C_MASTER] = 18,
[RESET_AO_I2C_SLAVE] = 19,
@@ -270,75 +265,55 @@ static const unsigned int axg_aoclk_reset[] = {
[RESET_AO_IR_BLASTER] = 23,
};
-static struct clk_regmap *axg_aoclk_regmap[] = {
- &axg_aoclk_remote,
- &axg_aoclk_i2c_master,
- &axg_aoclk_i2c_slave,
- &axg_aoclk_uart1,
- &axg_aoclk_uart2,
- &axg_aoclk_ir_blaster,
- &axg_aoclk_saradc,
- &axg_aoclk_cts_oscin,
- &axg_aoclk_32k_pre,
- &axg_aoclk_32k_div,
- &axg_aoclk_32k_sel,
- &axg_aoclk_32k,
- &axg_aoclk_cts_rtc_oscin,
- &axg_aoclk_clk81,
- &axg_aoclk_saradc_mux,
- &axg_aoclk_saradc_div,
- &axg_aoclk_saradc_gate,
-};
-
-static struct clk_hw *axg_aoclk_hw_clks[] = {
- [CLKID_AO_REMOTE] = &axg_aoclk_remote.hw,
- [CLKID_AO_I2C_MASTER] = &axg_aoclk_i2c_master.hw,
- [CLKID_AO_I2C_SLAVE] = &axg_aoclk_i2c_slave.hw,
- [CLKID_AO_UART1] = &axg_aoclk_uart1.hw,
- [CLKID_AO_UART2] = &axg_aoclk_uart2.hw,
- [CLKID_AO_IR_BLASTER] = &axg_aoclk_ir_blaster.hw,
- [CLKID_AO_SAR_ADC] = &axg_aoclk_saradc.hw,
- [CLKID_AO_CLK81] = &axg_aoclk_clk81.hw,
- [CLKID_AO_SAR_ADC_SEL] = &axg_aoclk_saradc_mux.hw,
- [CLKID_AO_SAR_ADC_DIV] = &axg_aoclk_saradc_div.hw,
- [CLKID_AO_SAR_ADC_CLK] = &axg_aoclk_saradc_gate.hw,
- [CLKID_AO_CTS_OSCIN] = &axg_aoclk_cts_oscin.hw,
- [CLKID_AO_32K_PRE] = &axg_aoclk_32k_pre.hw,
- [CLKID_AO_32K_DIV] = &axg_aoclk_32k_div.hw,
- [CLKID_AO_32K_SEL] = &axg_aoclk_32k_sel.hw,
- [CLKID_AO_32K] = &axg_aoclk_32k.hw,
- [CLKID_AO_CTS_RTC_OSCIN] = &axg_aoclk_cts_rtc_oscin.hw,
+static struct clk_hw *axg_ao_hw_clks[] = {
+ [CLKID_AO_REMOTE] = &axg_ao_remote.hw,
+ [CLKID_AO_I2C_MASTER] = &axg_ao_i2c_master.hw,
+ [CLKID_AO_I2C_SLAVE] = &axg_ao_i2c_slave.hw,
+ [CLKID_AO_UART1] = &axg_ao_uart1.hw,
+ [CLKID_AO_UART2] = &axg_ao_uart2.hw,
+ [CLKID_AO_IR_BLASTER] = &axg_ao_ir_blaster.hw,
+ [CLKID_AO_SAR_ADC] = &axg_ao_saradc.hw,
+ [CLKID_AO_CLK81] = &axg_ao_clk81.hw,
+ [CLKID_AO_SAR_ADC_SEL] = &axg_ao_saradc_mux.hw,
+ [CLKID_AO_SAR_ADC_DIV] = &axg_ao_saradc_div.hw,
+ [CLKID_AO_SAR_ADC_CLK] = &axg_ao_saradc_gate.hw,
+ [CLKID_AO_CTS_OSCIN] = &axg_ao_cts_oscin.hw,
+ [CLKID_AO_32K_PRE] = &axg_ao_32k_pre.hw,
+ [CLKID_AO_32K_DIV] = &axg_ao_32k_div.hw,
+ [CLKID_AO_32K_SEL] = &axg_ao_32k_sel.hw,
+ [CLKID_AO_32K] = &axg_ao_32k.hw,
+ [CLKID_AO_CTS_RTC_OSCIN] = &axg_ao_cts_rtc_oscin.hw,
};
-static const struct meson_aoclk_data axg_aoclkc_data = {
+static const struct meson_aoclk_data axg_ao_clkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
- .num_reset = ARRAY_SIZE(axg_aoclk_reset),
- .reset = axg_aoclk_reset,
- .num_clks = ARRAY_SIZE(axg_aoclk_regmap),
- .clks = axg_aoclk_regmap,
- .hw_clks = {
- .hws = axg_aoclk_hw_clks,
- .num = ARRAY_SIZE(axg_aoclk_hw_clks),
+ .num_reset = ARRAY_SIZE(axg_ao_reset),
+ .reset = axg_ao_reset,
+ .clkc_data = {
+ .hw_clks = {
+ .hws = axg_ao_hw_clks,
+ .num = ARRAY_SIZE(axg_ao_hw_clks),
+ },
},
};
-static const struct of_device_id axg_aoclkc_match_table[] = {
+static const struct of_device_id axg_ao_clkc_match_table[] = {
{
.compatible = "amlogic,meson-axg-aoclkc",
- .data = &axg_aoclkc_data,
+ .data = &axg_ao_clkc_data.clkc_data,
},
{ }
};
-MODULE_DEVICE_TABLE(of, axg_aoclkc_match_table);
+MODULE_DEVICE_TABLE(of, axg_ao_clkc_match_table);
-static struct platform_driver axg_aoclkc_driver = {
+static struct platform_driver axg_ao_clkc_driver = {
.probe = meson_aoclkc_probe,
.driver = {
- .name = "axg-aoclkc",
- .of_match_table = axg_aoclkc_match_table,
+ .name = "axg-ao-clkc",
+ .of_match_table = axg_ao_clkc_match_table,
},
};
-module_platform_driver(axg_aoclkc_driver);
+module_platform_driver(axg_ao_clkc_driver);
MODULE_DESCRIPTION("Amlogic AXG Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index 9df627b142f8..fd7eca652261 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -4,6 +4,7 @@
* Author: Jerome Brunet <jbrunet@baylibre.com>
*/
+#include <linux/auxiliary_bus.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
@@ -12,17 +13,70 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
-#include <linux/reset-controller.h>
#include <linux/slab.h>
#include "meson-clkc-utils.h"
-#include "axg-audio.h"
#include "clk-regmap.h"
#include "clk-phase.h"
#include "sclk-div.h"
#include <dt-bindings/clock/axg-audio-clkc.h>
+/* Audio clock register offsets */
+#define AUDIO_CLK_GATE_EN 0x000
+#define AUDIO_MCLK_A_CTRL 0x004
+#define AUDIO_MCLK_B_CTRL 0x008
+#define AUDIO_MCLK_C_CTRL 0x00C
+#define AUDIO_MCLK_D_CTRL 0x010
+#define AUDIO_MCLK_E_CTRL 0x014
+#define AUDIO_MCLK_F_CTRL 0x018
+#define AUDIO_MST_PAD_CTRL0 0x01c
+#define AUDIO_MST_PAD_CTRL1 0x020
+#define AUDIO_SW_RESET 0x024
+#define AUDIO_MST_A_SCLK_CTRL0 0x040
+#define AUDIO_MST_A_SCLK_CTRL1 0x044
+#define AUDIO_MST_B_SCLK_CTRL0 0x048
+#define AUDIO_MST_B_SCLK_CTRL1 0x04C
+#define AUDIO_MST_C_SCLK_CTRL0 0x050
+#define AUDIO_MST_C_SCLK_CTRL1 0x054
+#define AUDIO_MST_D_SCLK_CTRL0 0x058
+#define AUDIO_MST_D_SCLK_CTRL1 0x05C
+#define AUDIO_MST_E_SCLK_CTRL0 0x060
+#define AUDIO_MST_E_SCLK_CTRL1 0x064
+#define AUDIO_MST_F_SCLK_CTRL0 0x068
+#define AUDIO_MST_F_SCLK_CTRL1 0x06C
+#define AUDIO_CLK_TDMIN_A_CTRL 0x080
+#define AUDIO_CLK_TDMIN_B_CTRL 0x084
+#define AUDIO_CLK_TDMIN_C_CTRL 0x088
+#define AUDIO_CLK_TDMIN_LB_CTRL 0x08C
+#define AUDIO_CLK_TDMOUT_A_CTRL 0x090
+#define AUDIO_CLK_TDMOUT_B_CTRL 0x094
+#define AUDIO_CLK_TDMOUT_C_CTRL 0x098
+#define AUDIO_CLK_SPDIFIN_CTRL 0x09C
+#define AUDIO_CLK_SPDIFOUT_CTRL 0x0A0
+#define AUDIO_CLK_RESAMPLE_CTRL 0x0A4
+#define AUDIO_CLK_LOCKER_CTRL 0x0A8
+#define AUDIO_CLK_PDMIN_CTRL0 0x0AC
+#define AUDIO_CLK_PDMIN_CTRL1 0x0B0
+#define AUDIO_CLK_SPDIFOUT_B_CTRL 0x0B4
+
+/* SM1 introduces new registers and some shifts :( */
+#define AUDIO_CLK_GATE_EN1 0x004
+#define AUDIO_SM1_MCLK_A_CTRL 0x008
+#define AUDIO_SM1_MCLK_B_CTRL 0x00C
+#define AUDIO_SM1_MCLK_C_CTRL 0x010
+#define AUDIO_SM1_MCLK_D_CTRL 0x014
+#define AUDIO_SM1_MCLK_E_CTRL 0x018
+#define AUDIO_SM1_MCLK_F_CTRL 0x01C
+#define AUDIO_SM1_MST_PAD_CTRL0 0x020
+#define AUDIO_SM1_MST_PAD_CTRL1 0x024
+#define AUDIO_SM1_SW_RESET0 0x028
+#define AUDIO_SM1_SW_RESET1 0x02C
+#define AUDIO_CLK81_CTRL 0x030
+#define AUDIO_CLK81_EN 0x034
+#define AUDIO_EARCRX_CMDC_CLK_CTRL 0x0D0
+#define AUDIO_EARCRX_DMAC_CLK_CTRL 0x0D4
+
#define AUD_GATE(_name, _reg, _bit, _pname, _iflags) { \
.data = &(struct clk_regmap_gate_data){ \
.offset = (_reg), \
@@ -1257,505 +1311,6 @@ static struct clk_hw *sm1_audio_hw_clks[] = {
[AUD_CLKID_EARCRX_DMAC] = &sm1_earcrx_dmac_clk.hw,
};
-
-/* Convenience table to populate regmap in .probe(). */
-static struct clk_regmap *const axg_clk_regmaps[] = {
- &ddr_arb,
- &pdm,
- &tdmin_a,
- &tdmin_b,
- &tdmin_c,
- &tdmin_lb,
- &tdmout_a,
- &tdmout_b,
- &tdmout_c,
- &frddr_a,
- &frddr_b,
- &frddr_c,
- &toddr_a,
- &toddr_b,
- &toddr_c,
- &loopback,
- &spdifin,
- &spdifout,
- &resample,
- &power_detect,
- &mst_a_mclk_sel,
- &mst_b_mclk_sel,
- &mst_c_mclk_sel,
- &mst_d_mclk_sel,
- &mst_e_mclk_sel,
- &mst_f_mclk_sel,
- &mst_a_mclk_div,
- &mst_b_mclk_div,
- &mst_c_mclk_div,
- &mst_d_mclk_div,
- &mst_e_mclk_div,
- &mst_f_mclk_div,
- &mst_a_mclk,
- &mst_b_mclk,
- &mst_c_mclk,
- &mst_d_mclk,
- &mst_e_mclk,
- &mst_f_mclk,
- &spdifout_clk_sel,
- &spdifout_clk_div,
- &spdifout_clk,
- &spdifin_clk_sel,
- &spdifin_clk_div,
- &spdifin_clk,
- &pdm_dclk_sel,
- &pdm_dclk_div,
- &pdm_dclk,
- &pdm_sysclk_sel,
- &pdm_sysclk_div,
- &pdm_sysclk,
- &mst_a_sclk_pre_en,
- &mst_b_sclk_pre_en,
- &mst_c_sclk_pre_en,
- &mst_d_sclk_pre_en,
- &mst_e_sclk_pre_en,
- &mst_f_sclk_pre_en,
- &mst_a_sclk_div,
- &mst_b_sclk_div,
- &mst_c_sclk_div,
- &mst_d_sclk_div,
- &mst_e_sclk_div,
- &mst_f_sclk_div,
- &mst_a_sclk_post_en,
- &mst_b_sclk_post_en,
- &mst_c_sclk_post_en,
- &mst_d_sclk_post_en,
- &mst_e_sclk_post_en,
- &mst_f_sclk_post_en,
- &mst_a_sclk,
- &mst_b_sclk,
- &mst_c_sclk,
- &mst_d_sclk,
- &mst_e_sclk,
- &mst_f_sclk,
- &mst_a_lrclk_div,
- &mst_b_lrclk_div,
- &mst_c_lrclk_div,
- &mst_d_lrclk_div,
- &mst_e_lrclk_div,
- &mst_f_lrclk_div,
- &mst_a_lrclk,
- &mst_b_lrclk,
- &mst_c_lrclk,
- &mst_d_lrclk,
- &mst_e_lrclk,
- &mst_f_lrclk,
- &tdmin_a_sclk_sel,
- &tdmin_b_sclk_sel,
- &tdmin_c_sclk_sel,
- &tdmin_lb_sclk_sel,
- &tdmout_a_sclk_sel,
- &tdmout_b_sclk_sel,
- &tdmout_c_sclk_sel,
- &tdmin_a_sclk_pre_en,
- &tdmin_b_sclk_pre_en,
- &tdmin_c_sclk_pre_en,
- &tdmin_lb_sclk_pre_en,
- &tdmout_a_sclk_pre_en,
- &tdmout_b_sclk_pre_en,
- &tdmout_c_sclk_pre_en,
- &tdmin_a_sclk_post_en,
- &tdmin_b_sclk_post_en,
- &tdmin_c_sclk_post_en,
- &tdmin_lb_sclk_post_en,
- &tdmout_a_sclk_post_en,
- &tdmout_b_sclk_post_en,
- &tdmout_c_sclk_post_en,
- &tdmin_a_sclk,
- &tdmin_b_sclk,
- &tdmin_c_sclk,
- &tdmin_lb_sclk,
- &axg_tdmout_a_sclk,
- &axg_tdmout_b_sclk,
- &axg_tdmout_c_sclk,
- &tdmin_a_lrclk,
- &tdmin_b_lrclk,
- &tdmin_c_lrclk,
- &tdmin_lb_lrclk,
- &tdmout_a_lrclk,
- &tdmout_b_lrclk,
- &tdmout_c_lrclk,
-};
-
-static struct clk_regmap *const g12a_clk_regmaps[] = {
- &ddr_arb,
- &pdm,
- &tdmin_a,
- &tdmin_b,
- &tdmin_c,
- &tdmin_lb,
- &tdmout_a,
- &tdmout_b,
- &tdmout_c,
- &frddr_a,
- &frddr_b,
- &frddr_c,
- &toddr_a,
- &toddr_b,
- &toddr_c,
- &loopback,
- &spdifin,
- &spdifout,
- &resample,
- &power_detect,
- &spdifout_b,
- &mst_a_mclk_sel,
- &mst_b_mclk_sel,
- &mst_c_mclk_sel,
- &mst_d_mclk_sel,
- &mst_e_mclk_sel,
- &mst_f_mclk_sel,
- &mst_a_mclk_div,
- &mst_b_mclk_div,
- &mst_c_mclk_div,
- &mst_d_mclk_div,
- &mst_e_mclk_div,
- &mst_f_mclk_div,
- &mst_a_mclk,
- &mst_b_mclk,
- &mst_c_mclk,
- &mst_d_mclk,
- &mst_e_mclk,
- &mst_f_mclk,
- &spdifout_clk_sel,
- &spdifout_clk_div,
- &spdifout_clk,
- &spdifin_clk_sel,
- &spdifin_clk_div,
- &spdifin_clk,
- &pdm_dclk_sel,
- &pdm_dclk_div,
- &pdm_dclk,
- &pdm_sysclk_sel,
- &pdm_sysclk_div,
- &pdm_sysclk,
- &mst_a_sclk_pre_en,
- &mst_b_sclk_pre_en,
- &mst_c_sclk_pre_en,
- &mst_d_sclk_pre_en,
- &mst_e_sclk_pre_en,
- &mst_f_sclk_pre_en,
- &mst_a_sclk_div,
- &mst_b_sclk_div,
- &mst_c_sclk_div,
- &mst_d_sclk_div,
- &mst_e_sclk_div,
- &mst_f_sclk_div,
- &mst_a_sclk_post_en,
- &mst_b_sclk_post_en,
- &mst_c_sclk_post_en,
- &mst_d_sclk_post_en,
- &mst_e_sclk_post_en,
- &mst_f_sclk_post_en,
- &mst_a_sclk,
- &mst_b_sclk,
- &mst_c_sclk,
- &mst_d_sclk,
- &mst_e_sclk,
- &mst_f_sclk,
- &mst_a_lrclk_div,
- &mst_b_lrclk_div,
- &mst_c_lrclk_div,
- &mst_d_lrclk_div,
- &mst_e_lrclk_div,
- &mst_f_lrclk_div,
- &mst_a_lrclk,
- &mst_b_lrclk,
- &mst_c_lrclk,
- &mst_d_lrclk,
- &mst_e_lrclk,
- &mst_f_lrclk,
- &tdmin_a_sclk_sel,
- &tdmin_b_sclk_sel,
- &tdmin_c_sclk_sel,
- &tdmin_lb_sclk_sel,
- &tdmout_a_sclk_sel,
- &tdmout_b_sclk_sel,
- &tdmout_c_sclk_sel,
- &tdmin_a_sclk_pre_en,
- &tdmin_b_sclk_pre_en,
- &tdmin_c_sclk_pre_en,
- &tdmin_lb_sclk_pre_en,
- &tdmout_a_sclk_pre_en,
- &tdmout_b_sclk_pre_en,
- &tdmout_c_sclk_pre_en,
- &tdmin_a_sclk_post_en,
- &tdmin_b_sclk_post_en,
- &tdmin_c_sclk_post_en,
- &tdmin_lb_sclk_post_en,
- &tdmout_a_sclk_post_en,
- &tdmout_b_sclk_post_en,
- &tdmout_c_sclk_post_en,
- &tdmin_a_sclk,
- &tdmin_b_sclk,
- &tdmin_c_sclk,
- &tdmin_lb_sclk,
- &g12a_tdmout_a_sclk,
- &g12a_tdmout_b_sclk,
- &g12a_tdmout_c_sclk,
- &tdmin_a_lrclk,
- &tdmin_b_lrclk,
- &tdmin_c_lrclk,
- &tdmin_lb_lrclk,
- &tdmout_a_lrclk,
- &tdmout_b_lrclk,
- &tdmout_c_lrclk,
- &spdifout_b_clk_sel,
- &spdifout_b_clk_div,
- &spdifout_b_clk,
- &g12a_tdm_mclk_pad_0,
- &g12a_tdm_mclk_pad_1,
- &g12a_tdm_lrclk_pad_0,
- &g12a_tdm_lrclk_pad_1,
- &g12a_tdm_lrclk_pad_2,
- &g12a_tdm_sclk_pad_0,
- &g12a_tdm_sclk_pad_1,
- &g12a_tdm_sclk_pad_2,
- &toram,
- &eqdrc,
-};
-
-static struct clk_regmap *const sm1_clk_regmaps[] = {
- &ddr_arb,
- &pdm,
- &tdmin_a,
- &tdmin_b,
- &tdmin_c,
- &tdmin_lb,
- &tdmout_a,
- &tdmout_b,
- &tdmout_c,
- &frddr_a,
- &frddr_b,
- &frddr_c,
- &toddr_a,
- &toddr_b,
- &toddr_c,
- &loopback,
- &spdifin,
- &spdifout,
- &resample,
- &spdifout_b,
- &sm1_mst_a_mclk_sel,
- &sm1_mst_b_mclk_sel,
- &sm1_mst_c_mclk_sel,
- &sm1_mst_d_mclk_sel,
- &sm1_mst_e_mclk_sel,
- &sm1_mst_f_mclk_sel,
- &sm1_mst_a_mclk_div,
- &sm1_mst_b_mclk_div,
- &sm1_mst_c_mclk_div,
- &sm1_mst_d_mclk_div,
- &sm1_mst_e_mclk_div,
- &sm1_mst_f_mclk_div,
- &sm1_mst_a_mclk,
- &sm1_mst_b_mclk,
- &sm1_mst_c_mclk,
- &sm1_mst_d_mclk,
- &sm1_mst_e_mclk,
- &sm1_mst_f_mclk,
- &spdifout_clk_sel,
- &spdifout_clk_div,
- &spdifout_clk,
- &spdifin_clk_sel,
- &spdifin_clk_div,
- &spdifin_clk,
- &pdm_dclk_sel,
- &pdm_dclk_div,
- &pdm_dclk,
- &pdm_sysclk_sel,
- &pdm_sysclk_div,
- &pdm_sysclk,
- &mst_a_sclk_pre_en,
- &mst_b_sclk_pre_en,
- &mst_c_sclk_pre_en,
- &mst_d_sclk_pre_en,
- &mst_e_sclk_pre_en,
- &mst_f_sclk_pre_en,
- &mst_a_sclk_div,
- &mst_b_sclk_div,
- &mst_c_sclk_div,
- &mst_d_sclk_div,
- &mst_e_sclk_div,
- &mst_f_sclk_div,
- &mst_a_sclk_post_en,
- &mst_b_sclk_post_en,
- &mst_c_sclk_post_en,
- &mst_d_sclk_post_en,
- &mst_e_sclk_post_en,
- &mst_f_sclk_post_en,
- &mst_a_sclk,
- &mst_b_sclk,
- &mst_c_sclk,
- &mst_d_sclk,
- &mst_e_sclk,
- &mst_f_sclk,
- &mst_a_lrclk_div,
- &mst_b_lrclk_div,
- &mst_c_lrclk_div,
- &mst_d_lrclk_div,
- &mst_e_lrclk_div,
- &mst_f_lrclk_div,
- &mst_a_lrclk,
- &mst_b_lrclk,
- &mst_c_lrclk,
- &mst_d_lrclk,
- &mst_e_lrclk,
- &mst_f_lrclk,
- &tdmin_a_sclk_sel,
- &tdmin_b_sclk_sel,
- &tdmin_c_sclk_sel,
- &tdmin_lb_sclk_sel,
- &tdmout_a_sclk_sel,
- &tdmout_b_sclk_sel,
- &tdmout_c_sclk_sel,
- &tdmin_a_sclk_pre_en,
- &tdmin_b_sclk_pre_en,
- &tdmin_c_sclk_pre_en,
- &tdmin_lb_sclk_pre_en,
- &tdmout_a_sclk_pre_en,
- &tdmout_b_sclk_pre_en,
- &tdmout_c_sclk_pre_en,
- &tdmin_a_sclk_post_en,
- &tdmin_b_sclk_post_en,
- &tdmin_c_sclk_post_en,
- &tdmin_lb_sclk_post_en,
- &tdmout_a_sclk_post_en,
- &tdmout_b_sclk_post_en,
- &tdmout_c_sclk_post_en,
- &tdmin_a_sclk,
- &tdmin_b_sclk,
- &tdmin_c_sclk,
- &tdmin_lb_sclk,
- &g12a_tdmout_a_sclk,
- &g12a_tdmout_b_sclk,
- &g12a_tdmout_c_sclk,
- &tdmin_a_lrclk,
- &tdmin_b_lrclk,
- &tdmin_c_lrclk,
- &tdmin_lb_lrclk,
- &tdmout_a_lrclk,
- &tdmout_b_lrclk,
- &tdmout_c_lrclk,
- &spdifout_b_clk_sel,
- &spdifout_b_clk_div,
- &spdifout_b_clk,
- &sm1_tdm_mclk_pad_0,
- &sm1_tdm_mclk_pad_1,
- &sm1_tdm_lrclk_pad_0,
- &sm1_tdm_lrclk_pad_1,
- &sm1_tdm_lrclk_pad_2,
- &sm1_tdm_sclk_pad_0,
- &sm1_tdm_sclk_pad_1,
- &sm1_tdm_sclk_pad_2,
- &sm1_aud_top,
- &toram,
- &eqdrc,
- &resample_b,
- &tovad,
- &locker,
- &spdifin_lb,
- &frddr_d,
- &toddr_d,
- &loopback_b,
- &sm1_clk81_en,
- &sm1_sysclk_a_div,
- &sm1_sysclk_a_en,
- &sm1_sysclk_b_div,
- &sm1_sysclk_b_en,
- &earcrx,
- &sm1_earcrx_cmdc_clk_sel,
- &sm1_earcrx_cmdc_clk_div,
- &sm1_earcrx_cmdc_clk,
- &sm1_earcrx_dmac_clk_sel,
- &sm1_earcrx_dmac_clk_div,
- &sm1_earcrx_dmac_clk,
-};
-
-struct axg_audio_reset_data {
- struct reset_controller_dev rstc;
- struct regmap *map;
- unsigned int offset;
-};
-
-static void axg_audio_reset_reg_and_bit(struct axg_audio_reset_data *rst,
- unsigned long id,
- unsigned int *reg,
- unsigned int *bit)
-{
- unsigned int stride = regmap_get_reg_stride(rst->map);
-
- *reg = (id / (stride * BITS_PER_BYTE)) * stride;
- *reg += rst->offset;
- *bit = id % (stride * BITS_PER_BYTE);
-}
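(Editor's worked example.) With the usual 4-byte register stride, each register spans 32 reset lines: id 7 maps to bit 7 of the register at rst->offset, while id 39 maps to bit 7 of rst->offset + 4. Whatever replaces this code presumably has to preserve that mapping.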
-
-static int axg_audio_reset_update(struct reset_controller_dev *rcdev,
- unsigned long id, bool assert)
-{
- struct axg_audio_reset_data *rst =
- container_of(rcdev, struct axg_audio_reset_data, rstc);
- unsigned int offset, bit;
-
- axg_audio_reset_reg_and_bit(rst, id, &offset, &bit);
-
- regmap_update_bits(rst->map, offset, BIT(bit),
- assert ? BIT(bit) : 0);
-
- return 0;
-}
-
-static int axg_audio_reset_status(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct axg_audio_reset_data *rst =
- container_of(rcdev, struct axg_audio_reset_data, rstc);
- unsigned int val, offset, bit;
-
- axg_audio_reset_reg_and_bit(rst, id, &offset, &bit);
-
- regmap_read(rst->map, offset, &val);
-
- return !!(val & BIT(bit));
-}
-
-static int axg_audio_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- return axg_audio_reset_update(rcdev, id, true);
-}
-
-static int axg_audio_reset_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- return axg_audio_reset_update(rcdev, id, false);
-}
-
-static int axg_audio_reset_toggle(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- int ret;
-
- ret = axg_audio_reset_assert(rcdev, id);
- if (ret)
- return ret;
-
- return axg_audio_reset_deassert(rcdev, id);
-}
-
-static const struct reset_control_ops axg_audio_rstc_ops = {
- .assert = axg_audio_reset_assert,
- .deassert = axg_audio_reset_deassert,
- .reset = axg_audio_reset_toggle,
- .status = axg_audio_reset_status,
-};
-
static struct regmap_config axg_audio_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
@@ -1763,11 +1318,8 @@ static struct regmap_config axg_audio_regmap_cfg = {
};
struct audioclk_data {
- struct clk_regmap *const *regmap_clks;
- unsigned int regmap_clk_num;
struct meson_clk_hw_data hw_clks;
- unsigned int reset_offset;
- unsigned int reset_num;
+ const char *rst_drvname;
unsigned int max_register;
};
@@ -1775,7 +1327,7 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct audioclk_data *data;
- struct axg_audio_reset_data *rst;
+ struct auxiliary_device *auxdev;
struct regmap *map;
void __iomem *regs;
struct clk_hw *hw;
@@ -1808,10 +1360,6 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
return ret;
}
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < data->regmap_clk_num; i++)
- data->regmap_clks[i]->map = map;
-
/* Take care to skip the registered input clocks */
for (i = AUD_CLKID_DDR_ARB; i < data->hw_clks.num; i++) {
const char *name;
@@ -1834,27 +1382,18 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Stop here if there is no reset */
- if (!data->reset_num)
- return 0;
-
- rst = devm_kzalloc(dev, sizeof(*rst), GFP_KERNEL);
- if (!rst)
- return -ENOMEM;
-
- rst->map = map;
- rst->offset = data->reset_offset;
- rst->rstc.nr_resets = data->reset_num;
- rst->rstc.ops = &axg_audio_rstc_ops;
- rst->rstc.of_node = dev->of_node;
- rst->rstc.owner = THIS_MODULE;
+ /* Register auxiliary reset driver when applicable */
+ if (data->rst_drvname) {
+ auxdev = __devm_auxiliary_device_create(dev, dev->driver->name,
+ data->rst_drvname, NULL, 0);
+ if (!auxdev)
+ return -ENODEV;
+ }
- return devm_reset_controller_register(dev, &rst->rstc);
+ return 0;
}
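(Editor's sketch, all names below are assumptions.) The deleted reset controller is expected to reappear as an auxiliary driver bound to the "rst-g12a"/"rst-sm1" devices created above. Assuming the auxiliary bus's usual "modname.devname" matching, the consumer side could look roughly like this; the id strings are guesses, not taken from the patch:

static int axg_audio_rst_probe(struct auxiliary_device *adev,
			       const struct auxiliary_device_id *id)
{
	/*
	 * Sketch: fetch the parent clock controller's regmap, then
	 * register a reset_controller_dev reusing the reg/bit mapping
	 * removed from this file.
	 */
	return 0;
}

static const struct auxiliary_device_id axg_audio_rst_ids[] = {
	{ .name = "axg_audio_clkc.rst-g12a" },	/* assumed */
	{ .name = "axg_audio_clkc.rst-sm1" },	/* assumed */
	{}
};
MODULE_DEVICE_TABLE(auxiliary, axg_audio_rst_ids);

static struct auxiliary_driver axg_audio_rst_driver = {
	.probe = axg_audio_rst_probe,
	.id_table = axg_audio_rst_ids,
};
module_auxiliary_driver(axg_audio_rst_driver);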
static const struct audioclk_data axg_audioclk_data = {
- .regmap_clks = axg_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps),
.hw_clks = {
.hws = axg_audio_hw_clks,
.num = ARRAY_SIZE(axg_audio_hw_clks),
@@ -1863,26 +1402,20 @@ static const struct audioclk_data axg_audioclk_data = {
};
static const struct audioclk_data g12a_audioclk_data = {
- .regmap_clks = g12a_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
.hw_clks = {
.hws = g12a_audio_hw_clks,
.num = ARRAY_SIZE(g12a_audio_hw_clks),
},
- .reset_offset = AUDIO_SW_RESET,
- .reset_num = 26,
+ .rst_drvname = "rst-g12a",
.max_register = AUDIO_CLK_SPDIFOUT_B_CTRL,
};
static const struct audioclk_data sm1_audioclk_data = {
- .regmap_clks = sm1_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(sm1_clk_regmaps),
.hw_clks = {
.hws = sm1_audio_hw_clks,
.num = ARRAY_SIZE(sm1_audio_hw_clks),
},
- .reset_offset = AUDIO_SM1_SW_RESET0,
- .reset_num = 39,
+ .rst_drvname = "rst-sm1",
.max_register = AUDIO_EARCRX_DMAC_CLK_CTRL,
};
diff --git a/drivers/clk/meson/axg-audio.h b/drivers/clk/meson/axg-audio.h
deleted file mode 100644
index 9e7765b630c9..000000000000
--- a/drivers/clk/meson/axg-audio.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/*
- * Copyright (c) 2018 BayLibre, SAS.
- * Author: Jerome Brunet <jbrunet@baylibre.com>
- */
-
-#ifndef __AXG_AUDIO_CLKC_H
-#define __AXG_AUDIO_CLKC_H
-
-/*
- * Audio Clock register offsets
- *
- * Register offsets from the datasheet must be multiplied by 4 before
- * to get the right offset
- */
-#define AUDIO_CLK_GATE_EN 0x000
-#define AUDIO_MCLK_A_CTRL 0x004
-#define AUDIO_MCLK_B_CTRL 0x008
-#define AUDIO_MCLK_C_CTRL 0x00C
-#define AUDIO_MCLK_D_CTRL 0x010
-#define AUDIO_MCLK_E_CTRL 0x014
-#define AUDIO_MCLK_F_CTRL 0x018
-#define AUDIO_MST_PAD_CTRL0 0x01c
-#define AUDIO_MST_PAD_CTRL1 0x020
-#define AUDIO_SW_RESET 0x024
-#define AUDIO_MST_A_SCLK_CTRL0 0x040
-#define AUDIO_MST_A_SCLK_CTRL1 0x044
-#define AUDIO_MST_B_SCLK_CTRL0 0x048
-#define AUDIO_MST_B_SCLK_CTRL1 0x04C
-#define AUDIO_MST_C_SCLK_CTRL0 0x050
-#define AUDIO_MST_C_SCLK_CTRL1 0x054
-#define AUDIO_MST_D_SCLK_CTRL0 0x058
-#define AUDIO_MST_D_SCLK_CTRL1 0x05C
-#define AUDIO_MST_E_SCLK_CTRL0 0x060
-#define AUDIO_MST_E_SCLK_CTRL1 0x064
-#define AUDIO_MST_F_SCLK_CTRL0 0x068
-#define AUDIO_MST_F_SCLK_CTRL1 0x06C
-#define AUDIO_CLK_TDMIN_A_CTRL 0x080
-#define AUDIO_CLK_TDMIN_B_CTRL 0x084
-#define AUDIO_CLK_TDMIN_C_CTRL 0x088
-#define AUDIO_CLK_TDMIN_LB_CTRL 0x08C
-#define AUDIO_CLK_TDMOUT_A_CTRL 0x090
-#define AUDIO_CLK_TDMOUT_B_CTRL 0x094
-#define AUDIO_CLK_TDMOUT_C_CTRL 0x098
-#define AUDIO_CLK_SPDIFIN_CTRL 0x09C
-#define AUDIO_CLK_SPDIFOUT_CTRL 0x0A0
-#define AUDIO_CLK_RESAMPLE_CTRL 0x0A4
-#define AUDIO_CLK_LOCKER_CTRL 0x0A8
-#define AUDIO_CLK_PDMIN_CTRL0 0x0AC
-#define AUDIO_CLK_PDMIN_CTRL1 0x0B0
-#define AUDIO_CLK_SPDIFOUT_B_CTRL 0x0B4
-
-/* SM1 introduce new register and some shifts :( */
-#define AUDIO_CLK_GATE_EN1 0x004
-#define AUDIO_SM1_MCLK_A_CTRL 0x008
-#define AUDIO_SM1_MCLK_B_CTRL 0x00C
-#define AUDIO_SM1_MCLK_C_CTRL 0x010
-#define AUDIO_SM1_MCLK_D_CTRL 0x014
-#define AUDIO_SM1_MCLK_E_CTRL 0x018
-#define AUDIO_SM1_MCLK_F_CTRL 0x01C
-#define AUDIO_SM1_MST_PAD_CTRL0 0x020
-#define AUDIO_SM1_MST_PAD_CTRL1 0x024
-#define AUDIO_SM1_SW_RESET0 0x028
-#define AUDIO_SM1_SW_RESET1 0x02C
-#define AUDIO_CLK81_CTRL 0x030
-#define AUDIO_CLK81_EN 0x034
-#define AUDIO_EARCRX_CMDC_CLK_CTRL 0x0D0
-#define AUDIO_EARCRX_DMAC_CLK_CTRL 0x0D4
-
-#endif /*__AXG_AUDIO_CLKC_H */
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 448eece246ca..0a25c649ef1d 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -18,11 +18,96 @@
#include "clk-regmap.h"
#include "clk-pll.h"
#include "clk-mpll.h"
-#include "axg.h"
-#include "meson-eeclk.h"
+#include "meson-clkc-utils.h"
#include <dt-bindings/clock/axg-clkc.h>
+#define HHI_GP0_PLL_CNTL 0x40
+#define HHI_GP0_PLL_CNTL2 0x44
+#define HHI_GP0_PLL_CNTL3 0x48
+#define HHI_GP0_PLL_CNTL4 0x4c
+#define HHI_GP0_PLL_CNTL5 0x50
+#define HHI_GP0_PLL_STS 0x54
+#define HHI_GP0_PLL_CNTL1 0x58
+#define HHI_HIFI_PLL_CNTL 0x80
+#define HHI_HIFI_PLL_CNTL2 0x84
+#define HHI_HIFI_PLL_CNTL3 0x88
+#define HHI_HIFI_PLL_CNTL4 0x8C
+#define HHI_HIFI_PLL_CNTL5 0x90
+#define HHI_HIFI_PLL_STS 0x94
+#define HHI_HIFI_PLL_CNTL1 0x98
+
+#define HHI_XTAL_DIVN_CNTL 0xbc
+#define HHI_GCLK2_MPEG0 0xc0
+#define HHI_GCLK2_MPEG1 0xc4
+#define HHI_GCLK2_MPEG2 0xc8
+#define HHI_GCLK2_OTHER 0xd0
+#define HHI_GCLK2_AO 0xd4
+#define HHI_PCIE_PLL_CNTL 0xd8
+#define HHI_PCIE_PLL_CNTL1 0xdC
+#define HHI_PCIE_PLL_CNTL2 0xe0
+#define HHI_PCIE_PLL_CNTL3 0xe4
+#define HHI_PCIE_PLL_CNTL4 0xe8
+#define HHI_PCIE_PLL_CNTL5 0xec
+#define HHI_PCIE_PLL_CNTL6 0xf0
+#define HHI_PCIE_PLL_STS 0xf4
+
+#define HHI_MEM_PD_REG0 0x100
+#define HHI_VPU_MEM_PD_REG0 0x104
+#define HHI_VIID_CLK_DIV 0x128
+#define HHI_VIID_CLK_CNTL 0x12c
+
+#define HHI_GCLK_MPEG0 0x140
+#define HHI_GCLK_MPEG1 0x144
+#define HHI_GCLK_MPEG2 0x148
+#define HHI_GCLK_OTHER 0x150
+#define HHI_GCLK_AO 0x154
+#define HHI_SYS_CPU_CLK_CNTL1 0x15c
+#define HHI_SYS_CPU_RESET_CNTL 0x160
+#define HHI_VID_CLK_DIV 0x164
+#define HHI_SPICC_HCLK_CNTL 0x168
+
+#define HHI_MPEG_CLK_CNTL 0x174
+#define HHI_VID_CLK_CNTL 0x17c
+#define HHI_TS_CLK_CNTL 0x190
+#define HHI_VID_CLK_CNTL2 0x194
+#define HHI_SYS_CPU_CLK_CNTL0 0x19c
+#define HHI_VID_PLL_CLK_DIV 0x1a0
+#define HHI_VPU_CLK_CNTL 0x1bC
+
+#define HHI_VAPBCLK_CNTL 0x1F4
+
+#define HHI_GEN_CLK_CNTL 0x228
+
+#define HHI_VDIN_MEAS_CLK_CNTL 0x250
+#define HHI_NAND_CLK_CNTL 0x25C
+#define HHI_SD_EMMC_CLK_CNTL 0x264
+
+#define HHI_MPLL_CNTL 0x280
+#define HHI_MPLL_CNTL2 0x284
+#define HHI_MPLL_CNTL3 0x288
+#define HHI_MPLL_CNTL4 0x28C
+#define HHI_MPLL_CNTL5 0x290
+#define HHI_MPLL_CNTL6 0x294
+#define HHI_MPLL_CNTL7 0x298
+#define HHI_MPLL_CNTL8 0x29C
+#define HHI_MPLL_CNTL9 0x2A0
+#define HHI_MPLL_CNTL10 0x2A4
+
+#define HHI_MPLL3_CNTL0 0x2E0
+#define HHI_MPLL3_CNTL1 0x2E4
+#define HHI_PLL_TOP_MISC 0x2E8
+
+#define HHI_SYS_PLL_CNTL1 0x2FC
+#define HHI_SYS_PLL_CNTL 0x300
+#define HHI_SYS_PLL_CNTL2 0x304
+#define HHI_SYS_PLL_CNTL3 0x308
+#define HHI_SYS_PLL_CNTL4 0x30c
+#define HHI_SYS_PLL_CNTL5 0x310
+#define HHI_SYS_PLL_STS 0x314
+#define HHI_DPLL_TOP_I 0x318
+#define HHI_DPLL_TOP2_I 0x31C
+
static struct clk_regmap axg_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
.en = {
@@ -248,7 +333,7 @@ static struct clk_regmap axg_gp0_pll = {
},
};
-static const struct reg_sequence axg_hifi_init_regs[] = {
+static const struct reg_sequence axg_hifi_pll_init_regs[] = {
{ .reg = HHI_HIFI_PLL_CNTL1, .def = 0xc084b000 },
{ .reg = HHI_HIFI_PLL_CNTL2, .def = 0xb75020be },
{ .reg = HHI_HIFI_PLL_CNTL3, .def = 0x0a6a3a88 },
@@ -289,8 +374,8 @@ static struct clk_regmap axg_hifi_pll_dco = {
.width = 1,
},
.table = axg_gp0_pll_params_table,
- .init_regs = axg_hifi_init_regs,
- .init_count = ARRAY_SIZE(axg_hifi_init_regs),
+ .init_regs = axg_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(axg_hifi_pll_init_regs),
.flags = CLK_MESON_PLL_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
@@ -695,7 +780,7 @@ static const struct pll_params_table axg_pcie_pll_params_table[] = {
{ /* sentinel */ },
};
-static const struct reg_sequence axg_pcie_init_regs[] = {
+static const struct reg_sequence axg_pcie_pll_init_regs[] = {
{ .reg = HHI_PCIE_PLL_CNTL1, .def = 0x0084a2aa },
{ .reg = HHI_PCIE_PLL_CNTL2, .def = 0xb75020be },
{ .reg = HHI_PCIE_PLL_CNTL3, .def = 0x0a47488e },
@@ -738,8 +823,8 @@ static struct clk_regmap axg_pcie_pll_dco = {
.width = 1,
},
.table = axg_pcie_pll_params_table,
- .init_regs = axg_pcie_init_regs,
- .init_count = ARRAY_SIZE(axg_pcie_init_regs),
+ .init_regs = axg_pcie_pll_init_regs,
+ .init_count = ARRAY_SIZE(axg_pcie_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "pcie_pll_dco",
@@ -850,8 +935,9 @@ static struct clk_regmap axg_pcie_cml_en1 = {
},
};
-static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
-static const struct clk_parent_data clk81_parent_data[] = {
+/* clk81 is often referred to as "mpeg_clk" */
+static u32 clk81_parents_val_table[] = { 0, 2, 3, 4, 5, 6, 7 };
+static const struct clk_parent_data clk81_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &axg_fclk_div7.hw },
{ .hw = &axg_mpll1.hw },
@@ -861,32 +947,32 @@ static const struct clk_parent_data clk81_parent_data[] = {
{ .hw = &axg_fclk_div5.hw },
};
-static struct clk_regmap axg_mpeg_clk_sel = {
+static struct clk_regmap axg_clk81_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MPEG_CLK_CNTL,
.mask = 0x7,
.shift = 12,
- .table = mux_table_clk81,
+ .table = clk81_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_sel",
+ .name = "clk81_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = clk81_parent_data,
- .num_parents = ARRAY_SIZE(clk81_parent_data),
+ .parent_data = clk81_parents,
+ .num_parents = ARRAY_SIZE(clk81_parents),
},
};
-static struct clk_regmap axg_mpeg_clk_div = {
+static struct clk_regmap axg_clk81_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_div",
+ .name = "clk81_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_mpeg_clk_sel.hw
+ &axg_clk81_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -902,14 +988,14 @@ static struct clk_regmap axg_clk81 = {
.name = "clk81",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_mpeg_clk_div.hw
+ &axg_clk81_div.hw
},
.num_parents = 1,
.flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
},
};
-static const struct clk_parent_data axg_sd_emmc_clk0_parent_data[] = {
+static const struct clk_parent_data axg_sd_emmc_clk0_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &axg_fclk_div2.hw },
{ .hw = &axg_fclk_div3.hw },
@@ -918,7 +1004,7 @@ static const struct clk_parent_data axg_sd_emmc_clk0_parent_data[] = {
/*
* Following these parent clocks, we should also have had mpll2, mpll3
* and gp0_pll but these clocks are too precious to be used here. All
- * the necessary rates for MMC and NAND operation can be acheived using
+ * the necessary rates for MMC and NAND operation can be achieved using
* xtal or fclk_div clocks
*/
};
@@ -933,8 +1019,8 @@ static struct clk_regmap axg_sd_emmc_b_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = axg_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parent_data),
+ .parent_data = axg_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -983,8 +1069,8 @@ static struct clk_regmap axg_sd_emmc_c_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = axg_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parent_data),
+ .parent_data = axg_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1025,7 +1111,7 @@ static struct clk_regmap axg_sd_emmc_c_clk0 = {
/* VPU Clock */
-static const struct clk_hw *axg_vpu_parent_hws[] = {
+static const struct clk_hw *axg_vpu_parents[] = {
&axg_fclk_div4.hw,
&axg_fclk_div3.hw,
&axg_fclk_div5.hw,
@@ -1041,8 +1127,8 @@ static struct clk_regmap axg_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .parent_hws = axg_vpu_parents,
+ .num_parents = ARRAY_SIZE(axg_vpu_parents),
 /* We need a specific parent for the VPU clock source; let it be set in DT */
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -1090,8 +1176,8 @@ static struct clk_regmap axg_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .parent_hws = axg_vpu_parents,
+ .num_parents = ARRAY_SIZE(axg_vpu_parents),
 /* We need a specific parent for the VPU clock source; let it be set in DT */
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -1159,8 +1245,8 @@ static struct clk_regmap axg_vapb_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .parent_hws = axg_vpu_parents,
+ .num_parents = ARRAY_SIZE(axg_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1207,8 +1293,8 @@ static struct clk_regmap axg_vapb_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .parent_hws = axg_vpu_parents,
+ .num_parents = ARRAY_SIZE(axg_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1280,7 +1366,7 @@ static struct clk_regmap axg_vapb = {
/* Video Clocks */
-static const struct clk_hw *axg_vclk_parent_hws[] = {
+static const struct clk_hw *axg_vclk_parents[] = {
&axg_gp0_pll.hw,
&axg_fclk_div4.hw,
&axg_fclk_div3.hw,
@@ -1299,8 +1385,8 @@ static struct clk_regmap axg_vclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vclk_parent_hws),
+ .parent_hws = axg_vclk_parents,
+ .num_parents = ARRAY_SIZE(axg_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1314,8 +1400,8 @@ static struct clk_regmap axg_vclk2_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vclk_parent_hws),
+ .parent_hws = axg_vclk_parents,
+ .num_parents = ARRAY_SIZE(axg_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1654,8 +1740,8 @@ static struct clk_fixed_factor axg_vclk2_div12 = {
},
};
-static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *axg_cts_parent_hws[] = {
+static u32 axg_cts_encl_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *axg_cts_encl_parents[] = {
&axg_vclk_div1.hw,
&axg_vclk_div2.hw,
&axg_vclk_div4.hw,
@@ -1673,13 +1759,13 @@ static struct clk_regmap axg_cts_encl_sel = {
.offset = HHI_VIID_CLK_DIV,
.mask = 0xf,
.shift = 12,
- .table = mux_table_cts_sel,
+ .table = axg_cts_encl_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encl_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_cts_parent_hws,
- .num_parents = ARRAY_SIZE(axg_cts_parent_hws),
+ .parent_hws = axg_cts_encl_parents,
+ .num_parents = ARRAY_SIZE(axg_cts_encl_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1702,8 +1788,8 @@ static struct clk_regmap axg_cts_encl = {
/* MIPI DSI Host Clock */
-static u32 mux_table_axg_vdin_meas[] = { 0, 1, 2, 3, 6, 7 };
-static const struct clk_parent_data axg_vdin_meas_parent_data[] = {
+static u32 axg_vdin_meas_parents_val_table[] = { 0, 1, 2, 3, 6, 7 };
+static const struct clk_parent_data axg_vdin_meas_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &axg_fclk_div4.hw },
{ .hw = &axg_fclk_div3.hw },
@@ -1718,13 +1804,13 @@ static struct clk_regmap axg_vdin_meas_sel = {
.mask = 0x7,
.shift = 21,
.flags = CLK_MUX_ROUND_CLOSEST,
- .table = mux_table_axg_vdin_meas,
+ .table = axg_vdin_meas_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "vdin_meas_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = axg_vdin_meas_parent_data,
- .num_parents = ARRAY_SIZE(axg_vdin_meas_parent_data),
+ .parent_data = axg_vdin_meas_parents,
+ .num_parents = ARRAY_SIZE(axg_vdin_meas_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1760,9 +1846,8 @@ static struct clk_regmap axg_vdin_meas = {
},
};
-static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
- 9, 10, 11, 13, 14, };
-static const struct clk_parent_data gen_clk_parent_data[] = {
+static u32 gen_clk_parents_val_table[] = { 0, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, };
+static const struct clk_parent_data gen_clk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &axg_hifi_pll.hw },
{ .hw = &axg_mpll0.hw },
@@ -1781,7 +1866,7 @@ static struct clk_regmap axg_gen_clk_sel = {
.offset = HHI_GEN_CLK_CNTL,
.mask = 0xf,
.shift = 12,
- .table = mux_table_gen_clk,
+ .table = gen_clk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "gen_clk_sel",
@@ -1792,8 +1877,8 @@ static struct clk_regmap axg_gen_clk_sel = {
* hifi_pll, mpll0, mpll1, mpll2, mpll3, fdiv4,
* fdiv3, fdiv5, [cts_msr_clk], fdiv7, gp0_pll
*/
- .parent_data = gen_clk_parent_data,
- .num_parents = ARRAY_SIZE(gen_clk_parent_data),
+ .parent_data = gen_clk_parents,
+ .num_parents = ARRAY_SIZE(gen_clk_parents),
},
};
@@ -1830,59 +1915,71 @@ static struct clk_regmap axg_gen_clk = {
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &axg_clk81.hw)
-
-/* Everything Else (EE) domain gates */
-static MESON_GATE(axg_ddr, HHI_GCLK_MPEG0, 0);
-static MESON_GATE(axg_audio_locker, HHI_GCLK_MPEG0, 2);
-static MESON_GATE(axg_mipi_dsi_host, HHI_GCLK_MPEG0, 3);
-static MESON_GATE(axg_isa, HHI_GCLK_MPEG0, 5);
-static MESON_GATE(axg_pl301, HHI_GCLK_MPEG0, 6);
-static MESON_GATE(axg_periphs, HHI_GCLK_MPEG0, 7);
-static MESON_GATE(axg_spicc_0, HHI_GCLK_MPEG0, 8);
-static MESON_GATE(axg_i2c, HHI_GCLK_MPEG0, 9);
-static MESON_GATE(axg_rng0, HHI_GCLK_MPEG0, 12);
-static MESON_GATE(axg_uart0, HHI_GCLK_MPEG0, 13);
-static MESON_GATE(axg_mipi_dsi_phy, HHI_GCLK_MPEG0, 14);
-static MESON_GATE(axg_spicc_1, HHI_GCLK_MPEG0, 15);
-static MESON_GATE(axg_pcie_a, HHI_GCLK_MPEG0, 16);
-static MESON_GATE(axg_pcie_b, HHI_GCLK_MPEG0, 17);
-static MESON_GATE(axg_hiu_reg, HHI_GCLK_MPEG0, 19);
-static MESON_GATE(axg_assist_misc, HHI_GCLK_MPEG0, 23);
-static MESON_GATE(axg_emmc_b, HHI_GCLK_MPEG0, 25);
-static MESON_GATE(axg_emmc_c, HHI_GCLK_MPEG0, 26);
-static MESON_GATE(axg_dma, HHI_GCLK_MPEG0, 27);
-static MESON_GATE(axg_spi, HHI_GCLK_MPEG0, 30);
-
-static MESON_GATE(axg_audio, HHI_GCLK_MPEG1, 0);
-static MESON_GATE(axg_eth_core, HHI_GCLK_MPEG1, 3);
-static MESON_GATE(axg_uart1, HHI_GCLK_MPEG1, 16);
-static MESON_GATE(axg_g2d, HHI_GCLK_MPEG1, 20);
-static MESON_GATE(axg_usb0, HHI_GCLK_MPEG1, 21);
-static MESON_GATE(axg_usb1, HHI_GCLK_MPEG1, 22);
-static MESON_GATE(axg_reset, HHI_GCLK_MPEG1, 23);
-static MESON_GATE(axg_usb_general, HHI_GCLK_MPEG1, 26);
-static MESON_GATE(axg_ahb_arb0, HHI_GCLK_MPEG1, 29);
-static MESON_GATE(axg_efuse, HHI_GCLK_MPEG1, 30);
-static MESON_GATE(axg_boot_rom, HHI_GCLK_MPEG1, 31);
-
-static MESON_GATE(axg_ahb_data_bus, HHI_GCLK_MPEG2, 1);
-static MESON_GATE(axg_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
-static MESON_GATE(axg_usb1_to_ddr, HHI_GCLK_MPEG2, 8);
-static MESON_GATE(axg_usb0_to_ddr, HHI_GCLK_MPEG2, 9);
-static MESON_GATE(axg_mmc_pclk, HHI_GCLK_MPEG2, 11);
-static MESON_GATE(axg_vpu_intr, HHI_GCLK_MPEG2, 25);
-static MESON_GATE(axg_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
-static MESON_GATE(axg_gic, HHI_GCLK_MPEG2, 30);
+static const struct clk_parent_data axg_pclk_parents = { .hw = &axg_clk81.hw };
+
+#define AXG_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(axg_##_name, _reg, _bit, &axg_pclk_parents, _flags)
+
+/*
+ * Everything Else (EE) domain gates
+ *
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historical reasons.
+ * Users are encouraged to test without it and submit changes to:
+ * - remove the flag if not necessary
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ * if appropriate.
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ * for a particular clock.
+ */
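+
+/*
+ * Illustration only, not part of the functional change: assuming the
+ * MESON_PCLK() helper in meson-clkc-utils.h keeps the shape of the
+ * open-coded gates it replaces (compare the C3_CLK_GATE macro removed from
+ * c3-peripherals.c below), AXG_PCLK(ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED)
+ * should declare roughly the following; the exact .name string and any extra
+ * flag handling depend on the helper definition, which is outside this hunk:
+ *
+ *   static struct clk_regmap axg_ddr = {
+ *           .data = &(struct clk_regmap_gate_data) {
+ *                   .offset = HHI_GCLK_MPEG0,
+ *                   .bit_idx = 0,
+ *           },
+ *           .hw.init = &(struct clk_init_data) {
+ *                   .name = "axg_ddr",
+ *                   .ops = &clk_regmap_gate_ops,
+ *                   .parent_data = &axg_pclk_parents,
+ *                   .num_parents = 1,
+ *                   .flags = CLK_IGNORE_UNUSED,
+ *           },
+ *   };
+ */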
+static AXG_PCLK(ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED);
+static AXG_PCLK(audio_locker, HHI_GCLK_MPEG0, 2, CLK_IGNORE_UNUSED);
+static AXG_PCLK(mipi_dsi_host, HHI_GCLK_MPEG0, 3, CLK_IGNORE_UNUSED);
+static AXG_PCLK(isa, HHI_GCLK_MPEG0, 5, CLK_IGNORE_UNUSED);
+static AXG_PCLK(pl301, HHI_GCLK_MPEG0, 6, CLK_IGNORE_UNUSED);
+static AXG_PCLK(periphs, HHI_GCLK_MPEG0, 7, CLK_IGNORE_UNUSED);
+static AXG_PCLK(spicc_0, HHI_GCLK_MPEG0, 8, CLK_IGNORE_UNUSED);
+static AXG_PCLK(i2c, HHI_GCLK_MPEG0, 9, CLK_IGNORE_UNUSED);
+static AXG_PCLK(rng0, HHI_GCLK_MPEG0, 12, CLK_IGNORE_UNUSED);
+static AXG_PCLK(uart0, HHI_GCLK_MPEG0, 13, CLK_IGNORE_UNUSED);
+static AXG_PCLK(mipi_dsi_phy, HHI_GCLK_MPEG0, 14, CLK_IGNORE_UNUSED);
+static AXG_PCLK(spicc_1, HHI_GCLK_MPEG0, 15, CLK_IGNORE_UNUSED);
+static AXG_PCLK(pcie_a, HHI_GCLK_MPEG0, 16, CLK_IGNORE_UNUSED);
+static AXG_PCLK(pcie_b, HHI_GCLK_MPEG0, 17, CLK_IGNORE_UNUSED);
+static AXG_PCLK(hiu_reg, HHI_GCLK_MPEG0, 19, CLK_IGNORE_UNUSED);
+static AXG_PCLK(assist_misc, HHI_GCLK_MPEG0, 23, CLK_IGNORE_UNUSED);
+static AXG_PCLK(emmc_b, HHI_GCLK_MPEG0, 25, CLK_IGNORE_UNUSED);
+static AXG_PCLK(emmc_c, HHI_GCLK_MPEG0, 26, CLK_IGNORE_UNUSED);
+static AXG_PCLK(dma, HHI_GCLK_MPEG0, 27, CLK_IGNORE_UNUSED);
+static AXG_PCLK(spi, HHI_GCLK_MPEG0, 30, CLK_IGNORE_UNUSED);
+
+static AXG_PCLK(audio, HHI_GCLK_MPEG1, 0, CLK_IGNORE_UNUSED);
+static AXG_PCLK(eth_core, HHI_GCLK_MPEG1, 3, CLK_IGNORE_UNUSED);
+static AXG_PCLK(uart1, HHI_GCLK_MPEG1, 16, CLK_IGNORE_UNUSED);
+static AXG_PCLK(g2d, HHI_GCLK_MPEG1, 20, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb0, HHI_GCLK_MPEG1, 21, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb1, HHI_GCLK_MPEG1, 22, CLK_IGNORE_UNUSED);
+static AXG_PCLK(reset, HHI_GCLK_MPEG1, 23, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb_general, HHI_GCLK_MPEG1, 26, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ahb_arb0, HHI_GCLK_MPEG1, 29, CLK_IGNORE_UNUSED);
+static AXG_PCLK(efuse, HHI_GCLK_MPEG1, 30, CLK_IGNORE_UNUSED);
+static AXG_PCLK(boot_rom, HHI_GCLK_MPEG1, 31, CLK_IGNORE_UNUSED);
+
+static AXG_PCLK(ahb_data_bus, HHI_GCLK_MPEG2, 1, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ahb_ctrl_bus, HHI_GCLK_MPEG2, 2, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb1_to_ddr, HHI_GCLK_MPEG2, 8, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb0_to_ddr, HHI_GCLK_MPEG2, 9, CLK_IGNORE_UNUSED);
+static AXG_PCLK(mmc_pclk, HHI_GCLK_MPEG2, 11, CLK_IGNORE_UNUSED);
+static AXG_PCLK(vpu_intr, HHI_GCLK_MPEG2, 25, CLK_IGNORE_UNUSED);
+static AXG_PCLK(sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26, CLK_IGNORE_UNUSED);
+static AXG_PCLK(gic, HHI_GCLK_MPEG2, 30, CLK_IGNORE_UNUSED);
/* Always On (AO) domain gates */
-static MESON_GATE(axg_ao_media_cpu, HHI_GCLK_AO, 0);
-static MESON_GATE(axg_ao_ahb_sram, HHI_GCLK_AO, 1);
-static MESON_GATE(axg_ao_ahb_bus, HHI_GCLK_AO, 2);
-static MESON_GATE(axg_ao_iface, HHI_GCLK_AO, 3);
-static MESON_GATE(axg_ao_i2c, HHI_GCLK_AO, 4);
+static AXG_PCLK(ao_media_cpu, HHI_GCLK_AO, 0, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ao_ahb_sram, HHI_GCLK_AO, 1, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ao_ahb_bus, HHI_GCLK_AO, 2, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ao_iface, HHI_GCLK_AO, 3, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ao_i2c, HHI_GCLK_AO, 4, CLK_IGNORE_UNUSED);
/* Array of all clocks provided by this provider */
@@ -1895,8 +1992,8 @@ static struct clk_hw *axg_hw_clks[] = {
[CLKID_FCLK_DIV5] = &axg_fclk_div5.hw,
[CLKID_FCLK_DIV7] = &axg_fclk_div7.hw,
[CLKID_GP0_PLL] = &axg_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &axg_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &axg_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &axg_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &axg_clk81_div.hw,
[CLKID_CLK81] = &axg_clk81.hw,
[CLKID_MPLL0] = &axg_mpll0.hw,
[CLKID_MPLL1] = &axg_mpll1.hw,
@@ -2025,159 +2122,27 @@ static struct clk_hw *axg_hw_clks[] = {
[CLKID_VDIN_MEAS] = &axg_vdin_meas.hw,
};
-/* Convenience table to populate regmap in .probe */
-static struct clk_regmap *const axg_clk_regmaps[] = {
- &axg_clk81,
- &axg_ddr,
- &axg_audio_locker,
- &axg_mipi_dsi_host,
- &axg_isa,
- &axg_pl301,
- &axg_periphs,
- &axg_spicc_0,
- &axg_i2c,
- &axg_rng0,
- &axg_uart0,
- &axg_mipi_dsi_phy,
- &axg_spicc_1,
- &axg_pcie_a,
- &axg_pcie_b,
- &axg_hiu_reg,
- &axg_assist_misc,
- &axg_emmc_b,
- &axg_emmc_c,
- &axg_dma,
- &axg_spi,
- &axg_audio,
- &axg_eth_core,
- &axg_uart1,
- &axg_g2d,
- &axg_usb0,
- &axg_usb1,
- &axg_reset,
- &axg_usb_general,
- &axg_ahb_arb0,
- &axg_efuse,
- &axg_boot_rom,
- &axg_ahb_data_bus,
- &axg_ahb_ctrl_bus,
- &axg_usb1_to_ddr,
- &axg_usb0_to_ddr,
- &axg_mmc_pclk,
- &axg_vpu_intr,
- &axg_sec_ahb_ahb3_bridge,
- &axg_gic,
- &axg_ao_media_cpu,
- &axg_ao_ahb_sram,
- &axg_ao_ahb_bus,
- &axg_ao_iface,
- &axg_ao_i2c,
- &axg_sd_emmc_b_clk0,
- &axg_sd_emmc_c_clk0,
- &axg_mpeg_clk_div,
- &axg_sd_emmc_b_clk0_div,
- &axg_sd_emmc_c_clk0_div,
- &axg_mpeg_clk_sel,
- &axg_sd_emmc_b_clk0_sel,
- &axg_sd_emmc_c_clk0_sel,
- &axg_mpll0,
- &axg_mpll1,
- &axg_mpll2,
- &axg_mpll3,
- &axg_mpll0_div,
- &axg_mpll1_div,
- &axg_mpll2_div,
- &axg_mpll3_div,
- &axg_fixed_pll,
- &axg_sys_pll,
- &axg_gp0_pll,
- &axg_hifi_pll,
- &axg_mpll_prediv,
- &axg_fclk_div2,
- &axg_fclk_div3,
- &axg_fclk_div4,
- &axg_fclk_div5,
- &axg_fclk_div7,
- &axg_pcie_pll_dco,
- &axg_pcie_pll_od,
- &axg_pcie_pll,
- &axg_pcie_mux,
- &axg_pcie_ref,
- &axg_pcie_cml_en0,
- &axg_pcie_cml_en1,
- &axg_gen_clk_sel,
- &axg_gen_clk_div,
- &axg_gen_clk,
- &axg_fixed_pll_dco,
- &axg_sys_pll_dco,
- &axg_gp0_pll_dco,
- &axg_hifi_pll_dco,
- &axg_pcie_pll_dco,
- &axg_pcie_pll_od,
- &axg_vpu_0_div,
- &axg_vpu_0_sel,
- &axg_vpu_0,
- &axg_vpu_1_div,
- &axg_vpu_1_sel,
- &axg_vpu_1,
- &axg_vpu,
- &axg_vapb_0_div,
- &axg_vapb_0_sel,
- &axg_vapb_0,
- &axg_vapb_1_div,
- &axg_vapb_1_sel,
- &axg_vapb_1,
- &axg_vapb_sel,
- &axg_vapb,
- &axg_vclk,
- &axg_vclk2,
- &axg_vclk_sel,
- &axg_vclk2_sel,
- &axg_vclk_input,
- &axg_vclk2_input,
- &axg_vclk_div,
- &axg_vclk_div1,
- &axg_vclk2_div,
- &axg_vclk2_div1,
- &axg_vclk_div2_en,
- &axg_vclk_div4_en,
- &axg_vclk_div6_en,
- &axg_vclk_div12_en,
- &axg_vclk2_div2_en,
- &axg_vclk2_div4_en,
- &axg_vclk2_div6_en,
- &axg_vclk2_div12_en,
- &axg_cts_encl_sel,
- &axg_cts_encl,
- &axg_vdin_meas_sel,
- &axg_vdin_meas_div,
- &axg_vdin_meas,
-};
-
-static const struct meson_eeclkc_data axg_clkc_data = {
- .regmap_clks = axg_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps),
+static const struct meson_clkc_data axg_clkc_data = {
.hw_clks = {
.hws = axg_hw_clks,
.num = ARRAY_SIZE(axg_hw_clks),
},
};
-
-static const struct of_device_id clkc_match_table[] = {
+static const struct of_device_id axg_clkc_match_table[] = {
{ .compatible = "amlogic,axg-clkc", .data = &axg_clkc_data },
{}
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, axg_clkc_match_table);
-static struct platform_driver axg_driver = {
- .probe = meson_eeclkc_probe,
+static struct platform_driver axg_clkc_driver = {
+ .probe = meson_clkc_syscon_probe,
.driver = {
.name = "axg-clkc",
- .of_match_table = clkc_match_table,
+ .of_match_table = axg_clkc_match_table,
},
};
-module_platform_driver(axg_driver);
+module_platform_driver(axg_clkc_driver);
MODULE_DESCRIPTION("Amlogic AXG Main Clock Controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/axg.h b/drivers/clk/meson/axg.h
deleted file mode 100644
index 624d8d3ce7c4..000000000000
--- a/drivers/clk/meson/axg.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
-/*
- * Copyright (c) 2016 AmLogic, Inc.
- * Author: Michael Turquette <mturquette@baylibre.com>
- *
- * Copyright (c) 2017 Amlogic, inc.
- * Author: Qiufang Dai <qiufang.dai@amlogic.com>
- *
- */
-#ifndef __AXG_H
-#define __AXG_H
-
-/*
- * Clock controller register offsets
- *
- * Register offsets from the data sheet must be multiplied by 4 before
- * adding them to the base address to get the right value.
- */
-#define HHI_GP0_PLL_CNTL 0x40
-#define HHI_GP0_PLL_CNTL2 0x44
-#define HHI_GP0_PLL_CNTL3 0x48
-#define HHI_GP0_PLL_CNTL4 0x4c
-#define HHI_GP0_PLL_CNTL5 0x50
-#define HHI_GP0_PLL_STS 0x54
-#define HHI_GP0_PLL_CNTL1 0x58
-#define HHI_HIFI_PLL_CNTL 0x80
-#define HHI_HIFI_PLL_CNTL2 0x84
-#define HHI_HIFI_PLL_CNTL3 0x88
-#define HHI_HIFI_PLL_CNTL4 0x8C
-#define HHI_HIFI_PLL_CNTL5 0x90
-#define HHI_HIFI_PLL_STS 0x94
-#define HHI_HIFI_PLL_CNTL1 0x98
-
-#define HHI_XTAL_DIVN_CNTL 0xbc
-#define HHI_GCLK2_MPEG0 0xc0
-#define HHI_GCLK2_MPEG1 0xc4
-#define HHI_GCLK2_MPEG2 0xc8
-#define HHI_GCLK2_OTHER 0xd0
-#define HHI_GCLK2_AO 0xd4
-#define HHI_PCIE_PLL_CNTL 0xd8
-#define HHI_PCIE_PLL_CNTL1 0xdC
-#define HHI_PCIE_PLL_CNTL2 0xe0
-#define HHI_PCIE_PLL_CNTL3 0xe4
-#define HHI_PCIE_PLL_CNTL4 0xe8
-#define HHI_PCIE_PLL_CNTL5 0xec
-#define HHI_PCIE_PLL_CNTL6 0xf0
-#define HHI_PCIE_PLL_STS 0xf4
-
-#define HHI_MEM_PD_REG0 0x100
-#define HHI_VPU_MEM_PD_REG0 0x104
-#define HHI_VIID_CLK_DIV 0x128
-#define HHI_VIID_CLK_CNTL 0x12c
-
-#define HHI_GCLK_MPEG0 0x140
-#define HHI_GCLK_MPEG1 0x144
-#define HHI_GCLK_MPEG2 0x148
-#define HHI_GCLK_OTHER 0x150
-#define HHI_GCLK_AO 0x154
-#define HHI_SYS_CPU_CLK_CNTL1 0x15c
-#define HHI_SYS_CPU_RESET_CNTL 0x160
-#define HHI_VID_CLK_DIV 0x164
-#define HHI_SPICC_HCLK_CNTL 0x168
-
-#define HHI_MPEG_CLK_CNTL 0x174
-#define HHI_VID_CLK_CNTL 0x17c
-#define HHI_TS_CLK_CNTL 0x190
-#define HHI_VID_CLK_CNTL2 0x194
-#define HHI_SYS_CPU_CLK_CNTL0 0x19c
-#define HHI_VID_PLL_CLK_DIV 0x1a0
-#define HHI_VPU_CLK_CNTL 0x1bC
-
-#define HHI_VAPBCLK_CNTL 0x1F4
-
-#define HHI_GEN_CLK_CNTL 0x228
-
-#define HHI_VDIN_MEAS_CLK_CNTL 0x250
-#define HHI_NAND_CLK_CNTL 0x25C
-#define HHI_SD_EMMC_CLK_CNTL 0x264
-
-#define HHI_MPLL_CNTL 0x280
-#define HHI_MPLL_CNTL2 0x284
-#define HHI_MPLL_CNTL3 0x288
-#define HHI_MPLL_CNTL4 0x28C
-#define HHI_MPLL_CNTL5 0x290
-#define HHI_MPLL_CNTL6 0x294
-#define HHI_MPLL_CNTL7 0x298
-#define HHI_MPLL_CNTL8 0x29C
-#define HHI_MPLL_CNTL9 0x2A0
-#define HHI_MPLL_CNTL10 0x2A4
-
-#define HHI_MPLL3_CNTL0 0x2E0
-#define HHI_MPLL3_CNTL1 0x2E4
-#define HHI_PLL_TOP_MISC 0x2E8
-
-#define HHI_SYS_PLL_CNTL1 0x2FC
-#define HHI_SYS_PLL_CNTL 0x300
-#define HHI_SYS_PLL_CNTL2 0x304
-#define HHI_SYS_PLL_CNTL3 0x308
-#define HHI_SYS_PLL_CNTL4 0x30c
-#define HHI_SYS_PLL_CNTL5 0x310
-#define HHI_SYS_PLL_STS 0x314
-#define HHI_DPLL_TOP_I 0x318
-#define HHI_DPLL_TOP2_I 0x31C
-
-#endif /* __AXG_H */
diff --git a/drivers/clk/meson/c3-peripherals.c b/drivers/clk/meson/c3-peripherals.c
index 2075668ed306..b158756cfee4 100644
--- a/drivers/clk/meson/c3-peripherals.c
+++ b/drivers/clk/meson/c3-peripherals.c
@@ -48,7 +48,16 @@
#define SPIFC_CLK_CTRL 0x1a0
#define NNA_CLK_CTRL 0x220
-static struct clk_regmap rtc_xtal_clkin = {
+#define C3_COMP_SEL(_name, _reg, _shift, _mask, _pdata) \
+ MESON_COMP_SEL(c3_, _name, _reg, _shift, _mask, _pdata, NULL, 0, 0)
+
+#define C3_COMP_DIV(_name, _reg, _shift, _width) \
+ MESON_COMP_DIV(c3_, _name, _reg, _shift, _width, 0, CLK_SET_RATE_PARENT)
+
+#define C3_COMP_GATE(_name, _reg, _bit) \
+ MESON_COMP_GATE(c3_, _name, _reg, _bit, CLK_SET_RATE_PARENT)
+
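+/*
+ * Illustration only: each C3_COMP_SEL/DIV/GATE line below stands in for one
+ * of the open-coded sel/div/gate structs deleted further down in this patch.
+ * Assuming MESON_COMP_SEL() keeps the shape of the struct it replaces,
+ * C3_COMP_SEL(saradc, SAR_CLK_CTRL0, 9, 0x1, c3_saradc_parents) should
+ * declare roughly (fields taken from the removed saradc_sel):
+ *
+ *   static struct clk_regmap c3_saradc_sel = {
+ *           .data = &(struct clk_regmap_mux_data) {
+ *                   .offset = SAR_CLK_CTRL0,
+ *                   .mask = 0x1,
+ *                   .shift = 9,
+ *           },
+ *           .hw.init = &(struct clk_init_data) {
+ *                   .name = "saradc_sel",
+ *                   .ops = &clk_regmap_mux_ops,
+ *                   .parent_data = c3_saradc_parents,
+ *                   .num_parents = ARRAY_SIZE(c3_saradc_parents),
+ *           },
+ *   };
+ *
+ * C3_COMP_DIV() and C3_COMP_GATE() then chain the matching "_div" and gate
+ * clocks on top with CLK_SET_RATE_PARENT, as the removed structs did.
+ */
+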
+static struct clk_regmap c3_rtc_xtal_clkin = {
.data = &(struct clk_regmap_gate_data) {
.offset = RTC_BY_OSCIN_CTRL0,
.bit_idx = 31,
@@ -63,12 +72,12 @@ static struct clk_regmap rtc_xtal_clkin = {
},
};
-static const struct meson_clk_dualdiv_param rtc_32k_div_table[] = {
+static const struct meson_clk_dualdiv_param c3_rtc_32k_div_table[] = {
{ 733, 732, 8, 11, 1 },
{ /* sentinel */ }
};
-static struct clk_regmap rtc_32k_div = {
+static struct clk_regmap c3_rtc_32k_div = {
.data = &(struct meson_clk_dualdiv_data) {
.n1 = {
.reg_off = RTC_BY_OSCIN_CTRL0,
@@ -95,39 +104,39 @@ static struct clk_regmap rtc_32k_div = {
.shift = 28,
.width = 1,
},
- .table = rtc_32k_div_table,
+ .table = c3_rtc_32k_div_table,
},
.hw.init = &(struct clk_init_data) {
.name = "rtc_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_xtal_clkin.hw
+ &c3_rtc_xtal_clkin.hw
},
.num_parents = 1,
},
};
-static const struct clk_parent_data rtc_32k_mux_parent_data[] = {
- { .hw = &rtc_32k_div.hw },
- { .hw = &rtc_xtal_clkin.hw }
+static const struct clk_parent_data c3_rtc_32k_parents[] = {
+ { .hw = &c3_rtc_32k_div.hw },
+ { .hw = &c3_rtc_xtal_clkin.hw }
};
-static struct clk_regmap rtc_32k_mux = {
+static struct clk_regmap c3_rtc_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = RTC_BY_OSCIN_CTRL1,
.mask = 0x1,
.shift = 24,
},
.hw.init = &(struct clk_init_data) {
- .name = "rtc_32k_mux",
+ .name = "rtc_32k_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = rtc_32k_mux_parent_data,
- .num_parents = ARRAY_SIZE(rtc_32k_mux_parent_data),
+ .parent_data = c3_rtc_32k_parents,
+ .num_parents = ARRAY_SIZE(c3_rtc_32k_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap rtc_32k = {
+static struct clk_regmap c3_rtc_32k = {
.data = &(struct clk_regmap_gate_data) {
.offset = RTC_BY_OSCIN_CTRL0,
.bit_idx = 30,
@@ -136,20 +145,20 @@ static struct clk_regmap rtc_32k = {
.name = "rtc_32k",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_mux.hw
+ &c3_rtc_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data rtc_clk_mux_parent_data[] = {
+static const struct clk_parent_data c3_rtc_clk_parents[] = {
{ .fw_name = "oscin" },
- { .hw = &rtc_32k.hw },
+ { .hw = &c3_rtc_32k.hw },
{ .fw_name = "pad_osc" }
};
-static struct clk_regmap rtc_clk = {
+static struct clk_regmap c3_rtc_clk = {
.data = &(struct clk_regmap_mux_data) {
.offset = RTC_CTRL,
.mask = 0x3,
@@ -158,62 +167,45 @@ static struct clk_regmap rtc_clk = {
.hw.init = &(struct clk_init_data) {
.name = "rtc_clk",
.ops = &clk_regmap_mux_ops,
- .parent_data = rtc_clk_mux_parent_data,
- .num_parents = ARRAY_SIZE(rtc_clk_mux_parent_data),
+ .parent_data = c3_rtc_clk_parents,
+ .num_parents = ARRAY_SIZE(c3_rtc_clk_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
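+
+/*
+ * RTC chain summary, read off the parent links above: rtc_xtal_clkin (gate)
+ * -> rtc_32k_div (dual divider) -> rtc_32k_sel (mux, may bypass the divider)
+ * -> rtc_32k (gate) -> rtc_clk (top-level mux with "oscin" and "pad_osc").
+ */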
-#define C3_CLK_GATE(_name, _reg, _bit, _fw_name, _ops, _flags) \
-struct clk_regmap _name = { \
- .data = &(struct clk_regmap_gate_data){ \
- .offset = (_reg), \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name, \
- .ops = _ops, \
- .parent_data = &(const struct clk_parent_data) { \
- .fw_name = #_fw_name, \
- }, \
- .num_parents = 1, \
- .flags = (_flags), \
- }, \
-}
-
-#define C3_SYS_GATE(_name, _reg, _bit, _flags) \
- C3_CLK_GATE(_name, _reg, _bit, sysclk, \
- &clk_regmap_gate_ops, _flags)
-
-#define C3_SYS_GATE_RO(_name, _reg, _bit) \
- C3_CLK_GATE(_name, _reg, _bit, sysclk, \
- &clk_regmap_gate_ro_ops, 0)
-
-static C3_SYS_GATE(sys_reset_ctrl, SYS_CLK_EN0_REG0, 1, 0);
-static C3_SYS_GATE(sys_pwr_ctrl, SYS_CLK_EN0_REG0, 3, 0);
-static C3_SYS_GATE(sys_pad_ctrl, SYS_CLK_EN0_REG0, 4, 0);
-static C3_SYS_GATE(sys_ctrl, SYS_CLK_EN0_REG0, 5, 0);
-static C3_SYS_GATE(sys_ts_pll, SYS_CLK_EN0_REG0, 6, 0);
+static const struct clk_parent_data c3_sys_pclk_parents = { .fw_name = "sysclk" };
+
+#define C3_SYS_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(c3_##_name, _reg, _bit, &c3_sys_pclk_parents, _flags)
+
+#define C3_SYS_PCLK_RO(_name, _reg, _bit) \
+ MESON_PCLK_RO(c3_##_name, _reg, _bit, &c3_sys_pclk_parents, 0)
+
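+/*
+ * Note, illustration only: C3_SYS_PCLK_RO() mirrors the C3_SYS_GATE_RO()
+ * macro it replaces, which used clk_regmap_gate_ro_ops, so the gate is
+ * expected to stay read-only: Linux may observe it but never toggles it.
+ */
+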
+static C3_SYS_PCLK(sys_reset_ctrl, SYS_CLK_EN0_REG0, 1, 0);
+static C3_SYS_PCLK(sys_pwr_ctrl, SYS_CLK_EN0_REG0, 3, 0);
+static C3_SYS_PCLK(sys_pad_ctrl, SYS_CLK_EN0_REG0, 4, 0);
+static C3_SYS_PCLK(sys_ctrl, SYS_CLK_EN0_REG0, 5, 0);
+static C3_SYS_PCLK(sys_ts_pll, SYS_CLK_EN0_REG0, 6, 0);
/*
* NOTE: sys_dev_arb provides the clock to the ETH and SPICC arbiters that
* access the AXI bus.
*/
-static C3_SYS_GATE(sys_dev_arb, SYS_CLK_EN0_REG0, 7, 0);
+static C3_SYS_PCLK(sys_dev_arb, SYS_CLK_EN0_REG0, 7, 0);
/*
 * FIXME: sys_mmc_pclk provides the clock for the DDR PHY. The DDR will only be
 * initialized in bl2, and this clock should not be touched in Linux.
*/
-static C3_SYS_GATE_RO(sys_mmc_pclk, SYS_CLK_EN0_REG0, 8);
+static C3_SYS_PCLK_RO(sys_mmc_pclk, SYS_CLK_EN0_REG0, 8);
/*
 * NOTE: sys_cpu_ctrl provides the clock for the CPU controller. After the clock
 * is disabled, cpu_clk and other key CPU-related configurations cannot take effect.
*/
-static C3_SYS_GATE(sys_cpu_ctrl, SYS_CLK_EN0_REG0, 11, CLK_IS_CRITICAL);
-static C3_SYS_GATE(sys_jtag_ctrl, SYS_CLK_EN0_REG0, 12, 0);
-static C3_SYS_GATE(sys_ir_ctrl, SYS_CLK_EN0_REG0, 13, 0);
+static C3_SYS_PCLK(sys_cpu_ctrl, SYS_CLK_EN0_REG0, 11, CLK_IS_CRITICAL);
+static C3_SYS_PCLK(sys_jtag_ctrl, SYS_CLK_EN0_REG0, 12, 0);
+static C3_SYS_PCLK(sys_ir_ctrl, SYS_CLK_EN0_REG0, 13, 0);
/*
 * NOTE: sys_irq_ctrl provides the clock for the IRQ controller. The IRQ controller
@@ -221,18 +213,18 @@ static C3_SYS_GATE(sys_ir_ctrl, SYS_CLK_EN0_REG0, 13, 0);
 * AOCPU. If the clock is disabled, interrupt-related functions will trigger an
* exception.
*/
-static C3_SYS_GATE(sys_irq_ctrl, SYS_CLK_EN0_REG0, 14, CLK_IS_CRITICAL);
-static C3_SYS_GATE(sys_msr_clk, SYS_CLK_EN0_REG0, 15, 0);
-static C3_SYS_GATE(sys_rom, SYS_CLK_EN0_REG0, 16, 0);
-static C3_SYS_GATE(sys_uart_f, SYS_CLK_EN0_REG0, 17, 0);
-static C3_SYS_GATE(sys_cpu_apb, SYS_CLK_EN0_REG0, 18, 0);
-static C3_SYS_GATE(sys_rsa, SYS_CLK_EN0_REG0, 19, 0);
-static C3_SYS_GATE(sys_sar_adc, SYS_CLK_EN0_REG0, 20, 0);
-static C3_SYS_GATE(sys_startup, SYS_CLK_EN0_REG0, 21, 0);
-static C3_SYS_GATE(sys_secure, SYS_CLK_EN0_REG0, 22, 0);
-static C3_SYS_GATE(sys_spifc, SYS_CLK_EN0_REG0, 23, 0);
-static C3_SYS_GATE(sys_nna, SYS_CLK_EN0_REG0, 25, 0);
-static C3_SYS_GATE(sys_eth_mac, SYS_CLK_EN0_REG0, 26, 0);
+static C3_SYS_PCLK(sys_irq_ctrl, SYS_CLK_EN0_REG0, 14, CLK_IS_CRITICAL);
+static C3_SYS_PCLK(sys_msr_clk, SYS_CLK_EN0_REG0, 15, 0);
+static C3_SYS_PCLK(sys_rom, SYS_CLK_EN0_REG0, 16, 0);
+static C3_SYS_PCLK(sys_uart_f, SYS_CLK_EN0_REG0, 17, 0);
+static C3_SYS_PCLK(sys_cpu_apb, SYS_CLK_EN0_REG0, 18, 0);
+static C3_SYS_PCLK(sys_rsa, SYS_CLK_EN0_REG0, 19, 0);
+static C3_SYS_PCLK(sys_sar_adc, SYS_CLK_EN0_REG0, 20, 0);
+static C3_SYS_PCLK(sys_startup, SYS_CLK_EN0_REG0, 21, 0);
+static C3_SYS_PCLK(sys_secure, SYS_CLK_EN0_REG0, 22, 0);
+static C3_SYS_PCLK(sys_spifc, SYS_CLK_EN0_REG0, 23, 0);
+static C3_SYS_PCLK(sys_nna, SYS_CLK_EN0_REG0, 25, 0);
+static C3_SYS_PCLK(sys_eth_mac, SYS_CLK_EN0_REG0, 26, 0);
/*
* FIXME: sys_gic provides the clock for GIC(Generic Interrupt Controller).
@@ -240,8 +232,8 @@ static C3_SYS_GATE(sys_eth_mac, SYS_CLK_EN0_REG0, 26, 0);
 * used by our GIC is the generic in-kernel driver, and there is no management
* clock in the driver.
*/
-static C3_SYS_GATE(sys_gic, SYS_CLK_EN0_REG0, 27, CLK_IS_CRITICAL);
-static C3_SYS_GATE(sys_rama, SYS_CLK_EN0_REG0, 28, 0);
+static C3_SYS_PCLK(sys_gic, SYS_CLK_EN0_REG0, 27, CLK_IS_CRITICAL);
+static C3_SYS_PCLK(sys_rama, SYS_CLK_EN0_REG0, 28, 0);
/*
* NOTE: sys_big_nic provides the clock to the control bus of the NIC(Network
@@ -249,84 +241,85 @@ static C3_SYS_GATE(sys_rama, SYS_CLK_EN0_REG0, 28, 0);
* SPIFC, CAPU, JTAG, EMMC, SDIO, sec_top, USB, Audio, ETH, SPICC) in the
 * system. After the clock is disabled, the NIC cannot work.
*/
-static C3_SYS_GATE(sys_big_nic, SYS_CLK_EN0_REG0, 29, CLK_IS_CRITICAL);
-static C3_SYS_GATE(sys_ramb, SYS_CLK_EN0_REG0, 30, 0);
-static C3_SYS_GATE(sys_audio_pclk, SYS_CLK_EN0_REG0, 31, 0);
-static C3_SYS_GATE(sys_pwm_kl, SYS_CLK_EN0_REG1, 0, 0);
-static C3_SYS_GATE(sys_pwm_ij, SYS_CLK_EN0_REG1, 1, 0);
-static C3_SYS_GATE(sys_usb, SYS_CLK_EN0_REG1, 2, 0);
-static C3_SYS_GATE(sys_sd_emmc_a, SYS_CLK_EN0_REG1, 3, 0);
-static C3_SYS_GATE(sys_sd_emmc_c, SYS_CLK_EN0_REG1, 4, 0);
-static C3_SYS_GATE(sys_pwm_ab, SYS_CLK_EN0_REG1, 5, 0);
-static C3_SYS_GATE(sys_pwm_cd, SYS_CLK_EN0_REG1, 6, 0);
-static C3_SYS_GATE(sys_pwm_ef, SYS_CLK_EN0_REG1, 7, 0);
-static C3_SYS_GATE(sys_pwm_gh, SYS_CLK_EN0_REG1, 8, 0);
-static C3_SYS_GATE(sys_spicc_1, SYS_CLK_EN0_REG1, 9, 0);
-static C3_SYS_GATE(sys_spicc_0, SYS_CLK_EN0_REG1, 10, 0);
-static C3_SYS_GATE(sys_uart_a, SYS_CLK_EN0_REG1, 11, 0);
-static C3_SYS_GATE(sys_uart_b, SYS_CLK_EN0_REG1, 12, 0);
-static C3_SYS_GATE(sys_uart_c, SYS_CLK_EN0_REG1, 13, 0);
-static C3_SYS_GATE(sys_uart_d, SYS_CLK_EN0_REG1, 14, 0);
-static C3_SYS_GATE(sys_uart_e, SYS_CLK_EN0_REG1, 15, 0);
-static C3_SYS_GATE(sys_i2c_m_a, SYS_CLK_EN0_REG1, 16, 0);
-static C3_SYS_GATE(sys_i2c_m_b, SYS_CLK_EN0_REG1, 17, 0);
-static C3_SYS_GATE(sys_i2c_m_c, SYS_CLK_EN0_REG1, 18, 0);
-static C3_SYS_GATE(sys_i2c_m_d, SYS_CLK_EN0_REG1, 19, 0);
-static C3_SYS_GATE(sys_i2c_s_a, SYS_CLK_EN0_REG1, 20, 0);
-static C3_SYS_GATE(sys_rtc, SYS_CLK_EN0_REG1, 21, 0);
-static C3_SYS_GATE(sys_ge2d, SYS_CLK_EN0_REG1, 22, 0);
-static C3_SYS_GATE(sys_isp, SYS_CLK_EN0_REG1, 23, 0);
-static C3_SYS_GATE(sys_gpv_isp_nic, SYS_CLK_EN0_REG1, 24, 0);
-static C3_SYS_GATE(sys_gpv_cve_nic, SYS_CLK_EN0_REG1, 25, 0);
-static C3_SYS_GATE(sys_mipi_dsi_host, SYS_CLK_EN0_REG1, 26, 0);
-static C3_SYS_GATE(sys_mipi_dsi_phy, SYS_CLK_EN0_REG1, 27, 0);
-static C3_SYS_GATE(sys_eth_phy, SYS_CLK_EN0_REG1, 28, 0);
-static C3_SYS_GATE(sys_acodec, SYS_CLK_EN0_REG1, 29, 0);
-static C3_SYS_GATE(sys_dwap, SYS_CLK_EN0_REG1, 30, 0);
-static C3_SYS_GATE(sys_dos, SYS_CLK_EN0_REG1, 31, 0);
-static C3_SYS_GATE(sys_cve, SYS_CLK_EN0_REG2, 0, 0);
-static C3_SYS_GATE(sys_vout, SYS_CLK_EN0_REG2, 1, 0);
-static C3_SYS_GATE(sys_vc9000e, SYS_CLK_EN0_REG2, 2, 0);
-static C3_SYS_GATE(sys_pwm_mn, SYS_CLK_EN0_REG2, 3, 0);
-static C3_SYS_GATE(sys_sd_emmc_b, SYS_CLK_EN0_REG2, 4, 0);
-
-#define C3_AXI_GATE(_name, _reg, _bit, _flags) \
- C3_CLK_GATE(_name, _reg, _bit, axiclk, \
- &clk_regmap_gate_ops, _flags)
+static C3_SYS_PCLK(sys_big_nic, SYS_CLK_EN0_REG0, 29, CLK_IS_CRITICAL);
+static C3_SYS_PCLK(sys_ramb, SYS_CLK_EN0_REG0, 30, 0);
+static C3_SYS_PCLK(sys_audio_pclk, SYS_CLK_EN0_REG0, 31, 0);
+static C3_SYS_PCLK(sys_pwm_kl, SYS_CLK_EN0_REG1, 0, 0);
+static C3_SYS_PCLK(sys_pwm_ij, SYS_CLK_EN0_REG1, 1, 0);
+static C3_SYS_PCLK(sys_usb, SYS_CLK_EN0_REG1, 2, 0);
+static C3_SYS_PCLK(sys_sd_emmc_a, SYS_CLK_EN0_REG1, 3, 0);
+static C3_SYS_PCLK(sys_sd_emmc_c, SYS_CLK_EN0_REG1, 4, 0);
+static C3_SYS_PCLK(sys_pwm_ab, SYS_CLK_EN0_REG1, 5, 0);
+static C3_SYS_PCLK(sys_pwm_cd, SYS_CLK_EN0_REG1, 6, 0);
+static C3_SYS_PCLK(sys_pwm_ef, SYS_CLK_EN0_REG1, 7, 0);
+static C3_SYS_PCLK(sys_pwm_gh, SYS_CLK_EN0_REG1, 8, 0);
+static C3_SYS_PCLK(sys_spicc_1, SYS_CLK_EN0_REG1, 9, 0);
+static C3_SYS_PCLK(sys_spicc_0, SYS_CLK_EN0_REG1, 10, 0);
+static C3_SYS_PCLK(sys_uart_a, SYS_CLK_EN0_REG1, 11, 0);
+static C3_SYS_PCLK(sys_uart_b, SYS_CLK_EN0_REG1, 12, 0);
+static C3_SYS_PCLK(sys_uart_c, SYS_CLK_EN0_REG1, 13, 0);
+static C3_SYS_PCLK(sys_uart_d, SYS_CLK_EN0_REG1, 14, 0);
+static C3_SYS_PCLK(sys_uart_e, SYS_CLK_EN0_REG1, 15, 0);
+static C3_SYS_PCLK(sys_i2c_m_a, SYS_CLK_EN0_REG1, 16, 0);
+static C3_SYS_PCLK(sys_i2c_m_b, SYS_CLK_EN0_REG1, 17, 0);
+static C3_SYS_PCLK(sys_i2c_m_c, SYS_CLK_EN0_REG1, 18, 0);
+static C3_SYS_PCLK(sys_i2c_m_d, SYS_CLK_EN0_REG1, 19, 0);
+static C3_SYS_PCLK(sys_i2c_s_a, SYS_CLK_EN0_REG1, 20, 0);
+static C3_SYS_PCLK(sys_rtc, SYS_CLK_EN0_REG1, 21, 0);
+static C3_SYS_PCLK(sys_ge2d, SYS_CLK_EN0_REG1, 22, 0);
+static C3_SYS_PCLK(sys_isp, SYS_CLK_EN0_REG1, 23, 0);
+static C3_SYS_PCLK(sys_gpv_isp_nic, SYS_CLK_EN0_REG1, 24, 0);
+static C3_SYS_PCLK(sys_gpv_cve_nic, SYS_CLK_EN0_REG1, 25, 0);
+static C3_SYS_PCLK(sys_mipi_dsi_host, SYS_CLK_EN0_REG1, 26, 0);
+static C3_SYS_PCLK(sys_mipi_dsi_phy, SYS_CLK_EN0_REG1, 27, 0);
+static C3_SYS_PCLK(sys_eth_phy, SYS_CLK_EN0_REG1, 28, 0);
+static C3_SYS_PCLK(sys_acodec, SYS_CLK_EN0_REG1, 29, 0);
+static C3_SYS_PCLK(sys_dwap, SYS_CLK_EN0_REG1, 30, 0);
+static C3_SYS_PCLK(sys_dos, SYS_CLK_EN0_REG1, 31, 0);
+static C3_SYS_PCLK(sys_cve, SYS_CLK_EN0_REG2, 0, 0);
+static C3_SYS_PCLK(sys_vout, SYS_CLK_EN0_REG2, 1, 0);
+static C3_SYS_PCLK(sys_vc9000e, SYS_CLK_EN0_REG2, 2, 0);
+static C3_SYS_PCLK(sys_pwm_mn, SYS_CLK_EN0_REG2, 3, 0);
+static C3_SYS_PCLK(sys_sd_emmc_b, SYS_CLK_EN0_REG2, 4, 0);
+
+static const struct clk_parent_data c3_axi_pclk_parents = { .fw_name = "axiclk" };
+
+#define C3_AXI_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(c3_##_name, _reg, _bit, &c3_axi_pclk_parents, _flags)
/*
* NOTE: axi_sys_nic provides the clock to the AXI bus of the system NIC. After
 * clock is disabled, the NIC cannot work.
*/
-static C3_AXI_GATE(axi_sys_nic, AXI_CLK_EN0, 2, CLK_IS_CRITICAL);
-static C3_AXI_GATE(axi_isp_nic, AXI_CLK_EN0, 3, 0);
-static C3_AXI_GATE(axi_cve_nic, AXI_CLK_EN0, 4, 0);
-static C3_AXI_GATE(axi_ramb, AXI_CLK_EN0, 5, 0);
-static C3_AXI_GATE(axi_rama, AXI_CLK_EN0, 6, 0);
+static C3_AXI_PCLK(axi_sys_nic, AXI_CLK_EN0, 2, CLK_IS_CRITICAL);
+static C3_AXI_PCLK(axi_isp_nic, AXI_CLK_EN0, 3, 0);
+static C3_AXI_PCLK(axi_cve_nic, AXI_CLK_EN0, 4, 0);
+static C3_AXI_PCLK(axi_ramb, AXI_CLK_EN0, 5, 0);
+static C3_AXI_PCLK(axi_rama, AXI_CLK_EN0, 6, 0);
/*
* NOTE: axi_cpu_dmc provides the clock to the AXI bus where the CPU accesses
 * the DDR. After the clock is disabled, the CPU will not have access to the DDR.
*/
-static C3_AXI_GATE(axi_cpu_dmc, AXI_CLK_EN0, 7, CLK_IS_CRITICAL);
-static C3_AXI_GATE(axi_nic, AXI_CLK_EN0, 8, 0);
-static C3_AXI_GATE(axi_dma, AXI_CLK_EN0, 9, 0);
+static C3_AXI_PCLK(axi_cpu_dmc, AXI_CLK_EN0, 7, CLK_IS_CRITICAL);
+static C3_AXI_PCLK(axi_nic, AXI_CLK_EN0, 8, 0);
+static C3_AXI_PCLK(axi_dma, AXI_CLK_EN0, 9, 0);
/*
* NOTE: axi_mux_nic provides the clock to the NIC's AXI bus for NN(Neural
* Network) and other devices(CPU, EMMC, SDIO, sec_top, USB, Audio, ETH, SPICC)
* to access RAM space.
*/
-static C3_AXI_GATE(axi_mux_nic, AXI_CLK_EN0, 10, 0);
-static C3_AXI_GATE(axi_cve, AXI_CLK_EN0, 12, 0);
+static C3_AXI_PCLK(axi_mux_nic, AXI_CLK_EN0, 10, 0);
+static C3_AXI_PCLK(axi_cve, AXI_CLK_EN0, 12, 0);
/*
* NOTE: axi_dev1_dmc provides the clock for the peripherals(EMMC, SDIO,
* sec_top, USB, Audio, ETH, SPICC) to access the AXI bus of the DDR.
*/
-static C3_AXI_GATE(axi_dev1_dmc, AXI_CLK_EN0, 13, 0);
-static C3_AXI_GATE(axi_dev0_dmc, AXI_CLK_EN0, 14, 0);
-static C3_AXI_GATE(axi_dsp_dmc, AXI_CLK_EN0, 15, 0);
+static C3_AXI_PCLK(axi_dev1_dmc, AXI_CLK_EN0, 13, 0);
+static C3_AXI_PCLK(axi_dev0_dmc, AXI_CLK_EN0, 14, 0);
+static C3_AXI_PCLK(axi_dsp_dmc, AXI_CLK_EN0, 15, 0);
/*
* clk_12_24m model
@@ -335,7 +328,7 @@ static C3_AXI_GATE(axi_dsp_dmc, AXI_CLK_EN0, 15, 0);
* xtal---->| gate |---->| div |------------>| pad |
 *          |------|     |-----|             |-----|
*/
-static struct clk_regmap clk_12_24m_in = {
+static struct clk_regmap c3_clk_12_24m_in = {
.data = &(struct clk_regmap_gate_data) {
.offset = CLK12_24_CTRL,
.bit_idx = 11,
@@ -350,7 +343,7 @@ static struct clk_regmap clk_12_24m_in = {
},
};
-static struct clk_regmap clk_12_24m = {
+static struct clk_regmap c3_clk_12_24m = {
.data = &(struct clk_regmap_div_data) {
.offset = CLK12_24_CTRL,
.shift = 10,
@@ -360,14 +353,14 @@ static struct clk_regmap clk_12_24m = {
.name = "clk_12_24m",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_12_24m_in.hw
+ &c3_clk_12_24m_in.hw
},
.num_parents = 1,
},
};
 /* FIXME: setting the divider value to 0 divides by 2, the same as value 1 */
-static struct clk_regmap fclk_25m_div = {
+static struct clk_regmap c3_fclk_25m_div = {
.data = &(struct clk_regmap_div_data) {
.offset = CLK12_24_CTRL,
.shift = 0,
@@ -383,7 +376,7 @@ static struct clk_regmap fclk_25m_div = {
},
};
-static struct clk_regmap fclk_25m = {
+static struct clk_regmap c3_fclk_25m = {
.data = &(struct clk_regmap_gate_data) {
.offset = CLK12_24_CTRL,
.bit_idx = 12,
@@ -392,7 +385,7 @@ static struct clk_regmap fclk_25m = {
.name = "fclk_25m",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_25m_div.hw
+ &c3_fclk_25m_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -404,11 +397,10 @@ static struct clk_regmap fclk_25m = {
 * is managed by the clock measure module. Their hardware is outside the clock tree.
 * Channels 4, 8, 9, 10, 11, 13, 14, 15, 16 and 18 are not connected.
*/
-static u32 gen_parent_table[] = { 0, 1, 2, 5, 6, 7, 17, 19, 20, 21, 22, 23, 24};
-
-static const struct clk_parent_data gen_parent_data[] = {
+static u32 c3_gen_parents_val_table[] = { 0, 1, 2, 5, 6, 7, 17, 19, 20, 21, 22, 23, 24};
+static const struct clk_parent_data c3_gen_parents[] = {
{ .fw_name = "oscin" },
- { .hw = &rtc_clk.hw },
+ { .hw = &c3_rtc_clk.hw },
{ .fw_name = "sysplldiv16" },
{ .fw_name = "gp0" },
{ .fw_name = "gp1" },
@@ -422,22 +414,22 @@ static const struct clk_parent_data gen_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap gen_sel = {
+static struct clk_regmap c3_gen_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = GEN_CLK_CTRL,
.mask = 0x1f,
.shift = 12,
- .table = gen_parent_table,
+ .table = c3_gen_parents_val_table,
},
.hw.init = &(struct clk_init_data) {
.name = "gen_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gen_parent_data,
- .num_parents = ARRAY_SIZE(gen_parent_data),
+ .parent_data = c3_gen_parents,
+ .num_parents = ARRAY_SIZE(c3_gen_parents),
},
};
-static struct clk_regmap gen_div = {
+static struct clk_regmap c3_gen_div = {
.data = &(struct clk_regmap_div_data) {
.offset = GEN_CLK_CTRL,
.shift = 0,
@@ -447,14 +439,14 @@ static struct clk_regmap gen_div = {
.name = "gen_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gen_sel.hw
+ &c3_gen_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap gen = {
+static struct clk_regmap c3_gen = {
.data = &(struct clk_regmap_gate_data) {
.offset = GEN_CLK_CTRL,
.bit_idx = 11,
@@ -463,214 +455,86 @@ static struct clk_regmap gen = {
.name = "gen",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gen_div.hw
+ &c3_gen_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data saradc_parent_data[] = {
+static const struct clk_parent_data c3_saradc_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "sysclk" }
};
-static struct clk_regmap saradc_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SAR_CLK_CTRL0,
- .mask = 0x1,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "saradc_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = saradc_parent_data,
- .num_parents = ARRAY_SIZE(saradc_parent_data),
- },
-};
-
-static struct clk_regmap saradc_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SAR_CLK_CTRL0,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "saradc_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &saradc_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap saradc = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SAR_CLK_CTRL0,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "saradc",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &saradc_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(saradc, SAR_CLK_CTRL0, 9, 0x1, c3_saradc_parents);
+static C3_COMP_DIV(saradc, SAR_CLK_CTRL0, 0, 8);
+static C3_COMP_GATE(saradc, SAR_CLK_CTRL0, 8);
-static const struct clk_parent_data pwm_parent_data[] = {
+static const struct clk_parent_data c3_pwm_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "gp1" },
{ .fw_name = "fdiv4" },
{ .fw_name = "fdiv3" }
};
-#define AML_PWM_CLK_MUX(_name, _reg, _shift) { \
- .data = &(struct clk_regmap_mux_data) { \
- .offset = _reg, \
- .mask = 0x3, \
- .shift = _shift, \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name "_sel", \
- .ops = &clk_regmap_mux_ops, \
- .parent_data = pwm_parent_data, \
- .num_parents = ARRAY_SIZE(pwm_parent_data), \
- }, \
-}
-
-#define AML_PWM_CLK_DIV(_name, _reg, _shift) { \
- .data = &(struct clk_regmap_div_data) { \
- .offset = _reg, \
- .shift = _shift, \
- .width = 8, \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name "_div", \
- .ops = &clk_regmap_divider_ops, \
- .parent_names = (const char *[]) { #_name "_sel" },\
- .num_parents = 1, \
- .flags = CLK_SET_RATE_PARENT, \
- }, \
-}
-
-#define AML_PWM_CLK_GATE(_name, _reg, _bit) { \
- .data = &(struct clk_regmap_gate_data) { \
- .offset = _reg, \
- .bit_idx = _bit, \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name, \
- .ops = &clk_regmap_gate_ops, \
- .parent_names = (const char *[]) { #_name "_div" },\
- .num_parents = 1, \
- .flags = CLK_SET_RATE_PARENT, \
- }, \
-}
-
-static struct clk_regmap pwm_a_sel =
- AML_PWM_CLK_MUX(pwm_a, PWM_CLK_AB_CTRL, 9);
-static struct clk_regmap pwm_a_div =
- AML_PWM_CLK_DIV(pwm_a, PWM_CLK_AB_CTRL, 0);
-static struct clk_regmap pwm_a =
- AML_PWM_CLK_GATE(pwm_a, PWM_CLK_AB_CTRL, 8);
-
-static struct clk_regmap pwm_b_sel =
- AML_PWM_CLK_MUX(pwm_b, PWM_CLK_AB_CTRL, 25);
-static struct clk_regmap pwm_b_div =
- AML_PWM_CLK_DIV(pwm_b, PWM_CLK_AB_CTRL, 16);
-static struct clk_regmap pwm_b =
- AML_PWM_CLK_GATE(pwm_b, PWM_CLK_AB_CTRL, 24);
-
-static struct clk_regmap pwm_c_sel =
- AML_PWM_CLK_MUX(pwm_c, PWM_CLK_CD_CTRL, 9);
-static struct clk_regmap pwm_c_div =
- AML_PWM_CLK_DIV(pwm_c, PWM_CLK_CD_CTRL, 0);
-static struct clk_regmap pwm_c =
- AML_PWM_CLK_GATE(pwm_c, PWM_CLK_CD_CTRL, 8);
-
-static struct clk_regmap pwm_d_sel =
- AML_PWM_CLK_MUX(pwm_d, PWM_CLK_CD_CTRL, 25);
-static struct clk_regmap pwm_d_div =
- AML_PWM_CLK_DIV(pwm_d, PWM_CLK_CD_CTRL, 16);
-static struct clk_regmap pwm_d =
- AML_PWM_CLK_GATE(pwm_d, PWM_CLK_CD_CTRL, 24);
-
-static struct clk_regmap pwm_e_sel =
- AML_PWM_CLK_MUX(pwm_e, PWM_CLK_EF_CTRL, 9);
-static struct clk_regmap pwm_e_div =
- AML_PWM_CLK_DIV(pwm_e, PWM_CLK_EF_CTRL, 0);
-static struct clk_regmap pwm_e =
- AML_PWM_CLK_GATE(pwm_e, PWM_CLK_EF_CTRL, 8);
-
-static struct clk_regmap pwm_f_sel =
- AML_PWM_CLK_MUX(pwm_f, PWM_CLK_EF_CTRL, 25);
-static struct clk_regmap pwm_f_div =
- AML_PWM_CLK_DIV(pwm_f, PWM_CLK_EF_CTRL, 16);
-static struct clk_regmap pwm_f =
- AML_PWM_CLK_GATE(pwm_f, PWM_CLK_EF_CTRL, 24);
-
-static struct clk_regmap pwm_g_sel =
- AML_PWM_CLK_MUX(pwm_g, PWM_CLK_GH_CTRL, 9);
-static struct clk_regmap pwm_g_div =
- AML_PWM_CLK_DIV(pwm_g, PWM_CLK_GH_CTRL, 0);
-static struct clk_regmap pwm_g =
- AML_PWM_CLK_GATE(pwm_g, PWM_CLK_GH_CTRL, 8);
-
-static struct clk_regmap pwm_h_sel =
- AML_PWM_CLK_MUX(pwm_h, PWM_CLK_GH_CTRL, 25);
-static struct clk_regmap pwm_h_div =
- AML_PWM_CLK_DIV(pwm_h, PWM_CLK_GH_CTRL, 16);
-static struct clk_regmap pwm_h =
- AML_PWM_CLK_GATE(pwm_h, PWM_CLK_GH_CTRL, 24);
-
-static struct clk_regmap pwm_i_sel =
- AML_PWM_CLK_MUX(pwm_i, PWM_CLK_IJ_CTRL, 9);
-static struct clk_regmap pwm_i_div =
- AML_PWM_CLK_DIV(pwm_i, PWM_CLK_IJ_CTRL, 0);
-static struct clk_regmap pwm_i =
- AML_PWM_CLK_GATE(pwm_i, PWM_CLK_IJ_CTRL, 8);
-
-static struct clk_regmap pwm_j_sel =
- AML_PWM_CLK_MUX(pwm_j, PWM_CLK_IJ_CTRL, 25);
-static struct clk_regmap pwm_j_div =
- AML_PWM_CLK_DIV(pwm_j, PWM_CLK_IJ_CTRL, 16);
-static struct clk_regmap pwm_j =
- AML_PWM_CLK_GATE(pwm_j, PWM_CLK_IJ_CTRL, 24);
-
-static struct clk_regmap pwm_k_sel =
- AML_PWM_CLK_MUX(pwm_k, PWM_CLK_KL_CTRL, 9);
-static struct clk_regmap pwm_k_div =
- AML_PWM_CLK_DIV(pwm_k, PWM_CLK_KL_CTRL, 0);
-static struct clk_regmap pwm_k =
- AML_PWM_CLK_GATE(pwm_k, PWM_CLK_KL_CTRL, 8);
-
-static struct clk_regmap pwm_l_sel =
- AML_PWM_CLK_MUX(pwm_l, PWM_CLK_KL_CTRL, 25);
-static struct clk_regmap pwm_l_div =
- AML_PWM_CLK_DIV(pwm_l, PWM_CLK_KL_CTRL, 16);
-static struct clk_regmap pwm_l =
- AML_PWM_CLK_GATE(pwm_l, PWM_CLK_KL_CTRL, 24);
-
-static struct clk_regmap pwm_m_sel =
- AML_PWM_CLK_MUX(pwm_m, PWM_CLK_MN_CTRL, 9);
-static struct clk_regmap pwm_m_div =
- AML_PWM_CLK_DIV(pwm_m, PWM_CLK_MN_CTRL, 0);
-static struct clk_regmap pwm_m =
- AML_PWM_CLK_GATE(pwm_m, PWM_CLK_MN_CTRL, 8);
-
-static struct clk_regmap pwm_n_sel =
- AML_PWM_CLK_MUX(pwm_n, PWM_CLK_MN_CTRL, 25);
-static struct clk_regmap pwm_n_div =
- AML_PWM_CLK_DIV(pwm_n, PWM_CLK_MN_CTRL, 16);
-static struct clk_regmap pwm_n =
- AML_PWM_CLK_GATE(pwm_n, PWM_CLK_MN_CTRL, 24);
-
-static const struct clk_parent_data spicc_parent_data[] = {
+static C3_COMP_SEL(pwm_a, PWM_CLK_AB_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_a, PWM_CLK_AB_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_a, PWM_CLK_AB_CTRL, 8);
+
+static C3_COMP_SEL(pwm_b, PWM_CLK_AB_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_b, PWM_CLK_AB_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_b, PWM_CLK_AB_CTRL, 24);
+
+static C3_COMP_SEL(pwm_c, PWM_CLK_CD_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_c, PWM_CLK_CD_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_c, PWM_CLK_CD_CTRL, 8);
+
+static C3_COMP_SEL(pwm_d, PWM_CLK_CD_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_d, PWM_CLK_CD_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_d, PWM_CLK_CD_CTRL, 24);
+
+static C3_COMP_SEL(pwm_e, PWM_CLK_EF_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_e, PWM_CLK_EF_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_e, PWM_CLK_EF_CTRL, 8);
+
+static C3_COMP_SEL(pwm_f, PWM_CLK_EF_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_f, PWM_CLK_EF_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_f, PWM_CLK_EF_CTRL, 24);
+
+static C3_COMP_SEL(pwm_g, PWM_CLK_GH_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_g, PWM_CLK_GH_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_g, PWM_CLK_GH_CTRL, 8);
+
+static C3_COMP_SEL(pwm_h, PWM_CLK_GH_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_h, PWM_CLK_GH_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_h, PWM_CLK_GH_CTRL, 24);
+
+static C3_COMP_SEL(pwm_i, PWM_CLK_IJ_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_i, PWM_CLK_IJ_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_i, PWM_CLK_IJ_CTRL, 8);
+
+static C3_COMP_SEL(pwm_j, PWM_CLK_IJ_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_j, PWM_CLK_IJ_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_j, PWM_CLK_IJ_CTRL, 24);
+
+static C3_COMP_SEL(pwm_k, PWM_CLK_KL_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_k, PWM_CLK_KL_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_k, PWM_CLK_KL_CTRL, 8);
+
+static C3_COMP_SEL(pwm_l, PWM_CLK_KL_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_l, PWM_CLK_KL_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_l, PWM_CLK_KL_CTRL, 24);
+
+static C3_COMP_SEL(pwm_m, PWM_CLK_MN_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_m, PWM_CLK_MN_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_m, PWM_CLK_MN_CTRL, 8);
+
+static C3_COMP_SEL(pwm_n, PWM_CLK_MN_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_n, PWM_CLK_MN_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_n, PWM_CLK_MN_CTRL, 24);
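+
+/*
+ * Layout note, inferred from the macro arguments above: each PWM_CLK_*_CTRL
+ * register packs two channels; the first uses sel/div/gate fields at bits
+ * 9/0/8 and the second the mirrored fields at bits 25/16/24.
+ */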
+
+static const struct clk_parent_data c3_spicc_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "sysclk" },
{ .fw_name = "fdiv4" },
@@ -681,101 +545,15 @@ static const struct clk_parent_data spicc_parent_data[] = {
{ .fw_name = "gp1" }
};
-static struct clk_regmap spicc_a_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SPICC_CLK_CTRL,
- .mask = 0x7,
- .shift = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_a_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = spicc_parent_data,
- .num_parents = ARRAY_SIZE(spicc_parent_data),
- },
-};
-
-static struct clk_regmap spicc_a_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SPICC_CLK_CTRL,
- .shift = 0,
- .width = 6,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_a_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spicc_a_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap spicc_a = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SPICC_CLK_CTRL,
- .bit_idx = 6,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_a",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spicc_a_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap spicc_b_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SPICC_CLK_CTRL,
- .mask = 0x7,
- .shift = 23,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_b_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = spicc_parent_data,
- .num_parents = ARRAY_SIZE(spicc_parent_data),
- },
-};
-
-static struct clk_regmap spicc_b_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SPICC_CLK_CTRL,
- .shift = 16,
- .width = 6,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_b_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spicc_b_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(spicc_a, SPICC_CLK_CTRL, 7, 0x7, c3_spicc_parents);
+static C3_COMP_DIV(spicc_a, SPICC_CLK_CTRL, 0, 6);
+static C3_COMP_GATE(spicc_a, SPICC_CLK_CTRL, 6);
-static struct clk_regmap spicc_b = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SPICC_CLK_CTRL,
- .bit_idx = 22,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_b",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spicc_b_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(spicc_b, SPICC_CLK_CTRL, 23, 0x7, c3_spicc_parents);
+static C3_COMP_DIV(spicc_b, SPICC_CLK_CTRL, 16, 6);
+static C3_COMP_GATE(spicc_b, SPICC_CLK_CTRL, 22);
-static const struct clk_parent_data spifc_parent_data[] = {
+static const struct clk_parent_data c3_spifc_parents[] = {
{ .fw_name = "gp0" },
{ .fw_name = "fdiv2" },
{ .fw_name = "fdiv3" },
@@ -786,54 +564,11 @@ static const struct clk_parent_data spifc_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap spifc_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SPIFC_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spifc_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = spifc_parent_data,
- .num_parents = ARRAY_SIZE(spifc_parent_data),
- },
-};
-
-static struct clk_regmap spifc_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SPIFC_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spifc_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spifc_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap spifc = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SPIFC_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spifc",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spifc_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(spifc, SPIFC_CLK_CTRL, 9, 0x7, c3_spifc_parents);
+static C3_COMP_DIV(spifc, SPIFC_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(spifc, SPIFC_CLK_CTRL, 8);
-static const struct clk_parent_data emmc_parent_data[] = {
+static const struct clk_parent_data c3_sd_emmc_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv2" },
{ .fw_name = "fdiv3" },
@@ -844,148 +579,19 @@ static const struct clk_parent_data emmc_parent_data[] = {
{ .fw_name = "gp0" }
};
-static struct clk_regmap sd_emmc_a_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_a_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = emmc_parent_data,
- .num_parents = ARRAY_SIZE(emmc_parent_data),
- },
-};
+static C3_COMP_SEL(sd_emmc_a, SD_EMMC_CLK_CTRL, 9, 0x7, c3_sd_emmc_parents);
+static C3_COMP_DIV(sd_emmc_a, SD_EMMC_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(sd_emmc_a, SD_EMMC_CLK_CTRL, 7);
-static struct clk_regmap sd_emmc_a_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_a_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_a_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(sd_emmc_b, SD_EMMC_CLK_CTRL, 25, 0x7, c3_sd_emmc_parents);
+static C3_COMP_DIV(sd_emmc_b, SD_EMMC_CLK_CTRL, 16, 7);
+static C3_COMP_GATE(sd_emmc_b, SD_EMMC_CLK_CTRL, 23);
-static struct clk_regmap sd_emmc_a = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .bit_idx = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_a",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_a_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sd_emmc_b_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .mask = 0x7,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_b_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = emmc_parent_data,
- .num_parents = ARRAY_SIZE(emmc_parent_data),
- },
-};
-
-static struct clk_regmap sd_emmc_b_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .shift = 16,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_b_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_b_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sd_emmc_b = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .bit_idx = 23,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_b",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_b_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sd_emmc_c_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = NAND_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_c_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = emmc_parent_data,
- .num_parents = ARRAY_SIZE(emmc_parent_data),
- },
-};
-
-static struct clk_regmap sd_emmc_c_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = NAND_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_c_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_c_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sd_emmc_c = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = NAND_CLK_CTRL,
- .bit_idx = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_c",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_c_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(sd_emmc_c, NAND_CLK_CTRL, 9, 0x7, c3_sd_emmc_parents);
+static C3_COMP_DIV(sd_emmc_c, NAND_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(sd_emmc_c, NAND_CLK_CTRL, 7);
-static struct clk_regmap ts_div = {
+static struct clk_regmap c3_ts_div = {
.data = &(struct clk_regmap_div_data) {
.offset = TS_CLK_CTRL,
.shift = 0,
@@ -1001,7 +607,7 @@ static struct clk_regmap ts_div = {
},
};
-static struct clk_regmap ts = {
+static struct clk_regmap c3_ts = {
.data = &(struct clk_regmap_gate_data) {
.offset = TS_CLK_CTRL,
.bit_idx = 8,
@@ -1010,29 +616,29 @@ static struct clk_regmap ts = {
.name = "ts",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ts_div.hw
+ &c3_ts_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data eth_parent = {
+static const struct clk_parent_data c3_eth_parents = {
.fw_name = "fdiv2",
};
-static struct clk_fixed_factor eth_125m_div = {
+static struct clk_fixed_factor c3_eth_125m_div = {
.mult = 1,
.div = 8,
.hw.init = &(struct clk_init_data) {
.name = "eth_125m_div",
.ops = &clk_fixed_factor_ops,
- .parent_data = &eth_parent,
+ .parent_data = &c3_eth_parents,
.num_parents = 1,
},
};
-static struct clk_regmap eth_125m = {
+static struct clk_regmap c3_eth_125m = {
.data = &(struct clk_regmap_gate_data) {
.offset = ETH_CLK_CTRL,
.bit_idx = 7,
@@ -1041,14 +647,14 @@ static struct clk_regmap eth_125m = {
.name = "eth_125m",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &eth_125m_div.hw
+ &c3_eth_125m_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap eth_rmii_div = {
+static struct clk_regmap c3_eth_rmii_div = {
.data = &(struct clk_regmap_div_data) {
.offset = ETH_CLK_CTRL,
.shift = 0,
@@ -1057,12 +663,12 @@ static struct clk_regmap eth_rmii_div = {
.hw.init = &(struct clk_init_data) {
.name = "eth_rmii_div",
.ops = &clk_regmap_divider_ops,
- .parent_data = &eth_parent,
+ .parent_data = &c3_eth_parents,
.num_parents = 1,
},
};
-static struct clk_regmap eth_rmii = {
+static struct clk_regmap c3_eth_rmii = {
.data = &(struct clk_regmap_gate_data) {
.offset = ETH_CLK_CTRL,
.bit_idx = 8,
@@ -1071,14 +677,14 @@ static struct clk_regmap eth_rmii = {
.name = "eth_rmii",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &eth_rmii_div.hw
+ &c3_eth_rmii_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data mipi_dsi_meas_parent_data[] = {
+static const struct clk_parent_data c3_mipi_dsi_meas_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv4" },
{ .fw_name = "fdiv3" },
@@ -1089,54 +695,11 @@ static const struct clk_parent_data mipi_dsi_meas_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap mipi_dsi_meas_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VDIN_MEAS_CLK_CTRL,
- .mask = 0x7,
- .shift = 21,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "mipi_dsi_meas_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = mipi_dsi_meas_parent_data,
- .num_parents = ARRAY_SIZE(mipi_dsi_meas_parent_data),
- },
-};
+static C3_COMP_SEL(mipi_dsi_meas, VDIN_MEAS_CLK_CTRL, 21, 0x7, c3_mipi_dsi_meas_parents);
+static C3_COMP_DIV(mipi_dsi_meas, VDIN_MEAS_CLK_CTRL, 12, 7);
+static C3_COMP_GATE(mipi_dsi_meas, VDIN_MEAS_CLK_CTRL, 20);
-static struct clk_regmap mipi_dsi_meas_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VDIN_MEAS_CLK_CTRL,
- .shift = 12,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "mipi_dsi_meas_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &mipi_dsi_meas_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap mipi_dsi_meas = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VDIN_MEAS_CLK_CTRL,
- .bit_idx = 20,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "mipi_dsi_meas",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &mipi_dsi_meas_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data dsi_phy_parent_data[] = {
+static const struct clk_parent_data c3_dsi_phy_parents[] = {
{ .fw_name = "gp1" },
{ .fw_name = "gp0" },
{ .fw_name = "hifi" },
@@ -1147,54 +710,11 @@ static const struct clk_parent_data dsi_phy_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap dsi_phy_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = MIPIDSI_PHY_CLK_CTRL,
- .mask = 0x7,
- .shift = 12,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dsi_phy_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = dsi_phy_parent_data,
- .num_parents = ARRAY_SIZE(dsi_phy_parent_data),
- },
-};
-
-static struct clk_regmap dsi_phy_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = MIPIDSI_PHY_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dsi_phy_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &dsi_phy_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap dsi_phy = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = MIPIDSI_PHY_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dsi_phy",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &dsi_phy_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(dsi_phy, MIPIDSI_PHY_CLK_CTRL, 12, 0x7, c3_dsi_phy_parents);
+static C3_COMP_DIV(dsi_phy, MIPIDSI_PHY_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(dsi_phy, MIPIDSI_PHY_CLK_CTRL, 8);
-static const struct clk_parent_data vout_mclk_parent_data[] = {
+static const struct clk_parent_data c3_vout_mclk_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1205,54 +725,11 @@ static const struct clk_parent_data vout_mclk_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap vout_mclk_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VOUTENC_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_mclk_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vout_mclk_parent_data,
- .num_parents = ARRAY_SIZE(vout_mclk_parent_data),
- },
-};
+static C3_COMP_SEL(vout_mclk, VOUTENC_CLK_CTRL, 9, 0x7, c3_vout_mclk_parents);
+static C3_COMP_DIV(vout_mclk, VOUTENC_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(vout_mclk, VOUTENC_CLK_CTRL, 8);
-static struct clk_regmap vout_mclk_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VOUTENC_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_mclk_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vout_mclk_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vout_mclk = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VOUTENC_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_mclk",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vout_mclk_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data vout_enc_parent_data[] = {
+static const struct clk_parent_data c3_vout_enc_parents[] = {
{ .fw_name = "gp1" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1263,54 +740,11 @@ static const struct clk_parent_data vout_enc_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap vout_enc_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VOUTENC_CLK_CTRL,
- .mask = 0x7,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_enc_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vout_enc_parent_data,
- .num_parents = ARRAY_SIZE(vout_enc_parent_data),
- },
-};
-
-static struct clk_regmap vout_enc_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VOUTENC_CLK_CTRL,
- .shift = 16,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_enc_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vout_enc_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vout_enc = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VOUTENC_CLK_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_enc",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vout_enc_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(vout_enc, VOUTENC_CLK_CTRL, 25, 0x7, c3_vout_enc_parents);
+static C3_COMP_DIV(vout_enc, VOUTENC_CLK_CTRL, 16, 7);
+static C3_COMP_GATE(vout_enc, VOUTENC_CLK_CTRL, 24);
-static const struct clk_parent_data hcodec_pre_parent_data[] = {
+static const struct clk_parent_data c3_hcodec_pre_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1321,106 +755,20 @@ static const struct clk_parent_data hcodec_pre_parent_data[] = {
{ .fw_name = "oscin" }
};
-static struct clk_regmap hcodec_0_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VDEC_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_0_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = hcodec_pre_parent_data,
- .num_parents = ARRAY_SIZE(hcodec_pre_parent_data),
- },
-};
-
-static struct clk_regmap hcodec_0_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VDEC_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_0_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &hcodec_0_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap hcodec_0 = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VDEC_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_0",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &hcodec_0_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap hcodec_1_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VDEC3_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_1_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = hcodec_pre_parent_data,
- .num_parents = ARRAY_SIZE(hcodec_pre_parent_data),
- },
-};
-
-static struct clk_regmap hcodec_1_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VDEC3_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_1_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &hcodec_1_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(hcodec_0, VDEC_CLK_CTRL, 9, 0x7, c3_hcodec_pre_parents);
+static C3_COMP_DIV(hcodec_0, VDEC_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(hcodec_0, VDEC_CLK_CTRL, 8);
-static struct clk_regmap hcodec_1 = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VDEC3_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_1",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &hcodec_1_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(hcodec_1, VDEC3_CLK_CTRL, 9, 0x7, c3_hcodec_pre_parents);
+static C3_COMP_DIV(hcodec_1, VDEC3_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(hcodec_1, VDEC3_CLK_CTRL, 8);
-static const struct clk_parent_data hcodec_parent_data[] = {
- { .hw = &hcodec_0.hw },
- { .hw = &hcodec_1.hw }
+static const struct clk_parent_data c3_hcodec_parents[] = {
+ { .hw = &c3_hcodec_0.hw },
+ { .hw = &c3_hcodec_1.hw }
};
-static struct clk_regmap hcodec = {
+static struct clk_regmap c3_hcodec = {
.data = &(struct clk_regmap_mux_data) {
.offset = VDEC3_CLK_CTRL,
.mask = 0x1,
@@ -1429,13 +777,13 @@ static struct clk_regmap hcodec = {
.hw.init = &(struct clk_init_data) {
.name = "hcodec",
.ops = &clk_regmap_mux_ops,
- .parent_data = hcodec_parent_data,
- .num_parents = ARRAY_SIZE(hcodec_parent_data),
+ .parent_data = c3_hcodec_parents,
+ .num_parents = ARRAY_SIZE(c3_hcodec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data vc9000e_parent_data[] = {
+static const struct clk_parent_data c3_vc9000e_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv4" },
{ .fw_name = "fdiv3" },
@@ -1446,101 +794,15 @@ static const struct clk_parent_data vc9000e_parent_data[] = {
{ .fw_name = "gp0" }
};
-static struct clk_regmap vc9000e_aclk_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VC9000E_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_aclk_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vc9000e_parent_data,
- .num_parents = ARRAY_SIZE(vc9000e_parent_data),
- },
-};
-
-static struct clk_regmap vc9000e_aclk_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VC9000E_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_aclk_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vc9000e_aclk_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vc9000e_aclk = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VC9000E_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_aclk",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vc9000e_aclk_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vc9000e_core_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VC9000E_CLK_CTRL,
- .mask = 0x7,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_core_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vc9000e_parent_data,
- .num_parents = ARRAY_SIZE(vc9000e_parent_data),
- },
-};
-
-static struct clk_regmap vc9000e_core_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VC9000E_CLK_CTRL,
- .shift = 16,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_core_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vc9000e_core_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(vc9000e_aclk, VC9000E_CLK_CTRL, 9, 0x7, c3_vc9000e_parents);
+static C3_COMP_DIV(vc9000e_aclk, VC9000E_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(vc9000e_aclk, VC9000E_CLK_CTRL, 8);
-static struct clk_regmap vc9000e_core = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VC9000E_CLK_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_core",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vc9000e_core_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(vc9000e_core, VC9000E_CLK_CTRL, 25, 0x7, c3_vc9000e_parents);
+static C3_COMP_DIV(vc9000e_core, VC9000E_CLK_CTRL, 16, 7);
+static C3_COMP_GATE(vc9000e_core, VC9000E_CLK_CTRL, 24);
-static const struct clk_parent_data csi_phy_parent_data[] = {
+static const struct clk_parent_data c3_csi_phy_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1551,54 +813,11 @@ static const struct clk_parent_data csi_phy_parent_data[] = {
{ .fw_name = "oscin" }
};
-static struct clk_regmap csi_phy0_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = ISP0_CLK_CTRL,
- .mask = 0x7,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "csi_phy0_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = csi_phy_parent_data,
- .num_parents = ARRAY_SIZE(csi_phy_parent_data),
- },
-};
-
-static struct clk_regmap csi_phy0_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = ISP0_CLK_CTRL,
- .shift = 16,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "csi_phy0_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &csi_phy0_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(csi_phy0, ISP0_CLK_CTRL, 25, 0x7, c3_csi_phy_parents);
+static C3_COMP_DIV(csi_phy0, ISP0_CLK_CTRL, 16, 7);
+static C3_COMP_GATE(csi_phy0, ISP0_CLK_CTRL, 24);
-static struct clk_regmap csi_phy0 = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = ISP0_CLK_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "csi_phy0",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &csi_phy0_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data dewarpa_parent_data[] = {
+static const struct clk_parent_data c3_dewarpa_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1609,54 +828,11 @@ static const struct clk_parent_data dewarpa_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap dewarpa_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = DEWARPA_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dewarpa_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = dewarpa_parent_data,
- .num_parents = ARRAY_SIZE(dewarpa_parent_data),
- },
-};
-
-static struct clk_regmap dewarpa_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = DEWARPA_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dewarpa_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &dewarpa_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap dewarpa = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = DEWARPA_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dewarpa",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &dewarpa_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(dewarpa, DEWARPA_CLK_CTRL, 9, 0x7, c3_dewarpa_parents);
+static C3_COMP_DIV(dewarpa, DEWARPA_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(dewarpa, DEWARPA_CLK_CTRL, 8);
-static const struct clk_parent_data isp_parent_data[] = {
+static const struct clk_parent_data c3_isp_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1667,54 +843,11 @@ static const struct clk_parent_data isp_parent_data[] = {
{ .fw_name = "oscin" }
};
-static struct clk_regmap isp0_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = ISP0_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "isp0_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = isp_parent_data,
- .num_parents = ARRAY_SIZE(isp_parent_data),
- },
-};
-
-static struct clk_regmap isp0_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = ISP0_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "isp0_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &isp0_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap isp0 = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = ISP0_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "isp0",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &isp0_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(isp0, ISP0_CLK_CTRL, 9, 0x7, c3_isp_parents);
+static C3_COMP_DIV(isp0, ISP0_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(isp0, ISP0_CLK_CTRL, 8);
-static const struct clk_parent_data nna_core_parent_data[] = {
+static const struct clk_parent_data c3_nna_core_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv4" },
@@ -1725,54 +858,11 @@ static const struct clk_parent_data nna_core_parent_data[] = {
{ .fw_name = "hifi" }
};
-static struct clk_regmap nna_core_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = NNA_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "nna_core_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = nna_core_parent_data,
- .num_parents = ARRAY_SIZE(nna_core_parent_data),
- },
-};
+static C3_COMP_SEL(nna_core, NNA_CLK_CTRL, 9, 0x7, c3_nna_core_parents);
+static C3_COMP_DIV(nna_core, NNA_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(nna_core, NNA_CLK_CTRL, 8);
-static struct clk_regmap nna_core_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = NNA_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "nna_core_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &nna_core_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap nna_core = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = NNA_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "nna_core",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &nna_core_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data ge2d_parent_data[] = {
+static const struct clk_parent_data c3_ge2d_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
@@ -1780,57 +870,14 @@ static const struct clk_parent_data ge2d_parent_data[] = {
{ .fw_name = "hifi" },
{ .fw_name = "fdiv5" },
{ .fw_name = "gp0" },
- { .hw = &rtc_clk.hw }
-};
-
-static struct clk_regmap ge2d_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = GE2D_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "ge2d_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = ge2d_parent_data,
- .num_parents = ARRAY_SIZE(ge2d_parent_data),
- },
+ { .hw = &c3_rtc_clk.hw }
};
-static struct clk_regmap ge2d_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = GE2D_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "ge2d_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &ge2d_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(ge2d, GE2D_CLK_CTRL, 9, 0x7, c3_ge2d_parents);
+static C3_COMP_DIV(ge2d, GE2D_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(ge2d, GE2D_CLK_CTRL, 8);
-static struct clk_regmap ge2d = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = GE2D_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "ge2d",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &ge2d_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data vapb_parent_data[] = {
+static const struct clk_parent_data c3_vapb_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1841,525 +888,239 @@ static const struct clk_parent_data vapb_parent_data[] = {
{ .fw_name = "oscin" },
};
-static struct clk_regmap vapb_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VAPB_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vapb_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vapb_parent_data,
- .num_parents = ARRAY_SIZE(vapb_parent_data),
+static C3_COMP_SEL(vapb, VAPB_CLK_CTRL, 9, 0x7, c3_vapb_parents);
+static C3_COMP_DIV(vapb, VAPB_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(vapb, VAPB_CLK_CTRL, 8);
+
+static struct clk_hw *c3_peripherals_hw_clks[] = {
+ [CLKID_RTC_XTAL_CLKIN] = &c3_rtc_xtal_clkin.hw,
+ [CLKID_RTC_32K_DIV] = &c3_rtc_32k_div.hw,
+ [CLKID_RTC_32K_MUX] = &c3_rtc_32k_sel.hw,
+ [CLKID_RTC_32K] = &c3_rtc_32k.hw,
+ [CLKID_RTC_CLK] = &c3_rtc_clk.hw,
+ [CLKID_SYS_RESET_CTRL] = &c3_sys_reset_ctrl.hw,
+ [CLKID_SYS_PWR_CTRL] = &c3_sys_pwr_ctrl.hw,
+ [CLKID_SYS_PAD_CTRL] = &c3_sys_pad_ctrl.hw,
+ [CLKID_SYS_CTRL] = &c3_sys_ctrl.hw,
+ [CLKID_SYS_TS_PLL] = &c3_sys_ts_pll.hw,
+ [CLKID_SYS_DEV_ARB] = &c3_sys_dev_arb.hw,
+ [CLKID_SYS_MMC_PCLK] = &c3_sys_mmc_pclk.hw,
+ [CLKID_SYS_CPU_CTRL] = &c3_sys_cpu_ctrl.hw,
+ [CLKID_SYS_JTAG_CTRL] = &c3_sys_jtag_ctrl.hw,
+ [CLKID_SYS_IR_CTRL] = &c3_sys_ir_ctrl.hw,
+ [CLKID_SYS_IRQ_CTRL] = &c3_sys_irq_ctrl.hw,
+ [CLKID_SYS_MSR_CLK] = &c3_sys_msr_clk.hw,
+ [CLKID_SYS_ROM] = &c3_sys_rom.hw,
+ [CLKID_SYS_UART_F] = &c3_sys_uart_f.hw,
+ [CLKID_SYS_CPU_ARB] = &c3_sys_cpu_apb.hw,
+ [CLKID_SYS_RSA] = &c3_sys_rsa.hw,
+ [CLKID_SYS_SAR_ADC] = &c3_sys_sar_adc.hw,
+ [CLKID_SYS_STARTUP] = &c3_sys_startup.hw,
+ [CLKID_SYS_SECURE] = &c3_sys_secure.hw,
+ [CLKID_SYS_SPIFC] = &c3_sys_spifc.hw,
+ [CLKID_SYS_NNA] = &c3_sys_nna.hw,
+ [CLKID_SYS_ETH_MAC] = &c3_sys_eth_mac.hw,
+ [CLKID_SYS_GIC] = &c3_sys_gic.hw,
+ [CLKID_SYS_RAMA] = &c3_sys_rama.hw,
+ [CLKID_SYS_BIG_NIC] = &c3_sys_big_nic.hw,
+ [CLKID_SYS_RAMB] = &c3_sys_ramb.hw,
+ [CLKID_SYS_AUDIO_PCLK] = &c3_sys_audio_pclk.hw,
+ [CLKID_SYS_PWM_KL] = &c3_sys_pwm_kl.hw,
+ [CLKID_SYS_PWM_IJ] = &c3_sys_pwm_ij.hw,
+ [CLKID_SYS_USB] = &c3_sys_usb.hw,
+ [CLKID_SYS_SD_EMMC_A] = &c3_sys_sd_emmc_a.hw,
+ [CLKID_SYS_SD_EMMC_C] = &c3_sys_sd_emmc_c.hw,
+ [CLKID_SYS_PWM_AB] = &c3_sys_pwm_ab.hw,
+ [CLKID_SYS_PWM_CD] = &c3_sys_pwm_cd.hw,
+ [CLKID_SYS_PWM_EF] = &c3_sys_pwm_ef.hw,
+ [CLKID_SYS_PWM_GH] = &c3_sys_pwm_gh.hw,
+ [CLKID_SYS_SPICC_1] = &c3_sys_spicc_1.hw,
+ [CLKID_SYS_SPICC_0] = &c3_sys_spicc_0.hw,
+ [CLKID_SYS_UART_A] = &c3_sys_uart_a.hw,
+ [CLKID_SYS_UART_B] = &c3_sys_uart_b.hw,
+ [CLKID_SYS_UART_C] = &c3_sys_uart_c.hw,
+ [CLKID_SYS_UART_D] = &c3_sys_uart_d.hw,
+ [CLKID_SYS_UART_E] = &c3_sys_uart_e.hw,
+ [CLKID_SYS_I2C_M_A] = &c3_sys_i2c_m_a.hw,
+ [CLKID_SYS_I2C_M_B] = &c3_sys_i2c_m_b.hw,
+ [CLKID_SYS_I2C_M_C] = &c3_sys_i2c_m_c.hw,
+ [CLKID_SYS_I2C_M_D] = &c3_sys_i2c_m_d.hw,
+ [CLKID_SYS_I2S_S_A] = &c3_sys_i2c_s_a.hw,
+ [CLKID_SYS_RTC] = &c3_sys_rtc.hw,
+ [CLKID_SYS_GE2D] = &c3_sys_ge2d.hw,
+ [CLKID_SYS_ISP] = &c3_sys_isp.hw,
+ [CLKID_SYS_GPV_ISP_NIC] = &c3_sys_gpv_isp_nic.hw,
+ [CLKID_SYS_GPV_CVE_NIC] = &c3_sys_gpv_cve_nic.hw,
+ [CLKID_SYS_MIPI_DSI_HOST] = &c3_sys_mipi_dsi_host.hw,
+ [CLKID_SYS_MIPI_DSI_PHY] = &c3_sys_mipi_dsi_phy.hw,
+ [CLKID_SYS_ETH_PHY] = &c3_sys_eth_phy.hw,
+ [CLKID_SYS_ACODEC] = &c3_sys_acodec.hw,
+ [CLKID_SYS_DWAP] = &c3_sys_dwap.hw,
+ [CLKID_SYS_DOS] = &c3_sys_dos.hw,
+ [CLKID_SYS_CVE] = &c3_sys_cve.hw,
+ [CLKID_SYS_VOUT] = &c3_sys_vout.hw,
+ [CLKID_SYS_VC9000E] = &c3_sys_vc9000e.hw,
+ [CLKID_SYS_PWM_MN] = &c3_sys_pwm_mn.hw,
+ [CLKID_SYS_SD_EMMC_B] = &c3_sys_sd_emmc_b.hw,
+ [CLKID_AXI_SYS_NIC] = &c3_axi_sys_nic.hw,
+ [CLKID_AXI_ISP_NIC] = &c3_axi_isp_nic.hw,
+ [CLKID_AXI_CVE_NIC] = &c3_axi_cve_nic.hw,
+ [CLKID_AXI_RAMB] = &c3_axi_ramb.hw,
+ [CLKID_AXI_RAMA] = &c3_axi_rama.hw,
+ [CLKID_AXI_CPU_DMC] = &c3_axi_cpu_dmc.hw,
+ [CLKID_AXI_NIC] = &c3_axi_nic.hw,
+ [CLKID_AXI_DMA] = &c3_axi_dma.hw,
+ [CLKID_AXI_MUX_NIC] = &c3_axi_mux_nic.hw,
+ [CLKID_AXI_CVE] = &c3_axi_cve.hw,
+ [CLKID_AXI_DEV1_DMC] = &c3_axi_dev1_dmc.hw,
+ [CLKID_AXI_DEV0_DMC] = &c3_axi_dev0_dmc.hw,
+ [CLKID_AXI_DSP_DMC] = &c3_axi_dsp_dmc.hw,
+ [CLKID_12_24M_IN] = &c3_clk_12_24m_in.hw,
+ [CLKID_12M_24M] = &c3_clk_12_24m.hw,
+ [CLKID_FCLK_25M_DIV] = &c3_fclk_25m_div.hw,
+ [CLKID_FCLK_25M] = &c3_fclk_25m.hw,
+ [CLKID_GEN_SEL] = &c3_gen_sel.hw,
+ [CLKID_GEN_DIV] = &c3_gen_div.hw,
+ [CLKID_GEN] = &c3_gen.hw,
+ [CLKID_SARADC_SEL] = &c3_saradc_sel.hw,
+ [CLKID_SARADC_DIV] = &c3_saradc_div.hw,
+ [CLKID_SARADC] = &c3_saradc.hw,
+ [CLKID_PWM_A_SEL] = &c3_pwm_a_sel.hw,
+ [CLKID_PWM_A_DIV] = &c3_pwm_a_div.hw,
+ [CLKID_PWM_A] = &c3_pwm_a.hw,
+ [CLKID_PWM_B_SEL] = &c3_pwm_b_sel.hw,
+ [CLKID_PWM_B_DIV] = &c3_pwm_b_div.hw,
+ [CLKID_PWM_B] = &c3_pwm_b.hw,
+ [CLKID_PWM_C_SEL] = &c3_pwm_c_sel.hw,
+ [CLKID_PWM_C_DIV] = &c3_pwm_c_div.hw,
+ [CLKID_PWM_C] = &c3_pwm_c.hw,
+ [CLKID_PWM_D_SEL] = &c3_pwm_d_sel.hw,
+ [CLKID_PWM_D_DIV] = &c3_pwm_d_div.hw,
+ [CLKID_PWM_D] = &c3_pwm_d.hw,
+ [CLKID_PWM_E_SEL] = &c3_pwm_e_sel.hw,
+ [CLKID_PWM_E_DIV] = &c3_pwm_e_div.hw,
+ [CLKID_PWM_E] = &c3_pwm_e.hw,
+ [CLKID_PWM_F_SEL] = &c3_pwm_f_sel.hw,
+ [CLKID_PWM_F_DIV] = &c3_pwm_f_div.hw,
+ [CLKID_PWM_F] = &c3_pwm_f.hw,
+ [CLKID_PWM_G_SEL] = &c3_pwm_g_sel.hw,
+ [CLKID_PWM_G_DIV] = &c3_pwm_g_div.hw,
+ [CLKID_PWM_G] = &c3_pwm_g.hw,
+ [CLKID_PWM_H_SEL] = &c3_pwm_h_sel.hw,
+ [CLKID_PWM_H_DIV] = &c3_pwm_h_div.hw,
+ [CLKID_PWM_H] = &c3_pwm_h.hw,
+ [CLKID_PWM_I_SEL] = &c3_pwm_i_sel.hw,
+ [CLKID_PWM_I_DIV] = &c3_pwm_i_div.hw,
+ [CLKID_PWM_I] = &c3_pwm_i.hw,
+ [CLKID_PWM_J_SEL] = &c3_pwm_j_sel.hw,
+ [CLKID_PWM_J_DIV] = &c3_pwm_j_div.hw,
+ [CLKID_PWM_J] = &c3_pwm_j.hw,
+ [CLKID_PWM_K_SEL] = &c3_pwm_k_sel.hw,
+ [CLKID_PWM_K_DIV] = &c3_pwm_k_div.hw,
+ [CLKID_PWM_K] = &c3_pwm_k.hw,
+ [CLKID_PWM_L_SEL] = &c3_pwm_l_sel.hw,
+ [CLKID_PWM_L_DIV] = &c3_pwm_l_div.hw,
+ [CLKID_PWM_L] = &c3_pwm_l.hw,
+ [CLKID_PWM_M_SEL] = &c3_pwm_m_sel.hw,
+ [CLKID_PWM_M_DIV] = &c3_pwm_m_div.hw,
+ [CLKID_PWM_M] = &c3_pwm_m.hw,
+ [CLKID_PWM_N_SEL] = &c3_pwm_n_sel.hw,
+ [CLKID_PWM_N_DIV] = &c3_pwm_n_div.hw,
+ [CLKID_PWM_N] = &c3_pwm_n.hw,
+ [CLKID_SPICC_A_SEL] = &c3_spicc_a_sel.hw,
+ [CLKID_SPICC_A_DIV] = &c3_spicc_a_div.hw,
+ [CLKID_SPICC_A] = &c3_spicc_a.hw,
+ [CLKID_SPICC_B_SEL] = &c3_spicc_b_sel.hw,
+ [CLKID_SPICC_B_DIV] = &c3_spicc_b_div.hw,
+ [CLKID_SPICC_B] = &c3_spicc_b.hw,
+ [CLKID_SPIFC_SEL] = &c3_spifc_sel.hw,
+ [CLKID_SPIFC_DIV] = &c3_spifc_div.hw,
+ [CLKID_SPIFC] = &c3_spifc.hw,
+ [CLKID_SD_EMMC_A_SEL] = &c3_sd_emmc_a_sel.hw,
+ [CLKID_SD_EMMC_A_DIV] = &c3_sd_emmc_a_div.hw,
+ [CLKID_SD_EMMC_A] = &c3_sd_emmc_a.hw,
+ [CLKID_SD_EMMC_B_SEL] = &c3_sd_emmc_b_sel.hw,
+ [CLKID_SD_EMMC_B_DIV] = &c3_sd_emmc_b_div.hw,
+ [CLKID_SD_EMMC_B] = &c3_sd_emmc_b.hw,
+ [CLKID_SD_EMMC_C_SEL] = &c3_sd_emmc_c_sel.hw,
+ [CLKID_SD_EMMC_C_DIV] = &c3_sd_emmc_c_div.hw,
+ [CLKID_SD_EMMC_C] = &c3_sd_emmc_c.hw,
+ [CLKID_TS_DIV] = &c3_ts_div.hw,
+ [CLKID_TS] = &c3_ts.hw,
+ [CLKID_ETH_125M_DIV] = &c3_eth_125m_div.hw,
+ [CLKID_ETH_125M] = &c3_eth_125m.hw,
+ [CLKID_ETH_RMII_DIV] = &c3_eth_rmii_div.hw,
+ [CLKID_ETH_RMII] = &c3_eth_rmii.hw,
+ [CLKID_MIPI_DSI_MEAS_SEL] = &c3_mipi_dsi_meas_sel.hw,
+ [CLKID_MIPI_DSI_MEAS_DIV] = &c3_mipi_dsi_meas_div.hw,
+ [CLKID_MIPI_DSI_MEAS] = &c3_mipi_dsi_meas.hw,
+ [CLKID_DSI_PHY_SEL] = &c3_dsi_phy_sel.hw,
+ [CLKID_DSI_PHY_DIV] = &c3_dsi_phy_div.hw,
+ [CLKID_DSI_PHY] = &c3_dsi_phy.hw,
+ [CLKID_VOUT_MCLK_SEL] = &c3_vout_mclk_sel.hw,
+ [CLKID_VOUT_MCLK_DIV] = &c3_vout_mclk_div.hw,
+ [CLKID_VOUT_MCLK] = &c3_vout_mclk.hw,
+ [CLKID_VOUT_ENC_SEL] = &c3_vout_enc_sel.hw,
+ [CLKID_VOUT_ENC_DIV] = &c3_vout_enc_div.hw,
+ [CLKID_VOUT_ENC] = &c3_vout_enc.hw,
+ [CLKID_HCODEC_0_SEL] = &c3_hcodec_0_sel.hw,
+ [CLKID_HCODEC_0_DIV] = &c3_hcodec_0_div.hw,
+ [CLKID_HCODEC_0] = &c3_hcodec_0.hw,
+ [CLKID_HCODEC_1_SEL] = &c3_hcodec_1_sel.hw,
+ [CLKID_HCODEC_1_DIV] = &c3_hcodec_1_div.hw,
+ [CLKID_HCODEC_1] = &c3_hcodec_1.hw,
+ [CLKID_HCODEC] = &c3_hcodec.hw,
+ [CLKID_VC9000E_ACLK_SEL] = &c3_vc9000e_aclk_sel.hw,
+ [CLKID_VC9000E_ACLK_DIV] = &c3_vc9000e_aclk_div.hw,
+ [CLKID_VC9000E_ACLK] = &c3_vc9000e_aclk.hw,
+ [CLKID_VC9000E_CORE_SEL] = &c3_vc9000e_core_sel.hw,
+ [CLKID_VC9000E_CORE_DIV] = &c3_vc9000e_core_div.hw,
+ [CLKID_VC9000E_CORE] = &c3_vc9000e_core.hw,
+ [CLKID_CSI_PHY0_SEL] = &c3_csi_phy0_sel.hw,
+ [CLKID_CSI_PHY0_DIV] = &c3_csi_phy0_div.hw,
+ [CLKID_CSI_PHY0] = &c3_csi_phy0.hw,
+ [CLKID_DEWARPA_SEL] = &c3_dewarpa_sel.hw,
+ [CLKID_DEWARPA_DIV] = &c3_dewarpa_div.hw,
+ [CLKID_DEWARPA] = &c3_dewarpa.hw,
+ [CLKID_ISP0_SEL] = &c3_isp0_sel.hw,
+ [CLKID_ISP0_DIV] = &c3_isp0_div.hw,
+ [CLKID_ISP0] = &c3_isp0.hw,
+ [CLKID_NNA_CORE_SEL] = &c3_nna_core_sel.hw,
+ [CLKID_NNA_CORE_DIV] = &c3_nna_core_div.hw,
+ [CLKID_NNA_CORE] = &c3_nna_core.hw,
+ [CLKID_GE2D_SEL] = &c3_ge2d_sel.hw,
+ [CLKID_GE2D_DIV] = &c3_ge2d_div.hw,
+ [CLKID_GE2D] = &c3_ge2d.hw,
+ [CLKID_VAPB_SEL] = &c3_vapb_sel.hw,
+ [CLKID_VAPB_DIV] = &c3_vapb_div.hw,
+ [CLKID_VAPB] = &c3_vapb.hw,
+};
+
+static const struct meson_clkc_data c3_peripherals_clkc_data = {
+ .hw_clks = {
+ .hws = c3_peripherals_hw_clks,
+ .num = ARRAY_SIZE(c3_peripherals_hw_clks),
},
};
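With the CLKID-indexed table above exported through meson_clk_hw_get(), consumers reference these clocks by their dt-bindings index. A consumer-side sketch, with hypothetical node and clock-names entries not taken from this patch:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical driver: a DT node whose clocks property points at
 * <&clkc_periphs CLKID_SD_EMMC_C> resolves through meson_clk_hw_get()
 * to the c3_sd_emmc_c.hw entry registered above. */
static int example_mmc_probe(struct platform_device *pdev)
{
	/* "core" is a hypothetical clock-names entry in the consumer node */
	struct clk *core = devm_clk_get_enabled(&pdev->dev, "core");

	return PTR_ERR_OR_ZERO(core);
}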
-static struct clk_regmap vapb_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VAPB_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vapb_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vapb_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vapb = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VAPB_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vapb",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vapb_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_hw *c3_periphs_hw_clks[] = {
- [CLKID_RTC_XTAL_CLKIN] = &rtc_xtal_clkin.hw,
- [CLKID_RTC_32K_DIV] = &rtc_32k_div.hw,
- [CLKID_RTC_32K_MUX] = &rtc_32k_mux.hw,
- [CLKID_RTC_32K] = &rtc_32k.hw,
- [CLKID_RTC_CLK] = &rtc_clk.hw,
- [CLKID_SYS_RESET_CTRL] = &sys_reset_ctrl.hw,
- [CLKID_SYS_PWR_CTRL] = &sys_pwr_ctrl.hw,
- [CLKID_SYS_PAD_CTRL] = &sys_pad_ctrl.hw,
- [CLKID_SYS_CTRL] = &sys_ctrl.hw,
- [CLKID_SYS_TS_PLL] = &sys_ts_pll.hw,
- [CLKID_SYS_DEV_ARB] = &sys_dev_arb.hw,
- [CLKID_SYS_MMC_PCLK] = &sys_mmc_pclk.hw,
- [CLKID_SYS_CPU_CTRL] = &sys_cpu_ctrl.hw,
- [CLKID_SYS_JTAG_CTRL] = &sys_jtag_ctrl.hw,
- [CLKID_SYS_IR_CTRL] = &sys_ir_ctrl.hw,
- [CLKID_SYS_IRQ_CTRL] = &sys_irq_ctrl.hw,
- [CLKID_SYS_MSR_CLK] = &sys_msr_clk.hw,
- [CLKID_SYS_ROM] = &sys_rom.hw,
- [CLKID_SYS_UART_F] = &sys_uart_f.hw,
- [CLKID_SYS_CPU_ARB] = &sys_cpu_apb.hw,
- [CLKID_SYS_RSA] = &sys_rsa.hw,
- [CLKID_SYS_SAR_ADC] = &sys_sar_adc.hw,
- [CLKID_SYS_STARTUP] = &sys_startup.hw,
- [CLKID_SYS_SECURE] = &sys_secure.hw,
- [CLKID_SYS_SPIFC] = &sys_spifc.hw,
- [CLKID_SYS_NNA] = &sys_nna.hw,
- [CLKID_SYS_ETH_MAC] = &sys_eth_mac.hw,
- [CLKID_SYS_GIC] = &sys_gic.hw,
- [CLKID_SYS_RAMA] = &sys_rama.hw,
- [CLKID_SYS_BIG_NIC] = &sys_big_nic.hw,
- [CLKID_SYS_RAMB] = &sys_ramb.hw,
- [CLKID_SYS_AUDIO_PCLK] = &sys_audio_pclk.hw,
- [CLKID_SYS_PWM_KL] = &sys_pwm_kl.hw,
- [CLKID_SYS_PWM_IJ] = &sys_pwm_ij.hw,
- [CLKID_SYS_USB] = &sys_usb.hw,
- [CLKID_SYS_SD_EMMC_A] = &sys_sd_emmc_a.hw,
- [CLKID_SYS_SD_EMMC_C] = &sys_sd_emmc_c.hw,
- [CLKID_SYS_PWM_AB] = &sys_pwm_ab.hw,
- [CLKID_SYS_PWM_CD] = &sys_pwm_cd.hw,
- [CLKID_SYS_PWM_EF] = &sys_pwm_ef.hw,
- [CLKID_SYS_PWM_GH] = &sys_pwm_gh.hw,
- [CLKID_SYS_SPICC_1] = &sys_spicc_1.hw,
- [CLKID_SYS_SPICC_0] = &sys_spicc_0.hw,
- [CLKID_SYS_UART_A] = &sys_uart_a.hw,
- [CLKID_SYS_UART_B] = &sys_uart_b.hw,
- [CLKID_SYS_UART_C] = &sys_uart_c.hw,
- [CLKID_SYS_UART_D] = &sys_uart_d.hw,
- [CLKID_SYS_UART_E] = &sys_uart_e.hw,
- [CLKID_SYS_I2C_M_A] = &sys_i2c_m_a.hw,
- [CLKID_SYS_I2C_M_B] = &sys_i2c_m_b.hw,
- [CLKID_SYS_I2C_M_C] = &sys_i2c_m_c.hw,
- [CLKID_SYS_I2C_M_D] = &sys_i2c_m_d.hw,
- [CLKID_SYS_I2S_S_A] = &sys_i2c_s_a.hw,
- [CLKID_SYS_RTC] = &sys_rtc.hw,
- [CLKID_SYS_GE2D] = &sys_ge2d.hw,
- [CLKID_SYS_ISP] = &sys_isp.hw,
- [CLKID_SYS_GPV_ISP_NIC] = &sys_gpv_isp_nic.hw,
- [CLKID_SYS_GPV_CVE_NIC] = &sys_gpv_cve_nic.hw,
- [CLKID_SYS_MIPI_DSI_HOST] = &sys_mipi_dsi_host.hw,
- [CLKID_SYS_MIPI_DSI_PHY] = &sys_mipi_dsi_phy.hw,
- [CLKID_SYS_ETH_PHY] = &sys_eth_phy.hw,
- [CLKID_SYS_ACODEC] = &sys_acodec.hw,
- [CLKID_SYS_DWAP] = &sys_dwap.hw,
- [CLKID_SYS_DOS] = &sys_dos.hw,
- [CLKID_SYS_CVE] = &sys_cve.hw,
- [CLKID_SYS_VOUT] = &sys_vout.hw,
- [CLKID_SYS_VC9000E] = &sys_vc9000e.hw,
- [CLKID_SYS_PWM_MN] = &sys_pwm_mn.hw,
- [CLKID_SYS_SD_EMMC_B] = &sys_sd_emmc_b.hw,
- [CLKID_AXI_SYS_NIC] = &axi_sys_nic.hw,
- [CLKID_AXI_ISP_NIC] = &axi_isp_nic.hw,
- [CLKID_AXI_CVE_NIC] = &axi_cve_nic.hw,
- [CLKID_AXI_RAMB] = &axi_ramb.hw,
- [CLKID_AXI_RAMA] = &axi_rama.hw,
- [CLKID_AXI_CPU_DMC] = &axi_cpu_dmc.hw,
- [CLKID_AXI_NIC] = &axi_nic.hw,
- [CLKID_AXI_DMA] = &axi_dma.hw,
- [CLKID_AXI_MUX_NIC] = &axi_mux_nic.hw,
- [CLKID_AXI_CVE] = &axi_cve.hw,
- [CLKID_AXI_DEV1_DMC] = &axi_dev1_dmc.hw,
- [CLKID_AXI_DEV0_DMC] = &axi_dev0_dmc.hw,
- [CLKID_AXI_DSP_DMC] = &axi_dsp_dmc.hw,
- [CLKID_12_24M_IN] = &clk_12_24m_in.hw,
- [CLKID_12M_24M] = &clk_12_24m.hw,
- [CLKID_FCLK_25M_DIV] = &fclk_25m_div.hw,
- [CLKID_FCLK_25M] = &fclk_25m.hw,
- [CLKID_GEN_SEL] = &gen_sel.hw,
- [CLKID_GEN_DIV] = &gen_div.hw,
- [CLKID_GEN] = &gen.hw,
- [CLKID_SARADC_SEL] = &saradc_sel.hw,
- [CLKID_SARADC_DIV] = &saradc_div.hw,
- [CLKID_SARADC] = &saradc.hw,
- [CLKID_PWM_A_SEL] = &pwm_a_sel.hw,
- [CLKID_PWM_A_DIV] = &pwm_a_div.hw,
- [CLKID_PWM_A] = &pwm_a.hw,
- [CLKID_PWM_B_SEL] = &pwm_b_sel.hw,
- [CLKID_PWM_B_DIV] = &pwm_b_div.hw,
- [CLKID_PWM_B] = &pwm_b.hw,
- [CLKID_PWM_C_SEL] = &pwm_c_sel.hw,
- [CLKID_PWM_C_DIV] = &pwm_c_div.hw,
- [CLKID_PWM_C] = &pwm_c.hw,
- [CLKID_PWM_D_SEL] = &pwm_d_sel.hw,
- [CLKID_PWM_D_DIV] = &pwm_d_div.hw,
- [CLKID_PWM_D] = &pwm_d.hw,
- [CLKID_PWM_E_SEL] = &pwm_e_sel.hw,
- [CLKID_PWM_E_DIV] = &pwm_e_div.hw,
- [CLKID_PWM_E] = &pwm_e.hw,
- [CLKID_PWM_F_SEL] = &pwm_f_sel.hw,
- [CLKID_PWM_F_DIV] = &pwm_f_div.hw,
- [CLKID_PWM_F] = &pwm_f.hw,
- [CLKID_PWM_G_SEL] = &pwm_g_sel.hw,
- [CLKID_PWM_G_DIV] = &pwm_g_div.hw,
- [CLKID_PWM_G] = &pwm_g.hw,
- [CLKID_PWM_H_SEL] = &pwm_h_sel.hw,
- [CLKID_PWM_H_DIV] = &pwm_h_div.hw,
- [CLKID_PWM_H] = &pwm_h.hw,
- [CLKID_PWM_I_SEL] = &pwm_i_sel.hw,
- [CLKID_PWM_I_DIV] = &pwm_i_div.hw,
- [CLKID_PWM_I] = &pwm_i.hw,
- [CLKID_PWM_J_SEL] = &pwm_j_sel.hw,
- [CLKID_PWM_J_DIV] = &pwm_j_div.hw,
- [CLKID_PWM_J] = &pwm_j.hw,
- [CLKID_PWM_K_SEL] = &pwm_k_sel.hw,
- [CLKID_PWM_K_DIV] = &pwm_k_div.hw,
- [CLKID_PWM_K] = &pwm_k.hw,
- [CLKID_PWM_L_SEL] = &pwm_l_sel.hw,
- [CLKID_PWM_L_DIV] = &pwm_l_div.hw,
- [CLKID_PWM_L] = &pwm_l.hw,
- [CLKID_PWM_M_SEL] = &pwm_m_sel.hw,
- [CLKID_PWM_M_DIV] = &pwm_m_div.hw,
- [CLKID_PWM_M] = &pwm_m.hw,
- [CLKID_PWM_N_SEL] = &pwm_n_sel.hw,
- [CLKID_PWM_N_DIV] = &pwm_n_div.hw,
- [CLKID_PWM_N] = &pwm_n.hw,
- [CLKID_SPICC_A_SEL] = &spicc_a_sel.hw,
- [CLKID_SPICC_A_DIV] = &spicc_a_div.hw,
- [CLKID_SPICC_A] = &spicc_a.hw,
- [CLKID_SPICC_B_SEL] = &spicc_b_sel.hw,
- [CLKID_SPICC_B_DIV] = &spicc_b_div.hw,
- [CLKID_SPICC_B] = &spicc_b.hw,
- [CLKID_SPIFC_SEL] = &spifc_sel.hw,
- [CLKID_SPIFC_DIV] = &spifc_div.hw,
- [CLKID_SPIFC] = &spifc.hw,
- [CLKID_SD_EMMC_A_SEL] = &sd_emmc_a_sel.hw,
- [CLKID_SD_EMMC_A_DIV] = &sd_emmc_a_div.hw,
- [CLKID_SD_EMMC_A] = &sd_emmc_a.hw,
- [CLKID_SD_EMMC_B_SEL] = &sd_emmc_b_sel.hw,
- [CLKID_SD_EMMC_B_DIV] = &sd_emmc_b_div.hw,
- [CLKID_SD_EMMC_B] = &sd_emmc_b.hw,
- [CLKID_SD_EMMC_C_SEL] = &sd_emmc_c_sel.hw,
- [CLKID_SD_EMMC_C_DIV] = &sd_emmc_c_div.hw,
- [CLKID_SD_EMMC_C] = &sd_emmc_c.hw,
- [CLKID_TS_DIV] = &ts_div.hw,
- [CLKID_TS] = &ts.hw,
- [CLKID_ETH_125M_DIV] = &eth_125m_div.hw,
- [CLKID_ETH_125M] = &eth_125m.hw,
- [CLKID_ETH_RMII_DIV] = &eth_rmii_div.hw,
- [CLKID_ETH_RMII] = &eth_rmii.hw,
- [CLKID_MIPI_DSI_MEAS_SEL] = &mipi_dsi_meas_sel.hw,
- [CLKID_MIPI_DSI_MEAS_DIV] = &mipi_dsi_meas_div.hw,
- [CLKID_MIPI_DSI_MEAS] = &mipi_dsi_meas.hw,
- [CLKID_DSI_PHY_SEL] = &dsi_phy_sel.hw,
- [CLKID_DSI_PHY_DIV] = &dsi_phy_div.hw,
- [CLKID_DSI_PHY] = &dsi_phy.hw,
- [CLKID_VOUT_MCLK_SEL] = &vout_mclk_sel.hw,
- [CLKID_VOUT_MCLK_DIV] = &vout_mclk_div.hw,
- [CLKID_VOUT_MCLK] = &vout_mclk.hw,
- [CLKID_VOUT_ENC_SEL] = &vout_enc_sel.hw,
- [CLKID_VOUT_ENC_DIV] = &vout_enc_div.hw,
- [CLKID_VOUT_ENC] = &vout_enc.hw,
- [CLKID_HCODEC_0_SEL] = &hcodec_0_sel.hw,
- [CLKID_HCODEC_0_DIV] = &hcodec_0_div.hw,
- [CLKID_HCODEC_0] = &hcodec_0.hw,
- [CLKID_HCODEC_1_SEL] = &hcodec_1_sel.hw,
- [CLKID_HCODEC_1_DIV] = &hcodec_1_div.hw,
- [CLKID_HCODEC_1] = &hcodec_1.hw,
- [CLKID_HCODEC] = &hcodec.hw,
- [CLKID_VC9000E_ACLK_SEL] = &vc9000e_aclk_sel.hw,
- [CLKID_VC9000E_ACLK_DIV] = &vc9000e_aclk_div.hw,
- [CLKID_VC9000E_ACLK] = &vc9000e_aclk.hw,
- [CLKID_VC9000E_CORE_SEL] = &vc9000e_core_sel.hw,
- [CLKID_VC9000E_CORE_DIV] = &vc9000e_core_div.hw,
- [CLKID_VC9000E_CORE] = &vc9000e_core.hw,
- [CLKID_CSI_PHY0_SEL] = &csi_phy0_sel.hw,
- [CLKID_CSI_PHY0_DIV] = &csi_phy0_div.hw,
- [CLKID_CSI_PHY0] = &csi_phy0.hw,
- [CLKID_DEWARPA_SEL] = &dewarpa_sel.hw,
- [CLKID_DEWARPA_DIV] = &dewarpa_div.hw,
- [CLKID_DEWARPA] = &dewarpa.hw,
- [CLKID_ISP0_SEL] = &isp0_sel.hw,
- [CLKID_ISP0_DIV] = &isp0_div.hw,
- [CLKID_ISP0] = &isp0.hw,
- [CLKID_NNA_CORE_SEL] = &nna_core_sel.hw,
- [CLKID_NNA_CORE_DIV] = &nna_core_div.hw,
- [CLKID_NNA_CORE] = &nna_core.hw,
- [CLKID_GE2D_SEL] = &ge2d_sel.hw,
- [CLKID_GE2D_DIV] = &ge2d_div.hw,
- [CLKID_GE2D] = &ge2d.hw,
- [CLKID_VAPB_SEL] = &vapb_sel.hw,
- [CLKID_VAPB_DIV] = &vapb_div.hw,
- [CLKID_VAPB] = &vapb.hw,
-};
-
-/* Convenience table to populate regmap in .probe */
-static struct clk_regmap *const c3_periphs_clk_regmaps[] = {
- &rtc_xtal_clkin,
- &rtc_32k_div,
- &rtc_32k_mux,
- &rtc_32k,
- &rtc_clk,
- &sys_reset_ctrl,
- &sys_pwr_ctrl,
- &sys_pad_ctrl,
- &sys_ctrl,
- &sys_ts_pll,
- &sys_dev_arb,
- &sys_mmc_pclk,
- &sys_cpu_ctrl,
- &sys_jtag_ctrl,
- &sys_ir_ctrl,
- &sys_irq_ctrl,
- &sys_msr_clk,
- &sys_rom,
- &sys_uart_f,
- &sys_cpu_apb,
- &sys_rsa,
- &sys_sar_adc,
- &sys_startup,
- &sys_secure,
- &sys_spifc,
- &sys_nna,
- &sys_eth_mac,
- &sys_gic,
- &sys_rama,
- &sys_big_nic,
- &sys_ramb,
- &sys_audio_pclk,
- &sys_pwm_kl,
- &sys_pwm_ij,
- &sys_usb,
- &sys_sd_emmc_a,
- &sys_sd_emmc_c,
- &sys_pwm_ab,
- &sys_pwm_cd,
- &sys_pwm_ef,
- &sys_pwm_gh,
- &sys_spicc_1,
- &sys_spicc_0,
- &sys_uart_a,
- &sys_uart_b,
- &sys_uart_c,
- &sys_uart_d,
- &sys_uart_e,
- &sys_i2c_m_a,
- &sys_i2c_m_b,
- &sys_i2c_m_c,
- &sys_i2c_m_d,
- &sys_i2c_s_a,
- &sys_rtc,
- &sys_ge2d,
- &sys_isp,
- &sys_gpv_isp_nic,
- &sys_gpv_cve_nic,
- &sys_mipi_dsi_host,
- &sys_mipi_dsi_phy,
- &sys_eth_phy,
- &sys_acodec,
- &sys_dwap,
- &sys_dos,
- &sys_cve,
- &sys_vout,
- &sys_vc9000e,
- &sys_pwm_mn,
- &sys_sd_emmc_b,
- &axi_sys_nic,
- &axi_isp_nic,
- &axi_cve_nic,
- &axi_ramb,
- &axi_rama,
- &axi_cpu_dmc,
- &axi_nic,
- &axi_dma,
- &axi_mux_nic,
- &axi_cve,
- &axi_dev1_dmc,
- &axi_dev0_dmc,
- &axi_dsp_dmc,
- &clk_12_24m_in,
- &clk_12_24m,
- &fclk_25m_div,
- &fclk_25m,
- &gen_sel,
- &gen_div,
- &gen,
- &saradc_sel,
- &saradc_div,
- &saradc,
- &pwm_a_sel,
- &pwm_a_div,
- &pwm_a,
- &pwm_b_sel,
- &pwm_b_div,
- &pwm_b,
- &pwm_c_sel,
- &pwm_c_div,
- &pwm_c,
- &pwm_d_sel,
- &pwm_d_div,
- &pwm_d,
- &pwm_e_sel,
- &pwm_e_div,
- &pwm_e,
- &pwm_f_sel,
- &pwm_f_div,
- &pwm_f,
- &pwm_g_sel,
- &pwm_g_div,
- &pwm_g,
- &pwm_h_sel,
- &pwm_h_div,
- &pwm_h,
- &pwm_i_sel,
- &pwm_i_div,
- &pwm_i,
- &pwm_j_sel,
- &pwm_j_div,
- &pwm_j,
- &pwm_k_sel,
- &pwm_k_div,
- &pwm_k,
- &pwm_l_sel,
- &pwm_l_div,
- &pwm_l,
- &pwm_m_sel,
- &pwm_m_div,
- &pwm_m,
- &pwm_n_sel,
- &pwm_n_div,
- &pwm_n,
- &spicc_a_sel,
- &spicc_a_div,
- &spicc_a,
- &spicc_b_sel,
- &spicc_b_div,
- &spicc_b,
- &spifc_sel,
- &spifc_div,
- &spifc,
- &sd_emmc_a_sel,
- &sd_emmc_a_div,
- &sd_emmc_a,
- &sd_emmc_b_sel,
- &sd_emmc_b_div,
- &sd_emmc_b,
- &sd_emmc_c_sel,
- &sd_emmc_c_div,
- &sd_emmc_c,
- &ts_div,
- &ts,
- &eth_125m,
- &eth_rmii_div,
- &eth_rmii,
- &mipi_dsi_meas_sel,
- &mipi_dsi_meas_div,
- &mipi_dsi_meas,
- &dsi_phy_sel,
- &dsi_phy_div,
- &dsi_phy,
- &vout_mclk_sel,
- &vout_mclk_div,
- &vout_mclk,
- &vout_enc_sel,
- &vout_enc_div,
- &vout_enc,
- &hcodec_0_sel,
- &hcodec_0_div,
- &hcodec_0,
- &hcodec_1_sel,
- &hcodec_1_div,
- &hcodec_1,
- &hcodec,
- &vc9000e_aclk_sel,
- &vc9000e_aclk_div,
- &vc9000e_aclk,
- &vc9000e_core_sel,
- &vc9000e_core_div,
- &vc9000e_core,
- &csi_phy0_sel,
- &csi_phy0_div,
- &csi_phy0,
- &dewarpa_sel,
- &dewarpa_div,
- &dewarpa,
- &isp0_sel,
- &isp0_div,
- &isp0,
- &nna_core_sel,
- &nna_core_div,
- &nna_core,
- &ge2d_sel,
- &ge2d_div,
- &ge2d,
- &vapb_sel,
- &vapb_div,
- &vapb,
-};
-
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = NNA_CLK_CTRL,
-};
-
-static struct meson_clk_hw_data c3_periphs_clks = {
- .hws = c3_periphs_hw_clks,
- .num = ARRAY_SIZE(c3_periphs_hw_clks),
-};
-
-static int c3_peripherals_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *regmap;
- void __iomem *base;
- int clkid, ret, i;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- regmap = devm_regmap_init_mmio(dev, base, &clkc_regmap_config);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(c3_periphs_clk_regmaps); i++)
- c3_periphs_clk_regmaps[i]->map = regmap;
-
- for (clkid = 0; clkid < c3_periphs_clks.num; clkid++) {
- /* array might be sparse */
- if (!c3_periphs_clks.hws[clkid])
- continue;
-
- ret = devm_clk_hw_register(dev, c3_periphs_clks.hws[clkid]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
- &c3_periphs_clks);
-}
-
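The per-driver probe removed above is replaced by the shared meson_clkc_mmio_probe() from the common meson clk code, driven by the meson_clkc_data attached to the match table below. A minimal sketch of what such a helper has to do, reconstructed from the probe deleted here; the regmap_config handling and the clk_regmap detection are assumptions, since the hand-maintained c3_periphs_clk_regmaps[] table is gone and the common code must identify regmap-backed clocks itself:

/* Illustrative only: reconstructed from the deleted c3_peripherals_probe().
 * Names marked hypothetical are not from this patch. */
static int meson_clkc_mmio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct meson_clkc_data *data = device_get_match_data(dev);
	void __iomem *base;
	struct regmap *map;
	unsigned int i;
	int ret;

	if (!data)
		return -EINVAL;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* assumption: a generic 32-bit MMIO config replaces the driver-local
	 * clkc_regmap_config (max_register = NNA_CLK_CTRL) dropped above */
	map = devm_regmap_init_mmio(dev, base, &meson_clkc_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	for (i = 0; i < data->hw_clks.num; i++) {
		struct clk_hw *hw = data->hw_clks.hws[i];

		if (!hw)	/* CLKID-indexed tables may be sparse */
			continue;

		/* hypothetical helper: hands the regmap to regmap-backed
		 * clocks, replacing the *_clk_regmaps[] walk */
		meson_clkc_assign_regmap(hw, map);

		ret = devm_clk_hw_register(dev, hw);
		if (ret)
			return ret;
	}

	return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
					   (void *)&data->hw_clks);
}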
static const struct of_device_id c3_peripherals_clkc_match_table[] = {
{
.compatible = "amlogic,c3-peripherals-clkc",
+ .data = &c3_peripherals_clkc_data,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, c3_peripherals_clkc_match_table);
-static struct platform_driver c3_peripherals_driver = {
- .probe = c3_peripherals_probe,
+static struct platform_driver c3_peripherals_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "c3-peripherals-clkc",
.of_match_table = c3_peripherals_clkc_match_table,
},
};
-module_platform_driver(c3_peripherals_driver);
+module_platform_driver(c3_peripherals_clkc_driver);
MODULE_DESCRIPTION("Amlogic C3 Peripherals Clock Controller driver");
MODULE_AUTHOR("Chuan Liu <chuan.liu@amlogic.com>");
diff --git a/drivers/clk/meson/c3-pll.c b/drivers/clk/meson/c3-pll.c
index ed4bc495862e..dd047d17488c 100644
--- a/drivers/clk/meson/c3-pll.c
+++ b/drivers/clk/meson/c3-pll.c
@@ -34,7 +34,7 @@
#define ANACTRL_MPLL_CTRL3 0x18c
#define ANACTRL_MPLL_CTRL4 0x190
-static struct clk_regmap fclk_50m_en = {
+static struct clk_regmap c3_fclk_50m_en = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 0,
@@ -49,20 +49,20 @@ static struct clk_regmap fclk_50m_en = {
},
};
-static struct clk_fixed_factor fclk_50m = {
+static struct clk_fixed_factor c3_fclk_50m = {
.mult = 1,
.div = 40,
.hw.init = &(struct clk_init_data) {
.name = "fclk_50m",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_50m_en.hw
+ &c3_fclk_50m_en.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div2_div = {
+static struct clk_fixed_factor c3_fclk_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data) {
@@ -75,7 +75,7 @@ static struct clk_fixed_factor fclk_div2_div = {
},
};
-static struct clk_regmap fclk_div2 = {
+static struct clk_regmap c3_fclk_div2 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 24,
@@ -84,13 +84,13 @@ static struct clk_regmap fclk_div2 = {
.name = "fclk_div2",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div2_div.hw
+ &c3_fclk_div2_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div2p5_div = {
+static struct clk_fixed_factor c3_fclk_div2p5_div = {
.mult = 2,
.div = 5,
.hw.init = &(struct clk_init_data) {
@@ -103,7 +103,7 @@ static struct clk_fixed_factor fclk_div2p5_div = {
},
};
-static struct clk_regmap fclk_div2p5 = {
+static struct clk_regmap c3_fclk_div2p5 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 4,
@@ -112,13 +112,13 @@ static struct clk_regmap fclk_div2p5 = {
.name = "fclk_div2p5",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div2p5_div.hw
+ &c3_fclk_div2p5_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div3_div = {
+static struct clk_fixed_factor c3_fclk_div3_div = {
.mult = 1,
.div = 3,
.hw.init = &(struct clk_init_data) {
@@ -131,7 +131,7 @@ static struct clk_fixed_factor fclk_div3_div = {
},
};
-static struct clk_regmap fclk_div3 = {
+static struct clk_regmap c3_fclk_div3 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 20,
@@ -140,13 +140,13 @@ static struct clk_regmap fclk_div3 = {
.name = "fclk_div3",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div3_div.hw
+ &c3_fclk_div3_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div4_div = {
+static struct clk_fixed_factor c3_fclk_div4_div = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data) {
@@ -159,7 +159,7 @@ static struct clk_fixed_factor fclk_div4_div = {
},
};
-static struct clk_regmap fclk_div4 = {
+static struct clk_regmap c3_fclk_div4 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 21,
@@ -168,13 +168,13 @@ static struct clk_regmap fclk_div4 = {
.name = "fclk_div4",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div4_div.hw
+ &c3_fclk_div4_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div5_div = {
+static struct clk_fixed_factor c3_fclk_div5_div = {
.mult = 1,
.div = 5,
.hw.init = &(struct clk_init_data) {
@@ -187,7 +187,7 @@ static struct clk_fixed_factor fclk_div5_div = {
},
};
-static struct clk_regmap fclk_div5 = {
+static struct clk_regmap c3_fclk_div5 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 22,
@@ -196,13 +196,13 @@ static struct clk_regmap fclk_div5 = {
.name = "fclk_div5",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div5_div.hw
+ &c3_fclk_div5_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div7_div = {
+static struct clk_fixed_factor c3_fclk_div7_div = {
.mult = 1,
.div = 7,
.hw.init = &(struct clk_init_data) {
@@ -215,7 +215,7 @@ static struct clk_fixed_factor fclk_div7_div = {
},
};
-static struct clk_regmap fclk_div7 = {
+static struct clk_regmap c3_fclk_div7 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 23,
@@ -224,13 +224,13 @@ static struct clk_regmap fclk_div7 = {
.name = "fclk_div7",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div7_div.hw
+ &c3_fclk_div7_div.hw
},
.num_parents = 1,
},
};
-static const struct reg_sequence c3_gp0_init_regs[] = {
+static const struct reg_sequence c3_gp0_pll_init_regs[] = {
{ .reg = ANACTRL_GP0PLL_CTRL2, .def = 0x0 },
{ .reg = ANACTRL_GP0PLL_CTRL3, .def = 0x48681c00 },
{ .reg = ANACTRL_GP0PLL_CTRL4, .def = 0x88770290 },
@@ -243,7 +243,7 @@ static const struct pll_mult_range c3_gp0_pll_mult_range = {
.max = 250,
};
-static struct clk_regmap gp0_pll_dco = {
+static struct clk_regmap c3_gp0_pll_dco = {
.data = &(struct meson_clk_pll_data) {
.en = {
.reg_off = ANACTRL_GP0PLL_CTRL0,
@@ -276,8 +276,8 @@ static struct clk_regmap gp0_pll_dco = {
.width = 1,
},
.range = &c3_gp0_pll_mult_range,
- .init_regs = c3_gp0_init_regs,
- .init_count = ARRAY_SIZE(c3_gp0_init_regs),
+ .init_regs = c3_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(c3_gp0_pll_init_regs),
},
.hw.init = &(struct clk_init_data) {
.name = "gp0_pll_dco",
@@ -300,7 +300,7 @@ static const struct clk_div_table c3_gp0_pll_od_table[] = {
{ /* sentinel */ }
};
-static struct clk_regmap gp0_pll = {
+static struct clk_regmap c3_gp0_pll = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_GP0PLL_CTRL0,
.shift = 16,
@@ -311,14 +311,14 @@ static struct clk_regmap gp0_pll = {
.name = "gp0_pll",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gp0_pll_dco.hw
+ &c3_gp0_pll_dco.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct reg_sequence c3_hifi_init_regs[] = {
+static const struct reg_sequence c3_hifi_pll_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL2, .def = 0x0 },
{ .reg = ANACTRL_HIFIPLL_CTRL3, .def = 0x6a285c00 },
{ .reg = ANACTRL_HIFIPLL_CTRL4, .def = 0x65771290 },
@@ -326,7 +326,7 @@ static const struct reg_sequence c3_hifi_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL6, .def = 0x56540000 },
};
-static struct clk_regmap hifi_pll_dco = {
+static struct clk_regmap c3_hifi_pll_dco = {
.data = &(struct meson_clk_pll_data) {
.en = {
.reg_off = ANACTRL_HIFIPLL_CTRL0,
@@ -359,8 +359,8 @@ static struct clk_regmap hifi_pll_dco = {
.width = 1,
},
.range = &c3_gp0_pll_mult_range,
- .init_regs = c3_hifi_init_regs,
- .init_count = ARRAY_SIZE(c3_hifi_init_regs),
+ .init_regs = c3_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(c3_hifi_pll_init_regs),
.frac_max = 100000,
},
.hw.init = &(struct clk_init_data) {
@@ -373,7 +373,7 @@ static struct clk_regmap hifi_pll_dco = {
},
};
-static struct clk_regmap hifi_pll = {
+static struct clk_regmap c3_hifi_pll = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_HIFIPLL_CTRL0,
.shift = 16,
@@ -384,14 +384,14 @@ static struct clk_regmap hifi_pll = {
.name = "hifi_pll",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &hifi_pll_dco.hw
+ &c3_hifi_pll_dco.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct reg_sequence c3_mclk_init_regs[] = {
+static const struct reg_sequence c3_mclk_pll_init_regs[] = {
{ .reg = ANACTRL_MPLL_CTRL1, .def = 0x1420500f },
{ .reg = ANACTRL_MPLL_CTRL2, .def = 0x00023041 },
{ .reg = ANACTRL_MPLL_CTRL3, .def = 0x18180000 },
@@ -403,7 +403,7 @@ static const struct pll_mult_range c3_mclk_pll_mult_range = {
.max = 133,
};
-static struct clk_regmap mclk_pll_dco = {
+static struct clk_regmap c3_mclk_pll_dco = {
.data = &(struct meson_clk_pll_data) {
.en = {
.reg_off = ANACTRL_MPLL_CTRL0,
@@ -431,8 +431,8 @@ static struct clk_regmap mclk_pll_dco = {
.width = 1,
},
.range = &c3_mclk_pll_mult_range,
- .init_regs = c3_mclk_init_regs,
- .init_count = ARRAY_SIZE(c3_mclk_init_regs),
+ .init_regs = c3_mclk_pll_init_regs,
+ .init_count = ARRAY_SIZE(c3_mclk_pll_init_regs),
},
.hw.init = &(struct clk_init_data) {
.name = "mclk_pll_dco",
@@ -444,7 +444,7 @@ static struct clk_regmap mclk_pll_dco = {
},
};
-static const struct clk_div_table c3_mpll_od_table[] = {
+static const struct clk_div_table c3_mpll_pll_od_table[] = {
{ 0, 1 },
{ 1, 2 },
{ 2, 4 },
@@ -453,25 +453,25 @@ static const struct clk_div_table c3_mpll_od_table[] = {
{ /* sentinel */ }
};
-static struct clk_regmap mclk_pll_od = {
+static struct clk_regmap c3_mclk_pll_od = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_MPLL_CTRL0,
.shift = 12,
.width = 3,
- .table = c3_mpll_od_table,
+ .table = c3_mpll_pll_od_table,
},
.hw.init = &(struct clk_init_data) {
.name = "mclk_pll_od",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk_pll_dco.hw },
+ &c3_mclk_pll_dco.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
/* Both values 0 and 1 divide the input rate by one */
-static struct clk_regmap mclk_pll = {
+static struct clk_regmap c3_mclk_pll = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_MPLL_CTRL4,
.shift = 16,
@@ -482,20 +482,20 @@ static struct clk_regmap mclk_pll = {
.name = "mclk_pll",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk_pll_od.hw
+ &c3_mclk_pll_od.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data mclk_parent[] = {
- { .hw = &mclk_pll.hw },
+static const struct clk_parent_data c3_mclk_parents[] = {
+ { .hw = &c3_mclk_pll.hw },
{ .fw_name = "mclk" },
- { .hw = &fclk_50m.hw }
+ { .hw = &c3_fclk_50m.hw }
};
-static struct clk_regmap mclk0_sel = {
+static struct clk_regmap c3_mclk0_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = ANACTRL_MPLL_CTRL4,
.mask = 0x3,
@@ -504,12 +504,12 @@ static struct clk_regmap mclk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "mclk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = mclk_parent,
- .num_parents = ARRAY_SIZE(mclk_parent),
+ .parent_data = c3_mclk_parents,
+ .num_parents = ARRAY_SIZE(c3_mclk_parents),
},
};
-static struct clk_regmap mclk0_div_en = {
+static struct clk_regmap c3_mclk0_div_en = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_MPLL_CTRL4,
.bit_idx = 1,
@@ -518,14 +518,14 @@ static struct clk_regmap mclk0_div_en = {
.name = "mclk0_div_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk0_sel.hw
+ &c3_mclk0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk0_div = {
+static struct clk_regmap c3_mclk0_div = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_MPLL_CTRL4,
.shift = 2,
@@ -535,14 +535,14 @@ static struct clk_regmap mclk0_div = {
.name = "mclk0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk0_div_en.hw
+ &c3_mclk0_div_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk0 = {
+static struct clk_regmap c3_mclk0 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_MPLL_CTRL4,
.bit_idx = 0,
@@ -551,14 +551,14 @@ static struct clk_regmap mclk0 = {
.name = "mclk0",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk0_div.hw
+ &c3_mclk0_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk1_sel = {
+static struct clk_regmap c3_mclk1_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = ANACTRL_MPLL_CTRL4,
.mask = 0x3,
@@ -567,12 +567,12 @@ static struct clk_regmap mclk1_sel = {
.hw.init = &(struct clk_init_data) {
.name = "mclk1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = mclk_parent,
- .num_parents = ARRAY_SIZE(mclk_parent),
+ .parent_data = c3_mclk_parents,
+ .num_parents = ARRAY_SIZE(c3_mclk_parents),
},
};
-static struct clk_regmap mclk1_div_en = {
+static struct clk_regmap c3_mclk1_div_en = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_MPLL_CTRL4,
.bit_idx = 9,
@@ -581,14 +581,14 @@ static struct clk_regmap mclk1_div_en = {
.name = "mclk1_div_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk1_sel.hw
+ &c3_mclk1_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk1_div = {
+static struct clk_regmap c3_mclk1_div = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_MPLL_CTRL4,
.shift = 10,
@@ -598,14 +598,14 @@ static struct clk_regmap mclk1_div = {
.name = "mclk1_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk1_div_en.hw
+ &c3_mclk1_div_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk1 = {
+static struct clk_regmap c3_mclk1 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_MPLL_CTRL4,
.bit_idx = 8,
@@ -614,7 +614,7 @@ static struct clk_regmap mclk1 = {
.name = "mclk1",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk1_div.hw
+ &c3_mclk1_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -622,126 +622,61 @@ static struct clk_regmap mclk1 = {
};
static struct clk_hw *c3_pll_hw_clks[] = {
- [CLKID_FCLK_50M_EN] = &fclk_50m_en.hw,
- [CLKID_FCLK_50M] = &fclk_50m.hw,
- [CLKID_FCLK_DIV2_DIV] = &fclk_div2_div.hw,
- [CLKID_FCLK_DIV2] = &fclk_div2.hw,
- [CLKID_FCLK_DIV2P5_DIV] = &fclk_div2p5_div.hw,
- [CLKID_FCLK_DIV2P5] = &fclk_div2p5.hw,
- [CLKID_FCLK_DIV3_DIV] = &fclk_div3_div.hw,
- [CLKID_FCLK_DIV3] = &fclk_div3.hw,
- [CLKID_FCLK_DIV4_DIV] = &fclk_div4_div.hw,
- [CLKID_FCLK_DIV4] = &fclk_div4.hw,
- [CLKID_FCLK_DIV5_DIV] = &fclk_div5_div.hw,
- [CLKID_FCLK_DIV5] = &fclk_div5.hw,
- [CLKID_FCLK_DIV7_DIV] = &fclk_div7_div.hw,
- [CLKID_FCLK_DIV7] = &fclk_div7.hw,
- [CLKID_GP0_PLL_DCO] = &gp0_pll_dco.hw,
- [CLKID_GP0_PLL] = &gp0_pll.hw,
- [CLKID_HIFI_PLL_DCO] = &hifi_pll_dco.hw,
- [CLKID_HIFI_PLL] = &hifi_pll.hw,
- [CLKID_MCLK_PLL_DCO] = &mclk_pll_dco.hw,
- [CLKID_MCLK_PLL_OD] = &mclk_pll_od.hw,
- [CLKID_MCLK_PLL] = &mclk_pll.hw,
- [CLKID_MCLK0_SEL] = &mclk0_sel.hw,
- [CLKID_MCLK0_SEL_EN] = &mclk0_div_en.hw,
- [CLKID_MCLK0_DIV] = &mclk0_div.hw,
- [CLKID_MCLK0] = &mclk0.hw,
- [CLKID_MCLK1_SEL] = &mclk1_sel.hw,
- [CLKID_MCLK1_SEL_EN] = &mclk1_div_en.hw,
- [CLKID_MCLK1_DIV] = &mclk1_div.hw,
- [CLKID_MCLK1] = &mclk1.hw
-};
-
-/* Convenience table to populate regmap in .probe */
-static struct clk_regmap *const c3_pll_clk_regmaps[] = {
- &fclk_50m_en,
- &fclk_div2,
- &fclk_div2p5,
- &fclk_div3,
- &fclk_div4,
- &fclk_div5,
- &fclk_div7,
- &gp0_pll_dco,
- &gp0_pll,
- &hifi_pll_dco,
- &hifi_pll,
- &mclk_pll_dco,
- &mclk_pll_od,
- &mclk_pll,
- &mclk0_sel,
- &mclk0_div_en,
- &mclk0_div,
- &mclk0,
- &mclk1_sel,
- &mclk1_div_en,
- &mclk1_div,
- &mclk1,
-};
-
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = ANACTRL_MPLL_CTRL4,
-};
-
-static struct meson_clk_hw_data c3_pll_clks = {
- .hws = c3_pll_hw_clks,
- .num = ARRAY_SIZE(c3_pll_hw_clks),
-};
-
-static int c3_pll_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *regmap;
- void __iomem *base;
- int clkid, ret, i;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- regmap = devm_regmap_init_mmio(dev, base, &clkc_regmap_config);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(c3_pll_clk_regmaps); i++)
- c3_pll_clk_regmaps[i]->map = regmap;
-
- for (clkid = 0; clkid < c3_pll_clks.num; clkid++) {
- /* array might be sparse */
- if (!c3_pll_clks.hws[clkid])
- continue;
-
- ret = devm_clk_hw_register(dev, c3_pll_clks.hws[clkid]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
- &c3_pll_clks);
-}
+ [CLKID_FCLK_50M_EN] = &c3_fclk_50m_en.hw,
+ [CLKID_FCLK_50M] = &c3_fclk_50m.hw,
+ [CLKID_FCLK_DIV2_DIV] = &c3_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV2] = &c3_fclk_div2.hw,
+ [CLKID_FCLK_DIV2P5_DIV] = &c3_fclk_div2p5_div.hw,
+ [CLKID_FCLK_DIV2P5] = &c3_fclk_div2p5.hw,
+ [CLKID_FCLK_DIV3_DIV] = &c3_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV3] = &c3_fclk_div3.hw,
+ [CLKID_FCLK_DIV4_DIV] = &c3_fclk_div4_div.hw,
+ [CLKID_FCLK_DIV4] = &c3_fclk_div4.hw,
+ [CLKID_FCLK_DIV5_DIV] = &c3_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV5] = &c3_fclk_div5.hw,
+ [CLKID_FCLK_DIV7_DIV] = &c3_fclk_div7_div.hw,
+ [CLKID_FCLK_DIV7] = &c3_fclk_div7.hw,
+ [CLKID_GP0_PLL_DCO] = &c3_gp0_pll_dco.hw,
+ [CLKID_GP0_PLL] = &c3_gp0_pll.hw,
+ [CLKID_HIFI_PLL_DCO] = &c3_hifi_pll_dco.hw,
+ [CLKID_HIFI_PLL] = &c3_hifi_pll.hw,
+ [CLKID_MCLK_PLL_DCO] = &c3_mclk_pll_dco.hw,
+ [CLKID_MCLK_PLL_OD] = &c3_mclk_pll_od.hw,
+ [CLKID_MCLK_PLL] = &c3_mclk_pll.hw,
+ [CLKID_MCLK0_SEL] = &c3_mclk0_sel.hw,
+ [CLKID_MCLK0_SEL_EN] = &c3_mclk0_div_en.hw,
+ [CLKID_MCLK0_DIV] = &c3_mclk0_div.hw,
+ [CLKID_MCLK0] = &c3_mclk0.hw,
+ [CLKID_MCLK1_SEL] = &c3_mclk1_sel.hw,
+ [CLKID_MCLK1_SEL_EN] = &c3_mclk1_div_en.hw,
+ [CLKID_MCLK1_DIV] = &c3_mclk1_div.hw,
+ [CLKID_MCLK1] = &c3_mclk1.hw
+};
+
+static const struct meson_clkc_data c3_pll_clkc_data = {
+ .hw_clks = {
+ .hws = c3_pll_hw_clks,
+ .num = ARRAY_SIZE(c3_pll_hw_clks),
+ },
+};
static const struct of_device_id c3_pll_clkc_match_table[] = {
{
.compatible = "amlogic,c3-pll-clkc",
+ .data = &c3_pll_clkc_data,
},
{}
};
MODULE_DEVICE_TABLE(of, c3_pll_clkc_match_table);
-static struct platform_driver c3_pll_driver = {
- .probe = c3_pll_probe,
+static struct platform_driver c3_pll_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "c3-pll-clkc",
.of_match_table = c3_pll_clkc_match_table,
},
};
-module_platform_driver(c3_pll_driver);
+module_platform_driver(c3_pll_clkc_driver);
MODULE_DESCRIPTION("Amlogic C3 PLL Clock Controller driver");
MODULE_AUTHOR("Chuan Liu <chuan.liu@amlogic.com>");
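[Editor's note: with the conversion above, the per-driver probe is gone — the OF match data now carries a struct meson_clkc_data and the shared meson_clkc_mmio_probe() takes over. Its body is outside this diff, so the following is only a sketch of what it plausibly does, inferred from the removed c3_pll_probe() above; the regmap setup detail is an assumption:]

	static int example_clkc_mmio_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		const struct meson_clkc_data *data;
		struct clk_hw *hw;
		int clkid, ret;

		data = of_device_get_match_data(dev);	/* e.g. &c3_pll_clkc_data */
		if (!data)
			return -EINVAL;

		/* MMIO regmap setup would happen here, attached to 'dev', so
		 * clk_regmap_init() can later find it via dev_get_regmap() */

		for (clkid = 0; clkid < data->hw_clks.num; clkid++) {
			hw = data->hw_clks.hws[clkid];
			if (!hw)	/* array might be sparse */
				continue;

			ret = devm_clk_hw_register(dev, hw);
			if (ret)
				return ret;
		}

		return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
						   (void *)&data->hw_clks);
	}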
diff --git a/drivers/clk/meson/clk-cpu-dyndiv.c b/drivers/clk/meson/clk-cpu-dyndiv.c
index cb043b52b65d..83aedbfd2891 100644
--- a/drivers/clk/meson/clk-cpu-dyndiv.c
+++ b/drivers/clk/meson/clk-cpu-dyndiv.c
@@ -61,6 +61,7 @@ static int meson_clk_cpu_dyndiv_set_rate(struct clk_hw *hw, unsigned long rate,
};
const struct clk_ops meson_clk_cpu_dyndiv_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_clk_cpu_dyndiv_recalc_rate,
.determine_rate = meson_clk_cpu_dyndiv_determine_rate,
.set_rate = meson_clk_cpu_dyndiv_set_rate,
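[Editor's note: the .init hook added to these ops tables is called once by the clock framework during clk_hw_register(); a non-zero return aborts the registration, which is how a missing regmap surfaces as an error. A minimal, hypothetical ops table using the pattern — example_enable stands in for a type-specific callback:]

	static int example_enable(struct clk_hw *hw)
	{
		/* clk->map is valid here: clk_regmap_init() ran at registration */
		return regmap_update_bits(to_clk_regmap(hw)->map, 0x0, BIT(0), BIT(0));
	}

	static const struct clk_ops example_ops = {
		.init = clk_regmap_init,
		.enable = example_enable,
	};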
diff --git a/drivers/clk/meson/clk-dualdiv.c b/drivers/clk/meson/clk-dualdiv.c
index c896cf29b318..787df6cdf841 100644
--- a/drivers/clk/meson/clk-dualdiv.c
+++ b/drivers/clk/meson/clk-dualdiv.c
@@ -126,6 +126,7 @@ static int meson_clk_dualdiv_set_rate(struct clk_hw *hw, unsigned long rate,
}
const struct clk_ops meson_clk_dualdiv_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_clk_dualdiv_recalc_rate,
.determine_rate = meson_clk_dualdiv_determine_rate,
.set_rate = meson_clk_dualdiv_set_rate,
@@ -133,6 +134,7 @@ const struct clk_ops meson_clk_dualdiv_ops = {
EXPORT_SYMBOL_NS_GPL(meson_clk_dualdiv_ops, "CLK_MESON");
const struct clk_ops meson_clk_dualdiv_ro_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_clk_dualdiv_recalc_rate,
};
EXPORT_SYMBOL_NS_GPL(meson_clk_dualdiv_ro_ops, "CLK_MESON");
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index ee91e32b4050..7f8dada66e16 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -128,6 +128,11 @@ static int mpll_init(struct clk_hw *hw)
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_mpll_data *mpll = meson_clk_mpll_data(clk);
+ int ret;
+
+ ret = clk_regmap_init(hw);
+ if (ret)
+ return ret;
if (mpll->init_count)
regmap_multi_reg_write(clk->map, mpll->init_regs,
@@ -151,6 +156,7 @@ static int mpll_init(struct clk_hw *hw)
}
const struct clk_ops meson_clk_mpll_ro_ops = {
+ .init = clk_regmap_init,
.recalc_rate = mpll_recalc_rate,
.determine_rate = mpll_determine_rate,
};
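[Editor's note: clock types that already had their own .init, like the MPLL above, chain clk_regmap_init() first so the regmap is cached before the one-time register writes. A condensed sketch of the pattern, with hypothetical register values:]

	static const struct reg_sequence example_init_regs[] = {
		{ .reg = 0x04, .def = 0x00000000 },
		{ .reg = 0x08, .def = 0x48681c00, .delay_us = 20 },
	};

	static int example_pll_init(struct clk_hw *hw)
	{
		struct clk_regmap *clk = to_clk_regmap(hw);
		int ret;

		ret = clk_regmap_init(hw);	/* must run before clk->map is used */
		if (ret)
			return ret;

		return regmap_multi_reg_write(clk->map, example_init_regs,
					      ARRAY_SIZE(example_init_regs));
	}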
diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c
index 701211120610..58dd982e6878 100644
--- a/drivers/clk/meson/clk-phase.c
+++ b/drivers/clk/meson/clk-phase.c
@@ -58,6 +58,7 @@ static int meson_clk_phase_set_phase(struct clk_hw *hw, int degrees)
}
const struct clk_ops meson_clk_phase_ops = {
+ .init = clk_regmap_init,
.get_phase = meson_clk_phase_get_phase,
.set_phase = meson_clk_phase_set_phase,
};
@@ -83,6 +84,11 @@ static int meson_clk_triphase_sync(struct clk_hw *hw)
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk);
unsigned int val;
+ int ret;
+
+ ret = clk_regmap_init(hw);
+ if (ret)
+ return ret;
/* Get phase 0 and sync it to phase 1 and 2 */
val = meson_parm_read(clk->map, &tph->ph0);
@@ -142,6 +148,11 @@ static int meson_sclk_ws_inv_sync(struct clk_hw *hw)
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_sclk_ws_inv_data *tph = meson_sclk_ws_inv_data(clk);
unsigned int val;
+ int ret;
+
+ ret = clk_regmap_init(hw);
+ if (ret)
+ return ret;
/* Get phase and sync the inverted value to ws */
val = meson_parm_read(clk->map, &tph->ph);
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index e8e53855b00a..1ea6579a760f 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -311,6 +311,11 @@ static int meson_clk_pll_init(struct clk_hw *hw)
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+ int ret;
+
+ ret = clk_regmap_init(hw);
+ if (ret)
+ return ret;
/*
* Keep the clock running, which was already initialized and enabled
@@ -468,6 +473,7 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
* the other ops except set_rate since the rate is fixed.
*/
const struct clk_ops meson_clk_pcie_pll_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_clk_pll_recalc_rate,
.determine_rate = meson_clk_pll_determine_rate,
.is_enabled = meson_clk_pll_is_enabled,
@@ -488,6 +494,7 @@ const struct clk_ops meson_clk_pll_ops = {
EXPORT_SYMBOL_NS_GPL(meson_clk_pll_ops, "CLK_MESON");
const struct clk_ops meson_clk_pll_ro_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_clk_pll_recalc_rate,
.is_enabled = meson_clk_pll_is_enabled,
};
diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c
index f3e504f67571..1ed56fe63cae 100644
--- a/drivers/clk/meson/clk-regmap.c
+++ b/drivers/clk/meson/clk-regmap.c
@@ -4,9 +4,52 @@
* Author: Jerome Brunet <jbrunet@baylibre.com>
*/
+#include <linux/device.h>
#include <linux/module.h>
+#include <linux/mfd/syscon.h>
#include "clk-regmap.h"
+int clk_regmap_init(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct device_node *np, *parent_np;
+ struct device *dev;
+
+ /* Allow regmap to be preset as it was historically done */
+ if (clk->map)
+ return 0;
+
+ /*
+ * FIXME: what follows couples the controller implementation
+ * and clk_regmap clock type. This situation is not desirable
+ * but temporary, until the controller is able to register
+ * a hook to initialize a clock type
+ */
+
+	/* Check the usual dev-enabled controller with a basic IO regmap */
+ dev = clk_hw_get_dev(hw);
+ if (dev) {
+ clk->map = dev_get_regmap(dev, NULL);
+ if (clk->map)
+ return 0;
+ }
+
+	/* Move on to early and syscon-based controllers */
+ np = clk_hw_get_of_node(hw);
+ if (np) {
+ parent_np = of_get_parent(np);
+ clk->map = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
+
+ if (!IS_ERR_OR_NULL(clk->map))
+ return 0;
+ }
+
+ /* Bail out if regmap can't be found */
+ return -EINVAL;
+}
+EXPORT_SYMBOL_NS_GPL(clk_regmap_init, "CLK_MESON");
+
static int clk_regmap_gate_endisable(struct clk_hw *hw, int enable)
{
struct clk_regmap *clk = to_clk_regmap(hw);
@@ -45,6 +88,7 @@ static int clk_regmap_gate_is_enabled(struct clk_hw *hw)
}
const struct clk_ops clk_regmap_gate_ops = {
+ .init = clk_regmap_init,
.enable = clk_regmap_gate_enable,
.disable = clk_regmap_gate_disable,
.is_enabled = clk_regmap_gate_is_enabled,
@@ -52,6 +96,7 @@ const struct clk_ops clk_regmap_gate_ops = {
EXPORT_SYMBOL_NS_GPL(clk_regmap_gate_ops, "CLK_MESON");
const struct clk_ops clk_regmap_gate_ro_ops = {
+ .init = clk_regmap_init,
.is_enabled = clk_regmap_gate_is_enabled,
};
EXPORT_SYMBOL_NS_GPL(clk_regmap_gate_ro_ops, "CLK_MESON");
@@ -121,6 +166,7 @@ static int clk_regmap_div_set_rate(struct clk_hw *hw, unsigned long rate,
/* Would prefer clk_regmap_div_ro_ops but clashes with qcom */
const struct clk_ops clk_regmap_divider_ops = {
+ .init = clk_regmap_init,
.recalc_rate = clk_regmap_div_recalc_rate,
.determine_rate = clk_regmap_div_determine_rate,
.set_rate = clk_regmap_div_set_rate,
@@ -128,6 +174,7 @@ const struct clk_ops clk_regmap_divider_ops = {
EXPORT_SYMBOL_NS_GPL(clk_regmap_divider_ops, "CLK_MESON");
const struct clk_ops clk_regmap_divider_ro_ops = {
+ .init = clk_regmap_init,
.recalc_rate = clk_regmap_div_recalc_rate,
.determine_rate = clk_regmap_div_determine_rate,
};
@@ -170,6 +217,7 @@ static int clk_regmap_mux_determine_rate(struct clk_hw *hw,
}
const struct clk_ops clk_regmap_mux_ops = {
+ .init = clk_regmap_init,
.get_parent = clk_regmap_mux_get_parent,
.set_parent = clk_regmap_mux_set_parent,
.determine_rate = clk_regmap_mux_determine_rate,
@@ -177,6 +225,7 @@ const struct clk_ops clk_regmap_mux_ops = {
EXPORT_SYMBOL_NS_GPL(clk_regmap_mux_ops, "CLK_MESON");
const struct clk_ops clk_regmap_mux_ro_ops = {
+ .init = clk_regmap_init,
.get_parent = clk_regmap_mux_get_parent,
};
EXPORT_SYMBOL_NS_GPL(clk_regmap_mux_ro_ops, "CLK_MESON");
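[Editor's note: the first lookup path in clk_regmap_init() works because devm_regmap_init_mmio() attaches the regmap to the device through devres, which is exactly what dev_get_regmap() searches. A minimal controller-side sketch, with assumed config values:]

	static const struct regmap_config example_regmap_cfg = {
		.reg_bits = 32,
		.val_bits = 32,
		.reg_stride = 4,
	};

	static int example_probe(struct platform_device *pdev)
	{
		void __iomem *base;
		struct regmap *map;

		base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* Attached to &pdev->dev: dev_get_regmap(dev, NULL) returns it */
		map = devm_regmap_init_mmio(&pdev->dev, base, &example_regmap_cfg);
		if (IS_ERR(map))
			return PTR_ERR(map);

		/* clk_hw registration follows; no per-clock clk->map assignment */
		return 0;
	}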
diff --git a/drivers/clk/meson/clk-regmap.h b/drivers/clk/meson/clk-regmap.h
index e365312da54e..8e5c39b023e1 100644
--- a/drivers/clk/meson/clk-regmap.h
+++ b/drivers/clk/meson/clk-regmap.h
@@ -7,6 +7,7 @@
#ifndef __CLK_REGMAP_H
#define __CLK_REGMAP_H
+#include <linux/device.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
@@ -31,6 +32,9 @@ static inline struct clk_regmap *to_clk_regmap(struct clk_hw *hw)
return container_of(hw, struct clk_regmap, hw);
}
+/* clk_regmap init op to get and cache regmap from the controllers */
+int clk_regmap_init(struct clk_hw *hw);
+
/**
* struct clk_regmap_gate_data - regmap backed gate specific data
*
@@ -114,24 +118,4 @@ clk_get_regmap_mux_data(struct clk_regmap *clk)
extern const struct clk_ops clk_regmap_mux_ops;
extern const struct clk_ops clk_regmap_mux_ro_ops;
-#define __MESON_PCLK(_name, _reg, _bit, _ops, _pname) \
-struct clk_regmap _name = { \
- .data = &(struct clk_regmap_gate_data){ \
- .offset = (_reg), \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name, \
- .ops = _ops, \
- .parent_hws = (const struct clk_hw *[]) { _pname }, \
- .num_parents = 1, \
- .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
- }, \
-}
-
-#define MESON_PCLK(_name, _reg, _bit, _pname) \
- __MESON_PCLK(_name, _reg, _bit, &clk_regmap_gate_ops, _pname)
-
-#define MESON_PCLK_RO(_name, _reg, _bit, _pname) \
- __MESON_PCLK(_name, _reg, _bit, &clk_regmap_gate_ro_ops, _pname)
#endif /* __CLK_REGMAP_H */
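[Editor's note: the MESON_PCLK() macros removed here are not gone from the tree — the G12A_AO_PCLK() wrapper in the next file still invokes MESON_PCLK() with a clk_parent_data pointer and a flags argument. The relocated definition is outside this diff, so the shape below is inferred from the call sites and should be treated as a guess:]

	#define MESON_PCLK(_name, _reg, _bit, _pdata, _flags)		\
	struct clk_regmap _name = {					\
		.data = &(struct clk_regmap_gate_data){			\
			.offset = (_reg),				\
			.bit_idx = (_bit),				\
		},							\
		.hw.init = &(struct clk_init_data) {			\
			.name = #_name,					\
			.ops = &clk_regmap_gate_ops,			\
			.parent_data = (_pdata),			\
			.num_parents = 1,				\
			.flags = (_flags),				\
		},							\
	}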
diff --git a/drivers/clk/meson/g12a-aoclk.c b/drivers/clk/meson/g12a-aoclk.c
index 71c758ffa493..96981da271fa 100644
--- a/drivers/clk/meson/g12a-aoclk.c
+++ b/drivers/clk/meson/g12a-aoclk.c
@@ -37,46 +37,38 @@
#define AO_RTC_ALT_CLK_CNTL0 0x94
#define AO_RTC_ALT_CLK_CNTL1 0x98
+static const struct clk_parent_data g12a_ao_pclk_parents = { .fw_name = "mpeg-clk" };
+
+#define G12A_AO_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(g12a_ao_##_name, _reg, _bit, &g12a_ao_pclk_parents, _flags)
+
/*
- * Like every other peripheral clock gate in Amlogic Clock drivers,
- * we are using CLK_IGNORE_UNUSED here, so we keep the state of the
- * bootloader. The goal is to remove this flag at some point.
- * Actually removing it will require some extensive test to be done safely.
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historical
+ * reasons. Users are encouraged to test without it and submit changes to:
+ * - remove the flag if it is not necessary
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ *   if appropriate.
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ *   for a particular clock.
*/
-#define AXG_AO_GATE(_name, _reg, _bit) \
-static struct clk_regmap g12a_aoclk_##_name = { \
- .data = &(struct clk_regmap_gate_data) { \
- .offset = (_reg), \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = "g12a_ao_" #_name, \
- .ops = &clk_regmap_gate_ops, \
- .parent_data = &(const struct clk_parent_data) { \
- .fw_name = "mpeg-clk", \
- }, \
- .num_parents = 1, \
- .flags = CLK_IGNORE_UNUSED, \
- }, \
-}
+static G12A_AO_PCLK(ahb, AO_CLK_GATE0, 0, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(ir_in, AO_CLK_GATE0, 1, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(i2c_m0, AO_CLK_GATE0, 2, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(i2c_s0, AO_CLK_GATE0, 3, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(uart, AO_CLK_GATE0, 4, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(prod_i2c, AO_CLK_GATE0, 5, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(uart2, AO_CLK_GATE0, 6, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(ir_out, AO_CLK_GATE0, 7, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(saradc, AO_CLK_GATE0, 8, CLK_IGNORE_UNUSED);
-AXG_AO_GATE(ahb, AO_CLK_GATE0, 0);
-AXG_AO_GATE(ir_in, AO_CLK_GATE0, 1);
-AXG_AO_GATE(i2c_m0, AO_CLK_GATE0, 2);
-AXG_AO_GATE(i2c_s0, AO_CLK_GATE0, 3);
-AXG_AO_GATE(uart, AO_CLK_GATE0, 4);
-AXG_AO_GATE(prod_i2c, AO_CLK_GATE0, 5);
-AXG_AO_GATE(uart2, AO_CLK_GATE0, 6);
-AXG_AO_GATE(ir_out, AO_CLK_GATE0, 7);
-AXG_AO_GATE(saradc, AO_CLK_GATE0, 8);
-AXG_AO_GATE(mailbox, AO_CLK_GATE0_SP, 0);
-AXG_AO_GATE(m3, AO_CLK_GATE0_SP, 1);
-AXG_AO_GATE(ahb_sram, AO_CLK_GATE0_SP, 2);
-AXG_AO_GATE(rti, AO_CLK_GATE0_SP, 3);
-AXG_AO_GATE(m4_fclk, AO_CLK_GATE0_SP, 4);
-AXG_AO_GATE(m4_hclk, AO_CLK_GATE0_SP, 5);
+static G12A_AO_PCLK(mailbox, AO_CLK_GATE0_SP, 0, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(m3, AO_CLK_GATE0_SP, 1, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(ahb_sram, AO_CLK_GATE0_SP, 2, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(rti, AO_CLK_GATE0_SP, 3, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(m4_fclk, AO_CLK_GATE0_SP, 4, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(m4_hclk, AO_CLK_GATE0_SP, 5, CLK_IGNORE_UNUSED);
-static struct clk_regmap g12a_aoclk_cts_oscin = {
+static struct clk_regmap g12a_ao_cts_oscin = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTI_PWR_CNTL_REG0,
.bit_idx = 14,
@@ -103,22 +95,22 @@ static const struct meson_clk_dualdiv_param g12a_32k_div_table[] = {
/* 32k_by_oscin clock */
-static struct clk_regmap g12a_aoclk_32k_by_oscin_pre = {
+static struct clk_regmap g12a_ao_32k_by_oscin_pre = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 31,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_32k_by_oscin_pre",
+ .name = "ao_32k_by_oscin_pre",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cts_oscin.hw
+ &g12a_ao_cts_oscin.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap g12a_aoclk_32k_by_oscin_div = {
+static struct clk_regmap g12a_ao_32k_by_oscin_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = AO_RTC_ALT_CLK_CNTL0,
@@ -148,16 +140,16 @@ static struct clk_regmap g12a_aoclk_32k_by_oscin_div = {
.table = g12a_32k_div_table,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_32k_by_oscin_div",
+ .name = "ao_32k_by_oscin_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_32k_by_oscin_pre.hw
+ &g12a_ao_32k_by_oscin_pre.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap g12a_aoclk_32k_by_oscin_sel = {
+static struct clk_regmap g12a_ao_32k_by_oscin_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTC_ALT_CLK_CNTL1,
.mask = 0x1,
@@ -165,27 +157,27 @@ static struct clk_regmap g12a_aoclk_32k_by_oscin_sel = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_32k_by_oscin_sel",
+ .name = "ao_32k_by_oscin_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_32k_by_oscin_div.hw,
- &g12a_aoclk_32k_by_oscin_pre.hw,
+ &g12a_ao_32k_by_oscin_div.hw,
+ &g12a_ao_32k_by_oscin_pre.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_32k_by_oscin = {
+static struct clk_regmap g12a_ao_32k_by_oscin = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 30,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_32k_by_oscin",
+ .name = "ao_32k_by_oscin",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_32k_by_oscin_sel.hw
+ &g12a_ao_32k_by_oscin_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -194,22 +186,22 @@ static struct clk_regmap g12a_aoclk_32k_by_oscin = {
/* cec clock */
-static struct clk_regmap g12a_aoclk_cec_pre = {
+static struct clk_regmap g12a_ao_cec_pre = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_CEC_CLK_CNTL_REG0,
.bit_idx = 31,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cec_pre",
+ .name = "ao_cec_pre",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cts_oscin.hw
+ &g12a_ao_cts_oscin.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap g12a_aoclk_cec_div = {
+static struct clk_regmap g12a_ao_cec_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = AO_CEC_CLK_CNTL_REG0,
@@ -239,16 +231,16 @@ static struct clk_regmap g12a_aoclk_cec_div = {
.table = g12a_32k_div_table,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cec_div",
+ .name = "ao_cec_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cec_pre.hw
+ &g12a_ao_cec_pre.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap g12a_aoclk_cec_sel = {
+static struct clk_regmap g12a_ao_cec_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_CEC_CLK_CNTL_REG1,
.mask = 0x1,
@@ -256,34 +248,34 @@ static struct clk_regmap g12a_aoclk_cec_sel = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cec_sel",
+ .name = "ao_cec_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cec_div.hw,
- &g12a_aoclk_cec_pre.hw,
+ &g12a_ao_cec_div.hw,
+ &g12a_ao_cec_pre.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_cec = {
+static struct clk_regmap g12a_ao_cec = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_CEC_CLK_CNTL_REG0,
.bit_idx = 30,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cec",
+ .name = "ao_cec",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cec_sel.hw
+ &g12a_ao_cec_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_cts_rtc_oscin = {
+static struct clk_regmap g12a_ao_cts_rtc_oscin = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -291,10 +283,10 @@ static struct clk_regmap g12a_aoclk_cts_rtc_oscin = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cts_rtc_oscin",
+ .name = "ao_cts_rtc_oscin",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &g12a_aoclk_32k_by_oscin.hw },
+ { .hw = &g12a_ao_32k_by_oscin.hw },
{ .fw_name = "ext-32k-0", },
},
.num_parents = 2,
@@ -302,7 +294,7 @@ static struct clk_regmap g12a_aoclk_cts_rtc_oscin = {
},
};
-static struct clk_regmap g12a_aoclk_clk81 = {
+static struct clk_regmap g12a_ao_clk81 = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -310,68 +302,74 @@ static struct clk_regmap g12a_aoclk_clk81 = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
+ /*
+ * NOTE: this is one of the infamous clocks the pwm driver
+ * can request directly by its global name. It's wrong, but
+ * there is not much we can do about it until support
+ * for the old pwm bindings is dropped.
+ */
.name = "g12a_ao_clk81",
.ops = &clk_regmap_mux_ro_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "mpeg-clk", },
- { .hw = &g12a_aoclk_cts_rtc_oscin.hw },
+ { .hw = &g12a_ao_cts_rtc_oscin.hw },
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_saradc_mux = {
+static struct clk_regmap g12a_ao_saradc_mux = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_SAR_CLK,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_saradc_mux",
+ .name = "ao_saradc_mux",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
- { .hw = &g12a_aoclk_clk81.hw },
+ { .hw = &g12a_ao_clk81.hw },
},
.num_parents = 2,
},
};
-static struct clk_regmap g12a_aoclk_saradc_div = {
+static struct clk_regmap g12a_ao_saradc_div = {
.data = &(struct clk_regmap_div_data) {
.offset = AO_SAR_CLK,
.shift = 0,
.width = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_saradc_div",
+ .name = "ao_saradc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_saradc_mux.hw
+ &g12a_ao_saradc_mux.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_saradc_gate = {
+static struct clk_regmap g12a_ao_saradc_gate = {
.data = &(struct clk_regmap_gate_data) {
.offset = AO_SAR_CLK,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_saradc_gate",
+ .name = "ao_saradc_gate",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_saradc_div.hw
+ &g12a_ao_saradc_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const unsigned int g12a_aoclk_reset[] = {
+static const unsigned int g12a_ao_reset[] = {
[RESET_AO_IR_IN] = 16,
[RESET_AO_UART] = 17,
[RESET_AO_I2C_M] = 18,
@@ -381,99 +379,67 @@ static const unsigned int g12a_aoclk_reset[] = {
[RESET_AO_IR_OUT] = 23,
};
-static struct clk_regmap *g12a_aoclk_regmap[] = {
- &g12a_aoclk_ahb,
- &g12a_aoclk_ir_in,
- &g12a_aoclk_i2c_m0,
- &g12a_aoclk_i2c_s0,
- &g12a_aoclk_uart,
- &g12a_aoclk_prod_i2c,
- &g12a_aoclk_uart2,
- &g12a_aoclk_ir_out,
- &g12a_aoclk_saradc,
- &g12a_aoclk_mailbox,
- &g12a_aoclk_m3,
- &g12a_aoclk_ahb_sram,
- &g12a_aoclk_rti,
- &g12a_aoclk_m4_fclk,
- &g12a_aoclk_m4_hclk,
- &g12a_aoclk_cts_oscin,
- &g12a_aoclk_32k_by_oscin_pre,
- &g12a_aoclk_32k_by_oscin_div,
- &g12a_aoclk_32k_by_oscin_sel,
- &g12a_aoclk_32k_by_oscin,
- &g12a_aoclk_cec_pre,
- &g12a_aoclk_cec_div,
- &g12a_aoclk_cec_sel,
- &g12a_aoclk_cec,
- &g12a_aoclk_cts_rtc_oscin,
- &g12a_aoclk_clk81,
- &g12a_aoclk_saradc_mux,
- &g12a_aoclk_saradc_div,
- &g12a_aoclk_saradc_gate,
+static struct clk_hw *g12a_ao_hw_clks[] = {
+ [CLKID_AO_AHB] = &g12a_ao_ahb.hw,
+ [CLKID_AO_IR_IN] = &g12a_ao_ir_in.hw,
+ [CLKID_AO_I2C_M0] = &g12a_ao_i2c_m0.hw,
+ [CLKID_AO_I2C_S0] = &g12a_ao_i2c_s0.hw,
+ [CLKID_AO_UART] = &g12a_ao_uart.hw,
+ [CLKID_AO_PROD_I2C] = &g12a_ao_prod_i2c.hw,
+ [CLKID_AO_UART2] = &g12a_ao_uart2.hw,
+ [CLKID_AO_IR_OUT] = &g12a_ao_ir_out.hw,
+ [CLKID_AO_SAR_ADC] = &g12a_ao_saradc.hw,
+ [CLKID_AO_MAILBOX] = &g12a_ao_mailbox.hw,
+ [CLKID_AO_M3] = &g12a_ao_m3.hw,
+ [CLKID_AO_AHB_SRAM] = &g12a_ao_ahb_sram.hw,
+ [CLKID_AO_RTI] = &g12a_ao_rti.hw,
+ [CLKID_AO_M4_FCLK] = &g12a_ao_m4_fclk.hw,
+ [CLKID_AO_M4_HCLK] = &g12a_ao_m4_hclk.hw,
+ [CLKID_AO_CLK81] = &g12a_ao_clk81.hw,
+ [CLKID_AO_SAR_ADC_SEL] = &g12a_ao_saradc_mux.hw,
+ [CLKID_AO_SAR_ADC_DIV] = &g12a_ao_saradc_div.hw,
+ [CLKID_AO_SAR_ADC_CLK] = &g12a_ao_saradc_gate.hw,
+ [CLKID_AO_CTS_OSCIN] = &g12a_ao_cts_oscin.hw,
+ [CLKID_AO_32K_PRE] = &g12a_ao_32k_by_oscin_pre.hw,
+ [CLKID_AO_32K_DIV] = &g12a_ao_32k_by_oscin_div.hw,
+ [CLKID_AO_32K_SEL] = &g12a_ao_32k_by_oscin_sel.hw,
+ [CLKID_AO_32K] = &g12a_ao_32k_by_oscin.hw,
+ [CLKID_AO_CEC_PRE] = &g12a_ao_cec_pre.hw,
+ [CLKID_AO_CEC_DIV] = &g12a_ao_cec_div.hw,
+ [CLKID_AO_CEC_SEL] = &g12a_ao_cec_sel.hw,
+ [CLKID_AO_CEC] = &g12a_ao_cec.hw,
+ [CLKID_AO_CTS_RTC_OSCIN] = &g12a_ao_cts_rtc_oscin.hw,
};
-static struct clk_hw *g12a_aoclk_hw_clks[] = {
- [CLKID_AO_AHB] = &g12a_aoclk_ahb.hw,
- [CLKID_AO_IR_IN] = &g12a_aoclk_ir_in.hw,
- [CLKID_AO_I2C_M0] = &g12a_aoclk_i2c_m0.hw,
- [CLKID_AO_I2C_S0] = &g12a_aoclk_i2c_s0.hw,
- [CLKID_AO_UART] = &g12a_aoclk_uart.hw,
- [CLKID_AO_PROD_I2C] = &g12a_aoclk_prod_i2c.hw,
- [CLKID_AO_UART2] = &g12a_aoclk_uart2.hw,
- [CLKID_AO_IR_OUT] = &g12a_aoclk_ir_out.hw,
- [CLKID_AO_SAR_ADC] = &g12a_aoclk_saradc.hw,
- [CLKID_AO_MAILBOX] = &g12a_aoclk_mailbox.hw,
- [CLKID_AO_M3] = &g12a_aoclk_m3.hw,
- [CLKID_AO_AHB_SRAM] = &g12a_aoclk_ahb_sram.hw,
- [CLKID_AO_RTI] = &g12a_aoclk_rti.hw,
- [CLKID_AO_M4_FCLK] = &g12a_aoclk_m4_fclk.hw,
- [CLKID_AO_M4_HCLK] = &g12a_aoclk_m4_hclk.hw,
- [CLKID_AO_CLK81] = &g12a_aoclk_clk81.hw,
- [CLKID_AO_SAR_ADC_SEL] = &g12a_aoclk_saradc_mux.hw,
- [CLKID_AO_SAR_ADC_DIV] = &g12a_aoclk_saradc_div.hw,
- [CLKID_AO_SAR_ADC_CLK] = &g12a_aoclk_saradc_gate.hw,
- [CLKID_AO_CTS_OSCIN] = &g12a_aoclk_cts_oscin.hw,
- [CLKID_AO_32K_PRE] = &g12a_aoclk_32k_by_oscin_pre.hw,
- [CLKID_AO_32K_DIV] = &g12a_aoclk_32k_by_oscin_div.hw,
- [CLKID_AO_32K_SEL] = &g12a_aoclk_32k_by_oscin_sel.hw,
- [CLKID_AO_32K] = &g12a_aoclk_32k_by_oscin.hw,
- [CLKID_AO_CEC_PRE] = &g12a_aoclk_cec_pre.hw,
- [CLKID_AO_CEC_DIV] = &g12a_aoclk_cec_div.hw,
- [CLKID_AO_CEC_SEL] = &g12a_aoclk_cec_sel.hw,
- [CLKID_AO_CEC] = &g12a_aoclk_cec.hw,
- [CLKID_AO_CTS_RTC_OSCIN] = &g12a_aoclk_cts_rtc_oscin.hw,
-};
-
-static const struct meson_aoclk_data g12a_aoclkc_data = {
+static const struct meson_aoclk_data g12a_ao_clkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
- .num_reset = ARRAY_SIZE(g12a_aoclk_reset),
- .reset = g12a_aoclk_reset,
- .num_clks = ARRAY_SIZE(g12a_aoclk_regmap),
- .clks = g12a_aoclk_regmap,
- .hw_clks = {
- .hws = g12a_aoclk_hw_clks,
- .num = ARRAY_SIZE(g12a_aoclk_hw_clks),
+ .num_reset = ARRAY_SIZE(g12a_ao_reset),
+ .reset = g12a_ao_reset,
+ .clkc_data = {
+ .hw_clks = {
+ .hws = g12a_ao_hw_clks,
+ .num = ARRAY_SIZE(g12a_ao_hw_clks),
+ },
},
};
-static const struct of_device_id g12a_aoclkc_match_table[] = {
+static const struct of_device_id g12a_ao_clkc_match_table[] = {
{
.compatible = "amlogic,meson-g12a-aoclkc",
- .data = &g12a_aoclkc_data,
+ .data = &g12a_ao_clkc_data.clkc_data,
},
{ }
};
-MODULE_DEVICE_TABLE(of, g12a_aoclkc_match_table);
+MODULE_DEVICE_TABLE(of, g12a_ao_clkc_match_table);
-static struct platform_driver g12a_aoclkc_driver = {
+static struct platform_driver g12a_ao_clkc_driver = {
.probe = meson_aoclkc_probe,
.driver = {
.name = "g12a-aoclkc",
- .of_match_table = g12a_aoclkc_match_table,
+ .of_match_table = g12a_ao_clkc_match_table,
},
};
-module_platform_driver(g12a_aoclkc_driver);
+module_platform_driver(g12a_ao_clkc_driver);
MODULE_DESCRIPTION("Amlogic G12A Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
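[Editor's note: the match data now points at the embedded clkc_data member rather than the outer meson_aoclk_data. The aoclk probe presumably recovers the reset information with container_of(); a hedged one-liner illustrating the idea, helper name hypothetical:]

	static const struct meson_aoclk_data *
	example_to_aoclk_data(const struct meson_clkc_data *data)
	{
		return container_of(data, struct meson_aoclk_data, clkc_data);
	}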
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index d9e546e006d7..185b6348251d 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -23,11 +23,120 @@
#include "clk-cpu-dyndiv.h"
#include "vid-pll-div.h"
#include "vclk.h"
-#include "meson-eeclk.h"
-#include "g12a.h"
+#include "meson-clkc-utils.h"
#include <dt-bindings/clock/g12a-clkc.h>
+#define HHI_MIPI_CNTL0 0x000
+#define HHI_MIPI_CNTL1 0x004
+#define HHI_MIPI_CNTL2 0x008
+#define HHI_MIPI_STS 0x00c
+#define HHI_GP0_PLL_CNTL0 0x040
+#define HHI_GP0_PLL_CNTL1 0x044
+#define HHI_GP0_PLL_CNTL2 0x048
+#define HHI_GP0_PLL_CNTL3 0x04c
+#define HHI_GP0_PLL_CNTL4 0x050
+#define HHI_GP0_PLL_CNTL5 0x054
+#define HHI_GP0_PLL_CNTL6 0x058
+#define HHI_GP0_PLL_STS 0x05c
+#define HHI_GP1_PLL_CNTL0 0x060
+#define HHI_GP1_PLL_CNTL1 0x064
+#define HHI_GP1_PLL_CNTL2 0x068
+#define HHI_GP1_PLL_CNTL3 0x06c
+#define HHI_GP1_PLL_CNTL4 0x070
+#define HHI_GP1_PLL_CNTL5 0x074
+#define HHI_GP1_PLL_CNTL6 0x078
+#define HHI_GP1_PLL_STS 0x07c
+#define HHI_PCIE_PLL_CNTL0 0x098
+#define HHI_PCIE_PLL_CNTL1 0x09c
+#define HHI_PCIE_PLL_CNTL2 0x0a0
+#define HHI_PCIE_PLL_CNTL3 0x0a4
+#define HHI_PCIE_PLL_CNTL4 0x0a8
+#define HHI_PCIE_PLL_CNTL5 0x0ac
+#define HHI_PCIE_PLL_STS 0x0b8
+#define HHI_HIFI_PLL_CNTL0 0x0d8
+#define HHI_HIFI_PLL_CNTL1 0x0dc
+#define HHI_HIFI_PLL_CNTL2 0x0e0
+#define HHI_HIFI_PLL_CNTL3 0x0e4
+#define HHI_HIFI_PLL_CNTL4 0x0e8
+#define HHI_HIFI_PLL_CNTL5 0x0ec
+#define HHI_HIFI_PLL_CNTL6 0x0f0
+#define HHI_VIID_CLK_DIV 0x128
+#define HHI_VIID_CLK_CNTL 0x12c
+#define HHI_GCLK_MPEG0 0x140
+#define HHI_GCLK_MPEG1 0x144
+#define HHI_GCLK_MPEG2 0x148
+#define HHI_GCLK_OTHER 0x150
+#define HHI_GCLK_OTHER2 0x154
+#define HHI_SYS_CPU_CLK_CNTL1 0x15c
+#define HHI_VID_CLK_DIV 0x164
+#define HHI_MPEG_CLK_CNTL 0x174
+#define HHI_AUD_CLK_CNTL 0x178
+#define HHI_VID_CLK_CNTL 0x17c
+#define HHI_TS_CLK_CNTL 0x190
+#define HHI_VID_CLK_CNTL2 0x194
+#define HHI_SYS_CPU_CLK_CNTL0 0x19c
+#define HHI_VID_PLL_CLK_DIV 0x1a0
+#define HHI_MALI_CLK_CNTL 0x1b0
+#define HHI_VPU_CLKC_CNTL 0x1b4
+#define HHI_VPU_CLK_CNTL 0x1bc
+#define HHI_ISP_CLK_CNTL 0x1c0
+#define HHI_NNA_CLK_CNTL 0x1c8
+#define HHI_HDMI_CLK_CNTL 0x1cc
+#define HHI_VDEC_CLK_CNTL 0x1e0
+#define HHI_VDEC2_CLK_CNTL 0x1e4
+#define HHI_VDEC3_CLK_CNTL 0x1e8
+#define HHI_VDEC4_CLK_CNTL 0x1ec
+#define HHI_HDCP22_CLK_CNTL 0x1f0
+#define HHI_VAPBCLK_CNTL 0x1f4
+#define HHI_SYS_CPUB_CLK_CNTL1 0x200
+#define HHI_SYS_CPUB_CLK_CNTL 0x208
+#define HHI_VPU_CLKB_CNTL 0x20c
+#define HHI_SYS_CPU_CLK_CNTL2 0x210
+#define HHI_SYS_CPU_CLK_CNTL3 0x214
+#define HHI_SYS_CPU_CLK_CNTL4 0x218
+#define HHI_SYS_CPU_CLK_CNTL5 0x21c
+#define HHI_SYS_CPU_CLK_CNTL6 0x220
+#define HHI_GEN_CLK_CNTL 0x228
+#define HHI_VDIN_MEAS_CLK_CNTL 0x250
+#define HHI_MIPIDSI_PHY_CLK_CNTL 0x254
+#define HHI_NAND_CLK_CNTL 0x25c
+#define HHI_SD_EMMC_CLK_CNTL 0x264
+#define HHI_MPLL_CNTL0 0x278
+#define HHI_MPLL_CNTL1 0x27c
+#define HHI_MPLL_CNTL2 0x280
+#define HHI_MPLL_CNTL3 0x284
+#define HHI_MPLL_CNTL4 0x288
+#define HHI_MPLL_CNTL5 0x28c
+#define HHI_MPLL_CNTL6 0x290
+#define HHI_MPLL_CNTL7 0x294
+#define HHI_MPLL_CNTL8 0x298
+#define HHI_FIX_PLL_CNTL0 0x2a0
+#define HHI_FIX_PLL_CNTL1 0x2a4
+#define HHI_FIX_PLL_CNTL3 0x2ac
+#define HHI_SYS_PLL_CNTL0 0x2f4
+#define HHI_SYS_PLL_CNTL1 0x2f8
+#define HHI_SYS_PLL_CNTL2 0x2fc
+#define HHI_SYS_PLL_CNTL3 0x300
+#define HHI_SYS_PLL_CNTL4 0x304
+#define HHI_SYS_PLL_CNTL5 0x308
+#define HHI_SYS_PLL_CNTL6 0x30c
+#define HHI_HDMI_PLL_CNTL0 0x320
+#define HHI_HDMI_PLL_CNTL1 0x324
+#define HHI_HDMI_PLL_CNTL2 0x328
+#define HHI_HDMI_PLL_CNTL3 0x32c
+#define HHI_HDMI_PLL_CNTL4 0x330
+#define HHI_HDMI_PLL_CNTL5 0x334
+#define HHI_HDMI_PLL_CNTL6 0x338
+#define HHI_SPICC_CLK_CNTL 0x3dc
+#define HHI_SYS1_PLL_CNTL0 0x380
+#define HHI_SYS1_PLL_CNTL1 0x384
+#define HHI_SYS1_PLL_CNTL2 0x388
+#define HHI_SYS1_PLL_CNTL3 0x38c
+#define HHI_SYS1_PLL_CNTL4 0x390
+#define HHI_SYS1_PLL_CNTL5 0x394
+#define HHI_SYS1_PLL_CNTL6 0x398
+
static struct clk_regmap g12a_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
.en = {
@@ -277,6 +386,451 @@ static struct clk_fixed_factor g12b_sys1_pll_div16 = {
},
};
+static const struct pll_mult_range g12a_gp0_pll_mult_range = {
+ .min = 125,
+ .max = 255,
+};
+
+/*
+ * Internal gp0 pll emulation configuration parameters
+ */
+static const struct reg_sequence g12a_gp0_pll_init_regs[] = {
+ { .reg = HHI_GP0_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_GP0_PLL_CNTL2, .def = 0x00000000 },
+ { .reg = HHI_GP0_PLL_CNTL3, .def = 0x48681c00 },
+ { .reg = HHI_GP0_PLL_CNTL4, .def = 0x33771290 },
+ { .reg = HHI_GP0_PLL_CNTL5, .def = 0x39272000 },
+ { .reg = HHI_GP0_PLL_CNTL6, .def = 0x56540000 },
+};
+
+static struct clk_regmap g12a_gp0_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_GP0_PLL_CNTL1,
+ .shift = 0,
+ .width = 17,
+ },
+ .l = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ .range = &g12a_gp0_pll_mult_range,
+ .init_regs = g12a_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(g12a_gp0_pll_init_regs),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_pll_dco",
+ .ops = &meson_clk_pll_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_gp0_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GP0_PLL_CNTL0,
+ .shift = 16,
+ .width = 3,
+ .flags = (CLK_DIVIDER_POWER_OF_TWO |
+ CLK_DIVIDER_ROUND_CLOSEST),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_pll",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_gp0_pll_dco.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap sm1_gp1_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_GP1_PLL_CNTL1,
+ .shift = 0,
+ .width = 17,
+ },
+ .l = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp1_pll_dco",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ /* This clock feeds the DSU, avoid disabling it */
+ .flags = CLK_IS_CRITICAL,
+ },
+};
+
+static struct clk_regmap sm1_gp1_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GP1_PLL_CNTL0,
+ .shift = 16,
+ .width = 3,
+ .flags = (CLK_DIVIDER_POWER_OF_TWO |
+ CLK_DIVIDER_ROUND_CLOSEST),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp1_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &sm1_gp1_pll_dco.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+/*
+ * Internal hifi pll emulation configuration parameters
+ */
+static const struct reg_sequence g12a_hifi_pll_init_regs[] = {
+ { .reg = HHI_HIFI_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_HIFI_PLL_CNTL2, .def = 0x00000000 },
+ { .reg = HHI_HIFI_PLL_CNTL3, .def = 0x6a285c00 },
+ { .reg = HHI_HIFI_PLL_CNTL4, .def = 0x65771290 },
+ { .reg = HHI_HIFI_PLL_CNTL5, .def = 0x39272000 },
+ { .reg = HHI_HIFI_PLL_CNTL6, .def = 0x56540000 },
+};
+
+static struct clk_regmap g12a_hifi_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_HIFI_PLL_CNTL1,
+ .shift = 0,
+ .width = 17,
+ },
+ .l = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ .range = &g12a_gp0_pll_mult_range,
+ .init_regs = g12a_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(g12a_hifi_pll_init_regs),
+ .flags = CLK_MESON_PLL_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hifi_pll_dco",
+ .ops = &meson_clk_pll_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_hifi_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HIFI_PLL_CNTL0,
+ .shift = 16,
+ .width = 2,
+ .flags = (CLK_DIVIDER_POWER_OF_TWO |
+ CLK_DIVIDER_ROUND_CLOSEST),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hifi_pll",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hifi_pll_dco.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+/*
+ * The Meson G12A PCIE PLL is fine-tuned to deliver a very precise
+ * 100MHz reference clock for the PCIe Analog PHY, and thus requires
+ * a strict register sequence to enable the PLL.
+ */
+static const struct reg_sequence g12a_pcie_pll_init_regs[] = {
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x20090496 },
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x30090496 },
+ { .reg = HHI_PCIE_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001100 },
+ { .reg = HHI_PCIE_PLL_CNTL3, .def = 0x10058e00 },
+ { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x000100c0 },
+ { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000048 },
+ { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000068, .delay_us = 20 },
+ { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x008100c0, .delay_us = 10 },
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x34090496 },
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x14090496, .delay_us = 10 },
+ { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001000 },
+};
+
+/* Keep a single-entry table for the recalc/round_rate() ops */
+static const struct pll_params_table g12a_pcie_pll_table[] = {
+ PLL_PARAMS(150, 1),
+ {0, 0},
+};
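[Editor's note: putting the table and dividers together gives the fixed 100MHz output. Hedged arithmetic, assuming the usual Meson PLL rate rule rate = parent * m / n and a 24MHz xtal:]

	/*
	 *   pcie_pll_dco      = 24MHz * 150 / 1 = 3600MHz  (PLL_PARAMS above)
	 *   pcie_pll_dco_div2 = 3600MHz / 2     = 1800MHz  (fixed factor)
	 *   pcie_pll_od       = 1800MHz / 9     =  200MHz  (CNTL0[20:16] = 9
	 *                                                   in the init
	 *                                                   sequence, one-based
	 *                                                   divider)
	 *   pcie_pll          =  200MHz / 2     =  100MHz  (fixed factor)
	 */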
+
+static struct clk_regmap g12a_pcie_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_PCIE_PLL_CNTL1,
+ .shift = 0,
+ .width = 12,
+ },
+ .l = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = g12a_pcie_pll_table,
+ .init_regs = g12a_pcie_pll_init_regs,
+ .init_count = ARRAY_SIZE(g12a_pcie_pll_init_regs),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_dco",
+ .ops = &meson_clk_pcie_pll_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_pcie_pll_dco_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_dco_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_pcie_pll_dco.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_pcie_pll_od = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_PCIE_PLL_CNTL0,
+ .shift = 16,
+ .width = 5,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST |
+ CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_od",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_pcie_pll_dco_div2.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor g12a_pcie_pll = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_pll",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_pcie_pll_od.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_HDMI_PLL_CNTL1,
+ .shift = 0,
+ .width = 16,
+ },
+ .l = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 30,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_dco",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ /*
+ * The display driver directly handles the HDMI PLL registers ATM; we
+ * need NOCACHE to keep our view of the clock as accurate as possible
+ */
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll_od = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL0,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_od",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hdmi_pll_dco.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll_od2 = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL0,
+ .shift = 18,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_od2",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hdmi_pll_od.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL0,
+ .shift = 20,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hdmi_pll_od2.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
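[Editor's note: all three HDMI post-divider fields above (shifts 16, 18 and 20) are CLK_DIVIDER_POWER_OF_TWO, so the register values act as exponents. Hedged arithmetic, field names per the usual datasheet convention:]

	/*
	 *   hdmi_pll = hdmi_pll_dco / (2^od * 2^od2 * 2^od3)
	 *
	 *   e.g. od = 2, od2 = 1, od3 = 0  ->  dco / 8
	 */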
static struct clk_fixed_factor g12a_fclk_div2_div = {
.mult = 1,
.div = 2,
@@ -350,36 +904,166 @@ static struct clk_regmap g12a_fclk_div3 = {
},
};
-/* Datasheet names this field as "premux0" */
-static struct clk_regmap g12a_cpu_clk_premux0 = {
+
+static struct clk_fixed_factor g12a_fclk_div4_div = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div4 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 21,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div4_div.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div5_div = {
+ .mult = 1,
+ .div = 5,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div5 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 22,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div5_div.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div7_div = {
+ .mult = 1,
+ .div = 7,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div7 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 23,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div7_div.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div2p5_div = {
+ .mult = 1,
+ .div = 5,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2p5_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fixed_pll_dco.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div2p5 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2p5",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div2p5_div.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_mpll_50m_div = {
+ .mult = 1,
+ .div = 80,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_50m_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fixed_pll_dco.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_mpll_50m = {
.data = &(struct clk_regmap_mux_data){
- .offset = HHI_SYS_CPU_CLK_CNTL0,
- .mask = 0x3,
- .shift = 0,
- .flags = CLK_MUX_ROUND_CLOSEST,
+ .offset = HHI_FIX_PLL_CNTL3,
+ .mask = 0x1,
+ .shift = 5,
},
.hw.init = &(struct clk_init_data){
- .name = "cpu_clk_dyn0_sel",
- .ops = &clk_regmap_mux_ops,
+ .name = "mpll_50m",
+ .ops = &clk_regmap_mux_ro_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
- { .hw = &g12a_fclk_div2.hw },
- { .hw = &g12a_fclk_div3.hw },
+ { .hw = &g12a_mpll_50m_div.hw },
},
- .num_parents = 3,
- .flags = CLK_SET_RATE_PARENT,
+ .num_parents = 2,
},
};
-/* Datasheet names this field as "premux1" */
-static struct clk_regmap g12a_cpu_clk_premux1 = {
+static struct clk_fixed_factor g12a_mpll_prediv = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_prediv",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fixed_pll_dco.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+/* Datasheet names this field as "premux0" */
+static struct clk_regmap g12a_cpu_clk_dyn0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x3,
- .shift = 16,
+ .shift = 0,
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
- .name = "cpu_clk_dyn1_sel",
+ .name = "cpu_clk_dyn0_sel",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
@@ -387,13 +1071,12 @@ static struct clk_regmap g12a_cpu_clk_premux1 = {
{ .hw = &g12a_fclk_div3.hw },
},
.num_parents = 3,
- /* This sub-tree is used a parking clock */
- .flags = CLK_SET_RATE_NO_REPARENT
+ .flags = CLK_SET_RATE_PARENT,
},
};
/* Datasheet names this field as "mux0_divn_tcnt" */
-static struct clk_regmap g12a_cpu_clk_mux0_div = {
+static struct clk_regmap g12a_cpu_clk_dyn0_div = {
.data = &(struct meson_clk_cpu_dyndiv_data){
.div = {
.reg_off = HHI_SYS_CPU_CLK_CNTL0,
@@ -410,7 +1093,7 @@ static struct clk_regmap g12a_cpu_clk_mux0_div = {
.name = "cpu_clk_dyn0_div",
.ops = &meson_clk_cpu_dyndiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_premux0.hw
+ &g12a_cpu_clk_dyn0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -418,7 +1101,7 @@ static struct clk_regmap g12a_cpu_clk_mux0_div = {
};
/* Datasheet names this field as "postmux0" */
-static struct clk_regmap g12a_cpu_clk_postmux0 = {
+static struct clk_regmap g12a_cpu_clk_dyn0 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x1,
@@ -429,16 +1112,37 @@ static struct clk_regmap g12a_cpu_clk_postmux0 = {
.name = "cpu_clk_dyn0",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_premux0.hw,
- &g12a_cpu_clk_mux0_div.hw,
+ &g12a_cpu_clk_dyn0_sel.hw,
+ &g12a_cpu_clk_dyn0_div.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
+/* Datasheet names this field as "premux1" */
+static struct clk_regmap g12a_cpu_clk_dyn1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x3,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_dyn1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_fclk_div2.hw },
+ { .hw = &g12a_fclk_div3.hw },
+ },
+ .num_parents = 3,
+	/* This sub-tree is used as a parking clock */
+ .flags = CLK_SET_RATE_NO_REPARENT
+ },
+};
+
/* Datasheet names this field as "Mux1_divn_tcnt" */
-static struct clk_regmap g12a_cpu_clk_mux1_div = {
+static struct clk_regmap g12a_cpu_clk_dyn1_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.shift = 20,
@@ -448,14 +1152,14 @@ static struct clk_regmap g12a_cpu_clk_mux1_div = {
.name = "cpu_clk_dyn1_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_premux1.hw
+ &g12a_cpu_clk_dyn1_sel.hw
},
.num_parents = 1,
},
};
/* Datasheet names this field as "postmux1" */
-static struct clk_regmap g12a_cpu_clk_postmux1 = {
+static struct clk_regmap g12a_cpu_clk_dyn1 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x1,
@@ -465,8 +1169,8 @@ static struct clk_regmap g12a_cpu_clk_postmux1 = {
.name = "cpu_clk_dyn1",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_premux1.hw,
- &g12a_cpu_clk_mux1_div.hw,
+ &g12a_cpu_clk_dyn1_sel.hw,
+ &g12a_cpu_clk_dyn1_div.hw,
},
.num_parents = 2,
	 /* This sub-tree is used as a parking clock */
@@ -486,8 +1190,8 @@ static struct clk_regmap g12a_cpu_clk_dyn = {
.name = "cpu_clk_dyn",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_postmux0.hw,
- &g12a_cpu_clk_postmux1.hw,
+ &g12a_cpu_clk_dyn0.hw,
+ &g12a_cpu_clk_dyn1.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
@@ -535,7 +1239,7 @@ static struct clk_regmap g12b_cpu_clk = {
};
/* Datasheet names this field as "premux0" */
-static struct clk_regmap g12b_cpub_clk_premux0 = {
+static struct clk_regmap g12b_cpub_clk_dyn0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x3,
@@ -556,7 +1260,7 @@ static struct clk_regmap g12b_cpub_clk_premux0 = {
};
/* Datasheet names this field as "mux0_divn_tcnt" */
-static struct clk_regmap g12b_cpub_clk_mux0_div = {
+static struct clk_regmap g12b_cpub_clk_dyn0_div = {
.data = &(struct meson_clk_cpu_dyndiv_data){
.div = {
.reg_off = HHI_SYS_CPUB_CLK_CNTL,
@@ -573,7 +1277,7 @@ static struct clk_regmap g12b_cpub_clk_mux0_div = {
.name = "cpub_clk_dyn0_div",
.ops = &meson_clk_cpu_dyndiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_premux0.hw
+ &g12b_cpub_clk_dyn0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -581,7 +1285,7 @@ static struct clk_regmap g12b_cpub_clk_mux0_div = {
};
/* Datasheet names this field as "postmux0" */
-static struct clk_regmap g12b_cpub_clk_postmux0 = {
+static struct clk_regmap g12b_cpub_clk_dyn0 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x1,
@@ -592,8 +1296,8 @@ static struct clk_regmap g12b_cpub_clk_postmux0 = {
.name = "cpub_clk_dyn0",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_premux0.hw,
- &g12b_cpub_clk_mux0_div.hw
+ &g12b_cpub_clk_dyn0_sel.hw,
+ &g12b_cpub_clk_dyn0_div.hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
@@ -601,7 +1305,7 @@ static struct clk_regmap g12b_cpub_clk_postmux0 = {
};
/* Datasheet names this field as "premux1" */
-static struct clk_regmap g12b_cpub_clk_premux1 = {
+static struct clk_regmap g12b_cpub_clk_dyn1_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x3,
@@ -622,7 +1326,7 @@ static struct clk_regmap g12b_cpub_clk_premux1 = {
};
/* Datasheet names this field as "Mux1_divn_tcnt" */
-static struct clk_regmap g12b_cpub_clk_mux1_div = {
+static struct clk_regmap g12b_cpub_clk_dyn1_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.shift = 20,
@@ -632,14 +1336,14 @@ static struct clk_regmap g12b_cpub_clk_mux1_div = {
.name = "cpub_clk_dyn1_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_premux1.hw
+ &g12b_cpub_clk_dyn1_sel.hw
},
.num_parents = 1,
},
};
/* Datasheet names this field as "postmux1" */
-static struct clk_regmap g12b_cpub_clk_postmux1 = {
+static struct clk_regmap g12b_cpub_clk_dyn1 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x1,
@@ -649,8 +1353,8 @@ static struct clk_regmap g12b_cpub_clk_postmux1 = {
.name = "cpub_clk_dyn1",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_premux1.hw,
- &g12b_cpub_clk_mux1_div.hw
+ &g12b_cpub_clk_dyn1_sel.hw,
+ &g12b_cpub_clk_dyn1_div.hw
},
.num_parents = 2,
	 /* This sub-tree is used as a parking clock */
@@ -670,8 +1374,8 @@ static struct clk_regmap g12b_cpub_clk_dyn = {
.name = "cpub_clk_dyn",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_postmux0.hw,
- &g12b_cpub_clk_postmux1.hw
+ &g12b_cpub_clk_dyn0.hw,
+ &g12b_cpub_clk_dyn1.hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
@@ -698,10 +1402,8 @@ static struct clk_regmap g12b_cpub_clk = {
},
};
-static struct clk_regmap sm1_gp1_pll;
-
/* Datasheet names this field as "premux0" */
-static struct clk_regmap sm1_dsu_clk_premux0 = {
+static struct clk_regmap sm1_dsu_clk_dyn0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.mask = 0x3,
@@ -720,28 +1422,8 @@ static struct clk_regmap sm1_dsu_clk_premux0 = {
},
};
-/* Datasheet names this field as "premux1" */
-static struct clk_regmap sm1_dsu_clk_premux1 = {
- .data = &(struct clk_regmap_mux_data){
- .offset = HHI_SYS_CPU_CLK_CNTL5,
- .mask = 0x3,
- .shift = 16,
- },
- .hw.init = &(struct clk_init_data){
- .name = "dsu_clk_dyn1_sel",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_data = (const struct clk_parent_data []) {
- { .fw_name = "xtal", },
- { .hw = &g12a_fclk_div2.hw },
- { .hw = &g12a_fclk_div3.hw },
- { .hw = &sm1_gp1_pll.hw },
- },
- .num_parents = 4,
- },
-};
-
/* Datasheet names this field as "Mux0_divn_tcnt" */
-static struct clk_regmap sm1_dsu_clk_mux0_div = {
+static struct clk_regmap sm1_dsu_clk_dyn0_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.shift = 4,
@@ -751,14 +1433,14 @@ static struct clk_regmap sm1_dsu_clk_mux0_div = {
.name = "dsu_clk_dyn0_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_premux0.hw
+ &sm1_dsu_clk_dyn0_sel.hw
},
.num_parents = 1,
},
};
/* Datasheet names this field as "postmux0" */
-static struct clk_regmap sm1_dsu_clk_postmux0 = {
+static struct clk_regmap sm1_dsu_clk_dyn0 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.mask = 0x1,
@@ -768,15 +1450,35 @@ static struct clk_regmap sm1_dsu_clk_postmux0 = {
.name = "dsu_clk_dyn0",
.ops = &clk_regmap_mux_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_premux0.hw,
- &sm1_dsu_clk_mux0_div.hw,
+ &sm1_dsu_clk_dyn0_sel.hw,
+ &sm1_dsu_clk_dyn0_div.hw,
},
.num_parents = 2,
},
};
+/* Datasheet names this field as "premux1" */
+static struct clk_regmap sm1_dsu_clk_dyn1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL5,
+ .mask = 0x3,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "dsu_clk_dyn1_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_fclk_div2.hw },
+ { .hw = &g12a_fclk_div3.hw },
+ { .hw = &sm1_gp1_pll.hw },
+ },
+ .num_parents = 4,
+ },
+};
+
/* Datasheet names this field as "Mux1_divn_tcnt" */
-static struct clk_regmap sm1_dsu_clk_mux1_div = {
+static struct clk_regmap sm1_dsu_clk_dyn1_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.shift = 20,
@@ -786,14 +1488,14 @@ static struct clk_regmap sm1_dsu_clk_mux1_div = {
.name = "dsu_clk_dyn1_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_premux1.hw
+ &sm1_dsu_clk_dyn1_sel.hw
},
.num_parents = 1,
},
};
/* Datasheet names this field as "postmux1" */
-static struct clk_regmap sm1_dsu_clk_postmux1 = {
+static struct clk_regmap sm1_dsu_clk_dyn1 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.mask = 0x1,
@@ -803,8 +1505,8 @@ static struct clk_regmap sm1_dsu_clk_postmux1 = {
.name = "dsu_clk_dyn1",
.ops = &clk_regmap_mux_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_premux1.hw,
- &sm1_dsu_clk_mux1_div.hw,
+ &sm1_dsu_clk_dyn1_sel.hw,
+ &sm1_dsu_clk_dyn1_div.hw,
},
.num_parents = 2,
},
@@ -821,8 +1523,8 @@ static struct clk_regmap sm1_dsu_clk_dyn = {
.name = "dsu_clk_dyn",
.ops = &clk_regmap_mux_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_postmux0.hw,
- &sm1_dsu_clk_postmux1.hw,
+ &sm1_dsu_clk_dyn0.hw,
+ &sm1_dsu_clk_dyn1.hw,
},
.num_parents = 2,
},
@@ -934,7 +1636,7 @@ static struct notifier_block g12a_cpu_clk_mux_nb = {
.notifier_call = g12a_cpu_clk_mux_notifier_cb,
};
-struct g12a_cpu_clk_postmux_nb_data {
+struct g12a_cpu_clk_dyn_nb_data {
struct notifier_block nb;
struct clk_hw *xtal;
struct clk_hw *cpu_clk_dyn;
@@ -943,33 +1645,33 @@ struct g12a_cpu_clk_postmux_nb_data {
struct clk_hw *cpu_clk_premux1;
};
-static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
- unsigned long event, void *data)
+static int g12a_cpu_clk_dyn_notifier_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
{
- struct g12a_cpu_clk_postmux_nb_data *nb_data =
- container_of(nb, struct g12a_cpu_clk_postmux_nb_data, nb);
+ struct g12a_cpu_clk_dyn_nb_data *nb_data =
+ container_of(nb, struct g12a_cpu_clk_dyn_nb_data, nb);
switch (event) {
case PRE_RATE_CHANGE:
/*
- * This notifier means cpu_clk_postmux0 clock will be changed
+	 * This notifier means the cpu_clk_dyn0 clock will be changed
	 * to feed cpu_clk; this is the current path:
* cpu_clk
* \- cpu_clk_dyn
- * \- cpu_clk_postmux0
- * \- cpu_clk_muxX_div
- * \- cpu_clk_premux0
+ * \- cpu_clk_dyn0
+ * \- cpu_clk_dyn0_div
+ * \- cpu_clk_dyn0_sel
* \- fclk_div3 or fclk_div2
* OR
- * \- cpu_clk_premux0
+ * \- cpu_clk_dyn0_sel
* \- fclk_div3 or fclk_div2
*/
- /* Setup cpu_clk_premux1 to xtal */
+ /* Setup cpu_clk_dyn1_sel to xtal */
clk_hw_set_parent(nb_data->cpu_clk_premux1,
nb_data->xtal);
- /* Setup cpu_clk_postmux1 to bypass divider */
+ /* Setup cpu_clk_dyn1 to bypass divider */
clk_hw_set_parent(nb_data->cpu_clk_postmux1,
nb_data->cpu_clk_premux1);
@@ -981,8 +1683,8 @@ static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
	 * Now, cpu_clk is 24 MHz in the current path:
* cpu_clk
* \- cpu_clk_dyn
- * \- cpu_clk_postmux1
- * \- cpu_clk_premux1
+ * \- cpu_clk_dyn1
+ * \- cpu_clk_dyn1_sel
* \- xtal
*/
@@ -992,8 +1694,8 @@ static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
case POST_RATE_CHANGE:
/*
- * The cpu_clk_postmux0 has ben updated, now switch back
- * cpu_clk_dyn to cpu_clk_postmux0 and take the changes
+	 * The cpu_clk_dyn0 has been updated; now switch back
+	 * cpu_clk_dyn to cpu_clk_dyn0 and take the changes
	 * into account.
*/
@@ -1005,12 +1707,12 @@ static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
	 * new path:
* cpu_clk
* \- cpu_clk_dyn
- * \- cpu_clk_postmux0
- * \- cpu_clk_muxX_div
- * \- cpu_clk_premux0
+ * \- cpu_clk_dyn0
+ * \- cpu_clk_dyn0_div
+ * \- cpu_clk_dyn0_sel
* \- fclk_div3 or fclk_div2
* OR
- * \- cpu_clk_premux0
+ * \- cpu_clk_dyn0_sel
* \- fclk_div3 or fclk_div2
*/
@@ -1023,20 +1725,20 @@ static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
}
}
-static struct g12a_cpu_clk_postmux_nb_data g12a_cpu_clk_postmux0_nb_data = {
+static struct g12a_cpu_clk_dyn_nb_data g12a_cpu_clk_dyn0_nb_data = {
.cpu_clk_dyn = &g12a_cpu_clk_dyn.hw,
- .cpu_clk_postmux0 = &g12a_cpu_clk_postmux0.hw,
- .cpu_clk_postmux1 = &g12a_cpu_clk_postmux1.hw,
- .cpu_clk_premux1 = &g12a_cpu_clk_premux1.hw,
- .nb.notifier_call = g12a_cpu_clk_postmux_notifier_cb,
+ .cpu_clk_postmux0 = &g12a_cpu_clk_dyn0.hw,
+ .cpu_clk_postmux1 = &g12a_cpu_clk_dyn1.hw,
+ .cpu_clk_premux1 = &g12a_cpu_clk_dyn1_sel.hw,
+ .nb.notifier_call = g12a_cpu_clk_dyn_notifier_cb,
};
-static struct g12a_cpu_clk_postmux_nb_data g12b_cpub_clk_postmux0_nb_data = {
+static struct g12a_cpu_clk_dyn_nb_data g12b_cpub_clk_dyn0_nb_data = {
.cpu_clk_dyn = &g12b_cpub_clk_dyn.hw,
- .cpu_clk_postmux0 = &g12b_cpub_clk_postmux0.hw,
- .cpu_clk_postmux1 = &g12b_cpub_clk_postmux1.hw,
- .cpu_clk_premux1 = &g12b_cpub_clk_premux1.hw,
- .nb.notifier_call = g12a_cpu_clk_postmux_notifier_cb,
+ .cpu_clk_postmux0 = &g12b_cpub_clk_dyn0.hw,
+ .cpu_clk_postmux1 = &g12b_cpub_clk_dyn1.hw,
+ .cpu_clk_premux1 = &g12b_cpub_clk_dyn1_sel.hw,
+ .nb.notifier_call = g12a_cpu_clk_dyn_notifier_cb,
};
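/*
 * For illustration, a minimal sketch of how such a notifier block is
 * hooked up from the probe path; the "dvfs" con_id is an assumption
 * here, not something this patch defines:
 *
 *	struct clk *nclk;
 *
 *	nclk = devm_clk_hw_get_clk(dev, &g12a_cpu_clk_dyn0.hw, "dvfs");
 *	if (IS_ERR(nclk))
 *		return PTR_ERR(nclk);
 *
 *	return devm_clk_notifier_register(dev, nclk,
 *					  &g12a_cpu_clk_dyn0_nb_data.nb);
 */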
struct g12a_sys_pll_nb_data {
@@ -1442,27 +2144,29 @@ static struct clk_fixed_factor g12b_cpub_clk_div8 = {
},
};
-static u32 mux_table_cpub[] = { 1, 2, 3, 4, 5, 6, 7 };
+static u32 g12b_cpub_clk_if_parents_val_table[] = { 1, 2, 3, 4, 5, 6, 7 };
+static const struct clk_hw *g12b_cpub_clk_if_parents[] = {
+ &g12b_cpub_clk_div2.hw,
+ &g12b_cpub_clk_div3.hw,
+ &g12b_cpub_clk_div4.hw,
+ &g12b_cpub_clk_div5.hw,
+ &g12b_cpub_clk_div6.hw,
+ &g12b_cpub_clk_div7.hw,
+ &g12b_cpub_clk_div8.hw,
+};
+
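+/*
+ * The value table maps parent index i to the register value that
+ * selects parents[i]: for example, g12b_cpub_clk_div3 (index 1 above)
+ * is selected by writing value 2 into the 3-bit mux field.
+ */
+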
static struct clk_regmap g12b_cpub_clk_apb_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL1,
.mask = 7,
.shift = 3,
- .table = mux_table_cpub,
+ .table = g12b_cpub_clk_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_apb_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_div2.hw,
- &g12b_cpub_clk_div3.hw,
- &g12b_cpub_clk_div4.hw,
- &g12b_cpub_clk_div5.hw,
- &g12b_cpub_clk_div6.hw,
- &g12b_cpub_clk_div7.hw,
- &g12b_cpub_clk_div8.hw
- },
- .num_parents = 7,
+ .parent_hws = g12b_cpub_clk_if_parents,
+ .num_parents = ARRAY_SIZE(g12b_cpub_clk_if_parents),
},
};
@@ -1491,21 +2195,13 @@ static struct clk_regmap g12b_cpub_clk_atb_sel = {
.offset = HHI_SYS_CPUB_CLK_CNTL1,
.mask = 7,
.shift = 6,
- .table = mux_table_cpub,
+ .table = g12b_cpub_clk_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_atb_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_div2.hw,
- &g12b_cpub_clk_div3.hw,
- &g12b_cpub_clk_div4.hw,
- &g12b_cpub_clk_div5.hw,
- &g12b_cpub_clk_div6.hw,
- &g12b_cpub_clk_div7.hw,
- &g12b_cpub_clk_div8.hw
- },
- .num_parents = 7,
+ .parent_hws = g12b_cpub_clk_if_parents,
+ .num_parents = ARRAY_SIZE(g12b_cpub_clk_if_parents),
},
};
@@ -1534,21 +2230,13 @@ static struct clk_regmap g12b_cpub_clk_axi_sel = {
.offset = HHI_SYS_CPUB_CLK_CNTL1,
.mask = 7,
.shift = 9,
- .table = mux_table_cpub,
+ .table = g12b_cpub_clk_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_axi_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_div2.hw,
- &g12b_cpub_clk_div3.hw,
- &g12b_cpub_clk_div4.hw,
- &g12b_cpub_clk_div5.hw,
- &g12b_cpub_clk_div6.hw,
- &g12b_cpub_clk_div7.hw,
- &g12b_cpub_clk_div8.hw
- },
- .num_parents = 7,
+ .parent_hws = g12b_cpub_clk_if_parents,
+ .num_parents = ARRAY_SIZE(g12b_cpub_clk_if_parents),
},
};
@@ -1577,21 +2265,13 @@ static struct clk_regmap g12b_cpub_clk_trace_sel = {
.offset = HHI_SYS_CPUB_CLK_CNTL1,
.mask = 7,
.shift = 20,
- .table = mux_table_cpub,
+ .table = g12b_cpub_clk_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_trace_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_div2.hw,
- &g12b_cpub_clk_div3.hw,
- &g12b_cpub_clk_div4.hw,
- &g12b_cpub_clk_div5.hw,
- &g12b_cpub_clk_div6.hw,
- &g12b_cpub_clk_div7.hw,
- &g12b_cpub_clk_div8.hw
- },
- .num_parents = 7,
+ .parent_hws = g12b_cpub_clk_if_parents,
+ .num_parents = ARRAY_SIZE(g12b_cpub_clk_if_parents),
},
};
@@ -1615,600 +2295,6 @@ static struct clk_regmap g12b_cpub_clk_trace = {
},
};
-static const struct pll_mult_range g12a_gp0_pll_mult_range = {
- .min = 125,
- .max = 255,
-};
-
-/*
- * Internal gp0 pll emulation configuration parameters
- */
-static const struct reg_sequence g12a_gp0_init_regs[] = {
- { .reg = HHI_GP0_PLL_CNTL1, .def = 0x00000000 },
- { .reg = HHI_GP0_PLL_CNTL2, .def = 0x00000000 },
- { .reg = HHI_GP0_PLL_CNTL3, .def = 0x48681c00 },
- { .reg = HHI_GP0_PLL_CNTL4, .def = 0x33771290 },
- { .reg = HHI_GP0_PLL_CNTL5, .def = 0x39272000 },
- { .reg = HHI_GP0_PLL_CNTL6, .def = 0x56540000 },
-};
-
-static struct clk_regmap g12a_gp0_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_GP0_PLL_CNTL1,
- .shift = 0,
- .width = 17,
- },
- .l = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 31,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- .range = &g12a_gp0_pll_mult_range,
- .init_regs = g12a_gp0_init_regs,
- .init_count = ARRAY_SIZE(g12a_gp0_init_regs),
- },
- .hw.init = &(struct clk_init_data){
- .name = "gp0_pll_dco",
- .ops = &meson_clk_pll_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_gp0_pll = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_GP0_PLL_CNTL0,
- .shift = 16,
- .width = 3,
- .flags = (CLK_DIVIDER_POWER_OF_TWO |
- CLK_DIVIDER_ROUND_CLOSEST),
- },
- .hw.init = &(struct clk_init_data){
- .name = "gp0_pll",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_gp0_pll_dco.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sm1_gp1_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_GP1_PLL_CNTL1,
- .shift = 0,
- .width = 17,
- },
- .l = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 31,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- },
- .hw.init = &(struct clk_init_data){
- .name = "gp1_pll_dco",
- .ops = &meson_clk_pll_ro_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- /* This clock feeds the DSU, avoid disabling it */
- .flags = CLK_IS_CRITICAL,
- },
-};
-
-static struct clk_regmap sm1_gp1_pll = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_GP1_PLL_CNTL0,
- .shift = 16,
- .width = 3,
- .flags = (CLK_DIVIDER_POWER_OF_TWO |
- CLK_DIVIDER_ROUND_CLOSEST),
- },
- .hw.init = &(struct clk_init_data){
- .name = "gp1_pll",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sm1_gp1_pll_dco.hw
- },
- .num_parents = 1,
- },
-};
-
-/*
- * Internal hifi pll emulation configuration parameters
- */
-static const struct reg_sequence g12a_hifi_init_regs[] = {
- { .reg = HHI_HIFI_PLL_CNTL1, .def = 0x00000000 },
- { .reg = HHI_HIFI_PLL_CNTL2, .def = 0x00000000 },
- { .reg = HHI_HIFI_PLL_CNTL3, .def = 0x6a285c00 },
- { .reg = HHI_HIFI_PLL_CNTL4, .def = 0x65771290 },
- { .reg = HHI_HIFI_PLL_CNTL5, .def = 0x39272000 },
- { .reg = HHI_HIFI_PLL_CNTL6, .def = 0x56540000 },
-};
-
-static struct clk_regmap g12a_hifi_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_HIFI_PLL_CNTL1,
- .shift = 0,
- .width = 17,
- },
- .l = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 31,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- .range = &g12a_gp0_pll_mult_range,
- .init_regs = g12a_hifi_init_regs,
- .init_count = ARRAY_SIZE(g12a_hifi_init_regs),
- .flags = CLK_MESON_PLL_ROUND_CLOSEST,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hifi_pll_dco",
- .ops = &meson_clk_pll_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_hifi_pll = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_HIFI_PLL_CNTL0,
- .shift = 16,
- .width = 2,
- .flags = (CLK_DIVIDER_POWER_OF_TWO |
- CLK_DIVIDER_ROUND_CLOSEST),
- },
- .hw.init = &(struct clk_init_data){
- .name = "hifi_pll",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_hifi_pll_dco.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-/*
- * The Meson G12A PCIE PLL is fined tuned to deliver a very precise
- * 100MHz reference clock for the PCIe Analog PHY, and thus requires
- * a strict register sequence to enable the PLL.
- */
-static const struct reg_sequence g12a_pcie_pll_init_regs[] = {
- { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x20090496 },
- { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x30090496 },
- { .reg = HHI_PCIE_PLL_CNTL1, .def = 0x00000000 },
- { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001100 },
- { .reg = HHI_PCIE_PLL_CNTL3, .def = 0x10058e00 },
- { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x000100c0 },
- { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000048 },
- { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000068, .delay_us = 20 },
- { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x008100c0, .delay_us = 10 },
- { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x34090496 },
- { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x14090496, .delay_us = 10 },
- { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001000 },
-};
-
-/* Keep a single entry table for recalc/round_rate() ops */
-static const struct pll_params_table g12a_pcie_pll_table[] = {
- PLL_PARAMS(150, 1),
- {0, 0},
-};
-
-static struct clk_regmap g12a_pcie_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_PCIE_PLL_CNTL1,
- .shift = 0,
- .width = 12,
- },
- .l = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 31,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- .table = g12a_pcie_pll_table,
- .init_regs = g12a_pcie_pll_init_regs,
- .init_count = ARRAY_SIZE(g12a_pcie_pll_init_regs),
- },
- .hw.init = &(struct clk_init_data){
- .name = "pcie_pll_dco",
- .ops = &meson_clk_pcie_pll_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_pcie_pll_dco_div2 = {
- .mult = 1,
- .div = 2,
- .hw.init = &(struct clk_init_data){
- .name = "pcie_pll_dco_div2",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_pcie_pll_dco.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap g12a_pcie_pll_od = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_PCIE_PLL_CNTL0,
- .shift = 16,
- .width = 5,
- .flags = CLK_DIVIDER_ROUND_CLOSEST |
- CLK_DIVIDER_ONE_BASED |
- CLK_DIVIDER_ALLOW_ZERO,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pcie_pll_od",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_pcie_pll_dco_div2.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_fixed_factor g12a_pcie_pll = {
- .mult = 1,
- .div = 2,
- .hw.init = &(struct clk_init_data){
- .name = "pcie_pll_pll",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_pcie_pll_od.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap g12a_hdmi_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_HDMI_PLL_CNTL1,
- .shift = 0,
- .width = 16,
- },
- .l = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 30,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_pll_dco",
- .ops = &meson_clk_pll_ro_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- /*
- * Display directly handle hdmi pll registers ATM, we need
- * NOCACHE to keep our view of the clock as accurate as possible
- */
- .flags = CLK_GET_RATE_NOCACHE,
- },
-};
-
-static struct clk_regmap g12a_hdmi_pll_od = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_HDMI_PLL_CNTL0,
- .shift = 16,
- .width = 2,
- .flags = CLK_DIVIDER_POWER_OF_TWO,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_pll_od",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_hdmi_pll_dco.hw
- },
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap g12a_hdmi_pll_od2 = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_HDMI_PLL_CNTL0,
- .shift = 18,
- .width = 2,
- .flags = CLK_DIVIDER_POWER_OF_TWO,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_pll_od2",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_hdmi_pll_od.hw
- },
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap g12a_hdmi_pll = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_HDMI_PLL_CNTL0,
- .shift = 20,
- .width = 2,
- .flags = CLK_DIVIDER_POWER_OF_TWO,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_pll",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_hdmi_pll_od2.hw
- },
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_fixed_factor g12a_fclk_div4_div = {
- .mult = 1,
- .div = 4,
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div4_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_fclk_div4 = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_FIX_PLL_CNTL1,
- .bit_idx = 21,
- },
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div4",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fclk_div4_div.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_fclk_div5_div = {
- .mult = 1,
- .div = 5,
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div5_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_fclk_div5 = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_FIX_PLL_CNTL1,
- .bit_idx = 22,
- },
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div5",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fclk_div5_div.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_fclk_div7_div = {
- .mult = 1,
- .div = 7,
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div7_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_fclk_div7 = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_FIX_PLL_CNTL1,
- .bit_idx = 23,
- },
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div7",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fclk_div7_div.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_fclk_div2p5_div = {
- .mult = 1,
- .div = 5,
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div2p5_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fixed_pll_dco.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_fclk_div2p5 = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_FIX_PLL_CNTL1,
- .bit_idx = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div2p5",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fclk_div2p5_div.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_mpll_50m_div = {
- .mult = 1,
- .div = 80,
- .hw.init = &(struct clk_init_data){
- .name = "mpll_50m_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fixed_pll_dco.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_mpll_50m = {
- .data = &(struct clk_regmap_mux_data){
- .offset = HHI_FIX_PLL_CNTL3,
- .mask = 0x1,
- .shift = 5,
- },
- .hw.init = &(struct clk_init_data){
- .name = "mpll_50m",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_data = (const struct clk_parent_data []) {
- { .fw_name = "xtal", },
- { .hw = &g12a_mpll_50m_div.hw },
- },
- .num_parents = 2,
- },
-};
-
-static struct clk_fixed_factor g12a_mpll_prediv = {
- .mult = 1,
- .div = 2,
- .hw.init = &(struct clk_init_data){
- .name = "mpll_prediv",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fixed_pll_dco.hw
- },
- .num_parents = 1,
- },
-};
-
static const struct reg_sequence g12a_mpll0_init_regs[] = {
{ .reg = HHI_MPLL_CNTL2, .def = 0x40000033 },
};
@@ -2421,8 +2507,9 @@ static struct clk_regmap g12a_mpll3 = {
},
};
-static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
-static const struct clk_parent_data clk81_parent_data[] = {
+/* clk81 is often referred to as "mpeg_clk" */
+static u32 g12a_clk81_parents_val_table[] = { 0, 2, 3, 4, 5, 6, 7 };
+static const struct clk_parent_data g12a_clk81_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_fclk_div7.hw },
{ .hw = &g12a_mpll1.hw },
@@ -2432,32 +2519,32 @@ static const struct clk_parent_data clk81_parent_data[] = {
{ .hw = &g12a_fclk_div5.hw },
};
-static struct clk_regmap g12a_mpeg_clk_sel = {
+static struct clk_regmap g12a_clk81_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MPEG_CLK_CNTL,
.mask = 0x7,
.shift = 12,
- .table = mux_table_clk81,
+ .table = g12a_clk81_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_sel",
+ .name = "clk81_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = clk81_parent_data,
- .num_parents = ARRAY_SIZE(clk81_parent_data),
+ .parent_data = g12a_clk81_parents,
+ .num_parents = ARRAY_SIZE(g12a_clk81_parents),
},
};
-static struct clk_regmap g12a_mpeg_clk_div = {
+static struct clk_regmap g12a_clk81_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_div",
+ .name = "clk81_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_mpeg_clk_sel.hw
+ &g12a_clk81_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2473,14 +2560,14 @@ static struct clk_regmap g12a_clk81 = {
.name = "clk81",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_mpeg_clk_div.hw
+ &g12a_clk81_div.hw
},
.num_parents = 1,
.flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
},
};
-static const struct clk_parent_data g12a_sd_emmc_clk0_parent_data[] = {
+static const struct clk_parent_data g12a_sd_emmc_clk0_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_fclk_div2.hw },
{ .hw = &g12a_fclk_div3.hw },
@@ -2489,7 +2576,7 @@ static const struct clk_parent_data g12a_sd_emmc_clk0_parent_data[] = {
/*
* Following these parent clocks, we should also have had mpll2, mpll3
* and gp0_pll but these clocks are too precious to be used here. All
- * the necessary rates for MMC and NAND operation can be acheived using
+ * the necessary rates for MMC and NAND operation can be achieved using
* g12a_ee_core or fclk_div clocks
*/
};
@@ -2504,8 +2591,8 @@ static struct clk_regmap g12a_sd_emmc_a_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_data),
+ .parent_data = g12a_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2553,8 +2640,8 @@ static struct clk_regmap g12a_sd_emmc_b_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_data),
+ .parent_data = g12a_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2602,8 +2689,8 @@ static struct clk_regmap g12a_sd_emmc_c_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_data),
+ .parent_data = g12a_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2665,7 +2752,7 @@ static struct clk_regmap g12a_vid_pll_div = {
},
};
-static const struct clk_hw *g12a_vid_pll_parent_hws[] = {
+static const struct clk_hw *g12a_vid_pll_parents[] = {
&g12a_vid_pll_div.hw,
&g12a_hdmi_pll.hw,
};
@@ -2683,8 +2770,8 @@ static struct clk_regmap g12a_vid_pll_sel = {
* bit 18 selects from 2 possible parents:
* vid_pll_div or hdmi_pll
*/
- .parent_hws = g12a_vid_pll_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vid_pll_parent_hws),
+ .parent_hws = g12a_vid_pll_parents,
+ .num_parents = ARRAY_SIZE(g12a_vid_pll_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2707,7 +2794,7 @@ static struct clk_regmap g12a_vid_pll = {
/* VPU Clock */
-static const struct clk_hw *g12a_vpu_parent_hws[] = {
+static const struct clk_hw *g12a_vpu_parents[] = {
&g12a_fclk_div3.hw,
&g12a_fclk_div4.hw,
&g12a_fclk_div5.hw,
@@ -2727,8 +2814,8 @@ static struct clk_regmap g12a_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vpu_parent_hws),
+ .parent_hws = g12a_vpu_parents,
+ .num_parents = ARRAY_SIZE(g12a_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -2771,8 +2858,8 @@ static struct clk_regmap g12a_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vpu_parent_hws),
+ .parent_hws = g12a_vpu_parents,
+ .num_parents = ARRAY_SIZE(g12a_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -2830,7 +2917,7 @@ static struct clk_regmap g12a_vpu = {
/* VDEC clocks */
-static const struct clk_hw *g12a_vdec_parent_hws[] = {
+static const struct clk_hw *g12a_vdec_parents[] = {
&g12a_fclk_div2p5.hw,
&g12a_fclk_div3.hw,
&g12a_fclk_div4.hw,
@@ -2850,8 +2937,8 @@ static struct clk_regmap g12a_vdec_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vdec_parent_hws),
+ .parent_hws = g12a_vdec_parents,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2900,8 +2987,8 @@ static struct clk_regmap g12a_vdec_hevcf_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevcf_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vdec_parent_hws),
+ .parent_hws = g12a_vdec_parents,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2950,8 +3037,8 @@ static struct clk_regmap g12a_vdec_hevc_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vdec_parent_hws),
+ .parent_hws = g12a_vdec_parents,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2992,7 +3079,7 @@ static struct clk_regmap g12a_vdec_hevc = {
/* VAPB Clock */
-static const struct clk_hw *g12a_vapb_parent_hws[] = {
+static const struct clk_hw *g12a_vapb_parents[] = {
&g12a_fclk_div4.hw,
&g12a_fclk_div3.hw,
&g12a_fclk_div5.hw,
@@ -3012,8 +3099,8 @@ static struct clk_regmap g12a_vapb_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vapb_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vapb_parent_hws),
+ .parent_hws = g12a_vapb_parents,
+ .num_parents = ARRAY_SIZE(g12a_vapb_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -3060,8 +3147,8 @@ static struct clk_regmap g12a_vapb_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vapb_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vapb_parent_hws),
+ .parent_hws = g12a_vapb_parents,
+ .num_parents = ARRAY_SIZE(g12a_vapb_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -3135,7 +3222,7 @@ static struct clk_regmap g12a_vapb = {
},
};
-static const struct clk_hw *g12a_vclk_parent_hws[] = {
+static const struct clk_hw *g12a_vclk_parents[] = {
&g12a_vid_pll.hw,
&g12a_gp0_pll.hw,
&g12a_hifi_pll.hw,
@@ -3155,8 +3242,8 @@ static struct clk_regmap g12a_vclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vclk_parent_hws),
+ .parent_hws = g12a_vclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -3170,8 +3257,8 @@ static struct clk_regmap g12a_vclk2_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vclk_parent_hws),
+ .parent_hws = g12a_vclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -3534,8 +3621,8 @@ static struct clk_fixed_factor g12a_vclk2_div12 = {
},
};
-static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *g12a_cts_parent_hws[] = {
+static u32 g12a_cts_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *g12a_cts_parents[] = {
&g12a_vclk_div1.hw,
&g12a_vclk_div2.hw,
&g12a_vclk_div4.hw,
@@ -3553,13 +3640,13 @@ static struct clk_regmap g12a_cts_enci_sel = {
.offset = HHI_VID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = g12a_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
+ .parent_hws = g12a_cts_parents,
+ .num_parents = ARRAY_SIZE(g12a_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -3569,13 +3656,13 @@ static struct clk_regmap g12a_cts_encp_sel = {
.offset = HHI_VID_CLK_DIV,
.mask = 0xf,
.shift = 20,
- .table = mux_table_cts_sel,
+ .table = g12a_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
+ .parent_hws = g12a_cts_parents,
+ .num_parents = ARRAY_SIZE(g12a_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -3585,13 +3672,13 @@ static struct clk_regmap g12a_cts_encl_sel = {
.offset = HHI_VIID_CLK_DIV,
.mask = 0xf,
.shift = 12,
- .table = mux_table_cts_sel,
+ .table = g12a_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encl_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
+ .parent_hws = g12a_cts_parents,
+ .num_parents = ARRAY_SIZE(g12a_cts_parents),
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
},
};
@@ -3601,20 +3688,20 @@ static struct clk_regmap g12a_cts_vdac_sel = {
.offset = HHI_VIID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = g12a_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_vdac_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
+ .parent_hws = g12a_cts_parents,
+ .num_parents = ARRAY_SIZE(g12a_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
/* TOFIX: add support for cts_tcon */
-static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *g12a_cts_hdmi_tx_parent_hws[] = {
+static u32 g12a_hdmi_tx_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *g12a_hdmi_tx_parents[] = {
&g12a_vclk_div1.hw,
&g12a_vclk_div2.hw,
&g12a_vclk_div4.hw,
@@ -3632,13 +3719,13 @@ static struct clk_regmap g12a_hdmi_tx_sel = {
.offset = HHI_HDMI_CLK_CNTL,
.mask = 0xf,
.shift = 16,
- .table = mux_table_hdmi_tx_sel,
+ .table = g12a_hdmi_tx_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_tx_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_hdmi_tx_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_hdmi_tx_parent_hws),
+ .parent_hws = g12a_hdmi_tx_parents,
+ .num_parents = ARRAY_SIZE(g12a_hdmi_tx_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -3725,7 +3812,7 @@ static struct clk_regmap g12a_hdmi_tx = {
/* MIPI DSI Host Clocks */
-static const struct clk_hw *g12a_mipi_dsi_pxclk_parent_hws[] = {
+static const struct clk_hw *g12a_mipi_dsi_pxclk_parents[] = {
&g12a_vid_pll.hw,
&g12a_gp0_pll.hw,
&g12a_hifi_pll.hw,
@@ -3746,15 +3833,15 @@ static struct clk_regmap g12a_mipi_dsi_pxclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "mipi_dsi_pxclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_mipi_dsi_pxclk_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_mipi_dsi_pxclk_parent_hws),
+ .parent_hws = g12a_mipi_dsi_pxclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_mipi_dsi_pxclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
},
};
/*
- * FIXME: Force as bypass by forcing a single /1 table entry, and doensn't on boot value
- * when setting a clock whith this node in the clock path, but doesn't garantee the divider
+ * FIXME: Force as bypass by forcing a single /1 table entry. This doesn't act on the boot value
+ * when setting a clock with this node in the clock path, and doesn't guarantee the divider
* is at /1 at boot until a rate is set.
*/
static const struct clk_div_table g12a_mipi_dsi_pxclk_div_table[] = {
@@ -3798,7 +3885,7 @@ static struct clk_regmap g12a_mipi_dsi_pxclk = {
/* MIPI ISP Clocks */
-static const struct clk_parent_data g12b_mipi_isp_parent_data[] = {
+static const struct clk_parent_data g12b_mipi_isp_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_gp0_pll.hw },
{ .hw = &g12a_hifi_pll.hw },
@@ -3818,8 +3905,8 @@ static struct clk_regmap g12b_mipi_isp_sel = {
.hw.init = &(struct clk_init_data){
.name = "mipi_isp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12b_mipi_isp_parent_data,
- .num_parents = ARRAY_SIZE(g12b_mipi_isp_parent_data),
+ .parent_data = g12b_mipi_isp_parents,
+ .num_parents = ARRAY_SIZE(g12b_mipi_isp_parents),
},
};
@@ -3858,7 +3945,7 @@ static struct clk_regmap g12b_mipi_isp = {
/* HDMI Clocks */
-static const struct clk_parent_data g12a_hdmi_parent_data[] = {
+static const struct clk_parent_data g12a_hdmi_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_fclk_div4.hw },
{ .hw = &g12a_fclk_div3.hw },
@@ -3875,8 +3962,8 @@ static struct clk_regmap g12a_hdmi_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_hdmi_parent_data,
- .num_parents = ARRAY_SIZE(g12a_hdmi_parent_data),
+ .parent_data = g12a_hdmi_parents,
+ .num_parents = ARRAY_SIZE(g12a_hdmi_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -3916,7 +4003,7 @@ static struct clk_regmap g12a_hdmi = {
	 * mux because it does top-to-bottom updates of each clock tree and
* switches to the "inactive" one when CLK_SET_RATE_GATE is set.
*/
-static const struct clk_parent_data g12a_mali_0_1_parent_data[] = {
+static const struct clk_parent_data g12a_mali_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_gp0_pll.hw },
{ .hw = &g12a_hifi_pll.hw },
@@ -3936,8 +4023,8 @@ static struct clk_regmap g12a_mali_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_mali_0_1_parent_data,
- .num_parents = 8,
+ .parent_data = g12a_mali_parents,
+ .num_parents = ARRAY_SIZE(g12a_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -3990,8 +4077,8 @@ static struct clk_regmap g12a_mali_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_mali_0_1_parent_data,
- .num_parents = 8,
+ .parent_data = g12a_mali_parents,
+ .num_parents = ARRAY_SIZE(g12a_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -4035,11 +4122,6 @@ static struct clk_regmap g12a_mali_1 = {
},
};
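/*
 * With CLK_SET_RATE_PARENT on the glitch-free mux below, a consumer-side
 * rate change stays a single call; an illustrative sketch, assuming a
 * clk handle for "mali" was already looked up:
 *
 *	ret = clk_set_rate(mali_clk, 666666666);
 *
 * The CCF then reprograms whichever of mali_0/mali_1 is inactive and
 * switches the mux over to it.
 */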
-static const struct clk_hw *g12a_mali_parent_hws[] = {
- &g12a_mali_0.hw,
- &g12a_mali_1.hw,
-};
-
static struct clk_regmap g12a_mali = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MALI_CLK_CNTL,
@@ -4049,7 +4131,10 @@ static struct clk_regmap g12a_mali = {
.hw.init = &(struct clk_init_data){
.name = "mali",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_mali_parent_hws,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_mali_0.hw,
+ &g12a_mali_1.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
@@ -4088,7 +4173,7 @@ static struct clk_regmap g12a_ts = {
/* SPICC SCLK source clock */
-static const struct clk_parent_data spicc_sclk_parent_data[] = {
+static const struct clk_parent_data g12a_spicc_sclk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_clk81.hw },
{ .hw = &g12a_fclk_div4.hw },
@@ -4107,8 +4192,8 @@ static struct clk_regmap g12a_spicc0_sclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "spicc0_sclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = spicc_sclk_parent_data,
- .num_parents = ARRAY_SIZE(spicc_sclk_parent_data),
+ .parent_data = g12a_spicc_sclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_spicc_sclk_parents),
},
};
@@ -4154,8 +4239,8 @@ static struct clk_regmap g12a_spicc1_sclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "spicc1_sclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = spicc_sclk_parent_data,
- .num_parents = ARRAY_SIZE(spicc_sclk_parent_data),
+ .parent_data = g12a_spicc_sclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_spicc_sclk_parents),
},
};
@@ -4194,7 +4279,7 @@ static struct clk_regmap g12a_spicc1_sclk = {
/* Neural Network Accelerator source clock */
-static const struct clk_parent_data nna_clk_parent_data[] = {
+static const struct clk_parent_data sm1_nna_clk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_gp0_pll.hw, },
{ .hw = &g12a_hifi_pll.hw, },
@@ -4214,8 +4299,8 @@ static struct clk_regmap sm1_nna_axi_clk_sel = {
.hw.init = &(struct clk_init_data){
.name = "nna_axi_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = nna_clk_parent_data,
- .num_parents = ARRAY_SIZE(nna_clk_parent_data),
+ .parent_data = sm1_nna_clk_parents,
+ .num_parents = ARRAY_SIZE(sm1_nna_clk_parents),
},
};
@@ -4261,8 +4346,8 @@ static struct clk_regmap sm1_nna_core_clk_sel = {
.hw.init = &(struct clk_init_data){
.name = "nna_core_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = nna_clk_parent_data,
- .num_parents = ARRAY_SIZE(nna_clk_parent_data),
+ .parent_data = sm1_nna_clk_parents,
+ .num_parents = ARRAY_SIZE(sm1_nna_clk_parents),
},
};
@@ -4299,89 +4384,101 @@ static struct clk_regmap sm1_nna_core_clk = {
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &g12a_clk81.hw)
-
-#define MESON_GATE_RO(_name, _reg, _bit) \
- MESON_PCLK_RO(_name, _reg, _bit, &g12a_clk81.hw)
-
-/* Everything Else (EE) domain gates */
-static MESON_GATE(g12a_ddr, HHI_GCLK_MPEG0, 0);
-static MESON_GATE(g12a_dos, HHI_GCLK_MPEG0, 1);
-static MESON_GATE(g12a_audio_locker, HHI_GCLK_MPEG0, 2);
-static MESON_GATE(g12a_mipi_dsi_host, HHI_GCLK_MPEG0, 3);
-static MESON_GATE(g12a_eth_phy, HHI_GCLK_MPEG0, 4);
-static MESON_GATE(g12a_isa, HHI_GCLK_MPEG0, 5);
-static MESON_GATE(g12a_pl301, HHI_GCLK_MPEG0, 6);
-static MESON_GATE(g12a_periphs, HHI_GCLK_MPEG0, 7);
-static MESON_GATE(g12a_spicc_0, HHI_GCLK_MPEG0, 8);
-static MESON_GATE(g12a_i2c, HHI_GCLK_MPEG0, 9);
-static MESON_GATE(g12a_sana, HHI_GCLK_MPEG0, 10);
-static MESON_GATE(g12a_sd, HHI_GCLK_MPEG0, 11);
-static MESON_GATE(g12a_rng0, HHI_GCLK_MPEG0, 12);
-static MESON_GATE(g12a_uart0, HHI_GCLK_MPEG0, 13);
-static MESON_GATE(g12a_spicc_1, HHI_GCLK_MPEG0, 14);
-static MESON_GATE(g12a_hiu_reg, HHI_GCLK_MPEG0, 19);
-static MESON_GATE(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20);
-static MESON_GATE(g12a_assist_misc, HHI_GCLK_MPEG0, 23);
-static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 24);
-static MESON_GATE(g12a_emmc_b, HHI_GCLK_MPEG0, 25);
-static MESON_GATE(g12a_emmc_c, HHI_GCLK_MPEG0, 26);
-static MESON_GATE(g12a_audio_codec, HHI_GCLK_MPEG0, 28);
-
-static MESON_GATE(g12a_audio, HHI_GCLK_MPEG1, 0);
-static MESON_GATE(g12a_eth_core, HHI_GCLK_MPEG1, 3);
-static MESON_GATE(g12a_demux, HHI_GCLK_MPEG1, 4);
-static MESON_GATE(g12a_audio_ififo, HHI_GCLK_MPEG1, 11);
-static MESON_GATE(g12a_adc, HHI_GCLK_MPEG1, 13);
-static MESON_GATE(g12a_uart1, HHI_GCLK_MPEG1, 16);
-static MESON_GATE(g12a_g2d, HHI_GCLK_MPEG1, 20);
-static MESON_GATE(g12a_reset, HHI_GCLK_MPEG1, 23);
-static MESON_GATE(g12a_pcie_comb, HHI_GCLK_MPEG1, 24);
-static MESON_GATE(g12a_parser, HHI_GCLK_MPEG1, 25);
-static MESON_GATE(g12a_usb_general, HHI_GCLK_MPEG1, 26);
-static MESON_GATE(g12a_pcie_phy, HHI_GCLK_MPEG1, 27);
-static MESON_GATE(g12a_ahb_arb0, HHI_GCLK_MPEG1, 29);
-
-static MESON_GATE(g12a_ahb_data_bus, HHI_GCLK_MPEG2, 1);
-static MESON_GATE(g12a_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
-static MESON_GATE(g12a_htx_hdcp22, HHI_GCLK_MPEG2, 3);
-static MESON_GATE(g12a_htx_pclk, HHI_GCLK_MPEG2, 4);
-static MESON_GATE(g12a_bt656, HHI_GCLK_MPEG2, 6);
-static MESON_GATE(g12a_usb1_to_ddr, HHI_GCLK_MPEG2, 8);
-static MESON_GATE(g12b_mipi_isp_gate, HHI_GCLK_MPEG2, 17);
-static MESON_GATE(g12a_mmc_pclk, HHI_GCLK_MPEG2, 11);
-static MESON_GATE(g12a_uart2, HHI_GCLK_MPEG2, 15);
-static MESON_GATE(g12a_vpu_intr, HHI_GCLK_MPEG2, 25);
-static MESON_GATE(g12b_csi_phy1, HHI_GCLK_MPEG2, 28);
-static MESON_GATE(g12b_csi_phy0, HHI_GCLK_MPEG2, 29);
-static MESON_GATE(g12a_gic, HHI_GCLK_MPEG2, 30);
-
-static MESON_GATE(g12a_vclk2_venci0, HHI_GCLK_OTHER, 1);
-static MESON_GATE(g12a_vclk2_venci1, HHI_GCLK_OTHER, 2);
-static MESON_GATE(g12a_vclk2_vencp0, HHI_GCLK_OTHER, 3);
-static MESON_GATE(g12a_vclk2_vencp1, HHI_GCLK_OTHER, 4);
-static MESON_GATE(g12a_vclk2_venct0, HHI_GCLK_OTHER, 5);
-static MESON_GATE(g12a_vclk2_venct1, HHI_GCLK_OTHER, 6);
-static MESON_GATE(g12a_vclk2_other, HHI_GCLK_OTHER, 7);
-static MESON_GATE(g12a_vclk2_enci, HHI_GCLK_OTHER, 8);
-static MESON_GATE(g12a_vclk2_encp, HHI_GCLK_OTHER, 9);
-static MESON_GATE(g12a_dac_clk, HHI_GCLK_OTHER, 10);
-static MESON_GATE(g12a_aoclk_gate, HHI_GCLK_OTHER, 14);
-static MESON_GATE(g12a_iec958_gate, HHI_GCLK_OTHER, 16);
-static MESON_GATE(g12a_enc480p, HHI_GCLK_OTHER, 20);
-static MESON_GATE(g12a_rng1, HHI_GCLK_OTHER, 21);
-static MESON_GATE(g12a_vclk2_enct, HHI_GCLK_OTHER, 22);
-static MESON_GATE(g12a_vclk2_encl, HHI_GCLK_OTHER, 23);
-static MESON_GATE(g12a_vclk2_venclmmc, HHI_GCLK_OTHER, 24);
-static MESON_GATE(g12a_vclk2_vencl, HHI_GCLK_OTHER, 25);
-static MESON_GATE(g12a_vclk2_other1, HHI_GCLK_OTHER, 26);
-
-static MESON_GATE_RO(g12a_dma, HHI_GCLK_OTHER2, 0);
-static MESON_GATE_RO(g12a_efuse, HHI_GCLK_OTHER2, 1);
-static MESON_GATE_RO(g12a_rom_boot, HHI_GCLK_OTHER2, 2);
-static MESON_GATE_RO(g12a_reset_sec, HHI_GCLK_OTHER2, 3);
-static MESON_GATE_RO(g12a_sec_ahb_apb3, HHI_GCLK_OTHER2, 4);
+static const struct clk_parent_data g12a_pclk_parents = { .hw = &g12a_clk81.hw };
+
+#define G12A_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(_name, _reg, _bit, &g12a_pclk_parents, _flags)
+
+#define G12A_PCLK_RO(_name, _reg, _bit, _flags) \
+ MESON_PCLK_RO(_name, _reg, _bit, &g12a_pclk_parents, _flags)
+
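+/*
+ * For reference, a G12A_PCLK() line below roughly expands to a gate
+ * clock of this shape (see MESON_PCLK() in clk-regmap.h for the exact
+ * definition):
+ *
+ *	static struct clk_regmap g12a_ddr = {
+ *		.data = &(struct clk_regmap_gate_data) {
+ *			.offset = HHI_GCLK_MPEG0,
+ *			.bit_idx = 0,
+ *		},
+ *		.hw.init = &(struct clk_init_data) {
+ *			.name = "g12a_ddr",
+ *			.ops = &clk_regmap_gate_ops,
+ *			.parent_data = &g12a_pclk_parents,
+ *			.num_parents = 1,
+ *			.flags = CLK_IGNORE_UNUSED,
+ *		},
+ *	};
+ */
+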
+/*
+ * Everything Else (EE) domain gates
+ *
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historical reasons
+ * Users are encouraged to test without it and submit changes to:
+ * - remove the flag if not necessary
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ * if appropriate.
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ * for a particular clock.
+ */
+static G12A_PCLK(g12a_ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_dos, HHI_GCLK_MPEG0, 1, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_audio_locker, HHI_GCLK_MPEG0, 2, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_mipi_dsi_host, HHI_GCLK_MPEG0, 3, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_eth_phy, HHI_GCLK_MPEG0, 4, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_isa, HHI_GCLK_MPEG0, 5, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_pl301, HHI_GCLK_MPEG0, 6, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_periphs, HHI_GCLK_MPEG0, 7, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_spicc_0, HHI_GCLK_MPEG0, 8, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_i2c, HHI_GCLK_MPEG0, 9, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_sana, HHI_GCLK_MPEG0, 10, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_sd, HHI_GCLK_MPEG0, 11, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_rng0, HHI_GCLK_MPEG0, 12, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_uart0, HHI_GCLK_MPEG0, 13, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_spicc_1, HHI_GCLK_MPEG0, 14, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_hiu_reg, HHI_GCLK_MPEG0, 19, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_assist_misc, HHI_GCLK_MPEG0, 23, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_emmc_a, HHI_GCLK_MPEG0, 24, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_emmc_b, HHI_GCLK_MPEG0, 25, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_emmc_c, HHI_GCLK_MPEG0, 26, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_audio_codec, HHI_GCLK_MPEG0, 28, CLK_IGNORE_UNUSED);
+
+static G12A_PCLK(g12a_audio, HHI_GCLK_MPEG1, 0, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_eth_core, HHI_GCLK_MPEG1, 3, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_demux, HHI_GCLK_MPEG1, 4, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_audio_ififo, HHI_GCLK_MPEG1, 11, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_adc, HHI_GCLK_MPEG1, 13, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_uart1, HHI_GCLK_MPEG1, 16, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_g2d, HHI_GCLK_MPEG1, 20, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_reset, HHI_GCLK_MPEG1, 23, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_pcie_comb, HHI_GCLK_MPEG1, 24, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_parser, HHI_GCLK_MPEG1, 25, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_usb_general, HHI_GCLK_MPEG1, 26, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_pcie_phy, HHI_GCLK_MPEG1, 27, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_ahb_arb0, HHI_GCLK_MPEG1, 29, CLK_IGNORE_UNUSED);
+
+static G12A_PCLK(g12a_ahb_data_bus, HHI_GCLK_MPEG2, 1, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_htx_hdcp22, HHI_GCLK_MPEG2, 3, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_htx_pclk, HHI_GCLK_MPEG2, 4, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_bt656, HHI_GCLK_MPEG2, 6, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_usb1_to_ddr, HHI_GCLK_MPEG2, 8, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12b_mipi_isp_gate, HHI_GCLK_MPEG2, 17, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_mmc_pclk, HHI_GCLK_MPEG2, 11, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_uart2, HHI_GCLK_MPEG2, 15, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vpu_intr, HHI_GCLK_MPEG2, 25, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12b_csi_phy1, HHI_GCLK_MPEG2, 28, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12b_csi_phy0, HHI_GCLK_MPEG2, 29, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_gic, HHI_GCLK_MPEG2, 30, CLK_IGNORE_UNUSED);
+
+static G12A_PCLK(g12a_vclk2_venci0, HHI_GCLK_OTHER, 1, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_venci1, HHI_GCLK_OTHER, 2, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_vencp0, HHI_GCLK_OTHER, 3, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_vencp1, HHI_GCLK_OTHER, 4, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_venct0, HHI_GCLK_OTHER, 5, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_venct1, HHI_GCLK_OTHER, 6, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_other, HHI_GCLK_OTHER, 7, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_enci, HHI_GCLK_OTHER, 8, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_encp, HHI_GCLK_OTHER, 9, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_dac_clk, HHI_GCLK_OTHER, 10, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_aoclk_gate, HHI_GCLK_OTHER, 14, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_iec958_gate, HHI_GCLK_OTHER, 16, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_enc480p, HHI_GCLK_OTHER, 20, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_rng1, HHI_GCLK_OTHER, 21, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_enct, HHI_GCLK_OTHER, 22, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_encl, HHI_GCLK_OTHER, 23, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_venclmmc, HHI_GCLK_OTHER, 24, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_vencl, HHI_GCLK_OTHER, 25, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_other1, HHI_GCLK_OTHER, 26, CLK_IGNORE_UNUSED);
+
+static G12A_PCLK_RO(g12a_dma, HHI_GCLK_OTHER2, 0, 0);
+static G12A_PCLK_RO(g12a_efuse, HHI_GCLK_OTHER2, 1, 0);
+static G12A_PCLK_RO(g12a_rom_boot, HHI_GCLK_OTHER2, 2, 0);
+static G12A_PCLK_RO(g12a_reset_sec, HHI_GCLK_OTHER2, 3, 0);
+static G12A_PCLK_RO(g12a_sec_ahb_apb3, HHI_GCLK_OTHER2, 4, 0);
/* Array of all clocks provided by this provider */
static struct clk_hw *g12a_hw_clks[] = {
@@ -4394,8 +4491,8 @@ static struct clk_hw *g12a_hw_clks[] = {
[CLKID_FCLK_DIV7] = &g12a_fclk_div7.hw,
[CLKID_FCLK_DIV2P5] = &g12a_fclk_div2p5.hw,
[CLKID_GP0_PLL] = &g12a_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &g12a_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &g12a_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &g12a_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &g12a_clk81_div.hw,
[CLKID_CLK81] = &g12a_clk81.hw,
[CLKID_MPLL0] = &g12a_mpll0.hw,
[CLKID_MPLL1] = &g12a_mpll1.hw,
@@ -4567,12 +4664,12 @@ static struct clk_hw *g12a_hw_clks[] = {
[CLKID_MPLL_50M] = &g12a_mpll_50m.hw,
[CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw,
[CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw,
- [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw,
- [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_mux0_div.hw,
- [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_postmux0.hw,
- [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_premux1.hw,
- [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_mux1_div.hw,
- [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_postmux1.hw,
+ [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_dyn0_sel.hw,
+ [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_dyn0_div.hw,
+ [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_dyn0.hw,
+ [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_dyn1_sel.hw,
+ [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_dyn1_div.hw,
+ [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_dyn1.hw,
[CLKID_CPU_CLK_DYN] = &g12a_cpu_clk_dyn.hw,
[CLKID_CPU_CLK] = &g12a_cpu_clk.hw,
[CLKID_CPU_CLK_DIV16_EN] = &g12a_cpu_clk_div16_en.hw,
@@ -4621,8 +4718,8 @@ static struct clk_hw *g12b_hw_clks[] = {
[CLKID_FCLK_DIV7] = &g12a_fclk_div7.hw,
[CLKID_FCLK_DIV2P5] = &g12a_fclk_div2p5.hw,
[CLKID_GP0_PLL] = &g12a_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &g12a_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &g12a_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &g12a_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &g12a_clk81_div.hw,
[CLKID_CLK81] = &g12a_clk81.hw,
[CLKID_MPLL0] = &g12a_mpll0.hw,
[CLKID_MPLL1] = &g12a_mpll1.hw,
@@ -4794,12 +4891,12 @@ static struct clk_hw *g12b_hw_clks[] = {
[CLKID_MPLL_50M] = &g12a_mpll_50m.hw,
[CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw,
[CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw,
- [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw,
- [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_mux0_div.hw,
- [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_postmux0.hw,
- [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_premux1.hw,
- [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_mux1_div.hw,
- [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_postmux1.hw,
+ [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_dyn0_sel.hw,
+ [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_dyn0_div.hw,
+ [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_dyn0.hw,
+ [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_dyn1_sel.hw,
+ [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_dyn1_div.hw,
+ [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_dyn1.hw,
[CLKID_CPU_CLK_DYN] = &g12a_cpu_clk_dyn.hw,
[CLKID_CPU_CLK] = &g12b_cpu_clk.hw,
[CLKID_CPU_CLK_DIV16_EN] = &g12a_cpu_clk_div16_en.hw,
@@ -4831,12 +4928,12 @@ static struct clk_hw *g12b_hw_clks[] = {
[CLKID_SYS1_PLL] = &g12b_sys1_pll.hw,
[CLKID_SYS1_PLL_DIV16_EN] = &g12b_sys1_pll_div16_en.hw,
[CLKID_SYS1_PLL_DIV16] = &g12b_sys1_pll_div16.hw,
- [CLKID_CPUB_CLK_DYN0_SEL] = &g12b_cpub_clk_premux0.hw,
- [CLKID_CPUB_CLK_DYN0_DIV] = &g12b_cpub_clk_mux0_div.hw,
- [CLKID_CPUB_CLK_DYN0] = &g12b_cpub_clk_postmux0.hw,
- [CLKID_CPUB_CLK_DYN1_SEL] = &g12b_cpub_clk_premux1.hw,
- [CLKID_CPUB_CLK_DYN1_DIV] = &g12b_cpub_clk_mux1_div.hw,
- [CLKID_CPUB_CLK_DYN1] = &g12b_cpub_clk_postmux1.hw,
+ [CLKID_CPUB_CLK_DYN0_SEL] = &g12b_cpub_clk_dyn0_sel.hw,
+ [CLKID_CPUB_CLK_DYN0_DIV] = &g12b_cpub_clk_dyn0_div.hw,
+ [CLKID_CPUB_CLK_DYN0] = &g12b_cpub_clk_dyn0.hw,
+ [CLKID_CPUB_CLK_DYN1_SEL] = &g12b_cpub_clk_dyn1_sel.hw,
+ [CLKID_CPUB_CLK_DYN1_DIV] = &g12b_cpub_clk_dyn1_div.hw,
+ [CLKID_CPUB_CLK_DYN1] = &g12b_cpub_clk_dyn1.hw,
[CLKID_CPUB_CLK_DYN] = &g12b_cpub_clk_dyn.hw,
[CLKID_CPUB_CLK] = &g12b_cpub_clk.hw,
[CLKID_CPUB_CLK_DIV16_EN] = &g12b_cpub_clk_div16_en.hw,
@@ -4889,8 +4986,8 @@ static struct clk_hw *sm1_hw_clks[] = {
[CLKID_FCLK_DIV7] = &g12a_fclk_div7.hw,
[CLKID_FCLK_DIV2P5] = &g12a_fclk_div2p5.hw,
[CLKID_GP0_PLL] = &g12a_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &g12a_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &g12a_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &g12a_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &g12a_clk81_div.hw,
[CLKID_CLK81] = &g12a_clk81.hw,
[CLKID_MPLL0] = &g12a_mpll0.hw,
[CLKID_MPLL1] = &g12a_mpll1.hw,
@@ -5062,12 +5159,12 @@ static struct clk_hw *sm1_hw_clks[] = {
[CLKID_MPLL_50M] = &g12a_mpll_50m.hw,
[CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw,
[CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw,
- [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw,
- [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_mux0_div.hw,
- [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_postmux0.hw,
- [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_premux1.hw,
- [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_mux1_div.hw,
- [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_postmux1.hw,
+ [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_dyn0_sel.hw,
+ [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_dyn0_div.hw,
+ [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_dyn0.hw,
+ [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_dyn1_sel.hw,
+ [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_dyn1_div.hw,
+ [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_dyn1.hw,
[CLKID_CPU_CLK_DYN] = &g12a_cpu_clk_dyn.hw,
[CLKID_CPU_CLK] = &g12a_cpu_clk.hw,
[CLKID_CPU_CLK_DIV16_EN] = &g12a_cpu_clk_div16_en.hw,
@@ -5097,12 +5194,12 @@ static struct clk_hw *sm1_hw_clks[] = {
[CLKID_TS] = &g12a_ts.hw,
[CLKID_GP1_PLL_DCO] = &sm1_gp1_pll_dco.hw,
[CLKID_GP1_PLL] = &sm1_gp1_pll.hw,
- [CLKID_DSU_CLK_DYN0_SEL] = &sm1_dsu_clk_premux0.hw,
- [CLKID_DSU_CLK_DYN0_DIV] = &sm1_dsu_clk_premux1.hw,
- [CLKID_DSU_CLK_DYN0] = &sm1_dsu_clk_mux0_div.hw,
- [CLKID_DSU_CLK_DYN1_SEL] = &sm1_dsu_clk_postmux0.hw,
- [CLKID_DSU_CLK_DYN1_DIV] = &sm1_dsu_clk_mux1_div.hw,
- [CLKID_DSU_CLK_DYN1] = &sm1_dsu_clk_postmux1.hw,
+ [CLKID_DSU_CLK_DYN0_SEL] = &sm1_dsu_clk_dyn0_sel.hw,
+ [CLKID_DSU_CLK_DYN0_DIV] = &sm1_dsu_clk_dyn0_div.hw,
+ [CLKID_DSU_CLK_DYN0] = &sm1_dsu_clk_dyn0.hw,
+ [CLKID_DSU_CLK_DYN1_SEL] = &sm1_dsu_clk_dyn1_sel.hw,
+ [CLKID_DSU_CLK_DYN1_DIV] = &sm1_dsu_clk_dyn1_div.hw,
+ [CLKID_DSU_CLK_DYN1] = &sm1_dsu_clk_dyn1.hw,
[CLKID_DSU_CLK_DYN] = &sm1_dsu_clk_dyn.hw,
[CLKID_DSU_CLK_FINAL] = &sm1_dsu_final_clk.hw,
[CLKID_DSU_CLK] = &sm1_dsu_clk.hw,
@@ -5126,269 +5223,13 @@ static struct clk_hw *sm1_hw_clks[] = {
[CLKID_MIPI_DSI_PXCLK] = &g12a_mipi_dsi_pxclk.hw,
};
-/* Convenience table to populate regmap in .probe */
-static struct clk_regmap *const g12a_clk_regmaps[] = {
- &g12a_clk81,
- &g12a_dos,
- &g12a_ddr,
- &g12a_audio_locker,
- &g12a_mipi_dsi_host,
- &g12a_eth_phy,
- &g12a_isa,
- &g12a_pl301,
- &g12a_periphs,
- &g12a_spicc_0,
- &g12a_i2c,
- &g12a_sana,
- &g12a_sd,
- &g12a_rng0,
- &g12a_uart0,
- &g12a_spicc_1,
- &g12a_hiu_reg,
- &g12a_mipi_dsi_phy,
- &g12a_assist_misc,
- &g12a_emmc_a,
- &g12a_emmc_b,
- &g12a_emmc_c,
- &g12a_audio_codec,
- &g12a_audio,
- &g12a_eth_core,
- &g12a_demux,
- &g12a_audio_ififo,
- &g12a_adc,
- &g12a_uart1,
- &g12a_g2d,
- &g12a_reset,
- &g12a_pcie_comb,
- &g12a_parser,
- &g12a_usb_general,
- &g12a_pcie_phy,
- &g12a_ahb_arb0,
- &g12a_ahb_data_bus,
- &g12a_ahb_ctrl_bus,
- &g12a_htx_hdcp22,
- &g12a_htx_pclk,
- &g12a_bt656,
- &g12a_usb1_to_ddr,
- &g12a_mmc_pclk,
- &g12a_uart2,
- &g12a_vpu_intr,
- &g12a_gic,
- &g12a_sd_emmc_a_clk0,
- &g12a_sd_emmc_b_clk0,
- &g12a_sd_emmc_c_clk0,
- &g12a_mpeg_clk_div,
- &g12a_sd_emmc_a_clk0_div,
- &g12a_sd_emmc_b_clk0_div,
- &g12a_sd_emmc_c_clk0_div,
- &g12a_mpeg_clk_sel,
- &g12a_sd_emmc_a_clk0_sel,
- &g12a_sd_emmc_b_clk0_sel,
- &g12a_sd_emmc_c_clk0_sel,
- &g12a_mpll0,
- &g12a_mpll1,
- &g12a_mpll2,
- &g12a_mpll3,
- &g12a_mpll0_div,
- &g12a_mpll1_div,
- &g12a_mpll2_div,
- &g12a_mpll3_div,
- &g12a_fixed_pll,
- &g12a_sys_pll,
- &g12a_gp0_pll,
- &g12a_hifi_pll,
- &g12a_vclk2_venci0,
- &g12a_vclk2_venci1,
- &g12a_vclk2_vencp0,
- &g12a_vclk2_vencp1,
- &g12a_vclk2_venct0,
- &g12a_vclk2_venct1,
- &g12a_vclk2_other,
- &g12a_vclk2_enci,
- &g12a_vclk2_encp,
- &g12a_dac_clk,
- &g12a_aoclk_gate,
- &g12a_iec958_gate,
- &g12a_enc480p,
- &g12a_rng1,
- &g12a_vclk2_enct,
- &g12a_vclk2_encl,
- &g12a_vclk2_venclmmc,
- &g12a_vclk2_vencl,
- &g12a_vclk2_other1,
- &g12a_fixed_pll_dco,
- &g12a_sys_pll_dco,
- &g12a_gp0_pll_dco,
- &g12a_hifi_pll_dco,
- &g12a_fclk_div2,
- &g12a_fclk_div3,
- &g12a_fclk_div4,
- &g12a_fclk_div5,
- &g12a_fclk_div7,
- &g12a_fclk_div2p5,
- &g12a_dma,
- &g12a_efuse,
- &g12a_rom_boot,
- &g12a_reset_sec,
- &g12a_sec_ahb_apb3,
- &g12a_vpu_0_sel,
- &g12a_vpu_0_div,
- &g12a_vpu_0,
- &g12a_vpu_1_sel,
- &g12a_vpu_1_div,
- &g12a_vpu_1,
- &g12a_vpu,
- &g12a_vapb_0_sel,
- &g12a_vapb_0_div,
- &g12a_vapb_0,
- &g12a_vapb_1_sel,
- &g12a_vapb_1_div,
- &g12a_vapb_1,
- &g12a_vapb_sel,
- &g12a_vapb,
- &g12a_hdmi_pll_dco,
- &g12a_hdmi_pll_od,
- &g12a_hdmi_pll_od2,
- &g12a_hdmi_pll,
- &g12a_vid_pll_div,
- &g12a_vid_pll_sel,
- &g12a_vid_pll,
- &g12a_vclk_sel,
- &g12a_vclk2_sel,
- &g12a_vclk_input,
- &g12a_vclk2_input,
- &g12a_vclk_div,
- &g12a_vclk2_div,
- &g12a_vclk,
- &g12a_vclk2,
- &g12a_vclk_div1,
- &g12a_vclk_div2_en,
- &g12a_vclk_div4_en,
- &g12a_vclk_div6_en,
- &g12a_vclk_div12_en,
- &g12a_vclk2_div1,
- &g12a_vclk2_div2_en,
- &g12a_vclk2_div4_en,
- &g12a_vclk2_div6_en,
- &g12a_vclk2_div12_en,
- &g12a_cts_enci_sel,
- &g12a_cts_encp_sel,
- &g12a_cts_encl_sel,
- &g12a_cts_vdac_sel,
- &g12a_hdmi_tx_sel,
- &g12a_cts_enci,
- &g12a_cts_encp,
- &g12a_cts_encl,
- &g12a_cts_vdac,
- &g12a_hdmi_tx,
- &g12a_hdmi_sel,
- &g12a_hdmi_div,
- &g12a_hdmi,
- &g12a_mali_0_sel,
- &g12a_mali_0_div,
- &g12a_mali_0,
- &g12a_mali_1_sel,
- &g12a_mali_1_div,
- &g12a_mali_1,
- &g12a_mali,
- &g12a_mpll_50m,
- &g12a_sys_pll_div16_en,
- &g12a_cpu_clk_premux0,
- &g12a_cpu_clk_mux0_div,
- &g12a_cpu_clk_postmux0,
- &g12a_cpu_clk_premux1,
- &g12a_cpu_clk_mux1_div,
- &g12a_cpu_clk_postmux1,
- &g12a_cpu_clk_dyn,
- &g12a_cpu_clk,
- &g12a_cpu_clk_div16_en,
- &g12a_cpu_clk_apb_div,
- &g12a_cpu_clk_apb,
- &g12a_cpu_clk_atb_div,
- &g12a_cpu_clk_atb,
- &g12a_cpu_clk_axi_div,
- &g12a_cpu_clk_axi,
- &g12a_cpu_clk_trace_div,
- &g12a_cpu_clk_trace,
- &g12a_pcie_pll_od,
- &g12a_pcie_pll_dco,
- &g12a_vdec_1_sel,
- &g12a_vdec_1_div,
- &g12a_vdec_1,
- &g12a_vdec_hevc_sel,
- &g12a_vdec_hevc_div,
- &g12a_vdec_hevc,
- &g12a_vdec_hevcf_sel,
- &g12a_vdec_hevcf_div,
- &g12a_vdec_hevcf,
- &g12a_ts_div,
- &g12a_ts,
- &g12b_cpu_clk,
- &g12b_sys1_pll_dco,
- &g12b_sys1_pll,
- &g12b_sys1_pll_div16_en,
- &g12b_cpub_clk_premux0,
- &g12b_cpub_clk_mux0_div,
- &g12b_cpub_clk_postmux0,
- &g12b_cpub_clk_premux1,
- &g12b_cpub_clk_mux1_div,
- &g12b_cpub_clk_postmux1,
- &g12b_cpub_clk_dyn,
- &g12b_cpub_clk,
- &g12b_cpub_clk_div16_en,
- &g12b_cpub_clk_apb_sel,
- &g12b_cpub_clk_apb,
- &g12b_cpub_clk_atb_sel,
- &g12b_cpub_clk_atb,
- &g12b_cpub_clk_axi_sel,
- &g12b_cpub_clk_axi,
- &g12b_cpub_clk_trace_sel,
- &g12b_cpub_clk_trace,
- &sm1_gp1_pll_dco,
- &sm1_gp1_pll,
- &sm1_dsu_clk_premux0,
- &sm1_dsu_clk_premux1,
- &sm1_dsu_clk_mux0_div,
- &sm1_dsu_clk_postmux0,
- &sm1_dsu_clk_mux1_div,
- &sm1_dsu_clk_postmux1,
- &sm1_dsu_clk_dyn,
- &sm1_dsu_final_clk,
- &sm1_dsu_clk,
- &sm1_cpu1_clk,
- &sm1_cpu2_clk,
- &sm1_cpu3_clk,
- &g12a_spicc0_sclk_sel,
- &g12a_spicc0_sclk_div,
- &g12a_spicc0_sclk,
- &g12a_spicc1_sclk_sel,
- &g12a_spicc1_sclk_div,
- &g12a_spicc1_sclk,
- &sm1_nna_axi_clk_sel,
- &sm1_nna_axi_clk_div,
- &sm1_nna_axi_clk,
- &sm1_nna_core_clk_sel,
- &sm1_nna_core_clk_div,
- &sm1_nna_core_clk,
- &g12a_mipi_dsi_pxclk_sel,
- &g12a_mipi_dsi_pxclk_div,
- &g12a_mipi_dsi_pxclk,
- &g12b_mipi_isp_sel,
- &g12b_mipi_isp_div,
- &g12b_mipi_isp,
- &g12b_mipi_isp_gate,
- &g12b_csi_phy1,
- &g12b_csi_phy0,
-};
-
static const struct reg_sequence g12a_init_regs[] = {
{ .reg = HHI_MPLL_CNTL0, .def = 0x00000543 },
};
#define DVFS_CON_ID "dvfs"
-static int meson_g12a_dvfs_setup_common(struct device *dev,
- struct clk_hw **hws)
+static int g12a_dvfs_setup_common(struct device *dev, struct clk_hw **hws)
{
struct clk *notifier_clk;
struct clk_hw *xtal;
@@ -5397,13 +5238,13 @@ static int meson_g12a_dvfs_setup_common(struct device *dev,
xtal = clk_hw_get_parent_by_index(hws[CLKID_CPU_CLK_DYN1_SEL], 0);
-	/* Setup clock notifier for cpu_clk_postmux0 */
+	/* Setup clock notifier for cpu_clk_dyn0 */
- g12a_cpu_clk_postmux0_nb_data.xtal = xtal;
- notifier_clk = devm_clk_hw_get_clk(dev, &g12a_cpu_clk_postmux0.hw,
+ g12a_cpu_clk_dyn0_nb_data.xtal = xtal;
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12a_cpu_clk_dyn0.hw,
DVFS_CON_ID);
ret = devm_clk_notifier_register(dev, notifier_clk,
- &g12a_cpu_clk_postmux0_nb_data.nb);
+ &g12a_cpu_clk_dyn0_nb_data.nb);
if (ret) {
- dev_err(dev, "failed to register the cpu_clk_postmux0 notifier\n");
+ dev_err(dev, "failed to register the cpu_clk_dyn0 notifier\n");
return ret;
}
@@ -5420,7 +5261,7 @@ static int meson_g12a_dvfs_setup_common(struct device *dev,
return 0;
}
-static int meson_g12b_dvfs_setup(struct platform_device *pdev)
+static int g12b_dvfs_setup(struct platform_device *pdev)
{
struct clk_hw **hws = g12b_hw_clks;
struct device *dev = &pdev->dev;
@@ -5428,7 +5269,7 @@ static int meson_g12b_dvfs_setup(struct platform_device *pdev)
struct clk_hw *xtal;
int ret;
- ret = meson_g12a_dvfs_setup_common(dev, hws);
+ ret = g12a_dvfs_setup_common(dev, hws);
if (ret)
return ret;
@@ -5457,18 +5298,19 @@ static int meson_g12b_dvfs_setup(struct platform_device *pdev)
/* Add notifiers for the second CPU cluster */
-	/* Setup clock notifier for cpub_clk_dyn0 */
+	/* Setup clock notifier for cpub_clk_dyn0 */
- g12b_cpub_clk_postmux0_nb_data.xtal = xtal;
- notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_postmux0.hw,
+ g12b_cpub_clk_dyn0_nb_data.xtal = xtal;
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_dyn0.hw,
DVFS_CON_ID);
ret = devm_clk_notifier_register(dev, notifier_clk,
- &g12b_cpub_clk_postmux0_nb_data.nb);
+ &g12b_cpub_clk_dyn0_nb_data.nb);
if (ret) {
- dev_err(dev, "failed to register the cpub_clk_postmux0 notifier\n");
+ dev_err(dev, "failed to register the cpub_clk_dyn0 notifier\n");
return ret;
}
/* Setup clock notifier for cpub_clk_dyn mux */
- notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_dyn.hw, "dvfs");
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_dyn.hw,
+ DVFS_CON_ID);
ret = devm_clk_notifier_register(dev, notifier_clk,
&g12a_cpu_clk_mux_nb);
if (ret) {
@@ -5497,14 +5339,14 @@ static int meson_g12b_dvfs_setup(struct platform_device *pdev)
return 0;
}
-static int meson_g12a_dvfs_setup(struct platform_device *pdev)
+static int g12a_dvfs_setup(struct platform_device *pdev)
{
struct clk_hw **hws = g12a_hw_clks;
struct device *dev = &pdev->dev;
struct clk *notifier_clk;
int ret;
- ret = meson_g12a_dvfs_setup_common(dev, hws);
+ ret = g12a_dvfs_setup_common(dev, hws);
if (ret)
return ret;
@@ -5529,27 +5371,27 @@ static int meson_g12a_dvfs_setup(struct platform_device *pdev)
return 0;
}
-struct meson_g12a_data {
- const struct meson_eeclkc_data eeclkc_data;
+struct g12a_clkc_data {
+ const struct meson_clkc_data clkc_data;
int (*dvfs_setup)(struct platform_device *pdev);
};
-static int meson_g12a_probe(struct platform_device *pdev)
+static int g12a_clkc_probe(struct platform_device *pdev)
{
- const struct meson_eeclkc_data *eeclkc_data;
- const struct meson_g12a_data *g12a_data;
+ const struct meson_clkc_data *clkc_data;
+ const struct g12a_clkc_data *g12a_data;
int ret;
- eeclkc_data = of_device_get_match_data(&pdev->dev);
- if (!eeclkc_data)
+ clkc_data = of_device_get_match_data(&pdev->dev);
+ if (!clkc_data)
return -EINVAL;
- ret = meson_eeclkc_probe(pdev);
+ ret = meson_clkc_syscon_probe(pdev);
if (ret)
return ret;
- g12a_data = container_of(eeclkc_data, struct meson_g12a_data,
- eeclkc_data);
+ g12a_data = container_of(clkc_data, struct g12a_clkc_data,
+ clkc_data);
if (g12a_data->dvfs_setup)
return g12a_data->dvfs_setup(pdev);
@@ -5557,10 +5399,8 @@ static int meson_g12a_probe(struct platform_device *pdev)
return 0;
}
-static const struct meson_g12a_data g12a_clkc_data = {
- .eeclkc_data = {
- .regmap_clks = g12a_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
+static const struct g12a_clkc_data g12a_clkc_data = {
+ .clkc_data = {
.hw_clks = {
.hws = g12a_hw_clks,
.num = ARRAY_SIZE(g12a_hw_clks),
@@ -5568,58 +5408,54 @@ static const struct meson_g12a_data g12a_clkc_data = {
.init_regs = g12a_init_regs,
.init_count = ARRAY_SIZE(g12a_init_regs),
},
- .dvfs_setup = meson_g12a_dvfs_setup,
+ .dvfs_setup = g12a_dvfs_setup,
};
-static const struct meson_g12a_data g12b_clkc_data = {
- .eeclkc_data = {
- .regmap_clks = g12a_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
+static const struct g12a_clkc_data g12b_clkc_data = {
+ .clkc_data = {
.hw_clks = {
.hws = g12b_hw_clks,
.num = ARRAY_SIZE(g12b_hw_clks),
},
},
- .dvfs_setup = meson_g12b_dvfs_setup,
+ .dvfs_setup = g12b_dvfs_setup,
};
-static const struct meson_g12a_data sm1_clkc_data = {
- .eeclkc_data = {
- .regmap_clks = g12a_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
+static const struct g12a_clkc_data sm1_clkc_data = {
+ .clkc_data = {
.hw_clks = {
.hws = sm1_hw_clks,
.num = ARRAY_SIZE(sm1_hw_clks),
},
},
- .dvfs_setup = meson_g12a_dvfs_setup,
+ .dvfs_setup = g12a_dvfs_setup,
};
-static const struct of_device_id clkc_match_table[] = {
+static const struct of_device_id g12a_clkc_match_table[] = {
{
.compatible = "amlogic,g12a-clkc",
- .data = &g12a_clkc_data.eeclkc_data
+ .data = &g12a_clkc_data.clkc_data
},
{
.compatible = "amlogic,g12b-clkc",
- .data = &g12b_clkc_data.eeclkc_data
+ .data = &g12b_clkc_data.clkc_data
},
{
.compatible = "amlogic,sm1-clkc",
- .data = &sm1_clkc_data.eeclkc_data
+ .data = &sm1_clkc_data.clkc_data
},
{}
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, g12a_clkc_match_table);
-static struct platform_driver g12a_driver = {
- .probe = meson_g12a_probe,
+static struct platform_driver g12a_clkc_driver = {
+ .probe = g12a_clkc_probe,
.driver = {
.name = "g12a-clkc",
- .of_match_table = clkc_match_table,
+ .of_match_table = g12a_clkc_match_table,
},
};
-module_platform_driver(g12a_driver);
+module_platform_driver(g12a_clkc_driver);
MODULE_DESCRIPTION("Amlogic G12/SM1 Main Clock Controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
deleted file mode 100644
index 27df99c4565a..000000000000
--- a/drivers/clk/meson/g12a.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
-/*
- * Copyright (c) 2016 Amlogic, Inc.
- * Author: Michael Turquette <mturquette@baylibre.com>
- *
- * Copyright (c) 2018 Amlogic, inc.
- * Author: Qiufang Dai <qiufang.dai@amlogic.com>
- * Author: Jian Hu <jian.hu@amlogic.com>
- *
- */
-#ifndef __G12A_H
-#define __G12A_H
-
-/*
- * Clock controller register offsets
- *
- * Register offsets from the data sheet must be multiplied by 4 before
- * adding them to the base address to get the right value.
- */
-#define HHI_MIPI_CNTL0 0x000
-#define HHI_MIPI_CNTL1 0x004
-#define HHI_MIPI_CNTL2 0x008
-#define HHI_MIPI_STS 0x00C
-#define HHI_GP0_PLL_CNTL0 0x040
-#define HHI_GP0_PLL_CNTL1 0x044
-#define HHI_GP0_PLL_CNTL2 0x048
-#define HHI_GP0_PLL_CNTL3 0x04C
-#define HHI_GP0_PLL_CNTL4 0x050
-#define HHI_GP0_PLL_CNTL5 0x054
-#define HHI_GP0_PLL_CNTL6 0x058
-#define HHI_GP0_PLL_STS 0x05C
-#define HHI_GP1_PLL_CNTL0 0x060
-#define HHI_GP1_PLL_CNTL1 0x064
-#define HHI_GP1_PLL_CNTL2 0x068
-#define HHI_GP1_PLL_CNTL3 0x06C
-#define HHI_GP1_PLL_CNTL4 0x070
-#define HHI_GP1_PLL_CNTL5 0x074
-#define HHI_GP1_PLL_CNTL6 0x078
-#define HHI_GP1_PLL_STS 0x07C
-#define HHI_PCIE_PLL_CNTL0 0x098
-#define HHI_PCIE_PLL_CNTL1 0x09C
-#define HHI_PCIE_PLL_CNTL2 0x0A0
-#define HHI_PCIE_PLL_CNTL3 0x0A4
-#define HHI_PCIE_PLL_CNTL4 0x0A8
-#define HHI_PCIE_PLL_CNTL5 0x0AC
-#define HHI_PCIE_PLL_STS 0x0B8
-#define HHI_HIFI_PLL_CNTL0 0x0D8
-#define HHI_HIFI_PLL_CNTL1 0x0DC
-#define HHI_HIFI_PLL_CNTL2 0x0E0
-#define HHI_HIFI_PLL_CNTL3 0x0E4
-#define HHI_HIFI_PLL_CNTL4 0x0E8
-#define HHI_HIFI_PLL_CNTL5 0x0EC
-#define HHI_HIFI_PLL_CNTL6 0x0F0
-#define HHI_VIID_CLK_DIV 0x128
-#define HHI_VIID_CLK_CNTL 0x12C
-#define HHI_GCLK_MPEG0 0x140
-#define HHI_GCLK_MPEG1 0x144
-#define HHI_GCLK_MPEG2 0x148
-#define HHI_GCLK_OTHER 0x150
-#define HHI_GCLK_OTHER2 0x154
-#define HHI_SYS_CPU_CLK_CNTL1 0x15c
-#define HHI_VID_CLK_DIV 0x164
-#define HHI_MPEG_CLK_CNTL 0x174
-#define HHI_AUD_CLK_CNTL 0x178
-#define HHI_VID_CLK_CNTL 0x17c
-#define HHI_TS_CLK_CNTL 0x190
-#define HHI_VID_CLK_CNTL2 0x194
-#define HHI_SYS_CPU_CLK_CNTL0 0x19c
-#define HHI_VID_PLL_CLK_DIV 0x1A0
-#define HHI_MALI_CLK_CNTL 0x1b0
-#define HHI_VPU_CLKC_CNTL 0x1b4
-#define HHI_VPU_CLK_CNTL 0x1bC
-#define HHI_ISP_CLK_CNTL 0x1C0
-#define HHI_NNA_CLK_CNTL 0x1C8
-#define HHI_HDMI_CLK_CNTL 0x1CC
-#define HHI_VDEC_CLK_CNTL 0x1E0
-#define HHI_VDEC2_CLK_CNTL 0x1E4
-#define HHI_VDEC3_CLK_CNTL 0x1E8
-#define HHI_VDEC4_CLK_CNTL 0x1EC
-#define HHI_HDCP22_CLK_CNTL 0x1F0
-#define HHI_VAPBCLK_CNTL 0x1F4
-#define HHI_SYS_CPUB_CLK_CNTL1 0x200
-#define HHI_SYS_CPUB_CLK_CNTL 0x208
-#define HHI_VPU_CLKB_CNTL 0x20C
-#define HHI_SYS_CPU_CLK_CNTL2 0x210
-#define HHI_SYS_CPU_CLK_CNTL3 0x214
-#define HHI_SYS_CPU_CLK_CNTL4 0x218
-#define HHI_SYS_CPU_CLK_CNTL5 0x21c
-#define HHI_SYS_CPU_CLK_CNTL6 0x220
-#define HHI_GEN_CLK_CNTL 0x228
-#define HHI_VDIN_MEAS_CLK_CNTL 0x250
-#define HHI_MIPIDSI_PHY_CLK_CNTL 0x254
-#define HHI_NAND_CLK_CNTL 0x25C
-#define HHI_SD_EMMC_CLK_CNTL 0x264
-#define HHI_MPLL_CNTL0 0x278
-#define HHI_MPLL_CNTL1 0x27C
-#define HHI_MPLL_CNTL2 0x280
-#define HHI_MPLL_CNTL3 0x284
-#define HHI_MPLL_CNTL4 0x288
-#define HHI_MPLL_CNTL5 0x28c
-#define HHI_MPLL_CNTL6 0x290
-#define HHI_MPLL_CNTL7 0x294
-#define HHI_MPLL_CNTL8 0x298
-#define HHI_FIX_PLL_CNTL0 0x2A0
-#define HHI_FIX_PLL_CNTL1 0x2A4
-#define HHI_FIX_PLL_CNTL3 0x2AC
-#define HHI_SYS_PLL_CNTL0 0x2f4
-#define HHI_SYS_PLL_CNTL1 0x2f8
-#define HHI_SYS_PLL_CNTL2 0x2fc
-#define HHI_SYS_PLL_CNTL3 0x300
-#define HHI_SYS_PLL_CNTL4 0x304
-#define HHI_SYS_PLL_CNTL5 0x308
-#define HHI_SYS_PLL_CNTL6 0x30c
-#define HHI_HDMI_PLL_CNTL0 0x320
-#define HHI_HDMI_PLL_CNTL1 0x324
-#define HHI_HDMI_PLL_CNTL2 0x328
-#define HHI_HDMI_PLL_CNTL3 0x32c
-#define HHI_HDMI_PLL_CNTL4 0x330
-#define HHI_HDMI_PLL_CNTL5 0x334
-#define HHI_HDMI_PLL_CNTL6 0x338
-#define HHI_SPICC_CLK_CNTL 0x3dc
-#define HHI_SYS1_PLL_CNTL0 0x380
-#define HHI_SYS1_PLL_CNTL1 0x384
-#define HHI_SYS1_PLL_CNTL2 0x388
-#define HHI_SYS1_PLL_CNTL3 0x38c
-#define HHI_SYS1_PLL_CNTL4 0x390
-#define HHI_SYS1_PLL_CNTL5 0x394
-#define HHI_SYS1_PLL_CNTL6 0x398
-
-#endif /* __G12A_H */
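The header removed above documented that datasheet register offsets must be multiplied by 4 before being added to the base address. As a quick arithmetic check of that rule (illustrative macro, not part of the patch): HHI_GCLK_MPEG0 is defined as byte offset 0x140, i.e. datasheet word offset 0x50 scaled by 4.

/* Illustrative only: datasheet word offset -> byte offset used above */
#define HHI_BYTE_OFF(_word_off)	((_word_off) * 4)
/* HHI_BYTE_OFF(0x50) == 0x140 == HHI_GCLK_MPEG0 */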
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index 43940232f718..c7dfb3a06cb5 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -23,31 +23,20 @@
#define AO_RTC_ALT_CLK_CNTL0 0x94
#define AO_RTC_ALT_CLK_CNTL1 0x98
-#define GXBB_AO_GATE(_name, _bit) \
-static struct clk_regmap _name##_ao = { \
- .data = &(struct clk_regmap_gate_data) { \
- .offset = AO_RTI_GEN_CNTL_REG0, \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name "_ao", \
- .ops = &clk_regmap_gate_ops, \
- .parent_data = &(const struct clk_parent_data) { \
- .fw_name = "mpeg-clk", \
- }, \
- .num_parents = 1, \
- .flags = CLK_IGNORE_UNUSED, \
- }, \
-}
+static const struct clk_parent_data gxbb_ao_pclk_parents = { .fw_name = "mpeg-clk" };
-GXBB_AO_GATE(remote, 0);
-GXBB_AO_GATE(i2c_master, 1);
-GXBB_AO_GATE(i2c_slave, 2);
-GXBB_AO_GATE(uart1, 3);
-GXBB_AO_GATE(uart2, 5);
-GXBB_AO_GATE(ir_blaster, 6);
+#define GXBB_AO_PCLK(_name, _bit, _flags) \
+ MESON_PCLK(gxbb_ao_##_name, AO_RTI_GEN_CNTL_REG0, _bit, \
+ &gxbb_ao_pclk_parents, _flags)
-static struct clk_regmap ao_cts_oscin = {
+static GXBB_AO_PCLK(remote, 0, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(i2c_master, 1, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(i2c_slave, 2, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(uart1, 3, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(uart2, 5, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(ir_blaster, 6, CLK_IGNORE_UNUSED);
+
+static struct clk_regmap gxbb_ao_cts_oscin = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTI_PWR_CNTL_REG0,
.bit_idx = 6,
@@ -62,7 +51,7 @@ static struct clk_regmap ao_cts_oscin = {
},
};
-static struct clk_regmap ao_32k_pre = {
+static struct clk_regmap gxbb_ao_32k_pre = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 31,
@@ -70,7 +59,7 @@ static struct clk_regmap ao_32k_pre = {
.hw.init = &(struct clk_init_data){
.name = "ao_32k_pre",
.ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) { &ao_cts_oscin.hw },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_ao_cts_oscin.hw },
.num_parents = 1,
},
};
@@ -85,7 +74,7 @@ static const struct meson_clk_dualdiv_param gxbb_32k_div_table[] = {
}, {}
};
-static struct clk_regmap ao_32k_div = {
+static struct clk_regmap gxbb_ao_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = AO_RTC_ALT_CLK_CNTL0,
@@ -117,12 +106,12 @@ static struct clk_regmap ao_32k_div = {
.hw.init = &(struct clk_init_data){
.name = "ao_32k_div",
.ops = &meson_clk_dualdiv_ops,
- .parent_hws = (const struct clk_hw *[]) { &ao_32k_pre.hw },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_ao_32k_pre.hw },
.num_parents = 1,
},
};
-static struct clk_regmap ao_32k_sel = {
+static struct clk_regmap gxbb_ao_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTC_ALT_CLK_CNTL1,
.mask = 0x1,
@@ -133,15 +122,15 @@ static struct clk_regmap ao_32k_sel = {
.name = "ao_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ao_32k_div.hw,
- &ao_32k_pre.hw
+ &gxbb_ao_32k_div.hw,
+ &gxbb_ao_32k_pre.hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ao_32k = {
+static struct clk_regmap gxbb_ao_32k = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 30,
@@ -149,13 +138,13 @@ static struct clk_regmap ao_32k = {
.hw.init = &(struct clk_init_data){
.name = "ao_32k",
.ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) { &ao_32k_sel.hw },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_ao_32k_sel.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ao_cts_rtc_oscin = {
+static struct clk_regmap gxbb_ao_cts_rtc_oscin = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x7,
@@ -170,14 +159,14 @@ static struct clk_regmap ao_cts_rtc_oscin = {
{ .fw_name = "ext-32k-0", },
{ .fw_name = "ext-32k-1", },
{ .fw_name = "ext-32k-2", },
- { .hw = &ao_32k.hw },
+ { .hw = &gxbb_ao_32k.hw },
},
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ao_clk81 = {
+static struct clk_regmap gxbb_ao_clk81 = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -189,14 +178,14 @@ static struct clk_regmap ao_clk81 = {
.ops = &clk_regmap_mux_ro_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "mpeg-clk", },
- { .hw = &ao_cts_rtc_oscin.hw },
+ { .hw = &gxbb_ao_cts_rtc_oscin.hw },
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ao_cts_cec = {
+static struct clk_regmap gxbb_ao_cts_cec = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_CRT_CLK_CNTL1,
.mask = 0x1,
@@ -221,14 +210,14 @@ static struct clk_regmap ao_cts_cec = {
*/
.parent_data = (const struct clk_parent_data []) {
{ .name = "fixme", .index = -1, },
- { .hw = &ao_cts_rtc_oscin.hw },
+ { .hw = &gxbb_ao_cts_rtc_oscin.hw },
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const unsigned int gxbb_aoclk_reset[] = {
+static const unsigned int gxbb_ao_reset[] = {
[RESET_AO_REMOTE] = 16,
[RESET_AO_I2C_MASTER] = 18,
[RESET_AO_I2C_SLAVE] = 19,
@@ -237,69 +226,52 @@ static const unsigned int gxbb_aoclk_reset[] = {
[RESET_AO_IR_BLASTER] = 23,
};
-static struct clk_regmap *gxbb_aoclk[] = {
- &remote_ao,
- &i2c_master_ao,
- &i2c_slave_ao,
- &uart1_ao,
- &uart2_ao,
- &ir_blaster_ao,
- &ao_cts_oscin,
- &ao_32k_pre,
- &ao_32k_div,
- &ao_32k_sel,
- &ao_32k,
- &ao_cts_rtc_oscin,
- &ao_clk81,
- &ao_cts_cec,
-};
-
-static struct clk_hw *gxbb_aoclk_hw_clks[] = {
- [CLKID_AO_REMOTE] = &remote_ao.hw,
- [CLKID_AO_I2C_MASTER] = &i2c_master_ao.hw,
- [CLKID_AO_I2C_SLAVE] = &i2c_slave_ao.hw,
- [CLKID_AO_UART1] = &uart1_ao.hw,
- [CLKID_AO_UART2] = &uart2_ao.hw,
- [CLKID_AO_IR_BLASTER] = &ir_blaster_ao.hw,
- [CLKID_AO_CEC_32K] = &ao_cts_cec.hw,
- [CLKID_AO_CTS_OSCIN] = &ao_cts_oscin.hw,
- [CLKID_AO_32K_PRE] = &ao_32k_pre.hw,
- [CLKID_AO_32K_DIV] = &ao_32k_div.hw,
- [CLKID_AO_32K_SEL] = &ao_32k_sel.hw,
- [CLKID_AO_32K] = &ao_32k.hw,
- [CLKID_AO_CTS_RTC_OSCIN] = &ao_cts_rtc_oscin.hw,
- [CLKID_AO_CLK81] = &ao_clk81.hw,
+static struct clk_hw *gxbb_ao_hw_clks[] = {
+ [CLKID_AO_REMOTE] = &gxbb_ao_remote.hw,
+ [CLKID_AO_I2C_MASTER] = &gxbb_ao_i2c_master.hw,
+ [CLKID_AO_I2C_SLAVE] = &gxbb_ao_i2c_slave.hw,
+ [CLKID_AO_UART1] = &gxbb_ao_uart1.hw,
+ [CLKID_AO_UART2] = &gxbb_ao_uart2.hw,
+ [CLKID_AO_IR_BLASTER] = &gxbb_ao_ir_blaster.hw,
+ [CLKID_AO_CEC_32K] = &gxbb_ao_cts_cec.hw,
+ [CLKID_AO_CTS_OSCIN] = &gxbb_ao_cts_oscin.hw,
+ [CLKID_AO_32K_PRE] = &gxbb_ao_32k_pre.hw,
+ [CLKID_AO_32K_DIV] = &gxbb_ao_32k_div.hw,
+ [CLKID_AO_32K_SEL] = &gxbb_ao_32k_sel.hw,
+ [CLKID_AO_32K] = &gxbb_ao_32k.hw,
+ [CLKID_AO_CTS_RTC_OSCIN] = &gxbb_ao_cts_rtc_oscin.hw,
+ [CLKID_AO_CLK81] = &gxbb_ao_clk81.hw,
};
-static const struct meson_aoclk_data gxbb_aoclkc_data = {
+static const struct meson_aoclk_data gxbb_ao_clkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
- .num_reset = ARRAY_SIZE(gxbb_aoclk_reset),
- .reset = gxbb_aoclk_reset,
- .num_clks = ARRAY_SIZE(gxbb_aoclk),
- .clks = gxbb_aoclk,
- .hw_clks = {
- .hws = gxbb_aoclk_hw_clks,
- .num = ARRAY_SIZE(gxbb_aoclk_hw_clks),
+ .num_reset = ARRAY_SIZE(gxbb_ao_reset),
+ .reset = gxbb_ao_reset,
+ .clkc_data = {
+ .hw_clks = {
+ .hws = gxbb_ao_hw_clks,
+ .num = ARRAY_SIZE(gxbb_ao_hw_clks),
+ },
},
};
-static const struct of_device_id gxbb_aoclkc_match_table[] = {
+static const struct of_device_id gxbb_ao_clkc_match_table[] = {
{
.compatible = "amlogic,meson-gx-aoclkc",
- .data = &gxbb_aoclkc_data,
+ .data = &gxbb_ao_clkc_data.clkc_data,
},
{ }
};
-MODULE_DEVICE_TABLE(of, gxbb_aoclkc_match_table);
+MODULE_DEVICE_TABLE(of, gxbb_ao_clkc_match_table);
-static struct platform_driver gxbb_aoclkc_driver = {
+static struct platform_driver gxbb_ao_clkc_driver = {
.probe = meson_aoclkc_probe,
.driver = {
.name = "gxbb-aoclkc",
- .of_match_table = gxbb_aoclkc_match_table,
+ .of_match_table = gxbb_ao_clkc_match_table,
},
};
-module_platform_driver(gxbb_aoclkc_driver);
+module_platform_driver(gxbb_ao_clkc_driver);
MODULE_DESCRIPTION("Amlogic GXBB Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
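Both controllers converted so far drop their clk_regmap convenience tables and describe themselves through hw_clks alone, so the shared probe (meson_clkc_syscon_probe() / meson_aoclkc_probe()) must now reach every clk_regmap via that table. A hedged sketch of one plausible shape for that walk; the helper name marked hypothetical is not from this patch:

/*
 * Hedged sketch of the common probe walk implied by this rework.
 * meson_clk_hw_data matches the hw_clks member used above; the
 * clk_regmap_populate() helper name is hypothetical.
 */
static int example_register_hw_clks(struct device *dev, struct regmap *map,
				    const struct meson_clk_hw_data *hw_clks)
{
	unsigned int i;
	int ret;

	for (i = 0; i < hw_clks->num; i++) {
		struct clk_hw *hw = hw_clks->hws[i];

		if (!hw)	/* CLKID tables may contain holes */
			continue;

		/*
		 * Hypothetical helper: no-op for non-regmap clocks such as
		 * fixed factors, otherwise stores the syscon regmap in the
		 * clk_regmap before registration.
		 */
		clk_regmap_populate(hw, map);

		ret = devm_clk_hw_register(dev, hw);
		if (ret)
			return ret;
	}

	return 0;
}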
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 3abb44a2532b..5a229c4ffae1 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -10,78 +10,111 @@
#include <linux/platform_device.h>
#include <linux/module.h>
-#include "gxbb.h"
#include "clk-regmap.h"
#include "clk-pll.h"
#include "clk-mpll.h"
-#include "meson-eeclk.h"
+#include "meson-clkc-utils.h"
#include "vid-pll-div.h"
#include <dt-bindings/clock/gxbb-clkc.h>
-static const struct pll_params_table gxbb_gp0_pll_params_table[] = {
- PLL_PARAMS(32, 1),
- PLL_PARAMS(33, 1),
- PLL_PARAMS(34, 1),
- PLL_PARAMS(35, 1),
- PLL_PARAMS(36, 1),
- PLL_PARAMS(37, 1),
- PLL_PARAMS(38, 1),
- PLL_PARAMS(39, 1),
- PLL_PARAMS(40, 1),
- PLL_PARAMS(41, 1),
- PLL_PARAMS(42, 1),
- PLL_PARAMS(43, 1),
- PLL_PARAMS(44, 1),
- PLL_PARAMS(45, 1),
- PLL_PARAMS(46, 1),
- PLL_PARAMS(47, 1),
- PLL_PARAMS(48, 1),
- PLL_PARAMS(49, 1),
- PLL_PARAMS(50, 1),
- PLL_PARAMS(51, 1),
- PLL_PARAMS(52, 1),
- PLL_PARAMS(53, 1),
- PLL_PARAMS(54, 1),
- PLL_PARAMS(55, 1),
- PLL_PARAMS(56, 1),
- PLL_PARAMS(57, 1),
- PLL_PARAMS(58, 1),
- PLL_PARAMS(59, 1),
- PLL_PARAMS(60, 1),
- PLL_PARAMS(61, 1),
- PLL_PARAMS(62, 1),
- { /* sentinel */ },
-};
-
-static const struct pll_params_table gxl_gp0_pll_params_table[] = {
- PLL_PARAMS(42, 1),
- PLL_PARAMS(43, 1),
- PLL_PARAMS(44, 1),
- PLL_PARAMS(45, 1),
- PLL_PARAMS(46, 1),
- PLL_PARAMS(47, 1),
- PLL_PARAMS(48, 1),
- PLL_PARAMS(49, 1),
- PLL_PARAMS(50, 1),
- PLL_PARAMS(51, 1),
- PLL_PARAMS(52, 1),
- PLL_PARAMS(53, 1),
- PLL_PARAMS(54, 1),
- PLL_PARAMS(55, 1),
- PLL_PARAMS(56, 1),
- PLL_PARAMS(57, 1),
- PLL_PARAMS(58, 1),
- PLL_PARAMS(59, 1),
- PLL_PARAMS(60, 1),
- PLL_PARAMS(61, 1),
- PLL_PARAMS(62, 1),
- PLL_PARAMS(63, 1),
- PLL_PARAMS(64, 1),
- PLL_PARAMS(65, 1),
- PLL_PARAMS(66, 1),
- { /* sentinel */ },
-};
+#define SCR 0x2c
+#define TIMEOUT_VALUE 0x3c
+
+#define HHI_GP0_PLL_CNTL 0x40
+#define HHI_GP0_PLL_CNTL2 0x44
+#define HHI_GP0_PLL_CNTL3 0x48
+#define HHI_GP0_PLL_CNTL4 0x4c
+#define HHI_GP0_PLL_CNTL5 0x50
+#define HHI_GP0_PLL_CNTL1 0x58
+
+#define HHI_XTAL_DIVN_CNTL 0xbc
+#define HHI_TIMER90K 0xec
+
+#define HHI_MEM_PD_REG0 0x100
+#define HHI_MEM_PD_REG1 0x104
+#define HHI_VPU_MEM_PD_REG1 0x108
+#define HHI_VIID_CLK_DIV 0x128
+#define HHI_VIID_CLK_CNTL 0x12c
+
+#define HHI_GCLK_MPEG0 0x140
+#define HHI_GCLK_MPEG1 0x144
+#define HHI_GCLK_MPEG2 0x148
+#define HHI_GCLK_OTHER 0x150
+#define HHI_GCLK_AO 0x154
+#define HHI_SYS_OSCIN_CNTL 0x158
+#define HHI_SYS_CPU_CLK_CNTL1 0x15c
+#define HHI_SYS_CPU_RESET_CNTL 0x160
+#define HHI_VID_CLK_DIV 0x164
+
+#define HHI_MPEG_CLK_CNTL 0x174
+#define HHI_AUD_CLK_CNTL 0x178
+#define HHI_VID_CLK_CNTL 0x17c
+#define HHI_AUD_CLK_CNTL2 0x190
+#define HHI_VID_CLK_CNTL2 0x194
+#define HHI_SYS_CPU_CLK_CNTL0 0x19c
+#define HHI_VID_PLL_CLK_DIV 0x1a0
+#define HHI_AUD_CLK_CNTL3 0x1a4
+#define HHI_MALI_CLK_CNTL 0x1b0
+#define HHI_VPU_CLK_CNTL 0x1bc
+
+#define HHI_HDMI_CLK_CNTL 0x1cc
+#define HHI_VDEC_CLK_CNTL 0x1e0
+#define HHI_VDEC2_CLK_CNTL 0x1e4
+#define HHI_VDEC3_CLK_CNTL 0x1e8
+#define HHI_VDEC4_CLK_CNTL 0x1ec
+#define HHI_HDCP22_CLK_CNTL 0x1f0
+#define HHI_VAPBCLK_CNTL 0x1f4
+
+#define HHI_VPU_CLKB_CNTL 0x20c
+#define HHI_USB_CLK_CNTL 0x220
+#define HHI_32K_CLK_CNTL 0x224
+#define HHI_GEN_CLK_CNTL 0x228
+
+#define HHI_PCM_CLK_CNTL 0x258
+#define HHI_NAND_CLK_CNTL 0x25c
+#define HHI_SD_EMMC_CLK_CNTL 0x264
+
+#define HHI_MPLL_CNTL 0x280
+#define HHI_MPLL_CNTL2 0x284
+#define HHI_MPLL_CNTL3 0x288
+#define HHI_MPLL_CNTL4 0x28c
+#define HHI_MPLL_CNTL5 0x290
+#define HHI_MPLL_CNTL6 0x294
+#define HHI_MPLL_CNTL7 0x298
+#define HHI_MPLL_CNTL8 0x29c
+#define HHI_MPLL_CNTL9 0x2a0
+#define HHI_MPLL_CNTL10 0x2a4
+
+#define HHI_MPLL3_CNTL0 0x2e0
+#define HHI_MPLL3_CNTL1 0x2e4
+#define HHI_VDAC_CNTL0 0x2f4
+#define HHI_VDAC_CNTL1 0x2f8
+
+#define HHI_SYS_PLL_CNTL 0x300
+#define HHI_SYS_PLL_CNTL2 0x304
+#define HHI_SYS_PLL_CNTL3 0x308
+#define HHI_SYS_PLL_CNTL4 0x30c
+#define HHI_SYS_PLL_CNTL5 0x310
+#define HHI_DPLL_TOP_I 0x318
+#define HHI_DPLL_TOP2_I 0x31c
+#define HHI_HDMI_PLL_CNTL 0x320
+#define HHI_HDMI_PLL_CNTL2 0x324
+#define HHI_HDMI_PLL_CNTL3 0x328
+#define HHI_HDMI_PLL_CNTL4 0x32c
+#define HHI_HDMI_PLL_CNTL5 0x330
+#define HHI_HDMI_PLL_CNTL6 0x334
+#define HHI_HDMI_PLL_CNTL_I 0x338
+#define HHI_HDMI_PLL_CNTL7 0x33c
+
+#define HHI_HDMI_PHY_CNTL0 0x3a0
+#define HHI_HDMI_PHY_CNTL1 0x3a4
+#define HHI_HDMI_PHY_CNTL2 0x3a8
+#define HHI_HDMI_PHY_CNTL3 0x3ac
+
+#define HHI_VID_LOCK_CLK_CNTL 0x3c8
+#define HHI_BT656_CLK_CNTL 0x3d4
+#define HHI_SAR_CLK_CNTL 0x3d8
static struct clk_regmap gxbb_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
@@ -426,7 +459,42 @@ static struct clk_regmap gxbb_sys_pll = {
},
};
-static const struct reg_sequence gxbb_gp0_init_regs[] = {
+static const struct pll_params_table gxbb_gp0_pll_params_table[] = {
+ PLL_PARAMS(32, 1),
+ PLL_PARAMS(33, 1),
+ PLL_PARAMS(34, 1),
+ PLL_PARAMS(35, 1),
+ PLL_PARAMS(36, 1),
+ PLL_PARAMS(37, 1),
+ PLL_PARAMS(38, 1),
+ PLL_PARAMS(39, 1),
+ PLL_PARAMS(40, 1),
+ PLL_PARAMS(41, 1),
+ PLL_PARAMS(42, 1),
+ PLL_PARAMS(43, 1),
+ PLL_PARAMS(44, 1),
+ PLL_PARAMS(45, 1),
+ PLL_PARAMS(46, 1),
+ PLL_PARAMS(47, 1),
+ PLL_PARAMS(48, 1),
+ PLL_PARAMS(49, 1),
+ PLL_PARAMS(50, 1),
+ PLL_PARAMS(51, 1),
+ PLL_PARAMS(52, 1),
+ PLL_PARAMS(53, 1),
+ PLL_PARAMS(54, 1),
+ PLL_PARAMS(55, 1),
+ PLL_PARAMS(56, 1),
+ PLL_PARAMS(57, 1),
+ PLL_PARAMS(58, 1),
+ PLL_PARAMS(59, 1),
+ PLL_PARAMS(60, 1),
+ PLL_PARAMS(61, 1),
+ PLL_PARAMS(62, 1),
+ { /* sentinel */ },
+};
+
+static const struct reg_sequence gxbb_gp0_pll_init_regs[] = {
{ .reg = HHI_GP0_PLL_CNTL2, .def = 0x69c80000 },
{ .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a5590c4 },
{ .reg = HHI_GP0_PLL_CNTL4, .def = 0x0000500d },
@@ -460,8 +528,8 @@ static struct clk_regmap gxbb_gp0_pll_dco = {
.width = 1,
},
.table = gxbb_gp0_pll_params_table,
- .init_regs = gxbb_gp0_init_regs,
- .init_count = ARRAY_SIZE(gxbb_gp0_init_regs),
+ .init_regs = gxbb_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(gxbb_gp0_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
@@ -473,7 +541,36 @@ static struct clk_regmap gxbb_gp0_pll_dco = {
},
};
-static const struct reg_sequence gxl_gp0_init_regs[] = {
+static const struct pll_params_table gxl_gp0_pll_params_table[] = {
+ PLL_PARAMS(42, 1),
+ PLL_PARAMS(43, 1),
+ PLL_PARAMS(44, 1),
+ PLL_PARAMS(45, 1),
+ PLL_PARAMS(46, 1),
+ PLL_PARAMS(47, 1),
+ PLL_PARAMS(48, 1),
+ PLL_PARAMS(49, 1),
+ PLL_PARAMS(50, 1),
+ PLL_PARAMS(51, 1),
+ PLL_PARAMS(52, 1),
+ PLL_PARAMS(53, 1),
+ PLL_PARAMS(54, 1),
+ PLL_PARAMS(55, 1),
+ PLL_PARAMS(56, 1),
+ PLL_PARAMS(57, 1),
+ PLL_PARAMS(58, 1),
+ PLL_PARAMS(59, 1),
+ PLL_PARAMS(60, 1),
+ PLL_PARAMS(61, 1),
+ PLL_PARAMS(62, 1),
+ PLL_PARAMS(63, 1),
+ PLL_PARAMS(64, 1),
+ PLL_PARAMS(65, 1),
+ PLL_PARAMS(66, 1),
+ { /* sentinel */ },
+};
+
+static const struct reg_sequence gxl_gp0_pll_init_regs[] = {
{ .reg = HHI_GP0_PLL_CNTL1, .def = 0xc084b000 },
{ .reg = HHI_GP0_PLL_CNTL2, .def = 0xb75020be },
{ .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a59a288 },
@@ -514,8 +611,8 @@ static struct clk_regmap gxl_gp0_pll_dco = {
.width = 1,
},
.table = gxl_gp0_pll_params_table,
- .init_regs = gxl_gp0_init_regs,
- .init_count = ARRAY_SIZE(gxl_gp0_init_regs),
+ .init_regs = gxl_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(gxl_gp0_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
@@ -875,8 +972,9 @@ static struct clk_regmap gxbb_mpll2 = {
},
};
-static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
-static const struct clk_parent_data clk81_parent_data[] = {
+/* clk81 is often referred to as "mpeg_clk" */
+static u32 clk81_parents_val_table[] = { 0, 2, 3, 4, 5, 6, 7 };
+static const struct clk_parent_data clk81_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_fclk_div7.hw },
{ .hw = &gxbb_mpll1.hw },
@@ -886,37 +984,37 @@ static const struct clk_parent_data clk81_parent_data[] = {
{ .hw = &gxbb_fclk_div5.hw },
};
-static struct clk_regmap gxbb_mpeg_clk_sel = {
+static struct clk_regmap gxbb_clk81_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MPEG_CLK_CNTL,
.mask = 0x7,
.shift = 12,
- .table = mux_table_clk81,
+ .table = clk81_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_sel",
+ .name = "clk81_sel",
.ops = &clk_regmap_mux_ro_ops,
/*
* bits 14:12 selects from 8 possible parents:
* xtal, 1'b0 (wtf), fclk_div7, mpll_clkout1, mpll_clkout2,
* fclk_div4, fclk_div3, fclk_div5
*/
- .parent_data = clk81_parent_data,
- .num_parents = ARRAY_SIZE(clk81_parent_data),
+ .parent_data = clk81_parents,
+ .num_parents = ARRAY_SIZE(clk81_parents),
},
};
-static struct clk_regmap gxbb_mpeg_clk_div = {
+static struct clk_regmap gxbb_clk81_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_div",
+ .name = "clk81_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gxbb_mpeg_clk_sel.hw
+ &gxbb_clk81_sel.hw
},
.num_parents = 1,
},
@@ -932,7 +1030,7 @@ static struct clk_regmap gxbb_clk81 = {
.name = "clk81",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gxbb_mpeg_clk_div.hw
+ &gxbb_clk81_div.hw
},
.num_parents = 1,
.flags = CLK_IS_CRITICAL,
@@ -997,7 +1095,7 @@ static struct clk_regmap gxbb_sar_adc_clk = {
* switches to the "inactive" one when CLK_SET_RATE_GATE is set.
*/
-static const struct clk_parent_data gxbb_mali_0_1_parent_data[] = {
+static const struct clk_parent_data gxbb_mali_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_gp0_pll.hw },
{ .hw = &gxbb_mpll2.hw },
@@ -1017,8 +1115,8 @@ static struct clk_regmap gxbb_mali_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_mali_0_1_parent_data,
- .num_parents = 8,
+ .parent_data = gxbb_mali_parents,
+ .num_parents = ARRAY_SIZE(gxbb_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -1071,8 +1169,8 @@ static struct clk_regmap gxbb_mali_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_mali_0_1_parent_data,
- .num_parents = 8,
+ .parent_data = gxbb_mali_parents,
+ .num_parents = ARRAY_SIZE(gxbb_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -1116,11 +1214,6 @@ static struct clk_regmap gxbb_mali_1 = {
},
};
-static const struct clk_hw *gxbb_mali_parent_hws[] = {
- &gxbb_mali_0.hw,
- &gxbb_mali_1.hw,
-};
-
static struct clk_regmap gxbb_mali = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MALI_CLK_CNTL,
@@ -1130,29 +1223,35 @@ static struct clk_regmap gxbb_mali = {
.hw.init = &(struct clk_init_data){
.name = "mali",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_mali_parent_hws,
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_mali_0.hw,
+ &gxbb_mali_1.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
+static u32 gxbb_cts_mclk_parents_val_table[] = { 1, 2, 3 };
+static const struct clk_hw *gxbb_cts_mclk_parents[] = {
+ &gxbb_mpll0.hw,
+ &gxbb_mpll1.hw,
+ &gxbb_mpll2.hw,
+};
+
static struct clk_regmap gxbb_cts_amclk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_AUD_CLK_CNTL,
.mask = 0x3,
.shift = 9,
- .table = (u32[]){ 1, 2, 3 },
+ .table = gxbb_cts_mclk_parents_val_table,
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &gxbb_mpll0.hw,
- &gxbb_mpll1.hw,
- &gxbb_mpll2.hw,
- },
- .num_parents = 3,
+ .parent_hws = gxbb_cts_mclk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_mclk_parents),
},
};
@@ -1195,18 +1294,14 @@ static struct clk_regmap gxbb_cts_mclk_i958_sel = {
.offset = HHI_AUD_CLK_CNTL2,
.mask = 0x3,
.shift = 25,
- .table = (u32[]){ 1, 2, 3 },
+ .table = gxbb_cts_mclk_parents_val_table,
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &gxbb_mpll0.hw,
- &gxbb_mpll1.hw,
- &gxbb_mpll2.hw,
- },
- .num_parents = 3,
+ .parent_hws = gxbb_cts_mclk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_mclk_parents),
},
};
@@ -1271,7 +1366,7 @@ static struct clk_regmap gxbb_cts_i958 = {
* This clock does not exist yet in this controller or the AO one
*/
static u32 gxbb_32k_clk_parents_val_table[] = { 0, 2, 3 };
-static const struct clk_parent_data gxbb_32k_clk_parent_data[] = {
+static const struct clk_parent_data gxbb_32k_clk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_fclk_div3.hw },
{ .hw = &gxbb_fclk_div5.hw },
@@ -1283,11 +1378,11 @@ static struct clk_regmap gxbb_32k_clk_sel = {
.mask = 0x3,
.shift = 16,
.table = gxbb_32k_clk_parents_val_table,
- },
+ },
.hw.init = &(struct clk_init_data){
.name = "32k_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_32k_clk_parent_data,
+ .parent_data = gxbb_32k_clk_parents,
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1326,7 +1421,7 @@ static struct clk_regmap gxbb_32k_clk = {
},
};
-static const struct clk_parent_data gxbb_sd_emmc_clk0_parent_data[] = {
+static const struct clk_parent_data gxbb_sd_emmc_clk0_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_fclk_div2.hw },
{ .hw = &gxbb_fclk_div3.hw },
@@ -1335,7 +1430,7 @@ static const struct clk_parent_data gxbb_sd_emmc_clk0_parent_data[] = {
/*
* Following these parent clocks, we should also have had mpll2, mpll3
* and gp0_pll but these clocks are too precious to be used here. All
- * the necessary rates for MMC and NAND operation can be acheived using
+ * the necessary rates for MMC and NAND operation can be achieved using
* xtal or fclk_div clocks
*/
};
@@ -1350,8 +1445,8 @@ static struct clk_regmap gxbb_sd_emmc_a_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_data),
+ .parent_data = gxbb_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1400,8 +1495,8 @@ static struct clk_regmap gxbb_sd_emmc_b_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_data),
+ .parent_data = gxbb_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1450,8 +1545,8 @@ static struct clk_regmap gxbb_sd_emmc_c_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_data),
+ .parent_data = gxbb_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1492,7 +1587,7 @@ static struct clk_regmap gxbb_sd_emmc_c_clk0 = {
/* VPU Clock */
-static const struct clk_hw *gxbb_vpu_parent_hws[] = {
+static const struct clk_hw *gxbb_vpu_parents[] = {
&gxbb_fclk_div4.hw,
&gxbb_fclk_div3.hw,
&gxbb_fclk_div5.hw,
@@ -1512,8 +1607,8 @@ static struct clk_regmap gxbb_vpu_0_sel = {
* bits 9:10 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_hws = gxbb_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vpu_parent_hws),
+ .parent_hws = gxbb_vpu_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1560,8 +1655,8 @@ static struct clk_regmap gxbb_vpu_1_sel = {
* bits 25:26 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_hws = gxbb_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vpu_parent_hws),
+ .parent_hws = gxbb_vpu_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1619,7 +1714,7 @@ static struct clk_regmap gxbb_vpu = {
/* VAPB Clock */
-static const struct clk_hw *gxbb_vapb_parent_hws[] = {
+static const struct clk_hw *gxbb_vapb_parents[] = {
&gxbb_fclk_div4.hw,
&gxbb_fclk_div3.hw,
&gxbb_fclk_div5.hw,
@@ -1639,8 +1734,8 @@ static struct clk_regmap gxbb_vapb_0_sel = {
* bits 9:10 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_hws = gxbb_vapb_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vapb_parent_hws),
+ .parent_hws = gxbb_vapb_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vapb_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1691,8 +1786,8 @@ static struct clk_regmap gxbb_vapb_1_sel = {
* bits 25:26 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_hws = gxbb_vapb_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vapb_parent_hws),
+ .parent_hws = gxbb_vapb_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vapb_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1800,7 +1895,7 @@ static struct clk_regmap gxbb_vid_pll_div = {
},
};
-static const struct clk_parent_data gxbb_vid_pll_parent_data[] = {
+static const struct clk_parent_data gxbb_vid_pll_parents[] = {
{ .hw = &gxbb_vid_pll_div.hw },
/*
* Note:
@@ -1825,8 +1920,8 @@ static struct clk_regmap gxbb_vid_pll_sel = {
* bit 18 selects from 2 possible parents:
* vid_pll_div or hdmi_pll
*/
- .parent_data = gxbb_vid_pll_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_vid_pll_parent_data),
+ .parent_data = gxbb_vid_pll_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vid_pll_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1847,7 +1942,7 @@ static struct clk_regmap gxbb_vid_pll = {
},
};
-static const struct clk_hw *gxbb_vclk_parent_hws[] = {
+static const struct clk_hw *gxbb_vclk_parents[] = {
&gxbb_vid_pll.hw,
&gxbb_fclk_div4.hw,
&gxbb_fclk_div3.hw,
@@ -1871,8 +1966,8 @@ static struct clk_regmap gxbb_vclk_sel = {
* vid_pll, fclk_div4, fclk_div3, fclk_div5,
* vid_pll, fclk_div7, mp1
*/
- .parent_hws = gxbb_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vclk_parent_hws),
+ .parent_hws = gxbb_vclk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1891,8 +1986,8 @@ static struct clk_regmap gxbb_vclk2_sel = {
* vid_pll, fclk_div4, fclk_div3, fclk_div5,
* vid_pll, fclk_div7, mp1
*/
- .parent_hws = gxbb_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vclk_parent_hws),
+ .parent_hws = gxbb_vclk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2231,8 +2326,8 @@ static struct clk_fixed_factor gxbb_vclk2_div12 = {
},
};
-static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *gxbb_cts_parent_hws[] = {
+static u32 gxbb_cts_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *gxbb_cts_parents[] = {
&gxbb_vclk_div1.hw,
&gxbb_vclk_div2.hw,
&gxbb_vclk_div4.hw,
@@ -2250,13 +2345,13 @@ static struct clk_regmap gxbb_cts_enci_sel = {
.offset = HHI_VID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = gxbb_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_cts_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_cts_parent_hws),
+ .parent_hws = gxbb_cts_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2266,13 +2361,13 @@ static struct clk_regmap gxbb_cts_encp_sel = {
.offset = HHI_VID_CLK_DIV,
.mask = 0xf,
.shift = 20,
- .table = mux_table_cts_sel,
+ .table = gxbb_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_cts_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_cts_parent_hws),
+ .parent_hws = gxbb_cts_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2282,50 +2377,13 @@ static struct clk_regmap gxbb_cts_vdac_sel = {
.offset = HHI_VIID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = gxbb_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_vdac_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_cts_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_cts_parent_hws),
- .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
- },
-};
-
-/* TOFIX: add support for cts_tcon */
-static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *gxbb_cts_hdmi_tx_parent_hws[] = {
- &gxbb_vclk_div1.hw,
- &gxbb_vclk_div2.hw,
- &gxbb_vclk_div4.hw,
- &gxbb_vclk_div6.hw,
- &gxbb_vclk_div12.hw,
- &gxbb_vclk2_div1.hw,
- &gxbb_vclk2_div2.hw,
- &gxbb_vclk2_div4.hw,
- &gxbb_vclk2_div6.hw,
- &gxbb_vclk2_div12.hw,
-};
-
-static struct clk_regmap gxbb_hdmi_tx_sel = {
- .data = &(struct clk_regmap_mux_data){
- .offset = HHI_HDMI_CLK_CNTL,
- .mask = 0xf,
- .shift = 16,
- .table = mux_table_hdmi_tx_sel,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_tx_sel",
- .ops = &clk_regmap_mux_ops,
- /*
- * bits 31:28 selects from 12 possible parents:
- * vclk_div1, vclk_div2, vclk_div4, vclk_div6, vclk_div12
- * vclk2_div1, vclk2_div2, vclk2_div4, vclk2_div6, vclk2_div12,
- * cts_tcon
- */
- .parent_hws = gxbb_cts_hdmi_tx_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_cts_hdmi_tx_parent_hws),
+ .parent_hws = gxbb_cts_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2378,6 +2436,43 @@ static struct clk_regmap gxbb_cts_vdac = {
},
};
+/* TOFIX: add support for cts_tcon */
+static u32 gxbb_hdmi_tx_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *gxbb_hdmi_tx_parents[] = {
+ &gxbb_vclk_div1.hw,
+ &gxbb_vclk_div2.hw,
+ &gxbb_vclk_div4.hw,
+ &gxbb_vclk_div6.hw,
+ &gxbb_vclk_div12.hw,
+ &gxbb_vclk2_div1.hw,
+ &gxbb_vclk2_div2.hw,
+ &gxbb_vclk2_div4.hw,
+ &gxbb_vclk2_div6.hw,
+ &gxbb_vclk2_div12.hw,
+};
+
+static struct clk_regmap gxbb_hdmi_tx_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .mask = 0xf,
+ .shift = 16,
+ .table = gxbb_hdmi_tx_parents_val_table,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_tx_sel",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * bits 31:28 selects from 12 possible parents:
+ * vclk_div1, vclk_div2, vclk_div4, vclk_div6, vclk_div12
+ * vclk2_div1, vclk2_div2, vclk2_div4, vclk2_div6, vclk2_div12,
+ * cts_tcon
+ */
+ .parent_hws = gxbb_hdmi_tx_parents,
+ .num_parents = ARRAY_SIZE(gxbb_hdmi_tx_parents),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
static struct clk_regmap gxbb_hdmi_tx = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL2,
@@ -2396,7 +2491,7 @@ static struct clk_regmap gxbb_hdmi_tx = {
/* HDMI Clocks */
-static const struct clk_parent_data gxbb_hdmi_parent_data[] = {
+static const struct clk_parent_data gxbb_hdmi_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_fclk_div4.hw },
{ .hw = &gxbb_fclk_div3.hw },
@@ -2413,8 +2508,8 @@ static struct clk_regmap gxbb_hdmi_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_hdmi_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_hdmi_parent_data),
+ .parent_data = gxbb_hdmi_parents,
+ .num_parents = ARRAY_SIZE(gxbb_hdmi_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2450,7 +2545,7 @@ static struct clk_regmap gxbb_hdmi = {
/* VDEC clocks */
-static const struct clk_hw *gxbb_vdec_parent_hws[] = {
+static const struct clk_hw *gxbb_vdec_parents[] = {
&gxbb_fclk_div4.hw,
&gxbb_fclk_div3.hw,
&gxbb_fclk_div5.hw,
@@ -2467,8 +2562,8 @@ static struct clk_regmap gxbb_vdec_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vdec_parent_hws),
+ .parent_hws = gxbb_vdec_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2517,8 +2612,8 @@ static struct clk_regmap gxbb_vdec_hevc_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vdec_parent_hws),
+ .parent_hws = gxbb_vdec_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2557,9 +2652,8 @@ static struct clk_regmap gxbb_vdec_hevc = {
},
};
-static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
- 9, 10, 11, 13, 14, };
-static const struct clk_parent_data gen_clk_parent_data[] = {
+static u32 gxbb_gen_clk_parents_val_table[] = { 0, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, };
+static const struct clk_parent_data gxbb_gen_clk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_vdec_1.hw },
{ .hw = &gxbb_vdec_hevc.hw },
@@ -2578,7 +2672,7 @@ static struct clk_regmap gxbb_gen_clk_sel = {
.offset = HHI_GEN_CLK_CNTL,
.mask = 0xf,
.shift = 12,
- .table = mux_table_gen_clk,
+ .table = gxbb_gen_clk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "gen_clk_sel",
@@ -2589,8 +2683,8 @@ static struct clk_regmap gxbb_gen_clk_sel = {
* vid_pll, vid2_pll (hevc), mpll0, mpll1, mpll2, fdiv4,
* fdiv3, fdiv5, [cts_msr_clk], fdiv7, gp0_pll
*/
- .parent_data = gen_clk_parent_data,
- .num_parents = ARRAY_SIZE(gen_clk_parent_data),
+ .parent_data = gxbb_gen_clk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_gen_clk_parents),
},
};
@@ -2627,100 +2721,118 @@ static struct clk_regmap gxbb_gen_clk = {
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &gxbb_clk81.hw)
-
-/* Everything Else (EE) domain gates */
-static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0);
-static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1);
-static MESON_GATE(gxbb_isa, HHI_GCLK_MPEG0, 5);
-static MESON_GATE(gxbb_pl301, HHI_GCLK_MPEG0, 6);
-static MESON_GATE(gxbb_periphs, HHI_GCLK_MPEG0, 7);
-static MESON_GATE(gxbb_spicc, HHI_GCLK_MPEG0, 8);
-static MESON_GATE(gxbb_i2c, HHI_GCLK_MPEG0, 9);
-static MESON_GATE(gxbb_sana, HHI_GCLK_MPEG0, 10);
-static MESON_GATE(gxbb_smart_card, HHI_GCLK_MPEG0, 11);
-static MESON_GATE(gxbb_rng0, HHI_GCLK_MPEG0, 12);
-static MESON_GATE(gxbb_uart0, HHI_GCLK_MPEG0, 13);
-static MESON_GATE(gxbb_sdhc, HHI_GCLK_MPEG0, 14);
-static MESON_GATE(gxbb_stream, HHI_GCLK_MPEG0, 15);
-static MESON_GATE(gxbb_async_fifo, HHI_GCLK_MPEG0, 16);
-static MESON_GATE(gxbb_sdio, HHI_GCLK_MPEG0, 17);
-static MESON_GATE(gxbb_abuf, HHI_GCLK_MPEG0, 18);
-static MESON_GATE(gxbb_hiu_iface, HHI_GCLK_MPEG0, 19);
-static MESON_GATE(gxbb_assist_misc, HHI_GCLK_MPEG0, 23);
-static MESON_GATE(gxbb_emmc_a, HHI_GCLK_MPEG0, 24);
-static MESON_GATE(gxbb_emmc_b, HHI_GCLK_MPEG0, 25);
-static MESON_GATE(gxbb_emmc_c, HHI_GCLK_MPEG0, 26);
-static MESON_GATE(gxl_acodec, HHI_GCLK_MPEG0, 28);
-static MESON_GATE(gxbb_spi, HHI_GCLK_MPEG0, 30);
-
-static MESON_GATE(gxbb_i2s_spdif, HHI_GCLK_MPEG1, 2);
-static MESON_GATE(gxbb_eth, HHI_GCLK_MPEG1, 3);
-static MESON_GATE(gxbb_demux, HHI_GCLK_MPEG1, 4);
-static MESON_GATE(gxbb_blkmv, HHI_GCLK_MPEG1, 14);
-static MESON_GATE(gxbb_aiu, HHI_GCLK_MPEG1, 15);
-static MESON_GATE(gxbb_uart1, HHI_GCLK_MPEG1, 16);
-static MESON_GATE(gxbb_g2d, HHI_GCLK_MPEG1, 20);
-static MESON_GATE(gxbb_usb0, HHI_GCLK_MPEG1, 21);
-static MESON_GATE(gxbb_usb1, HHI_GCLK_MPEG1, 22);
-static MESON_GATE(gxbb_reset, HHI_GCLK_MPEG1, 23);
-static MESON_GATE(gxbb_nand, HHI_GCLK_MPEG1, 24);
-static MESON_GATE(gxbb_dos_parser, HHI_GCLK_MPEG1, 25);
-static MESON_GATE(gxbb_usb, HHI_GCLK_MPEG1, 26);
-static MESON_GATE(gxbb_vdin1, HHI_GCLK_MPEG1, 28);
-static MESON_GATE(gxbb_ahb_arb0, HHI_GCLK_MPEG1, 29);
-static MESON_GATE(gxbb_efuse, HHI_GCLK_MPEG1, 30);
-static MESON_GATE(gxbb_boot_rom, HHI_GCLK_MPEG1, 31);
-
-static MESON_GATE(gxbb_ahb_data_bus, HHI_GCLK_MPEG2, 1);
-static MESON_GATE(gxbb_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
-static MESON_GATE(gxbb_hdmi_intr_sync, HHI_GCLK_MPEG2, 3);
-static MESON_GATE(gxbb_hdmi_pclk, HHI_GCLK_MPEG2, 4);
-static MESON_GATE(gxbb_usb1_ddr_bridge, HHI_GCLK_MPEG2, 8);
-static MESON_GATE(gxbb_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9);
-static MESON_GATE(gxbb_mmc_pclk, HHI_GCLK_MPEG2, 11);
-static MESON_GATE(gxbb_dvin, HHI_GCLK_MPEG2, 12);
-static MESON_GATE(gxbb_uart2, HHI_GCLK_MPEG2, 15);
-static MESON_GATE(gxbb_sar_adc, HHI_GCLK_MPEG2, 22);
-static MESON_GATE(gxbb_vpu_intr, HHI_GCLK_MPEG2, 25);
-static MESON_GATE(gxbb_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
-static MESON_GATE(gxbb_clk81_a53, HHI_GCLK_MPEG2, 29);
-
-static MESON_GATE(gxbb_vclk2_venci0, HHI_GCLK_OTHER, 1);
-static MESON_GATE(gxbb_vclk2_venci1, HHI_GCLK_OTHER, 2);
-static MESON_GATE(gxbb_vclk2_vencp0, HHI_GCLK_OTHER, 3);
-static MESON_GATE(gxbb_vclk2_vencp1, HHI_GCLK_OTHER, 4);
-static MESON_GATE(gxbb_gclk_venci_int0, HHI_GCLK_OTHER, 8);
-static MESON_GATE(gxbb_gclk_vencp_int, HHI_GCLK_OTHER, 9);
-static MESON_GATE(gxbb_dac_clk, HHI_GCLK_OTHER, 10);
-static MESON_GATE(gxbb_aoclk_gate, HHI_GCLK_OTHER, 14);
-static MESON_GATE(gxbb_iec958_gate, HHI_GCLK_OTHER, 16);
-static MESON_GATE(gxbb_enc480p, HHI_GCLK_OTHER, 20);
-static MESON_GATE(gxbb_rng1, HHI_GCLK_OTHER, 21);
-static MESON_GATE(gxbb_gclk_venci_int1, HHI_GCLK_OTHER, 22);
-static MESON_GATE(gxbb_vclk2_venclmcc, HHI_GCLK_OTHER, 24);
-static MESON_GATE(gxbb_vclk2_vencl, HHI_GCLK_OTHER, 25);
-static MESON_GATE(gxbb_vclk_other, HHI_GCLK_OTHER, 26);
-static MESON_GATE(gxbb_edp, HHI_GCLK_OTHER, 31);
+static const struct clk_parent_data gxbb_pclk_parents = { .hw = &gxbb_clk81.hw };
+
+#define GXBB_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(_name, _reg, _bit, &gxbb_pclk_parents, _flags)
+
+/*
+ * Everything Else (EE) domain gates
+ *
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historical
+ * reasons. Users are encouraged to test without it and submit changes to:
+ * - remove the flag if it is not necessary,
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ *   if appropriate, or
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ *   for a particular clock.
+ */
+static GXBB_PCLK(gxbb_ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_dos, HHI_GCLK_MPEG0, 1, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_isa, HHI_GCLK_MPEG0, 5, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_pl301, HHI_GCLK_MPEG0, 6, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_periphs, HHI_GCLK_MPEG0, 7, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_spicc, HHI_GCLK_MPEG0, 8, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_i2c, HHI_GCLK_MPEG0, 9, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sana, HHI_GCLK_MPEG0, 10, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_smart_card, HHI_GCLK_MPEG0, 11, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_rng0, HHI_GCLK_MPEG0, 12, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_uart0, HHI_GCLK_MPEG0, 13, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sdhc, HHI_GCLK_MPEG0, 14, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_stream, HHI_GCLK_MPEG0, 15, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_async_fifo, HHI_GCLK_MPEG0, 16, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sdio, HHI_GCLK_MPEG0, 17, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_abuf, HHI_GCLK_MPEG0, 18, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_hiu_iface, HHI_GCLK_MPEG0, 19, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_assist_misc, HHI_GCLK_MPEG0, 23, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_emmc_a, HHI_GCLK_MPEG0, 24, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_emmc_b, HHI_GCLK_MPEG0, 25, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_emmc_c, HHI_GCLK_MPEG0, 26, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxl_acodec, HHI_GCLK_MPEG0, 28, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_spi, HHI_GCLK_MPEG0, 30, CLK_IGNORE_UNUSED);
+
+static GXBB_PCLK(gxbb_i2s_spdif, HHI_GCLK_MPEG1, 2, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_eth, HHI_GCLK_MPEG1, 3, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_demux, HHI_GCLK_MPEG1, 4, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_blkmv, HHI_GCLK_MPEG1, 14, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_aiu, HHI_GCLK_MPEG1, 15, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_uart1, HHI_GCLK_MPEG1, 16, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_g2d, HHI_GCLK_MPEG1, 20, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb0, HHI_GCLK_MPEG1, 21, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb1, HHI_GCLK_MPEG1, 22, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_reset, HHI_GCLK_MPEG1, 23, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_nand, HHI_GCLK_MPEG1, 24, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_dos_parser, HHI_GCLK_MPEG1, 25, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb, HHI_GCLK_MPEG1, 26, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vdin1, HHI_GCLK_MPEG1, 28, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ahb_arb0, HHI_GCLK_MPEG1, 29, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_efuse, HHI_GCLK_MPEG1, 30, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_boot_rom, HHI_GCLK_MPEG1, 31, CLK_IGNORE_UNUSED);
+
+static GXBB_PCLK(gxbb_ahb_data_bus, HHI_GCLK_MPEG2, 1, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_hdmi_intr_sync, HHI_GCLK_MPEG2, 3, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_hdmi_pclk, HHI_GCLK_MPEG2, 4, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb1_ddr_bridge, HHI_GCLK_MPEG2, 8, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_mmc_pclk, HHI_GCLK_MPEG2, 11, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_dvin, HHI_GCLK_MPEG2, 12, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_uart2, HHI_GCLK_MPEG2, 15, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sar_adc, HHI_GCLK_MPEG2, 22, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vpu_intr, HHI_GCLK_MPEG2, 25, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_clk81_a53, HHI_GCLK_MPEG2, 29, CLK_IGNORE_UNUSED);
+
+static GXBB_PCLK(gxbb_vclk2_venci0, HHI_GCLK_OTHER, 1, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_venci1, HHI_GCLK_OTHER, 2, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_vencp0, HHI_GCLK_OTHER, 3, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_vencp1, HHI_GCLK_OTHER, 4, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_gclk_venci_int0, HHI_GCLK_OTHER, 8, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_gclk_vencp_int, HHI_GCLK_OTHER, 9, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_dac_clk, HHI_GCLK_OTHER, 10, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_aoclk_gate, HHI_GCLK_OTHER, 14, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_iec958_gate, HHI_GCLK_OTHER, 16, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_enc480p, HHI_GCLK_OTHER, 20, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_rng1, HHI_GCLK_OTHER, 21, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_gclk_venci_int1, HHI_GCLK_OTHER, 22, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_venclmcc, HHI_GCLK_OTHER, 24, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_vencl, HHI_GCLK_OTHER, 25, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk_other, HHI_GCLK_OTHER, 26, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_edp, HHI_GCLK_OTHER, 31, CLK_IGNORE_UNUSED);
/* Always On (AO) domain gates */
-static MESON_GATE(gxbb_ao_media_cpu, HHI_GCLK_AO, 0);
-static MESON_GATE(gxbb_ao_ahb_sram, HHI_GCLK_AO, 1);
-static MESON_GATE(gxbb_ao_ahb_bus, HHI_GCLK_AO, 2);
-static MESON_GATE(gxbb_ao_iface, HHI_GCLK_AO, 3);
-static MESON_GATE(gxbb_ao_i2c, HHI_GCLK_AO, 4);
+static GXBB_PCLK(gxbb_ao_media_cpu, HHI_GCLK_AO, 0, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ao_ahb_sram, HHI_GCLK_AO, 1, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ao_ahb_bus, HHI_GCLK_AO, 2, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ao_iface, HHI_GCLK_AO, 3, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ao_i2c, HHI_GCLK_AO, 4, CLK_IGNORE_UNUSED);
/* AIU gates */
-static MESON_PCLK(gxbb_aiu_glue, HHI_GCLK_MPEG1, 6, &gxbb_aiu.hw);
-static MESON_PCLK(gxbb_iec958, HHI_GCLK_MPEG1, 7, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_i2s_out, HHI_GCLK_MPEG1, 8, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_amclk, HHI_GCLK_MPEG1, 9, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_aififo2, HHI_GCLK_MPEG1, 10, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_mixer, HHI_GCLK_MPEG1, 11, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_mixer_iface, HHI_GCLK_MPEG1, 12, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_adc, HHI_GCLK_MPEG1, 13, &gxbb_aiu_glue.hw);
+static const struct clk_parent_data gxbb_aiu_glue_parents = { .hw = &gxbb_aiu.hw };
+static MESON_PCLK(gxbb_aiu_glue, HHI_GCLK_MPEG1, 6, &gxbb_aiu_glue_parents, CLK_IGNORE_UNUSED);
+
+static const struct clk_parent_data gxbb_aiu_pclk_parents = { .hw = &gxbb_aiu_glue.hw };
+#define GXBB_AIU_PCLK(_name, _bit, _flags) \
+ MESON_PCLK(_name, HHI_GCLK_MPEG1, _bit, &gxbb_aiu_pclk_parents, _flags)
+
+static GXBB_AIU_PCLK(gxbb_iec958, 7, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_i2s_out, 8, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_amclk, 9, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_aififo2, 10, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_mixer, 11, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_mixer_iface, 12, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_adc, 13, CLK_IGNORE_UNUSED);
/* Array of all clocks provided by this provider */
@@ -2734,8 +2846,8 @@ static struct clk_hw *gxbb_hw_clks[] = {
[CLKID_FCLK_DIV5] = &gxbb_fclk_div5.hw,
[CLKID_FCLK_DIV7] = &gxbb_fclk_div7.hw,
[CLKID_GP0_PLL] = &gxbb_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &gxbb_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &gxbb_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &gxbb_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &gxbb_clk81_div.hw,
[CLKID_CLK81] = &gxbb_clk81.hw,
[CLKID_MPLL0] = &gxbb_mpll0.hw,
[CLKID_MPLL1] = &gxbb_mpll1.hw,
@@ -2942,8 +3054,8 @@ static struct clk_hw *gxl_hw_clks[] = {
[CLKID_FCLK_DIV5] = &gxbb_fclk_div5.hw,
[CLKID_FCLK_DIV7] = &gxbb_fclk_div7.hw,
[CLKID_GP0_PLL] = &gxbb_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &gxbb_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &gxbb_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &gxbb_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &gxbb_clk81_div.hw,
[CLKID_CLK81] = &gxbb_clk81.hw,
[CLKID_MPLL0] = &gxbb_mpll0.hw,
[CLKID_MPLL1] = &gxbb_mpll1.hw,
@@ -3140,428 +3252,35 @@ static struct clk_hw *gxl_hw_clks[] = {
[CLKID_ACODEC] = &gxl_acodec.hw,
};
-static struct clk_regmap *const gxbb_clk_regmaps[] = {
- &gxbb_clk81,
- &gxbb_ddr,
- &gxbb_dos,
- &gxbb_isa,
- &gxbb_pl301,
- &gxbb_periphs,
- &gxbb_spicc,
- &gxbb_i2c,
- &gxbb_sar_adc,
- &gxbb_smart_card,
- &gxbb_rng0,
- &gxbb_uart0,
- &gxbb_sdhc,
- &gxbb_stream,
- &gxbb_async_fifo,
- &gxbb_sdio,
- &gxbb_abuf,
- &gxbb_hiu_iface,
- &gxbb_assist_misc,
- &gxbb_spi,
- &gxbb_i2s_spdif,
- &gxbb_eth,
- &gxbb_demux,
- &gxbb_aiu_glue,
- &gxbb_iec958,
- &gxbb_i2s_out,
- &gxbb_amclk,
- &gxbb_aififo2,
- &gxbb_mixer,
- &gxbb_mixer_iface,
- &gxbb_adc,
- &gxbb_blkmv,
- &gxbb_aiu,
- &gxbb_uart1,
- &gxbb_g2d,
- &gxbb_usb0,
- &gxbb_usb1,
- &gxbb_reset,
- &gxbb_nand,
- &gxbb_dos_parser,
- &gxbb_usb,
- &gxbb_vdin1,
- &gxbb_ahb_arb0,
- &gxbb_efuse,
- &gxbb_boot_rom,
- &gxbb_ahb_data_bus,
- &gxbb_ahb_ctrl_bus,
- &gxbb_hdmi_intr_sync,
- &gxbb_hdmi_pclk,
- &gxbb_usb1_ddr_bridge,
- &gxbb_usb0_ddr_bridge,
- &gxbb_mmc_pclk,
- &gxbb_dvin,
- &gxbb_uart2,
- &gxbb_sana,
- &gxbb_vpu_intr,
- &gxbb_sec_ahb_ahb3_bridge,
- &gxbb_clk81_a53,
- &gxbb_vclk2_venci0,
- &gxbb_vclk2_venci1,
- &gxbb_vclk2_vencp0,
- &gxbb_vclk2_vencp1,
- &gxbb_gclk_venci_int0,
- &gxbb_gclk_vencp_int,
- &gxbb_dac_clk,
- &gxbb_aoclk_gate,
- &gxbb_iec958_gate,
- &gxbb_enc480p,
- &gxbb_rng1,
- &gxbb_gclk_venci_int1,
- &gxbb_vclk2_venclmcc,
- &gxbb_vclk2_vencl,
- &gxbb_vclk_other,
- &gxbb_edp,
- &gxbb_ao_media_cpu,
- &gxbb_ao_ahb_sram,
- &gxbb_ao_ahb_bus,
- &gxbb_ao_iface,
- &gxbb_ao_i2c,
- &gxbb_emmc_a,
- &gxbb_emmc_b,
- &gxbb_emmc_c,
- &gxbb_sar_adc_clk,
- &gxbb_mali_0,
- &gxbb_mali_1,
- &gxbb_cts_amclk,
- &gxbb_cts_mclk_i958,
- &gxbb_32k_clk,
- &gxbb_sd_emmc_a_clk0,
- &gxbb_sd_emmc_b_clk0,
- &gxbb_sd_emmc_c_clk0,
- &gxbb_vpu_0,
- &gxbb_vpu_1,
- &gxbb_vapb_0,
- &gxbb_vapb_1,
- &gxbb_vapb,
- &gxbb_mpeg_clk_div,
- &gxbb_sar_adc_clk_div,
- &gxbb_mali_0_div,
- &gxbb_mali_1_div,
- &gxbb_cts_mclk_i958_div,
- &gxbb_32k_clk_div,
- &gxbb_sd_emmc_a_clk0_div,
- &gxbb_sd_emmc_b_clk0_div,
- &gxbb_sd_emmc_c_clk0_div,
- &gxbb_vpu_0_div,
- &gxbb_vpu_1_div,
- &gxbb_vapb_0_div,
- &gxbb_vapb_1_div,
- &gxbb_mpeg_clk_sel,
- &gxbb_sar_adc_clk_sel,
- &gxbb_mali_0_sel,
- &gxbb_mali_1_sel,
- &gxbb_mali,
- &gxbb_cts_amclk_sel,
- &gxbb_cts_mclk_i958_sel,
- &gxbb_cts_i958,
- &gxbb_32k_clk_sel,
- &gxbb_sd_emmc_a_clk0_sel,
- &gxbb_sd_emmc_b_clk0_sel,
- &gxbb_sd_emmc_c_clk0_sel,
- &gxbb_vpu_0_sel,
- &gxbb_vpu_1_sel,
- &gxbb_vpu,
- &gxbb_vapb_0_sel,
- &gxbb_vapb_1_sel,
- &gxbb_vapb_sel,
- &gxbb_mpll0,
- &gxbb_mpll1,
- &gxbb_mpll2,
- &gxbb_mpll0_div,
- &gxbb_mpll1_div,
- &gxbb_mpll2_div,
- &gxbb_cts_amclk_div,
- &gxbb_fixed_pll,
- &gxbb_sys_pll,
- &gxbb_mpll_prediv,
- &gxbb_fclk_div2,
- &gxbb_fclk_div3,
- &gxbb_fclk_div4,
- &gxbb_fclk_div5,
- &gxbb_fclk_div7,
- &gxbb_vdec_1_sel,
- &gxbb_vdec_1_div,
- &gxbb_vdec_1,
- &gxbb_vdec_hevc_sel,
- &gxbb_vdec_hevc_div,
- &gxbb_vdec_hevc,
- &gxbb_gen_clk_sel,
- &gxbb_gen_clk_div,
- &gxbb_gen_clk,
- &gxbb_fixed_pll_dco,
- &gxbb_sys_pll_dco,
- &gxbb_gp0_pll,
- &gxbb_vid_pll,
- &gxbb_vid_pll_sel,
- &gxbb_vid_pll_div,
- &gxbb_vclk,
- &gxbb_vclk_sel,
- &gxbb_vclk_div,
- &gxbb_vclk_input,
- &gxbb_vclk_div1,
- &gxbb_vclk_div2_en,
- &gxbb_vclk_div4_en,
- &gxbb_vclk_div6_en,
- &gxbb_vclk_div12_en,
- &gxbb_vclk2,
- &gxbb_vclk2_sel,
- &gxbb_vclk2_div,
- &gxbb_vclk2_input,
- &gxbb_vclk2_div1,
- &gxbb_vclk2_div2_en,
- &gxbb_vclk2_div4_en,
- &gxbb_vclk2_div6_en,
- &gxbb_vclk2_div12_en,
- &gxbb_cts_enci,
- &gxbb_cts_enci_sel,
- &gxbb_cts_encp,
- &gxbb_cts_encp_sel,
- &gxbb_cts_vdac,
- &gxbb_cts_vdac_sel,
- &gxbb_hdmi_tx,
- &gxbb_hdmi_tx_sel,
- &gxbb_hdmi_sel,
- &gxbb_hdmi_div,
- &gxbb_hdmi,
- &gxbb_gp0_pll_dco,
- &gxbb_hdmi_pll,
- &gxbb_hdmi_pll_od,
- &gxbb_hdmi_pll_od2,
- &gxbb_hdmi_pll_dco,
-};
-
-static struct clk_regmap *const gxl_clk_regmaps[] = {
- &gxbb_clk81,
- &gxbb_ddr,
- &gxbb_dos,
- &gxbb_isa,
- &gxbb_pl301,
- &gxbb_periphs,
- &gxbb_spicc,
- &gxbb_i2c,
- &gxbb_sar_adc,
- &gxbb_smart_card,
- &gxbb_rng0,
- &gxbb_uart0,
- &gxbb_sdhc,
- &gxbb_stream,
- &gxbb_async_fifo,
- &gxbb_sdio,
- &gxbb_abuf,
- &gxbb_hiu_iface,
- &gxbb_assist_misc,
- &gxbb_spi,
- &gxbb_i2s_spdif,
- &gxbb_eth,
- &gxbb_demux,
- &gxbb_aiu_glue,
- &gxbb_iec958,
- &gxbb_i2s_out,
- &gxbb_amclk,
- &gxbb_aififo2,
- &gxbb_mixer,
- &gxbb_mixer_iface,
- &gxbb_adc,
- &gxbb_blkmv,
- &gxbb_aiu,
- &gxbb_uart1,
- &gxbb_g2d,
- &gxbb_usb0,
- &gxbb_usb1,
- &gxbb_reset,
- &gxbb_nand,
- &gxbb_dos_parser,
- &gxbb_usb,
- &gxbb_vdin1,
- &gxbb_ahb_arb0,
- &gxbb_efuse,
- &gxbb_boot_rom,
- &gxbb_ahb_data_bus,
- &gxbb_ahb_ctrl_bus,
- &gxbb_hdmi_intr_sync,
- &gxbb_hdmi_pclk,
- &gxbb_usb1_ddr_bridge,
- &gxbb_usb0_ddr_bridge,
- &gxbb_mmc_pclk,
- &gxbb_dvin,
- &gxbb_uart2,
- &gxbb_sana,
- &gxbb_vpu_intr,
- &gxbb_sec_ahb_ahb3_bridge,
- &gxbb_clk81_a53,
- &gxbb_vclk2_venci0,
- &gxbb_vclk2_venci1,
- &gxbb_vclk2_vencp0,
- &gxbb_vclk2_vencp1,
- &gxbb_gclk_venci_int0,
- &gxbb_gclk_vencp_int,
- &gxbb_dac_clk,
- &gxbb_aoclk_gate,
- &gxbb_iec958_gate,
- &gxbb_enc480p,
- &gxbb_rng1,
- &gxbb_gclk_venci_int1,
- &gxbb_vclk2_venclmcc,
- &gxbb_vclk2_vencl,
- &gxbb_vclk_other,
- &gxbb_edp,
- &gxbb_ao_media_cpu,
- &gxbb_ao_ahb_sram,
- &gxbb_ao_ahb_bus,
- &gxbb_ao_iface,
- &gxbb_ao_i2c,
- &gxbb_emmc_a,
- &gxbb_emmc_b,
- &gxbb_emmc_c,
- &gxbb_sar_adc_clk,
- &gxbb_mali_0,
- &gxbb_mali_1,
- &gxbb_cts_amclk,
- &gxbb_cts_mclk_i958,
- &gxbb_32k_clk,
- &gxbb_sd_emmc_a_clk0,
- &gxbb_sd_emmc_b_clk0,
- &gxbb_sd_emmc_c_clk0,
- &gxbb_vpu_0,
- &gxbb_vpu_1,
- &gxbb_vapb_0,
- &gxbb_vapb_1,
- &gxbb_vapb,
- &gxbb_mpeg_clk_div,
- &gxbb_sar_adc_clk_div,
- &gxbb_mali_0_div,
- &gxbb_mali_1_div,
- &gxbb_cts_mclk_i958_div,
- &gxbb_32k_clk_div,
- &gxbb_sd_emmc_a_clk0_div,
- &gxbb_sd_emmc_b_clk0_div,
- &gxbb_sd_emmc_c_clk0_div,
- &gxbb_vpu_0_div,
- &gxbb_vpu_1_div,
- &gxbb_vapb_0_div,
- &gxbb_vapb_1_div,
- &gxbb_mpeg_clk_sel,
- &gxbb_sar_adc_clk_sel,
- &gxbb_mali_0_sel,
- &gxbb_mali_1_sel,
- &gxbb_mali,
- &gxbb_cts_amclk_sel,
- &gxbb_cts_mclk_i958_sel,
- &gxbb_cts_i958,
- &gxbb_32k_clk_sel,
- &gxbb_sd_emmc_a_clk0_sel,
- &gxbb_sd_emmc_b_clk0_sel,
- &gxbb_sd_emmc_c_clk0_sel,
- &gxbb_vpu_0_sel,
- &gxbb_vpu_1_sel,
- &gxbb_vpu,
- &gxbb_vapb_0_sel,
- &gxbb_vapb_1_sel,
- &gxbb_vapb_sel,
- &gxbb_mpll0,
- &gxbb_mpll1,
- &gxbb_mpll2,
- &gxl_mpll0_div,
- &gxbb_mpll1_div,
- &gxbb_mpll2_div,
- &gxbb_cts_amclk_div,
- &gxbb_fixed_pll,
- &gxbb_sys_pll,
- &gxbb_mpll_prediv,
- &gxbb_fclk_div2,
- &gxbb_fclk_div3,
- &gxbb_fclk_div4,
- &gxbb_fclk_div5,
- &gxbb_fclk_div7,
- &gxbb_vdec_1_sel,
- &gxbb_vdec_1_div,
- &gxbb_vdec_1,
- &gxbb_vdec_hevc_sel,
- &gxbb_vdec_hevc_div,
- &gxbb_vdec_hevc,
- &gxbb_gen_clk_sel,
- &gxbb_gen_clk_div,
- &gxbb_gen_clk,
- &gxbb_fixed_pll_dco,
- &gxbb_sys_pll_dco,
- &gxbb_gp0_pll,
- &gxbb_vid_pll,
- &gxbb_vid_pll_sel,
- &gxbb_vid_pll_div,
- &gxbb_vclk,
- &gxbb_vclk_sel,
- &gxbb_vclk_div,
- &gxbb_vclk_input,
- &gxbb_vclk_div1,
- &gxbb_vclk_div2_en,
- &gxbb_vclk_div4_en,
- &gxbb_vclk_div6_en,
- &gxbb_vclk_div12_en,
- &gxbb_vclk2,
- &gxbb_vclk2_sel,
- &gxbb_vclk2_div,
- &gxbb_vclk2_input,
- &gxbb_vclk2_div1,
- &gxbb_vclk2_div2_en,
- &gxbb_vclk2_div4_en,
- &gxbb_vclk2_div6_en,
- &gxbb_vclk2_div12_en,
- &gxbb_cts_enci,
- &gxbb_cts_enci_sel,
- &gxbb_cts_encp,
- &gxbb_cts_encp_sel,
- &gxbb_cts_vdac,
- &gxbb_cts_vdac_sel,
- &gxbb_hdmi_tx,
- &gxbb_hdmi_tx_sel,
- &gxbb_hdmi_sel,
- &gxbb_hdmi_div,
- &gxbb_hdmi,
- &gxl_gp0_pll_dco,
- &gxl_hdmi_pll,
- &gxl_hdmi_pll_od,
- &gxl_hdmi_pll_od2,
- &gxl_hdmi_pll_dco,
- &gxl_acodec,
-};
-
-static const struct meson_eeclkc_data gxbb_clkc_data = {
- .regmap_clks = gxbb_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(gxbb_clk_regmaps),
+static const struct meson_clkc_data gxbb_clkc_data = {
.hw_clks = {
.hws = gxbb_hw_clks,
.num = ARRAY_SIZE(gxbb_hw_clks),
},
};
-static const struct meson_eeclkc_data gxl_clkc_data = {
- .regmap_clks = gxl_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(gxl_clk_regmaps),
+static const struct meson_clkc_data gxl_clkc_data = {
.hw_clks = {
.hws = gxl_hw_clks,
.num = ARRAY_SIZE(gxl_hw_clks),
},
};
-static const struct of_device_id clkc_match_table[] = {
+static const struct of_device_id gxbb_clkc_match_table[] = {
{ .compatible = "amlogic,gxbb-clkc", .data = &gxbb_clkc_data },
{ .compatible = "amlogic,gxl-clkc", .data = &gxl_clkc_data },
{},
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, gxbb_clkc_match_table);
-static struct platform_driver gxbb_driver = {
- .probe = meson_eeclkc_probe,
+static struct platform_driver gxbb_clkc_driver = {
+ .probe = meson_clkc_syscon_probe,
.driver = {
.name = "gxbb-clkc",
- .of_match_table = clkc_match_table,
+ .of_match_table = gxbb_clkc_match_table,
},
};
-module_platform_driver(gxbb_driver);
+module_platform_driver(gxbb_clkc_driver);
MODULE_DESCRIPTION("Amlogic GXBB Main Clock Controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
deleted file mode 100644
index ba5f39a8d746..000000000000
--- a/drivers/clk/meson/gxbb.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
-/*
- * Copyright (c) 2016 AmLogic, Inc.
- * Author: Michael Turquette <mturquette@baylibre.com>
- */
-
-#ifndef __GXBB_H
-#define __GXBB_H
-
-/*
- * Clock controller register offsets
- *
- * Register offsets from the data sheet are listed in comment blocks below.
- * Those offsets must be multiplied by 4 before adding them to the base address
- * to get the right value
- */
-#define SCR 0x2C /* 0x0b offset in data sheet */
-#define TIMEOUT_VALUE 0x3c /* 0x0f offset in data sheet */
-
-#define HHI_GP0_PLL_CNTL 0x40 /* 0x10 offset in data sheet */
-#define HHI_GP0_PLL_CNTL2 0x44 /* 0x11 offset in data sheet */
-#define HHI_GP0_PLL_CNTL3 0x48 /* 0x12 offset in data sheet */
-#define HHI_GP0_PLL_CNTL4 0x4c /* 0x13 offset in data sheet */
-#define HHI_GP0_PLL_CNTL5 0x50 /* 0x14 offset in data sheet */
-#define HHI_GP0_PLL_CNTL1 0x58 /* 0x16 offset in data sheet */
-
-#define HHI_XTAL_DIVN_CNTL 0xbc /* 0x2f offset in data sheet */
-#define HHI_TIMER90K 0xec /* 0x3b offset in data sheet */
-
-#define HHI_MEM_PD_REG0 0x100 /* 0x40 offset in data sheet */
-#define HHI_MEM_PD_REG1 0x104 /* 0x41 offset in data sheet */
-#define HHI_VPU_MEM_PD_REG1 0x108 /* 0x42 offset in data sheet */
-#define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */
-#define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */
-
-#define HHI_GCLK_MPEG0 0x140 /* 0x50 offset in data sheet */
-#define HHI_GCLK_MPEG1 0x144 /* 0x51 offset in data sheet */
-#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
-#define HHI_GCLK_OTHER 0x150 /* 0x54 offset in data sheet */
-#define HHI_GCLK_AO 0x154 /* 0x55 offset in data sheet */
-#define HHI_SYS_OSCIN_CNTL 0x158 /* 0x56 offset in data sheet */
-#define HHI_SYS_CPU_CLK_CNTL1 0x15c /* 0x57 offset in data sheet */
-#define HHI_SYS_CPU_RESET_CNTL 0x160 /* 0x58 offset in data sheet */
-#define HHI_VID_CLK_DIV 0x164 /* 0x59 offset in data sheet */
-
-#define HHI_MPEG_CLK_CNTL 0x174 /* 0x5d offset in data sheet */
-#define HHI_AUD_CLK_CNTL 0x178 /* 0x5e offset in data sheet */
-#define HHI_VID_CLK_CNTL 0x17c /* 0x5f offset in data sheet */
-#define HHI_AUD_CLK_CNTL2 0x190 /* 0x64 offset in data sheet */
-#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */
-#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */
-#define HHI_VID_PLL_CLK_DIV 0x1a0 /* 0x68 offset in data sheet */
-#define HHI_AUD_CLK_CNTL3 0x1a4 /* 0x69 offset in data sheet */
-#define HHI_MALI_CLK_CNTL 0x1b0 /* 0x6c offset in data sheet */
-#define HHI_VPU_CLK_CNTL 0x1bC /* 0x6f offset in data sheet */
-
-#define HHI_HDMI_CLK_CNTL 0x1CC /* 0x73 offset in data sheet */
-#define HHI_VDEC_CLK_CNTL 0x1E0 /* 0x78 offset in data sheet */
-#define HHI_VDEC2_CLK_CNTL 0x1E4 /* 0x79 offset in data sheet */
-#define HHI_VDEC3_CLK_CNTL 0x1E8 /* 0x7a offset in data sheet */
-#define HHI_VDEC4_CLK_CNTL 0x1EC /* 0x7b offset in data sheet */
-#define HHI_HDCP22_CLK_CNTL 0x1F0 /* 0x7c offset in data sheet */
-#define HHI_VAPBCLK_CNTL 0x1F4 /* 0x7d offset in data sheet */
-
-#define HHI_VPU_CLKB_CNTL 0x20C /* 0x83 offset in data sheet */
-#define HHI_USB_CLK_CNTL 0x220 /* 0x88 offset in data sheet */
-#define HHI_32K_CLK_CNTL 0x224 /* 0x89 offset in data sheet */
-#define HHI_GEN_CLK_CNTL 0x228 /* 0x8a offset in data sheet */
-
-#define HHI_PCM_CLK_CNTL 0x258 /* 0x96 offset in data sheet */
-#define HHI_NAND_CLK_CNTL 0x25C /* 0x97 offset in data sheet */
-#define HHI_SD_EMMC_CLK_CNTL 0x264 /* 0x99 offset in data sheet */
-
-#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
-#define HHI_MPLL_CNTL2 0x284 /* 0xa1 offset in data sheet */
-#define HHI_MPLL_CNTL3 0x288 /* 0xa2 offset in data sheet */
-#define HHI_MPLL_CNTL4 0x28C /* 0xa3 offset in data sheet */
-#define HHI_MPLL_CNTL5 0x290 /* 0xa4 offset in data sheet */
-#define HHI_MPLL_CNTL6 0x294 /* 0xa5 offset in data sheet */
-#define HHI_MPLL_CNTL7 0x298 /* MP0, 0xa6 offset in data sheet */
-#define HHI_MPLL_CNTL8 0x29C /* MP1, 0xa7 offset in data sheet */
-#define HHI_MPLL_CNTL9 0x2A0 /* MP2, 0xa8 offset in data sheet */
-#define HHI_MPLL_CNTL10 0x2A4 /* MP2, 0xa9 offset in data sheet */
-
-#define HHI_MPLL3_CNTL0 0x2E0 /* 0xb8 offset in data sheet */
-#define HHI_MPLL3_CNTL1 0x2E4 /* 0xb9 offset in data sheet */
-#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
-#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
-
-#define HHI_SYS_PLL_CNTL 0x300 /* 0xc0 offset in data sheet */
-#define HHI_SYS_PLL_CNTL2 0x304 /* 0xc1 offset in data sheet */
-#define HHI_SYS_PLL_CNTL3 0x308 /* 0xc2 offset in data sheet */
-#define HHI_SYS_PLL_CNTL4 0x30c /* 0xc3 offset in data sheet */
-#define HHI_SYS_PLL_CNTL5 0x310 /* 0xc4 offset in data sheet */
-#define HHI_DPLL_TOP_I 0x318 /* 0xc6 offset in data sheet */
-#define HHI_DPLL_TOP2_I 0x31C /* 0xc7 offset in data sheet */
-#define HHI_HDMI_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */
-#define HHI_HDMI_PLL_CNTL2 0x324 /* 0xc9 offset in data sheet */
-#define HHI_HDMI_PLL_CNTL3 0x328 /* 0xca offset in data sheet */
-#define HHI_HDMI_PLL_CNTL4 0x32C /* 0xcb offset in data sheet */
-#define HHI_HDMI_PLL_CNTL5 0x330 /* 0xcc offset in data sheet */
-#define HHI_HDMI_PLL_CNTL6 0x334 /* 0xcd offset in data sheet */
-#define HHI_HDMI_PLL_CNTL_I 0x338 /* 0xce offset in data sheet */
-#define HHI_HDMI_PLL_CNTL7 0x33C /* 0xcf offset in data sheet */
-
-#define HHI_HDMI_PHY_CNTL0 0x3A0 /* 0xe8 offset in data sheet */
-#define HHI_HDMI_PHY_CNTL1 0x3A4 /* 0xe9 offset in data sheet */
-#define HHI_HDMI_PHY_CNTL2 0x3A8 /* 0xea offset in data sheet */
-#define HHI_HDMI_PHY_CNTL3 0x3AC /* 0xeb offset in data sheet */
-
-#define HHI_VID_LOCK_CLK_CNTL 0x3C8 /* 0xf2 offset in data sheet */
-#define HHI_BT656_CLK_CNTL 0x3D4 /* 0xf5 offset in data sheet */
-#define HHI_SAR_CLK_CNTL 0x3D8 /* 0xf6 offset in data sheet */
-
-#endif /* __GXBB_H */
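Worked example of the "multiply by 4" convention documented in the removed header
(and carried over to meson8b.c below), using one of the defines above:

/* HHI_MPEG_CLK_CNTL sits at word offset 0x5d in the data sheet:
 * byte offset = 0x5d * 4 = 0x174, which is the value of the define.
 */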
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index 995be51987f4..8f6bdea18119 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include "meson-aoclk.h"
+#include "clk-regmap.h"
static int meson_aoclk_do_reset(struct reset_controller_dev *rcdev,
unsigned long id)
@@ -36,15 +37,23 @@ static const struct reset_control_ops meson_aoclk_reset_ops = {
int meson_aoclkc_probe(struct platform_device *pdev)
{
struct meson_aoclk_reset_controller *rstc;
- struct meson_aoclk_data *data;
+ const struct meson_clkc_data *clkc_data;
+ const struct meson_aoclk_data *data;
struct device *dev = &pdev->dev;
struct device_node *np;
struct regmap *regmap;
- int ret, clkid;
+ int ret;
- data = (struct meson_aoclk_data *) of_device_get_match_data(dev);
- if (!data)
- return -ENODEV;
+ clkc_data = of_device_get_match_data(dev);
+ if (!clkc_data)
+ return -EINVAL;
+
+ ret = meson_clkc_syscon_probe(pdev);
+ if (ret)
+ return ret;
+
+ data = container_of(clkc_data, struct meson_aoclk_data,
+ clkc_data);
rstc = devm_kzalloc(dev, sizeof(*rstc), GFP_KERNEL);
if (!rstc)
@@ -70,23 +79,7 @@ int meson_aoclkc_probe(struct platform_device *pdev)
return ret;
}
- /* Populate regmap */
- for (clkid = 0; clkid < data->num_clks; clkid++)
- data->clks[clkid]->map = regmap;
-
- /* Register all clks */
- for (clkid = 0; clkid < data->hw_clks.num; clkid++) {
- if (!data->hw_clks.hws[clkid])
- continue;
-
- ret = devm_clk_hw_register(dev, data->hw_clks.hws[clkid]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
+ return 0;
}
EXPORT_SYMBOL_NS_GPL(meson_aoclkc_probe, "CLK_MESON");
diff --git a/drivers/clk/meson/meson-aoclk.h b/drivers/clk/meson/meson-aoclk.h
index 308be3e4814a..2c83e73d3a77 100644
--- a/drivers/clk/meson/meson-aoclk.h
+++ b/drivers/clk/meson/meson-aoclk.h
@@ -20,12 +20,10 @@
#include "meson-clkc-utils.h"
struct meson_aoclk_data {
+ const struct meson_clkc_data clkc_data;
const unsigned int reset_reg;
const int num_reset;
const unsigned int *reset;
- const int num_clks;
- struct clk_regmap **clks;
- struct meson_clk_hw_data hw_clks;
};
struct meson_aoclk_reset_controller {
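With this layout, the of_device_id .data of an AO clock controller can be read two
ways: meson_clkc_syscon_probe() treats it as a plain meson_clkc_data (valid only
because clkc_data is the first member), while meson_aoclkc_probe() recovers the full
structure with container_of(). A minimal sketch, with hypothetical array and
register names:

static const struct meson_aoclk_data soc_aoclkc_data = {
	.clkc_data = {			/* must remain the first member */
		.hw_clks = {
			.hws = soc_aoclk_hw_clks,
			.num = ARRAY_SIZE(soc_aoclk_hw_clks),
		},
	},
	.reset_reg = SOC_AO_RESET_REG,	/* hypothetical register offset */
	.num_reset = ARRAY_SIZE(soc_aoclk_reset),
	.reset = soc_aoclk_reset,
};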
diff --git a/drivers/clk/meson/meson-clkc-utils.c b/drivers/clk/meson/meson-clkc-utils.c
index 6937d1482719..870f50548e26 100644
--- a/drivers/clk/meson/meson-clkc-utils.c
+++ b/drivers/clk/meson/meson-clkc-utils.c
@@ -3,9 +3,13 @@
* Copyright (c) 2023 Neil Armstrong <neil.armstrong@linaro.org>
*/
-#include <linux/of_device.h>
#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
#include "meson-clkc-utils.h"
struct clk_hw *meson_clk_hw_get(struct of_phandle_args *clkspec, void *clk_hw_data)
@@ -22,6 +26,86 @@ struct clk_hw *meson_clk_hw_get(struct of_phandle_args *clkspec, void *clk_hw_da
}
EXPORT_SYMBOL_NS_GPL(meson_clk_hw_get, "CLK_MESON");
+static int meson_clkc_init(struct device *dev, struct regmap *map)
+{
+ const struct meson_clkc_data *data;
+ struct clk_hw *hw;
+ int ret, i;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ if (data->init_count)
+ regmap_multi_reg_write(map, data->init_regs, data->init_count);
+
+ for (i = 0; i < data->hw_clks.num; i++) {
+ hw = data->hw_clks.hws[i];
+
+ /* array might be sparse */
+ if (!hw)
+ continue;
+
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "registering %s clock failed\n",
+ hw->init->name);
+ return ret;
+ }
+ }
+
+ return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
+}
+
+int meson_clkc_syscon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ struct regmap *map;
+
+ np = of_get_parent(dev->of_node);
+ map = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(map)) {
+ dev_err(dev, "failed to get parent syscon regmap\n");
+ return PTR_ERR(map);
+ }
+
+ return meson_clkc_init(dev, map);
+}
+EXPORT_SYMBOL_NS_GPL(meson_clkc_syscon_probe, "CLK_MESON");
+
+int meson_clkc_mmio_probe(struct platform_device *pdev)
+{
+ const struct meson_clkc_data *data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *base;
+ struct regmap *map;
+ struct regmap_config regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ };
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap_cfg.max_register = resource_size(res) - regmap_cfg.reg_stride;
+
+ map = devm_regmap_init_mmio(dev, base, &regmap_cfg);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ return meson_clkc_init(dev, map);
+}
+EXPORT_SYMBOL_NS_GPL(meson_clkc_mmio_probe, "CLK_MESON");
+
MODULE_DESCRIPTION("Amlogic Clock Controller Utilities");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("CLK_MESON");
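Two details of the helpers above are worth spelling out. First, meson_clkc_init()
tolerates sparse hw_clks arrays, so providers may leave holes at unassigned CLKID
slots. Second, meson_clkc_mmio_probe() sizes the regmap from the DT reg property;
for example, assuming a 0x400-byte register window:

/* resource_size(res) == 0x400, reg_stride == 4:
 * max_register = 0x400 - 4 = 0x3fc,
 * i.e. the byte offset of the last 32-bit register in the window.
 */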
diff --git a/drivers/clk/meson/meson-clkc-utils.h b/drivers/clk/meson/meson-clkc-utils.h
index fe6f40728949..ddadf14b4923 100644
--- a/drivers/clk/meson/meson-clkc-utils.h
+++ b/drivers/clk/meson/meson-clkc-utils.h
@@ -9,6 +9,8 @@
#include <linux/of_device.h>
#include <linux/clk-provider.h>
+struct platform_device;
+
struct meson_clk_hw_data {
struct clk_hw **hws;
unsigned int num;
@@ -16,4 +18,91 @@ struct meson_clk_hw_data {
struct clk_hw *meson_clk_hw_get(struct of_phandle_args *clkspec, void *clk_hw_data);
+struct meson_clkc_data {
+ const struct reg_sequence *init_regs;
+ unsigned int init_count;
+ struct meson_clk_hw_data hw_clks;
+};
+
+int meson_clkc_syscon_probe(struct platform_device *pdev);
+int meson_clkc_mmio_probe(struct platform_device *pdev);
+
+#define __MESON_PCLK(_name, _reg, _bit, _ops, _pdata, _flags) \
+struct clk_regmap _name = { \
+ .data = &(struct clk_regmap_gate_data) { \
+ .offset = (_reg), \
+ .bit_idx = (_bit), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = _ops, \
+ .parent_data = (_pdata), \
+ .num_parents = 1, \
+ .flags = (_flags), \
+ }, \
+}
+
+#define MESON_PCLK(_name, _reg, _bit, _pdata, _flags) \
+ __MESON_PCLK(_name, _reg, _bit, &clk_regmap_gate_ops, _pdata, _flags)
+
+#define MESON_PCLK_RO(_name, _reg, _bit, _pdata, _flags) \
+ __MESON_PCLK(_name, _reg, _bit, &clk_regmap_gate_ro_ops, _pdata, _flags)
+
+/* Helpers for the usual sel/div/gate composite clocks */
+#define MESON_COMP_SEL(_prefix, _name, _reg, _shift, _mask, _pdata, \
+ _table, _dflags, _iflags) \
+struct clk_regmap _prefix##_name##_sel = { \
+ .data = &(struct clk_regmap_mux_data) { \
+ .offset = (_reg), \
+ .mask = (_mask), \
+ .shift = (_shift), \
+ .flags = (_dflags), \
+ .table = (_table), \
+ }, \
+ .hw.init = &(struct clk_init_data){ \
+ .name = #_name "_sel", \
+ .ops = &clk_regmap_mux_ops, \
+ .parent_data = _pdata, \
+ .num_parents = ARRAY_SIZE(_pdata), \
+ .flags = (_iflags), \
+ }, \
+}
+
+#define MESON_COMP_DIV(_prefix, _name, _reg, _shift, _width, \
+ _dflags, _iflags) \
+struct clk_regmap _prefix##_name##_div = { \
+ .data = &(struct clk_regmap_div_data) { \
+ .offset = (_reg), \
+ .shift = (_shift), \
+ .width = (_width), \
+ .flags = (_dflags), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name "_div", \
+ .ops = &clk_regmap_divider_ops, \
+ .parent_hws = (const struct clk_hw *[]) { \
+ &_prefix##_name##_sel.hw \
+ }, \
+ .num_parents = 1, \
+ .flags = (_iflags), \
+ }, \
+}
+
+#define MESON_COMP_GATE(_prefix, _name, _reg, _bit, _iflags) \
+struct clk_regmap _prefix##_name = { \
+ .data = &(struct clk_regmap_gate_data) { \
+ .offset = (_reg), \
+ .bit_idx = (_bit), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = &clk_regmap_gate_ops, \
+ .parent_hws = (const struct clk_hw *[]) { \
+ &_prefix##_name##_div.hw \
+ }, \
+ .num_parents = 1, \
+ .flags = (_iflags), \
+ }, \
+}
+
#endif
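The MESON_COMP_* helpers generate the usual three-stage composite: a mux (_sel), a
divider (_div) parented on the mux, and a gate parented on the divider. A minimal
sketch, with purely illustrative names and register layout:

static const struct clk_parent_data soc_foo_parents[] = {
	{ .fw_name = "xtal", },
	{ .hw = &soc_fclk_div4.hw },	/* hypothetical parent */
};

/* Produces soc_foo_sel ("foo_sel"), soc_foo_div ("foo_div") and
 * soc_foo ("foo"), chained sel -> div -> gate.
 */
static MESON_COMP_SEL(soc_, foo, SOC_FOO_CLK_CNTL, 9, 0x3,
		      soc_foo_parents, NULL, 0, 0);
static MESON_COMP_DIV(soc_, foo, SOC_FOO_CLK_CNTL, 0, 7, 0, 0);
static MESON_COMP_GATE(soc_, foo, SOC_FOO_CLK_CNTL, 8, 0);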
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
deleted file mode 100644
index 3053ee7425eb..000000000000
--- a/drivers/clk/meson/meson-eeclk.c
+++ /dev/null
@@ -1,64 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2019 BayLibre, SAS.
- * Author: Jerome Brunet <jbrunet@baylibre.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/syscon.h>
-#include <linux/regmap.h>
-#include <linux/module.h>
-
-#include "clk-regmap.h"
-#include "meson-eeclk.h"
-
-int meson_eeclkc_probe(struct platform_device *pdev)
-{
- const struct meson_eeclkc_data *data;
- struct device *dev = &pdev->dev;
- struct device_node *np;
- struct regmap *map;
- int ret, i;
-
- data = of_device_get_match_data(dev);
- if (!data)
- return -EINVAL;
-
- /* Get the hhi system controller node */
- np = of_get_parent(dev->of_node);
- map = syscon_node_to_regmap(np);
- of_node_put(np);
- if (IS_ERR(map)) {
- dev_err(dev,
- "failed to get HHI regmap\n");
- return PTR_ERR(map);
- }
-
- if (data->init_count)
- regmap_multi_reg_write(map, data->init_regs, data->init_count);
-
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < data->regmap_clk_num; i++)
- data->regmap_clks[i]->map = map;
-
- for (i = 0; i < data->hw_clks.num; i++) {
- /* array might be sparse */
- if (!data->hw_clks.hws[i])
- continue;
-
- ret = devm_clk_hw_register(dev, data->hw_clks.hws[i]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
-}
-EXPORT_SYMBOL_NS_GPL(meson_eeclkc_probe, "CLK_MESON");
-
-MODULE_DESCRIPTION("Amlogic Main Clock Controller Helpers");
-MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/meson-eeclk.h b/drivers/clk/meson/meson-eeclk.h
deleted file mode 100644
index 37a48b75c660..000000000000
--- a/drivers/clk/meson/meson-eeclk.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2019 BayLibre, SAS.
- * Author: Jerome Brunet <jbrunet@baylibre.com>
- */
-
-#ifndef __MESON_CLKC_H
-#define __MESON_CLKC_H
-
-#include <linux/clk-provider.h>
-#include "clk-regmap.h"
-#include "meson-clkc-utils.h"
-
-struct platform_device;
-
-struct meson_eeclkc_data {
- struct clk_regmap *const *regmap_clks;
- unsigned int regmap_clk_num;
- const struct reg_sequence *init_regs;
- unsigned int init_count;
- struct meson_clk_hw_data hw_clks;
-};
-
-int meson_eeclkc_probe(struct platform_device *pdev);
-
-#endif /* __MESON_CLKC_H */
diff --git a/drivers/clk/meson/meson8-ddr.c b/drivers/clk/meson/meson8-ddr.c
index 4b73ea244b63..0f93774f7371 100644
--- a/drivers/clk/meson/meson8-ddr.c
+++ b/drivers/clk/meson/meson8-ddr.c
@@ -12,6 +12,7 @@
#include "clk-regmap.h"
#include "clk-pll.h"
+#include "meson-clkc-utils.h"
#define AM_DDR_PLL_CNTL 0x00
#define AM_DDR_PLL_CNTL1 0x04
@@ -77,69 +78,31 @@ static struct clk_regmap meson8_ddr_pll = {
},
};
-static struct clk_hw_onecell_data meson8_ddr_clk_hw_onecell_data = {
- .hws = {
- [DDR_CLKID_DDR_PLL_DCO] = &meson8_ddr_pll_dco.hw,
- [DDR_CLKID_DDR_PLL] = &meson8_ddr_pll.hw,
- },
- .num = 2,
-};
-
-static struct clk_regmap *const meson8_ddr_clk_regmaps[] = {
- &meson8_ddr_pll_dco,
- &meson8_ddr_pll,
+static struct clk_hw *meson8_ddr_hw_clks[] = {
+ [DDR_CLKID_DDR_PLL_DCO] = &meson8_ddr_pll_dco.hw,
+ [DDR_CLKID_DDR_PLL] = &meson8_ddr_pll.hw,
};
-static const struct regmap_config meson8_ddr_clkc_regmap_config = {
- .reg_bits = 8,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = DDR_CLK_STS,
+static const struct meson_clkc_data meson8_ddr_clkc_data = {
+ .hw_clks = {
+ .hws = meson8_ddr_hw_clks,
+ .num = ARRAY_SIZE(meson8_ddr_hw_clks),
+ },
};
-static int meson8_ddr_clkc_probe(struct platform_device *pdev)
-{
- struct regmap *regmap;
- void __iomem *base;
- struct clk_hw *hw;
- int ret, i;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- regmap = devm_regmap_init_mmio(&pdev->dev, base,
- &meson8_ddr_clkc_regmap_config);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
- /* Populate regmap */
- for (i = 0; i < ARRAY_SIZE(meson8_ddr_clk_regmaps); i++)
- meson8_ddr_clk_regmaps[i]->map = regmap;
-
- /* Register all clks */
- for (i = 0; i < meson8_ddr_clk_hw_onecell_data.num; i++) {
- hw = meson8_ddr_clk_hw_onecell_data.hws[i];
-
- ret = devm_clk_hw_register(&pdev->dev, hw);
- if (ret) {
- dev_err(&pdev->dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
- &meson8_ddr_clk_hw_onecell_data);
-}
-
static const struct of_device_id meson8_ddr_clkc_match_table[] = {
- { .compatible = "amlogic,meson8-ddr-clkc" },
- { .compatible = "amlogic,meson8b-ddr-clkc" },
+ {
+ .compatible = "amlogic,meson8-ddr-clkc",
+ .data = &meson8_ddr_clkc_data,
+ }, {
+ .compatible = "amlogic,meson8b-ddr-clkc",
+ .data = &meson8_ddr_clkc_data,
+ },
{ /* sentinel */ }
};
static struct platform_driver meson8_ddr_clkc_driver = {
- .probe = meson8_ddr_clkc_probe,
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "meson8-ddr-clkc",
.of_match_table = meson8_ddr_clkc_match_table,
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index e4b474c5f86c..95d0b9cbd904 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -16,7 +16,6 @@
#include <linux/slab.h>
#include <linux/regmap.h>
-#include "meson8b.h"
#include "clk-regmap.h"
#include "meson-clkc-utils.h"
#include "clk-pll.h"
@@ -25,6 +24,72 @@
#include <dt-bindings/clock/meson8b-clkc.h>
#include <dt-bindings/reset/amlogic,meson8b-clkc-reset.h>
+/*
+ * Clock controller register offsets
+ *
+ * Register offsets from the HardKernel[0] data sheet must be multiplied
+ * by 4 before adding them to the base address to get the right value.
+ *
+ * [0] https://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf
+ */
+#define HHI_GP_PLL_CNTL 0x40
+#define HHI_GP_PLL_CNTL2 0x44
+#define HHI_GP_PLL_CNTL3 0x48
+#define HHI_GP_PLL_CNTL4 0x4C
+#define HHI_GP_PLL_CNTL5 0x50
+#define HHI_VIID_CLK_DIV 0x128
+#define HHI_VIID_CLK_CNTL 0x12c
+#define HHI_GCLK_MPEG0 0x140
+#define HHI_GCLK_MPEG1 0x144
+#define HHI_GCLK_MPEG2 0x148
+#define HHI_GCLK_OTHER 0x150
+#define HHI_GCLK_AO 0x154
+#define HHI_SYS_CPU_CLK_CNTL1 0x15c
+#define HHI_VID_CLK_DIV 0x164
+#define HHI_MPEG_CLK_CNTL 0x174
+#define HHI_AUD_CLK_CNTL 0x178
+#define HHI_VID_CLK_CNTL 0x17c
+#define HHI_AUD_CLK_CNTL2 0x190
+#define HHI_VID_CLK_CNTL2 0x194
+#define HHI_VID_DIVIDER_CNTL 0x198
+#define HHI_SYS_CPU_CLK_CNTL0 0x19c
+#define HHI_MALI_CLK_CNTL 0x1b0
+#define HHI_VPU_CLK_CNTL 0x1bc
+#define HHI_HDMI_CLK_CNTL 0x1cc
+#define HHI_VDEC_CLK_CNTL 0x1e0
+#define HHI_VDEC2_CLK_CNTL 0x1e4
+#define HHI_VDEC3_CLK_CNTL 0x1e8
+#define HHI_NAND_CLK_CNTL 0x25c
+#define HHI_MPLL_CNTL 0x280
+#define HHI_SYS_PLL_CNTL 0x300
+#define HHI_VID_PLL_CNTL 0x320
+#define HHI_VID_PLL_CNTL2 0x324
+#define HHI_VID_PLL_CNTL3 0x328
+#define HHI_VID_PLL_CNTL4 0x32c
+#define HHI_VID_PLL_CNTL5 0x330
+#define HHI_VID_PLL_CNTL6 0x334
+#define HHI_VID2_PLL_CNTL 0x380
+#define HHI_VID2_PLL_CNTL2 0x384
+#define HHI_VID2_PLL_CNTL3 0x388
+#define HHI_VID2_PLL_CNTL4 0x38c
+#define HHI_VID2_PLL_CNTL5 0x390
+#define HHI_VID2_PLL_CNTL6 0x394
+
+/*
+ * MPLL register offsets taken from the S905 datasheet. Vendor kernel sources
+ * confirm these are the same for the S805.
+ */
+#define HHI_MPLL_CNTL2 0x284
+#define HHI_MPLL_CNTL3 0x288
+#define HHI_MPLL_CNTL4 0x28c
+#define HHI_MPLL_CNTL5 0x290
+#define HHI_MPLL_CNTL6 0x294
+#define HHI_MPLL_CNTL7 0x298
+#define HHI_MPLL_CNTL8 0x29c
+#define HHI_MPLL_CNTL9 0x2a0
+#define HHI_MPLL_CNTL10 0x2a4
+
struct meson8b_clk_reset {
struct reset_controller_dev reset;
struct regmap *regmap;
@@ -149,7 +214,7 @@ static const struct reg_sequence meson8b_hdmi_pll_init_regs[] = {
{ .reg = HHI_VID2_PLL_CNTL2, .def = 0x0430a800 },
};
-static const struct pll_params_table hdmi_pll_params_table[] = {
+static const struct pll_params_table meson8b_hdmi_pll_params_table[] = {
PLL_PARAMS(40, 1),
PLL_PARAMS(42, 1),
PLL_PARAMS(44, 1),
@@ -202,7 +267,7 @@ static struct clk_regmap meson8b_hdmi_pll_dco = {
.shift = 29,
.width = 1,
},
- .table = hdmi_pll_params_table,
+ .table = meson8b_hdmi_pll_params_table,
.init_regs = meson8b_hdmi_pll_init_regs,
.init_count = ARRAY_SIZE(meson8b_hdmi_pll_init_regs),
},
@@ -605,16 +670,17 @@ static struct clk_regmap meson8b_mpll2 = {
},
};
-static u32 mux_table_clk81[] = { 6, 5, 7 };
-static struct clk_regmap meson8b_mpeg_clk_sel = {
+/* clk81 is often referred to as "mpeg_clk" */
+static u32 meson8b_clk81_parents_val_table[] = { 6, 5, 7 };
+static struct clk_regmap meson8b_clk81_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MPEG_CLK_CNTL,
.mask = 0x7,
.shift = 12,
- .table = mux_table_clk81,
+ .table = meson8b_clk81_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_sel",
+ .name = "clk81_sel",
.ops = &clk_regmap_mux_ro_ops,
/*
* FIXME bits 14:12 selects from 8 possible parents:
@@ -630,17 +696,17 @@ static struct clk_regmap meson8b_mpeg_clk_sel = {
},
};
-static struct clk_regmap meson8b_mpeg_clk_div = {
+static struct clk_regmap meson8b_clk81_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_div",
+ .name = "clk81_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_mpeg_clk_sel.hw
+ &meson8b_clk81_sel.hw
},
.num_parents = 1,
},
@@ -655,7 +721,7 @@ static struct clk_regmap meson8b_clk81 = {
.name = "clk81",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_mpeg_clk_div.hw
+ &meson8b_clk81_div.hw
},
.num_parents = 1,
.flags = CLK_IS_CRITICAL,
@@ -709,7 +775,7 @@ static struct clk_fixed_factor meson8b_cpu_in_div3 = {
},
};
-static const struct clk_div_table cpu_scale_table[] = {
+static const struct clk_div_table meson8b_cpu_scale_div_table[] = {
{ .val = 1, .div = 4 },
{ .val = 2, .div = 6 },
{ .val = 3, .div = 8 },
@@ -726,7 +792,7 @@ static struct clk_regmap meson8b_cpu_scale_div = {
.offset = HHI_SYS_CPU_CLK_CNTL1,
.shift = 20,
.width = 10,
- .table = cpu_scale_table,
+ .table = meson8b_cpu_scale_div_table,
.flags = CLK_DIVIDER_ALLOW_ZERO,
},
.hw.init = &(struct clk_init_data){
@@ -740,13 +806,13 @@ static struct clk_regmap meson8b_cpu_scale_div = {
},
};
-static u32 mux_table_cpu_scale_out_sel[] = { 0, 1, 3 };
+static u32 meson8b_cpu_scale_out_parents_val_table[] = { 0, 1, 3 };
static struct clk_regmap meson8b_cpu_scale_out_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x3,
.shift = 2,
- .table = mux_table_cpu_scale_out_sel,
+ .table = meson8b_cpu_scale_out_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpu_scale_out_sel",
@@ -828,13 +894,13 @@ static struct clk_regmap meson8b_nand_clk_div = {
},
};
-static struct clk_regmap meson8b_nand_clk_gate = {
+static struct clk_regmap meson8b_nand_clk = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_NAND_CLK_CNTL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "nand_clk_gate",
+ .name = "nand_clk",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_nand_clk_div.hw
@@ -935,160 +1001,137 @@ static struct clk_fixed_factor meson8b_cpu_clk_div8 = {
},
};
-static u32 mux_table_apb[] = { 1, 2, 3, 4, 5, 6, 7 };
-static struct clk_regmap meson8b_apb_clk_sel = {
+static u32 meson8b_cpu_if_parents_val_table[] = { 1, 2, 3, 4, 5, 6, 7 };
+static const struct clk_hw *meson8b_cpu_if_parents[] = {
+ &meson8b_cpu_clk_div2.hw,
+ &meson8b_cpu_clk_div3.hw,
+ &meson8b_cpu_clk_div4.hw,
+ &meson8b_cpu_clk_div5.hw,
+ &meson8b_cpu_clk_div6.hw,
+ &meson8b_cpu_clk_div7.hw,
+ &meson8b_cpu_clk_div8.hw,
+};
+
+static struct clk_regmap meson8b_apb_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.mask = 0x7,
.shift = 3,
- .table = mux_table_apb,
+ .table = meson8b_cpu_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "apb_clk_sel",
+ .name = "apb_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_cpu_clk_div2.hw,
- &meson8b_cpu_clk_div3.hw,
- &meson8b_cpu_clk_div4.hw,
- &meson8b_cpu_clk_div5.hw,
- &meson8b_cpu_clk_div6.hw,
- &meson8b_cpu_clk_div7.hw,
- &meson8b_cpu_clk_div8.hw,
- },
- .num_parents = 7,
+ .parent_hws = meson8b_cpu_if_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cpu_if_parents),
},
};
-static struct clk_regmap meson8b_apb_clk_gate = {
+static struct clk_regmap meson8b_apb = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.bit_idx = 16,
.flags = CLK_GATE_SET_TO_DISABLE,
},
.hw.init = &(struct clk_init_data){
- .name = "apb_clk_dis",
+ .name = "apb",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_apb_clk_sel.hw
+ &meson8b_apb_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap meson8b_periph_clk_sel = {
+static struct clk_regmap meson8b_periph_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.mask = 0x7,
.shift = 6,
},
.hw.init = &(struct clk_init_data){
- .name = "periph_clk_sel",
+ .name = "periph_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_cpu_clk_div2.hw,
- &meson8b_cpu_clk_div3.hw,
- &meson8b_cpu_clk_div4.hw,
- &meson8b_cpu_clk_div5.hw,
- &meson8b_cpu_clk_div6.hw,
- &meson8b_cpu_clk_div7.hw,
- &meson8b_cpu_clk_div8.hw,
- },
- .num_parents = 7,
+ .parent_hws = meson8b_cpu_if_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cpu_if_parents),
},
};
-static struct clk_regmap meson8b_periph_clk_gate = {
+static struct clk_regmap meson8b_periph = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.bit_idx = 17,
.flags = CLK_GATE_SET_TO_DISABLE,
},
.hw.init = &(struct clk_init_data){
- .name = "periph_clk_dis",
+ .name = "periph",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_periph_clk_sel.hw
+ &meson8b_periph_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static u32 mux_table_axi[] = { 1, 2, 3, 4, 5, 6, 7 };
-static struct clk_regmap meson8b_axi_clk_sel = {
+static struct clk_regmap meson8b_axi_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.mask = 0x7,
.shift = 9,
- .table = mux_table_axi,
+ .table = meson8b_cpu_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "axi_clk_sel",
+ .name = "axi_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_cpu_clk_div2.hw,
- &meson8b_cpu_clk_div3.hw,
- &meson8b_cpu_clk_div4.hw,
- &meson8b_cpu_clk_div5.hw,
- &meson8b_cpu_clk_div6.hw,
- &meson8b_cpu_clk_div7.hw,
- &meson8b_cpu_clk_div8.hw,
- },
- .num_parents = 7,
+ .parent_hws = meson8b_cpu_if_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cpu_if_parents),
},
};
-static struct clk_regmap meson8b_axi_clk_gate = {
+static struct clk_regmap meson8b_axi = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.bit_idx = 18,
.flags = CLK_GATE_SET_TO_DISABLE,
},
.hw.init = &(struct clk_init_data){
- .name = "axi_clk_dis",
+ .name = "axi",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_axi_clk_sel.hw
+ &meson8b_axi_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap meson8b_l2_dram_clk_sel = {
+static struct clk_regmap meson8b_l2_dram_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.mask = 0x7,
.shift = 12,
},
.hw.init = &(struct clk_init_data){
- .name = "l2_dram_clk_sel",
+ .name = "l2_dram_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_cpu_clk_div2.hw,
- &meson8b_cpu_clk_div3.hw,
- &meson8b_cpu_clk_div4.hw,
- &meson8b_cpu_clk_div5.hw,
- &meson8b_cpu_clk_div6.hw,
- &meson8b_cpu_clk_div7.hw,
- &meson8b_cpu_clk_div8.hw,
- },
- .num_parents = 7,
+ .parent_hws = meson8b_cpu_if_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cpu_if_parents),
},
};
-static struct clk_regmap meson8b_l2_dram_clk_gate = {
+static struct clk_regmap meson8b_l2_dram = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.bit_idx = 19,
.flags = CLK_GATE_SET_TO_DISABLE,
},
.hw.init = &(struct clk_init_data){
- .name = "l2_dram_clk_dis",
+ .name = "l2_dram",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_l2_dram_clk_sel.hw
+ &meson8b_l2_dram_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1221,7 +1264,7 @@ static struct clk_regmap meson8b_vid_pll_final_div = {
},
};
-static const struct clk_hw *meson8b_vclk_mux_parent_hws[] = {
+static const struct clk_hw *meson8b_vclk_parents[] = {
&meson8b_vid_pll_final_div.hw,
&meson8b_fclk_div4.hw,
&meson8b_fclk_div3.hw,
@@ -1240,8 +1283,8 @@ static struct clk_regmap meson8b_vclk_in_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk_in_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_mux_parent_hws),
+ .parent_hws = meson8b_vclk_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_parents),
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1278,13 +1321,13 @@ static struct clk_regmap meson8b_vclk_en = {
},
};
-static struct clk_regmap meson8b_vclk_div1_gate = {
+static struct clk_regmap meson8b_vclk_div1 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 0,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div1_en",
+ .name = "vclk_div1",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1298,7 +1341,7 @@ static struct clk_fixed_factor meson8b_vclk_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
- .name = "vclk_div2",
+ .name = "vclk_div2_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1308,13 +1351,13 @@ static struct clk_fixed_factor meson8b_vclk_div2_div = {
}
};
-static struct clk_regmap meson8b_vclk_div2_div_gate = {
+static struct clk_regmap meson8b_vclk_div2 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 1,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div2_en",
+ .name = "vclk_div2",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div2_div.hw
@@ -1328,7 +1371,7 @@ static struct clk_fixed_factor meson8b_vclk_div4_div = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data){
- .name = "vclk_div4",
+ .name = "vclk_div4_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1338,13 +1381,13 @@ static struct clk_fixed_factor meson8b_vclk_div4_div = {
}
};
-static struct clk_regmap meson8b_vclk_div4_div_gate = {
+static struct clk_regmap meson8b_vclk_div4 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 2,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div4_en",
+ .name = "vclk_div4",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div4_div.hw
@@ -1358,7 +1401,7 @@ static struct clk_fixed_factor meson8b_vclk_div6_div = {
.mult = 1,
.div = 6,
.hw.init = &(struct clk_init_data){
- .name = "vclk_div6",
+ .name = "vclk_div6_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1368,13 +1411,13 @@ static struct clk_fixed_factor meson8b_vclk_div6_div = {
}
};
-static struct clk_regmap meson8b_vclk_div6_div_gate = {
+static struct clk_regmap meson8b_vclk_div6 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 3,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div6_en",
+ .name = "vclk_div6",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div6_div.hw
@@ -1388,7 +1431,7 @@ static struct clk_fixed_factor meson8b_vclk_div12_div = {
.mult = 1,
.div = 12,
.hw.init = &(struct clk_init_data){
- .name = "vclk_div12",
+ .name = "vclk_div12_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1398,13 +1441,13 @@ static struct clk_fixed_factor meson8b_vclk_div12_div = {
}
};
-static struct clk_regmap meson8b_vclk_div12_div_gate = {
+static struct clk_regmap meson8b_vclk_div12 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 4,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div12_en",
+ .name = "vclk_div12",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div12_div.hw
@@ -1423,13 +1466,13 @@ static struct clk_regmap meson8b_vclk2_in_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_in_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_mux_parent_hws),
+ .parent_hws = meson8b_vclk_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_parents),
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
},
};
-static struct clk_regmap meson8b_vclk2_clk_in_en = {
+static struct clk_regmap meson8b_vclk2_in_en = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 16,
@@ -1445,7 +1488,7 @@ static struct clk_regmap meson8b_vclk2_clk_in_en = {
},
};
-static struct clk_regmap meson8b_vclk2_clk_en = {
+static struct clk_regmap meson8b_vclk2_en = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 19,
@@ -1454,23 +1497,23 @@ static struct clk_regmap meson8b_vclk2_clk_en = {
.name = "vclk2_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_in_en.hw
+ &meson8b_vclk2_in_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap meson8b_vclk2_div1_gate = {
+static struct clk_regmap meson8b_vclk2_div1 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 0,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div1_en",
+ .name = "vclk2_div1",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1481,23 +1524,23 @@ static struct clk_fixed_factor meson8b_vclk2_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div2",
+ .name = "vclk2_div2_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
};
-static struct clk_regmap meson8b_vclk2_div2_div_gate = {
+static struct clk_regmap meson8b_vclk2_div2 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 1,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div2_en",
+ .name = "vclk2_div2",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div2_div.hw
@@ -1511,23 +1554,23 @@ static struct clk_fixed_factor meson8b_vclk2_div4_div = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div4",
+ .name = "vclk2_div4_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
};
-static struct clk_regmap meson8b_vclk2_div4_div_gate = {
+static struct clk_regmap meson8b_vclk2_div4 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 2,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div4_en",
+ .name = "vclk2_div4",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div4_div.hw
@@ -1541,23 +1584,23 @@ static struct clk_fixed_factor meson8b_vclk2_div6_div = {
.mult = 1,
.div = 6,
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div6",
+ .name = "vclk2_div6_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
};
-static struct clk_regmap meson8b_vclk2_div6_div_gate = {
+static struct clk_regmap meson8b_vclk2_div6 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 3,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div6_en",
+ .name = "vclk2_div6",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div6_div.hw
@@ -1571,23 +1614,23 @@ static struct clk_fixed_factor meson8b_vclk2_div12_div = {
.mult = 1,
.div = 12,
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div12",
+ .name = "vclk2_div12_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
};
-static struct clk_regmap meson8b_vclk2_div12_div_gate = {
+static struct clk_regmap meson8b_vclk2_div12 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 4,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div12_en",
+ .name = "vclk2_div12",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div12_div.hw
@@ -1597,12 +1640,12 @@ static struct clk_regmap meson8b_vclk2_div12_div_gate = {
},
};
-static const struct clk_hw *meson8b_vclk_enc_mux_parent_hws[] = {
- &meson8b_vclk_div1_gate.hw,
- &meson8b_vclk_div2_div_gate.hw,
- &meson8b_vclk_div4_div_gate.hw,
- &meson8b_vclk_div6_div_gate.hw,
- &meson8b_vclk_div12_div_gate.hw,
+static const struct clk_hw *meson8b_vclk_enc_parents[] = {
+ &meson8b_vclk_div1.hw,
+ &meson8b_vclk_div2.hw,
+ &meson8b_vclk_div4.hw,
+ &meson8b_vclk_div6.hw,
+ &meson8b_vclk_div12.hw,
};
static struct clk_regmap meson8b_cts_enct_sel = {
@@ -1614,8 +1657,8 @@ static struct clk_regmap meson8b_cts_enct_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_enct_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1645,8 +1688,8 @@ static struct clk_regmap meson8b_cts_encp_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1676,8 +1719,8 @@ static struct clk_regmap meson8b_cts_enci_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1707,8 +1750,8 @@ static struct clk_regmap meson8b_hdmi_tx_pixel_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_tx_pixel_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1729,14 +1772,6 @@ static struct clk_regmap meson8b_hdmi_tx_pixel = {
},
};
-static const struct clk_hw *meson8b_vclk2_enc_mux_parent_hws[] = {
- &meson8b_vclk2_div1_gate.hw,
- &meson8b_vclk2_div2_div_gate.hw,
- &meson8b_vclk2_div4_div_gate.hw,
- &meson8b_vclk2_div6_div_gate.hw,
- &meson8b_vclk2_div12_div_gate.hw,
-};
-
static struct clk_regmap meson8b_cts_encl_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_VIID_CLK_DIV,
@@ -1746,8 +1781,8 @@ static struct clk_regmap meson8b_cts_encl_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_encl_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk2_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk2_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1777,8 +1812,8 @@ static struct clk_regmap meson8b_cts_vdac0_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_vdac0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk2_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk2_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1861,7 +1896,8 @@ static struct clk_regmap meson8b_hdmi_sys = {
* CLK_SET_RATE_GATE is set.
* Meson8 only has mali_0 and no glitch-free mux.
*/
-static const struct clk_parent_data meson8b_mali_0_1_parent_data[] = {
+static u32 meson8b_mali_parents_val_table[] = { 0, 2, 3, 4, 5, 6, 7 };
+static const struct clk_parent_data meson8b_mali_parents[] = {
{ .fw_name = "xtal", .name = "xtal", .index = -1, },
{ .hw = &meson8b_mpll2.hw, },
{ .hw = &meson8b_mpll1.hw, },
@@ -1871,20 +1907,18 @@ static const struct clk_parent_data meson8b_mali_0_1_parent_data[] = {
{ .hw = &meson8b_fclk_div5.hw, },
};
-static u32 meson8b_mali_0_1_mux_table[] = { 0, 2, 3, 4, 5, 6, 7 };
-
static struct clk_regmap meson8b_mali_0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MALI_CLK_CNTL,
.mask = 0x7,
.shift = 9,
- .table = meson8b_mali_0_1_mux_table,
+ .table = meson8b_mali_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = meson8b_mali_0_1_parent_data,
- .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_data),
+ .parent_data = meson8b_mali_parents,
+ .num_parents = ARRAY_SIZE(meson8b_mali_parents),
/*
* Don't propagate rate changes up because the only changeable
* parents are mpll1 and mpll2 but we need those for audio and
@@ -1933,13 +1967,13 @@ static struct clk_regmap meson8b_mali_1_sel = {
.offset = HHI_MALI_CLK_CNTL,
.mask = 0x7,
.shift = 25,
- .table = meson8b_mali_0_1_mux_table,
+ .table = meson8b_mali_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = meson8b_mali_0_1_parent_data,
- .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_data),
+ .parent_data = meson8b_mali_parents,
+ .num_parents = ARRAY_SIZE(meson8b_mali_parents),
/*
* Don't propagate rate changes up because the only changeable
* parents are mpll1 and mpll2 but we need those for audio and
@@ -2074,20 +2108,13 @@ static struct clk_regmap meson8m2_gp_pll = {
},
};
-static const struct clk_hw *meson8b_vpu_0_1_parent_hws[] = {
+static const struct clk_hw *meson8b_vpu_parents[] = {
&meson8b_fclk_div4.hw,
&meson8b_fclk_div3.hw,
&meson8b_fclk_div5.hw,
&meson8b_fclk_div7.hw,
};
-static const struct clk_hw *mmeson8m2_vpu_0_1_parent_hws[] = {
- &meson8b_fclk_div4.hw,
- &meson8b_fclk_div3.hw,
- &meson8b_fclk_div5.hw,
- &meson8m2_gp_pll.hw,
-};
-
static struct clk_regmap meson8b_vpu_0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_VPU_CLK_CNTL,
@@ -2097,12 +2124,19 @@ static struct clk_regmap meson8b_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vpu_0_1_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_hws),
+ .parent_hws = meson8b_vpu_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vpu_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
+static const struct clk_hw *meson8m2_vpu_parents[] = {
+ &meson8b_fclk_div4.hw,
+ &meson8b_fclk_div3.hw,
+ &meson8b_fclk_div5.hw,
+ &meson8m2_gp_pll.hw,
+};
+
static struct clk_regmap meson8m2_vpu_0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_VPU_CLK_CNTL,
@@ -2112,8 +2146,8 @@ static struct clk_regmap meson8m2_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = mmeson8m2_vpu_0_1_parent_hws,
- .num_parents = ARRAY_SIZE(mmeson8m2_vpu_0_1_parent_hws),
+ .parent_hws = meson8m2_vpu_parents,
+ .num_parents = ARRAY_SIZE(meson8m2_vpu_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2168,8 +2202,8 @@ static struct clk_regmap meson8b_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vpu_0_1_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_hws),
+ .parent_hws = meson8b_vpu_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vpu_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2183,8 +2217,8 @@ static struct clk_regmap meson8m2_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = mmeson8m2_vpu_0_1_parent_hws,
- .num_parents = ARRAY_SIZE(mmeson8m2_vpu_0_1_parent_hws),
+ .parent_hws = meson8m2_vpu_parents,
+ .num_parents = ARRAY_SIZE(meson8m2_vpu_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2256,7 +2290,7 @@ static struct clk_regmap meson8b_vpu = {
},
};
-static const struct clk_hw *meson8b_vdec_parent_hws[] = {
+static const struct clk_hw *meson8b_vdec_parents[] = {
&meson8b_fclk_div4.hw,
&meson8b_fclk_div3.hw,
&meson8b_fclk_div5.hw,
@@ -2275,8 +2309,8 @@ static struct clk_regmap meson8b_vdec_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
+ .parent_hws = meson8b_vdec_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2378,8 +2412,8 @@ static struct clk_regmap meson8b_vdec_hcodec_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hcodec_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
+ .parent_hws = meson8b_vdec_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2428,8 +2462,8 @@ static struct clk_regmap meson8b_vdec_2_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_2_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
+ .parent_hws = meson8b_vdec_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2478,8 +2512,8 @@ static struct clk_regmap meson8b_vdec_hevc_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
+ .parent_hws = meson8b_vdec_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2538,27 +2572,26 @@ static struct clk_regmap meson8b_vdec_hevc = {
};
/* TODO: the clock at index 0 is "DDR_PLL" which we don't support yet */
-static const struct clk_hw *meson8b_cts_amclk_parent_hws[] = {
+static u32 meson8b_cts_mclk_parents_val_table[] = { 1, 2, 3 };
+static const struct clk_hw *meson8b_cts_mclk_parents[] = {
&meson8b_mpll0.hw,
&meson8b_mpll1.hw,
&meson8b_mpll2.hw
};
-static u32 meson8b_cts_amclk_mux_table[] = { 1, 2, 3 };
-
static struct clk_regmap meson8b_cts_amclk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_AUD_CLK_CNTL,
.mask = 0x3,
.shift = 9,
- .table = meson8b_cts_amclk_mux_table,
+ .table = meson8b_cts_mclk_parents_val_table,
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_cts_amclk_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_cts_amclk_parent_hws),
+ .parent_hws = meson8b_cts_mclk_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cts_mclk_parents),
},
};
@@ -2596,28 +2629,19 @@ static struct clk_regmap meson8b_cts_amclk = {
},
};
-/* TODO: the clock at index 0 is "DDR_PLL" which we don't support yet */
-static const struct clk_hw *meson8b_cts_mclk_i958_parent_hws[] = {
- &meson8b_mpll0.hw,
- &meson8b_mpll1.hw,
- &meson8b_mpll2.hw
-};
-
-static u32 meson8b_cts_mclk_i958_mux_table[] = { 1, 2, 3 };
-
static struct clk_regmap meson8b_cts_mclk_i958_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_AUD_CLK_CNTL2,
.mask = 0x3,
.shift = 25,
- .table = meson8b_cts_mclk_i958_mux_table,
+ .table = meson8b_cts_mclk_parents_val_table,
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_cts_mclk_i958_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_cts_mclk_i958_parent_hws),
+ .parent_hws = meson8b_cts_mclk_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cts_mclk_parents),
},
};
@@ -2677,113 +2701,128 @@ static struct clk_regmap meson8b_cts_i958 = {
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &meson8b_clk81.hw)
-
-/* Everything Else (EE) domain gates */
-
-static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0);
-static MESON_GATE(meson8b_dos, HHI_GCLK_MPEG0, 1);
-static MESON_GATE(meson8b_isa, HHI_GCLK_MPEG0, 5);
-static MESON_GATE(meson8b_pl301, HHI_GCLK_MPEG0, 6);
-static MESON_GATE(meson8b_periphs, HHI_GCLK_MPEG0, 7);
-static MESON_GATE(meson8b_spicc, HHI_GCLK_MPEG0, 8);
-static MESON_GATE(meson8b_i2c, HHI_GCLK_MPEG0, 9);
-static MESON_GATE(meson8b_sar_adc, HHI_GCLK_MPEG0, 10);
-static MESON_GATE(meson8b_smart_card, HHI_GCLK_MPEG0, 11);
-static MESON_GATE(meson8b_rng0, HHI_GCLK_MPEG0, 12);
-static MESON_GATE(meson8b_uart0, HHI_GCLK_MPEG0, 13);
-static MESON_GATE(meson8b_sdhc, HHI_GCLK_MPEG0, 14);
-static MESON_GATE(meson8b_stream, HHI_GCLK_MPEG0, 15);
-static MESON_GATE(meson8b_async_fifo, HHI_GCLK_MPEG0, 16);
-static MESON_GATE(meson8b_sdio, HHI_GCLK_MPEG0, 17);
-static MESON_GATE(meson8b_abuf, HHI_GCLK_MPEG0, 18);
-static MESON_GATE(meson8b_hiu_iface, HHI_GCLK_MPEG0, 19);
-static MESON_GATE(meson8b_assist_misc, HHI_GCLK_MPEG0, 23);
-static MESON_GATE(meson8b_spi, HHI_GCLK_MPEG0, 30);
-
-static MESON_GATE(meson8b_i2s_spdif, HHI_GCLK_MPEG1, 2);
-static MESON_GATE(meson8b_eth, HHI_GCLK_MPEG1, 3);
-static MESON_GATE(meson8b_demux, HHI_GCLK_MPEG1, 4);
-static MESON_GATE(meson8b_blkmv, HHI_GCLK_MPEG1, 14);
-static MESON_GATE(meson8b_aiu, HHI_GCLK_MPEG1, 15);
-static MESON_GATE(meson8b_uart1, HHI_GCLK_MPEG1, 16);
-static MESON_GATE(meson8b_g2d, HHI_GCLK_MPEG1, 20);
-static MESON_GATE(meson8b_usb0, HHI_GCLK_MPEG1, 21);
-static MESON_GATE(meson8b_usb1, HHI_GCLK_MPEG1, 22);
-static MESON_GATE(meson8b_reset, HHI_GCLK_MPEG1, 23);
-static MESON_GATE(meson8b_nand, HHI_GCLK_MPEG1, 24);
-static MESON_GATE(meson8b_dos_parser, HHI_GCLK_MPEG1, 25);
-static MESON_GATE(meson8b_usb, HHI_GCLK_MPEG1, 26);
-static MESON_GATE(meson8b_vdin1, HHI_GCLK_MPEG1, 28);
-static MESON_GATE(meson8b_ahb_arb0, HHI_GCLK_MPEG1, 29);
-static MESON_GATE(meson8b_efuse, HHI_GCLK_MPEG1, 30);
-static MESON_GATE(meson8b_boot_rom, HHI_GCLK_MPEG1, 31);
-
-static MESON_GATE(meson8b_ahb_data_bus, HHI_GCLK_MPEG2, 1);
-static MESON_GATE(meson8b_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
-static MESON_GATE(meson8b_hdmi_intr_sync, HHI_GCLK_MPEG2, 3);
-static MESON_GATE(meson8b_hdmi_pclk, HHI_GCLK_MPEG2, 4);
-static MESON_GATE(meson8b_usb1_ddr_bridge, HHI_GCLK_MPEG2, 8);
-static MESON_GATE(meson8b_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9);
-static MESON_GATE(meson8b_mmc_pclk, HHI_GCLK_MPEG2, 11);
-static MESON_GATE(meson8b_dvin, HHI_GCLK_MPEG2, 12);
-static MESON_GATE(meson8b_uart2, HHI_GCLK_MPEG2, 15);
-static MESON_GATE(meson8b_sana, HHI_GCLK_MPEG2, 22);
-static MESON_GATE(meson8b_vpu_intr, HHI_GCLK_MPEG2, 25);
-static MESON_GATE(meson8b_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
-static MESON_GATE(meson8b_clk81_a9, HHI_GCLK_MPEG2, 29);
-
-static MESON_GATE(meson8b_vclk2_venci0, HHI_GCLK_OTHER, 1);
-static MESON_GATE(meson8b_vclk2_venci1, HHI_GCLK_OTHER, 2);
-static MESON_GATE(meson8b_vclk2_vencp0, HHI_GCLK_OTHER, 3);
-static MESON_GATE(meson8b_vclk2_vencp1, HHI_GCLK_OTHER, 4);
-static MESON_GATE(meson8b_gclk_venci_int, HHI_GCLK_OTHER, 8);
-static MESON_GATE(meson8b_gclk_vencp_int, HHI_GCLK_OTHER, 9);
-static MESON_GATE(meson8b_dac_clk, HHI_GCLK_OTHER, 10);
-static MESON_GATE(meson8b_aoclk_gate, HHI_GCLK_OTHER, 14);
-static MESON_GATE(meson8b_iec958_gate, HHI_GCLK_OTHER, 16);
-static MESON_GATE(meson8b_enc480p, HHI_GCLK_OTHER, 20);
-static MESON_GATE(meson8b_rng1, HHI_GCLK_OTHER, 21);
-static MESON_GATE(meson8b_gclk_vencl_int, HHI_GCLK_OTHER, 22);
-static MESON_GATE(meson8b_vclk2_venclmcc, HHI_GCLK_OTHER, 24);
-static MESON_GATE(meson8b_vclk2_vencl, HHI_GCLK_OTHER, 25);
-static MESON_GATE(meson8b_vclk2_other, HHI_GCLK_OTHER, 26);
-static MESON_GATE(meson8b_edp, HHI_GCLK_OTHER, 31);
+static const struct clk_parent_data meson8b_pclk_parents = { .hw = &meson8b_clk81.hw };
+
+#define MESON8B_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(_name, _reg, _bit, &meson8b_pclk_parents, _flags)
+
+/*
+ * Everything Else (EE) domain gates
+ *
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historical
+ * reasons. Users are encouraged to test without it and submit changes to:
+ * - remove the flag if it is not necessary
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ *   if appropriate
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ *   for a particular clock
+ */
+static MESON8B_PCLK(meson8b_ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_dos, HHI_GCLK_MPEG0, 1, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_isa, HHI_GCLK_MPEG0, 5, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_pl301, HHI_GCLK_MPEG0, 6, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_periphs, HHI_GCLK_MPEG0, 7, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_spicc, HHI_GCLK_MPEG0, 8, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_i2c, HHI_GCLK_MPEG0, 9, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sar_adc, HHI_GCLK_MPEG0, 10, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_smart_card, HHI_GCLK_MPEG0, 11, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_rng0, HHI_GCLK_MPEG0, 12, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_uart0, HHI_GCLK_MPEG0, 13, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sdhc, HHI_GCLK_MPEG0, 14, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_stream, HHI_GCLK_MPEG0, 15, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_async_fifo, HHI_GCLK_MPEG0, 16, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sdio, HHI_GCLK_MPEG0, 17, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_abuf, HHI_GCLK_MPEG0, 18, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_hiu_iface, HHI_GCLK_MPEG0, 19, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_assist_misc, HHI_GCLK_MPEG0, 23, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_spi, HHI_GCLK_MPEG0, 30, CLK_IGNORE_UNUSED);
+
+static MESON8B_PCLK(meson8b_i2s_spdif, HHI_GCLK_MPEG1, 2, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_eth, HHI_GCLK_MPEG1, 3, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_demux, HHI_GCLK_MPEG1, 4, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_blkmv, HHI_GCLK_MPEG1, 14, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_aiu, HHI_GCLK_MPEG1, 15, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_uart1, HHI_GCLK_MPEG1, 16, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_g2d, HHI_GCLK_MPEG1, 20, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb0, HHI_GCLK_MPEG1, 21, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb1, HHI_GCLK_MPEG1, 22, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_reset, HHI_GCLK_MPEG1, 23, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_nand, HHI_GCLK_MPEG1, 24, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_dos_parser, HHI_GCLK_MPEG1, 25, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb, HHI_GCLK_MPEG1, 26, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vdin1, HHI_GCLK_MPEG1, 28, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ahb_arb0, HHI_GCLK_MPEG1, 29, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_efuse, HHI_GCLK_MPEG1, 30, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_boot_rom, HHI_GCLK_MPEG1, 31, CLK_IGNORE_UNUSED);
+
+static MESON8B_PCLK(meson8b_ahb_data_bus, HHI_GCLK_MPEG2, 1, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_hdmi_intr_sync, HHI_GCLK_MPEG2, 3, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_hdmi_pclk, HHI_GCLK_MPEG2, 4, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb1_ddr_bridge, HHI_GCLK_MPEG2, 8, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_mmc_pclk, HHI_GCLK_MPEG2, 11, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_dvin, HHI_GCLK_MPEG2, 12, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_uart2, HHI_GCLK_MPEG2, 15, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sana, HHI_GCLK_MPEG2, 22, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vpu_intr, HHI_GCLK_MPEG2, 25, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_clk81_a9, HHI_GCLK_MPEG2, 29, CLK_IGNORE_UNUSED);
+
+static MESON8B_PCLK(meson8b_vclk2_venci0, HHI_GCLK_OTHER, 1, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_venci1, HHI_GCLK_OTHER, 2, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_vencp0, HHI_GCLK_OTHER, 3, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_vencp1, HHI_GCLK_OTHER, 4, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_gclk_venci_int, HHI_GCLK_OTHER, 8, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_gclk_vencp_int, HHI_GCLK_OTHER, 9, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_dac_clk, HHI_GCLK_OTHER, 10, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_aoclk_gate, HHI_GCLK_OTHER, 14, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_iec958_gate, HHI_GCLK_OTHER, 16, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_enc480p, HHI_GCLK_OTHER, 20, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_rng1, HHI_GCLK_OTHER, 21, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_gclk_vencl_int, HHI_GCLK_OTHER, 22, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_venclmcc, HHI_GCLK_OTHER, 24, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_vencl, HHI_GCLK_OTHER, 25, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_other, HHI_GCLK_OTHER, 26, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_edp, HHI_GCLK_OTHER, 31, CLK_IGNORE_UNUSED);
/* AIU gates */
-#define MESON_AIU_GLUE_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &meson8b_aiu_glue.hw)
-
-static MESON_PCLK(meson8b_aiu_glue, HHI_GCLK_MPEG1, 6, &meson8b_aiu.hw);
-static MESON_AIU_GLUE_GATE(meson8b_iec958, HHI_GCLK_MPEG1, 7);
-static MESON_AIU_GLUE_GATE(meson8b_i2s_out, HHI_GCLK_MPEG1, 8);
-static MESON_AIU_GLUE_GATE(meson8b_amclk, HHI_GCLK_MPEG1, 9);
-static MESON_AIU_GLUE_GATE(meson8b_aififo2, HHI_GCLK_MPEG1, 10);
-static MESON_AIU_GLUE_GATE(meson8b_mixer, HHI_GCLK_MPEG1, 11);
-static MESON_AIU_GLUE_GATE(meson8b_mixer_iface, HHI_GCLK_MPEG1, 12);
-static MESON_AIU_GLUE_GATE(meson8b_adc, HHI_GCLK_MPEG1, 13);
+static const struct clk_parent_data meson8b_aiu_glue_parents = { .hw = &meson8b_aiu.hw };
+static MESON_PCLK(meson8b_aiu_glue, HHI_GCLK_MPEG1, 6,
+ &meson8b_aiu_glue_parents, CLK_IGNORE_UNUSED);
+
+static const struct clk_parent_data meson8b_aiu_pclk_parents = { .hw = &meson8b_aiu_glue.hw };
+#define MESON8B_AIU_PCLK(_name, _bit, _flags) \
+ MESON_PCLK(_name, HHI_GCLK_MPEG1, _bit, &meson8b_aiu_pclk_parents, _flags)
+
+static MESON8B_AIU_PCLK(meson8b_iec958, 7, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_i2s_out, 8, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_amclk, 9, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_aififo2, 10, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_mixer, 11, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_mixer_iface, 12, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_adc, 13, CLK_IGNORE_UNUSED);
/* Always On (AO) domain gates */
-static MESON_GATE(meson8b_ao_media_cpu, HHI_GCLK_AO, 0);
-static MESON_GATE(meson8b_ao_ahb_sram, HHI_GCLK_AO, 1);
-static MESON_GATE(meson8b_ao_ahb_bus, HHI_GCLK_AO, 2);
-static MESON_GATE(meson8b_ao_iface, HHI_GCLK_AO, 3);
+static MESON8B_PCLK(meson8b_ao_media_cpu, HHI_GCLK_AO, 0, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ao_ahb_sram, HHI_GCLK_AO, 1, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ao_ahb_bus, HHI_GCLK_AO, 2, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ao_iface, HHI_GCLK_AO, 3, CLK_IGNORE_UNUSED);
static struct clk_hw *meson8_hw_clks[] = {
- [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
- [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
- [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
- [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
- [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
- [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
- [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
- [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
- [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
- [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
- [CLKID_CLK81] = &meson8b_clk81.hw,
+ [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
+ [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
+ [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
+ [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
+ [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
+ [CLKID_MPEG_SEL] = &meson8b_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &meson8b_clk81_div.hw,
+ [CLKID_CLK81] = &meson8b_clk81.hw,
[CLKID_DDR] = &meson8b_ddr.hw,
[CLKID_DOS] = &meson8b_dos.hw,
[CLKID_ISA] = &meson8b_isa.hw,
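The MESON8B_PCLK and MESON8B_AIU_PCLK wrappers above thread a clk_parent_data pointer and explicit flags through the shared MESON_PCLK macro. A rough sketch of the gate definition such a macro presumably expands to -- the name and details are assumptions, not taken from this patch:

/* Hypothetical sketch of the shared MESON_PCLK gate macro; the real
 * definition lives in the common meson clk headers and may differ. */
#define DEMO_PCLK(_name, _reg, _bit, _pdata, _flags)			\
static struct clk_regmap _name = {					\
	.data = &(struct clk_regmap_gate_data){				\
		.offset = (_reg),					\
		.bit_idx = (_bit),					\
	},								\
	.hw.init = &(struct clk_init_data){				\
		.name = #_name,						\
		.ops = &clk_regmap_gate_ops,				\
		.parent_data = (_pdata),				\
		.num_parents = 1,					\
		.flags = (_flags),					\
	},								\
}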
@@ -2880,7 +2919,7 @@ static struct clk_hw *meson8_hw_clks[] = {
[CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
[CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
[CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
- [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
+ [CLKID_NAND_CLK] = &meson8b_nand_clk.hw,
[CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
[CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw,
[CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
@@ -2891,14 +2930,14 @@ static struct clk_hw *meson8_hw_clks[] = {
[CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
[CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
[CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
- [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
- [CLKID_APB] = &meson8b_apb_clk_gate.hw,
- [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
- [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
- [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
- [CLKID_AXI] = &meson8b_axi_clk_gate.hw,
- [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw,
- [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw,
+ [CLKID_APB_SEL] = &meson8b_apb_sel.hw,
+ [CLKID_APB] = &meson8b_apb.hw,
+ [CLKID_PERIPH_SEL] = &meson8b_periph_sel.hw,
+ [CLKID_PERIPH] = &meson8b_periph.hw,
+ [CLKID_AXI_SEL] = &meson8b_axi_sel.hw,
+ [CLKID_AXI] = &meson8b_axi.hw,
+ [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_sel.hw,
+ [CLKID_L2_DRAM] = &meson8b_l2_dram.hw,
[CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw,
[CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw,
[CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw,
@@ -2909,27 +2948,27 @@ static struct clk_hw *meson8_hw_clks[] = {
[CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw,
[CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw,
[CLKID_VCLK_EN] = &meson8b_vclk_en.hw,
- [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw,
+ [CLKID_VCLK_DIV1] = &meson8b_vclk_div1.hw,
[CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw,
- [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw,
+ [CLKID_VCLK_DIV2] = &meson8b_vclk_div2.hw,
[CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw,
- [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw,
+ [CLKID_VCLK_DIV4] = &meson8b_vclk_div4.hw,
[CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw,
- [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw,
+ [CLKID_VCLK_DIV6] = &meson8b_vclk_div6.hw,
[CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw,
- [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw,
+ [CLKID_VCLK_DIV12] = &meson8b_vclk_div12.hw,
[CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw,
- [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw,
- [CLKID_VCLK2_EN] = &meson8b_vclk2_clk_en.hw,
- [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw,
+ [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_in_en.hw,
+ [CLKID_VCLK2_EN] = &meson8b_vclk2_en.hw,
+ [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1.hw,
[CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw,
- [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw,
+ [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2.hw,
[CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw,
- [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw,
+ [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4.hw,
[CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw,
- [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw,
+ [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6.hw,
[CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw,
- [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw,
+ [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12.hw,
[CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw,
[CLKID_CTS_ENCT] = &meson8b_cts_enct.hw,
[CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw,
@@ -2976,18 +3015,18 @@ static struct clk_hw *meson8_hw_clks[] = {
};
static struct clk_hw *meson8b_hw_clks[] = {
- [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
- [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
- [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
- [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
- [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
- [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
- [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
- [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
- [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
- [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
- [CLKID_CLK81] = &meson8b_clk81.hw,
+ [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
+ [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
+ [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
+ [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
+ [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
+ [CLKID_MPEG_SEL] = &meson8b_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &meson8b_clk81_div.hw,
+ [CLKID_CLK81] = &meson8b_clk81.hw,
[CLKID_DDR] = &meson8b_ddr.hw,
[CLKID_DOS] = &meson8b_dos.hw,
[CLKID_ISA] = &meson8b_isa.hw,
@@ -3084,7 +3123,7 @@ static struct clk_hw *meson8b_hw_clks[] = {
[CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
[CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
[CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
- [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
+ [CLKID_NAND_CLK] = &meson8b_nand_clk.hw,
[CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
[CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw,
[CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
@@ -3095,14 +3134,14 @@ static struct clk_hw *meson8b_hw_clks[] = {
[CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
[CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
[CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
- [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
- [CLKID_APB] = &meson8b_apb_clk_gate.hw,
- [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
- [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
- [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
- [CLKID_AXI] = &meson8b_axi_clk_gate.hw,
- [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw,
- [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw,
+ [CLKID_APB_SEL] = &meson8b_apb_sel.hw,
+ [CLKID_APB] = &meson8b_apb.hw,
+ [CLKID_PERIPH_SEL] = &meson8b_periph_sel.hw,
+ [CLKID_PERIPH] = &meson8b_periph.hw,
+ [CLKID_AXI_SEL] = &meson8b_axi_sel.hw,
+ [CLKID_AXI] = &meson8b_axi.hw,
+ [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_sel.hw,
+ [CLKID_L2_DRAM] = &meson8b_l2_dram.hw,
[CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw,
[CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw,
[CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw,
@@ -3113,27 +3152,27 @@ static struct clk_hw *meson8b_hw_clks[] = {
[CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw,
[CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw,
[CLKID_VCLK_EN] = &meson8b_vclk_en.hw,
- [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw,
+ [CLKID_VCLK_DIV1] = &meson8b_vclk_div1.hw,
[CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw,
- [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw,
+ [CLKID_VCLK_DIV2] = &meson8b_vclk_div2.hw,
[CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw,
- [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw,
+ [CLKID_VCLK_DIV4] = &meson8b_vclk_div4.hw,
[CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw,
- [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw,
+ [CLKID_VCLK_DIV6] = &meson8b_vclk_div6.hw,
[CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw,
- [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw,
+ [CLKID_VCLK_DIV12] = &meson8b_vclk_div12.hw,
[CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw,
- [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw,
- [CLKID_VCLK2_EN] = &meson8b_vclk2_clk_en.hw,
- [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw,
+ [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_in_en.hw,
+ [CLKID_VCLK2_EN] = &meson8b_vclk2_en.hw,
+ [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1.hw,
[CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw,
- [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw,
+ [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2.hw,
[CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw,
- [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw,
+ [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4.hw,
[CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw,
- [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw,
+ [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6.hw,
[CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw,
- [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw,
+ [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12.hw,
[CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw,
[CLKID_CTS_ENCT] = &meson8b_cts_enct.hw,
[CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw,
@@ -3191,18 +3230,18 @@ static struct clk_hw *meson8b_hw_clks[] = {
};
static struct clk_hw *meson8m2_hw_clks[] = {
- [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
- [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
- [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
- [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
- [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
- [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
- [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
- [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
- [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
- [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
- [CLKID_CLK81] = &meson8b_clk81.hw,
+ [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
+ [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
+ [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
+ [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
+ [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
+ [CLKID_MPEG_SEL] = &meson8b_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &meson8b_clk81_div.hw,
+ [CLKID_CLK81] = &meson8b_clk81.hw,
[CLKID_DDR] = &meson8b_ddr.hw,
[CLKID_DOS] = &meson8b_dos.hw,
[CLKID_ISA] = &meson8b_isa.hw,
@@ -3299,7 +3338,7 @@ static struct clk_hw *meson8m2_hw_clks[] = {
[CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
[CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
[CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
- [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
+ [CLKID_NAND_CLK] = &meson8b_nand_clk.hw,
[CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
[CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw,
[CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
@@ -3310,14 +3349,14 @@ static struct clk_hw *meson8m2_hw_clks[] = {
[CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
[CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
[CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
- [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
- [CLKID_APB] = &meson8b_apb_clk_gate.hw,
- [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
- [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
- [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
- [CLKID_AXI] = &meson8b_axi_clk_gate.hw,
- [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw,
- [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw,
+ [CLKID_APB_SEL] = &meson8b_apb_sel.hw,
+ [CLKID_APB] = &meson8b_apb.hw,
+ [CLKID_PERIPH_SEL] = &meson8b_periph_sel.hw,
+ [CLKID_PERIPH] = &meson8b_periph.hw,
+ [CLKID_AXI_SEL] = &meson8b_axi_sel.hw,
+ [CLKID_AXI] = &meson8b_axi.hw,
+ [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_sel.hw,
+ [CLKID_L2_DRAM] = &meson8b_l2_dram.hw,
[CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw,
[CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw,
[CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw,
@@ -3328,27 +3367,27 @@ static struct clk_hw *meson8m2_hw_clks[] = {
[CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw,
[CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw,
[CLKID_VCLK_EN] = &meson8b_vclk_en.hw,
- [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw,
+ [CLKID_VCLK_DIV1] = &meson8b_vclk_div1.hw,
[CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw,
- [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw,
+ [CLKID_VCLK_DIV2] = &meson8b_vclk_div2.hw,
[CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw,
- [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw,
+ [CLKID_VCLK_DIV4] = &meson8b_vclk_div4.hw,
[CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw,
- [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw,
+ [CLKID_VCLK_DIV6] = &meson8b_vclk_div6.hw,
[CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw,
- [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw,
+ [CLKID_VCLK_DIV12] = &meson8b_vclk_div12.hw,
[CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw,
- [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw,
- [CLKID_VCLK2_EN] = &meson8b_vclk2_clk_en.hw,
- [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw,
+ [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_in_en.hw,
+ [CLKID_VCLK2_EN] = &meson8b_vclk2_en.hw,
+ [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1.hw,
[CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw,
- [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw,
+ [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2.hw,
[CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw,
- [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw,
+ [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4.hw,
[CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw,
- [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw,
+ [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6.hw,
[CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw,
- [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw,
+ [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12.hw,
[CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw,
[CLKID_CTS_ENCT] = &meson8b_cts_enct.hw,
[CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw,
@@ -3407,202 +3446,6 @@ static struct clk_hw *meson8m2_hw_clks[] = {
[CLKID_HDMI_PLL_DCO_IN] = &hdmi_pll_dco_in.hw,
};
-static struct clk_regmap *const meson8b_clk_regmaps[] = {
- &meson8b_clk81,
- &meson8b_ddr,
- &meson8b_dos,
- &meson8b_isa,
- &meson8b_pl301,
- &meson8b_periphs,
- &meson8b_spicc,
- &meson8b_i2c,
- &meson8b_sar_adc,
- &meson8b_smart_card,
- &meson8b_rng0,
- &meson8b_uart0,
- &meson8b_sdhc,
- &meson8b_stream,
- &meson8b_async_fifo,
- &meson8b_sdio,
- &meson8b_abuf,
- &meson8b_hiu_iface,
- &meson8b_assist_misc,
- &meson8b_spi,
- &meson8b_i2s_spdif,
- &meson8b_eth,
- &meson8b_demux,
- &meson8b_aiu_glue,
- &meson8b_iec958,
- &meson8b_i2s_out,
- &meson8b_amclk,
- &meson8b_aififo2,
- &meson8b_mixer,
- &meson8b_mixer_iface,
- &meson8b_adc,
- &meson8b_blkmv,
- &meson8b_aiu,
- &meson8b_uart1,
- &meson8b_g2d,
- &meson8b_usb0,
- &meson8b_usb1,
- &meson8b_reset,
- &meson8b_nand,
- &meson8b_dos_parser,
- &meson8b_usb,
- &meson8b_vdin1,
- &meson8b_ahb_arb0,
- &meson8b_efuse,
- &meson8b_boot_rom,
- &meson8b_ahb_data_bus,
- &meson8b_ahb_ctrl_bus,
- &meson8b_hdmi_intr_sync,
- &meson8b_hdmi_pclk,
- &meson8b_usb1_ddr_bridge,
- &meson8b_usb0_ddr_bridge,
- &meson8b_mmc_pclk,
- &meson8b_dvin,
- &meson8b_uart2,
- &meson8b_sana,
- &meson8b_vpu_intr,
- &meson8b_sec_ahb_ahb3_bridge,
- &meson8b_clk81_a9,
- &meson8b_vclk2_venci0,
- &meson8b_vclk2_venci1,
- &meson8b_vclk2_vencp0,
- &meson8b_vclk2_vencp1,
- &meson8b_gclk_venci_int,
- &meson8b_gclk_vencp_int,
- &meson8b_dac_clk,
- &meson8b_aoclk_gate,
- &meson8b_iec958_gate,
- &meson8b_enc480p,
- &meson8b_rng1,
- &meson8b_gclk_vencl_int,
- &meson8b_vclk2_venclmcc,
- &meson8b_vclk2_vencl,
- &meson8b_vclk2_other,
- &meson8b_edp,
- &meson8b_ao_media_cpu,
- &meson8b_ao_ahb_sram,
- &meson8b_ao_ahb_bus,
- &meson8b_ao_iface,
- &meson8b_mpeg_clk_div,
- &meson8b_mpeg_clk_sel,
- &meson8b_mpll0,
- &meson8b_mpll1,
- &meson8b_mpll2,
- &meson8b_mpll0_div,
- &meson8b_mpll1_div,
- &meson8b_mpll2_div,
- &meson8b_fixed_pll,
- &meson8b_sys_pll,
- &meson8b_cpu_in_sel,
- &meson8b_cpu_scale_div,
- &meson8b_cpu_scale_out_sel,
- &meson8b_cpu_clk,
- &meson8b_mpll_prediv,
- &meson8b_fclk_div2,
- &meson8b_fclk_div3,
- &meson8b_fclk_div4,
- &meson8b_fclk_div5,
- &meson8b_fclk_div7,
- &meson8b_nand_clk_sel,
- &meson8b_nand_clk_div,
- &meson8b_nand_clk_gate,
- &meson8b_fixed_pll_dco,
- &meson8b_hdmi_pll_dco,
- &meson8b_sys_pll_dco,
- &meson8b_apb_clk_sel,
- &meson8b_apb_clk_gate,
- &meson8b_periph_clk_sel,
- &meson8b_periph_clk_gate,
- &meson8b_axi_clk_sel,
- &meson8b_axi_clk_gate,
- &meson8b_l2_dram_clk_sel,
- &meson8b_l2_dram_clk_gate,
- &meson8b_hdmi_pll_lvds_out,
- &meson8b_hdmi_pll_hdmi_out,
- &meson8b_vid_pll_in_sel,
- &meson8b_vid_pll_in_en,
- &meson8b_vid_pll_pre_div,
- &meson8b_vid_pll_post_div,
- &meson8b_vid_pll,
- &meson8b_vid_pll_final_div,
- &meson8b_vclk_in_sel,
- &meson8b_vclk_in_en,
- &meson8b_vclk_en,
- &meson8b_vclk_div1_gate,
- &meson8b_vclk_div2_div_gate,
- &meson8b_vclk_div4_div_gate,
- &meson8b_vclk_div6_div_gate,
- &meson8b_vclk_div12_div_gate,
- &meson8b_vclk2_in_sel,
- &meson8b_vclk2_clk_in_en,
- &meson8b_vclk2_clk_en,
- &meson8b_vclk2_div1_gate,
- &meson8b_vclk2_div2_div_gate,
- &meson8b_vclk2_div4_div_gate,
- &meson8b_vclk2_div6_div_gate,
- &meson8b_vclk2_div12_div_gate,
- &meson8b_cts_enct_sel,
- &meson8b_cts_enct,
- &meson8b_cts_encp_sel,
- &meson8b_cts_encp,
- &meson8b_cts_enci_sel,
- &meson8b_cts_enci,
- &meson8b_hdmi_tx_pixel_sel,
- &meson8b_hdmi_tx_pixel,
- &meson8b_cts_encl_sel,
- &meson8b_cts_encl,
- &meson8b_cts_vdac0_sel,
- &meson8b_cts_vdac0,
- &meson8b_hdmi_sys_sel,
- &meson8b_hdmi_sys_div,
- &meson8b_hdmi_sys,
- &meson8b_mali_0_sel,
- &meson8b_mali_0_div,
- &meson8b_mali_0,
- &meson8b_mali_1_sel,
- &meson8b_mali_1_div,
- &meson8b_mali_1,
- &meson8b_mali,
- &meson8m2_gp_pll_dco,
- &meson8m2_gp_pll,
- &meson8b_vpu_0_sel,
- &meson8m2_vpu_0_sel,
- &meson8b_vpu_0_div,
- &meson8b_vpu_0,
- &meson8b_vpu_1_sel,
- &meson8m2_vpu_1_sel,
- &meson8b_vpu_1_div,
- &meson8b_vpu_1,
- &meson8b_vpu,
- &meson8b_vdec_1_sel,
- &meson8b_vdec_1_1_div,
- &meson8b_vdec_1_1,
- &meson8b_vdec_1_2_div,
- &meson8b_vdec_1_2,
- &meson8b_vdec_1,
- &meson8b_vdec_hcodec_sel,
- &meson8b_vdec_hcodec_div,
- &meson8b_vdec_hcodec,
- &meson8b_vdec_2_sel,
- &meson8b_vdec_2_div,
- &meson8b_vdec_2,
- &meson8b_vdec_hevc_sel,
- &meson8b_vdec_hevc_div,
- &meson8b_vdec_hevc_en,
- &meson8b_vdec_hevc,
- &meson8b_cts_amclk,
- &meson8b_cts_amclk_sel,
- &meson8b_cts_amclk_div,
- &meson8b_cts_mclk_i958_sel,
- &meson8b_cts_mclk_i958_div,
- &meson8b_cts_mclk_i958,
- &meson8b_cts_i958,
- &meson8b_vid_pll_lvds_en,
-};
-
static const struct meson8b_clk_reset_line {
u32 reg;
u8 bit_idx;
@@ -3819,10 +3662,6 @@ static void __init meson8b_clkc_init_common(struct device_node *np,
return;
}
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(meson8b_clk_regmaps); i++)
- meson8b_clk_regmaps[i]->map = map;
-
/*
* register all clks and start with the first used ID (which is
* CLKID_PLL_FIXED)
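With the hand-maintained meson8b_clk_regmaps[] array and its population loop both gone, the regmap has to reach the clk_regmap instances some other way; presumably the shared clkc init code now walks the hw_clks table itself. A hedged sketch of what such a helper can look like -- the function name and the ops-based detection are assumptions, not taken from this patch:

#include <linux/clk-provider.h>
#include <linux/regmap.h>
#include "clk-regmap.h"

/*
 * Illustrative sketch only: hand the syscon regmap to every
 * regmap-backed clock while walking a hw_clks table, making
 * per-driver lists like the removed meson8b_clk_regmaps[] redundant.
 */
static void demo_populate_regmaps(struct clk_hw **hws, unsigned int count,
				  struct regmap *map)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		/*
		 * The tables are sparse and also contain clocks that are
		 * not regmap-backed (e.g. fixed-factor dividers), so both
		 * cases have to be skipped.
		 */
		if (!hws[i] || !hws[i]->init)
			continue;

		if (hws[i]->init->ops == &clk_regmap_gate_ops ||
		    hws[i]->init->ops == &clk_regmap_mux_ops ||
		    hws[i]->init->ops == &clk_regmap_divider_ops)
			to_clk_regmap(hws[i])->map = map;
	}
}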
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
deleted file mode 100644
index a5b6e67eeefb..000000000000
--- a/drivers/clk/meson/meson8b.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2015 Endless Mobile, Inc.
- * Author: Carlo Caione <carlo@endlessm.com>
- *
- * Copyright (c) 2016 BayLibre, Inc.
- * Michael Turquette <mturquette@baylibre.com>
- */
-
-#ifndef __MESON8B_H
-#define __MESON8B_H
-
-/*
- * Clock controller register offsets
- *
- * Register offsets from the HardKernel[0] data sheet are listed in comment
- * blocks below. Those offsets must be multiplied by 4 before adding them to
- * the base address to get the right value
- *
- * [0] https://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf
- */
-#define HHI_GP_PLL_CNTL 0x40 /* 0x10 offset in data sheet */
-#define HHI_GP_PLL_CNTL2 0x44 /* 0x11 offset in data sheet */
-#define HHI_GP_PLL_CNTL3 0x48 /* 0x12 offset in data sheet */
-#define HHI_GP_PLL_CNTL4 0x4C /* 0x13 offset in data sheet */
-#define HHI_GP_PLL_CNTL5 0x50 /* 0x14 offset in data sheet */
-#define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */
-#define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */
-#define HHI_GCLK_MPEG0 0x140 /* 0x50 offset in data sheet */
-#define HHI_GCLK_MPEG1 0x144 /* 0x51 offset in data sheet */
-#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
-#define HHI_GCLK_OTHER 0x150 /* 0x54 offset in data sheet */
-#define HHI_GCLK_AO 0x154 /* 0x55 offset in data sheet */
-#define HHI_SYS_CPU_CLK_CNTL1 0x15c /* 0x57 offset in data sheet */
-#define HHI_VID_CLK_DIV 0x164 /* 0x59 offset in data sheet */
-#define HHI_MPEG_CLK_CNTL 0x174 /* 0x5d offset in data sheet */
-#define HHI_AUD_CLK_CNTL 0x178 /* 0x5e offset in data sheet */
-#define HHI_VID_CLK_CNTL 0x17c /* 0x5f offset in data sheet */
-#define HHI_AUD_CLK_CNTL2 0x190 /* 0x64 offset in data sheet */
-#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */
-#define HHI_VID_DIVIDER_CNTL 0x198 /* 0x66 offset in data sheet */
-#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */
-#define HHI_MALI_CLK_CNTL 0x1b0 /* 0x6c offset in data sheet */
-#define HHI_VPU_CLK_CNTL 0x1bc /* 0x6f offset in data sheet */
-#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 offset in data sheet */
-#define HHI_VDEC_CLK_CNTL 0x1e0 /* 0x78 offset in data sheet */
-#define HHI_VDEC2_CLK_CNTL 0x1e4 /* 0x79 offset in data sheet */
-#define HHI_VDEC3_CLK_CNTL 0x1e8 /* 0x7a offset in data sheet */
-#define HHI_NAND_CLK_CNTL 0x25c /* 0x97 offset in data sheet */
-#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
-#define HHI_SYS_PLL_CNTL 0x300 /* 0xc0 offset in data sheet */
-#define HHI_VID_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */
-#define HHI_VID_PLL_CNTL2 0x324 /* 0xc9 offset in data sheet */
-#define HHI_VID_PLL_CNTL3 0x328 /* 0xca offset in data sheet */
-#define HHI_VID_PLL_CNTL4 0x32c /* 0xcb offset in data sheet */
-#define HHI_VID_PLL_CNTL5 0x330 /* 0xcc offset in data sheet */
-#define HHI_VID_PLL_CNTL6 0x334 /* 0xcd offset in data sheet */
-#define HHI_VID2_PLL_CNTL 0x380 /* 0xe0 offset in data sheet */
-#define HHI_VID2_PLL_CNTL2 0x384 /* 0xe1 offset in data sheet */
-#define HHI_VID2_PLL_CNTL3 0x388 /* 0xe2 offset in data sheet */
-#define HHI_VID2_PLL_CNTL4 0x38c /* 0xe3 offset in data sheet */
-#define HHI_VID2_PLL_CNTL5 0x390 /* 0xe4 offset in data sheet */
-#define HHI_VID2_PLL_CNTL6 0x394 /* 0xe5 offset in data sheet */
-
-/*
- * MPLL register offeset taken from the S905 datasheet. Vendor kernel source
- * confirm these are the same for the S805.
- */
-#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
-#define HHI_MPLL_CNTL2 0x284 /* 0xa1 offset in data sheet */
-#define HHI_MPLL_CNTL3 0x288 /* 0xa2 offset in data sheet */
-#define HHI_MPLL_CNTL4 0x28C /* 0xa3 offset in data sheet */
-#define HHI_MPLL_CNTL5 0x290 /* 0xa4 offset in data sheet */
-#define HHI_MPLL_CNTL6 0x294 /* 0xa5 offset in data sheet */
-#define HHI_MPLL_CNTL7 0x298 /* 0xa6 offset in data sheet */
-#define HHI_MPLL_CNTL8 0x29C /* 0xa7 offset in data sheet */
-#define HHI_MPLL_CNTL9 0x2A0 /* 0xa8 offset in data sheet */
-#define HHI_MPLL_CNTL10 0x2A4 /* 0xa9 offset in data sheet */
-
-#endif /* __MESON8B_H */
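The note in the removed header about multiplying offsets by 4 is worth keeping in mind when cross-checking against the S805 datasheet: the datasheet numbers registers as 32-bit word indexes, while regmap and these defines use byte offsets. A trivial illustration, with a hypothetical helper macro:

/* Example: datasheet word index 0x50 becomes byte offset 0x140 */
#define DEMO_BYTE_OFFSET(word_idx)	((word_idx) * 4)	/* hypothetical */
/* DEMO_BYTE_OFFSET(0x50) == 0x140 == HHI_GCLK_MPEG0 */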
diff --git a/drivers/clk/meson/s4-peripherals.c b/drivers/clk/meson/s4-peripherals.c
index 8a4037377787..6d69b132d1e1 100644
--- a/drivers/clk/meson/s4-peripherals.c
+++ b/drivers/clk/meson/s4-peripherals.c
@@ -13,10 +13,64 @@
#include "clk-regmap.h"
#include "vid-pll-div.h"
#include "clk-dualdiv.h"
-#include "s4-peripherals.h"
#include "meson-clkc-utils.h"
#include <dt-bindings/clock/amlogic,s4-peripherals-clkc.h>
+#define CLKCTRL_RTC_BY_OSCIN_CTRL0 0x008
+#define CLKCTRL_RTC_BY_OSCIN_CTRL1 0x00c
+#define CLKCTRL_RTC_CTRL 0x010
+#define CLKCTRL_SYS_CLK_CTRL0 0x040
+#define CLKCTRL_SYS_CLK_EN0_REG0 0x044
+#define CLKCTRL_SYS_CLK_EN0_REG1 0x048
+#define CLKCTRL_SYS_CLK_EN0_REG2 0x04c
+#define CLKCTRL_SYS_CLK_EN0_REG3 0x050
+#define CLKCTRL_CECA_CTRL0 0x088
+#define CLKCTRL_CECA_CTRL1 0x08c
+#define CLKCTRL_CECB_CTRL0 0x090
+#define CLKCTRL_CECB_CTRL1 0x094
+#define CLKCTRL_SC_CLK_CTRL 0x098
+#define CLKCTRL_CLK12_24_CTRL 0x0a8
+#define CLKCTRL_VID_CLK_CTRL 0x0c0
+#define CLKCTRL_VID_CLK_CTRL2 0x0c4
+#define CLKCTRL_VID_CLK_DIV 0x0c8
+#define CLKCTRL_VIID_CLK_DIV 0x0cc
+#define CLKCTRL_VIID_CLK_CTRL 0x0d0
+#define CLKCTRL_HDMI_CLK_CTRL 0x0e0
+#define CLKCTRL_VID_PLL_CLK_DIV 0x0e4
+#define CLKCTRL_VPU_CLK_CTRL 0x0e8
+#define CLKCTRL_VPU_CLKB_CTRL 0x0ec
+#define CLKCTRL_VPU_CLKC_CTRL 0x0f0
+#define CLKCTRL_VID_LOCK_CLK_CTRL 0x0f4
+#define CLKCTRL_VDIN_MEAS_CLK_CTRL 0x0f8
+#define CLKCTRL_VAPBCLK_CTRL 0x0fc
+#define CLKCTRL_HDCP22_CTRL 0x100
+#define CLKCTRL_VDEC_CLK_CTRL 0x140
+#define CLKCTRL_VDEC2_CLK_CTRL 0x144
+#define CLKCTRL_VDEC3_CLK_CTRL 0x148
+#define CLKCTRL_VDEC4_CLK_CTRL 0x14c
+#define CLKCTRL_TS_CLK_CTRL 0x158
+#define CLKCTRL_MALI_CLK_CTRL 0x15c
+#define CLKCTRL_NAND_CLK_CTRL 0x168
+#define CLKCTRL_SD_EMMC_CLK_CTRL 0x16c
+#define CLKCTRL_SPICC_CLK_CTRL 0x174
+#define CLKCTRL_GEN_CLK_CTRL 0x178
+#define CLKCTRL_SAR_CLK_CTRL 0x17c
+#define CLKCTRL_PWM_CLK_AB_CTRL 0x180
+#define CLKCTRL_PWM_CLK_CD_CTRL 0x184
+#define CLKCTRL_PWM_CLK_EF_CTRL 0x188
+#define CLKCTRL_PWM_CLK_GH_CTRL 0x18c
+#define CLKCTRL_PWM_CLK_IJ_CTRL 0x190
+#define CLKCTRL_DEMOD_CLK_CTRL 0x200
+
+#define S4_COMP_SEL(_name, _reg, _shift, _mask, _pdata) \
+ MESON_COMP_SEL(s4_, _name, _reg, _shift, _mask, _pdata, NULL, 0, 0)
+
+#define S4_COMP_DIV(_name, _reg, _shift, _width) \
+ MESON_COMP_DIV(s4_, _name, _reg, _shift, _width, 0, CLK_SET_RATE_PARENT)
+
+#define S4_COMP_GATE(_name, _reg, _bit) \
+ MESON_COMP_GATE(s4_, _name, _reg, _bit, CLK_SET_RATE_PARENT)
+
static struct clk_regmap s4_rtc_32k_by_oscin_clkin = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_RTC_BY_OSCIN_CTRL0,
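The new S4_COMP_* wrappers lean on the shared MESON_COMP_* macros to stamp out the sel -> div -> gate triplets that this file previously spelled out by hand (the sc_clk_sel/div/gate trio later in the diff shows the expanded shape). As a rough sketch of what the sel half of such a composite might expand to -- the actual expansion lives in the shared meson clkc headers, and the trailing NULL, 0, 0 arguments suggest extra table/flags parameters not modelled here:

/* Hypothetical expansion sketch; the real MESON_COMP_SEL may differ */
#define DEMO_COMP_SEL(_prefix, _name, _reg, _shift, _mask, _pdata)	\
static struct clk_regmap _prefix##_name##_sel = {			\
	.data = &(struct clk_regmap_mux_data){				\
		.offset = (_reg),					\
		.shift = (_shift),					\
		.mask = (_mask),					\
	},								\
	.hw.init = &(struct clk_init_data){				\
		.name = #_name "_sel",					\
		.ops = &clk_regmap_mux_ops,				\
		.parent_data = _pdata,					\
		.num_parents = ARRAY_SIZE(_pdata),			\
	},								\
}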
@@ -137,8 +191,8 @@ static struct clk_regmap s4_rtc_clk = {
};
/* The index 5 is AXI_CLK, which is dedicated to AXI. So skip it. */
-static u32 mux_table_sys_ab_clk_sel[] = { 0, 1, 2, 3, 4, 6, 7 };
-static const struct clk_parent_data sys_ab_clk_parent_data[] = {
+static u32 s4_sysclk_parents_val_table[] = { 0, 1, 2, 3, 4, 6, 7 };
+static const struct clk_parent_data s4_sysclk_parents[] = {
{ .fw_name = "xtal" },
{ .fw_name = "fclk_div2" },
{ .fw_name = "fclk_div3" },
@@ -160,13 +214,13 @@ static struct clk_regmap s4_sysclk_b_sel = {
.offset = CLKCTRL_SYS_CLK_CTRL0,
.mask = 0x7,
.shift = 26,
- .table = mux_table_sys_ab_clk_sel,
+ .table = s4_sysclk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "sysclk_b_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = sys_ab_clk_parent_data,
- .num_parents = ARRAY_SIZE(sys_ab_clk_parent_data),
+ .parent_data = s4_sysclk_parents,
+ .num_parents = ARRAY_SIZE(s4_sysclk_parents),
},
};
@@ -206,13 +260,13 @@ static struct clk_regmap s4_sysclk_a_sel = {
.offset = CLKCTRL_SYS_CLK_CTRL0,
.mask = 0x7,
.shift = 10,
- .table = mux_table_sys_ab_clk_sel,
+ .table = s4_sysclk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "sysclk_a_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = sys_ab_clk_parent_data,
- .num_parents = ARRAY_SIZE(sys_ab_clk_parent_data),
+ .parent_data = s4_sysclk_parents,
+ .num_parents = ARRAY_SIZE(s4_sysclk_parents),
},
};
@@ -478,24 +532,24 @@ static struct clk_regmap s4_cecb_32k_clkout = {
},
};
-static const struct clk_parent_data s4_sc_parent_data[] = {
+static const struct clk_parent_data s4_sc_clk_parents[] = {
{ .fw_name = "fclk_div4" },
{ .fw_name = "fclk_div3" },
{ .fw_name = "fclk_div5" },
{ .fw_name = "xtal", }
};
-static struct clk_regmap s4_sc_clk_mux = {
+static struct clk_regmap s4_sc_clk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_SC_CLK_CTRL,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data) {
- .name = "sc_clk_mux",
+ .name = "sc_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_sc_parent_data,
- .num_parents = ARRAY_SIZE(s4_sc_parent_data),
+ .parent_data = s4_sc_clk_parents,
+ .num_parents = ARRAY_SIZE(s4_sc_clk_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -510,20 +564,20 @@ static struct clk_regmap s4_sc_clk_div = {
.name = "sc_clk_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_sc_clk_mux.hw
+ &s4_sc_clk_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_sc_clk_gate = {
+static struct clk_regmap s4_sc_clk = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_SC_CLK_CTRL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "sc_clk_gate",
+ .name = "sc_clk",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_sc_clk_div.hw
@@ -533,13 +587,13 @@ static struct clk_regmap s4_sc_clk_gate = {
},
};
-static struct clk_regmap s4_12_24M_clk_gate = {
+static struct clk_regmap s4_12_24M = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_CLK12_24_CTRL,
.bit_idx = 11,
},
.hw.init = &(struct clk_init_data) {
- .name = "12_24m_gate",
+ .name = "12_24M",
.ops = &clk_regmap_gate_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", }
@@ -548,32 +602,32 @@ static struct clk_regmap s4_12_24M_clk_gate = {
},
};
-static struct clk_fixed_factor s4_12M_clk_div = {
+static struct clk_fixed_factor s4_12M_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
- .name = "12M",
+ .name = "12M_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_12_24M_clk_gate.hw
+ &s4_12_24M.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_12_24M_clk = {
+static struct clk_regmap s4_12_24M_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_CLK12_24_CTRL,
.mask = 0x1,
.shift = 10,
},
.hw.init = &(struct clk_init_data) {
- .name = "12_24m",
+ .name = "12_24M_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_12_24M_clk_gate.hw,
- &s4_12M_clk_div.hw,
+ &s4_12_24M.hw,
+ &s4_12M_div.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
@@ -642,7 +696,7 @@ static struct clk_regmap s4_vid_pll = {
},
};
-static const struct clk_parent_data s4_vclk_parent_data[] = {
+static const struct clk_parent_data s4_vclk_parents[] = {
{ .hw = &s4_vid_pll.hw },
{ .fw_name = "gp0_pll", },
{ .fw_name = "hifi_pll", },
@@ -662,8 +716,8 @@ static struct clk_regmap s4_vclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vclk_parent_data,
- .num_parents = ARRAY_SIZE(s4_vclk_parent_data),
+ .parent_data = s4_vclk_parents,
+ .num_parents = ARRAY_SIZE(s4_vclk_parents),
.flags = 0,
},
};
@@ -677,8 +731,8 @@ static struct clk_regmap s4_vclk2_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vclk_parent_data,
- .num_parents = ARRAY_SIZE(s4_vclk_parent_data),
+ .parent_data = s4_vclk_parents,
+ .num_parents = ARRAY_SIZE(s4_vclk_parents),
.flags = 0,
},
};
@@ -1026,8 +1080,8 @@ static struct clk_fixed_factor s4_vclk2_div12 = {
};
/* Indexes 5, 6 and 7 do not correspond to any real clock, so they are not used. */
-static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *s4_cts_parent_hws[] = {
+static u32 s4_cts_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *s4_cts_parents[] = {
&s4_vclk_div1.hw,
&s4_vclk_div2.hw,
&s4_vclk_div4.hw,
@@ -1045,13 +1099,13 @@ static struct clk_regmap s4_cts_enci_sel = {
.offset = CLKCTRL_VID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = s4_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_cts_parent_hws,
- .num_parents = ARRAY_SIZE(s4_cts_parent_hws),
+ .parent_hws = s4_cts_parents,
+ .num_parents = ARRAY_SIZE(s4_cts_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1061,13 +1115,13 @@ static struct clk_regmap s4_cts_encp_sel = {
.offset = CLKCTRL_VID_CLK_DIV,
.mask = 0xf,
.shift = 20,
- .table = mux_table_cts_sel,
+ .table = s4_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_cts_parent_hws,
- .num_parents = ARRAY_SIZE(s4_cts_parent_hws),
+ .parent_hws = s4_cts_parents,
+ .num_parents = ARRAY_SIZE(s4_cts_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1077,20 +1131,20 @@ static struct clk_regmap s4_cts_vdac_sel = {
.offset = CLKCTRL_VIID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = s4_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_vdac_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_cts_parent_hws,
- .num_parents = ARRAY_SIZE(s4_cts_parent_hws),
+ .parent_hws = s4_cts_parents,
+ .num_parents = ARRAY_SIZE(s4_cts_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
/* Indexes 5, 6 and 7 do not correspond to any real clock, so they are not used. */
-static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *s4_cts_hdmi_tx_parent_hws[] = {
+static u32 s4_hdmi_tx_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *s4_hdmi_tx_parents[] = {
&s4_vclk_div1.hw,
&s4_vclk_div2.hw,
&s4_vclk_div4.hw,
@@ -1108,13 +1162,13 @@ static struct clk_regmap s4_hdmi_tx_sel = {
.offset = CLKCTRL_HDMI_CLK_CTRL,
.mask = 0xf,
.shift = 16,
- .table = mux_table_hdmi_tx_sel,
+ .table = s4_hdmi_tx_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_tx_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_cts_hdmi_tx_parent_hws,
- .num_parents = ARRAY_SIZE(s4_cts_hdmi_tx_parent_hws),
+ .parent_hws = s4_hdmi_tx_parents,
+ .num_parents = ARRAY_SIZE(s4_hdmi_tx_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1184,7 +1238,7 @@ static struct clk_regmap s4_hdmi_tx = {
};
/* HDMI Clocks */
-static const struct clk_parent_data s4_hdmi_parent_data[] = {
+static const struct clk_parent_data s4_hdmi_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
@@ -1201,8 +1255,8 @@ static struct clk_regmap s4_hdmi_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_hdmi_parent_data,
- .num_parents = ARRAY_SIZE(s4_hdmi_parent_data),
+ .parent_data = s4_hdmi_parents,
+ .num_parents = ARRAY_SIZE(s4_hdmi_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1253,7 +1307,7 @@ static struct clk_regmap s4_ts_clk_div = {
},
};
-static struct clk_regmap s4_ts_clk_gate = {
+static struct clk_regmap s4_ts_clk = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_TS_CLK_CTRL,
.bit_idx = 8,
@@ -1275,7 +1329,7 @@ static struct clk_regmap s4_ts_clk_gate = {
 * mux because it does top-to-bottom updates of each clock tree and
* switches to the "inactive" one when CLK_SET_RATE_GATE is set.
*/
-static const struct clk_parent_data s4_mali_0_1_parent_data[] = {
+static const struct clk_parent_data s4_mali_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "gp0_pll", },
{ .fw_name = "hifi_pll", },
@@ -1295,8 +1349,8 @@ static struct clk_regmap s4_mali_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_mali_0_1_parent_data,
- .num_parents = ARRAY_SIZE(s4_mali_0_1_parent_data),
+ .parent_data = s4_mali_parents,
+ .num_parents = ARRAY_SIZE(s4_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -1349,8 +1403,8 @@ static struct clk_regmap s4_mali_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_mali_0_1_parent_data,
- .num_parents = ARRAY_SIZE(s4_mali_0_1_parent_data),
+ .parent_data = s4_mali_parents,
+ .num_parents = ARRAY_SIZE(s4_mali_parents),
.flags = 0,
},
};
@@ -1388,28 +1442,26 @@ static struct clk_regmap s4_mali_1 = {
},
};
-static const struct clk_hw *s4_mali_parent_hws[] = {
- &s4_mali_0.hw,
- &s4_mali_1.hw
-};
-
-static struct clk_regmap s4_mali_mux = {
+static struct clk_regmap s4_mali_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_MALI_CLK_CTRL,
.mask = 1,
.shift = 31,
},
.hw.init = &(struct clk_init_data){
- .name = "mali",
+ .name = "mali_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_mali_parent_hws,
+ .parent_hws = (const struct clk_hw *[]) {
+ &s4_mali_0.hw,
+ &s4_mali_1.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
/* VDEC clocks */
-static const struct clk_parent_data s4_dec_parent_data[] = {
+static const struct clk_parent_data s4_dec_parents[] = {
{ .fw_name = "fclk_div2p5", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div4", },
@@ -1420,7 +1472,7 @@ static const struct clk_parent_data s4_dec_parent_data[] = {
{ .fw_name = "xtal", }
};
-static struct clk_regmap s4_vdec_p0_mux = {
+static struct clk_regmap s4_vdec_p0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC_CLK_CTRL,
.mask = 0x7,
@@ -1428,10 +1480,10 @@ static struct clk_regmap s4_vdec_p0_mux = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
- .name = "vdec_p0_mux",
+ .name = "vdec_p0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_dec_parent_data,
- .num_parents = ARRAY_SIZE(s4_dec_parent_data),
+ .parent_data = s4_dec_parents,
+ .num_parents = ARRAY_SIZE(s4_dec_parents),
.flags = 0,
},
};
@@ -1447,7 +1499,7 @@ static struct clk_regmap s4_vdec_p0_div = {
.name = "vdec_p0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vdec_p0_mux.hw
+ &s4_vdec_p0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1470,7 +1522,7 @@ static struct clk_regmap s4_vdec_p0 = {
},
};
-static struct clk_regmap s4_vdec_p1_mux = {
+static struct clk_regmap s4_vdec_p1_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC3_CLK_CTRL,
.mask = 0x7,
@@ -1478,10 +1530,10 @@ static struct clk_regmap s4_vdec_p1_mux = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
- .name = "vdec_p1_mux",
+ .name = "vdec_p1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_dec_parent_data,
- .num_parents = ARRAY_SIZE(s4_dec_parent_data),
+ .parent_data = s4_dec_parents,
+ .num_parents = ARRAY_SIZE(s4_dec_parents),
.flags = 0,
},
};
@@ -1497,7 +1549,7 @@ static struct clk_regmap s4_vdec_p1_div = {
.name = "vdec_p1_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vdec_p1_mux.hw
+ &s4_vdec_p1_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1520,27 +1572,25 @@ static struct clk_regmap s4_vdec_p1 = {
},
};
-static const struct clk_hw *s4_vdec_mux_parent_hws[] = {
- &s4_vdec_p0.hw,
- &s4_vdec_p1.hw
-};
-
-static struct clk_regmap s4_vdec_mux = {
+static struct clk_regmap s4_vdec_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC3_CLK_CTRL,
.mask = 0x1,
.shift = 15,
},
.hw.init = &(struct clk_init_data) {
- .name = "vdec_mux",
+ .name = "vdec_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_vdec_mux_parent_hws,
- .num_parents = ARRAY_SIZE(s4_vdec_mux_parent_hws),
+ .parent_hws = (const struct clk_hw *[]) {
+ &s4_vdec_p0.hw,
+ &s4_vdec_p1.hw,
+ },
+ .num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_hevcf_p0_mux = {
+static struct clk_regmap s4_hevcf_p0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC2_CLK_CTRL,
.mask = 0x7,
@@ -1548,10 +1598,10 @@ static struct clk_regmap s4_hevcf_p0_mux = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
- .name = "hevcf_p0_mux",
+ .name = "hevcf_p0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_dec_parent_data,
- .num_parents = ARRAY_SIZE(s4_dec_parent_data),
+ .parent_data = s4_dec_parents,
+ .num_parents = ARRAY_SIZE(s4_dec_parents),
.flags = 0,
},
};
@@ -1567,7 +1617,7 @@ static struct clk_regmap s4_hevcf_p0_div = {
.name = "hevcf_p0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_hevcf_p0_mux.hw
+ &s4_hevcf_p0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1580,7 +1630,7 @@ static struct clk_regmap s4_hevcf_p0 = {
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "hevcf_p0_gate",
+ .name = "hevcf_p0",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_hevcf_p0_div.hw
@@ -1590,7 +1640,7 @@ static struct clk_regmap s4_hevcf_p0 = {
},
};
-static struct clk_regmap s4_hevcf_p1_mux = {
+static struct clk_regmap s4_hevcf_p1_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC4_CLK_CTRL,
.mask = 0x7,
@@ -1598,10 +1648,10 @@ static struct clk_regmap s4_hevcf_p1_mux = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
- .name = "hevcf_p1_mux",
+ .name = "hevcf_p1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_dec_parent_data,
- .num_parents = ARRAY_SIZE(s4_dec_parent_data),
+ .parent_data = s4_dec_parents,
+ .num_parents = ARRAY_SIZE(s4_dec_parents),
.flags = 0,
},
};
@@ -1617,7 +1667,7 @@ static struct clk_regmap s4_hevcf_p1_div = {
.name = "hevcf_p1_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_hevcf_p1_mux.hw
+ &s4_hevcf_p1_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1640,28 +1690,26 @@ static struct clk_regmap s4_hevcf_p1 = {
},
};
-static const struct clk_hw *s4_hevcf_mux_parent_hws[] = {
- &s4_hevcf_p0.hw,
- &s4_hevcf_p1.hw
-};
-
-static struct clk_regmap s4_hevcf_mux = {
+static struct clk_regmap s4_hevcf_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC4_CLK_CTRL,
.mask = 0x1,
.shift = 15,
},
.hw.init = &(struct clk_init_data) {
- .name = "hevcf",
+ .name = "hevcf_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_hevcf_mux_parent_hws,
- .num_parents = ARRAY_SIZE(s4_hevcf_mux_parent_hws),
+ .parent_hws = (const struct clk_hw *[]) {
+ &s4_hevcf_p0.hw,
+ &s4_hevcf_p1.hw,
+ },
+ .num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
/* VPU Clock */
-static const struct clk_parent_data s4_vpu_parent_data[] = {
+static const struct clk_parent_data s4_vpu_parents[] = {
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div5", },
@@ -1681,8 +1729,8 @@ static struct clk_regmap s4_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vpu_parent_data,
- .num_parents = ARRAY_SIZE(s4_vpu_parent_data),
+ .parent_data = s4_vpu_parents,
+ .num_parents = ARRAY_SIZE(s4_vpu_parents),
.flags = 0,
},
};
@@ -1725,8 +1773,8 @@ static struct clk_regmap s4_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vpu_parent_data,
- .num_parents = ARRAY_SIZE(s4_vpu_parent_data),
+ .parent_data = s4_vpu_parents,
+ .num_parents = ARRAY_SIZE(s4_vpu_parents),
.flags = 0,
},
};
@@ -1778,24 +1826,24 @@ static struct clk_regmap s4_vpu = {
},
};
-static const struct clk_parent_data vpu_clkb_tmp_parent_data[] = {
+static const struct clk_parent_data vpu_clkb_tmp_parents[] = {
{ .hw = &s4_vpu.hw },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div5", },
{ .fw_name = "fclk_div7", }
};
-static struct clk_regmap s4_vpu_clkb_tmp_mux = {
+static struct clk_regmap s4_vpu_clkb_tmp_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VPU_CLKB_CTRL,
.mask = 0x3,
.shift = 20,
},
.hw.init = &(struct clk_init_data) {
- .name = "vpu_clkb_tmp_mux",
+ .name = "vpu_clkb_tmp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = vpu_clkb_tmp_parent_data,
- .num_parents = ARRAY_SIZE(vpu_clkb_tmp_parent_data),
+ .parent_data = vpu_clkb_tmp_parents,
+ .num_parents = ARRAY_SIZE(vpu_clkb_tmp_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1810,7 +1858,7 @@ static struct clk_regmap s4_vpu_clkb_tmp_div = {
.name = "vpu_clkb_tmp_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vpu_clkb_tmp_mux.hw
+ &s4_vpu_clkb_tmp_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1866,7 +1914,7 @@ static struct clk_regmap s4_vpu_clkb = {
},
};
-static const struct clk_parent_data s4_vpu_clkc_parent_data[] = {
+static const struct clk_parent_data s4_vpu_clkc_parents[] = {
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
@@ -1877,17 +1925,17 @@ static const struct clk_parent_data s4_vpu_clkc_parent_data[] = {
{ .fw_name = "gp0_pll", },
};
-static struct clk_regmap s4_vpu_clkc_p0_mux = {
+static struct clk_regmap s4_vpu_clkc_p0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VPU_CLKC_CTRL,
.mask = 0x7,
.shift = 9,
},
.hw.init = &(struct clk_init_data) {
- .name = "vpu_clkc_p0_mux",
+ .name = "vpu_clkc_p0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vpu_clkc_parent_data,
- .num_parents = ARRAY_SIZE(s4_vpu_clkc_parent_data),
+ .parent_data = s4_vpu_clkc_parents,
+ .num_parents = ARRAY_SIZE(s4_vpu_clkc_parents),
.flags = 0,
},
};
@@ -1902,7 +1950,7 @@ static struct clk_regmap s4_vpu_clkc_p0_div = {
.name = "vpu_clkc_p0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vpu_clkc_p0_mux.hw
+ &s4_vpu_clkc_p0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1925,17 +1973,17 @@ static struct clk_regmap s4_vpu_clkc_p0 = {
},
};
-static struct clk_regmap s4_vpu_clkc_p1_mux = {
+static struct clk_regmap s4_vpu_clkc_p1_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VPU_CLKC_CTRL,
.mask = 0x7,
.shift = 25,
},
.hw.init = &(struct clk_init_data) {
- .name = "vpu_clkc_p1_mux",
+ .name = "vpu_clkc_p1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vpu_clkc_parent_data,
- .num_parents = ARRAY_SIZE(s4_vpu_clkc_parent_data),
+ .parent_data = s4_vpu_clkc_parents,
+ .num_parents = ARRAY_SIZE(s4_vpu_clkc_parents),
.flags = 0,
},
};
@@ -1950,7 +1998,7 @@ static struct clk_regmap s4_vpu_clkc_p1_div = {
.name = "vpu_clkc_p1_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vpu_clkc_p1_mux.hw
+ &s4_vpu_clkc_p1_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1973,28 +2021,26 @@ static struct clk_regmap s4_vpu_clkc_p1 = {
},
};
-static const struct clk_hw *s4_vpu_mux_parent_hws[] = {
- &s4_vpu_clkc_p0.hw,
- &s4_vpu_clkc_p1.hw
-};
-
-static struct clk_regmap s4_vpu_clkc_mux = {
+static struct clk_regmap s4_vpu_clkc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VPU_CLKC_CTRL,
.mask = 0x1,
.shift = 31,
},
.hw.init = &(struct clk_init_data) {
- .name = "vpu_clkc_mux",
+ .name = "vpu_clkc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_vpu_mux_parent_hws,
- .num_parents = ARRAY_SIZE(s4_vpu_mux_parent_hws),
+ .parent_hws = (const struct clk_hw *[]) {
+ &s4_vpu_clkc_p0.hw,
+ &s4_vpu_clkc_p1.hw,
+ },
+ .num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
/* VAPB Clock */
-static const struct clk_parent_data s4_vapb_parent_data[] = {
+static const struct clk_parent_data s4_vapb_parents[] = {
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
@@ -2014,8 +2060,8 @@ static struct clk_regmap s4_vapb_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vapb_parent_data,
- .num_parents = ARRAY_SIZE(s4_vapb_parent_data),
+ .parent_data = s4_vapb_parents,
+ .num_parents = ARRAY_SIZE(s4_vapb_parents),
.flags = 0,
},
};
@@ -2062,8 +2108,8 @@ static struct clk_regmap s4_vapb_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vapb_parent_data,
- .num_parents = ARRAY_SIZE(s4_vapb_parent_data),
+ .parent_data = s4_vapb_parents,
+ .num_parents = ARRAY_SIZE(s4_vapb_parents),
.flags = 0,
},
};
@@ -2119,13 +2165,13 @@ static struct clk_regmap s4_vapb = {
},
};
-static struct clk_regmap s4_ge2d_gate = {
+static struct clk_regmap s4_ge2d = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_VAPBCLK_CTRL,
.bit_idx = 30,
},
.hw.init = &(struct clk_init_data) {
- .name = "ge2d_clk",
+ .name = "ge2d",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) { &s4_vapb.hw },
.num_parents = 1,
@@ -2133,24 +2179,24 @@ static struct clk_regmap s4_ge2d_gate = {
},
};
-static const struct clk_parent_data s4_esmclk_parent_data[] = {
+static const struct clk_parent_data s4_hdcp22_esmclk_parents[] = {
{ .fw_name = "fclk_div7", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
};
-static struct clk_regmap s4_hdcp22_esmclk_mux = {
+static struct clk_regmap s4_hdcp22_esmclk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_HDCP22_CTRL,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data) {
- .name = "hdcp22_esmclk_mux",
+ .name = "hdcp22_esmclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_esmclk_parent_data,
- .num_parents = ARRAY_SIZE(s4_esmclk_parent_data),
+ .parent_data = s4_hdcp22_esmclk_parents,
+ .num_parents = ARRAY_SIZE(s4_hdcp22_esmclk_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2165,20 +2211,20 @@ static struct clk_regmap s4_hdcp22_esmclk_div = {
.name = "hdcp22_esmclk_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_hdcp22_esmclk_mux.hw
+ &s4_hdcp22_esmclk_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_hdcp22_esmclk_gate = {
+static struct clk_regmap s4_hdcp22_esmclk = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_HDCP22_CTRL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "hdcp22_esmclk_gate",
+ .name = "hdcp22_esmclk",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_hdcp22_esmclk_div.hw
@@ -2188,24 +2234,24 @@ static struct clk_regmap s4_hdcp22_esmclk_gate = {
},
};
-static const struct clk_parent_data s4_skpclk_parent_data[] = {
+static const struct clk_parent_data s4_hdcp22_skpclk_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
};
-static struct clk_regmap s4_hdcp22_skpclk_mux = {
+static struct clk_regmap s4_hdcp22_skpclk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_HDCP22_CTRL,
.mask = 0x3,
.shift = 25,
},
.hw.init = &(struct clk_init_data) {
- .name = "hdcp22_skpclk_mux",
+ .name = "hdcp22_skpclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_skpclk_parent_data,
- .num_parents = ARRAY_SIZE(s4_skpclk_parent_data),
+ .parent_data = s4_hdcp22_skpclk_parents,
+ .num_parents = ARRAY_SIZE(s4_hdcp22_skpclk_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2220,20 +2266,20 @@ static struct clk_regmap s4_hdcp22_skpclk_div = {
.name = "hdcp22_skpclk_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_hdcp22_skpclk_mux.hw
+ &s4_hdcp22_skpclk_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_hdcp22_skpclk_gate = {
+static struct clk_regmap s4_hdcp22_skpclk = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_HDCP22_CTRL,
.bit_idx = 24,
},
.hw.init = &(struct clk_init_data){
- .name = "hdcp22_skpclk_gate",
+ .name = "hdcp22_skpclk",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_hdcp22_skpclk_div.hw
@@ -2243,7 +2289,7 @@ static struct clk_regmap s4_hdcp22_skpclk_gate = {
},
};
-static const struct clk_parent_data s4_vdin_parent_data[] = {
+static const struct clk_parent_data s4_vdin_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
@@ -2251,17 +2297,17 @@ static const struct clk_parent_data s4_vdin_parent_data[] = {
{ .hw = &s4_vid_pll.hw }
};
-static struct clk_regmap s4_vdin_meas_mux = {
+static struct clk_regmap s4_vdin_meas_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDIN_MEAS_CLK_CTRL,
.mask = 0x7,
.shift = 9,
},
.hw.init = &(struct clk_init_data) {
- .name = "vdin_meas_mux",
+ .name = "vdin_meas_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vdin_parent_data,
- .num_parents = ARRAY_SIZE(s4_vdin_parent_data),
+ .parent_data = s4_vdin_parents,
+ .num_parents = ARRAY_SIZE(s4_vdin_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2276,20 +2322,20 @@ static struct clk_regmap s4_vdin_meas_div = {
.name = "vdin_meas_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vdin_meas_mux.hw
+ &s4_vdin_meas_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_vdin_meas_gate = {
+static struct clk_regmap s4_vdin_meas = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_VDIN_MEAS_CLK_CTRL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "vdin_meas_gate",
+ .name = "vdin_meas",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_vdin_meas_div.hw
@@ -2300,7 +2346,7 @@ static struct clk_regmap s4_vdin_meas_gate = {
};
/* EMMC/NAND clock */
-static const struct clk_parent_data s4_sd_emmc_clk0_parent_data[] = {
+static const struct clk_parent_data s4_sd_emmc_clk0_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div2", },
{ .fw_name = "fclk_div3", },
@@ -2320,8 +2366,8 @@ static struct clk_regmap s4_sd_emmc_c_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parent_data),
+ .parent_data = s4_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parents),
.flags = 0,
},
};
@@ -2368,8 +2414,8 @@ static struct clk_regmap s4_sd_emmc_a_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parent_data),
+ .parent_data = s4_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parents),
.flags = 0,
},
};
@@ -2416,8 +2462,8 @@ static struct clk_regmap s4_sd_emmc_b_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parent_data),
+ .parent_data = s4_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parents),
.flags = 0,
},
};
@@ -2456,7 +2502,7 @@ static struct clk_regmap s4_sd_emmc_b_clk0 = {
};
/* SPICC Clock */
-static const struct clk_parent_data s4_spicc_parent_data[] = {
+static const struct clk_parent_data s4_spicc_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &s4_sys_clk.hw },
{ .fw_name = "fclk_div4", },
@@ -2466,17 +2512,17 @@ static const struct clk_parent_data s4_spicc_parent_data[] = {
{ .fw_name = "fclk_div7", },
};
-static struct clk_regmap s4_spicc0_mux = {
+static struct clk_regmap s4_spicc0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_SPICC_CLK_CTRL,
.mask = 0x7,
.shift = 7,
},
.hw.init = &(struct clk_init_data) {
- .name = "spicc0_mux",
+ .name = "spicc0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_spicc_parent_data,
- .num_parents = ARRAY_SIZE(s4_spicc_parent_data),
+ .parent_data = s4_spicc_parents,
+ .num_parents = ARRAY_SIZE(s4_spicc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2491,20 +2537,20 @@ static struct clk_regmap s4_spicc0_div = {
.name = "spicc0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_spicc0_mux.hw
+ &s4_spicc0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_spicc0_gate = {
+static struct clk_regmap s4_spicc0_en = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_SPICC_CLK_CTRL,
.bit_idx = 6,
},
.hw.init = &(struct clk_init_data){
- .name = "spicc0",
+ .name = "spicc0_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_spicc0_div.hw
@@ -2515,500 +2561,61 @@ static struct clk_regmap s4_spicc0_gate = {
};
/* PWM Clock */
-static const struct clk_parent_data s4_pwm_parent_data[] = {
+static const struct clk_parent_data s4_pwm_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &s4_vid_pll.hw },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
};
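+
+/*
+ * Each CLKCTRL_PWM_CLK_*_CTRL register hosts two PWM channels: the first
+ * uses div bits [7:0], gate bit 8 and sel bits [10:9]; the second uses
+ * div bits [23:16], gate bit 24 and sel bits [26:25], as the S4_COMP_*()
+ * arguments below reflect.
+ */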
-static struct clk_regmap s4_pwm_a_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_a_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
+static S4_COMP_SEL(pwm_a, CLKCTRL_PWM_CLK_AB_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_a, CLKCTRL_PWM_CLK_AB_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_a, CLKCTRL_PWM_CLK_AB_CTRL, 8);
-static struct clk_regmap s4_pwm_a_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_a_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_a_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_b, CLKCTRL_PWM_CLK_AB_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_b, CLKCTRL_PWM_CLK_AB_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_b, CLKCTRL_PWM_CLK_AB_CTRL, 24);
-static struct clk_regmap s4_pwm_a_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_a_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_a_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_c, CLKCTRL_PWM_CLK_CD_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_c, CLKCTRL_PWM_CLK_CD_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_c, CLKCTRL_PWM_CLK_CD_CTRL, 8);
-static struct clk_regmap s4_pwm_b_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_b_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
+static S4_COMP_SEL(pwm_d, CLKCTRL_PWM_CLK_CD_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_d, CLKCTRL_PWM_CLK_CD_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_d, CLKCTRL_PWM_CLK_CD_CTRL, 24);
-static struct clk_regmap s4_pwm_b_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_b_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_b_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_e, CLKCTRL_PWM_CLK_EF_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_e, CLKCTRL_PWM_CLK_EF_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_e, CLKCTRL_PWM_CLK_EF_CTRL, 8);
-static struct clk_regmap s4_pwm_b_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_b_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_b_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_f, CLKCTRL_PWM_CLK_EF_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_f, CLKCTRL_PWM_CLK_EF_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_f, CLKCTRL_PWM_CLK_EF_CTRL, 24);
-static struct clk_regmap s4_pwm_c_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_c_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
+static S4_COMP_SEL(pwm_g, CLKCTRL_PWM_CLK_GH_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_g, CLKCTRL_PWM_CLK_GH_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_g, CLKCTRL_PWM_CLK_GH_CTRL, 8);
-static struct clk_regmap s4_pwm_c_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_c_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_c_mux.hw
- },
- .num_parents = 1,
- },
-};
+static S4_COMP_SEL(pwm_h, CLKCTRL_PWM_CLK_GH_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_h, CLKCTRL_PWM_CLK_GH_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_h, CLKCTRL_PWM_CLK_GH_CTRL, 24);
-static struct clk_regmap s4_pwm_c_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_c_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_c_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_d_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_d_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_d_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_d_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_d_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_d_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_d_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_d_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_e_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_e_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_e_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_e_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_e_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_i, CLKCTRL_PWM_CLK_IJ_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_i, CLKCTRL_PWM_CLK_IJ_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_i, CLKCTRL_PWM_CLK_IJ_CTRL, 8);
-static struct clk_regmap s4_pwm_e_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_e_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_e_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_j, CLKCTRL_PWM_CLK_IJ_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_j, CLKCTRL_PWM_CLK_IJ_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_j, CLKCTRL_PWM_CLK_IJ_CTRL, 24);
-static struct clk_regmap s4_pwm_f_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_f_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_f_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_f_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_f_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_f_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_f_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_f_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_g_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_g_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_g_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_g_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_g_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_g_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_g_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_g_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_h_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_h_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_h_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_h_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_h_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_h_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_h_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_h_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_i_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_i_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_i_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_i_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_i_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_i_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_i_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_i_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_j_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_j_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_j_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_j_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_j_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_j_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_j_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_j_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_saradc_mux = {
+static struct clk_regmap s4_saradc_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = CLKCTRL_SAR_CLK_CTRL,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data){
- .name = "saradc_mux",
+ .name = "saradc_sel",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
@@ -3029,20 +2636,20 @@ static struct clk_regmap s4_saradc_div = {
.name = "saradc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_saradc_mux.hw
+ &s4_saradc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_saradc_gate = {
+static struct clk_regmap s4_saradc = {
.data = &(struct clk_regmap_gate_data) {
.offset = CLKCTRL_SAR_CLK_CTRL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "saradc_clk",
+ .name = "saradc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_saradc_div.hw
@@ -3057,9 +2664,8 @@ static struct clk_regmap s4_saradc_gate = {
 * corresponding clock sources are not described in the clock tree or are
 * internal clocks used for debug, so they are skipped.
*/
-static u32 s4_gen_clk_mux_table[] = { 0, 4, 5, 7, 19, 21, 22,
- 23, 24, 25, 26, 27, 28 };
-static const struct clk_parent_data s4_gen_clk_parent_data[] = {
+static u32 s4_gen_clk_parents_val_table[] = { 0, 4, 5, 7, 19, 21, 22, 23, 24, 25, 26, 27, 28 };
+static const struct clk_parent_data s4_gen_clk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &s4_vid_pll.hw },
{ .fw_name = "gp0_pll", },
@@ -3080,13 +2686,13 @@ static struct clk_regmap s4_gen_clk_sel = {
.offset = CLKCTRL_GEN_CLK_CTRL,
.mask = 0x1f,
.shift = 12,
- .table = s4_gen_clk_mux_table,
+ .table = s4_gen_clk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "gen_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_gen_clk_parent_data,
- .num_parents = ARRAY_SIZE(s4_gen_clk_parent_data),
+ .parent_data = s4_gen_clk_parents,
+ .num_parents = ARRAY_SIZE(s4_gen_clk_parents),
/*
 * The GEN clock can be connected to an external pad
* and may be set up directly from the device tree. Don't
@@ -3129,173 +2735,75 @@ static struct clk_regmap s4_gen_clk = {
},
};
-static const struct clk_parent_data s4_adc_extclk_in_parent_data[] = {
- { .fw_name = "xtal", },
- { .fw_name = "fclk_div4", },
- { .fw_name = "fclk_div3", },
- { .fw_name = "fclk_div5", },
- { .fw_name = "fclk_div7", },
- { .fw_name = "mpll2", },
- { .fw_name = "gp0_pll", },
- { .fw_name = "hifi_pll", },
-};
-
-static struct clk_regmap s4_adc_extclk_in_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_DEMOD_CLK_CTRL,
- .mask = 0x7,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "adc_extclk_in_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_adc_extclk_in_parent_data,
- .num_parents = ARRAY_SIZE(s4_adc_extclk_in_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_adc_extclk_in_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_DEMOD_CLK_CTRL,
- .shift = 16,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data){
- .name = "adc_extclk_in_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_adc_extclk_in_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_adc_extclk_in_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_DEMOD_CLK_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "adc_extclk_in",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_adc_extclk_in_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static const struct clk_parent_data s4_pclk_parents = { .hw = &s4_sys_clk.hw };
-static struct clk_regmap s4_demod_core_clk_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_DEMOD_CLK_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "demod_core_clk_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = (const struct clk_parent_data []) {
- { .fw_name = "xtal", },
- { .fw_name = "fclk_div7", },
- { .fw_name = "fclk_div4", },
- { .hw = &s4_adc_extclk_in_gate.hw }
- },
- .num_parents = 4,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_demod_core_clk_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_DEMOD_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data){
- .name = "demod_core_clk_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_demod_core_clk_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+#define S4_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(_name, _reg, _bit, &s4_pclk_parents, _flags)
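+
+/*
+ * S4_PCLK() declares a peripheral bus gate fed by s4_sys_clk through
+ * s4_pclk_parents; e.g. S4_PCLK(s4_ddr, CLKCTRL_SYS_CLK_EN0_REG0, 0,
+ * CLK_IGNORE_UNUSED) takes over from the former MESON_GATE(s4_ddr, ...)
+ * entry while making the gate flags explicit.
+ */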
-static struct clk_regmap s4_demod_core_clk_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_DEMOD_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "demod_core_clk",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_demod_core_clk_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &s4_sys_clk.hw)
-
-static MESON_GATE(s4_ddr, CLKCTRL_SYS_CLK_EN0_REG0, 0);
-static MESON_GATE(s4_dos, CLKCTRL_SYS_CLK_EN0_REG0, 1);
-static MESON_GATE(s4_ethphy, CLKCTRL_SYS_CLK_EN0_REG0, 4);
-static MESON_GATE(s4_mali, CLKCTRL_SYS_CLK_EN0_REG0, 6);
-static MESON_GATE(s4_aocpu, CLKCTRL_SYS_CLK_EN0_REG0, 13);
-static MESON_GATE(s4_aucpu, CLKCTRL_SYS_CLK_EN0_REG0, 14);
-static MESON_GATE(s4_cec, CLKCTRL_SYS_CLK_EN0_REG0, 16);
-static MESON_GATE(s4_sdemmca, CLKCTRL_SYS_CLK_EN0_REG0, 24);
-static MESON_GATE(s4_sdemmcb, CLKCTRL_SYS_CLK_EN0_REG0, 25);
-static MESON_GATE(s4_nand, CLKCTRL_SYS_CLK_EN0_REG0, 26);
-static MESON_GATE(s4_smartcard, CLKCTRL_SYS_CLK_EN0_REG0, 27);
-static MESON_GATE(s4_acodec, CLKCTRL_SYS_CLK_EN0_REG0, 28);
-static MESON_GATE(s4_spifc, CLKCTRL_SYS_CLK_EN0_REG0, 29);
-static MESON_GATE(s4_msr_clk, CLKCTRL_SYS_CLK_EN0_REG0, 30);
-static MESON_GATE(s4_ir_ctrl, CLKCTRL_SYS_CLK_EN0_REG0, 31);
-static MESON_GATE(s4_audio, CLKCTRL_SYS_CLK_EN0_REG1, 0);
-static MESON_GATE(s4_eth, CLKCTRL_SYS_CLK_EN0_REG1, 3);
-static MESON_GATE(s4_uart_a, CLKCTRL_SYS_CLK_EN0_REG1, 5);
-static MESON_GATE(s4_uart_b, CLKCTRL_SYS_CLK_EN0_REG1, 6);
-static MESON_GATE(s4_uart_c, CLKCTRL_SYS_CLK_EN0_REG1, 7);
-static MESON_GATE(s4_uart_d, CLKCTRL_SYS_CLK_EN0_REG1, 8);
-static MESON_GATE(s4_uart_e, CLKCTRL_SYS_CLK_EN0_REG1, 9);
-static MESON_GATE(s4_aififo, CLKCTRL_SYS_CLK_EN0_REG1, 11);
-static MESON_GATE(s4_ts_ddr, CLKCTRL_SYS_CLK_EN0_REG1, 15);
-static MESON_GATE(s4_ts_pll, CLKCTRL_SYS_CLK_EN0_REG1, 16);
-static MESON_GATE(s4_g2d, CLKCTRL_SYS_CLK_EN0_REG1, 20);
-static MESON_GATE(s4_spicc0, CLKCTRL_SYS_CLK_EN0_REG1, 21);
-static MESON_GATE(s4_usb, CLKCTRL_SYS_CLK_EN0_REG1, 26);
-static MESON_GATE(s4_i2c_m_a, CLKCTRL_SYS_CLK_EN0_REG1, 30);
-static MESON_GATE(s4_i2c_m_b, CLKCTRL_SYS_CLK_EN0_REG1, 31);
-static MESON_GATE(s4_i2c_m_c, CLKCTRL_SYS_CLK_EN0_REG2, 0);
-static MESON_GATE(s4_i2c_m_d, CLKCTRL_SYS_CLK_EN0_REG2, 1);
-static MESON_GATE(s4_i2c_m_e, CLKCTRL_SYS_CLK_EN0_REG2, 2);
-static MESON_GATE(s4_hdmitx_apb, CLKCTRL_SYS_CLK_EN0_REG2, 4);
-static MESON_GATE(s4_i2c_s_a, CLKCTRL_SYS_CLK_EN0_REG2, 5);
-static MESON_GATE(s4_usb1_to_ddr, CLKCTRL_SYS_CLK_EN0_REG2, 8);
-static MESON_GATE(s4_hdcp22, CLKCTRL_SYS_CLK_EN0_REG2, 10);
-static MESON_GATE(s4_mmc_apb, CLKCTRL_SYS_CLK_EN0_REG2, 11);
-static MESON_GATE(s4_rsa, CLKCTRL_SYS_CLK_EN0_REG2, 18);
-static MESON_GATE(s4_cpu_debug, CLKCTRL_SYS_CLK_EN0_REG2, 19);
-static MESON_GATE(s4_vpu_intr, CLKCTRL_SYS_CLK_EN0_REG2, 25);
-static MESON_GATE(s4_demod, CLKCTRL_SYS_CLK_EN0_REG2, 27);
-static MESON_GATE(s4_sar_adc, CLKCTRL_SYS_CLK_EN0_REG2, 28);
-static MESON_GATE(s4_gic, CLKCTRL_SYS_CLK_EN0_REG2, 30);
-static MESON_GATE(s4_pwm_ab, CLKCTRL_SYS_CLK_EN0_REG3, 7);
-static MESON_GATE(s4_pwm_cd, CLKCTRL_SYS_CLK_EN0_REG3, 8);
-static MESON_GATE(s4_pwm_ef, CLKCTRL_SYS_CLK_EN0_REG3, 9);
-static MESON_GATE(s4_pwm_gh, CLKCTRL_SYS_CLK_EN0_REG3, 10);
-static MESON_GATE(s4_pwm_ij, CLKCTRL_SYS_CLK_EN0_REG3, 11);
+/*
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historic reasons.
+ * Users are encouraged to test without it and submit changes to:
+ * - remove the flag if it is not necessary.
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ * if appropriate.
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ * for a particular clock.
+ */
+static S4_PCLK(s4_ddr, CLKCTRL_SYS_CLK_EN0_REG0, 0, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_dos, CLKCTRL_SYS_CLK_EN0_REG0, 1, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_ethphy, CLKCTRL_SYS_CLK_EN0_REG0, 4, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_mali, CLKCTRL_SYS_CLK_EN0_REG0, 6, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_aocpu, CLKCTRL_SYS_CLK_EN0_REG0, 13, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_aucpu, CLKCTRL_SYS_CLK_EN0_REG0, 14, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_cec, CLKCTRL_SYS_CLK_EN0_REG0, 16, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_sdemmca, CLKCTRL_SYS_CLK_EN0_REG0, 24, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_sdemmcb, CLKCTRL_SYS_CLK_EN0_REG0, 25, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_nand, CLKCTRL_SYS_CLK_EN0_REG0, 26, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_smartcard, CLKCTRL_SYS_CLK_EN0_REG0, 27, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_acodec, CLKCTRL_SYS_CLK_EN0_REG0, 28, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_spifc, CLKCTRL_SYS_CLK_EN0_REG0, 29, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_msr_clk, CLKCTRL_SYS_CLK_EN0_REG0, 30, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_ir_ctrl, CLKCTRL_SYS_CLK_EN0_REG0, 31, CLK_IGNORE_UNUSED);
+
+static S4_PCLK(s4_audio, CLKCTRL_SYS_CLK_EN0_REG1, 0, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_eth, CLKCTRL_SYS_CLK_EN0_REG1, 3, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_a, CLKCTRL_SYS_CLK_EN0_REG1, 5, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_b, CLKCTRL_SYS_CLK_EN0_REG1, 6, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_c, CLKCTRL_SYS_CLK_EN0_REG1, 7, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_d, CLKCTRL_SYS_CLK_EN0_REG1, 8, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_e, CLKCTRL_SYS_CLK_EN0_REG1, 9, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_aififo, CLKCTRL_SYS_CLK_EN0_REG1, 11, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_ts_ddr, CLKCTRL_SYS_CLK_EN0_REG1, 15, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_ts_pll, CLKCTRL_SYS_CLK_EN0_REG1, 16, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_g2d, CLKCTRL_SYS_CLK_EN0_REG1, 20, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_spicc0, CLKCTRL_SYS_CLK_EN0_REG1, 21, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_usb, CLKCTRL_SYS_CLK_EN0_REG1, 26, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_m_a, CLKCTRL_SYS_CLK_EN0_REG1, 30, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_m_b, CLKCTRL_SYS_CLK_EN0_REG1, 31, CLK_IGNORE_UNUSED);
+
+static S4_PCLK(s4_i2c_m_c, CLKCTRL_SYS_CLK_EN0_REG2, 0, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_m_d, CLKCTRL_SYS_CLK_EN0_REG2, 1, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_m_e, CLKCTRL_SYS_CLK_EN0_REG2, 2, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_hdmitx_apb, CLKCTRL_SYS_CLK_EN0_REG2, 4, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_s_a, CLKCTRL_SYS_CLK_EN0_REG2, 5, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_usb1_to_ddr, CLKCTRL_SYS_CLK_EN0_REG2, 8, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_hdcp22, CLKCTRL_SYS_CLK_EN0_REG2, 10, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_mmc_apb, CLKCTRL_SYS_CLK_EN0_REG2, 11, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_rsa, CLKCTRL_SYS_CLK_EN0_REG2, 18, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_cpu_debug, CLKCTRL_SYS_CLK_EN0_REG2, 19, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_vpu_intr, CLKCTRL_SYS_CLK_EN0_REG2, 25, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_demod, CLKCTRL_SYS_CLK_EN0_REG2, 27, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_sar_adc, CLKCTRL_SYS_CLK_EN0_REG2, 28, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_gic, CLKCTRL_SYS_CLK_EN0_REG2, 30, CLK_IGNORE_UNUSED);
+
+static S4_PCLK(s4_pwm_ab, CLKCTRL_SYS_CLK_EN0_REG3, 7, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_pwm_cd, CLKCTRL_SYS_CLK_EN0_REG3, 8, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_pwm_ef, CLKCTRL_SYS_CLK_EN0_REG3, 9, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_pwm_gh, CLKCTRL_SYS_CLK_EN0_REG3, 10, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_pwm_ij, CLKCTRL_SYS_CLK_EN0_REG3, 11, CLK_IGNORE_UNUSED);
/* Array of all clocks provided by this provider */
-static struct clk_hw *s4_periphs_hw_clks[] = {
+static struct clk_hw *s4_peripherals_hw_clks[] = {
[CLKID_RTC_32K_CLKIN] = &s4_rtc_32k_by_oscin_clkin.hw,
[CLKID_RTC_32K_DIV] = &s4_rtc_32k_by_oscin_div.hw,
[CLKID_RTC_32K_SEL] = &s4_rtc_32k_by_oscin_sel.hw,
@@ -3318,12 +2826,12 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_CECB_32K_SEL_PRE] = &s4_cecb_32k_sel_pre.hw,
[CLKID_CECB_32K_SEL] = &s4_cecb_32k_sel.hw,
[CLKID_CECB_32K_CLKOUT] = &s4_cecb_32k_clkout.hw,
- [CLKID_SC_CLK_SEL] = &s4_sc_clk_mux.hw,
+ [CLKID_SC_CLK_SEL] = &s4_sc_clk_sel.hw,
[CLKID_SC_CLK_DIV] = &s4_sc_clk_div.hw,
- [CLKID_SC] = &s4_sc_clk_gate.hw,
- [CLKID_12_24M] = &s4_12_24M_clk_gate.hw,
- [CLKID_12M_CLK_DIV] = &s4_12M_clk_div.hw,
- [CLKID_12_24M_CLK_SEL] = &s4_12_24M_clk.hw,
+ [CLKID_SC] = &s4_sc_clk.hw,
+ [CLKID_12_24M] = &s4_12_24M.hw,
+ [CLKID_12M_CLK_DIV] = &s4_12M_div.hw,
+ [CLKID_12_24M_CLK_SEL] = &s4_12_24M_sel.hw,
[CLKID_VID_PLL_DIV] = &s4_vid_pll_div.hw,
[CLKID_VID_PLL_SEL] = &s4_vid_pll_sel.hw,
[CLKID_VID_PLL] = &s4_vid_pll.hw,
@@ -3365,28 +2873,28 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_HDMI_DIV] = &s4_hdmi_div.hw,
[CLKID_HDMI] = &s4_hdmi.hw,
[CLKID_TS_CLK_DIV] = &s4_ts_clk_div.hw,
- [CLKID_TS] = &s4_ts_clk_gate.hw,
+ [CLKID_TS] = &s4_ts_clk.hw,
[CLKID_MALI_0_SEL] = &s4_mali_0_sel.hw,
[CLKID_MALI_0_DIV] = &s4_mali_0_div.hw,
[CLKID_MALI_0] = &s4_mali_0.hw,
[CLKID_MALI_1_SEL] = &s4_mali_1_sel.hw,
[CLKID_MALI_1_DIV] = &s4_mali_1_div.hw,
[CLKID_MALI_1] = &s4_mali_1.hw,
- [CLKID_MALI_SEL] = &s4_mali_mux.hw,
- [CLKID_VDEC_P0_SEL] = &s4_vdec_p0_mux.hw,
+ [CLKID_MALI_SEL] = &s4_mali_sel.hw,
+ [CLKID_VDEC_P0_SEL] = &s4_vdec_p0_sel.hw,
[CLKID_VDEC_P0_DIV] = &s4_vdec_p0_div.hw,
[CLKID_VDEC_P0] = &s4_vdec_p0.hw,
- [CLKID_VDEC_P1_SEL] = &s4_vdec_p1_mux.hw,
+ [CLKID_VDEC_P1_SEL] = &s4_vdec_p1_sel.hw,
[CLKID_VDEC_P1_DIV] = &s4_vdec_p1_div.hw,
[CLKID_VDEC_P1] = &s4_vdec_p1.hw,
- [CLKID_VDEC_SEL] = &s4_vdec_mux.hw,
- [CLKID_HEVCF_P0_SEL] = &s4_hevcf_p0_mux.hw,
+ [CLKID_VDEC_SEL] = &s4_vdec_sel.hw,
+ [CLKID_HEVCF_P0_SEL] = &s4_hevcf_p0_sel.hw,
[CLKID_HEVCF_P0_DIV] = &s4_hevcf_p0_div.hw,
[CLKID_HEVCF_P0] = &s4_hevcf_p0.hw,
- [CLKID_HEVCF_P1_SEL] = &s4_hevcf_p1_mux.hw,
+ [CLKID_HEVCF_P1_SEL] = &s4_hevcf_p1_sel.hw,
[CLKID_HEVCF_P1_DIV] = &s4_hevcf_p1_div.hw,
[CLKID_HEVCF_P1] = &s4_hevcf_p1.hw,
- [CLKID_HEVCF_SEL] = &s4_hevcf_mux.hw,
+ [CLKID_HEVCF_SEL] = &s4_hevcf_sel.hw,
[CLKID_VPU_0_SEL] = &s4_vpu_0_sel.hw,
[CLKID_VPU_0_DIV] = &s4_vpu_0_div.hw,
[CLKID_VPU_0] = &s4_vpu_0.hw,
@@ -3394,18 +2902,18 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_VPU_1_DIV] = &s4_vpu_1_div.hw,
[CLKID_VPU_1] = &s4_vpu_1.hw,
[CLKID_VPU] = &s4_vpu.hw,
- [CLKID_VPU_CLKB_TMP_SEL] = &s4_vpu_clkb_tmp_mux.hw,
+ [CLKID_VPU_CLKB_TMP_SEL] = &s4_vpu_clkb_tmp_sel.hw,
[CLKID_VPU_CLKB_TMP_DIV] = &s4_vpu_clkb_tmp_div.hw,
[CLKID_VPU_CLKB_TMP] = &s4_vpu_clkb_tmp.hw,
[CLKID_VPU_CLKB_DIV] = &s4_vpu_clkb_div.hw,
[CLKID_VPU_CLKB] = &s4_vpu_clkb.hw,
- [CLKID_VPU_CLKC_P0_SEL] = &s4_vpu_clkc_p0_mux.hw,
+ [CLKID_VPU_CLKC_P0_SEL] = &s4_vpu_clkc_p0_sel.hw,
[CLKID_VPU_CLKC_P0_DIV] = &s4_vpu_clkc_p0_div.hw,
[CLKID_VPU_CLKC_P0] = &s4_vpu_clkc_p0.hw,
- [CLKID_VPU_CLKC_P1_SEL] = &s4_vpu_clkc_p1_mux.hw,
+ [CLKID_VPU_CLKC_P1_SEL] = &s4_vpu_clkc_p1_sel.hw,
[CLKID_VPU_CLKC_P1_DIV] = &s4_vpu_clkc_p1_div.hw,
[CLKID_VPU_CLKC_P1] = &s4_vpu_clkc_p1.hw,
- [CLKID_VPU_CLKC_SEL] = &s4_vpu_clkc_mux.hw,
+ [CLKID_VPU_CLKC_SEL] = &s4_vpu_clkc_sel.hw,
[CLKID_VAPB_0_SEL] = &s4_vapb_0_sel.hw,
[CLKID_VAPB_0_DIV] = &s4_vapb_0_div.hw,
[CLKID_VAPB_0] = &s4_vapb_0.hw,
@@ -3413,10 +2921,10 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_VAPB_1_DIV] = &s4_vapb_1_div.hw,
[CLKID_VAPB_1] = &s4_vapb_1.hw,
[CLKID_VAPB] = &s4_vapb.hw,
- [CLKID_GE2D] = &s4_ge2d_gate.hw,
- [CLKID_VDIN_MEAS_SEL] = &s4_vdin_meas_mux.hw,
+ [CLKID_GE2D] = &s4_ge2d.hw,
+ [CLKID_VDIN_MEAS_SEL] = &s4_vdin_meas_sel.hw,
[CLKID_VDIN_MEAS_DIV] = &s4_vdin_meas_div.hw,
- [CLKID_VDIN_MEAS] = &s4_vdin_meas_gate.hw,
+ [CLKID_VDIN_MEAS] = &s4_vdin_meas.hw,
[CLKID_SD_EMMC_C_CLK_SEL] = &s4_sd_emmc_c_clk0_sel.hw,
[CLKID_SD_EMMC_C_CLK_DIV] = &s4_sd_emmc_c_clk0_div.hw,
[CLKID_SD_EMMC_C] = &s4_sd_emmc_c_clk0.hw,
@@ -3426,42 +2934,42 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_SD_EMMC_B_CLK_SEL] = &s4_sd_emmc_b_clk0_sel.hw,
[CLKID_SD_EMMC_B_CLK_DIV] = &s4_sd_emmc_b_clk0_div.hw,
[CLKID_SD_EMMC_B] = &s4_sd_emmc_b_clk0.hw,
- [CLKID_SPICC0_SEL] = &s4_spicc0_mux.hw,
+ [CLKID_SPICC0_SEL] = &s4_spicc0_sel.hw,
[CLKID_SPICC0_DIV] = &s4_spicc0_div.hw,
- [CLKID_SPICC0_EN] = &s4_spicc0_gate.hw,
- [CLKID_PWM_A_SEL] = &s4_pwm_a_mux.hw,
+ [CLKID_SPICC0_EN] = &s4_spicc0_en.hw,
+ [CLKID_PWM_A_SEL] = &s4_pwm_a_sel.hw,
[CLKID_PWM_A_DIV] = &s4_pwm_a_div.hw,
- [CLKID_PWM_A] = &s4_pwm_a_gate.hw,
- [CLKID_PWM_B_SEL] = &s4_pwm_b_mux.hw,
+ [CLKID_PWM_A] = &s4_pwm_a.hw,
+ [CLKID_PWM_B_SEL] = &s4_pwm_b_sel.hw,
[CLKID_PWM_B_DIV] = &s4_pwm_b_div.hw,
- [CLKID_PWM_B] = &s4_pwm_b_gate.hw,
- [CLKID_PWM_C_SEL] = &s4_pwm_c_mux.hw,
+ [CLKID_PWM_B] = &s4_pwm_b.hw,
+ [CLKID_PWM_C_SEL] = &s4_pwm_c_sel.hw,
[CLKID_PWM_C_DIV] = &s4_pwm_c_div.hw,
- [CLKID_PWM_C] = &s4_pwm_c_gate.hw,
- [CLKID_PWM_D_SEL] = &s4_pwm_d_mux.hw,
+ [CLKID_PWM_C] = &s4_pwm_c.hw,
+ [CLKID_PWM_D_SEL] = &s4_pwm_d_sel.hw,
[CLKID_PWM_D_DIV] = &s4_pwm_d_div.hw,
- [CLKID_PWM_D] = &s4_pwm_d_gate.hw,
- [CLKID_PWM_E_SEL] = &s4_pwm_e_mux.hw,
+ [CLKID_PWM_D] = &s4_pwm_d.hw,
+ [CLKID_PWM_E_SEL] = &s4_pwm_e_sel.hw,
[CLKID_PWM_E_DIV] = &s4_pwm_e_div.hw,
- [CLKID_PWM_E] = &s4_pwm_e_gate.hw,
- [CLKID_PWM_F_SEL] = &s4_pwm_f_mux.hw,
+ [CLKID_PWM_E] = &s4_pwm_e.hw,
+ [CLKID_PWM_F_SEL] = &s4_pwm_f_sel.hw,
[CLKID_PWM_F_DIV] = &s4_pwm_f_div.hw,
- [CLKID_PWM_F] = &s4_pwm_f_gate.hw,
- [CLKID_PWM_G_SEL] = &s4_pwm_g_mux.hw,
+ [CLKID_PWM_F] = &s4_pwm_f.hw,
+ [CLKID_PWM_G_SEL] = &s4_pwm_g_sel.hw,
[CLKID_PWM_G_DIV] = &s4_pwm_g_div.hw,
- [CLKID_PWM_G] = &s4_pwm_g_gate.hw,
- [CLKID_PWM_H_SEL] = &s4_pwm_h_mux.hw,
+ [CLKID_PWM_G] = &s4_pwm_g.hw,
+ [CLKID_PWM_H_SEL] = &s4_pwm_h_sel.hw,
[CLKID_PWM_H_DIV] = &s4_pwm_h_div.hw,
- [CLKID_PWM_H] = &s4_pwm_h_gate.hw,
- [CLKID_PWM_I_SEL] = &s4_pwm_i_mux.hw,
+ [CLKID_PWM_H] = &s4_pwm_h.hw,
+ [CLKID_PWM_I_SEL] = &s4_pwm_i_sel.hw,
[CLKID_PWM_I_DIV] = &s4_pwm_i_div.hw,
- [CLKID_PWM_I] = &s4_pwm_i_gate.hw,
- [CLKID_PWM_J_SEL] = &s4_pwm_j_mux.hw,
+ [CLKID_PWM_I] = &s4_pwm_i.hw,
+ [CLKID_PWM_J_SEL] = &s4_pwm_j_sel.hw,
[CLKID_PWM_J_DIV] = &s4_pwm_j_div.hw,
- [CLKID_PWM_J] = &s4_pwm_j_gate.hw,
- [CLKID_SARADC_SEL] = &s4_saradc_mux.hw,
+ [CLKID_PWM_J] = &s4_pwm_j.hw,
+ [CLKID_SARADC_SEL] = &s4_saradc_sel.hw,
[CLKID_SARADC_DIV] = &s4_saradc_div.hw,
- [CLKID_SARADC] = &s4_saradc_gate.hw,
+ [CLKID_SARADC] = &s4_saradc.hw,
[CLKID_GEN_SEL] = &s4_gen_clk_sel.hw,
[CLKID_GEN_DIV] = &s4_gen_clk_div.hw,
[CLKID_GEN] = &s4_gen_clk.hw,
@@ -3514,302 +3022,38 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_PWM_EF] = &s4_pwm_ef.hw,
[CLKID_PWM_GH] = &s4_pwm_gh.hw,
[CLKID_PWM_IJ] = &s4_pwm_ij.hw,
- [CLKID_HDCP22_ESMCLK_SEL] = &s4_hdcp22_esmclk_mux.hw,
+ [CLKID_HDCP22_ESMCLK_SEL] = &s4_hdcp22_esmclk_sel.hw,
[CLKID_HDCP22_ESMCLK_DIV] = &s4_hdcp22_esmclk_div.hw,
- [CLKID_HDCP22_ESMCLK] = &s4_hdcp22_esmclk_gate.hw,
- [CLKID_HDCP22_SKPCLK_SEL] = &s4_hdcp22_skpclk_mux.hw,
+ [CLKID_HDCP22_ESMCLK] = &s4_hdcp22_esmclk.hw,
+ [CLKID_HDCP22_SKPCLK_SEL] = &s4_hdcp22_skpclk_sel.hw,
[CLKID_HDCP22_SKPCLK_DIV] = &s4_hdcp22_skpclk_div.hw,
- [CLKID_HDCP22_SKPCLK] = &s4_hdcp22_skpclk_gate.hw,
-};
-
-/* Convenience table to populate regmap in .probe */
-static struct clk_regmap *const s4_periphs_clk_regmaps[] = {
- &s4_rtc_32k_by_oscin_clkin,
- &s4_rtc_32k_by_oscin_div,
- &s4_rtc_32k_by_oscin_sel,
- &s4_rtc_32k_by_oscin,
- &s4_rtc_clk,
- &s4_sysclk_b_sel,
- &s4_sysclk_b_div,
- &s4_sysclk_b,
- &s4_sysclk_a_sel,
- &s4_sysclk_a_div,
- &s4_sysclk_a,
- &s4_sys_clk,
- &s4_ceca_32k_clkin,
- &s4_ceca_32k_div,
- &s4_ceca_32k_sel_pre,
- &s4_ceca_32k_sel,
- &s4_ceca_32k_clkout,
- &s4_cecb_32k_clkin,
- &s4_cecb_32k_div,
- &s4_cecb_32k_sel_pre,
- &s4_cecb_32k_sel,
- &s4_cecb_32k_clkout,
- &s4_sc_clk_mux,
- &s4_sc_clk_div,
- &s4_sc_clk_gate,
- &s4_12_24M_clk_gate,
- &s4_12_24M_clk,
- &s4_vid_pll_div,
- &s4_vid_pll_sel,
- &s4_vid_pll,
- &s4_vclk_sel,
- &s4_vclk2_sel,
- &s4_vclk_input,
- &s4_vclk2_input,
- &s4_vclk_div,
- &s4_vclk2_div,
- &s4_vclk,
- &s4_vclk2,
- &s4_vclk_div1,
- &s4_vclk_div2_en,
- &s4_vclk_div4_en,
- &s4_vclk_div6_en,
- &s4_vclk_div12_en,
- &s4_vclk2_div1,
- &s4_vclk2_div2_en,
- &s4_vclk2_div4_en,
- &s4_vclk2_div6_en,
- &s4_vclk2_div12_en,
- &s4_cts_enci_sel,
- &s4_cts_encp_sel,
- &s4_cts_vdac_sel,
- &s4_hdmi_tx_sel,
- &s4_cts_enci,
- &s4_cts_encp,
- &s4_cts_vdac,
- &s4_hdmi_tx,
- &s4_hdmi_sel,
- &s4_hdmi_div,
- &s4_hdmi,
- &s4_ts_clk_div,
- &s4_ts_clk_gate,
- &s4_mali_0_sel,
- &s4_mali_0_div,
- &s4_mali_0,
- &s4_mali_1_sel,
- &s4_mali_1_div,
- &s4_mali_1,
- &s4_mali_mux,
- &s4_vdec_p0_mux,
- &s4_vdec_p0_div,
- &s4_vdec_p0,
- &s4_vdec_p1_mux,
- &s4_vdec_p1_div,
- &s4_vdec_p1,
- &s4_vdec_mux,
- &s4_hevcf_p0_mux,
- &s4_hevcf_p0_div,
- &s4_hevcf_p0,
- &s4_hevcf_p1_mux,
- &s4_hevcf_p1_div,
- &s4_hevcf_p1,
- &s4_hevcf_mux,
- &s4_vpu_0_sel,
- &s4_vpu_0_div,
- &s4_vpu_0,
- &s4_vpu_1_sel,
- &s4_vpu_1_div,
- &s4_vpu_1,
- &s4_vpu,
- &s4_vpu_clkb_tmp_mux,
- &s4_vpu_clkb_tmp_div,
- &s4_vpu_clkb_tmp,
- &s4_vpu_clkb_div,
- &s4_vpu_clkb,
- &s4_vpu_clkc_p0_mux,
- &s4_vpu_clkc_p0_div,
- &s4_vpu_clkc_p0,
- &s4_vpu_clkc_p1_mux,
- &s4_vpu_clkc_p1_div,
- &s4_vpu_clkc_p1,
- &s4_vpu_clkc_mux,
- &s4_vapb_0_sel,
- &s4_vapb_0_div,
- &s4_vapb_0,
- &s4_vapb_1_sel,
- &s4_vapb_1_div,
- &s4_vapb_1,
- &s4_vapb,
- &s4_ge2d_gate,
- &s4_hdcp22_esmclk_mux,
- &s4_hdcp22_esmclk_div,
- &s4_hdcp22_esmclk_gate,
- &s4_hdcp22_skpclk_mux,
- &s4_hdcp22_skpclk_div,
- &s4_hdcp22_skpclk_gate,
- &s4_vdin_meas_mux,
- &s4_vdin_meas_div,
- &s4_vdin_meas_gate,
- &s4_sd_emmc_c_clk0_sel,
- &s4_sd_emmc_c_clk0_div,
- &s4_sd_emmc_c_clk0,
- &s4_sd_emmc_a_clk0_sel,
- &s4_sd_emmc_a_clk0_div,
- &s4_sd_emmc_a_clk0,
- &s4_sd_emmc_b_clk0_sel,
- &s4_sd_emmc_b_clk0_div,
- &s4_sd_emmc_b_clk0,
- &s4_spicc0_mux,
- &s4_spicc0_div,
- &s4_spicc0_gate,
- &s4_pwm_a_mux,
- &s4_pwm_a_div,
- &s4_pwm_a_gate,
- &s4_pwm_b_mux,
- &s4_pwm_b_div,
- &s4_pwm_b_gate,
- &s4_pwm_c_mux,
- &s4_pwm_c_div,
- &s4_pwm_c_gate,
- &s4_pwm_d_mux,
- &s4_pwm_d_div,
- &s4_pwm_d_gate,
- &s4_pwm_e_mux,
- &s4_pwm_e_div,
- &s4_pwm_e_gate,
- &s4_pwm_f_mux,
- &s4_pwm_f_div,
- &s4_pwm_f_gate,
- &s4_pwm_g_mux,
- &s4_pwm_g_div,
- &s4_pwm_g_gate,
- &s4_pwm_h_mux,
- &s4_pwm_h_div,
- &s4_pwm_h_gate,
- &s4_pwm_i_mux,
- &s4_pwm_i_div,
- &s4_pwm_i_gate,
- &s4_pwm_j_mux,
- &s4_pwm_j_div,
- &s4_pwm_j_gate,
- &s4_saradc_mux,
- &s4_saradc_div,
- &s4_saradc_gate,
- &s4_gen_clk_sel,
- &s4_gen_clk_div,
- &s4_gen_clk,
- &s4_ddr,
- &s4_dos,
- &s4_ethphy,
- &s4_mali,
- &s4_aocpu,
- &s4_aucpu,
- &s4_cec,
- &s4_sdemmca,
- &s4_sdemmcb,
- &s4_nand,
- &s4_smartcard,
- &s4_acodec,
- &s4_spifc,
- &s4_msr_clk,
- &s4_ir_ctrl,
- &s4_audio,
- &s4_eth,
- &s4_uart_a,
- &s4_uart_b,
- &s4_uart_c,
- &s4_uart_d,
- &s4_uart_e,
- &s4_aififo,
- &s4_ts_ddr,
- &s4_ts_pll,
- &s4_g2d,
- &s4_spicc0,
- &s4_usb,
- &s4_i2c_m_a,
- &s4_i2c_m_b,
- &s4_i2c_m_c,
- &s4_i2c_m_d,
- &s4_i2c_m_e,
- &s4_hdmitx_apb,
- &s4_i2c_s_a,
- &s4_usb1_to_ddr,
- &s4_hdcp22,
- &s4_mmc_apb,
- &s4_rsa,
- &s4_cpu_debug,
- &s4_vpu_intr,
- &s4_demod,
- &s4_sar_adc,
- &s4_gic,
- &s4_pwm_ab,
- &s4_pwm_cd,
- &s4_pwm_ef,
- &s4_pwm_gh,
- &s4_pwm_ij,
- &s4_demod_core_clk_mux,
- &s4_demod_core_clk_div,
- &s4_demod_core_clk_gate,
- &s4_adc_extclk_in_mux,
- &s4_adc_extclk_in_div,
- &s4_adc_extclk_in_gate,
-};
-
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = CLKCTRL_DEMOD_CLK_CTRL,
-};
-
-static struct meson_clk_hw_data s4_periphs_clks = {
- .hws = s4_periphs_hw_clks,
- .num = ARRAY_SIZE(s4_periphs_hw_clks),
-};
-
-static int meson_s4_periphs_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *regmap;
- void __iomem *base;
- int ret, i;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return dev_err_probe(dev, PTR_ERR(base),
- "can't ioremap resource\n");
-
- regmap = devm_regmap_init_mmio(dev, base, &clkc_regmap_config);
- if (IS_ERR(regmap))
- return dev_err_probe(dev, PTR_ERR(regmap),
- "can't init regmap mmio region\n");
-
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(s4_periphs_clk_regmaps); i++)
- s4_periphs_clk_regmaps[i]->map = regmap;
-
- for (i = 0; i < s4_periphs_clks.num; i++) {
- /* array might be sparse */
- if (!s4_periphs_clks.hws[i])
- continue;
-
- ret = devm_clk_hw_register(dev, s4_periphs_clks.hws[i]);
- if (ret)
- return dev_err_probe(dev, ret,
- "clock[%d] registration failed\n", i);
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, &s4_periphs_clks);
-}
-
-static const struct of_device_id clkc_match_table[] = {
+ [CLKID_HDCP22_SKPCLK] = &s4_hdcp22_skpclk.hw,
+};
+
+static const struct meson_clkc_data s4_peripherals_clkc_data = {
+ .hw_clks = {
+ .hws = s4_peripherals_hw_clks,
+ .num = ARRAY_SIZE(s4_peripherals_hw_clks),
+ },
+};
+
+static const struct of_device_id s4_peripherals_clkc_match_table[] = {
{
.compatible = "amlogic,s4-peripherals-clkc",
+ .data = &s4_peripherals_clkc_data,
},
{}
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, s4_peripherals_clkc_match_table);
-static struct platform_driver s4_driver = {
- .probe = meson_s4_periphs_probe,
+static struct platform_driver s4_peripherals_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
- .name = "s4-periphs-clkc",
- .of_match_table = clkc_match_table,
+ .name = "s4-peripherals-clkc",
+ .of_match_table = s4_peripherals_clkc_match_table,
},
};
-module_platform_driver(s4_driver);
+module_platform_driver(s4_peripherals_clkc_driver);
MODULE_DESCRIPTION("Amlogic S4 Peripherals Clock Controller driver");
MODULE_AUTHOR("Yu Tu <yu.tu@amlogic.com>");
diff --git a/drivers/clk/meson/s4-peripherals.h b/drivers/clk/meson/s4-peripherals.h
deleted file mode 100644
index 1e298713c2b2..000000000000
--- a/drivers/clk/meson/s4-peripherals.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
-/*
- * Copyright (c) 2022-2023 Amlogic, inc. All rights reserved
- * Author: Yu Tu <yu.tu@amlogic.com>
- */
-
-#ifndef __MESON_S4_PERIPHERALS_H__
-#define __MESON_S4_PERIPHERALS_H__
-
-#define CLKCTRL_RTC_BY_OSCIN_CTRL0 0x008
-#define CLKCTRL_RTC_BY_OSCIN_CTRL1 0x00c
-#define CLKCTRL_RTC_CTRL 0x010
-#define CLKCTRL_SYS_CLK_CTRL0 0x040
-#define CLKCTRL_SYS_CLK_EN0_REG0 0x044
-#define CLKCTRL_SYS_CLK_EN0_REG1 0x048
-#define CLKCTRL_SYS_CLK_EN0_REG2 0x04c
-#define CLKCTRL_SYS_CLK_EN0_REG3 0x050
-#define CLKCTRL_CECA_CTRL0 0x088
-#define CLKCTRL_CECA_CTRL1 0x08c
-#define CLKCTRL_CECB_CTRL0 0x090
-#define CLKCTRL_CECB_CTRL1 0x094
-#define CLKCTRL_SC_CLK_CTRL 0x098
-#define CLKCTRL_CLK12_24_CTRL 0x0a8
-#define CLKCTRL_VID_CLK_CTRL 0x0c0
-#define CLKCTRL_VID_CLK_CTRL2 0x0c4
-#define CLKCTRL_VID_CLK_DIV 0x0c8
-#define CLKCTRL_VIID_CLK_DIV 0x0cc
-#define CLKCTRL_VIID_CLK_CTRL 0x0d0
-#define CLKCTRL_HDMI_CLK_CTRL 0x0e0
-#define CLKCTRL_VID_PLL_CLK_DIV 0x0e4
-#define CLKCTRL_VPU_CLK_CTRL 0x0e8
-#define CLKCTRL_VPU_CLKB_CTRL 0x0ec
-#define CLKCTRL_VPU_CLKC_CTRL 0x0f0
-#define CLKCTRL_VID_LOCK_CLK_CTRL 0x0f4
-#define CLKCTRL_VDIN_MEAS_CLK_CTRL 0x0f8
-#define CLKCTRL_VAPBCLK_CTRL 0x0fc
-#define CLKCTRL_HDCP22_CTRL 0x100
-#define CLKCTRL_VDEC_CLK_CTRL 0x140
-#define CLKCTRL_VDEC2_CLK_CTRL 0x144
-#define CLKCTRL_VDEC3_CLK_CTRL 0x148
-#define CLKCTRL_VDEC4_CLK_CTRL 0x14c
-#define CLKCTRL_TS_CLK_CTRL 0x158
-#define CLKCTRL_MALI_CLK_CTRL 0x15c
-#define CLKCTRL_NAND_CLK_CTRL 0x168
-#define CLKCTRL_SD_EMMC_CLK_CTRL 0x16c
-#define CLKCTRL_SPICC_CLK_CTRL 0x174
-#define CLKCTRL_GEN_CLK_CTRL 0x178
-#define CLKCTRL_SAR_CLK_CTRL 0x17c
-#define CLKCTRL_PWM_CLK_AB_CTRL 0x180
-#define CLKCTRL_PWM_CLK_CD_CTRL 0x184
-#define CLKCTRL_PWM_CLK_EF_CTRL 0x188
-#define CLKCTRL_PWM_CLK_GH_CTRL 0x18c
-#define CLKCTRL_PWM_CLK_IJ_CTRL 0x190
-#define CLKCTRL_DEMOD_CLK_CTRL 0x200
-
-#endif /* __MESON_S4_PERIPHERALS_H__ */
diff --git a/drivers/clk/meson/s4-pll.c b/drivers/clk/meson/s4-pll.c
index f9cc05a506e3..56ce6f566e53 100644
--- a/drivers/clk/meson/s4-pll.c
+++ b/drivers/clk/meson/s4-pll.c
@@ -13,10 +13,37 @@
#include "clk-mpll.h"
#include "clk-pll.h"
#include "clk-regmap.h"
-#include "s4-pll.h"
#include "meson-clkc-utils.h"
#include <dt-bindings/clock/amlogic,s4-pll-clkc.h>
+#define ANACTRL_FIXPLL_CTRL0 0x040
+#define ANACTRL_FIXPLL_CTRL1 0x044
+#define ANACTRL_FIXPLL_CTRL3 0x04c
+#define ANACTRL_GP0PLL_CTRL0 0x080
+#define ANACTRL_GP0PLL_CTRL1 0x084
+#define ANACTRL_GP0PLL_CTRL2 0x088
+#define ANACTRL_GP0PLL_CTRL3 0x08c
+#define ANACTRL_GP0PLL_CTRL4 0x090
+#define ANACTRL_GP0PLL_CTRL5 0x094
+#define ANACTRL_GP0PLL_CTRL6 0x098
+#define ANACTRL_HIFIPLL_CTRL0 0x100
+#define ANACTRL_HIFIPLL_CTRL1 0x104
+#define ANACTRL_HIFIPLL_CTRL2 0x108
+#define ANACTRL_HIFIPLL_CTRL3 0x10c
+#define ANACTRL_HIFIPLL_CTRL4 0x110
+#define ANACTRL_HIFIPLL_CTRL5 0x114
+#define ANACTRL_HIFIPLL_CTRL6 0x118
+#define ANACTRL_MPLL_CTRL0 0x180
+#define ANACTRL_MPLL_CTRL1 0x184
+#define ANACTRL_MPLL_CTRL2 0x188
+#define ANACTRL_MPLL_CTRL3 0x18c
+#define ANACTRL_MPLL_CTRL4 0x190
+#define ANACTRL_MPLL_CTRL5 0x194
+#define ANACTRL_MPLL_CTRL6 0x198
+#define ANACTRL_MPLL_CTRL7 0x19c
+#define ANACTRL_MPLL_CTRL8 0x1a0
+#define ANACTRL_HDMIPLL_CTRL0 0x1c0
+
/*
* These clock are a fixed value (fixed_pll is 2GHz) that is initialized by ROMcode.
* The chip was changed fixed pll for security reasons. Fixed PLL registers are not writable
@@ -254,7 +281,7 @@ static const struct pll_mult_range s4_gp0_pll_mult_range = {
/*
* Internal gp0 pll emulation configuration parameters
*/
-static const struct reg_sequence s4_gp0_init_regs[] = {
+static const struct reg_sequence s4_gp0_pll_init_regs[] = {
{ .reg = ANACTRL_GP0PLL_CTRL1, .def = 0x00000000 },
{ .reg = ANACTRL_GP0PLL_CTRL2, .def = 0x00000000 },
{ .reg = ANACTRL_GP0PLL_CTRL3, .def = 0x48681c00 },
@@ -291,8 +318,8 @@ static struct clk_regmap s4_gp0_pll_dco = {
.width = 1,
},
.range = &s4_gp0_pll_mult_range,
- .init_regs = s4_gp0_init_regs,
- .init_count = ARRAY_SIZE(s4_gp0_init_regs),
+ .init_regs = s4_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(s4_gp0_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
@@ -326,7 +353,7 @@ static struct clk_regmap s4_gp0_pll = {
/*
* Internal hifi pll emulation configuration parameters
*/
-static const struct reg_sequence s4_hifi_init_regs[] = {
+static const struct reg_sequence s4_hifi_pll_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL2, .def = 0x00000000 },
{ .reg = ANACTRL_HIFIPLL_CTRL3, .def = 0x6a285c00 },
{ .reg = ANACTRL_HIFIPLL_CTRL4, .def = 0x65771290 },
@@ -367,8 +394,8 @@ static struct clk_regmap s4_hifi_pll_dco = {
.width = 1,
},
.range = &s4_gp0_pll_mult_range,
- .init_regs = s4_hifi_init_regs,
- .init_count = ARRAY_SIZE(s4_hifi_init_regs),
+ .init_regs = s4_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(s4_hifi_pll_init_regs),
.frac_max = 100000,
.flags = CLK_MESON_PLL_ROUND_CLOSEST,
},
@@ -767,107 +794,36 @@ static struct clk_hw *s4_pll_hw_clks[] = {
[CLKID_MPLL3] = &s4_mpll3.hw,
};
-static struct clk_regmap *const s4_pll_clk_regmaps[] = {
- &s4_fixed_pll_dco,
- &s4_fixed_pll,
- &s4_fclk_div2,
- &s4_fclk_div3,
- &s4_fclk_div4,
- &s4_fclk_div5,
- &s4_fclk_div7,
- &s4_fclk_div2p5,
- &s4_gp0_pll_dco,
- &s4_gp0_pll,
- &s4_hifi_pll_dco,
- &s4_hifi_pll,
- &s4_hdmi_pll_dco,
- &s4_hdmi_pll_od,
- &s4_hdmi_pll,
- &s4_mpll_50m,
- &s4_mpll0_div,
- &s4_mpll0,
- &s4_mpll1_div,
- &s4_mpll1,
- &s4_mpll2_div,
- &s4_mpll2,
- &s4_mpll3_div,
- &s4_mpll3,
-};
-
-static const struct reg_sequence s4_init_regs[] = {
+static const struct reg_sequence s4_pll_init_regs[] = {
{ .reg = ANACTRL_MPLL_CTRL0, .def = 0x00000543 },
};
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = ANACTRL_HDMIPLL_CTRL0,
-};
-
-static struct meson_clk_hw_data s4_pll_clks = {
- .hws = s4_pll_hw_clks,
- .num = ARRAY_SIZE(s4_pll_hw_clks),
+static const struct meson_clkc_data s4_pll_clkc_data = {
+ .hw_clks = {
+ .hws = s4_pll_hw_clks,
+ .num = ARRAY_SIZE(s4_pll_hw_clks),
+ },
+ .init_regs = s4_pll_init_regs,
+ .init_count = ARRAY_SIZE(s4_pll_init_regs),
};
-static int meson_s4_pll_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *regmap;
- void __iomem *base;
- int ret, i;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return dev_err_probe(dev, PTR_ERR(base),
- "can't ioremap resource\n");
-
- regmap = devm_regmap_init_mmio(dev, base, &clkc_regmap_config);
- if (IS_ERR(regmap))
- return dev_err_probe(dev, PTR_ERR(regmap),
- "can't init regmap mmio region\n");
-
- ret = regmap_multi_reg_write(regmap, s4_init_regs, ARRAY_SIZE(s4_init_regs));
- if (ret)
- return dev_err_probe(dev, ret,
- "Failed to init registers\n");
-
- /* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(s4_pll_clk_regmaps); i++)
- s4_pll_clk_regmaps[i]->map = regmap;
-
- /* Register clocks */
- for (i = 0; i < s4_pll_clks.num; i++) {
- /* array might be sparse */
- if (!s4_pll_clks.hws[i])
- continue;
-
- ret = devm_clk_hw_register(dev, s4_pll_clks.hws[i]);
- if (ret)
- return dev_err_probe(dev, ret,
- "clock[%d] registration failed\n", i);
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
- &s4_pll_clks);
-}
-
-static const struct of_device_id clkc_match_table[] = {
+static const struct of_device_id s4_pll_clkc_match_table[] = {
{
.compatible = "amlogic,s4-pll-clkc",
+ .data = &s4_pll_clkc_data,
},
{}
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, s4_pll_clkc_match_table);
-static struct platform_driver s4_driver = {
- .probe = meson_s4_pll_probe,
+static struct platform_driver s4_pll_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "s4-pll-clkc",
- .of_match_table = clkc_match_table,
+ .of_match_table = s4_pll_clkc_match_table,
},
};
-module_platform_driver(s4_driver);
+module_platform_driver(s4_pll_clkc_driver);
MODULE_DESCRIPTION("Amlogic S4 PLL Clock Controller driver");
MODULE_AUTHOR("Yu Tu <yu.tu@amlogic.com>");
diff --git a/drivers/clk/meson/s4-pll.h b/drivers/clk/meson/s4-pll.h
deleted file mode 100644
index ff7d58302f2a..000000000000
--- a/drivers/clk/meson/s4-pll.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
-/*
- * Copyright (c) 2022-2023 Amlogic, inc. All rights reserved
- * Author: Yu Tu <yu.tu@amlogic.com>
- */
-
-#ifndef __MESON_S4_PLL_H__
-#define __MESON_S4_PLL_H__
-
-#define ANACTRL_FIXPLL_CTRL0 0x040
-#define ANACTRL_FIXPLL_CTRL1 0x044
-#define ANACTRL_FIXPLL_CTRL3 0x04c
-#define ANACTRL_GP0PLL_CTRL0 0x080
-#define ANACTRL_GP0PLL_CTRL1 0x084
-#define ANACTRL_GP0PLL_CTRL2 0x088
-#define ANACTRL_GP0PLL_CTRL3 0x08c
-#define ANACTRL_GP0PLL_CTRL4 0x090
-#define ANACTRL_GP0PLL_CTRL5 0x094
-#define ANACTRL_GP0PLL_CTRL6 0x098
-#define ANACTRL_HIFIPLL_CTRL0 0x100
-#define ANACTRL_HIFIPLL_CTRL1 0x104
-#define ANACTRL_HIFIPLL_CTRL2 0x108
-#define ANACTRL_HIFIPLL_CTRL3 0x10c
-#define ANACTRL_HIFIPLL_CTRL4 0x110
-#define ANACTRL_HIFIPLL_CTRL5 0x114
-#define ANACTRL_HIFIPLL_CTRL6 0x118
-#define ANACTRL_MPLL_CTRL0 0x180
-#define ANACTRL_MPLL_CTRL1 0x184
-#define ANACTRL_MPLL_CTRL2 0x188
-#define ANACTRL_MPLL_CTRL3 0x18c
-#define ANACTRL_MPLL_CTRL4 0x190
-#define ANACTRL_MPLL_CTRL5 0x194
-#define ANACTRL_MPLL_CTRL6 0x198
-#define ANACTRL_MPLL_CTRL7 0x19c
-#define ANACTRL_MPLL_CTRL8 0x1a0
-#define ANACTRL_HDMIPLL_CTRL0 0x1c0
-
-#endif /* __MESON_S4_PLL_H__ */
diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c
index 9c4945234f26..4ba3d82810e8 100644
--- a/drivers/clk/meson/sclk-div.c
+++ b/drivers/clk/meson/sclk-div.c
@@ -222,6 +222,11 @@ static int sclk_div_init(struct clk_hw *hw)
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_sclk_div_data *sclk = meson_sclk_div_data(clk);
unsigned int val;
+ int ret;
+
+ ret = clk_regmap_init(hw);
+ if (ret)
+ return ret;
val = meson_parm_read(clk->map, &sclk->div);
diff --git a/drivers/clk/meson/vclk.c b/drivers/clk/meson/vclk.c
index 6a167ebdc8d7..009bd1193042 100644
--- a/drivers/clk/meson/vclk.c
+++ b/drivers/clk/meson/vclk.c
@@ -45,6 +45,7 @@ static int meson_vclk_gate_is_enabled(struct clk_hw *hw)
}
const struct clk_ops meson_vclk_gate_ops = {
+ .init = clk_regmap_init,
.enable = meson_vclk_gate_enable,
.disable = meson_vclk_gate_disable,
.is_enabled = meson_vclk_gate_is_enabled,
@@ -127,6 +128,7 @@ static int meson_vclk_div_is_enabled(struct clk_hw *hw)
}
const struct clk_ops meson_vclk_div_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_vclk_div_recalc_rate,
.determine_rate = meson_vclk_div_determine_rate,
.set_rate = meson_vclk_div_set_rate,
diff --git a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c
index 965ed7281f57..2a3cdbe6d86a 100644
--- a/drivers/clk/meson/vid-pll-div.c
+++ b/drivers/clk/meson/vid-pll-div.c
@@ -90,6 +90,7 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw,
}
const struct clk_ops meson_vid_pll_div_ro_ops = {
+ .init = clk_regmap_init,
.recalc_rate = meson_vid_pll_div_recalc_rate,
};
EXPORT_SYMBOL_NS_GPL(meson_vid_pll_div_ro_ops, "CLK_MESON");
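
These three hunks (sclk-div, vclk, vid-pll-div) wire clk_regmap_init into each clock's .init path, which is what allows the probes above to drop their "populate regmap" loops: rather than the probe assigning clk->map for every clock up front, each clock resolves its regmap when the clock framework initializes it (presumably by looking it up from the registering device; clk_regmap_init's body is not part of this hunk). Ops without an .init of their own simply point it at clk_regmap_init; ops with a custom .init chain it first, as sclk_div_init now does. A sketch of that chaining pattern:

static int my_clk_init(struct clk_hw *hw)
{
	struct clk_regmap *clk = to_clk_regmap(hw);
	int ret;

	/* resolve clk->map before any register access below */
	ret = clk_regmap_init(hw);
	if (ret)
		return ret;

	/* driver-specific setup reading/writing through clk->map */

	return 0;
}
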
diff --git a/drivers/clk/microchip/Kconfig b/drivers/clk/microchip/Kconfig
index 0724ce65898f..1b9e43eb5497 100644
--- a/drivers/clk/microchip/Kconfig
+++ b/drivers/clk/microchip/Kconfig
@@ -7,6 +7,8 @@ config MCHP_CLK_MPFS
bool "Clk driver for PolarFire SoC"
depends on ARCH_MICROCHIP_POLARFIRE || COMPILE_TEST
default ARCH_MICROCHIP_POLARFIRE
+ depends on MFD_SYSCON
select AUXILIARY_BUS
+ select REGMAP_MMIO
help
Supports Clock Configuration for PolarFire SoC
diff --git a/drivers/clk/microchip/clk-core.c b/drivers/clk/microchip/clk-core.c
index 1b4f023cdc8b..b34348d491f3 100644
--- a/drivers/clk/microchip/clk-core.c
+++ b/drivers/clk/microchip/clk-core.c
@@ -155,11 +155,13 @@ static unsigned long pbclk_recalc_rate(struct clk_hw *hw,
return parent_rate / pbclk_read_pbdiv(pb);
}
-static long pbclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int pbclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return calc_best_divided_rate(rate, *parent_rate,
- PB_DIV_MAX, PB_DIV_MIN);
+ req->rate = calc_best_divided_rate(req->rate, req->best_parent_rate,
+ PB_DIV_MAX, PB_DIV_MIN);
+
+ return 0;
}
static int pbclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -207,7 +209,7 @@ const struct clk_ops pic32_pbclk_ops = {
.disable = pbclk_disable,
.is_enabled = pbclk_is_enabled,
.recalc_rate = pbclk_recalc_rate,
- .round_rate = pbclk_round_rate,
+ .determine_rate = pbclk_determine_rate,
.set_rate = pbclk_set_rate,
};
@@ -326,7 +328,7 @@ static void roclk_calc_div_trim(unsigned long rate,
* i.e. fout = fin / 2 * DIV
* whereas DIV = rodiv + (rotrim / 512)
*
- * Since kernel does not perform floating-point arithmatic so
+ * Since kernel does not perform floating-point arithmetic so
* (rotrim/512) will be zero. And DIV & rodiv will result same.
*
* ie. fout = (fin * 256) / [(512 * rodiv) + rotrim] ... from (1)
@@ -372,18 +374,6 @@ static unsigned long roclk_recalc_rate(struct clk_hw *hw,
return roclk_calc_rate(parent_rate, rodiv, rotrim);
}
-static long roclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- u32 rotrim, rodiv;
-
- /* calculate dividers for new rate */
- roclk_calc_div_trim(rate, *parent_rate, &rodiv, &rotrim);
-
- /* caclulate new rate (rounding) based on new rodiv & rotrim */
- return roclk_calc_rate(*parent_rate, rodiv, rotrim);
-}
-
static int roclk_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
@@ -394,6 +384,8 @@ static int roclk_determine_rate(struct clk_hw *hw,
/* find a parent which can generate nearest clkrate >= rate */
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ u32 rotrim, rodiv;
+
/* get parent */
parent_clk = clk_hw_get_parent_by_index(hw, i);
if (!parent_clk)
@@ -404,7 +396,12 @@ static int roclk_determine_rate(struct clk_hw *hw,
if (req->rate > parent_rate)
continue;
- nearest_rate = roclk_round_rate(hw, req->rate, &parent_rate);
+ /* calculate dividers for new rate */
+ roclk_calc_div_trim(req->rate, req->best_parent_rate, &rodiv, &rotrim);
+
+	/* calculate new rate (rounding) based on new rodiv & rotrim */
+ nearest_rate = roclk_calc_rate(req->best_parent_rate, rodiv, rotrim);
+
delta = abs(nearest_rate - req->rate);
if ((nearest_rate >= req->rate) && (delta < best_delta)) {
best_parent_clk = parent_clk;
@@ -665,12 +662,15 @@ static unsigned long spll_clk_recalc_rate(struct clk_hw *hw,
return rate64;
}
-static long spll_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int spll_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct pic32_sys_pll *pll = clkhw_to_spll(hw);
- return spll_calc_mult_div(pll, rate, *parent_rate, NULL, NULL);
+ req->rate = spll_calc_mult_div(pll, req->rate, req->best_parent_rate,
+ NULL, NULL);
+
+ return 0;
}
static int spll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -725,7 +725,7 @@ static int spll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
/* SPLL clock operation */
const struct clk_ops pic32_spll_ops = {
.recalc_rate = spll_clk_recalc_rate,
- .round_rate = spll_clk_round_rate,
+ .determine_rate = spll_clk_determine_rate,
.set_rate = spll_clk_set_rate,
};
@@ -780,10 +780,13 @@ static unsigned long sclk_get_rate(struct clk_hw *hw, unsigned long parent_rate)
return parent_rate / div;
}
-static long sclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return calc_best_divided_rate(rate, *parent_rate, SLEW_SYSDIV, 1);
+ req->rate = calc_best_divided_rate(req->rate, req->best_parent_rate,
+ SLEW_SYSDIV, 1);
+
+ return 0;
}
static int sclk_set_rate(struct clk_hw *hw,
@@ -909,7 +912,7 @@ static int sclk_init(struct clk_hw *hw)
const struct clk_ops pic32_sclk_ops = {
.get_parent = sclk_get_parent,
.set_parent = sclk_set_parent,
- .round_rate = sclk_round_rate,
+ .determine_rate = sclk_determine_rate,
.set_rate = sclk_set_rate,
.recalc_rate = sclk_get_rate,
.init = sclk_init,
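
The pbclk/roclk/spll/sclk rework above is one instance of a conversion repeated across the rest of this section (mmp, mstar, mvebu, mxs, nuvoton): the legacy .round_rate callback, which returns the rounded rate as a long, becomes .determine_rate, which edits a struct clk_rate_request in place and returns 0 or a negative errno. Besides the cleaner error reporting, the request structure carries best_parent_rate and best_parent_hw, so a clock can participate in parent selection. A before/after sketch with a hypothetical 1..8 integer divider (names and divider range invented for illustration):

#include <linux/clk-provider.h>
#include <linux/minmax.h>

/* old style: rounded rate returned by value, parent rate by pointer */
static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *parent_rate)
{
	unsigned long div;

	if (!rate)
		return -EINVAL;

	div = clamp(*parent_rate / rate, 1UL, 8UL);

	return *parent_rate / div;
}

/* new style: the request carries the target rate in, the rounded rate out */
static int foo_determine_rate(struct clk_hw *hw,
			      struct clk_rate_request *req)
{
	unsigned long div;

	if (!req->rate)
		return -EINVAL;

	div = clamp(req->best_parent_rate / req->rate, 1UL, 8UL);
	req->rate = req->best_parent_rate / div;

	return 0;
}
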
diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c
index c22632a7439c..ee58304913ef 100644
--- a/drivers/clk/microchip/clk-mpfs.c
+++ b/drivers/clk/microchip/clk-mpfs.c
@@ -4,10 +4,13 @@
*
* Copyright (C) 2020-2022 Microchip Technology Inc. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <dt-bindings/clock/microchip,mpfs-clock.h>
#include <soc/microchip/mpfs.h>
@@ -30,6 +33,14 @@
#define MSSPLL_POSTDIV_WIDTH 0x07u
#define MSSPLL_FIXED_DIV 4u
+static const struct regmap_config mpfs_clk_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .max_register = REG_SUBBLK_RESET_CR,
+};
+
/*
* This clock ID is defined here, rather than the binding headers, as it is an
* internal clock only, and therefore has no consumers in other peripheral
@@ -39,6 +50,7 @@
struct mpfs_clock_data {
struct device *dev;
+ struct regmap *regmap;
void __iomem *base;
void __iomem *msspll_base;
struct clk_hw_onecell_data hw_data;
@@ -67,21 +79,39 @@ struct mpfs_msspll_out_hw_clock {
#define to_mpfs_msspll_out_clk(_hw) container_of(_hw, struct mpfs_msspll_out_hw_clock, hw)
+struct mpfs_cfg_clock {
+ struct regmap *map;
+ const struct clk_div_table *table;
+ u8 map_offset;
+ u8 shift;
+ u8 width;
+ u8 flags;
+};
+
struct mpfs_cfg_hw_clock {
- struct clk_divider cfg;
- struct clk_init_data init;
+ struct clk_hw hw;
+ struct mpfs_cfg_clock cfg;
unsigned int id;
- u32 reg_offset;
+};
+
+#define to_mpfs_cfg_clk(_hw) container_of(_hw, struct mpfs_cfg_hw_clock, hw)
+
+struct mpfs_periph_clock {
+ struct regmap *map;
+ u8 map_offset;
+ u8 shift;
};
struct mpfs_periph_hw_clock {
- struct clk_gate periph;
+ struct clk_hw hw;
+ struct mpfs_periph_clock periph;
unsigned int id;
};
+#define to_mpfs_periph_clk(_hw) container_of(_hw, struct mpfs_periph_hw_clock, hw)
+
/*
- * mpfs_clk_lock prevents anything else from writing to the
- * mpfs clk block while a software locked register is being written.
+ * Protects MSSPLL outputs, since there are two per register
*/
static DEFINE_SPINLOCK(mpfs_clk_lock);
@@ -219,16 +249,61 @@ static int mpfs_clk_register_msspll_outs(struct device *dev,
/*
* "CFG" clocks
*/
+static unsigned long mpfs_cfg_clk_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw);
+ struct mpfs_cfg_clock *cfg = &cfg_hw->cfg;
+ u32 val;
-#define CLK_CFG(_id, _name, _parent, _shift, _width, _table, _flags, _offset) { \
- .id = _id, \
- .cfg.shift = _shift, \
- .cfg.width = _width, \
- .cfg.table = _table, \
- .reg_offset = _offset, \
- .cfg.flags = _flags, \
- .cfg.hw.init = CLK_HW_INIT(_name, _parent, &clk_divider_ops, 0), \
- .cfg.lock = &mpfs_clk_lock, \
+ regmap_read(cfg->map, cfg->map_offset, &val);
+ val >>= cfg->shift;
+ val &= clk_div_mask(cfg->width);
+
+ return divider_recalc_rate(hw, prate, val, cfg->table, cfg->flags, cfg->width);
+}
+
+static int mpfs_cfg_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+ struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw);
+ struct mpfs_cfg_clock *cfg = &cfg_hw->cfg;
+
+ return divider_determine_rate(hw, req, cfg->table, cfg->width, 0);
+}
+
+static int mpfs_cfg_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long prate)
+{
+ struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw);
+ struct mpfs_cfg_clock *cfg = &cfg_hw->cfg;
+ int divider_setting;
+ u32 val;
+ u32 mask;
+
+ divider_setting = divider_get_val(rate, prate, cfg->table, cfg->width, 0);
+
+ if (divider_setting < 0)
+ return divider_setting;
+
+ mask = clk_div_mask(cfg->width) << cfg->shift;
+ val = divider_setting << cfg->shift;
+ regmap_update_bits(cfg->map, cfg->map_offset, val, mask);
+
+ return 0;
+}
+
+static const struct clk_ops mpfs_clk_cfg_ops = {
+ .recalc_rate = mpfs_cfg_clk_recalc_rate,
+ .determine_rate = mpfs_cfg_clk_determine_rate,
+ .set_rate = mpfs_cfg_clk_set_rate,
+};
+
+#define CLK_CFG(_id, _name, _parent, _shift, _width, _table, _flags, _offset) { \
+ .id = _id, \
+ .cfg.shift = _shift, \
+ .cfg.width = _width, \
+ .cfg.table = _table, \
+ .cfg.map_offset = _offset, \
+ .cfg.flags = _flags, \
+ .hw.init = CLK_HW_INIT(_name, _parent, &mpfs_clk_cfg_ops, 0), \
}
#define CLK_CPU_OFFSET 0u
@@ -248,10 +323,10 @@ static struct mpfs_cfg_hw_clock mpfs_cfg_clks[] = {
.cfg.shift = 0,
.cfg.width = 12,
.cfg.table = mpfs_div_rtcref_table,
- .reg_offset = REG_RTC_CLOCK_CR,
+ .cfg.map_offset = REG_RTC_CLOCK_CR,
.cfg.flags = CLK_DIVIDER_ONE_BASED,
- .cfg.hw.init =
- CLK_HW_INIT_PARENTS_DATA("clk_rtcref", mpfs_ext_ref, &clk_divider_ops, 0),
+ .hw.init =
+ CLK_HW_INIT_PARENTS_DATA("clk_rtcref", mpfs_ext_ref, &mpfs_clk_cfg_ops, 0),
}
};
@@ -264,14 +339,14 @@ static int mpfs_clk_register_cfgs(struct device *dev, struct mpfs_cfg_hw_clock *
for (i = 0; i < num_clks; i++) {
struct mpfs_cfg_hw_clock *cfg_hw = &cfg_hws[i];
- cfg_hw->cfg.reg = data->base + cfg_hw->reg_offset;
- ret = devm_clk_hw_register(dev, &cfg_hw->cfg.hw);
+ cfg_hw->cfg.map = data->regmap;
+ ret = devm_clk_hw_register(dev, &cfg_hw->hw);
if (ret)
return dev_err_probe(dev, ret, "failed to register clock id: %d\n",
cfg_hw->id);
id = cfg_hw->id;
- data->hw_data.hws[id] = &cfg_hw->cfg.hw;
+ data->hw_data.hws[id] = &cfg_hw->hw;
}
return 0;
@@ -281,15 +356,50 @@ static int mpfs_clk_register_cfgs(struct device *dev, struct mpfs_cfg_hw_clock *
* peripheral clocks - devices connected to axi or ahb buses.
*/
-#define CLK_PERIPH(_id, _name, _parent, _shift, _flags) { \
- .id = _id, \
- .periph.bit_idx = _shift, \
- .periph.hw.init = CLK_HW_INIT_HW(_name, _parent, &clk_gate_ops, \
- _flags), \
- .periph.lock = &mpfs_clk_lock, \
+static int mpfs_periph_clk_enable(struct clk_hw *hw)
+{
+ struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw);
+ struct mpfs_periph_clock *periph = &periph_hw->periph;
+
+ regmap_update_bits(periph->map, periph->map_offset,
+ BIT(periph->shift), BIT(periph->shift));
+
+ return 0;
}
-#define PARENT_CLK(PARENT) (&mpfs_cfg_clks[CLK_##PARENT##_OFFSET].cfg.hw)
+static void mpfs_periph_clk_disable(struct clk_hw *hw)
+{
+ struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw);
+ struct mpfs_periph_clock *periph = &periph_hw->periph;
+
+ regmap_update_bits(periph->map, periph->map_offset, BIT(periph->shift), 0);
+}
+
+static int mpfs_periph_clk_is_enabled(struct clk_hw *hw)
+{
+ struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw);
+ struct mpfs_periph_clock *periph = &periph_hw->periph;
+ u32 val;
+
+ regmap_read(periph->map, periph->map_offset, &val);
+
+ return !!(val & BIT(periph->shift));
+}
+
+static const struct clk_ops mpfs_periph_clk_ops = {
+ .enable = mpfs_periph_clk_enable,
+ .disable = mpfs_periph_clk_disable,
+ .is_enabled = mpfs_periph_clk_is_enabled,
+};
+
+#define CLK_PERIPH(_id, _name, _parent, _shift, _flags) { \
+ .id = _id, \
+ .periph.map_offset = REG_SUBBLK_CLOCK_CR, \
+ .periph.shift = _shift, \
+ .hw.init = CLK_HW_INIT_HW(_name, _parent, &mpfs_periph_clk_ops, _flags), \
+}
+
+#define PARENT_CLK(PARENT) (&mpfs_cfg_clks[CLK_##PARENT##_OFFSET].hw)
/*
* Critical clocks:
@@ -346,19 +456,55 @@ static int mpfs_clk_register_periphs(struct device *dev, struct mpfs_periph_hw_c
for (i = 0; i < num_clks; i++) {
struct mpfs_periph_hw_clock *periph_hw = &periph_hws[i];
- periph_hw->periph.reg = data->base + REG_SUBBLK_CLOCK_CR;
- ret = devm_clk_hw_register(dev, &periph_hw->periph.hw);
+ periph_hw->periph.map = data->regmap;
+ ret = devm_clk_hw_register(dev, &periph_hw->hw);
if (ret)
return dev_err_probe(dev, ret, "failed to register clock id: %d\n",
periph_hw->id);
id = periph_hws[i].id;
- data->hw_data.hws[id] = &periph_hw->periph.hw;
+ data->hw_data.hws[id] = &periph_hw->hw;
}
return 0;
}
+static inline int mpfs_clk_syscon_probe(struct mpfs_clock_data *clk_data,
+ struct platform_device *pdev)
+{
+ clk_data->regmap = syscon_regmap_lookup_by_compatible("microchip,mpfs-mss-top-sysreg");
+ if (IS_ERR(clk_data->regmap))
+ return PTR_ERR(clk_data->regmap);
+
+ clk_data->msspll_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(clk_data->msspll_base))
+ return PTR_ERR(clk_data->msspll_base);
+
+ return 0;
+}
+
+static inline int mpfs_clk_old_format_probe(struct mpfs_clock_data *clk_data,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+	dev_warn(dev, "falling back to old devicetree format\n");
+
+ clk_data->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(clk_data->base))
+ return PTR_ERR(clk_data->base);
+
+ clk_data->msspll_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(clk_data->msspll_base))
+ return PTR_ERR(clk_data->msspll_base);
+
+ clk_data->regmap = devm_regmap_init_mmio(dev, clk_data->base, &mpfs_clk_regmap_config);
+ if (IS_ERR(clk_data->regmap))
+ return PTR_ERR(clk_data->regmap);
+
+ return mpfs_reset_controller_register(dev, clk_data->regmap);
+}
+
static int mpfs_clk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -374,13 +520,12 @@ static int mpfs_clk_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- clk_data->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(clk_data->base))
- return PTR_ERR(clk_data->base);
-
- clk_data->msspll_base = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(clk_data->msspll_base))
- return PTR_ERR(clk_data->msspll_base);
+ ret = mpfs_clk_syscon_probe(clk_data, pdev);
+ if (ret) {
+ ret = mpfs_clk_old_format_probe(clk_data, pdev);
+ if (ret)
+ return ret;
+ }
clk_data->hw_data.num = num_clks;
clk_data->dev = dev;
@@ -406,11 +551,7 @@ static int mpfs_clk_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, &clk_data->hw_data);
- if (ret)
- return ret;
-
- return mpfs_reset_controller_register(dev, clk_data->base + REG_SUBBLK_RESET_CR);
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, &clk_data->hw_data);
}
static const struct of_device_id mpfs_clk_of_match_table[] = {
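
With this split the MPFS driver now prefers a syscon shared with the rest of the "MSS top sysreg" block, and only falls back to the legacy two-region devicetree layout, where it must create the regmap itself and keep registering the reset controller from it. Anything else addressing the same block can then share that regmap; a hedged sketch of the lookup a hypothetical consumer would perform:

#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

/* hypothetical consumer sharing the MSS top sysreg block */
static int bar_probe(struct platform_device *pdev)
{
	struct regmap *map;

	map = syscon_regmap_lookup_by_compatible("microchip,mpfs-mss-top-sysreg");
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* read/write shared registers through 'map' from here on */
	return 0;
}
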
diff --git a/drivers/clk/mmp/Kconfig b/drivers/clk/mmp/Kconfig
new file mode 100644
index 000000000000..b0d2fea3cda5
--- /dev/null
+++ b/drivers/clk/mmp/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config COMMON_CLK_PXA1908
+ bool "Clock driver for Marvell PXA1908"
+ depends on ARCH_MMP || COMPILE_TEST
+ depends on OF
+ default y if ARCH_MMP && ARM64
+ select AUXILIARY_BUS
+ help
+ This driver supports the Marvell PXA1908 SoC clocks.
diff --git a/drivers/clk/mmp/Makefile b/drivers/clk/mmp/Makefile
index 062cd87fa8dd..0a94f2f08563 100644
--- a/drivers/clk/mmp/Makefile
+++ b/drivers/clk/mmp/Makefile
@@ -11,4 +11,7 @@ obj-$(CONFIG_MACH_MMP_DT) += clk-of-pxa168.o clk-of-pxa910.o
obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o clk-pll.o pwr-island.o
obj-$(CONFIG_COMMON_CLK_MMP2_AUDIO) += clk-audio.o
-obj-$(CONFIG_ARCH_MMP) += clk-of-pxa1928.o clk-pxa1908-apbc.o clk-pxa1908-apbcp.o clk-pxa1908-apmu.o clk-pxa1908-mpmu.o
+obj-$(CONFIG_COMMON_CLK_PXA1908) += clk-pxa1908-apbc.o clk-pxa1908-apbcp.o \
+ clk-pxa1908-mpmu.o clk-pxa1908-apmu.o
+
+obj-$(CONFIG_ARCH_MMP) += clk-of-pxa1928.o
diff --git a/drivers/clk/mmp/clk-audio.c b/drivers/clk/mmp/clk-audio.c
index 88d798d510cd..ed27fc796c94 100644
--- a/drivers/clk/mmp/clk-audio.c
+++ b/drivers/clk/mmp/clk-audio.c
@@ -164,23 +164,23 @@ static unsigned long audio_pll_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long audio_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int audio_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned int prediv;
unsigned int postdiv;
long rounded = 0;
for (prediv = 0; prediv < ARRAY_SIZE(predivs); prediv++) {
- if (predivs[prediv].parent_rate != *parent_rate)
+ if (predivs[prediv].parent_rate != req->best_parent_rate)
continue;
for (postdiv = 0; postdiv < ARRAY_SIZE(postdivs); postdiv++) {
long freq = predivs[prediv].freq_vco;
freq /= postdivs[postdiv].divisor;
- if (freq == rate)
- return rate;
- if (freq < rate)
+ if (freq == req->rate)
+ return 0;
+ if (freq < req->rate)
continue;
if (rounded && freq > rounded)
continue;
@@ -188,7 +188,9 @@ static long audio_pll_round_rate(struct clk_hw *hw, unsigned long rate,
}
}
- return rounded;
+ req->rate = rounded;
+
+ return 0;
}
static int audio_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -228,7 +230,7 @@ static int audio_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops audio_pll_ops = {
.recalc_rate = audio_pll_recalc_rate,
- .round_rate = audio_pll_round_rate,
+ .determine_rate = audio_pll_determine_rate,
.set_rate = audio_pll_set_rate,
};
diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c
index 6556f6ada2e8..0b1bb01346f0 100644
--- a/drivers/clk/mmp/clk-frac.c
+++ b/drivers/clk/mmp/clk-frac.c
@@ -21,8 +21,8 @@
#define to_clk_factor(hw) container_of(hw, struct mmp_clk_factor, hw)
-static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_factor_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct mmp_clk_factor *factor = to_clk_factor(hw);
u64 rate = 0, prev_rate;
@@ -33,19 +33,20 @@ static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
d = &factor->ftbl[i];
prev_rate = rate;
- rate = (u64)(*prate) * d->denominator;
+ rate = (u64)(req->best_parent_rate) * d->denominator;
do_div(rate, d->numerator * factor->masks->factor);
- if (rate > drate)
+ if (rate > req->rate)
break;
}
- if ((i == 0) || (i == factor->ftbl_cnt)) {
- return rate;
- } else {
- if ((drate - prev_rate) > (rate - drate))
- return rate;
- else
- return prev_rate;
- }
+
+ if ((i == 0) || (i == factor->ftbl_cnt))
+ req->rate = rate;
+ else if ((req->rate - prev_rate) > (rate - req->rate))
+ req->rate = rate;
+ else
+ req->rate = prev_rate;
+
+ return 0;
}
static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
@@ -160,7 +161,7 @@ static int clk_factor_init(struct clk_hw *hw)
static const struct clk_ops clk_factor_ops = {
.recalc_rate = clk_factor_recalc_rate,
- .round_rate = clk_factor_round_rate,
+ .determine_rate = clk_factor_determine_rate,
.set_rate = clk_factor_set_rate,
.init = clk_factor_init,
};
diff --git a/drivers/clk/mmp/clk-gate.c b/drivers/clk/mmp/clk-gate.c
index 350eeb3e9e25..6855815ee8be 100644
--- a/drivers/clk/mmp/clk-gate.c
+++ b/drivers/clk/mmp/clk-gate.c
@@ -15,7 +15,7 @@
#include "clk.h"
/*
- * Some clocks will have mutiple bits to enable the clocks, and
+ * Some clocks will have multiple bits to enable the clocks, and
* the bits to disable the clock is not same as enabling bits.
*/
diff --git a/drivers/clk/mmp/clk-pxa1908-apmu.c b/drivers/clk/mmp/clk-pxa1908-apmu.c
index d3a070687fc5..7594a495a009 100644
--- a/drivers/clk/mmp/clk-pxa1908-apmu.c
+++ b/drivers/clk/mmp/clk-pxa1908-apmu.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/auxiliary_bus.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -85,6 +86,7 @@ static void pxa1908_axi_periph_clk_init(struct pxa1908_clk_unit *pxa_unit)
static int pxa1908_apmu_probe(struct platform_device *pdev)
{
struct pxa1908_clk_unit *pxa_unit;
+ struct auxiliary_device *adev;
pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
if (!pxa_unit)
@@ -94,6 +96,11 @@ static int pxa1908_apmu_probe(struct platform_device *pdev)
if (IS_ERR(pxa_unit->base))
return PTR_ERR(pxa_unit->base);
+ adev = devm_auxiliary_device_create(&pdev->dev, "power", NULL);
+ if (IS_ERR(adev))
+ return dev_err_probe(&pdev->dev, PTR_ERR(adev),
+ "Failed to register power controller\n");
+
mmp_clk_init(pdev->dev.of_node, &pxa_unit->unit, APMU_NR_CLKS);
pxa1908_axi_periph_clk_init(pxa_unit);
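
devm_auxiliary_device_create() spawns a child device on the auxiliary bus so a separate power-domain driver can bind to the APMU block without being folded into this clock driver. Auxiliary matching is done on a "modname.name" string; here is a hedged sketch of what the consumer side can look like — the id string and everything inside probe are assumptions, not taken from this series:

#include <linux/auxiliary_bus.h>
#include <linux/module.h>

static int pxa1908_power_probe(struct auxiliary_device *adev,
			       const struct auxiliary_device_id *id)
{
	/* power-controller setup for the APMU block would go here */
	return 0;
}

/* must be KBUILD_MODNAME of the creating module plus ".power"; assumed */
static const struct auxiliary_device_id pxa1908_power_id_table[] = {
	{ .name = "clk_pxa1908_apmu.power" },
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, pxa1908_power_id_table);

static struct auxiliary_driver pxa1908_power_driver = {
	.probe = pxa1908_power_probe,
	.id_table = pxa1908_power_id_table,
};
module_auxiliary_driver(pxa1908_power_driver);

MODULE_DESCRIPTION("Sketch of an auxiliary consumer for the PXA1908 APMU");
MODULE_LICENSE("GPL");
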
diff --git a/drivers/clk/mstar/clk-msc313-cpupll.c b/drivers/clk/mstar/clk-msc313-cpupll.c
index a93e2dba09d3..3e643be02fe2 100644
--- a/drivers/clk/mstar/clk-msc313-cpupll.c
+++ b/drivers/clk/mstar/clk-msc313-cpupll.c
@@ -140,20 +140,22 @@ static unsigned long msc313_cpupll_recalc_rate(struct clk_hw *hw, unsigned long
parent_rate);
}
-static long msc313_cpupll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int msc313_cpupll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- u32 reg = msc313_cpupll_regforfrequecy(rate, *parent_rate);
- long rounded = msc313_cpupll_frequencyforreg(reg, *parent_rate);
+ u32 reg = msc313_cpupll_regforfrequecy(req->rate, req->best_parent_rate);
+ long rounded = msc313_cpupll_frequencyforreg(reg, req->best_parent_rate);
/*
* This is my poor attempt at making sure the resulting
* rate doesn't overshoot the requested rate.
*/
- for (; rounded >= rate && reg > 0; reg--)
- rounded = msc313_cpupll_frequencyforreg(reg, *parent_rate);
+ for (; rounded >= req->rate && reg > 0; reg--)
+ rounded = msc313_cpupll_frequencyforreg(reg, req->best_parent_rate);
- return rounded;
+ req->rate = rounded;
+
+ return 0;
}
static int msc313_cpupll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
@@ -168,7 +170,7 @@ static int msc313_cpupll_set_rate(struct clk_hw *hw, unsigned long rate, unsigne
static const struct clk_ops msc313_cpupll_ops = {
.recalc_rate = msc313_cpupll_recalc_rate,
- .round_rate = msc313_cpupll_round_rate,
+ .determine_rate = msc313_cpupll_determine_rate,
.set_rate = msc313_cpupll_set_rate,
};
diff --git a/drivers/clk/mvebu/ap-cpu-clk.c b/drivers/clk/mvebu/ap-cpu-clk.c
index 677cc3514849..1e44ace7d951 100644
--- a/drivers/clk/mvebu/ap-cpu-clk.c
+++ b/drivers/clk/mvebu/ap-cpu-clk.c
@@ -210,19 +210,21 @@ static int ap_cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long ap_cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ap_cpu_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- int divider = *parent_rate / rate;
+ int divider = req->best_parent_rate / req->rate;
divider = min(divider, APN806_MAX_DIVIDER);
- return *parent_rate / divider;
+ req->rate = req->best_parent_rate / divider;
+
+ return 0;
}
static const struct clk_ops ap_cpu_clk_ops = {
.recalc_rate = ap_cpu_clk_recalc_rate,
- .round_rate = ap_cpu_clk_round_rate,
+ .determine_rate = ap_cpu_clk_determine_rate,
.set_rate = ap_cpu_clk_set_rate,
};
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 13906e31bef8..bd0bc8e7b1e7 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -454,12 +454,12 @@ static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}
-static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_pm_cpu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
struct regmap *base = pm_cpu->nb_pm_base;
- unsigned int div = *parent_rate / rate;
+ unsigned int div = req->best_parent_rate / req->rate;
unsigned int load_level;
/* only available when DVFS is enabled */
if (!armada_3700_pm_dvfs_is_enabled(base))
@@ -474,13 +474,16 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
val >>= offset;
val &= ARMADA_37XX_NB_TBG_DIV_MASK;
- if (val == div)
+ if (val == div) {
/*
* We found a load level matching the target
* divider, switch to this load level and
* return.
*/
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
+ }
}
/* We didn't find any valid divider */
@@ -600,7 +603,7 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_pm_cpu_ops = {
.get_parent = clk_pm_cpu_get_parent,
- .round_rate = clk_pm_cpu_round_rate,
+ .determine_rate = clk_pm_cpu_determine_rate,
.set_rate = clk_pm_cpu_set_rate,
.recalc_rate = clk_pm_cpu_recalc_rate,
};
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index 45665655a258..8d31a595a27c 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -7,7 +7,6 @@
* Gregory CLEMENT <gregory.clement@free-electrons.com>
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
* Andrew Lunn <andrew@lunn.ch>
- *
*/
#include <linux/kernel.h>
@@ -19,8 +18,8 @@
/*
* Core Clocks
*
- * Armada XP Sample At Reset is a 64 bit bitfiled split in two
- * register of 32 bits
+ * Armada XP Sample At Reset is a 64 bit bitfield split in two
+ * registers of 32 bits
*/
#define SARL 0 /* Low part [0:31] */
diff --git a/drivers/clk/mvebu/clk-corediv.c b/drivers/clk/mvebu/clk-corediv.c
index 818b175391fa..628032341cbb 100644
--- a/drivers/clk/mvebu/clk-corediv.c
+++ b/drivers/clk/mvebu/clk-corediv.c
@@ -135,19 +135,21 @@ static unsigned long clk_corediv_recalc_rate(struct clk_hw *hwclk,
return parent_rate / div;
}
-static long clk_corediv_round_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_corediv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/* Valid ratio are 1:4, 1:5, 1:6 and 1:8 */
u32 div;
- div = *parent_rate / rate;
+ div = req->best_parent_rate / req->rate;
if (div < 4)
div = 4;
else if (div > 6)
div = 8;
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
@@ -199,7 +201,7 @@ static const struct clk_corediv_soc_desc armada370_corediv_soc = {
.disable = clk_corediv_disable,
.is_enabled = clk_corediv_is_enabled,
.recalc_rate = clk_corediv_recalc_rate,
- .round_rate = clk_corediv_round_rate,
+ .determine_rate = clk_corediv_determine_rate,
.set_rate = clk_corediv_set_rate,
},
.ratio_reload = BIT(8),
@@ -215,7 +217,7 @@ static const struct clk_corediv_soc_desc armada380_corediv_soc = {
.disable = clk_corediv_disable,
.is_enabled = clk_corediv_is_enabled,
.recalc_rate = clk_corediv_recalc_rate,
- .round_rate = clk_corediv_round_rate,
+ .determine_rate = clk_corediv_determine_rate,
.set_rate = clk_corediv_set_rate,
},
.ratio_reload = BIT(8),
@@ -228,7 +230,7 @@ static const struct clk_corediv_soc_desc armada375_corediv_soc = {
.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
.ops = {
.recalc_rate = clk_corediv_recalc_rate,
- .round_rate = clk_corediv_round_rate,
+ .determine_rate = clk_corediv_determine_rate,
.set_rate = clk_corediv_set_rate,
},
.ratio_reload = BIT(8),
@@ -240,7 +242,7 @@ static const struct clk_corediv_soc_desc mv98dx3236_corediv_soc = {
.ndescs = ARRAY_SIZE(mv98dx3236_corediv_desc),
.ops = {
.recalc_rate = clk_corediv_recalc_rate,
- .round_rate = clk_corediv_round_rate,
+ .determine_rate = clk_corediv_determine_rate,
.set_rate = clk_corediv_set_rate,
},
.ratio_reload = BIT(10),
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index db2b38c21304..0de7660e73d2 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -56,19 +56,21 @@ static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
return parent_rate / div;
}
-static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_cpu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/* Valid ratio are 1:1, 1:2 and 1:3 */
u32 div;
- div = *parent_rate / rate;
+ div = req->best_parent_rate / req->rate;
if (div == 0)
div = 1;
else if (div > 3)
div = 3;
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
@@ -159,7 +161,7 @@ static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
static const struct clk_ops cpu_ops = {
.recalc_rate = clk_cpu_recalc_rate,
- .round_rate = clk_cpu_round_rate,
+ .determine_rate = clk_cpu_determine_rate,
.set_rate = clk_cpu_set_rate,
};
diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c
index 785dbede4835..5adbbd91a6db 100644
--- a/drivers/clk/mvebu/common.c
+++ b/drivers/clk/mvebu/common.c
@@ -215,22 +215,26 @@ static struct clk *clk_gating_get_src(
return ERR_PTR(-ENODEV);
}
-static int mvebu_clk_gating_suspend(void)
+static int mvebu_clk_gating_suspend(void *data)
{
ctrl->saved_reg = readl(ctrl->base);
return 0;
}
-static void mvebu_clk_gating_resume(void)
+static void mvebu_clk_gating_resume(void *data)
{
writel(ctrl->saved_reg, ctrl->base);
}
-static struct syscore_ops clk_gate_syscore_ops = {
+static const struct syscore_ops clk_gate_syscore_ops = {
.suspend = mvebu_clk_gating_suspend,
.resume = mvebu_clk_gating_resume,
};
+static struct syscore clk_gate_syscore = {
+ .ops = &clk_gate_syscore_ops,
+};
+
void __init mvebu_clk_gating_setup(struct device_node *np,
const struct clk_gating_soc_desc *desc)
{
@@ -284,7 +288,7 @@ void __init mvebu_clk_gating_setup(struct device_node *np,
of_clk_add_provider(np, clk_gating_get_src, ctrl);
- register_syscore_ops(&clk_gate_syscore_ops);
+ register_syscore(&clk_gate_syscore);
return;
gates_out:
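
This hunk tracks a syscore API change that is visible in the deltas themselves: the suspend/resume callbacks gain a void *data argument, the ops table becomes const, and registration goes through a struct syscore instance wrapping the ops instead of the ops being registered directly. The shape, restated only as far as the hunk shows it (any further struct syscore fields are not visible here):

static int my_suspend(void *data)
{
	/* save hardware state */
	return 0;
}

static void my_resume(void *data)
{
	/* restore hardware state */
}

static const struct syscore_ops my_syscore_ops = {
	.suspend = my_suspend,
	.resume  = my_resume,
};

static struct syscore my_syscore = {
	.ops = &my_syscore_ops,
};

/* at init time: register_syscore(&my_syscore); */
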
diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
index 03c59bf22106..b47c86906046 100644
--- a/drivers/clk/mvebu/cp110-system-controller.c
+++ b/drivers/clk/mvebu/cp110-system-controller.c
@@ -110,6 +110,25 @@ static const char * const gate_base_names[] = {
[CP110_GATE_EIP197] = "eip197"
};
+static unsigned long gate_flags(const u8 bit_idx)
+{
+ switch (bit_idx) {
+ case CP110_GATE_PCIE_X1_0:
+ case CP110_GATE_PCIE_X1_1:
+ case CP110_GATE_PCIE_X4:
+ /*
+	 * If a port had an active link at boot time, stopping
+	 * the clock creates a failed state from which the
+	 * controller driver cannot recover. Prevent stopping
+	 * this clock until a driver has taken ownership.
+ */
+ return CLK_IGNORE_UNUSED;
+ default:
+ return 0;
+ }
+}
+
struct cp110_gate_clk {
struct clk_hw hw;
struct regmap *regmap;
@@ -171,6 +190,7 @@ static struct clk_hw *cp110_register_gate(const char *name,
init.ops = &cp110_gate_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
+ init.flags = gate_flags(bit_idx);
gate->regmap = regmap;
gate->bit_idx = bit_idx;
diff --git a/drivers/clk/mvebu/dove-divider.c b/drivers/clk/mvebu/dove-divider.c
index 0a90452ee808..47cc49e4cd99 100644
--- a/drivers/clk/mvebu/dove-divider.c
+++ b/drivers/clk/mvebu/dove-divider.c
@@ -108,23 +108,23 @@ static unsigned long dove_recalc_rate(struct clk_hw *hw, unsigned long parent)
return rate;
}
-static long dove_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent)
+static int dove_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dove_clk *dc = to_dove_clk(hw);
- unsigned long parent_rate = *parent;
+ unsigned long parent_rate = req->best_parent_rate;
int divider;
- divider = dove_calc_divider(dc, rate, parent_rate, false);
+ divider = dove_calc_divider(dc, req->rate, parent_rate, false);
if (divider < 0)
return divider;
- rate = DIV_ROUND_CLOSEST(parent_rate, divider);
+ req->rate = DIV_ROUND_CLOSEST(parent_rate, divider);
pr_debug("%s(): %s divider=%u parent=%lu rate=%lu\n",
- __func__, dc->name, divider, parent_rate, rate);
+ __func__, dc->name, divider, parent_rate, req->rate);
- return rate;
+ return 0;
}
static int dove_set_clock(struct clk_hw *hw, unsigned long rate,
@@ -154,7 +154,7 @@ static int dove_set_clock(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops dove_divider_ops = {
.set_rate = dove_set_clock,
- .round_rate = dove_round_rate,
+ .determine_rate = dove_determine_rate,
.recalc_rate = dove_recalc_rate,
};
diff --git a/drivers/clk/mxs/clk-div.c b/drivers/clk/mxs/clk-div.c
index 928e8b1c46a1..8afe1a9c1552 100644
--- a/drivers/clk/mxs/clk-div.c
+++ b/drivers/clk/mxs/clk-div.c
@@ -16,7 +16,7 @@
* @busy: busy bit shift
*
* The mxs divider clock is a subclass of basic clk_divider with an
- * addtional busy bit.
+ * additional busy bit.
*/
struct clk_div {
struct clk_divider divider;
@@ -40,12 +40,12 @@ static unsigned long clk_div_recalc_rate(struct clk_hw *hw,
return div->ops->recalc_rate(&div->divider.hw, parent_rate);
}
-static long clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_div *div = to_clk_div(hw);
- return div->ops->round_rate(&div->divider.hw, rate, prate);
+ return div->ops->determine_rate(&div->divider.hw, req);
}
static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -63,7 +63,7 @@ static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_div_ops = {
.recalc_rate = clk_div_recalc_rate,
- .round_rate = clk_div_round_rate,
+ .determine_rate = clk_div_determine_rate,
.set_rate = clk_div_set_rate,
};
diff --git a/drivers/clk/mxs/clk-frac.c b/drivers/clk/mxs/clk-frac.c
index bba0d840dd76..73f514fb84ff 100644
--- a/drivers/clk/mxs/clk-frac.c
+++ b/drivers/clk/mxs/clk-frac.c
@@ -44,18 +44,18 @@ static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
return tmp_rate >> frac->width;
}
-static long clk_frac_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_frac_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_frac *frac = to_clk_frac(hw);
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
u32 div;
u64 tmp, tmp_rate, result;
- if (rate > parent_rate)
+ if (req->rate > parent_rate)
return -EINVAL;
- tmp = rate;
+ tmp = req->rate;
tmp <<= frac->width;
do_div(tmp, parent_rate);
div = tmp;
@@ -67,7 +67,9 @@ static long clk_frac_round_rate(struct clk_hw *hw, unsigned long rate,
result = tmp_rate >> frac->width;
if ((result << frac->width) < tmp_rate)
result += 1;
- return result;
+ req->rate = result;
+
+ return 0;
}
static int clk_frac_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -103,7 +105,7 @@ static int clk_frac_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_frac_ops = {
.recalc_rate = clk_frac_recalc_rate,
- .round_rate = clk_frac_round_rate,
+ .determine_rate = clk_frac_determine_rate,
.set_rate = clk_frac_set_rate,
};
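
In fixed point, the fractional divider above computes div = (rate << width) / parent and then reconstructs the rounded rate as (parent * div) >> width (the tmp_rate computation sits in the elided part of the function, so that reconstruction is inferred), bumping the result by one when the shift truncated. A worked example, assuming a 5-bit frac field — width is per-clock in the real driver:

/*
 *   parent = 480000000, requested rate = 99000000, width = 5
 *   div      = (99000000 << 5) / 480000000  = 6    (6.6, truncated)
 *   tmp_rate = 480000000 * 6                = 2880000000
 *   result   = 2880000000 >> 5              = 90000000
 *   (90000000 << 5) == tmp_rate, so no +1 correction is applied
 */
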
diff --git a/drivers/clk/mxs/clk-ref.c b/drivers/clk/mxs/clk-ref.c
index 2297259da89a..a99ee4cd2ece 100644
--- a/drivers/clk/mxs/clk-ref.c
+++ b/drivers/clk/mxs/clk-ref.c
@@ -57,22 +57,24 @@ static unsigned long clk_ref_recalc_rate(struct clk_hw *hw,
return tmp;
}
-static long clk_ref_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_ref_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
u64 tmp = parent_rate;
u8 frac;
- tmp = tmp * 18 + rate / 2;
- do_div(tmp, rate);
+ tmp = tmp * 18 + req->rate / 2;
+ do_div(tmp, req->rate);
frac = clamp(tmp, 18, 35);
tmp = parent_rate;
tmp *= 18;
do_div(tmp, frac);
- return tmp;
+ req->rate = tmp;
+
+ return 0;
}
static int clk_ref_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -104,7 +106,7 @@ static const struct clk_ops clk_ref_ops = {
.enable = clk_ref_enable,
.disable = clk_ref_disable,
.recalc_rate = clk_ref_recalc_rate,
- .round_rate = clk_ref_round_rate,
+ .determine_rate = clk_ref_determine_rate,
.set_rate = clk_ref_set_rate,
};
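
For a feel of the numbers: assuming the usual 480 MHz PLL parent, a 200 MHz request gives tmp = 480e6 * 18 + 100e6 = 8,740,000,000, which truncates to 43 after dividing by 200e6; clamp() pins that to 35, and the result is 480e6 * 18 / 35, roughly 246.9 MHz. In other words, with a 480 MHz parent this ref clock can only produce between about 246.9 MHz (frac = 35) and 480 MHz (frac = 18), and out-of-range requests are clamped rather than rejected.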
diff --git a/drivers/clk/nuvoton/Kconfig b/drivers/clk/nuvoton/Kconfig
index fe4b7f62f467..e7019b69ea74 100644
--- a/drivers/clk/nuvoton/Kconfig
+++ b/drivers/clk/nuvoton/Kconfig
@@ -4,7 +4,7 @@
config COMMON_CLK_NUVOTON
bool "Nuvoton clock controller common support"
depends on ARCH_MA35 || COMPILE_TEST
- default y
+ default ARCH_MA35
help
Say y here to enable common clock controller for Nuvoton platforms.
@@ -12,7 +12,7 @@ if COMMON_CLK_NUVOTON
config CLK_MA35D1
bool "Nuvoton MA35D1 clock controller support"
- default y
+ default ARCH_MA35
help
Build the clock controller driver for MA35D1 SoC.
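
Dropping the bare "default y" matters because both symbols sit behind COMPILE_TEST-capable dependencies: with "default y", enabling COMPILE_TEST would switch the Nuvoton drivers on in every test build, whereas "default ARCH_MA35" keeps them default-enabled only on configurations that actually target the hardware.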
diff --git a/drivers/clk/nuvoton/clk-ma35d1-divider.c b/drivers/clk/nuvoton/clk-ma35d1-divider.c
index bb8c23d2b895..e39f53d5bf45 100644
--- a/drivers/clk/nuvoton/clk-ma35d1-divider.c
+++ b/drivers/clk/nuvoton/clk-ma35d1-divider.c
@@ -39,12 +39,16 @@ static unsigned long ma35d1_clkdiv_recalc_rate(struct clk_hw *hw, unsigned long
CLK_DIVIDER_ROUND_CLOSEST, dclk->width);
}
-static long ma35d1_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
+static int ma35d1_clkdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ma35d1_adc_clk_div *dclk = to_ma35d1_adc_clk_div(hw);
- return divider_round_rate(hw, rate, prate, dclk->table,
- dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ dclk->table, dclk->width,
+ CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
}
static int ma35d1_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
@@ -71,7 +75,7 @@ static int ma35d1_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate, unsigne
static const struct clk_ops ma35d1_adc_clkdiv_ops = {
.recalc_rate = ma35d1_clkdiv_recalc_rate,
- .round_rate = ma35d1_clkdiv_round_rate,
+ .determine_rate = ma35d1_clkdiv_determine_rate,
.set_rate = ma35d1_clkdiv_set_rate,
};
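
The open-coded wrapper above feeds req->rate and req->best_parent_rate through divider_round_rate(). The common divider code also exports divider_determine_rate(), which performs the same table-based search directly on the request; an equivalent form of the function, shown as a sketch rather than a suggested change:

/* Equivalent sketch using the generic helper from clk-divider.c. */
static int ma35d1_clkdiv_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct ma35d1_adc_clk_div *dclk = to_ma35d1_adc_clk_div(hw);

	return divider_determine_rate(hw, req, dclk->table, dclk->width,
				      CLK_DIVIDER_ROUND_CLOSEST);
}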
diff --git a/drivers/clk/nuvoton/clk-ma35d1-pll.c b/drivers/clk/nuvoton/clk-ma35d1-pll.c
index ff3fb8b87c24..4620acfe47e8 100644
--- a/drivers/clk/nuvoton/clk-ma35d1-pll.c
+++ b/drivers/clk/nuvoton/clk-ma35d1-pll.c
@@ -244,35 +244,43 @@ static unsigned long ma35d1_clk_pll_recalc_rate(struct clk_hw *hw, unsigned long
return 0;
}
-static long ma35d1_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ma35d1_clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ma35d1_clk_pll *pll = to_ma35d1_clk_pll(hw);
u32 reg_ctl[3] = { 0 };
unsigned long pll_freq;
long ret;
- if (*parent_rate < PLL_FREF_MIN_FREQ || *parent_rate > PLL_FREF_MAX_FREQ)
+ if (req->best_parent_rate < PLL_FREF_MIN_FREQ || req->best_parent_rate > PLL_FREF_MAX_FREQ)
return -EINVAL;
- ret = ma35d1_pll_find_closest(pll, rate, *parent_rate, reg_ctl, &pll_freq);
+ ret = ma35d1_pll_find_closest(pll, req->rate, req->best_parent_rate,
+ reg_ctl, &pll_freq);
if (ret < 0)
return ret;
switch (pll->id) {
case CAPLL:
reg_ctl[0] = readl_relaxed(pll->ctl0_base);
- pll_freq = ma35d1_calc_smic_pll_freq(reg_ctl[0], *parent_rate);
- return pll_freq;
+ pll_freq = ma35d1_calc_smic_pll_freq(reg_ctl[0], req->best_parent_rate);
+ req->rate = pll_freq;
+
+ return 0;
case DDRPLL:
case APLL:
case EPLL:
case VPLL:
reg_ctl[0] = readl_relaxed(pll->ctl0_base);
reg_ctl[1] = readl_relaxed(pll->ctl1_base);
- pll_freq = ma35d1_calc_pll_freq(pll->mode, reg_ctl, *parent_rate);
- return pll_freq;
+ pll_freq = ma35d1_calc_pll_freq(pll->mode, reg_ctl, req->best_parent_rate);
+ req->rate = pll_freq;
+
+ return 0;
}
+
+ req->rate = 0;
+
return 0;
}
@@ -311,12 +319,12 @@ static const struct clk_ops ma35d1_clk_pll_ops = {
.unprepare = ma35d1_clk_pll_unprepare,
.set_rate = ma35d1_clk_pll_set_rate,
.recalc_rate = ma35d1_clk_pll_recalc_rate,
- .round_rate = ma35d1_clk_pll_round_rate,
+ .determine_rate = ma35d1_clk_pll_determine_rate,
};
static const struct clk_ops ma35d1_clk_fixed_pll_ops = {
.recalc_rate = ma35d1_clk_pll_recalc_rate,
- .round_rate = ma35d1_clk_pll_round_rate,
+ .determine_rate = ma35d1_clk_pll_determine_rate,
};
struct clk_hw *ma35d1_reg_clk_pll(struct device *dev, u32 id, u8 u8mode, const char *name,
diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
index ddb28b38f549..751b786d73f8 100644
--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
@@ -148,7 +148,7 @@ static int lpc18xx_ccu_gate_endisable(struct clk_hw *hw, bool enable)
val |= LPC18XX_CCU_RUN;
} else {
/*
- * To safely disable a branch clock a squence of two separate
+ * To safely disable a branch clock a sequence of two separate
* writes must be used. First write should set the AUTO bit
* and the next write should clear the RUN bit.
*/
diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
index 81efa885069b..b9e204d63a97 100644
--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
@@ -370,23 +370,25 @@ static unsigned long lpc18xx_pll0_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long lpc18xx_pll0_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lpc18xx_pll0_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long m;
- if (*prate < rate) {
+ if (req->best_parent_rate < req->rate) {
pr_warn("%s: pll dividers not supported\n", __func__);
return -EINVAL;
}
- m = DIV_ROUND_UP_ULL(*prate, rate * 2);
- if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) {
- pr_warn("%s: unable to support rate %lu\n", __func__, rate);
+ m = DIV_ROUND_UP_ULL(req->best_parent_rate, req->rate * 2);
+ if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) {
+ pr_warn("%s: unable to support rate %lu\n", __func__, req->rate);
return -EINVAL;
}
- return 2 * *prate * m;
+ req->rate = 2 * req->best_parent_rate * m;
+
+ return 0;
}
static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -402,7 +404,7 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
}
m = DIV_ROUND_UP_ULL(parent_rate, rate * 2);
- if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) {
+ if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) {
pr_warn("%s: unable to support rate %lu\n", __func__, rate);
return -EINVAL;
}
@@ -443,7 +445,7 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops lpc18xx_pll0_ops = {
.recalc_rate = lpc18xx_pll0_recalc_rate,
- .round_rate = lpc18xx_pll0_round_rate,
+ .determine_rate = lpc18xx_pll0_determine_rate,
.set_rate = lpc18xx_pll0_set_rate,
};
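
The condition rewrite that accompanies the conversion here (and the matching one in lpc18xx_pll0_set_rate) is a real bug fix: m is unsigned, so the old check "m <= 0 && m > LPC18XX_PLL0_MSEL_MAX" could never be true (no value is simultaneously zero and above the maximum), meaning out-of-range multipliers were silently accepted; "m == 0 || m > LPC18XX_PLL0_MSEL_MAX" makes the bounds check actually fire.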
diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
index e00f270bc6aa..23f980cf6a2b 100644
--- a/drivers/clk/nxp/clk-lpc32xx.c
+++ b/drivers/clk/nxp/clk-lpc32xx.c
@@ -68,7 +68,6 @@ static const struct regmap_config lpc32xx_scb_regmap_config = {
.reg_stride = 4,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
.max_register = 0x114,
- .fast_io = true,
};
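
Removing .fast_io from these MMIO regmap configs (here and in the Qualcomm PLL drivers further down) should be behaviorally neutral: the regmap-mmio bus already declares fast_io, and the regmap core takes the spinlock path when either the bus or the config asks for it, so the per-driver flag was redundant.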
static struct regmap *clk_regmap;
@@ -579,17 +578,17 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return regmap_update_bits(clk_regmap, clk->reg, 0x1FFFF, val);
}
-static long clk_hclk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_hclk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lpc32xx_pll_clk *clk = to_lpc32xx_pll_clk(hw);
- u64 m_i, o = rate, i = *parent_rate, d = (u64)rate << 6;
+ u64 m_i, o = req->rate, i = req->best_parent_rate, d = (u64)req->rate << 6;
u64 m = 0, n = 0, p = 0;
int p_i, n_i;
- pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), *parent_rate, rate);
+ pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), req->best_parent_rate, req->rate);
- if (rate > 266500000)
+ if (req->rate > 266500000)
return -EINVAL;
/* Have to check all 20 possibilities to find the minimal M */
@@ -614,9 +613,9 @@ static long clk_hclk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
}
}
- if (d == (u64)rate << 6) {
+ if (d == (u64)req->rate << 6) {
pr_err("%s: %lu: no valid PLL parameters are found\n",
- clk_hw_get_name(hw), rate);
+ clk_hw_get_name(hw), req->rate);
return -EINVAL;
}
@@ -634,22 +633,25 @@ static long clk_hclk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
if (!d)
pr_debug("%s: %lu: found exact match: %llu/%llu/%llu\n",
- clk_hw_get_name(hw), rate, m, n, p);
+ clk_hw_get_name(hw), req->rate, m, n, p);
else
pr_debug("%s: %lu: found closest: %llu/%llu/%llu - %llu\n",
- clk_hw_get_name(hw), rate, m, n, p, o);
+ clk_hw_get_name(hw), req->rate, m, n, p, o);
- return o;
+ req->rate = o;
+
+ return 0;
}
-static long clk_usb_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_usb_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lpc32xx_pll_clk *clk = to_lpc32xx_pll_clk(hw);
struct clk_hw *usb_div_hw, *osc_hw;
u64 d_i, n_i, m, o;
- pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), *parent_rate, rate);
+ pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), req->best_parent_rate,
+ req->rate);
/*
* The only supported USB clock is 48MHz, with PLL internal constraints
@@ -657,7 +659,7 @@ static long clk_usb_pll_round_rate(struct clk_hw *hw, unsigned long rate,
* and post-divider must be 4, this slightly simplifies calculation of
* USB divider, USB PLL N and M parameters.
*/
- if (rate != 48000000)
+ if (req->rate != 48000000)
return -EINVAL;
/* USB divider clock */
@@ -685,30 +687,30 @@ static long clk_usb_pll_round_rate(struct clk_hw *hw, unsigned long rate,
clk->m_div = m;
clk->p_div = 2;
clk->mode = PLL_NON_INTEGER;
- *parent_rate = div64_u64(o, d_i);
+ req->best_parent_rate = div64_u64(o, d_i);
- return rate;
+ return 0;
}
}
return -EINVAL;
}
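
Worth noting in this conversion: the function does not just report an achievable rate, it also writes req->best_parent_rate, which is the determine_rate way of asking the framework to re-rate the parent (the old callback did the same through the *prate out-parameter). With CLK_SET_RATE_PARENT set on the clock, the core then propagates that request up the tree.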
-#define LPC32XX_DEFINE_PLL_OPS(_name, _rc, _sr, _rr) \
+#define LPC32XX_DEFINE_PLL_OPS(_name, _rc, _sr, _dr) \
static const struct clk_ops clk_ ##_name ## _ops = { \
.enable = clk_pll_enable, \
.disable = clk_pll_disable, \
.is_enabled = clk_pll_is_enabled, \
.recalc_rate = _rc, \
.set_rate = _sr, \
- .round_rate = _rr, \
+ .determine_rate = _dr, \
}
LPC32XX_DEFINE_PLL_OPS(pll_397x, clk_pll_397x_recalc_rate, NULL, NULL);
LPC32XX_DEFINE_PLL_OPS(hclk_pll, clk_pll_recalc_rate,
- clk_pll_set_rate, clk_hclk_pll_round_rate);
+ clk_pll_set_rate, clk_hclk_pll_determine_rate);
LPC32XX_DEFINE_PLL_OPS(usb_pll, clk_pll_recalc_rate,
- clk_pll_set_rate, clk_usb_pll_round_rate);
+ clk_pll_set_rate, clk_usb_pll_determine_rate);
static int clk_ddram_is_enabled(struct clk_hw *hw)
{
@@ -955,8 +957,8 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
divider->flags, divider->width);
}
-static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lpc32xx_clk_div *divider = to_lpc32xx_div(hw);
unsigned int bestdiv;
@@ -968,11 +970,15 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
bestdiv &= div_mask(divider->width);
bestdiv = _get_div(divider->table, bestdiv, divider->flags,
divider->width);
- return DIV_ROUND_UP(*prate, bestdiv);
+ req->rate = DIV_ROUND_UP(req->best_parent_rate, bestdiv);
+
+ return 0;
}
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ divider->table, divider->width, divider->flags);
+
+ return 0;
}
static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -991,7 +997,7 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops lpc32xx_clk_divider_ops = {
.recalc_rate = clk_divider_recalc_rate,
- .round_rate = clk_divider_round_rate,
+ .determine_rate = clk_divider_determine_rate,
.set_rate = clk_divider_set_rate,
};
diff --git a/drivers/clk/pistachio/clk-pll.c b/drivers/clk/pistachio/clk-pll.c
index 025b9df76cdb..d05337915e2b 100644
--- a/drivers/clk/pistachio/clk-pll.c
+++ b/drivers/clk/pistachio/clk-pll.c
@@ -139,19 +139,23 @@ pll_get_params(struct pistachio_clk_pll *pll, unsigned long fref,
return NULL;
}
-static long pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
unsigned int i;
for (i = 0; i < pll->nr_rates; i++) {
- if (i > 0 && pll->rates[i].fref == *parent_rate &&
- pll->rates[i].fout <= rate)
- return pll->rates[i - 1].fout;
+ if (i > 0 && pll->rates[i].fref == req->best_parent_rate &&
+ pll->rates[i].fout <= req->rate) {
+ req->rate = pll->rates[i - 1].fout;
+
+ return 0;
+ }
}
- return pll->rates[0].fout;
+ req->rate = pll->rates[0].fout;
+
+ return 0;
}
static int pll_gf40lp_frac_enable(struct clk_hw *hw)
@@ -300,7 +304,7 @@ static const struct clk_ops pll_gf40lp_frac_ops = {
.disable = pll_gf40lp_frac_disable,
.is_enabled = pll_gf40lp_frac_is_enabled,
.recalc_rate = pll_gf40lp_frac_recalc_rate,
- .round_rate = pll_round_rate,
+ .determine_rate = pll_determine_rate,
.set_rate = pll_gf40lp_frac_set_rate,
};
@@ -432,7 +436,7 @@ static const struct clk_ops pll_gf40lp_laint_ops = {
.disable = pll_gf40lp_laint_disable,
.is_enabled = pll_gf40lp_laint_is_enabled,
.recalc_rate = pll_gf40lp_laint_recalc_rate,
- .round_rate = pll_round_rate,
+ .determine_rate = pll_determine_rate,
.set_rate = pll_gf40lp_laint_set_rate,
};
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 7d5dac26b244..a284ba040b78 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -19,6 +19,33 @@ menuconfig COMMON_CLK_QCOM
if COMMON_CLK_QCOM
+config CLK_GLYMUR_DISPCC
+ tristate "GLYMUR Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select CLK_GLYMUR_GCC
+ help
+ Support for the display clock controllers on Qualcomm
+ Technologies, Inc. GLYMUR devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
+config CLK_GLYMUR_GCC
+ tristate "GLYMUR Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on GLYMUR devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ I2C, USB, UFS, SDCC, etc.
+
+config CLK_GLYMUR_TCSRCC
+ tristate "GLYMUR TCSR Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the TCSR clock controller on GLYMUR devices.
+ Say Y if you want to use peripheral devices such as USB/PCIe/EDP.
+
config CLK_X1E80100_CAMCC
tristate "X1E80100 Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -187,8 +214,17 @@ config IPQ_APSS_PLL
Say Y if you want to support CPU frequency scaling on ipq based
devices.
+config IPQ_APSS_5424
+ tristate "IPQ5424 APSS Clock Controller"
+ select IPQ_APSS_PLL
+ default y if IPQ_GCC_5424
+ help
+ Support for the APSS clock controller on the Qualcomm IPQ5424 platform.
+ Say Y if you want to support CPU frequency scaling on ipq based
+ devices.
+
config IPQ_APSS_6018
- tristate "IPQ APSS Clock Controller"
+ tristate "IPQ6018 APSS Clock Controller"
select IPQ_APSS_PLL
depends on QCOM_APCS_IPC || COMPILE_TEST
depends on QCOM_SMEM
@@ -281,6 +317,17 @@ config IPQ_GCC_9574
i2c, USB, SD/eMMC, etc. Select this for the root clock
of ipq9574.
+config IPQ_NSSCC_5424
+ tristate "IPQ5424 NSS Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ depends on IPQ_GCC_5424
+ help
+ Support for the NSS clock controller on IPQ5424 devices.
+ NSSCC receives the clock sources from GCC, CMN PLL and UNIPHY (PCS).
+ It in turn supplies the clocks and resets to the networking hardware.
+ Say Y or M if you want to enable networking function on the
+ IPQ5424 devices.
+
config IPQ_NSSCC_9574
tristate "IPQ9574 NSS Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -323,12 +370,12 @@ config MSM_GCC_8916
SD/eMMC, display, graphics, camera etc.
config MSM_GCC_8917
- tristate "MSM8917/QM215 Global Clock Controller"
+ tristate "MSM89(17/37)/QM215 Global Clock Controller"
depends on ARM64 || COMPILE_TEST
select QCOM_GDSC
help
- Support for the global clock controller on msm8917 and qm215
- devices.
+ Support for the global clock controller on msm8917, msm8937
+ and qm215 devices.
Say Y if you want to use devices such as UART, SPI i2c, USB,
SD/eMMC, display, graphics, camera etc.
@@ -493,6 +540,26 @@ config QCM_DISPCC_2290
Say Y if you want to support display devices and functionality such as
splash screen.
+config QCS_DISPCC_615
+ tristate "QCS615 Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCS_GCC_615
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc
+ QCS615 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
+config QCS_CAMCC_615
+ tristate "QCS615 Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCS_GCC_615
+ help
+ Support for the camera clock controller on Qualcomm Technologies, Inc
+ QCS615 devices.
+ Say Y if you want to support camera devices and functionality such as
+ capturing pictures.
+
config QCS_GCC_404
tristate "QCS404 Global Clock Controller"
help
@@ -529,6 +596,24 @@ config QCS_GCC_615
Say Y if you want to use multimedia devices or peripheral
devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe etc.
+config QCS_GPUCC_615
+ tristate "QCS615 Graphics clock controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCS_GCC_615
+ help
+ Support for the graphics clock controller on QCS615 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
+config QCS_VIDEOCC_615
+ tristate "QCS615 Video Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCS_GCC_615
+ help
+ Support for the video clock controller on QCS615 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode and decode.
+
config SC_CAMCC_7180
tristate "SC7180 Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -549,6 +634,16 @@ config SC_CAMCC_7280
Say Y if you want to support camera devices and functionality such as
capturing pictures.
+config SC_CAMCC_8180X
+ tristate "SC8180X Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SC_GCC_8180X
+ help
+ Support for the camera clock controller on Qualcomm Technologies, Inc
+ SC8180X devices.
+ Say Y if you want to support camera devices and functionality such as
+ capturing pictures.
+
config SC_CAMCC_8280XP
tristate "SC8280XP Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -924,6 +1019,14 @@ config SM_CAMCC_7150
Support for the camera clock controller on SM7150 devices.
Say Y if you want to support camera devices and camera functionality.
+config SM_CAMCC_MILOS
+ tristate "Milos Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_MILOS
+ help
+ Support for the camera clock controller on Milos devices.
+ Say Y if you want to support camera devices and camera functionality.
+
config SM_CAMCC_8150
tristate "SM8150 Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1036,6 +1139,16 @@ config SM_DISPCC_6375
Say Y if you want to support display devices and functionality such as
splash screen.
+config SM_DISPCC_MILOS
+ tristate "Milos Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ depends on SM_GCC_MILOS
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc
+ Milos devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SM_DISPCC_8450
tristate "SM8450 Display Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1121,6 +1234,15 @@ config SM_GCC_7150
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GCC_MILOS
+ tristate "Milos Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on Milos devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SM_GCC_8150
tristate "SM8150 Global Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1230,6 +1352,15 @@ config SM_GPUCC_6350
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SM_GPUCC_MILOS
+ tristate "Milos Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_MILOS
+ help
+ Support for the graphics clock controller on Milos devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config SM_GPUCC_8150
tristate "SM8150 Graphics Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1329,6 +1460,16 @@ config SA_VIDEOCC_8775P
Say Y if you want to support video devices and functionality such as
video encode/decode.
+config SM_VIDEOCC_6350
+ tristate "SM6350 Video Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_6350
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on SM6350 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode and decode.
+
config SM_VIDEOCC_7150
tristate "SM7150 Video Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1339,6 +1480,17 @@ config SM_VIDEOCC_7150
Say Y if you want to support video devices and functionality such as
video encode and decode.
+config SM_VIDEOCC_MILOS
+ tristate "Milos Video Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_MILOS
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on Qualcomm Technologies, Inc.
+ Milos devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode/decode.
+
config SM_VIDEOCC_8150
tristate "SM8150 Video Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1372,11 +1524,21 @@ config SM_VIDEOCC_8350
config SM_VIDEOCC_8550
tristate "SM8550 Video Clock Controller"
depends on ARM64 || COMPILE_TEST
- depends on SM_GCC_8550 || SM_GCC_8650
select QCOM_GDSC
help
Support for the video clock controller on Qualcomm Technologies, Inc.
- SM8550 or SM8650 devices.
+ SM8550, SM8650 or X1E80100 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode/decode.
+
+config SM_VIDEOCC_8750
+ tristate "SM8750 Video Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_8750
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on Qualcomm Technologies, Inc.
+ SM8750 devices.
Say Y if you want to support video devices and functionality such as
video encode/decode.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 96862e99e5d4..0ac8a9055a43 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -21,6 +21,9 @@ clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
obj-$(CONFIG_CLK_GFM_LPASS_SM8250) += lpass-gfm-sm8250.o
+obj-$(CONFIG_CLK_GLYMUR_DISPCC) += dispcc-glymur.o
+obj-$(CONFIG_CLK_GLYMUR_GCC) += gcc-glymur.o
+obj-$(CONFIG_CLK_GLYMUR_TCSRCC) += tcsrcc-glymur.o
obj-$(CONFIG_CLK_X1E80100_CAMCC) += camcc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_DISPCC) += dispcc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_GCC) += gcc-x1e80100.o
@@ -29,6 +32,7 @@ obj-$(CONFIG_CLK_X1E80100_TCSRCC) += tcsrcc-x1e80100.o
obj-$(CONFIG_CLK_X1P42100_GPUCC) += gpucc-x1p42100.o
obj-$(CONFIG_CLK_QCM2290_GPUCC) += gpucc-qcm2290.o
obj-$(CONFIG_IPQ_APSS_PLL) += apss-ipq-pll.o
+obj-$(CONFIG_IPQ_APSS_5424) += apss-ipq5424.o
obj-$(CONFIG_IPQ_APSS_6018) += apss-ipq6018.o
obj-$(CONFIG_IPQ_CMN_PLL) += ipq-cmn-pll.o
obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
@@ -39,6 +43,7 @@ obj-$(CONFIG_IPQ_GCC_6018) += gcc-ipq6018.o
obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
obj-$(CONFIG_IPQ_GCC_9574) += gcc-ipq9574.o
+obj-$(CONFIG_IPQ_NSSCC_5424) += nsscc-ipq5424.o
obj-$(CONFIG_IPQ_NSSCC_9574) += nsscc-ipq9574.o
obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
obj-$(CONFIG_IPQ_NSSCC_QCA8K) += nsscc-qca8k.o
@@ -73,15 +78,20 @@ obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
obj-$(CONFIG_QCM_GCC_2290) += gcc-qcm2290.o
obj-$(CONFIG_QCM_DISPCC_2290) += dispcc-qcm2290.o
+obj-$(CONFIG_QCS_DISPCC_615) += dispcc-qcs615.o
+obj-$(CONFIG_QCS_CAMCC_615) += camcc-qcs615.o
obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
obj-$(CONFIG_QCS_GCC_615) += gcc-qcs615.o
obj-$(CONFIG_QCS_GCC_8300) += gcc-qcs8300.o
+obj-$(CONFIG_QCS_GPUCC_615) += gpucc-qcs615.o
+obj-$(CONFIG_QCS_VIDEOCC_615) += videocc-qcs615.o
obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o
obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
obj-$(CONFIG_QDU_ECPRICC_1000) += ecpricc-qdu1000.o
obj-$(CONFIG_QDU_GCC_1000) += gcc-qdu1000.o
obj-$(CONFIG_SC_CAMCC_7180) += camcc-sc7180.o
obj-$(CONFIG_SC_CAMCC_7280) += camcc-sc7280.o
+obj-$(CONFIG_SC_CAMCC_8180X) += camcc-sc8180x.o
obj-$(CONFIG_SC_CAMCC_8280XP) += camcc-sc8280xp.o
obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
obj-$(CONFIG_SC_DISPCC_7280) += dispcc-sc7280.o
@@ -126,6 +136,7 @@ obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o
obj-$(CONFIG_SM_CAMCC_8450) += camcc-sm8450.o
obj-$(CONFIG_SM_CAMCC_8550) += camcc-sm8550.o
obj-$(CONFIG_SM_CAMCC_8650) += camcc-sm8650.o
+obj-$(CONFIG_SM_CAMCC_MILOS) += camcc-milos.o
obj-$(CONFIG_SM_DISPCC_4450) += dispcc-sm4450.o
obj-$(CONFIG_SM_DISPCC_6115) += dispcc-sm6115.o
obj-$(CONFIG_SM_DISPCC_6125) += dispcc-sm6125.o
@@ -136,6 +147,7 @@ obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
obj-$(CONFIG_SM_DISPCC_8450) += dispcc-sm8450.o
obj-$(CONFIG_SM_DISPCC_8550) += dispcc-sm8550.o
obj-$(CONFIG_SM_DISPCC_8750) += dispcc-sm8750.o
+obj-$(CONFIG_SM_DISPCC_MILOS) += dispcc-milos.o
obj-$(CONFIG_SM_GCC_4450) += gcc-sm4450.o
obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o
obj-$(CONFIG_SM_GCC_6125) += gcc-sm6125.o
@@ -149,6 +161,7 @@ obj-$(CONFIG_SM_GCC_8450) += gcc-sm8450.o
obj-$(CONFIG_SM_GCC_8550) += gcc-sm8550.o
obj-$(CONFIG_SM_GCC_8650) += gcc-sm8650.o
obj-$(CONFIG_SM_GCC_8750) += gcc-sm8750.o
+obj-$(CONFIG_SM_GCC_MILOS) += gcc-milos.o
obj-$(CONFIG_SM_GPUCC_4450) += gpucc-sm4450.o
obj-$(CONFIG_SM_GPUCC_6115) += gpucc-sm6115.o
obj-$(CONFIG_SM_GPUCC_6125) += gpucc-sm6125.o
@@ -160,16 +173,20 @@ obj-$(CONFIG_SM_GPUCC_8350) += gpucc-sm8350.o
obj-$(CONFIG_SM_GPUCC_8450) += gpucc-sm8450.o
obj-$(CONFIG_SM_GPUCC_8550) += gpucc-sm8550.o
obj-$(CONFIG_SM_GPUCC_8650) += gpucc-sm8650.o
+obj-$(CONFIG_SM_GPUCC_MILOS) += gpucc-milos.o
obj-$(CONFIG_SM_LPASSCC_6115) += lpasscc-sm6115.o
obj-$(CONFIG_SM_TCSRCC_8550) += tcsrcc-sm8550.o
obj-$(CONFIG_SM_TCSRCC_8650) += tcsrcc-sm8650.o
obj-$(CONFIG_SM_TCSRCC_8750) += tcsrcc-sm8750.o
+obj-$(CONFIG_SM_VIDEOCC_6350) += videocc-sm6350.o
obj-$(CONFIG_SM_VIDEOCC_7150) += videocc-sm7150.o
obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
obj-$(CONFIG_SM_VIDEOCC_8250) += videocc-sm8250.o
obj-$(CONFIG_SM_VIDEOCC_8350) += videocc-sm8350.o
obj-$(CONFIG_SM_VIDEOCC_8450) += videocc-sm8450.o
obj-$(CONFIG_SM_VIDEOCC_8550) += videocc-sm8550.o
+obj-$(CONFIG_SM_VIDEOCC_8750) += videocc-sm8750.o
+obj-$(CONFIG_SM_VIDEOCC_MILOS) += videocc-milos.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o
obj-$(CONFIG_QCOM_HFPLL) += hfpll.o
diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c
index f43d455ab4b8..724a642311e5 100644
--- a/drivers/clk/qcom/a53-pll.c
+++ b/drivers/clk/qcom/a53-pll.c
@@ -33,7 +33,6 @@ static const struct regmap_config a53pll_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x40,
- .fast_io = true,
};
static struct pll_freq_tbl *qcom_a53pll_get_freq_tbl(struct device *dev)
diff --git a/drivers/clk/qcom/a7-pll.c b/drivers/clk/qcom/a7-pll.c
index c4a53e5db229..04b5492a3c21 100644
--- a/drivers/clk/qcom/a7-pll.c
+++ b/drivers/clk/qcom/a7-pll.c
@@ -27,7 +27,7 @@ static struct clk_alpha_pll a7pll = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "a7pll",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
@@ -50,7 +50,6 @@ static const struct regmap_config a7pll_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x1000,
- .fast_io = true,
};
static int qcom_a7pll_probe(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/apcs-sdx55.c b/drivers/clk/qcom/apcs-sdx55.c
index 3ba01622d8f0..90dd1f1855c2 100644
--- a/drivers/clk/qcom/apcs-sdx55.c
+++ b/drivers/clk/qcom/apcs-sdx55.c
@@ -111,7 +111,7 @@ static int qcom_apcs_sdx55_clk_probe(struct platform_device *pdev)
* driver, there seems to be no better place to do this. So do it here!
*/
cpu_dev = get_cpu_device(0);
- ret = dev_pm_domain_attach(cpu_dev, true);
+ ret = dev_pm_domain_attach(cpu_dev, PD_FLAG_ATTACH_POWER_ON);
if (ret) {
dev_err_probe(dev, ret, "can't get PM domain: %d\n", ret);
goto err;
diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
index d6c1aea7e9e1..3a8987fe7008 100644
--- a/drivers/clk/qcom/apss-ipq-pll.c
+++ b/drivers/clk/qcom/apss-ipq-pll.c
@@ -169,7 +169,6 @@ static const struct regmap_config ipq_pll_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x40,
- .fast_io = true,
};
static int apss_ipq_pll_probe(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/apss-ipq5424.c b/drivers/clk/qcom/apss-ipq5424.c
new file mode 100644
index 000000000000..2d622c1fe5d0
--- /dev/null
+++ b/drivers/clk/qcom/apss-ipq5424.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/interconnect-provider.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/arm/qcom,ids.h>
+#include <dt-bindings/clock/qcom,apss-ipq.h>
+#include <dt-bindings/interconnect/qcom,ipq5424.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+
+enum {
+ DT_XO,
+ DT_CLK_REF,
+};
+
+enum {
+ P_XO,
+ P_GPLL0,
+ P_APSS_PLL_EARLY,
+ P_L3_PLL,
+};
+
+static const struct alpha_pll_config apss_pll_config = {
+ .l = 0x3b,
+ .config_ctl_val = 0x08200920,
+ .config_ctl_hi_val = 0x05008001,
+ .config_ctl_hi1_val = 0x04000000,
+ .user_ctl_val = 0xf,
+};
+
+static struct clk_alpha_pll ipq5424_apss_pll = {
+ .offset = 0x0,
+ .config = &apss_pll_config,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA_2290],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "apss_pll",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_XO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_huayra_ops,
+ },
+ },
+};
+
+static const struct clk_parent_data parents_apss_silver_clk_src[] = {
+ { .index = DT_XO },
+ { .index = DT_CLK_REF },
+ { .hw = &ipq5424_apss_pll.clkr.hw },
+};
+
+static const struct parent_map parents_apss_silver_clk_src_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 4 },
+ { P_APSS_PLL_EARLY, 5 },
+};
+
+static const struct freq_tbl ftbl_apss_clk_src[] = {
+ F(816000000, P_APSS_PLL_EARLY, 1, 0, 0),
+ F(1416000000, P_APSS_PLL_EARLY, 1, 0, 0),
+ F(1800000000, P_APSS_PLL_EARLY, 1, 0, 0),
+ { }
+};
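+
These frequency tables use the qcom RCG F() macro from drivers/clk/qcom/clk-rcg.h, which packs the target rate, the source index from the parent_map, a pre-divider, and the M/N counter values. The pre-divider field stores 2*h - 1, so half-integer dividers (the 2.5 and 1.5 entries in the camcc tables later in this series) encode exactly:

/* From drivers/clk/qcom/clk-rcg.h: pre_div is stored as 2*h - 1,
 * e.g. F(480000000, P_PLL0, 2.5, 0, 0) encodes divider 2.5 as 4. */
#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }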
+
+static struct clk_rcg2 apss_silver_clk_src = {
+ .cmd_rcgr = 0x0080,
+ .freq_tbl = ftbl_apss_clk_src,
+ .hid_width = 5,
+ .parent_map = parents_apss_silver_clk_src_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "apss_silver_clk_src",
+ .parent_data = parents_apss_silver_clk_src,
+ .num_parents = ARRAY_SIZE(parents_apss_silver_clk_src),
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_branch apss_silver_core_clk = {
+ .halt_reg = 0x008c,
+ .clkr = {
+ .enable_reg = 0x008c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "apss_silver_core_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &apss_silver_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config l3_pll_config = {
+ .l = 0x29,
+ .config_ctl_val = 0x08200920,
+ .config_ctl_hi_val = 0x05008001,
+ .config_ctl_hi1_val = 0x04000000,
+ .user_ctl_val = 0xf,
+};
+
+static struct clk_alpha_pll ipq5424_l3_pll = {
+ .offset = 0x10000,
+ .config = &l3_pll_config,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA_2290],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "l3_pll",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_XO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_huayra_ops,
+ },
+ },
+};
+
+static const struct clk_parent_data parents_l3_clk_src[] = {
+ { .index = DT_XO },
+ { .index = DT_CLK_REF },
+ { .hw = &ipq5424_l3_pll.clkr.hw },
+};
+
+static const struct parent_map parents_l3_clk_src_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 4 },
+ { P_L3_PLL, 5 },
+};
+
+static const struct freq_tbl ftbl_l3_clk_src[] = {
+ F(816000000, P_L3_PLL, 1, 0, 0),
+ F(984000000, P_L3_PLL, 1, 0, 0),
+ F(1272000000, P_L3_PLL, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 l3_clk_src = {
+ .cmd_rcgr = 0x10080,
+ .freq_tbl = ftbl_l3_clk_src,
+ .hid_width = 5,
+ .parent_map = parents_l3_clk_src_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "l3_clk_src",
+ .parent_data = parents_l3_clk_src,
+ .num_parents = ARRAY_SIZE(parents_l3_clk_src),
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_branch l3_core_clk = {
+ .halt_reg = 0x1008c,
+ .clkr = {
+ .enable_reg = 0x1008c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "l3_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &l3_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct regmap_config apss_ipq5424_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x20000,
+ .fast_io = true,
+};
+
+static struct clk_regmap *apss_ipq5424_clks[] = {
+ [APSS_PLL_EARLY] = &ipq5424_apss_pll.clkr,
+ [APSS_SILVER_CLK_SRC] = &apss_silver_clk_src.clkr,
+ [APSS_SILVER_CORE_CLK] = &apss_silver_core_clk.clkr,
+ [L3_PLL] = &ipq5424_l3_pll.clkr,
+ [L3_CLK_SRC] = &l3_clk_src.clkr,
+ [L3_CORE_CLK] = &l3_core_clk.clkr,
+};
+
+static struct clk_alpha_pll *ipq5424_apss_plls[] = {
+ &ipq5424_l3_pll,
+ &ipq5424_apss_pll,
+};
+
+static struct qcom_cc_driver_data ipq5424_apss_driver_data = {
+ .alpha_plls = ipq5424_apss_plls,
+ .num_alpha_plls = ARRAY_SIZE(ipq5424_apss_plls),
+};
+
+#define IPQ_APPS_PLL_ID (5424 * 3) /* some unique value */
+
+static const struct qcom_icc_hws_data icc_ipq5424_cpu_l3[] = {
+ { MASTER_CPU, SLAVE_L3, L3_CORE_CLK },
+};
+
+static const struct qcom_cc_desc apss_ipq5424_desc = {
+ .config = &apss_ipq5424_regmap_config,
+ .clks = apss_ipq5424_clks,
+ .num_clks = ARRAY_SIZE(apss_ipq5424_clks),
+ .icc_hws = icc_ipq5424_cpu_l3,
+ .num_icc_hws = ARRAY_SIZE(icc_ipq5424_cpu_l3),
+ .icc_first_node_id = IPQ_APPS_PLL_ID,
+ .driver_data = &ipq5424_apss_driver_data,
+};
+
+static int apss_ipq5424_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &apss_ipq5424_desc);
+}
+
+static const struct of_device_id apss_ipq5424_match_table[] = {
+ { .compatible = "qcom,ipq5424-apss-clk" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, apss_ipq5424_match_table);
+
+static struct platform_driver apss_ipq5424_driver = {
+ .probe = apss_ipq5424_probe,
+ .driver = {
+ .name = "apss-ipq5424-clk",
+ .of_match_table = apss_ipq5424_match_table,
+ .sync_state = icc_sync_state,
+ },
+};
+
+module_platform_driver(apss_ipq5424_driver);
+
+MODULE_DESCRIPTION("QCOM APSS IPQ5424 CLK Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/camcc-milos.c b/drivers/clk/qcom/camcc-milos.c
new file mode 100644
index 000000000000..0077c9c9249f
--- /dev/null
+++ b/drivers/clk/qcom/camcc-milos.c
@@ -0,0 +1,2161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,milos-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_IFACE,
+};
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL0_OUT_MAIN,
+ P_CAM_CC_PLL0_OUT_ODD,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL1_OUT_MAIN,
+ P_CAM_CC_PLL2_OUT_MAIN,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_MAIN,
+ P_CAM_CC_PLL5_OUT_EVEN,
+ P_CAM_CC_PLL5_OUT_MAIN,
+ P_CAM_CC_PLL6_OUT_EVEN,
+ P_CAM_CC_PLL6_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+static const struct pll_vco rivian_ole_vco[] = {
+ { 777000000, 1285000000, 0 },
+};
+
+/* 1200.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00008400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .config = &cam_cc_pll0_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+ { 0x2, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 14,
+ .post_div_table = post_div_table_cam_cc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+/* 600.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .config = &cam_cc_pll1_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+ .offset = 0x1000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll1_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+/* 960.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x10000030,
+ .config_ctl_hi_val = 0x80890263,
+ .config_ctl_hi1_val = 0x00000217,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00100000,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .config = &cam_cc_pll2_config,
+ .vco_table = rivian_ole_vco,
+ .num_vco = ARRAY_SIZE(rivian_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_rivian_evo_ops,
+ },
+ },
+};
+
+/* 600.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .config = &cam_cc_pll3_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll3_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+/* 700.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll4_config = {
+ .l = 0x24,
+ .alpha = 0x7555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+ .offset = 0x4000,
+ .config = &cam_cc_pll4_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
+ .offset = 0x4000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll4_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+/* 700.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll5_config = {
+ .l = 0x24,
+ .alpha = 0x7555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll5 = {
+ .offset = 0x5000,
+ .config = &cam_cc_pll5_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll5_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll5_out_even = {
+ .offset = 0x5000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll5_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll5_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll5.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+/* 700.0 MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll6_config = {
+ .l = 0x24,
+ .alpha = 0x7555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll cam_cc_pll6 = {
+ .offset = 0x6000,
+ .config = &cam_cc_pll6_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll6_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll6_out_even = {
+ .offset = 0x6000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll6_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll6_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll6_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL1_OUT_MAIN, 2 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 3 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll1_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll3_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6_ao[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map cam_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL4_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL4_OUT_MAIN, 3 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll4_out_even.clkr.hw },
+ { .hw = &cam_cc_pll4.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL5_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL5_OUT_MAIN, 3 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll5_out_even.clkr.hw },
+ { .hw = &cam_cc_pll5.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL6_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL6_OUT_MAIN, 3 },
+ { P_CAM_CC_PLL0_OUT_ODD, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll6_out_even.clkr.hw },
+ { .hw = &cam_cc_pll6.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_10[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(300000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(410000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(460000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(700000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x1a004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = {
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+ .cmd_rcgr = 0x2401c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0x21004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0x22004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x1c05c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cre_clk_src[] = {
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cre_clk_src = {
+ .cmd_rcgr = 0x27004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_cre_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cre_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x19004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x19028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x1904c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x19070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_MAIN, 6, 0, 0),
+ F(240000000, P_CAM_CC_PLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x1a030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0x20014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
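+/* M/N counter rates, assuming cam_cc_pll2 runs at 960 MHz: 960 / 50 = 19.2 MHz, 960 / 10 * 1/4 = 24 MHz, 960 / 15 = 64 MHz. */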
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_CAM_CC_PLL2_OUT_MAIN, 1, 1, 50),
+ F(24000000, P_CAM_CC_PLL2_OUT_MAIN, 10, 1, 4),
+ F(64000000, P_CAM_CC_PLL2_OUT_MAIN, 15, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x18004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x18024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x18044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x18064,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk4_clk_src = {
+ .cmd_rcgr = 0x18084,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ope_0_clk_src[] = {
+ F(300000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(410000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(520000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(645000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(700000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ope_0_clk_src = {
+ .cmd_rcgr = 0x1b004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_ope_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
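+/* The sleep and XO RCGs use plain clk_rcg2_ops: unlike clk_rcg2_shared_ops, they are not parked at the XO source when disabled. */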
+static struct clk_rcg2 cam_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x25044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sleep_clk_src",
+ .parent_data = cam_cc_parent_data_6_ao,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x1a04c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_tfe_0_clk_src[] = {
+ F(350000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(570000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(725000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_tfe_0_clk_src = {
+ .cmd_rcgr = 0x1c004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_7,
+ .freq_tbl = ftbl_cam_cc_tfe_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_clk_src",
+ .parent_data = cam_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_tfe_0_csid_clk_src = {
+ .cmd_rcgr = 0x1c030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_tfe_1_clk_src[] = {
+ F(350000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(570000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(725000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_tfe_1_clk_src = {
+ .cmd_rcgr = 0x1d004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_8,
+ .freq_tbl = ftbl_cam_cc_tfe_1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_clk_src",
+ .parent_data = cam_cc_parent_data_8,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_tfe_1_csid_clk_src = {
+ .cmd_rcgr = 0x1d030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_tfe_2_clk_src[] = {
+ F(350000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(570000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(725000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_tfe_2_clk_src = {
+ .cmd_rcgr = 0x1e004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_9,
+ .freq_tbl = ftbl_cam_cc_tfe_2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_2_clk_src",
+ .parent_data = cam_cc_parent_data_9,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_tfe_2_csid_clk_src = {
+ .cmd_rcgr = 0x1e030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_2_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_xo_clk_src = {
+ .cmd_rcgr = 0x25020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_10,
+ .freq_tbl = ftbl_cam_cc_xo_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_xo_clk_src",
+ .parent_data = cam_cc_parent_data_10,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x1a064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+ .halt_reg = 0x1a048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x1a01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_atb_clk = {
+ .halt_reg = 0x24040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x24040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_atb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_hf_clk = {
+ .halt_reg = 0x24010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x24010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_hf_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_sf_clk = {
+ .halt_reg = 0x24004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x24004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_sf_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
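+/* hwcg_reg/hwcg_bit enable hardware clock gating for this branch. */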
+static struct clk_branch cam_cc_camnoc_nrt_axi_clk = {
+ .halt_reg = 0x2404c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2404c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2404c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_nrt_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_rt_axi_clk = {
+ .halt_reg = 0x24034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x24034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_rt_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0x2101c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2101c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0x2201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0x2501c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x23004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cre_ahb_clk = {
+ .halt_reg = 0x27020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x27020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cre_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cre_clk = {
+ .halt_reg = 0x2701c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cre_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cre_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x1901c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1901c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x19040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x19064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0x19088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x19020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x19044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x19068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0x1908c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1908c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_atb_clk = {
+ .halt_reg = 0x20004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_atb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0x2002c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2002c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_cti_clk = {
+ .halt_reg = 0x20008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_cti_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ts_clk = {
+ .halt_reg = 0x2000c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_ts_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x1801c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x1803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x1805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x1807c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk4_clk = {
+ .halt_reg = 0x1809c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1809c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ope_0_ahb_clk = {
+ .halt_reg = 0x1b034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ope_0_areg_clk = {
+ .halt_reg = 0x1b030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ope_0_clk = {
+ .halt_reg = 0x1b01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ope_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ope_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_soc_ahb_clk = {
+ .halt_reg = 0x25018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x25018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_soc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sys_tmr_clk = {
+ .halt_reg = 0x20038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sys_tmr_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_ahb_clk = {
+ .halt_reg = 0x1c078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_clk = {
+ .halt_reg = 0x1c01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_cphy_rx_clk = {
+ .halt_reg = 0x1c074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_0_csid_clk = {
+ .halt_reg = 0x1c048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_ahb_clk = {
+ .halt_reg = 0x1d058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_clk = {
+ .halt_reg = 0x1d01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_cphy_rx_clk = {
+ .halt_reg = 0x1d054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_1_csid_clk = {
+ .halt_reg = 0x1d048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_2_ahb_clk = {
+ .halt_reg = 0x1e058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_2_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_2_clk = {
+ .halt_reg = 0x1e01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_2_cphy_rx_clk = {
+ .halt_reg = 0x1e054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_2_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_tfe_2_csid_clk = {
+ .halt_reg = 0x1e048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_tfe_2_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_tfe_2_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_top_shift_clk = {
+ .halt_reg = 0x25040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x25040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_top_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
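+/* Top-level camera subsystem power domain; POLL_CFG_GDSCR polls CFG_GDSCR for the power state and RETAIN_FF_ENABLE retains register state across collapse. */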
+static struct gdsc cam_cc_camss_top_gdsc = {
+ .gdscr = 0x25004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_camss_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *cam_cc_milos_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_CAMNOC_ATB_CLK] = &cam_cc_camnoc_atb_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr,
+ [CAM_CC_CAMNOC_AXI_HF_CLK] = &cam_cc_camnoc_axi_hf_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_SF_CLK] = &cam_cc_camnoc_axi_sf_clk.clkr,
+ [CAM_CC_CAMNOC_NRT_AXI_CLK] = &cam_cc_camnoc_nrt_axi_clk.clkr,
+ [CAM_CC_CAMNOC_RT_AXI_CLK] = &cam_cc_camnoc_rt_axi_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CRE_AHB_CLK] = &cam_cc_cre_ahb_clk.clkr,
+ [CAM_CC_CRE_CLK] = &cam_cc_cre_clk.clkr,
+ [CAM_CC_CRE_CLK_SRC] = &cam_cc_cre_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_ICP_ATB_CLK] = &cam_cc_icp_atb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_ICP_CTI_CLK] = &cam_cc_icp_cti_clk.clkr,
+ [CAM_CC_ICP_TS_CLK] = &cam_cc_icp_ts_clk.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_MCLK4_CLK] = &cam_cc_mclk4_clk.clkr,
+ [CAM_CC_MCLK4_CLK_SRC] = &cam_cc_mclk4_clk_src.clkr,
+ [CAM_CC_OPE_0_AHB_CLK] = &cam_cc_ope_0_ahb_clk.clkr,
+ [CAM_CC_OPE_0_AREG_CLK] = &cam_cc_ope_0_areg_clk.clkr,
+ [CAM_CC_OPE_0_CLK] = &cam_cc_ope_0_clk.clkr,
+ [CAM_CC_OPE_0_CLK_SRC] = &cam_cc_ope_0_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+ [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr,
+ [CAM_CC_PLL5] = &cam_cc_pll5.clkr,
+ [CAM_CC_PLL5_OUT_EVEN] = &cam_cc_pll5_out_even.clkr,
+ [CAM_CC_PLL6] = &cam_cc_pll6.clkr,
+ [CAM_CC_PLL6_OUT_EVEN] = &cam_cc_pll6_out_even.clkr,
+ [CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_SOC_AHB_CLK] = &cam_cc_soc_ahb_clk.clkr,
+ [CAM_CC_SYS_TMR_CLK] = &cam_cc_sys_tmr_clk.clkr,
+ [CAM_CC_TFE_0_AHB_CLK] = &cam_cc_tfe_0_ahb_clk.clkr,
+ [CAM_CC_TFE_0_CLK] = &cam_cc_tfe_0_clk.clkr,
+ [CAM_CC_TFE_0_CLK_SRC] = &cam_cc_tfe_0_clk_src.clkr,
+ [CAM_CC_TFE_0_CPHY_RX_CLK] = &cam_cc_tfe_0_cphy_rx_clk.clkr,
+ [CAM_CC_TFE_0_CSID_CLK] = &cam_cc_tfe_0_csid_clk.clkr,
+ [CAM_CC_TFE_0_CSID_CLK_SRC] = &cam_cc_tfe_0_csid_clk_src.clkr,
+ [CAM_CC_TFE_1_AHB_CLK] = &cam_cc_tfe_1_ahb_clk.clkr,
+ [CAM_CC_TFE_1_CLK] = &cam_cc_tfe_1_clk.clkr,
+ [CAM_CC_TFE_1_CLK_SRC] = &cam_cc_tfe_1_clk_src.clkr,
+ [CAM_CC_TFE_1_CPHY_RX_CLK] = &cam_cc_tfe_1_cphy_rx_clk.clkr,
+ [CAM_CC_TFE_1_CSID_CLK] = &cam_cc_tfe_1_csid_clk.clkr,
+ [CAM_CC_TFE_1_CSID_CLK_SRC] = &cam_cc_tfe_1_csid_clk_src.clkr,
+ [CAM_CC_TFE_2_AHB_CLK] = &cam_cc_tfe_2_ahb_clk.clkr,
+ [CAM_CC_TFE_2_CLK] = &cam_cc_tfe_2_clk.clkr,
+ [CAM_CC_TFE_2_CLK_SRC] = &cam_cc_tfe_2_clk_src.clkr,
+ [CAM_CC_TFE_2_CPHY_RX_CLK] = &cam_cc_tfe_2_cphy_rx_clk.clkr,
+ [CAM_CC_TFE_2_CSID_CLK] = &cam_cc_tfe_2_csid_clk.clkr,
+ [CAM_CC_TFE_2_CSID_CLK_SRC] = &cam_cc_tfe_2_csid_clk_src.clkr,
+ [CAM_CC_TOP_SHIFT_CLK] = &cam_cc_top_shift_clk.clkr,
+ [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
+};
+
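+/* Block reset (BCR) register offsets; the reset framework toggles bit 0 at each offset. */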
+static const struct qcom_reset_map cam_cc_milos_resets[] = {
+ [CAM_CC_BPS_BCR] = { 0x1a000 },
+ [CAM_CC_CAMNOC_BCR] = { 0x24000 },
+ [CAM_CC_CAMSS_TOP_BCR] = { 0x25000 },
+ [CAM_CC_CCI_0_BCR] = { 0x21000 },
+ [CAM_CC_CCI_1_BCR] = { 0x22000 },
+ [CAM_CC_CPAS_BCR] = { 0x23000 },
+ [CAM_CC_CRE_BCR] = { 0x27000 },
+ [CAM_CC_CSI0PHY_BCR] = { 0x19000 },
+ [CAM_CC_CSI1PHY_BCR] = { 0x19024 },
+ [CAM_CC_CSI2PHY_BCR] = { 0x19048 },
+ [CAM_CC_CSI3PHY_BCR] = { 0x1906c },
+ [CAM_CC_ICP_BCR] = { 0x20000 },
+ [CAM_CC_MCLK0_BCR] = { 0x18000 },
+ [CAM_CC_MCLK1_BCR] = { 0x18020 },
+ [CAM_CC_MCLK2_BCR] = { 0x18040 },
+ [CAM_CC_MCLK3_BCR] = { 0x18060 },
+ [CAM_CC_MCLK4_BCR] = { 0x18080 },
+ [CAM_CC_OPE_0_BCR] = { 0x1b000 },
+ [CAM_CC_TFE_0_BCR] = { 0x1c000 },
+ [CAM_CC_TFE_1_BCR] = { 0x1d000 },
+ [CAM_CC_TFE_2_BCR] = { 0x1e000 },
+};
+
+static struct gdsc *cam_cc_milos_gdscs[] = {
+ [CAM_CC_CAMSS_TOP_GDSC] = &cam_cc_camss_top_gdsc,
+};
+
+static struct clk_alpha_pll *cam_cc_milos_plls[] = {
+ &cam_cc_pll0,
+ &cam_cc_pll1,
+ &cam_cc_pll2,
+ &cam_cc_pll3,
+ &cam_cc_pll4,
+ &cam_cc_pll5,
+ &cam_cc_pll6,
+};
+
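+/* Branch clocks with no Linux consumer, enabled at probe and left always-on. */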
+static u32 cam_cc_milos_critical_cbcrs[] = {
+ 0x25038, /* CAM_CC_GDSC_CLK */
+ 0x2505c, /* CAM_CC_SLEEP_CLK */
+};
+
+static const struct regmap_config cam_cc_milos_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x30728,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data cam_cc_milos_driver_data = {
+ .alpha_plls = cam_cc_milos_plls,
+ .num_alpha_plls = ARRAY_SIZE(cam_cc_milos_plls),
+ .clk_cbcrs = cam_cc_milos_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(cam_cc_milos_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc cam_cc_milos_desc = {
+ .config = &cam_cc_milos_regmap_config,
+ .clks = cam_cc_milos_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_milos_clocks),
+ .resets = cam_cc_milos_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_milos_resets),
+ .gdscs = cam_cc_milos_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_milos_gdscs),
+ .use_rpm = true,
+ .driver_data = &cam_cc_milos_driver_data,
+};
+
+static const struct of_device_id cam_cc_milos_match_table[] = {
+ { .compatible = "qcom,milos-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_milos_match_table);
+
+static int cam_cc_milos_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &cam_cc_milos_desc);
+}
+
+static struct platform_driver cam_cc_milos_driver = {
+ .probe = cam_cc_milos_probe,
+ .driver = {
+ .name = "cam_cc-milos",
+ .of_match_table = cam_cc_milos_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_milos_driver);
+
+MODULE_DESCRIPTION("QTI CAM_CC Milos Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/camcc-qcs615.c b/drivers/clk/qcom/camcc-qcs615.c
new file mode 100644
index 000000000000..c063a3bfacd0
--- /dev/null
+++ b/drivers/clk/qcom/camcc-qcs615.c
@@ -0,0 +1,1597 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qcs615-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+};
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_AUX,
+ P_CAM_CC_PLL1_OUT_AUX,
+ P_CAM_CC_PLL2_OUT_AUX2,
+ P_CAM_CC_PLL2_OUT_EARLY,
+ P_CAM_CC_PLL3_OUT_MAIN,
+};
+
+static const struct pll_vco brammo_vco[] = {
+ { 500000000, 1250000000, 0 },
+};
+
+static const struct pll_vco spark_vco[] = {
+ { 1000000000, 2100000000, 0 },
+ { 750000000, 1500000000, 1 },
+ { 500000000, 1000000000, 2 },
+ { 300000000, 500000000, 3 },
+ { 550000000, 1100000000, 4 },
+};
+
+/* 600MHz configuration VCO - 2 */
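+/* 19.2 MHz TCXO * (0x1f + 0x40/0x100) = 19.2 MHz * 31.25 = 600 MHz */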
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x1f,
+ .alpha_hi = 0x40,
+ .alpha_en_mask = BIT(24),
+ .vco_val = BIT(21),
+ .vco_mask = GENMASK(21, 20),
+ .aux_output_mask = BIT(1),
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .config = &cam_cc_pll0_config,
+ .vco_table = spark_vco,
+ .num_vco = ARRAY_SIZE(spark_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* 808MHz configuration VCO - 2 */
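+/* 19.2 MHz * (0x2a + 1/12) = 808 MHz; the 40-bit alpha 0x15_55555555 encodes the 1/12 fraction. */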
+static struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x2a,
+ .alpha_hi = 0x15,
+ .alpha = 0x55555555,
+ .alpha_en_mask = BIT(24),
+ .vco_val = BIT(21),
+ .vco_mask = GENMASK(21, 20),
+ .aux_output_mask = BIT(1),
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .config = &cam_cc_pll1_config,
+ .vco_table = spark_vco,
+ .num_vco = ARRAY_SIZE(spark_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* 960MHz configuration VCO - 0 */
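+/* 19.2 MHz * 0x32 = 19.2 MHz * 50 = 960 MHz (integer mode, no alpha) */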
+static struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .vco_val = 0x0,
+ .vco_mask = GENMASK(21, 20),
+ .early_output_mask = BIT(3),
+ .aux2_output_mask = BIT(2),
+ .post_div_val = 0x1 << 8,
+ .post_div_mask = 0x3 << 8,
+ .config_ctl_val = 0x04289,
+ .test_ctl_val = 0x08000000,
+ .test_ctl_mask = 0x08000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .config = &cam_cc_pll2_config,
+ .vco_table = brammo_vco,
+ .num_vco = ARRAY_SIZE(brammo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_aux2[] = {
+ { 0x1, 2 },
+ { }
+};
+
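+/* Register value 0x1 selects divide-by-2, giving a 480 MHz aux2 output. */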
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_aux2 = {
+ .offset = 0x2000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll2_out_aux2,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_aux2),
+ .width = 2,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2_out_aux2",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+/* 1080MHz configuration VCO - 0 */
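+/* 19.2 MHz * (0x38 + 0x40/0x100) = 19.2 MHz * 56.25 = 1080 MHz */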
+static struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x38,
+ .alpha_hi = 0x40,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x0,
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .config = &cam_cc_pll3_config,
+ .vco_table = spark_vco,
+ .num_vco = ARRAY_SIZE(spark_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
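+/* Each parent_map entry pairs a parent with its hardware mux select value; the clk_parent_data arrays below list the same parents in the same order. */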
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_AUX, 2 },
+ { P_CAM_CC_PLL0_OUT_AUX, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EARLY, 4 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_AUX, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2.clkr.hw },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_AUX, 2 },
+ { P_CAM_CC_PLL2_OUT_EARLY, 4 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_AUX, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll2.clkr.hw },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_AUX2, 1 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2_out_aux2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_AUX, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_AUX, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_AUX, 2 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_AUX, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(200000000, P_CAM_CC_PLL0_OUT_AUX, 3, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_MAIN, 2.5, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+ F(540000000, P_CAM_CC_PLL3_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_AUX, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x6010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_clk_src[] = {
+ F(37500000, P_CAM_CC_PLL0_OUT_AUX, 16, 0, 0),
+ F(50000000, P_CAM_CC_PLL0_OUT_AUX, 12, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_AUX, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_clk_src = {
+ .cmd_rcgr = 0xb0d8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_cci_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(100000000, P_CAM_CC_PLL0_OUT_AUX, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_AUX, 3, 0, 0),
+ F(269333333, P_CAM_CC_PLL1_OUT_AUX, 3, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_EARLY, 3, 0, 0),
+ F(384000000, P_CAM_CC_PLL2_OUT_EARLY, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x9064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(100000000, P_CAM_CC_PLL0_OUT_AUX, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_AUX, 3, 0, 0),
+ F(269333333, P_CAM_CC_PLL1_OUT_AUX, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x5004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x5028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x504c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(100000000, P_CAM_CC_PLL0_OUT_AUX, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_AUX, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_AUX, 2, 0, 0),
+ F(404000000, P_CAM_CC_PLL1_OUT_AUX, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x603c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(240000000, P_CAM_CC_PLL0_OUT_AUX, 2.5, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_MAIN, 2.5, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+ F(540000000, P_CAM_CC_PLL3_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_AUX, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0xb088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(240000000, P_CAM_CC_PLL0_OUT_AUX, 2.5, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_MAIN, 2.5, 0, 0),
+ F(540000000, P_CAM_CC_PLL3_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_AUX, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0x9010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+ F(100000000, P_CAM_CC_PLL0_OUT_AUX, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_AUX, 3, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_EARLY, 3, 0, 0),
+ F(404000000, P_CAM_CC_PLL1_OUT_AUX, 2, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+ F(540000000, P_CAM_CC_PLL3_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+ .cmd_rcgr = 0x903c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0xa010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+ .cmd_rcgr = 0xa034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+ .cmd_rcgr = 0xb004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+ .cmd_rcgr = 0xb024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+ .cmd_rcgr = 0x7010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_jpeg_clk_src[] = {
+ F(66666667, P_CAM_CC_PLL0_OUT_AUX, 9, 0, 0),
+ F(133333333, P_CAM_CC_PLL0_OUT_AUX, 4.5, 0, 0),
+ F(216000000, P_CAM_CC_PLL3_OUT_MAIN, 5, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_EARLY, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_AUX, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ .cmd_rcgr = 0xb04c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_jpeg_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+ F(200000000, P_CAM_CC_PLL0_OUT_AUX, 3, 0, 0),
+ F(216000000, P_CAM_CC_PLL3_OUT_MAIN, 5, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_AUX, 2, 0, 0),
+ F(404000000, P_CAM_CC_PLL1_OUT_AUX, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_lrme_clk_src = {
+ .cmd_rcgr = 0xb0f8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_lrme_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_lrme_clk_src",
+ .parent_data = cam_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
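+/* cam_cc_pll2_out_aux2 runs at 480 MHz (960 MHz / 2): 480 / 10 * 1/2 = 24 MHz, 480 / 14 ~= 34.286 MHz. */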
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_CAM_CC_PLL2_OUT_AUX2, 10, 1, 2),
+ F(34285714, P_CAM_CC_PLL2_OUT_AUX2, 14, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x4004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x4024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x4044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x4064,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(80000000, P_CAM_CC_PLL0_OUT_AUX, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x6058,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
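+/*
+ * Branch (gate) clocks: BIT(0) of enable_reg gates the clock, halt_reg is
+ * polled to confirm the transition, and CLK_SET_RATE_PARENT forwards rate
+ * requests to the feeding RCG.
+ */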
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x6070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+ .halt_reg = 0x6054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_axi_clk = {
+ .halt_reg = 0x6038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x6028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0xb124,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb124,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_clk = {
+ .halt_reg = 0xb0f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
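+/*
+ * The core AHB clock sits behind a voting register shared with other
+ * masters: BRANCH_HALT_VOTED only drops this vote on disable instead of
+ * polling for the clock to actually halt.
+ */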
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0xb144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0xb11c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb11c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x5040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x5064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x5020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x5044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x5068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0xb0a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_axi_clk = {
+ .halt_reg = 0x9080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0x9028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
+ .halt_reg = 0x907c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x907c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_csid_clk = {
+ .halt_reg = 0x9054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+ .halt_reg = 0x9038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_axi_clk = {
+ .halt_reg = 0xa058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
+ .halt_reg = 0xa054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_csid_clk = {
+ .halt_reg = 0xa04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+ .halt_reg = 0xa030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
+ .halt_reg = 0xb044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_csid_clk = {
+ .halt_reg = 0xb03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_ahb_clk = {
+ .halt_reg = 0x7040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_areg_clk = {
+ .halt_reg = 0x703c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x703c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_axi_clk = {
+ .halt_reg = 0x7038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_clk = {
+ .halt_reg = 0x7028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+ .halt_reg = 0xb064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_jpeg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_lrme_clk = {
+ .halt_reg = 0xb110,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb110,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_lrme_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_lrme_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x401c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x403c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x403c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x405c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x405c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x407c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x407c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_soc_ahb_clk = {
+ .halt_reg = 0xb140,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb140,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_soc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sys_tmr_clk = {
+ .halt_reg = 0xb0a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sys_tmr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
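+/*
+ * GDSC power domains: titan_top_gdsc is the parent of the per-block GDSCs
+ * below, so genpd powers it up first and collapses it last, while
+ * RETAIN_FF_ENABLE on IPE0 preserves register state across power collapse.
+ */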
+static struct gdsc titan_top_gdsc = {
+ .gdscr = 0xb134,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "titan_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc bps_gdsc = {
+ .gdscr = 0x6004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "bps_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_0_gdsc = {
+ .gdscr = 0x9004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_1_gdsc = {
+ .gdscr = 0xa004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ipe_0_gdsc = {
+ .gdscr = 0x7004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ipe_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
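+/*
+ * Array indices are the dt-bindings clock IDs consumed from the device
+ * tree, so they are ABI and must stay in sync with the binding header.
+ */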
+static struct clk_regmap *cam_cc_qcs615_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+ [CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CCI_CLK] = &cam_cc_cci_clk.clkr,
+ [CAM_CC_CCI_CLK_SRC] = &cam_cc_cci_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+ [CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr,
+ [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr,
+ [CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr,
+ [CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr,
+ [CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr,
+ [CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr,
+ [CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr,
+ [CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr,
+ [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+ [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+ [CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr,
+ [CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL2_OUT_AUX2] = &cam_cc_pll2_out_aux2.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_SOC_AHB_CLK] = &cam_cc_soc_ahb_clk.clkr,
+ [CAM_CC_SYS_TMR_CLK] = &cam_cc_sys_tmr_clk.clkr,
+};
+
+static struct gdsc *cam_cc_qcs615_gdscs[] = {
+ [BPS_GDSC] = &bps_gdsc,
+ [IFE_0_GDSC] = &ife_0_gdsc,
+ [IFE_1_GDSC] = &ife_1_gdsc,
+ [IPE_0_GDSC] = &ipe_0_gdsc,
+ [TITAN_TOP_GDSC] = &titan_top_gdsc,
+};
+
+static const struct qcom_reset_map cam_cc_qcs615_resets[] = {
+ [CAM_CC_BPS_BCR] = { 0x6000 },
+ [CAM_CC_CAMNOC_BCR] = { 0xb120 },
+ [CAM_CC_CCI_BCR] = { 0xb0d4 },
+ [CAM_CC_CPAS_BCR] = { 0xb118 },
+ [CAM_CC_CSI0PHY_BCR] = { 0x5000 },
+ [CAM_CC_CSI1PHY_BCR] = { 0x5024 },
+ [CAM_CC_CSI2PHY_BCR] = { 0x5048 },
+ [CAM_CC_ICP_BCR] = { 0xb074 },
+ [CAM_CC_IFE_0_BCR] = { 0x9000 },
+ [CAM_CC_IFE_1_BCR] = { 0xa000 },
+ [CAM_CC_IFE_LITE_BCR] = { 0xb000 },
+ [CAM_CC_IPE_0_BCR] = { 0x7000 },
+ [CAM_CC_JPEG_BCR] = { 0xb048 },
+ [CAM_CC_LRME_BCR] = { 0xb0f4 },
+ [CAM_CC_MCLK0_BCR] = { 0x4000 },
+ [CAM_CC_MCLK1_BCR] = { 0x4020 },
+ [CAM_CC_MCLK2_BCR] = { 0x4040 },
+ [CAM_CC_MCLK3_BCR] = { 0x4060 },
+ [CAM_CC_TITAN_TOP_BCR] = { 0xb130 },
+};
+
+static struct clk_alpha_pll *cam_cc_qcs615_plls[] = {
+ &cam_cc_pll0,
+ &cam_cc_pll1,
+ &cam_cc_pll2,
+ &cam_cc_pll3,
+};
+
+static const struct regmap_config cam_cc_qcs615_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xd004,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data cam_cc_qcs615_driver_data = {
+ .alpha_plls = cam_cc_qcs615_plls,
+ .num_alpha_plls = ARRAY_SIZE(cam_cc_qcs615_plls),
+};
+
+static const struct qcom_cc_desc cam_cc_qcs615_desc = {
+ .config = &cam_cc_qcs615_regmap_config,
+ .clks = cam_cc_qcs615_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_qcs615_clocks),
+ .resets = cam_cc_qcs615_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_qcs615_resets),
+ .gdscs = cam_cc_qcs615_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_qcs615_gdscs),
+ .driver_data = &cam_cc_qcs615_driver_data,
+};
+
+static const struct of_device_id cam_cc_qcs615_match_table[] = {
+ { .compatible = "qcom,qcs615-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_qcs615_match_table);
+
+static int cam_cc_qcs615_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &cam_cc_qcs615_desc);
+}
+
+static struct platform_driver cam_cc_qcs615_driver = {
+ .probe = cam_cc_qcs615_probe,
+ .driver = {
+ .name = "camcc-qcs615",
+ .of_match_table = cam_cc_qcs615_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_qcs615_driver);
+
+MODULE_DESCRIPTION("QTI CAMCC QCS615 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/camcc-sc8180x.c b/drivers/clk/qcom/camcc-sc8180x.c
new file mode 100644
index 000000000000..388fedf1dc81
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sc8180x.c
@@ -0,0 +1,2889 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sc8180x-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL0_OUT_MAIN,
+ P_CAM_CC_PLL0_OUT_ODD,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_EARLY,
+ P_CAM_CC_PLL2_OUT_MAIN,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_EVEN,
+ P_CAM_CC_PLL5_OUT_EVEN,
+ P_CAM_CC_PLL6_OUT_EVEN,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco regera_vco[] = {
+ { 600000000, 3300000000, 0 },
+};
+
+static const struct pll_vco trion_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
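+/*
+ * PLL rate = 19.2 MHz TCXO * (l + alpha / 2^16): cam_cc_pll0 runs at
+ * 62.5 * 19.2 MHz = 1200 MHz, giving 600 MHz and 400 MHz on the even and
+ * odd post-divider taps.
+ */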
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00003100,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+ { 0x3, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 12,
+ .post_div_table = post_div_table_cam_cc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x13,
+ .alpha = 0x8800,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x10000807,
+ .config_ctl_hi_val = 0x00000011,
+ .config_ctl_hi1_val = 0x04300142,
+ .test_ctl_val = 0x04000400,
+ .test_ctl_hi_val = 0x00004000,
+ .test_ctl_hi1_val = 0x00000000,
+ .user_ctl_val = 0x00000100,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = regera_vco,
+ .num_vco = ARRAY_SIZE(regera_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_REGERA],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_regera_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_main = {
+ .offset = 0x2000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll2_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_main),
+ .width = 2,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_REGERA],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2_out_main",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_trion_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x14,
+ .alpha = 0xd555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll4_config = {
+ .l = 0x14,
+ .alpha = 0xd555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+ .offset = 0x4000,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll5_config = {
+ .l = 0x14,
+ .alpha = 0xd555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll5 = {
+ .offset = 0x4078,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll6_config = {
+ .l = 0x14,
+ .alpha = 0xd555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .test_ctl_hi1_val = 0x00000020,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000d0,
+};
+
+static struct clk_alpha_pll cam_cc_pll6 = {
+ .offset = 0x40f0,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
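+/*
+ * Each parent_map entry pairs a P_* source with its hardware mux SRC_SEL
+ * value; the matching clk_parent_data array must list the parents in the
+ * same order.
+ */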
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_ODD, 3 },
+ { P_CAM_CC_PLL2_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll2_out_main.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EARLY, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll3.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL4_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll4.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL5_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll5.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL6_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll6.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 4 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x7010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(266666667, P_CAM_CC_PLL0_OUT_ODD, 1.5, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+ .cmd_rcgr = 0xc170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0xc108,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0xc124,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_2_clk_src = {
+ .cmd_rcgr = 0xc204,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_2_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_3_clk_src = {
+ .cmd_rcgr = 0xc220,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_3_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0xa064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x6004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x6028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x604c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x6070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x703c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fd_core_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fd_core_clk_src = {
+ .cmd_rcgr = 0xc0e0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fd_core_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0xc0b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
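+/*
+ * The IFE RCGs divide by 1 at every operating point: with
+ * CLK_SET_RATE_PARENT the rate is changed by reprogramming the dedicated
+ * per-IFE PLL rather than the RCG divider.
+ */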
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0xa010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+ .cmd_rcgr = 0xa03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0xb010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+ .cmd_rcgr = 0xb034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_2_clk_src = {
+ .cmd_rcgr = 0xf010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_2_csid_clk_src = {
+ .cmd_rcgr = 0xf03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_3_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(558000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_3_clk_src = {
+ .cmd_rcgr = 0xf07c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_ife_3_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_3_csid_clk_src = {
+ .cmd_rcgr = 0xf0a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_lite_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_0_clk_src = {
+ .cmd_rcgr = 0xc004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_0_csid_clk_src = {
+ .cmd_rcgr = 0xc020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_1_clk_src = {
+ .cmd_rcgr = 0xc048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_1_csid_clk_src = {
+ .cmd_rcgr = 0xc064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_2_clk_src = {
+ .cmd_rcgr = 0xc240,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_2_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_2_csid_clk_src = {
+ .cmd_rcgr = 0xc25c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_2_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_3_clk_src = {
+ .cmd_rcgr = 0xc284,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_3_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_3_csid_clk_src = {
+ .cmd_rcgr = 0xc2a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_3_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(375000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(475000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(520000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+ .cmd_rcgr = 0x8010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_clk_src",
+ .parent_data = cam_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ .cmd_rcgr = 0xc08c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(240000000, P_CAM_CC_PLL2_OUT_MAIN, 2, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_MAIN, 1.5, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_lrme_clk_src = {
+ .cmd_rcgr = 0xc144,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_lrme_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_lrme_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_CAM_CC_PLL2_OUT_EARLY, 10, 1, 4),
+ F(68571429, P_CAM_CC_PLL2_OUT_EARLY, 14, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x5004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x5024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x5044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x5064,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk4_clk_src = {
+ .cmd_rcgr = 0x5084,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk5_clk_src = {
+ .cmd_rcgr = 0x50a4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk5_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk6_clk_src = {
+ .cmd_rcgr = 0x50c4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk6_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk7_clk_src = {
+ .cmd_rcgr = 0x50e4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk7_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x7058,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_xo_clk_src = {
+ .cmd_rcgr = 0xc1cc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_7,
+ .freq_tbl = ftbl_cam_cc_xo_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_xo_clk_src",
+ .parent_data = cam_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x7070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
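
Aside: each clk_branch that follows is a plain gate: bit 0 of the CBCR at enable_reg turns the clock on, and BRANCH_HALT means the status bit of halt_reg (bit 31, CLK_OFF, in Qualcomm CBCRs) is polled until the clock is reported running. A rough sketch of that enable path, with reg_read()/reg_write() standing in for the regmap accessors:

	#include <stdint.h>
	#include <stdbool.h>

	#define CBCR_CLK_ENABLE	(1u << 0)
	#define CBCR_CLK_OFF	(1u << 31)

	/* Stand-ins for the regmap accessors used by the real driver. */
	extern uint32_t reg_read(uint32_t off);
	extern void reg_write(uint32_t off, uint32_t val);

	static bool branch_enable(uint32_t cbcr)
	{
		reg_write(cbcr, reg_read(cbcr) | CBCR_CLK_ENABLE);

		/* BRANCH_HALT: wait for hardware to report the clock on. */
		for (int i = 0; i < 200; i++)
			if (!(reg_read(cbcr) & CBCR_CLK_OFF))
				return true;

		return false;	/* stuck: the kernel would warn here */
	}
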
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+ .halt_reg = 0x7054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_axi_clk = {
+ .halt_reg = 0x7038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x7028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_bps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0xc18c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc18c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_dcd_xo_clk = {
+ .halt_reg = 0xc194,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc194,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_dcd_xo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0xc120,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0xc13c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc13c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_2_clk = {
+ .halt_reg = 0xc21c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc21c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_3_clk = {
+ .halt_reg = 0xc238,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc238,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0xc1c8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc1c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0xc168,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc168,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x601c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x6040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x6064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0x6088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x6020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x6044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x6068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0x608c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x608c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_fd_core_clk = {
+ .halt_reg = 0xc0f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fd_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fd_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_fd_core_uar_clk = {
+ .halt_reg = 0xc100,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc100,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fd_core_uar_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fd_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ahb_clk = {
+ .halt_reg = 0xc0d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0xc0d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_axi_clk = {
+ .halt_reg = 0xa080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
+ .halt_reg = 0xa07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_csid_clk = {
+ .halt_reg = 0xa054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+ .halt_reg = 0xa038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_axi_clk = {
+ .halt_reg = 0xb058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0xb028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
+ .halt_reg = 0xb054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_csid_clk = {
+ .halt_reg = 0xb04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_axi_clk = {
+ .halt_reg = 0xf068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_clk = {
+ .halt_reg = 0xf028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_cphy_rx_clk = {
+ .halt_reg = 0xf064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_csid_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_2_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_dsp_clk = {
+ .halt_reg = 0xf038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_2_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_3_axi_clk = {
+ .halt_reg = 0xf0d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf0d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_3_clk = {
+ .halt_reg = 0xf094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_3_cphy_rx_clk = {
+ .halt_reg = 0xf0d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf0d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_3_csid_clk = {
+ .halt_reg = 0xf0c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf0c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_3_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_3_dsp_clk = {
+ .halt_reg = 0xf0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_3_dsp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_clk = {
+ .halt_reg = 0xc01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_cphy_rx_clk = {
+ .halt_reg = 0xc040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_csid_clk = {
+ .halt_reg = 0xc038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_0_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_clk = {
+ .halt_reg = 0xc060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_cphy_rx_clk = {
+ .halt_reg = 0xc084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_csid_clk = {
+ .halt_reg = 0xc07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_1_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_2_clk = {
+ .halt_reg = 0xc258,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc258,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_2_cphy_rx_clk = {
+ .halt_reg = 0xc27c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc27c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_2_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_2_csid_clk = {
+ .halt_reg = 0xc274,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc274,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_2_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_2_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_3_clk = {
+ .halt_reg = 0xc29c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc29c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_3_cphy_rx_clk = {
+ .halt_reg = 0xc2c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc2c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_3_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_3_csid_clk = {
+ .halt_reg = 0xc2b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc2b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_3_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_3_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_ahb_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_areg_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_axi_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_clk = {
+ .halt_reg = 0x8028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_ahb_clk = {
+ .halt_reg = 0x9028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_areg_clk = {
+ .halt_reg = 0x9024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_areg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_axi_clk = {
+ .halt_reg = 0x9020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_1_clk = {
+ .halt_reg = 0x9010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+ .halt_reg = 0xc0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_jpeg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_jpeg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_lrme_clk = {
+ .halt_reg = 0xc15c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc15c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_lrme_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_lrme_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x503c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x503c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x505c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x505c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x507c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x507c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk4_clk = {
+ .halt_reg = 0x509c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x509c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk5_clk = {
+ .halt_reg = 0x50bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk6_clk = {
+ .halt_reg = 0x50dc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk7_clk = {
+ .halt_reg = 0x50fc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50fc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc titan_top_gdsc = {
+ .gdscr = 0xc1bc,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "titan_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc bps_gdsc = {
+ .gdscr = 0x7004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "bps_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_0_gdsc = {
+ .gdscr = 0xa004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_1_gdsc = {
+ .gdscr = 0xb004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_2_gdsc = {
+ .gdscr = 0xf004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ife_3_gdsc = {
+ .gdscr = 0xf070,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ife_3_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ipe_0_gdsc = {
+ .gdscr = 0x8004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ipe_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc ipe_1_gdsc = {
+ .gdscr = 0x9004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ipe_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
+ .flags = POLL_CFG_GDSCR,
+};
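
Aside: every leaf GDSC above names titan_top_gdsc as its .parent. The qcom gdsc code turns that field into a genpd parent/child link, so the top-level camera island powers up before, and powers down after, any per-block domain. Roughly what registration does with it (a sketch of the logic in drivers/clk/qcom/gdsc.c, not the verbatim code):

	#include <linux/pm_domain.h>

	/* During gdsc registration, for each domain with a parent set: */
	if (gdsc->parent) {
		ret = pm_genpd_add_subdomain(gdsc->parent, &gdsc->pd);
		if (ret)
			return ret;
	}
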
+
+static struct clk_regmap *cam_cc_sc8180x_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+ [CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr,
+ [CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CCI_2_CLK] = &cam_cc_cci_2_clk.clkr,
+ [CAM_CC_CCI_2_CLK_SRC] = &cam_cc_cci_2_clk_src.clkr,
+ [CAM_CC_CCI_3_CLK] = &cam_cc_cci_3_clk.clkr,
+ [CAM_CC_CCI_3_CLK_SRC] = &cam_cc_cci_3_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_FD_CORE_CLK] = &cam_cc_fd_core_clk.clkr,
+ [CAM_CC_FD_CORE_CLK_SRC] = &cam_cc_fd_core_clk_src.clkr,
+ [CAM_CC_FD_CORE_UAR_CLK] = &cam_cc_fd_core_uar_clk.clkr,
+ [CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+ [CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr,
+ [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+ [CAM_CC_IFE_2_AXI_CLK] = &cam_cc_ife_2_axi_clk.clkr,
+ [CAM_CC_IFE_2_CLK] = &cam_cc_ife_2_clk.clkr,
+ [CAM_CC_IFE_2_CLK_SRC] = &cam_cc_ife_2_clk_src.clkr,
+ [CAM_CC_IFE_2_CPHY_RX_CLK] = &cam_cc_ife_2_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_2_CSID_CLK] = &cam_cc_ife_2_csid_clk.clkr,
+ [CAM_CC_IFE_2_CSID_CLK_SRC] = &cam_cc_ife_2_csid_clk_src.clkr,
+ [CAM_CC_IFE_2_DSP_CLK] = &cam_cc_ife_2_dsp_clk.clkr,
+ [CAM_CC_IFE_3_AXI_CLK] = &cam_cc_ife_3_axi_clk.clkr,
+ [CAM_CC_IFE_3_CLK] = &cam_cc_ife_3_clk.clkr,
+ [CAM_CC_IFE_3_CLK_SRC] = &cam_cc_ife_3_clk_src.clkr,
+ [CAM_CC_IFE_3_CPHY_RX_CLK] = &cam_cc_ife_3_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_3_CSID_CLK] = &cam_cc_ife_3_csid_clk.clkr,
+ [CAM_CC_IFE_3_CSID_CLK_SRC] = &cam_cc_ife_3_csid_clk_src.clkr,
+ [CAM_CC_IFE_3_DSP_CLK] = &cam_cc_ife_3_dsp_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CLK] = &cam_cc_ife_lite_0_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CLK_SRC] = &cam_cc_ife_lite_0_clk_src.clkr,
+ [CAM_CC_IFE_LITE_0_CPHY_RX_CLK] = &cam_cc_ife_lite_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CSID_CLK] = &cam_cc_ife_lite_0_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CSID_CLK_SRC] = &cam_cc_ife_lite_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_LITE_1_CLK] = &cam_cc_ife_lite_1_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CLK_SRC] = &cam_cc_ife_lite_1_clk_src.clkr,
+ [CAM_CC_IFE_LITE_1_CPHY_RX_CLK] = &cam_cc_ife_lite_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CSID_CLK] = &cam_cc_ife_lite_1_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CSID_CLK_SRC] = &cam_cc_ife_lite_1_csid_clk_src.clkr,
+ [CAM_CC_IFE_LITE_2_CLK] = &cam_cc_ife_lite_2_clk.clkr,
+ [CAM_CC_IFE_LITE_2_CLK_SRC] = &cam_cc_ife_lite_2_clk_src.clkr,
+ [CAM_CC_IFE_LITE_2_CPHY_RX_CLK] = &cam_cc_ife_lite_2_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_2_CSID_CLK] = &cam_cc_ife_lite_2_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_2_CSID_CLK_SRC] = &cam_cc_ife_lite_2_csid_clk_src.clkr,
+ [CAM_CC_IFE_LITE_3_CLK] = &cam_cc_ife_lite_3_clk.clkr,
+ [CAM_CC_IFE_LITE_3_CLK_SRC] = &cam_cc_ife_lite_3_clk_src.clkr,
+ [CAM_CC_IFE_LITE_3_CPHY_RX_CLK] = &cam_cc_ife_lite_3_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_3_CSID_CLK] = &cam_cc_ife_lite_3_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_3_CSID_CLK_SRC] = &cam_cc_ife_lite_3_csid_clk_src.clkr,
+ [CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr,
+ [CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr,
+ [CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr,
+ [CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr,
+ [CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr,
+ [CAM_CC_IPE_1_AHB_CLK] = &cam_cc_ipe_1_ahb_clk.clkr,
+ [CAM_CC_IPE_1_AREG_CLK] = &cam_cc_ipe_1_areg_clk.clkr,
+ [CAM_CC_IPE_1_AXI_CLK] = &cam_cc_ipe_1_axi_clk.clkr,
+ [CAM_CC_IPE_1_CLK] = &cam_cc_ipe_1_clk.clkr,
+ [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+ [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+ [CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr,
+ [CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_MCLK4_CLK] = &cam_cc_mclk4_clk.clkr,
+ [CAM_CC_MCLK4_CLK_SRC] = &cam_cc_mclk4_clk_src.clkr,
+ [CAM_CC_MCLK5_CLK] = &cam_cc_mclk5_clk.clkr,
+ [CAM_CC_MCLK5_CLK_SRC] = &cam_cc_mclk5_clk_src.clkr,
+ [CAM_CC_MCLK6_CLK] = &cam_cc_mclk6_clk.clkr,
+ [CAM_CC_MCLK6_CLK_SRC] = &cam_cc_mclk6_clk_src.clkr,
+ [CAM_CC_MCLK7_CLK] = &cam_cc_mclk7_clk.clkr,
+ [CAM_CC_MCLK7_CLK_SRC] = &cam_cc_mclk7_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL2_OUT_MAIN] = &cam_cc_pll2_out_main.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+ [CAM_CC_PLL5] = &cam_cc_pll5.clkr,
+ [CAM_CC_PLL6] = &cam_cc_pll6.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *cam_cc_sc8180x_gdscs[] = {
+ [BPS_GDSC] = &bps_gdsc,
+ [IFE_0_GDSC] = &ife_0_gdsc,
+ [IFE_1_GDSC] = &ife_1_gdsc,
+ [IFE_2_GDSC] = &ife_2_gdsc,
+ [IFE_3_GDSC] = &ife_3_gdsc,
+ [IPE_0_GDSC] = &ipe_0_gdsc,
+ [IPE_1_GDSC] = &ipe_1_gdsc,
+ [TITAN_TOP_GDSC] = &titan_top_gdsc,
+};
+
+static const struct qcom_reset_map cam_cc_sc8180x_resets[] = {
+ [CAM_CC_BPS_BCR] = { 0x7000 },
+ [CAM_CC_CAMNOC_BCR] = { 0xc16c },
+ [CAM_CC_CCI_BCR] = { 0xc104 },
+ [CAM_CC_CPAS_BCR] = { 0xc164 },
+ [CAM_CC_CSI0PHY_BCR] = { 0x6000 },
+ [CAM_CC_CSI1PHY_BCR] = { 0x6024 },
+ [CAM_CC_CSI2PHY_BCR] = { 0x6048 },
+ [CAM_CC_CSI3PHY_BCR] = { 0x606c },
+ [CAM_CC_FD_BCR] = { 0xc0dc },
+ [CAM_CC_ICP_BCR] = { 0xc0b4 },
+ [CAM_CC_IFE_0_BCR] = { 0xa000 },
+ [CAM_CC_IFE_1_BCR] = { 0xb000 },
+ [CAM_CC_IFE_2_BCR] = { 0xf000 },
+ [CAM_CC_IFE_3_BCR] = { 0xf06c },
+ [CAM_CC_IFE_LITE_0_BCR] = { 0xc000 },
+ [CAM_CC_IFE_LITE_1_BCR] = { 0xc044 },
+ [CAM_CC_IFE_LITE_2_BCR] = { 0xc23c },
+ [CAM_CC_IFE_LITE_3_BCR] = { 0xc280 },
+ [CAM_CC_IPE_0_BCR] = { 0x8000 },
+ [CAM_CC_IPE_1_BCR] = { 0x9000 },
+ [CAM_CC_JPEG_BCR] = { 0xc088 },
+ [CAM_CC_LRME_BCR] = { 0xc140 },
+ [CAM_CC_MCLK0_BCR] = { 0x5000 },
+ [CAM_CC_MCLK1_BCR] = { 0x5020 },
+ [CAM_CC_MCLK2_BCR] = { 0x5040 },
+ [CAM_CC_MCLK3_BCR] = { 0x5060 },
+ [CAM_CC_MCLK4_BCR] = { 0x5080 },
+ [CAM_CC_MCLK5_BCR] = { 0x50a0 },
+ [CAM_CC_MCLK6_BCR] = { 0x50c0 },
+ [CAM_CC_MCLK7_BCR] = { 0x50e0 },
+};
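
Aside: the reset map exposes the block controller resets (BCRs) through the reset framework. A hypothetical consumer-side sketch, cycling the IFE0 reset published as CAM_CC_IFE_0_BCR above; the "ife0" lookup name and the delay are illustrative, not taken from this patch:

	#include <linux/delay.h>
	#include <linux/reset.h>

	/* Inside some camera driver's probe, with dev as its struct device: */
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, "ife0");	/* "ife0" is illustrative */
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);
	usleep_range(10, 20);
	reset_control_deassert(rst);
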
+
+static const struct regmap_config cam_cc_sc8180x_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xf0d4,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc cam_cc_sc8180x_desc = {
+ .config = &cam_cc_sc8180x_regmap_config,
+ .clks = cam_cc_sc8180x_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_sc8180x_clocks),
+ .resets = cam_cc_sc8180x_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_sc8180x_resets),
+ .gdscs = cam_cc_sc8180x_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_sc8180x_gdscs),
+};
+
+static const struct of_device_id cam_cc_sc8180x_match_table[] = {
+ { .compatible = "qcom,sc8180x-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sc8180x_match_table);
+
+static int cam_cc_sc8180x_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &cam_cc_sc8180x_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_trion_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+ clk_trion_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+ clk_regera_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+ clk_trion_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+ clk_trion_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+ clk_trion_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
+ clk_trion_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0xc1e4); /* CAM_CC_GDSC_CLK */
+ qcom_branch_set_clk_en(regmap, 0xc200); /* CAM_CC_SLEEP_CLK */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sc8180x_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
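
Aside: the two qcom_branch_set_clk_en() calls keep housekeeping branches running without modelling them as Linux clocks; the helper amounts to setting the CBCR enable bit through the regmap and never clearing it. Its effect, sketched:

	/* Equivalent effect of qcom_branch_set_clk_en(regmap, 0xc1e4): */
	regmap_update_bits(regmap, 0xc1e4, BIT(0), BIT(0));
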
+
+static struct platform_driver cam_cc_sc8180x_driver = {
+ .probe = cam_cc_sc8180x_probe,
+ .driver = {
+ .name = "camcc-sc8180x",
+ .of_match_table = cam_cc_sc8180x_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_sc8180x_driver);
+
+MODULE_DESCRIPTION("QTI CAMCC SC8180X Driver");
+MODULE_LICENSE("GPL");
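
Aside: for reference, a hypothetical consumer of this controller; a sensor driver would pick up one of the MCLKs registered above through the standard clk API. The "mclk" connection name would come from the consumer's own DT node, not from this patch:

	#include <linux/clk.h>

	struct clk *mclk;

	mclk = devm_clk_get(dev, "mclk");	/* "mclk" name is illustrative */
	if (IS_ERR(mclk))
		return PTR_ERR(mclk);

	clk_set_rate(mclk, 24000000);	/* served by ftbl_cam_cc_mclk0_clk_src */
	clk_prepare_enable(mclk);
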
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index cf60e8dd292a..fb313da7165b 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -1543,6 +1543,7 @@ static struct gdsc bps_gdsc = {
.name = "bps_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -1552,6 +1553,7 @@ static struct gdsc ipe_0_gdsc = {
.name = "ipe_0_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -1561,6 +1563,7 @@ static struct gdsc ipe_1_gdsc = {
.name = "ipe_1_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/camcc-sm6350.c b/drivers/clk/qcom/camcc-sm6350.c
index 8aac97d29ce3..7df12c1311c6 100644
--- a/drivers/clk/qcom/camcc-sm6350.c
+++ b/drivers/clk/qcom/camcc-sm6350.c
@@ -145,15 +145,11 @@ static struct clk_alpha_pll_postdiv camcc_pll1_out_even = {
static const struct alpha_pll_config camcc_pll2_config = {
.l = 0x64,
.alpha = 0x0,
- .post_div_val = 0x3 << 8,
- .post_div_mask = 0x3 << 8,
- .aux_output_mask = BIT(1),
- .main_output_mask = BIT(0),
- .early_output_mask = BIT(3),
.config_ctl_val = 0x20000800,
.config_ctl_hi_val = 0x400003d2,
.test_ctl_val = 0x04000400,
.test_ctl_hi_val = 0x00004000,
+ .user_ctl_val = 0x0000030b,
};
static struct clk_alpha_pll camcc_pll2 = {
@@ -1693,6 +1689,8 @@ static struct clk_branch camcc_sys_tmr_clk = {
},
};
+static struct gdsc titan_top_gdsc;
+
static struct gdsc bps_gdsc = {
.gdscr = 0x6004,
.en_rest_wait_val = 0x2,
@@ -1702,6 +1700,7 @@ static struct gdsc bps_gdsc = {
.name = "bps_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
.flags = VOTABLE,
};
@@ -1714,6 +1713,7 @@ static struct gdsc ipe_0_gdsc = {
.name = "ipe_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
.flags = VOTABLE,
};
@@ -1726,6 +1726,7 @@ static struct gdsc ife_0_gdsc = {
.name = "ife_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
};
static struct gdsc ife_1_gdsc = {
@@ -1737,6 +1738,7 @@ static struct gdsc ife_1_gdsc = {
.name = "ife_1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
};
static struct gdsc ife_2_gdsc = {
@@ -1748,6 +1750,7 @@ static struct gdsc ife_2_gdsc = {
.name = "ife_2_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
};
static struct gdsc titan_top_gdsc = {
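
Aside: the sm6350 hunks need the forward declaration added above because the child GDSCs now take the address of titan_top_gdsc before its initialized definition appears. A minimal, compilable illustration of the C tentative-definition rule being relied on (toy types, not the kernel structs):

	struct pd   { int id; };
	struct gdsc { struct pd pd; struct pd *parent; };

	static struct gdsc titan_top;				/* tentative definition */
	static struct gdsc bps = { .parent = &titan_top.pd };	/* address already usable */
	static struct gdsc titan_top = { .pd = { .id = 1 } };	/* completing definition */
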
diff --git a/drivers/clk/qcom/camcc-sm7150.c b/drivers/clk/qcom/camcc-sm7150.c
index 4a3baf5d8e85..ee963ed341c3 100644
--- a/drivers/clk/qcom/camcc-sm7150.c
+++ b/drivers/clk/qcom/camcc-sm7150.c
@@ -139,13 +139,9 @@ static struct clk_fixed_factor camcc_pll1_out_even = {
/* 1920MHz configuration */
static const struct alpha_pll_config camcc_pll2_config = {
.l = 0x64,
- .post_div_val = 0x3 << 8,
- .post_div_mask = 0x3 << 8,
- .early_output_mask = BIT(3),
- .aux_output_mask = BIT(1),
- .main_output_mask = BIT(0),
.config_ctl_hi_val = 0x400003d6,
.config_ctl_val = 0x20000954,
+ .user_ctl_val = 0x0000030b,
};
static struct clk_alpha_pll camcc_pll2 = {
@@ -1846,6 +1842,7 @@ static struct gdsc camcc_bps_gdsc = {
.name = "camcc_bps_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &camcc_titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -1875,6 +1872,7 @@ static struct gdsc camcc_ipe_0_gdsc = {
.name = "camcc_ipe_0_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &camcc_titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -1884,6 +1882,7 @@ static struct gdsc camcc_ipe_1_gdsc = {
.name = "camcc_ipe_1_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &camcc_titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -1896,7 +1895,7 @@ static struct gdsc camcc_titan_top_gdsc = {
.pwrsts = PWRSTS_OFF_ON,
};
-struct clk_hw *camcc_sm7150_hws[] = {
+static struct clk_hw *camcc_sm7150_hws[] = {
[CAMCC_PLL0_OUT_EVEN] = &camcc_pll0_out_even.hw,
[CAMCC_PLL0_OUT_ODD] = &camcc_pll0_out_odd.hw,
[CAMCC_PLL1_OUT_EVEN] = &camcc_pll1_out_even.hw,
diff --git a/drivers/clk/qcom/camcc-sm8250.c b/drivers/clk/qcom/camcc-sm8250.c
index 6da89c49ba3d..c95a00628630 100644
--- a/drivers/clk/qcom/camcc-sm8250.c
+++ b/drivers/clk/qcom/camcc-sm8250.c
@@ -2213,6 +2213,7 @@ static struct gdsc bps_gdsc = {
.name = "bps_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -2222,6 +2223,7 @@ static struct gdsc ipe_0_gdsc = {
.name = "ipe_0_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -2231,6 +2233,7 @@ static struct gdsc sbi_gdsc = {
.name = "sbi_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/camcc-sm8450.c b/drivers/clk/qcom/camcc-sm8450.c
index 08982737e490..ef8cf54d0eed 100644
--- a/drivers/clk/qcom/camcc-sm8450.c
+++ b/drivers/clk/qcom/camcc-sm8450.c
@@ -86,6 +86,7 @@ static const struct alpha_pll_config sm8475_cam_cc_pll0_config = {
static struct clk_alpha_pll cam_cc_pll0 = {
.offset = 0x0,
+ .config = &cam_cc_pll0_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -191,6 +192,7 @@ static const struct alpha_pll_config sm8475_cam_cc_pll1_config = {
static struct clk_alpha_pll cam_cc_pll1 = {
.offset = 0x1000,
+ .config = &cam_cc_pll1_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -257,6 +259,7 @@ static const struct alpha_pll_config sm8475_cam_cc_pll2_config = {
static struct clk_alpha_pll cam_cc_pll2 = {
.offset = 0x2000,
+ .config = &cam_cc_pll2_config,
.vco_table = rivian_evo_vco,
.num_vco = ARRAY_SIZE(rivian_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
@@ -296,6 +299,7 @@ static const struct alpha_pll_config sm8475_cam_cc_pll3_config = {
static struct clk_alpha_pll cam_cc_pll3 = {
.offset = 0x3000,
+ .config = &cam_cc_pll3_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -368,6 +372,7 @@ static const struct alpha_pll_config sm8475_cam_cc_pll4_config = {
static struct clk_alpha_pll cam_cc_pll4 = {
.offset = 0x4000,
+ .config = &cam_cc_pll4_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -440,6 +445,7 @@ static const struct alpha_pll_config sm8475_cam_cc_pll5_config = {
static struct clk_alpha_pll cam_cc_pll5 = {
.offset = 0x5000,
+ .config = &cam_cc_pll5_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -512,6 +518,7 @@ static const struct alpha_pll_config sm8475_cam_cc_pll6_config = {
static struct clk_alpha_pll cam_cc_pll6 = {
.offset = 0x6000,
+ .config = &cam_cc_pll6_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -584,6 +591,7 @@ static const struct alpha_pll_config sm8475_cam_cc_pll7_config = {
static struct clk_alpha_pll cam_cc_pll7 = {
.offset = 0x7000,
+ .config = &cam_cc_pll7_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -656,6 +664,7 @@ static const struct alpha_pll_config sm8475_cam_cc_pll8_config = {
static struct clk_alpha_pll cam_cc_pll8 = {
.offset = 0x8000,
+ .config = &cam_cc_pll8_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -1476,24 +1485,6 @@ static struct clk_rcg2 cam_cc_xo_clk_src = {
},
};
-static struct clk_branch cam_cc_gdsc_clk = {
- .halt_reg = 0x1320c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x1320c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "cam_cc_gdsc_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &cam_cc_xo_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch cam_cc_bps_ahb_clk = {
.halt_reg = 0x1004c,
.halt_check = BRANCH_HALT,
@@ -2819,7 +2810,6 @@ static struct clk_regmap *cam_cc_sm8450_clocks[] = {
[CAM_CC_CSIPHY4_CLK] = &cam_cc_csiphy4_clk.clkr,
[CAM_CC_CSIPHY5_CLK] = &cam_cc_csiphy5_clk.clkr,
[CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
- [CAM_CC_GDSC_CLK] = &cam_cc_gdsc_clk.clkr,
[CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr,
[CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
[CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
@@ -2913,6 +2903,22 @@ static const struct qcom_reset_map cam_cc_sm8450_resets[] = {
[CAM_CC_SFE_1_BCR] = { 0x13094 },
};
+static struct clk_alpha_pll *cam_cc_sm8450_plls[] = {
+ &cam_cc_pll0,
+ &cam_cc_pll1,
+ &cam_cc_pll2,
+ &cam_cc_pll3,
+ &cam_cc_pll4,
+ &cam_cc_pll5,
+ &cam_cc_pll6,
+ &cam_cc_pll7,
+ &cam_cc_pll8,
+};
+
+static u32 cam_cc_sm8450_critical_cbcrs[] = {
+ 0x1320c, /* CAM_CC_GDSC_CLK */
+};
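
Aside: this table replaces the cam_cc_gdsc_clk branch removed above; instead of a critical clock, the CBCR is listed as always-on driver data for the common probe code to enable. Presumably handled along these lines (a sketch; the exact loop lives in drivers/clk/qcom/common.c):

	/* In common probe code, with data pointing at the driver_data above: */
	for (i = 0; i < data->num_clk_cbcrs; i++)
		qcom_branch_set_clk_en(regmap, data->clk_cbcrs[i]);
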
+
static const struct regmap_config cam_cc_sm8450_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -2929,6 +2935,7 @@ static struct gdsc bps_gdsc = {
.name = "bps_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -2938,6 +2945,7 @@ static struct gdsc ipe_0_gdsc = {
.name = "ipe_0_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -2947,6 +2955,7 @@ static struct gdsc sbi_gdsc = {
.name = "sbi_gdsc",
},
.flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -3021,6 +3030,13 @@ static struct gdsc *cam_cc_sm8450_gdscs[] = {
[TITAN_TOP_GDSC] = &titan_top_gdsc,
};
+static struct qcom_cc_driver_data cam_cc_sm8450_driver_data = {
+ .alpha_plls = cam_cc_sm8450_plls,
+ .num_alpha_plls = ARRAY_SIZE(cam_cc_sm8450_plls),
+ .clk_cbcrs = cam_cc_sm8450_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(cam_cc_sm8450_critical_cbcrs),
+};
+
static const struct qcom_cc_desc cam_cc_sm8450_desc = {
.config = &cam_cc_sm8450_regmap_config,
.clks = cam_cc_sm8450_clocks,
@@ -3029,6 +3045,8 @@ static const struct qcom_cc_desc cam_cc_sm8450_desc = {
.num_resets = ARRAY_SIZE(cam_cc_sm8450_resets),
.gdscs = cam_cc_sm8450_gdscs,
.num_gdscs = ARRAY_SIZE(cam_cc_sm8450_gdscs),
+ .use_rpm = true,
+ .driver_data = &cam_cc_sm8450_driver_data,
};
static const struct of_device_id cam_cc_sm8450_match_table[] = {
@@ -3040,12 +3058,6 @@ MODULE_DEVICE_TABLE(of, cam_cc_sm8450_match_table);
static int cam_cc_sm8450_probe(struct platform_device *pdev)
{
- struct regmap *regmap;
-
- regmap = qcom_cc_map(pdev, &cam_cc_sm8450_desc);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8475-camcc")) {
/* Update CAMCC PLL0 */
cam_cc_pll0.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
@@ -3092,28 +3104,18 @@ static int cam_cc_sm8450_probe(struct platform_device *pdev)
cam_cc_pll8_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
cam_cc_pll8_out_even.clkr.hw.init = &sm8475_cam_cc_pll8_out_even_init;
- clk_lucid_ole_pll_configure(&cam_cc_pll0, regmap, &sm8475_cam_cc_pll0_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll1, regmap, &sm8475_cam_cc_pll1_config);
- clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &sm8475_cam_cc_pll2_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll3, regmap, &sm8475_cam_cc_pll3_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll4, regmap, &sm8475_cam_cc_pll4_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll5, regmap, &sm8475_cam_cc_pll5_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll6, regmap, &sm8475_cam_cc_pll6_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll7, regmap, &sm8475_cam_cc_pll7_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll8, regmap, &sm8475_cam_cc_pll8_config);
- } else {
- clk_lucid_evo_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
- clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
+ cam_cc_pll0.config = &sm8475_cam_cc_pll0_config;
+ cam_cc_pll1.config = &sm8475_cam_cc_pll1_config;
+ cam_cc_pll2.config = &sm8475_cam_cc_pll2_config;
+ cam_cc_pll3.config = &sm8475_cam_cc_pll3_config;
+ cam_cc_pll4.config = &sm8475_cam_cc_pll4_config;
+ cam_cc_pll5.config = &sm8475_cam_cc_pll5_config;
+ cam_cc_pll6.config = &sm8475_cam_cc_pll6_config;
+ cam_cc_pll7.config = &sm8475_cam_cc_pll7_config;
+ cam_cc_pll8.config = &sm8475_cam_cc_pll8_config;
}
- return qcom_cc_really_probe(&pdev->dev, &cam_cc_sm8450_desc, regmap);
+ return qcom_cc_probe(pdev, &cam_cc_sm8450_desc);
}
static struct platform_driver cam_cc_sm8450_driver = {
diff --git a/drivers/clk/qcom/camcc-sm8550.c b/drivers/clk/qcom/camcc-sm8550.c
index 871155783c79..b8ece8a57a8a 100644
--- a/drivers/clk/qcom/camcc-sm8550.c
+++ b/drivers/clk/qcom/camcc-sm8550.c
@@ -7,7 +7,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,sm8550-camcc.h>
@@ -74,6 +73,7 @@ static const struct alpha_pll_config cam_cc_pll0_config = {
static struct clk_alpha_pll cam_cc_pll0 = {
.offset = 0x0,
+ .config = &cam_cc_pll0_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -151,6 +151,7 @@ static const struct alpha_pll_config cam_cc_pll1_config = {
static struct clk_alpha_pll cam_cc_pll1 = {
.offset = 0x1000,
+ .config = &cam_cc_pll1_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -201,6 +202,7 @@ static const struct alpha_pll_config cam_cc_pll2_config = {
static struct clk_alpha_pll cam_cc_pll2 = {
.offset = 0x2000,
+ .config = &cam_cc_pll2_config,
.vco_table = rivian_ole_vco,
.num_vco = ARRAY_SIZE(rivian_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
@@ -232,6 +234,7 @@ static const struct alpha_pll_config cam_cc_pll3_config = {
static struct clk_alpha_pll cam_cc_pll3 = {
.offset = 0x3000,
+ .config = &cam_cc_pll3_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -286,6 +289,7 @@ static const struct alpha_pll_config cam_cc_pll4_config = {
static struct clk_alpha_pll cam_cc_pll4 = {
.offset = 0x4000,
+ .config = &cam_cc_pll4_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -340,6 +344,7 @@ static const struct alpha_pll_config cam_cc_pll5_config = {
static struct clk_alpha_pll cam_cc_pll5 = {
.offset = 0x5000,
+ .config = &cam_cc_pll5_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -394,6 +399,7 @@ static const struct alpha_pll_config cam_cc_pll6_config = {
static struct clk_alpha_pll cam_cc_pll6 = {
.offset = 0x6000,
+ .config = &cam_cc_pll6_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -448,6 +454,7 @@ static const struct alpha_pll_config cam_cc_pll7_config = {
static struct clk_alpha_pll cam_cc_pll7 = {
.offset = 0x7000,
+ .config = &cam_cc_pll7_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -502,6 +509,7 @@ static const struct alpha_pll_config cam_cc_pll8_config = {
static struct clk_alpha_pll cam_cc_pll8 = {
.offset = 0x8000,
+ .config = &cam_cc_pll8_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -556,6 +564,7 @@ static const struct alpha_pll_config cam_cc_pll9_config = {
static struct clk_alpha_pll cam_cc_pll9 = {
.offset = 0x9000,
+ .config = &cam_cc_pll9_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -610,6 +619,7 @@ static const struct alpha_pll_config cam_cc_pll10_config = {
static struct clk_alpha_pll cam_cc_pll10 = {
.offset = 0xa000,
+ .config = &cam_cc_pll10_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -664,6 +674,7 @@ static const struct alpha_pll_config cam_cc_pll11_config = {
static struct clk_alpha_pll cam_cc_pll11 = {
.offset = 0xb000,
+ .config = &cam_cc_pll11_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -718,6 +729,7 @@ static const struct alpha_pll_config cam_cc_pll12_config = {
static struct clk_alpha_pll cam_cc_pll12 = {
.offset = 0xc000,
+ .config = &cam_cc_pll12_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -3192,6 +3204,8 @@ static struct clk_branch cam_cc_sfe_1_fast_ahb_clk = {
},
};
+static struct gdsc cam_cc_titan_top_gdsc;
+
static struct gdsc cam_cc_bps_gdsc = {
.gdscr = 0x10004,
.en_rest_wait_val = 0x2,
@@ -3201,6 +3215,7 @@ static struct gdsc cam_cc_bps_gdsc = {
.name = "cam_cc_bps_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3213,6 +3228,7 @@ static struct gdsc cam_cc_ife_0_gdsc = {
.name = "cam_cc_ife_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3225,6 +3241,7 @@ static struct gdsc cam_cc_ife_1_gdsc = {
.name = "cam_cc_ife_1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3237,6 +3254,7 @@ static struct gdsc cam_cc_ife_2_gdsc = {
.name = "cam_cc_ife_2_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3249,6 +3267,7 @@ static struct gdsc cam_cc_ipe_0_gdsc = {
.name = "cam_cc_ipe_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3261,6 +3280,7 @@ static struct gdsc cam_cc_sbi_gdsc = {
.name = "cam_cc_sbi_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3273,6 +3293,7 @@ static struct gdsc cam_cc_sfe_0_gdsc = {
.name = "cam_cc_sfe_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3285,6 +3306,7 @@ static struct gdsc cam_cc_sfe_1_gdsc = {
.name = "cam_cc_sfe_1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3479,6 +3501,27 @@ static const struct qcom_reset_map cam_cc_sm8550_resets[] = {
[CAM_CC_SFE_1_BCR] = { 0x133dc },
};
+static struct clk_alpha_pll *cam_cc_sm8550_plls[] = {
+ &cam_cc_pll0,
+ &cam_cc_pll1,
+ &cam_cc_pll2,
+ &cam_cc_pll3,
+ &cam_cc_pll4,
+ &cam_cc_pll5,
+ &cam_cc_pll6,
+ &cam_cc_pll7,
+ &cam_cc_pll8,
+ &cam_cc_pll9,
+ &cam_cc_pll10,
+ &cam_cc_pll11,
+ &cam_cc_pll12,
+};
+
+static u32 cam_cc_sm8550_critical_cbcrs[] = {
+ 0x1419c, /* CAM_CC_GDSC_CLK */
+ 0x142cc, /* CAM_CC_SLEEP_CLK */
+};
+
static const struct regmap_config cam_cc_sm8550_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -3487,6 +3530,13 @@ static const struct regmap_config cam_cc_sm8550_regmap_config = {
.fast_io = true,
};
+static struct qcom_cc_driver_data cam_cc_sm8550_driver_data = {
+ .alpha_plls = cam_cc_sm8550_plls,
+ .num_alpha_plls = ARRAY_SIZE(cam_cc_sm8550_plls),
+ .clk_cbcrs = cam_cc_sm8550_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(cam_cc_sm8550_critical_cbcrs),
+};
+
static const struct qcom_cc_desc cam_cc_sm8550_desc = {
.config = &cam_cc_sm8550_regmap_config,
.clks = cam_cc_sm8550_clocks,
@@ -3495,6 +3545,8 @@ static const struct qcom_cc_desc cam_cc_sm8550_desc = {
.num_resets = ARRAY_SIZE(cam_cc_sm8550_resets),
.gdscs = cam_cc_sm8550_gdscs,
.num_gdscs = ARRAY_SIZE(cam_cc_sm8550_gdscs),
+ .use_rpm = true,
+ .driver_data = &cam_cc_sm8550_driver_data,
};
static const struct of_device_id cam_cc_sm8550_match_table[] = {
@@ -3505,46 +3557,7 @@ MODULE_DEVICE_TABLE(of, cam_cc_sm8550_match_table);
static int cam_cc_sm8550_probe(struct platform_device *pdev)
{
- struct regmap *regmap;
- int ret;
-
- ret = devm_pm_runtime_enable(&pdev->dev);
- if (ret)
- return ret;
-
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret)
- return ret;
-
- regmap = qcom_cc_map(pdev, &cam_cc_sm8550_desc);
- if (IS_ERR(regmap)) {
- pm_runtime_put(&pdev->dev);
- return PTR_ERR(regmap);
- }
-
- clk_lucid_ole_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
- clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll9, regmap, &cam_cc_pll9_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll10, regmap, &cam_cc_pll10_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll11, regmap, &cam_cc_pll11_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll12, regmap, &cam_cc_pll12_config);
-
- /* Keep some clocks always-on */
- qcom_branch_set_clk_en(regmap, 0x1419c); /* CAM_CC_GDSC_CLK */
- qcom_branch_set_clk_en(regmap, 0x142cc); /* CAM_CC_SLEEP_CLK */
-
- ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sm8550_desc, regmap);
-
- pm_runtime_put(&pdev->dev);
-
- return ret;
+ return qcom_cc_probe(pdev, &cam_cc_sm8550_desc);
}
static struct platform_driver cam_cc_sm8550_driver = {
diff --git a/drivers/clk/qcom/camcc-sm8650.c b/drivers/clk/qcom/camcc-sm8650.c
index 0ccd6de8ba78..8b388904f56f 100644
--- a/drivers/clk/qcom/camcc-sm8650.c
+++ b/drivers/clk/qcom/camcc-sm8650.c
@@ -7,7 +7,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,sm8650-camcc.h>
@@ -72,6 +71,7 @@ static const struct alpha_pll_config cam_cc_pll0_config = {
static struct clk_alpha_pll cam_cc_pll0 = {
.offset = 0x0,
+ .config = &cam_cc_pll0_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -149,6 +149,7 @@ static const struct alpha_pll_config cam_cc_pll1_config = {
static struct clk_alpha_pll cam_cc_pll1 = {
.offset = 0x1000,
+ .config = &cam_cc_pll1_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -199,6 +200,7 @@ static const struct alpha_pll_config cam_cc_pll2_config = {
static struct clk_alpha_pll cam_cc_pll2 = {
.offset = 0x2000,
+ .config = &cam_cc_pll2_config,
.vco_table = rivian_ole_vco,
.num_vco = ARRAY_SIZE(rivian_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
@@ -230,6 +232,7 @@ static const struct alpha_pll_config cam_cc_pll3_config = {
static struct clk_alpha_pll cam_cc_pll3 = {
.offset = 0x3000,
+ .config = &cam_cc_pll3_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -284,6 +287,7 @@ static const struct alpha_pll_config cam_cc_pll4_config = {
static struct clk_alpha_pll cam_cc_pll4 = {
.offset = 0x4000,
+ .config = &cam_cc_pll4_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -338,6 +342,7 @@ static const struct alpha_pll_config cam_cc_pll5_config = {
static struct clk_alpha_pll cam_cc_pll5 = {
.offset = 0x5000,
+ .config = &cam_cc_pll5_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -392,6 +397,7 @@ static const struct alpha_pll_config cam_cc_pll6_config = {
static struct clk_alpha_pll cam_cc_pll6 = {
.offset = 0x6000,
+ .config = &cam_cc_pll6_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -446,6 +452,7 @@ static const struct alpha_pll_config cam_cc_pll7_config = {
static struct clk_alpha_pll cam_cc_pll7 = {
.offset = 0x7000,
+ .config = &cam_cc_pll7_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -500,6 +507,7 @@ static const struct alpha_pll_config cam_cc_pll8_config = {
static struct clk_alpha_pll cam_cc_pll8 = {
.offset = 0x8000,
+ .config = &cam_cc_pll8_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -554,6 +562,7 @@ static const struct alpha_pll_config cam_cc_pll9_config = {
static struct clk_alpha_pll cam_cc_pll9 = {
.offset = 0x9000,
+ .config = &cam_cc_pll9_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -631,6 +640,7 @@ static const struct alpha_pll_config cam_cc_pll10_config = {
static struct clk_alpha_pll cam_cc_pll10 = {
.offset = 0xa000,
+ .config = &cam_cc_pll10_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -3509,6 +3519,27 @@ static const struct qcom_reset_map cam_cc_sm8650_resets[] = {
[CAM_CC_SFE_2_BCR] = { 0x130f4 },
};
+static struct clk_alpha_pll *cam_cc_sm8650_plls[] = {
+ &cam_cc_pll0,
+ &cam_cc_pll1,
+ &cam_cc_pll2,
+ &cam_cc_pll3,
+ &cam_cc_pll4,
+ &cam_cc_pll5,
+ &cam_cc_pll6,
+ &cam_cc_pll7,
+ &cam_cc_pll8,
+ &cam_cc_pll9,
+ &cam_cc_pll10,
+};
+
+static u32 cam_cc_sm8650_critical_cbcrs[] = {
+ 0x132ec, /* CAM_CC_GDSC_CLK */
+ 0x13308, /* CAM_CC_SLEEP_CLK */
+ 0x13314, /* CAM_CC_DRV_XO_CLK */
+ 0x13318, /* CAM_CC_DRV_AHB_CLK */
+};
+
static const struct regmap_config cam_cc_sm8650_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -3517,6 +3548,13 @@ static const struct regmap_config cam_cc_sm8650_regmap_config = {
.fast_io = true,
};
+static struct qcom_cc_driver_data cam_cc_sm8650_driver_data = {
+ .alpha_plls = cam_cc_sm8650_plls,
+ .num_alpha_plls = ARRAY_SIZE(cam_cc_sm8650_plls),
+ .clk_cbcrs = cam_cc_sm8650_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(cam_cc_sm8650_critical_cbcrs),
+};
+
static const struct qcom_cc_desc cam_cc_sm8650_desc = {
.config = &cam_cc_sm8650_regmap_config,
.clks = cam_cc_sm8650_clocks,
@@ -3525,6 +3563,8 @@ static const struct qcom_cc_desc cam_cc_sm8650_desc = {
.num_resets = ARRAY_SIZE(cam_cc_sm8650_resets),
.gdscs = cam_cc_sm8650_gdscs,
.num_gdscs = ARRAY_SIZE(cam_cc_sm8650_gdscs),
+ .use_rpm = true,
+ .driver_data = &cam_cc_sm8650_driver_data,
};
static const struct of_device_id cam_cc_sm8650_match_table[] = {
@@ -3535,46 +3575,7 @@ MODULE_DEVICE_TABLE(of, cam_cc_sm8650_match_table);
static int cam_cc_sm8650_probe(struct platform_device *pdev)
{
- struct regmap *regmap;
- int ret;
-
- ret = devm_pm_runtime_enable(&pdev->dev);
- if (ret)
- return ret;
-
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret)
- return ret;
-
- regmap = qcom_cc_map(pdev, &cam_cc_sm8650_desc);
- if (IS_ERR(regmap)) {
- pm_runtime_put(&pdev->dev);
- return PTR_ERR(regmap);
- }
-
- clk_lucid_ole_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
- clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll9, regmap, &cam_cc_pll9_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll10, regmap, &cam_cc_pll10_config);
-
- /* Keep clocks always enabled */
- qcom_branch_set_clk_en(regmap, 0x13318); /* CAM_CC_DRV_AHB_CLK */
- qcom_branch_set_clk_en(regmap, 0x13314); /* CAM_CC_DRV_XO_CLK */
- qcom_branch_set_clk_en(regmap, 0x132ec); /* CAM_CC_GDSC_CLK */
- qcom_branch_set_clk_en(regmap, 0x13308); /* CAM_CC_SLEEP_CLK */
-
- ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sm8650_desc, regmap);
-
- pm_runtime_put(&pdev->dev);
-
- return ret;
+ return qcom_cc_probe(pdev, &cam_cc_sm8650_desc);
}
static struct platform_driver cam_cc_sm8650_driver = {
diff --git a/drivers/clk/qcom/camcc-x1e80100.c b/drivers/clk/qcom/camcc-x1e80100.c
index b73524ae64b1..cbcc1c9fcb34 100644
--- a/drivers/clk/qcom/camcc-x1e80100.c
+++ b/drivers/clk/qcom/camcc-x1e80100.c
@@ -7,7 +7,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,x1e80100-camcc.h>
@@ -67,6 +66,7 @@ static const struct alpha_pll_config cam_cc_pll0_config = {
static struct clk_alpha_pll cam_cc_pll0 = {
.offset = 0x0,
+ .config = &cam_cc_pll0_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -144,6 +144,7 @@ static const struct alpha_pll_config cam_cc_pll1_config = {
static struct clk_alpha_pll cam_cc_pll1 = {
.offset = 0x1000,
+ .config = &cam_cc_pll1_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -194,6 +195,7 @@ static const struct alpha_pll_config cam_cc_pll2_config = {
static struct clk_alpha_pll cam_cc_pll2 = {
.offset = 0x2000,
+ .config = &cam_cc_pll2_config,
.vco_table = rivian_ole_vco,
.num_vco = ARRAY_SIZE(rivian_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
@@ -225,6 +227,7 @@ static const struct alpha_pll_config cam_cc_pll3_config = {
static struct clk_alpha_pll cam_cc_pll3 = {
.offset = 0x3000,
+ .config = &cam_cc_pll3_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -279,6 +282,7 @@ static const struct alpha_pll_config cam_cc_pll4_config = {
static struct clk_alpha_pll cam_cc_pll4 = {
.offset = 0x4000,
+ .config = &cam_cc_pll4_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -333,6 +337,7 @@ static const struct alpha_pll_config cam_cc_pll6_config = {
static struct clk_alpha_pll cam_cc_pll6 = {
.offset = 0x6000,
+ .config = &cam_cc_pll6_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -387,6 +392,7 @@ static const struct alpha_pll_config cam_cc_pll8_config = {
static struct clk_alpha_pll cam_cc_pll8 = {
.offset = 0x8000,
+ .config = &cam_cc_pll8_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -2418,6 +2424,21 @@ static const struct qcom_reset_map cam_cc_x1e80100_resets[] = {
[CAM_CC_SFE_0_BCR] = { 0x1327c },
};
+static struct clk_alpha_pll *cam_cc_x1e80100_plls[] = {
+ &cam_cc_pll0,
+ &cam_cc_pll1,
+ &cam_cc_pll2,
+ &cam_cc_pll3,
+ &cam_cc_pll4,
+ &cam_cc_pll6,
+ &cam_cc_pll8,
+};
+
+static u32 cam_cc_x1e80100_critical_cbcrs[] = {
+ 0x13a9c, /* CAM_CC_GDSC_CLK */
+ 0x13ab8, /* CAM_CC_SLEEP_CLK */
+};
+
static const struct regmap_config cam_cc_x1e80100_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -2426,6 +2447,13 @@ static const struct regmap_config cam_cc_x1e80100_regmap_config = {
.fast_io = true,
};
+static struct qcom_cc_driver_data cam_cc_x1e80100_driver_data = {
+ .alpha_plls = cam_cc_x1e80100_plls,
+ .num_alpha_plls = ARRAY_SIZE(cam_cc_x1e80100_plls),
+ .clk_cbcrs = cam_cc_x1e80100_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(cam_cc_x1e80100_critical_cbcrs),
+};
+
static const struct qcom_cc_desc cam_cc_x1e80100_desc = {
.config = &cam_cc_x1e80100_regmap_config,
.clks = cam_cc_x1e80100_clocks,
@@ -2434,6 +2462,8 @@ static const struct qcom_cc_desc cam_cc_x1e80100_desc = {
.num_resets = ARRAY_SIZE(cam_cc_x1e80100_resets),
.gdscs = cam_cc_x1e80100_gdscs,
.num_gdscs = ARRAY_SIZE(cam_cc_x1e80100_gdscs),
+ .use_rpm = true,
+ .driver_data = &cam_cc_x1e80100_driver_data,
};
static const struct of_device_id cam_cc_x1e80100_match_table[] = {
@@ -2444,40 +2474,7 @@ MODULE_DEVICE_TABLE(of, cam_cc_x1e80100_match_table);
static int cam_cc_x1e80100_probe(struct platform_device *pdev)
{
- struct regmap *regmap;
- int ret;
-
- ret = devm_pm_runtime_enable(&pdev->dev);
- if (ret)
- return ret;
-
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret)
- return ret;
-
- regmap = qcom_cc_map(pdev, &cam_cc_x1e80100_desc);
- if (IS_ERR(regmap)) {
- pm_runtime_put(&pdev->dev);
- return PTR_ERR(regmap);
- }
-
- clk_lucid_ole_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
- clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
- clk_lucid_ole_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
-
- /* Keep clocks always enabled */
- qcom_branch_set_clk_en(regmap, 0x13a9c); /* CAM_CC_GDSC_CLK */
- qcom_branch_set_clk_en(regmap, 0x13ab8); /* CAM_CC_SLEEP_CLK */
-
- ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_x1e80100_desc, regmap);
-
- pm_runtime_put(&pdev->dev);
-
- return ret;
+ return qcom_cc_probe(pdev, &cam_cc_x1e80100_desc);
}
static struct platform_driver cam_cc_x1e80100_driver = {
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index cec0afea8e44..6aeba40358c1 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -63,8 +63,10 @@
#define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE])
#define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC])
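+/* Recover the PLL type index from where @regs points inside clk_alpha_pll_regs */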
+#define GET_PLL_TYPE(pll) (((pll)->regs - clk_alpha_pll_regs[0]) / PLL_OFF_MAX_REGS)
+
const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
- [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
+ [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
@@ -75,7 +77,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U] = 0x20,
[PLL_OFF_STATUS] = 0x24,
},
- [CLK_ALPHA_PLL_TYPE_HUAYRA] = {
+ [CLK_ALPHA_PLL_TYPE_HUAYRA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x10,
@@ -85,7 +87,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U] = 0x20,
[PLL_OFF_STATUS] = 0x24,
},
- [CLK_ALPHA_PLL_TYPE_HUAYRA_APSS] = {
+ [CLK_ALPHA_PLL_TYPE_HUAYRA_APSS] = {
[PLL_OFF_L_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL] = 0x10,
[PLL_OFF_USER_CTL] = 0x18,
@@ -95,7 +97,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL] = 0x30,
[PLL_OFF_TEST_CTL_U] = 0x34,
},
- [CLK_ALPHA_PLL_TYPE_HUAYRA_2290] = {
+ [CLK_ALPHA_PLL_TYPE_HUAYRA_2290] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x0c,
@@ -108,7 +110,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_OPMODE] = 0x28,
[PLL_OFF_STATUS] = 0x38,
},
- [CLK_ALPHA_PLL_TYPE_BRAMMO] = {
+ [CLK_ALPHA_PLL_TYPE_BRAMMO] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
@@ -117,7 +119,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL] = 0x1c,
[PLL_OFF_STATUS] = 0x24,
},
- [CLK_ALPHA_PLL_TYPE_FABIA] = {
+ [CLK_ALPHA_PLL_TYPE_FABIA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_USER_CTL] = 0x0c,
[PLL_OFF_USER_CTL_U] = 0x10,
@@ -145,7 +147,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_OPMODE] = 0x38,
[PLL_OFF_ALPHA_VAL] = 0x40,
},
- [CLK_ALPHA_PLL_TYPE_AGERA] = {
+ [CLK_ALPHA_PLL_TYPE_AGERA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x0c,
@@ -155,7 +157,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U] = 0x1c,
[PLL_OFF_STATUS] = 0x2c,
},
- [CLK_ALPHA_PLL_TYPE_ZONDA] = {
+ [CLK_ALPHA_PLL_TYPE_ZONDA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x0c,
@@ -241,7 +243,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL] = 0x28,
[PLL_OFF_TEST_CTL_U] = 0x2c,
},
- [CLK_ALPHA_PLL_TYPE_DEFAULT_EVO] = {
+ [CLK_ALPHA_PLL_TYPE_DEFAULT_EVO] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
@@ -252,7 +254,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_CONFIG_CTL] = 0x20,
[PLL_OFF_STATUS] = 0x24,
},
- [CLK_ALPHA_PLL_TYPE_BRAMMO_EVO] = {
+ [CLK_ALPHA_PLL_TYPE_BRAMMO_EVO] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
@@ -273,7 +275,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL] = 0x30,
[PLL_OFF_TEST_CTL_U] = 0x34,
},
- [CLK_ALPHA_PLL_TYPE_STROMER_PLUS] = {
+ [CLK_ALPHA_PLL_TYPE_STROMER_PLUS] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_USER_CTL] = 0x08,
[PLL_OFF_USER_CTL_U] = 0x0c,
@@ -284,7 +286,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_ALPHA_VAL] = 0x24,
[PLL_OFF_ALPHA_VAL_U] = 0x28,
},
- [CLK_ALPHA_PLL_TYPE_ZONDA_OLE] = {
+ [CLK_ALPHA_PLL_TYPE_ZONDA_OLE] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x0c,
@@ -299,7 +301,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_OPMODE] = 0x30,
[PLL_OFF_STATUS] = 0x3c,
},
- [CLK_ALPHA_PLL_TYPE_NSS_HUAYRA] = {
+ [CLK_ALPHA_PLL_TYPE_NSS_HUAYRA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_TEST_CTL] = 0x0c,
@@ -788,6 +790,29 @@ static int clk_alpha_pll_update_latch(struct clk_alpha_pll *pll,
return __clk_alpha_pll_update_latch(pll);
}
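+
+/*
+ * Program the L and ALPHA values and, when @vco is given, the VCO band;
+ * optionally enable alpha (fractional-N) mode. Shared by the regular and
+ * slewing set_rate paths.
+ */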
+static void clk_alpha_pll_update_configs(struct clk_alpha_pll *pll, const struct pll_vco *vco,
+ u32 l, u64 alpha, u32 alpha_width, bool alpha_en)
+{
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+
+ if (alpha_width > ALPHA_BITWIDTH)
+ alpha <<= alpha_width - ALPHA_BITWIDTH;
+
+ if (alpha_width > 32)
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), upper_32_bits(alpha));
+
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), lower_32_bits(alpha));
+
+ if (vco) {
+ regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+ PLL_VCO_MASK << PLL_VCO_SHIFT,
+ vco->val << PLL_VCO_SHIFT);
+ }
+
+ if (alpha_en)
+ regmap_set_bits(pll->clkr.regmap, PLL_USER_CTL(pll), PLL_ALPHA_EN);
+}
+
static int __clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate,
int (*is_enabled)(struct clk_hw *))
@@ -805,24 +830,7 @@ static int __clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
}
- regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
-
- if (alpha_width > ALPHA_BITWIDTH)
- a <<= alpha_width - ALPHA_BITWIDTH;
-
- if (alpha_width > 32)
- regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), a >> 32);
-
- regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
-
- if (vco) {
- regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
- PLL_VCO_MASK << PLL_VCO_SHIFT,
- vco->val << PLL_VCO_SHIFT);
- }
-
- regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
- PLL_ALPHA_EN, PLL_ALPHA_EN);
+ clk_alpha_pll_update_configs(pll, vco, l, a, alpha_width, true);
return clk_alpha_pll_update_latch(pll, is_enabled);
}
@@ -841,22 +849,25 @@ static int clk_alpha_pll_hwfsm_set_rate(struct clk_hw *hw, unsigned long rate,
clk_alpha_pll_hwfsm_is_enabled);
}
-static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_alpha_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, alpha_width = pll_alpha_width(pll);
u64 a;
unsigned long min_freq, max_freq;
- rate = alpha_pll_round_rate(rate, *prate, &l, &a, alpha_width);
- if (!pll->vco_table || alpha_pll_find_vco(pll, rate))
- return rate;
+ req->rate = alpha_pll_round_rate(req->rate, req->best_parent_rate, &l,
+ &a, alpha_width);
+ if (!pll->vco_table || alpha_pll_find_vco(pll, req->rate))
+ return 0;
min_freq = pll->vco_table[0].min_freq;
max_freq = pll->vco_table[pll->num_vco - 1].max_freq;
- return clamp(rate, min_freq, max_freq);
+ req->rate = clamp(req->rate, min_freq, max_freq);
+
+ return 0;
}
void clk_huayra_2290_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
@@ -1040,12 +1051,15 @@ static int alpha_pll_huayra_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long alpha_pll_huayra_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int alpha_pll_huayra_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 l, a;
- return alpha_huayra_pll_round_rate(rate, *prate, &l, &a);
+ req->rate = alpha_huayra_pll_round_rate(req->rate,
+ req->best_parent_rate, &l, &a);
+
+ return 0;
}
static int trion_pll_is_enabled(struct clk_alpha_pll *pll,
@@ -1167,7 +1181,7 @@ const struct clk_ops clk_alpha_pll_ops = {
.disable = clk_alpha_pll_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = clk_alpha_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_alpha_pll_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_ops);
@@ -1177,7 +1191,7 @@ const struct clk_ops clk_alpha_pll_huayra_ops = {
.disable = clk_alpha_pll_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = alpha_pll_huayra_recalc_rate,
- .round_rate = alpha_pll_huayra_round_rate,
+ .determine_rate = alpha_pll_huayra_determine_rate,
.set_rate = alpha_pll_huayra_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_huayra_ops);
@@ -1187,7 +1201,7 @@ const struct clk_ops clk_alpha_pll_hwfsm_ops = {
.disable = clk_alpha_pll_hwfsm_disable,
.is_enabled = clk_alpha_pll_hwfsm_is_enabled,
.recalc_rate = clk_alpha_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_alpha_pll_hwfsm_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops);
@@ -1197,7 +1211,7 @@ const struct clk_ops clk_alpha_pll_fixed_trion_ops = {
.disable = clk_trion_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_trion_ops);
@@ -1232,9 +1246,8 @@ static const struct clk_div_table clk_alpha_2bit_div_table[] = {
{ }
};
-static long
-clk_alpha_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_alpha_pll_postdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
const struct clk_div_table *table;
@@ -1244,13 +1257,15 @@ clk_alpha_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate,
else
table = clk_alpha_div_table;
- return divider_round_rate(hw, rate, prate, table,
- pll->width, CLK_DIVIDER_POWER_OF_TWO);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ table, pll->width,
+ CLK_DIVIDER_POWER_OF_TWO);
+
+ return 0;
}
-static long
-clk_alpha_pll_postdiv_round_ro_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_alpha_pll_postdiv_ro_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
u32 ctl, div;
@@ -1262,9 +1277,12 @@ clk_alpha_pll_postdiv_round_ro_rate(struct clk_hw *hw, unsigned long rate,
div = 1 << fls(ctl);
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), div * rate);
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
+ div * req->rate);
- return DIV_ROUND_UP_ULL((u64)*prate, div);
+ req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, div);
+
+ return 0;
}
static int clk_alpha_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -1283,13 +1301,13 @@ static int clk_alpha_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_alpha_pll_postdiv_ops = {
.recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_determine_rate,
.set_rate = clk_alpha_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ops);
const struct clk_ops clk_alpha_pll_postdiv_ro_ops = {
- .round_rate = clk_alpha_pll_postdiv_round_ro_rate,
+ .determine_rate = clk_alpha_pll_postdiv_ro_determine_rate,
.recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ro_ops);
@@ -1534,7 +1552,7 @@ const struct clk_ops clk_alpha_pll_fabia_ops = {
.is_enabled = clk_alpha_pll_is_enabled,
.set_rate = alpha_pll_fabia_set_rate,
.recalc_rate = alpha_pll_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fabia_ops);
@@ -1543,7 +1561,7 @@ const struct clk_ops clk_alpha_pll_fixed_fabia_ops = {
.disable = alpha_pll_fabia_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = alpha_pll_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_fabia_ops);
@@ -1594,14 +1612,16 @@ clk_trion_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
return (parent_rate / div);
}
-static long
-clk_trion_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_trion_pll_postdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
- return divider_round_rate(hw, rate, prate, pll->post_div_table,
- pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ pll->post_div_table,
+ pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
};
static int
@@ -1627,18 +1647,21 @@ clk_trion_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_alpha_pll_postdiv_trion_ops = {
.recalc_rate = clk_trion_pll_postdiv_recalc_rate,
- .round_rate = clk_trion_pll_postdiv_round_rate,
+ .determine_rate = clk_trion_pll_postdiv_determine_rate,
.set_rate = clk_trion_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_trion_ops);
-static long clk_alpha_pll_postdiv_fabia_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *prate)
+static int clk_alpha_pll_postdiv_fabia_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
- return divider_round_rate(hw, rate, prate, pll->post_div_table,
- pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ pll->post_div_table,
+ pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
}
static int clk_alpha_pll_postdiv_fabia_set_rate(struct clk_hw *hw,
@@ -1673,7 +1696,7 @@ static int clk_alpha_pll_postdiv_fabia_set_rate(struct clk_hw *hw,
const struct clk_ops clk_alpha_pll_postdiv_fabia_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_fabia_determine_rate,
.set_rate = clk_alpha_pll_postdiv_fabia_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
@@ -1825,7 +1848,7 @@ const struct clk_ops clk_alpha_pll_trion_ops = {
.disable = clk_trion_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_trion_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_trion_ops);
@@ -1836,14 +1859,14 @@ const struct clk_ops clk_alpha_pll_lucid_ops = {
.disable = clk_trion_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_trion_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_ops);
const struct clk_ops clk_alpha_pll_postdiv_lucid_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_fabia_determine_rate,
.set_rate = clk_alpha_pll_postdiv_fabia_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_ops);
@@ -1895,7 +1918,7 @@ const struct clk_ops clk_alpha_pll_agera_ops = {
.disable = clk_alpha_pll_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = alpha_pll_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_alpha_pll_agera_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_agera_ops);
@@ -2111,7 +2134,7 @@ const struct clk_ops clk_alpha_pll_lucid_5lpe_ops = {
.disable = alpha_pll_lucid_5lpe_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_lucid_5lpe_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_5lpe_ops);
@@ -2121,13 +2144,13 @@ const struct clk_ops clk_alpha_pll_fixed_lucid_5lpe_ops = {
.disable = alpha_pll_lucid_5lpe_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_5lpe_ops);
const struct clk_ops clk_alpha_pll_postdiv_lucid_5lpe_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_fabia_determine_rate,
.set_rate = clk_lucid_5lpe_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_5lpe_ops);
@@ -2296,7 +2319,7 @@ const struct clk_ops clk_alpha_pll_zonda_ops = {
.disable = clk_zonda_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_zonda_pll_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_zonda_ops);
@@ -2521,13 +2544,13 @@ const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops = {
.disable = alpha_pll_lucid_evo_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = alpha_pll_lucid_evo_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_evo_ops);
const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_fabia_determine_rate,
.set_rate = clk_lucid_evo_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_evo_ops);
@@ -2538,7 +2561,7 @@ const struct clk_ops clk_alpha_pll_lucid_evo_ops = {
.disable = alpha_pll_lucid_evo_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = alpha_pll_lucid_evo_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_lucid_5lpe_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_evo_ops);
@@ -2549,7 +2572,7 @@ const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops = {
.disable = alpha_pll_reset_lucid_evo_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = alpha_pll_lucid_evo_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_lucid_5lpe_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_reset_lucid_evo_ops);
@@ -2724,22 +2747,25 @@ static unsigned long clk_rivian_evo_pll_recalc_rate(struct clk_hw *hw,
return parent_rate * l;
}
-static long clk_rivian_evo_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_rivian_evo_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
unsigned long min_freq, max_freq;
u32 l;
u64 a;
- rate = alpha_pll_round_rate(rate, *prate, &l, &a, 0);
- if (!pll->vco_table || alpha_pll_find_vco(pll, rate))
- return rate;
+ req->rate = alpha_pll_round_rate(req->rate, req->best_parent_rate, &l,
+ &a, 0);
+ if (!pll->vco_table || alpha_pll_find_vco(pll, req->rate))
+ return 0;
min_freq = pll->vco_table[0].min_freq;
max_freq = pll->vco_table[pll->num_vco - 1].max_freq;
- return clamp(rate, min_freq, max_freq);
+ req->rate = clamp(req->rate, min_freq, max_freq);
+
+ return 0;
}
const struct clk_ops clk_alpha_pll_rivian_evo_ops = {
@@ -2747,7 +2773,7 @@ const struct clk_ops clk_alpha_pll_rivian_evo_ops = {
.disable = alpha_pll_lucid_5lpe_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_rivian_evo_pll_recalc_rate,
- .round_rate = clk_rivian_evo_pll_round_rate,
+ .determine_rate = clk_rivian_evo_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_rivian_evo_ops);
@@ -2956,7 +2982,212 @@ const struct clk_ops clk_alpha_pll_regera_ops = {
.disable = clk_zonda_pll_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_zonda_pll_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regera_ops);
+
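+/*
+ * Configure a PLL from the alpha_pll_config attached via pll->config,
+ * dispatching on the PLL type derived from @regs by GET_PLL_TYPE().
+ */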
+void qcom_clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap)
+{
+ const struct clk_init_data *init = pll->clkr.hw.init;
+
+ switch (GET_PLL_TYPE(pll)) {
+ case CLK_ALPHA_PLL_TYPE_LUCID_OLE:
+ clk_lucid_ole_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_LUCID_EVO:
+ clk_lucid_evo_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_TAYCAN_ELU:
+ clk_taycan_elu_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_RIVIAN_EVO:
+ clk_rivian_evo_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_TRION:
+ clk_trion_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_HUAYRA_2290:
+ clk_huayra_2290_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_FABIA:
+ clk_fabia_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_AGERA:
+ clk_agera_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_PONGO_ELU:
+ clk_pongo_elu_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_ZONDA:
+ case CLK_ALPHA_PLL_TYPE_ZONDA_OLE:
+ clk_zonda_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_STROMER:
+ case CLK_ALPHA_PLL_TYPE_STROMER_PLUS:
+ clk_stromer_pll_configure(pll, regmap, pll->config);
+ break;
+ case CLK_ALPHA_PLL_TYPE_DEFAULT:
+ case CLK_ALPHA_PLL_TYPE_DEFAULT_EVO:
+ case CLK_ALPHA_PLL_TYPE_HUAYRA:
+ case CLK_ALPHA_PLL_TYPE_HUAYRA_APSS:
+ case CLK_ALPHA_PLL_TYPE_BRAMMO:
+ case CLK_ALPHA_PLL_TYPE_BRAMMO_EVO:
+ clk_alpha_pll_configure(pll, regmap, pll->config);
+ break;
+ default:
+ WARN(1, "%s: invalid pll type\n", init->name);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(qcom_clk_alpha_pll_configure);
+
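+/*
+ * Latch the newly programmed configuration by asserting PLL_UPDATE, then
+ * wait for the update to complete and the PLL to lock again.
+ */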
+static int clk_alpha_pll_slew_update(struct clk_alpha_pll *pll)
+{
+ u32 val;
+ int ret;
+
+ regmap_set_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_UPDATE);
+ regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
+
+ ret = wait_for_pll_update(pll);
+ if (ret)
+ return ret;
+ /*
+ * Hardware programming mandates a wait of at least 570ns before polling the LOCK
+ * detect bit. Use a 1us delay to be safe.
+ */
+ udelay(1);
+
+ return wait_for_pll_enable_lock(pll);
+}
+
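+/*
+ * Slew to a new rate without disabling the PLL when the target stays in the
+ * current VCO band; otherwise fall back to the regular alpha set_rate path.
+ */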
+static int clk_alpha_pll_slew_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ const struct pll_vco *curr_vco, *vco;
+ unsigned long freq_hz;
+ u64 a;
+ u32 l;
+
+ freq_hz = alpha_pll_round_rate(rate, parent_rate, &l, &a, ALPHA_REG_BITWIDTH);
+ if (freq_hz != rate) {
+ pr_err("alpha_pll: Call clk_set_rate with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ curr_vco = alpha_pll_find_vco(pll, clk_hw_get_rate(hw));
+ if (!curr_vco) {
+ pr_err("alpha pll: not in a valid vco range\n");
+ return -EINVAL;
+ }
+
+ vco = alpha_pll_find_vco(pll, freq_hz);
+ if (!vco) {
+ pr_err("alpha pll: not in a valid vco range\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Dynamic pll update will not support switching frequencies across
+ * vco ranges. In those cases fall back to normal alpha set rate.
+ */
+ if (curr_vco->val != vco->val)
+ return clk_alpha_pll_set_rate(hw, rate, parent_rate);
+
+ clk_alpha_pll_update_configs(pll, NULL, l, a, ALPHA_REG_BITWIDTH, false);
+
+ /* Ensure that the write above goes before slewing the PLL */
+ mb();
+
+ if (clk_hw_is_enabled(hw))
+ return clk_alpha_pll_slew_update(pll);
+
+ return 0;
+}
+
+/*
+ * Slewing plls should be brought up at a frequency in the middle of the
+ * desired VCO range. So after bringing up the pll at the calibration freq,
+ * set it back to the desired frequency (the one set by the previous
+ * clk_set_rate call).
+ */
+static int clk_alpha_pll_calibrate(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct clk_hw *parent;
+ const struct pll_vco *vco;
+ unsigned long calibration_freq, freq_hz;
+ u64 a;
+ u32 l;
+ int rc;
+
+ parent = clk_hw_get_parent(hw);
+ if (!parent) {
+ pr_err("alpha pll: no valid parent found\n");
+ return -EINVAL;
+ }
+
+ vco = alpha_pll_find_vco(pll, clk_hw_get_rate(hw));
+ if (!vco) {
+ pr_err("alpha pll: not in a valid vco range\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Since vco_sel is not allowed to change while the pll is slewing, the
+ * vco table should have only one entry, i.e. index = 0; use it to find
+ * the calibration frequency.
+ */
+ calibration_freq = (pll->vco_table[0].min_freq + pll->vco_table[0].max_freq) / 2;
+
+ freq_hz = alpha_pll_round_rate(calibration_freq, clk_hw_get_rate(parent),
+ &l, &a, ALPHA_REG_BITWIDTH);
+ if (freq_hz != calibration_freq) {
+ pr_err("alpha_pll: call clk_set_rate with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ clk_alpha_pll_update_configs(pll, vco, l, a, ALPHA_REG_BITWIDTH, false);
+
+ /* Bring up the pll at the calibration frequency */
+ rc = clk_alpha_pll_enable(hw);
+ if (rc) {
+ pr_err("alpha pll calibration failed\n");
+ return rc;
+ }
+
+ /*
+ * PLL is already running at calibration frequency.
+ * So slew pll to the previously set frequency.
+ */
+ freq_hz = alpha_pll_round_rate(clk_hw_get_rate(hw),
+ clk_hw_get_rate(parent), &l, &a, ALPHA_REG_BITWIDTH);
+
+ pr_debug("pll %s: setting back to required rate %lu, freq_hz %lu\n",
+ clk_hw_get_name(hw), clk_hw_get_rate(hw), freq_hz);
+
+ clk_alpha_pll_update_configs(pll, NULL, l, a, ALPHA_REG_BITWIDTH, true);
+
+ return clk_alpha_pll_slew_update(pll);
+}
+
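+/* Calibrate at mid-VCO before enabling the PLL at the requested rate */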
+static int clk_alpha_pll_slew_enable(struct clk_hw *hw)
+{
+ int rc;
+
+ rc = clk_alpha_pll_calibrate(hw);
+ if (rc)
+ return rc;
+
+ return clk_alpha_pll_enable(hw);
+}
+
+const struct clk_ops clk_alpha_pll_slew_ops = {
+ .enable = clk_alpha_pll_slew_enable,
+ .disable = clk_alpha_pll_disable,
+ .recalc_rate = clk_alpha_pll_recalc_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
+ .set_rate = clk_alpha_pll_slew_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_slew_ops);
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 79aca8525262..0903a05b18cc 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -29,6 +29,7 @@ enum {
CLK_ALPHA_PLL_TYPE_LUCID_OLE,
CLK_ALPHA_PLL_TYPE_PONGO_ELU,
CLK_ALPHA_PLL_TYPE_TAYCAN_ELU,
+ CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T = CLK_ALPHA_PLL_TYPE_TAYCAN_ELU,
CLK_ALPHA_PLL_TYPE_RIVIAN_EVO,
CLK_ALPHA_PLL_TYPE_DEFAULT_EVO,
CLK_ALPHA_PLL_TYPE_BRAMMO_EVO,
@@ -81,6 +82,7 @@ struct pll_vco {
* struct clk_alpha_pll - phase locked loop (PLL)
* @offset: base address of registers
* @regs: alpha pll register map (see @clk_alpha_pll_regs)
+ * @config: pll configuration settings
* @vco_table: array of VCO settings
* @num_vco: number of VCO settings in @vco_table
* @flags: bitmask to indicate features supported by the hardware
@@ -90,6 +92,7 @@ struct clk_alpha_pll {
u32 offset;
const u8 *regs;
+ const struct alpha_pll_config *config;
const struct pll_vco *vco_table;
size_t num_vco;
#define SUPPORTS_OFFLINE_REQ BIT(0)
@@ -190,20 +193,24 @@ extern const struct clk_ops clk_alpha_pll_zonda_ops;
extern const struct clk_ops clk_alpha_pll_lucid_evo_ops;
#define clk_alpha_pll_taycan_elu_ops clk_alpha_pll_lucid_evo_ops
+#define clk_alpha_pll_taycan_eko_t_ops clk_alpha_pll_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops;
#define clk_alpha_pll_reset_lucid_ole_ops clk_alpha_pll_reset_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops;
#define clk_alpha_pll_fixed_lucid_ole_ops clk_alpha_pll_fixed_lucid_evo_ops
#define clk_alpha_pll_fixed_taycan_elu_ops clk_alpha_pll_fixed_lucid_evo_ops
+#define clk_alpha_pll_fixed_taycan_eko_t_ops clk_alpha_pll_fixed_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops;
#define clk_alpha_pll_postdiv_lucid_ole_ops clk_alpha_pll_postdiv_lucid_evo_ops
#define clk_alpha_pll_postdiv_taycan_elu_ops clk_alpha_pll_postdiv_lucid_evo_ops
+#define clk_alpha_pll_postdiv_taycan_eko_t_ops clk_alpha_pll_postdiv_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_pongo_elu_ops;
extern const struct clk_ops clk_alpha_pll_rivian_evo_ops;
#define clk_alpha_pll_postdiv_rivian_evo_ops clk_alpha_pll_postdiv_fabia_ops
extern const struct clk_ops clk_alpha_pll_regera_ops;
+extern const struct clk_ops clk_alpha_pll_slew_ops;
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
@@ -230,6 +237,8 @@ void clk_pongo_elu_pll_configure(struct clk_alpha_pll *pll, struct regmap *regma
const struct alpha_pll_config *config);
#define clk_taycan_elu_pll_configure(pll, regmap, config) \
clk_lucid_evo_pll_configure(pll, regmap, config)
+#define clk_taycan_eko_t_pll_configure(pll, regmap, config) \
+ clk_lucid_evo_pll_configure(pll, regmap, config)
void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
@@ -237,5 +246,6 @@ void clk_stromer_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_regera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
+void qcom_clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap);
#endif
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 0f10090d4ae6..444e7d8648d4 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -142,8 +142,8 @@ static int clk_branch2_mem_enable(struct clk_hw *hw)
u32 val;
int ret;
- regmap_update_bits(branch.clkr.regmap, mem_br->mem_enable_reg,
- mem_br->mem_enable_ack_mask, mem_br->mem_enable_ack_mask);
+ regmap_assign_bits(branch.clkr.regmap, mem_br->mem_enable_reg,
+ mem_br->mem_enable_mask, !mem_br->mem_enable_invert);
ret = regmap_read_poll_timeout(branch.clkr.regmap, mem_br->mem_ack_reg,
val, val & mem_br->mem_enable_ack_mask, 0, 200);
@@ -159,8 +159,8 @@ static void clk_branch2_mem_disable(struct clk_hw *hw)
{
struct clk_mem_branch *mem_br = to_clk_mem_branch(hw);
- regmap_update_bits(mem_br->branch.clkr.regmap, mem_br->mem_enable_reg,
- mem_br->mem_enable_ack_mask, 0);
+ regmap_assign_bits(mem_br->branch.clkr.regmap, mem_br->mem_enable_reg,
+ mem_br->mem_enable_mask, mem_br->mem_enable_invert);
return clk_branch2_disable(hw);
}
diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
index 292756435f53..6bc2ba2b5350 100644
--- a/drivers/clk/qcom/clk-branch.h
+++ b/drivers/clk/qcom/clk-branch.h
@@ -44,6 +44,8 @@ struct clk_branch {
* @mem_enable_reg: branch clock memory gating register
* @mem_ack_reg: branch clock memory ack register
* @mem_enable_ack_mask: branch clock memory enable and ack field in @mem_ack_reg
+ * @mem_enable_mask: branch clock memory enable mask
+ * @mem_enable_invert: set when the memory enable/disable logic is inverted
* @branch: branch clock gating handle
*
* Clock which can gate its memories.
@@ -52,6 +54,8 @@ struct clk_mem_branch {
u32 mem_enable_reg;
u32 mem_ack_reg;
u32 mem_enable_ack_mask;
+ u32 mem_enable_mask;
+ bool mem_enable_invert;
struct clk_branch branch;
};
diff --git a/drivers/clk/qcom/clk-cbf-8996.c b/drivers/clk/qcom/clk-cbf-8996.c
index ce4efcd995ea..0b40ed601f9a 100644
--- a/drivers/clk/qcom/clk-cbf-8996.c
+++ b/drivers/clk/qcom/clk-cbf-8996.c
@@ -212,7 +212,6 @@ static const struct regmap_config cbf_msm8996_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x10000,
- .fast_io = true,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
diff --git a/drivers/clk/qcom/clk-cpu-8996.c b/drivers/clk/qcom/clk-cpu-8996.c
index 72689448a653..21d13c0841ed 100644
--- a/drivers/clk/qcom/clk-cpu-8996.c
+++ b/drivers/clk/qcom/clk-cpu-8996.c
@@ -411,7 +411,6 @@ static const struct regmap_config cpu_msm8996_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x80210,
- .fast_io = true,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index 987141c91fe0..31f0650b48ba 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -423,7 +423,7 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
rate = tmp;
}
} else {
- rate = clk_hw_get_rate(p);
+ rate = clk_hw_get_rate(p);
}
req->best_parent_hw = p;
req->best_parent_rate = rate;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 8001fd9faf9d..e18cb8807d73 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -201,7 +201,7 @@ __clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, u32 cfg)
regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
m &= mask;
regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
- n = ~n;
+ n = ~n;
n &= mask;
n += m;
mode = cfg & CFG_MODE_MASK;
@@ -274,7 +274,7 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
rate = tmp;
}
} else {
- rate = clk_hw_get_rate(p);
+ rate = clk_hw_get_rate(p);
}
req->best_parent_hw = p;
req->best_parent_rate = rate;
@@ -311,7 +311,7 @@ __clk_rcg2_select_conf(struct clk_hw *hw, const struct freq_multi_tbl *f,
if (!p)
continue;
- parent_rate = clk_hw_get_rate(p);
+ parent_rate = clk_hw_get_rate(p);
rate = calc_rate(parent_rate, conf->n, conf->m, conf->n, conf->pre_div);
if (rate == req_rate) {
@@ -382,7 +382,7 @@ static int _freq_tbl_fm_determine_rate(struct clk_hw *hw, const struct freq_mult
rate = tmp;
}
} else {
- rate = clk_hw_get_rate(p);
+ rate = clk_hw_get_rate(p);
}
req->best_parent_hw = p;
diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c
index 63c9fca0d65d..4f5395f0ab6d 100644
--- a/drivers/clk/qcom/clk-regmap-divider.c
+++ b/drivers/clk/qcom/clk-regmap-divider.c
@@ -15,8 +15,8 @@ static inline struct clk_regmap_div *to_clk_regmap_div(struct clk_hw *hw)
return container_of(to_clk_regmap(hw), struct clk_regmap_div, clkr);
}
-static long div_round_ro_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int div_ro_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_regmap_div *divider = to_clk_regmap_div(hw);
struct clk_regmap *clkr = &divider->clkr;
@@ -26,17 +26,24 @@ static long div_round_ro_rate(struct clk_hw *hw, unsigned long rate,
val >>= divider->shift;
val &= BIT(divider->width) - 1;
- return divider_ro_round_rate(hw, rate, prate, NULL, divider->width,
- CLK_DIVIDER_ROUND_CLOSEST, val);
+ req->rate = divider_ro_round_rate(hw, req->rate,
+ &req->best_parent_rate, NULL,
+ divider->width,
+ CLK_DIVIDER_ROUND_CLOSEST, val);
+
+ return 0;
}
-static long div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int div_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct clk_regmap_div *divider = to_clk_regmap_div(hw);
- return divider_round_rate(hw, rate, prate, NULL, divider->width,
- CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ NULL,
+ divider->width,
+ CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
}
static int div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -70,14 +77,14 @@ static unsigned long div_recalc_rate(struct clk_hw *hw,
}
const struct clk_ops clk_regmap_div_ops = {
- .round_rate = div_round_rate,
+ .determine_rate = div_determine_rate,
.set_rate = div_set_rate,
.recalc_rate = div_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_regmap_div_ops);
const struct clk_ops clk_regmap_div_ro_ops = {
- .round_rate = div_round_ro_rate,
+ .determine_rate = div_ro_determine_rate,
.recalc_rate = div_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_regmap_div_ro_ops);
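The .round_rate to .determine_rate conversions in this patch are all mechanical: the old callback returned the rounded rate (or a negative errno) and updated *prate in place, while the new one fills in req->rate (and, where relevant, req->best_parent_rate) on a struct clk_rate_request and returns 0 on success. A generic before/after sketch, with foo_calc_closest() standing in for whatever rounding helper a driver uses:

/* before */
static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *prate)
{
	return foo_calc_closest(rate, *prate);
}

/* after */
static int foo_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	req->rate = foo_calc_closest(req->rate, req->best_parent_rate);

	return 0;
}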
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
index ccc112c21667..be0145631197 100644
--- a/drivers/clk/qcom/clk-rpm.c
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -351,15 +351,15 @@ static int clk_rpm_set_rate(struct clk_hw *hw,
return 0;
}
-static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_rpm_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/*
* RPM handles rate rounding and we don't have a way to
* know what the rate will be, so just return whatever
* rate is requested.
*/
- return rate;
+ return 0;
}
static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
@@ -383,7 +383,7 @@ static const struct clk_ops clk_rpm_xo_ops = {
static const struct clk_ops clk_rpm_fixed_ops = {
.prepare = clk_rpm_fixed_prepare,
.unprepare = clk_rpm_fixed_unprepare,
- .round_rate = clk_rpm_round_rate,
+ .determine_rate = clk_rpm_determine_rate,
.recalc_rate = clk_rpm_recalc_rate,
};
@@ -391,7 +391,7 @@ static const struct clk_ops clk_rpm_ops = {
.prepare = clk_rpm_prepare,
.unprepare = clk_rpm_unprepare,
.set_rate = clk_rpm_set_rate,
- .round_rate = clk_rpm_round_rate,
+ .determine_rate = clk_rpm_determine_rate,
.recalc_rate = clk_rpm_recalc_rate,
};
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 00fb3e53a388..1a98b3a0c528 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -87,7 +87,7 @@ static DEFINE_MUTEX(rpmh_clk_lock);
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpmh_ops, \
.name = #_name, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -105,7 +105,7 @@ static DEFINE_MUTEX(rpmh_clk_lock);
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpmh_ops, \
.name = #_name "_ao", \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -182,7 +182,7 @@ static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
}
c->last_sent_aggr_state = c->aggr_state;
- c->peer->last_sent_aggr_state = c->last_sent_aggr_state;
+ c->peer->last_sent_aggr_state = c->last_sent_aggr_state;
return 0;
}
@@ -321,10 +321,10 @@ static int clk_rpmh_bcm_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long clk_rpmh_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_rpmh_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return rate;
+ return 0;
}
static unsigned long clk_rpmh_bcm_recalc_rate(struct clk_hw *hw,
@@ -339,7 +339,7 @@ static const struct clk_ops clk_rpmh_bcm_ops = {
.prepare = clk_rpmh_bcm_prepare,
.unprepare = clk_rpmh_bcm_unprepare,
.set_rate = clk_rpmh_bcm_set_rate,
- .round_rate = clk_rpmh_round_rate,
+ .determine_rate = clk_rpmh_determine_rate,
.recalc_rate = clk_rpmh_bcm_recalc_rate,
};
@@ -386,8 +386,15 @@ DEFINE_CLK_RPMH_VRM(clk6, _a2, "clka6", 2);
DEFINE_CLK_RPMH_VRM(clk7, _a2, "clka7", 2);
DEFINE_CLK_RPMH_VRM(clk8, _a2, "clka8", 2);
+DEFINE_CLK_RPMH_VRM(clk7, _a4, "clka7", 4);
+
DEFINE_CLK_RPMH_VRM(div_clk1, _div2, "divclka1", 2);
+DEFINE_CLK_RPMH_VRM(clk3, _a, "C3A_E0", 1);
+DEFINE_CLK_RPMH_VRM(clk4, _a, "C4A_E0", 1);
+DEFINE_CLK_RPMH_VRM(clk5, _a, "C5A_E0", 1);
+DEFINE_CLK_RPMH_VRM(clk8, _a, "C8A_E0", 1);
+
DEFINE_CLK_RPMH_BCM(ce, "CE0");
DEFINE_CLK_RPMH_BCM(hwkm, "HK0");
DEFINE_CLK_RPMH_BCM(ipa, "IP0");
@@ -541,6 +548,29 @@ static const struct clk_rpmh_desc clk_rpmh_sc8180x = {
.num_clks = ARRAY_SIZE(sc8180x_rpmh_clocks),
};
+static struct clk_hw *milos_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div4.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div4_ao.hw,
+ [RPMH_LN_BB_CLK2] = &clk_rpmh_clk7_a4.hw,
+ [RPMH_LN_BB_CLK2_A] = &clk_rpmh_clk7_a4_ao.hw,
+ /*
+ * RPMH_LN_BB_CLK3(_A) and RPMH_LN_BB_CLK4(_A) are marked as optional
+ * downstream, but do not exist in cmd-db on SM7635, so skip them.
+ */
+ [RPMH_RF_CLK1] = &clk_rpmh_clk1_a1.hw,
+ [RPMH_RF_CLK1_A] = &clk_rpmh_clk1_a1_ao.hw,
+ [RPMH_RF_CLK2] = &clk_rpmh_clk2_a1.hw,
+ [RPMH_RF_CLK2_A] = &clk_rpmh_clk2_a1_ao.hw,
+ [RPMH_RF_CLK3] = &clk_rpmh_clk3_a1.hw,
+ [RPMH_RF_CLK3_A] = &clk_rpmh_clk3_a1_ao.hw,
+ [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_milos = {
+ .clks = milos_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(milos_rpmh_clocks),
+};
+
static struct clk_hw *sm8250_rpmh_clocks[] = {
[RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
[RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
@@ -825,6 +855,7 @@ static struct clk_hw *qcs615_rpmh_clocks[] = {
[RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw,
[RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw,
[RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw,
+ [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw,
};
static const struct clk_rpmh_desc clk_rpmh_qcs615 = {
@@ -854,6 +885,22 @@ static const struct clk_rpmh_desc clk_rpmh_sm8750 = {
.clka_optional = true,
};
+static struct clk_hw *glymur_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
+ [RPMH_RF_CLK3] = &clk_rpmh_clk3_a.hw,
+ [RPMH_RF_CLK3_A] = &clk_rpmh_clk3_a_ao.hw,
+ [RPMH_RF_CLK4] = &clk_rpmh_clk4_a.hw,
+ [RPMH_RF_CLK4_A] = &clk_rpmh_clk4_a_ao.hw,
+ [RPMH_RF_CLK5] = &clk_rpmh_clk5_a.hw,
+ [RPMH_RF_CLK5_A] = &clk_rpmh_clk5_a_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_glymur = {
+ .clks = glymur_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(glymur_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -943,6 +990,8 @@ static int clk_rpmh_probe(struct platform_device *pdev)
}
static const struct of_device_id clk_rpmh_match_table[] = {
+ { .compatible = "qcom,glymur-rpmh-clk", .data = &clk_rpmh_glymur},
+ { .compatible = "qcom,milos-rpmh-clk", .data = &clk_rpmh_milos},
{ .compatible = "qcom,qcs615-rpmh-clk", .data = &clk_rpmh_qcs615},
{ .compatible = "qcom,qdu1000-rpmh-clk", .data = &clk_rpmh_qdu1000},
{ .compatible = "qcom,sa8775p-rpmh-clk", .data = &clk_rpmh_sa8775p},
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 3fbaa646286f..103db984a40b 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -30,7 +30,7 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_ops, \
.name = #_name, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -47,7 +47,7 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_ops, \
.name = #_active, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -74,7 +74,7 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_branch_ops, \
.name = #_name, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -92,7 +92,7 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_branch_ops, \
.name = #_active, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -370,15 +370,15 @@ static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long clk_smd_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_smd_rpm_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/*
* RPM handles rate rounding and we don't have a way to
* know what the rate will be, so just return whatever
* rate is requested.
*/
- return rate;
+ return 0;
}
static unsigned long clk_smd_rpm_recalc_rate(struct clk_hw *hw,
@@ -427,7 +427,7 @@ static const struct clk_ops clk_smd_rpm_ops = {
.prepare = clk_smd_rpm_prepare,
.unprepare = clk_smd_rpm_unprepare,
.set_rate = clk_smd_rpm_set_rate,
- .round_rate = clk_smd_rpm_round_rate,
+ .determine_rate = clk_smd_rpm_determine_rate,
.recalc_rate = clk_smd_rpm_recalc_rate,
};
diff --git a/drivers/clk/qcom/clk-spmi-pmic-div.c b/drivers/clk/qcom/clk-spmi-pmic-div.c
index 41a0a4f3b4fb..3e2ac6745325 100644
--- a/drivers/clk/qcom/clk-spmi-pmic-div.c
+++ b/drivers/clk/qcom/clk-spmi-pmic-div.c
@@ -112,16 +112,18 @@ static void clk_spmi_pmic_div_disable(struct clk_hw *hw)
spin_unlock_irqrestore(&clkdiv->lock, flags);
}
-static long clk_spmi_pmic_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_spmi_pmic_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned int div, div_factor;
- div = DIV_ROUND_UP(*parent_rate, rate);
+ div = DIV_ROUND_UP(req->best_parent_rate, req->rate);
div_factor = div_to_div_factor(div);
div = div_factor_to_div(div_factor);
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static unsigned long
@@ -169,7 +171,7 @@ static const struct clk_ops clk_spmi_pmic_div_ops = {
.disable = clk_spmi_pmic_div_disable,
.set_rate = clk_spmi_pmic_div_set_rate,
.recalc_rate = clk_spmi_pmic_div_recalc_rate,
- .round_rate = clk_spmi_pmic_div_round_rate,
+ .determine_rate = clk_spmi_pmic_div_determine_rate,
};
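A worked pass through the new determine_rate path, assuming the power-of-two divider encoding behind div_to_div_factor()/div_factor_to_div() in this file; the request values are hypothetical:

/*
 * req->best_parent_rate = 19200000, req->rate = 5000000
 * div        = DIV_ROUND_UP(19200000, 5000000) = 4
 * div_factor = div_to_div_factor(4)            -> snapped to a supported step
 * div        = div_factor_to_div(div_factor)   = 4
 * req->rate  = 19200000 / 4                    = 4800000
 */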
struct spmi_pmic_div_clk_cc {
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 9e3380fd7181..121591886774 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -9,10 +9,13 @@
#include <linux/platform_device.h>
#include <linux/clk-provider.h>
#include <linux/interconnect-clk.h>
+#include <linux/pm_runtime.h>
#include <linux/reset-controller.h>
#include <linux/of.h>
#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
#include "clk-rcg.h"
#include "clk-regmap.h"
#include "reset.h"
@@ -274,8 +277,8 @@ static int qcom_cc_icc_register(struct device *dev,
icd[i].slave_id = desc->icc_hws[i].slave_id;
hws = &desc->clks[desc->icc_hws[i].clk_id]->hw;
icd[i].clk = devm_clk_hw_get_clk(dev, hws, "icc");
- if (!icd[i].clk)
- return dev_err_probe(dev, -ENOENT,
+ if (IS_ERR(icd[i].clk))
+ return dev_err_probe(dev, PTR_ERR(icd[i].clk),
"(%d) clock entry is null\n", i);
icd[i].name = clk_hw_get_name(hws);
}
@@ -284,6 +287,40 @@ static int qcom_cc_icc_register(struct device *dev,
desc->num_icc_hws, icd);
}
+static int qcom_cc_clk_pll_configure(const struct qcom_cc_driver_data *data,
+ struct regmap *regmap)
+{
+ const struct clk_init_data *init;
+ struct clk_alpha_pll *pll;
+ int i;
+
+ for (i = 0; i < data->num_alpha_plls; i++) {
+ pll = data->alpha_plls[i];
+ init = pll->clkr.hw.init;
+
+ if (!pll->config || !pll->regs) {
+ pr_err("%s: missing pll config or regs\n", init->name);
+ return -EINVAL;
+ }
+
+ qcom_clk_alpha_pll_configure(pll, regmap);
+ }
+
+ return 0;
+}
+
+static void qcom_cc_clk_regs_configure(struct device *dev, const struct qcom_cc_driver_data *data,
+ struct regmap *regmap)
+{
+ int i;
+
+ for (i = 0; i < data->num_clk_cbcrs; i++)
+ qcom_branch_set_clk_en(regmap, data->clk_cbcrs[i]);
+
+ if (data->clk_regs_configure)
+ data->clk_regs_configure(dev, regmap);
+}
+
int qcom_cc_really_probe(struct device *dev,
const struct qcom_cc_desc *desc, struct regmap *regmap)
{
@@ -304,6 +341,24 @@ int qcom_cc_really_probe(struct device *dev,
if (ret < 0 && ret != -EEXIST)
return ret;
+ if (desc->use_rpm) {
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+ }
+
+ if (desc->driver_data) {
+ ret = qcom_cc_clk_pll_configure(desc->driver_data, regmap);
+ if (ret)
+ goto put_rpm;
+
+ qcom_cc_clk_regs_configure(dev, desc->driver_data, regmap);
+ }
+
reset = &cc->reset;
reset->rcdev.of_node = dev->of_node;
reset->rcdev.ops = &qcom_reset_ops;
@@ -314,23 +369,35 @@ int qcom_cc_really_probe(struct device *dev,
ret = devm_reset_controller_register(dev, &reset->rcdev);
if (ret)
- return ret;
+ goto put_rpm;
if (desc->gdscs && desc->num_gdscs) {
scd = devm_kzalloc(dev, sizeof(*scd), GFP_KERNEL);
- if (!scd)
- return -ENOMEM;
+ if (!scd) {
+ ret = -ENOMEM;
+ goto put_rpm;
+ }
scd->dev = dev;
scd->scs = desc->gdscs;
scd->num = desc->num_gdscs;
scd->pd_list = cc->pd_list;
ret = gdsc_register(scd, &reset->rcdev, regmap);
if (ret)
- return ret;
+ goto put_rpm;
ret = devm_add_action_or_reset(dev, qcom_cc_gdsc_unregister,
scd);
if (ret)
- return ret;
+ goto put_rpm;
+ }
+
+ if (desc->driver_data &&
+ desc->driver_data->dfs_rcgs &&
+ desc->driver_data->num_dfs_rcgs) {
+ ret = qcom_cc_register_rcg_dfs(regmap,
+ desc->driver_data->dfs_rcgs,
+ desc->driver_data->num_dfs_rcgs);
+ if (ret)
+ goto put_rpm;
}
cc->rclks = rclks;
@@ -341,7 +408,7 @@ int qcom_cc_really_probe(struct device *dev,
for (i = 0; i < num_clk_hws; i++) {
ret = devm_clk_hw_register(dev, clk_hws[i]);
if (ret)
- return ret;
+ goto put_rpm;
}
for (i = 0; i < num_clks; i++) {
@@ -350,14 +417,20 @@ int qcom_cc_really_probe(struct device *dev,
ret = devm_clk_register_regmap(dev, rclks[i]);
if (ret)
- return ret;
+ goto put_rpm;
}
ret = devm_of_clk_add_hw_provider(dev, qcom_cc_clk_hw_get, cc);
if (ret)
- return ret;
+ goto put_rpm;
+
+ ret = qcom_cc_icc_register(dev, desc);
+
+put_rpm:
+ if (desc->use_rpm)
+ pm_runtime_put(dev);
- return qcom_cc_icc_register(dev, desc);
+ return ret;
}
EXPORT_SYMBOL_GPL(qcom_cc_really_probe);
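Once pm_runtime_resume_and_get() has succeeded, qcom_cc_really_probe() can no longer bail out with a bare return; every later failure has to fall through to the put_rpm label so the runtime-PM reference is dropped. Condensed, the new control flow is:

	if (desc->use_rpm) {
		ret = pm_runtime_resume_and_get(dev);
		if (ret)
			return ret;		/* nothing to undo yet */
	}
	...
	ret = devm_clk_hw_register(dev, clk_hws[i]);
	if (ret)
		goto put_rpm;			/* balance the get above */
	...
put_rpm:
	if (desc->use_rpm)
		pm_runtime_put(dev);

	return ret;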
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 7ace5d7f5836..953c91f7b145 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -25,6 +25,16 @@ struct qcom_icc_hws_data {
int clk_id;
};
+struct qcom_cc_driver_data {
+ struct clk_alpha_pll **alpha_plls;
+ size_t num_alpha_plls;
+ u32 *clk_cbcrs;
+ size_t num_clk_cbcrs;
+ const struct clk_rcg_dfs_data *dfs_rcgs;
+ size_t num_dfs_rcgs;
+ void (*clk_regs_configure)(struct device *dev, struct regmap *regmap);
+};
+
struct qcom_cc_desc {
const struct regmap_config *config;
struct clk_regmap **clks;
@@ -38,6 +48,8 @@ struct qcom_cc_desc {
const struct qcom_icc_hws_data *icc_hws;
size_t num_icc_hws;
unsigned int icc_first_node_id;
+ bool use_rpm;
+ struct qcom_cc_driver_data *driver_data;
};
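A provider that wants common code to program its PLLs and force-enable critical CBCRs would describe them once, roughly as below; all names and the register offset are hypothetical:

static struct clk_alpha_pll *example_cc_plls[] = {
	&example_cc_pll0,
};

static u32 example_cc_critical_cbcrs[] = {
	0x32004,	/* e.g. an always-on AHB CBCR */
};

static struct qcom_cc_driver_data example_cc_driver_data = {
	.alpha_plls = example_cc_plls,
	.num_alpha_plls = ARRAY_SIZE(example_cc_plls),
	.clk_cbcrs = example_cc_critical_cbcrs,
	.num_clk_cbcrs = ARRAY_SIZE(example_cc_critical_cbcrs),
};

static const struct qcom_cc_desc example_cc_desc = {
	/* .config, .clks, ... as before */
	.use_rpm = true,
	.driver_data = &example_cc_driver_data,
};

Note that each listed PLL must carry non-NULL .config and .regs pointers, since qcom_cc_clk_pll_configure() fails the probe with -EINVAL otherwise.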
/**
diff --git a/drivers/clk/qcom/dispcc-glymur.c b/drivers/clk/qcom/dispcc-glymur.c
new file mode 100644
index 000000000000..5203fa6383f6
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-glymur.c
@@ -0,0 +1,1982 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,glymur-dispcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+ DT_DP1_PHY_PLL_LINK_CLK,
+ DT_DP1_PHY_PLL_VCO_DIV_CLK,
+ DT_DP2_PHY_PLL_LINK_CLK,
+ DT_DP2_PHY_PLL_VCO_DIV_CLK,
+ DT_DP3_PHY_PLL_LINK_CLK,
+ DT_DP3_PHY_PLL_VCO_DIV_CLK,
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_BYTECLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+ DT_STANDALONE_PHY_PLL0_LINK_CLK,
+ DT_STANDALONE_PHY_PLL0_VCO_DIV_CLK,
+ DT_STANDALONE_PHY_PLL1_LINK_CLK,
+ DT_STANDALONE_PHY_PLL1_VCO_DIV_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_SLEEP_CLK,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DISP_CC_PLL1_OUT_EVEN,
+ P_DISP_CC_PLL1_OUT_MAIN,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DP1_PHY_PLL_LINK_CLK,
+ P_DP1_PHY_PLL_VCO_DIV_CLK,
+ P_DP2_PHY_PLL_LINK_CLK,
+ P_DP2_PHY_PLL_VCO_DIV_CLK,
+ P_DP3_PHY_PLL_LINK_CLK,
+ P_DP3_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_STANDALONE_PHY_PLL0_LINK_CLK,
+ P_STANDALONE_PHY_PLL0_VCO_DIV_CLK,
+ P_STANDALONE_PHY_PLL1_LINK_CLK,
+ P_STANDALONE_PHY_PLL1_VCO_DIV_CLK,
+};
+
+static const struct pll_vco taycan_eko_t_vco[] = {
+ { 249600000, 2500000000, 0 },
+};
+
+/* 257.142858 MHz Configuration */
+static const struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0xd,
+ .alpha = 0x6492,
+ .config_ctl_val = 0x25c400e7,
+ .config_ctl_hi_val = 0x0a8060e0,
+ .config_ctl_hi1_val = 0xf51dea20,
+ .user_ctl_val = 0x00000008,
+ .user_ctl_hi_val = 0x00000002,
+};
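The value in the comment follows directly from the L/alpha pair, assuming the usual 16-bit fractional (alpha) encoding against the 19.2 MHz TCXO:

	rate = xo * (L + alpha / 2^16)
	     = 19200000 * (0xd + 0x6492 / 65536)
	     = 19200000 * (13 + 25746 / 65536)
	     ~ 257142857 Hz

The 600 MHz disp_cc_pll1 configuration below checks out the same way: 19200000 * (0x1f + 0x4000 / 65536) = 19.2 MHz * 31.25 = 600 MHz.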
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .config = &disp_cc_pll0_config,
+ .vco_table = taycan_eko_t_vco,
+ .num_vco = ARRAY_SIZE(taycan_eko_t_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_taycan_eko_t_ops,
+ },
+ },
+};
+
+/* 600.0 MHz Configuration */
+static const struct alpha_pll_config disp_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x25c400e7,
+ .config_ctl_hi_val = 0x0a8060e0,
+ .config_ctl_hi1_val = 0xf51dea20,
+ .user_ctl_val = 0x00000008,
+ .user_ctl_hi_val = 0x00000002,
+};
+
+static struct clk_alpha_pll disp_cc_pll1 = {
+ .offset = 0x1000,
+ .config = &disp_cc_pll1_config,
+ .vco_table = taycan_eko_t_vco,
+ .num_vco = ARRAY_SIZE(taycan_eko_t_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_taycan_eko_t_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_STANDALONE_PHY_PLL0_VCO_DIV_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_STANDALONE_PHY_PLL1_VCO_DIV_CLK, 5 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_STANDALONE_PHY_PLL0_VCO_DIV_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_STANDALONE_PHY_PLL1_VCO_DIV_CLK },
+ { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+ { P_DP2_PHY_PLL_LINK_CLK, 3 },
+ { P_DP3_PHY_PLL_LINK_CLK, 4 },
+ { P_STANDALONE_PHY_PLL1_LINK_CLK, 5 },
+ { P_STANDALONE_PHY_PLL0_LINK_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+ { .index = DT_DP2_PHY_PLL_LINK_CLK },
+ { .index = DT_DP3_PHY_PLL_LINK_CLK },
+ { .index = DT_STANDALONE_PHY_PLL1_LINK_CLK },
+ { .index = DT_STANDALONE_PHY_PLL0_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map disp_cc_parent_map_9[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_9[] = {
+ { .index = DT_SLEEP_CLK },
+};
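One detail worth keeping in mind when reading these tables: the second column of a parent_map entry is the hardware mux CFG value, while the clock framework's parent index is simply the position in the matching clk_parent_data array. In disp_cc_parent_map_4 above, for example, P_DSI1_PHY_PLL_OUT_DSICLK is programmed as mux value 3 but resolved as framework parent index 2.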
+
+static const struct freq_tbl ftbl_disp_cc_esync0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_esync0_clk_src = {
+ .cmd_rcgr = 0x80c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync0_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_esync1_clk_src = {
+ .cmd_rcgr = 0x80d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync1_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x8360,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .hw_clk_ctrl = true,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x8180,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x819c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x8234,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x81e8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x8204,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x821c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = {
+ .cmd_rcgr = 0x8298,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_shared_ops,

+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
+ .cmd_rcgr = 0x827c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = {
+ .cmd_rcgr = 0x824c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = {
+ .cmd_rcgr = 0x8264,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = {
+ .cmd_rcgr = 0x82fc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
+ .cmd_rcgr = 0x82b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = {
+ .cmd_rcgr = 0x82cc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = {
+ .cmd_rcgr = 0x82e4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = {
+ .cmd_rcgr = 0x8348,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
+ .cmd_rcgr = 0x832c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = {
+ .cmd_rcgr = 0x8314,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x81b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x81d0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(150000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(156000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(205000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(337000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(417000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(532000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(600000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(660000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(717000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
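Every non-XO entry above sources P_DISP_CC_PLL0_OUT_MAIN through a fixed divide-by-3, so hitting these rates relies on CLK_SET_RATE_PARENT reprogramming disp_cc_pll0 itself to three times the target:

/*
 *  85714286 Hz * 3 = 257142858 Hz  (the PLL0 boot configuration above)
 * 717000000 Hz * 3 = 2151 MHz      (still below the 2.5 GHz VCO ceiling)
 * F(rate, src, h, m, n) stores the pre-divider as 2 * h - 1, per clk-rcg.h.
 */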
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x8150,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_7,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .hw_clk_ctrl = true,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x8108,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x8120,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk2_clk_src = {
+ .cmd_rcgr = 0x8138,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk2_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x8168,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_osc_clk_src = {
+ .cmd_rcgr = 0x80f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_8,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_osc_clk_src",
+ .parent_data = disp_cc_parent_data_8,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xe064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_9,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_9,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xe044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_xo_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x8198,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x81b4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x8200,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_dpin_div_clk_src = {
+ .reg = 0x838c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_dpin_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = {
+ .reg = 0x8294,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx1_link_dpin_div_clk_src = {
+ .reg = 0x8390,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_dpin_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = {
+ .reg = 0x82c8,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx2_link_dpin_div_clk_src = {
+ .reg = 0x8394,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_dpin_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = {
+ .reg = 0x8344,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx3_link_dpin_div_clk_src = {
+ .reg = 0x8398,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_dpin_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_esync0_clk = {
+ .halt_reg = 0x80b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_esync0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_esync1_clk = {
+ .halt_reg = 0x80bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_esync1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_accu_shift_clk = {
+ .halt_reg = 0xe060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xe060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_accu_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x80b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_dpin_clk = {
+ .halt_reg = 0x837c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x837c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_dpin_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_dpin_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_aux_clk = {
+ .halt_reg = 0x8080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_clk = {
+ .halt_reg = 0x8070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_dpin_clk = {
+ .halt_reg = 0x8380,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8380,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_dpin_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_dpin_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = {
+ .halt_reg = 0x8078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = {
+ .halt_reg = 0x8068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = {
+ .halt_reg = 0x806c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x806c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
+ .halt_reg = 0x8074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_aux_clk = {
+ .halt_reg = 0x8098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8098,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_clk = {
+ .halt_reg = 0x808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_dpin_clk = {
+ .halt_reg = 0x8384,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8384,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_dpin_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_dpin_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = {
+ .halt_reg = 0x8090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = {
+ .halt_reg = 0x8084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = {
+ .halt_reg = 0x8088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_usb_router_link_intf_clk = {
+ .halt_reg = 0x8378,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8378,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_aux_clk = {
+ .halt_reg = 0x80a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_clk = {
+ .halt_reg = 0x80a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_dpin_clk = {
+ .halt_reg = 0x8388,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8388,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_dpin_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_dpin_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = {
+ .halt_reg = 0x80a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = {
+ .halt_reg = 0x809c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x809c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x8010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0xa014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x8020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk2_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0xc00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0xc008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0xa024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_osc_clk = {
+ .halt_reg = 0x80b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_osc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_osc_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
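+/*
+ * The *_wait_val fields are written into the GDSCR's EN_REST_WAIT,
+ * EN_FEW_WAIT and CLK_DIS_WAIT fields, pacing the power-on/off sequence.
+ */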
+static struct gdsc disp_cc_mdss_core_gdsc = {
+ .gdscr = 0x9000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "disp_cc_mdss_core_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc disp_cc_mdss_core_int2_gdsc = {
+ .gdscr = 0xb000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "disp_cc_mdss_core_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *disp_cc_glymur_clocks[] = {
+ [DISP_CC_ESYNC0_CLK] = &disp_cc_esync0_clk.clkr,
+ [DISP_CC_ESYNC0_CLK_SRC] = &disp_cc_esync0_clk_src.clkr,
+ [DISP_CC_ESYNC1_CLK] = &disp_cc_esync1_clk.clkr,
+ [DISP_CC_ESYNC1_CLK_SRC] = &disp_cc_esync1_clk_src.clkr,
+ [DISP_CC_MDSS_ACCU_SHIFT_CLK] = &disp_cc_mdss_accu_shift_clk.clkr,
+ [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DPIN_CLK] = &disp_cc_mdss_dptx0_link_dpin_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DPIN_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_dpin_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK] = &disp_cc_mdss_dptx1_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &disp_cc_mdss_dptx1_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK] = &disp_cc_mdss_dptx1_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &disp_cc_mdss_dptx1_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DPIN_CLK] = &disp_cc_mdss_dptx1_link_dpin_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DPIN_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_dpin_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &disp_cc_mdss_dptx1_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &disp_cc_mdss_dptx1_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK] = &disp_cc_mdss_dptx2_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK_SRC] = &disp_cc_mdss_dptx2_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK] = &disp_cc_mdss_dptx2_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK_SRC] = &disp_cc_mdss_dptx2_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DPIN_CLK] = &disp_cc_mdss_dptx2_link_dpin_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DPIN_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_dpin_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK] = &disp_cc_mdss_dptx2_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK] = &disp_cc_mdss_dptx2_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx2_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK] = &disp_cc_mdss_dptx3_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK_SRC] = &disp_cc_mdss_dptx3_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK] = &disp_cc_mdss_dptx3_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK_SRC] = &disp_cc_mdss_dptx3_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DPIN_CLK] = &disp_cc_mdss_dptx3_link_dpin_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DPIN_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_dpin_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_INTF_CLK] = &disp_cc_mdss_dptx3_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK] = &disp_cc_mdss_dptx3_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
+ [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
+ [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK2_CLK] = &disp_cc_mdss_pclk2_clk.clkr,
+ [DISP_CC_MDSS_PCLK2_CLK_SRC] = &disp_cc_mdss_pclk2_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_OSC_CLK] = &disp_cc_osc_clk.clkr,
+ [DISP_CC_OSC_CLK_SRC] = &disp_cc_osc_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL1] = &disp_cc_pll1.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+ [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *disp_cc_glymur_gdscs[] = {
+ [DISP_CC_MDSS_CORE_GDSC] = &disp_cc_mdss_core_gdsc,
+ [DISP_CC_MDSS_CORE_INT2_GDSC] = &disp_cc_mdss_core_int2_gdsc,
+};
+
+static const struct qcom_reset_map disp_cc_glymur_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
+};
+
+static struct clk_alpha_pll *disp_cc_glymur_plls[] = {
+ &disp_cc_pll0,
+ &disp_cc_pll1,
+};
+
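+/* Branch clocks that the common probe code force-enables and leaves running */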
+static u32 disp_cc_glymur_critical_cbcrs[] = {
+ 0xe07c, /* DISP_CC_SLEEP_CLK */
+ 0xe05c, /* DISP_CC_XO_CLK */
+};
+
+static const struct regmap_config disp_cc_glymur_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11014,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data disp_cc_glymur_driver_data = {
+ .alpha_plls = disp_cc_glymur_plls,
+ .num_alpha_plls = ARRAY_SIZE(disp_cc_glymur_plls),
+ .clk_cbcrs = disp_cc_glymur_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(disp_cc_glymur_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc disp_cc_glymur_desc = {
+ .config = &disp_cc_glymur_regmap_config,
+ .clks = disp_cc_glymur_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_glymur_clocks),
+ .resets = disp_cc_glymur_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_glymur_resets),
+ .gdscs = disp_cc_glymur_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_glymur_gdscs),
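+	/* runtime-PM handling is left to the common probe path */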
+ .use_rpm = true,
+ .driver_data = &disp_cc_glymur_driver_data,
+};
+
+static const struct of_device_id disp_cc_glymur_match_table[] = {
+ { .compatible = "qcom,glymur-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_glymur_match_table);
+
+static int disp_cc_glymur_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &disp_cc_glymur_desc);
+}
+
+static struct platform_driver disp_cc_glymur_driver = {
+ .probe = disp_cc_glymur_probe,
+ .driver = {
+ .name = "dispcc-glymur",
+ .of_match_table = disp_cc_glymur_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_glymur_driver);
+
+MODULE_DESCRIPTION("QTI DISPCC GLYMUR Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-milos.c b/drivers/clk/qcom/dispcc-milos.c
new file mode 100644
index 000000000000..95b6dd89d9ae
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-milos.c
@@ -0,0 +1,974 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,milos-dispcc.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "reset.h"
+#include "gdsc.h"
+
+/* Must match the order of clocks in the DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_AHB_CLK,
+ DT_GCC_DISP_GPLL0_CLK,
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+};
+
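+/*
+ * Bit 4 of this register enables dynamic clock gating for the MDP clocks;
+ * it is set once at probe from disp_cc_milos_clk_regs_configure() below.
+ */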
+#define DISP_CC_MISC_CMD 0xF000
+
+enum {
+ P_BI_TCXO,
+ P_DISP_CC_PLL0_OUT_EVEN,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_GCC_DISP_GPLL0_CLK,
+ P_SLEEP_CLK,
+};
+
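+/* Lucid OLE VCO operating range: 249.6 MHz to 2.3 GHz */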
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+/* 257.142858 MHz configuration: 19.2 MHz * (l + alpha / 2^16), l = 0xd */
+static const struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0xd,
+ .alpha = 0x6492,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .config = &disp_cc_pll0_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_DISP_GPLL0_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GCC_DISP_GPLL0_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_GCC_DISP_GPLL0_CLK, 4 },
+ { P_DISP_CC_PLL0_OUT_EVEN, 6 },
+};
+
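+/* Both selectable PLL0 taps (main and even) map onto the same clk_hw */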
+static const struct clk_parent_data disp_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .index = DT_GCC_DISP_GPLL0_CLK },
+ { .hw = &disp_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_7[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_7_ao[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_GCC_DISP_GPLL0_CLK, 8, 0, 0),
+ F(75000000, P_GCC_DISP_GPLL0_CLK, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x8130,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x8098,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x8118,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x80cc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x80e8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x8100,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
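+/*
+ * F() encodes the pre-divider as (2 * div - 1), so the fractional 1.5
+ * divider below is representable: it is stored as 2.
+ */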
+static const struct freq_tbl ftbl_disp_cc_mdss_esc0_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(12800000, P_BI_TCXO, 1.5, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x80b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
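+/*
+ * All PLL0-sourced rates keep the RCG divider at 3 and rely on
+ * CLK_SET_RATE_PARENT to reprogram the PLL instead, e.g.
+ * 85714286 Hz * 3 = 257142858 Hz, the PLL's initial rate.
+ */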
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(342000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(402000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(535000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(600000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(630000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x8068,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x8050,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x8080,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xe054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_7,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_7_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_7_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xe034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_xo_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x80b0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x80e4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_accu_clk = {
+ .halt_reg = 0xe050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xe050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_accu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0xa020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x8028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x802c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x802c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0xa010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x8014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0xc00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0xc008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0xa01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x8020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc disp_cc_mdss_core_gdsc = {
+ .gdscr = 0x9000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "disp_cc_mdss_core_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc disp_cc_mdss_core_int2_gdsc = {
+ .gdscr = 0xb000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "disp_cc_mdss_core_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *disp_cc_milos_clocks[] = {
+ [DISP_CC_MDSS_ACCU_CLK] = &disp_cc_mdss_accu_clk.clkr,
+ [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &disp_cc_mdss_dptx0_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+ [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static const struct qcom_reset_map disp_cc_milos_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
+};
+
+static struct gdsc *disp_cc_milos_gdscs[] = {
+ [DISP_CC_MDSS_CORE_GDSC] = &disp_cc_mdss_core_gdsc,
+ [DISP_CC_MDSS_CORE_INT2_GDSC] = &disp_cc_mdss_core_int2_gdsc,
+};
+
+static struct clk_alpha_pll *disp_cc_milos_plls[] = {
+ &disp_cc_pll0,
+};
+
+static u32 disp_cc_milos_critical_cbcrs[] = {
+ 0xe06c, /* DISP_CC_SLEEP_CLK */
+ 0xe04c, /* DISP_CC_XO_CLK */
+};
+
+static const struct regmap_config disp_cc_milos_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11008,
+ .fast_io = true,
+};
+
+static void disp_cc_milos_clk_regs_configure(struct device *dev, struct regmap *regmap)
+{
+ /* Enable clock gating for MDP clocks */
+ regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
+}
+
+static struct qcom_cc_driver_data disp_cc_milos_driver_data = {
+ .alpha_plls = disp_cc_milos_plls,
+ .num_alpha_plls = ARRAY_SIZE(disp_cc_milos_plls),
+ .clk_cbcrs = disp_cc_milos_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(disp_cc_milos_critical_cbcrs),
+ .clk_regs_configure = disp_cc_milos_clk_regs_configure,
+};
+
+static const struct qcom_cc_desc disp_cc_milos_desc = {
+ .config = &disp_cc_milos_regmap_config,
+ .clks = disp_cc_milos_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_milos_clocks),
+ .resets = disp_cc_milos_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_milos_resets),
+ .gdscs = disp_cc_milos_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_milos_gdscs),
+ .use_rpm = true,
+ .driver_data = &disp_cc_milos_driver_data,
+};
+
+static const struct of_device_id disp_cc_milos_match_table[] = {
+ { .compatible = "qcom,milos-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_milos_match_table);
+
+static int disp_cc_milos_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &disp_cc_milos_desc);
+}
+
+static struct platform_driver disp_cc_milos_driver = {
+ .probe = disp_cc_milos_probe,
+ .driver = {
+ .name = "disp_cc-milos",
+ .of_match_table = disp_cc_milos_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_milos_driver);
+
+MODULE_DESCRIPTION("QTI DISP_CC Milos Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-qcs615.c b/drivers/clk/qcom/dispcc-qcs615.c
new file mode 100644
index 000000000000..4a6d78466098
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-qcs615.c
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qcs615-dispcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0,
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+ DT_DP_PHY_PLL_LINK_CLK,
+ DT_DP_PHY_PLL_VCO_DIV_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DP_PHY_PLL_LINK_CLK,
+ P_DP_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_GPLL0_OUT_MAIN,
+};
+
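+/* VCO mode 2 spans 500 MHz to 1 GHz */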
+static const struct pll_vco disp_cc_pll_vco[] = {
+ { 500000000, 1000000000, 2 },
+};
+
+/* 576 MHz (19.2 MHz * 0x1e) configuration, VCO mode 2 */
+static struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0x1e,
+ .vco_val = BIT(21),
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .config = &disp_cc_pll0_config,
+ .vco_table = disp_cc_pll_vco,
+ .num_vco = ARRAY_SIZE(disp_cc_pll_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_slew_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP_PHY_PLL_LINK_CLK, 1 },
+ { P_DP_PHY_PLL_VCO_DIV_CLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP_PHY_PLL_LINK_CLK },
+ { .index = DT_DP_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .index = DT_GPLL0 },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0 },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x2170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x20c0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_aux1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
+ .cmd_rcgr = 0x2158,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_dp_aux1_clk_src,
+	.clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
+ .cmd_rcgr = 0x2110,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_crypto_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
+ .cmd_rcgr = 0x20f4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_link_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel1_clk_src = {
+ .cmd_rcgr = 0x2140,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel_clk_src = {
+ .cmd_rcgr = 0x2128,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_pixel_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x20dc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_dp_aux1_clk_src,
+	.clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
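+/*
+ * The RCG divider stays at 3 for every PLL0 rate; disp_cc_pll0 is slewed
+ * to three times the requested rate (192 MHz * 3 = 576 MHz matches the
+ * PLL's boot configuration above).
+ */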
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(192000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(256000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(307000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x2078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x2060,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+ .cmd_rcgr = 0x2090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rot_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x20a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_dp_aux1_clk_src,
+	.clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
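+/* clk_regmap_div_ro_ops: the divider is only read back, never reprogrammed */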
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x20d8,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dp_link_div_clk_src = {
+ .reg = 0x210c,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x2048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x2024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x2028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_aux_clk = {
+ .halt_reg = 0x2044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_crypto_clk = {
+ .halt_reg = 0x2038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_clk = {
+ .halt_reg = 0x2030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
+ .halt_reg = 0x2034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel1_clk = {
+ .halt_reg = 0x2040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel_clk = {
+ .halt_reg = 0x203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_pixel_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dp_pixel_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x202c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x202c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x2008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x2018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0x4004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x2004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+ .halt_reg = 0x2010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rot_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_rot_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0x400c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0x4008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x2020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mdss_core_gdsc = {
+ .gdscr = 0x3000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_core_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *disp_cc_qcs615_clocks[] = {
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_DP_AUX_CLK] = &disp_cc_mdss_dp_aux_clk.clkr,
+ [DISP_CC_MDSS_DP_AUX_CLK_SRC] = &disp_cc_mdss_dp_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DP_CRYPTO_CLK] = &disp_cc_mdss_dp_crypto_clk.clkr,
+ [DISP_CC_MDSS_DP_CRYPTO_CLK_SRC] = &disp_cc_mdss_dp_crypto_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK_CLK] = &disp_cc_mdss_dp_link_clk.clkr,
+ [DISP_CC_MDSS_DP_LINK_CLK_SRC] = &disp_cc_mdss_dp_link_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dp_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK_INTF_CLK] = &disp_cc_mdss_dp_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL1_CLK] = &disp_cc_mdss_dp_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL1_CLK_SRC] = &disp_cc_mdss_dp_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DP_PIXEL_CLK] = &disp_cc_mdss_dp_pixel_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL_CLK_SRC] = &disp_cc_mdss_dp_pixel_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+};
+
+static struct gdsc *disp_cc_qcs615_gdscs[] = {
+ [MDSS_CORE_GDSC] = &mdss_core_gdsc,
+};
+
+static struct clk_alpha_pll *disp_cc_qcs615_plls[] = {
+ &disp_cc_pll0,
+};
+
+static u32 disp_cc_qcs615_critical_cbcrs[] = {
+ 0x6054, /* DISP_CC_XO_CLK */
+};
+
+static const struct regmap_config disp_cc_qcs615_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x10000,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data disp_cc_qcs615_driver_data = {
+ .alpha_plls = disp_cc_qcs615_plls,
+ .num_alpha_plls = ARRAY_SIZE(disp_cc_qcs615_plls),
+ .clk_cbcrs = disp_cc_qcs615_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(disp_cc_qcs615_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc disp_cc_qcs615_desc = {
+ .config = &disp_cc_qcs615_regmap_config,
+ .clks = disp_cc_qcs615_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_qcs615_clocks),
+ .gdscs = disp_cc_qcs615_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_qcs615_gdscs),
+ .driver_data = &disp_cc_qcs615_driver_data,
+};
+
+static const struct of_device_id disp_cc_qcs615_match_table[] = {
+ { .compatible = "qcom,qcs615-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_qcs615_match_table);
+
+static int disp_cc_qcs615_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &disp_cc_qcs615_desc);
+}
+
+static struct platform_driver disp_cc_qcs615_driver = {
+ .probe = disp_cc_qcs615_probe,
+ .driver = {
+ .name = "dispcc-qcs615",
+ .of_match_table = disp_cc_qcs615_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_qcs615_driver);
+
+MODULE_DESCRIPTION("QTI DISPCC QCS615 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-sc7280.c b/drivers/clk/qcom/dispcc-sc7280.c
index 8bdf57734a3d..465dc06c8712 100644
--- a/drivers/clk/qcom/dispcc-sc7280.c
+++ b/drivers/clk/qcom/dispcc-sc7280.c
@@ -17,6 +17,7 @@
#include "clk-regmap-divider.h"
#include "common.h"
#include "gdsc.h"
+#include "reset.h"

enum {
P_BI_TCXO,
@@ -847,6 +848,11 @@ static struct gdsc *disp_cc_sc7280_gdscs[] = {
[DISP_CC_MDSS_CORE_GDSC] = &disp_cc_mdss_core_gdsc,
};

+static const struct qcom_reset_map disp_cc_sc7280_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x1000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0x2000 },
+};
+
static const struct regmap_config disp_cc_sc7280_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -861,6 +867,8 @@ static const struct qcom_cc_desc disp_cc_sc7280_desc = {
.num_clks = ARRAY_SIZE(disp_cc_sc7280_clocks),
.gdscs = disp_cc_sc7280_gdscs,
.num_gdscs = ARRAY_SIZE(disp_cc_sc7280_gdscs),
+ .resets = disp_cc_sc7280_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sc7280_resets),
};

static const struct of_device_id disp_cc_sc7280_match_table[] = {
diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
index b0bd163a449c..5b1d8f86515f 100644
--- a/drivers/clk/qcom/dispcc-sm6350.c
+++ b/drivers/clk/qcom/dispcc-sm6350.c
@@ -679,6 +679,11 @@ static struct clk_branch disp_cc_xo_clk = {
},
};

+static const struct qcom_reset_map disp_cc_sm6350_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x1000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0x2000 },
+};
+
static struct gdsc mdss_gdsc = {
.gdscr = 0x1004,
.en_rest_wait_val = 0x2,
@@ -746,6 +751,8 @@ static const struct qcom_cc_desc disp_cc_sm6350_desc = {
.num_clks = ARRAY_SIZE(disp_cc_sm6350_clocks),
.gdscs = disp_cc_sm6350_gdscs,
.num_gdscs = ARRAY_SIZE(disp_cc_sm6350_gdscs),
+ .resets = disp_cc_sm6350_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sm6350_resets),
};

static const struct of_device_id disp_cc_sm6350_match_table[] = {
diff --git a/drivers/clk/qcom/dispcc-sm7150.c b/drivers/clk/qcom/dispcc-sm7150.c
index bdfff246ed3f..811d380a8e9f 100644
--- a/drivers/clk/qcom/dispcc-sm7150.c
+++ b/drivers/clk/qcom/dispcc-sm7150.c
@@ -20,6 +20,7 @@
#include "clk-regmap-divider.h"
#include "common.h"
#include "gdsc.h"
+#include "reset.h"

enum {
DT_BI_TCXO,
@@ -356,7 +357,7 @@ static struct clk_rcg2 dispcc_mdss_pclk0_clk_src = {
.name = "dispcc_mdss_pclk0_clk_src",
.parent_data = dispcc_parent_data_4,
.num_parents = ARRAY_SIZE(dispcc_parent_data_4),
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
.ops = &clk_pixel_ops,
},
};
@@ -951,6 +952,10 @@ static struct gdsc *dispcc_sm7150_gdscs[] = {
[MDSS_GDSC] = &mdss_gdsc,
};

+static const struct qcom_reset_map dispcc_sm7150_resets[] = {
+ [DISPCC_MDSS_CORE_BCR] = { 0x2000 },
+};
+
static const struct regmap_config dispcc_sm7150_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -965,6 +970,8 @@ static const struct qcom_cc_desc dispcc_sm7150_desc = {
.num_clks = ARRAY_SIZE(dispcc_sm7150_clocks),
.gdscs = dispcc_sm7150_gdscs,
.num_gdscs = ARRAY_SIZE(dispcc_sm7150_gdscs),
+ .resets = dispcc_sm7150_resets,
+ .num_resets = ARRAY_SIZE(dispcc_sm7150_resets),
};

static const struct of_device_id dispcc_sm7150_match_table[] = {
diff --git a/drivers/clk/qcom/dispcc-sm8750.c b/drivers/clk/qcom/dispcc-sm8750.c
index 877b40d50e6f..ca09da111a50 100644
--- a/drivers/clk/qcom/dispcc-sm8750.c
+++ b/drivers/clk/qcom/dispcc-sm8750.c
@@ -393,7 +393,7 @@ static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
.name = "disp_cc_mdss_byte0_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
.ops = &clk_byte2_ops,
},
};
@@ -408,7 +408,7 @@ static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
.name = "disp_cc_mdss_byte1_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
.ops = &clk_byte2_ops,
},
};
@@ -712,7 +712,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
.name = "disp_cc_mdss_pclk0_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
.ops = &clk_pixel_ops,
},
};
@@ -727,7 +727,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
.name = "disp_cc_mdss_pclk1_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
.ops = &clk_pixel_ops,
},
};
@@ -742,7 +742,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk2_clk_src = {
.name = "disp_cc_mdss_pclk2_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
.ops = &clk_pixel_ops,
},
};
diff --git a/drivers/clk/qcom/dispcc-x1e80100.c b/drivers/clk/qcom/dispcc-x1e80100.c
index 40069eba41f2..aa7fd43969f9 100644
--- a/drivers/clk/qcom/dispcc-x1e80100.c
+++ b/drivers/clk/qcom/dispcc-x1e80100.c
@@ -1618,6 +1618,9 @@ static struct clk_regmap *disp_cc_x1e80100_clocks[] = {

static const struct qcom_reset_map disp_cc_x1e80100_resets[] = {
[DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK_ARES] = { .reg = 0x8044, .bit = 2 },
+ [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK_ARES] = { .reg = 0x8068, .bit = 2 },
+ [DISP_CC_MDSS_DPTX2_USB_ROUTER_LINK_INTF_CLK_ARES] = { .reg = 0x8088, .bit = 2 },
[DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
[DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
};
diff --git a/drivers/clk/qcom/ecpricc-qdu1000.c b/drivers/clk/qcom/ecpricc-qdu1000.c
index dbc11260479b..c2a16616ed64 100644
--- a/drivers/clk/qcom/ecpricc-qdu1000.c
+++ b/drivers/clk/qcom/ecpricc-qdu1000.c
@@ -920,6 +920,7 @@ static struct clk_branch ecpri_cc_eth_100g_c2c1_udp_fifo_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_c2c_0_hm_ff_0_clk = {
.mem_enable_reg = 0x8410,
.mem_ack_reg = 0x8424,
+ .mem_enable_mask = BIT(0),
.mem_enable_ack_mask = BIT(0),
.branch = {
.halt_reg = 0x80b4,
@@ -943,6 +944,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_c2c_0_hm_ff_0_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_c2c_0_hm_ff_1_clk = {
.mem_enable_reg = 0x8410,
.mem_ack_reg = 0x8424,
+ .mem_enable_mask = BIT(1),
.mem_enable_ack_mask = BIT(1),
.branch = {
.halt_reg = 0x80bc,
@@ -966,6 +968,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_c2c_0_hm_ff_1_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_c2c_hm_macsec_clk = {
.mem_enable_reg = 0x8410,
.mem_ack_reg = 0x8424,
+ .mem_enable_mask = BIT(4),
.mem_enable_ack_mask = BIT(4),
.branch = {
.halt_reg = 0x80ac,
@@ -989,6 +992,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_c2c_hm_macsec_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_dbg_c2c_hm_ff_0_clk = {
.mem_enable_reg = 0x8414,
.mem_ack_reg = 0x8428,
+ .mem_enable_mask = BIT(0),
.mem_enable_ack_mask = BIT(0),
.branch = {
.halt_reg = 0x80d8,
@@ -1012,6 +1016,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_dbg_c2c_hm_ff_0_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_dbg_c2c_hm_ff_1_clk = {
.mem_enable_reg = 0x8414,
.mem_ack_reg = 0x8428,
+ .mem_enable_mask = BIT(1),
.mem_enable_ack_mask = BIT(1),
.branch = {
.halt_reg = 0x80e0,
@@ -1053,6 +1058,7 @@ static struct clk_branch ecpri_cc_eth_100g_dbg_c2c_udp_fifo_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_0_hm_ff_0_clk = {
.mem_enable_reg = 0x8404,
.mem_ack_reg = 0x8418,
+ .mem_enable_mask = BIT(0),
.mem_enable_ack_mask = BIT(0),
.branch = {
.halt_reg = 0x800c,
@@ -1076,6 +1082,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_0_hm_ff_0_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_0_hm_ff_1_clk = {
.mem_enable_reg = 0x8404,
.mem_ack_reg = 0x8418,
+ .mem_enable_mask = BIT(1),
.mem_enable_ack_mask = BIT(1),
.branch = {
.halt_reg = 0x8014,
@@ -1099,6 +1106,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_0_hm_ff_1_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_0_hm_ff_2_clk = {
.mem_enable_reg = 0x8404,
.mem_ack_reg = 0x8418,
+ .mem_enable_mask = BIT(2),
.mem_enable_ack_mask = BIT(2),
.branch = {
.halt_reg = 0x801c,
@@ -1122,6 +1130,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_0_hm_ff_2_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_0_hm_ff_3_clk = {
.mem_enable_reg = 0x8404,
.mem_ack_reg = 0x8418,
+ .mem_enable_mask = BIT(3),
.mem_enable_ack_mask = BIT(3),
.branch = {
.halt_reg = 0x8024,
@@ -1163,6 +1172,7 @@ static struct clk_branch ecpri_cc_eth_100g_fh_0_udp_fifo_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_1_hm_ff_0_clk = {
.mem_enable_reg = 0x8408,
.mem_ack_reg = 0x841c,
+ .mem_enable_mask = BIT(0),
.mem_enable_ack_mask = BIT(0),
.branch = {
.halt_reg = 0x8044,
@@ -1186,6 +1196,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_1_hm_ff_0_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_1_hm_ff_1_clk = {
.mem_enable_reg = 0x8408,
.mem_ack_reg = 0x841c,
+ .mem_enable_mask = BIT(1),
.mem_enable_ack_mask = BIT(1),
.branch = {
.halt_reg = 0x804c,
@@ -1209,6 +1220,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_1_hm_ff_1_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_1_hm_ff_2_clk = {
.mem_enable_reg = 0x8408,
.mem_ack_reg = 0x841c,
+ .mem_enable_mask = BIT(2),
.mem_enable_ack_mask = BIT(2),
.branch = {
.halt_reg = 0x8054,
@@ -1232,6 +1244,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_1_hm_ff_2_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_1_hm_ff_3_clk = {
.mem_enable_reg = 0x8408,
.mem_ack_reg = 0x841c,
+ .mem_enable_mask = BIT(3),
.mem_enable_ack_mask = BIT(3),
.branch = {
.halt_reg = 0x805c,
@@ -1273,6 +1286,7 @@ static struct clk_branch ecpri_cc_eth_100g_fh_1_udp_fifo_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_2_hm_ff_0_clk = {
.mem_enable_reg = 0x840c,
.mem_ack_reg = 0x8420,
+ .mem_enable_mask = BIT(0),
.mem_enable_ack_mask = BIT(0),
.branch = {
.halt_reg = 0x807c,
@@ -1296,6 +1310,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_2_hm_ff_0_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_2_hm_ff_1_clk = {
.mem_enable_reg = 0x840c,
.mem_ack_reg = 0x8420,
+ .mem_enable_mask = BIT(1),
.mem_enable_ack_mask = BIT(1),
.branch = {
.halt_reg = 0x8084,
@@ -1319,6 +1334,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_2_hm_ff_1_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_2_hm_ff_2_clk = {
.mem_enable_reg = 0x840c,
.mem_ack_reg = 0x8420,
+ .mem_enable_mask = BIT(2),
.mem_enable_ack_mask = BIT(2),
.branch = {
.halt_reg = 0x808c,
@@ -1342,6 +1358,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_2_hm_ff_2_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_2_hm_ff_3_clk = {
.mem_enable_reg = 0x840c,
.mem_ack_reg = 0x8420,
+ .mem_enable_mask = BIT(3),
.mem_enable_ack_mask = BIT(3),
.branch = {
.halt_reg = 0x8094,
@@ -1383,6 +1400,7 @@ static struct clk_branch ecpri_cc_eth_100g_fh_2_udp_fifo_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_macsec_0_clk = {
.mem_enable_reg = 0x8404,
.mem_ack_reg = 0x8418,
+ .mem_enable_mask = BIT(4),
.mem_enable_ack_mask = BIT(4),
.branch = {
.halt_reg = 0x8004,
@@ -1406,6 +1424,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_macsec_0_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_macsec_1_clk = {
.mem_enable_reg = 0x8408,
.mem_ack_reg = 0x841c,
+ .mem_enable_mask = BIT(4),
.mem_enable_ack_mask = BIT(4),
.branch = {
.halt_reg = 0x803c,
@@ -1429,6 +1448,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_macsec_1_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_macsec_2_clk = {
.mem_enable_reg = 0x840c,
.mem_ack_reg = 0x8420,
+ .mem_enable_mask = BIT(4),
.mem_enable_ack_mask = BIT(4),
.branch = {
.halt_reg = 0x8074,
@@ -1452,6 +1472,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_macsec_2_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_mac_c2c_hm_ref_clk = {
.mem_enable_reg = 0x8410,
.mem_ack_reg = 0x8424,
+ .mem_enable_mask = BIT(5),
.mem_enable_ack_mask = BIT(5),
.branch = {
.halt_reg = 0x80c4,
@@ -1475,6 +1496,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_mac_c2c_hm_ref_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_mac_dbg_c2c_hm_ref_clk = {
.mem_enable_reg = 0x8414,
.mem_ack_reg = 0x8428,
+ .mem_enable_mask = BIT(5),
.mem_enable_ack_mask = BIT(5),
.branch = {
.halt_reg = 0x80e8,
@@ -1498,6 +1520,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_mac_dbg_c2c_hm_ref_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_mac_fh0_hm_ref_clk = {
.mem_enable_reg = 0x8404,
.mem_ack_reg = 0x8418,
+ .mem_enable_mask = BIT(5),
.mem_enable_ack_mask = BIT(5),
.branch = {
.halt_reg = 0x802c,
@@ -1521,6 +1544,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_mac_fh0_hm_ref_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_mac_fh1_hm_ref_clk = {
.mem_enable_reg = 0x8408,
.mem_ack_reg = 0x841c,
+ .mem_enable_mask = BIT(5),
.mem_enable_ack_mask = BIT(5),
.branch = {
.halt_reg = 0x8064,
@@ -1544,6 +1568,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_mac_fh1_hm_ref_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_mac_fh2_hm_ref_clk = {
.mem_enable_reg = 0x840c,
.mem_ack_reg = 0x8420,
+ .mem_enable_mask = BIT(5),
.mem_enable_ack_mask = BIT(5),
.branch = {
.halt_reg = 0x809c,
@@ -1603,6 +1628,7 @@ static struct clk_branch ecpri_cc_eth_dbg_noc_axi_clk = {
static struct clk_mem_branch ecpri_cc_eth_phy_0_ock_sram_clk = {
.mem_enable_reg = 0x8404,
.mem_ack_reg = 0x8418,
+ .mem_enable_mask = BIT(6),
.mem_enable_ack_mask = BIT(6),
.branch = {
.halt_reg = 0xd140,
@@ -1621,6 +1647,7 @@ static struct clk_mem_branch ecpri_cc_eth_phy_0_ock_sram_clk = {
static struct clk_mem_branch ecpri_cc_eth_phy_1_ock_sram_clk = {
.mem_enable_reg = 0x8408,
.mem_ack_reg = 0x841C,
+ .mem_enable_mask = BIT(6),
.mem_enable_ack_mask = BIT(6),
.branch = {
.halt_reg = 0xd148,
@@ -1639,6 +1666,7 @@ static struct clk_mem_branch ecpri_cc_eth_phy_1_ock_sram_clk = {
static struct clk_mem_branch ecpri_cc_eth_phy_2_ock_sram_clk = {
.mem_enable_reg = 0x840c,
.mem_ack_reg = 0x8420,
+ .mem_enable_mask = BIT(6),
.mem_enable_ack_mask = BIT(6),
.branch = {
.halt_reg = 0xd150,
@@ -1657,6 +1685,7 @@ static struct clk_mem_branch ecpri_cc_eth_phy_2_ock_sram_clk = {
static struct clk_mem_branch ecpri_cc_eth_phy_3_ock_sram_clk = {
.mem_enable_reg = 0x8410,
.mem_ack_reg = 0x8424,
+ .mem_enable_mask = BIT(6),
.mem_enable_ack_mask = BIT(6),
.branch = {
.halt_reg = 0xd158,
@@ -1675,6 +1704,7 @@ static struct clk_mem_branch ecpri_cc_eth_phy_3_ock_sram_clk = {
static struct clk_mem_branch ecpri_cc_eth_phy_4_ock_sram_clk = {
.mem_enable_reg = 0x8414,
.mem_ack_reg = 0x8428,
+ .mem_enable_mask = BIT(6),
.mem_enable_ack_mask = BIT(6),
.branch = {
.halt_reg = 0xd160,
diff --git a/drivers/clk/qcom/gcc-glymur.c b/drivers/clk/qcom/gcc-glymur.c
new file mode 100644
index 000000000000..deab819576d0
--- /dev/null
+++ b/drivers/clk/qcom/gcc-glymur.c
@@ -0,0 +1,8615 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,glymur-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+ DT_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_DP0_GMUX_2_CLK_SRC,
+ DT_GCC_USB4_1_PHY_DP1_GMUX_2_CLK_SRC,
+ DT_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_PCIE_3A_PIPE_CLK,
+ DT_PCIE_3B_PIPE_CLK,
+ DT_PCIE_4_PIPE_CLK,
+ DT_PCIE_5_PIPE_CLK,
+ DT_PCIE_6_PIPE_CLK,
+ DT_QUSB4PHY_0_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_0_GCC_USB4_RX1_CLK,
+ DT_QUSB4PHY_1_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_1_GCC_USB4_RX1_CLK,
+ DT_QUSB4PHY_2_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_2_GCC_USB4_RX1_CLK,
+ DT_UFS_PHY_RX_SYMBOL_0_CLK,
+ DT_UFS_PHY_RX_SYMBOL_1_CLK,
+ DT_UFS_PHY_TX_SYMBOL_0_CLK,
+ DT_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK,
+ DT_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK,
+ DT_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK,
+ DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK,
+ DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK,
+ DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL14_OUT_EVEN,
+ P_GCC_GPLL14_OUT_MAIN,
+ P_GCC_GPLL1_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL5_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL8_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_GCC_USB3_PRIM_PHY_PIPE_CLK_SRC,
+ P_GCC_USB3_SEC_PHY_PIPE_CLK_SRC,
+ P_GCC_USB3_TERT_PHY_PIPE_CLK_SRC,
+ P_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_DP0_GMUX_2_CLK_SRC,
+ P_GCC_USB4_1_PHY_DP1_GMUX_2_CLK_SRC,
+ P_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_PLL_PIPE_CLK_SRC,
+ P_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_PCIE_3A_PIPE_CLK,
+ P_PCIE_3B_PIPE_CLK,
+ P_PCIE_4_PIPE_CLK,
+ P_PCIE_5_PIPE_CLK,
+ P_PCIE_6_PIPE_CLK,
+ P_QUSB4PHY_0_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_0_GCC_USB4_RX1_CLK,
+ P_QUSB4PHY_1_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_1_GCC_USB4_RX1_CLK,
+ P_QUSB4PHY_2_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_2_GCC_USB4_RX1_CLK,
+ P_SLEEP_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK,
+ P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK,
+ P_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ P_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+};
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_taycan_eko_t_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll14 = {
+ .offset = 0xe000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll14",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll14_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll14_out_even = {
+ .offset = 0xe000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll14_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll14_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll14_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll14.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_taycan_eko_t_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll5 = {
+ .offset = 0x5000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll5",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll8 = {
+ .offset = 0x8000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll8",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src;
+static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src;
+static struct clk_regmap_mux gcc_usb3_tert_phy_pipe_clk_src;
+
+static struct clk_rcg2 gcc_usb4_1_phy_pll_pipe_clk_src;
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL14_OUT_MAIN, 1 },
+ { P_GCC_GPLL14_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll14.clkr.hw },
+ { .hw = &gcc_gpll14_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll4.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL8_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll8.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL8_OUT_MAIN, 3 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll8.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_17[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_17[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_18[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_18[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_19[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_19[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_20[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_20[] = {
+ { .index = DT_UFS_PHY_TX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_21[] = {
+ { P_GCC_USB3_PRIM_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_21[] = {
+ { .hw = &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .index = DT_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static const struct parent_map gcc_parent_map_22[] = {
+ { P_GCC_USB3_SEC_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_1_PHY_PLL_PIPE_CLK_SRC, 2 },
+ { P_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_22[] = {
+ { .hw = &gcc_usb3_sec_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .hw = &gcc_usb4_1_phy_pll_pipe_clk_src.clkr.hw },
+ { .index = DT_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static const struct parent_map gcc_parent_map_23[] = {
+ { P_GCC_USB3_TERT_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_23[] = {
+ { .hw = &gcc_usb3_tert_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .index = DT_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static const struct parent_map gcc_parent_map_24[] = {
+ { P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_24[] = {
+ { .index = DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_25[] = {
+ { P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_25[] = {
+ { .index = DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_26[] = {
+ { P_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_26[] = {
+ { .index = DT_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_27[] = {
+ { P_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_27[] = {
+ { .index = DT_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_28[] = {
+ { P_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_28[] = {
+ { .index = DT_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_29[] = {
+ { P_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_29[] = {
+ { .index = DT_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_30[] = {
+ { P_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_30[] = {
+ { .index = DT_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_31[] = {
+ { P_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_31[] = {
+ { .index = DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_32[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_32[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_33[] = {
+ { P_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_33[] = {
+ { .index = DT_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_34[] = {
+ { P_QUSB4PHY_0_GCC_USB4_RX0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_34[] = {
+ { .index = DT_QUSB4PHY_0_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_35[] = {
+ { P_QUSB4PHY_0_GCC_USB4_RX1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_35[] = {
+ { .index = DT_QUSB4PHY_0_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_36[] = {
+ { P_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_36[] = {
+ { .index = DT_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_37[] = {
+ { P_GCC_USB4_1_PHY_DP0_GMUX_2_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_37[] = {
+ { .index = DT_GCC_USB4_1_PHY_DP0_GMUX_2_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_38[] = {
+ { P_GCC_USB4_1_PHY_DP1_GMUX_2_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_38[] = {
+ { .index = DT_GCC_USB4_1_PHY_DP1_GMUX_2_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_39[] = {
+ { P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_39[] = {
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_40[] = {
+ { P_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_40[] = {
+ { .index = DT_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_41[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL5_OUT_MAIN, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_41[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll5.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_42[] = {
+ { P_QUSB4PHY_1_GCC_USB4_RX0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_42[] = {
+ { .index = DT_QUSB4PHY_1_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_43[] = {
+ { P_QUSB4PHY_1_GCC_USB4_RX1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_43[] = {
+ { .index = DT_QUSB4PHY_1_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_44[] = {
+ { P_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_44[] = {
+ { .index = DT_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_45[] = {
+ { P_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_45[] = {
+ { .index = DT_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_46[] = {
+ { P_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_46[] = {
+ { .index = DT_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_47[] = {
+ { P_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_47[] = {
+ { .index = DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_48[] = {
+ { P_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_48[] = {
+ { .index = DT_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_49[] = {
+ { P_QUSB4PHY_2_GCC_USB4_RX0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_49[] = {
+ { .index = DT_QUSB4PHY_2_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_50[] = {
+ { P_QUSB4PHY_2_GCC_USB4_RX1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_50[] = {
+ { .index = DT_QUSB4PHY_2_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_51[] = {
+ { P_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_51[] = {
+ { .index = DT_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_3a_pipe_clk_src = {
+ .reg = 0xdc088,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_3A_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_3b_pipe_clk_src = {
+ .reg = 0x941b4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_3B_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_4_pipe_clk_src = {
+ .reg = 0x881a4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_4_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_5_pipe_clk_src = {
+ .reg = 0xc309c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_5_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_6_pipe_clk_src = {
+ .reg = 0x8a1a4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_6_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x7706c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_18,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_18,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_18),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x770f0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_19,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_19,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_19),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x7705c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_20,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_20,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_20),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb34_prim_phy_pipe_clk_src = {
+ .reg = 0x2b0b8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_21,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_21,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_21),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb34_sec_phy_pipe_clk_src = {
+ .reg = 0x2d0c4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_22,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_sec_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_22,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_22),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb34_tert_phy_pipe_clk_src = {
+ .reg = 0xe00bc,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_23,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_tert_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_23,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_23),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_mp_phy_pipe_0_clk_src = {
+ .reg = 0x9a07c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_24,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_0_clk_src",
+ .parent_data = gcc_parent_data_24,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_24),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_mp_phy_pipe_1_clk_src = {
+ .reg = 0x9a084,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_25,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_1_clk_src",
+ .parent_data = gcc_parent_data_25,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_25),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x3f08c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_26,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_26,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_26),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src = {
+ .reg = 0xe207c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_27,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_27,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_27),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_tert_phy_pipe_clk_src = {
+ .reg = 0xe107c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_28,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_28,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_28),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_dp0_clk_src = {
+ .reg = 0x2b080,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_29,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_dp0_clk_src",
+ .parent_data = gcc_parent_data_29,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_29),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_dp1_clk_src = {
+ .reg = 0x2b134,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_30,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_dp1_clk_src",
+ .parent_data = gcc_parent_data_30,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_30),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0x2b0f0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_31,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_31,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_31),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0x2b120,
+ .shift = 0,
+ .width = 1,
+ .parent_map = gcc_parent_map_33,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_33,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_33),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_rx0_clk_src = {
+ .reg = 0x2b0c0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_34,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_34,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_34),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_rx1_clk_src = {
+ .reg = 0x2b0d4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_35,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_35,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_35),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_sys_clk_src = {
+ .reg = 0x2b100,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_36,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_36,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_36),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_dp0_clk_src = {
+ .reg = 0x2d08c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_37,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_dp0_clk_src",
+ .parent_data = gcc_parent_data_37,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_37),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_dp1_clk_src = {
+ .reg = 0x2d154,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_38,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_dp1_clk_src",
+ .parent_data = gcc_parent_data_38,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_38),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0x2d114,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_39,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_39,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_39),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0x2d140,
+ .shift = 0,
+ .width = 1,
+ .parent_map = gcc_parent_map_40,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_40,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_40),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_rx0_clk_src = {
+ .reg = 0x2d0e4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_42,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_42,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_42),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_rx1_clk_src = {
+ .reg = 0x2d0f8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_43,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_43,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_43),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_sys_clk_src = {
+ .reg = 0x2d124,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_44,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_44,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_44),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_dp0_clk_src = {
+ .reg = 0xe0084,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_45,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_dp0_clk_src",
+ .parent_data = gcc_parent_data_45,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_45),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_dp1_clk_src = {
+ .reg = 0xe013c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_46,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_dp1_clk_src",
+ .parent_data = gcc_parent_data_46,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_46),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0xe00f4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_47,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_47,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_47),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0xe0124,
+ .shift = 0,
+ .width = 1,
+ .parent_map = gcc_parent_map_48,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_48,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_48),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_rx0_clk_src = {
+ .reg = 0xe00c4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_49,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_49,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_49),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_rx1_clk_src = {
+ .reg = 0xe00d8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_50,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_50,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_50),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_sys_clk_src = {
+ .reg = 0xe0104,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_51,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_51,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_51),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
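+/*
+ * RCG2 frequency tables: each F(rate, parent, pre_div, m, n) entry is one
+ * operating point. pre_div is the HID divider (half-integer values such
+ * as 4.5 are supported and encoded by the F() macro), and m/n drive the
+ * MND counter when non-zero.
+ */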
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x92004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x93004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0xc8168,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xc803c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x2e168,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x2e03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2_aux_clk_src = {
+ .cmd_rcgr = 0xc0168,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xc003c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3a_aux_clk_src = {
+ .cmd_rcgr = 0xdc08c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3a_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xdc070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3b_aux_clk_src = {
+ .cmd_rcgr = 0x941b8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3b_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x94088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_4_aux_clk_src = {
+ .cmd_rcgr = 0x881a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_4_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x88078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_5_aux_clk_src = {
+ .cmd_rcgr = 0xc30a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_5_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xc3084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_6_aux_clk_src = {
+ .cmd_rcgr = 0x8a1a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_6_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x8a078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_3a_aux_clk_src = {
+ .cmd_rcgr = 0x6c01c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_3a_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_3b_aux_clk_src = {
+ .cmd_rcgr = 0x7501c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_3b_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_4_aux_clk_src = {
+ .cmd_rcgr = 0xd3018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_4_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_5_aux_clk_src = {
+ .cmd_rcgr = 0xd2018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_5_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_6_aux_clk_src = {
+ .cmd_rcgr = 0xd4018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_6_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_oob_qspi_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
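+/*
+ * The QUP serial-engine RCGs keep their init data in separate, writable
+ * structs rather than const compound literals. This matches the layout
+ * used by drivers whose QUP RCGs are handed to the DFS registration
+ * helper, which may swap the ops at probe time (assumed here; the DFS
+ * hookup itself is outside this hunk).
+ */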
+static struct clk_init_data gcc_qupv3_oob_qspi_s0_clk_src_init = {
+ .name = "gcc_qupv3_oob_qspi_s0_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_oob_qspi_s0_clk_src = {
+ .cmd_rcgr = 0xe7044,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_oob_qspi_s0_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_oob_qspi_s1_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_oob_qspi_s1_clk_src_init = {
+ .name = "gcc_qupv3_oob_qspi_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_oob_qspi_s1_clk_src = {
+ .cmd_rcgr = 0xe7170,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_oob_qspi_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_qspi_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_qspi_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_qspi_s2_clk_src = {
+ .cmd_rcgr = 0x287a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_qspi_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_qspi_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_qspi_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_qspi_s3_clk_src = {
+ .cmd_rcgr = 0x288d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_qspi_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_qspi_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_qspi_s6_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_qspi_s6_clk_src = {
+ .cmd_rcgr = 0x2866c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_qspi_s6_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x28014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x28150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s4_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x282b4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x283f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x28540,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_qspi_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_s2_clk_src = {
+ .cmd_rcgr = 0xb37a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_qspi_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_s3_clk_src = {
+ .cmd_rcgr = 0xb38d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_qspi_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_s6_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_s6_clk_src = {
+ .cmd_rcgr = 0xb366c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0xb3014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0xb3150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0xb32b4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0xb33f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0xb3540,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_qspi_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_qspi_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_qspi_s2_clk_src = {
+ .cmd_rcgr = 0xb47a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_qspi_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_qspi_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_qspi_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_qspi_s3_clk_src = {
+ .cmd_rcgr = 0xb48d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_qspi_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_qspi_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_qspi_s6_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_qspi_s6_clk_src = {
+ .cmd_rcgr = 0xb466c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_qspi_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0xb4014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0xb4150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0xb42b4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0xb43f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s7_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
+ .cmd_rcgr = 0xb4540,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0xb001c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_17,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_17,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_17),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0xdf01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77038,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x77090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770c4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x770a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb20_master_clk_src[] = {
+ F(60000000, P_GCC_GPLL14_OUT_MAIN, 10, 0, 0),
+ F(120000000, P_GCC_GPLL14_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb20_master_clk_src = {
+ .cmd_rcgr = 0xbc030,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_usb20_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_master_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb20_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xbc048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_mp_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_mp_master_clk_src = {
+ .cmd_rcgr = 0x9a03c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_mp_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x9a054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x3f04c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x3f064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0xe203c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xe2054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_tert_master_clk_src = {
+ .cmd_rcgr = 0xe103c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_tert_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xe1054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_mp_phy_aux_clk_src = {
+ .cmd_rcgr = 0x9a088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x3f090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0xe2080,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_tert_phy_aux_clk_src = {
+ .cmd_rcgr = 0xe1080,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_0_master_clk_src[] = {
+ F(177666750, P_GCC_GPLL8_OUT_MAIN, 4, 0, 0),
+ F(355333500, P_GCC_GPLL8_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_0_master_clk_src = {
+ .cmd_rcgr = 0x2b02c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_usb4_0_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_master_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(125000000, P_GCC_GPLL7_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GCC_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_0_phy_pcie_pipe_clk_src = {
+ .cmd_rcgr = 0x2b104,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_32,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_pcie_pipe_clk_src",
+ .parent_data = gcc_parent_data_32,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_32),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_0_sb_if_clk_src = {
+ .cmd_rcgr = 0x2b0a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_sb_if_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_0_tmu_clk_src = {
+ .cmd_rcgr = 0x2b084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_tmu_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_1_master_clk_src = {
+ .cmd_rcgr = 0x2d02c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_usb4_0_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_master_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_1_phy_pcie_pipe_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(177666750, P_GCC_GPLL8_OUT_MAIN, 4, 0, 0),
+ F(355333500, P_GCC_GPLL8_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_1_phy_pcie_pipe_clk_src = {
+ .cmd_rcgr = 0x2d128,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_usb4_1_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipe_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_1_phy_pll_pipe_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(311000000, P_GCC_GPLL5_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_1_phy_pll_pipe_clk_src = {
+ .cmd_rcgr = 0x2d0c8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_41,
+ .freq_tbl = ftbl_gcc_usb4_1_phy_pll_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pll_pipe_clk_src",
+ .parent_data = gcc_parent_data_41,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_41),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_1_sb_if_clk_src = {
+ .cmd_rcgr = 0x2d0ac,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_sb_if_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_1_tmu_clk_src = {
+ .cmd_rcgr = 0x2d090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_tmu_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_master_clk_src = {
+ .cmd_rcgr = 0xe002c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_usb4_0_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_master_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_phy_pcie_pipe_clk_src = {
+ .cmd_rcgr = 0xe0108,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_pcie_pipe_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_sb_if_clk_src = {
+ .cmd_rcgr = 0xe00a4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_sb_if_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_tmu_clk_src = {
+ .cmd_rcgr = 0xe0088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_tmu_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
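+/*
+ * Post-dividers below use clk_regmap_div_ro_ops: the divider value is
+ * programmed by firmware/boot code and is only read back by Linux, never
+ * reprogrammed.
+ */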
+static struct clk_regmap_div gcc_pcie_3b_pipe_div_clk_src = {
+ .reg = 0x94070,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_4_pipe_div_clk_src = {
+ .reg = 0x88060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_5_pipe_div_clk_src = {
+ .reg = 0xc306c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_6_pipe_div_clk_src = {
+ .reg = 0x8a060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_oob_s0_clk_src = {
+ .reg = 0xe7024,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_qspi_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_oob_s1_clk_src = {
+ .reg = 0xe7038,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s1_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_qspi_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap0_s2_clk_src = {
+ .reg = 0x2828c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap0_s3_clk_src = {
+ .reg = 0x282a0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap0_s6_clk_src = {
+ .reg = 0x2852c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap1_s2_clk_src = {
+ .reg = 0xb328c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap1_s3_clk_src = {
+ .reg = 0xb32a0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap1_s6_clk_src = {
+ .reg = 0xb352c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap2_s2_clk_src = {
+ .reg = 0xb428c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap2_s3_clk_src = {
+ .reg = 0xb42a0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap2_s6_clk_src = {
+ .reg = 0xb452c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s6_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb20_mock_utmi_postdiv_clk_src = {
+ .reg = 0xbc174,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_mp_mock_utmi_postdiv_clk_src = {
+ .reg = 0x9a06c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x3f07c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_sec_mock_utmi_postdiv_clk_src = {
+ .reg = 0xe206c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_tert_mock_utmi_postdiv_clk_src = {
+ .reg = 0xe106c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
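+/*
+ * Branch clocks. As elsewhere in qcom clk drivers, BRANCH_HALT_VOTED
+ * entries are enabled by voting, so only the enable transition is polled
+ * at halt_reg (another voter may keep the clock running after a disable);
+ * BRANCH_HALT_SKIP entries have no reliable halt bit and are never polled.
+ */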
+static struct clk_branch gcc_aggre_noc_pcie_3a_west_sf_axi_clk = {
+ .halt_reg = 0xdc0bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_3a_west_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_3b_west_sf_axi_clk = {
+ .halt_reg = 0x941ec,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_3b_west_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_4_west_sf_axi_clk = {
+ .halt_reg = 0x881d0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_4_west_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_5_east_sf_axi_clk = {
+ .halt_reg = 0xc30d0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_5_east_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_6_west_sf_axi_clk = {
+ .halt_reg = 0x8a1d0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_6_west_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x77000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb2_prim_axi_clk = {
+ .halt_reg = 0xbc17c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xbc17c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xbc17c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb2_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_mp_axi_clk = {
+ .halt_reg = 0x9a004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_mp_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x3f00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3f00c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3f00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0xe2004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe2004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_tert_axi_clk = {
+ .halt_reg = 0xe1004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe1004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_tert_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb4_0_axi_clk = {
+ .halt_reg = 0x2b000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2b000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb4_0_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb4_1_axi_clk = {
+ .halt_reg = 0x2d000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2d000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb4_1_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb4_2_axi_clk = {
+ .halt_reg = 0xe0000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe0000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe0000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb4_2_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_av1e_ahb_clk = {
+ .halt_reg = 0x9b02c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b02c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_av1e_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_av1e_axi_clk = {
+ .halt_reg = 0x9b030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9b030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_av1e_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_av1e_xo_clk = {
+ .halt_reg = 0x9b044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9b044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_av1e_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x34038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x26014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x26028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_ahb_clk = {
+ .halt_reg = 0x82004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x82004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_south_ahb_clk = {
+ .halt_reg = 0xba2ec,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2ec,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_south_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb2_prim_axi_clk = {
+ .halt_reg = 0xbc178,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xbc178,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xbc178,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb2_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_mp_axi_clk = {
+ .halt_reg = 0x9a000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9a000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_mp_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x3f000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3f000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0xe2000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe2000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe2000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_tert_axi_clk = {
+ .halt_reg = 0xe1000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe1000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe1000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_tert_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb_anoc_ahb_clk = {
+ .halt_reg = 0x3f004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3f004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb_anoc_south_ahb_clk = {
+ .halt_reg = 0x3f008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3f008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb_anoc_south_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x27008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_ahb_clk = {
+ .halt_reg = 0x9b004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_axi0_clk = {
+ .halt_reg = 0x9b008,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9b008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_axi0c_clk = {
+ .halt_reg = 0x9b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_axi0c_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_xo_clk = {
+ .halt_reg = 0x9b024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9b024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x92000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x92000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x93000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x93000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gemnoc_gfx_clk = {
+ .halt_reg = 0x71010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gemnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_reg = 0x71024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_reg = 0x7102c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7102c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62038,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
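+/*
+ * PCIe port clocks. Note the pipe clocks of ports 0-2 are parented to the
+ * gcc_usb4_*_phy_pcie_pipe_mux_clk_src selectors, likely because those
+ * ports share lanes with the USB4 PHYs (an inference from the parenting,
+ * not stated in the patch); ports 3a-6 use dedicated
+ * gcc_pcie_*_pipe_clk_src sources.
+ */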
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0xc8018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0xba4a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4a8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0xba498,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xba498,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0xc8038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0xc8028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0xba488,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba488,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0xba484,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x2e018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0xba480,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba480,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0xba470,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xba470,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_rchng_clk = {
+ .halt_reg = 0x2e038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x2e028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0xba460,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba460,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0xba45c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_aux_clk = {
+ .halt_reg = 0xc0018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_cfg_ahb_clk = {
+ .halt_reg = 0xba4d0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4d0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_mstr_axi_clk = {
+ .halt_reg = 0xba4c0,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xba4c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_phy_rchng_clk = {
+ .halt_reg = 0xc0038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_2_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_pipe_clk = {
+ .halt_reg = 0xc0028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_axi_clk = {
+ .halt_reg = 0xba4b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_q2a_axi_clk = {
+ .halt_reg = 0xba4ac,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_aux_clk = {
+ .halt_reg = 0xdc04c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc04c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3a_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_cfg_ahb_clk = {
+ .halt_reg = 0xba4f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4f0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_mstr_axi_clk = {
+ .halt_reg = 0xdc038,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xdc038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_phy_rchng_clk = {
+ .halt_reg = 0xdc06c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc06c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3a_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_pipe_clk = {
+ .halt_reg = 0xdc05c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xdc05c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3a_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_slv_axi_clk = {
+ .halt_reg = 0xdc024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_slv_q2a_axi_clk = {
+ .halt_reg = 0xdc01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_aux_clk = {
+ .halt_reg = 0x94050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_cfg_ahb_clk = {
+ .halt_reg = 0xba4f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4f4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_mstr_axi_clk = {
+ .halt_reg = 0x94038,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x94038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_phy_rchng_clk = {
+ .halt_reg = 0x94084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_pipe_clk = {
+ .halt_reg = 0x94060,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_pipe_div2_clk = {
+ .halt_reg = 0x94074,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_slv_axi_clk = {
+ .halt_reg = 0x94024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x94024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_slv_q2a_axi_clk = {
+ .halt_reg = 0x9401c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_aux_clk = {
+ .halt_reg = 0x88040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_cfg_ahb_clk = {
+ .halt_reg = 0xba4fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4fc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_mstr_axi_clk = {
+ .halt_reg = 0x88030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x88030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_phy_rchng_clk = {
+ .halt_reg = 0x88074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_pipe_clk = {
+ .halt_reg = 0x88050,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_pipe_div2_clk = {
+ .halt_reg = 0x88064,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_slv_axi_clk = {
+ .halt_reg = 0x88020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x88020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_slv_q2a_axi_clk = {
+ .halt_reg = 0x8801c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_aux_clk = {
+ .halt_reg = 0xc304c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_cfg_ahb_clk = {
+ .halt_reg = 0xba4f8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4f8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_mstr_axi_clk = {
+ .halt_reg = 0xc3038,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xc3038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_phy_rchng_clk = {
+ .halt_reg = 0xc3080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_pipe_clk = {
+ .halt_reg = 0xc305c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_pipe_div2_clk = {
+ .halt_reg = 0xc3070,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_slv_axi_clk = {
+ .halt_reg = 0xc3024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc3024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_slv_q2a_axi_clk = {
+ .halt_reg = 0xc301c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_aux_clk = {
+ .halt_reg = 0x8a040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_cfg_ahb_clk = {
+ .halt_reg = 0xba500,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba500,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_mstr_axi_clk = {
+ .halt_reg = 0x8a030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x8a030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_phy_rchng_clk = {
+ .halt_reg = 0x8a074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_pipe_clk = {
+ .halt_reg = 0x8a050,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_pipe_div2_clk = {
+ .halt_reg = 0x8a064,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_slv_axi_clk = {
+ .halt_reg = 0x8a020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8a020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_slv_q2a_axi_clk = {
+ .halt_reg = 0x8a01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
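+/* PCIe NoC interface clocks, all enabled through the 0x62008 vote register. */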
+static struct clk_branch gcc_pcie_noc_pwrctl_clk = {
+ .halt_reg = 0xba2ac,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_pwrctl_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_qosgen_extref_clk = {
+ .halt_reg = 0xba2a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_qosgen_extref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_sf_center_clk = {
+ .halt_reg = 0xba2b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_sf_center_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_slave_sf_east_clk = {
+ .halt_reg = 0xba2b8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2b8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_slave_sf_east_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_slave_sf_west_clk = {
+ .halt_reg = 0xba2c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_slave_sf_west_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_tsctr_clk = {
+ .halt_reg = 0xba2a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_tsctr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_3a_aux_clk = {
+ .halt_reg = 0x6c038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6c038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_3a_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_3a_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_3b_aux_clk = {
+ .halt_reg = 0x75034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_3b_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_3b_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_4_aux_clk = {
+ .halt_reg = 0xd3030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_4_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_4_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_5_aux_clk = {
+ .halt_reg = 0xd2030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_5_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_5_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_6_aux_clk = {
+ .halt_reg = 0xd4030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_6_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_6_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rscc_cfg_ahb_clk = {
+ .halt_reg = 0xb8004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb8004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62038,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rscc_xo_clk = {
+ .halt_reg = 0xb8008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62038,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
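+/*
+ * QMIP AHB interface clocks gating register access to the AV1 encoder,
+ * camera, GPU, PCIe and video blocks.
+ */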
+static struct clk_branch gcc_qmip_av1e_ahb_clk = {
+ .halt_reg = 0x9b048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b048,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_av1e_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_cmd_ahb_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_cmd_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_ahb_clk = {
+ .halt_reg = 0x71008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_gpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_3a_ahb_clk = {
+ .halt_reg = 0xdc018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_3a_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_3b_ahb_clk = {
+ .halt_reg = 0x94018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x94018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_3b_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_4_ahb_clk = {
+ .halt_reg = 0x88018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x88018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_5_ahb_clk = {
+ .halt_reg = 0xc3018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc3018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_5_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_6_ahb_clk = {
+ .halt_reg = 0x8a018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8a018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_6_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cv_cpu_ahb_clk = {
+ .halt_reg = 0x32018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cv_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x32008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_v_cpu_ahb_clk = {
+ .halt_reg = 0x32014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_v_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec1_ahb_clk = {
+ .halt_reg = 0x32010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x3200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
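+/*
+ * QUPv3 serial-engine clocks: each wrapper has shared core/AHB clocks plus
+ * per-SE branches whose rates track the matching *_s*_clk_src RCGs via
+ * CLK_SET_RATE_PARENT.
+ */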
+static struct clk_branch gcc_qupv3_oob_core_2x_clk = {
+ .halt_reg = 0xc5040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_core_clk = {
+ .halt_reg = 0xc502c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_m_ahb_clk = {
+ .halt_reg = 0xe7004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe7004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe7004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_qspi_s0_clk = {
+ .halt_reg = 0xe7040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_qspi_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_qspi_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_qspi_s1_clk = {
+ .halt_reg = 0xe729c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_qspi_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_qspi_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_s0_clk = {
+ .halt_reg = 0xe7014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_s1_clk = {
+ .halt_reg = 0xe7028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_s_ahb_clk = {
+ .halt_reg = 0xc5028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc5028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_tcxo_clk = {
+ .halt_reg = 0xe703c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_tcxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0xc5448,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0xc5434,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_qspi_s2_clk = {
+ .halt_reg = 0x2879c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_qspi_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_qspi_s3_clk = {
+ .halt_reg = 0x288cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_qspi_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_qspi_s6_clk = {
+ .halt_reg = 0x28798,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_qspi_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x28004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x28140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x2827c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x28290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x282a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x283e0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x2851c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x28530,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0xc5198,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0xc5184,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_s2_clk = {
+ .halt_reg = 0xb379c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_s3_clk = {
+ .halt_reg = 0xb38cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_s6_clk = {
+ .halt_reg = 0xb3798,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0xb3004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0xb3140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0xb327c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0xb3290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0xb32a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0xb33e0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0xb351c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0xb3530,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = {
+ .halt_reg = 0xc52f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_clk = {
+ .halt_reg = 0xc52dc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_qspi_s2_clk = {
+ .halt_reg = 0xb479c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_qspi_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_qspi_s3_clk = {
+ .halt_reg = 0xb48cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_qspi_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_qspi_s6_clk = {
+ .halt_reg = 0xb4798,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_qspi_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0xb4004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0xb4140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0xb427c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0xb4290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0xb42a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0xb43e0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s6_clk = {
+ .halt_reg = 0xb451c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s7_clk = {
+ .halt_reg = 0xb4530,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0xc542c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc542c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0xc5430,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc5430,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0xc517c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc517c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0xc5180,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc5180,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0xc52d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc52d4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0xc52d8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc52d8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0xb0014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0xb0004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0xdf014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xdf014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0xdf004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xdf004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0xba504,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba504,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xba504,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7701c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x770c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
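+/*
+ * The UFS RX/TX symbol clocks are generated by the UFS PHY, so there is
+ * no reliable status bit to poll; BRANCH_HALT_DELAY just waits a fixed
+ * delay after toggling the branch instead of checking halt_reg.
+ */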
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x77034,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770dc,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x770dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x77030,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x77070,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77070,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_master_clk = {
+ .halt_reg = 0xbc018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xbc018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_mock_utmi_clk = {
+ .halt_reg = 0xbc02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xbc02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sleep_clk = {
+ .halt_reg = 0xbc028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xbc028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_master_clk = {
+ .halt_reg = 0x9a024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_mock_utmi_clk = {
+ .halt_reg = 0x9a038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_sleep_clk = {
+ .halt_reg = 0x9a034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x3f030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x3f048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x3f044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+ .halt_reg = 0xe2024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+ .halt_reg = 0xe2038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+ .halt_reg = 0xe2034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_tert_master_clk = {
+ .halt_reg = 0xe1024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_tert_mock_utmi_clk = {
+ .halt_reg = 0xe1038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_tert_sleep_clk = {
+ .halt_reg = 0xe1034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_aux_clk = {
+ .halt_reg = 0x9a070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_mp_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_com_aux_clk = {
+ .halt_reg = 0x9a074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_mp_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
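+/*
+ * PHY pipe clocks only run once the corresponding PHY has been
+ * initialized, so halt status checking is skipped entirely
+ * (BRANCH_HALT_SKIP) to avoid false timeouts while the PHY is down.
+ */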
+static struct clk_branch gcc_usb3_mp_phy_pipe_0_clk = {
+ .halt_reg = 0x9a078,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x9a078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_mp_phy_pipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_pipe_1_clk = {
+ .halt_reg = 0x9a080,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x9a080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_mp_phy_pipe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x3f080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x3f084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x3f088,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x3f088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3f088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
+ .halt_reg = 0xe2070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
+ .halt_reg = 0xe2074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+ .halt_reg = 0xe2078,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xe2078,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe2078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_sec_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_tert_phy_aux_clk = {
+ .halt_reg = 0xe1070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_tert_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_tert_phy_com_aux_clk = {
+ .halt_reg = 0xe1074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_tert_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_tert_phy_pipe_clk = {
+ .halt_reg = 0xe1078,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xe1078,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe1078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_tert_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_cfg_ahb_clk = {
+ .halt_reg = 0xba450,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba450,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xba450,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_dp0_clk = {
+ .halt_reg = 0x2b070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_dp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_dp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_dp1_clk = {
+ .halt_reg = 0x2b124,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b124,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_dp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_dp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_master_clk = {
+ .halt_reg = 0x2b01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_p2rr2p_pipe_clk = {
+ .halt_reg = 0x2b0f4,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x2b0f4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
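+/*
+ * The USB4 PHY PCIe pipe branches are both voted (enabled via bits in
+ * the 0x62010 vote register) and sourced from the PHY, so their halt
+ * bits are skipped as well.
+ */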
+static struct clk_branch gcc_usb4_0_phy_pcie_pipe_clk = {
+ .halt_reg = 0x2b04c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_rx0_clk = {
+ .halt_reg = 0x2b0c4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b0c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_rx1_clk = {
+ .halt_reg = 0x2b0d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b0d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_usb_pipe_clk = {
+ .halt_reg = 0x2b0bc,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2b0bc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2b0bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_sb_if_clk = {
+ .halt_reg = 0x2b048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_sb_if_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_sb_if_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_sys_clk = {
+ .halt_reg = 0x2b05c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_tmu_clk = {
+ .halt_reg = 0x2b09c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b09c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2b09c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_tmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_tmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_uc_hrr_clk = {
+ .halt_reg = 0x2b06c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b06c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_uc_hrr_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_cfg_ahb_clk = {
+ .halt_reg = 0xba454,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba454,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xba454,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_dp0_clk = {
+ .halt_reg = 0x2d07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_dp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_dp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_dp1_clk = {
+ .halt_reg = 0x2d144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_dp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_dp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_master_clk = {
+ .halt_reg = 0x2d01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_p2rr2p_pipe_clk = {
+ .halt_reg = 0x2d118,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x2d118,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_pcie_pipe_clk = {
+ .halt_reg = 0x2d04c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_rx0_clk = {
+ .halt_reg = 0x2d0e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d0e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_rx1_clk = {
+ .halt_reg = 0x2d0fc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d0fc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_usb_pipe_clk = {
+ .halt_reg = 0x2d0e0,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2d0e0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2d0e0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_sec_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_sb_if_clk = {
+ .halt_reg = 0x2d048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_sb_if_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_sb_if_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_sys_clk = {
+ .halt_reg = 0x2d05c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_tmu_clk = {
+ .halt_reg = 0x2d0a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2d0a8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2d0a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_tmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_tmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_uc_hrr_clk = {
+ .halt_reg = 0x2d06c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d06c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_uc_hrr_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_cfg_ahb_clk = {
+ .halt_reg = 0xba458,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba458,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xba458,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_dp0_clk = {
+ .halt_reg = 0xe0070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_dp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_dp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_dp1_clk = {
+ .halt_reg = 0xe0128,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0128,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_dp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_dp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_master_clk = {
+ .halt_reg = 0xe001c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe001c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_p2rr2p_pipe_clk = {
+ .halt_reg = 0xe00f8,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xe00f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_pcie_pipe_clk = {
+ .halt_reg = 0xe004c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_rx0_clk = {
+ .halt_reg = 0xe00c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe00c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_rx1_clk = {
+ .halt_reg = 0xe00dc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe00dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_usb_pipe_clk = {
+ .halt_reg = 0xe00c0,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xe00c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe00c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_tert_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_sb_if_clk = {
+ .halt_reg = 0xe0048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_sb_if_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_sb_if_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_sys_clk = {
+ .halt_reg = 0xe005c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe005c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_tmu_clk = {
+ .halt_reg = 0xe00a0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe00a0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe00a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_tmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_tmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_uc_hrr_clk = {
+ .halt_reg = 0xe006c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe006c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_uc_hrr_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x3201c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x3201c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0c_clk = {
+ .halt_reg = 0x32030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0c_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x32044,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
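+/*
+ * GDSCs: the power domains exported by this clock controller. The
+ * *_wait_val fields program the EN_REST_WAIT/EN_FEW_WAIT/CLK_DIS_WAIT
+ * timing fields of the GDSCR, POLL_CFG_GDSCR polls the CFG register for
+ * the power state to settle, RETAIN_FF_ENABLE preserves register
+ * contents across power collapse, and VOTABLE domains are enabled by
+ * voting rather than direct control (see drivers/clk/qcom/gdsc.h).
+ */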
+static struct gdsc gcc_pcie_0_tunnel_gdsc = {
+ .gdscr = 0xc8004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_0_tunnel_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_1_tunnel_gdsc = {
+ .gdscr = 0x2e004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_1_tunnel_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_2_tunnel_gdsc = {
+ .gdscr = 0xc0004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_2_tunnel_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_3a_gdsc = {
+ .gdscr = 0xdc004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_3a_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_3a_phy_gdsc = {
+ .gdscr = 0x6c004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_3a_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_3b_gdsc = {
+ .gdscr = 0x94004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_3b_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_3b_phy_gdsc = {
+ .gdscr = 0x75004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_3b_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_4_gdsc = {
+ .gdscr = 0x88004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_4_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_4_phy_gdsc = {
+ .gdscr = 0xd3004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_4_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_5_gdsc = {
+ .gdscr = 0xc3004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_5_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_5_phy_gdsc = {
+ .gdscr = 0xd2004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_5_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_6_gdsc = {
+ .gdscr = 0x8a004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_6_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_6_phy_gdsc = {
+ .gdscr = 0xd4004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_6_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_ufs_phy_gdsc = {
+ .gdscr = 0x77008,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb20_prim_gdsc = {
+ .gdscr = 0xbc004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb20_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_mp_gdsc = {
+ .gdscr = 0x9a010,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_mp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_prim_gdsc = {
+ .gdscr = 0x3f01c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_sec_gdsc = {
+ .gdscr = 0xe2010,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_tert_gdsc = {
+ .gdscr = 0xe1010,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_tert_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb3_mp_ss0_phy_gdsc = {
+ .gdscr = 0x5400c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb3_mp_ss0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb3_mp_ss1_phy_gdsc = {
+ .gdscr = 0x5402c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb3_mp_ss1_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb4_0_gdsc = {
+ .gdscr = 0x2b008,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb4_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb4_1_gdsc = {
+ .gdscr = 0x2d008,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb4_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb4_2_gdsc = {
+ .gdscr = 0xe0008,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb4_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb_0_phy_gdsc = {
+ .gdscr = 0xdb024,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb_0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb_1_phy_gdsc = {
+ .gdscr = 0x2c024,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb_1_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb_2_phy_gdsc = {
+ .gdscr = 0xbe024,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb_2_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *gcc_glymur_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_3A_WEST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_3a_west_sf_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_3B_WEST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_3b_west_sf_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_4_WEST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_4_west_sf_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_5_EAST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_5_east_sf_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_6_WEST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_6_west_sf_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB2_PRIM_AXI_CLK] = &gcc_aggre_usb2_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_MP_AXI_CLK] = &gcc_aggre_usb3_mp_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_AGGRE_USB3_TERT_AXI_CLK] = &gcc_aggre_usb3_tert_axi_clk.clkr,
+ [GCC_AGGRE_USB4_0_AXI_CLK] = &gcc_aggre_usb4_0_axi_clk.clkr,
+ [GCC_AGGRE_USB4_1_AXI_CLK] = &gcc_aggre_usb4_1_axi_clk.clkr,
+ [GCC_AGGRE_USB4_2_AXI_CLK] = &gcc_aggre_usb4_2_axi_clk.clkr,
+ [GCC_AV1E_AHB_CLK] = &gcc_av1e_ahb_clk.clkr,
+ [GCC_AV1E_AXI_CLK] = &gcc_av1e_axi_clk.clkr,
+ [GCC_AV1E_XO_CLK] = &gcc_av1e_xo_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_SOUTH_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_south_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB2_PRIM_AXI_CLK] = &gcc_cfg_noc_usb2_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_MP_AXI_CLK] = &gcc_cfg_noc_usb3_mp_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_TERT_AXI_CLK] = &gcc_cfg_noc_usb3_tert_axi_clk.clkr,
+ [GCC_CFG_NOC_USB_ANOC_AHB_CLK] = &gcc_cfg_noc_usb_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB_ANOC_SOUTH_AHB_CLK] = &gcc_cfg_noc_usb_anoc_south_ahb_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_EVA_AHB_CLK] = &gcc_eva_ahb_clk.clkr,
+ [GCC_EVA_AXI0_CLK] = &gcc_eva_axi0_clk.clkr,
+ [GCC_EVA_AXI0C_CLK] = &gcc_eva_axi0c_clk.clkr,
+ [GCC_EVA_XO_CLK] = &gcc_eva_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL1] = &gcc_gpll1.clkr,
+ [GCC_GPLL14] = &gcc_gpll14.clkr,
+ [GCC_GPLL14_OUT_EVEN] = &gcc_gpll14_out_even.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL5] = &gcc_gpll5.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL8] = &gcc_gpll8.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GEMNOC_GFX_CLK] = &gcc_gpu_gemnoc_gfx_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK] = &gcc_pcie_1_phy_rchng_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK] = &gcc_pcie_2_aux_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK_SRC] = &gcc_pcie_2_aux_clk_src.clkr,
+ [GCC_PCIE_2_CFG_AHB_CLK] = &gcc_pcie_2_cfg_ahb_clk.clkr,
+ [GCC_PCIE_2_MSTR_AXI_CLK] = &gcc_pcie_2_mstr_axi_clk.clkr,
+ [GCC_PCIE_2_PHY_RCHNG_CLK] = &gcc_pcie_2_phy_rchng_clk.clkr,
+ [GCC_PCIE_2_PHY_RCHNG_CLK_SRC] = &gcc_pcie_2_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_2_PIPE_CLK] = &gcc_pcie_2_pipe_clk.clkr,
+ [GCC_PCIE_2_SLV_AXI_CLK] = &gcc_pcie_2_slv_axi_clk.clkr,
+ [GCC_PCIE_2_SLV_Q2A_AXI_CLK] = &gcc_pcie_2_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_3A_AUX_CLK] = &gcc_pcie_3a_aux_clk.clkr,
+ [GCC_PCIE_3A_AUX_CLK_SRC] = &gcc_pcie_3a_aux_clk_src.clkr,
+ [GCC_PCIE_3A_CFG_AHB_CLK] = &gcc_pcie_3a_cfg_ahb_clk.clkr,
+ [GCC_PCIE_3A_MSTR_AXI_CLK] = &gcc_pcie_3a_mstr_axi_clk.clkr,
+ [GCC_PCIE_3A_PHY_RCHNG_CLK] = &gcc_pcie_3a_phy_rchng_clk.clkr,
+ [GCC_PCIE_3A_PHY_RCHNG_CLK_SRC] = &gcc_pcie_3a_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_3A_PIPE_CLK] = &gcc_pcie_3a_pipe_clk.clkr,
+ [GCC_PCIE_3A_PIPE_CLK_SRC] = &gcc_pcie_3a_pipe_clk_src.clkr,
+ [GCC_PCIE_3A_SLV_AXI_CLK] = &gcc_pcie_3a_slv_axi_clk.clkr,
+ [GCC_PCIE_3A_SLV_Q2A_AXI_CLK] = &gcc_pcie_3a_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_3B_AUX_CLK] = &gcc_pcie_3b_aux_clk.clkr,
+ [GCC_PCIE_3B_AUX_CLK_SRC] = &gcc_pcie_3b_aux_clk_src.clkr,
+ [GCC_PCIE_3B_CFG_AHB_CLK] = &gcc_pcie_3b_cfg_ahb_clk.clkr,
+ [GCC_PCIE_3B_MSTR_AXI_CLK] = &gcc_pcie_3b_mstr_axi_clk.clkr,
+ [GCC_PCIE_3B_PHY_RCHNG_CLK] = &gcc_pcie_3b_phy_rchng_clk.clkr,
+ [GCC_PCIE_3B_PHY_RCHNG_CLK_SRC] = &gcc_pcie_3b_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_3B_PIPE_CLK] = &gcc_pcie_3b_pipe_clk.clkr,
+ [GCC_PCIE_3B_PIPE_CLK_SRC] = &gcc_pcie_3b_pipe_clk_src.clkr,
+ [GCC_PCIE_3B_PIPE_DIV2_CLK] = &gcc_pcie_3b_pipe_div2_clk.clkr,
+ [GCC_PCIE_3B_PIPE_DIV_CLK_SRC] = &gcc_pcie_3b_pipe_div_clk_src.clkr,
+ [GCC_PCIE_3B_SLV_AXI_CLK] = &gcc_pcie_3b_slv_axi_clk.clkr,
+ [GCC_PCIE_3B_SLV_Q2A_AXI_CLK] = &gcc_pcie_3b_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_4_AUX_CLK] = &gcc_pcie_4_aux_clk.clkr,
+ [GCC_PCIE_4_AUX_CLK_SRC] = &gcc_pcie_4_aux_clk_src.clkr,
+ [GCC_PCIE_4_CFG_AHB_CLK] = &gcc_pcie_4_cfg_ahb_clk.clkr,
+ [GCC_PCIE_4_MSTR_AXI_CLK] = &gcc_pcie_4_mstr_axi_clk.clkr,
+ [GCC_PCIE_4_PHY_RCHNG_CLK] = &gcc_pcie_4_phy_rchng_clk.clkr,
+ [GCC_PCIE_4_PHY_RCHNG_CLK_SRC] = &gcc_pcie_4_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_4_PIPE_CLK] = &gcc_pcie_4_pipe_clk.clkr,
+ [GCC_PCIE_4_PIPE_CLK_SRC] = &gcc_pcie_4_pipe_clk_src.clkr,
+ [GCC_PCIE_4_PIPE_DIV2_CLK] = &gcc_pcie_4_pipe_div2_clk.clkr,
+ [GCC_PCIE_4_PIPE_DIV_CLK_SRC] = &gcc_pcie_4_pipe_div_clk_src.clkr,
+ [GCC_PCIE_4_SLV_AXI_CLK] = &gcc_pcie_4_slv_axi_clk.clkr,
+ [GCC_PCIE_4_SLV_Q2A_AXI_CLK] = &gcc_pcie_4_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_5_AUX_CLK] = &gcc_pcie_5_aux_clk.clkr,
+ [GCC_PCIE_5_AUX_CLK_SRC] = &gcc_pcie_5_aux_clk_src.clkr,
+ [GCC_PCIE_5_CFG_AHB_CLK] = &gcc_pcie_5_cfg_ahb_clk.clkr,
+ [GCC_PCIE_5_MSTR_AXI_CLK] = &gcc_pcie_5_mstr_axi_clk.clkr,
+ [GCC_PCIE_5_PHY_RCHNG_CLK] = &gcc_pcie_5_phy_rchng_clk.clkr,
+ [GCC_PCIE_5_PHY_RCHNG_CLK_SRC] = &gcc_pcie_5_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_5_PIPE_CLK] = &gcc_pcie_5_pipe_clk.clkr,
+ [GCC_PCIE_5_PIPE_CLK_SRC] = &gcc_pcie_5_pipe_clk_src.clkr,
+ [GCC_PCIE_5_PIPE_DIV2_CLK] = &gcc_pcie_5_pipe_div2_clk.clkr,
+ [GCC_PCIE_5_PIPE_DIV_CLK_SRC] = &gcc_pcie_5_pipe_div_clk_src.clkr,
+ [GCC_PCIE_5_SLV_AXI_CLK] = &gcc_pcie_5_slv_axi_clk.clkr,
+ [GCC_PCIE_5_SLV_Q2A_AXI_CLK] = &gcc_pcie_5_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_6_AUX_CLK] = &gcc_pcie_6_aux_clk.clkr,
+ [GCC_PCIE_6_AUX_CLK_SRC] = &gcc_pcie_6_aux_clk_src.clkr,
+ [GCC_PCIE_6_CFG_AHB_CLK] = &gcc_pcie_6_cfg_ahb_clk.clkr,
+ [GCC_PCIE_6_MSTR_AXI_CLK] = &gcc_pcie_6_mstr_axi_clk.clkr,
+ [GCC_PCIE_6_PHY_RCHNG_CLK] = &gcc_pcie_6_phy_rchng_clk.clkr,
+ [GCC_PCIE_6_PHY_RCHNG_CLK_SRC] = &gcc_pcie_6_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_6_PIPE_CLK] = &gcc_pcie_6_pipe_clk.clkr,
+ [GCC_PCIE_6_PIPE_CLK_SRC] = &gcc_pcie_6_pipe_clk_src.clkr,
+ [GCC_PCIE_6_PIPE_DIV2_CLK] = &gcc_pcie_6_pipe_div2_clk.clkr,
+ [GCC_PCIE_6_PIPE_DIV_CLK_SRC] = &gcc_pcie_6_pipe_div_clk_src.clkr,
+ [GCC_PCIE_6_SLV_AXI_CLK] = &gcc_pcie_6_slv_axi_clk.clkr,
+ [GCC_PCIE_6_SLV_Q2A_AXI_CLK] = &gcc_pcie_6_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_NOC_PWRCTL_CLK] = &gcc_pcie_noc_pwrctl_clk.clkr,
+ [GCC_PCIE_NOC_QOSGEN_EXTREF_CLK] = &gcc_pcie_noc_qosgen_extref_clk.clkr,
+ [GCC_PCIE_NOC_SF_CENTER_CLK] = &gcc_pcie_noc_sf_center_clk.clkr,
+ [GCC_PCIE_NOC_SLAVE_SF_EAST_CLK] = &gcc_pcie_noc_slave_sf_east_clk.clkr,
+ [GCC_PCIE_NOC_SLAVE_SF_WEST_CLK] = &gcc_pcie_noc_slave_sf_west_clk.clkr,
+ [GCC_PCIE_NOC_TSCTR_CLK] = &gcc_pcie_noc_tsctr_clk.clkr,
+ [GCC_PCIE_PHY_3A_AUX_CLK] = &gcc_pcie_phy_3a_aux_clk.clkr,
+ [GCC_PCIE_PHY_3A_AUX_CLK_SRC] = &gcc_pcie_phy_3a_aux_clk_src.clkr,
+ [GCC_PCIE_PHY_3B_AUX_CLK] = &gcc_pcie_phy_3b_aux_clk.clkr,
+ [GCC_PCIE_PHY_3B_AUX_CLK_SRC] = &gcc_pcie_phy_3b_aux_clk_src.clkr,
+ [GCC_PCIE_PHY_4_AUX_CLK] = &gcc_pcie_phy_4_aux_clk.clkr,
+ [GCC_PCIE_PHY_4_AUX_CLK_SRC] = &gcc_pcie_phy_4_aux_clk_src.clkr,
+ [GCC_PCIE_PHY_5_AUX_CLK] = &gcc_pcie_phy_5_aux_clk.clkr,
+ [GCC_PCIE_PHY_5_AUX_CLK_SRC] = &gcc_pcie_phy_5_aux_clk_src.clkr,
+ [GCC_PCIE_PHY_6_AUX_CLK] = &gcc_pcie_phy_6_aux_clk.clkr,
+ [GCC_PCIE_PHY_6_AUX_CLK_SRC] = &gcc_pcie_phy_6_aux_clk_src.clkr,
+ [GCC_PCIE_RSCC_CFG_AHB_CLK] = &gcc_pcie_rscc_cfg_ahb_clk.clkr,
+ [GCC_PCIE_RSCC_XO_CLK] = &gcc_pcie_rscc_xo_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_AV1E_AHB_CLK] = &gcc_qmip_av1e_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_CMD_AHB_CLK] = &gcc_qmip_camera_cmd_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_GPU_AHB_CLK] = &gcc_qmip_gpu_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_3A_AHB_CLK] = &gcc_qmip_pcie_3a_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_3B_AHB_CLK] = &gcc_qmip_pcie_3b_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_4_AHB_CLK] = &gcc_qmip_pcie_4_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_5_AHB_CLK] = &gcc_qmip_pcie_5_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_6_AHB_CLK] = &gcc_qmip_pcie_6_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CV_CPU_AHB_CLK] = &gcc_qmip_video_cv_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_V_CPU_AHB_CLK] = &gcc_qmip_video_v_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC1_AHB_CLK] = &gcc_qmip_video_vcodec1_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_OOB_CORE_2X_CLK] = &gcc_qupv3_oob_core_2x_clk.clkr,
+ [GCC_QUPV3_OOB_CORE_CLK] = &gcc_qupv3_oob_core_clk.clkr,
+ [GCC_QUPV3_OOB_M_AHB_CLK] = &gcc_qupv3_oob_m_ahb_clk.clkr,
+ [GCC_QUPV3_OOB_QSPI_S0_CLK] = &gcc_qupv3_oob_qspi_s0_clk.clkr,
+ [GCC_QUPV3_OOB_QSPI_S0_CLK_SRC] = &gcc_qupv3_oob_qspi_s0_clk_src.clkr,
+ [GCC_QUPV3_OOB_QSPI_S1_CLK] = &gcc_qupv3_oob_qspi_s1_clk.clkr,
+ [GCC_QUPV3_OOB_QSPI_S1_CLK_SRC] = &gcc_qupv3_oob_qspi_s1_clk_src.clkr,
+ [GCC_QUPV3_OOB_S0_CLK] = &gcc_qupv3_oob_s0_clk.clkr,
+ [GCC_QUPV3_OOB_S0_CLK_SRC] = &gcc_qupv3_oob_s0_clk_src.clkr,
+ [GCC_QUPV3_OOB_S1_CLK] = &gcc_qupv3_oob_s1_clk.clkr,
+ [GCC_QUPV3_OOB_S1_CLK_SRC] = &gcc_qupv3_oob_s1_clk_src.clkr,
+ [GCC_QUPV3_OOB_S_AHB_CLK] = &gcc_qupv3_oob_s_ahb_clk.clkr,
+ [GCC_QUPV3_OOB_TCXO_CLK] = &gcc_qupv3_oob_tcxo_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S2_CLK] = &gcc_qupv3_wrap0_qspi_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S2_CLK_SRC] = &gcc_qupv3_wrap0_qspi_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S3_CLK] = &gcc_qupv3_wrap0_qspi_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S3_CLK_SRC] = &gcc_qupv3_wrap0_qspi_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S6_CLK] = &gcc_qupv3_wrap0_qspi_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S6_CLK_SRC] = &gcc_qupv3_wrap0_qspi_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S2_CLK] = &gcc_qupv3_wrap1_qspi_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S2_CLK_SRC] = &gcc_qupv3_wrap1_qspi_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S3_CLK] = &gcc_qupv3_wrap1_qspi_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S3_CLK_SRC] = &gcc_qupv3_wrap1_qspi_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S6_CLK] = &gcc_qupv3_wrap1_qspi_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S6_CLK_SRC] = &gcc_qupv3_wrap1_qspi_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S2_CLK] = &gcc_qupv3_wrap2_qspi_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S2_CLK_SRC] = &gcc_qupv3_wrap2_qspi_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S3_CLK] = &gcc_qupv3_wrap2_qspi_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S3_CLK_SRC] = &gcc_qupv3_wrap2_qspi_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S6_CLK] = &gcc_qupv3_wrap2_qspi_s6_clk.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S6_CLK_SRC] = &gcc_qupv3_wrap2_qspi_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK] = &gcc_qupv3_wrap2_s6_clk.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK_SRC] = &gcc_qupv3_wrap2_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK] = &gcc_qupv3_wrap2_s7_clk.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK_SRC] = &gcc_qupv3_wrap2_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB20_MASTER_CLK] = &gcc_usb20_master_clk.clkr,
+ [GCC_USB20_MASTER_CLK_SRC] = &gcc_usb20_master_clk_src.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK] = &gcc_usb20_mock_utmi_clk.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK_SRC] = &gcc_usb20_mock_utmi_clk_src.clkr,
+ [GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb20_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB20_SLEEP_CLK] = &gcc_usb20_sleep_clk.clkr,
+ [GCC_USB30_MP_MASTER_CLK] = &gcc_usb30_mp_master_clk.clkr,
+ [GCC_USB30_MP_MASTER_CLK_SRC] = &gcc_usb30_mp_master_clk_src.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_CLK] = &gcc_usb30_mp_mock_utmi_clk.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_CLK_SRC] = &gcc_usb30_mp_mock_utmi_clk_src.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_mp_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_MP_SLEEP_CLK] = &gcc_usb30_mp_sleep_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] = &gcc_usb30_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+ [GCC_USB30_TERT_MASTER_CLK] = &gcc_usb30_tert_master_clk.clkr,
+ [GCC_USB30_TERT_MASTER_CLK_SRC] = &gcc_usb30_tert_master_clk_src.clkr,
+ [GCC_USB30_TERT_MOCK_UTMI_CLK] = &gcc_usb30_tert_mock_utmi_clk.clkr,
+ [GCC_USB30_TERT_MOCK_UTMI_CLK_SRC] = &gcc_usb30_tert_mock_utmi_clk_src.clkr,
+ [GCC_USB30_TERT_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_tert_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_TERT_SLEEP_CLK] = &gcc_usb30_tert_sleep_clk.clkr,
+ [GCC_USB34_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb34_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB34_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb34_sec_phy_pipe_clk_src.clkr,
+ [GCC_USB34_TERT_PHY_PIPE_CLK_SRC] = &gcc_usb34_tert_phy_pipe_clk_src.clkr,
+ [GCC_USB3_MP_PHY_AUX_CLK] = &gcc_usb3_mp_phy_aux_clk.clkr,
+ [GCC_USB3_MP_PHY_AUX_CLK_SRC] = &gcc_usb3_mp_phy_aux_clk_src.clkr,
+ [GCC_USB3_MP_PHY_COM_AUX_CLK] = &gcc_usb3_mp_phy_com_aux_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_0_CLK] = &gcc_usb3_mp_phy_pipe_0_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_0_CLK_SRC] = &gcc_usb3_mp_phy_pipe_0_clk_src.clkr,
+ [GCC_USB3_MP_PHY_PIPE_1_CLK] = &gcc_usb3_mp_phy_pipe_1_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_1_CLK_SRC] = &gcc_usb3_mp_phy_pipe_1_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb3_sec_phy_pipe_clk_src.clkr,
+ [GCC_USB3_TERT_PHY_AUX_CLK] = &gcc_usb3_tert_phy_aux_clk.clkr,
+ [GCC_USB3_TERT_PHY_AUX_CLK_SRC] = &gcc_usb3_tert_phy_aux_clk_src.clkr,
+ [GCC_USB3_TERT_PHY_COM_AUX_CLK] = &gcc_usb3_tert_phy_com_aux_clk.clkr,
+ [GCC_USB3_TERT_PHY_PIPE_CLK] = &gcc_usb3_tert_phy_pipe_clk.clkr,
+ [GCC_USB3_TERT_PHY_PIPE_CLK_SRC] = &gcc_usb3_tert_phy_pipe_clk_src.clkr,
+ [GCC_USB4_0_CFG_AHB_CLK] = &gcc_usb4_0_cfg_ahb_clk.clkr,
+ [GCC_USB4_0_DP0_CLK] = &gcc_usb4_0_dp0_clk.clkr,
+ [GCC_USB4_0_DP1_CLK] = &gcc_usb4_0_dp1_clk.clkr,
+ [GCC_USB4_0_MASTER_CLK] = &gcc_usb4_0_master_clk.clkr,
+ [GCC_USB4_0_MASTER_CLK_SRC] = &gcc_usb4_0_master_clk_src.clkr,
+ [GCC_USB4_0_PHY_DP0_CLK_SRC] = &gcc_usb4_0_phy_dp0_clk_src.clkr,
+ [GCC_USB4_0_PHY_DP1_CLK_SRC] = &gcc_usb4_0_phy_dp1_clk_src.clkr,
+ [GCC_USB4_0_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_0_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_0_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_0_phy_p2rr2p_pipe_clk_src.clkr,
+ [GCC_USB4_0_PHY_PCIE_PIPE_CLK] = &gcc_usb4_0_phy_pcie_pipe_clk.clkr,
+ [GCC_USB4_0_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_0_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_0_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_0_phy_pcie_pipe_mux_clk_src.clkr,
+ [GCC_USB4_0_PHY_RX0_CLK] = &gcc_usb4_0_phy_rx0_clk.clkr,
+ [GCC_USB4_0_PHY_RX0_CLK_SRC] = &gcc_usb4_0_phy_rx0_clk_src.clkr,
+ [GCC_USB4_0_PHY_RX1_CLK] = &gcc_usb4_0_phy_rx1_clk.clkr,
+ [GCC_USB4_0_PHY_RX1_CLK_SRC] = &gcc_usb4_0_phy_rx1_clk_src.clkr,
+ [GCC_USB4_0_PHY_SYS_CLK_SRC] = &gcc_usb4_0_phy_sys_clk_src.clkr,
+ [GCC_USB4_0_PHY_USB_PIPE_CLK] = &gcc_usb4_0_phy_usb_pipe_clk.clkr,
+ [GCC_USB4_0_SB_IF_CLK] = &gcc_usb4_0_sb_if_clk.clkr,
+ [GCC_USB4_0_SB_IF_CLK_SRC] = &gcc_usb4_0_sb_if_clk_src.clkr,
+ [GCC_USB4_0_SYS_CLK] = &gcc_usb4_0_sys_clk.clkr,
+ [GCC_USB4_0_TMU_CLK] = &gcc_usb4_0_tmu_clk.clkr,
+ [GCC_USB4_0_TMU_CLK_SRC] = &gcc_usb4_0_tmu_clk_src.clkr,
+ [GCC_USB4_0_UC_HRR_CLK] = &gcc_usb4_0_uc_hrr_clk.clkr,
+ [GCC_USB4_1_CFG_AHB_CLK] = &gcc_usb4_1_cfg_ahb_clk.clkr,
+ [GCC_USB4_1_DP0_CLK] = &gcc_usb4_1_dp0_clk.clkr,
+ [GCC_USB4_1_DP1_CLK] = &gcc_usb4_1_dp1_clk.clkr,
+ [GCC_USB4_1_MASTER_CLK] = &gcc_usb4_1_master_clk.clkr,
+ [GCC_USB4_1_MASTER_CLK_SRC] = &gcc_usb4_1_master_clk_src.clkr,
+ [GCC_USB4_1_PHY_DP0_CLK_SRC] = &gcc_usb4_1_phy_dp0_clk_src.clkr,
+ [GCC_USB4_1_PHY_DP1_CLK_SRC] = &gcc_usb4_1_phy_dp1_clk_src.clkr,
+ [GCC_USB4_1_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_1_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_1_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_1_phy_p2rr2p_pipe_clk_src.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPE_CLK] = &gcc_usb4_1_phy_pcie_pipe_clk.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_1_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr,
+ [GCC_USB4_1_PHY_PLL_PIPE_CLK_SRC] = &gcc_usb4_1_phy_pll_pipe_clk_src.clkr,
+ [GCC_USB4_1_PHY_RX0_CLK] = &gcc_usb4_1_phy_rx0_clk.clkr,
+ [GCC_USB4_1_PHY_RX0_CLK_SRC] = &gcc_usb4_1_phy_rx0_clk_src.clkr,
+ [GCC_USB4_1_PHY_RX1_CLK] = &gcc_usb4_1_phy_rx1_clk.clkr,
+ [GCC_USB4_1_PHY_RX1_CLK_SRC] = &gcc_usb4_1_phy_rx1_clk_src.clkr,
+ [GCC_USB4_1_PHY_SYS_CLK_SRC] = &gcc_usb4_1_phy_sys_clk_src.clkr,
+ [GCC_USB4_1_PHY_USB_PIPE_CLK] = &gcc_usb4_1_phy_usb_pipe_clk.clkr,
+ [GCC_USB4_1_SB_IF_CLK] = &gcc_usb4_1_sb_if_clk.clkr,
+ [GCC_USB4_1_SB_IF_CLK_SRC] = &gcc_usb4_1_sb_if_clk_src.clkr,
+ [GCC_USB4_1_SYS_CLK] = &gcc_usb4_1_sys_clk.clkr,
+ [GCC_USB4_1_TMU_CLK] = &gcc_usb4_1_tmu_clk.clkr,
+ [GCC_USB4_1_TMU_CLK_SRC] = &gcc_usb4_1_tmu_clk_src.clkr,
+ [GCC_USB4_1_UC_HRR_CLK] = &gcc_usb4_1_uc_hrr_clk.clkr,
+ [GCC_USB4_2_CFG_AHB_CLK] = &gcc_usb4_2_cfg_ahb_clk.clkr,
+ [GCC_USB4_2_DP0_CLK] = &gcc_usb4_2_dp0_clk.clkr,
+ [GCC_USB4_2_DP1_CLK] = &gcc_usb4_2_dp1_clk.clkr,
+ [GCC_USB4_2_MASTER_CLK] = &gcc_usb4_2_master_clk.clkr,
+ [GCC_USB4_2_MASTER_CLK_SRC] = &gcc_usb4_2_master_clk_src.clkr,
+ [GCC_USB4_2_PHY_DP0_CLK_SRC] = &gcc_usb4_2_phy_dp0_clk_src.clkr,
+ [GCC_USB4_2_PHY_DP1_CLK_SRC] = &gcc_usb4_2_phy_dp1_clk_src.clkr,
+ [GCC_USB4_2_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_2_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_2_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_2_phy_p2rr2p_pipe_clk_src.clkr,
+ [GCC_USB4_2_PHY_PCIE_PIPE_CLK] = &gcc_usb4_2_phy_pcie_pipe_clk.clkr,
+ [GCC_USB4_2_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_2_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_2_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_2_phy_pcie_pipe_mux_clk_src.clkr,
+ [GCC_USB4_2_PHY_RX0_CLK] = &gcc_usb4_2_phy_rx0_clk.clkr,
+ [GCC_USB4_2_PHY_RX0_CLK_SRC] = &gcc_usb4_2_phy_rx0_clk_src.clkr,
+ [GCC_USB4_2_PHY_RX1_CLK] = &gcc_usb4_2_phy_rx1_clk.clkr,
+ [GCC_USB4_2_PHY_RX1_CLK_SRC] = &gcc_usb4_2_phy_rx1_clk_src.clkr,
+ [GCC_USB4_2_PHY_SYS_CLK_SRC] = &gcc_usb4_2_phy_sys_clk_src.clkr,
+ [GCC_USB4_2_PHY_USB_PIPE_CLK] = &gcc_usb4_2_phy_usb_pipe_clk.clkr,
+ [GCC_USB4_2_SB_IF_CLK] = &gcc_usb4_2_sb_if_clk.clkr,
+ [GCC_USB4_2_SB_IF_CLK_SRC] = &gcc_usb4_2_sb_if_clk_src.clkr,
+ [GCC_USB4_2_SYS_CLK] = &gcc_usb4_2_sys_clk.clkr,
+ [GCC_USB4_2_TMU_CLK] = &gcc_usb4_2_tmu_clk.clkr,
+ [GCC_USB4_2_TMU_CLK_SRC] = &gcc_usb4_2_tmu_clk_src.clkr,
+ [GCC_USB4_2_UC_HRR_CLK] = &gcc_usb4_2_uc_hrr_clk.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI0C_CLK] = &gcc_video_axi0c_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+};
+
+static struct gdsc *gcc_glymur_gdscs[] = {
+ [GCC_PCIE_0_TUNNEL_GDSC] = &gcc_pcie_0_tunnel_gdsc,
+ [GCC_PCIE_1_TUNNEL_GDSC] = &gcc_pcie_1_tunnel_gdsc,
+ [GCC_PCIE_2_TUNNEL_GDSC] = &gcc_pcie_2_tunnel_gdsc,
+ [GCC_PCIE_3A_GDSC] = &gcc_pcie_3a_gdsc,
+ [GCC_PCIE_3A_PHY_GDSC] = &gcc_pcie_3a_phy_gdsc,
+ [GCC_PCIE_3B_GDSC] = &gcc_pcie_3b_gdsc,
+ [GCC_PCIE_3B_PHY_GDSC] = &gcc_pcie_3b_phy_gdsc,
+ [GCC_PCIE_4_GDSC] = &gcc_pcie_4_gdsc,
+ [GCC_PCIE_4_PHY_GDSC] = &gcc_pcie_4_phy_gdsc,
+ [GCC_PCIE_5_GDSC] = &gcc_pcie_5_gdsc,
+ [GCC_PCIE_5_PHY_GDSC] = &gcc_pcie_5_phy_gdsc,
+ [GCC_PCIE_6_GDSC] = &gcc_pcie_6_gdsc,
+ [GCC_PCIE_6_PHY_GDSC] = &gcc_pcie_6_phy_gdsc,
+ [GCC_UFS_PHY_GDSC] = &gcc_ufs_phy_gdsc,
+ [GCC_USB20_PRIM_GDSC] = &gcc_usb20_prim_gdsc,
+ [GCC_USB30_MP_GDSC] = &gcc_usb30_mp_gdsc,
+ [GCC_USB30_PRIM_GDSC] = &gcc_usb30_prim_gdsc,
+ [GCC_USB30_SEC_GDSC] = &gcc_usb30_sec_gdsc,
+ [GCC_USB30_TERT_GDSC] = &gcc_usb30_tert_gdsc,
+ [GCC_USB3_MP_SS0_PHY_GDSC] = &gcc_usb3_mp_ss0_phy_gdsc,
+ [GCC_USB3_MP_SS1_PHY_GDSC] = &gcc_usb3_mp_ss1_phy_gdsc,
+ [GCC_USB4_0_GDSC] = &gcc_usb4_0_gdsc,
+ [GCC_USB4_1_GDSC] = &gcc_usb4_1_gdsc,
+ [GCC_USB4_2_GDSC] = &gcc_usb4_2_gdsc,
+ [GCC_USB_0_PHY_GDSC] = &gcc_usb_0_phy_gdsc,
+ [GCC_USB_1_PHY_GDSC] = &gcc_usb_1_phy_gdsc,
+ [GCC_USB_2_PHY_GDSC] = &gcc_usb_2_phy_gdsc,
+};
+
+static const struct qcom_reset_map gcc_glymur_resets[] = {
+ [GCC_AV1E_BCR] = { 0x9b028 },
+ [GCC_CAMERA_BCR] = { 0x26000 },
+ [GCC_DISPLAY_BCR] = { 0x27000 },
+ [GCC_EVA_BCR] = { 0x9b000 },
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0xbc2d0 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0xbc2dc },
+ [GCC_PCIE_0_PHY_BCR] = { 0xbc2d8 },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0xbc2e0 },
+ [GCC_PCIE_0_TUNNEL_BCR] = { 0xc8000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x7f018 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x7f024 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x7f020 },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x7f028 },
+ [GCC_PCIE_1_TUNNEL_BCR] = { 0x2e000 },
+ [GCC_PCIE_2_LINK_DOWN_BCR] = { 0x281d0 },
+ [GCC_PCIE_2_NOCSR_COM_PHY_BCR] = { 0x281dc },
+ [GCC_PCIE_2_PHY_BCR] = { 0x281d8 },
+ [GCC_PCIE_2_PHY_NOCSR_COM_PHY_BCR] = { 0x281e0 },
+ [GCC_PCIE_2_TUNNEL_BCR] = { 0xc0000 },
+ [GCC_PCIE_3A_BCR] = { 0xdc000 },
+ [GCC_PCIE_3A_LINK_DOWN_BCR] = { 0x7b0a0 },
+ [GCC_PCIE_3A_NOCSR_COM_PHY_BCR] = { 0x7b0ac },
+ [GCC_PCIE_3A_PHY_BCR] = { 0x6c000 },
+ [GCC_PCIE_3A_PHY_NOCSR_COM_PHY_BCR] = { 0x7b0b0 },
+ [GCC_PCIE_3B_BCR] = { 0x94000 },
+ [GCC_PCIE_3B_LINK_DOWN_BCR] = { 0x7a0c0 },
+ [GCC_PCIE_3B_NOCSR_COM_PHY_BCR] = { 0x7a0cc },
+ [GCC_PCIE_3B_PHY_BCR] = { 0x75000 },
+ [GCC_PCIE_3B_PHY_NOCSR_COM_PHY_BCR] = { 0x7a0c8 },
+ [GCC_PCIE_4_BCR] = { 0x88000 },
+ [GCC_PCIE_4_LINK_DOWN_BCR] = { 0x980c0 },
+ [GCC_PCIE_4_NOCSR_COM_PHY_BCR] = { 0x980cc },
+ [GCC_PCIE_4_PHY_BCR] = { 0xd3000 },
+ [GCC_PCIE_4_PHY_NOCSR_COM_PHY_BCR] = { 0x980d0 },
+ [GCC_PCIE_5_BCR] = { 0xc3000 },
+ [GCC_PCIE_5_LINK_DOWN_BCR] = { 0x850c0 },
+ [GCC_PCIE_5_NOCSR_COM_PHY_BCR] = { 0x850cc },
+ [GCC_PCIE_5_PHY_BCR] = { 0xd2000 },
+ [GCC_PCIE_5_PHY_NOCSR_COM_PHY_BCR] = { 0x850d0 },
+ [GCC_PCIE_6_BCR] = { 0x8a000 },
+ [GCC_PCIE_6_LINK_DOWN_BCR] = { 0x3a0b0 },
+ [GCC_PCIE_6_NOCSR_COM_PHY_BCR] = { 0x3a0bc },
+ [GCC_PCIE_6_PHY_BCR] = { 0xd4000 },
+ [GCC_PCIE_6_PHY_NOCSR_COM_PHY_BCR] = { 0x3a0c0 },
+ [GCC_PCIE_NOC_BCR] = { 0xba294 },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x7f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x7f010 },
+ [GCC_PCIE_RSCC_BCR] = { 0xb8000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x28000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0xb3000 },
+ [GCC_QUPV3_WRAPPER_2_BCR] = { 0xb4000 },
+ [GCC_QUPV3_WRAPPER_OOB_BCR] = { 0xe7000 },
+ [GCC_QUSB2PHY_HS0_MP_BCR] = { 0xca000 },
+ [GCC_QUSB2PHY_HS1_MP_BCR] = { 0xe6000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0xad024 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0xae000 },
+ [GCC_QUSB2PHY_TERT_BCR] = { 0xc9000 },
+ [GCC_QUSB2PHY_USB20_HS_BCR] = { 0xe9000 },
+ [GCC_SDCC2_BCR] = { 0xb0000 },
+ [GCC_SDCC4_BCR] = { 0xdf000 },
+ [GCC_TCSR_PCIE_BCR] = { 0x281e4 },
+ [GCC_UFS_PHY_BCR] = { 0x77004 },
+ [GCC_USB20_PRIM_BCR] = { 0xbc000 },
+ [GCC_USB30_MP_BCR] = { 0x9a00c },
+ [GCC_USB30_PRIM_BCR] = { 0x3f018 },
+ [GCC_USB30_SEC_BCR] = { 0xe200c },
+ [GCC_USB30_TERT_BCR] = { 0xe100c },
+ [GCC_USB3_MP_SS0_PHY_BCR] = { 0x54008 },
+ [GCC_USB3_MP_SS1_PHY_BCR] = { 0x54028 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0xdb000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x2c000 },
+ [GCC_USB3_PHY_TERT_BCR] = { 0xbe000 },
+ [GCC_USB3_UNIPHY_MP0_BCR] = { 0x54000 },
+ [GCC_USB3_UNIPHY_MP1_BCR] = { 0x54020 },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0xdb004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x2c004 },
+ [GCC_USB3PHY_PHY_TERT_BCR] = { 0xbe004 },
+ [GCC_USB3UNIPHY_PHY_MP0_BCR] = { 0x54004 },
+ [GCC_USB3UNIPHY_PHY_MP1_BCR] = { 0x54024 },
+ [GCC_USB4_0_BCR] = { 0x2b004 },
+ [GCC_USB4_0_DP0_PHY_PRIM_BCR] = { 0xdb010 },
+ [GCC_USB4_1_BCR] = { 0x2d004 },
+ [GCC_USB4_2_BCR] = { 0xe0004 },
+ [GCC_USB_0_PHY_BCR] = { 0xdb020 },
+ [GCC_USB_1_PHY_BCR] = { 0x2c020 },
+ [GCC_USB_2_PHY_BCR] = { 0xbe020 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x3201c, 2 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { 0x32044, 2 },
+ [GCC_VIDEO_BCR] = { 0x32000 },
+};
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_oob_qspi_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_oob_qspi_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_qspi_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_qspi_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_qspi_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_qspi_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_qspi_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_qspi_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s7_clk_src),
+};
+
+static u32 gcc_glymur_critical_cbcrs[] = {
+ 0x26004, /* GCC_CAMERA_AHB_CLK */
+ 0x26040, /* GCC_CAMERA_XO_CLK */
+ 0x27004, /* GCC_DISP_AHB_CLK */
+ 0x71004, /* GCC_GPU_CFG_AHB_CLK */
+ 0x32004, /* GCC_VIDEO_AHB_CLK */
+ 0x32058, /* GCC_VIDEO_XO_CLK */
+};
+
+static const struct regmap_config gcc_glymur_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f8ff0,
+ .fast_io = true,
+};
+
+static void clk_glymur_regs_configure(struct device *dev, struct regmap *regmap)
+{
+ /* FORCE_MEM_CORE_ON for ufs phy ice core clocks */
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+}
+
+static struct qcom_cc_driver_data gcc_glymur_driver_data = {
+ .clk_cbcrs = gcc_glymur_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(gcc_glymur_critical_cbcrs),
+ .dfs_rcgs = gcc_dfs_clocks,
+ .num_dfs_rcgs = ARRAY_SIZE(gcc_dfs_clocks),
+ .clk_regs_configure = clk_glymur_regs_configure,
+};
+
+static const struct qcom_cc_desc gcc_glymur_desc = {
+ .config = &gcc_glymur_regmap_config,
+ .clks = gcc_glymur_clocks,
+ .num_clks = ARRAY_SIZE(gcc_glymur_clocks),
+ .resets = gcc_glymur_resets,
+ .num_resets = ARRAY_SIZE(gcc_glymur_resets),
+ .gdscs = gcc_glymur_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_glymur_gdscs),
+ .driver_data = &gcc_glymur_driver_data,
+};
+
+static const struct of_device_id gcc_glymur_match_table[] = {
+ { .compatible = "qcom,glymur-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_glymur_match_table);
+
+static int gcc_glymur_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &gcc_glymur_desc);
+}
+
+static struct platform_driver gcc_glymur_driver = {
+ .probe = gcc_glymur_probe,
+ .driver = {
+ .name = "gcc-glymur",
+ .of_match_table = gcc_glymur_match_table,
+ },
+};
+
+static int __init gcc_glymur_init(void)
+{
+ return platform_driver_register(&gcc_glymur_driver);
+}
+subsys_initcall(gcc_glymur_init);
+
+static void __exit gcc_glymur_exit(void)
+{
+ platform_driver_unregister(&gcc_glymur_driver);
+}
+module_exit(gcc_glymur_exit);
+
+MODULE_DESCRIPTION("QTI GCC GLYMUR Driver");
+MODULE_LICENSE("GPL");
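For context, consumers see the new glymur clocks through the generic clk API; a minimal sketch of a consumer, assuming a hypothetical device node wired to this provider through clocks/clock-names (the "core" con-id and example_probe() are illustrative, not from the driver):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *ahb;

	/* Resolved against the consumer's clocks/clock-names properties;
	 * the provider side is the gcc_glymur_clocks[] table above.
	 */
	ahb = devm_clk_get_enabled(&pdev->dev, "core");
	if (IS_ERR(ahb))
		return PTR_ERR(ahb);

	return 0;
}

The GCC_* indices used in the consumer's clocks phandle come from the dt-bindings header matched by the "qcom,glymur-gcc" compatible.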
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
index d38628b52268..5ac44cfb53ce 100644
--- a/drivers/clk/qcom/gcc-ipq4019.c
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -125,21 +125,23 @@ static const struct clk_fepll_vco gcc_fepll_vco = {
* It looks up the frequency table and returns the next higher frequency
* supported in hardware.
*/
-static long clk_cpu_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *p_rate)
+static int clk_cpu_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fepll *pll = to_clk_fepll(hw);
struct clk_hw *p_hw;
const struct freq_tbl *f;
- f = qcom_find_freq(pll->freq_tbl, rate);
+ f = qcom_find_freq(pll->freq_tbl, req->rate);
if (!f)
return -EINVAL;
p_hw = clk_hw_get_parent_by_index(hw, f->src);
- *p_rate = clk_hw_get_rate(p_hw);
+ req->best_parent_rate = clk_hw_get_rate(p_hw);
+
+ req->rate = f->freq;
- return f->freq;
+ return 0;
};
/*
@@ -205,7 +207,7 @@ clk_cpu_div_recalc_rate(struct clk_hw *hw,
};
static const struct clk_ops clk_regmap_cpu_div_ops = {
- .round_rate = clk_cpu_div_round_rate,
+ .determine_rate = clk_cpu_div_determine_rate,
.set_rate = clk_cpu_div_set_rate,
.recalc_rate = clk_cpu_div_recalc_rate,
};
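The gcc-ipq4019 hunk above is part of the tree-wide move from .round_rate to .determine_rate: results now travel through struct clk_rate_request (req->rate, req->best_parent_rate) and the return value carries only 0 or -errno. A minimal sketch of the shape, assuming a simple integer divider (example_determine_rate() is illustrative, not the FEPLL code):

static int example_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	unsigned long parent_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
	unsigned long div;

	if (!req->rate)
		return -EINVAL;

	/* Closest achievable rate with an integer divider */
	div = max(1UL, DIV_ROUND_CLOSEST(parent_rate, req->rate));

	req->best_parent_rate = parent_rate;
	req->rate = parent_rate / div;

	return 0;
}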
diff --git a/drivers/clk/qcom/gcc-ipq5018.c b/drivers/clk/qcom/gcc-ipq5018.c
index 70f5dcb96700..dcda2be8c1a5 100644
--- a/drivers/clk/qcom/gcc-ipq5018.c
+++ b/drivers/clk/qcom/gcc-ipq5018.c
@@ -1371,7 +1371,7 @@ static struct clk_branch gcc_xo_clk = {
&gcc_xo_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -3660,7 +3660,7 @@ static const struct qcom_reset_map gcc_ipq5018_resets[] = {
[GCC_WCSS_AXI_S_ARES] = { 0x59008, 6 },
[GCC_WCSS_Q6_BCR] = { 0x18004, 0 },
[GCC_WCSSAON_RESET] = { 0x59010, 0},
- [GCC_GEPHY_MISC_ARES] = { 0x56004, 0 },
+ [GCC_GEPHY_MISC_ARES] = { 0x56004, .bitmask = GENMASK(3, 0) },
};
static const struct of_device_id gcc_ipq5018_match_table[] = {
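The GCC_GEPHY_MISC_ARES entry now sets .bitmask instead of a single bit index, relying on the reset framework's multi-bit support: when qcom_reset_map.bitmask is non-zero the whole field is toggled, otherwise BIT(map->bit) is used as before. Roughly (a sketch of the assert/deassert update, not the exact reset.c code):

static int example_reset_toggle(struct regmap *regmap,
				const struct qcom_reset_map *map,
				bool assert)
{
	/* GENMASK(3, 0) drives all four GEPHY reset bits together */
	u32 mask = map->bitmask ? map->bitmask : BIT(map->bit);

	return regmap_update_bits(regmap, map->reg, mask, assert ? mask : 0);
}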
diff --git a/drivers/clk/qcom/gcc-ipq5424.c b/drivers/clk/qcom/gcc-ipq5424.c
index 3d42f3d85c7a..35af6ffeeb85 100644
--- a/drivers/clk/qcom/gcc-ipq5424.c
+++ b/drivers/clk/qcom/gcc-ipq5424.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/clk-provider.h>
@@ -79,6 +79,20 @@ static struct clk_fixed_factor gpll0_div2 = {
},
};
+static struct clk_alpha_pll_postdiv gpll0_out_aux = {
+ .offset = 0x20000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpll0_out_aux",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
static struct clk_alpha_pll gpll2 = {
.offset = 0x21000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA],
@@ -2934,6 +2948,7 @@ static struct clk_regmap *gcc_ipq5424_clocks[] = {
[GPLL2] = &gpll2.clkr,
[GPLL2_OUT_MAIN] = &gpll2_out_main.clkr,
[GPLL4] = &gpll4.clkr,
+ [GPLL0_OUT_AUX] = &gpll0_out_aux.clkr,
};
static const struct qcom_reset_map gcc_ipq5424_resets[] = {
@@ -3250,6 +3265,16 @@ static const struct qcom_icc_hws_data icc_ipq5424_hws[] = {
{ MASTER_ANOC_PCIE3, SLAVE_ANOC_PCIE3, GCC_ANOC_PCIE3_2LANE_M_CLK },
{ MASTER_CNOC_PCIE3, SLAVE_CNOC_PCIE3, GCC_CNOC_PCIE3_2LANE_S_CLK },
{ MASTER_CNOC_USB, SLAVE_CNOC_USB, GCC_CNOC_USB_CLK },
+ { MASTER_NSSNOC_NSSCC, SLAVE_NSSNOC_NSSCC, GCC_NSSNOC_NSSCC_CLK },
+ { MASTER_NSSNOC_SNOC_0, SLAVE_NSSNOC_SNOC_0, GCC_NSSNOC_SNOC_CLK },
+ { MASTER_NSSNOC_SNOC_1, SLAVE_NSSNOC_SNOC_1, GCC_NSSNOC_SNOC_1_CLK },
+ { MASTER_NSSNOC_PCNOC_1, SLAVE_NSSNOC_PCNOC_1, GCC_NSSNOC_PCNOC_1_CLK },
+ { MASTER_NSSNOC_QOSGEN_REF, SLAVE_NSSNOC_QOSGEN_REF, GCC_NSSNOC_QOSGEN_REF_CLK },
+ { MASTER_NSSNOC_TIMEOUT_REF, SLAVE_NSSNOC_TIMEOUT_REF, GCC_NSSNOC_TIMEOUT_REF_CLK },
+ { MASTER_NSSNOC_XO_DCD, SLAVE_NSSNOC_XO_DCD, GCC_NSSNOC_XO_DCD_CLK },
+ { MASTER_NSSNOC_ATB, SLAVE_NSSNOC_ATB, GCC_NSSNOC_ATB_CLK },
+ { MASTER_CNOC_LPASS_CFG, SLAVE_CNOC_LPASS_CFG, GCC_CNOC_LPASS_CFG_CLK },
+ { MASTER_SNOC_LPASS, SLAVE_SNOC_LPASS, GCC_SNOC_LPASS_CLK },
};
static const struct of_device_id gcc_ipq5424_match_table[] = {
@@ -3284,6 +3309,7 @@ static const struct qcom_cc_desc gcc_ipq5424_desc = {
.num_clk_hws = ARRAY_SIZE(gcc_ipq5424_hws),
.icc_hws = icc_ipq5424_hws,
.num_icc_hws = ARRAY_SIZE(icc_ipq5424_hws),
+ .icc_first_node_id = IPQ_APPS_ID,
};
static int gcc_ipq5424_probe(struct platform_device *pdev)
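The new qcom_icc_hws_data rows let gcc-ipq5424 double as a simple interconnect provider: each entry ties a master/slave node pair to the branch clock that must run for traffic on that path, and .icc_first_node_id anchors the node numbering. A hypothetical consumer (the "nsscc" path name and example_icc_probe() are illustrative) would just request bandwidth and let the provider keep the clock enabled:

#include <linux/err.h>
#include <linux/interconnect.h>
#include <linux/platform_device.h>

static int example_icc_probe(struct platform_device *pdev)
{
	struct icc_path *path;

	path = devm_of_icc_get(&pdev->dev, "nsscc");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* Any non-zero request keeps the backing GCC clock on (kBps) */
	return icc_set_bw(path, 0, 100000);
}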
diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
index d861191b0c85..d4fc491a18b2 100644
--- a/drivers/clk/qcom/gcc-ipq6018.c
+++ b/drivers/clk/qcom/gcc-ipq6018.c
@@ -511,15 +511,23 @@ static struct clk_rcg2 apss_ahb_clk_src = {
},
};
-static const struct freq_tbl ftbl_nss_port5_rx_clk_src[] = {
- F(24000000, P_XO, 1, 0, 0),
- F(25000000, P_UNIPHY1_RX, 12.5, 0, 0),
- F(25000000, P_UNIPHY0_RX, 5, 0, 0),
- F(78125000, P_UNIPHY1_RX, 4, 0, 0),
- F(125000000, P_UNIPHY1_RX, 2.5, 0, 0),
- F(125000000, P_UNIPHY0_RX, 1, 0, 0),
- F(156250000, P_UNIPHY1_RX, 2, 0, 0),
- F(312500000, P_UNIPHY1_RX, 1, 0, 0),
+static const struct freq_conf ftbl_nss_port5_rx_clk_src_25[] = {
+ C(P_UNIPHY1_RX, 12.5, 0, 0),
+ C(P_UNIPHY0_RX, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_port5_rx_clk_src_125[] = {
+ C(P_UNIPHY1_RX, 2.5, 0, 0),
+ C(P_UNIPHY0_RX, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_port5_rx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_port5_rx_clk_src_25),
+ FMS(78125000, P_UNIPHY1_RX, 4, 0, 0),
+ FM(125000000, ftbl_nss_port5_rx_clk_src_125),
+ FMS(156250000, P_UNIPHY1_RX, 2, 0, 0),
+ FMS(312500000, P_UNIPHY1_RX, 1, 0, 0),
{ }
};
@@ -547,26 +555,34 @@ gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias_map[] = {
static struct clk_rcg2 nss_port5_rx_clk_src = {
.cmd_rcgr = 0x68060,
- .freq_tbl = ftbl_nss_port5_rx_clk_src,
+ .freq_multi_tbl = ftbl_nss_port5_rx_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "nss_port5_rx_clk_src",
.parent_data = gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias,
.num_parents = 7,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_fm_ops,
},
};
-static const struct freq_tbl ftbl_nss_port5_tx_clk_src[] = {
- F(24000000, P_XO, 1, 0, 0),
- F(25000000, P_UNIPHY1_TX, 12.5, 0, 0),
- F(25000000, P_UNIPHY0_TX, 5, 0, 0),
- F(78125000, P_UNIPHY1_TX, 4, 0, 0),
- F(125000000, P_UNIPHY1_TX, 2.5, 0, 0),
- F(125000000, P_UNIPHY0_TX, 1, 0, 0),
- F(156250000, P_UNIPHY1_TX, 2, 0, 0),
- F(312500000, P_UNIPHY1_TX, 1, 0, 0),
+static const struct freq_conf ftbl_nss_port5_tx_clk_src_25[] = {
+ C(P_UNIPHY1_TX, 12.5, 0, 0),
+ C(P_UNIPHY0_TX, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_port5_tx_clk_src_125[] = {
+ C(P_UNIPHY1_TX, 2.5, 0, 0),
+ C(P_UNIPHY0_TX, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_port5_tx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_port5_tx_clk_src_25),
+ FMS(78125000, P_UNIPHY1_TX, 4, 0, 0),
+ FM(125000000, ftbl_nss_port5_tx_clk_src_125),
+ FMS(156250000, P_UNIPHY1_TX, 2, 0, 0),
+ FMS(312500000, P_UNIPHY1_TX, 1, 0, 0),
{ }
};
@@ -594,14 +610,14 @@ gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias_map[] = {
static struct clk_rcg2 nss_port5_tx_clk_src = {
.cmd_rcgr = 0x68068,
- .freq_tbl = ftbl_nss_port5_tx_clk_src,
+ .freq_multi_tbl = ftbl_nss_port5_tx_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "nss_port5_tx_clk_src",
.parent_data = gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias,
.num_parents = 7,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_fm_ops,
},
};
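The port5 rework replaces freq_tbl, which allows only one parent per row (qcom_find_freq() stops at the first rate match, so the duplicate 25 MHz and 125 MHz rows were dead entries), with freq_multi_tbl, where FM() groups alternative parent configurations and FMS() keeps single-parent rows. A rough illustration of the lookup clk_rcg2_fm_ops has to perform, simplified in two ways: src is treated as a parent index here, whereas the real code maps it through parent_map, and pre_div is stored as 2*div-1 to encode half-integer dividers:

static const struct freq_conf *
example_find_conf(const struct freq_multi_tbl *f, struct clk_hw *hw,
		  unsigned long rate)
{
	size_t i;

	for (; f->freq; f++) {
		if (f->freq != rate)
			continue;
		for (i = 0; i < f->num_confs; i++) {
			struct clk_hw *p;

			p = clk_hw_get_parent_by_index(hw, f->confs[i].src);
			/* exact division test, with pre_div = 2*div-1 */
			if (clk_hw_get_rate(p) * 2 == rate * (f->confs[i].pre_div + 1))
				return &f->confs[i];
		}
	}

	return NULL;
}

So for the FM(25000000, ...) row above, UNIPHY1_RX at 312.5 MHz satisfies the 12.5 divider and UNIPHY0_RX at 125 MHz satisfies the 5 divider, and the winner follows whichever UNIPHY the port is muxed to.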
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index 7258ba5c0900..1329ea28d703 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -1895,10 +1895,10 @@ static const struct freq_conf ftbl_nss_port6_tx_clk_src_125[] = {
static const struct freq_multi_tbl ftbl_nss_port6_tx_clk_src[] = {
FMS(19200000, P_XO, 1, 0, 0),
FM(25000000, ftbl_nss_port6_tx_clk_src_25),
- FMS(78125000, P_UNIPHY1_RX, 4, 0, 0),
+ FMS(78125000, P_UNIPHY2_TX, 4, 0, 0),
FM(125000000, ftbl_nss_port6_tx_clk_src_125),
- FMS(156250000, P_UNIPHY1_RX, 2, 0, 0),
- FMS(312500000, P_UNIPHY1_RX, 1, 0, 0),
+ FMS(156250000, P_UNIPHY2_TX, 2, 0, 0),
+ FMS(312500000, P_UNIPHY2_TX, 1, 0, 0),
{ }
};
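The gcc-ipq8074 fix retargets the port6 TX single-parent rows from UNIPHY1 RX to UNIPHY2 TX; the dividers are untouched, and they still check out against the 312.5 MHz UNIPHY2 TX rate implied by the last row:

/*
 * 312500000 / 4 = 78125000   (first FMS row)
 * 312500000 / 2 = 156250000  (second FMS row)
 * 312500000 / 1 = 312500000  (third FMS row)
 */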
diff --git a/drivers/clk/qcom/gcc-milos.c b/drivers/clk/qcom/gcc-milos.c
new file mode 100644
index 000000000000..c9d61b05bafa
--- /dev/null
+++ b/drivers/clk/qcom/gcc-milos.c
@@ -0,0 +1,3225 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,milos-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_PCIE_0_PIPE,
+ DT_PCIE_1_PIPE,
+ DT_UFS_PHY_RX_SYMBOL_0,
+ DT_UFS_PHY_RX_SYMBOL_1,
+ DT_UFS_PHY_TX_SYMBOL_0,
+ DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL0_OUT_ODD,
+ P_GCC_GPLL2_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL6_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_PCIE_0_PIPE_CLK,
+ P_PCIE_1_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll2 = {
+ .offset = 0x2000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll6 = {
+ .offset = 0x6000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL6_OUT_MAIN, 2 },
+ { P_GCC_GPLL7_OUT_MAIN, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll6.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_PCIE_0_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_PCIE_0_PIPE },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_PCIE_1_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_PCIE_1_PIPE },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL6_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_ODD, 3 },
+ { P_GCC_GPLL2_OUT_MAIN, 4 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll6.clkr.hw },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll2.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL6_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_ODD, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll6.clkr.hw },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_0 },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_13[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_1 },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_14[] = {
+ { .index = DT_UFS_PHY_TX_SYMBOL_0 },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_15[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_15[] = {
+ { .index = DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE },
+ { .index = DT_BI_TCXO },
+};
+
+static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0x6b070,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_6,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = {
+ .reg = 0x9006c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_7,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x77064,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_12,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_12),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x770e0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_13,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_13,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_13),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x77054,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_14,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_14,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_14),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x3906c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_15,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_15,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_15),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
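+/*
+ * RCGs using clk_rcg2_shared_ops are parked at XO when disabled rather than
+ * switched off outright, so the last programmed configuration is preserved
+ * and restored on re-enable.
+ */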
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b074,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x6b058,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x90070,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x90054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_qspi_ref_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
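+/*
+ * The QUP serial-engine RCGs keep their init data in separately named
+ * structs so it can be referenced again at probe time (typically for DFS
+ * registration).
+ */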
+static struct clk_init_data gcc_qupv3_wrap0_qspi_ref_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_qspi_ref_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_qspi_ref_clk_src = {
+ .cmd_rcgr = 0x18768,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_qspi_ref_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_qspi_ref_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x18010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s3_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x18290,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s4_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(128000000, P_GCC_GPLL6_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x183c8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x18500,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x18638,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_qspi_ref_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_ref_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_ref_clk_src = {
+ .cmd_rcgr = 0x1e768,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_qspi_ref_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_ref_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x1e010,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x1e148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x1e290,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x1e3c8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x1e500,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x1e638,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(20000000, P_GCC_GPLL0_OUT_EVEN, 5, 1, 3),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(192000000, P_GCC_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GCC_GPLL6_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0xa3014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0xa3038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(37500000, P_GCC_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x14018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77030,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x77080,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77098,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
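+/*
+ * The F() macro stores the pre-divider as 2 * h - 1, so the half-integer
+ * dividers below (4.5, 2.5) encode exactly as integer register values.
+ */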
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x3902c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x39044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x39070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
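+/*
+ * Read-only post-dividers: clk_regmap_div_ro_ops only reports the divider
+ * value programmed by firmware and never rewrites it; rate requests
+ * propagate to the parent via CLK_SET_RATE_PARENT.
+ */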
+static struct clk_regmap_div gcc_pcie_0_pipe_div2_clk_src = {
+ .reg = 0x6b094,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_div2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_1_pipe_div2_clk_src = {
+ .reg = 0x90090,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_div2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap0_s2_clk_src = {
+ .reg = 0x18280,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap1_s2_clk_src = {
+ .reg = 0x1e280,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x3905c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
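+/*
+ * Branch clocks: entries whose enable_reg lies in the 0x52000..0x52018
+ * range are controlled through shared voting registers. BRANCH_HALT_VOTED
+ * relaxes the halt-bit check for such votable branches, while
+ * BRANCH_HALT_SKIP and BRANCH_HALT_DELAY avoid polling the halt bit at all.
+ */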
+static struct clk_branch gcc_aggre_noc_pcie_axi_clk = {
+ .halt_reg = 0x1005c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x1005c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770e4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
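+/*
+ * The *_hw_ctl_clk branches share the halt/enable register with their
+ * parent branch; enable_mask BIT(1) is the CBCR HW_CTL bit, which hands
+ * gating of the clock over to hardware control.
+ */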
+static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x770e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770e4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770e4,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x39090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x39090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x39090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x26014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_ahb_clk = {
+ .halt_reg = 0x10050,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x10050,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x3908c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3908c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie_sf_axi_clk = {
+ .halt_reg = 0x10058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x7115c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x7115c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7115c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_pcie_sf_qtb_clk = {
+ .halt_reg = 0x1006c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x1006c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_pcie_sf_qtb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x2700c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x71010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b03c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b02c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x6b02c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0x6b054,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b048,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_div2_clk = {
+ .halt_reg = 0x6b098,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_div2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x90038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x90034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x90034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x90028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x90028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_rchng_clk = {
+ .halt_reg = 0x90050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x90044,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_div2_clk = {
+ .halt_reg = 0x90094,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_div2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x9001c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9001c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x90018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rscc_cfg_ahb_clk = {
+ .halt_reg = 0x11004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x11004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rscc_xo_clk = {
+ .halt_reg = 0x11008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x27008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x27008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_ahb_clk = {
+ .halt_reg = 0x71008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_gpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_ahb_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cv_cpu_ahb_clk = {
+ .halt_reg = 0x32014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cv_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x32008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_v_cpu_ahb_clk = {
+ .halt_reg = 0x32010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_v_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x3200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x23018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x23008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_qspi_ref_clk = {
+ .halt_reg = 0x18764,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_qspi_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x1813c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x18284,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x183bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x184f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x1862c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x23168,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x23158,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_ref_clk = {
+ .halt_reg = 0x1e764,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x1e13c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x1e274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x1e284,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x1e3bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x1e4f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x1e62c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x23000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x23150,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x23154,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23154,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0xa3004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa3004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0xa3008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa3008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0xa302c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa302c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xa302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77074,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x77074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77074,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77074,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x770b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x770b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770b0,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x7702c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770cc,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x770cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x77028,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x77068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x77068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77068,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_hw_ctl_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_atb_clk = {
+ .halt_reg = 0x39088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x39088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_atb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x39018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x39028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x39024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x39060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x39064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x39068,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x39068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x39068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x32018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(0),
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc pcie_0_phy_gdsc = {
+ .gdscr = 0x6c000,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(1),
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "pcie_0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x90004,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(3),
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc pcie_1_phy_gdsc = {
+ .gdscr = 0xa2000,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(4),
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "pcie_1_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc ufs_mem_phy_gdsc = {
+ .gdscr = 0x9e000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "ufs_mem_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0x39004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_RET_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc usb3_phy_gdsc = {
+ .gdscr = 0x5000c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "usb3_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_RET_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *gcc_milos_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_AXI_CLK] = &gcc_aggre_noc_pcie_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CNOC_PCIE_SF_AXI_CLK] = &gcc_cnoc_pcie_sf_axi_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DDRSS_PCIE_SF_QTB_CLK] = &gcc_ddrss_pcie_sf_qtb_clk.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL2] = &gcc_gpll2.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL6] = &gcc_gpll6.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_DIV2_CLK] = &gcc_pcie_0_pipe_div2_clk.clkr,
+ [GCC_PCIE_0_PIPE_DIV2_CLK_SRC] = &gcc_pcie_0_pipe_div2_clk_src.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK] = &gcc_pcie_1_phy_rchng_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK_SRC] = &gcc_pcie_1_pipe_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_DIV2_CLK] = &gcc_pcie_1_pipe_div2_clk.clkr,
+ [GCC_PCIE_1_PIPE_DIV2_CLK_SRC] = &gcc_pcie_1_pipe_div2_clk_src.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_RSCC_CFG_AHB_CLK] = &gcc_pcie_rscc_cfg_ahb_clk.clkr,
+ [GCC_PCIE_RSCC_XO_CLK] = &gcc_pcie_rscc_xo_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_GPU_AHB_CLK] = &gcc_qmip_gpu_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_AHB_CLK] = &gcc_qmip_pcie_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CV_CPU_AHB_CLK] = &gcc_qmip_video_cv_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_V_CPU_AHB_CLK] = &gcc_qmip_video_v_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_REF_CLK] = &gcc_qupv3_wrap0_qspi_ref_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_REF_CLK_SRC] = &gcc_qupv3_wrap0_qspi_ref_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_REF_CLK] = &gcc_qupv3_wrap1_qspi_ref_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC] = &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_phy_ice_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr,
+ [GCC_USB30_PRIM_ATB_CLK] = &gcc_usb30_prim_atb_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+};
+
+static const struct qcom_reset_map gcc_milos_resets[] = {
+ [GCC_CAMERA_BCR] = { 0x26000 },
+ [GCC_DISPLAY_BCR] = { 0x27000 },
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x6c014 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x6c020 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x6c028 },
+ [GCC_PCIE_1_BCR] = { 0x90000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x8e014 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x8e020 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x8e024 },
+ [GCC_PCIE_RSCC_BCR] = { 0x11000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x18000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x1e000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC1_BCR] = { 0xa3000 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0x39000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x32018, 2 },
+ [GCC_VIDEO_BCR] = { 0x32000 },
+};
+
+static const struct clk_rcg_dfs_data gcc_milos_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_qspi_ref_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_ref_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+};
+
+static struct gdsc *gcc_milos_gdscs[] = {
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_0_PHY_GDSC] = &pcie_0_phy_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [PCIE_1_PHY_GDSC] = &pcie_1_phy_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [UFS_MEM_PHY_GDSC] = &ufs_mem_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB3_PHY_GDSC] = &usb3_phy_gdsc,
+};
+
+static u32 gcc_milos_critical_cbcrs[] = {
+ 0x26004, /* GCC_CAMERA_AHB_CLK */
+ 0x26018, /* GCC_CAMERA_HF_XO_CLK */
+ 0x2601c, /* GCC_CAMERA_SF_XO_CLK */
+ 0x27004, /* GCC_DISP_AHB_CLK */
+ 0x27018, /* GCC_DISP_XO_CLK */
+ 0x71004, /* GCC_GPU_CFG_AHB_CLK */
+ 0x32004, /* GCC_VIDEO_AHB_CLK */
+ 0x32024, /* GCC_VIDEO_XO_CLK */
+};
+
+static const struct regmap_config gcc_milos_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f41f0,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data gcc_milos_driver_data = {
+ .clk_cbcrs = gcc_milos_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(gcc_milos_critical_cbcrs),
+ .dfs_rcgs = gcc_milos_dfs_clocks,
+ .num_dfs_rcgs = ARRAY_SIZE(gcc_milos_dfs_clocks),
+};
+
+static const struct qcom_cc_desc gcc_milos_desc = {
+ .config = &gcc_milos_regmap_config,
+ .clks = gcc_milos_clocks,
+ .num_clks = ARRAY_SIZE(gcc_milos_clocks),
+ .resets = gcc_milos_resets,
+ .num_resets = ARRAY_SIZE(gcc_milos_resets),
+ .gdscs = gcc_milos_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_milos_gdscs),
+ .use_rpm = true,
+ .driver_data = &gcc_milos_driver_data,
+};
+
+static const struct of_device_id gcc_milos_match_table[] = {
+ { .compatible = "qcom,milos-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_milos_match_table);
+
+static int gcc_milos_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &gcc_milos_desc);
+}
+
+static struct platform_driver gcc_milos_driver = {
+ .probe = gcc_milos_probe,
+ .driver = {
+ .name = "gcc-milos",
+ .of_match_table = gcc_milos_match_table,
+ },
+};
+
+static int __init gcc_milos_init(void)
+{
+ return platform_driver_register(&gcc_milos_driver);
+}
+subsys_initcall(gcc_milos_init);
+
+static void __exit gcc_milos_exit(void)
+{
+ platform_driver_unregister(&gcc_milos_driver);
+}
+module_exit(gcc_milos_exit);
+
+MODULE_DESCRIPTION("QTI GCC Milos Driver");
+MODULE_LICENSE("GPL");
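As context for the new Milos GCC driver above: its branch clocks are consumed through the standard common-clock API, with CLK_SET_RATE_PARENT forwarding rate requests from the branch to its RCG. A minimal consumer sketch follows; the "axi" clock-names entry and the 300 MHz rate are illustrative assumptions for the example, not taken from this patch.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/*
 * Hypothetical consumer of gcc_ufs_phy_axi_clk. The connection id
 * "axi" and the target rate are assumptions for the example only.
 */
static int example_enable_ufs_axi(struct device *dev)
{
	struct clk *axi;
	int ret;

	axi = devm_clk_get(dev, "axi");	/* resolved via DT clock-names */
	if (IS_ERR(axi))
		return PTR_ERR(axi);

	/* CLK_SET_RATE_PARENT lets this request propagate to the RCG */
	ret = clk_set_rate(axi, 300000000);
	if (ret)
		return ret;

	return clk_prepare_enable(axi);
}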
diff --git a/drivers/clk/qcom/gcc-msm8917.c b/drivers/clk/qcom/gcc-msm8917.c
index 3e2a2ae2ee6e..0a1aa623cd49 100644
--- a/drivers/clk/qcom/gcc-msm8917.c
+++ b/drivers/clk/qcom/gcc-msm8917.c
@@ -37,6 +37,8 @@ enum {
DT_SLEEP_CLK,
DT_DSI0PLL,
DT_DSI0PLL_BYTE,
+ DT_DSI1PLL,
+ DT_DSI1PLL_BYTE,
};
enum {
@@ -48,6 +50,8 @@ enum {
P_GPLL6,
P_DSI0PLL,
P_DSI0PLL_BYTE,
+ P_DSI1PLL,
+ P_DSI1PLL_BYTE,
};
static struct clk_alpha_pll gpll0_sleep_clk_src = {
@@ -102,7 +106,11 @@ static const struct pll_vco gpll3_p_vco[] = {
{ 700000000, 1400000000, 0 },
};
-static const struct alpha_pll_config gpll3_early_config = {
+static const struct pll_vco gpll3_p_vco_msm8937[] = {
+ { 525000000, 1066000000, 0 },
+};
+
+static struct alpha_pll_config gpll3_early_config = {
.l = 63,
.config_ctl_val = 0x4001055b,
.early_output_mask = 0,
@@ -273,6 +281,19 @@ static const struct freq_tbl ftbl_blsp_i2c_apps_clk_src[] = {
{ }
};
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0200c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
.cmd_rcgr = 0x03000,
.hid_width = 5,
@@ -351,6 +372,19 @@ static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
}
};
+static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x18000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct freq_tbl ftbl_blsp_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
@@ -362,6 +396,20 @@ static const struct freq_tbl ftbl_blsp_spi_apps_clk_src[] = {
{ }
};
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x02024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
.cmd_rcgr = 0x03014,
.hid_width = 5,
@@ -446,6 +494,20 @@ static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
}
};
+static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x18024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
F(3686400, P_GPLL0, 1, 72, 15625),
F(7372800, P_GPLL0, 1, 144, 15625),
@@ -525,11 +587,19 @@ static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
static const struct parent_map gcc_byte0_map[] = {
{ P_XO, 0 },
{ P_DSI0PLL_BYTE, 1 },
+ { P_DSI1PLL_BYTE, 3 },
+};
+
+static const struct parent_map gcc_byte1_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 3 },
+ { P_DSI1PLL_BYTE, 1 },
};
static const struct clk_parent_data gcc_byte_data[] = {
{ .index = DT_XO },
{ .index = DT_DSI0PLL_BYTE },
+ { .index = DT_DSI1PLL_BYTE },
};
static struct clk_rcg2 byte0_clk_src = {
@@ -545,6 +615,19 @@ static struct clk_rcg2 byte0_clk_src = {
}
};
+static struct clk_rcg2 byte1_clk_src = {
+ .cmd_rcgr = 0x4d0b0,
+ .hid_width = 5,
+ .parent_map = gcc_byte1_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte1_clk_src",
+ .parent_data = gcc_byte_data,
+ .num_parents = ARRAY_SIZE(gcc_byte_data),
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static const struct freq_tbl ftbl_camss_gp_clk_src[] = {
F(100000000, P_GPLL0, 8, 0, 0),
F(160000000, P_GPLL0, 5, 0, 0),
@@ -642,6 +725,17 @@ static const struct freq_tbl ftbl_cpp_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_cpp_clk_src_msm8937[] = {
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 5, 0, 0),
+ F(266666667, P_GPLL0, 3, 0, 0),
+ F(308570000, P_GPLL6, 3.5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(360000000, P_GPLL6, 3, 0, 0),
+ { }
+};
+
static struct clk_rcg2 cpp_clk_src = {
.cmd_rcgr = 0x58018,
.hid_width = 5,
@@ -655,6 +749,13 @@ static struct clk_rcg2 cpp_clk_src = {
}
};
+static struct clk_init_data vcodec0_clk_src_init_msm8937 = {
+ .name = "vcodec0_clk_src",
+ .parent_data = gcc_cpp_data,
+ .num_parents = ARRAY_SIZE(gcc_cpp_data),
+ .ops = &clk_rcg2_ops,
+};
+
static const struct freq_tbl ftbl_crypto_clk_src[] = {
F(50000000, P_GPLL0, 16, 0, 0),
F(80000000, P_GPLL0, 10, 0, 0),
@@ -730,6 +831,13 @@ static const struct freq_tbl ftbl_csi_phytimer_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_csi_phytimer_clk_src_msm8937[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
static struct clk_rcg2 csi0phytimer_clk_src = {
.cmd_rcgr = 0x4e000,
.hid_width = 5,
@@ -774,6 +882,19 @@ static struct clk_rcg2 esc0_clk_src = {
}
};
+static struct clk_rcg2 esc1_clk_src = {
+ .cmd_rcgr = 0x4d0a8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_esc0_1_clk_src,
+ .parent_map = gcc_xo_gpll0_out_aux_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc1_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct parent_map gcc_gfx3d_map[] = {
{ P_XO, 0 },
{ P_GPLL0, 1 },
@@ -817,6 +938,25 @@ static const struct freq_tbl ftbl_gfx3d_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_gfx3d_clk_src_msm8937[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(216000000, P_GPLL6, 5, 0, 0),
+ F(228570000, P_GPLL0, 3.5, 0, 0),
+ F(240000000, P_GPLL6, 4.5, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(300000000, P_GPLL3, 1, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(375000000, P_GPLL3, 1, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(450000000, P_GPLL3, 1, 0, 0),
+ { }
+};
+
static struct clk_rcg2 gfx3d_clk_src = {
.cmd_rcgr = 0x59000,
.hid_width = 5,
@@ -973,21 +1113,29 @@ static struct clk_rcg2 mdp_clk_src = {
}
};
-static const struct parent_map gcc_pclk_map[] = {
+static const struct parent_map gcc_pclk0_map[] = {
{ P_XO, 0 },
{ P_DSI0PLL, 1 },
+ { P_DSI1PLL, 3 },
+};
+
+static const struct parent_map gcc_pclk1_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 3 },
+ { P_DSI1PLL, 1 },
};
static const struct clk_parent_data gcc_pclk_data[] = {
{ .index = DT_XO },
{ .index = DT_DSI0PLL },
+ { .index = DT_DSI1PLL },
};
static struct clk_rcg2 pclk0_clk_src = {
.cmd_rcgr = 0x4d000,
.hid_width = 5,
.mnd_width = 8,
- .parent_map = gcc_pclk_map,
+ .parent_map = gcc_pclk0_map,
.clkr.hw.init = &(struct clk_init_data) {
.name = "pclk0_clk_src",
.parent_data = gcc_pclk_data,
@@ -997,6 +1145,20 @@ static struct clk_rcg2 pclk0_clk_src = {
}
};
+static struct clk_rcg2 pclk1_clk_src = {
+ .cmd_rcgr = 0x4d0b8,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_pclk1_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk1_clk_src",
+ .parent_data = gcc_pclk_data,
+ .num_parents = ARRAY_SIZE(gcc_pclk_data),
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static const struct freq_tbl ftbl_pdm2_clk_src[] = {
F(64000000, P_GPLL0, 12.5, 0, 0),
{ }
@@ -1108,6 +1270,14 @@ static const struct freq_tbl ftbl_usb_hs_system_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_usb_hs_system_clk_src_msm8937[] = {
+ F(57142857, P_GPLL0, 14, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133333333, P_GPLL0, 6, 0, 0),
+ F(177777778, P_GPLL0, 4.5, 0, 0),
+ { }
+};
+
static struct clk_rcg2 usb_hs_system_clk_src = {
.cmd_rcgr = 0x41010,
.hid_width = 5,
@@ -1132,6 +1302,15 @@ static const struct freq_tbl ftbl_vcodec0_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_vcodec0_clk_src_msm8937[] = {
+ F(166150000, P_GPLL6, 6.5, 0, 0),
+ F(240000000, P_GPLL6, 4.5, 0, 0),
+ F(308571428, P_GPLL6, 3.5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(360000000, P_GPLL6, 3, 0, 0),
+ { }
+};
+
static struct clk_rcg2 vcodec0_clk_src = {
.cmd_rcgr = 0x4c000,
.hid_width = 5,
@@ -1160,6 +1339,23 @@ static const struct freq_tbl ftbl_vfe_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_vfe_clk_src_msm8937[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133333333, P_GPLL0, 6, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(177777778, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266666667, P_GPLL0, 3, 0, 0),
+ F(308571428, P_GPLL6, 3.5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(360000000, P_GPLL6, 3, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(432000000, P_GPLL6, 2.5, 0, 0),
+ { }
+};
+
static struct clk_rcg2 vfe0_clk_src = {
.cmd_rcgr = 0x58000,
.hid_width = 5,
@@ -1269,6 +1465,24 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
}
};
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x02008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x02008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
.halt_reg = 0x03010,
.halt_check = BRANCH_HALT,
@@ -1377,6 +1591,42 @@ static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
}
};
+static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
+ .halt_reg = 0x18020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x18020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup4_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x02004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x02004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
.halt_reg = 0x0300c,
.halt_check = BRANCH_HALT,
@@ -1485,6 +1735,24 @@ static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
}
};
+static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
+ .halt_reg = 0x1801c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup4_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_blsp1_uart1_apps_clk = {
.halt_reg = 0x0203c,
.halt_check = BRANCH_HALT,
@@ -2521,6 +2789,24 @@ static struct clk_branch gcc_mdss_byte0_clk = {
}
};
+static struct clk_branch gcc_mdss_byte1_clk = {
+ .halt_reg = 0x4d0a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d0a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_mdss_esc0_clk = {
.halt_reg = 0x4d098,
.halt_check = BRANCH_HALT,
@@ -2539,6 +2825,24 @@ static struct clk_branch gcc_mdss_esc0_clk = {
}
};
+static struct clk_branch gcc_mdss_esc1_clk = {
+ .halt_reg = 0x4d09c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d09c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_mdss_mdp_clk = {
.halt_reg = 0x4d088,
.halt_check = BRANCH_HALT,
@@ -2575,6 +2879,24 @@ static struct clk_branch gcc_mdss_pclk0_clk = {
}
};
+static struct clk_branch gcc_mdss_pclk1_clk = {
+ .halt_reg = 0x4d0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_mdss_vsync_clk = {
.halt_reg = 0x4d090,
.halt_check = BRANCH_HALT,
@@ -2632,6 +2954,24 @@ static struct clk_branch gcc_oxili_ahb_clk = {
}
};
+static struct clk_branch gcc_oxili_aon_clk = {
+ .halt_reg = 0x5904c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5904c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_aon_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_oxili_gfx3d_clk = {
.halt_reg = 0x59020,
.halt_check = BRANCH_HALT,
@@ -2650,6 +2990,19 @@ static struct clk_branch gcc_oxili_gfx3d_clk = {
}
};
+static struct clk_branch gcc_oxili_timer_clk = {
+ .halt_reg = 0x59040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_timer_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_pdm2_clk = {
.halt_reg = 0x4400c,
.halt_check = BRANCH_HALT,
@@ -3027,6 +3380,28 @@ static struct gdsc oxili_gx_gdsc = {
.flags = CLAMP_IO,
};
+static struct gdsc oxili_gx_gdsc_msm8937 = {
+ .gdscr = 0x5901c,
+ .clamp_io_ctrl = 0x5b00c,
+ .cxcs = (unsigned int []){ 0x59000 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "oxili_gx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO,
+};
+
+static struct gdsc oxili_cx_gdsc = {
+ .gdscr = 0x59044,
+ .cxcs = (unsigned int []){ 0x59020 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "oxili_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
static struct gdsc cpp_gdsc = {
.gdscr = 0x58078,
.cxcs = (unsigned int []){ 0x5803c, 0x58064 },
@@ -3207,6 +3582,188 @@ static struct clk_regmap *gcc_msm8917_clocks[] = {
[GCC_VFE_TBU_CLK] = &gcc_vfe_tbu_clk.clkr,
};
+static struct clk_regmap *gcc_msm8937_clocks[] = {
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_EARLY] = &gpll0_early.clkr,
+ [GPLL0_SLEEP_CLK_SRC] = &gpll0_sleep_clk_src.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL3_EARLY] = &gpll3_early.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL4_EARLY] = &gpll4_early.clkr,
+ [GPLL6] = &gpll6,
+ [GPLL6_EARLY] = &gpll6_early.clkr,
+ [APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
+ [MSM8937_BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [MSM8937_BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr,
+ [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr,
+ [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr,
+ [MSM8937_BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr,
+ [MSM8937_BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr,
+ [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr,
+ [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr,
+ [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [MSM8937_BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
+ [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+ [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+ [CAMSS_TOP_AHB_CLK_SRC] = &camss_top_ahb_clk_src.clkr,
+ [CCI_CLK_SRC] = &cci_clk_src.clkr,
+ [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+ [CRYPTO_CLK_SRC] = &crypto_clk_src.clkr,
+ [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+ [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+ [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+ [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [CSI2_CLK_SRC] = &csi2_clk_src.clkr,
+ [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [MSM8937_ESC1_CLK_SRC] = &esc1_clk_src.clkr,
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+ [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+ [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+ [MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
+ [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [MSM8937_PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC1_ICE_CORE_CLK_SRC] = &sdcc1_ice_core_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
+ [VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr,
+ [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+ [VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
+ [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr,
+ [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [MSM8937_GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [MSM8937_GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+ [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr,
+ [MSM8937_GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr,
+ [MSM8937_GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr,
+ [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMSS_AHB_CLK] = &gcc_camss_ahb_clk.clkr,
+ [GCC_CAMSS_CCI_AHB_CLK] = &gcc_camss_cci_ahb_clk.clkr,
+ [GCC_CAMSS_CCI_CLK] = &gcc_camss_cci_clk.clkr,
+ [GCC_CAMSS_CPP_AHB_CLK] = &gcc_camss_cpp_ahb_clk.clkr,
+ [GCC_CAMSS_CPP_CLK] = &gcc_camss_cpp_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+ [GCC_CAMSS_CSI0PHY_CLK] = &gcc_camss_csi0phy_clk.clkr,
+ [GCC_CAMSS_CSI0PIX_CLK] = &gcc_camss_csi0pix_clk.clkr,
+ [GCC_CAMSS_CSI0RDI_CLK] = &gcc_camss_csi0rdi_clk.clkr,
+ [GCC_CAMSS_CSI0_AHB_CLK] = &gcc_camss_csi0_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0_CLK] = &gcc_camss_csi0_clk.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr,
+ [GCC_CAMSS_CSI1PHY_CLK] = &gcc_camss_csi1phy_clk.clkr,
+ [GCC_CAMSS_CSI1PIX_CLK] = &gcc_camss_csi1pix_clk.clkr,
+ [GCC_CAMSS_CSI1RDI_CLK] = &gcc_camss_csi1rdi_clk.clkr,
+ [GCC_CAMSS_CSI1_AHB_CLK] = &gcc_camss_csi1_ahb_clk.clkr,
+ [GCC_CAMSS_CSI1_CLK] = &gcc_camss_csi1_clk.clkr,
+ [GCC_CAMSS_CSI2PHY_CLK] = &gcc_camss_csi2phy_clk.clkr,
+ [GCC_CAMSS_CSI2PIX_CLK] = &gcc_camss_csi2pix_clk.clkr,
+ [GCC_CAMSS_CSI2RDI_CLK] = &gcc_camss_csi2rdi_clk.clkr,
+ [GCC_CAMSS_CSI2_AHB_CLK] = &gcc_camss_csi2_ahb_clk.clkr,
+ [GCC_CAMSS_CSI2_CLK] = &gcc_camss_csi2_clk.clkr,
+ [GCC_CAMSS_CSI_VFE0_CLK] = &gcc_camss_csi_vfe0_clk.clkr,
+ [GCC_CAMSS_CSI_VFE1_CLK] = &gcc_camss_csi_vfe1_clk.clkr,
+ [GCC_CAMSS_GP0_CLK] = &gcc_camss_gp0_clk.clkr,
+ [GCC_CAMSS_GP1_CLK] = &gcc_camss_gp1_clk.clkr,
+ [GCC_CAMSS_ISPIF_AHB_CLK] = &gcc_camss_ispif_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG0_CLK] = &gcc_camss_jpeg0_clk.clkr,
+ [GCC_CAMSS_JPEG_AHB_CLK] = &gcc_camss_jpeg_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG_AXI_CLK] = &gcc_camss_jpeg_axi_clk.clkr,
+ [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+ [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+ [GCC_CAMSS_MCLK2_CLK] = &gcc_camss_mclk2_clk.clkr,
+ [GCC_CAMSS_MICRO_AHB_CLK] = &gcc_camss_micro_ahb_clk.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+ [GCC_CAMSS_VFE0_AHB_CLK] = &gcc_camss_vfe0_ahb_clk.clkr,
+ [GCC_CAMSS_VFE0_AXI_CLK] = &gcc_camss_vfe0_axi_clk.clkr,
+ [GCC_CAMSS_VFE0_CLK] = &gcc_camss_vfe0_clk.clkr,
+ [GCC_CAMSS_VFE1_AHB_CLK] = &gcc_camss_vfe1_ahb_clk.clkr,
+ [GCC_CAMSS_VFE1_AXI_CLK] = &gcc_camss_vfe1_axi_clk.clkr,
+ [GCC_CAMSS_VFE1_CLK] = &gcc_camss_vfe1_clk.clkr,
+ [GCC_CPP_TBU_CLK] = &gcc_cpp_tbu_clk.clkr,
+ [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+ [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+ [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+ [GCC_DCC_CLK] = &gcc_dcc_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_JPEG_TBU_CLK] = &gcc_jpeg_tbu_clk.clkr,
+ [GCC_MDP_TBU_CLK] = &gcc_mdp_tbu_clk.clkr,
+ [GCC_MDSS_AHB_CLK] = &gcc_mdss_ahb_clk.clkr,
+ [GCC_MDSS_AXI_CLK] = &gcc_mdss_axi_clk.clkr,
+ [GCC_MDSS_BYTE0_CLK] = &gcc_mdss_byte0_clk.clkr,
+ [MSM8937_GCC_MDSS_BYTE1_CLK] = &gcc_mdss_byte1_clk.clkr,
+ [GCC_MDSS_ESC0_CLK] = &gcc_mdss_esc0_clk.clkr,
+ [MSM8937_GCC_MDSS_ESC1_CLK] = &gcc_mdss_esc1_clk.clkr,
+ [GCC_MDSS_MDP_CLK] = &gcc_mdss_mdp_clk.clkr,
+ [GCC_MDSS_PCLK0_CLK] = &gcc_mdss_pclk0_clk.clkr,
+ [MSM8937_GCC_MDSS_PCLK1_CLK] = &gcc_mdss_pclk1_clk.clkr,
+ [GCC_MDSS_VSYNC_CLK] = &gcc_mdss_vsync_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
+ [GCC_OXILI_AHB_CLK] = &gcc_oxili_ahb_clk.clkr,
+ [MSM8937_GCC_OXILI_AON_CLK] = &gcc_oxili_aon_clk.clkr,
+ [GCC_OXILI_GFX3D_CLK] = &gcc_oxili_gfx3d_clk.clkr,
+ [MSM8937_GCC_OXILI_TIMER_CLK] = &gcc_oxili_timer_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr,
+ [GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr,
+ [GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr,
+ [GCC_USB_HS_PHY_CFG_AHB_CLK] = &gcc_usb_hs_phy_cfg_ahb_clk.clkr,
+ [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+ [GCC_VENUS0_AHB_CLK] = &gcc_venus0_ahb_clk.clkr,
+ [GCC_VENUS0_AXI_CLK] = &gcc_venus0_axi_clk.clkr,
+ [GCC_VENUS0_CORE0_VCODEC0_CLK] = &gcc_venus0_core0_vcodec0_clk.clkr,
+ [GCC_VENUS0_VCODEC0_CLK] = &gcc_venus0_vcodec0_clk.clkr,
+ [GCC_VENUS_TBU_CLK] = &gcc_venus_tbu_clk.clkr,
+ [GCC_VFE1_TBU_CLK] = &gcc_vfe1_tbu_clk.clkr,
+ [GCC_VFE_TBU_CLK] = &gcc_vfe_tbu_clk.clkr,
+};
+
static const struct qcom_reset_map gcc_msm8917_resets[] = {
[GCC_CAMSS_MICRO_BCR] = { 0x56008 },
[GCC_MSS_BCR] = { 0x71000 },
@@ -3234,6 +3791,18 @@ static struct gdsc *gcc_msm8917_gdscs[] = {
[VFE1_GDSC] = &vfe1_gdsc,
};
+static struct gdsc *gcc_msm8937_gdscs[] = {
+ [CPP_GDSC] = &cpp_gdsc,
+ [JPEG_GDSC] = &jpeg_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [OXILI_GX_GDSC] = &oxili_gx_gdsc_msm8937,
+ [MSM8937_OXILI_CX_GDSC] = &oxili_cx_gdsc,
+ [VENUS_CORE0_GDSC] = &venus_core0_gdsc,
+ [VENUS_GDSC] = &venus_gdsc,
+ [VFE0_GDSC] = &vfe0_gdsc,
+ [VFE1_GDSC] = &vfe1_gdsc,
+};
+
static const struct qcom_cc_desc gcc_msm8917_desc = {
.config = &gcc_msm8917_regmap_config,
.clks = gcc_msm8917_clocks,
@@ -3254,6 +3823,41 @@ static const struct qcom_cc_desc gcc_qm215_desc = {
.num_gdscs = ARRAY_SIZE(gcc_msm8917_gdscs),
};
+static const struct qcom_cc_desc gcc_msm8937_desc = {
+ .config = &gcc_msm8917_regmap_config,
+ .clks = gcc_msm8937_clocks,
+ .num_clks = ARRAY_SIZE(gcc_msm8937_clocks),
+ .resets = gcc_msm8917_resets,
+ .num_resets = ARRAY_SIZE(gcc_msm8917_resets),
+ .gdscs = gcc_msm8937_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8937_gdscs),
+};
+
+static void msm8937_clock_override(void)
+{
+ /* GPLL3 750MHz configuration */
+ gpll3_early_config.l = 47;
+ gpll3_early.vco_table = gpll3_p_vco_msm8937;
+ gpll3_early.num_vco = ARRAY_SIZE(gpll3_p_vco_msm8937);
+
+ /*
+	 * Set the clocks below to use the msm8937-specific parent map.
+ */
+ vcodec0_clk_src.parent_map = gcc_cpp_map;
+ vcodec0_clk_src.clkr.hw.init = &vcodec0_clk_src_init_msm8937;
+
+ /*
+	 * Set the clocks below to use the msm8937-specific frequency tables.
+ */
+ vfe0_clk_src.freq_tbl = ftbl_vfe_clk_src_msm8937;
+ vfe1_clk_src.freq_tbl = ftbl_vfe_clk_src_msm8937;
+ cpp_clk_src.freq_tbl = ftbl_cpp_clk_src_msm8937;
+ vcodec0_clk_src.freq_tbl = ftbl_vcodec0_clk_src_msm8937;
+ csi0phytimer_clk_src.freq_tbl = ftbl_csi_phytimer_clk_src_msm8937;
+ csi1phytimer_clk_src.freq_tbl = ftbl_csi_phytimer_clk_src_msm8937;
+ usb_hs_system_clk_src.freq_tbl = ftbl_usb_hs_system_clk_src_msm8937;
+}
+
static int gcc_msm8917_probe(struct platform_device *pdev)
{
struct regmap *regmap;
@@ -3261,8 +3865,12 @@ static int gcc_msm8917_probe(struct platform_device *pdev)
gcc_desc = of_device_get_match_data(&pdev->dev);
- if (gcc_desc == &gcc_qm215_desc)
+ if (gcc_desc == &gcc_qm215_desc) {
gfx3d_clk_src.parent_map = gcc_gfx3d_map_qm215;
+ } else if (gcc_desc == &gcc_msm8937_desc) {
+ msm8937_clock_override();
+ gfx3d_clk_src.freq_tbl = ftbl_gfx3d_clk_src_msm8937;
+ }
regmap = qcom_cc_map(pdev, gcc_desc);
if (IS_ERR(regmap))
@@ -3276,6 +3884,7 @@ static int gcc_msm8917_probe(struct platform_device *pdev)
static const struct of_device_id gcc_msm8917_match_table[] = {
{ .compatible = "qcom,gcc-msm8917", .data = &gcc_msm8917_desc },
{ .compatible = "qcom,gcc-qm215", .data = &gcc_qm215_desc },
+ { .compatible = "qcom,gcc-msm8937", .data = &gcc_msm8937_desc },
{},
};
MODULE_DEVICE_TABLE(of, gcc_msm8917_match_table);
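One detail worth noting in the msm8937 support above: the override helper mutates static clk structures (frequency tables, parent maps, init data), so it has to run before the clocks are registered, because clk_init_data is consumed at registration time and later edits to the tables are ignored. A condensed sketch of the pattern, reusing the names from this patch:

	/*
	 * Variant setup must precede registration: once qcom_cc_probe()
	 * registers the clocks, changes to the static tables have no effect.
	 */
	gcc_desc = of_device_get_match_data(&pdev->dev);
	if (gcc_desc == &gcc_msm8937_desc) {
		msm8937_clock_override();		/* tweak statics first */
		gfx3d_clk_src.freq_tbl = ftbl_gfx3d_clk_src_msm8937;
	}
	return qcom_cc_probe(pdev, gcc_desc);		/* then register */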
diff --git a/drivers/clk/qcom/gcc-qcm2290.c b/drivers/clk/qcom/gcc-qcm2290.c
index 9a6703365e61..6684cab63ae1 100644
--- a/drivers/clk/qcom/gcc-qcm2290.c
+++ b/drivers/clk/qcom/gcc-qcm2290.c
@@ -2720,6 +2720,7 @@ static struct gdsc gcc_vcodec0_gdsc = {
.pd = {
.name = "gcc_vcodec0",
},
+ .flags = HW_CTRL_TRIGGER,
.pwrsts = PWRSTS_OFF_ON,
};
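The HW_CTRL_TRIGGER flag added above exposes the vcodec GDSC's hardware-control mode through genpd, letting the video driver hand the domain to hardware between frames instead of keeping it software-voted. A hedged sketch of a consumer toggling that mode, assuming it is attached to this power domain; dev_pm_genpd_set_hwmode() is the genpd entry point this flag hooks into, used here on that assumption:

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

static int example_vcodec_set_hw_ctrl(struct device *dev, bool enable)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);	/* keep the domain powered */
	if (ret)
		return ret;

	/* With HW_CTRL_TRIGGER, this reaches the GDSC hwmode callbacks */
	ret = dev_pm_genpd_set_hwmode(dev, enable);

	pm_runtime_put(dev);
	return ret;
}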
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index 5ca003c9bfba..efc75a3814ab 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -2754,7 +2754,7 @@ static struct clk_regmap *gcc_qcs404_clocks[] = {
[GCC_DCC_CLK] = &gcc_dcc_clk.clkr,
[GCC_DCC_XO_CLK] = &gcc_dcc_xo_clk.clkr,
[GCC_WCSS_Q6_AHB_CLK] = &gcc_wdsp_q6ss_ahbs_clk.clkr,
- [GCC_WCSS_Q6_AXIM_CLK] = &gcc_wdsp_q6ss_axim_clk.clkr,
+ [GCC_WCSS_Q6_AXIM_CLK] = &gcc_wdsp_q6ss_axim_clk.clkr,
};
diff --git a/drivers/clk/qcom/gcc-qcs615.c b/drivers/clk/qcom/gcc-qcs615.c
index 9695446bc2a3..5b3b8dd4f114 100644
--- a/drivers/clk/qcom/gcc-qcs615.c
+++ b/drivers/clk/qcom/gcc-qcs615.c
@@ -784,7 +784,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
.name = "gcc_sdcc1_apps_clk_src",
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
- .ops = &clk_rcg2_floor_ops,
+ .ops = &clk_rcg2_shared_floor_ops,
},
};
@@ -806,7 +806,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
.name = "gcc_sdcc1_ice_core_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_floor_ops,
+ .ops = &clk_rcg2_shared_floor_ops,
},
};
@@ -830,7 +830,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
.name = "gcc_sdcc2_apps_clk_src",
.parent_data = gcc_parent_data_8,
.num_parents = ARRAY_SIZE(gcc_parent_data_8),
- .ops = &clk_rcg2_floor_ops,
+ .ops = &clk_rcg2_shared_floor_ops,
},
};
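For background on the ops swap above: the "floor" RCG ops pick the highest frequency-table entry that does not exceed the requested rate, which matters for SD/eMMC where overshooting the card's bus clock is not allowed; the _shared_ variants additionally park the RCG on a safe source while the clock is disabled. A simplified sketch of the floor selection (the real logic lives in the clk_rcg2 implementation; this assumes an ascending, zero-terminated table, as in this driver):

/* Pick the fastest table entry at or below the requested rate. */
static const struct freq_tbl *
example_find_freq_floor(const struct freq_tbl *f, unsigned long rate)
{
	const struct freq_tbl *best = NULL;

	for (; f->freq; f++) {		/* table ends at .freq == 0 */
		if (f->freq > rate)
			break;
		best = f;
	}

	return best;	/* NULL if even the lowest entry is too fast */
}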
diff --git a/drivers/clk/qcom/gcc-sc8280xp.c b/drivers/clk/qcom/gcc-sc8280xp.c
index f27d0003f427..2ab111585d7f 100644
--- a/drivers/clk/qcom/gcc-sc8280xp.c
+++ b/drivers/clk/qcom/gcc-sc8280xp.c
@@ -2224,7 +2224,6 @@ static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
};
static const struct freq_tbl ftbl_gcc_usb4_1_master_clk_src[] = {
- F(85714286, P_GCC_GPLL0_OUT_EVEN, 3.5, 0, 0),
F(175000000, P_GCC_GPLL8_OUT_MAIN, 4, 0, 0),
F(350000000, P_GCC_GPLL8_OUT_MAIN, 2, 0, 0),
{ }
@@ -6775,10 +6774,6 @@ static struct gdsc pcie_1_tunnel_gdsc = {
.flags = VOTABLE | RETAIN_FF_ENABLE,
};
-/*
- * The Qualcomm PCIe driver does not yet implement suspend so to keep the
- * PCIe power domains always-on for now.
- */
static struct gdsc pcie_2a_gdsc = {
.gdscr = 0x9d004,
.collapse_ctrl = 0x52128,
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index 01a76f1b5b4c..20253a06a583 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -2247,6 +2247,45 @@ static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
},
};
+static struct clk_branch hlos1_vote_lpass_adsp_smmu_clk = {
+ .halt_reg = 0x7d014,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "hlos1_vote_lpass_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch hlos1_vote_turing_adsp_smmu_clk = {
+ .halt_reg = 0x7d048,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "hlos1_vote_turing_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch hlos2_vote_turing_adsp_smmu_clk = {
+ .halt_reg = 0x7e048,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "hlos2_vote_turing_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc ufs_gdsc = {
.gdscr = 0x75004,
.gds_hw_ctrl = 0x0,
@@ -2277,6 +2316,33 @@ static struct gdsc pcie_0_gdsc = {
.flags = VOTABLE,
};
+static struct gdsc hlos1_vote_turing_adsp_gdsc = {
+ .gdscr = 0x7d04c,
+ .pd = {
+ .name = "hlos1_vote_turing_adsp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos2_vote_turing_adsp_gdsc = {
+ .gdscr = 0x7e04c,
+ .pd = {
+ .name = "hlos2_vote_turing_adsp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_lpass_adsp_gdsc = {
+ .gdscr = 0x7d034,
+ .pd = {
+ .name = "hlos1_vote_lpass_adsp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
static struct clk_hw *gcc_sdm660_hws[] = {
&xo.hw,
&gpll0_early_div.hw,
@@ -2409,12 +2475,18 @@ static struct clk_regmap *gcc_sdm660_clocks[] = {
[USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
[USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
[USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
+ [GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK] = &hlos1_vote_lpass_adsp_smmu_clk.clkr,
+ [GCC_HLOS1_VOTE_TURING_ADSP_SMMU_CLK] = &hlos1_vote_turing_adsp_smmu_clk.clkr,
+ [GCC_HLOS2_VOTE_TURING_ADSP_SMMU_CLK] = &hlos2_vote_turing_adsp_smmu_clk.clkr,
};
static struct gdsc *gcc_sdm660_gdscs[] = {
[UFS_GDSC] = &ufs_gdsc,
[USB_30_GDSC] = &usb_30_gdsc,
[PCIE_0_GDSC] = &pcie_0_gdsc,
+ [HLOS1_VOTE_TURING_ADSP_GDSC] = &hlos1_vote_turing_adsp_gdsc,
+ [HLOS2_VOTE_TURING_ADSP_GDSC] = &hlos2_vote_turing_adsp_gdsc,
+ [HLOS1_VOTE_LPASS_ADSP_GDSC] = &hlos1_vote_lpass_adsp_gdsc,
};
static const struct qcom_reset_map gcc_sdm660_resets[] = {
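The three vote GDSCs added to gcc-sdm660 above are typical remoteproc-style domains. A hedged sketch of a consumer attaching to one of them by name follows; the "lpass_adsp" power-domain-names entry is an example binding choice, not something this patch defines:

#include <linux/err.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

static int example_attach_adsp_domain(struct device *dev)
{
	struct device *pd;

	pd = dev_pm_domain_attach_by_name(dev, "lpass_adsp");
	if (IS_ERR_OR_NULL(pd))
		return pd ? PTR_ERR(pd) : -ENODATA;

	/* Hold a vote on the GDSC while the DSP is running */
	return pm_runtime_resume_and_get(pd);
}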
diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
index cefceb780889..a93d1f412a7b 100644
--- a/drivers/clk/qcom/gcc-sm8150.c
+++ b/drivers/clk/qcom/gcc-sm8150.c
@@ -1245,7 +1245,7 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
};
/*
- * Clock ON depends on external parent 'config noc', so cant poll
+ * Clock ON depends on external parent 'config noc', so can't poll
 * delay and also mark as critical for camss boot
*/
static struct clk_branch gcc_camera_ahb_clk = {
@@ -1398,7 +1398,7 @@ static struct clk_branch gcc_ddrss_gpu_axi_clk = {
};
/*
- * Clock ON depends on external parent 'config noc', so cant poll
+ * Clock ON depends on external parent 'config noc', so can't poll
 * delay and also mark as critical for disp boot
*/
static struct clk_branch gcc_disp_ahb_clk = {
@@ -3339,7 +3339,7 @@ static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
};
/*
- * Clock ON depends on external parent 'config noc', so cant poll
+ * Clock ON depends on external parent 'config noc', so can't poll
 * delay and also mark as critical for video boot
*/
static struct clk_branch gcc_video_ahb_clk = {
diff --git a/drivers/clk/qcom/gcc-sm8750.c b/drivers/clk/qcom/gcc-sm8750.c
index 8092dd6b37b5..def86b71a3da 100644
--- a/drivers/clk/qcom/gcc-sm8750.c
+++ b/drivers/clk/qcom/gcc-sm8750.c
@@ -1012,6 +1012,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
F(400000, P_BI_TCXO, 12, 1, 4),
F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(37500000, P_GCC_GPLL0_OUT_EVEN, 8, 0, 0),
F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
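The new 37.5 MHz row follows directly from the clock tree: GPLL0_OUT_EVEN runs at 300 MHz on this family (an assumption stated here, not in the hunk), and 300 MHz / 8 = 37.5 MHz with the M/N counter bypassed. With the common qcom F() helper, which stores the pre-divider as 2 * div - 1, the row expands to roughly:

/*
 *   F(37500000, P_GCC_GPLL0_OUT_EVEN, 8, 0, 0)
 * expands (via the shared F() macro in clk-rcg.h) to:
 *   { .freq = 37500000, .src = P_GCC_GPLL0_OUT_EVEN,
 *     .pre_div = (2 * 8) - 1, .m = 0, .n = 0 }
 *
 * 300000000 / 8 = 37500000; m = n = 0 bypasses the M/N counter.
 */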
diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
index 3e44757e25d3..b63c8abdd2fc 100644
--- a/drivers/clk/qcom/gcc-x1e80100.c
+++ b/drivers/clk/qcom/gcc-x1e80100.c
@@ -32,6 +32,33 @@ enum {
DT_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE,
DT_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE,
DT_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE,
+ DT_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_DP0_GMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_DP1_GMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_QUSB4PHY_0_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_0_GCC_USB4_RX1_CLK,
+ DT_QUSB4PHY_1_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_1_GCC_USB4_RX1_CLK,
+ DT_QUSB4PHY_2_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_2_GCC_USB4_RX1_CLK,
+ DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
};
enum {
@@ -42,10 +69,40 @@ enum {
P_GCC_GPLL7_OUT_MAIN,
P_GCC_GPLL8_OUT_MAIN,
P_GCC_GPLL9_OUT_MAIN,
+ P_GCC_USB3_PRIM_PHY_PIPE_CLK_SRC,
+ P_GCC_USB3_SEC_PHY_PIPE_CLK_SRC,
+ P_GCC_USB3_TERT_PHY_PIPE_CLK_SRC,
+ P_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_DP0_GMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_DP1_GMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_QUSB4PHY_0_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_0_GCC_USB4_RX1_CLK,
+ P_QUSB4PHY_1_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_1_GCC_USB4_RX1_CLK,
+ P_QUSB4PHY_2_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_2_GCC_USB4_RX1_CLK,
P_SLEEP_CLK,
P_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK,
P_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK,
P_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ P_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
};
static struct clk_alpha_pll gcc_gpll0 = {
@@ -320,6 +377,342 @@ static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
{ }
};
+static const struct clk_parent_data gcc_parent_data_13[] = {
+ { .index = DT_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_14[] = {
+ { .index = DT_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_15[] = {
+ { .index = DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data gcc_parent_data_16[] = {
+ { .index = DT_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_17[] = {
+ { .index = DT_QUSB4PHY_0_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data gcc_parent_data_18[] = {
+ { .index = DT_QUSB4PHY_0_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data gcc_parent_data_19[] = {
+ { .index = DT_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_20[] = {
+ { .index = DT_GCC_USB4_1_PHY_DP0_GMUX_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_21[] = {
+ { .index = DT_GCC_USB4_1_PHY_DP1_GMUX_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_22[] = {
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data gcc_parent_data_23[] = {
+ { .index = DT_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_24[] = {
+ { .index = DT_QUSB4PHY_1_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data gcc_parent_data_25[] = {
+ { .index = DT_QUSB4PHY_1_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data gcc_parent_data_26[] = {
+ { .index = DT_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_27[] = {
+ { .index = DT_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_28[] = {
+ { .index = DT_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_29[] = {
+ { .index = DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data gcc_parent_data_30[] = {
+ { .index = DT_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_31[] = {
+ { .index = DT_QUSB4PHY_2_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data gcc_parent_data_32[] = {
+ { .index = DT_QUSB4PHY_2_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data gcc_parent_data_33[] = {
+ { .index = DT_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_0_phy_dp0_clk_src = {
+ .reg = 0x9f06c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_dp0_clk_src",
+ .parent_data = gcc_parent_data_13,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_13),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
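Each of these phy-mux sources pairs the live PHY clock (first parent) with what looks like a park/fallback source (second parent), so consumers only enable the downstream branch and never reparent by hand. A hedged consumer-side sketch using only standard clk APIs; the con_id string is hypothetical:

	#include <linux/clk.h>
	#include <linux/device.h>

	static int usb4_enable_dp0_pipe(struct device *dev)
	{
		/* "usb4_dp0_pipe" is an illustrative name, not from a binding */
		struct clk *pipe = devm_clk_get(dev, "usb4_dp0_pipe");

		if (IS_ERR(pipe))
			return PTR_ERR(pipe);

		/* the mux above switches to the PHY source as part of enable */
		return clk_prepare_enable(pipe);
	}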
+static struct clk_regmap_phy_mux gcc_usb4_0_phy_dp1_clk_src = {
+ .reg = 0x9f114,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_dp1_clk_src",
+ .parent_data = gcc_parent_data_14,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_14),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_0_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0x9f0d4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_15,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_15),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_0_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0x9f104,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_16,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_16),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_0_phy_rx0_clk_src = {
+ .reg = 0x9f0ac,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_17,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_17),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_0_phy_rx1_clk_src = {
+ .reg = 0x9f0bc,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_18,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_18),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_0_phy_sys_clk_src = {
+ .reg = 0x9f0e4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_19,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_19),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_1_phy_dp0_clk_src = {
+ .reg = 0x2b06c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_dp0_clk_src",
+ .parent_data = gcc_parent_data_20,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_20),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_1_phy_dp1_clk_src = {
+ .reg = 0x2b114,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_dp1_clk_src",
+ .parent_data = gcc_parent_data_21,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_21),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_1_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0x2b0d4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_22,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_22),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_1_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0x2b104,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_23,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_23),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_1_phy_rx0_clk_src = {
+ .reg = 0x2b0ac,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_24,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_24),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_1_phy_rx1_clk_src = {
+ .reg = 0x2b0bc,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_25,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_25),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_1_phy_sys_clk_src = {
+ .reg = 0x2b0e4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_26,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_26),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_2_phy_dp0_clk_src = {
+ .reg = 0x1106c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_dp0_clk_src",
+ .parent_data = gcc_parent_data_27,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_27),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_2_phy_dp1_clk_src = {
+ .reg = 0x11114,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_dp1_clk_src",
+ .parent_data = gcc_parent_data_28,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_28),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_2_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0x110d4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_29,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_29),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_2_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0x11104,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_30,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_30),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_2_phy_rx0_clk_src = {
+ .reg = 0x110ac,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_31,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_31),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_2_phy_rx1_clk_src = {
+ .reg = 0x110bc,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_32,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_32),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_2_phy_sys_clk_src = {
+ .reg = 0x110e4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_33,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_33),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
static struct clk_rcg2 gcc_gp1_clk_src = {
.cmd_rcgr = 0x64004,
.mnd_width = 16,
@@ -1456,7 +1849,6 @@ static struct clk_rcg2 gcc_usb3_tert_phy_aux_clk_src = {
};
static const struct freq_tbl ftbl_gcc_usb4_0_master_clk_src[] = {
- F(85714286, P_GCC_GPLL0_OUT_EVEN, 3.5, 0, 0),
F(175000000, P_GCC_GPLL8_OUT_MAIN, 4, 0, 0),
F(350000000, P_GCC_GPLL8_OUT_MAIN, 2, 0, 0),
{ }
@@ -2790,6 +3182,11 @@ static struct clk_branch gcc_pcie_0_pipe_clk = {
.enable_mask = BIT(25),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2879,6 +3276,11 @@ static struct clk_branch gcc_pcie_1_pipe_clk = {
.enable_mask = BIT(30),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_1_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2968,6 +3370,11 @@ static struct clk_branch gcc_pcie_2_pipe_clk = {
.enable_mask = BIT(23),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_2_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5156,6 +5563,33 @@ static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
},
};
+static const struct parent_map gcc_parent_map_34[] = {
+ { P_GCC_USB3_PRIM_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_34[] = {
+ { .hw = &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .index = DT_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static struct clk_regmap_mux gcc_usb34_prim_phy_pipe_clk_src = {
+ .reg = 0x39070,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_34,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_34,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_34),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
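In gcc_parent_map_34 each P_* selector is paired with the raw value programmed into the two-bit field at .reg; value 2 is simply not wired on this mux. A simplified sketch of the value-to-index lookup (in the spirit of the helpers in drivers/clk/qcom/common.c, not the exact implementation):

	static int usb34_prim_cfg_to_index(u32 cfg)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(gcc_parent_map_34); i++)
			if (gcc_parent_map_34[i].cfg == cfg)
				return i;	/* index into gcc_parent_data_34 */

		return -EINVAL;		/* e.g. cfg == 2 has no parent here */
	}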
static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
.halt_reg = 0x39068,
.halt_check = BRANCH_HALT_SKIP,
@@ -5167,7 +5601,7 @@ static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb3_prim_phy_pipe_clk",
.parent_hws = (const struct clk_hw*[]) {
- &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ &gcc_usb34_prim_phy_pipe_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -5227,6 +5661,33 @@ static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src = {
},
};
+static const struct parent_map gcc_parent_map_35[] = {
+ { P_GCC_USB3_SEC_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_35[] = {
+ { .hw = &gcc_usb3_sec_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .index = DT_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static struct clk_regmap_mux gcc_usb34_sec_phy_pipe_clk_src = {
+ .reg = 0xa1070,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_35,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_sec_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_35,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_35),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
.halt_reg = 0xa1068,
.halt_check = BRANCH_HALT_SKIP,
@@ -5238,7 +5699,7 @@ static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb3_sec_phy_pipe_clk",
.parent_hws = (const struct clk_hw*[]) {
- &gcc_usb3_sec_phy_pipe_clk_src.clkr.hw,
+ &gcc_usb34_sec_phy_pipe_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -5298,6 +5759,33 @@ static struct clk_regmap_mux gcc_usb3_tert_phy_pipe_clk_src = {
},
};
+static const struct parent_map gcc_parent_map_36[] = {
+ { P_GCC_USB3_TERT_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_36[] = {
+ { .hw = &gcc_usb3_tert_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .index = DT_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static struct clk_regmap_mux gcc_usb34_tert_phy_pipe_clk_src = {
+ .reg = 0xa2070,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_36,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_tert_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_36,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_36),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_usb3_tert_phy_pipe_clk = {
.halt_reg = 0xa2068,
.halt_check = BRANCH_HALT_SKIP,
@@ -5309,7 +5797,7 @@ static struct clk_branch gcc_usb3_tert_phy_pipe_clk = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb3_tert_phy_pipe_clk",
.parent_hws = (const struct clk_hw*[]) {
- &gcc_usb3_tert_phy_pipe_clk_src.clkr.hw,
+ &gcc_usb34_tert_phy_pipe_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -5335,12 +5823,17 @@ static struct clk_branch gcc_usb4_0_cfg_ahb_clk = {
static struct clk_branch gcc_usb4_0_dp0_clk = {
.halt_reg = 0x9f060,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x9f060,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_0_dp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_dp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5348,12 +5841,17 @@ static struct clk_branch gcc_usb4_0_dp0_clk = {
static struct clk_branch gcc_usb4_0_dp1_clk = {
.halt_reg = 0x9f108,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x9f108,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_0_dp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_dp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5385,6 +5883,11 @@ static struct clk_branch gcc_usb4_0_phy_p2rr2p_pipe_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_0_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5398,6 +5901,11 @@ static struct clk_branch gcc_usb4_0_phy_pcie_pipe_clk = {
.enable_mask = BIT(19),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_0_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5405,12 +5913,17 @@ static struct clk_branch gcc_usb4_0_phy_pcie_pipe_clk = {
static struct clk_branch gcc_usb4_0_phy_rx0_clk = {
.halt_reg = 0x9f0b0,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x9f0b0,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_0_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5418,12 +5931,17 @@ static struct clk_branch gcc_usb4_0_phy_rx0_clk = {
static struct clk_branch gcc_usb4_0_phy_rx1_clk = {
.halt_reg = 0x9f0c0,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x9f0c0,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_0_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5439,6 +5957,11 @@ static struct clk_branch gcc_usb4_0_phy_usb_pipe_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_0_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5470,6 +5993,11 @@ static struct clk_branch gcc_usb4_0_sys_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_0_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5512,12 +6040,17 @@ static struct clk_branch gcc_usb4_1_cfg_ahb_clk = {
static struct clk_branch gcc_usb4_1_dp0_clk = {
.halt_reg = 0x2b060,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x2b060,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_1_dp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_dp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5525,12 +6058,17 @@ static struct clk_branch gcc_usb4_1_dp0_clk = {
static struct clk_branch gcc_usb4_1_dp1_clk = {
.halt_reg = 0x2b108,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x2b108,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_1_dp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_dp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5562,6 +6100,11 @@ static struct clk_branch gcc_usb4_1_phy_p2rr2p_pipe_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_1_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5575,6 +6118,11 @@ static struct clk_branch gcc_usb4_1_phy_pcie_pipe_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_1_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5582,12 +6130,17 @@ static struct clk_branch gcc_usb4_1_phy_pcie_pipe_clk = {
static struct clk_branch gcc_usb4_1_phy_rx0_clk = {
.halt_reg = 0x2b0b0,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x2b0b0,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_1_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5595,12 +6148,17 @@ static struct clk_branch gcc_usb4_1_phy_rx0_clk = {
static struct clk_branch gcc_usb4_1_phy_rx1_clk = {
.halt_reg = 0x2b0c0,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x2b0c0,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_1_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5616,6 +6174,11 @@ static struct clk_branch gcc_usb4_1_phy_usb_pipe_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_1_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_sec_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5647,6 +6210,11 @@ static struct clk_branch gcc_usb4_1_sys_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_1_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5689,12 +6257,17 @@ static struct clk_branch gcc_usb4_2_cfg_ahb_clk = {
static struct clk_branch gcc_usb4_2_dp0_clk = {
.halt_reg = 0x11060,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x11060,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_2_dp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_dp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5702,12 +6275,17 @@ static struct clk_branch gcc_usb4_2_dp0_clk = {
static struct clk_branch gcc_usb4_2_dp1_clk = {
.halt_reg = 0x11108,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x11108,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_2_dp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_dp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5739,6 +6317,11 @@ static struct clk_branch gcc_usb4_2_phy_p2rr2p_pipe_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_2_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5752,6 +6335,11 @@ static struct clk_branch gcc_usb4_2_phy_pcie_pipe_clk = {
.enable_mask = BIT(1),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_2_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5759,12 +6347,17 @@ static struct clk_branch gcc_usb4_2_phy_pcie_pipe_clk = {
static struct clk_branch gcc_usb4_2_phy_rx0_clk = {
.halt_reg = 0x110b0,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x110b0,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_2_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5772,12 +6365,17 @@ static struct clk_branch gcc_usb4_2_phy_rx0_clk = {
static struct clk_branch gcc_usb4_2_phy_rx1_clk = {
.halt_reg = 0x110c0,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x110c0,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_2_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5793,6 +6391,11 @@ static struct clk_branch gcc_usb4_2_phy_usb_pipe_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_2_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_tert_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -6483,6 +7086,9 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = {
[GCC_USB30_TERT_MOCK_UTMI_CLK_SRC] = &gcc_usb30_tert_mock_utmi_clk_src.clkr,
[GCC_USB30_TERT_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_tert_mock_utmi_postdiv_clk_src.clkr,
[GCC_USB30_TERT_SLEEP_CLK] = &gcc_usb30_tert_sleep_clk.clkr,
+ [GCC_USB34_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb34_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB34_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb34_sec_phy_pipe_clk_src.clkr,
+ [GCC_USB34_TERT_PHY_PIPE_CLK_SRC] = &gcc_usb34_tert_phy_pipe_clk_src.clkr,
[GCC_USB3_MP_PHY_AUX_CLK] = &gcc_usb3_mp_phy_aux_clk.clkr,
[GCC_USB3_MP_PHY_AUX_CLK_SRC] = &gcc_usb3_mp_phy_aux_clk_src.clkr,
[GCC_USB3_MP_PHY_COM_AUX_CLK] = &gcc_usb3_mp_phy_com_aux_clk.clkr,
@@ -6508,11 +7114,18 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = {
[GCC_USB4_0_DP1_CLK] = &gcc_usb4_0_dp1_clk.clkr,
[GCC_USB4_0_MASTER_CLK] = &gcc_usb4_0_master_clk.clkr,
[GCC_USB4_0_MASTER_CLK_SRC] = &gcc_usb4_0_master_clk_src.clkr,
+ [GCC_USB4_0_PHY_DP0_CLK_SRC] = &gcc_usb4_0_phy_dp0_clk_src.clkr,
+ [GCC_USB4_0_PHY_DP1_CLK_SRC] = &gcc_usb4_0_phy_dp1_clk_src.clkr,
[GCC_USB4_0_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_0_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_0_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_0_phy_p2rr2p_pipe_clk_src.clkr,
[GCC_USB4_0_PHY_PCIE_PIPE_CLK] = &gcc_usb4_0_phy_pcie_pipe_clk.clkr,
[GCC_USB4_0_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_0_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_0_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_0_phy_pcie_pipe_mux_clk_src.clkr,
[GCC_USB4_0_PHY_RX0_CLK] = &gcc_usb4_0_phy_rx0_clk.clkr,
+ [GCC_USB4_0_PHY_RX0_CLK_SRC] = &gcc_usb4_0_phy_rx0_clk_src.clkr,
[GCC_USB4_0_PHY_RX1_CLK] = &gcc_usb4_0_phy_rx1_clk.clkr,
+ [GCC_USB4_0_PHY_RX1_CLK_SRC] = &gcc_usb4_0_phy_rx1_clk_src.clkr,
+ [GCC_USB4_0_PHY_SYS_CLK_SRC] = &gcc_usb4_0_phy_sys_clk_src.clkr,
[GCC_USB4_0_PHY_USB_PIPE_CLK] = &gcc_usb4_0_phy_usb_pipe_clk.clkr,
[GCC_USB4_0_SB_IF_CLK] = &gcc_usb4_0_sb_if_clk.clkr,
[GCC_USB4_0_SB_IF_CLK_SRC] = &gcc_usb4_0_sb_if_clk_src.clkr,
@@ -6524,11 +7137,18 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = {
[GCC_USB4_1_DP1_CLK] = &gcc_usb4_1_dp1_clk.clkr,
[GCC_USB4_1_MASTER_CLK] = &gcc_usb4_1_master_clk.clkr,
[GCC_USB4_1_MASTER_CLK_SRC] = &gcc_usb4_1_master_clk_src.clkr,
+ [GCC_USB4_1_PHY_DP0_CLK_SRC] = &gcc_usb4_1_phy_dp0_clk_src.clkr,
+ [GCC_USB4_1_PHY_DP1_CLK_SRC] = &gcc_usb4_1_phy_dp1_clk_src.clkr,
[GCC_USB4_1_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_1_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_1_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_1_phy_p2rr2p_pipe_clk_src.clkr,
[GCC_USB4_1_PHY_PCIE_PIPE_CLK] = &gcc_usb4_1_phy_pcie_pipe_clk.clkr,
[GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_1_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr,
[GCC_USB4_1_PHY_RX0_CLK] = &gcc_usb4_1_phy_rx0_clk.clkr,
+ [GCC_USB4_1_PHY_RX0_CLK_SRC] = &gcc_usb4_1_phy_rx0_clk_src.clkr,
[GCC_USB4_1_PHY_RX1_CLK] = &gcc_usb4_1_phy_rx1_clk.clkr,
+ [GCC_USB4_1_PHY_RX1_CLK_SRC] = &gcc_usb4_1_phy_rx1_clk_src.clkr,
+ [GCC_USB4_1_PHY_SYS_CLK_SRC] = &gcc_usb4_1_phy_sys_clk_src.clkr,
[GCC_USB4_1_PHY_USB_PIPE_CLK] = &gcc_usb4_1_phy_usb_pipe_clk.clkr,
[GCC_USB4_1_SB_IF_CLK] = &gcc_usb4_1_sb_if_clk.clkr,
[GCC_USB4_1_SB_IF_CLK_SRC] = &gcc_usb4_1_sb_if_clk_src.clkr,
@@ -6540,11 +7160,18 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = {
[GCC_USB4_2_DP1_CLK] = &gcc_usb4_2_dp1_clk.clkr,
[GCC_USB4_2_MASTER_CLK] = &gcc_usb4_2_master_clk.clkr,
[GCC_USB4_2_MASTER_CLK_SRC] = &gcc_usb4_2_master_clk_src.clkr,
+ [GCC_USB4_2_PHY_DP0_CLK_SRC] = &gcc_usb4_2_phy_dp0_clk_src.clkr,
+ [GCC_USB4_2_PHY_DP1_CLK_SRC] = &gcc_usb4_2_phy_dp1_clk_src.clkr,
[GCC_USB4_2_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_2_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_2_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_2_phy_p2rr2p_pipe_clk_src.clkr,
[GCC_USB4_2_PHY_PCIE_PIPE_CLK] = &gcc_usb4_2_phy_pcie_pipe_clk.clkr,
[GCC_USB4_2_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_2_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_2_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_2_phy_pcie_pipe_mux_clk_src.clkr,
[GCC_USB4_2_PHY_RX0_CLK] = &gcc_usb4_2_phy_rx0_clk.clkr,
+ [GCC_USB4_2_PHY_RX0_CLK_SRC] = &gcc_usb4_2_phy_rx0_clk_src.clkr,
[GCC_USB4_2_PHY_RX1_CLK] = &gcc_usb4_2_phy_rx1_clk.clkr,
+ [GCC_USB4_2_PHY_RX1_CLK_SRC] = &gcc_usb4_2_phy_rx1_clk_src.clkr,
+ [GCC_USB4_2_PHY_SYS_CLK_SRC] = &gcc_usb4_2_phy_sys_clk_src.clkr,
[GCC_USB4_2_PHY_USB_PIPE_CLK] = &gcc_usb4_2_phy_usb_pipe_clk.clkr,
[GCC_USB4_2_SB_IF_CLK] = &gcc_usb4_2_sb_if_clk.clkr,
[GCC_USB4_2_SB_IF_CLK_SRC] = &gcc_usb4_2_sb_if_clk_src.clkr,
@@ -6660,20 +7287,58 @@ static const struct qcom_reset_map gcc_x1e80100_resets[] = {
[GCC_USB3_UNIPHY_MP0_BCR] = { 0x19000 },
[GCC_USB3_UNIPHY_MP1_BCR] = { 0x54000 },
[GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB4PHY_PHY_PRIM_BCR] = { 0x5000c },
[GCC_USB3PHY_PHY_SEC_BCR] = { 0x2a004 },
+ [GCC_USB4PHY_PHY_SEC_BCR] = { 0x2a00c },
[GCC_USB3PHY_PHY_TERT_BCR] = { 0xa3004 },
+ [GCC_USB4PHY_PHY_TERT_BCR] = { 0xa300c },
[GCC_USB3UNIPHY_PHY_MP0_BCR] = { 0x19004 },
[GCC_USB3UNIPHY_PHY_MP1_BCR] = { 0x54004 },
[GCC_USB4_0_BCR] = { 0x9f000 },
[GCC_USB4_0_DP0_PHY_PRIM_BCR] = { 0x50010 },
- [GCC_USB4_1_DP0_PHY_SEC_BCR] = { 0x2a010 },
- [GCC_USB4_2_DP0_PHY_TERT_BCR] = { 0xa3010 },
+ [GCC_USB4_0_MISC_USB4_SYS_BCR] = { .reg = 0xad0f8, .bit = 0 },
+ [GCC_USB4_0_MISC_RX_CLK_0_BCR] = { .reg = 0xad0f8, .bit = 1 },
+ [GCC_USB4_0_MISC_RX_CLK_1_BCR] = { .reg = 0xad0f8, .bit = 2 },
+ [GCC_USB4_0_MISC_USB_PIPE_BCR] = { .reg = 0xad0f8, .bit = 3 },
+ [GCC_USB4_0_MISC_PCIE_PIPE_BCR] = { .reg = 0xad0f8, .bit = 4 },
+ [GCC_USB4_0_MISC_TMU_BCR] = { .reg = 0xad0f8, .bit = 5 },
+ [GCC_USB4_0_MISC_SB_IF_BCR] = { .reg = 0xad0f8, .bit = 6 },
+ [GCC_USB4_0_MISC_HIA_MSTR_BCR] = { .reg = 0xad0f8, .bit = 7 },
+ [GCC_USB4_0_MISC_AHB_BCR] = { .reg = 0xad0f8, .bit = 8 },
+ [GCC_USB4_0_MISC_DP0_MAX_PCLK_BCR] = { .reg = 0xad0f8, .bit = 9 },
+ [GCC_USB4_0_MISC_DP1_MAX_PCLK_BCR] = { .reg = 0xad0f8, .bit = 10 },
[GCC_USB4_1_BCR] = { 0x2b000 },
+ [GCC_USB4_1_DP0_PHY_SEC_BCR] = { 0x2a010 },
+ [GCC_USB4_1_MISC_USB4_SYS_BCR] = { .reg = 0xae0f8, .bit = 0 },
+ [GCC_USB4_1_MISC_RX_CLK_0_BCR] = { .reg = 0xae0f8, .bit = 1 },
+ [GCC_USB4_1_MISC_RX_CLK_1_BCR] = { .reg = 0xae0f8, .bit = 2 },
+ [GCC_USB4_1_MISC_USB_PIPE_BCR] = { .reg = 0xae0f8, .bit = 3 },
+ [GCC_USB4_1_MISC_PCIE_PIPE_BCR] = { .reg = 0xae0f8, .bit = 4 },
+ [GCC_USB4_1_MISC_TMU_BCR] = { .reg = 0xae0f8, .bit = 5 },
+ [GCC_USB4_1_MISC_SB_IF_BCR] = { .reg = 0xae0f8, .bit = 6 },
+ [GCC_USB4_1_MISC_HIA_MSTR_BCR] = { .reg = 0xae0f8, .bit = 7 },
+ [GCC_USB4_1_MISC_AHB_BCR] = { .reg = 0xae0f8, .bit = 8 },
+ [GCC_USB4_1_MISC_DP0_MAX_PCLK_BCR] = { .reg = 0xae0f8, .bit = 9 },
+ [GCC_USB4_1_MISC_DP1_MAX_PCLK_BCR] = { .reg = 0xae0f8, .bit = 10 },
[GCC_USB4_2_BCR] = { 0x11000 },
+ [GCC_USB4_2_DP0_PHY_TERT_BCR] = { 0xa3010 },
+ [GCC_USB4_2_MISC_USB4_SYS_BCR] = { .reg = 0xaf0f8, .bit = 0 },
+ [GCC_USB4_2_MISC_RX_CLK_0_BCR] = { .reg = 0xaf0f8, .bit = 1 },
+ [GCC_USB4_2_MISC_RX_CLK_1_BCR] = { .reg = 0xaf0f8, .bit = 2 },
+ [GCC_USB4_2_MISC_USB_PIPE_BCR] = { .reg = 0xaf0f8, .bit = 3 },
+ [GCC_USB4_2_MISC_PCIE_PIPE_BCR] = { .reg = 0xaf0f8, .bit = 4 },
+ [GCC_USB4_2_MISC_TMU_BCR] = { .reg = 0xaf0f8, .bit = 5 },
+ [GCC_USB4_2_MISC_SB_IF_BCR] = { .reg = 0xaf0f8, .bit = 6 },
+ [GCC_USB4_2_MISC_HIA_MSTR_BCR] = { .reg = 0xaf0f8, .bit = 7 },
+ [GCC_USB4_2_MISC_AHB_BCR] = { .reg = 0xaf0f8, .bit = 8 },
+ [GCC_USB4_2_MISC_DP0_MAX_PCLK_BCR] = { .reg = 0xaf0f8, .bit = 9 },
+ [GCC_USB4_2_MISC_DP1_MAX_PCLK_BCR] = { .reg = 0xaf0f8, .bit = 10 },
[GCC_USB_0_PHY_BCR] = { 0x50020 },
[GCC_USB_1_PHY_BCR] = { 0x2a020 },
[GCC_USB_2_PHY_BCR] = { 0xa3020 },
[GCC_VIDEO_BCR] = { 0x32000 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { .reg = 0x32018, .bit = 2, .udelay = 1000 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { .reg = 0x32024, .bit = 2, .udelay = 1000 },
};
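The per-bit .reg/.bit entries above are reached through the standard reset framework; a hedged consumer sketch in which the device and its "sys" reset-names entry are hypothetical:

	#include <linux/delay.h>
	#include <linux/device.h>
	#include <linux/reset.h>

	static int usb4_0_pulse_sys_reset(struct device *dev)
	{
		struct reset_control *rst;

		rst = devm_reset_control_get_exclusive(dev, "sys");
		if (IS_ERR(rst))
			return PTR_ERR(rst);

		reset_control_assert(rst);
		usleep_range(10, 20);	/* arbitrary settle time for the sketch */
		return reset_control_deassert(rst);
	}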
static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
diff --git a/drivers/clk/qcom/gpucc-milos.c b/drivers/clk/qcom/gpucc-milos.c
new file mode 100644
index 000000000000..4ee09879156e
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-milos.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,milos-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/* Must match the order of clocks in the DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0_OUT_MAIN,
+ DT_GPLL0_OUT_MAIN_DIV,
+};
+
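The DT_* indices select entries of the consumer node's "clocks" property by position; a hypothetical node for illustration (the clock specifier names are not taken from the binding):

	/*
	 *	clocks = <&rpmhcc RPMH_CXO_CLK>,	   // DT_BI_TCXO
	 *		 <&gcc GCC_GPU_GPLL0_CLK_SRC>,	   // DT_GPLL0_OUT_MAIN
	 *		 <&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>; // DT_GPLL0_OUT_MAIN_DIV
	 */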
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_EVEN,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL0_OUT_ODD,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+/* 700.0 MHz Configuration */
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x24,
+ .alpha = 0x7555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
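The "700.0 MHz" label can be recomputed from .l and .alpha, assuming the 19.2 MHz TCXO and the 16-bit alpha format used by the Lucid PLL family:

	/*
	 * rate = tcxo * (l + alpha / 2^16)
	 *      = 19.2 MHz * (0x24 + 0x7555 / 65536)
	 *      = 19.2 MHz * (36 + 30037 / 65536)
	 *      ~= 700.0 MHz
	 */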
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .config = &gpu_cc_pll0_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpu_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpu_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gpu_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpu_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL0_OUT_EVEN, 2 },
+ { P_GPU_CC_PLL0_OUT_ODD, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll0_out_even.clkr.hw },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_ff_clk_src = {
+ .cmd_rcgr = 0x9474,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ff_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(350000000, P_GPU_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ F(650000000, P_GPU_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ F(687500000, P_GPU_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
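A reading of these rows against gpu_cc_pll0 above (an interpretation, not taken from the commit):

	/*
	 * 350.0 MHz = 700 MHz / 2 with pll0 as configured (even post-div);
	 * 650.0 MHz and 687.5 MHz imply re-locking pll0 to 1300 MHz and
	 * 1375 MHz respectively, both inside the 249.6-2300 MHz VCO range
	 * declared in lucid_ole_vco.
	 */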
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x9318,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_hub_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(400000000, P_GPLL0_OUT_MAIN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x93ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_hub_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_div_clk_src = {
+ .reg = 0x942c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x90bc,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x90bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_accu_shift_clk = {
+ .halt_reg = 0x910c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x910c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_accu_shift_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_ff_clk = {
+ .halt_reg = 0x90ec,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x90d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x90d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x90e4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90e4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_dpm_clk = {
+ .halt_reg = 0x9110,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9110,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_dpm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_freq_measure_clk = {
+ .halt_reg = 0x900c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_freq_measure_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_accu_shift_clk = {
+ .halt_reg = 0x9070,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_accu_shift_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_acd_ahb_ff_clk = {
+ .halt_reg = 0x9068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_acd_ahb_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x9060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_rcg_ahb_ff_clk = {
+ .halt_reg = 0x906c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x906c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_rcg_ahb_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x7000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x93e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x93e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x90e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x90e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_memnoc_gfx_clk = {
+ .halt_reg = 0x90f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x90f4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cc_cx_gdsc = {
+ .gdscr = 0x9080,
+ .gds_hw_ctrl = 0x9094,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x8,
+ .pd = {
+ .name = "gpu_cc_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct clk_regmap *gpu_cc_milos_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CX_ACCU_SHIFT_CLK] = &gpu_cc_cx_accu_shift_clk.clkr,
+ [GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_DPM_CLK] = &gpu_cc_dpm_clk.clkr,
+ [GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr,
+ [GPU_CC_FREQ_MEASURE_CLK] = &gpu_cc_freq_measure_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_ACCU_SHIFT_CLK] = &gpu_cc_gx_accu_shift_clk.clkr,
+ [GPU_CC_GX_ACD_AHB_FF_CLK] = &gpu_cc_gx_acd_ahb_ff_clk.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_GX_RCG_AHB_FF_CLK] = &gpu_cc_gx_rcg_ahb_ff_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_HUB_DIV_CLK_SRC] = &gpu_cc_hub_div_clk_src.clkr,
+ [GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL0_OUT_EVEN] = &gpu_cc_pll0_out_even.clkr,
+};
+
+static struct gdsc *gpu_cc_milos_gdscs[] = {
+ [GPU_CC_CX_GDSC] = &gpu_cc_cx_gdsc,
+};
+
+static const struct qcom_reset_map gpu_cc_milos_resets[] = {
+ [GPU_CC_CB_BCR] = { 0x93a0 },
+ [GPU_CC_CX_BCR] = { 0x907c },
+ [GPU_CC_FAST_HUB_BCR] = { 0x93e4 },
+ [GPU_CC_FF_BCR] = { 0x9470 },
+ [GPU_CC_GMU_BCR] = { 0x9314 },
+ [GPU_CC_GX_BCR] = { 0x905c },
+ [GPU_CC_RBCPR_BCR] = { 0x91e0 },
+ [GPU_CC_XO_BCR] = { 0x9000 },
+};
+
+static struct clk_alpha_pll *gpu_cc_milos_plls[] = {
+ &gpu_cc_pll0,
+};
+
+static u32 gpu_cc_milos_critical_cbcrs[] = {
+ 0x93a4, /* GPU_CC_CB_CLK */
+ 0x9008, /* GPU_CC_CXO_AON_CLK */
+ 0x9010, /* GPU_CC_DEMET_CLK */
+ 0x9064, /* GPU_CC_GX_AHB_FF_CLK */
+ 0x93a8, /* GPU_CC_RSCC_HUB_AON_CLK */
+ 0x9004, /* GPU_CC_RSCC_XO_AON_CLK */
+ 0x90cc, /* GPU_CC_SLEEP_CLK */
+};
+
+static const struct regmap_config gpu_cc_milos_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x95e8,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data gpu_cc_milos_driver_data = {
+ .alpha_plls = gpu_cc_milos_plls,
+ .num_alpha_plls = ARRAY_SIZE(gpu_cc_milos_plls),
+ .clk_cbcrs = gpu_cc_milos_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(gpu_cc_milos_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc gpu_cc_milos_desc = {
+ .config = &gpu_cc_milos_regmap_config,
+ .clks = gpu_cc_milos_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_milos_clocks),
+ .resets = gpu_cc_milos_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_milos_resets),
+ .gdscs = gpu_cc_milos_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_milos_gdscs),
+ .use_rpm = true,
+ .driver_data = &gpu_cc_milos_driver_data,
+};
+
+static const struct of_device_id gpu_cc_milos_match_table[] = {
+ { .compatible = "qcom,milos-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_milos_match_table);
+
+static int gpu_cc_milos_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &gpu_cc_milos_desc);
+}
+
+static struct platform_driver gpu_cc_milos_driver = {
+ .probe = gpu_cc_milos_probe,
+ .driver = {
+ .name = "gpu_cc-milos",
+ .of_match_table = gpu_cc_milos_match_table,
+ },
+};
+
+module_platform_driver(gpu_cc_milos_driver);
+
+MODULE_DESCRIPTION("QTI GPU_CC Milos Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gpucc-qcs615.c b/drivers/clk/qcom/gpucc-qcs615.c
new file mode 100644
index 000000000000..ec6739c08425
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-qcs615.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qcs615-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0_OUT_MAIN,
+ DT_GPLL0_OUT_MAIN_DIV,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_2X_CLK,
+ P_CRC_DIV_PLL0_OUT_AUX2,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_AUX,
+ P_CRC_DIV_PLL1_OUT_AUX2,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct pll_vco gpu_cc_pll0_vco[] = {
+ { 1000000000, 2100000000, 0 },
+};
+
+static const struct pll_vco gpu_cc_pll1_vco[] = {
+ { 500000000, 1000000000, 2 },
+};
+
+/* 1020 MHz configuration, VCO index 0 */
+static struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x35,
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+ .alpha_hi = 0x20,
+ .alpha = 0x00,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x0,
+ .vco_mask = GENMASK(21, 20),
+ .aux2_output_mask = BIT(2),
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .config = &gpu_cc_pll0_config,
+ .vco_table = gpu_cc_pll0_vco,
+ .num_vco = ARRAY_SIZE(gpu_cc_pll0_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_slew_ops,
+ },
+ },
+};
+
+/* 930 MHz configuration, VCO index 2 */
+static struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x30,
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+ .alpha_hi = 0x70,
+ .alpha = 0x00,
+ .alpha_en_mask = BIT(24),
+ .vco_val = BIT(21),
+ .vco_mask = GENMASK(21, 20),
+ .aux2_output_mask = BIT(2),
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x100,
+ .config = &gpu_cc_pll1_config,
+ .vco_table = gpu_cc_pll1_vco,
+ .num_vco = ARRAY_SIZE(gpu_cc_pll1_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_slew_ops,
+ },
+ }
+};
+
+/* Clock Ramp Controller */
+static struct clk_fixed_factor crc_div_pll0 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "crc_div_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpu_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+/* Clock Ramp Controller */
+static struct clk_fixed_factor crc_div_pll1 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "crc_div_pll1",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpu_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
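The fixed /2 stages matter for rate propagation; a hedged worked example against the gfx3d table below:

	/*
	 * With CLK_SET_RATE_PARENT, a 435 MHz request on
	 * gpu_cc_gx_gfx3d_clk_src selects P_CRC_DIV_PLL1_OUT_AUX2, so
	 * crc_div_pll1 forwards 2 * 435 MHz = 870 MHz to gpu_cc_pll1,
	 * which the slew ops re-lock inside its 500-1000 MHz VCO range.
	 */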
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_2X_CLK, 1 },
+ { P_CRC_DIV_PLL0_OUT_AUX2, 2 },
+ { P_GPU_CC_PLL1_OUT_AUX, 3 },
+ { P_CRC_DIV_PLL1_OUT_AUX2, 4 },
+ { P_GPLL0_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &crc_div_pll0.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .hw = &crc_div_pll1.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x1120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = {
+ F(290000000, P_CRC_DIV_PLL1_OUT_AUX2, 1, 0, 0),
+ F(350000000, P_CRC_DIV_PLL1_OUT_AUX2, 1, 0, 0),
+ F(435000000, P_CRC_DIV_PLL1_OUT_AUX2, 1, 0, 0),
+ F(500000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ F(550000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ F(650000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ F(700000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ F(745000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ F(845000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ F(895000000, P_CRC_DIV_PLL0_OUT_AUX2, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = {
+ .cmd_rcgr = 0x101c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gfx3d_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x107c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x107c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_crc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_clk = {
+ .halt_reg = 0x10a4,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x10a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_slv_clk = {
+ .halt_reg = 0x10a8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x10a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gfx3d_slv_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x108c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x1004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x109c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x109c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gfx3d_clk = {
+ .halt_reg = 0x1054,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x1054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gx_gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x1064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x5000,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x5000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x1090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_hw *gpu_cc_qcs615_hws[] = {
+ [CRC_DIV_PLL0] = &crc_div_pll0.hw,
+ [CRC_DIV_PLL1] = &crc_div_pll1.hw,
+};
+
+static struct gdsc cx_gdsc = {
+ .gdscr = 0x106c,
+ .gds_hw_ctrl = 0x1540,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x8,
+ .pd = {
+ .name = "cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct gdsc gx_gdsc = {
+ .gdscr = 0x100c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *gpu_cc_qcs615_clocks[] = {
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_GFX3D_CLK] = &gpu_cc_cx_gfx3d_clk.clkr,
+ [GPU_CC_CX_GFX3D_SLV_CLK] = &gpu_cc_cx_gfx3d_slv_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_GFX3D_CLK] = &gpu_cc_gx_gfx3d_clk.clkr,
+ [GPU_CC_GX_GFX3D_CLK_SRC] = &gpu_cc_gx_gfx3d_clk_src.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+};
+
+static struct gdsc *gpu_cc_qcs615_gdscs[] = {
+ [CX_GDSC] = &cx_gdsc,
+ [GX_GDSC] = &gx_gdsc,
+};
+
+static const struct qcom_reset_map gpu_cc_qcs615_resets[] = {
+ [GPU_CC_CX_BCR] = { 0x1068 },
+ [GPU_CC_GFX3D_AON_BCR] = { 0x10a0 },
+ [GPU_CC_GMU_BCR] = { 0x111c },
+ [GPU_CC_GX_BCR] = { 0x1008 },
+ [GPU_CC_XO_BCR] = { 0x1000 },
+};
+
+static struct clk_alpha_pll *gpu_cc_qcs615_plls[] = {
+ &gpu_cc_pll0,
+ &gpu_cc_pll1,
+};
+
+static u32 gpu_cc_qcs615_critical_cbcrs[] = {
+ 0x1078, /* GPU_CC_AHB_CLK */
+};
+
+static const struct regmap_config gpu_cc_qcs615_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x7008,
+ .fast_io = true,
+};
+
+static void clk_qcs615_regs_crc_configure(struct device *dev, struct regmap *regmap)
+{
+ /* Recommended WAKEUP/SLEEP settings for the gpu_cc_cx_gmu_clk */
+ regmap_update_bits(regmap, gpu_cc_cx_gmu_clk.clkr.enable_reg, 0xff0, 0xff0);
+
+ /*
+ * After POR, the Clock Ramp Controller (CRC) comes up in bypass mode.
+ * Software must program the registers below to enable the CRC for the
+ * GFX3D clock and divide its input clock by 2.
+ */
+ regmap_update_bits(regmap, 0x1028, 0x00015011, 0x00015011);
+ regmap_update_bits(regmap, 0x1024, 0x00800000, 0x00800000);
+}
+
+static struct qcom_cc_driver_data gpu_cc_qcs615_driver_data = {
+ .alpha_plls = gpu_cc_qcs615_plls,
+ .num_alpha_plls = ARRAY_SIZE(gpu_cc_qcs615_plls),
+ .clk_cbcrs = gpu_cc_qcs615_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(gpu_cc_qcs615_critical_cbcrs),
+ .clk_regs_configure = clk_qcs615_regs_crc_configure,
+};
+
+static const struct qcom_cc_desc gpu_cc_qcs615_desc = {
+ .config = &gpu_cc_qcs615_regmap_config,
+ .clks = gpu_cc_qcs615_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_qcs615_clocks),
+ .clk_hws = gpu_cc_qcs615_hws,
+ .num_clk_hws = ARRAY_SIZE(gpu_cc_qcs615_hws),
+ .resets = gpu_cc_qcs615_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_qcs615_resets),
+ .gdscs = gpu_cc_qcs615_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_qcs615_gdscs),
+ .driver_data = &gpu_cc_qcs615_driver_data,
+};
+
+static const struct of_device_id gpu_cc_qcs615_match_table[] = {
+ { .compatible = "qcom,qcs615-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_qcs615_match_table);
+
+static int gpu_cc_qcs615_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &gpu_cc_qcs615_desc);
+}
+
+static struct platform_driver gpu_cc_qcs615_driver = {
+ .probe = gpu_cc_qcs615_probe,
+ .driver = {
+ .name = "gpucc-qcs615",
+ .of_match_table = gpu_cc_qcs615_match_table,
+ },
+};
+
+module_platform_driver(gpu_cc_qcs615_driver);
+
+MODULE_DESCRIPTION("QTI GPUCC QCS615 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gpucc-sa8775p.c b/drivers/clk/qcom/gpucc-sa8775p.c
index 78cad622cb5a..25dcc5912f99 100644
--- a/drivers/clk/qcom/gpucc-sa8775p.c
+++ b/drivers/clk/qcom/gpucc-sa8775p.c
@@ -365,7 +365,7 @@ static struct clk_branch gpu_cc_cx_gmu_clk = {
&gpu_cc_gmu_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_aon_ops,
},
},
@@ -414,7 +414,7 @@ static struct clk_branch gpu_cc_cxo_clk = {
&gpu_cc_xo_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -499,7 +499,7 @@ static struct clk_branch gpu_cc_hub_cx_int_clk = {
&gpu_cc_hub_cx_int_div_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_aon_ops,
},
},
diff --git a/drivers/clk/qcom/gpucc-sc7180.c b/drivers/clk/qcom/gpucc-sc7180.c
index a7bf44544b95..97287488e05a 100644
--- a/drivers/clk/qcom/gpucc-sc7180.c
+++ b/drivers/clk/qcom/gpucc-sc7180.c
@@ -42,7 +42,7 @@ static struct clk_alpha_pll gpu_cc_pll1 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll1",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
diff --git a/drivers/clk/qcom/gpucc-sm6350.c b/drivers/clk/qcom/gpucc-sm6350.c
index ee89c42413f8..efbee1518dd3 100644
--- a/drivers/clk/qcom/gpucc-sm6350.c
+++ b/drivers/clk/qcom/gpucc-sm6350.c
@@ -67,7 +67,7 @@ static struct clk_alpha_pll gpu_cc_pll0 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll0",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.index = DT_BI_TCXO,
.fw_name = "bi_tcxo",
},
@@ -111,7 +111,7 @@ static struct clk_alpha_pll gpu_cc_pll1 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll1",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.index = DT_BI_TCXO,
.fw_name = "bi_tcxo",
},
diff --git a/drivers/clk/qcom/gpucc-sm8150.c b/drivers/clk/qcom/gpucc-sm8150.c
index 7ce91208c0bc..5701031c17f3 100644
--- a/drivers/clk/qcom/gpucc-sm8150.c
+++ b/drivers/clk/qcom/gpucc-sm8150.c
@@ -53,7 +53,7 @@ static struct clk_alpha_pll gpu_cc_pll1 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll1",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
diff --git a/drivers/clk/qcom/gpucc-sm8250.c b/drivers/clk/qcom/gpucc-sm8250.c
index ca0a1681d352..eee3208640cd 100644
--- a/drivers/clk/qcom/gpucc-sm8250.c
+++ b/drivers/clk/qcom/gpucc-sm8250.c
@@ -56,7 +56,7 @@ static struct clk_alpha_pll gpu_cc_pll1 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll1",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
diff --git a/drivers/clk/qcom/hfpll.c b/drivers/clk/qcom/hfpll.c
index b0b0cb074b4a..385964196185 100644
--- a/drivers/clk/qcom/hfpll.c
+++ b/drivers/clk/qcom/hfpll.c
@@ -99,7 +99,6 @@ static const struct regmap_config hfpll_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x30,
- .fast_io = true,
};
static int qcom_hfpll_probe(struct platform_device *pdev)
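
Dropping .fast_io here is a locking change, not just a cleanup: with fast_io set, regmap guards accesses with a spinlock; without it, regmap falls back to a mutex, which is the better fit when nothing touches these registers from atomic context. A minimal sketch of the resulting config, with the register geometry copied from the hunk above:

#include <linux/regmap.h>

static const struct regmap_config example_hfpll_cfg = {
	.reg_bits	= 32,
	.reg_stride	= 4,
	.val_bits	= 32,
	.max_register	= 0x30,
	/*
	 * No .fast_io: regmap serializes MMIO with a mutex instead of a
	 * spinlock, so callers may sleep but must not be in atomic context.
	 */
};
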
diff --git a/drivers/clk/qcom/ipq-cmn-pll.c b/drivers/clk/qcom/ipq-cmn-pll.c
index 432d4c4b7aa6..dafbf5732048 100644
--- a/drivers/clk/qcom/ipq-cmn-pll.c
+++ b/drivers/clk/qcom/ipq-cmn-pll.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/*
@@ -16,6 +16,10 @@
* are supplied to GCC (24 MHZ as XO and 32 KHZ as sleep clock), and to PCS
* with 31.25 MHZ.
*
+ * On the IPQ5424 SoC, there is an output clock from CMN PLL to PPE at 375 MHZ,
+ * and an output clock to NSS (network subsystem) at 300 MHZ. The other output
+ * clocks from CMN PLL on IPQ5424 are the same as IPQ9574.
+ *
* +---------+
* | GCC |
* +--+---+--+
@@ -46,6 +50,8 @@
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
+#include <dt-bindings/clock/qcom,ipq5018-cmn-pll.h>
+#include <dt-bindings/clock/qcom,ipq5424-cmn-pll.h>
#define CMN_PLL_REFCLK_SRC_SELECTION 0x28
#define CMN_PLL_REFCLK_SRC_DIV GENMASK(9, 8)
@@ -102,7 +108,26 @@ static const struct regmap_config ipq_cmn_pll_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x7fc,
- .fast_io = true,
+};
+
+static const struct cmn_pll_fixed_output_clk ipq5018_output_clks[] = {
+ CLK_PLL_OUTPUT(IPQ5018_XO_24MHZ_CLK, "xo-24mhz", 24000000UL),
+ CLK_PLL_OUTPUT(IPQ5018_SLEEP_32KHZ_CLK, "sleep-32khz", 32000UL),
+ CLK_PLL_OUTPUT(IPQ5018_ETH_50MHZ_CLK, "eth-50mhz", 50000000UL),
+ { /* Sentinel */ }
+};
+
+static const struct cmn_pll_fixed_output_clk ipq5424_output_clks[] = {
+ CLK_PLL_OUTPUT(IPQ5424_XO_24MHZ_CLK, "xo-24mhz", 24000000UL),
+ CLK_PLL_OUTPUT(IPQ5424_SLEEP_32KHZ_CLK, "sleep-32khz", 32000UL),
+ CLK_PLL_OUTPUT(IPQ5424_PCS_31P25MHZ_CLK, "pcs-31p25mhz", 31250000UL),
+ CLK_PLL_OUTPUT(IPQ5424_NSS_300MHZ_CLK, "nss-300mhz", 300000000UL),
+ CLK_PLL_OUTPUT(IPQ5424_PPE_375MHZ_CLK, "ppe-375mhz", 375000000UL),
+ CLK_PLL_OUTPUT(IPQ5424_ETH0_50MHZ_CLK, "eth0-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(IPQ5424_ETH1_50MHZ_CLK, "eth1-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(IPQ5424_ETH2_50MHZ_CLK, "eth2-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(IPQ5424_ETH_25MHZ_CLK, "eth-25mhz", 25000000UL),
+ { /* Sentinel */ }
};
static const struct cmn_pll_fixed_output_clk ipq9574_output_clks[] = {
@@ -115,6 +140,7 @@ static const struct cmn_pll_fixed_output_clk ipq9574_output_clks[] = {
CLK_PLL_OUTPUT(ETH1_50MHZ_CLK, "eth1-50mhz", 50000000UL),
CLK_PLL_OUTPUT(ETH2_50MHZ_CLK, "eth2-50mhz", 50000000UL),
CLK_PLL_OUTPUT(ETH_25MHZ_CLK, "eth-25mhz", 25000000UL),
+ { /* Sentinel */ }
};
/*
@@ -297,7 +323,7 @@ static struct clk_hw *ipq_cmn_pll_clk_hw_register(struct platform_device *pdev)
static int ipq_cmn_pll_register_clks(struct platform_device *pdev)
{
- const struct cmn_pll_fixed_output_clk *fixed_clk;
+ const struct cmn_pll_fixed_output_clk *p, *fixed_clk;
struct clk_hw_onecell_data *hw_data;
struct device *dev = &pdev->dev;
struct clk_hw *cmn_pll_hw;
@@ -305,8 +331,13 @@ static int ipq_cmn_pll_register_clks(struct platform_device *pdev)
struct clk_hw *hw;
int ret, i;
- fixed_clk = ipq9574_output_clks;
- num_clks = ARRAY_SIZE(ipq9574_output_clks);
+ fixed_clk = device_get_match_data(dev);
+ if (!fixed_clk)
+ return -EINVAL;
+
+ num_clks = 0;
+ for (p = fixed_clk; p->name; p++)
+ num_clks++;
hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, num_clks + 1),
GFP_KERNEL);
@@ -375,11 +406,11 @@ static int ipq_cmn_pll_clk_probe(struct platform_device *pdev)
*/
ret = pm_clk_add(dev, "ahb");
if (ret)
- return dev_err_probe(dev, ret, "Fail to add AHB clock\n");
+ return dev_err_probe(dev, ret, "Failed to add AHB clock\n");
ret = pm_clk_add(dev, "sys");
if (ret)
- return dev_err_probe(dev, ret, "Fail to add SYS clock\n");
+ return dev_err_probe(dev, ret, "Failed to add SYS clock\n");
ret = pm_runtime_resume_and_get(dev);
if (ret)
@@ -390,7 +421,7 @@ static int ipq_cmn_pll_clk_probe(struct platform_device *pdev)
pm_runtime_put(dev);
if (ret)
return dev_err_probe(dev, ret,
- "Fail to register CMN PLL clocks\n");
+ "Failed to register CMN PLL clocks\n");
return 0;
}
@@ -415,7 +446,9 @@ static const struct dev_pm_ops ipq_cmn_pll_pm_ops = {
};
static const struct of_device_id ipq_cmn_pll_clk_ids[] = {
- { .compatible = "qcom,ipq9574-cmn-pll", },
+ { .compatible = "qcom,ipq5018-cmn-pll", .data = &ipq5018_output_clks },
+ { .compatible = "qcom,ipq5424-cmn-pll", .data = &ipq5424_output_clks },
+ { .compatible = "qcom,ipq9574-cmn-pll", .data = &ipq9574_output_clks },
{ }
};
MODULE_DEVICE_TABLE(of, ipq_cmn_pll_clk_ids);
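
The probe change above replaces a hardcoded table with per-SoC match data, which works because every table now ends in an empty sentinel that the probe loop can count up to. The same idiom in a self-contained sketch (the struct and identifier names are stand-ins, not the driver's):

#include <linux/device.h>
#include <linux/property.h>

struct example_fixed_clk {
	const char *name;
	unsigned long rate;
};

static const struct example_fixed_clk example_clks[] = {
	{ "xo-24mhz", 24000000UL },
	{ "eth-50mhz", 50000000UL },
	{ }	/* sentinel: name == NULL terminates the walk */
};

static int example_num_clks(struct device *dev)
{
	const struct example_fixed_clk *p, *tbl;
	int n = 0;

	/* per-SoC table attached via of_device_id.data */
	tbl = device_get_match_data(dev);
	if (!tbl)
		return -EINVAL;

	for (p = tbl; p->name; p++)
		n++;

	return n;
}
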
diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c
index 22169da08a51..7e2172969289 100644
--- a/drivers/clk/qcom/lpassaudiocc-sc7280.c
+++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c
@@ -709,8 +709,8 @@ static const struct qcom_cc_desc lpass_audio_cc_sc7280_desc = {
};
static const struct qcom_reset_map lpass_audio_cc_sc7280_resets[] = {
- [LPASS_AUDIO_SWR_RX_CGCR] = { 0xa0, 1 },
- [LPASS_AUDIO_SWR_TX_CGCR] = { 0xa8, 1 },
+ [LPASS_AUDIO_SWR_RX_CGCR] = { 0xa0, 1 },
+ [LPASS_AUDIO_SWR_TX_CGCR] = { 0xa8, 1 },
[LPASS_AUDIO_SWR_WSA_CGCR] = { 0xb0, 1 },
};
@@ -799,7 +799,6 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
goto exit;
}
- pm_runtime_mark_last_busy(&pdev->dev);
exit:
pm_runtime_put_autosuspend(&pdev->dev);
@@ -868,7 +867,6 @@ static int lpass_aon_cc_sc7280_probe(struct platform_device *pdev)
goto exit;
}
- pm_runtime_mark_last_busy(&pdev->dev);
exit:
pm_runtime_put_autosuspend(&pdev->dev);
diff --git a/drivers/clk/qcom/lpasscc-sc8280xp.c b/drivers/clk/qcom/lpasscc-sc8280xp.c
index 9fd9498d7dc8..ff839788c40e 100644
--- a/drivers/clk/qcom/lpasscc-sc8280xp.c
+++ b/drivers/clk/qcom/lpasscc-sc8280xp.c
@@ -18,9 +18,9 @@
#include "reset.h"
static const struct qcom_reset_map lpass_audiocc_sc8280xp_resets[] = {
- [LPASS_AUDIO_SWR_RX_CGCR] = { 0xa0, 1 },
+ [LPASS_AUDIO_SWR_RX_CGCR] = { 0xa0, 1 },
[LPASS_AUDIO_SWR_WSA_CGCR] = { 0xb0, 1 },
- [LPASS_AUDIO_SWR_WSA2_CGCR] = { 0xd8, 1 },
+ [LPASS_AUDIO_SWR_WSA2_CGCR] = { 0xd8, 1 },
};
static const struct regmap_config lpass_audiocc_sc8280xp_regmap_config = {
diff --git a/drivers/clk/qcom/lpasscc-sm6115.c b/drivers/clk/qcom/lpasscc-sm6115.c
index 8ffdab71b948..ac6d219233b4 100644
--- a/drivers/clk/qcom/lpasscc-sm6115.c
+++ b/drivers/clk/qcom/lpasscc-sm6115.c
@@ -17,7 +17,7 @@
#include "reset.h"
static const struct qcom_reset_map lpass_audiocc_sm6115_resets[] = {
- [LPASS_AUDIO_SWR_RX_CGCR] = { .reg = 0x98, .bit = 1, .udelay = 500 },
+ [LPASS_AUDIO_SWR_RX_CGCR] = { .reg = 0x98, .bit = 1, .udelay = 500 },
};
static struct regmap_config lpass_audiocc_sm6115_regmap_config = {
diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c
index 605516d03993..5174bd3dcdc5 100644
--- a/drivers/clk/qcom/lpasscorecc-sc7180.c
+++ b/drivers/clk/qcom/lpasscorecc-sc7180.c
@@ -42,7 +42,7 @@ static const struct alpha_pll_config lpass_lpaaudio_dig_pll_config = {
};
static const u8 clk_alpha_pll_regs_offset[][PLL_OFF_MAX_REGS] = {
- [CLK_ALPHA_PLL_TYPE_FABIA] = {
+ [CLK_ALPHA_PLL_TYPE_FABIA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_CAL_L_VAL] = 0x8,
[PLL_OFF_USER_CTL] = 0x0c,
@@ -412,7 +412,6 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev)
ret = qcom_cc_really_probe(&pdev->dev, &lpass_core_cc_sc7180_desc, regmap);
- pm_runtime_mark_last_busy(&pdev->dev);
exit:
pm_runtime_put_autosuspend(&pdev->dev);
@@ -433,7 +432,6 @@ static int lpass_hm_core_probe(struct platform_device *pdev)
ret = qcom_cc_probe_by_index(pdev, 0, desc);
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return ret;
diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c
index e69fc65b13da..dbd3f561dc6d 100644
--- a/drivers/clk/qcom/mmcc-sdm660.c
+++ b/drivers/clk/qcom/mmcc-sdm660.c
@@ -74,7 +74,7 @@ static struct clk_alpha_pll mmpll0 = {
},
};
-static struct clk_alpha_pll mmpll6 = {
+static struct clk_alpha_pll mmpll6 = {
.offset = 0xf0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr = {
@@ -2781,6 +2781,7 @@ static struct gdsc *mmcc_sdm660_gdscs[] = {
};
static const struct qcom_reset_map mmcc_660_resets[] = {
+ [MDSS_BCR] = { 0x2300 },
[CAMSS_MICRO_BCR] = { 0x3490 },
};
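
The new MDSS_BCR entry surfaces through the reset framework like any other BCR. A display driver would pulse it roughly as below; this is a hypothetical sketch, and both the "mdss" reset-names entry and the hold time are assumptions:

#include <linux/delay.h>
#include <linux/reset.h>

static int example_mdss_hw_reset(struct device *dev)
{
	struct reset_control *rst;
	int ret;

	rst = devm_reset_control_get_exclusive(dev, "mdss");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ret = reset_control_assert(rst);
	if (ret)
		return ret;

	usleep_range(10, 20);	/* brief hold; exact timing is block-specific */

	return reset_control_deassert(rst);
}
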
diff --git a/drivers/clk/qcom/nsscc-ipq5424.c b/drivers/clk/qcom/nsscc-ipq5424.c
new file mode 100644
index 000000000000..5893c7146180
--- /dev/null
+++ b/drivers/clk/qcom/nsscc-ipq5424.c
@@ -0,0 +1,1340 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/interconnect-provider.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,ipq5424-nsscc.h>
+#include <dt-bindings/interconnect/qcom,ipq5424.h>
+#include <dt-bindings/reset/qcom,ipq5424-nsscc.h>
+
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "reset.h"
+
+/* Must match the order of clocks in the DT binding */
+enum {
+ DT_CMN_PLL_XO_CLK,
+ DT_CMN_PLL_NSS_300M_CLK,
+ DT_CMN_PLL_NSS_375M_CLK,
+ DT_GCC_GPLL0_OUT_AUX,
+ DT_UNIPHY0_NSS_RX_CLK,
+ DT_UNIPHY0_NSS_TX_CLK,
+ DT_UNIPHY1_NSS_RX_CLK,
+ DT_UNIPHY1_NSS_TX_CLK,
+ DT_UNIPHY2_NSS_RX_CLK,
+ DT_UNIPHY2_NSS_TX_CLK,
+};
+
+enum {
+ P_CMN_PLL_XO_CLK,
+ P_CMN_PLL_NSS_300M_CLK,
+ P_CMN_PLL_NSS_375M_CLK,
+ P_GCC_GPLL0_OUT_AUX,
+ P_UNIPHY0_NSS_RX_CLK,
+ P_UNIPHY0_NSS_TX_CLK,
+ P_UNIPHY1_NSS_RX_CLK,
+ P_UNIPHY1_NSS_TX_CLK,
+ P_UNIPHY2_NSS_RX_CLK,
+ P_UNIPHY2_NSS_TX_CLK,
+};
+
+static const struct parent_map nss_cc_parent_map_0[] = {
+ { P_CMN_PLL_XO_CLK, 0 },
+ { P_GCC_GPLL0_OUT_AUX, 2 },
+ { P_CMN_PLL_NSS_300M_CLK, 5 },
+ { P_CMN_PLL_NSS_375M_CLK, 6 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_0[] = {
+ { .index = DT_CMN_PLL_XO_CLK },
+ { .index = DT_GCC_GPLL0_OUT_AUX },
+ { .index = DT_CMN_PLL_NSS_300M_CLK },
+ { .index = DT_CMN_PLL_NSS_375M_CLK },
+};
+
+static const struct parent_map nss_cc_parent_map_1[] = {
+ { P_CMN_PLL_XO_CLK, 0 },
+ { P_GCC_GPLL0_OUT_AUX, 2 },
+ { P_UNIPHY0_NSS_RX_CLK, 3 },
+ { P_UNIPHY0_NSS_TX_CLK, 4 },
+ { P_CMN_PLL_NSS_300M_CLK, 5 },
+ { P_CMN_PLL_NSS_375M_CLK, 6 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_1[] = {
+ { .index = DT_CMN_PLL_XO_CLK },
+ { .index = DT_GCC_GPLL0_OUT_AUX },
+ { .index = DT_UNIPHY0_NSS_RX_CLK },
+ { .index = DT_UNIPHY0_NSS_TX_CLK },
+ { .index = DT_CMN_PLL_NSS_300M_CLK },
+ { .index = DT_CMN_PLL_NSS_375M_CLK },
+};
+
+static const struct parent_map nss_cc_parent_map_2[] = {
+ { P_CMN_PLL_XO_CLK, 0 },
+ { P_GCC_GPLL0_OUT_AUX, 2 },
+ { P_UNIPHY1_NSS_RX_CLK, 3 },
+ { P_UNIPHY1_NSS_TX_CLK, 4 },
+ { P_CMN_PLL_NSS_300M_CLK, 5 },
+ { P_CMN_PLL_NSS_375M_CLK, 6 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_2[] = {
+ { .index = DT_CMN_PLL_XO_CLK },
+ { .index = DT_GCC_GPLL0_OUT_AUX },
+ { .index = DT_UNIPHY1_NSS_RX_CLK },
+ { .index = DT_UNIPHY1_NSS_TX_CLK },
+ { .index = DT_CMN_PLL_NSS_300M_CLK },
+ { .index = DT_CMN_PLL_NSS_375M_CLK },
+};
+
+static const struct parent_map nss_cc_parent_map_3[] = {
+ { P_CMN_PLL_XO_CLK, 0 },
+ { P_GCC_GPLL0_OUT_AUX, 2 },
+ { P_UNIPHY2_NSS_RX_CLK, 3 },
+ { P_UNIPHY2_NSS_TX_CLK, 4 },
+ { P_CMN_PLL_NSS_300M_CLK, 5 },
+ { P_CMN_PLL_NSS_375M_CLK, 6 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_3[] = {
+ { .index = DT_CMN_PLL_XO_CLK },
+ { .index = DT_GCC_GPLL0_OUT_AUX },
+ { .index = DT_UNIPHY2_NSS_RX_CLK },
+ { .index = DT_UNIPHY2_NSS_TX_CLK },
+ { .index = DT_CMN_PLL_NSS_300M_CLK },
+ { .index = DT_CMN_PLL_NSS_375M_CLK },
+};
+
+static const struct freq_tbl ftbl_nss_cc_ce_clk_src[] = {
+ F(24000000, P_CMN_PLL_XO_CLK, 1, 0, 0),
+ F(375000000, P_CMN_PLL_NSS_375M_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_ce_clk_src = {
+ .cmd_rcgr = 0x5e0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_0,
+ .freq_tbl = ftbl_nss_cc_ce_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ce_clk_src",
+ .parent_data = nss_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_nss_cc_cfg_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_AUX, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_cfg_clk_src = {
+ .cmd_rcgr = 0x6a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_0,
+ .freq_tbl = ftbl_nss_cc_cfg_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_cfg_clk_src",
+ .parent_data = nss_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_nss_cc_eip_bfdcd_clk_src[] = {
+ F(300000000, P_CMN_PLL_NSS_300M_CLK, 1, 0, 0),
+ F(375000000, P_CMN_PLL_NSS_375M_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_eip_bfdcd_clk_src = {
+ .cmd_rcgr = 0x644,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_0,
+ .freq_tbl = ftbl_nss_cc_eip_bfdcd_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_eip_bfdcd_clk_src",
+ .parent_data = nss_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_conf ftbl_nss_cc_port1_rx_clk_src_25[] = {
+ C(P_UNIPHY0_NSS_RX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY0_NSS_RX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port1_rx_clk_src_125[] = {
+ C(P_UNIPHY0_NSS_RX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port1_rx_clk_src[] = {
+ FMS(24000000, P_CMN_PLL_XO_CLK, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port1_rx_clk_src_25),
+ FMS(78125000, P_UNIPHY0_NSS_RX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port1_rx_clk_src_125),
+ FMS(156250000, P_UNIPHY0_NSS_RX_CLK, 2, 0, 0),
+ FMS(312500000, P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_port1_rx_clk_src = {
+ .cmd_rcgr = 0x4b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_1,
+ .freq_multi_tbl = ftbl_nss_cc_port1_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port1_rx_clk_src",
+ .parent_data = nss_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static const struct freq_conf ftbl_nss_cc_port1_tx_clk_src_25[] = {
+ C(P_UNIPHY0_NSS_TX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY0_NSS_TX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port1_tx_clk_src_125[] = {
+ C(P_UNIPHY0_NSS_TX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port1_tx_clk_src[] = {
+ FMS(24000000, P_CMN_PLL_XO_CLK, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port1_tx_clk_src_25),
+ FMS(78125000, P_UNIPHY0_NSS_TX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port1_tx_clk_src_125),
+ FMS(156250000, P_UNIPHY0_NSS_TX_CLK, 2, 0, 0),
+ FMS(312500000, P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_port1_tx_clk_src = {
+ .cmd_rcgr = 0x4c0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_1,
+ .freq_multi_tbl = ftbl_nss_cc_port1_tx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port1_tx_clk_src",
+ .parent_data = nss_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static const struct freq_conf ftbl_nss_cc_port2_rx_clk_src_25[] = {
+ C(P_UNIPHY1_NSS_RX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY1_NSS_RX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port2_rx_clk_src_125[] = {
+ C(P_UNIPHY1_NSS_RX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY1_NSS_RX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port2_rx_clk_src[] = {
+ FMS(24000000, P_CMN_PLL_XO_CLK, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port2_rx_clk_src_25),
+ FMS(78125000, P_UNIPHY1_NSS_RX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port2_rx_clk_src_125),
+ FMS(156250000, P_UNIPHY1_NSS_RX_CLK, 2, 0, 0),
+ FMS(312500000, P_UNIPHY1_NSS_RX_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_port2_rx_clk_src = {
+ .cmd_rcgr = 0x4cc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_2,
+ .freq_multi_tbl = ftbl_nss_cc_port2_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port2_rx_clk_src",
+ .parent_data = nss_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static const struct freq_conf ftbl_nss_cc_port2_tx_clk_src_25[] = {
+ C(P_UNIPHY1_NSS_TX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY1_NSS_TX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port2_tx_clk_src_125[] = {
+ C(P_UNIPHY1_NSS_TX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY1_NSS_TX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port2_tx_clk_src[] = {
+ FMS(24000000, P_CMN_PLL_XO_CLK, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port2_tx_clk_src_25),
+ FMS(78125000, P_UNIPHY1_NSS_TX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port2_tx_clk_src_125),
+ FMS(156250000, P_UNIPHY1_NSS_TX_CLK, 2, 0, 0),
+ FMS(312500000, P_UNIPHY1_NSS_TX_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_port2_tx_clk_src = {
+ .cmd_rcgr = 0x4d8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_2,
+ .freq_multi_tbl = ftbl_nss_cc_port2_tx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port2_tx_clk_src",
+ .parent_data = nss_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static const struct freq_conf ftbl_nss_cc_port3_rx_clk_src_25[] = {
+ C(P_UNIPHY2_NSS_RX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY2_NSS_RX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port3_rx_clk_src_125[] = {
+ C(P_UNIPHY2_NSS_RX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY2_NSS_RX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port3_rx_clk_src[] = {
+ FMS(24000000, P_CMN_PLL_XO_CLK, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port3_rx_clk_src_25),
+ FMS(78125000, P_UNIPHY2_NSS_RX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port3_rx_clk_src_125),
+ FMS(156250000, P_UNIPHY2_NSS_RX_CLK, 2, 0, 0),
+ FMS(312500000, P_UNIPHY2_NSS_RX_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_port3_rx_clk_src = {
+ .cmd_rcgr = 0x4e4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_3,
+ .freq_multi_tbl = ftbl_nss_cc_port3_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port3_rx_clk_src",
+ .parent_data = nss_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_3),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static const struct freq_conf ftbl_nss_cc_port3_tx_clk_src_25[] = {
+ C(P_UNIPHY2_NSS_TX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY2_NSS_TX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port3_tx_clk_src_125[] = {
+ C(P_UNIPHY2_NSS_TX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY2_NSS_TX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port3_tx_clk_src[] = {
+ FMS(24000000, P_CMN_PLL_XO_CLK, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port3_tx_clk_src_25),
+ FMS(78125000, P_UNIPHY2_NSS_TX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port3_tx_clk_src_125),
+ FMS(156250000, P_UNIPHY2_NSS_TX_CLK, 2, 0, 0),
+ FMS(312500000, P_UNIPHY2_NSS_TX_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_port3_tx_clk_src = {
+ .cmd_rcgr = 0x4f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_3,
+ .freq_multi_tbl = ftbl_nss_cc_port3_tx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port3_tx_clk_src",
+ .parent_data = nss_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_3),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_ppe_clk_src = {
+ .cmd_rcgr = 0x3ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_0,
+ .freq_tbl = ftbl_nss_cc_ce_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ppe_clk_src",
+ .parent_data = nss_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port1_rx_div_clk_src = {
+ .reg = 0x4bc,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port1_rx_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port1_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port1_tx_div_clk_src = {
+ .reg = 0x4c8,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port1_tx_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port1_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port2_rx_div_clk_src = {
+ .reg = 0x4d4,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port2_rx_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port2_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port2_tx_div_clk_src = {
+ .reg = 0x4e0,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port2_tx_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port2_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port3_rx_div_clk_src = {
+ .reg = 0x4ec,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port3_rx_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port3_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port3_tx_div_clk_src = {
+ .reg = 0x4f8,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port3_tx_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port3_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_xgmac0_ptp_ref_div_clk_src = {
+ .reg = 0x3f4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac0_ptp_ref_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_xgmac1_ptp_ref_div_clk_src = {
+ .reg = 0x3f8,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac1_ptp_ref_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_xgmac2_ptp_ref_div_clk_src = {
+ .reg = 0x3fc,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac2_ptp_ref_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch nss_cc_ce_apb_clk = {
+ .halt_reg = 0x5e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ce_apb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ce_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ce_axi_clk = {
+ .halt_reg = 0x5ec,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ce_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ce_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_debug_clk = {
+ .halt_reg = 0x70c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x70c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_debug_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_eip_clk = {
+ .halt_reg = 0x658,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x658,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_eip_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_eip_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nss_csr_clk = {
+ .halt_reg = 0x6b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_nss_csr_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ce_apb_clk = {
+ .halt_reg = 0x5f4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5f4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_nssnoc_ce_apb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ce_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ce_axi_clk = {
+ .halt_reg = 0x5f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_nssnoc_ce_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ce_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_eip_clk = {
+ .halt_reg = 0x660,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x660,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_nssnoc_eip_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_eip_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_nss_csr_clk = {
+ .halt_reg = 0x6b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_nssnoc_nss_csr_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ppe_cfg_clk = {
+ .halt_reg = 0x444,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x444,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_nssnoc_ppe_cfg_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ppe_clk = {
+ .halt_reg = 0x440,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x440,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_nssnoc_ppe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port1_mac_clk = {
+ .halt_reg = 0x428,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x428,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port1_mac_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port1_rx_clk = {
+ .halt_reg = 0x4fc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4fc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port1_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port1_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port1_tx_clk = {
+ .halt_reg = 0x504,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x504,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port1_tx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port1_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port2_mac_clk = {
+ .halt_reg = 0x430,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x430,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port2_mac_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port2_rx_clk = {
+ .halt_reg = 0x50c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port2_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port2_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port2_tx_clk = {
+ .halt_reg = 0x514,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x514,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port2_tx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port2_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port3_mac_clk = {
+ .halt_reg = 0x438,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x438,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port3_mac_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port3_rx_clk = {
+ .halt_reg = 0x51c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port3_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port3_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port3_tx_clk = {
+ .halt_reg = 0x524,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x524,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port3_tx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port3_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_edma_cfg_clk = {
+ .halt_reg = 0x424,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x424,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ppe_edma_cfg_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_edma_clk = {
+ .halt_reg = 0x41c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x41c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ppe_edma_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_switch_btq_clk = {
+ .halt_reg = 0x408,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x408,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ppe_switch_btq_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_switch_cfg_clk = {
+ .halt_reg = 0x418,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x418,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ppe_switch_cfg_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_switch_clk = {
+ .halt_reg = 0x410,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x410,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ppe_switch_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_switch_ipe_clk = {
+ .halt_reg = 0x400,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x400,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ppe_switch_ipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port1_rx_clk = {
+ .halt_reg = 0x57c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x57c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_uniphy_port1_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port1_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port1_tx_clk = {
+ .halt_reg = 0x580,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x580,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_uniphy_port1_tx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port1_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port2_rx_clk = {
+ .halt_reg = 0x584,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x584,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_uniphy_port2_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port2_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port2_tx_clk = {
+ .halt_reg = 0x588,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x588,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_uniphy_port2_tx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port2_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port3_rx_clk = {
+ .halt_reg = 0x58c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_uniphy_port3_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port3_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port3_tx_clk = {
+ .halt_reg = 0x590,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x590,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_uniphy_port3_tx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port3_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_xgmac0_ptp_ref_clk = {
+ .halt_reg = 0x448,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x448,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_xgmac0_ptp_ref_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_xgmac0_ptp_ref_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_xgmac1_ptp_ref_clk = {
+ .halt_reg = 0x44c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x44c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_xgmac1_ptp_ref_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_xgmac1_ptp_ref_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_xgmac2_ptp_ref_clk = {
+ .halt_reg = 0x450,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x450,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_xgmac2_ptp_ref_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_xgmac2_ptp_ref_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *nss_cc_ipq5424_clocks[] = {
+ [NSS_CC_CE_APB_CLK] = &nss_cc_ce_apb_clk.clkr,
+ [NSS_CC_CE_AXI_CLK] = &nss_cc_ce_axi_clk.clkr,
+ [NSS_CC_CE_CLK_SRC] = &nss_cc_ce_clk_src.clkr,
+ [NSS_CC_CFG_CLK_SRC] = &nss_cc_cfg_clk_src.clkr,
+ [NSS_CC_DEBUG_CLK] = &nss_cc_debug_clk.clkr,
+ [NSS_CC_EIP_BFDCD_CLK_SRC] = &nss_cc_eip_bfdcd_clk_src.clkr,
+ [NSS_CC_EIP_CLK] = &nss_cc_eip_clk.clkr,
+ [NSS_CC_NSS_CSR_CLK] = &nss_cc_nss_csr_clk.clkr,
+ [NSS_CC_NSSNOC_CE_APB_CLK] = &nss_cc_nssnoc_ce_apb_clk.clkr,
+ [NSS_CC_NSSNOC_CE_AXI_CLK] = &nss_cc_nssnoc_ce_axi_clk.clkr,
+ [NSS_CC_NSSNOC_EIP_CLK] = &nss_cc_nssnoc_eip_clk.clkr,
+ [NSS_CC_NSSNOC_NSS_CSR_CLK] = &nss_cc_nssnoc_nss_csr_clk.clkr,
+ [NSS_CC_NSSNOC_PPE_CFG_CLK] = &nss_cc_nssnoc_ppe_cfg_clk.clkr,
+ [NSS_CC_NSSNOC_PPE_CLK] = &nss_cc_nssnoc_ppe_clk.clkr,
+ [NSS_CC_PORT1_MAC_CLK] = &nss_cc_port1_mac_clk.clkr,
+ [NSS_CC_PORT1_RX_CLK] = &nss_cc_port1_rx_clk.clkr,
+ [NSS_CC_PORT1_RX_CLK_SRC] = &nss_cc_port1_rx_clk_src.clkr,
+ [NSS_CC_PORT1_RX_DIV_CLK_SRC] = &nss_cc_port1_rx_div_clk_src.clkr,
+ [NSS_CC_PORT1_TX_CLK] = &nss_cc_port1_tx_clk.clkr,
+ [NSS_CC_PORT1_TX_CLK_SRC] = &nss_cc_port1_tx_clk_src.clkr,
+ [NSS_CC_PORT1_TX_DIV_CLK_SRC] = &nss_cc_port1_tx_div_clk_src.clkr,
+ [NSS_CC_PORT2_MAC_CLK] = &nss_cc_port2_mac_clk.clkr,
+ [NSS_CC_PORT2_RX_CLK] = &nss_cc_port2_rx_clk.clkr,
+ [NSS_CC_PORT2_RX_CLK_SRC] = &nss_cc_port2_rx_clk_src.clkr,
+ [NSS_CC_PORT2_RX_DIV_CLK_SRC] = &nss_cc_port2_rx_div_clk_src.clkr,
+ [NSS_CC_PORT2_TX_CLK] = &nss_cc_port2_tx_clk.clkr,
+ [NSS_CC_PORT2_TX_CLK_SRC] = &nss_cc_port2_tx_clk_src.clkr,
+ [NSS_CC_PORT2_TX_DIV_CLK_SRC] = &nss_cc_port2_tx_div_clk_src.clkr,
+ [NSS_CC_PORT3_MAC_CLK] = &nss_cc_port3_mac_clk.clkr,
+ [NSS_CC_PORT3_RX_CLK] = &nss_cc_port3_rx_clk.clkr,
+ [NSS_CC_PORT3_RX_CLK_SRC] = &nss_cc_port3_rx_clk_src.clkr,
+ [NSS_CC_PORT3_RX_DIV_CLK_SRC] = &nss_cc_port3_rx_div_clk_src.clkr,
+ [NSS_CC_PORT3_TX_CLK] = &nss_cc_port3_tx_clk.clkr,
+ [NSS_CC_PORT3_TX_CLK_SRC] = &nss_cc_port3_tx_clk_src.clkr,
+ [NSS_CC_PORT3_TX_DIV_CLK_SRC] = &nss_cc_port3_tx_div_clk_src.clkr,
+ [NSS_CC_PPE_CLK_SRC] = &nss_cc_ppe_clk_src.clkr,
+ [NSS_CC_PPE_EDMA_CFG_CLK] = &nss_cc_ppe_edma_cfg_clk.clkr,
+ [NSS_CC_PPE_EDMA_CLK] = &nss_cc_ppe_edma_clk.clkr,
+ [NSS_CC_PPE_SWITCH_BTQ_CLK] = &nss_cc_ppe_switch_btq_clk.clkr,
+ [NSS_CC_PPE_SWITCH_CFG_CLK] = &nss_cc_ppe_switch_cfg_clk.clkr,
+ [NSS_CC_PPE_SWITCH_CLK] = &nss_cc_ppe_switch_clk.clkr,
+ [NSS_CC_PPE_SWITCH_IPE_CLK] = &nss_cc_ppe_switch_ipe_clk.clkr,
+ [NSS_CC_UNIPHY_PORT1_RX_CLK] = &nss_cc_uniphy_port1_rx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT1_TX_CLK] = &nss_cc_uniphy_port1_tx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT2_RX_CLK] = &nss_cc_uniphy_port2_rx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT2_TX_CLK] = &nss_cc_uniphy_port2_tx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT3_RX_CLK] = &nss_cc_uniphy_port3_rx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT3_TX_CLK] = &nss_cc_uniphy_port3_tx_clk.clkr,
+ [NSS_CC_XGMAC0_PTP_REF_CLK] = &nss_cc_xgmac0_ptp_ref_clk.clkr,
+ [NSS_CC_XGMAC0_PTP_REF_DIV_CLK_SRC] = &nss_cc_xgmac0_ptp_ref_div_clk_src.clkr,
+ [NSS_CC_XGMAC1_PTP_REF_CLK] = &nss_cc_xgmac1_ptp_ref_clk.clkr,
+ [NSS_CC_XGMAC1_PTP_REF_DIV_CLK_SRC] = &nss_cc_xgmac1_ptp_ref_div_clk_src.clkr,
+ [NSS_CC_XGMAC2_PTP_REF_CLK] = &nss_cc_xgmac2_ptp_ref_clk.clkr,
+ [NSS_CC_XGMAC2_PTP_REF_DIV_CLK_SRC] = &nss_cc_xgmac2_ptp_ref_div_clk_src.clkr,
+};
+
+static const struct qcom_reset_map nss_cc_ipq5424_resets[] = {
+ [NSS_CC_CE_APB_CLK_ARES] = { 0x5e8, 2 },
+ [NSS_CC_CE_AXI_CLK_ARES] = { 0x5ec, 2 },
+ [NSS_CC_DEBUG_CLK_ARES] = { 0x70c, 2 },
+ [NSS_CC_EIP_CLK_ARES] = { 0x658, 2 },
+ [NSS_CC_NSS_CSR_CLK_ARES] = { 0x6b0, 2 },
+ [NSS_CC_NSSNOC_CE_APB_CLK_ARES] = { 0x5f4, 2 },
+ [NSS_CC_NSSNOC_CE_AXI_CLK_ARES] = { 0x5f8, 2 },
+ [NSS_CC_NSSNOC_EIP_CLK_ARES] = { 0x660, 2 },
+ [NSS_CC_NSSNOC_NSS_CSR_CLK_ARES] = { 0x6b4, 2 },
+ [NSS_CC_NSSNOC_PPE_CLK_ARES] = { 0x440, 2 },
+ [NSS_CC_NSSNOC_PPE_CFG_CLK_ARES] = { 0x444, 2 },
+ [NSS_CC_PORT1_MAC_CLK_ARES] = { 0x428, 2 },
+ [NSS_CC_PORT1_RX_CLK_ARES] = { 0x4fc, 2 },
+ [NSS_CC_PORT1_TX_CLK_ARES] = { 0x504, 2 },
+ [NSS_CC_PORT2_MAC_CLK_ARES] = { 0x430, 2 },
+ [NSS_CC_PORT2_RX_CLK_ARES] = { 0x50c, 2 },
+ [NSS_CC_PORT2_TX_CLK_ARES] = { 0x514, 2 },
+ [NSS_CC_PORT3_MAC_CLK_ARES] = { 0x438, 2 },
+ [NSS_CC_PORT3_RX_CLK_ARES] = { 0x51c, 2 },
+ [NSS_CC_PORT3_TX_CLK_ARES] = { 0x524, 2 },
+ [NSS_CC_PPE_BCR] = { 0x3e8 },
+ [NSS_CC_PPE_EDMA_CLK_ARES] = { 0x41c, 2 },
+ [NSS_CC_PPE_EDMA_CFG_CLK_ARES] = { 0x424, 2 },
+ [NSS_CC_PPE_SWITCH_BTQ_CLK_ARES] = { 0x408, 2 },
+ [NSS_CC_PPE_SWITCH_CLK_ARES] = { 0x410, 2 },
+ [NSS_CC_PPE_SWITCH_CFG_CLK_ARES] = { 0x418, 2 },
+ [NSS_CC_PPE_SWITCH_IPE_CLK_ARES] = { 0x400, 2 },
+ [NSS_CC_UNIPHY_PORT1_RX_CLK_ARES] = { 0x57c, 2 },
+ [NSS_CC_UNIPHY_PORT1_TX_CLK_ARES] = { 0x580, 2 },
+ [NSS_CC_UNIPHY_PORT2_RX_CLK_ARES] = { 0x584, 2 },
+ [NSS_CC_UNIPHY_PORT2_TX_CLK_ARES] = { 0x588, 2 },
+ [NSS_CC_UNIPHY_PORT3_RX_CLK_ARES] = { 0x58c, 2 },
+ [NSS_CC_UNIPHY_PORT3_TX_CLK_ARES] = { 0x590, 2 },
+ [NSS_CC_XGMAC0_PTP_REF_CLK_ARES] = { 0x448, 2 },
+ [NSS_CC_XGMAC1_PTP_REF_CLK_ARES] = { 0x44c, 2 },
+ [NSS_CC_XGMAC2_PTP_REF_CLK_ARES] = { 0x450, 2 },
+};
+
+static const struct regmap_config nss_cc_ipq5424_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x800,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_hws_data icc_ipq5424_nss_hws[] = {
+ { MASTER_NSSNOC_PPE, SLAVE_NSSNOC_PPE, NSS_CC_NSSNOC_PPE_CLK },
+ { MASTER_NSSNOC_PPE_CFG, SLAVE_NSSNOC_PPE_CFG, NSS_CC_NSSNOC_PPE_CFG_CLK },
+ { MASTER_NSSNOC_NSS_CSR, SLAVE_NSSNOC_NSS_CSR, NSS_CC_NSSNOC_NSS_CSR_CLK },
+ { MASTER_NSSNOC_CE_AXI, SLAVE_NSSNOC_CE_AXI, NSS_CC_NSSNOC_CE_AXI_CLK },
+ { MASTER_NSSNOC_CE_APB, SLAVE_NSSNOC_CE_APB, NSS_CC_NSSNOC_CE_APB_CLK },
+ { MASTER_NSSNOC_EIP, SLAVE_NSSNOC_EIP, NSS_CC_NSSNOC_EIP_CLK },
+};
+
+#define IPQ_NSSCC_ID (5424 * 2) /* provider-unique base id for the icc nodes */
+
+static const struct qcom_cc_desc nss_cc_ipq5424_desc = {
+ .config = &nss_cc_ipq5424_regmap_config,
+ .clks = nss_cc_ipq5424_clocks,
+ .num_clks = ARRAY_SIZE(nss_cc_ipq5424_clocks),
+ .resets = nss_cc_ipq5424_resets,
+ .num_resets = ARRAY_SIZE(nss_cc_ipq5424_resets),
+ .icc_hws = icc_ipq5424_nss_hws,
+ .num_icc_hws = ARRAY_SIZE(icc_ipq5424_nss_hws),
+ .icc_first_node_id = IPQ_NSSCC_ID,
+};
+
+static const struct dev_pm_ops nss_cc_ipq5424_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static const struct of_device_id nss_cc_ipq5424_match_table[] = {
+ { .compatible = "qcom,ipq5424-nsscc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nss_cc_ipq5424_match_table);
+
+static int nss_cc_ipq5424_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Fail to enable runtime PM\n");
+
+ ret = devm_pm_clk_create(&pdev->dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Fail to create PM clock\n");
+
+ ret = pm_clk_add(&pdev->dev, "bus");
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Fail to add bus clock\n");
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Fail to resume\n");
+
+ ret = qcom_cc_probe(pdev, &nss_cc_ipq5424_desc);
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver nss_cc_ipq5424_driver = {
+ .probe = nss_cc_ipq5424_probe,
+ .driver = {
+ .name = "qcom,ipq5424-nsscc",
+ .of_match_table = nss_cc_ipq5424_match_table,
+ .pm = &nss_cc_ipq5424_pm_ops,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(nss_cc_ipq5424_driver);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. NSSCC IPQ5424 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/nsscc-ipq9574.c b/drivers/clk/qcom/nsscc-ipq9574.c
index 64c6b05ff066..c8b11b04a7c2 100644
--- a/drivers/clk/qcom/nsscc-ipq9574.c
+++ b/drivers/clk/qcom/nsscc-ipq9574.c
@@ -3016,7 +3016,7 @@ static const struct qcom_reset_map nss_cc_ipq9574_resets[] = {
[NSSPORT4_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(5, 4) },
[NSSPORT5_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(3, 2) },
[NSSPORT6_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(1, 0) },
- [EDMA_HW_RESET] = { .reg = 0x28a08, .bitmask = GENMASK(16, 15) },
+ [EDMA_HW_RESET] = { .reg = 0x28a08, .bitmask = GENMASK(16, 15) },
};
static const struct regmap_config nss_cc_ipq9574_regmap_config = {
diff --git a/drivers/clk/qcom/tcsrcc-glymur.c b/drivers/clk/qcom/tcsrcc-glymur.c
new file mode 100644
index 000000000000..215bc2ac548d
--- /dev/null
+++ b/drivers/clk/qcom/tcsrcc-glymur.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,glymur-tcsr.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
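+/* Index into the DT "clocks" property; order must match the binding */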
+enum {
+ DT_BI_TCXO_PAD,
+};
+
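+/* BRANCH_HALT_DELAY: these clkref enables have no halt bit to poll, only a delay */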
+static struct clk_branch tcsr_edp_clkref_en = {
+ .halt_reg = 0x60,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x60,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_edp_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_1_clkref_en = {
+ .halt_reg = 0x48,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x48,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_2_clkref_en = {
+ .halt_reg = 0x4c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x4c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_3_clkref_en = {
+ .halt_reg = 0x54,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x54,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_3_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_4_clkref_en = {
+ .halt_reg = 0x58,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x58,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_4_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_1_clkref_en = {
+ .halt_reg = 0x6c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x6c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_2_clkref_en = {
+ .halt_reg = 0x70,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x70,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_3_clkref_en = {
+ .halt_reg = 0x74,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x74,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_3_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_4_clkref_en = {
+ .halt_reg = 0x88,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x88,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_4_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb3_0_clkref_en = {
+ .halt_reg = 0x64,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x64,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb3_0_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb3_1_clkref_en = {
+ .halt_reg = 0x68,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x68,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb3_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb4_1_clkref_en = {
+ .halt_reg = 0x44,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x44,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb4_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb4_2_clkref_en = {
+ .halt_reg = 0x5c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x5c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb4_2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *tcsr_cc_glymur_clocks[] = {
+ [TCSR_EDP_CLKREF_EN] = &tcsr_edp_clkref_en.clkr,
+ [TCSR_PCIE_1_CLKREF_EN] = &tcsr_pcie_1_clkref_en.clkr,
+ [TCSR_PCIE_2_CLKREF_EN] = &tcsr_pcie_2_clkref_en.clkr,
+ [TCSR_PCIE_3_CLKREF_EN] = &tcsr_pcie_3_clkref_en.clkr,
+ [TCSR_PCIE_4_CLKREF_EN] = &tcsr_pcie_4_clkref_en.clkr,
+ [TCSR_USB2_1_CLKREF_EN] = &tcsr_usb2_1_clkref_en.clkr,
+ [TCSR_USB2_2_CLKREF_EN] = &tcsr_usb2_2_clkref_en.clkr,
+ [TCSR_USB2_3_CLKREF_EN] = &tcsr_usb2_3_clkref_en.clkr,
+ [TCSR_USB2_4_CLKREF_EN] = &tcsr_usb2_4_clkref_en.clkr,
+ [TCSR_USB3_0_CLKREF_EN] = &tcsr_usb3_0_clkref_en.clkr,
+ [TCSR_USB3_1_CLKREF_EN] = &tcsr_usb3_1_clkref_en.clkr,
+ [TCSR_USB4_1_CLKREF_EN] = &tcsr_usb4_1_clkref_en.clkr,
+ [TCSR_USB4_2_CLKREF_EN] = &tcsr_usb4_2_clkref_en.clkr,
+};
+
+static const struct regmap_config tcsr_cc_glymur_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x94,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc tcsr_cc_glymur_desc = {
+ .config = &tcsr_cc_glymur_regmap_config,
+ .clks = tcsr_cc_glymur_clocks,
+ .num_clks = ARRAY_SIZE(tcsr_cc_glymur_clocks),
+};
+
+static const struct of_device_id tcsr_cc_glymur_match_table[] = {
+ { .compatible = "qcom,glymur-tcsr" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tcsr_cc_glymur_match_table);
+
+static int tcsr_cc_glymur_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &tcsr_cc_glymur_desc);
+}
+
+static struct platform_driver tcsr_cc_glymur_driver = {
+ .probe = tcsr_cc_glymur_probe,
+ .driver = {
+ .name = "tcsrcc-glymur",
+ .of_match_table = tcsr_cc_glymur_match_table,
+ },
+};
+
+static int __init tcsr_cc_glymur_init(void)
+{
+ return platform_driver_register(&tcsr_cc_glymur_driver);
+}
+subsys_initcall(tcsr_cc_glymur_init);
+
+static void __exit tcsr_cc_glymur_exit(void)
+{
+ platform_driver_unregister(&tcsr_cc_glymur_driver);
+}
+module_exit(tcsr_cc_glymur_exit);
+
+MODULE_DESCRIPTION("QTI TCSRCC GLYMUR Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/tcsrcc-sm8650.c b/drivers/clk/qcom/tcsrcc-sm8650.c
index 11c7d6df48c7..3685dcde9a4b 100644
--- a/drivers/clk/qcom/tcsrcc-sm8650.c
+++ b/drivers/clk/qcom/tcsrcc-sm8650.c
@@ -148,6 +148,7 @@ static const struct qcom_cc_desc tcsr_cc_sm8650_desc = {
};
static const struct of_device_id tcsr_cc_sm8650_match_table[] = {
+ { .compatible = "qcom,milos-tcsr" },
{ .compatible = "qcom,sm8650-tcsr" },
{ }
};
@@ -155,6 +156,13 @@ MODULE_DEVICE_TABLE(of, tcsr_cc_sm8650_match_table);
static int tcsr_cc_sm8650_probe(struct platform_device *pdev)
{
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,milos-tcsr")) {
+ tcsr_ufs_clkref_en.halt_reg = 0x31118;
+ tcsr_ufs_clkref_en.clkr.enable_reg = 0x31118;
+ tcsr_cc_sm8650_clocks[TCSR_USB2_CLKREF_EN] = NULL;
+ tcsr_cc_sm8650_clocks[TCSR_USB3_CLKREF_EN] = NULL;
+ }
+
return qcom_cc_probe(pdev, &tcsr_cc_sm8650_desc);
}
diff --git a/drivers/clk/qcom/tcsrcc-x1e80100.c b/drivers/clk/qcom/tcsrcc-x1e80100.c
index ff61769a0807..a367e1f55622 100644
--- a/drivers/clk/qcom/tcsrcc-x1e80100.c
+++ b/drivers/clk/qcom/tcsrcc-x1e80100.c
@@ -29,6 +29,10 @@ static struct clk_branch tcsr_edp_clkref_en = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "tcsr_edp_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
diff --git a/drivers/clk/qcom/videocc-milos.c b/drivers/clk/qcom/videocc-milos.c
new file mode 100644
index 000000000000..acc9df295d4f
--- /dev/null
+++ b/drivers/clk/qcom/videocc-milos.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,milos-videocc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/* Must match the order of the clocks property in the DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+ DT_IFACE,
+};
+
+enum {
+ P_BI_TCXO,
+ P_SLEEP_CLK,
+ P_VIDEO_CC_PLL0_OUT_MAIN,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+/* 604.8 MHz Configuration */
+static const struct alpha_pll_config video_cc_pll0_config = {
+ .l = 0x1f,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll video_cc_pll0 = {
+ .offset = 0x0,
+ .config = &video_cc_pll0_config,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map video_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_CC_PLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map video_cc_parent_map_2[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_2_ao[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_video_cc_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_ahb_clk_src = {
+ .cmd_rcgr = 0x8030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_ahb_clk_src",
+ .parent_data = video_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = {
+ F(604800000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(720000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1014000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1098000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1332000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1656000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_mvs0_clk_src = {
+ .cmd_rcgr = 0x8000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_1,
+ .freq_tbl = ftbl_video_cc_mvs0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk_src",
+ .parent_data = video_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x8128,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_2,
+ .freq_tbl = ftbl_video_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sleep_clk_src",
+ .parent_data = video_cc_parent_data_2_ao,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_2_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 video_cc_xo_clk_src = {
+ .cmd_rcgr = 0x810c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_xo_clk_src",
+ .parent_data = video_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0_div_clk_src = {
+ .reg = 0x80c4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0c_div2_div_clk_src = {
+ .reg = 0x8070,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_div2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch video_cc_mvs0_clk = {
+ .halt_reg = 0x80b8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x80b8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x80b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0_shift_clk = {
+ .halt_reg = 0x8144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8144,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0c_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0c_shift_clk = {
+ .halt_reg = 0x8148,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8148,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8148,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc video_cc_mvs0c_gdsc = {
+ .gdscr = 0x804c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs0c_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc video_cc_mvs0_gdsc = {
+ .gdscr = 0x80a4,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &video_cc_mvs0c_gdsc.pd,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL_TRIGGER,
+};
+
+static struct clk_regmap *video_cc_milos_clocks[] = {
+ [VIDEO_CC_AHB_CLK_SRC] = &video_cc_ahb_clk_src.clkr,
+ [VIDEO_CC_MVS0_CLK] = &video_cc_mvs0_clk.clkr,
+ [VIDEO_CC_MVS0_CLK_SRC] = &video_cc_mvs0_clk_src.clkr,
+ [VIDEO_CC_MVS0_DIV_CLK_SRC] = &video_cc_mvs0_div_clk_src.clkr,
+ [VIDEO_CC_MVS0_SHIFT_CLK] = &video_cc_mvs0_shift_clk.clkr,
+ [VIDEO_CC_MVS0C_CLK] = &video_cc_mvs0c_clk.clkr,
+ [VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC] = &video_cc_mvs0c_div2_div_clk_src.clkr,
+ [VIDEO_CC_MVS0C_SHIFT_CLK] = &video_cc_mvs0c_shift_clk.clkr,
+ [VIDEO_CC_PLL0] = &video_cc_pll0.clkr,
+ [VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr,
+ [VIDEO_CC_XO_CLK_SRC] = &video_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *video_cc_milos_gdscs[] = {
+ [VIDEO_CC_MVS0C_GDSC] = &video_cc_mvs0c_gdsc,
+ [VIDEO_CC_MVS0_GDSC] = &video_cc_mvs0_gdsc,
+};
+
+static const struct qcom_reset_map video_cc_milos_resets[] = {
+ [VIDEO_CC_INTERFACE_BCR] = { 0x80f0 },
+ [VIDEO_CC_MVS0_BCR] = { 0x80a0 },
+ [VIDEO_CC_MVS0C_CLK_ARES] = { 0x8064, 2 },
+ [VIDEO_CC_MVS0C_BCR] = { 0x8048 },
+};
+
+static struct clk_alpha_pll *video_cc_milos_plls[] = {
+ &video_cc_pll0,
+};
+
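+/* Branch clocks kept always-on by the common probe code */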
+static u32 video_cc_milos_critical_cbcrs[] = {
+ 0x80f4, /* VIDEO_CC_AHB_CLK */
+ 0x8140, /* VIDEO_CC_SLEEP_CLK */
+ 0x8124, /* VIDEO_CC_XO_CLK */
+};
+
+static const struct regmap_config video_cc_milos_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9f50,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data video_cc_milos_driver_data = {
+ .alpha_plls = video_cc_milos_plls,
+ .num_alpha_plls = ARRAY_SIZE(video_cc_milos_plls),
+ .clk_cbcrs = video_cc_milos_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(video_cc_milos_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc video_cc_milos_desc = {
+ .config = &video_cc_milos_regmap_config,
+ .clks = video_cc_milos_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_milos_clocks),
+ .resets = video_cc_milos_resets,
+ .num_resets = ARRAY_SIZE(video_cc_milos_resets),
+ .gdscs = video_cc_milos_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_milos_gdscs),
+ .use_rpm = true,
+ .driver_data = &video_cc_milos_driver_data,
+};
+
+static const struct of_device_id video_cc_milos_match_table[] = {
+ { .compatible = "qcom,milos-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_milos_match_table);
+
+static int video_cc_milos_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &video_cc_milos_desc);
+}
+
+static struct platform_driver video_cc_milos_driver = {
+ .probe = video_cc_milos_probe,
+ .driver = {
+ .name = "video_cc-milos",
+ .of_match_table = video_cc_milos_match_table,
+ },
+};
+
+module_platform_driver(video_cc_milos_driver);
+
+MODULE_DESCRIPTION("QTI VIDEO_CC Milos Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/videocc-qcs615.c b/drivers/clk/qcom/videocc-qcs615.c
new file mode 100644
index 000000000000..1b41fa44c17e
--- /dev/null
+++ b/drivers/clk/qcom/videocc-qcs615.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qcs615-videocc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_SLEEP_CLK,
+ P_VIDEO_PLL0_OUT_AUX,
+ P_VIDEO_PLL0_OUT_AUX2,
+ P_VIDEO_PLL0_OUT_MAIN,
+};
+
+static const struct pll_vco video_cc_pll0_vco[] = {
+ { 500000000, 1000000000, 2 },
+};
+
+/* 600 MHz configuration, VCO index 2 */
+static struct alpha_pll_config video_pll0_config = {
+ .l = 0x1f,
+ .alpha_hi = 0x40,
+ .alpha = 0x00,
+ .alpha_en_mask = BIT(24),
+ .vco_val = BIT(21),
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x4001055b,
+ .test_ctl_hi_val = 0x1,
+ .test_ctl_hi_mask = 0x1,
+};
+
+static struct clk_alpha_pll video_pll0 = {
+ .offset = 0x42c,
+ .config = &video_pll0_config,
+ .vco_table = video_cc_pll0_vco,
+ .num_vco = ARRAY_SIZE(video_cc_pll0_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_slew_ops,
+ },
+ },
+};
+
+static const struct parent_map video_cc_parent_map_0[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0_ao[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_PLL0_OUT_MAIN, 1 },
+ { P_VIDEO_PLL0_OUT_AUX, 2 },
+ { P_VIDEO_PLL0_OUT_AUX2, 3 },
+};
+
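+/* All three PLL0 selector inputs are driven by the same video_pll0 instance */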
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_pll0.clkr.hw },
+ { .hw = &video_pll0.clkr.hw },
+ { .hw = &video_pll0.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xaf8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sleep_clk_src",
+ .parent_data = video_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0_ao),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
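+/* F() encodes the pre-divider as (2 * h - 1), so half-integer dividers such as 4.5 are valid */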
+static const struct freq_tbl ftbl_video_cc_venus_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(133333333, P_VIDEO_PLL0_OUT_MAIN, 4.5, 0, 0),
+ F(240000000, P_VIDEO_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(300000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ F(380000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ F(410000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ F(460000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_venus_clk_src = {
+ .cmd_rcgr = 0x7f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_1,
+ .freq_tbl = ftbl_video_cc_venus_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_venus_clk_src",
+ .parent_data = video_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch video_cc_sleep_clk = {
+ .halt_reg = 0xb18,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb18,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_sleep_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &video_cc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_vcodec0_axi_clk = {
+ .halt_reg = 0x8f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_vcodec0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_vcodec0_core_clk = {
+ .halt_reg = 0x890,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x890,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_vcodec0_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_venus_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_venus_ahb_clk = {
+ .halt_reg = 0x9b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_venus_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_venus_ctl_axi_clk = {
+ .halt_reg = 0x8d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_venus_ctl_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_venus_ctl_core_clk = {
+ .halt_reg = 0x850,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x850,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_venus_ctl_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_venus_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc vcodec0_gdsc = {
+ .gdscr = 0x874,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "vcodec0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL_TRIGGER | POLL_CFG_GDSCR,
+};
+
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x814,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "venus_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *video_cc_qcs615_clocks[] = {
+ [VIDEO_CC_SLEEP_CLK] = &video_cc_sleep_clk.clkr,
+ [VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr,
+ [VIDEO_CC_VCODEC0_AXI_CLK] = &video_cc_vcodec0_axi_clk.clkr,
+ [VIDEO_CC_VCODEC0_CORE_CLK] = &video_cc_vcodec0_core_clk.clkr,
+ [VIDEO_CC_VENUS_AHB_CLK] = &video_cc_venus_ahb_clk.clkr,
+ [VIDEO_CC_VENUS_CLK_SRC] = &video_cc_venus_clk_src.clkr,
+ [VIDEO_CC_VENUS_CTL_AXI_CLK] = &video_cc_venus_ctl_axi_clk.clkr,
+ [VIDEO_CC_VENUS_CTL_CORE_CLK] = &video_cc_venus_ctl_core_clk.clkr,
+ [VIDEO_PLL0] = &video_pll0.clkr,
+};
+
+static struct gdsc *video_cc_qcs615_gdscs[] = {
+ [VCODEC0_GDSC] = &vcodec0_gdsc,
+ [VENUS_GDSC] = &venus_gdsc,
+};
+
+static const struct qcom_reset_map video_cc_qcs615_resets[] = {
+ [VIDEO_CC_INTERFACE_BCR] = { 0x8b0 },
+ [VIDEO_CC_VCODEC0_BCR] = { 0x870 },
+ [VIDEO_CC_VENUS_BCR] = { 0x810 },
+};
+
+static struct clk_alpha_pll *video_cc_qcs615_plls[] = {
+ &video_pll0,
+};
+
+static u32 video_cc_qcs615_critical_cbcrs[] = {
+ 0xab8, /* VIDEO_CC_XO_CLK */
+};
+
+static const struct regmap_config video_cc_qcs615_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xb94,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data video_cc_qcs615_driver_data = {
+ .alpha_plls = video_cc_qcs615_plls,
+ .num_alpha_plls = ARRAY_SIZE(video_cc_qcs615_plls),
+ .clk_cbcrs = video_cc_qcs615_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(video_cc_qcs615_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc video_cc_qcs615_desc = {
+ .config = &video_cc_qcs615_regmap_config,
+ .clks = video_cc_qcs615_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_qcs615_clocks),
+ .resets = video_cc_qcs615_resets,
+ .num_resets = ARRAY_SIZE(video_cc_qcs615_resets),
+ .gdscs = video_cc_qcs615_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_qcs615_gdscs),
+ .driver_data = &video_cc_qcs615_driver_data,
+};
+
+static const struct of_device_id video_cc_qcs615_match_table[] = {
+ { .compatible = "qcom,qcs615-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_qcs615_match_table);
+
+static int video_cc_qcs615_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &video_cc_qcs615_desc);
+}
+
+static struct platform_driver video_cc_qcs615_driver = {
+ .probe = video_cc_qcs615_probe,
+ .driver = {
+ .name = "videocc-qcs615",
+ .of_match_table = video_cc_qcs615_match_table,
+ },
+};
+
+module_platform_driver(video_cc_qcs615_driver);
+
+MODULE_DESCRIPTION("QTI VIDEOCC QCS615 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/videocc-sc7180.c b/drivers/clk/qcom/videocc-sc7180.c
index d7f845480396..dd2441d6aa83 100644
--- a/drivers/clk/qcom/videocc-sc7180.c
+++ b/drivers/clk/qcom/videocc-sc7180.c
@@ -166,7 +166,7 @@ static struct gdsc vcodec0_gdsc = {
.pd = {
.name = "vcodec0_gdsc",
},
- .flags = HW_CTRL,
+ .flags = HW_CTRL_TRIGGER,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index f77a07779477..6dedc80a8b3e 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -260,7 +260,7 @@ static struct gdsc vcodec0_gdsc = {
},
.cxcs = (unsigned int []){ 0x890, 0x930 },
.cxc_count = 2,
- .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .flags = HW_CTRL_TRIGGER | POLL_CFG_GDSCR,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -271,7 +271,7 @@ static struct gdsc vcodec1_gdsc = {
},
.cxcs = (unsigned int []){ 0x8d0, 0x950 },
.cxc_count = 2,
- .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .flags = HW_CTRL_TRIGGER | POLL_CFG_GDSCR,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/videocc-sm6350.c b/drivers/clk/qcom/videocc-sm6350.c
new file mode 100644
index 000000000000..34bdc5aa865a
--- /dev/null
+++ b/drivers/clk/qcom/videocc-sm6350.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm6350-videocc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_CHIP_SLEEP_CLK,
+ P_VIDEO_PLL0_OUT_EVEN,
+};
+
+static const struct pll_vco fabia_vco[] = {
+ { 125000000, 1000000000, 1 },
+};
+
+/* 600 MHz */
+static const struct alpha_pll_config video_pll0_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002067,
+ .test_ctl_val = 0x40000000,
+ .test_ctl_hi_val = 0x00000002,
+ .user_ctl_val = 0x00000101,
+ .user_ctl_hi_val = 0x00004005,
+};
+
+static struct clk_alpha_pll video_pll0 = {
+ .offset = 0x0,
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_video_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv video_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_video_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_video_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static const struct parent_map video_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_PLL0_OUT_EVEN, 3 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_pll0_out_even.clkr.hw },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+ { P_CHIP_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_video_cc_iris_clk_src[] = {
+ F(133250000, P_VIDEO_PLL0_OUT_EVEN, 2, 0, 0),
+ F(240000000, P_VIDEO_PLL0_OUT_EVEN, 1.5, 0, 0),
+ F(300000000, P_VIDEO_PLL0_OUT_EVEN, 1, 0, 0),
+ F(380000000, P_VIDEO_PLL0_OUT_EVEN, 1, 0, 0),
+ F(460000000, P_VIDEO_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_iris_clk_src = {
+ .cmd_rcgr = 0x1000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_iris_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_iris_clk_src",
+ .parent_data = video_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = {
+ F(32764, P_CHIP_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x701c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_1,
+ .freq_tbl = ftbl_video_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sleep_clk_src",
+ .parent_data = video_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch video_cc_iris_ahb_clk = {
+ .halt_reg = 0x5004,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x5004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_iris_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_iris_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0_axi_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0_core_clk = {
+ .halt_reg = 0x3010,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x3010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_iris_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvsc_core_clk = {
+ .halt_reg = 0x2014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvsc_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_iris_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvsc_ctl_axi_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvsc_ctl_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_sleep_clk = {
+ .halt_reg = 0x7034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sleep_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_venus_ahb_clk = {
+ .halt_reg = 0x801c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_venus_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mvsc_gdsc = {
+ .gdscr = 0x2004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "mvsc_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mvs0_gdsc = {
+ .gdscr = 0x3004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "mvs0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL_TRIGGER,
+};
+
+static struct gdsc *video_cc_sm6350_gdscs[] = {
+ [MVSC_GDSC] = &mvsc_gdsc,
+ [MVS0_GDSC] = &mvs0_gdsc,
+};
+
+static struct clk_regmap *video_cc_sm6350_clocks[] = {
+ [VIDEO_CC_IRIS_AHB_CLK] = &video_cc_iris_ahb_clk.clkr,
+ [VIDEO_CC_IRIS_CLK_SRC] = &video_cc_iris_clk_src.clkr,
+ [VIDEO_CC_MVS0_AXI_CLK] = &video_cc_mvs0_axi_clk.clkr,
+ [VIDEO_CC_MVS0_CORE_CLK] = &video_cc_mvs0_core_clk.clkr,
+ [VIDEO_CC_MVSC_CORE_CLK] = &video_cc_mvsc_core_clk.clkr,
+ [VIDEO_CC_MVSC_CTL_AXI_CLK] = &video_cc_mvsc_ctl_axi_clk.clkr,
+ [VIDEO_CC_SLEEP_CLK] = &video_cc_sleep_clk.clkr,
+ [VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr,
+ [VIDEO_CC_VENUS_AHB_CLK] = &video_cc_venus_ahb_clk.clkr,
+ [VIDEO_PLL0] = &video_pll0.clkr,
+ [VIDEO_PLL0_OUT_EVEN] = &video_pll0_out_even.clkr,
+};
+
+static const struct regmap_config video_cc_sm6350_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xb000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc video_cc_sm6350_desc = {
+ .config = &video_cc_sm6350_regmap_config,
+ .clks = video_cc_sm6350_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_sm6350_clocks),
+ .gdscs = video_cc_sm6350_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_sm6350_gdscs),
+};
+
+static const struct of_device_id video_cc_sm6350_match_table[] = {
+ { .compatible = "qcom,sm6350-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_sm6350_match_table);
+
+static int video_cc_sm6350_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &video_cc_sm6350_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
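+	/* Program the Fabia PLL before registering any of its child clocks */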
+ clk_fabia_pll_configure(&video_pll0, regmap, &video_pll0_config);
+
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x7018); /* VIDEO_CC_XO_CLK */
+
+ return qcom_cc_really_probe(&pdev->dev, &video_cc_sm6350_desc, regmap);
+}
+
+static struct platform_driver video_cc_sm6350_driver = {
+ .probe = video_cc_sm6350_probe,
+ .driver = {
+ .name = "video_cc-sm6350",
+ .of_match_table = video_cc_sm6350_match_table,
+ },
+};
+
+module_platform_driver(video_cc_sm6350_driver);
+
+MODULE_DESCRIPTION("QTI VIDEO_CC SM6350 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/videocc-sm7150.c b/drivers/clk/qcom/videocc-sm7150.c
index 14ef7f561753..b6912560ef9b 100644
--- a/drivers/clk/qcom/videocc-sm7150.c
+++ b/drivers/clk/qcom/videocc-sm7150.c
@@ -271,7 +271,7 @@ static struct gdsc vcodec0_gdsc = {
},
.cxcs = (unsigned int []){ 0x890, 0x9ec },
.cxc_count = 2,
- .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .flags = HW_CTRL_TRIGGER | POLL_CFG_GDSCR,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -282,7 +282,7 @@ static struct gdsc vcodec1_gdsc = {
},
.cxcs = (unsigned int []){ 0x8d0, 0xa0c },
.cxc_count = 2,
- .flags = HW_CTRL | POLL_CFG_GDSCR,
+ .flags = HW_CTRL_TRIGGER | POLL_CFG_GDSCR,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/videocc-sm8150.c b/drivers/clk/qcom/videocc-sm8150.c
index daab3237eec1..3024f6fc89c8 100644
--- a/drivers/clk/qcom/videocc-sm8150.c
+++ b/drivers/clk/qcom/videocc-sm8150.c
@@ -179,7 +179,7 @@ static struct gdsc vcodec0_gdsc = {
.pd = {
.name = "vcodec0_gdsc",
},
- .flags = HW_CTRL,
+ .flags = HW_CTRL_TRIGGER,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -188,7 +188,7 @@ static struct gdsc vcodec1_gdsc = {
.pd = {
.name = "vcodec1_gdsc",
},
- .flags = HW_CTRL,
+ .flags = HW_CTRL_TRIGGER,
.pwrsts = PWRSTS_OFF_ON,
};
static struct clk_regmap *video_cc_sm8150_clocks[] = {
diff --git a/drivers/clk/qcom/videocc-sm8450.c b/drivers/clk/qcom/videocc-sm8450.c
index 2e11dcffb664..dc168ce199cc 100644
--- a/drivers/clk/qcom/videocc-sm8450.c
+++ b/drivers/clk/qcom/videocc-sm8450.c
@@ -7,7 +7,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,sm8450-videocc.h>
@@ -63,6 +62,7 @@ static const struct alpha_pll_config sm8475_video_cc_pll0_config = {
static struct clk_alpha_pll video_cc_pll0 = {
.offset = 0x0,
+ .config = &video_cc_pll0_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -106,6 +106,7 @@ static const struct alpha_pll_config sm8475_video_cc_pll1_config = {
static struct clk_alpha_pll video_cc_pll1 = {
.offset = 0x1000,
+ .config = &video_cc_pll1_config,
.vco_table = lucid_evo_vco,
.num_vco = ARRAY_SIZE(lucid_evo_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -347,7 +348,7 @@ static struct gdsc video_cc_mvs0_gdsc = {
},
.pwrsts = PWRSTS_OFF_ON,
.parent = &video_cc_mvs0c_gdsc.pd,
- .flags = RETAIN_FF_ENABLE | HW_CTRL,
+ .flags = HW_CTRL_TRIGGER | RETAIN_FF_ENABLE,
};
static struct gdsc video_cc_mvs1c_gdsc = {
@@ -372,7 +373,7 @@ static struct gdsc video_cc_mvs1_gdsc = {
},
.pwrsts = PWRSTS_OFF_ON,
.parent = &video_cc_mvs1c_gdsc.pd,
- .flags = RETAIN_FF_ENABLE | HW_CTRL,
+ .flags = HW_CTRL_TRIGGER | RETAIN_FF_ENABLE,
};
static struct clk_regmap *video_cc_sm8450_clocks[] = {
@@ -407,6 +408,17 @@ static const struct qcom_reset_map video_cc_sm8450_resets[] = {
[VIDEO_CC_MVS1C_CLK_ARES] = { .reg = 0x808c, .bit = 2, .udelay = 1000 },
};
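+/* PLLs configured and critical CBCRs enabled by the common probe via driver_data */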
+static struct clk_alpha_pll *video_cc_sm8450_plls[] = {
+ &video_cc_pll0,
+ &video_cc_pll1,
+};
+
+static u32 video_cc_sm8450_critical_cbcrs[] = {
+ 0x80e4, /* VIDEO_CC_AHB_CLK */
+ 0x8114, /* VIDEO_CC_XO_CLK */
+ 0x8130, /* VIDEO_CC_SLEEP_CLK */
+};
+
static const struct regmap_config video_cc_sm8450_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -415,6 +427,13 @@ static const struct regmap_config video_cc_sm8450_regmap_config = {
.fast_io = true,
};
+static struct qcom_cc_driver_data video_cc_sm8450_driver_data = {
+ .alpha_plls = video_cc_sm8450_plls,
+ .num_alpha_plls = ARRAY_SIZE(video_cc_sm8450_plls),
+ .clk_cbcrs = video_cc_sm8450_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(video_cc_sm8450_critical_cbcrs),
+};
+
static const struct qcom_cc_desc video_cc_sm8450_desc = {
.config = &video_cc_sm8450_regmap_config,
.clks = video_cc_sm8450_clocks,
@@ -423,6 +442,8 @@ static const struct qcom_cc_desc video_cc_sm8450_desc = {
.num_resets = ARRAY_SIZE(video_cc_sm8450_resets),
.gdscs = video_cc_sm8450_gdscs,
.num_gdscs = ARRAY_SIZE(video_cc_sm8450_gdscs),
+ .use_rpm = true,
+ .driver_data = &video_cc_sm8450_driver_data,
};
static const struct of_device_id video_cc_sm8450_match_table[] = {
@@ -434,23 +455,6 @@ MODULE_DEVICE_TABLE(of, video_cc_sm8450_match_table);
static int video_cc_sm8450_probe(struct platform_device *pdev)
{
- struct regmap *regmap;
- int ret;
-
- ret = devm_pm_runtime_enable(&pdev->dev);
- if (ret)
- return ret;
-
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret)
- return ret;
-
- regmap = qcom_cc_map(pdev, &video_cc_sm8450_desc);
- if (IS_ERR(regmap)) {
- pm_runtime_put(&pdev->dev);
- return PTR_ERR(regmap);
- }
-
if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8475-videocc")) {
/* Update VideoCC PLL0 */
video_cc_pll0.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
@@ -458,23 +462,11 @@ static int video_cc_sm8450_probe(struct platform_device *pdev)
/* Update VideoCC PLL1 */
video_cc_pll1.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
- clk_lucid_ole_pll_configure(&video_cc_pll0, regmap, &sm8475_video_cc_pll0_config);
- clk_lucid_ole_pll_configure(&video_cc_pll1, regmap, &sm8475_video_cc_pll1_config);
- } else {
- clk_lucid_evo_pll_configure(&video_cc_pll0, regmap, &video_cc_pll0_config);
- clk_lucid_evo_pll_configure(&video_cc_pll1, regmap, &video_cc_pll1_config);
+ video_cc_pll0.config = &sm8475_video_cc_pll0_config;
+ video_cc_pll1.config = &sm8475_video_cc_pll1_config;
}
- /* Keep some clocks always-on */
- qcom_branch_set_clk_en(regmap, 0x80e4); /* VIDEO_CC_AHB_CLK */
- qcom_branch_set_clk_en(regmap, 0x8130); /* VIDEO_CC_SLEEP_CLK */
- qcom_branch_set_clk_en(regmap, 0x8114); /* VIDEO_CC_XO_CLK */
-
- ret = qcom_cc_really_probe(&pdev->dev, &video_cc_sm8450_desc, regmap);
-
- pm_runtime_put(&pdev->dev);
-
- return ret;
+ return qcom_cc_probe(pdev, &video_cc_sm8450_desc);
}
static struct platform_driver video_cc_sm8450_driver = {
diff --git a/drivers/clk/qcom/videocc-sm8550.c b/drivers/clk/qcom/videocc-sm8550.c
index fcfe0cade6d0..32a6505abe26 100644
--- a/drivers/clk/qcom/videocc-sm8550.c
+++ b/drivers/clk/qcom/videocc-sm8550.c
@@ -7,7 +7,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,sm8650-videocc.h>
@@ -51,6 +50,7 @@ static struct alpha_pll_config video_cc_pll0_config = {
static struct clk_alpha_pll video_cc_pll0 = {
.offset = 0x0,
+ .config = &video_cc_pll0_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -82,6 +82,7 @@ static struct alpha_pll_config video_cc_pll1_config = {
static struct clk_alpha_pll video_cc_pll1 = {
.offset = 0x1000,
+ .config = &video_cc_pll1_config,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
@@ -144,6 +145,16 @@ static const struct freq_tbl ftbl_video_cc_mvs0_clk_src_sm8650[] = {
{ }
};
+static const struct freq_tbl ftbl_video_cc_mvs0_clk_src_x1e80100[] = {
+ F(576000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(720000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1014000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1098000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1332000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1443000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
static struct clk_rcg2 video_cc_mvs0_clk_src = {
.cmd_rcgr = 0x8000,
.mnd_width = 0,
@@ -176,6 +187,15 @@ static const struct freq_tbl ftbl_video_cc_mvs1_clk_src_sm8650[] = {
{ }
};
+static const struct freq_tbl ftbl_video_cc_mvs1_clk_src_x1e80100[] = {
+ F(840000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1050000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1350000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1500000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1650000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
static struct clk_rcg2 video_cc_mvs1_clk_src = {
.cmd_rcgr = 0x8018,
.mnd_width = 0,
@@ -511,6 +531,23 @@ static const struct qcom_reset_map video_cc_sm8550_resets[] = {
[VIDEO_CC_XO_CLK_ARES] = { .reg = 0x8124, .bit = 2, .udelay = 100 },
};
+static struct clk_alpha_pll *video_cc_sm8550_plls[] = {
+ &video_cc_pll0,
+ &video_cc_pll1,
+};
+
+static u32 video_cc_sm8550_critical_cbcrs[] = {
+ 0x80f4, /* VIDEO_CC_AHB_CLK */
+ 0x8124, /* VIDEO_CC_XO_CLK */
+ 0x8140, /* VIDEO_CC_SLEEP_CLK */
+};
+
+static u32 video_cc_sm8650_critical_cbcrs[] = {
+ 0x80f4, /* VIDEO_CC_AHB_CLK */
+ 0x8124, /* VIDEO_CC_XO_CLK */
+ 0x8150, /* VIDEO_CC_SLEEP_CLK */
+};
+
static const struct regmap_config video_cc_sm8550_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -519,6 +556,13 @@ static const struct regmap_config video_cc_sm8550_regmap_config = {
.fast_io = true,
};
+static struct qcom_cc_driver_data video_cc_sm8550_driver_data = {
+ .alpha_plls = video_cc_sm8550_plls,
+ .num_alpha_plls = ARRAY_SIZE(video_cc_sm8550_plls),
+ .clk_cbcrs = video_cc_sm8550_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(video_cc_sm8550_critical_cbcrs),
+};
+
static const struct qcom_cc_desc video_cc_sm8550_desc = {
.config = &video_cc_sm8550_regmap_config,
.clks = video_cc_sm8550_clocks,
@@ -527,37 +571,30 @@ static const struct qcom_cc_desc video_cc_sm8550_desc = {
.num_resets = ARRAY_SIZE(video_cc_sm8550_resets),
.gdscs = video_cc_sm8550_gdscs,
.num_gdscs = ARRAY_SIZE(video_cc_sm8550_gdscs),
+ .use_rpm = true,
+ .driver_data = &video_cc_sm8550_driver_data,
};
static const struct of_device_id video_cc_sm8550_match_table[] = {
{ .compatible = "qcom,sm8550-videocc" },
{ .compatible = "qcom,sm8650-videocc" },
+ { .compatible = "qcom,x1e80100-videocc" },
{ }
};
MODULE_DEVICE_TABLE(of, video_cc_sm8550_match_table);
static int video_cc_sm8550_probe(struct platform_device *pdev)
{
- struct regmap *regmap;
- int ret;
- u32 sleep_clk_offset = 0x8140;
-
- ret = devm_pm_runtime_enable(&pdev->dev);
- if (ret)
- return ret;
-
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret)
- return ret;
-
- regmap = qcom_cc_map(pdev, &video_cc_sm8550_desc);
- if (IS_ERR(regmap)) {
- pm_runtime_put(&pdev->dev);
- return PTR_ERR(regmap);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,x1e80100-videocc")) {
+ video_cc_pll0_config.l = 0x1e;
+ video_cc_pll0_config.alpha = 0x0000;
+ video_cc_pll1_config.l = 0x2b;
+ video_cc_pll1_config.alpha = 0xc000;
+ video_cc_mvs0_clk_src.freq_tbl = ftbl_video_cc_mvs0_clk_src_x1e80100;
+ video_cc_mvs1_clk_src.freq_tbl = ftbl_video_cc_mvs1_clk_src_x1e80100;
}
if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8650-videocc")) {
- sleep_clk_offset = 0x8150;
video_cc_pll0_config.l = 0x1e;
video_cc_pll0_config.alpha = 0xa000;
video_cc_pll1_config.l = 0x2b;
@@ -569,21 +606,13 @@ static int video_cc_sm8550_probe(struct platform_device *pdev)
video_cc_sm8550_clocks[VIDEO_CC_MVS1_SHIFT_CLK] = &video_cc_mvs1_shift_clk.clkr;
video_cc_sm8550_clocks[VIDEO_CC_MVS1C_SHIFT_CLK] = &video_cc_mvs1c_shift_clk.clkr;
video_cc_sm8550_clocks[VIDEO_CC_XO_CLK_SRC] = &video_cc_xo_clk_src.clkr;
- }
-
- clk_lucid_ole_pll_configure(&video_cc_pll0, regmap, &video_cc_pll0_config);
- clk_lucid_ole_pll_configure(&video_cc_pll1, regmap, &video_cc_pll1_config);
- /* Keep some clocks always-on */
- qcom_branch_set_clk_en(regmap, 0x80f4); /* VIDEO_CC_AHB_CLK */
- qcom_branch_set_clk_en(regmap, sleep_clk_offset); /* VIDEO_CC_SLEEP_CLK */
- qcom_branch_set_clk_en(regmap, 0x8124); /* VIDEO_CC_XO_CLK */
-
- ret = qcom_cc_really_probe(&pdev->dev, &video_cc_sm8550_desc, regmap);
-
- pm_runtime_put(&pdev->dev);
+ video_cc_sm8550_driver_data.clk_cbcrs = video_cc_sm8650_critical_cbcrs;
+ video_cc_sm8550_driver_data.num_clk_cbcrs =
+ ARRAY_SIZE(video_cc_sm8650_critical_cbcrs);
+ }
- return ret;
+ return qcom_cc_probe(pdev, &video_cc_sm8550_desc);
}
static struct platform_driver video_cc_sm8550_driver = {
diff --git a/drivers/clk/qcom/videocc-sm8750.c b/drivers/clk/qcom/videocc-sm8750.c
new file mode 100644
index 000000000000..0acf3104d702
--- /dev/null
+++ b/drivers/clk/qcom/videocc-sm8750.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8750-videocc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_SLEEP_CLK,
+ P_VIDEO_CC_PLL0_OUT_MAIN,
+};
+
+static const struct pll_vco taycan_elu_vco[] = {
+ { 249600000, 2500000000, 0 },
+};
+
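+/* 720.0 MHz Configuration */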
+static const struct alpha_pll_config video_cc_pll0_config = {
+ .l = 0x25,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x19660387,
+ .config_ctl_hi_val = 0x098060a0,
+ .config_ctl_hi1_val = 0xb416cb20,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000002,
+};
+
+static struct clk_alpha_pll video_cc_pll0 = {
+ .offset = 0x0,
+ .config = &video_cc_pll0_config,
+ .vco_table = taycan_elu_vco,
+ .num_vco = ARRAY_SIZE(taycan_elu_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_taycan_elu_ops,
+ },
+ },
+};
+
+static const struct parent_map video_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_CC_PLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map video_cc_parent_map_2[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_2_ao[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_video_cc_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_ahb_clk_src = {
+ .cmd_rcgr = 0x8018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_ahb_clk_src",
+ .parent_data = video_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = {
+ F(720000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1014000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1260000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1332000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1600000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1710000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1890000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_mvs0_clk_src = {
+ .cmd_rcgr = 0x8000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_1,
+ .freq_tbl = ftbl_video_cc_mvs0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk_src",
+ .parent_data = video_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x80e0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_2,
+ .freq_tbl = ftbl_video_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sleep_clk_src",
+ .parent_data = video_cc_parent_data_2_ao,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_2_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 video_cc_xo_clk_src = {
+ .cmd_rcgr = 0x80bc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_xo_clk_src",
+ .parent_data = video_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0_div_clk_src = {
+ .reg = 0x809c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0c_div2_div_clk_src = {
+ .reg = 0x8060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_div2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch video_cc_mvs0_clk = {
+ .halt_reg = 0x807c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x807c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_mem_branch video_cc_mvs0_freerun_clk = {
+ .mem_enable_reg = 0x8090,
+ .mem_ack_reg = 0x8090,
+ .mem_enable_mask = BIT(3),
+ .mem_enable_ack_mask = GENMASK(11, 10),
+ .mem_enable_invert = true,
+ .branch = {
+ .halt_reg = 0x808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_freerun_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0_shift_clk = {
+ .halt_reg = 0x80d8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x80d8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x80d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0c_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0c_freerun_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_freerun_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0c_shift_clk = {
+ .halt_reg = 0x80dc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x80dc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x80dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc video_cc_mvs0c_gdsc = {
+ .gdscr = 0x8034,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs0c_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc video_cc_mvs0_gdsc = {
+ .gdscr = 0x8068,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &video_cc_mvs0c_gdsc.pd,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL_TRIGGER,
+};
+
+static struct clk_regmap *video_cc_sm8750_clocks[] = {
+ [VIDEO_CC_AHB_CLK_SRC] = &video_cc_ahb_clk_src.clkr,
+ [VIDEO_CC_MVS0_CLK] = &video_cc_mvs0_clk.clkr,
+ [VIDEO_CC_MVS0_CLK_SRC] = &video_cc_mvs0_clk_src.clkr,
+ [VIDEO_CC_MVS0_DIV_CLK_SRC] = &video_cc_mvs0_div_clk_src.clkr,
+ [VIDEO_CC_MVS0_FREERUN_CLK] = &video_cc_mvs0_freerun_clk.branch.clkr,
+ [VIDEO_CC_MVS0_SHIFT_CLK] = &video_cc_mvs0_shift_clk.clkr,
+ [VIDEO_CC_MVS0C_CLK] = &video_cc_mvs0c_clk.clkr,
+ [VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC] = &video_cc_mvs0c_div2_div_clk_src.clkr,
+ [VIDEO_CC_MVS0C_FREERUN_CLK] = &video_cc_mvs0c_freerun_clk.clkr,
+ [VIDEO_CC_MVS0C_SHIFT_CLK] = &video_cc_mvs0c_shift_clk.clkr,
+ [VIDEO_CC_PLL0] = &video_cc_pll0.clkr,
+ [VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr,
+ [VIDEO_CC_XO_CLK_SRC] = &video_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *video_cc_sm8750_gdscs[] = {
+ [VIDEO_CC_MVS0_GDSC] = &video_cc_mvs0_gdsc,
+ [VIDEO_CC_MVS0C_GDSC] = &video_cc_mvs0c_gdsc,
+};
+
+static const struct qcom_reset_map video_cc_sm8750_resets[] = {
+ [VIDEO_CC_INTERFACE_BCR] = { 0x80a0 },
+ [VIDEO_CC_MVS0_BCR] = { 0x8064 },
+ [VIDEO_CC_MVS0C_CLK_ARES] = { 0x804c, 2 },
+ [VIDEO_CC_MVS0C_BCR] = { 0x8030 },
+ [VIDEO_CC_MVS0_FREERUN_CLK_ARES] = { 0x808c, 2 },
+ [VIDEO_CC_MVS0C_FREERUN_CLK_ARES] = { 0x805c, 2 },
+ [VIDEO_CC_XO_CLK_ARES] = { 0x80d4, 2 },
+};
+
+static const struct regmap_config video_cc_sm8750_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9f4c,
+ .fast_io = true,
+};
+
+static struct clk_alpha_pll *video_cc_sm8750_plls[] = {
+ &video_cc_pll0,
+};
+
+static u32 video_cc_sm8750_critical_cbcrs[] = {
+ 0x80a4, /* VIDEO_CC_AHB_CLK */
+ 0x80f8, /* VIDEO_CC_SLEEP_CLK */
+ 0x80d4, /* VIDEO_CC_XO_CLK */
+};
+
+static void clk_sm8750_regs_configure(struct device *dev, struct regmap *regmap)
+{
+ /* Update DLY_ACCU_RED_SHIFTER_DONE to 0xF for mvs0, mvs0c */
+ regmap_update_bits(regmap, 0x8074, GENMASK(25, 21), GENMASK(25, 21));
+ regmap_update_bits(regmap, 0x8040, GENMASK(25, 21), GENMASK(25, 21));
+
+ regmap_update_bits(regmap, 0x9f24, BIT(0), BIT(0));
+}
+
+static struct qcom_cc_driver_data video_cc_sm8750_driver_data = {
+ .alpha_plls = video_cc_sm8750_plls,
+ .num_alpha_plls = ARRAY_SIZE(video_cc_sm8750_plls),
+ .clk_cbcrs = video_cc_sm8750_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(video_cc_sm8750_critical_cbcrs),
+ .clk_regs_configure = clk_sm8750_regs_configure,
+};
+
+static struct qcom_cc_desc video_cc_sm8750_desc = {
+ .config = &video_cc_sm8750_regmap_config,
+ .clks = video_cc_sm8750_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_sm8750_clocks),
+ .resets = video_cc_sm8750_resets,
+ .num_resets = ARRAY_SIZE(video_cc_sm8750_resets),
+ .gdscs = video_cc_sm8750_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_sm8750_gdscs),
+ .use_rpm = true,
+ .driver_data = &video_cc_sm8750_driver_data,
+};
+
+static const struct of_device_id video_cc_sm8750_match_table[] = {
+ { .compatible = "qcom,sm8750-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_sm8750_match_table);
+
+static int video_cc_sm8750_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &video_cc_sm8750_desc);
+}
+
+static struct platform_driver video_cc_sm8750_driver = {
+ .probe = video_cc_sm8750_probe,
+ .driver = {
+ .name = "video_cc-sm8750",
+ .of_match_table = video_cc_sm8750_match_table,
+ },
+};
+
+static int __init video_cc_sm8750_init(void)
+{
+ return platform_driver_register(&video_cc_sm8750_driver);
+}
+subsys_initcall(video_cc_sm8750_init);
+
+static void __exit video_cc_sm8750_exit(void)
+{
+ platform_driver_unregister(&video_cc_sm8750_driver);
+}
+module_exit(video_cc_sm8750_exit);
+
+MODULE_DESCRIPTION("QTI VIDEO_CC SM8750 Driver");
+MODULE_LICENSE("GPL");
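The new controller only exports clock, GDSC and reset handles; consumers reference them through the DT indices used in the tables above. A minimal consumer sketch follows (the probe name and the "mvs0" clock-names entry are assumptions for illustration, not part of this patch):

/* Hypothetical consumer of <&videocc VIDEO_CC_MVS0_CLK>. */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_venus_probe(struct platform_device *pdev)
{
	struct clk *mvs0;

	mvs0 = devm_clk_get(&pdev->dev, "mvs0");	/* assumed clock-names entry */
	if (IS_ERR(mvs0))
		return PTR_ERR(mvs0);

	/* Rate requests propagate up the RCG via CLK_SET_RATE_PARENT. */
	return clk_prepare_enable(mvs0);
}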
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 50c20119d12a..6a5a04664990 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -43,6 +43,8 @@ config CLK_RENESAS
select CLK_R9A09G047 if ARCH_R9A09G047
select CLK_R9A09G056 if ARCH_R9A09G056
select CLK_R9A09G057 if ARCH_R9A09G057
+ select CLK_R9A09G077 if ARCH_R9A09G077
+ select CLK_R9A09G087 if ARCH_R9A09G087
select CLK_SH73A0 if ARCH_SH73A0
if CLK_RENESAS
@@ -208,6 +210,14 @@ config CLK_R9A09G057
bool "RZ/V2H(P) clock support" if COMPILE_TEST
select CLK_RZV2H
+config CLK_R9A09G077
+ bool "RZ/T2H clock support" if COMPILE_TEST
+ select CLK_RENESAS_CPG_MSSR
+
+config CLK_R9A09G087
+ bool "RZ/N2H clock support" if COMPILE_TEST
+ select CLK_RENESAS_CPG_MSSR
+
config CLK_SH73A0
bool "SH-Mobile AG5 clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSTP
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index f9075bca6e95..d28eb276a153 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -40,6 +40,8 @@ obj-$(CONFIG_CLK_R9A09G011) += r9a09g011-cpg.o
obj-$(CONFIG_CLK_R9A09G047) += r9a09g047-cpg.o
obj-$(CONFIG_CLK_R9A09G056) += r9a09g056-cpg.o
obj-$(CONFIG_CLK_R9A09G057) += r9a09g057-cpg.o
+obj-$(CONFIG_CLK_R9A09G077) += r9a09g077-cpg.o
+obj-$(CONFIG_CLK_R9A09G087) += r9a09g077-cpg.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
# Family
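Note that the two new Kconfig symbols build the same object: both CLK_R9A09G077 (RZ/T2H) and CLK_R9A09G087 (RZ/N2H) pull in r9a09g077-cpg.o, so the two SoCs share a single CPG/MSSR source file.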
diff --git a/drivers/clk/renesas/clk-div6.c b/drivers/clk/renesas/clk-div6.c
index 3abd6e5400ad..f7b827b5e9b2 100644
--- a/drivers/clk/renesas/clk-div6.c
+++ b/drivers/clk/renesas/clk-div6.c
@@ -7,6 +7,7 @@
* Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -171,8 +172,7 @@ static u8 cpg_div6_clock_get_parent(struct clk_hw *hw)
if (clock->src_mask == 0)
return 0;
- hw_index = (readl(clock->reg) & clock->src_mask) >>
- __ffs(clock->src_mask);
+ hw_index = field_get(clock->src_mask, readl(clock->reg));
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
if (clock->parents[i] == hw_index)
return i;
@@ -191,7 +191,7 @@ static int cpg_div6_clock_set_parent(struct clk_hw *hw, u8 index)
if (index >= clk_hw_get_num_parents(hw))
return -EINVAL;
- src = clock->parents[index] << __ffs(clock->src_mask);
+ src = field_prep(clock->src_mask, clock->parents[index]);
writel((readl(clock->reg) & ~clock->src_mask) | src, clock->reg);
return 0;
}
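For context on the conversion above: field_get() and field_prep() from <linux/bitfield.h> accept a runtime (non-constant) mask and derive the shift from its lowest set bit, so the open-coded __ffs() arithmetic disappears. A standalone sketch of the equivalence (the mask value is an arbitrary example):

#include <linux/bitfield.h>
#include <linux/bits.h>

static u32 demo_extract_src(u32 reg_val)
{
	const u32 src_mask = GENMASK(30, 28);

	/* Equivalent to: (reg_val & src_mask) >> __ffs(src_mask) */
	return field_get(src_mask, reg_val);
}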
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 5bc473c2adb3..2f65fe2c6bdf 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -303,6 +303,9 @@ void cpg_mstp_detach_dev(struct generic_pm_domain *unused, struct device *dev)
pm_clk_destroy(dev);
}
+static struct device_node *cpg_mstp_pd_np __initdata = NULL;
+static struct generic_pm_domain *cpg_mstp_pd_genpd __initdata = NULL;
+
void __init cpg_mstp_add_clk_domain(struct device_node *np)
{
struct generic_pm_domain *pd;
@@ -324,5 +327,20 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
pd->detach_dev = cpg_mstp_detach_dev;
pm_genpd_init(pd, &pm_domain_always_on_gov, false);
- of_genpd_add_provider_simple(np, pd);
+ cpg_mstp_pd_np = of_node_get(np);
+ cpg_mstp_pd_genpd = pd;
+}
+
+static int __init cpg_mstp_pd_init_provider(void)
+{
+ int error;
+
+ if (!cpg_mstp_pd_np)
+ return -ENODEV;
+
+ error = of_genpd_add_provider_simple(cpg_mstp_pd_np, cpg_mstp_pd_genpd);
+
+ of_node_put(cpg_mstp_pd_np);
+ return error;
}
+postcore_initcall(cpg_mstp_pd_init_provider);
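The effect of this hunk is purely one of ordering: the genpd itself is still initialized early from cpg_mstp_add_clk_domain(), but of_genpd_add_provider_simple() is deferred to a postcore_initcall, decoupling provider registration from the early clock setup path. The __initdata singletons encode the assumption of at most one MSTP clock domain, which is why no list is needed.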
diff --git a/drivers/clk/renesas/r7s9210-cpg-mssr.c b/drivers/clk/renesas/r7s9210-cpg-mssr.c
index e1812867a6da..a8ed87c11ba1 100644
--- a/drivers/clk/renesas/r7s9210-cpg-mssr.c
+++ b/drivers/clk/renesas/r7s9210-cpg-mssr.c
@@ -159,12 +159,13 @@ static void __init r7s9210_update_clk_table(struct clk *extal_clk,
static struct clk * __init rza2_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers)
+ struct cpg_mssr_pub *pub)
{
- struct clk *parent;
+ void __iomem *base = pub->base0;
+ struct clk **clks = pub->clks;
unsigned int mult = 1;
unsigned int div = 1;
+ struct clk *parent;
parent = clks[core->parent];
if (IS_ERR(parent))
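This and several following hunks apply the same mechanical conversion: the per-SoC *_cpg_clk_register() callbacks take one struct cpg_mssr_pub instead of separate clks/base/notifiers parameters. Its shape can be inferred from the accesses in these hunks (a sketch only; the real definition in the renesas-cpg-mssr header may carry more fields):

/* Inferred from the pub->base0 and pub->clks usage above. */
struct cpg_mssr_pub {
	void __iomem *base0;			/* first MMIO register block */
	struct clk **clks;			/* core + module clock array */
	struct raw_notifier_head notifiers;	/* save/restore notifier chain */
};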
diff --git a/drivers/clk/renesas/r8a77970-cpg-mssr.c b/drivers/clk/renesas/r8a77970-cpg-mssr.c
index 3cec0f501b94..e2bda2c10730 100644
--- a/drivers/clk/renesas/r8a77970-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77970-cpg-mssr.c
@@ -219,10 +219,11 @@ static int __init r8a77970_cpg_mssr_init(struct device *dev)
static struct clk * __init r8a77970_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers)
+ struct cpg_mssr_pub *pub)
{
const struct clk_div_table *table;
+ void __iomem *base = pub->base0;
+ struct clk **clks = pub->clks;
const struct clk *parent;
unsigned int shift;
@@ -236,8 +237,7 @@ static struct clk * __init r8a77970_cpg_clk_register(struct device *dev,
shift = 4;
break;
default:
- return rcar_gen3_cpg_clk_register(dev, core, info, clks, base,
- notifiers);
+ return rcar_gen3_cpg_clk_register(dev, core, info, pub);
}
parent = clks[core->parent];
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index 1be7b9592aa6..d67dff05d9f4 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -26,7 +26,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R8A779A0_CLK_OSC,
+ LAST_DT_CORE_CLK = R8A779A0_CLK_ZG,
/* External Input Clocks */
CLK_EXTAL,
@@ -39,6 +39,7 @@ enum clk_ids {
CLK_PLL21,
CLK_PLL30,
CLK_PLL31,
+ CLK_PLL4,
CLK_PLL5,
CLK_PLL1_DIV2,
CLK_PLL20_DIV2,
@@ -65,6 +66,7 @@ enum clk_ids {
#define CPG_PLL21CR 0x0838 /* PLL21 Control Register */
#define CPG_PLL30CR 0x083c /* PLL30 Control Register */
#define CPG_PLL31CR 0x0840 /* PLL31 Control Register */
+#define CPG_PLL4CR 0x0844 /* PLL4 Control Register */
static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
/* External Clock Inputs */
@@ -79,6 +81,7 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_PLL(".pll21", CLK_PLL21, CPG_PLL21CR),
DEF_PLL(".pll30", CLK_PLL30, CPG_PLL30CR),
DEF_PLL(".pll31", CLK_PLL31, CPG_PLL31CR),
+ DEF_PLL(".pll4", CLK_PLL4, CPG_PLL4CR),
DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
DEF_FIXED(".pll20_div2", CLK_PLL20_DIV2, CLK_PLL20, 2, 1),
@@ -98,6 +101,7 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
/* Core Clock Outputs */
DEF_GEN4_Z("z0", R8A779A0_CLK_Z0, CLK_TYPE_GEN4_Z, CLK_PLL20, 2, 0),
DEF_GEN4_Z("z1", R8A779A0_CLK_Z1, CLK_TYPE_GEN4_Z, CLK_PLL21, 2, 8),
+ DEF_GEN4_Z("zg", R8A779A0_CLK_ZG, CLK_TYPE_GEN4_Z, CLK_PLL4, 2, 88),
DEF_FIXED("zx", R8A779A0_CLK_ZX, CLK_PLL20_DIV2, 2, 1),
DEF_FIXED("s1d1", R8A779A0_CLK_S1D1, CLK_S1, 1, 1),
DEF_FIXED("s1d2", R8A779A0_CLK_S1D2, CLK_S1, 2, 1),
@@ -138,6 +142,7 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
+ DEF_MOD("3dge", 0, R8A779A0_CLK_ZG),
DEF_MOD("isp0", 16, R8A779A0_CLK_S1D1),
DEF_MOD("isp1", 17, R8A779A0_CLK_S1D1),
DEF_MOD("isp2", 18, R8A779A0_CLK_S1D1),
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index dcda19318b2a..0f5c91b5dfa9 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -1333,9 +1333,9 @@ static int __init r9a06g032_clocks_probe(struct platform_device *pdev)
if (IS_ERR(mclk))
return PTR_ERR(mclk);
- clocks->reg = of_iomap(np, 0);
- if (WARN_ON(!clocks->reg))
- return -ENOMEM;
+ clocks->reg = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(clocks->reg))
+ return PTR_ERR(clocks->reg);
r9a06g032_init_h2mode(clocks);
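Note the inverted error convention that comes with this change: of_iomap() signals failure with NULL, whereas the managed devm_of_iomap() returns an ERR_PTR() (and additionally detects conflicting claims of the same region), so the guard correctly switches from WARN_ON(!ptr) to IS_ERR(ptr).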
diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
index fce2eecfa8c0..33e9a1223c72 100644
--- a/drivers/clk/renesas/r9a07g043-cpg.c
+++ b/drivers/clk/renesas/r9a07g043-cpg.c
@@ -164,143 +164,143 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
static const struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
#ifdef CONFIG_ARM64
DEF_MOD("gic", R9A07G043_GIC600_GICCLK, R9A07G043_CLK_P1,
- 0x514, 0),
+ 0x514, 0, MSTOP(BUS_REG1, BIT(7))),
DEF_MOD("ia55_pclk", R9A07G043_IA55_PCLK, R9A07G043_CLK_P2,
- 0x518, 0),
+ 0x518, 0, MSTOP(BUS_PERI_CPU, BIT(13))),
DEF_MOD("ia55_clk", R9A07G043_IA55_CLK, R9A07G043_CLK_P1,
- 0x518, 1),
+ 0x518, 1, MSTOP(BUS_PERI_CPU, BIT(13))),
#endif
#ifdef CONFIG_RISCV
DEF_MOD("iax45_pclk", R9A07G043_IAX45_PCLK, R9A07G043_CLK_P2,
- 0x518, 0),
+ 0x518, 0, MSTOP(BUS_PERI_CPU, BIT(13))),
DEF_MOD("iax45_clk", R9A07G043_IAX45_CLK, R9A07G043_CLK_P1,
- 0x518, 1),
+ 0x518, 1, MSTOP(BUS_PERI_CPU, BIT(13))),
#endif
DEF_MOD("dmac_aclk", R9A07G043_DMAC_ACLK, R9A07G043_CLK_P1,
- 0x52c, 0),
+ 0x52c, 0, MSTOP(BUS_REG1, BIT(2))),
DEF_MOD("dmac_pclk", R9A07G043_DMAC_PCLK, CLK_P1_DIV2,
- 0x52c, 1),
+ 0x52c, 1, MSTOP(BUS_REG1, BIT(3))),
DEF_MOD("ostm0_pclk", R9A07G043_OSTM0_PCLK, R9A07G043_CLK_P0,
- 0x534, 0),
+ 0x534, 0, MSTOP(BUS_REG0, BIT(4))),
DEF_MOD("ostm1_pclk", R9A07G043_OSTM1_PCLK, R9A07G043_CLK_P0,
- 0x534, 1),
+ 0x534, 1, MSTOP(BUS_REG0, BIT(5))),
DEF_MOD("ostm2_pclk", R9A07G043_OSTM2_PCLK, R9A07G043_CLK_P0,
- 0x534, 2),
+ 0x534, 2, MSTOP(BUS_REG0, BIT(6))),
DEF_MOD("mtu_x_mck", R9A07G043_MTU_X_MCK_MTU3, R9A07G043_CLK_P0,
- 0x538, 0),
+ 0x538, 0, MSTOP(BUS_MCPU1, BIT(2))),
DEF_MOD("wdt0_pclk", R9A07G043_WDT0_PCLK, R9A07G043_CLK_P0,
- 0x548, 0),
+ 0x548, 0, MSTOP(BUS_REG0, BIT(2))),
DEF_MOD("wdt0_clk", R9A07G043_WDT0_CLK, R9A07G043_OSCCLK,
- 0x548, 1),
+ 0x548, 1, MSTOP(BUS_REG0, BIT(2))),
DEF_MOD("spi_clk2", R9A07G043_SPI_CLK2, R9A07G043_CLK_SPI1,
- 0x550, 0),
+ 0x550, 0, MSTOP(BUS_MCPU1, BIT(1))),
DEF_MOD("spi_clk", R9A07G043_SPI_CLK, R9A07G043_CLK_SPI0,
- 0x550, 1),
+ 0x550, 1, MSTOP(BUS_MCPU1, BIT(1))),
DEF_MOD("sdhi0_imclk", R9A07G043_SDHI0_IMCLK, CLK_SD0_DIV4,
- 0x554, 0),
+ 0x554, 0, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_imclk2", R9A07G043_SDHI0_IMCLK2, CLK_SD0_DIV4,
- 0x554, 1),
+ 0x554, 1, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_clk_hs", R9A07G043_SDHI0_CLK_HS, R9A07G043_CLK_SD0,
- 0x554, 2),
+ 0x554, 2, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_aclk", R9A07G043_SDHI0_ACLK, R9A07G043_CLK_P1,
- 0x554, 3),
+ 0x554, 3, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi1_imclk", R9A07G043_SDHI1_IMCLK, CLK_SD1_DIV4,
- 0x554, 4),
+ 0x554, 4, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_imclk2", R9A07G043_SDHI1_IMCLK2, CLK_SD1_DIV4,
- 0x554, 5),
+ 0x554, 5, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_clk_hs", R9A07G043_SDHI1_CLK_HS, R9A07G043_CLK_SD1,
- 0x554, 6),
+ 0x554, 6, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_aclk", R9A07G043_SDHI1_ACLK, R9A07G043_CLK_P1,
- 0x554, 7),
+ 0x554, 7, MSTOP(BUS_PERI_COM, BIT(1))),
#ifdef CONFIG_ARM64
- DEF_MOD("cru_sysclk", R9A07G043_CRU_SYSCLK, CLK_M2_DIV2,
- 0x564, 0),
- DEF_MOD("cru_vclk", R9A07G043_CRU_VCLK, R9A07G043_CLK_M2,
- 0x564, 1),
- DEF_MOD("cru_pclk", R9A07G043_CRU_PCLK, R9A07G043_CLK_ZT,
- 0x564, 2),
- DEF_MOD("cru_aclk", R9A07G043_CRU_ACLK, R9A07G043_CLK_M0,
- 0x564, 3),
+ DEF_MOD("cru_sysclk", R9A07G043_CRU_SYSCLK, CLK_M2_DIV2,
+ 0x564, 0, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_vclk", R9A07G043_CRU_VCLK, R9A07G043_CLK_M2,
+ 0x564, 1, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_pclk", R9A07G043_CRU_PCLK, R9A07G043_CLK_ZT,
+ 0x564, 2, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_aclk", R9A07G043_CRU_ACLK, R9A07G043_CLK_M0,
+ 0x564, 3, MSTOP(BUS_PERI_VIDEO, BIT(3))),
DEF_COUPLED("lcdc_clk_a", R9A07G043_LCDC_CLK_A, R9A07G043_CLK_M0,
- 0x56c, 0),
+ 0x56c, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(8, 7))),
DEF_COUPLED("lcdc_clk_p", R9A07G043_LCDC_CLK_P, R9A07G043_CLK_ZT,
- 0x56c, 0),
+ 0x56c, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(8, 7))),
DEF_MOD("lcdc_clk_d", R9A07G043_LCDC_CLK_D, R9A07G043_CLK_M3,
- 0x56c, 1),
+ 0x56c, 1, MSTOP(BUS_PERI_VIDEO, BIT(9))),
#endif
DEF_MOD("ssi0_pclk", R9A07G043_SSI0_PCLK2, R9A07G043_CLK_P0,
- 0x570, 0),
+ 0x570, 0, MSTOP(BUS_MCPU1, BIT(10))),
DEF_MOD("ssi0_sfr", R9A07G043_SSI0_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 1),
+ 0x570, 1, MSTOP(BUS_MCPU1, BIT(10))),
DEF_MOD("ssi1_pclk", R9A07G043_SSI1_PCLK2, R9A07G043_CLK_P0,
- 0x570, 2),
+ 0x570, 2, MSTOP(BUS_MCPU1, BIT(11))),
DEF_MOD("ssi1_sfr", R9A07G043_SSI1_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 3),
+ 0x570, 3, MSTOP(BUS_MCPU1, BIT(11))),
DEF_MOD("ssi2_pclk", R9A07G043_SSI2_PCLK2, R9A07G043_CLK_P0,
- 0x570, 4),
+ 0x570, 4, MSTOP(BUS_MCPU1, BIT(12))),
DEF_MOD("ssi2_sfr", R9A07G043_SSI2_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 5),
+ 0x570, 5, MSTOP(BUS_MCPU1, BIT(12))),
DEF_MOD("ssi3_pclk", R9A07G043_SSI3_PCLK2, R9A07G043_CLK_P0,
- 0x570, 6),
+ 0x570, 6, MSTOP(BUS_MCPU1, BIT(13))),
DEF_MOD("ssi3_sfr", R9A07G043_SSI3_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 7),
+ 0x570, 7, MSTOP(BUS_MCPU1, BIT(13))),
DEF_MOD("usb0_host", R9A07G043_USB_U2H0_HCLK, R9A07G043_CLK_P1,
- 0x578, 0),
+ 0x578, 0, MSTOP(BUS_PERI_COM, BIT(5))),
DEF_MOD("usb1_host", R9A07G043_USB_U2H1_HCLK, R9A07G043_CLK_P1,
- 0x578, 1),
+ 0x578, 1, MSTOP(BUS_PERI_COM, BIT(7))),
DEF_MOD("usb0_func", R9A07G043_USB_U2P_EXR_CPUCLK, R9A07G043_CLK_P1,
- 0x578, 2),
+ 0x578, 2, MSTOP(BUS_PERI_COM, BIT(6))),
DEF_MOD("usb_pclk", R9A07G043_USB_PCLK, R9A07G043_CLK_P1,
- 0x578, 3),
+ 0x578, 3, MSTOP(BUS_PERI_COM, BIT(4))),
DEF_COUPLED("eth0_axi", R9A07G043_ETH0_CLK_AXI, R9A07G043_CLK_M0,
- 0x57c, 0),
+ 0x57c, 0, MSTOP(BUS_PERI_COM, BIT(2))),
DEF_COUPLED("eth0_chi", R9A07G043_ETH0_CLK_CHI, R9A07G043_CLK_ZT,
- 0x57c, 0),
+ 0x57c, 0, MSTOP(BUS_PERI_COM, BIT(2))),
DEF_COUPLED("eth1_axi", R9A07G043_ETH1_CLK_AXI, R9A07G043_CLK_M0,
- 0x57c, 1),
+ 0x57c, 1, MSTOP(BUS_PERI_COM, BIT(3))),
DEF_COUPLED("eth1_chi", R9A07G043_ETH1_CLK_CHI, R9A07G043_CLK_ZT,
- 0x57c, 1),
+ 0x57c, 1, MSTOP(BUS_PERI_COM, BIT(3))),
DEF_MOD("i2c0", R9A07G043_I2C0_PCLK, R9A07G043_CLK_P0,
- 0x580, 0),
+ 0x580, 0, MSTOP(BUS_MCPU2, BIT(10))),
DEF_MOD("i2c1", R9A07G043_I2C1_PCLK, R9A07G043_CLK_P0,
- 0x580, 1),
+ 0x580, 1, MSTOP(BUS_MCPU2, BIT(11))),
DEF_MOD("i2c2", R9A07G043_I2C2_PCLK, R9A07G043_CLK_P0,
- 0x580, 2),
+ 0x580, 2, MSTOP(BUS_MCPU2, BIT(12))),
DEF_MOD("i2c3", R9A07G043_I2C3_PCLK, R9A07G043_CLK_P0,
- 0x580, 3),
+ 0x580, 3, MSTOP(BUS_MCPU2, BIT(13))),
DEF_MOD("scif0", R9A07G043_SCIF0_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 0),
+ 0x584, 0, MSTOP(BUS_MCPU2, BIT(1))),
DEF_MOD("scif1", R9A07G043_SCIF1_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 1),
+ 0x584, 1, MSTOP(BUS_MCPU2, BIT(2))),
DEF_MOD("scif2", R9A07G043_SCIF2_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 2),
+ 0x584, 2, MSTOP(BUS_MCPU2, BIT(3))),
DEF_MOD("scif3", R9A07G043_SCIF3_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 3),
+ 0x584, 3, MSTOP(BUS_MCPU2, BIT(4))),
DEF_MOD("scif4", R9A07G043_SCIF4_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 4),
+ 0x584, 4, MSTOP(BUS_MCPU2, BIT(5))),
DEF_MOD("sci0", R9A07G043_SCI0_CLKP, R9A07G043_CLK_P0,
- 0x588, 0),
+ 0x588, 0, MSTOP(BUS_MCPU2, BIT(7))),
DEF_MOD("sci1", R9A07G043_SCI1_CLKP, R9A07G043_CLK_P0,
- 0x588, 1),
+ 0x588, 1, MSTOP(BUS_MCPU2, BIT(8))),
DEF_MOD("rspi0", R9A07G043_RSPI0_CLKB, R9A07G043_CLK_P0,
- 0x590, 0),
+ 0x590, 0, MSTOP(BUS_MCPU1, BIT(14))),
DEF_MOD("rspi1", R9A07G043_RSPI1_CLKB, R9A07G043_CLK_P0,
- 0x590, 1),
+ 0x590, 1, MSTOP(BUS_MCPU1, BIT(15))),
DEF_MOD("rspi2", R9A07G043_RSPI2_CLKB, R9A07G043_CLK_P0,
- 0x590, 2),
+ 0x590, 2, MSTOP(BUS_MCPU2, BIT(0))),
DEF_MOD("canfd", R9A07G043_CANFD_PCLK, R9A07G043_CLK_P0,
- 0x594, 0),
+ 0x594, 0, MSTOP(BUS_MCPU2, BIT(9))),
DEF_MOD("gpio", R9A07G043_GPIO_HCLK, R9A07G043_OSCCLK,
- 0x598, 0),
+ 0x598, 0, MSTOP(BUS_PERI_CPU, BIT(6))),
DEF_MOD("adc_adclk", R9A07G043_ADC_ADCLK, R9A07G043_CLK_TSU,
- 0x5a8, 0),
+ 0x5a8, 0, MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("adc_pclk", R9A07G043_ADC_PCLK, R9A07G043_CLK_P0,
- 0x5a8, 1),
+ 0x5a8, 1, MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("tsu_pclk", R9A07G043_TSU_PCLK, R9A07G043_CLK_TSU,
- 0x5ac, 0),
+ 0x5ac, 0, MSTOP(BUS_MCPU2, BIT(15))),
#ifdef CONFIG_RISCV
DEF_MOD("nceplic_aclk", R9A07G043_NCEPLIC_ACLK, R9A07G043_CLK_P1,
- 0x608, 0),
+ 0x608, 0, MSTOP(BUS_REG1, BIT(7))),
#endif
};
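The new trailing argument threaded through every DEF_MOD()/DEF_COUPLED() entry above, and in the r9a07g044/r9a08g045 hunks that follow, attaches MSTOP (module stop) information to the clock itself: which bus-control register bits gate the module's bus interface, with 0 meaning "no MSTOP bits". Conceptually the helper just packs a bus identifier plus bitmask into one word; the sketch below is illustrative only, the exact encoding lives in the rzg2l-cpg header:

/* Assumed field layout for illustration -- not the in-tree macro. */
#define DEMO_MSTOP(bus_off, bitmask)	(((u32)(bus_off) << 16) | (u16)(bitmask))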
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index 77ca3a789568..0dd264877b9a 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -242,176 +242,176 @@ static const struct {
} mod_clks = {
.common = {
DEF_MOD("gic", R9A07G044_GIC600_GICCLK, R9A07G044_CLK_P1,
- 0x514, 0),
+ 0x514, 0, MSTOP(BUS_REG1, BIT(7))),
DEF_MOD("ia55_pclk", R9A07G044_IA55_PCLK, R9A07G044_CLK_P2,
- 0x518, 0),
+ 0x518, 0, MSTOP(BUS_PERI_CPU, BIT(13))),
DEF_MOD("ia55_clk", R9A07G044_IA55_CLK, R9A07G044_CLK_P1,
- 0x518, 1),
+ 0x518, 1, MSTOP(BUS_PERI_CPU, BIT(13))),
DEF_MOD("dmac_aclk", R9A07G044_DMAC_ACLK, R9A07G044_CLK_P1,
- 0x52c, 0),
+ 0x52c, 0, MSTOP(BUS_REG1, BIT(2))),
DEF_MOD("dmac_pclk", R9A07G044_DMAC_PCLK, CLK_P1_DIV2,
- 0x52c, 1),
+ 0x52c, 1, MSTOP(BUS_REG1, BIT(3))),
DEF_MOD("ostm0_pclk", R9A07G044_OSTM0_PCLK, R9A07G044_CLK_P0,
- 0x534, 0),
+ 0x534, 0, MSTOP(BUS_REG0, BIT(4))),
DEF_MOD("ostm1_pclk", R9A07G044_OSTM1_PCLK, R9A07G044_CLK_P0,
- 0x534, 1),
+ 0x534, 1, MSTOP(BUS_REG0, BIT(5))),
DEF_MOD("ostm2_pclk", R9A07G044_OSTM2_PCLK, R9A07G044_CLK_P0,
- 0x534, 2),
+ 0x534, 2, MSTOP(BUS_REG0, BIT(6))),
DEF_MOD("mtu_x_mck", R9A07G044_MTU_X_MCK_MTU3, R9A07G044_CLK_P0,
- 0x538, 0),
+ 0x538, 0, MSTOP(BUS_MCPU1, BIT(2))),
DEF_MOD("gpt_pclk", R9A07G044_GPT_PCLK, R9A07G044_CLK_P0,
- 0x540, 0),
+ 0x540, 0, MSTOP(BUS_MCPU1, BIT(4))),
DEF_MOD("poeg_a_clkp", R9A07G044_POEG_A_CLKP, R9A07G044_CLK_P0,
- 0x544, 0),
+ 0x544, 0, MSTOP(BUS_MCPU1, BIT(5))),
DEF_MOD("poeg_b_clkp", R9A07G044_POEG_B_CLKP, R9A07G044_CLK_P0,
- 0x544, 1),
+ 0x544, 1, MSTOP(BUS_MCPU1, BIT(6))),
DEF_MOD("poeg_c_clkp", R9A07G044_POEG_C_CLKP, R9A07G044_CLK_P0,
- 0x544, 2),
+ 0x544, 2, MSTOP(BUS_MCPU1, BIT(7))),
DEF_MOD("poeg_d_clkp", R9A07G044_POEG_D_CLKP, R9A07G044_CLK_P0,
- 0x544, 3),
+ 0x544, 3, MSTOP(BUS_MCPU1, BIT(8))),
DEF_MOD("wdt0_pclk", R9A07G044_WDT0_PCLK, R9A07G044_CLK_P0,
- 0x548, 0),
+ 0x548, 0, MSTOP(BUS_REG0, BIT(2))),
DEF_MOD("wdt0_clk", R9A07G044_WDT0_CLK, R9A07G044_OSCCLK,
- 0x548, 1),
+ 0x548, 1, MSTOP(BUS_REG0, BIT(2))),
DEF_MOD("wdt1_pclk", R9A07G044_WDT1_PCLK, R9A07G044_CLK_P0,
- 0x548, 2),
+ 0x548, 2, MSTOP(BUS_REG0, BIT(3))),
DEF_MOD("wdt1_clk", R9A07G044_WDT1_CLK, R9A07G044_OSCCLK,
- 0x548, 3),
+ 0x548, 3, MSTOP(BUS_REG0, BIT(3))),
DEF_MOD("spi_clk2", R9A07G044_SPI_CLK2, R9A07G044_CLK_SPI1,
- 0x550, 0),
+ 0x550, 0, MSTOP(BUS_MCPU1, BIT(1))),
DEF_MOD("spi_clk", R9A07G044_SPI_CLK, R9A07G044_CLK_SPI0,
- 0x550, 1),
+ 0x550, 1, MSTOP(BUS_MCPU1, BIT(1))),
DEF_MOD("sdhi0_imclk", R9A07G044_SDHI0_IMCLK, CLK_SD0_DIV4,
- 0x554, 0),
+ 0x554, 0, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_imclk2", R9A07G044_SDHI0_IMCLK2, CLK_SD0_DIV4,
- 0x554, 1),
+ 0x554, 1, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_clk_hs", R9A07G044_SDHI0_CLK_HS, R9A07G044_CLK_SD0,
- 0x554, 2),
+ 0x554, 2, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_aclk", R9A07G044_SDHI0_ACLK, R9A07G044_CLK_P1,
- 0x554, 3),
+ 0x554, 3, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi1_imclk", R9A07G044_SDHI1_IMCLK, CLK_SD1_DIV4,
- 0x554, 4),
+ 0x554, 4, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_imclk2", R9A07G044_SDHI1_IMCLK2, CLK_SD1_DIV4,
- 0x554, 5),
+ 0x554, 5, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_clk_hs", R9A07G044_SDHI1_CLK_HS, R9A07G044_CLK_SD1,
- 0x554, 6),
+ 0x554, 6, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_aclk", R9A07G044_SDHI1_ACLK, R9A07G044_CLK_P1,
- 0x554, 7),
+ 0x554, 7, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("gpu_clk", R9A07G044_GPU_CLK, R9A07G044_CLK_G,
- 0x558, 0),
+ 0x558, 0, MSTOP(BUS_REG1, BIT(4))),
DEF_MOD("gpu_axi_clk", R9A07G044_GPU_AXI_CLK, R9A07G044_CLK_P1,
- 0x558, 1),
+ 0x558, 1, 0),
DEF_MOD("gpu_ace_clk", R9A07G044_GPU_ACE_CLK, R9A07G044_CLK_P1,
- 0x558, 2),
- DEF_MOD("cru_sysclk", R9A07G044_CRU_SYSCLK, CLK_M2_DIV2,
- 0x564, 0),
- DEF_MOD("cru_vclk", R9A07G044_CRU_VCLK, R9A07G044_CLK_M2,
- 0x564, 1),
- DEF_MOD("cru_pclk", R9A07G044_CRU_PCLK, R9A07G044_CLK_ZT,
- 0x564, 2),
- DEF_MOD("cru_aclk", R9A07G044_CRU_ACLK, R9A07G044_CLK_M0,
- 0x564, 3),
+ 0x558, 2, 0),
+ DEF_MOD("cru_sysclk", R9A07G044_CRU_SYSCLK, CLK_M2_DIV2,
+ 0x564, 0, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_vclk", R9A07G044_CRU_VCLK, R9A07G044_CLK_M2,
+ 0x564, 1, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_pclk", R9A07G044_CRU_PCLK, R9A07G044_CLK_ZT,
+ 0x564, 2, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_aclk", R9A07G044_CRU_ACLK, R9A07G044_CLK_M0,
+ 0x564, 3, MSTOP(BUS_PERI_VIDEO, BIT(3))),
DEF_MOD("dsi_pll_clk", R9A07G044_MIPI_DSI_PLLCLK, R9A07G044_CLK_M1,
- 0x568, 0),
+ 0x568, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_sys_clk", R9A07G044_MIPI_DSI_SYSCLK, CLK_M2_DIV2,
- 0x568, 1),
+ 0x568, 1, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_aclk", R9A07G044_MIPI_DSI_ACLK, R9A07G044_CLK_P1,
- 0x568, 2),
+ 0x568, 2, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_pclk", R9A07G044_MIPI_DSI_PCLK, R9A07G044_CLK_P2,
- 0x568, 3),
+ 0x568, 3, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_vclk", R9A07G044_MIPI_DSI_VCLK, R9A07G044_CLK_M3,
- 0x568, 4),
+ 0x568, 4, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_lpclk", R9A07G044_MIPI_DSI_LPCLK, R9A07G044_CLK_M4,
- 0x568, 5),
+ 0x568, 5, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_COUPLED("lcdc_a", R9A07G044_LCDC_CLK_A, R9A07G044_CLK_M0,
- 0x56c, 0),
+ 0x56c, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(8, 7))),
DEF_COUPLED("lcdc_p", R9A07G044_LCDC_CLK_P, R9A07G044_CLK_ZT,
- 0x56c, 0),
+ 0x56c, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(8, 7))),
DEF_MOD("lcdc_clk_d", R9A07G044_LCDC_CLK_D, R9A07G044_CLK_M3,
- 0x56c, 1),
+ 0x56c, 1, MSTOP(BUS_PERI_VIDEO, BIT(9))),
DEF_MOD("ssi0_pclk", R9A07G044_SSI0_PCLK2, R9A07G044_CLK_P0,
- 0x570, 0),
+ 0x570, 0, MSTOP(BUS_MCPU1, BIT(10))),
DEF_MOD("ssi0_sfr", R9A07G044_SSI0_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 1),
+ 0x570, 1, MSTOP(BUS_MCPU1, BIT(10))),
DEF_MOD("ssi1_pclk", R9A07G044_SSI1_PCLK2, R9A07G044_CLK_P0,
- 0x570, 2),
+ 0x570, 2, MSTOP(BUS_MCPU1, BIT(11))),
DEF_MOD("ssi1_sfr", R9A07G044_SSI1_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 3),
+ 0x570, 3, MSTOP(BUS_MCPU1, BIT(11))),
DEF_MOD("ssi2_pclk", R9A07G044_SSI2_PCLK2, R9A07G044_CLK_P0,
- 0x570, 4),
+ 0x570, 4, MSTOP(BUS_MCPU1, BIT(12))),
DEF_MOD("ssi2_sfr", R9A07G044_SSI2_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 5),
+ 0x570, 5, MSTOP(BUS_MCPU1, BIT(12))),
DEF_MOD("ssi3_pclk", R9A07G044_SSI3_PCLK2, R9A07G044_CLK_P0,
- 0x570, 6),
+ 0x570, 6, MSTOP(BUS_MCPU1, BIT(13))),
DEF_MOD("ssi3_sfr", R9A07G044_SSI3_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 7),
+ 0x570, 7, MSTOP(BUS_MCPU1, BIT(13))),
DEF_MOD("usb0_host", R9A07G044_USB_U2H0_HCLK, R9A07G044_CLK_P1,
- 0x578, 0),
+ 0x578, 0, MSTOP(BUS_PERI_COM, BIT(5))),
DEF_MOD("usb1_host", R9A07G044_USB_U2H1_HCLK, R9A07G044_CLK_P1,
- 0x578, 1),
+ 0x578, 1, MSTOP(BUS_PERI_COM, BIT(7))),
DEF_MOD("usb0_func", R9A07G044_USB_U2P_EXR_CPUCLK, R9A07G044_CLK_P1,
- 0x578, 2),
+ 0x578, 2, MSTOP(BUS_PERI_COM, BIT(6))),
DEF_MOD("usb_pclk", R9A07G044_USB_PCLK, R9A07G044_CLK_P1,
- 0x578, 3),
+ 0x578, 3, MSTOP(BUS_PERI_COM, BIT(4))),
DEF_COUPLED("eth0_axi", R9A07G044_ETH0_CLK_AXI, R9A07G044_CLK_M0,
- 0x57c, 0),
+ 0x57c, 0, MSTOP(BUS_PERI_COM, BIT(2))),
DEF_COUPLED("eth0_chi", R9A07G044_ETH0_CLK_CHI, R9A07G044_CLK_ZT,
- 0x57c, 0),
+ 0x57c, 0, MSTOP(BUS_PERI_COM, BIT(2))),
DEF_COUPLED("eth1_axi", R9A07G044_ETH1_CLK_AXI, R9A07G044_CLK_M0,
- 0x57c, 1),
+ 0x57c, 1, MSTOP(BUS_PERI_COM, BIT(3))),
DEF_COUPLED("eth1_chi", R9A07G044_ETH1_CLK_CHI, R9A07G044_CLK_ZT,
- 0x57c, 1),
+ 0x57c, 1, MSTOP(BUS_PERI_COM, BIT(3))),
DEF_MOD("i2c0", R9A07G044_I2C0_PCLK, R9A07G044_CLK_P0,
- 0x580, 0),
+ 0x580, 0, MSTOP(BUS_MCPU2, BIT(10))),
DEF_MOD("i2c1", R9A07G044_I2C1_PCLK, R9A07G044_CLK_P0,
- 0x580, 1),
+ 0x580, 1, MSTOP(BUS_MCPU2, BIT(11))),
DEF_MOD("i2c2", R9A07G044_I2C2_PCLK, R9A07G044_CLK_P0,
- 0x580, 2),
+ 0x580, 2, MSTOP(BUS_MCPU2, BIT(12))),
DEF_MOD("i2c3", R9A07G044_I2C3_PCLK, R9A07G044_CLK_P0,
- 0x580, 3),
+ 0x580, 3, MSTOP(BUS_MCPU2, BIT(13))),
DEF_MOD("scif0", R9A07G044_SCIF0_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 0),
+ 0x584, 0, MSTOP(BUS_MCPU2, BIT(1))),
DEF_MOD("scif1", R9A07G044_SCIF1_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 1),
+ 0x584, 1, MSTOP(BUS_MCPU2, BIT(2))),
DEF_MOD("scif2", R9A07G044_SCIF2_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 2),
+ 0x584, 2, MSTOP(BUS_MCPU2, BIT(3))),
DEF_MOD("scif3", R9A07G044_SCIF3_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 3),
+ 0x584, 3, MSTOP(BUS_MCPU2, BIT(4))),
DEF_MOD("scif4", R9A07G044_SCIF4_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 4),
+ 0x584, 4, MSTOP(BUS_MCPU2, BIT(5))),
DEF_MOD("sci0", R9A07G044_SCI0_CLKP, R9A07G044_CLK_P0,
- 0x588, 0),
+ 0x588, 0, MSTOP(BUS_MCPU2, BIT(7))),
DEF_MOD("sci1", R9A07G044_SCI1_CLKP, R9A07G044_CLK_P0,
- 0x588, 1),
+ 0x588, 1, MSTOP(BUS_MCPU2, BIT(8))),
DEF_MOD("rspi0", R9A07G044_RSPI0_CLKB, R9A07G044_CLK_P0,
- 0x590, 0),
+ 0x590, 0, MSTOP(BUS_MCPU1, BIT(14))),
DEF_MOD("rspi1", R9A07G044_RSPI1_CLKB, R9A07G044_CLK_P0,
- 0x590, 1),
+ 0x590, 1, MSTOP(BUS_MCPU1, BIT(15))),
DEF_MOD("rspi2", R9A07G044_RSPI2_CLKB, R9A07G044_CLK_P0,
- 0x590, 2),
+ 0x590, 2, MSTOP(BUS_MCPU2, BIT(0))),
DEF_MOD("canfd", R9A07G044_CANFD_PCLK, R9A07G044_CLK_P0,
- 0x594, 0),
+ 0x594, 0, MSTOP(BUS_MCPU2, BIT(9))),
DEF_MOD("gpio", R9A07G044_GPIO_HCLK, R9A07G044_OSCCLK,
- 0x598, 0),
+ 0x598, 0, MSTOP(BUS_PERI_CPU, BIT(6))),
DEF_MOD("adc_adclk", R9A07G044_ADC_ADCLK, R9A07G044_CLK_TSU,
- 0x5a8, 0),
+ 0x5a8, 0, MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("adc_pclk", R9A07G044_ADC_PCLK, R9A07G044_CLK_P0,
- 0x5a8, 1),
+ 0x5a8, 1, MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("tsu_pclk", R9A07G044_TSU_PCLK, R9A07G044_CLK_TSU,
- 0x5ac, 0),
+ 0x5ac, 0, MSTOP(BUS_MCPU2, BIT(15))),
},
#ifdef CONFIG_CLK_R9A07G054
.drp = {
DEF_MOD("stpai_initclk", R9A07G054_STPAI_INITCLK, R9A07G044_OSCCLK,
- 0x5e8, 0),
+ 0x5e8, 0, 0),
DEF_MOD("stpai_aclk", R9A07G054_STPAI_ACLK, R9A07G044_CLK_P1,
- 0x5e8, 1),
+ 0x5e8, 1, 0),
DEF_MOD("stpai_mclk", R9A07G054_STPAI_MCLK, R9A07G054_CLK_DRP_M,
- 0x5e8, 2),
+ 0x5e8, 2, 0),
DEF_MOD("stpai_dclkin", R9A07G054_STPAI_DCLKIN, R9A07G054_CLK_DRP_D,
- 0x5e8, 3),
+ 0x5e8, 3, 0),
DEF_MOD("stpai_aclk_drp", R9A07G054_STPAI_ACLK_DRP, R9A07G054_CLK_DRP_A,
- 0x5e8, 4),
+ 0x5e8, 4, 0),
},
#endif
};
diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c
index 4035f3443598..79e7b19c7882 100644
--- a/drivers/clk/renesas/r9a08g045-cpg.c
+++ b/drivers/clk/renesas/r9a08g045-cpg.c
@@ -183,6 +183,7 @@ static const struct cpg_core_clk r9a08g045_core_clks[] __initconst = {
DEF_G3S_DIV("P3", R9A08G045_CLK_P3, CLK_PLL3_DIV2_4, DIVPL3C, G3S_DIVPL3C_STS,
dtable_1_32, 0, 0, 0, NULL),
DEF_FIXED("P3_DIV2", CLK_P3_DIV2, R9A08G045_CLK_P3, 1, 2),
+ DEF_FIXED("P5", R9A08G045_CLK_P5, CLK_PLL2_DIV2, 1, 4),
DEF_FIXED("ZT", R9A08G045_CLK_ZT, CLK_PLL3_DIV2_8, 1, 1),
DEF_FIXED("S0", R9A08G045_CLK_S0, CLK_SEL_PLL4, 1, 2),
DEF_FIXED("OSC", R9A08G045_OSCCLK, CLK_EXTAL, 1, 1),
@@ -192,58 +193,116 @@ static const struct cpg_core_clk r9a08g045_core_clks[] __initconst = {
};
static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
- DEF_MOD("gic_gicclk", R9A08G045_GIC600_GICCLK, R9A08G045_CLK_P1, 0x514, 0),
- DEF_MOD("ia55_pclk", R9A08G045_IA55_PCLK, R9A08G045_CLK_P2, 0x518, 0),
- DEF_MOD("ia55_clk", R9A08G045_IA55_CLK, R9A08G045_CLK_P1, 0x518, 1),
- DEF_MOD("dmac_aclk", R9A08G045_DMAC_ACLK, R9A08G045_CLK_P3, 0x52c, 0),
- DEF_MOD("dmac_pclk", R9A08G045_DMAC_PCLK, CLK_P3_DIV2, 0x52c, 1),
- DEF_MOD("wdt0_pclk", R9A08G045_WDT0_PCLK, R9A08G045_CLK_P0, 0x548, 0),
- DEF_MOD("wdt0_clk", R9A08G045_WDT0_CLK, R9A08G045_OSCCLK, 0x548, 1),
- DEF_MOD("sdhi0_imclk", R9A08G045_SDHI0_IMCLK, CLK_SD0_DIV4, 0x554, 0),
- DEF_MOD("sdhi0_imclk2", R9A08G045_SDHI0_IMCLK2, CLK_SD0_DIV4, 0x554, 1),
- DEF_MOD("sdhi0_clk_hs", R9A08G045_SDHI0_CLK_HS, R9A08G045_CLK_SD0, 0x554, 2),
- DEF_MOD("sdhi0_aclk", R9A08G045_SDHI0_ACLK, R9A08G045_CLK_P1, 0x554, 3),
- DEF_MOD("sdhi1_imclk", R9A08G045_SDHI1_IMCLK, CLK_SD1_DIV4, 0x554, 4),
- DEF_MOD("sdhi1_imclk2", R9A08G045_SDHI1_IMCLK2, CLK_SD1_DIV4, 0x554, 5),
- DEF_MOD("sdhi1_clk_hs", R9A08G045_SDHI1_CLK_HS, R9A08G045_CLK_SD1, 0x554, 6),
- DEF_MOD("sdhi1_aclk", R9A08G045_SDHI1_ACLK, R9A08G045_CLK_P1, 0x554, 7),
- DEF_MOD("sdhi2_imclk", R9A08G045_SDHI2_IMCLK, CLK_SD2_DIV4, 0x554, 8),
- DEF_MOD("sdhi2_imclk2", R9A08G045_SDHI2_IMCLK2, CLK_SD2_DIV4, 0x554, 9),
- DEF_MOD("sdhi2_clk_hs", R9A08G045_SDHI2_CLK_HS, R9A08G045_CLK_SD2, 0x554, 10),
- DEF_MOD("sdhi2_aclk", R9A08G045_SDHI2_ACLK, R9A08G045_CLK_P1, 0x554, 11),
- DEF_MOD("ssi0_pclk2", R9A08G045_SSI0_PCLK2, R9A08G045_CLK_P0, 0x570, 0),
- DEF_MOD("ssi0_sfr", R9A08G045_SSI0_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 1),
- DEF_MOD("ssi1_pclk2", R9A08G045_SSI1_PCLK2, R9A08G045_CLK_P0, 0x570, 2),
- DEF_MOD("ssi1_sfr", R9A08G045_SSI1_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 3),
- DEF_MOD("ssi2_pclk2", R9A08G045_SSI2_PCLK2, R9A08G045_CLK_P0, 0x570, 4),
- DEF_MOD("ssi2_sfr", R9A08G045_SSI2_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 5),
- DEF_MOD("ssi3_pclk2", R9A08G045_SSI3_PCLK2, R9A08G045_CLK_P0, 0x570, 6),
- DEF_MOD("ssi3_sfr", R9A08G045_SSI3_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 7),
- DEF_MOD("usb0_host", R9A08G045_USB_U2H0_HCLK, R9A08G045_CLK_P1, 0x578, 0),
- DEF_MOD("usb1_host", R9A08G045_USB_U2H1_HCLK, R9A08G045_CLK_P1, 0x578, 1),
- DEF_MOD("usb0_func", R9A08G045_USB_U2P_EXR_CPUCLK, R9A08G045_CLK_P1, 0x578, 2),
- DEF_MOD("usb_pclk", R9A08G045_USB_PCLK, R9A08G045_CLK_P1, 0x578, 3),
- DEF_COUPLED("eth0_axi", R9A08G045_ETH0_CLK_AXI, R9A08G045_CLK_M0, 0x57c, 0),
- DEF_COUPLED("eth0_chi", R9A08G045_ETH0_CLK_CHI, R9A08G045_CLK_ZT, 0x57c, 0),
- DEF_MOD("eth0_refclk", R9A08G045_ETH0_REFCLK, R9A08G045_CLK_HP, 0x57c, 8),
- DEF_COUPLED("eth1_axi", R9A08G045_ETH1_CLK_AXI, R9A08G045_CLK_M0, 0x57c, 1),
- DEF_COUPLED("eth1_chi", R9A08G045_ETH1_CLK_CHI, R9A08G045_CLK_ZT, 0x57c, 1),
- DEF_MOD("eth1_refclk", R9A08G045_ETH1_REFCLK, R9A08G045_CLK_HP, 0x57c, 9),
- DEF_MOD("i2c0_pclk", R9A08G045_I2C0_PCLK, R9A08G045_CLK_P0, 0x580, 0),
- DEF_MOD("i2c1_pclk", R9A08G045_I2C1_PCLK, R9A08G045_CLK_P0, 0x580, 1),
- DEF_MOD("i2c2_pclk", R9A08G045_I2C2_PCLK, R9A08G045_CLK_P0, 0x580, 2),
- DEF_MOD("i2c3_pclk", R9A08G045_I2C3_PCLK, R9A08G045_CLK_P0, 0x580, 3),
- DEF_MOD("scif0_clk_pck", R9A08G045_SCIF0_CLK_PCK, R9A08G045_CLK_P0, 0x584, 0),
- DEF_MOD("scif1_clk_pck", R9A08G045_SCIF1_CLK_PCK, R9A08G045_CLK_P0, 0x584, 1),
- DEF_MOD("scif2_clk_pck", R9A08G045_SCIF2_CLK_PCK, R9A08G045_CLK_P0, 0x584, 2),
- DEF_MOD("scif3_clk_pck", R9A08G045_SCIF3_CLK_PCK, R9A08G045_CLK_P0, 0x584, 3),
- DEF_MOD("scif4_clk_pck", R9A08G045_SCIF4_CLK_PCK, R9A08G045_CLK_P0, 0x584, 4),
- DEF_MOD("scif5_clk_pck", R9A08G045_SCIF5_CLK_PCK, R9A08G045_CLK_P0, 0x584, 5),
- DEF_MOD("gpio_hclk", R9A08G045_GPIO_HCLK, R9A08G045_OSCCLK, 0x598, 0),
- DEF_MOD("adc_adclk", R9A08G045_ADC_ADCLK, R9A08G045_CLK_TSU, 0x5a8, 0),
- DEF_MOD("adc_pclk", R9A08G045_ADC_PCLK, R9A08G045_CLK_TSU, 0x5a8, 1),
- DEF_MOD("tsu_pclk", R9A08G045_TSU_PCLK, R9A08G045_CLK_TSU, 0x5ac, 0),
- DEF_MOD("vbat_bclk", R9A08G045_VBAT_BCLK, R9A08G045_OSCCLK, 0x614, 0),
+ DEF_MOD("gic_gicclk", R9A08G045_GIC600_GICCLK, R9A08G045_CLK_P1, 0x514, 0,
+ MSTOP(BUS_ACPU, BIT(3))),
+ DEF_MOD("ia55_pclk", R9A08G045_IA55_PCLK, R9A08G045_CLK_P2, 0x518, 0,
+ MSTOP(BUS_PERI_CPU, BIT(13))),
+ DEF_MOD("ia55_clk", R9A08G045_IA55_CLK, R9A08G045_CLK_P1, 0x518, 1,
+ MSTOP(BUS_PERI_CPU, BIT(13))),
+ DEF_MOD("dmac_aclk", R9A08G045_DMAC_ACLK, R9A08G045_CLK_P3, 0x52c, 0,
+ MSTOP(BUS_REG1, BIT(2))),
+ DEF_MOD("dmac_pclk", R9A08G045_DMAC_PCLK, CLK_P3_DIV2, 0x52c, 1,
+ MSTOP(BUS_REG1, BIT(3))),
+ DEF_MOD("wdt0_pclk", R9A08G045_WDT0_PCLK, R9A08G045_CLK_P0, 0x548, 0,
+ MSTOP(BUS_REG0, BIT(0))),
+ DEF_MOD("wdt0_clk", R9A08G045_WDT0_CLK, R9A08G045_OSCCLK, 0x548, 1,
+ MSTOP(BUS_REG0, BIT(0))),
+ DEF_MOD("sdhi0_imclk", R9A08G045_SDHI0_IMCLK, CLK_SD0_DIV4, 0x554, 0,
+ MSTOP(BUS_PERI_COM, BIT(0))),
+ DEF_MOD("sdhi0_imclk2", R9A08G045_SDHI0_IMCLK2, CLK_SD0_DIV4, 0x554, 1,
+ MSTOP(BUS_PERI_COM, BIT(0))),
+ DEF_MOD("sdhi0_clk_hs", R9A08G045_SDHI0_CLK_HS, R9A08G045_CLK_SD0, 0x554, 2,
+ MSTOP(BUS_PERI_COM, BIT(0))),
+ DEF_MOD("sdhi0_aclk", R9A08G045_SDHI0_ACLK, R9A08G045_CLK_P1, 0x554, 3,
+ MSTOP(BUS_PERI_COM, BIT(0))),
+ DEF_MOD("sdhi1_imclk", R9A08G045_SDHI1_IMCLK, CLK_SD1_DIV4, 0x554, 4,
+ MSTOP(BUS_PERI_COM, BIT(1))),
+ DEF_MOD("sdhi1_imclk2", R9A08G045_SDHI1_IMCLK2, CLK_SD1_DIV4, 0x554, 5,
+ MSTOP(BUS_PERI_COM, BIT(1))),
+ DEF_MOD("sdhi1_clk_hs", R9A08G045_SDHI1_CLK_HS, R9A08G045_CLK_SD1, 0x554, 6,
+ MSTOP(BUS_PERI_COM, BIT(1))),
+ DEF_MOD("sdhi1_aclk", R9A08G045_SDHI1_ACLK, R9A08G045_CLK_P1, 0x554, 7,
+ MSTOP(BUS_PERI_COM, BIT(1))),
+ DEF_MOD("sdhi2_imclk", R9A08G045_SDHI2_IMCLK, CLK_SD2_DIV4, 0x554, 8,
+ MSTOP(BUS_PERI_COM, BIT(11))),
+ DEF_MOD("sdhi2_imclk2", R9A08G045_SDHI2_IMCLK2, CLK_SD2_DIV4, 0x554, 9,
+ MSTOP(BUS_PERI_COM, BIT(11))),
+ DEF_MOD("sdhi2_clk_hs", R9A08G045_SDHI2_CLK_HS, R9A08G045_CLK_SD2, 0x554, 10,
+ MSTOP(BUS_PERI_COM, BIT(11))),
+ DEF_MOD("sdhi2_aclk", R9A08G045_SDHI2_ACLK, R9A08G045_CLK_P1, 0x554, 11,
+ MSTOP(BUS_PERI_COM, BIT(11))),
+ DEF_MOD("ssi0_pclk2", R9A08G045_SSI0_PCLK2, R9A08G045_CLK_P0, 0x570, 0,
+ MSTOP(BUS_MCPU1, BIT(10))),
+ DEF_MOD("ssi0_sfr", R9A08G045_SSI0_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 1,
+ MSTOP(BUS_MCPU1, BIT(10))),
+ DEF_MOD("ssi1_pclk2", R9A08G045_SSI1_PCLK2, R9A08G045_CLK_P0, 0x570, 2,
+ MSTOP(BUS_MCPU1, BIT(11))),
+ DEF_MOD("ssi1_sfr", R9A08G045_SSI1_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 3,
+ MSTOP(BUS_MCPU1, BIT(11))),
+ DEF_MOD("ssi2_pclk2", R9A08G045_SSI2_PCLK2, R9A08G045_CLK_P0, 0x570, 4,
+ MSTOP(BUS_MCPU1, BIT(12))),
+ DEF_MOD("ssi2_sfr", R9A08G045_SSI2_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 5,
+ MSTOP(BUS_MCPU1, BIT(12))),
+ DEF_MOD("ssi3_pclk2", R9A08G045_SSI3_PCLK2, R9A08G045_CLK_P0, 0x570, 6,
+ MSTOP(BUS_MCPU1, BIT(13))),
+ DEF_MOD("ssi3_sfr", R9A08G045_SSI3_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 7,
+ MSTOP(BUS_MCPU1, BIT(13))),
+ DEF_MOD("usb0_host", R9A08G045_USB_U2H0_HCLK, R9A08G045_CLK_P1, 0x578, 0,
+ MSTOP(BUS_PERI_COM, BIT(5))),
+ DEF_MOD("usb1_host", R9A08G045_USB_U2H1_HCLK, R9A08G045_CLK_P1, 0x578, 1,
+ MSTOP(BUS_PERI_COM, BIT(7))),
+ DEF_MOD("usb0_func", R9A08G045_USB_U2P_EXR_CPUCLK, R9A08G045_CLK_P1, 0x578, 2,
+ MSTOP(BUS_PERI_COM, BIT(6))),
+ DEF_MOD("usb_pclk", R9A08G045_USB_PCLK, R9A08G045_CLK_P1, 0x578, 3,
+ MSTOP(BUS_PERI_COM, BIT(4))),
+ DEF_COUPLED("eth0_axi", R9A08G045_ETH0_CLK_AXI, R9A08G045_CLK_M0, 0x57c, 0,
+ MSTOP(BUS_PERI_COM, BIT(2))),
+ DEF_COUPLED("eth0_chi", R9A08G045_ETH0_CLK_CHI, R9A08G045_CLK_ZT, 0x57c, 0,
+ MSTOP(BUS_PERI_COM, BIT(2))),
+ DEF_MOD("eth0_refclk", R9A08G045_ETH0_REFCLK, R9A08G045_CLK_HP, 0x57c, 8, 0),
+ DEF_COUPLED("eth1_axi", R9A08G045_ETH1_CLK_AXI, R9A08G045_CLK_M0, 0x57c, 1,
+ MSTOP(BUS_PERI_COM, BIT(3))),
+ DEF_COUPLED("eth1_chi", R9A08G045_ETH1_CLK_CHI, R9A08G045_CLK_ZT, 0x57c, 1,
+ MSTOP(BUS_PERI_COM, BIT(3))),
+ DEF_MOD("eth1_refclk", R9A08G045_ETH1_REFCLK, R9A08G045_CLK_HP, 0x57c, 9, 0),
+ DEF_MOD("i2c0_pclk", R9A08G045_I2C0_PCLK, R9A08G045_CLK_P0, 0x580, 0,
+ MSTOP(BUS_MCPU2, BIT(10))),
+ DEF_MOD("i2c1_pclk", R9A08G045_I2C1_PCLK, R9A08G045_CLK_P0, 0x580, 1,
+ MSTOP(BUS_MCPU2, BIT(11))),
+ DEF_MOD("i2c2_pclk", R9A08G045_I2C2_PCLK, R9A08G045_CLK_P0, 0x580, 2,
+ MSTOP(BUS_MCPU2, BIT(12))),
+ DEF_MOD("i2c3_pclk", R9A08G045_I2C3_PCLK, R9A08G045_CLK_P0, 0x580, 3,
+ MSTOP(BUS_MCPU2, BIT(13))),
+ DEF_MOD("scif0_clk_pck", R9A08G045_SCIF0_CLK_PCK, R9A08G045_CLK_P0, 0x584, 0,
+ MSTOP(BUS_MCPU2, BIT(1))),
+ DEF_MOD("scif1_clk_pck", R9A08G045_SCIF1_CLK_PCK, R9A08G045_CLK_P0, 0x584, 1,
+ MSTOP(BUS_MCPU2, BIT(2))),
+ DEF_MOD("scif2_clk_pck", R9A08G045_SCIF2_CLK_PCK, R9A08G045_CLK_P0, 0x584, 2,
+ MSTOP(BUS_MCPU2, BIT(3))),
+ DEF_MOD("scif3_clk_pck", R9A08G045_SCIF3_CLK_PCK, R9A08G045_CLK_P0, 0x584, 3,
+ MSTOP(BUS_MCPU2, BIT(4))),
+ DEF_MOD("scif4_clk_pck", R9A08G045_SCIF4_CLK_PCK, R9A08G045_CLK_P0, 0x584, 4,
+ MSTOP(BUS_MCPU2, BIT(5))),
+ DEF_MOD("scif5_clk_pck", R9A08G045_SCIF5_CLK_PCK, R9A08G045_CLK_P0, 0x584, 5,
+ MSTOP(BUS_MCPU3, BIT(4))),
+ DEF_MOD("gpio_hclk", R9A08G045_GPIO_HCLK, R9A08G045_OSCCLK, 0x598, 0,
+ MSTOP(BUS_PERI_CPU, BIT(6))),
+ DEF_MOD("adc_adclk", R9A08G045_ADC_ADCLK, R9A08G045_CLK_TSU, 0x5a8, 0,
+ MSTOP(BUS_MCPU2, BIT(14))),
+ DEF_MOD("adc_pclk", R9A08G045_ADC_PCLK, R9A08G045_CLK_TSU, 0x5a8, 1,
+ MSTOP(BUS_MCPU2, BIT(14))),
+ DEF_MOD("tsu_pclk", R9A08G045_TSU_PCLK, R9A08G045_CLK_TSU, 0x5ac, 0,
+ MSTOP(BUS_MCPU2, BIT(15))),
+ DEF_MOD("pci_aclk", R9A08G045_PCI_ACLK, R9A08G045_CLK_M0, 0x608, 0,
+ MSTOP(BUS_PERI_COM, BIT(10))),
+ DEF_MOD("pci_clkl1pm", R9A08G045_PCI_CLKL1PM, R9A08G045_CLK_ZT, 0x608, 1,
+ MSTOP(BUS_PERI_COM, BIT(10))),
+ DEF_MOD("i3c_pclk", R9A08G045_I3C_PCLK, R9A08G045_CLK_TSU, 0x610, 0,
+ MSTOP(BUS_MCPU3, BIT(10))),
+ DEF_MOD("i3c_tclk", R9A08G045_I3C_TCLK, R9A08G045_CLK_P5, 0x610, 1,
+ MSTOP(BUS_MCPU3, BIT(10))),
+ DEF_MOD("vbat_bclk", R9A08G045_VBAT_BCLK, R9A08G045_OSCCLK, 0x614, 0,
+ MSTOP(BUS_MCPU3, GENMASK(8, 7))),
};
static const struct rzg2l_reset r9a08g045_resets[] = {
@@ -282,6 +341,15 @@ static const struct rzg2l_reset r9a08g045_resets[] = {
DEF_RST(R9A08G045_ADC_PRESETN, 0x8a8, 0),
DEF_RST(R9A08G045_ADC_ADRST_N, 0x8a8, 1),
DEF_RST(R9A08G045_TSU_PRESETN, 0x8ac, 0),
+ DEF_RST(R9A08G045_PCI_ARESETN, 0x908, 0),
+ DEF_RST(R9A08G045_PCI_RST_B, 0x908, 1),
+ DEF_RST(R9A08G045_PCI_RST_GP_B, 0x908, 2),
+ DEF_RST(R9A08G045_PCI_RST_PS_B, 0x908, 3),
+ DEF_RST(R9A08G045_PCI_RST_RSM_B, 0x908, 4),
+ DEF_RST(R9A08G045_PCI_RST_CFG_B, 0x908, 5),
+ DEF_RST(R9A08G045_PCI_RST_LOAD_B, 0x908, 6),
+ DEF_RST(R9A08G045_I3C_TRESETN, 0x910, 0),
+ DEF_RST(R9A08G045_I3C_PRESETN, 0x910, 1),
DEF_RST(R9A08G045_VBAT_BRESETN, 0x914, 0),
};
@@ -293,76 +361,8 @@ static const unsigned int r9a08g045_crit_mod_clks[] __initconst = {
MOD_CLK_BASE + R9A08G045_VBAT_BCLK,
};
-static const struct rzg2l_cpg_pm_domain_init_data r9a08g045_pm_domains[] = {
- /* Keep always-on domain on the first position for proper domains registration. */
- DEF_PD("always-on", R9A08G045_PD_ALWAYS_ON,
- DEF_REG_CONF(0, 0),
- GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_IRQ_SAFE),
- DEF_PD("gic", R9A08G045_PD_GIC,
- DEF_REG_CONF(CPG_BUS_ACPU_MSTOP, BIT(3)),
- GENPD_FLAG_ALWAYS_ON),
- DEF_PD("ia55", R9A08G045_PD_IA55,
- DEF_REG_CONF(CPG_BUS_PERI_CPU_MSTOP, BIT(13)),
- GENPD_FLAG_ALWAYS_ON),
- DEF_PD("dmac", R9A08G045_PD_DMAC,
- DEF_REG_CONF(CPG_BUS_REG1_MSTOP, GENMASK(3, 0)),
- GENPD_FLAG_ALWAYS_ON),
- DEF_PD("wdt0", R9A08G045_PD_WDT0,
- DEF_REG_CONF(CPG_BUS_REG0_MSTOP, BIT(0)),
- GENPD_FLAG_IRQ_SAFE),
- DEF_PD("sdhi0", R9A08G045_PD_SDHI0,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(0)), 0),
- DEF_PD("sdhi1", R9A08G045_PD_SDHI1,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(1)), 0),
- DEF_PD("sdhi2", R9A08G045_PD_SDHI2,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(11)), 0),
- DEF_PD("ssi0", R9A08G045_PD_SSI0,
- DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(10)), 0),
- DEF_PD("ssi1", R9A08G045_PD_SSI1,
- DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(11)), 0),
- DEF_PD("ssi2", R9A08G045_PD_SSI2,
- DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(12)), 0),
- DEF_PD("ssi3", R9A08G045_PD_SSI3,
- DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(13)), 0),
- DEF_PD("usb0", R9A08G045_PD_USB0,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, GENMASK(6, 5)), 0),
- DEF_PD("usb1", R9A08G045_PD_USB1,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(7)), 0),
- DEF_PD("usb-phy", R9A08G045_PD_USB_PHY,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(4)), 0),
- DEF_PD("eth0", R9A08G045_PD_ETHER0,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(2)), 0),
- DEF_PD("eth1", R9A08G045_PD_ETHER1,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(3)), 0),
- DEF_PD("i2c0", R9A08G045_PD_I2C0,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(10)), 0),
- DEF_PD("i2c1", R9A08G045_PD_I2C1,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(11)), 0),
- DEF_PD("i2c2", R9A08G045_PD_I2C2,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(12)), 0),
- DEF_PD("i2c3", R9A08G045_PD_I2C3,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(13)), 0),
- DEF_PD("scif0", R9A08G045_PD_SCIF0,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(1)), 0),
- DEF_PD("scif1", R9A08G045_PD_SCIF1,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(2)), 0),
- DEF_PD("scif2", R9A08G045_PD_SCIF2,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(3)), 0),
- DEF_PD("scif3", R9A08G045_PD_SCIF3,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(4)), 0),
- DEF_PD("scif4", R9A08G045_PD_SCIF4,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(5)), 0),
- DEF_PD("scif5", R9A08G045_PD_SCIF5,
- DEF_REG_CONF(CPG_BUS_MCPU3_MSTOP, BIT(4)), 0),
- DEF_PD("adc", R9A08G045_PD_ADC,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(14)), 0),
- DEF_PD("tsu", R9A08G045_PD_TSU,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(15)), 0),
- DEF_PD("vbat", R9A08G045_PD_VBAT,
- DEF_REG_CONF(CPG_BUS_MCPU3_MSTOP, BIT(8)),
- GENPD_FLAG_ALWAYS_ON),
- DEF_PD("rtc", R9A08G045_PD_RTC,
- DEF_REG_CONF(CPG_BUS_MCPU3_MSTOP, BIT(7)), 0),
+static const unsigned int r9a08g045_no_pm_mod_clks[] = {
+ MOD_CLK_BASE + R9A08G045_PCI_CLKL1PM,
};
const struct rzg2l_cpg_info r9a08g045_cpg_info = {
@@ -381,13 +381,13 @@ const struct rzg2l_cpg_info r9a08g045_cpg_info = {
.num_mod_clks = ARRAY_SIZE(r9a08g045_mod_clks),
.num_hw_mod_clks = R9A08G045_VBAT_BCLK + 1,
+ /* No PM module clocks */
+ .no_pm_mod_clks = r9a08g045_no_pm_mod_clks,
+ .num_no_pm_mod_clks = ARRAY_SIZE(r9a08g045_no_pm_mod_clks),
+
/* Resets */
.resets = r9a08g045_resets,
.num_resets = R9A08G045_VBAT_BRESETN + 1, /* Last reset ID + 1 */
- /* Power domains */
- .pm_domains = r9a08g045_pm_domains,
- .num_pm_domains = ARRAY_SIZE(r9a08g045_pm_domains),
-
.has_clk_mon_regs = true,
};
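Net effect for this SoC: the hand-maintained power-domain table is dropped because the equivalent MSTOP bits now ride along with each module clock, and the only extra bookkeeping left is the no_pm_mod_clks[] list, which exempts PCI_CLKL1PM from runtime-PM clock management.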
diff --git a/drivers/clk/renesas/r9a09g011-cpg.c b/drivers/clk/renesas/r9a09g011-cpg.c
index 22272279b104..ba25429c244d 100644
--- a/drivers/clk/renesas/r9a09g011-cpg.c
+++ b/drivers/clk/renesas/r9a09g011-cpg.c
@@ -151,64 +151,64 @@ static const struct cpg_core_clk r9a09g011_core_clks[] __initconst = {
};
static const struct rzg2l_mod_clk r9a09g011_mod_clks[] __initconst = {
- DEF_MOD("pfc", R9A09G011_PFC_PCLK, CLK_MAIN, 0x400, 2),
- DEF_MOD("gic", R9A09G011_GIC_CLK, CLK_SEL_B_D2, 0x400, 5),
- DEF_MOD("sdi0_aclk", R9A09G011_SDI0_ACLK, CLK_SEL_D, 0x408, 0),
- DEF_MOD("sdi0_imclk", R9A09G011_SDI0_IMCLK, CLK_SEL_SDI, 0x408, 1),
- DEF_MOD("sdi0_imclk2", R9A09G011_SDI0_IMCLK2, CLK_SEL_SDI, 0x408, 2),
- DEF_MOD("sdi0_clk_hs", R9A09G011_SDI0_CLK_HS, CLK_PLL2_800, 0x408, 3),
- DEF_MOD("sdi1_aclk", R9A09G011_SDI1_ACLK, CLK_SEL_D, 0x408, 4),
- DEF_MOD("sdi1_imclk", R9A09G011_SDI1_IMCLK, CLK_SEL_SDI, 0x408, 5),
- DEF_MOD("sdi1_imclk2", R9A09G011_SDI1_IMCLK2, CLK_SEL_SDI, 0x408, 6),
- DEF_MOD("sdi1_clk_hs", R9A09G011_SDI1_CLK_HS, CLK_PLL2_800, 0x408, 7),
- DEF_MOD("emm_aclk", R9A09G011_EMM_ACLK, CLK_SEL_D, 0x408, 8),
- DEF_MOD("emm_imclk", R9A09G011_EMM_IMCLK, CLK_SEL_SDI, 0x408, 9),
- DEF_MOD("emm_imclk2", R9A09G011_EMM_IMCLK2, CLK_SEL_SDI, 0x408, 10),
- DEF_MOD("emm_clk_hs", R9A09G011_EMM_CLK_HS, CLK_PLL2_800, 0x408, 11),
- DEF_COUPLED("eth_axi", R9A09G011_ETH0_CLK_AXI, CLK_PLL2_200, 0x40c, 8),
- DEF_COUPLED("eth_chi", R9A09G011_ETH0_CLK_CHI, CLK_PLL2_100, 0x40c, 8),
- DEF_MOD("eth_clk_gptp", R9A09G011_ETH0_GPTP_EXT, CLK_PLL2_100, 0x40c, 9),
- DEF_MOD("usb_aclk_h", R9A09G011_USB_ACLK_H, CLK_SEL_D, 0x40c, 4),
- DEF_MOD("usb_aclk_p", R9A09G011_USB_ACLK_P, CLK_SEL_D, 0x40c, 5),
- DEF_MOD("usb_pclk", R9A09G011_USB_PCLK, CLK_SEL_E, 0x40c, 6),
- DEF_MOD("syc_cnt_clk", R9A09G011_SYC_CNT_CLK, CLK_MAIN_24, 0x41c, 12),
- DEF_MOD("iic_pclk0", R9A09G011_IIC_PCLK0, CLK_SEL_E, 0x420, 12),
- DEF_MOD("cperi_grpb", R9A09G011_CPERI_GRPB_PCLK, CLK_SEL_E, 0x424, 0),
- DEF_MOD("tim_clk_8", R9A09G011_TIM8_CLK, CLK_MAIN_2, 0x424, 4),
- DEF_MOD("tim_clk_9", R9A09G011_TIM9_CLK, CLK_MAIN_2, 0x424, 5),
- DEF_MOD("tim_clk_10", R9A09G011_TIM10_CLK, CLK_MAIN_2, 0x424, 6),
- DEF_MOD("tim_clk_11", R9A09G011_TIM11_CLK, CLK_MAIN_2, 0x424, 7),
- DEF_MOD("tim_clk_12", R9A09G011_TIM12_CLK, CLK_MAIN_2, 0x424, 8),
- DEF_MOD("tim_clk_13", R9A09G011_TIM13_CLK, CLK_MAIN_2, 0x424, 9),
- DEF_MOD("tim_clk_14", R9A09G011_TIM14_CLK, CLK_MAIN_2, 0x424, 10),
- DEF_MOD("tim_clk_15", R9A09G011_TIM15_CLK, CLK_MAIN_2, 0x424, 11),
- DEF_MOD("iic_pclk1", R9A09G011_IIC_PCLK1, CLK_SEL_E, 0x424, 12),
- DEF_MOD("cperi_grpc", R9A09G011_CPERI_GRPC_PCLK, CLK_SEL_E, 0x428, 0),
- DEF_MOD("tim_clk_16", R9A09G011_TIM16_CLK, CLK_MAIN_2, 0x428, 4),
- DEF_MOD("tim_clk_17", R9A09G011_TIM17_CLK, CLK_MAIN_2, 0x428, 5),
- DEF_MOD("tim_clk_18", R9A09G011_TIM18_CLK, CLK_MAIN_2, 0x428, 6),
- DEF_MOD("tim_clk_19", R9A09G011_TIM19_CLK, CLK_MAIN_2, 0x428, 7),
- DEF_MOD("tim_clk_20", R9A09G011_TIM20_CLK, CLK_MAIN_2, 0x428, 8),
- DEF_MOD("tim_clk_21", R9A09G011_TIM21_CLK, CLK_MAIN_2, 0x428, 9),
- DEF_MOD("tim_clk_22", R9A09G011_TIM22_CLK, CLK_MAIN_2, 0x428, 10),
- DEF_MOD("tim_clk_23", R9A09G011_TIM23_CLK, CLK_MAIN_2, 0x428, 11),
- DEF_MOD("wdt0_pclk", R9A09G011_WDT0_PCLK, CLK_SEL_E, 0x428, 12),
- DEF_MOD("wdt0_clk", R9A09G011_WDT0_CLK, CLK_MAIN, 0x428, 13),
- DEF_MOD("cperi_grpf", R9A09G011_CPERI_GRPF_PCLK, CLK_SEL_E, 0x434, 0),
- DEF_MOD("pwm8_clk", R9A09G011_PWM8_CLK, CLK_MAIN, 0x434, 4),
- DEF_MOD("pwm9_clk", R9A09G011_PWM9_CLK, CLK_MAIN, 0x434, 5),
- DEF_MOD("pwm10_clk", R9A09G011_PWM10_CLK, CLK_MAIN, 0x434, 6),
- DEF_MOD("pwm11_clk", R9A09G011_PWM11_CLK, CLK_MAIN, 0x434, 7),
- DEF_MOD("pwm12_clk", R9A09G011_PWM12_CLK, CLK_MAIN, 0x434, 8),
- DEF_MOD("pwm13_clk", R9A09G011_PWM13_CLK, CLK_MAIN, 0x434, 9),
- DEF_MOD("pwm14_clk", R9A09G011_PWM14_CLK, CLK_MAIN, 0x434, 10),
- DEF_MOD("cperi_grpg", R9A09G011_CPERI_GRPG_PCLK, CLK_SEL_E, 0x438, 0),
- DEF_MOD("cperi_grph", R9A09G011_CPERI_GRPH_PCLK, CLK_SEL_E, 0x438, 1),
- DEF_MOD("urt_pclk", R9A09G011_URT_PCLK, CLK_SEL_E, 0x438, 4),
- DEF_MOD("urt0_clk", R9A09G011_URT0_CLK, CLK_SEL_W0, 0x438, 5),
- DEF_MOD("csi0_clk", R9A09G011_CSI0_CLK, CLK_SEL_CSI0, 0x438, 8),
- DEF_MOD("csi4_clk", R9A09G011_CSI4_CLK, CLK_SEL_CSI4, 0x438, 12),
- DEF_MOD("ca53", R9A09G011_CA53_CLK, CLK_DIV_A, 0x448, 0),
+ DEF_MOD("pfc", R9A09G011_PFC_PCLK, CLK_MAIN, 0x400, 2, 0),
+ DEF_MOD("gic", R9A09G011_GIC_CLK, CLK_SEL_B_D2, 0x400, 5, 0),
+ DEF_MOD("sdi0_aclk", R9A09G011_SDI0_ACLK, CLK_SEL_D, 0x408, 0, 0),
+ DEF_MOD("sdi0_imclk", R9A09G011_SDI0_IMCLK, CLK_SEL_SDI, 0x408, 1, 0),
+ DEF_MOD("sdi0_imclk2", R9A09G011_SDI0_IMCLK2, CLK_SEL_SDI, 0x408, 2, 0),
+ DEF_MOD("sdi0_clk_hs", R9A09G011_SDI0_CLK_HS, CLK_PLL2_800, 0x408, 3, 0),
+ DEF_MOD("sdi1_aclk", R9A09G011_SDI1_ACLK, CLK_SEL_D, 0x408, 4, 0),
+ DEF_MOD("sdi1_imclk", R9A09G011_SDI1_IMCLK, CLK_SEL_SDI, 0x408, 5, 0),
+ DEF_MOD("sdi1_imclk2", R9A09G011_SDI1_IMCLK2, CLK_SEL_SDI, 0x408, 6, 0),
+ DEF_MOD("sdi1_clk_hs", R9A09G011_SDI1_CLK_HS, CLK_PLL2_800, 0x408, 7, 0),
+ DEF_MOD("emm_aclk", R9A09G011_EMM_ACLK, CLK_SEL_D, 0x408, 8, 0),
+ DEF_MOD("emm_imclk", R9A09G011_EMM_IMCLK, CLK_SEL_SDI, 0x408, 9, 0),
+ DEF_MOD("emm_imclk2", R9A09G011_EMM_IMCLK2, CLK_SEL_SDI, 0x408, 10, 0),
+ DEF_MOD("emm_clk_hs", R9A09G011_EMM_CLK_HS, CLK_PLL2_800, 0x408, 11, 0),
+ DEF_COUPLED("eth_axi", R9A09G011_ETH0_CLK_AXI, CLK_PLL2_200, 0x40c, 8, 0),
+ DEF_COUPLED("eth_chi", R9A09G011_ETH0_CLK_CHI, CLK_PLL2_100, 0x40c, 8, 0),
+ DEF_MOD("eth_clk_gptp", R9A09G011_ETH0_GPTP_EXT, CLK_PLL2_100, 0x40c, 9, 0),
+ DEF_MOD("usb_aclk_h", R9A09G011_USB_ACLK_H, CLK_SEL_D, 0x40c, 4, 0),
+ DEF_MOD("usb_aclk_p", R9A09G011_USB_ACLK_P, CLK_SEL_D, 0x40c, 5, 0),
+ DEF_MOD("usb_pclk", R9A09G011_USB_PCLK, CLK_SEL_E, 0x40c, 6, 0),
+ DEF_MOD("syc_cnt_clk", R9A09G011_SYC_CNT_CLK, CLK_MAIN_24, 0x41c, 12, 0),
+ DEF_MOD("iic_pclk0", R9A09G011_IIC_PCLK0, CLK_SEL_E, 0x420, 12, 0),
+ DEF_MOD("cperi_grpb", R9A09G011_CPERI_GRPB_PCLK, CLK_SEL_E, 0x424, 0, 0),
+ DEF_MOD("tim_clk_8", R9A09G011_TIM8_CLK, CLK_MAIN_2, 0x424, 4, 0),
+ DEF_MOD("tim_clk_9", R9A09G011_TIM9_CLK, CLK_MAIN_2, 0x424, 5, 0),
+ DEF_MOD("tim_clk_10", R9A09G011_TIM10_CLK, CLK_MAIN_2, 0x424, 6, 0),
+ DEF_MOD("tim_clk_11", R9A09G011_TIM11_CLK, CLK_MAIN_2, 0x424, 7, 0),
+ DEF_MOD("tim_clk_12", R9A09G011_TIM12_CLK, CLK_MAIN_2, 0x424, 8, 0),
+ DEF_MOD("tim_clk_13", R9A09G011_TIM13_CLK, CLK_MAIN_2, 0x424, 9, 0),
+ DEF_MOD("tim_clk_14", R9A09G011_TIM14_CLK, CLK_MAIN_2, 0x424, 10, 0),
+ DEF_MOD("tim_clk_15", R9A09G011_TIM15_CLK, CLK_MAIN_2, 0x424, 11, 0),
+ DEF_MOD("iic_pclk1", R9A09G011_IIC_PCLK1, CLK_SEL_E, 0x424, 12, 0),
+ DEF_MOD("cperi_grpc", R9A09G011_CPERI_GRPC_PCLK, CLK_SEL_E, 0x428, 0, 0),
+ DEF_MOD("tim_clk_16", R9A09G011_TIM16_CLK, CLK_MAIN_2, 0x428, 4, 0),
+ DEF_MOD("tim_clk_17", R9A09G011_TIM17_CLK, CLK_MAIN_2, 0x428, 5, 0),
+ DEF_MOD("tim_clk_18", R9A09G011_TIM18_CLK, CLK_MAIN_2, 0x428, 6, 0),
+ DEF_MOD("tim_clk_19", R9A09G011_TIM19_CLK, CLK_MAIN_2, 0x428, 7, 0),
+ DEF_MOD("tim_clk_20", R9A09G011_TIM20_CLK, CLK_MAIN_2, 0x428, 8, 0),
+ DEF_MOD("tim_clk_21", R9A09G011_TIM21_CLK, CLK_MAIN_2, 0x428, 9, 0),
+ DEF_MOD("tim_clk_22", R9A09G011_TIM22_CLK, CLK_MAIN_2, 0x428, 10, 0),
+ DEF_MOD("tim_clk_23", R9A09G011_TIM23_CLK, CLK_MAIN_2, 0x428, 11, 0),
+ DEF_MOD("wdt0_pclk", R9A09G011_WDT0_PCLK, CLK_SEL_E, 0x428, 12, 0),
+ DEF_MOD("wdt0_clk", R9A09G011_WDT0_CLK, CLK_MAIN, 0x428, 13, 0),
+ DEF_MOD("cperi_grpf", R9A09G011_CPERI_GRPF_PCLK, CLK_SEL_E, 0x434, 0, 0),
+ DEF_MOD("pwm8_clk", R9A09G011_PWM8_CLK, CLK_MAIN, 0x434, 4, 0),
+ DEF_MOD("pwm9_clk", R9A09G011_PWM9_CLK, CLK_MAIN, 0x434, 5, 0),
+ DEF_MOD("pwm10_clk", R9A09G011_PWM10_CLK, CLK_MAIN, 0x434, 6, 0),
+ DEF_MOD("pwm11_clk", R9A09G011_PWM11_CLK, CLK_MAIN, 0x434, 7, 0),
+ DEF_MOD("pwm12_clk", R9A09G011_PWM12_CLK, CLK_MAIN, 0x434, 8, 0),
+ DEF_MOD("pwm13_clk", R9A09G011_PWM13_CLK, CLK_MAIN, 0x434, 9, 0),
+ DEF_MOD("pwm14_clk", R9A09G011_PWM14_CLK, CLK_MAIN, 0x434, 10, 0),
+ DEF_MOD("cperi_grpg", R9A09G011_CPERI_GRPG_PCLK, CLK_SEL_E, 0x438, 0, 0),
+ DEF_MOD("cperi_grph", R9A09G011_CPERI_GRPH_PCLK, CLK_SEL_E, 0x438, 1, 0),
+ DEF_MOD("urt_pclk", R9A09G011_URT_PCLK, CLK_SEL_E, 0x438, 4, 0),
+ DEF_MOD("urt0_clk", R9A09G011_URT0_CLK, CLK_SEL_W0, 0x438, 5, 0),
+ DEF_MOD("csi0_clk", R9A09G011_CSI0_CLK, CLK_SEL_CSI0, 0x438, 8, 0),
+ DEF_MOD("csi4_clk", R9A09G011_CSI4_CLK, CLK_SEL_CSI4, 0x438, 12, 0),
+ DEF_MOD("ca53", R9A09G011_CA53_CLK, CLK_DIV_A, 0x448, 0, 0),
};
static const struct rzg2l_reset r9a09g011_resets[] = {
diff --git a/drivers/clk/renesas/r9a09g047-cpg.c b/drivers/clk/renesas/r9a09g047-cpg.c
index 21699999cedd..1e9896742a06 100644
--- a/drivers/clk/renesas/r9a09g047-cpg.c
+++ b/drivers/clk/renesas/r9a09g047-cpg.c
@@ -16,7 +16,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G047_GBETH_1_CLK_PTP_REF_I,
+ LAST_DT_CORE_CLK = R9A09G047_USB2_0_CLK_CORE1,
/* External Input Clocks */
CLK_AUDIO_EXTAL,
@@ -29,6 +29,7 @@ enum clk_ids {
CLK_PLLDTY,
CLK_PLLCA55,
CLK_PLLVDO,
+ CLK_PLLETH,
/* Internal Core Clocks */
CLK_PLLCM33_DIV3,
@@ -43,9 +44,23 @@ enum clk_ids {
CLK_PLLCLN_DIV8,
CLK_PLLCLN_DIV16,
CLK_PLLCLN_DIV20,
+ CLK_PLLCLN_DIV64,
+ CLK_PLLCLN_DIV256,
+ CLK_PLLCLN_DIV1024,
CLK_PLLDTY_ACPU,
CLK_PLLDTY_ACPU_DIV2,
CLK_PLLDTY_ACPU_DIV4,
+ CLK_PLLDTY_DIV8,
+ CLK_PLLDTY_RCPU,
+ CLK_PLLDTY_RCPU_DIV4,
+ CLK_PLLETH_DIV_250_FIX,
+ CLK_PLLETH_DIV_125_FIX,
+ CLK_CSDIV_PLLETH_GBE0,
+ CLK_CSDIV_PLLETH_GBE1,
+ CLK_SMUX2_GBE0_TXCLK,
+ CLK_SMUX2_GBE0_RXCLK,
+ CLK_SMUX2_GBE1_TXCLK,
+ CLK_SMUX2_GBE1_RXCLK,
CLK_PLLDTY_DIV16,
CLK_PLLVDO_CRU0,
CLK_PLLVDO_GPU,
@@ -85,7 +100,18 @@ static const struct clk_div_table dtable_2_64[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_100[] = {
+ {0, 2},
+ {1, 10},
+ {2, 100},
+ {0, 0},
+};
+
/* Mux clock tables */
+static const char * const smux2_gbe0_rxclk[] = { ".plleth_gbe0", "et0_rxclk" };
+static const char * const smux2_gbe0_txclk[] = { ".plleth_gbe0", "et0_txclk" };
+static const char * const smux2_gbe1_rxclk[] = { ".plleth_gbe1", "et1_rxclk" };
+static const char * const smux2_gbe1_txclk[] = { ".plleth_gbe1", "et1_txclk" };
static const char * const smux2_xspi_clk0[] = { ".pllcm33_div3", ".pllcm33_div4" };
static const char * const smux2_xspi_clk1[] = { ".smux2_xspi_clk0", ".pllcm33_div5" };
@@ -100,6 +126,7 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
+ DEF_FIXED(".plleth", CLK_PLLETH, CLK_QEXTAL, 125, 3),
DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
/* Internal Core Clocks */
@@ -118,11 +145,28 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
DEF_FIXED(".pllcln_div20", CLK_PLLCLN_DIV20, CLK_PLLCLN, 1, 20),
+ DEF_FIXED(".pllcln_div64", CLK_PLLCLN_DIV64, CLK_PLLCLN, 1, 64),
+ DEF_FIXED(".pllcln_div256", CLK_PLLCLN_DIV256, CLK_PLLCLN, 1, 256),
+ DEF_FIXED(".pllcln_div1024", CLK_PLLCLN_DIV1024, CLK_PLLCLN, 1, 1024),
DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
DEF_FIXED(".plldty_acpu_div2", CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU, 1, 2),
DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+ DEF_FIXED(".plldty_div8", CLK_PLLDTY_DIV8, CLK_PLLDTY, 1, 8),
+
+ DEF_FIXED(".plleth_250_fix", CLK_PLLETH_DIV_250_FIX, CLK_PLLETH, 1, 4),
+ DEF_FIXED(".plleth_125_fix", CLK_PLLETH_DIV_125_FIX, CLK_PLLETH_DIV_250_FIX, 1, 2),
+ DEF_CSDIV(".plleth_gbe0", CLK_CSDIV_PLLETH_GBE0, CLK_PLLETH_DIV_250_FIX,
+ CSDIV0_DIVCTL0, dtable_2_100),
+ DEF_CSDIV(".plleth_gbe1", CLK_CSDIV_PLLETH_GBE1, CLK_PLLETH_DIV_250_FIX,
+ CSDIV0_DIVCTL1, dtable_2_100),
+ DEF_SMUX(".smux2_gbe0_txclk", CLK_SMUX2_GBE0_TXCLK, SSEL0_SELCTL2, smux2_gbe0_txclk),
+ DEF_SMUX(".smux2_gbe0_rxclk", CLK_SMUX2_GBE0_RXCLK, SSEL0_SELCTL3, smux2_gbe0_rxclk),
+ DEF_SMUX(".smux2_gbe1_txclk", CLK_SMUX2_GBE1_TXCLK, SSEL1_SELCTL0, smux2_gbe1_txclk),
+ DEF_SMUX(".smux2_gbe1_rxclk", CLK_SMUX2_GBE1_RXCLK, SSEL1_SELCTL1, smux2_gbe1_rxclk),
DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
+ DEF_DDIV(".plldty_rcpu", CLK_PLLDTY_RCPU, CLK_PLLDTY, CDDIV3_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_rcpu_div4", CLK_PLLDTY_RCPU_DIV4, CLK_PLLDTY_RCPU, 1, 4),
DEF_DDIV(".pllvdo_cru0", CLK_PLLVDO_CRU0, CLK_PLLVDO, CDDIV3_DIVCTL3, dtable_2_4),
DEF_DDIV(".pllvdo_gpu", CLK_PLLVDO_GPU, CLK_PLLVDO, CDDIV3_DIVCTL1, dtable_2_64),
@@ -139,13 +183,35 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
CDDIV1_DIVCTL3, dtable_1_8),
DEF_FIXED("iotop_0_shclk", R9A09G047_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
DEF_FIXED("spi_clk_spi", R9A09G047_SPI_CLK_SPI, CLK_PLLCM33_XSPI, 1, 2),
+ DEF_FIXED("usb2_0_clk_core0", R9A09G047_USB2_0_CLK_CORE0, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("usb2_0_clk_core1", R9A09G047_USB2_0_CLK_CORE1, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("gbeth_0_clk_ptp_ref_i", R9A09G047_GBETH_0_CLK_PTP_REF_I,
+ CLK_PLLETH_DIV_125_FIX, 1, 1),
+ DEF_FIXED("gbeth_1_clk_ptp_ref_i", R9A09G047_GBETH_1_CLK_PTP_REF_I,
+ CLK_PLLETH_DIV_125_FIX, 1, 1),
+ DEF_FIXED("usb3_0_ref_alt_clk_p", R9A09G047_USB3_0_REF_ALT_CLK_P, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("usb3_0_core_clk", R9A09G047_USB3_0_CLKCORE, CLK_QEXTAL, 1, 1),
};
static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
+ DEF_MOD("dmac_0_aclk", CLK_PLLCM33_GEAR, 0, 0, 0, 0,
+ BUS_MSTOP(5, BIT(9))),
+ DEF_MOD("dmac_1_aclk", CLK_PLLDTY_ACPU_DIV2, 0, 1, 0, 1,
+ BUS_MSTOP(3, BIT(2))),
+ DEF_MOD("dmac_2_aclk", CLK_PLLDTY_ACPU_DIV2, 0, 2, 0, 2,
+ BUS_MSTOP(3, BIT(3))),
+ DEF_MOD("dmac_3_aclk", CLK_PLLDTY_RCPU_DIV4, 0, 3, 0, 3,
+ BUS_MSTOP(10, BIT(11))),
+ DEF_MOD("dmac_4_aclk", CLK_PLLDTY_RCPU_DIV4, 0, 4, 0, 4,
+ BUS_MSTOP(10, BIT(12))),
DEF_MOD_CRITICAL("icu_0_pclk_i", CLK_PLLCM33_DIV16, 0, 5, 0, 5,
BUS_MSTOP_NONE),
DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
BUS_MSTOP(3, BIT(5))),
+ DEF_MOD("gpt_0_pclk_sfr", CLK_PLLCLN_DIV8, 3, 1, 1, 17,
+ BUS_MSTOP(6, BIT(11))),
+ DEF_MOD("gpt_1_pclk_sfr", CLK_PLLCLN_DIV8, 3, 2, 1, 18,
+ BUS_MSTOP(6, BIT(12))),
DEF_MOD("wdt_1_clkp", CLK_PLLCLN_DIV16, 4, 13, 2, 13,
BUS_MSTOP(1, BIT(0))),
DEF_MOD("wdt_1_clk_loco", CLK_QEXTAL, 4, 14, 2, 14,
@@ -158,8 +224,114 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
BUS_MSTOP(5, BIT(13))),
DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18,
BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("rsci0_pclk", CLK_PLLCLN_DIV16, 5, 13, 2, 29,
+ BUS_MSTOP(11, BIT(3))),
+ DEF_MOD("rsci0_tclk", CLK_PLLCLN_DIV16, 5, 14, 2, 30,
+ BUS_MSTOP(11, BIT(3))),
+ DEF_MOD("rsci0_ps_ps3_n", CLK_PLLCLN_DIV1024, 5, 15, 2, 31,
+ BUS_MSTOP(11, BIT(3))),
+ DEF_MOD("rsci0_ps_ps2_n", CLK_PLLCLN_DIV256, 6, 0, 3, 0,
+ BUS_MSTOP(11, BIT(3))),
+ DEF_MOD("rsci0_ps_ps1_n", CLK_PLLCLN_DIV64, 6, 1, 3, 1,
+ BUS_MSTOP(11, BIT(3))),
+ DEF_MOD("rsci1_pclk", CLK_PLLCLN_DIV16, 6, 2, 3, 2,
+ BUS_MSTOP(11, BIT(4))),
+ DEF_MOD("rsci1_tclk", CLK_PLLCLN_DIV16, 6, 3, 3, 3,
+ BUS_MSTOP(11, BIT(4))),
+ DEF_MOD("rsci1_ps_ps3_n", CLK_PLLCLN_DIV1024, 6, 4, 3, 4,
+ BUS_MSTOP(11, BIT(4))),
+ DEF_MOD("rsci1_ps_ps2_n", CLK_PLLCLN_DIV256, 6, 5, 3, 5,
+ BUS_MSTOP(11, BIT(4))),
+ DEF_MOD("rsci1_ps_ps1_n", CLK_PLLCLN_DIV64, 6, 6, 3, 6,
+ BUS_MSTOP(11, BIT(4))),
+ DEF_MOD("rsci2_pclk", CLK_PLLCLN_DIV16, 6, 7, 3, 7,
+ BUS_MSTOP(11, BIT(5))),
+ DEF_MOD("rsci2_tclk", CLK_PLLCLN_DIV16, 6, 8, 3, 8,
+ BUS_MSTOP(11, BIT(5))),
+ DEF_MOD("rsci2_ps_ps3_n", CLK_PLLCLN_DIV1024, 6, 9, 3, 9,
+ BUS_MSTOP(11, BIT(5))),
+ DEF_MOD("rsci2_ps_ps2_n", CLK_PLLCLN_DIV256, 6, 10, 3, 10,
+ BUS_MSTOP(11, BIT(5))),
+ DEF_MOD("rsci2_ps_ps1_n", CLK_PLLCLN_DIV64, 6, 11, 3, 11,
+ BUS_MSTOP(11, BIT(5))),
+ DEF_MOD("rsci3_pclk", CLK_PLLCLN_DIV16, 6, 12, 3, 12,
+ BUS_MSTOP(11, BIT(6))),
+ DEF_MOD("rsci3_tclk", CLK_PLLCLN_DIV16, 6, 13, 3, 13,
+ BUS_MSTOP(11, BIT(6))),
+ DEF_MOD("rsci3_ps_ps3_n", CLK_PLLCLN_DIV1024, 6, 14, 3, 14,
+ BUS_MSTOP(11, BIT(6))),
+ DEF_MOD("rsci3_ps_ps2_n", CLK_PLLCLN_DIV256, 6, 15, 3, 15,
+ BUS_MSTOP(11, BIT(6))),
+ DEF_MOD("rsci3_ps_ps1_n", CLK_PLLCLN_DIV64, 7, 0, 3, 16,
+ BUS_MSTOP(11, BIT(6))),
+ DEF_MOD("rsci4_pclk", CLK_PLLCLN_DIV16, 7, 1, 3, 17,
+ BUS_MSTOP(11, BIT(7))),
+ DEF_MOD("rsci4_tclk", CLK_PLLCLN_DIV16, 7, 2, 3, 18,
+ BUS_MSTOP(11, BIT(7))),
+ DEF_MOD("rsci4_ps_ps3_n", CLK_PLLCLN_DIV1024, 7, 3, 3, 19,
+ BUS_MSTOP(11, BIT(7))),
+ DEF_MOD("rsci4_ps_ps2_n", CLK_PLLCLN_DIV256, 7, 4, 3, 20,
+ BUS_MSTOP(11, BIT(7))),
+ DEF_MOD("rsci4_ps_ps1_n", CLK_PLLCLN_DIV64, 7, 5, 3, 21,
+ BUS_MSTOP(11, BIT(7))),
+ DEF_MOD("rsci5_pclk", CLK_PLLCLN_DIV16, 7, 6, 3, 22,
+ BUS_MSTOP(11, BIT(8))),
+ DEF_MOD("rsci5_tclk", CLK_PLLCLN_DIV16, 7, 7, 3, 23,
+ BUS_MSTOP(11, BIT(8))),
+ DEF_MOD("rsci5_ps_ps3_n", CLK_PLLCLN_DIV1024, 7, 8, 3, 24,
+ BUS_MSTOP(11, BIT(8))),
+ DEF_MOD("rsci5_ps_ps2_n", CLK_PLLCLN_DIV256, 7, 9, 3, 25,
+ BUS_MSTOP(11, BIT(8))),
+ DEF_MOD("rsci5_ps_ps1_n", CLK_PLLCLN_DIV64, 7, 10, 3, 26,
+ BUS_MSTOP(11, BIT(8))),
+ DEF_MOD("rsci6_pclk", CLK_PLLCLN_DIV16, 7, 11, 3, 27,
+ BUS_MSTOP(11, BIT(9))),
+ DEF_MOD("rsci6_tclk", CLK_PLLCLN_DIV16, 7, 12, 3, 28,
+ BUS_MSTOP(11, BIT(9))),
+ DEF_MOD("rsci6_ps_ps3_n", CLK_PLLCLN_DIV1024, 7, 13, 3, 29,
+ BUS_MSTOP(11, BIT(9))),
+ DEF_MOD("rsci6_ps_ps2_n", CLK_PLLCLN_DIV256, 7, 14, 3, 30,
+ BUS_MSTOP(11, BIT(9))),
+ DEF_MOD("rsci6_ps_ps1_n", CLK_PLLCLN_DIV64, 7, 15, 3, 31,
+ BUS_MSTOP(11, BIT(9))),
+ DEF_MOD("rsci7_pclk", CLK_PLLCLN_DIV16, 8, 0, 4, 0,
+ BUS_MSTOP(11, BIT(10))),
+ DEF_MOD("rsci7_tclk", CLK_PLLCLN_DIV16, 8, 1, 4, 1,
+ BUS_MSTOP(11, BIT(10))),
+ DEF_MOD("rsci7_ps_ps3_n", CLK_PLLCLN_DIV1024, 8, 2, 4, 2,
+ BUS_MSTOP(11, BIT(10))),
+ DEF_MOD("rsci7_ps_ps2_n", CLK_PLLCLN_DIV256, 8, 3, 4, 3,
+ BUS_MSTOP(11, BIT(10))),
+ DEF_MOD("rsci7_ps_ps1_n", CLK_PLLCLN_DIV64, 8, 4, 4, 4,
+ BUS_MSTOP(11, BIT(10))),
+ DEF_MOD("rsci8_pclk", CLK_PLLCLN_DIV16, 8, 5, 4, 5,
+ BUS_MSTOP(11, BIT(11))),
+ DEF_MOD("rsci8_tclk", CLK_PLLCLN_DIV16, 8, 6, 4, 6,
+ BUS_MSTOP(11, BIT(11))),
+ DEF_MOD("rsci8_ps_ps3_n", CLK_PLLCLN_DIV1024, 8, 7, 4, 7,
+ BUS_MSTOP(11, BIT(11))),
+ DEF_MOD("rsci8_ps_ps2_n", CLK_PLLCLN_DIV256, 8, 8, 4, 8,
+ BUS_MSTOP(11, BIT(11))),
+ DEF_MOD("rsci8_ps_ps1_n", CLK_PLLCLN_DIV64, 8, 9, 4, 9,
+ BUS_MSTOP(11, BIT(11))),
+ DEF_MOD("rsci9_pclk", CLK_PLLCLN_DIV16, 8, 10, 4, 10,
+ BUS_MSTOP(11, BIT(12))),
+ DEF_MOD("rsci9_tclk", CLK_PLLCLN_DIV16, 8, 11, 4, 11,
+ BUS_MSTOP(11, BIT(12))),
+ DEF_MOD("rsci9_ps_ps3_n", CLK_PLLCLN_DIV1024, 8, 12, 4, 12,
+ BUS_MSTOP(11, BIT(12))),
+ DEF_MOD("rsci9_ps_ps2_n", CLK_PLLCLN_DIV256, 8, 13, 4, 13,
+ BUS_MSTOP(11, BIT(12))),
+ DEF_MOD("rsci9_ps_ps1_n", CLK_PLLCLN_DIV64, 8, 14, 4, 14,
+ BUS_MSTOP(11, BIT(12))),
DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("i3c_0_pclkrw", CLK_PLLCLN_DIV16, 9, 0, 4, 16,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_pclk", CLK_PLLCLN_DIV16, 9, 1, 4, 17,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_tclk", CLK_PLLCLN_DIV8, 9, 2, 4, 18,
+ BUS_MSTOP(10, BIT(15))),
DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
BUS_MSTOP(3, BIT(13))),
DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20,
@@ -214,6 +386,44 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
BUS_MSTOP(8, BIT(4))),
DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("usb3_0_aclk", CLK_PLLDTY_DIV8, 10, 15, 5, 15,
+ BUS_MSTOP(7, BIT(12))),
+ DEF_MOD("usb3_0_pclk_usbtst", CLK_PLLDTY_ACPU_DIV4, 11, 0, 5, 16,
+ BUS_MSTOP(7, BIT(14))),
+ DEF_MOD("usb2_0_u2h0_hclk", CLK_PLLDTY_DIV8, 11, 3, 5, 19,
+ BUS_MSTOP(7, BIT(7))),
+ DEF_MOD("usb2_0_u2h1_hclk", CLK_PLLDTY_DIV8, 11, 4, 5, 20,
+ BUS_MSTOP(7, BIT(8))),
+ DEF_MOD("usb2_0_u2p_exr_cpuclk", CLK_PLLDTY_ACPU_DIV4, 11, 5, 5, 21,
+ BUS_MSTOP(7, BIT(9))),
+ DEF_MOD("usb2_0_pclk_usbtst0", CLK_PLLDTY_ACPU_DIV4, 11, 6, 5, 22,
+ BUS_MSTOP(7, BIT(10))),
+ DEF_MOD("usb2_0_pclk_usbtst1", CLK_PLLDTY_ACPU_DIV4, 11, 7, 5, 23,
+ BUS_MSTOP(7, BIT(11))),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_i", CLK_SMUX2_GBE0_TXCLK, 11, 8, 5, 24,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_i", CLK_SMUX2_GBE0_RXCLK, 11, 9, 5, 25,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_180_i", CLK_SMUX2_GBE0_TXCLK, 11, 10, 5, 26,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_180_i", CLK_SMUX2_GBE0_RXCLK, 11, 11, 5, 27,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD("gbeth_0_aclk_csr_i", CLK_PLLDTY_DIV8, 11, 12, 5, 28,
+ BUS_MSTOP(8, BIT(5))),
+ DEF_MOD("gbeth_0_aclk_i", CLK_PLLDTY_DIV8, 11, 13, 5, 29,
+ BUS_MSTOP(8, BIT(5))),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_tx_i", CLK_SMUX2_GBE1_TXCLK, 11, 14, 5, 30,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_rx_i", CLK_SMUX2_GBE1_RXCLK, 11, 15, 5, 31,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_tx_180_i", CLK_SMUX2_GBE1_TXCLK, 12, 0, 6, 0,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_rx_180_i", CLK_SMUX2_GBE1_RXCLK, 12, 1, 6, 1,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD("gbeth_1_aclk_csr_i", CLK_PLLDTY_DIV8, 12, 2, 6, 2,
+ BUS_MSTOP(8, BIT(6))),
+ DEF_MOD("gbeth_1_aclk_i", CLK_PLLDTY_DIV8, 12, 3, 6, 3,
+ BUS_MSTOP(8, BIT(6))),
DEF_MOD("cru_0_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 2, 6, 18,
BUS_MSTOP(9, BIT(4))),
DEF_MOD_NO_PM("cru_0_vclk", CLK_PLLVDO_CRU0, 13, 3, 6, 19,
@@ -232,13 +442,44 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
static const struct rzv2h_reset r9a09g047_resets[] __initconst = {
DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
+ DEF_RST(3, 1, 1, 2), /* DMAC_0_ARESETN */
+ DEF_RST(3, 2, 1, 3), /* DMAC_1_ARESETN */
+ DEF_RST(3, 3, 1, 4), /* DMAC_2_ARESETN */
+ DEF_RST(3, 4, 1, 5), /* DMAC_3_ARESETN */
+ DEF_RST(3, 5, 1, 6), /* DMAC_4_ARESETN */
DEF_RST(3, 6, 1, 7), /* ICU_0_PRESETN_I */
DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
+ DEF_RST(5, 9, 2, 10), /* GPT_0_RST_P_REG */
+ DEF_RST(5, 10, 2, 11), /* GPT_0_RST_S_REG */
+ DEF_RST(5, 11, 2, 12), /* GPT_1_RST_P_REG */
+ DEF_RST(5, 12, 2, 13), /* GPT_1_RST_S_REG */
DEF_RST(7, 6, 3, 7), /* WDT_1_RESET */
DEF_RST(7, 7, 3, 8), /* WDT_2_RESET */
DEF_RST(7, 8, 3, 9), /* WDT_3_RESET */
+ DEF_RST(8, 1, 3, 18), /* RSCI0_PRESETN */
+ DEF_RST(8, 2, 3, 19), /* RSCI0_TRESETN */
+ DEF_RST(8, 3, 3, 20), /* RSCI1_PRESETN */
+ DEF_RST(8, 4, 3, 21), /* RSCI1_TRESETN */
+ DEF_RST(8, 5, 3, 22), /* RSCI2_PRESETN */
+ DEF_RST(8, 6, 3, 23), /* RSCI2_TRESETN */
+ DEF_RST(8, 7, 3, 24), /* RSCI3_PRESETN */
+ DEF_RST(8, 8, 3, 25), /* RSCI3_TRESETN */
+ DEF_RST(8, 9, 3, 26), /* RSCI4_PRESETN */
+ DEF_RST(8, 10, 3, 27), /* RSCI4_TRESETN */
+ DEF_RST(8, 11, 3, 28), /* RSCI5_PRESETN */
+ DEF_RST(8, 12, 3, 29), /* RSCI5_TRESETN */
+ DEF_RST(8, 13, 3, 30), /* RSCI6_PRESETN */
+ DEF_RST(8, 14, 3, 31), /* RSCI6_TRESETN */
+ DEF_RST(8, 15, 4, 0), /* RSCI7_PRESETN */
+ DEF_RST(9, 0, 4, 1), /* RSCI7_TRESETN */
+ DEF_RST(9, 1, 4, 2), /* RSCI8_PRESETN */
+ DEF_RST(9, 2, 4, 3), /* RSCI8_TRESETN */
+ DEF_RST(9, 3, 4, 4), /* RSCI9_PRESETN */
+ DEF_RST(9, 4, 4, 5), /* RSCI9_TRESETN */
DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(9, 6, 4, 7), /* I3C_0_PRESETN */
+ DEF_RST(9, 7, 4, 8), /* I3C_0_TRESETN */
DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
DEF_RST(9, 10, 4, 11), /* RIIC_2_MRST */
@@ -255,6 +496,13 @@ static const struct rzv2h_reset r9a09g047_resets[] __initconst = {
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(10, 10, 4, 27), /* USB3_0_ARESETN */
+ DEF_RST(10, 12, 4, 29), /* USB2_0_U2H0_HRESETN */
+ DEF_RST(10, 13, 4, 30), /* USB2_0_U2H1_HRESETN */
+ DEF_RST(10, 14, 4, 31), /* USB2_0_U2P_EXL_SYSRST */
+ DEF_RST(10, 15, 5, 0), /* USB2_0_PRESETN */
+ DEF_RST(11, 0, 5, 1), /* GBETH_0_ARESETN_I */
+ DEF_RST(11, 1, 5, 2), /* GBETH_1_ARESETN_I */
DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
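
The divider tables added in this file (dtable_2_100, and the dtable_1_8/dtable_2_64 they sit beside) all follow the common-clk struct clk_div_table convention: each entry maps a register field value to a divisor, and a {0, 0} entry terminates the table. Below is a minimal sketch of that lookup with a hypothetical helper name; the framework performs the equivalent scan internally in divider_recalc_rate().

#include <linux/clk-provider.h>

/* Hypothetical helper: translate a register field value to a divisor. */
static unsigned int sketch_table_to_div(const struct clk_div_table *table,
					unsigned int val)
{
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++) {	/* {0, 0} ends the table */
		if (clkt->val == val)
			return clkt->div;	/* e.g. dtable_2_100: 2 -> 100 */
	}
	return 0;	/* field value not listed in the table */
}
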
diff --git a/drivers/clk/renesas/r9a09g056-cpg.c b/drivers/clk/renesas/r9a09g056-cpg.c
index e2712a25c43a..f48a082e65d7 100644
--- a/drivers/clk/renesas/r9a09g056-cpg.c
+++ b/drivers/clk/renesas/r9a09g056-cpg.c
@@ -6,6 +6,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/clk/renesas.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -16,7 +17,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G056_GBETH_1_CLK_PTP_REF_I,
+ LAST_DT_CORE_CLK = R9A09G056_USB3_0_CLKCORE,
/* External Input Clocks */
CLK_AUDIO_EXTAL,
@@ -28,13 +29,43 @@ enum clk_ids {
CLK_PLLCLN,
CLK_PLLDTY,
CLK_PLLCA55,
+ CLK_PLLVDO,
+ CLK_PLLETH,
+ CLK_PLLDSI,
+ CLK_PLLGPU,
/* Internal Core Clocks */
+ CLK_PLLCM33_DIV3,
+ CLK_PLLCM33_DIV4,
+ CLK_PLLCM33_DIV5,
CLK_PLLCM33_DIV16,
+ CLK_PLLCM33_GEAR,
+ CLK_SMUX2_XSPI_CLK0,
+ CLK_SMUX2_XSPI_CLK1,
+ CLK_PLLCM33_XSPI,
CLK_PLLCLN_DIV2,
CLK_PLLCLN_DIV8,
+ CLK_PLLCLN_DIV16,
CLK_PLLDTY_ACPU,
+ CLK_PLLDTY_ACPU_DIV2,
CLK_PLLDTY_ACPU_DIV4,
+ CLK_PLLDTY_DIV8,
+ CLK_PLLDTY_DIV16,
+ CLK_PLLVDO_CRU0,
+ CLK_PLLVDO_CRU1,
+ CLK_PLLVDO_ISP,
+ CLK_PLLETH_DIV_250_FIX,
+ CLK_PLLETH_DIV_125_FIX,
+ CLK_CSDIV_PLLETH_GBE0,
+ CLK_CSDIV_PLLETH_GBE1,
+ CLK_SMUX2_GBE0_TXCLK,
+ CLK_SMUX2_GBE0_RXCLK,
+ CLK_SMUX2_GBE1_TXCLK,
+ CLK_SMUX2_GBE1_RXCLK,
+ CLK_CDIV4_PLLETH_LPCLK,
+ CLK_PLLETH_LPCLK_GEAR,
+ CLK_PLLDSI_GEAR,
+ CLK_PLLGPU_GEAR,
/* Module Clocks */
MOD_CLK_BASE,
@@ -48,6 +79,40 @@ static const struct clk_div_table dtable_1_8[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_4[] = {
+ {0, 2},
+ {1, 4},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_16[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_32[] = {
+ {0, 2},
+ {1, 4},
+ {2, 6},
+ {3, 8},
+ {4, 10},
+ {5, 12},
+ {6, 14},
+ {7, 16},
+ {8, 18},
+ {9, 20},
+ {10, 22},
+ {11, 24},
+ {12, 26},
+ {13, 28},
+ {14, 30},
+ {15, 32},
+ {0, 0},
+};
+
static const struct clk_div_table dtable_2_64[] = {
{0, 2},
{1, 4},
@@ -57,6 +122,32 @@ static const struct clk_div_table dtable_2_64[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_100[] = {
+ {0, 2},
+ {1, 10},
+ {2, 100},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_16_128[] = {
+ {0, 16},
+ {1, 32},
+ {2, 64},
+ {3, 128},
+ {0, 0},
+};
+
+RZV2H_CPG_PLL_DSI_LIMITS(rzv2n_cpg_pll_dsi_limits);
+#define PLLDSI PLL_PACK_LIMITS(0xc0, 1, 0, &rzv2n_cpg_pll_dsi_limits)
+
+/* Mux clock tables */
+static const char * const smux2_gbe0_rxclk[] = { ".plleth_gbe0", "et0_rxclk" };
+static const char * const smux2_gbe0_txclk[] = { ".plleth_gbe0", "et0_txclk" };
+static const char * const smux2_gbe1_rxclk[] = { ".plleth_gbe1", "et1_rxclk" };
+static const char * const smux2_gbe1_txclk[] = { ".plleth_gbe1", "et1_txclk" };
+static const char * const smux2_xspi_clk0[] = { ".pllcm33_div3", ".pllcm33_div4" };
+static const char * const smux2_xspi_clk1[] = { ".smux2_xspi_clk0", ".pllcm33_div5" };
+
static const struct cpg_core_clk r9a09g056_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
@@ -68,15 +159,54 @@ static const struct cpg_core_clk r9a09g056_core_clks[] __initconst = {
DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
+ DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
+ DEF_FIXED(".plleth", CLK_PLLETH, CLK_QEXTAL, 125, 3),
+ DEF_PLLDSI(".plldsi", CLK_PLLDSI, CLK_QEXTAL, PLLDSI),
+ DEF_PLL(".pllgpu", CLK_PLLGPU, CLK_QEXTAL, PLLGPU),
/* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div3", CLK_PLLCM33_DIV3, CLK_PLLCM33, 1, 3),
+ DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
+ DEF_FIXED(".pllcm33_div5", CLK_PLLCM33_DIV5, CLK_PLLCM33, 1, 5),
DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+ DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR, CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
+ DEF_SMUX(".smux2_xspi_clk0", CLK_SMUX2_XSPI_CLK0, SSEL1_SELCTL2, smux2_xspi_clk0),
+ DEF_SMUX(".smux2_xspi_clk1", CLK_SMUX2_XSPI_CLK1, SSEL1_SELCTL3, smux2_xspi_clk1),
+ DEF_CSDIV(".pllcm33_xspi", CLK_PLLCM33_XSPI, CLK_SMUX2_XSPI_CLK1, CSDIV0_DIVCTL3,
+ dtable_2_16),
DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
+ DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_acpu_div2", CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU, 1, 2),
DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+ DEF_FIXED(".plldty_div8", CLK_PLLDTY_DIV8, CLK_PLLDTY, 1, 8),
+ DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
+
+ DEF_DDIV(".pllvdo_cru0", CLK_PLLVDO_CRU0, CLK_PLLVDO, CDDIV3_DIVCTL3, dtable_2_4),
+ DEF_DDIV(".pllvdo_cru1", CLK_PLLVDO_CRU1, CLK_PLLVDO, CDDIV4_DIVCTL0, dtable_2_4),
+ DEF_DDIV(".pllvdo_isp", CLK_PLLVDO_ISP, CLK_PLLVDO, CDDIV2_DIVCTL3, dtable_2_64),
+
+ DEF_FIXED(".plleth_250_fix", CLK_PLLETH_DIV_250_FIX, CLK_PLLETH, 1, 4),
+ DEF_FIXED(".plleth_125_fix", CLK_PLLETH_DIV_125_FIX, CLK_PLLETH_DIV_250_FIX, 1, 2),
+ DEF_CSDIV(".plleth_gbe0", CLK_CSDIV_PLLETH_GBE0,
+ CLK_PLLETH_DIV_250_FIX, CSDIV0_DIVCTL0, dtable_2_100),
+ DEF_CSDIV(".plleth_gbe1", CLK_CSDIV_PLLETH_GBE1,
+ CLK_PLLETH_DIV_250_FIX, CSDIV0_DIVCTL1, dtable_2_100),
+ DEF_SMUX(".smux2_gbe0_txclk", CLK_SMUX2_GBE0_TXCLK, SSEL0_SELCTL2, smux2_gbe0_txclk),
+ DEF_SMUX(".smux2_gbe0_rxclk", CLK_SMUX2_GBE0_RXCLK, SSEL0_SELCTL3, smux2_gbe0_rxclk),
+ DEF_SMUX(".smux2_gbe1_txclk", CLK_SMUX2_GBE1_TXCLK, SSEL1_SELCTL0, smux2_gbe1_txclk),
+ DEF_SMUX(".smux2_gbe1_rxclk", CLK_SMUX2_GBE1_RXCLK, SSEL1_SELCTL1, smux2_gbe1_rxclk),
+ DEF_FIXED(".cdiv4_plleth_lpclk", CLK_CDIV4_PLLETH_LPCLK, CLK_PLLETH, 1, 4),
+ DEF_CSDIV(".plleth_lpclk_gear", CLK_PLLETH_LPCLK_GEAR, CLK_CDIV4_PLLETH_LPCLK,
+ CSDIV0_DIVCTL2, dtable_16_128),
+
+ DEF_PLLDSI_DIV(".plldsi_gear", CLK_PLLDSI_GEAR, CLK_PLLDSI,
+ CSDIV1_DIVCTL2, dtable_2_32),
+
+ DEF_DDIV(".pllgpu_gear", CLK_PLLGPU_GEAR, CLK_PLLGPU, CDDIV3_DIVCTL1, dtable_2_64),
/* Core Clocks */
DEF_FIXED("sys_0_pclk", R9A09G056_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
@@ -89,13 +219,84 @@ static const struct cpg_core_clk r9a09g056_core_clks[] __initconst = {
DEF_DDIV("ca55_0_coreclk3", R9A09G056_CA55_0_CORE_CLK3, CLK_PLLCA55,
CDDIV1_DIVCTL3, dtable_1_8),
DEF_FIXED("iotop_0_shclk", R9A09G056_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+ DEF_FIXED("usb2_0_clk_core0", R9A09G056_USB2_0_CLK_CORE0, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("gbeth_0_clk_ptp_ref_i", R9A09G056_GBETH_0_CLK_PTP_REF_I,
+ CLK_PLLETH_DIV_125_FIX, 1, 1),
+ DEF_FIXED("gbeth_1_clk_ptp_ref_i", R9A09G056_GBETH_1_CLK_PTP_REF_I,
+ CLK_PLLETH_DIV_125_FIX, 1, 1),
+ DEF_FIXED_MOD_STATUS("spi_clk_spi", R9A09G056_SPI_CLK_SPI, CLK_PLLCM33_XSPI, 1, 2,
+ FIXED_MOD_CONF_XSPI),
+ DEF_FIXED("usb3_0_ref_alt_clk_p", R9A09G056_USB3_0_REF_ALT_CLK_P, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("usb3_0_core_clk", R9A09G056_USB3_0_CLKCORE, CLK_QEXTAL, 1, 1),
};
static const struct rzv2h_mod_clk r9a09g056_mod_clks[] __initconst = {
DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
BUS_MSTOP(3, BIT(5))),
+ DEF_MOD("gtm_0_pclk", CLK_PLLCM33_DIV16, 4, 3, 2, 3,
+ BUS_MSTOP(5, BIT(10))),
+ DEF_MOD("gtm_1_pclk", CLK_PLLCM33_DIV16, 4, 4, 2, 4,
+ BUS_MSTOP(5, BIT(11))),
+ DEF_MOD("gtm_2_pclk", CLK_PLLCLN_DIV16, 4, 5, 2, 5,
+ BUS_MSTOP(2, BIT(13))),
+ DEF_MOD("gtm_3_pclk", CLK_PLLCLN_DIV16, 4, 6, 2, 6,
+ BUS_MSTOP(2, BIT(14))),
+ DEF_MOD("gtm_4_pclk", CLK_PLLCLN_DIV16, 4, 7, 2, 7,
+ BUS_MSTOP(11, BIT(13))),
+ DEF_MOD("gtm_5_pclk", CLK_PLLCLN_DIV16, 4, 8, 2, 8,
+ BUS_MSTOP(11, BIT(14))),
+ DEF_MOD("gtm_6_pclk", CLK_PLLCLN_DIV16, 4, 9, 2, 9,
+ BUS_MSTOP(11, BIT(15))),
+ DEF_MOD("gtm_7_pclk", CLK_PLLCLN_DIV16, 4, 10, 2, 10,
+ BUS_MSTOP(12, BIT(0))),
+ DEF_MOD("wdt_0_clkp", CLK_PLLCM33_DIV16, 4, 11, 2, 11,
+ BUS_MSTOP(3, BIT(10))),
+ DEF_MOD("wdt_0_clk_loco", CLK_QEXTAL, 4, 12, 2, 12,
+ BUS_MSTOP(3, BIT(10))),
+ DEF_MOD("wdt_1_clkp", CLK_PLLCLN_DIV16, 4, 13, 2, 13,
+ BUS_MSTOP(1, BIT(0))),
+ DEF_MOD("wdt_1_clk_loco", CLK_QEXTAL, 4, 14, 2, 14,
+ BUS_MSTOP(1, BIT(0))),
+ DEF_MOD("wdt_2_clkp", CLK_PLLCLN_DIV16, 4, 15, 2, 15,
+ BUS_MSTOP(5, BIT(12))),
+ DEF_MOD("wdt_2_clk_loco", CLK_QEXTAL, 5, 0, 2, 16,
+ BUS_MSTOP(5, BIT(12))),
+ DEF_MOD("wdt_3_clkp", CLK_PLLCLN_DIV16, 5, 1, 2, 17,
+ BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18,
+ BUS_MSTOP(5, BIT(13))),
DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("i3c_0_pclkrw", CLK_PLLCLN_DIV16, 9, 0, 4, 16,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_pclk", CLK_PLLCLN_DIV16, 9, 1, 4, 17,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_tclk", CLK_PLLCLN_DIV8, 9, 2, 4, 18,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
+ BUS_MSTOP(3, BIT(13))),
+ DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20,
+ BUS_MSTOP(1, BIT(1))),
+ DEF_MOD("riic_1_ckm", CLK_PLLCLN_DIV16, 9, 5, 4, 21,
+ BUS_MSTOP(1, BIT(2))),
+ DEF_MOD("riic_2_ckm", CLK_PLLCLN_DIV16, 9, 6, 4, 22,
+ BUS_MSTOP(1, BIT(3))),
+ DEF_MOD("riic_3_ckm", CLK_PLLCLN_DIV16, 9, 7, 4, 23,
+ BUS_MSTOP(1, BIT(4))),
+ DEF_MOD("riic_4_ckm", CLK_PLLCLN_DIV16, 9, 8, 4, 24,
+ BUS_MSTOP(1, BIT(5))),
+ DEF_MOD("riic_5_ckm", CLK_PLLCLN_DIV16, 9, 9, 4, 25,
+ BUS_MSTOP(1, BIT(6))),
+ DEF_MOD("riic_6_ckm", CLK_PLLCLN_DIV16, 9, 10, 4, 26,
+ BUS_MSTOP(1, BIT(7))),
+ DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27,
+ BUS_MSTOP(1, BIT(8))),
+ DEF_MOD("spi_hclk", CLK_PLLCM33_GEAR, 9, 15, 4, 31,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("spi_aclk", CLK_PLLCM33_GEAR, 10, 0, 5, 0,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("spi_clk_spix2", CLK_PLLCM33_XSPI, 10, 1, 5, 2,
+ BUS_MSTOP(4, BIT(5))),
DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3,
BUS_MSTOP(8, BIT(2))),
DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4,
@@ -120,16 +321,139 @@ static const struct rzv2h_mod_clk r9a09g056_mod_clks[] __initconst = {
BUS_MSTOP(8, BIT(4))),
DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("usb3_0_aclk", CLK_PLLDTY_DIV8, 10, 15, 5, 15,
+ BUS_MSTOP(7, BIT(12))),
+ DEF_MOD("usb3_0_pclk_usbtst", CLK_PLLDTY_ACPU_DIV4, 11, 0, 5, 16,
+ BUS_MSTOP(7, BIT(14))),
+ DEF_MOD("usb2_0_u2h0_hclk", CLK_PLLDTY_DIV8, 11, 3, 5, 19,
+ BUS_MSTOP(7, BIT(7))),
+ DEF_MOD("usb2_0_u2p_exr_cpuclk", CLK_PLLDTY_ACPU_DIV4, 11, 5, 5, 21,
+ BUS_MSTOP(7, BIT(9))),
+ DEF_MOD("usb2_0_pclk_usbtst0", CLK_PLLDTY_ACPU_DIV4, 11, 6, 5, 22,
+ BUS_MSTOP(7, BIT(10))),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_i", CLK_SMUX2_GBE0_TXCLK, 11, 8, 5, 24,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_i", CLK_SMUX2_GBE0_RXCLK, 11, 9, 5, 25,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_180_i", CLK_SMUX2_GBE0_TXCLK, 11, 10, 5, 26,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_180_i", CLK_SMUX2_GBE0_RXCLK, 11, 11, 5, 27,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD("gbeth_0_aclk_csr_i", CLK_PLLDTY_DIV8, 11, 12, 5, 28,
+ BUS_MSTOP(8, BIT(5))),
+ DEF_MOD("gbeth_0_aclk_i", CLK_PLLDTY_DIV8, 11, 13, 5, 29,
+ BUS_MSTOP(8, BIT(5))),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_tx_i", CLK_SMUX2_GBE1_TXCLK, 11, 14, 5, 30,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_rx_i", CLK_SMUX2_GBE1_RXCLK, 11, 15, 5, 31,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_tx_180_i", CLK_SMUX2_GBE1_TXCLK, 12, 0, 6, 0,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_rx_180_i", CLK_SMUX2_GBE1_RXCLK, 12, 1, 6, 1,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD("gbeth_1_aclk_csr_i", CLK_PLLDTY_DIV8, 12, 2, 6, 2,
+ BUS_MSTOP(8, BIT(6))),
+ DEF_MOD("gbeth_1_aclk_i", CLK_PLLDTY_DIV8, 12, 3, 6, 3,
+ BUS_MSTOP(8, BIT(6))),
+ DEF_MOD("cru_0_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 2, 6, 18,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD_NO_PM("cru_0_vclk", CLK_PLLVDO_CRU0, 13, 3, 6, 19,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("cru_0_pclk", CLK_PLLDTY_DIV16, 13, 4, 6, 20,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("cru_1_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 5, 6, 21,
+ BUS_MSTOP(9, BIT(5))),
+ DEF_MOD_NO_PM("cru_1_vclk", CLK_PLLVDO_CRU1, 13, 6, 6, 22,
+ BUS_MSTOP(9, BIT(5))),
+ DEF_MOD("cru_1_pclk", CLK_PLLDTY_DIV16, 13, 7, 6, 23,
+ BUS_MSTOP(9, BIT(5))),
+ DEF_MOD("isp_0_reg_aclk", CLK_PLLDTY_ACPU_DIV2, 14, 2, 7, 2,
+ BUS_MSTOP(9, BIT(8))),
+ DEF_MOD("isp_0_pclk", CLK_PLLDTY_DIV16, 14, 3, 7, 3,
+ BUS_MSTOP(9, BIT(8))),
+ DEF_MOD("isp_0_vin_aclk", CLK_PLLDTY_ACPU_DIV2, 14, 4, 7, 4,
+ BUS_MSTOP(9, BIT(9))),
+ DEF_MOD("isp_0_isp_sclk", CLK_PLLVDO_ISP, 14, 5, 7, 5,
+ BUS_MSTOP(9, BIT(9))),
+ DEF_MOD("dsi_0_pclk", CLK_PLLDTY_DIV16, 14, 8, 7, 8,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("dsi_0_aclk", CLK_PLLDTY_ACPU_DIV2, 14, 9, 7, 9,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("dsi_0_vclk1", CLK_PLLDSI_GEAR, 14, 10, 7, 10,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("dsi_0_lpclk", CLK_PLLETH_LPCLK_GEAR, 14, 11, 7, 11,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("dsi_0_pllref_clk", CLK_QEXTAL, 14, 12, 7, 12,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("lcdc_0_clk_a", CLK_PLLDTY_ACPU_DIV2, 14, 13, 7, 13,
+ BUS_MSTOP(10, BIT(1) | BIT(2) | BIT(3))),
+ DEF_MOD("lcdc_0_clk_p", CLK_PLLDTY_DIV16, 14, 14, 7, 14,
+ BUS_MSTOP(10, BIT(1) | BIT(2) | BIT(3))),
+ DEF_MOD("lcdc_0_clk_d", CLK_PLLDSI_GEAR, 14, 15, 7, 15,
+ BUS_MSTOP(10, BIT(1) | BIT(2) | BIT(3))),
+ DEF_MOD("gpu_0_clk", CLK_PLLGPU_GEAR, 15, 0, 7, 16,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("gpu_0_axi_clk", CLK_PLLDTY_ACPU_DIV2, 15, 1, 7, 17,
+ BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("gpu_0_ace_clk", CLK_PLLDTY_ACPU_DIV2, 15, 2, 7, 18,
+ BUS_MSTOP(3, BIT(4))),
};
static const struct rzv2h_reset r9a09g056_resets[] __initconst = {
DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
+ DEF_RST(6, 13, 2, 30), /* GTM_0_PRESETZ */
+ DEF_RST(6, 14, 2, 31), /* GTM_1_PRESETZ */
+ DEF_RST(6, 15, 3, 0), /* GTM_2_PRESETZ */
+ DEF_RST(7, 0, 3, 1), /* GTM_3_PRESETZ */
+ DEF_RST(7, 1, 3, 2), /* GTM_4_PRESETZ */
+ DEF_RST(7, 2, 3, 3), /* GTM_5_PRESETZ */
+ DEF_RST(7, 3, 3, 4), /* GTM_6_PRESETZ */
+ DEF_RST(7, 4, 3, 5), /* GTM_7_PRESETZ */
+ DEF_RST(7, 5, 3, 6), /* WDT_0_RESET */
+ DEF_RST(7, 6, 3, 7), /* WDT_1_RESET */
+ DEF_RST(7, 7, 3, 8), /* WDT_2_RESET */
+ DEF_RST(7, 8, 3, 9), /* WDT_3_RESET */
DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(9, 6, 4, 7), /* I3C_0_PRESETN */
+ DEF_RST(9, 7, 4, 8), /* I3C_0_TRESETN */
+ DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
+ DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
+ DEF_RST(9, 10, 4, 11), /* RIIC_2_MRST */
+ DEF_RST(9, 11, 4, 12), /* RIIC_3_MRST */
+ DEF_RST(9, 12, 4, 13), /* RIIC_4_MRST */
+ DEF_RST(9, 13, 4, 14), /* RIIC_5_MRST */
+ DEF_RST(9, 14, 4, 15), /* RIIC_6_MRST */
+ DEF_RST(9, 15, 4, 16), /* RIIC_7_MRST */
+ DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
+ DEF_RST(10, 3, 4, 20), /* SPI_HRESETN */
+ DEF_RST(10, 4, 4, 21), /* SPI_ARESETN */
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(10, 10, 4, 27), /* USB3_0_ARESETN */
+ DEF_RST(10, 12, 4, 29), /* USB2_0_U2H0_HRESETN */
+ DEF_RST(10, 14, 4, 31), /* USB2_0_U2P_EXL_SYSRST */
+ DEF_RST(10, 15, 5, 0), /* USB2_0_PRESETN */
+ DEF_RST(11, 0, 5, 1), /* GBETH_0_ARESETN_I */
+ DEF_RST(11, 1, 5, 2), /* GBETH_1_ARESETN_I */
+ DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
+ DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
+ DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
+ DEF_RST(12, 8, 5, 25), /* CRU_1_PRESETN */
+ DEF_RST(12, 9, 5, 26), /* CRU_1_ARESETN */
+ DEF_RST(12, 10, 5, 27), /* CRU_1_S_RESETN */
+ DEF_RST(13, 1, 6, 2), /* ISP_0_VIN_ARESETN */
+ DEF_RST(13, 2, 6, 3), /* ISP_0_REG_ARESETN */
+ DEF_RST(13, 3, 6, 4), /* ISP_0_ISP_SRESETN */
+ DEF_RST(13, 4, 6, 5), /* ISP_0_PRESETN */
+ DEF_RST(13, 7, 6, 8), /* DSI_0_PRESETN */
+ DEF_RST(13, 8, 6, 9), /* DSI_0_ARESETN */
+ DEF_RST(13, 12, 6, 13), /* LCDC_0_RESET_N */
+ DEF_RST(13, 13, 6, 14), /* GPU_0_RESETN */
+ DEF_RST(13, 14, 6, 15), /* GPU_0_AXI_RESETN */
+ DEF_RST(13, 15, 6, 16), /* GPU_0_ACE_RESETN */
};
const struct rzv2h_cpg_info r9a09g056_cpg_info __initconst = {
diff --git a/drivers/clk/renesas/r9a09g057-cpg.c b/drivers/clk/renesas/r9a09g057-cpg.c
index 3c40e36259fe..400d9e94f2e9 100644
--- a/drivers/clk/renesas/r9a09g057-cpg.c
+++ b/drivers/clk/renesas/r9a09g057-cpg.c
@@ -6,6 +6,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/clk/renesas.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -16,7 +17,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G057_GBETH_1_CLK_PTP_REF_I,
+ LAST_DT_CORE_CLK = R9A09G057_USB3_1_CLKCORE,
/* External Input Clocks */
CLK_AUDIO_EXTAL,
@@ -29,12 +30,19 @@ enum clk_ids {
CLK_PLLDTY,
CLK_PLLCA55,
CLK_PLLVDO,
+ CLK_PLLETH,
+ CLK_PLLDSI,
CLK_PLLGPU,
/* Internal Core Clocks */
+ CLK_PLLCM33_DIV3,
CLK_PLLCM33_DIV4,
- CLK_PLLCM33_DIV4_PLLCM33,
+ CLK_PLLCM33_DIV5,
CLK_PLLCM33_DIV16,
+ CLK_PLLCM33_GEAR,
+ CLK_SMUX2_XSPI_CLK0,
+ CLK_SMUX2_XSPI_CLK1,
+ CLK_PLLCM33_XSPI,
CLK_PLLCLN_DIV2,
CLK_PLLCLN_DIV8,
CLK_PLLCLN_DIV16,
@@ -49,6 +57,18 @@ enum clk_ids {
CLK_PLLVDO_CRU1,
CLK_PLLVDO_CRU2,
CLK_PLLVDO_CRU3,
+ CLK_PLLVDO_ISP,
+ CLK_PLLETH_DIV_250_FIX,
+ CLK_PLLETH_DIV_125_FIX,
+ CLK_CSDIV_PLLETH_GBE0,
+ CLK_CSDIV_PLLETH_GBE1,
+ CLK_SMUX2_GBE0_TXCLK,
+ CLK_SMUX2_GBE0_RXCLK,
+ CLK_SMUX2_GBE1_TXCLK,
+ CLK_SMUX2_GBE1_RXCLK,
+ CLK_CDIV4_PLLETH_LPCLK,
+ CLK_PLLETH_LPCLK_GEAR,
+ CLK_PLLDSI_GEAR,
CLK_PLLGPU_GEAR,
/* Module Clocks */
@@ -69,6 +89,34 @@ static const struct clk_div_table dtable_2_4[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_16[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_32[] = {
+ {0, 2},
+ {1, 4},
+ {2, 6},
+ {3, 8},
+ {4, 10},
+ {5, 12},
+ {6, 14},
+ {7, 16},
+ {8, 18},
+ {9, 20},
+ {10, 22},
+ {11, 24},
+ {12, 26},
+ {13, 28},
+ {14, 30},
+ {15, 32},
+ {0, 0},
+};
+
static const struct clk_div_table dtable_2_64[] = {
{0, 2},
{1, 4},
@@ -78,6 +126,32 @@ static const struct clk_div_table dtable_2_64[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_100[] = {
+ {0, 2},
+ {1, 10},
+ {2, 100},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_16_128[] = {
+ {0, 16},
+ {1, 32},
+ {2, 64},
+ {3, 128},
+ {0, 0},
+};
+
+RZV2H_CPG_PLL_DSI_LIMITS(rzv2h_cpg_pll_dsi_limits);
+#define PLLDSI PLL_PACK_LIMITS(0xc0, 1, 0, &rzv2h_cpg_pll_dsi_limits)
+
+/* Mux clock tables */
+static const char * const smux2_gbe0_rxclk[] = { ".plleth_gbe0", "et0_rxclk" };
+static const char * const smux2_gbe0_txclk[] = { ".plleth_gbe0", "et0_txclk" };
+static const char * const smux2_gbe1_rxclk[] = { ".plleth_gbe1", "et1_rxclk" };
+static const char * const smux2_gbe1_txclk[] = { ".plleth_gbe1", "et1_txclk" };
+static const char * const smux2_xspi_clk0[] = { ".pllcm33_div3", ".pllcm33_div4" };
+static const char * const smux2_xspi_clk1[] = { ".smux2_xspi_clk0", ".pllcm33_div5" };
+
static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
@@ -90,13 +164,20 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
+ DEF_FIXED(".plleth", CLK_PLLETH, CLK_QEXTAL, 125, 3),
+ DEF_PLLDSI(".plldsi", CLK_PLLDSI, CLK_QEXTAL, PLLDSI),
DEF_PLL(".pllgpu", CLK_PLLGPU, CLK_QEXTAL, PLLGPU),
/* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div3", CLK_PLLCM33_DIV3, CLK_PLLCM33, 1, 3),
DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
- DEF_DDIV(".pllcm33_div4_pllcm33", CLK_PLLCM33_DIV4_PLLCM33,
- CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
+ DEF_FIXED(".pllcm33_div5", CLK_PLLCM33_DIV5, CLK_PLLCM33, 1, 5),
DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+ DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR, CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
+ DEF_SMUX(".smux2_xspi_clk0", CLK_SMUX2_XSPI_CLK0, SSEL1_SELCTL2, smux2_xspi_clk0),
+ DEF_SMUX(".smux2_xspi_clk1", CLK_SMUX2_XSPI_CLK1, SSEL1_SELCTL3, smux2_xspi_clk1),
+ DEF_CSDIV(".pllcm33_xspi", CLK_PLLCM33_XSPI, CLK_SMUX2_XSPI_CLK1, CSDIV0_DIVCTL3,
+ dtable_2_16),
DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
@@ -114,6 +195,24 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_DDIV(".pllvdo_cru1", CLK_PLLVDO_CRU1, CLK_PLLVDO, CDDIV4_DIVCTL0, dtable_2_4),
DEF_DDIV(".pllvdo_cru2", CLK_PLLVDO_CRU2, CLK_PLLVDO, CDDIV4_DIVCTL1, dtable_2_4),
DEF_DDIV(".pllvdo_cru3", CLK_PLLVDO_CRU3, CLK_PLLVDO, CDDIV4_DIVCTL2, dtable_2_4),
+ DEF_DDIV(".pllvdo_isp", CLK_PLLVDO_ISP, CLK_PLLVDO, CDDIV2_DIVCTL3, dtable_2_64),
+
+ DEF_FIXED(".plleth_250_fix", CLK_PLLETH_DIV_250_FIX, CLK_PLLETH, 1, 4),
+ DEF_FIXED(".plleth_125_fix", CLK_PLLETH_DIV_125_FIX, CLK_PLLETH_DIV_250_FIX, 1, 2),
+ DEF_CSDIV(".plleth_gbe0", CLK_CSDIV_PLLETH_GBE0,
+ CLK_PLLETH_DIV_250_FIX, CSDIV0_DIVCTL0, dtable_2_100),
+ DEF_CSDIV(".plleth_gbe1", CLK_CSDIV_PLLETH_GBE1,
+ CLK_PLLETH_DIV_250_FIX, CSDIV0_DIVCTL1, dtable_2_100),
+ DEF_SMUX(".smux2_gbe0_txclk", CLK_SMUX2_GBE0_TXCLK, SSEL0_SELCTL2, smux2_gbe0_txclk),
+ DEF_SMUX(".smux2_gbe0_rxclk", CLK_SMUX2_GBE0_RXCLK, SSEL0_SELCTL3, smux2_gbe0_rxclk),
+ DEF_SMUX(".smux2_gbe1_txclk", CLK_SMUX2_GBE1_TXCLK, SSEL1_SELCTL0, smux2_gbe1_txclk),
+ DEF_SMUX(".smux2_gbe1_rxclk", CLK_SMUX2_GBE1_RXCLK, SSEL1_SELCTL1, smux2_gbe1_rxclk),
+ DEF_FIXED(".cdiv4_plleth_lpclk", CLK_CDIV4_PLLETH_LPCLK, CLK_PLLETH, 1, 4),
+ DEF_CSDIV(".plleth_lpclk_gear", CLK_PLLETH_LPCLK_GEAR, CLK_CDIV4_PLLETH_LPCLK,
+ CSDIV0_DIVCTL2, dtable_16_128),
+
+ DEF_PLLDSI_DIV(".plldsi_gear", CLK_PLLDSI_GEAR, CLK_PLLDSI,
+ CSDIV1_DIVCTL2, dtable_2_32),
DEF_DDIV(".pllgpu_gear", CLK_PLLGPU_GEAR, CLK_PLLGPU, CDDIV3_DIVCTL1, dtable_2_64),
@@ -130,10 +229,20 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_FIXED("iotop_0_shclk", R9A09G057_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
DEF_FIXED("usb2_0_clk_core0", R9A09G057_USB2_0_CLK_CORE0, CLK_QEXTAL, 1, 1),
DEF_FIXED("usb2_0_clk_core1", R9A09G057_USB2_0_CLK_CORE1, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("gbeth_0_clk_ptp_ref_i", R9A09G057_GBETH_0_CLK_PTP_REF_I,
+ CLK_PLLETH_DIV_125_FIX, 1, 1),
+ DEF_FIXED("gbeth_1_clk_ptp_ref_i", R9A09G057_GBETH_1_CLK_PTP_REF_I,
+ CLK_PLLETH_DIV_125_FIX, 1, 1),
+ DEF_FIXED_MOD_STATUS("spi_clk_spi", R9A09G057_SPI_CLK_SPI, CLK_PLLCM33_XSPI, 1, 2,
+ FIXED_MOD_CONF_XSPI),
+ DEF_FIXED("usb3_0_ref_alt_clk_p", R9A09G057_USB3_0_REF_ALT_CLK_P, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("usb3_0_core_clk", R9A09G057_USB3_0_CLKCORE, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("usb3_1_ref_alt_clk_p", R9A09G057_USB3_1_REF_ALT_CLK_P, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("usb3_1_core_clk", R9A09G057_USB3_1_CLKCORE, CLK_QEXTAL, 1, 1),
};
static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
- DEF_MOD("dmac_0_aclk", CLK_PLLCM33_DIV4_PLLCM33, 0, 0, 0, 0,
+ DEF_MOD("dmac_0_aclk", CLK_PLLCM33_GEAR, 0, 0, 0, 0,
BUS_MSTOP(5, BIT(9))),
DEF_MOD("dmac_1_aclk", CLK_PLLDTY_ACPU_DIV2, 0, 1, 0, 1,
BUS_MSTOP(3, BIT(2))),
@@ -179,8 +288,34 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(5, BIT(13))),
DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18,
BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("rtc_0_clk_rtc", CLK_PLLCM33_DIV16, 5, 3, 2, 19,
+ BUS_MSTOP(3, BIT(11) | BIT(12))),
+ DEF_MOD("rspi_0_pclk", CLK_PLLCLN_DIV8, 5, 4, 2, 20,
+ BUS_MSTOP(11, BIT(0))),
+ DEF_MOD("rspi_0_pclk_sfr", CLK_PLLCLN_DIV8, 5, 5, 2, 21,
+ BUS_MSTOP(11, BIT(0))),
+ DEF_MOD("rspi_0_tclk", CLK_PLLCLN_DIV8, 5, 6, 2, 22,
+ BUS_MSTOP(11, BIT(0))),
+ DEF_MOD("rspi_1_pclk", CLK_PLLCLN_DIV8, 5, 7, 2, 23,
+ BUS_MSTOP(11, BIT(1))),
+ DEF_MOD("rspi_1_pclk_sfr", CLK_PLLCLN_DIV8, 5, 8, 2, 24,
+ BUS_MSTOP(11, BIT(1))),
+ DEF_MOD("rspi_1_tclk", CLK_PLLCLN_DIV8, 5, 9, 2, 25,
+ BUS_MSTOP(11, BIT(1))),
+ DEF_MOD("rspi_2_pclk", CLK_PLLCLN_DIV8, 5, 10, 2, 26,
+ BUS_MSTOP(11, BIT(2))),
+ DEF_MOD("rspi_2_pclk_sfr", CLK_PLLCLN_DIV8, 5, 11, 2, 27,
+ BUS_MSTOP(11, BIT(2))),
+ DEF_MOD("rspi_2_tclk", CLK_PLLCLN_DIV8, 5, 12, 2, 28,
+ BUS_MSTOP(11, BIT(2))),
DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("i3c_0_pclkrw", CLK_PLLCLN_DIV16, 9, 0, 4, 16,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_pclk", CLK_PLLCLN_DIV16, 9, 1, 4, 17,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_tclk", CLK_PLLCLN_DIV8, 9, 2, 4, 18,
+ BUS_MSTOP(10, BIT(15))),
DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
BUS_MSTOP(3, BIT(13))),
DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20,
@@ -199,6 +334,12 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(1, BIT(7))),
DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27,
BUS_MSTOP(1, BIT(8))),
+ DEF_MOD("spi_hclk", CLK_PLLCM33_GEAR, 9, 15, 4, 31,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("spi_aclk", CLK_PLLCM33_GEAR, 10, 0, 5, 0,
+ BUS_MSTOP(4, BIT(5))),
+ DEF_MOD("spi_clk_spix2", CLK_PLLCM33_XSPI, 10, 1, 5, 2,
+ BUS_MSTOP(4, BIT(5))),
DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3,
BUS_MSTOP(8, BIT(2))),
DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4,
@@ -223,6 +364,14 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(8, BIT(4))),
DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("usb3_0_aclk", CLK_PLLDTY_DIV8, 10, 15, 5, 15,
+ BUS_MSTOP(7, BIT(12))),
+ DEF_MOD("usb3_0_pclk_usbtst", CLK_PLLDTY_ACPU_DIV4, 11, 0, 5, 16,
+ BUS_MSTOP(7, BIT(14))),
+ DEF_MOD("usb3_1_aclk", CLK_PLLDTY_DIV8, 11, 1, 5, 17,
+ BUS_MSTOP(7, BIT(13))),
+ DEF_MOD("usb3_1_pclk_usbtst", CLK_PLLDTY_ACPU_DIV4, 11, 2, 5, 18,
+ BUS_MSTOP(7, BIT(15))),
DEF_MOD("usb2_0_u2h0_hclk", CLK_PLLDTY_DIV8, 11, 3, 5, 19,
BUS_MSTOP(7, BIT(7))),
DEF_MOD("usb2_0_u2h1_hclk", CLK_PLLDTY_DIV8, 11, 4, 5, 20,
@@ -233,6 +382,30 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(7, BIT(10))),
DEF_MOD("usb2_0_pclk_usbtst1", CLK_PLLDTY_ACPU_DIV4, 11, 7, 5, 23,
BUS_MSTOP(7, BIT(11))),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_i", CLK_SMUX2_GBE0_TXCLK, 11, 8, 5, 24,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_i", CLK_SMUX2_GBE0_RXCLK, 11, 9, 5, 25,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_180_i", CLK_SMUX2_GBE0_TXCLK, 11, 10, 5, 26,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_180_i", CLK_SMUX2_GBE0_RXCLK, 11, 11, 5, 27,
+ BUS_MSTOP(8, BIT(5)), 1),
+ DEF_MOD("gbeth_0_aclk_csr_i", CLK_PLLDTY_DIV8, 11, 12, 5, 28,
+ BUS_MSTOP(8, BIT(5))),
+ DEF_MOD("gbeth_0_aclk_i", CLK_PLLDTY_DIV8, 11, 13, 5, 29,
+ BUS_MSTOP(8, BIT(5))),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_tx_i", CLK_SMUX2_GBE1_TXCLK, 11, 14, 5, 30,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_rx_i", CLK_SMUX2_GBE1_RXCLK, 11, 15, 5, 31,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_tx_180_i", CLK_SMUX2_GBE1_TXCLK, 12, 0, 6, 0,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD_MUX_EXTERNAL("gbeth_1_clk_rx_180_i", CLK_SMUX2_GBE1_RXCLK, 12, 1, 6, 1,
+ BUS_MSTOP(8, BIT(6)), 1),
+ DEF_MOD("gbeth_1_aclk_csr_i", CLK_PLLDTY_DIV8, 12, 2, 6, 2,
+ BUS_MSTOP(8, BIT(6))),
+ DEF_MOD("gbeth_1_aclk_i", CLK_PLLDTY_DIV8, 12, 3, 6, 3,
+ BUS_MSTOP(8, BIT(6))),
DEF_MOD("cru_0_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 2, 6, 18,
BUS_MSTOP(9, BIT(4))),
DEF_MOD_NO_PM("cru_0_vclk", CLK_PLLVDO_CRU0, 13, 3, 6, 19,
@@ -257,12 +430,40 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(9, BIT(7))),
DEF_MOD("cru_3_pclk", CLK_PLLDTY_DIV16, 13, 13, 6, 29,
BUS_MSTOP(9, BIT(7))),
+ DEF_MOD("isp_0_reg_aclk", CLK_PLLDTY_ACPU_DIV2, 14, 2, 7, 2,
+ BUS_MSTOP(9, BIT(8))),
+ DEF_MOD("isp_0_pclk", CLK_PLLDTY_DIV16, 14, 3, 7, 3,
+ BUS_MSTOP(9, BIT(8))),
+ DEF_MOD("isp_0_vin_aclk", CLK_PLLDTY_ACPU_DIV2, 14, 4, 7, 4,
+ BUS_MSTOP(9, BIT(9))),
+ DEF_MOD("isp_0_isp_sclk", CLK_PLLVDO_ISP, 14, 5, 7, 5,
+ BUS_MSTOP(9, BIT(9))),
+ DEF_MOD("dsi_0_pclk", CLK_PLLDTY_DIV16, 14, 8, 7, 8,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("dsi_0_aclk", CLK_PLLDTY_ACPU_DIV2, 14, 9, 7, 9,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("dsi_0_vclk1", CLK_PLLDSI_GEAR, 14, 10, 7, 10,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("dsi_0_lpclk", CLK_PLLETH_LPCLK_GEAR, 14, 11, 7, 11,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("dsi_0_pllref_clk", CLK_QEXTAL, 14, 12, 7, 12,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("lcdc_0_clk_a", CLK_PLLDTY_ACPU_DIV2, 14, 13, 7, 13,
+ BUS_MSTOP(10, BIT(1) | BIT(2) | BIT(3))),
+ DEF_MOD("lcdc_0_clk_p", CLK_PLLDTY_DIV16, 14, 14, 7, 14,
+ BUS_MSTOP(10, BIT(1) | BIT(2) | BIT(3))),
+ DEF_MOD("lcdc_0_clk_d", CLK_PLLDSI_GEAR, 14, 15, 7, 15,
+ BUS_MSTOP(10, BIT(1) | BIT(2) | BIT(3))),
DEF_MOD("gpu_0_clk", CLK_PLLGPU_GEAR, 15, 0, 7, 16,
BUS_MSTOP(3, BIT(4))),
DEF_MOD("gpu_0_axi_clk", CLK_PLLDTY_ACPU_DIV2, 15, 1, 7, 17,
BUS_MSTOP(3, BIT(4))),
DEF_MOD("gpu_0_ace_clk", CLK_PLLDTY_ACPU_DIV2, 15, 2, 7, 18,
BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("tsu_0_pclk", CLK_QEXTAL, 16, 9, 8, 9,
+ BUS_MSTOP(5, BIT(2))),
+ DEF_MOD("tsu_1_pclk", CLK_QEXTAL, 16, 10, 8, 10,
+ BUS_MSTOP(2, BIT(15))),
};
static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
@@ -287,7 +488,17 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(7, 6, 3, 7), /* WDT_1_RESET */
DEF_RST(7, 7, 3, 8), /* WDT_2_RESET */
DEF_RST(7, 8, 3, 9), /* WDT_3_RESET */
+ DEF_RST(7, 9, 3, 10), /* RTC_0_RST_RTC */
+ DEF_RST(7, 10, 3, 11), /* RTC_0_RST_RTC_V */
+ DEF_RST(7, 11, 3, 12), /* RSPI_0_PRESETN */
+ DEF_RST(7, 12, 3, 13), /* RSPI_0_TRESETN */
+ DEF_RST(7, 13, 3, 14), /* RSPI_1_PRESETN */
+ DEF_RST(7, 14, 3, 15), /* RSPI_1_TRESETN */
+ DEF_RST(7, 15, 3, 16), /* RSPI_2_PRESETN */
+ DEF_RST(8, 0, 3, 17), /* RSPI_2_TRESETN */
DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(9, 6, 4, 7), /* I3C_0_PRESETN */
+ DEF_RST(9, 7, 4, 8), /* I3C_0_TRESETN */
DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
DEF_RST(9, 10, 4, 11), /* RIIC_2_MRST */
@@ -297,13 +508,19 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(9, 14, 4, 15), /* RIIC_6_MRST */
DEF_RST(9, 15, 4, 16), /* RIIC_7_MRST */
DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
+ DEF_RST(10, 3, 4, 20), /* SPI_HRESETN */
+ DEF_RST(10, 4, 4, 21), /* SPI_ARESETN */
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(10, 10, 4, 27), /* USB3_0_ARESETN */
+ DEF_RST(10, 11, 4, 28), /* USB3_1_ARESETN */
DEF_RST(10, 12, 4, 29), /* USB2_0_U2H0_HRESETN */
DEF_RST(10, 13, 4, 30), /* USB2_0_U2H1_HRESETN */
DEF_RST(10, 14, 4, 31), /* USB2_0_U2P_EXL_SYSRST */
DEF_RST(10, 15, 5, 0), /* USB2_0_PRESETN */
+ DEF_RST(11, 0, 5, 1), /* GBETH_0_ARESETN_I */
+ DEF_RST(11, 1, 5, 2), /* GBETH_1_ARESETN_I */
DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
@@ -316,9 +533,18 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(12, 14, 5, 31), /* CRU_3_PRESETN */
DEF_RST(12, 15, 6, 0), /* CRU_3_ARESETN */
DEF_RST(13, 0, 6, 1), /* CRU_3_S_RESETN */
+ DEF_RST(13, 1, 6, 2), /* ISP_0_VIN_ARESETN */
+ DEF_RST(13, 2, 6, 3), /* ISP_0_REG_ARESETN */
+ DEF_RST(13, 3, 6, 4), /* ISP_0_ISP_SRESETN */
+ DEF_RST(13, 4, 6, 5), /* ISP_0_PRESETN */
+ DEF_RST(13, 7, 6, 8), /* DSI_0_PRESETN */
+ DEF_RST(13, 8, 6, 9), /* DSI_0_ARESETN */
+ DEF_RST(13, 12, 6, 13), /* LCDC_0_RESET_N */
DEF_RST(13, 13, 6, 14), /* GPU_0_RESETN */
DEF_RST(13, 14, 6, 15), /* GPU_0_AXI_RESETN */
DEF_RST(13, 15, 6, 16), /* GPU_0_ACE_RESETN */
+ DEF_RST(15, 7, 7, 8), /* TSU_0_PRESETN */
+ DEF_RST(15, 8, 7, 9), /* TSU_1_PRESETN */
};
const struct rzv2h_cpg_info r9a09g057_cpg_info __initconst = {
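
In all three rzv2h-style module tables above, a DEF_MOD() row carries two register/bit pairs: (onindex, onbit) selects the CLK_ON control register and bit, and (monindex, monbit) the CLK_MON status register and bit used to confirm the gate actually switched. A hedged sketch of the usual Renesas write-enable idiom follows; the base offset here is a placeholder, not the real one (the authoritative register layout lives in rzv2h-cpg.c).

#include <stdint.h>

#define SKETCH_CLKON_BASE	0x600U	/* placeholder offset, assumption */

/* Gate a module clock from a DEF_MOD() (onindex, onbit) pair. */
static void sketch_clk_on(volatile uint32_t *cpg, unsigned int onindex,
			  unsigned int onbit, int enable)
{
	uint32_t bit = 1U << onbit;	/* onbit is 0..15 in these tables */
	/* Upper 16 bits are per-bit write enables, lower 16 the new value. */
	uint32_t val = (bit << 16) | (enable ? bit : 0);

	cpg[(SKETCH_CLKON_BASE + onindex * 4) / 4] = val;
}
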
diff --git a/drivers/clk/renesas/r9a09g077-cpg.c b/drivers/clk/renesas/r9a09g077-cpg.c
new file mode 100644
index 000000000000..fb6cc94d08a1
--- /dev/null
+++ b/drivers/clk/renesas/r9a09g077-cpg.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r9a09g077 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h>
+#include <dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h>
+#include "renesas-cpg-mssr.h"
+
+#define RZT2H_REG_BLOCK_SHIFT 11
+#define RZT2H_REG_OFFSET_MASK GENMASK(10, 0)
+#define RZT2H_REG_CONF(block, offset) (((block) << RZT2H_REG_BLOCK_SHIFT) | \
+ ((offset) & RZT2H_REG_OFFSET_MASK))
+
+#define RZT2H_REG_BLOCK(x) ((x) >> RZT2H_REG_BLOCK_SHIFT)
+#define RZT2H_REG_OFFSET(x) ((x) & RZT2H_REG_OFFSET_MASK)
+
+#define SCKCR RZT2H_REG_CONF(0, 0x00)
+#define SCKCR2 RZT2H_REG_CONF(1, 0x04)
+#define SCKCR3 RZT2H_REG_CONF(0, 0x08)
+
+#define OFFSET_MASK GENMASK(31, 20)
+#define SHIFT_MASK GENMASK(19, 12)
+#define WIDTH_MASK GENMASK(11, 8)
+
+#define CONF_PACK(offset, shift, width) \
+ (FIELD_PREP_CONST(OFFSET_MASK, (offset)) | \
+ FIELD_PREP_CONST(SHIFT_MASK, (shift)) | \
+ FIELD_PREP_CONST(WIDTH_MASK, (width)))
+
+#define GET_SHIFT(val) FIELD_GET(SHIFT_MASK, val)
+#define GET_WIDTH(val) FIELD_GET(WIDTH_MASK, val)
+#define GET_REG_OFFSET(val) FIELD_GET(OFFSET_MASK, val)
+
+#define DIVCA55C0 CONF_PACK(SCKCR2, 8, 1)
+#define DIVCA55C1 CONF_PACK(SCKCR2, 9, 1)
+#define DIVCA55C2 CONF_PACK(SCKCR2, 10, 1)
+#define DIVCA55C3 CONF_PACK(SCKCR2, 11, 1)
+#define DIVCA55S CONF_PACK(SCKCR2, 12, 1)
+#define DIVSPI3ASYNC CONF_PACK(SCKCR2, 16, 2)
+#define DIVSCI5ASYNC CONF_PACK(SCKCR2, 18, 2)
+
+#define DIVSPI0ASYNC CONF_PACK(SCKCR3, 0, 2)
+#define DIVSPI1ASYNC CONF_PACK(SCKCR3, 2, 2)
+#define DIVSPI2ASYNC CONF_PACK(SCKCR3, 4, 2)
+#define DIVSCI0ASYNC CONF_PACK(SCKCR3, 6, 2)
+#define DIVSCI1ASYNC CONF_PACK(SCKCR3, 8, 2)
+#define DIVSCI2ASYNC CONF_PACK(SCKCR3, 10, 2)
+#define DIVSCI3ASYNC CONF_PACK(SCKCR3, 12, 2)
+#define DIVSCI4ASYNC CONF_PACK(SCKCR3, 14, 2)
+
+#define SEL_PLL CONF_PACK(SCKCR, 22, 1)
+
+enum rzt2h_clk_types {
+ CLK_TYPE_RZT2H_DIV = CLK_TYPE_CUSTOM, /* Clock with divider */
+ CLK_TYPE_RZT2H_MUX, /* Clock with clock source selector */
+};
+
+#define DEF_DIV(_name, _id, _parent, _conf, _dtable) \
+ DEF_TYPE(_name, _id, CLK_TYPE_RZT2H_DIV, .conf = _conf, \
+ .parent = _parent, .dtable = _dtable, .flag = 0)
+#define DEF_MUX(_name, _id, _conf, _parent_names, _num_parents, _mux_flags) \
+ DEF_TYPE(_name, _id, CLK_TYPE_RZT2H_MUX, .conf = _conf, \
+ .parent_names = _parent_names, .num_parents = _num_parents, \
+ .flag = 0, .mux_flags = _mux_flags)
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R9A09G077_ETCLKE,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_LOCO,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL2,
+ CLK_PLL4,
+ CLK_SEL_CLK_PLL0,
+ CLK_SEL_CLK_PLL1,
+ CLK_SEL_CLK_PLL2,
+ CLK_SEL_CLK_PLL4,
+ CLK_PLL4D1,
+ CLK_SCI0ASYNC,
+ CLK_SCI1ASYNC,
+ CLK_SCI2ASYNC,
+ CLK_SCI3ASYNC,
+ CLK_SCI4ASYNC,
+ CLK_SCI5ASYNC,
+ CLK_SPI0ASYNC,
+ CLK_SPI1ASYNC,
+ CLK_SPI2ASYNC,
+ CLK_SPI3ASYNC,
+
+ /* Module Clocks */
+ MOD_CLK_BASE,
+};
+
+static const struct clk_div_table dtable_1_2[] = {
+ {0, 2},
+ {1, 1},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_24_25_30_32[] = {
+ {0, 32},
+ {1, 30},
+ {2, 25},
+ {3, 24},
+ {0, 0},
+};
+
+/* Mux clock tables */
+
+static const char * const sel_clk_pll0[] = { ".loco", ".pll0" };
+static const char * const sel_clk_pll1[] = { ".loco", ".pll1" };
+static const char * const sel_clk_pll2[] = { ".loco", ".pll2" };
+static const char * const sel_clk_pll4[] = { ".loco", ".pll4" };
+
+static const struct cpg_core_clk r9a09g077_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_RATE(".loco", CLK_LOCO, 1000 * 1000),
+ DEF_FIXED(".pll0", CLK_PLL0, CLK_EXTAL, 1, 48),
+ DEF_FIXED(".pll1", CLK_PLL1, CLK_EXTAL, 1, 40),
+ DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 1, 32),
+ DEF_FIXED(".pll4", CLK_PLL4, CLK_EXTAL, 1, 96),
+
+ DEF_MUX(".sel_clk_pll0", CLK_SEL_CLK_PLL0, SEL_PLL,
+ sel_clk_pll0, ARRAY_SIZE(sel_clk_pll0), CLK_MUX_READ_ONLY),
+ DEF_MUX(".sel_clk_pll1", CLK_SEL_CLK_PLL1, SEL_PLL,
+ sel_clk_pll1, ARRAY_SIZE(sel_clk_pll1), CLK_MUX_READ_ONLY),
+ DEF_MUX(".sel_clk_pll2", CLK_SEL_CLK_PLL2, SEL_PLL,
+ sel_clk_pll2, ARRAY_SIZE(sel_clk_pll2), CLK_MUX_READ_ONLY),
+ DEF_MUX(".sel_clk_pll4", CLK_SEL_CLK_PLL4, SEL_PLL,
+ sel_clk_pll4, ARRAY_SIZE(sel_clk_pll4), CLK_MUX_READ_ONLY),
+
+ DEF_FIXED(".pll4d1", CLK_PLL4D1, CLK_SEL_CLK_PLL4, 1, 1),
+ DEF_DIV(".sci0async", CLK_SCI0ASYNC, CLK_PLL4D1, DIVSCI0ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".sci1async", CLK_SCI1ASYNC, CLK_PLL4D1, DIVSCI1ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".sci2async", CLK_SCI2ASYNC, CLK_PLL4D1, DIVSCI2ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".sci3async", CLK_SCI3ASYNC, CLK_PLL4D1, DIVSCI3ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".sci4async", CLK_SCI4ASYNC, CLK_PLL4D1, DIVSCI4ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".sci5async", CLK_SCI5ASYNC, CLK_PLL4D1, DIVSCI5ASYNC,
+ dtable_24_25_30_32),
+
+ DEF_DIV(".spi0async", CLK_SPI0ASYNC, CLK_PLL4D1, DIVSPI0ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".spi1async", CLK_SPI1ASYNC, CLK_PLL4D1, DIVSPI1ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".spi2async", CLK_SPI2ASYNC, CLK_PLL4D1, DIVSPI2ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".spi3async", CLK_SPI3ASYNC, CLK_PLL4D1, DIVSPI3ASYNC,
+ dtable_24_25_30_32),
+
+ /* Core output clk */
+ DEF_DIV("CA55C0", R9A09G077_CLK_CA55C0, CLK_SEL_CLK_PLL0, DIVCA55C0,
+ dtable_1_2),
+ DEF_DIV("CA55C1", R9A09G077_CLK_CA55C1, CLK_SEL_CLK_PLL0, DIVCA55C1,
+ dtable_1_2),
+ DEF_DIV("CA55C2", R9A09G077_CLK_CA55C2, CLK_SEL_CLK_PLL0, DIVCA55C2,
+ dtable_1_2),
+ DEF_DIV("CA55C3", R9A09G077_CLK_CA55C3, CLK_SEL_CLK_PLL0, DIVCA55C3,
+ dtable_1_2),
+ DEF_DIV("CA55S", R9A09G077_CLK_CA55S, CLK_SEL_CLK_PLL0, DIVCA55S,
+ dtable_1_2),
+ DEF_FIXED("PCLKGPTL", R9A09G077_CLK_PCLKGPTL, CLK_SEL_CLK_PLL1, 2, 1),
+ DEF_FIXED("PCLKH", R9A09G077_CLK_PCLKH, CLK_SEL_CLK_PLL1, 4, 1),
+ DEF_FIXED("PCLKM", R9A09G077_CLK_PCLKM, CLK_SEL_CLK_PLL1, 8, 1),
+ DEF_FIXED("PCLKL", R9A09G077_CLK_PCLKL, CLK_SEL_CLK_PLL1, 16, 1),
+ DEF_FIXED("PCLKAH", R9A09G077_CLK_PCLKAH, CLK_PLL4D1, 6, 1),
+ DEF_FIXED("PCLKAM", R9A09G077_CLK_PCLKAM, CLK_PLL4D1, 12, 1),
+ DEF_FIXED("SDHI_CLKHS", R9A09G077_SDHI_CLKHS, CLK_SEL_CLK_PLL2, 1, 1),
+ DEF_FIXED("USB_CLK", R9A09G077_USB_CLK, CLK_PLL4D1, 48, 1),
+ DEF_FIXED("ETCLKA", R9A09G077_ETCLKA, CLK_SEL_CLK_PLL1, 5, 1),
+ DEF_FIXED("ETCLKB", R9A09G077_ETCLKB, CLK_SEL_CLK_PLL1, 8, 1),
+ DEF_FIXED("ETCLKC", R9A09G077_ETCLKC, CLK_SEL_CLK_PLL1, 10, 1),
+ DEF_FIXED("ETCLKD", R9A09G077_ETCLKD, CLK_SEL_CLK_PLL1, 20, 1),
+ DEF_FIXED("ETCLKE", R9A09G077_ETCLKE, CLK_SEL_CLK_PLL1, 40, 1),
+};
+
+static const struct mssr_mod_clk r9a09g077_mod_clks[] __initconst = {
+ DEF_MOD("sci0fck", 8, CLK_SCI0ASYNC),
+ DEF_MOD("sci1fck", 9, CLK_SCI1ASYNC),
+ DEF_MOD("sci2fck", 10, CLK_SCI2ASYNC),
+ DEF_MOD("sci3fck", 11, CLK_SCI3ASYNC),
+ DEF_MOD("sci4fck", 12, CLK_SCI4ASYNC),
+ DEF_MOD("iic0", 100, R9A09G077_CLK_PCLKL),
+ DEF_MOD("iic1", 101, R9A09G077_CLK_PCLKL),
+ DEF_MOD("spi0", 104, CLK_SPI0ASYNC),
+ DEF_MOD("spi1", 105, CLK_SPI1ASYNC),
+ DEF_MOD("spi2", 106, CLK_SPI2ASYNC),
+ DEF_MOD("adc0", 206, R9A09G077_CLK_PCLKH),
+ DEF_MOD("adc1", 207, R9A09G077_CLK_PCLKH),
+ DEF_MOD("adc2", 225, R9A09G077_CLK_PCLKM),
+ DEF_MOD("tsu", 307, R9A09G077_CLK_PCLKL),
+ DEF_MOD("gmac0", 400, R9A09G077_CLK_PCLKM),
+ DEF_MOD("ethsw", 401, R9A09G077_CLK_PCLKM),
+ DEF_MOD("ethss", 403, R9A09G077_CLK_PCLKM),
+ DEF_MOD("usb", 408, R9A09G077_CLK_PCLKAM),
+ DEF_MOD("gmac1", 416, R9A09G077_CLK_PCLKAM),
+ DEF_MOD("gmac2", 417, R9A09G077_CLK_PCLKAM),
+ DEF_MOD("sci5fck", 600, CLK_SCI5ASYNC),
+ DEF_MOD("iic2", 601, R9A09G077_CLK_PCLKL),
+ DEF_MOD("spi3", 602, CLK_SPI3ASYNC),
+ DEF_MOD("sdhi0", 1212, R9A09G077_CLK_PCLKAM),
+ DEF_MOD("sdhi1", 1213, R9A09G077_CLK_PCLKAM),
+};
+
+static struct clk * __init
+r9a09g077_cpg_div_clk_register(struct device *dev,
+ const struct cpg_core_clk *core,
+ void __iomem *addr, struct cpg_mssr_pub *pub)
+{
+ const struct clk *parent;
+ const char *parent_name;
+ struct clk_hw *clk_hw;
+
+ parent = pub->clks[core->parent];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ parent_name = __clk_get_name(parent);
+
+ if (core->dtable)
+ clk_hw = devm_clk_hw_register_divider_table(dev, core->name,
+ parent_name,
+ CLK_SET_RATE_PARENT,
+ addr,
+ GET_SHIFT(core->conf),
+ GET_WIDTH(core->conf),
+ core->flag,
+ core->dtable,
+ &pub->rmw_lock);
+ else
+ clk_hw = devm_clk_hw_register_divider(dev, core->name,
+ parent_name,
+ CLK_SET_RATE_PARENT,
+ addr,
+ GET_SHIFT(core->conf),
+ GET_WIDTH(core->conf),
+ core->flag, &pub->rmw_lock);
+
+ if (IS_ERR(clk_hw))
+ return ERR_CAST(clk_hw);
+
+ return clk_hw->clk;
+}
+
+static struct clk * __init
+r9a09g077_cpg_mux_clk_register(struct device *dev,
+ const struct cpg_core_clk *core,
+ void __iomem *addr, struct cpg_mssr_pub *pub)
+{
+ struct clk_hw *clk_hw;
+
+ clk_hw = devm_clk_hw_register_mux(dev, core->name,
+ core->parent_names, core->num_parents,
+ core->flag,
+ addr,
+ GET_SHIFT(core->conf),
+ GET_WIDTH(core->conf),
+ core->mux_flags, &pub->rmw_lock);
+ if (IS_ERR(clk_hw))
+ return ERR_CAST(clk_hw);
+
+ return clk_hw->clk;
+}
+
+static struct clk * __init
+r9a09g077_cpg_clk_register(struct device *dev, const struct cpg_core_clk *core,
+ const struct cpg_mssr_info *info,
+ struct cpg_mssr_pub *pub)
+{
+ u32 offset = GET_REG_OFFSET(core->conf);
+ void __iomem *base = RZT2H_REG_BLOCK(offset) ? pub->base1 : pub->base0;
+ void __iomem *addr = base + RZT2H_REG_OFFSET(offset);
+
+ switch (core->type) {
+ case CLK_TYPE_RZT2H_DIV:
+ return r9a09g077_cpg_div_clk_register(dev, core, addr, pub);
+ case CLK_TYPE_RZT2H_MUX:
+ return r9a09g077_cpg_mux_clk_register(dev, core, addr, pub);
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+const struct cpg_mssr_info r9a09g077_cpg_mssr_info = {
+ /* Core Clocks */
+ .core_clks = r9a09g077_core_clks,
+ .num_core_clks = ARRAY_SIZE(r9a09g077_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r9a09g077_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r9a09g077_mod_clks),
+ .num_hw_mod_clks = 14 * 32,
+
+ .reg_layout = CLK_REG_LAYOUT_RZ_T2H,
+ .cpg_clk_register = r9a09g077_cpg_clk_register,
+};
diff --git a/drivers/clk/renesas/rcar-cpg-lib.c b/drivers/clk/renesas/rcar-cpg-lib.c
index a45f8e7e9ab6..7b271de7037a 100644
--- a/drivers/clk/renesas/rcar-cpg-lib.c
+++ b/drivers/clk/renesas/rcar-cpg-lib.c
@@ -35,7 +35,7 @@ void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set)
val |= set;
writel(val, reg);
spin_unlock_irqrestore(&cpg_lock, flags);
-};
+}
static int cpg_simple_notifier_call(struct notifier_block *nb,
unsigned long action, void *data)
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.c b/drivers/clk/renesas/rcar-gen2-cpg.c
index 4c3764972bad..ab34bb8c3e07 100644
--- a/drivers/clk/renesas/rcar-gen2-cpg.c
+++ b/drivers/clk/renesas/rcar-gen2-cpg.c
@@ -274,10 +274,11 @@ static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
struct clk * __init rcar_gen2_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers)
+ struct cpg_mssr_pub *pub)
{
const struct clk_div_table *table = NULL;
+ void __iomem *base = pub->base0;
+ struct clk **clks = pub->clks;
const struct clk *parent;
const char *parent_name;
unsigned int mult = 1;
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.h b/drivers/clk/renesas/rcar-gen2-cpg.h
index bdcd4a38d48d..3d4b127fdeaf 100644
--- a/drivers/clk/renesas/rcar-gen2-cpg.h
+++ b/drivers/clk/renesas/rcar-gen2-cpg.h
@@ -32,8 +32,7 @@ struct rcar_gen2_cpg_pll_config {
struct clk *rcar_gen2_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers);
+ struct cpg_mssr_pub *pub);
int rcar_gen2_cpg_init(const struct rcar_gen2_cpg_pll_config *config,
unsigned int pll0_div, u32 mode);
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 027100e84ee4..b954278ddd9d 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -54,10 +54,8 @@ static unsigned long cpg_pll_clk_recalc_rate(struct clk_hw *hw,
{
struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
unsigned int mult;
- u32 val;
- val = readl(pll_clk->pllcr_reg) & CPG_PLLnCR_STC_MASK;
- mult = (val >> __ffs(CPG_PLLnCR_STC_MASK)) + 1;
+ mult = FIELD_GET(CPG_PLLnCR_STC_MASK, readl(pll_clk->pllcr_reg)) + 1;
return parent_rate * mult * pll_clk->fixed_mult;
}
@@ -94,7 +92,7 @@ static int cpg_pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
val = readl(pll_clk->pllcr_reg);
val &= ~CPG_PLLnCR_STC_MASK;
- val |= (mult - 1) << __ffs(CPG_PLLnCR_STC_MASK);
+ val |= FIELD_PREP(CPG_PLLnCR_STC_MASK, mult - 1);
writel(val, pll_clk->pllcr_reg);
for (i = 1000; i; i--) {
@@ -176,11 +174,7 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int mult;
- u32 val;
-
- val = readl(zclk->reg) & zclk->mask;
- mult = 32 - (val >> __ffs(zclk->mask));
+ unsigned int mult = 32 - field_get(zclk->mask, readl(zclk->reg));
return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
32 * zclk->fixed_div);
@@ -231,7 +225,8 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
return -EBUSY;
- cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
+ cpg_reg_modify(zclk->reg, zclk->mask,
+ field_prep(zclk->mask, 32 - mult));
/*
* Set KICK bit in FRQCRB to update hardware setting and wait for
@@ -345,9 +340,11 @@ static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers)
+ struct cpg_mssr_pub *pub)
{
+ struct raw_notifier_head *notifiers = &pub->notifiers;
+ void __iomem *base = pub->base0;
+ struct clk **clks = pub->clks;
const struct clk *parent;
unsigned int mult = 1;
unsigned int div = 1;
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index bfdc649bdf12..d15a5d1df71c 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -81,8 +81,7 @@ struct rcar_gen3_cpg_pll_config {
struct clk *rcar_gen3_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers);
+ struct cpg_mssr_pub *pub);
int rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config,
unsigned int clk_extalr, u32 mode);
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.c b/drivers/clk/renesas/rcar-gen4-cpg.c
index 31aa790fd003..ac2b5afec46d 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.c
+++ b/drivers/clk/renesas/rcar-gen4-cpg.c
@@ -257,7 +257,7 @@ static struct clk * __init cpg_pll_clk_register(const char *name,
}
/*
- * Z0 Clock & Z1 Clock
+ * Z0, Z1 and ZG Clock
*/
#define CPG_FRQCRB 0x00000804
#define CPG_FRQCRB_KICK BIT(31)
@@ -279,11 +279,7 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int mult;
- u32 val;
-
- val = readl(zclk->reg) & zclk->mask;
- mult = 32 - (val >> __ffs(zclk->mask));
+ unsigned int mult = 32 - field_get(zclk->mask, readl(zclk->reg));
return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
32 * zclk->fixed_div);
@@ -334,7 +330,8 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
return -EBUSY;
- cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
+ cpg_reg_modify(zclk->reg, zclk->mask,
+ field_prep(zclk->mask, 32 - mult));
/*
* Set KICK bit in FRQCRB to update hardware setting and wait for
@@ -389,9 +386,14 @@ static struct clk * __init cpg_z_clk_register(const char *name,
if (offset < 32) {
zclk->reg = reg + CPG_FRQCRC0;
- } else {
+ } else if (offset < 64) {
zclk->reg = reg + CPG_FRQCRC1;
offset -= 32;
+ } else if (offset < 96) {
+ zclk->reg = reg + CPG_FRQCRB;
+ offset -= 64;
+ } else {
+ return ERR_PTR(-EINVAL);
}
zclk->kick_reg = reg + CPG_FRQCRB;
zclk->hw.init = &init;
@@ -418,9 +420,11 @@ static const struct clk_div_table cpg_rpcsrc_div_table[] = {
struct clk * __init rcar_gen4_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers)
+ struct cpg_mssr_pub *pub)
{
+ struct raw_notifier_head *notifiers = &pub->notifiers;
+ void __iomem *base = pub->base0;
+ struct clk **clks = pub->clks;
const struct clk *parent;
unsigned int mult = 1;
unsigned int div = 1;
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.h b/drivers/clk/renesas/rcar-gen4-cpg.h
index 717fd148464f..6c8280b37c37 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.h
+++ b/drivers/clk/renesas/rcar-gen4-cpg.h
@@ -78,8 +78,7 @@ struct rcar_gen4_cpg_pll_config {
struct clk *rcar_gen4_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers);
+ struct cpg_mssr_pub *pub);
int rcar_gen4_cpg_init(const struct rcar_gen4_cpg_pll_config *config,
unsigned int clk_extalr, u32 mode);
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 71431970d6e6..7f9b7aa39790 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -40,8 +40,10 @@
#define WARN_DEBUG(x) do { } while (0)
#endif
+#define RZT2H_RESET_REG_READ_COUNT 7
+
/*
- * Module Standby and Software Reset register offets.
+ * Module Standby and Software Reset register offsets.
*
* If the registers exist, these are valid for SH-Mobile, R-Mobile,
* R-Car Gen2, R-Car Gen3, and RZ/G1.
@@ -81,6 +83,37 @@ static const u16 mstpcr_for_gen4[] = {
};
/*
+ * Module Stop Control Register (RZ/T2H)
+ * RZ/T2H has two register blocks;
+ * bit 12 is used to differentiate between them.
+ */
+
+#define RZT2H_MSTPCR_BLOCK_SHIFT 12
+#define RZT2H_MSTPCR_OFFSET_MASK GENMASK(11, 0)
+#define RZT2H_MSTPCR(block, offset) (((block) << RZT2H_MSTPCR_BLOCK_SHIFT) | \
+ ((offset) & RZT2H_MSTPCR_OFFSET_MASK))
+
+#define RZT2H_MSTPCR_BLOCK(x) ((x) >> RZT2H_MSTPCR_BLOCK_SHIFT)
+#define RZT2H_MSTPCR_OFFSET(x) ((x) & RZT2H_MSTPCR_OFFSET_MASK)
+
+static const u16 mstpcr_for_rzt2h[] = {
+ RZT2H_MSTPCR(0, 0x300), /* MSTPCRA */
+ RZT2H_MSTPCR(0, 0x304), /* MSTPCRB */
+ RZT2H_MSTPCR(0, 0x308), /* MSTPCRC */
+ RZT2H_MSTPCR(0, 0x30c), /* MSTPCRD */
+ RZT2H_MSTPCR(0, 0x310), /* MSTPCRE */
+ 0,
+ RZT2H_MSTPCR(1, 0x318), /* MSTPCRG */
+ 0,
+ RZT2H_MSTPCR(1, 0x320), /* MSTPCRI */
+ RZT2H_MSTPCR(0, 0x324), /* MSTPCRJ */
+ RZT2H_MSTPCR(0, 0x328), /* MSTPCRK */
+ RZT2H_MSTPCR(0, 0x32c), /* MSTPCRL */
+ RZT2H_MSTPCR(0, 0x330), /* MSTPCRM */
+ RZT2H_MSTPCR(1, 0x334), /* MSTPCRN */
+};
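A worked example of the packing above (a sketch, not part of the patch): encoding block 1 at offset 0x318 and decoding it again recovers both fields.

    u16 conf = RZT2H_MSTPCR(1, 0x318);            /* (1 << 12) | 0x318 = 0x1318 */
    unsigned int blk = RZT2H_MSTPCR_BLOCK(conf);  /* 1 -> second register block */
    unsigned int off = RZT2H_MSTPCR_OFFSET(conf); /* 0x318 (MSTPCRG) */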
+
+/*
* Standby Control Register offsets (RZ/A)
* Base address is FRQCR register
*/
@@ -106,6 +139,22 @@ static const u16 srcr_for_gen4[] = {
0x2C60, 0x2C64, 0x2C68, 0x2C6C, 0x2C70, 0x2C74,
};
+static const u16 mrcr_for_rzt2h[] = {
+ 0x240, /* MRCTLA */
+ 0x244, /* Reserved */
+ 0x248, /* Reserved */
+ 0x24C, /* Reserved */
+ 0x250, /* MRCTLE */
+ 0x254, /* Reserved */
+ 0x258, /* Reserved */
+ 0x25C, /* Reserved */
+ 0x260, /* MRCTLI */
+ 0x264, /* Reserved */
+ 0x268, /* Reserved */
+ 0x26C, /* Reserved */
+ 0x270, /* MRCTLM */
+};
+
/*
* Software Reset Clearing Register offsets
*/
@@ -126,16 +175,14 @@ static const u16 srstclr_for_gen4[] = {
* struct cpg_mssr_priv - Clock Pulse Generator / Module Standby
* and Software Reset Private Data
*
+ * @pub: Data passed to clock registration callback
* @rcdev: Optional reset controller entity
* @dev: CPG/MSSR device
- * @base: CPG/MSSR register block base address
* @reg_layout: CPG/MSSR register layout
- * @rmw_lock: protects RMW register accesses
* @np: Device node in DT for this CPG/MSSR module
* @num_core_clks: Number of Core Clocks in clks[]
* @num_mod_clks: Number of Module Clocks in clks[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
- * @notifiers: Notifier chain to save/restore clock state for system resume
* @status_regs: Pointer to status registers array
* @control_regs: Pointer to control registers array
* @reset_regs: Pointer to reset registers array
@@ -147,20 +194,18 @@ static const u16 srstclr_for_gen4[] = {
* @clks: Array containing all Core and Module Clocks
*/
struct cpg_mssr_priv {
+ struct cpg_mssr_pub pub;
#ifdef CONFIG_RESET_CONTROLLER
struct reset_controller_dev rcdev;
#endif
struct device *dev;
- void __iomem *base;
enum clk_reg_layout reg_layout;
- spinlock_t rmw_lock;
struct device_node *np;
unsigned int num_core_clks;
unsigned int num_mod_clks;
unsigned int last_dt_core_clk;
- struct raw_notifier_head notifiers;
const u16 *status_regs;
const u16 *control_regs;
const u16 *reset_regs;
@@ -192,6 +237,26 @@ struct mstp_clock {
#define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw)
+static u32 cpg_rzt2h_mstp_read(struct clk_hw *hw, u16 offset)
+{
+ struct mstp_clock *clock = to_mstp_clock(hw);
+ struct cpg_mssr_priv *priv = clock->priv;
+ void __iomem *base =
+ RZT2H_MSTPCR_BLOCK(offset) ? priv->pub.base1 : priv->pub.base0;
+
+ return readl(base + RZT2H_MSTPCR_OFFSET(offset));
+}
+
+static void cpg_rzt2h_mstp_write(struct clk_hw *hw, u16 offset, u32 value)
+{
+ struct mstp_clock *clock = to_mstp_clock(hw);
+ struct cpg_mssr_priv *priv = clock->priv;
+ void __iomem *base =
+ RZT2H_MSTPCR_BLOCK(offset) ? priv->pub.base1 : priv->pub.base0;
+
+ writel(value, base + RZT2H_MSTPCR_OFFSET(offset));
+}
+
static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
{
struct mstp_clock *clock = to_mstp_clock(hw);
@@ -206,38 +271,63 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
dev_dbg(dev, "MSTP %u%02u/%pC %s\n", reg, bit, hw->clk,
str_on_off(enable));
- spin_lock_irqsave(&priv->rmw_lock, flags);
+ spin_lock_irqsave(&priv->pub.rmw_lock, flags);
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
- value = readb(priv->base + priv->control_regs[reg]);
+ value = readb(priv->pub.base0 + priv->control_regs[reg]);
if (enable)
value &= ~bitmask;
else
value |= bitmask;
- writeb(value, priv->base + priv->control_regs[reg]);
+ writeb(value, priv->pub.base0 + priv->control_regs[reg]);
/* dummy read to ensure write has completed */
- readb(priv->base + priv->control_regs[reg]);
- barrier_data(priv->base + priv->control_regs[reg]);
+ readb(priv->pub.base0 + priv->control_regs[reg]);
+ barrier_data(priv->pub.base0 + priv->control_regs[reg]);
+
+ } else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
+ value = cpg_rzt2h_mstp_read(hw,
+ priv->control_regs[reg]);
+
+ if (enable)
+ value &= ~bitmask;
+ else
+ value |= bitmask;
+
+ cpg_rzt2h_mstp_write(hw,
+ priv->control_regs[reg],
+ value);
} else {
- value = readl(priv->base + priv->control_regs[reg]);
+ value = readl(priv->pub.base0 + priv->control_regs[reg]);
if (enable)
value &= ~bitmask;
else
value |= bitmask;
- writel(value, priv->base + priv->control_regs[reg]);
+ writel(value, priv->pub.base0 + priv->control_regs[reg]);
}
- spin_unlock_irqrestore(&priv->rmw_lock, flags);
+ spin_unlock_irqrestore(&priv->pub.rmw_lock, flags);
if (!enable || priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
return 0;
- error = readl_poll_timeout_atomic(priv->base + priv->status_regs[reg],
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
+ /*
+	 * For the RZ/T2H case, it is necessary to perform a read-back after
+	 * accessing the MSTPCRm register, and to dummy-read any register of
+	 * the IP at least seven times. Instead of memory-mapping the IP
+	 * registers, we simply add a delay after the read operation.
+ */
+ cpg_rzt2h_mstp_read(hw, priv->control_regs[reg]);
+ udelay(10);
+ return 0;
+ }
+
+ error = readl_poll_timeout_atomic(priv->pub.base0 + priv->status_regs[reg],
value, !(value & bitmask), 0, 10);
if (error)
dev_err(dev, "Failed to enable SMSTP %p[%d]\n",
- priv->base + priv->control_regs[reg], bit);
+ priv->pub.base0 + priv->control_regs[reg], bit);
return error;
}
@@ -256,12 +346,16 @@ static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
{
struct mstp_clock *clock = to_mstp_clock(hw);
struct cpg_mssr_priv *priv = clock->priv;
+ unsigned int reg = clock->index / 32;
u32 value;
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
- value = readb(priv->base + priv->control_regs[clock->index / 32]);
+ value = readb(priv->pub.base0 + priv->control_regs[reg]);
+ else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H)
+ value = cpg_rzt2h_mstp_read(hw,
+ priv->control_regs[reg]);
else
- value = readl(priv->base + priv->status_regs[clock->index / 32]);
+ value = readl(priv->pub.base0 + priv->status_regs[reg]);
return !(value & BIT(clock->index % 32));
}
@@ -348,7 +442,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
case CLK_TYPE_DIV6P1:
case CLK_TYPE_DIV6_RO:
WARN_DEBUG(core->parent >= priv->num_core_clks);
- parent = priv->clks[core->parent];
+ parent = priv->pub.clks[core->parent];
if (IS_ERR(parent)) {
clk = parent;
goto fail;
@@ -358,12 +452,12 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
if (core->type == CLK_TYPE_DIV6_RO)
/* Multiply with the DIV6 register value */
- div *= (readl(priv->base + core->offset) & 0x3f) + 1;
+ div *= (readl(priv->pub.base0 + core->offset) & 0x3f) + 1;
if (core->type == CLK_TYPE_DIV6P1) {
clk = cpg_div6_register(core->name, 1, &parent_name,
- priv->base + core->offset,
- &priv->notifiers);
+ priv->pub.base0 + core->offset,
+ &priv->pub.notifiers);
} else {
clk = clk_register_fixed_factor(NULL, core->name,
parent_name, 0,
@@ -379,19 +473,18 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
default:
if (info->cpg_clk_register)
clk = info->cpg_clk_register(dev, core, info,
- priv->clks, priv->base,
- &priv->notifiers);
+ &priv->pub);
else
dev_err(dev, "%s has unsupported core clock type %u\n",
core->name, core->type);
break;
}
- if (IS_ERR_OR_NULL(clk))
+ if (IS_ERR(clk))
goto fail;
dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
- priv->clks[id] = clk;
+ priv->pub.clks[id] = clk;
return;
fail:
@@ -414,14 +507,14 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
WARN_DEBUG(id < priv->num_core_clks);
WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
- WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
+ WARN_DEBUG(PTR_ERR(priv->pub.clks[id]) != -ENOENT);
if (!mod->name) {
/* Skip NULLified clock */
return;
}
- parent = priv->clks[mod->parent];
+ parent = priv->pub.clks[mod->parent];
if (IS_ERR(parent)) {
clk = parent;
goto fail;
@@ -612,64 +705,133 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev,
#define rcdev_to_priv(x) container_of(x, struct cpg_mssr_priv, rcdev)
-static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int cpg_mssr_reset_operate(struct reset_controller_dev *rcdev,
+ const char *func, bool set, unsigned long id)
{
struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
unsigned int reg = id / 32;
unsigned int bit = id % 32;
+ const u16 off = set ? priv->reset_regs[reg] : priv->reset_clear_regs[reg];
u32 bitmask = BIT(bit);
- dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
+ if (func)
+ dev_dbg(priv->dev, "%s %u%02u\n", func, reg, bit);
+
+ writel(bitmask, priv->pub.base0 + off);
+ readl(priv->pub.base0 + off);
+ barrier_data(priv->pub.base0 + off);
+
+ return 0;
+}
+
+static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
/* Reset module */
- writel(bitmask, priv->base + priv->reset_regs[reg]);
+ cpg_mssr_reset_operate(rcdev, "reset", true, id);
- /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
- udelay(35);
+ /*
+	 * On R-Car Gen4, the delay after SRCR has been written is 1 ms.
+	 * On older SoCs, the delay after SRCR has been written is 35 us
+ * (one cycle of the RCLK clock @ ca. 32 kHz).
+ */
+ if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN4)
+ usleep_range(1000, 2000);
+ else
+ usleep_range(35, 1000);
/* Release module from reset state */
- writel(bitmask, priv->base + priv->reset_clear_regs[reg]);
-
- return 0;
+ return cpg_mssr_reset_operate(rcdev, NULL, false, id);
}
static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
+ return cpg_mssr_reset_operate(rcdev, "assert", true, id);
+}
+
+static int cpg_mssr_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return cpg_mssr_reset_operate(rcdev, "deassert", false, id);
+}
+
+static int cpg_mssr_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
unsigned int reg = id / 32;
unsigned int bit = id % 32;
u32 bitmask = BIT(bit);
- dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
-
- writel(bitmask, priv->base + priv->reset_regs[reg]);
- return 0;
+ return !!(readl(priv->pub.base0 + priv->reset_regs[reg]) & bitmask);
}
-static int cpg_mssr_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int cpg_mrcr_set_reset_state(struct reset_controller_dev *rcdev,
+ unsigned long id, bool set)
{
struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
unsigned int reg = id / 32;
unsigned int bit = id % 32;
u32 bitmask = BIT(bit);
+ void __iomem *reg_addr;
+ unsigned long flags;
+ unsigned int i;
+ u32 val;
+
+ dev_dbg(priv->dev, "%s %u%02u\n", set ? "assert" : "deassert", reg, bit);
- dev_dbg(priv->dev, "deassert %u%02u\n", reg, bit);
+ spin_lock_irqsave(&priv->pub.rmw_lock, flags);
+
+ reg_addr = priv->pub.base0 + priv->reset_regs[reg];
+ /* Read current value and modify */
+ val = readl(reg_addr);
+ if (set)
+ val |= bitmask;
+ else
+ val &= ~bitmask;
+ writel(val, reg_addr);
+
+ /*
+	 * To ensure safe operation after release from a module reset,
+	 * multiple dummy reads of the same register must be performed.
+ */
+ for (i = 0; !set && i < RZT2H_RESET_REG_READ_COUNT; i++)
+ readl(reg_addr);
+
+ /* Verify the operation */
+ val = readl(reg_addr);
+ if (set == !(bitmask & val)) {
+ dev_err(priv->dev, "Reset register %u%02u operation failed\n", reg, bit);
+ spin_unlock_irqrestore(&priv->pub.rmw_lock, flags);
+ return -EIO;
+ }
+
+ spin_unlock_irqrestore(&priv->pub.rmw_lock, flags);
- writel(bitmask, priv->base + priv->reset_clear_regs[reg]);
return 0;
}
-static int cpg_mssr_status(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int cpg_mrcr_reset(struct reset_controller_dev *rcdev, unsigned long id)
{
- struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
- unsigned int reg = id / 32;
- unsigned int bit = id % 32;
- u32 bitmask = BIT(bit);
+ int ret;
- return !!(readl(priv->base + priv->reset_regs[reg]) & bitmask);
+ ret = cpg_mrcr_set_reset_state(rcdev, id, true);
+ if (ret)
+ return ret;
+
+ return cpg_mrcr_set_reset_state(rcdev, id, false);
+}
+
+static int cpg_mrcr_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ return cpg_mrcr_set_reset_state(rcdev, id, true);
+}
+
+static int cpg_mrcr_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ return cpg_mrcr_set_reset_state(rcdev, id, false);
}
static const struct reset_control_ops cpg_mssr_reset_ops = {
@@ -679,6 +841,13 @@ static const struct reset_control_ops cpg_mssr_reset_ops = {
.status = cpg_mssr_status,
};
+static const struct reset_control_ops cpg_mrcr_reset_ops = {
+ .reset = cpg_mrcr_reset,
+ .assert = cpg_mrcr_assert,
+ .deassert = cpg_mrcr_deassert,
+ .status = cpg_mssr_status,
+};
+
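Consumers are unaffected by the split into two ops tables; the standard reset API simply dispatches to whichever ops were registered for the SoC. A minimal usage sketch from a hypothetical consumer driver:

    struct reset_control *rstc;

    rstc = devm_reset_control_get_exclusive(dev, NULL);
    if (IS_ERR(rstc))
            return PTR_ERR(rstc);

    /* Ends up in cpg_mrcr_reset() on RZ/T2H, cpg_mssr_reset() elsewhere. */
    return reset_control_reset(rstc);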
static int cpg_mssr_reset_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
@@ -696,11 +865,23 @@ static int cpg_mssr_reset_xlate(struct reset_controller_dev *rcdev,
static int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
{
- priv->rcdev.ops = &cpg_mssr_reset_ops;
+ /*
+	 * RZ/T2H (and family) has Module Reset Control Registers,
+	 * which allow controlling the resets of certain modules.
+ * The number of resets is not equal to the number of module clocks.
+ */
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
+ priv->rcdev.ops = &cpg_mrcr_reset_ops;
+ priv->rcdev.nr_resets = ARRAY_SIZE(mrcr_for_rzt2h) * 32;
+ } else {
+ priv->rcdev.ops = &cpg_mssr_reset_ops;
+ priv->rcdev.nr_resets = priv->num_mod_clks;
+ }
+
priv->rcdev.of_node = priv->dev->of_node;
priv->rcdev.of_reset_n_cells = 1;
priv->rcdev.of_xlate = cpg_mssr_reset_xlate;
- priv->rcdev.nr_resets = priv->num_mod_clks;
+
return devm_reset_controller_register(priv->dev, &priv->rcdev);
}
@@ -872,6 +1053,18 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a779h0_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R9A09G077
+ {
+ .compatible = "renesas,r9a09g077-cpg-mssr",
+ .data = &r9a09g077_cpg_mssr_info,
+ },
+#endif
+#ifdef CONFIG_CLK_R9A09G087
+ {
+ .compatible = "renesas,r9a09g087-cpg-mssr",
+ .data = &r9a09g077_cpg_mssr_info,
+ },
+#endif
{ /* sentinel */ }
};
@@ -895,12 +1088,12 @@ static int cpg_mssr_suspend_noirq(struct device *dev)
if (priv->smstpcr_saved[reg].mask)
priv->smstpcr_saved[reg].val =
priv->reg_layout == CLK_REG_LAYOUT_RZ_A ?
- readb(priv->base + priv->control_regs[reg]) :
- readl(priv->base + priv->control_regs[reg]);
+ readb(priv->pub.base0 + priv->control_regs[reg]) :
+ readl(priv->pub.base0 + priv->control_regs[reg]);
}
/* Save core clocks */
- raw_notifier_call_chain(&priv->notifiers, PM_EVENT_SUSPEND, NULL);
+ raw_notifier_call_chain(&priv->pub.notifiers, PM_EVENT_SUSPEND, NULL);
return 0;
}
@@ -917,7 +1110,7 @@ static int cpg_mssr_resume_noirq(struct device *dev)
return 0;
/* Restore core clocks */
- raw_notifier_call_chain(&priv->notifiers, PM_EVENT_RESUME, NULL);
+ raw_notifier_call_chain(&priv->pub.notifiers, PM_EVENT_RESUME, NULL);
/* Restore module clocks */
for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
@@ -926,29 +1119,29 @@ static int cpg_mssr_resume_noirq(struct device *dev)
continue;
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
- oldval = readb(priv->base + priv->control_regs[reg]);
+ oldval = readb(priv->pub.base0 + priv->control_regs[reg]);
else
- oldval = readl(priv->base + priv->control_regs[reg]);
+ oldval = readl(priv->pub.base0 + priv->control_regs[reg]);
newval = oldval & ~mask;
newval |= priv->smstpcr_saved[reg].val & mask;
if (newval == oldval)
continue;
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
- writeb(newval, priv->base + priv->control_regs[reg]);
+ writeb(newval, priv->pub.base0 + priv->control_regs[reg]);
/* dummy read to ensure write has completed */
- readb(priv->base + priv->control_regs[reg]);
- barrier_data(priv->base + priv->control_regs[reg]);
+ readb(priv->pub.base0 + priv->control_regs[reg]);
+ barrier_data(priv->pub.base0 + priv->control_regs[reg]);
continue;
} else
- writel(newval, priv->base + priv->control_regs[reg]);
+ writel(newval, priv->pub.base0 + priv->control_regs[reg]);
/* Wait until enabled clocks are really enabled */
mask &= ~priv->smstpcr_saved[reg].val;
if (!mask)
continue;
- error = readl_poll_timeout_atomic(priv->base + priv->status_regs[reg],
+ error = readl_poll_timeout_atomic(priv->pub.base0 + priv->status_regs[reg],
oldval, !(oldval & mask), 0, 10);
if (error)
dev_warn(dev, "Failed to enable SMSTP%u[0x%x]\n", reg,
@@ -1006,6 +1199,7 @@ static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
of_for_each_phandle(&it, rc, node, "clocks", "#clock-cells", -1) {
int idx;
+ unsigned int *new_ids;
if (it.node != priv->np)
continue;
@@ -1016,11 +1210,13 @@ static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
if (args[0] != CPG_MOD)
continue;
- ids = krealloc_array(ids, (num + 1), sizeof(*ids), GFP_KERNEL);
- if (!ids) {
+ new_ids = krealloc_array(ids, (num + 1), sizeof(*ids), GFP_KERNEL);
+ if (!new_ids) {
of_node_put(it.node);
+ kfree(ids);
return -ENOMEM;
}
+ ids = new_ids;
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
idx = MOD_CLK_PACK_10(args[1]); /* for DEF_MOD_STB() */
@@ -1058,20 +1254,28 @@ static int __init cpg_mssr_common_init(struct device *dev,
if (!priv)
return -ENOMEM;
+ priv->pub.clks = priv->clks;
priv->np = np;
priv->dev = dev;
- spin_lock_init(&priv->rmw_lock);
+ spin_lock_init(&priv->pub.rmw_lock);
- priv->base = of_iomap(np, 0);
- if (!priv->base) {
+ priv->pub.base0 = of_iomap(np, 0);
+ if (!priv->pub.base0) {
error = -ENOMEM;
goto out_err;
}
+ if (info->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
+ priv->pub.base1 = of_iomap(np, 1);
+ if (!priv->pub.base1) {
+ error = -ENOMEM;
+ goto out_err;
+ }
+ }
priv->num_core_clks = info->num_total_core_clks;
priv->num_mod_clks = info->num_hw_mod_clks;
priv->last_dt_core_clk = info->last_dt_core_clk;
- RAW_INIT_NOTIFIER_HEAD(&priv->notifiers);
+ RAW_INIT_NOTIFIER_HEAD(&priv->pub.notifiers);
priv->reg_layout = info->reg_layout;
if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN2_AND_GEN3) {
priv->status_regs = mstpsr;
@@ -1080,6 +1284,9 @@ static int __init cpg_mssr_common_init(struct device *dev,
priv->reset_clear_regs = srstclr;
} else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
priv->control_regs = stbcr;
+ } else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
+ priv->control_regs = mstpcr_for_rzt2h;
+ priv->reset_regs = mrcr_for_rzt2h;
} else if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN4) {
priv->status_regs = mstpsr_for_gen4;
priv->control_regs = mstpcr_for_gen4;
@@ -1091,7 +1298,7 @@ static int __init cpg_mssr_common_init(struct device *dev,
}
for (i = 0; i < nclks; i++)
- priv->clks[i] = ERR_PTR(-ENOENT);
+ priv->pub.clks[i] = ERR_PTR(-ENOENT);
error = cpg_mssr_reserved_init(priv, info);
if (error)
@@ -1108,8 +1315,10 @@ static int __init cpg_mssr_common_init(struct device *dev,
reserve_err:
cpg_mssr_reserved_exit(priv);
out_err:
- if (priv->base)
- iounmap(priv->base);
+ if (priv->pub.base0)
+ iounmap(priv->pub.base0);
+ if (priv->pub.base1)
+ iounmap(priv->pub.base1);
kfree(priv);
return error;
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index a1d6e0cbcff9..ad11ab5f0069 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -8,6 +8,8 @@
#ifndef __CLK_RENESAS_CPG_MSSR_H__
#define __CLK_RENESAS_CPG_MSSR_H__
+#include <linux/notifier.h>
+
/*
* Definitions of CPG Core Clocks
*
@@ -27,6 +29,31 @@ struct cpg_core_clk {
unsigned int div;
unsigned int mult;
unsigned int offset;
+ union {
+ const char * const *parent_names;
+ const struct clk_div_table *dtable;
+ };
+ u32 conf;
+ u16 flag;
+ u8 mux_flags;
+ u8 num_parents;
+};
+
+/**
+ * struct cpg_mssr_pub - data shared with device-specific clk registration code
+ *
+ * @base0: CPG/MSSR register block base0 address
+ * @base1: CPG/MSSR register block base1 address
+ * @notifiers: Notifier chain to save/restore clock state for system resume
+ * @rmw_lock: protects RMW register accesses
+ * @clks: pointer to clocks
+ */
+struct cpg_mssr_pub {
+ void __iomem *base0;
+ void __iomem *base1;
+ struct raw_notifier_head notifiers;
+ spinlock_t rmw_lock;
+ struct clk **clks;
};
enum clk_types {
@@ -89,6 +116,7 @@ enum clk_reg_layout {
CLK_REG_LAYOUT_RCAR_GEN2_AND_GEN3 = 0,
CLK_REG_LAYOUT_RZ_A,
CLK_REG_LAYOUT_RCAR_GEN4,
+ CLK_REG_LAYOUT_RZ_T2H,
};
/**
@@ -153,8 +181,7 @@ struct cpg_mssr_info {
struct clk *(*cpg_clk_register)(struct device *dev,
const struct cpg_core_clk *core,
const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers);
+ struct cpg_mssr_pub *pub);
};
extern const struct cpg_mssr_info r7s9210_cpg_mssr_info;
@@ -181,6 +208,7 @@ extern const struct cpg_mssr_info r8a779a0_cpg_mssr_info;
extern const struct cpg_mssr_info r8a779f0_cpg_mssr_info;
extern const struct cpg_mssr_info r8a779g0_cpg_mssr_info;
extern const struct cpg_mssr_info r8a779h0_cpg_mssr_info;
+extern const struct cpg_mssr_info r9a09g077_cpg_mssr_info;
void __init cpg_mssr_early_init(struct device_node *np,
const struct cpg_mssr_info *info);
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index a8628f64a03b..64d1ef6e4c94 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -11,10 +11,13 @@
* Copyright (C) 2015 Renesas Electronics Corp.
*/
+#include <linux/atomic.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -68,6 +71,9 @@
#define MAX_VCLK_FREQ (148500000)
+#define MSTOP_OFF(conf) FIELD_GET(GENMASK(31, 16), (conf))
+#define MSTOP_MASK(conf) FIELD_GET(GENMASK(15, 0), (conf))
+
/**
* struct clk_hw_data - clock hardware data
* @hw: clock hw
@@ -142,6 +148,7 @@ struct rzg2l_pll5_mux_dsi_div_param {
* @num_resets: Number of Module Resets in info->resets[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
* @info: Pointer to platform data
+ * @genpd: PM domain
* @mux_dsi_div_params: pll5 mux and dsi div parameters
*/
struct rzg2l_cpg_priv {
@@ -158,6 +165,8 @@ struct rzg2l_cpg_priv {
const struct rzg2l_cpg_info *info;
+ struct generic_pm_domain genpd;
+
struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};
@@ -815,11 +824,10 @@ static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
return pll5_rate;
}
-static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int rzg2l_cpg_sipll5_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return rate;
+ return 0;
}
static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
@@ -893,7 +901,7 @@ static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
static const struct clk_ops rzg2l_cpg_sipll5_ops = {
.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
- .round_rate = rzg2l_cpg_sipll5_round_rate,
+ .determine_rate = rzg2l_cpg_sipll5_determine_rate,
.set_rate = rzg2l_cpg_sipll5_set_rate,
};
@@ -1169,7 +1177,7 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
goto fail;
}
- if (IS_ERR_OR_NULL(clk))
+ if (IS_ERR(clk))
goto fail;
dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
@@ -1182,29 +1190,146 @@ fail:
}
/**
- * struct mstp_clock - MSTP gating clock
+ * struct mstop - MSTOP specific data structure
+ * @usecnt: Usage counter for MSTOP settings (when zero, the settings
+ * are applied to the register)
+ * @conf: MSTOP configuration (register offset, setup bits)
+ */
+struct mstop {
+ atomic_t usecnt;
+ u32 conf;
+};
+
+/**
+ * struct mod_clock - Module clock
*
* @hw: handle between common and hardware-specific interfaces
+ * @priv: CPG/MSTP private data
+ * @sibling: pointer to the other coupled clock
+ * @mstop: MSTOP configuration
+ * @shared_mstop_clks: clocks sharing the MSTOP with this clock
* @off: register offset
* @bit: ON/MON bit
+ * @num_shared_mstop_clks: number of the clocks sharing MSTOP with this clock
* @enabled: soft state of the clock, if it is coupled with another clock
- * @priv: CPG/MSTP private data
- * @sibling: pointer to the other coupled clock
*/
-struct mstp_clock {
+struct mod_clock {
struct clk_hw hw;
+ struct rzg2l_cpg_priv *priv;
+ struct mod_clock *sibling;
+ struct mstop *mstop;
+ struct mod_clock **shared_mstop_clks;
u16 off;
u8 bit;
+ u8 num_shared_mstop_clks;
bool enabled;
- struct rzg2l_cpg_priv *priv;
- struct mstp_clock *sibling;
};
-#define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)
+#define to_mod_clock(_hw) container_of(_hw, struct mod_clock, hw)
+
+#define for_each_mod_clock(mod_clock, hw, priv) \
+ for (unsigned int i = 0; (priv) && i < (priv)->num_mod_clks; i++) \
+ if ((priv)->clks[(priv)->num_core_clks + i] == ERR_PTR(-ENOENT)) \
+ continue; \
+ else if (((hw) = __clk_get_hw((priv)->clks[(priv)->num_core_clks + i])) && \
+ ((mod_clock) = to_mod_clock(hw)))
+
+/* Need to be called with a lock held to avoid concurrent access to mstop->usecnt. */
+static void rzg2l_mod_clock_module_set_state(struct mod_clock *clock,
+ bool standby)
+{
+ struct rzg2l_cpg_priv *priv = clock->priv;
+ struct mstop *mstop = clock->mstop;
+ bool update = false;
+ u32 value;
+
+ if (!mstop)
+ return;
+
+ value = MSTOP_MASK(mstop->conf) << 16;
+
+ if (standby) {
+ unsigned int criticals = 0;
+
+ for (unsigned int i = 0; i < clock->num_shared_mstop_clks; i++) {
+ struct mod_clock *clk = clock->shared_mstop_clks[i];
+
+ if (clk_hw_get_flags(&clk->hw) & CLK_IS_CRITICAL)
+ criticals++;
+ }
+
+ if (!clock->num_shared_mstop_clks &&
+ clk_hw_get_flags(&clock->hw) & CLK_IS_CRITICAL)
+ criticals++;
+
+ /*
+		 * If this MSTOP is shared with critical clocks, and the
+		 * system boots up with this clock enabled but no driver
+		 * uses it, the CCF will disable it (as it is unused). Since
+		 * we don't increment the reference counter for it at
+		 * registration (to avoid messing with clocks enabled at
+		 * probe but later used by drivers), do not set the MSTOP
+		 * here either if it is shared with critical clocks and
+		 * ref-counted only by those critical clocks.
+ */
+ if (criticals && criticals == atomic_read(&mstop->usecnt))
+ return;
+
+ value |= MSTOP_MASK(mstop->conf);
+
+ /* Allow updates on probe when usecnt = 0. */
+ if (!atomic_read(&mstop->usecnt))
+ update = true;
+ else
+ update = atomic_dec_and_test(&mstop->usecnt);
+ } else {
+ if (!atomic_read(&mstop->usecnt))
+ update = true;
+ atomic_inc(&mstop->usecnt);
+ }
+
+ if (update)
+ writel(value, priv->base + MSTOP_OFF(mstop->conf));
+}
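The MSTOP registers use a write-enable layout: the upper 16 bits of a write select which bits of the lower half are actually updated, which is why the function above composes MSTOP_MASK(mstop->conf) << 16 first. A sketch of the two values it ends up writing, assuming a module mask of BIT(3):

    u32 mask = BIT(3);                  /* MSTOP_MASK(mstop->conf), assumed */
    u32 active = mask << 16;            /* WEN set, bit cleared: module active */
    u32 standby = (mask << 16) | mask;  /* WEN set, bit set: module stopped */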
+
+static int rzg2l_mod_clock_mstop_show(struct seq_file *s, void *what)
+{
+ struct rzg2l_cpg_priv *priv = s->private;
+ struct mod_clock *clk;
+ struct clk_hw *hw;
+
+ seq_printf(s, "%-20s %-5s %-10s\n", "", "", "MSTOP");
+ seq_printf(s, "%-20s %-5s %-10s\n", "", "clk", "-------------------------");
+ seq_printf(s, "%-20s %-5s %-5s %-5s %-6s %-6s\n",
+ "clk_name", "cnt", "cnt", "off", "val", "shared");
+ seq_printf(s, "%-20s %-5s %-5s %-5s %-6s %-6s\n",
+ "--------", "-----", "-----", "-----", "------", "------");
+
+ for_each_mod_clock(clk, hw, priv) {
+ u32 val;
+
+ if (!clk->mstop)
+ continue;
+
+ val = readl(priv->base + MSTOP_OFF(clk->mstop->conf)) &
+ MSTOP_MASK(clk->mstop->conf);
+
+ seq_printf(s, "%-20s %-5d %-5d 0x%-3lx 0x%-4x", clk_hw_get_name(hw),
+ __clk_get_enable_count(hw->clk), atomic_read(&clk->mstop->usecnt),
+ MSTOP_OFF(clk->mstop->conf), val);
+
+ for (unsigned int i = 0; i < clk->num_shared_mstop_clks; i++)
+ seq_printf(s, " %pC", clk->shared_mstop_clks[i]->hw.clk);
+
+ seq_puts(s, "\n");
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(rzg2l_mod_clock_mstop);
static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
- struct mstp_clock *clock = to_mod_clock(hw);
+ struct mod_clock *clock = to_mod_clock(hw);
struct rzg2l_cpg_priv *priv = clock->priv;
unsigned int reg = clock->off;
struct device *dev = priv->dev;
@@ -1224,7 +1349,15 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
if (enable)
value |= bitmask;
- writel(value, priv->base + CLK_ON_R(reg));
+ scoped_guard(spinlock_irqsave, &priv->rmw_lock) {
+ if (enable) {
+ writel(value, priv->base + CLK_ON_R(reg));
+ rzg2l_mod_clock_module_set_state(clock, false);
+ } else {
+ rzg2l_mod_clock_module_set_state(clock, true);
+ writel(value, priv->base + CLK_ON_R(reg));
+ }
+ }
if (!enable)
return 0;
@@ -1243,7 +1376,7 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
static int rzg2l_mod_clock_enable(struct clk_hw *hw)
{
- struct mstp_clock *clock = to_mod_clock(hw);
+ struct mod_clock *clock = to_mod_clock(hw);
if (clock->sibling) {
struct rzg2l_cpg_priv *priv = clock->priv;
@@ -1263,7 +1396,7 @@ static int rzg2l_mod_clock_enable(struct clk_hw *hw)
static void rzg2l_mod_clock_disable(struct clk_hw *hw)
{
- struct mstp_clock *clock = to_mod_clock(hw);
+ struct mod_clock *clock = to_mod_clock(hw);
if (clock->sibling) {
struct rzg2l_cpg_priv *priv = clock->priv;
@@ -1283,7 +1416,7 @@ static void rzg2l_mod_clock_disable(struct clk_hw *hw)
static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
{
- struct mstp_clock *clock = to_mod_clock(hw);
+ struct mod_clock *clock = to_mod_clock(hw);
struct rzg2l_cpg_priv *priv = clock->priv;
u32 bitmask = BIT(clock->bit);
u32 value;
@@ -1310,34 +1443,104 @@ static const struct clk_ops rzg2l_mod_clock_ops = {
.is_enabled = rzg2l_mod_clock_is_enabled,
};
-static struct mstp_clock
-*rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
+static struct mod_clock
+*rzg2l_mod_clock_get_sibling(struct mod_clock *clock,
struct rzg2l_cpg_priv *priv)
{
+ struct mod_clock *clk;
struct clk_hw *hw;
- unsigned int i;
- for (i = 0; i < priv->num_mod_clks; i++) {
- struct mstp_clock *clk;
+ for_each_mod_clock(clk, hw, priv) {
+ if (clock->off == clk->off && clock->bit == clk->bit)
+ return clk;
+ }
- if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
+ return NULL;
+}
+
+static struct mstop *rzg2l_mod_clock_get_mstop(struct rzg2l_cpg_priv *priv, u32 conf)
+{
+ struct mod_clock *clk;
+ struct clk_hw *hw;
+
+ for_each_mod_clock(clk, hw, priv) {
+ if (!clk->mstop)
continue;
- hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
- clk = to_mod_clock(hw);
- if (clock->off == clk->off && clock->bit == clk->bit)
- return clk;
+ if (clk->mstop->conf == conf)
+ return clk->mstop;
}
return NULL;
}
+static void rzg2l_mod_clock_init_mstop(struct rzg2l_cpg_priv *priv)
+{
+ struct mod_clock *clk;
+ struct clk_hw *hw;
+
+ for_each_mod_clock(clk, hw, priv) {
+ if (!clk->mstop)
+ continue;
+
+ /*
+		 * Out of reset, all modules are enabled. Set the module state
+		 * in case the associated clocks are disabled at probe. Otherwise
+		 * the module is left in an invalid HW state.
+ */
+ scoped_guard(spinlock_irqsave, &priv->rmw_lock) {
+ if (!rzg2l_mod_clock_is_enabled(&clk->hw))
+ rzg2l_mod_clock_module_set_state(clk, true);
+ }
+ }
+}
+
+static int rzg2l_mod_clock_update_shared_mstop_clks(struct rzg2l_cpg_priv *priv,
+ struct mod_clock *clock)
+{
+ struct mod_clock *clk;
+ struct clk_hw *hw;
+
+ if (!clock->mstop)
+ return 0;
+
+ for_each_mod_clock(clk, hw, priv) {
+ int num_shared_mstop_clks, incr = 1;
+ struct mod_clock **new_clks;
+
+ if (clk->mstop != clock->mstop)
+ continue;
+
+ num_shared_mstop_clks = clk->num_shared_mstop_clks;
+ if (!num_shared_mstop_clks)
+ incr++;
+
+ new_clks = devm_krealloc(priv->dev, clk->shared_mstop_clks,
+ (num_shared_mstop_clks + incr) * sizeof(*new_clks),
+ GFP_KERNEL);
+ if (!new_clks)
+ return -ENOMEM;
+
+ if (!num_shared_mstop_clks)
+ new_clks[num_shared_mstop_clks++] = clk;
+ new_clks[num_shared_mstop_clks++] = clock;
+
+ for (unsigned int i = 0; i < num_shared_mstop_clks; i++) {
+ new_clks[i]->shared_mstop_clks = new_clks;
+ new_clks[i]->num_shared_mstop_clks = num_shared_mstop_clks;
+ }
+ break;
+ }
+
+ return 0;
+}
+
static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
const struct rzg2l_cpg_info *info,
struct rzg2l_cpg_priv *priv)
{
- struct mstp_clock *clock = NULL;
+ struct mod_clock *clock = NULL;
struct device *dev = priv->dev;
unsigned int id = mod->id;
struct clk_init_data init;
@@ -1383,18 +1586,29 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
clock->priv = priv;
clock->hw.init = &init;
+ if (mod->mstop_conf) {
+ struct mstop *mstop = rzg2l_mod_clock_get_mstop(priv, mod->mstop_conf);
+
+ if (!mstop) {
+ mstop = devm_kzalloc(dev, sizeof(*mstop), GFP_KERNEL);
+ if (!mstop) {
+ clk = ERR_PTR(-ENOMEM);
+ goto fail;
+ }
+ mstop->conf = mod->mstop_conf;
+ atomic_set(&mstop->usecnt, 0);
+ }
+ clock->mstop = mstop;
+ }
+
ret = devm_clk_hw_register(dev, &clock->hw);
if (ret) {
clk = ERR_PTR(ret);
goto fail;
}
- clk = clock->hw.clk;
- dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
- priv->clks[id] = clk;
-
if (mod->is_coupled) {
- struct mstp_clock *sibling;
+ struct mod_clock *sibling;
clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
sibling = rzg2l_mod_clock_get_sibling(clock, priv);
@@ -1404,6 +1618,17 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
}
}
+ /* Keep this before priv->clks[id] is updated. */
+ ret = rzg2l_mod_clock_update_shared_mstop_clks(priv, clock);
+ if (ret) {
+ clk = ERR_PTR(ret);
+ goto fail;
+ }
+
+ clk = clock->hw.clk;
+ dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
+ priv->clks[id] = clk;
+
return;
fail:
@@ -1413,8 +1638,8 @@ fail:
#define rcdev_to_priv(x) container_of(x, struct rzg2l_cpg_priv, rcdev)
-static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int __rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
{
struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
const struct rzg2l_cpg_info *info = priv->info;
@@ -1422,9 +1647,13 @@ static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
u32 mask = BIT(info->resets[id].bit);
s8 monbit = info->resets[id].monbit;
u32 value = mask << 16;
+ int ret;
- dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
+ dev_dbg(rcdev->dev, "%s id:%ld offset:0x%x\n",
+ assert ? "assert" : "deassert", id, CLK_RST_R(reg));
+ if (!assert)
+ value |= mask;
writel(value, priv->base + CLK_RST_R(reg));
if (info->has_clk_mon_regs) {
@@ -1438,38 +1667,26 @@ static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
return 0;
}
- return readl_poll_timeout_atomic(priv->base + reg, value,
- value & mask, 10, 200);
+ ret = readl_poll_timeout_atomic(priv->base + reg, value,
+ assert == !!(value & mask), 10, 200);
+ if (ret && !assert) {
+ value = mask << 16;
+ writel(value, priv->base + CLK_RST_R(info->resets[id].off));
+ }
+
+ return ret;
+}
+
+static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return __rzg2l_cpg_assert(rcdev, id, true);
}
static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
- const struct rzg2l_cpg_info *info = priv->info;
- unsigned int reg = info->resets[id].off;
- u32 mask = BIT(info->resets[id].bit);
- s8 monbit = info->resets[id].monbit;
- u32 value = (mask << 16) | mask;
-
- dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
- CLK_RST_R(reg));
-
- writel(value, priv->base + CLK_RST_R(reg));
-
- if (info->has_clk_mon_regs) {
- reg = CLK_MRST_R(reg);
- } else if (monbit >= 0) {
- reg = CPG_RST_MON;
- mask = BIT(monbit);
- } else {
- /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
- udelay(35);
- return 0;
- }
-
- return readl_poll_timeout_atomic(priv->base + reg, value,
- !(value & mask), 10, 200);
+ return __rzg2l_cpg_assert(rcdev, id, false);
}
static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
@@ -1540,39 +1757,14 @@ static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
return devm_reset_controller_register(priv->dev, &priv->rcdev);
}
-/**
- * struct rzg2l_cpg_pm_domains - RZ/G2L PM domains data structure
- * @onecell_data: cell data
- * @domains: generic PM domains
- */
-struct rzg2l_cpg_pm_domains {
- struct genpd_onecell_data onecell_data;
- struct generic_pm_domain *domains[];
-};
-
-/**
- * struct rzg2l_cpg_pd - RZ/G2L power domain data structure
- * @genpd: generic PM domain
- * @priv: pointer to CPG private data structure
- * @conf: CPG PM domain configuration info
- * @id: RZ/G2L power domain ID
- */
-struct rzg2l_cpg_pd {
- struct generic_pm_domain genpd;
- struct rzg2l_cpg_priv *priv;
- struct rzg2l_cpg_pm_domain_conf conf;
- u16 id;
-};
-
-static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_pd *pd,
+static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
const struct of_phandle_args *clkspec)
{
- if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
+ if (clkspec->np != priv->genpd.dev.of_node || clkspec->args_count != 2)
return false;
switch (clkspec->args[0]) {
case CPG_MOD: {
- struct rzg2l_cpg_priv *priv = pd->priv;
const struct rzg2l_cpg_info *info = priv->info;
unsigned int id = clkspec->args[1];
@@ -1597,7 +1789,7 @@ static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_pd *pd,
static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
- struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
+ struct rzg2l_cpg_priv *priv = container_of(domain, struct rzg2l_cpg_priv, genpd);
struct device_node *np = dev->of_node;
struct of_phandle_args clkspec;
bool once = true;
@@ -1606,7 +1798,7 @@ static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device
int error;
for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
- if (!rzg2l_cpg_is_pm_clk(pd, &clkspec)) {
+ if (!rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
of_node_put(clkspec.np);
continue;
}
@@ -1652,182 +1844,30 @@ static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device
static void rzg2l_cpg_genpd_remove(void *data)
{
- struct genpd_onecell_data *celldata = data;
-
- for (unsigned int i = 0; i < celldata->num_domains; i++)
- pm_genpd_remove(celldata->domains[i]);
-}
-
-static void rzg2l_cpg_genpd_remove_simple(void *data)
-{
pm_genpd_remove(data);
}
-static int rzg2l_cpg_power_on(struct generic_pm_domain *domain)
-{
- struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
- struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
- struct rzg2l_cpg_priv *priv = pd->priv;
-
- /* Set MSTOP. */
- if (mstop.mask)
- writel(mstop.mask << 16, priv->base + mstop.off);
-
- return 0;
-}
-
-static int rzg2l_cpg_power_off(struct generic_pm_domain *domain)
-{
- struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
- struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
- struct rzg2l_cpg_priv *priv = pd->priv;
-
- /* Set MSTOP. */
- if (mstop.mask)
- writel(mstop.mask | (mstop.mask << 16), priv->base + mstop.off);
-
- return 0;
-}
-
-static int __init rzg2l_cpg_pd_setup(struct rzg2l_cpg_pd *pd)
-{
- bool always_on = !!(pd->genpd.flags & GENPD_FLAG_ALWAYS_ON);
- struct dev_power_governor *governor;
- int ret;
-
- if (always_on)
- governor = &pm_domain_always_on_gov;
- else
- governor = &simple_qos_governor;
-
- pd->genpd.flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
- pd->genpd.attach_dev = rzg2l_cpg_attach_dev;
- pd->genpd.detach_dev = rzg2l_cpg_detach_dev;
- pd->genpd.power_on = rzg2l_cpg_power_on;
- pd->genpd.power_off = rzg2l_cpg_power_off;
-
- ret = pm_genpd_init(&pd->genpd, governor, !always_on);
- if (ret)
- return ret;
-
- if (always_on)
- ret = rzg2l_cpg_power_on(&pd->genpd);
-
- return ret;
-}
-
static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
{
struct device *dev = priv->dev;
struct device_node *np = dev->of_node;
- struct rzg2l_cpg_pd *pd;
+ struct generic_pm_domain *genpd = &priv->genpd;
int ret;
- pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return -ENOMEM;
-
- pd->genpd.name = np->name;
- pd->genpd.flags = GENPD_FLAG_ALWAYS_ON;
- pd->priv = priv;
- ret = rzg2l_cpg_pd_setup(pd);
+ genpd->name = np->name;
+ genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
+ GENPD_FLAG_ACTIVE_WAKEUP;
+ genpd->attach_dev = rzg2l_cpg_attach_dev;
+ genpd->detach_dev = rzg2l_cpg_detach_dev;
+ ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
if (ret)
return ret;
- ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove_simple, &pd->genpd);
+ ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);
if (ret)
return ret;
- return of_genpd_add_provider_simple(np, &pd->genpd);
-}
-
-static struct generic_pm_domain *
-rzg2l_cpg_pm_domain_xlate(const struct of_phandle_args *spec, void *data)
-{
- struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
- struct genpd_onecell_data *genpd = data;
-
- if (spec->args_count != 1)
- return ERR_PTR(-EINVAL);
-
- for (unsigned int i = 0; i < genpd->num_domains; i++) {
- struct rzg2l_cpg_pd *pd = container_of(genpd->domains[i], struct rzg2l_cpg_pd,
- genpd);
-
- if (pd->id == spec->args[0]) {
- domain = &pd->genpd;
- break;
- }
- }
-
- return domain;
-}
-
-static int __init rzg2l_cpg_add_pm_domains(struct rzg2l_cpg_priv *priv)
-{
- const struct rzg2l_cpg_info *info = priv->info;
- struct device *dev = priv->dev;
- struct device_node *np = dev->of_node;
- struct rzg2l_cpg_pm_domains *domains;
- struct generic_pm_domain *parent;
- u32 ncells;
- int ret;
-
- ret = of_property_read_u32(np, "#power-domain-cells", &ncells);
- if (ret)
- return ret;
-
- /* For backward compatibility. */
- if (!ncells)
- return rzg2l_cpg_add_clk_domain(priv);
-
- domains = devm_kzalloc(dev, struct_size(domains, domains, info->num_pm_domains),
- GFP_KERNEL);
- if (!domains)
- return -ENOMEM;
-
- domains->onecell_data.domains = domains->domains;
- domains->onecell_data.num_domains = info->num_pm_domains;
- domains->onecell_data.xlate = rzg2l_cpg_pm_domain_xlate;
-
- ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, &domains->onecell_data);
- if (ret)
- return ret;
-
- for (unsigned int i = 0; i < info->num_pm_domains; i++) {
- struct rzg2l_cpg_pd *pd;
-
- pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return -ENOMEM;
-
- pd->genpd.name = info->pm_domains[i].name;
- pd->genpd.flags = info->pm_domains[i].genpd_flags;
- pd->conf = info->pm_domains[i].conf;
- pd->id = info->pm_domains[i].id;
- pd->priv = priv;
-
- ret = rzg2l_cpg_pd_setup(pd);
- if (ret)
- return ret;
-
- domains->domains[i] = &pd->genpd;
- /* Parent should be on the very first entry of info->pm_domains[]. */
- if (!i) {
- parent = &pd->genpd;
- continue;
- }
-
- ret = pm_genpd_add_subdomain(parent, &pd->genpd);
- if (ret)
- return ret;
- }
-
- ret = of_genpd_add_provider_onecell(np, &domains->onecell_data);
- if (ret)
- return ret;
-
- return 0;
+ return of_genpd_add_provider_simple(np, genpd);
}
static int __init rzg2l_cpg_probe(struct platform_device *pdev)
@@ -1875,6 +1915,13 @@ static int __init rzg2l_cpg_probe(struct platform_device *pdev)
for (i = 0; i < info->num_mod_clks; i++)
rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);
+ /*
+	 * Initialize MSTOP after all the clocks have been registered to avoid
+ * invalid reference counting when multiple clocks (critical,
+ * non-critical) share the same MSTOP.
+ */
+ rzg2l_mod_clock_init_mstop(priv);
+
error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
if (error)
return error;
@@ -1883,7 +1930,7 @@ static int __init rzg2l_cpg_probe(struct platform_device *pdev)
if (error)
return error;
- error = rzg2l_cpg_add_pm_domains(priv);
+ error = rzg2l_cpg_add_clk_domain(priv);
if (error)
return error;
@@ -1891,9 +1938,23 @@ static int __init rzg2l_cpg_probe(struct platform_device *pdev)
if (error)
return error;
+ debugfs_create_file("mstop", 0444, NULL, priv, &rzg2l_mod_clock_mstop_fops);
+ return 0;
+}
+
+static int rzg2l_cpg_resume(struct device *dev)
+{
+ struct rzg2l_cpg_priv *priv = dev_get_drvdata(dev);
+
+ rzg2l_mod_clock_init_mstop(priv);
+
return 0;
}
+static const struct dev_pm_ops rzg2l_cpg_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, rzg2l_cpg_resume)
+};
+
static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
{
@@ -1932,6 +1993,7 @@ static struct platform_driver rzg2l_cpg_driver = {
.driver = {
.name = "rzg2l-cpg",
.of_match_table = rzg2l_cpg_match,
+ .pm = pm_sleep_ptr(&rzg2l_cpg_pm_ops),
},
};
diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
index b6eece5ffa20..55e815be16c8 100644
--- a/drivers/clk/renesas/rzg2l-cpg.h
+++ b/drivers/clk/renesas/rzg2l-cpg.h
@@ -34,6 +34,7 @@
#define CPG_BUS_PERI_COM_MSTOP (0xB6C)
#define CPG_BUS_PERI_CPU_MSTOP (0xB70)
#define CPG_BUS_PERI_DDR_MSTOP (0xB74)
+#define CPG_BUS_PERI_VIDEO_MSTOP (0xB78)
#define CPG_BUS_REG0_MSTOP (0xB7C)
#define CPG_BUS_REG1_MSTOP (0xB80)
#define CPG_BUS_TZCDDR_MSTOP (0xB84)
@@ -82,6 +83,8 @@
#define SEL_PLL6_2 SEL_PLL_PACK(CPG_PL6_ETH_SSEL, 0, 1)
#define SEL_GPU2 SEL_PLL_PACK(CPG_PL6_SSEL, 12, 1)
+#define MSTOP(name, bitmask) ((CPG_##name##_MSTOP) << 16 | (bitmask))
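MSTOP() packs the register offset into the upper 16 bits and the bit mask into the lower 16; MSTOP_OFF()/MSTOP_MASK() in rzg2l-cpg.c unpack them again with FIELD_GET(). A worked example (illustrative only):

    u32 conf = MSTOP(BUS_REG1, BIT(1));  /* (0xB80 << 16) | 0x2 = 0x0B800002 */
    u32 off = MSTOP_OFF(conf);           /* 0xB80 = CPG_BUS_REG1_MSTOP */
    u32 mask = MSTOP_MASK(conf);         /* BIT(1) */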
+
#define EXTAL_FREQ_IN_MEGA_HZ (24)
/**
@@ -201,6 +204,7 @@ enum clk_types {
* @name: handle between common and hardware-specific interfaces
* @id: clock index in array containing all Core and Module Clocks
* @parent: id of parent clock
+ * @mstop_conf: MSTOP configuration
* @off: register offset
* @bit: ON/MON bit
* @is_coupled: flag to indicate coupled clock
@@ -209,26 +213,28 @@ struct rzg2l_mod_clk {
const char *name;
unsigned int id;
unsigned int parent;
+ u32 mstop_conf;
u16 off;
u8 bit;
bool is_coupled;
};
-#define DEF_MOD_BASE(_name, _id, _parent, _off, _bit, _is_coupled) \
+#define DEF_MOD_BASE(_name, _id, _parent, _off, _bit, _mstop_conf, _is_coupled) \
{ \
.name = _name, \
.id = MOD_CLK_BASE + (_id), \
.parent = (_parent), \
+ .mstop_conf = (_mstop_conf), \
.off = (_off), \
.bit = (_bit), \
.is_coupled = (_is_coupled), \
}
-#define DEF_MOD(_name, _id, _parent, _off, _bit) \
- DEF_MOD_BASE(_name, _id, _parent, _off, _bit, false)
+#define DEF_MOD(_name, _id, _parent, _off, _bit, _mstop_conf) \
+ DEF_MOD_BASE(_name, _id, _parent, _off, _bit, _mstop_conf, false)
-#define DEF_COUPLED(_name, _id, _parent, _off, _bit) \
- DEF_MOD_BASE(_name, _id, _parent, _off, _bit, true)
+#define DEF_COUPLED(_name, _id, _parent, _off, _bit, _mstop_conf) \
+ DEF_MOD_BASE(_name, _id, _parent, _off, _bit, _mstop_conf, true)
/**
* struct rzg2l_reset - Reset definitions
@@ -253,51 +259,6 @@ struct rzg2l_reset {
DEF_RST_MON(_id, _off, _bit, -1)
/**
- * struct rzg2l_cpg_reg_conf - RZ/G2L register configuration data structure
- * @off: register offset
- * @mask: register mask
- */
-struct rzg2l_cpg_reg_conf {
- u16 off;
- u16 mask;
-};
-
-#define DEF_REG_CONF(_off, _mask) ((struct rzg2l_cpg_reg_conf) { .off = (_off), .mask = (_mask) })
-
-/**
- * struct rzg2l_cpg_pm_domain_conf - PM domain configuration data structure
- * @mstop: MSTOP register configuration
- */
-struct rzg2l_cpg_pm_domain_conf {
- struct rzg2l_cpg_reg_conf mstop;
-};
-
-/**
- * struct rzg2l_cpg_pm_domain_init_data - PM domain init data
- * @name: PM domain name
- * @conf: PM domain configuration
- * @genpd_flags: genpd flags (see GENPD_FLAG_*)
- * @id: PM domain ID (similar to the ones defined in
- * include/dt-bindings/clock/<soc-id>-cpg.h)
- */
-struct rzg2l_cpg_pm_domain_init_data {
- const char * const name;
- struct rzg2l_cpg_pm_domain_conf conf;
- u32 genpd_flags;
- u16 id;
-};
-
-#define DEF_PD(_name, _id, _mstop_conf, _flags) \
- { \
- .name = (_name), \
- .id = (_id), \
- .conf = { \
- .mstop = (_mstop_conf), \
- }, \
- .genpd_flags = (_flags), \
- }
-
-/**
* struct rzg2l_cpg_info - SoC-specific CPG Description
*
* @core_clks: Array of Core Clock definitions
@@ -315,8 +276,6 @@ struct rzg2l_cpg_pm_domain_init_data {
* @crit_mod_clks: Array with Module Clock IDs of critical clocks that
* should not be disabled without a knowledgeable driver
* @num_crit_mod_clks: Number of entries in crit_mod_clks[]
- * @pm_domains: PM domains init data array
- * @num_pm_domains: Number of PM domains
* @has_clk_mon_regs: Flag indicating whether the SoC has CLK_MON registers
*/
struct rzg2l_cpg_info {
@@ -343,10 +302,6 @@ struct rzg2l_cpg_info {
const unsigned int *crit_mod_clks;
unsigned int num_crit_mod_clks;
- /* Power domain. */
- const struct rzg2l_cpg_pm_domain_init_data *pm_domains;
- unsigned int num_pm_domains;
-
bool has_clk_mon_regs;
};
diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c
index bcc496e8cbcd..3f6299b9fec0 100644
--- a/drivers/clk/renesas/rzv2h-cpg.c
+++ b/drivers/clk/renesas/rzv2h-cpg.c
@@ -14,9 +14,14 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/iopoll.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+#include <linux/math64.h>
+#include <linux/minmax.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -26,6 +31,7 @@
#include <linux/refcount.h>
#include <linux/reset-controller.h>
#include <linux/string_choices.h>
+#include <linux/units.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -47,13 +53,15 @@
#define CPG_PLL_STBY(x) ((x))
#define CPG_PLL_STBY_RESETB BIT(0)
+#define CPG_PLL_STBY_SSC_EN BIT(2)
#define CPG_PLL_STBY_RESETB_WEN BIT(16)
+#define CPG_PLL_STBY_SSC_EN_WEN BIT(18)
#define CPG_PLL_CLK1(x) ((x) + 0x004)
-#define CPG_PLL_CLK1_KDIV(x) ((s16)FIELD_GET(GENMASK(31, 16), (x)))
-#define CPG_PLL_CLK1_MDIV(x) FIELD_GET(GENMASK(15, 6), (x))
-#define CPG_PLL_CLK1_PDIV(x) FIELD_GET(GENMASK(5, 0), (x))
+#define CPG_PLL_CLK1_KDIV GENMASK(31, 16)
+#define CPG_PLL_CLK1_MDIV GENMASK(15, 6)
+#define CPG_PLL_CLK1_PDIV GENMASK(5, 0)
#define CPG_PLL_CLK2(x) ((x) + 0x008)
-#define CPG_PLL_CLK2_SDIV(x) FIELD_GET(GENMASK(2, 0), (x))
+#define CPG_PLL_CLK2_SDIV GENMASK(2, 0)
#define CPG_PLL_MON(x) ((x) + 0x010)
#define CPG_PLL_MON_RESETB BIT(0)
#define CPG_PLL_MON_LOCK BIT(4)
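With the field accessors replaced by bare GENMASK() constants, the same
definitions now serve both the read path (FIELD_GET) and the write path
(FIELD_PREP). A minimal sketch of the round trip, using arbitrary values;
KDIV is a signed 16-bit field, so the casts preserve and restore its sign:

	u32 clk1 = FIELD_PREP(CPG_PLL_CLK1_MDIV, 100) |
		   FIELD_PREP(CPG_PLL_CLK1_PDIV, 1) |
		   FIELD_PREP(CPG_PLL_CLK1_KDIV, (u16)-100);
	s16 k = (s16)FIELD_GET(CPG_PLL_CLK1_KDIV, clk1);	/* -100 */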
@@ -65,6 +73,22 @@
#define CPG_CLKSTATUS0 (0x700)
+/* On RZ/G3E SoC we have two DSI PLLs */
+#define MAX_CPG_DSI_PLL 2
+
+/**
+ * struct rzv2h_pll_dsi_info - PLL DSI information, holds the limits and parameters
+ *
+ * @pll_dsi_limits: PLL DSI parameters limits
+ * @pll_dsi_parameters: Calculated PLL DSI parameters
+ * @req_pll_dsi_rate: Requested PLL DSI rate
+ */
+struct rzv2h_pll_dsi_info {
+ const struct rzv2h_pll_limits *pll_dsi_limits;
+ struct rzv2h_pll_div_pars pll_dsi_parameters;
+ unsigned long req_pll_dsi_rate;
+};
+
/**
* struct rzv2h_cpg_priv - Clock Pulse Generator Private Data
*
@@ -77,8 +101,10 @@
* @resets: Array of resets
* @num_resets: Number of Module Resets in info->resets[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
+ * @ff_mod_status_ops: Fixed Factor Module Status Clock operations
* @mstop_count: Array of mstop values
* @rcdev: Reset controller entity
+ * @pll_dsi_info: Array of PLL DSI information, holds the limits and parameters
*/
struct rzv2h_cpg_priv {
struct device *dev;
@@ -92,16 +118,19 @@ struct rzv2h_cpg_priv {
unsigned int num_resets;
unsigned int last_dt_core_clk;
+ struct clk_ops *ff_mod_status_ops;
+
atomic_t *mstop_count;
struct reset_controller_dev rcdev;
+
+ struct rzv2h_pll_dsi_info pll_dsi_info[MAX_CPG_DSI_PLL];
};
#define rcdev_to_priv(x) container_of(x, struct rzv2h_cpg_priv, rcdev)
struct pll_clk {
struct rzv2h_cpg_priv *priv;
- void __iomem *base;
struct clk_hw hw;
struct pll pll;
};
@@ -119,6 +148,7 @@ struct pll_clk {
* @on_bit: ON/MON bit
* @mon_index: monitor register offset
* @mon_bit: monitor bit
+ * @ext_clk_mux_index: mux index for external clock source, or -1 if internal
*/
struct mod_clock {
struct rzv2h_cpg_priv *priv;
@@ -129,6 +159,7 @@ struct mod_clock {
u8 on_bit;
s8 mon_index;
u8 mon_bit;
+ s8 ext_clk_mux_index;
};
#define to_mod_clock(_hw) container_of(_hw, struct mod_clock, hw)
@@ -148,6 +179,476 @@ struct ddiv_clk {
#define to_ddiv_clock(_div) container_of(_div, struct ddiv_clk, div)
+/**
+ * struct rzv2h_ff_mod_status_clk - Fixed Factor Module Status Clock
+ *
+ * @priv: CPG private data
+ * @conf: fixed mod configuration
+ * @fix: fixed factor clock
+ */
+struct rzv2h_ff_mod_status_clk {
+ struct rzv2h_cpg_priv *priv;
+ struct fixed_mod_conf conf;
+ struct clk_fixed_factor fix;
+};
+
+#define to_rzv2h_ff_mod_status_clk(_hw) \
+ container_of(_hw, struct rzv2h_ff_mod_status_clk, fix.hw)
+
+/**
+ * struct rzv2h_plldsi_div_clk - PLL DSI DDIV clock
+ *
+ * @dtable: divider table
+ * @priv: CPG private data
+ * @hw: divider clk
+ * @ddiv: divider configuration
+ */
+struct rzv2h_plldsi_div_clk {
+ const struct clk_div_table *dtable;
+ struct rzv2h_cpg_priv *priv;
+ struct clk_hw hw;
+ struct ddiv ddiv;
+};
+
+#define to_plldsi_div_clk(_hw) \
+ container_of(_hw, struct rzv2h_plldsi_div_clk, hw)
+
+#define RZ_V2H_OSC_CLK_IN_MEGA (24 * MEGA)
+#define RZV2H_MAX_DIV_TABLES (16)
+
+/**
+ * rzv2h_get_pll_pars - Finds the best combination of PLL parameters
+ * for a given frequency.
+ *
+ * @limits: Pointer to the structure containing the limits for the PLL parameters
+ * @pars: Pointer to the structure where the best calculated PLL parameter values
+ * will be stored
+ * @freq_millihz: Target output frequency in millihertz
+ *
+ * This function calculates the best set of PLL parameters (M, K, P, S) to achieve
+ * the desired frequency.
+ * There is no direct formula to calculate the PLL parameters, as they form an
+ * underdetermined system of equations; this function therefore uses an iterative
+ * approach to determine the best solution. The best solution is the one that
+ * minimizes the error (desired frequency - actual frequency).
+ *
+ * Return: true if a valid set of parameter values is found, false otherwise.
+ */
+bool rzv2h_get_pll_pars(const struct rzv2h_pll_limits *limits,
+ struct rzv2h_pll_pars *pars, u64 freq_millihz)
+{
+ u64 fout_min_millihz = mul_u32_u32(limits->fout.min, MILLI);
+ u64 fout_max_millihz = mul_u32_u32(limits->fout.max, MILLI);
+ struct rzv2h_pll_pars p, best;
+
+ if (freq_millihz > fout_max_millihz ||
+ freq_millihz < fout_min_millihz)
+ return false;
+
+ /* Initialize best error to maximum possible value */
+ best.error_millihz = S64_MAX;
+
+ for (p.p = limits->p.min; p.p <= limits->p.max; p.p++) {
+ u32 fref = RZ_V2H_OSC_CLK_IN_MEGA / p.p;
+ u16 divider;
+
+ for (divider = 1 << limits->s.min, p.s = limits->s.min;
+ p.s <= limits->s.max; p.s++, divider <<= 1) {
+ for (p.m = limits->m.min; p.m <= limits->m.max; p.m++) {
+ u64 output_m, output_k_range;
+ s64 pll_k, output_k;
+ u64 fvco, output;
+
+ /*
+ * The frequency generated by the PLL + divider
+ * is calculated as follows:
+ *
+ * With:
+ * Freq = Ffout = Ffvco / 2^(pll_s)
+ * Ffvco = (pll_m + (pll_k / 65536)) * Ffref
+ * Ffref = 24MHz / pll_p
+ *
+ * Freq can also be rewritten as:
+ * Freq = Ffvco / 2^(pll_s)
+ * = ((pll_m + (pll_k / 65536)) * Ffref) / 2^(pll_s)
+ * = (pll_m * Ffref) / 2^(pll_s) + ((pll_k / 65536) * Ffref) / 2^(pll_s)
+ * = output_m + output_k
+ *
+ * Every parameter has been determined at this
+ * point, but pll_k.
+ *
+ * Considering that:
+ * limits->k.min <= pll_k <= limits->k.max
+ * Then:
+ * -0.5 <= (pll_k / 65536) < 0.5
+ * Therefore:
+ * -Ffref / (2 * 2^(pll_s)) <= output_k < Ffref / (2 * 2^(pll_s))
+ */
+
+ /* Compute output M component (in mHz) */
+ output_m = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(p.m, fref) * MILLI,
+ divider);
+ /* Compute range for output K (in mHz) */
+ output_k_range = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(fref, MILLI),
+ 2 * divider);
+ /*
+ * No point in continuing if we can't achieve
+ * the desired frequency
+ */
+ if (freq_millihz < (output_m - output_k_range) ||
+ freq_millihz >= (output_m + output_k_range)) {
+ continue;
+ }
+
+ /*
+ * Compute the K component
+ *
+ * Since:
+ * Freq = output_m + output_k
+ * Then:
+ * output_k = Freq - output_m
+ * = ((pll_k / 65536) * Ffref) / 2^(pll_s)
+ * Therefore:
+ * pll_k = (output_k * 65536 * 2^(pll_s)) / Ffref
+ */
+ output_k = freq_millihz - output_m;
+ pll_k = div_s64(output_k * 65536ULL * divider,
+ fref);
+ pll_k = DIV_S64_ROUND_CLOSEST(pll_k, MILLI);
+
+ /* Validate K value within allowed limits */
+ if (pll_k < limits->k.min ||
+ pll_k > limits->k.max)
+ continue;
+
+ p.k = pll_k;
+
+ /* Compute (Ffvco * 65536) */
+ fvco = mul_u32_u32(p.m * 65536 + p.k, fref);
+ if (fvco < mul_u32_u32(limits->fvco.min, 65536) ||
+ fvco > mul_u32_u32(limits->fvco.max, 65536))
+ continue;
+
+ /* PLL_M component of (output * 65536 * PLL_P) */
+ output = mul_u32_u32(p.m * 65536, RZ_V2H_OSC_CLK_IN_MEGA);
+ /* PLL_K component of (output * 65536 * PLL_P) */
+ output += p.k * RZ_V2H_OSC_CLK_IN_MEGA;
+ /* Make it in mHz */
+ output *= MILLI;
+ output = DIV_U64_ROUND_CLOSEST(output, 65536 * p.p * divider);
+
+ /* Check output frequency against limits */
+ if (output < fout_min_millihz ||
+ output > fout_max_millihz)
+ continue;
+
+ p.error_millihz = freq_millihz - output;
+ p.freq_millihz = output;
+
+ /* If an exact match is found, return immediately */
+ if (p.error_millihz == 0) {
+ *pars = p;
+ return true;
+ }
+
+ /* Update best match if error is smaller */
+ if (abs(best.error_millihz) > abs(p.error_millihz))
+ best = p;
+ }
+ }
+ }
+
+ /* If no valid parameters were found, return false */
+ if (best.error_millihz == S64_MAX)
+ return false;
+
+ *pars = best;
+ return true;
+}
+EXPORT_SYMBOL_NS_GPL(rzv2h_get_pll_pars, "RZV2H_CPG");
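A worked instance of the formula in the comment above (illustrative
arithmetic, not part of the patch): with pll_p = 1, pll_s = 1, pll_m = 100
and pll_k = 0:

	Ffref = 24 MHz / pll_p           = 24 MHz
	Ffvco = (100 + 0/65536) * 24 MHz = 2400 MHz
	Fout  = Ffvco / 2^pll_s          = 1200 MHz

A non-zero pll_k then shifts the output by (pll_k / 65536) * Ffref / 2^pll_s,
i.e. by at most +/-12 MHz / 2^pll_s here, which is exactly the output_k_range
bound the inner loop checks before solving for pll_k.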
+
+/*
+ * rzv2h_get_pll_divs_pars - Finds the best combination of PLL parameters
+ * and divider value for a given frequency.
+ *
+ * @limits: Pointer to the structure containing the limits for the PLL parameters
+ * @pars: Pointer to the structure where the best calculated PLL parameters and
+ * divider values will be stored
+ * @table: Pointer to the array of valid divider values
+ * @table_size: Size of the divider values array
+ * @freq_millihz: Target output frequency in millihertz
+ *
+ * This function calculates the best set of PLL parameters (M, K, P, S) and divider
+ * value to achieve the desired frequency. See rzv2h_get_pll_pars() for more details
+ * on how the PLL parameters are calculated.
+ *
+ * freq_millihz is the desired frequency generated by the PLL followed by
+ * a gear.
+ *
+ * Return: true if a valid set of parameter values is found, false otherwise.
+ */
+bool rzv2h_get_pll_divs_pars(const struct rzv2h_pll_limits *limits,
+ struct rzv2h_pll_div_pars *pars,
+ const u8 *table, u8 table_size, u64 freq_millihz)
+{
+ struct rzv2h_pll_div_pars p, best;
+
+ best.div.error_millihz = S64_MAX;
+ p.div.error_millihz = S64_MAX;
+ for (unsigned int i = 0; i < table_size; i++) {
+ if (!rzv2h_get_pll_pars(limits, &p.pll, freq_millihz * table[i]))
+ continue;
+
+ p.div.divider_value = table[i];
+ p.div.freq_millihz = DIV_U64_ROUND_CLOSEST(p.pll.freq_millihz, table[i]);
+ p.div.error_millihz = freq_millihz - p.div.freq_millihz;
+
+ if (p.div.error_millihz == 0) {
+ *pars = p;
+ return true;
+ }
+
+ if (abs(best.div.error_millihz) > abs(p.div.error_millihz))
+ best = p;
+ }
+
+ if (best.div.error_millihz == S64_MAX)
+ return false;
+
+ *pars = best;
+ return true;
+}
+EXPORT_SYMBOL_NS_GPL(rzv2h_get_pll_divs_pars, "RZV2H_CPG");
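A hedged usage sketch (the divider list and the consumer below are invented
for illustration):

	static const u8 dsi_divs[] = { 1, 2, 4, 8 };	/* hypothetical */
	struct rzv2h_pll_div_pars pars;

	/* limits points at SoC-provided struct rzv2h_pll_limits data */
	if (rzv2h_get_pll_divs_pars(limits, &pars, dsi_divs,
				    ARRAY_SIZE(dsi_divs),
				    148500000ULL * MILLI))	/* 148.5 MHz in mHz */
		program_pll_and_divider(&pars);		/* hypothetical */

For each candidate divider d the helper simply asks rzv2h_get_pll_pars() for
rate * d and keeps the (PLL, divider) pair with the smallest residual error.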
+
+static unsigned long rzv2h_cpg_plldsi_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rzv2h_plldsi_div_clk *dsi_div = to_plldsi_div_clk(hw);
+ struct rzv2h_cpg_priv *priv = dsi_div->priv;
+ struct ddiv ddiv = dsi_div->ddiv;
+ u32 div;
+
+ div = readl(priv->base + ddiv.offset);
+ div >>= ddiv.shift;
+ div &= clk_div_mask(ddiv.width);
+ div = dsi_div->dtable[div].div;
+
+ return DIV_ROUND_CLOSEST_ULL(parent_rate, div);
+}
+
+static int rzv2h_cpg_plldsi_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct rzv2h_plldsi_div_clk *dsi_div = to_plldsi_div_clk(hw);
+ struct pll_clk *pll_clk = to_pll(clk_hw_get_parent(hw));
+ struct rzv2h_cpg_priv *priv = dsi_div->priv;
+ u8 table[RZV2H_MAX_DIV_TABLES] = { 0 };
+ struct rzv2h_pll_div_pars *dsi_params;
+ struct rzv2h_pll_dsi_info *dsi_info;
+ const struct clk_div_table *div;
+ unsigned int i = 0;
+ u64 rate_millihz;
+
+ dsi_info = &priv->pll_dsi_info[pll_clk->pll.instance];
+ dsi_params = &dsi_info->pll_dsi_parameters;
+
+ rate_millihz = mul_u32_u32(req->rate, MILLI);
+ if (rate_millihz == dsi_params->div.error_millihz + dsi_params->div.freq_millihz)
+ goto exit_determine_rate;
+
+ for (div = dsi_div->dtable; div->div; div++) {
+ if (i >= RZV2H_MAX_DIV_TABLES)
+ return -EINVAL;
+ table[i++] = div->div;
+ }
+
+ if (!rzv2h_get_pll_divs_pars(dsi_info->pll_dsi_limits, dsi_params, table, i,
+ rate_millihz)) {
+ dev_err(priv->dev, "failed to determine rate for req->rate: %lu\n",
+ req->rate);
+ return -EINVAL;
+ }
+
+exit_determine_rate:
+ req->rate = DIV_ROUND_CLOSEST_ULL(dsi_params->div.freq_millihz, MILLI);
+ req->best_parent_rate = req->rate * dsi_params->div.divider_value;
+ dsi_info->req_pll_dsi_rate = req->best_parent_rate;
+
+ return 0;
+}
+
+static int rzv2h_cpg_plldsi_div_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct rzv2h_plldsi_div_clk *dsi_div = to_plldsi_div_clk(hw);
+ struct pll_clk *pll_clk = to_pll(clk_hw_get_parent(hw));
+ struct rzv2h_cpg_priv *priv = dsi_div->priv;
+ struct rzv2h_pll_div_pars *dsi_params;
+ struct rzv2h_pll_dsi_info *dsi_info;
+ struct ddiv ddiv = dsi_div->ddiv;
+ const struct clk_div_table *clkt;
+ bool divider_found = false;
+ u32 val, shift;
+
+ dsi_info = &priv->pll_dsi_info[pll_clk->pll.instance];
+ dsi_params = &dsi_info->pll_dsi_parameters;
+
+ for (clkt = dsi_div->dtable; clkt->div; clkt++) {
+ if (clkt->div == dsi_params->div.divider_value) {
+ divider_found = true;
+ break;
+ }
+ }
+
+ if (!divider_found)
+ return -EINVAL;
+
+ shift = ddiv.shift;
+ val = readl(priv->base + ddiv.offset) | DDIV_DIVCTL_WEN(shift);
+ val &= ~(clk_div_mask(ddiv.width) << shift);
+ val |= clkt->val << shift;
+ writel(val, priv->base + ddiv.offset);
+
+ return 0;
+}
+
+static const struct clk_ops rzv2h_cpg_plldsi_div_ops = {
+ .recalc_rate = rzv2h_cpg_plldsi_div_recalc_rate,
+ .determine_rate = rzv2h_cpg_plldsi_div_determine_rate,
+ .set_rate = rzv2h_cpg_plldsi_div_set_rate,
+};
+
+static struct clk * __init
+rzv2h_cpg_plldsi_div_clk_register(const struct cpg_core_clk *core,
+ struct rzv2h_cpg_priv *priv)
+{
+ struct rzv2h_plldsi_div_clk *clk_hw_data;
+ struct clk **clks = priv->clks;
+ struct clk_init_data init;
+ const struct clk *parent;
+ const char *parent_name;
+ struct clk_hw *clk_hw;
+ int ret;
+
+ parent = clks[core->parent];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
+ if (!clk_hw_data)
+ return ERR_PTR(-ENOMEM);
+
+ clk_hw_data->priv = priv;
+ clk_hw_data->ddiv = core->cfg.ddiv;
+ clk_hw_data->dtable = core->dtable;
+
+ parent_name = __clk_get_name(parent);
+ init.name = core->name;
+ init.ops = &rzv2h_cpg_plldsi_div_ops;
+ init.flags = core->flag;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clk_hw = &clk_hw_data->hw;
+ clk_hw->init = &init;
+
+ ret = devm_clk_hw_register(priv->dev, clk_hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk_hw->clk;
+}
+
+static int rzv2h_cpg_plldsi_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct pll_clk *pll_clk = to_pll(hw);
+ struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ struct rzv2h_pll_dsi_info *dsi_info;
+ u64 rate_millihz;
+
+ dsi_info = &priv->pll_dsi_info[pll_clk->pll.instance];
+ /* check if the divider has already invoked the algorithm */
+ if (req->rate == dsi_info->req_pll_dsi_rate)
+ return 0;
+
+ /* If the req->rate doesn't match we do the calculation assuming there is no divider */
+ rate_millihz = mul_u32_u32(req->rate, MILLI);
+ if (!rzv2h_get_pll_pars(dsi_info->pll_dsi_limits,
+ &dsi_info->pll_dsi_parameters.pll, rate_millihz)) {
+ dev_err(priv->dev,
+ "failed to determine rate for req->rate: %lu\n",
+ req->rate);
+ return -EINVAL;
+ }
+
+ req->rate = DIV_ROUND_CLOSEST_ULL(dsi_info->pll_dsi_parameters.pll.freq_millihz, MILLI);
+ dsi_info->req_pll_dsi_rate = req->rate;
+
+ return 0;
+}
+
+static int rzv2h_cpg_pll_set_rate(struct pll_clk *pll_clk,
+ struct rzv2h_pll_pars *params,
+ bool ssc_disable)
+{
+ struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ u16 offset = pll_clk->pll.offset;
+ u32 val;
+ int ret;
+
+ /* Put PLL into standby mode */
+ writel(CPG_PLL_STBY_RESETB_WEN, priv->base + CPG_PLL_STBY(offset));
+ ret = readl_poll_timeout_atomic(priv->base + CPG_PLL_MON(offset),
+ val, !(val & CPG_PLL_MON_LOCK),
+ 100, 2000);
+ if (ret) {
+ dev_err(priv->dev, "Failed to put PLLDSI into standby mode");
+ return ret;
+ }
+
+ /* Output clock setting 1 */
+ writel(FIELD_PREP(CPG_PLL_CLK1_KDIV, (u16)params->k) |
+ FIELD_PREP(CPG_PLL_CLK1_MDIV, params->m) |
+ FIELD_PREP(CPG_PLL_CLK1_PDIV, params->p),
+ priv->base + CPG_PLL_CLK1(offset));
+
+ /* Output clock setting 2 */
+ val = readl(priv->base + CPG_PLL_CLK2(offset));
+ writel((val & ~CPG_PLL_CLK2_SDIV) | FIELD_PREP(CPG_PLL_CLK2_SDIV, params->s),
+ priv->base + CPG_PLL_CLK2(offset));
+
+ /* Put PLL to normal mode */
+ if (ssc_disable)
+ val = CPG_PLL_STBY_SSC_EN_WEN;
+ else
+ val = CPG_PLL_STBY_SSC_EN_WEN | CPG_PLL_STBY_SSC_EN;
+ writel(val | CPG_PLL_STBY_RESETB_WEN | CPG_PLL_STBY_RESETB,
+ priv->base + CPG_PLL_STBY(offset));
+
+ /* PLL normal mode transition, output clock stability check */
+ ret = readl_poll_timeout_atomic(priv->base + CPG_PLL_MON(offset),
+ val, (val & CPG_PLL_MON_LOCK),
+ 100, 2000);
+ if (ret) {
+ dev_err(priv->dev, "Failed to put PLLDSI into normal mode");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rzv2h_cpg_plldsi_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct pll_clk *pll_clk = to_pll(hw);
+ struct rzv2h_pll_dsi_info *dsi_info;
+ struct rzv2h_cpg_priv *priv = pll_clk->priv;
+
+ dsi_info = &priv->pll_dsi_info[pll_clk->pll.instance];
+
+ return rzv2h_cpg_pll_set_rate(pll_clk, &dsi_info->pll_dsi_parameters.pll, true);
+}
+
static int rzv2h_cpg_pll_clk_is_enabled(struct clk_hw *hw)
{
struct pll_clk *pll_clk = to_pll(hw);
@@ -211,12 +712,19 @@ static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
clk1 = readl(priv->base + CPG_PLL_CLK1(pll.offset));
clk2 = readl(priv->base + CPG_PLL_CLK2(pll.offset));
- rate = mul_u64_u32_shr(parent_rate, (CPG_PLL_CLK1_MDIV(clk1) << 16) +
- CPG_PLL_CLK1_KDIV(clk1), 16 + CPG_PLL_CLK2_SDIV(clk2));
+ rate = mul_u64_u32_shr(parent_rate, (FIELD_GET(CPG_PLL_CLK1_MDIV, clk1) << 16) +
+ (s16)FIELD_GET(CPG_PLL_CLK1_KDIV, clk1),
+ 16 + FIELD_GET(CPG_PLL_CLK2_SDIV, clk2));
- return DIV_ROUND_CLOSEST_ULL(rate, CPG_PLL_CLK1_PDIV(clk1));
+ return DIV_ROUND_CLOSEST_ULL(rate, FIELD_GET(CPG_PLL_CLK1_PDIV, clk1));
}
+static const struct clk_ops rzv2h_cpg_plldsi_ops = {
+ .recalc_rate = rzv2h_cpg_pll_clk_recalc_rate,
+ .determine_rate = rzv2h_cpg_plldsi_determine_rate,
+ .set_rate = rzv2h_cpg_plldsi_set_rate,
+};
+
static const struct clk_ops rzv2h_cpg_pll_ops = {
.is_enabled = rzv2h_cpg_pll_clk_is_enabled,
.enable = rzv2h_cpg_pll_clk_enable,
@@ -228,7 +736,6 @@ rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
struct rzv2h_cpg_priv *priv,
const struct clk_ops *ops)
{
- void __iomem *base = priv->base;
struct device *dev = priv->dev;
struct clk_init_data init;
const struct clk *parent;
@@ -244,6 +751,10 @@ rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
if (!pll_clk)
return ERR_PTR(-ENOMEM);
+ if (core->type == CLK_TYPE_PLLDSI)
+ priv->pll_dsi_info[core->cfg.pll.instance].pll_dsi_limits =
+ core->cfg.pll.limits;
+
parent_name = __clk_get_name(parent);
init.name = core->name;
init.ops = ops;
@@ -253,7 +764,6 @@ rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
pll_clk->hw.init = &init;
pll_clk->pll = core->cfg.pll;
- pll_clk->base = base;
pll_clk->priv = priv;
ret = devm_clk_hw_register(dev, &pll_clk->hw);
@@ -276,15 +786,6 @@ static unsigned long rzv2h_ddiv_recalc_rate(struct clk_hw *hw,
divider->flags, divider->width);
}
-static long rzv2h_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct clk_divider *divider = to_clk_divider(hw);
-
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
-}
-
static int rzv2h_ddiv_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
@@ -341,7 +842,6 @@ ddiv_timeout:
static const struct clk_ops rzv2h_ddiv_clk_divider_ops = {
.recalc_rate = rzv2h_ddiv_recalc_rate,
- .round_rate = rzv2h_ddiv_round_rate,
.determine_rate = rzv2h_ddiv_determine_rate,
.set_rate = rzv2h_ddiv_set_rate,
};
@@ -381,6 +881,7 @@ rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
init.ops = &rzv2h_ddiv_clk_divider_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
+ init.flags = CLK_SET_RATE_PARENT;
ddiv->priv = priv;
ddiv->mon = cfg_ddiv.monbit;
@@ -418,6 +919,65 @@ rzv2h_cpg_mux_clk_register(const struct cpg_core_clk *core,
return clk_hw->clk;
}
+static int
+rzv2h_clk_ff_mod_status_is_enabled(struct clk_hw *hw)
+{
+ struct rzv2h_ff_mod_status_clk *fix = to_rzv2h_ff_mod_status_clk(hw);
+ struct rzv2h_cpg_priv *priv = fix->priv;
+ u32 offset = GET_CLK_MON_OFFSET(fix->conf.mon_index);
+ u32 bitmask = BIT(fix->conf.mon_bit);
+ u32 val;
+
+ val = readl(priv->base + offset);
+ return !!(val & bitmask);
+}
+
+static struct clk * __init
+rzv2h_cpg_fixed_mod_status_clk_register(const struct cpg_core_clk *core,
+ struct rzv2h_cpg_priv *priv)
+{
+ struct rzv2h_ff_mod_status_clk *clk_hw_data;
+ struct clk_init_data init = { };
+ struct clk_fixed_factor *fix;
+ const struct clk *parent;
+ const char *parent_name;
+ int ret;
+
+ WARN_DEBUG(core->parent >= priv->num_core_clks);
+ parent = priv->clks[core->parent];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ parent_name = __clk_get_name(parent);
+
+ clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
+ if (!clk_hw_data)
+ return ERR_PTR(-ENOMEM);
+
+ clk_hw_data->priv = priv;
+ clk_hw_data->conf = core->cfg.fixed_mod;
+
+ init.name = core->name;
+ init.ops = priv->ff_mod_status_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ fix = &clk_hw_data->fix;
+ fix->hw.init = &init;
+ fix->mult = core->mult;
+ fix->div = core->div;
+
+ ret = devm_clk_hw_register(priv->dev, &clk_hw_data->fix.hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk_hw_data->fix.hw.clk;
+}
+
static struct clk
*rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
void *data)
@@ -496,6 +1056,20 @@ rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
else
clk = clk_hw->clk;
break;
+ case CLK_TYPE_FF_MOD_STATUS:
+ if (!priv->ff_mod_status_ops) {
+ priv->ff_mod_status_ops =
+ devm_kzalloc(dev, sizeof(*priv->ff_mod_status_ops), GFP_KERNEL);
+ if (!priv->ff_mod_status_ops) {
+ clk = ERR_PTR(-ENOMEM);
+ goto fail;
+ }
+ memcpy(priv->ff_mod_status_ops, &clk_fixed_factor_ops,
+ sizeof(const struct clk_ops));
+ priv->ff_mod_status_ops->is_enabled = rzv2h_clk_ff_mod_status_is_enabled;
+ }
+ clk = rzv2h_cpg_fixed_mod_status_clk_register(core, priv);
+ break;
case CLK_TYPE_PLL:
clk = rzv2h_cpg_pll_clk_register(core, priv, &rzv2h_cpg_pll_ops);
break;
@@ -505,11 +1079,17 @@ rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
case CLK_TYPE_SMUX:
clk = rzv2h_cpg_mux_clk_register(core, priv);
break;
+ case CLK_TYPE_PLLDSI:
+ clk = rzv2h_cpg_pll_clk_register(core, priv, &rzv2h_cpg_plldsi_ops);
+ break;
+ case CLK_TYPE_PLLDSI_DIV:
+ clk = rzv2h_cpg_plldsi_div_clk_register(core, priv);
+ break;
default:
goto fail;
}
- if (IS_ERR_OR_NULL(clk))
+ if (IS_ERR(clk))
goto fail;
dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
@@ -563,15 +1143,38 @@ static void rzv2h_mod_clock_mstop_disable(struct rzv2h_cpg_priv *priv,
spin_unlock_irqrestore(&priv->rmw_lock, flags);
}
+static int rzv2h_parent_clk_mux_to_index(struct clk_hw *hw)
+{
+ struct clk_hw *parent_hw;
+ struct clk *parent_clk;
+ struct clk_mux *mux;
+ u32 val;
+
+ /* This will always succeed, so no need to check for IS_ERR() */
+ parent_clk = clk_get_parent(hw->clk);
+
+ parent_hw = __clk_get_hw(parent_clk);
+ mux = to_clk_mux(parent_hw);
+
+ val = readl(mux->reg) >> mux->shift;
+ val &= mux->mask;
+ return clk_mux_val_to_index(parent_hw, mux->table, 0, val);
+}
+
static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
{
struct mod_clock *clock = to_mod_clock(hw);
struct rzv2h_cpg_priv *priv = clock->priv;
+ int mon_index = clock->mon_index;
u32 bitmask;
u32 offset;
- if (clock->mon_index >= 0) {
- offset = GET_CLK_MON_OFFSET(clock->mon_index);
+ if (clock->ext_clk_mux_index >= 0 &&
+ rzv2h_parent_clk_mux_to_index(hw) == clock->ext_clk_mux_index)
+ mon_index = -1;
+
+ if (mon_index >= 0) {
+ offset = GET_CLK_MON_OFFSET(mon_index);
bitmask = BIT(clock->mon_bit);
if (!(readl(priv->base + offset) & bitmask))
@@ -687,6 +1290,7 @@ rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
clock->mon_index = mod->mon_index;
clock->mon_bit = mod->mon_bit;
clock->no_pm = mod->no_pm;
+ clock->ext_clk_mux_index = mod->ext_clk_mux_index;
clock->priv = priv;
clock->hw.init = &init;
clock->mstop_data = mod->mstop_data;
@@ -748,6 +1352,7 @@ static int __rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
u32 mask = BIT(priv->resets[id].reset_bit);
u8 monbit = priv->resets[id].mon_bit;
u32 value = mask << 16;
+ int ret;
dev_dbg(rcdev->dev, "%s id:%ld offset:0x%x\n",
assert ? "assert" : "deassert", id, reg);
@@ -759,9 +1364,14 @@ static int __rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
mask = BIT(monbit);
- return readl_poll_timeout_atomic(priv->base + reg, value,
- assert ? (value & mask) : !(value & mask),
- 10, 200);
+ ret = readl_poll_timeout_atomic(priv->base + reg, value,
+ assert == !!(value & mask), 10, 200);
+ if (ret && !assert) {
+ value = mask << 16;
+ writel(value, priv->base + GET_RST_OFFSET(priv->resets[id].reset_index));
+ }
+
+ return ret;
}
static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
@@ -1004,8 +1614,8 @@ static int __init rzv2h_cpg_probe(struct platform_device *pdev)
/* Adjust for CPG_BUS_m_MSTOP starting from m = 1 */
priv->mstop_count -= 16;
- priv->resets = devm_kmemdup(dev, info->resets, sizeof(*info->resets) *
- info->num_resets, GFP_KERNEL);
+ priv->resets = devm_kmemdup_array(dev, info->resets, info->num_resets,
+ sizeof(*info->resets), GFP_KERNEL);
if (!priv->resets)
return -ENOMEM;
diff --git a/drivers/clk/renesas/rzv2h-cpg.h b/drivers/clk/renesas/rzv2h-cpg.h
index 9104b1cd276c..dc957bdaf5e9 100644
--- a/drivers/clk/renesas/rzv2h-cpg.h
+++ b/drivers/clk/renesas/rzv2h-cpg.h
@@ -9,26 +9,35 @@
#define __RENESAS_RZV2H_CPG_H__
#include <linux/bitfield.h>
+#include <linux/types.h>
/**
* struct pll - Structure for PLL configuration
*
* @offset: STBY register offset
* @has_clkn: Flag to indicate if CLK1/2 are accessible or not
+ * @instance: PLL instance number
+ * @limits: PLL parameter limits (NULL if not applicable)
*/
struct pll {
unsigned int offset:9;
unsigned int has_clkn:1;
+ unsigned int instance:2;
+ const struct rzv2h_pll_limits *limits;
};
-#define PLL_PACK(_offset, _has_clkn) \
+#define PLL_PACK_LIMITS(_offset, _has_clkn, _instance, _limits) \
((struct pll){ \
.offset = _offset, \
- .has_clkn = _has_clkn \
+ .has_clkn = _has_clkn, \
+ .instance = _instance, \
+ .limits = _limits \
})
-#define PLLCA55 PLL_PACK(0x60, 1)
-#define PLLGPU PLL_PACK(0x120, 1)
+#define PLL_PACK(_offset, _has_clkn, _instance) \
+ PLL_PACK_LIMITS(_offset, _has_clkn, _instance, NULL)
+
+#define PLLCA55 PLL_PACK(0x60, 1, 0)
+#define PLLGPU PLL_PACK(0x120, 1, 0)
/**
* struct ddiv - Structure for dynamic switching divider
@@ -93,12 +102,32 @@ struct smuxed {
.width = (_width), \
})
+/**
+ * struct fixed_mod_conf - Structure for fixed module configuration
+ *
+ * @mon_index: monitor index
+ * @mon_bit: monitor bit
+ */
+struct fixed_mod_conf {
+ u8 mon_index;
+ u8 mon_bit;
+};
+
+#define FIXED_MOD_CONF_PACK(_index, _bit) \
+ ((struct fixed_mod_conf){ \
+ .mon_index = (_index), \
+ .mon_bit = (_bit), \
+ })
+
+#define CPG_SSEL0 (0x300)
#define CPG_SSEL1 (0x304)
#define CPG_CDDIV0 (0x400)
#define CPG_CDDIV1 (0x404)
+#define CPG_CDDIV2 (0x408)
#define CPG_CDDIV3 (0x40C)
#define CPG_CDDIV4 (0x410)
#define CPG_CSDIV0 (0x500)
+#define CPG_CSDIV1 (0x504)
#define CDDIV0_DIVCTL1 DDIV_PACK(CPG_CDDIV0, 4, 3, 1)
#define CDDIV0_DIVCTL2 DDIV_PACK(CPG_CDDIV0, 8, 3, 2)
@@ -106,6 +135,7 @@ struct smuxed {
#define CDDIV1_DIVCTL1 DDIV_PACK(CPG_CDDIV1, 4, 2, 5)
#define CDDIV1_DIVCTL2 DDIV_PACK(CPG_CDDIV1, 8, 2, 6)
#define CDDIV1_DIVCTL3 DDIV_PACK(CPG_CDDIV1, 12, 2, 7)
+#define CDDIV2_DIVCTL3 DDIV_PACK(CPG_CDDIV2, 12, 3, 11)
#define CDDIV3_DIVCTL1 DDIV_PACK(CPG_CDDIV3, 4, 3, 13)
#define CDDIV3_DIVCTL2 DDIV_PACK(CPG_CDDIV3, 8, 3, 14)
#define CDDIV3_DIVCTL3 DDIV_PACK(CPG_CDDIV3, 12, 1, 15)
@@ -113,8 +143,16 @@ struct smuxed {
#define CDDIV4_DIVCTL1 DDIV_PACK(CPG_CDDIV4, 4, 1, 17)
#define CDDIV4_DIVCTL2 DDIV_PACK(CPG_CDDIV4, 8, 1, 18)
+#define CSDIV0_DIVCTL0 DDIV_PACK(CPG_CSDIV0, 0, 2, CSDIV_NO_MON)
+#define CSDIV0_DIVCTL1 DDIV_PACK(CPG_CSDIV0, 4, 2, CSDIV_NO_MON)
+#define CSDIV0_DIVCTL2 DDIV_PACK(CPG_CSDIV0, 8, 2, CSDIV_NO_MON)
#define CSDIV0_DIVCTL3 DDIV_PACK_NO_RMW(CPG_CSDIV0, 12, 2, CSDIV_NO_MON)
+#define CSDIV1_DIVCTL2 DDIV_PACK(CPG_CSDIV1, 8, 4, CSDIV_NO_MON)
+#define SSEL0_SELCTL2 SMUX_PACK(CPG_SSEL0, 8, 1)
+#define SSEL0_SELCTL3 SMUX_PACK(CPG_SSEL0, 12, 1)
+#define SSEL1_SELCTL0 SMUX_PACK(CPG_SSEL1, 0, 1)
+#define SSEL1_SELCTL1 SMUX_PACK(CPG_SSEL1, 4, 1)
#define SSEL1_SELCTL2 SMUX_PACK(CPG_SSEL1, 8, 1)
#define SSEL1_SELCTL3 SMUX_PACK(CPG_SSEL1, 12, 1)
@@ -124,6 +162,8 @@ struct smuxed {
FIELD_PREP_CONST(BUS_MSTOP_BITS_MASK, (mask)))
#define BUS_MSTOP_NONE GENMASK(31, 0)
+#define FIXED_MOD_CONF_XSPI FIXED_MOD_CONF_PACK(5, 1)
+
/**
* Definitions of CPG Core Clocks
*
@@ -144,6 +184,7 @@ struct cpg_core_clk {
struct ddiv ddiv;
struct pll pll;
struct smuxed smux;
+ struct fixed_mod_conf fixed_mod;
} cfg;
const struct clk_div_table *dtable;
const char * const *parent_names;
@@ -156,9 +197,12 @@ enum clk_types {
/* Generic */
CLK_TYPE_IN, /* External Clock Input */
CLK_TYPE_FF, /* Fixed Factor Clock */
+ CLK_TYPE_FF_MOD_STATUS, /* Fixed Factor Clock which can report the status of module clock */
CLK_TYPE_PLL,
CLK_TYPE_DDIV, /* Dynamic Switching Divider */
CLK_TYPE_SMUX, /* Static Mux */
+ CLK_TYPE_PLLDSI, /* PLLDSI */
+ CLK_TYPE_PLLDSI_DIV, /* PLLDSI divider */
};
#define DEF_TYPE(_name, _id, _type...) \
@@ -171,6 +215,9 @@ enum clk_types {
DEF_TYPE(_name, _id, CLK_TYPE_IN)
#define DEF_FIXED(_name, _id, _parent, _mult, _div) \
DEF_BASE(_name, _id, CLK_TYPE_FF, _parent, .div = _div, .mult = _mult)
+#define DEF_FIXED_MOD_STATUS(_name, _id, _parent, _mult, _div, _gate) \
+ DEF_BASE(_name, _id, CLK_TYPE_FF_MOD_STATUS, _parent, .div = _div, \
+ .mult = _mult, .cfg.fixed_mod = _gate)
#define DEF_DDIV(_name, _id, _parent, _ddiv_packed, _dtable) \
DEF_TYPE(_name, _id, CLK_TYPE_DDIV, \
.cfg.ddiv = _ddiv_packed, \
@@ -186,6 +233,14 @@ enum clk_types {
.num_parents = ARRAY_SIZE(_parent_names), \
.flag = CLK_SET_RATE_PARENT, \
.mux_flags = CLK_MUX_HIWORD_MASK)
+#define DEF_PLLDSI(_name, _id, _parent, _pll_packed) \
+ DEF_TYPE(_name, _id, CLK_TYPE_PLLDSI, .parent = _parent, .cfg.pll = _pll_packed)
+#define DEF_PLLDSI_DIV(_name, _id, _parent, _ddiv_packed, _dtable) \
+ DEF_TYPE(_name, _id, CLK_TYPE_PLLDSI_DIV, \
+ .cfg.ddiv = _ddiv_packed, \
+ .dtable = _dtable, \
+ .parent = _parent, \
+ .flag = CLK_SET_RATE_PARENT)
/**
* struct rzv2h_mod_clk - Module Clocks definitions
@@ -199,6 +254,7 @@ enum clk_types {
* @on_bit: ON bit
* @mon_index: monitor register index
* @mon_bit: monitor bit
+ * @ext_clk_mux_index: mux index for external clock source, or -1 if internal
*/
struct rzv2h_mod_clk {
const char *name;
@@ -210,9 +266,11 @@ struct rzv2h_mod_clk {
u8 on_bit;
s8 mon_index;
u8 mon_bit;
+ s8 ext_clk_mux_index;
};
-#define DEF_MOD_BASE(_name, _mstop, _parent, _critical, _no_pm, _onindex, _onbit, _monindex, _monbit) \
+#define DEF_MOD_BASE(_name, _mstop, _parent, _critical, _no_pm, _onindex, \
+ _onbit, _monindex, _monbit, _ext_clk_mux_index) \
{ \
.name = (_name), \
.mstop_data = (_mstop), \
@@ -223,16 +281,22 @@ struct rzv2h_mod_clk {
.on_bit = (_onbit), \
.mon_index = (_monindex), \
.mon_bit = (_monbit), \
+ .ext_clk_mux_index = (_ext_clk_mux_index), \
}
#define DEF_MOD(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop) \
- DEF_MOD_BASE(_name, _mstop, _parent, false, false, _onindex, _onbit, _monindex, _monbit)
+ DEF_MOD_BASE(_name, _mstop, _parent, false, false, _onindex, _onbit, _monindex, _monbit, -1)
#define DEF_MOD_CRITICAL(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop) \
- DEF_MOD_BASE(_name, _mstop, _parent, true, false, _onindex, _onbit, _monindex, _monbit)
+ DEF_MOD_BASE(_name, _mstop, _parent, true, false, _onindex, _onbit, _monindex, _monbit, -1)
#define DEF_MOD_NO_PM(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop) \
- DEF_MOD_BASE(_name, _mstop, _parent, false, true, _onindex, _onbit, _monindex, _monbit)
+ DEF_MOD_BASE(_name, _mstop, _parent, false, true, _onindex, _onbit, _monindex, _monbit, -1)
+
+#define DEF_MOD_MUX_EXTERNAL(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop, \
+ _ext_clk_mux_index) \
+ DEF_MOD_BASE(_name, _mstop, _parent, false, false, _onindex, _onbit, _monindex, _monbit, \
+ _ext_clk_mux_index)
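A hedged example of a table entry using this macro (the clock name, parent,
indices and MSTOP data below are invented for illustration): for a module
clock whose parent mux can select an external pad clock at mux index 1,

	DEF_MOD_MUX_EXTERNAL("spi_hclk", CLK_PLLCM33_DIV16, 0, 0, 0, 0,
			     BUS_MSTOP(3, BIT(13)), 1),

makes rzv2h_mod_clock_is_enabled() skip the CLK_MON check, which only
monitors the internally sourced clock, whenever the external input is
currently selected.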
/**
* struct rzv2h_reset - Reset definitions
diff --git a/drivers/clk/rockchip/Kconfig b/drivers/clk/rockchip/Kconfig
index febb7944f34b..5cf1e0fd6fb3 100644
--- a/drivers/clk/rockchip/Kconfig
+++ b/drivers/clk/rockchip/Kconfig
@@ -30,6 +30,13 @@ config CLK_RV1126
help
Build the driver for RV1126 Clock Driver.
+config CLK_RV1126B
+ bool "Rockchip RV1126B clock controller support"
+ depends on ARM64 || COMPILE_TEST
+ default y
+ help
+ Build the driver for RV1126B Clock Driver.
+
config CLK_RK3036
bool "Rockchip RK3036 clock controller support"
depends on ARM || COMPILE_TEST
@@ -93,6 +100,13 @@ config CLK_RK3399
help
Build the driver for RK3399 Clock Driver.
+config CLK_RK3506
+ bool "Rockchip RK3506 clock controller support"
+ depends on ARM || COMPILE_TEST
+ default y
+ help
+ Build the driver for RK3506 Clock Driver.
+
config CLK_RK3528
bool "Rockchip RK3528 clock controller support"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index c281a9738d9f..4d8cbb2044c7 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -20,6 +20,7 @@ clk-rockchip-$(CONFIG_RESET_CONTROLLER) += softrst.o
obj-$(CONFIG_CLK_PX30) += clk-px30.o
obj-$(CONFIG_CLK_RV110X) += clk-rv1108.o
obj-$(CONFIG_CLK_RV1126) += clk-rv1126.o
+obj-$(CONFIG_CLK_RV1126B) += clk-rv1126b.o rst-rv1126b.o
obj-$(CONFIG_CLK_RK3036) += clk-rk3036.o
obj-$(CONFIG_CLK_RK312X) += clk-rk3128.o
obj-$(CONFIG_CLK_RK3188) += clk-rk3188.o
@@ -29,6 +30,7 @@ obj-$(CONFIG_CLK_RK3308) += clk-rk3308.o
obj-$(CONFIG_CLK_RK3328) += clk-rk3328.o
obj-$(CONFIG_CLK_RK3368) += clk-rk3368.o
obj-$(CONFIG_CLK_RK3399) += clk-rk3399.o
+obj-$(CONFIG_CLK_RK3506) += clk-rk3506.o rst-rk3506.o
obj-$(CONFIG_CLK_RK3528) += clk-rk3528.o rst-rk3528.o
obj-$(CONFIG_CLK_RK3562) += clk-rk3562.o rst-rk3562.o
obj-$(CONFIG_CLK_RK3568) += clk-rk3568.o
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index 398a226ad34e..6e91a3041a03 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -16,14 +16,14 @@
* of the SoC or supplied after the SoC characterization.
*
* The below implementation of the CPU clock allows the rate changes of the CPU
- * clock and the corresponding rate changes of the auxillary clocks of the CPU
+ * clock and the corresponding rate changes of the auxiliary clocks of the CPU
* domain. The platform clock driver provides a clock register configuration
* for each configurable rate which is then used to program the clock hardware
- * registers to acheive a fast co-oridinated rate change for all the CPU domain
+ * registers to achieve a fast coordinated rate change for all the CPU domain
* clocks.
*
* On a rate change request for the CPU clock, the rate change is propagated
- * upto the PLL supplying the clock to the CPU domain clock blocks. While the
+ * up to the PLL supplying the clock to the CPU domain clock blocks. While the
* CPU domain PLL is reconfigured, the CPU domain clocks are driven using an
* alternate clock source. If required, the alternate clock source is divided
* down in order to keep the output clock rate within the previous OPP limits.
@@ -396,3 +396,168 @@ free_cpuclk:
kfree(cpuclk);
return ERR_PTR(ret);
}
+
+static int rockchip_cpuclk_multi_pll_pre_rate_change(struct rockchip_cpuclk *cpuclk,
+ struct clk_notifier_data *ndata)
+{
+ unsigned long new_rate = roundup(ndata->new_rate, 1000);
+ const struct rockchip_cpuclk_rate_table *rate;
+ unsigned long flags;
+
+ rate = rockchip_get_cpuclk_settings(cpuclk, new_rate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for cpuclk\n",
+ __func__, new_rate);
+ return -EINVAL;
+ }
+
+ if (new_rate > ndata->old_rate) {
+ spin_lock_irqsave(cpuclk->lock, flags);
+ rockchip_cpuclk_set_dividers(cpuclk, rate);
+ spin_unlock_irqrestore(cpuclk->lock, flags);
+ }
+
+ return 0;
+}
+
+static int rockchip_cpuclk_multi_pll_post_rate_change(struct rockchip_cpuclk *cpuclk,
+ struct clk_notifier_data *ndata)
+{
+ unsigned long new_rate = roundup(ndata->new_rate, 1000);
+ const struct rockchip_cpuclk_rate_table *rate;
+ unsigned long flags;
+
+ rate = rockchip_get_cpuclk_settings(cpuclk, new_rate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for cpuclk\n",
+ __func__, new_rate);
+ return -EINVAL;
+ }
+
+ if (new_rate < ndata->old_rate) {
+ spin_lock_irqsave(cpuclk->lock, flags);
+ rockchip_cpuclk_set_dividers(cpuclk, rate);
+ spin_unlock_irqrestore(cpuclk->lock, flags);
+ }
+
+ return 0;
+}
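The pre/post split above follows the usual cpuclk pattern: dividers are
widened before the PLL goes up and narrowed only after it has come down, so
the auxiliary clocks never exceed their rated maximum mid-transition. A
hedged walk-through using the rk3506 table later in this patch: moving from
800 MHz (aclk_core divider field 1) to 1608 MHz (field 3), the PRE notifier
first programs field 3 while still at 800 MHz, briefly slowing aclk_core,
then the PLL is raised; on the way back down, the PLL drops first and the
POST notifier relaxes the dividers afterwards.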
+
+static int rockchip_cpuclk_multi_pll_notifier_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct clk_notifier_data *ndata = data;
+ struct rockchip_cpuclk *cpuclk = to_rockchip_cpuclk_nb(nb);
+ int ret = 0;
+
+ pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
+ __func__, event, ndata->old_rate, ndata->new_rate);
+ if (event == PRE_RATE_CHANGE)
+ ret = rockchip_cpuclk_multi_pll_pre_rate_change(cpuclk, ndata);
+ else if (event == POST_RATE_CHANGE)
+ ret = rockchip_cpuclk_multi_pll_post_rate_change(cpuclk, ndata);
+
+ return notifier_from_errno(ret);
+}
+
+struct clk *rockchip_clk_register_cpuclk_multi_pll(const char *name,
+ const char *const *parent_names,
+ u8 num_parents, void __iomem *base,
+ int muxdiv_offset, u8 mux_shift,
+ u8 mux_width, u8 mux_flags,
+ int div_offset, u8 div_shift,
+ u8 div_width, u8 div_flags,
+ unsigned long flags, spinlock_t *lock,
+ const struct rockchip_cpuclk_rate_table *rates,
+ int nrates)
+{
+ struct rockchip_cpuclk *cpuclk;
+ struct clk_hw *hw;
+ struct clk_mux *mux = NULL;
+ struct clk_divider *div = NULL;
+ const struct clk_ops *mux_ops = NULL, *div_ops = NULL;
+ int ret;
+
+ if (num_parents > 1) {
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux->reg = base + muxdiv_offset;
+ mux->shift = mux_shift;
+ mux->mask = BIT(mux_width) - 1;
+ mux->flags = mux_flags;
+ mux->lock = lock;
+ mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
+ : &clk_mux_ops;
+ }
+
+ if (div_width > 0) {
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div) {
+ ret = -ENOMEM;
+ goto free_mux;
+ }
+
+ div->flags = div_flags;
+ if (div_offset)
+ div->reg = base + div_offset;
+ else
+ div->reg = base + muxdiv_offset;
+ div->shift = div_shift;
+ div->width = div_width;
+ div->lock = lock;
+ div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
+ ? &clk_divider_ro_ops
+ : &clk_divider_ops;
+ }
+
+ hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
+ mux ? &mux->hw : NULL, mux_ops,
+ div ? &div->hw : NULL, div_ops,
+ NULL, NULL, flags);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto free_div;
+ }
+
+ cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
+ if (!cpuclk) {
+ ret = -ENOMEM;
+ goto unregister_clk;
+ }
+
+ cpuclk->reg_base = base;
+ cpuclk->lock = lock;
+ cpuclk->clk_nb.notifier_call = rockchip_cpuclk_multi_pll_notifier_cb;
+ ret = clk_notifier_register(hw->clk, &cpuclk->clk_nb);
+ if (ret) {
+ pr_err("%s: failed to register clock notifier for %s\n",
+ __func__, name);
+ goto free_cpuclk;
+ }
+
+ if (nrates > 0) {
+ cpuclk->rate_count = nrates;
+ cpuclk->rate_table = kmemdup(rates,
+ sizeof(*rates) * nrates,
+ GFP_KERNEL);
+ if (!cpuclk->rate_table) {
+ ret = -ENOMEM;
+ goto free_cpuclk;
+ }
+ }
+
+ return hw->clk;
+
+free_cpuclk:
+ kfree(cpuclk);
+unregister_clk:
+ clk_hw_unregister_composite(hw);
+free_div:
+ kfree(div);
+free_mux:
+ kfree(mux);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c
index 86718c54e56b..8866a65982a0 100644
--- a/drivers/clk/rockchip/clk-ddr.c
+++ b/drivers/clk/rockchip/clk-ddr.c
@@ -55,17 +55,18 @@ rockchip_ddrclk_sip_recalc_rate(struct clk_hw *hw,
return res.a0;
}
-static long rockchip_ddrclk_sip_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int rockchip_ddrclk_sip_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct arm_smccc_res res;
- arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, rate, 0,
+ arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, req->rate, 0,
ROCKCHIP_SIP_CONFIG_DRAM_ROUND_RATE,
0, 0, 0, 0, &res);
- return res.a0;
+ req->rate = res.a0;
+
+ return 0;
}
static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw)
@@ -83,7 +84,7 @@ static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw)
static const struct clk_ops rockchip_ddrclk_sip_ops = {
.recalc_rate = rockchip_ddrclk_sip_recalc_rate,
.set_rate = rockchip_ddrclk_sip_set_rate,
- .round_rate = rockchip_ddrclk_sip_round_rate,
+ .determine_rate = rockchip_ddrclk_sip_determine_rate,
.get_parent = rockchip_ddrclk_get_parent,
};
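This and the remaining round_rate conversions in this series follow the same
mechanical pattern; a minimal sketch, with foo_pick_rate() standing in for
whatever selection logic the clock already had:

	static int foo_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
	{
		long rate = foo_pick_rate(hw, req->rate);	/* hypothetical */

		if (rate < 0)
			return rate;

		req->rate = rate;
		return 0;
	}

The chosen rate now travels back through req->rate (and req->best_parent_rate
where the parent must change), while the return value carries only an error
code.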
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
index 64f7faad2148..fbc018e8afa4 100644
--- a/drivers/clk/rockchip/clk-half-divider.c
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -92,17 +92,19 @@ static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
return bestdiv;
}
-static long clk_half_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_half_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_divider *divider = to_clk_divider(hw);
int div;
- div = clk_half_divider_bestdiv(hw, rate, prate,
+ div = clk_half_divider_bestdiv(hw, req->rate, &req->best_parent_rate,
divider->width,
divider->flags);
- return DIV_ROUND_UP_ULL(((u64)*prate * 2), div * 2 + 3);
+ req->rate = DIV_ROUND_UP_ULL(((u64)req->best_parent_rate * 2), div * 2 + 3);
+
+ return 0;
}
static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -141,7 +143,7 @@ static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_half_divider_ops = {
.recalc_rate = clk_half_divider_recalc_rate,
- .round_rate = clk_half_divider_round_rate,
+ .determine_rate = clk_half_divider_determine_rate,
.set_rate = clk_half_divider_set_rate,
};
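For reference, the half divider's effective ratio is (div * 2 + 3) / 2, i.e.
1.5, 2.5, 3.5, ... for register values 0, 1, 2, ... A quick check of the
formula above: with a 1200 MHz parent and div = 1,
DIV_ROUND_UP(1200 MHz * 2, 1 * 2 + 3) = 2400 MHz / 5 = 480 MHz.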
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index b3ed8e7523e5..8b1292c56863 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -174,11 +174,11 @@ static int rockchip_mmc_clk_rate_notify(struct notifier_block *nb,
/*
* rockchip_mmc_clk is mostly used by mmc controllers to sample
- * the intput data, which expects the fixed phase after the tuning
+ * the input data, which expects the fixed phase after the tuning
* process. However if the clock rate is changed, the phase is stale
* and may break the data sampling. So here we try to restore the phase
* for that case, except that
- * (1) cached_phase is invaild since we inevitably cached it when the
+ * (1) cached_phase is invalid since we inevitably cached it when the
* clock provider be reparented from orphan to its real parent in the
* first place. Otherwise we may mess up the initialization of MMC cards
* since we only set the default sample phase and drive phase later on.
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index af74439a7457..86dba3826a77 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -61,21 +61,26 @@ static const struct rockchip_pll_rate_table *rockchip_get_pll_settings(
return NULL;
}
-static long rockchip_pll_round_rate(struct clk_hw *hw,
- unsigned long drate, unsigned long *prate)
+static int rockchip_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
const struct rockchip_pll_rate_table *rate_table = pll->rate_table;
int i;
- /* Assumming rate_table is in descending order */
+ /* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++) {
- if (drate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
}
/* return minimum supported value */
- return rate_table[i - 1].rate;
+ req->rate = rate_table[i - 1].rate;
+
+ return 0;
}
/*
@@ -352,7 +357,7 @@ static const struct clk_ops rockchip_rk3036_pll_clk_norate_ops = {
static const struct clk_ops rockchip_rk3036_pll_clk_ops = {
.recalc_rate = rockchip_rk3036_pll_recalc_rate,
- .round_rate = rockchip_pll_round_rate,
+ .determine_rate = rockchip_pll_determine_rate,
.set_rate = rockchip_rk3036_pll_set_rate,
.enable = rockchip_rk3036_pll_enable,
.disable = rockchip_rk3036_pll_disable,
@@ -571,7 +576,7 @@ static const struct clk_ops rockchip_rk3066_pll_clk_norate_ops = {
static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
.recalc_rate = rockchip_rk3066_pll_recalc_rate,
- .round_rate = rockchip_pll_round_rate,
+ .determine_rate = rockchip_pll_determine_rate,
.set_rate = rockchip_rk3066_pll_set_rate,
.enable = rockchip_rk3066_pll_enable,
.disable = rockchip_rk3066_pll_disable,
@@ -836,7 +841,7 @@ static const struct clk_ops rockchip_rk3399_pll_clk_norate_ops = {
static const struct clk_ops rockchip_rk3399_pll_clk_ops = {
.recalc_rate = rockchip_rk3399_pll_recalc_rate,
- .round_rate = rockchip_pll_round_rate,
+ .determine_rate = rockchip_pll_determine_rate,
.set_rate = rockchip_rk3399_pll_set_rate,
.enable = rockchip_rk3399_pll_enable,
.disable = rockchip_rk3399_pll_disable,
@@ -1036,7 +1041,7 @@ static const struct clk_ops rockchip_rk3588_pll_clk_norate_ops = {
static const struct clk_ops rockchip_rk3588_pll_clk_ops = {
.recalc_rate = rockchip_rk3588_pll_recalc_rate,
- .round_rate = rockchip_pll_round_rate,
+ .determine_rate = rockchip_pll_determine_rate,
.set_rate = rockchip_rk3588_pll_set_rate,
.enable = rockchip_rk3588_pll_enable,
.disable = rockchip_rk3588_pll_disable,
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 0a1e017df7c6..9cf3e1e43b78 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -871,7 +871,7 @@ static const int rk3288_saved_cru_reg_ids[] = {
static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)];
-static int rk3288_clk_suspend(void)
+static int rk3288_clk_suspend(void *data)
{
int i, reg_id;
@@ -906,7 +906,7 @@ static int rk3288_clk_suspend(void)
return 0;
}
-static void rk3288_clk_resume(void)
+static void rk3288_clk_resume(void *data)
{
int i, reg_id;
@@ -923,11 +923,15 @@ static void rk3288_clk_shutdown(void)
writel_relaxed(0xf3030000, rk3288_cru_base + RK3288_MODE_CON);
}
-static struct syscore_ops rk3288_clk_syscore_ops = {
+static const struct syscore_ops rk3288_clk_syscore_ops = {
.suspend = rk3288_clk_suspend,
.resume = rk3288_clk_resume,
};
+static struct syscore rk3288_clk_syscore = {
+ .ops = &rk3288_clk_syscore_ops,
+};
+
static void __init rk3288_common_init(struct device_node *np,
enum rk3288_variant soc)
{
@@ -976,7 +980,7 @@ static void __init rk3288_common_init(struct device_node *np,
rockchip_register_restart_notifier(ctx, RK3288_GLB_SRST_FST,
rk3288_clk_shutdown);
- register_syscore_ops(&rk3288_clk_syscore_ops);
+ register_syscore(&rk3288_clk_syscore);
rockchip_clk_of_add_provider(np, ctx);
}
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 04391e4e2874..95e6996adbae 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -526,7 +526,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", CLK_IGNORE_UNUSED,
RK3368_CLKGATE_CON(3), 1, GFLAGS),
- GATE(0, "sclk_mipidsi_24m", "xin24m", 0, RK3368_CLKGATE_CON(4), 14, GFLAGS),
+ GATE(SCLK_MIPIDSI_24M, "sclk_mipidsi_24m", "xin24m", 0, RK3368_CLKGATE_CON(4), 14, GFLAGS),
/*
* Clock-Architecture Diagram 4
diff --git a/drivers/clk/rockchip/clk-rk3506.c b/drivers/clk/rockchip/clk-rk3506.c
new file mode 100644
index 000000000000..dd59bd60382e
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rk3506.c
@@ -0,0 +1,869 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023-2025 Rockchip Electronics Co., Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/syscore_ops.h>
+#include <dt-bindings/clock/rockchip,rk3506-cru.h>
+#include "clk.h"
+
+#define PVTPLL_SRC_SEL_PVTPLL (BIT(7) | BIT(23))
+
+enum rk3506_plls {
+ gpll, v0pll, v1pll,
+};
+
+/*
+ * [FRAC PLL]: GPLL, V0PLL, V1PLL
+ * - VCO Frequency: 950MHz to 3800MHZ
+ * - Output Frequency: 19MHz to 3800MHZ
+ * - refdiv: 1 to 63 (Int Mode), 1 to 2 (Frac Mode)
+ * - fbdiv: 16 to 3800 (Int Mode), 20 to 380 (Frac Mode)
+ * - post1div: 1 to 7
+ * - post2div: 1 to 7
+ */
+static struct rockchip_pll_rate_table rk3506_pll_rates[] = {
+ /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
+ RK3036_PLL_RATE(1896000000, 1, 79, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1800000000, 1, 75, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1704000000, 1, 71, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1512000000, 1, 63, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1416000000, 1, 59, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1350000000, 4, 225, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1296000000, 1, 54, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1200000000, 1, 50, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1188000000, 1, 99, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1179648000, 1, 49, 1, 1, 0, 2550137),
+ RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1000000000, 3, 125, 1, 1, 1, 0),
+ RK3036_PLL_RATE(993484800, 1, 41, 1, 1, 0, 6630355),
+ RK3036_PLL_RATE(983040000, 1, 40, 1, 1, 0, 16106127),
+ RK3036_PLL_RATE(960000000, 1, 80, 2, 1, 1, 0),
+ RK3036_PLL_RATE(912000000, 1, 76, 2, 1, 1, 0),
+ RK3036_PLL_RATE(903168000, 1, 75, 2, 1, 0, 4429185),
+ RK3036_PLL_RATE(816000000, 1, 68, 2, 1, 1, 0),
+ RK3036_PLL_RATE(800000000, 3, 200, 2, 1, 1, 0),
+ RK3036_PLL_RATE(600000000, 1, 50, 2, 1, 1, 0),
+ RK3036_PLL_RATE(594000000, 2, 99, 2, 1, 1, 0),
+ RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
+ RK3036_PLL_RATE(312000000, 1, 78, 6, 1, 1, 0),
+ RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0),
+ RK3036_PLL_RATE(96000000, 1, 48, 6, 2, 1, 0),
+ { /* sentinel */ },
+};
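These rows follow the usual RK3036-style fractional PLL relation (implied by
the parameter names rather than spelled out in this patch):

	rate = 24 MHz * (fbdiv + frac / 2^24) / (refdiv * postdiv1 * postdiv2)

Two spot checks: 24 MHz * 79 = 1896 MHz for the first row, and
24 MHz * (49 + 2550137 / 16777216) = 1179.648 MHz for the 1179648000 row,
both matching the table.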
+
+#define RK3506_DIV_ACLK_CORE_MASK 0xf
+#define RK3506_DIV_ACLK_CORE_SHIFT 9
+#define RK3506_DIV_PCLK_CORE_MASK 0xf
+#define RK3506_DIV_PCLK_CORE_SHIFT 0
+
+#define RK3506_CLKSEL15(_aclk_core_div) \
+{ \
+ .reg = RK3506_CLKSEL_CON(15), \
+ .val = HIWORD_UPDATE(_aclk_core_div, RK3506_DIV_ACLK_CORE_MASK, \
+ RK3506_DIV_ACLK_CORE_SHIFT), \
+}
+
+#define RK3506_CLKSEL16(_pclk_core_div) \
+{ \
+ .reg = RK3506_CLKSEL_CON(16), \
+ .val = HIWORD_UPDATE(_pclk_core_div, RK3506_DIV_PCLK_CORE_MASK, \
+ RK3506_DIV_PCLK_CORE_SHIFT), \
+}
+
+/* SIGN-OFF: aclk_core: 500M, pclk_core: 125M */
+#define RK3506_CPUCLK_RATE(_prate, _aclk_core_div, _pclk_core_div) \
+{ \
+ .prate = _prate, \
+ .divs = { \
+ RK3506_CLKSEL15(_aclk_core_div), \
+ RK3506_CLKSEL16(_pclk_core_div), \
+ }, \
+}
+
+static struct rockchip_cpuclk_rate_table rk3506_cpuclk_rates[] __initdata = {
+ RK3506_CPUCLK_RATE(1608000000, 3, 12),
+ RK3506_CPUCLK_RATE(1512000000, 3, 12),
+ RK3506_CPUCLK_RATE(1416000000, 2, 11),
+ RK3506_CPUCLK_RATE(1296000000, 2, 10),
+ RK3506_CPUCLK_RATE(1200000000, 2, 9),
+ RK3506_CPUCLK_RATE(1179648000, 2, 9),
+ RK3506_CPUCLK_RATE(1008000000, 1, 7),
+ RK3506_CPUCLK_RATE(903168000, 1, 7),
+ RK3506_CPUCLK_RATE(800000000, 1, 6),
+ RK3506_CPUCLK_RATE(750000000, 1, 5),
+ RK3506_CPUCLK_RATE(589824000, 1, 4),
+ RK3506_CPUCLK_RATE(400000000, 1, 3),
+ RK3506_CPUCLK_RATE(200000000, 1, 1),
+};
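Assuming the usual Rockchip HIWORD divider encoding (stored field =
divider - 1), the 1608 MHz row above works out to aclk_core = 1608 / (3 + 1)
= 402 MHz and pclk_core = 1608 / (12 + 1) ~ 123.7 MHz, consistent with the
500 MHz / 125 MHz sign-off limits quoted in the comment.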
+
+PNAME(mux_pll_p) = { "xin24m" };
+PNAME(gpll_v0pll_v1pll_parents_p) = { "gpll", "v0pll", "v1pll" };
+PNAME(gpll_v0pll_v1pll_g_parents_p) = { "clk_gpll_gate", "clk_v0pll_gate", "clk_v1pll_gate" };
+PNAME(gpll_v0pll_v1pll_div_parents_p) = { "clk_gpll_div", "clk_v0pll_div", "clk_v1pll_div" };
+PNAME(xin24m_gpll_v0pll_v1pll_g_parents_p) = { "xin24m", "clk_gpll_gate", "clk_v0pll_gate", "clk_v1pll_gate" };
+PNAME(xin24m_g_gpll_v0pll_v1pll_g_parents_p) = { "xin24m_gate", "clk_gpll_gate", "clk_v0pll_gate", "clk_v1pll_gate" };
+PNAME(xin24m_g_gpll_v0pll_v1pll_div_parents_p) = { "xin24m_gate", "clk_gpll_div", "clk_v0pll_div", "clk_v1pll_div" };
+PNAME(xin24m_400k_32k_parents_p) = { "xin24m", "clk_rc", "clk_32k" };
+PNAME(clk_frac_uart_matrix0_mux_parents_p) = { "xin24m", "gpll", "clk_v0pll_gate", "clk_v1pll_gate" };
+PNAME(clk_timer0_parents_p) = { "xin24m", "clk_gpll_div_100m", "clk_32k", "clk_core_pvtpll", "sai0_mclk_in", "sai0_sclk_in" };
+PNAME(clk_timer1_parents_p) = { "xin24m", "clk_gpll_div_100m", "clk_32k", "clk_core_pvtpll", "sai1_mclk_in", "sai1_sclk_in" };
+PNAME(clk_timer2_parents_p) = { "xin24m", "clk_gpll_div_100m", "clk_32k", "clk_core_pvtpll", "sai2_mclk_in", "sai2_sclk_in" };
+PNAME(clk_timer3_parents_p) = { "xin24m", "clk_gpll_div_100m", "clk_32k", "clk_core_pvtpll", "sai3_mclk_in", "sai3_sclk_in" };
+PNAME(clk_timer4_parents_p) = { "xin24m", "clk_gpll_div_100m", "clk_32k", "clk_core_pvtpll", "mclk_asrc0" };
+PNAME(clk_timer5_parents_p) = { "xin24m", "clk_gpll_div_100m", "clk_32k", "clk_core_pvtpll", "mclk_asrc1" };
+PNAME(sclk_uart_parents_p) = { "xin24m", "clk_gpll_gate", "clk_v0pll_gate", "clk_frac_uart_matrix0", "clk_frac_uart_matrix1",
+ "clk_frac_common_matrix0", "clk_frac_common_matrix1", "clk_frac_common_matrix2" };
+PNAME(clk_mac_ptp_root_parents_p) = { "gpll", "v0pll", "v1pll" };
+PNAME(clk_pwm_parents_p) = { "clk_rc", "sai0_mclk_in", "sai1_mclk_in", "sai2_mclk_in", "sai3_mclk_in", "sai0_sclk_in", "sai1_sclk_in",
+ "sai2_sclk_in", "sai3_sclk_in", "mclk_asrc0", "mclk_asrc1" };
+PNAME(clk_can_parents_p) = { "xin24m", "gpll", "clk_v0pll_gate", "clk_v1pll_gate", "clk_frac_voice_matrix1",
+ "clk_frac_common_matrix0", "clk_frac_common_matrix1", "clk_frac_common_matrix2" };
+PNAME(clk_pdm_parents_p) = { "xin24m_gate", "clk_int_voice_matrix0", "clk_int_voice_matrix1", "clk_int_voice_matrix2",
+ "clk_frac_voice_matrix0", "clk_frac_voice_matrix1", "clk_frac_common_matrix0", "clk_frac_common_matrix1",
+ "clk_frac_common_matrix2", "sai0_mclk_in", "sai1_mclk_in", "sai2_mclk_in", "sai3_mclk_in", "clk_gpll_div" };
+PNAME(mclk_sai_asrc_parents_p) = { "xin24m_gate", "clk_int_voice_matrix0", "clk_int_voice_matrix1", "clk_int_voice_matrix2",
+ "clk_frac_voice_matrix0", "clk_frac_voice_matrix1", "clk_frac_common_matrix0", "clk_frac_common_matrix1",
+ "clk_frac_common_matrix2", "sai0_mclk_in", "sai1_mclk_in", "sai2_mclk_in", "sai3_mclk_in" };
+PNAME(lrck_asrc_parents_p) = { "mclk_asrc0", "mclk_asrc1", "mclk_asrc2", "mclk_asrc3", "mclk_spdiftx", "clk_spdifrx_to_asrc", "clkout_pdm",
+ "sai0_fs", "sai1_fs", "sai2_fs", "sai3_fs", "sai4_fs" };
+PNAME(cclk_src_sdmmc_parents_p) = { "xin24m_gate", "gpll", "clk_v0pll_gate", "clk_v1pll_gate" };
+PNAME(dclk_vop_parents_p) = { "xin24m_gate", "clk_gpll_gate", "clk_v0pll_gate", "clk_v1pll_gate", "dummy_vop_dclk",
+ "dummy_vop_dclk", "dummy_vop_dclk", "dummy_vop_dclk" };
+PNAME(dbclk_gpio0_parents_p) = { "xin24m", "clk_rc", "clk_32k_pmu" };
+PNAME(clk_pmu_hp_timer_parents_p) = { "xin24m", "clk_gpll_div_100m", "clk_core_pvtpll" };
+PNAME(clk_ref_out_parents_p) = { "xin24m", "gpll", "v0pll", "v1pll" };
+PNAME(clk_32k_frac_parents_p) = { "xin24m", "v0pll", "v1pll", "clk_rc" };
+PNAME(clk_32k_parents_p) = { "xin32k", "clk_32k_rc", "clk_32k_frac" };
+PNAME(clk_ref_phy_pmu_mux_parents_p) = { "xin24m", "clk_ref_phy_pll" };
+PNAME(clk_vpll_ref_parents_p) = { "xin24m", "clk_pll_ref_io" };
+PNAME(mux_armclk_p) = { "armclk_pll", "clk_core_pvtpll" };
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
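+
+/*
+ * All mux/div/gate fields use hiword write-enable masks; the gate bits
+ * disable the clock when set (CLK_GATE_SET_TO_DISABLE).
+ */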
+
+static struct rockchip_pll_clock rk3506_pll_clks[] __initdata = {
+ [gpll] = PLL(pll_rk3328, PLL_GPLL, "gpll", mux_pll_p,
+ CLK_IS_CRITICAL, RK3506_PLL_CON(0),
+ RK3506_MODE_CON, 0, 2, 0, rk3506_pll_rates),
+ [v0pll] = PLL(pll_rk3328, PLL_V0PLL, "v0pll", mux_pll_p,
+ CLK_IS_CRITICAL, RK3506_PLL_CON(8),
+ RK3506_MODE_CON, 2, 0, 0, rk3506_pll_rates),
+ [v1pll] = PLL(pll_rk3328, PLL_V1PLL, "v1pll", mux_pll_p,
+ CLK_IS_CRITICAL, RK3506_PLL_CON(16),
+ RK3506_MODE_CON, 4, 1, 0, rk3506_pll_rates),
+};
+
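+/* armclk muxes between the PLL-derived "armclk_pll" and the core PVTPLL. */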
+static struct rockchip_clk_branch rk3506_armclk __initdata =
+ MUX(ARMCLK, "armclk", mux_armclk_p, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ RK3506_CLKSEL_CON(15), 8, 1, MFLAGS);
+
+static struct rockchip_clk_branch rk3506_clk_branches[] __initdata = {
+ /*
+ * CRU Clock-Architecture
+ */
+ /* top */
+ GATE(XIN24M_GATE, "xin24m_gate", "xin24m", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(CLK_GPLL_GATE, "clk_gpll_gate", "gpll", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(CLK_V0PLL_GATE, "clk_v0pll_gate", "v0pll", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(0), 3, GFLAGS),
+ GATE(CLK_V1PLL_GATE, "clk_v1pll_gate", "v1pll", 0,
+ RK3506_CLKGATE_CON(0), 4, GFLAGS),
+ COMPOSITE_NOMUX(CLK_GPLL_DIV, "clk_gpll_div", "clk_gpll_gate", CLK_IS_CRITICAL,
+ RK3506_CLKSEL_CON(0), 6, 4, DFLAGS,
+ RK3506_CLKGATE_CON(0), 5, GFLAGS),
+ COMPOSITE_NOMUX(CLK_GPLL_DIV_100M, "clk_gpll_div_100m", "clk_gpll_div", 0,
+ RK3506_CLKSEL_CON(0), 10, 4, DFLAGS,
+ RK3506_CLKGATE_CON(0), 6, GFLAGS),
+ COMPOSITE_NOMUX(CLK_V0PLL_DIV, "clk_v0pll_div", "clk_v0pll_gate", CLK_IS_CRITICAL,
+ RK3506_CLKSEL_CON(1), 0, 4, DFLAGS,
+ RK3506_CLKGATE_CON(0), 7, GFLAGS),
+ COMPOSITE_NOMUX(CLK_V1PLL_DIV, "clk_v1pll_div", "clk_v1pll_gate", 0,
+ RK3506_CLKSEL_CON(1), 4, 4, DFLAGS,
+ RK3506_CLKGATE_CON(0), 8, GFLAGS),
+ COMPOSITE_NOMUX(CLK_INT_VOICE_MATRIX0, "clk_int_voice_matrix0", "clk_v0pll_gate", 0,
+ RK3506_CLKSEL_CON(1), 8, 5, DFLAGS,
+ RK3506_CLKGATE_CON(0), 9, GFLAGS),
+ COMPOSITE_NOMUX(CLK_INT_VOICE_MATRIX1, "clk_int_voice_matrix1", "clk_v1pll_gate", 0,
+ RK3506_CLKSEL_CON(2), 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(0), 10, GFLAGS),
+ COMPOSITE_NOMUX(CLK_INT_VOICE_MATRIX2, "clk_int_voice_matrix2", "clk_v0pll_gate", 0,
+ RK3506_CLKSEL_CON(2), 5, 5, DFLAGS,
+ RK3506_CLKGATE_CON(0), 11, GFLAGS),
+ MUX(CLK_FRAC_UART_MATRIX0_MUX, "clk_frac_uart_matrix0_mux", clk_frac_uart_matrix0_mux_parents_p, 0,
+ RK3506_CLKSEL_CON(3), 9, 2, MFLAGS),
+ MUX(CLK_FRAC_UART_MATRIX1_MUX, "clk_frac_uart_matrix1_mux", xin24m_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(3), 11, 2, MFLAGS),
+ MUX(CLK_FRAC_VOICE_MATRIX0_MUX, "clk_frac_voice_matrix0_mux", xin24m_g_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(3), 13, 2, MFLAGS),
+ MUX(CLK_FRAC_VOICE_MATRIX1_MUX, "clk_frac_voice_matrix1_mux", xin24m_g_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(4), 0, 2, MFLAGS),
+ MUX(CLK_FRAC_COMMON_MATRIX0_MUX, "clk_frac_common_matrix0_mux", xin24m_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(4), 2, 2, MFLAGS),
+ MUX(CLK_FRAC_COMMON_MATRIX1_MUX, "clk_frac_common_matrix1_mux", xin24m_g_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(4), 4, 2, MFLAGS),
+ MUX(CLK_FRAC_COMMON_MATRIX2_MUX, "clk_frac_common_matrix2_mux", xin24m_g_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(4), 6, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_FRAC_UART_MATRIX0, "clk_frac_uart_matrix0", "clk_frac_uart_matrix0_mux", 0,
+ RK3506_CLKSEL_CON(5), 0,
+ RK3506_CLKGATE_CON(0), 13, GFLAGS),
+ COMPOSITE_FRAC(CLK_FRAC_UART_MATRIX1, "clk_frac_uart_matrix1", "clk_frac_uart_matrix1_mux", 0,
+ RK3506_CLKSEL_CON(6), 0,
+ RK3506_CLKGATE_CON(0), 14, GFLAGS),
+ COMPOSITE_FRAC(CLK_FRAC_VOICE_MATRIX0, "clk_frac_voice_matrix0", "clk_frac_voice_matrix0_mux", 0,
+ RK3506_CLKSEL_CON(7), 0,
+ RK3506_CLKGATE_CON(0), 15, GFLAGS),
+ COMPOSITE_FRAC(CLK_FRAC_VOICE_MATRIX1, "clk_frac_voice_matrix1", "clk_frac_voice_matrix1_mux", 0,
+ RK3506_CLKSEL_CON(9), 0,
+ RK3506_CLKGATE_CON(1), 0, GFLAGS),
+ COMPOSITE_FRAC(CLK_FRAC_COMMON_MATRIX0, "clk_frac_common_matrix0", "clk_frac_common_matrix0_mux", 0,
+ RK3506_CLKSEL_CON(11), 0,
+ RK3506_CLKGATE_CON(1), 1, GFLAGS),
+ COMPOSITE_FRAC(CLK_FRAC_COMMON_MATRIX1, "clk_frac_common_matrix1", "clk_frac_common_matrix1_mux", 0,
+ RK3506_CLKSEL_CON(12), 0,
+ RK3506_CLKGATE_CON(1), 2, GFLAGS),
+ COMPOSITE_FRAC(CLK_FRAC_COMMON_MATRIX2, "clk_frac_common_matrix2", "clk_frac_common_matrix2_mux", 0,
+ RK3506_CLKSEL_CON(13), 0,
+ RK3506_CLKGATE_CON(1), 3, GFLAGS),
+ GATE(CLK_REF_USBPHY_TOP, "clk_ref_usbphy_top", "xin24m", 0,
+ RK3506_CLKGATE_CON(1), 4, GFLAGS),
+ GATE(CLK_REF_DPHY_TOP, "clk_ref_dphy_top", "xin24m", 0,
+ RK3506_CLKGATE_CON(1), 5, GFLAGS),
+
+ /* core */
+ COMPOSITE_NOGATE(0, "armclk_pll", gpll_v0pll_v1pll_parents_p, CLK_IS_CRITICAL,
+ RK3506_CLKSEL_CON(15), 5, 2, MFLAGS, 0, 5, DFLAGS),
+ COMPOSITE_NOMUX(ACLK_CORE_ROOT, "aclk_core_root", "armclk", CLK_IGNORE_UNUSED,
+ RK3506_CLKSEL_CON(15), 9, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3506_CLKGATE_CON(2), 11, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_CORE_ROOT, "pclk_core_root", "armclk", CLK_IGNORE_UNUSED,
+ RK3506_CLKSEL_CON(16), 0, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3506_CLKGATE_CON(2), 12, GFLAGS),
+ GATE(PCLK_DBG, "pclk_dbg", "pclk_core_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(3), 1, GFLAGS),
+ GATE(PCLK_CORE_GRF, "pclk_core_grf", "pclk_core_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(3), 4, GFLAGS),
+ GATE(PCLK_CORE_CRU, "pclk_core_cru", "pclk_core_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(3), 5, GFLAGS),
+ GATE(CLK_CORE_EMA_DETECT, "clk_core_ema_detect", "xin24m_gate", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(3), 6, GFLAGS),
+ GATE(PCLK_GPIO1, "pclk_gpio1", "aclk_core_root", 0,
+ RK3506_CLKGATE_CON(3), 8, GFLAGS),
+ GATE(DBCLK_GPIO1, "dbclk_gpio1", "xin24m_gate", 0,
+ RK3506_CLKGATE_CON(3), 9, GFLAGS),
+
+ /* core peri */
+ COMPOSITE(ACLK_CORE_PERI_ROOT, "aclk_core_peri_root", gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(18), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(4), 0, GFLAGS),
+ GATE(HCLK_CORE_PERI_ROOT, "hclk_core_peri_root", "aclk_core_peri_root", 0,
+ RK3506_CLKGATE_CON(4), 1, GFLAGS),
+ GATE(PCLK_CORE_PERI_ROOT, "pclk_core_peri_root", "aclk_core_peri_root", 0,
+ RK3506_CLKGATE_CON(4), 2, GFLAGS),
+ COMPOSITE(CLK_DSMC, "clk_dsmc", xin24m_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(18), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3506_CLKGATE_CON(4), 4, GFLAGS),
+ GATE(ACLK_DSMC, "aclk_dsmc", "aclk_core_peri_root", 0,
+ RK3506_CLKGATE_CON(4), 5, GFLAGS),
+ GATE(PCLK_DSMC, "pclk_dsmc", "pclk_core_peri_root", 0,
+ RK3506_CLKGATE_CON(4), 6, GFLAGS),
+ COMPOSITE(CLK_FLEXBUS_TX, "clk_flexbus_tx", xin24m_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(19), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(4), 7, GFLAGS),
+ COMPOSITE(CLK_FLEXBUS_RX, "clk_flexbus_rx", xin24m_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(19), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3506_CLKGATE_CON(4), 8, GFLAGS),
+ GATE(ACLK_FLEXBUS, "aclk_flexbus", "aclk_core_peri_root", 0,
+ RK3506_CLKGATE_CON(4), 9, GFLAGS),
+ GATE(HCLK_FLEXBUS, "hclk_flexbus", "hclk_core_peri_root", 0,
+ RK3506_CLKGATE_CON(4), 10, GFLAGS),
+ GATE(ACLK_DSMC_SLV, "aclk_dsmc_slv", "aclk_core_peri_root", 0,
+ RK3506_CLKGATE_CON(4), 11, GFLAGS),
+ GATE(HCLK_DSMC_SLV, "hclk_dsmc_slv", "hclk_core_peri_root", 0,
+ RK3506_CLKGATE_CON(4), 12, GFLAGS),
+
+ /* bus */
+ COMPOSITE(ACLK_BUS_ROOT, "aclk_bus_root", gpll_v0pll_v1pll_div_parents_p, CLK_IS_CRITICAL,
+ RK3506_CLKSEL_CON(21), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(5), 0, GFLAGS),
+ COMPOSITE(HCLK_BUS_ROOT, "hclk_bus_root", gpll_v0pll_v1pll_div_parents_p, CLK_IS_CRITICAL,
+ RK3506_CLKSEL_CON(21), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3506_CLKGATE_CON(5), 1, GFLAGS),
+ COMPOSITE(PCLK_BUS_ROOT, "pclk_bus_root", gpll_v0pll_v1pll_div_parents_p, CLK_IS_CRITICAL,
+ RK3506_CLKSEL_CON(22), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(5), 2, GFLAGS),
+ GATE(ACLK_SYSRAM, "aclk_sysram", "aclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(5), 6, GFLAGS),
+ GATE(HCLK_SYSRAM, "hclk_sysram", "aclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(5), 7, GFLAGS),
+ GATE(ACLK_DMAC0, "aclk_dmac0", "aclk_bus_root", 0,
+ RK3506_CLKGATE_CON(5), 8, GFLAGS),
+ GATE(ACLK_DMAC1, "aclk_dmac1", "aclk_bus_root", 0,
+ RK3506_CLKGATE_CON(5), 9, GFLAGS),
+ GATE(HCLK_M0, "hclk_m0", "aclk_bus_root", 0,
+ RK3506_CLKGATE_CON(5), 10, GFLAGS),
+ GATE(ACLK_CRYPTO_NS, "aclk_crypto_ns", "aclk_bus_root", 0,
+ RK3506_CLKGATE_CON(5), 14, GFLAGS),
+ GATE(HCLK_CRYPTO_NS, "hclk_crypto_ns", "hclk_bus_root", 0,
+ RK3506_CLKGATE_CON(5), 15, GFLAGS),
+ GATE(HCLK_RNG, "hclk_rng", "hclk_bus_root", 0,
+ RK3506_CLKGATE_CON(6), 0, GFLAGS),
+ GATE(PCLK_BUS_GRF, "pclk_bus_grf", "pclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(6), 1, GFLAGS),
+ GATE(PCLK_TIMER, "pclk_timer", "pclk_bus_root", 0,
+ RK3506_CLKGATE_CON(6), 2, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER0_CH0, "clk_timer0_ch0", clk_timer0_parents_p, 0,
+ RK3506_CLKSEL_CON(22), 7, 3, MFLAGS,
+ RK3506_CLKGATE_CON(6), 3, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER0_CH1, "clk_timer0_ch1", clk_timer1_parents_p, 0,
+ RK3506_CLKSEL_CON(22), 10, 3, MFLAGS,
+ RK3506_CLKGATE_CON(6), 4, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER0_CH2, "clk_timer0_ch2", clk_timer2_parents_p, 0,
+ RK3506_CLKSEL_CON(22), 13, 3, MFLAGS,
+ RK3506_CLKGATE_CON(6), 5, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER0_CH3, "clk_timer0_ch3", clk_timer3_parents_p, 0,
+ RK3506_CLKSEL_CON(23), 0, 3, MFLAGS,
+ RK3506_CLKGATE_CON(6), 6, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER0_CH4, "clk_timer0_ch4", clk_timer4_parents_p, 0,
+ RK3506_CLKSEL_CON(23), 3, 3, MFLAGS,
+ RK3506_CLKGATE_CON(6), 7, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER0_CH5, "clk_timer0_ch5", clk_timer5_parents_p, 0,
+ RK3506_CLKSEL_CON(23), 6, 3, MFLAGS,
+ RK3506_CLKGATE_CON(6), 8, GFLAGS),
+ GATE(PCLK_WDT0, "pclk_wdt0", "pclk_bus_root", 0,
+ RK3506_CLKGATE_CON(6), 9, GFLAGS),
+ GATE(TCLK_WDT0, "tclk_wdt0", "xin24m_gate", 0,
+ RK3506_CLKGATE_CON(6), 10, GFLAGS),
+ GATE(PCLK_WDT1, "pclk_wdt1", "pclk_bus_root", 0,
+ RK3506_CLKGATE_CON(6), 11, GFLAGS),
+ GATE(TCLK_WDT1, "tclk_wdt1", "xin24m_gate", 0,
+ RK3506_CLKGATE_CON(6), 12, GFLAGS),
+ GATE(PCLK_MAILBOX, "pclk_mailbox", "pclk_bus_root", 0,
+ RK3506_CLKGATE_CON(6), 13, GFLAGS),
+ GATE(PCLK_INTMUX, "pclk_intmux", "pclk_bus_root", 0,
+ RK3506_CLKGATE_CON(6), 14, GFLAGS),
+ GATE(PCLK_SPINLOCK, "pclk_spinlock", "pclk_bus_root", 0,
+ RK3506_CLKGATE_CON(6), 15, GFLAGS),
+ GATE(PCLK_DDRC, "pclk_ddrc", "pclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(7), 0, GFLAGS),
+ GATE(HCLK_DDRPHY, "hclk_ddrphy", "hclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(7), 1, GFLAGS),
+ GATE(PCLK_DDRMON, "pclk_ddrmon", "pclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(7), 2, GFLAGS),
+ GATE(CLK_DDRMON_OSC, "clk_ddrmon_osc", "xin24m_gate", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(7), 3, GFLAGS),
+ GATE(PCLK_STDBY, "pclk_stdby", "pclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(7), 4, GFLAGS),
+ GATE(HCLK_USBOTG0, "hclk_usbotg0", "hclk_bus_root", 0,
+ RK3506_CLKGATE_CON(7), 5, GFLAGS),
+ GATE(HCLK_USBOTG0_PMU, "hclk_usbotg0_pmu", "hclk_bus_root", 0,
+ RK3506_CLKGATE_CON(7), 6, GFLAGS),
+ GATE(CLK_USBOTG0_ADP, "clk_usbotg0_adp", "clk_32k", 0,
+ RK3506_CLKGATE_CON(7), 7, GFLAGS),
+ GATE(HCLK_USBOTG1, "hclk_usbotg1", "hclk_bus_root", 0,
+ RK3506_CLKGATE_CON(7), 8, GFLAGS),
+ GATE(HCLK_USBOTG1_PMU, "hclk_usbotg1_pmu", "hclk_bus_root", 0,
+ RK3506_CLKGATE_CON(7), 9, GFLAGS),
+ GATE(CLK_USBOTG1_ADP, "clk_usbotg1_adp", "clk_32k", 0,
+ RK3506_CLKGATE_CON(7), 10, GFLAGS),
+ GATE(PCLK_USBPHY, "pclk_usbphy", "pclk_bus_root", 0,
+ RK3506_CLKGATE_CON(7), 11, GFLAGS),
+ GATE(ACLK_DMA2DDR, "aclk_dma2ddr", "aclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(8), 0, GFLAGS),
+ GATE(PCLK_DMA2DDR, "pclk_dma2ddr", "pclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(8), 1, GFLAGS),
+ COMPOSITE_NOMUX(STCLK_M0, "stclk_m0", "xin24m_gate", 0,
+ RK3506_CLKSEL_CON(23), 9, 6, DFLAGS,
+ RK3506_CLKGATE_CON(8), 2, GFLAGS),
+ COMPOSITE(CLK_DDRPHY, "clk_ddrphy", gpll_v0pll_v1pll_parents_p, CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKSEL_CON(4), 4, 2, MFLAGS, 0, 4, DFLAGS,
+ RK3506_PMU_CLKGATE_CON(1), 10, GFLAGS),
+ FACTOR(CLK_DDRC_SRC, "clk_ddrc_src", "clk_ddrphy", 0, 1, 4),
+ GATE(ACLK_DDRC_0, "aclk_ddrc_0", "clk_ddrc_src", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(10), 0, GFLAGS),
+ GATE(ACLK_DDRC_1, "aclk_ddrc_1", "clk_ddrc_src", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(10), 1, GFLAGS),
+ GATE(CLK_DDRC, "clk_ddrc", "clk_ddrc_src", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(10), 3, GFLAGS),
+ GATE(CLK_DDRMON, "clk_ddrmon", "clk_ddrc_src", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(10), 4, GFLAGS),
+
+ /* ls peri */
+ COMPOSITE(HCLK_LSPERI_ROOT, "hclk_lsperi_root", gpll_v0pll_v1pll_div_parents_p, 0,
+ RK3506_CLKSEL_CON(29), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(11), 0, GFLAGS),
+ GATE(PCLK_LSPERI_ROOT, "pclk_lsperi_root", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(11), 1, GFLAGS),
+ GATE(PCLK_UART0, "pclk_uart0", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(11), 4, GFLAGS),
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(11), 5, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(11), 6, GFLAGS),
+ GATE(PCLK_UART3, "pclk_uart3", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(11), 7, GFLAGS),
+ GATE(PCLK_UART4, "pclk_uart4", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(11), 8, GFLAGS),
+ COMPOSITE(SCLK_UART0, "sclk_uart0", sclk_uart_parents_p, 0,
+ RK3506_CLKSEL_CON(29), 12, 3, MFLAGS, 7, 5, DFLAGS,
+ RK3506_CLKGATE_CON(11), 9, GFLAGS),
+ COMPOSITE(SCLK_UART1, "sclk_uart1", sclk_uart_parents_p, 0,
+ RK3506_CLKSEL_CON(30), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(11), 10, GFLAGS),
+ COMPOSITE(SCLK_UART2, "sclk_uart2", sclk_uart_parents_p, 0,
+ RK3506_CLKSEL_CON(30), 13, 3, MFLAGS, 8, 5, DFLAGS,
+ RK3506_CLKGATE_CON(11), 11, GFLAGS),
+ COMPOSITE(SCLK_UART3, "sclk_uart3", sclk_uart_parents_p, 0,
+ RK3506_CLKSEL_CON(31), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(11), 12, GFLAGS),
+ COMPOSITE(SCLK_UART4, "sclk_uart4", sclk_uart_parents_p, 0,
+ RK3506_CLKSEL_CON(31), 13, 3, MFLAGS, 8, 5, DFLAGS,
+ RK3506_CLKGATE_CON(11), 13, GFLAGS),
+ GATE(PCLK_I2C0, "pclk_i2c0", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(11), 14, GFLAGS),
+ COMPOSITE(CLK_I2C0, "clk_i2c0", xin24m_g_gpll_v0pll_v1pll_div_parents_p, 0,
+ RK3506_CLKSEL_CON(32), 4, 2, MFLAGS, 0, 4, DFLAGS,
+ RK3506_CLKGATE_CON(11), 15, GFLAGS),
+ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(12), 0, GFLAGS),
+ COMPOSITE(CLK_I2C1, "clk_i2c1", xin24m_g_gpll_v0pll_v1pll_div_parents_p, 0,
+ RK3506_CLKSEL_CON(32), 10, 2, MFLAGS, 6, 4, DFLAGS,
+ RK3506_CLKGATE_CON(12), 1, GFLAGS),
+ GATE(PCLK_I2C2, "pclk_i2c2", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(12), 2, GFLAGS),
+ COMPOSITE(CLK_I2C2, "clk_i2c2", xin24m_g_gpll_v0pll_v1pll_div_parents_p, 0,
+ RK3506_CLKSEL_CON(33), 4, 2, MFLAGS, 0, 4, DFLAGS,
+ RK3506_CLKGATE_CON(12), 3, GFLAGS),
+ GATE(PCLK_PWM1, "pclk_pwm1", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(12), 4, GFLAGS),
+ COMPOSITE(CLK_PWM1, "clk_pwm1", gpll_v0pll_v1pll_div_parents_p, 0,
+ RK3506_CLKSEL_CON(33), 10, 2, MFLAGS, 6, 4, DFLAGS,
+ RK3506_CLKGATE_CON(12), 5, GFLAGS),
+ GATE(CLK_OSC_PWM1, "clk_osc_pwm1", "xin24m", 0,
+ RK3506_CLKGATE_CON(12), 6, GFLAGS),
+ GATE(CLK_RC_PWM1, "clk_rc_pwm1", "clk_rc", 0,
+ RK3506_CLKGATE_CON(12), 7, GFLAGS),
+ COMPOSITE_NODIV(CLK_FREQ_PWM1, "clk_freq_pwm1", clk_pwm_parents_p, 0,
+ RK3506_CLKSEL_CON(33), 12, 4, MFLAGS,
+ RK3506_CLKGATE_CON(12), 8, GFLAGS),
+ COMPOSITE_NODIV(CLK_COUNTER_PWM1, "clk_counter_pwm1", clk_pwm_parents_p, 0,
+ RK3506_CLKSEL_CON(34), 0, 4, MFLAGS,
+ RK3506_CLKGATE_CON(12), 9, GFLAGS),
+ GATE(PCLK_SPI0, "pclk_spi0", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(12), 10, GFLAGS),
+ COMPOSITE(CLK_SPI0, "clk_spi0", xin24m_g_gpll_v0pll_v1pll_div_parents_p, 0,
+ RK3506_CLKSEL_CON(34), 8, 2, MFLAGS, 4, 4, DFLAGS,
+ RK3506_CLKGATE_CON(12), 11, GFLAGS),
+ GATE(PCLK_SPI1, "pclk_spi1", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(12), 12, GFLAGS),
+ COMPOSITE(CLK_SPI1, "clk_spi1", xin24m_g_gpll_v0pll_v1pll_div_parents_p, 0,
+ RK3506_CLKSEL_CON(34), 14, 2, MFLAGS, 10, 4, DFLAGS,
+ RK3506_CLKGATE_CON(12), 13, GFLAGS),
+ GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(12), 14, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO2, "dbclk_gpio2", xin24m_400k_32k_parents_p, 0,
+ RK3506_CLKSEL_CON(35), 0, 2, MFLAGS,
+ RK3506_CLKGATE_CON(12), 15, GFLAGS),
+ GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(13), 0, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO3, "dbclk_gpio3", xin24m_400k_32k_parents_p, 0,
+ RK3506_CLKSEL_CON(35), 2, 2, MFLAGS,
+ RK3506_CLKGATE_CON(13), 1, GFLAGS),
+ GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(13), 2, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO4, "dbclk_gpio4", xin24m_400k_32k_parents_p, 0,
+ RK3506_CLKSEL_CON(35), 4, 2, MFLAGS,
+ RK3506_CLKGATE_CON(13), 3, GFLAGS),
+ GATE(HCLK_CAN0, "hclk_can0", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(13), 4, GFLAGS),
+ COMPOSITE(CLK_CAN0, "clk_can0", clk_can_parents_p, 0,
+ RK3506_CLKSEL_CON(35), 11, 3, MFLAGS, 6, 5, DFLAGS,
+ RK3506_CLKGATE_CON(13), 5, GFLAGS),
+ GATE(HCLK_CAN1, "hclk_can1", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(13), 6, GFLAGS),
+ COMPOSITE(CLK_CAN1, "clk_can1", clk_can_parents_p, 0,
+ RK3506_CLKSEL_CON(36), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(13), 7, GFLAGS),
+ GATE(HCLK_PDM, "hclk_pdm", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(13), 8, GFLAGS),
+ COMPOSITE(MCLK_PDM, "mclk_pdm", clk_pdm_parents_p, 0,
+ RK3506_CLKSEL_CON(37), 5, 4, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(13), 9, GFLAGS),
+ COMPOSITE(CLKOUT_PDM, "clkout_pdm", clk_pdm_parents_p, 0,
+ RK3506_CLKSEL_CON(38), 10, 4, MFLAGS, 0, 10, DFLAGS,
+ RK3506_CLKGATE_CON(13), 10, GFLAGS),
+ COMPOSITE(MCLK_SPDIFTX, "mclk_spdiftx", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(39), 5, 4, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(13), 11, GFLAGS),
+ GATE(HCLK_SPDIFTX, "hclk_spdiftx", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(13), 12, GFLAGS),
+ GATE(HCLK_SPDIFRX, "hclk_spdifrx", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(13), 13, GFLAGS),
+ COMPOSITE(MCLK_SPDIFRX, "mclk_spdifrx", gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(39), 14, 2, MFLAGS, 9, 5, DFLAGS,
+ RK3506_CLKGATE_CON(13), 14, GFLAGS),
+ COMPOSITE(MCLK_SAI0, "mclk_sai0", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(40), 8, 4, MFLAGS, 0, 8, DFLAGS,
+ RK3506_CLKGATE_CON(13), 15, GFLAGS),
+ GATE(HCLK_SAI0, "hclk_sai0", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(14), 0, GFLAGS),
+ GATE(MCLK_OUT_SAI0, "mclk_out_sai0", "mclk_sai0", 0,
+ RK3506_CLKGATE_CON(14), 1, GFLAGS),
+ COMPOSITE(MCLK_SAI1, "mclk_sai1", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(41), 8, 4, MFLAGS, 0, 8, DFLAGS,
+ RK3506_CLKGATE_CON(14), 2, GFLAGS),
+ GATE(HCLK_SAI1, "hclk_sai1", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(14), 3, GFLAGS),
+ GATE(MCLK_OUT_SAI1, "mclk_out_sai1", "mclk_sai1", 0,
+ RK3506_CLKGATE_CON(14), 4, GFLAGS),
+ GATE(HCLK_ASRC0, "hclk_asrc0", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(14), 5, GFLAGS),
+ COMPOSITE(CLK_ASRC0, "clk_asrc0", gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(42), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(14), 6, GFLAGS),
+ GATE(HCLK_ASRC1, "hclk_asrc1", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(14), 7, GFLAGS),
+ COMPOSITE(CLK_ASRC1, "clk_asrc1", gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(42), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3506_CLKGATE_CON(14), 8, GFLAGS),
+ GATE(PCLK_CRU, "pclk_cru", "pclk_lsperi_root", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(14), 9, GFLAGS),
+ GATE(PCLK_PMU_ROOT, "pclk_pmu_root", "pclk_lsperi_root", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(14), 10, GFLAGS),
+ COMPOSITE_NODIV(MCLK_ASRC0, "mclk_asrc0", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(46), 0, 4, MFLAGS,
+ RK3506_CLKGATE_CON(16), 0, GFLAGS),
+ COMPOSITE_NODIV(MCLK_ASRC1, "mclk_asrc1", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(46), 4, 4, MFLAGS,
+ RK3506_CLKGATE_CON(16), 1, GFLAGS),
+ COMPOSITE_NODIV(MCLK_ASRC2, "mclk_asrc2", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(46), 8, 4, MFLAGS,
+ RK3506_CLKGATE_CON(16), 2, GFLAGS),
+ COMPOSITE_NODIV(MCLK_ASRC3, "mclk_asrc3", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(46), 12, 4, MFLAGS,
+ RK3506_CLKGATE_CON(16), 3, GFLAGS),
+ COMPOSITE_NODIV(LRCK_ASRC0_SRC, "lrck_asrc0_src", lrck_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(47), 0, 4, MFLAGS,
+ RK3506_CLKGATE_CON(16), 4, GFLAGS),
+ COMPOSITE_NODIV(LRCK_ASRC0_DST, "lrck_asrc0_dst", lrck_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(47), 4, 4, MFLAGS,
+ RK3506_CLKGATE_CON(16), 5, GFLAGS),
+ COMPOSITE_NODIV(LRCK_ASRC1_SRC, "lrck_asrc1_src", lrck_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(47), 8, 4, MFLAGS,
+ RK3506_CLKGATE_CON(16), 6, GFLAGS),
+ COMPOSITE_NODIV(LRCK_ASRC1_DST, "lrck_asrc1_dst", lrck_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(47), 12, 4, MFLAGS,
+ RK3506_CLKGATE_CON(16), 7, GFLAGS),
+
+ /* hs peri */
+ COMPOSITE(ACLK_HSPERI_ROOT, "aclk_hsperi_root", gpll_v0pll_v1pll_div_parents_p, CLK_IS_CRITICAL,
+ RK3506_CLKSEL_CON(49), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(17), 0, GFLAGS),
+ GATE(HCLK_HSPERI_ROOT, "hclk_hsperi_root", "aclk_hsperi_root", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(17), 1, GFLAGS),
+ GATE(PCLK_HSPERI_ROOT, "pclk_hsperi_root", "hclk_hsperi_root", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(17), 2, GFLAGS),
+ COMPOSITE(CCLK_SRC_SDMMC, "cclk_src_sdmmc", cclk_src_sdmmc_parents_p, 0,
+ RK3506_CLKSEL_CON(49), 13, 2, MFLAGS, 7, 6, DFLAGS,
+ RK3506_CLKGATE_CON(17), 6, GFLAGS),
+ GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(17), 7, GFLAGS),
+ GATE(HCLK_FSPI, "hclk_fspi", "hclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(17), 8, GFLAGS),
+ COMPOSITE(SCLK_FSPI, "sclk_fspi", xin24m_g_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(50), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(17), 9, GFLAGS),
+ GATE(PCLK_SPI2, "pclk_spi2", "pclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(17), 10, GFLAGS),
+ GATE(ACLK_MAC0, "aclk_mac0", "aclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(17), 11, GFLAGS),
+ GATE(ACLK_MAC1, "aclk_mac1", "aclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(17), 12, GFLAGS),
+ GATE(PCLK_MAC0, "pclk_mac0", "pclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(17), 13, GFLAGS),
+ GATE(PCLK_MAC1, "pclk_mac1", "pclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(17), 14, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MAC_ROOT, "clk_mac_root", "gpll", 0,
+ RK3506_CLKSEL_CON(50), 7, 5, DFLAGS,
+ RK3506_CLKGATE_CON(17), 15, GFLAGS),
+ GATE(CLK_MAC0, "clk_mac0", "clk_mac_root", 0,
+ RK3506_CLKGATE_CON(18), 0, GFLAGS),
+ GATE(CLK_MAC1, "clk_mac1", "clk_mac_root", 0,
+ RK3506_CLKGATE_CON(18), 1, GFLAGS),
+ COMPOSITE(MCLK_SAI2, "mclk_sai2", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(51), 8, 4, MFLAGS, 0, 8, DFLAGS,
+ RK3506_CLKGATE_CON(18), 2, GFLAGS),
+ GATE(HCLK_SAI2, "hclk_sai2", "hclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(18), 3, GFLAGS),
+ GATE(MCLK_OUT_SAI2, "mclk_out_sai2", "mclk_sai2", 0,
+ RK3506_CLKGATE_CON(18), 4, GFLAGS),
+ COMPOSITE(MCLK_SAI3_SRC, "mclk_sai3_src", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(52), 8, 4, MFLAGS, 0, 8, DFLAGS,
+ RK3506_CLKGATE_CON(18), 5, GFLAGS),
+ GATE(HCLK_SAI3, "hclk_sai3", "hclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(18), 6, GFLAGS),
+ GATE(MCLK_SAI3, "mclk_sai3", "mclk_sai3_src", 0,
+ RK3506_CLKGATE_CON(18), 7, GFLAGS),
+ GATE(MCLK_OUT_SAI3, "mclk_out_sai3", "mclk_sai3_src", 0,
+ RK3506_CLKGATE_CON(18), 8, GFLAGS),
+ COMPOSITE(MCLK_SAI4_SRC, "mclk_sai4_src", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(53), 8, 4, MFLAGS, 0, 8, DFLAGS,
+ RK3506_CLKGATE_CON(18), 9, GFLAGS),
+ GATE(HCLK_SAI4, "hclk_sai4", "hclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(18), 10, GFLAGS),
+ GATE(MCLK_SAI4, "mclk_sai4", "mclk_sai4_src", 0,
+ RK3506_CLKGATE_CON(18), 11, GFLAGS),
+ GATE(HCLK_DSM, "hclk_dsm", "hclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(18), 12, GFLAGS),
+ GATE(MCLK_DSM, "mclk_dsm", "mclk_sai3_src", 0,
+ RK3506_CLKGATE_CON(18), 13, GFLAGS),
+ GATE(PCLK_AUDIO_ADC, "pclk_audio_adc", "pclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(18), 14, GFLAGS),
+ GATE(MCLK_AUDIO_ADC, "mclk_audio_adc", "mclk_sai4_src", 0,
+ RK3506_CLKGATE_CON(18), 15, GFLAGS),
+ FACTOR(MCLK_AUDIO_ADC_DIV4, "mclk_audio_adc_div4", "mclk_audio_adc", 0, 1, 4),
+ GATE(PCLK_SARADC, "pclk_saradc", "pclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(19), 0, GFLAGS),
+ COMPOSITE(CLK_SARADC, "clk_saradc", xin24m_400k_32k_parents_p, 0,
+ RK3506_CLKSEL_CON(54), 4, 2, MFLAGS, 0, 4, DFLAGS,
+ RK3506_CLKGATE_CON(19), 1, GFLAGS),
+ GATE(PCLK_OTPC_NS, "pclk_otpc_ns", "pclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(19), 3, GFLAGS),
+ GATE(CLK_SBPI_OTPC_NS, "clk_sbpi_otpc_ns", "xin24m_gate", 0,
+ RK3506_CLKGATE_CON(19), 4, GFLAGS),
+ FACTOR(CLK_USER_OTPC_NS, "clk_user_otpc_ns", "clk_sbpi_otpc_ns", 0, 1, 2),
+ GATE(PCLK_UART5, "pclk_uart5", "pclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(19), 6, GFLAGS),
+ COMPOSITE(SCLK_UART5, "sclk_uart5", sclk_uart_parents_p, 0,
+ RK3506_CLKSEL_CON(54), 11, 3, MFLAGS, 6, 5, DFLAGS,
+ RK3506_CLKGATE_CON(19), 7, GFLAGS),
+ GATE(PCLK_GPIO234_IOC, "pclk_gpio234_ioc", "pclk_hsperi_root", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(19), 8, GFLAGS),
+ COMPOSITE(CLK_MAC_PTP_ROOT, "clk_mac_ptp_root", clk_mac_ptp_root_parents_p, 0,
+ RK3506_CLKSEL_CON(55), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(19), 9, GFLAGS),
+ GATE(CLK_MAC0_PTP, "clk_mac0_ptp", "clk_mac_ptp_root", 0,
+ RK3506_CLKGATE_CON(19), 10, GFLAGS),
+ GATE(CLK_MAC1_PTP, "clk_mac1_ptp", "clk_mac_ptp_root", 0,
+ RK3506_CLKGATE_CON(19), 11, GFLAGS),
+ COMPOSITE(ACLK_VIO_ROOT, "aclk_vio_root", gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(58), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(21), 0, GFLAGS),
+ COMPOSITE(HCLK_VIO_ROOT, "hclk_vio_root", gpll_v0pll_v1pll_div_parents_p, 0,
+ RK3506_CLKSEL_CON(58), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3506_CLKGATE_CON(21), 1, GFLAGS),
+ GATE(PCLK_VIO_ROOT, "pclk_vio_root", "hclk_vio_root", 0,
+ RK3506_CLKGATE_CON(21), 2, GFLAGS),
+ GATE(HCLK_RGA, "hclk_rga", "hclk_vio_root", 0,
+ RK3506_CLKGATE_CON(21), 6, GFLAGS),
+ GATE(ACLK_RGA, "aclk_rga", "aclk_vio_root", 0,
+ RK3506_CLKGATE_CON(21), 7, GFLAGS),
+ COMPOSITE(CLK_CORE_RGA, "clk_core_rga", gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(59), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(21), 8, GFLAGS),
+ GATE(ACLK_VOP, "aclk_vop", "aclk_vio_root", 0,
+ RK3506_CLKGATE_CON(21), 9, GFLAGS),
+ GATE(HCLK_VOP, "hclk_vop", "hclk_vio_root", 0,
+ RK3506_CLKGATE_CON(21), 10, GFLAGS),
+ COMPOSITE(DCLK_VOP, "dclk_vop", dclk_vop_parents_p, 0,
+ RK3506_CLKSEL_CON(60), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3506_CLKGATE_CON(21), 11, GFLAGS),
+ GATE(PCLK_DPHY, "pclk_dphy", "pclk_vio_root", 0,
+ RK3506_CLKGATE_CON(21), 12, GFLAGS),
+ GATE(PCLK_DSI_HOST, "pclk_dsi_host", "pclk_vio_root", 0,
+ RK3506_CLKGATE_CON(21), 13, GFLAGS),
+ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_vio_root", 0,
+ RK3506_CLKGATE_CON(21), 14, GFLAGS),
+ COMPOSITE_NOMUX(CLK_TSADC, "clk_tsadc", "xin24m_gate", 0,
+ RK3506_CLKSEL_CON(61), 0, 8, DFLAGS,
+ RK3506_CLKGATE_CON(21), 15, GFLAGS),
+ COMPOSITE_NOMUX(CLK_TSADC_TSEN, "clk_tsadc_tsen", "xin24m_gate", 0,
+ RK3506_CLKSEL_CON(61), 8, 3, DFLAGS,
+ RK3506_CLKGATE_CON(22), 0, GFLAGS),
+ GATE(PCLK_GPIO1_IOC, "pclk_gpio1_ioc", "pclk_vio_root", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(22), 1, GFLAGS),
+
+ /* pmu */
+ GATE(CLK_PMU, "clk_pmu", "xin24m", CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(PCLK_PMU, "pclk_pmu", "pclk_pmu_root", CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(PCLK_PMU_CRU, "pclk_pmu_cru", "pclk_pmu_root", CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKGATE_CON(0), 4, GFLAGS),
+ GATE(PCLK_PMU_GRF, "pclk_pmu_grf", "pclk_pmu_root", CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKGATE_CON(0), 5, GFLAGS),
+ GATE(PCLK_GPIO0_IOC, "pclk_gpio0_ioc", "pclk_pmu_root", CLK_IS_CRITICAL,
+ RK3506_PMU_CLKGATE_CON(0), 7, GFLAGS),
+ GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pmu_root", 0,
+ RK3506_PMU_CLKGATE_CON(0), 8, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO0, "dbclk_gpio0", dbclk_gpio0_parents_p, 0,
+ RK3506_PMU_CLKSEL_CON(0), 0, 2, MFLAGS,
+ RK3506_PMU_CLKGATE_CON(0), 9, GFLAGS),
+ GATE(PCLK_GPIO1_SHADOW, "pclk_gpio1_shadow", "pclk_pmu_root", 0,
+ RK3506_PMU_CLKGATE_CON(0), 10, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO1_SHADOW, "dbclk_gpio1_shadow", dbclk_gpio0_parents_p, 0,
+ RK3506_PMU_CLKSEL_CON(0), 2, 2, MFLAGS,
+ RK3506_PMU_CLKGATE_CON(0), 11, GFLAGS),
+ GATE(PCLK_PMU_HP_TIMER, "pclk_pmu_hp_timer", "pclk_pmu_root", CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKGATE_CON(0), 12, GFLAGS),
+ MUX(CLK_PMU_HP_TIMER, "clk_pmu_hp_timer", clk_pmu_hp_timer_parents_p, CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKSEL_CON(0), 4, 2, MFLAGS),
+ GATE(PCLK_PWM0, "pclk_pwm0", "pclk_pmu_root", 0,
+ RK3506_PMU_CLKGATE_CON(0), 15, GFLAGS),
+ COMPOSITE_NOMUX(CLK_PWM0, "clk_pwm0", "clk_gpll_div_100m", 0,
+ RK3506_PMU_CLKSEL_CON(0), 6, 4, DFLAGS,
+ RK3506_PMU_CLKGATE_CON(1), 0, GFLAGS),
+ GATE(CLK_OSC_PWM0, "clk_osc_pwm0", "xin24m", 0,
+ RK3506_PMU_CLKGATE_CON(1), 1, GFLAGS),
+ GATE(CLK_RC_PWM0, "clk_rc_pwm0", "clk_rc", 0,
+ RK3506_PMU_CLKGATE_CON(1), 2, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MAC_OUT, "clk_mac_out", "gpll", 0,
+ RK3506_PMU_CLKSEL_CON(0), 10, 6, DFLAGS,
+ RK3506_PMU_CLKGATE_CON(1), 3, GFLAGS),
+ COMPOSITE(CLK_REF_OUT0, "clk_ref_out0", clk_ref_out_parents_p, 0,
+ RK3506_PMU_CLKSEL_CON(1), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3506_PMU_CLKGATE_CON(1), 4, GFLAGS),
+ COMPOSITE(CLK_REF_OUT1, "clk_ref_out1", clk_ref_out_parents_p, 0,
+ RK3506_PMU_CLKSEL_CON(1), 14, 2, MFLAGS, 8, 6, DFLAGS,
+ RK3506_PMU_CLKGATE_CON(1), 5, GFLAGS),
+ MUX(CLK_32K_FRAC_MUX, "clk_32k_frac_mux", clk_32k_frac_parents_p, 0,
+ RK3506_PMU_CLKSEL_CON(3), 0, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_32K_FRAC, "clk_32k_frac", "clk_32k_frac_mux", 0,
+ RK3506_PMU_CLKSEL_CON(2), 0,
+ RK3506_PMU_CLKGATE_CON(1), 6, GFLAGS),
+ COMPOSITE_NOMUX(CLK_32K_RC, "clk_32k_rc", "clk_rc", CLK_IS_CRITICAL,
+ RK3506_PMU_CLKSEL_CON(3), 2, 5, DFLAGS,
+ RK3506_PMU_CLKGATE_CON(1), 7, GFLAGS),
+ COMPOSITE_NODIV(CLK_32K, "clk_32k", clk_32k_parents_p, CLK_IS_CRITICAL,
+ RK3506_PMU_CLKSEL_CON(3), 7, 2, MFLAGS,
+ RK3506_PMU_CLKGATE_CON(1), 8, GFLAGS),
+ COMPOSITE_NODIV(CLK_32K_PMU, "clk_32k_pmu", clk_32k_parents_p, CLK_IS_CRITICAL,
+ RK3506_PMU_CLKSEL_CON(3), 9, 2, MFLAGS,
+ RK3506_PMU_CLKGATE_CON(1), 9, GFLAGS),
+ GATE(CLK_PMU_32K, "clk_pmu_32k", "clk_32k_pmu", CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKGATE_CON(0), 3, GFLAGS),
+ GATE(CLK_PMU_HP_TIMER_32K, "clk_pmu_hp_timer_32k", "clk_32k_pmu", CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKGATE_CON(0), 14, GFLAGS),
+ GATE(PCLK_TOUCH_KEY, "pclk_touch_key", "pclk_pmu_root", CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKGATE_CON(1), 12, GFLAGS),
+ GATE(CLK_TOUCH_KEY, "clk_touch_key", "xin24m", CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKGATE_CON(1), 13, GFLAGS),
+ COMPOSITE(CLK_REF_PHY_PLL, "clk_ref_phy_pll", gpll_v0pll_v1pll_parents_p, 0,
+ RK3506_PMU_CLKSEL_CON(4), 13, 2, MFLAGS, 6, 7, DFLAGS,
+ RK3506_PMU_CLKGATE_CON(1), 14, GFLAGS),
+ MUX(CLK_REF_PHY_PMU_MUX, "clk_ref_phy_pmu_mux", clk_ref_phy_pmu_mux_parents_p, 0,
+ RK3506_PMU_CLKSEL_CON(4), 15, 1, MFLAGS),
+ GATE(CLK_WIFI_OUT, "clk_wifi_out", "xin24m", 0,
+ RK3506_PMU_CLKGATE_CON(2), 0, GFLAGS),
+ MUX(CLK_V0PLL_REF, "clk_v0pll_ref", clk_vpll_ref_parents_p, CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKSEL_CON(6), 0, 1, MFLAGS),
+ MUX(CLK_V1PLL_REF, "clk_v1pll_ref", clk_vpll_ref_parents_p, CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKSEL_CON(6), 1, 1, MFLAGS),
+
+ /* secure ns */
+ GATE(CLK_CORE_CRYPTO_NS, "clk_core_crypto_ns", "clk_core_crypto", 0,
+ RK3506_CLKGATE_CON(5), 12, GFLAGS),
+ GATE(CLK_PKA_CRYPTO_NS, "clk_pka_crypto_ns", "clk_pka_crypto", 0,
+ RK3506_CLKGATE_CON(5), 13, GFLAGS),
+
+ /* io */
+ GATE(CLK_SPI2, "clk_spi2", "clk_spi2_io", 0,
+ RK3506_CLKGATE_CON(20), 0, GFLAGS),
+};
+
+static void __init rk3506_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
+ void __iomem *reg_base;
+
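+ /* Size the provider from the branch list instead of a fixed CLK_NR_CLKS. */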
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3506_clk_branches,
+ ARRAY_SIZE(rk3506_clk_branches)) + 1;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ rockchip_clk_register_plls(ctx, rk3506_pll_clks,
+ ARRAY_SIZE(rk3506_pll_clks),
+ 0);
+
+ rockchip_clk_register_armclk_multi_pll(ctx, &rk3506_armclk,
+ rk3506_cpuclk_rates,
+ ARRAY_SIZE(rk3506_cpuclk_rates));
+
+ rockchip_clk_register_branches(ctx, rk3506_clk_branches,
+ ARRAY_SIZE(rk3506_clk_branches));
+
+ rk3506_rst_init(np, reg_base);
+
+ rockchip_register_restart_notifier(ctx, RK3506_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
+
+ /* Select the PVTPLL output as the pvtpll clock source. */
+ writel_relaxed(PVTPLL_SRC_SEL_PVTPLL, reg_base + RK3506_CLKSEL_CON(15));
+}
+
+CLK_OF_DECLARE(rk3506_cru, "rockchip,rk3506-cru", rk3506_clk_init);
+
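+/*
+ * The platform driver below mirrors the CLK_OF_DECLARE path; of_clk_init()
+ * marks handled nodes OF_POPULATED, so only one of the two registration
+ * paths runs for a given CRU node.
+ */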
+struct clk_rk3506_inits {
+ void (*inits)(struct device_node *np);
+};
+
+static const struct clk_rk3506_inits clk_rk3506_cru_init = {
+ .inits = rk3506_clk_init,
+};
+
+static const struct of_device_id clk_rk3506_match_table[] = {
+ {
+ .compatible = "rockchip,rk3506-cru",
+ .data = &clk_rk3506_cru_init,
+ },
+ { }
+};
+
+static int clk_rk3506_probe(struct platform_device *pdev)
+{
+ const struct clk_rk3506_inits *init_data;
+ struct device *dev = &pdev->dev;
+
+ init_data = device_get_match_data(dev);
+ if (!init_data)
+ return -EINVAL;
+
+ if (init_data->inits)
+ init_data->inits(dev->of_node);
+
+ return 0;
+}
+
+static struct platform_driver clk_rk3506_driver = {
+ .probe = clk_rk3506_probe,
+ .driver = {
+ .name = "clk-rk3506",
+ .of_match_table = clk_rk3506_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(clk_rk3506_driver, clk_rk3506_probe);
diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
index d48ab9d6c064..74eabf9b2ae2 100644
--- a/drivers/clk/rockchip/clk-rk3568.c
+++ b/drivers/clk/rockchip/clk-rk3568.c
@@ -79,6 +79,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = {
RK3036_PLL_RATE(200000000, 1, 100, 3, 4, 1, 0),
RK3036_PLL_RATE(148500000, 1, 99, 4, 4, 1, 0),
RK3036_PLL_RATE(135000000, 2, 45, 4, 1, 1, 0),
+ RK3036_PLL_RATE(132000000, 1, 66, 6, 2, 1, 0),
RK3036_PLL_RATE(128000000, 1, 16, 3, 1, 1, 0),
RK3036_PLL_RATE(126400000, 1, 79, 5, 3, 1, 0),
RK3036_PLL_RATE(119000000, 3, 119, 4, 2, 1, 0),
@@ -1651,6 +1652,7 @@ CLK_OF_DECLARE(rk3568_cru_pmu, "rockchip,rk3568-pmucru", rk3568_pmu_clk_init);
static void __init rk3568_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
@@ -1659,7 +1661,9 @@ static void __init rk3568_clk_init(struct device_node *np)
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3568_clk_branches,
+ ARRAY_SIZE(rk3568_clk_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
diff --git a/drivers/clk/rockchip/clk-rv1126b.c b/drivers/clk/rockchip/clk-rv1126b.c
new file mode 100644
index 000000000000..3e27bfc14854
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rv1126b.c
@@ -0,0 +1,1117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Rockchip Electronics Co., Ltd.
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/syscore_ops.h>
+#include <dt-bindings/clock/rockchip,rv1126b-cru.h>
+#include "clk.h"
+
+#define RV1126B_FRAC_MAX_PRATE 1200000000
+
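+/* BIT(16) is the hiword write-enable for the BIT(0) pvtpll source select. */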
+#define PVTPLL_SRC_SEL_PVTPLL (BIT(0) | BIT(16))
+
+enum rv1126b_plls {
+ gpll, cpll, aupll, dpll
+};
+
+static struct rockchip_pll_rate_table rv1126b_pll_rates[] = {
+ /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
+ RK3036_PLL_RATE(1200000000, 1, 100, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1188000000, 1, 99, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1179648000, 1, 49, 1, 1, 0, 2550137),
+ RK3036_PLL_RATE(1000000000, 3, 250, 2, 1, 1, 0),
+ RK3036_PLL_RATE(993484800, 1, 41, 1, 1, 0, 6630355),
+ RK3036_PLL_RATE(983040000, 1, 40, 1, 1, 0, 16106127),
+ RK3036_PLL_RATE(903168000, 1, 75, 2, 1, 0, 4429185),
+ { /* sentinel */ },
+};
+
+#define RV1126B_DIV_ACLK_CORE_MASK 0x1f
+#define RV1126B_DIV_ACLK_CORE_SHIFT 0
+#define RV1126B_DIV_PCLK_CORE_MASK 0x1f
+#define RV1126B_DIV_PCLK_CORE_SHIFT 8
+#define RV1126B_CORE_SEL_MASK 0x1
+#define RV1126B_CORE_SEL_SHIFT 1
+
+#define RV1126B_CLKSEL0(_aclk_core) \
+{ \
+ .reg = RV1126B_CORECLKSEL_CON(2), \
+ .val = HIWORD_UPDATE(_aclk_core - 1, RV1126B_DIV_ACLK_CORE_MASK, \
+ RV1126B_DIV_ACLK_CORE_SHIFT), \
+}
+
+#define RV1126B_CLKSEL1(_pclk_dbg) \
+{ \
+ .reg = RV1126B_CORECLKSEL_CON(2), \
+ .val = HIWORD_UPDATE(_pclk_dbg - 1, RV1126B_DIV_PCLK_CORE_MASK, \
+ RV1126B_DIV_PCLK_CORE_SHIFT), \
+}
+
+#define RV1126B_CPUCLK_RATE(_prate, _aclk_core, _pclk_dbg) \
+{ \
+ .prate = _prate, \
+ .divs = { \
+ RV1126B_CLKSEL0(_aclk_core), \
+ RV1126B_CLKSEL1(_pclk_dbg), \
+ }, \
+}
+
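+/*
+ * These entries store the actual divider value; the CLKSEL macros above
+ * subtract one to form the register field.
+ */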
+static struct rockchip_cpuclk_rate_table rv1126b_cpuclk_rates[] __initdata = {
+ RV1126B_CPUCLK_RATE(1608000000, 4, 10),
+ RV1126B_CPUCLK_RATE(1512000000, 4, 10),
+ RV1126B_CPUCLK_RATE(1416000000, 4, 10),
+ RV1126B_CPUCLK_RATE(1296000000, 3, 10),
+ RV1126B_CPUCLK_RATE(1200000000, 3, 10),
+ RV1126B_CPUCLK_RATE(1188000000, 3, 8),
+ RV1126B_CPUCLK_RATE(1104000000, 2, 8),
+ RV1126B_CPUCLK_RATE(1008000000, 2, 8),
+ RV1126B_CPUCLK_RATE(816000000, 2, 6),
+ RV1126B_CPUCLK_RATE(600000000, 2, 4),
+ RV1126B_CPUCLK_RATE(594000000, 2, 4),
+ RV1126B_CPUCLK_RATE(408000000, 1, 3),
+ RV1126B_CPUCLK_RATE(396000000, 1, 3),
+};
+
+PNAME(mux_pll_p) = { "xin24m" };
+PNAME(mux_gpll_cpll_p) = { "gpll", "cpll" };
+PNAME(mux_gpll_aupll_p) = { "gpll", "aupll" };
+PNAME(mux_gpll_aupll_cpll_p) = { "gpll", "aupll", "cpll" };
+PNAME(mux_gpll_cpll_24m_p) = { "gpll", "cpll", "xin24m" };
+PNAME(mux_cpll_24m_p) = { "cpll", "xin24m" };
+PNAME(mux_24m_gpll_aupll_cpll_p) = { "xin24m", "gpll", "aupll", "cpll" };
+PNAME(mux_24m_gpll_cpll_p) = { "xin24m", "gpll", "cpll" };
+PNAME(mux_24m_gpll_aupll_p) = { "xin24m", "gpll", "aupll" };
+PNAME(mux_sclk_uart_src_p) = { "xin24m", "clk_cm_frac0", "clk_cm_frac1",
+ "clk_cm_frac2", "clk_uart_frac0", "clk_uart_frac1" };
+PNAME(mclk_sai0_src_p) = { "xin24m", "clk_cm_frac0", "clk_cm_frac1",
+ "clk_cm_frac2", "clk_audio_frac0", "clk_audio_frac1",
+ "clk_audio_int0", "clk_audio_int1",
+ "mclk_sai0_from_io" };
+PNAME(mclk_sai1_src_p) = { "xin24m", "clk_cm_frac0", "clk_cm_frac1",
+ "clk_cm_frac2", "clk_audio_frac0", "clk_audio_frac1",
+ "clk_audio_int0", "clk_audio_int1",
+ "mclk_sai1_from_io" };
+PNAME(mclk_sai2_src_p) = { "xin24m", "clk_cm_frac0", "clk_cm_frac1",
+ "clk_cm_frac2", "clk_audio_frac0", "clk_audio_frac1",
+ "clk_audio_int0", "clk_audio_int1",
+ "mclk_sai2_from_io" };
+PNAME(mux_sai_src_p) = { "xin24m", "clk_cm_frac0", "clk_cm_frac1",
+ "clk_cm_frac2", "clk_audio_frac0", "clk_audio_frac1",
+ "clk_audio_int0", "clk_audio_int1", "mclk_sai0_from_io",
+ "mclk_sai1_from_io", "mclk_sai2_from_io"};
+PNAME(mux_100m_24m_p) = { "clk_cpll_div10", "xin24m" };
+PNAME(mux_200m_24m_p) = { "clk_gpll_div6", "xin24m" };
+PNAME(mux_500m_400m_200m_p) = { "clk_cpll_div2", "clk_gpll_div3", "clk_gpll_div6" };
+PNAME(mux_300m_200m_p) = { "clk_gpll_div4", "clk_gpll_div6" };
+PNAME(mux_500m_400m_300m_p) = { "clk_cpll_div2", "clk_gpll_div3", "clk_gpll_div4" };
+PNAME(mux_333m_200m_p) = { "clk_cpll_div3", "clk_gpll_div6" };
+PNAME(mux_600m_400m_200m_p) = { "clk_gpll_div2", "clk_gpll_div3", "clk_gpll_div6" };
+PNAME(mux_400m_300m_200m_p) = { "clk_gpll_div3", "clk_gpll_div4", "clk_gpll_div6" };
+PNAME(mux_200m_100m_p) = { "clk_gpll_div6", "clk_cpll_div10" };
+PNAME(mux_200m_100m_50m_24m_p) = { "clk_gpll_div6", "clk_cpll_div10", "clk_cpll_div20",
+ "xin24m" };
+PNAME(mux_600m_24m_p) = { "clk_gpll_div2", "xin24m" };
+PNAME(mux_armclk_p) = { "clk_core_pll", "clk_core_pvtpll" };
+PNAME(aclk_npu_root_p) = { "clk_npu_pll", "clk_npu_pvtpll" };
+PNAME(clk_saradc0_p) = { "clk_saradc0_src", "clk_saradc0_rcosc_io" };
+PNAME(clk_core_vepu_p) = { "clk_vepu_pll", "clk_vepu_pvtpll" };
+PNAME(clk_core_fec_p) = { "clk_core_fec_src", "clk_vcp_pvtpll" };
+PNAME(clk_core_aisp_p) = { "clk_aisp_pll", "clk_vcp_pvtpll" };
+PNAME(clk_core_isp_root_p) = { "clk_isp_pll", "clk_isp_pvtpll" };
+PNAME(clk_gmac_ptp_ref_p) = { "clk_gmac_ptp_ref_src", "clk_gmac_ptp_from_io" };
+PNAME(clk_saradc1_p) = { "clk_saradc1_src", "clk_saradc1_rcosc_io" };
+PNAME(clk_saradc2_p) = { "clk_saradc2_src", "clk_saradc2_rcosc_io" };
+PNAME(clk_rcosc_src_p) = { "xin24m", "clk_rcosc", "clk_rcosc_div2",
+ "clk_rcosc_div3", "clk_rcosc_div4" };
+PNAME(busclk_pmu_mux_p) = { "clk_cpll_div10", "clk_rcosc_src" };
+PNAME(clk_xin_rc_div_p) = { "xin24m", "clk_rcosc_src" };
+PNAME(clk_32k_p) = { "clk_xin_rc_div", "clk_32k_rtc", "clk_32k_io" };
+PNAME(mux_24m_32k_p) = { "xin24m", "clk_32k" };
+PNAME(mux_24m_rcosc_buspmu_p) = { "xin24m", "clk_rcosc_src", "busclk_pmu_src" };
+PNAME(mux_24m_rcosc_buspmu_32k_p) = { "xin24m", "clk_rcosc_src", "busclk_pmu_src",
+ "clk_32k" };
+PNAME(sclk_uart0_p) = { "sclk_uart0_src", "xin24m", "clk_rcosc_src" };
+PNAME(clk_osc_rcosc_ctrl_p) = { "clk_rcosc_src", "clk_testout_out" };
+PNAME(lrck_src_asrc_p) = { "mclk_asrc0", "mclk_asrc1", "mclk_asrc2", "mclk_asrc3",
+ "fs_inter_from_sai0", "fs_inter_from_sai1",
+ "fs_inter_from_sai2", "clkout_pdm"};
+PNAME(clk_ref_pipephy_p) = { "clk_ref_pipephy_cpll_src", "xin24m" };
+PNAME(clk_timer0_parents_p) = { "clk_timer_root", "mclk_sai0_from_io",
+ "sclk_sai0_from_io" };
+PNAME(clk_timer1_parents_p) = { "clk_timer_root", "mclk_sai1_from_io",
+ "sclk_sai1_from_io" };
+PNAME(clk_timer2_parents_p) = { "clk_timer_root", "mclk_sai2_from_io",
+ "sclk_sai2_from_io" };
+PNAME(clk_timer3_parents_p) = { "clk_timer_root", "mclk_asrc0", "mclk_asrc1" };
+PNAME(clk_timer4_parents_p) = { "clk_timer_root", "mclk_asrc2", "mclk_asrc3" };
+PNAME(clk_macphy_p) = { "xin24m", "clk_cpll_div20" };
+PNAME(clk_cpll_div10_p) = { "gpll", "clk_aisp_pll_src" };
+
+static struct rockchip_pll_clock rv1126b_pll_clks[] __initdata = {
+ [gpll] = PLL(pll_rk3328, PLL_GPLL, "gpll", mux_pll_p,
+ CLK_IS_CRITICAL, RV1126B_PLL_CON(8),
+ RV1126B_MODE_CON, 2, 10, 0, rv1126b_pll_rates),
+ [aupll] = PLL(pll_rk3328, PLL_AUPLL, "aupll", mux_pll_p,
+ CLK_IS_CRITICAL, RV1126B_PLL_CON(0),
+ RV1126B_MODE_CON, 0, 10, 0, rv1126b_pll_rates),
+ [cpll] = PLL(pll_rk3328, PLL_CPLL, "cpll", mux_pll_p,
+ CLK_IS_CRITICAL, RV1126B_PERIPLL_CON(0),
+ RV1126B_MODE_CON, 4, 10, 0, rv1126b_pll_rates),
+ [dpll] = PLL(pll_rk3328, 0, "dpll", mux_pll_p,
+ CLK_IS_CRITICAL, RV1126B_SUBDDRPLL_CON(0),
+ RV1126B_MODE_CON, 2, 10, 0, rv1126b_pll_rates),
+};
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+
+static struct rockchip_clk_branch rv1126b_rcdiv_pmu_fracmux __initdata =
+ MUX(CLK_32K, "clk_32k", clk_32k_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ RV1126B_PMUCLKSEL_CON(2), 1, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126b_clk_branches[] __initdata = {
+ FACTOR(0, "clk_rcosc_div2", "clk_rcosc", 0, 1, 2),
+ FACTOR(0, "clk_rcosc_div3", "clk_rcosc", 0, 1, 3),
+ FACTOR(0, "clk_rcosc_div4", "clk_rcosc", 0, 1, 4),
+
+ /* Clock Definition */
+ COMPOSITE_NODIV(CLK_AISP_PLL_SRC, "clk_aisp_pll_src", mux_gpll_aupll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(62), 4, 2, MFLAGS,
+ RV1126B_CLKGATE_CON(5), 4, GFLAGS),
+ DIV(CLK_AISP_PLL, "clk_aisp_pll", "clk_aisp_pll_src", 0,
+ RV1126B_CLKSEL_CON(62), 0, 3, DFLAGS),
+
+ COMPOSITE(CLK_CPLL_DIV10, "clk_cpll_div10", clk_cpll_div10_p, 0,
+ RV1126B_CLKSEL_CON(1), 15, 1, MFLAGS, 5, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 1, GFLAGS),
+ COMPOSITE_NOMUX(CLK_CPLL_DIV20, "clk_cpll_div20", "cpll", 0,
+ RV1126B_CLKSEL_CON(1), 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 0, GFLAGS),
+ COMPOSITE_NOMUX(CLK_CPLL_DIV8, "clk_cpll_div8", "cpll", 0,
+ RV1126B_CLKSEL_CON(1), 10, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 2, GFLAGS),
+ COMPOSITE_NOMUX(CLK_GPLL_DIV8, "clk_gpll_div8", "gpll", 0,
+ RV1126B_CLKSEL_CON(2), 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 3, GFLAGS),
+ COMPOSITE_NOMUX(CLK_GPLL_DIV6, "clk_gpll_div6", "gpll", 0,
+ RV1126B_CLKSEL_CON(2), 5, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 4, GFLAGS),
+ COMPOSITE_NOMUX(CLK_GPLL_DIV4, "clk_gpll_div4", "gpll", 0,
+ RV1126B_CLKSEL_CON(2), 10, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 5, GFLAGS),
+ COMPOSITE_NOMUX(CLK_CPLL_DIV3, "clk_cpll_div3", "cpll", 0,
+ RV1126B_CLKSEL_CON(3), 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 6, GFLAGS),
+ COMPOSITE_NOMUX(CLK_GPLL_DIV3, "clk_gpll_div3", "gpll", 0,
+ RV1126B_CLKSEL_CON(3), 5, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 7, GFLAGS),
+ COMPOSITE_NOMUX(CLK_CPLL_DIV2, "clk_cpll_div2", "cpll", 0,
+ RV1126B_CLKSEL_CON(3), 10, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 8, GFLAGS),
+ COMPOSITE_NOMUX(CLK_GPLL_DIV2, "clk_gpll_div2", "gpll", 0,
+ RV1126B_CLKSEL_CON(4), 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 9, GFLAGS),
+ MUX(CLK_CM_FRAC0_SRC, "clk_cm_frac0_src", mux_24m_gpll_aupll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(10), 0, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_CM_FRAC0, "clk_cm_frac0", "clk_cm_frac0_src", 0,
+ RV1126B_CLKSEL_CON(25), 0,
+ RV1126B_CLKGATE_CON(1), 0, GFLAGS),
+ MUX(CLK_CM_FRAC1_SRC, "clk_cm_frac1_src", mux_24m_gpll_aupll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(10), 2, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_CM_FRAC1, "clk_cm_frac1", "clk_cm_frac1_src", 0,
+ RV1126B_CLKSEL_CON(26), 0,
+ RV1126B_CLKGATE_CON(1), 1, GFLAGS),
+ MUX(CLK_CM_FRAC2_SRC, "clk_cm_frac2_src", mux_24m_gpll_aupll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(10), 4, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_CM_FRAC2, "clk_cm_frac2", "clk_cm_frac2_src", 0,
+ RV1126B_CLKSEL_CON(27), 0,
+ RV1126B_CLKGATE_CON(1), 2, GFLAGS),
+ MUX(CLK_UART_FRAC0_SRC, "clk_uart_frac0_src", mux_24m_gpll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(10), 6, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_UART_FRAC0, "clk_uart_frac0", "clk_uart_frac0_src", 0,
+ RV1126B_CLKSEL_CON(28), 0,
+ RV1126B_CLKGATE_CON(1), 3, GFLAGS),
+ MUX(CLK_UART_FRAC1_SRC, "clk_uart_frac1_src", mux_24m_gpll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(10), 8, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_UART_FRAC1, "clk_uart_frac1", "clk_uart_frac1_src", 0,
+ RV1126B_CLKSEL_CON(29), 0,
+ RV1126B_CLKGATE_CON(1), 4, GFLAGS),
+ MUX(CLK_AUDIO_FRAC0_SRC, "clk_audio_frac0_src", mux_24m_gpll_aupll_p, 0,
+ RV1126B_CLKSEL_CON(10), 10, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_AUDIO_FRAC0, "clk_audio_frac0", "clk_audio_frac0_src", 0,
+ RV1126B_CLKSEL_CON(30), 0,
+ RV1126B_CLKGATE_CON(1), 5, GFLAGS),
+ MUX(CLK_AUDIO_FRAC1_SRC, "clk_audio_frac1_src", mux_24m_gpll_aupll_p, 0,
+ RV1126B_CLKSEL_CON(10), 12, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_AUDIO_FRAC1, "clk_audio_frac1", "clk_audio_frac1_src", 0,
+ RV1126B_CLKSEL_CON(31), 0,
+ RV1126B_CLKGATE_CON(1), 6, GFLAGS),
+ COMPOSITE(CLK_AUDIO_INT0, "clk_audio_int0", mux_24m_gpll_aupll_p, 0,
+ RV1126B_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(1), 7, GFLAGS),
+ COMPOSITE(CLK_AUDIO_INT1, "clk_audio_int1", mux_24m_gpll_aupll_p, 0,
+ RV1126B_CLKSEL_CON(11), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(1), 8, GFLAGS),
+ COMPOSITE(SCLK_UART0_SRC, "sclk_uart0_src", mux_sclk_uart_src_p, 0,
+ RV1126B_CLKSEL_CON(12), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(1), 9, GFLAGS),
+ COMPOSITE(SCLK_UART1, "sclk_uart1", mux_sclk_uart_src_p, 0,
+ RV1126B_CLKSEL_CON(12), 13, 3, MFLAGS, 8, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(1), 10, GFLAGS),
+ COMPOSITE(SCLK_UART2, "sclk_uart2", mux_sclk_uart_src_p, 0,
+ RV1126B_CLKSEL_CON(13), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(1), 11, GFLAGS),
+ COMPOSITE(SCLK_UART3, "sclk_uart3", mux_sclk_uart_src_p, 0,
+ RV1126B_CLKSEL_CON(13), 13, 3, MFLAGS, 8, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(1), 12, GFLAGS),
+ COMPOSITE(SCLK_UART4, "sclk_uart4", mux_sclk_uart_src_p, 0,
+ RV1126B_CLKSEL_CON(14), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(1), 13, GFLAGS),
+ COMPOSITE(SCLK_UART5, "sclk_uart5", mux_sclk_uart_src_p, 0,
+ RV1126B_CLKSEL_CON(14), 13, 3, MFLAGS, 8, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(1), 14, GFLAGS),
+ COMPOSITE(SCLK_UART6, "sclk_uart6", mux_sclk_uart_src_p, 0,
+ RV1126B_CLKSEL_CON(15), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(2), 0, GFLAGS),
+ COMPOSITE(SCLK_UART7, "sclk_uart7", mux_sclk_uart_src_p, 0,
+ RV1126B_CLKSEL_CON(15), 13, 3, MFLAGS, 8, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(2), 1, GFLAGS),
+ COMPOSITE(MCLK_SAI0, "mclk_sai0", mclk_sai0_src_p, 0,
+ RV1126B_CLKSEL_CON(16), 8, 4, MFLAGS, 0, 8, DFLAGS,
+ RV1126B_CLKGATE_CON(2), 2, GFLAGS),
+ COMPOSITE(MCLK_SAI1, "mclk_sai1", mclk_sai1_src_p, 0,
+ RV1126B_CLKSEL_CON(17), 8, 4, MFLAGS, 0, 8, DFLAGS,
+ RV1126B_CLKGATE_CON(2), 3, GFLAGS),
+ COMPOSITE(MCLK_SAI2, "mclk_sai2", mclk_sai2_src_p, 0,
+ RV1126B_CLKSEL_CON(18), 8, 4, MFLAGS, 0, 8, DFLAGS,
+ RV1126B_CLKGATE_CON(2), 4, GFLAGS),
+ COMPOSITE(MCLK_PDM, "mclk_pdm", mux_sai_src_p, 0,
+ RV1126B_CLKSEL_CON(19), 6, 4, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(2), 5, GFLAGS),
+ COMPOSITE_NOGATE(0, "clkout_pdm_src", mux_sai_src_p, 0,
+ RV1126B_CLKSEL_CON(20), 8, 4, MFLAGS, 0, 8, DFLAGS),
+ GATE(CLKOUT_PDM, "clkout_pdm", "clkout_pdm_src", 0,
+ RV1126B_CLKGATE_CON(2), 6, GFLAGS),
+ COMPOSITE_NODIV(MCLK_ASRC0, "mclk_asrc0", mux_sai_src_p, 0,
+ RV1126B_CLKSEL_CON(16), 12, 4, MFLAGS,
+ RV1126B_CLKGATE_CON(2), 7, GFLAGS),
+ COMPOSITE_NODIV(MCLK_ASRC1, "mclk_asrc1", mux_sai_src_p, 0,
+ RV1126B_CLKSEL_CON(17), 12, 4, MFLAGS,
+ RV1126B_CLKGATE_CON(2), 8, GFLAGS),
+ COMPOSITE_NODIV(MCLK_ASRC2, "mclk_asrc2", mux_sai_src_p, 0,
+ RV1126B_CLKSEL_CON(18), 12, 4, MFLAGS,
+ RV1126B_CLKGATE_CON(2), 9, GFLAGS),
+ COMPOSITE_NODIV(MCLK_ASRC3, "mclk_asrc3", mux_sai_src_p, 0,
+ RV1126B_CLKSEL_CON(19), 12, 4, MFLAGS,
+ RV1126B_CLKGATE_CON(2), 10, GFLAGS),
+ COMPOSITE(CLK_ASRC0, "clk_asrc0", mux_gpll_aupll_p, 0,
+ RV1126B_CLKSEL_CON(21), 6, 1, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(2), 11, GFLAGS),
+ COMPOSITE(CLK_ASRC1, "clk_asrc1", mux_gpll_aupll_p, 0,
+ RV1126B_CLKSEL_CON(21), 14, 1, MFLAGS, 8, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(2), 12, GFLAGS),
+ COMPOSITE_NOMUX(CLK_CORE_PLL, "clk_core_pll", "gpll", CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(60), 0, 3, DFLAGS,
+ RV1126B_CLKGATE_CON(5), 0, GFLAGS),
+ COMPOSITE_NOMUX(CLK_NPU_PLL, "clk_npu_pll", "gpll", CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(60), 6, 3, DFLAGS,
+ RV1126B_CLKGATE_CON(5), 1, GFLAGS),
+ COMPOSITE(CLK_VEPU_PLL, "clk_vepu_pll", mux_gpll_aupll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(61), 4, 2, MFLAGS, 0, 3, DFLAGS,
+ RV1126B_CLKGATE_CON(5), 2, GFLAGS),
+ COMPOSITE(CLK_ISP_PLL, "clk_isp_pll", mux_gpll_aupll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(61), 10, 2, MFLAGS, 6, 4, DFLAGS,
+ RV1126B_CLKGATE_CON(5), 3, GFLAGS),
+ COMPOSITE(CLK_SARADC0_SRC, "clk_saradc0_src", mux_200m_24m_p, 0,
+ RV1126B_CLKSEL_CON(63), 12, 1, MFLAGS, 0, 3, DFLAGS,
+ RV1126B_CLKGATE_CON(5), 6, GFLAGS),
+ COMPOSITE(CLK_SARADC1_SRC, "clk_saradc1_src", mux_200m_24m_p, 0,
+ RV1126B_CLKSEL_CON(63), 13, 1, MFLAGS, 4, 3, DFLAGS,
+ RV1126B_CLKGATE_CON(5), 7, GFLAGS),
+ COMPOSITE(CLK_SARADC2_SRC, "clk_saradc2_src", mux_200m_24m_p, 0,
+ RV1126B_CLKSEL_CON(63), 14, 1, MFLAGS, 8, 3, DFLAGS,
+ RV1126B_CLKGATE_CON(5), 8, GFLAGS),
+ GATE(HCLK_RKNN, "hclk_rknn", "clk_gpll_div8", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(5), 10, GFLAGS),
+ GATE(PCLK_NPU_ROOT, "pclk_npu_root", "clk_cpll_div10", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(5), 11, GFLAGS),
+ COMPOSITE_NODIV(ACLK_VEPU_ROOT, "aclk_vepu_root", mux_500m_400m_200m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(40), 0, 2, MFLAGS,
+ RV1126B_CLKGATE_CON(5), 12, GFLAGS),
+ GATE(HCLK_VEPU_ROOT, "hclk_vepu_root", "clk_gpll_div8", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(5), 13, GFLAGS),
+ GATE(PCLK_VEPU_ROOT, "pclk_vepu_root", "clk_cpll_div10", 0,
+ RV1126B_CLKGATE_CON(5), 14, GFLAGS),
+ COMPOSITE(CLK_CORE_RGA_SRC, "clk_core_rga_src", mux_gpll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(40), 5, 1, MFLAGS, 2, 3, DFLAGS,
+ RV1126B_CLKGATE_CON(6), 0, GFLAGS),
+ COMPOSITE_NODIV(ACLK_GMAC_ROOT, "aclk_gmac_root", mux_300m_200m_p, 0,
+ RV1126B_CLKSEL_CON(40), 6, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(6), 1, GFLAGS),
+ COMPOSITE_NODIV(ACLK_VI_ROOT, "aclk_vi_root", mux_500m_400m_300m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(40), 7, 2, MFLAGS,
+ RV1126B_CLKGATE_CON(6), 2, GFLAGS),
+ GATE(HCLK_VI_ROOT, "hclk_vi_root", "clk_gpll_div8", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(6), 3, GFLAGS),
+ GATE(PCLK_VI_ROOT, "pclk_vi_root", "clk_cpll_div10", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(6), 4, GFLAGS),
+ COMPOSITE_NODIV(DCLK_VICAP_ROOT, "dclk_vicap_root", mux_333m_200m_p, 0,
+ RV1126B_CLKSEL_CON(42), 5, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(6), 5, GFLAGS),
+ COMPOSITE(CLK_SYS_DSMC_ROOT, "clk_sys_dsmc_root", mux_24m_gpll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(40), 14, 2, MFLAGS, 9, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(6), 6, GFLAGS),
+ COMPOSITE(ACLK_VDO_ROOT, "aclk_vdo_root", mux_gpll_cpll_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(42), 4, 1, MFLAGS, 0, 4, DFLAGS,
+ RV1126B_CLKGATE_CON(6), 7, GFLAGS),
+ COMPOSITE(ACLK_RKVDEC_ROOT, "aclk_rkvdec_root", mux_gpll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(42), 10, 1, MFLAGS, 6, 4, DFLAGS,
+ RV1126B_CLKGATE_CON(6), 8, GFLAGS),
+ GATE(HCLK_VDO_ROOT, "hclk_vdo_root", "clk_gpll_div8", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(6), 9, GFLAGS),
+ GATE(PCLK_VDO_ROOT, "pclk_vdo_root", "clk_cpll_div10", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(6), 10, GFLAGS),
+ COMPOSITE(DCLK_VOP, "dclk_vop", mux_gpll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(43), 8, 1, MFLAGS, 0, 8, DFLAGS,
+ RV1126B_CLKGATE_CON(6), 12, GFLAGS),
+ COMPOSITE(DCLK_OOC_SRC, "dclk_ooc_src", mux_gpll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(62), 7, 1, MFLAGS, 8, 8, DFLAGS,
+ RV1126B_CLKGATE_CON(6), 13, GFLAGS),
+ GATE(DCLK_DECOM_SRC, "dclk_decom_src", "clk_gpll_div3", 0,
+ RV1126B_CLKGATE_CON(6), 14, GFLAGS),
+ GATE(PCLK_DDR_ROOT, "pclk_ddr_root", "clk_cpll_div10", 0,
+ RV1126B_CLKGATE_CON(7), 0, GFLAGS),
+ COMPOSITE(ACLK_SYSMEM, "aclk_sysmem", mux_gpll_cpll_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(44), 3, 1, MFLAGS, 0, 3, DFLAGS,
+ RV1126B_CLKGATE_CON(7), 1, GFLAGS),
+ COMPOSITE_NODIV(ACLK_TOP_ROOT, "aclk_top_root", mux_600m_400m_200m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(44), 6, 2, MFLAGS,
+ RV1126B_CLKGATE_CON(7), 3, GFLAGS),
+ COMPOSITE_NODIV(ACLK_BUS_ROOT, "aclk_bus_root", mux_400m_300m_200m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(44), 8, 2, MFLAGS,
+ RV1126B_CLKGATE_CON(7), 4, GFLAGS),
+ COMPOSITE_NODIV(HCLK_BUS_ROOT, "hclk_bus_root", mux_200m_100m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(44), 10, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(7), 5, GFLAGS),
+ GATE(PCLK_BUS_ROOT, "pclk_bus_root", "clk_cpll_div10", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(7), 6, GFLAGS),
+ COMPOSITE(CCLK_SDMMC0, "cclk_sdmmc0", mux_gpll_cpll_24m_p, 0,
+ RV1126B_CLKSEL_CON(45), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RV1126B_CLKGATE_CON(7), 7, GFLAGS),
+ COMPOSITE(CCLK_SDMMC1, "cclk_sdmmc1", mux_gpll_cpll_24m_p, 0,
+ RV1126B_CLKSEL_CON(46), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RV1126B_CLKGATE_CON(7), 8, GFLAGS),
+ COMPOSITE(CCLK_EMMC, "cclk_emmc", mux_gpll_cpll_24m_p, 0,
+ RV1126B_CLKSEL_CON(47), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RV1126B_CLKGATE_CON(7), 9, GFLAGS),
+ COMPOSITE(SCLK_2X_FSPI0, "sclk_2x_fspi0", mux_gpll_cpll_24m_p, 0,
+ RV1126B_CLKSEL_CON(48), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RV1126B_CLKGATE_CON(7), 10, GFLAGS),
+ COMPOSITE(CLK_GMAC_PTP_REF_SRC, "clk_gmac_ptp_ref_src", mux_cpll_24m_p, 0,
+ RV1126B_CLKSEL_CON(45), 10, 1, MFLAGS, 11, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(7), 11, GFLAGS),
+ GATE(CLK_GMAC_125M, "clk_gmac_125m", "clk_cpll_div8", 0,
+ RV1126B_CLKGATE_CON(7), 12, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER_ROOT, "clk_timer_root", mux_100m_24m_p, 0,
+ RV1126B_CLKSEL_CON(46), 11, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(7), 13, GFLAGS),
+ COMPOSITE_NODIV(TCLK_WDT_NS_SRC, "tclk_wdt_ns_src", mux_100m_24m_p, 0,
+ RV1126B_CLKSEL_CON(46), 12, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(8), 0, GFLAGS),
+ COMPOSITE_NODIV(TCLK_WDT_S_SRC, "tclk_wdt_s_src", mux_100m_24m_p, 0,
+ RV1126B_CLKSEL_CON(46), 13, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(8), 1, GFLAGS),
+ COMPOSITE_NODIV(TCLK_WDT_HPMCU, "tclk_wdt_hpmcu", mux_100m_24m_p, 0,
+ RV1126B_CLKSEL_CON(46), 14, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(8), 2, GFLAGS),
+ COMPOSITE(CLK_CAN0, "clk_can0", mux_gpll_cpll_24m_p, 0,
+ RV1126B_CLKSEL_CON(49), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(8), 4, GFLAGS),
+ COMPOSITE(CLK_CAN1, "clk_can1", mux_gpll_cpll_24m_p, 0,
+ RV1126B_CLKSEL_CON(49), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(8), 5, GFLAGS),
+ COMPOSITE_NODIV(PCLK_PERI_ROOT, "pclk_peri_root", mux_100m_24m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(47), 12, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(8), 6, GFLAGS),
+ COMPOSITE_NODIV(ACLK_PERI_ROOT, "aclk_peri_root", mux_200m_24m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(47), 13, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(8), 7, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C_BUS_SRC, "clk_i2c_bus_src", mux_200m_24m_p, 0,
+ RV1126B_CLKSEL_CON(50), 1, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(8), 9, GFLAGS),
+ COMPOSITE_NODIV(CLK_SPI0, "clk_spi0", mux_200m_100m_50m_24m_p, 0,
+ RV1126B_CLKSEL_CON(50), 2, 2, MFLAGS,
+ RV1126B_CLKGATE_CON(8), 10, GFLAGS),
+ COMPOSITE_NODIV(CLK_SPI1, "clk_spi1", mux_200m_100m_50m_24m_p, 0,
+ RV1126B_CLKSEL_CON(50), 4, 2, MFLAGS,
+ RV1126B_CLKGATE_CON(8), 11, GFLAGS),
+ GATE(BUSCLK_PMU_SRC, "busclk_pmu_src", "clk_cpll_div10", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(8), 12, GFLAGS),
+ COMPOSITE_NODIV(CLK_PWM0, "clk_pwm0", mux_100m_24m_p, 0,
+ RV1126B_CLKSEL_CON(50), 8, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(9), 0, GFLAGS),
+ COMPOSITE_NODIV(CLK_PWM2, "clk_pwm2", mux_100m_24m_p, 0,
+ RV1126B_CLKSEL_CON(50), 10, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(9), 2, GFLAGS),
+ COMPOSITE_NODIV(CLK_PWM3, "clk_pwm3", mux_100m_24m_p, 0,
+ RV1126B_CLKSEL_CON(50), 11, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(9), 3, GFLAGS),
+ COMPOSITE_NODIV(CLK_PKA_RKCE_SRC, "clk_pka_rkce_src", mux_300m_200m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(50), 12, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(9), 4, GFLAGS),
+ COMPOSITE_NODIV(ACLK_RKCE_SRC, "aclk_rkce_src", mux_200m_24m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(50), 13, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(9), 5, GFLAGS),
+ COMPOSITE_NODIV(ACLK_VCP_ROOT, "aclk_vcp_root", mux_500m_400m_200m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(48), 12, 2, MFLAGS,
+ RV1126B_CLKGATE_CON(9), 6, GFLAGS),
+ GATE(HCLK_VCP_ROOT, "hclk_vcp_root", "clk_gpll_div8", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(9), 7, GFLAGS),
+ GATE(PCLK_VCP_ROOT, "pclk_vcp_root", "clk_cpll_div10", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(9), 8, GFLAGS),
+ COMPOSITE(CLK_CORE_FEC_SRC, "clk_core_fec_src", mux_gpll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(51), 3, 1, MFLAGS, 0, 3, DFLAGS,
+ RV1126B_CLKGATE_CON(9), 9, GFLAGS),
+ GATE(CLK_50M_GMAC_IOBUF_VI, "clk_50m_gmac_iobuf_vi", "clk_cpll_div20", 0,
+ RV1126B_CLKGATE_CON(9), 11, GFLAGS),
+ GATE(PCLK_TOP_ROOT, "pclk_top_root", "clk_cpll_div10", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(15), 0, GFLAGS),
+ COMPOSITE(CLK_MIPI0_OUT2IO, "clk_mipi0_out2io", mux_600m_24m_p, 0,
+ RV1126B_CLKSEL_CON(67), 11, 1, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(15), 3, GFLAGS),
+ COMPOSITE(CLK_MIPI1_OUT2IO, "clk_mipi1_out2io", mux_600m_24m_p, 0,
+ RV1126B_CLKSEL_CON(67), 12, 1, MFLAGS, 6, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(15), 4, GFLAGS),
+ COMPOSITE(CLK_MIPI2_OUT2IO, "clk_mipi2_out2io", mux_600m_24m_p, 0,
+ RV1126B_CLKSEL_CON(68), 11, 1, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(15), 5, GFLAGS),
+ COMPOSITE(CLK_MIPI3_OUT2IO, "clk_mipi3_out2io", mux_600m_24m_p, 0,
+ RV1126B_CLKSEL_CON(68), 12, 1, MFLAGS, 6, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(15), 6, GFLAGS),
+ COMPOSITE(CLK_CIF_OUT2IO, "clk_cif_out2io", mux_600m_24m_p, 0,
+ RV1126B_CLKSEL_CON(69), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(15), 7, GFLAGS),
+ COMPOSITE(CLK_MAC_OUT2IO, "clk_mac_out2io", mux_gpll_cpll_24m_p, 0,
+ RV1126B_CLKSEL_CON(69), 6, 2, MFLAGS, 8, 7, DFLAGS,
+ RV1126B_CLKGATE_CON(15), 8, GFLAGS),
+ COMPOSITE_NOMUX(MCLK_SAI0_OUT2IO, "mclk_sai0_out2io", "mclk_sai0", CLK_SET_RATE_PARENT,
+ RV1126B_CLKSEL_CON(70), 0, 4, DFLAGS,
+ RV1126B_CLKGATE_CON(15), 9, GFLAGS),
+ COMPOSITE_NOMUX(MCLK_SAI1_OUT2IO, "mclk_sai1_out2io", "mclk_sai1", CLK_SET_RATE_PARENT,
+ RV1126B_CLKSEL_CON(70), 5, 4, DFLAGS,
+ RV1126B_CLKGATE_CON(15), 10, GFLAGS),
+ COMPOSITE_NOMUX(MCLK_SAI2_OUT2IO, "mclk_sai2_out2io", "mclk_sai2", CLK_SET_RATE_PARENT,
+ RV1126B_CLKSEL_CON(70), 10, 4, DFLAGS,
+ RV1126B_CLKGATE_CON(15), 11, GFLAGS),
+
+	/* pd_npu */
+ MUX(ACLK_RKNN, "aclk_rknn", aclk_npu_root_p, CLK_SET_RATE_PARENT,
+ RV1126B_NPUCLKSEL_CON(0), 1, 1, MFLAGS),
+
+ /* pd_vepu */
+ GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_vepu_root", 0,
+ RV1126B_VEPUCLKGATE_CON(0), 7, GFLAGS),
+ GATE(DBCLK_GPIO3, "dbclk_gpio3", "xin24m", 0,
+ RV1126B_VEPUCLKGATE_CON(0), 8, GFLAGS),
+ GATE(PCLK_IOC_VCCIO3, "pclk_ioc_vccio3", "pclk_vepu_root", CLK_IS_CRITICAL,
+ RV1126B_VEPUCLKGATE_CON(0), 9, GFLAGS),
+ GATE(PCLK_SARADC0, "pclk_saradc0", "pclk_vepu_root", 0,
+ RV1126B_VEPUCLKGATE_CON(0), 10, GFLAGS),
+ MUX(CLK_SARADC0, "clk_saradc0", clk_saradc0_p, CLK_SET_RATE_PARENT,
+ RV1126B_VEPUCLKSEL_CON(0), 2, 1, MFLAGS),
+ GATE(HCLK_SDMMC1, "hclk_sdmmc1", "hclk_vepu_root", 0,
+ RV1126B_VEPUCLKGATE_CON(0), 12, GFLAGS),
+ GATE(HCLK_VEPU, "hclk_vepu", "hclk_vepu_root", 0,
+ RV1126B_VEPUCLKGATE_CON(1), 1, GFLAGS),
+ GATE(ACLK_VEPU, "aclk_vepu", "aclk_vepu_root", 0,
+ RV1126B_VEPUCLKGATE_CON(1), 2, GFLAGS),
+ COMPOSITE_NODIV(CLK_CORE_VEPU, "clk_core_vepu", clk_core_vepu_p, CLK_SET_RATE_PARENT,
+ RV1126B_VEPUCLKSEL_CON(0), 1, 1, MFLAGS,
+ RV1126B_VEPUCLKGATE_CON(1), 3, GFLAGS),
+
+ /* pd_vcp */
+ GATE(HCLK_FEC, "hclk_fec", "hclk_vcp_root", 0,
+ RV1126B_VCPCLKGATE_CON(1), 0, GFLAGS),
+ GATE(ACLK_FEC, "aclk_fec", "aclk_vcp_root", 0,
+ RV1126B_VCPCLKGATE_CON(1), 1, GFLAGS),
+ COMPOSITE_NODIV(CLK_CORE_FEC, "clk_core_fec", clk_core_fec_p, CLK_SET_RATE_PARENT,
+ RV1126B_VCPCLKSEL_CON(0), 13, 1, MFLAGS,
+ RV1126B_VCPCLKGATE_CON(1), 2, GFLAGS),
+ GATE(HCLK_AVSP, "hclk_avsp", "hclk_vcp_root", 0,
+ RV1126B_VCPCLKGATE_CON(1), 3, GFLAGS),
+ GATE(ACLK_AVSP, "aclk_avsp", "aclk_vcp_root", 0,
+ RV1126B_VCPCLKGATE_CON(1), 4, GFLAGS),
+ GATE(HCLK_AISP, "hclk_aisp", "hclk_vcp_root", 0,
+ RV1126B_VCPCLKGATE_CON(0), 11, GFLAGS),
+ GATE(ACLK_AISP, "aclk_aisp", "aclk_vcp_root", 0,
+ RV1126B_VCPCLKGATE_CON(0), 12, GFLAGS),
+ COMPOSITE_NODIV(CLK_CORE_AISP, "clk_core_aisp", clk_core_aisp_p, CLK_SET_RATE_PARENT,
+ RV1126B_VCPCLKSEL_CON(0), 15, 1, MFLAGS,
+ RV1126B_VCPCLKGATE_CON(0), 13, GFLAGS),
+
+ /* pd_vi */
+ MUX(CLK_CORE_ISP_ROOT, "clk_core_isp_root", clk_core_isp_root_p, CLK_SET_RATE_PARENT,
+ RV1126B_VICLKSEL_CON(0), 1, 1, MFLAGS),
+ GATE(PCLK_DSMC, "pclk_dsmc", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(0), 8, GFLAGS),
+ GATE(ACLK_DSMC, "aclk_dsmc", "hclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(0), 9, GFLAGS),
+ GATE(HCLK_CAN0, "hclk_can0", "hclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(0), 10, GFLAGS),
+ GATE(HCLK_CAN1, "hclk_can1", "hclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(0), 11, GFLAGS),
+ GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(1), 0, GFLAGS),
+ GATE(DBCLK_GPIO2, "dbclk_gpio2", "xin24m", 0,
+ RV1126B_VICLKGATE_CON(1), 1, GFLAGS),
+ GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(1), 2, GFLAGS),
+ GATE(DBCLK_GPIO4, "dbclk_gpio4", "xin24m", 0,
+ RV1126B_VICLKGATE_CON(1), 3, GFLAGS),
+ GATE(PCLK_GPIO5, "pclk_gpio5", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(1), 4, GFLAGS),
+ GATE(DBCLK_GPIO5, "dbclk_gpio5", "xin24m", 0,
+ RV1126B_VICLKGATE_CON(1), 5, GFLAGS),
+ GATE(PCLK_GPIO6, "pclk_gpio6", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(1), 6, GFLAGS),
+ GATE(DBCLK_GPIO6, "dbclk_gpio6", "xin24m", 0,
+ RV1126B_VICLKGATE_CON(1), 7, GFLAGS),
+ GATE(PCLK_GPIO7, "pclk_gpio7", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(1), 8, GFLAGS),
+ GATE(DBCLK_GPIO7, "dbclk_gpio7", "xin24m", 0,
+ RV1126B_VICLKGATE_CON(1), 9, GFLAGS),
+ GATE(PCLK_IOC_VCCIO2, "pclk_ioc_vccio2", "pclk_vi_root", CLK_IS_CRITICAL,
+ RV1126B_VICLKGATE_CON(1), 10, GFLAGS),
+ GATE(PCLK_IOC_VCCIO4, "pclk_ioc_vccio4", "pclk_vi_root", CLK_IS_CRITICAL,
+ RV1126B_VICLKGATE_CON(1), 11, GFLAGS),
+ GATE(PCLK_IOC_VCCIO5, "pclk_ioc_vccio5", "pclk_vi_root", CLK_IS_CRITICAL,
+ RV1126B_VICLKGATE_CON(1), 12, GFLAGS),
+ GATE(PCLK_IOC_VCCIO6, "pclk_ioc_vccio6", "pclk_vi_root", CLK_IS_CRITICAL,
+ RV1126B_VICLKGATE_CON(1), 13, GFLAGS),
+ GATE(PCLK_IOC_VCCIO7, "pclk_ioc_vccio7", "pclk_vi_root", CLK_IS_CRITICAL,
+ RV1126B_VICLKGATE_CON(1), 14, GFLAGS),
+ GATE(HCLK_ISP, "hclk_isp", "hclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(2), 0, GFLAGS),
+ GATE(ACLK_ISP, "aclk_isp", "aclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(2), 1, GFLAGS),
+ GATE(CLK_CORE_ISP, "clk_core_isp", "clk_core_isp_root", 0,
+ RV1126B_VICLKGATE_CON(2), 2, GFLAGS),
+ GATE(HCLK_VICAP, "hclk_vicap", "hclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(2), 3, GFLAGS),
+ GATE(ACLK_VICAP, "aclk_vicap", "aclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(2), 4, GFLAGS),
+ GATE(DCLK_VICAP, "dclk_vicap", "dclk_vicap_root", 0,
+ RV1126B_VICLKGATE_CON(2), 5, GFLAGS),
+ GATE(ISP0CLK_VICAP, "isp0clk_vicap", "clk_core_isp_root", 0,
+ RV1126B_VICLKGATE_CON(2), 6, GFLAGS),
+ GATE(HCLK_VPSS, "hclk_vpss", "hclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(2), 7, GFLAGS),
+ GATE(ACLK_VPSS, "aclk_vpss", "aclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(2), 8, GFLAGS),
+ GATE(CLK_CORE_VPSS, "clk_core_vpss", "clk_core_isp_root", 0,
+ RV1126B_VICLKGATE_CON(2), 9, GFLAGS),
+ GATE(HCLK_VPSL, "hclk_vpsl", "hclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(2), 10, GFLAGS),
+ GATE(ACLK_VPSL, "aclk_vpsl", "aclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(2), 11, GFLAGS),
+ GATE(CLK_CORE_VPSL, "clk_core_vpsl", "clk_core_isp_root", 0,
+ RV1126B_VICLKGATE_CON(2), 12, GFLAGS),
+ GATE(PCLK_CSI2HOST0, "pclk_csi2host0", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(3), 0, GFLAGS),
+ GATE(DCLK_CSI2HOST0, "dclk_csi2host0", "dclk_vicap_root", 0,
+ RV1126B_VICLKGATE_CON(3), 1, GFLAGS),
+ GATE(PCLK_CSI2HOST1, "pclk_csi2host1", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(3), 2, GFLAGS),
+ GATE(DCLK_CSI2HOST1, "dclk_csi2host1", "dclk_vicap_root", 0,
+ RV1126B_VICLKGATE_CON(3), 3, GFLAGS),
+ GATE(PCLK_CSI2HOST2, "pclk_csi2host2", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(3), 4, GFLAGS),
+ GATE(DCLK_CSI2HOST2, "dclk_csi2host2", "dclk_vicap_root", 0,
+ RV1126B_VICLKGATE_CON(3), 5, GFLAGS),
+ GATE(PCLK_CSI2HOST3, "pclk_csi2host3", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(3), 6, GFLAGS),
+ GATE(DCLK_CSI2HOST3, "dclk_csi2host3", "dclk_vicap_root", 0,
+ RV1126B_VICLKGATE_CON(3), 7, GFLAGS),
+ GATE(HCLK_SDMMC0, "hclk_sdmmc0", "hclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(3), 8, GFLAGS),
+ GATE(ACLK_GMAC, "aclk_gmac", "aclk_gmac_root", 0,
+ RV1126B_VICLKGATE_CON(3), 9, GFLAGS),
+ GATE(PCLK_GMAC, "pclk_gmac", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(3), 10, GFLAGS),
+ MUX(CLK_GMAC_PTP_REF, "clk_gmac_ptp_ref", clk_gmac_ptp_ref_p, CLK_SET_RATE_PARENT,
+ RV1126B_VICLKSEL_CON(0), 14, 1, MFLAGS),
+ GATE(PCLK_CSIPHY0, "pclk_csiphy0", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(3), 11, GFLAGS),
+ GATE(PCLK_CSIPHY1, "pclk_csiphy1", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(3), 12, GFLAGS),
+ GATE(PCLK_MACPHY, "pclk_macphy", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(3), 13, GFLAGS),
+ GATE(PCLK_SARADC1, "pclk_saradc1", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(4), 0, GFLAGS),
+ MUX(CLK_SARADC1, "clk_saradc1", clk_saradc1_p, CLK_SET_RATE_PARENT,
+ RV1126B_VICLKSEL_CON(0), 2, 1, MFLAGS),
+ GATE(PCLK_SARADC2, "pclk_saradc2", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(4), 2, GFLAGS),
+ MUX(CLK_SARADC2, "clk_saradc2", clk_saradc2_p, CLK_SET_RATE_PARENT,
+ RV1126B_VICLKSEL_CON(0), 3, 1, MFLAGS),
+ COMPOSITE_NODIV(CLK_MACPHY, "clk_macphy", clk_macphy_p, 0,
+ RV1126B_VICLKSEL_CON(1), 1, 1, MFLAGS,
+ RV1126B_VICLKGATE_CON(0), 12, GFLAGS),
+
+ /* pd_vdo */
+ GATE(ACLK_RKVDEC, "aclk_rkvdec", "aclk_rkvdec_root", 0,
+ RV1126B_VDOCLKGATE_CON(0), 7, GFLAGS),
+ GATE(HCLK_RKVDEC, "hclk_rkvdec", "hclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(0), 8, GFLAGS),
+ GATE(CLK_HEVC_CA_RKVDEC, "clk_hevc_ca_rkvdec", "aclk_rkvdec_root", 0,
+ RV1126B_VDOCLKGATE_CON(0), 9, GFLAGS),
+ GATE(ACLK_VOP, "aclk_vop", "aclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(0), 10, GFLAGS),
+ GATE(HCLK_VOP, "hclk_vop", "hclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(0), 11, GFLAGS),
+ GATE(ACLK_OOC, "aclk_ooc", "aclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(0), 13, GFLAGS),
+ GATE(HCLK_OOC, "hclk_ooc", "hclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(0), 14, GFLAGS),
+ GATE(HCLK_RKJPEG, "hclk_rkjpeg", "hclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(1), 3, GFLAGS),
+ GATE(ACLK_RKJPEG, "aclk_rkjpeg", "aclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(1), 4, GFLAGS),
+ GATE(ACLK_RKMMU_DECOM, "aclk_rkmmu_decom", "aclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(1), 5, GFLAGS),
+ GATE(HCLK_RKMMU_DECOM, "hclk_rkmmu_decom", "hclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(1), 6, GFLAGS),
+ GATE(DCLK_DECOM, "dclk_decom", "dclk_decom_src", 0,
+ RV1126B_VDOCLKGATE_CON(1), 8, GFLAGS),
+ GATE(ACLK_DECOM, "aclk_decom", "aclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(1), 9, GFLAGS),
+ GATE(PCLK_DECOM, "pclk_decom", "pclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(1), 10, GFLAGS),
+ GATE(PCLK_MIPI_DSI, "pclk_mipi_dsi", "pclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(1), 12, GFLAGS),
+ GATE(PCLK_DSIPHY, "pclk_dsiphy", "pclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(1), 13, GFLAGS),
+
+ /* pd_ddr */
+ GATE(PCLK_DDRC, "pclk_ddrc", "pclk_ddr_root", CLK_IS_CRITICAL,
+ RV1126B_DDRCLKGATE_CON(0), 2, GFLAGS),
+ GATE(PCLK_DDRMON, "pclk_ddrmon", "pclk_ddr_root", CLK_IS_CRITICAL,
+ RV1126B_DDRCLKGATE_CON(0), 3, GFLAGS),
+ GATE(CLK_TIMER_DDRMON, "clk_timer_ddrmon", "xin24m", 0,
+ RV1126B_DDRCLKGATE_CON(0), 4, GFLAGS),
+ GATE(PCLK_DFICTRL, "pclk_dfictrl", "pclk_ddr_root", CLK_IS_CRITICAL,
+ RV1126B_DDRCLKGATE_CON(0), 5, GFLAGS),
+ GATE(PCLK_DDRPHY, "pclk_ddrphy", "pclk_ddr_root", CLK_IS_CRITICAL,
+ RV1126B_DDRCLKGATE_CON(0), 8, GFLAGS),
+ GATE(PCLK_DMA2DDR, "pclk_dma2ddr", "pclk_ddr_root", CLK_IS_CRITICAL,
+ RV1126B_DDRCLKGATE_CON(0), 9, GFLAGS),
+
+	/* pd_pmu */
+ COMPOSITE_NODIV(CLK_RCOSC_SRC, "clk_rcosc_src", clk_rcosc_src_p, 0,
+ RV1126B_PMUCLKSEL_CON(1), 0, 3, MFLAGS,
+ RV1126B_PMUCLKGATE_CON(0), 0, GFLAGS),
+ COMPOSITE_NOGATE(BUSCLK_PMU_MUX, "busclk_pmu_mux", busclk_pmu_mux_p, 0,
+ RV1126B_PMUCLKSEL_CON(1), 3, 1, MFLAGS, 4, 2, DFLAGS),
+ GATE(BUSCLK_PMU_ROOT, "busclk_pmu_root", "busclk_pmu_mux", 0,
+ RV1126B_PMUCLKGATE_CON(0), 1, GFLAGS),
+ GATE(BUSCLK_PMU1_ROOT, "busclk_pmu1_root", "busclk_pmu_mux", CLK_IS_CRITICAL,
+ RV1126B_PMUCLKGATE_CON(3), 11, GFLAGS),
+ GATE(PCLK_PMU, "pclk_pmu", "busclk_pmu_root", CLK_IS_CRITICAL,
+ RV1126B_PMUCLKGATE_CON(0), 6, GFLAGS),
+ MUX(0, "xin_rc_src", clk_xin_rc_div_p, 0,
+ RV1126B_PMUCLKSEL_CON(2), 0, 1, MFLAGS),
+ COMPOSITE_FRACMUX_NOGATE(CLK_XIN_RC_DIV, "clk_xin_rc_div", "xin_rc_src", CLK_SET_RATE_PARENT,
+ RV1126B_PMUCLKSEL_CON(8), 0,
+ &rv1126b_rcdiv_pmu_fracmux),
+ GATE(PCLK_PMU_GPIO0, "pclk_pmu_gpio0", "busclk_pmu_root", 0,
+ RV1126B_PMUCLKGATE_CON(0), 7, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_PMU_GPIO0, "dbclk_pmu_gpio0", mux_24m_32k_p, 0,
+ RV1126B_PMUCLKSEL_CON(2), 4, 1, MFLAGS,
+ RV1126B_PMUCLKGATE_CON(0), 8, GFLAGS),
+ GATE(PCLK_PMU_HP_TIMER, "pclk_pmu_hp_timer", "busclk_pmu_root", CLK_IS_CRITICAL,
+ RV1126B_PMUCLKGATE_CON(0), 10, GFLAGS),
+ COMPOSITE(CLK_PMU_HP_TIMER, "clk_pmu_hp_timer", mux_cpll_24m_p, CLK_IS_CRITICAL,
+ RV1126B_PMUCLKSEL_CON(1), 13, 1, MFLAGS, 8, 5, DFLAGS,
+ RV1126B_PMUCLKGATE_CON(0), 11, GFLAGS),
+ GATE(CLK_PMU_32K_HP_TIMER, "clk_pmu_32k_hp_timer", "clk_32k", CLK_IS_CRITICAL,
+ RV1126B_PMUCLKGATE_CON(0), 13, GFLAGS),
+ GATE(PCLK_PWM1, "pclk_pwm1", "busclk_pmu_root", 0,
+ RV1126B_PMUCLKGATE_CON(1), 0, GFLAGS),
+ COMPOSITE(CLK_PWM1, "clk_pwm1", mux_24m_rcosc_buspmu_p, 0,
+ RV1126B_PMUCLKSEL_CON(2), 8, 2, MFLAGS, 6, 2, DFLAGS,
+ RV1126B_PMUCLKGATE_CON(1), 1, GFLAGS),
+ GATE(CLK_OSC_PWM1, "clk_osc_pwm1", "xin24m", 0,
+ RV1126B_PMUCLKGATE_CON(1), 2, GFLAGS),
+ GATE(CLK_RC_PWM1, "clk_rc_pwm1", "clk_32k", 0,
+ RV1126B_PMUCLKGATE_CON(1), 3, GFLAGS),
+ GATE(PCLK_I2C2, "pclk_i2c2", "busclk_pmu_root", 0,
+ RV1126B_PMUCLKGATE_CON(1), 6, GFLAGS),
+ COMPOSITE(CLK_I2C2, "clk_i2c2", mux_24m_rcosc_buspmu_p, 0,
+ RV1126B_PMUCLKSEL_CON(2), 14, 2, MFLAGS, 12, 2, DFLAGS,
+ RV1126B_PMUCLKGATE_CON(1), 7, GFLAGS),
+ GATE(PCLK_UART0, "pclk_uart0", "busclk_pmu_root", 0,
+ RV1126B_PMUCLKGATE_CON(1), 8, GFLAGS),
+ COMPOSITE_NODIV(SCLK_UART0, "sclk_uart0", sclk_uart0_p, CLK_SET_RATE_PARENT,
+ RV1126B_PMUCLKSEL_CON(3), 0, 2, MFLAGS,
+ RV1126B_PMUCLKGATE_CON(1), 11, GFLAGS),
+ GATE(PCLK_RCOSC_CTRL, "pclk_rcosc_ctrl", "busclk_pmu_root", CLK_IS_CRITICAL,
+ RV1126B_PMUCLKGATE_CON(2), 0, GFLAGS),
+ COMPOSITE_NODIV(CLK_OSC_RCOSC_CTRL, "clk_osc_rcosc_ctrl", clk_osc_rcosc_ctrl_p, CLK_IS_CRITICAL,
+ RV1126B_PMUCLKSEL_CON(3), 2, 1, MFLAGS,
+ RV1126B_PMUCLKGATE_CON(2), 1, GFLAGS),
+ GATE(CLK_REF_RCOSC_CTRL, "clk_ref_rcosc_ctrl", "xin24m", CLK_IS_CRITICAL,
+ RV1126B_PMUCLKGATE_CON(2), 2, GFLAGS),
+ GATE(PCLK_IOC_PMUIO0, "pclk_ioc_pmuio0", "busclk_pmu_root", CLK_IS_CRITICAL,
+ RV1126B_PMUCLKGATE_CON(2), 3, GFLAGS),
+ GATE(CLK_REFOUT, "clk_refout", "xin24m", 0,
+ RV1126B_PMUCLKGATE_CON(2), 6, GFLAGS),
+ GATE(CLK_PREROLL, "clk_preroll", "busclk_pmu_root", 0,
+ RV1126B_PMUCLKGATE_CON(2), 7, GFLAGS),
+ GATE(CLK_PREROLL_32K, "clk_preroll_32k", "clk_32k", 0,
+ RV1126B_PMUCLKGATE_CON(2), 8, GFLAGS),
+ GATE(HCLK_PMU_SRAM, "hclk_pmu_sram", "busclk_pmu_root", CLK_IS_CRITICAL,
+ RV1126B_PMUCLKGATE_CON(2), 9, GFLAGS),
+ GATE(PCLK_WDT_LPMCU, "pclk_wdt_lpmcu", "busclk_pmu_root", 0,
+ RV1126B_PMUCLKGATE_CON(3), 0, GFLAGS),
+ COMPOSITE_NODIV(TCLK_WDT_LPMCU, "tclk_wdt_lpmcu", mux_24m_rcosc_buspmu_32k_p, 0,
+ RV1126B_PMUCLKSEL_CON(3), 6, 2, MFLAGS,
+ RV1126B_PMUCLKGATE_CON(3), 1, GFLAGS),
+ GATE(CLK_LPMCU, "clk_lpmcu", "busclk_pmu_root", 0,
+ RV1126B_PMUCLKGATE_CON(3), 2, GFLAGS),
+ GATE(CLK_LPMCU_RTC, "clk_lpmcu_rtc", "xin24m", 0,
+ RV1126B_PMUCLKGATE_CON(3), 3, GFLAGS),
+ GATE(PCLK_LPMCU_MAILBOX, "pclk_lpmcu_mailbox", "busclk_pmu_root", 0,
+ RV1126B_PMUCLKGATE_CON(3), 4, GFLAGS),
+
+ /* pd_pmu1 */
+ GATE(PCLK_SPI2AHB, "pclk_spi2ahb", "busclk_pmu_root", 0,
+ RV1126B_PMU1CLKGATE_CON(0), 0, GFLAGS),
+ GATE(HCLK_SPI2AHB, "hclk_spi2ahb", "busclk_pmu_root", 0,
+ RV1126B_PMU1CLKGATE_CON(0), 1, GFLAGS),
+ GATE(HCLK_FSPI1, "hclk_fspi1", "busclk_pmu_root", 0,
+ RV1126B_PMU1CLKGATE_CON(0), 2, GFLAGS),
+ GATE(HCLK_XIP_FSPI1, "hclk_xip_fspi1", "busclk_pmu_root", 0,
+ RV1126B_PMU1CLKGATE_CON(0), 3, GFLAGS),
+ COMPOSITE(SCLK_1X_FSPI1, "sclk_1x_fspi1", mux_24m_rcosc_buspmu_p, 0,
+ RV1126B_PMU1CLKSEL_CON(0), 0, 2, MFLAGS, 2, 3, DFLAGS,
+ RV1126B_PMU1CLKGATE_CON(0), 4, GFLAGS),
+ GATE(PCLK_IOC_PMUIO1, "pclk_ioc_pmuio1", "busclk_pmu_root", CLK_IS_CRITICAL,
+ RV1126B_PMU1CLKGATE_CON(0), 5, GFLAGS),
+ GATE(PCLK_AUDIO_ADC_PMU, "pclk_audio_adc_pmu", "busclk_pmu_root", 0,
+ RV1126B_PMU1CLKGATE_CON(0), 8, GFLAGS),
+
+ COMPOSITE(MCLK_LPSAI, "mclk_lpsai", mux_24m_rcosc_buspmu_p, 0,
+ RV1126B_PMU1CLKSEL_CON(0), 6, 2, MFLAGS, 8, 5, DFLAGS,
+ RV1126B_PMU1CLKGATE_CON(1), 3, GFLAGS),
+ GATE(MCLK_AUDIO_ADC_PMU, "mclk_audio_adc_pmu", "mclk_lpsai", CLK_IS_CRITICAL,
+ RV1126B_PMU1CLKGATE_CON(0), 9, GFLAGS),
+ FACTOR(MCLK_AUDIO_ADC_DIV4_PMU, "mclk_audio_adc_div4_pmu", "mclk_audio_adc_pmu", 0, 1, 4),
+
+ /* pd_bus */
+ GATE(ACLK_GIC400, "aclk_gic400", "hclk_bus_root", CLK_IS_CRITICAL,
+ RV1126B_BUSCLKGATE_CON(0), 8, GFLAGS),
+ GATE(PCLK_WDT_NS, "pclk_wdt_ns", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(0), 10, GFLAGS),
+ GATE(TCLK_WDT_NS, "tclk_wdt_ns", "tclk_wdt_ns_src", 0,
+ RV1126B_BUSCLKGATE_CON(0), 11, GFLAGS),
+ GATE(PCLK_WDT_HPMCU, "pclk_wdt_hpmcu", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(1), 0, GFLAGS),
+ GATE(HCLK_CACHE, "hclk_cache", "aclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(1), 2, GFLAGS),
+ GATE(PCLK_HPMCU_MAILBOX, "pclk_hpmcu_mailbox", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(1), 3, GFLAGS),
+ GATE(PCLK_HPMCU_INTMUX, "pclk_hpmcu_intmux", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(1), 4, GFLAGS),
+ GATE(CLK_HPMCU, "clk_hpmcu", "aclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(1), 5, GFLAGS),
+ GATE(CLK_HPMCU_RTC, "clk_hpmcu_rtc", "xin24m", 0,
+ RV1126B_BUSCLKGATE_CON(1), 10, GFLAGS),
+ GATE(PCLK_RKDMA, "pclk_rkdma", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(1), 11, GFLAGS),
+ GATE(ACLK_RKDMA, "aclk_rkdma", "aclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(1), 12, GFLAGS),
+ GATE(PCLK_DCF, "pclk_dcf", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(2), 0, GFLAGS),
+ GATE(ACLK_DCF, "aclk_dcf", "aclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(2), 1, GFLAGS),
+ GATE(HCLK_RGA, "hclk_rga", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(2), 2, GFLAGS),
+ GATE(ACLK_RGA, "aclk_rga", "aclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(2), 3, GFLAGS),
+ GATE(CLK_CORE_RGA, "clk_core_rga", "clk_core_rga_src", 0,
+ RV1126B_BUSCLKGATE_CON(2), 4, GFLAGS),
+ GATE(PCLK_TIMER, "pclk_timer", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(2), 5, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER0, "clk_timer0", clk_timer0_parents_p, 0,
+ RV1126B_BUSCLKSEL_CON(2), 0, 2, MFLAGS,
+ RV1126B_BUSCLKGATE_CON(2), 6, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER1, "clk_timer1", clk_timer1_parents_p, 0,
+ RV1126B_BUSCLKSEL_CON(2), 2, 2, MFLAGS,
+ RV1126B_BUSCLKGATE_CON(2), 7, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER2, "clk_timer2", clk_timer2_parents_p, 0,
+ RV1126B_BUSCLKSEL_CON(2), 4, 2, MFLAGS,
+ RV1126B_BUSCLKGATE_CON(2), 8, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER3, "clk_timer3", clk_timer3_parents_p, 0,
+ RV1126B_BUSCLKSEL_CON(2), 6, 2, MFLAGS,
+ RV1126B_BUSCLKGATE_CON(2), 9, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER4, "clk_timer4", clk_timer4_parents_p, 0,
+ RV1126B_BUSCLKSEL_CON(2), 8, 2, MFLAGS,
+ RV1126B_BUSCLKGATE_CON(2), 10, GFLAGS),
+ GATE(HCLK_RKRNG_S_NS, "hclk_rkrng_s_ns", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(2), 14, GFLAGS),
+ GATE(HCLK_RKRNG_NS, "hclk_rkrng_ns", "hclk_rkrng_s_ns", 0,
+ RV1126B_BUSCLKGATE_CON(2), 15, GFLAGS),
+ GATE(CLK_TIMER5, "clk_timer5", "clk_timer_root", CLK_IS_CRITICAL,
+ RV1126B_BUSCLKGATE_CON(2), 11, GFLAGS),
+ GATE(PCLK_I2C0, "pclk_i2c0", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(3), 0, GFLAGS),
+ GATE(CLK_I2C0, "clk_i2c0", "clk_i2c_bus_src", 0,
+ RV1126B_BUSCLKGATE_CON(3), 1, GFLAGS),
+ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(3), 2, GFLAGS),
+ GATE(CLK_I2C1, "clk_i2c1", "clk_i2c_bus_src", 0,
+ RV1126B_BUSCLKGATE_CON(3), 3, GFLAGS),
+ GATE(PCLK_I2C3, "pclk_i2c3", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(3), 4, GFLAGS),
+ GATE(CLK_I2C3, "clk_i2c3", "clk_i2c_bus_src", 0,
+ RV1126B_BUSCLKGATE_CON(3), 5, GFLAGS),
+ GATE(PCLK_I2C4, "pclk_i2c4", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(3), 6, GFLAGS),
+ GATE(CLK_I2C4, "clk_i2c4", "clk_i2c_bus_src", 0,
+ RV1126B_BUSCLKGATE_CON(3), 7, GFLAGS),
+ GATE(PCLK_I2C5, "pclk_i2c5", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(3), 8, GFLAGS),
+ GATE(CLK_I2C5, "clk_i2c5", "clk_i2c_bus_src", 0,
+ RV1126B_BUSCLKGATE_CON(3), 9, GFLAGS),
+ GATE(PCLK_SPI0, "pclk_spi0", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(3), 10, GFLAGS),
+ GATE(PCLK_SPI1, "pclk_spi1", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(3), 12, GFLAGS),
+ GATE(PCLK_PWM0, "pclk_pwm0", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 0, GFLAGS),
+ GATE(CLK_OSC_PWM0, "clk_osc_pwm0", "xin24m", 0,
+ RV1126B_BUSCLKGATE_CON(4), 1, GFLAGS),
+ GATE(CLK_RC_PWM0, "clk_rc_pwm0", "xin24m", 0,
+ RV1126B_BUSCLKGATE_CON(4), 2, GFLAGS),
+ GATE(PCLK_PWM2, "pclk_pwm2", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 3, GFLAGS),
+ GATE(CLK_OSC_PWM2, "clk_osc_pwm2", "xin24m", 0,
+ RV1126B_BUSCLKGATE_CON(4), 4, GFLAGS),
+ GATE(CLK_RC_PWM2, "clk_rc_pwm2", "xin24m", 0,
+ RV1126B_BUSCLKGATE_CON(4), 5, GFLAGS),
+ GATE(PCLK_PWM3, "pclk_pwm3", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 6, GFLAGS),
+ GATE(CLK_OSC_PWM3, "clk_osc_pwm3", "xin24m", 0,
+ RV1126B_BUSCLKGATE_CON(4), 7, GFLAGS),
+ GATE(CLK_RC_PWM3, "clk_rc_pwm3", "xin24m", 0,
+ RV1126B_BUSCLKGATE_CON(4), 8, GFLAGS),
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 9, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 10, GFLAGS),
+ GATE(PCLK_UART3, "pclk_uart3", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 11, GFLAGS),
+ GATE(PCLK_UART4, "pclk_uart4", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 12, GFLAGS),
+ GATE(PCLK_UART5, "pclk_uart5", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 13, GFLAGS),
+ GATE(PCLK_UART6, "pclk_uart6", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 14, GFLAGS),
+ GATE(PCLK_UART7, "pclk_uart7", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 15, GFLAGS),
+ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_bus_root", CLK_IS_CRITICAL,
+ RV1126B_BUSCLKGATE_CON(5), 0, GFLAGS),
+ GATE(CLK_TSADC, "clk_tsadc", "xin24m", CLK_IS_CRITICAL,
+ RV1126B_BUSCLKGATE_CON(5), 1, GFLAGS),
+ GATE(HCLK_SAI0, "hclk_sai0", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(5), 2, GFLAGS),
+ GATE(HCLK_SAI1, "hclk_sai1", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(5), 4, GFLAGS),
+ GATE(HCLK_SAI2, "hclk_sai2", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(5), 6, GFLAGS),
+ GATE(HCLK_RKDSM, "hclk_rkdsm", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(5), 8, GFLAGS),
+ GATE(MCLK_RKDSM, "mclk_rkdsm", "mclk_sai2", 0,
+ RV1126B_BUSCLKGATE_CON(5), 9, GFLAGS),
+ GATE(HCLK_PDM, "hclk_pdm", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(5), 10, GFLAGS),
+ GATE(HCLK_ASRC0, "hclk_asrc0", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(5), 11, GFLAGS),
+ GATE(HCLK_ASRC1, "hclk_asrc1", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(5), 12, GFLAGS),
+ GATE(PCLK_AUDIO_ADC_BUS, "pclk_audio_adc_bus", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(5), 13, GFLAGS),
+ GATE(MCLK_AUDIO_ADC_BUS, "mclk_audio_adc_bus", "mclk_sai2", 0,
+ RV1126B_BUSCLKGATE_CON(5), 14, GFLAGS),
+ FACTOR(MCLK_AUDIO_ADC_DIV4_BUS, "mclk_audio_adc_div4_bus", "mclk_audio_adc_bus", 0, 1, 4),
+ GATE(PCLK_RKCE, "pclk_rkce", "pclk_bus_root", CLK_IS_CRITICAL,
+ RV1126B_BUSCLKGATE_CON(6), 0, GFLAGS),
+ GATE(HCLK_NS_RKCE, "hclk_ns_rkce", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(6), 1, GFLAGS),
+ GATE(PCLK_OTPC_NS, "pclk_otpc_ns", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(6), 2, GFLAGS),
+ GATE(CLK_SBPI_OTPC_NS, "clk_sbpi_otpc_ns", "xin24m", 0,
+ RV1126B_BUSCLKGATE_CON(6), 3, GFLAGS),
+ COMPOSITE_NOMUX(CLK_USER_OTPC_NS, "clk_user_otpc_ns", "xin24m", 0,
+ RV1126B_BUSCLKSEL_CON(2), 12, 3, DFLAGS,
+ RV1126B_BUSCLKGATE_CON(6), 4, GFLAGS),
+ GATE(PCLK_OTP_MASK, "pclk_otp_mask", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(6), 6, GFLAGS),
+ GATE(CLK_TSADC_PHYCTRL, "clk_tsadc_phyctrl", "xin24m", CLK_IS_CRITICAL,
+ RV1126B_BUSCLKGATE_CON(6), 8, GFLAGS),
+ MUX(LRCK_SRC_ASRC0, "lrck_src_asrc0", lrck_src_asrc_p, 0,
+ RV1126B_BUSCLKSEL_CON(3), 0, 3, MFLAGS),
+ MUX(LRCK_DST_ASRC0, "lrck_dst_asrc0", lrck_src_asrc_p, 0,
+ RV1126B_BUSCLKSEL_CON(3), 4, 3, MFLAGS),
+ MUX(LRCK_SRC_ASRC1, "lrck_src_asrc1", lrck_src_asrc_p, 0,
+ RV1126B_BUSCLKSEL_CON(3), 8, 3, MFLAGS),
+ MUX(LRCK_DST_ASRC1, "lrck_dst_asrc1", lrck_src_asrc_p, 0,
+ RV1126B_BUSCLKSEL_CON(3), 12, 3, MFLAGS),
+ GATE(ACLK_NSRKCE, "aclk_nsrkce", "aclk_rkce_src", 0,
+ RV1126B_BUSCLKGATE_CON(2), 12, GFLAGS),
+ GATE(CLK_PKA_NSRKCE, "clk_pka_nsrkce", "clk_pka_rkce_src", 0,
+ RV1126B_BUSCLKGATE_CON(2), 13, GFLAGS),
+
+ /* pd_peri */
+ DIV(PCLK_RTC_ROOT, "pclk_rtc_root", "pclk_peri_root", 0,
+ RV1126B_PERICLKSEL_CON(0), 0, 2, DFLAGS),
+ GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_peri_root", 0,
+ RV1126B_PERICLKGATE_CON(0), 5, GFLAGS),
+ GATE(DBCLK_GPIO1, "dbclk_gpio1", "xin24m", 0,
+ RV1126B_PERICLKGATE_CON(0), 6, GFLAGS),
+ GATE(PCLK_IOC_VCCIO1, "pclk_ioc_vccio1", "pclk_peri_root", CLK_IS_CRITICAL,
+ RV1126B_PERICLKGATE_CON(0), 7, GFLAGS),
+ GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri_root", 0,
+ RV1126B_PERICLKGATE_CON(0), 8, GFLAGS),
+ GATE(CLK_REF_USB3OTG, "clk_ref_usb3otg", "xin24m", 0,
+ RV1126B_PERICLKGATE_CON(0), 9, GFLAGS),
+ GATE(CLK_SUSPEND_USB3OTG, "clk_suspend_usb3otg", "xin24m", 0,
+ RV1126B_PERICLKGATE_CON(0), 10, GFLAGS),
+ GATE(HCLK_USB2HOST, "hclk_usb2host", "aclk_peri_root", 0,
+ RV1126B_PERICLKGATE_CON(0), 11, GFLAGS),
+ GATE(HCLK_ARB_USB2HOST, "hclk_arb_usb2host", "aclk_peri_root", 0,
+ RV1126B_PERICLKGATE_CON(0), 12, GFLAGS),
+ GATE(PCLK_RTC_TEST, "pclk_rtc_test", "pclk_rtc_root", 0,
+ RV1126B_PERICLKGATE_CON(0), 13, GFLAGS),
+ GATE(HCLK_EMMC, "hclk_emmc", "aclk_peri_root", 0,
+ RV1126B_PERICLKGATE_CON(1), 0, GFLAGS),
+ GATE(HCLK_FSPI0, "hclk_fspi0", "aclk_peri_root", 0,
+ RV1126B_PERICLKGATE_CON(1), 1, GFLAGS),
+ GATE(HCLK_XIP_FSPI0, "hclk_xip_fspi0", "aclk_peri_root", 0,
+ RV1126B_PERICLKGATE_CON(1), 2, GFLAGS),
+ GATE(PCLK_PIPEPHY, "pclk_pipephy", "pclk_peri_root", 0,
+ RV1126B_PERICLKGATE_CON(1), 8, GFLAGS),
+ GATE(PCLK_USB2PHY, "pclk_usb2phy", "pclk_peri_root", 0,
+ RV1126B_PERICLKGATE_CON(1), 10, GFLAGS),
+ COMPOSITE_NOMUX(CLK_REF_PIPEPHY_CPLL_SRC, "clk_ref_pipephy_cpll_src", "cpll", 0,
+ RV1126B_PERICLKSEL_CON(1), 0, 6, DFLAGS,
+ RV1126B_PERICLKGATE_CON(1), 14, GFLAGS),
+ MUX(CLK_REF_PIPEPHY, "clk_ref_pipephy", clk_ref_pipephy_p, 0,
+ RV1126B_PERICLKSEL_CON(1), 12, 1, MFLAGS),
+};
+
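+/* armclk mux branch, registered below through rockchip_clk_register_armclk_multi_pll() */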
+static struct rockchip_clk_branch rv1126b_armclk __initdata =
+ MUX(ARMCLK, "armclk", mux_armclk_p, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ RV1126B_CORECLKSEL_CON(0), 1, 1, MFLAGS);
+
+static void __init rv1126b_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ void __iomem *reg_base;
+ unsigned long clk_nr_clks;
+
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rv1126b_clk_branches,
+ ARRAY_SIZE(rv1126b_clk_branches)) + 1;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ rockchip_clk_register_plls(ctx, rv1126b_pll_clks,
+ ARRAY_SIZE(rv1126b_pll_clks),
+ 0);
+
+ rockchip_clk_register_branches(ctx, rv1126b_clk_branches,
+ ARRAY_SIZE(rv1126b_clk_branches));
+
+ rockchip_clk_register_armclk_multi_pll(ctx, &rv1126b_armclk,
+ rv1126b_cpuclk_rates,
+ ARRAY_SIZE(rv1126b_cpuclk_rates));
+
+ rv1126b_rst_init(np, reg_base);
+
+ rockchip_register_restart_notifier(ctx, RV1126B_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
+
+	/* pvtpll src init: select PVTPLL as the clock source for the core/npu/vi/vepu/vcp domains */
+ writel_relaxed(PVTPLL_SRC_SEL_PVTPLL, reg_base + RV1126B_CORECLKSEL_CON(0));
+ writel_relaxed(PVTPLL_SRC_SEL_PVTPLL, reg_base + RV1126B_NPUCLKSEL_CON(0));
+ writel_relaxed(PVTPLL_SRC_SEL_PVTPLL, reg_base + RV1126B_VICLKSEL_CON(0));
+ writel_relaxed(PVTPLL_SRC_SEL_PVTPLL, reg_base + RV1126B_VEPUCLKSEL_CON(0));
+ writel_relaxed(PVTPLL_SRC_SEL_PVTPLL, reg_base + RV1126B_VCPCLKSEL_CON(0));
+}
+
+CLK_OF_DECLARE(rv1126b_cru, "rockchip,rv1126b-cru", rv1126b_clk_init);
+
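+/* alternative init path so the CRU can also be bound as a platform device, mirroring clk-rk3568 */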
+struct clk_rv1126b_inits {
+ void (*inits)(struct device_node *np);
+};
+
+static const struct clk_rv1126b_inits clk_rv1126b_init = {
+ .inits = rv1126b_clk_init,
+};
+
+static const struct of_device_id clk_rv1126b_match_table[] = {
+ {
+ .compatible = "rockchip,rv1126b-cru",
+ .data = &clk_rv1126b_init,
+ },
+ { }
+};
+
+static int clk_rv1126b_probe(struct platform_device *pdev)
+{
+ const struct clk_rv1126b_inits *init_data;
+ struct device *dev = &pdev->dev;
+
+ init_data = device_get_match_data(dev);
+ if (!init_data)
+ return -EINVAL;
+
+ if (init_data->inits)
+ init_data->inits(dev->of_node);
+
+ return 0;
+}
+
+static struct platform_driver clk_rv1126b_driver = {
+ .driver = {
+ .name = "clk-rv1126b",
+ .of_match_table = clk_rv1126b_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(clk_rv1126b_driver, clk_rv1126b_probe);
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 19caf26c991b..2601df3b1066 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -722,6 +722,30 @@ void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_armclk);
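+/*
+ * Variant of rockchip_clk_register_armclk() for armclks that can switch
+ * between multiple PLL parents; the mux/div layout comes from a regular
+ * clock branch instead of a cpuclk reg_data description.
+ */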
+void rockchip_clk_register_armclk_multi_pll(struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *list,
+ const struct rockchip_cpuclk_rate_table *rates,
+ int nrates)
+{
+ struct clk *clk;
+
+ clk = rockchip_clk_register_cpuclk_multi_pll(list->name, list->parent_names,
+ list->num_parents, ctx->reg_base,
+ list->muxdiv_offset, list->mux_shift,
+ list->mux_width, list->mux_flags,
+ list->div_offset, list->div_shift,
+ list->div_width, list->div_flags,
+ list->flags, &ctx->lock, rates, nrates);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s: %ld\n",
+ __func__, list->name, PTR_ERR(clk));
+ return;
+ }
+
+ rockchip_clk_set_lookup(ctx, clk, list->id);
+}
+EXPORT_SYMBOL_GPL(rockchip_clk_register_armclk_multi_pll);
+
void rockchip_clk_protect_critical(const char *const clocks[],
int nclocks)
{
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index 1e9c3c0d31e3..b2fff1d13a4a 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -99,6 +99,73 @@ struct clk;
#define RV1126_EMMC_CON0 0x450
#define RV1126_EMMC_CON1 0x454
+#define RV1126B_TOPCRU_BASE 0x0
+#define RV1126B_BUSCRU_BASE 0x10000
+#define RV1126B_PERICRU_BASE 0x20000
+#define RV1126B_CORECRU_BASE 0x30000
+#define RV1126B_PMUCRU_BASE 0x40000
+#define RV1126B_PMU1CRU_BASE 0x50000
+#define RV1126B_DDRCRU_BASE 0x60000
+#define RV1126B_SUBDDRCRU_BASE 0x68000
+#define RV1126B_VICRU_BASE 0x70000
+#define RV1126B_VEPUCRU_BASE 0x80000
+#define RV1126B_NPUCRU_BASE 0x90000
+#define RV1126B_VDOCRU_BASE 0xA0000
+#define RV1126B_VCPCRU_BASE 0xB0000
+
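+/* every RV1126B CRU instance uses the same layout: CLKSEL at +0x300, CLKGATE at +0x800, SOFTRST at +0xa00 */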
+#define RV1126B_PLL_CON(x) ((x) * 0x4 + RV1126B_TOPCRU_BASE)
+#define RV1126B_MODE_CON (0x280 + RV1126B_TOPCRU_BASE)
+#define RV1126B_CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_TOPCRU_BASE)
+#define RV1126B_CLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_TOPCRU_BASE)
+#define RV1126B_SOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_TOPCRU_BASE)
+#define RV1126B_GLB_SRST_FST (0xc08 + RV1126B_TOPCRU_BASE)
+#define RV1126B_GLB_SRST_SND (0xc0c + RV1126B_TOPCRU_BASE)
+#define RV1126B_CLK_CM_FRAC0_DIV_H (0xcc0 + RV1126B_TOPCRU_BASE)
+#define RV1126B_CLK_CM_FRAC1_DIV_H (0xcc4 + RV1126B_TOPCRU_BASE)
+#define RV1126B_CLK_CM_FRAC2_DIV_H (0xcc8 + RV1126B_TOPCRU_BASE)
+#define RV1126B_CLK_UART_FRAC0_DIV_H (0xccc + RV1126B_TOPCRU_BASE)
+#define RV1126B_CLK_UART_FRAC1_DIV_H (0xcd0 + RV1126B_TOPCRU_BASE)
+#define RV1126B_CLK_AUDIO_FRAC0_DIV_H (0xcd4 + RV1126B_TOPCRU_BASE)
+#define RV1126B_CLK_AUDIO_FRAC1_DIV_H (0xcd8 + RV1126B_TOPCRU_BASE)
+#define RV1126B_BUSCLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_BUSCRU_BASE)
+#define RV1126B_BUSCLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_BUSCRU_BASE)
+#define RV1126B_BUSSOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_BUSCRU_BASE)
+#define RV1126B_PERIPLL_CON(x) ((x) * 0x4 + RV1126B_PERICRU_BASE)
+#define RV1126B_PERICLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_PERICRU_BASE)
+#define RV1126B_PERICLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_PERICRU_BASE)
+#define RV1126B_PERISOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_PERICRU_BASE)
+#define RV1126B_CORECLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_CORECRU_BASE)
+#define RV1126B_CORECLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_CORECRU_BASE)
+#define RV1126B_CORESOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_CORECRU_BASE)
+#define RV1126B_PMUCLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_PMUCRU_BASE)
+#define RV1126B_PMUCLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_PMUCRU_BASE)
+#define RV1126B_PMUSOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_PMUCRU_BASE)
+#define RV1126B_PMU1CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_PMU1CRU_BASE)
+#define RV1126B_PMU1CLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_PMU1CRU_BASE)
+#define RV1126B_PMU1SOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_PMU1CRU_BASE)
+#define RV1126B_DDRCLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_DDRCRU_BASE)
+#define RV1126B_DDRCLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_DDRCRU_BASE)
+#define RV1126B_DDRSOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_DDRCRU_BASE)
+#define RV1126B_SUBDDRPLL_CON(x) ((x) * 0x4 + RV1126B_SUBDDRCRU_BASE)
+#define RV1126B_SUBDDRCLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_SUBDDRCRU_BASE)
+#define RV1126B_SUBDDRCLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_SUBDDRCRU_BASE)
+#define RV1126B_SUBDDRSOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_SUBDDRCRU_BASE)
+#define RV1126B_VICLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_VICRU_BASE)
+#define RV1126B_VICLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_VICRU_BASE)
+#define RV1126B_VISOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_VICRU_BASE)
+#define RV1126B_VEPUCLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_VEPUCRU_BASE)
+#define RV1126B_VEPUCLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_VEPUCRU_BASE)
+#define RV1126B_VEPUSOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_VEPUCRU_BASE)
+#define RV1126B_NPUCLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_NPUCRU_BASE)
+#define RV1126B_NPUCLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_NPUCRU_BASE)
+#define RV1126B_NPUSOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_NPUCRU_BASE)
+#define RV1126B_VDOCLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_VDOCRU_BASE)
+#define RV1126B_VDOCLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_VDOCRU_BASE)
+#define RV1126B_VDOSOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_VDOCRU_BASE)
+#define RV1126B_VCPCLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_VCPCRU_BASE)
+#define RV1126B_VCPCLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_VCPCRU_BASE)
+#define RV1126B_VCPSOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_VCPCRU_BASE)
+
#define RK2928_PLL_CON(x) ((x) * 0x4)
#define RK2928_MODE_CON 0x40
#define RK2928_CLKSEL_CON(x) ((x) * 0x4 + 0x44)
@@ -208,6 +275,18 @@ struct clk;
#define RK3399_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x100)
#define RK3399_PMU_SOFTRST_CON(x) ((x) * 0x4 + 0x110)
+#define RK3506_PMU_CRU_BASE 0x10000
+#define RK3506_PLL_CON(x) ((x) * 0x4 + RK3506_PMU_CRU_BASE)
+#define RK3506_CLKSEL_CON(x) ((x) * 0x4 + 0x300)
+#define RK3506_CLKGATE_CON(x) ((x) * 0x4 + 0x800)
+#define RK3506_SOFTRST_CON(x) ((x) * 0x4 + 0xa00)
+#define RK3506_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RK3506_PMU_CRU_BASE)
+#define RK3506_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x800 + RK3506_PMU_CRU_BASE)
+#define RK3506_MODE_CON 0x280
+#define RK3506_GLB_CNT_TH 0xc00
+#define RK3506_GLB_SRST_FST 0xc08
+#define RK3506_GLB_SRST_SND 0xc0c
+
#define RK3528_PMU_CRU_BASE 0x10000
#define RK3528_PCIE_CRU_BASE 0x20000
#define RK3528_DDRPHY_CRU_BASE 0x28000
@@ -532,7 +611,7 @@ struct rockchip_pll_rate_table {
*
* Flags:
* ROCKCHIP_PLL_SYNC_RATE - check rate parameters to match against the
- * rate_table parameters and ajust them if necessary.
+ * rate_table parameters and adjust them if necessary.
* ROCKCHIP_PLL_FIXED_MODE - the pll operates in normal mode only
*/
struct rockchip_pll_clock {
@@ -622,6 +701,17 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
const struct rockchip_cpuclk_rate_table *rates,
int nrates, void __iomem *reg_base, spinlock_t *lock);
+struct clk *rockchip_clk_register_cpuclk_multi_pll(const char *name,
+ const char *const *parent_names,
+ u8 num_parents, void __iomem *base,
+ int muxdiv_offset, u8 mux_shift,
+ u8 mux_width, u8 mux_flags,
+ int div_offset, u8 div_shift,
+ u8 div_width, u8 div_flags,
+ unsigned long flags, spinlock_t *lock,
+ const struct rockchip_cpuclk_rate_table *rates,
+ int nrates);
+
struct clk *rockchip_clk_register_mmc(const char *name,
const char *const *parent_names, u8 num_parents,
void __iomem *reg,
@@ -1208,6 +1298,10 @@ void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
const struct rockchip_cpuclk_reg_data *reg_data,
const struct rockchip_cpuclk_rate_table *rates,
int nrates);
+void rockchip_clk_register_armclk_multi_pll(struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *list,
+ const struct rockchip_cpuclk_rate_table *rates,
+ int nrates);
void rockchip_clk_protect_critical(const char *const clocks[], int nclocks);
void rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
unsigned int reg, void (*cb)(void));
@@ -1246,6 +1340,8 @@ static inline void rockchip_register_softrst(struct device_node *np,
return rockchip_register_softrst_lut(np, NULL, num_regs, base, flags);
}
+void rv1126b_rst_init(struct device_node *np, void __iomem *reg_base);
+void rk3506_rst_init(struct device_node *np, void __iomem *reg_base);
void rk3528_rst_init(struct device_node *np, void __iomem *reg_base);
void rk3562_rst_init(struct device_node *np, void __iomem *reg_base);
void rk3576_rst_init(struct device_node *np, void __iomem *reg_base);
diff --git a/drivers/clk/rockchip/rst-rk3506.c b/drivers/clk/rockchip/rst-rk3506.c
new file mode 100644
index 000000000000..c3abde60f3c6
--- /dev/null
+++ b/drivers/clk/rockchip/rst-rk3506.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2025 Rockchip Electronics Co., Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <dt-bindings/reset/rockchip,rk3506-cru.h>
+#include "clk.h"
+
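+/* each entry encodes reg * 16 + bit: the softrst core maps entry N to bit (N % 16) of register (N / 16) * 4 */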
+/* 0xFF9A0000 + 0x0A00 */
+#define RK3506_CRU_RESET_OFFSET(id, reg, bit) [id] = (0 + reg * 16 + bit)
+
+/* mapping table for reset ID to register offset */
+static const int rk3506_register_offset[] = {
+ /* CRU-->SOFTRST_CON00 */
+ RK3506_CRU_RESET_OFFSET(SRST_NCOREPORESET0_AC, 0, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_NCOREPORESET1_AC, 0, 1),
+ RK3506_CRU_RESET_OFFSET(SRST_NCOREPORESET2_AC, 0, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_NCORESET0_AC, 0, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_NCORESET1_AC, 0, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_NCORESET2_AC, 0, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_NL2RESET_AC, 0, 8),
+ RK3506_CRU_RESET_OFFSET(SRST_A_CORE_BIU_AC, 0, 9),
+ RK3506_CRU_RESET_OFFSET(SRST_H_M0_AC, 0, 10),
+
+ /* CRU-->SOFTRST_CON02 */
+ RK3506_CRU_RESET_OFFSET(SRST_NDBGRESET, 2, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_P_CORE_BIU, 2, 14),
+ RK3506_CRU_RESET_OFFSET(SRST_PMU, 2, 15),
+
+ /* CRU-->SOFTRST_CON03 */
+ RK3506_CRU_RESET_OFFSET(SRST_P_DBG, 3, 1),
+ RK3506_CRU_RESET_OFFSET(SRST_POT_DBG, 3, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_P_CORE_GRF, 3, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_CORE_EMA_DETECT, 3, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_REF_PVTPLL_CORE, 3, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_P_GPIO1, 3, 8),
+ RK3506_CRU_RESET_OFFSET(SRST_DB_GPIO1, 3, 9),
+
+ /* CRU-->SOFTRST_CON04 */
+ RK3506_CRU_RESET_OFFSET(SRST_A_CORE_PERI_BIU, 4, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_A_DSMC, 4, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_P_DSMC, 4, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_FLEXBUS, 4, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_A_FLEXBUS, 4, 9),
+ RK3506_CRU_RESET_OFFSET(SRST_H_FLEXBUS, 4, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_A_DSMC_SLV, 4, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_H_DSMC_SLV, 4, 12),
+ RK3506_CRU_RESET_OFFSET(SRST_DSMC_SLV, 4, 13),
+
+ /* CRU-->SOFTRST_CON05 */
+ RK3506_CRU_RESET_OFFSET(SRST_A_BUS_BIU, 5, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_H_BUS_BIU, 5, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_P_BUS_BIU, 5, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_A_SYSRAM, 5, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_H_SYSRAM, 5, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_A_DMAC0, 5, 8),
+ RK3506_CRU_RESET_OFFSET(SRST_A_DMAC1, 5, 9),
+ RK3506_CRU_RESET_OFFSET(SRST_H_M0, 5, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_M0_JTAG, 5, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_H_CRYPTO, 5, 15),
+
+ /* CRU-->SOFTRST_CON06 */
+ RK3506_CRU_RESET_OFFSET(SRST_H_RNG, 6, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_P_BUS_GRF, 6, 1),
+ RK3506_CRU_RESET_OFFSET(SRST_P_TIMER0, 6, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_TIMER0_CH0, 6, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_TIMER0_CH1, 6, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_TIMER0_CH2, 6, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_TIMER0_CH3, 6, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_TIMER0_CH4, 6, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_TIMER0_CH5, 6, 8),
+ RK3506_CRU_RESET_OFFSET(SRST_P_WDT0, 6, 9),
+ RK3506_CRU_RESET_OFFSET(SRST_T_WDT0, 6, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_P_WDT1, 6, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_T_WDT1, 6, 12),
+ RK3506_CRU_RESET_OFFSET(SRST_P_MAILBOX, 6, 13),
+ RK3506_CRU_RESET_OFFSET(SRST_P_INTMUX, 6, 14),
+ RK3506_CRU_RESET_OFFSET(SRST_P_SPINLOCK, 6, 15),
+
+ /* CRU-->SOFTRST_CON07 */
+ RK3506_CRU_RESET_OFFSET(SRST_P_DDRC, 7, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_H_DDRPHY, 7, 1),
+ RK3506_CRU_RESET_OFFSET(SRST_P_DDRMON, 7, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_DDRMON_OSC, 7, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_P_DDR_LPC, 7, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_H_USBOTG0, 7, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_USBOTG0_ADP, 7, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_H_USBOTG1, 7, 8),
+ RK3506_CRU_RESET_OFFSET(SRST_USBOTG1_ADP, 7, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_P_USBPHY, 7, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_USBPHY_POR, 7, 12),
+ RK3506_CRU_RESET_OFFSET(SRST_USBPHY_OTG0, 7, 13),
+ RK3506_CRU_RESET_OFFSET(SRST_USBPHY_OTG1, 7, 14),
+
+ /* CRU-->SOFTRST_CON08 */
+ RK3506_CRU_RESET_OFFSET(SRST_A_DMA2DDR, 8, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_P_DMA2DDR, 8, 1),
+
+ /* CRU-->SOFTRST_CON09 */
+ RK3506_CRU_RESET_OFFSET(SRST_USBOTG0_UTMI, 9, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_USBOTG1_UTMI, 9, 1),
+
+ /* CRU-->SOFTRST_CON10 */
+ RK3506_CRU_RESET_OFFSET(SRST_A_DDRC_0, 10, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_A_DDRC_1, 10, 1),
+ RK3506_CRU_RESET_OFFSET(SRST_A_DDR_BIU, 10, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_DDRC, 10, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_DDRMON, 10, 4),
+
+ /* CRU-->SOFTRST_CON11 */
+ RK3506_CRU_RESET_OFFSET(SRST_H_LSPERI_BIU, 11, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_P_UART0, 11, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_P_UART1, 11, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_P_UART2, 11, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_P_UART3, 11, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_P_UART4, 11, 8),
+ RK3506_CRU_RESET_OFFSET(SRST_UART0, 11, 9),
+ RK3506_CRU_RESET_OFFSET(SRST_UART1, 11, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_UART2, 11, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_UART3, 11, 12),
+ RK3506_CRU_RESET_OFFSET(SRST_UART4, 11, 13),
+ RK3506_CRU_RESET_OFFSET(SRST_P_I2C0, 11, 14),
+ RK3506_CRU_RESET_OFFSET(SRST_I2C0, 11, 15),
+
+ /* CRU-->SOFTRST_CON12 */
+ RK3506_CRU_RESET_OFFSET(SRST_P_I2C1, 12, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_I2C1, 12, 1),
+ RK3506_CRU_RESET_OFFSET(SRST_P_I2C2, 12, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_I2C2, 12, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_P_PWM1, 12, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_PWM1, 12, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_P_SPI0, 12, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_SPI0, 12, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_P_SPI1, 12, 12),
+ RK3506_CRU_RESET_OFFSET(SRST_SPI1, 12, 13),
+ RK3506_CRU_RESET_OFFSET(SRST_P_GPIO2, 12, 14),
+ RK3506_CRU_RESET_OFFSET(SRST_DB_GPIO2, 12, 15),
+
+ /* CRU-->SOFTRST_CON13 */
+ RK3506_CRU_RESET_OFFSET(SRST_P_GPIO3, 13, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_DB_GPIO3, 13, 1),
+ RK3506_CRU_RESET_OFFSET(SRST_P_GPIO4, 13, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_DB_GPIO4, 13, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_H_CAN0, 13, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_CAN0, 13, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_H_CAN1, 13, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_CAN1, 13, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_H_PDM, 13, 8),
+ RK3506_CRU_RESET_OFFSET(SRST_M_PDM, 13, 9),
+ RK3506_CRU_RESET_OFFSET(SRST_PDM, 13, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_SPDIFTX, 13, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_H_SPDIFTX, 13, 12),
+ RK3506_CRU_RESET_OFFSET(SRST_H_SPDIFRX, 13, 13),
+ RK3506_CRU_RESET_OFFSET(SRST_SPDIFRX, 13, 14),
+ RK3506_CRU_RESET_OFFSET(SRST_M_SAI0, 13, 15),
+
+ /* CRU-->SOFTRST_CON14 */
+ RK3506_CRU_RESET_OFFSET(SRST_H_SAI0, 14, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_M_SAI1, 14, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_H_SAI1, 14, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_H_ASRC0, 14, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_ASRC0, 14, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_H_ASRC1, 14, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_ASRC1, 14, 8),
+
+ /* CRU-->SOFTRST_CON17 */
+ RK3506_CRU_RESET_OFFSET(SRST_H_HSPERI_BIU, 17, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_H_SDMMC, 17, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_H_FSPI, 17, 8),
+ RK3506_CRU_RESET_OFFSET(SRST_S_FSPI, 17, 9),
+ RK3506_CRU_RESET_OFFSET(SRST_P_SPI2, 17, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_A_MAC0, 17, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_A_MAC1, 17, 12),
+
+ /* CRU-->SOFTRST_CON18 */
+ RK3506_CRU_RESET_OFFSET(SRST_M_SAI2, 18, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_H_SAI2, 18, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_H_SAI3, 18, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_M_SAI3, 18, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_H_SAI4, 18, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_M_SAI4, 18, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_H_DSM, 18, 12),
+ RK3506_CRU_RESET_OFFSET(SRST_M_DSM, 18, 13),
+ RK3506_CRU_RESET_OFFSET(SRST_P_AUDIO_ADC, 18, 14),
+ RK3506_CRU_RESET_OFFSET(SRST_M_AUDIO_ADC, 18, 15),
+
+ /* CRU-->SOFTRST_CON19 */
+ RK3506_CRU_RESET_OFFSET(SRST_P_SARADC, 19, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_SARADC, 19, 1),
+ RK3506_CRU_RESET_OFFSET(SRST_SARADC_PHY, 19, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_P_OTPC_NS, 19, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_SBPI_OTPC_NS, 19, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_USER_OTPC_NS, 19, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_P_UART5, 19, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_UART5, 19, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_P_GPIO234_IOC, 19, 8),
+
+ /* CRU-->SOFTRST_CON21 */
+ RK3506_CRU_RESET_OFFSET(SRST_A_VIO_BIU, 21, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_H_VIO_BIU, 21, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_H_RGA, 21, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_A_RGA, 21, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_CORE_RGA, 21, 8),
+ RK3506_CRU_RESET_OFFSET(SRST_A_VOP, 21, 9),
+ RK3506_CRU_RESET_OFFSET(SRST_H_VOP, 21, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_VOP, 21, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_P_DPHY, 21, 12),
+ RK3506_CRU_RESET_OFFSET(SRST_P_DSI_HOST, 21, 13),
+ RK3506_CRU_RESET_OFFSET(SRST_P_TSADC, 21, 14),
+ RK3506_CRU_RESET_OFFSET(SRST_TSADC, 21, 15),
+
+ /* CRU-->SOFTRST_CON22 */
+ RK3506_CRU_RESET_OFFSET(SRST_P_GPIO1_IOC, 22, 1),
+};
+
+void rk3506_rst_init(struct device_node *np, void __iomem *reg_base)
+{
+ rockchip_register_softrst_lut(np,
+ rk3506_register_offset,
+ ARRAY_SIZE(rk3506_register_offset),
+ reg_base + RK3506_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+}
diff --git a/drivers/clk/rockchip/rst-rv1126b.c b/drivers/clk/rockchip/rst-rv1126b.c
new file mode 100644
index 000000000000..c75b0d885ca2
--- /dev/null
+++ b/drivers/clk/rockchip/rst-rv1126b.c
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2025 Rockchip Electronics Co., Ltd.
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <dt-bindings/reset/rockchip,rv1126b-cru.h>
+#include "clk.h"
+
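+/*
+ * Each entry encodes (CRU base + reg * 4) * 4 + bit, i.e. base * 4 +
+ * reg * 16 + bit, so the softrst core recovers the register byte offset
+ * as (entry / 16) * 4 and the bit as entry % 16 across all CRU instances.
+ */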
+/* 0x20000000 + 0x0A00 */
+#define TOPCRU_RESET_OFFSET(id, reg, bit) [id] = (0x0 * 4 + reg * 16 + bit)
+/* 0x20010000 + 0x0A00 */
+#define BUSCRU_RESET_OFFSET(id, reg, bit) [id] = (0x10000 * 4 + reg * 16 + bit)
+/* 0x20020000 + 0x0A00 */
+#define PERICRU_RESET_OFFSET(id, reg, bit) [id] = (0x20000 * 4 + reg * 16 + bit)
+/* 0x20030000 + 0x0A00 */
+#define CORECRU_RESET_OFFSET(id, reg, bit) [id] = (0x30000 * 4 + reg * 16 + bit)
+/* 0x20040000 + 0x0A00 */
+#define PMUCRU_RESET_OFFSET(id, reg, bit) [id] = (0x40000 * 4 + reg * 16 + bit)
+/* 0x20050000 + 0x0A00 */
+#define PMU1CRU_RESET_OFFSET(id, reg, bit) [id] = (0x50000 * 4 + reg * 16 + bit)
+/* 0x20060000 + 0x0A00 */
+#define DDRCRU_RESET_OFFSET(id, reg, bit) [id] = (0x60000 * 4 + reg * 16 + bit)
+/* 0x20068000 + 0x0A00 */
+#define SUBDDRCRU_RESET_OFFSET(id, reg, bit) [id] = (0x68000 * 4 + reg * 16 + bit)
+/* 0x20070000 + 0x0A00 */
+#define VICRU_RESET_OFFSET(id, reg, bit) [id] = (0x70000 * 4 + reg * 16 + bit)
+/* 0x20080000 + 0x0A00 */
+#define VEPUCRU_RESET_OFFSET(id, reg, bit) [id] = (0x80000 * 4 + reg * 16 + bit)
+/* 0x20090000 + 0x0A00 */
+#define NPUCRU_RESET_OFFSET(id, reg, bit) [id] = (0x90000 * 4 + reg * 16 + bit)
+/* 0x200A0000 + 0x0A00 */
+#define VDOCRU_RESET_OFFSET(id, reg, bit) [id] = (0xA0000 * 4 + reg * 16 + bit)
+/* 0x200B0000 + 0x0A00 */
+#define VCPCRU_RESET_OFFSET(id, reg, bit) [id] = (0xB0000 * 4 + reg * 16 + bit)
+
+/* mapping table for reset ID to register offset */
+static const int rv1126b_register_offset[] = {
+ /* TOPCRU-->SOFTRST_CON15 */
+ TOPCRU_RESET_OFFSET(SRST_P_CRU, 15, 1),
+ TOPCRU_RESET_OFFSET(SRST_P_CRU_BIU, 15, 2),
+
+ /* BUSCRU-->SOFTRST_CON00 */
+ BUSCRU_RESET_OFFSET(SRST_A_TOP_BIU, 0, 0),
+ BUSCRU_RESET_OFFSET(SRST_A_RKCE_BIU, 0, 1),
+ BUSCRU_RESET_OFFSET(SRST_A_BUS_BIU, 0, 2),
+ BUSCRU_RESET_OFFSET(SRST_H_BUS_BIU, 0, 3),
+ BUSCRU_RESET_OFFSET(SRST_P_BUS_BIU, 0, 4),
+ BUSCRU_RESET_OFFSET(SRST_P_CRU_BUS, 0, 5),
+ BUSCRU_RESET_OFFSET(SRST_P_SYS_GRF, 0, 6),
+ BUSCRU_RESET_OFFSET(SRST_H_BOOTROM, 0, 7),
+ BUSCRU_RESET_OFFSET(SRST_A_GIC400, 0, 8),
+ BUSCRU_RESET_OFFSET(SRST_A_SPINLOCK, 0, 9),
+ BUSCRU_RESET_OFFSET(SRST_P_WDT_NS, 0, 10),
+ BUSCRU_RESET_OFFSET(SRST_T_WDT_NS, 0, 11),
+
+ /* BUSCRU-->SOFTRST_CON01 */
+ BUSCRU_RESET_OFFSET(SRST_P_WDT_HPMCU, 1, 0),
+ BUSCRU_RESET_OFFSET(SRST_T_WDT_HPMCU, 1, 1),
+ BUSCRU_RESET_OFFSET(SRST_H_CACHE, 1, 2),
+ BUSCRU_RESET_OFFSET(SRST_P_HPMCU_MAILBOX, 1, 3),
+ BUSCRU_RESET_OFFSET(SRST_P_HPMCU_INTMUX, 1, 4),
+ BUSCRU_RESET_OFFSET(SRST_HPMCU_FULL_CLUSTER, 1, 5),
+ BUSCRU_RESET_OFFSET(SRST_HPMCU_PWUP, 1, 6),
+ BUSCRU_RESET_OFFSET(SRST_HPMCU_ONLY_CORE, 1, 7),
+ BUSCRU_RESET_OFFSET(SRST_T_HPMCU_JTAG, 1, 8),
+ BUSCRU_RESET_OFFSET(SRST_P_RKDMA, 1, 11),
+ BUSCRU_RESET_OFFSET(SRST_A_RKDMA, 1, 12),
+
+ /* BUSCRU-->SOFTRST_CON02 */
+ BUSCRU_RESET_OFFSET(SRST_P_DCF, 2, 0),
+ BUSCRU_RESET_OFFSET(SRST_A_DCF, 2, 1),
+ BUSCRU_RESET_OFFSET(SRST_H_RGA, 2, 2),
+ BUSCRU_RESET_OFFSET(SRST_A_RGA, 2, 3),
+ BUSCRU_RESET_OFFSET(SRST_CORE_RGA, 2, 4),
+ BUSCRU_RESET_OFFSET(SRST_P_TIMER, 2, 5),
+ BUSCRU_RESET_OFFSET(SRST_TIMER0, 2, 6),
+ BUSCRU_RESET_OFFSET(SRST_TIMER1, 2, 7),
+ BUSCRU_RESET_OFFSET(SRST_TIMER2, 2, 8),
+ BUSCRU_RESET_OFFSET(SRST_TIMER3, 2, 9),
+ BUSCRU_RESET_OFFSET(SRST_TIMER4, 2, 10),
+ BUSCRU_RESET_OFFSET(SRST_TIMER5, 2, 11),
+ BUSCRU_RESET_OFFSET(SRST_A_RKCE, 2, 12),
+ BUSCRU_RESET_OFFSET(SRST_PKA_RKCE, 2, 13),
+ BUSCRU_RESET_OFFSET(SRST_H_RKRNG_S, 2, 14),
+ BUSCRU_RESET_OFFSET(SRST_H_RKRNG_NS, 2, 15),
+
+ /* BUSCRU-->SOFTRST_CON03 */
+ BUSCRU_RESET_OFFSET(SRST_P_I2C0, 3, 0),
+ BUSCRU_RESET_OFFSET(SRST_I2C0, 3, 1),
+ BUSCRU_RESET_OFFSET(SRST_P_I2C1, 3, 2),
+ BUSCRU_RESET_OFFSET(SRST_I2C1, 3, 3),
+ BUSCRU_RESET_OFFSET(SRST_P_I2C3, 3, 4),
+ BUSCRU_RESET_OFFSET(SRST_I2C3, 3, 5),
+ BUSCRU_RESET_OFFSET(SRST_P_I2C4, 3, 6),
+ BUSCRU_RESET_OFFSET(SRST_I2C4, 3, 7),
+ BUSCRU_RESET_OFFSET(SRST_P_I2C5, 3, 8),
+ BUSCRU_RESET_OFFSET(SRST_I2C5, 3, 9),
+ BUSCRU_RESET_OFFSET(SRST_P_SPI0, 3, 10),
+ BUSCRU_RESET_OFFSET(SRST_SPI0, 3, 11),
+ BUSCRU_RESET_OFFSET(SRST_P_SPI1, 3, 12),
+ BUSCRU_RESET_OFFSET(SRST_SPI1, 3, 13),
+
+ /* BUSCRU-->SOFTRST_CON04 */
+ BUSCRU_RESET_OFFSET(SRST_P_PWM0, 4, 0),
+ BUSCRU_RESET_OFFSET(SRST_PWM0, 4, 1),
+ BUSCRU_RESET_OFFSET(SRST_P_PWM2, 4, 4),
+ BUSCRU_RESET_OFFSET(SRST_PWM2, 4, 5),
+ BUSCRU_RESET_OFFSET(SRST_P_PWM3, 4, 8),
+ BUSCRU_RESET_OFFSET(SRST_PWM3, 4, 9),
+
+ /* BUSCRU-->SOFTRST_CON05 */
+ BUSCRU_RESET_OFFSET(SRST_P_UART1, 5, 0),
+ BUSCRU_RESET_OFFSET(SRST_S_UART1, 5, 1),
+ BUSCRU_RESET_OFFSET(SRST_P_UART2, 5, 2),
+ BUSCRU_RESET_OFFSET(SRST_S_UART2, 5, 3),
+ BUSCRU_RESET_OFFSET(SRST_P_UART3, 5, 4),
+ BUSCRU_RESET_OFFSET(SRST_S_UART3, 5, 5),
+ BUSCRU_RESET_OFFSET(SRST_P_UART4, 5, 6),
+ BUSCRU_RESET_OFFSET(SRST_S_UART4, 5, 7),
+ BUSCRU_RESET_OFFSET(SRST_P_UART5, 5, 8),
+ BUSCRU_RESET_OFFSET(SRST_S_UART5, 5, 9),
+ BUSCRU_RESET_OFFSET(SRST_P_UART6, 5, 10),
+ BUSCRU_RESET_OFFSET(SRST_S_UART6, 5, 11),
+ BUSCRU_RESET_OFFSET(SRST_P_UART7, 5, 12),
+ BUSCRU_RESET_OFFSET(SRST_S_UART7, 5, 13),
+
+ /* BUSCRU-->SOFTRST_CON06 */
+ BUSCRU_RESET_OFFSET(SRST_P_TSADC, 6, 0),
+ BUSCRU_RESET_OFFSET(SRST_TSADC, 6, 1),
+ BUSCRU_RESET_OFFSET(SRST_H_SAI0, 6, 2),
+ BUSCRU_RESET_OFFSET(SRST_M_SAI0, 6, 3),
+ BUSCRU_RESET_OFFSET(SRST_H_SAI1, 6, 4),
+ BUSCRU_RESET_OFFSET(SRST_M_SAI1, 6, 5),
+ BUSCRU_RESET_OFFSET(SRST_H_SAI2, 6, 6),
+ BUSCRU_RESET_OFFSET(SRST_M_SAI2, 6, 7),
+ BUSCRU_RESET_OFFSET(SRST_H_RKDSM, 6, 8),
+ BUSCRU_RESET_OFFSET(SRST_M_RKDSM, 6, 9),
+ BUSCRU_RESET_OFFSET(SRST_H_PDM, 6, 10),
+ BUSCRU_RESET_OFFSET(SRST_M_PDM, 6, 11),
+ BUSCRU_RESET_OFFSET(SRST_PDM, 6, 12),
+
+ /* BUSCRU-->SOFTRST_CON07 */
+ BUSCRU_RESET_OFFSET(SRST_H_ASRC0, 7, 0),
+ BUSCRU_RESET_OFFSET(SRST_ASRC0, 7, 1),
+ BUSCRU_RESET_OFFSET(SRST_H_ASRC1, 7, 2),
+ BUSCRU_RESET_OFFSET(SRST_ASRC1, 7, 3),
+ BUSCRU_RESET_OFFSET(SRST_P_AUDIO_ADC_BUS, 7, 4),
+ BUSCRU_RESET_OFFSET(SRST_M_AUDIO_ADC_BUS, 7, 5),
+ BUSCRU_RESET_OFFSET(SRST_P_RKCE, 7, 6),
+ BUSCRU_RESET_OFFSET(SRST_H_NS_RKCE, 7, 7),
+ BUSCRU_RESET_OFFSET(SRST_P_OTPC_NS, 7, 8),
+ BUSCRU_RESET_OFFSET(SRST_SBPI_OTPC_NS, 7, 9),
+ BUSCRU_RESET_OFFSET(SRST_USER_OTPC_NS, 7, 10),
+ BUSCRU_RESET_OFFSET(SRST_OTPC_ARB, 7, 11),
+ BUSCRU_RESET_OFFSET(SRST_P_OTP_MASK, 7, 12),
+
+ /* PERICRU-->SOFTRST_CON00 */
+ PERICRU_RESET_OFFSET(SRST_A_PERI_BIU, 0, 0),
+ PERICRU_RESET_OFFSET(SRST_P_PERI_BIU, 0, 1),
+ PERICRU_RESET_OFFSET(SRST_P_RTC_BIU, 0, 2),
+ PERICRU_RESET_OFFSET(SRST_P_CRU_PERI, 0, 3),
+ PERICRU_RESET_OFFSET(SRST_P_PERI_GRF, 0, 4),
+ PERICRU_RESET_OFFSET(SRST_P_GPIO1, 0, 5),
+ PERICRU_RESET_OFFSET(SRST_DB_GPIO1, 0, 6),
+ PERICRU_RESET_OFFSET(SRST_P_IOC_VCCIO1, 0, 7),
+ PERICRU_RESET_OFFSET(SRST_A_USB3OTG, 0, 8),
+ PERICRU_RESET_OFFSET(SRST_H_USB2HOST, 0, 11),
+ PERICRU_RESET_OFFSET(SRST_H_ARB_USB2HOST, 0, 12),
+ PERICRU_RESET_OFFSET(SRST_P_RTC_TEST, 0, 13),
+
+ /* PERICRU-->SOFTRST_CON01 */
+ PERICRU_RESET_OFFSET(SRST_H_EMMC, 1, 0),
+ PERICRU_RESET_OFFSET(SRST_H_FSPI0, 1, 1),
+ PERICRU_RESET_OFFSET(SRST_H_XIP_FSPI0, 1, 2),
+ PERICRU_RESET_OFFSET(SRST_S_2X_FSPI0, 1, 3),
+ PERICRU_RESET_OFFSET(SRST_UTMI_USB2HOST, 1, 5),
+ PERICRU_RESET_OFFSET(SRST_REF_PIPEPHY, 1, 7),
+ PERICRU_RESET_OFFSET(SRST_P_PIPEPHY, 1, 8),
+ PERICRU_RESET_OFFSET(SRST_P_PIPEPHY_GRF, 1, 9),
+ PERICRU_RESET_OFFSET(SRST_P_USB2PHY, 1, 10),
+ PERICRU_RESET_OFFSET(SRST_POR_USB2PHY, 1, 11),
+ PERICRU_RESET_OFFSET(SRST_OTG_USB2PHY, 1, 12),
+ PERICRU_RESET_OFFSET(SRST_HOST_USB2PHY, 1, 13),
+
+ /* CORECRU-->SOFTRST_CON00 */
+ CORECRU_RESET_OFFSET(SRST_REF_PVTPLL_CORE, 0, 0),
+ CORECRU_RESET_OFFSET(SRST_NCOREPORESET0, 0, 1),
+ CORECRU_RESET_OFFSET(SRST_NCORESET0, 0, 2),
+ CORECRU_RESET_OFFSET(SRST_NCOREPORESET1, 0, 3),
+ CORECRU_RESET_OFFSET(SRST_NCORESET1, 0, 4),
+ CORECRU_RESET_OFFSET(SRST_NCOREPORESET2, 0, 5),
+ CORECRU_RESET_OFFSET(SRST_NCORESET2, 0, 6),
+ CORECRU_RESET_OFFSET(SRST_NCOREPORESET3, 0, 7),
+ CORECRU_RESET_OFFSET(SRST_NCORESET3, 0, 8),
+ CORECRU_RESET_OFFSET(SRST_NDBGRESET, 0, 9),
+ CORECRU_RESET_OFFSET(SRST_NL2RESET, 0, 10),
+
+ /* CORECRU-->SOFTRST_CON01 */
+ CORECRU_RESET_OFFSET(SRST_A_CORE_BIU, 1, 0),
+ CORECRU_RESET_OFFSET(SRST_P_CORE_BIU, 1, 1),
+ CORECRU_RESET_OFFSET(SRST_H_CORE_BIU, 1, 2),
+ CORECRU_RESET_OFFSET(SRST_P_DBG, 1, 3),
+ CORECRU_RESET_OFFSET(SRST_POT_DBG, 1, 4),
+ CORECRU_RESET_OFFSET(SRST_NT_DBG, 1, 5),
+ CORECRU_RESET_OFFSET(SRST_P_CORE_PVTPLL, 1, 6),
+ CORECRU_RESET_OFFSET(SRST_P_CRU_CORE, 1, 7),
+ CORECRU_RESET_OFFSET(SRST_P_CORE_GRF, 1, 8),
+ CORECRU_RESET_OFFSET(SRST_P_DFT2APB, 1, 10),
+
+ /* PMUCRU-->SOFTRST_CON00 */
+ PMUCRU_RESET_OFFSET(SRST_H_PMU_BIU, 0, 0),
+ PMUCRU_RESET_OFFSET(SRST_P_PMU_GPIO0, 0, 7),
+ PMUCRU_RESET_OFFSET(SRST_DB_PMU_GPIO0, 0, 8),
+ PMUCRU_RESET_OFFSET(SRST_P_PMU_HP_TIMER, 0, 10),
+ PMUCRU_RESET_OFFSET(SRST_PMU_HP_TIMER, 0, 11),
+ PMUCRU_RESET_OFFSET(SRST_PMU_32K_HP_TIMER, 0, 12),
+
+ /* PMUCRU-->SOFTRST_CON01 */
+ PMUCRU_RESET_OFFSET(SRST_P_PWM1, 1, 0),
+ PMUCRU_RESET_OFFSET(SRST_PWM1, 1, 1),
+ PMUCRU_RESET_OFFSET(SRST_P_I2C2, 1, 2),
+ PMUCRU_RESET_OFFSET(SRST_I2C2, 1, 3),
+ PMUCRU_RESET_OFFSET(SRST_P_UART0, 1, 4),
+ PMUCRU_RESET_OFFSET(SRST_S_UART0, 1, 5),
+
+ /* PMUCRU-->SOFTRST_CON02 */
+ PMUCRU_RESET_OFFSET(SRST_P_RCOSC_CTRL, 2, 0),
+ PMUCRU_RESET_OFFSET(SRST_REF_RCOSC_CTRL, 2, 2),
+ PMUCRU_RESET_OFFSET(SRST_P_IOC_PMUIO0, 2, 3),
+ PMUCRU_RESET_OFFSET(SRST_P_CRU_PMU, 2, 4),
+ PMUCRU_RESET_OFFSET(SRST_P_PMU_GRF, 2, 5),
+ PMUCRU_RESET_OFFSET(SRST_PREROLL, 2, 7),
+ PMUCRU_RESET_OFFSET(SRST_PREROLL_32K, 2, 8),
+ PMUCRU_RESET_OFFSET(SRST_H_PMU_SRAM, 2, 9),
+
+ /* PMUCRU-->SOFTRST_CON03 */
+ PMUCRU_RESET_OFFSET(SRST_P_WDT_LPMCU, 3, 0),
+ PMUCRU_RESET_OFFSET(SRST_T_WDT_LPMCU, 3, 1),
+ PMUCRU_RESET_OFFSET(SRST_LPMCU_FULL_CLUSTER, 3, 2),
+ PMUCRU_RESET_OFFSET(SRST_LPMCU_PWUP, 3, 3),
+ PMUCRU_RESET_OFFSET(SRST_LPMCU_ONLY_CORE, 3, 4),
+ PMUCRU_RESET_OFFSET(SRST_T_LPMCU_JTAG, 3, 5),
+ PMUCRU_RESET_OFFSET(SRST_P_LPMCU_MAILBOX, 3, 6),
+
+ /* PMU1CRU-->SOFTRST_CON00 */
+ PMU1CRU_RESET_OFFSET(SRST_P_SPI2AHB, 0, 0),
+ PMU1CRU_RESET_OFFSET(SRST_H_SPI2AHB, 0, 1),
+ PMU1CRU_RESET_OFFSET(SRST_H_FSPI1, 0, 2),
+ PMU1CRU_RESET_OFFSET(SRST_H_XIP_FSPI1, 0, 3),
+ PMU1CRU_RESET_OFFSET(SRST_S_1X_FSPI1, 0, 4),
+ PMU1CRU_RESET_OFFSET(SRST_P_IOC_PMUIO1, 0, 5),
+ PMU1CRU_RESET_OFFSET(SRST_P_CRU_PMU1, 0, 6),
+ PMU1CRU_RESET_OFFSET(SRST_P_AUDIO_ADC_PMU, 0, 7),
+ PMU1CRU_RESET_OFFSET(SRST_M_AUDIO_ADC_PMU, 0, 8),
+ PMU1CRU_RESET_OFFSET(SRST_H_PMU1_BIU, 0, 9),
+
+ /* PMU1CRU-->SOFTRST_CON01 */
+ PMU1CRU_RESET_OFFSET(SRST_P_LPDMA, 1, 0),
+ PMU1CRU_RESET_OFFSET(SRST_A_LPDMA, 1, 1),
+ PMU1CRU_RESET_OFFSET(SRST_H_LPSAI, 1, 2),
+ PMU1CRU_RESET_OFFSET(SRST_M_LPSAI, 1, 3),
+ PMU1CRU_RESET_OFFSET(SRST_P_AOA_TDD, 1, 4),
+ PMU1CRU_RESET_OFFSET(SRST_P_AOA_FE, 1, 5),
+ PMU1CRU_RESET_OFFSET(SRST_P_AOA_AAD, 1, 6),
+ PMU1CRU_RESET_OFFSET(SRST_P_AOA_APB, 1, 7),
+ PMU1CRU_RESET_OFFSET(SRST_P_AOA_SRAM, 1, 8),
+
+ /* DDRCRU-->SOFTRST_CON00 */
+ DDRCRU_RESET_OFFSET(SRST_P_DDR_BIU, 0, 1),
+ DDRCRU_RESET_OFFSET(SRST_P_DDRC, 0, 2),
+ DDRCRU_RESET_OFFSET(SRST_P_DDRMON, 0, 3),
+ DDRCRU_RESET_OFFSET(SRST_TIMER_DDRMON, 0, 4),
+ DDRCRU_RESET_OFFSET(SRST_P_DFICTRL, 0, 5),
+ DDRCRU_RESET_OFFSET(SRST_P_DDR_GRF, 0, 6),
+ DDRCRU_RESET_OFFSET(SRST_P_CRU_DDR, 0, 7),
+ DDRCRU_RESET_OFFSET(SRST_P_DDRPHY, 0, 8),
+ DDRCRU_RESET_OFFSET(SRST_P_DMA2DDR, 0, 9),
+
+ /* SUBDDRCRU-->SOFTRST_CON00 */
+ SUBDDRCRU_RESET_OFFSET(SRST_A_SYSMEM_BIU, 0, 0),
+ SUBDDRCRU_RESET_OFFSET(SRST_A_SYSMEM, 0, 1),
+ SUBDDRCRU_RESET_OFFSET(SRST_A_DDR_BIU, 0, 2),
+ SUBDDRCRU_RESET_OFFSET(SRST_A_DDRSCH0_CPU, 0, 3),
+ SUBDDRCRU_RESET_OFFSET(SRST_A_DDRSCH1_NPU, 0, 4),
+ SUBDDRCRU_RESET_OFFSET(SRST_A_DDRSCH2_POE, 0, 5),
+ SUBDDRCRU_RESET_OFFSET(SRST_A_DDRSCH3_VI, 0, 6),
+ SUBDDRCRU_RESET_OFFSET(SRST_CORE_DDRC, 0, 7),
+ SUBDDRCRU_RESET_OFFSET(SRST_DDRMON, 0, 8),
+ SUBDDRCRU_RESET_OFFSET(SRST_DFICTRL, 0, 9),
+ SUBDDRCRU_RESET_OFFSET(SRST_RS, 0, 11),
+ SUBDDRCRU_RESET_OFFSET(SRST_A_DMA2DDR, 0, 12),
+ SUBDDRCRU_RESET_OFFSET(SRST_DDRPHY, 0, 13),
+
+ /* VICRU-->SOFTRST_CON00 */
+ VICRU_RESET_OFFSET(SRST_REF_PVTPLL_ISP, 0, 0),
+ VICRU_RESET_OFFSET(SRST_A_GMAC_BIU, 0, 1),
+ VICRU_RESET_OFFSET(SRST_A_VI_BIU, 0, 2),
+ VICRU_RESET_OFFSET(SRST_H_VI_BIU, 0, 3),
+ VICRU_RESET_OFFSET(SRST_P_VI_BIU, 0, 4),
+ VICRU_RESET_OFFSET(SRST_P_CRU_VI, 0, 5),
+ VICRU_RESET_OFFSET(SRST_P_VI_GRF, 0, 6),
+ VICRU_RESET_OFFSET(SRST_P_VI_PVTPLL, 0, 7),
+ VICRU_RESET_OFFSET(SRST_P_DSMC, 0, 8),
+ VICRU_RESET_OFFSET(SRST_A_DSMC, 0, 9),
+ VICRU_RESET_OFFSET(SRST_H_CAN0, 0, 10),
+ VICRU_RESET_OFFSET(SRST_CAN0, 0, 11),
+ VICRU_RESET_OFFSET(SRST_H_CAN1, 0, 12),
+ VICRU_RESET_OFFSET(SRST_CAN1, 0, 13),
+
+ /* VICRU-->SOFTRST_CON01 */
+ VICRU_RESET_OFFSET(SRST_P_GPIO2, 1, 0),
+ VICRU_RESET_OFFSET(SRST_DB_GPIO2, 1, 1),
+ VICRU_RESET_OFFSET(SRST_P_GPIO4, 1, 2),
+ VICRU_RESET_OFFSET(SRST_DB_GPIO4, 1, 3),
+ VICRU_RESET_OFFSET(SRST_P_GPIO5, 1, 4),
+ VICRU_RESET_OFFSET(SRST_DB_GPIO5, 1, 5),
+ VICRU_RESET_OFFSET(SRST_P_GPIO6, 1, 6),
+ VICRU_RESET_OFFSET(SRST_DB_GPIO6, 1, 7),
+ VICRU_RESET_OFFSET(SRST_P_GPIO7, 1, 8),
+ VICRU_RESET_OFFSET(SRST_DB_GPIO7, 1, 9),
+ VICRU_RESET_OFFSET(SRST_P_IOC_VCCIO2, 1, 10),
+ VICRU_RESET_OFFSET(SRST_P_IOC_VCCIO4, 1, 11),
+ VICRU_RESET_OFFSET(SRST_P_IOC_VCCIO5, 1, 12),
+ VICRU_RESET_OFFSET(SRST_P_IOC_VCCIO6, 1, 13),
+ VICRU_RESET_OFFSET(SRST_P_IOC_VCCIO7, 1, 14),
+
+ /* VICRU-->SOFTRST_CON02 */
+ VICRU_RESET_OFFSET(SRST_CORE_ISP, 2, 0),
+ VICRU_RESET_OFFSET(SRST_H_VICAP, 2, 1),
+ VICRU_RESET_OFFSET(SRST_A_VICAP, 2, 2),
+ VICRU_RESET_OFFSET(SRST_D_VICAP, 2, 3),
+ VICRU_RESET_OFFSET(SRST_ISP0_VICAP, 2, 4),
+ VICRU_RESET_OFFSET(SRST_CORE_VPSS, 2, 5),
+ VICRU_RESET_OFFSET(SRST_CORE_VPSL, 2, 6),
+ VICRU_RESET_OFFSET(SRST_P_CSI2HOST0, 2, 7),
+ VICRU_RESET_OFFSET(SRST_P_CSI2HOST1, 2, 8),
+ VICRU_RESET_OFFSET(SRST_P_CSI2HOST2, 2, 9),
+ VICRU_RESET_OFFSET(SRST_P_CSI2HOST3, 2, 10),
+ VICRU_RESET_OFFSET(SRST_H_SDMMC0, 2, 11),
+ VICRU_RESET_OFFSET(SRST_A_GMAC, 2, 12),
+ VICRU_RESET_OFFSET(SRST_P_CSIPHY0, 2, 13),
+ VICRU_RESET_OFFSET(SRST_P_CSIPHY1, 2, 14),
+
+ /* VICRU-->SOFTRST_CON03 */
+ VICRU_RESET_OFFSET(SRST_P_MACPHY, 3, 0),
+ VICRU_RESET_OFFSET(SRST_MACPHY, 3, 1),
+ VICRU_RESET_OFFSET(SRST_P_SARADC1, 3, 2),
+ VICRU_RESET_OFFSET(SRST_SARADC1, 3, 3),
+ VICRU_RESET_OFFSET(SRST_P_SARADC2, 3, 5),
+ VICRU_RESET_OFFSET(SRST_SARADC2, 3, 6),
+
+ /* VEPUCRU-->SOFTRST_CON00 */
+ VEPUCRU_RESET_OFFSET(SRST_REF_PVTPLL_VEPU, 0, 0),
+ VEPUCRU_RESET_OFFSET(SRST_A_VEPU_BIU, 0, 1),
+ VEPUCRU_RESET_OFFSET(SRST_H_VEPU_BIU, 0, 2),
+ VEPUCRU_RESET_OFFSET(SRST_P_VEPU_BIU, 0, 3),
+ VEPUCRU_RESET_OFFSET(SRST_P_CRU_VEPU, 0, 4),
+ VEPUCRU_RESET_OFFSET(SRST_P_VEPU_GRF, 0, 5),
+ VEPUCRU_RESET_OFFSET(SRST_P_GPIO3, 0, 7),
+ VEPUCRU_RESET_OFFSET(SRST_DB_GPIO3, 0, 8),
+ VEPUCRU_RESET_OFFSET(SRST_P_IOC_VCCIO3, 0, 9),
+ VEPUCRU_RESET_OFFSET(SRST_P_SARADC0, 0, 10),
+ VEPUCRU_RESET_OFFSET(SRST_SARADC0, 0, 11),
+ VEPUCRU_RESET_OFFSET(SRST_H_SDMMC1, 0, 13),
+
+ /* VEPUCRU-->SOFTRST_CON01 */
+ VEPUCRU_RESET_OFFSET(SRST_P_VEPU_PVTPLL, 1, 0),
+ VEPUCRU_RESET_OFFSET(SRST_H_VEPU, 1, 1),
+ VEPUCRU_RESET_OFFSET(SRST_A_VEPU, 1, 2),
+ VEPUCRU_RESET_OFFSET(SRST_CORE_VEPU, 1, 3),
+
+ /* NPUCRU-->SOFTRST_CON00 */
+ NPUCRU_RESET_OFFSET(SRST_REF_PVTPLL_NPU, 0, 0),
+ NPUCRU_RESET_OFFSET(SRST_A_NPU_BIU, 0, 2),
+ NPUCRU_RESET_OFFSET(SRST_H_NPU_BIU, 0, 3),
+ NPUCRU_RESET_OFFSET(SRST_P_NPU_BIU, 0, 4),
+ NPUCRU_RESET_OFFSET(SRST_P_CRU_NPU, 0, 5),
+ NPUCRU_RESET_OFFSET(SRST_P_NPU_GRF, 0, 6),
+ NPUCRU_RESET_OFFSET(SRST_P_NPU_PVTPLL, 0, 8),
+ NPUCRU_RESET_OFFSET(SRST_H_RKNN, 0, 9),
+ NPUCRU_RESET_OFFSET(SRST_A_RKNN, 0, 10),
+
+ /* VDOCRU-->SOFTRST_CON00 */
+ VDOCRU_RESET_OFFSET(SRST_A_RKVDEC_BIU, 0, 0),
+ VDOCRU_RESET_OFFSET(SRST_A_VDO_BIU, 0, 1),
+ VDOCRU_RESET_OFFSET(SRST_H_VDO_BIU, 0, 3),
+ VDOCRU_RESET_OFFSET(SRST_P_VDO_BIU, 0, 4),
+ VDOCRU_RESET_OFFSET(SRST_P_CRU_VDO, 0, 5),
+ VDOCRU_RESET_OFFSET(SRST_P_VDO_GRF, 0, 6),
+ VDOCRU_RESET_OFFSET(SRST_A_RKVDEC, 0, 7),
+ VDOCRU_RESET_OFFSET(SRST_H_RKVDEC, 0, 8),
+ VDOCRU_RESET_OFFSET(SRST_HEVC_CA_RKVDEC, 0, 9),
+ VDOCRU_RESET_OFFSET(SRST_A_VOP, 0, 10),
+ VDOCRU_RESET_OFFSET(SRST_H_VOP, 0, 11),
+ VDOCRU_RESET_OFFSET(SRST_D_VOP, 0, 12),
+ VDOCRU_RESET_OFFSET(SRST_A_OOC, 0, 13),
+ VDOCRU_RESET_OFFSET(SRST_H_OOC, 0, 14),
+ VDOCRU_RESET_OFFSET(SRST_D_OOC, 0, 15),
+
+ /* VDOCRU-->SOFTRST_CON01 */
+ VDOCRU_RESET_OFFSET(SRST_H_RKJPEG, 1, 3),
+ VDOCRU_RESET_OFFSET(SRST_A_RKJPEG, 1, 4),
+ VDOCRU_RESET_OFFSET(SRST_A_RKMMU_DECOM, 1, 5),
+ VDOCRU_RESET_OFFSET(SRST_H_RKMMU_DECOM, 1, 6),
+ VDOCRU_RESET_OFFSET(SRST_D_DECOM, 1, 8),
+ VDOCRU_RESET_OFFSET(SRST_A_DECOM, 1, 9),
+ VDOCRU_RESET_OFFSET(SRST_P_DECOM, 1, 10),
+ VDOCRU_RESET_OFFSET(SRST_P_MIPI_DSI, 1, 12),
+ VDOCRU_RESET_OFFSET(SRST_P_DSIPHY, 1, 13),
+
+ /* VCPCRU-->SOFTRST_CON00 */
+ VCPCRU_RESET_OFFSET(SRST_REF_PVTPLL_VCP, 0, 0),
+ VCPCRU_RESET_OFFSET(SRST_A_VCP_BIU, 0, 1),
+ VCPCRU_RESET_OFFSET(SRST_H_VCP_BIU, 0, 2),
+ VCPCRU_RESET_OFFSET(SRST_P_VCP_BIU, 0, 3),
+ VCPCRU_RESET_OFFSET(SRST_P_CRU_VCP, 0, 4),
+ VCPCRU_RESET_OFFSET(SRST_P_VCP_GRF, 0, 5),
+ VCPCRU_RESET_OFFSET(SRST_P_VCP_PVTPLL, 0, 7),
+ VCPCRU_RESET_OFFSET(SRST_A_AISP_BIU, 0, 8),
+ VCPCRU_RESET_OFFSET(SRST_H_AISP_BIU, 0, 9),
+ VCPCRU_RESET_OFFSET(SRST_CORE_AISP, 0, 13),
+
+ /* VCPCRU-->SOFTRST_CON01 */
+ VCPCRU_RESET_OFFSET(SRST_H_FEC, 1, 0),
+ VCPCRU_RESET_OFFSET(SRST_A_FEC, 1, 1),
+ VCPCRU_RESET_OFFSET(SRST_CORE_FEC, 1, 2),
+ VCPCRU_RESET_OFFSET(SRST_H_AVSP, 1, 3),
+ VCPCRU_RESET_OFFSET(SRST_A_AVSP, 1, 4),
+};
+
+void rv1126b_rst_init(struct device_node *np, void __iomem *reg_base)
+{
+ rockchip_register_softrst_lut(np,
+ rv1126b_register_offset,
+ ARRAY_SIZE(rv1126b_register_offset),
+ reg_base + RV1126B_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+}
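
For context, peripheral drivers do not poke these SOFTRST_CON registers directly; they consume the resets registered above through the kernel's generic reset framework. A minimal, hypothetical consumer sketch follows (the device, the "core" reset name and the DT wiring are illustrative assumptions, not part of this patch):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/reset.h>

	/* Hypothetical example: pulse a peripheral's soft reset at probe time. */
	static int example_pulse_reset(struct device *dev)
	{
		struct reset_control *rstc;
		int ret;

		/* "core" is an assumed reset name from the consumer's DT node. */
		rstc = devm_reset_control_get_exclusive(dev, "core");
		if (IS_ERR(rstc))
			return PTR_ERR(rstc);

		ret = reset_control_assert(rstc);	/* set the SOFTRST bit */
		if (ret)
			return ret;

		return reset_control_deassert(rstc);	/* release the reset */
	}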
diff --git a/drivers/clk/samsung/Kconfig b/drivers/clk/samsung/Kconfig
index 76a494e95027..70a8b82a0136 100644
--- a/drivers/clk/samsung/Kconfig
+++ b/drivers/clk/samsung/Kconfig
@@ -95,6 +95,16 @@ config EXYNOS_CLKOUT
status of certain clocks from the SoC, but it could also be tied to
other devices as an input clock.
+config EXYNOS_ACPM_CLK
+ tristate "Clock driver controlled via ACPM interface"
+ depends on EXYNOS_ACPM_PROTOCOL || (COMPILE_TEST && !EXYNOS_ACPM_PROTOCOL)
+ help
+	  This driver provides support for clocks that are controlled by
+	  firmware implementing the ACPM interface.
+
+	  This driver uses the ACPM interface to interact with the firmware
+	  providing all the clock controls.
+
config TESLA_FSD_COMMON_CLK
bool "Tesla FSD clock controller support" if COMPILE_TEST
depends on COMMON_CLK_SAMSUNG
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index b77fe288e4bb..f3657f2e1b98 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_EXYNOS_5260_COMMON_CLK) += clk-exynos5260.o
obj-$(CONFIG_EXYNOS_5410_COMMON_CLK) += clk-exynos5410.o
obj-$(CONFIG_EXYNOS_5420_COMMON_CLK) += clk-exynos5420.o
obj-$(CONFIG_EXYNOS_5420_COMMON_CLK) += clk-exynos5-subcmu.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-artpec8.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos5433.o
obj-$(CONFIG_EXYNOS_AUDSS_CLK_CON) += clk-exynos-audss.o
obj-$(CONFIG_EXYNOS_CLKOUT) += clk-exynos-clkout.o
@@ -27,6 +28,7 @@ obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos990.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov9.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov920.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-gs101.o
+obj-$(CONFIG_EXYNOS_ACPM_CLK) += clk-acpm.o
obj-$(CONFIG_S3C64XX_COMMON_CLK) += clk-s3c64xx.o
obj-$(CONFIG_S5PV210_COMMON_CLK) += clk-s5pv210.o clk-s5pv210-audss.o
obj-$(CONFIG_TESLA_FSD_COMMON_CLK) += clk-fsd.o
diff --git a/drivers/clk/samsung/clk-acpm.c b/drivers/clk/samsung/clk-acpm.c
new file mode 100644
index 000000000000..b90809ce3f88
--- /dev/null
+++ b/drivers/clk/samsung/clk-acpm.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Samsung Exynos ACPM protocol based clock driver.
+ *
+ * Copyright 2025 Linaro Ltd.
+ */
+
+#include <linux/array_size.h>
+#include <linux/clk-provider.h>
+#include <linux/container_of.h>
+#include <linux/device/devres.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/firmware/samsung/exynos-acpm-protocol.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
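+/*
+ * struct acpm_clk - clock handle proxied to the ACPM firmware
+ * @id: clock index passed to the firmware DVFS ops
+ * @hw: handle exposed to the common clock framework
+ * @mbox_chan_id: mailbox channel used for the firmware requests
+ * @handle: ACPM protocol handle of the parent ACPM device
+ */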
+struct acpm_clk {
+ u32 id;
+ struct clk_hw hw;
+ unsigned int mbox_chan_id;
+ const struct acpm_handle *handle;
+};
+
+struct acpm_clk_variant {
+ const char *name;
+};
+
+struct acpm_clk_driver_data {
+ const struct acpm_clk_variant *clks;
+ unsigned int nr_clks;
+ unsigned int mbox_chan_id;
+};
+
+#define to_acpm_clk(clk) container_of(clk, struct acpm_clk, hw)
+
+#define ACPM_CLK(cname) \
+ { \
+ .name = cname, \
+ }
+
+static const struct acpm_clk_variant gs101_acpm_clks[] = {
+ ACPM_CLK("mif"),
+ ACPM_CLK("int"),
+ ACPM_CLK("cpucl0"),
+ ACPM_CLK("cpucl1"),
+ ACPM_CLK("cpucl2"),
+ ACPM_CLK("g3d"),
+ ACPM_CLK("g3dl2"),
+ ACPM_CLK("tpu"),
+ ACPM_CLK("intcam"),
+ ACPM_CLK("tnr"),
+ ACPM_CLK("cam"),
+ ACPM_CLK("mfc"),
+ ACPM_CLK("disp"),
+ ACPM_CLK("bo"),
+};
+
+static const struct acpm_clk_driver_data acpm_clk_gs101 = {
+ .clks = gs101_acpm_clks,
+ .nr_clks = ARRAY_SIZE(gs101_acpm_clks),
+ .mbox_chan_id = 0,
+};
+
+static unsigned long acpm_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct acpm_clk *clk = to_acpm_clk(hw);
+
+ return clk->handle->ops.dvfs_ops.get_rate(clk->handle,
+ clk->mbox_chan_id, clk->id);
+}
+
+static int acpm_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+	/*
+	 * The firmware decides the actual rate, so hand the requested rate
+	 * back to the caller unchanged. acpm_clk_recalc_rate() is called
+	 * after the rate has been set and reports the rate the clock
+	 * actually runs at.
+	 */
+ return 0;
+}
+
+static int acpm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct acpm_clk *clk = to_acpm_clk(hw);
+
+ return clk->handle->ops.dvfs_ops.set_rate(clk->handle,
+ clk->mbox_chan_id, clk->id, rate);
+}
+
+static const struct clk_ops acpm_clk_ops = {
+ .recalc_rate = acpm_clk_recalc_rate,
+ .determine_rate = acpm_clk_determine_rate,
+ .set_rate = acpm_clk_set_rate,
+};
+
+static int acpm_clk_register(struct device *dev, struct acpm_clk *aclk,
+ const char *name)
+{
+ struct clk_init_data init = {};
+
+ init.name = name;
+ init.ops = &acpm_clk_ops;
+ aclk->hw.init = &init;
+
+ return devm_clk_hw_register(dev, &aclk->hw);
+}
+
+static int acpm_clk_probe(struct platform_device *pdev)
+{
+ const struct acpm_handle *acpm_handle;
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **hws;
+ struct device *dev = &pdev->dev;
+ struct acpm_clk *aclks;
+ unsigned int mbox_chan_id;
+ int i, err, count;
+
+ acpm_handle = devm_acpm_get_by_node(dev, dev->parent->of_node);
+ if (IS_ERR(acpm_handle))
+ return dev_err_probe(dev, PTR_ERR(acpm_handle),
+ "Failed to get acpm handle\n");
+
+ count = acpm_clk_gs101.nr_clks;
+ mbox_chan_id = acpm_clk_gs101.mbox_chan_id;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, count),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = count;
+ hws = clk_data->hws;
+
+ aclks = devm_kcalloc(dev, count, sizeof(*aclks), GFP_KERNEL);
+ if (!aclks)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ struct acpm_clk *aclk = &aclks[i];
+
+ /*
+ * The code assumes the clock IDs start from zero,
+ * are sequential and do not have gaps.
+ */
+ aclk->id = i;
+ aclk->handle = acpm_handle;
+ aclk->mbox_chan_id = mbox_chan_id;
+
+ hws[i] = &aclk->hw;
+
+ err = acpm_clk_register(dev, aclk,
+ acpm_clk_gs101.clks[i].name);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Failed to register clock\n");
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ clk_data);
+}
+
+static const struct platform_device_id acpm_clk_id[] = {
+ { "gs101-acpm-clk" },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, acpm_clk_id);
+
+static struct platform_driver acpm_clk_driver = {
+ .driver = {
+ .name = "acpm-clocks",
+ },
+ .probe = acpm_clk_probe,
+ .id_table = acpm_clk_id,
+};
+module_platform_driver(acpm_clk_driver);
+
+MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@linaro.org>");
+MODULE_DESCRIPTION("Samsung Exynos ACPM clock driver");
+MODULE_LICENSE("GPL");
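
For illustration, consumers see these ACPM clocks through the ordinary clk API: clk_set_rate() ends up in acpm_clk_set_rate() and is forwarded to the firmware, while clk_get_rate() triggers acpm_clk_recalc_rate() and reports what the firmware actually programmed. A minimal, hypothetical consumer sketch (the device, the requested rate and the DT wiring are assumptions, not part of this patch):

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* Hypothetical consumer: request an ACPM-managed clock, set its rate. */
	static int example_use_acpm_clk(struct device *dev)
	{
		struct clk *clk;
		int ret;

		/* Clock referenced by phandle/index from the consumer's DT node. */
		clk = devm_clk_get(dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* Forwarded to the ACPM firmware via dvfs_ops.set_rate(). */
		ret = clk_set_rate(clk, 400 * 1000 * 1000);
		if (ret)
			return ret;

		/* Queries the firmware for the rate it actually set. */
		dev_info(dev, "clock runs at %lu Hz\n", clk_get_rate(clk));
		return 0;
	}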
diff --git a/drivers/clk/samsung/clk-artpec8.c b/drivers/clk/samsung/clk-artpec8.c
new file mode 100644
index 000000000000..0ea7c8b58674
--- /dev/null
+++ b/drivers/clk/samsung/clk-artpec8.c
@@ -0,0 +1,1044 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ * Copyright (c) 2025 Axis Communications AB.
+ * https://www.axis.com
+ *
+ * Common Clock Framework support for ARTPEC-8 SoC.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/axis,artpec8-clk.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+
+/* NOTE: Must be equal to the last clock ID increased by one */
+#define CMU_CMU_NR_CLK (CLK_DOUT_CMU_VPP_CORE + 1)
+#define CMU_BUS_NR_CLK (CLK_DOUT_BUS_PCLK + 1)
+#define CMU_CORE_NR_CLK (CLK_DOUT_CORE_PCLK + 1)
+#define CMU_CPUCL_NR_CLK (CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_ATCLK + 1)
+#define CMU_FSYS_NR_CLK (CLK_GOUT_FSYS_QSPI_IPCLKPORT_SSI_CLK + 1)
+#define CMU_IMEM_NR_CLK (CLK_GOUT_IMEM_PCLK_TMU0_APBIF + 1)
+#define CMU_PERI_NR_CLK (CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_AXI_CLK + 1)
+
+/* Register Offset definitions for CMU_CMU (0x12400000) */
+#define PLL_LOCKTIME_PLL_AUDIO 0x0000
+#define PLL_LOCKTIME_PLL_SHARED0 0x0004
+#define PLL_LOCKTIME_PLL_SHARED1 0x0008
+#define PLL_CON0_PLL_AUDIO 0x0100
+#define PLL_CON0_PLL_SHARED0 0x0120
+#define PLL_CON0_PLL_SHARED1 0x0140
+#define CLK_CON_MUX_CLKCMU_2D 0x1000
+#define CLK_CON_MUX_CLKCMU_3D 0x1004
+#define CLK_CON_MUX_CLKCMU_BUS 0x1008
+#define CLK_CON_MUX_CLKCMU_BUS_DLP 0x100c
+#define CLK_CON_MUX_CLKCMU_CDC_CORE 0x1010
+#define CLK_CON_MUX_CLKCMU_FSYS_SCAN0 0x1014
+#define CLK_CON_MUX_CLKCMU_FSYS_SCAN1 0x1018
+#define CLK_CON_MUX_CLKCMU_IMEM_JPEG 0x101c
+#define CLK_CON_MUX_CLKCMU_PERI_DISP 0x1020
+#define CLK_CON_MUX_CLKCMU_CORE_BUS 0x1024
+#define CLK_CON_MUX_CLKCMU_CORE_DLP 0x1028
+#define CLK_CON_MUX_CLKCMU_CPUCL_SWITCH 0x1030
+#define CLK_CON_MUX_CLKCMU_DLP_CORE 0x1034
+#define CLK_CON_MUX_CLKCMU_FSYS_BUS 0x1038
+#define CLK_CON_MUX_CLKCMU_FSYS_IP 0x103c
+#define CLK_CON_MUX_CLKCMU_IMEM_ACLK 0x1054
+#define CLK_CON_MUX_CLKCMU_MIF_BUSP 0x1080
+#define CLK_CON_MUX_CLKCMU_MIF_SWITCH 0x1084
+#define CLK_CON_MUX_CLKCMU_PERI_IP 0x1088
+#define CLK_CON_MUX_CLKCMU_RSP_CORE 0x108c
+#define CLK_CON_MUX_CLKCMU_TRFM_CORE 0x1090
+#define CLK_CON_MUX_CLKCMU_VCA_ACE 0x1094
+#define CLK_CON_MUX_CLKCMU_VCA_OD 0x1098
+#define CLK_CON_MUX_CLKCMU_VIO_CORE 0x109c
+#define CLK_CON_MUX_CLKCMU_VIP0_CORE 0x10a0
+#define CLK_CON_MUX_CLKCMU_VIP1_CORE 0x10a4
+#define CLK_CON_MUX_CLKCMU_VPP_CORE 0x10a8
+
+#define CLK_CON_DIV_CLKCMU_BUS 0x1800
+#define CLK_CON_DIV_CLKCMU_BUS_DLP 0x1804
+#define CLK_CON_DIV_CLKCMU_CDC_CORE 0x1808
+#define CLK_CON_DIV_CLKCMU_FSYS_SCAN0 0x180c
+#define CLK_CON_DIV_CLKCMU_FSYS_SCAN1 0x1810
+#define CLK_CON_DIV_CLKCMU_IMEM_JPEG 0x1814
+#define CLK_CON_DIV_CLKCMU_MIF_SWITCH 0x1818
+#define CLK_CON_DIV_CLKCMU_CORE_DLP 0x181c
+#define CLK_CON_DIV_CLKCMU_CORE_MAIN 0x1820
+#define CLK_CON_DIV_CLKCMU_PERI_DISP 0x1824
+#define CLK_CON_DIV_CLKCMU_CPUCL_SWITCH 0x1828
+#define CLK_CON_DIV_CLKCMU_DLP_CORE 0x182c
+#define CLK_CON_DIV_CLKCMU_FSYS_BUS 0x1830
+#define CLK_CON_DIV_CLKCMU_FSYS_IP 0x1834
+#define CLK_CON_DIV_CLKCMU_VIO_AUDIO 0x1838
+#define CLK_CON_DIV_CLKCMU_GPU_2D 0x1848
+#define CLK_CON_DIV_CLKCMU_GPU_3D 0x184c
+#define CLK_CON_DIV_CLKCMU_IMEM_ACLK 0x1854
+#define CLK_CON_DIV_CLKCMU_MIF_BUSP 0x1884
+#define CLK_CON_DIV_CLKCMU_PERI_AUDIO 0x1890
+#define CLK_CON_DIV_CLKCMU_PERI_IP 0x1894
+#define CLK_CON_DIV_CLKCMU_RSP_CORE 0x1898
+#define CLK_CON_DIV_CLKCMU_TRFM_CORE 0x189c
+#define CLK_CON_DIV_CLKCMU_VCA_ACE 0x18a0
+#define CLK_CON_DIV_CLKCMU_VCA_OD 0x18a4
+#define CLK_CON_DIV_CLKCMU_VIO_CORE 0x18ac
+#define CLK_CON_DIV_CLKCMU_VIP0_CORE 0x18b0
+#define CLK_CON_DIV_CLKCMU_VIP1_CORE 0x18b4
+#define CLK_CON_DIV_CLKCMU_VPP_CORE 0x18b8
+#define CLK_CON_DIV_PLL_SHARED0_DIV2 0x18bc
+#define CLK_CON_DIV_PLL_SHARED0_DIV3 0x18c0
+#define CLK_CON_DIV_PLL_SHARED0_DIV4 0x18c4
+#define CLK_CON_DIV_PLL_SHARED1_DIV2 0x18c8
+#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x18cc
+#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x18d0
+
+static const unsigned long cmu_cmu_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_AUDIO,
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_CON0_PLL_AUDIO,
+ PLL_CON0_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED1,
+ CLK_CON_MUX_CLKCMU_2D,
+ CLK_CON_MUX_CLKCMU_3D,
+ CLK_CON_MUX_CLKCMU_BUS,
+ CLK_CON_MUX_CLKCMU_BUS_DLP,
+ CLK_CON_MUX_CLKCMU_CDC_CORE,
+ CLK_CON_MUX_CLKCMU_FSYS_SCAN0,
+ CLK_CON_MUX_CLKCMU_FSYS_SCAN1,
+ CLK_CON_MUX_CLKCMU_IMEM_JPEG,
+ CLK_CON_MUX_CLKCMU_PERI_DISP,
+ CLK_CON_MUX_CLKCMU_CORE_BUS,
+ CLK_CON_MUX_CLKCMU_CORE_DLP,
+ CLK_CON_MUX_CLKCMU_CPUCL_SWITCH,
+ CLK_CON_MUX_CLKCMU_DLP_CORE,
+ CLK_CON_MUX_CLKCMU_FSYS_BUS,
+ CLK_CON_MUX_CLKCMU_FSYS_IP,
+ CLK_CON_MUX_CLKCMU_IMEM_ACLK,
+ CLK_CON_MUX_CLKCMU_MIF_BUSP,
+ CLK_CON_MUX_CLKCMU_MIF_SWITCH,
+ CLK_CON_MUX_CLKCMU_PERI_IP,
+ CLK_CON_MUX_CLKCMU_RSP_CORE,
+ CLK_CON_MUX_CLKCMU_TRFM_CORE,
+ CLK_CON_MUX_CLKCMU_VCA_ACE,
+ CLK_CON_MUX_CLKCMU_VCA_OD,
+ CLK_CON_MUX_CLKCMU_VIO_CORE,
+ CLK_CON_MUX_CLKCMU_VIP0_CORE,
+ CLK_CON_MUX_CLKCMU_VIP1_CORE,
+ CLK_CON_MUX_CLKCMU_VPP_CORE,
+ CLK_CON_DIV_CLKCMU_BUS,
+ CLK_CON_DIV_CLKCMU_BUS_DLP,
+ CLK_CON_DIV_CLKCMU_CDC_CORE,
+ CLK_CON_DIV_CLKCMU_FSYS_SCAN0,
+ CLK_CON_DIV_CLKCMU_FSYS_SCAN1,
+ CLK_CON_DIV_CLKCMU_IMEM_JPEG,
+ CLK_CON_DIV_CLKCMU_MIF_SWITCH,
+ CLK_CON_DIV_CLKCMU_CORE_DLP,
+ CLK_CON_DIV_CLKCMU_CORE_MAIN,
+ CLK_CON_DIV_CLKCMU_PERI_DISP,
+ CLK_CON_DIV_CLKCMU_CPUCL_SWITCH,
+ CLK_CON_DIV_CLKCMU_DLP_CORE,
+ CLK_CON_DIV_CLKCMU_FSYS_BUS,
+ CLK_CON_DIV_CLKCMU_FSYS_IP,
+ CLK_CON_DIV_CLKCMU_VIO_AUDIO,
+ CLK_CON_DIV_CLKCMU_GPU_2D,
+ CLK_CON_DIV_CLKCMU_GPU_3D,
+ CLK_CON_DIV_CLKCMU_IMEM_ACLK,
+ CLK_CON_DIV_CLKCMU_MIF_BUSP,
+ CLK_CON_DIV_CLKCMU_PERI_AUDIO,
+ CLK_CON_DIV_CLKCMU_PERI_IP,
+ CLK_CON_DIV_CLKCMU_RSP_CORE,
+ CLK_CON_DIV_CLKCMU_TRFM_CORE,
+ CLK_CON_DIV_CLKCMU_VCA_ACE,
+ CLK_CON_DIV_CLKCMU_VCA_OD,
+ CLK_CON_DIV_CLKCMU_VIO_CORE,
+ CLK_CON_DIV_CLKCMU_VIP0_CORE,
+ CLK_CON_DIV_CLKCMU_VIP1_CORE,
+ CLK_CON_DIV_CLKCMU_VPP_CORE,
+ CLK_CON_DIV_PLL_SHARED0_DIV2,
+ CLK_CON_DIV_PLL_SHARED0_DIV3,
+ CLK_CON_DIV_PLL_SHARED0_DIV4,
+ CLK_CON_DIV_PLL_SHARED1_DIV2,
+ CLK_CON_DIV_PLL_SHARED1_DIV3,
+ CLK_CON_DIV_PLL_SHARED1_DIV4,
+};
+
+static const struct samsung_pll_rate_table artpec8_pll_audio_rates[] __initconst = {
+ PLL_36XX_RATE(25 * MHZ, 589823913U, 47, 1, 1, 12184),
+ PLL_36XX_RATE(25 * MHZ, 393215942U, 47, 3, 0, 12184),
+ PLL_36XX_RATE(25 * MHZ, 294911956U, 47, 1, 2, 12184),
+ PLL_36XX_RATE(25 * MHZ, 100000000U, 32, 2, 2, 0),
+ PLL_36XX_RATE(25 * MHZ, 98303985U, 47, 3, 2, 12184),
+ PLL_36XX_RATE(25 * MHZ, 49151992U, 47, 3, 3, 12184),
+};
+
+static const struct samsung_pll_clock cmu_cmu_pll_clks[] __initconst = {
+ PLL(pll_1017x, CLK_FOUT_SHARED0_PLL, "fout_pll_shared0", "fin_pll",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON0_PLL_SHARED0, NULL),
+ PLL(pll_1017x, CLK_FOUT_SHARED1_PLL, "fout_pll_shared1", "fin_pll",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON0_PLL_SHARED1, NULL),
+ PLL(pll_1031x, CLK_FOUT_AUDIO_PLL, "fout_pll_audio", "fin_pll",
+ PLL_LOCKTIME_PLL_AUDIO, PLL_CON0_PLL_AUDIO, artpec8_pll_audio_rates),
+};
+
+PNAME(mout_clkcmu_bus_bus_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_bus_dlp_p) = { "dout_pll_shared0_div2", "dout_pll_shared0_div4",
+ "dout_pll_shared1_div2", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_core_bus_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared0_div4", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_core_dlp_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_cpucl_switch_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_fsys_bus_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div2",
+ "dout_pll_shared1_div4", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_fsys_ip_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div3",
+ "dout_pll_shared1_div2", "dout_pll_shared0_div3" };
+PNAME(mout_clkcmu_fsys_scan0_p) = { "dout_pll_shared0_div4", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_fsys_scan1_p) = { "dout_pll_shared0_div4", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_imem_imem_p) = { "dout_pll_shared1_div4", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div2" };
+PNAME(mout_clkcmu_imem_jpeg_p) = { "dout_pll_shared0_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div2", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_cdc_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_dlp_core_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_3d_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_2d_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_mif_switch_p) = { "dout_pll_shared0", "dout_pll_shared1",
+ "dout_pll_shared0_div2", "dout_pll_shared0_div3" };
+PNAME(mout_clkcmu_mif_busp_p) = { "dout_pll_shared0_div3", "dout_pll_shared1_div4",
+ "dout_pll_shared0_div4", "dout_pll_shared0_div2" };
+PNAME(mout_clkcmu_peri_disp_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div2",
+ "dout_pll_shared1_div4", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_peri_ip_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div4",
+ "dout_pll_shared1_div4", "dout_pll_shared0_div2" };
+PNAME(mout_clkcmu_rsp_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_trfm_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vca_ace_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vca_od_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vio_core_p) = { "dout_pll_shared0_div3", "dout_pll_shared0_div2",
+ "dout_pll_shared1_div2", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_vip0_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vip1_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vpp_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_pll_shared0_p) = { "fin_pll", "fout_pll_shared0" };
+PNAME(mout_clkcmu_pll_shared1_p) = { "fin_pll", "fout_pll_shared1" };
+PNAME(mout_clkcmu_pll_audio_p) = { "fin_pll", "fout_pll_audio" };
+
+static const struct samsung_fixed_factor_clock cmu_fixed_factor_clks[] __initconst = {
+ FFACTOR(CLK_DOUT_CMU_OTP, "dout_clkcmu_otp", "fin_pll", 1, 8, 0),
+};
+
+static const struct samsung_mux_clock cmu_cmu_mux_clks[] __initconst = {
+ MUX(0, "mout_clkcmu_pll_shared0", mout_clkcmu_pll_shared0_p, PLL_CON0_PLL_SHARED0, 4, 1),
+ MUX(0, "mout_clkcmu_pll_shared1", mout_clkcmu_pll_shared1_p, PLL_CON0_PLL_SHARED1, 4, 1),
+ MUX(0, "mout_clkcmu_pll_audio", mout_clkcmu_pll_audio_p, PLL_CON0_PLL_AUDIO, 4, 1),
+ MUX(0, "mout_clkcmu_bus_bus", mout_clkcmu_bus_bus_p, CLK_CON_MUX_CLKCMU_BUS, 0, 2),
+ MUX(0, "mout_clkcmu_bus_dlp", mout_clkcmu_bus_dlp_p, CLK_CON_MUX_CLKCMU_BUS_DLP, 0, 2),
+ MUX(0, "mout_clkcmu_core_bus", mout_clkcmu_core_bus_p, CLK_CON_MUX_CLKCMU_CORE_BUS, 0, 2),
+ MUX(0, "mout_clkcmu_core_dlp", mout_clkcmu_core_dlp_p, CLK_CON_MUX_CLKCMU_CORE_DLP, 0, 2),
+ MUX(0, "mout_clkcmu_cpucl_switch", mout_clkcmu_cpucl_switch_p,
+ CLK_CON_MUX_CLKCMU_CPUCL_SWITCH, 0, 3),
+ MUX(0, "mout_clkcmu_fsys_bus", mout_clkcmu_fsys_bus_p, CLK_CON_MUX_CLKCMU_FSYS_BUS, 0, 2),
+ MUX(0, "mout_clkcmu_fsys_ip", mout_clkcmu_fsys_ip_p, CLK_CON_MUX_CLKCMU_FSYS_IP, 0, 2),
+ MUX(0, "mout_clkcmu_fsys_scan0", mout_clkcmu_fsys_scan0_p,
+ CLK_CON_MUX_CLKCMU_FSYS_SCAN0, 0, 1),
+ MUX(0, "mout_clkcmu_fsys_scan1", mout_clkcmu_fsys_scan1_p,
+ CLK_CON_MUX_CLKCMU_FSYS_SCAN1, 0, 1),
+ MUX(0, "mout_clkcmu_imem_imem", mout_clkcmu_imem_imem_p,
+ CLK_CON_MUX_CLKCMU_IMEM_ACLK, 0, 2),
+ MUX(0, "mout_clkcmu_imem_jpeg", mout_clkcmu_imem_jpeg_p,
+ CLK_CON_MUX_CLKCMU_IMEM_JPEG, 0, 2),
+ nMUX(0, "mout_clkcmu_cdc_core", mout_clkcmu_cdc_core_p, CLK_CON_MUX_CLKCMU_CDC_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_dlp_core", mout_clkcmu_dlp_core_p, CLK_CON_MUX_CLKCMU_DLP_CORE, 0, 2),
+ MUX(0, "mout_clkcmu_3d", mout_clkcmu_3d_p, CLK_CON_MUX_CLKCMU_3D, 0, 2),
+ MUX(0, "mout_clkcmu_2d", mout_clkcmu_2d_p, CLK_CON_MUX_CLKCMU_2D, 0, 2),
+ MUX(0, "mout_clkcmu_mif_switch", mout_clkcmu_mif_switch_p,
+ CLK_CON_MUX_CLKCMU_MIF_SWITCH, 0, 2),
+ MUX(0, "mout_clkcmu_mif_busp", mout_clkcmu_mif_busp_p, CLK_CON_MUX_CLKCMU_MIF_BUSP, 0, 2),
+ MUX(0, "mout_clkcmu_peri_disp", mout_clkcmu_peri_disp_p,
+ CLK_CON_MUX_CLKCMU_PERI_DISP, 0, 2),
+ MUX(0, "mout_clkcmu_peri_ip", mout_clkcmu_peri_ip_p, CLK_CON_MUX_CLKCMU_PERI_IP, 0, 2),
+ MUX(0, "mout_clkcmu_rsp_core", mout_clkcmu_rsp_core_p, CLK_CON_MUX_CLKCMU_RSP_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_trfm_core", mout_clkcmu_trfm_core_p,
+ CLK_CON_MUX_CLKCMU_TRFM_CORE, 0, 2),
+ MUX(0, "mout_clkcmu_vca_ace", mout_clkcmu_vca_ace_p, CLK_CON_MUX_CLKCMU_VCA_ACE, 0, 2),
+ MUX(0, "mout_clkcmu_vca_od", mout_clkcmu_vca_od_p, CLK_CON_MUX_CLKCMU_VCA_OD, 0, 2),
+ MUX(0, "mout_clkcmu_vio_core", mout_clkcmu_vio_core_p, CLK_CON_MUX_CLKCMU_VIO_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_vip0_core", mout_clkcmu_vip0_core_p,
+ CLK_CON_MUX_CLKCMU_VIP0_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_vip1_core", mout_clkcmu_vip1_core_p,
+ CLK_CON_MUX_CLKCMU_VIP1_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_vpp_core", mout_clkcmu_vpp_core_p, CLK_CON_MUX_CLKCMU_VPP_CORE, 0, 2),
+};
+
+static const struct samsung_div_clock cmu_cmu_div_clks[] __initconst = {
+ DIV(CLK_DOUT_SHARED0_DIV2, "dout_pll_shared0_div2",
+ "mout_clkcmu_pll_shared0", CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1),
+ DIV(CLK_DOUT_SHARED0_DIV3, "dout_pll_shared0_div3",
+ "mout_clkcmu_pll_shared0", CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2),
+ DIV(CLK_DOUT_SHARED0_DIV4, "dout_pll_shared0_div4",
+ "dout_pll_shared0_div2", CLK_CON_DIV_PLL_SHARED0_DIV4, 0, 1),
+ DIV(CLK_DOUT_SHARED1_DIV2, "dout_pll_shared1_div2",
+ "mout_clkcmu_pll_shared1", CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1),
+ DIV(CLK_DOUT_SHARED1_DIV3, "dout_pll_shared1_div3",
+ "mout_clkcmu_pll_shared1", CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2),
+ DIV(CLK_DOUT_SHARED1_DIV4, "dout_pll_shared1_div4",
+ "dout_pll_shared1_div2", CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
+ DIV(CLK_DOUT_CMU_BUS, "dout_clkcmu_bus",
+ "mout_clkcmu_bus_bus", CLK_CON_DIV_CLKCMU_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_BUS_DLP, "dout_clkcmu_bus_dlp",
+ "mout_clkcmu_bus_dlp", CLK_CON_DIV_CLKCMU_BUS_DLP, 0, 4),
+ DIV(CLK_DOUT_CMU_CORE_MAIN, "dout_clkcmu_core_main",
+ "mout_clkcmu_core_bus", CLK_CON_DIV_CLKCMU_CORE_MAIN, 0, 4),
+ DIV(CLK_DOUT_CMU_CORE_DLP, "dout_clkcmu_core_dlp",
+ "mout_clkcmu_core_dlp", CLK_CON_DIV_CLKCMU_CORE_DLP, 0, 4),
+ DIV(CLK_DOUT_CMU_CPUCL_SWITCH, "dout_clkcmu_cpucl_switch",
+ "mout_clkcmu_cpucl_switch", CLK_CON_DIV_CLKCMU_CPUCL_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_FSYS_BUS, "dout_clkcmu_fsys_bus",
+ "mout_clkcmu_fsys_bus", CLK_CON_DIV_CLKCMU_FSYS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_FSYS_IP, "dout_clkcmu_fsys_ip",
+ "mout_clkcmu_fsys_ip", CLK_CON_DIV_CLKCMU_FSYS_IP, 0, 9),
+ DIV(CLK_DOUT_CMU_FSYS_SCAN0, "dout_clkcmu_fsys_scan0",
+ "mout_clkcmu_fsys_scan0", CLK_CON_DIV_CLKCMU_FSYS_SCAN0, 0, 4),
+ DIV(CLK_DOUT_CMU_FSYS_SCAN1, "dout_clkcmu_fsys_scan1",
+ "mout_clkcmu_fsys_scan1", CLK_CON_DIV_CLKCMU_FSYS_SCAN1, 0, 4),
+ DIV(CLK_DOUT_CMU_IMEM_ACLK, "dout_clkcmu_imem_aclk",
+ "mout_clkcmu_imem_imem", CLK_CON_DIV_CLKCMU_IMEM_ACLK, 0, 4),
+ DIV(CLK_DOUT_CMU_IMEM_JPEG, "dout_clkcmu_imem_jpeg",
+ "mout_clkcmu_imem_jpeg", CLK_CON_DIV_CLKCMU_IMEM_JPEG, 0, 4),
+ DIV_F(CLK_DOUT_CMU_CDC_CORE, "dout_clkcmu_cdc_core",
+ "mout_clkcmu_cdc_core", CLK_CON_DIV_CLKCMU_CDC_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_CMU_DLP_CORE, "dout_clkcmu_dlp_core",
+ "mout_clkcmu_dlp_core", CLK_CON_DIV_CLKCMU_DLP_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DOUT_CMU_GPU_3D, "dout_clkcmu_gpu_3d",
+ "mout_clkcmu_3d", CLK_CON_DIV_CLKCMU_GPU_3D, 0, 3),
+ DIV(CLK_DOUT_CMU_GPU_2D, "dout_clkcmu_gpu_2d",
+ "mout_clkcmu_2d", CLK_CON_DIV_CLKCMU_GPU_2D, 0, 4),
+ DIV(CLK_DOUT_CMU_MIF_SWITCH, "dout_clkcmu_mif_switch",
+ "mout_clkcmu_mif_switch", CLK_CON_DIV_CLKCMU_MIF_SWITCH, 0, 4),
+ DIV(CLK_DOUT_CMU_MIF_BUSP, "dout_clkcmu_mif_busp",
+ "mout_clkcmu_mif_busp", CLK_CON_DIV_CLKCMU_MIF_BUSP, 0, 3),
+ DIV(CLK_DOUT_CMU_PERI_DISP, "dout_clkcmu_peri_disp",
+ "mout_clkcmu_peri_disp", CLK_CON_DIV_CLKCMU_PERI_DISP, 0, 4),
+ DIV(CLK_DOUT_CMU_PERI_IP, "dout_clkcmu_peri_ip",
+ "mout_clkcmu_peri_ip", CLK_CON_DIV_CLKCMU_PERI_IP, 0, 4),
+ DIV(CLK_DOUT_CMU_PERI_AUDIO, "dout_clkcmu_peri_audio",
+ "mout_clkcmu_pll_audio", CLK_CON_DIV_CLKCMU_PERI_AUDIO, 0, 4),
+ DIV(CLK_DOUT_CMU_RSP_CORE, "dout_clkcmu_rsp_core",
+ "mout_clkcmu_rsp_core", CLK_CON_DIV_CLKCMU_RSP_CORE, 0, 4),
+ DIV_F(CLK_DOUT_CMU_TRFM_CORE, "dout_clkcmu_trfm_core",
+ "mout_clkcmu_trfm_core", CLK_CON_DIV_CLKCMU_TRFM_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DOUT_CMU_VCA_ACE, "dout_clkcmu_vca_ace",
+ "mout_clkcmu_vca_ace", CLK_CON_DIV_CLKCMU_VCA_ACE, 0, 4),
+ DIV(CLK_DOUT_CMU_VCA_OD, "dout_clkcmu_vca_od",
+ "mout_clkcmu_vca_od", CLK_CON_DIV_CLKCMU_VCA_OD, 0, 4),
+ DIV(CLK_DOUT_CMU_VIO_CORE, "dout_clkcmu_vio_core",
+ "mout_clkcmu_vio_core", CLK_CON_DIV_CLKCMU_VIO_CORE, 0, 4),
+ DIV(CLK_DOUT_CMU_VIO_AUDIO, "dout_clkcmu_vio_audio",
+ "mout_clkcmu_pll_audio", CLK_CON_DIV_CLKCMU_VIO_AUDIO, 0, 4),
+ DIV_F(CLK_DOUT_CMU_VIP0_CORE, "dout_clkcmu_vip0_core",
+ "mout_clkcmu_vip0_core", CLK_CON_DIV_CLKCMU_VIP0_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_CMU_VIP1_CORE, "dout_clkcmu_vip1_core",
+ "mout_clkcmu_vip1_core", CLK_CON_DIV_CLKCMU_VIP1_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_CMU_VPP_CORE, "dout_clkcmu_vpp_core",
+ "mout_clkcmu_vpp_core", CLK_CON_DIV_CLKCMU_VPP_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_cmu_info cmu_cmu_info __initconst = {
+ .pll_clks = cmu_cmu_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cmu_cmu_pll_clks),
+ .fixed_factor_clks = cmu_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(cmu_fixed_factor_clks),
+ .mux_clks = cmu_cmu_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_cmu_mux_clks),
+ .div_clks = cmu_cmu_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_cmu_div_clks),
+ .nr_clk_ids = CMU_CMU_NR_CLK,
+ .clk_regs = cmu_cmu_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_cmu_clk_regs),
+};
+
+/* Register Offset definitions for CMU_BUS (0x12c10000) */
+#define PLL_CON0_MUX_CLK_BUS_ACLK_USER 0x0100
+#define PLL_CON0_MUX_CLK_BUS_DLP_USER 0x0120
+#define CLK_CON_DIV_CLK_BUS_PCLK 0x1800
+
+static const unsigned long cmu_bus_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLK_BUS_ACLK_USER,
+ PLL_CON0_MUX_CLK_BUS_DLP_USER,
+ CLK_CON_DIV_CLK_BUS_PCLK,
+};
+
+PNAME(mout_clk_bus_aclk_user_p) = { "fin_pll", "dout_clkcmu_bus" };
+PNAME(mout_clk_bus_dlp_user_p) = { "fin_pll", "dout_clkcmu_bus_dlp" };
+
+static const struct samsung_mux_clock cmu_bus_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_BUS_ACLK_USER, "mout_clk_bus_aclk_user",
+ mout_clk_bus_aclk_user_p, PLL_CON0_MUX_CLK_BUS_ACLK_USER, 4, 1),
+ MUX(CLK_MOUT_BUS_DLP_USER, "mout_clk_bus_dlp_user",
+ mout_clk_bus_dlp_user_p, PLL_CON0_MUX_CLK_BUS_DLP_USER, 4, 1),
+};
+
+static const struct samsung_div_clock cmu_bus_div_clks[] __initconst = {
+ DIV(CLK_DOUT_BUS_PCLK, "dout_clk_bus_pclk", "mout_clk_bus_aclk_user",
+ CLK_CON_DIV_CLK_BUS_PCLK, 0, 4),
+};
+
+static const struct samsung_cmu_info cmu_bus_info __initconst = {
+ .mux_clks = cmu_bus_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_bus_mux_clks),
+ .div_clks = cmu_bus_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_bus_div_clks),
+ .nr_clk_ids = CMU_BUS_NR_CLK,
+ .clk_regs = cmu_bus_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_bus_clk_regs),
+};
+
+/* Register Offset definitions for CMU_CORE (0x12410000) */
+#define PLL_CON0_MUX_CLK_CORE_ACLK_USER 0x0100
+#define PLL_CON0_MUX_CLK_CORE_DLP_USER 0x0120
+#define CLK_CON_DIV_CLK_CORE_PCLK 0x1800
+
+static const unsigned long cmu_core_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLK_CORE_ACLK_USER,
+ PLL_CON0_MUX_CLK_CORE_DLP_USER,
+ CLK_CON_DIV_CLK_CORE_PCLK,
+};
+
+PNAME(mout_clk_core_aclk_user_p) = { "fin_pll", "dout_clkcmu_core_main" };
+PNAME(mout_clk_core_dlp_user_p) = { "fin_pll", "dout_clkcmu_core_dlp" };
+
+static const struct samsung_mux_clock cmu_core_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CORE_ACLK_USER, "mout_clk_core_aclk_user",
+ mout_clk_core_aclk_user_p, PLL_CON0_MUX_CLK_CORE_ACLK_USER, 4, 1),
+ MUX(CLK_MOUT_CORE_DLP_USER, "mout_clk_core_dlp_user",
+ mout_clk_core_dlp_user_p, PLL_CON0_MUX_CLK_CORE_DLP_USER, 4, 1),
+};
+
+static const struct samsung_div_clock cmu_core_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CORE_PCLK, "dout_clk_core_pclk",
+ "mout_clk_core_aclk_user", CLK_CON_DIV_CLK_CORE_PCLK, 0, 4),
+};
+
+static const struct samsung_cmu_info cmu_core_info __initconst = {
+ .mux_clks = cmu_core_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_core_mux_clks),
+ .div_clks = cmu_core_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_core_div_clks),
+ .nr_clk_ids = CMU_CORE_NR_CLK,
+ .clk_regs = cmu_core_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_core_clk_regs),
+};
+
+/* Register Offset definitions for CMU_CPUCL (0x11410000) */
+#define PLL_LOCKTIME_PLL_CPUCL 0x0000
+#define PLL_CON0_MUX_CLKCMU_CPUCL_SWITCH_USER 0x0120
+#define PLL_CON0_PLL_CPUCL 0x0140
+#define CLK_CON_MUX_CLK_CPUCL_PLL 0x1000
+#define CLK_CON_DIV_CLK_CLUSTER_ACLK 0x1800
+#define CLK_CON_DIV_CLK_CLUSTER_CNTCLK 0x1804
+#define CLK_CON_DIV_CLK_CLUSTER_PCLKDBG 0x1808
+#define CLK_CON_DIV_CLK_CPUCL_CMUREF 0x180c
+#define CLK_CON_DIV_CLK_CPUCL_PCLK 0x1814
+#define CLK_CON_DIV_CLK_CLUSTER_ATCLK 0x1818
+#define CLK_CON_DIV_CLK_CPUCL_DBG 0x181c
+#define CLK_CON_DIV_CLK_CPUCL_PCLKDBG 0x1820
+#define CLK_CON_GAT_CLK_CLUSTER_CPU 0x2008
+#define CLK_CON_GAT_CLK_CPUCL_SHORTSTOP 0x200c
+#define CLK_CON_DMYQCH_CON_CSSYS_QCH 0x3008
+
+static const unsigned long cmu_cpucl_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL,
+ PLL_CON0_MUX_CLKCMU_CPUCL_SWITCH_USER,
+ PLL_CON0_PLL_CPUCL,
+ CLK_CON_MUX_CLK_CPUCL_PLL,
+ CLK_CON_DIV_CLK_CLUSTER_ACLK,
+ CLK_CON_DIV_CLK_CLUSTER_CNTCLK,
+ CLK_CON_DIV_CLK_CLUSTER_PCLKDBG,
+ CLK_CON_DIV_CLK_CPUCL_CMUREF,
+ CLK_CON_DIV_CLK_CPUCL_PCLK,
+ CLK_CON_DIV_CLK_CLUSTER_ATCLK,
+ CLK_CON_DIV_CLK_CPUCL_DBG,
+ CLK_CON_DIV_CLK_CPUCL_PCLKDBG,
+ CLK_CON_GAT_CLK_CLUSTER_CPU,
+ CLK_CON_GAT_CLK_CPUCL_SHORTSTOP,
+ CLK_CON_DMYQCH_CON_CSSYS_QCH,
+};
+
+static const struct samsung_pll_clock cmu_cpucl_pll_clks[] __initconst = {
+ PLL(pll_1017x, CLK_FOUT_CPUCL_PLL, "fout_pll_cpucl", "fin_pll",
+ PLL_LOCKTIME_PLL_CPUCL, PLL_CON0_PLL_CPUCL, NULL),
+};
+
+PNAME(mout_clkcmu_cpucl_switch_user_p) = { "fin_pll", "dout_clkcmu_cpucl_switch" };
+PNAME(mout_pll_cpucl_p) = { "fin_pll", "fout_pll_cpucl" };
+PNAME(mout_clk_cpucl_pll_p) = { "mout_pll_cpucl", "mout_clkcmu_cpucl_switch_user" };
+
+static const struct samsung_mux_clock cmu_cpucl_mux_clks[] __initconst = {
+ MUX_F(0, "mout_pll_cpucl", mout_pll_cpucl_p, PLL_CON0_PLL_CPUCL, 4, 1,
+ CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0),
+ MUX(CLK_MOUT_CPUCL_SWITCH_USER, "mout_clkcmu_cpucl_switch_user",
+ mout_clkcmu_cpucl_switch_user_p, PLL_CON0_MUX_CLKCMU_CPUCL_SWITCH_USER, 4, 1),
+ MUX_F(CLK_MOUT_CPUCL_PLL, "mout_clk_cpucl_pll", mout_clk_cpucl_pll_p,
+ CLK_CON_MUX_CLK_CPUCL_PLL, 0, 1, CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_fixed_factor_clock cpucl_ffactor_clks[] __initconst = {
+ FFACTOR(CLK_DOUT_CPUCL_CPU, "dout_clk_cpucl_cpu",
+ "mout_clk_cpucl_pll", 1, 1, CLK_SET_RATE_PARENT),
+};
+
+static const struct samsung_div_clock cmu_cpucl_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CPUCL_CLUSTER_ACLK, "dout_clk_cluster_aclk",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CLUSTER_ACLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL_CLUSTER_PCLKDBG, "dout_clk_cluster_pclkdbg",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CLUSTER_PCLKDBG, 0, 4),
+ DIV(CLK_DOUT_CPUCL_CLUSTER_CNTCLK, "dout_clk_cluster_cntclk",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CLUSTER_CNTCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL_CLUSTER_ATCLK, "dout_clk_cluster_atclk",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CLUSTER_ATCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL_PCLK, "dout_clk_cpucl_pclk",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CPUCL_PCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL_CMUREF, "dout_clk_cpucl_cmuref",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CPUCL_CMUREF, 0, 3),
+ DIV(CLK_DOUT_CPUCL_DBG, "dout_clk_cpucl_dbg",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CPUCL_DBG, 0, 4),
+ DIV(CLK_DOUT_CPUCL_PCLKDBG, "dout_clk_cpucl_pclkdbg",
+ "dout_clk_cpucl_dbg", CLK_CON_DIV_CLK_CPUCL_PCLKDBG, 0, 4),
+};
+
+static const struct samsung_gate_clock cmu_cpucl_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CPUCL_CLUSTER_CPU, "clk_con_gat_clk_cluster_cpu",
+ "clk_con_gat_clk_cpucl_shortstop", CLK_CON_GAT_CLK_CLUSTER_CPU, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_CPUCL_SHORTSTOP, "clk_con_gat_clk_cpucl_shortstop",
+ "dout_clk_cpucl_cpu", CLK_CON_GAT_CLK_CPUCL_SHORTSTOP, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_PCLKDBG, "cssys_ipclkport_pclkdbg",
+ "dout_clk_cpucl_pclkdbg", CLK_CON_DMYQCH_CON_CSSYS_QCH, 1,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_ATCLK, "cssys_ipclkport_atclk",
+ "dout_clk_cpucl_dbg", CLK_CON_DMYQCH_CON_CSSYS_QCH, 1,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info cmu_cpucl_info __initconst = {
+ .pll_clks = cmu_cpucl_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cmu_cpucl_pll_clks),
+ .fixed_factor_clks = cpucl_ffactor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(cpucl_ffactor_clks),
+ .mux_clks = cmu_cpucl_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_cpucl_mux_clks),
+ .div_clks = cmu_cpucl_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_cpucl_div_clks),
+ .gate_clks = cmu_cpucl_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmu_cpucl_gate_clks),
+ .nr_clk_ids = CMU_CPUCL_NR_CLK,
+ .clk_regs = cmu_cpucl_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_cpucl_clk_regs),
+};
+
+/* Register Offset definitions for CMU_FSYS (0x16c10000) */
+#define PLL_LOCKTIME_PLL_FSYS 0x0004
+#define PLL_CON0_MUX_CLK_FSYS_BUS_USER 0x0120
+#define PLL_CON0_MUX_CLK_FSYS_MMC_USER 0x0140
+#define PLL_CON0_MUX_CLK_FSYS_SCAN0_USER 0x0160
+#define PLL_CON0_MUX_CLK_FSYS_SCAN1_USER 0x0180
+#define PLL_CON0_PLL_FSYS 0x01c0
+#define CLK_CON_DIV_CLK_FSYS_ADC 0x1804
+#define CLK_CON_DIV_CLK_FSYS_BUS300 0x1808
+#define CLK_CON_DIV_CLK_FSYS_BUS_QSPI 0x180c
+#define CLK_CON_DIV_CLK_FSYS_EQOS_25 0x1810
+#define CLK_CON_DIV_CLK_FSYS_EQOS_2P5 0x1814
+#define CLK_CON_DIV_CLK_FSYS_EQOS_500 0x1818
+#define CLK_CON_DIV_CLK_FSYS_EQOS_INT125 0x181c
+#define CLK_CON_DIV_CLK_FSYS_MMC_CARD0 0x1820
+#define CLK_CON_DIV_CLK_FSYS_MMC_CARD1 0x1824
+#define CLK_CON_DIV_CLK_FSYS_OTP_MEM 0x1828
+#define CLK_CON_DIV_CLK_FSYS_PCIE_PHY_REFCLK_SYSPLL 0x182c
+#define CLK_CON_DIV_CLK_FSYS_QSPI 0x1830
+#define CLK_CON_DIV_CLK_FSYS_SCLK_UART 0x1834
+#define CLK_CON_DIV_CLK_FSYS_SFMC_NAND 0x1838
+#define CLK_CON_DIV_SCAN_CLK_FSYS_125 0x183c
+#define CLK_CON_DIV_SCAN_CLK_FSYS_MMC 0x1840
+#define CLK_CON_DIV_SCAN_CLK_FSYS_PCIE_PIPE 0x1844
+#define CLK_CON_FSYS_I2C0_IPCLKPORT_I_PCLK 0x2044
+#define CLK_CON_FSYS_I2C1_IPCLKPORT_I_PCLK 0x2048
+#define CLK_CON_FSYS_UART0_IPCLKPORT_I_PCLK 0x204c
+#define CLK_CON_FSYS_UART0_IPCLKPORT_I_SCLK_UART 0x2050
+#define CLK_CON_MMC0_IPCLKPORT_I_ACLK 0x2070
+#define CLK_CON_MMC1_IPCLKPORT_I_ACLK 0x2078
+#define CLK_CON_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG 0x208c
+#define CLK_CON_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG 0x2090
+#define CLK_CON_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG 0x2094
+#define CLK_CON_PWM_IPCLKPORT_I_PCLK_S0 0x20a0
+#define CLK_CON_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20 0x20bc
+#define CLK_CON_USB20DRD_IPCLKPORT_BUS_CLK_EARLY 0x20c0
+#define CLK_CON_XHB_AHBBR_IPCLKPORT_CLK 0x20c4
+#define CLK_CON_XHB_USB_IPCLKPORT_CLK 0x20cc
+#define CLK_CON_BUS_P_FSYS_IPCLKPORT_QSPICLK 0x201c
+#define CLK_CON_DMYQCH_CON_EQOS_TOP_QCH 0x3008
+#define CLK_CON_DMYQCH_CON_MMC0_QCH 0x300c
+#define CLK_CON_DMYQCH_CON_MMC1_QCH 0x3010
+#define CLK_CON_DMYQCH_CON_PCIE_TOP_QCH 0x3018
+#define CLK_CON_DMYQCH_CON_PCIE_TOP_QCH_REF 0x301c
+#define CLK_CON_DMYQCH_CON_QSPI_QCH 0x3020
+#define CLK_CON_DMYQCH_CON_SFMC_QCH 0x3024
+
+static const unsigned long cmu_fsys_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_FSYS,
+ PLL_CON0_MUX_CLK_FSYS_BUS_USER,
+ PLL_CON0_MUX_CLK_FSYS_MMC_USER,
+ PLL_CON0_MUX_CLK_FSYS_SCAN0_USER,
+ PLL_CON0_MUX_CLK_FSYS_SCAN1_USER,
+ PLL_CON0_PLL_FSYS,
+ CLK_CON_DIV_CLK_FSYS_ADC,
+ CLK_CON_DIV_CLK_FSYS_BUS300,
+ CLK_CON_DIV_CLK_FSYS_BUS_QSPI,
+ CLK_CON_DIV_CLK_FSYS_EQOS_25,
+ CLK_CON_DIV_CLK_FSYS_EQOS_2P5,
+ CLK_CON_DIV_CLK_FSYS_EQOS_500,
+ CLK_CON_DIV_CLK_FSYS_EQOS_INT125,
+ CLK_CON_DIV_CLK_FSYS_MMC_CARD0,
+ CLK_CON_DIV_CLK_FSYS_MMC_CARD1,
+ CLK_CON_DIV_CLK_FSYS_OTP_MEM,
+ CLK_CON_DIV_CLK_FSYS_PCIE_PHY_REFCLK_SYSPLL,
+ CLK_CON_DIV_CLK_FSYS_QSPI,
+ CLK_CON_DIV_CLK_FSYS_SCLK_UART,
+ CLK_CON_DIV_CLK_FSYS_SFMC_NAND,
+ CLK_CON_DIV_SCAN_CLK_FSYS_125,
+ CLK_CON_DIV_SCAN_CLK_FSYS_MMC,
+ CLK_CON_DIV_SCAN_CLK_FSYS_PCIE_PIPE,
+ CLK_CON_FSYS_I2C0_IPCLKPORT_I_PCLK,
+ CLK_CON_FSYS_I2C1_IPCLKPORT_I_PCLK,
+ CLK_CON_FSYS_UART0_IPCLKPORT_I_PCLK,
+ CLK_CON_FSYS_UART0_IPCLKPORT_I_SCLK_UART,
+ CLK_CON_MMC0_IPCLKPORT_I_ACLK,
+ CLK_CON_MMC1_IPCLKPORT_I_ACLK,
+ CLK_CON_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG,
+ CLK_CON_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG,
+ CLK_CON_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG,
+ CLK_CON_PWM_IPCLKPORT_I_PCLK_S0,
+ CLK_CON_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20,
+ CLK_CON_USB20DRD_IPCLKPORT_BUS_CLK_EARLY,
+ CLK_CON_XHB_AHBBR_IPCLKPORT_CLK,
+ CLK_CON_XHB_USB_IPCLKPORT_CLK,
+ CLK_CON_BUS_P_FSYS_IPCLKPORT_QSPICLK,
+ CLK_CON_DMYQCH_CON_EQOS_TOP_QCH,
+ CLK_CON_DMYQCH_CON_MMC0_QCH,
+ CLK_CON_DMYQCH_CON_MMC1_QCH,
+ CLK_CON_DMYQCH_CON_PCIE_TOP_QCH,
+ CLK_CON_DMYQCH_CON_PCIE_TOP_QCH_REF,
+ CLK_CON_DMYQCH_CON_QSPI_QCH,
+ CLK_CON_DMYQCH_CON_SFMC_QCH,
+};
+
+static const struct samsung_pll_clock cmu_fsys_pll_clks[] __initconst = {
+ PLL(pll_1017x, CLK_FOUT_FSYS_PLL, "fout_pll_fsys", "fin_pll",
+ PLL_LOCKTIME_PLL_FSYS, PLL_CON0_PLL_FSYS, NULL),
+};
+
+PNAME(mout_fsys_scan0_user_p) = { "fin_pll", "dout_clkcmu_fsys_scan0" };
+PNAME(mout_fsys_scan1_user_p) = { "fin_pll", "dout_clkcmu_fsys_scan1" };
+PNAME(mout_fsys_bus_user_p) = { "fin_pll", "dout_clkcmu_fsys_bus" };
+PNAME(mout_fsys_mmc_user_p) = { "fin_pll", "dout_clkcmu_fsys_ip" };
+PNAME(mout_fsys_pll_fsys_p) = { "fin_pll", "fout_pll_fsys" };
+
+static const struct samsung_mux_clock cmu_fsys_mux_clks[] __initconst = {
+ MUX(0, "mout_clk_pll_fsys", mout_fsys_pll_fsys_p, PLL_CON0_PLL_FSYS, 4, 1),
+ MUX(CLK_MOUT_FSYS_SCAN0_USER, "mout_fsys_scan0_user",
+ mout_fsys_scan0_user_p, PLL_CON0_MUX_CLK_FSYS_SCAN0_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS_SCAN1_USER, "mout_fsys_scan1_user",
+ mout_fsys_scan1_user_p, PLL_CON0_MUX_CLK_FSYS_SCAN1_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS_BUS_USER, "mout_fsys_bus_user",
+ mout_fsys_bus_user_p, PLL_CON0_MUX_CLK_FSYS_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS_MMC_USER, "mout_fsys_mmc_user",
+ mout_fsys_mmc_user_p, PLL_CON0_MUX_CLK_FSYS_MMC_USER, 4, 1),
+};
+
+static const struct samsung_div_clock cmu_fsys_div_clks[] __initconst = {
+ DIV(CLK_DOUT_FSYS_PCIE_PIPE, "dout_fsys_pcie_pipe", "mout_clk_pll_fsys",
+ CLK_CON_DIV_SCAN_CLK_FSYS_PCIE_PIPE, 0, 4),
+ DIV(CLK_DOUT_FSYS_ADC, "dout_fsys_adc", "mout_clk_pll_fsys",
+ CLK_CON_DIV_CLK_FSYS_ADC, 0, 7),
+ DIV(CLK_DOUT_FSYS_PCIE_PHY_REFCLK_SYSPLL, "dout_fsys_pcie_phy_refclk_syspll",
+ "mout_clk_pll_fsys", CLK_CON_DIV_CLK_FSYS_PCIE_PHY_REFCLK_SYSPLL, 0, 8),
+ DIV(CLK_DOUT_FSYS_QSPI, "dout_fsys_qspi", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_QSPI, 0, 4),
+ DIV(CLK_DOUT_FSYS_EQOS_INT125, "dout_fsys_eqos_int125", "mout_clk_pll_fsys",
+ CLK_CON_DIV_CLK_FSYS_EQOS_INT125, 0, 4),
+ DIV(CLK_DOUT_FSYS_OTP_MEM, "dout_fsys_otp_mem", "fin_pll",
+ CLK_CON_DIV_CLK_FSYS_OTP_MEM, 0, 9),
+ DIV(CLK_DOUT_FSYS_SCLK_UART, "dout_fsys_sclk_uart", "mout_clk_pll_fsys",
+ CLK_CON_DIV_CLK_FSYS_SCLK_UART, 0, 10),
+ DIV(CLK_DOUT_FSYS_SFMC_NAND, "dout_fsys_sfmc_nand", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_SFMC_NAND, 0, 4),
+ DIV(CLK_DOUT_SCAN_CLK_FSYS_125, "dout_scan_clk_fsys_125", "mout_clk_pll_fsys",
+ CLK_CON_DIV_SCAN_CLK_FSYS_125, 0, 4),
+ DIV(CLK_DOUT_FSYS_SCAN_CLK_MMC, "dout_scan_clk_fsys_mmc", "fout_pll_fsys",
+ CLK_CON_DIV_SCAN_CLK_FSYS_MMC, 0, 4),
+ DIV(CLK_DOUT_FSYS_EQOS_25, "dout_fsys_eqos_25", "dout_fsys_eqos_int125",
+ CLK_CON_DIV_CLK_FSYS_EQOS_25, 0, 4),
+ DIV_F(CLK_DOUT_FSYS_EQOS_2p5, "dout_fsys_eqos_2p5", "dout_fsys_eqos_25",
+ CLK_CON_DIV_CLK_FSYS_EQOS_2P5, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV(0, "dout_fsys_eqos_500", "mout_clk_pll_fsys",
+ CLK_CON_DIV_CLK_FSYS_EQOS_500, 0, 4),
+ DIV(CLK_DOUT_FSYS_BUS300, "dout_fsys_bus300", "mout_fsys_bus_user",
+ CLK_CON_DIV_CLK_FSYS_BUS300, 0, 4),
+ DIV(CLK_DOUT_FSYS_BUS_QSPI, "dout_fsys_bus_qspi", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_BUS_QSPI, 0, 4),
+ DIV(CLK_DOUT_FSYS_MMC_CARD0, "dout_fsys_mmc_card0", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_MMC_CARD0, 0, 10),
+ DIV(CLK_DOUT_FSYS_MMC_CARD1, "dout_fsys_mmc_card1", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_MMC_CARD1, 0, 10),
+};
+
+static const struct samsung_gate_clock cmu_fsys_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_FSYS_PCIE_PHY_REFCLK_IN, "pcie_sub_ctrl_inst_0_phy_refclk_in",
+ "dout_fsys_pcie_phy_refclk_syspll", CLK_CON_DMYQCH_CON_PCIE_TOP_QCH_REF, 1,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_I_RGMII_TXCLK_2P5,
+ "eqos_top_ipclkport_i_rgmii_txclk_2p5",
+ "dout_fsys_eqos_2p5", CLK_CON_DMYQCH_CON_EQOS_TOP_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_ACLK_I, "eqos_top_ipclkport_aclk_i",
+ "dout_fsys_bus300", CLK_CON_DMYQCH_CON_EQOS_TOP_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_CLK_CSR_I, "eqos_top_ipclkport_clk_csr_i",
+ "dout_fsys_bus300", CLK_CON_DMYQCH_CON_EQOS_TOP_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_PIPE_PAL_INST_0_I_APB_PCLK, "pipe_pal_inst_0_i_apb_pclk",
+ "dout_fsys_bus300", CLK_CON_DMYQCH_CON_PCIE_TOP_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_QSPI_IPCLKPORT_HCLK, "qspi_ipclkport_hclk",
+ "dout_fsys_bus_qspi", CLK_CON_DMYQCH_CON_QSPI_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_QSPI_IPCLKPORT_SSI_CLK, "qspi_ipclkport_ssi_clk",
+ "dout_fsys_qspi", CLK_CON_DMYQCH_CON_QSPI_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MMC0_IPCLKPORT_SDCLKIN, "mmc0_ipclkport_sdclkin",
+ "dout_fsys_mmc_card0", CLK_CON_DMYQCH_CON_MMC0_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MMC1_IPCLKPORT_SDCLKIN, "mmc1_ipclkport_sdclkin",
+ "dout_fsys_mmc_card1", CLK_CON_DMYQCH_CON_MMC1_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_SFMC_IPCLKPORT_I_ACLK_NAND, "sfmc_ipclkport_i_aclk_nand",
+ "dout_fsys_sfmc_nand", CLK_CON_DMYQCH_CON_SFMC_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_UART0_SCLK_UART, "uart0_sclk", "dout_fsys_sclk_uart",
+ CLK_CON_FSYS_UART0_IPCLKPORT_I_SCLK_UART, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG, "dwc_pcie_ctl_inst_0_mstr_aclk_ug",
+ "mout_fsys_bus_user", CLK_CON_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_DWC_PCIE_CTL_INXT_0_SLV_ACLK_UG, "dwc_pcie_ctl_inst_0_slv_aclk_ug",
+ "mout_fsys_bus_user", CLK_CON_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_I2C0_IPCLKPORT_I_PCLK, "fsys_i2c0_ipclkport_i_pclk", "dout_fsys_bus300",
+ CLK_CON_FSYS_I2C0_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_I2C1_IPCLKPORT_I_PCLK, "fsys_i2c1_ipclkport_i_pclk", "dout_fsys_bus300",
+ CLK_CON_FSYS_I2C1_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_UART0_PCLK, "uart0_pclk", "dout_fsys_bus300",
+ CLK_CON_FSYS_UART0_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_MMC0_IPCLKPORT_I_ACLK, "mmc0_ipclkport_i_aclk", "dout_fsys_bus300",
+ CLK_CON_MMC0_IPCLKPORT_I_ACLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_MMC1_IPCLKPORT_I_ACLK, "mmc1_ipclkport_i_aclk", "dout_fsys_bus300",
+ CLK_CON_MMC1_IPCLKPORT_I_ACLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG, "dwc_pcie_ctl_inst_0_dbi_aclk_ug",
+ "dout_fsys_bus300", CLK_CON_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_PWM_IPCLKPORT_I_PCLK_S0, "pwm_ipclkport_i_pclk_s0", "dout_fsys_bus300",
+ CLK_CON_PWM_IPCLKPORT_I_PCLK_S0, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20, "usb20drd_ipclkport_aclk_phyctrl_20",
+ "dout_fsys_bus300", CLK_CON_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_USB20DRD_IPCLKPORT_BUS_CLK_EARLY, "usb20drd_ipclkport_bus_clk_early",
+ "dout_fsys_bus300", CLK_CON_USB20DRD_IPCLKPORT_BUS_CLK_EARLY, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_XHB_AHBBR_IPCLKPORT_CLK, "xhb_ahbbr_ipclkport_clk", "dout_fsys_bus300",
+ CLK_CON_XHB_AHBBR_IPCLKPORT_CLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_XHB_USB_IPCLKPORT_CLK, "xhb_usb_ipclkport_clk", "dout_fsys_bus300",
+ CLK_CON_XHB_USB_IPCLKPORT_CLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_BUS_QSPI, "bus_p_fsys_ipclkport_qspiclk", "dout_fsys_bus_qspi",
+ CLK_CON_BUS_P_FSYS_IPCLKPORT_QSPICLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info cmu_fsys_info __initconst = {
+ .pll_clks = cmu_fsys_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cmu_fsys_pll_clks),
+ .mux_clks = cmu_fsys_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_fsys_mux_clks),
+ .div_clks = cmu_fsys_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_fsys_div_clks),
+ .gate_clks = cmu_fsys_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmu_fsys_gate_clks),
+ .nr_clk_ids = CMU_FSYS_NR_CLK,
+ .clk_regs = cmu_fsys_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_fsys_clk_regs),
+};
+
+/* Register Offset definitions for CMU_IMEM (0x10010000) */
+#define PLL_CON0_MUX_CLK_IMEM_ACLK_USER 0x0100
+#define PLL_CON0_MUX_CLK_IMEM_JPEG_USER 0x0120
+#define CLK_CON_MUX_CLK_IMEM_GIC_CA53 0x1000
+#define CLK_CON_MUX_CLK_IMEM_GIC_CA5 0x1008
+#define CLK_CON_MCT_IPCLKPORT_PCLK 0x2038
+#define CLK_CON_SFRIF_TMU_IMEM_IPCLKPORT_PCLK 0x2044
+
+static const unsigned long cmu_imem_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLK_IMEM_ACLK_USER,
+ PLL_CON0_MUX_CLK_IMEM_JPEG_USER,
+ CLK_CON_MUX_CLK_IMEM_GIC_CA53,
+ CLK_CON_MUX_CLK_IMEM_GIC_CA5,
+ CLK_CON_MCT_IPCLKPORT_PCLK,
+ CLK_CON_SFRIF_TMU_IMEM_IPCLKPORT_PCLK,
+};
+
+PNAME(mout_imem_aclk_user_p) = { "fin_pll", "dout_clkcmu_imem_aclk" };
+PNAME(mout_imem_gic_ca53_p) = { "mout_imem_aclk_user", "fin_pll" };
+PNAME(mout_imem_gic_ca5_p) = { "mout_imem_aclk_user", "fin_pll" };
+PNAME(mout_imem_jpeg_user_p) = { "fin_pll", "dout_clkcmu_imem_jpeg" };
+
+static const struct samsung_mux_clock cmu_imem_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_IMEM_ACLK_USER, "mout_imem_aclk_user",
+ mout_imem_aclk_user_p, PLL_CON0_MUX_CLK_IMEM_ACLK_USER, 4, 1),
+ MUX(CLK_MOUT_IMEM_GIC_CA53, "mout_imem_gic_ca53",
+ mout_imem_gic_ca53_p, CLK_CON_MUX_CLK_IMEM_GIC_CA53, 0, 1),
+ MUX(CLK_MOUT_IMEM_GIC_CA5, "mout_imem_gic_ca5",
+ mout_imem_gic_ca5_p, CLK_CON_MUX_CLK_IMEM_GIC_CA5, 0, 1),
+ MUX(CLK_MOUT_IMEM_JPEG_USER, "mout_imem_jpeg_user",
+ mout_imem_jpeg_user_p, PLL_CON0_MUX_CLK_IMEM_JPEG_USER, 4, 1),
+};
+
+static const struct samsung_gate_clock cmu_imem_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_IMEM_MCT_PCLK, "mct_pclk", "mout_imem_aclk_user",
+ CLK_CON_MCT_IPCLKPORT_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_IMEM_PCLK_TMU0_APBIF, "sfrif_tmu_imem_ipclkport_pclk", "mout_imem_aclk_user",
+ CLK_CON_SFRIF_TMU_IMEM_IPCLKPORT_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info cmu_imem_info __initconst = {
+ .mux_clks = cmu_imem_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_imem_mux_clks),
+ .gate_clks = cmu_imem_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmu_imem_gate_clks),
+ .nr_clk_ids = CMU_IMEM_NR_CLK,
+ .clk_regs = cmu_imem_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_imem_clk_regs),
+};
+
+static void __init artpec8_clk_cmu_imem_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &cmu_imem_info);
+}
+
+CLK_OF_DECLARE(artpec8_clk_cmu_imem, "axis,artpec8-cmu-imem", artpec8_clk_cmu_imem_init);
+
+/* Register Offset definitions for CMU_PERI (0x16410000) */
+#define PLL_CON0_MUX_CLK_PERI_AUDIO_USER 0x0100
+#define PLL_CON0_MUX_CLK_PERI_DISP_USER 0x0120
+#define PLL_CON0_MUX_CLK_PERI_IP_USER 0x0140
+#define CLK_CON_MUX_CLK_PERI_I2S0 0x1000
+#define CLK_CON_MUX_CLK_PERI_I2S1 0x1004
+#define CLK_CON_DIV_CLK_PERI_DSIM 0x1800
+#define CLK_CON_DIV_CLK_PERI_I2S0 0x1804
+#define CLK_CON_DIV_CLK_PERI_I2S1 0x1808
+#define CLK_CON_DIV_CLK_PERI_PCLK 0x180c
+#define CLK_CON_DIV_CLK_PERI_SPI 0x1810
+#define CLK_CON_DIV_CLK_PERI_UART1 0x1814
+#define CLK_CON_DIV_CLK_PERI_UART2 0x1818
+#define CLK_CON_APB_ASYNC_DSIM_IPCLKPORT_PCLKS 0x2004
+#define CLK_CON_PERI_I2C2_IPCLKPORT_I_PCLK 0x2030
+#define CLK_CON_PERI_I2C3_IPCLKPORT_I_PCLK 0x2034
+#define CLK_CON_PERI_SPI0_IPCLKPORT_I_PCLK 0x2048
+#define CLK_CON_PERI_SPI0_IPCLKPORT_I_SCLK_SPI 0x204c
+#define CLK_CON_PERI_UART1_IPCLKPORT_I_PCLK 0x2050
+#define CLK_CON_PERI_UART1_IPCLKPORT_I_SCLK_UART 0x2054
+#define CLK_CON_PERI_UART2_IPCLKPORT_I_PCLK 0x2058
+#define CLK_CON_PERI_UART2_IPCLKPORT_I_SCLK_UART 0x205c
+#define CLK_CON_DMYQCH_CON_AUDIO_OUT_QCH 0x3000
+#define CLK_CON_DMYQCH_CON_DMA4DSIM_QCH 0x3004
+#define CLK_CON_DMYQCH_CON_PERI_I2SSC0_QCH 0x3008
+#define CLK_CON_DMYQCH_CON_PERI_I2SSC1_QCH 0x300c
+
+static const unsigned long cmu_peri_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLK_PERI_AUDIO_USER,
+ PLL_CON0_MUX_CLK_PERI_DISP_USER,
+ PLL_CON0_MUX_CLK_PERI_IP_USER,
+ CLK_CON_MUX_CLK_PERI_I2S0,
+ CLK_CON_MUX_CLK_PERI_I2S1,
+ CLK_CON_DIV_CLK_PERI_DSIM,
+ CLK_CON_DIV_CLK_PERI_I2S0,
+ CLK_CON_DIV_CLK_PERI_I2S1,
+ CLK_CON_DIV_CLK_PERI_PCLK,
+ CLK_CON_DIV_CLK_PERI_SPI,
+ CLK_CON_DIV_CLK_PERI_UART1,
+ CLK_CON_DIV_CLK_PERI_UART2,
+ CLK_CON_APB_ASYNC_DSIM_IPCLKPORT_PCLKS,
+ CLK_CON_PERI_I2C2_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_I2C3_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_SPI0_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_SPI0_IPCLKPORT_I_SCLK_SPI,
+ CLK_CON_PERI_UART1_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_UART1_IPCLKPORT_I_SCLK_UART,
+ CLK_CON_PERI_UART2_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_UART2_IPCLKPORT_I_SCLK_UART,
+ CLK_CON_DMYQCH_CON_AUDIO_OUT_QCH,
+ CLK_CON_DMYQCH_CON_DMA4DSIM_QCH,
+ CLK_CON_DMYQCH_CON_PERI_I2SSC0_QCH,
+ CLK_CON_DMYQCH_CON_PERI_I2SSC1_QCH,
+};
+
+static const struct samsung_fixed_rate_clock peri_fixed_clks[] __initconst = {
+ FRATE(0, "clk_peri_audio", NULL, 0, 100000000),
+};
+
+PNAME(mout_peri_ip_user_p) = { "fin_pll", "dout_clkcmu_peri_ip" };
+PNAME(mout_peri_audio_user_p) = { "fin_pll", "dout_clkcmu_peri_audio" };
+PNAME(mout_peri_disp_user_p) = { "fin_pll", "dout_clkcmu_peri_disp" };
+PNAME(mout_peri_i2s0_p) = { "dout_peri_i2s0", "clk_peri_audio" };
+PNAME(mout_peri_i2s1_p) = { "dout_peri_i2s1", "clk_peri_audio" };
+
+static const struct samsung_mux_clock cmu_peri_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERI_IP_USER, "mout_peri_ip_user", mout_peri_ip_user_p,
+ PLL_CON0_MUX_CLK_PERI_IP_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_AUDIO_USER, "mout_peri_audio_user",
+ mout_peri_audio_user_p, PLL_CON0_MUX_CLK_PERI_AUDIO_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_DISP_USER, "mout_peri_disp_user", mout_peri_disp_user_p,
+ PLL_CON0_MUX_CLK_PERI_DISP_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_I2S0, "mout_peri_i2s0", mout_peri_i2s0_p,
+ CLK_CON_MUX_CLK_PERI_I2S0, 0, 1),
+ MUX(CLK_MOUT_PERI_I2S1, "mout_peri_i2s1", mout_peri_i2s1_p,
+ CLK_CON_MUX_CLK_PERI_I2S1, 0, 1),
+};
+
+static const struct samsung_div_clock cmu_peri_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERI_SPI, "dout_peri_spi", "mout_peri_ip_user",
+ CLK_CON_DIV_CLK_PERI_SPI, 0, 10),
+ DIV(CLK_DOUT_PERI_UART1, "dout_peri_uart1", "mout_peri_ip_user",
+ CLK_CON_DIV_CLK_PERI_UART1, 0, 10),
+ DIV(CLK_DOUT_PERI_UART2, "dout_peri_uart2", "mout_peri_ip_user",
+ CLK_CON_DIV_CLK_PERI_UART2, 0, 10),
+ DIV(CLK_DOUT_PERI_PCLK, "dout_peri_pclk", "mout_peri_ip_user",
+ CLK_CON_DIV_CLK_PERI_PCLK, 0, 4),
+ DIV(CLK_DOUT_PERI_I2S0, "dout_peri_i2s0", "mout_peri_audio_user",
+ CLK_CON_DIV_CLK_PERI_I2S0, 0, 4),
+ DIV(CLK_DOUT_PERI_I2S1, "dout_peri_i2s1", "mout_peri_audio_user",
+ CLK_CON_DIV_CLK_PERI_I2S1, 0, 4),
+ DIV(CLK_DOUT_PERI_DSIM, "dout_peri_dsim", "mout_peri_disp_user",
+ CLK_CON_DIV_CLK_PERI_DSIM, 0, 4),
+};
+
+static const struct samsung_gate_clock cmu_peri_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_APB_CLK, "dma4dsim_ipclkport_clk_apb_clk",
+ "dout_peri_pclk", CLK_CON_DMYQCH_CON_DMA4DSIM_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2SSC0_IPCLKPORT_CLK_HST, "i2ssc0_ipclkport_clk_hst", "dout_peri_pclk",
+ CLK_CON_DMYQCH_CON_PERI_I2SSC0_QCH, 1, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERI_I2SSC1_IPCLKPORT_CLK_HST, "i2ssc1_ipclkport_clk_hst", "dout_peri_pclk",
+ CLK_CON_DMYQCH_CON_PERI_I2SSC1_QCH, 1, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERI_AUDIO_OUT_IPCLKPORT_CLK, "audio_out_ipclkport_clk",
+ "mout_peri_audio_user", CLK_CON_DMYQCH_CON_AUDIO_OUT_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2SSC0_IPCLKPORT_CLK, "peri_i2ssc0_ipclkport_clk", "mout_peri_i2s0",
+ CLK_CON_DMYQCH_CON_PERI_I2SSC0_QCH, 1, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERI_I2SSC1_IPCLKPORT_CLK, "peri_i2ssc1_ipclkport_clk", "mout_peri_i2s1",
+ CLK_CON_DMYQCH_CON_PERI_I2SSC1_QCH, 1, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_AXI_CLK, "dma4dsim_ipclkport_clk_axi_clk",
+ "mout_peri_disp_user", CLK_CON_DMYQCH_CON_DMA4DSIM_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI0_SCLK_SPI, "peri_spi0_ipclkport_i_sclk_spi", "dout_peri_spi",
+ CLK_CON_PERI_SPI0_IPCLKPORT_I_SCLK_SPI, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_UART1_SCLK_UART, "uart1_sclk", "dout_peri_uart1",
+ CLK_CON_PERI_UART1_IPCLKPORT_I_SCLK_UART, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_UART2_SCLK_UART, "uart2_sclk", "dout_peri_uart2",
+ CLK_CON_PERI_UART2_IPCLKPORT_I_SCLK_UART, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_APB_ASYNC_DSIM_IPCLKPORT_PCLKS, "apb_async_dsim_ipclkport_pclks",
+ "dout_peri_pclk", CLK_CON_APB_ASYNC_DSIM_IPCLKPORT_PCLKS, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_I2C2_IPCLKPORT_I_PCLK, "peri_i2c2_ipclkport_i_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_I2C2_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_I2C3_IPCLKPORT_I_PCLK, "peri_i2c3_ipclkport_i_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_I2C3_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_SPI0_PCLK, "peri_spi0_ipclkport_i_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_SPI0_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_UART1_PCLK, "uart1_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_UART1_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_UART2_PCLK, "uart2_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_UART2_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info cmu_peri_info __initconst = {
+ .mux_clks = cmu_peri_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_peri_mux_clks),
+ .div_clks = cmu_peri_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_peri_div_clks),
+ .gate_clks = cmu_peri_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmu_peri_gate_clks),
+ .fixed_clks = peri_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(peri_fixed_clks),
+ .nr_clk_ids = CMU_PERI_NR_CLK,
+ .clk_regs = cmu_peri_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_peri_clk_regs),
+};
+
+/**
+ * artpec8_cmu_probe - Probe function for ARTPEC platform clocks
+ * @pdev: Pointer to platform device
+ *
+ * Configure the clock hierarchy for the clock domains of the ARTPEC platform.
+ */
+static int __init artpec8_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id artpec8_cmu_of_match[] = {
+ {
+ .compatible = "axis,artpec8-cmu-cmu",
+ .data = &cmu_cmu_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-bus",
+ .data = &cmu_bus_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-core",
+ .data = &cmu_core_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-cpucl",
+ .data = &cmu_cpucl_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-fsys",
+ .data = &cmu_fsys_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-peri",
+ .data = &cmu_peri_info,
+ }, {
+ },
+};
+
+static struct platform_driver artpec8_cmu_driver __refdata = {
+ .driver = {
+ .name = "artpec8-cmu",
+ .of_match_table = artpec8_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = artpec8_cmu_probe,
+};
+
+static int __init artpec8_cmu_init(void)
+{
+ return platform_driver_register(&artpec8_cmu_driver);
+}
+core_initcall(artpec8_cmu_init);
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 97982662e1a6..300f8d5d3c48 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -243,7 +243,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
/*
* In Exynos4210, ATB clock parent is also mout_core. So
- * ATB clock also needs to be mantained at safe speed.
+ * ATB clock also needs to be maintained at safe speed.
*/
alt_div |= E4210_DIV0_ATB_MASK;
alt_div_mask |= E4210_DIV0_ATB_MASK;
@@ -567,12 +567,14 @@ static int exynos850_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
/* -------------------------------------------------------------------------- */
/* Common round rate callback usable for all types of CPU clocks */
-static long exynos_cpuclk_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int exynos_cpuclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_hw *parent = clk_hw_get_parent(hw);
- *prate = clk_hw_round_rate(parent, drate);
- return *prate;
+ req->best_parent_rate = clk_hw_round_rate(parent, req->rate);
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
/* Common recalc rate callback usable for all types of CPU clocks */
@@ -591,7 +593,7 @@ static unsigned long exynos_cpuclk_recalc_rate(struct clk_hw *hw,
static const struct clk_ops exynos_cpuclk_clk_ops = {
.recalc_rate = exynos_cpuclk_recalc_rate,
- .round_rate = exynos_cpuclk_round_rate,
+ .determine_rate = exynos_cpuclk_determine_rate,
};
/*
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 5f1a4f5e2e59..5b21025338bd 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -175,6 +175,7 @@ static int exynos_clkout_probe(struct platform_device *pdev)
clkout->mux.shift = EXYNOS_CLKOUT_MUX_SHIFT;
clkout->mux.lock = &clkout->slock;
+ clkout->data.num = EXYNOS_CLKOUT_NR_CLKS;
clkout->data.hws[0] = clk_hw_register_composite(NULL, "clkout",
parent_names, parent_count, &clkout->mux.hw,
&clk_mux_ops, NULL, NULL, &clkout->gate.hw,
@@ -185,7 +186,6 @@ static int exynos_clkout_probe(struct platform_device *pdev)
goto err_unmap;
}
- clkout->data.num = EXYNOS_CLKOUT_NR_CLKS;
ret = of_clk_add_hw_provider(clkout->np, of_clk_hw_onecell_get, &clkout->data);
if (ret)
goto err_clk_unreg;
diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
index cf7e08cca78e..56f27697c76b 100644
--- a/drivers/clk/samsung/clk-exynos850.c
+++ b/drivers/clk/samsung/clk-exynos850.c
@@ -1360,7 +1360,7 @@ static const unsigned long cpucl1_clk_regs[] __initconst = {
CLK_CON_GAT_GATE_CLK_CPUCL1_CPU,
};
-/* List of parent clocks for Muxes in CMU_CPUCL0 */
+/* List of parent clocks for Muxes in CMU_CPUCL1 */
PNAME(mout_pll_cpucl1_p) = { "oscclk", "fout_cpucl1_pll" };
PNAME(mout_cpucl1_switch_user_p) = { "oscclk", "dout_cpucl1_switch" };
PNAME(mout_cpucl1_dbg_user_p) = { "oscclk", "dout_cpucl1_dbg" };
diff --git a/drivers/clk/samsung/clk-exynos990.c b/drivers/clk/samsung/clk-exynos990.c
index 8d3f193d2b4d..6277dd557fab 100644
--- a/drivers/clk/samsung/clk-exynos990.c
+++ b/drivers/clk/samsung/clk-exynos990.c
@@ -17,8 +17,10 @@
#include "clk-pll.h"
/* NOTE: Must be equal to the last clock ID increased by one */
-#define CLKS_NR_TOP (CLK_GOUT_CMU_VRA_BUS + 1)
-#define CLKS_NR_HSI0 (CLK_GOUT_HSI0_XIU_D_HSI0_ACLK + 1)
+#define CLKS_NR_TOP (CLK_DOUT_CMU_CLK_CMUREF + 1)
+#define CLKS_NR_HSI0 (CLK_GOUT_HSI0_LHS_ACEL_D_HSI0_CLK + 1)
+#define CLKS_NR_PERIC0 (CLK_GOUT_PERIC0_SYSREG_PCLK + 1)
+#define CLKS_NR_PERIC1 (CLK_GOUT_PERIC1_XIU_P_ACLK + 1)
#define CLKS_NR_PERIS (CLK_GOUT_PERIS_OTP_CON_TOP_OSCCLK + 1)
/* ---- CMU_TOP ------------------------------------------------------------- */
@@ -45,6 +47,7 @@
#define PLL_CON3_PLL_SHARED3 0x024c
#define PLL_CON0_PLL_SHARED4 0x0280
#define PLL_CON3_PLL_SHARED4 0x028c
+#define CLK_CON_MUX_CLKCMU_DPU_BUS 0x1000
#define CLK_CON_MUX_MUX_CLKCMU_APM_BUS 0x1004
#define CLK_CON_MUX_MUX_CLKCMU_AUD_CPU 0x1008
#define CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS 0x100c
@@ -103,6 +106,8 @@
#define CLK_CON_MUX_MUX_CLKCMU_SSP_BUS 0x10e0
#define CLK_CON_MUX_MUX_CLKCMU_TNR_BUS 0x10e4
#define CLK_CON_MUX_MUX_CLKCMU_VRA_BUS 0x10e8
+#define CLK_CON_MUX_MUX_CLK_CMU_CMUREF 0x10f0
+#define CLK_CON_MUX_MUX_CMU_CMUREF 0x10f4
#define CLK_CON_DIV_CLKCMU_APM_BUS 0x1800
#define CLK_CON_DIV_CLKCMU_AUD_CPU 0x1804
#define CLK_CON_DIV_CLKCMU_BUS0_BUS 0x1808
@@ -162,6 +167,7 @@
#define CLK_CON_DIV_CLKCMU_VRA_BUS 0x18e0
#define CLK_CON_DIV_DIV_CLKCMU_DPU 0x18e8
#define CLK_CON_DIV_DIV_CLKCMU_DPU_ALT 0x18ec
+#define CLK_CON_DIV_DIV_CLK_CMU_CMUREF 0x18f0
#define CLK_CON_DIV_PLL_SHARED0_DIV2 0x18f4
#define CLK_CON_DIV_PLL_SHARED0_DIV3 0x18f8
#define CLK_CON_DIV_PLL_SHARED0_DIV4 0x18fc
@@ -239,13 +245,21 @@ static const unsigned long top_clk_regs[] __initconst = {
PLL_LOCKTIME_PLL_SHARED2,
PLL_LOCKTIME_PLL_SHARED3,
PLL_LOCKTIME_PLL_SHARED4,
+ PLL_CON0_PLL_G3D,
PLL_CON3_PLL_G3D,
+ PLL_CON0_PLL_MMC,
PLL_CON3_PLL_MMC,
+ PLL_CON0_PLL_SHARED0,
PLL_CON3_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED1,
PLL_CON3_PLL_SHARED1,
+ PLL_CON0_PLL_SHARED2,
PLL_CON3_PLL_SHARED2,
+ PLL_CON0_PLL_SHARED3,
PLL_CON3_PLL_SHARED3,
+ PLL_CON0_PLL_SHARED4,
PLL_CON3_PLL_SHARED4,
+ CLK_CON_MUX_CLKCMU_DPU_BUS,
CLK_CON_MUX_MUX_CLKCMU_APM_BUS,
CLK_CON_MUX_MUX_CLKCMU_AUD_CPU,
CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS,
@@ -304,6 +318,8 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_MUX_MUX_CLKCMU_SSP_BUS,
CLK_CON_MUX_MUX_CLKCMU_TNR_BUS,
CLK_CON_MUX_MUX_CLKCMU_VRA_BUS,
+ CLK_CON_MUX_MUX_CLK_CMU_CMUREF,
+ CLK_CON_MUX_MUX_CMU_CMUREF,
CLK_CON_DIV_CLKCMU_APM_BUS,
CLK_CON_DIV_CLKCMU_AUD_CPU,
CLK_CON_DIV_CLKCMU_BUS0_BUS,
@@ -363,6 +379,7 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_DIV_CLKCMU_VRA_BUS,
CLK_CON_DIV_DIV_CLKCMU_DPU,
CLK_CON_DIV_DIV_CLKCMU_DPU_ALT,
+ CLK_CON_DIV_DIV_CLK_CMU_CMUREF,
CLK_CON_DIV_PLL_SHARED0_DIV2,
CLK_CON_DIV_PLL_SHARED0_DIV3,
CLK_CON_DIV_PLL_SHARED0_DIV4,
@@ -458,6 +475,8 @@ PNAME(mout_pll_shared3_p) = { "oscclk", "fout_shared3_pll" };
PNAME(mout_pll_shared4_p) = { "oscclk", "fout_shared4_pll" };
PNAME(mout_pll_mmc_p) = { "oscclk", "fout_mmc_pll" };
PNAME(mout_pll_g3d_p) = { "oscclk", "fout_g3d_pll" };
+PNAME(mout_cmu_dpu_bus_p) = { "dout_cmu_dpu",
+ "dout_cmu_dpu_alt" };
PNAME(mout_cmu_apm_bus_p) = { "dout_cmu_shared0_div2",
"dout_cmu_shared2_div2" };
PNAME(mout_cmu_aud_cpu_p) = { "dout_cmu_shared0_div2",
@@ -672,6 +691,12 @@ PNAME(mout_cmu_vra_bus_p) = { "dout_cmu_shared0_div3",
"dout_cmu_shared4_div2",
"dout_cmu_shared0_div4",
"dout_cmu_shared4_div3" };
+PNAME(mout_cmu_cmuref_p) = { "oscclk",
+ "dout_cmu_clk_cmuref" };
+PNAME(mout_cmu_clk_cmuref_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
/*
* Register name to clock name mangling strategy used in this file
@@ -689,19 +714,21 @@ PNAME(mout_cmu_vra_bus_p) = { "dout_cmu_shared0_div3",
static const struct samsung_mux_clock top_mux_clks[] __initconst = {
MUX(CLK_MOUT_PLL_SHARED0, "mout_pll_shared0", mout_pll_shared0_p,
- PLL_CON3_PLL_SHARED0, 4, 1),
+ PLL_CON0_PLL_SHARED0, 4, 1),
MUX(CLK_MOUT_PLL_SHARED1, "mout_pll_shared1", mout_pll_shared1_p,
- PLL_CON3_PLL_SHARED1, 4, 1),
+ PLL_CON0_PLL_SHARED1, 4, 1),
MUX(CLK_MOUT_PLL_SHARED2, "mout_pll_shared2", mout_pll_shared2_p,
- PLL_CON3_PLL_SHARED2, 4, 1),
+ PLL_CON0_PLL_SHARED2, 4, 1),
MUX(CLK_MOUT_PLL_SHARED3, "mout_pll_shared3", mout_pll_shared3_p,
- PLL_CON3_PLL_SHARED3, 4, 1),
+ PLL_CON0_PLL_SHARED3, 4, 1),
MUX(CLK_MOUT_PLL_SHARED4, "mout_pll_shared4", mout_pll_shared4_p,
PLL_CON0_PLL_SHARED4, 4, 1),
MUX(CLK_MOUT_PLL_MMC, "mout_pll_mmc", mout_pll_mmc_p,
PLL_CON0_PLL_MMC, 4, 1),
MUX(CLK_MOUT_PLL_G3D, "mout_pll_g3d", mout_pll_g3d_p,
PLL_CON0_PLL_G3D, 4, 1),
+ MUX(CLK_MOUT_CMU_DPU_BUS, "mout_cmu_dpu_bus",
+ mout_cmu_dpu_bus_p, CLK_CON_MUX_CLKCMU_DPU_BUS, 0, 1),
MUX(CLK_MOUT_CMU_APM_BUS, "mout_cmu_apm_bus",
mout_cmu_apm_bus_p, CLK_CON_MUX_MUX_CLKCMU_APM_BUS, 0, 1),
MUX(CLK_MOUT_CMU_AUD_CPU, "mout_cmu_aud_cpu",
@@ -759,11 +786,11 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
MUX(CLK_MOUT_CMU_DPU_ALT, "mout_cmu_dpu_alt",
mout_cmu_dpu_alt_p, CLK_CON_MUX_MUX_CLKCMU_DPU_ALT, 0, 2),
MUX(CLK_MOUT_CMU_DSP_BUS, "mout_cmu_dsp_bus",
- mout_cmu_dsp_bus_p, CLK_CON_MUX_MUX_CLKCMU_DSP_BUS, 0, 2),
+ mout_cmu_dsp_bus_p, CLK_CON_MUX_MUX_CLKCMU_DSP_BUS, 0, 3),
MUX(CLK_MOUT_CMU_G2D_G2D, "mout_cmu_g2d_g2d",
mout_cmu_g2d_g2d_p, CLK_CON_MUX_MUX_CLKCMU_G2D_G2D, 0, 2),
MUX(CLK_MOUT_CMU_G2D_MSCL, "mout_cmu_g2d_mscl",
- mout_cmu_g2d_mscl_p, CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL, 0, 1),
+ mout_cmu_g2d_mscl_p, CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL, 0, 2),
MUX(CLK_MOUT_CMU_HPM, "mout_cmu_hpm",
mout_cmu_hpm_p, CLK_CON_MUX_MUX_CLKCMU_HPM, 0, 2),
MUX(CLK_MOUT_CMU_HSI0_BUS, "mout_cmu_hsi0_bus",
@@ -775,7 +802,7 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
0, 2),
MUX(CLK_MOUT_CMU_HSI0_USBDP_DEBUG, "mout_cmu_hsi0_usbdp_debug",
mout_cmu_hsi0_usbdp_debug_p,
- CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG, 0, 2),
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG, 0, 1),
MUX(CLK_MOUT_CMU_HSI1_BUS, "mout_cmu_hsi1_bus",
mout_cmu_hsi1_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_BUS, 0, 3),
MUX(CLK_MOUT_CMU_HSI1_MMC_CARD, "mout_cmu_hsi1_mmc_card",
@@ -788,7 +815,7 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
0, 2),
MUX(CLK_MOUT_CMU_HSI1_UFS_EMBD, "mout_cmu_hsi1_ufs_embd",
mout_cmu_hsi1_ufs_embd_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_EMBD,
- 0, 1),
+ 0, 2),
MUX(CLK_MOUT_CMU_HSI2_BUS, "mout_cmu_hsi2_bus",
mout_cmu_hsi2_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_BUS, 0, 1),
MUX(CLK_MOUT_CMU_HSI2_PCIE, "mout_cmu_hsi2_pcie",
@@ -830,6 +857,10 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
mout_cmu_tnr_bus_p, CLK_CON_MUX_MUX_CLKCMU_TNR_BUS, 0, 3),
MUX(CLK_MOUT_CMU_VRA_BUS, "mout_cmu_vra_bus",
mout_cmu_vra_bus_p, CLK_CON_MUX_MUX_CLKCMU_VRA_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_CMUREF, "mout_cmu_cmuref",
+ mout_cmu_cmuref_p, CLK_CON_MUX_MUX_CMU_CMUREF, 0, 1),
+ MUX(CLK_MOUT_CMU_CLK_CMUREF, "mout_cmu_clk_cmuref",
+ mout_cmu_clk_cmuref_p, CLK_CON_MUX_MUX_CLK_CMU_CMUREF, 0, 2),
};
static const struct samsung_div_clock top_div_clks[] __initconst = {
@@ -862,7 +893,7 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
CLK_CON_DIV_PLL_SHARED4_DIV4, 0, 1),
DIV(CLK_DOUT_CMU_APM_BUS, "dout_cmu_apm_bus", "gout_cmu_apm_bus",
- CLK_CON_DIV_CLKCMU_APM_BUS, 0, 3),
+ CLK_CON_DIV_CLKCMU_APM_BUS, 0, 2),
DIV(CLK_DOUT_CMU_AUD_CPU, "dout_cmu_aud_cpu", "gout_cmu_aud_cpu",
CLK_CON_DIV_CLKCMU_AUD_CPU, 0, 3),
DIV(CLK_DOUT_CMU_BUS0_BUS, "dout_cmu_bus0_bus", "gout_cmu_bus0_bus",
@@ -887,9 +918,9 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
CLK_CON_DIV_CLKCMU_CMU_BOOST, 0, 2),
DIV(CLK_DOUT_CMU_CORE_BUS, "dout_cmu_core_bus", "gout_cmu_core_bus",
CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4),
- DIV(CLK_DOUT_CMU_CPUCL0_DBG_BUS, "dout_cmu_cpucl0_debug",
+ DIV(CLK_DOUT_CMU_CPUCL0_DBG_BUS, "dout_cmu_cpucl0_dbg_bus",
"gout_cmu_cpucl0_dbg_bus", CLK_CON_DIV_CLKCMU_CPUCL0_DBG_BUS,
- 0, 3),
+ 0, 4),
DIV(CLK_DOUT_CMU_CPUCL0_SWITCH, "dout_cmu_cpucl0_switch",
"gout_cmu_cpucl0_switch", CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, 0, 3),
DIV(CLK_DOUT_CMU_CPUCL1_SWITCH, "dout_cmu_cpucl1_switch",
@@ -924,16 +955,11 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
CLK_CON_DIV_CLKCMU_HSI0_DPGTC, 0, 3),
DIV(CLK_DOUT_CMU_HSI0_USB31DRD, "dout_cmu_hsi0_usb31drd",
"gout_cmu_hsi0_usb31drd", CLK_CON_DIV_CLKCMU_HSI0_USB31DRD, 0, 4),
- DIV(CLK_DOUT_CMU_HSI0_USBDP_DEBUG, "dout_cmu_hsi0_usbdp_debug",
- "gout_cmu_hsi0_usbdp_debug", CLK_CON_DIV_CLKCMU_HSI0_USBDP_DEBUG,
- 0, 4),
DIV(CLK_DOUT_CMU_HSI1_BUS, "dout_cmu_hsi1_bus", "gout_cmu_hsi1_bus",
CLK_CON_DIV_CLKCMU_HSI1_BUS, 0, 3),
DIV(CLK_DOUT_CMU_HSI1_MMC_CARD, "dout_cmu_hsi1_mmc_card",
"gout_cmu_hsi1_mmc_card", CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD,
0, 9),
- DIV(CLK_DOUT_CMU_HSI1_PCIE, "dout_cmu_hsi1_pcie", "gout_cmu_hsi1_pcie",
- CLK_CON_DIV_CLKCMU_HSI1_PCIE, 0, 7),
DIV(CLK_DOUT_CMU_HSI1_UFS_CARD, "dout_cmu_hsi1_ufs_card",
"gout_cmu_hsi1_ufs_card", CLK_CON_DIV_CLKCMU_HSI1_UFS_CARD,
0, 3),
@@ -942,8 +968,6 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
0, 3),
DIV(CLK_DOUT_CMU_HSI2_BUS, "dout_cmu_hsi2_bus", "gout_cmu_hsi2_bus",
CLK_CON_DIV_CLKCMU_HSI2_BUS, 0, 4),
- DIV(CLK_DOUT_CMU_HSI2_PCIE, "dout_cmu_hsi2_pcie", "gout_cmu_hsi2_pcie",
- CLK_CON_DIV_CLKCMU_HSI2_PCIE, 0, 7),
DIV(CLK_DOUT_CMU_IPP_BUS, "dout_cmu_ipp_bus", "gout_cmu_ipp_bus",
CLK_CON_DIV_CLKCMU_IPP_BUS, 0, 4),
DIV(CLK_DOUT_CMU_ITP_BUS, "dout_cmu_itp_bus", "gout_cmu_itp_bus",
@@ -979,8 +1003,22 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
CLK_CON_DIV_CLKCMU_TNR_BUS, 0, 4),
DIV(CLK_DOUT_CMU_VRA_BUS, "dout_cmu_vra_bus", "gout_cmu_vra_bus",
CLK_CON_DIV_CLKCMU_VRA_BUS, 0, 4),
- DIV(CLK_DOUT_CMU_DPU, "dout_cmu_clkcmu_dpu", "gout_cmu_dpu",
- CLK_CON_DIV_DIV_CLKCMU_DPU, 0, 4),
+ DIV(CLK_DOUT_CMU_DPU, "dout_cmu_dpu", "gout_cmu_dpu",
+ CLK_CON_DIV_DIV_CLKCMU_DPU, 0, 3),
+ DIV(CLK_DOUT_CMU_DPU_ALT, "dout_cmu_dpu_alt", "gout_cmu_dpu_bus",
+ CLK_CON_DIV_DIV_CLKCMU_DPU_ALT, 0, 4),
+ DIV(CLK_DOUT_CMU_CLK_CMUREF, "dout_cmu_clk_cmuref", "mout_cmu_clk_cmuref",
+ CLK_CON_DIV_DIV_CLK_CMU_CMUREF, 0, 2),
+};
+
+static const struct samsung_fixed_factor_clock cmu_top_ffactor[] __initconst = {
+ FFACTOR(CLK_DOUT_CMU_HSI1_PCIE, "dout_cmu_hsi1_pcie",
+ "gout_cmu_hsi1_pcie", 1, 8, 0),
+ FFACTOR(CLK_DOUT_CMU_OTP, "dout_cmu_otp", "oscclk", 1, 8, 0),
+ FFACTOR(CLK_DOUT_CMU_HSI0_USBDP_DEBUG, "dout_cmu_hsi0_usbdp_debug",
+ "gout_cmu_hsi0_usbdp_debug", 1, 8, 0),
+ FFACTOR(CLK_DOUT_CMU_HSI2_PCIE, "dout_cmu_hsi2_pcie",
+ "gout_cmu_hsi2_pcie", 1, 8, 0),
};
static const struct samsung_gate_clock top_gate_clks[] __initconst = {
@@ -1126,6 +1164,8 @@ static const struct samsung_cmu_info top_cmu_info __initconst = {
.nr_mux_clks = ARRAY_SIZE(top_mux_clks),
.div_clks = top_div_clks,
.nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .fixed_factor_clks = cmu_top_ffactor,
+ .nr_fixed_factor_clks = ARRAY_SIZE(cmu_top_ffactor),
.gate_clks = top_gate_clks,
.nr_gate_clks = ARRAY_SIZE(top_gate_clks),
.nr_clk_ids = CLKS_NR_TOP,
@@ -1186,6 +1226,8 @@ static const unsigned long hsi0_clk_regs[] __initconst = {
CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2,
CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK,
CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_REF_CLK_40,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_REF_SOC_PLL,
CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK,
CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK,
CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY,
@@ -1294,6 +1336,10 @@ static const struct samsung_gate_clock hsi0_gate_clks[] __initconst = {
"gout_hsi0_xiu_d_hsi0_aclk", "mout_hsi0_bus_user",
CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK,
21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_HSI0_LHS_ACEL_D_HSI0_CLK,
+ "gout_hsi0_lhs_acel_d_hsi0_clk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_ACEL_D_HSI0_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
};
static const struct samsung_cmu_info hsi0_cmu_info __initconst = {
@@ -1307,6 +1353,1150 @@ static const struct samsung_cmu_info hsi0_cmu_info __initconst = {
.clk_name = "bus",
};
+/* ---- CMU_PERIC0 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC0 (0x10400000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER 0x0600
+#define PLL_CON1_MUX_CLKCMU_PERIC0_BUS_USER 0x0604
+#define PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG 0x0610
+#define PLL_CON1_MUX_CLKCMU_PERIC0_UART_DBG 0x0614
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USI_USER 0x0620
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI00_USI_USER 0x0624
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USI_USER 0x0630
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI01_USI_USER 0x0634
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USI_USER 0x0640
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI02_USI_USER 0x0644
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USI_USER 0x0650
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI03_USI_USER 0x0654
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI04_USI_USER 0x0660
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI04_USI_USER 0x0664
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI05_USI_USER 0x0670
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI05_USI_USER 0x0674
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI13_USI_USER 0x0680
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI13_USI_USER 0x0684
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER 0x0690
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI14_USI_USER 0x0694
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI15_USI_USER 0x06a0
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI15_USI_USER 0x06a4
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI_I2C_USER 0x06b0
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI_I2C_USER 0x06b4
+#define CLK_CON_DIV_DIV_CLK_PERIC0_UART_DBG 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI 0x1818
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI13_USI 0x181c
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI 0x1820
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI15_USI 0x1824
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C 0x1828
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_OSCCLK_IPCLKPORT_CLK 0x2008
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_D_TZPC_PERIC0_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10 0x2018
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_12 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_13 0x2024
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_14 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_15 0x202c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5 0x2034
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9 0x2044
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10 0x2048
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_12 0x2050
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_13 0x2054
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_14 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_15 0x205c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4 0x2060
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5 0x2064
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6 0x2068
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7 0x206c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8 0x2070
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9 0x2074
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_0 0x2078
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_3 0x207c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_4 0x2080
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_5 0x2084
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_6 0x2088
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_7 0x208c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_8 0x2090
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_0 0x2094
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_15 0x2098
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_3 0x209c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_4 0x20a0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_5 0x20a4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_6 0x20a8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_7 0x20ac
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_8 0x20b0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK 0x20b4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_UART_DBG_IPCLKPORT_CLK 0x20b8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI00_USI_IPCLKPORT_CLK 0x20bc
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI01_USI_IPCLKPORT_CLK 0x20c0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI02_USI_IPCLKPORT_CLK 0x20c4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI03_USI_IPCLKPORT_CLK 0x20c8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI04_USI_IPCLKPORT_CLK 0x20cc
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI05_USI_IPCLKPORT_CLK 0x20d0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI13_USI_IPCLKPORT_CLK 0x20d4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI14_USI_IPCLKPORT_CLK 0x20d8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI15_USI_IPCLKPORT_CLK 0x20dc
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI_I2C_IPCLKPORT_CLK 0x20e0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK 0x20e4
+
+static const unsigned long peric0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG,
+ PLL_CON1_MUX_CLKCMU_PERIC0_UART_DBG,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI00_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI01_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI02_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI03_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI04_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI04_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI05_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI05_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI13_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI13_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI14_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI15_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI15_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI_I2C_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI_I2C_USER,
+ CLK_CON_DIV_DIV_CLK_PERIC0_UART_DBG,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI13_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI15_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_D_TZPC_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_UART_DBG_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI00_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI01_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI02_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI03_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI04_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI05_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI13_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI14_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI15_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK,
+};
+
+/* Parent clock list for CMU_PERIC0 muxes */
+PNAME(mout_peric0_bus_user_p) = { "oscclk", "dout_cmu_peric0_bus" };
+PNAME(mout_peric0_uart_dbg_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi00_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi01_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi02_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi03_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi04_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi05_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi13_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi14_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi15_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi_i2c_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+
+static const struct samsung_mux_clock peric0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC0_BUS_USER, "mout_peric0_bus_user",
+ mout_peric0_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_UART_DBG, "mout_peric0_uart_dbg",
+ mout_peric0_uart_dbg_p, PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI00_USI_USER, "mout_peric0_usi00_usi_user",
+ mout_peric0_usi00_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI01_USI_USER, "mout_peric0_usi01_usi_user",
+ mout_peric0_usi01_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI02_USI_USER, "mout_peric0_usi02_usi_user",
+ mout_peric0_usi02_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI03_USI_USER, "mout_peric0_usi03_usi_user",
+ mout_peric0_usi03_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI04_USI_USER, "mout_peric0_usi04_usi_user",
+ mout_peric0_usi04_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI04_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI05_USI_USER, "mout_peric0_usi05_usi_user",
+ mout_peric0_usi05_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI05_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI13_USI_USER, "mout_peric0_usi13_usi_user",
+ mout_peric0_usi13_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI13_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI14_USI_USER, "mout_peric0_usi14_usi_user",
+ mout_peric0_usi14_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI15_USI_USER, "mout_peric0_usi15_usi_user",
+ mout_peric0_usi15_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI15_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI_I2C_USER, "mout_peric0_usi_i2c_user",
+ mout_peric0_usi_i2c_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI_I2C_USER,
+ 4, 1),
+};
+
+static const struct samsung_div_clock peric0_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERIC0_UART_DBG, "dout_peric0_uart_dbg",
+ "mout_peric0_uart_dbg",
+ CLK_CON_DIV_DIV_CLK_PERIC0_UART_DBG,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI00_USI, "dout_peric0_usi00_usi",
+ "mout_peric0_usi00_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI01_USI, "dout_peric0_usi01_usi",
+ "mout_peric0_usi01_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI02_USI, "dout_peric0_usi02_usi",
+ "mout_peric0_usi02_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI03_USI, "dout_peric0_usi03_usi",
+ "mout_peric0_usi03_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI04_USI, "dout_peric0_usi04_usi",
+ "mout_peric0_usi04_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI05_USI, "dout_peric0_usi05_usi",
+ "mout_peric0_usi05_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI13_USI, "dout_peric0_usi13_usi",
+ "mout_peric0_usi13_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI13_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI14_USI, "dout_peric0_usi14_usi",
+ "mout_peric0_usi14_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI15_USI, "dout_peric0_usi15_usi",
+ "mout_peric0_usi15_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI15_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI_I2C, "dout_peric0_usi_i2c",
+ "mout_peric0_usi_i2c_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C,
+ 0, 4),
+};
+
+static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERIC0_CMU_PCLK, "gout_peric0_cmu_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC0_OSCCLK_CLK, "gout_peric0_oscclk_clk",
+ "oscclk",
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_OSCCLK_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_D_TZPC_PCLK, "gout_peric0_d_tpzc_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_D_TZPC_PERIC0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_GPIO_PCLK, "gout_peric0_gpio_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERIC0_LHM_AXI_P_CLK, "gout_peric0_lhm_axi_p_clk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_10, "gout_peric0_top0_ipclk_10",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_11, "gout_peric0_top0_ipclk_11",
+ "dout_peric0_usi03_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_12, "gout_peric0_top0_ipclk_12",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_13, "gout_peric0_top0_ipclk_13",
+ "dout_peric0_usi04_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_14, "gout_peric0_top0_ipclk_14",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_15, "gout_peric0_top0_ipclk_15",
+ "dout_peric0_usi05_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_4, "gout_peric0_top0_ipclk_4",
+ "dout_peric0_uart_dbg",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_5, "gout_peric0_top0_ipclk_5",
+ "dout_peric0_usi00_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_6, "gout_peric0_top0_ipclk_6",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_7, "gout_peric0_top0_ipclk_7",
+ "dout_peric0_usi01_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_8, "gout_peric0_top0_ipclk_8",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_9, "gout_peric0_top0_ipclk_9",
+ "dout_peric0_usi02_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_10, "gout_peric0_top0_pclk_10",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_11, "gout_peric0_top0_pclk_11",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_12, "gout_peric0_top0_pclk_12",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_13, "gout_peric0_top0_pclk_13",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_14, "gout_peric0_top0_pclk_14",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_15, "gout_peric0_top0_pclk_15",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_4, "gout_peric0_top0_pclk_4",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_5, "gout_peric0_top0_pclk_5",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_6, "gout_peric0_top0_pclk_6",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_7, "gout_peric0_top0_pclk_7",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_8, "gout_peric0_top0_pclk_8",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_9, "gout_peric0_top0_pclk_9",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_0, "gout_peric0_top1_ipclk_0",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_3, "gout_peric0_top1_ipclk_3",
+ "dout_peric0_usi13_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_4, "gout_peric0_top1_ipclk_4",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_5, "gout_peric0_top1_ipclk_5",
+ "dout_peric0_usi14_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_6, "gout_peric0_top1_ipclk_6",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_7, "gout_peric0_top1_ipclk_7",
+ "dout_peric0_usi15_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_8, "gout_peric0_top1_ipclk_8",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_0, "gout_peric0_top1_pclk_0",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_15, "gout_peric0_top1_pclk_15",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_3, "gout_peric0_top1_pclk_3",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_4, "gout_peric0_top1_pclk_4",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_5, "gout_peric0_top1_pclk_5",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_6, "gout_peric0_top1_pclk_6",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_7, "gout_peric0_top1_pclk_7",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_8, "gout_peric0_top1_pclk_8",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_BUSP_CLK, "gout_peric0_busp_clk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_UART_DBG_CLK, "gout_peric0_uart_dbg_clk",
+ "dout_peric0_uart_dbg",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_UART_DBG_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI00_USI_CLK, "gout_peric0_usi00_usi_clk",
+ "dout_peric0_usi00_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI00_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI01_USI_CLK, "gout_peric0_usi01_usi_clk",
+ "dout_peric0_usi01_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI01_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI02_USI_CLK, "gout_peric0_usi02_usi_clk",
+ "dout_peric0_usi02_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI02_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI03_USI_CLK, "gout_peric0_usi03_usi_clk",
+ "dout_peric0_usi03_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI03_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI04_USI_CLK, "gout_peric0_usi04_usi_clk",
+ "dout_peric0_usi04_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI04_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI05_USI_CLK, "gout_peric0_usi05_usi_clk",
+ "dout_peric0_usi05_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI05_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI13_USI_CLK, "gout_peric0_usi13_usi_clk",
+ "dout_peric0_usi13_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI13_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI14_USI_CLK, "gout_peric0_usi14_usi_clk",
+ "dout_peric0_usi14_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI14_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI15_USI_CLK, "gout_peric0_usi15_usi_clk",
+ "dout_peric0_usi15_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI15_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI_I2C_CLK, "gout_peric0_usi_i2c_clk",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI_I2C_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_SYSREG_PCLK, "gout_peric0_sysreg_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK,
+ 21, 0, 0)
+};
+
+static const struct samsung_cmu_info peric0_cmu_info __initconst = {
+ .mux_clks = peric0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric0_mux_clks),
+ .div_clks = peric0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric0_div_clks),
+ .gate_clks = peric0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric0_gate_clks),
+ .nr_clk_ids = CLKS_NR_PERIC0,
+ .clk_regs = peric0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric0_clk_regs),
+ .clk_name = "bus",
+};
+
+/* ---- CMU_PERIC1 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC1 (0x10700000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER 0x0600
+#define PLL_CON1_MUX_CLKCMU_PERIC1_BUS_USER 0x0604
+#define PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER 0x0610
+#define PLL_CON1_MUX_CLKCMU_PERIC1_UART_BT_USER 0x0614
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USI_USER 0x0620
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI06_USI_USER 0x0624
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USI_USER 0x0630
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI07_USI_USER 0x0634
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USI_USER 0x0640
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI08_USI_USER 0x0644
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USI_USER 0x0650
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI09_USI_USER 0x0654
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER 0x0660
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI10_USI_USER 0x0664
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER 0x0670
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI11_USI_USER 0x0674
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER 0x0680
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI12_USI_USER 0x0684
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI16_USI_USER 0x0690
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI16_USI_USER 0x0694
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI17_USI_USER 0x06a0
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI17_USI_USER 0x06a4
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI18_USI_USER 0x06b0
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI18_USI_USER 0x06b4
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI_I2C_USER 0x06c0
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI_I2C_USER 0x06c4
+#define CLK_CON_DIV_DIV_CLK_PERIC1_UART_BT 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI06_USI 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI07_USI 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI08_USI 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI 0x1818
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI 0x181c
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI 0x1820
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI 0x1824
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI18_USI 0x1828
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C 0x182c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_UART_BT_IPCLKPORT_CLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI12_USI_IPCLKPORT_CLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI18_USI_IPCLKPORT_CLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_D_TZPC_PERIC1_IPCLKPORT_PCLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_CSISPERIC1_IPCLKPORT_I_CLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_10 0x2024
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_12 0x202c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_13 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_14 0x2034
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_15 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11 0x2044
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_12 0x2048
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_13 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_14 0x2050
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_15 0x2054
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_0 0x205c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_1 0x2060
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_10 0x2064
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_12 0x206c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_13 0x2070
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_14 0x2074
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_15 0x2078
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_2 0x207c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_3 0x2080
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_4 0x2084
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_5 0x2088
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_6 0x208c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_7 0x2090
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_9 0x2098
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_0 0x209c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_1 0x20a0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_10 0x20a4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_12 0x20ac
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_13 0x20b0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_14 0x20b4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_15 0x20b8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_2 0x20bc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_3 0x20c0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_4 0x20c4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_5 0x20c8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_6 0x20cc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_7 0x20d0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_9 0x20d8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK 0x20dc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_OSCCLK_IPCLKPORT_CLK 0x20e0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI06_USI_IPCLKPORT_CLK 0x20e4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI07_USI_IPCLKPORT_CLK 0x20e8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI08_USI_IPCLKPORT_CLK 0x20ec
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI09_USI_IPCLKPORT_CLK 0x20f0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI10_USI_IPCLKPORT_CLK 0x20f4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI11_USI_IPCLKPORT_CLK 0x20f8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI16_USI_IPCLKPORT_CLK 0x20fc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI17_USI_IPCLKPORT_CLK 0x2100
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI_I2C_IPCLKPORT_CLK 0x2104
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK 0x2108
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_PCLK 0x210c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_SCLK 0x2110
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_PCLK 0x2114
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_SCLK 0x2118
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK 0x211c
+
+static const unsigned long peric1_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_UART_BT_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI06_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI07_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI08_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI09_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI10_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI11_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI12_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI16_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI16_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI17_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI17_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI18_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI18_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI_I2C_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI_I2C_USER,
+ CLK_CON_DIV_DIV_CLK_PERIC1_UART_BT,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI06_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI07_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI08_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI18_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_UART_BT_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI12_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI18_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_D_TZPC_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_CSISPERIC1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_1,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_2,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_1,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_2,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI06_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI07_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI08_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI09_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI10_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI11_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI16_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI17_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK,
+};
+
+/* Parent clock list for CMU_PERIC1 muxes */
+PNAME(mout_peric1_bus_user_p) = { "oscclk", "dout_cmu_peric1_bus" };
+PNAME(mout_peric1_uart_bt_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi06_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi07_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi08_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi09_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi10_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi11_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi12_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi18_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi16_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi17_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi_i2c_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+
+static const struct samsung_mux_clock peric1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC1_BUS_USER, "mout_peric1_bus_user",
+ mout_peric1_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_UART_BT_USER, "mout_peric1_uart_bt_user",
+ mout_peric1_uart_bt_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI06_USI_USER, "mout_peric1_usi06_usi_user",
+ mout_peric1_usi06_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI07_USI_USER, "mout_peric1_usi07_usi_user",
+ mout_peric1_usi07_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI08_USI_USER, "mout_peric1_usi08_usi_user",
+ mout_peric1_usi08_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI09_USI_USER, "mout_peric1_usi09_usi_user",
+ mout_peric1_usi09_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI10_USI_USER, "mout_peric1_usi10_usi_user",
+ mout_peric1_usi10_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI11_USI_USER, "mout_peric1_usi11_usi_user",
+ mout_peric1_usi11_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI12_USI_USER, "mout_peric1_usi12_usi_user",
+ mout_peric1_usi12_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI18_USI_USER, "mout_peric1_usi18_usi_user",
+ mout_peric1_usi18_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI18_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI16_USI_USER, "mout_peric1_usi16_usi_user",
+ mout_peric1_usi16_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI16_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI17_USI_USER, "mout_peric1_usi17_usi_user",
+ mout_peric1_usi17_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI17_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI_I2C_USER, "mout_peric1_usi_i2c_user",
+ mout_peric1_usi_i2c_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI_I2C_USER,
+ 4, 1),
+};
+
+static const struct samsung_div_clock peric1_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERIC1_UART_BT, "dout_peric1_uart_bt",
+ "mout_peric1_uart_bt_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_UART_BT,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI06_USI, "dout_peric1_usi06_usi",
+ "mout_peric1_usi06_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI06_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI07_USI, "dout_peric1_usi07_usi",
+ "mout_peric1_usi07_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI07_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI08_USI, "dout_peric1_usi08_usi",
+ "mout_peric1_usi08_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI08_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI18_USI, "dout_peric1_usi18_usi",
+ "mout_peric1_usi18_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI18_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI12_USI, "dout_peric1_usi12_usi",
+ "mout_peric1_usi12_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI09_USI, "dout_peric1_usi09_usi",
+ "mout_peric1_usi09_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI10_USI, "dout_peric1_usi10_usi",
+ "mout_peric1_usi10_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI11_USI, "dout_peric1_usi11_usi",
+ "mout_peric1_usi11_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI16_USI, "dout_peric1_usi16_usi",
+ "mout_peric1_usi16_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI17_USI, "dout_peric1_usi17_usi",
+ "mout_peric1_usi17_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI_I2C, "dout_peric1_usi_i2c",
+ "mout_peric1_usi_i2c_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C,
+ 0, 4),
+};
+
+static const struct samsung_gate_clock peric1_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERIC1_CMU_PCLK, "gout_peric1_cmu_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_UART_BT_CLK, "gout_peric1_uart_bt_clk",
+ "dout_peric1_uart_bt",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_UART_BT_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI12_USI_CLK, "gout_peric1_usi12_usi_clk",
+ "dout_peric1_usi12_usi",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI12_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI18_USI_CLK, "gout_peric1_usi18_usi_clk",
+ "dout_peric1_usi18_usi",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI18_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_D_TZPC_PCLK, "gout_peric1_d_tzpc_pclk",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_D_TZPC_PERIC1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_GPIO_PCLK, "gout_peric1_gpio_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERIC1_LHM_AXI_P_CSIS_CLK, "gout_peric1_lhm_axi_p_csis_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_CSISPERIC1_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_LHM_AXI_P_CLK, "gout_peric1_lhm_axi_p_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_10, "gout_peric1_top0_ipclk_10",
+ "dout_peric1_usi06_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_11, "gout_peric1_top0_ipclk_11",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_12, "gout_peric1_top0_ipclk_12",
+ "dout_peric1_usi07_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_13, "gout_peric1_top0_ipclk_13",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_14, "gout_peric1_top0_ipclk_14",
+ "dout_peric1_usi08_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_15, "gout_peric1_top0_ipclk_15",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_4, "gout_peric1_top0_ipclk_4",
+ "dout_peric1_uart_bt",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_10, "gout_peric1_top0_pclk_10",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_11, "gout_peric1_top0_pclk_11",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_12, "gout_peric1_top0_pclk_12",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_13, "gout_peric1_top0_pclk_13",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_14, "gout_peric1_top0_pclk_14",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_15, "gout_peric1_top0_pclk_15",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_4, "gout_peric1_top0_pclk_4",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_0, "gout_peric1_top1_ipclk_0",
+ "dout_peric1_usi09_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_1, "gout_peric1_top1_ipclk_1",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_10, "gout_peric1_top1_ipclk_10",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_12, "gout_peric1_top1_ipclk_12",
+ "dout_peric1_usi12_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_13, "gout_peric1_top1_ipclk_13",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_14, "gout_peric1_top1_ipclk_14",
+ "dout_peric1_usi18_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_15, "gout_peric1_top1_ipclk_15",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_2, "gout_peric1_top1_ipclk_2",
+ "dout_peric1_usi10_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_3, "gout_peric1_top1_ipclk_3",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_4, "gout_peric1_top1_ipclk_4",
+ "dout_peric1_usi11_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_5, "gout_peric1_top1_ipclk_5",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_6, "gout_peric1_top1_ipclk_6",
+ "dout_peric1_usi16_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_7, "gout_peric1_top1_ipclk_7",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_9, "gout_peric1_top1_ipclk_9",
+ "dout_peric1_usi17_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_0, "gout_peric1_top1_pclk_0",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_1, "gout_peric1_top1_pclk_1",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_10, "gout_peric1_top1_pclk_10",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_12, "gout_peric1_top1_pclk_12",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_13, "gout_peric1_top1_pclk_13",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_14, "gout_peric1_top1_pclk_14",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_15, "gout_peric1_top1_pclk_15",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_2, "gout_peric1_top1_pclk_2",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_3, "gout_peric1_top1_pclk_3",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_4, "gout_peric1_top1_pclk_4",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_5, "gout_peric1_top1_pclk_5",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_6, "gout_peric1_top1_pclk_6",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_7, "gout_peric1_top1_pclk_7",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_9, "gout_peric1_top1_pclk_9",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_BUSP_CLK, "gout_peric1_busp_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_OSCCLK_CLK, "gout_peric1_oscclk_clk",
+ "oscclk",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_OSCCLK_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI06_USI_CLK, "gout_peric1_usi06_usi_clk",
+ "dout_peric1_usi06_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI06_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI07_USI_CLK, "gout_peric1_usi07_usi_clk",
+ "dout_peric1_usi07_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI07_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI08_USI_CLK, "gout_peric1_usi08_usi_clk",
+ "dout_peric1_usi08_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI08_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI09_USI_CLK, "gout_peric1_usi09_usi_clk",
+ "dout_peric1_usi09_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI09_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI10_USI_CLK, "gout_peric1_usi10_usi_clk",
+ "dout_peric1_usi10_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI10_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI11_USI_CLK, "gout_peric1_usi11_usi_clk",
+ "dout_peric1_usi11_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI11_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI16_USI_CLK, "gout_peric1_usi16_usi_clk",
+ "dout_peric1_usi16_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI16_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI17_USI_CLK, "gout_peric1_usi17_usi_clk",
+ "dout_peric1_usi17_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI17_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI_I2C_CLK, "gout_peric1_usi_i2c_clk",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI_I2C_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SYSREG_PCLK, "gout_peric1_sysreg_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI16_I3C_PCLK, "gout_peric1_usi16_i3c_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI16_I3C_SCLK, "gout_peric1_usi16_i3c_sclk",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_SCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI17_I3C_PCLK, "gout_peric1_usi17_i3c_pclk",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI17_I3C_SCLK, "gout_peric1_usi17_i3c_sclk",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_SCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_XIU_P_ACLK, "gout_peric1_xiu_p_aclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info peric1_cmu_info __initconst = {
+ .mux_clks = peric1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric1_mux_clks),
+ .div_clks = peric1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric1_div_clks),
+ .gate_clks = peric1_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric1_gate_clks),
+ .nr_clk_ids = CLKS_NR_PERIC1,
+ .clk_regs = peric1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric1_clk_regs),
+ .clk_name = "bus",
+};
+
/* ---- CMU_PERIS ----------------------------------------------------------- */
/* Register Offset definitions for CMU_PERIS (0x10020000) */
@@ -1500,6 +2690,12 @@ static const struct of_device_id exynos990_cmu_of_match[] = {
{
.compatible = "samsung,exynos990-cmu-hsi0",
.data = &hsi0_cmu_info,
+ }, {
+ .compatible = "samsung,exynos990-cmu-peric0",
+ .data = &peric0_cmu_info,
+ }, {
+ .compatible = "samsung,exynos990-cmu-peric1",
+ .data = &peric1_cmu_info,
},
{ },
};
diff --git a/drivers/clk/samsung/clk-exynosautov920.c b/drivers/clk/samsung/clk-exynosautov920.c
index da4afe8ac2ab..b90b73c3518f 100644
--- a/drivers/clk/samsung/clk-exynosautov920.c
+++ b/drivers/clk/samsung/clk-exynosautov920.c
@@ -26,6 +26,9 @@
#define CLKS_NR_MISC (CLK_DOUT_MISC_OSC_DIV2 + 1)
#define CLKS_NR_HSI0 (CLK_DOUT_HSI0_PCIE_APB + 1)
#define CLKS_NR_HSI1 (CLK_MOUT_HSI1_USBDRD + 1)
+#define CLKS_NR_HSI2 (CLK_DOUT_HSI2_ETHERNET_PTP + 1)
+#define CLKS_NR_M2M (CLK_DOUT_M2M_NOCP + 1)
+#define CLKS_NR_MFC (CLK_DOUT_MFC_NOCP + 1)
/* ---- CMU_TOP ------------------------------------------------------------ */
@@ -1752,6 +1755,156 @@ static const struct samsung_cmu_info hsi1_cmu_info __initconst = {
.clk_name = "noc",
};
+/* ---- CMU_HSI2 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_HSI2 (0x16b00000) */
+#define PLL_LOCKTIME_PLL_ETH 0x0
+#define PLL_CON3_PLL_ETH 0x10c
+#define PLL_CON0_MUX_CLKCMU_HSI2_ETHERNET_USER 0x600
+#define PLL_CON0_MUX_CLKCMU_HSI2_NOC_UFS_USER 0x610
+#define PLL_CON0_MUX_CLKCMU_HSI2_UFS_EMBD_USER 0x630
+#define CLK_CON_MUX_MUX_CLK_HSI2_ETHERNET 0x1000
+#define CLK_CON_DIV_DIV_CLK_HSI2_ETHERNET 0x1800
+#define CLK_CON_DIV_DIV_CLK_HSI2_ETHERNET_PTP 0x1804
+
+static const unsigned long hsi2_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_ETH,
+ PLL_CON3_PLL_ETH,
+ PLL_CON0_MUX_CLKCMU_HSI2_ETHERNET_USER,
+ PLL_CON0_MUX_CLKCMU_HSI2_NOC_UFS_USER,
+ PLL_CON0_MUX_CLKCMU_HSI2_UFS_EMBD_USER,
+ CLK_CON_MUX_MUX_CLK_HSI2_ETHERNET,
+ CLK_CON_DIV_DIV_CLK_HSI2_ETHERNET,
+ CLK_CON_DIV_DIV_CLK_HSI2_ETHERNET_PTP,
+};
+
+static const struct samsung_pll_clock hsi2_pll_clks[] __initconst = {
+ /* CMU_HSI2_PLL */
+ PLL(pll_531x, FOUT_PLL_ETH, "fout_pll_eth", "oscclk",
+ PLL_LOCKTIME_PLL_ETH, PLL_CON3_PLL_ETH, NULL),
+};
+
+/* List of parent clocks for Muxes in CMU_HSI2 */
+PNAME(mout_clkcmu_hsi2_noc_ufs_user_p) = { "oscclk", "dout_clkcmu_hsi2_noc_ufs" };
+PNAME(mout_clkcmu_hsi2_ufs_embd_user_p) = { "oscclk", "dout_clkcmu_hsi2_ufs_embd" };
+PNAME(mout_hsi2_ethernet_p) = { "fout_pll_eth", "mout_clkcmu_hsi2_ethernet_user" };
+PNAME(mout_clkcmu_hsi2_ethernet_user_p) = { "oscclk", "dout_clkcmu_hsi2_ethernet" };
+
+static const struct samsung_mux_clock hsi2_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_HSI2_NOC_UFS_USER, "mout_clkcmu_hsi2_noc_ufs_user",
+ mout_clkcmu_hsi2_noc_ufs_user_p, PLL_CON0_MUX_CLKCMU_HSI2_NOC_UFS_USER, 4, 1),
+ MUX(CLK_MOUT_HSI2_UFS_EMBD_USER, "mout_clkcmu_hsi2_ufs_embd_user",
+ mout_clkcmu_hsi2_ufs_embd_user_p, PLL_CON0_MUX_CLKCMU_HSI2_UFS_EMBD_USER, 4, 1),
+ MUX(CLK_MOUT_HSI2_ETHERNET, "mout_hsi2_ethernet",
+ mout_hsi2_ethernet_p, CLK_CON_MUX_MUX_CLK_HSI2_ETHERNET, 0, 1),
+ MUX(CLK_MOUT_HSI2_ETHERNET_USER, "mout_clkcmu_hsi2_ethernet_user",
+ mout_clkcmu_hsi2_ethernet_user_p, PLL_CON0_MUX_CLKCMU_HSI2_ETHERNET_USER, 4, 1),
+};
+
+static const struct samsung_div_clock hsi2_div_clks[] __initconst = {
+ DIV(CLK_DOUT_HSI2_ETHERNET, "dout_hsi2_ethernet",
+ "mout_hsi2_ethernet", CLK_CON_DIV_DIV_CLK_HSI2_ETHERNET,
+ 0, 4),
+ DIV(CLK_DOUT_HSI2_ETHERNET_PTP, "dout_hsi2_ethernet_ptp",
+ "mout_hsi2_ethernet", CLK_CON_DIV_DIV_CLK_HSI2_ETHERNET_PTP,
+ 0, 4),
+};
+
+static const struct samsung_cmu_info hsi2_cmu_info __initconst = {
+ .pll_clks = hsi2_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(hsi2_pll_clks),
+ .mux_clks = hsi2_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(hsi2_mux_clks),
+ .div_clks = hsi2_div_clks,
+ .nr_div_clks = ARRAY_SIZE(hsi2_div_clks),
+ .nr_clk_ids = CLKS_NR_HSI2,
+ .clk_regs = hsi2_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(hsi2_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_M2M --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_M2M (0x1a800000) */
+#define PLL_CON0_MUX_CLKCMU_M2M_JPEG_USER 0x600
+#define PLL_CON0_MUX_CLKCMU_M2M_NOC_USER 0x610
+#define CLK_CON_DIV_DIV_CLK_M2M_NOCP 0x1800
+
+static const unsigned long m2m_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_M2M_JPEG_USER,
+ PLL_CON0_MUX_CLKCMU_M2M_NOC_USER,
+ CLK_CON_DIV_DIV_CLK_M2M_NOCP,
+};
+
+/* List of parent clocks for Muxes in CMU_M2M */
+PNAME(mout_clkcmu_m2m_noc_user_p) = { "oscclk", "dout_clkcmu_m2m_noc" };
+PNAME(mout_clkcmu_m2m_jpeg_user_p) = { "oscclk", "dout_clkcmu_m2m_jpeg" };
+
+static const struct samsung_mux_clock m2m_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_M2M_JPEG_USER, "mout_clkcmu_m2m_jpeg_user",
+ mout_clkcmu_m2m_jpeg_user_p, PLL_CON0_MUX_CLKCMU_M2M_JPEG_USER, 4, 1),
+ MUX(CLK_MOUT_M2M_NOC_USER, "mout_clkcmu_m2m_noc_user",
+ mout_clkcmu_m2m_noc_user_p, PLL_CON0_MUX_CLKCMU_M2M_NOC_USER, 4, 1),
+};
+
+static const struct samsung_div_clock m2m_div_clks[] __initconst = {
+ DIV(CLK_DOUT_M2M_NOCP, "dout_m2m_nocp",
+ "mout_clkcmu_m2m_noc_user", CLK_CON_DIV_DIV_CLK_M2M_NOCP,
+ 0, 3),
+};
+
+static const struct samsung_cmu_info m2m_cmu_info __initconst = {
+ .mux_clks = m2m_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(m2m_mux_clks),
+ .div_clks = m2m_div_clks,
+ .nr_div_clks = ARRAY_SIZE(m2m_div_clks),
+ .nr_clk_ids = CLKS_NR_M2M,
+ .clk_regs = m2m_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(m2m_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_MFC --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_MFC (0x19c00000) */
+#define PLL_CON0_MUX_CLKCMU_MFC_MFC_USER 0x600
+#define PLL_CON0_MUX_CLKCMU_MFC_WFD_USER 0x610
+#define CLK_CON_DIV_DIV_CLK_MFC_NOCP 0x1800
+
+static const unsigned long mfc_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_MFC_MFC_USER,
+ PLL_CON0_MUX_CLKCMU_MFC_WFD_USER,
+ CLK_CON_DIV_DIV_CLK_MFC_NOCP,
+};
+
+/* List of parent clocks for Muxes in CMU_MFC */
+PNAME(mout_clkcmu_mfc_mfc_user_p) = { "oscclk", "dout_clkcmu_mfc_mfc" };
+PNAME(mout_clkcmu_mfc_wfd_user_p) = { "oscclk", "dout_clkcmu_mfc_wfd" };
+
+static const struct samsung_mux_clock mfc_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_MFC_MFC_USER, "mout_clkcmu_mfc_mfc_user",
+ mout_clkcmu_mfc_mfc_user_p, PLL_CON0_MUX_CLKCMU_MFC_MFC_USER, 4, 1),
+ MUX(CLK_MOUT_MFC_WFD_USER, "mout_clkcmu_mfc_wfd_user",
+ mout_clkcmu_mfc_wfd_user_p, PLL_CON0_MUX_CLKCMU_MFC_WFD_USER, 4, 1),
+};
+
+static const struct samsung_div_clock mfc_div_clks[] __initconst = {
+ DIV(CLK_DOUT_MFC_NOCP, "dout_mfc_nocp",
+ "mout_clkcmu_mfc_mfc_user", CLK_CON_DIV_DIV_CLK_MFC_NOCP,
+ 0, 3),
+};
+
+static const struct samsung_cmu_info mfc_cmu_info __initconst = {
+ .mux_clks = mfc_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(mfc_mux_clks),
+ .div_clks = mfc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(mfc_div_clks),
+ .nr_clk_ids = CLKS_NR_MFC,
+ .clk_regs = mfc_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(mfc_clk_regs),
+ .clk_name = "noc",
+};
+
static int __init exynosautov920_cmu_probe(struct platform_device *pdev)
{
const struct samsung_cmu_info *info;
@@ -1779,6 +1932,15 @@ static const struct of_device_id exynosautov920_cmu_of_match[] = {
}, {
.compatible = "samsung,exynosautov920-cmu-hsi1",
.data = &hsi1_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov920-cmu-hsi2",
+ .data = &hsi2_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov920-cmu-m2m",
+ .data = &m2m_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov920-cmu-mfc",
+ .data = &mfc_cmu_info,
},
{ }
};
diff --git a/drivers/clk/samsung/clk-fsd.c b/drivers/clk/samsung/clk-fsd.c
index 594931334574..4124d65e3d18 100644
--- a/drivers/clk/samsung/clk-fsd.c
+++ b/drivers/clk/samsung/clk-fsd.c
@@ -89,7 +89,7 @@
#define CLKS_NR_FSYS1 (PCIE_LINK1_IPCLKPORT_SLV_ACLK + 1)
#define CLKS_NR_IMEM (IMEM_TMU_GT_IPCLKPORT_I_CLK_TS + 1)
#define CLKS_NR_MFC (MFC_MFC_IPCLKPORT_ACLK + 1)
-#define CLKS_NR_CAM_CSI (CAM_CSI2_3_IPCLKPORT_I_ACLK + 1)
+#define CLKS_NR_CAM_CSI (CAM_CSI2_3_IPCLKPORT_I_PCLK + 1)
static const unsigned long cmu_clk_regs[] __initconst = {
PLL_LOCKTIME_PLL_SHARED0,
@@ -1646,7 +1646,7 @@ static const struct samsung_pll_rate_table pll_cam_csi_rate_table[] __initconst
};
static const struct samsung_pll_clock cam_csi_pll_clks[] __initconst = {
- PLL(pll_142xx, 0, "fout_pll_cam_csi", "fin_pll",
+ PLL(pll_142xx, CAM_CSI_PLL, "fout_pll_cam_csi", "fin_pll",
PLL_LOCKTIME_PLL_CAM_CSI, PLL_CON0_PLL_CAM_CSI, pll_cam_csi_rate_table),
};
@@ -1682,51 +1682,51 @@ static const struct samsung_gate_clock cam_csi_gate_clks[] __initconst = {
GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__NOC, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI0_0_IPCLKPORT_I_ACLK, "cam_csi0_0_ipclkport_i_aclk", "dout_cam_csi0_aclk",
GAT_CAM_CSI0_0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi0_0_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI0_0_IPCLKPORT_I_PCLK, "cam_csi0_0_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI0_0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI0_1_IPCLKPORT_I_ACLK, "cam_csi0_1_ipclkport_i_aclk", "dout_cam_csi0_aclk",
GAT_CAM_CSI0_1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi0_1_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI0_1_IPCLKPORT_I_PCLK, "cam_csi0_1_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI0_1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI0_2_IPCLKPORT_I_ACLK, "cam_csi0_2_ipclkport_i_aclk", "dout_cam_csi0_aclk",
GAT_CAM_CSI0_2_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi0_2_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI0_2_IPCLKPORT_I_PCLK, "cam_csi0_2_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI0_2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI0_3_IPCLKPORT_I_ACLK, "cam_csi0_3_ipclkport_i_aclk", "dout_cam_csi0_aclk",
GAT_CAM_CSI0_3_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi0_3_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI0_3_IPCLKPORT_I_PCLK, "cam_csi0_3_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI0_3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI1_0_IPCLKPORT_I_ACLK, "cam_csi1_0_ipclkport_i_aclk", "dout_cam_csi1_aclk",
GAT_CAM_CSI1_0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi1_0_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI1_0_IPCLKPORT_I_PCLK, "cam_csi1_0_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI1_0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI1_1_IPCLKPORT_I_ACLK, "cam_csi1_1_ipclkport_i_aclk", "dout_cam_csi1_aclk",
GAT_CAM_CSI1_1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi1_1_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI1_1_IPCLKPORT_I_PCLK, "cam_csi1_1_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI1_1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI1_2_IPCLKPORT_I_ACLK, "cam_csi1_2_ipclkport_i_aclk", "dout_cam_csi1_aclk",
GAT_CAM_CSI1_2_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi1_2_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI1_2_IPCLKPORT_I_PCLK, "cam_csi1_2_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI1_2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI1_3_IPCLKPORT_I_ACLK, "cam_csi1_3_ipclkport_i_aclk", "dout_cam_csi1_aclk",
GAT_CAM_CSI1_3_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi1_3_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI1_3_IPCLKPORT_I_PCLK, "cam_csi1_3_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI1_3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI2_0_IPCLKPORT_I_ACLK, "cam_csi2_0_ipclkport_i_aclk", "dout_cam_csi2_aclk",
GAT_CAM_CSI2_0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi2_0_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI2_0_IPCLKPORT_I_PCLK, "cam_csi2_0_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI2_0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI2_1_IPCLKPORT_I_ACLK, "cam_csi2_1_ipclkport_i_aclk", "dout_cam_csi2_aclk",
GAT_CAM_CSI2_1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi2_1_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI2_1_IPCLKPORT_I_PCLK, "cam_csi2_1_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI2_1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI2_2_IPCLKPORT_I_ACLK, "cam_csi2_2_ipclkport_i_aclk", "dout_cam_csi2_aclk",
GAT_CAM_CSI2_2_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi2_2_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI2_2_IPCLKPORT_I_PCLK, "cam_csi2_2_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI2_2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI2_3_IPCLKPORT_I_ACLK, "cam_csi2_3_ipclkport_i_aclk", "dout_cam_csi2_aclk",
GAT_CAM_CSI2_3_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi2_3_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI2_3_IPCLKPORT_I_PCLK, "cam_csi2_3_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI2_3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(0, "cam_ns_brdg_cam_csi_ipclkport_clk__psoc_cam_csi__clk_cam_csi_d",
"dout_cam_csi_busd",
diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c
index f9c3d68d449c..70b26db9b95a 100644
--- a/drivers/clk/samsung/clk-gs101.c
+++ b/drivers/clk/samsung/clk-gs101.c
@@ -1154,7 +1154,7 @@ static const struct samsung_div_clock cmu_top_div_clks[] __initconst = {
CLK_CON_DIV_CLKCMU_G2D_MSCL, 0, 4),
DIV(CLK_DOUT_CMU_G3AA_G3AA, "dout_cmu_g3aa_g3aa", "gout_cmu_g3aa_g3aa",
CLK_CON_DIV_CLKCMU_G3AA_G3AA, 0, 4),
- DIV(CLK_DOUT_CMU_G3D_SWITCH, "dout_cmu_g3d_busd", "gout_cmu_g3d_busd",
+ DIV(CLK_DOUT_CMU_G3D_BUSD, "dout_cmu_g3d_busd", "gout_cmu_g3d_busd",
CLK_CON_DIV_CLKCMU_G3D_BUSD, 0, 4),
DIV(CLK_DOUT_CMU_G3D_GLB, "dout_cmu_g3d_glb", "gout_cmu_g3d_glb",
CLK_CON_DIV_CLKCMU_G3D_GLB, 0, 4),
@@ -2129,7 +2129,7 @@ PNAME(mout_hsi0_usbdpdbg_user_p) = { "oscclk",
"dout_cmu_hsi0_usbdpdbg" };
PNAME(mout_hsi0_bus_p) = { "mout_hsi0_bus_user",
"mout_hsi0_alt_user" };
-PNAME(mout_hsi0_usb20_ref_p) = { "fout_usb_pll",
+PNAME(mout_hsi0_usb20_ref_p) = { "mout_pll_usb",
"mout_hsi0_tcxo_user" };
PNAME(mout_hsi0_usb31drd_p) = { "fout_usb_pll",
"mout_hsi0_usb31drd_user",
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index fe8abe442c51..0a8fc9649ae2 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -11,14 +11,12 @@
#include <linux/iopoll.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/timekeeping.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include "clk.h"
#include "clk-pll.h"
-#define PLL_TIMEOUT_US 20000U
-#define PLL_TIMEOUT_LOOPS 1000000U
+#define PLL_TIMEOUT_US 20000U
struct samsung_clk_pll {
struct clk_hw hw;
@@ -49,37 +47,33 @@ static const struct samsung_pll_rate_table *samsung_get_pll_settings(
return NULL;
}
-static long samsung_pll_round_rate(struct clk_hw *hw,
- unsigned long drate, unsigned long *prate)
+static int samsung_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct samsung_clk_pll *pll = to_clk_pll(hw);
const struct samsung_pll_rate_table *rate_table = pll->rate_table;
int i;
- /* Assumming rate_table is in descending order */
+ /* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++) {
- if (drate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
}
/* return minimum supported value */
- return rate_table[i - 1].rate;
-}
+ req->rate = rate_table[i - 1].rate;
-static bool pll_early_timeout = true;
-
-static int __init samsung_pll_disable_early_timeout(void)
-{
- pll_early_timeout = false;
return 0;
}
-arch_initcall(samsung_pll_disable_early_timeout);
/* Wait until the PLL is locked */
static int samsung_pll_lock_wait(struct samsung_clk_pll *pll,
unsigned int reg_mask)
{
- int i, ret;
+ int ret;
u32 val;
/*
@@ -88,25 +82,15 @@ static int samsung_pll_lock_wait(struct samsung_clk_pll *pll,
* initialized, another when the timekeeping is suspended. udelay() also
* cannot be used when the clocksource is not running on arm64, since
* the current timer is used as cycle counter. So a simple busy loop
- * is used here in that special cases. The limit of iterations has been
- * derived from experimental measurements of various PLLs on multiple
- * Exynos SoC variants. Single register read time was usually in range
- * 0.4...1.5 us, never less than 0.4 us.
+ * is used here.
+ * The timeout has been derived from experimental measurements of
+ * various PLLs on multiple Exynos SoC variants. A single register
+ * read usually took between 0.4 and 1.5 us, and never less than
+ * 0.4 us.
*/
- if (pll_early_timeout || timekeeping_suspended) {
- i = PLL_TIMEOUT_LOOPS;
- while (i-- > 0) {
- if (readl_relaxed(pll->con_reg) & reg_mask)
- return 0;
-
- cpu_relax();
- }
- ret = -ETIMEDOUT;
- } else {
- ret = readl_relaxed_poll_timeout_atomic(pll->con_reg, val,
- val & reg_mask, 0, PLL_TIMEOUT_US);
- }
-
+ ret = readl_relaxed_poll_timeout_atomic(pll->con_reg, val,
+ val & reg_mask, 0,
+ PLL_TIMEOUT_US);
if (ret < 0)
pr_err("Could not lock PLL %s\n", clk_hw_get_name(&pll->hw));
@@ -273,7 +257,7 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
}
/* Set PLL lock time. */
- if (pll->type == pll_142xx)
+ if (pll->type == pll_142xx || pll->type == pll_1017x)
writel_relaxed(rate->pdiv * PLL142XX_LOCK_FACTOR,
pll->lock_reg);
else
@@ -298,7 +282,7 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll35xx_clk_ops = {
.recalc_rate = samsung_pll35xx_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll35xx_set_rate,
.enable = samsung_pll3xxx_enable,
.disable = samsung_pll3xxx_disable,
@@ -411,7 +395,7 @@ static int samsung_pll36xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll36xx_clk_ops = {
.recalc_rate = samsung_pll36xx_recalc_rate,
.set_rate = samsung_pll36xx_set_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.enable = samsung_pll3xxx_enable,
.disable = samsung_pll3xxx_disable,
};
@@ -514,7 +498,7 @@ static int samsung_pll0822x_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll0822x_clk_ops = {
.recalc_rate = samsung_pll0822x_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll0822x_set_rate,
.enable = samsung_pll3xxx_enable,
.disable = samsung_pll3xxx_disable,
@@ -612,7 +596,7 @@ static int samsung_pll0831x_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll0831x_clk_ops = {
.recalc_rate = samsung_pll0831x_recalc_rate,
.set_rate = samsung_pll0831x_set_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.enable = samsung_pll3xxx_enable,
.disable = samsung_pll3xxx_disable,
};
@@ -735,7 +719,7 @@ static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll45xx_clk_ops = {
.recalc_rate = samsung_pll45xx_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll45xx_set_rate,
};
@@ -880,7 +864,7 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll46xx_clk_ops = {
.recalc_rate = samsung_pll46xx_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll46xx_set_rate,
};
@@ -1093,7 +1077,7 @@ static int samsung_pll2550xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll2550xx_clk_ops = {
.recalc_rate = samsung_pll2550xx_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll2550xx_set_rate,
};
@@ -1185,7 +1169,7 @@ static int samsung_pll2650x_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll2650x_clk_ops = {
.recalc_rate = samsung_pll2650x_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll2650x_set_rate,
};
@@ -1277,7 +1261,7 @@ static int samsung_pll2650xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll2650xx_clk_ops = {
.recalc_rate = samsung_pll2650xx_recalc_rate,
.set_rate = samsung_pll2650xx_set_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
};
static const struct clk_ops samsung_pll2650xx_clk_min_ops = {
@@ -1325,6 +1309,125 @@ static const struct clk_ops samsung_pll531x_clk_ops = {
.recalc_rate = samsung_pll531x_recalc_rate,
};
+/*
+ * PLL1031x Clock Type
+ */
+#define PLL1031X_LOCK_FACTOR (500)
+
+#define PLL1031X_MDIV_MASK (0x3ff)
+#define PLL1031X_PDIV_MASK (0x3f)
+#define PLL1031X_SDIV_MASK (0x7)
+#define PLL1031X_MDIV_SHIFT (16)
+#define PLL1031X_PDIV_SHIFT (8)
+#define PLL1031X_SDIV_SHIFT (0)
+
+#define PLL1031X_KDIV_MASK (0xffff)
+#define PLL1031X_KDIV_SHIFT (0)
+#define PLL1031X_MFR_MASK (0x3f)
+#define PLL1031X_MRR_MASK (0x1f)
+#define PLL1031X_MFR_SHIFT (16)
+#define PLL1031X_MRR_SHIFT (24)
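+
+/* M, P and S fields live in CON0; K, MFR and MRR live in CON3 (con_reg + 0xc) */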
+
+static unsigned long samsung_pll1031x_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con3;
+ u64 fvco = parent_rate;
+
+ pll_con0 = readl_relaxed(pll->con_reg);
+ pll_con3 = readl_relaxed(pll->con_reg + 0xc);
+ mdiv = (pll_con0 >> PLL1031X_MDIV_SHIFT) & PLL1031X_MDIV_MASK;
+ pdiv = (pll_con0 >> PLL1031X_PDIV_SHIFT) & PLL1031X_PDIV_MASK;
+ sdiv = (pll_con0 >> PLL1031X_SDIV_SHIFT) & PLL1031X_SDIV_MASK;
+ kdiv = (pll_con3 & PLL1031X_KDIV_MASK);
+
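+ /*
+ * fout = fin * (mdiv + kdiv / 2^16) / (pdiv * 2^sdiv); the 16-bit
+ * KDIV fraction width equals PLL1031X_MDIV_SHIFT, so the same shift
+ * scales MDIV up and the final right shift drops the fraction.
+ */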
+ fvco *= (mdiv << PLL1031X_MDIV_SHIFT) + kdiv;
+ do_div(fvco, (pdiv << sdiv));
+ fvco >>= PLL1031X_MDIV_SHIFT;
+
+ return (unsigned long)fvco;
+}
+
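+/* True if M, P or K differ from @rate, meaning the PLL must relock */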
+static bool samsung_pll1031x_mpk_change(u32 pll_con0, u32 pll_con3,
+ const struct samsung_pll_rate_table *rate)
+{
+ u32 old_mdiv, old_pdiv, old_kdiv;
+
+ old_mdiv = (pll_con0 >> PLL1031X_MDIV_SHIFT) & PLL1031X_MDIV_MASK;
+ old_pdiv = (pll_con0 >> PLL1031X_PDIV_SHIFT) & PLL1031X_PDIV_MASK;
+ old_kdiv = (pll_con3 >> PLL1031X_KDIV_SHIFT) & PLL1031X_KDIV_MASK;
+
+ return (old_mdiv != rate->mdiv || old_pdiv != rate->pdiv ||
+ old_kdiv != rate->kdiv);
+}
+
+static int samsung_pll1031x_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ const struct samsung_pll_rate_table *rate;
+ u32 con0, con3;
+
+ /* Get required rate settings from table */
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ con0 = readl_relaxed(pll->con_reg);
+ con3 = readl_relaxed(pll->con_reg + 0xc);
+
+ if (!samsung_pll1031x_mpk_change(con0, con3, rate)) {
+ /* Only SDIV changed, so update just the S value */
+ con0 &= ~(PLL1031X_SDIV_MASK << PLL1031X_SDIV_SHIFT);
+ con0 |= rate->sdiv << PLL1031X_SDIV_SHIFT;
+ writel_relaxed(con0, pll->con_reg);
+
+ return 0;
+ }
+
+ /* Set PLL lock time. */
+ writel_relaxed(rate->pdiv * PLL1031X_LOCK_FACTOR, pll->lock_reg);
+
+ /* Set PLL M, P, and S values. */
+ con0 &= ~((PLL1031X_MDIV_MASK << PLL1031X_MDIV_SHIFT) |
+ (PLL1031X_PDIV_MASK << PLL1031X_PDIV_SHIFT) |
+ (PLL1031X_SDIV_MASK << PLL1031X_SDIV_SHIFT));
+
+ con0 |= (rate->mdiv << PLL1031X_MDIV_SHIFT) |
+ (rate->pdiv << PLL1031X_PDIV_SHIFT) |
+ (rate->sdiv << PLL1031X_SDIV_SHIFT);
+
+ /* Set PLL K, MFR and MRR values. */
+ con3 = readl_relaxed(pll->con_reg + 0xc);
+ con3 &= ~((PLL1031X_KDIV_MASK << PLL1031X_KDIV_SHIFT) |
+ (PLL1031X_MFR_MASK << PLL1031X_MFR_SHIFT) |
+ (PLL1031X_MRR_MASK << PLL1031X_MRR_SHIFT));
+ con3 |= (rate->kdiv << PLL1031X_KDIV_SHIFT) |
+ (rate->mfr << PLL1031X_MFR_SHIFT) |
+ (rate->mrr << PLL1031X_MRR_SHIFT);
+
+ /* Write configuration to PLL */
+ writel_relaxed(con0, pll->con_reg);
+ writel_relaxed(con3, pll->con_reg + 0xc);
+
+ /* Wait for the PLL to lock at the new rate */
+ return samsung_pll_lock_wait(pll, BIT(pll->lock_offs));
+}
+
+static const struct clk_ops samsung_pll1031x_clk_ops = {
+ .recalc_rate = samsung_pll1031x_recalc_rate,
+ .determine_rate = samsung_pll_determine_rate,
+ .set_rate = samsung_pll1031x_set_rate,
+};
+
+static const struct clk_ops samsung_pll1031x_clk_min_ops = {
+ .recalc_rate = samsung_pll1031x_recalc_rate,
+};
+
static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
const struct samsung_pll_clock *pll_clk)
{
@@ -1373,6 +1476,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
case pll_1451x:
case pll_1452x:
case pll_142xx:
+ case pll_1017x:
pll->enable_offs = PLL35XX_ENABLE_SHIFT;
pll->lock_offs = PLL35XX_LOCK_STAT_SHIFT;
if (!pll->rate_table)
@@ -1468,6 +1572,12 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
case pll_4311:
init.ops = &samsung_pll531x_clk_ops;
break;
+ case pll_1031x:
+ if (!pll->rate_table)
+ init.ops = &samsung_pll1031x_clk_min_ops;
+ else
+ init.ops = &samsung_pll1031x_clk_ops;
+ break;
default:
pr_warn("%s: Unknown pll type for pll clk %s\n",
__func__, pll_clk->name);
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index e9a5f8e0e0a3..6c8bb7f26da5 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -49,6 +49,8 @@ enum samsung_pll_type {
pll_0718x,
pll_0732x,
pll_4311,
+ pll_1017x,
+ pll_1031x,
};
#define PLL_RATE(_fin, _m, _p, _s, _k, _ks) \
diff --git a/drivers/clk/samsung/clk-s5pv210-audss.c b/drivers/clk/samsung/clk-s5pv210-audss.c
index b1fd8fac3a4c..c9fcb23de183 100644
--- a/drivers/clk/samsung/clk-s5pv210-audss.c
+++ b/drivers/clk/samsung/clk-s5pv210-audss.c
@@ -36,7 +36,7 @@ static unsigned long reg_save[][2] = {
{ASS_CLK_GATE, 0},
};
-static int s5pv210_audss_clk_suspend(void)
+static int s5pv210_audss_clk_suspend(void *data)
{
int i;
@@ -46,7 +46,7 @@ static int s5pv210_audss_clk_suspend(void)
return 0;
}
-static void s5pv210_audss_clk_resume(void)
+static void s5pv210_audss_clk_resume(void *data)
{
int i;
@@ -54,10 +54,14 @@ static void s5pv210_audss_clk_resume(void)
writel(reg_save[i][1], reg_base + reg_save[i][0]);
}
-static struct syscore_ops s5pv210_audss_clk_syscore_ops = {
+static const struct syscore_ops s5pv210_audss_clk_syscore_ops = {
.suspend = s5pv210_audss_clk_suspend,
.resume = s5pv210_audss_clk_resume,
};
+
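+/* Mutable instance handed to register_syscore(); the ops table stays const */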
+static struct syscore s5pv210_audss_clk_syscore = {
+ .ops = &s5pv210_audss_clk_syscore_ops,
+};
#endif /* CONFIG_PM_SLEEP */
/* register s5pv210_audss clocks */
@@ -175,7 +179,7 @@ static int s5pv210_audss_clk_probe(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
- register_syscore_ops(&s5pv210_audss_clk_syscore_ops);
+ register_syscore(&s5pv210_audss_clk_syscore);
#endif
return 0;
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index dbc9925ca8f4..c149ca6c2217 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -271,7 +271,7 @@ void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx,
}
#ifdef CONFIG_PM_SLEEP
-static int samsung_clk_suspend(void)
+static int samsung_clk_suspend(void *data)
{
struct samsung_clock_reg_cache *reg_cache;
@@ -284,7 +284,7 @@ static int samsung_clk_suspend(void)
return 0;
}
-static void samsung_clk_resume(void)
+static void samsung_clk_resume(void *data)
{
struct samsung_clock_reg_cache *reg_cache;
@@ -293,11 +293,15 @@ static void samsung_clk_resume(void)
reg_cache->rd_num);
}
-static struct syscore_ops samsung_clk_syscore_ops = {
+static const struct syscore_ops samsung_clk_syscore_ops = {
.suspend = samsung_clk_suspend,
.resume = samsung_clk_resume,
};
+static struct syscore samsung_clk_syscore = {
+ .ops = &samsung_clk_syscore_ops,
+};
+
void samsung_clk_extended_sleep_init(void __iomem *reg_base,
const unsigned long *rdump,
unsigned long nr_rdump,
@@ -316,7 +320,7 @@ void samsung_clk_extended_sleep_init(void __iomem *reg_base,
panic("could not allocate register dump storage.\n");
if (list_empty(&clock_reg_cache_list))
- register_syscore_ops(&samsung_clk_syscore_ops);
+ register_syscore(&samsung_clk_syscore);
reg_cache->reg_base = reg_base;
reg_cache->rd_num = nr_rdump;
diff --git a/drivers/clk/sifive/fu540-prci.h b/drivers/clk/sifive/fu540-prci.h
index e0173324f3c5..d45193c210b4 100644
--- a/drivers/clk/sifive/fu540-prci.h
+++ b/drivers/clk/sifive/fu540-prci.h
@@ -49,7 +49,7 @@ static struct __prci_wrpll_data sifive_fu540_prci_gemgxlpll_data = {
static const struct clk_ops sifive_fu540_prci_wrpll_clk_ops = {
.set_rate = sifive_prci_wrpll_set_rate,
- .round_rate = sifive_prci_wrpll_round_rate,
+ .determine_rate = sifive_prci_wrpll_determine_rate,
.recalc_rate = sifive_prci_wrpll_recalc_rate,
.enable = sifive_prci_clock_enable,
.disable = sifive_prci_clock_disable,
diff --git a/drivers/clk/sifive/fu740-prci.h b/drivers/clk/sifive/fu740-prci.h
index f31cd30fc395..c605a899d97d 100644
--- a/drivers/clk/sifive/fu740-prci.h
+++ b/drivers/clk/sifive/fu740-prci.h
@@ -55,7 +55,7 @@ static struct __prci_wrpll_data sifive_fu740_prci_cltxpll_data = {
static const struct clk_ops sifive_fu740_prci_wrpll_clk_ops = {
.set_rate = sifive_prci_wrpll_set_rate,
- .round_rate = sifive_prci_wrpll_round_rate,
+ .determine_rate = sifive_prci_wrpll_determine_rate,
.recalc_rate = sifive_prci_wrpll_recalc_rate,
.enable = sifive_prci_clock_enable,
.disable = sifive_prci_clock_disable,
diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c
index caba0400f8a2..4d1cc7adb2b3 100644
--- a/drivers/clk/sifive/sifive-prci.c
+++ b/drivers/clk/sifive/sifive-prci.c
@@ -183,9 +183,8 @@ unsigned long sifive_prci_wrpll_recalc_rate(struct clk_hw *hw,
return wrpll_calc_output_rate(&pwd->c, parent_rate);
}
-long sifive_prci_wrpll_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+int sifive_prci_wrpll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
struct __prci_wrpll_data *pwd = pc->pwd;
@@ -193,9 +192,11 @@ long sifive_prci_wrpll_round_rate(struct clk_hw *hw,
memcpy(&c, &pwd->c, sizeof(c));
- wrpll_configure_for_rate(&c, rate, *parent_rate);
+ wrpll_configure_for_rate(&c, req->rate, req->best_parent_rate);
- return wrpll_calc_output_rate(&c, *parent_rate);
+ req->rate = wrpll_calc_output_rate(&c, req->best_parent_rate);
+
+ return 0;
}
int sifive_prci_wrpll_set_rate(struct clk_hw *hw,
diff --git a/drivers/clk/sifive/sifive-prci.h b/drivers/clk/sifive/sifive-prci.h
index 91658a88af4e..d74b2bddd08a 100644
--- a/drivers/clk/sifive/sifive-prci.h
+++ b/drivers/clk/sifive/sifive-prci.h
@@ -291,8 +291,8 @@ void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd);
void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd);
/* Linux clock framework integration */
-long sifive_prci_wrpll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate);
+int sifive_prci_wrpll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req);
int sifive_prci_wrpll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
int sifive_clk_is_enabled(struct clk_hw *hw);
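
This SiFive PRCI change is the first of many identical conversions in this section (the sophgo, spacemit, spear and sprd hunks below follow the same shape). A generic sketch of the pattern, with my_clk_* as placeholder names: the rate and parent-rate arguments move into struct clk_rate_request, and the chosen rate is reported through req->rate while the return value carries only an error code.

#include <linux/clk-provider.h>

static int my_clk_determine_rate(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	/* pick the closest achievable rate, exactly as .round_rate() did */
	req->rate = my_clk_pick_rate(hw, req->rate, req->best_parent_rate);

	return 0;	/* success; the rate itself lives in req->rate */
}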
diff --git a/drivers/clk/socfpga/Kconfig b/drivers/clk/socfpga/Kconfig
index 0cf16b894efb..d88277e2a898 100644
--- a/drivers/clk/socfpga/Kconfig
+++ b/drivers/clk/socfpga/Kconfig
@@ -13,7 +13,7 @@ config CLK_INTEL_SOCFPGA32
default ARM && ARCH_INTEL_SOCFPGA
config CLK_INTEL_SOCFPGA64
- bool "Intel Stratix / Agilex / N5X clock controller support" if COMPILE_TEST && (!ARM64 || !ARCH_INTEL_SOCFPGA)
+ bool "Intel Stratix / Agilex / N5X / Agilex5 clock controller support" if COMPILE_TEST && (!ARM64 || !ARCH_INTEL_SOCFPGA)
default ARM64 && ARCH_INTEL_SOCFPGA
endif # CLK_INTEL_SOCFPGA
diff --git a/drivers/clk/socfpga/Makefile b/drivers/clk/socfpga/Makefile
index e8dfce339c91..a1ea2b988eaf 100644
--- a/drivers/clk/socfpga/Makefile
+++ b/drivers/clk/socfpga/Makefile
@@ -3,4 +3,4 @@ obj-$(CONFIG_CLK_INTEL_SOCFPGA32) += clk.o clk-gate.o clk-pll.o clk-periph.o \
clk-pll-a10.o clk-periph-a10.o clk-gate-a10.o
obj-$(CONFIG_CLK_INTEL_SOCFPGA64) += clk-s10.o \
clk-pll-s10.o clk-periph-s10.o clk-gate-s10.o \
- clk-agilex.o
+ clk-agilex.o clk-agilex5.o
diff --git a/drivers/clk/socfpga/clk-agilex5.c b/drivers/clk/socfpga/clk-agilex5.c
new file mode 100644
index 000000000000..f7f0ad884f64
--- /dev/null
+++ b/drivers/clk/socfpga/clk-agilex5.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Intel Corporation
+ * Copyright (C) 2025, Altera Corporation
+ */
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/intel,agilex5-clkmgr.h>
+#include "stratix10-clk.h"
+#include "clk.h"
+
+/* External parent clocks come from DT via fw_name */
+static const char * const boot_pll_parents[] = {
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+};
+
+static const char * const main_pll_parents[] = {
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const periph_pll_parents[] = {
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+};
+
+/* Core free muxes */
+static const char * const core0_free_mux[] = {
+ "main_pll_c1",
+ "peri_pll_c0",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const core1_free_mux[] = {
+ "main_pll_c1",
+ "peri_pll_c0",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const core2_free_mux[] = {
+ "main_pll_c0",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const core3_free_mux[] = {
+ "main_pll_c0",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const dsu_free_mux[] = {
+ "main_pll_c2",
+ "peri_pll_c0",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const noc_free_mux[] = {
+ "main_pll_c3",
+ "peri_pll_c1",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const emac_ptp_free_mux[] = {
+ "main_pll_c3",
+ "peri_pll_c3",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const emaca_free_mux[] = {
+ "main_pll_c2",
+ "peri_pll_c3",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const emacb_free_mux[] = {
+ "main_pll_c3",
+ "peri_pll_c3",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const gpio_db_free_mux[] = {
+ "main_pll_c3",
+ "peri_pll_c1",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const psi_ref_free_mux[] = {
+ "main_pll_c1",
+ "peri_pll_c3",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const usb31_free_mux[] = {
+ "main_pll_c3",
+ "peri_pll_c2",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const s2f_user0_free_mux[] = {
+ "main_pll_c1",
+ "peri_pll_c3",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const s2f_user1_free_mux[] = {
+ "main_pll_c1",
+ "peri_pll_c3",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+/* Secondary muxes between free_clk and boot_clk */
+static const char * const core0_mux[] = {
+ "core0_free_clk",
+ "boot_clk",
+};
+
+static const char * const core1_mux[] = {
+ "core1_free_clk",
+ "boot_clk",
+};
+
+static const char * const core2_mux[] = {
+ "core2_free_clk",
+ "boot_clk",
+};
+
+static const char * const core3_mux[] = {
+ "core3_free_clk",
+ "boot_clk",
+};
+
+static const char * const dsu_mux[] = {
+ "dsu_free_clk",
+ "boot_clk",
+};
+
+static const char * const noc_mux[] = {
+ "noc_free_clk",
+ "boot_clk",
+};
+
+static const char * const emac_mux[] = {
+ "emaca_free_clk",
+ "emacb_free_clk",
+ "boot_clk",
+};
+
+static const char * const s2f_user0_mux[] = {
+ "s2f_user0_free_clk",
+ "boot_clk",
+};
+
+static const char * const s2f_user1_mux[] = {
+ "s2f_user1_free_clk",
+ "boot_clk",
+};
+
+static const char * const psi_mux[] = {
+ "psi_ref_free_clk",
+ "boot_clk",
+};
+
+static const char * const gpio_db_mux[] = {
+ "gpio_db_free_clk",
+ "boot_clk",
+};
+
+static const char * const emac_ptp_mux[] = {
+ "emac_ptp_free_clk",
+ "boot_clk",
+};
+
+static const char * const usb31_mux[] = {
+ "usb31_free_clk",
+ "boot_clk",
+};
+
+static const struct agilex5_pll_clock agilex5_pll_clks[] = {
+ {
+ .id = AGILEX5_BOOT_CLK,
+ .name = "boot_clk",
+ .parent_names = boot_pll_parents,
+ .num_parents = ARRAY_SIZE(boot_pll_parents),
+ .flags = 0,
+ .offset = 0x0,
+ },
+ {
+ .id = AGILEX5_MAIN_PLL_CLK,
+ .name = "main_pll",
+ .parent_names = main_pll_parents,
+ .num_parents = ARRAY_SIZE(main_pll_parents),
+ .flags = 0,
+ .offset = 0x48,
+ },
+ {
+ .id = AGILEX5_PERIPH_PLL_CLK,
+ .name = "periph_pll",
+ .parent_names = periph_pll_parents,
+ .num_parents = ARRAY_SIZE(periph_pll_parents),
+ .flags = 0,
+ .offset = 0x9C,
+ },
+};
+
+/* Main PLL C0, C1, C2, C3 and peri PLL C0, C1, C2, C3, with ping-pong counters. */
+static const struct stratix10_perip_c_clock agilex5_main_perip_c_clks[] = {
+ { AGILEX5_MAIN_PLL_C0_CLK, "main_pll_c0", "main_pll", NULL, 1, 0,
+ 0x5C },
+ { AGILEX5_MAIN_PLL_C1_CLK, "main_pll_c1", "main_pll", NULL, 1, 0,
+ 0x60 },
+ { AGILEX5_MAIN_PLL_C2_CLK, "main_pll_c2", "main_pll", NULL, 1, 0,
+ 0x64 },
+ { AGILEX5_MAIN_PLL_C3_CLK, "main_pll_c3", "main_pll", NULL, 1, 0,
+ 0x68 },
+ { AGILEX5_PERIPH_PLL_C0_CLK, "peri_pll_c0", "periph_pll", NULL, 1, 0,
+ 0xB0 },
+ { AGILEX5_PERIPH_PLL_C1_CLK, "peri_pll_c1", "periph_pll", NULL, 1, 0,
+ 0xB4 },
+ { AGILEX5_PERIPH_PLL_C2_CLK, "peri_pll_c2", "periph_pll", NULL, 1, 0,
+ 0xB8 },
+ { AGILEX5_PERIPH_PLL_C3_CLK, "peri_pll_c3", "periph_pll", NULL, 1, 0,
+ 0xBC },
+};
+
+/* Clocks that are not SW clock-gated */
+static const struct agilex5_perip_cnt_clock agilex5_main_perip_cnt_clks[] = {
+ { AGILEX5_CORE0_FREE_CLK, "core0_free_clk", core0_free_mux,
+ ARRAY_SIZE(core0_free_mux), 0, 0x0100, 0, 0, 0},
+ { AGILEX5_CORE1_FREE_CLK, "core1_free_clk", core1_free_mux,
+ ARRAY_SIZE(core1_free_mux), 0, 0x0104, 0, 0, 0},
+ { AGILEX5_CORE2_FREE_CLK, "core2_free_clk", core2_free_mux,
+ ARRAY_SIZE(core2_free_mux), 0, 0x010C, 0, 0, 0},
+ { AGILEX5_CORE3_FREE_CLK, "core3_free_clk", core3_free_mux,
+ ARRAY_SIZE(core3_free_mux), 0, 0x0110, 0, 0, 0},
+ { AGILEX5_DSU_FREE_CLK, "dsu_free_clk", dsu_free_mux,
+ ARRAY_SIZE(dsu_free_mux), 0, 0xfc, 0, 0, 0},
+ { AGILEX5_NOC_FREE_CLK, "noc_free_clk", noc_free_mux,
+ ARRAY_SIZE(noc_free_mux), 0, 0x40, 0, 0, 0 },
+ { AGILEX5_EMAC_A_FREE_CLK, "emaca_free_clk", emaca_free_mux,
+ ARRAY_SIZE(emaca_free_mux), 0, 0xD4, 0, 0x88, 0 },
+ { AGILEX5_EMAC_B_FREE_CLK, "emacb_free_clk", emacb_free_mux,
+ ARRAY_SIZE(emacb_free_mux), 0, 0xD8, 0, 0x88, 1 },
+ { AGILEX5_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", emac_ptp_free_mux,
+ ARRAY_SIZE(emac_ptp_free_mux), 0, 0xDC, 0, 0x88, 2 },
+ { AGILEX5_GPIO_DB_FREE_CLK, "gpio_db_free_clk", gpio_db_free_mux,
+ ARRAY_SIZE(gpio_db_free_mux), 0, 0xE0, 0, 0x88, 3 },
+ { AGILEX5_S2F_USER0_FREE_CLK, "s2f_user0_free_clk", s2f_user0_free_mux,
+ ARRAY_SIZE(s2f_user0_free_mux), 0, 0xE8, 0, 0x30, 2 },
+ { AGILEX5_S2F_USER1_FREE_CLK, "s2f_user1_free_clk", s2f_user1_free_mux,
+ ARRAY_SIZE(s2f_user1_free_mux), 0, 0xEC, 0, 0x88, 5 },
+ { AGILEX5_PSI_REF_FREE_CLK, "psi_ref_free_clk", psi_ref_free_mux,
+ ARRAY_SIZE(psi_ref_free_mux), 0, 0xF0, 0, 0x88, 6 },
+ { AGILEX5_USB31_FREE_CLK, "usb31_free_clk", usb31_free_mux,
+ ARRAY_SIZE(usb31_free_mux), 0, 0xF8, 0, 0x88, 7},
+};
+
+static const char * const cs_pdbg_parents[] = { "cs_at_clk" };
+static const char * const usb31_bus_clk_early_parents[] = { "l4_main_clk" };
+static const char * const l4_mp_clk_parent[] = { "l4_mp_clk" };
+static const char * const l4_sp_clk_parent[] = { "l4_sp_clk" };
+static const char * const dfi_clk_parent[] = { "dfi_clk" };
+
+/* SW clock-gated clocks */
+static const struct agilex5_gate_clock agilex5_gate_clks[] = {
+ { AGILEX5_CORE0_CLK, "core0_clk", core0_mux,
+ ARRAY_SIZE(core0_mux), 0, 0x24, 8, 0, 0, 0, 0x30, 5, 0 },
+ { AGILEX5_CORE1_CLK, "core1_clk", core1_mux,
+ ARRAY_SIZE(core1_mux), 0, 0x24, 9, 0, 0, 0, 0x30, 5, 0 },
+ { AGILEX5_CORE2_CLK, "core2_clk", core2_mux,
+ ARRAY_SIZE(core2_mux), 0, 0x24, 10, 0, 0, 0, 0x30, 6, 0 },
+ { AGILEX5_CORE3_CLK, "core3_clk", core3_mux,
+ ARRAY_SIZE(core3_mux), 0, 0x24, 11, 0, 0, 0, 0x30, 7, 0 },
+ { AGILEX5_MPU_CLK, "dsu_clk", dsu_mux, ARRAY_SIZE(dsu_mux), 0, 0, 0,
+ 0, 0, 0, 0x34, 4, 0 },
+ { AGILEX5_MPU_PERIPH_CLK, "mpu_periph_clk", dsu_mux,
+ ARRAY_SIZE(dsu_mux), 0, 0, 0, 0x44, 20, 2, 0x34, 4, 0 },
+ { AGILEX5_MPU_CCU_CLK, "mpu_ccu_clk", dsu_mux,
+ ARRAY_SIZE(dsu_mux), 0, 0, 0, 0x44, 18, 2, 0x34, 4, 0 },
+ { AGILEX5_L4_MAIN_CLK, "l4_main_clk", noc_mux, ARRAY_SIZE(noc_mux),
+ CLK_IS_CRITICAL, 0x24, 1, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_L4_MP_CLK, "l4_mp_clk", noc_mux, ARRAY_SIZE(noc_mux), 0,
+ 0x24, 2, 0x44, 4, 2, 0x30, 1, 0 },
+ { AGILEX5_L4_SYS_FREE_CLK, "l4_sys_free_clk", noc_mux,
+ ARRAY_SIZE(noc_mux), 0, 0, 0, 0x44, 2, 2, 0x30, 1, 0 },
+ { AGILEX5_L4_SP_CLK, "l4_sp_clk", noc_mux, ARRAY_SIZE(noc_mux),
+ CLK_IS_CRITICAL, 0x24, 3, 0x44, 6, 2, 0x30, 1, 0 },
+
+ /* CoreSight clocks */
+ { AGILEX5_CS_AT_CLK, "cs_at_clk", noc_mux, ARRAY_SIZE(noc_mux), 0,
+ 0x24, 4, 0x44, 24, 2, 0x30, 1, 0 },
+ { AGILEX5_CS_TRACE_CLK, "cs_trace_clk", noc_mux,
+ ARRAY_SIZE(noc_mux), 0, 0x24, 4, 0x44, 26, 2, 0x30, 1, 0 },
+ { AGILEX5_CS_PDBG_CLK, "cs_pdbg_clk", cs_pdbg_parents, 1, 0, 0x24, 4,
+ 0x44, 28, 1, 0, 0, 0 },
+
+ /* Main Peripheral PLL1 Begin */
+ { AGILEX5_EMAC0_CLK, "emac0_clk", emac_mux, ARRAY_SIZE(emac_mux),
+ 0, 0x7C, 0, 0, 0, 0, 0x94, 26, 0 },
+ { AGILEX5_EMAC1_CLK, "emac1_clk", emac_mux, ARRAY_SIZE(emac_mux),
+ 0, 0x7C, 1, 0, 0, 0, 0x94, 27, 0 },
+ { AGILEX5_EMAC2_CLK, "emac2_clk", emac_mux, ARRAY_SIZE(emac_mux),
+ 0, 0x7C, 2, 0, 0, 0, 0x94, 28, 0 },
+ { AGILEX5_EMAC_PTP_CLK, "emac_ptp_clk", emac_ptp_mux,
+ ARRAY_SIZE(emac_ptp_mux), 0, 0x7C, 3, 0, 0, 0, 0x88, 2, 0 },
+ { AGILEX5_GPIO_DB_CLK, "gpio_db_clk", gpio_db_mux,
+ ARRAY_SIZE(gpio_db_mux), 0, 0x7C, 4, 0x98, 0, 16, 0x88, 3, 1 },
+ /* Main Peripheral PLL1 End */
+
+ /* Peripheral clocks */
+ { AGILEX5_S2F_USER0_CLK, "s2f_user0_clk", s2f_user0_mux,
+ ARRAY_SIZE(s2f_user0_mux), 0, 0x24, 6, 0, 0, 0, 0x30, 2, 0 },
+ { AGILEX5_S2F_USER1_CLK, "s2f_user1_clk", s2f_user1_mux,
+ ARRAY_SIZE(s2f_user1_mux), 0, 0x7C, 6, 0, 0, 0, 0x88, 5, 0 },
+ { AGILEX5_PSI_REF_CLK, "psi_ref_clk", psi_mux,
+ ARRAY_SIZE(psi_mux), 0, 0x7C, 7, 0, 0, 0, 0x88, 6, 0 },
+ { AGILEX5_USB31_SUSPEND_CLK, "usb31_suspend_clk", usb31_mux,
+ ARRAY_SIZE(usb31_mux), 0, 0x7C, 25, 0, 0, 0, 0x88, 7, 0 },
+ { AGILEX5_USB31_BUS_CLK_EARLY, "usb31_bus_clk_early", usb31_bus_clk_early_parents,
+ 1, 0, 0x7C, 25, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_USB2OTG_HCLK, "usb2otg_hclk", l4_mp_clk_parent, 1, 0, 0x7C,
+ 8, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_SPIM_0_CLK, "spim_0_clk", l4_mp_clk_parent, 1, 0, 0x7C, 9,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_SPIM_1_CLK, "spim_1_clk", l4_mp_clk_parent, 1, 0, 0x7C, 11,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_SPIS_0_CLK, "spis_0_clk", l4_sp_clk_parent, 1, 0, 0x7C, 12,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_SPIS_1_CLK, "spis_1_clk", l4_sp_clk_parent, 1, 0, 0x7C, 13,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_DMA_CORE_CLK, "dma_core_clk", l4_mp_clk_parent, 1, 0, 0x7C,
+ 14, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_DMA_HS_CLK, "dma_hs_clk", l4_mp_clk_parent, 1, 0, 0x7C, 14,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_I3C_0_CORE_CLK, "i3c_0_core_clk", l4_mp_clk_parent, 1, 0,
+ 0x7C, 18, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_I3C_1_CORE_CLK, "i3c_1_core_clk", l4_mp_clk_parent, 1, 0,
+ 0x7C, 19, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_I2C_0_PCLK, "i2c_0_pclk", l4_sp_clk_parent, 1, 0, 0x7C, 15,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_I2C_1_PCLK, "i2c_1_pclk", l4_sp_clk_parent, 1, 0, 0x7C, 16,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_I2C_EMAC0_PCLK, "i2c_emac0_pclk", l4_sp_clk_parent, 1, 0,
+ 0x7C, 17, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_I2C_EMAC1_PCLK, "i2c_emac1_pclk", l4_sp_clk_parent, 1, 0,
+ 0x7C, 22, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_I2C_EMAC2_PCLK, "i2c_emac2_pclk", l4_sp_clk_parent, 1, 0,
+ 0x7C, 27, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_UART_0_PCLK, "uart_0_pclk", l4_sp_clk_parent, 1, 0, 0x7C, 20,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_UART_1_PCLK, "uart_1_pclk", l4_sp_clk_parent, 1, 0, 0x7C, 21,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_SPTIMER_0_PCLK, "sptimer_0_pclk", l4_sp_clk_parent, 1, 0,
+ 0x7C, 23, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_SPTIMER_1_PCLK, "sptimer_1_pclk", l4_sp_clk_parent, 1, 0,
+ 0x7C, 24, 0, 0, 0, 0, 0, 0 },
+
+ /* NAND, SD/MMC and SoftPHY overall clocking */
+ { AGILEX5_DFI_CLK, "dfi_clk", l4_mp_clk_parent, 1, 0, 0, 0, 0x44, 16,
+ 2, 0, 0, 0 },
+ { AGILEX5_NAND_NF_CLK, "nand_nf_clk", dfi_clk_parent, 1, 0, 0x7C, 10,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_NAND_BCH_CLK, "nand_bch_clk", l4_mp_clk_parent, 1, 0, 0x7C,
+ 10, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_SDMMC_SDPHY_REG_CLK, "sdmmc_sdphy_reg_clk", l4_mp_clk_parent,
+ 1, 0, 0x7C, 5, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_SDMCLK, "sdmclk", dfi_clk_parent, 1, 0, 0x7C, 5, 0, 0, 0,
+ 0, 0, 0 },
+ { AGILEX5_SOFTPHY_REG_PCLK, "softphy_reg_pclk", l4_mp_clk_parent, 1, 0,
+ 0x7C, 26, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_SOFTPHY_PHY_CLK, "softphy_phy_clk", l4_mp_clk_parent, 1, 0,
+ 0x7C, 26, 0x44, 16, 2, 0, 0, 0 },
+ { AGILEX5_SOFTPHY_CTRL_CLK, "softphy_ctrl_clk", dfi_clk_parent, 1, 0,
+ 0x7C, 26, 0, 0, 0, 0, 0, 0 },
+};
+
+static int
+agilex5_clk_register_c_perip(const struct stratix10_perip_c_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk_hw *hw_clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ hw_clk = s10_register_periph(&clks[i], base);
+ if (IS_ERR(hw_clk)) {
+ pr_err("%s: failed to register clock %s\n", __func__,
+ clks[i].name);
+ continue;
+ }
+ data->clk_data.hws[clks[i].id] = hw_clk;
+ }
+ return 0;
+}
+
+static int
+agilex5_clk_register_cnt_perip(const struct agilex5_perip_cnt_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk_hw *hw_clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ hw_clk = agilex5_register_cnt_periph(&clks[i], base);
+ if (IS_ERR(hw_clk)) {
+ pr_err("%s: failed to register clock %s\n", __func__,
+ clks[i].name);
+ continue;
+ }
+ data->clk_data.hws[clks[i].id] = hw_clk;
+ }
+
+ return 0;
+}
+
+static int agilex5_clk_register_gate(const struct agilex5_gate_clock *clks,
+ int nums,
+ struct stratix10_clock_data *data)
+{
+ struct clk_hw *hw_clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ hw_clk = agilex5_register_gate(&clks[i], base);
+ if (IS_ERR(hw_clk)) {
+ pr_err("%s: failed to register clock %s\n", __func__,
+ clks[i].name);
+ continue;
+ }
+ data->clk_data.hws[clks[i].id] = hw_clk;
+ }
+
+ return 0;
+}
+
+static int agilex5_clk_register_pll(const struct agilex5_pll_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk_hw *hw_clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ hw_clk = agilex5_register_pll(&clks[i], base);
+ if (IS_ERR(hw_clk)) {
+ pr_err("%s: failed to register clock %s\n", __func__,
+ clks[i].name);
+ continue;
+ }
+ data->clk_data.hws[clks[i].id] = hw_clk;
+ }
+
+ return 0;
+}
+
+static int agilex5_clkmgr_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct stratix10_clock_data *clk_data;
+ void __iomem *base;
+ int i, num_clks;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ num_clks = AGILEX5_NUM_CLKS;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, clk_data.hws,
+ num_clks), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->base = base;
+ clk_data->clk_data.num = num_clks;
+
+ for (i = 0; i < num_clks; i++)
+ clk_data->clk_data.hws[i] = ERR_PTR(-ENOENT);
+
+ agilex5_clk_register_pll(agilex5_pll_clks, ARRAY_SIZE(agilex5_pll_clks),
+ clk_data);
+
+ /* main PLL C0, C1, C2, C3 and periph PLL C0, C1, C2, C3 */
+ agilex5_clk_register_c_perip(agilex5_main_perip_c_clks,
+ ARRAY_SIZE(agilex5_main_perip_c_clks),
+ clk_data);
+
+ agilex5_clk_register_cnt_perip(agilex5_main_perip_cnt_clks,
+ ARRAY_SIZE(agilex5_main_perip_cnt_clks),
+ clk_data);
+
+ agilex5_clk_register_gate(agilex5_gate_clks,
+ ARRAY_SIZE(agilex5_gate_clks), clk_data);
+
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data->clk_data);
+ return 0;
+}
+
+static int agilex5_clkmgr_probe(struct platform_device *pdev)
+{
+ int (*probe_func)(struct platform_device *init_func);
+
+ probe_func = of_device_get_match_data(&pdev->dev);
+ if (!probe_func)
+ return -ENODEV;
+ return probe_func(pdev);
+}
+
+static const struct of_device_id agilex5_clkmgr_match_table[] = {
+ { .compatible = "intel,agilex5-clkmgr", .data = agilex5_clkmgr_init },
+ {}
+};
+
+static struct platform_driver agilex5_clkmgr_driver = {
+ .probe = agilex5_clkmgr_probe,
+ .driver = {
+ .name = "agilex5-clkmgr",
+ .suppress_bind_attrs = true,
+ .of_match_table = agilex5_clkmgr_match_table,
+ },
+};
+
+static int __init agilex5_clk_init(void)
+{
+ return platform_driver_register(&agilex5_clkmgr_driver);
+}
+core_initcall(agilex5_clk_init);
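
With the provider registered through of_clk_add_hw_provider() and of_clk_hw_onecell_get, consumers reference these clocks by the indices from dt-bindings/clock/intel,agilex5-clkmgr.h. A hedged consumer-side sketch (the peripheral driver and its DT node are illustrative, not part of this patch):

#include <linux/clk.h>

/* DT: clocks = <&clkmgr AGILEX5_L4_MP_CLK>; then, in probe(): */
struct clk *clk;

clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
	return PTR_ERR(clk);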
diff --git a/drivers/clk/socfpga/clk-gate-s10.c b/drivers/clk/socfpga/clk-gate-s10.c
index 3930d922efb4..dce3ef137bf3 100644
--- a/drivers/clk/socfpga/clk-gate-s10.c
+++ b/drivers/clk/socfpga/clk-gate-s10.c
@@ -239,3 +239,56 @@ struct clk_hw *agilex_register_gate(const struct stratix10_gate_clock *clks, voi
}
return hw_clk;
}
+
+struct clk_hw *agilex5_register_gate(const struct agilex5_gate_clock *clks, void __iomem *regbase)
+{
+ struct clk_hw *hw_clk;
+ struct socfpga_gate_clk *socfpga_clk;
+ struct clk_init_data init;
+ int ret;
+
+ socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
+ if (!socfpga_clk)
+ return NULL;
+
+ socfpga_clk->hw.reg = regbase + clks->gate_reg;
+ socfpga_clk->hw.bit_idx = clks->gate_idx;
+
+ gateclk_ops.enable = clk_gate_ops.enable;
+ gateclk_ops.disable = clk_gate_ops.disable;
+
+ socfpga_clk->fixed_div = clks->fixed_div;
+
+ if (clks->div_reg)
+ socfpga_clk->div_reg = regbase + clks->div_reg;
+ else
+ socfpga_clk->div_reg = NULL;
+
+ socfpga_clk->width = clks->div_width;
+ socfpga_clk->shift = clks->div_offset;
+
+ if (clks->bypass_reg)
+ socfpga_clk->bypass_reg = regbase + clks->bypass_reg;
+ else
+ socfpga_clk->bypass_reg = NULL;
+ socfpga_clk->bypass_shift = clks->bypass_shift;
+
+ if (streq(clks->name, "cs_pdbg_clk"))
+ init.ops = &dbgclk_ops;
+ else
+ init.ops = &agilex_gateclk_ops;
+
+ init.name = clks->name;
+ init.flags = clks->flags;
+ init.num_parents = clks->num_parents;
+ init.parent_names = clks->parent_names;
+ socfpga_clk->hw.hw.init = &init;
+ hw_clk = &socfpga_clk->hw.hw;
+
+ ret = clk_hw_register(NULL, &socfpga_clk->hw.hw);
+ if (ret) {
+ kfree(socfpga_clk);
+ return ERR_PTR(ret);
+ }
+ return hw_clk;
+}
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index f5c1ca42b668..f12ca43ffe7c 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -214,3 +214,44 @@ struct clk_hw *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *c
}
return hw_clk;
}
+
+struct clk_hw *agilex5_register_cnt_periph(const struct agilex5_perip_cnt_clock *clks,
+ void __iomem *regbase)
+{
+ struct clk_hw *hw_clk;
+ struct socfpga_periph_clk *periph_clk;
+ struct clk_init_data init;
+ const char *name = clks->name;
+ int ret;
+
+ periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL);
+ if (WARN_ON(!periph_clk))
+ return NULL;
+
+ if (clks->offset)
+ periph_clk->hw.reg = regbase + clks->offset;
+ else
+ periph_clk->hw.reg = NULL;
+
+ if (clks->bypass_reg)
+ periph_clk->bypass_reg = regbase + clks->bypass_reg;
+ else
+ periph_clk->bypass_reg = NULL;
+ periph_clk->bypass_shift = clks->bypass_shift;
+ periph_clk->fixed_div = clks->fixed_divider;
+
+ init.name = name;
+ init.ops = &peri_cnt_clk_ops;
+ init.flags = clks->flags;
+ init.num_parents = clks->num_parents;
+ init.parent_names = clks->parent_names;
+ periph_clk->hw.hw.init = &init;
+ hw_clk = &periph_clk->hw.hw;
+
+ ret = clk_hw_register(NULL, hw_clk);
+ if (ret) {
+ kfree(periph_clk);
+ return ERR_PTR(ret);
+ }
+ return hw_clk;
+}
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index a88c212bda12..1be92827cd93 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -304,3 +304,39 @@ struct clk_hw *n5x_register_pll(const struct stratix10_pll_clock *clks,
}
return hw_clk;
}
+
+struct clk_hw *agilex5_register_pll(const struct agilex5_pll_clock *clks,
+ void __iomem *reg)
+{
+ struct clk_hw *hw_clk;
+ struct socfpga_pll *pll_clk;
+ struct clk_init_data init;
+ const char *name = clks->name;
+ int ret;
+
+ pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
+ if (WARN_ON(!pll_clk))
+ return NULL;
+
+ pll_clk->hw.reg = reg + clks->offset;
+
+ if (streq(name, SOCFPGA_BOOT_CLK))
+ init.ops = &clk_boot_ops;
+ else
+ init.ops = &agilex_clk_pll_ops;
+
+ init.name = name;
+ init.flags = clks->flags;
+ init.num_parents = clks->num_parents;
+ init.parent_names = clks->parent_names;
+ pll_clk->hw.hw.init = &init;
+ pll_clk->hw.bit_idx = SOCFPGA_PLL_POWER;
+ hw_clk = &pll_clk->hw.hw;
+
+ ret = clk_hw_register(NULL, hw_clk);
+ if (ret) {
+ kfree(pll_clk);
+ return ERR_PTR(ret);
+ }
+ return hw_clk;
+}
diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h
index 83fe4eb3133c..d1fe4578b3e0 100644
--- a/drivers/clk/socfpga/stratix10-clk.h
+++ b/drivers/clk/socfpga/stratix10-clk.h
@@ -73,12 +73,55 @@ struct stratix10_gate_clock {
u8 fixed_div;
};
+struct agilex5_pll_clock {
+ unsigned int id;
+ const char *name;
+ const char * const *parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long offset;
+};
+
+struct agilex5_perip_cnt_clock {
+ unsigned int id;
+ const char *name;
+ const char * const *parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long offset;
+ u8 fixed_divider;
+ unsigned long bypass_reg;
+ unsigned long bypass_shift;
+};
+
+struct agilex5_gate_clock {
+ unsigned int id;
+ const char *name;
+ const char * const *parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long gate_reg;
+ u8 gate_idx;
+ unsigned long div_reg;
+ u8 div_offset;
+ u8 div_width;
+ unsigned long bypass_reg;
+ u8 bypass_shift;
+ u8 fixed_div;
+};
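
For reference, a decoding of one positional initializer from the agilex5_gate_clks table in clk-agilex5.c above against these fields, using l4_mp_clk:

/*
 * { AGILEX5_L4_MP_CLK, "l4_mp_clk", noc_mux, ARRAY_SIZE(noc_mux), 0,
 *   0x24, 2, 0x44, 4, 2, 0x30, 1, 0 }
 * reads as: flags = 0, gate_reg = 0x24, gate_idx = 2, div_reg = 0x44,
 * div_offset = 4, div_width = 2, bypass_reg = 0x30, bypass_shift = 1,
 * fixed_div = 0.
 */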
+
struct clk_hw *s10_register_pll(const struct stratix10_pll_clock *clks,
void __iomem *reg);
struct clk_hw *agilex_register_pll(const struct stratix10_pll_clock *clks,
void __iomem *reg);
struct clk_hw *n5x_register_pll(const struct stratix10_pll_clock *clks,
void __iomem *reg);
+struct clk_hw *agilex5_register_pll(const struct agilex5_pll_clock *clks,
+ void __iomem *reg);
+struct clk_hw *agilex5_register_cnt_periph(const struct agilex5_perip_cnt_clock *clks,
+ void __iomem *regbase);
+struct clk_hw *agilex5_register_gate(const struct agilex5_gate_clock *clks,
+ void __iomem *regbase);
struct clk_hw *s10_register_periph(const struct stratix10_perip_c_clock *clks,
void __iomem *reg);
struct clk_hw *n5x_register_periph(const struct n5x_perip_c_clock *clks,
diff --git a/drivers/clk/sophgo/clk-cv18xx-ip.c b/drivers/clk/sophgo/clk-cv18xx-ip.c
index b186e64d4813..c2b58faf0938 100644
--- a/drivers/clk/sophgo/clk-cv18xx-ip.c
+++ b/drivers/clk/sophgo/clk-cv18xx-ip.c
@@ -45,10 +45,12 @@ static unsigned long gate_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long gate_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int gate_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return *parent_rate;
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
static int gate_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -63,7 +65,7 @@ const struct clk_ops cv1800_clk_gate_ops = {
.is_enabled = gate_is_enabled,
.recalc_rate = gate_recalc_rate,
- .round_rate = gate_round_rate,
+ .determine_rate = gate_determine_rate,
.set_rate = gate_set_rate,
};
diff --git a/drivers/clk/sophgo/clk-sg2042-clkgen.c b/drivers/clk/sophgo/clk-sg2042-clkgen.c
index a334963e83ce..683661b71787 100644
--- a/drivers/clk/sophgo/clk-sg2042-clkgen.c
+++ b/drivers/clk/sophgo/clk-sg2042-clkgen.c
@@ -176,9 +176,8 @@ static unsigned long sg2042_clk_divider_recalc_rate(struct clk_hw *hw,
return ret_rate;
}
-static long sg2042_clk_divider_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int sg2042_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sg2042_divider_clock *divider = to_sg2042_clk_divider(hw);
unsigned long ret_rate;
@@ -192,15 +191,17 @@ static long sg2042_clk_divider_round_rate(struct clk_hw *hw,
bestdiv = readl(divider->reg) >> divider->shift;
bestdiv &= clk_div_mask(divider->width);
}
- ret_rate = DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
+ ret_rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, bestdiv);
} else {
- ret_rate = divider_round_rate(hw, rate, prate, NULL,
+ ret_rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, NULL,
divider->width, divider->div_flags);
}
pr_debug("--> %s: divider_round_rate: val = %ld\n",
clk_hw_get_name(hw), ret_rate);
- return ret_rate;
+ req->rate = ret_rate;
+
+ return 0;
}
static int sg2042_clk_divider_set_rate(struct clk_hw *hw,
@@ -258,13 +259,13 @@ static int sg2042_clk_divider_set_rate(struct clk_hw *hw,
static const struct clk_ops sg2042_clk_divider_ops = {
.recalc_rate = sg2042_clk_divider_recalc_rate,
- .round_rate = sg2042_clk_divider_round_rate,
+ .determine_rate = sg2042_clk_divider_determine_rate,
.set_rate = sg2042_clk_divider_set_rate,
};
static const struct clk_ops sg2042_clk_divider_ro_ops = {
.recalc_rate = sg2042_clk_divider_recalc_rate,
- .round_rate = sg2042_clk_divider_round_rate,
+ .determine_rate = sg2042_clk_divider_determine_rate,
};
/*
@@ -968,7 +969,7 @@ static int sg2042_mux_notifier_cb(struct notifier_block *nb,
/*
* "1" is the array index of the second parent input source of
* mux. For SG2042, it's fpll for all mux clocks.
- * "0" is the array index of the frist parent input source of
+ * "0" is the array index of the first parent input source of
* mux. For SG2042, it's mpll.
* FIXME, any good idea to avoid magic number?
*/
diff --git a/drivers/clk/sophgo/clk-sg2042-pll.c b/drivers/clk/sophgo/clk-sg2042-pll.c
index 1537f4f05860..110b6ee06fe4 100644
--- a/drivers/clk/sophgo/clk-sg2042-pll.c
+++ b/drivers/clk/sophgo/clk-sg2042-pll.c
@@ -155,7 +155,7 @@ static unsigned long sg2042_pll_recalc_rate(unsigned int reg_value,
numerator = (u64)parent_rate * ctrl_table.fbdiv;
denominator = ctrl_table.refdiv * ctrl_table.postdiv1 * ctrl_table.postdiv2;
- do_div(numerator, denominator);
+ numerator = div64_u64(numerator, denominator);
return numerator;
}
@@ -212,7 +212,7 @@ static int sg2042_pll_get_postdiv_1_2(unsigned long rate,
tmp0 *= fbdiv;
/* ((prate/REFDIV) x FBDIV)/rate and result save to tmp0 */
- do_div(tmp0, rate);
+ tmp0 = div64_ul(tmp0, rate);
/* tmp0 is POSTDIV1*POSTDIV2, now we calculate div1 and div2 value */
if (tmp0 <= 7) {
@@ -346,37 +346,30 @@ static unsigned long sg2042_clk_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long sg2042_clk_pll_round_rate(struct clk_hw *hw,
- unsigned long req_rate,
- unsigned long *prate)
+static int sg2042_clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sg2042_pll_ctrl pctrl_table;
unsigned int value;
long proper_rate;
int ret;
- ret = sg2042_get_pll_ctl_setting(&pctrl_table, req_rate, *prate);
+ ret = sg2042_get_pll_ctl_setting(&pctrl_table,
+ min(req->rate, req->max_rate),
+ req->best_parent_rate);
if (ret) {
proper_rate = 0;
goto out;
}
value = sg2042_pll_ctrl_encode(&pctrl_table);
- proper_rate = (long)sg2042_pll_recalc_rate(value, *prate);
+ proper_rate = (long)sg2042_pll_recalc_rate(value, req->best_parent_rate);
out:
- pr_debug("--> %s: pll_round_rate: val = %ld\n",
+ pr_debug("--> %s: pll_determine_rate: val = %ld\n",
clk_hw_get_name(hw), proper_rate);
- return proper_rate;
-}
+ req->rate = proper_rate;
-static int sg2042_clk_pll_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- req->rate = sg2042_clk_pll_round_rate(hw, min(req->rate, req->max_rate),
- &req->best_parent_rate);
- pr_debug("--> %s: pll_determine_rate: val = %ld\n",
- clk_hw_get_name(hw), req->rate);
return 0;
}
@@ -417,14 +410,13 @@ out:
static const struct clk_ops sg2042_clk_pll_ops = {
.recalc_rate = sg2042_clk_pll_recalc_rate,
- .round_rate = sg2042_clk_pll_round_rate,
.determine_rate = sg2042_clk_pll_determine_rate,
.set_rate = sg2042_clk_pll_set_rate,
};
static const struct clk_ops sg2042_clk_pll_ro_ops = {
.recalc_rate = sg2042_clk_pll_recalc_rate,
- .round_rate = sg2042_clk_pll_round_rate,
+ .determine_rate = sg2042_clk_pll_determine_rate,
};
/*
diff --git a/drivers/clk/spacemit/Kconfig b/drivers/clk/spacemit/Kconfig
index 4c4df845b3cb..3854f6ae6d0e 100644
--- a/drivers/clk/spacemit/Kconfig
+++ b/drivers/clk/spacemit/Kconfig
@@ -3,6 +3,7 @@
config SPACEMIT_CCU
tristate "Clock support for SpacemiT SoCs"
depends on ARCH_SPACEMIT || COMPILE_TEST
+ select AUXILIARY_BUS
select MFD_SYSCON
help
Say Y to enable clock controller unit support for SpacemiT SoCs.
diff --git a/drivers/clk/spacemit/ccu-k1.c b/drivers/clk/spacemit/ccu-k1.c
index cdde37a05235..4761bc1e3b6e 100644
--- a/drivers/clk/spacemit/ccu-k1.c
+++ b/drivers/clk/spacemit/ccu-k1.c
@@ -5,12 +5,16 @@
*/
#include <linux/array_size.h>
+#include <linux/auxiliary_bus.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/idr.h>
#include <linux/mfd/syscon.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <soc/spacemit/k1-syscon.h>
#include "ccu_common.h"
#include "ccu_pll.h"
@@ -19,121 +23,14 @@
#include <dt-bindings/clock/spacemit,k1-syscon.h>
-/* APBS register offset */
-#define APBS_PLL1_SWCR1 0x100
-#define APBS_PLL1_SWCR2 0x104
-#define APBS_PLL1_SWCR3 0x108
-#define APBS_PLL2_SWCR1 0x118
-#define APBS_PLL2_SWCR2 0x11c
-#define APBS_PLL2_SWCR3 0x120
-#define APBS_PLL3_SWCR1 0x124
-#define APBS_PLL3_SWCR2 0x128
-#define APBS_PLL3_SWCR3 0x12c
-
-/* MPMU register offset */
-#define MPMU_POSR 0x0010
-#define POSR_PLL1_LOCK BIT(27)
-#define POSR_PLL2_LOCK BIT(28)
-#define POSR_PLL3_LOCK BIT(29)
-#define MPMU_SUCCR 0x0014
-#define MPMU_ISCCR 0x0044
-#define MPMU_WDTPCR 0x0200
-#define MPMU_RIPCCR 0x0210
-#define MPMU_ACGR 0x1024
-#define MPMU_APBCSCR 0x1050
-#define MPMU_SUCCR_1 0x10b0
-
-/* APBC register offset */
-#define APBC_UART1_CLK_RST 0x00
-#define APBC_UART2_CLK_RST 0x04
-#define APBC_GPIO_CLK_RST 0x08
-#define APBC_PWM0_CLK_RST 0x0c
-#define APBC_PWM1_CLK_RST 0x10
-#define APBC_PWM2_CLK_RST 0x14
-#define APBC_PWM3_CLK_RST 0x18
-#define APBC_TWSI8_CLK_RST 0x20
-#define APBC_UART3_CLK_RST 0x24
-#define APBC_RTC_CLK_RST 0x28
-#define APBC_TWSI0_CLK_RST 0x2c
-#define APBC_TWSI1_CLK_RST 0x30
-#define APBC_TIMERS1_CLK_RST 0x34
-#define APBC_TWSI2_CLK_RST 0x38
-#define APBC_AIB_CLK_RST 0x3c
-#define APBC_TWSI4_CLK_RST 0x40
-#define APBC_TIMERS2_CLK_RST 0x44
-#define APBC_ONEWIRE_CLK_RST 0x48
-#define APBC_TWSI5_CLK_RST 0x4c
-#define APBC_DRO_CLK_RST 0x58
-#define APBC_IR_CLK_RST 0x5c
-#define APBC_TWSI6_CLK_RST 0x60
-#define APBC_COUNTER_CLK_SEL 0x64
-#define APBC_TWSI7_CLK_RST 0x68
-#define APBC_TSEN_CLK_RST 0x6c
-#define APBC_UART4_CLK_RST 0x70
-#define APBC_UART5_CLK_RST 0x74
-#define APBC_UART6_CLK_RST 0x78
-#define APBC_SSP3_CLK_RST 0x7c
-#define APBC_SSPA0_CLK_RST 0x80
-#define APBC_SSPA1_CLK_RST 0x84
-#define APBC_IPC_AP2AUD_CLK_RST 0x90
-#define APBC_UART7_CLK_RST 0x94
-#define APBC_UART8_CLK_RST 0x98
-#define APBC_UART9_CLK_RST 0x9c
-#define APBC_CAN0_CLK_RST 0xa0
-#define APBC_PWM4_CLK_RST 0xa8
-#define APBC_PWM5_CLK_RST 0xac
-#define APBC_PWM6_CLK_RST 0xb0
-#define APBC_PWM7_CLK_RST 0xb4
-#define APBC_PWM8_CLK_RST 0xb8
-#define APBC_PWM9_CLK_RST 0xbc
-#define APBC_PWM10_CLK_RST 0xc0
-#define APBC_PWM11_CLK_RST 0xc4
-#define APBC_PWM12_CLK_RST 0xc8
-#define APBC_PWM13_CLK_RST 0xcc
-#define APBC_PWM14_CLK_RST 0xd0
-#define APBC_PWM15_CLK_RST 0xd4
-#define APBC_PWM16_CLK_RST 0xd8
-#define APBC_PWM17_CLK_RST 0xdc
-#define APBC_PWM18_CLK_RST 0xe0
-#define APBC_PWM19_CLK_RST 0xe4
-
-/* APMU register offset */
-#define APMU_JPG_CLK_RES_CTRL 0x020
-#define APMU_CSI_CCIC2_CLK_RES_CTRL 0x024
-#define APMU_ISP_CLK_RES_CTRL 0x038
-#define APMU_LCD_CLK_RES_CTRL1 0x044
-#define APMU_LCD_SPI_CLK_RES_CTRL 0x048
-#define APMU_LCD_CLK_RES_CTRL2 0x04c
-#define APMU_CCIC_CLK_RES_CTRL 0x050
-#define APMU_SDH0_CLK_RES_CTRL 0x054
-#define APMU_SDH1_CLK_RES_CTRL 0x058
-#define APMU_USB_CLK_RES_CTRL 0x05c
-#define APMU_QSPI_CLK_RES_CTRL 0x060
-#define APMU_DMA_CLK_RES_CTRL 0x064
-#define APMU_AES_CLK_RES_CTRL 0x068
-#define APMU_VPU_CLK_RES_CTRL 0x0a4
-#define APMU_GPU_CLK_RES_CTRL 0x0cc
-#define APMU_SDH2_CLK_RES_CTRL 0x0e0
-#define APMU_PMUA_MC_CTRL 0x0e8
-#define APMU_PMU_CC2_AP 0x100
-#define APMU_PMUA_EM_CLK_RES_CTRL 0x104
-#define APMU_AUDIO_CLK_RES_CTRL 0x14c
-#define APMU_HDMI_CLK_RES_CTRL 0x1b8
-#define APMU_CCI550_CLK_CTRL 0x300
-#define APMU_ACLK_CLK_CTRL 0x388
-#define APMU_CPU_C0_CLK_CTRL 0x38C
-#define APMU_CPU_C1_CLK_CTRL 0x390
-#define APMU_PCIE_CLK_RES_CTRL_0 0x3cc
-#define APMU_PCIE_CLK_RES_CTRL_1 0x3d4
-#define APMU_PCIE_CLK_RES_CTRL_2 0x3dc
-#define APMU_EMAC0_CLK_RES_CTRL 0x3e4
-#define APMU_EMAC1_CLK_RES_CTRL 0x3ec
-
struct spacemit_ccu_data {
+ const char *reset_name;
struct clk_hw **hws;
size_t num;
};
+static DEFINE_IDA(auxiliary_ids);
+
/* APBS clocks start; the APBS region contains all PLL clocks and nothing else */
/*
@@ -170,7 +67,8 @@ CCU_FACTOR_GATE_DEFINE(pll1_d4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(3), 4,
CCU_FACTOR_GATE_DEFINE(pll1_d5, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(4), 5, 1);
CCU_FACTOR_GATE_DEFINE(pll1_d6, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(5), 6, 1);
CCU_FACTOR_GATE_DEFINE(pll1_d7, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(6), 7, 1);
-CCU_FACTOR_GATE_DEFINE(pll1_d8, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(7), 8, 1);
+CCU_FACTOR_GATE_FLAGS_DEFINE(pll1_d8, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(7), 8, 1,
+ CLK_IS_CRITICAL);
CCU_FACTOR_GATE_DEFINE(pll1_d11_223p4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(15), 11, 1);
CCU_FACTOR_GATE_DEFINE(pll1_d13_189, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(16), 13, 1);
CCU_FACTOR_GATE_DEFINE(pll1_d23_106p8, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(20), 23, 1);
@@ -238,13 +136,33 @@ CCU_GATE_DEFINE(pll1_d3_819p2, CCU_PARENT_HW(pll1_d3), MPMU_ACGR, BIT(14), 0);
CCU_GATE_DEFINE(pll1_d2_1228p8, CCU_PARENT_HW(pll1_d2), MPMU_ACGR, BIT(16), 0);
CCU_GATE_DEFINE(slow_uart, CCU_PARENT_NAME(osc), MPMU_ACGR, BIT(1), CLK_IGNORE_UNUSED);
-CCU_DDN_DEFINE(slow_uart1_14p74, pll1_d16_153p6, MPMU_SUCCR, 16, 13, 0, 13, 0);
-CCU_DDN_DEFINE(slow_uart2_48, pll1_d4_614p4, MPMU_SUCCR_1, 16, 13, 0, 13, 0);
+CCU_DDN_DEFINE(slow_uart1_14p74, pll1_d16_153p6, MPMU_SUCCR, 16, 13, 0, 13, 2, 0);
+CCU_DDN_DEFINE(slow_uart2_48, pll1_d4_614p4, MPMU_SUCCR_1, 16, 13, 0, 13, 2, 0);
CCU_GATE_DEFINE(wdt_clk, CCU_PARENT_HW(pll1_d96_25p6), MPMU_WDTPCR, BIT(1), 0);
-CCU_FACTOR_GATE_DEFINE(i2s_sysclk, CCU_PARENT_HW(pll1_d16_153p6), MPMU_ISCCR, BIT(31), 50, 1);
-CCU_FACTOR_GATE_DEFINE(i2s_bclk, CCU_PARENT_HW(i2s_sysclk), MPMU_ISCCR, BIT(29), 1, 1);
+CCU_FACTOR_DEFINE(i2s_153p6, CCU_PARENT_HW(pll1_d8_307p2), 2, 1);
+
+static const struct clk_parent_data i2s_153p6_base_parents[] = {
+ CCU_PARENT_HW(i2s_153p6),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DEFINE(i2s_153p6_base, i2s_153p6_base_parents, MPMU_FCCR, 29, 1, 0);
+
+static const struct clk_parent_data i2s_sysclk_src_parents[] = {
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(i2s_153p6_base)
+};
+CCU_MUX_GATE_DEFINE(i2s_sysclk_src, i2s_sysclk_src_parents, MPMU_ISCCR, 30, 1, BIT(31), 0);
+
+CCU_DDN_DEFINE(i2s_sysclk, i2s_sysclk_src, MPMU_ISCCR, 0, 15, 15, 12, 1, 0);
+
+CCU_FACTOR_DEFINE(i2s_bclk_factor, CCU_PARENT_HW(i2s_sysclk), 2, 1);
+/*
+ * The divider for i2s_bclk always implies an additional fixed 1/2
+ * factor, which is modelled here by i2s_bclk_factor.
+ */
+CCU_DIV_GATE_DEFINE(i2s_bclk, CCU_PARENT_HW(i2s_bclk_factor), MPMU_ISCCR, 27, 2, BIT(29), 0);
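
Taken together, the definitions above produce the following i2s clock chain (a sketch derived from this hunk alone):

/*
 *   pll1_d8_307p2 --/2--> i2s_153p6
 *   { i2s_153p6, pll1_d8_307p2 } --mux--> i2s_153p6_base
 *   { pll1_d96_25p6, i2s_153p6_base } --mux+gate--> i2s_sysclk_src
 *   i2s_sysclk_src --DDN divider--> i2s_sysclk
 *   i2s_sysclk --/2--> i2s_bclk_factor --div+gate--> i2s_bclk
 */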
static const struct clk_parent_data apb_parents[] = {
CCU_PARENT_HW(pll1_d96_25p6),
@@ -349,7 +267,14 @@ CCU_GATE_DEFINE(aib_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_AIB_CLK_RST, BIT(1),
CCU_GATE_DEFINE(onewire_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_ONEWIRE_CLK_RST, BIT(1), 0);
-static const struct clk_parent_data sspa_parents[] = {
+/*
+ * When i2s_bclk is selected as the parent clock of an SSPA
+ * instance, the hardware requires bit 3 to be set.
+ */
+CCU_GATE_DEFINE(sspa0_i2s_bclk, CCU_PARENT_HW(i2s_bclk), APBC_SSPA0_CLK_RST, BIT(3), 0);
+CCU_GATE_DEFINE(sspa1_i2s_bclk, CCU_PARENT_HW(i2s_bclk), APBC_SSPA1_CLK_RST, BIT(3), 0);
+
+static const struct clk_parent_data sspa0_parents[] = {
CCU_PARENT_HW(pll1_d384_6p4),
CCU_PARENT_HW(pll1_d192_12p8),
CCU_PARENT_HW(pll1_d96_25p6),
@@ -357,10 +282,22 @@ static const struct clk_parent_data sspa_parents[] = {
CCU_PARENT_HW(pll1_d768_3p2),
CCU_PARENT_HW(pll1_d1536_1p6),
CCU_PARENT_HW(pll1_d3072_0p8),
- CCU_PARENT_HW(i2s_bclk),
+ CCU_PARENT_HW(sspa0_i2s_bclk),
};
-CCU_MUX_GATE_DEFINE(sspa0_clk, sspa_parents, APBC_SSPA0_CLK_RST, 4, 3, BIT(1), 0);
-CCU_MUX_GATE_DEFINE(sspa1_clk, sspa_parents, APBC_SSPA1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(sspa0_clk, sspa0_parents, APBC_SSPA0_CLK_RST, 4, 3, BIT(1), 0);
+
+static const struct clk_parent_data sspa1_parents[] = {
+ CCU_PARENT_HW(pll1_d384_6p4),
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d768_3p2),
+ CCU_PARENT_HW(pll1_d1536_1p6),
+ CCU_PARENT_HW(pll1_d3072_0p8),
+ CCU_PARENT_HW(sspa1_i2s_bclk),
+};
+CCU_MUX_GATE_DEFINE(sspa1_clk, sspa1_parents, APBC_SSPA1_CLK_RST, 4, 3, BIT(1), 0);
+
CCU_GATE_DEFINE(dro_clk, CCU_PARENT_HW(apb_clk), APBC_DRO_CLK_RST, BIT(1), 0);
CCU_GATE_DEFINE(ir_clk, CCU_PARENT_HW(apb_clk), APBC_IR_CLK_RST, BIT(1), 0);
CCU_GATE_DEFINE(tsen_clk, CCU_PARENT_HW(apb_clk), APBC_TSEN_CLK_RST, BIT(1), 0);
@@ -819,8 +756,9 @@ static struct clk_hw *k1_ccu_pll_hws[] = {
};
static const struct spacemit_ccu_data k1_ccu_pll_data = {
- .hws = k1_ccu_pll_hws,
- .num = ARRAY_SIZE(k1_ccu_pll_hws),
+ /* The PLL CCU implements no resets */
+ .hws = k1_ccu_pll_hws,
+ .num = ARRAY_SIZE(k1_ccu_pll_hws),
};
static struct clk_hw *k1_ccu_mpmu_hws[] = {
@@ -857,11 +795,16 @@ static struct clk_hw *k1_ccu_mpmu_hws[] = {
[CLK_I2S_BCLK] = &i2s_bclk.common.hw,
[CLK_APB] = &apb_clk.common.hw,
[CLK_WDT_BUS] = &wdt_bus_clk.common.hw,
+ [CLK_I2S_153P6] = &i2s_153p6.common.hw,
+ [CLK_I2S_153P6_BASE] = &i2s_153p6_base.common.hw,
+ [CLK_I2S_SYSCLK_SRC] = &i2s_sysclk_src.common.hw,
+ [CLK_I2S_BCLK_FACTOR] = &i2s_bclk_factor.common.hw,
};
static const struct spacemit_ccu_data k1_ccu_mpmu_data = {
- .hws = k1_ccu_mpmu_hws,
- .num = ARRAY_SIZE(k1_ccu_mpmu_hws),
+ .reset_name = "mpmu-reset",
+ .hws = k1_ccu_mpmu_hws,
+ .num = ARRAY_SIZE(k1_ccu_mpmu_hws),
};
static struct clk_hw *k1_ccu_apbc_hws[] = {
@@ -965,11 +908,14 @@ static struct clk_hw *k1_ccu_apbc_hws[] = {
[CLK_SSPA1_BUS] = &sspa1_bus_clk.common.hw,
[CLK_TSEN_BUS] = &tsen_bus_clk.common.hw,
[CLK_IPC_AP2AUD_BUS] = &ipc_ap2aud_bus_clk.common.hw,
+ [CLK_SSPA0_I2S_BCLK] = &sspa0_i2s_bclk.common.hw,
+ [CLK_SSPA1_I2S_BCLK] = &sspa1_i2s_bclk.common.hw,
};
static const struct spacemit_ccu_data k1_ccu_apbc_data = {
- .hws = k1_ccu_apbc_hws,
- .num = ARRAY_SIZE(k1_ccu_apbc_hws),
+ .reset_name = "apbc-reset",
+ .hws = k1_ccu_apbc_hws,
+ .num = ARRAY_SIZE(k1_ccu_apbc_hws),
};
static struct clk_hw *k1_ccu_apmu_hws[] = {
@@ -1038,8 +984,21 @@ static struct clk_hw *k1_ccu_apmu_hws[] = {
};
static const struct spacemit_ccu_data k1_ccu_apmu_data = {
- .hws = k1_ccu_apmu_hws,
- .num = ARRAY_SIZE(k1_ccu_apmu_hws),
+ .reset_name = "apmu-reset",
+ .hws = k1_ccu_apmu_hws,
+ .num = ARRAY_SIZE(k1_ccu_apmu_hws),
+};
+
+static const struct spacemit_ccu_data k1_ccu_rcpu_data = {
+ .reset_name = "rcpu-reset",
+};
+
+static const struct spacemit_ccu_data k1_ccu_rcpu2_data = {
+ .reset_name = "rcpu2-reset",
+};
+
+static const struct spacemit_ccu_data k1_ccu_apbc2_data = {
+ .reset_name = "apbc2-reset",
};
static int spacemit_ccu_register(struct device *dev,
@@ -1050,11 +1009,17 @@ static int spacemit_ccu_register(struct device *dev,
struct clk_hw_onecell_data *clk_data;
int i, ret;
+ /* Nothing to do if the CCU does not implement any clocks */
+ if (!data->hws)
+ return 0;
+
clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, data->num),
GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
+ clk_data->num = data->num;
+
for (i = 0; i < data->num; i++) {
struct clk_hw *hw = data->hws[i];
struct ccu_common *common;
@@ -1081,8 +1046,6 @@ static int spacemit_ccu_register(struct device *dev,
clk_data->hws[i] = hw;
}
- clk_data->num = data->num;
-
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
if (ret)
dev_err(dev, "failed to add clock hardware provider (%d)\n", ret);
@@ -1090,9 +1053,74 @@ static int spacemit_ccu_register(struct device *dev,
return ret;
}
+static void spacemit_cadev_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ ida_free(&auxiliary_ids, adev->id);
+ kfree(to_spacemit_ccu_adev(adev));
+}
+
+static void spacemit_adev_unregister(void *data)
+{
+ struct auxiliary_device *adev = data;
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+static int spacemit_ccu_reset_register(struct device *dev,
+ struct regmap *regmap,
+ const char *reset_name)
+{
+ struct spacemit_ccu_adev *cadev;
+ struct auxiliary_device *adev;
+ int ret;
+
+ /* Nothing to do if the CCU does not implement a reset controller */
+ if (!reset_name)
+ return 0;
+
+ cadev = kzalloc(sizeof(*cadev), GFP_KERNEL);
+ if (!cadev)
+ return -ENOMEM;
+
+ cadev->regmap = regmap;
+
+ adev = &cadev->adev;
+ adev->name = reset_name;
+ adev->dev.parent = dev;
+ adev->dev.release = spacemit_cadev_release;
+ adev->dev.of_node = dev->of_node;
+ ret = ida_alloc(&auxiliary_ids, GFP_KERNEL);
+ if (ret < 0)
+ goto err_free_cadev;
+ adev->id = ret;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ goto err_free_aux_id;
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(dev, spacemit_adev_unregister, adev);
+
+err_free_aux_id:
+ ida_free(&auxiliary_ids, adev->id);
+err_free_cadev:
+ kfree(cadev);
+
+ return ret;
+}
+
static int k1_ccu_probe(struct platform_device *pdev)
{
struct regmap *base_regmap, *lock_regmap = NULL;
+ const struct spacemit_ccu_data *data;
struct device *dev = &pdev->dev;
int ret;
@@ -1121,11 +1149,16 @@ static int k1_ccu_probe(struct platform_device *pdev)
"failed to get lock regmap\n");
}
- ret = spacemit_ccu_register(dev, base_regmap, lock_regmap,
- of_device_get_match_data(dev));
+ data = of_device_get_match_data(dev);
+
+ ret = spacemit_ccu_register(dev, base_regmap, lock_regmap, data);
if (ret)
return dev_err_probe(dev, ret, "failed to register clocks\n");
+ ret = spacemit_ccu_reset_register(dev, base_regmap, data->reset_name);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register resets\n");
+
return 0;
}
@@ -1146,6 +1179,18 @@ static const struct of_device_id of_k1_ccu_match[] = {
.compatible = "spacemit,k1-syscon-apmu",
.data = &k1_ccu_apmu_data,
},
+ {
+ .compatible = "spacemit,k1-syscon-rcpu",
+ .data = &k1_ccu_rcpu_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-rcpu2",
+ .data = &k1_ccu_rcpu2_data,
+ },
+ {
+ .compatible = "spacemit,k1-syscon-apbc2",
+ .data = &k1_ccu_apbc2_data,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, of_k1_ccu_match);
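
The reset halves declared via reset_name are expected to bind as separate drivers on the auxiliary bus. A hedged sketch of that counterpart (the module-name prefix in the id string and all k1_reset_* names are assumptions; only struct spacemit_ccu_adev and to_spacemit_ccu_adev() come from this patch):

#include <linux/auxiliary_bus.h>
#include <linux/module.h>
#include <soc/spacemit/k1-syscon.h>

static int k1_reset_probe(struct auxiliary_device *adev,
			  const struct auxiliary_device_id *id)
{
	struct spacemit_ccu_adev *cadev = to_spacemit_ccu_adev(adev);

	/* the shared CCU regmap is handed to the reset controller here */
	dev_set_drvdata(&adev->dev, cadev->regmap);

	return 0;
}

static const struct auxiliary_device_id k1_reset_ids[] = {
	/* matched as "<ccu module name>.<reset_name>"; prefix assumed */
	{ .name = "spacemit_ccu_k1.mpmu-reset" },
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, k1_reset_ids);

static struct auxiliary_driver k1_reset_driver = {
	.probe	  = k1_reset_probe,
	.id_table = k1_reset_ids,
};
module_auxiliary_driver(k1_reset_driver);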
diff --git a/drivers/clk/spacemit/ccu_ddn.c b/drivers/clk/spacemit/ccu_ddn.c
index be311b045698..5b16e273bee5 100644
--- a/drivers/clk/spacemit/ccu_ddn.c
+++ b/drivers/clk/spacemit/ccu_ddn.c
@@ -22,30 +22,33 @@
#include "ccu_ddn.h"
-static unsigned long ccu_ddn_calc_rate(unsigned long prate,
- unsigned long num, unsigned long den)
+static unsigned long ccu_ddn_calc_rate(unsigned long prate, unsigned long num,
+ unsigned long den, unsigned int pre_div)
{
- return prate * den / 2 / num;
+ return prate * den / pre_div / num;
}
static unsigned long ccu_ddn_calc_best_rate(struct ccu_ddn *ddn,
unsigned long rate, unsigned long prate,
unsigned long *num, unsigned long *den)
{
- rational_best_approximation(rate, prate / 2,
+ rational_best_approximation(rate, prate / ddn->pre_div,
ddn->den_mask >> ddn->den_shift,
ddn->num_mask >> ddn->num_shift,
den, num);
- return ccu_ddn_calc_rate(prate, *num, *den);
+ return ccu_ddn_calc_rate(prate, *num, *den, ddn->pre_div);
}
-static long ccu_ddn_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ccu_ddn_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
unsigned long num, den;
- return ccu_ddn_calc_best_rate(ddn, rate, *prate, &num, &den);
+ req->rate = ccu_ddn_calc_best_rate(ddn, req->rate,
+ req->best_parent_rate, &num, &den);
+
+ return 0;
}
static unsigned long ccu_ddn_recalc_rate(struct clk_hw *hw, unsigned long prate)
@@ -58,7 +61,7 @@ static unsigned long ccu_ddn_recalc_rate(struct clk_hw *hw, unsigned long prate)
num = (val & ddn->num_mask) >> ddn->num_shift;
den = (val & ddn->den_mask) >> ddn->den_shift;
- return ccu_ddn_calc_rate(prate, num, den);
+ return ccu_ddn_calc_rate(prate, num, den, ddn->pre_div);
}
static int ccu_ddn_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -78,6 +81,6 @@ static int ccu_ddn_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops spacemit_ccu_ddn_ops = {
.recalc_rate = ccu_ddn_recalc_rate,
- .round_rate = ccu_ddn_round_rate,
+ .determine_rate = ccu_ddn_determine_rate,
.set_rate = ccu_ddn_set_rate,
};
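
A worked example of the new formula, using slow_uart1_14p74 from ccu-k1.c above (pre_div = 2, parent pll1_d16_153p6 = 153,600,000 Hz); den = 24, num = 125 is what rational_best_approximation() yields for a 14.7456 MHz UART target within the 13-bit fields:

/*
 *   rate = prate * den / pre_div / num
 *        = 153600000 * 24 / 2 / 125
 *        = 14745600 Hz
 *
 * which is the "14p74" encoded in the clock's name.
 */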
diff --git a/drivers/clk/spacemit/ccu_ddn.h b/drivers/clk/spacemit/ccu_ddn.h
index a52fabe77d62..4838414a8e8d 100644
--- a/drivers/clk/spacemit/ccu_ddn.h
+++ b/drivers/clk/spacemit/ccu_ddn.h
@@ -18,13 +18,14 @@ struct ccu_ddn {
unsigned int num_shift;
unsigned int den_mask;
unsigned int den_shift;
+ unsigned int pre_div;
};
#define CCU_DDN_INIT(_name, _parent, _flags) \
CLK_HW_INIT_HW(#_name, &_parent.common.hw, &spacemit_ccu_ddn_ops, _flags)
#define CCU_DDN_DEFINE(_name, _parent, _reg_ctrl, _num_shift, _num_width, \
- _den_shift, _den_width, _flags) \
+ _den_shift, _den_width, _pre_div, _flags) \
static struct ccu_ddn _name = { \
.common = { \
.reg_ctrl = _reg_ctrl, \
@@ -33,7 +34,8 @@ static struct ccu_ddn _name = { \
.num_mask = GENMASK(_num_shift + _num_width - 1, _num_shift), \
.num_shift = _num_shift, \
.den_mask = GENMASK(_den_shift + _den_width - 1, _den_shift), \
- .den_shift = _den_shift, \
+ .den_shift = _den_shift, \
+ .pre_div = _pre_div, \
}
static inline struct ccu_ddn *hw_to_ccu_ddn(struct clk_hw *hw)
diff --git a/drivers/clk/spacemit/ccu_mix.c b/drivers/clk/spacemit/ccu_mix.c
index 9b852aa61f78..7b7990875372 100644
--- a/drivers/clk/spacemit/ccu_mix.c
+++ b/drivers/clk/spacemit/ccu_mix.c
@@ -80,10 +80,12 @@ static int ccu_mix_trigger_fc(struct clk_hw *hw)
MIX_FC_TIMEOUT_US);
}
-static long ccu_factor_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ccu_factor_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return ccu_factor_recalc_rate(hw, *prate);
+ req->rate = ccu_factor_recalc_rate(hw, req->best_parent_rate);
+
+ return 0;
}
static int ccu_factor_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -198,7 +200,7 @@ const struct clk_ops spacemit_ccu_gate_ops = {
};
const struct clk_ops spacemit_ccu_factor_ops = {
- .round_rate = ccu_factor_round_rate,
+ .determine_rate = ccu_factor_determine_rate,
.recalc_rate = ccu_factor_recalc_rate,
.set_rate = ccu_factor_set_rate,
};
@@ -220,7 +222,7 @@ const struct clk_ops spacemit_ccu_factor_gate_ops = {
.enable = ccu_gate_enable,
.is_enabled = ccu_gate_is_enabled,
- .round_rate = ccu_factor_round_rate,
+ .determine_rate = ccu_factor_determine_rate,
.recalc_rate = ccu_factor_recalc_rate,
.set_rate = ccu_factor_set_rate,
};
diff --git a/drivers/clk/spacemit/ccu_mix.h b/drivers/clk/spacemit/ccu_mix.h
index 51d19f5d6aac..c406508e3504 100644
--- a/drivers/clk/spacemit/ccu_mix.h
+++ b/drivers/clk/spacemit/ccu_mix.h
@@ -101,17 +101,22 @@ static struct ccu_mix _name = { \
} \
}
-#define CCU_FACTOR_GATE_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _div, \
- _mul) \
+#define CCU_FACTOR_GATE_FLAGS_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _div, \
+ _mul, _flags) \
static struct ccu_mix _name = { \
.gate = CCU_GATE_INIT(_mask_gate), \
.factor = CCU_FACTOR_INIT(_div, _mul), \
.common = { \
.reg_ctrl = _reg_ctrl, \
- CCU_MIX_INITHW(_name, _parent, spacemit_ccu_factor_gate_ops, 0) \
+ CCU_MIX_INITHW(_name, _parent, spacemit_ccu_factor_gate_ops, _flags) \
} \
}
+#define CCU_FACTOR_GATE_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _div, \
+ _mul) \
+ CCU_FACTOR_GATE_FLAGS_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _div, \
+ _mul, 0)
+
#define CCU_MUX_GATE_DEFINE(_name, _parents, _reg_ctrl, _shift, _width, \
_mask_gate, _flags) \
static struct ccu_mix _name = { \
@@ -215,4 +220,4 @@ extern const struct clk_ops spacemit_ccu_div_gate_ops;
extern const struct clk_ops spacemit_ccu_mux_gate_ops;
extern const struct clk_ops spacemit_ccu_mux_div_ops;
extern const struct clk_ops spacemit_ccu_mux_div_gate_ops;
-#endif /* _CCU_DIV_H_ */
+#endif /* _CCU_MIX_H_ */
diff --git a/drivers/clk/spacemit/ccu_pll.c b/drivers/clk/spacemit/ccu_pll.c
index 4427dcfbbb97..d92f0dae65a4 100644
--- a/drivers/clk/spacemit/ccu_pll.c
+++ b/drivers/clk/spacemit/ccu_pll.c
@@ -122,15 +122,17 @@ static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw,
WARN_ON_ONCE(!entry);
- return entry ? entry->rate : -EINVAL;
+ return entry ? entry->rate : 0;
}
-static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ccu_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_pll *pll = hw_to_ccu_pll(hw);
- return ccu_pll_lookup_best_rate(pll, rate)->rate;
+ req->rate = ccu_pll_lookup_best_rate(pll, req->rate)->rate;
+
+ return 0;
}
static int ccu_pll_init(struct clk_hw *hw)
@@ -152,6 +154,6 @@ const struct clk_ops spacemit_ccu_pll_ops = {
.disable = ccu_pll_disable,
.set_rate = ccu_pll_set_rate,
.recalc_rate = ccu_pll_recalc_rate,
- .round_rate = ccu_pll_round_rate,
+ .determine_rate = ccu_pll_determine_rate,
.is_enabled = ccu_pll_is_enabled,
};
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
index 637938e804f8..d0d063147af8 100644
--- a/drivers/clk/spear/clk-aux-synth.c
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -49,14 +49,16 @@ static unsigned long aux_calc_rate(struct clk_hw *hw, unsigned long prate,
(rtbl[index].yscale * eq)) * 10000;
}
-static long clk_aux_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_aux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_aux *aux = to_clk_aux(hw);
int unused;
- return clk_round_rate_index(hw, drate, *prate, aux_calc_rate,
- aux->rtbl_cnt, &unused);
+ req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate,
+ aux_calc_rate, aux->rtbl_cnt, &unused);
+
+ return 0;
}
static unsigned long clk_aux_recalc_rate(struct clk_hw *hw,
@@ -127,7 +129,7 @@ static int clk_aux_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_aux_ops = {
.recalc_rate = clk_aux_recalc_rate,
- .round_rate = clk_aux_round_rate,
+ .determine_rate = clk_aux_determine_rate,
.set_rate = clk_aux_set_rate,
};
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
index 2380df293a2c..150f051d28e0 100644
--- a/drivers/clk/spear/clk-frac-synth.c
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -52,14 +52,16 @@ static unsigned long frac_calc_rate(struct clk_hw *hw, unsigned long prate,
return prate;
}
-static long clk_frac_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_frac_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_frac *frac = to_clk_frac(hw);
int unused;
- return clk_round_rate_index(hw, drate, *prate, frac_calc_rate,
- frac->rtbl_cnt, &unused);
+ req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate,
+ frac_calc_rate, frac->rtbl_cnt, &unused);
+
+ return 0;
}
static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
@@ -115,7 +117,7 @@ static int clk_frac_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_frac_ops = {
.recalc_rate = clk_frac_recalc_rate,
- .round_rate = clk_frac_round_rate,
+ .determine_rate = clk_frac_determine_rate,
.set_rate = clk_frac_set_rate,
};
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
index 4ef747c2abbb..cf9659dc9073 100644
--- a/drivers/clk/spear/clk-gpt-synth.c
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -39,14 +39,16 @@ static unsigned long gpt_calc_rate(struct clk_hw *hw, unsigned long prate,
return prate;
}
-static long clk_gpt_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_gpt_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_gpt *gpt = to_clk_gpt(hw);
int unused;
- return clk_round_rate_index(hw, drate, *prate, gpt_calc_rate,
- gpt->rtbl_cnt, &unused);
+ req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate,
+ gpt_calc_rate, gpt->rtbl_cnt, &unused);
+
+ return 0;
}
static unsigned long clk_gpt_recalc_rate(struct clk_hw *hw,
@@ -104,7 +106,7 @@ static int clk_gpt_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_gpt_ops = {
.recalc_rate = clk_gpt_recalc_rate,
- .round_rate = clk_gpt_round_rate,
+ .determine_rate = clk_gpt_determine_rate,
.set_rate = clk_gpt_set_rate,
};
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
index 348eeab0a906..723a6eb67754 100644
--- a/drivers/clk/spear/clk-vco-pll.c
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -110,12 +110,15 @@ static long clk_pll_round_rate_index(struct clk_hw *hw, unsigned long drate,
return rate;
}
-static long clk_pll_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int unused;
- return clk_pll_round_rate_index(hw, drate, prate, &unused);
+ req->rate = clk_pll_round_rate_index(hw, req->rate,
+ &req->best_parent_rate, &unused);
+
+ return 0;
}
static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, unsigned long
@@ -164,7 +167,7 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_pll_ops = {
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
};
@@ -176,14 +179,16 @@ static inline unsigned long vco_calc_rate(struct clk_hw *hw,
return pll_calc_rate(vco->rtbl, prate, index, NULL);
}
-static long clk_vco_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_vco_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_vco *vco = to_clk_vco(hw);
int unused;
- return clk_round_rate_index(hw, drate, *prate, vco_calc_rate,
- vco->rtbl_cnt, &unused);
+ req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate,
+ vco_calc_rate, vco->rtbl_cnt, &unused);
+
+ return 0;
}
static unsigned long clk_vco_recalc_rate(struct clk_hw *hw,
@@ -265,7 +270,7 @@ static int clk_vco_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_vco_ops = {
.recalc_rate = clk_vco_recalc_rate,
- .round_rate = clk_vco_round_rate,
+ .determine_rate = clk_vco_determine_rate,
.set_rate = clk_vco_set_rate,
};
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index 361d344bfaf0..fdfb26c67188 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -199,7 +199,7 @@ static struct frac_rate_tbl amba_synth_rtbl[] = {
* We can program this synthesizer to make cpu run on different clock
* frequencies.
* Following table provides configuration values to let cpu run on 200,
- * 250, 332, 400 or 500 MHz considering different possibilites of input
+ * 250, 332, 400 or 500 MHz considering different possibilities of input
* (vco1div2) clock.
*
* --------------------------------------------------------------------
diff --git a/drivers/clk/sprd/div.c b/drivers/clk/sprd/div.c
index 936782c24127..013423881968 100644
--- a/drivers/clk/sprd/div.c
+++ b/drivers/clk/sprd/div.c
@@ -9,13 +9,16 @@
#include "div.h"
-static long sprd_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sprd_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sprd_div *cd = hw_to_sprd_div(hw);
- return divider_round_rate(&cd->common.hw, rate, parent_rate, NULL,
- cd->div.width, 0);
+ req->rate = divider_round_rate(&cd->common.hw, req->rate,
+ &req->best_parent_rate,
+ NULL, cd->div.width, 0);
+
+ return 0;
}
unsigned long sprd_div_helper_recalc_rate(struct sprd_clk_common *common,
@@ -75,7 +78,7 @@ static int sprd_div_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops sprd_div_ops = {
.recalc_rate = sprd_div_recalc_rate,
- .round_rate = sprd_div_round_rate,
+ .determine_rate = sprd_div_determine_rate,
.set_rate = sprd_div_set_rate,
};
EXPORT_SYMBOL_GPL(sprd_div_ops);
diff --git a/drivers/clk/sprd/gate.h b/drivers/clk/sprd/gate.h
index e738dafa4fe9..775519eb1cb6 100644
--- a/drivers/clk/sprd/gate.h
+++ b/drivers/clk/sprd/gate.h
@@ -26,7 +26,7 @@ struct sprd_gate {
* CLK_GATE_BIG_ENDIAN BIT(2)
* so we define new flags from BIT(3)
*/
-#define SPRD_GATE_NON_AON BIT(3) /* not alway powered on, check before read */
+#define SPRD_GATE_NON_AON BIT(3) /* not always powered on, check before read */
#define SPRD_SC_GATE_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \
_sc_offset, _enable_mask, _flags, \
diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c
index 13a322b2535a..bc6610d5fcb7 100644
--- a/drivers/clk/sprd/pll.c
+++ b/drivers/clk/sprd/pll.c
@@ -254,16 +254,16 @@ static int sprd_pll_clk_prepare(struct clk_hw *hw)
return 0;
}
-static long sprd_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int sprd_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return rate;
+ return 0;
}
const struct clk_ops sprd_pll_ops = {
.prepare = sprd_pll_clk_prepare,
.recalc_rate = sprd_pll_recalc_rate,
- .round_rate = sprd_pll_round_rate,
+ .determine_rate = sprd_pll_determine_rate,
.set_rate = sprd_pll_set_rate,
};
EXPORT_SYMBOL_GPL(sprd_pll_ops);
diff --git a/drivers/clk/sprd/sc9860-clk.c b/drivers/clk/sprd/sc9860-clk.c
index cc5ed2dd8267..d7fe924fbe97 100644
--- a/drivers/clk/sprd/sc9860-clk.c
+++ b/drivers/clk/sprd/sc9860-clk.c
@@ -2021,17 +2021,13 @@ MODULE_DEVICE_TABLE(of, sprd_sc9860_clk_ids);
static int sc9860_clk_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
const struct sprd_clk_desc *desc;
int ret;
- match = of_match_node(sprd_sc9860_clk_ids, pdev->dev.of_node);
- if (!match) {
- pr_err("%s: of_match_node() failed", __func__);
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
return -ENODEV;
- }
- desc = match->data;
ret = sprd_clk_regmap_init(pdev, desc);
if (ret)
return ret;
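
The probe cleanup above replaces the open-coded of_match_node() lookup with device_get_match_data(), which resolves the matched of_device_id and hands back its .data pointer in one call (declared in <linux/property.h>). A hedged sketch of the idiom, with my_probe and struct my_desc as placeholder names:

#include <linux/platform_device.h>
#include <linux/property.h>

static int my_probe(struct platform_device *pdev)
{
	/* hypothetical per-SoC descriptor attached via of_device_id .data */
	const struct my_desc *desc;

	desc = device_get_match_data(&pdev->dev);
	if (!desc)
		return -ENODEV;

	return 0;
}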
diff --git a/drivers/clk/sprd/ums512-clk.c b/drivers/clk/sprd/ums512-clk.c
index 9384ecc6c741..f763d83de9ee 100644
--- a/drivers/clk/sprd/ums512-clk.c
+++ b/drivers/clk/sprd/ums512-clk.c
@@ -1550,7 +1550,7 @@ static struct sprd_clk_desc ums512_aon_gate_desc = {
/* audcp apb gates */
/* Audcp apb clocks configure CLK_IGNORE_UNUSED because these clocks may be
- * controlled by audcp sys at the same time. It may be cause an execption if
+ * controlled by audcp sys at the same time. It may cause an exception if
* kernel gates these clock.
*/
static SPRD_SC_GATE_CLK_HW(audcp_wdg_eb, "audcp-wdg-eb",
@@ -1592,7 +1592,7 @@ static const struct sprd_clk_desc ums512_audcpapb_gate_desc = {
/* audcp ahb gates */
/* Audcp aphb clocks configure CLK_IGNORE_UNUSED because these clocks may be
- * controlled by audcp sys at the same time. It may be cause an execption if
+ * controlled by audcp sys at the same time. It may cause an exception if
* kernel gates these clock.
*/
static SPRD_SC_GATE_CLK_HW(audcp_iis0_eb, "audcp-iis0-eb",
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 5292208c4dd8..e8e7626c76db 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -303,16 +303,6 @@ static const struct clkgen_data clkgen_video = {
.mode = 1,
};
-static const struct clkgen_clk_out clkgen_stih407_a0_clk_out[] = {
- /* This clk needs to be on so that memory interface is accessible */
- { .name = "clk-ic-lmi0", .flags = CLK_IS_CRITICAL },
-};
-
-static const struct clkgen_data clkgen_stih407_a0 = {
- .outputs = clkgen_stih407_a0_clk_out,
- .outputs_nb = ARRAY_SIZE(clkgen_stih407_a0_clk_out),
-};
-
static const struct clkgen_clk_out clkgen_stih410_a0_clk_out[] = {
/* Those clks need to be on so that memory interface is accessible */
{ .name = "clk-ic-lmi0", .flags = CLK_IS_CRITICAL },
@@ -324,51 +314,6 @@ static const struct clkgen_data clkgen_stih410_a0 = {
.outputs_nb = ARRAY_SIZE(clkgen_stih410_a0_clk_out),
};
-static const struct clkgen_clk_out clkgen_stih407_c0_clk_out[] = {
- { .name = "clk-icn-gpu", },
- { .name = "clk-fdma", },
- { .name = "clk-nand", },
- { .name = "clk-hva", },
- { .name = "clk-proc-stfe", },
- { .name = "clk-proc-tp", },
- { .name = "clk-rx-icn-dmu", },
- { .name = "clk-rx-icn-hva", },
- /* This clk needs to be on to keep bus interconnect alive */
- { .name = "clk-icn-cpu", .flags = CLK_IS_CRITICAL },
- /* This clk needs to be on to keep bus interconnect alive */
- { .name = "clk-tx-icn-dmu", .flags = CLK_IS_CRITICAL },
- { .name = "clk-mmc-0", },
- { .name = "clk-mmc-1", },
- { .name = "clk-jpegdec", },
- /* This clk needs to be on to keep A9 running */
- { .name = "clk-ext2fa9", .flags = CLK_IS_CRITICAL },
- { .name = "clk-ic-bdisp-0", },
- { .name = "clk-ic-bdisp-1", },
- { .name = "clk-pp-dmu", },
- { .name = "clk-vid-dmu", },
- { .name = "clk-dss-lpc", },
- { .name = "clk-st231-aud-0", },
- { .name = "clk-st231-gp-1", },
- { .name = "clk-st231-dmu", },
- /* This clk needs to be on to keep bus interconnect alive */
- { .name = "clk-icn-lmi", .flags = CLK_IS_CRITICAL },
- { .name = "clk-tx-icn-disp-1", },
- /* This clk needs to be on to keep bus interconnect alive */
- { .name = "clk-icn-sbc", .flags = CLK_IS_CRITICAL },
- { .name = "clk-stfe-frc2", },
- { .name = "clk-eth-phy", },
- { .name = "clk-eth-ref-phyclk", },
- { .name = "clk-flash-promip", },
- { .name = "clk-main-disp", },
- { .name = "clk-aux-disp", },
- { .name = "clk-compo-dvp", },
-};
-
-static const struct clkgen_data clkgen_stih407_c0 = {
- .outputs = clkgen_stih407_c0_clk_out,
- .outputs_nb = ARRAY_SIZE(clkgen_stih407_c0_clk_out),
-};
-
static const struct clkgen_clk_out clkgen_stih410_c0_clk_out[] = {
{ .name = "clk-icn-gpu", },
{ .name = "clk-fdma", },
@@ -482,19 +427,6 @@ static const struct clkgen_data clkgen_stih418_c0 = {
.outputs_nb = ARRAY_SIZE(clkgen_stih418_c0_clk_out),
};
-static const struct clkgen_clk_out clkgen_stih407_d0_clk_out[] = {
- { .name = "clk-pcm-0", },
- { .name = "clk-pcm-1", },
- { .name = "clk-pcm-2", },
- { .name = "clk-spdiff", },
-};
-
-static const struct clkgen_data clkgen_stih407_d0 = {
- .flags = CLK_SET_RATE_PARENT,
- .outputs = clkgen_stih407_d0_clk_out,
- .outputs_nb = ARRAY_SIZE(clkgen_stih407_d0_clk_out),
-};
-
static const struct clkgen_clk_out clkgen_stih410_d0_clk_out[] = {
{ .name = "clk-pcm-0", },
{ .name = "clk-pcm-1", },
@@ -597,18 +529,10 @@ static const struct of_device_id flexgen_of_match[] = {
.data = &clkgen_video,
},
{
- .compatible = "st,flexgen-stih407-a0",
- .data = &clkgen_stih407_a0,
- },
- {
.compatible = "st,flexgen-stih410-a0",
.data = &clkgen_stih410_a0,
},
{
- .compatible = "st,flexgen-stih407-c0",
- .data = &clkgen_stih407_c0,
- },
- {
.compatible = "st,flexgen-stih410-c0",
.data = &clkgen_stih410_c0,
},
@@ -617,10 +541,6 @@ static const struct of_device_id flexgen_of_match[] = {
.data = &clkgen_stih418_c0,
},
{
- .compatible = "st,flexgen-stih407-d0",
- .data = &clkgen_stih407_d0,
- },
- {
.compatible = "st,flexgen-stih410-d0",
.data = &clkgen_stih410_d0,
},
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index 40df1db102a7..e06e7e5cc1a5 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -375,22 +375,21 @@ static int clk_fs660c32_vco_get_params(unsigned long input,
return 0;
}
-static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int quadfs_pll_fs660c32_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct stm_fs params;
- if (clk_fs660c32_vco_get_params(*prate, rate, &params))
- return rate;
+ if (clk_fs660c32_vco_get_params(req->best_parent_rate, req->rate, &params))
+ return 0;
- clk_fs660c32_vco_get_rate(*prate, &params, &rate);
+ clk_fs660c32_vco_get_rate(req->best_parent_rate, &params, &req->rate);
pr_debug("%s: %s new rate %ld [ndiv=%u]\n",
__func__, clk_hw_get_name(hw),
- rate, (unsigned int)params.ndiv);
+ req->rate, (unsigned int)params.ndiv);
- return rate;
+ return 0;
}
static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -436,7 +435,7 @@ static const struct clk_ops st_quadfs_pll_c32_ops = {
.disable = quadfs_pll_disable,
.is_enabled = quadfs_pll_is_enabled,
.recalc_rate = quadfs_pll_fs660c32_recalc_rate,
- .round_rate = quadfs_pll_fs660c32_round_rate,
+ .determine_rate = quadfs_pll_fs660c32_determine_rate,
.set_rate = quadfs_pll_fs660c32_set_rate,
};
@@ -814,19 +813,21 @@ static unsigned long quadfs_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long quadfs_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int quadfs_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct stm_fs params;
- rate = quadfs_find_best_rate(hw, rate, *prate, &params);
+ req->rate = quadfs_find_best_rate(hw, req->rate,
+ req->best_parent_rate, &params);
pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n",
__func__, clk_hw_get_name(hw),
- rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv,
- (unsigned int)params.pe, (unsigned int)params.nsdiv);
+ req->rate, (unsigned int)params.sdiv,
+ (unsigned int)params.mdiv,
+ (unsigned int)params.pe, (unsigned int)params.nsdiv);
- return rate;
+ return 0;
}
@@ -873,7 +874,7 @@ static const struct clk_ops st_quadfs_ops = {
.enable = quadfs_fsynth_enable,
.disable = quadfs_fsynth_disable,
.is_enabled = quadfs_fsynth_is_enabled,
- .round_rate = quadfs_round_rate,
+ .determine_rate = quadfs_determine_rate,
.set_rate = quadfs_set_rate,
.recalc_rate = quadfs_recalc_rate,
};
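
Consumers are unaffected by these conversions: clk_round_rate() still ends up calling the provider's rounding callback (now .determine_rate) and returns the resulting req->rate. A short usage sketch, assuming clk is a previously acquired clock handle:

	long rounded;

	rounded = clk_round_rate(clk, 148500000);	/* ask for 148.5 MHz */
	if (rounded <= 0)
		pr_warn("no usable rate near 148.5 MHz\n");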
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index b36e4d803636..c258ff87a171 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -395,25 +395,28 @@ static unsigned long recalc_stm_pll3200c32(struct clk_hw *hw,
return rate;
}
-static long round_rate_stm_pll3200c32(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int stm_pll3200c32_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct stm_pll params;
- if (!clk_pll3200c32_get_params(*prate, rate, &params))
- clk_pll3200c32_get_rate(*prate, &params, &rate);
+ if (!clk_pll3200c32_get_params(req->best_parent_rate, req->rate, &params))
+ clk_pll3200c32_get_rate(req->best_parent_rate, &params,
+ &req->rate);
else {
pr_debug("%s: %s rate %ld Invalid\n", __func__,
- __clk_get_name(hw->clk), rate);
+ __clk_get_name(hw->clk), req->rate);
+ req->rate = 0;
+
return 0;
}
pr_debug("%s: %s new rate %ld [ndiv=%u] [idf=%u]\n",
__func__, __clk_get_name(hw->clk),
- rate, (unsigned int)params.ndiv,
+ req->rate, (unsigned int)params.ndiv,
(unsigned int)params.idf);
- return rate;
+ return 0;
}
static int set_rate_stm_pll3200c32(struct clk_hw *hw, unsigned long rate,
@@ -549,25 +552,28 @@ static unsigned long recalc_stm_pll4600c28(struct clk_hw *hw,
return rate;
}
-static long round_rate_stm_pll4600c28(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int stm_pll4600c28_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct stm_pll params;
- if (!clk_pll4600c28_get_params(*prate, rate, &params)) {
- clk_pll4600c28_get_rate(*prate, &params, &rate);
+ if (!clk_pll4600c28_get_params(req->best_parent_rate, req->rate, &params)) {
+ clk_pll4600c28_get_rate(req->best_parent_rate, &params,
+ &req->rate);
} else {
pr_debug("%s: %s rate %ld Invalid\n", __func__,
- __clk_get_name(hw->clk), rate);
+ __clk_get_name(hw->clk), req->rate);
+ req->rate = 0;
+
return 0;
}
pr_debug("%s: %s new rate %ld [ndiv=%u] [idf=%u]\n",
__func__, __clk_get_name(hw->clk),
- rate, (unsigned int)params.ndiv,
+ req->rate, (unsigned int)params.ndiv,
(unsigned int)params.idf);
- return rate;
+ return 0;
}
static int set_rate_stm_pll4600c28(struct clk_hw *hw, unsigned long rate,
@@ -628,7 +634,7 @@ static const struct clk_ops stm_pll3200c32_a9_ops = {
.disable = clkgen_pll_disable,
.is_enabled = clkgen_pll_is_enabled,
.recalc_rate = recalc_stm_pll3200c32,
- .round_rate = round_rate_stm_pll3200c32,
+ .determine_rate = stm_pll3200c32_determine_rate,
.set_rate = set_rate_stm_pll3200c32,
};
@@ -637,7 +643,7 @@ static const struct clk_ops stm_pll4600c28_ops = {
.disable = clkgen_pll_disable,
.is_enabled = clkgen_pll_is_enabled,
.recalc_rate = recalc_stm_pll4600c28,
- .round_rate = round_rate_stm_pll4600c28,
+ .determine_rate = stm_pll4600c28_determine_rate,
.set_rate = set_rate_stm_pll4600c28,
};
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-sys.c b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
index e9d8168d02b8..52833d4241c5 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-sys.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
@@ -376,7 +376,7 @@ EXPORT_SYMBOL_GPL(jh7110_reset_controller_register);
/*
* This clock notifier is called when the rate of PLL0 clock is to be changed.
- * The cpu_root clock should save the curent parent clock and switch its parent
+ * The cpu_root clock should save the current parent clock and switch its parent
* clock to osc before PLL0 rate will be changed. Then switch its parent clock
* back after the PLL0 rate is completed.
*/
diff --git a/drivers/clk/stm32/Kconfig b/drivers/clk/stm32/Kconfig
index dca409d52652..5dbd75cde657 100644
--- a/drivers/clk/stm32/Kconfig
+++ b/drivers/clk/stm32/Kconfig
@@ -4,7 +4,7 @@
menuconfig COMMON_CLK_STM32MP
bool "Clock support for common STM32MP clocks"
depends on ARCH_STM32 || COMPILE_TEST
- default y
+ default ARCH_STM32
select RESET_CONTROLLER
help
Support for STM32MP SoC family clocks.
@@ -14,21 +14,28 @@ if COMMON_CLK_STM32MP
config COMMON_CLK_STM32MP135
bool "Clock driver for stm32mp13x clocks"
depends on ARM || COMPILE_TEST
- default y
+ default ARCH_STM32
help
Support for stm32mp13x SoC family clocks.
config COMMON_CLK_STM32MP157
bool "Clock driver for stm32mp15x clocks"
depends on ARM || COMPILE_TEST
- default y
+ default ARCH_STM32
help
Support for stm32mp15x SoC family clocks.
+config COMMON_CLK_STM32MP215
+ bool "Clock driver for stm32mp21x clocks"
+ depends on ARM || ARM64 || COMPILE_TEST
+ default y
+ help
+ Support for stm32mp21x SoC family clocks.
+
config COMMON_CLK_STM32MP257
bool "Clock driver for stm32mp25x clocks"
depends on ARM64 || COMPILE_TEST
- default y
+ default ARCH_STM32
help
Support for stm32mp25x SoC family clocks.
diff --git a/drivers/clk/stm32/Makefile b/drivers/clk/stm32/Makefile
index 0a627164fcce..e04727b59449 100644
--- a/drivers/clk/stm32/Makefile
+++ b/drivers/clk/stm32/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_COMMON_CLK_STM32MP135) += clk-stm32mp13.o clk-stm32-core.o reset-stm32.o
obj-$(CONFIG_COMMON_CLK_STM32MP157) += clk-stm32mp1.o reset-stm32.o
+obj-$(CONFIG_COMMON_CLK_STM32MP215) += clk-stm32mp21.o clk-stm32-core.o reset-stm32.o
obj-$(CONFIG_COMMON_CLK_STM32MP257) += clk-stm32mp25.o clk-stm32-core.o reset-stm32.o
diff --git a/drivers/clk/stm32/clk-stm32-core.c b/drivers/clk/stm32/clk-stm32-core.c
index 933e3cde0795..72825b9c36a4 100644
--- a/drivers/clk/stm32/clk-stm32-core.c
+++ b/drivers/clk/stm32/clk-stm32-core.c
@@ -351,14 +351,14 @@ static int clk_stm32_divider_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static long clk_stm32_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_stm32_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_stm32_div *div = to_clk_stm32_divider(hw);
const struct stm32_div_cfg *divider;
if (div->div_id == NO_STM32_DIV)
- return rate;
+ return 0;
divider = &div->clock_data->dividers[div->div_id];
@@ -369,14 +369,22 @@ static long clk_stm32_divider_round_rate(struct clk_hw *hw, unsigned long rate,
val = readl(div->base + divider->offset) >> divider->shift;
val &= clk_div_mask(divider->width);
- return divider_ro_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags,
- val);
+ req->rate = divider_ro_round_rate(hw, req->rate,
+ &req->best_parent_rate,
+ divider->table,
+ divider->width,
+ divider->flags, val);
+
+ return 0;
}
- return divider_round_rate_parent(hw, clk_hw_get_parent(hw),
- rate, prate, divider->table,
- divider->width, divider->flags);
+ req->rate = divider_round_rate_parent(hw, clk_hw_get_parent(hw),
+ req->rate,
+ &req->best_parent_rate,
+ divider->table,
+ divider->width, divider->flags);
+
+ return 0;
}
static unsigned long clk_stm32_divider_recalc_rate(struct clk_hw *hw,
@@ -392,7 +400,7 @@ static unsigned long clk_stm32_divider_recalc_rate(struct clk_hw *hw,
const struct clk_ops clk_stm32_divider_ops = {
.recalc_rate = clk_stm32_divider_recalc_rate,
- .round_rate = clk_stm32_divider_round_rate,
+ .determine_rate = clk_stm32_divider_determine_rate,
.set_rate = clk_stm32_divider_set_rate,
};
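
In the read-only branch above, the helper does not search for a new divider value: it takes the value currently latched in the register and reports the rate that value can produce. For a plain divider (identity table, default zero-based encoding, ignoring CLK_SET_RATE_PARENT propagation) that boils down to roughly:

	/* val was read from the register; default encoding is div = val + 1 */
	req->rate = req->best_parent_rate / (val + 1);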
diff --git a/drivers/clk/stm32/clk-stm32mp1.c b/drivers/clk/stm32/clk-stm32mp1.c
index 5fcc4c77c11f..2d9ccd96ec98 100644
--- a/drivers/clk/stm32/clk-stm32mp1.c
+++ b/drivers/clk/stm32/clk-stm32mp1.c
@@ -970,12 +970,15 @@ static unsigned long __bestmult(struct clk_hw *hw, unsigned long rate,
return mult;
}
-static long timer_ker_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int timer_ker_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long factor = __bestmult(hw, rate, *parent_rate);
+ unsigned long factor = __bestmult(hw, req->rate,
+ req->best_parent_rate);
- return *parent_rate * factor;
+ req->rate = req->best_parent_rate * factor;
+
+ return 0;
}
static int timer_ker_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -1026,7 +1029,7 @@ static unsigned long timer_ker_recalc_rate(struct clk_hw *hw,
static const struct clk_ops timer_ker_ops = {
.recalc_rate = timer_ker_recalc_rate,
- .round_rate = timer_ker_round_rate,
+ .determine_rate = timer_ker_determine_rate,
.set_rate = timer_ker_set_rate,
};
@@ -2041,7 +2044,7 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
KCLK(ADFSDM_K, "adfsdm_k", sai_src, 0, G_ADFSDM, M_SAI1),
KCLK(USBO_K, "usbo_k", usbo_src, 0, G_USBO, M_USBO),
- /* Particulary Kernel Clocks (no mux or no gate) */
+ /* Particularly Kernel Clocks (no mux or no gate) */
MGATE_MP1(DFSDM_K, "dfsdm_k", "ck_mcu", 0, G_DFSDM),
MGATE_MP1(DSI_PX, "dsi_px", "pll4_q", CLK_SET_RATE_PARENT, G_DSI),
MGATE_MP1(LTDC_PX, "ltdc_px", "pll4_q", CLK_SET_RATE_PARENT, G_LTDC),
diff --git a/drivers/clk/stm32/clk-stm32mp21.c b/drivers/clk/stm32/clk-stm32mp21.c
new file mode 100644
index 000000000000..c8a37b716bd5
--- /dev/null
+++ b/drivers/clk/stm32/clk-stm32mp21.c
@@ -0,0 +1,1586 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) STMicroelectronics 2023 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
+ */
+
+#include <linux/bus/stm32_firewall_device.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include "clk-stm32-core.h"
+#include "reset-stm32.h"
+#include "stm32mp21_rcc.h"
+
+#include <dt-bindings/clock/st,stm32mp21-rcc.h>
+#include <dt-bindings/reset/st,stm32mp21-rcc.h>
+
+/* Max clock binding value */
+#define STM32MP21_LAST_CLK CK_SCMI_KER_ETR
+
+/* Clock security definition */
+#define SECF_NONE -1
+
+#define RCC_REG_SIZE 32
+#define RCC_SECCFGR(x) (((x) / RCC_REG_SIZE) * 0x4 + RCC_SECCFGR0)
+#define RCC_CIDCFGR(x) ((x) * 0x8 + RCC_R0CIDCFGR)
+#define RCC_SEMCR(x) ((x) * 0x8 + RCC_R0SEMCR)
+#define RCC_CID1 1
+
+/* Register: RIFSC_CIDCFGR */
+#define RCC_CIDCFGR_CFEN BIT(0)
+#define RCC_CIDCFGR_SEM_EN BIT(1)
+#define RCC_CIDCFGR_SEMWLC1_EN BIT(17)
+#define RCC_CIDCFGR_SCID_MASK GENMASK(6, 4)
+
+/* Register: RIFSC_SEMCR */
+#define RCC_SEMCR_SEMCID_MASK GENMASK(6, 4)
+
+#define MP21_RIF_RCC_MCO1 108
+#define MP21_RIF_RCC_MCO2 109
+
+#define SEC_RIFSC_FLAG BIT(31)
+#define SEC_RIFSC(_id) ((_id) | SEC_RIFSC_FLAG)
+
+enum {
+ HSE,
+ HSI,
+ MSI,
+ LSE,
+ LSI,
+ HSE_DIV2,
+ ICN_HS_MCU,
+ ICN_LS_MCU,
+ ICN_SDMMC,
+ ICN_DDR,
+ ICN_DISPLAY,
+ ICN_HSL,
+ ICN_NIC,
+ FLEXGEN_07,
+ FLEXGEN_08,
+ FLEXGEN_09,
+ FLEXGEN_10,
+ FLEXGEN_11,
+ FLEXGEN_12,
+ FLEXGEN_13,
+ FLEXGEN_14,
+ FLEXGEN_16,
+ FLEXGEN_17,
+ FLEXGEN_18,
+ FLEXGEN_19,
+ FLEXGEN_20,
+ FLEXGEN_21,
+ FLEXGEN_22,
+ FLEXGEN_23,
+ FLEXGEN_24,
+ FLEXGEN_25,
+ FLEXGEN_26,
+ FLEXGEN_27,
+ FLEXGEN_29,
+ FLEXGEN_30,
+ FLEXGEN_31,
+ FLEXGEN_33,
+ FLEXGEN_36,
+ FLEXGEN_37,
+ FLEXGEN_38,
+ FLEXGEN_39,
+ FLEXGEN_40,
+ FLEXGEN_41,
+ FLEXGEN_42,
+ FLEXGEN_43,
+ FLEXGEN_44,
+ FLEXGEN_45,
+ FLEXGEN_46,
+ FLEXGEN_47,
+ FLEXGEN_48,
+ FLEXGEN_50,
+ FLEXGEN_51,
+ FLEXGEN_52,
+ FLEXGEN_53,
+ FLEXGEN_54,
+ FLEXGEN_55,
+ FLEXGEN_56,
+ FLEXGEN_57,
+ FLEXGEN_58,
+ FLEXGEN_61,
+ FLEXGEN_62,
+ FLEXGEN_63,
+ ICN_APB1,
+ ICN_APB2,
+ ICN_APB3,
+ ICN_APB4,
+ ICN_APB5,
+ ICN_APBDBG,
+ TIMG1,
+ TIMG2,
+};
+
+static const struct clk_parent_data adc1_src[] = {
+ { .index = FLEXGEN_46 },
+ { .index = ICN_LS_MCU },
+};
+
+static const struct clk_parent_data adc2_src[] = {
+ { .index = FLEXGEN_47 },
+ { .index = ICN_LS_MCU },
+ { .index = FLEXGEN_46 },
+};
+
+static const struct clk_parent_data usb2phy1_src[] = {
+ { .index = FLEXGEN_57 },
+ { .index = HSE_DIV2 },
+};
+
+static const struct clk_parent_data usb2phy2_src[] = {
+ { .index = FLEXGEN_58 },
+ { .index = HSE_DIV2 },
+};
+
+static const struct clk_parent_data dts_src[] = {
+ { .index = HSI },
+ { .index = HSE },
+ { .index = MSI },
+};
+
+static const struct clk_parent_data mco1_src[] = {
+ { .index = FLEXGEN_61 },
+};
+
+static const struct clk_parent_data mco2_src[] = {
+ { .index = FLEXGEN_62 },
+};
+
+enum enum_mux_cfg {
+ MUX_ADC1,
+ MUX_ADC2,
+ MUX_DTS,
+ MUX_MCO1,
+ MUX_MCO2,
+ MUX_USB2PHY1,
+ MUX_USB2PHY2,
+ MUX_NB
+};
+
+#define MUX_CFG(id, _offset, _shift, _width) \
+ [id] = { \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ }
+
+static const struct stm32_mux_cfg stm32mp21_muxes[MUX_NB] = {
+ MUX_CFG(MUX_ADC1, RCC_ADC1CFGR, 12, 1),
+ MUX_CFG(MUX_ADC2, RCC_ADC2CFGR, 12, 2),
+ MUX_CFG(MUX_DTS, RCC_DTSCFGR, 12, 2),
+ MUX_CFG(MUX_MCO1, RCC_MCO1CFGR, 0, 1),
+ MUX_CFG(MUX_MCO2, RCC_MCO2CFGR, 0, 1),
+ MUX_CFG(MUX_USB2PHY1, RCC_USB2PHY1CFGR, 15, 1),
+ MUX_CFG(MUX_USB2PHY2, RCC_USB2PHY2CFGR, 15, 1),
+};
+
+enum enum_gate_cfg {
+ GATE_ADC1,
+ GATE_ADC2,
+ GATE_CRC,
+ GATE_CRYP1,
+ GATE_CRYP2,
+ GATE_CSI,
+ GATE_DCMIPP,
+ GATE_DCMIPSSI,
+ GATE_DDRPERFM,
+ GATE_DTS,
+ GATE_ETH1,
+ GATE_ETH1MAC,
+ GATE_ETH1RX,
+ GATE_ETH1STP,
+ GATE_ETH1TX,
+ GATE_ETH2,
+ GATE_ETH2MAC,
+ GATE_ETH2RX,
+ GATE_ETH2STP,
+ GATE_ETH2TX,
+ GATE_FDCAN,
+ GATE_HASH1,
+ GATE_HASH2,
+ GATE_HDP,
+ GATE_I2C1,
+ GATE_I2C2,
+ GATE_I2C3,
+ GATE_I3C1,
+ GATE_I3C2,
+ GATE_I3C3,
+ GATE_IWDG1,
+ GATE_IWDG2,
+ GATE_IWDG3,
+ GATE_IWDG4,
+ GATE_LPTIM1,
+ GATE_LPTIM2,
+ GATE_LPTIM3,
+ GATE_LPTIM4,
+ GATE_LPTIM5,
+ GATE_LPUART1,
+ GATE_LTDC,
+ GATE_MCO1,
+ GATE_MCO2,
+ GATE_MDF1,
+ GATE_OTG,
+ GATE_PKA,
+ GATE_RNG1,
+ GATE_RNG2,
+ GATE_SAES,
+ GATE_SAI1,
+ GATE_SAI2,
+ GATE_SAI3,
+ GATE_SAI4,
+ GATE_SDMMC1,
+ GATE_SDMMC2,
+ GATE_SDMMC3,
+ GATE_SERC,
+ GATE_SPDIFRX,
+ GATE_SPI1,
+ GATE_SPI2,
+ GATE_SPI3,
+ GATE_SPI4,
+ GATE_SPI5,
+ GATE_SPI6,
+ GATE_TIM1,
+ GATE_TIM10,
+ GATE_TIM11,
+ GATE_TIM12,
+ GATE_TIM13,
+ GATE_TIM14,
+ GATE_TIM15,
+ GATE_TIM16,
+ GATE_TIM17,
+ GATE_TIM2,
+ GATE_TIM3,
+ GATE_TIM4,
+ GATE_TIM5,
+ GATE_TIM6,
+ GATE_TIM7,
+ GATE_TIM8,
+ GATE_UART4,
+ GATE_UART5,
+ GATE_UART7,
+ GATE_USART1,
+ GATE_USART2,
+ GATE_USART3,
+ GATE_USART6,
+ GATE_USB2PHY1,
+ GATE_USB2PHY2,
+ GATE_USBH,
+ GATE_VREF,
+ GATE_WWDG1,
+ GATE_NB
+};
+
+#define GATE_CFG(id, _offset, _bit_idx, _offset_clr) \
+ [id] = { \
+ .offset = (_offset), \
+ .bit_idx = (_bit_idx), \
+ .set_clr = (_offset_clr), \
+ }
+
+static const struct stm32_gate_cfg stm32mp21_gates[GATE_NB] = {
+ GATE_CFG(GATE_ADC1, RCC_ADC1CFGR, 1, 0),
+ GATE_CFG(GATE_ADC2, RCC_ADC2CFGR, 1, 0),
+ GATE_CFG(GATE_CRC, RCC_CRCCFGR, 1, 0),
+ GATE_CFG(GATE_CRYP1, RCC_CRYP1CFGR, 1, 0),
+ GATE_CFG(GATE_CRYP2, RCC_CRYP2CFGR, 1, 0),
+ GATE_CFG(GATE_CSI, RCC_CSICFGR, 1, 0),
+ GATE_CFG(GATE_DCMIPP, RCC_DCMIPPCFGR, 1, 0),
+ GATE_CFG(GATE_DCMIPSSI, RCC_DCMIPSSICFGR, 1, 0),
+ GATE_CFG(GATE_DDRPERFM, RCC_DDRPERFMCFGR, 1, 0),
+ GATE_CFG(GATE_DTS, RCC_DTSCFGR, 1, 0),
+ GATE_CFG(GATE_ETH1, RCC_ETH1CFGR, 5, 0),
+ GATE_CFG(GATE_ETH1MAC, RCC_ETH1CFGR, 1, 0),
+ GATE_CFG(GATE_ETH1RX, RCC_ETH1CFGR, 10, 0),
+ GATE_CFG(GATE_ETH1STP, RCC_ETH1CFGR, 4, 0),
+ GATE_CFG(GATE_ETH1TX, RCC_ETH1CFGR, 8, 0),
+ GATE_CFG(GATE_ETH2, RCC_ETH2CFGR, 5, 0),
+ GATE_CFG(GATE_ETH2MAC, RCC_ETH2CFGR, 1, 0),
+ GATE_CFG(GATE_ETH2RX, RCC_ETH2CFGR, 10, 0),
+ GATE_CFG(GATE_ETH2STP, RCC_ETH2CFGR, 4, 0),
+ GATE_CFG(GATE_ETH2TX, RCC_ETH2CFGR, 8, 0),
+ GATE_CFG(GATE_FDCAN, RCC_FDCANCFGR, 1, 0),
+ GATE_CFG(GATE_HASH1, RCC_HASH1CFGR, 1, 0),
+ GATE_CFG(GATE_HASH2, RCC_HASH2CFGR, 1, 0),
+ GATE_CFG(GATE_HDP, RCC_HDPCFGR, 1, 0),
+ GATE_CFG(GATE_I2C1, RCC_I2C1CFGR, 1, 0),
+ GATE_CFG(GATE_I2C2, RCC_I2C2CFGR, 1, 0),
+ GATE_CFG(GATE_I2C3, RCC_I2C3CFGR, 1, 0),
+ GATE_CFG(GATE_I3C1, RCC_I3C1CFGR, 1, 0),
+ GATE_CFG(GATE_I3C2, RCC_I3C2CFGR, 1, 0),
+ GATE_CFG(GATE_I3C3, RCC_I3C3CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG1, RCC_IWDG1CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG2, RCC_IWDG2CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG3, RCC_IWDG3CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG4, RCC_IWDG4CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM1, RCC_LPTIM1CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM2, RCC_LPTIM2CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM3, RCC_LPTIM3CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM4, RCC_LPTIM4CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM5, RCC_LPTIM5CFGR, 1, 0),
+ GATE_CFG(GATE_LPUART1, RCC_LPUART1CFGR, 1, 0),
+ GATE_CFG(GATE_LTDC, RCC_LTDCCFGR, 1, 0),
+ GATE_CFG(GATE_MCO1, RCC_MCO1CFGR, 8, 0),
+ GATE_CFG(GATE_MCO2, RCC_MCO2CFGR, 8, 0),
+ GATE_CFG(GATE_MDF1, RCC_MDF1CFGR, 1, 0),
+ GATE_CFG(GATE_OTG, RCC_OTGCFGR, 1, 0),
+ GATE_CFG(GATE_PKA, RCC_PKACFGR, 1, 0),
+ GATE_CFG(GATE_RNG1, RCC_RNG1CFGR, 1, 0),
+ GATE_CFG(GATE_RNG2, RCC_RNG2CFGR, 1, 0),
+ GATE_CFG(GATE_SAES, RCC_SAESCFGR, 1, 0),
+ GATE_CFG(GATE_SAI1, RCC_SAI1CFGR, 1, 0),
+ GATE_CFG(GATE_SAI2, RCC_SAI2CFGR, 1, 0),
+ GATE_CFG(GATE_SAI3, RCC_SAI3CFGR, 1, 0),
+ GATE_CFG(GATE_SAI4, RCC_SAI4CFGR, 1, 0),
+ GATE_CFG(GATE_SDMMC1, RCC_SDMMC1CFGR, 1, 0),
+ GATE_CFG(GATE_SDMMC2, RCC_SDMMC2CFGR, 1, 0),
+ GATE_CFG(GATE_SDMMC3, RCC_SDMMC3CFGR, 1, 0),
+ GATE_CFG(GATE_SERC, RCC_SERCCFGR, 1, 0),
+ GATE_CFG(GATE_SPDIFRX, RCC_SPDIFRXCFGR, 1, 0),
+ GATE_CFG(GATE_SPI1, RCC_SPI1CFGR, 1, 0),
+ GATE_CFG(GATE_SPI2, RCC_SPI2CFGR, 1, 0),
+ GATE_CFG(GATE_SPI3, RCC_SPI3CFGR, 1, 0),
+ GATE_CFG(GATE_SPI4, RCC_SPI4CFGR, 1, 0),
+ GATE_CFG(GATE_SPI5, RCC_SPI5CFGR, 1, 0),
+ GATE_CFG(GATE_SPI6, RCC_SPI6CFGR, 1, 0),
+ GATE_CFG(GATE_TIM1, RCC_TIM1CFGR, 1, 0),
+ GATE_CFG(GATE_TIM10, RCC_TIM10CFGR, 1, 0),
+ GATE_CFG(GATE_TIM11, RCC_TIM11CFGR, 1, 0),
+ GATE_CFG(GATE_TIM12, RCC_TIM12CFGR, 1, 0),
+ GATE_CFG(GATE_TIM13, RCC_TIM13CFGR, 1, 0),
+ GATE_CFG(GATE_TIM14, RCC_TIM14CFGR, 1, 0),
+ GATE_CFG(GATE_TIM15, RCC_TIM15CFGR, 1, 0),
+ GATE_CFG(GATE_TIM16, RCC_TIM16CFGR, 1, 0),
+ GATE_CFG(GATE_TIM17, RCC_TIM17CFGR, 1, 0),
+ GATE_CFG(GATE_TIM2, RCC_TIM2CFGR, 1, 0),
+ GATE_CFG(GATE_TIM3, RCC_TIM3CFGR, 1, 0),
+ GATE_CFG(GATE_TIM4, RCC_TIM4CFGR, 1, 0),
+ GATE_CFG(GATE_TIM5, RCC_TIM5CFGR, 1, 0),
+ GATE_CFG(GATE_TIM6, RCC_TIM6CFGR, 1, 0),
+ GATE_CFG(GATE_TIM7, RCC_TIM7CFGR, 1, 0),
+ GATE_CFG(GATE_TIM8, RCC_TIM8CFGR, 1, 0),
+ GATE_CFG(GATE_UART4, RCC_UART4CFGR, 1, 0),
+ GATE_CFG(GATE_UART5, RCC_UART5CFGR, 1, 0),
+ GATE_CFG(GATE_UART7, RCC_UART7CFGR, 1, 0),
+ GATE_CFG(GATE_USART1, RCC_USART1CFGR, 1, 0),
+ GATE_CFG(GATE_USART2, RCC_USART2CFGR, 1, 0),
+ GATE_CFG(GATE_USART3, RCC_USART3CFGR, 1, 0),
+ GATE_CFG(GATE_USART6, RCC_USART6CFGR, 1, 0),
+ GATE_CFG(GATE_USB2PHY1, RCC_USB2PHY1CFGR, 1, 0),
+ GATE_CFG(GATE_USB2PHY2, RCC_USB2PHY2CFGR, 1, 0),
+ GATE_CFG(GATE_USBH, RCC_USBHCFGR, 1, 0),
+ GATE_CFG(GATE_VREF, RCC_VREFCFGR, 1, 0),
+ GATE_CFG(GATE_WWDG1, RCC_WWDG1CFGR, 1, 0),
+};
+
+#define CLK_HW_INIT_INDEX(_name, _parent, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_data = (const struct clk_parent_data[]) { \
+ { .index = _parent }, \
+ }, \
+ .num_parents = 1, \
+ .ops = _ops, \
+ })
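+
+/*
+ * CLK_HW_INIT_INDEX builds a single-parent clk_init_data whose parent is
+ * referenced by index; the .index field of clk_parent_data resolves against
+ * the provider's DT "clocks" property, which the enum near the top of this
+ * file mirrors. For reference, the first use below expands to roughly:
+ *
+ *	&(struct clk_init_data) {
+ *		.flags = 0,
+ *		.name = "ck_icn_p_adc1",
+ *		.parent_data = (const struct clk_parent_data[]) {
+ *			{ .index = ICN_LS_MCU },
+ *		},
+ *		.num_parents = 1,
+ *		.ops = &clk_stm32_gate_ops,
+ *	}
+ */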
+
+/* ADC */
+static struct clk_stm32_gate ck_icn_p_adc1 = {
+ .gate_id = GATE_ADC1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_adc1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_composite ck_ker_adc1 = {
+ .gate_id = GATE_ADC1,
+ .mux_id = MUX_ADC1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_adc1", adc1_src, &clk_stm32_composite_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_adc2 = {
+ .gate_id = GATE_ADC2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_adc2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_composite ck_ker_adc2 = {
+ .gate_id = GATE_ADC2,
+ .mux_id = MUX_ADC2,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_adc2", adc2_src, &clk_stm32_composite_ops, 0),
+};
+
+/* CSI-HOST */
+static struct clk_stm32_gate ck_icn_p_csi = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_csi", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_csi = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_csi", FLEXGEN_29, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_csitxesc = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_csitxesc", FLEXGEN_30, &clk_stm32_gate_ops, 0),
+};
+
+/* CSI-PHY */
+static struct clk_stm32_gate ck_ker_csiphy = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_csiphy", FLEXGEN_31, &clk_stm32_gate_ops, 0),
+};
+
+/* DCMIPP */
+static struct clk_stm32_gate ck_icn_p_dcmipp = {
+ .gate_id = GATE_DCMIPP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_dcmipp", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_dcmipssi = {
+ .gate_id = GATE_DCMIPSSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_dcmipssi", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* DDRPERFM */
+static struct clk_stm32_gate ck_icn_p_ddrperfm = {
+ .gate_id = GATE_DDRPERFM,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_ddrperfm", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+/* CRC */
+static struct clk_stm32_gate ck_icn_p_crc = {
+ .gate_id = GATE_CRC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_crc", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* CRYP */
+static struct clk_stm32_gate ck_icn_p_cryp1 = {
+ .gate_id = GATE_CRYP1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_cryp1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_cryp2 = {
+ .gate_id = GATE_CRYP2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_cryp2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* DBG & TRACE */
+/* Trace and debug clocks are managed by SCMI */
+
+/* LTDC */
+static struct clk_stm32_gate ck_icn_p_ltdc = {
+ .gate_id = GATE_LTDC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_ltdc", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_ltdc = {
+ .gate_id = GATE_LTDC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_ltdc", FLEXGEN_27, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+/* DTS */
+static struct clk_stm32_composite ck_ker_dts = {
+ .gate_id = GATE_DTS,
+ .mux_id = MUX_DTS,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_dts", dts_src,
+ &clk_stm32_composite_ops, 0),
+};
+
+/* ETHERNET */
+static struct clk_stm32_gate ck_icn_p_eth1 = {
+ .gate_id = GATE_ETH1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_eth1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1stp = {
+ .gate_id = GATE_ETH1STP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1stp", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1 = {
+ .gate_id = GATE_ETH1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1", FLEXGEN_54, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1ptp = {
+ .gate_id = GATE_ETH1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1ptp", FLEXGEN_56, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1mac = {
+ .gate_id = GATE_ETH1MAC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1mac", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1tx = {
+ .gate_id = GATE_ETH1TX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1tx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1rx = {
+ .gate_id = GATE_ETH1RX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1rx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_eth2 = {
+ .gate_id = GATE_ETH2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_eth2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2stp = {
+ .gate_id = GATE_ETH2STP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2stp", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2 = {
+ .gate_id = GATE_ETH2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2", FLEXGEN_55, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2ptp = {
+ .gate_id = GATE_ETH2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2ptp", FLEXGEN_56, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2mac = {
+ .gate_id = GATE_ETH2MAC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2mac", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2tx = {
+ .gate_id = GATE_ETH2TX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2tx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2rx = {
+ .gate_id = GATE_ETH2RX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2rx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* FDCAN */
+static struct clk_stm32_gate ck_icn_p_fdcan = {
+ .gate_id = GATE_FDCAN,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_fdcan", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_fdcan = {
+ .gate_id = GATE_FDCAN,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_fdcan", FLEXGEN_26, &clk_stm32_gate_ops, 0),
+};
+
+/* HASH */
+static struct clk_stm32_gate ck_icn_p_hash1 = {
+ .gate_id = GATE_HASH1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_hash1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_hash2 = {
+ .gate_id = GATE_HASH2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_hash2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* HDP */
+static struct clk_stm32_gate ck_icn_p_hdp = {
+ .gate_id = GATE_HDP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_hdp", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+/* I2C */
+static struct clk_stm32_gate ck_icn_p_i2c1 = {
+ .gate_id = GATE_I2C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c1", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i2c2 = {
+ .gate_id = GATE_I2C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i2c3 = {
+ .gate_id = GATE_I2C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c3", ICN_APB5, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c1 = {
+ .gate_id = GATE_I2C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c1", FLEXGEN_13, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c2 = {
+ .gate_id = GATE_I2C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c2", FLEXGEN_13, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c3 = {
+ .gate_id = GATE_I2C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c3", FLEXGEN_38, &clk_stm32_gate_ops, 0),
+};
+
+/* I3C */
+static struct clk_stm32_gate ck_icn_p_i3c1 = {
+ .gate_id = GATE_I3C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c1", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i3c2 = {
+ .gate_id = GATE_I3C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i3c3 = {
+ .gate_id = GATE_I3C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c3", ICN_APB5, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i3c1 = {
+ .gate_id = GATE_I3C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c1", FLEXGEN_14, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i3c2 = {
+ .gate_id = GATE_I3C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c2", FLEXGEN_14, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i3c3 = {
+ .gate_id = GATE_I3C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c3", FLEXGEN_36, &clk_stm32_gate_ops, 0),
+};
+
+/* IWDG */
+static struct clk_stm32_gate ck_icn_p_iwdg1 = {
+ .gate_id = GATE_IWDG1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg1", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_iwdg2 = {
+ .gate_id = GATE_IWDG2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg2", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_iwdg3 = {
+ .gate_id = GATE_IWDG3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg3", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_iwdg4 = {
+ .gate_id = GATE_IWDG4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg4", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+/* LPTIM */
+static struct clk_stm32_gate ck_icn_p_lptim1 = {
+ .gate_id = GATE_LPTIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim1", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim2 = {
+ .gate_id = GATE_LPTIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim3 = {
+ .gate_id = GATE_LPTIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim3", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim4 = {
+ .gate_id = GATE_LPTIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim4", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim5 = {
+ .gate_id = GATE_LPTIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim5", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim1 = {
+ .gate_id = GATE_LPTIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim1", FLEXGEN_07, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim2 = {
+ .gate_id = GATE_LPTIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim2", FLEXGEN_07, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim3 = {
+ .gate_id = GATE_LPTIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim3", FLEXGEN_40, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim4 = {
+ .gate_id = GATE_LPTIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim4", FLEXGEN_41, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim5 = {
+ .gate_id = GATE_LPTIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim5", FLEXGEN_42, &clk_stm32_gate_ops, 0),
+};
+
+/* LPUART */
+static struct clk_stm32_gate ck_icn_p_lpuart1 = {
+ .gate_id = GATE_LPUART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lpuart1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lpuart1 = {
+ .gate_id = GATE_LPUART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lpuart1", FLEXGEN_39, &clk_stm32_gate_ops, 0),
+};
+
+/* MCO1 & MCO2 */
+static struct clk_stm32_composite ck_mco1 = {
+ .gate_id = GATE_MCO1,
+ .mux_id = MUX_MCO1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_mco1", mco1_src, &clk_stm32_composite_ops, 0),
+};
+
+static struct clk_stm32_composite ck_mco2 = {
+ .gate_id = GATE_MCO2,
+ .mux_id = MUX_MCO2,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_mco2", mco2_src, &clk_stm32_composite_ops, 0),
+};
+
+/* MDF */
+static struct clk_stm32_gate ck_icn_p_mdf1 = {
+ .gate_id = GATE_MDF1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_mdf1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_mdf1 = {
+ .gate_id = GATE_MDF1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_mdf1", FLEXGEN_21, &clk_stm32_gate_ops, 0),
+};
+
+/* OTG */
+static struct clk_stm32_gate ck_icn_m_otg = {
+ .gate_id = GATE_OTG,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_otg", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* PKA */
+static struct clk_stm32_gate ck_icn_p_pka = {
+ .gate_id = GATE_PKA,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_pka", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* RNG */
+static struct clk_stm32_gate ck_icn_p_rng1 = {
+ .gate_id = GATE_RNG1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_rng1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_rng2 = {
+ .gate_id = GATE_RNG2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_rng2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* SAES */
+static struct clk_stm32_gate ck_icn_p_saes = {
+ .gate_id = GATE_SAES,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_saes", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* SAI */
+static struct clk_stm32_gate ck_icn_p_sai1 = {
+ .gate_id = GATE_SAI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_sai2 = {
+ .gate_id = GATE_SAI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai2", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_sai3 = {
+ .gate_id = GATE_SAI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai3", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_sai4 = {
+ .gate_id = GATE_SAI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai4", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sai1 = {
+ .gate_id = GATE_SAI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai1", FLEXGEN_22, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_sai2 = {
+ .gate_id = GATE_SAI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai2", FLEXGEN_23, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_sai3 = {
+ .gate_id = GATE_SAI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai3", FLEXGEN_24, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_sai4 = {
+ .gate_id = GATE_SAI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai4", FLEXGEN_25, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+/* SDMMC */
+static struct clk_stm32_gate ck_icn_m_sdmmc1 = {
+ .gate_id = GATE_SDMMC1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_sdmmc1", ICN_SDMMC, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_m_sdmmc2 = {
+ .gate_id = GATE_SDMMC2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_sdmmc2", ICN_SDMMC, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_m_sdmmc3 = {
+ .gate_id = GATE_SDMMC3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_sdmmc3", ICN_SDMMC, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sdmmc1 = {
+ .gate_id = GATE_SDMMC1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sdmmc1", FLEXGEN_51, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sdmmc2 = {
+ .gate_id = GATE_SDMMC2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sdmmc2", FLEXGEN_52, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sdmmc3 = {
+ .gate_id = GATE_SDMMC3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sdmmc3", FLEXGEN_53, &clk_stm32_gate_ops, 0),
+};
+
+/* SERC */
+static struct clk_stm32_gate ck_icn_p_serc = {
+ .gate_id = GATE_SERC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_serc", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+/* SPDIF */
+static struct clk_stm32_gate ck_icn_p_spdifrx = {
+ .gate_id = GATE_SPDIFRX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spdifrx", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spdifrx = {
+ .gate_id = GATE_SPDIFRX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spdifrx", FLEXGEN_12, &clk_stm32_gate_ops, 0),
+};
+
+/* SPI */
+static struct clk_stm32_gate ck_icn_p_spi1 = {
+ .gate_id = GATE_SPI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi2 = {
+ .gate_id = GATE_SPI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi3 = {
+ .gate_id = GATE_SPI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi3", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi4 = {
+ .gate_id = GATE_SPI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi4", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi5 = {
+ .gate_id = GATE_SPI5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi5", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi6 = {
+ .gate_id = GATE_SPI6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi6", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spi1 = {
+ .gate_id = GATE_SPI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi1", FLEXGEN_16, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_spi2 = {
+ .gate_id = GATE_SPI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi2", FLEXGEN_10, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_spi3 = {
+ .gate_id = GATE_SPI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi3", FLEXGEN_11, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_spi4 = {
+ .gate_id = GATE_SPI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi4", FLEXGEN_17, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spi5 = {
+ .gate_id = GATE_SPI5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi5", FLEXGEN_17, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spi6 = {
+ .gate_id = GATE_SPI6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi6", FLEXGEN_37, &clk_stm32_gate_ops, 0),
+};
+
+/* Timers */
+static struct clk_stm32_gate ck_icn_p_tim2 = {
+ .gate_id = GATE_TIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim3 = {
+ .gate_id = GATE_TIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim3", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim4 = {
+ .gate_id = GATE_TIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim4", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim5 = {
+ .gate_id = GATE_TIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim5", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim6 = {
+ .gate_id = GATE_TIM6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim6", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim7 = {
+ .gate_id = GATE_TIM7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim7", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim10 = {
+ .gate_id = GATE_TIM10,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim10", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim11 = {
+ .gate_id = GATE_TIM11,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim11", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim12 = {
+ .gate_id = GATE_TIM12,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim12", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim13 = {
+ .gate_id = GATE_TIM13,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim13", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim14 = {
+ .gate_id = GATE_TIM14,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim14", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim1 = {
+ .gate_id = GATE_TIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim8 = {
+ .gate_id = GATE_TIM8,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim8", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim15 = {
+ .gate_id = GATE_TIM15,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim15", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim16 = {
+ .gate_id = GATE_TIM16,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim16", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim17 = {
+ .gate_id = GATE_TIM17,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim17", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim2 = {
+ .gate_id = GATE_TIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim2", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim3 = {
+ .gate_id = GATE_TIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim3", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim4 = {
+ .gate_id = GATE_TIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim4", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim5 = {
+ .gate_id = GATE_TIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim5", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim6 = {
+ .gate_id = GATE_TIM6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim6", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim7 = {
+ .gate_id = GATE_TIM7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim7", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim10 = {
+ .gate_id = GATE_TIM10,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim10", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim11 = {
+ .gate_id = GATE_TIM11,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim11", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim12 = {
+ .gate_id = GATE_TIM12,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim12", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim13 = {
+ .gate_id = GATE_TIM13,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim13", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim14 = {
+ .gate_id = GATE_TIM14,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim14", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim1 = {
+ .gate_id = GATE_TIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim1", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim8 = {
+ .gate_id = GATE_TIM8,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim8", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim15 = {
+ .gate_id = GATE_TIM15,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim15", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim16 = {
+ .gate_id = GATE_TIM16,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim16", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim17 = {
+ .gate_id = GATE_TIM17,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim17", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+/* UART/USART */
+static struct clk_stm32_gate ck_icn_p_usart2 = {
+ .gate_id = GATE_USART2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_usart3 = {
+ .gate_id = GATE_USART3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart3", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_uart4 = {
+ .gate_id = GATE_UART4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart4", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_uart5 = {
+ .gate_id = GATE_UART5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart5", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_usart1 = {
+ .gate_id = GATE_USART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_usart6 = {
+ .gate_id = GATE_USART6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart6", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_uart7 = {
+ .gate_id = GATE_UART7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart7", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart2 = {
+ .gate_id = GATE_USART2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart2", FLEXGEN_08, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_uart4 = {
+ .gate_id = GATE_UART4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart4", FLEXGEN_08, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart3 = {
+ .gate_id = GATE_USART3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart3", FLEXGEN_09, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_uart5 = {
+ .gate_id = GATE_UART5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart5", FLEXGEN_09, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart1 = {
+ .gate_id = GATE_USART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart1", FLEXGEN_18, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart6 = {
+ .gate_id = GATE_USART6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart6", FLEXGEN_19, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_uart7 = {
+ .gate_id = GATE_UART7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart7", FLEXGEN_20, &clk_stm32_gate_ops, 0),
+};
+
+/* USB2PHY1 */
+static struct clk_stm32_composite ck_ker_usb2phy1 = {
+ .gate_id = GATE_USB2PHY1,
+ .mux_id = MUX_USB2PHY1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_usb2phy1", usb2phy1_src,
+ &clk_stm32_composite_ops, 0),
+};
+
+/* USBH */
+static struct clk_stm32_gate ck_icn_m_usbhehci = {
+ .gate_id = GATE_USBH,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_usbhehci", ICN_HSL, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_m_usbhohci = {
+ .gate_id = GATE_USBH,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_usbhohci", ICN_HSL, &clk_stm32_gate_ops, 0),
+};
+
+/* USB2PHY2 */
+static struct clk_stm32_composite ck_ker_usb2phy2_en = {
+ .gate_id = GATE_USB2PHY2,
+ .mux_id = MUX_USB2PHY2,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_usb2phy2_en", usb2phy2_src,
+ &clk_stm32_composite_ops, 0),
+};
+
+/* VREF */
+static struct clk_stm32_gate ck_icn_p_vref = {
+ .gate_id = GATE_VREF,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_vref", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+/* WWDG */
+static struct clk_stm32_gate ck_icn_p_wwdg1 = {
+ .gate_id = GATE_WWDG1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_wwdg1", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
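+/*
+ * Check whether CID1 (assumed here to be the Linux context, per the
+ * RCC_CID1 comparisons below) may use an RCC-local resource: access is
+ * denied if the resource is secure, statically filtered to another CID,
+ * or held under a semaphore granted to another CID.
+ */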
+static int stm32_rcc_get_access(void __iomem *base, u32 index)
+{
+ u32 seccfgr, cidcfgr, semcr;
+ int bit, cid;
+
+ bit = index % RCC_REG_SIZE;
+
+ seccfgr = readl(base + RCC_SECCFGR(index));
+ if (seccfgr & BIT(bit))
+ return -EACCES;
+
+ cidcfgr = readl(base + RCC_CIDCFGR(index));
+ if (!(cidcfgr & RCC_CIDCFGR_CFEN))
+ /* CID filtering is turned off: access granted */
+ return 0;
+
+ if (!(cidcfgr & RCC_CIDCFGR_SEM_EN)) {
+ /* Static CID mode */
+ cid = FIELD_GET(RCC_CIDCFGR_SCID_MASK, cidcfgr);
+ if (cid != RCC_CID1)
+ return -EACCES;
+ return 0;
+ }
+
+ /* Pass-list with semaphore mode */
+ if (!(cidcfgr & RCC_CIDCFGR_SEMWLC1_EN))
+ return -EACCES;
+
+ semcr = readl(base + RCC_SEMCR(index));
+
+ cid = FIELD_GET(RCC_SEMCR_SEMCID_MASK, semcr);
+ if (cid != RCC_CID1)
+ return -EACCES;
+
+ return 0;
+}
+
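+/*
+ * Clocks tagged with SEC_RIFSC_FLAG belong to RIFSC-managed peripherals
+ * and are checked through the firewall framework; the others are RCC-local
+ * resources checked directly against the RCC RIF registers.
+ */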
+static int stm32mp21_check_security(struct device_node *np, void __iomem *base,
+ const struct clock_config *cfg)
+{
+ int ret = 0;
+
+ if (cfg->sec_id != SECF_NONE) {
+ struct stm32_firewall firewall;
+ u32 index = (u32)cfg->sec_id;
+
+ if (index & SEC_RIFSC_FLAG) {
+ ret = stm32_firewall_get_firewall(np, &firewall, 1);
+ if (ret)
+ return ret;
+ ret = stm32_firewall_grant_access_by_id(&firewall, index & ~SEC_RIFSC_FLAG);
+ } else {
+ ret = stm32_rcc_get_access(base, cfg->sec_id & ~SEC_RIFSC_FLAG);
+ }
+ }
+
+ return ret;
+}
+
+static const struct clock_config stm32mp21_clock_cfg[] = {
+ STM32_GATE_CFG(CK_BUS_ETH1, ck_icn_p_eth1, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_BUS_ETH2, ck_icn_p_eth2, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_BUS_ADC1, ck_icn_p_adc1, SEC_RIFSC(58)),
+ STM32_GATE_CFG(CK_BUS_ADC2, ck_icn_p_adc2, SEC_RIFSC(59)),
+ STM32_GATE_CFG(CK_BUS_CRC, ck_icn_p_crc, SEC_RIFSC(109)),
+ STM32_GATE_CFG(CK_BUS_MDF1, ck_icn_p_mdf1, SEC_RIFSC(54)),
+ STM32_GATE_CFG(CK_BUS_HASH1, ck_icn_p_hash1, SEC_RIFSC(96)),
+ STM32_GATE_CFG(CK_BUS_HASH2, ck_icn_p_hash2, SEC_RIFSC(97)),
+ STM32_GATE_CFG(CK_BUS_RNG1, ck_icn_p_rng1, SEC_RIFSC(92)),
+ STM32_GATE_CFG(CK_BUS_RNG2, ck_icn_p_rng2, SEC_RIFSC(93)),
+ STM32_GATE_CFG(CK_BUS_CRYP1, ck_icn_p_cryp1, SEC_RIFSC(98)),
+ STM32_GATE_CFG(CK_BUS_CRYP2, ck_icn_p_cryp2, SEC_RIFSC(99)),
+ STM32_GATE_CFG(CK_BUS_SAES, ck_icn_p_saes, SEC_RIFSC(95)),
+ STM32_GATE_CFG(CK_BUS_PKA, ck_icn_p_pka, SEC_RIFSC(94)),
+ STM32_GATE_CFG(CK_BUS_LPUART1, ck_icn_p_lpuart1, SEC_RIFSC(40)),
+ STM32_GATE_CFG(CK_BUS_LPTIM3, ck_icn_p_lptim3, SEC_RIFSC(19)),
+ STM32_GATE_CFG(CK_BUS_LPTIM4, ck_icn_p_lptim4, SEC_RIFSC(20)),
+ STM32_GATE_CFG(CK_BUS_LPTIM5, ck_icn_p_lptim5, SEC_RIFSC(21)),
+ STM32_GATE_CFG(CK_BUS_SDMMC1, ck_icn_m_sdmmc1, SEC_RIFSC(76)),
+ STM32_GATE_CFG(CK_BUS_SDMMC2, ck_icn_m_sdmmc2, SEC_RIFSC(77)),
+ STM32_GATE_CFG(CK_BUS_SDMMC3, ck_icn_m_sdmmc3, SEC_RIFSC(78)),
+ STM32_GATE_CFG(CK_BUS_USBHOHCI, ck_icn_m_usbhohci, SEC_RIFSC(63)),
+ STM32_GATE_CFG(CK_BUS_USBHEHCI, ck_icn_m_usbhehci, SEC_RIFSC(63)),
+ STM32_GATE_CFG(CK_BUS_OTG, ck_icn_m_otg, SEC_RIFSC(66)),
+ STM32_GATE_CFG(CK_BUS_TIM2, ck_icn_p_tim2, SEC_RIFSC(1)),
+ STM32_GATE_CFG(CK_BUS_TIM3, ck_icn_p_tim3, SEC_RIFSC(2)),
+ STM32_GATE_CFG(CK_BUS_TIM4, ck_icn_p_tim4, SEC_RIFSC(3)),
+ STM32_GATE_CFG(CK_BUS_TIM5, ck_icn_p_tim5, SEC_RIFSC(4)),
+ STM32_GATE_CFG(CK_BUS_TIM6, ck_icn_p_tim6, SEC_RIFSC(5)),
+ STM32_GATE_CFG(CK_BUS_TIM7, ck_icn_p_tim7, SEC_RIFSC(6)),
+ STM32_GATE_CFG(CK_BUS_TIM10, ck_icn_p_tim10, SEC_RIFSC(8)),
+ STM32_GATE_CFG(CK_BUS_TIM11, ck_icn_p_tim11, SEC_RIFSC(9)),
+ STM32_GATE_CFG(CK_BUS_TIM12, ck_icn_p_tim12, SEC_RIFSC(10)),
+ STM32_GATE_CFG(CK_BUS_TIM13, ck_icn_p_tim13, SEC_RIFSC(11)),
+ STM32_GATE_CFG(CK_BUS_TIM14, ck_icn_p_tim14, SEC_RIFSC(12)),
+ STM32_GATE_CFG(CK_BUS_LPTIM1, ck_icn_p_lptim1, SEC_RIFSC(17)),
+ STM32_GATE_CFG(CK_BUS_LPTIM2, ck_icn_p_lptim2, SEC_RIFSC(18)),
+ STM32_GATE_CFG(CK_BUS_SPI2, ck_icn_p_spi2, SEC_RIFSC(23)),
+ STM32_GATE_CFG(CK_BUS_SPI3, ck_icn_p_spi3, SEC_RIFSC(24)),
+ STM32_GATE_CFG(CK_BUS_SPDIFRX, ck_icn_p_spdifrx, SEC_RIFSC(30)),
+ STM32_GATE_CFG(CK_BUS_USART2, ck_icn_p_usart2, SEC_RIFSC(32)),
+ STM32_GATE_CFG(CK_BUS_USART3, ck_icn_p_usart3, SEC_RIFSC(33)),
+ STM32_GATE_CFG(CK_BUS_UART4, ck_icn_p_uart4, SEC_RIFSC(34)),
+ STM32_GATE_CFG(CK_BUS_UART5, ck_icn_p_uart5, SEC_RIFSC(35)),
+ STM32_GATE_CFG(CK_BUS_I2C1, ck_icn_p_i2c1, SEC_RIFSC(41)),
+ STM32_GATE_CFG(CK_BUS_I2C2, ck_icn_p_i2c2, SEC_RIFSC(42)),
+ STM32_GATE_CFG(CK_BUS_I2C3, ck_icn_p_i2c3, SEC_RIFSC(43)),
+ STM32_GATE_CFG(CK_BUS_I3C1, ck_icn_p_i3c1, SEC_RIFSC(114)),
+ STM32_GATE_CFG(CK_BUS_I3C2, ck_icn_p_i3c2, SEC_RIFSC(115)),
+ STM32_GATE_CFG(CK_BUS_I3C3, ck_icn_p_i3c3, SEC_RIFSC(116)),
+ STM32_GATE_CFG(CK_BUS_TIM1, ck_icn_p_tim1, SEC_RIFSC(0)),
+ STM32_GATE_CFG(CK_BUS_TIM8, ck_icn_p_tim8, SEC_RIFSC(7)),
+ STM32_GATE_CFG(CK_BUS_TIM15, ck_icn_p_tim15, SEC_RIFSC(13)),
+ STM32_GATE_CFG(CK_BUS_TIM16, ck_icn_p_tim16, SEC_RIFSC(14)),
+ STM32_GATE_CFG(CK_BUS_TIM17, ck_icn_p_tim17, SEC_RIFSC(15)),
+ STM32_GATE_CFG(CK_BUS_SAI1, ck_icn_p_sai1, SEC_RIFSC(49)),
+ STM32_GATE_CFG(CK_BUS_SAI2, ck_icn_p_sai2, SEC_RIFSC(50)),
+ STM32_GATE_CFG(CK_BUS_SAI3, ck_icn_p_sai3, SEC_RIFSC(51)),
+ STM32_GATE_CFG(CK_BUS_SAI4, ck_icn_p_sai4, SEC_RIFSC(52)),
+ STM32_GATE_CFG(CK_BUS_USART1, ck_icn_p_usart1, SEC_RIFSC(31)),
+ STM32_GATE_CFG(CK_BUS_USART6, ck_icn_p_usart6, SEC_RIFSC(36)),
+ STM32_GATE_CFG(CK_BUS_UART7, ck_icn_p_uart7, SEC_RIFSC(37)),
+ STM32_GATE_CFG(CK_BUS_FDCAN, ck_icn_p_fdcan, SEC_RIFSC(56)),
+ STM32_GATE_CFG(CK_BUS_SPI1, ck_icn_p_spi1, SEC_RIFSC(22)),
+ STM32_GATE_CFG(CK_BUS_SPI4, ck_icn_p_spi4, SEC_RIFSC(25)),
+ STM32_GATE_CFG(CK_BUS_SPI5, ck_icn_p_spi5, SEC_RIFSC(26)),
+ STM32_GATE_CFG(CK_BUS_SPI6, ck_icn_p_spi6, SEC_RIFSC(27)),
+ STM32_GATE_CFG(CK_BUS_IWDG1, ck_icn_p_iwdg1, SEC_RIFSC(100)),
+ STM32_GATE_CFG(CK_BUS_IWDG2, ck_icn_p_iwdg2, SEC_RIFSC(101)),
+ STM32_GATE_CFG(CK_BUS_IWDG3, ck_icn_p_iwdg3, SEC_RIFSC(102)),
+ STM32_GATE_CFG(CK_BUS_IWDG4, ck_icn_p_iwdg4, SEC_RIFSC(103)),
+ STM32_GATE_CFG(CK_BUS_WWDG1, ck_icn_p_wwdg1, SEC_RIFSC(104)),
+ STM32_GATE_CFG(CK_BUS_VREF, ck_icn_p_vref, SEC_RIFSC(106)),
+ STM32_GATE_CFG(CK_BUS_SERC, ck_icn_p_serc, SEC_RIFSC(110)),
+ STM32_GATE_CFG(CK_BUS_HDP, ck_icn_p_hdp, SEC_RIFSC(57)),
+ STM32_GATE_CFG(CK_BUS_LTDC, ck_icn_p_ltdc, SEC_RIFSC(80)),
+ STM32_GATE_CFG(CK_BUS_CSI, ck_icn_p_csi, SEC_RIFSC(86)),
+ STM32_GATE_CFG(CK_BUS_DCMIPP, ck_icn_p_dcmipp, SEC_RIFSC(87)),
+ STM32_GATE_CFG(CK_BUS_DCMIPSSI, ck_icn_p_dcmipssi, SEC_RIFSC(88)),
+ STM32_GATE_CFG(CK_BUS_DDRPERFM, ck_icn_p_ddrperfm, SEC_RIFSC(67)),
+ STM32_GATE_CFG(CK_KER_TIM2, ck_ker_tim2, SEC_RIFSC(1)),
+ STM32_GATE_CFG(CK_KER_TIM3, ck_ker_tim3, SEC_RIFSC(2)),
+ STM32_GATE_CFG(CK_KER_TIM4, ck_ker_tim4, SEC_RIFSC(3)),
+ STM32_GATE_CFG(CK_KER_TIM5, ck_ker_tim5, SEC_RIFSC(4)),
+ STM32_GATE_CFG(CK_KER_TIM6, ck_ker_tim6, SEC_RIFSC(5)),
+ STM32_GATE_CFG(CK_KER_TIM7, ck_ker_tim7, SEC_RIFSC(6)),
+ STM32_GATE_CFG(CK_KER_TIM10, ck_ker_tim10, SEC_RIFSC(8)),
+ STM32_GATE_CFG(CK_KER_TIM11, ck_ker_tim11, SEC_RIFSC(9)),
+ STM32_GATE_CFG(CK_KER_TIM12, ck_ker_tim12, SEC_RIFSC(10)),
+ STM32_GATE_CFG(CK_KER_TIM13, ck_ker_tim13, SEC_RIFSC(11)),
+ STM32_GATE_CFG(CK_KER_TIM14, ck_ker_tim14, SEC_RIFSC(12)),
+ STM32_GATE_CFG(CK_KER_TIM1, ck_ker_tim1, SEC_RIFSC(0)),
+ STM32_GATE_CFG(CK_KER_TIM8, ck_ker_tim8, SEC_RIFSC(7)),
+ STM32_GATE_CFG(CK_KER_TIM15, ck_ker_tim15, SEC_RIFSC(13)),
+ STM32_GATE_CFG(CK_KER_TIM16, ck_ker_tim16, SEC_RIFSC(14)),
+ STM32_GATE_CFG(CK_KER_TIM17, ck_ker_tim17, SEC_RIFSC(15)),
+ STM32_GATE_CFG(CK_KER_LPTIM1, ck_ker_lptim1, SEC_RIFSC(17)),
+ STM32_GATE_CFG(CK_KER_LPTIM2, ck_ker_lptim2, SEC_RIFSC(18)),
+ STM32_GATE_CFG(CK_KER_USART2, ck_ker_usart2, SEC_RIFSC(32)),
+ STM32_GATE_CFG(CK_KER_UART4, ck_ker_uart4, SEC_RIFSC(34)),
+ STM32_GATE_CFG(CK_KER_USART3, ck_ker_usart3, SEC_RIFSC(33)),
+ STM32_GATE_CFG(CK_KER_UART5, ck_ker_uart5, SEC_RIFSC(35)),
+ STM32_GATE_CFG(CK_KER_SPI2, ck_ker_spi2, SEC_RIFSC(23)),
+ STM32_GATE_CFG(CK_KER_SPI3, ck_ker_spi3, SEC_RIFSC(24)),
+ STM32_GATE_CFG(CK_KER_SPDIFRX, ck_ker_spdifrx, SEC_RIFSC(30)),
+ STM32_GATE_CFG(CK_KER_I2C1, ck_ker_i2c1, SEC_RIFSC(41)),
+ STM32_GATE_CFG(CK_KER_I2C2, ck_ker_i2c2, SEC_RIFSC(42)),
+ STM32_GATE_CFG(CK_KER_I3C1, ck_ker_i3c1, SEC_RIFSC(114)),
+ STM32_GATE_CFG(CK_KER_I3C2, ck_ker_i3c2, SEC_RIFSC(115)),
+ STM32_GATE_CFG(CK_KER_I2C3, ck_ker_i2c3, SEC_RIFSC(43)),
+ STM32_GATE_CFG(CK_KER_I3C3, ck_ker_i3c3, SEC_RIFSC(116)),
+ STM32_GATE_CFG(CK_KER_SPI1, ck_ker_spi1, SEC_RIFSC(22)),
+ STM32_GATE_CFG(CK_KER_SPI4, ck_ker_spi4, SEC_RIFSC(25)),
+ STM32_GATE_CFG(CK_KER_SPI5, ck_ker_spi5, SEC_RIFSC(26)),
+ STM32_GATE_CFG(CK_KER_SPI6, ck_ker_spi6, SEC_RIFSC(27)),
+ STM32_GATE_CFG(CK_KER_USART1, ck_ker_usart1, SEC_RIFSC(31)),
+ STM32_GATE_CFG(CK_KER_USART6, ck_ker_usart6, SEC_RIFSC(36)),
+ STM32_GATE_CFG(CK_KER_UART7, ck_ker_uart7, SEC_RIFSC(37)),
+ STM32_GATE_CFG(CK_KER_MDF1, ck_ker_mdf1, SEC_RIFSC(54)),
+ STM32_GATE_CFG(CK_KER_SAI1, ck_ker_sai1, SEC_RIFSC(49)),
+ STM32_GATE_CFG(CK_KER_SAI2, ck_ker_sai2, SEC_RIFSC(50)),
+ STM32_GATE_CFG(CK_KER_SAI3, ck_ker_sai3, SEC_RIFSC(51)),
+ STM32_GATE_CFG(CK_KER_SAI4, ck_ker_sai4, SEC_RIFSC(52)),
+ STM32_GATE_CFG(CK_KER_FDCAN, ck_ker_fdcan, SEC_RIFSC(56)),
+ STM32_GATE_CFG(CK_KER_CSI, ck_ker_csi, SEC_RIFSC(86)),
+ STM32_GATE_CFG(CK_KER_CSITXESC, ck_ker_csitxesc, SEC_RIFSC(86)),
+ STM32_GATE_CFG(CK_KER_CSIPHY, ck_ker_csiphy, SEC_RIFSC(86)),
+ STM32_GATE_CFG(CK_KER_LPUART1, ck_ker_lpuart1, SEC_RIFSC(40)),
+ STM32_GATE_CFG(CK_KER_LPTIM3, ck_ker_lptim3, SEC_RIFSC(19)),
+ STM32_GATE_CFG(CK_KER_LPTIM4, ck_ker_lptim4, SEC_RIFSC(20)),
+ STM32_GATE_CFG(CK_KER_LPTIM5, ck_ker_lptim5, SEC_RIFSC(21)),
+ STM32_GATE_CFG(CK_KER_SDMMC1, ck_ker_sdmmc1, SEC_RIFSC(76)),
+ STM32_GATE_CFG(CK_KER_SDMMC2, ck_ker_sdmmc2, SEC_RIFSC(77)),
+ STM32_GATE_CFG(CK_KER_SDMMC3, ck_ker_sdmmc3, SEC_RIFSC(78)),
+ STM32_GATE_CFG(CK_KER_ETH1, ck_ker_eth1, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_ETH1_STP, ck_ker_eth1stp, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_KER_ETH2, ck_ker_eth2, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_ETH2_STP, ck_ker_eth2stp, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_KER_ETH1PTP, ck_ker_eth1ptp, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_KER_ETH2PTP, ck_ker_eth2ptp, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_ETH1_MAC, ck_ker_eth1mac, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_ETH1_TX, ck_ker_eth1tx, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_ETH1_RX, ck_ker_eth1rx, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_ETH2_MAC, ck_ker_eth2mac, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_ETH2_TX, ck_ker_eth2tx, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_ETH2_RX, ck_ker_eth2rx, SEC_RIFSC(61)),
+ STM32_COMPOSITE_CFG(CK_MCO1, ck_mco1, MP21_RIF_RCC_MCO1),
+ STM32_COMPOSITE_CFG(CK_MCO2, ck_mco2, MP21_RIF_RCC_MCO2),
+ STM32_COMPOSITE_CFG(CK_KER_ADC1, ck_ker_adc1, SEC_RIFSC(58)),
+ STM32_COMPOSITE_CFG(CK_KER_ADC2, ck_ker_adc2, SEC_RIFSC(59)),
+ STM32_COMPOSITE_CFG(CK_KER_USB2PHY1, ck_ker_usb2phy1, SEC_RIFSC(63)),
+ STM32_COMPOSITE_CFG(CK_KER_USB2PHY2EN, ck_ker_usb2phy2_en, SEC_RIFSC(66)),
+ STM32_COMPOSITE_CFG(CK_KER_DTS, ck_ker_dts, SEC_RIFSC(107)),
+ STM32_GATE_CFG(CK_KER_LTDC, ck_ker_ltdc, SEC_RIFSC(80)),
+};
+
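+/*
+ * One reset line: register offset, bit position, and whether the line is
+ * driven through a separate set/clear register pair (set_clr, used by the
+ * IWDG entries below) instead of a single read-modify-write bit.
+ */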
+#define RESET_MP21(id, _offset, _bit_idx, _set_clr) \
+ [id] = &(struct stm32_reset_cfg){ \
+ .offset = (_offset), \
+ .bit_idx = (_bit_idx), \
+ .set_clr = (_set_clr), \
+ }
+
+static const struct stm32_reset_cfg *stm32mp21_reset_cfg[] = {
+ RESET_MP21(TIM1_R, RCC_TIM1CFGR, 0, 0),
+ RESET_MP21(TIM2_R, RCC_TIM2CFGR, 0, 0),
+ RESET_MP21(TIM3_R, RCC_TIM3CFGR, 0, 0),
+ RESET_MP21(TIM4_R, RCC_TIM4CFGR, 0, 0),
+ RESET_MP21(TIM5_R, RCC_TIM5CFGR, 0, 0),
+ RESET_MP21(TIM6_R, RCC_TIM6CFGR, 0, 0),
+ RESET_MP21(TIM7_R, RCC_TIM7CFGR, 0, 0),
+ RESET_MP21(TIM8_R, RCC_TIM8CFGR, 0, 0),
+ RESET_MP21(TIM10_R, RCC_TIM10CFGR, 0, 0),
+ RESET_MP21(TIM11_R, RCC_TIM11CFGR, 0, 0),
+ RESET_MP21(TIM12_R, RCC_TIM12CFGR, 0, 0),
+ RESET_MP21(TIM13_R, RCC_TIM13CFGR, 0, 0),
+ RESET_MP21(TIM14_R, RCC_TIM14CFGR, 0, 0),
+ RESET_MP21(TIM15_R, RCC_TIM15CFGR, 0, 0),
+ RESET_MP21(TIM16_R, RCC_TIM16CFGR, 0, 0),
+ RESET_MP21(TIM17_R, RCC_TIM17CFGR, 0, 0),
+ RESET_MP21(LPTIM1_R, RCC_LPTIM1CFGR, 0, 0),
+ RESET_MP21(LPTIM2_R, RCC_LPTIM2CFGR, 0, 0),
+ RESET_MP21(LPTIM3_R, RCC_LPTIM3CFGR, 0, 0),
+ RESET_MP21(LPTIM4_R, RCC_LPTIM4CFGR, 0, 0),
+ RESET_MP21(LPTIM5_R, RCC_LPTIM5CFGR, 0, 0),
+ RESET_MP21(SPI1_R, RCC_SPI1CFGR, 0, 0),
+ RESET_MP21(SPI2_R, RCC_SPI2CFGR, 0, 0),
+ RESET_MP21(SPI3_R, RCC_SPI3CFGR, 0, 0),
+ RESET_MP21(SPI4_R, RCC_SPI4CFGR, 0, 0),
+ RESET_MP21(SPI5_R, RCC_SPI5CFGR, 0, 0),
+ RESET_MP21(SPI6_R, RCC_SPI6CFGR, 0, 0),
+ RESET_MP21(SPDIFRX_R, RCC_SPDIFRXCFGR, 0, 0),
+ RESET_MP21(USART1_R, RCC_USART1CFGR, 0, 0),
+ RESET_MP21(USART2_R, RCC_USART2CFGR, 0, 0),
+ RESET_MP21(USART3_R, RCC_USART3CFGR, 0, 0),
+ RESET_MP21(UART4_R, RCC_UART4CFGR, 0, 0),
+ RESET_MP21(UART5_R, RCC_UART5CFGR, 0, 0),
+ RESET_MP21(USART6_R, RCC_USART6CFGR, 0, 0),
+ RESET_MP21(UART7_R, RCC_UART7CFGR, 0, 0),
+ RESET_MP21(LPUART1_R, RCC_LPUART1CFGR, 0, 0),
+ RESET_MP21(I2C1_R, RCC_I2C1CFGR, 0, 0),
+ RESET_MP21(I2C2_R, RCC_I2C2CFGR, 0, 0),
+ RESET_MP21(I2C3_R, RCC_I2C3CFGR, 0, 0),
+ RESET_MP21(SAI1_R, RCC_SAI1CFGR, 0, 0),
+ RESET_MP21(SAI2_R, RCC_SAI2CFGR, 0, 0),
+ RESET_MP21(SAI3_R, RCC_SAI3CFGR, 0, 0),
+ RESET_MP21(SAI4_R, RCC_SAI4CFGR, 0, 0),
+ RESET_MP21(MDF1_R, RCC_MDF1CFGR, 0, 0),
+ RESET_MP21(FDCAN_R, RCC_FDCANCFGR, 0, 0),
+ RESET_MP21(HDP_R, RCC_HDPCFGR, 0, 0),
+ RESET_MP21(ADC1_R, RCC_ADC1CFGR, 0, 0),
+ RESET_MP21(ADC2_R, RCC_ADC2CFGR, 0, 0),
+ RESET_MP21(ETH1_R, RCC_ETH1CFGR, 0, 0),
+ RESET_MP21(ETH2_R, RCC_ETH2CFGR, 0, 0),
+ RESET_MP21(OTG_R, RCC_OTGCFGR, 0, 0),
+ RESET_MP21(USBH_R, RCC_USBHCFGR, 0, 0),
+ RESET_MP21(USB2PHY1_R, RCC_USB2PHY1CFGR, 0, 0),
+ RESET_MP21(USB2PHY2_R, RCC_USB2PHY2CFGR, 0, 0),
+ RESET_MP21(SDMMC1_R, RCC_SDMMC1CFGR, 0, 0),
+ RESET_MP21(SDMMC1DLL_R, RCC_SDMMC1CFGR, 16, 0),
+ RESET_MP21(SDMMC2_R, RCC_SDMMC2CFGR, 0, 0),
+ RESET_MP21(SDMMC2DLL_R, RCC_SDMMC2CFGR, 16, 0),
+ RESET_MP21(SDMMC3_R, RCC_SDMMC3CFGR, 0, 0),
+ RESET_MP21(SDMMC3DLL_R, RCC_SDMMC3CFGR, 16, 0),
+ RESET_MP21(LTDC_R, RCC_LTDCCFGR, 0, 0),
+ RESET_MP21(CSI_R, RCC_CSICFGR, 0, 0),
+ RESET_MP21(DCMIPP_R, RCC_DCMIPPCFGR, 0, 0),
+ RESET_MP21(DCMIPSSI_R, RCC_DCMIPSSICFGR, 0, 0),
+ RESET_MP21(WWDG1_R, RCC_WWDG1CFGR, 0, 0),
+ RESET_MP21(VREF_R, RCC_VREFCFGR, 0, 0),
+ RESET_MP21(DTS_R, RCC_DTSCFGR, 0, 0),
+ RESET_MP21(CRC_R, RCC_CRCCFGR, 0, 0),
+ RESET_MP21(SERC_R, RCC_SERCCFGR, 0, 0),
+ RESET_MP21(I3C1_R, RCC_I3C1CFGR, 0, 0),
+ RESET_MP21(I3C2_R, RCC_I3C2CFGR, 0, 0),
+ RESET_MP21(IWDG2_KER_R, RCC_IWDGC1CFGSETR, 18, 1),
+ RESET_MP21(IWDG4_KER_R, RCC_IWDGC2CFGSETR, 18, 1),
+ RESET_MP21(RNG1_R, RCC_RNG1CFGR, 0, 0),
+ RESET_MP21(RNG2_R, RCC_RNG2CFGR, 0, 0),
+ RESET_MP21(PKA_R, RCC_PKACFGR, 0, 0),
+ RESET_MP21(SAES_R, RCC_SAESCFGR, 0, 0),
+ RESET_MP21(HASH1_R, RCC_HASH1CFGR, 0, 0),
+ RESET_MP21(HASH2_R, RCC_HASH2CFGR, 0, 0),
+ RESET_MP21(CRYP1_R, RCC_CRYP1CFGR, 0, 0),
+ RESET_MP21(CRYP2_R, RCC_CRYP2CFGR, 0, 0),
+};
+
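+/* Usage counters for gates shared by several clocks (e.g. GATE_USBH, used by both USBH clocks) */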
+static u16 stm32mp21_cpt_gate[GATE_NB];
+
+static struct clk_stm32_clock_data stm32mp21_clock_data = {
+ .gate_cpt = stm32mp21_cpt_gate,
+ .gates = stm32mp21_gates,
+ .muxes = stm32mp21_muxes,
+};
+
+static struct clk_stm32_reset_data stm32mp21_reset_data = {
+ .reset_lines = stm32mp21_reset_cfg,
+ .nr_lines = ARRAY_SIZE(stm32mp21_reset_cfg),
+};
+
+static const struct stm32_rcc_match_data stm32mp21_data = {
+ .tab_clocks = stm32mp21_clock_cfg,
+ .num_clocks = ARRAY_SIZE(stm32mp21_clock_cfg),
+ .maxbinding = STM32MP21_LAST_CLK,
+ .clock_data = &stm32mp21_clock_data,
+ .reset_data = &stm32mp21_reset_data,
+ .check_security = &stm32mp21_check_security,
+};
+
+static const struct of_device_id stm32mp21_match_data[] = {
+ { .compatible = "st,stm32mp21-rcc", .data = &stm32mp21_data, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, stm32mp21_match_data);
+
+static int stm32mp21_rcc_clocks_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(IS_ERR(base)))
+ return PTR_ERR(base);
+
+ return stm32_rcc_init(dev, stm32mp21_match_data, base);
+}
+
+static struct platform_driver stm32mp21_rcc_clocks_driver = {
+ .driver = {
+ .name = "stm32mp21_rcc",
+ .of_match_table = stm32mp21_match_data,
+ },
+ .probe = stm32mp21_rcc_clocks_probe,
+};
+
+static int __init stm32mp21_clocks_init(void)
+{
+ return platform_driver_register(&stm32mp21_rcc_clocks_driver);
+}
+
+core_initcall(stm32mp21_clocks_init);
+
diff --git a/drivers/clk/stm32/stm32mp21_rcc.h b/drivers/clk/stm32/stm32mp21_rcc.h
new file mode 100644
index 000000000000..df3ea921ffba
--- /dev/null
+++ b/drivers/clk/stm32/stm32mp21_rcc.h
@@ -0,0 +1,651 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
+ */
+
+#ifndef STM32MP21_RCC_H
+#define STM32MP21_RCC_H
+
+#define RCC_SECCFGR0 0x0
+#define RCC_SECCFGR1 0x4
+#define RCC_SECCFGR2 0x8
+#define RCC_SECCFGR3 0xC
+#define RCC_PRIVCFGR0 0x10
+#define RCC_PRIVCFGR1 0x14
+#define RCC_PRIVCFGR2 0x18
+#define RCC_PRIVCFGR3 0x1C
+#define RCC_RCFGLOCKR0 0x20
+#define RCC_RCFGLOCKR1 0x24
+#define RCC_RCFGLOCKR2 0x28
+#define RCC_RCFGLOCKR3 0x2C
+#define RCC_R0CIDCFGR 0x30
+#define RCC_R0SEMCR 0x34
+#define RCC_R1CIDCFGR 0x38
+#define RCC_R1SEMCR 0x3C
+#define RCC_R2CIDCFGR 0x40
+#define RCC_R2SEMCR 0x44
+#define RCC_R3CIDCFGR 0x48
+#define RCC_R3SEMCR 0x4C
+#define RCC_R4CIDCFGR 0x50
+#define RCC_R4SEMCR 0x54
+#define RCC_R5CIDCFGR 0x58
+#define RCC_R5SEMCR 0x5C
+#define RCC_R6CIDCFGR 0x60
+#define RCC_R6SEMCR 0x64
+#define RCC_R7CIDCFGR 0x68
+#define RCC_R7SEMCR 0x6C
+#define RCC_R8CIDCFGR 0x70
+#define RCC_R8SEMCR 0x74
+#define RCC_R9CIDCFGR 0x78
+#define RCC_R9SEMCR 0x7C
+#define RCC_R10CIDCFGR 0x80
+#define RCC_R10SEMCR 0x84
+#define RCC_R11CIDCFGR 0x88
+#define RCC_R11SEMCR 0x8C
+#define RCC_R12CIDCFGR 0x90
+#define RCC_R12SEMCR 0x94
+#define RCC_R13CIDCFGR 0x98
+#define RCC_R13SEMCR 0x9C
+#define RCC_R14CIDCFGR 0xA0
+#define RCC_R14SEMCR 0xA4
+#define RCC_R15CIDCFGR 0xA8
+#define RCC_R15SEMCR 0xAC
+#define RCC_R16CIDCFGR 0xB0
+#define RCC_R16SEMCR 0xB4
+#define RCC_R17CIDCFGR 0xB8
+#define RCC_R17SEMCR 0xBC
+#define RCC_R18CIDCFGR 0xC0
+#define RCC_R18SEMCR 0xC4
+#define RCC_R19CIDCFGR 0xC8
+#define RCC_R19SEMCR 0xCC
+#define RCC_R20CIDCFGR 0xD0
+#define RCC_R20SEMCR 0xD4
+#define RCC_R21CIDCFGR 0xD8
+#define RCC_R21SEMCR 0xDC
+#define RCC_R22CIDCFGR 0xE0
+#define RCC_R22SEMCR 0xE4
+#define RCC_R23CIDCFGR 0xE8
+#define RCC_R23SEMCR 0xEC
+#define RCC_R24CIDCFGR 0xF0
+#define RCC_R24SEMCR 0xF4
+#define RCC_R25CIDCFGR 0xF8
+#define RCC_R25SEMCR 0xFC
+#define RCC_R26CIDCFGR 0x100
+#define RCC_R26SEMCR 0x104
+#define RCC_R27CIDCFGR 0x108
+#define RCC_R27SEMCR 0x10C
+#define RCC_R28CIDCFGR 0x110
+#define RCC_R28SEMCR 0x114
+#define RCC_R29CIDCFGR 0x118
+#define RCC_R29SEMCR 0x11C
+#define RCC_R30CIDCFGR 0x120
+#define RCC_R30SEMCR 0x124
+#define RCC_R31CIDCFGR 0x128
+#define RCC_R31SEMCR 0x12C
+#define RCC_R32CIDCFGR 0x130
+#define RCC_R32SEMCR 0x134
+#define RCC_R33CIDCFGR 0x138
+#define RCC_R33SEMCR 0x13C
+#define RCC_R34CIDCFGR 0x140
+#define RCC_R34SEMCR 0x144
+#define RCC_R35CIDCFGR 0x148
+#define RCC_R35SEMCR 0x14C
+#define RCC_R36CIDCFGR 0x150
+#define RCC_R36SEMCR 0x154
+#define RCC_R37CIDCFGR 0x158
+#define RCC_R37SEMCR 0x15C
+#define RCC_R38CIDCFGR 0x160
+#define RCC_R38SEMCR 0x164
+#define RCC_R39CIDCFGR 0x168
+#define RCC_R39SEMCR 0x16C
+#define RCC_R40CIDCFGR 0x170
+#define RCC_R40SEMCR 0x174
+#define RCC_R41CIDCFGR 0x178
+#define RCC_R41SEMCR 0x17C
+#define RCC_R42CIDCFGR 0x180
+#define RCC_R42SEMCR 0x184
+#define RCC_R43CIDCFGR 0x188
+#define RCC_R43SEMCR 0x18C
+#define RCC_R44CIDCFGR 0x190
+#define RCC_R44SEMCR 0x194
+#define RCC_R45CIDCFGR 0x198
+#define RCC_R45SEMCR 0x19C
+#define RCC_R46CIDCFGR 0x1A0
+#define RCC_R46SEMCR 0x1A4
+#define RCC_R47CIDCFGR 0x1A8
+#define RCC_R47SEMCR 0x1AC
+#define RCC_R48CIDCFGR 0x1B0
+#define RCC_R48SEMCR 0x1B4
+#define RCC_R49CIDCFGR 0x1B8
+#define RCC_R49SEMCR 0x1BC
+#define RCC_R50CIDCFGR 0x1C0
+#define RCC_R50SEMCR 0x1C4
+#define RCC_R51CIDCFGR 0x1C8
+#define RCC_R51SEMCR 0x1CC
+#define RCC_R52CIDCFGR 0x1D0
+#define RCC_R52SEMCR 0x1D4
+#define RCC_R53CIDCFGR 0x1D8
+#define RCC_R53SEMCR 0x1DC
+#define RCC_R54CIDCFGR 0x1E0
+#define RCC_R54SEMCR 0x1E4
+#define RCC_R55CIDCFGR 0x1E8
+#define RCC_R55SEMCR 0x1EC
+#define RCC_R56CIDCFGR 0x1F0
+#define RCC_R56SEMCR 0x1F4
+#define RCC_R57CIDCFGR 0x1F8
+#define RCC_R57SEMCR 0x1FC
+#define RCC_R58CIDCFGR 0x200
+#define RCC_R58SEMCR 0x204
+#define RCC_R59CIDCFGR 0x208
+#define RCC_R59SEMCR 0x20C
+#define RCC_R60CIDCFGR 0x210
+#define RCC_R60SEMCR 0x214
+#define RCC_R61CIDCFGR 0x218
+#define RCC_R61SEMCR 0x21C
+#define RCC_R62CIDCFGR 0x220
+#define RCC_R62SEMCR 0x224
+#define RCC_R63CIDCFGR 0x228
+#define RCC_R63SEMCR 0x22C
+#define RCC_R64CIDCFGR 0x230
+#define RCC_R64SEMCR 0x234
+#define RCC_R65CIDCFGR 0x238
+#define RCC_R65SEMCR 0x23C
+#define RCC_R66CIDCFGR 0x240
+#define RCC_R66SEMCR 0x244
+#define RCC_R67CIDCFGR 0x248
+#define RCC_R67SEMCR 0x24C
+#define RCC_R68CIDCFGR 0x250
+#define RCC_R68SEMCR 0x254
+#define RCC_R69CIDCFGR 0x258
+#define RCC_R69SEMCR 0x25C
+#define RCC_R70CIDCFGR 0x260
+#define RCC_R70SEMCR 0x264
+#define RCC_R71CIDCFGR 0x268
+#define RCC_R71SEMCR 0x26C
+#define RCC_R73CIDCFGR 0x278
+#define RCC_R73SEMCR 0x27C
+#define RCC_R74CIDCFGR 0x280
+#define RCC_R74SEMCR 0x284
+#define RCC_R75CIDCFGR 0x288
+#define RCC_R75SEMCR 0x28C
+#define RCC_R76CIDCFGR 0x290
+#define RCC_R76SEMCR 0x294
+#define RCC_R77CIDCFGR 0x298
+#define RCC_R77SEMCR 0x29C
+#define RCC_R78CIDCFGR 0x2A0
+#define RCC_R78SEMCR 0x2A4
+#define RCC_R79CIDCFGR 0x2A8
+#define RCC_R79SEMCR 0x2AC
+#define RCC_R83CIDCFGR 0x2C8
+#define RCC_R83SEMCR 0x2CC
+#define RCC_R84CIDCFGR 0x2D0
+#define RCC_R84SEMCR 0x2D4
+#define RCC_R85CIDCFGR 0x2D8
+#define RCC_R85SEMCR 0x2DC
+#define RCC_R86CIDCFGR 0x2E0
+#define RCC_R86SEMCR 0x2E4
+#define RCC_R87CIDCFGR 0x2E8
+#define RCC_R87SEMCR 0x2EC
+#define RCC_R88CIDCFGR 0x2F0
+#define RCC_R88SEMCR 0x2F4
+#define RCC_R90CIDCFGR 0x300
+#define RCC_R90SEMCR 0x304
+#define RCC_R91CIDCFGR 0x308
+#define RCC_R91SEMCR 0x30C
+#define RCC_R92CIDCFGR 0x310
+#define RCC_R92SEMCR 0x314
+#define RCC_R93CIDCFGR 0x318
+#define RCC_R93SEMCR 0x31C
+#define RCC_R94CIDCFGR 0x320
+#define RCC_R94SEMCR 0x324
+#define RCC_R95CIDCFGR 0x328
+#define RCC_R95SEMCR 0x32C
+#define RCC_R96CIDCFGR 0x330
+#define RCC_R96SEMCR 0x334
+#define RCC_R97CIDCFGR 0x338
+#define RCC_R97SEMCR 0x33C
+#define RCC_R98CIDCFGR 0x340
+#define RCC_R98SEMCR 0x344
+#define RCC_R101CIDCFGR 0x358
+#define RCC_R101SEMCR 0x35C
+#define RCC_R102CIDCFGR 0x360
+#define RCC_R102SEMCR 0x364
+#define RCC_R103CIDCFGR 0x368
+#define RCC_R103SEMCR 0x36C
+#define RCC_R104CIDCFGR 0x370
+#define RCC_R104SEMCR 0x374
+#define RCC_R105CIDCFGR 0x378
+#define RCC_R105SEMCR 0x37C
+#define RCC_R106CIDCFGR 0x380
+#define RCC_R106SEMCR 0x384
+#define RCC_R108CIDCFGR 0x390
+#define RCC_R108SEMCR 0x394
+#define RCC_R109CIDCFGR 0x398
+#define RCC_R109SEMCR 0x39C
+#define RCC_R110CIDCFGR 0x3A0
+#define RCC_R110SEMCR 0x3A4
+#define RCC_R111CIDCFGR 0x3A8
+#define RCC_R111SEMCR 0x3AC
+#define RCC_R112CIDCFGR 0x3B0
+#define RCC_R112SEMCR 0x3B4
+#define RCC_R113CIDCFGR 0x3B8
+#define RCC_R113SEMCR 0x3BC
+#define RCC_GRSTCSETR 0x400
+#define RCC_C1RSTCSETR 0x404
+#define RCC_C2RSTCSETR 0x40C
+#define RCC_HWRSTSCLRR 0x410
+#define RCC_C1HWRSTSCLRR 0x414
+#define RCC_C2HWRSTSCLRR 0x418
+#define RCC_C1BOOTRSTSSETR 0x41C
+#define RCC_C1BOOTRSTSCLRR 0x420
+#define RCC_C2BOOTRSTSSETR 0x424
+#define RCC_C2BOOTRSTSCLRR 0x428
+#define RCC_C1SREQSETR 0x42C
+#define RCC_C1SREQCLRR 0x430
+#define RCC_CPUBOOTCR 0x434
+#define RCC_STBYBOOTCR 0x438
+#define RCC_LEGBOOTCR 0x43C
+#define RCC_BDCR 0x440
+#define RCC_RDCR 0x44C
+#define RCC_C1MSRDCR 0x450
+#define RCC_PWRLPDLYCR 0x454
+#define RCC_C1CIESETR 0x458
+#define RCC_C1CIFCLRR 0x45C
+#define RCC_C2CIESETR 0x460
+#define RCC_C2CIFCLRR 0x464
+#define RCC_IWDGC1FZSETR 0x468
+#define RCC_IWDGC1FZCLRR 0x46C
+#define RCC_IWDGC1CFGSETR 0x470
+#define RCC_IWDGC1CFGCLRR 0x474
+#define RCC_IWDGC2FZSETR 0x478
+#define RCC_IWDGC2FZCLRR 0x47C
+#define RCC_IWDGC2CFGSETR 0x480
+#define RCC_IWDGC2CFGCLRR 0x484
+#define RCC_MCO1CFGR 0x488
+#define RCC_MCO2CFGR 0x48C
+#define RCC_OCENSETR 0x490
+#define RCC_OCENCLRR 0x494
+#define RCC_OCRDYR 0x498
+#define RCC_HSICFGR 0x49C
+#define RCC_MSICFGR 0x4A0
+#define RCC_LSICR 0x4A4
+#define RCC_RTCDIVR 0x4A8
+#define RCC_APB1DIVR 0x4AC
+#define RCC_APB2DIVR 0x4B0
+#define RCC_APB3DIVR 0x4B4
+#define RCC_APB4DIVR 0x4B8
+#define RCC_APB5DIVR 0x4BC
+#define RCC_APBDBGDIVR 0x4C0
+#define RCC_TIMG1PRER 0x4C8
+#define RCC_TIMG2PRER 0x4CC
+#define RCC_LSMCUDIVR 0x4D0
+#define RCC_DDRCPCFGR 0x4D4
+#define RCC_DDRCAPBCFGR 0x4D8
+#define RCC_DDRPHYCAPBCFGR 0x4DC
+#define RCC_DDRPHYCCFGR 0x4E0
+#define RCC_DDRCFGR 0x4E4
+#define RCC_DDRITFCFGR 0x4E8
+#define RCC_SYSRAMCFGR 0x4F0
+#define RCC_SRAM1CFGR 0x4F8
+#define RCC_RETRAMCFGR 0x500
+#define RCC_BKPSRAMCFGR 0x504
+#define RCC_OSPI1CFGR 0x514
+#define RCC_FMCCFGR 0x51C
+#define RCC_DBGCFGR 0x520
+#define RCC_STMCFGR 0x524
+#define RCC_ETRCFGR 0x528
+#define RCC_GPIOACFGR 0x52C
+#define RCC_GPIOBCFGR 0x530
+#define RCC_GPIOCCFGR 0x534
+#define RCC_GPIODCFGR 0x538
+#define RCC_GPIOECFGR 0x53C
+#define RCC_GPIOFCFGR 0x540
+#define RCC_GPIOGCFGR 0x544
+#define RCC_GPIOHCFGR 0x548
+#define RCC_GPIOICFGR 0x54C
+#define RCC_GPIOZCFGR 0x558
+#define RCC_HPDMA1CFGR 0x55C
+#define RCC_HPDMA2CFGR 0x560
+#define RCC_HPDMA3CFGR 0x564
+#define RCC_IPCC1CFGR 0x570
+#define RCC_RTCCFGR 0x578
+#define RCC_SYSCPU1CFGR 0x580
+#define RCC_BSECCFGR 0x584
+#define RCC_PLL2CFGR1 0x590
+#define RCC_PLL2CFGR2 0x594
+#define RCC_PLL2CFGR3 0x598
+#define RCC_PLL2CFGR4 0x59C
+#define RCC_PLL2CFGR5 0x5A0
+#define RCC_PLL2CFGR6 0x5A8
+#define RCC_PLL2CFGR7 0x5AC
+#define RCC_HSIFMONCR 0x5E0
+#define RCC_HSIFVALR 0x5E4
+#define RCC_MSIFMONCR 0x5E8
+#define RCC_MSIFVALR 0x5EC
+#define RCC_TIM1CFGR 0x700
+#define RCC_TIM2CFGR 0x704
+#define RCC_TIM3CFGR 0x708
+#define RCC_TIM4CFGR 0x70C
+#define RCC_TIM5CFGR 0x710
+#define RCC_TIM6CFGR 0x714
+#define RCC_TIM7CFGR 0x718
+#define RCC_TIM8CFGR 0x71C
+#define RCC_TIM10CFGR 0x720
+#define RCC_TIM11CFGR 0x724
+#define RCC_TIM12CFGR 0x728
+#define RCC_TIM13CFGR 0x72C
+#define RCC_TIM14CFGR 0x730
+#define RCC_TIM15CFGR 0x734
+#define RCC_TIM16CFGR 0x738
+#define RCC_TIM17CFGR 0x73C
+#define RCC_LPTIM1CFGR 0x744
+#define RCC_LPTIM2CFGR 0x748
+#define RCC_LPTIM3CFGR 0x74C
+#define RCC_LPTIM4CFGR 0x750
+#define RCC_LPTIM5CFGR 0x754
+#define RCC_SPI1CFGR 0x758
+#define RCC_SPI2CFGR 0x75C
+#define RCC_SPI3CFGR 0x760
+#define RCC_SPI4CFGR 0x764
+#define RCC_SPI5CFGR 0x768
+#define RCC_SPI6CFGR 0x76C
+#define RCC_SPDIFRXCFGR 0x778
+#define RCC_USART1CFGR 0x77C
+#define RCC_USART2CFGR 0x780
+#define RCC_USART3CFGR 0x784
+#define RCC_UART4CFGR 0x788
+#define RCC_UART5CFGR 0x78C
+#define RCC_USART6CFGR 0x790
+#define RCC_UART7CFGR 0x794
+#define RCC_LPUART1CFGR 0x7A0
+#define RCC_I2C1CFGR 0x7A4
+#define RCC_I2C2CFGR 0x7A8
+#define RCC_I2C3CFGR 0x7AC
+#define RCC_SAI1CFGR 0x7C4
+#define RCC_SAI2CFGR 0x7C8
+#define RCC_SAI3CFGR 0x7CC
+#define RCC_SAI4CFGR 0x7D0
+#define RCC_MDF1CFGR 0x7D8
+#define RCC_FDCANCFGR 0x7E0
+#define RCC_HDPCFGR 0x7E4
+#define RCC_ADC1CFGR 0x7E8
+#define RCC_ADC2CFGR 0x7EC
+#define RCC_ETH1CFGR 0x7F0
+#define RCC_ETH2CFGR 0x7F4
+#define RCC_USBHCFGR 0x7FC
+#define RCC_USB2PHY1CFGR 0x800
+#define RCC_OTGCFGR 0x808
+#define RCC_USB2PHY2CFGR 0x80C
+#define RCC_STGENCFGR 0x824
+#define RCC_SDMMC1CFGR 0x830
+#define RCC_SDMMC2CFGR 0x834
+#define RCC_SDMMC3CFGR 0x838
+#define RCC_LTDCCFGR 0x840
+#define RCC_CSICFGR 0x858
+#define RCC_DCMIPPCFGR 0x85C
+#define RCC_DCMIPSSICFGR 0x860
+#define RCC_RNG1CFGR 0x870
+#define RCC_RNG2CFGR 0x874
+#define RCC_PKACFGR 0x878
+#define RCC_SAESCFGR 0x87C
+#define RCC_HASH1CFGR 0x880
+#define RCC_HASH2CFGR 0x884
+#define RCC_CRYP1CFGR 0x888
+#define RCC_CRYP2CFGR 0x88C
+#define RCC_IWDG1CFGR 0x894
+#define RCC_IWDG2CFGR 0x898
+#define RCC_IWDG3CFGR 0x89C
+#define RCC_IWDG4CFGR 0x8A0
+#define RCC_WWDG1CFGR 0x8A4
+#define RCC_VREFCFGR 0x8AC
+#define RCC_DTSCFGR 0x8B0
+#define RCC_CRCCFGR 0x8B4
+#define RCC_SERCCFGR 0x8B8
+#define RCC_DDRPERFMCFGR 0x8C0
+#define RCC_I3C1CFGR 0x8C8
+#define RCC_I3C2CFGR 0x8CC
+#define RCC_I3C3CFGR 0x8D0
+#define RCC_MUXSELCFGR 0x1000
+#define RCC_XBAR0CFGR 0x1018
+#define RCC_XBAR1CFGR 0x101C
+#define RCC_XBAR2CFGR 0x1020
+#define RCC_XBAR3CFGR 0x1024
+#define RCC_XBAR4CFGR 0x1028
+#define RCC_XBAR5CFGR 0x102C
+#define RCC_XBAR6CFGR 0x1030
+#define RCC_XBAR7CFGR 0x1034
+#define RCC_XBAR8CFGR 0x1038
+#define RCC_XBAR9CFGR 0x103C
+#define RCC_XBAR10CFGR 0x1040
+#define RCC_XBAR11CFGR 0x1044
+#define RCC_XBAR12CFGR 0x1048
+#define RCC_XBAR13CFGR 0x104C
+#define RCC_XBAR14CFGR 0x1050
+#define RCC_XBAR15CFGR 0x1054
+#define RCC_XBAR16CFGR 0x1058
+#define RCC_XBAR17CFGR 0x105C
+#define RCC_XBAR18CFGR 0x1060
+#define RCC_XBAR19CFGR 0x1064
+#define RCC_XBAR20CFGR 0x1068
+#define RCC_XBAR21CFGR 0x106C
+#define RCC_XBAR22CFGR 0x1070
+#define RCC_XBAR23CFGR 0x1074
+#define RCC_XBAR24CFGR 0x1078
+#define RCC_XBAR25CFGR 0x107C
+#define RCC_XBAR26CFGR 0x1080
+#define RCC_XBAR27CFGR 0x1084
+#define RCC_XBAR28CFGR 0x1088
+#define RCC_XBAR29CFGR 0x108C
+#define RCC_XBAR30CFGR 0x1090
+#define RCC_XBAR31CFGR 0x1094
+#define RCC_XBAR32CFGR 0x1098
+#define RCC_XBAR33CFGR 0x109C
+#define RCC_XBAR34CFGR 0x10A0
+#define RCC_XBAR35CFGR 0x10A4
+#define RCC_XBAR36CFGR 0x10A8
+#define RCC_XBAR37CFGR 0x10AC
+#define RCC_XBAR38CFGR 0x10B0
+#define RCC_XBAR39CFGR 0x10B4
+#define RCC_XBAR40CFGR 0x10B8
+#define RCC_XBAR41CFGR 0x10BC
+#define RCC_XBAR42CFGR 0x10C0
+#define RCC_XBAR43CFGR 0x10C4
+#define RCC_XBAR44CFGR 0x10C8
+#define RCC_XBAR45CFGR 0x10CC
+#define RCC_XBAR46CFGR 0x10D0
+#define RCC_XBAR47CFGR 0x10D4
+#define RCC_XBAR48CFGR 0x10D8
+#define RCC_XBAR49CFGR 0x10DC
+#define RCC_XBAR50CFGR 0x10E0
+#define RCC_XBAR51CFGR 0x10E4
+#define RCC_XBAR52CFGR 0x10E8
+#define RCC_XBAR53CFGR 0x10EC
+#define RCC_XBAR54CFGR 0x10F0
+#define RCC_XBAR55CFGR 0x10F4
+#define RCC_XBAR56CFGR 0x10F8
+#define RCC_XBAR57CFGR 0x10FC
+#define RCC_XBAR58CFGR 0x1100
+#define RCC_XBAR59CFGR 0x1104
+#define RCC_XBAR60CFGR 0x1108
+#define RCC_XBAR61CFGR 0x110C
+#define RCC_XBAR62CFGR 0x1110
+#define RCC_XBAR63CFGR 0x1114
+#define RCC_PREDIV0CFGR 0x1118
+#define RCC_PREDIV1CFGR 0x111C
+#define RCC_PREDIV2CFGR 0x1120
+#define RCC_PREDIV3CFGR 0x1124
+#define RCC_PREDIV4CFGR 0x1128
+#define RCC_PREDIV5CFGR 0x112C
+#define RCC_PREDIV6CFGR 0x1130
+#define RCC_PREDIV7CFGR 0x1134
+#define RCC_PREDIV8CFGR 0x1138
+#define RCC_PREDIV9CFGR 0x113C
+#define RCC_PREDIV10CFGR 0x1140
+#define RCC_PREDIV11CFGR 0x1144
+#define RCC_PREDIV12CFGR 0x1148
+#define RCC_PREDIV13CFGR 0x114C
+#define RCC_PREDIV14CFGR 0x1150
+#define RCC_PREDIV15CFGR 0x1154
+#define RCC_PREDIV16CFGR 0x1158
+#define RCC_PREDIV17CFGR 0x115C
+#define RCC_PREDIV18CFGR 0x1160
+#define RCC_PREDIV19CFGR 0x1164
+#define RCC_PREDIV20CFGR 0x1168
+#define RCC_PREDIV21CFGR 0x116C
+#define RCC_PREDIV22CFGR 0x1170
+#define RCC_PREDIV23CFGR 0x1174
+#define RCC_PREDIV24CFGR 0x1178
+#define RCC_PREDIV25CFGR 0x117C
+#define RCC_PREDIV26CFGR 0x1180
+#define RCC_PREDIV27CFGR 0x1184
+#define RCC_PREDIV28CFGR 0x1188
+#define RCC_PREDIV29CFGR 0x118C
+#define RCC_PREDIV30CFGR 0x1190
+#define RCC_PREDIV31CFGR 0x1194
+#define RCC_PREDIV32CFGR 0x1198
+#define RCC_PREDIV33CFGR 0x119C
+#define RCC_PREDIV34CFGR 0x11A0
+#define RCC_PREDIV35CFGR 0x11A4
+#define RCC_PREDIV36CFGR 0x11A8
+#define RCC_PREDIV37CFGR 0x11AC
+#define RCC_PREDIV38CFGR 0x11B0
+#define RCC_PREDIV39CFGR 0x11B4
+#define RCC_PREDIV40CFGR 0x11B8
+#define RCC_PREDIV41CFGR 0x11BC
+#define RCC_PREDIV42CFGR 0x11C0
+#define RCC_PREDIV43CFGR 0x11C4
+#define RCC_PREDIV44CFGR 0x11C8
+#define RCC_PREDIV45CFGR 0x11CC
+#define RCC_PREDIV46CFGR 0x11D0
+#define RCC_PREDIV47CFGR 0x11D4
+#define RCC_PREDIV48CFGR 0x11D8
+#define RCC_PREDIV49CFGR 0x11DC
+#define RCC_PREDIV50CFGR 0x11E0
+#define RCC_PREDIV51CFGR 0x11E4
+#define RCC_PREDIV52CFGR 0x11E8
+#define RCC_PREDIV53CFGR 0x11EC
+#define RCC_PREDIV54CFGR 0x11F0
+#define RCC_PREDIV55CFGR 0x11F4
+#define RCC_PREDIV56CFGR 0x11F8
+#define RCC_PREDIV57CFGR 0x11FC
+#define RCC_PREDIV58CFGR 0x1200
+#define RCC_PREDIV59CFGR 0x1204
+#define RCC_PREDIV60CFGR 0x1208
+#define RCC_PREDIV61CFGR 0x120C
+#define RCC_PREDIV62CFGR 0x1210
+#define RCC_PREDIV63CFGR 0x1214
+#define RCC_PREDIVSR1 0x1218
+#define RCC_PREDIVSR2 0x121C
+#define RCC_FINDIV0CFGR 0x1224
+#define RCC_FINDIV1CFGR 0x1228
+#define RCC_FINDIV2CFGR 0x122C
+#define RCC_FINDIV3CFGR 0x1230
+#define RCC_FINDIV4CFGR 0x1234
+#define RCC_FINDIV5CFGR 0x1238
+#define RCC_FINDIV6CFGR 0x123C
+#define RCC_FINDIV7CFGR 0x1240
+#define RCC_FINDIV8CFGR 0x1244
+#define RCC_FINDIV9CFGR 0x1248
+#define RCC_FINDIV10CFGR 0x124C
+#define RCC_FINDIV11CFGR 0x1250
+#define RCC_FINDIV12CFGR 0x1254
+#define RCC_FINDIV13CFGR 0x1258
+#define RCC_FINDIV14CFGR 0x125C
+#define RCC_FINDIV15CFGR 0x1260
+#define RCC_FINDIV16CFGR 0x1264
+#define RCC_FINDIV17CFGR 0x1268
+#define RCC_FINDIV18CFGR 0x126C
+#define RCC_FINDIV19CFGR 0x1270
+#define RCC_FINDIV20CFGR 0x1274
+#define RCC_FINDIV21CFGR 0x1278
+#define RCC_FINDIV22CFGR 0x127C
+#define RCC_FINDIV23CFGR 0x1280
+#define RCC_FINDIV24CFGR 0x1284
+#define RCC_FINDIV25CFGR 0x1288
+#define RCC_FINDIV26CFGR 0x128C
+#define RCC_FINDIV27CFGR 0x1290
+#define RCC_FINDIV28CFGR 0x1294
+#define RCC_FINDIV29CFGR 0x1298
+#define RCC_FINDIV30CFGR 0x129C
+#define RCC_FINDIV31CFGR 0x12A0
+#define RCC_FINDIV32CFGR 0x12A4
+#define RCC_FINDIV33CFGR 0x12A8
+#define RCC_FINDIV34CFGR 0x12AC
+#define RCC_FINDIV35CFGR 0x12B0
+#define RCC_FINDIV36CFGR 0x12B4
+#define RCC_FINDIV37CFGR 0x12B8
+#define RCC_FINDIV38CFGR 0x12BC
+#define RCC_FINDIV39CFGR 0x12C0
+#define RCC_FINDIV40CFGR 0x12C4
+#define RCC_FINDIV41CFGR 0x12C8
+#define RCC_FINDIV42CFGR 0x12CC
+#define RCC_FINDIV43CFGR 0x12D0
+#define RCC_FINDIV44CFGR 0x12D4
+#define RCC_FINDIV45CFGR 0x12D8
+#define RCC_FINDIV46CFGR 0x12DC
+#define RCC_FINDIV47CFGR 0x12E0
+#define RCC_FINDIV48CFGR 0x12E4
+#define RCC_FINDIV49CFGR 0x12E8
+#define RCC_FINDIV50CFGR 0x12EC
+#define RCC_FINDIV51CFGR 0x12F0
+#define RCC_FINDIV52CFGR 0x12F4
+#define RCC_FINDIV53CFGR 0x12F8
+#define RCC_FINDIV54CFGR 0x12FC
+#define RCC_FINDIV55CFGR 0x1300
+#define RCC_FINDIV56CFGR 0x1304
+#define RCC_FINDIV57CFGR 0x1308
+#define RCC_FINDIV58CFGR 0x130C
+#define RCC_FINDIV59CFGR 0x1310
+#define RCC_FINDIV60CFGR 0x1314
+#define RCC_FINDIV61CFGR 0x1318
+#define RCC_FINDIV62CFGR 0x131C
+#define RCC_FINDIV63CFGR 0x1320
+#define RCC_FINDIVSR1 0x1324
+#define RCC_FINDIVSR2 0x1328
+#define RCC_FCALCOBS0CFGR 0x1340
+#define RCC_FCALCOBS1CFGR 0x1344
+#define RCC_FCALCREFCFGR 0x1348
+#define RCC_FCALCCR1 0x134C
+#define RCC_FCALCCR2 0x1354
+#define RCC_FCALCSR 0x1358
+#define RCC_PLL4CFGR1 0x1360
+#define RCC_PLL4CFGR2 0x1364
+#define RCC_PLL4CFGR3 0x1368
+#define RCC_PLL4CFGR4 0x136C
+#define RCC_PLL4CFGR5 0x1370
+#define RCC_PLL4CFGR6 0x1378
+#define RCC_PLL4CFGR7 0x137C
+#define RCC_PLL5CFGR1 0x1388
+#define RCC_PLL5CFGR2 0x138C
+#define RCC_PLL5CFGR3 0x1390
+#define RCC_PLL5CFGR4 0x1394
+#define RCC_PLL5CFGR5 0x1398
+#define RCC_PLL5CFGR6 0x13A0
+#define RCC_PLL5CFGR7 0x13A4
+#define RCC_PLL6CFGR1 0x13B0
+#define RCC_PLL6CFGR2 0x13B4
+#define RCC_PLL6CFGR3 0x13B8
+#define RCC_PLL6CFGR4 0x13BC
+#define RCC_PLL6CFGR5 0x13C0
+#define RCC_PLL6CFGR6 0x13C8
+#define RCC_PLL6CFGR7 0x13CC
+#define RCC_PLL7CFGR1 0x13D8
+#define RCC_PLL7CFGR2 0x13DC
+#define RCC_PLL7CFGR3 0x13E0
+#define RCC_PLL7CFGR4 0x13E4
+#define RCC_PLL7CFGR5 0x13E8
+#define RCC_PLL7CFGR6 0x13F0
+#define RCC_PLL7CFGR7 0x13F4
+#define RCC_PLL8CFGR1 0x1400
+#define RCC_PLL8CFGR2 0x1404
+#define RCC_PLL8CFGR3 0x1408
+#define RCC_PLL8CFGR4 0x140C
+#define RCC_PLL8CFGR5 0x1410
+#define RCC_PLL8CFGR6 0x1418
+#define RCC_PLL8CFGR7 0x141C
+#define RCC_VERR 0xFFF4
+#define RCC_IDR 0xFFF8
+#define RCC_SIDR 0xFFFC
+
+#endif /* STM32MP21_RCC_H */
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 8896fd052ef1..6af2d020e03e 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -57,6 +57,11 @@ config SUN55I_A523_CCU
default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
+config SUN55I_A523_MCU_CCU
+ tristate "Support for the Allwinner A523/T527 MCU CCU"
+ default ARCH_SUNXI
+ depends on ARM64 || COMPILE_TEST
+
config SUN55I_A523_R_CCU
tristate "Support for the Allwinner A523/T527 PRCM CCU"
default ARCH_SUNXI
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 82e471036de6..a1c4087d7241 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_SUN50I_H6_CCU) += sun50i-h6-ccu.o
obj-$(CONFIG_SUN50I_H6_R_CCU) += sun50i-h6-r-ccu.o
obj-$(CONFIG_SUN50I_H616_CCU) += sun50i-h616-ccu.o
obj-$(CONFIG_SUN55I_A523_CCU) += sun55i-a523-ccu.o
+obj-$(CONFIG_SUN55I_A523_MCU_CCU) += sun55i-a523-mcu-ccu.o
obj-$(CONFIG_SUN55I_A523_R_CCU) += sun55i-a523-r-ccu.o
obj-$(CONFIG_SUN4I_A10_CCU) += sun4i-a10-ccu.o
obj-$(CONFIG_SUN5I_CCU) += sun5i-ccu.o
@@ -61,6 +62,7 @@ sun50i-h6-ccu-y += ccu-sun50i-h6.o
sun50i-h6-r-ccu-y += ccu-sun50i-h6-r.o
sun50i-h616-ccu-y += ccu-sun50i-h616.o
sun55i-a523-ccu-y += ccu-sun55i-a523.o
+sun55i-a523-mcu-ccu-y += ccu-sun55i-a523-mcu.o
sun55i-a523-r-ccu-y += ccu-sun55i-a523-r.o
sun4i-a10-ccu-y += ccu-sun4i-a10.o
sun5i-ccu-y += ccu-sun5i.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
index acb4e8b9b1ba..d24fa3449303 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
@@ -80,7 +80,7 @@ static struct ccu_div r_apb2_clk = {
* in the BSP source code, although most of them are unused. The existence
* of the hardware block is verified with "3.1 Memory Mapping" chapter in
* "Allwinner H6 V200 User Manual V1.1"; and the parent APB buses are verified
- * with "3.3.2.1 System Bus Tree" chapter inthe same document.
+ * with "3.3.2.1 System Bus Tree" chapter in the same document.
*/
static SUNXI_CCU_GATE(r_apb1_timer_clk, "r-apb1-timer", "r-apb1",
0x11c, BIT(0), 0);
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523-mcu.c b/drivers/clk/sunxi-ng/ccu-sun55i-a523-mcu.c
new file mode 100644
index 000000000000..197844f0fe4e
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523-mcu.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Chen-Yu Tsai <wens@csie.org>
+ *
+ * Based on the A523 CCU driver:
+ * Copyright (C) 2023-2024 Arm Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/sun55i-a523-mcu-ccu.h>
+#include <dt-bindings/reset/sun55i-a523-mcu-ccu.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nm.h"
+
+static const struct clk_parent_data osc24M[] = {
+ { .fw_name = "hosc" }
+};
+
+static const struct clk_parent_data ahb[] = {
+ { .fw_name = "r-ahb" }
+};
+
+static const struct clk_parent_data apb[] = {
+ { .fw_name = "r-apb0" }
+};
+
+#define SUN55I_A523_PLL_AUDIO1_REG 0x00c
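+/*
+ * Sigma-delta modulation settings for the fractional audio rates noted in
+ * the per-entry comments (div2/div5 refer to the fixed-factor dividers
+ * defined below).
+ */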
+static struct ccu_sdm_setting pll_audio1_sdm_table[] = {
+ { .rate = 2167603200, .pattern = 0xa000a234, .m = 1, .n = 90 }, /* div2->22.5792 */
+ { .rate = 2359296000, .pattern = 0xa0009ba6, .m = 1, .n = 98 }, /* div2->24.576 */
+	{ .rate = 1806336000, .pattern = 0xa000872b, .m = 1, .n = 75 },	/* div5->22.5792 */
+};
+
+static struct ccu_nm pll_audio1_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1),
+ .sdm = _SUNXI_CCU_SDM(pll_audio1_sdm_table, BIT(24),
+ 0x010, BIT(31)),
+ .min_rate = 180000000U,
+ .max_rate = 3500000000U,
+ .common = {
+ .reg = 0x00c,
+ .features = CCU_FEATURE_SIGMA_DELTA_MOD,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-audio1",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+/*
+ * The /2 and /5 dividers are actually programmable, but we just use the
+ * values from the BSP, since the audio PLL only needs to provide a
+ * couple of clock rates. This also matches the names given in the manual.
+ */
+static const struct clk_hw *pll_audio1_div_parents[] = { &pll_audio1_clk.common.hw };
+static CLK_FIXED_FACTOR_HWS(pll_audio1_div2_clk, "pll-audio1-div2",
+ pll_audio1_div_parents, 2, 1,
+ CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HWS(pll_audio1_div5_clk, "pll-audio1-div5",
+ pll_audio1_div_parents, 5, 1,
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_WITH_GATE(audio_out_clk, "audio-out",
+ "pll-audio1-div2", 0x01c,
+ 0, 5, BIT(31), CLK_SET_RATE_PARENT);
+
+static const struct clk_parent_data dsp_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ /*
+	 * The order of the following two parents is from the BSP code. It is
+ * the opposite in the manual. Testing with the DSP is required to
+ * figure out the real order.
+ */
+ { .hw = &pll_audio1_div5_clk.hw },
+ { .hw = &pll_audio1_div2_clk.hw },
+ { .fw_name = "dsp" },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(dsp_clk, "mcu-dsp", dsp_parents, 0x0020,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_parent_data i2s_parents[] = {
+ { .fw_name = "pll-audio0-4x" },
+ { .hw = &pll_audio1_div2_clk.hw },
+ { .hw = &pll_audio1_div5_clk.hw },
+};
+
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s0_clk, "i2s0", i2s_parents, 0x02c,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s1_clk, "i2s1", i2s_parents, 0x030,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s2_clk, "i2s2", i2s_parents, 0x034,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s3_clk, "i2s3", i2s_parents, 0x038,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static const struct clk_parent_data i2s3_asrc_parents[] = {
+ { .fw_name = "pll-periph0-300m" },
+ { .hw = &pll_audio1_div2_clk.hw },
+ { .hw = &pll_audio1_div5_clk.hw },
+};
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s3_asrc_clk, "i2s3-asrc",
+ i2s3_asrc_parents, 0x03c,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_DATA(bus_i2s0_clk, "bus-i2s0", apb, 0x040, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(bus_i2s1_clk, "bus-i2s1", apb, 0x040, BIT(1), 0);
+static SUNXI_CCU_GATE_DATA(bus_i2s2_clk, "bus-i2s2", apb, 0x040, BIT(2), 0);
+static SUNXI_CCU_GATE_DATA(bus_i2s3_clk, "bus-i2s3", apb, 0x040, BIT(3), 0);
+
+static const struct clk_parent_data audio_parents[] = {
+ { .fw_name = "pll-audio0-4x" },
+ { .hw = &pll_audio1_div2_clk.hw },
+ { .hw = &pll_audio1_div5_clk.hw },
+};
+static SUNXI_CCU_DUALDIV_MUX_GATE(spdif_tx_clk, "spdif-tx",
+ audio_parents, 0x044,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(spdif_rx_clk, "spdif-rx",
+ i2s3_asrc_parents, 0x048,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_DATA(bus_spdif_clk, "bus-spdif", apb, 0x04c, BIT(0), 0);
+
+static SUNXI_CCU_DUALDIV_MUX_GATE(dmic_clk, "dmic", audio_parents, 0x050,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_DATA(bus_dmic_clk, "bus-dmic", apb, 0x054, BIT(0), 0);
+
+static SUNXI_CCU_DUALDIV_MUX_GATE(audio_dac_clk, "audio-dac",
+ audio_parents, 0x058,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(audio_adc_clk, "audio-adc",
+ audio_parents, 0x05c,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_DATA(bus_audio_codec_clk, "bus-audio-codec",
+ apb, 0x060, BIT(0), 0);
+
+static SUNXI_CCU_GATE_DATA(bus_dsp_msgbox_clk, "bus-dsp-msgbox",
+ ahb, 0x068, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(bus_dsp_cfg_clk, "bus-dsp-cfg",
+ apb, 0x06c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_DATA(bus_npu_hclk, "bus-npu-hclk", ahb, 0x070, BIT(1), 0);
+static SUNXI_CCU_GATE_DATA(bus_npu_aclk, "bus-npu-aclk", ahb, 0x070, BIT(2), 0);
+
+static const struct clk_parent_data timer_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .fw_name = "r-ahb" }
+};
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer0_clk, "mcu-timer0", timer_parents,
+ 0x074,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer1_clk, "mcu-timer1", timer_parents,
+ 0x078,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer2_clk, "mcu-timer2", timer_parents,
+ 0x07c,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer3_clk, "mcu-timer3", timer_parents,
+ 0x080,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer4_clk, "mcu-timer4", timer_parents,
+ 0x084,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer5_clk, "mcu-timer5", timer_parents,
+ 0x088,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_GATE_DATA(bus_mcu_timer_clk, "bus-mcu-timer", ahb, 0x08c, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(bus_mcu_dma_clk, "bus-mcu-dma", ahb, 0x104, BIT(0), 0);
+/* tzma* only found in BSP code. */
+static SUNXI_CCU_GATE_DATA(tzma0_clk, "tzma0", ahb, 0x108, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(tzma1_clk, "tzma1", ahb, 0x10c, BIT(0), 0);
+/* Parent is a guess as this block is not shown in the system bus tree diagram */
+static SUNXI_CCU_GATE_DATA(bus_pubsram_clk, "bus-pubsram", ahb, 0x114, BIT(0), 0);
+
+/*
+ * The user manual has the "mbus" clock as the parent of both clocks
+ * below, but this arrangement makes more sense, since the BSP MCU DMA
+ * controller references both of them, likely needing both enabled.
+ */
+static SUNXI_CCU_GATE_FW(mbus_mcu_clk, "mbus-mcu", "mbus", 0x11c, BIT(1), 0);
+static SUNXI_CCU_GATE_HW(mbus_mcu_dma_clk, "mbus-mcu-dma",
+ &mbus_mcu_clk.common.hw, 0x11c, BIT(0), 0);
+
+static const struct clk_parent_data riscv_pwm_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+};
+
+static SUNXI_CCU_MUX_DATA_WITH_GATE(riscv_clk, "riscv",
+ riscv_pwm_parents, 0x120,
+ 27, 3, BIT(31), 0);
+/* Parents are guesses as these two blocks are not shown in the system bus tree diagram */
+static SUNXI_CCU_GATE_DATA(bus_riscv_cfg_clk, "bus-riscv-cfg", ahb,
+ 0x124, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(bus_riscv_msgbox_clk, "bus-riscv-msgbox", ahb,
+ 0x128, BIT(0), 0);
+
+static SUNXI_CCU_MUX_DATA_WITH_GATE(mcu_pwm0_clk, "mcu-pwm0",
+ riscv_pwm_parents, 0x130,
+ 24, 3, BIT(31), 0);
+static SUNXI_CCU_GATE_DATA(bus_mcu_pwm0_clk, "bus-mcu-pwm0", apb,
+ 0x134, BIT(0), 0);
+
+/*
+ * Contains all clocks that are controlled by a hardware register. They
+ * have a (sunxi) .common member, which needs to be initialised by the common
+ * sunxi CCU code, to be filled with the MMIO base address and the shared lock.
+ */
+static struct ccu_common *sun55i_a523_mcu_ccu_clks[] = {
+ &pll_audio1_clk.common,
+ &audio_out_clk.common,
+ &dsp_clk.common,
+ &i2s0_clk.common,
+ &i2s1_clk.common,
+ &i2s2_clk.common,
+ &i2s3_clk.common,
+ &i2s3_asrc_clk.common,
+ &bus_i2s0_clk.common,
+ &bus_i2s1_clk.common,
+ &bus_i2s2_clk.common,
+ &bus_i2s3_clk.common,
+ &spdif_tx_clk.common,
+ &spdif_rx_clk.common,
+ &bus_spdif_clk.common,
+ &dmic_clk.common,
+ &bus_dmic_clk.common,
+ &audio_dac_clk.common,
+ &audio_adc_clk.common,
+ &bus_audio_codec_clk.common,
+ &bus_dsp_msgbox_clk.common,
+ &bus_dsp_cfg_clk.common,
+ &bus_npu_aclk.common,
+ &bus_npu_hclk.common,
+ &mcu_timer0_clk.common,
+ &mcu_timer1_clk.common,
+ &mcu_timer2_clk.common,
+ &mcu_timer3_clk.common,
+ &mcu_timer4_clk.common,
+ &mcu_timer5_clk.common,
+ &bus_mcu_timer_clk.common,
+ &bus_mcu_dma_clk.common,
+ &tzma0_clk.common,
+ &tzma1_clk.common,
+ &bus_pubsram_clk.common,
+ &mbus_mcu_dma_clk.common,
+ &mbus_mcu_clk.common,
+ &riscv_clk.common,
+ &bus_riscv_cfg_clk.common,
+ &bus_riscv_msgbox_clk.common,
+ &mcu_pwm0_clk.common,
+ &bus_mcu_pwm0_clk.common,
+};
+
+static struct clk_hw_onecell_data sun55i_a523_mcu_hw_clks = {
+ .hws = {
+ [CLK_MCU_PLL_AUDIO1] = &pll_audio1_clk.common.hw,
+ [CLK_MCU_PLL_AUDIO1_DIV2] = &pll_audio1_div2_clk.hw,
+ [CLK_MCU_PLL_AUDIO1_DIV5] = &pll_audio1_div5_clk.hw,
+ [CLK_MCU_AUDIO_OUT] = &audio_out_clk.common.hw,
+ [CLK_MCU_DSP] = &dsp_clk.common.hw,
+ [CLK_MCU_I2S0] = &i2s0_clk.common.hw,
+ [CLK_MCU_I2S1] = &i2s1_clk.common.hw,
+ [CLK_MCU_I2S2] = &i2s2_clk.common.hw,
+ [CLK_MCU_I2S3] = &i2s3_clk.common.hw,
+ [CLK_MCU_I2S3_ASRC] = &i2s3_asrc_clk.common.hw,
+ [CLK_BUS_MCU_I2S0] = &bus_i2s0_clk.common.hw,
+ [CLK_BUS_MCU_I2S1] = &bus_i2s1_clk.common.hw,
+ [CLK_BUS_MCU_I2S2] = &bus_i2s2_clk.common.hw,
+ [CLK_BUS_MCU_I2S3] = &bus_i2s3_clk.common.hw,
+ [CLK_MCU_SPDIF_TX] = &spdif_tx_clk.common.hw,
+ [CLK_MCU_SPDIF_RX] = &spdif_rx_clk.common.hw,
+ [CLK_BUS_MCU_SPDIF] = &bus_spdif_clk.common.hw,
+ [CLK_MCU_DMIC] = &dmic_clk.common.hw,
+ [CLK_BUS_MCU_DMIC] = &bus_dmic_clk.common.hw,
+ [CLK_MCU_AUDIO_CODEC_DAC] = &audio_dac_clk.common.hw,
+ [CLK_MCU_AUDIO_CODEC_ADC] = &audio_adc_clk.common.hw,
+ [CLK_BUS_MCU_AUDIO_CODEC] = &bus_audio_codec_clk.common.hw,
+ [CLK_BUS_MCU_DSP_MSGBOX] = &bus_dsp_msgbox_clk.common.hw,
+ [CLK_BUS_MCU_DSP_CFG] = &bus_dsp_cfg_clk.common.hw,
+ [CLK_BUS_MCU_NPU_HCLK] = &bus_npu_hclk.common.hw,
+ [CLK_BUS_MCU_NPU_ACLK] = &bus_npu_aclk.common.hw,
+ [CLK_MCU_TIMER0] = &mcu_timer0_clk.common.hw,
+ [CLK_MCU_TIMER1] = &mcu_timer1_clk.common.hw,
+ [CLK_MCU_TIMER2] = &mcu_timer2_clk.common.hw,
+ [CLK_MCU_TIMER3] = &mcu_timer3_clk.common.hw,
+ [CLK_MCU_TIMER4] = &mcu_timer4_clk.common.hw,
+ [CLK_MCU_TIMER5] = &mcu_timer5_clk.common.hw,
+ [CLK_BUS_MCU_TIMER] = &bus_mcu_timer_clk.common.hw,
+ [CLK_BUS_MCU_DMA] = &bus_mcu_dma_clk.common.hw,
+ [CLK_MCU_TZMA0] = &tzma0_clk.common.hw,
+ [CLK_MCU_TZMA1] = &tzma1_clk.common.hw,
+ [CLK_BUS_MCU_PUBSRAM] = &bus_pubsram_clk.common.hw,
+ [CLK_MCU_MBUS_DMA] = &mbus_mcu_dma_clk.common.hw,
+ [CLK_MCU_MBUS] = &mbus_mcu_clk.common.hw,
+ [CLK_MCU_RISCV] = &riscv_clk.common.hw,
+ [CLK_BUS_MCU_RISCV_CFG] = &bus_riscv_cfg_clk.common.hw,
+ [CLK_BUS_MCU_RISCV_MSGBOX] = &bus_riscv_msgbox_clk.common.hw,
+ [CLK_MCU_PWM0] = &mcu_pwm0_clk.common.hw,
+ [CLK_BUS_MCU_PWM0] = &bus_mcu_pwm0_clk.common.hw,
+ },
+ .num = CLK_BUS_MCU_PWM0 + 1,
+};
+
+static struct ccu_reset_map sun55i_a523_mcu_ccu_resets[] = {
+ [RST_BUS_MCU_I2S0] = { 0x0040, BIT(16) },
+ [RST_BUS_MCU_I2S1] = { 0x0040, BIT(17) },
+ [RST_BUS_MCU_I2S2] = { 0x0040, BIT(18) },
+ [RST_BUS_MCU_I2S3] = { 0x0040, BIT(19) },
+ [RST_BUS_MCU_SPDIF] = { 0x004c, BIT(16) },
+ [RST_BUS_MCU_DMIC] = { 0x0054, BIT(16) },
+ [RST_BUS_MCU_AUDIO_CODEC] = { 0x0060, BIT(16) },
+ [RST_BUS_MCU_DSP_MSGBOX] = { 0x0068, BIT(16) },
+ [RST_BUS_MCU_DSP_CFG] = { 0x006c, BIT(16) },
+ [RST_BUS_MCU_NPU] = { 0x0070, BIT(16) },
+ [RST_BUS_MCU_TIMER] = { 0x008c, BIT(16) },
+ /* dsp and dsp_debug resets only found in BSP code. */
+ [RST_BUS_MCU_DSP_DEBUG] = { 0x0100, BIT(16) },
+ [RST_BUS_MCU_DSP] = { 0x0100, BIT(17) },
+ [RST_BUS_MCU_DMA] = { 0x0104, BIT(16) },
+ [RST_BUS_MCU_PUBSRAM] = { 0x0114, BIT(16) },
+ [RST_BUS_MCU_RISCV_CFG] = { 0x0124, BIT(16) },
+ [RST_BUS_MCU_RISCV_DEBUG] = { 0x0124, BIT(17) },
+ [RST_BUS_MCU_RISCV_CORE] = { 0x0124, BIT(18) },
+ [RST_BUS_MCU_RISCV_MSGBOX] = { 0x0128, BIT(16) },
+ [RST_BUS_MCU_PWM0] = { 0x0134, BIT(16) },
+};
+
+static const struct sunxi_ccu_desc sun55i_a523_mcu_ccu_desc = {
+ .ccu_clks = sun55i_a523_mcu_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun55i_a523_mcu_ccu_clks),
+
+ .hw_clks = &sun55i_a523_mcu_hw_clks,
+
+ .resets = sun55i_a523_mcu_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun55i_a523_mcu_ccu_resets),
+};
+
+static int sun55i_a523_mcu_ccu_probe(struct platform_device *pdev)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ val = readl(reg + SUN55I_A523_PLL_AUDIO1_REG);
+
+ /*
+	 * The PLL clock code does not model all bits; for instance, it does
+	 * not support separate enable and gate bits. We present the
+	 * gate bit (27) as the enable bit, but then have to set the
+	 * PLL Enable, LDO Enable, and Lock Enable bits on all PLLs here.
+ */
+ val |= BIT(31) | BIT(30) | BIT(29);
+
+ /* Enforce p1 = 5, p0 = 2 (the default) for PLL_AUDIO1 */
+ val &= ~(GENMASK(22, 20) | GENMASK(18, 16));
+ val |= (4 << 20) | (1 << 16);
+
+ writel(val, reg + SUN55I_A523_PLL_AUDIO1_REG);
+
+	return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun55i_a523_mcu_ccu_desc);
+}
+
+static const struct of_device_id sun55i_a523_mcu_ccu_ids[] = {
+ { .compatible = "allwinner,sun55i-a523-mcu-ccu" },
+ { }
+};
+
+static struct platform_driver sun55i_a523_mcu_ccu_driver = {
+ .probe = sun55i_a523_mcu_ccu_probe,
+ .driver = {
+ .name = "sun55i-a523-mcu-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun55i_a523_mcu_ccu_ids,
+ },
+};
+module_platform_driver(sun55i_a523_mcu_ccu_driver);
+
+MODULE_IMPORT_NS("SUNXI_CCU");
+MODULE_DESCRIPTION("Support for the Allwinner A523 MCU CCU");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c b/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c
index b5464d8083c8..0339c4af0fe5 100644
--- a/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c
@@ -121,11 +121,11 @@ static SUNXI_CCU_GATE_HW(bus_r_ir_rx_clk, "bus-r-ir-rx",
&r_apb0_clk.common.hw, 0x1cc, BIT(0), 0);
static SUNXI_CCU_GATE_HW(bus_r_dma_clk, "bus-r-dma",
- &r_apb0_clk.common.hw, 0x1dc, BIT(0), 0);
+ &r_apb0_clk.common.hw, 0x1dc, BIT(0), CLK_IS_CRITICAL);
static SUNXI_CCU_GATE_HW(bus_r_rtc_clk, "bus-r-rtc",
&r_apb0_clk.common.hw, 0x20c, BIT(0), 0);
static SUNXI_CCU_GATE_HW(bus_r_cpucfg_clk, "bus-r-cpucfg",
- &r_apb0_clk.common.hw, 0x22c, BIT(0), 0);
+ &r_apb0_clk.common.hw, 0x22c, BIT(0), CLK_IS_CRITICAL);
static struct ccu_common *sun55i_a523_r_ccu_clks[] = {
&r_ahb_clk.common,
@@ -204,6 +204,7 @@ static struct ccu_reset_map sun55i_a523_r_ccu_resets[] = {
[RST_BUS_R_IR_RX] = { 0x1cc, BIT(16) },
[RST_BUS_R_RTC] = { 0x20c, BIT(16) },
[RST_BUS_R_CPUCFG] = { 0x22c, BIT(16) },
+ [RST_BUS_R_PPU0] = { 0x1ac, BIT(16) },
};
static const struct sunxi_ccu_desc sun55i_a523_r_ccu_desc = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523.c b/drivers/clk/sunxi-ng/ccu-sun55i-a523.c
index 9efb9fd24b42..20dad06b37ca 100644
--- a/drivers/clk/sunxi-ng/ccu-sun55i-a523.c
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523.c
@@ -11,6 +11,9 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <dt-bindings/clock/sun55i-a523-ccu.h>
+#include <dt-bindings/reset/sun55i-a523-ccu.h>
+
#include "../clk.h"
#include "ccu_common.h"
@@ -25,8 +28,6 @@
#include "ccu_nkmp.h"
#include "ccu_nm.h"
-#include "ccu-sun55i-a523.h"
-
/*
* The 24 MHz oscillator, the root of most of the clock tree.
* .fw_name is the string used in the DT "clock-names" property, used to
@@ -299,7 +300,7 @@ static struct ccu_nm pll_audio0_4x_clk = {
.m = _SUNXI_CCU_DIV(16, 6),
.sdm = _SUNXI_CCU_SDM(pll_audio0_sdm_table, BIT(24),
0x178, BIT(31)),
- .min_rate = 180000000U,
+ .min_rate = 90000000U,
.max_rate = 3000000000U,
.common = {
.reg = 0x078,
@@ -385,7 +386,8 @@ static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_FEAT(mbus_clk, "mbus", mbus_parents,
0, 0, /* no P */
24, 3, /* mux */
BIT(31), /* gate */
- 0, CCU_FEATURE_UPDATE_BIT);
+ CLK_IS_CRITICAL,
+ CCU_FEATURE_UPDATE_BIT);
static const struct clk_hw *mbus_hws[] = { &mbus_clk.common.hw };
@@ -485,6 +487,18 @@ static SUNXI_CCU_M_HW_WITH_MUX_GATE(ve_clk, "ve", ve_parents, 0x690,
static SUNXI_CCU_GATE_HWS(bus_ve_clk, "bus-ve", ahb_hws, 0x69c, BIT(0), 0);
+static const struct clk_hw *npu_parents[] = {
+ &pll_periph0_480M_clk.common.hw,
+ &pll_periph0_600M_clk.hw,
+ &pll_periph0_800M_clk.common.hw,
+ &pll_npu_2x_clk.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(npu_clk, "npu", npu_parents, 0x6e0,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
static SUNXI_CCU_GATE_HWS(bus_dma_clk, "bus-dma", ahb_hws, 0x70c, BIT(0), 0);
static SUNXI_CCU_GATE_HWS(bus_msgbox_clk, "bus-msgbox", ahb_hws, 0x71c,
@@ -1216,6 +1230,7 @@ static struct ccu_common *sun55i_a523_ccu_clks[] = {
&bus_ce_sys_clk.common,
&ve_clk.common,
&bus_ve_clk.common,
+ &npu_clk.common,
&bus_dma_clk.common,
&bus_msgbox_clk.common,
&bus_spinlock_clk.common,
@@ -1342,7 +1357,6 @@ static struct ccu_common *sun55i_a523_ccu_clks[] = {
};
static struct clk_hw_onecell_data sun55i_a523_hw_clks = {
- .num = CLK_NUMBER,
.hws = {
[CLK_PLL_DDR0] = &pll_ddr_clk.common.hw,
[CLK_PLL_PERIPH0_4X] = &pll_periph0_4x_clk.common.hw,
@@ -1523,7 +1537,9 @@ static struct clk_hw_onecell_data sun55i_a523_hw_clks = {
[CLK_FANOUT0] = &fanout0_clk.common.hw,
[CLK_FANOUT1] = &fanout1_clk.common.hw,
[CLK_FANOUT2] = &fanout2_clk.common.hw,
+ [CLK_NPU] = &npu_clk.common.hw,
},
+ .num = CLK_NPU + 1,
};
static struct ccu_reset_map sun55i_a523_ccu_resets[] = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523.h b/drivers/clk/sunxi-ng/ccu-sun55i-a523.h
deleted file mode 100644
index fc8dd42f1b47..000000000000
--- a/drivers/clk/sunxi-ng/ccu-sun55i-a523.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright 2024 Arm Ltd.
- */
-
-#ifndef _CCU_SUN55I_A523_H
-#define _CCU_SUN55I_A523_H
-
-#include <dt-bindings/clock/sun55i-a523-ccu.h>
-#include <dt-bindings/reset/sun55i-a523-ccu.h>
-
-#define CLK_NUMBER (CLK_FANOUT2 + 1)
-
-#endif /* _CCU_SUN55I_A523_H */
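
With the NPU clock appended as the highest binding index, the provider size can be derived directly from the dt-binding header, which is what makes the private CLK_NUMBER header above redundant. A sketch of the sizing pattern, assuming CLK_NPU stays the last exported id:

	#include <linux/clk-provider.h>
	#include <dt-bindings/clock/sun55i-a523-ccu.h>

	static struct clk_hw_onecell_data example_hw_clks = {
		.hws = {
			/* [CLK_...] = &<clk>.common.hw entries, as in the driver */
			[CLK_NPU] = NULL, /* placeholder for illustration */
		},
		.num = CLK_NPU + 1, /* highest index + 1 replaces CLK_NUMBER */
	};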
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
index 0536e880b80f..f6bfeba009e8 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
@@ -325,6 +325,13 @@ static const struct sun6i_rtc_match_data sun50i_r329_rtc_ccu_data = {
.osc32k_fanout_nparents = ARRAY_SIZE(sun50i_r329_osc32k_fanout_parents),
};
+static const struct sun6i_rtc_match_data sun55i_a523_rtc_ccu_data = {
+ .have_ext_osc32k = true,
+ .have_iosc_calibration = true,
+ .osc32k_fanout_parents = sun50i_r329_osc32k_fanout_parents,
+ .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_r329_osc32k_fanout_parents),
+};
+
static const struct of_device_id sun6i_rtc_ccu_match[] = {
{
.compatible = "allwinner,sun50i-h616-rtc",
@@ -334,6 +341,10 @@ static const struct of_device_id sun6i_rtc_ccu_match[] = {
.compatible = "allwinner,sun50i-r329-rtc",
.data = &sun50i_r329_rtc_ccu_data,
},
+ {
+ .compatible = "allwinner,sun55i-a523-rtc",
+ .data = &sun55i_a523_rtc_ccu_data,
+ },
{},
};
MODULE_DEVICE_TABLE(of, sun6i_rtc_ccu_match);
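
Each compatible entry carries its per-SoC feature description in .data, so a single probe path can stay table-driven. A hedged sketch of the lookup side (the function name is assumed for illustration):

	#include <linux/platform_device.h>
	#include <linux/property.h>

	static int rtc_ccu_probe_sketch(struct platform_device *pdev)
	{
		const struct sun6i_rtc_match_data *data;

		/* returns the .data member of the matched of_device_id */
		data = device_get_match_data(&pdev->dev);
		if (!data)
			return -EINVAL;

		/* ... use data->have_ext_osc32k, data->osc32k_fanout_parents ... */
		return 0;
	}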
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index 8b729c9b3545..44565830881d 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -439,7 +439,7 @@ static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2",
static SUNXI_CCU_GATE(bus_i2c3_clk, "bus-i2c3", "apb2",
0x06c, BIT(3), 0);
/*
- * In datasheet here's "Reserved", however the gate exists in BSP soucre
+ * In datasheet here's "Reserved", however the gate exists in BSP source
* code.
*/
static SUNXI_CCU_GATE(bus_can_clk, "bus-can", "apb2",
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index 579a81bb46df..05595ac51b76 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -347,12 +347,13 @@ static SUNXI_CCU_GATE(dram_ohci_clk, "dram-ohci", "dram",
static const char * const de_parents[] = { "pll-video", "pll-periph0" };
static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
- 0x104, 0, 4, 24, 2, BIT(31),
- CLK_SET_RATE_PARENT);
+ 0x104, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_NO_REPARENT);
-static const char * const tcon_parents[] = { "pll-video" };
+static const char * const tcon_parents[] = { "pll-video", "pll-periph0" };
static SUNXI_CCU_M_WITH_MUX_GATE(tcon_clk, "tcon", tcon_parents,
- 0x118, 0, 4, 24, 3, BIT(31), 0);
+ 0x118, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_NO_REPARENT);
static SUNXI_CCU_GATE(csi_misc_clk, "csi-misc", "osc24M",
0x130, BIT(31), 0);
@@ -362,11 +363,11 @@ static const char * const csi_mclk_parents[] = { "osc24M", "pll-video",
static SUNXI_CCU_M_WITH_MUX_GATE(csi0_mclk_clk, "csi0-mclk", csi_mclk_parents,
0x130, 0, 5, 8, 3, BIT(15), 0);
-static const char * const csi1_sclk_parents[] = { "pll-video", "pll-isp" };
-static SUNXI_CCU_M_WITH_MUX_GATE(csi1_sclk_clk, "csi-sclk", csi1_sclk_parents,
+static const char * const csi_sclk_parents[] = { "pll-video", "pll-isp" };
+static SUNXI_CCU_M_WITH_MUX_GATE(csi_sclk_clk, "csi-sclk", csi_sclk_parents,
0x134, 16, 4, 24, 3, BIT(31), 0);
-static SUNXI_CCU_M_WITH_MUX_GATE(csi1_mclk_clk, "csi-mclk", csi_mclk_parents,
+static SUNXI_CCU_M_WITH_MUX_GATE(csi1_mclk_clk, "csi1-mclk", csi_mclk_parents,
0x134, 0, 5, 8, 3, BIT(15), 0);
static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
@@ -452,7 +453,7 @@ static struct ccu_common *sun8i_v3s_ccu_clks[] = {
&tcon_clk.common,
&csi_misc_clk.common,
&csi0_mclk_clk.common,
- &csi1_sclk_clk.common,
+ &csi_sclk_clk.common,
&csi1_mclk_clk.common,
&ve_clk.common,
&ac_dig_clk.common,
@@ -551,7 +552,7 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
[CLK_TCON0] = &tcon_clk.common.hw,
[CLK_CSI_MISC] = &csi_misc_clk.common.hw,
[CLK_CSI0_MCLK] = &csi0_mclk_clk.common.hw,
- [CLK_CSI1_SCLK] = &csi1_sclk_clk.common.hw,
+ [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw,
[CLK_CSI1_MCLK] = &csi1_mclk_clk.common.hw,
[CLK_VE] = &ve_clk.common.hw,
[CLK_AC_DIG] = &ac_dig_clk.common.hw,
@@ -633,7 +634,7 @@ static struct clk_hw_onecell_data sun8i_v3_hw_clks = {
[CLK_TCON0] = &tcon_clk.common.hw,
[CLK_CSI_MISC] = &csi_misc_clk.common.hw,
[CLK_CSI0_MCLK] = &csi0_mclk_clk.common.hw,
- [CLK_CSI1_SCLK] = &csi1_sclk_clk.common.hw,
+ [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw,
[CLK_CSI1_MCLK] = &csi1_mclk_clk.common.hw,
[CLK_VE] = &ve_clk.common.hw,
[CLK_AC_DIG] = &ac_dig_clk.common.hw,
@@ -754,6 +755,21 @@ static int sun8i_v3s_ccu_probe(struct platform_device *pdev)
val &= ~GENMASK(19, 16);
writel(val, reg + SUN8I_V3S_PLL_AUDIO_REG);
+ /*
+ * Assign the DE and TCON clocks to the video PLL. Both clocks need to
+ * have the same parent for the units to work together.
+ */
+
+ val = readl(reg + de_clk.common.reg);
+ val &= ~GENMASK(de_clk.mux.shift + de_clk.mux.width - 1,
+ de_clk.mux.shift);
+ writel(val, reg + de_clk.common.reg);
+
+ val = readl(reg + tcon_clk.common.reg);
+ val &= ~GENMASK(tcon_clk.mux.shift + tcon_clk.mux.width - 1,
+ tcon_clk.mux.shift);
+ writel(val, reg + tcon_clk.common.reg);
+
return devm_sunxi_ccu_probe(&pdev->dev, reg, desc);
}
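
The two read-modify-write sequences force the DE and TCON muxes to parent index 0 (the video PLL) before registration; GENMASK(shift + width - 1, shift) rebuilds the field mask from the shift/width already stored in the clock structures, so for tcon (shift 24, width 3) bits 26..24 are cleared. A hedged helper equivalent to both open-coded sequences:

	#include <linux/bits.h>
	#include <linux/io.h>

	/* force a mux field described by shift/width to parent index 0 */
	static void ccu_mux_force_parent0(void __iomem *reg, u8 shift, u8 width)
	{
		u32 val = readl(reg);

		val &= ~GENMASK(shift + width - 1, shift);
		writel(val, reg);
	}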
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
index 88ed89658d45..c7e00f0c29a5 100644
--- a/drivers/clk/sunxi-ng/ccu_common.c
+++ b/drivers/clk/sunxi-ng/ccu_common.c
@@ -66,7 +66,7 @@ EXPORT_SYMBOL_NS_GPL(ccu_is_better_rate, "SUNXI_CCU");
* changed. In common PLL designs, changes to the dividers take effect
* almost immediately, while changes to the multipliers (implemented
* as dividers in the feedback loop) take a few cycles to work into
- * the feedback loop for the PLL to stablize.
+ * the feedback loop for the PLL to stabilize.
*
* Sometimes when the PLL clock rate is changed, the decrease in the
* divider is too much for the decrease in the multiplier to catch up.
diff --git a/drivers/clk/sunxi-ng/ccu_div.h b/drivers/clk/sunxi-ng/ccu_div.h
index 90d49ee8e0cc..be00b3277e97 100644
--- a/drivers/clk/sunxi-ng/ccu_div.h
+++ b/drivers/clk/sunxi-ng/ccu_div.h
@@ -274,6 +274,24 @@ struct ccu_div {
SUNXI_CCU_M_HWS_WITH_GATE(_struct, _name, _parent, _reg, \
_mshift, _mwidth, 0, _flags)
+#define SUNXI_CCU_P_DATA_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags) \
+ struct ccu_div _struct = { \
+ .enable = _gate, \
+ .div = _SUNXI_CCU_DIV_FLAGS(_mshift, _mwidth, \
+ CLK_DIVIDER_POWER_OF_TWO), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
+ _parents, \
+ &ccu_div_ops, \
+ _flags), \
+ }, \
+ }
+
static inline struct ccu_div *hw_to_ccu_div(struct clk_hw *hw)
{
struct ccu_common *common = hw_to_ccu_common(hw);
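
The new _P_ variant differs from the plain M macro only in CLK_DIVIDER_POWER_OF_TWO: the register field is treated as an exponent, so the divisor is 1 << field rather than field + 1. A worked sketch of the resulting recalc arithmetic:

	/* CLK_DIVIDER_POWER_OF_TWO: a 2-bit field selects /1, /2, /4 or /8 */
	static unsigned long pow2_div_recalc(unsigned long parent_rate, u32 field)
	{
		/* e.g. parent 600 MHz, field 3 -> 600 MHz >> 3 = 75 MHz */
		return parent_rate >> field;
	}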
diff --git a/drivers/clk/sunxi-ng/ccu_gate.c b/drivers/clk/sunxi-ng/ccu_gate.c
index 474a9e8831f8..30673fe4e3c2 100644
--- a/drivers/clk/sunxi-ng/ccu_gate.c
+++ b/drivers/clk/sunxi-ng/ccu_gate.c
@@ -91,8 +91,8 @@ static unsigned long ccu_gate_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long ccu_gate_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ccu_gate_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_gate *cg = hw_to_ccu_gate(hw);
int div = 1;
@@ -101,14 +101,16 @@ static long ccu_gate_round_rate(struct clk_hw *hw, unsigned long rate,
div = cg->common.prediv;
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
- unsigned long best_parent = rate;
+ unsigned long best_parent = req->rate;
if (cg->common.features & CCU_FEATURE_ALL_PREDIV)
best_parent *= div;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
}
- return *prate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int ccu_gate_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -127,7 +129,7 @@ const struct clk_ops ccu_gate_ops = {
.disable = ccu_gate_disable,
.enable = ccu_gate_enable,
.is_enabled = ccu_gate_is_enabled,
- .round_rate = ccu_gate_round_rate,
+ .determine_rate = ccu_gate_determine_rate,
.set_rate = ccu_gate_set_rate,
.recalc_rate = ccu_gate_recalc_rate,
};
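
This hunk is the template for the rest of the series: .round_rate (return a long rate, pass the parent rate back through a pointer) becomes .determine_rate (fill in a struct clk_rate_request, return 0 or a negative errno). req->rate and req->best_parent_rate replace the rate argument and *prate, so most bodies rewrite in place. A minimal sketch of the converted shape, with foo_find_best() standing in for the driver's selection logic:

	#include <linux/clk-provider.h>

	static int foo_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
	{
		/*
		 * Adjust req->rate (and req->best_parent_rate when the
		 * parent must be re-rounded, as in the gate above) instead
		 * of returning the rate; failures become real error codes.
		 */
		req->rate = foo_find_best(req->best_parent_rate, req->rate);

		return 0;
	}

The same mechanical rewrite is applied below to the nk, nkmp and nm clocks and to the Tegra sync-source, fractional-divider, PLL and EMC clocks.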
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index 354c981943b6..4221b1888b38 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -185,7 +185,7 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
p &= (1 << cmp->p.width) - 1;
if (cmp->common.features & CCU_FEATURE_DUAL_DIV)
- rate = (parent_rate / p) / m;
+ rate = (parent_rate / (p + cmp->p.offset)) / m;
else
rate = (parent_rate >> p) / m;
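
In CCU_FEATURE_DUAL_DIV mode the P field is a plain divisor stored minus an offset (divisor = field + p.offset), not a power-of-two exponent, so the fix adds the offset back before dividing. A worked restatement of the corrected arithmetic:

	/* e.g. parent 600 MHz, p field 2, offset 1, m 2 -> 600 / 3 / 2 = 100 MHz */
	static unsigned long mp_dual_div_rate(unsigned long parent_rate,
					      u32 p_field, u32 p_offset, u32 m)
	{
		return (parent_rate / (p_field + p_offset)) / m;
	}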
diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c
index 555e99de2cc6..5db748fbb5bd 100644
--- a/drivers/clk/sunxi-ng/ccu_nk.c
+++ b/drivers/clk/sunxi-ng/ccu_nk.c
@@ -92,26 +92,26 @@ static unsigned long ccu_nk_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long ccu_nk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_nk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_nk *nk = hw_to_ccu_nk(hw);
struct _ccu_nk _nk;
if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate *= nk->fixed_post_div;
+ req->rate *= nk->fixed_post_div;
_nk.min_n = nk->n.min ?: 1;
_nk.max_n = nk->n.max ?: 1 << nk->n.width;
_nk.min_k = nk->k.min ?: 1;
_nk.max_k = nk->k.max ?: 1 << nk->k.width;
- rate = ccu_nk_find_best(*parent_rate, rate, &_nk);
+ req->rate = ccu_nk_find_best(req->best_parent_rate, req->rate, &_nk);
if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate = rate / nk->fixed_post_div;
+ req->rate = req->rate / nk->fixed_post_div;
- return rate;
+ return 0;
}
static int ccu_nk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -155,7 +155,7 @@ const struct clk_ops ccu_nk_ops = {
.is_enabled = ccu_nk_is_enabled,
.recalc_rate = ccu_nk_recalc_rate,
- .round_rate = ccu_nk_round_rate,
+ .determine_rate = ccu_nk_determine_rate,
.set_rate = ccu_nk_set_rate,
};
EXPORT_SYMBOL_NS_GPL(ccu_nk_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index 6e03b69d4028..25efb5b37607 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -127,20 +127,20 @@ static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_nkmp_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
struct _ccu_nkmp _nkmp;
if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate *= nkmp->fixed_post_div;
+ req->rate *= nkmp->fixed_post_div;
- if (nkmp->max_rate && rate > nkmp->max_rate) {
- rate = nkmp->max_rate;
+ if (nkmp->max_rate && req->rate > nkmp->max_rate) {
+ req->rate = nkmp->max_rate;
if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate /= nkmp->fixed_post_div;
- return rate;
+ req->rate /= nkmp->fixed_post_div;
+ return 0;
}
_nkmp.min_n = nkmp->n.min ?: 1;
@@ -152,12 +152,13 @@ static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
_nkmp.min_p = 1;
_nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);
- rate = ccu_nkmp_find_best(*parent_rate, rate, &_nkmp);
+ req->rate = ccu_nkmp_find_best(req->best_parent_rate, req->rate,
+ &_nkmp);
if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate = rate / nkmp->fixed_post_div;
+ req->rate = req->rate / nkmp->fixed_post_div;
- return rate;
+ return 0;
}
static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -227,7 +228,7 @@ const struct clk_ops ccu_nkmp_ops = {
.is_enabled = ccu_nkmp_is_enabled,
.recalc_rate = ccu_nkmp_recalc_rate,
- .round_rate = ccu_nkmp_round_rate,
+ .determine_rate = ccu_nkmp_determine_rate,
.set_rate = ccu_nkmp_set_rate,
};
EXPORT_SYMBOL_NS_GPL(ccu_nkmp_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index a4e2243b8d6b..df01ed3b37a6 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -116,39 +116,39 @@ static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_nm_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_nm *nm = hw_to_ccu_nm(hw);
struct _ccu_nm _nm;
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate *= nm->fixed_post_div;
+ req->rate *= nm->fixed_post_div;
- if (rate < nm->min_rate) {
- rate = nm->min_rate;
+ if (req->rate < nm->min_rate) {
+ req->rate = nm->min_rate;
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate /= nm->fixed_post_div;
- return rate;
+ req->rate /= nm->fixed_post_div;
+ return 0;
}
- if (nm->max_rate && rate > nm->max_rate) {
- rate = nm->max_rate;
+ if (nm->max_rate && req->rate > nm->max_rate) {
+ req->rate = nm->max_rate;
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate /= nm->fixed_post_div;
- return rate;
+ req->rate /= nm->fixed_post_div;
+ return 0;
}
- if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
+ if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, req->rate)) {
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate /= nm->fixed_post_div;
- return rate;
+ req->rate /= nm->fixed_post_div;
+ return 0;
}
- if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
+ if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, req->rate)) {
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate /= nm->fixed_post_div;
- return rate;
+ req->rate /= nm->fixed_post_div;
+ return 0;
}
_nm.min_n = nm->n.min ?: 1;
@@ -156,12 +156,13 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
_nm.min_m = 1;
_nm.max_m = nm->m.max ?: 1 << nm->m.width;
- rate = ccu_nm_find_best(&nm->common, *parent_rate, rate, &_nm);
+ req->rate = ccu_nm_find_best(&nm->common, req->best_parent_rate,
+ req->rate, &_nm);
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
- rate /= nm->fixed_post_div;
+ req->rate /= nm->fixed_post_div;
- return rate;
+ return 0;
}
static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -233,7 +234,7 @@ const struct clk_ops ccu_nm_ops = {
.is_enabled = ccu_nm_is_enabled,
.recalc_rate = ccu_nm_recalc_rate,
- .round_rate = ccu_nm_round_rate,
+ .determine_rate = ccu_nm_determine_rate,
.set_rate = ccu_nm_set_rate,
};
EXPORT_SYMBOL_NS_GPL(ccu_nm_ops, "SUNXI_CCU");
diff --git a/drivers/clk/tegra/Kconfig b/drivers/clk/tegra/Kconfig
index 90df619dc087..62147a069606 100644
--- a/drivers/clk/tegra/Kconfig
+++ b/drivers/clk/tegra/Kconfig
@@ -4,7 +4,7 @@ config CLK_TEGRA_BPMP
depends on TEGRA_BPMP
config TEGRA_CLK_DFLL
- depends on ARCH_TEGRA_124_SOC || ARCH_TEGRA_210_SOC
+ depends on ARCH_TEGRA_114_SOC || ARCH_TEGRA_124_SOC || ARCH_TEGRA_210_SOC
select PM_OPP
def_bool y
diff --git a/drivers/clk/tegra/clk-audio-sync.c b/drivers/clk/tegra/clk-audio-sync.c
index 2c4bb96eae16..468a4403f147 100644
--- a/drivers/clk/tegra/clk-audio-sync.c
+++ b/drivers/clk/tegra/clk-audio-sync.c
@@ -17,15 +17,15 @@ static unsigned long clk_sync_source_recalc_rate(struct clk_hw *hw,
return sync->rate;
}
-static long clk_sync_source_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_sync_source_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_sync_source *sync = to_clk_sync_source(hw);
- if (rate > sync->max_rate)
+ if (req->rate > sync->max_rate)
return -EINVAL;
else
- return rate;
+ return 0;
}
static int clk_sync_source_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -38,7 +38,7 @@ static int clk_sync_source_set_rate(struct clk_hw *hw, unsigned long rate,
}
const struct clk_ops tegra_clk_sync_source_ops = {
- .round_rate = clk_sync_source_round_rate,
+ .determine_rate = clk_sync_source_determine_rate,
.set_rate = clk_sync_source_set_rate,
.recalc_rate = clk_sync_source_recalc_rate,
};
diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
index b2323cb8eddc..77a2586dbe00 100644
--- a/drivers/clk/tegra/clk-bpmp.c
+++ b/drivers/clk/tegra/clk-bpmp.c
@@ -635,7 +635,7 @@ static int tegra_bpmp_register_clocks(struct tegra_bpmp *bpmp,
bpmp->num_clocks = count;
- bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(struct tegra_bpmp_clk), GFP_KERNEL);
+ bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(*bpmp->clocks), GFP_KERNEL);
if (!bpmp->clocks)
return -ENOMEM;
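
Sizing the allocation from the pointer rather than naming the struct is the usual kernel idiom: if the pointee type ever changes, the size expression cannot silently go stale. A hedged illustration with a made-up type:

	#include <linux/slab.h>

	struct foo { int x; };

	static struct foo *alloc_foos(size_t n)
	{
		/* sizeof(*p) tracks whatever type p points to */
		struct foo *p = kcalloc(n, sizeof(*p), GFP_KERNEL);

		return p;
	}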
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 58fa5a59e0c7..22dc29432eff 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -882,7 +882,7 @@ static void dfll_set_frequency_request(struct tegra_dfll *td,
{
u32 val = 0;
int force_val;
- int coef = 128; /* FIXME: td->cg_scale? */;
+ int coef = 128; /* FIXME: td->cg_scale? */
force_val = (req->lut_index - td->lut_safe) * coef / td->cg;
force_val = clamp(force_val, FORCE_MIN, FORCE_MAX);
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index 38daf483ddf1..37439fcb3ac0 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -58,23 +58,31 @@ static unsigned long clk_frac_div_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long clk_frac_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_frac_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_frac_div *divider = to_clk_frac_div(hw);
int div, mul;
- unsigned long output_rate = *prate;
+ unsigned long output_rate = req->best_parent_rate;
- if (!rate)
- return output_rate;
+ if (!req->rate) {
+ req->rate = output_rate;
- div = get_div(divider, rate, output_rate);
- if (div < 0)
- return *prate;
+ return 0;
+ }
+
+ div = get_div(divider, req->rate, output_rate);
+ if (div < 0) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
mul = get_mul(divider);
- return DIV_ROUND_UP(output_rate * mul, div + mul);
+ req->rate = DIV_ROUND_UP(output_rate * mul, div + mul);
+
+ return 0;
}
static int clk_frac_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -127,7 +135,7 @@ static void clk_divider_restore_context(struct clk_hw *hw)
const struct clk_ops tegra_clk_frac_div_ops = {
.recalc_rate = clk_frac_div_recalc_rate,
.set_rate = clk_frac_div_set_rate,
- .round_rate = clk_frac_div_round_rate,
+ .determine_rate = clk_frac_div_determine_rate,
.restore_context = clk_divider_restore_context,
};
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index 0626650a7011..6ebeaa7cb656 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -51,16 +51,10 @@ static int clk_periph_determine_rate(struct clk_hw *hw,
struct tegra_clk_periph *periph = to_clk_periph(hw);
const struct clk_ops *div_ops = periph->div_ops;
struct clk_hw *div_hw = &periph->divider.hw;
- unsigned long rate;
__clk_hw_set_clk(div_hw, hw);
- rate = div_ops->round_rate(div_hw, req->rate, &req->best_parent_rate);
- if (rate < 0)
- return rate;
-
- req->rate = rate;
- return 0;
+ return div_ops->determine_rate(div_hw, req);
}
static int clk_periph_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -132,7 +126,7 @@ static void clk_periph_restore_context(struct clk_hw *hw)
clk_periph_set_parent(hw, parent_id);
}
-const struct clk_ops tegra_clk_periph_ops = {
+static const struct clk_ops tegra_clk_periph_ops = {
.get_parent = clk_periph_get_parent,
.set_parent = clk_periph_set_parent,
.recalc_rate = clk_periph_recalc_rate,
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 100b5d9b7e26..591b9f0c155a 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -840,8 +840,8 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
struct tegra_clk_pll_freq_table cfg;
@@ -849,15 +849,20 @@ static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
if (pll->params->flags & TEGRA_PLL_FIXED) {
/* PLLM/MB are used for memory; we do not change rate */
if (pll->params->flags & (TEGRA_PLLM | TEGRA_PLLMB))
- return clk_hw_get_rate(hw);
- return pll->params->fixed_rate;
+ req->rate = clk_hw_get_rate(hw);
+ else
+ req->rate = pll->params->fixed_rate;
+
+ return 0;
}
- if (_get_table_rate(hw, &cfg, rate, *prate) &&
- pll->params->calc_rate(hw, &cfg, rate, *prate))
+ if (_get_table_rate(hw, &cfg, req->rate, req->best_parent_rate) &&
+ pll->params->calc_rate(hw, &cfg, req->rate, req->best_parent_rate))
return -EINVAL;
- return cfg.output_rate;
+ req->rate = cfg.output_rate;
+
+ return 0;
}
static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
@@ -1057,7 +1062,7 @@ const struct clk_ops tegra_clk_pll_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
.restore_context = tegra_clk_pll_restore_context,
};
@@ -1195,7 +1200,7 @@ static const struct clk_ops tegra_clk_pllu_ops = {
.enable = clk_pllu_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
};
@@ -1353,15 +1358,15 @@ static int clk_pllxc_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static long clk_pll_ramp_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll_ramp_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
struct tegra_clk_pll_freq_table cfg;
int ret, p_div;
- u64 output_rate = *prate;
+ u64 output_rate = req->best_parent_rate;
- ret = _pll_ramp_calc_pll(hw, &cfg, rate, *prate);
+ ret = _pll_ramp_calc_pll(hw, &cfg, req->rate, req->best_parent_rate);
if (ret < 0)
return ret;
@@ -1375,7 +1380,9 @@ static long clk_pll_ramp_round_rate(struct clk_hw *hw, unsigned long rate,
output_rate *= cfg.n;
do_div(output_rate, cfg.m * p_div);
- return output_rate;
+ req->rate = output_rate;
+
+ return 0;
}
static void _pllcx_strobe(struct tegra_clk_pll *pll)
@@ -1598,12 +1605,15 @@ static unsigned long clk_pllre_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long clk_pllre_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllre_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
- return _pllre_calc_rate(pll, NULL, rate, *prate);
+ req->rate = _pllre_calc_rate(pll, NULL, req->rate,
+ req->best_parent_rate);
+
+ return 0;
}
static int clk_plle_tegra114_enable(struct clk_hw *hw)
@@ -2003,7 +2013,7 @@ static const struct clk_ops tegra_clk_pllxc_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_ramp_round_rate,
+ .determine_rate = clk_pll_ramp_determine_rate,
.set_rate = clk_pllxc_set_rate,
};
@@ -2012,7 +2022,7 @@ static const struct clk_ops tegra_clk_pllc_ops = {
.enable = clk_pllc_enable,
.disable = clk_pllc_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_ramp_round_rate,
+ .determine_rate = clk_pll_ramp_determine_rate,
.set_rate = clk_pllc_set_rate,
};
@@ -2021,7 +2031,7 @@ static const struct clk_ops tegra_clk_pllre_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pllre_recalc_rate,
- .round_rate = clk_pllre_round_rate,
+ .determine_rate = clk_pllre_determine_rate,
.set_rate = clk_pllre_set_rate,
};
@@ -2321,7 +2331,7 @@ static const struct clk_ops tegra_clk_pllss_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_ramp_round_rate,
+ .determine_rate = clk_pll_ramp_determine_rate,
.set_rate = clk_pllxc_set_rate,
.restore_context = tegra_clk_pll_restore_context,
};
diff --git a/drivers/clk/tegra/clk-super.c b/drivers/clk/tegra/clk-super.c
index 7ec47942720c..51fb356e770e 100644
--- a/drivers/clk/tegra/clk-super.c
+++ b/drivers/clk/tegra/clk-super.c
@@ -147,17 +147,10 @@ static int clk_super_determine_rate(struct clk_hw *hw,
{
struct tegra_clk_super_mux *super = to_clk_super_mux(hw);
struct clk_hw *div_hw = &super->frac_div.hw;
- unsigned long rate;
__clk_hw_set_clk(div_hw, hw);
- rate = super->div_ops->round_rate(div_hw, req->rate,
- &req->best_parent_rate);
- if (rate < 0)
- return rate;
-
- req->rate = rate;
- return 0;
+ return super->div_ops->determine_rate(div_hw, req);
}
static unsigned long clk_super_recalc_rate(struct clk_hw *hw,
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 73303458e886..6c8e053311c3 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/clk/tegra.h>
#include <dt-bindings/clock/tegra114-car.h>
+#include <dt-bindings/reset/nvidia,tegra114-car.h>
#include "clk.h"
#include "clk-id.h"
@@ -1272,7 +1273,7 @@ EXPORT_SYMBOL(tegra114_clock_tune_cpu_trimmers_init);
*
* Assert the reset line of the DFLL's DVCO. No return value.
*/
-void tegra114_clock_assert_dfll_dvco_reset(void)
+static void tegra114_clock_assert_dfll_dvco_reset(void)
{
u32 v;
@@ -1281,7 +1282,6 @@ void tegra114_clock_assert_dfll_dvco_reset(void)
writel_relaxed(v, clk_base + RST_DFLL_DVCO);
tegra114_car_barrier();
}
-EXPORT_SYMBOL(tegra114_clock_assert_dfll_dvco_reset);
/**
* tegra114_clock_deassert_dfll_dvco_reset - deassert the DFLL's DVCO reset
@@ -1289,7 +1289,7 @@ EXPORT_SYMBOL(tegra114_clock_assert_dfll_dvco_reset);
* Deassert the reset line of the DFLL's DVCO, allowing the DVCO to
* operate. No return value.
*/
-void tegra114_clock_deassert_dfll_dvco_reset(void)
+static void tegra114_clock_deassert_dfll_dvco_reset(void)
{
u32 v;
@@ -1298,7 +1298,26 @@ void tegra114_clock_deassert_dfll_dvco_reset(void)
writel_relaxed(v, clk_base + RST_DFLL_DVCO);
tegra114_car_barrier();
}
-EXPORT_SYMBOL(tegra114_clock_deassert_dfll_dvco_reset);
+
+static int tegra114_reset_assert(unsigned long id)
+{
+ if (id == TEGRA114_RST_DFLL_DVCO)
+ tegra114_clock_assert_dfll_dvco_reset();
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int tegra114_reset_deassert(unsigned long id)
+{
+ if (id == TEGRA114_RST_DFLL_DVCO)
+ tegra114_clock_deassert_dfll_dvco_reset();
+ else
+ return -EINVAL;
+
+ return 0;
+}
static void __init tegra114_clock_init(struct device_node *np)
{
@@ -1344,6 +1363,9 @@ static void __init tegra114_clock_init(struct device_node *np)
tegra_super_clk_gen4_init(clk_base, pmc_base, tegra114_clks,
&pll_x_params);
+ tegra_init_special_resets(1, tegra114_reset_assert,
+ tegra114_reset_deassert);
+
tegra_add_of_provider(np, of_clk_src_onecell_get);
tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
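
tegra_init_special_resets() lets the CAR provider expose reset lines that are not ordinary peripheral reset bits; the two callbacks dispatch on the DT reset index and return -EINVAL for everything else, so regular IDs keep their normal path. On the consumer side the DVCO line then works through the generic reset API; a hedged sketch (the "dvco" name is assumed for illustration):

	#include <linux/err.h>
	#include <linux/reset.h>

	static int dvco_reset_pulse(struct device *dev)
	{
		struct reset_control *rst;

		rst = devm_reset_control_get_exclusive(dev, "dvco");
		if (IS_ERR(rst))
			return PTR_ERR(rst);

		reset_control_assert(rst);
		reset_control_deassert(rst);

		return 0;
	}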
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index 0251618b82c8..457a77c5bb62 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -29,6 +29,99 @@ struct dfll_fcpu_data {
};
/* Maximum CPU frequency, indexed by CPU speedo id */
+static const unsigned long tegra114_cpu_max_freq_table[] = {
+ [0] = 2040000000UL,
+ [1] = 1810500000UL,
+ [2] = 1912500000UL,
+ [3] = 1810500000UL,
+};
+
+#define T114_CPU_CVB_TABLE \
+ .min_millivolts = 1000, \
+ .max_millivolts = 1320, \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 306000000UL, { 2190643, -141851, 3576 } }, \
+ { 408000000UL, { 2250968, -144331, 3576 } }, \
+ { 510000000UL, { 2313333, -146811, 3576 } }, \
+ { 612000000UL, { 2377738, -149291, 3576 } }, \
+ { 714000000UL, { 2444183, -151771, 3576 } }, \
+ { 816000000UL, { 2512669, -154251, 3576 } }, \
+ { 918000000UL, { 2583194, -156731, 3576 } }, \
+ { 1020000000UL, { 2655759, -159211, 3576 } }, \
+ { 1122000000UL, { 2730365, -161691, 3576 } }, \
+ { 1224000000UL, { 2807010, -164171, 3576 } }, \
+ { 1326000000UL, { 2885696, -166651, 3576 } }, \
+ { 1428000000UL, { 2966422, -169131, 3576 } }, \
+ { 1530000000UL, { 3049183, -171601, 3576 } }, \
+ { 1606500000UL, { 3112179, -173451, 3576 } }, \
+ { 1708500000UL, { 3198504, -175931, 3576 } }, \
+ { 1810500000UL, { 3304747, -179126, 3576 } }, \
+ { 1912500000UL, { 3395401, -181606, 3576 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }, \
+ .cpu_dfll_data = { \
+ .tune0_low = 0x00b0039d, \
+ .tune0_high = 0x00b0009d, \
+ .tune1 = 0x0000001f, \
+ .tune_high_min_millivolts = 1050, \
+ }
+
+static const struct cvb_table tegra114_cpu_cvb_tables[] = {
+ {
+ .speedo_id = 0,
+ .process_id = -1,
+ .min_millivolts = 1000,
+ .max_millivolts = 1250,
+ .speedo_scale = 100,
+ .voltage_scale = 100,
+ .entries = {
+ { 306000000UL, { 107330, -1569, 0 } },
+ { 408000000UL, { 111250, -1666, 0 } },
+ { 510000000UL, { 110000, -1460, 0 } },
+ { 612000000UL, { 117290, -1745, 0 } },
+ { 714000000UL, { 122700, -1910, 0 } },
+ { 816000000UL, { 125620, -1945, 0 } },
+ { 918000000UL, { 130560, -2076, 0 } },
+ { 1020000000UL, { 137280, -2303, 0 } },
+ { 1122000000UL, { 146440, -2660, 0 } },
+ { 1224000000UL, { 152190, -2825, 0 } },
+ { 1326000000UL, { 157520, -2953, 0 } },
+ { 1428000000UL, { 166100, -3261, 0 } },
+ { 1530000000UL, { 176410, -3647, 0 } },
+ { 1632000000UL, { 189620, -4186, 0 } },
+ { 1734000000UL, { 203190, -4725, 0 } },
+ { 1836000000UL, { 222670, -5573, 0 } },
+ { 1938000000UL, { 256210, -7165, 0 } },
+ { 2040000000UL, { 250050, -6544, 0 } },
+ { 0UL, { 0, 0, 0 } },
+ },
+ .cpu_dfll_data = {
+ .tune0_low = 0x00b0019d,
+ .tune0_high = 0x00b0019d,
+ .tune1 = 0x0000001f,
+ .tune_high_min_millivolts = 1000,
+ }
+ },
+ {
+ .speedo_id = 1,
+ .process_id = -1,
+ T114_CPU_CVB_TABLE
+ },
+ {
+ .speedo_id = 2,
+ .process_id = -1,
+ T114_CPU_CVB_TABLE
+ },
+ {
+ .speedo_id = 3,
+ .process_id = -1,
+ T114_CPU_CVB_TABLE
+ },
+};
+
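
Each CVB entry pairs a frequency with cubic coefficients {c0, c1, c2}; the DFLL code evaluates the required voltage from the part's speedo value, with speedo_scale and voltage_scale absorbing the fixed-point scaling. A hedged sketch of the evaluation, in the Horner style used by drivers/clk/tegra/cvb.c:

	#include <linux/math.h>

	/*
	 * Returns millivolts still scaled by voltage_scale; the caller
	 * divides that out and rounds to the regulator step.
	 */
	static int cvb_mv(int speedo, int s_scale, int c0, int c1, int c2)
	{
		int mv;

		mv = DIV_ROUND_CLOSEST(c2 * speedo, s_scale);
		mv = DIV_ROUND_CLOSEST((mv + c1) * speedo, s_scale) + c0;

		return mv;
	}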
+/* Maximum CPU frequency, indexed by CPU speedo id */
static const unsigned long tegra124_cpu_max_freq_table[] = {
[0] = 2014500000UL,
[1] = 2320500000UL,
@@ -93,7 +186,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
[10] = 1504500000UL,
};
-#define CPU_CVB_TABLE \
+#define TEGRA210_CPU_CVB_TABLE \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -120,7 +213,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_XA \
+#define TEGRA210_CPU_CVB_TABLE_XA \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -143,7 +236,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_EUCM1 \
+#define TEGRA210_CPU_CVB_TABLE_EUCM1 \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -166,7 +259,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_EUCM2 \
+#define TEGRA210_CPU_CVB_TABLE_EUCM2 \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -188,7 +281,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_EUCM2_JOINT_RAIL \
+#define TEGRA210_CPU_CVB_TABLE_EUCM2_JOINT_RAIL \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -209,7 +302,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_ODN \
+#define TEGRA210_CPU_CVB_TABLE_ODN \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -238,7 +331,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 840,
.max_millivolts = 1120,
- CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
+ TEGRA210_CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -251,7 +344,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 840,
.max_millivolts = 1120,
- CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
+ TEGRA210_CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -264,7 +357,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 900,
.max_millivolts = 1162,
- CPU_CVB_TABLE_EUCM2,
+ TEGRA210_CPU_CVB_TABLE_EUCM2,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -276,7 +369,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 900,
.max_millivolts = 1162,
- CPU_CVB_TABLE_EUCM2,
+ TEGRA210_CPU_CVB_TABLE_EUCM2,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -288,7 +381,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 900,
.max_millivolts = 1195,
- CPU_CVB_TABLE_EUCM2,
+ TEGRA210_CPU_CVB_TABLE_EUCM2,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -300,7 +393,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 900,
.max_millivolts = 1195,
- CPU_CVB_TABLE_EUCM2,
+ TEGRA210_CPU_CVB_TABLE_EUCM2,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -312,7 +405,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 841,
.max_millivolts = 1227,
- CPU_CVB_TABLE_EUCM1,
+ TEGRA210_CPU_CVB_TABLE_EUCM1,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -325,7 +418,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 841,
.max_millivolts = 1227,
- CPU_CVB_TABLE_EUCM1,
+ TEGRA210_CPU_CVB_TABLE_EUCM1,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -338,7 +431,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 870,
.max_millivolts = 1150,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x20091d9,
@@ -349,7 +442,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 870,
.max_millivolts = 1150,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x25501d0,
@@ -360,7 +453,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 818,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -373,7 +466,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 818,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -386,7 +479,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = -1,
.min_millivolts = 918,
.max_millivolts = 1113,
- CPU_CVB_TABLE_XA,
+ TEGRA210_CPU_CVB_TABLE_XA,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x17711BD,
@@ -397,7 +490,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 825,
.max_millivolts = 1227,
- CPU_CVB_TABLE_ODN,
+ TEGRA210_CPU_CVB_TABLE_ODN,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -410,7 +503,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 825,
.max_millivolts = 1227,
- CPU_CVB_TABLE_ODN,
+ TEGRA210_CPU_CVB_TABLE_ODN,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -423,7 +516,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 870,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x20091d9,
@@ -434,7 +527,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 870,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x25501d0,
@@ -445,7 +538,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 837,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -458,7 +551,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 837,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -471,7 +564,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 850,
.max_millivolts = 1170,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -484,7 +577,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 850,
.max_millivolts = 1170,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -494,6 +587,13 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
},
};
+static const struct dfll_fcpu_data tegra114_dfll_fcpu_data = {
+ .cpu_max_freq_table = tegra114_cpu_max_freq_table,
+ .cpu_max_freq_table_size = ARRAY_SIZE(tegra114_cpu_max_freq_table),
+ .cpu_cvb_tables = tegra114_cpu_cvb_tables,
+ .cpu_cvb_tables_size = ARRAY_SIZE(tegra114_cpu_cvb_tables)
+};
+
static const struct dfll_fcpu_data tegra124_dfll_fcpu_data = {
.cpu_max_freq_table = tegra124_cpu_max_freq_table,
.cpu_max_freq_table_size = ARRAY_SIZE(tegra124_cpu_max_freq_table),
@@ -510,6 +610,10 @@ static const struct dfll_fcpu_data tegra210_dfll_fcpu_data = {
static const struct of_device_id tegra124_dfll_fcpu_of_match[] = {
{
+ .compatible = "nvidia,tegra114-dfll",
+ .data = &tegra114_dfll_fcpu_data,
+ },
+ {
.compatible = "nvidia,tegra124-dfll",
.data = &tegra124_dfll_fcpu_data,
},
diff --git a/drivers/clk/tegra/clk-tegra210-emc.c b/drivers/clk/tegra/clk-tegra210-emc.c
index 672ca8c184d2..fbf3c894eb56 100644
--- a/drivers/clk/tegra/clk-tegra210-emc.c
+++ b/drivers/clk/tegra/clk-tegra210-emc.c
@@ -86,22 +86,30 @@ static unsigned long tegra210_clk_emc_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_UP(parent_rate * 2, div);
}
-static long tegra210_clk_emc_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int tegra210_clk_emc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw);
struct tegra210_clk_emc_provider *provider = emc->provider;
unsigned int i;
- if (!provider || !provider->configs || provider->num_configs == 0)
- return clk_hw_get_rate(hw);
+ if (!provider || !provider->configs || provider->num_configs == 0) {
+ req->rate = clk_hw_get_rate(hw);
+
+ return 0;
+ }
for (i = 0; i < provider->num_configs; i++) {
- if (provider->configs[i].rate >= rate)
- return provider->configs[i].rate;
+ if (provider->configs[i].rate >= req->rate) {
+ req->rate = provider->configs[i].rate;
+
+ return 0;
+ }
}
- return provider->configs[i - 1].rate;
+ req->rate = provider->configs[i - 1].rate;
+
+ return 0;
}
static struct clk *tegra210_clk_emc_find_parent(struct tegra210_clk_emc *emc,
@@ -259,7 +267,7 @@ static int tegra210_clk_emc_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops tegra210_clk_emc_ops = {
.get_parent = tegra210_clk_emc_get_parent,
.recalc_rate = tegra210_clk_emc_recalc_rate,
- .round_rate = tegra210_clk_emc_round_rate,
+ .determine_rate = tegra210_clk_emc_determine_rate,
.set_rate = tegra210_clk_emc_set_rate,
};
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index a3488aaac3f7..504d0ea997a5 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -255,7 +255,7 @@
/* VIC register to handle during MBIST WAR */
#define NV_PVIC_THI_SLCG_OVERRIDE_LOW 0x8c
-/* APE, DISPA and VIC base addesses needed for MBIST WAR */
+/* APE, DISPA and VIC base addresses needed for MBIST WAR */
#define TEGRA210_AHUB_BASE 0x702d0000
#define TEGRA210_DISPA_BASE 0x54200000
#define TEGRA210_VIC_BASE 0x54340000
@@ -3444,7 +3444,7 @@ static void tegra210_disable_cpu_clock(u32 cpu)
static u32 spare_reg_ctx, misc_clk_enb_ctx, clk_msk_arm_ctx;
static u32 cpu_softrst_ctx[3];
-static int tegra210_clk_suspend(void)
+static int tegra210_clk_suspend(void *data)
{
unsigned int i;
@@ -3465,7 +3465,7 @@ static int tegra210_clk_suspend(void)
return 0;
}
-static void tegra210_clk_resume(void)
+static void tegra210_clk_resume(void *data)
{
unsigned int i;
@@ -3523,13 +3523,17 @@ static void tegra210_cpu_clock_resume(void)
}
#endif
-static struct syscore_ops tegra_clk_syscore_ops = {
+static const struct syscore_ops tegra_clk_syscore_ops = {
#ifdef CONFIG_PM_SLEEP
.suspend = tegra210_clk_suspend,
.resume = tegra210_clk_resume,
#endif
};
+static struct syscore tegra_clk_syscore = {
+ .ops = &tegra_clk_syscore_ops,
+};
+
static struct tegra_cpu_car_ops tegra210_cpu_car_ops = {
.wait_for_reset = tegra210_wait_cpu_in_reset,
.disable_clock = tegra210_disable_cpu_clock,
@@ -3813,6 +3817,6 @@ static void __init tegra210_clock_init(struct device_node *np)
tegra_cpu_car_ops = &tegra210_cpu_car_ops;
- register_syscore_ops(&tegra_clk_syscore_ops);
+ register_syscore(&tegra_clk_syscore);
}
CLK_OF_DECLARE(tegra210, "nvidia,tegra210-car", tegra210_clock_init);
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 82a8cb9545eb..e7ebb63970d3 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -53,6 +53,7 @@
#define SYSTEM_CLK_RATE 0x030
#define TEGRA30_CLK_PERIPH_BANKS 5
+#define TEGRA30_CLK_CLK_MAX 311
#define PLLC_BASE 0x80
#define PLLC_MISC 0x8c
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 5d80d8b79b8e..73efd2ff37c9 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -629,7 +629,6 @@ struct tegra_clk_periph {
#define TEGRA_CLK_PERIPH_MAGIC 0x18221223
-extern const struct clk_ops tegra_clk_periph_ops;
struct clk *tegra_clk_register_periph(const char *name,
const char * const *parent_names, int num_parents,
struct tegra_clk_periph *periph, void __iomem *clk_base,
@@ -898,8 +897,6 @@ static inline bool tegra124_clk_emc_driver_available(struct clk_hw *emc_hw)
void tegra114_clock_tune_cpu_trimmers_high(void);
void tegra114_clock_tune_cpu_trimmers_low(void);
void tegra114_clock_tune_cpu_trimmers_init(void);
-void tegra114_clock_assert_dfll_dvco_reset(void);
-void tegra114_clock_deassert_dfll_dvco_reset(void);
typedef void (*tegra_clk_apply_init_table_func)(void);
extern tegra_clk_apply_init_table_func tegra_clk_apply_init_table;
diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
index ebfb1d59401d..71ad03a998e8 100644
--- a/drivers/clk/thead/clk-th1520-ap.c
+++ b/drivers/clk/thead/clk-th1520-ap.c
@@ -18,6 +18,7 @@
#define TH1520_PLL_FBDIV GENMASK(19, 8)
#define TH1520_PLL_REFDIV GENMASK(5, 0)
#define TH1520_PLL_BYPASS BIT(30)
+#define TH1520_PLL_VCO_RST BIT(29)
#define TH1520_PLL_DSMPD BIT(24)
#define TH1520_PLL_FRAC GENMASK(23, 0)
#define TH1520_PLL_FRAC_BITS 24
@@ -42,17 +43,20 @@ struct ccu_common {
};
struct ccu_mux {
- struct ccu_internal mux;
- struct ccu_common common;
+ int clkid;
+ u32 reg;
+ struct clk_mux mux;
};
struct ccu_gate {
- u32 enable;
- struct ccu_common common;
+ int clkid;
+ u32 reg;
+ struct clk_gate gate;
};
struct ccu_div {
u32 enable;
+ u32 div_en;
struct ccu_div_internal div;
struct ccu_internal mux;
struct ccu_common common;
@@ -75,12 +79,23 @@ struct ccu_pll {
.flags = _flags, \
}
-#define CCU_GATE(_clkid, _struct, _name, _parent, _reg, _gate, _flags) \
+#define TH_CCU_MUX(_name, _parents, _shift, _width) \
+ { \
+ .mask = GENMASK(_width - 1, 0), \
+ .shift = _shift, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA( \
+ _name, \
+ _parents, \
+ &clk_mux_ops, \
+ 0), \
+ }
+
+#define CCU_GATE(_clkid, _struct, _name, _parent, _reg, _bit, _flags) \
struct ccu_gate _struct = { \
- .enable = _gate, \
- .common = { \
- .clkid = _clkid, \
- .cfg0 = _reg, \
+ .clkid = _clkid, \
+ .reg = _reg, \
+ .gate = { \
+ .bit_idx = _bit, \
.hw.init = CLK_HW_INIT_PARENTS_DATA( \
_name, \
_parent, \
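
These TH1520 reworks replace the driver-private gate/mux wrappers with the generic struct clk_gate and struct clk_mux, so the core clk code supplies the ops and the driver keeps only the DT clock id and register offset. The probe side is then expected to point gate.reg at the mapped register before registering gate.hw; a hedged sketch of that wiring (the array and loop are assumptions for illustration):

	#include <linux/clk-provider.h>

	static int register_gates(struct device *dev, void __iomem *base,
				  struct ccu_gate **gates, int num)
	{
		int i, ret;

		for (i = 0; i < num; i++) {
			gates[i]->gate.reg = base + gates[i]->reg;
			ret = devm_clk_hw_register(dev, &gates[i]->gate.hw);
			if (ret)
				return ret;
		}

		return 0;
	}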
@@ -94,13 +109,6 @@ static inline struct ccu_common *hw_to_ccu_common(struct clk_hw *hw)
return container_of(hw, struct ccu_common, hw);
}
-static inline struct ccu_mux *hw_to_ccu_mux(struct clk_hw *hw)
-{
- struct ccu_common *common = hw_to_ccu_common(hw);
-
- return container_of(common, struct ccu_mux, common);
-}
-
static inline struct ccu_pll *hw_to_ccu_pll(struct clk_hw *hw)
{
struct ccu_common *common = hw_to_ccu_common(hw);
@@ -115,13 +123,6 @@ static inline struct ccu_div *hw_to_ccu_div(struct clk_hw *hw)
return container_of(common, struct ccu_div, common);
}
-static inline struct ccu_gate *hw_to_ccu_gate(struct clk_hw *hw)
-{
- struct ccu_common *common = hw_to_ccu_common(hw);
-
- return container_of(common, struct ccu_gate, common);
-}
-
static u8 ccu_get_parent_helper(struct ccu_common *common,
struct ccu_internal *mux)
{
@@ -192,6 +193,55 @@ static unsigned long ccu_div_recalc_rate(struct clk_hw *hw,
return rate;
}
+static int ccu_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct ccu_div *cd = hw_to_ccu_div(hw);
+ unsigned int val;
+
+ if (cd->div_en)
+ return divider_determine_rate(hw, req, NULL,
+ cd->div.width, cd->div.flags);
+
+ regmap_read(cd->common.map, cd->common.cfg0, &val);
+ val = val >> cd->div.shift;
+ val &= GENMASK(cd->div.width - 1, 0);
+ return divider_ro_determine_rate(hw, req, NULL, cd->div.width,
+ cd->div.flags, val);
+}
+
+static int ccu_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_div *cd = hw_to_ccu_div(hw);
+ int val = divider_get_val(rate, parent_rate, NULL,
+ cd->div.width, cd->div.flags);
+ unsigned int curr_val, reg_val;
+
+ if (val < 0)
+ return val;
+
+ regmap_read(cd->common.map, cd->common.cfg0, &reg_val);
+ curr_val = reg_val >> cd->div.shift;
+ curr_val &= GENMASK(cd->div.width - 1, 0);
+
+ if (!cd->div_en && curr_val != val)
+ return -EINVAL;
+
+ reg_val &= ~cd->div_en;
+ regmap_write(cd->common.map, cd->common.cfg0, reg_val);
+ udelay(1);
+
+ reg_val &= ~GENMASK(cd->div.width + cd->div.shift - 1, cd->div.shift);
+ reg_val |= val << cd->div.shift;
+ regmap_write(cd->common.map, cd->common.cfg0, reg_val);
+
+ reg_val |= cd->div_en;
+ regmap_write(cd->common.map, cd->common.cfg0, reg_val);
+
+ return 0;
+}
+
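
The TH1520 dividers carry a separate enable bit for the divider stage (div_en); set_rate clears it, waits a microsecond, stages the new divisor while the stage is disabled, then sets the bit again so the hardware latches the value cleanly. Dividers without div_en are effectively read-only, which is why a mismatching request fails with -EINVAL and why determine_rate falls back to divider_ro_determine_rate() above. The protocol, restated:

	/*
	 * 1. clear div_en  - disable the divider stage
	 * 2. udelay(1)     - settle time before touching the field
	 * 3. write divisor - staged while the stage is disabled
	 * 4. set div_en    - hardware latches the new divisor
	 */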
static u8 ccu_div_get_parent(struct clk_hw *hw)
{
struct ccu_div *cd = hw_to_ccu_div(hw);
@@ -234,9 +284,34 @@ static const struct clk_ops ccu_div_ops = {
.get_parent = ccu_div_get_parent,
.set_parent = ccu_div_set_parent,
.recalc_rate = ccu_div_recalc_rate,
- .determine_rate = clk_hw_determine_rate_no_reparent,
+ .set_rate = ccu_div_set_rate,
+ .determine_rate = ccu_div_determine_rate,
};
+static void ccu_pll_disable(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ regmap_set_bits(pll->common.map, pll->common.cfg1,
+ TH1520_PLL_VCO_RST);
+}
+
+static int ccu_pll_enable(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ return regmap_clear_bits(pll->common.map, pll->common.cfg1,
+ TH1520_PLL_VCO_RST);
+}
+
+static int ccu_pll_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ return !regmap_test_bits(pll->common.map, pll->common.cfg1,
+ TH1520_PLL_VCO_RST);
+}
+
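
Gating the TH1520 PLLs is implemented through the VCO reset bit: enable clears TH1520_PLL_VCO_RST, disable sets it, and is_enabled simply reads the bit back inverted. The regmap helpers used make the intent explicit:

	/*
	 * regmap_set_bits(map, reg, m)   == read-modify-write, val |= m
	 * regmap_clear_bits(map, reg, m) == read-modify-write, val &= ~m
	 * regmap_test_bits(map, reg, m)  -> 1 if all bits in m are set,
	 *                                   0 if not, negative errno on
	 *                                   I/O error
	 */

Together with the CLK_IS_CRITICAL additions below, this lets the framework model the PLLs accurately without ever actually stopping the ones that feed the CPU and fabric.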
static unsigned long th1520_pll_vco_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -294,6 +369,9 @@ static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw,
}
static const struct clk_ops clk_pll_ops = {
+ .disable = ccu_pll_disable,
+ .enable = ccu_pll_enable,
+ .is_enabled = ccu_pll_is_enabled,
.recalc_rate = ccu_pll_recalc_rate,
};
@@ -309,7 +387,7 @@ static struct ccu_pll cpu_pll0_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("cpu-pll0",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -321,7 +399,7 @@ static struct ccu_pll cpu_pll1_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("cpu-pll1",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -333,7 +411,7 @@ static struct ccu_pll gmac_pll_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("gmac-pll",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -353,7 +431,7 @@ static struct ccu_pll video_pll_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("video-pll",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -405,7 +483,7 @@ static struct ccu_pll tee_pll_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("tee-pll",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -415,32 +493,20 @@ static const struct clk_parent_data c910_i0_parents[] = {
};
static struct ccu_mux c910_i0_clk = {
- .mux = TH_CCU_ARG(1, 1),
- .common = {
- .clkid = CLK_C910_I0,
- .cfg0 = 0x100,
- .hw.init = CLK_HW_INIT_PARENTS_DATA("c910-i0",
- c910_i0_parents,
- &clk_mux_ops,
- 0),
- }
+ .clkid = CLK_C910_I0,
+ .reg = 0x100,
+ .mux = TH_CCU_MUX("c910-i0", c910_i0_parents, 1, 1),
};
static const struct clk_parent_data c910_parents[] = {
- { .hw = &c910_i0_clk.common.hw },
+ { .hw = &c910_i0_clk.mux.hw },
{ .hw = &cpu_pll1_clk.common.hw }
};
static struct ccu_mux c910_clk = {
- .mux = TH_CCU_ARG(0, 1),
- .common = {
- .clkid = CLK_C910,
- .cfg0 = 0x100,
- .hw.init = CLK_HW_INIT_PARENTS_DATA("c910",
- c910_parents,
- &clk_mux_ops,
- 0),
- }
+ .clkid = CLK_C910,
+ .reg = 0x100,
+ .mux = TH_CCU_MUX("c910", c910_parents, 0, 1),
};
static const struct clk_parent_data ahb2_cpusys_parents[] = {
@@ -493,7 +559,7 @@ static struct ccu_div axi4_cpusys2_aclk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("axi4-cpusys2-aclk",
gmac_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -515,7 +581,7 @@ static struct ccu_div axi_aclk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("axi-aclk",
axi_parents,
&ccu_div_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -582,7 +648,14 @@ static const struct clk_parent_data peri2sys_apb_pclk_pd[] = {
{ .hw = &peri2sys_apb_pclk.common.hw }
};
-static CLK_FIXED_FACTOR_FW_NAME(osc12m_clk, "osc_12m", "osc_24m", 2, 1, 0);
+static struct clk_fixed_factor osc12m_clk = {
+ .div = 2,
+ .mult = 1,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("osc_12m",
+ osc_24m_clk,
+ &clk_fixed_factor_ops,
+ 0),
+};
static const char * const out_parents[] = { "osc_24m", "osc_12m" };
@@ -657,7 +730,7 @@ static struct ccu_div apb_pclk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("apb-pclk",
apb_parents,
&ccu_div_ops,
- CLK_IGNORE_UNUSED),
+ CLK_IS_CRITICAL),
},
};
@@ -688,7 +761,7 @@ static struct ccu_div vi_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("vi",
video_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -713,7 +786,7 @@ static struct ccu_div vo_axi_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("vo-axi",
video_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -738,7 +811,7 @@ static struct ccu_div vp_axi_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("vp-axi",
video_pll_clk_parent,
&ccu_div_ops,
- CLK_IGNORE_UNUSED),
+ CLK_IS_CRITICAL),
},
};
@@ -756,6 +829,7 @@ static struct ccu_div venc_clk = {
};
static struct ccu_div dpu0_clk = {
+ .div_en = BIT(8),
.div = TH_CCU_DIV_FLAGS(0, 8, CLK_DIVIDER_ONE_BASED),
.common = {
.clkid = CLK_DPU0,
@@ -763,11 +837,16 @@ static struct ccu_div dpu0_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("dpu0",
dpu0_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_SET_RATE_UNGATE),
},
};
+static const struct clk_parent_data dpu0_clk_pd[] = {
+ { .hw = &dpu0_clk.common.hw }
+};
+
static struct ccu_div dpu1_clk = {
+ .div_en = BIT(8),
.div = TH_CCU_DIV_FLAGS(0, 8, CLK_DIVIDER_ONE_BASED),
.common = {
.clkid = CLK_DPU1,
@@ -775,10 +854,14 @@ static struct ccu_div dpu1_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("dpu1",
dpu1_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_SET_RATE_UNGATE),
},
};
+static const struct clk_parent_data dpu1_clk_pd[] = {
+ { .hw = &dpu1_clk.common.hw }
+};
+
static CLK_FIXED_FACTOR_HW(emmc_sdio_ref_clk, "emmc-sdio-ref",
&video_pll_clk.common.hw, 4, 1, 0);
@@ -786,127 +869,132 @@ static const struct clk_parent_data emmc_sdio_ref_clk_pd[] = {
{ .hw = &emmc_sdio_ref_clk.hw },
};
-static CCU_GATE(CLK_BROM, brom_clk, "brom", ahb2_cpusys_hclk_pd, 0x100, BIT(4), 0);
-static CCU_GATE(CLK_BMU, bmu_clk, "bmu", axi4_cpusys2_aclk_pd, 0x100, BIT(5), 0);
+static CCU_GATE(CLK_BROM, brom_clk, "brom", ahb2_cpusys_hclk_pd, 0x100, 4, 0);
+static CCU_GATE(CLK_BMU, bmu_clk, "bmu", axi4_cpusys2_aclk_pd, 0x100, 5, 0);
static CCU_GATE(CLK_AON2CPU_A2X, aon2cpu_a2x_clk, "aon2cpu-a2x", axi4_cpusys2_aclk_pd,
- 0x134, BIT(8), 0);
+ 0x134, 8, CLK_IS_CRITICAL);
static CCU_GATE(CLK_X2X_CPUSYS, x2x_cpusys_clk, "x2x-cpusys", axi4_cpusys2_aclk_pd,
- 0x134, BIT(7), 0);
-static CCU_GATE(CLK_CPU2AON_X2H, cpu2aon_x2h_clk, "cpu2aon-x2h", axi_aclk_pd, 0x138, BIT(8), 0);
+ 0x134, 7, CLK_IS_CRITICAL);
+static CCU_GATE(CLK_CPU2AON_X2H, cpu2aon_x2h_clk, "cpu2aon-x2h", axi_aclk_pd,
+ 0x138, 8, CLK_IS_CRITICAL);
static CCU_GATE(CLK_CPU2PERI_X2H, cpu2peri_x2h_clk, "cpu2peri-x2h", axi4_cpusys2_aclk_pd,
- 0x140, BIT(9), CLK_IGNORE_UNUSED);
+ 0x140, 9, CLK_IS_CRITICAL);
static CCU_GATE(CLK_PERISYS_APB1_HCLK, perisys_apb1_hclk, "perisys-apb1-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(9), 0);
+ 0x150, 9, CLK_IS_CRITICAL);
static CCU_GATE(CLK_PERISYS_APB2_HCLK, perisys_apb2_hclk, "perisys-apb2-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(10), CLK_IGNORE_UNUSED);
+ 0x150, 10, CLK_IS_CRITICAL);
static CCU_GATE(CLK_PERISYS_APB3_HCLK, perisys_apb3_hclk, "perisys-apb3-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(11), CLK_IGNORE_UNUSED);
+ 0x150, 11, CLK_IS_CRITICAL);
static CCU_GATE(CLK_PERISYS_APB4_HCLK, perisys_apb4_hclk, "perisys-apb4-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(12), 0);
-static CCU_GATE(CLK_NPU_AXI, npu_axi_clk, "npu-axi", axi_aclk_pd, 0x1c8, BIT(5), 0);
-static CCU_GATE(CLK_CPU2VP, cpu2vp_clk, "cpu2vp", axi_aclk_pd, 0x1e0, BIT(13), 0);
-static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", emmc_sdio_ref_clk_pd, 0x204, BIT(30), 0);
-static CCU_GATE(CLK_GMAC1, gmac1_clk, "gmac1", gmac_pll_clk_pd, 0x204, BIT(26), 0);
-static CCU_GATE(CLK_PADCTRL1, padctrl1_clk, "padctrl1", perisys_apb_pclk_pd, 0x204, BIT(24), 0);
-static CCU_GATE(CLK_DSMART, dsmart_clk, "dsmart", perisys_apb_pclk_pd, 0x204, BIT(23), 0);
-static CCU_GATE(CLK_PADCTRL0, padctrl0_clk, "padctrl0", perisys_apb_pclk_pd, 0x204, BIT(22), 0);
-static CCU_GATE(CLK_GMAC_AXI, gmac_axi_clk, "gmac-axi", axi4_cpusys2_aclk_pd, 0x204, BIT(21), 0);
-static CCU_GATE(CLK_GPIO3, gpio3_clk, "gpio3-clk", peri2sys_apb_pclk_pd, 0x204, BIT(20), 0);
-static CCU_GATE(CLK_GMAC0, gmac0_clk, "gmac0", gmac_pll_clk_pd, 0x204, BIT(19), 0);
-static CCU_GATE(CLK_PWM, pwm_clk, "pwm", perisys_apb_pclk_pd, 0x204, BIT(18), 0);
-static CCU_GATE(CLK_QSPI0, qspi0_clk, "qspi0", video_pll_clk_pd, 0x204, BIT(17), 0);
-static CCU_GATE(CLK_QSPI1, qspi1_clk, "qspi1", video_pll_clk_pd, 0x204, BIT(16), 0);
-static CCU_GATE(CLK_SPI, spi_clk, "spi", video_pll_clk_pd, 0x204, BIT(15), 0);
-static CCU_GATE(CLK_UART0_PCLK, uart0_pclk, "uart0-pclk", perisys_apb_pclk_pd, 0x204, BIT(14), 0);
-static CCU_GATE(CLK_UART1_PCLK, uart1_pclk, "uart1-pclk", perisys_apb_pclk_pd, 0x204, BIT(13), 0);
-static CCU_GATE(CLK_UART2_PCLK, uart2_pclk, "uart2-pclk", perisys_apb_pclk_pd, 0x204, BIT(12), 0);
-static CCU_GATE(CLK_UART3_PCLK, uart3_pclk, "uart3-pclk", perisys_apb_pclk_pd, 0x204, BIT(11), 0);
-static CCU_GATE(CLK_UART4_PCLK, uart4_pclk, "uart4-pclk", perisys_apb_pclk_pd, 0x204, BIT(10), 0);
-static CCU_GATE(CLK_UART5_PCLK, uart5_pclk, "uart5-pclk", perisys_apb_pclk_pd, 0x204, BIT(9), 0);
-static CCU_GATE(CLK_GPIO0, gpio0_clk, "gpio0-clk", perisys_apb_pclk_pd, 0x204, BIT(8), 0);
-static CCU_GATE(CLK_GPIO1, gpio1_clk, "gpio1-clk", perisys_apb_pclk_pd, 0x204, BIT(7), 0);
-static CCU_GATE(CLK_GPIO2, gpio2_clk, "gpio2-clk", peri2sys_apb_pclk_pd, 0x204, BIT(6), 0);
-static CCU_GATE(CLK_I2C0, i2c0_clk, "i2c0", perisys_apb_pclk_pd, 0x204, BIT(5), 0);
-static CCU_GATE(CLK_I2C1, i2c1_clk, "i2c1", perisys_apb_pclk_pd, 0x204, BIT(4), 0);
-static CCU_GATE(CLK_I2C2, i2c2_clk, "i2c2", perisys_apb_pclk_pd, 0x204, BIT(3), 0);
-static CCU_GATE(CLK_I2C3, i2c3_clk, "i2c3", perisys_apb_pclk_pd, 0x204, BIT(2), 0);
-static CCU_GATE(CLK_I2C4, i2c4_clk, "i2c4", perisys_apb_pclk_pd, 0x204, BIT(1), 0);
-static CCU_GATE(CLK_I2C5, i2c5_clk, "i2c5", perisys_apb_pclk_pd, 0x204, BIT(0), 0);
-static CCU_GATE(CLK_SPINLOCK, spinlock_clk, "spinlock", ahb2_cpusys_hclk_pd, 0x208, BIT(10), 0);
-static CCU_GATE(CLK_DMA, dma_clk, "dma", axi4_cpusys2_aclk_pd, 0x208, BIT(8), 0);
-static CCU_GATE(CLK_MBOX0, mbox0_clk, "mbox0", apb3_cpusys_pclk_pd, 0x208, BIT(7), 0);
-static CCU_GATE(CLK_MBOX1, mbox1_clk, "mbox1", apb3_cpusys_pclk_pd, 0x208, BIT(6), 0);
-static CCU_GATE(CLK_MBOX2, mbox2_clk, "mbox2", apb3_cpusys_pclk_pd, 0x208, BIT(5), 0);
-static CCU_GATE(CLK_MBOX3, mbox3_clk, "mbox3", apb3_cpusys_pclk_pd, 0x208, BIT(4), 0);
-static CCU_GATE(CLK_WDT0, wdt0_clk, "wdt0", apb3_cpusys_pclk_pd, 0x208, BIT(3), 0);
-static CCU_GATE(CLK_WDT1, wdt1_clk, "wdt1", apb3_cpusys_pclk_pd, 0x208, BIT(2), 0);
-static CCU_GATE(CLK_TIMER0, timer0_clk, "timer0", apb3_cpusys_pclk_pd, 0x208, BIT(1), 0);
-static CCU_GATE(CLK_TIMER1, timer1_clk, "timer1", apb3_cpusys_pclk_pd, 0x208, BIT(0), 0);
-static CCU_GATE(CLK_SRAM0, sram0_clk, "sram0", axi_aclk_pd, 0x20c, BIT(4), 0);
-static CCU_GATE(CLK_SRAM1, sram1_clk, "sram1", axi_aclk_pd, 0x20c, BIT(3), 0);
-static CCU_GATE(CLK_SRAM2, sram2_clk, "sram2", axi_aclk_pd, 0x20c, BIT(2), 0);
-static CCU_GATE(CLK_SRAM3, sram3_clk, "sram3", axi_aclk_pd, 0x20c, BIT(1), 0);
+ 0x150, 12, 0);
+static const struct clk_parent_data perisys_apb4_hclk_pd[] = {
+ { .hw = &perisys_apb4_hclk.gate.hw },
+};
+
+static CCU_GATE(CLK_NPU_AXI, npu_axi_clk, "npu-axi", axi_aclk_pd, 0x1c8, 5, CLK_IS_CRITICAL);
+static CCU_GATE(CLK_CPU2VP, cpu2vp_clk, "cpu2vp", axi_aclk_pd, 0x1e0, 13, CLK_IS_CRITICAL);
+static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", emmc_sdio_ref_clk_pd, 0x204, 30, 0);
+static CCU_GATE(CLK_GMAC1, gmac1_clk, "gmac1", gmac_pll_clk_pd, 0x204, 26, 0);
+static CCU_GATE(CLK_PADCTRL1, padctrl1_clk, "padctrl1", perisys_apb_pclk_pd, 0x204, 24, 0);
+static CCU_GATE(CLK_DSMART, dsmart_clk, "dsmart", perisys_apb_pclk_pd, 0x204, 23, 0);
+static CCU_GATE(CLK_PADCTRL0, padctrl0_clk, "padctrl0", perisys_apb4_hclk_pd, 0x204, 22, 0);
+static CCU_GATE(CLK_GMAC_AXI, gmac_axi_clk, "gmac-axi", axi4_cpusys2_aclk_pd, 0x204, 21, 0);
+static CCU_GATE(CLK_GPIO3, gpio3_clk, "gpio3-clk", peri2sys_apb_pclk_pd, 0x204, 20, 0);
+static CCU_GATE(CLK_GMAC0, gmac0_clk, "gmac0", gmac_pll_clk_pd, 0x204, 19, 0);
+static CCU_GATE(CLK_PWM, pwm_clk, "pwm", perisys_apb_pclk_pd, 0x204, 18, 0);
+static CCU_GATE(CLK_QSPI0, qspi0_clk, "qspi0", video_pll_clk_pd, 0x204, 17, 0);
+static CCU_GATE(CLK_QSPI1, qspi1_clk, "qspi1", video_pll_clk_pd, 0x204, 16, 0);
+static CCU_GATE(CLK_SPI, spi_clk, "spi", video_pll_clk_pd, 0x204, 15, 0);
+static CCU_GATE(CLK_UART0_PCLK, uart0_pclk, "uart0-pclk", perisys_apb_pclk_pd, 0x204, 14, 0);
+static CCU_GATE(CLK_UART1_PCLK, uart1_pclk, "uart1-pclk", perisys_apb_pclk_pd, 0x204, 13, 0);
+static CCU_GATE(CLK_UART2_PCLK, uart2_pclk, "uart2-pclk", perisys_apb_pclk_pd, 0x204, 12, 0);
+static CCU_GATE(CLK_UART3_PCLK, uart3_pclk, "uart3-pclk", perisys_apb_pclk_pd, 0x204, 11, 0);
+static CCU_GATE(CLK_UART4_PCLK, uart4_pclk, "uart4-pclk", perisys_apb_pclk_pd, 0x204, 10, 0);
+static CCU_GATE(CLK_UART5_PCLK, uart5_pclk, "uart5-pclk", perisys_apb_pclk_pd, 0x204, 9, 0);
+static CCU_GATE(CLK_GPIO0, gpio0_clk, "gpio0-clk", perisys_apb_pclk_pd, 0x204, 8, 0);
+static CCU_GATE(CLK_GPIO1, gpio1_clk, "gpio1-clk", perisys_apb_pclk_pd, 0x204, 7, 0);
+static CCU_GATE(CLK_GPIO2, gpio2_clk, "gpio2-clk", peri2sys_apb_pclk_pd, 0x204, 6, 0);
+static CCU_GATE(CLK_I2C0, i2c0_clk, "i2c0", perisys_apb_pclk_pd, 0x204, 5, 0);
+static CCU_GATE(CLK_I2C1, i2c1_clk, "i2c1", perisys_apb_pclk_pd, 0x204, 4, 0);
+static CCU_GATE(CLK_I2C2, i2c2_clk, "i2c2", perisys_apb_pclk_pd, 0x204, 3, 0);
+static CCU_GATE(CLK_I2C3, i2c3_clk, "i2c3", perisys_apb_pclk_pd, 0x204, 2, 0);
+static CCU_GATE(CLK_I2C4, i2c4_clk, "i2c4", perisys_apb_pclk_pd, 0x204, 1, 0);
+static CCU_GATE(CLK_I2C5, i2c5_clk, "i2c5", perisys_apb_pclk_pd, 0x204, 0, 0);
+static CCU_GATE(CLK_SPINLOCK, spinlock_clk, "spinlock", ahb2_cpusys_hclk_pd, 0x208, 10, 0);
+static CCU_GATE(CLK_DMA, dma_clk, "dma", axi4_cpusys2_aclk_pd, 0x208, 8, 0);
+static CCU_GATE(CLK_MBOX0, mbox0_clk, "mbox0", apb3_cpusys_pclk_pd, 0x208, 7, 0);
+static CCU_GATE(CLK_MBOX1, mbox1_clk, "mbox1", apb3_cpusys_pclk_pd, 0x208, 6, 0);
+static CCU_GATE(CLK_MBOX2, mbox2_clk, "mbox2", apb3_cpusys_pclk_pd, 0x208, 5, 0);
+static CCU_GATE(CLK_MBOX3, mbox3_clk, "mbox3", apb3_cpusys_pclk_pd, 0x208, 4, 0);
+static CCU_GATE(CLK_WDT0, wdt0_clk, "wdt0", apb3_cpusys_pclk_pd, 0x208, 3, 0);
+static CCU_GATE(CLK_WDT1, wdt1_clk, "wdt1", apb3_cpusys_pclk_pd, 0x208, 2, 0);
+static CCU_GATE(CLK_TIMER0, timer0_clk, "timer0", apb3_cpusys_pclk_pd, 0x208, 1, 0);
+static CCU_GATE(CLK_TIMER1, timer1_clk, "timer1", apb3_cpusys_pclk_pd, 0x208, 0, 0);
+static CCU_GATE(CLK_SRAM0, sram0_clk, "sram0", axi_aclk_pd, 0x20c, 4, 0);
+static CCU_GATE(CLK_SRAM1, sram1_clk, "sram1", axi_aclk_pd, 0x20c, 3, 0);
+static CCU_GATE(CLK_SRAM2, sram2_clk, "sram2", axi_aclk_pd, 0x20c, 2, 0);
+static CCU_GATE(CLK_SRAM3, sram3_clk, "sram3", axi_aclk_pd, 0x20c, 1, 0);
static CCU_GATE(CLK_AXI4_VO_ACLK, axi4_vo_aclk, "axi4-vo-aclk",
- video_pll_clk_pd, 0x0, BIT(0), 0);
+ video_pll_clk_pd, 0x0, 0, CLK_IS_CRITICAL);
static CCU_GATE(CLK_GPU_CORE, gpu_core_clk, "gpu-core-clk", video_pll_clk_pd,
- 0x0, BIT(3), 0);
+ 0x0, 3, 0);
static CCU_GATE(CLK_GPU_CFG_ACLK, gpu_cfg_aclk, "gpu-cfg-aclk",
- video_pll_clk_pd, 0x0, BIT(4), 0);
+ video_pll_clk_pd, 0x0, 4, CLK_IS_CRITICAL);
static CCU_GATE(CLK_DPU_PIXELCLK0, dpu0_pixelclk, "dpu0-pixelclk",
- video_pll_clk_pd, 0x0, BIT(5), 0);
+ dpu0_clk_pd, 0x0, 5, CLK_SET_RATE_PARENT);
static CCU_GATE(CLK_DPU_PIXELCLK1, dpu1_pixelclk, "dpu1-pixelclk",
- video_pll_clk_pd, 0x0, BIT(6), 0);
+ dpu1_clk_pd, 0x0, 6, CLK_SET_RATE_PARENT);
static CCU_GATE(CLK_DPU_HCLK, dpu_hclk, "dpu-hclk", video_pll_clk_pd, 0x0,
- BIT(7), 0);
+ 7, 0);
static CCU_GATE(CLK_DPU_ACLK, dpu_aclk, "dpu-aclk", video_pll_clk_pd, 0x0,
- BIT(8), 0);
+ 8, 0);
static CCU_GATE(CLK_DPU_CCLK, dpu_cclk, "dpu-cclk", video_pll_clk_pd, 0x0,
- BIT(9), 0);
+ 9, 0);
static CCU_GATE(CLK_HDMI_SFR, hdmi_sfr_clk, "hdmi-sfr-clk", video_pll_clk_pd,
- 0x0, BIT(10), 0);
+ 0x0, 10, 0);
static CCU_GATE(CLK_HDMI_PCLK, hdmi_pclk, "hdmi-pclk", video_pll_clk_pd, 0x0,
- BIT(11), 0);
+ 11, 0);
static CCU_GATE(CLK_HDMI_CEC, hdmi_cec_clk, "hdmi-cec-clk", video_pll_clk_pd,
- 0x0, BIT(12), 0);
+ 0x0, 12, 0);
static CCU_GATE(CLK_MIPI_DSI0_PCLK, mipi_dsi0_pclk, "mipi-dsi0-pclk",
- video_pll_clk_pd, 0x0, BIT(13), 0);
+ video_pll_clk_pd, 0x0, 13, 0);
static CCU_GATE(CLK_MIPI_DSI1_PCLK, mipi_dsi1_pclk, "mipi-dsi1-pclk",
- video_pll_clk_pd, 0x0, BIT(14), 0);
+ video_pll_clk_pd, 0x0, 14, 0);
static CCU_GATE(CLK_MIPI_DSI0_CFG, mipi_dsi0_cfg_clk, "mipi-dsi0-cfg-clk",
- video_pll_clk_pd, 0x0, BIT(15), 0);
+ video_pll_clk_pd, 0x0, 15, 0);
static CCU_GATE(CLK_MIPI_DSI1_CFG, mipi_dsi1_cfg_clk, "mipi-dsi1-cfg-clk",
- video_pll_clk_pd, 0x0, BIT(16), 0);
+ video_pll_clk_pd, 0x0, 16, 0);
static CCU_GATE(CLK_MIPI_DSI0_REFCLK, mipi_dsi0_refclk, "mipi-dsi0-refclk",
- video_pll_clk_pd, 0x0, BIT(17), 0);
+ video_pll_clk_pd, 0x0, 17, 0);
static CCU_GATE(CLK_MIPI_DSI1_REFCLK, mipi_dsi1_refclk, "mipi-dsi1-refclk",
- video_pll_clk_pd, 0x0, BIT(18), 0);
+ video_pll_clk_pd, 0x0, 18, 0);
static CCU_GATE(CLK_HDMI_I2S, hdmi_i2s_clk, "hdmi-i2s-clk", video_pll_clk_pd,
- 0x0, BIT(19), 0);
+ 0x0, 19, 0);
static CCU_GATE(CLK_X2H_DPU1_ACLK, x2h_dpu1_aclk, "x2h-dpu1-aclk",
- video_pll_clk_pd, 0x0, BIT(20), 0);
+ video_pll_clk_pd, 0x0, 20, CLK_IS_CRITICAL);
static CCU_GATE(CLK_X2H_DPU_ACLK, x2h_dpu_aclk, "x2h-dpu-aclk",
- video_pll_clk_pd, 0x0, BIT(21), 0);
+ video_pll_clk_pd, 0x0, 21, CLK_IS_CRITICAL);
static CCU_GATE(CLK_AXI4_VO_PCLK, axi4_vo_pclk, "axi4-vo-pclk",
- video_pll_clk_pd, 0x0, BIT(22), 0);
+ video_pll_clk_pd, 0x0, 22, 0);
static CCU_GATE(CLK_IOPMP_VOSYS_DPU_PCLK, iopmp_vosys_dpu_pclk,
- "iopmp-vosys-dpu-pclk", video_pll_clk_pd, 0x0, BIT(23), 0);
+ "iopmp-vosys-dpu-pclk", video_pll_clk_pd, 0x0, 23, 0);
static CCU_GATE(CLK_IOPMP_VOSYS_DPU1_PCLK, iopmp_vosys_dpu1_pclk,
- "iopmp-vosys-dpu1-pclk", video_pll_clk_pd, 0x0, BIT(24), 0);
+ "iopmp-vosys-dpu1-pclk", video_pll_clk_pd, 0x0, 24, 0);
static CCU_GATE(CLK_IOPMP_VOSYS_GPU_PCLK, iopmp_vosys_gpu_pclk,
- "iopmp-vosys-gpu-pclk", video_pll_clk_pd, 0x0, BIT(25), 0);
+ "iopmp-vosys-gpu-pclk", video_pll_clk_pd, 0x0, 25, 0);
static CCU_GATE(CLK_IOPMP_DPU1_ACLK, iopmp_dpu1_aclk, "iopmp-dpu1-aclk",
- video_pll_clk_pd, 0x0, BIT(27), 0);
+ video_pll_clk_pd, 0x0, 27, CLK_IS_CRITICAL);
static CCU_GATE(CLK_IOPMP_DPU_ACLK, iopmp_dpu_aclk, "iopmp-dpu-aclk",
- video_pll_clk_pd, 0x0, BIT(28), 0);
+ video_pll_clk_pd, 0x0, 28, CLK_IS_CRITICAL);
static CCU_GATE(CLK_IOPMP_GPU_ACLK, iopmp_gpu_aclk, "iopmp-gpu-aclk",
- video_pll_clk_pd, 0x0, BIT(29), 0);
+ video_pll_clk_pd, 0x0, 29, CLK_IS_CRITICAL);
static CCU_GATE(CLK_MIPIDSI0_PIXCLK, mipi_dsi0_pixclk, "mipi-dsi0-pixclk",
- video_pll_clk_pd, 0x0, BIT(30), 0);
+ video_pll_clk_pd, 0x0, 30, 0);
static CCU_GATE(CLK_MIPIDSI1_PIXCLK, mipi_dsi1_pixclk, "mipi-dsi1-pixclk",
- video_pll_clk_pd, 0x0, BIT(31), 0);
+ video_pll_clk_pd, 0x0, 31, 0);
static CCU_GATE(CLK_HDMI_PIXCLK, hdmi_pixclk, "hdmi-pixclk", video_pll_clk_pd,
- 0x4, BIT(0), 0);
+ 0x4, 0, 0);
static CLK_FIXED_FACTOR_HW(gmac_pll_clk_100m, "gmac-pll-clk-100m",
&gmac_pll_clk.common.hw, 10, 1, 0);
@@ -917,15 +1005,9 @@ static const struct clk_parent_data uart_sclk_parents[] = {
};
static struct ccu_mux uart_sclk = {
- .mux = TH_CCU_ARG(0, 1),
- .common = {
- .clkid = CLK_UART_SCLK,
- .cfg0 = 0x210,
- .hw.init = CLK_HW_INIT_PARENTS_DATA("uart-sclk",
- uart_sclk_parents,
- &clk_mux_ops,
- 0),
- }
+ .clkid = CLK_UART_SCLK,
+ .reg = 0x210,
+ .mux = TH_CCU_MUX("uart-sclk", uart_sclk_parents, 0, 1),
};
static struct ccu_common *th1520_pll_clks[] = {
@@ -962,113 +1044,112 @@ static struct ccu_common *th1520_div_clks[] = {
&dpu1_clk.common,
};
-static struct ccu_common *th1520_mux_clks[] = {
- &c910_i0_clk.common,
- &c910_clk.common,
- &uart_sclk.common,
-};
-
-static struct ccu_common *th1520_gate_clks[] = {
- &emmc_sdio_clk.common,
- &aon2cpu_a2x_clk.common,
- &x2x_cpusys_clk.common,
- &brom_clk.common,
- &bmu_clk.common,
- &cpu2aon_x2h_clk.common,
- &cpu2peri_x2h_clk.common,
- &cpu2vp_clk.common,
- &perisys_apb1_hclk.common,
- &perisys_apb2_hclk.common,
- &perisys_apb3_hclk.common,
- &perisys_apb4_hclk.common,
- &npu_axi_clk.common,
- &gmac1_clk.common,
- &padctrl1_clk.common,
- &dsmart_clk.common,
- &padctrl0_clk.common,
- &gmac_axi_clk.common,
- &gpio3_clk.common,
- &gmac0_clk.common,
- &pwm_clk.common,
- &qspi0_clk.common,
- &qspi1_clk.common,
- &spi_clk.common,
- &uart0_pclk.common,
- &uart1_pclk.common,
- &uart2_pclk.common,
- &uart3_pclk.common,
- &uart4_pclk.common,
- &uart5_pclk.common,
- &gpio0_clk.common,
- &gpio1_clk.common,
- &gpio2_clk.common,
- &i2c0_clk.common,
- &i2c1_clk.common,
- &i2c2_clk.common,
- &i2c3_clk.common,
- &i2c4_clk.common,
- &i2c5_clk.common,
- &spinlock_clk.common,
- &dma_clk.common,
- &mbox0_clk.common,
- &mbox1_clk.common,
- &mbox2_clk.common,
- &mbox3_clk.common,
- &wdt0_clk.common,
- &wdt1_clk.common,
- &timer0_clk.common,
- &timer1_clk.common,
- &sram0_clk.common,
- &sram1_clk.common,
- &sram2_clk.common,
- &sram3_clk.common,
-};
-
-static struct ccu_common *th1520_vo_gate_clks[] = {
- &axi4_vo_aclk.common,
- &gpu_core_clk.common,
- &gpu_cfg_aclk.common,
- &dpu0_pixelclk.common,
- &dpu1_pixelclk.common,
- &dpu_hclk.common,
- &dpu_aclk.common,
- &dpu_cclk.common,
- &hdmi_sfr_clk.common,
- &hdmi_pclk.common,
- &hdmi_cec_clk.common,
- &mipi_dsi0_pclk.common,
- &mipi_dsi1_pclk.common,
- &mipi_dsi0_cfg_clk.common,
- &mipi_dsi1_cfg_clk.common,
- &mipi_dsi0_refclk.common,
- &mipi_dsi1_refclk.common,
- &hdmi_i2s_clk.common,
- &x2h_dpu1_aclk.common,
- &x2h_dpu_aclk.common,
- &axi4_vo_pclk.common,
- &iopmp_vosys_dpu_pclk.common,
- &iopmp_vosys_dpu1_pclk.common,
- &iopmp_vosys_gpu_pclk.common,
- &iopmp_dpu1_aclk.common,
- &iopmp_dpu_aclk.common,
- &iopmp_gpu_aclk.common,
- &mipi_dsi0_pixclk.common,
- &mipi_dsi1_pixclk.common,
- &hdmi_pixclk.common
+static struct ccu_mux *th1520_mux_clks[] = {
+ &c910_i0_clk,
+ &c910_clk,
+ &uart_sclk,
+};
+
+static struct ccu_gate *th1520_gate_clks[] = {
+ &emmc_sdio_clk,
+ &aon2cpu_a2x_clk,
+ &x2x_cpusys_clk,
+ &brom_clk,
+ &bmu_clk,
+ &cpu2aon_x2h_clk,
+ &cpu2peri_x2h_clk,
+ &cpu2vp_clk,
+ &perisys_apb1_hclk,
+ &perisys_apb2_hclk,
+ &perisys_apb3_hclk,
+ &perisys_apb4_hclk,
+ &npu_axi_clk,
+ &gmac1_clk,
+ &padctrl1_clk,
+ &dsmart_clk,
+ &padctrl0_clk,
+ &gmac_axi_clk,
+ &gpio3_clk,
+ &gmac0_clk,
+ &pwm_clk,
+ &qspi0_clk,
+ &qspi1_clk,
+ &spi_clk,
+ &uart0_pclk,
+ &uart1_pclk,
+ &uart2_pclk,
+ &uart3_pclk,
+ &uart4_pclk,
+ &uart5_pclk,
+ &gpio0_clk,
+ &gpio1_clk,
+ &gpio2_clk,
+ &i2c0_clk,
+ &i2c1_clk,
+ &i2c2_clk,
+ &i2c3_clk,
+ &i2c4_clk,
+ &i2c5_clk,
+ &spinlock_clk,
+ &dma_clk,
+ &mbox0_clk,
+ &mbox1_clk,
+ &mbox2_clk,
+ &mbox3_clk,
+ &wdt0_clk,
+ &wdt1_clk,
+ &timer0_clk,
+ &timer1_clk,
+ &sram0_clk,
+ &sram1_clk,
+ &sram2_clk,
+ &sram3_clk,
+};
+
+static struct ccu_gate *th1520_vo_gate_clks[] = {
+ &axi4_vo_aclk,
+ &gpu_core_clk,
+ &gpu_cfg_aclk,
+ &dpu0_pixelclk,
+ &dpu1_pixelclk,
+ &dpu_hclk,
+ &dpu_aclk,
+ &dpu_cclk,
+ &hdmi_sfr_clk,
+ &hdmi_pclk,
+ &hdmi_cec_clk,
+ &mipi_dsi0_pclk,
+ &mipi_dsi1_pclk,
+ &mipi_dsi0_cfg_clk,
+ &mipi_dsi1_cfg_clk,
+ &mipi_dsi0_refclk,
+ &mipi_dsi1_refclk,
+ &hdmi_i2s_clk,
+ &x2h_dpu1_aclk,
+ &x2h_dpu_aclk,
+ &axi4_vo_pclk,
+ &iopmp_vosys_dpu_pclk,
+ &iopmp_vosys_dpu1_pclk,
+ &iopmp_vosys_gpu_pclk,
+ &iopmp_dpu1_aclk,
+ &iopmp_dpu_aclk,
+ &iopmp_gpu_aclk,
+ &mipi_dsi0_pixclk,
+ &mipi_dsi1_pixclk,
+ &hdmi_pixclk
};
static const struct regmap_config th1520_clk_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .fast_io = true,
};
struct th1520_plat_data {
struct ccu_common **th1520_pll_clks;
struct ccu_common **th1520_div_clks;
- struct ccu_common **th1520_mux_clks;
- struct ccu_common **th1520_gate_clks;
+ struct ccu_mux **th1520_mux_clks;
+ struct ccu_gate **th1520_gate_clks;
int nr_clks;
int nr_pll_clks;
@@ -1107,7 +1188,6 @@ static int th1520_clk_probe(struct platform_device *pdev)
struct regmap *map;
void __iomem *base;
- struct clk_hw *hw;
int ret, i;
plat_data = device_get_match_data(&pdev->dev);
@@ -1154,40 +1234,27 @@ static int th1520_clk_probe(struct platform_device *pdev)
}
for (i = 0; i < plat_data->nr_mux_clks; i++) {
- struct ccu_mux *cm = hw_to_ccu_mux(&plat_data->th1520_mux_clks[i]->hw);
- const struct clk_init_data *init = cm->common.hw.init;
-
- plat_data->th1520_mux_clks[i]->map = map;
- hw = devm_clk_hw_register_mux_parent_data_table(dev,
- init->name,
- init->parent_data,
- init->num_parents,
- 0,
- base + cm->common.cfg0,
- cm->mux.shift,
- cm->mux.width,
- 0, NULL, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
-
- priv->hws[cm->common.clkid] = hw;
+ struct ccu_mux *cm = plat_data->th1520_mux_clks[i];
+
+ cm->mux.reg = base + cm->reg;
+
+ ret = devm_clk_hw_register(dev, &cm->mux.hw);
+ if (ret)
+ return ret;
+
+ priv->hws[cm->clkid] = &cm->mux.hw;
}
for (i = 0; i < plat_data->nr_gate_clks; i++) {
- struct ccu_gate *cg = hw_to_ccu_gate(&plat_data->th1520_gate_clks[i]->hw);
+ struct ccu_gate *cg = plat_data->th1520_gate_clks[i];
- plat_data->th1520_gate_clks[i]->map = map;
+ cg->gate.reg = base + cg->reg;
- hw = devm_clk_hw_register_gate_parent_data(dev,
- cg->common.hw.init->name,
- cg->common.hw.init->parent_data,
- cg->common.hw.init->flags,
- base + cg->common.cfg0,
- ffs(cg->enable) - 1, 0, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ ret = devm_clk_hw_register(dev, &cg->gate.hw);
+ if (ret)
+ return ret;
- priv->hws[cg->common.clkid] = hw;
+ priv->hws[cg->clkid] = &cg->gate.hw;
}
if (plat_data == &th1520_ap_platdata) {
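The th1520 rework above drops the driver-private regmap wrappers and embeds the generic clk_gate/clk_mux structs in small ccu_gate/ccu_mux containers, so probe only patches in the MMIO address (cg->gate.reg = base + cg->reg) and calls devm_clk_hw_register(). The struct and macro definitions live in a header that is not part of this hunk; the sketch below shows the shape they would need for the users above to compile. Names and field layout are assumptions, not the actual th1520-ccu header; ccu_mux would wrap a clk_mux the same way, matching the uart_sclk initializer above.

#include <linux/clk-provider.h>

struct ccu_gate {
	int		clkid;	/* index into the clk_hw_onecell_data array */
	u32		reg;	/* register offset, turned into an address in probe */
	struct clk_gate	gate;	/* generic gate; .reg is filled in at probe time */
};

/*
 * Hypothetical expansion matching the new CCU_GATE(_clkid, _struct, _name,
 * _parent, _reg, _bit, _flags) callers above: the macro now takes a bit
 * index instead of a BIT() mask, because clk_gate stores a bit number.
 */
#define CCU_GATE(_clkid, _struct, _name, _parent, _reg, _bit, _flags)	\
	static struct ccu_gate _struct = {				\
		.clkid	= _clkid,					\
		.reg	= _reg,						\
		.gate	= {						\
			.bit_idx = _bit,				\
			.hw.init = CLK_HW_INIT_PARENTS_DATA(_name,	\
					_parent, &clk_gate_ops,		\
					_flags),			\
		},							\
	}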
diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c
index 27e6b9cb1881..a99aaf2e7684 100644
--- a/drivers/clk/ti/autoidle.c
+++ b/drivers/clk/ti/autoidle.c
@@ -30,7 +30,7 @@ static LIST_HEAD(autoidle_clks);
/*
* we have some non-atomic read/write
- * operations behind it, so lets
+ * operations behind it, so let's
* take one lock for handling autoidle
* of all clocks
*/
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index 85c50ea39e6d..9269e6a0db6a 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -258,6 +258,8 @@ static const char *enable_init_clks[] = {
"dpll_ddr_m2_ck",
"dpll_mpu_m2_ck",
"l3_gclk",
+ /* WKUP_DEBUGSS_CLKCTRL - disable fails, AM335x Errata Advisory 1.0.42 */
+ "l3-aon-clkctrl:0000:0",
/* AM3_L3_L3_MAIN_CLKCTRL, needed during suspend */
"l3-clkctrl:00bc:0",
"l4hs_gclk",
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index f24f6eb2157a..35af3079c002 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -286,7 +286,7 @@ int __init am43xx_dt_clk_init(void)
/*
* cpsw_cpts_rft_clk has got the choice of 3 clocksources
* dpll_core_m4_ck, dpll_core_m5_ck and dpll_disp_m2_ck.
- * By default dpll_core_m4_ck is selected, witn this as clock
+ * By default dpll_core_m4_ck is selected, with this as clock
 * source the CPTS does not work properly. It gives clockcheck errors
* while running PTP.
* clockcheck: clock jumped backward or running slower than expected!
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index 0eab7f3e2eab..b02f84d49b96 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -120,16 +120,18 @@ static unsigned long atl_clk_recalc_rate(struct clk_hw *hw,
return parent_rate / cdesc->divider;
}
-static long atl_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int atl_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned divider;
- divider = (*parent_rate + rate / 2) / rate;
+ divider = (req->best_parent_rate + req->rate / 2) / req->rate;
if (divider > DRA7_ATL_DIVIDER_MASK + 1)
divider = DRA7_ATL_DIVIDER_MASK + 1;
- return *parent_rate / divider;
+ req->rate = req->best_parent_rate / divider;
+
+ return 0;
}
static int atl_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -156,7 +158,7 @@ static const struct clk_ops atl_clk_ops = {
.disable = atl_clk_disable,
.is_enabled = atl_clk_is_enabled,
.recalc_rate = atl_clk_recalc_rate,
- .round_rate = atl_clk_round_rate,
+ .determine_rate = atl_clk_determine_rate,
.set_rate = atl_clk_set_rate,
};
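Nearly every hunk in this series is an instance of the same mechanical conversion: the legacy .round_rate op, which returned the rounded rate (or smuggled an error through the long), is replaced by .determine_rate, which writes the result into a struct clk_rate_request and returns 0 or a negative errno. A minimal before/after sketch of the pattern, using a hypothetical divider and assuming a non-zero parent rate:

#include <linux/clk-provider.h>

/* Old style: the rounded rate (or an error) comes back in the long. */
static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *parent_rate)
{
	unsigned long div;

	if (!rate)
		return -EINVAL;

	div = DIV_ROUND_UP(*parent_rate, rate);

	return *parent_rate / div;
}

/* New style: rates live in the request; the return value is an errno. */
static int foo_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	unsigned long div;

	if (!req->rate)
		return -EINVAL;

	div = DIV_ROUND_UP(req->best_parent_rate, req->rate);
	req->rate = req->best_parent_rate / div;

	return 0;
}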
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index 9c75dcc9a534..693a4459a01b 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -118,13 +118,10 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops)
* Eventually we could standardize to using '_' for clk-*.c files to follow the
* TRM naming.
*/
-static struct device_node *ti_find_clock_provider(struct device_node *from,
- const char *name)
+static struct device_node *ti_find_clock_provider(const char *name)
{
char *tmp __free(kfree) = NULL;
struct device_node *np;
- bool found = false;
- const char *n;
char *p;
tmp = kstrdup_and_replace(name, '-', '_', GFP_KERNEL);
@@ -137,25 +134,13 @@ static struct device_node *ti_find_clock_provider(struct device_node *from,
*p = '\0';
/* Node named "clock" with "clock-output-names" */
- for_each_of_allnodes_from(from, np) {
- if (of_property_read_string_index(np, "clock-output-names",
- 0, &n))
- continue;
-
- if (!strncmp(n, tmp, strlen(tmp))) {
- of_node_get(np);
- found = true;
- break;
- }
- }
-
- if (found) {
- of_node_put(from);
- return np;
+ for_each_node_with_property(np, "clock-output-names") {
+ if (of_property_match_string(np, "clock-output-names", tmp) == 0)
+ return np;
}
/* Fall back to using old node name base provider name */
- return of_find_node_by_name(from, tmp);
+ return of_find_node_by_name(NULL, tmp);
}
/**
@@ -208,7 +193,7 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
if (num_args && clkctrl_nodes_missing)
continue;
- node = ti_find_clock_provider(NULL, buf);
+ node = ti_find_clock_provider(buf);
if (num_args && compat_mode) {
parent = node;
child = of_get_child_by_name(parent, "clock");
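The ti_find_clock_provider() rewrite above replaces the hand-rolled of_property_read_string_index() walk with two existing OF helpers: for_each_node_with_property() restricts the scan to nodes that actually carry "clock-output-names", and of_property_match_string() does an exact match instead of a prefix compare. The same lookup as a free-standing sketch (the helper name is illustrative):

#include <linux/of.h>

/*
 * Find the node whose first "clock-output-names" entry equals @name,
 * mirroring the == 0 check in the hunk above. Returning from inside the
 * loop keeps the reference on the matched node; the iterator drops
 * references on nodes it skips, so the caller must of_node_put().
 */
static struct device_node *find_output_name(const char *name)
{
	struct device_node *np;

	for_each_node_with_property(np, "clock-output-names") {
		if (of_property_match_string(np, "clock-output-names",
					     name) == 0)
			return np;
	}

	return NULL;
}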
diff --git a/drivers/clk/ti/clkt_dpll.c b/drivers/clk/ti/clkt_dpll.c
index dfaa4d1f0b64..2ecd66968af4 100644
--- a/drivers/clk/ti/clkt_dpll.c
+++ b/drivers/clk/ti/clkt_dpll.c
@@ -268,20 +268,18 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
/* DPLL rate rounding code */
/**
- * omap2_dpll_round_rate - round a target rate for an OMAP DPLL
+ * omap2_dpll_determine_rate - round a target rate for an OMAP DPLL
* @hw: struct clk_hw containing the struct clk * for a DPLL
- * @target_rate: desired DPLL clock rate
- * @parent_rate: parent's DPLL clock rate
+ * @req: rate request
*
* Given a DPLL and a desired target rate, round the target rate to a
* possible, programmable rate for this DPLL. Attempts to select the
* minimum possible n. Stores the computed (m, n) in the DPLL's
* dpll_data structure so set_rate() will not need to call this
- * (expensive) function again. Returns ~0 if the target rate cannot
- * be rounded, or the rounded rate upon success.
+ * (expensive) function again. Returns -EINVAL if the target rate
+ * cannot be rounded, or 0 on success with the rounded rate in @req.
*/
-long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
- unsigned long *parent_rate)
+int omap2_dpll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct clk_hw_omap *clk = to_clk_hw_omap(hw);
int m, n, r, scaled_max_m;
@@ -295,19 +293,19 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
const char *clk_name;
if (!clk || !clk->dpll_data)
- return ~0;
+ return -EINVAL;
dd = clk->dpll_data;
- if (dd->max_rate && target_rate > dd->max_rate)
- target_rate = dd->max_rate;
+ if (dd->max_rate && req->rate > dd->max_rate)
+ req->rate = dd->max_rate;
ref_rate = clk_hw_get_rate(dd->clk_ref);
clk_name = clk_hw_get_name(hw);
pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n",
- clk_name, target_rate);
+ clk_name, req->rate);
- scaled_rt_rp = target_rate / (ref_rate / DPLL_SCALE_FACTOR);
+ scaled_rt_rp = req->rate / (ref_rate / DPLL_SCALE_FACTOR);
scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR;
dd->last_rounded_rate = 0;
@@ -332,7 +330,7 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
if (m > scaled_max_m)
break;
- r = _dpll_test_mult(&m, n, &new_rate, target_rate,
+ r = _dpll_test_mult(&m, n, &new_rate, req->rate,
ref_rate);
/* m can't be set low enough for this n - try with a larger n */
@@ -340,7 +338,7 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
continue;
/* skip rates above our target rate */
- delta = target_rate - new_rate;
+ delta = req->rate - new_rate;
if (delta < 0)
continue;
@@ -359,13 +357,15 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
if (prev_min_delta == LONG_MAX) {
pr_debug("clock: %s: cannot round to rate %lu\n",
- clk_name, target_rate);
- return ~0;
+ clk_name, req->rate);
+ return -EINVAL;
}
dd->last_rounded_m = min_delta_m;
dd->last_rounded_n = min_delta_n;
- dd->last_rounded_rate = target_rate - prev_min_delta;
+ dd->last_rounded_rate = req->rate - prev_min_delta;
- return dd->last_rounded_rate;
+ req->rate = dd->last_rounded_rate;
+
+ return 0;
}
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index 2de7acea1ea0..d5e24fe4ae3a 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -273,8 +273,7 @@ int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
u8 index);
int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req);
-long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
- unsigned long *parent_rate);
+int omap2_dpll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req);
unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
unsigned long parent_rate);
@@ -296,9 +295,6 @@ void omap3_clk_lock_dpll5(void);
unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
unsigned long parent_rate);
-long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
- unsigned long target_rate,
- unsigned long *parent_rate);
int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req);
int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw));
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
index b85382c370f7..8cba259188d4 100644
--- a/drivers/clk/ti/composite.c
+++ b/drivers/clk/ti/composite.c
@@ -26,8 +26,8 @@ static unsigned long ti_composite_recalc_rate(struct clk_hw *hw,
return ti_clk_divider_ops.recalc_rate(hw, parent_rate);
}
-static long ti_composite_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ti_composite_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
return -EINVAL;
}
@@ -40,7 +40,7 @@ static int ti_composite_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops ti_composite_divider_ops = {
.recalc_rate = &ti_composite_recalc_rate,
- .round_rate = &ti_composite_round_rate,
+ .determine_rate = &ti_composite_determine_rate,
.set_rate = &ti_composite_set_rate,
};
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index ade99ab6cfa9..6f58a0f2e74a 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -223,13 +223,15 @@ static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
return bestdiv;
}
-static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ti_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int div;
- div = ti_clk_divider_bestdiv(hw, rate, prate);
+ div = ti_clk_divider_bestdiv(hw, req->rate, &req->best_parent_rate);
- return DIV_ROUND_UP(*prate, div);
+ req->rate = DIV_ROUND_UP(req->best_parent_rate, div);
+
+ return 0;
}
static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -299,7 +301,7 @@ static void clk_divider_restore_context(struct clk_hw *hw)
const struct clk_ops ti_clk_divider_ops = {
.recalc_rate = ti_clk_divider_recalc_rate,
- .round_rate = ti_clk_divider_round_rate,
+ .determine_rate = ti_clk_divider_determine_rate,
.set_rate = ti_clk_divider_set_rate,
.save_context = clk_divider_save_context,
.restore_context = clk_divider_restore_context,
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 3386bd1903df..971adafd9a8b 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -25,7 +25,6 @@ static const struct clk_ops dpll_m4xen_ck_ops = {
.enable = &omap3_noncore_dpll_enable,
.disable = &omap3_noncore_dpll_disable,
.recalc_rate = &omap4_dpll_regm4xen_recalc,
- .round_rate = &omap4_dpll_regm4xen_round_rate,
.set_rate = &omap3_noncore_dpll_set_rate,
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
@@ -48,7 +47,6 @@ static const struct clk_ops dpll_ck_ops = {
.enable = &omap3_noncore_dpll_enable,
.disable = &omap3_noncore_dpll_disable,
.recalc_rate = &omap3_dpll_recalc,
- .round_rate = &omap2_dpll_round_rate,
.set_rate = &omap3_noncore_dpll_set_rate,
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
@@ -61,7 +59,6 @@ static const struct clk_ops dpll_ck_ops = {
static const struct clk_ops dpll_no_gate_ck_ops = {
.recalc_rate = &omap3_dpll_recalc,
.get_parent = &omap2_init_dpll_parent,
- .round_rate = &omap2_dpll_round_rate,
.set_rate = &omap3_noncore_dpll_set_rate,
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
@@ -80,7 +77,7 @@ const struct clk_hw_omap_ops clkhwops_omap3_dpll = {};
static const struct clk_ops omap2_dpll_core_ck_ops = {
.get_parent = &omap2_init_dpll_parent,
.recalc_rate = &omap2_dpllcore_recalc,
- .round_rate = &omap2_dpll_round_rate,
+ .determine_rate = &omap2_dpll_determine_rate,
.set_rate = &omap2_reprogram_dpllcore,
};
#else
@@ -91,7 +88,7 @@ static const struct clk_ops omap2_dpll_core_ck_ops = {};
static const struct clk_ops omap3_dpll_core_ck_ops = {
.get_parent = &omap2_init_dpll_parent,
.recalc_rate = &omap3_dpll_recalc,
- .round_rate = &omap2_dpll_round_rate,
+ .determine_rate = &omap2_dpll_determine_rate,
};
static const struct clk_ops omap3_dpll_ck_ops = {
@@ -103,7 +100,6 @@ static const struct clk_ops omap3_dpll_ck_ops = {
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
.determine_rate = &omap3_noncore_dpll_determine_rate,
- .round_rate = &omap2_dpll_round_rate,
};
static const struct clk_ops omap3_dpll5_ck_ops = {
@@ -115,7 +111,6 @@ static const struct clk_ops omap3_dpll5_ck_ops = {
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
.determine_rate = &omap3_noncore_dpll_determine_rate,
- .round_rate = &omap2_dpll_round_rate,
};
static const struct clk_ops omap3_dpll_per_ck_ops = {
@@ -127,7 +122,6 @@ static const struct clk_ops omap3_dpll_per_ck_ops = {
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_dpll4_set_rate_and_parent,
.determine_rate = &omap3_noncore_dpll_determine_rate,
- .round_rate = &omap2_dpll_round_rate,
};
#endif
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 00680486b1bd..8c51b988a04f 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -587,6 +587,7 @@ int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
{
struct clk_hw_omap *clk = to_clk_hw_omap(hw);
struct dpll_data *dd;
+ int ret;
if (!req->rate)
return -EINVAL;
@@ -599,8 +600,10 @@ int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
(dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
req->best_parent_hw = dd->clk_bypass;
} else {
- req->rate = omap2_dpll_round_rate(hw, req->rate,
- &req->best_parent_rate);
+ ret = omap2_dpll_determine_rate(hw, req);
+ if (ret != 0)
+ return ret;
+
req->best_parent_hw = dd->clk_ref;
}
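Because omap2_dpll_determine_rate() now reports failure through its return value instead of ~0, callers that used to consume the returned long must check an int and let the helper fill in req->rate, as the hunk above does. The calling convention in isolation (a sketch, not the exact driver code):

#include <linux/clk-provider.h>

/* prototype as declared in clock.h above */
int omap2_dpll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req);

static int locked_path_sketch(struct clk_hw *hw, struct clk_rate_request *req)
{
	int ret;

	ret = omap2_dpll_determine_rate(hw, req);	/* fills req->rate */
	if (ret)
		return ret;	/* -EINVAL: the rate could not be rounded */

	/* req->rate now holds the rounded rate; the real caller then
	 * selects dd->clk_ref as the best parent. */
	return 0;
}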
diff --git a/drivers/clk/ti/dpll44xx.c b/drivers/clk/ti/dpll44xx.c
index 3fc2cab69a3f..08ed57f181b4 100644
--- a/drivers/clk/ti/dpll44xx.c
+++ b/drivers/clk/ti/dpll44xx.c
@@ -134,68 +134,13 @@ unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
}
/**
- * omap4_dpll_regm4xen_round_rate - round DPLL rate, considering REGM4XEN bit
- * @hw: struct hw_clk containing the struct clk * of the DPLL to round a rate for
- * @target_rate: the desired rate of the DPLL
- * @parent_rate: clock rate of the DPLL parent
- *
- * Compute the rate that would be programmed into the DPLL hardware
- * for @clk if set_rate() were to be provided with the rate
- * @target_rate. Takes the REGM4XEN bit into consideration, which is
- * needed for the OMAP4 ABE DPLL. Returns the rounded rate (before
- * M-dividers) upon success, -EINVAL if @clk is null or not a DPLL, or
- * ~0 if an error occurred in omap2_dpll_round_rate().
- */
-long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
- unsigned long target_rate,
- unsigned long *parent_rate)
-{
- struct clk_hw_omap *clk = to_clk_hw_omap(hw);
- struct dpll_data *dd;
- long r;
-
- if (!clk || !clk->dpll_data)
- return -EINVAL;
-
- dd = clk->dpll_data;
-
- dd->last_rounded_m4xen = 0;
-
- /*
- * First try to compute the DPLL configuration for
- * target rate without using the 4X multiplier.
- */
- r = omap2_dpll_round_rate(hw, target_rate, NULL);
- if (r != ~0)
- goto out;
-
- /*
- * If we did not find a valid DPLL configuration, try again, but
- * this time see if using the 4X multiplier can help. Enabling the
- * 4X multiplier is equivalent to dividing the target rate by 4.
- */
- r = omap2_dpll_round_rate(hw, target_rate / OMAP4430_REGM4XEN_MULT,
- NULL);
- if (r == ~0)
- return r;
-
- dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
- dd->last_rounded_m4xen = 1;
-
-out:
- omap4_dpll_lpmode_recalc(dd);
-
- return dd->last_rounded_rate;
-}
-
-/**
* omap4_dpll_regm4xen_determine_rate - determine rate for a DPLL
* @hw: pointer to the clock to determine rate for
* @req: target rate request
*
* Determines which DPLL mode to use for reaching a desired rate.
* Checks whether the DPLL shall be in bypass or locked mode, and if
- * locked, calculates the M,N values for the DPLL via round-rate.
+ * locked, calculates the M,N values for the DPLL.
* Returns 0 on success and a negative error value otherwise.
*/
int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
@@ -215,8 +160,36 @@ int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
(dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
req->best_parent_hw = dd->clk_bypass;
} else {
- req->rate = omap4_dpll_regm4xen_round_rate(hw, req->rate,
- &req->best_parent_rate);
+ struct clk_rate_request tmp_req;
+ long r;
+
+ clk_hw_init_rate_request(hw, &tmp_req, req->rate);
+ dd->last_rounded_m4xen = 0;
+
+ /*
+ * First try to compute the DPLL configuration for
+ * target rate without using the 4X multiplier.
+ */
+
+ r = omap2_dpll_determine_rate(hw, &tmp_req);
+ if (r < 0) {
+ /*
+ * If we did not find a valid DPLL configuration, try again, but
+ * this time see if using the 4X multiplier can help. Enabling the
+ * 4X multiplier is equivalent to dividing the target rate by 4.
+ */
+ tmp_req.rate /= OMAP4430_REGM4XEN_MULT;
+ r = omap2_dpll_determine_rate(hw, &tmp_req);
+ if (r < 0)
+ return r;
+
+ dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
+ dd->last_rounded_m4xen = 1;
+ }
+
+ omap4_dpll_lpmode_recalc(dd);
+
+ req->rate = dd->last_rounded_rate;
req->best_parent_hw = dd->clk_ref;
}
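The REGM4XEN path above shows the idiom for re-invoking a determine_rate implementation with a scaled target: build a scratch clk_rate_request with clk_hw_init_rate_request() so the min/max constraints are inherited, mutate its rate, and copy the result back. Reduced to its essentials (a sketch; the plain /4 and *4 stand in for the REGM4XEN bookkeeping through dd->last_rounded_rate):

#include <linux/clk-provider.h>

static int try_rate_then_quarter(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	struct clk_rate_request tmp_req;
	int ret;

	clk_hw_init_rate_request(hw, &tmp_req, req->rate);

	ret = omap2_dpll_determine_rate(hw, &tmp_req);
	if (ret < 0) {
		/* enabling the 4X multiplier == dividing the target by 4 */
		tmp_req.rate /= 4;
		ret = omap2_dpll_determine_rate(hw, &tmp_req);
		if (ret < 0)
			return ret;
		tmp_req.rate *= 4;
	}

	req->rate = tmp_req.rate;

	return 0;
}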
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index 2db3fc4a443e..4f28138d2d8a 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -214,24 +214,27 @@ static int ti_fapll_set_div_mult(unsigned long rate,
return 0;
}
-static long ti_fapll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ti_fapll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 pre_div_p, mult_n;
int error;
- if (!rate)
+ if (!req->rate)
return -EINVAL;
- error = ti_fapll_set_div_mult(rate, *parent_rate,
+ error = ti_fapll_set_div_mult(req->rate, req->best_parent_rate,
&pre_div_p, &mult_n);
- if (error)
- return error;
+ if (error) {
+ req->rate = error;
- rate = *parent_rate / pre_div_p;
- rate *= mult_n;
+ return 0;
+ }
- return rate;
+ req->rate = req->best_parent_rate / pre_div_p;
+ req->rate *= mult_n;
+
+ return 0;
}
static int ti_fapll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -268,7 +271,7 @@ static const struct clk_ops ti_fapll_ops = {
.is_enabled = ti_fapll_is_enabled,
.recalc_rate = ti_fapll_recalc_rate,
.get_parent = ti_fapll_get_parent,
- .round_rate = ti_fapll_round_rate,
+ .determine_rate = ti_fapll_determine_rate,
.set_rate = ti_fapll_set_rate,
};
@@ -399,14 +402,14 @@ static u32 ti_fapll_synth_set_frac_rate(struct fapll_synth *synth,
return post_div_m;
}
-static long ti_fapll_synth_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ti_fapll_synth_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct fapll_synth *synth = to_synth(hw);
struct fapll_data *fd = synth->fd;
unsigned long r;
- if (ti_fapll_clock_is_bypass(fd) || !synth->div || !rate)
+ if (ti_fapll_clock_is_bypass(fd) || !synth->div || !req->rate)
return -EINVAL;
/* Only post divider m available with no fractional divider? */
@@ -414,23 +417,26 @@ static long ti_fapll_synth_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long frac_rate;
u32 synth_post_div_m;
- frac_rate = ti_fapll_synth_get_frac_rate(hw, *parent_rate);
- synth_post_div_m = DIV_ROUND_UP(frac_rate, rate);
+ frac_rate = ti_fapll_synth_get_frac_rate(hw,
+ req->best_parent_rate);
+ synth_post_div_m = DIV_ROUND_UP(frac_rate, req->rate);
r = DIV_ROUND_UP(frac_rate, synth_post_div_m);
goto out;
}
- r = *parent_rate * SYNTH_PHASE_K;
- if (rate > r)
+ r = req->best_parent_rate * SYNTH_PHASE_K;
+ if (req->rate > r)
goto out;
r = DIV_ROUND_UP_ULL(r, SYNTH_MAX_INT_DIV * SYNTH_MAX_DIV_M);
- if (rate < r)
+ if (req->rate < r)
goto out;
- r = rate;
+ r = req->rate;
out:
- return r;
+ req->rate = r;
+
+ return 0;
}
static int ti_fapll_synth_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -477,7 +483,7 @@ static const struct clk_ops ti_fapll_synt_ops = {
.disable = ti_fapll_synth_disable,
.is_enabled = ti_fapll_synth_is_enabled,
.recalc_rate = ti_fapll_synth_recalc_rate,
- .round_rate = ti_fapll_synth_round_rate,
+ .determine_rate = ti_fapll_synth_determine_rate,
.set_rate = ti_fapll_synth_set_rate,
};
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index f684fc306ecc..d6d247ff2be5 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -84,7 +84,7 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
}
/**
- * clk_mux_save_context - Save the parent selcted in the mux
+ * clk_mux_save_context - Save the parent selected in the mux
* @hw: pointer struct clk_hw
*
* Save the parent mux value.
diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c
index 5cbf24c94606..f775e18acd46 100644
--- a/drivers/clk/ux500/clk-prcmu.c
+++ b/drivers/clk/ux500/clk-prcmu.c
@@ -53,11 +53,13 @@ static unsigned long clk_prcmu_recalc_rate(struct clk_hw *hw,
return prcmu_clock_rate(clk->cg_sel);
}
-static long clk_prcmu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_prcmu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_prcmu *clk = to_clk_prcmu(hw);
- return prcmu_round_clock_rate(clk->cg_sel, rate);
+ req->rate = prcmu_round_clock_rate(clk->cg_sel, req->rate);
+
+ return 0;
}
static int clk_prcmu_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -157,7 +159,7 @@ static const struct clk_ops clk_prcmu_scalable_ops = {
.prepare = clk_prcmu_prepare,
.unprepare = clk_prcmu_unprepare,
.recalc_rate = clk_prcmu_recalc_rate,
- .round_rate = clk_prcmu_round_rate,
+ .determine_rate = clk_prcmu_determine_rate,
.set_rate = clk_prcmu_set_rate,
};
@@ -169,7 +171,7 @@ static const struct clk_ops clk_prcmu_gate_ops = {
static const struct clk_ops clk_prcmu_scalable_rate_ops = {
.recalc_rate = clk_prcmu_recalc_rate,
- .round_rate = clk_prcmu_round_rate,
+ .determine_rate = clk_prcmu_determine_rate,
.set_rate = clk_prcmu_set_rate,
};
@@ -187,7 +189,7 @@ static const struct clk_ops clk_prcmu_opp_volt_scalable_ops = {
.prepare = clk_prcmu_opp_volt_prepare,
.unprepare = clk_prcmu_opp_volt_unprepare,
.recalc_rate = clk_prcmu_recalc_rate,
- .round_rate = clk_prcmu_round_rate,
+ .determine_rate = clk_prcmu_determine_rate,
.set_rate = clk_prcmu_set_rate,
};
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index d5cb372f0901..86ca04ad9fab 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -194,7 +194,7 @@ static int vco_set(struct clk_icst *icst, struct icst_vco vco)
pr_err("ICST error: tried to use RDW != 22\n");
break;
default:
- /* Regular auxilary oscillator */
+ /* Regular auxiliary oscillator */
mask = VERSATILE_AUX_OSC_BITS;
val = vco.v | (vco.r << 9) | (vco.s << 16);
break;
@@ -234,39 +234,51 @@ static unsigned long icst_recalc_rate(struct clk_hw *hw,
return icst->rate;
}
-static long icst_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int icst_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_icst *icst = to_icst(hw);
struct icst_vco vco;
if (icst->ctype == ICST_INTEGRATOR_AP_CM ||
icst->ctype == ICST_INTEGRATOR_CP_CM_CORE) {
- if (rate <= 12000000)
- return 12000000;
- if (rate >= 160000000)
- return 160000000;
- /* Slam to closest megahertz */
- return DIV_ROUND_CLOSEST(rate, 1000000) * 1000000;
+ if (req->rate <= 12000000)
+ req->rate = 12000000;
+ else if (req->rate >= 160000000)
+ req->rate = 160000000;
+ else {
+ /* Slam to closest megahertz */
+ req->rate = DIV_ROUND_CLOSEST(req->rate, 1000000) * 1000000;
+ }
+
+ return 0;
}
if (icst->ctype == ICST_INTEGRATOR_CP_CM_MEM) {
- if (rate <= 6000000)
- return 6000000;
- if (rate >= 66000000)
- return 66000000;
- /* Slam to closest 0.5 megahertz */
- return DIV_ROUND_CLOSEST(rate, 500000) * 500000;
+ if (req->rate <= 6000000)
+ req->rate = 6000000;
+ else if (req->rate >= 66000000)
+ req->rate = 66000000;
+ else {
+ /* Slam to closest 0.5 megahertz */
+ req->rate = DIV_ROUND_CLOSEST(req->rate, 500000) * 500000;
+ }
+
+ return 0;
}
if (icst->ctype == ICST_INTEGRATOR_AP_SYS) {
/* Divides between 3 and 50 MHz in steps of 0.25 MHz */
- if (rate <= 3000000)
- return 3000000;
- if (rate >= 50000000)
- return 5000000;
- /* Slam to closest 0.25 MHz */
- return DIV_ROUND_CLOSEST(rate, 250000) * 250000;
+ if (req->rate <= 3000000)
+ req->rate = 3000000;
+ else if (req->rate >= 50000000)
+ req->rate = 5000000;
+ else {
+ /* Slam to closest 0.25 MHz */
+ req->rate = DIV_ROUND_CLOSEST(req->rate, 250000) * 250000;
+ }
+
+ return 0;
}
if (icst->ctype == ICST_INTEGRATOR_AP_PCI) {
@@ -274,14 +286,20 @@ static long icst_round_rate(struct clk_hw *hw, unsigned long rate,
* If we're below or less than halfway from 25 to 33 MHz
* select 25 MHz
*/
- if (rate <= 25000000 || rate < 29000000)
- return 25000000;
- /* Else just return the default frequency */
- return 33000000;
+ if (req->rate <= 25000000 || req->rate < 29000000)
+ req->rate = 25000000;
+ else {
+ /* Else just return the default frequency */
+ req->rate = 33000000;
+ }
+
+ return 0;
}
- vco = icst_hz_to_vco(icst->params, rate);
- return icst_hz(icst->params, vco);
+ vco = icst_hz_to_vco(icst->params, req->rate);
+ req->rate = icst_hz(icst->params, vco);
+
+ return 0;
}
static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -329,7 +347,7 @@ static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops icst_ops = {
.recalc_rate = icst_recalc_rate,
- .round_rate = icst_round_rate,
+ .determine_rate = icst_determine_rate,
.set_rate = icst_set_rate,
};
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index c385ca2f4a74..9adbf5c33bd1 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -33,18 +33,18 @@ static unsigned long vexpress_osc_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long vexpress_osc_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vexpress_osc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vexpress_osc *osc = to_vexpress_osc(hw);
- if (osc->rate_min && rate < osc->rate_min)
- rate = osc->rate_min;
+ if (osc->rate_min && req->rate < osc->rate_min)
+ req->rate = osc->rate_min;
- if (osc->rate_max && rate > osc->rate_max)
- rate = osc->rate_max;
+ if (osc->rate_max && req->rate > osc->rate_max)
+ req->rate = osc->rate_max;
- return rate;
+ return 0;
}
static int vexpress_osc_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -57,7 +57,7 @@ static int vexpress_osc_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vexpress_osc_ops = {
.recalc_rate = vexpress_osc_recalc_rate,
- .round_rate = vexpress_osc_round_rate,
+ .determine_rate = vexpress_osc_determine_rate,
.set_rate = vexpress_osc_set_rate,
};
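The vexpress conversion above is about the simplest possible determine_rate: clamp the request between the oscillator's limits and always succeed. Where both bounds are unconditionally known, the kernel's clamp() macro expresses the same thing in one line (a sketch with hypothetical fixed limits; vexpress reads optional per-instance bounds instead):

#include <linux/clk-provider.h>
#include <linux/minmax.h>

static int osc_determine_rate_sketch(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	/* illustrative limits, not the vexpress ones */
	req->rate = clamp(req->rate, 25000000UL, 100000000UL);

	return 0;
}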
diff --git a/drivers/clk/visconti/clkc-tmpv770x.c b/drivers/clk/visconti/clkc-tmpv770x.c
index 6c753b2cb558..1e2e8d6437fe 100644
--- a/drivers/clk/visconti/clkc-tmpv770x.c
+++ b/drivers/clk/visconti/clkc-tmpv770x.c
@@ -17,6 +17,10 @@
#include "clkc.h"
#include "reset.h"
+/* Must be equal to the last clock/reset ID increased by one */
+#define CLKS_NR (TMPV770X_CLK_VIIFBS1_PROC + 1)
+#define RESETS_NR (TMPV770X_RESET_VIIFBS1_L1ISP + 1)
+
static DEFINE_SPINLOCK(tmpv770x_clk_lock);
static DEFINE_SPINLOCK(tmpv770x_rst_lock);
@@ -28,6 +32,10 @@ static const struct clk_parent_data pietherplls_parent_data[] = {
{ .fw_name = "pietherpll", .name = "pietherpll", },
};
+static const struct clk_parent_data pidnnplls_parent_data[] = {
+ { .fw_name = "pidnnpll", .name = "pidnnpll", },
+};
+
static const struct visconti_fixed_clk fixed_clk_tables[] = {
/* PLL1 */
/* PICMPT0/1, PITSC, PIUWDT, PISWDT, PISBUS, PIPMU, PIGPMU, PITMU */
@@ -64,6 +72,41 @@ static const struct visconti_clk_gate_table pietherpll_clk_gate_tables[] = {
TMPV770X_RESET_PIETHER_125M, },
};
+static const struct visconti_clk_gate_table pidnnpll_clk_gate_tables[] = {
+ { TMPV770X_CLK_VIIFBS0, "viifbs0",
+ pidnnplls_parent_data, ARRAY_SIZE(pidnnplls_parent_data),
+ 0, 0x58, 0x158, 1, 1,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIFBS0_PROC, "viifbs0_proc",
+ pidnnplls_parent_data, ARRAY_SIZE(pidnnplls_parent_data),
+ 0, 0x58, 0x158, 18, 1,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIFBS0_L1ISP, "viifbs0_l1isp",
+ pidnnplls_parent_data, ARRAY_SIZE(pidnnplls_parent_data),
+ 0, 0x58, 0x158, 17, 1,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIFBS0_L2ISP, "viifbs0_l2isp",
+ pidnnplls_parent_data, ARRAY_SIZE(pidnnplls_parent_data),
+ 0, 0x58, 0x158, 16, 1,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIFBS1, "viifbs1",
+ pidnnplls_parent_data, ARRAY_SIZE(pidnnplls_parent_data),
+ 0, 0x58, 0x158, 5, 1,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIFBS1_PROC, "viifbs1_proc",
+ pidnnplls_parent_data, ARRAY_SIZE(pidnnplls_parent_data),
+ 0, 0x58, 0x158, 22, 1,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIFBS1_L1ISP, "viifbs1_l1isp",
+ pidnnplls_parent_data, ARRAY_SIZE(pidnnplls_parent_data),
+ 0, 0x58, 0x158, 21, 1,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIFBS1_L2ISP, "viifbs1_l2isp",
+ pidnnplls_parent_data, ARRAY_SIZE(pidnnplls_parent_data),
+ 0, 0x58, 0x158, 20, 1,
+ NO_RESET, },
+};
+
static const struct visconti_clk_gate_table clk_gate_tables[] = {
{ TMPV770X_CLK_HOX, "hox",
clks_parent_data, ARRAY_SIZE(clks_parent_data),
@@ -185,6 +228,22 @@ static const struct visconti_clk_gate_table clk_gate_tables[] = {
clks_parent_data, ARRAY_SIZE(clks_parent_data),
0, 0x14, 0x114, 0, 4,
TMPV770X_RESET_SBUSCLK, },
+ { TMPV770X_CLK_VIIF0_CFGCLK, "csi2rx0cfg",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x58, 0x158, 0, 24,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIF0_APBCLK, "csi2rx0apb",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x58, 0x158, 2, 4,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIF1_CFGCLK, "csi2rx1cfg",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x58, 0x158, 4, 24,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIF1_APBCLK, "csi2rx1apb",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x58, 0x158, 6, 4,
+ NO_RESET, },
};
static const struct visconti_reset_data clk_reset_data[] = {
@@ -220,6 +279,14 @@ static const struct visconti_reset_data clk_reset_data[] = {
[TMPV770X_RESET_PIPCMIF] = { 0x464, 0x564, 0, },
[TMPV770X_RESET_PICKMON] = { 0x410, 0x510, 8, },
[TMPV770X_RESET_SBUSCLK] = { 0x414, 0x514, 0, },
+ [TMPV770X_RESET_VIIFBS0] = { 0x458, 0x558, 0, },
+ [TMPV770X_RESET_VIIFBS0_APB] = { 0x458, 0x558, 1, },
+ [TMPV770X_RESET_VIIFBS0_L2ISP] = { 0x458, 0x558, 16, },
+ [TMPV770X_RESET_VIIFBS0_L1ISP] = { 0x458, 0x558, 17, },
+ [TMPV770X_RESET_VIIFBS1] = { 0x458, 0x558, 4, },
+ [TMPV770X_RESET_VIIFBS1_APB] = { 0x458, 0x558, 5, },
+ [TMPV770X_RESET_VIIFBS1_L2ISP] = { 0x458, 0x558, 20, },
+ [TMPV770X_RESET_VIIFBS1_L1ISP] = { 0x458, 0x558, 21, },
};
static int visconti_clk_probe(struct platform_device *pdev)
@@ -234,12 +301,12 @@ static int visconti_clk_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- ctx = visconti_init_clk(dev, regmap, TMPV770X_NR_CLK);
+ ctx = visconti_init_clk(dev, regmap, CLKS_NR);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
ret = visconti_register_reset_controller(dev, regmap, clk_reset_data,
- TMPV770X_NR_RESET,
+ RESETS_NR,
&visconti_reset_ops,
&tmpv770x_rst_lock);
if (ret) {
@@ -272,6 +339,14 @@ static int visconti_clk_probe(struct platform_device *pdev)
return ret;
}
+ ret = visconti_clk_register_gates(ctx, pidnnpll_clk_gate_tables,
+ ARRAY_SIZE(pidnnpll_clk_gate_tables),
+ clk_reset_data, &tmpv770x_clk_lock);
+ if (ret) {
+ dev_err(dev, "Failed to register pidnnpll clock gate: %d\n", ret);
+ return ret;
+ }
+
return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &ctx->clk_data);
}
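The CLKS_NR/RESETS_NR defines introduced above replace the TMPV770X_NR_CLK style totals: deriving the array size from the last binding ID plus one keeps the provider in sync when new IDs are appended to the dt-bindings header. The pattern in general form (IDs below are illustrative, not the real binding):

/* dt-bindings header provides consecutive IDs */
#define SOC_CLK_UART0	0
#define SOC_CLK_UART1	1
#define SOC_CLK_SPI	2	/* newest ID last */

/* provider: must be equal to the last clock ID increased by one */
#define CLKS_NR		(SOC_CLK_SPI + 1)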
diff --git a/drivers/clk/visconti/pll-tmpv770x.c b/drivers/clk/visconti/pll-tmpv770x.c
index 8360ccf88867..a2208c5fc12e 100644
--- a/drivers/clk/visconti/pll-tmpv770x.c
+++ b/drivers/clk/visconti/pll-tmpv770x.c
@@ -16,6 +16,9 @@
#include "pll.h"
+/* Must be equal to the last pll ID increased by one */
+#define PLLS_NR (TMPV770X_PLL_PIIMGERPLL + 1)
+
static DEFINE_SPINLOCK(tmpv770x_pll_lock);
static const struct visconti_pll_rate_table pipll0_rates[] __initconst = {
@@ -66,7 +69,7 @@ static void __init tmpv770x_setup_plls(struct device_node *np)
if (!reg_base)
return;
- ctx = visconti_init_pll(np, reg_base, TMPV770X_NR_PLL);
+ ctx = visconti_init_pll(np, reg_base, PLLS_NR);
if (IS_ERR(ctx)) {
iounmap(reg_base);
return;
diff --git a/drivers/clk/visconti/pll.c b/drivers/clk/visconti/pll.c
index 3f929cf8dd2f..681721d85032 100644
--- a/drivers/clk/visconti/pll.c
+++ b/drivers/clk/visconti/pll.c
@@ -100,20 +100,25 @@ static unsigned long visconti_get_pll_rate_from_data(struct visconti_pll *pll,
return rate_table[0].rate;
}
-static long visconti_pll_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *prate)
+static int visconti_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct visconti_pll *pll = to_visconti_pll(hw);
const struct visconti_pll_rate_table *rate_table = pll->rate_table;
int i;
- /* Assumming rate_table is in descending order */
+ /* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++)
- if (rate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
/* return minimum supported value */
- return rate_table[i - 1].rate;
+ req->rate = rate_table[i - 1].rate;
+
+ return 0;
}
static unsigned long visconti_pll_recalc_rate(struct clk_hw *hw,
@@ -232,7 +237,7 @@ static const struct clk_ops visconti_pll_ops = {
.enable = visconti_pll_enable,
.disable = visconti_pll_disable,
.is_enabled = visconti_pll_is_enabled,
- .round_rate = visconti_pll_round_rate,
+ .determine_rate = visconti_pll_determine_rate,
.recalc_rate = visconti_pll_recalc_rate,
.set_rate = visconti_pll_set_rate,
};
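visconti_pll_determine_rate() above walks a rate table sorted high-to-low and picks the first entry not above the request, falling back to the smallest supported rate. The same lookup as a free-standing function (the entry type and name are illustrative):

struct rate_entry { unsigned long rate; };

/* @table must be sorted in descending order, @count >= 1 */
static unsigned long pick_rate_desc(const struct rate_entry *table,
				    int count, unsigned long target)
{
	int i;

	for (i = 0; i < count; i++)
		if (target >= table[i].rate)
			return table[i].rate;

	return table[count - 1].rate;	/* minimum supported value */
}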
diff --git a/drivers/clk/x86/clk-cgu.c b/drivers/clk/x86/clk-cgu.c
index 89b53f280aee..d099667355f8 100644
--- a/drivers/clk/x86/clk-cgu.c
+++ b/drivers/clk/x86/clk-cgu.c
@@ -132,14 +132,15 @@ lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
divider->flags, divider->width);
}
-static long
-lgm_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lgm_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, divider->table,
+ divider->width, divider->flags);
+
+ return 0;
}
static int
@@ -182,7 +183,7 @@ static void lgm_clk_divider_disable(struct clk_hw *hw)
static const struct clk_ops lgm_clk_divider_ops = {
.recalc_rate = lgm_clk_divider_recalc_rate,
- .round_rate = lgm_clk_divider_round_rate,
+ .determine_rate = lgm_clk_divider_determine_rate,
.set_rate = lgm_clk_divider_set_rate,
.enable = lgm_clk_divider_enable,
.disable = lgm_clk_divider_disable,
@@ -487,15 +488,14 @@ lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long
-lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lgm_clk_ddiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
u32 div, ddiv1, ddiv2;
u64 rate64;
- div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);
+ div = DIV_ROUND_CLOSEST_ULL((u64)req->best_parent_rate, req->rate);
/* if predivide bit is enabled, modify div by factor of 2.5 */
if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
@@ -503,14 +503,17 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
}
- if (div <= 0)
- return *prate;
+ if (div <= 0) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0)
if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0)
return -EINVAL;
- rate64 = *prate;
+ rate64 = req->best_parent_rate;
do_div(rate64, ddiv1);
do_div(rate64, ddiv2);
@@ -520,7 +523,9 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
}
- return rate64;
+ req->rate = rate64;
+
+ return 0;
}
static const struct clk_ops lgm_clk_ddiv_ops = {
@@ -528,7 +533,7 @@ static const struct clk_ops lgm_clk_ddiv_ops = {
.enable = lgm_clk_ddiv_enable,
.disable = lgm_clk_ddiv_disable,
.set_rate = lgm_clk_ddiv_set_rate,
- .round_rate = lgm_clk_ddiv_round_rate,
+ .determine_rate = lgm_clk_ddiv_determine_rate,
};
int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
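The lgm ddiv conversion above keeps the fixed-point trick for the optional 2.5 pre-divider: multiply by 2 and divide by 5 on either side of the main division so everything stays integral, with do_div() used because the intermediate rate is 64-bit. That arithmetic as a standalone sketch (ddiv1/ddiv2 here are plain parameters, not register fields):

#include <linux/math64.h>

static u64 ddiv_rate_sketch(u64 parent_rate, u32 ddiv1, u32 ddiv2,
			    bool prediv25)
{
	u64 rate64 = parent_rate;

	do_div(rate64, ddiv1);
	do_div(rate64, ddiv2);

	if (prediv25) {			/* extra divide by 2.5 == *2 / 5 */
		rate64 *= 2;
		rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
	}

	return rate64;
}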
diff --git a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
index bbf7714480e7..4a0136349f71 100644
--- a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
+++ b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
@@ -322,8 +322,8 @@ err_reconfig:
return err;
}
-static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_wzrd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u8 div;
@@ -331,16 +331,18 @@ static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
* since we don't change parent rate we just round rate to closest
* achievable
*/
- div = DIV_ROUND_CLOSEST(*prate, rate);
+ div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
- return *prate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int clk_wzrd_get_divisors_ver(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
- u64 vco_freq, freq, diff, vcomin, vcomax;
+ u64 vco_freq, freq, diff, vcomin, vcomax, best_diff = -1ULL;
u32 m, d, o;
u32 mmin, mmax, dmin, dmax, omin, omax;
@@ -356,22 +358,26 @@ static int clk_wzrd_get_divisors_ver(struct clk_hw *hw, unsigned long rate,
for (m = mmin; m <= mmax; m++) {
for (d = dmin; d <= dmax; d++) {
vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
- if (vco_freq >= vcomin && vco_freq <= vcomax) {
- for (o = omin; o <= omax; o++) {
- freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
- diff = abs(freq - rate);
-
- if (diff < WZRD_MIN_ERR) {
- divider->m = m;
- divider->d = d;
- divider->o = o;
- return 0;
- }
- }
+ if (vco_freq < vcomin || vco_freq > vcomax)
+ continue;
+
+ o = DIV_ROUND_CLOSEST_ULL(vco_freq, rate);
+ if (o < omin || o > omax)
+ continue;
+ freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
+ diff = abs(freq - rate);
+
+ if (diff < best_diff) {
+ best_diff = diff;
+ divider->m = m;
+ divider->d = d;
+ divider->o = o;
+ if (!diff)
+ return 0;
}
}
}
- return -EBUSY;
+ return 0;
}
static int clk_wzrd_get_divisors(struct clk_hw *hw, unsigned long rate,
@@ -642,14 +648,14 @@ static unsigned long clk_wzrd_recalc_rate_all_ver(struct clk_hw *hw,
divider->flags, divider->width);
}
-static long clk_wzrd_round_rate_all(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_wzrd_determine_rate_all(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
u32 m, d, o;
int err;
- err = clk_wzrd_get_divisors(hw, rate, *prate);
+ err = clk_wzrd_get_divisors(hw, req->rate, req->best_parent_rate);
if (err)
return err;
@@ -657,19 +663,20 @@ static long clk_wzrd_round_rate_all(struct clk_hw *hw, unsigned long rate,
d = divider->d;
o = divider->o;
- rate = div_u64(*prate * (m * 1000 + divider->m_frac), d * (o * 1000 + divider->o_frac));
- return rate;
+ req->rate = div_u64(req->best_parent_rate * (m * 1000 + divider->m_frac),
+ d * (o * 1000 + divider->o_frac));
+ return 0;
}
-static long clk_wzrd_ver_round_rate_all(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_wzrd_ver_determine_rate_all(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
unsigned long int_freq;
u32 m, d, o, div, f;
int err;
- err = clk_wzrd_get_divisors(hw, rate, *prate);
+ err = clk_wzrd_get_divisors_ver(hw, req->rate, req->best_parent_rate);
if (err)
return err;
@@ -678,36 +685,38 @@ static long clk_wzrd_ver_round_rate_all(struct clk_hw *hw, unsigned long rate,
o = divider->o;
div = d * o;
- int_freq = divider_recalc_rate(hw, *prate * m, div, divider->table,
+ int_freq = divider_recalc_rate(hw, req->best_parent_rate * m, div,
+ divider->table,
divider->flags, divider->width);
- if (rate > int_freq) {
- f = DIV_ROUND_CLOSEST_ULL(rate * WZRD_FRAC_POINTS, int_freq);
- rate = DIV_ROUND_CLOSEST(int_freq * f, WZRD_FRAC_POINTS);
+ if (req->rate > int_freq) {
+ f = DIV_ROUND_CLOSEST_ULL(req->rate * WZRD_FRAC_POINTS,
+ int_freq);
+ req->rate = DIV_ROUND_CLOSEST(int_freq * f, WZRD_FRAC_POINTS);
}
- return rate;
+ return 0;
}
static const struct clk_ops clk_wzrd_ver_divider_ops = {
- .round_rate = clk_wzrd_round_rate,
+ .determine_rate = clk_wzrd_determine_rate,
.set_rate = clk_wzrd_ver_dynamic_reconfig,
.recalc_rate = clk_wzrd_recalc_rate_ver,
};
static const struct clk_ops clk_wzrd_ver_div_all_ops = {
- .round_rate = clk_wzrd_ver_round_rate_all,
+ .determine_rate = clk_wzrd_ver_determine_rate_all,
.set_rate = clk_wzrd_dynamic_all_ver,
.recalc_rate = clk_wzrd_recalc_rate_all_ver,
};
static const struct clk_ops clk_wzrd_clk_divider_ops = {
- .round_rate = clk_wzrd_round_rate,
+ .determine_rate = clk_wzrd_determine_rate,
.set_rate = clk_wzrd_dynamic_reconfig,
.recalc_rate = clk_wzrd_recalc_rate,
};
static const struct clk_ops clk_wzrd_clk_div_all_ops = {
- .round_rate = clk_wzrd_round_rate_all,
+ .determine_rate = clk_wzrd_determine_rate_all,
.set_rate = clk_wzrd_dynamic_all,
.recalc_rate = clk_wzrd_recalc_rate_all,
};
@@ -769,14 +778,14 @@ static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
}
-static long clk_wzrd_round_rate_f(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_wzrd_determine_rate_f(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return rate;
+ return 0;
}
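The fractional variant is a deliberate pass-through: where the old callback returned rate unchanged, the new one returns 0 and leaves req->rate untouched, which is the equivalent "accept the requested rate as-is" behaviour under .determine_rate.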
static const struct clk_ops clk_wzrd_clk_divider_ops_f = {
- .round_rate = clk_wzrd_round_rate_f,
+ .determine_rate = clk_wzrd_determine_rate_f,
.set_rate = clk_wzrd_dynamic_reconfig_f,
.recalc_rate = clk_wzrd_recalc_ratef,
};
@@ -1108,7 +1117,7 @@ static int clk_wzrd_register_output_clocks(struct device *dev, int nr_outputs)
(dev,
clkout_name, clk_name, 0,
clk_wzrd->base,
- (WZRD_CLK_CFG_REG(is_versal, 3) + i * 8),
+ (WZRD_CLK_CFG_REG(is_versal, 2) + i * 8),
WZRD_CLKOUT_DIVIDE_SHIFT,
WZRD_CLKOUT_DIVIDE_WIDTH,
CLK_DIVIDER_ONE_BASED |
diff --git a/drivers/clk/xilinx/xlnx_vcu.c b/drivers/clk/xilinx/xlnx_vcu.c
index 81501b48412e..02699bc0f82c 100644
--- a/drivers/clk/xilinx/xlnx_vcu.c
+++ b/drivers/clk/xilinx/xlnx_vcu.c
@@ -11,6 +11,7 @@
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/xlnx-vcu.h>
@@ -51,6 +52,7 @@
* @dev: Platform device
* @pll_ref: pll ref clock source
* @aclk: axi clock source
+ * @reset_gpio: vcu reset gpio
* @logicore_reg_ba: logicore reg base address
* @vcu_slcr_ba: vcu_slcr Register base address
* @pll: handle for the VCU PLL
@@ -61,6 +63,7 @@ struct xvcu_device {
struct device *dev;
struct clk *pll_ref;
struct clk *aclk;
+ struct gpio_desc *reset_gpio;
struct regmap *logicore_reg_ba;
void __iomem *vcu_slcr_ba;
struct clk_hw *pll;
@@ -308,18 +311,21 @@ static int xvcu_pll_set_div(struct vcu_pll *pll, int div)
return 0;
}
-static long xvcu_pll_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static int xvcu_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vcu_pll *pll = to_vcu_pll(hw);
unsigned int feedback_div;
- rate = clamp_t(unsigned long, rate, pll->fvco_min, pll->fvco_max);
+ req->rate = clamp_t(unsigned long, req->rate, pll->fvco_min,
+ pll->fvco_max);
- feedback_div = DIV_ROUND_CLOSEST_ULL(rate, *parent_rate);
+ feedback_div = DIV_ROUND_CLOSEST_ULL(req->rate, req->best_parent_rate);
feedback_div = clamp_t(unsigned int, feedback_div, 25, 125);
- return *parent_rate * feedback_div;
+ req->rate = req->best_parent_rate * feedback_div;
+
+ return 0;
}
static unsigned long xvcu_pll_recalc_rate(struct clk_hw *hw,
@@ -391,7 +397,7 @@ static void xvcu_pll_disable(struct clk_hw *hw)
static const struct clk_ops vcu_pll_ops = {
.enable = xvcu_pll_enable,
.disable = xvcu_pll_disable,
- .round_rate = xvcu_pll_round_rate,
+ .determine_rate = xvcu_pll_determine_rate,
.recalc_rate = xvcu_pll_recalc_rate,
.set_rate = xvcu_pll_set_rate,
};
@@ -587,8 +593,8 @@ static void xvcu_unregister_clock_provider(struct xvcu_device *xvcu)
xvcu_clk_hw_unregister_leaf(hws[CLK_XVCU_ENC_MCU]);
if (!IS_ERR_OR_NULL(hws[CLK_XVCU_ENC_CORE]))
xvcu_clk_hw_unregister_leaf(hws[CLK_XVCU_ENC_CORE]);
-
- clk_hw_unregister_fixed_factor(xvcu->pll_post);
+ if (!IS_ERR_OR_NULL(xvcu->pll_post))
+ clk_hw_unregister_fixed_factor(xvcu->pll_post);
}
/**
@@ -676,6 +682,24 @@ static int xvcu_probe(struct platform_device *pdev)
* Bit 0 : Gasket isolation
* Bit 1 : put VCU out of reset
*/
+ xvcu->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(xvcu->reset_gpio)) {
+ ret = PTR_ERR(xvcu->reset_gpio);
+ dev_err_probe(&pdev->dev, ret, "failed to get reset gpio for vcu.\n");
+ goto error_get_gpio;
+ }
+
+ if (xvcu->reset_gpio) {
+ gpiod_set_value(xvcu->reset_gpio, 0);
+		/* min 2 clock cycles of vcu pll_ref; slowest freq is 33.33 kHz */
+ usleep_range(60, 120);
+ gpiod_set_value(xvcu->reset_gpio, 1);
+ usleep_range(60, 120);
+ } else {
+ dev_dbg(&pdev->dev, "No reset gpio info found in dts for VCU. This may result in incorrect functionality if VCU isolation is removed after initialization in designs where the VCU reset is driven by gpio.\n");
+ }
+
regmap_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, VCU_GASKET_VALUE);
ret = xvcu_register_clock_provider(xvcu);
@@ -690,6 +714,7 @@ static int xvcu_probe(struct platform_device *pdev)
error_clk_provider:
xvcu_unregister_clock_provider(xvcu);
+error_get_gpio:
clk_disable_unprepare(xvcu->aclk);
return ret;
}
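The sleep bounds follow from the constraint in the comment: at the slowest pll_ref of 33.33 kHz one clock cycle is 1 / 33330 Hz, roughly 30 us, so the required two cycles come to roughly 60 us, the lower bound of usleep_range(60, 120).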
@@ -711,6 +736,13 @@ static void xvcu_remove(struct platform_device *pdev)
xvcu_unregister_clock_provider(xvcu);
/* Add the Gasket isolation and put the VCU in reset. */
+ if (xvcu->reset_gpio) {
+ gpiod_set_value(xvcu->reset_gpio, 0);
+		/* min 2 clock cycles of vcu pll_ref; slowest freq is 33.33 kHz */
+ usleep_range(60, 120);
+ gpiod_set_value(xvcu->reset_gpio, 1);
+ usleep_range(60, 120);
+ }
regmap_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, 0);
clk_disable_unprepare(xvcu->aclk);
diff --git a/drivers/clk/zynq/pll.c b/drivers/clk/zynq/pll.c
index e5f8fb704df2..5eca1c14981a 100644
--- a/drivers/clk/zynq/pll.c
+++ b/drivers/clk/zynq/pll.c
@@ -48,18 +48,20 @@ struct zynq_pll {
* @prate: Clock frequency of parent clock
* Return: frequency closest to @rate the hardware can generate.
*/
-static long zynq_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int zynq_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 fbdiv;
- fbdiv = DIV_ROUND_CLOSEST(rate, *prate);
+ fbdiv = DIV_ROUND_CLOSEST(req->rate, req->best_parent_rate);
if (fbdiv < PLL_FBDIV_MIN)
fbdiv = PLL_FBDIV_MIN;
else if (fbdiv > PLL_FBDIV_MAX)
fbdiv = PLL_FBDIV_MAX;
- return *prate * fbdiv;
+ req->rate = req->best_parent_rate * fbdiv;
+
+ return 0;
}
/**
@@ -167,7 +169,7 @@ static const struct clk_ops zynq_pll_ops = {
.enable = zynq_pll_enable,
.disable = zynq_pll_disable,
.is_enabled = zynq_pll_is_enabled,
- .round_rate = zynq_pll_round_rate,
+ .determine_rate = zynq_pll_determine_rate,
.recalc_rate = zynq_pll_recalc_rate
};
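For illustration, assuming the common 33.333 MHz PS reference clock on Zynq-7000: a 1 GHz request gives fbdiv = DIV_ROUND_CLOSEST(1000000000, 33333333) = 30, which already sits inside [PLL_FBDIV_MIN, PLL_FBDIV_MAX], so req->rate becomes 33333333 * 30 = 999999990 Hz, just under the requested 1 GHz.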
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index 5a00487ae408..c824eeacd8eb 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -118,9 +118,8 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
*
* Return: 0 on success else error+reason
*/
-static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int zynqmp_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct zynqmp_clk_divider *divider = to_zynqmp_clk_divider(hw);
const char *clk_name = clk_hw_get_name(hw);
@@ -145,17 +144,21 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
bestdiv = 1 << bestdiv;
- return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
+ req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, bestdiv);
+
+ return 0;
}
width = fls(divider->max_div);
- rate = divider_round_rate(hw, rate, prate, NULL, width, divider->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ NULL, width, divider->flags);
- if (divider->is_frac && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && (rate % *prate))
- *prate = rate;
+ if (divider->is_frac && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) &&
+ (req->rate % req->best_parent_rate))
+ req->best_parent_rate = req->rate;
- return rate;
+ return 0;
}
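Note the fractional special case carried over at the end: for a fractional divider with CLK_SET_RATE_PARENT, when divider_round_rate() produces a rate that is not an integer multiple of the parent rate, the request now propagates upward by setting req->best_parent_rate to the target rate itself, the clk_rate_request equivalent of the old *prate = rate assignment.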
/**
@@ -199,13 +202,13 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops zynqmp_clk_divider_ops = {
.recalc_rate = zynqmp_clk_divider_recalc_rate,
- .round_rate = zynqmp_clk_divider_round_rate,
+ .determine_rate = zynqmp_clk_divider_determine_rate,
.set_rate = zynqmp_clk_divider_set_rate,
};
static const struct clk_ops zynqmp_clk_divider_ro_ops = {
.recalc_rate = zynqmp_clk_divider_recalc_rate,
- .round_rate = zynqmp_clk_divider_round_rate,
+ .determine_rate = zynqmp_clk_divider_determine_rate,
};
/**
diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
index 7411a7fd50ac..630a3936c97c 100644
--- a/drivers/clk/zynqmp/pll.c
+++ b/drivers/clk/zynqmp/pll.c
@@ -98,29 +98,29 @@ static inline void zynqmp_pll_set_mode(struct clk_hw *hw, bool on)
*
* Return: Frequency closest to @rate the hardware can generate
*/
-static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int zynqmp_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 fbdiv;
u32 mult, div;
/* Let rate fall inside the range PS_PLL_VCO_MIN ~ PS_PLL_VCO_MAX */
- if (rate > PS_PLL_VCO_MAX) {
- div = DIV_ROUND_UP(rate, PS_PLL_VCO_MAX);
- rate = rate / div;
+ if (req->rate > PS_PLL_VCO_MAX) {
+ div = DIV_ROUND_UP(req->rate, PS_PLL_VCO_MAX);
+ req->rate = req->rate / div;
}
- if (rate < PS_PLL_VCO_MIN) {
- mult = DIV_ROUND_UP(PS_PLL_VCO_MIN, rate);
- rate = rate * mult;
+ if (req->rate < PS_PLL_VCO_MIN) {
+ mult = DIV_ROUND_UP(PS_PLL_VCO_MIN, req->rate);
+ req->rate = req->rate * mult;
}
- fbdiv = DIV_ROUND_CLOSEST(rate, *prate);
+ fbdiv = DIV_ROUND_CLOSEST(req->rate, req->best_parent_rate);
if (fbdiv < PLL_FBDIV_MIN || fbdiv > PLL_FBDIV_MAX) {
fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
- rate = *prate * fbdiv;
+ req->rate = req->best_parent_rate * fbdiv;
}
- return rate;
+ return 0;
}
/**
@@ -294,7 +294,7 @@ static const struct clk_ops zynqmp_pll_ops = {
.enable = zynqmp_pll_enable,
.disable = zynqmp_pll_disable,
.is_enabled = zynqmp_pll_is_enabled,
- .round_rate = zynqmp_pll_round_rate,
+ .determine_rate = zynqmp_pll_determine_rate,
.recalc_rate = zynqmp_pll_recalc_rate,
.set_rate = zynqmp_pll_set_rate,
};
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 645f517a1ac2..aa59e5b13351 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -395,8 +395,7 @@ config ARM_GLOBAL_TIMER
config ARM_GT_INITIAL_PRESCALER_VAL
int "ARM global timer initial prescaler value"
- default 2 if ARCH_ZYNQ
- default 1
+ default 0
depends on ARM_GLOBAL_TIMER
help
When the ARM global timer initializes, its current rate is declared
@@ -406,6 +405,7 @@ config ARM_GT_INITIAL_PRESCALER_VAL
bounds about how much the parent clock is allowed to decrease or
increase wrt the initial clock value.
This affects CPU_FREQ max delta from the initial frequency.
+ Use 0 to use auto-detection in the driver.
config ARM_TIMER_SP804
bool "Support for Dual Timer SP804 module"
@@ -474,11 +474,14 @@ config FSL_FTM_TIMER
help
Support for Freescale FlexTimer Module (FTM) timer.
-config VF_PIT_TIMER
- bool
+config NXP_PIT_TIMER
+ bool "NXP Periodic Interrupt Timer" if COMPILE_TEST
select CLKSRC_MMIO
help
- Support for Periodic Interrupt Timer on Freescale Vybrid Family SoCs.
+ Support for Periodic Interrupt Timer on Freescale / NXP
+ SoCs. This periodic timer is found on the Vybrid Family and
+	  the Automotive S32G2/3 platforms. It contains 4 channels,
+	  of which two can be coupled to form a 64-bit channel.
config SYS_SUPPORTS_SH_CMT
bool
@@ -779,4 +782,15 @@ config NXP_STM_TIMER
Enables the support for NXP System Timer Module found in the
s32g NXP platform series.
+config RTK_SYSTIMER
+ bool "Realtek SYSTIMER support"
+ depends on ARM || ARM64
+ depends on ARCH_REALTEK || COMPILE_TEST
+ select TIMER_OF
+ help
+ This option enables the driver that registers the global 1 MHz hardware
+ counter as a clock event device on Realtek SoCs. Make sure to enable
+ this option only when building for a Realtek platform or for compilation
+ testing.
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 205bf3b0a8f3..b46376af6b49 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_CLKSRC_LPC32XX) += timer-lpc32xx.o
obj-$(CONFIG_CLKSRC_MPS2) += mps2-timer.o
obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
obj-$(CONFIG_FSL_FTM_TIMER) += timer-fsl-ftm.o
-obj-$(CONFIG_VF_PIT_TIMER) += timer-vf-pit.o
+obj-$(CONFIG_NXP_PIT_TIMER) += timer-nxp-pit.o
obj-$(CONFIG_CLKSRC_QCOM) += timer-qcom.o
obj-$(CONFIG_MTK_TIMER) += timer-mediatek.o
obj-$(CONFIG_MTK_CPUX_TIMER) += timer-mediatek-cpux.o
@@ -64,6 +64,7 @@ obj-$(CONFIG_REALTEK_OTTO_TIMER) += timer-rtl-otto.o
obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
+obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer_mmio.o
obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o
@@ -94,3 +95,4 @@ obj-$(CONFIG_CLKSRC_LOONGSON1_PWM) += timer-loongson1-pwm.o
obj-$(CONFIG_EP93XX_TIMER) += timer-ep93xx.o
obj-$(CONFIG_RALINK_TIMER) += timer-ralink.o
obj-$(CONFIG_NXP_STM_TIMER) += timer-nxp-stm.o
+obj-$(CONFIG_RTK_SYSTIMER) += timer-realtek.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 981a578043a5..90aeff44a276 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -34,42 +34,12 @@
#include <clocksource/arm_arch_timer.h>
-#define CNTTIDR 0x08
-#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
-
-#define CNTACR(n) (0x40 + ((n) * 4))
-#define CNTACR_RPCT BIT(0)
-#define CNTACR_RVCT BIT(1)
-#define CNTACR_RFRQ BIT(2)
-#define CNTACR_RVOFF BIT(3)
-#define CNTACR_RWVT BIT(4)
-#define CNTACR_RWPT BIT(5)
-
-#define CNTPCT_LO 0x00
-#define CNTVCT_LO 0x08
-#define CNTFRQ 0x10
-#define CNTP_CVAL_LO 0x20
-#define CNTP_CTL 0x2c
-#define CNTV_CVAL_LO 0x30
-#define CNTV_CTL 0x3c
-
/*
* The minimum amount of time a generic counter is guaranteed to not roll over
* (40 years)
*/
#define MIN_ROLLOVER_SECS (40ULL * 365 * 24 * 3600)
-static unsigned arch_timers_present __initdata;
-
-struct arch_timer {
- void __iomem *base;
- struct clock_event_device evt;
-};
-
-static struct arch_timer *arch_timer_mem __ro_after_init;
-
-#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
-
static u32 arch_timer_rate __ro_after_init;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI] __ro_after_init;
@@ -85,7 +55,6 @@ static struct clock_event_device __percpu *arch_timer_evt;
static enum arch_timer_ppi_nr arch_timer_uses_ppi __ro_after_init = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop __ro_after_init;
-static bool arch_timer_mem_use_virtual __ro_after_init;
static bool arch_counter_suspend_stop __ro_after_init;
#ifdef CONFIG_GENERIC_GETTIMEOFDAY
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
@@ -121,76 +90,6 @@ static int arch_counter_get_width(void)
/*
* Architected system timer support.
*/
-
-static __always_inline
-void arch_timer_reg_write(int access, enum arch_timer_reg reg, u64 val,
- struct clock_event_device *clk)
-{
- if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- writel_relaxed((u32)val, timer->base + CNTP_CTL);
- break;
- case ARCH_TIMER_REG_CVAL:
- /*
- * Not guaranteed to be atomic, so the timer
- * must be disabled at this point.
- */
- writeq_relaxed(val, timer->base + CNTP_CVAL_LO);
- break;
- default:
- BUILD_BUG();
- }
- } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- writel_relaxed((u32)val, timer->base + CNTV_CTL);
- break;
- case ARCH_TIMER_REG_CVAL:
- /* Same restriction as above */
- writeq_relaxed(val, timer->base + CNTV_CVAL_LO);
- break;
- default:
- BUILD_BUG();
- }
- } else {
- arch_timer_reg_write_cp15(access, reg, val);
- }
-}
-
-static __always_inline
-u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
- struct clock_event_device *clk)
-{
- u32 val;
-
- if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- val = readl_relaxed(timer->base + CNTP_CTL);
- break;
- default:
- BUILD_BUG();
- }
- } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- val = readl_relaxed(timer->base + CNTV_CTL);
- break;
- default:
- BUILD_BUG();
- }
- } else {
- val = arch_timer_reg_read_cp15(access, reg);
- }
-
- return val;
-}
-
static noinstr u64 raw_counter_get_cntpct_stable(void)
{
return __arch_counter_get_cntpct_stable();
@@ -243,7 +142,7 @@ static u64 arch_counter_read(struct clocksource *cs)
return arch_timer_read_counter();
}
-static u64 arch_counter_read_cc(const struct cyclecounter *cc)
+static u64 arch_counter_read_cc(struct cyclecounter *cc)
{
return arch_timer_read_counter();
}
@@ -424,7 +323,7 @@ void erratum_set_next_event_generic(const int access, unsigned long evt,
unsigned long ctrl;
u64 cval;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
@@ -436,7 +335,7 @@ void erratum_set_next_event_generic(const int access, unsigned long evt,
write_sysreg(cval, cntv_cval_el0);
}
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
}
static __maybe_unused int erratum_set_next_event_virt(unsigned long evt,
@@ -667,10 +566,10 @@ static __always_inline irqreturn_t timer_handler(const int access,
{
unsigned long ctrl;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
ctrl |= ARCH_TIMER_CTRL_IT_MASK;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
evt->event_handler(evt);
return IRQ_HANDLED;
}
@@ -692,28 +591,14 @@ static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}
-static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
-}
-
-static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
-}
-
static __always_inline int arch_timer_shutdown(const int access,
struct clock_event_device *clk)
{
unsigned long ctrl;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
return 0;
}
@@ -728,23 +613,13 @@ static int arch_timer_shutdown_phys(struct clock_event_device *clk)
return arch_timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}
-static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
-{
- return arch_timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
-}
-
-static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
-{
- return arch_timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
-}
-
static __always_inline void set_next_event(const int access, unsigned long evt,
struct clock_event_device *clk)
{
unsigned long ctrl;
u64 cnt;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
@@ -753,8 +628,8 @@ static __always_inline void set_next_event(const int access, unsigned long evt,
else
cnt = __arch_counter_get_cntvct();
- arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CVAL, evt + cnt);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
}
static int arch_timer_set_next_event_virt(unsigned long evt,
@@ -771,60 +646,6 @@ static int arch_timer_set_next_event_phys(unsigned long evt,
return 0;
}
-static noinstr u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)
-{
- u32 cnt_lo, cnt_hi, tmp_hi;
-
- do {
- cnt_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
- cnt_lo = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo));
- tmp_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
- } while (cnt_hi != tmp_hi);
-
- return ((u64) cnt_hi << 32) | cnt_lo;
-}
-
-static __always_inline void set_next_event_mem(const int access, unsigned long evt,
- struct clock_event_device *clk)
-{
- struct arch_timer *timer = to_arch_timer(clk);
- unsigned long ctrl;
- u64 cnt;
-
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
-
- /* Timer must be disabled before programming CVAL */
- if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
- ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
- }
-
- ctrl |= ARCH_TIMER_CTRL_ENABLE;
- ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
-
- if (access == ARCH_TIMER_MEM_VIRT_ACCESS)
- cnt = arch_counter_get_cnt_mem(timer, CNTVCT_LO);
- else
- cnt = arch_counter_get_cnt_mem(timer, CNTPCT_LO);
-
- arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
-}
-
-static int arch_timer_set_next_event_virt_mem(unsigned long evt,
- struct clock_event_device *clk)
-{
- set_next_event_mem(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
- return 0;
-}
-
-static int arch_timer_set_next_event_phys_mem(unsigned long evt,
- struct clock_event_device *clk)
-{
- set_next_event_mem(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
- return 0;
-}
-
static u64 __arch_timer_check_delta(void)
{
#ifdef CONFIG_ARM64
@@ -850,63 +671,41 @@ static u64 __arch_timer_check_delta(void)
return CLOCKSOURCE_MASK(arch_counter_get_width());
}
-static void __arch_timer_setup(unsigned type,
- struct clock_event_device *clk)
+static void __arch_timer_setup(struct clock_event_device *clk)
{
+ typeof(clk->set_next_event) sne;
u64 max_delta;
clk->features = CLOCK_EVT_FEAT_ONESHOT;
- if (type == ARCH_TIMER_TYPE_CP15) {
- typeof(clk->set_next_event) sne;
-
- arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
-
- if (arch_timer_c3stop)
- clk->features |= CLOCK_EVT_FEAT_C3STOP;
- clk->name = "arch_sys_timer";
- clk->rating = 450;
- clk->cpumask = cpumask_of(smp_processor_id());
- clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
- switch (arch_timer_uses_ppi) {
- case ARCH_TIMER_VIRT_PPI:
- clk->set_state_shutdown = arch_timer_shutdown_virt;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
- sne = erratum_handler(set_next_event_virt);
- break;
- case ARCH_TIMER_PHYS_SECURE_PPI:
- case ARCH_TIMER_PHYS_NONSECURE_PPI:
- case ARCH_TIMER_HYP_PPI:
- clk->set_state_shutdown = arch_timer_shutdown_phys;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
- sne = erratum_handler(set_next_event_phys);
- break;
- default:
- BUG();
- }
+ arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
- clk->set_next_event = sne;
- max_delta = __arch_timer_check_delta();
- } else {
- clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
- clk->name = "arch_mem_timer";
- clk->rating = 400;
- clk->cpumask = cpu_possible_mask;
- if (arch_timer_mem_use_virtual) {
- clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
- clk->set_next_event =
- arch_timer_set_next_event_virt_mem;
- } else {
- clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
- clk->set_next_event =
- arch_timer_set_next_event_phys_mem;
- }
-
- max_delta = CLOCKSOURCE_MASK(56);
+ if (arch_timer_c3stop)
+ clk->features |= CLOCK_EVT_FEAT_C3STOP;
+ clk->name = "arch_sys_timer";
+ clk->rating = 450;
+ clk->cpumask = cpumask_of(smp_processor_id());
+ clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
+ switch (arch_timer_uses_ppi) {
+ case ARCH_TIMER_VIRT_PPI:
+ clk->set_state_shutdown = arch_timer_shutdown_virt;
+ clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
+ sne = erratum_handler(set_next_event_virt);
+ break;
+ case ARCH_TIMER_PHYS_SECURE_PPI:
+ case ARCH_TIMER_PHYS_NONSECURE_PPI:
+ case ARCH_TIMER_HYP_PPI:
+ clk->set_state_shutdown = arch_timer_shutdown_phys;
+ clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
+ sne = erratum_handler(set_next_event_phys);
+ break;
+ default:
+ BUG();
}
+ clk->set_next_event = sne;
+ max_delta = __arch_timer_check_delta();
+
clk->set_state_shutdown(clk);
clockevents_config_and_register(clk, arch_timer_rate, 0xf, max_delta);
@@ -1029,7 +828,7 @@ static int arch_timer_starting_cpu(unsigned int cpu)
struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
u32 flags;
- __arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);
+ __arch_timer_setup(clk);
flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
@@ -1075,22 +874,12 @@ static void __init arch_timer_of_configure_rate(u32 rate, struct device_node *np
pr_warn("frequency not available\n");
}
-static void __init arch_timer_banner(unsigned type)
+static void __init arch_timer_banner(void)
{
- pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
- type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
- type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
- " and " : "",
- type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
+ pr_info("cp15 timer running at %lu.%02luMHz (%s).\n",
(unsigned long)arch_timer_rate / 1000000,
(unsigned long)(arch_timer_rate / 10000) % 100,
- type & ARCH_TIMER_TYPE_CP15 ?
- (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
- "",
- type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
- type & ARCH_TIMER_TYPE_MEM ?
- arch_timer_mem_use_virtual ? "virt" : "phys" :
- "");
+ (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys");
}
u32 arch_timer_get_rate(void)
@@ -1108,11 +897,6 @@ bool arch_timer_evtstrm_available(void)
return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}
-static noinstr u64 arch_counter_get_cntvct_mem(void)
-{
- return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO);
-}
-
static struct arch_timer_kvm_info arch_timer_kvm_info;
struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
@@ -1120,42 +904,35 @@ struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
return &arch_timer_kvm_info;
}
-static void __init arch_counter_register(unsigned type)
+static void __init arch_counter_register(void)
{
u64 (*scr)(void);
+ u64 (*rd)(void);
u64 start_count;
int width;
- /* Register the CP15 based counter if we have one */
- if (type & ARCH_TIMER_TYPE_CP15) {
- u64 (*rd)(void);
-
- if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
- arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
- if (arch_timer_counter_has_wa()) {
- rd = arch_counter_get_cntvct_stable;
- scr = raw_counter_get_cntvct_stable;
- } else {
- rd = arch_counter_get_cntvct;
- scr = arch_counter_get_cntvct;
- }
+ if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
+ arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
+ if (arch_timer_counter_has_wa()) {
+ rd = arch_counter_get_cntvct_stable;
+ scr = raw_counter_get_cntvct_stable;
} else {
- if (arch_timer_counter_has_wa()) {
- rd = arch_counter_get_cntpct_stable;
- scr = raw_counter_get_cntpct_stable;
- } else {
- rd = arch_counter_get_cntpct;
- scr = arch_counter_get_cntpct;
- }
+ rd = arch_counter_get_cntvct;
+ scr = arch_counter_get_cntvct;
}
-
- arch_timer_read_counter = rd;
- clocksource_counter.vdso_clock_mode = vdso_default;
} else {
- arch_timer_read_counter = arch_counter_get_cntvct_mem;
- scr = arch_counter_get_cntvct_mem;
+ if (arch_timer_counter_has_wa()) {
+ rd = arch_counter_get_cntpct_stable;
+ scr = raw_counter_get_cntpct_stable;
+ } else {
+ rd = arch_counter_get_cntpct;
+ scr = arch_counter_get_cntpct;
+ }
}
+ arch_timer_read_counter = rd;
+ clocksource_counter.vdso_clock_mode = vdso_default;
+
width = arch_counter_get_width();
clocksource_counter.mask = CLOCKSOURCE_MASK(width);
cyclecounter.mask = CLOCKSOURCE_MASK(width);
@@ -1303,76 +1080,10 @@ out:
return err;
}
-static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
-{
- int ret;
- irq_handler_t func;
-
- arch_timer_mem = kzalloc(sizeof(*arch_timer_mem), GFP_KERNEL);
- if (!arch_timer_mem)
- return -ENOMEM;
-
- arch_timer_mem->base = base;
- arch_timer_mem->evt.irq = irq;
- __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &arch_timer_mem->evt);
-
- if (arch_timer_mem_use_virtual)
- func = arch_timer_handler_virt_mem;
- else
- func = arch_timer_handler_phys_mem;
-
- ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &arch_timer_mem->evt);
- if (ret) {
- pr_err("Failed to request mem timer irq\n");
- kfree(arch_timer_mem);
- arch_timer_mem = NULL;
- }
-
- return ret;
-}
-
-static const struct of_device_id arch_timer_of_match[] __initconst = {
- { .compatible = "arm,armv7-timer", },
- { .compatible = "arm,armv8-timer", },
- {},
-};
-
-static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
- { .compatible = "arm,armv7-timer-mem", },
- {},
-};
-
-static bool __init arch_timer_needs_of_probing(void)
-{
- struct device_node *dn;
- bool needs_probing = false;
- unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;
-
- /* We have two timers, and both device-tree nodes are probed. */
- if ((arch_timers_present & mask) == mask)
- return false;
-
- /*
- * Only one type of timer is probed,
- * check if we have another type of timer node in device-tree.
- */
- if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
- dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
- else
- dn = of_find_matching_node(NULL, arch_timer_of_match);
-
- if (dn && of_device_is_available(dn))
- needs_probing = true;
-
- of_node_put(dn);
-
- return needs_probing;
-}
-
static int __init arch_timer_common_init(void)
{
- arch_timer_banner(arch_timers_present);
- arch_counter_register(arch_timers_present);
+ arch_timer_banner();
+ arch_counter_register();
return arch_timer_arch_init();
}
@@ -1421,13 +1132,11 @@ static int __init arch_timer_of_init(struct device_node *np)
u32 rate;
bool has_names;
- if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
+ if (arch_timer_evt) {
pr_warn("multiple nodes in dt, skipping\n");
return 0;
}
- arch_timers_present |= ARCH_TIMER_TYPE_CP15;
-
has_names = of_property_present(np, "interrupt-names");
for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++) {
@@ -1472,283 +1181,22 @@ static int __init arch_timer_of_init(struct device_node *np)
if (ret)
return ret;
- if (arch_timer_needs_of_probing())
- return 0;
-
return arch_timer_common_init();
}
TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
-static u32 __init
-arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
-{
- void __iomem *base;
- u32 rate;
-
- base = ioremap(frame->cntbase, frame->size);
- if (!base) {
- pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
- return 0;
- }
-
- rate = readl_relaxed(base + CNTFRQ);
-
- iounmap(base);
-
- return rate;
-}
-
-static struct arch_timer_mem_frame * __init
-arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
-{
- struct arch_timer_mem_frame *frame, *best_frame = NULL;
- void __iomem *cntctlbase;
- u32 cnttidr;
- int i;
-
- cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
- if (!cntctlbase) {
- pr_err("Can't map CNTCTLBase @ %pa\n",
- &timer_mem->cntctlbase);
- return NULL;
- }
-
- cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
-
- /*
- * Try to find a virtual capable frame. Otherwise fall back to a
- * physical capable frame.
- */
- for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
- u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
- CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
-
- frame = &timer_mem->frame[i];
- if (!frame->valid)
- continue;
-
- /* Try enabling everything, and see what sticks */
- writel_relaxed(cntacr, cntctlbase + CNTACR(i));
- cntacr = readl_relaxed(cntctlbase + CNTACR(i));
-
- if ((cnttidr & CNTTIDR_VIRT(i)) &&
- !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
- best_frame = frame;
- arch_timer_mem_use_virtual = true;
- break;
- }
-
- if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
- continue;
-
- best_frame = frame;
- }
-
- iounmap(cntctlbase);
-
- return best_frame;
-}
-
-static int __init
-arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
-{
- void __iomem *base;
- int ret, irq;
-
- if (arch_timer_mem_use_virtual)
- irq = frame->virt_irq;
- else
- irq = frame->phys_irq;
-
- if (!irq) {
- pr_err("Frame missing %s irq.\n",
- arch_timer_mem_use_virtual ? "virt" : "phys");
- return -EINVAL;
- }
-
- if (!request_mem_region(frame->cntbase, frame->size,
- "arch_mem_timer"))
- return -EBUSY;
-
- base = ioremap(frame->cntbase, frame->size);
- if (!base) {
- pr_err("Can't map frame's registers\n");
- return -ENXIO;
- }
-
- ret = arch_timer_mem_register(base, irq);
- if (ret) {
- iounmap(base);
- return ret;
- }
-
- arch_timers_present |= ARCH_TIMER_TYPE_MEM;
-
- return 0;
-}
-
-static int __init arch_timer_mem_of_init(struct device_node *np)
-{
- struct arch_timer_mem *timer_mem;
- struct arch_timer_mem_frame *frame;
- struct resource res;
- int ret = -EINVAL;
- u32 rate;
-
- timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
- if (!timer_mem)
- return -ENOMEM;
-
- if (of_address_to_resource(np, 0, &res))
- goto out;
- timer_mem->cntctlbase = res.start;
- timer_mem->size = resource_size(&res);
-
- for_each_available_child_of_node_scoped(np, frame_node) {
- u32 n;
- struct arch_timer_mem_frame *frame;
-
- if (of_property_read_u32(frame_node, "frame-number", &n)) {
- pr_err(FW_BUG "Missing frame-number.\n");
- goto out;
- }
- if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
- pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
- ARCH_TIMER_MEM_MAX_FRAMES - 1);
- goto out;
- }
- frame = &timer_mem->frame[n];
-
- if (frame->valid) {
- pr_err(FW_BUG "Duplicated frame-number.\n");
- goto out;
- }
-
- if (of_address_to_resource(frame_node, 0, &res))
- goto out;
-
- frame->cntbase = res.start;
- frame->size = resource_size(&res);
-
- frame->virt_irq = irq_of_parse_and_map(frame_node,
- ARCH_TIMER_VIRT_SPI);
- frame->phys_irq = irq_of_parse_and_map(frame_node,
- ARCH_TIMER_PHYS_SPI);
-
- frame->valid = true;
- }
-
- frame = arch_timer_mem_find_best_frame(timer_mem);
- if (!frame) {
- pr_err("Unable to find a suitable frame in timer @ %pa\n",
- &timer_mem->cntctlbase);
- ret = -EINVAL;
- goto out;
- }
-
- rate = arch_timer_mem_frame_get_cntfrq(frame);
- arch_timer_of_configure_rate(rate, np);
-
- ret = arch_timer_mem_frame_register(frame);
- if (!ret && !arch_timer_needs_of_probing())
- ret = arch_timer_common_init();
-out:
- kfree(timer_mem);
- return ret;
-}
-TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
- arch_timer_mem_of_init);
-
#ifdef CONFIG_ACPI_GTDT
-static int __init
-arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
-{
- struct arch_timer_mem_frame *frame;
- u32 rate;
- int i;
-
- for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
- frame = &timer_mem->frame[i];
-
- if (!frame->valid)
- continue;
-
- rate = arch_timer_mem_frame_get_cntfrq(frame);
- if (rate == arch_timer_rate)
- continue;
-
- pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
- &frame->cntbase,
- (unsigned long)rate, (unsigned long)arch_timer_rate);
-
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int __init arch_timer_mem_acpi_init(int platform_timer_count)
-{
- struct arch_timer_mem *timers, *timer;
- struct arch_timer_mem_frame *frame, *best_frame = NULL;
- int timer_count, i, ret = 0;
-
- timers = kcalloc(platform_timer_count, sizeof(*timers),
- GFP_KERNEL);
- if (!timers)
- return -ENOMEM;
-
- ret = acpi_arch_timer_mem_init(timers, &timer_count);
- if (ret || !timer_count)
- goto out;
-
- /*
- * While unlikely, it's theoretically possible that none of the frames
- * in a timer expose the combination of feature we want.
- */
- for (i = 0; i < timer_count; i++) {
- timer = &timers[i];
-
- frame = arch_timer_mem_find_best_frame(timer);
- if (!best_frame)
- best_frame = frame;
-
- ret = arch_timer_mem_verify_cntfrq(timer);
- if (ret) {
- pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
- goto out;
- }
-
- if (!best_frame) /* implies !frame */
- /*
- * Only complain about missing suitable frames if we
- * haven't already found one in a previous iteration.
- */
- pr_err("Unable to find a suitable frame in timer @ %pa\n",
- &timer->cntctlbase);
- }
-
- if (best_frame)
- ret = arch_timer_mem_frame_register(best_frame);
-out:
- kfree(timers);
- return ret;
-}
-
-/* Initialize per-processor generic timer and memory-mapped timer(if present) */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
- int ret, platform_timer_count;
+ int ret;
- if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
+ if (arch_timer_evt) {
pr_warn("already initialized, skipping\n");
return -EINVAL;
}
- arch_timers_present |= ARCH_TIMER_TYPE_CP15;
-
- ret = acpi_gtdt_init(table, &platform_timer_count);
+ ret = acpi_gtdt_init(table, NULL);
if (ret)
return ret;
@@ -1790,10 +1238,6 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table)
if (ret)
return ret;
- if (platform_timer_count &&
- arch_timer_mem_acpi_init(platform_timer_count))
- pr_err("Failed to initialize memory-mapped timer.\n");
-
return arch_timer_common_init();
}
TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
diff --git a/drivers/clocksource/arm_arch_timer_mmio.c b/drivers/clocksource/arm_arch_timer_mmio.c
new file mode 100644
index 000000000000..d10362692fdd
--- /dev/null
+++ b/drivers/clocksource/arm_arch_timer_mmio.c
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM Generic Memory Mapped Timer support
+ *
+ * Split from drivers/clocksource/arm_arch_timer.c
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * All Rights Reserved
+ */
+
+#define pr_fmt(fmt) "arch_timer_mmio: " fmt
+
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include <clocksource/arm_arch_timer.h>
+
+#define CNTTIDR 0x08
+#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
+
+#define CNTACR(n) (0x40 + ((n) * 4))
+#define CNTACR_RPCT BIT(0)
+#define CNTACR_RVCT BIT(1)
+#define CNTACR_RFRQ BIT(2)
+#define CNTACR_RVOFF BIT(3)
+#define CNTACR_RWVT BIT(4)
+#define CNTACR_RWPT BIT(5)
+
+#define CNTPCT_LO 0x00
+#define CNTVCT_LO 0x08
+#define CNTFRQ 0x10
+#define CNTP_CVAL_LO 0x20
+#define CNTP_CTL 0x2c
+#define CNTV_CVAL_LO 0x30
+#define CNTV_CTL 0x3c
+
+enum arch_timer_access {
+ PHYS_ACCESS,
+ VIRT_ACCESS,
+};
+
+struct arch_timer {
+ struct clock_event_device evt;
+ struct clocksource cs;
+ struct arch_timer_mem *gt_block;
+ void __iomem *base;
+ enum arch_timer_access access;
+ u32 rate;
+};
+
+#define evt_to_arch_timer(e) container_of(e, struct arch_timer, evt)
+#define cs_to_arch_timer(c) container_of(c, struct arch_timer, cs)
+
+static void arch_timer_mmio_write(struct arch_timer *timer,
+ enum arch_timer_reg reg, u64 val)
+{
+ switch (timer->access) {
+ case PHYS_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ writel_relaxed((u32)val, timer->base + CNTP_CTL);
+ return;
+ case ARCH_TIMER_REG_CVAL:
+ /*
+ * Not guaranteed to be atomic, so the timer
+ * must be disabled at this point.
+ */
+ writeq_relaxed(val, timer->base + CNTP_CVAL_LO);
+ return;
+ }
+ break;
+ case VIRT_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ writel_relaxed((u32)val, timer->base + CNTV_CTL);
+ return;
+ case ARCH_TIMER_REG_CVAL:
+ /* Same restriction as above */
+ writeq_relaxed(val, timer->base + CNTV_CVAL_LO);
+ return;
+ }
+ break;
+ }
+
+ /* Should never be here */
+ WARN_ON_ONCE(1);
+}
+
+static u32 arch_timer_mmio_read(struct arch_timer *timer, enum arch_timer_reg reg)
+{
+ switch (timer->access) {
+ case PHYS_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ return readl_relaxed(timer->base + CNTP_CTL);
+ default:
+ break;
+ }
+ break;
+ case VIRT_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ return readl_relaxed(timer->base + CNTV_CTL);
+ default:
+ break;
+ }
+ break;
+ }
+
+ /* Should never be here */
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static noinstr u64 arch_counter_mmio_get_cnt(struct arch_timer *t)
+{
+ int offset_lo = t->access == VIRT_ACCESS ? CNTVCT_LO : CNTPCT_LO;
+ u32 cnt_lo, cnt_hi, tmp_hi;
+
+ do {
+ cnt_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
+ cnt_lo = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo));
+ tmp_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
+ } while (cnt_hi != tmp_hi);
+
+ return ((u64) cnt_hi << 32) | cnt_lo;
+}
+
+static u64 arch_mmio_counter_read(struct clocksource *cs)
+{
+ struct arch_timer *at = cs_to_arch_timer(cs);
+
+ return arch_counter_mmio_get_cnt(at);
+}
+
+static int arch_timer_mmio_shutdown(struct clock_event_device *clk)
+{
+ struct arch_timer *at = evt_to_arch_timer(clk);
+ unsigned long ctrl;
+
+ ctrl = arch_timer_mmio_read(at, ARCH_TIMER_REG_CTRL);
+ ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+ arch_timer_mmio_write(at, ARCH_TIMER_REG_CTRL, ctrl);
+
+ return 0;
+}
+
+static int arch_timer_mmio_set_next_event(unsigned long evt,
+ struct clock_event_device *clk)
+{
+ struct arch_timer *timer = evt_to_arch_timer(clk);
+ unsigned long ctrl;
+ u64 cnt;
+
+ ctrl = arch_timer_mmio_read(timer, ARCH_TIMER_REG_CTRL);
+
+ /* Timer must be disabled before programming CVAL */
+ if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
+ ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+ arch_timer_mmio_write(timer, ARCH_TIMER_REG_CTRL, ctrl);
+ }
+
+ ctrl |= ARCH_TIMER_CTRL_ENABLE;
+ ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+
+ cnt = arch_counter_mmio_get_cnt(timer);
+
+ arch_timer_mmio_write(timer, ARCH_TIMER_REG_CVAL, evt + cnt);
+ arch_timer_mmio_write(timer, ARCH_TIMER_REG_CTRL, ctrl);
+ return 0;
+}
+
+static irqreturn_t arch_timer_mmio_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ struct arch_timer *at = evt_to_arch_timer(evt);
+ unsigned long ctrl;
+
+ ctrl = arch_timer_mmio_read(at, ARCH_TIMER_REG_CTRL);
+ if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
+ ctrl |= ARCH_TIMER_CTRL_IT_MASK;
+ arch_timer_mmio_write(at, ARCH_TIMER_REG_CTRL, ctrl);
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static struct arch_timer_mem_frame *find_best_frame(struct platform_device *pdev)
+{
+ struct arch_timer_mem_frame *frame, *best_frame = NULL;
+ struct arch_timer *at = platform_get_drvdata(pdev);
+ void __iomem *cntctlbase;
+ u32 cnttidr;
+
+ cntctlbase = ioremap(at->gt_block->cntctlbase, at->gt_block->size);
+ if (!cntctlbase) {
+ dev_err(&pdev->dev, "Can't map CNTCTLBase @ %pa\n",
+ &at->gt_block->cntctlbase);
+ return NULL;
+ }
+
+ cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
+
+ /*
+ * Try to find a virtual capable frame. Otherwise fall back to a
+ * physical capable frame.
+ */
+ for (int i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
+ u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
+ CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
+
+ frame = &at->gt_block->frame[i];
+ if (!frame->valid)
+ continue;
+
+ /* Try enabling everything, and see what sticks */
+ writel_relaxed(cntacr, cntctlbase + CNTACR(i));
+ cntacr = readl_relaxed(cntctlbase + CNTACR(i));
+
+ /* Pick a suitable frame for which we have an IRQ */
+ if ((cnttidr & CNTTIDR_VIRT(i)) &&
+ !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT)) &&
+ frame->virt_irq) {
+ best_frame = frame;
+ at->access = VIRT_ACCESS;
+ break;
+ }
+
+ if ((~cntacr & (CNTACR_RWPT | CNTACR_RPCT)) ||
+ !frame->phys_irq)
+ continue;
+
+ at->access = PHYS_ACCESS;
+ best_frame = frame;
+ }
+
+ iounmap(cntctlbase);
+
+ return best_frame;
+}
+
+static void arch_timer_mmio_setup(struct arch_timer *at, int irq)
+{
+ at->evt = (struct clock_event_device) {
+ .features = (CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_DYNIRQ),
+ .name = "arch_mem_timer",
+ .rating = 400,
+ .cpumask = cpu_possible_mask,
+ .irq = irq,
+ .set_next_event = arch_timer_mmio_set_next_event,
+ .set_state_oneshot_stopped = arch_timer_mmio_shutdown,
+ .set_state_shutdown = arch_timer_mmio_shutdown,
+ };
+
+ at->evt.set_state_shutdown(&at->evt);
+
+ clockevents_config_and_register(&at->evt, at->rate, 0xf,
+ (unsigned long)CLOCKSOURCE_MASK(56));
+
+ enable_irq(at->evt.irq);
+
+ at->cs = (struct clocksource) {
+ .name = "arch_mmio_counter",
+ .rating = 300,
+ .read = arch_mmio_counter_read,
+ .mask = CLOCKSOURCE_MASK(56),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ };
+
+ clocksource_register_hz(&at->cs, at->rate);
+}
+
+static int arch_timer_mmio_frame_register(struct platform_device *pdev,
+ struct arch_timer_mem_frame *frame)
+{
+ struct arch_timer *at = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ int ret, irq;
+ u32 rate;
+
+ if (!devm_request_mem_region(&pdev->dev, frame->cntbase, frame->size,
+ "arch_mem_timer"))
+ return -EBUSY;
+
+ at->base = devm_ioremap(&pdev->dev, frame->cntbase, frame->size);
+ if (!at->base) {
+ dev_err(&pdev->dev, "Can't map frame's registers\n");
+ return -ENXIO;
+ }
+
+ /*
+ * Allow "clock-frequency" to override the probed rate. If neither
+	 * leads to something useful, use the CPU timer frequency as the
+	 * fallback. The nice thing about that last point is that we
+	 * wouldn't have made it here if we didn't have a valid frequency.
+ */
+ rate = readl_relaxed(at->base + CNTFRQ);
+
+ if (!np || of_property_read_u32(np, "clock-frequency", &at->rate))
+ at->rate = rate;
+
+ if (!at->rate)
+ at->rate = arch_timer_get_rate();
+
+ irq = at->access == VIRT_ACCESS ? frame->virt_irq : frame->phys_irq;
+ ret = devm_request_irq(&pdev->dev, irq, arch_timer_mmio_handler,
+ IRQF_TIMER | IRQF_NO_AUTOEN, "arch_mem_timer",
+ &at->evt);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request mem timer irq\n");
+ return ret;
+ }
+
+	/* After this point, we're not allowed to fail anymore */
+ arch_timer_mmio_setup(at, irq);
+ return 0;
+}
+
+static int of_populate_gt_block(struct platform_device *pdev,
+ struct arch_timer *at)
+{
+ struct resource res;
+
+ if (of_address_to_resource(pdev->dev.of_node, 0, &res))
+ return -EINVAL;
+
+ at->gt_block->cntctlbase = res.start;
+ at->gt_block->size = resource_size(&res);
+
+ for_each_available_child_of_node_scoped(pdev->dev.of_node, frame_node) {
+ struct arch_timer_mem_frame *frame;
+ u32 n;
+
+ if (of_property_read_u32(frame_node, "frame-number", &n)) {
+ dev_err(&pdev->dev, FW_BUG "Missing frame-number\n");
+ return -EINVAL;
+ }
+ if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
+ dev_err(&pdev->dev,
+ FW_BUG "Wrong frame-number, only 0-%u are permitted\n",
+ ARCH_TIMER_MEM_MAX_FRAMES - 1);
+ return -EINVAL;
+ }
+
+ frame = &at->gt_block->frame[n];
+
+ if (frame->valid) {
+ dev_err(&pdev->dev, FW_BUG "Duplicated frame-number\n");
+ return -EINVAL;
+ }
+
+ if (of_address_to_resource(frame_node, 0, &res))
+ return -EINVAL;
+
+ frame->cntbase = res.start;
+ frame->size = resource_size(&res);
+
+ frame->phys_irq = irq_of_parse_and_map(frame_node, 0);
+ frame->virt_irq = irq_of_parse_and_map(frame_node, 1);
+
+ frame->valid = true;
+ }
+
+ return 0;
+}
+
+static int arch_timer_mmio_probe(struct platform_device *pdev)
+{
+ struct arch_timer_mem_frame *frame;
+ struct arch_timer *at;
+ struct device_node *np;
+ int ret;
+
+ np = pdev->dev.of_node;
+
+ at = devm_kmalloc(&pdev->dev, sizeof(*at), GFP_KERNEL | __GFP_ZERO);
+ if (!at)
+ return -ENOMEM;
+
+ if (np) {
+ at->gt_block = devm_kmalloc(&pdev->dev, sizeof(*at->gt_block),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!at->gt_block)
+ return -ENOMEM;
+ ret = of_populate_gt_block(pdev, at);
+ if (ret)
+ return ret;
+ } else {
+ at->gt_block = dev_get_platdata(&pdev->dev);
+ }
+
+ platform_set_drvdata(pdev, at);
+
+ frame = find_best_frame(pdev);
+ if (!frame) {
+ dev_err(&pdev->dev,
+ "Unable to find a suitable frame in timer @ %pa\n",
+ &at->gt_block->cntctlbase);
+ return -EINVAL;
+ }
+
+ ret = arch_timer_mmio_frame_register(pdev, frame);
+ if (!ret)
+ dev_info(&pdev->dev,
+ "mmio timer running at %lu.%02luMHz (%s)\n",
+ (unsigned long)at->rate / 1000000,
+ (unsigned long)(at->rate / 10000) % 100,
+ at->access == VIRT_ACCESS ? "virt" : "phys");
+
+ return ret;
+}
+
+static const struct of_device_id arch_timer_mmio_of_table[] = {
+ { .compatible = "arm,armv7-timer-mem", },
+ {}
+};
+
+static struct platform_driver arch_timer_mmio_drv = {
+ .driver = {
+ .name = "arch-timer-mmio",
+ .of_match_table = arch_timer_mmio_of_table,
+ .suppress_bind_attrs = true,
+ },
+ .probe = arch_timer_mmio_probe,
+};
+builtin_platform_driver(arch_timer_mmio_drv);
+
+static struct platform_driver arch_timer_mmio_acpi_drv = {
+ .driver = {
+ .name = "gtdt-arm-mmio-timer",
+ .suppress_bind_attrs = true,
+ },
+ .probe = arch_timer_mmio_probe,
+};
+builtin_platform_driver(arch_timer_mmio_acpi_drv);
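The counter read path is the interesting part of the new file: a 64-bit MMIO counter exposed as two 32-bit halves cannot be read atomically, so arch_counter_mmio_get_cnt() uses the classic hi/lo/hi sequence, re-reading the high word until it is stable across the low-word read. A self-contained sketch of the same technique (hypothetical helper name, not part of the patch):

#include <linux/io.h>
#include <linux/types.h>

static u64 read_split_counter(void __iomem *lo, void __iomem *hi)
{
	u32 cnt_lo, cnt_hi, tmp_hi;

	do {
		cnt_hi = readl_relaxed(hi);	/* first high-word sample */
		cnt_lo = readl_relaxed(lo);	/* low word */
		tmp_hi = readl_relaxed(hi);	/* second high-word sample */
	} while (cnt_hi != tmp_hi);		/* low word carried over: retry */

	return ((u64)cnt_hi << 32) | cnt_lo;
}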
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 2d86bbc2764a..5e3d6bb7e437 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -263,14 +263,13 @@ static void __init gt_delay_timer_init(void)
register_current_timer_delay(&gt_delay_timer);
}
-static int __init gt_clocksource_init(void)
+static int __init gt_clocksource_init(unsigned int psv)
{
writel(0, gt_base + GT_CONTROL);
writel(0, gt_base + GT_COUNTER0);
writel(0, gt_base + GT_COUNTER1);
/* set prescaler and enable timer on all the cores */
- writel(FIELD_PREP(GT_CONTROL_PRESCALER_MASK,
- CONFIG_ARM_GT_INITIAL_PRESCALER_VAL - 1) |
+ writel(FIELD_PREP(GT_CONTROL_PRESCALER_MASK, psv - 1) |
GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
@@ -338,11 +337,45 @@ static int gt_clk_rate_change_cb(struct notifier_block *nb,
return NOTIFY_DONE;
}
+struct gt_prescaler_config {
+ const char *compatible;
+ unsigned long prescaler;
+};
+
+static const struct gt_prescaler_config gt_prescaler_configs[] = {
+ /*
+ * On am43 the global timer clock is a child of the clock used for CPU
+	 * OPPs, so the initial prescaler has to be compatible with all OPPs:
+	 * 300, 600, 720, 800 and 1000 MHz, with a fixed divider of 2. This
+	 * gives a GCD of 10 MHz. The initial frequency is 1000 MHz, so the
+	 * prescaler is 50.
+ */
+ { .compatible = "ti,am43", .prescaler = 50 },
+ { .compatible = "xlnx,zynq-7000", .prescaler = 2 },
+ { .compatible = NULL }
+};
+
+static unsigned long gt_get_initial_prescaler_value(struct device_node *np)
+{
+ const struct gt_prescaler_config *config;
+
+ if (CONFIG_ARM_GT_INITIAL_PRESCALER_VAL != 0)
+ return CONFIG_ARM_GT_INITIAL_PRESCALER_VAL;
+
+ for (config = gt_prescaler_configs; config->compatible; config++) {
+ if (of_machine_is_compatible(config->compatible))
+ return config->prescaler;
+ }
+
+ return 1;
+}
+
static int __init global_timer_of_register(struct device_node *np)
{
struct clk *gt_clk;
static unsigned long gt_clk_rate;
int err;
+ unsigned long psv;
/*
* In A9 r2p0 the comparators for each processor with the global timer
@@ -378,8 +411,9 @@ static int __init global_timer_of_register(struct device_node *np)
goto out_unmap;
}
+ psv = gt_get_initial_prescaler_value(np);
gt_clk_rate = clk_get_rate(gt_clk);
- gt_target_rate = gt_clk_rate / CONFIG_ARM_GT_INITIAL_PRESCALER_VAL;
+ gt_target_rate = gt_clk_rate / psv;
gt_clk_rate_change_nb.notifier_call =
gt_clk_rate_change_cb;
err = clk_notifier_register(gt_clk, &gt_clk_rate_change_nb);
@@ -404,7 +438,7 @@ static int __init global_timer_of_register(struct device_node *np)
}
/* Register and immediately configure the timer on the boot CPU */
- err = gt_clocksource_init();
+ err = gt_clocksource_init(psv);
if (err)
goto out_irq;
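To make the am43 table entry concrete: the CPU OPPs are 300, 600, 720, 800 and 1000 MHz and the global timer clock is OPP/2, i.e. 150, 300, 360, 400 and 500 MHz, whose greatest common divisor is 10 MHz. Booting at the 1000 MHz OPP gives a 500 MHz timer clock, so a prescaler of 500 / 10 = 50 produces a gt_target_rate of 10 MHz that every OPP can still reach exactly.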
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
index e95fdc49c226..bbceb0289d45 100644
--- a/drivers/clocksource/clps711x-timer.c
+++ b/drivers/clocksource/clps711x-timer.c
@@ -78,24 +78,33 @@ static int __init clps711x_timer_init(struct device_node *np)
unsigned int irq = irq_of_parse_and_map(np, 0);
struct clk *clock = of_clk_get(np, 0);
void __iomem *base = of_iomap(np, 0);
+ int ret = 0;
if (!base)
return -ENOMEM;
- if (!irq)
- return -EINVAL;
- if (IS_ERR(clock))
- return PTR_ERR(clock);
+ if (!irq) {
+ ret = -EINVAL;
+ goto unmap_io;
+ }
+ if (IS_ERR(clock)) {
+ ret = PTR_ERR(clock);
+ goto unmap_io;
+ }
switch (of_alias_get_id(np, "timer")) {
case CLPS711X_CLKSRC_CLOCKSOURCE:
clps711x_clksrc_init(clock, base);
break;
case CLPS711X_CLKSRC_CLOCKEVENT:
- return _clps711x_clkevt_init(clock, base, irq);
+ ret = _clps711x_clkevt_init(clock, base, irq);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
- return 0;
+unmap_io:
+ iounmap(base);
+ return ret;
}
TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init);
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 09549451dd51..10356d4ec55c 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -22,6 +22,7 @@
#include <linux/irq.h>
#include <linux/acpi.h>
#include <linux/hyperv.h>
+#include <linux/export.h>
#include <clocksource/hyperv_timer.h>
#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
@@ -548,14 +549,22 @@ static void __init hv_init_tsc_clocksource(void)
union hv_reference_tsc_msr tsc_msr;
/*
+ * When running as a guest partition:
+ *
* If Hyper-V offers TSC_INVARIANT, then the virtualized TSC correctly
* handles frequency and offset changes due to live migration,
* pause/resume, and other VM management operations. So lower the
* Hyper-V Reference TSC rating, causing the generic TSC to be used.
* TSC_INVARIANT is not offered on ARM64, so the Hyper-V Reference
* TSC will be preferred over the virtualized ARM64 arch counter.
+ *
+ * When running as the root partition:
+ *
+ * There is no HV_ACCESS_TSC_INVARIANT feature. Always lower the rating
+ * of the Hyper-V Reference TSC.
*/
- if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
+ if ((ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) ||
+ hv_root_partition()) {
hyperv_cs_tsc.rating = 250;
hyperv_cs_msr.rating = 245;
}
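For context on the ratings: the generic x86 TSC clocksource registers at rating 300, so dropping the Hyper-V Reference TSC to 250 (and the MSR-based variant to 245) is what makes the clocksource core prefer the TSC. With this change the demotion also applies unconditionally in the root partition, where HV_ACCESS_TSC_INVARIANT is never offered.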
diff --git a/drivers/clocksource/ingenic-sysost.c b/drivers/clocksource/ingenic-sysost.c
index cb6fc2f152d4..e79cfb0b8e05 100644
--- a/drivers/clocksource/ingenic-sysost.c
+++ b/drivers/clocksource/ingenic-sysost.c
@@ -127,18 +127,23 @@ static u8 ingenic_ost_get_prescale(unsigned long rate, unsigned long req_rate)
return 2; /* /16 divider */
}
-static long ingenic_ost_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *parent_rate)
+static int ingenic_ost_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long rate = *parent_rate;
+ unsigned long rate = req->best_parent_rate;
u8 prescale;
- if (req_rate > rate)
- return rate;
+ if (req->rate > rate) {
+ req->rate = rate;
- prescale = ingenic_ost_get_prescale(rate, req_rate);
+ return 0;
+ }
+
+ prescale = ingenic_ost_get_prescale(rate, req->rate);
- return rate >> (prescale * 2);
+ req->rate = rate >> (prescale * 2);
+
+ return 0;
}
static int ingenic_ost_percpu_timer_set_rate(struct clk_hw *hw, unsigned long req_rate,
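A quick worked instance of the shift arithmetic above, as a hedged standalone sketch: the 48 MHz parent rate is an assumption for illustration only; the `rate >> (prescale * 2)` step is taken from the code.

    /* Minimal sketch of the OST prescale math, assuming a 48 MHz parent. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long rate = 48000000; /* assumed parent rate */
        unsigned char prescale = 2;    /* /16 divider, as returned above */

        /* each prescale step divides by 4, i.e. shifts right by two bits */
        printf("%lu\n", rate >> (prescale * 2)); /* prints 3000000 */
        return 0;
    }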
@@ -175,14 +180,14 @@ static int ingenic_ost_global_timer_set_rate(struct clk_hw *hw, unsigned long re
static const struct clk_ops ingenic_ost_percpu_timer_ops = {
.recalc_rate = ingenic_ost_percpu_timer_recalc_rate,
- .round_rate = ingenic_ost_round_rate,
- .set_rate = ingenic_ost_percpu_timer_set_rate,
+ .determine_rate = ingenic_ost_determine_rate,
+ .set_rate = ingenic_ost_percpu_timer_set_rate,
};
static const struct clk_ops ingenic_ost_global_timer_ops = {
.recalc_rate = ingenic_ost_global_timer_recalc_rate,
- .round_rate = ingenic_ost_round_rate,
- .set_rate = ingenic_ost_global_timer_set_rate,
+ .determine_rate = ingenic_ost_determine_rate,
+ .set_rate = ingenic_ost_global_timer_set_rate,
};
static const char * const ingenic_ost_clk_parents[] = { "ext" };
diff --git a/drivers/clocksource/scx200_hrt.c b/drivers/clocksource/scx200_hrt.c
index c3536fffbe9a..5a99801a1657 100644
--- a/drivers/clocksource/scx200_hrt.c
+++ b/drivers/clocksource/scx200_hrt.c
@@ -52,6 +52,7 @@ static struct clocksource cs_hrt = {
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
/* mult, shift are set based on mhz27 flag */
+ .owner = THIS_MODULE,
};
static int __init init_hrt_clocksource(void)
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index b72b36e0abed..791b298c995b 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -355,14 +355,6 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch)
dev_pm_syscore_device(&ch->cmt->pdev->dev, true);
- /* enable clock */
- ret = clk_enable(ch->cmt->clk);
- if (ret) {
- dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
- ch->index);
- goto err0;
- }
-
/* make sure channel is disabled */
sh_cmt_start_stop_ch(ch, 0);
@@ -384,19 +376,12 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch)
if (ret || sh_cmt_read_cmcnt(ch)) {
dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
ch->index);
- ret = -ETIMEDOUT;
- goto err1;
+ return -ETIMEDOUT;
}
/* enable channel */
sh_cmt_start_stop_ch(ch, 1);
return 0;
- err1:
- /* stop clock */
- clk_disable(ch->cmt->clk);
-
- err0:
- return ret;
}
static void sh_cmt_disable(struct sh_cmt_channel *ch)
@@ -407,9 +392,6 @@ static void sh_cmt_disable(struct sh_cmt_channel *ch)
/* disable interrupts in CMT block */
sh_cmt_write_cmcsr(ch, 0);
- /* stop clock */
- clk_disable(ch->cmt->clk);
-
dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
}
@@ -578,37 +560,68 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
+static int sh_cmt_start_clocksource(struct sh_cmt_channel *ch)
{
int ret = 0;
unsigned long flags;
- if (flag & FLAG_CLOCKSOURCE)
- pm_runtime_get_sync(&ch->cmt->pdev->dev);
-
raw_spin_lock_irqsave(&ch->lock, flags);
- if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
- if (flag & FLAG_CLOCKEVENT)
- pm_runtime_get_sync(&ch->cmt->pdev->dev);
+ if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
ret = sh_cmt_enable(ch);
- }
if (ret)
goto out;
- ch->flags |= flag;
+
+ ch->flags |= FLAG_CLOCKSOURCE;
/* setup timeout if no clockevent */
- if (ch->cmt->num_channels == 1 &&
- flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT)))
+ if (ch->cmt->num_channels == 1 && !(ch->flags & FLAG_CLOCKEVENT))
__sh_cmt_set_next(ch, ch->max_match_value);
+out:
+ raw_spin_unlock_irqrestore(&ch->lock, flags);
+
+ return ret;
+}
+
+static void sh_cmt_stop_clocksource(struct sh_cmt_channel *ch)
+{
+ unsigned long flags;
+ unsigned long f;
+
+ raw_spin_lock_irqsave(&ch->lock, flags);
+
+ f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
+
+ ch->flags &= ~FLAG_CLOCKSOURCE;
+
+ if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
+ sh_cmt_disable(ch);
+
+ raw_spin_unlock_irqrestore(&ch->lock, flags);
+}
+
+static int sh_cmt_start_clockevent(struct sh_cmt_channel *ch)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&ch->lock, flags);
+
+ if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
+ ret = sh_cmt_enable(ch);
+
+ if (ret)
+ goto out;
+
+ ch->flags |= FLAG_CLOCKEVENT;
out:
raw_spin_unlock_irqrestore(&ch->lock, flags);
return ret;
}
-static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
+static void sh_cmt_stop_clockevent(struct sh_cmt_channel *ch)
{
unsigned long flags;
unsigned long f;
@@ -616,22 +629,17 @@ static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
raw_spin_lock_irqsave(&ch->lock, flags);
f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
- ch->flags &= ~flag;
- if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
+ ch->flags &= ~FLAG_CLOCKEVENT;
+
+ if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
sh_cmt_disable(ch);
- if (flag & FLAG_CLOCKEVENT)
- pm_runtime_put(&ch->cmt->pdev->dev);
- }
/* adjust the timeout to maximum if only clocksource left */
- if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
+ if (ch->flags & FLAG_CLOCKSOURCE)
__sh_cmt_set_next(ch, ch->max_match_value);
raw_spin_unlock_irqrestore(&ch->lock, flags);
-
- if (flag & FLAG_CLOCKSOURCE)
- pm_runtime_put(&ch->cmt->pdev->dev);
}
static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
@@ -672,7 +680,7 @@ static int sh_cmt_clocksource_enable(struct clocksource *cs)
ch->total_cycles = 0;
- ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
+ ret = sh_cmt_start_clocksource(ch);
if (!ret)
ch->cs_enabled = true;
@@ -685,7 +693,7 @@ static void sh_cmt_clocksource_disable(struct clocksource *cs)
WARN_ON(!ch->cs_enabled);
- sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
+ sh_cmt_stop_clocksource(ch);
ch->cs_enabled = false;
}
@@ -696,7 +704,7 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
if (!ch->cs_enabled)
return;
- sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
+ sh_cmt_stop_clocksource(ch);
dev_pm_genpd_suspend(&ch->cmt->pdev->dev);
}
@@ -708,7 +716,7 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
return;
dev_pm_genpd_resume(&ch->cmt->pdev->dev);
- sh_cmt_start(ch, FLAG_CLOCKSOURCE);
+ sh_cmt_start_clocksource(ch);
}
static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
@@ -740,7 +748,7 @@ static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
{
- sh_cmt_start(ch, FLAG_CLOCKEVENT);
+ sh_cmt_start_clockevent(ch);
if (periodic)
sh_cmt_set_next(ch, ((ch->cmt->rate + HZ/2) / HZ) - 1);
@@ -752,7 +760,7 @@ static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced)
{
struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
- sh_cmt_stop(ch, FLAG_CLOCKEVENT);
+ sh_cmt_stop_clockevent(ch);
return 0;
}
@@ -763,7 +771,7 @@ static int sh_cmt_clock_event_set_state(struct clock_event_device *ced,
/* deal with old setting first */
if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
- sh_cmt_stop(ch, FLAG_CLOCKEVENT);
+ sh_cmt_stop_clockevent(ch);
dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n",
ch->index, periodic ? "periodic" : "oneshot");
@@ -1100,8 +1108,6 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
mask &= ~(1 << hwidx);
}
- clk_disable(cmt->clk);
-
platform_set_drvdata(pdev, cmt);
return 0;
@@ -1149,8 +1155,6 @@ static int sh_cmt_probe(struct platform_device *pdev)
out:
if (cmt->has_clockevent || cmt->has_clocksource)
pm_runtime_irq_safe(&pdev->dev);
- else
- pm_runtime_idle(&pdev->dev);
return 0;
}
diff --git a/drivers/clocksource/timer-armada-370-xp.c b/drivers/clocksource/timer-armada-370-xp.c
index 54284c1c0651..f2b4cc40db93 100644
--- a/drivers/clocksource/timer-armada-370-xp.c
+++ b/drivers/clocksource/timer-armada-370-xp.c
@@ -207,14 +207,14 @@ static int armada_370_xp_timer_dying_cpu(unsigned int cpu)
static u32 timer0_ctrl_reg, timer0_local_ctrl_reg;
-static int armada_370_xp_timer_suspend(void)
+static int armada_370_xp_timer_suspend(void *data)
{
timer0_ctrl_reg = readl(timer_base + TIMER_CTRL_OFF);
timer0_local_ctrl_reg = readl(local_base + TIMER_CTRL_OFF);
return 0;
}
-static void armada_370_xp_timer_resume(void)
+static void armada_370_xp_timer_resume(void *data)
{
writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
@@ -222,11 +222,15 @@ static void armada_370_xp_timer_resume(void)
writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF);
}
-static struct syscore_ops armada_370_xp_timer_syscore_ops = {
+static const struct syscore_ops armada_370_xp_timer_syscore_ops = {
.suspend = armada_370_xp_timer_suspend,
.resume = armada_370_xp_timer_resume,
};
+static struct syscore armada_370_xp_timer_syscore = {
+ .ops = &armada_370_xp_timer_syscore_ops,
+};
+
static unsigned long armada_370_delay_timer_read(void)
{
return ~readl(timer_base + TIMER0_VAL_OFF);
@@ -324,7 +328,7 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np)
return res;
}
- register_syscore_ops(&armada_370_xp_timer_syscore_ops);
+ register_syscore(&armada_370_xp_timer_syscore);
return 0;
}
diff --git a/drivers/clocksource/timer-cs5535.c b/drivers/clocksource/timer-cs5535.c
index d47acfe848ae..8af666c39890 100644
--- a/drivers/clocksource/timer-cs5535.c
+++ b/drivers/clocksource/timer-cs5535.c
@@ -101,6 +101,7 @@ static struct clock_event_device cs5535_clockevent = {
.tick_resume = mfgpt_shutdown,
.set_next_event = mfgpt_next_event,
.rating = 250,
+ .owner = THIS_MODULE,
};
static irqreturn_t mfgpt_tick(int irq, void *dev_id)
diff --git a/drivers/clocksource/timer-econet-en751221.c b/drivers/clocksource/timer-econet-en751221.c
index 3b449fdaafee..4008076b1a21 100644
--- a/drivers/clocksource/timer-econet-en751221.c
+++ b/drivers/clocksource/timer-econet-en751221.c
@@ -146,7 +146,7 @@ static int __init cevt_init(struct device_node *np)
for_each_possible_cpu(i) {
struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, i);
- cd->rating = 310,
+ cd->rating = 310;
cd->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_C3STOP |
CLOCK_EVT_FEAT_PERCPU;
diff --git a/drivers/clocksource/timer-nxp-pit.c b/drivers/clocksource/timer-nxp-pit.c
new file mode 100644
index 000000000000..d1740f18f718
--- /dev/null
+++ b/drivers/clocksource/timer-nxp-pit.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright 2018,2021-2025 NXP
+ */
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/cpuhotplug.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+#include <linux/platform_device.h>
+
+/*
+ * Each PIT channel occupies 0x10 bytes of register space
+ */
+#define PIT0_OFFSET 0x100
+#define PIT_CH(n) (PIT0_OFFSET + 0x10 * (n))
+
+#define PITMCR(__base) (__base)
+
+#define PITMCR_FRZ BIT(0)
+#define PITMCR_MDIS BIT(1)
+
+#define PITLDVAL(__base) (__base)
+#define PITTCTRL(__base) ((__base) + 0x08)
+
+#define PITCVAL_OFFSET 0x04
+#define PITCVAL(__base)		((__base) + PITCVAL_OFFSET)
+
+#define PITTCTRL_TEN BIT(0)
+#define PITTCTRL_TIE BIT(1)
+
+#define PITTFLG(__base) ((__base) + 0x0c)
+
+#define PITTFLG_TIF BIT(0)
+
+struct pit_timer {
+ void __iomem *clksrc_base;
+ void __iomem *clkevt_base;
+ struct clock_event_device ced;
+ struct clocksource cs;
+ int rate;
+};
+
+struct pit_timer_data {
+ int max_pit_instances;
+};
+
+static DEFINE_PER_CPU(struct pit_timer *, pit_timers);
+
+/*
+ * Global state for initializing multiple PIT instances
+ */
+static int pit_instances;
+static int max_pit_instances = 1;
+
+static void __iomem *sched_clock_base;
+
+static inline struct pit_timer *ced_to_pit(struct clock_event_device *ced)
+{
+ return container_of(ced, struct pit_timer, ced);
+}
+
+static inline struct pit_timer *cs_to_pit(struct clocksource *cs)
+{
+ return container_of(cs, struct pit_timer, cs);
+}
+
+static inline void pit_module_enable(void __iomem *base)
+{
+ writel(0, PITMCR(base));
+}
+
+static inline void pit_module_disable(void __iomem *base)
+{
+ writel(PITMCR_MDIS, PITMCR(base));
+}
+
+static inline void pit_timer_enable(void __iomem *base, bool tie)
+{
+ u32 val = PITTCTRL_TEN | (tie ? PITTCTRL_TIE : 0);
+
+ writel(val, PITTCTRL(base));
+}
+
+static inline void pit_timer_disable(void __iomem *base)
+{
+ writel(0, PITTCTRL(base));
+}
+
+static inline void pit_timer_set_counter(void __iomem *base, unsigned int cnt)
+{
+ writel(cnt, PITLDVAL(base));
+}
+
+static inline void pit_timer_irqack(struct pit_timer *pit)
+{
+ writel(PITTFLG_TIF, PITTFLG(pit->clkevt_base));
+}
+
+static u64 notrace pit_read_sched_clock(void)
+{
+ return ~readl(sched_clock_base);
+}
+
+static u64 pit_timer_clocksource_read(struct clocksource *cs)
+{
+ struct pit_timer *pit = cs_to_pit(cs);
+
+ return (u64)~readl(PITCVAL(pit->clksrc_base));
+}
+
+static int pit_clocksource_init(struct pit_timer *pit, const char *name,
+ void __iomem *base, unsigned long rate)
+{
+ /*
+ * Channels 0 and 1 can be chained to build a 64-bit timer,
+ * so use channel 2 as the clocksource and leave channels 0
+ * and 1 free for anyone else who needs them.
+ */
+ pit->clksrc_base = base + PIT_CH(2);
+ pit->cs.name = name;
+ pit->cs.rating = 300;
+ pit->cs.read = pit_timer_clocksource_read;
+ pit->cs.mask = CLOCKSOURCE_MASK(32);
+ pit->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ /* set the max load value and start the clock source counter */
+ pit_timer_disable(pit->clksrc_base);
+ pit_timer_set_counter(pit->clksrc_base, ~0);
+ pit_timer_enable(pit->clksrc_base, false);
+
+ sched_clock_base = pit->clksrc_base + PITCVAL_OFFSET;
+ sched_clock_register(pit_read_sched_clock, 32, rate);
+
+ return clocksource_register_hz(&pit->cs, rate);
+}
+
+static int pit_set_next_event(unsigned long delta, struct clock_event_device *ced)
+{
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ /*
+ * Writing a new value to the PITLDVAL register does not restart the
+ * timer; to abort the current cycle and start a period with the new
+ * value, the timer must be disabled and enabled again.
+ * PITLDVAL must be set to delta minus one, per the PIT hardware
+ * requirement.
+ */
+ pit_timer_disable(pit->clkevt_base);
+ pit_timer_set_counter(pit->clkevt_base, delta - 1);
+ pit_timer_enable(pit->clkevt_base, true);
+
+ return 0;
+}
+
+static int pit_shutdown(struct clock_event_device *ced)
+{
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ pit_timer_disable(pit->clkevt_base);
+
+ return 0;
+}
+
+static int pit_set_periodic(struct clock_event_device *ced)
+{
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ pit_set_next_event(pit->rate / HZ, ced);
+
+ return 0;
+}
+
+static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *ced = dev_id;
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ pit_timer_irqack(pit);
+
+ /*
+ * The PIT hardware doesn't support oneshot mode: when PITCVAL reaches
+ * zero it generates an interrupt, reloads the counter from PITLDVAL
+ * and starts counting again. Software therefore needs to disable the
+ * timer to stop the counter loop in ONESHOT mode.
+ */
+ if (likely(clockevent_state_oneshot(ced)))
+ pit_timer_disable(pit->clkevt_base);
+
+ ced->event_handler(ced);
+
+ return IRQ_HANDLED;
+}
+
+static int pit_clockevent_per_cpu_init(struct pit_timer *pit, const char *name,
+ void __iomem *base, unsigned long rate,
+ int irq, unsigned int cpu)
+{
+ int ret;
+
+ /*
+ * Channels 0 and 1 can be chained to build a 64-bit timer,
+ * so use channel 3 as the clockevent and leave channels 0
+ * and 1 free for anyone else who needs them.
+ */
+ pit->clkevt_base = base + PIT_CH(3);
+ pit->rate = rate;
+
+ pit_timer_disable(pit->clkevt_base);
+
+ pit_timer_irqack(pit);
+
+ ret = request_irq(irq, pit_timer_interrupt, IRQF_TIMER | IRQF_NOBALANCING,
+ name, &pit->ced);
+ if (ret)
+ return ret;
+
+ pit->ced.cpumask = cpumask_of(cpu);
+ pit->ced.irq = irq;
+
+ pit->ced.name = name;
+ pit->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ pit->ced.set_state_shutdown = pit_shutdown;
+ pit->ced.set_state_periodic = pit_set_periodic;
+ pit->ced.set_next_event = pit_set_next_event;
+ pit->ced.rating = 300;
+
+ per_cpu(pit_timers, cpu) = pit;
+
+ return 0;
+}
+
+static void pit_clockevent_per_cpu_exit(struct pit_timer *pit, unsigned int cpu)
+{
+ pit_timer_disable(pit->clkevt_base);
+ free_irq(pit->ced.irq, &pit->ced);
+ per_cpu(pit_timers, cpu) = NULL;
+}
+
+static int pit_clockevent_starting_cpu(unsigned int cpu)
+{
+ struct pit_timer *pit = per_cpu(pit_timers, cpu);
+ int ret;
+
+ if (!pit)
+ return 0;
+
+ ret = irq_force_affinity(pit->ced.irq, cpumask_of(cpu));
+ if (ret) {
+ pit_clockevent_per_cpu_exit(pit, cpu);
+ return ret;
+ }
+
+ /*
+ * The value for the LDVAL register trigger is calculated as:
+ * LDVAL trigger = (period / clock period) - 1
+ * The PIT is a 32-bit down-counting timer that generates an interrupt
+ * when the counter reaches 0, so the minimal LDVAL trigger value is 1.
+ * The min_delta is therefore the minimal LDVAL trigger value + 1
+ * (i.e. 2), and the max_delta is the full 32-bit range.
+ */
+ clockevents_config_and_register(&pit->ced, pit->rate, 2, 0xffffffff);
+
+ return 0;
+}
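To make the LDVAL comment above concrete, here is a hedged standalone sketch of the arithmetic; the 133 MHz clock rate and 1 ms period are purely illustrative and not taken from the driver.

    /* Sketch: LDVAL trigger = (period / clock period) - 1 */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t rate_hz = 133000000;  /* assumed PIT clock rate */
        const uint64_t period_ns = 1000000;  /* a 1 ms tick */

        /* cycles in one period, minus one per the PIT requirement */
        uint64_t ldval = rate_hz * period_ns / 1000000000ULL - 1;

        printf("LDVAL trigger = %llu\n", (unsigned long long)ldval); /* 132999 */
        /* min_delta = minimal trigger (1) + 1 = 2, max_delta = 0xffffffff */
        return 0;
    }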
+
+static int pit_timer_init(struct device_node *np)
+{
+ struct pit_timer *pit;
+ struct clk *pit_clk;
+ void __iomem *timer_base;
+ const char *name = of_node_full_name(np);
+ unsigned long clk_rate;
+ int irq, ret;
+
+ pit = kzalloc(sizeof(*pit), GFP_KERNEL);
+ if (!pit)
+ return -ENOMEM;
+
+ ret = -ENXIO;
+ timer_base = of_iomap(np, 0);
+ if (!timer_base) {
+ pr_err("Failed to iomap\n");
+ goto out_kfree;
+ }
+
+ ret = -EINVAL;
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+ pr_err("Failed to irq_of_parse_and_map\n");
+ goto out_iounmap;
+ }
+
+ pit_clk = of_clk_get(np, 0);
+ if (IS_ERR(pit_clk)) {
+ ret = PTR_ERR(pit_clk);
+ goto out_irq_dispose_mapping;
+ }
+
+ ret = clk_prepare_enable(pit_clk);
+ if (ret)
+ goto out_clk_put;
+
+ clk_rate = clk_get_rate(pit_clk);
+
+ pit_module_disable(timer_base);
+
+ ret = pit_clocksource_init(pit, name, timer_base, clk_rate);
+ if (ret) {
+ pr_err("Failed to initialize clocksource '%pOF'\n", np);
+ goto out_pit_module_disable;
+ }
+
+ ret = pit_clockevent_per_cpu_init(pit, name, timer_base, clk_rate, irq, pit_instances);
+ if (ret) {
+ pr_err("Failed to initialize clockevent '%pOF'\n", np);
+ goto out_pit_clocksource_unregister;
+ }
+
+ /* enable the pit module */
+ pit_module_enable(timer_base);
+
+ pit_instances++;
+
+ if (pit_instances == max_pit_instances) {
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "PIT timer:starting",
+ pit_clockevent_starting_cpu, NULL);
+ if (ret < 0)
+ goto out_pit_clocksource_unregister;
+ }
+
+ return 0;
+
+out_pit_clocksource_unregister:
+ clocksource_unregister(&pit->cs);
+out_pit_module_disable:
+ pit_module_disable(timer_base);
+ clk_disable_unprepare(pit_clk);
+out_clk_put:
+ clk_put(pit_clk);
+out_irq_dispose_mapping:
+ irq_dispose_mapping(irq);
+out_iounmap:
+ iounmap(timer_base);
+out_kfree:
+ kfree(pit);
+
+ return ret;
+}
+
+static int pit_timer_probe(struct platform_device *pdev)
+{
+ const struct pit_timer_data *pit_timer_data;
+
+ pit_timer_data = of_device_get_match_data(&pdev->dev);
+ if (pit_timer_data)
+ max_pit_instances = pit_timer_data->max_pit_instances;
+
+ return pit_timer_init(pdev->dev.of_node);
+}
+
+static struct pit_timer_data s32g2_data = { .max_pit_instances = 2 };
+
+static const struct of_device_id pit_timer_of_match[] = {
+ { .compatible = "nxp,s32g2-pit", .data = &s32g2_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pit_timer_of_match);
+
+static struct platform_driver nxp_pit_driver = {
+ .driver = {
+ .name = "nxp-pit",
+ .of_match_table = pit_timer_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = pit_timer_probe,
+};
+builtin_platform_driver(nxp_pit_driver);
+
+TIMER_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
diff --git a/drivers/clocksource/timer-nxp-stm.c b/drivers/clocksource/timer-nxp-stm.c
index d7ccf9001729..1ab907233f48 100644
--- a/drivers/clocksource/timer-nxp-stm.c
+++ b/drivers/clocksource/timer-nxp-stm.c
@@ -177,15 +177,15 @@ static void nxp_stm_clocksource_resume(struct clocksource *cs)
nxp_stm_clocksource_enable(cs);
}
-static void __init devm_clocksource_unregister(void *data)
+static void devm_clocksource_unregister(void *data)
{
struct stm_timer *stm_timer = data;
clocksource_unregister(&stm_timer->cs);
}
-static int __init nxp_stm_clocksource_init(struct device *dev, struct stm_timer *stm_timer,
- const char *name, void __iomem *base, struct clk *clk)
+static int nxp_stm_clocksource_init(struct device *dev, struct stm_timer *stm_timer,
+ const char *name, void __iomem *base, struct clk *clk)
{
int ret;
@@ -201,16 +201,15 @@ static int __init nxp_stm_clocksource_init(struct device *dev, struct stm_timer
stm_timer->cs.resume = nxp_stm_clocksource_resume;
stm_timer->cs.mask = CLOCKSOURCE_MASK(32);
stm_timer->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ stm_timer->cs.owner = THIS_MODULE;
ret = clocksource_register_hz(&stm_timer->cs, stm_timer->rate);
if (ret)
return ret;
ret = devm_add_action_or_reset(dev, devm_clocksource_unregister, stm_timer);
- if (ret) {
- clocksource_unregister(&stm_timer->cs);
+ if (ret)
return ret;
- }
stm_sched_clock = stm_timer;
@@ -297,9 +296,9 @@ static void nxp_stm_clockevent_resume(struct clock_event_device *ced)
nxp_stm_module_get(stm_timer);
}
-static int __init nxp_stm_clockevent_per_cpu_init(struct device *dev, struct stm_timer *stm_timer,
- const char *name, void __iomem *base, int irq,
- struct clk *clk, int cpu)
+static int nxp_stm_clockevent_per_cpu_init(struct device *dev, struct stm_timer *stm_timer,
+ const char *name, void __iomem *base, int irq,
+ struct clk *clk, int cpu)
{
stm_timer->base = base;
stm_timer->rate = clk_get_rate(clk);
@@ -314,6 +313,7 @@ static int __init nxp_stm_clockevent_per_cpu_init(struct device *dev, struct stm
stm_timer->ced.cpumask = cpumask_of(cpu);
stm_timer->ced.rating = 460;
stm_timer->ced.irq = irq;
+ stm_timer->ced.owner = THIS_MODULE;
per_cpu(stm_timers, cpu) = stm_timer;
@@ -386,7 +386,7 @@ static irqreturn_t nxp_stm_module_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __init nxp_stm_timer_probe(struct platform_device *pdev)
+static int nxp_stm_timer_probe(struct platform_device *pdev)
{
struct stm_timer *stm_timer;
struct device *dev = &pdev->dev;
@@ -482,14 +482,15 @@ static const struct of_device_id nxp_stm_of_match[] = {
};
MODULE_DEVICE_TABLE(of, nxp_stm_of_match);
-static struct platform_driver nxp_stm_probe = {
+static struct platform_driver nxp_stm_driver = {
.probe = nxp_stm_timer_probe,
.driver = {
.name = "nxp-stm",
.of_match_table = nxp_stm_of_match,
+ .suppress_bind_attrs = true,
},
};
-module_platform_driver(nxp_stm_probe);
+builtin_platform_driver(nxp_stm_driver);
MODULE_DESCRIPTION("NXP System Timer Module driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clocksource/timer-orion.c b/drivers/clocksource/timer-orion.c
index 49e86cb70a7a..61f1e27fc41e 100644
--- a/drivers/clocksource/timer-orion.c
+++ b/drivers/clocksource/timer-orion.c
@@ -43,7 +43,7 @@ static struct delay_timer orion_delay_timer = {
.read_current_timer = orion_read_timer,
};
-static void orion_delay_timer_init(unsigned long rate)
+static void __init orion_delay_timer_init(unsigned long rate)
{
orion_delay_timer.freq = rate;
register_current_timer_delay(&orion_delay_timer);
diff --git a/drivers/clocksource/timer-ralink.c b/drivers/clocksource/timer-ralink.c
index 6ecdb4228f76..68434d9ed910 100644
--- a/drivers/clocksource/timer-ralink.c
+++ b/drivers/clocksource/timer-ralink.c
@@ -130,14 +130,15 @@ static int __init ralink_systick_init(struct device_node *np)
systick.dev.irq = irq_of_parse_and_map(np, 0);
if (!systick.dev.irq) {
pr_err("%pOFn: request_irq failed", np);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_iounmap;
}
ret = clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
SYSTICK_FREQ, 301, 16,
clocksource_mmio_readl_up);
if (ret)
- return ret;
+ goto err_free_irq;
clockevents_register_device(&systick.dev);
@@ -145,6 +146,12 @@ static int __init ralink_systick_init(struct device_node *np)
np, systick.dev.mult, systick.dev.shift);
return 0;
+
+err_free_irq:
+ irq_dispose_mapping(systick.dev.irq);
+err_iounmap:
+ iounmap(systick.membase);
+ return ret;
}
TIMER_OF_DECLARE(systick, "ralink,cevt-systick", ralink_systick_init);
diff --git a/drivers/clocksource/timer-rda.c b/drivers/clocksource/timer-rda.c
index fd1199c189bf..0be8e05970e2 100644
--- a/drivers/clocksource/timer-rda.c
+++ b/drivers/clocksource/timer-rda.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/sched_clock.h>
#include "timer-of.h"
@@ -153,7 +154,7 @@ static struct timer_of rda_ostimer_of = {
},
};
-static u64 rda_hwtimer_read(struct clocksource *cs)
+static u64 rda_hwtimer_clocksource_read(void)
{
void __iomem *base = timer_of_base(&rda_ostimer_of);
u32 lo, hi;
@@ -167,6 +168,11 @@ static u64 rda_hwtimer_read(struct clocksource *cs)
return ((u64)hi << 32) | lo;
}
+static u64 rda_hwtimer_read(struct clocksource *cs)
+{
+ return rda_hwtimer_clocksource_read();
+}
+
static struct clocksource rda_hwtimer_clocksource = {
.name = "rda-timer",
.rating = 400,
@@ -185,6 +191,7 @@ static int __init rda_timer_init(struct device_node *np)
return ret;
clocksource_register_hz(&rda_hwtimer_clocksource, rate);
+ sched_clock_register(rda_hwtimer_clocksource_read, 64, rate);
clockevents_config_and_register(&rda_ostimer_of.clkevt, rate,
0x2, UINT_MAX);
diff --git a/drivers/clocksource/timer-realtek.c b/drivers/clocksource/timer-realtek.c
new file mode 100644
index 000000000000..4f0439de9939
--- /dev/null
+++ b/drivers/clocksource/timer-realtek.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 Realtek Semiconductor Corp.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/irqflags.h>
+#include <linux/interrupt.h>
+#include "timer-of.h"
+
+#define ENBL 1
+#define DSBL 0
+
+#define SYSTIMER_RATE 1000000
+#define SYSTIMER_MIN_DELTA 0x64
+#define SYSTIMER_MAX_DELTA ULONG_MAX
+
+/* SYSTIMER Register Offset (RTK Internal Use) */
+#define TS_LW_OFST 0x0
+#define TS_HW_OFST 0x4
+#define TS_CMP_VAL_LW_OFST 0x8
+#define TS_CMP_VAL_HW_OFST 0xC
+#define TS_CMP_CTRL_OFST 0x10
+#define TS_CMP_STAT_OFST 0x14
+
+/* SYSTIMER CMP CTRL REG Mask */
+#define TS_CMP_EN_MASK 0x1
+#define TS_WR_EN0_MASK 0x2
+
+static void __iomem *systimer_base;
+
+static u64 rtk_ts64_read(void)
+{
+ u32 low, high;
+ u64 ts;
+
+ /* Caution: read the LSB word (TS_LW_OFST) first, then the MSB (TS_HW_OFST) */
+ low = readl(systimer_base + TS_LW_OFST);
+ high = readl(systimer_base + TS_HW_OFST);
+ ts = ((u64)high << 32) | low;
+
+ return ts;
+}
+
+static void rtk_cmp_value_write(u64 value)
+{
+ u32 high, low;
+
+ low = value & 0xFFFFFFFF;
+ high = value >> 32;
+
+ writel(high, systimer_base + TS_CMP_VAL_HW_OFST);
+ writel(low, systimer_base + TS_CMP_VAL_LW_OFST);
+}
+
+static inline void rtk_cmp_en_write(bool cmp_en)
+{
+ u32 val;
+
+ val = TS_WR_EN0_MASK;
+ if (cmp_en == ENBL)
+ val |= TS_CMP_EN_MASK;
+
+ writel(val, systimer_base + TS_CMP_CTRL_OFST);
+}
+
+static int rtk_syst_clkevt_next_event(unsigned long cycles, struct clock_event_device *clkevt)
+{
+ u64 cmp_val;
+
+ rtk_cmp_en_write(DSBL);
+ cmp_val = rtk_ts64_read();
+
+ /* Set the CMP value to the current timestamp plus the requested cycles */
+ rtk_cmp_value_write(cmp_val + cycles);
+ rtk_cmp_en_write(ENBL);
+ return 0;
+}
+
+static irqreturn_t rtk_ts_match_intr_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *clkevt = dev_id;
+ void __iomem *reg_base;
+ u32 val;
+
+ /* Disable TS CMP Match */
+ rtk_cmp_en_write(DSBL);
+
+ /* Clear TS CMP INTR */
+ reg_base = systimer_base + TS_CMP_STAT_OFST;
+ val = readl(reg_base) & TS_CMP_EN_MASK;
+ writel(val | TS_CMP_EN_MASK, reg_base);
+ clkevt->event_handler(clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static int rtk_syst_shutdown(struct clock_event_device *clkevt)
+{
+ void __iomem *reg_base;
+ u64 cmp_val = 0;
+
+ /* Disable TS CMP Match */
+ rtk_cmp_en_write(DSBL);
+ /* Set compare value to 0 */
+ rtk_cmp_value_write(cmp_val);
+
+ /* Clear TS CMP INTR */
+ reg_base = systimer_base + TS_CMP_STAT_OFST;
+ writel(TS_CMP_EN_MASK, reg_base);
+ return 0;
+}
+
+static struct timer_of rtk_timer_to = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_BASE,
+
+ .clkevt = {
+ .name = "rtk-clkevt",
+ .rating = 300,
+ .cpumask = cpu_possible_mask,
+ .features = CLOCK_EVT_FEAT_DYNIRQ |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = rtk_syst_clkevt_next_event,
+ .set_state_oneshot = rtk_syst_shutdown,
+ .set_state_shutdown = rtk_syst_shutdown,
+ },
+
+ .of_irq = {
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = rtk_ts_match_intr_handler,
+ },
+};
+
+static int __init rtk_systimer_init(struct device_node *node)
+{
+ int ret;
+
+ ret = timer_of_init(node, &rtk_timer_to);
+ if (ret)
+ return ret;
+
+ systimer_base = timer_of_base(&rtk_timer_to);
+ clockevents_config_and_register(&rtk_timer_to.clkevt, SYSTIMER_RATE,
+ SYSTIMER_MIN_DELTA, SYSTIMER_MAX_DELTA);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(rtk_systimer, "realtek,rtd1625-systimer", rtk_systimer_init);
diff --git a/drivers/clocksource/timer-rtl-otto.c b/drivers/clocksource/timer-rtl-otto.c
index 8a3068b36e75..6113d2fdd4de 100644
--- a/drivers/clocksource/timer-rtl-otto.c
+++ b/drivers/clocksource/timer-rtl-otto.c
@@ -38,14 +38,13 @@
#define RTTM_BIT_COUNT 28
#define RTTM_MIN_DELTA 8
#define RTTM_MAX_DELTA CLOCKSOURCE_MASK(28)
+#define RTTM_MAX_DIVISOR GENMASK(15, 0)
/*
- * Timers are derived from the LXB clock frequency. Usually this is a fixed
- * multiple of the 25 MHz oscillator. The 930X SOC is an exception from that.
- * Its LXB clock has only dividers and uses the switch PLL of 2.45 GHz as its
- * base. The only meaningful frequencies we can achieve from that are 175.000
- * MHz and 153.125 MHz. The greatest common divisor of all explained possible
- * speeds is 3125000. Pin the timers to this 3.125 MHz reference frequency.
+ * Timers are derived from the Lexra bus (LXB) clock frequency. This is 175 MHz
+ * on RTL930x and 200 MHz on the other platforms. Pin the timers to a common
+ * 3.125 MHz reference frequency, which divides both rates evenly; this gives
+ * enough range and resolution and keeps the different platforms comparable.
*/
#define RTTM_TICKS_PER_SEC 3125000
@@ -55,11 +54,6 @@ struct rttm_cs {
};
/* Simple internal register functions */
-static inline void rttm_set_counter(void __iomem *base, unsigned int counter)
-{
- iowrite32(counter, base + RTTM_CNT);
-}
-
static inline unsigned int rttm_get_counter(void __iomem *base)
{
return ioread32(base + RTTM_CNT);
@@ -112,6 +106,22 @@ static irqreturn_t rttm_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void rttm_bounce_timer(void __iomem *base, u32 mode)
+{
+ /*
+ * When a running timer has less than ~5us left, a stop/start sequence
+ * might fail. While the details are unknown, the most evident effect is
+ * that the subsequent interrupt is not fired.
+ *
+ * As a workaround, issue an intermediate restart at a very slow
+ * frequency of ~3kHz while keeping the target counter (>=8), so that
+ * the follow-up restart is always issued outside the critical window.
+ */
+
+ rttm_disable_timer(base);
+ rttm_enable_timer(base, mode, RTTM_MAX_DIVISOR);
+}
+
static void rttm_stop_timer(void __iomem *base)
{
rttm_disable_timer(base);
@@ -120,7 +130,6 @@ static void rttm_stop_timer(void __iomem *base)
static void rttm_start_timer(struct timer_of *to, u32 mode)
{
- rttm_set_counter(to->of_base.base, 0);
rttm_enable_timer(to->of_base.base, mode, to->of_clk.rate / RTTM_TICKS_PER_SEC);
}
@@ -129,7 +138,8 @@ static int rttm_next_event(unsigned long delta, struct clock_event_device *clkev
struct timer_of *to = to_timer_of(clkevt);
RTTM_DEBUG(to->of_base.base);
- rttm_stop_timer(to->of_base.base);
+ rttm_bounce_timer(to->of_base.base, RTTM_CTRL_COUNTER);
+ rttm_disable_timer(to->of_base.base);
rttm_set_period(to->of_base.base, delta);
rttm_start_timer(to, RTTM_CTRL_COUNTER);
@@ -141,7 +151,8 @@ static int rttm_state_oneshot(struct clock_event_device *clkevt)
struct timer_of *to = to_timer_of(clkevt);
RTTM_DEBUG(to->of_base.base);
- rttm_stop_timer(to->of_base.base);
+ rttm_bounce_timer(to->of_base.base, RTTM_CTRL_COUNTER);
+ rttm_disable_timer(to->of_base.base);
rttm_set_period(to->of_base.base, RTTM_TICKS_PER_SEC / HZ);
rttm_start_timer(to, RTTM_CTRL_COUNTER);
@@ -153,7 +164,8 @@ static int rttm_state_periodic(struct clock_event_device *clkevt)
struct timer_of *to = to_timer_of(clkevt);
RTTM_DEBUG(to->of_base.base);
- rttm_stop_timer(to->of_base.base);
+ rttm_bounce_timer(to->of_base.base, RTTM_CTRL_TIMER);
+ rttm_disable_timer(to->of_base.base);
rttm_set_period(to->of_base.base, RTTM_TICKS_PER_SEC / HZ);
rttm_start_timer(to, RTTM_CTRL_TIMER);
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index cd1916c05325..e82a95ea4724 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -21,6 +21,10 @@
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
+#ifdef CONFIG_ARM
+#include <linux/delay.h>
+#endif
+
#include "timer-sp.h"
/* Hisilicon 64-bit timer(a variant of ARM SP804) */
@@ -102,6 +106,23 @@ static u64 notrace sp804_read(void)
return ~readl_relaxed(sched_clkevt->value);
}
+#ifdef CONFIG_ARM
+static struct delay_timer delay;
+static unsigned long sp804_read_delay_timer_read(void)
+{
+ return sp804_read();
+}
+
+static void sp804_register_delay_timer(int freq)
+{
+ delay.freq = freq;
+ delay.read_current_timer = sp804_read_delay_timer_read;
+ register_current_timer_delay(&delay);
+}
+#else
+static inline void sp804_register_delay_timer(int freq) {}
+#endif
+
static int __init sp804_clocksource_and_sched_clock_init(void __iomem *base,
const char *name,
struct clk *clk,
@@ -114,6 +135,8 @@ static int __init sp804_clocksource_and_sched_clock_init(void __iomem *base,
if (rate < 0)
return -EINVAL;
+ sp804_register_delay_timer(rate);
+
clkevt = sp804_clkevt_get(base);
writel(0, clkevt->ctrl);
@@ -318,6 +341,7 @@ static int __init sp804_of_init(struct device_node *np, struct sp804_timer *time
if (ret)
goto err;
}
+
initialized = true;
return 0;
diff --git a/drivers/clocksource/timer-sprd.c b/drivers/clocksource/timer-sprd.c
index 430cb99d8d79..2c07dd2af760 100644
--- a/drivers/clocksource/timer-sprd.c
+++ b/drivers/clocksource/timer-sprd.c
@@ -30,6 +30,7 @@
#define TIMER_VALUE_SHDW_HI 0x1c
#define TIMER_VALUE_LO_MASK GENMASK(31, 0)
+#define TIMER_VALUE_HI_MASK GENMASK(31, 0)
static void sprd_timer_enable(void __iomem *base, u32 flag)
{
@@ -162,15 +163,26 @@ static struct timer_of suspend_to = {
static u64 sprd_suspend_timer_read(struct clocksource *cs)
{
- return ~(u64)readl_relaxed(timer_of_base(&suspend_to) +
- TIMER_VALUE_SHDW_LO) & cs->mask;
+ u32 lo, hi;
+
+ do {
+ hi = readl_relaxed(timer_of_base(&suspend_to) +
+ TIMER_VALUE_SHDW_HI);
+ lo = readl_relaxed(timer_of_base(&suspend_to) +
+ TIMER_VALUE_SHDW_LO);
+ } while (hi != readl_relaxed(timer_of_base(&suspend_to) + TIMER_VALUE_SHDW_HI));
+
+ return ~(((u64)hi << 32) | lo);
}
static int sprd_suspend_timer_enable(struct clocksource *cs)
{
- sprd_timer_update_counter(timer_of_base(&suspend_to),
- TIMER_VALUE_LO_MASK);
- sprd_timer_enable(timer_of_base(&suspend_to), TIMER_CTL_PERIOD_MODE);
+ writel_relaxed(TIMER_VALUE_LO_MASK,
+ timer_of_base(&suspend_to) + TIMER_LOAD_LO);
+ writel_relaxed(TIMER_VALUE_HI_MASK,
+ timer_of_base(&suspend_to) + TIMER_LOAD_HI);
+ sprd_timer_enable(timer_of_base(&suspend_to),
+ TIMER_CTL_PERIOD_MODE | TIMER_CTL_64BIT_WIDTH);
return 0;
}
@@ -186,7 +198,7 @@ static struct clocksource suspend_clocksource = {
.read = sprd_suspend_timer_read,
.enable = sprd_suspend_timer_enable,
.disable = sprd_suspend_timer_disable,
- .mask = CLOCKSOURCE_MASK(32),
+ .mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};
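The suspend counter read above uses the classic hi/lo/hi re-read loop to obtain a coherent 64-bit value from two 32-bit halves. A minimal standalone sketch of the pattern, with stand-in accessors replacing the readl_relaxed() calls (all names here are illustrative):

    /* Sketch: coherent 64-bit read from split 32-bit counter registers. */
    #include <stdint.h>

    static volatile uint32_t hw_hi, hw_lo; /* stand-ins for the mmio words */

    static uint32_t read_hi(void) { return hw_hi; }
    static uint32_t read_lo(void) { return hw_lo; }

    static uint64_t read_counter64(void)
    {
        uint32_t hi, lo;

        do {
            hi = read_hi(); /* sample the high word */
            lo = read_lo(); /* sample the low word */
            /* retry if the high word rolled over between the reads */
        } while (hi != read_hi());

        return ((uint64_t)hi << 32) | lo;
    }

    int main(void) { return (int)(read_counter64() & 1); }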
diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c
index 6e7944ffd7c0..3d804128c765 100644
--- a/drivers/clocksource/timer-stm32-lp.c
+++ b/drivers/clocksource/timer-stm32-lp.c
@@ -211,6 +211,7 @@ static void stm32_clkevent_lp_init(struct stm32_lp_private *priv,
priv->clkevt.rating = STM32_LP_RATING;
priv->clkevt.suspend = stm32_clkevent_lp_suspend;
priv->clkevt.resume = stm32_clkevent_lp_resume;
+ priv->clkevt.owner = THIS_MODULE;
clockevents_config_and_register(&priv->clkevt, rate, 0x1,
STM32_LPTIM_MAX_ARR);
@@ -288,5 +289,4 @@ static struct platform_driver stm32_clkevent_lp_driver = {
};
module_platform_driver(stm32_clkevent_lp_driver);
-MODULE_ALIAS("platform:stm32-lptimer-timer");
MODULE_DESCRIPTION("STMicroelectronics STM32 clockevent low power driver");
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 6b48a9006444..f827d3f98f60 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -185,6 +185,7 @@ static int sun5i_setup_clocksource(struct platform_device *pdev,
cs->clksrc.read = sun5i_clksrc_read;
cs->clksrc.mask = CLOCKSOURCE_MASK(32);
cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ cs->clksrc.owner = THIS_MODULE;
ret = clocksource_register_hz(&cs->clksrc, rate);
if (ret) {
@@ -214,6 +215,7 @@ static int sun5i_setup_clockevent(struct platform_device *pdev,
ce->clkevt.rating = 340;
ce->clkevt.irq = irq;
ce->clkevt.cpumask = cpu_possible_mask;
+ ce->clkevt.owner = THIS_MODULE;
/* Enable timer0 interrupt */
val = readl(base + TIMER_IRQ_EN_REG);
diff --git a/drivers/clocksource/timer-tegra186.c b/drivers/clocksource/timer-tegra186.c
index e5394f98a02e..355558893e5f 100644
--- a/drivers/clocksource/timer-tegra186.c
+++ b/drivers/clocksource/timer-tegra186.c
@@ -159,7 +159,7 @@ static void tegra186_wdt_enable(struct tegra186_wdt *wdt)
tmr_writel(wdt->tmr, TMRCSSR_SRC_USEC, TMRCSSR);
/* configure timer (system reset happens on the fifth expiration) */
- value = TMRCR_PTV(wdt->base.timeout * USEC_PER_SEC / 5) |
+ value = TMRCR_PTV(wdt->base.timeout * (USEC_PER_SEC / 5)) |
TMRCR_PERIODIC | TMRCR_ENABLE;
tmr_writel(wdt->tmr, value, TMRCR);
@@ -231,7 +231,7 @@ static unsigned int tegra186_wdt_get_timeleft(struct watchdog_device *wdd)
{
struct tegra186_wdt *wdt = to_tegra186_wdt(wdd);
u32 expiration, val;
- u64 timeleft;
+ u32 timeleft;
if (!watchdog_active(&wdt->base)) {
/* return zero if the watchdog timer is not activated. */
@@ -266,21 +266,26 @@ static unsigned int tegra186_wdt_get_timeleft(struct watchdog_device *wdd)
* Calculate the time remaining by adding the time for the
* counter value to the time of the counter expirations that
* remain.
+ * Note: Since wdt->base.timeout is bounded by 255, the maximum
+ * value added to timeleft is
+ * 255 * (1,000,000 / 5) * 4
+ * = 255 * 200,000 * 4
+ * = 204,000,000
+ * TMRSR_PCV is a 29-bit field.
+ * Its maximum value is 0x1fffffff = 536,870,911.
+ * 204,000,000 + 536,870,911 = 740,870,911 = 0x2C28CAFF.
+ * timeleft can therefore not overflow, and 64-bit calculations
+ * are not necessary.
*/
- timeleft += (((u64)wdt->base.timeout * USEC_PER_SEC) / 5) * (4 - expiration);
+ timeleft += (wdt->base.timeout * (USEC_PER_SEC / 5)) * (4 - expiration);
/*
* Convert the current counter value to seconds,
- * rounding up to the nearest second. Cast u64 to
- * u32 under the assumption that no overflow happens
- * when coverting to seconds.
+ * rounding to the nearest second.
*/
- timeleft = DIV_ROUND_CLOSEST_ULL(timeleft, USEC_PER_SEC);
+ timeleft = DIV_ROUND_CLOSEST(timeleft, USEC_PER_SEC);
- if (WARN_ON_ONCE(timeleft > U32_MAX))
- return U32_MAX;
-
- return lower_32_bits(timeleft);
+ return timeleft;
}
static const struct watchdog_ops tegra186_wdt_ops = {
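As a cross-check of the overflow analysis in the comment above, a hedged standalone sketch that recomputes the worst case with the same constants (values copied from the comment; the program itself is illustrative):

    /* Sketch: verify the no-overflow claim for the timeleft arithmetic. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint32_t usec_per_sec = 1000000;
        const uint32_t max_timeout = 255;     /* bound on wdt->base.timeout */
        const uint32_t max_pcv = 0x1fffffff;  /* 29-bit TMRSR_PCV field */

        uint64_t worst = (uint64_t)max_timeout * (usec_per_sec / 5) * 4 + max_pcv;

        /* prints 740870911, comfortably below U32_MAX (4294967295) */
        printf("worst case = %llu\n", (unsigned long long)worst);
        return 0;
    }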
@@ -328,16 +333,12 @@ static struct tegra186_wdt *tegra186_wdt_create(struct tegra186_timer *tegra,
wdt->base.parent = tegra->dev;
err = watchdog_init_timeout(&wdt->base, 5, tegra->dev);
- if (err < 0) {
- dev_err(tegra->dev, "failed to initialize timeout: %d\n", err);
+ if (err < 0)
return ERR_PTR(err);
- }
err = devm_watchdog_register_device(tegra->dev, &wdt->base);
- if (err < 0) {
- dev_err(tegra->dev, "failed to register WDT: %d\n", err);
+ if (err < 0)
return ERR_PTR(err);
- }
return wdt;
}
@@ -373,6 +374,7 @@ static int tegra186_timer_tsc_init(struct tegra186_timer *tegra)
tegra->tsc.read = tegra186_timer_tsc_read;
tegra->tsc.mask = CLOCKSOURCE_MASK(56);
tegra->tsc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ tegra->tsc.owner = THIS_MODULE;
return clocksource_register_hz(&tegra->tsc, 31250000);
}
@@ -392,6 +394,7 @@ static int tegra186_timer_osc_init(struct tegra186_timer *tegra)
tegra->osc.read = tegra186_timer_osc_read;
tegra->osc.mask = CLOCKSOURCE_MASK(32);
tegra->osc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ tegra->osc.owner = THIS_MODULE;
return clocksource_register_hz(&tegra->osc, 38400000);
}
@@ -411,6 +414,7 @@ static int tegra186_timer_usec_init(struct tegra186_timer *tegra)
tegra->usec.read = tegra186_timer_usec_read;
tegra->usec.mask = CLOCKSOURCE_MASK(32);
tegra->usec.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ tegra->usec.owner = THIS_MODULE;
return clocksource_register_hz(&tegra->usec, USEC_PER_SEC);
}
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index e9e32df6b566..793e7cdcb1b1 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -31,6 +31,7 @@
#include <linux/platform_data/dmtimer-omap.h>
#include <clocksource/timer-ti-dm.h>
+#include <linux/delay.h>
/*
* timer errata flags
@@ -836,6 +837,48 @@ static int omap_dm_timer_set_match(struct omap_dm_timer *cookie, int enable,
return 0;
}
+static int omap_dm_timer_set_cap(struct omap_dm_timer *cookie,
+ int autoreload, bool config_period)
+{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
+ u32 l;
+
+ timer = to_dmtimer(cookie);
+ if (unlikely(!timer))
+ return -EINVAL;
+
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+ /*
+ * 1. TIMER_TCLR[1] (AR): select autoreload mode.
+ * 2. TIMER_TCLR[14]: sets the functionality of the TIMER IO pin.
+ * 3. TIMER_TCLR[13]: capture mode select bit.
+ * 4. TIMER_TCLR[9-8]: select the transition capture mode.
+ */
+
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
+
+ if (autoreload)
+ l |= OMAP_TIMER_CTRL_AR;
+
+ l |= OMAP_TIMER_CTRL_CAPTMODE | OMAP_TIMER_CTRL_GPOCFG;
+
+ if (config_period)
+ l |= OMAP_TIMER_CTRL_TCM_LOWTOHIGH; /* Time Period config */
+ else
+ l |= OMAP_TIMER_CTRL_TCM_BOTHEDGES; /* Duty Cycle config */
+
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
static int omap_dm_timer_set_pwm(struct omap_dm_timer *cookie, int def_on,
int toggle, int trigger, int autoreload)
{
@@ -1023,23 +1066,92 @@ static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *cookie)
return __omap_dm_timer_read_counter(timer);
}
+static inline unsigned int __omap_dm_timer_cap(struct dmtimer *timer, int idx)
+{
+ return idx == 0 ? dmtimer_read(timer, OMAP_TIMER_CAPTURE_REG) :
+ dmtimer_read(timer, OMAP_TIMER_CAPTURE2_REG);
+}
+
static int omap_dm_timer_write_counter(struct omap_dm_timer *cookie, unsigned int value)
{
struct dmtimer *timer;
+ struct device *dev;
timer = to_dmtimer(cookie);
- if (unlikely(!timer || !atomic_read(&timer->enabled))) {
- pr_err("%s: timer not available or enabled.\n", __func__);
+ if (unlikely(!timer)) {
+ pr_err("%s: timer not available.\n", __func__);
return -EINVAL;
}
+ dev = &timer->pdev->dev;
+
+ pm_runtime_resume_and_get(dev);
dmtimer_write(timer, OMAP_TIMER_COUNTER_REG, value);
+ pm_runtime_put_sync(dev);
/* Save the context */
timer->context.tcrr = value;
return 0;
}
+/**
+ * omap_dm_timer_cap_counter() - Calculate the high count or period count depending on the
+ * configuration.
+ * @cookie: Pointer to OMAP DM timer
+ * @is_period: Whether to configure the timer in period or duty-cycle mode
+ *
+ * Return: The high count or period count if the timer is enabled, otherwise
+ * an appropriate error.
+ */
+static unsigned int omap_dm_timer_cap_counter(struct omap_dm_timer *cookie, bool is_period)
+{
+ struct dmtimer *timer;
+ unsigned int cap1 = 0;
+ unsigned int cap2 = 0;
+ u32 l, ret;
+
+ timer = to_dmtimer(cookie);
+ if (unlikely(!timer || !atomic_read(&timer->enabled))) {
+ pr_err("%s:timer is not available or enabled.%p\n", __func__, (void *)timer);
+ return -EINVAL;
+ }
+
+ /* Stop the timer */
+ omap_dm_timer_stop(cookie);
+
+ /* Clear the timer counter value to 0 */
+ ret = omap_dm_timer_write_counter(cookie, 0);
+ if (ret)
+ return ret;
+
+ /* Sets the timer capture configuration for period/duty cycle calculation */
+ ret = omap_dm_timer_set_cap(cookie, true, is_period);
+ if (ret) {
+ pr_err("%s: Failed to set timer capture configuration.\n", __func__);
+ return ret;
+ }
+ /* Start the timer */
+ omap_dm_timer_start(cookie);
+
+ /*
+ * A 1 s delay is given to provide enough time to capture
+ * low-frequency signals.
+ */
+ msleep(1000);
+
+ cap1 = __omap_dm_timer_cap(timer, 0);
+ cap2 = __omap_dm_timer_cap(timer, 1);
+
+ /*
+ * Clears the TCLR configuration.
+ * The start bit must be set to 1 as the timer is already in start mode.
+ */
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
+ l &= ~(0xffff) | 0x1;
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
+
+ return cap2 - cap1;
+}
+
static int __maybe_unused omap_dm_timer_runtime_suspend(struct device *dev)
{
struct dmtimer *timer = dev_get_drvdata(dev);
@@ -1246,6 +1358,9 @@ static const struct omap_dm_timer_ops dmtimer_ops = {
.write_counter = omap_dm_timer_write_counter,
.read_status = omap_dm_timer_read_status,
.write_status = omap_dm_timer_write_status,
+ .set_cap = omap_dm_timer_set_cap,
+ .get_cap_status = omap_dm_timer_get_pwm_status,
+ .read_cap = omap_dm_timer_cap_counter,
};
static const struct dmtimer_platform_data omap3plus_pdata = {
diff --git a/drivers/clocksource/timer-vf-pit.c b/drivers/clocksource/timer-vf-pit.c
deleted file mode 100644
index 911c92146eca..000000000000
--- a/drivers/clocksource/timer-vf-pit.c
+++ /dev/null
@@ -1,194 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2012-2013 Freescale Semiconductor, Inc.
- */
-
-#include <linux/interrupt.h>
-#include <linux/clockchips.h>
-#include <linux/clk.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/sched_clock.h>
-
-/*
- * Each pit takes 0x10 Bytes register space
- */
-#define PITMCR 0x00
-#define PIT0_OFFSET 0x100
-#define PITn_OFFSET(n) (PIT0_OFFSET + 0x10 * (n))
-#define PITLDVAL 0x00
-#define PITCVAL 0x04
-#define PITTCTRL 0x08
-#define PITTFLG 0x0c
-
-#define PITMCR_MDIS (0x1 << 1)
-
-#define PITTCTRL_TEN (0x1 << 0)
-#define PITTCTRL_TIE (0x1 << 1)
-#define PITCTRL_CHN (0x1 << 2)
-
-#define PITTFLG_TIF 0x1
-
-static void __iomem *clksrc_base;
-static void __iomem *clkevt_base;
-static unsigned long cycle_per_jiffy;
-
-static inline void pit_timer_enable(void)
-{
- __raw_writel(PITTCTRL_TEN | PITTCTRL_TIE, clkevt_base + PITTCTRL);
-}
-
-static inline void pit_timer_disable(void)
-{
- __raw_writel(0, clkevt_base + PITTCTRL);
-}
-
-static inline void pit_irq_acknowledge(void)
-{
- __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
-}
-
-static u64 notrace pit_read_sched_clock(void)
-{
- return ~__raw_readl(clksrc_base + PITCVAL);
-}
-
-static int __init pit_clocksource_init(unsigned long rate)
-{
- /* set the max load value and start the clock source counter */
- __raw_writel(0, clksrc_base + PITTCTRL);
- __raw_writel(~0UL, clksrc_base + PITLDVAL);
- __raw_writel(PITTCTRL_TEN, clksrc_base + PITTCTRL);
-
- sched_clock_register(pit_read_sched_clock, 32, rate);
- return clocksource_mmio_init(clksrc_base + PITCVAL, "vf-pit", rate,
- 300, 32, clocksource_mmio_readl_down);
-}
-
-static int pit_set_next_event(unsigned long delta,
- struct clock_event_device *unused)
-{
- /*
- * set a new value to PITLDVAL register will not restart the timer,
- * to abort the current cycle and start a timer period with the new
- * value, the timer must be disabled and enabled again.
- * and the PITLAVAL should be set to delta minus one according to pit
- * hardware requirement.
- */
- pit_timer_disable();
- __raw_writel(delta - 1, clkevt_base + PITLDVAL);
- pit_timer_enable();
-
- return 0;
-}
-
-static int pit_shutdown(struct clock_event_device *evt)
-{
- pit_timer_disable();
- return 0;
-}
-
-static int pit_set_periodic(struct clock_event_device *evt)
-{
- pit_set_next_event(cycle_per_jiffy, evt);
- return 0;
-}
-
-static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- pit_irq_acknowledge();
-
- /*
- * pit hardware doesn't support oneshot, it will generate an interrupt
- * and reload the counter value from PITLDVAL when PITCVAL reach zero,
- * and start the counter again. So software need to disable the timer
- * to stop the counter loop in ONESHOT mode.
- */
- if (likely(clockevent_state_oneshot(evt)))
- pit_timer_disable();
-
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
-}
-
-static struct clock_event_device clockevent_pit = {
- .name = "VF pit timer",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_state_shutdown = pit_shutdown,
- .set_state_periodic = pit_set_periodic,
- .set_next_event = pit_set_next_event,
- .rating = 300,
-};
-
-static int __init pit_clockevent_init(unsigned long rate, int irq)
-{
- __raw_writel(0, clkevt_base + PITTCTRL);
- __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
-
- BUG_ON(request_irq(irq, pit_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
- "VF pit timer", &clockevent_pit));
-
- clockevent_pit.cpumask = cpumask_of(0);
- clockevent_pit.irq = irq;
- /*
- * The value for the LDVAL register trigger is calculated as:
- * LDVAL trigger = (period / clock period) - 1
- * The pit is a 32-bit down count timer, when the counter value
- * reaches 0, it will generate an interrupt, thus the minimal
- * LDVAL trigger value is 1. And then the min_delta is
- * minimal LDVAL trigger value + 1, and the max_delta is full 32-bit.
- */
- clockevents_config_and_register(&clockevent_pit, rate, 2, 0xffffffff);
-
- return 0;
-}
-
-static int __init pit_timer_init(struct device_node *np)
-{
- struct clk *pit_clk;
- void __iomem *timer_base;
- unsigned long clk_rate;
- int irq, ret;
-
- timer_base = of_iomap(np, 0);
- if (!timer_base) {
- pr_err("Failed to iomap\n");
- return -ENXIO;
- }
-
- /*
- * PIT0 and PIT1 can be chained to build a 64-bit timer,
- * so choose PIT2 as clocksource, PIT3 as clockevent device,
- * and leave PIT0 and PIT1 unused for anyone else who needs them.
- */
- clksrc_base = timer_base + PITn_OFFSET(2);
- clkevt_base = timer_base + PITn_OFFSET(3);
-
- irq = irq_of_parse_and_map(np, 0);
- if (irq <= 0)
- return -EINVAL;
-
- pit_clk = of_clk_get(np, 0);
- if (IS_ERR(pit_clk))
- return PTR_ERR(pit_clk);
-
- ret = clk_prepare_enable(pit_clk);
- if (ret)
- return ret;
-
- clk_rate = clk_get_rate(pit_clk);
- cycle_per_jiffy = clk_rate / (HZ);
-
- /* enable the pit module */
- __raw_writel(~PITMCR_MDIS, timer_base + PITMCR);
-
- ret = pit_clocksource_init(clk_rate);
- if (ret)
- return ret;
-
- return pit_clockevent_init(clk_rate, irq);
-}
-TIMER_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
diff --git a/drivers/comedi/Kconfig b/drivers/comedi/Kconfig
index 93c68a40a17b..6dcc2567de6d 100644
--- a/drivers/comedi/Kconfig
+++ b/drivers/comedi/Kconfig
@@ -705,6 +705,15 @@ config COMEDI_ADL_PCI6208
To compile this driver as a module, choose M here: the module will be
called adl_pci6208.
+config COMEDI_ADL_PCI7250
+ tristate "ADLink PCI-7250 support"
+ help
+ Enable support for ADLink PCI-7250/LPCI-7250/LPCIe-7250 relay output
+ and isolated digital input boards.
+
+ To compile this driver as a module, choose M here: the module will be
+ called adl_pci7250.
+
config COMEDI_ADL_PCI7X3X
tristate "ADLink PCI-723X/743X isolated digital i/o board support"
depends on HAS_IOPORT
diff --git a/drivers/comedi/comedi_buf.c b/drivers/comedi/comedi_buf.c
index 002c0e76baff..785977b40a93 100644
--- a/drivers/comedi/comedi_buf.c
+++ b/drivers/comedi/comedi_buf.c
@@ -273,19 +273,8 @@ unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
return free_end - async->buf_write_count;
}
-/**
- * comedi_buf_write_alloc() - Reserve buffer space for writing
- * @s: COMEDI subdevice.
- * @nbytes: Maximum space to reserve in bytes.
- *
- * Reserve up to @nbytes bytes of space to be written in the COMEDI acquisition
- * data buffer associated with the subdevice. The amount reserved is limited
- * by the space available.
- *
- * Return: The amount of space reserved in bytes.
- */
-unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
- unsigned int nbytes)
+unsigned int _comedi_buf_write_alloc(struct comedi_subdevice *s,
+ unsigned int nbytes)
{
struct comedi_async *async = s->async;
unsigned int unalloc = comedi_buf_write_n_unalloc(s);
@@ -303,6 +292,29 @@ unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
return nbytes;
}
+
+/**
+ * comedi_buf_write_alloc() - Reserve buffer space for writing
+ * @s: COMEDI subdevice.
+ * @nbytes: Maximum space to reserve in bytes.
+ *
+ * Reserve up to @nbytes bytes of space to be written in the COMEDI acquisition
+ * data buffer associated with the subdevice. The amount reserved is limited
+ * by the space available.
+ *
+ * Return: The amount of space reserved in bytes.
+ */
+unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
+ unsigned int nbytes)
+{
+ if (comedi_get_is_subdevice_running(s)) {
+ nbytes = _comedi_buf_write_alloc(s, nbytes);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ nbytes = 0;
+ }
+ return nbytes;
+}
EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);
/*
@@ -317,7 +329,7 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
unsigned int count = 0;
const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);
- if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
+ if (!s->munge || (async->cmd.flags & CMDF_RAWDATA) || async->cmd.chanlist_len == 0) {
async->munge_count += num_bytes;
return num_bytes;
}
@@ -362,6 +374,24 @@ unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
return async->buf_write_alloc_count - async->buf_write_count;
}
+unsigned int _comedi_buf_write_free(struct comedi_subdevice *s,
+ unsigned int nbytes)
+{
+ struct comedi_async *async = s->async;
+ unsigned int allocated = comedi_buf_write_n_allocated(s);
+
+ if (nbytes > allocated)
+ nbytes = allocated;
+
+ async->buf_write_count += nbytes;
+ async->buf_write_ptr += nbytes;
+ comedi_buf_munge(s, async->buf_write_count - async->munge_count);
+ if (async->buf_write_ptr >= async->prealloc_bufsz)
+ async->buf_write_ptr %= async->prealloc_bufsz;
+
+ return nbytes;
+}
+
/**
* comedi_buf_write_free() - Free buffer space after it is written
* @s: COMEDI subdevice.
@@ -380,21 +410,34 @@ unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
unsigned int comedi_buf_write_free(struct comedi_subdevice *s,
unsigned int nbytes)
{
+ if (comedi_get_is_subdevice_running(s)) {
+ nbytes = _comedi_buf_write_free(s, nbytes);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ nbytes = 0;
+ }
+ return nbytes;
+}
+EXPORT_SYMBOL_GPL(comedi_buf_write_free);
+
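All of the public wrappers reworked in this patch share one guard pattern: take a reference that pins the subdevice as running, perform the real work, drop the reference, and report zero bytes when the subdevice is not running. A hedged standalone sketch of that shape (only comedi_get/put_is_subdevice_running correspond to names in the patch; everything else is an illustrative stand-in):

    /* Sketch of the guarded-wrapper pattern used by the comedi_buf_* API. */
    #include <stdbool.h>
    #include <stdio.h>

    struct subdev { bool running; };

    /* stand-ins for comedi_get/put_is_subdevice_running() */
    static bool get_running_ref(struct subdev *s) { return s->running; }
    static void put_running_ref(struct subdev *s) { (void)s; }

    /* stand-in for the _comedi_buf_*() worker that does the real transfer */
    static unsigned int do_work_locked(struct subdev *s, unsigned int n)
    {
        (void)s;
        return n;
    }

    static unsigned int guarded_op(struct subdev *s, unsigned int n)
    {
        if (!get_running_ref(s))
            return 0; /* not running: report zero bytes transferred */
        n = do_work_locked(s, n);
        put_running_ref(s);
        return n;
    }

    int main(void)
    {
        struct subdev s = { .running = true };
        printf("%u\n", guarded_op(&s, 16)); /* prints 16 */
        return 0;
    }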
+unsigned int _comedi_buf_read_n_available(struct comedi_subdevice *s)
+{
struct comedi_async *async = s->async;
- unsigned int allocated = comedi_buf_write_n_allocated(s);
+ unsigned int num_bytes;
- if (nbytes > allocated)
- nbytes = allocated;
+ if (!async)
+ return 0;
- async->buf_write_count += nbytes;
- async->buf_write_ptr += nbytes;
- comedi_buf_munge(s, async->buf_write_count - async->munge_count);
- if (async->buf_write_ptr >= async->prealloc_bufsz)
- async->buf_write_ptr %= async->prealloc_bufsz;
+ num_bytes = async->munge_count - async->buf_read_count;
- return nbytes;
+ /*
+ * ensure the async buffer 'counts' are read before we
+ * attempt to read data from the buffer
+ */
+ smp_rmb();
+
+ return num_bytes;
}
-EXPORT_SYMBOL_GPL(comedi_buf_write_free);
/**
* comedi_buf_read_n_available() - Determine amount of readable buffer space
@@ -409,23 +452,38 @@ EXPORT_SYMBOL_GPL(comedi_buf_write_free);
*/
unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
{
- struct comedi_async *async = s->async;
unsigned int num_bytes;
- if (!async)
- return 0;
+ if (comedi_get_is_subdevice_running(s)) {
+ num_bytes = _comedi_buf_read_n_available(s);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ num_bytes = 0;
+ }
+ return num_bytes;
+}
+EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);
- num_bytes = async->munge_count - async->buf_read_count;
+unsigned int _comedi_buf_read_alloc(struct comedi_subdevice *s,
+ unsigned int nbytes)
+{
+ struct comedi_async *async = s->async;
+ unsigned int available;
+
+ available = async->munge_count - async->buf_read_alloc_count;
+ if (nbytes > available)
+ nbytes = available;
+
+ async->buf_read_alloc_count += nbytes;
/*
* ensure the async buffer 'counts' are read before we
- * attempt to read data from the buffer
+ * attempt to read data from the read-alloc'ed buffer space
*/
smp_rmb();
- return num_bytes;
+ return nbytes;
}
-EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);
/**
* comedi_buf_read_alloc() - Reserve buffer space for reading
@@ -445,21 +503,12 @@ EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);
unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s,
unsigned int nbytes)
{
- struct comedi_async *async = s->async;
- unsigned int available;
-
- available = async->munge_count - async->buf_read_alloc_count;
- if (nbytes > available)
- nbytes = available;
-
- async->buf_read_alloc_count += nbytes;
-
- /*
- * ensure the async buffer 'counts' are read before we
- * attempt to read data from the read-alloc'ed buffer space
- */
- smp_rmb();
-
+ if (comedi_get_is_subdevice_running(s)) {
+ nbytes = _comedi_buf_read_alloc(s, nbytes);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ nbytes = 0;
+ }
return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);
@@ -469,21 +518,8 @@ static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
return async->buf_read_alloc_count - async->buf_read_count;
}
-/**
- * comedi_buf_read_free() - Free buffer space after it has been read
- * @s: COMEDI subdevice.
- * @nbytes: Maximum space to free in bytes.
- *
- * Free up to @nbytes bytes of buffer space previously reserved for reading in
- * the COMEDI acquisition data buffer associated with the subdevice. The
- * amount of space freed is limited to the amount that was reserved.
- *
- * The freed space becomes available for allocation by the writer.
- *
- * Return: The amount of space freed in bytes.
- */
-unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
- unsigned int nbytes)
+unsigned int _comedi_buf_read_free(struct comedi_subdevice *s,
+ unsigned int nbytes)
{
struct comedi_async *async = s->async;
unsigned int allocated;
@@ -503,6 +539,31 @@ unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
async->buf_read_ptr %= async->prealloc_bufsz;
return nbytes;
}
+
+/**
+ * comedi_buf_read_free() - Free buffer space after it has been read
+ * @s: COMEDI subdevice.
+ * @nbytes: Maximum space to free in bytes.
+ *
+ * Free up to @nbytes bytes of buffer space previously reserved for reading in
+ * the COMEDI acquisition data buffer associated with the subdevice. The
+ * amount of space freed is limited to the amount that was reserved.
+ *
+ * The freed space becomes available for allocation by the writer.
+ *
+ * Return: The amount of space freed in bytes.
+ */
+unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
+ unsigned int nbytes)
+{
+ if (comedi_get_is_subdevice_running(s)) {
+ nbytes = _comedi_buf_read_free(s, nbytes);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ nbytes = 0;
+ }
+ return nbytes;
+}
EXPORT_SYMBOL_GPL(comedi_buf_read_free);
static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
@@ -558,6 +619,38 @@ static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
}
}
+static unsigned int _comedi_buf_write_samples(struct comedi_subdevice *s,
+ const void *data,
+ unsigned int nsamples)
+{
+ unsigned int max_samples;
+ unsigned int nbytes;
+
+ /*
+ * Make sure there is enough room in the buffer for all the samples.
+ * If not, clamp the nsamples to the number that will fit, flag the
+ * buffer overrun and add the samples that fit.
+ */
+ max_samples = comedi_bytes_to_samples(s, comedi_buf_write_n_unalloc(s));
+ if (nsamples > max_samples) {
+ dev_warn(s->device->class_dev, "buffer overrun\n");
+ s->async->events |= COMEDI_CB_OVERFLOW;
+ nsamples = max_samples;
+ }
+
+ if (nsamples == 0)
+ return 0;
+
+ nbytes = comedi_samples_to_bytes(s, nsamples);
+ nbytes = _comedi_buf_write_alloc(s, nbytes);
+ comedi_buf_memcpy_to(s, data, nbytes);
+ _comedi_buf_write_free(s, nbytes);
+ _comedi_inc_scan_progress(s, nbytes);
+ s->async->events |= COMEDI_CB_BLOCK;
+
+ return nbytes;
+}
+
/**
* comedi_buf_write_samples() - Write sample data to COMEDI buffer
* @s: COMEDI subdevice.
@@ -578,34 +671,42 @@ static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
const void *data, unsigned int nsamples)
{
+ unsigned int nbytes;
+
+ if (comedi_get_is_subdevice_running(s)) {
+ nbytes = _comedi_buf_write_samples(s, data, nsamples);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ nbytes = 0;
+ }
+ return nbytes;
+}
+EXPORT_SYMBOL_GPL(comedi_buf_write_samples);
+
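As a usage illustration (a sketch under assumed names, not part of this patch), a typical hardware interrupt handler feeds acquired data through the exported wrapper, which now pins the running command for the duration of the copy and simply returns 0 once the command has been torn down:

	static irqreturn_t example_isr(int irq, void *d)	/* hypothetical */
	{
		struct comedi_device *dev = d;
		struct comedi_subdevice *s = dev->read_subdev;
		unsigned short sample;

		sample = example_read_hw_fifo(dev);	/* hypothetical register read */
		comedi_buf_write_samples(s, &sample, 1);
		comedi_handle_events(dev, s);
		return IRQ_HANDLED;
	}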
+static unsigned int _comedi_buf_read_samples(struct comedi_subdevice *s,
+ void *data, unsigned int nsamples)
+{
unsigned int max_samples;
unsigned int nbytes;
- /*
- * Make sure there is enough room in the buffer for all the samples.
- * If not, clamp the nsamples to the number that will fit, flag the
- * buffer overrun and add the samples that fit.
- */
- max_samples = comedi_bytes_to_samples(s, comedi_buf_write_n_unalloc(s));
- if (nsamples > max_samples) {
- dev_warn(s->device->class_dev, "buffer overrun\n");
- s->async->events |= COMEDI_CB_OVERFLOW;
+ /* clamp nsamples to the number of full samples available */
+ max_samples = comedi_bytes_to_samples(s,
+ _comedi_buf_read_n_available(s));
+ if (nsamples > max_samples)
nsamples = max_samples;
- }
if (nsamples == 0)
return 0;
- nbytes = comedi_buf_write_alloc(s,
+ nbytes = _comedi_buf_read_alloc(s,
comedi_samples_to_bytes(s, nsamples));
- comedi_buf_memcpy_to(s, data, nbytes);
- comedi_buf_write_free(s, nbytes);
- comedi_inc_scan_progress(s, nbytes);
+ comedi_buf_memcpy_from(s, data, nbytes);
+ _comedi_buf_read_free(s, nbytes);
+ _comedi_inc_scan_progress(s, nbytes);
s->async->events |= COMEDI_CB_BLOCK;
return nbytes;
}
-EXPORT_SYMBOL_GPL(comedi_buf_write_samples);
/**
* comedi_buf_read_samples() - Read sample data from COMEDI buffer
@@ -624,25 +725,14 @@ EXPORT_SYMBOL_GPL(comedi_buf_write_samples);
unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
void *data, unsigned int nsamples)
{
- unsigned int max_samples;
unsigned int nbytes;
- /* clamp nsamples to the number of full samples available */
- max_samples = comedi_bytes_to_samples(s,
- comedi_buf_read_n_available(s));
- if (nsamples > max_samples)
- nsamples = max_samples;
-
- if (nsamples == 0)
- return 0;
-
- nbytes = comedi_buf_read_alloc(s,
- comedi_samples_to_bytes(s, nsamples));
- comedi_buf_memcpy_from(s, data, nbytes);
- comedi_buf_read_free(s, nbytes);
- comedi_inc_scan_progress(s, nbytes);
- s->async->events |= COMEDI_CB_BLOCK;
-
+ if (comedi_get_is_subdevice_running(s)) {
+ nbytes = _comedi_buf_read_samples(s, data, nsamples);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ nbytes = 0;
+ }
return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_samples);
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
index 3383a7ce27ff..657c98cd723e 100644
--- a/drivers/comedi/comedi_fops.c
+++ b/drivers/comedi/comedi_fops.c
@@ -38,6 +38,7 @@
* COMEDI_SRF_ERROR: indicates a COMEDI_CB_ERROR event has occurred
* since the last command was started
* COMEDI_SRF_RUNNING: command is running
+ * COMEDI_SRF_BUSY: command was started and subdevice still busy
* COMEDI_SRF_FREE_SPRIV: free s->private on detach
*
* COMEDI_SRF_BUSY_MASK: runflags that indicate the subdevice is "busy"
@@ -45,9 +46,11 @@
#define COMEDI_SRF_RT BIT(1)
#define COMEDI_SRF_ERROR BIT(2)
#define COMEDI_SRF_RUNNING BIT(27)
+#define COMEDI_SRF_BUSY BIT(28)
#define COMEDI_SRF_FREE_SPRIV BIT(31)
-#define COMEDI_SRF_BUSY_MASK (COMEDI_SRF_ERROR | COMEDI_SRF_RUNNING)
+#define COMEDI_SRF_BUSY_MASK \
+ (COMEDI_SRF_ERROR | COMEDI_SRF_RUNNING | COMEDI_SRF_BUSY)
/**
* struct comedi_file - Per-file private data for COMEDI device
@@ -665,6 +668,11 @@ static bool comedi_is_runflags_in_error(unsigned int runflags)
return runflags & COMEDI_SRF_ERROR;
}
+static bool comedi_is_runflags_busy(unsigned int runflags)
+{
+ return runflags & COMEDI_SRF_BUSY;
+}
+
/**
* comedi_is_subdevice_running() - Check if async command running on subdevice
* @s: COMEDI subdevice.
@@ -687,6 +695,46 @@ static bool __comedi_is_subdevice_running(struct comedi_subdevice *s)
return comedi_is_runflags_running(runflags);
}
+/**
+ * comedi_get_is_subdevice_running() - Get if async command running on subdevice
+ * @s: COMEDI subdevice.
+ *
+ * If an asynchronous COMEDI command is running on the subdevice, increment
+ * a reference counter. If the function return value indicates that a
+ * command is running, then the details of the command will not be destroyed
+ * before a matching call to comedi_put_is_subdevice_running().
+ *
+ * Return: %true if an asynchronous COMEDI command is active on the
+ * subdevice, else %false.
+ */
+bool comedi_get_is_subdevice_running(struct comedi_subdevice *s)
+{
+ unsigned long flags;
+ bool running;
+
+ spin_lock_irqsave(&s->spin_lock, flags);
+ running = __comedi_is_subdevice_running(s);
+ if (running)
+ refcount_inc(&s->async->run_active);
+ spin_unlock_irqrestore(&s->spin_lock, flags);
+ return running;
+}
+EXPORT_SYMBOL_GPL(comedi_get_is_subdevice_running);
+
+/**
+ * comedi_put_is_subdevice_running() - Put if async command running on subdevice
+ * @s: COMEDI subdevice.
+ *
+ * Decrements the reference counter that was incremented when
+ * comedi_get_is_subdevice_running() returned %true.
+ */
+void comedi_put_is_subdevice_running(struct comedi_subdevice *s)
+{
+ if (refcount_dec_and_test(&s->async->run_active))
+ complete_all(&s->async->run_complete);
+}
+EXPORT_SYMBOL_GPL(comedi_put_is_subdevice_running);
+
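For reference, every exported helper converted by this patch wraps its old body in the same guard shape; a minimal sketch, where example_locked_op stands in for any of the _comedi_* internals:

	unsigned int example_guarded_op(struct comedi_subdevice *s)
	{
		unsigned int result = 0;

		if (comedi_get_is_subdevice_running(s)) {
			/* the async command details are pinned until the put */
			result = example_locked_op(s);	/* hypothetical */
			comedi_put_is_subdevice_running(s);
		}
		return result;
	}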
bool comedi_can_auto_free_spriv(struct comedi_subdevice *s)
{
unsigned int runflags = __comedi_get_subdevice_runflags(s);
@@ -736,20 +784,28 @@ static void do_become_nonbusy(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
+ unsigned int runflags;
+ unsigned long flags;
lockdep_assert_held(&dev->mutex);
- comedi_update_subdevice_runflags(s, COMEDI_SRF_RUNNING, 0);
- if (async) {
+ spin_lock_irqsave(&s->spin_lock, flags);
+ runflags = __comedi_get_subdevice_runflags(s);
+ __comedi_clear_subdevice_runflags(s, COMEDI_SRF_RUNNING |
+ COMEDI_SRF_BUSY);
+ spin_unlock_irqrestore(&s->spin_lock, flags);
+ if (comedi_is_runflags_busy(runflags)) {
+ /*
+ * "Run active" counter was set to 1 when setting up the
+ * command. Decrement it and wait for it to become 0.
+ */
+ comedi_put_is_subdevice_running(s);
+ wait_for_completion(&async->run_complete);
comedi_buf_reset(s);
async->inttrig = NULL;
kfree(async->cmd.chanlist);
async->cmd.chanlist = NULL;
s->busy = NULL;
wake_up_interruptible_all(&async->wait_head);
- } else {
- dev_err(dev->class_dev,
- "BUG: (?) %s called with async=NULL\n", __func__);
- s->busy = NULL;
}
}
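Taken together, the run_active / run_complete handshake over a command's lifetime looks roughly like this (illustration only):

	/*
	 * do_cmd_ioctl():       refcount_set(&async->run_active, 1) and
	 *                       reinit_completion(&async->run_complete)
	 * each helper:          get -> refcount_inc(), put -> refcount_dec_and_test()
	 * do_become_nonbusy():  drops the initial reference, then waits on
	 *                       run_complete before freeing the chanlist and
	 *                       resetting the buffer
	 */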
@@ -787,6 +843,7 @@ static int is_device_busy(struct comedi_device *dev)
struct comedi_subdevice *s;
int i;
+ lockdep_assert_held_write(&dev->attach_lock);
lockdep_assert_held(&dev->mutex);
if (!dev->attached)
return 0;
@@ -795,7 +852,16 @@ static int is_device_busy(struct comedi_device *dev)
s = &dev->subdevices[i];
if (s->busy)
return 1;
- if (s->async && comedi_buf_is_mmapped(s))
+ if (!s->async)
+ continue;
+ if (comedi_buf_is_mmapped(s))
+ return 1;
+ /*
+ * There may be tasks still waiting on the subdevice's wait
+ * queue, although they should already be about to be removed
+ * from it since the subdevice has no active async command.
+ */
+ if (wq_has_sleeper(&s->async->wait_head))
return 1;
}
@@ -825,15 +891,22 @@ static int do_devconfig_ioctl(struct comedi_device *dev,
return -EPERM;
if (!arg) {
- if (is_device_busy(dev))
- return -EBUSY;
+ int rc = 0;
+
if (dev->attached) {
- struct module *driver_module = dev->driver->module;
+ down_write(&dev->attach_lock);
+ if (is_device_busy(dev)) {
+ rc = -EBUSY;
+ } else {
+ struct module *driver_module =
+ dev->driver->module;
- comedi_device_detach(dev);
- module_put(driver_module);
+ comedi_device_detach_locked(dev);
+ module_put(driver_module);
+ }
+ up_write(&dev->attach_lock);
}
- return 0;
+ return rc;
}
if (copy_from_user(&it, arg, sizeof(it)))
@@ -1133,15 +1206,15 @@ static int do_bufinfo_ioctl(struct comedi_device *dev,
if (!(async->cmd.flags & CMDF_WRITE)) {
/* command was set up in "read" direction */
if (bi.bytes_read) {
- comedi_buf_read_alloc(s, bi.bytes_read);
- bi.bytes_read = comedi_buf_read_free(s, bi.bytes_read);
+ _comedi_buf_read_alloc(s, bi.bytes_read);
+ bi.bytes_read = _comedi_buf_read_free(s, bi.bytes_read);
}
/*
* If nothing left to read, and command has stopped, and
* {"read" position not updated or command stopped normally},
* then become non-busy.
*/
- if (comedi_buf_read_n_available(s) == 0 &&
+ if (_comedi_buf_read_n_available(s) == 0 &&
!comedi_is_runflags_running(runflags) &&
(bi.bytes_read == 0 ||
!comedi_is_runflags_in_error(runflags))) {
@@ -1158,9 +1231,9 @@ static int do_bufinfo_ioctl(struct comedi_device *dev,
if (comedi_is_runflags_in_error(runflags))
retval = -EPIPE;
} else if (bi.bytes_written) {
- comedi_buf_write_alloc(s, bi.bytes_written);
+ _comedi_buf_write_alloc(s, bi.bytes_written);
bi.bytes_written =
- comedi_buf_write_free(s, bi.bytes_written);
+ _comedi_buf_write_free(s, bi.bytes_written);
}
bi.bytes_read = 0;
}
@@ -1556,21 +1629,30 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
}
for (i = 0; i < n_insns; ++i) {
+ unsigned int n = insns[i].n;
+
if (insns[i].insn & INSN_MASK_WRITE) {
if (copy_from_user(data, insns[i].data,
- insns[i].n * sizeof(unsigned int))) {
+ n * sizeof(unsigned int))) {
dev_dbg(dev->class_dev,
"copy_from_user failed\n");
ret = -EFAULT;
goto error;
}
+ if (n < MIN_SAMPLES) {
+ memset(&data[n], 0, (MIN_SAMPLES - n) *
+ sizeof(unsigned int));
+ }
+ } else {
+ memset(data, 0, max_t(unsigned int, n, MIN_SAMPLES) *
+ sizeof(unsigned int));
}
ret = parse_insn(dev, insns + i, data, file);
if (ret < 0)
goto error;
if (insns[i].insn & INSN_MASK_READ) {
if (copy_to_user(insns[i].data, data,
- insns[i].n * sizeof(unsigned int))) {
+ n * sizeof(unsigned int))) {
dev_dbg(dev->class_dev,
"copy_to_user failed\n");
ret = -EFAULT;
@@ -1589,6 +1671,16 @@ error:
return i;
}
+#define MAX_INSNS MAX_SAMPLES
+static int check_insnlist_len(struct comedi_device *dev, unsigned int n_insns)
+{
+ if (n_insns > MAX_INSNS) {
+ dev_dbg(dev->class_dev, "insnlist length too large\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
/*
* COMEDI_INSN ioctl
* synchronous instruction
@@ -1633,6 +1725,12 @@ static int do_insn_ioctl(struct comedi_device *dev,
ret = -EFAULT;
goto error;
}
+ if (insn->n < MIN_SAMPLES) {
+ memset(&data[insn->n], 0,
+ (MIN_SAMPLES - insn->n) * sizeof(unsigned int));
+ }
+ } else {
+ memset(data, 0, n_data * sizeof(unsigned int));
}
ret = parse_insn(dev, insn, data, file);
if (ret < 0)
@@ -1818,8 +1916,14 @@ static int do_cmd_ioctl(struct comedi_device *dev,
if (async->cmd.flags & CMDF_WAKE_EOS)
async->cb_mask |= COMEDI_CB_EOS;
+ /*
+ * Set the "run active" counter with an initial count of 1 that will
+ * complete the "safe to reset" event when it is decremented to 0.
+ */
+ refcount_set(&s->async->run_active, 1);
+ reinit_completion(&s->async->run_complete);
comedi_update_subdevice_runflags(s, COMEDI_SRF_BUSY_MASK,
- COMEDI_SRF_RUNNING);
+ COMEDI_SRF_RUNNING | COMEDI_SRF_BUSY);
/*
* Set s->busy _after_ setting COMEDI_SRF_RUNNING flag to avoid
@@ -2239,15 +2343,13 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
rc = -EFAULT;
break;
}
- insns = kcalloc(insnlist.n_insns, sizeof(*insns), GFP_KERNEL);
- if (!insns) {
- rc = -ENOMEM;
+ rc = check_insnlist_len(dev, insnlist.n_insns);
+ if (rc)
break;
- }
- if (copy_from_user(insns, insnlist.insns,
- sizeof(*insns) * insnlist.n_insns)) {
- rc = -EFAULT;
- kfree(insns);
+ insns = memdup_array_user(insnlist.insns, insnlist.n_insns,
+ sizeof(*insns));
+ if (IS_ERR(insns)) {
+ rc = PTR_ERR(insns);
break;
}
rc = do_insnlist_ioctl(dev, insns, insnlist.n_insns, file);
@@ -2467,7 +2569,7 @@ static __poll_t comedi_poll(struct file *file, poll_table *wait)
poll_wait(file, &s->async->wait_head, wait);
if (s->busy != file || !comedi_is_subdevice_running(s) ||
(s->async->cmd.flags & CMDF_WRITE) ||
- comedi_buf_read_n_available(s) > 0)
+ _comedi_buf_read_n_available(s) > 0)
mask |= EPOLLIN | EPOLLRDNORM;
}
@@ -2600,7 +2702,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
break;
/* Allocate all free buffer space. */
- comedi_buf_write_alloc(s, async->prealloc_bufsz);
+ _comedi_buf_write_alloc(s, async->prealloc_bufsz);
m = comedi_buf_write_n_allocated(s);
n = min_t(size_t, m, nbytes);
@@ -2628,7 +2730,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
n -= m;
retval = -EFAULT;
}
- comedi_buf_write_free(s, n);
+ _comedi_buf_write_free(s, n);
count += n;
nbytes -= n;
@@ -2714,7 +2816,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
while (count == 0 && !retval) {
set_current_state(TASK_INTERRUPTIBLE);
- m = comedi_buf_read_n_available(s);
+ m = _comedi_buf_read_n_available(s);
n = min_t(size_t, m, nbytes);
if (n == 0) {
@@ -2754,8 +2856,8 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
retval = -EFAULT;
}
- comedi_buf_read_alloc(s, n);
- comedi_buf_read_free(s, n);
+ _comedi_buf_read_alloc(s, n);
+ _comedi_buf_read_free(s, n);
count += n;
nbytes -= n;
@@ -2789,7 +2891,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
s == new_s && new_s->async == async && s->busy == file &&
!(async->cmd.flags & CMDF_WRITE) &&
!comedi_is_subdevice_running(s) &&
- comedi_buf_read_n_available(s) == 0)
+ _comedi_buf_read_n_available(s) == 0)
do_become_nonbusy(dev, s);
mutex_unlock(&dev->mutex);
}
@@ -2978,7 +3080,12 @@ static int compat_chaninfo(struct file *file, unsigned long arg)
chaninfo.rangelist = compat_ptr(chaninfo32.rangelist);
mutex_lock(&dev->mutex);
- err = do_chaninfo_ioctl(dev, &chaninfo);
+ if (!dev->attached) {
+ dev_dbg(dev->class_dev, "no driver attached\n");
+ err = -ENODEV;
+ } else {
+ err = do_chaninfo_ioctl(dev, &chaninfo);
+ }
mutex_unlock(&dev->mutex);
return err;
}
@@ -2999,7 +3106,12 @@ static int compat_rangeinfo(struct file *file, unsigned long arg)
rangeinfo.range_ptr = compat_ptr(rangeinfo32.range_ptr);
mutex_lock(&dev->mutex);
- err = do_rangeinfo_ioctl(dev, &rangeinfo);
+ if (!dev->attached) {
+ dev_dbg(dev->class_dev, "no driver attached\n");
+ err = -ENODEV;
+ } else {
+ err = do_rangeinfo_ioctl(dev, &rangeinfo);
+ }
mutex_unlock(&dev->mutex);
return err;
}
@@ -3075,7 +3187,12 @@ static int compat_cmd(struct file *file, unsigned long arg)
return rc;
mutex_lock(&dev->mutex);
- rc = do_cmd_ioctl(dev, &cmd, &copy, file);
+ if (!dev->attached) {
+ dev_dbg(dev->class_dev, "no driver attached\n");
+ rc = -ENODEV;
+ } else {
+ rc = do_cmd_ioctl(dev, &cmd, &copy, file);
+ }
mutex_unlock(&dev->mutex);
if (copy) {
/* Special case: copy cmd back to user. */
@@ -3100,7 +3217,12 @@ static int compat_cmdtest(struct file *file, unsigned long arg)
return rc;
mutex_lock(&dev->mutex);
- rc = do_cmdtest_ioctl(dev, &cmd, &copy, file);
+ if (!dev->attached) {
+ dev_dbg(dev->class_dev, "no driver attached\n");
+ rc = -ENODEV;
+ } else {
+ rc = do_cmdtest_ioctl(dev, &cmd, &copy, file);
+ }
mutex_unlock(&dev->mutex);
if (copy) {
err = put_compat_cmd(compat_ptr(arg), &cmd);
@@ -3142,6 +3264,9 @@ static int compat_insnlist(struct file *file, unsigned long arg)
if (copy_from_user(&insnlist32, compat_ptr(arg), sizeof(insnlist32)))
return -EFAULT;
+ rc = check_insnlist_len(dev, insnlist32.n_insns);
+ if (rc)
+ return rc;
insns = kcalloc(insnlist32.n_insns, sizeof(*insns), GFP_KERNEL);
if (!insns)
return -ENOMEM;
@@ -3157,7 +3282,12 @@ static int compat_insnlist(struct file *file, unsigned long arg)
}
mutex_lock(&dev->mutex);
- rc = do_insnlist_ioctl(dev, insns, insnlist32.n_insns, file);
+ if (!dev->attached) {
+ dev_dbg(dev->class_dev, "no driver attached\n");
+ rc = -ENODEV;
+ } else {
+ rc = do_insnlist_ioctl(dev, insns, insnlist32.n_insns, file);
+ }
mutex_unlock(&dev->mutex);
kfree(insns);
return rc;
@@ -3176,7 +3306,12 @@ static int compat_insn(struct file *file, unsigned long arg)
return rc;
mutex_lock(&dev->mutex);
- rc = do_insn_ioctl(dev, &insn, file);
+ if (!dev->attached) {
+ dev_dbg(dev->class_dev, "no driver attached\n");
+ rc = -ENODEV;
+ } else {
+ rc = do_insn_ioctl(dev, &insn, file);
+ }
mutex_unlock(&dev->mutex);
return rc;
}
@@ -3251,18 +3386,7 @@ static const struct file_operations comedi_fops = {
.llseek = noop_llseek,
};
-/**
- * comedi_event() - Handle events for asynchronous COMEDI command
- * @dev: COMEDI device.
- * @s: COMEDI subdevice.
- * Context: in_interrupt() (usually), @s->spin_lock spin-lock not held.
- *
- * If an asynchronous COMEDI command is active on the subdevice, process
- * any %COMEDI_CB_... event flags that have been set, usually by an
- * interrupt handler. These may change the run state of the asynchronous
- * command, wake a task, and/or send a %SIGIO signal.
- */
-void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
+void _comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
unsigned int events;
@@ -3298,6 +3422,25 @@ void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
if (si_code)
kill_fasync(&dev->async_queue, SIGIO, si_code);
}
+
+/**
+ * comedi_event() - Handle events for asynchronous COMEDI command
+ * @dev: COMEDI device.
+ * @s: COMEDI subdevice.
+ * Context: in_interrupt() (usually), @s->spin_lock spin-lock not held.
+ *
+ * If an asynchronous COMEDI command is active on the subdevice, process
+ * any %COMEDI_CB_... event flags that have been set, usually by an
+ * interrupt handler. These may change the run state of the asynchronous
+ * command, wake a task, and/or send a %SIGIO signal.
+ */
+void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
+{
+ if (comedi_get_is_subdevice_running(s)) {
+ _comedi_event(dev, s);
+ comedi_put_is_subdevice_running(s);
+ }
+}
EXPORT_SYMBOL_GPL(comedi_event);
/* Note: the ->mutex is pre-locked on successful return */
diff --git a/drivers/comedi/comedi_internal.h b/drivers/comedi/comedi_internal.h
index 9b3631a654c8..41a3b09f8f05 100644
--- a/drivers/comedi/comedi_internal.h
+++ b/drivers/comedi/comedi_internal.h
@@ -36,6 +36,18 @@ struct comedi_buf_map *
comedi_buf_map_from_subdev_get(struct comedi_subdevice *s);
unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s);
unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s);
+unsigned int _comedi_buf_write_alloc(struct comedi_subdevice *s,
+ unsigned int nbytes);
+unsigned int _comedi_buf_write_free(struct comedi_subdevice *s,
+ unsigned int nbytes);
+unsigned int _comedi_buf_read_n_available(struct comedi_subdevice *s);
+unsigned int _comedi_buf_read_alloc(struct comedi_subdevice *s,
+ unsigned int nbytes);
+unsigned int _comedi_buf_read_free(struct comedi_subdevice *s,
+ unsigned int nbytes);
+void _comedi_inc_scan_progress(struct comedi_subdevice *s,
+ unsigned int num_bytes);
+void _comedi_event(struct comedi_device *dev, struct comedi_subdevice *s);
void comedi_device_cancel_all(struct comedi_device *dev);
bool comedi_can_auto_free_spriv(struct comedi_subdevice *s);
@@ -50,6 +62,7 @@ extern struct mutex comedi_drivers_list_lock;
int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data);
+void comedi_device_detach_locked(struct comedi_device *dev);
void comedi_device_detach(struct comedi_device *dev);
int comedi_device_attach(struct comedi_device *dev,
struct comedi_devconfig *it);
diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c
index 376130bfba8a..69cd2a253c66 100644
--- a/drivers/comedi/drivers.c
+++ b/drivers/comedi/drivers.c
@@ -158,7 +158,7 @@ static void comedi_device_detach_cleanup(struct comedi_device *dev)
int i;
struct comedi_subdevice *s;
- lockdep_assert_held(&dev->attach_lock);
+ lockdep_assert_held_write(&dev->attach_lock);
lockdep_assert_held(&dev->mutex);
if (dev->subdevices) {
for (i = 0; i < dev->n_subdevices; i++) {
@@ -196,16 +196,23 @@ static void comedi_device_detach_cleanup(struct comedi_device *dev)
comedi_clear_hw_dev(dev);
}
-void comedi_device_detach(struct comedi_device *dev)
+void comedi_device_detach_locked(struct comedi_device *dev)
{
+ lockdep_assert_held_write(&dev->attach_lock);
lockdep_assert_held(&dev->mutex);
comedi_device_cancel_all(dev);
- down_write(&dev->attach_lock);
dev->attached = false;
dev->detach_count++;
if (dev->driver)
dev->driver->detach(dev);
comedi_device_detach_cleanup(dev);
+}
+
+void comedi_device_detach(struct comedi_device *dev)
+{
+ lockdep_assert_held(&dev->mutex);
+ down_write(&dev->attach_lock);
+ comedi_device_detach_locked(dev);
up_write(&dev->attach_lock);
}
@@ -339,10 +346,10 @@ int comedi_dio_insn_config(struct comedi_device *dev,
unsigned int *data,
unsigned int mask)
{
- unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec);
+ unsigned int chan = CR_CHAN(insn->chanspec);
- if (!mask)
- mask = chan_mask;
+ if (!mask && chan < 32)
+ mask = 1U << chan;
switch (data[0]) {
case INSN_CONFIG_DIO_INPUT:
@@ -382,7 +389,7 @@ EXPORT_SYMBOL_GPL(comedi_dio_insn_config);
unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
unsigned int *data)
{
- unsigned int chanmask = (s->n_chan < 32) ? ((1 << s->n_chan) - 1)
+ unsigned int chanmask = (s->n_chan < 32) ? ((1U << s->n_chan) - 1)
: 0xffffffff;
unsigned int mask = data[0] & chanmask;
unsigned int bits = data[1];
@@ -434,6 +441,13 @@ unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
}
EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
+static unsigned int _comedi_bytes_per_scan(struct comedi_subdevice *s)
+{
+ struct comedi_cmd *cmd = &s->async->cmd;
+
+ return comedi_bytes_per_scan_cmd(s, cmd);
+}
+
/**
* comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
* @s: COMEDI subdevice.
@@ -451,9 +465,16 @@ EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
*/
unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
{
- struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int num_bytes;
- return comedi_bytes_per_scan_cmd(s, cmd);
+ if (comedi_get_is_subdevice_running(s)) {
+ num_bytes = _comedi_bytes_per_scan(s);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ /* Use nominal, single-sample scan length. */
+ num_bytes = comedi_samples_to_bytes(s, 1);
+ }
+ return num_bytes;
}
EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
@@ -475,6 +496,17 @@ static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
return nscans;
}
+static unsigned int _comedi_nscans_left(struct comedi_subdevice *s,
+ unsigned int nscans)
+{
+ if (nscans == 0) {
+ unsigned int nbytes = _comedi_buf_read_n_available(s);
+
+ nscans = nbytes / _comedi_bytes_per_scan(s);
+ }
+ return __comedi_nscans_left(s, nscans);
+}
+
/**
* comedi_nscans_left() - Return the number of scans left in the command
* @s: COMEDI subdevice.
@@ -492,25 +524,18 @@ static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
unsigned int comedi_nscans_left(struct comedi_subdevice *s,
unsigned int nscans)
{
- if (nscans == 0) {
- unsigned int nbytes = comedi_buf_read_n_available(s);
-
- nscans = nbytes / comedi_bytes_per_scan(s);
+ if (comedi_get_is_subdevice_running(s)) {
+ nscans = _comedi_nscans_left(s, nscans);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ nscans = 0;
}
- return __comedi_nscans_left(s, nscans);
+ return nscans;
}
EXPORT_SYMBOL_GPL(comedi_nscans_left);
-/**
- * comedi_nsamples_left() - Return the number of samples left in the command
- * @s: COMEDI subdevice.
- * @nsamples: The expected number of samples.
- *
- * Returns the number of samples remaining to complete the command, or the
- * specified expected number of samples (@nsamples), whichever is fewer.
- */
-unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
- unsigned int nsamples)
+static unsigned int _comedi_nsamples_left(struct comedi_subdevice *s,
+ unsigned int nsamples)
{
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
@@ -531,24 +556,34 @@ unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
return samples_left;
return nsamples;
}
-EXPORT_SYMBOL_GPL(comedi_nsamples_left);
/**
- * comedi_inc_scan_progress() - Update scan progress in asynchronous command
+ * comedi_nsamples_left() - Return the number of samples left in the command
* @s: COMEDI subdevice.
- * @num_bytes: Amount of data in bytes to increment scan progress.
+ * @nsamples: The expected number of samples.
*
- * Increments the scan progress by the number of bytes specified by @num_bytes.
- * If the scan progress reaches or exceeds the scan length in bytes, reduce
- * it modulo the scan length in bytes and set the "end of scan" asynchronous
- * event flag (%COMEDI_CB_EOS) to be processed later.
+ * Returns the number of samples remaining to complete the command, or the
+ * specified expected number of samples (@nsamples), whichever is fewer.
*/
-void comedi_inc_scan_progress(struct comedi_subdevice *s,
- unsigned int num_bytes)
+unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
+ unsigned int nsamples)
+{
+ if (comedi_get_is_subdevice_running(s)) {
+ nsamples = _comedi_nsamples_left(s, nsamples);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ nsamples = 0;
+ }
+ return nsamples;
+}
+EXPORT_SYMBOL_GPL(comedi_nsamples_left);
+
+void _comedi_inc_scan_progress(struct comedi_subdevice *s,
+ unsigned int num_bytes)
{
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- unsigned int scan_length = comedi_bytes_per_scan(s);
+ unsigned int scan_length = _comedi_bytes_per_scan(s);
/* track the 'cur_chan' for non-SDF_PACKED subdevices */
if (!(s->subdev_flags & SDF_PACKED)) {
@@ -569,8 +604,43 @@ void comedi_inc_scan_progress(struct comedi_subdevice *s,
async->events |= COMEDI_CB_EOS;
}
}
+
+/**
+ * comedi_inc_scan_progress() - Update scan progress in asynchronous command
+ * @s: COMEDI subdevice.
+ * @num_bytes: Amount of data in bytes to increment scan progress.
+ *
+ * Increments the scan progress by the number of bytes specified by @num_bytes.
+ * If the scan progress reaches or exceeds the scan length in bytes, reduce
+ * it modulo the scan length in bytes and set the "end of scan" asynchronous
+ * event flag (%COMEDI_CB_EOS) to be processed later.
+ */
+void comedi_inc_scan_progress(struct comedi_subdevice *s,
+ unsigned int num_bytes)
+{
+ if (comedi_get_is_subdevice_running(s)) {
+ _comedi_inc_scan_progress(s, num_bytes);
+ comedi_put_is_subdevice_running(s);
+ }
+}
EXPORT_SYMBOL_GPL(comedi_inc_scan_progress);
+static unsigned int _comedi_handle_events(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ unsigned int events = s->async->events;
+
+ if (events == 0)
+ return events;
+
+ if ((events & COMEDI_CB_CANCEL_MASK) && s->cancel)
+ s->cancel(dev, s);
+
+ _comedi_event(dev, s);
+
+ return events;
+}
+
/**
* comedi_handle_events() - Handle events and possibly stop acquisition
* @dev: COMEDI device.
@@ -590,16 +660,14 @@ EXPORT_SYMBOL_GPL(comedi_inc_scan_progress);
unsigned int comedi_handle_events(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- unsigned int events = s->async->events;
-
- if (events == 0)
- return events;
-
- if ((events & COMEDI_CB_CANCEL_MASK) && s->cancel)
- s->cancel(dev, s);
-
- comedi_event(dev, s);
+ unsigned int events;
+ if (comedi_get_is_subdevice_running(s)) {
+ events = _comedi_handle_events(dev, s);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ events = 0;
+ }
return events;
}
EXPORT_SYMBOL_GPL(comedi_handle_events);
@@ -613,6 +681,7 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int base_chan = (chan < 32) ? 0 : chan;
unsigned int _data[2];
+ unsigned int i;
int ret;
memset(_data, 0, sizeof(_data));
@@ -625,18 +694,21 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
if (insn->insn == INSN_WRITE) {
if (!(s->subdev_flags & SDF_WRITABLE))
return -EINVAL;
- _data[0] = 1 << (chan - base_chan); /* mask */
- _data[1] = data[0] ? (1 << (chan - base_chan)) : 0; /* bits */
+ _data[0] = 1U << (chan - base_chan); /* mask */
}
+ for (i = 0; i < insn->n; i++) {
+ if (insn->insn == INSN_WRITE)
+ _data[1] = data[i] ? _data[0] : 0; /* bits */
- ret = s->insn_bits(dev, s, &_insn, _data);
- if (ret < 0)
- return ret;
+ ret = s->insn_bits(dev, s, &_insn, _data);
+ if (ret < 0)
+ return ret;
- if (insn->insn == INSN_READ)
- data[0] = (_data[1] >> (chan - base_chan)) & 1;
+ if (insn->insn == INSN_READ)
+ data[i] = (_data[1] >> (chan - base_chan)) & 1;
+ }
- return 1;
+ return insn->n;
}
static int __comedi_device_postconfig_async(struct comedi_device *dev,
@@ -666,6 +738,7 @@ static int __comedi_device_postconfig_async(struct comedi_device *dev,
return -ENOMEM;
init_waitqueue_head(&async->wait_head);
+ init_completion(&async->run_complete);
s->async = async;
async->max_bufsize = comedi_default_buf_maxsize_kb * 1024;
@@ -709,7 +782,7 @@ static int __comedi_device_postconfig(struct comedi_device *dev)
if (s->type == COMEDI_SUBD_DO) {
if (s->n_chan < 32)
- s->io_bits = (1 << s->n_chan) - 1;
+ s->io_bits = (1U << s->n_chan) - 1;
else
s->io_bits = 0xffffffff;
}
diff --git a/drivers/comedi/drivers/8255.c b/drivers/comedi/drivers/8255.c
index f45f7bd1c61a..5f70938b4477 100644
--- a/drivers/comedi/drivers/8255.c
+++ b/drivers/comedi/drivers/8255.c
@@ -77,19 +77,17 @@ static int dev_8255_attach(struct comedi_device *dev,
* base address of the chip.
*/
ret = __comedi_request_region(dev, iobase, I8255_SIZE);
+ if (ret)
+ return ret;
+ ret = subdev_8255_io_init(dev, s, iobase);
if (ret) {
+ /*
+ * Release the I/O port region here, as the
+ * "detach" handler cannot find it.
+ */
+ release_region(iobase, I8255_SIZE);
s->type = COMEDI_SUBD_UNUSED;
- } else {
- ret = subdev_8255_io_init(dev, s, iobase);
- if (ret) {
- /*
- * Release the I/O port region here, as the
- * "detach" handler cannot find it.
- */
- release_region(iobase, I8255_SIZE);
- s->type = COMEDI_SUBD_UNUSED;
- return ret;
- }
+ return ret;
}
}
diff --git a/drivers/comedi/drivers/Makefile b/drivers/comedi/drivers/Makefile
index b24ac00cab73..7b99a431330d 100644
--- a/drivers/comedi/drivers/Makefile
+++ b/drivers/comedi/drivers/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_COMEDI_ADDI_APCI_3120) += addi_apci_3120.o
obj-$(CONFIG_COMEDI_ADDI_APCI_3501) += addi_apci_3501.o
obj-$(CONFIG_COMEDI_ADDI_APCI_3XXX) += addi_apci_3xxx.o
obj-$(CONFIG_COMEDI_ADL_PCI6208) += adl_pci6208.o
+obj-$(CONFIG_COMEDI_ADL_PCI7250) += adl_pci7250.o
obj-$(CONFIG_COMEDI_ADL_PCI7X3X) += adl_pci7x3x.o
obj-$(CONFIG_COMEDI_ADL_PCI8164) += adl_pci8164.o
obj-$(CONFIG_COMEDI_ADL_PCI9111) += adl_pci9111.o
diff --git a/drivers/comedi/drivers/adl_pci7250.c b/drivers/comedi/drivers/adl_pci7250.c
new file mode 100644
index 000000000000..78c85a402435
--- /dev/null
+++ b/drivers/comedi/drivers/adl_pci7250.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * adl_pci7250.c
+ *
+ * Comedi driver for ADLink PCI-7250 series cards.
+ *
+ * Copyright (C) 2015, 2025 Ian Abbott <abbotti@mev.co.uk>
+ */
+
+/*
+ * Driver: adl_pci7250
+ * Description: Driver for the ADLINK PCI-7250 relay output & digital input card
+ * Devices: [ADLINK] PCI-7250 (adl_pci7250) LPCI-7250 LPCIe-7250
+ * Author: Ian Abbott <abbotti@mev.co.uk>
+ * Status: works
+ * Updated: Mon, 02 Jun 2025 13:54:11 +0100
+ *
+ * The driver assumes that 3 PCI-7251 modules are fitted to the PCI-7250,
+ * giving 32 channels of relay outputs and 32 channels of isolated digital
+ * inputs. That is also the case for the LPCI-7250 and older LPCIe-7250
+ * cards although they do not physically support the PCI-7251 modules.
+ * Newer LPCIe-7250 cards have a different PCI subsystem device ID, so
+ * set the number of channels to 8 for these cards.
+ *
+ * Not fitting the PCI-7251 modules shouldn't do any harm, but the extra
+ * inputs and relay outputs won't work!
+ *
+ * Configuration Options: not applicable, uses PCI auto config
+ */
+
+#include <linux/module.h>
+#include <linux/comedi/comedi_pci.h>
+
+static unsigned char adl_pci7250_read8(struct comedi_device *dev,
+ unsigned int offset)
+{
+#ifdef CONFIG_HAS_IOPORT
+ if (!dev->mmio)
+ return inb(dev->iobase + offset);
+#endif
+ return readb(dev->mmio + offset);
+}
+
+static void adl_pci7250_write8(struct comedi_device *dev, unsigned int offset,
+ unsigned char val)
+{
+#ifdef CONFIG_HAS_IOPORT
+ if (!dev->mmio) {
+ outb(val, dev->iobase + offset);
+ return;
+ }
+#endif
+ writeb(val, dev->mmio + offset);
+}
+
+static int adl_pci7250_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int mask = comedi_dio_update_state(s, data);
+
+ if (mask) {
+ unsigned int state = s->state;
+ unsigned int i;
+
+ for (i = 0; i * 8 < s->n_chan; i++) {
+ if ((mask & 0xffu) != 0) {
+ /* write relay data to even offset registers */
+ adl_pci7250_write8(dev, i * 2, state & 0xffu);
+ }
+ state >>= 8;
+ mask >>= 8;
+ }
+ }
+
+ data[1] = s->state;
+
+ return 2;
+}
+
+static int adl_pci7250_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int value = 0;
+ unsigned int i;
+
+ for (i = 0; i * 8 < s->n_chan; i++) {
+ /* read DI value from odd offset registers */
+ value |= (unsigned int)adl_pci7250_read8(dev, i * 2 + 1) <<
+ (i * 8);
+ }
+
+ data[1] = value;
+
+ return 2;
+}
+
+static int pci7250_auto_attach(struct comedi_device *dev,
+ unsigned long context_unused)
+{
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
+ struct comedi_subdevice *s;
+ unsigned int max_chans;
+ unsigned int i;
+ int ret;
+
+ ret = comedi_pci_enable(dev);
+ if (ret)
+ return ret;
+
+ if (pci_resource_len(pcidev, 2) < 8)
+ return -ENXIO;
+
+ /*
+ * Newer LPCIe-7250 boards use MMIO. Older LPCIe-7250, LPCI-7250, and
+ * PCI-7250 boards use Port I/O.
+ */
+ if (pci_resource_flags(pcidev, 2) & IORESOURCE_MEM) {
+ dev->mmio = pci_ioremap_bar(pcidev, 2);
+ if (!dev->mmio)
+ return -ENOMEM;
+ } else if (IS_ENABLED(CONFIG_HAS_IOPORT)) {
+ dev->iobase = pci_resource_start(pcidev, 2);
+ } else {
+ dev_err(dev->class_dev,
+ "error! need I/O port support\n");
+ return -ENXIO;
+ }
+
+ if (pcidev->subsystem_device == 0x7000) {
+ /*
+ * This is a newer LPCIe-7250 variant and cannot possibly
+ * have PCI-7251 modules fitted, so limit the number of
+ * channels to 8.
+ */
+ max_chans = 8;
+ } else {
+ /*
+ * It is unknown whether the board is a PCI-7250, an LPCI-7250,
+ * or an older LPCIe-7250 variant, so treat it as a PCI-7250
+ * and assume it can have PCI-7251 modules fitted to increase
+ * the number of channels to a maximum of 32.
+ */
+ max_chans = 32;
+ }
+
+ ret = comedi_alloc_subdevices(dev, 2);
+ if (ret)
+ return ret;
+
+ /* Relay digital output. */
+ s = &dev->subdevices[0];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = max_chans;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = adl_pci7250_do_insn_bits;
+ /* Read initial state of relays from the even offset registers. */
+ s->state = 0;
+ for (i = 0; i * 8 < max_chans; i++) {
+ s->state |= (unsigned int)adl_pci7250_read8(dev, i * 2) <<
+ (i * 8);
+ }
+
+ /* Isolated digital input. */
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = max_chans;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = adl_pci7250_di_insn_bits;
+
+ return 0;
+}
+
+static struct comedi_driver adl_pci7250_driver = {
+ .driver_name = "adl_pci7250",
+ .module = THIS_MODULE,
+ .auto_attach = pci7250_auto_attach,
+ .detach = comedi_pci_detach,
+};
+
+static int adl_pci7250_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ return comedi_pci_auto_config(dev, &adl_pci7250_driver,
+ id->driver_data);
+}
+
+static const struct pci_device_id adl_pci7250_pci_table[] = {
+#ifdef CONFIG_HAS_IOPORT
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ 0x9999, 0x7250) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADLINK, 0x7250,
+ 0x9999, 0x7250) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADLINK, 0x7250,
+ PCI_VENDOR_ID_ADLINK, 0x7250) },
+#endif
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADLINK, 0x7250,
+ PCI_VENDOR_ID_ADLINK, 0x7000) }, /* newer LPCIe-7250 */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, adl_pci7250_pci_table);
+
+static struct pci_driver adl_pci7250_pci_driver = {
+ .name = "adl_pci7250",
+ .id_table = adl_pci7250_pci_table,
+ .probe = adl_pci7250_pci_probe,
+ .remove = comedi_pci_auto_unconfig,
+};
+module_comedi_pci_driver(adl_pci7250_driver, adl_pci7250_pci_driver);
+
+MODULE_AUTHOR("Comedi https://www.comedi.org");
+MODULE_DESCRIPTION("Comedi driver for ADLink PCI-7250 series boards");
+MODULE_LICENSE("GPL");
diff --git a/drivers/comedi/drivers/aio_iiro_16.c b/drivers/comedi/drivers/aio_iiro_16.c
index b00fab0b89d4..739cc4db52ac 100644
--- a/drivers/comedi/drivers/aio_iiro_16.c
+++ b/drivers/comedi/drivers/aio_iiro_16.c
@@ -177,7 +177,8 @@ static int aio_iiro_16_attach(struct comedi_device *dev,
* Digital input change of state interrupts are optionally supported
* using IRQ 2-7, 10-12, 14, or 15.
*/
- if ((1 << it->options[1]) & 0xdcfc) {
+ if (it->options[1] > 0 && it->options[1] < 16 &&
+ (1 << it->options[1]) & 0xdcfc) {
ret = request_irq(it->options[1], aio_iiro_16_cos, 0,
dev->board_name, dev);
if (ret == 0)
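The same bound-plus-mask guard recurs in the das16m1, das6402, pcl726 and pcl812 changes below. The point in each case is that it->options[1] is user-supplied and "1 << n" is undefined in C for negative n or n at or beyond the bit width of int, so the range check must come before the shift; the upper bound of 16 also matches the widest IRQ present in each driver's mask. Schematically (valid_irq_mask and example_handler stand in for the per-driver constants and ISR, e.g. 0xdcfc here):

	int irq = it->options[1];

	/* bound the user-supplied value before shifting by it */
	if (irq > 0 && irq < 16 && ((1U << irq) & valid_irq_mask))
		ret = request_irq(irq, example_handler, 0, dev->board_name, dev);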
diff --git a/drivers/comedi/drivers/c6xdigio.c b/drivers/comedi/drivers/c6xdigio.c
index 14b90d1c64dc..8a38d97d463b 100644
--- a/drivers/comedi/drivers/c6xdigio.c
+++ b/drivers/comedi/drivers/c6xdigio.c
@@ -249,9 +249,6 @@ static int c6xdigio_attach(struct comedi_device *dev,
if (ret)
return ret;
- /* Make sure that PnP ports get activated */
- pnp_register_driver(&c6xdigio_pnp_driver);
-
s = &dev->subdevices[0];
/* pwm output subdevice */
s->type = COMEDI_SUBD_PWM;
@@ -278,19 +275,46 @@ static int c6xdigio_attach(struct comedi_device *dev,
return 0;
}
-static void c6xdigio_detach(struct comedi_device *dev)
-{
- comedi_legacy_detach(dev);
- pnp_unregister_driver(&c6xdigio_pnp_driver);
-}
-
static struct comedi_driver c6xdigio_driver = {
.driver_name = "c6xdigio",
.module = THIS_MODULE,
.attach = c6xdigio_attach,
- .detach = c6xdigio_detach,
+ .detach = comedi_legacy_detach,
};
-module_comedi_driver(c6xdigio_driver);
+
+static bool c6xdigio_pnp_registered;
+
+static int __init c6xdigio_module_init(void)
+{
+ int ret;
+
+ ret = comedi_driver_register(&c6xdigio_driver);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_PNP)) {
+ /* Try to activate the PnP ports */
+ ret = pnp_register_driver(&c6xdigio_pnp_driver);
+ if (ret) {
+ pr_warn("failed to register pnp driver - err %d\n",
+ ret);
+ ret = 0; /* ignore the error. */
+ } else {
+ c6xdigio_pnp_registered = true;
+ }
+ }
+
+ return 0;
+}
+module_init(c6xdigio_module_init);
+
+static void __exit c6xdigio_module_exit(void)
+{
+ if (c6xdigio_pnp_registered)
+ pnp_unregister_driver(&c6xdigio_pnp_driver);
+ comedi_driver_unregister(&c6xdigio_driver);
+}
+module_exit(c6xdigio_module_exit);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi driver for the C6x_DIGIO DSP daughter card");
diff --git a/drivers/comedi/drivers/comedi_bond.c b/drivers/comedi/drivers/comedi_bond.c
index 78c39fa84177..30650fa36fff 100644
--- a/drivers/comedi/drivers/comedi_bond.c
+++ b/drivers/comedi/drivers/comedi_bond.c
@@ -205,7 +205,7 @@ static int do_dev_config(struct comedi_device *dev, struct comedi_devconfig *it)
snprintf(file, sizeof(file), "/dev/comedi%d", minor);
file[sizeof(file) - 1] = 0;
- d = comedi_open(file);
+ d = comedi_open_from(file, dev->minor);
if (!d) {
dev_err(dev->class_dev,
@@ -326,7 +326,7 @@ static void bonding_detach(struct comedi_device *dev)
if (!bdev)
continue;
if (!test_and_set_bit(bdev->minor, devs_closed))
- comedi_close(bdev->dev);
+ comedi_close_from(bdev->dev, dev->minor);
kfree(bdev);
}
kfree(devpriv->devs);
diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c
index 9747e6d1f6eb..7984950f0f99 100644
--- a/drivers/comedi/drivers/comedi_test.c
+++ b/drivers/comedi/drivers/comedi_test.c
@@ -792,7 +792,7 @@ static void waveform_detach(struct comedi_device *dev)
{
struct waveform_private *devpriv = dev->private;
- if (devpriv) {
+ if (devpriv && dev->n_subdevices) {
timer_delete_sync(&devpriv->ai_timer);
timer_delete_sync(&devpriv->ao_timer);
}
diff --git a/drivers/comedi/drivers/das16m1.c b/drivers/comedi/drivers/das16m1.c
index b8ea737ad3d1..1b638f5b5a4f 100644
--- a/drivers/comedi/drivers/das16m1.c
+++ b/drivers/comedi/drivers/das16m1.c
@@ -522,7 +522,8 @@ static int das16m1_attach(struct comedi_device *dev,
devpriv->extra_iobase = dev->iobase + DAS16M1_8255_IOBASE;
/* only irqs 2, 3, 4, 5, 6, 7, 10, 11, 12, 14, and 15 are valid */
- if ((1 << it->options[1]) & 0xdcfc) {
+ if (it->options[1] >= 2 && it->options[1] <= 15 &&
+ (1 << it->options[1]) & 0xdcfc) {
ret = request_irq(it->options[1], das16m1_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
diff --git a/drivers/comedi/drivers/das6402.c b/drivers/comedi/drivers/das6402.c
index 68f95330de45..7660487e563c 100644
--- a/drivers/comedi/drivers/das6402.c
+++ b/drivers/comedi/drivers/das6402.c
@@ -567,7 +567,8 @@ static int das6402_attach(struct comedi_device *dev,
das6402_reset(dev);
/* IRQs 2,3,5,6,7, 10,11,15 are valid for "enhanced" mode */
- if ((1 << it->options[1]) & 0x8cec) {
+ if (it->options[1] > 0 && it->options[1] < 16 &&
+ (1 << it->options[1]) & 0x8cec) {
ret = request_irq(it->options[1], das6402_interrupt, 0,
dev->board_name, dev);
if (ret == 0) {
diff --git a/drivers/comedi/drivers/multiq3.c b/drivers/comedi/drivers/multiq3.c
index 07ff5383da99..ac369e9a262d 100644
--- a/drivers/comedi/drivers/multiq3.c
+++ b/drivers/comedi/drivers/multiq3.c
@@ -67,6 +67,11 @@
#define MULTIQ3_TRSFRCNTR_OL 0x10 /* xfer CNTR to OL (x and y) */
#define MULTIQ3_EFLAG_RESET 0x06 /* reset E bit of flag reg */
+/*
+ * Limit on the number of optional encoder channels
+ */
+#define MULTIQ3_MAX_ENC_CHANS 8
+
static void multiq3_set_ctrl(struct comedi_device *dev, unsigned int bits)
{
/*
@@ -312,6 +317,10 @@ static int multiq3_attach(struct comedi_device *dev,
s->insn_read = multiq3_encoder_insn_read;
s->insn_config = multiq3_encoder_insn_config;
+ /* sanity check for number of encoder channels */
+ if (s->n_chan > MULTIQ3_MAX_ENC_CHANS)
+ s->n_chan = MULTIQ3_MAX_ENC_CHANS;
+
for (i = 0; i < s->n_chan; i++)
multiq3_encoder_reset(dev, i);
diff --git a/drivers/comedi/drivers/ni_670x.c b/drivers/comedi/drivers/ni_670x.c
index c875d251c230..563a9c790f12 100644
--- a/drivers/comedi/drivers/ni_670x.c
+++ b/drivers/comedi/drivers/ni_670x.c
@@ -199,7 +199,7 @@ static int ni_670x_auto_attach(struct comedi_device *dev,
const struct comedi_lrange **range_table_list;
range_table_list = kmalloc_array(32,
- sizeof(struct comedi_lrange *),
+ sizeof(*range_table_list),
GFP_KERNEL);
if (!range_table_list)
return -ENOMEM;
diff --git a/drivers/comedi/drivers/pcl726.c b/drivers/comedi/drivers/pcl726.c
index 0430630e6ebb..b542896fa0e4 100644
--- a/drivers/comedi/drivers/pcl726.c
+++ b/drivers/comedi/drivers/pcl726.c
@@ -328,7 +328,8 @@ static int pcl726_attach(struct comedi_device *dev,
* Hook up the external trigger source interrupt only if the
* user config option is valid and the board supports interrupts.
*/
- if (it->options[1] && (board->irq_mask & (1 << it->options[1]))) {
+ if (it->options[1] > 0 && it->options[1] < 16 &&
+ (board->irq_mask & (1U << it->options[1]))) {
ret = request_irq(it->options[1], pcl726_interrupt, 0,
dev->board_name, dev);
if (ret == 0) {
diff --git a/drivers/comedi/drivers/pcl812.c b/drivers/comedi/drivers/pcl812.c
index 0df639c6a595..abca61a72cf7 100644
--- a/drivers/comedi/drivers/pcl812.c
+++ b/drivers/comedi/drivers/pcl812.c
@@ -1149,7 +1149,8 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (IS_ERR(dev->pacer))
return PTR_ERR(dev->pacer);
- if ((1 << it->options[1]) & board->irq_bits) {
+ if (it->options[1] > 0 && it->options[1] < 16 &&
+ (1 << it->options[1]) & board->irq_bits) {
ret = request_irq(it->options[1], pcl812_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
diff --git a/drivers/comedi/drivers/pcl818.c b/drivers/comedi/drivers/pcl818.c
index 4127adcfb229..06fe06396f23 100644
--- a/drivers/comedi/drivers/pcl818.c
+++ b/drivers/comedi/drivers/pcl818.c
@@ -1111,10 +1111,9 @@ static void pcl818_detach(struct comedi_device *dev)
{
struct pcl818_private *devpriv = dev->private;
- if (devpriv) {
- pcl818_ai_cancel(dev, dev->read_subdev);
+ if (devpriv)
pcl818_reset(dev);
- }
+
pcl818_free_dma(dev);
comedi_legacy_detach(dev);
}
diff --git a/drivers/comedi/kcomedilib/kcomedilib_main.c b/drivers/comedi/kcomedilib/kcomedilib_main.c
index 43fbe1a63b14..baa9eaaf97d4 100644
--- a/drivers/comedi/kcomedilib/kcomedilib_main.c
+++ b/drivers/comedi/kcomedilib/kcomedilib_main.c
@@ -15,6 +15,7 @@
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/io.h>
+#include <linux/bitmap.h>
#include <linux/comedi.h>
#include <linux/comedi/comedidev.h>
@@ -24,7 +25,104 @@ MODULE_AUTHOR("David Schleef <ds@schleef.org>");
MODULE_DESCRIPTION("Comedi kernel library");
MODULE_LICENSE("GPL");
-struct comedi_device *comedi_open(const char *filename)
+static DEFINE_MUTEX(kcomedilib_to_from_lock);
+
+/*
+ * Row index is the "to" node, column index is the "from" node, element value
+ * is the number of links from the "from" node to the "to" node.
+ */
+static unsigned char
+ kcomedilib_to_from[COMEDI_NUM_BOARD_MINORS][COMEDI_NUM_BOARD_MINORS];
+
+static bool kcomedilib_set_link_from_to(unsigned int from, unsigned int to)
+{
+ DECLARE_BITMAP(destinations[2], COMEDI_NUM_BOARD_MINORS);
+ unsigned int cur = 0;
+ bool okay = true;
+
+ /*
+ * Allow "from" node to be out of range (no loop checking),
+ * but require "to" node to be in range.
+ */
+ if (to >= COMEDI_NUM_BOARD_MINORS)
+ return false;
+ if (from >= COMEDI_NUM_BOARD_MINORS)
+ return true;
+
+ /*
+ * Check that kcomedilib_to_from[to][from] can be made non-zero
+ * without creating a loop.
+ *
+ * Termination of the loop-testing code relies on the assumption that
+ * kcomedilib_to_from[][] does not contain any loops.
+ *
+ * Start with a destination set containing "from" as its only element
+ * and work backwards looking for loops.
+ */
+ bitmap_zero(destinations[cur], COMEDI_NUM_BOARD_MINORS);
+ set_bit(from, destinations[cur]);
+ mutex_lock(&kcomedilib_to_from_lock);
+ do {
+ unsigned int next = 1 - cur;
+ unsigned int t = 0;
+
+ if (test_bit(to, destinations[cur])) {
+ /* Loop detected. */
+ okay = false;
+ break;
+ }
+ /* Create next set of destinations. */
+ bitmap_zero(destinations[next], COMEDI_NUM_BOARD_MINORS);
+ while ((t = find_next_bit(destinations[cur],
+ COMEDI_NUM_BOARD_MINORS,
+ t)) < COMEDI_NUM_BOARD_MINORS) {
+ unsigned int f;
+
+ for (f = 0; f < COMEDI_NUM_BOARD_MINORS; f++) {
+ if (kcomedilib_to_from[t][f])
+ set_bit(f, destinations[next]);
+ }
+ t++;
+ }
+ cur = next;
+ } while (!bitmap_empty(destinations[cur], COMEDI_NUM_BOARD_MINORS));
+ if (okay) {
+ /* Allow a maximum of 255 links from "from" to "to". */
+ if (kcomedilib_to_from[to][from] < 255)
+ kcomedilib_to_from[to][from]++;
+ else
+ okay = false;
+ }
+ mutex_unlock(&kcomedilib_to_from_lock);
+ return okay;
+}
+
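A concrete trace of the loop check (illustration only): suppose device 0 has already opened device 1, so kcomedilib_to_from[1][0] == 1, and device 1 now tries to open device 0:

	/*
	 * kcomedilib_set_link_from_to(from = 1, to = 0):
	 *   destinations = {1};  test_bit(0, {1}) -> no loop yet
	 *   predecessors of {1} = {0}  (kcomedilib_to_from[1][0] != 0)
	 *   destinations = {0};  test_bit(0, {0}) -> loop detected, refused
	 */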
+static void kcomedilib_clear_link_from_to(unsigned int from, unsigned int to)
+{
+ if (to < COMEDI_NUM_BOARD_MINORS && from < COMEDI_NUM_BOARD_MINORS) {
+ mutex_lock(&kcomedilib_to_from_lock);
+ if (kcomedilib_to_from[to][from])
+ kcomedilib_to_from[to][from]--;
+ mutex_unlock(&kcomedilib_to_from_lock);
+ }
+}
+
+/**
+ * comedi_open_from() - Open a COMEDI device from the kernel with loop checks
+ * @filename: Fake pathname of the form "/dev/comediN".
+ * @from: Device number it is being opened from (if in range).
+ *
+ * Converts @filename to a COMEDI device number and "opens" it if it exists
+ * and is attached to a low-level COMEDI driver.
+ *
+ * If @from is in range, refuse to open the device if doing so would form a
+ * loop of devices opening each other. There is also a limit of 255 on the
+ * number of concurrent opens from one device to another.
+ *
+ * Return: A pointer to the COMEDI device on success, or %NULL on failure.
+ */
+struct comedi_device *comedi_open_from(const char *filename, int from)
{
struct comedi_device *dev, *retval = NULL;
unsigned int minor;
@@ -43,7 +141,7 @@ struct comedi_device *comedi_open(const char *filename)
return NULL;
down_read(&dev->attach_lock);
- if (dev->attached)
+ if (dev->attached && kcomedilib_set_link_from_to(from, minor))
retval = dev;
else
retval = NULL;
@@ -54,14 +152,26 @@ struct comedi_device *comedi_open(const char *filename)
return retval;
}
-EXPORT_SYMBOL_GPL(comedi_open);
+EXPORT_SYMBOL_GPL(comedi_open_from);
-int comedi_close(struct comedi_device *dev)
+/**
+ * comedi_close_from() - Close a COMEDI device from the kernel with loop checks
+ * @dev: COMEDI device.
+ * @from: Device number it was opened from (if in range).
+ *
+ * Closes a COMEDI device previously opened by comedi_open_from().
+ *
+ * If @from is in range, it should be match the one used by comedi_open_from().
+ *
+ * Returns: 0
+ */
+int comedi_close_from(struct comedi_device *dev, int from)
{
+ kcomedilib_clear_link_from_to(from, dev->minor);
comedi_dev_put(dev);
return 0;
}
-EXPORT_SYMBOL_GPL(comedi_close);
+EXPORT_SYMBOL_GPL(comedi_close_from);
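A sketch of the intended caller, modelled on the comedi_bond change above (example_bond_one is hypothetical): a device that opens another device passes its own minor, so that any chain of opens that could reach back to it is refused at open time:

	static int example_bond_one(struct comedi_device *dev, const char *file)
	{
		struct comedi_device *d = comedi_open_from(file, dev->minor);

		if (!d)
			return -ENODEV;	/* unattached, loop, or 255-link limit */
		/* ... record d for later use by the bonding device ... */
		return comedi_close_from(d, dev->minor);
	}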
static int comedi_do_insn(struct comedi_device *dev,
struct comedi_insn *insn,
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index 1a299d1f350b..19d457ae4c3b 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -451,7 +451,7 @@ static void mchp_tc_irq_remove(void *ptr)
static int mchp_tc_irq_enable(struct counter_device *const counter, int irq)
{
struct mchp_tc_data *const priv = counter_priv(counter);
- int ret = devm_request_irq(counter->parent, irq, mchp_tc_isr, 0,
+ int ret = devm_request_irq(counter->parent, irq, mchp_tc_isr, IRQF_SHARED,
dev_name(counter->parent), counter);
if (ret < 0)
diff --git a/drivers/counter/ti-ecap-capture.c b/drivers/counter/ti-ecap-capture.c
index 3faaf7f60539..3586a7ab9887 100644
--- a/drivers/counter/ti-ecap-capture.c
+++ b/drivers/counter/ti-ecap-capture.c
@@ -465,11 +465,6 @@ static irqreturn_t ecap_cnt_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void ecap_cnt_pm_disable(void *dev)
-{
- pm_runtime_disable(dev);
-}
-
static int ecap_cnt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -523,12 +518,9 @@ static int ecap_cnt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, counter_dev);
- pm_runtime_enable(dev);
-
- /* Register a cleanup callback to care for disabling PM */
- ret = devm_add_action_or_reset(dev, ecap_cnt_pm_disable, dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
- return dev_err_probe(dev, ret, "failed to add pm disable action\n");
+ return ret;
ret = devm_counter_add(dev, counter_dev);
if (ret)
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0d46402e3094..9be0503df55a 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -28,7 +28,6 @@ config ARM_APPLE_SOC_CPUFREQ
tristate "Apple Silicon SoC CPUFreq support"
depends on ARCH_APPLE || (COMPILE_TEST && 64BIT)
select PM_OPP
- default ARCH_APPLE
help
This adds the CPUFreq driver for Apple Silicon machines
(e.g. Apple M1).
@@ -238,7 +237,7 @@ config ARM_TEGRA20_CPUFREQ
This adds the CPUFreq driver support for Tegra20/30 SOCs.
config ARM_TEGRA124_CPUFREQ
- bool "Tegra124 CPUFreq support"
+ tristate "Tegra124 CPUFreq support"
depends on ARCH_TEGRA || COMPILE_TEST
depends on CPUFREQ_DT
default ARCH_TEGRA
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index d38526b8e063..681d687b5a18 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_CPUFREQ_VIRT) += virtual-cpufreq.o
# Traces
CFLAGS_amd-pstate-trace.o := -I$(src)
+CFLAGS_powernv-cpufreq.o := -I$(src)
amd_pstate-y := amd-pstate.o amd-pstate-trace.o
##################################################################################
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 4f7f9201598d..e73a66785d69 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -318,7 +318,6 @@ static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
return cmd.val;
}
-/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
struct drv_cmd *cmd = _cmd;
@@ -335,14 +334,8 @@ static void drv_write(struct acpi_cpufreq_data *data,
.val = val,
.func.write = data->cpu_freq_write,
};
- int this_cpu;
- this_cpu = get_cpu();
- if (cpumask_test_cpu(this_cpu, mask))
- do_drv_write(&cmd);
-
- smp_call_function_many(mask, do_drv_write, &cmd, 1);
- put_cpu();
+ on_each_cpu_mask(mask, do_drv_write, &cmd, true);
}
static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
@@ -402,7 +395,7 @@ static unsigned int check_freqs(struct cpufreq_policy *policy,
cur_freq = extract_freq(policy, get_cur_val(mask, data));
if (cur_freq == freq)
return 1;
- udelay(10);
+ usleep_range(10, 15);
}
return 0;
}
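
on_each_cpu_mask() runs the callback on every CPU in the mask, including the calling CPU when it is a member, and handles preemption internally, so the open-coded get_cpu()/cpumask_test_cpu()/smp_call_function_many()/put_cpu() sequence collapses into one call. The shape of the pattern, with hypothetical names:

#include <linux/smp.h>
#include <linux/types.h>

struct my_cmd {
	u64 val;
};

static void my_write_on_cpu(void *info)
{
	struct my_cmd *cmd = info;

	/* per-CPU work, e.g. writing cmd->val to a CPU-local register */
}

static void my_broadcast(const struct cpumask *mask, u64 val)
{
	struct my_cmd cmd = { .val = val };

	/* The final 'true' waits until all CPUs have run the callback. */
	on_each_cpu_mask(mask, my_write_on_cpu, &cmd, true);
}
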
diff --git a/drivers/cpufreq/airoha-cpufreq.c b/drivers/cpufreq/airoha-cpufreq.c
index 4fe39eadd163..b6b1cdc4d11d 100644
--- a/drivers/cpufreq/airoha-cpufreq.c
+++ b/drivers/cpufreq/airoha-cpufreq.c
@@ -107,6 +107,7 @@ static struct platform_driver airoha_cpufreq_driver = {
};
static const struct of_device_id airoha_cpufreq_match_list[] __initconst = {
+ { .compatible = "airoha,an7583" },
{ .compatible = "airoha,en7581" },
{},
};
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index f3477ab37742..c45bc98721d2 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -65,13 +65,13 @@ static const char * const amd_pstate_mode_string[] = {
[AMD_PSTATE_PASSIVE] = "passive",
[AMD_PSTATE_ACTIVE] = "active",
[AMD_PSTATE_GUIDED] = "guided",
- NULL,
};
+static_assert(ARRAY_SIZE(amd_pstate_mode_string) == AMD_PSTATE_MAX);
const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode)
{
- if (mode < 0 || mode >= AMD_PSTATE_MAX)
- return NULL;
+ if (mode < AMD_PSTATE_UNDEFINED || mode >= AMD_PSTATE_MAX)
+ mode = AMD_PSTATE_UNDEFINED;
return amd_pstate_mode_string[mode];
}
EXPORT_SYMBOL_GPL(amd_pstate_get_mode_string);
@@ -110,6 +110,7 @@ enum energy_perf_value_index {
EPP_INDEX_BALANCE_PERFORMANCE,
EPP_INDEX_BALANCE_POWERSAVE,
EPP_INDEX_POWERSAVE,
+ EPP_INDEX_MAX,
};
static const char * const energy_perf_strings[] = {
@@ -118,8 +119,8 @@ static const char * const energy_perf_strings[] = {
[EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
[EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
[EPP_INDEX_POWERSAVE] = "power",
- NULL
};
+static_assert(ARRAY_SIZE(energy_perf_strings) == EPP_INDEX_MAX);
static unsigned int epp_values[] = {
[EPP_INDEX_DEFAULT] = 0,
@@ -127,7 +128,8 @@ static unsigned int epp_values[] = {
[EPP_INDEX_BALANCE_PERFORMANCE] = AMD_CPPC_EPP_BALANCE_PERFORMANCE,
[EPP_INDEX_BALANCE_POWERSAVE] = AMD_CPPC_EPP_BALANCE_POWERSAVE,
[EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
- };
+};
+static_assert(ARRAY_SIZE(epp_values) == EPP_INDEX_MAX);
typedef int (*cppc_mode_transition_fn)(int);
@@ -183,7 +185,7 @@ static inline int get_mode_idx_from_str(const char *str, size_t size)
{
int i;
- for (i=0; i < AMD_PSTATE_MAX; i++) {
+ for (i = 0; i < AMD_PSTATE_MAX; i++) {
if (!strncmp(str, amd_pstate_mode_string[i], size))
return i;
}
@@ -826,6 +828,13 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
if (!amd_pstate_prefcore)
return;
+ /* should use amd-hfi instead */
+ if (cpu_feature_enabled(X86_FEATURE_AMD_WORKLOAD_CLASS) &&
+ IS_ENABLED(CONFIG_AMD_HFI)) {
+ amd_pstate_prefcore = false;
+ return;
+ }
+
cpudata->hw_prefcore = true;
/* Priorities must be initialized before ITMT support can be toggled on. */
@@ -865,10 +874,10 @@ static void amd_pstate_update_limits(struct cpufreq_policy *policy)
*/
static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
{
- u32 transition_delay_ns;
+ int transition_delay_ns;
transition_delay_ns = cppc_get_transition_latency(cpu);
- if (transition_delay_ns == CPUFREQ_ETERNAL) {
+ if (transition_delay_ns < 0) {
if (cpu_feature_enabled(X86_FEATURE_AMD_FAST_CPPC))
return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY;
else
@@ -884,10 +893,10 @@ static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
*/
static u32 amd_pstate_get_transition_latency(unsigned int cpu)
{
- u32 transition_latency;
+ int transition_latency;
transition_latency = cppc_get_transition_latency(cpu);
- if (transition_latency == CPUFREQ_ETERNAL)
+ if (transition_latency < 0)
return AMD_PSTATE_TRANSITION_LATENCY;
return transition_latency;
@@ -1130,16 +1139,15 @@ static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
static ssize_t show_energy_performance_available_preferences(
struct cpufreq_policy *policy, char *buf)
{
- int i = 0;
- int offset = 0;
+ int offset = 0, i;
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
return sysfs_emit_at(buf, offset, "%s\n",
energy_perf_strings[EPP_INDEX_PERFORMANCE]);
- while (energy_perf_strings[i] != NULL)
- offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
+ for (i = 0; i < ARRAY_SIZE(energy_perf_strings); i++)
+ offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i]);
offset += sysfs_emit_at(buf, offset, "\n");
@@ -1150,15 +1158,10 @@ static ssize_t store_energy_performance_preference(
struct cpufreq_policy *policy, const char *buf, size_t count)
{
struct amd_cpudata *cpudata = policy->driver_data;
- char str_preference[21];
ssize_t ret;
u8 epp;
- ret = sscanf(buf, "%20s", str_preference);
- if (ret != 1)
- return -EINVAL;
-
- ret = match_string(energy_perf_strings, -1, str_preference);
+ ret = sysfs_match_string(energy_perf_strings, buf);
if (ret < 0)
return -EINVAL;
@@ -1275,7 +1278,7 @@ static int amd_pstate_change_mode_without_dvr_change(int mode)
if (cpu_feature_enabled(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
return 0;
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
cppc_set_auto_sel(cpu, (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
}
@@ -1346,9 +1349,8 @@ int amd_pstate_update_status(const char *buf, size_t size)
return -EINVAL;
mode_idx = get_mode_idx_from_str(buf, size);
-
- if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
- return -EINVAL;
+ if (mode_idx < 0)
+ return mode_idx;
if (mode_state_machine[cppc_state][mode_idx]) {
guard(mutex)(&amd_pstate_driver_lock);
@@ -1547,13 +1549,15 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
pr_debug("CPU %d exiting\n", policy->cpu);
}
-static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
+static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy, bool policy_change)
{
struct amd_cpudata *cpudata = policy->driver_data;
union perf_cached perf;
u8 epp;
- if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+ if (policy_change ||
+ policy->min != cpudata->min_limit_freq ||
+ policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
@@ -1577,7 +1581,7 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
cpudata->policy = policy->policy;
- ret = amd_pstate_epp_update_limit(policy);
+ ret = amd_pstate_epp_update_limit(policy, true);
if (ret)
return ret;
@@ -1605,7 +1609,11 @@ static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
* min_perf value across kexec reboots. If this CPU is just onlined normally after this, the
* limits, epp and desired perf will get reset to the cached values in cpudata struct
*/
- return amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+ return amd_pstate_update_perf(policy, perf.bios_min_perf,
+ FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached),
+ false);
}
static int amd_pstate_suspend(struct cpufreq_policy *policy)
@@ -1619,13 +1627,14 @@ static int amd_pstate_suspend(struct cpufreq_policy *policy)
* min_perf value across kexec reboots. If this CPU is just resumed back without kexec,
* the limits, epp and desired perf will get reset to the cached values in cpudata struct
*/
- ret = amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+ ret = amd_pstate_update_perf(policy, perf.bios_min_perf,
+ FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached),
+ false);
if (ret)
return ret;
- /* invalidate to ensure it's rewritten during resume */
- cpudata->cppc_req_cached = 0;
-
/* set this flag to avoid taking the core offline */
cpudata->suspended = true;
@@ -1651,7 +1660,7 @@ static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
int ret;
/* enable amd pstate from suspend state */
- ret = amd_pstate_epp_update_limit(policy);
+ ret = amd_pstate_epp_update_limit(policy, false);
if (ret)
return ret;
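
Dropping the NULL sentinels works because every consumer now iterates by the enum bound (ARRAY_SIZE() or *_MAX), and the static_assert() turns a table that drifts out of sync with its enum into a build failure; sysfs_match_string() additionally copes with the trailing newline that sysfs writes carry, replacing the sscanf()/match_string() pair. The table pattern in isolation, with a hypothetical enum:

#include <linux/build_bug.h>
#include <linux/kernel.h>

enum my_mode {
	MY_MODE_OFF,
	MY_MODE_ON,
	MY_MODE_AUTO,
	MY_MODE_MAX,		/* count only, not a real mode */
};

static const char * const my_mode_names[] = {
	[MY_MODE_OFF]	= "off",
	[MY_MODE_ON]	= "on",
	[MY_MODE_AUTO]	= "auto",
};

/* Build breaks if an enum value is added without a matching string. */
static_assert(ARRAY_SIZE(my_mode_names) == MY_MODE_MAX);
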
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index f28a4435fba7..0efe403a5980 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -265,7 +265,7 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
*/
target_vm = avs_map[l0_vdd_min] - 100;
- target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ target_vm = max(target_vm, MIN_VOLT_MV);
dvfs->avs[1] = armada_37xx_avs_val_match(target_vm);
/*
@@ -273,7 +273,7 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
* be larger than 1000mv
*/
target_vm = avs_map[l0_vdd_min] - 150;
- target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ target_vm = max(target_vm, MIN_VOLT_MV);
dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
/*
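
max() from <linux/minmax.h> is the type-checked replacement for the open-coded ternary: each argument is evaluated exactly once, and the build complains if the two argument types disagree. In miniature, with a hypothetical floor value:

#include <linux/minmax.h>

#define MY_MIN_VOLT_MV	1000	/* hypothetical floor, for illustration */

static int my_clamp_voltage(int target_mv)
{
	return max(target_mv, MY_MIN_VOLT_MV);
}
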
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index 5a3545bd0d8d..d96c1718f7f8 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -103,7 +103,7 @@ static void armada_8k_cpufreq_free_table(struct freq_table *freq_tables)
{
int opps_index, nb_cpus = num_possible_cpus();
- for (opps_index = 0 ; opps_index <= nb_cpus; opps_index++) {
+ for (opps_index = 0 ; opps_index < nb_cpus; opps_index++) {
int i;
/* If cpu_dev is NULL then we reached the end of the array */
@@ -132,7 +132,7 @@ static int __init armada_8k_cpufreq_init(void)
int ret = 0, opps_index = 0, cpu, nb_cpus;
struct freq_table *freq_tables;
struct device_node *node;
- static struct cpumask cpus;
+ static struct cpumask cpus, shared_cpus;
node = of_find_matching_node_and_match(NULL, armada_8k_cpufreq_of_match,
NULL);
@@ -154,7 +154,6 @@ static int __init armada_8k_cpufreq_init(void)
* divisions of it).
*/
for_each_cpu(cpu, &cpus) {
- struct cpumask shared_cpus;
struct device *cpu_dev;
struct clk *clk;
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 7b841a086acc..71450cca8e9f 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -480,7 +480,7 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct private_data *priv;
if (!policy)
@@ -488,8 +488,6 @@ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
priv = policy->driver_data;
- cpufreq_cpu_put(policy);
-
return brcm_avs_get_frequency(priv->base);
}
@@ -765,7 +763,7 @@ static void brcm_avs_cpufreq_remove(struct platform_device *pdev)
}
static const struct of_device_id brcm_avs_cpufreq_match[] = {
- { .compatible = BRCM_AVS_CPU_DATA },
+ { .compatible = "brcm,avs-cpu-data-mem" },
{ }
};
MODULE_DEVICE_TABLE(of, brcm_avs_cpufreq_match);
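
The __free(put_cpufreq_policy) annotation hands the reference taken by cpufreq_cpu_get() to a scope-based cleanup, so cpufreq_cpu_put() runs automatically on every exit path and the explicit put, along with its early-return leak hazard, disappears. The pattern, as a sketch:

#include <linux/cleanup.h>
#include <linux/cpufreq.h>

static unsigned int my_get_cur_freq(unsigned int cpu)
{
	/* The reference is dropped automatically when policy leaves scope. */
	struct cpufreq_policy *policy __free(put_cpufreq_policy) =
						cpufreq_cpu_get(cpu);

	if (!policy)
		return 0;

	return policy->cur;
}

The same conversion recurs throughout the cppc_cpufreq, cpufreq core, and intel_pstate hunks below.
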
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index b7c688a5659c..9eac77c4f294 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -26,14 +26,6 @@
#include <acpi/cppc_acpi.h>
-/*
- * This list contains information parsed from per CPU ACPI _CPC and _PSD
- * structures: e.g. the highest and lowest supported performance, capabilities,
- * desired performance, level requested etc. Depending on the share_type, not
- * all CPUs will have an entry in the list.
- */
-static LIST_HEAD(cpu_data_list);
-
static struct cpufreq_driver cppc_cpufreq_driver;
#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
@@ -58,8 +50,7 @@ struct cppc_freq_invariance {
static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
static struct kthread_worker *kworker_fie;
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1);
/**
@@ -95,8 +86,7 @@ static void cppc_scale_freq_workfn(struct kthread_work *work)
return;
}
- perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
- &fb_ctrs);
+ perf = cppc_perf_from_fbctrs(&cppc_fi->prev_perf_fb_ctrs, &fb_ctrs);
if (!perf)
return;
@@ -152,16 +142,15 @@ static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs);
- if (ret) {
- pr_warn("%s: failed to read perf counters for cpu:%d: %d\n",
- __func__, cpu, ret);
- /*
- * Don't abort if the CPU was offline while the driver
- * was getting registered.
- */
- if (cpu_online(cpu))
- return;
+ /*
+ * Don't abort as the CPU was offline while the driver was
+ * getting registered.
+ */
+ if (ret && cpu_online(cpu)) {
+ pr_debug("%s: failed to read perf counters for cpu:%d: %d\n",
+ __func__, cpu, ret);
+ return;
}
}
@@ -318,6 +307,16 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy)
return 0;
}
+static unsigned int __cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
+{
+ int transition_latency_ns = cppc_get_transition_latency(cpu);
+
+ if (transition_latency_ns < 0)
+ return CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS / NSEC_PER_USEC;
+
+ return transition_latency_ns / NSEC_PER_USEC;
+}
+
/*
* The PCC subspace describes the rate at which platform can accept commands
* on the shared PCC channel (including READs which do not count towards freq
@@ -340,19 +339,18 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
return 10000;
}
}
- return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+ return __cppc_cpufreq_get_transition_delay_us(cpu);
}
#else
static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
- return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+ return __cppc_cpufreq_get_transition_delay_us(cpu);
}
#endif
#if defined(CONFIG_ARM64) && defined(CONFIG_ENERGY_MODEL)
static DEFINE_PER_CPU(unsigned int, efficiency_class);
-static void cppc_cpufreq_register_em(struct cpufreq_policy *policy);
/* Create an artificial performance state every CPPC_EM_CAP_STEP capacity unit. */
#define CPPC_EM_CAP_STEP (20)
@@ -488,7 +486,19 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
return 0;
}
-static int populate_efficiency_class(void)
+static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
+{
+ struct cppc_cpudata *cpu_data;
+ struct em_data_callback em_cb =
+ EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);
+
+ cpu_data = policy->driver_data;
+ em_dev_register_perf_domain(get_cpu_device(policy->cpu),
+ get_perf_level_count(policy), &em_cb,
+ cpu_data->shared_cpu_map, 0);
+}
+
+static void populate_efficiency_class(void)
{
struct acpi_madt_generic_interrupt *gicc;
DECLARE_BITMAP(used_classes, 256) = {};
@@ -503,7 +513,7 @@ static int populate_efficiency_class(void)
if (bitmap_weight(used_classes, 256) <= 1) {
pr_debug("Efficiency classes are all equal (=%d). "
"No EM registered", class);
- return -EINVAL;
+ return;
}
/*
@@ -520,26 +530,11 @@ static int populate_efficiency_class(void)
index++;
}
cppc_cpufreq_driver.register_em = cppc_cpufreq_register_em;
-
- return 0;
-}
-
-static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
-{
- struct cppc_cpudata *cpu_data;
- struct em_data_callback em_cb =
- EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);
-
- cpu_data = policy->driver_data;
- em_dev_register_perf_domain(get_cpu_device(policy->cpu),
- get_perf_level_count(policy), &em_cb,
- cpu_data->shared_cpu_map, 0);
}
#else
-static int populate_efficiency_class(void)
+static void populate_efficiency_class(void)
{
- return 0;
}
#endif
@@ -567,8 +562,6 @@ static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
goto free_mask;
}
- list_add(&cpu_data->node, &cpu_data_list);
-
return cpu_data;
free_mask:
@@ -583,7 +576,6 @@ static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy)
{
struct cppc_cpudata *cpu_data = policy->driver_data;
- list_del(&cpu_data->node);
free_cpumask_var(cpu_data->shared_cpu_map);
kfree(cpu_data);
policy->driver_data = NULL;
@@ -699,8 +691,7 @@ static inline u64 get_delta(u64 t1, u64 t0)
return (u32)t1 - (u32)t0;
}
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1)
{
u64 delta_reference, delta_delivered;
@@ -740,8 +731,8 @@ static int cppc_get_perf_ctrs_sample(int cpu,
static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct cppc_cpudata *cpu_data;
u64 delivered_perf;
int ret;
@@ -751,8 +742,6 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
cpu_data = policy->driver_data;
- cpufreq_cpu_put(policy);
-
ret = cppc_get_perf_ctrs_sample(cpu, &fb_ctrs_t0, &fb_ctrs_t1);
if (ret) {
if (ret == -EFAULT)
@@ -762,8 +751,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
return 0;
}
- delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
- &fb_ctrs_t1);
+ delivered_perf = cppc_perf_from_fbctrs(&fb_ctrs_t0, &fb_ctrs_t1);
if (!delivered_perf)
goto out_invalid_counters;
@@ -925,7 +913,7 @@ static struct freq_attr *cppc_cpufreq_attr[] = {
};
static struct cpufreq_driver cppc_cpufreq_driver = {
- .flags = CPUFREQ_CONST_LOOPS,
+ .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
.verify = cppc_verify_policy,
.target = cppc_cpufreq_set_target,
.get = cppc_cpufreq_get_rate,
@@ -954,24 +942,10 @@ static int __init cppc_cpufreq_init(void)
return ret;
}
-static inline void free_cpu_data(void)
-{
- struct cppc_cpudata *iter, *tmp;
-
- list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
- free_cpumask_var(iter->shared_cpu_map);
- list_del(&iter->node);
- kfree(iter);
- }
-
-}
-
static void __exit cppc_cpufreq_exit(void)
{
cpufreq_unregister_driver(&cppc_cpufreq_driver);
cppc_freq_invariance_exit();
-
- free_cpu_data();
}
module_exit(cppc_cpufreq_exit);
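
With the global cpu_data_list gone, each policy exclusively owns its cppc_cpudata through policy->driver_data, so teardown is symmetric with init and module exit no longer needs a list walk. The ownership pattern in miniature, with hypothetical driver data:

#include <linux/cpufreq.h>
#include <linux/slab.h>

struct my_cpudata {
	int example_field;	/* hypothetical per-policy state */
};

static int my_cpu_init(struct cpufreq_policy *policy)
{
	struct my_cpudata *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	policy->driver_data = data;	/* the policy is the sole owner */
	return 0;
}

static void my_cpu_exit(struct cpufreq_policy *policy)
{
	kfree(policy->driver_data);	/* symmetric teardown, no global list */
	policy->driver_data = NULL;
}
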
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index a010da0f6337..a1d11ecd1ac8 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -87,6 +87,7 @@ static const struct of_device_id allowlist[] __initconst = {
{ .compatible = "st-ericsson,u9540", },
{ .compatible = "starfive,jh7110", },
+ { .compatible = "starfive,jh7110s", },
{ .compatible = "ti,omap2", },
{ .compatible = "ti,omap4", },
@@ -103,6 +104,7 @@ static const struct of_device_id allowlist[] __initconst = {
* platforms using "operating-points-v2" property.
*/
static const struct of_device_id blocklist[] __initconst = {
+ { .compatible = "airoha,an7583", },
{ .compatible = "airoha,en7581", },
{ .compatible = "allwinner,sun50i-a100" },
@@ -143,6 +145,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "nvidia,tegra20", },
{ .compatible = "nvidia,tegra30", },
+ { .compatible = "nvidia,tegra114", },
{ .compatible = "nvidia,tegra124", },
{ .compatible = "nvidia,tegra210", },
{ .compatible = "nvidia,tegra234", },
@@ -187,9 +190,11 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "ti,omap3", },
{ .compatible = "ti,am625", },
{ .compatible = "ti,am62a7", },
+ { .compatible = "ti,am62d2", },
{ .compatible = "ti,am62p5", },
{ .compatible = "qcom,ipq5332", },
+ { .compatible = "qcom,ipq5424", },
{ .compatible = "qcom,ipq6018", },
{ .compatible = "qcom,ipq8064", },
{ .compatible = "qcom,ipq8074", },
@@ -214,20 +219,13 @@ static bool __init cpu0_node_has_opp_v2_prop(void)
static int __init cpufreq_dt_platdev_init(void)
{
- struct device_node *np __free(device_node) = of_find_node_by_path("/");
- const struct of_device_id *match;
- const void *data = NULL;
+ const void *data;
- if (!np)
- return -ENODEV;
-
- match = of_match_node(allowlist, np);
- if (match) {
- data = match->data;
+ data = of_machine_get_match_data(allowlist);
+ if (data)
goto create_pdev;
- }
- if (cpu0_node_has_opp_v2_prop() && !of_match_node(blocklist, np))
+ if (cpu0_node_has_opp_v2_prop() && !of_machine_device_match(blocklist))
goto create_pdev;
return -ENODEV;
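
of_machine_get_match_data() matches the machine's root node against the table and returns the matching entry's .data pointer directly, folding away the of_find_node_by_path("/")/of_match_node() boilerplate. One API subtlety worth noting: a matching entry whose .data is NULL returns NULL and is indistinguishable from no match, so a table consumed this way needs its entries to carry data. A sketch with hypothetical compatibles:

#include <linux/of.h>

static const struct of_device_id my_allowlist[] = {
	{ .compatible = "vendor,board-a", .data = (void *)1 },
	{ .compatible = "vendor,board-b", .data = (void *)1 },
	{ /* sentinel */ }
};

static int __init my_init(void)
{
	const void *data = of_machine_get_match_data(my_allowlist);

	if (!data)
		return -ENODEV;	/* no match, or matched entry had no .data */

	/* ... create the platform device ... */
	return 0;
}
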
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index e80dd982a3e2..7d5079fd1688 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -104,7 +104,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
if (!transition_latency)
- transition_latency = CPUFREQ_ETERNAL;
+ transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
cpumask_copy(policy->cpus, priv->cpus);
policy->driver_data = priv;
@@ -329,6 +329,17 @@ static struct platform_driver dt_cpufreq_platdrv = {
};
module_platform_driver(dt_cpufreq_platdrv);
+struct platform_device *cpufreq_dt_pdev_register(struct device *dev)
+{
+ struct platform_device_info cpufreq_dt_devinfo = {};
+
+ cpufreq_dt_devinfo.name = "cpufreq-dt";
+ cpufreq_dt_devinfo.parent = dev;
+
+ return platform_device_register_full(&cpufreq_dt_devinfo);
+}
+EXPORT_SYMBOL_GPL(cpufreq_dt_pdev_register);
+
MODULE_ALIAS("platform:cpufreq-dt");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
diff --git a/drivers/cpufreq/cpufreq-dt.h b/drivers/cpufreq/cpufreq-dt.h
index 28c8af7ec5ef..fc1889aeb4f1 100644
--- a/drivers/cpufreq/cpufreq-dt.h
+++ b/drivers/cpufreq/cpufreq-dt.h
@@ -22,4 +22,6 @@ struct cpufreq_dt_platform_data {
int (*resume)(struct cpufreq_policy *policy);
};
+struct platform_device *cpufreq_dt_pdev_register(struct device *dev);
+
#endif /* __CPUFREQ_DT_H__ */
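
cpufreq_dt_pdev_register() wraps platform_device_register_full() so SoC-specific cpufreq drivers can spawn the generic "cpufreq-dt" device with themselves as parent, tying the two lifetimes together. A hypothetical caller's probe might look like this sketch:

#include <linux/err.h>
#include <linux/platform_device.h>
#include "cpufreq-dt.h"

static int my_soc_cpufreq_probe(struct platform_device *pdev)
{
	struct platform_device *cpufreq_pdev;

	/* ... configure SoC clocks / OPP tables first ... */

	cpufreq_pdev = cpufreq_dt_pdev_register(&pdev->dev);
	if (IS_ERR(cpufreq_pdev))
		return PTR_ERR(cpufreq_pdev);

	platform_set_drvdata(pdev, cpufreq_pdev);
	return 0;
}
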
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index fedad1081973..fbbbe501cf2d 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -145,6 +145,8 @@ static unsigned int nforce2_fsb_read(int bootfsb)
pci_read_config_dword(nforce2_sub5, NFORCE2_BOOTFSB, &fsb);
fsb /= 1000000;
+ pci_dev_put(nforce2_sub5);
+
/* Check if PLL register is already set */
pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp);
@@ -426,6 +428,7 @@ static int __init nforce2_init(void)
static void __exit nforce2_exit(void)
{
cpufreq_unregister_driver(&nforce2_driver);
+ pci_dev_put(nforce2_dev);
}
module_init(nforce2_init);
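
Both additions balance references taken by pci_get_* lookups: every successful pci_get_device()/pci_get_subsys() call pins the struct pci_dev until a matching pci_dev_put(). The general shape, as a sketch with a hypothetical config offset:

#include <linux/pci.h>

static int my_read_from_subdev(unsigned int vendor, unsigned int device)
{
	struct pci_dev *pdev;
	u32 val;

	pdev = pci_get_device(vendor, device, NULL);	/* takes a reference */
	if (!pdev)
		return -ENODEV;

	pci_read_config_dword(pdev, 0x40, &val);	/* hypothetical offset */

	pci_dev_put(pdev);				/* balance the reference */
	return 0;
}
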
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d7426e1d8bdd..4472bb1ec83c 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -109,6 +109,8 @@ void disable_cpufreq(void)
{
off = 1;
}
+EXPORT_SYMBOL_GPL(disable_cpufreq);
+
static DEFINE_MUTEX(cpufreq_governor_mutex);
bool have_governor_per_policy(void)
@@ -662,10 +664,10 @@ unlock:
static unsigned int cpufreq_parse_policy(char *str_governor)
{
- if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
+ if (!strncasecmp(str_governor, "performance", strlen("performance")))
return CPUFREQ_POLICY_PERFORMANCE;
- if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
+ if (!strncasecmp(str_governor, "powersave", strlen("powersave")))
return CPUFREQ_POLICY_POWERSAVE;
return CPUFREQ_POLICY_UNKNOWN;
@@ -912,7 +914,7 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
unsigned int freq = 0;
- unsigned int ret;
+ int ret;
if (!policy->governor || !policy->governor->store_setspeed)
return -EINVAL;
@@ -967,6 +969,7 @@ static struct attribute *cpufreq_attrs[] = {
&cpuinfo_min_freq.attr,
&cpuinfo_max_freq.attr,
&cpuinfo_transition_latency.attr,
+ &scaling_cur_freq.attr,
&scaling_min_freq.attr,
&scaling_max_freq.attr,
&affected_cpus.attr,
@@ -1095,10 +1098,6 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
return ret;
}
- ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
- if (ret)
- return ret;
-
if (cpufreq_driver->bios_limit) {
ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
if (ret)
@@ -1122,7 +1121,8 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
if (has_target()) {
/* Update policy governor to the one used before hotplug. */
- gov = get_governor(policy->last_governor);
+ if (policy->last_governor[0] != '\0')
+ gov = get_governor(policy->last_governor);
if (gov) {
pr_debug("Restoring governor %s for cpu %d\n",
gov->name, policy->cpu);
@@ -1284,6 +1284,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
goto err_free_real_cpus;
}
+ init_rwsem(&policy->rwsem);
+
freq_constraints_init(&policy->constraints);
policy->nb_min.notifier_call = cpufreq_notifier_min;
@@ -1306,7 +1308,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
}
INIT_LIST_HEAD(&policy->policy_list);
- init_rwsem(&policy->rwsem);
spin_lock_init(&policy->transition_lock);
init_waitqueue_head(&policy->transition_wait);
INIT_WORK(&policy->update, handle_update);
@@ -1420,9 +1421,12 @@ static int cpufreq_policy_online(struct cpufreq_policy *policy,
* If there is a problem with its frequency table, take it
* offline and drop it.
*/
- ret = cpufreq_table_validate_and_sort(policy);
- if (ret)
- goto out_offline_policy;
+ if (policy->freq_table_sorted != CPUFREQ_TABLE_SORTED_ASCENDING &&
+ policy->freq_table_sorted != CPUFREQ_TABLE_SORTED_DESCENDING) {
+ ret = cpufreq_table_validate_and_sort(policy);
+ if (ret)
+ goto out_offline_policy;
+ }
/* related_cpus should at least include policy->cpus. */
cpumask_copy(policy->related_cpus, policy->cpus);
@@ -1694,14 +1698,13 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
return;
}
- if (has_target())
+ if (has_target()) {
strscpy(policy->last_governor, policy->governor->name,
CPUFREQ_NAME_LEN);
- else
- policy->last_policy = policy->policy;
-
- if (has_target())
cpufreq_exit_governor(policy);
+ } else {
+ policy->last_policy = policy->policy;
+ }
/*
* Perform the ->offline() during light-weight tear-down, as
@@ -1803,6 +1806,9 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
{
unsigned int new_freq;
+ if (!cpufreq_driver->get)
+ return 0;
+
new_freq = cpufreq_driver->get(policy->cpu);
if (!new_freq)
return 0;
@@ -1842,7 +1848,6 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
*/
unsigned int cpufreq_quick_get(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
unsigned long flags;
read_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -1857,7 +1862,7 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (policy)
return policy->cur;
@@ -1873,9 +1878,7 @@ EXPORT_SYMBOL(cpufreq_quick_get);
*/
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (policy)
return policy->max;
@@ -1891,9 +1894,7 @@ EXPORT_SYMBOL(cpufreq_quick_get_max);
*/
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (policy)
return policy->cpuinfo.max_freq;
@@ -1917,18 +1918,13 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
*/
unsigned int cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return 0;
guard(cpufreq_policy_read)(policy);
- if (cpufreq_driver->get)
- return __cpufreq_get(policy);
-
- return 0;
+ return __cpufreq_get(policy);
}
EXPORT_SYMBOL(cpufreq_get);
@@ -2482,8 +2478,7 @@ int cpufreq_start_governor(struct cpufreq_policy *policy)
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
- if (cpufreq_driver->get)
- cpufreq_verify_current_freq(policy, false);
+ cpufreq_verify_current_freq(policy, false);
if (policy->governor->start) {
ret = policy->governor->start(policy);
@@ -2558,7 +2553,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
for_each_inactive_policy(policy) {
if (!strcmp(policy->last_governor, governor->name)) {
policy->governor = NULL;
- strcpy(policy->last_governor, "\0");
+ policy->last_governor[0] = '\0';
}
}
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -2715,10 +2710,12 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
pr_debug("starting governor %s failed\n", policy->governor->name);
if (old_gov) {
policy->governor = old_gov;
- if (cpufreq_init_governor(policy))
+ if (cpufreq_init_governor(policy)) {
policy->governor = NULL;
- else
- cpufreq_start_governor(policy);
+ } else if (cpufreq_start_governor(policy)) {
+ cpufreq_exit_governor(policy);
+ policy->governor = NULL;
+ }
}
return ret;
@@ -2750,9 +2747,7 @@ static void cpufreq_policy_refresh(struct cpufreq_policy *policy)
*/
void cpufreq_update_policy(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return;
@@ -2769,9 +2764,7 @@ EXPORT_SYMBOL(cpufreq_update_policy);
*/
void cpufreq_update_limits(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return;
@@ -2792,7 +2785,7 @@ int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
if (!policy->freq_table)
return -ENXIO;
- ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
+ ret = cpufreq_frequency_table_cpuinfo(policy);
if (ret) {
pr_err("%s: Policy frequency update failed\n", __func__);
return ret;
@@ -2921,10 +2914,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
return -EPROBE_DEFER;
if (!driver_data || !driver_data->verify || !driver_data->init ||
- !(driver_data->setpolicy || driver_data->target_index ||
- driver_data->target) ||
- (driver_data->setpolicy && (driver_data->target_index ||
- driver_data->target)) ||
+ (driver_data->target_index && driver_data->target) ||
+ (!!driver_data->setpolicy == (driver_data->target_index || driver_data->target)) ||
(!driver_data->get_intermediate != !driver_data->target_intermediate) ||
(!driver_data->online != !driver_data->offline) ||
(driver_data->adjust_perf && !driver_data->fast_switch))
@@ -2944,15 +2935,6 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
cpufreq_driver = driver_data;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- /*
- * Mark support for the scheduler's frequency invariance engine for
- * drivers that implement target(), target_index() or fast_switch().
- */
- if (!cpufreq_driver->setpolicy) {
- static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
- pr_debug("supports frequency invariance");
- }
-
if (driver_data->setpolicy)
driver_data->flags |= CPUFREQ_CONST_LOOPS;
@@ -2962,6 +2944,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
goto err_null_driver;
}
+ /*
+ * Mark support for the scheduler's frequency invariance engine for
+ * drivers that implement target(), target_index() or fast_switch().
+ */
+ if (!cpufreq_driver->setpolicy) {
+ static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
+ pr_debug("cpufreq: supports frequency invariance\n");
+ }
+
ret = subsys_interface_register(&cpufreq_interface);
if (ret)
goto err_boost_unreg;
@@ -2989,6 +2980,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
err_if_unreg:
subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
+ if (!cpufreq_driver->setpolicy)
+ static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
remove_boost_sysfs_file();
err_null_driver:
write_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -3056,9 +3049,7 @@ static int __init cpufreq_core_init(void)
static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy) {
pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
return false;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 56500b25d77c..cce6a8d113e1 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -152,9 +152,9 @@ static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+ if (ret || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
dbs_data->sampling_down_factor = input;
@@ -168,9 +168,9 @@ static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
+ if (ret || input > 100 || input <= cs_tuners->down_threshold)
return -EINVAL;
dbs_data->up_threshold = input;
@@ -184,10 +184,10 @@ static ssize_t down_threshold_store(struct gov_attr_set *attr_set,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
/* cannot be lower than 1 otherwise freq will not fall */
- if (ret != 1 || input < 1 || input >= dbs_data->up_threshold)
+ if (ret || input < 1 || input >= dbs_data->up_threshold)
return -EINVAL;
cs_tuners->down_threshold = input;
@@ -201,9 +201,9 @@ static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
- if (ret != 1)
- return -EINVAL;
+ ret = kstrtouint(buf, 0, &input);
+ if (ret)
+ return ret;
if (input > 1)
input = 1;
@@ -226,10 +226,10 @@ static ssize_t freq_step_store(struct gov_attr_set *attr_set, const char *buf,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1)
- return -EINVAL;
+ if (ret)
+ return ret;
if (input > 100)
input = 100;
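
kstrtouint() rejects trailing garbage and out-of-range values that sscanf("%u") silently tolerates, while still accepting the single trailing newline that sysfs writes deliver, and it returns a real error code (-EINVAL or -ERANGE) that the stores can now propagate. The converted store pattern, sketched:

#include <linux/kernel.h>

static ssize_t my_store(const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = kstrtouint(buf, 0, &input);	/* base 0: "10", "0x1f", ... */
	if (ret)
		return ret;

	if (input > 100)
		return -EINVAL;

	/* ... apply input ... */
	return count;
}
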
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 0e65d37c9231..a6ecc203f7b7 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -30,29 +30,6 @@ static struct od_ops od_ops;
static unsigned int default_powersave_bias;
/*
- * Not all CPUs want IO time to be accounted as busy; this depends on how
- * efficient idling at a higher frequency/voltage is.
- * Pavel Machek says this is not so for various generations of AMD and old
- * Intel systems.
- * Mike Chan (android.com) claims this is also not true for ARM.
- * Because of this, whitelist specific known (series) of CPUs by default, and
- * leave all others up to the user.
- */
-static int should_io_be_busy(void)
-{
-#if defined(CONFIG_X86)
- /*
- * For Intel, Core 2 (model 15) and later have an efficient idle.
- */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model >= 15)
- return 1;
-#endif
- return 0;
-}
-
-/*
* Find right freq to be set now with powersave_bias on.
* Returns the freq_hi to be used right now and will set freq_hi_delay_us,
* freq_lo, and freq_lo_delay_us in percpu area for averaging freqs.
@@ -377,7 +354,7 @@ static int od_init(struct dbs_data *dbs_data)
dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
dbs_data->ignore_nice_load = 0;
tuners->powersave_bias = default_powersave_bias;
- dbs_data->io_is_busy = should_io_be_busy();
+ dbs_data->io_is_busy = od_should_io_be_busy();
dbs_data->tuners = tuners;
return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.h b/drivers/cpufreq/cpufreq_ondemand.h
index 1af8e5c4b86f..2ca8f1aaf2e3 100644
--- a/drivers/cpufreq/cpufreq_ondemand.h
+++ b/drivers/cpufreq/cpufreq_ondemand.h
@@ -24,3 +24,26 @@ static inline struct od_policy_dbs_info *to_dbs_info(struct policy_dbs_info *pol
struct od_dbs_tuners {
unsigned int powersave_bias;
};
+
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+
+/*
+ * Not all CPUs want IO time to be accounted as busy; this depends on
+ * how efficient idling at a higher frequency/voltage is.
+ *
+ * Pavel Machek says this is not so for various generations of AMD and
+ * old Intel systems. Mike Chan (android.com) claims this is also not
+ * true for ARM.
+ *
+ * Because of this, select a known series of Intel CPUs (Family 6 and
+ * later) by default, and leave all others up to the user.
+ */
+static inline bool od_should_io_be_busy(void)
+{
+ return (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86_vfm >= INTEL_PENTIUM_PRO);
+}
+#else
+static inline bool od_should_io_be_busy(void) { return false; }
+#endif
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 2c42fee76daa..77d62152cd38 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -134,6 +134,7 @@ static struct cpufreq_governor cpufreq_gov_userspace = {
.store_setspeed = cpufreq_set,
.show_setspeed = show_speed,
.owner = THIS_MODULE,
+ .flags = CPUFREQ_GOV_STRICT_TARGET,
};
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>, "
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 35de513af6c9..7f251daf03ce 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -28,22 +28,21 @@ static bool policy_has_boost_freq(struct cpufreq_policy *policy)
return false;
}
-int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table)
+int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy)
{
- struct cpufreq_frequency_table *pos;
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
unsigned int min_freq = ~0;
unsigned int max_freq = 0;
- unsigned int freq;
+ unsigned int freq, i;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, i) {
freq = pos->frequency;
if ((!cpufreq_boost_enabled() || !policy->boost_enabled)
&& (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
- pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq);
+ pr_debug("table entry %u: %u kHz\n", i, freq);
if (freq < min_freq)
min_freq = freq;
if (freq > max_freq)
@@ -65,10 +64,9 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
return 0;
}
-int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
- struct cpufreq_frequency_table *table)
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy)
{
- struct cpufreq_frequency_table *pos;
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
unsigned int freq, prev_smaller = 0;
bool found = false;
@@ -110,7 +108,7 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
if (!policy->freq_table)
return -ENODEV;
- return cpufreq_frequency_table_verify(policy, policy->freq_table);
+ return cpufreq_frequency_table_verify(policy);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
@@ -128,7 +126,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
};
struct cpufreq_frequency_table *pos;
struct cpufreq_frequency_table *table = policy->freq_table;
- unsigned int freq, diff, i = 0;
+ unsigned int freq, diff, i;
int index;
pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
@@ -354,7 +352,7 @@ int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy)
return 0;
}
- ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
+ ret = cpufreq_frequency_table_cpuinfo(policy);
if (ret)
return ret;
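
cpufreq_for_each_valid_entry_idx() walks the frequency table while skipping CPUFREQ_ENTRY_INVALID rows and keeps a running index alongside the cursor, replacing the (int)(pos - table) pointer arithmetic. In isolation:

#include <linux/cpufreq.h>
#include <linux/printk.h>

static void my_dump_table(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	unsigned int i;

	/* Invalid entries are skipped; i tracks the table index. */
	cpufreq_for_each_valid_entry_idx(pos, table, i)
		pr_info("entry %u: %u kHz\n", i, pos->frequency);
}
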
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index db1c88e9d3f9..e93697d3edfd 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -442,7 +442,7 @@ soc_opp_out:
}
if (of_property_read_u32(np, "clock-latency", &transition_latency))
- transition_latency = CPUFREQ_ETERNAL;
+ transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
/*
* Calculate the ramp time for max voltage change in the
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 64587d318267..ec4abe374573 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -575,13 +575,18 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
int scaling = cpu->pstate.scaling;
int freq;
- pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
- pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
- pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
+ pr_debug("CPU%d: PERF_CTL max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
+ pr_debug("CPU%d: PERF_CTL turbo = %d\n", cpu->cpu, perf_ctl_turbo);
+ pr_debug("CPU%d: PERF_CTL scaling = %d\n", cpu->cpu, perf_ctl_scaling);
pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
+ if (scaling == perf_ctl_scaling)
+ return;
+
+ hwp_is_hybrid = true;
+
cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling,
perf_ctl_scaling);
cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
@@ -603,9 +608,6 @@ static bool turbo_is_disabled(void)
{
u64 misc_en;
- if (!cpu_feature_enabled(X86_FEATURE_IDA))
- return true;
-
rdmsrq(MSR_IA32_MISC_ENABLE, misc_en);
return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
@@ -620,24 +622,9 @@ static int min_perf_pct_min(void)
(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
}
-static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
-{
- u64 epb;
- int ret;
-
- if (!boot_cpu_has(X86_FEATURE_EPB))
- return -ENXIO;
-
- ret = rdmsrq_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
- if (ret)
- return (s16)ret;
-
- return (s16)(epb & 0x0f);
-}
-
static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
- s16 epp;
+ s16 epp = -EOPNOTSUPP;
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
/*
@@ -651,34 +638,13 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
return epp;
}
epp = (hwp_req_data >> 24) & 0xff;
- } else {
- /* When there is no EPP present, HWP uses EPB settings */
- epp = intel_pstate_get_epb(cpu_data);
}
return epp;
}
-static int intel_pstate_set_epb(int cpu, s16 pref)
-{
- u64 epb;
- int ret;
-
- if (!boot_cpu_has(X86_FEATURE_EPB))
- return -ENXIO;
-
- ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
- if (ret)
- return ret;
-
- epb = (epb & ~0x0f) | pref;
- wrmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
-
- return 0;
-}
-
/*
- * EPP/EPB display strings corresponding to EPP index in the
+ * EPP display strings corresponding to EPP index in the
* energy_perf_strings[]
* index String
*-------------------------------------
@@ -782,7 +748,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
u32 raw_epp)
{
int epp = -EINVAL;
- int ret;
+ int ret = -EOPNOTSUPP;
if (!pref_index)
epp = cpu_data->epp_default;
@@ -802,10 +768,6 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
return -EBUSY;
ret = intel_pstate_set_epp(cpu_data, epp);
- } else {
- if (epp == -EINVAL)
- epp = (pref_index - 1) << 2;
- ret = intel_pstate_set_epb(cpu_data->cpu, epp);
}
return ret;
@@ -937,13 +899,26 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
cpufreq_freq_attr_ro(base_frequency);
+enum hwp_cpufreq_attr_index {
+ HWP_BASE_FREQUENCY_INDEX = 0,
+ HWP_PERFORMANCE_PREFERENCE_INDEX,
+ HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX,
+ HWP_CPUFREQ_ATTR_COUNT,
+};
+
static struct freq_attr *hwp_cpufreq_attrs[] = {
- &energy_performance_preference,
- &energy_performance_available_preferences,
- &base_frequency,
- NULL,
+ [HWP_BASE_FREQUENCY_INDEX] = &base_frequency,
+ [HWP_PERFORMANCE_PREFERENCE_INDEX] = &energy_performance_preference,
+ [HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX] =
+ &energy_performance_available_preferences,
+ [HWP_CPUFREQ_ATTR_COUNT] = NULL,
};
+static u8 hybrid_get_cpu_type(unsigned int cpu)
+{
+ return cpu_data(cpu).topo.intel_type;
+}
+
static bool no_cas __ro_after_init;
static struct cpudata *hybrid_max_perf_cpu __read_mostly;
@@ -960,11 +935,8 @@ static int hybrid_active_power(struct device *dev, unsigned long *power,
unsigned long *freq)
{
/*
- * Create "utilization bins" of 0-40%, 40%-60%, 60%-80%, and 80%-100%
- * of the maximum capacity such that two CPUs of the same type will be
- * regarded as equally attractive if the utilization of each of them
- * falls into the same bin, which should prevent tasks from being
- * migrated between them too often.
+ * Create four "states" corresponding to 40%, 60%, 80%, and 100% of the
+ * full capacity.
*
* For this purpose, return the "frequency" of 2 for the first
* performance level and otherwise leave the value set by the caller.
@@ -978,38 +950,40 @@ static int hybrid_active_power(struct device *dev, unsigned long *power,
return 0;
}
+static bool hybrid_has_l3(unsigned int cpu)
+{
+ struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(cpu);
+ unsigned int i;
+
+ if (!cacheinfo)
+ return false;
+
+ for (i = 0; i < cacheinfo->num_leaves; i++) {
+ if (cacheinfo->info_list[i].level == 3)
+ return true;
+ }
+
+ return false;
+}
+
static int hybrid_get_cost(struct device *dev, unsigned long freq,
unsigned long *cost)
{
- struct pstate_data *pstate = &all_cpu_data[dev->id]->pstate;
- struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(dev->id);
-
+ /* Facilitate load balancing between CPUs of the same type. */
+ *cost = freq;
/*
- * The smaller the perf-to-frequency scaling factor, the larger the IPC
- * ratio between the given CPU and the least capable CPU in the system.
- * Regard that IPC ratio as the primary cost component and assume that
- * the scaling factors for different CPU types will differ by at least
- * 5% and they will not be above INTEL_PSTATE_CORE_SCALING.
+ * Adjust the cost depending on CPU type.
*
- * Add the freq value to the cost, so that the cost of running on CPUs
- * of the same type in different "utilization bins" is different.
+ * The idea is to start loading up LPE-cores before E-cores and start
+ * to populate E-cores when LPE-cores are utilized above 60% of the
+ * capacity. Similarly, P-cores start to be populated when E-cores are
+ * utilized above 60% of the capacity.
*/
- *cost = div_u64(100ULL * INTEL_PSTATE_CORE_SCALING, pstate->scaling) + freq;
- /*
- * Increase the cost slightly for CPUs able to access L3 to avoid
- * touching it in case some other CPUs of the same type can do the work
- * without it.
- */
- if (cacheinfo) {
- unsigned int i;
-
- /* Check if L3 cache is there. */
- for (i = 0; i < cacheinfo->num_leaves; i++) {
- if (cacheinfo->info_list[i].level == 3) {
- *cost += 2;
- break;
- }
- }
+ if (hybrid_get_cpu_type(dev->id) == INTEL_CPU_TYPE_ATOM) {
+ if (hybrid_has_l3(dev->id)) /* E-core */
+ *cost += 1;
+ } else { /* P-core */
+ *cost += 2;
}
return 0;
@@ -1034,8 +1008,8 @@ static bool hybrid_register_perf_domain(unsigned int cpu)
if (!cpu_dev)
return false;
- if (em_dev_register_perf_domain(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
- cpumask_of(cpu), false))
+ if (em_dev_register_pd_no_update(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
+ cpumask_of(cpu), false))
return false;
cpudata->pd_registered = true;
@@ -1072,9 +1046,9 @@ static void hybrid_set_cpu_capacity(struct cpudata *cpu)
topology_set_cpu_scale(cpu->cpu, arch_scale_cpu_capacity(cpu->cpu));
- pr_debug("CPU%d: perf = %u, max. perf = %u, base perf = %d\n", cpu->cpu,
- cpu->capacity_perf, hybrid_max_perf_cpu->capacity_perf,
- cpu->pstate.max_pstate_physical);
+ pr_debug("CPU%d: capacity perf = %u, base perf = %u, sys max perf = %u\n",
+ cpu->cpu, cpu->capacity_perf, cpu->pstate.max_pstate_physical,
+ hybrid_max_perf_cpu->capacity_perf);
}
static void hybrid_clear_cpu_capacity(unsigned int cpunum)
@@ -1337,9 +1311,8 @@ static void intel_pstate_hwp_set(unsigned int cpu)
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
- } else {
- intel_pstate_set_epb(cpu, epp);
}
+
skip_epp:
WRITE_ONCE(cpu_data->hwp_req_cached, value);
wrmsrq_on_cpu(cpu, MSR_HWP_REQUEST, value);
@@ -1411,13 +1384,17 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
#define POWER_CTL_EE_ENABLE 1
#define POWER_CTL_EE_DISABLE 2
+/* Enable bit for Dynamic Efficiency Control (DEC) */
+#define POWER_CTL_DEC_ENABLE 27
+
static int power_ctl_ee_state;
static void set_power_ctl_ee_state(bool input)
{
u64 power_ctl;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
+
rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
if (input) {
power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
@@ -1427,7 +1404,6 @@ static void set_power_ctl_ee_state(bool input)
power_ctl_ee_state = POWER_CTL_EE_DISABLE;
}
wrmsrq(MSR_IA32_POWER_CTL, power_ctl);
- mutex_unlock(&intel_pstate_driver_lock);
}
static void intel_pstate_hwp_enable(struct cpudata *cpudata);
@@ -1502,9 +1478,7 @@ static void __intel_pstate_update_max_freq(struct cpufreq_policy *policy,
static bool intel_pstate_update_max_freq(struct cpudata *cpudata)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpudata->cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu);
if (!policy)
return false;
@@ -1551,13 +1525,9 @@ static int intel_pstate_update_status(const char *buf, size_t size);
static ssize_t show_status(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- ssize_t ret;
-
- mutex_lock(&intel_pstate_driver_lock);
- ret = intel_pstate_show_status(buf);
- mutex_unlock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- return ret;
+ return intel_pstate_show_status(buf);
}
static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
@@ -1566,11 +1536,13 @@ static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
char *p = memchr(buf, '\n', count);
int ret;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
+
ret = intel_pstate_update_status(buf, p ? p - buf : count);
- mutex_unlock(&intel_pstate_driver_lock);
+ if (ret < 0)
+ return ret;
- return ret < 0 ? ret : count;
+ return count;
}
static ssize_t show_turbo_pct(struct kobject *kobj,
@@ -1580,12 +1552,10 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
int total, no_turbo, turbo_pct;
uint32_t turbo_fp;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
cpu = all_cpu_data[0];
@@ -1594,8 +1564,6 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
turbo_fp = div_fp(no_turbo, total);
turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
- mutex_unlock(&intel_pstate_driver_lock);
-
return sprintf(buf, "%u\n", turbo_pct);
}
@@ -1605,38 +1573,26 @@ static ssize_t show_num_pstates(struct kobject *kobj,
struct cpudata *cpu;
int total;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
cpu = all_cpu_data[0];
total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
- mutex_unlock(&intel_pstate_driver_lock);
-
return sprintf(buf, "%u\n", total);
}
static ssize_t show_no_turbo(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- ssize_t ret;
-
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
- ret = sprintf(buf, "%u\n", global.no_turbo);
-
- mutex_unlock(&intel_pstate_driver_lock);
-
- return ret;
+ return sprintf(buf, "%u\n", global.no_turbo);
}
static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
@@ -1648,29 +1604,25 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
if (sscanf(buf, "%u", &input) != 1)
return -EINVAL;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- count = -EAGAIN;
- goto unlock_driver;
- }
+ if (!intel_pstate_driver)
+ return -EAGAIN;
no_turbo = !!clamp_t(int, input, 0, 1);
WRITE_ONCE(global.turbo_disabled, turbo_is_disabled());
if (global.turbo_disabled && !no_turbo) {
pr_notice("Turbo disabled by BIOS or unavailable on processor\n");
- count = -EPERM;
if (global.no_turbo)
- goto unlock_driver;
- else
- no_turbo = 1;
- }
+ return -EPERM;
- if (no_turbo == global.no_turbo) {
- goto unlock_driver;
+ no_turbo = 1;
}
+ if (no_turbo == global.no_turbo)
+ return count;
+
WRITE_ONCE(global.no_turbo, no_turbo);
mutex_lock(&intel_pstate_limits_lock);
@@ -1689,47 +1641,43 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
intel_pstate_update_limits_for_all();
arch_set_max_freq_ratio(no_turbo);
-unlock_driver:
- mutex_unlock(&intel_pstate_driver_lock);
-
return count;
}
-static void update_qos_request(enum freq_qos_req_type type)
+static void update_cpu_qos_request(int cpu, enum freq_qos_req_type type)
{
+ struct cpudata *cpudata = all_cpu_data[cpu];
+ unsigned int freq = cpudata->pstate.turbo_freq;
struct freq_qos_request *req;
- struct cpufreq_policy *policy;
- int i;
-
- for_each_possible_cpu(i) {
- struct cpudata *cpu = all_cpu_data[i];
- unsigned int freq, perf_pct;
- policy = cpufreq_cpu_get(i);
- if (!policy)
- continue;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return;
- req = policy->driver_data;
- cpufreq_cpu_put(policy);
+ req = policy->driver_data;
+ if (!req)
+ return;
- if (!req)
- continue;
+ if (hwp_active)
+ intel_pstate_get_hwp_cap(cpudata);
- if (hwp_active)
- intel_pstate_get_hwp_cap(cpu);
+ if (type == FREQ_QOS_MIN) {
+ freq = DIV_ROUND_UP(freq * global.min_perf_pct, 100);
+ } else {
+ req++;
+ freq = (freq * global.max_perf_pct) / 100;
+ }
- if (type == FREQ_QOS_MIN) {
- perf_pct = global.min_perf_pct;
- } else {
- req++;
- perf_pct = global.max_perf_pct;
- }
+ if (freq_qos_update_request(req, freq) < 0)
+ pr_warn("Failed to update freq constraint: CPU%d\n", cpu);
+}
- freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);
+static void update_qos_requests(enum freq_qos_req_type type)
+{
+ int i;
- if (freq_qos_update_request(req, freq) < 0)
- pr_warn("Failed to update freq constraint: CPU%d\n", i);
- }
+ for_each_possible_cpu(i)
+ update_cpu_qos_request(i, type);
}
static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
@@ -1742,12 +1690,10 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (ret != 1)
return -EINVAL;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
mutex_lock(&intel_pstate_limits_lock);
@@ -1758,9 +1704,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(FREQ_QOS_MAX);
-
- mutex_unlock(&intel_pstate_driver_lock);
+ update_qos_requests(FREQ_QOS_MAX);
return count;
}
@@ -1775,12 +1719,10 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (ret != 1)
return -EINVAL;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
mutex_lock(&intel_pstate_limits_lock);
@@ -1792,9 +1734,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(FREQ_QOS_MIN);
-
- mutex_unlock(&intel_pstate_driver_lock);
+ update_qos_requests(FREQ_QOS_MIN);
return count;
}
@@ -1816,10 +1756,10 @@ static ssize_t store_hwp_dynamic_boost(struct kobject *a,
if (ret)
return ret;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
+
hwp_boost = !!input;
intel_pstate_update_policies();
- mutex_unlock(&intel_pstate_driver_lock);
return count;
}
@@ -2108,6 +2048,18 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
intel_pstate_update_epp_defaults(cpudata);
}
+static u64 get_perf_ctl_val(int pstate)
+{
+ u64 val;
+
+ val = (u64)pstate << 8;
+ if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled) &&
+ cpu_feature_enabled(X86_FEATURE_IDA))
+ val |= (u64)1 << 32;
+
+ return val;
+}
+
static int atom_get_min_pstate(int not_used)
{
u64 value;
@@ -2134,14 +2086,10 @@ static int atom_get_turbo_pstate(int not_used)
static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
- u64 val;
+ u64 val = get_perf_ctl_val(pstate);
int32_t vid_fp;
u32 vid;
- val = (u64)pstate << 8;
- if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
- val |= (u64)1 << 32;
-
vid_fp = cpudata->vid.min + mul_fp(
int_tofp(pstate - cpudata->pstate.min_pstate),
cpudata->vid.ratio);
@@ -2301,13 +2249,7 @@ static int core_get_turbo_pstate(int cpu)
static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
- u64 val;
-
- val = (u64)pstate << 8;
- if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
- val |= (u64)1 << 32;
-
- return val;
+ return get_perf_ctl_val(pstate);
}
static int knl_get_aperf_mperf_shift(void)
@@ -2331,18 +2273,14 @@ static int knl_get_turbo_pstate(int cpu)
static int hwp_get_cpu_scaling(int cpu)
{
if (hybrid_scaling_factor) {
- struct cpuinfo_x86 *c = &cpu_data(cpu);
- u8 cpu_type = c->topo.intel_type;
-
/*
* Return the hybrid scaling factor for P-cores and use the
* default core scaling for E-cores.
*/
- if (cpu_type == INTEL_CPU_TYPE_CORE)
+ if (hybrid_get_cpu_type(cpu) == INTEL_CPU_TYPE_CORE)
return hybrid_scaling_factor;
- if (cpu_type == INTEL_CPU_TYPE_ATOM)
- return core_get_scaling();
+ return core_get_scaling();
}
/* Use core scaling on non-hybrid systems. */
@@ -2377,11 +2315,10 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
- int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
int perf_ctl_scaling = pstate_funcs.get_scaling();
+ cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical(cpu->cpu);
cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu);
- cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;
if (hwp_active && !hwp_mode_bdw) {
@@ -2389,10 +2326,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
if (pstate_funcs.get_cpu_scaling) {
cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
- if (cpu->pstate.scaling != perf_ctl_scaling) {
- intel_pstate_hybrid_hwp_adjust(cpu);
- hwp_is_hybrid = true;
- }
+ intel_pstate_hybrid_hwp_adjust(cpu);
} else {
cpu->pstate.scaling = perf_ctl_scaling;
}
@@ -2575,7 +2509,7 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
* that sample.time will always be reset before setting the utilization
* update hook and make the caller skip the sample then.
*/
- if (cpu->last_sample_time) {
+ if (likely(cpu->last_sample_time)) {
intel_pstate_calc_avg_perf(cpu);
return true;
}
@@ -2775,6 +2709,8 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
X86_MATCH(INTEL_TIGERLAKE, core_funcs),
X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs),
X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_GRANITERAPIDS_D, core_funcs),
+ X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
@@ -2791,6 +2727,8 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs),
X86_MATCH(INTEL_ATOM_CRESTMONT, core_funcs),
X86_MATCH(INTEL_ATOM_CRESTMONT_X, core_funcs),
+ X86_MATCH(INTEL_ATOM_DARKMONT_X, core_funcs),
+ X86_MATCH(INTEL_DIAMONDRAPIDS_X, core_funcs),
{}
};
#endif
@@ -3249,8 +3187,8 @@ static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
int max_pstate = policy->strict_target ?
target_pstate : cpu->max_perf_ratio;
- intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0,
- fast_switch);
+ intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate,
+ target_pstate, fast_switch);
} else if (target_pstate != old_pstate) {
intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
}
@@ -3799,6 +3737,26 @@ static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
{}
};
+static bool hwp_check_epp(void)
+{
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP))
+ return true;
+
+ /* Without EPP support, don't expose EPP-related sysfs attributes. */
+ hwp_cpufreq_attrs[HWP_PERFORMANCE_PREFERENCE_INDEX] = NULL;
+ hwp_cpufreq_attrs[HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX] = NULL;
+
+ return false;
+}
+
+static bool hwp_check_dec(void)
+{
+ u64 power_ctl;
+
+ rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
+ return !!(power_ctl & BIT(POWER_CTL_DEC_ENABLE));
+}
+
static int __init intel_pstate_init(void)
{
static struct cpudata **_all_cpu_data;
@@ -3819,23 +3777,32 @@ static int __init intel_pstate_init(void)
id = x86_match_cpu(hwp_support_ids);
if (id) {
- hwp_forced = intel_pstate_hwp_is_enabled();
+ bool epp_present = hwp_check_epp();
- if (hwp_forced)
+ /*
+ * If HWP is enabled already, there is no choice but to deal
+ * with it.
+ */
+ hwp_forced = intel_pstate_hwp_is_enabled();
+ if (hwp_forced) {
pr_info("HWP enabled by BIOS\n");
- else if (no_load)
+ no_hwp = 0;
+ } else if (no_load) {
return -ENODEV;
+ } else if (!epp_present && !hwp_check_dec()) {
+ /*
+	 * Avoid enabling HWP for processors without EPP support
+	 * unless the Dynamic Efficiency Control (DEC) enable bit
+	 * (MSR_IA32_POWER_CTL, bit 27) is set, because without it
+	 * the HWP implementation is incomplete, which is a corner
+	 * case that is generally problematic to support.
+ */
+ no_hwp = 1;
+ }
copy_cpu_funcs(&core_funcs);
- /*
- * Avoid enabling HWP for processors without EPP support,
- * because that means incomplete HWP implementation which is a
- * corner case and supporting it is generally problematic.
- *
- * If HWP is enabled already, though, there is no choice but to
- * deal with it.
- */
- if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
+
+ if (!no_hwp) {
hwp_active = true;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
@@ -3914,9 +3881,9 @@ hwp_cpu_matched:
}
- mutex_lock(&intel_pstate_driver_lock);
- rc = intel_pstate_register_driver(default_driver);
- mutex_unlock(&intel_pstate_driver_lock);
+ scoped_guard(mutex, &intel_pstate_driver_lock) {
+ rc = intel_pstate_register_driver(default_driver);
+ }
if (rc) {
intel_pstate_sysfs_remove();
return rc;
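The conversions above rely on the scope-based cleanup helpers from <linux/cleanup.h>: guard(mutex)(&lock) holds the mutex until the enclosing scope ends, and scoped_guard(mutex, &lock) { ... } limits the hold to an explicit block, so every early return unlocks automatically and the unlock_driver-style labels disappear. A minimal sketch of the pattern, with hypothetical demo_* names rather than anything from this patch:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_ready;

/* Hypothetical sysfs-style store helper, not from this patch. */
static int demo_store(unsigned int input)
{
	guard(mutex)(&demo_lock);	/* released on every return path */

	if (!demo_ready)
		return -EAGAIN;		/* no goto/unlock bookkeeping needed */

	demo_ready = input;
	return 0;
}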
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index ba0e08c8486a..49e76b44468a 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -953,6 +953,9 @@ static void __exit longhaul_exit(void)
struct cpufreq_policy *policy = cpufreq_cpu_get(0);
int i;
+ if (unlikely(!policy))
+ return;
+
for (i = 0; i < numscales; i++) {
if (mults[i] == maxmult) {
struct cpufreq_freqs freqs;
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index 74f1b4c796e4..ae4500ab4891 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -24,6 +24,8 @@
#define POLL_USEC 1000
#define TIMEOUT_USEC 300000
+#define FDVFS_FDIV_HZ (26 * 1000)
+
enum {
REG_FREQ_LUT_TABLE,
REG_FREQ_ENABLE,
@@ -35,7 +37,14 @@ enum {
REG_ARRAY_SIZE,
};
-struct mtk_cpufreq_data {
+struct mtk_cpufreq_priv {
+ struct device *dev;
+ const struct mtk_cpufreq_variant *variant;
+ void __iomem *fdvfs;
+};
+
+struct mtk_cpufreq_domain {
+ struct mtk_cpufreq_priv *parent;
struct cpufreq_frequency_table *table;
void __iomem *reg_bases[REG_ARRAY_SIZE];
struct resource *res;
@@ -43,20 +52,51 @@ struct mtk_cpufreq_data {
int nr_opp;
};
-static const u16 cpufreq_mtk_offsets[REG_ARRAY_SIZE] = {
- [REG_FREQ_LUT_TABLE] = 0x0,
- [REG_FREQ_ENABLE] = 0x84,
- [REG_FREQ_PERF_STATE] = 0x88,
- [REG_FREQ_HW_STATE] = 0x8c,
- [REG_EM_POWER_TBL] = 0x90,
- [REG_FREQ_LATENCY] = 0x110,
+struct mtk_cpufreq_variant {
+ int (*init)(struct mtk_cpufreq_priv *priv);
+ const u16 reg_offsets[REG_ARRAY_SIZE];
+ const bool is_hybrid_dvfs;
+};
+
+static const struct mtk_cpufreq_variant cpufreq_mtk_base_variant = {
+ .reg_offsets = {
+ [REG_FREQ_LUT_TABLE] = 0x0,
+ [REG_FREQ_ENABLE] = 0x84,
+ [REG_FREQ_PERF_STATE] = 0x88,
+ [REG_FREQ_HW_STATE] = 0x8c,
+ [REG_EM_POWER_TBL] = 0x90,
+ [REG_FREQ_LATENCY] = 0x110,
+ },
+};
+
+static int mtk_cpufreq_hw_mt8196_init(struct mtk_cpufreq_priv *priv)
+{
+ priv->fdvfs = devm_of_iomap(priv->dev, priv->dev->of_node, 0, NULL);
+ if (IS_ERR(priv->fdvfs))
+ return dev_err_probe(priv->dev, PTR_ERR(priv->fdvfs),
+ "failed to get fdvfs iomem\n");
+
+ return 0;
+}
+
+static const struct mtk_cpufreq_variant cpufreq_mtk_mt8196_variant = {
+ .init = mtk_cpufreq_hw_mt8196_init,
+ .reg_offsets = {
+ [REG_FREQ_LUT_TABLE] = 0x0,
+ [REG_FREQ_ENABLE] = 0x84,
+ [REG_FREQ_PERF_STATE] = 0x88,
+ [REG_FREQ_HW_STATE] = 0x8c,
+ [REG_EM_POWER_TBL] = 0x90,
+ [REG_FREQ_LATENCY] = 0x114,
+ },
+ .is_hybrid_dvfs = true,
};
static int __maybe_unused
mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
unsigned long *KHz)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct cpufreq_policy *policy;
int i;
@@ -80,19 +120,38 @@ mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
return 0;
}
+static void mtk_cpufreq_hw_fdvfs_switch(unsigned int target_freq,
+ struct cpufreq_policy *policy)
+{
+ struct mtk_cpufreq_domain *data = policy->driver_data;
+ struct mtk_cpufreq_priv *priv = data->parent;
+ unsigned int cpu;
+
+ target_freq = DIV_ROUND_UP(target_freq, FDVFS_FDIV_HZ);
+ for_each_cpu(cpu, policy->real_cpus) {
+ writel_relaxed(target_freq, priv->fdvfs + cpu * 4);
+ }
+}
+
static int mtk_cpufreq_hw_target_index(struct cpufreq_policy *policy,
unsigned int index)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
-
- writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ struct mtk_cpufreq_domain *data = policy->driver_data;
+ unsigned int target_freq;
+
+ if (data->parent->fdvfs) {
+ target_freq = policy->freq_table[index].frequency;
+ mtk_cpufreq_hw_fdvfs_switch(target_freq, policy);
+ } else {
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ }
return 0;
}
static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct cpufreq_policy *policy;
unsigned int index;
@@ -111,18 +170,21 @@ static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
static unsigned int mtk_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
unsigned int index;
index = cpufreq_table_find_index_dl(policy, target_freq, false);
- writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ if (data->parent->fdvfs)
+ mtk_cpufreq_hw_fdvfs_switch(target_freq, policy);
+ else
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
return policy->freq_table[index].frequency;
}
static int mtk_cpu_create_freq_table(struct platform_device *pdev,
- struct mtk_cpufreq_data *data)
+ struct mtk_cpufreq_domain *data)
{
struct device *dev = &pdev->dev;
u32 temp, i, freq, prev_freq = 0;
@@ -157,9 +219,9 @@ static int mtk_cpu_create_freq_table(struct platform_device *pdev,
static int mtk_cpu_resources_init(struct platform_device *pdev,
struct cpufreq_policy *policy,
- const u16 *offsets)
+ struct mtk_cpufreq_priv *priv)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct device *dev = &pdev->dev;
struct resource *res;
struct of_phandle_args args;
@@ -180,6 +242,15 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
index = args.args[0];
of_node_put(args.np);
+ /*
+	 * On SoCs with hybrid DVFS, such as the MT8196, the first declared
+ * register range is for FDVFS, followed by the frequency domain MMIOs.
+ */
+ if (priv->variant->is_hybrid_dvfs)
+ index++;
+
+ data->parent = priv;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, index);
if (!res) {
dev_err(dev, "failed to get mem resource %d\n", index);
@@ -202,7 +273,7 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
data->res = res;
for (i = REG_FREQ_LUT_TABLE; i < REG_ARRAY_SIZE; i++)
- data->reg_bases[i] = base + offsets[i];
+ data->reg_bases[i] = base + priv->variant->reg_offsets[i];
ret = mtk_cpu_create_freq_table(pdev, data);
if (ret) {
@@ -223,7 +294,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
struct platform_device *pdev = cpufreq_get_driver_data();
int sig, pwr_hw = CPUFREQ_HW_STATUS | SVS_HW_STATUS;
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
unsigned int latency;
int ret;
@@ -238,7 +309,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
latency = readl_relaxed(data->reg_bases[REG_FREQ_LATENCY]) * 1000;
if (!latency)
- latency = CPUFREQ_ETERNAL;
+ latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
policy->cpuinfo.transition_latency = latency;
policy->fast_switch_possible = true;
@@ -262,7 +333,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
struct resource *res = data->res;
void __iomem *base = data->base;
@@ -275,7 +346,7 @@ static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
static void mtk_cpufreq_register_em(struct cpufreq_policy *policy)
{
struct em_data_callback em_cb = EM_DATA_CB(mtk_cpufreq_get_cpu_power);
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
em_dev_register_perf_domain(get_cpu_device(policy->cpu), data->nr_opp,
&em_cb, policy->cpus, true);
@@ -297,6 +368,7 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = {
static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
+ struct mtk_cpufreq_priv *priv;
const void *data;
int ret, cpu;
struct device *cpu_dev;
@@ -320,7 +392,20 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
if (!data)
return -EINVAL;
- platform_set_drvdata(pdev, (void *) data);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->variant = data;
+ priv->dev = &pdev->dev;
+
+ if (priv->variant->init) {
+ ret = priv->variant->init(priv);
+ if (ret)
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
cpufreq_mtk_hw_driver.driver_data = pdev;
ret = cpufreq_register_driver(&cpufreq_mtk_hw_driver);
@@ -336,7 +421,8 @@ static void mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
}
static const struct of_device_id mtk_cpufreq_hw_match[] = {
- { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_offsets },
+ { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_base_variant },
+ { .compatible = "mediatek,mt8196-cpufreq-hw", .data = &cpufreq_mtk_mt8196_variant },
{}
};
MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match);
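The MT8196 support turns the flat offsets array into a per-variant descriptor selected through the of_device_id .data pointer, with an optional init() hook for variant-specific setup. A hedged, self-contained sketch of that idiom (all demo_* names are illustrative, not from this driver):

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/types.h>

struct demo_variant {
	int (*init)(struct platform_device *pdev);	/* optional hook */
	u16 perf_state_offset;
};

static const struct demo_variant demo_base_variant = {
	.perf_state_offset = 0x88,
};

static const struct of_device_id demo_match[] = {
	{ .compatible = "vendor,demo", .data = &demo_base_variant },
	{ }
};

static int demo_probe(struct platform_device *pdev)
{
	const struct demo_variant *v = of_device_get_match_data(&pdev->dev);

	if (!v)
		return -EINVAL;

	return v->init ? v->init(pdev) : 0;	/* run the hook when present */
}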
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index f3f02c4b6888..052ca7cd2f4f 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -123,7 +123,7 @@ static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
soc_data->sram_max_volt);
return ret;
}
- } else if (pre_vproc > new_vproc) {
+ } else {
vproc = max(new_vproc,
pre_vsram - soc_data->max_volt_shift);
ret = regulator_set_voltage(proc_reg, vproc,
@@ -320,7 +320,6 @@ static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
struct dev_pm_opp *new_opp;
struct mtk_cpu_dvfs_info *info;
unsigned long freq, volt;
- struct cpufreq_policy *policy;
int ret = 0;
info = container_of(nb, struct mtk_cpu_dvfs_info, opp_nb);
@@ -353,12 +352,12 @@ static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
}
dev_pm_opp_put(new_opp);
- policy = cpufreq_cpu_get(info->opp_cpu);
- if (policy) {
+
+ struct cpufreq_policy *policy __free(put_cpufreq_policy)
+ = cpufreq_cpu_get(info->opp_cpu);
+ if (policy)
cpufreq_driver_target(policy, freq / 1000,
CPUFREQ_RELATION_L);
- cpufreq_cpu_put(policy);
- }
}
}
@@ -404,9 +403,11 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
}
info->cpu_clk = clk_get(cpu_dev, "cpu");
- if (IS_ERR(info->cpu_clk))
- return dev_err_probe(cpu_dev, PTR_ERR(info->cpu_clk),
- "cpu%d: failed to get cpu clk\n", cpu);
+ if (IS_ERR(info->cpu_clk)) {
+ ret = PTR_ERR(info->cpu_clk);
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to get cpu clk\n", cpu);
+ goto out_put_cci_dev;
+ }
info->inter_clk = clk_get(cpu_dev, "intermediate");
if (IS_ERR(info->inter_clk)) {
@@ -552,6 +553,10 @@ out_free_inter_clock:
out_free_mux_clock:
clk_put(info->cpu_clk);
+out_put_cci_dev:
+ if (info->soc_data->ccifreq_supported)
+ put_device(info->cci_dev);
+
return ret;
}
@@ -569,6 +574,8 @@ static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
clk_put(info->inter_clk);
dev_pm_opp_of_cpumask_remove_table(&info->cpus);
dev_pm_opp_unregister_notifier(info->cpu_dev, &info->opp_nb);
+ if (info->soc_data->ccifreq_supported)
+ put_device(info->cci_dev);
}
static int mtk_cpufreq_init(struct cpufreq_policy *policy)
@@ -757,22 +764,14 @@ MODULE_DEVICE_TABLE(of, mtk_cpufreq_machines);
static int __init mtk_cpufreq_driver_init(void)
{
- struct device_node *np;
- const struct of_device_id *match;
const struct mtk_cpufreq_platform_data *data;
int err;
- np = of_find_node_by_path("/");
- if (!np)
- return -ENODEV;
-
- match = of_match_node(mtk_cpufreq_machines, np);
- of_node_put(np);
- if (!match) {
+ data = of_machine_get_match_data(mtk_cpufreq_machines);
+ if (!data) {
pr_debug("Machine is not compatible with mtk-cpufreq\n");
return -ENODEV;
}
- data = match->data;
err = platform_driver_register(&mtk_cpufreq_platdrv);
if (err)
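of_machine_get_match_data() collapses the removed root-node matching sequence into one call. A hedged sketch of the equivalence, written out as the open-coded form the patch deletes:

#include <linux/of.h>

/* What of_machine_get_match_data(table) replaces, per the removed lines. */
static const void *demo_machine_match_data(const struct of_device_id *table)
{
	struct device_node *root = of_find_node_by_path("/");
	const struct of_device_id *match;

	if (!root)
		return NULL;

	match = of_match_node(table, root);
	of_node_put(root);

	return match ? match->data : NULL;
}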
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index a8943e2a93be..7d9a5f656de8 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -21,7 +21,6 @@
#include <linux/string_choices.h>
#include <linux/cpu.h>
#include <linux/hashtable.h>
-#include <trace/events/power.h>
#include <asm/cputhreads.h>
#include <asm/firmware.h>
@@ -30,6 +29,9 @@
#include <asm/opal.h>
#include <linux/timer.h>
+#define CREATE_TRACE_POINTS
+#include "powernv-trace.h"
+
#define POWERNV_MAX_PSTATES_ORDER 8
#define POWERNV_MAX_PSTATES (1UL << (POWERNV_MAX_PSTATES_ORDER))
#define PMSR_PSAFE_ENABLE (1UL << 30)
diff --git a/drivers/cpufreq/powernv-trace.h b/drivers/cpufreq/powernv-trace.h
new file mode 100644
index 000000000000..8cadb7c9427b
--- /dev/null
+++ b/drivers/cpufreq/powernv-trace.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#if !defined(_POWERNV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _POWERNV_TRACE_H
+
+#include <linux/cpufreq.h>
+#include <linux/tracepoint.h>
+#include <linux/trace_events.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM power
+
+TRACE_EVENT(powernv_throttle,
+
+ TP_PROTO(int chip_id, const char *reason, int pmax),
+
+ TP_ARGS(chip_id, reason, pmax),
+
+ TP_STRUCT__entry(
+ __field(int, chip_id)
+ __string(reason, reason)
+ __field(int, pmax)
+ ),
+
+ TP_fast_assign(
+ __entry->chip_id = chip_id;
+ __assign_str(reason);
+ __entry->pmax = pmax;
+ ),
+
+ TP_printk("Chip %d Pmax %d %s", __entry->chip_id,
+ __entry->pmax, __get_str(reason))
+);
+
+#endif /* _POWERNV_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE powernv-trace
+
+#include <trace/define_trace.h>
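powernv-trace.h follows the standard pattern for a driver-local trace header: exactly one translation unit defines CREATE_TRACE_POINTS before including it (powernv-cpufreq.c in the hunk above), and the trailing TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE block tells <trace/define_trace.h> to re-include this header from the driver directory to generate the tracepoint bodies. A hedged usage sketch; the call-site arguments are illustrative, not lifted from the driver:

/* In exactly one .c file, before the include: */
#define CREATE_TRACE_POINTS
#include "powernv-trace.h"

/* Elsewhere in that file, emitting the event: */
trace_powernv_throttle(chip_id, reason_str, pmax);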
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 54f8117103c8..81e16b5a0245 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -200,6 +200,10 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
case QCOM_ID_IPQ9574:
drv->versions = 1 << (unsigned int)(*speedbin);
break;
+ case QCOM_ID_IPQ5424:
+ case QCOM_ID_IPQ5404:
+ drv->versions = (*speedbin == 0x3b) ? BIT(1) : BIT(0);
+ break;
case QCOM_ID_MSM8996SG:
case QCOM_ID_APQ8096SG:
drv->versions = 1 << ((unsigned int)(*speedbin) + 4);
@@ -252,13 +256,22 @@ len_error:
return ret;
}
+static const struct of_device_id qcom_cpufreq_ipq806x_match_list[] __maybe_unused = {
+ { .compatible = "qcom,ipq8062", .data = (const void *)QCOM_ID_IPQ8062 },
+ { .compatible = "qcom,ipq8064", .data = (const void *)QCOM_ID_IPQ8064 },
+ { .compatible = "qcom,ipq8065", .data = (const void *)QCOM_ID_IPQ8065 },
+ { .compatible = "qcom,ipq8066", .data = (const void *)QCOM_ID_IPQ8066 },
+ { .compatible = "qcom,ipq8068", .data = (const void *)QCOM_ID_IPQ8068 },
+ { .compatible = "qcom,ipq8069", .data = (const void *)QCOM_ID_IPQ8069 },
+};
+
static int qcom_cpufreq_ipq8064_name_version(struct device *cpu_dev,
struct nvmem_cell *speedbin_nvmem,
char **pvs_name,
struct qcom_cpufreq_drv *drv)
{
+ int msm_id = -1, ret = 0;
int speed = 0, pvs = 0;
- int msm_id, ret = 0;
u8 *speedbin;
size_t len;
@@ -275,8 +288,30 @@ static int qcom_cpufreq_ipq8064_name_version(struct device *cpu_dev,
get_krait_bin_format_a(cpu_dev, &speed, &pvs, speedbin);
ret = qcom_smem_get_soc_id(&msm_id);
- if (ret)
+ if (ret == -ENODEV) {
+ const struct of_device_id *match;
+ struct device_node *root;
+
+ root = of_find_node_by_path("/");
+ if (!root) {
+ ret = -ENODEV;
+ goto exit;
+ }
+
+		/* Fall back to a compatible match when SMEM is not initialized */
+ match = of_match_node(qcom_cpufreq_ipq806x_match_list, root);
+ of_node_put(root);
+ if (!match) {
+ ret = -ENODEV;
+ goto exit;
+ }
+
+		/* We found a matching device; get the msm_id from its data entry */
+ msm_id = (int)(uintptr_t)match->data;
+ ret = 0;
+ } else if (ret) {
goto exit;
+ }
switch (msm_id) {
case QCOM_ID_IPQ8062:
@@ -591,6 +626,7 @@ static const struct of_device_id qcom_cpufreq_match_list[] __initconst __maybe_u
{ .compatible = "qcom,msm8996", .data = &match_data_kryo },
{ .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
{ .compatible = "qcom,ipq5332", .data = &match_data_kryo },
+ { .compatible = "qcom,ipq5424", .data = &match_data_kryo },
{ .compatible = "qcom,ipq6018", .data = &match_data_ipq6018 },
{ .compatible = "qcom,ipq8064", .data = &match_data_ipq8064 },
{ .compatible = "qcom,ipq8074", .data = &match_data_ipq8074 },
diff --git a/drivers/cpufreq/rcpufreq_dt.rs b/drivers/cpufreq/rcpufreq_dt.rs
index 94ed81644fe1..31e07f0279db 100644
--- a/drivers/cpufreq/rcpufreq_dt.rs
+++ b/drivers/cpufreq/rcpufreq_dt.rs
@@ -9,7 +9,6 @@ use kernel::{
cpumask::CpumaskVar,
device::{Core, Device},
error::code::*,
- fmt,
macros::vtable,
module_platform_driver, of, opp, platform,
prelude::*,
@@ -19,24 +18,21 @@ use kernel::{
/// Finds exact supply name from the OF node.
fn find_supply_name_exact(dev: &Device, name: &str) -> Option<CString> {
- let prop_name = CString::try_from_fmt(fmt!("{}-supply", name)).ok()?;
- dev.property_present(&prop_name)
+ let prop_name = CString::try_from_fmt(fmt!("{name}-supply")).ok()?;
+ dev.fwnode()?
+ .property_present(&prop_name)
.then(|| CString::try_from_fmt(fmt!("{name}")).ok())
.flatten()
}
/// Finds supply name for the CPU from DT.
-fn find_supply_names(dev: &Device, cpu: u32) -> Option<KVec<CString>> {
+fn find_supply_names(dev: &Device, cpu: cpu::CpuId) -> Option<KVec<CString>> {
// Try "cpu0" for older DTs, fallback to "cpu".
- let name = (cpu == 0)
+ (cpu.as_u32() == 0)
.then(|| find_supply_name_exact(dev, "cpu0"))
.flatten()
- .or_else(|| find_supply_name_exact(dev, "cpu"))?;
-
- let mut list = KVec::with_capacity(1, GFP_KERNEL).ok()?;
- list.push(name, GFP_KERNEL).ok()?;
-
- Some(list)
+ .or_else(|| find_supply_name_exact(dev, "cpu"))
+ .and_then(|name| kernel::kvec![name].ok())
}
/// Represents the cpufreq dt device.
@@ -123,7 +119,7 @@ impl cpufreq::Driver for CPUFreqDTDriver {
let mut transition_latency = opp_table.max_transition_latency_ns() as u32;
if transition_latency == 0 {
- transition_latency = cpufreq::ETERNAL_LATENCY_NS;
+ transition_latency = cpufreq::DEFAULT_TRANSITION_LATENCY_NS;
}
policy
@@ -211,16 +207,16 @@ impl platform::Driver for CPUFreqDTDriver {
fn probe(
pdev: &platform::Device<Core>,
_id_info: Option<&Self::IdInfo>,
- ) -> Result<Pin<KBox<Self>>> {
+ ) -> impl PinInit<Self, Error> {
cpufreq::Registration::<CPUFreqDTDriver>::new_foreign_owned(pdev.as_ref())?;
- Ok(KBox::new(Self {}, GFP_KERNEL)?.into())
+ Ok(Self {})
}
}
module_platform_driver! {
type: CPUFreqDTDriver,
name: "cpufreq-dt",
- author: "Viresh Kumar <viresh.kumar@linaro.org>",
+ authors: ["Viresh Kumar <viresh.kumar@linaro.org>"],
description: "Generic CPUFreq DT driver",
license: "GPL v2",
}
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 76c888ed8d16..ba8a1c96427a 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -518,7 +518,7 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
if (policy->cpu != 0) {
ret = -EINVAL;
- goto out_dmc1;
+ goto out;
}
/*
@@ -530,7 +530,7 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
pr_err("CPUFreq doesn't support this memory type\n");
ret = -EINVAL;
- goto out_dmc1;
+ goto out;
}
/* Find current refresh counter and frequency each DMC */
@@ -544,6 +544,8 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
return 0;
+out:
+ clk_put(dmc1_clk);
out_dmc1:
clk_put(dmc0_clk);
out_dmc0:
@@ -554,17 +556,15 @@ out_dmc0:
static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(0);
int ret;
- struct cpufreq_policy *policy;
- policy = cpufreq_cpu_get(0);
if (!policy) {
pr_debug("cpufreq: get no policy for cpu0\n");
return NOTIFY_BAD;
}
ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0);
- cpufreq_cpu_put(policy);
if (ret < 0)
return NOTIFY_BAD;
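The s5pv210 fix restores the stacked-label unwind: each label releases one resource and falls through to the labels below it, so a failure observed after both DMC clocks are held jumps to the label that drops the most recently acquired one first. A hedged sketch with hypothetical demo_* helpers standing in for the clocks:

#include <linux/errno.h>

static void *demo_get(const char *name) { return (void *)name; }
static void demo_put(void *res) { (void)res; }
static int demo_check(void) { return -EINVAL; }	/* pretend a later check fails */

static int demo_init(void)
{
	void *dmc0, *dmc1;
	int ret;

	dmc0 = demo_get("dmc0");
	if (!dmc0)
		return -ENODEV;

	dmc1 = demo_get("dmc1");
	if (!dmc1) {
		ret = -ENODEV;
		goto out_dmc0;
	}

	ret = demo_check();
	if (ret)
		goto out;	/* drop dmc1, then fall through to dmc0 */

	return 0;

out:
	demo_put(dmc1);
out_dmc0:
	demo_put(dmc0);
	return ret;
}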
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index ef078426bfd5..d2a110079f5f 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -15,6 +15,7 @@
#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
@@ -293,7 +294,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
latency = perf_ops->transition_latency_get(ph, domain);
if (!latency)
- latency = CPUFREQ_ETERNAL;
+ latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
policy->cpuinfo.transition_latency = latency;
@@ -424,6 +425,15 @@ static bool scmi_dev_used_by_cpus(struct device *scmi_dev)
return true;
}
+ /*
+ * Older Broadcom STB chips had a "clocks" property for CPU node(s)
+ * that did not match the SCMI performance protocol node, if we got
+ * there, it means we had such an older Device Tree, therefore return
+ * true to preserve backwards compatibility.
+ */
+ if (of_machine_is_compatible("brcm,brcmstb"))
+ return true;
+
return false;
}
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index dcbb0ae7dd47..e530345baddf 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -157,7 +157,7 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy)
latency = scpi_ops->get_transition_latency(cpu_dev);
if (!latency)
- latency = CPUFREQ_ETERNAL;
+ latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
policy->cpuinfo.transition_latency = latency;
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 9c0b01e00508..642ddb9ea217 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -89,11 +89,9 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
static int sh_cpufreq_verify(struct cpufreq_policy_data *policy)
{
struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
- struct cpufreq_frequency_table *freq_table;
- freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
- if (freq_table)
- return cpufreq_frequency_table_verify(policy, freq_table);
+ if (policy->freq_table)
+ return cpufreq_frequency_table_verify(policy);
cpufreq_verify_within_cpu_limits(policy);
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 707c71090cc3..2a1550e1aa21 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -182,7 +182,7 @@ static int spear_cpufreq_probe(struct platform_device *pdev)
if (of_property_read_u32(np, "clock-latency",
&spear_cpufreq.transition_latency))
- spear_cpufreq.transition_latency = CPUFREQ_ETERNAL;
+ spear_cpufreq.transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
cnt = of_property_count_u32_elems(np, "cpufreq_tbl");
if (cnt <= 0) {
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 0b66df4ed513..f8b42e981635 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -378,16 +378,16 @@ EXPORT_SYMBOL_GPL(speedstep_detect_processor);
* DETECT SPEEDSTEP SPEEDS *
*********************************************************************/
-unsigned int speedstep_get_freqs(enum speedstep_processor processor,
- unsigned int *low_speed,
- unsigned int *high_speed,
- unsigned int *transition_latency,
- void (*set_state) (unsigned int state))
+int speedstep_get_freqs(enum speedstep_processor processor,
+ unsigned int *low_speed,
+ unsigned int *high_speed,
+ unsigned int *transition_latency,
+ void (*set_state)(unsigned int state))
{
unsigned int prev_speed;
- unsigned int ret = 0;
unsigned long flags;
ktime_t tv1, tv2;
+ int ret = 0;
if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
return -EINVAL;
diff --git a/drivers/cpufreq/speedstep-lib.h b/drivers/cpufreq/speedstep-lib.h
index dc762ea786be..48329647d4c4 100644
--- a/drivers/cpufreq/speedstep-lib.h
+++ b/drivers/cpufreq/speedstep-lib.h
@@ -41,8 +41,8 @@ extern unsigned int speedstep_get_frequency(enum speedstep_processor processor);
* SPEEDSTEP_LOW; the second argument is zero so that no
* cpufreq_notify_transition calls are initiated.
*/
-extern unsigned int speedstep_get_freqs(enum speedstep_processor processor,
- unsigned int *low_speed,
- unsigned int *high_speed,
- unsigned int *transition_latency,
- void (*set_state) (unsigned int state));
+extern int speedstep_get_freqs(enum speedstep_processor processor,
+ unsigned int *low_speed,
+ unsigned int *high_speed,
+ unsigned int *transition_latency,
+ void (*set_state)(unsigned int state));
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 744312a44279..4fffc8e83692 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -332,13 +332,6 @@ static const struct of_device_id sun50i_cpufreq_match_list[] = {
};
MODULE_DEVICE_TABLE(of, sun50i_cpufreq_match_list);
-static const struct of_device_id *sun50i_cpufreq_match_node(void)
-{
- struct device_node *np __free(device_node) = of_find_node_by_path("/");
-
- return of_match_node(sun50i_cpufreq_match_list, np);
-}
-
/*
* Since the driver depends on nvmem drivers, which may return EPROBE_DEFER,
 * all the real activity is done in the probe, which may be deferred as well.
@@ -346,11 +339,9 @@ static const struct of_device_id *sun50i_cpufreq_match_node(void)
*/
static int __init sun50i_cpufreq_init(void)
{
- const struct of_device_id *match;
int ret;
- match = sun50i_cpufreq_match_node();
- if (!match)
+ if (!of_machine_device_match(sun50i_cpufreq_match_list))
return -ENODEV;
ret = platform_driver_register(&sun50i_cpufreq_driver);
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index 514146d98bca..f8a76bbecef9 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -16,6 +16,10 @@
#include <linux/pm_opp.h>
#include <linux/types.h>
+#include "cpufreq-dt.h"
+
+static struct platform_device *tegra124_cpufreq_pdev;
+
struct tegra124_cpufreq_priv {
struct clk *cpu_clk;
struct clk *pllp_clk;
@@ -55,7 +59,6 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
struct device_node *np __free(device_node) = of_cpu_device_node_get(0);
struct tegra124_cpufreq_priv *priv;
struct device *cpu_dev;
- struct platform_device_info cpufreq_dt_devinfo = {};
int ret;
if (!np)
@@ -95,11 +98,7 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
if (ret)
goto out_put_pllp_clk;
- cpufreq_dt_devinfo.name = "cpufreq-dt";
- cpufreq_dt_devinfo.parent = &pdev->dev;
-
- priv->cpufreq_dt_pdev =
- platform_device_register_full(&cpufreq_dt_devinfo);
+ priv->cpufreq_dt_pdev = cpufreq_dt_pdev_register(&pdev->dev);
if (IS_ERR(priv->cpufreq_dt_pdev)) {
ret = PTR_ERR(priv->cpufreq_dt_pdev);
goto out_put_pllp_clk;
@@ -173,6 +172,21 @@ disable_cpufreq:
return err;
}
+static void tegra124_cpufreq_remove(struct platform_device *pdev)
+{
+ struct tegra124_cpufreq_priv *priv = dev_get_drvdata(&pdev->dev);
+
+ if (!IS_ERR(priv->cpufreq_dt_pdev)) {
+ platform_device_unregister(priv->cpufreq_dt_pdev);
+ priv->cpufreq_dt_pdev = ERR_PTR(-ENODEV);
+ }
+
+ clk_put(priv->pllp_clk);
+ clk_put(priv->pllx_clk);
+ clk_put(priv->dfll_clk);
+ clk_put(priv->cpu_clk);
+}
+
static const struct dev_pm_ops tegra124_cpufreq_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tegra124_cpufreq_suspend,
tegra124_cpufreq_resume)
@@ -182,15 +196,16 @@ static struct platform_driver tegra124_cpufreq_platdrv = {
.driver.name = "cpufreq-tegra124",
.driver.pm = &tegra124_cpufreq_pm_ops,
.probe = tegra124_cpufreq_probe,
+ .remove = tegra124_cpufreq_remove,
};
static int __init tegra_cpufreq_init(void)
{
int ret;
- struct platform_device *pdev;
- if (!(of_machine_is_compatible("nvidia,tegra124") ||
- of_machine_is_compatible("nvidia,tegra210")))
+ if (!(of_machine_is_compatible("nvidia,tegra114") ||
+ of_machine_is_compatible("nvidia,tegra124") ||
+ of_machine_is_compatible("nvidia,tegra210")))
return -ENODEV;
/*
@@ -201,15 +216,25 @@ static int __init tegra_cpufreq_init(void)
if (ret)
return ret;
- pdev = platform_device_register_simple("cpufreq-tegra124", -1, NULL, 0);
- if (IS_ERR(pdev)) {
+ tegra124_cpufreq_pdev = platform_device_register_simple("cpufreq-tegra124", -1, NULL, 0);
+ if (IS_ERR(tegra124_cpufreq_pdev)) {
platform_driver_unregister(&tegra124_cpufreq_platdrv);
- return PTR_ERR(pdev);
+ return PTR_ERR(tegra124_cpufreq_pdev);
}
return 0;
}
module_init(tegra_cpufreq_init);
+static void __exit tegra_cpufreq_module_exit(void)
+{
+ if (!IS_ERR_OR_NULL(tegra124_cpufreq_pdev))
+ platform_device_unregister(tegra124_cpufreq_pdev);
+
+ platform_driver_unregister(&tegra124_cpufreq_platdrv);
+}
+module_exit(tegra_cpufreq_module_exit);
+
MODULE_AUTHOR("Tuomas Tynkkynen <ttynkkynen@nvidia.com>");
MODULE_DESCRIPTION("cpufreq driver for NVIDIA Tegra124");
+MODULE_LICENSE("GPL");
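The new module exit path undoes the init path in reverse order: the platform device is unregistered before the driver that bound it. A minimal hedged skeleton of the pairing (demo names, not the tegra124 code):

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver demo_driver = {
	.driver.name = "demo",
};
static struct platform_device *demo_pdev;

static int __init demo_init(void)
{
	int ret = platform_driver_register(&demo_driver);

	if (ret)
		return ret;

	demo_pdev = platform_device_register_simple("demo", -1, NULL, 0);
	if (IS_ERR(demo_pdev)) {
		platform_driver_unregister(&demo_driver);
		return PTR_ERR(demo_pdev);
	}

	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	platform_device_unregister(demo_pdev);		/* device first ... */
	platform_driver_unregister(&demo_driver);	/* ... then driver */
}
module_exit(demo_exit);

MODULE_DESCRIPTION("Init/exit pairing sketch");
MODULE_LICENSE("GPL");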
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index cbabb726c664..34ed943c5f34 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/units.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
@@ -58,7 +59,7 @@ static const struct tegra186_cpufreq_cpu tegra186_cpus[] = {
};
struct tegra186_cpufreq_cluster {
- struct cpufreq_frequency_table *table;
+ struct cpufreq_frequency_table *bpmp_lut;
u32 ref_clk_khz;
u32 div;
};
@@ -66,16 +67,119 @@ struct tegra186_cpufreq_cluster {
struct tegra186_cpufreq_data {
void __iomem *regs;
const struct tegra186_cpufreq_cpu *cpus;
+ bool icc_dram_bw_scaling;
struct tegra186_cpufreq_cluster clusters[];
};
+static int tegra_cpufreq_set_bw(struct cpufreq_policy *policy, unsigned long freq_khz)
+{
+ struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
+ struct device *dev;
+ int ret;
+
+ dev = get_cpu_device(policy->cpu);
+ if (!dev)
+ return -ENODEV;
+
+ struct dev_pm_opp *opp __free(put_opp) =
+ dev_pm_opp_find_freq_exact(dev, freq_khz * HZ_PER_KHZ, true);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ ret = dev_pm_opp_set_opp(dev, opp);
+ if (ret)
+ data->icc_dram_bw_scaling = false;
+
+ return ret;
+}
+
+static int tegra_cpufreq_init_cpufreq_table(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *bpmp_lut,
+ struct cpufreq_frequency_table **opp_table)
+{
+ struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
+ struct cpufreq_frequency_table *freq_table = NULL;
+ struct cpufreq_frequency_table *pos;
+ struct device *cpu_dev;
+ unsigned long rate;
+ int ret, max_opps;
+ int j = 0;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, policy->cpu);
+ return -ENODEV;
+ }
+
+	/* Initialize the OPP table from the operating-points-v2 property in DT */
+ ret = dev_pm_opp_of_add_table_indexed(cpu_dev, 0);
+ if (ret) {
+ dev_err(cpu_dev, "Invalid or empty opp table in device tree\n");
+ data->icc_dram_bw_scaling = false;
+ return ret;
+ }
+
+ max_opps = dev_pm_opp_get_opp_count(cpu_dev);
+ if (max_opps <= 0) {
+ dev_err(cpu_dev, "Failed to add OPPs\n");
+ return max_opps;
+ }
+
+ /* Disable all opps and cross-validate against LUT later */
+ for (rate = 0; ; rate++) {
+ struct dev_pm_opp *opp __free(put_opp) =
+ dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
+ if (IS_ERR(opp))
+ break;
+
+ dev_pm_opp_disable(cpu_dev, rate);
+ }
+
+ freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_KERNEL);
+ if (!freq_table)
+ return -ENOMEM;
+
+ /*
+	 * Cross-check the frequencies from the BPMP-FW LUT against the OPPs present in DT.
+	 * Enable only those DT OPPs that are also present in the LUT.
+ */
+ cpufreq_for_each_valid_entry(pos, bpmp_lut) {
+ struct dev_pm_opp *opp __free(put_opp) =
+ dev_pm_opp_find_freq_exact(cpu_dev, pos->frequency * HZ_PER_KHZ, false);
+ if (IS_ERR(opp))
+ continue;
+
+ ret = dev_pm_opp_enable(cpu_dev, pos->frequency * HZ_PER_KHZ);
+ if (ret < 0)
+ return ret;
+
+ freq_table[j].driver_data = pos->driver_data;
+ freq_table[j].frequency = pos->frequency;
+ j++;
+ }
+
+ freq_table[j].driver_data = pos->driver_data;
+ freq_table[j].frequency = CPUFREQ_TABLE_END;
+
+ *opp_table = &freq_table[0];
+
+ dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
+
+ /* Prime interconnect data */
+ tegra_cpufreq_set_bw(policy, freq_table[j - 1].frequency);
+
+ return ret;
+}
+
static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;
+ struct cpufreq_frequency_table *freq_table;
+ struct cpufreq_frequency_table *bpmp_lut;
u32 cpu;
+ int ret;
- policy->freq_table = data->clusters[cluster].table;
policy->cpuinfo.transition_latency = 300 * 1000;
policy->driver_data = NULL;
@@ -85,6 +189,20 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
cpumask_set_cpu(cpu, policy->cpus);
}
+ bpmp_lut = data->clusters[cluster].bpmp_lut;
+
+ if (data->icc_dram_bw_scaling) {
+ ret = tegra_cpufreq_init_cpufreq_table(policy, bpmp_lut, &freq_table);
+ if (!ret) {
+ policy->freq_table = freq_table;
+ return 0;
+ }
+ }
+
+ data->icc_dram_bw_scaling = false;
+ policy->freq_table = bpmp_lut;
+ pr_info("OPP tables missing from DT, EMC frequency scaling disabled\n");
+
return 0;
}
@@ -93,23 +211,30 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
struct cpufreq_frequency_table *tbl = policy->freq_table + index;
- unsigned int edvd_offset = data->cpus[policy->cpu].edvd_offset;
+ unsigned int edvd_offset;
u32 edvd_val = tbl->driver_data;
+ u32 cpu;
+
+ for_each_cpu(cpu, policy->cpus) {
+ edvd_offset = data->cpus[cpu].edvd_offset;
+ writel(edvd_val, data->regs + edvd_offset);
+ }
+
+ if (data->icc_dram_bw_scaling)
+ tegra_cpufreq_set_bw(policy, tbl->frequency);
- writel(edvd_val, data->regs + edvd_offset);
return 0;
}
static unsigned int tegra186_cpufreq_get(unsigned int cpu)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
struct tegra186_cpufreq_cluster *cluster;
- struct cpufreq_policy *policy;
unsigned int edvd_offset, cluster_id;
u32 ndiv;
- policy = cpufreq_cpu_get(cpu);
if (!policy)
return 0;
@@ -117,7 +242,6 @@ static unsigned int tegra186_cpufreq_get(unsigned int cpu)
ndiv = readl(data->regs + edvd_offset) & EDVD_CORE_VOLT_FREQ_F_MASK;
cluster_id = data->cpus[policy->cpu].bpmp_cluster_id;
cluster = &data->clusters[cluster_id];
- cpufreq_cpu_put(policy);
return (cluster->ref_clk_khz * ndiv) / cluster->div;
}
@@ -132,15 +256,16 @@ static struct cpufreq_driver tegra186_cpufreq_driver = {
.init = tegra186_cpufreq_init,
};
-static struct cpufreq_frequency_table *init_vhint_table(
+static struct cpufreq_frequency_table *tegra_cpufreq_bpmp_read_lut(
struct platform_device *pdev, struct tegra_bpmp *bpmp,
- struct tegra186_cpufreq_cluster *cluster, unsigned int cluster_id)
+ struct tegra186_cpufreq_cluster *cluster, unsigned int cluster_id,
+ int *num_rates)
{
struct cpufreq_frequency_table *table;
struct mrq_cpu_vhint_request req;
struct tegra_bpmp_message msg;
struct cpu_vhint_data *data;
- int err, i, j, num_rates = 0;
+ int err, i, j;
dma_addr_t phys;
void *virt;
@@ -170,6 +295,7 @@ static struct cpufreq_frequency_table *init_vhint_table(
goto free;
}
+ *num_rates = 0;
for (i = data->vfloor; i <= data->vceil; i++) {
u16 ndiv = data->ndiv[i];
@@ -180,10 +306,10 @@ static struct cpufreq_frequency_table *init_vhint_table(
if (i > 0 && ndiv == data->ndiv[i - 1])
continue;
- num_rates++;
+ (*num_rates)++;
}
- table = devm_kcalloc(&pdev->dev, num_rates + 1, sizeof(*table),
+ table = devm_kcalloc(&pdev->dev, *num_rates + 1, sizeof(*table),
GFP_KERNEL);
if (!table) {
table = ERR_PTR(-ENOMEM);
@@ -225,7 +351,10 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
{
struct tegra186_cpufreq_data *data;
struct tegra_bpmp *bpmp;
- unsigned int i = 0, err;
+ struct device *cpu_dev;
+ unsigned int i = 0, err, edvd_offset;
+ int num_rates = 0;
+ u32 edvd_val, cpu;
data = devm_kzalloc(&pdev->dev,
struct_size(data, clusters, TEGRA186_NUM_CLUSTERS),
@@ -248,15 +377,39 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
for (i = 0; i < TEGRA186_NUM_CLUSTERS; i++) {
struct tegra186_cpufreq_cluster *cluster = &data->clusters[i];
- cluster->table = init_vhint_table(pdev, bpmp, cluster, i);
- if (IS_ERR(cluster->table)) {
- err = PTR_ERR(cluster->table);
+ cluster->bpmp_lut = tegra_cpufreq_bpmp_read_lut(pdev, bpmp, cluster, i, &num_rates);
+ if (IS_ERR(cluster->bpmp_lut)) {
+ err = PTR_ERR(cluster->bpmp_lut);
+ goto put_bpmp;
+ } else if (!num_rates) {
+ err = -EINVAL;
goto put_bpmp;
}
+
+ for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
+ if (data->cpus[cpu].bpmp_cluster_id == i) {
+ edvd_val = cluster->bpmp_lut[num_rates - 1].driver_data;
+ edvd_offset = data->cpus[cpu].edvd_offset;
+ writel(edvd_val, data->regs + edvd_offset);
+ }
+ }
}
tegra186_cpufreq_driver.driver_data = data;
+ /* Check for optional OPPv2 and interconnect paths on CPU0 to enable ICC scaling */
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ err = -EPROBE_DEFER;
+ goto put_bpmp;
+ }
+
+ if (dev_pm_opp_of_get_opp_desc_node(cpu_dev)) {
+ err = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
+ if (!err)
+ data->icc_dram_bw_scaling = true;
+ }
+
err = cpufreq_register_driver(&tegra186_cpufreq_driver);
put_bpmp:
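The OPP lookups above use the __free(put_opp) annotation from <linux/cleanup.h>: dev_pm_opp_put() runs automatically when the pointer leaves scope, so the error returns need no explicit put. A hedged sketch of the shape, assuming a demo_ wrapper:

#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/pm_opp.h>

static int demo_set_opp(struct device *dev, unsigned long freq_hz)
{
	struct dev_pm_opp *opp __free(put_opp) =
		dev_pm_opp_find_freq_exact(dev, freq_hz, true);

	if (IS_ERR(opp))
		return PTR_ERR(opp);		/* nothing to put on error */

	return dev_pm_opp_set_opp(dev, opp);	/* reference dropped at scope exit */
}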
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 9b4f516f313e..695599e1001f 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -750,7 +750,8 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
if (IS_ERR(bpmp))
return PTR_ERR(bpmp);
- read_counters_wq = alloc_workqueue("read_counters_wq", __WQ_LEGACY, 1);
+ read_counters_wq = alloc_workqueue("read_counters_wq",
+ __WQ_LEGACY | WQ_PERCPU, 1);
if (!read_counters_wq) {
dev_err(&pdev->dev, "fail to create_workqueue\n");
err = -EINVAL;
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 5a5147277cd0..6ee76f5fe9c5 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -72,7 +72,9 @@ enum {
#define AM62P5_EFUSE_O_MPU_OPP 15
#define AM62P5_EFUSE_S_MPU_OPP 19
+#define AM62P5_EFUSE_T_MPU_OPP 20
#define AM62P5_EFUSE_U_MPU_OPP 21
+#define AM62P5_EFUSE_V_MPU_OPP 22
#define AM62P5_SUPPORT_O_MPU_OPP BIT(0)
#define AM62P5_SUPPORT_U_MPU_OPP BIT(2)
@@ -153,7 +155,9 @@ static unsigned long am62p5_efuse_xlate(struct ti_cpufreq_data *opp_data,
unsigned long calculated_efuse = AM62P5_SUPPORT_O_MPU_OPP;
switch (efuse) {
+ case AM62P5_EFUSE_V_MPU_OPP:
case AM62P5_EFUSE_U_MPU_OPP:
+ case AM62P5_EFUSE_T_MPU_OPP:
case AM62P5_EFUSE_S_MPU_OPP:
calculated_efuse |= AM62P5_SUPPORT_U_MPU_OPP;
fallthrough;
@@ -307,9 +311,10 @@ static struct ti_cpufreq_soc_data am3517_soc_data = {
};
static const struct soc_device_attribute k3_cpufreq_soc[] = {
- { .family = "AM62X", .revision = "SR1.0" },
- { .family = "AM62AX", .revision = "SR1.0" },
- { .family = "AM62PX", .revision = "SR1.0" },
+ { .family = "AM62X", },
+ { .family = "AM62AX", },
+ { .family = "AM62PX", },
+ { .family = "AM62DX", },
{ /* sentinel */ }
};
@@ -457,6 +462,7 @@ static const struct of_device_id ti_cpufreq_of_match[] __maybe_unused = {
{ .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
{ .compatible = "ti,am625", .data = &am625_soc_data, },
{ .compatible = "ti,am62a7", .data = &am62a7_soc_data, },
+ { .compatible = "ti,am62d2", .data = &am62a7_soc_data, },
{ .compatible = "ti,am62p5", .data = &am62p5_soc_data, },
/* legacy */
{ .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
diff --git a/drivers/cpufreq/virtual-cpufreq.c b/drivers/cpufreq/virtual-cpufreq.c
index 7dd1b0c263c7..6ffa16d239b2 100644
--- a/drivers/cpufreq/virtual-cpufreq.c
+++ b/drivers/cpufreq/virtual-cpufreq.c
@@ -250,7 +250,7 @@ static int virt_cpufreq_offline(struct cpufreq_policy *policy)
static int virt_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
{
if (policy->freq_table)
- return cpufreq_frequency_table_verify(policy, policy->freq_table);
+ return cpufreq_frequency_table_verify(policy);
cpufreq_verify_within_cpu_limits(policy);
return 0;
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index 4abba42fcc31..08f6bf2f6409 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -166,20 +166,11 @@ static const struct of_device_id compatible_machine_match[] = {
static int __init bl_idle_init(void)
{
int ret;
- struct device_node *root = of_find_node_by_path("/");
- const struct of_device_id *match_id;
-
- if (!root)
- return -ENODEV;
/*
* Initialize the driver just for a compliant set of machines
*/
- match_id = of_match_node(compatible_machine_match, root);
-
- of_node_put(root);
-
- if (!match_id)
+ if (!of_machine_device_match(compatible_machine_match))
return -ENODEV;
if (!mcpm_is_available())
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index 2041f59116ce..37c41209eaf9 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -28,7 +28,6 @@ struct psci_pd_provider {
};
static LIST_HEAD(psci_pd_providers);
-static bool psci_pd_allow_domain_state;
static int psci_pd_power_off(struct generic_pm_domain *pd)
{
@@ -38,9 +37,6 @@ static int psci_pd_power_off(struct generic_pm_domain *pd)
if (!state->data)
return 0;
- if (!psci_pd_allow_domain_state)
- return -EBUSY;
-
/* OSI mode is enabled, set the corresponding domain state. */
pd_state = state->data;
psci_set_domain_state(pd, pd->state_idx, *pd_state);
@@ -126,15 +122,6 @@ static void psci_pd_remove(void)
}
}
-static void psci_cpuidle_domain_sync_state(struct device *dev)
-{
- /*
- * All devices have now been attached/probed to the PM domain topology,
- * hence it's fine to allow domain states to be picked.
- */
- psci_pd_allow_domain_state = true;
-}
-
static const struct of_device_id psci_of_match[] = {
{ .compatible = "arm,psci-1.0" },
{}
@@ -195,7 +182,6 @@ static struct platform_driver psci_cpuidle_domain_driver = {
.driver = {
.name = "psci-cpuidle-domain",
.of_match_table = psci_of_match,
- .sync_state = psci_cpuidle_domain_sync_state,
},
};
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 4e1ba35deda9..dcf20ea5ef5e 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -45,7 +45,6 @@ struct psci_cpuidle_domain_state {
static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
static DEFINE_PER_CPU(struct psci_cpuidle_domain_state, psci_domain_state);
static bool psci_cpuidle_use_syscore;
-static bool psci_cpuidle_use_cpuhp;
void psci_set_domain_state(struct generic_pm_domain *pd, unsigned int state_idx,
u32 state)
@@ -124,8 +123,12 @@ static int psci_idle_cpuhp_up(unsigned int cpu)
{
struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
- if (pd_dev)
- pm_runtime_get_sync(pd_dev);
+ if (pd_dev) {
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ pm_runtime_get_sync(pd_dev);
+ else
+ dev_pm_genpd_resume(pd_dev);
+ }
return 0;
}
@@ -135,7 +138,11 @@ static int psci_idle_cpuhp_down(unsigned int cpu)
struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
if (pd_dev) {
- pm_runtime_put_sync(pd_dev);
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ pm_runtime_put_sync(pd_dev);
+ else
+ dev_pm_genpd_suspend(pd_dev);
+
/* Clear domain state to start fresh at next online. */
psci_clear_domain_state();
}
@@ -170,35 +177,36 @@ static void psci_idle_syscore_switch(bool suspend)
}
}
-static int psci_idle_syscore_suspend(void)
+static int psci_idle_syscore_suspend(void *data)
{
psci_idle_syscore_switch(true);
return 0;
}
-static void psci_idle_syscore_resume(void)
+static void psci_idle_syscore_resume(void *data)
{
psci_idle_syscore_switch(false);
}
-static struct syscore_ops psci_idle_syscore_ops = {
+static const struct syscore_ops psci_idle_syscore_ops = {
.suspend = psci_idle_syscore_suspend,
.resume = psci_idle_syscore_resume,
};
+static struct syscore psci_idle_syscore = {
+ .ops = &psci_idle_syscore_ops,
+};
+
static void psci_idle_init_syscore(void)
{
if (psci_cpuidle_use_syscore)
- register_syscore_ops(&psci_idle_syscore_ops);
+ register_syscore(&psci_idle_syscore);
}
static void psci_idle_init_cpuhp(void)
{
int err;
- if (!psci_cpuidle_use_cpuhp)
- return;
-
err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
"cpuidle/psci:online",
psci_idle_cpuhp_up,
@@ -259,10 +267,8 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
* s2ram and s2idle.
*/
drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state;
- if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
- psci_cpuidle_use_cpuhp = true;
- }
return 0;
}
@@ -339,7 +345,6 @@ static void psci_cpu_deinit_idle(int cpu)
dt_idle_detach_cpu(data->dev);
psci_cpuidle_use_syscore = false;
- psci_cpuidle_use_cpuhp = false;
}
static int psci_idle_init_cpu(struct device *dev, int cpu)
@@ -381,8 +386,8 @@ static int psci_idle_init_cpu(struct device *dev, int cpu)
drv->states[0].exit_latency = 1;
drv->states[0].target_residency = 1;
drv->states[0].power_usage = UINT_MAX;
- strcpy(drv->states[0].name, "WFI");
- strcpy(drv->states[0].desc, "ARM WFI");
+ strscpy(drv->states[0].name, "WFI");
+ strscpy(drv->states[0].desc, "ARM WFI");
/*
* If no DT idle states are detected (ret == 0) let the driver
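The syscore conversion registers a struct syscore instance whose const ops take a void *data cookie, per the API used in this series (register_syscore() replacing register_syscore_ops()). A hedged sketch following the shape shown in the hunk; the demo_ names and the header are assumptions:

#include <linux/syscore_ops.h>

static int demo_syscore_suspend(void *data)
{
	/* quiesce hardware; 'data' is the per-registration cookie */
	return 0;
}

static void demo_syscore_resume(void *data)
{
	/* restore hardware state */
}

static const struct syscore_ops demo_syscore_ops = {
	.suspend = demo_syscore_suspend,
	.resume  = demo_syscore_resume,
};

static struct syscore demo_syscore = {
	.ops = &demo_syscore_ops,
};

static void demo_init_syscore(void)
{
	register_syscore(&demo_syscore);
}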
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index 5f386761b156..7ab6f68b96a8 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -86,9 +86,9 @@ static const struct of_device_id qcom_idle_state_match[] = {
static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
{
- struct platform_device *pdev = NULL;
+ struct platform_device *pdev;
struct device_node *cpu_node, *saw_node;
- struct cpuidle_qcom_spm_data *data = NULL;
+ struct cpuidle_qcom_spm_data *data;
int ret;
cpu_node = of_cpu_device_node_get(cpu);
@@ -96,20 +96,23 @@ static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
return -ENODEV;
saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+ of_node_put(cpu_node);
if (!saw_node)
return -ENODEV;
pdev = of_find_device_by_node(saw_node);
of_node_put(saw_node);
- of_node_put(cpu_node);
if (!pdev)
return -ENODEV;
data = devm_kzalloc(cpuidle_dev, sizeof(*data), GFP_KERNEL);
- if (!data)
+ if (!data) {
+ put_device(&pdev->dev);
return -ENOMEM;
+ }
data->spm = dev_get_drvdata(&pdev->dev);
+ put_device(&pdev->dev);
if (!data->spm)
return -EINVAL;
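of_find_device_by_node() takes a reference on the device it returns, which is why the fix adds put_device() on both the allocation-failure path and once the driver data has been read. A minimal hedged sketch of the balanced lookup (demo_ name is illustrative):

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static void *demo_drvdata_from_node(struct device_node *np)
{
	struct platform_device *pdev = of_find_device_by_node(np);
	void *drvdata;

	if (!pdev)
		return NULL;

	drvdata = dev_get_drvdata(&pdev->dev);	/* copy what is needed ... */
	put_device(&pdev->dev);			/* ... then drop the reference */

	return drvdata;
}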
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index 0fe1ece9fbdc..19be6475d356 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -44,7 +45,6 @@ static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data);
static DEFINE_PER_CPU(struct sbi_domain_state, domain_state);
static bool sbi_cpuidle_use_osi;
static bool sbi_cpuidle_use_cpuhp;
-static bool sbi_cpuidle_pd_allow_domain_state;
static inline void sbi_set_domain_state(u32 state)
{
@@ -304,8 +304,8 @@ static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
drv->states[0].exit_latency = 1;
drv->states[0].target_residency = 1;
drv->states[0].power_usage = UINT_MAX;
- strcpy(drv->states[0].name, "WFI");
- strcpy(drv->states[0].desc, "RISC-V WFI");
+ strscpy(drv->states[0].name, "WFI");
+ strscpy(drv->states[0].desc, "RISC-V WFI");
/*
* If no DT idle states are detected (ret == 0) let the driver
@@ -345,15 +345,6 @@ deinit:
return ret;
}
-static void sbi_cpuidle_domain_sync_state(struct device *dev)
-{
- /*
- * All devices have now been attached/probed to the PM domain
- * topology, hence it's fine to allow domain states to be picked.
- */
- sbi_cpuidle_pd_allow_domain_state = true;
-}
-
#ifdef CONFIG_DT_IDLE_GENPD
static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
@@ -364,9 +355,6 @@ static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
if (!state->data)
return 0;
- if (!sbi_cpuidle_pd_allow_domain_state)
- return -EBUSY;
-
/* OSI mode is enabled, set the corresponding domain state. */
pd_state = state->data;
sbi_set_domain_state(*pd_state);
@@ -564,7 +552,6 @@ static struct platform_driver sbi_cpuidle_driver = {
.probe = sbi_cpuidle_probe,
.driver = {
.name = "sbi-cpuidle",
- .sync_state = sbi_cpuidle_domain_sync_state,
},
};
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 0835da449db8..c7876e9e024f 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -184,20 +184,22 @@ static noinstr void enter_s2idle_proper(struct cpuidle_driver *drv,
* cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle.
* @drv: cpuidle driver for the given CPU.
* @dev: cpuidle device for the given CPU.
+ * @latency_limit_ns: Idle state exit latency limit
*
* If there are states with the ->enter_s2idle callback, find the deepest of
* them and enter it with frozen tick.
*/
-int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ u64 latency_limit_ns)
{
int index;
/*
- * Find the deepest state with ->enter_s2idle present, which guarantees
- * that interrupts won't be enabled when it exits and allows the tick to
- * be frozen safely.
+ * Find the deepest state with ->enter_s2idle present that meets the
+ * specified latency limit, which guarantees that interrupts won't be
+ * enabled when it exits and allows the tick to be frozen safely.
*/
- index = find_deepest_state(drv, dev, U64_MAX, 0, true);
+ index = find_deepest_state(drv, dev, latency_limit_ns, 0, true);
if (index > 0) {
enter_s2idle_proper(drv, dev, index);
local_irq_enable();
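The new latency_limit_ns argument lets suspend-to-idle honor a wakeup-latency constraint instead of always taking the deepest s2idle state. A minimal sketch of a caller, assuming the limit is derived from the wakeup-latency QoS used later in this series (the real call site lives in the scheduler's idle loop; the wiring below is illustrative, not the actual code):

/*
 * Sketch only: pass a QoS-derived latency limit to cpuidle_enter_s2idle().
 * cpu_wakeup_latency_qos_limit() is assumed to return microseconds.
 */
static void s2idle_enter_sketch(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	u64 limit_ns = (u64)cpu_wakeup_latency_qos_limit() * NSEC_PER_USEC;

	/* Deepest ->enter_s2idle state whose exit latency fits the limit. */
	cpuidle_enter_s2idle(drv, dev, limit_ns);
}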
@@ -635,8 +637,14 @@ static void __cpuidle_device_init(struct cpuidle_device *dev)
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ unsigned int cpu = dev->cpu;
int i, ret;
+ if (per_cpu(cpuidle_devices, cpu)) {
+ pr_info("CPU%d: cpuidle device already registered\n", cpu);
+ return -EEXIST;
+ }
+
if (!try_module_get(drv->owner))
return -EINVAL;
@@ -648,7 +656,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER;
}
- per_cpu(cpuidle_devices, dev->cpu) = dev;
+ per_cpu(cpuidle_devices, cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
ret = cpuidle_coupled_register_device(dev);
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 9bbfa594c442..370664c47e65 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -8,6 +8,8 @@
* This code is licenced under the GPL.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -193,6 +195,14 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
s->exit_latency_ns = 0;
else
s->exit_latency = div_u64(s->exit_latency_ns, NSEC_PER_USEC);
+
+ /*
+ * Warn if the exit latency of a CPU idle state exceeds its
+ * target residency which is assumed to never happen in cpuidle
+ * in multiple places.
+ */
+ if (s->exit_latency_ns > s->target_residency_ns)
+ pr_warn("Idle state %d target residency too low\n", i);
}
}
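The warning encodes an invariant cpuidle relies on elsewhere: a state's exit latency must not exceed its target residency, otherwise residency-based selection math stops making sense. A standalone sketch of the check, for illustration:

/* Sketch: verify the exit-latency/target-residency invariant per state. */
static bool cpuidle_states_sane(const struct cpuidle_driver *drv)
{
	int i;

	for (i = 0; i < drv->state_count; i++) {
		const struct cpuidle_state *s = &drv->states[i];

		/* Idling must last at least as long as exiting takes. */
		if (s->exit_latency_ns > s->target_residency_ns)
			return false;
	}
	return true;
}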
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index 97feb7d8fb23..558d49838990 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -98,7 +98,6 @@ static bool idle_state_valid(struct device_node *state_node, unsigned int idx,
{
int cpu;
struct device_node *cpu_node, *curr_state_node;
- bool valid = true;
/*
* Compare idle state phandles for index idx on all CPUs in the
@@ -107,20 +106,17 @@ static bool idle_state_valid(struct device_node *state_node, unsigned int idx,
* retrieved from. If a mismatch is found bail out straight
* away since we certainly hit a firmware misconfiguration.
*/
- for (cpu = cpumask_next(cpumask_first(cpumask), cpumask);
- cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) {
+ cpu = cpumask_first(cpumask) + 1;
+ for_each_cpu_from(cpu, cpumask) {
cpu_node = of_cpu_device_node_get(cpu);
curr_state_node = of_get_cpu_state_node(cpu_node, idx);
- if (state_node != curr_state_node)
- valid = false;
-
of_node_put(curr_state_node);
of_node_put(cpu_node);
- if (!valid)
- break;
+ if (state_node != curr_state_node)
+ return false;
}
- return valid;
+ return true;
}
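The rewrite replaces the open-coded cpumask walk with for_each_cpu_from(), which iterates the mask from an arbitrary starting CPU, and bails out on the first phandle mismatch. A reduced sketch of the resulting loop shape (hypothetical helper name):

/* Sketch: compare idle-state phandles across all CPUs after the first. */
static bool state_shared_by_all_cpus(struct device_node *state_node,
				     unsigned int idx,
				     const struct cpumask *cpumask)
{
	int cpu = cpumask_first(cpumask) + 1;	/* first CPU is the reference */

	for_each_cpu_from(cpu, cpumask) {
		struct device_node *cpu_node = of_cpu_device_node_get(cpu);
		struct device_node *curr = of_get_cpu_state_node(cpu_node, idx);

		of_node_put(curr);
		of_node_put(cpu_node);

		if (state_node != curr)	/* pointer (phandle) comparison */
			return false;
	}
	return true;
}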
/**
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 0d0f9751ff8f..5d0e7f78c6c5 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -111,6 +111,10 @@ s64 cpuidle_governor_latency_req(unsigned int cpu)
struct device *device = get_cpu_device(cpu);
int device_req = dev_pm_qos_raw_resume_latency(device);
int global_req = cpu_latency_qos_limit();
+ int global_wake_req = cpu_wakeup_latency_qos_limit();
+
+ if (global_req > global_wake_req)
+ global_req = global_wake_req;
if (device_req > global_req)
device_req = global_req;
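The effective limit handed back to governors is therefore the minimum of three inputs: the per-device resume-latency QoS, the global CPU latency QoS, and the new global wakeup-latency QoS. A sketch of the combined computation (the real function returns nanoseconds):

/* Sketch: effective governor latency request after the wakeup QoS clamp. */
static s64 effective_latency_req_ns(int device_req, int global_req,
				    int global_wake_req)
{
	int req = min(global_req, global_wake_req);

	req = min(device_req, req);
	return (s64)req * NSEC_PER_USEC;	/* QoS limits are in usec */
}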
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 52d5d26fc7c6..64d6f7a1c776 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -97,6 +97,14 @@ static inline int which_bucket(u64 duration_ns)
static DEFINE_PER_CPU(struct menu_device, menu_devices);
+static void menu_update_intervals(struct menu_device *data, unsigned int interval_us)
+{
+ /* Update the repeating-pattern data. */
+ data->intervals[data->interval_ptr++] = interval_us;
+ if (data->interval_ptr >= INTERVALS)
+ data->interval_ptr = 0;
+}
+
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
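menu_update_intervals() is nothing more than a fixed-size ring buffer over the most recent observed idle intervals; factoring it out lets menu_select() also feed the buffer when a state was rejected. A self-contained sketch of the same structure (INTERVALS is the governor's history depth, 8 in menu.c):

/* Sketch: fixed-size ring of recent idle intervals, oldest overwritten. */
#define INTERVALS 8

struct interval_ring {
	unsigned int intervals[INTERVALS];
	int ptr;
};

static void ring_push(struct interval_ring *r, unsigned int interval_us)
{
	r->intervals[r->ptr++] = interval_us;
	if (r->ptr >= INTERVALS)
		r->ptr = 0;
}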
/*
@@ -180,20 +188,17 @@ again:
*
* This can deal with workloads that have long pauses interspersed
* with sporadic activity with a bunch of short pauses.
+ *
+ * However, if the number of remaining samples is too small to exclude
+ * any more outliers, allow the deepest available idle state to be
+ * selected because there are systems where the time spent by CPUs in
+ * deep idle states is correlated to the maximum frequency the CPUs
+ * can get to. On those systems, shallow idle states should be avoided
+ * unless there is a clear indication that the given CPU is most likely
+ * going to be woken up shortly.
*/
- if (divisor * 4 <= INTERVALS * 3) {
- /*
- * If there are sufficiently many data points still under
- * consideration after the outliers have been eliminated,
- * returning without a prediction would be a mistake because it
- * is likely that the next interval will not exceed the current
- * maximum, so return the latter in that case.
- */
- if (divisor >= INTERVALS / 2)
- return max;
-
+ if (divisor * 4 <= INTERVALS * 3)
return UINT_MAX;
- }
/* Update the thresholds for the next round. */
if (avg - min > max - avg)
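With INTERVALS == 8 the simplified test reads: give up as soon as divisor * 4 <= 24, that is divisor <= 6, meaning two or more samples were discarded as outliers. The worked arithmetic, as a comment:

/*
 * Sketch of the give-up threshold, assuming INTERVALS == 8:
 *
 *	divisor * 4 <= INTERVALS * 3
 *	divisor * 4 <= 24
 *	divisor     <= 6
 *
 * Once more than a quarter of the samples have been dropped as outliers,
 * return UINT_MAX so the deepest available idle state can be selected.
 */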
@@ -222,6 +227,14 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (data->needs_update) {
menu_update(drv, dev);
data->needs_update = 0;
+ } else if (!dev->last_residency_ns) {
+ /*
+ * This happens when the driver rejects the previously selected
+ * idle state and returns an error, so update the recent
+ * intervals table to prevent invalid information from being
+ * used going forward.
+ */
+ menu_update_intervals(data, UINT_MAX);
}
/* Find the shortest expected idle interval. */
@@ -271,20 +284,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
return 0;
}
- if (tick_nohz_tick_stopped()) {
- /*
- * If the tick is already stopped, the cost of possible short
- * idle duration misprediction is much higher, because the CPU
- * may be stuck in a shallow idle state for a long time as a
- * result of it. In that case say we might mispredict and use
- * the known time till the closest timer event for the idle
- * state selection.
- */
- if (predicted_ns < TICK_NSEC)
- predicted_ns = data->next_timer_ns;
- } else if (latency_req > predicted_ns) {
- latency_req = predicted_ns;
- }
+ /*
+ * If the tick is already stopped, the cost of possible short idle
+ * duration misprediction is much higher, because the CPU may be stuck
+ * in a shallow idle state for a long time as a result of it. In that
+ * case, say we might mispredict and use the known time till the closest
+ * timer event for the idle state selection.
+ */
+ if (tick_nohz_tick_stopped() && predicted_ns < TICK_NSEC)
+ predicted_ns = data->next_timer_ns;
/*
* Find the idle state with the lowest power while satisfying
@@ -300,48 +308,54 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (idx == -1)
idx = i; /* first enabled state */
- if (s->target_residency_ns > predicted_ns) {
- /*
- * Use a physical idle state, not busy polling, unless
- * a timer is going to trigger soon enough.
- */
- if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
- s->exit_latency_ns <= latency_req &&
- s->target_residency_ns <= data->next_timer_ns) {
- predicted_ns = s->target_residency_ns;
- idx = i;
- break;
- }
- if (predicted_ns < TICK_NSEC)
- break;
-
- if (!tick_nohz_tick_stopped()) {
- /*
- * If the state selected so far is shallow,
- * waking up early won't hurt, so retain the
- * tick in that case and let the governor run
- * again in the next iteration of the loop.
- */
- predicted_ns = drv->states[idx].target_residency_ns;
- break;
- }
+ if (s->exit_latency_ns > latency_req)
+ break;
- /*
- * If the state selected so far is shallow and this
- * state's target residency matches the time till the
- * closest timer event, select this one to avoid getting
- * stuck in the shallow one for too long.
- */
- if (drv->states[idx].target_residency_ns < TICK_NSEC &&
- s->target_residency_ns <= delta_tick)
- idx = i;
+ if (s->target_residency_ns <= predicted_ns) {
+ idx = i;
+ continue;
+ }
- return idx;
+ /*
+ * Use a physical idle state instead of busy polling so long as
+ * its target residency is below the residency threshold, its
+ * exit latency is not greater than the predicted idle duration,
+ * and the next timer doesn't expire soon.
+ */
+ if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
+ s->target_residency_ns < RESIDENCY_THRESHOLD_NS &&
+ s->target_residency_ns <= data->next_timer_ns &&
+ s->exit_latency_ns <= predicted_ns) {
+ predicted_ns = s->target_residency_ns;
+ idx = i;
+ break;
}
- if (s->exit_latency_ns > latency_req)
+
+ if (predicted_ns < TICK_NSEC)
+ break;
+
+ if (!tick_nohz_tick_stopped()) {
+ /*
+ * If the state selected so far is shallow, waking up
+ * early won't hurt, so retain the tick in that case and
+ * let the governor run again in the next iteration of
+ * the idle loop.
+ */
+ predicted_ns = drv->states[idx].target_residency_ns;
break;
+ }
- idx = i;
+ /*
+ * If the state selected so far is shallow and this state's
+ * target residency matches the time till the closest timer
+ * event, select this one to avoid getting stuck in the shallow
+ * one for too long.
+ */
+ if (drv->states[idx].target_residency_ns < TICK_NSEC &&
+ s->target_residency_ns <= delta_tick)
+ idx = i;
+
+ return idx;
}
if (idx == -1)
@@ -482,10 +496,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->correction_factor[data->bucket] = new_factor;
- /* update the repeating-pattern data */
- data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
- if (data->interval_ptr >= INTERVALS)
- data->interval_ptr = 0;
+ menu_update_intervals(data, ktime_to_us(measured_ns));
}
/**
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index bfa55c1eab5b..81ac5fd58a1c 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -76,7 +76,7 @@
* likely woken up by a non-timer wakeup source).
*
* 2. If the second sum computed in step 1 is greater than a half of the sum of
- * both metrics for the candidate state bin and all subsequent bins(if any),
+ * both metrics for the candidate state bin and all subsequent bins (if any),
* a shallower idle state is likely to be more suitable, so look for it.
*
* - Traverse the enabled idle states shallower than the candidate one in the
@@ -133,21 +133,33 @@ struct teo_bin {
* @sleep_length_ns: Time till the closest timer event (at the selection time).
* @state_bins: Idle state data bins for this CPU.
* @total: Grand total of the "intercepts" and "hits" metrics for all bins.
+ * @total_tick: Wakeups by the scheduler tick.
* @tick_intercepts: "Intercepts" before TICK_NSEC.
* @short_idles: Wakeups after short idle periods.
- * @artificial_wakeup: Set if the wakeup has been triggered by a safety net.
+ * @tick_wakeup: Set if the last wakeup was by the scheduler tick.
*/
struct teo_cpu {
s64 sleep_length_ns;
struct teo_bin state_bins[CPUIDLE_STATE_MAX];
unsigned int total;
+ unsigned int total_tick;
unsigned int tick_intercepts;
unsigned int short_idles;
- bool artificial_wakeup;
+ bool tick_wakeup;
};
static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
+static void teo_decay(unsigned int *metric)
+{
+ unsigned int delta = *metric >> DECAY_SHIFT;
+
+ if (delta)
+ *metric -= delta;
+ else
+ *metric = 0;
+}
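teo_decay() is geometric decay with a snap-to-zero floor: each call subtracts metric >> DECAY_SHIFT, and when that delta underflows to 0 the metric is forced to 0 instead of lingering at a small residue forever. A sketch of the trajectory (DECAY_SHIFT is 3 in teo.c):

/* Sketch: repeated decay drives a metric all the way to zero. */
static void teo_decay_demo(void)
{
	unsigned int m = 100;	/* decays 100, 88, 77, ..., 8, 7, 0 */

	while (m) {
		unsigned int delta = m >> 3;	/* DECAY_SHIFT */

		m = delta ? m - delta : 0;
	}
}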
+
/**
* teo_update - Update CPU metrics after wakeup.
* @drv: cpuidle driver containing state data.
@@ -155,21 +167,22 @@ static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
*/
static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
- struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+ struct teo_cpu *cpu_data = this_cpu_ptr(&teo_cpus);
int i, idx_timer = 0, idx_duration = 0;
- s64 target_residency_ns;
- u64 measured_ns;
+ s64 target_residency_ns, measured_ns;
+ unsigned int total = 0;
- cpu_data->short_idles -= cpu_data->short_idles >> DECAY_SHIFT;
+ teo_decay(&cpu_data->short_idles);
- if (cpu_data->artificial_wakeup) {
+ if (dev->poll_time_limit) {
+ dev->poll_time_limit = false;
/*
- * If one of the safety nets has triggered, assume that this
+ * Polling state timeout has triggered, so assume that this
* might have been a long sleep.
*/
- measured_ns = U64_MAX;
+ measured_ns = S64_MAX;
} else {
- u64 lat_ns = drv->states[dev->last_state_idx].exit_latency_ns;
+ s64 lat_ns = drv->states[dev->last_state_idx].exit_latency_ns;
measured_ns = dev->last_residency_ns;
/*
@@ -196,8 +209,10 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
for (i = 0; i < drv->state_count; i++) {
struct teo_bin *bin = &cpu_data->state_bins[i];
- bin->hits -= bin->hits >> DECAY_SHIFT;
- bin->intercepts -= bin->intercepts >> DECAY_SHIFT;
+ teo_decay(&bin->hits);
+ total += bin->hits;
+ teo_decay(&bin->intercepts);
+ total += bin->intercepts;
target_residency_ns = drv->states[i].target_residency_ns;
@@ -208,7 +223,24 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
}
}
- cpu_data->tick_intercepts -= cpu_data->tick_intercepts >> DECAY_SHIFT;
+ cpu_data->total = total + PULSE;
+
+ teo_decay(&cpu_data->tick_intercepts);
+
+ teo_decay(&cpu_data->total_tick);
+ if (cpu_data->tick_wakeup) {
+ cpu_data->total_tick += PULSE;
+ /*
+ * If tick wakeups dominate the wakeup pattern, count this one
+ * as a hit on the deepest available idle state to increase the
+ * likelihood of stopping the tick.
+ */
+ if (3 * cpu_data->total_tick > 2 * cpu_data->total) {
+ cpu_data->state_bins[drv->state_count - 1].hits += PULSE;
+ return;
+ }
+ }
+
/*
* If the measured idle duration falls into the same bin as the sleep
* length, this is a "hit", so update the "hits" metric for that bin.
@@ -219,18 +251,9 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
cpu_data->state_bins[idx_timer].hits += PULSE;
} else {
cpu_data->state_bins[idx_duration].intercepts += PULSE;
- if (TICK_NSEC <= measured_ns)
+ if (measured_ns <= TICK_NSEC)
cpu_data->tick_intercepts += PULSE;
}
-
- cpu_data->total -= cpu_data->total >> DECAY_SHIFT;
- cpu_data->total += PULSE;
-}
-
-static bool teo_state_ok(int i, struct cpuidle_driver *drv)
-{
- return !tick_nohz_tick_stopped() ||
- drv->states[i].target_residency_ns >= TICK_NSEC;
}
/**
@@ -239,17 +262,15 @@ static bool teo_state_ok(int i, struct cpuidle_driver *drv)
* @dev: Target CPU.
* @state_idx: Index of the capping idle state.
* @duration_ns: Idle duration value to match.
- * @no_poll: Don't consider polling states.
*/
static int teo_find_shallower_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int state_idx,
- s64 duration_ns, bool no_poll)
+ s64 duration_ns)
{
int i;
for (i = state_idx - 1; i >= 0; i--) {
- if (dev->states_usage[i].disable ||
- (no_poll && drv->states[i].flags & CPUIDLE_FLAG_POLLING))
+ if (dev->states_usage[i].disable)
continue;
state_idx = i;
@@ -268,7 +289,7 @@ static int teo_find_shallower_state(struct cpuidle_driver *drv,
static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
bool *stop_tick)
{
- struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+ struct teo_cpu *cpu_data = this_cpu_ptr(&teo_cpus);
s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
ktime_t delta_tick = TICK_NSEC / 2;
unsigned int idx_intercept_sum = 0;
@@ -356,7 +377,18 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* better choice.
*/
if (2 * idx_intercept_sum > cpu_data->total - idx_hit_sum) {
- int first_suitable_idx = idx;
+ int min_idx = idx0;
+
+ if (tick_nohz_tick_stopped()) {
+ /*
+ * Look for the shallowest idle state below the current
+ * candidate one whose target residency is at least
+ * equal to the tick period length.
+ */
+ while (min_idx < idx &&
+ drv->states[min_idx].target_residency_ns < TICK_NSEC)
+ min_idx++;
+ }
/*
* Look for the deepest idle state whose target residency had
@@ -366,49 +398,14 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* Take the possible duration limitation present if the tick
* has been stopped already into account.
*/
- intercept_sum = 0;
-
- for (i = idx - 1; i >= 0; i--) {
- struct teo_bin *bin = &cpu_data->state_bins[i];
-
- intercept_sum += bin->intercepts;
-
- if (2 * intercept_sum > idx_intercept_sum) {
- /*
- * Use the current state unless it is too
- * shallow or disabled, in which case take the
- * first enabled state that is deep enough.
- */
- if (teo_state_ok(i, drv) &&
- !dev->states_usage[i].disable) {
- idx = i;
- break;
- }
- idx = first_suitable_idx;
- break;
- }
+ for (i = idx - 1, intercept_sum = 0; i >= min_idx; i--) {
+ intercept_sum += cpu_data->state_bins[i].intercepts;
if (dev->states_usage[i].disable)
continue;
- if (teo_state_ok(i, drv)) {
- /*
- * The current state is deep enough, but still
- * there may be a better one.
- */
- first_suitable_idx = i;
- continue;
- }
-
- /*
- * The current state is too shallow, so if no suitable
- * states other than the initial candidate have been
- * found, give up (the remaining states to check are
- * shallower still), but otherwise the first suitable
- * state other than the initial candidate may turn out
- * to be preferable.
- */
- if (first_suitable_idx == idx)
+ idx = i;
+ if (2 * intercept_sum > idx_intercept_sum)
break;
}
}
@@ -458,11 +455,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* If the closest expected timer is before the target residency of the
* candidate state, a shallower one needs to be found.
*/
- if (drv->states[idx].target_residency_ns > duration_ns) {
- i = teo_find_shallower_state(drv, dev, idx, duration_ns, false);
- if (teo_state_ok(i, drv))
- idx = i;
- }
+ if (drv->states[idx].target_residency_ns > duration_ns)
+ idx = teo_find_shallower_state(drv, dev, idx, duration_ns);
/*
* If the selected state's target residency is below the tick length
@@ -490,7 +484,7 @@ end:
*/
if (idx > idx0 &&
drv->states[idx].target_residency_ns > delta_tick)
- idx = teo_find_shallower_state(drv, dev, idx, delta_tick, false);
+ idx = teo_find_shallower_state(drv, dev, idx, delta_tick);
out_tick:
*stop_tick = false;
@@ -504,20 +498,11 @@ out_tick:
*/
static void teo_reflect(struct cpuidle_device *dev, int state)
{
- struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+ struct teo_cpu *cpu_data = this_cpu_ptr(&teo_cpus);
+
+ cpu_data->tick_wakeup = tick_nohz_idle_got_tick();
dev->last_state_idx = state;
- if (dev->poll_time_limit ||
- (tick_nohz_idle_got_tick() && cpu_data->sleep_length_ns > TICK_NSEC)) {
- /*
- * The wakeup was not "genuine", but triggered by one of the
- * safety nets.
- */
- dev->poll_time_limit = false;
- cpu_data->artificial_wakeup = true;
- } else {
- cpu_data->artificial_wakeup = false;
- }
}
/**
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index 9b6d90a72601..c7524e4c522a 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -4,9 +4,13 @@
*/
#include <linux/cpuidle.h>
+#include <linux/export.h>
+#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/idle.h>
+#include <linux/sprintf.h>
+#include <linux/types.h>
#define POLL_IDLE_RELAX_COUNT 200
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index d6f5da61cb7d..61de64817604 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -27,14 +27,14 @@ static ssize_t show_available_governors(struct device *dev,
mutex_lock(&cpuidle_lock);
list_for_each_entry(tmp, &cpuidle_governors, governor_list) {
- if (i >= (ssize_t) (PAGE_SIZE - (CPUIDLE_NAME_LEN + 2)))
+ if (i >= (ssize_t)(PAGE_SIZE - (CPUIDLE_NAME_LEN + 2)))
goto out;
- i += scnprintf(&buf[i], CPUIDLE_NAME_LEN + 1, "%s ", tmp->name);
+ i += sysfs_emit_at(buf, i, "%.*s ", CPUIDLE_NAME_LEN, tmp->name);
}
out:
- i+= sprintf(&buf[i], "\n");
+ i += sysfs_emit_at(buf, i, "\n");
mutex_unlock(&cpuidle_lock);
return i;
}
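sysfs_emit() and sysfs_emit_at() are the bounds-checked replacements for sprintf() in sysfs show() callbacks: they know the buffer is a single page and refuse to write past PAGE_SIZE. A minimal show() using the same pattern (sketch):

/* Sketch: a sysfs show() callback built on the bounded emit helpers. */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	ssize_t len = 0;

	len += sysfs_emit_at(buf, len, "%s ", "first");
	len += sysfs_emit_at(buf, len, "%s\n", "second");
	return len;	/* guaranteed <= PAGE_SIZE */
}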
@@ -49,9 +49,9 @@ static ssize_t show_current_driver(struct device *dev,
spin_lock(&cpuidle_driver_lock);
drv = cpuidle_get_driver();
if (drv)
- ret = sprintf(buf, "%s\n", drv->name);
+ ret = sysfs_emit(buf, "%s\n", drv->name);
else
- ret = sprintf(buf, "none\n");
+ ret = sysfs_emit(buf, "none\n");
spin_unlock(&cpuidle_driver_lock);
return ret;
@@ -65,9 +65,9 @@ static ssize_t show_current_governor(struct device *dev,
mutex_lock(&cpuidle_lock);
if (cpuidle_curr_governor)
- ret = sprintf(buf, "%s\n", cpuidle_curr_governor->name);
+ ret = sysfs_emit(buf, "%s\n", cpuidle_curr_governor->name);
else
- ret = sprintf(buf, "none\n");
+ ret = sysfs_emit(buf, "none\n");
mutex_unlock(&cpuidle_lock);
return ret;
@@ -230,7 +230,7 @@ static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0644, show, store)
static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, char *buf) \
{ \
- return sprintf(buf, "%u\n", state->_name);\
+ return sysfs_emit(buf, "%u\n", state->_name);\
}
#define define_show_state_ull_function(_name) \
@@ -238,7 +238,7 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
- return sprintf(buf, "%llu\n", state_usage->_name);\
+ return sysfs_emit(buf, "%llu\n", state_usage->_name);\
}
#define define_show_state_str_function(_name) \
@@ -247,8 +247,8 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
char *buf) \
{ \
if (state->_name[0] == '\0')\
- return sprintf(buf, "<null>\n");\
- return sprintf(buf, "%s\n", state->_name);\
+ return sysfs_emit(buf, "<null>\n");\
+ return sysfs_emit(buf, "%s\n", state->_name);\
}
#define define_show_state_time_function(_name) \
@@ -256,7 +256,7 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
- return sprintf(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \
+ return sysfs_emit(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \
}
define_show_state_time_function(exit_latency)
@@ -273,14 +273,14 @@ static ssize_t show_state_time(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage,
char *buf)
{
- return sprintf(buf, "%llu\n", ktime_to_us(state_usage->time_ns));
+ return sysfs_emit(buf, "%llu\n", ktime_to_us(state_usage->time_ns));
}
static ssize_t show_state_disable(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage,
char *buf)
{
- return sprintf(buf, "%llu\n",
+ return sysfs_emit(buf, "%llu\n",
state_usage->disable & CPUIDLE_STATE_DISABLED_BY_USER);
}
@@ -310,7 +310,7 @@ static ssize_t show_state_default_status(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage,
char *buf)
{
- return sprintf(buf, "%s\n",
+ return sysfs_emit(buf, "%s\n",
state->flags & CPUIDLE_FLAG_OFF ? "disabled" : "enabled");
}
@@ -358,7 +358,7 @@ static ssize_t show_state_s2idle_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
- return sprintf(buf, "%llu\n", state_usage->s2idle_##_name);\
+ return sysfs_emit(buf, "%llu\n", state_usage->s2idle_##_name);\
}
define_show_state_s2idle_ull_function(usage);
@@ -550,7 +550,7 @@ static ssize_t show_driver_name(struct cpuidle_driver *drv, char *buf)
ssize_t ret;
spin_lock(&cpuidle_driver_lock);
- ret = sprintf(buf, "%s\n", drv ? drv->name : "none");
+ ret = sysfs_emit(buf, "%s\n", drv ? drv->name : "none");
spin_unlock(&cpuidle_driver_lock);
return ret;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 9f8a3a5bed7e..8d3b5d2890f8 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -188,6 +188,19 @@ config CRYPTO_PAES_S390
Select this option if you want to use the paes cipher
for example to use protected key encrypted devices.
+config CRYPTO_PHMAC_S390
+ tristate "PHMAC cipher algorithms"
+ depends on S390
+ depends on PKEY
+ select CRYPTO_HASH
+ select CRYPTO_ENGINE
+ help
+ This is the s390 hardware-accelerated implementation of
+ protected-key HMAC support for SHA224, SHA256, SHA384 and SHA512.
+
+ Select this option if you want to use the phmac digests,
+ for example to use dm-integrity with secure/protected keys.
+
config S390_PRNG
tristate "Pseudo random number generator device driver"
depends on S390
@@ -426,7 +439,7 @@ config CRYPTO_DEV_ATMEL_AUTHENC
config CRYPTO_DEV_ATMEL_AES
tristate "Support for Atmel AES hw accelerator"
- depends on ARCH_AT91 || COMPILE_TEST
+ depends on ARCH_MICROCHIP || COMPILE_TEST
select CRYPTO_AES
select CRYPTO_AEAD
select CRYPTO_SKCIPHER
@@ -712,6 +725,19 @@ config CRYPTO_DEV_TEGRA
Select this to enable Tegra Security Engine which accelerates various
AES encryption/decryption and HASH algorithms.
+config CRYPTO_DEV_XILINX_TRNG
+ tristate "Support for Xilinx True Random Generator"
+ depends on ZYNQMP_FIRMWARE || COMPILE_TEST
+ select CRYPTO_DF80090A
+ select CRYPTO_RNG
+ select HW_RANDOM
+ help
+ The Xilinx Versal SoC driver provides kernel-side support for the True
+ Random Number Generator and for pseudo-random number generation in
+ CTR_DRBG mode, as defined in NIST SP800-90A.
+
+ To compile this driver as a module, choose M here: the module
+ will be called xilinx-trng.
+
config CRYPTO_DEV_ZYNQMP_AES
tristate "Support for Xilinx ZynqMP AES hw accelerator"
depends on ZYNQMP_FIRMWARE || COMPILE_TEST
@@ -827,6 +853,7 @@ config CRYPTO_DEV_CCREE
If unsure say Y.
source "drivers/crypto/hisilicon/Kconfig"
+source "drivers/crypto/loongson/Kconfig"
source "drivers/crypto/amlogic/Kconfig"
@@ -850,5 +877,6 @@ config CRYPTO_DEV_SA2UL
source "drivers/crypto/aspeed/Kconfig"
source "drivers/crypto/starfive/Kconfig"
source "drivers/crypto/inside-secure/eip93/Kconfig"
+source "drivers/crypto/ti/Kconfig"
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 22eadcc8f4a2..322ae8854e3e 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -44,7 +44,9 @@ obj-y += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
obj-y += xilinx/
obj-y += hisilicon/
+obj-y += loongson/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
obj-y += intel/
obj-y += starfive/
obj-y += cavium/
+obj-y += ti/
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index f9cf00d690e2..021614b65e39 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -111,7 +111,7 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct sun8i_ce_alg_template *algt __maybe_unused;
+ struct sun8i_ce_alg_template *algt;
algt = container_of(alg, struct sun8i_ce_alg_template,
alg.skcipher.base);
@@ -131,21 +131,19 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
return err;
}
-static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req)
+static int sun8i_ce_cipher_prepare(struct skcipher_request *areq,
+ struct ce_task *cet)
{
- struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sun8i_ce_dev *ce = op->ce;
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sun8i_ce_alg_template *algt;
- struct sun8i_ce_flow *chan;
- struct ce_task *cet;
struct scatterlist *sg;
unsigned int todo, len, offset, ivsize;
u32 common, sym;
- int flow, i;
+ int i;
int nr_sgs = 0;
int nr_sgd = 0;
int err = 0;
@@ -163,14 +161,9 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
algt->stat_req++;
- flow = rctx->flow;
-
- chan = &ce->chanlist[flow];
-
- cet = chan->tl;
memset(cet, 0, sizeof(struct ce_task));
- cet->t_id = cpu_to_le32(flow);
+ cet->t_id = cpu_to_le32(rctx->flow);
common = ce->variant->alg_cipher[algt->ce_algo_id];
common |= rctx->op_dir | CE_COMM_INT;
cet->t_common_ctl = cpu_to_le32(common);
@@ -206,15 +199,14 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
cet->t_key = desc_addr_val_le32(ce, rctx->addr_key);
ivsize = crypto_skcipher_ivsize(tfm);
- if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
- rctx->ivlen = ivsize;
+ if (areq->iv && ivsize > 0) {
if (rctx->op_dir & CE_DECRYPTION) {
offset = areq->cryptlen - ivsize;
- scatterwalk_map_and_copy(chan->backup_iv, areq->src,
+ scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
offset, ivsize, 0);
}
- memcpy(chan->bounce_iv, areq->iv, ivsize);
- rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, rctx->ivlen,
+ memcpy(rctx->bounce_iv, areq->iv, ivsize);
+ rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, ivsize,
DMA_TO_DEVICE);
if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
dev_err(ce->dev, "Cannot DMA MAP IV\n");
@@ -277,9 +269,8 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
goto theend_sgs;
}
- chan->timeout = areq->cryptlen;
- rctx->nr_sgs = nr_sgs;
- rctx->nr_sgd = nr_sgd;
+ rctx->nr_sgs = ns;
+ rctx->nr_sgd = nd;
return 0;
theend_sgs:
@@ -296,17 +287,18 @@ theend_sgs:
theend_iv:
if (areq->iv && ivsize > 0) {
if (!dma_mapping_error(ce->dev, rctx->addr_iv))
- dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
+ dma_unmap_single(ce->dev, rctx->addr_iv, ivsize,
+ DMA_TO_DEVICE);
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
- memcpy(areq->iv, chan->backup_iv, ivsize);
- memzero_explicit(chan->backup_iv, ivsize);
+ memcpy(areq->iv, rctx->backup_iv, ivsize);
+ memzero_explicit(rctx->backup_iv, ivsize);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
ivsize, 0);
}
- memzero_explicit(chan->bounce_iv, ivsize);
+ memzero_explicit(rctx->bounce_iv, ivsize);
}
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
@@ -315,24 +307,17 @@ theend:
return err;
}
-static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
- void *async_req)
+static void sun8i_ce_cipher_unprepare(struct skcipher_request *areq,
+ struct ce_task *cet)
{
- struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sun8i_ce_dev *ce = op->ce;
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
- struct sun8i_ce_flow *chan;
- struct ce_task *cet;
unsigned int ivsize, offset;
int nr_sgs = rctx->nr_sgs;
int nr_sgd = rctx->nr_sgd;
- int flow;
- flow = rctx->flow;
- chan = &ce->chanlist[flow];
- cet = chan->tl;
ivsize = crypto_skcipher_ivsize(tfm);
if (areq->src == areq->dst) {
@@ -345,46 +330,47 @@ static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
if (areq->iv && ivsize > 0) {
if (cet->t_iv)
- dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
+ dma_unmap_single(ce->dev, rctx->addr_iv, ivsize,
+ DMA_TO_DEVICE);
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
- memcpy(areq->iv, chan->backup_iv, ivsize);
- memzero_explicit(chan->backup_iv, ivsize);
+ memcpy(areq->iv, rctx->backup_iv, ivsize);
+ memzero_explicit(rctx->backup_iv, ivsize);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
ivsize, 0);
}
- memzero_explicit(chan->bounce_iv, ivsize);
+ memzero_explicit(rctx->bounce_iv, ivsize);
}
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
}
-static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
-{
- struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
- struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
- struct sun8i_ce_dev *ce = op->ce;
- struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
- int flow, err;
-
- flow = rctx->flow;
- err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
- sun8i_ce_cipher_unprepare(engine, areq);
- local_bh_disable();
- crypto_finalize_skcipher_request(engine, breq, err);
- local_bh_enable();
-}
-
int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
{
- int err = sun8i_ce_cipher_prepare(engine, areq);
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sun8i_cipher_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = ctx->ce;
+ struct sun8i_ce_flow *chan;
+ int err;
+
+ chan = &ce->chanlist[rctx->flow];
+ err = sun8i_ce_cipher_prepare(req, chan->tl);
if (err)
return err;
- sun8i_ce_cipher_run(engine, areq);
+ err = sun8i_ce_run_task(ce, rctx->flow,
+ crypto_tfm_alg_name(req->base.tfm));
+
+ sun8i_ce_cipher_unprepare(req, chan->tl);
+
+ local_bh_disable();
+ crypto_finalize_skcipher_request(engine, req, err);
+ local_bh_enable();
+
return 0;
}
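The refactoring collapses the prepare/run/unprepare trio into the single do_one_request() callback shape that crypto_engine expects, so DMA mappings are always torn down on the same path that created them. Reduced to control flow, the pattern looks like this (prepare/run/unprepare are stand-ins for the driver's helpers):

/* Sketch: the consolidated crypto_engine do_one_request() pattern. */
static int do_one_request_sketch(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req = skcipher_request_cast(areq);
	int err;

	err = prepare(req);	/* map buffers, build the HW task */
	if (err)
		return err;

	err = run(req);		/* start the engine, wait for completion */
	unprepare(req);		/* unmap unconditionally, even on error */

	local_bh_disable();
	crypto_finalize_skcipher_request(engine, req, err);
	local_bh_enable();
	return 0;
}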
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 658f520cee0c..c16bb6ce6ee3 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -169,6 +169,12 @@ static const struct ce_variant ce_r40_variant = {
.trng = CE_ID_NOTSUPP,
};
+static void sun8i_ce_dump_task_descriptors(struct sun8i_ce_flow *chan)
+{
+ print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
+ chan->tl, sizeof(struct ce_task), false);
+}
+
/*
* sun8i_ce_get_engine_number() get the next channel slot
* This is a simple round-robin way of getting the next channel
@@ -183,7 +189,6 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
{
u32 v;
int err = 0;
- struct ce_task *cet = ce->chanlist[flow].tl;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
ce->chanlist[flow].stat_req++;
@@ -210,11 +215,10 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
mutex_unlock(&ce->mlock);
wait_for_completion_interruptible_timeout(&ce->chanlist[flow].complete,
- msecs_to_jiffies(ce->chanlist[flow].timeout));
+ msecs_to_jiffies(CE_DMA_TIMEOUT_MS));
if (ce->chanlist[flow].status == 0) {
- dev_err(ce->dev, "DMA timeout for %s (tm=%d) on flow %d\n", name,
- ce->chanlist[flow].timeout, flow);
+ dev_err(ce->dev, "DMA timeout for %s on flow %d\n", name, flow);
err = -EFAULT;
}
/* No need to lock for this read, the channel is locked so
@@ -226,9 +230,8 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
/* Sadly, the error bit is not per flow */
if (v) {
dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ sun8i_ce_dump_task_descriptors(&ce->chanlist[flow]);
err = -EFAULT;
- print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
- cet, sizeof(struct ce_task), false);
}
if (v & CE_ERR_ALGO_NOTSUP)
dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
@@ -245,9 +248,8 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
v &= 0xF;
if (v) {
dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ sun8i_ce_dump_task_descriptors(&ce->chanlist[flow]);
err = -EFAULT;
- print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
- cet, sizeof(struct ce_task), false);
}
if (v & CE_ERR_ALGO_NOTSUP)
dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
@@ -261,9 +263,8 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
v &= 0xFF;
if (v) {
dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ sun8i_ce_dump_task_descriptors(&ce->chanlist[flow]);
err = -EFAULT;
- print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
- cet, sizeof(struct ce_task), false);
}
if (v & CE_ERR_ALGO_NOTSUP)
dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
@@ -758,18 +759,6 @@ static int sun8i_ce_allocate_chanlist(struct sun8i_ce_dev *ce)
err = -ENOMEM;
goto error_engine;
}
- ce->chanlist[i].bounce_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE,
- GFP_KERNEL | GFP_DMA);
- if (!ce->chanlist[i].bounce_iv) {
- err = -ENOMEM;
- goto error_engine;
- }
- ce->chanlist[i].backup_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE,
- GFP_KERNEL);
- if (!ce->chanlist[i].backup_iv) {
- err = -ENOMEM;
- goto error_engine;
- }
}
return 0;
error_engine:
@@ -1063,7 +1052,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
pm_runtime_put_sync(ce->dev);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct dentry *dbgfs_dir __maybe_unused;
+ struct dentry *dbgfs_dir;
struct dentry *dbgfs_stats __maybe_unused;
/* Ignore error of debugfs */
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index bef44f350167..d01594353d9a 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -26,7 +26,7 @@
static void sun8i_ce_hash_stat_fb_inc(struct crypto_ahash *tfm)
{
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct sun8i_ce_alg_template *algt __maybe_unused;
+ struct sun8i_ce_alg_template *algt;
struct ahash_alg *alg = crypto_ahash_alg(tfm);
algt = container_of(alg, struct sun8i_ce_alg_template,
@@ -58,7 +58,8 @@ int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
crypto_ahash_set_reqsize(tfm,
sizeof(struct sun8i_ce_hash_reqctx) +
- crypto_ahash_reqsize(op->fallback_tfm));
+ crypto_ahash_reqsize(op->fallback_tfm) +
+ CRYPTO_DMA_PADDING);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
memcpy(algt->fbname,
@@ -84,7 +85,7 @@ void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm)
int sun8i_ce_hash_init(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -100,7 +101,7 @@ int sun8i_ce_hash_init(struct ahash_request *areq)
int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -114,7 +115,7 @@ int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -128,7 +129,7 @@ int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
int sun8i_ce_hash_final(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -145,7 +146,7 @@ int sun8i_ce_hash_final(struct ahash_request *areq)
int sun8i_ce_hash_update(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -160,7 +161,7 @@ int sun8i_ce_hash_update(struct ahash_request *areq)
int sun8i_ce_hash_finup(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -178,7 +179,7 @@ int sun8i_ce_hash_finup(struct ahash_request *areq)
static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -238,19 +239,15 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
int sun8i_ce_hash_digest(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
- struct sun8i_ce_alg_template *algt;
- struct sun8i_ce_dev *ce;
+ struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+ struct sun8i_ce_dev *ce = ctx->ce;
struct crypto_engine *engine;
int e;
if (sun8i_ce_hash_need_fallback(areq))
return sun8i_ce_hash_digest_fb(areq);
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
- ce = algt->ce;
-
e = sun8i_ce_get_engine_number(ce);
rctx->flow = e;
engine = ce->chanlist[e].engine;
@@ -316,65 +313,43 @@ static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count,
return j;
}
-int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
+static int sun8i_ce_hash_prepare(struct ahash_request *areq, struct ce_task *cet)
{
- struct ahash_request *areq = container_of(breq, struct ahash_request, base);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct sun8i_ce_alg_template *algt;
struct sun8i_ce_dev *ce;
- struct sun8i_ce_flow *chan;
- struct ce_task *cet;
struct scatterlist *sg;
- int nr_sgs, flow, err;
+ int nr_sgs, err;
unsigned int len;
u32 common;
u64 byte_count;
__le32 *bf;
- void *buf, *result;
int j, i, todo;
u64 bs;
int digestsize;
- dma_addr_t addr_res, addr_pad;
- int ns = sg_nents_for_len(areq->src, areq->nbytes);
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
ce = algt->ce;
- bs = algt->alg.hash.base.halg.base.cra_blocksize;
- digestsize = algt->alg.hash.base.halg.digestsize;
+ bs = crypto_ahash_blocksize(tfm);
+ digestsize = crypto_ahash_digestsize(tfm);
if (digestsize == SHA224_DIGEST_SIZE)
digestsize = SHA256_DIGEST_SIZE;
if (digestsize == SHA384_DIGEST_SIZE)
digestsize = SHA512_DIGEST_SIZE;
- /* the padding could be up to two block. */
- buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA);
- if (!buf) {
- err = -ENOMEM;
- goto err_out;
- }
- bf = (__le32 *)buf;
-
- result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
- if (!result) {
- err = -ENOMEM;
- goto err_free_buf;
- }
-
- flow = rctx->flow;
- chan = &ce->chanlist[flow];
+ bf = (__le32 *)rctx->pad;
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
algt->stat_req++;
dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);
- cet = chan->tl;
memset(cet, 0, sizeof(struct ce_task));
- cet->t_id = cpu_to_le32(flow);
+ cet->t_id = cpu_to_le32(rctx->flow);
common = ce->variant->alg_hash[algt->ce_algo_id];
common |= CE_COMM_INT;
cet->t_common_ctl = cpu_to_le32(common);
@@ -382,11 +357,12 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
cet->t_sym_ctl = 0;
cet->t_asym_ctl = 0;
- nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+ rctx->nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
+ nr_sgs = dma_map_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
- goto err_free_result;
+ goto err_out;
}
len = areq->nbytes;
@@ -401,10 +377,13 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
err = -EINVAL;
goto err_unmap_src;
}
- addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
- cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res);
- cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
- if (dma_mapping_error(ce->dev, addr_res)) {
+
+ rctx->result_len = digestsize;
+ rctx->addr_res = dma_map_single(ce->dev, rctx->result, rctx->result_len,
+ DMA_FROM_DEVICE);
+ cet->t_dst[0].addr = desc_addr_val_le32(ce, rctx->addr_res);
+ cet->t_dst[0].len = cpu_to_le32(rctx->result_len / 4);
+ if (dma_mapping_error(ce->dev, rctx->addr_res)) {
dev_err(ce->dev, "DMA map dest\n");
err = -EINVAL;
goto err_unmap_src;
@@ -432,10 +411,12 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
goto err_unmap_result;
}
- addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
- cet->t_src[i].addr = desc_addr_val_le32(ce, addr_pad);
+ rctx->pad_len = j * 4;
+ rctx->addr_pad = dma_map_single(ce->dev, rctx->pad, rctx->pad_len,
+ DMA_TO_DEVICE);
+ cet->t_src[i].addr = desc_addr_val_le32(ce, rctx->addr_pad);
cet->t_src[i].len = cpu_to_le32(j);
- if (dma_mapping_error(ce->dev, addr_pad)) {
+ if (dma_mapping_error(ce->dev, rctx->addr_pad)) {
dev_err(ce->dev, "DMA error on padding SG\n");
err = -EINVAL;
goto err_unmap_result;
@@ -446,29 +427,59 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
else
cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);
- chan->timeout = areq->nbytes;
-
- err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));
-
- dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
+ return 0;
err_unmap_result:
- dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
- if (!err)
- memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
+ dma_unmap_single(ce->dev, rctx->addr_res, rctx->result_len,
+ DMA_FROM_DEVICE);
err_unmap_src:
- dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
-err_free_result:
- kfree(result);
+err_out:
+ return err;
+}
-err_free_buf:
- kfree(buf);
+static void sun8i_ce_hash_unprepare(struct ahash_request *areq,
+ struct ce_task *cet)
+{
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sun8i_ce_dev *ce = ctx->ce;
+
+ dma_unmap_single(ce->dev, rctx->addr_pad, rctx->pad_len, DMA_TO_DEVICE);
+ dma_unmap_single(ce->dev, rctx->addr_res, rctx->result_len,
+ DMA_FROM_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
+}
+
+int sun8i_ce_hash_run(struct crypto_engine *engine, void *async_req)
+{
+ struct ahash_request *areq = ahash_request_cast(async_req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+ struct sun8i_ce_dev *ce = ctx->ce;
+ struct sun8i_ce_flow *chan;
+ int err;
+
+ chan = &ce->chanlist[rctx->flow];
+
+ err = sun8i_ce_hash_prepare(areq, chan->tl);
+ if (err)
+ return err;
+
+ err = sun8i_ce_run_task(ce, rctx->flow, crypto_ahash_alg_name(tfm));
+
+ sun8i_ce_hash_unprepare(areq, chan->tl);
+
+ if (!err)
+ memcpy(areq->result, rctx->result,
+ crypto_ahash_digestsize(tfm));
-err_out:
local_bh_disable();
- crypto_finalize_hash_request(engine, breq, err);
+ crypto_finalize_hash_request(engine, async_req, err);
local_bh_enable();
return 0;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
index 762459867b6c..d0a1ac66738b 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
@@ -137,7 +137,6 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
cet->t_dst[0].addr = desc_addr_val_le32(ce, dma_dst);
cet->t_dst[0].len = cpu_to_le32(todo / 4);
- ce->chanlist[flow].timeout = 2000;
err = sun8i_ce_run_task(ce, 3, "PRNG");
mutex_unlock(&ce->rnglock);
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
index e1e8bc15202e..244529bf0616 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
@@ -79,7 +79,6 @@ static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wa
cet->t_dst[0].addr = desc_addr_val_le32(ce, dma_dst);
cet->t_dst[0].len = cpu_to_le32(todo / 4);
- ce->chanlist[flow].timeout = todo;
err = sun8i_ce_run_task(ce, 3, "TRNG");
mutex_unlock(&ce->rnglock);
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 83df4d719053..71f5a0cd3d45 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -106,9 +106,13 @@
#define MAX_SG 8
#define CE_MAX_CLOCKS 4
+#define CE_DMA_TIMEOUT_MS 3000
#define MAXFLOW 4
+#define CE_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define CE_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
+
/*
* struct ce_clock - Describe clocks used by sun8i-ce
* @name: Name of clock needed by this variant
@@ -187,8 +191,6 @@ struct ce_task {
* @status: set to 1 by interrupt if task is done
* @t_phy: Physical address of task
* @tl: pointer to the current ce_task for this flow
- * @backup_iv: buffer which contain the next IV to store
- * @bounce_iv: buffer which contain the IV
* @stat_req: number of request done by this flow
*/
struct sun8i_ce_flow {
@@ -196,10 +198,7 @@ struct sun8i_ce_flow {
struct completion complete;
int status;
dma_addr_t t_phy;
- int timeout;
struct ce_task *tl;
- void *backup_iv;
- void *bounce_iv;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
unsigned long stat_req;
#endif
@@ -260,21 +259,23 @@ static inline __le32 desc_addr_val_le32(struct sun8i_ce_dev *dev,
* struct sun8i_cipher_req_ctx - context for a skcipher request
* @op_dir: direction (encrypt vs decrypt) for this request
* @flow: the flow to use for this request
- * @ivlen: size of bounce_iv
* @nr_sgs: The number of source SG (as given by dma_map_sg())
* @nr_sgd: The number of destination SG (as given by dma_map_sg())
* @addr_iv: The IV addr returned by dma_map_single, need to unmap later
* @addr_key: The key addr returned by dma_map_single, need to unmap later
+ * @bounce_iv: Current IV buffer
+ * @backup_iv: Next IV buffer
* @fallback_req: request struct for invoking the fallback skcipher TFM
*/
struct sun8i_cipher_req_ctx {
u32 op_dir;
int flow;
- unsigned int ivlen;
int nr_sgs;
int nr_sgd;
dma_addr_t addr_iv;
dma_addr_t addr_key;
+ u8 bounce_iv[AES_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 backup_iv[AES_BLOCK_SIZE];
struct skcipher_request fallback_req; // keep at the end
};
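With the IV buffers embedded in the request context, the shared per-channel GFP_DMA allocations go away and each in-flight request owns its own DMA-mappable storage; the __aligned(sizeof(u32)) annotation keeps bounce_iv suitable for the hardware. The mapping pattern, sketched to mirror the cipher path above:

/* Sketch: map the per-request bounce IV for device consumption. */
static int map_request_iv(struct device *dev,
			  struct sun8i_cipher_req_ctx *rctx,
			  const u8 *iv, unsigned int ivsize)
{
	memcpy(rctx->bounce_iv, iv, ivsize);
	rctx->addr_iv = dma_map_single(dev, rctx->bounce_iv, ivsize,
				       DMA_TO_DEVICE);
	return dma_mapping_error(dev, rctx->addr_iv) ? -EINVAL : 0;
}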
@@ -306,9 +307,23 @@ struct sun8i_ce_hash_tfm_ctx {
* struct sun8i_ce_hash_reqctx - context for an ahash request
* @fallback_req: pre-allocated fallback request
* @flow: the flow to use for this request
+ * @nr_sgs: number of entries in the source scatterlist
+ * @result_len: result length in bytes
+ * @pad_len: padding length in bytes
+ * @addr_res: DMA address of the result buffer, returned by dma_map_single()
+ * @addr_pad: DMA address of the padding buffer, returned by dma_map_single()
+ * @result: per-request result buffer
+ * @pad: per-request padding buffer (up to 2 blocks)
*/
struct sun8i_ce_hash_reqctx {
int flow;
+ int nr_sgs;
+ size_t result_len;
+ size_t pad_len;
+ dma_addr_t addr_res;
+ dma_addr_t addr_pad;
+ u8 result[CE_MAX_HASH_DIGEST_SIZE] __aligned(CRYPTO_DMA_ALIGN);
+ u8 pad[2 * CE_MAX_HASH_BLOCK_SIZE];
struct ahash_request fallback_req; // keep at the end
};
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 8bc08089f044..36a1ebca2e70 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -502,6 +502,7 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
ss = algt->ss;
+ j = 0;
digestsize = crypto_ahash_digestsize(tfm);
if (digestsize == SHA224_DIGEST_SIZE)
@@ -536,7 +537,6 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
goto err_dma_result;
}
- j = 0;
len = areq->nbytes;
sg = areq->src;
i = 0;
diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c
index 8d1c79aaca07..5993bcba9716 100644
--- a/drivers/crypto/aspeed/aspeed-acry.c
+++ b/drivers/crypto/aspeed/aspeed-acry.c
@@ -787,7 +787,6 @@ static int aspeed_acry_probe(struct platform_device *pdev)
err_engine_rsa_start:
crypto_engine_exit(acry_dev->crypt_engine_rsa);
clk_exit:
- clk_disable_unprepare(acry_dev->clk);
return rc;
}
@@ -799,7 +798,6 @@ static void aspeed_acry_remove(struct platform_device *pdev)
aspeed_acry_unregister(acry_dev);
crypto_engine_exit(acry_dev->crypt_engine_rsa);
tasklet_kill(&acry_dev->done_task);
- clk_disable_unprepare(acry_dev->clk);
}
MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);
diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c
index a72dfebc53ff..fa201dae1f81 100644
--- a/drivers/crypto/aspeed/aspeed-hace-crypto.c
+++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c
@@ -346,7 +346,7 @@ free_req:
} else {
dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents,
- DMA_TO_DEVICE);
+ DMA_FROM_DEVICE);
dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
DMA_TO_DEVICE);
}
diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
index 0b6e49c06eff..f8f37c9d5f3c 100644
--- a/drivers/crypto/aspeed/aspeed-hace-hash.c
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -5,7 +5,6 @@
#include "aspeed-hace.h"
#include <crypto/engine.h>
-#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
@@ -14,6 +13,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/scatterlist.h>
#include <linux/string.h>
#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
@@ -59,6 +59,46 @@ static const __be64 sha512_iv[8] = {
cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7)
};
+static int aspeed_sham_init(struct ahash_request *req);
+static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev);
+
+static int aspeed_sham_export(struct ahash_request *req, void *out)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ union {
+ u8 *u8;
+ u64 *u64;
+ } p = { .u8 = out };
+
+ memcpy(out, rctx->digest, rctx->ivsize);
+ p.u8 += rctx->ivsize;
+ put_unaligned(rctx->digcnt[0], p.u64++);
+ if (rctx->ivsize == 64)
+ put_unaligned(rctx->digcnt[1], p.u64);
+ return 0;
+}
+
+static int aspeed_sham_import(struct ahash_request *req, const void *in)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ union {
+ const u8 *u8;
+ const u64 *u64;
+ } p = { .u8 = in };
+ int err;
+
+ err = aspeed_sham_init(req);
+ if (err)
+ return err;
+
+ memcpy(rctx->digest, in, rctx->ivsize);
+ p.u8 += rctx->ivsize;
+ rctx->digcnt[0] = get_unaligned(p.u64++);
+ if (rctx->ivsize == 64)
+ rctx->digcnt[1] = get_unaligned(p.u64);
+ return 0;
+}
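The export format is just the partial digest followed by the byte counter(s): ivsize bytes of digest, then digcnt[0], then digcnt[1] only for the 64-byte-IV algorithms (SHA-384/512). The logical layout, sketched as a struct for illustration (the real code serializes with put_unaligned()/get_unaligned() because the buffer carries no alignment guarantee):

/* Sketch only: logical layout behind aspeed_sham_export()/import(). */
struct aspeed_sham_state_sketch {
	u8  digest[64];	/* only ivsize bytes are meaningful */
	u64 digcnt0;	/* low 64 bits of the byte count */
	u64 digcnt1;	/* present only when ivsize == 64 */
} __packed;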
+
/* The purpose of this padding is to ensure that the padded message is a
* multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
* The bit "1" is appended at the end of the message followed by
@@ -74,10 +114,10 @@ static const __be64 sha512_iv[8] = {
* - if message length < 112 bytes then padlen = 112 - message length
* - else padlen = 128 + 112 - message length
*/
-static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
- struct aspeed_sham_reqctx *rctx)
+static int aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
+ struct aspeed_sham_reqctx *rctx, u8 *buf)
{
- unsigned int index, padlen;
+ unsigned int index, padlen, bitslen;
__be64 bits[2];
AHASH_DBG(hace_dev, "rctx flags:0x%x\n", (u32)rctx->flags);
@@ -87,25 +127,32 @@ static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
case SHA_FLAGS_SHA224:
case SHA_FLAGS_SHA256:
bits[0] = cpu_to_be64(rctx->digcnt[0] << 3);
- index = rctx->bufcnt & 0x3f;
+ index = rctx->digcnt[0] & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
- *(rctx->buffer + rctx->bufcnt) = 0x80;
- memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
- memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 8);
- rctx->bufcnt += padlen + 8;
+ bitslen = 8;
break;
default:
bits[1] = cpu_to_be64(rctx->digcnt[0] << 3);
bits[0] = cpu_to_be64(rctx->digcnt[1] << 3 |
rctx->digcnt[0] >> 61);
- index = rctx->bufcnt & 0x7f;
+ index = rctx->digcnt[0] & 0x7f;
padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
- *(rctx->buffer + rctx->bufcnt) = 0x80;
- memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
- memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 16);
- rctx->bufcnt += padlen + 16;
+ bitslen = 16;
break;
}
+ buf[0] = 0x80;
+ memset(buf + 1, 0, padlen - 1);
+ memcpy(buf + padlen, bits, bitslen);
+ return padlen + bitslen;
+}
+
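Note: the padlen arithmetic above is the standard Merkle-Damgard padding
rule: append 0x80, zero-fill, then the big-endian bit count, ending exactly
on a block boundary. A self-contained check of the 64-byte-block case (a
sketch, not driver code):

    #include <assert.h>
    #include <stddef.h>

    /* Padding length for 64-byte-block hashes (SHA-1/224/256): the padded
     * message must stop 8 bytes short of a block boundary so the 64-bit
     * bit-length field fits at the end. */
    static size_t sha_padlen(size_t msglen)
    {
        size_t index = msglen & 0x3f;

        return (index < 56) ? (56 - index) : (64 + 56 - index);
    }

    int main(void)
    {
        /* message + padding + 8-byte length is always block aligned */
        for (size_t len = 0; len < 256; len++)
            assert((len + sha_padlen(len) + 8) % 64 == 0);
        return 0;
    }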
+static void aspeed_ahash_update_counter(struct aspeed_sham_reqctx *rctx,
+ unsigned int len)
+{
+ rctx->offset += len;
+ rctx->digcnt[0] += len;
+ if (rctx->digcnt[0] < len)
+ rctx->digcnt[1]++;
}
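Note: the digcnt[0] < len comparison after the addition is the usual
unsigned wraparound test; in isolation the 128-bit carry idiom is:

    #include <stdint.h>

    /* 128-bit byte counter kept as two 64-bit limbs. */
    static void counter_add(uint64_t cnt[2], uint64_t len)
    {
        cnt[0] += len;
        if (cnt[0] < len)  /* the sum wrapped past 2^64: carry out */
            cnt[1]++;
    }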
/*
@@ -117,31 +164,31 @@ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
struct ahash_request *req = hash_engine->req;
struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
- int length, remain;
+ unsigned int length, remain;
+ bool final = false;
- length = rctx->total + rctx->bufcnt;
- remain = length % rctx->block_size;
+ length = rctx->total - rctx->offset;
+ remain = length - round_down(length, rctx->block_size);
AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain);
- if (rctx->bufcnt)
- memcpy(hash_engine->ahash_src_addr, rctx->buffer, rctx->bufcnt);
-
- if (rctx->total + rctx->bufcnt < ASPEED_CRYPTO_SRC_DMA_BUF_LEN) {
- scatterwalk_map_and_copy(hash_engine->ahash_src_addr +
- rctx->bufcnt, rctx->src_sg,
- rctx->offset, rctx->total - remain, 0);
- rctx->offset += rctx->total - remain;
-
- } else {
- dev_warn(hace_dev->dev, "Hash data length is too large\n");
- return -EINVAL;
- }
-
- scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg,
- rctx->offset, remain, 0);
+ if (length > ASPEED_HASH_SRC_DMA_BUF_LEN)
+ length = ASPEED_HASH_SRC_DMA_BUF_LEN;
+ else if (rctx->flags & SHA_FLAGS_FINUP) {
+ if (round_up(length, rctx->block_size) + rctx->block_size >
+ ASPEED_HASH_SRC_DMA_BUF_LEN)
+ length = round_down(length - 1, rctx->block_size);
+ else
+ final = true;
+ } else
+ length -= remain;
+ scatterwalk_map_and_copy(hash_engine->ahash_src_addr, rctx->src_sg,
+ rctx->offset, length, 0);
+ aspeed_ahash_update_counter(rctx, length);
+ if (final)
+ length += aspeed_ahash_fill_padding(
+ hace_dev, rctx, hash_engine->ahash_src_addr + length);
- rctx->bufcnt = remain;
rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
SHA512_DIGEST_SIZE,
DMA_BIDIRECTIONAL);
@@ -150,7 +197,7 @@ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
return -ENOMEM;
}
- hash_engine->src_length = length - remain;
+ hash_engine->src_length = length;
hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
hash_engine->digest_dma = rctx->digest_dma_addr;
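Note: the rewritten prepare step above picks one DMA pass worth of input:
clamp to the buffer, or, on the last FINUP pass, either reserve room for the
padding block or deliberately hold back at least one byte so a further pass
runs. A standalone sketch of that decision (the buffer size and helpers are
stand-ins; block sizes are assumed to be powers of two):

    #include <stdbool.h>
    #include <stddef.h>

    #define DMA_BUF_LEN 4096  /* stand-in for the driver's buffer size */

    #define ROUND_DOWN(x, a) ((x) & ~((size_t)(a) - 1))
    #define ROUND_UP(x, a)   ROUND_DOWN((x) + (a) - 1, (a))

    /* Returns the copy length for one pass; *final reports whether the
     * padding block can be appended in the same pass. remaining > 0. */
    static size_t pick_length(size_t remaining, size_t blksz, bool finup,
                              bool *final)
    {
        size_t length = remaining;

        *final = false;
        if (length > DMA_BUF_LEN)
            length = DMA_BUF_LEN;
        else if (finup) {
            if (ROUND_UP(length, blksz) + blksz > DMA_BUF_LEN)
                length = ROUND_DOWN(length - 1, blksz); /* one more pass */
            else
                *final = true;  /* data plus padding fits this pass */
        } else {
            length = ROUND_DOWN(length, blksz); /* whole blocks only */
        }
        return length;
    }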
@@ -166,16 +213,20 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
struct ahash_request *req = hash_engine->req;
struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ bool final = rctx->flags & SHA_FLAGS_FINUP;
+ int remain, sg_len, i, max_sg_nents;
+ unsigned int length, offset, total;
struct aspeed_sg_list *src_list;
struct scatterlist *s;
- int length, remain, sg_len, i;
int rc = 0;
- remain = (rctx->total + rctx->bufcnt) % rctx->block_size;
- length = rctx->total + rctx->bufcnt - remain;
+ offset = rctx->offset;
+ length = rctx->total - offset;
+ remain = final ? 0 : length - round_down(length, rctx->block_size);
+ length -= remain;
- AHASH_DBG(hace_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x\n",
- "rctx total", rctx->total, "bufcnt", rctx->bufcnt,
+ AHASH_DBG(hace_dev, "%s:0x%x, %s:0x%x, %s:0x%x\n",
+ "rctx total", rctx->total,
"length", length, "remain", remain);
sg_len = dma_map_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
@@ -186,6 +237,8 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
goto end;
}
+ max_sg_nents = ASPEED_HASH_SRC_DMA_BUF_LEN / sizeof(*src_list) - final;
+ sg_len = min(sg_len, max_sg_nents);
src_list = (struct aspeed_sg_list *)hash_engine->ahash_src_addr;
rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
SHA512_DIGEST_SIZE,
@@ -196,68 +249,66 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
goto free_src_sg;
}
- if (rctx->bufcnt != 0) {
- u32 phy_addr;
- u32 len;
+ total = 0;
+ for_each_sg(rctx->src_sg, s, sg_len, i) {
+ u32 phy_addr = sg_dma_address(s);
+ u32 len = sg_dma_len(s);
- rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
- rctx->buffer,
- rctx->block_size * 2,
- DMA_TO_DEVICE);
- if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
- rc = -ENOMEM;
- goto free_rctx_digest;
+ if (len <= offset) {
+ offset -= len;
+ continue;
}
- phy_addr = rctx->buffer_dma_addr;
- len = rctx->bufcnt;
- length -= len;
+ len -= offset;
+ phy_addr += offset;
+ offset = 0;
- /* Last sg list */
- if (length == 0)
- len |= HASH_SG_LAST_LIST;
+ if (length > len)
+ length -= len;
+ else {
+ /* Last sg list */
+ len = length;
+ length = 0;
+ }
- src_list[0].phy_addr = cpu_to_le32(phy_addr);
- src_list[0].len = cpu_to_le32(len);
- src_list++;
+ total += len;
+ src_list[i].phy_addr = cpu_to_le32(phy_addr);
+ src_list[i].len = cpu_to_le32(len);
}
if (length != 0) {
- for_each_sg(rctx->src_sg, s, sg_len, i) {
- u32 phy_addr = sg_dma_address(s);
- u32 len = sg_dma_len(s);
-
- if (length > len)
- length -= len;
- else {
- /* Last sg list */
- len = length;
- len |= HASH_SG_LAST_LIST;
- length = 0;
- }
+ total = round_down(total, rctx->block_size);
+ final = false;
+ }
+
+ aspeed_ahash_update_counter(rctx, total);
+ if (final) {
+ int len = aspeed_ahash_fill_padding(hace_dev, rctx,
+ rctx->buffer);
- src_list[i].phy_addr = cpu_to_le32(phy_addr);
- src_list[i].len = cpu_to_le32(len);
+ total += len;
+ rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
+ rctx->buffer,
+ sizeof(rctx->buffer),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+ rc = -ENOMEM;
+ goto free_rctx_digest;
}
- }
- if (length != 0) {
- rc = -EINVAL;
- goto free_rctx_buffer;
+ src_list[i].phy_addr = cpu_to_le32(rctx->buffer_dma_addr);
+ src_list[i].len = cpu_to_le32(len);
+ i++;
}
+ src_list[i - 1].len |= cpu_to_le32(HASH_SG_LAST_LIST);
- rctx->offset = rctx->total - remain;
- hash_engine->src_length = rctx->total + rctx->bufcnt - remain;
+ hash_engine->src_length = total;
hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
hash_engine->digest_dma = rctx->digest_dma_addr;
return 0;
-free_rctx_buffer:
- if (rctx->bufcnt != 0)
- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
- rctx->block_size * 2, DMA_TO_DEVICE);
free_rctx_digest:
dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
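Note: the new scatterlist loop replaces the old bounce-buffer bookkeeping by
skipping the bytes earlier passes consumed and clamping the run at the pass
length. The same skip-then-clamp walk over a plain segment array (types and
names are illustrative):

    #include <stdint.h>
    #include <stddef.h>

    struct seg { uint64_t addr; size_t len; };

    /* Emit (addr, len) pairs covering at most `length` bytes, starting
     * `offset` bytes into the segment list; returns the bytes emitted. */
    static size_t walk_segs(const struct seg *sg, int nents,
                            size_t offset, size_t length,
                            struct seg *out, int *out_n)
    {
        size_t total = 0;
        int n = 0;

        for (int i = 0; i < nents && length; i++) {
            uint64_t addr = sg[i].addr;
            size_t len = sg[i].len;

            if (len <= offset) {   /* segment fully consumed already */
                offset -= len;
                continue;
            }
            addr += offset;        /* partial skip inside this segment */
            len -= offset;
            offset = 0;

            if (len > length)
                len = length;      /* clamp the last segment */
            length -= len;

            out[n].addr = addr;
            out[n].len = len;
            n++;
            total += len;
        }
        *out_n = n;
        return total;
    }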
@@ -272,24 +323,6 @@ static int aspeed_ahash_complete(struct aspeed_hace_dev *hace_dev)
{
struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
struct ahash_request *req = hash_engine->req;
-
- AHASH_DBG(hace_dev, "\n");
-
- hash_engine->flags &= ~CRYPTO_FLAGS_BUSY;
-
- crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req, 0);
-
- return 0;
-}
-
-/*
- * Copy digest to the corresponding request result.
- * This function will be called at final() stage.
- */
-static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev)
-{
- struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
- struct ahash_request *req = hash_engine->req;
struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
AHASH_DBG(hace_dev, "\n");
@@ -297,12 +330,19 @@ static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev)
dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
- rctx->block_size * 2, DMA_TO_DEVICE);
+ if (rctx->total - rctx->offset >= rctx->block_size ||
+ (rctx->total != rctx->offset && rctx->flags & SHA_FLAGS_FINUP))
+ return aspeed_ahash_req_update(hace_dev);
- memcpy(req->result, rctx->digest, rctx->digsize);
+ hash_engine->flags &= ~CRYPTO_FLAGS_BUSY;
- return aspeed_ahash_complete(hace_dev);
+ if (rctx->flags & SHA_FLAGS_FINUP)
+ memcpy(req->result, rctx->digest, rctx->digsize);
+
+ crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req,
+ rctx->total - rctx->offset);
+
+ return 0;
}
/*
@@ -338,118 +378,6 @@ static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev,
return -EINPROGRESS;
}
-/*
- * HMAC resume aims to do the second pass produces
- * the final HMAC code derived from the inner hash
- * result and the outer key.
- */
-static int aspeed_ahash_hmac_resume(struct aspeed_hace_dev *hace_dev)
-{
- struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
- struct ahash_request *req = hash_engine->req;
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
- int rc = 0;
-
- AHASH_DBG(hace_dev, "\n");
-
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-
- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
- rctx->block_size * 2, DMA_TO_DEVICE);
-
- /* o key pad + hash sum 1 */
- memcpy(rctx->buffer, bctx->opad, rctx->block_size);
- memcpy(rctx->buffer + rctx->block_size, rctx->digest, rctx->digsize);
-
- rctx->bufcnt = rctx->block_size + rctx->digsize;
- rctx->digcnt[0] = rctx->block_size + rctx->digsize;
-
- aspeed_ahash_fill_padding(hace_dev, rctx);
- memcpy(rctx->digest, rctx->sha_iv, rctx->ivsize);
-
- rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
- SHA512_DIGEST_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
- rc = -ENOMEM;
- goto end;
- }
-
- rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer,
- rctx->block_size * 2,
- DMA_TO_DEVICE);
- if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
- rc = -ENOMEM;
- goto free_rctx_digest;
- }
-
- hash_engine->src_dma = rctx->buffer_dma_addr;
- hash_engine->src_length = rctx->bufcnt;
- hash_engine->digest_dma = rctx->digest_dma_addr;
-
- return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
-
-free_rctx_digest:
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-end:
- return rc;
-}
-
-static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev)
-{
- struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
- struct ahash_request *req = hash_engine->req;
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
- int rc = 0;
-
- AHASH_DBG(hace_dev, "\n");
-
- aspeed_ahash_fill_padding(hace_dev, rctx);
-
- rctx->digest_dma_addr = dma_map_single(hace_dev->dev,
- rctx->digest,
- SHA512_DIGEST_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
- rc = -ENOMEM;
- goto end;
- }
-
- rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
- rctx->buffer,
- rctx->block_size * 2,
- DMA_TO_DEVICE);
- if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
- dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
- rc = -ENOMEM;
- goto free_rctx_digest;
- }
-
- hash_engine->src_dma = rctx->buffer_dma_addr;
- hash_engine->src_length = rctx->bufcnt;
- hash_engine->digest_dma = rctx->digest_dma_addr;
-
- if (rctx->flags & SHA_FLAGS_HMAC)
- return aspeed_hace_ahash_trigger(hace_dev,
- aspeed_ahash_hmac_resume);
-
- return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
-
-free_rctx_digest:
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-end:
- return rc;
-}
-
static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
{
struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
@@ -461,40 +389,12 @@ static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
DMA_TO_DEVICE);
- if (rctx->bufcnt != 0)
+ if (rctx->flags & SHA_FLAGS_FINUP && rctx->total == rctx->offset)
dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
- rctx->block_size * 2,
- DMA_TO_DEVICE);
+ sizeof(rctx->buffer), DMA_TO_DEVICE);
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-
- scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset,
- rctx->total - rctx->offset, 0);
-
- rctx->bufcnt = rctx->total - rctx->offset;
rctx->cmd &= ~HASH_CMD_HASH_SRC_SG_CTRL;
- if (rctx->flags & SHA_FLAGS_FINUP)
- return aspeed_ahash_req_final(hace_dev);
-
- return aspeed_ahash_complete(hace_dev);
-}
-
-static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev)
-{
- struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
- struct ahash_request *req = hash_engine->req;
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-
- AHASH_DBG(hace_dev, "\n");
-
- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-
- if (rctx->flags & SHA_FLAGS_FINUP)
- return aspeed_ahash_req_final(hace_dev);
-
return aspeed_ahash_complete(hace_dev);
}
@@ -513,7 +413,7 @@ static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev)
resume = aspeed_ahash_update_resume_sg;
} else {
- resume = aspeed_ahash_update_resume;
+ resume = aspeed_ahash_complete;
}
ret = hash_engine->dma_prepare(hace_dev);
@@ -530,26 +430,47 @@ static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev,
hace_dev->crypt_engine_hash, req);
}
+static noinline int aspeed_ahash_fallback(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ HASH_FBREQ_ON_STACK(fbreq, req);
+ u8 *state = rctx->buffer;
+ struct scatterlist sg[2];
+ struct scatterlist *ssg;
+ int ret;
+
+ ssg = scatterwalk_ffwd(sg, req->src, rctx->offset);
+ ahash_request_set_crypt(fbreq, ssg, req->result,
+ rctx->total - rctx->offset);
+
+ ret = aspeed_sham_export(req, state) ?:
+ crypto_ahash_import_core(fbreq, state);
+
+ if (rctx->flags & SHA_FLAGS_FINUP)
+ ret = ret ?: crypto_ahash_finup(fbreq);
+ else
+ ret = ret ?: crypto_ahash_update(fbreq) ?:
+ crypto_ahash_export_core(fbreq, state) ?:
+ aspeed_sham_import(req, state);
+ HASH_REQUEST_ZERO(fbreq);
+ return ret;
+}
+
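Note: the fallback path strings its steps together with the GNU `a ?: b`
extension, which yields `a` unless `a` is zero; with 0-on-success error
codes each step runs only while everything before it succeeded. Reduced to
a sketch:

    /* GNU C: x ?: y is x if nonzero, else y; later steps are skipped
     * once any earlier step has returned an error code. */
    static int step1(void) { return 0; }
    static int step2(void) { return 0; }
    static int step3(void) { return -1; }

    static int run_all(void)
    {
        int ret;

        ret = step1() ?: step2(); /* step2 only if step1 returned 0 */
        return ret ?: step3();    /* step3 only if nothing failed */
    }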
static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
{
struct ahash_request *req = ahash_request_cast(areq);
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
struct aspeed_engine_hash *hash_engine;
- int ret = 0;
+ int ret;
hash_engine = &hace_dev->hash_engine;
hash_engine->flags |= CRYPTO_FLAGS_BUSY;
- if (rctx->op == SHA_OP_UPDATE)
- ret = aspeed_ahash_req_update(hace_dev);
- else if (rctx->op == SHA_OP_FINAL)
- ret = aspeed_ahash_req_final(hace_dev);
-
+ ret = aspeed_ahash_req_update(hace_dev);
if (ret != -EINPROGRESS)
- return ret;
+ return aspeed_ahash_fallback(req);
return 0;
}
@@ -590,45 +511,7 @@ static int aspeed_sham_update(struct ahash_request *req)
rctx->total = req->nbytes;
rctx->src_sg = req->src;
rctx->offset = 0;
- rctx->src_nents = sg_nents(req->src);
- rctx->op = SHA_OP_UPDATE;
-
- rctx->digcnt[0] += rctx->total;
- if (rctx->digcnt[0] < rctx->total)
- rctx->digcnt[1]++;
-
- if (rctx->bufcnt + rctx->total < rctx->block_size) {
- scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt,
- rctx->src_sg, rctx->offset,
- rctx->total, 0);
- rctx->bufcnt += rctx->total;
-
- return 0;
- }
-
- return aspeed_hace_hash_handle_queue(hace_dev, req);
-}
-
-static int aspeed_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
- const u8 *data, unsigned int len, u8 *out)
-{
- SHASH_DESC_ON_STACK(shash, tfm);
-
- shash->tfm = tfm;
-
- return crypto_shash_digest(shash, data, len, out);
-}
-
-static int aspeed_sham_final(struct ahash_request *req)
-{
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
- struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
-
- AHASH_DBG(hace_dev, "req->nbytes:%d, rctx->total:%d\n",
- req->nbytes, rctx->total);
- rctx->op = SHA_OP_FINAL;
+ rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
return aspeed_hace_hash_handle_queue(hace_dev, req);
}
@@ -639,23 +522,12 @@ static int aspeed_sham_finup(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
- int rc1, rc2;
AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
rctx->flags |= SHA_FLAGS_FINUP;
- rc1 = aspeed_sham_update(req);
- if (rc1 == -EINPROGRESS || rc1 == -EBUSY)
- return rc1;
-
- /*
- * final() has to be always called to cleanup resources
- * even if update() failed, except EINPROGRESS
- */
- rc2 = aspeed_sham_final(req);
-
- return rc1 ? : rc2;
+ return aspeed_sham_update(req);
}
static int aspeed_sham_init(struct ahash_request *req)
@@ -664,7 +536,6 @@ static int aspeed_sham_init(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
AHASH_DBG(hace_dev, "%s: digest size:%d\n",
crypto_tfm_alg_name(&tfm->base),
@@ -679,7 +550,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA1;
rctx->digsize = SHA1_DIGEST_SIZE;
rctx->block_size = SHA1_BLOCK_SIZE;
- rctx->sha_iv = sha1_iv;
rctx->ivsize = 32;
memcpy(rctx->digest, sha1_iv, rctx->ivsize);
break;
@@ -688,7 +558,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA224;
rctx->digsize = SHA224_DIGEST_SIZE;
rctx->block_size = SHA224_BLOCK_SIZE;
- rctx->sha_iv = sha224_iv;
rctx->ivsize = 32;
memcpy(rctx->digest, sha224_iv, rctx->ivsize);
break;
@@ -697,7 +566,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA256;
rctx->digsize = SHA256_DIGEST_SIZE;
rctx->block_size = SHA256_BLOCK_SIZE;
- rctx->sha_iv = sha256_iv;
rctx->ivsize = 32;
memcpy(rctx->digest, sha256_iv, rctx->ivsize);
break;
@@ -707,7 +575,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA384;
rctx->digsize = SHA384_DIGEST_SIZE;
rctx->block_size = SHA384_BLOCK_SIZE;
- rctx->sha_iv = (const __be32 *)sha384_iv;
rctx->ivsize = 64;
memcpy(rctx->digest, sha384_iv, rctx->ivsize);
break;
@@ -717,7 +584,6 @@ static int aspeed_sham_init(struct ahash_request *req)
rctx->flags |= SHA_FLAGS_SHA512;
rctx->digsize = SHA512_DIGEST_SIZE;
rctx->block_size = SHA512_BLOCK_SIZE;
- rctx->sha_iv = (const __be32 *)sha512_iv;
rctx->ivsize = 64;
memcpy(rctx->digest, sha512_iv, rctx->ivsize);
break;
@@ -727,19 +593,10 @@ static int aspeed_sham_init(struct ahash_request *req)
return -EINVAL;
}
- rctx->bufcnt = 0;
rctx->total = 0;
rctx->digcnt[0] = 0;
rctx->digcnt[1] = 0;
- /* HMAC init */
- if (tctx->flags & SHA_FLAGS_HMAC) {
- rctx->digcnt[0] = rctx->block_size;
- rctx->bufcnt = rctx->block_size;
- memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
- rctx->flags |= SHA_FLAGS_HMAC;
- }
-
return 0;
}
@@ -748,102 +605,14 @@ static int aspeed_sham_digest(struct ahash_request *req)
return aspeed_sham_init(req) ? : aspeed_sham_finup(req);
}
-static int aspeed_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
+static int aspeed_sham_cra_init(struct crypto_ahash *tfm)
{
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
- struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
- int ds = crypto_shash_digestsize(bctx->shash);
- int bs = crypto_shash_blocksize(bctx->shash);
- int err = 0;
- int i;
-
- AHASH_DBG(hace_dev, "%s: keylen:%d\n", crypto_tfm_alg_name(&tfm->base),
- keylen);
-
- if (keylen > bs) {
- err = aspeed_sham_shash_digest(bctx->shash,
- crypto_shash_get_flags(bctx->shash),
- key, keylen, bctx->ipad);
- if (err)
- return err;
- keylen = ds;
-
- } else {
- memcpy(bctx->ipad, key, keylen);
- }
-
- memset(bctx->ipad + keylen, 0, bs - keylen);
- memcpy(bctx->opad, bctx->ipad, bs);
-
- for (i = 0; i < bs; i++) {
- bctx->ipad[i] ^= HMAC_IPAD_VALUE;
- bctx->opad[i] ^= HMAC_OPAD_VALUE;
- }
-
- return err;
-}
-
-static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
-{
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
- struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
struct aspeed_hace_alg *ast_alg;
ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash.base);
tctx->hace_dev = ast_alg->hace_dev;
- tctx->flags = 0;
-
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct aspeed_sham_reqctx));
-
- if (ast_alg->alg_base) {
- /* hmac related */
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
-
- tctx->flags |= SHA_FLAGS_HMAC;
- bctx->shash = crypto_alloc_shash(ast_alg->alg_base, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(bctx->shash)) {
- dev_warn(ast_alg->hace_dev->dev,
- "base driver '%s' could not be loaded.\n",
- ast_alg->alg_base);
- return PTR_ERR(bctx->shash);
- }
- }
-
- return 0;
-}
-
-static void aspeed_sham_cra_exit(struct crypto_tfm *tfm)
-{
- struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
- struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
-
- AHASH_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(tfm));
-
- if (tctx->flags & SHA_FLAGS_HMAC) {
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
-
- crypto_free_shash(bctx->shash);
- }
-}
-
-static int aspeed_sham_export(struct ahash_request *req, void *out)
-{
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-
- memcpy(out, rctx, sizeof(*rctx));
-
- return 0;
-}
-
-static int aspeed_sham_import(struct ahash_request *req, const void *in)
-{
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-
- memcpy(rctx, in, sizeof(*rctx));
return 0;
}
@@ -853,11 +622,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -867,13 +636,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
@@ -885,11 +654,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -899,13 +668,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
@@ -917,11 +686,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -931,118 +700,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha1",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha1)",
- .cra_driver_name = "aspeed-hmac-sha1",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha224",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha224)",
- .cra_driver_name = "aspeed-hmac-sha224",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha256",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha256)",
- .cra_driver_name = "aspeed-hmac-sha256",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
@@ -1057,11 +721,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -1071,13 +735,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
@@ -1089,11 +753,11 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
- .final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
+ .init_tfm = aspeed_sham_cra_init,
.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -1103,83 +767,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
+ CRYPTO_AHASH_ALG_BLOCK_ONLY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_reqsize = sizeof(struct aspeed_sham_reqctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha384",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA384_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha384)",
- .cra_driver_name = "aspeed-hmac-sha384",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- .alg.ahash.op = {
- .do_one_request = aspeed_ahash_do_one,
- },
- },
- {
- .alg_base = "sha512",
- .alg.ahash.base = {
- .init = aspeed_sham_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha512)",
- .cra_driver_name = "aspeed-hmac-sha512",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
}
}
},
diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h
index 68f70e01fccb..b1d07730d543 100644
--- a/drivers/crypto/aspeed/aspeed-hace.h
+++ b/drivers/crypto/aspeed/aspeed-hace.h
@@ -119,7 +119,6 @@
#define SHA_FLAGS_SHA512 BIT(4)
#define SHA_FLAGS_SHA512_224 BIT(5)
#define SHA_FLAGS_SHA512_256 BIT(6)
-#define SHA_FLAGS_HMAC BIT(8)
#define SHA_FLAGS_FINUP BIT(9)
#define SHA_FLAGS_MASK (0xff)
@@ -161,22 +160,18 @@ struct aspeed_engine_hash {
aspeed_hace_fn_t dma_prepare;
};
-struct aspeed_sha_hmac_ctx {
- struct crypto_shash *shash;
- u8 ipad[SHA512_BLOCK_SIZE];
- u8 opad[SHA512_BLOCK_SIZE];
-};
-
struct aspeed_sham_ctx {
struct aspeed_hace_dev *hace_dev;
- unsigned long flags; /* hmac flag */
-
- struct aspeed_sha_hmac_ctx base[];
};
struct aspeed_sham_reqctx {
+ /* DMA buffer written by hardware */
+ u8 digest[SHA512_DIGEST_SIZE] __aligned(64);
+
+ /* Software state sorted by size. */
+ u64 digcnt[2];
+
unsigned long flags; /* final/update state flags */
- unsigned long op; /* final or update */
u32 cmd; /* trigger cmd */
/* walk state */
@@ -188,17 +183,12 @@ struct aspeed_sham_reqctx {
size_t digsize;
size_t block_size;
size_t ivsize;
- const __be32 *sha_iv;
- /* remain data buffer */
- u8 buffer[SHA512_BLOCK_SIZE * 2];
dma_addr_t buffer_dma_addr;
- size_t bufcnt; /* buffer counter */
-
- /* output buffer */
- u8 digest[SHA512_DIGEST_SIZE] __aligned(64);
dma_addr_t digest_dma_addr;
- u64 digcnt[2];
+
+ /* This is DMA too but read-only for hardware. */
+ u8 buffer[SHA512_BLOCK_SIZE + 16];
};
struct aspeed_engine_crypto {
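Note: the reordered request context above puts the hardware-written digest
first with 64-byte alignment and moves the read-only padding buffer to the
tail, keeping each DMA region on its own cache line. A standalone
illustration of the layout (a sketch, not the driver struct):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct reqctx {
        /* device writes here: cache-line aligned, placed first */
        uint8_t digest[64] __attribute__((aligned(64)));
        uint64_t digcnt[2];     /* software state, sorted by size */
        unsigned long flags;
        /* device only reads this; fine at the tail */
        uint8_t buffer[128 + 16];
    };

    int main(void)
    {
        printf("digest at %zu, buffer at %zu, size %zu\n",
               offsetof(struct reqctx, digest),
               offsetof(struct reqctx, buffer),
               sizeof(struct reqctx));
        return 0;
    }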
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 27c5d000b4b2..3a2684208dda 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2297,6 +2297,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
+ case 0x800:
case 0x700:
case 0x600:
case 0x500:
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
index a895e4289efa..9688d116d07e 100644
--- a/drivers/crypto/atmel-i2c.c
+++ b/drivers/crypto/atmel-i2c.c
@@ -402,7 +402,7 @@ EXPORT_SYMBOL(atmel_i2c_probe);
static int __init atmel_i2c_init(void)
{
- atmel_wq = alloc_workqueue("atmel_wq", 0, 0);
+ atmel_wq = alloc_workqueue("atmel_wq", WQ_PERCPU, 0);
return atmel_wq ? 0 : -ENOMEM;
}
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 2cc36da163e8..3d7573c7bd1c 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2534,6 +2534,7 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
+ case 0x800:
case 0x700:
case 0x600:
case 0x510:
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 098f5532f389..3b2a92029b16 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -512,7 +512,7 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
if (err && (dd->flags & TDES_FLAGS_FAST)) {
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
- dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+ dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
}
return err;
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 75ee065da1ec..b04d6379244a 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -252,7 +252,7 @@ struct artpec6_crypto_dma_descriptors {
};
enum artpec6_crypto_variant {
- ARTPEC6_CRYPTO,
+ ARTPEC6_CRYPTO = 1,
ARTPEC7_CRYPTO,
};
@@ -2842,7 +2842,6 @@ MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
static int artpec6_crypto_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
enum artpec6_crypto_variant variant;
struct artpec6_crypto *ac;
struct device *dev = &pdev->dev;
@@ -2853,12 +2852,10 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
if (artpec6_crypto_dev)
return -ENODEV;
- match = of_match_node(artpec6_crypto_of_match, dev->of_node);
- if (!match)
+ variant = (enum artpec6_crypto_variant)of_device_get_match_data(dev);
+ if (!variant)
return -EINVAL;
- variant = (enum artpec6_crypto_variant)match->data;
-
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
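Note: starting the enum at 1 is what lets the probe use
of_device_get_match_data() directly: a first enumerator of 0 would be
indistinguishable from the NULL returned when no match data is attached. In
isolation:

    #include <stdint.h>
    #include <stddef.h>

    enum variant {
        VARIANT_A = 1,  /* must not be 0: 0 would alias a NULL match */
        VARIANT_B,
    };

    /* stand-in for of_device_get_match_data(): NULL when unmatched */
    static const void *get_match_data(int matched)
    {
        return matched ? (const void *)(uintptr_t)VARIANT_A : NULL;
    }

    static int probe(int matched)
    {
        enum variant v =
            (enum variant)(uintptr_t)get_match_data(matched);

        if (!v)          /* NULL and "no variant" are one check now */
            return -22;  /* -EINVAL */
        return 0;
    }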
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index acf1b197eb84..d2eaf5205b1c 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -25,10 +25,6 @@ caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_BLOB_GEN) += blob_gen.o
caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
-ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
- ccflags-y += -DCONFIG_CAAM_QI
-endif
-
caam-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c
index 079a22cc9f02..c18dbac56493 100644
--- a/drivers/crypto/caam/blob_gen.c
+++ b/drivers/crypto/caam/blob_gen.c
@@ -2,13 +2,14 @@
/*
* Copyright (C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
* Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
- * Copyright 2024 NXP
+ * Copyright 2024-2025 NXP
*/
#define pr_fmt(fmt) "caam blob_gen: " fmt
#include <linux/bitfield.h>
#include <linux/device.h>
+#include <keys/trusted-type.h>
#include <soc/fsl/caam-blob.h>
#include "compat.h"
@@ -60,18 +61,27 @@ static void caam_blob_job_done(struct device *dev, u32 *desc, u32 err, void *con
complete(&res->completion);
}
+static u32 check_caam_state(struct device *jrdev)
+{
+ const struct caam_drv_private *ctrlpriv;
+
+ ctrlpriv = dev_get_drvdata(jrdev->parent);
+ return FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status));
+}
+
int caam_process_blob(struct caam_blob_priv *priv,
struct caam_blob_info *info, bool encap)
{
- const struct caam_drv_private *ctrlpriv;
struct caam_blob_job_result testres;
struct device *jrdev = &priv->jrdev;
dma_addr_t dma_in, dma_out;
int op = OP_PCLID_BLOB;
+ int hwbk_caam_ovhd = 0;
size_t output_len;
u32 *desc;
u32 moo;
int ret;
+ int len;
if (info->key_mod_len > CAAM_BLOB_KEYMOD_LENGTH)
return -EINVAL;
@@ -82,14 +92,29 @@ int caam_process_blob(struct caam_blob_priv *priv,
} else {
op |= OP_TYPE_DECAP_PROTOCOL;
output_len = info->input_len - CAAM_BLOB_OVERHEAD;
+ info->output_len = output_len;
+ }
+
+ if (encap && info->pkey_info.is_pkey) {
+ op |= OP_PCL_BLOB_BLACK;
+ if (info->pkey_info.key_enc_algo == CAAM_ENC_ALGO_CCM) {
+ op |= OP_PCL_BLOB_EKT;
+ hwbk_caam_ovhd = CAAM_CCM_OVERHEAD;
+ }
+ if ((info->input_len + hwbk_caam_ovhd) > MAX_KEY_SIZE)
+ return -EINVAL;
+
+ len = info->input_len + hwbk_caam_ovhd;
+ } else {
+ len = info->input_len;
}
desc = kzalloc(CAAM_BLOB_DESC_BYTES_MAX, GFP_KERNEL);
if (!desc)
return -ENOMEM;
- dma_in = dma_map_single(jrdev, info->input, info->input_len,
- DMA_TO_DEVICE);
+ dma_in = dma_map_single(jrdev, info->input, len,
+ encap ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, dma_in)) {
dev_err(jrdev, "unable to map input DMA buffer\n");
ret = -ENOMEM;
@@ -104,8 +129,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
goto out_unmap_in;
}
- ctrlpriv = dev_get_drvdata(jrdev->parent);
- moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status));
+ moo = check_caam_state(jrdev);
if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
dev_warn(jrdev,
"using insecure test key, enable HAB to use unique device key!\n");
@@ -117,18 +141,48 @@ int caam_process_blob(struct caam_blob_priv *priv,
* Class 1 Context DWords 0+1+2+3. The random BK is stored in the
* Class 1 Key Register. Operation Mode is set to AES-CCM.
*/
-
init_job_desc(desc, 0);
+
+ if (encap && info->pkey_info.is_pkey) {
+ /*!1. KEY command to load the class 1 key register
+ * from the input plain key.
+ */
+ append_key(desc, dma_in, info->input_len,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ /*!2. FIFO STORE to save the protected key from the class 1 key register. */
+ if (info->pkey_info.key_enc_algo == CAAM_ENC_ALGO_CCM) {
+ append_fifo_store(desc, dma_in, info->input_len,
+ LDST_CLASS_1_CCB |
+ FIFOST_TYPE_KEY_CCM_JKEK);
+ } else {
+ append_fifo_store(desc, dma_in, info->input_len,
+ LDST_CLASS_1_CCB |
+ FIFOST_TYPE_KEY_KEK);
+ }
+ /*
+ * JUMP_OFFSET specifies the offset of the JUMP target from
+ * the JUMP command's address in the descriptor buffer.
+ */
+ append_jump(desc, JUMP_COND_NOP | BIT(0) << JUMP_OFFSET_SHIFT);
+ }
+
+ /*!3. Load class 2 key with key modifier. */
append_key_as_imm(desc, info->key_mod, info->key_mod_len,
- info->key_mod_len, CLASS_2 | KEY_DEST_CLASS_REG);
- append_seq_in_ptr_intlen(desc, dma_in, info->input_len, 0);
- append_seq_out_ptr_intlen(desc, dma_out, output_len, 0);
+ info->key_mod_len, CLASS_2 | KEY_DEST_CLASS_REG);
+
+ /*!4. SEQ IN PTR Command. */
+ append_seq_in_ptr(desc, dma_in, info->input_len, 0);
+
+ /*!5. SEQ OUT PTR Command. */
+ append_seq_out_ptr(desc, dma_out, output_len, 0);
+
+ /*!6. Blob encapsulation/decapsulation PROTOCOL Command. */
append_operation(desc, op);
- print_hex_dump_debug("data@"__stringify(__LINE__)": ",
+ print_hex_dump_debug("data@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 1, info->input,
- info->input_len, false);
- print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ len, false);
+ print_hex_dump_debug("jobdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 1, desc,
desc_bytes(desc), false);
@@ -139,7 +193,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
if (ret == -EINPROGRESS) {
wait_for_completion(&testres.completion);
ret = testres.err;
- print_hex_dump_debug("output@"__stringify(__LINE__)": ",
+ print_hex_dump_debug("output@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 1, info->output,
output_len, false);
}
@@ -149,10 +203,10 @@ int caam_process_blob(struct caam_blob_priv *priv,
dma_unmap_single(jrdev, dma_out, output_len, DMA_FROM_DEVICE);
out_unmap_in:
- dma_unmap_single(jrdev, dma_in, info->input_len, DMA_TO_DEVICE);
+ dma_unmap_single(jrdev, dma_in, len,
+ encap ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
out_free:
kfree(desc);
-
return ret;
}
EXPORT_SYMBOL(caam_process_blob);
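Note: the buffer sizing in caam_process_blob() is driven by a fixed per-blob
overhead: encapsulation output is input_len + CAAM_BLOB_OVERHEAD,
decapsulation output is input_len - CAAM_BLOB_OVERHEAD, and a CCM-protected
key adds CAAM_CCM_OVERHEAD on the input mapping. A sketch of the accounting
(the constant values below are assumptions, not taken from the header):

    #include <stddef.h>

    #define BLOB_OVERHEAD 48 /* assumed: blob key + MAC tag per blob */
    #define CCM_OVERHEAD  12 /* assumed: CCM nonce/ICV for black keys */

    /* Output length for encapsulation vs decapsulation. */
    static size_t blob_output_len(size_t input_len, int encap)
    {
        return encap ? input_len + BLOB_OVERHEAD
                     : input_len - BLOB_OVERHEAD;
    }

    /* Input mapping length when forming a CCM-protected key. */
    static size_t blob_input_len(size_t input_len, int ccm_pkey)
    {
        return input_len + (ccm_pkey ? CCM_OVERHEAD : 0);
    }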
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 2cfb1b8d8c7c..32a6e6e15ee2 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016-2019, 2023 NXP
+ * Copyright 2016-2019, 2023, 2025 NXP
*
* Based on talitos crypto API driver.
*
@@ -61,13 +61,16 @@
#include <crypto/internal/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
+#include <keys/trusted-type.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/key-type.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <soc/fsl/caam-blob.h>
/*
* crypto alg
@@ -119,12 +122,15 @@ struct caam_ctx {
dma_addr_t sh_desc_enc_dma;
dma_addr_t sh_desc_dec_dma;
dma_addr_t key_dma;
+ u8 protected_key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t protected_key_dma;
enum dma_data_direction dir;
struct device *jrdev;
struct alginfo adata;
struct alginfo cdata;
unsigned int authsize;
bool xts_key_fallback;
+ bool is_blob;
struct crypto_skcipher *fallback;
};
@@ -751,9 +757,14 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+ /* Here keylen is the actual key length */
ctx->cdata.keylen = keylen;
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
+ /* Here the protected key length is the plain key length */
+ ctx->cdata.plain_keylen = keylen;
+ ctx->cdata.key_cmd_opt = 0;
+
/* skcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
@@ -772,6 +783,62 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
return 0;
}
+static int paes_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_pkey_info *pkey_info = (struct caam_pkey_info *)key;
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
+ struct device *jrdev = ctx->jrdev;
+ int err;
+
+ ctx->cdata.key_inline = false;
+
+ keylen = keylen - CAAM_PKEY_HEADER;
+
+ /* Retrieve the length of key */
+ ctx->cdata.plain_keylen = pkey_info->plain_key_sz;
+
+ /* Retrieve the length of the blob */
+ ctx->cdata.keylen = keylen;
+
+ /* Retrieve the address of the blob */
+ ctx->cdata.key_virt = pkey_info->key_buf;
+
+ /* Validate key length for AES algorithms */
+ err = aes_check_keylen(ctx->cdata.plain_keylen);
+ if (err) {
+ dev_err(jrdev, "bad key length\n");
+ return err;
+ }
+
+ /* set command option */
+ ctx->cdata.key_cmd_opt |= KEY_ENC;
+
+ /* check if the Protected-Key is CCM key */
+ if (pkey_info->key_enc_algo == CAAM_ENC_ALGO_CCM)
+ ctx->cdata.key_cmd_opt |= KEY_EKT;
+
+ memcpy(ctx->key, ctx->cdata.key_virt, keylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ ctx->cdata.key_dma = ctx->key_dma;
+
+ if (pkey_info->key_enc_algo == CAAM_ENC_ALGO_CCM)
+ ctx->protected_key_dma = dma_map_single(jrdev, ctx->protected_key,
+ ctx->cdata.plain_keylen +
+ CAAM_CCM_OVERHEAD,
+ DMA_FROM_DEVICE);
+ else
+ ctx->protected_key_dma = dma_map_single(jrdev, ctx->protected_key,
+ ctx->cdata.plain_keylen,
+ DMA_FROM_DEVICE);
+
+ ctx->cdata.protected_key_dma = ctx->protected_key_dma;
+ ctx->is_blob = true;
+
+ return 0;
+}
+
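Note: paes_skcipher_setkey() treats the incoming key as a caam_pkey_info
header followed by the blob payload; the fields it reads (plain_key_sz,
key_enc_algo, key_buf) suggest a parse like the one below. Everything here
is a hypothetical mirror for illustration; the real header layout and
CAAM_PKEY_HEADER size live in the NXP headers:

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical stand-in for struct caam_pkey_info. */
    struct pkey_info {
        uint32_t plain_key_sz; /* length of the underlying plain key */
        uint32_t key_enc_algo; /* protection algorithm, e.g. ECB/CCM */
        uint8_t  key_buf[];    /* the blob itself follows the header */
    };

    struct parsed_key {
        const uint8_t *blob;
        size_t blob_len;
        size_t plain_len;
    };

    static int parse_pkey(const uint8_t *key, size_t keylen,
                          struct parsed_key *out)
    {
        const struct pkey_info *info = (const void *)key;

        if (keylen <= sizeof(*info))
            return -22;        /* -EINVAL: header missing */
        out->blob = info->key_buf;
        out->blob_len = keylen - sizeof(*info);
        out->plain_len = info->plain_key_sz;
        return 0;
    }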
static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
const u8 *key, unsigned int keylen)
{
@@ -1254,7 +1321,9 @@ static void init_skcipher_job(struct skcipher_request *req,
struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *jrdev = ctx->jrdev;
int ivsize = crypto_skcipher_ivsize(skcipher);
- u32 *desc = edesc->hw_desc;
+ u32 *desc = !ctx->is_blob ? edesc->hw_desc :
+ (u32 *)((u8 *)edesc->hw_desc + CAAM_DESC_BYTES_MAX);
+ dma_addr_t desc_dma;
u32 *sh_desc;
u32 in_options = 0, out_options = 0;
dma_addr_t src_dma, dst_dma, ptr;
@@ -1269,11 +1338,6 @@ static void init_skcipher_job(struct skcipher_request *req,
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
- sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
- ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
-
- len = desc_len(sh_desc);
- init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
if (ivsize || edesc->mapped_src_nents > 1) {
src_dma = edesc->sec4_sg_dma;
@@ -1283,8 +1347,6 @@ static void init_skcipher_job(struct skcipher_request *req,
src_dma = sg_dma_address(req->src);
}
- append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
-
if (likely(req->src == req->dst)) {
dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
out_options = in_options;
@@ -1296,7 +1358,25 @@ static void init_skcipher_job(struct skcipher_request *req,
out_options = LDST_SGF;
}
- append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
+ if (ctx->is_blob) {
+ cnstr_desc_skcipher_enc_dec(desc, &ctx->cdata,
+ src_dma, dst_dma, req->cryptlen + ivsize,
+ in_options, out_options,
+ ivsize, encrypt);
+
+ desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE);
+
+ cnstr_desc_protected_blob_decap(edesc->hw_desc, &ctx->cdata, desc_dma);
+ } else {
+ sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
+ ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
+
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+ append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
+
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
+ }
}
/*
@@ -1817,6 +1897,7 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
int ret = 0;
+ int len;
/*
* XTS is expected to return an error even for input length = 0
@@ -1842,8 +1923,12 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
crypto_skcipher_decrypt(&rctx->fallback_req);
}
+ len = DESC_JOB_IO_LEN * CAAM_CMD_SZ;
+ if (ctx->is_blob)
+ len += CAAM_DESC_BYTES_MAX;
+
/* allocate extended descriptor */
- edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+ edesc = skcipher_edesc_alloc(req, len);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1888,6 +1973,27 @@ static struct caam_skcipher_alg driver_algs[] = {
{
.skcipher.base = {
.base = {
+ .cra_name = "cbc(paes)",
+ .cra_driver_name = "cbc-paes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = paes_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE + CAAM_BLOB_OVERHEAD +
+ CAAM_PKEY_HEADER,
+ .max_keysize = AES_MAX_KEY_SIZE + CAAM_BLOB_OVERHEAD +
+ CAAM_PKEY_HEADER,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
+ .skcipher.base = {
+ .base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-caam",
.cra_blocksize = AES_BLOCK_SIZE,
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 7571e1ac913b..04c1105eb1f5 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -2,12 +2,13 @@
/*
* Shared descriptors for aead, skcipher algorithms
*
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2019, 2025 NXP
*/
#include "compat.h"
#include "desc_constr.h"
#include "caamalg_desc.h"
+#include <soc/fsl/caam-blob.h>
/*
* For aead functions, read payload and write payload,
@@ -1364,6 +1365,84 @@ static inline void skcipher_append_src_dst(u32 *desc)
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
+void cnstr_desc_skcipher_enc_dec(u32 * const desc, struct alginfo *cdata,
+ dma_addr_t src, dma_addr_t dst, unsigned int data_sz,
+ unsigned int in_options, unsigned int out_options,
+ unsigned int ivsize, const bool encrypt)
+{
+ u32 options = cdata->algtype | OP_ALG_AS_INIT;
+
+ if (encrypt)
+ options |= OP_ALG_ENCRYPT;
+ else
+ options |= OP_ALG_DECRYPT;
+
+ init_job_desc(desc, 0);
+
+ append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL |
+ JUMP_COND_NOP | JUMP_TEST_ALL | 1);
+
+ append_key(desc, cdata->protected_key_dma, cdata->plain_keylen,
+ CLASS_1 | KEY_DEST_CLASS_REG | cdata->key_cmd_opt);
+
+ append_seq_in_ptr(desc, src, data_sz, in_options);
+
+ append_seq_out_ptr(desc, dst, data_sz, out_options);
+
+ /* Load IV, if there is one */
+ if (ivsize)
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB);
+
+ append_operation(desc, options);
+
+ skcipher_append_src_dst(desc);
+
+ /* Store IV */
+ if (ivsize)
+ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB);
+
+ print_hex_dump_debug("skcipher_enc_dec job desc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+}
+EXPORT_SYMBOL(cnstr_desc_skcipher_enc_dec);
+
+void cnstr_desc_protected_blob_decap(u32 * const desc, struct alginfo *cdata,
+ dma_addr_t next_desc_addr)
+{
+ u32 protected_store;
+
+ init_job_desc(desc, 0);
+
+ /* Load key modifier */
+ append_load_as_imm(desc, KEYMOD, sizeof(KEYMOD) - 1,
+ LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY);
+
+ append_seq_in_ptr_intlen(desc, cdata->key_dma,
+ cdata->plain_keylen + CAAM_BLOB_OVERHEAD, 0);
+
+ append_seq_out_ptr_intlen(desc, cdata->protected_key_dma,
+ cdata->plain_keylen, 0);
+
+ protected_store = OP_PCLID_BLOB | OP_PCL_BLOB_BLACK;
+ if ((cdata->key_cmd_opt >> KEY_EKT_OFFSET) & 1)
+ protected_store |= OP_PCL_BLOB_EKT;
+
+ append_operation(desc, OP_TYPE_DECAP_PROTOCOL | protected_store);
+
+ if (next_desc_addr) {
+ append_jump(desc, JUMP_TYPE_NONLOCAL | JUMP_TEST_ALL);
+ append_ptr(desc, next_desc_addr);
+ }
+
+ print_hex_dump_debug("protected blob decap job desc@" __stringify(__LINE__) ":",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+}
+EXPORT_SYMBOL(cnstr_desc_protected_blob_decap);
+
/**
* cnstr_shdsc_skcipher_encap - skcipher encapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
@@ -1391,7 +1470,8 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
/* Load class1 key only */
append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
- cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ cdata->plain_keylen, CLASS_1 | KEY_DEST_CLASS_REG
+ | cdata->key_cmd_opt);
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
@@ -1466,7 +1546,8 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
/* Load class1 key only */
append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
- cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ cdata->plain_keylen, CLASS_1 | KEY_DEST_CLASS_REG
+ | cdata->key_cmd_opt);
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index f2893393ba5e..323490a4a756 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -2,7 +2,7 @@
/*
* Shared descriptors for aead, skcipher algorithms
*
- * Copyright 2016 NXP
+ * Copyright 2016, 2025 NXP
*/
#ifndef _CAAMALG_DESC_H_
@@ -48,6 +48,9 @@
#define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \
16 * CAAM_CMD_SZ)
+/* Key modifier for CAAM Protected blobs */
+#define KEYMOD "SECURE_KEY"
+
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
unsigned int icvsize, int era);
@@ -113,4 +116,12 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata);
void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata);
+void cnstr_desc_protected_blob_decap(u32 * const desc, struct alginfo *cdata,
+ dma_addr_t next_desc);
+
+void cnstr_desc_skcipher_enc_dec(u32 * const desc, struct alginfo *cdata,
+ dma_addr_t src, dma_addr_t dst, unsigned int data_sz,
+ unsigned int in_options, unsigned int out_options,
+ unsigned int ivsize, const bool encrypt);
+
#endif /* _CAAMALG_DESC_H_ */
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index b3d14a7f4dd1..0eb43c862516 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -181,7 +181,9 @@ static inline void test_len(struct hwrng *rng, size_t len, bool wait)
struct device *dev = ctx->ctrldev;
buf = kcalloc(CAAM_RNG_MAX_FIFO_STORE_SIZE, sizeof(u8), GFP_KERNEL);
-
+ if (!buf) {
+ return;
+ }
while (len > 0) {
read_len = rng->read(rng, buf, len, wait);
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 38ff931059b4..320be5d77737 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -24,7 +24,7 @@
bool caam_dpaa2;
EXPORT_SYMBOL(caam_dpaa2);
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
#include "qi.h"
#endif
@@ -573,7 +573,7 @@ static const struct soc_device_attribute caam_imx_soc_table[] = {
{ .soc_id = "i.MX7*", .data = &caam_imx7_data },
{ .soc_id = "i.MX8M*", .data = &caam_imx7_data },
{ .soc_id = "i.MX8ULP", .data = &caam_imx8ulp_data },
- { .soc_id = "i.MX8QM", .data = &caam_imx8ulp_data },
+ { .soc_id = "i.MX8Q*", .data = &caam_imx8ulp_data },
{ .soc_id = "VF*", .data = &caam_vf610_data },
{ .family = "Freescale i.MX" },
{ /* sentinel */ }
@@ -592,9 +592,9 @@ static int init_clocks(struct device *dev, const struct caam_imx_data *data)
int ret;
ctrlpriv->num_clks = data->num_clks;
- ctrlpriv->clks = devm_kmemdup(dev, data->clks,
- data->num_clks * sizeof(data->clks[0]),
- GFP_KERNEL);
+ ctrlpriv->clks = devm_kmemdup_array(dev, data->clks,
+ data->num_clks, sizeof(*data->clks),
+ GFP_KERNEL);
if (!ctrlpriv->clks)
return -ENOMEM;
@@ -703,12 +703,12 @@ static int caam_ctrl_rng_init(struct device *dev)
*/
if (needs_entropy_delay_adjustment())
ent_delay = 12000;
- if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+ if (!inst_handles) {
dev_info(dev,
"Entropy delay = %u\n",
ent_delay);
kick_trng(dev, ent_delay);
- ent_delay += 400;
+ ent_delay = ent_delay * 2;
}
/*
* if instantiate_rng(...) fails, the loop will rerun
@@ -831,7 +831,7 @@ static int caam_ctrl_suspend(struct device *dev)
{
const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
- if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en)
+ if (ctrlpriv->caam_off_during_pm && !ctrlpriv->no_page0)
caam_state_save(dev);
return 0;
@@ -842,7 +842,7 @@ static int caam_ctrl_resume(struct device *dev)
struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
int ret = 0;
- if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en) {
+ if (ctrlpriv->caam_off_during_pm && !ctrlpriv->no_page0) {
caam_state_restore(dev);
/* HW and rng will be reset so deinstantiation can be removed */
@@ -908,6 +908,7 @@ static int caam_probe(struct platform_device *pdev)
imx_soc_data = imx_soc_match->data;
reg_access = reg_access && imx_soc_data->page0_access;
+ ctrlpriv->no_page0 = !reg_access;
/*
* CAAM clocks cannot be controlled from kernel.
*/
@@ -967,7 +968,7 @@ iomap_ctrl:
caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
/* If (DPAA 1.x) QI present, check whether dependencies are available */
if (ctrlpriv->qi_present && !caam_dpaa2) {
ret = qman_is_probed();
@@ -1098,7 +1099,7 @@ set_dma_mask:
wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
/* If QMAN driver is present, init CAAM-QI backend */
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
ret = caam_qi_init(pdev);
if (ret)
dev_err(dev, "caam qi i/f init failed: %d\n", ret);
diff --git a/drivers/crypto/caam/debugfs.c b/drivers/crypto/caam/debugfs.c
index 6358d3cabf57..718352b7afb5 100644
--- a/drivers/crypto/caam/debugfs.c
+++ b/drivers/crypto/caam/debugfs.c
@@ -22,7 +22,7 @@ static int caam_debugfs_u32_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
/*
* This is a counter for the number of times the congestion group (where all
* the request and response queues are) reached congestion. Incremented
diff --git a/drivers/crypto/caam/debugfs.h b/drivers/crypto/caam/debugfs.h
index 8b5d1acd21a7..ef238c71f92a 100644
--- a/drivers/crypto/caam/debugfs.h
+++ b/drivers/crypto/caam/debugfs.h
@@ -18,7 +18,7 @@ static inline void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
{}
#endif
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_CAAM_QI)
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
void caam_debugfs_qi_congested(void);
void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv);
#else
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index e13470901586..c28e94fcb8c7 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -4,7 +4,7 @@
* Definitions to support CAAM descriptor instruction generation
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2018 NXP
+ * Copyright 2018, 2025 NXP
*/
#ifndef DESC_H
@@ -162,6 +162,7 @@
* Enhanced Encryption of Key
*/
#define KEY_EKT 0x00100000
+#define KEY_EKT_OFFSET 20
/*
* Encrypted with Trusted Key
@@ -403,6 +404,7 @@
#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_CCM_JKEK (0x14 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
@@ -1001,6 +1003,11 @@
#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
+/* Blob protocol protinfo bits */
+
+#define OP_PCL_BLOB_BLACK 0x0004
+#define OP_PCL_BLOB_EKT 0x0100
+
/* For DTLS - OP_PCLID_DTLS */
#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 824c94d44f94..2a29dd2c9c8a 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -3,7 +3,7 @@
* caam descriptor construction helper functions
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
- * Copyright 2019 NXP
+ * Copyright 2019, 2025 NXP
*/
#ifndef DESC_CONSTR_H
@@ -498,17 +498,23 @@ do { \
* @keylen: length of the provided algorithm key, in bytes
* @keylen_pad: padded length of the provided algorithm key, in bytes
* @key_dma: dma (bus) address where algorithm key resides
+ * @protected_key_dma: dma (bus) address where protected key resides
* @key_virt: virtual address where algorithm key resides
* @key_inline: true - key can be inlined in the descriptor; false - key is
* referenced by the descriptor
+ * @plain_keylen: size of the key to be loaded by the CAAM
+ * @key_cmd_opt: optional parameters for KEY command
*/
struct alginfo {
u32 algtype;
unsigned int keylen;
unsigned int keylen_pad;
dma_addr_t key_dma;
+ dma_addr_t protected_key_dma;
const void *key_virt;
bool key_inline;
+ u32 plain_keylen;
+ u32 key_cmd_opt;
};
/**
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index e51320150872..a88da0d31b23 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -115,6 +115,7 @@ struct caam_drv_private {
u8 blob_present; /* Nonzero if BLOB support present in device */
u8 mc_en; /* Nonzero if MC f/w is active */
u8 optee_en; /* Nonzero if OP-TEE f/w is active */
+ u8 no_page0; /* Nonzero if register page 0 is not controlled by Linux */
bool pr_support; /* RNG prediction resistance available */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
@@ -226,7 +227,7 @@ static inline int caam_prng_register(struct device *dev)
static inline void caam_prng_unregister(void *data) {}
#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PRNG_API */
-#ifdef CONFIG_CAAM_QI
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
int caam_qi_algapi_init(struct device *dev);
void caam_qi_algapi_exit(void);
@@ -242,7 +243,7 @@ static inline void caam_qi_algapi_exit(void)
{
}
-#endif /* CONFIG_CAAM_QI */
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI */
static inline u64 caam_get_dma_mask(struct device *dev)
{
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 9fcdb64084ac..0ef00df9730e 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -629,8 +629,7 @@ static int caam_jr_probe(struct platform_device *pdev)
}
/* Initialize crypto engine */
- jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, NULL,
- false,
+ jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, false,
CRYPTO_ENGINE_MAX_QLEN);
if (!jrpriv->engine) {
dev_err(jrdev, "Could not init crypto-engine\n");
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index b6e7c0b29d4e..1e731ed8702b 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -442,11 +442,8 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
if (!cpumask_test_cpu(*cpu, cpus)) {
int *pcpu = &get_cpu_var(last_cpu);
- *pcpu = cpumask_next(*pcpu, cpus);
- if (*pcpu >= nr_cpu_ids)
- *pcpu = cpumask_first(cpus);
+ *pcpu = cpumask_next_wrap(*pcpu, cpus);
*cpu = *pcpu;
-
put_cpu_var(last_cpu);
}
drv_ctx->cpu = *cpu;
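
Reviewer note: cpumask_next_wrap() folds the open-coded wrap-around into
a single helper. A sketch of the equivalent logic under the two-argument
signature used here (the real helper additionally guards against
spinning on an empty mask):

	unsigned int next = cpumask_next(*pcpu, cpus);

	if (next >= nr_cpu_ids)
		next = cpumask_first(cpus);
	*pcpu = next;
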
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
index d4e06999af9b..a6a76e50ba84 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
@@ -192,7 +192,7 @@ int nitrox_mbox_init(struct nitrox_device *ndev)
}
/* allocate pf2vf response workqueue */
- ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0);
+ ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", WQ_PERCPU, 0);
if (!ndev->iov.pf2vf_wq) {
kfree(ndev->iov.vfdev);
ndev->iov.vfdev = NULL;
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index f394e45e11ab..f16a0f611317 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -39,6 +39,7 @@ config CRYPTO_DEV_SP_PSP
bool "Platform Security Processor (PSP) device"
default y
depends on CRYPTO_DEV_CCP_DD && X86_64 && AMD_IOMMU
+ select PCI_TSM if PCI
help
Provide support for the AMD Platform Security Processor (PSP).
The PSP is a dedicated processor that provides support for key
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 394484929dae..0424e08561ef 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -13,7 +13,12 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
tee-dev.o \
platform-access.o \
dbc.o \
- hsti.o
+ hsti.o \
+ sfs.o
+
+ifeq ($(CONFIG_PCI_TSM),y)
+ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += sev-dev-tsm.o sev-dev-tio.o
+endif
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index a1055554b47a..dc26bc22c91d 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -319,5 +319,8 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
void ccp5_debugfs_destroy(void)
{
+ mutex_lock(&ccp_debugfs_lock);
debugfs_remove_recursive(ccp_debugfs_dir);
+ ccp_debugfs_dir = NULL;
+ mutex_unlock(&ccp_debugfs_lock);
}
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index c531d13d971f..246801912e1a 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -507,7 +507,7 @@ int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
u32 trng_value;
- int len = min_t(int, sizeof(trng_value), max);
+ int len = min(sizeof(trng_value), max);
/* Locking is provided by the caller so we can update device
* hwrng-related fields safely
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 109b5aef4034..d78865d9d5f0 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -633,10 +633,16 @@ static noinline_for_stack int
ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
struct ccp_aes_engine *aes = &cmd->u.aes;
- struct ccp_dm_workarea key, ctx, final_wa, tag;
- struct ccp_data src, dst;
- struct ccp_data aad;
- struct ccp_op op;
+ struct {
+ struct ccp_dm_workarea key;
+ struct ccp_dm_workarea ctx;
+ struct ccp_dm_workarea final;
+ struct ccp_dm_workarea tag;
+ struct ccp_data src;
+ struct ccp_data dst;
+ struct ccp_data aad;
+ struct ccp_op op;
+ } *wa __free(kfree) = kzalloc(sizeof(*wa), GFP_KERNEL);
unsigned int dm_offset;
unsigned int authsize;
unsigned int jobid;
@@ -650,6 +656,9 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
struct scatterlist *p_outp, sg_outp[2];
struct scatterlist *p_aad;
+ if (!wa)
+ return -ENOMEM;
+
if (!aes->iv)
return -EINVAL;
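
Reviewer note: moving the on-stack work areas into one heap allocation
relies on scope-based cleanup from <linux/cleanup.h>: the __free(kfree)
annotation frees *wa automatically on every return path, which is why
none of the error gotos below need an explicit kfree(). The pattern in
isolation (sketch; big_ctx is a placeholder type):

	struct big_ctx *ctx __free(kfree) = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	/* every return after this point implicitly kfree()s ctx */
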
@@ -696,26 +705,26 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
jobid = CCP_NEW_JOBID(cmd_q->ccp);
- memset(&op, 0, sizeof(op));
- op.cmd_q = cmd_q;
- op.jobid = jobid;
- op.sb_key = cmd_q->sb_key; /* Pre-allocated */
- op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
- op.init = 1;
- op.u.aes.type = aes->type;
+ memset(&wa->op, 0, sizeof(wa->op));
+ wa->op.cmd_q = cmd_q;
+ wa->op.jobid = jobid;
+ wa->op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+ wa->op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+ wa->op.init = 1;
+ wa->op.u.aes.type = aes->type;
/* Copy the key to the LSB */
- ret = ccp_init_dm_workarea(&key, cmd_q,
+ ret = ccp_init_dm_workarea(&wa->key, cmd_q,
CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
DMA_TO_DEVICE);
if (ret)
return ret;
dm_offset = CCP_SB_BYTES - aes->key_len;
- ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ ret = ccp_set_dm_area(&wa->key, dm_offset, aes->key, 0, aes->key_len);
if (ret)
goto e_key;
- ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ ret = ccp_copy_to_sb(cmd_q, &wa->key, wa->op.jobid, wa->op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
@@ -726,58 +735,58 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
* There is an assumption here that the IV is 96 bits in length, plus
* a nonce of 32 bits. If no IV is present, use a zeroed buffer.
*/
- ret = ccp_init_dm_workarea(&ctx, cmd_q,
+ ret = ccp_init_dm_workarea(&wa->ctx, cmd_q,
CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
DMA_BIDIRECTIONAL);
if (ret)
goto e_key;
dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
- ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_set_dm_area(&wa->ctx, dm_offset, aes->iv, 0, aes->iv_len);
if (ret)
goto e_ctx;
- ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ ret = ccp_copy_to_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_ctx;
}
- op.init = 1;
+ wa->op.init = 1;
if (aes->aad_len > 0) {
/* Step 1: Run a GHASH over the Additional Authenticated Data */
- ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
+ ret = ccp_init_data(&wa->aad, cmd_q, p_aad, aes->aad_len,
AES_BLOCK_SIZE,
DMA_TO_DEVICE);
if (ret)
goto e_ctx;
- op.u.aes.mode = CCP_AES_MODE_GHASH;
- op.u.aes.action = CCP_AES_GHASHAAD;
+ wa->op.u.aes.mode = CCP_AES_MODE_GHASH;
+ wa->op.u.aes.action = CCP_AES_GHASHAAD;
- while (aad.sg_wa.bytes_left) {
- ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);
+ while (wa->aad.sg_wa.bytes_left) {
+ ccp_prepare_data(&wa->aad, NULL, &wa->op, AES_BLOCK_SIZE, true);
- ret = cmd_q->ccp->vdata->perform->aes(&op);
+ ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_aad;
}
- ccp_process_data(&aad, NULL, &op);
- op.init = 0;
+ ccp_process_data(&wa->aad, NULL, &wa->op);
+ wa->op.init = 0;
}
}
- op.u.aes.mode = CCP_AES_MODE_GCTR;
- op.u.aes.action = aes->action;
+ wa->op.u.aes.mode = CCP_AES_MODE_GCTR;
+ wa->op.u.aes.action = aes->action;
if (ilen > 0) {
/* Step 2: Run a GCTR over the plaintext */
in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;
- ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
+ ret = ccp_init_data(&wa->src, cmd_q, p_inp, ilen,
AES_BLOCK_SIZE,
in_place ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE);
@@ -785,52 +794,52 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_aad;
if (in_place) {
- dst = src;
+ wa->dst = wa->src;
} else {
- ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
+ ret = ccp_init_data(&wa->dst, cmd_q, p_outp, ilen,
AES_BLOCK_SIZE, DMA_FROM_DEVICE);
if (ret)
goto e_src;
}
- op.soc = 0;
- op.eom = 0;
- op.init = 1;
- while (src.sg_wa.bytes_left) {
- ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
- if (!src.sg_wa.bytes_left) {
+ wa->op.soc = 0;
+ wa->op.eom = 0;
+ wa->op.init = 1;
+ while (wa->src.sg_wa.bytes_left) {
+ ccp_prepare_data(&wa->src, &wa->dst, &wa->op, AES_BLOCK_SIZE, true);
+ if (!wa->src.sg_wa.bytes_left) {
unsigned int nbytes = ilen % AES_BLOCK_SIZE;
if (nbytes) {
- op.eom = 1;
- op.u.aes.size = (nbytes * 8) - 1;
+ wa->op.eom = 1;
+ wa->op.u.aes.size = (nbytes * 8) - 1;
}
}
- ret = cmd_q->ccp->vdata->perform->aes(&op);
+ ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
- ccp_process_data(&src, &dst, &op);
- op.init = 0;
+ ccp_process_data(&wa->src, &wa->dst, &wa->op);
+ wa->op.init = 0;
}
}
/* Step 3: Update the IV portion of the context with the original IV */
- ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ ret = ccp_copy_from_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
- ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_set_dm_area(&wa->ctx, dm_offset, aes->iv, 0, aes->iv_len);
if (ret)
goto e_dst;
- ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ ret = ccp_copy_to_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
@@ -840,75 +849,75 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
/* Step 4: Concatenate the lengths of the AAD and source, and
* hash that 16 byte buffer.
*/
- ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
+ ret = ccp_init_dm_workarea(&wa->final, cmd_q, AES_BLOCK_SIZE,
DMA_BIDIRECTIONAL);
if (ret)
goto e_dst;
- final = (__be64 *)final_wa.address;
+ final = (__be64 *)wa->final.address;
final[0] = cpu_to_be64(aes->aad_len * 8);
final[1] = cpu_to_be64(ilen * 8);
- memset(&op, 0, sizeof(op));
- op.cmd_q = cmd_q;
- op.jobid = jobid;
- op.sb_key = cmd_q->sb_key; /* Pre-allocated */
- op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
- op.init = 1;
- op.u.aes.type = aes->type;
- op.u.aes.mode = CCP_AES_MODE_GHASH;
- op.u.aes.action = CCP_AES_GHASHFINAL;
- op.src.type = CCP_MEMTYPE_SYSTEM;
- op.src.u.dma.address = final_wa.dma.address;
- op.src.u.dma.length = AES_BLOCK_SIZE;
- op.dst.type = CCP_MEMTYPE_SYSTEM;
- op.dst.u.dma.address = final_wa.dma.address;
- op.dst.u.dma.length = AES_BLOCK_SIZE;
- op.eom = 1;
- op.u.aes.size = 0;
- ret = cmd_q->ccp->vdata->perform->aes(&op);
+ memset(&wa->op, 0, sizeof(wa->op));
+ wa->op.cmd_q = cmd_q;
+ wa->op.jobid = jobid;
+ wa->op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+ wa->op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+ wa->op.init = 1;
+ wa->op.u.aes.type = aes->type;
+ wa->op.u.aes.mode = CCP_AES_MODE_GHASH;
+ wa->op.u.aes.action = CCP_AES_GHASHFINAL;
+ wa->op.src.type = CCP_MEMTYPE_SYSTEM;
+ wa->op.src.u.dma.address = wa->final.dma.address;
+ wa->op.src.u.dma.length = AES_BLOCK_SIZE;
+ wa->op.dst.type = CCP_MEMTYPE_SYSTEM;
+ wa->op.dst.u.dma.address = wa->final.dma.address;
+ wa->op.dst.u.dma.length = AES_BLOCK_SIZE;
+ wa->op.eom = 1;
+ wa->op.u.aes.size = 0;
+ ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
if (ret)
goto e_final_wa;
if (aes->action == CCP_AES_ACTION_ENCRYPT) {
/* Put the ciphered tag after the ciphertext. */
- ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
+ ccp_get_dm_area(&wa->final, 0, p_tag, 0, authsize);
} else {
/* Does this ciphered tag match the input? */
- ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
+ ret = ccp_init_dm_workarea(&wa->tag, cmd_q, authsize,
DMA_BIDIRECTIONAL);
if (ret)
goto e_final_wa;
- ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
+ ret = ccp_set_dm_area(&wa->tag, 0, p_tag, 0, authsize);
if (ret) {
- ccp_dm_free(&tag);
+ ccp_dm_free(&wa->tag);
goto e_final_wa;
}
- ret = crypto_memneq(tag.address, final_wa.address,
+ ret = crypto_memneq(wa->tag.address, wa->final.address,
authsize) ? -EBADMSG : 0;
- ccp_dm_free(&tag);
+ ccp_dm_free(&wa->tag);
}
e_final_wa:
- ccp_dm_free(&final_wa);
+ ccp_dm_free(&wa->final);
e_dst:
if (ilen > 0 && !in_place)
- ccp_free_data(&dst, cmd_q);
+ ccp_free_data(&wa->dst, cmd_q);
e_src:
if (ilen > 0)
- ccp_free_data(&src, cmd_q);
+ ccp_free_data(&wa->src, cmd_q);
e_aad:
if (aes->aad_len)
- ccp_free_data(&aad, cmd_q);
+ ccp_free_data(&wa->aad, cmd_q);
e_ctx:
- ccp_dm_free(&ctx);
+ ccp_dm_free(&wa->ctx);
e_key:
- ccp_dm_free(&key);
+ ccp_dm_free(&wa->key);
return ret;
}
diff --git a/drivers/crypto/ccp/hsti.c b/drivers/crypto/ccp/hsti.c
index 1b39a4fb55c0..c29c6a9c0f3f 100644
--- a/drivers/crypto/ccp/hsti.c
+++ b/drivers/crypto/ccp/hsti.c
@@ -74,7 +74,7 @@ struct attribute_group psp_security_attr_group = {
.is_visible = psp_security_is_visible,
};
-static int psp_poulate_hsti(struct psp_device *psp)
+static int psp_populate_hsti(struct psp_device *psp)
{
struct hsti_request *req;
int ret;
@@ -84,11 +84,11 @@ static int psp_poulate_hsti(struct psp_device *psp)
return 0;
/* Allocate command-response buffer */
- req = kzalloc(sizeof(*req), GFP_KERNEL | __GFP_ZERO);
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- req->header.payload_size = sizeof(req);
+ req->header.payload_size = sizeof(*req);
ret = psp_send_platform_access_msg(PSP_CMD_HSTI_QUERY, (struct psp_request *)req);
if (ret)
@@ -114,7 +114,7 @@ int psp_init_hsti(struct psp_device *psp)
int ret;
if (PSP_FEATURE(psp, HSTI)) {
- ret = psp_poulate_hsti(psp);
+ ret = psp_populate_hsti(psp);
if (ret)
return ret;
}
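
Reviewer note: the payload_size change above fixes the classic
pointer-sizeof bug; with req declared as a pointer, the two expressions
differ:

	struct hsti_request *req;

	sizeof(req);	/* size of the pointer, 8 bytes on 64-bit */
	sizeof(*req);	/* size of the command-response structure */

The old value under-reported the payload size, so the PSP was told the
buffer was far smaller than the one actually allocated.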
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 1c5a7189631e..9e21da0e298a 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -17,6 +17,7 @@
#include "psp-dev.h"
#include "sev-dev.h"
#include "tee-dev.h"
+#include "sfs.h"
#include "platform-access.h"
#include "dbc.h"
#include "hsti.h"
@@ -182,6 +183,17 @@ static int psp_check_tee_support(struct psp_device *psp)
return 0;
}
+static int psp_check_sfs_support(struct psp_device *psp)
+{
+ /* Check if device supports SFS feature */
+ if (!psp->capability.sfs) {
+ dev_dbg(psp->dev, "psp does not support SFS\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int psp_init(struct psp_device *psp)
{
int ret;
@@ -198,6 +210,12 @@ static int psp_init(struct psp_device *psp)
return ret;
}
+ if (!psp_check_sfs_support(psp)) {
+ ret = sfs_dev_init(psp);
+ if (ret)
+ return ret;
+ }
+
if (psp->vdata->platform_access) {
ret = platform_access_dev_init(psp);
if (ret)
@@ -302,6 +320,8 @@ void psp_dev_destroy(struct sp_device *sp)
tee_dev_destroy(psp);
+ sfs_dev_destroy(psp);
+
dbc_dev_destroy(psp);
platform_access_dev_destroy(psp);
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index e43ce87ede76..268c83f298cb 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -32,7 +32,8 @@ union psp_cap_register {
unsigned int sev :1,
tee :1,
dbc_thru_ext :1,
- rsvd1 :4,
+ sfs :1,
+ rsvd1 :3,
security_reporting :1,
fused_part :1,
rsvd2 :1,
@@ -68,6 +69,7 @@ struct psp_device {
void *tee_data;
void *platform_access_data;
void *dbc_data;
+ void *sfs_data;
union psp_cap_register capability;
};
@@ -118,12 +120,16 @@ struct psp_ext_request {
* @PSP_SUB_CMD_DBC_SET_UID: Set UID for DBC
* @PSP_SUB_CMD_DBC_GET_PARAMETER: Get parameter from DBC
* @PSP_SUB_CMD_DBC_SET_PARAMETER: Set parameter for DBC
+ * @PSP_SUB_CMD_SFS_GET_FW_VERS: Get firmware versions for ASP and other MP
+ * @PSP_SUB_CMD_SFS_UPDATE: Command to load, verify and execute SFS package
*/
enum psp_sub_cmd {
PSP_SUB_CMD_DBC_GET_NONCE = PSP_DYNAMIC_BOOST_GET_NONCE,
PSP_SUB_CMD_DBC_SET_UID = PSP_DYNAMIC_BOOST_SET_UID,
PSP_SUB_CMD_DBC_GET_PARAMETER = PSP_DYNAMIC_BOOST_GET_PARAMETER,
PSP_SUB_CMD_DBC_SET_PARAMETER = PSP_DYNAMIC_BOOST_SET_PARAMETER,
+ PSP_SUB_CMD_SFS_GET_FW_VERS = PSP_SFS_GET_FW_VERSIONS,
+ PSP_SUB_CMD_SFS_UPDATE = PSP_SFS_UPDATE,
};
int psp_extended_mailbox_cmd(struct psp_device *psp, unsigned int timeout_msecs,
diff --git a/drivers/crypto/ccp/sev-dev-tio.c b/drivers/crypto/ccp/sev-dev-tio.c
new file mode 100644
index 000000000000..9a98f98c20a7
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev-tio.c
@@ -0,0 +1,864 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+// Interface to PSP for CCP/SEV-TIO/SNP-VM
+
+#include <linux/pci.h>
+#include <linux/tsm.h>
+#include <linux/psp.h>
+#include <linux/vmalloc.h>
+#include <linux/bitfield.h>
+#include <linux/pci-doe.h>
+#include <asm/sev-common.h>
+#include <asm/sev.h>
+#include <asm/page.h>
+#include "sev-dev.h"
+#include "sev-dev-tio.h"
+
+#define to_tio_status(dev_data) \
+ (container_of((dev_data), struct tio_dsm, data)->sev->tio_status)
+
+#define SLA_PAGE_TYPE_DATA 0
+#define SLA_PAGE_TYPE_SCATTER 1
+#define SLA_PAGE_SIZE_4K 0
+#define SLA_PAGE_SIZE_2M 1
+#define SLA_SZ(s) ((s).page_size == SLA_PAGE_SIZE_2M ? SZ_2M : SZ_4K)
+#define SLA_SCATTER_LEN(s) (SLA_SZ(s) / sizeof(struct sla_addr_t))
+#define SLA_EOL ((struct sla_addr_t) { .pfn = ((1UL << 40) - 1) })
+#define SLA_NULL ((struct sla_addr_t) { 0 })
+#define IS_SLA_NULL(s) ((s).sla == SLA_NULL.sla)
+#define IS_SLA_EOL(s) ((s).sla == SLA_EOL.sla)
+
+static phys_addr_t sla_to_pa(struct sla_addr_t sla)
+{
+ u64 pfn = sla.pfn;
+ u64 pa = pfn << PAGE_SHIFT;
+
+ return pa;
+}
+
+static void *sla_to_va(struct sla_addr_t sla)
+{
+ void *va = __va(__sme_clr(sla_to_pa(sla)));
+
+ return va;
+}
+
+#define sla_to_pfn(sla) (__pa(sla_to_va(sla)) >> PAGE_SHIFT)
+#define sla_to_page(sla) virt_to_page(sla_to_va(sla))
+
+static struct sla_addr_t make_sla(struct page *pg, bool stp)
+{
+ u64 pa = __sme_set(page_to_phys(pg));
+ struct sla_addr_t ret = {
+ .pfn = pa >> PAGE_SHIFT,
+ .page_size = SLA_PAGE_SIZE_4K, /* Do not do SLA_PAGE_SIZE_2M ATM */
+ .page_type = stp ? SLA_PAGE_TYPE_SCATTER : SLA_PAGE_TYPE_DATA
+ };
+
+ return ret;
+}
+
+/* the BUFFER Structure */
+#define SLA_BUFFER_FLAG_ENCRYPTION BIT(0)
+
+/*
+ * struct sla_buffer_hdr - Scatter list address buffer header
+ *
+ * @capacity_sz: Total capacity of the buffer in bytes
+ * @payload_sz: Size of buffer payload in bytes, must be multiple of 32B
+ * @flags: Buffer flags (SLA_BUFFER_FLAG_ENCRYPTION: buffer is encrypted)
+ * @iv: Initialization vector used for encryption
+ * @authtag: Authentication tag for encrypted buffer
+ */
+struct sla_buffer_hdr {
+ u32 capacity_sz;
+ u32 payload_sz; /* The size of BUFFER_PAYLOAD in bytes. Must be multiple of 32B */
+ u32 flags;
+ u8 reserved1[4];
+ u8 iv[16]; /* IV used for the encryption of this buffer */
+ u8 authtag[16]; /* Authentication tag for this buffer */
+ u8 reserved2[16];
+} __packed;
+
+enum spdm_data_type_t {
+ DOBJ_DATA_TYPE_SPDM = 0x1,
+ DOBJ_DATA_TYPE_SECURE_SPDM = 0x2,
+};
+
+struct spdm_dobj_hdr_req {
+ struct spdm_dobj_hdr hdr; /* hdr.id == SPDM_DOBJ_ID_REQ */
+ u8 data_type; /* spdm_data_type_t */
+ u8 reserved2[5];
+} __packed;
+
+struct spdm_dobj_hdr_resp {
+ struct spdm_dobj_hdr hdr; /* hdr.id == SPDM_DOBJ_ID_RESP */
+ u8 data_type; /* spdm_data_type_t */
+ u8 reserved2[5];
+} __packed;
+
+/* Defined in sev-dev-tio.h so sev-dev-tsm.c can read types of blobs */
+struct spdm_dobj_hdr_cert;
+struct spdm_dobj_hdr_meas;
+struct spdm_dobj_hdr_report;
+
+/* Used in all SPDM-aware TIO commands */
+struct spdm_ctrl {
+ struct sla_addr_t req;
+ struct sla_addr_t resp;
+ struct sla_addr_t scratch;
+ struct sla_addr_t output;
+} __packed;
+
+static size_t sla_dobj_id_to_size(u8 id)
+{
+ size_t n;
+
+ BUILD_BUG_ON(sizeof(struct spdm_dobj_hdr_resp) != 0x10);
+ switch (id) {
+ case SPDM_DOBJ_ID_REQ:
+ n = sizeof(struct spdm_dobj_hdr_req);
+ break;
+ case SPDM_DOBJ_ID_RESP:
+ n = sizeof(struct spdm_dobj_hdr_resp);
+ break;
+ default:
+ WARN_ON(1);
+ n = 0;
+ break;
+ }
+
+ return n;
+}
+
+#define SPDM_DOBJ_HDR_SIZE(hdr) sla_dobj_id_to_size((hdr)->id)
+#define SPDM_DOBJ_DATA(hdr) ((u8 *)(hdr) + SPDM_DOBJ_HDR_SIZE(hdr))
+#define SPDM_DOBJ_LEN(hdr) ((hdr)->length - SPDM_DOBJ_HDR_SIZE(hdr))
+
+#define sla_to_dobj_resp_hdr(buf) ((struct spdm_dobj_hdr_resp *) \
+ sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_RESP))
+#define sla_to_dobj_req_hdr(buf) ((struct spdm_dobj_hdr_req *) \
+ sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_REQ))
+
+static struct spdm_dobj_hdr *sla_to_dobj_hdr(struct sla_buffer_hdr *buf)
+{
+ if (!buf)
+ return NULL;
+
+ return (struct spdm_dobj_hdr *) &buf[1];
+}
+
+static struct spdm_dobj_hdr *sla_to_dobj_hdr_check(struct sla_buffer_hdr *buf, u32 check_dobjid)
+{
+ struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf);
+
+ if (WARN_ON_ONCE(!hdr))
+ return NULL;
+
+ if (hdr->id != check_dobjid) {
+ pr_err("Expected data object id %d, found %d\n", check_dobjid, hdr->id);
+ return NULL;
+ }
+
+ return hdr;
+}
+
+static void *sla_to_data(struct sla_buffer_hdr *buf, u32 dobjid)
+{
+ struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf);
+
+ if (WARN_ON_ONCE(dobjid != SPDM_DOBJ_ID_REQ && dobjid != SPDM_DOBJ_ID_RESP))
+ return NULL;
+
+ if (!hdr)
+ return NULL;
+
+ return (u8 *) hdr + sla_dobj_id_to_size(dobjid);
+}
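+
+/*
+ * Reviewer note: the SPDM_DOBJ_* macros and helpers above assume a fixed
+ * nesting: the 64-byte sla_buffer_hdr is immediately followed by the
+ * data-object header, which is immediately followed by the payload.
+ * In sketch form:
+ *
+ *	[ sla_buffer_hdr | spdm_dobj_hdr_req/resp | payload ... ]
+ *
+ *	struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf);  // == &buf[1]
+ *	void *payload = SPDM_DOBJ_DATA(hdr);  // hdr + per-id header size
+ *	u32 plen = SPDM_DOBJ_LEN(hdr);        // hdr->length minus header
+ */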
+
+/*
+ * struct sev_data_tio_status - SEV_CMD_TIO_STATUS command
+ *
+ * @length: Length of this command buffer in bytes
+ * @status_paddr: System physical address of the TIO_STATUS structure
+ */
+struct sev_data_tio_status {
+ u32 length;
+ u8 reserved[4];
+ u64 status_paddr;
+} __packed;
+
+/* TIO_INIT */
+struct sev_data_tio_init {
+ u32 length;
+ u8 reserved[12];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_create - TIO_DEV_CREATE command
+ *
+ * @length: Length in bytes of this command buffer
+ * @dev_ctx_sla: Scatter list address pointing to a buffer to be used as a device context buffer
+ * @device_id: PCIe Routing Identifier of the device to connect to
+ * @root_port_id: PCIe Routing Identifier of the root port of the device
+ * @segment_id: PCIe Segment Identifier of the device to connect to
+ */
+struct sev_data_tio_dev_create {
+ u32 length;
+ u8 reserved1[4];
+ struct sla_addr_t dev_ctx_sla;
+ u16 device_id;
+ u16 root_port_id;
+ u8 segment_id;
+ u8 reserved2[11];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_connect - TIO_DEV_CONNECT command
+ *
+ * @length: Length in bytes of this command buffer
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ * @tc_mask: Bitmask of the traffic classes to initialize for SEV-TIO usage.
+ * Setting the kth bit of the TC_MASK to 1 indicates that the traffic
+ * class k will be initialized
+ * @cert_slot: Slot number of the certificate requested for constructing the SPDM session
+ * @ide_stream_id: IDE stream IDs to be associated with this device.
+ * Valid only if corresponding bit in TC_MASK is set
+ */
+struct sev_data_tio_dev_connect {
+ u32 length;
+ u8 reserved1[4];
+ struct spdm_ctrl spdm_ctrl;
+ u8 reserved2[8];
+ struct sla_addr_t dev_ctx_sla;
+ u8 tc_mask;
+ u8 cert_slot;
+ u8 reserved3[6];
+ u8 ide_stream_id[8];
+ u8 reserved4[8];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_disconnect - TIO_DEV_DISCONNECT command
+ *
+ * @length: Length in bytes of this command buffer
+ * @flags: Command flags (TIO_DEV_DISCONNECT_FLAG_FORCE: force disconnect)
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ */
+#define TIO_DEV_DISCONNECT_FLAG_FORCE BIT(0)
+
+struct sev_data_tio_dev_disconnect {
+ u32 length;
+ u32 flags;
+ struct spdm_ctrl spdm_ctrl;
+ struct sla_addr_t dev_ctx_sla;
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_meas - TIO_DEV_MEASUREMENTS command
+ *
+ * @length: Length in bytes of this command buffer
+ * @flags: Command flags (TIO_DEV_MEAS_FLAG_RAW_BITSTREAM: request raw measurements)
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ * @meas_nonce: Nonce for measurement freshness verification
+ */
+#define TIO_DEV_MEAS_FLAG_RAW_BITSTREAM BIT(0)
+
+struct sev_data_tio_dev_meas {
+ u32 length;
+ u32 flags;
+ struct spdm_ctrl spdm_ctrl;
+ struct sla_addr_t dev_ctx_sla;
+ u8 meas_nonce[32];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_certs - TIO_DEV_CERTIFICATES command
+ *
+ * @length: Length in bytes of this command buffer
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ */
+struct sev_data_tio_dev_certs {
+ u32 length;
+ u8 reserved[4];
+ struct spdm_ctrl spdm_ctrl;
+ struct sla_addr_t dev_ctx_sla;
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_reclaim - TIO_DEV_RECLAIM command
+ *
+ * @length: Length in bytes of this command buffer
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ *
+ * This command reclaims resources associated with a device context.
+ */
+struct sev_data_tio_dev_reclaim {
+ u32 length;
+ u8 reserved[4];
+ struct sla_addr_t dev_ctx_sla;
+} __packed;
+
+static struct sla_buffer_hdr *sla_buffer_map(struct sla_addr_t sla)
+{
+ struct sla_buffer_hdr *buf;
+
+ BUILD_BUG_ON(sizeof(struct sla_buffer_hdr) != 0x40);
+ if (IS_SLA_NULL(sla))
+ return NULL;
+
+ if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
+ struct sla_addr_t *scatter = sla_to_va(sla);
+ unsigned int i, npages = 0;
+
+ for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) {
+ if (WARN_ON_ONCE(SLA_SZ(scatter[i]) > SZ_4K))
+ return NULL;
+
+ if (WARN_ON_ONCE(scatter[i].page_type == SLA_PAGE_TYPE_SCATTER))
+ return NULL;
+
+ if (IS_SLA_EOL(scatter[i])) {
+ npages = i;
+ break;
+ }
+ }
+ if (WARN_ON_ONCE(!npages))
+ return NULL;
+
+ struct page **pp = kmalloc_array(npages, sizeof(pp[0]), GFP_KERNEL);
+
+ if (!pp)
+ return NULL;
+
+ for (i = 0; i < npages; ++i)
+ pp[i] = sla_to_page(scatter[i]);
+
+ buf = vm_map_ram(pp, npages, 0);
+ kfree(pp);
+ } else {
+ struct page *pg = sla_to_page(sla);
+
+ buf = vm_map_ram(&pg, 1, 0);
+ }
+
+ return buf;
+}
+
+static void sla_buffer_unmap(struct sla_addr_t sla, struct sla_buffer_hdr *buf)
+{
+ if (!buf)
+ return;
+
+ if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
+ struct sla_addr_t *scatter = sla_to_va(sla);
+ unsigned int i, npages = 0;
+
+ for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) {
+ if (IS_SLA_EOL(scatter[i])) {
+ npages = i;
+ break;
+ }
+ }
+ if (!npages)
+ return;
+
+ vm_unmap_ram(buf, npages);
+ } else {
+ vm_unmap_ram(buf, 1);
+ }
+}
+
+static void dobj_response_init(struct sla_buffer_hdr *buf)
+{
+ struct spdm_dobj_hdr *dobj = sla_to_dobj_hdr(buf);
+
+ dobj->id = SPDM_DOBJ_ID_RESP;
+ dobj->version.major = 0x1;
+ dobj->version.minor = 0;
+ dobj->length = 0;
+ buf->payload_sz = sla_dobj_id_to_size(dobj->id) + dobj->length;
+}
+
+static void sla_free(struct sla_addr_t sla, size_t len, bool firmware_state)
+{
+ unsigned int npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
+ struct sla_addr_t *scatter = NULL;
+ int ret = 0, i;
+
+ if (IS_SLA_NULL(sla))
+ return;
+
+ if (firmware_state) {
+ if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
+ scatter = sla_to_va(sla);
+
+ for (i = 0; i < npages; ++i) {
+ if (IS_SLA_EOL(scatter[i]))
+ break;
+
+ ret = snp_reclaim_pages(sla_to_pa(scatter[i]), 1, false);
+ if (ret)
+ break;
+ }
+ } else {
+ ret = snp_reclaim_pages(sla_to_pa(sla), 1, false);
+ }
+ }
+
+ if (WARN_ON(ret))
+ return;
+
+ if (scatter) {
+ for (i = 0; i < npages; ++i) {
+ if (IS_SLA_EOL(scatter[i]))
+ break;
+ free_page((unsigned long)sla_to_va(scatter[i]));
+ }
+ }
+
+ free_page((unsigned long)sla_to_va(sla));
+}
+
+static struct sla_addr_t sla_alloc(size_t len, bool firmware_state)
+{
+ unsigned long i, npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
+ struct sla_addr_t *scatter = NULL;
+ struct sla_addr_t ret = SLA_NULL;
+ struct sla_buffer_hdr *buf;
+ struct page *pg;
+
+ if (npages == 0)
+ return ret;
+
+ if (WARN_ON_ONCE(npages > (PAGE_SIZE / sizeof(struct sla_addr_t)) - 1))
+ return ret;
+
+ BUILD_BUG_ON(PAGE_SIZE < SZ_4K);
+
+ if (npages > 1) {
+ pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pg)
+ return SLA_NULL;
+
+ ret = make_sla(pg, true);
+ scatter = page_to_virt(pg);
+ for (i = 0; i < npages; ++i) {
+ pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pg)
+ goto no_reclaim_exit;
+
+ scatter[i] = make_sla(pg, false);
+ }
+ scatter[i] = SLA_EOL;
+ } else {
+ pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pg)
+ return SLA_NULL;
+
+ ret = make_sla(pg, false);
+ }
+
+ buf = sla_buffer_map(ret);
+ if (!buf)
+ goto no_reclaim_exit;
+
+ buf->capacity_sz = (npages << PAGE_SHIFT);
+ sla_buffer_unmap(ret, buf);
+
+ if (firmware_state) {
+ if (scatter) {
+ for (i = 0; i < npages; ++i) {
+ if (rmp_make_private(sla_to_pfn(scatter[i]), 0,
+ PG_LEVEL_4K, 0, true))
+ goto free_exit;
+ }
+ } else {
+ if (rmp_make_private(sla_to_pfn(ret), 0, PG_LEVEL_4K, 0, true))
+ goto no_reclaim_exit;
+ }
+ }
+
+ return ret;
+
+no_reclaim_exit:
+ firmware_state = false;
+free_exit:
+ sla_free(ret, len, firmware_state);
+ return SLA_NULL;
+}
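
Reviewer note: firmware_state controls RMP ownership of the buffer
pages. The intended lifecycle, paired between sla_alloc() and sla_free()
(sketch):

	sla = sla_alloc(len, true);	/* rmp_make_private() each page  */
	/* ... pages are firmware-owned in the RMP while in use ...    */
	sla_free(sla, len, true);	/* snp_reclaim_pages(), then free */

Mismatched pairing (allocating with true, freeing with false) would leak
firmware-owned pages in the RMP, which is why no_reclaim_exit — taken
before any page has been handed to firmware — clears firmware_state
before falling into the shared free path.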
+
+/* Expands a buffer, only firmware owned buffers allowed for now */
+static int sla_expand(struct sla_addr_t *sla, size_t *len)
+{
+ struct sla_buffer_hdr *oldbuf = sla_buffer_map(*sla), *newbuf;
+ struct sla_addr_t oldsla = *sla, newsla;
+ size_t oldlen = *len, newlen;
+
+ if (!oldbuf)
+ return -EFAULT;
+
+ newlen = oldbuf->capacity_sz;
+ if (oldbuf->capacity_sz == oldlen) {
+ /* This buffer does not need expanding, it must be the other one */
+ sla_buffer_unmap(oldsla, oldbuf);
+ return 1;
+ }
+
+ pr_notice("Expanding BUFFER from %zu to %zu bytes\n", oldlen, newlen);
+
+ newsla = sla_alloc(newlen, true);
+ if (IS_SLA_NULL(newsla))
+ return -ENOMEM;
+
+ newbuf = sla_buffer_map(newsla);
+ if (!newbuf) {
+ sla_free(newsla, newlen, true);
+ return -EFAULT;
+ }
+
+ memcpy(newbuf, oldbuf, oldlen);
+
+ sla_buffer_unmap(newsla, newbuf);
+ sla_free(oldsla, oldlen, true);
+ *sla = newsla;
+ *len = newlen;
+
+ return 0;
+}
+
+static int sev_tio_do_cmd(int cmd, void *data, size_t data_len, int *psp_ret,
+ struct tsm_dsm_tio *dev_data)
+{
+ int rc;
+
+ *psp_ret = 0;
+ rc = sev_do_cmd(cmd, data, psp_ret);
+
+ if (WARN_ON(!rc && *psp_ret == SEV_RET_SPDM_REQUEST))
+ return -EIO;
+
+ if (rc == 0 && *psp_ret == SEV_RET_EXPAND_BUFFER_LENGTH_REQUEST) {
+ int rc1, rc2;
+
+ rc1 = sla_expand(&dev_data->output, &dev_data->output_len);
+ if (rc1 < 0)
+ return rc1;
+
+ rc2 = sla_expand(&dev_data->scratch, &dev_data->scratch_len);
+ if (rc2 < 0)
+ return rc2;
+
+ if (!rc1 && !rc2)
+ /* Neither buffer requires expansion, this is wrong */
+ return -EFAULT;
+
+ *psp_ret = 0;
+ rc = sev_do_cmd(cmd, data, psp_ret);
+ }
+
+ if ((rc == 0 || rc == -EIO) && *psp_ret == SEV_RET_SPDM_REQUEST) {
+ struct spdm_dobj_hdr_resp *resp_hdr;
+ struct spdm_dobj_hdr_req *req_hdr;
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ size_t resp_len = tio_status->spdm_req_size_max -
+ (sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) + sizeof(struct sla_buffer_hdr));
+
+ if (!dev_data->cmd) {
+ if (WARN_ON_ONCE(!data_len || (data_len != *(u32 *) data)))
+ return -EINVAL;
+ if (WARN_ON(data_len > sizeof(dev_data->cmd_data)))
+ return -EFAULT;
+ memcpy(dev_data->cmd_data, data, data_len);
+ memset(&dev_data->cmd_data[data_len], 0xFF,
+ sizeof(dev_data->cmd_data) - data_len);
+ dev_data->cmd = cmd;
+ }
+
+ req_hdr = sla_to_dobj_req_hdr(dev_data->reqbuf);
+ resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf);
+ switch (req_hdr->data_type) {
+ case DOBJ_DATA_TYPE_SPDM:
+ rc = PCI_DOE_FEATURE_CMA;
+ break;
+ case DOBJ_DATA_TYPE_SECURE_SPDM:
+ rc = PCI_DOE_FEATURE_SSESSION;
+ break;
+ default:
+ return -EINVAL;
+ }
+ resp_hdr->data_type = req_hdr->data_type;
+ dev_data->spdm.req_len = req_hdr->hdr.length -
+ sla_dobj_id_to_size(SPDM_DOBJ_ID_REQ);
+ dev_data->spdm.rsp_len = resp_len;
+ } else if (dev_data && dev_data->cmd) {
+ /* For either error or success just stop the bouncing */
+ memset(dev_data->cmd_data, 0, sizeof(dev_data->cmd_data));
+ dev_data->cmd = 0;
+ }
+
+ return rc;
+}
+
+int sev_tio_continue(struct tsm_dsm_tio *dev_data)
+{
+ struct spdm_dobj_hdr_resp *resp_hdr;
+ int ret;
+
+ if (!dev_data || !dev_data->cmd)
+ return -EINVAL;
+
+ resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf);
+ resp_hdr->hdr.length = ALIGN(sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) +
+ dev_data->spdm.rsp_len, 32);
+ dev_data->respbuf->payload_sz = resp_hdr->hdr.length;
+
+ ret = sev_tio_do_cmd(dev_data->cmd, dev_data->cmd_data, 0,
+ &dev_data->psp_ret, dev_data);
+ if (ret)
+ return ret;
+
+ if (dev_data->psp_ret != SEV_RET_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
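
Reviewer note: sev_tio_do_cmd() and sev_tio_continue() form the two ends
of an SPDM bounce loop: the PSP answers with SEV_RET_SPDM_REQUEST and an
SPDM request in @reqbuf, the caller transports it to the device over PCI
DOE, deposits the answer in @respbuf, and re-issues the saved command
until the PSP stops asking. Caller-side sketch (this is what
sev_tio_spdm_cmd() in sev-dev-tsm.c does):

	ret = sev_tio_do_cmd(cmd, data, len, &psp_ret, dev_data);
	while (ret == PCI_DOE_FEATURE_CMA || ret == PCI_DOE_FEATURE_SSESSION) {
		ret = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG, ret,
			      spdm->req, spdm->req_len,
			      spdm->rsp, spdm->rsp_len);
		if (ret < 0)
			break;		/* transport error */
		spdm->rsp_len = ret;	/* actual response length */
		ret = sev_tio_continue(dev_data);	/* 0 means done */
	}
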
+
+static void spdm_ctrl_init(struct spdm_ctrl *ctrl, struct tsm_dsm_tio *dev_data)
+{
+ ctrl->req = dev_data->req;
+ ctrl->resp = dev_data->resp;
+ ctrl->scratch = dev_data->scratch;
+ ctrl->output = dev_data->output;
+}
+
+static void spdm_ctrl_free(struct tsm_dsm_tio *dev_data)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ size_t len = tio_status->spdm_req_size_max -
+ (sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) +
+ sizeof(struct sla_buffer_hdr));
+ struct tsm_spdm *spdm = &dev_data->spdm;
+
+ sla_buffer_unmap(dev_data->resp, dev_data->respbuf);
+ sla_buffer_unmap(dev_data->req, dev_data->reqbuf);
+ spdm->rsp = NULL;
+ spdm->req = NULL;
+ sla_free(dev_data->req, len, true);
+ sla_free(dev_data->resp, len, false);
+ sla_free(dev_data->scratch, tio_status->spdm_scratch_size_max, true);
+
+ dev_data->req.sla = 0;
+ dev_data->resp.sla = 0;
+ dev_data->scratch.sla = 0;
+ dev_data->respbuf = NULL;
+ dev_data->reqbuf = NULL;
+ sla_free(dev_data->output, tio_status->spdm_out_size_max, true);
+}
+
+static int spdm_ctrl_alloc(struct tsm_dsm_tio *dev_data)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ struct tsm_spdm *spdm = &dev_data->spdm;
+ int ret;
+
+ dev_data->req = sla_alloc(tio_status->spdm_req_size_max, true);
+ dev_data->resp = sla_alloc(tio_status->spdm_req_size_max, false);
+ dev_data->scratch_len = tio_status->spdm_scratch_size_max;
+ dev_data->scratch = sla_alloc(dev_data->scratch_len, true);
+ dev_data->output_len = tio_status->spdm_out_size_max;
+ dev_data->output = sla_alloc(dev_data->output_len, true);
+
+ if (IS_SLA_NULL(dev_data->req) || IS_SLA_NULL(dev_data->resp) ||
+ IS_SLA_NULL(dev_data->scratch) || IS_SLA_NULL(dev_data->output)) {
+ ret = -ENOMEM;
+ goto free_spdm_exit;
+ }
+
+ dev_data->reqbuf = sla_buffer_map(dev_data->req);
+ dev_data->respbuf = sla_buffer_map(dev_data->resp);
+ if (!dev_data->reqbuf || !dev_data->respbuf) {
+ ret = -EFAULT;
+ goto free_spdm_exit;
+ }
+
+ spdm->req = sla_to_data(dev_data->reqbuf, SPDM_DOBJ_ID_REQ);
+ spdm->rsp = sla_to_data(dev_data->respbuf, SPDM_DOBJ_ID_RESP);
+ if (!spdm->req || !spdm->rsp) {
+ ret = -EFAULT;
+ goto free_spdm_exit;
+ }
+
+ dobj_response_init(dev_data->respbuf);
+
+ return 0;
+
+free_spdm_exit:
+ spdm_ctrl_free(dev_data);
+ return ret;
+}
+
+int sev_tio_init_locked(void *tio_status_page)
+{
+ struct sev_tio_status *tio_status = tio_status_page;
+ struct sev_data_tio_status data_status = {
+ .length = sizeof(data_status),
+ };
+ int ret, psp_ret;
+
+ data_status.status_paddr = __psp_pa(tio_status_page);
+ ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret);
+ if (ret)
+ return ret;
+
+ if (tio_status->length < offsetofend(struct sev_tio_status, tdictx_size) ||
+ tio_status->reserved)
+ return -EFAULT;
+
+ if (!tio_status->tio_en && !tio_status->tio_init_done)
+ return -ENOENT;
+
+ if (tio_status->tio_init_done)
+ return -EBUSY;
+
+ struct sev_data_tio_init ti = { .length = sizeof(ti) };
+
+ ret = __sev_do_cmd_locked(SEV_CMD_TIO_INIT, &ti, &psp_ret);
+ if (ret)
+ return ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int sev_tio_dev_create(struct tsm_dsm_tio *dev_data, u16 device_id,
+ u16 root_port_id, u8 segment_id)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ struct sev_data_tio_dev_create create = {
+ .length = sizeof(create),
+ .device_id = device_id,
+ .root_port_id = root_port_id,
+ .segment_id = segment_id,
+ };
+ void *data_pg;
+ int ret;
+
+ dev_data->dev_ctx = sla_alloc(tio_status->devctx_size, true);
+ if (IS_SLA_NULL(dev_data->dev_ctx))
+ return -ENOMEM;
+
+ data_pg = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT);
+ if (!data_pg) {
+ ret = -ENOMEM;
+ goto free_ctx_exit;
+ }
+
+ create.dev_ctx_sla = dev_data->dev_ctx;
+ ret = sev_do_cmd(SEV_CMD_TIO_DEV_CREATE, &create, &dev_data->psp_ret);
+ if (ret)
+ goto free_data_pg_exit;
+
+ dev_data->data_pg = data_pg;
+
+ return 0;
+
+free_data_pg_exit:
+ snp_free_firmware_page(data_pg);
+free_ctx_exit:
+ sla_free(create.dev_ctx_sla, tio_status->devctx_size, true);
+ return ret;
+}
+
+int sev_tio_dev_reclaim(struct tsm_dsm_tio *dev_data)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ struct sev_data_tio_dev_reclaim r = {
+ .length = sizeof(r),
+ .dev_ctx_sla = dev_data->dev_ctx,
+ };
+ int ret;
+
+ if (dev_data->data_pg) {
+ snp_free_firmware_page(dev_data->data_pg);
+ dev_data->data_pg = NULL;
+ }
+
+ if (IS_SLA_NULL(dev_data->dev_ctx))
+ return 0;
+
+ ret = sev_do_cmd(SEV_CMD_TIO_DEV_RECLAIM, &r, &dev_data->psp_ret);
+
+ sla_free(dev_data->dev_ctx, tio_status->devctx_size, true);
+ dev_data->dev_ctx = SLA_NULL;
+
+ spdm_ctrl_free(dev_data);
+
+ return ret;
+}
+
+int sev_tio_dev_connect(struct tsm_dsm_tio *dev_data, u8 tc_mask, u8 ids[8], u8 cert_slot)
+{
+ struct sev_data_tio_dev_connect connect = {
+ .length = sizeof(connect),
+ .tc_mask = tc_mask,
+ .cert_slot = cert_slot,
+ .dev_ctx_sla = dev_data->dev_ctx,
+ .ide_stream_id = {
+ ids[0], ids[1], ids[2], ids[3],
+ ids[4], ids[5], ids[6], ids[7]
+ },
+ };
+ int ret;
+
+ if (WARN_ON(IS_SLA_NULL(dev_data->dev_ctx)))
+ return -EFAULT;
+ if (!(tc_mask & 1))
+ return -EINVAL;
+
+ ret = spdm_ctrl_alloc(dev_data);
+ if (ret)
+ return ret;
+
+ spdm_ctrl_init(&connect.spdm_ctrl, dev_data);
+
+ return sev_tio_do_cmd(SEV_CMD_TIO_DEV_CONNECT, &connect, sizeof(connect),
+ &dev_data->psp_ret, dev_data);
+}
+
+int sev_tio_dev_disconnect(struct tsm_dsm_tio *dev_data, bool force)
+{
+ struct sev_data_tio_dev_disconnect dc = {
+ .length = sizeof(dc),
+ .dev_ctx_sla = dev_data->dev_ctx,
+ .flags = force ? TIO_DEV_DISCONNECT_FLAG_FORCE : 0,
+ };
+
+ if (WARN_ON_ONCE(IS_SLA_NULL(dev_data->dev_ctx)))
+ return -EFAULT;
+
+ spdm_ctrl_init(&dc.spdm_ctrl, dev_data);
+
+ return sev_tio_do_cmd(SEV_CMD_TIO_DEV_DISCONNECT, &dc, sizeof(dc),
+ &dev_data->psp_ret, dev_data);
+}
+
+int sev_tio_cmd_buffer_len(int cmd)
+{
+ switch (cmd) {
+ case SEV_CMD_TIO_STATUS: return sizeof(struct sev_data_tio_status);
+ case SEV_CMD_TIO_INIT: return sizeof(struct sev_data_tio_init);
+ case SEV_CMD_TIO_DEV_CREATE: return sizeof(struct sev_data_tio_dev_create);
+ case SEV_CMD_TIO_DEV_RECLAIM: return sizeof(struct sev_data_tio_dev_reclaim);
+ case SEV_CMD_TIO_DEV_CONNECT: return sizeof(struct sev_data_tio_dev_connect);
+ case SEV_CMD_TIO_DEV_DISCONNECT: return sizeof(struct sev_data_tio_dev_disconnect);
+ default: return 0;
+ }
+}
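
Reviewer note: this lookup is wired into the generic dispatcher below —
sev_cmd_buffer_len() in sev-dev.c now falls through to
sev_tio_cmd_buffer_len() in its default case, so the TIO commands get
their command-buffer lengths without growing the main switch statement.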
diff --git a/drivers/crypto/ccp/sev-dev-tio.h b/drivers/crypto/ccp/sev-dev-tio.h
new file mode 100644
index 000000000000..67512b3dbc53
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev-tio.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __PSP_SEV_TIO_H__
+#define __PSP_SEV_TIO_H__
+
+#include <linux/pci-tsm.h>
+#include <linux/pci-ide.h>
+#include <linux/tsm.h>
+#include <uapi/linux/psp-sev.h>
+
+struct sla_addr_t {
+ union {
+ u64 sla;
+ struct {
+ u64 page_type :1,
+ page_size :1,
+ reserved1 :10,
+ pfn :40,
+ reserved2 :12;
+ };
+ };
+} __packed;
+
+#define SEV_TIO_MAX_COMMAND_LENGTH 128
+
+/* SPDM control structure for DOE */
+struct tsm_spdm {
+ unsigned long req_len;
+ void *req;
+ unsigned long rsp_len;
+ void *rsp;
+};
+
+/* Describes TIO device */
+struct tsm_dsm_tio {
+ u8 cert_slot;
+ struct sla_addr_t dev_ctx;
+ struct sla_addr_t req;
+ struct sla_addr_t resp;
+ struct sla_addr_t scratch;
+ struct sla_addr_t output;
+ size_t output_len;
+ size_t scratch_len;
+ struct tsm_spdm spdm;
+ struct sla_buffer_hdr *reqbuf; /* vmap'ed @req for DOE */
+ struct sla_buffer_hdr *respbuf; /* vmap'ed @resp for DOE */
+
+ int cmd;
+ int psp_ret;
+ u8 cmd_data[SEV_TIO_MAX_COMMAND_LENGTH];
+ void *data_pg; /* Data page for DEV_STATUS/TDI_STATUS/TDI_INFO/ASID_FENCE */
+
+#define TIO_IDE_MAX_TC 8
+ struct pci_ide *ide[TIO_IDE_MAX_TC];
+};
+
+/* Describes TSM structure for PF0 pointed by pci_dev->tsm */
+struct tio_dsm {
+ struct pci_tsm_pf0 tsm;
+ struct tsm_dsm_tio data;
+ struct sev_device *sev;
+};
+
+/* Data object IDs */
+#define SPDM_DOBJ_ID_NONE 0
+#define SPDM_DOBJ_ID_REQ 1
+#define SPDM_DOBJ_ID_RESP 2
+
+struct spdm_dobj_hdr {
+ u32 id; /* Data object type identifier */
+ u32 length; /* Length of the data object, INCLUDING THIS HEADER */
+ struct { /* Version of the data object structure */
+ u8 minor;
+ u8 major;
+ } version;
+} __packed;
+
+/**
+ * struct sev_tio_status - TIO_STATUS command's info_paddr buffer
+ *
+ * @length: Length of this structure in bytes
+ * @tio_en: Indicates that SNP_INIT_EX initialized the RMP for SEV-TIO
+ * @tio_init_done: Indicates TIO_INIT has been invoked
+ * @spdm_req_size_min: Minimum SPDM request buffer size in bytes
+ * @spdm_req_size_max: Maximum SPDM request buffer size in bytes
+ * @spdm_scratch_size_min: Minimum SPDM scratch buffer size in bytes
+ * @spdm_scratch_size_max: Maximum SPDM scratch buffer size in bytes
+ * @spdm_out_size_min: Minimum SPDM output buffer size in bytes
+ * @spdm_out_size_max: Maximum for the SPDM output buffer size in bytes
+ * @spdm_rsp_size_min: Minimum SPDM response buffer size in bytes
+ * @spdm_rsp_size_max: Maximum SPDM response buffer size in bytes
+ * @devctx_size: Size of a device context buffer in bytes
+ * @tdictx_size: Size of a TDI context buffer in bytes
+ * @tio_crypto_alg: TIO crypto algorithms supported
+ */
+struct sev_tio_status {
+ u32 length;
+ u32 tio_en :1,
+ tio_init_done :1,
+ reserved :30;
+ u32 spdm_req_size_min;
+ u32 spdm_req_size_max;
+ u32 spdm_scratch_size_min;
+ u32 spdm_scratch_size_max;
+ u32 spdm_out_size_min;
+ u32 spdm_out_size_max;
+ u32 spdm_rsp_size_min;
+ u32 spdm_rsp_size_max;
+ u32 devctx_size;
+ u32 tdictx_size;
+ u32 tio_crypto_alg;
+ u8 reserved2[12];
+} __packed;
+
+int sev_tio_init_locked(void *tio_status_page);
+int sev_tio_continue(struct tsm_dsm_tio *dev_data);
+
+int sev_tio_dev_create(struct tsm_dsm_tio *dev_data, u16 device_id, u16 root_port_id,
+ u8 segment_id);
+int sev_tio_dev_connect(struct tsm_dsm_tio *dev_data, u8 tc_mask, u8 ids[8], u8 cert_slot);
+int sev_tio_dev_disconnect(struct tsm_dsm_tio *dev_data, bool force);
+int sev_tio_dev_reclaim(struct tsm_dsm_tio *dev_data);
+
+#endif /* __PSP_SEV_TIO_H__ */
diff --git a/drivers/crypto/ccp/sev-dev-tsm.c b/drivers/crypto/ccp/sev-dev-tsm.c
new file mode 100644
index 000000000000..ea29cd5d0ff9
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev-tsm.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+// Interface to CCP/SEV-TIO for generic PCIe TDISP module
+
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/tsm.h>
+#include <linux/iommu.h>
+#include <linux/pci-doe.h>
+#include <linux/bitfield.h>
+#include <linux/module.h>
+
+#include <asm/sev-common.h>
+#include <asm/sev.h>
+
+#include "psp-dev.h"
+#include "sev-dev.h"
+#include "sev-dev-tio.h"
+
+MODULE_IMPORT_NS("PCI_IDE");
+
+#define TIO_DEFAULT_NR_IDE_STREAMS 1
+
+static uint nr_ide_streams = TIO_DEFAULT_NR_IDE_STREAMS;
+module_param_named(ide_nr, nr_ide_streams, uint, 0644);
+MODULE_PARM_DESC(ide_nr, "Set the maximum number of IDE streams per PHB");
+
+#define dev_to_sp(dev) ((struct sp_device *)dev_get_drvdata(dev))
+#define dev_to_psp(dev) ((struct psp_device *)(dev_to_sp(dev)->psp_data))
+#define dev_to_sev(dev) ((struct sev_device *)(dev_to_psp(dev)->sev_data))
+#define tsm_dev_to_sev(tsmdev) dev_to_sev((tsmdev)->dev.parent)
+
+#define pdev_to_tio_dsm(pdev) (container_of((pdev)->tsm, struct tio_dsm, tsm.base_tsm))
+
+static int sev_tio_spdm_cmd(struct tio_dsm *dsm, int ret)
+{
+ struct tsm_dsm_tio *dev_data = &dsm->data;
+ struct tsm_spdm *spdm = &dev_data->spdm;
+
+ /* Check the main command handler response before entering the loop */
+ if (ret == 0 && dev_data->psp_ret != SEV_RET_SUCCESS)
+ return -EINVAL;
+
+ if (ret <= 0)
+ return ret;
+
+ /* ret > 0 means "SPDM requested" */
+ while (ret == PCI_DOE_FEATURE_CMA || ret == PCI_DOE_FEATURE_SSESSION) {
+ ret = pci_doe(dsm->tsm.doe_mb, PCI_VENDOR_ID_PCI_SIG, ret,
+ spdm->req, spdm->req_len, spdm->rsp, spdm->rsp_len);
+ if (ret < 0)
+ break;
+
+ WARN_ON_ONCE(ret == 0); /* The response should never be empty */
+ spdm->rsp_len = ret;
+ ret = sev_tio_continue(dev_data);
+ }
+
+ return ret;
+}
+
+static int stream_enable(struct pci_ide *ide)
+{
+ struct pci_dev *rp = pcie_find_root_port(ide->pdev);
+ int ret;
+
+ ret = pci_ide_stream_enable(rp, ide);
+ if (ret)
+ return ret;
+
+ ret = pci_ide_stream_enable(ide->pdev, ide);
+ if (ret)
+ pci_ide_stream_disable(rp, ide);
+
+ return ret;
+}
+
+static int streams_enable(struct pci_ide **ide)
+{
+ int ret = 0;
+
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (ide[i]) {
+ ret = stream_enable(ide[i]);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void stream_disable(struct pci_ide *ide)
+{
+ pci_ide_stream_disable(ide->pdev, ide);
+ pci_ide_stream_disable(pcie_find_root_port(ide->pdev), ide);
+}
+
+static void streams_disable(struct pci_ide **ide)
+{
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
+ if (ide[i])
+ stream_disable(ide[i]);
+}
+
+static void stream_setup(struct pci_ide *ide)
+{
+ struct pci_dev *rp = pcie_find_root_port(ide->pdev);
+
+ ide->partner[PCI_IDE_EP].rid_start = 0;
+ ide->partner[PCI_IDE_EP].rid_end = 0xffff;
+ ide->partner[PCI_IDE_RP].rid_start = 0;
+ ide->partner[PCI_IDE_RP].rid_end = 0xffff;
+
+ ide->pdev->ide_cfg = 0;
+ ide->pdev->ide_tee_limit = 1;
+ rp->ide_cfg = 1;
+ rp->ide_tee_limit = 0;
+
+ pci_warn(ide->pdev, "Forcing CFG/TEE for %s\n", pci_name(rp));
+ pci_ide_stream_setup(ide->pdev, ide);
+ pci_ide_stream_setup(rp, ide);
+}
+
+static u8 streams_setup(struct pci_ide **ide, u8 *ids)
+{
+ bool def = false;
+ u8 tc_mask = 0;
+ int i;
+
+ for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (!ide[i]) {
+ ids[i] = 0xFF;
+ continue;
+ }
+
+ tc_mask |= BIT(i);
+ ids[i] = ide[i]->stream_id;
+
+ if (!def) {
+ struct pci_ide_partner *settings;
+
+ settings = pci_ide_to_settings(ide[i]->pdev, ide[i]);
+ settings->default_stream = 1;
+ def = true;
+ }
+
+ stream_setup(ide[i]);
+ }
+
+ return tc_mask;
+}
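
Reviewer note: streams_setup() packs the per-traffic-class state into
the form TIO_DEV_CONNECT expects: bit k of tc_mask marks TC k as in use,
and ids[k] carries its IDE stream ID (0xFF for unused slots), matching
the @tc_mask/@ide_stream_id documentation on struct
sev_data_tio_dev_connect above.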
+
+static int streams_register(struct pci_ide **ide)
+{
+ int ret = 0, i;
+
+ for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (ide[i]) {
+ ret = pci_ide_stream_register(ide[i]);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void streams_unregister(struct pci_ide **ide)
+{
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
+ if (ide[i])
+ pci_ide_stream_unregister(ide[i]);
+}
+
+static void stream_teardown(struct pci_ide *ide)
+{
+ pci_ide_stream_teardown(ide->pdev, ide);
+ pci_ide_stream_teardown(pcie_find_root_port(ide->pdev), ide);
+}
+
+static void streams_teardown(struct pci_ide **ide)
+{
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (ide[i]) {
+ stream_teardown(ide[i]);
+ pci_ide_stream_free(ide[i]);
+ ide[i] = NULL;
+ }
+ }
+}
+
+static int stream_alloc(struct pci_dev *pdev, struct pci_ide **ide,
+ unsigned int tc)
+{
+ struct pci_dev *rp = pcie_find_root_port(pdev);
+ struct pci_ide *ide1;
+
+ if (ide[tc]) {
+ pci_err(pdev, "Stream for class=%d already registered", tc);
+ return -EBUSY;
+ }
+
+ /* FIXME: find a better way */
+ if (nr_ide_streams != TIO_DEFAULT_NR_IDE_STREAMS)
+ pci_notice(pdev, "Enabling non-default %u streams\n", nr_ide_streams);
+ pci_ide_set_nr_streams(to_pci_host_bridge(rp->bus->bridge), nr_ide_streams);
+
+ ide1 = pci_ide_stream_alloc(pdev);
+ if (!ide1)
+ return -ENOMEM;
+
+ /* Blindly assign streamid=0 to TC=0, and so on */
+ ide1->stream_id = tc;
+
+ ide[tc] = ide1;
+
+ return 0;
+}
+
+static struct pci_tsm *tio_pf0_probe(struct pci_dev *pdev, struct sev_device *sev)
+{
+ struct tio_dsm *dsm __free(kfree) = kzalloc(sizeof(*dsm), GFP_KERNEL);
+ int rc;
+
+ if (!dsm)
+ return NULL;
+
+ rc = pci_tsm_pf0_constructor(pdev, &dsm->tsm, sev->tsmdev);
+ if (rc)
+ return NULL;
+
+ pci_dbg(pdev, "TSM enabled\n");
+ dsm->sev = sev;
+ return &no_free_ptr(dsm)->tsm.base_tsm;
+}
+
+static struct pci_tsm *dsm_probe(struct tsm_dev *tsmdev, struct pci_dev *pdev)
+{
+ struct sev_device *sev = tsm_dev_to_sev(tsmdev);
+
+ if (is_pci_tsm_pf0(pdev))
+ return tio_pf0_probe(pdev, sev);
+ return NULL;
+}
+
+static void dsm_remove(struct pci_tsm *tsm)
+{
+ struct pci_dev *pdev = tsm->pdev;
+
+ pci_dbg(pdev, "TSM disabled\n");
+
+ if (is_pci_tsm_pf0(pdev)) {
+ struct tio_dsm *dsm = container_of(tsm, struct tio_dsm, tsm.base_tsm);
+
+ pci_tsm_pf0_destructor(&dsm->tsm);
+ kfree(dsm);
+ }
+}
+
+static int dsm_create(struct tio_dsm *dsm)
+{
+ struct pci_dev *pdev = dsm->tsm.base_tsm.pdev;
+ u8 segment_id = pdev->bus ? pci_domain_nr(pdev->bus) : 0;
+ struct pci_dev *rootport = pcie_find_root_port(pdev);
+ u16 device_id = pci_dev_id(pdev);
+ u16 root_port_id;
+ u32 lnkcap = 0;
+
+ if (pci_read_config_dword(rootport, pci_pcie_cap(rootport) + PCI_EXP_LNKCAP,
+ &lnkcap))
+ return -ENODEV;
+
+ root_port_id = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
+
+ return sev_tio_dev_create(&dsm->data, device_id, root_port_id, segment_id);
+}
+
+static int dsm_connect(struct pci_dev *pdev)
+{
+ struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
+ struct tsm_dsm_tio *dev_data = &dsm->data;
+ u8 ids[TIO_IDE_MAX_TC];
+ u8 tc_mask;
+ int ret;
+
+ if (pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG,
+ PCI_DOE_FEATURE_SSESSION) != dsm->tsm.doe_mb) {
+ pci_err(pdev, "CMA DOE MB must support SSESSION\n");
+ return -EFAULT;
+ }
+
+ ret = stream_alloc(pdev, dev_data->ide, 0);
+ if (ret)
+ return ret;
+
+ ret = dsm_create(dsm);
+ if (ret)
+ goto ide_free_exit;
+
+ tc_mask = streams_setup(dev_data->ide, ids);
+
+ ret = sev_tio_dev_connect(dev_data, tc_mask, ids, dev_data->cert_slot);
+ ret = sev_tio_spdm_cmd(dsm, ret);
+ if (ret)
+ goto free_exit;
+
+ ret = streams_enable(dev_data->ide);
+ if (ret)
+ goto free_exit;
+
+ ret = streams_register(dev_data->ide);
+ if (ret)
+ goto free_exit;
+
+ return 0;
+
+free_exit:
+ sev_tio_dev_reclaim(dev_data);
+
+ streams_disable(dev_data->ide);
+ide_free_exit:
+
+ streams_teardown(dev_data->ide);
+
+ return ret;
+}
+
+static void dsm_disconnect(struct pci_dev *pdev)
+{
+ bool force = SYSTEM_HALT <= system_state && system_state <= SYSTEM_RESTART;
+ struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
+ struct tsm_dsm_tio *dev_data = &dsm->data;
+ int ret;
+
+ ret = sev_tio_dev_disconnect(dev_data, force);
+ ret = sev_tio_spdm_cmd(dsm, ret);
+ if (ret && !force) {
+ ret = sev_tio_dev_disconnect(dev_data, true);
+ sev_tio_spdm_cmd(dsm, ret);
+ }
+
+ sev_tio_dev_reclaim(dev_data);
+
+ streams_disable(dev_data->ide);
+ streams_unregister(dev_data->ide);
+ streams_teardown(dev_data->ide);
+}
+
+static struct pci_tsm_ops sev_tsm_ops = {
+ .probe = dsm_probe,
+ .remove = dsm_remove,
+ .connect = dsm_connect,
+ .disconnect = dsm_disconnect,
+};
+
+void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page)
+{
+ struct sev_tio_status *t = kzalloc(sizeof(*t), GFP_KERNEL);
+ struct tsm_dev *tsmdev;
+ int ret;
+
+ WARN_ON(sev->tio_status);
+
+ if (!t)
+ return;
+
+ ret = sev_tio_init_locked(tio_status_page);
+ if (ret) {
+ pr_warn("SEV-TIO STATUS failed with %d\n", ret);
+ goto error_exit;
+ }
+
+ tsmdev = tsm_register(sev->dev, &sev_tsm_ops);
+ if (IS_ERR(tsmdev)) {
+ ret = PTR_ERR(tsmdev);
+ goto error_exit;
+ }
+
+ memcpy(t, tio_status_page, sizeof(*t));
+
+ pr_notice("SEV-TIO status: EN=%d INIT_DONE=%d rq=%d..%d rs=%d..%d "
+ "scr=%d..%d out=%d..%d dev=%d tdi=%d algos=%x\n",
+ t->tio_en, t->tio_init_done,
+ t->spdm_req_size_min, t->spdm_req_size_max,
+ t->spdm_rsp_size_min, t->spdm_rsp_size_max,
+ t->spdm_scratch_size_min, t->spdm_scratch_size_max,
+ t->spdm_out_size_min, t->spdm_out_size_max,
+ t->devctx_size, t->tdictx_size,
+ t->tio_crypto_alg);
+
+ sev->tsmdev = tsmdev;
+ sev->tio_status = t;
+
+ return;
+
+error_exit:
+	pr_err("Failed to enable SEV-TIO: ret=%d en=%d initdone=%d SEV=%d\n",
+	       ret, t->tio_en, t->tio_init_done, boot_cpu_has(X86_FEATURE_SEV));
+	kfree(t);
+}
+
+void sev_tsm_uninit(struct sev_device *sev)
+{
+ if (sev->tsmdev)
+ tsm_unregister(sev->tsmdev);
+
+ sev->tsmdev = NULL;
+}
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 3451bada884e..956ea609d0cc 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -28,6 +28,7 @@
#include <linux/fs_struct.h>
#include <linux/psp.h>
#include <linux/amd-iommu.h>
+#include <linux/crash_dump.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
@@ -74,6 +75,14 @@ static bool psp_init_on_probe = true;
module_param(psp_init_on_probe, bool, 0444);
MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on module init. Else the PSP will be initialized on the first command requiring it");
+#if IS_ENABLED(CONFIG_PCI_TSM)
+static bool sev_tio_enabled = true;
+module_param_named(tio, sev_tio_enabled, bool, 0444);
+MODULE_PARM_DESC(tio, "Enables TIO in SNP_INIT_EX");
+#else
+static const bool sev_tio_enabled = false;
+#endif
+
MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
@@ -82,6 +91,21 @@ MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */
static bool psp_dead;
static int psp_timeout;
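+/*
+ * Pages transitioned to the HV_FIXED state by SNP_INIT_EX cannot be
+ * returned to the page allocator until reboot, so they are tracked on
+ * a list and freed entries are kept around for exact-fit re-use.
+ */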
+enum snp_hv_fixed_pages_state {
+ ALLOCATED,
+ HV_FIXED,
+};
+
+struct snp_hv_fixed_pages_entry {
+ struct list_head list;
+ struct page *page;
+ unsigned int order;
+ bool free;
+ enum snp_hv_fixed_pages_state page_state;
+};
+
+static LIST_HEAD(snp_hv_fixed_pages);
+
/* Trusted Memory Region (TMR):
* The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
* to allocate the memory, which will return aligned memory for the specified
@@ -233,7 +257,9 @@ static int sev_cmd_buffer_len(int cmd)
case SEV_CMD_SNP_GUEST_REQUEST: return sizeof(struct sev_data_snp_guest_request);
case SEV_CMD_SNP_CONFIG: return sizeof(struct sev_user_data_snp_config);
case SEV_CMD_SNP_COMMIT: return sizeof(struct sev_data_snp_commit);
- default: return 0;
+ case SEV_CMD_SNP_FEATURE_INFO: return sizeof(struct sev_data_snp_feature_info);
+ case SEV_CMD_SNP_VLEK_LOAD: return sizeof(struct sev_user_data_snp_vlek_load);
+ default: return sev_tio_cmd_buffer_len(cmd);
}
return 0;
@@ -241,27 +267,20 @@ static int sev_cmd_buffer_len(int cmd)
static struct file *open_file_as_root(const char *filename, int flags, umode_t mode)
{
- struct file *fp;
- struct path root;
- struct cred *cred;
- const struct cred *old_cred;
+ struct path root __free(path_put) = {};
task_lock(&init_task);
get_fs_root(init_task.fs, &root);
task_unlock(&init_task);
- cred = prepare_creds();
+ CLASS(prepare_creds, cred)();
if (!cred)
return ERR_PTR(-ENOMEM);
- cred->fsuid = GLOBAL_ROOT_UID;
- old_cred = override_creds(cred);
-
- fp = file_open_root(&root, filename, flags, mode);
- path_put(&root);
- put_cred(revert_creds(old_cred));
+ cred->fsuid = GLOBAL_ROOT_UID;
- return fp;
+ scoped_with_creds(cred)
+ return file_open_root(&root, filename, flags, mode);
}
static int sev_read_init_ex_file(void)
@@ -369,13 +388,7 @@ static int sev_write_init_ex_file_if_required(int cmd_id)
return sev_write_init_ex_file();
}
-/*
- * snp_reclaim_pages() needs __sev_do_cmd_locked(), and __sev_do_cmd_locked()
- * needs snp_reclaim_pages(), so a forward declaration is needed.
- */
-static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
-
-static int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
+int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
{
int ret, err, i;
@@ -409,6 +422,7 @@ cleanup:
snp_leak_pages(__phys_to_pfn(paddr), npages - i);
return ret;
}
+EXPORT_SYMBOL_GPL(snp_reclaim_pages);
static int rmp_mark_pages_firmware(unsigned long paddr, unsigned int npages, bool locked)
{
@@ -434,7 +448,7 @@ cleanup:
return rc;
}
-static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order)
+static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order, bool locked)
{
unsigned long npages = 1ul << order, paddr;
struct sev_device *sev;
@@ -453,7 +467,7 @@ static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order)
return page;
paddr = __pa((unsigned long)page_address(page));
- if (rmp_mark_pages_firmware(paddr, npages, false))
+ if (rmp_mark_pages_firmware(paddr, npages, locked))
return NULL;
return page;
@@ -463,7 +477,7 @@ void *snp_alloc_firmware_page(gfp_t gfp_mask)
{
struct page *page;
- page = __snp_alloc_firmware_pages(gfp_mask, 0);
+ page = __snp_alloc_firmware_pages(gfp_mask, 0, false);
return page ? page_address(page) : NULL;
}
@@ -498,7 +512,7 @@ static void *sev_fw_alloc(unsigned long len)
{
struct page *page;
- page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len));
+ page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len), true);
if (!page)
return NULL;
@@ -839,16 +853,17 @@ static int snp_reclaim_cmd_buf(int cmd, void *cmd_buf)
return 0;
}
-static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
+int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
{
struct cmd_buf_desc desc_list[CMD_BUF_DESC_MAX] = {0};
struct psp_device *psp = psp_master;
struct sev_device *sev;
unsigned int cmdbuff_hi, cmdbuff_lo;
unsigned int phys_lsb, phys_msb;
- unsigned int reg, ret = 0;
+ unsigned int reg;
void *cmd_buf;
int buf_len;
+ int ret = 0;
if (!psp || !psp->sev_data)
return -ENODEV;
@@ -1073,6 +1088,247 @@ static void snp_set_hsave_pa(void *arg)
wrmsrq(MSR_VM_HSAVE_PA, 0);
}
+/* Hypervisor Fixed pages API interface */
+static void snp_hv_fixed_pages_state_update(struct sev_device *sev,
+ enum snp_hv_fixed_pages_state page_state)
+{
+ struct snp_hv_fixed_pages_entry *entry;
+
+ /* List is protected by sev_cmd_mutex */
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (list_empty(&snp_hv_fixed_pages))
+ return;
+
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list)
+ entry->page_state = page_state;
+}
+
+/*
+ * Allocate HV_FIXED pages in 2MB-aligned sizes to ensure that whole
+ * 2MB pages are marked as HV_FIXED.
+ */
+struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages)
+{
+ struct psp_device *psp_master = psp_get_master_device();
+ struct snp_hv_fixed_pages_entry *entry;
+ struct sev_device *sev;
+ unsigned int order;
+ struct page *page;
+
+ if (!psp_master || !psp_master->sev_data)
+ return NULL;
+
+ sev = psp_master->sev_data;
+
+ order = get_order(PMD_SIZE * num_2mb_pages);
+
+ /*
+ * SNP_INIT_EX is protected by sev_cmd_mutex, therefore this list
+ * also needs to be protected using the same mutex.
+ */
+ guard(mutex)(&sev_cmd_mutex);
+
+ /*
+ * This API uses SNP_INIT_EX to transition allocated pages to HV_Fixed
+	 * page state; fail if SNP is already initialized.
+ */
+ if (sev->snp_initialized)
+ return NULL;
+
+ /* Re-use freed pages that match the request */
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list) {
+ /* Hypervisor fixed page allocator implements exact fit policy */
+ if (entry->order == order && entry->free) {
+ entry->free = false;
+ memset(page_address(entry->page), 0,
+ (1 << entry->order) * PAGE_SIZE);
+ return entry->page;
+ }
+ }
+
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!page)
+ return NULL;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ __free_pages(page, order);
+ return NULL;
+ }
+
+ entry->page = page;
+ entry->order = order;
+ list_add_tail(&entry->list, &snp_hv_fixed_pages);
+
+ return page;
+}
+
+void snp_free_hv_fixed_pages(struct page *page)
+{
+ struct psp_device *psp_master = psp_get_master_device();
+ struct snp_hv_fixed_pages_entry *entry, *nentry;
+
+ if (!psp_master || !psp_master->sev_data)
+ return;
+
+ /*
+ * SNP_INIT_EX is protected by sev_cmd_mutex, therefore this list
+ * also needs to be protected using the same mutex.
+ */
+ guard(mutex)(&sev_cmd_mutex);
+
+ list_for_each_entry_safe(entry, nentry, &snp_hv_fixed_pages, list) {
+ if (entry->page != page)
+ continue;
+
+ /*
+		 * The HV_FIXED page state cannot be changed until reboot,
+		 * and such pages cannot be used by an SNP guest, so they
+		 * cannot be returned to the page allocator.
+		 * Mark the pages as free internally to allow re-use.
+ */
+ if (entry->page_state == HV_FIXED) {
+ entry->free = true;
+ } else {
+ __free_pages(page, entry->order);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ return;
+ }
+}
+
+static void snp_add_hv_fixed_pages(struct sev_device *sev, struct sev_data_range_list *range_list)
+{
+ struct snp_hv_fixed_pages_entry *entry;
+ struct sev_data_range *range;
+ int num_elements;
+
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (list_empty(&snp_hv_fixed_pages))
+ return;
+
+ num_elements = list_count_nodes(&snp_hv_fixed_pages) +
+ range_list->num_elements;
+
+ /*
+	 * Ensure the list of HV_FIXED pages that will be passed to firmware
+	 * does not exceed the page-sized argument buffer.
+ */
+ if (num_elements * sizeof(*range) + sizeof(*range_list) > PAGE_SIZE) {
+ dev_warn(sev->dev, "Additional HV_Fixed pages cannot be accommodated, omitting\n");
+ return;
+ }
+
+ range = &range_list->ranges[range_list->num_elements];
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list) {
+ range->base = page_to_pfn(entry->page) << PAGE_SHIFT;
+ range->page_count = 1 << entry->order;
+ range++;
+ }
+ range_list->num_elements = num_elements;
+}
+
+static void snp_leak_hv_fixed_pages(void)
+{
+ struct snp_hv_fixed_pages_entry *entry;
+
+ /* List is protected by sev_cmd_mutex */
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (list_empty(&snp_hv_fixed_pages))
+ return;
+
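+	/* HV_FIXED pages cannot be reclaimed, so account them as leaked */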
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list)
+ if (entry->page_state == HV_FIXED)
+ __snp_leak_pages(page_to_pfn(entry->page),
+ 1 << entry->order, false);
+}
+
+bool sev_is_snp_ciphertext_hiding_supported(void)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+
+ if (!psp || !psp->sev_data)
+ return false;
+
+ sev = psp->sev_data;
+
+ /*
+	 * Feature information indicates whether the CipherTextHiding
+	 * feature is supported by the SEV firmware, and platform status
+	 * additionally indicates whether it is enabled in the
+	 * platform BIOS.
+ */
+ return ((sev->snp_feat_info_0.ecx & SNP_CIPHER_TEXT_HIDING_SUPPORTED) &&
+ sev->snp_plat_status.ciphertext_hiding_cap);
+}
+EXPORT_SYMBOL_GPL(sev_is_snp_ciphertext_hiding_supported);
+
+static int snp_get_platform_data(struct sev_device *sev, int *error)
+{
+ struct sev_data_snp_feature_info snp_feat_info;
+ struct snp_feature_info *feat_info;
+ struct sev_data_snp_addr buf;
+ struct page *page;
+ int rc;
+
+ /*
+ * This function is expected to be called before SNP is
+ * initialized.
+ */
+ if (sev->snp_initialized)
+ return -EINVAL;
+
+ buf.address = __psp_pa(&sev->snp_plat_status);
+ rc = sev_do_cmd(SEV_CMD_SNP_PLATFORM_STATUS, &buf, error);
+ if (rc) {
+ dev_err(sev->dev, "SNP PLATFORM_STATUS command failed, ret = %d, error = %#x\n",
+ rc, *error);
+ return rc;
+ }
+
+ sev->api_major = sev->snp_plat_status.api_major;
+ sev->api_minor = sev->snp_plat_status.api_minor;
+ sev->build = sev->snp_plat_status.build_id;
+
+ /*
+ * Do feature discovery of the currently loaded firmware,
+ * and cache feature information from CPUID 0x8000_0024,
+ * sub-function 0.
+ */
+ if (!sev->snp_plat_status.feature_info)
+ return 0;
+
+ /*
+ * Use dynamically allocated structure for the SNP_FEATURE_INFO
+ * command to ensure structure is 8-byte aligned, and does not
+ * cross a page boundary.
+ */
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ feat_info = page_address(page);
+ snp_feat_info.length = sizeof(snp_feat_info);
+ snp_feat_info.ecx_in = 0;
+ snp_feat_info.feature_info_paddr = __psp_pa(feat_info);
+
+ rc = sev_do_cmd(SEV_CMD_SNP_FEATURE_INFO, &snp_feat_info, error);
+ if (!rc)
+ sev->snp_feat_info_0 = *feat_info;
+ else
+ dev_err(sev->dev, "SNP FEATURE_INFO command failed, ret = %d, error = %#x\n",
+ rc, *error);
+
+ __free_page(page);
+
+ return rc;
+}
+
static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
{
struct sev_data_range_list *range_list = arg;
@@ -1103,7 +1359,7 @@ static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
return 0;
}
-static int __sev_snp_init_locked(int *error)
+static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
{
struct psp_device *psp = psp_master;
struct sev_data_snp_init_ex data;
@@ -1139,6 +1395,8 @@ static int __sev_snp_init_locked(int *error)
*
*/
if (sev_version_greater_or_equal(SNP_MIN_API_MAJOR, 52)) {
+ bool tio_supp = !!(sev->snp_feat_info_0.ebx & SNP_SEV_TIO_SUPPORTED);
+
/*
* Firmware checks that the pages containing the ranges enumerated
* in the RANGES structure are either in the default page state or in the
@@ -1163,10 +1421,33 @@ static int __sev_snp_init_locked(int *error)
return rc;
}
+ /*
+	 * Add HV_Fixed pages from other PSP sub-devices, such as SFS, to the
+	 * HV_Fixed page list.
+ */
+ snp_add_hv_fixed_pages(sev, snp_range_list);
+
memset(&data, 0, sizeof(data));
+
+ if (max_snp_asid) {
+ data.ciphertext_hiding_en = 1;
+ data.max_snp_asid = max_snp_asid;
+ }
+
data.init_rmp = 1;
data.list_paddr_en = 1;
data.list_paddr = __psp_pa(snp_range_list);
+
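+		/*
+		 * Enable TIO only if the firmware advertises it, the "tio"
+		 * module parameter allows it and the IOMMU supports it.
+		 */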
+ data.tio_en = tio_supp && sev_tio_enabled && amd_iommu_sev_tio_supported();
+
+ /*
+	 * When psp_init_on_probe is disabled, userspace calling the SEV
+	 * ioctl can inadvertently shut down SNP and SEV-TIO, causing
+	 * unexpected state loss.
+	 */
+	if (data.tio_en && !psp_init_on_probe)
+		dev_warn(sev->dev, "SEV-TIO is incompatible with psp_init_on_probe=0\n");
+
cmd = SEV_CMD_SNP_INIT_EX;
} else {
cmd = SEV_CMD_SNP_INIT;
@@ -1202,8 +1483,10 @@ static int __sev_snp_init_locked(int *error)
return rc;
}
+ snp_hv_fixed_pages_state_update(sev, HV_FIXED);
sev->snp_initialized = true;
- dev_dbg(sev->dev, "SEV-SNP firmware initialized\n");
+ dev_dbg(sev->dev, "SEV-SNP firmware initialized, SEV-TIO is %s\n",
+ data.tio_en ? "enabled" : "disabled");
dev_info(sev->dev, "SEV-SNP API:%d.%d build:%d\n", sev->api_major,
sev->api_minor, sev->build);
@@ -1211,6 +1494,23 @@ static int __sev_snp_init_locked(int *error)
atomic_notifier_chain_register(&panic_notifier_list,
&snp_panic_notifier);
+ if (data.tio_en) {
+		/*
+		 * This executes with sev_cmd_mutex held, so anything down the
+		 * stack that ends up calling snp_reclaim_pages(locked=false)
+		 * (which is extremely unlikely) would deadlock.
+		 * Instead of exporting __snp_alloc_firmware_pages(), allocate
+		 * a page for this one call here.
+		 */
+		struct page *tio_page = __snp_alloc_firmware_pages(
+				GFP_KERNEL_ACCOUNT | __GFP_ZERO, 0, true);
+
+		if (tio_page) {
+			sev_tsm_init_locked(sev, page_address(tio_page));
+			__snp_free_firmware_pages(tio_page, 0, true);
+		}
+ }
+
sev_es_tmr_size = SNP_TMR_SIZE;
return 0;
@@ -1276,15 +1576,17 @@ static int __sev_platform_init_handle_init_ex_path(struct sev_device *sev)
static int __sev_platform_init_locked(int *error)
{
- int rc, psp_ret = SEV_RET_NO_FW_CALL;
+ int rc, psp_ret, dfflush_error;
struct sev_device *sev;
+ psp_ret = dfflush_error = SEV_RET_NO_FW_CALL;
+
if (!psp_master || !psp_master->sev_data)
return -ENODEV;
sev = psp_master->sev_data;
- if (sev->state == SEV_STATE_INIT)
+ if (sev->sev_plat_status.state == SEV_STATE_INIT)
return 0;
__sev_platform_init_handle_tmr(sev);
@@ -1316,14 +1618,14 @@ static int __sev_platform_init_locked(int *error)
return rc;
}
- sev->state = SEV_STATE_INIT;
+ sev->sev_plat_status.state = SEV_STATE_INIT;
/* Prepare for first SEV guest launch after INIT */
wbinvd_on_all_cpus();
- rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
+ rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, &dfflush_error);
if (rc) {
dev_err(sev->dev, "SEV: DF_FLUSH failed %#x, rc %d\n",
- *error, rc);
+ dfflush_error, rc);
return rc;
}
@@ -1343,12 +1645,21 @@ static int _sev_platform_init_locked(struct sev_platform_init_args *args)
if (!psp_master || !psp_master->sev_data)
return -ENODEV;
+ /*
+ * Skip SNP/SEV initialization under a kdump kernel as SEV/SNP
+ * may already be initialized in the previous kernel. Since no
+ * SNP/SEV guests are run under a kdump kernel, there is no
+ * need to initialize SNP or SEV during kdump boot.
+ */
+ if (is_kdump_kernel())
+ return 0;
+
sev = psp_master->sev_data;
- if (sev->state == SEV_STATE_INIT)
+ if (sev->sev_plat_status.state == SEV_STATE_INIT)
return 0;
- rc = __sev_snp_init_locked(&args->error);
+ rc = __sev_snp_init_locked(&args->error, args->max_snp_asid);
if (rc && rc != -ENODEV)
return rc;
@@ -1382,7 +1693,7 @@ static int __sev_platform_shutdown_locked(int *error)
sev = psp->sev_data;
- if (sev->state == SEV_STATE_UNINIT)
+ if (sev->sev_plat_status.state == SEV_STATE_UNINIT)
return 0;
ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
@@ -1392,7 +1703,7 @@ static int __sev_platform_shutdown_locked(int *error)
return ret;
}
- sev->state = SEV_STATE_UNINIT;
+ sev->sev_plat_status.state = SEV_STATE_UNINIT;
dev_dbg(sev->dev, "SEV firmware shutdown\n");
return ret;
@@ -1431,7 +1742,7 @@ static int snp_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_req
{
int error, rc;
- rc = __sev_snp_init_locked(&error);
+ rc = __sev_snp_init_locked(&error, 0);
if (rc) {
argp->error = SEV_RET_INVALID_PLATFORM_STATE;
return rc;
@@ -1500,7 +1811,7 @@ static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool wr
if (!writable)
return -EPERM;
- if (sev->state == SEV_STATE_UNINIT) {
+ if (sev->sev_plat_status.state == SEV_STATE_UNINIT) {
rc = sev_move_to_init_state(argp, &shutdown_required);
if (rc)
return rc;
@@ -1549,7 +1860,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
data.len = input.length;
cmd:
- if (sev->state == SEV_STATE_UNINIT) {
+ if (sev->sev_plat_status.state == SEV_STATE_UNINIT) {
ret = sev_move_to_init_state(argp, &shutdown_required);
if (ret)
goto e_free_blob;
@@ -1597,6 +1908,16 @@ static int sev_get_api_version(void)
struct sev_user_data_status status;
int error = 0, ret;
+ /*
+ * Cache SNP platform status and SNP feature information
+ * if SNP is available.
+ */
+ if (cc_platform_has(CC_ATTR_HOST_SEV_SNP)) {
+ ret = snp_get_platform_data(sev, &error);
+ if (ret)
+ return 1;
+ }
+
ret = sev_platform_status(&status, &error);
if (ret) {
dev_err(sev->dev,
@@ -1604,10 +1925,12 @@ static int sev_get_api_version(void)
return 1;
}
+ /* Cache SEV platform status */
+ sev->sev_plat_status = status;
+
sev->api_major = status.api_major;
sev->api_minor = status.api_minor;
sev->build = status.build;
- sev->state = status.state;
return 0;
}
@@ -1782,11 +2105,18 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
return ret;
}
+ snp_leak_hv_fixed_pages();
sev->snp_initialized = false;
dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n");
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &snp_panic_notifier);
+ /*
+ * __sev_snp_shutdown_locked() deadlocks when it tries to unregister
+ * itself during panic as the panic notifier is called with RCU read
+ * lock held and notifier unregistration does RCU synchronization.
+ */
+ if (!panic)
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &snp_panic_notifier);
/* Reset TMR size back to default */
sev_es_tmr_size = SEV_TMR_SIZE;
@@ -1829,7 +2159,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
data.oca_cert_len = input.oca_cert_len;
/* If platform is not in INIT state then transition it to INIT */
- if (sev->state != SEV_STATE_INIT) {
+ if (sev->sev_plat_status.state != SEV_STATE_INIT) {
ret = sev_move_to_init_state(argp, &shutdown_required);
if (ret)
goto e_free_oca;
@@ -2000,7 +2330,7 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
cmd:
/* If platform is not in INIT state then transition it to INIT. */
- if (sev->state != SEV_STATE_INIT) {
+ if (sev->sev_plat_status.state != SEV_STATE_INIT) {
if (!writable) {
ret = -EPERM;
goto e_free_cert;
@@ -2422,7 +2752,7 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
{
int error;
- __sev_platform_shutdown_locked(NULL);
+ __sev_platform_shutdown_locked(&error);
if (sev_es_tmr) {
/*
@@ -2460,8 +2790,20 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
static void sev_firmware_shutdown(struct sev_device *sev)
{
+ /*
+	 * Call this without sev_cmd_mutex held: TSM will likely try to
+	 * disconnect IDE, which ends up calling sev_do_cmd() and that takes
+	 * sev_cmd_mutex.
+ */
+ if (sev->tio_status)
+ sev_tsm_uninit(sev);
+
mutex_lock(&sev_cmd_mutex);
+
__sev_firmware_shutdown(sev, false);
+
+ kfree(sev->tio_status);
+ sev->tio_status = NULL;
+
mutex_unlock(&sev_cmd_mutex);
}
@@ -2474,6 +2816,43 @@ void sev_platform_shutdown(void)
}
EXPORT_SYMBOL_GPL(sev_platform_shutdown);
+u64 sev_get_snp_policy_bits(void)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+ u64 policy_bits;
+
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
+ return 0;
+
+ if (!psp || !psp->sev_data)
+ return 0;
+
+ sev = psp->sev_data;
+
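+	/* Base policy bits are always valid; the rest depend on firmware features */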
+ policy_bits = SNP_POLICY_MASK_BASE;
+
+ if (sev->snp_plat_status.feature_info) {
+ if (sev->snp_feat_info_0.ecx & SNP_RAPL_DISABLE_SUPPORTED)
+ policy_bits |= SNP_POLICY_MASK_RAPL_DIS;
+
+ if (sev->snp_feat_info_0.ecx & SNP_CIPHER_TEXT_HIDING_SUPPORTED)
+ policy_bits |= SNP_POLICY_MASK_CIPHERTEXT_HIDING_DRAM;
+
+ if (sev->snp_feat_info_0.ecx & SNP_AES_256_XTS_POLICY_SUPPORTED)
+ policy_bits |= SNP_POLICY_MASK_MEM_AES_256_XTS;
+
+ if (sev->snp_feat_info_0.ecx & SNP_CXL_ALLOW_POLICY_SUPPORTED)
+ policy_bits |= SNP_POLICY_MASK_CXL_ALLOW;
+
+ if (sev_version_greater_or_equal(1, 58))
+ policy_bits |= SNP_POLICY_MASK_PAGE_SWAP_DISABLE;
+ }
+
+ return policy_bits;
+}
+EXPORT_SYMBOL_GPL(sev_get_snp_policy_bits);
+
void sev_dev_destroy(struct psp_device *psp)
{
struct sev_device *sev = psp->sev_data;
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
index 3e4e5574e88a..b1cd556bbbf6 100644
--- a/drivers/crypto/ccp/sev-dev.h
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -34,6 +34,8 @@ struct sev_misc_dev {
struct miscdevice misc;
};
+struct sev_tio_status;
+
struct sev_device {
struct device *dev;
struct psp_device *psp;
@@ -42,7 +44,6 @@ struct sev_device {
struct sev_vdata *vdata;
- int state;
unsigned int int_rcvd;
wait_queue_head_t int_queue;
struct sev_misc_dev *misc;
@@ -57,12 +58,29 @@ struct sev_device {
bool cmd_buf_backup_active;
bool snp_initialized;
+
+ struct sev_user_data_status sev_plat_status;
+
+ struct sev_user_data_snp_status snp_plat_status;
+ struct snp_feature_info snp_feat_info_0;
+
+ struct tsm_dev *tsmdev;
+ struct sev_tio_status *tio_status;
};
int sev_dev_init(struct psp_device *psp);
void sev_dev_destroy(struct psp_device *psp);
+int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
+
void sev_pci_init(void);
void sev_pci_exit(void);
+struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages);
+void snp_free_hv_fixed_pages(struct page *page);
+
+void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page);
+void sev_tsm_uninit(struct sev_device *sev);
+int sev_tio_cmd_buffer_len(int cmd);
+
#endif /* __SEV_DEV_H */
diff --git a/drivers/crypto/ccp/sfs.c b/drivers/crypto/ccp/sfs.c
new file mode 100644
index 000000000000..2f4beaafe7ec
--- /dev/null
+++ b/drivers/crypto/ccp/sfs.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Secure Processor Seamless Firmware Servicing support.
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Author: Ashish Kalra <ashish.kalra@amd.com>
+ */
+
+#include <linux/firmware.h>
+
+#include "sfs.h"
+#include "sev-dev.h"
+
+#define SFS_DEFAULT_TIMEOUT (10 * MSEC_PER_SEC)
+#define SFS_MAX_PAYLOAD_SIZE (2 * 1024 * 1024)
+#define SFS_NUM_2MB_PAGES_CMDBUF (SFS_MAX_PAYLOAD_SIZE / PMD_SIZE)
+#define SFS_NUM_PAGES_CMDBUF (SFS_MAX_PAYLOAD_SIZE / PAGE_SIZE)
+
+static DEFINE_MUTEX(sfs_ioctl_mutex);
+
+static struct sfs_misc_dev *misc_dev;
+
+static int send_sfs_cmd(struct sfs_device *sfs_dev, int msg)
+{
+ int ret;
+
+ sfs_dev->command_buf->hdr.status = 0;
+ sfs_dev->command_buf->hdr.sub_cmd_id = msg;
+
+ ret = psp_extended_mailbox_cmd(sfs_dev->psp,
+ SFS_DEFAULT_TIMEOUT,
+ (struct psp_ext_request *)sfs_dev->command_buf);
+ if (ret == -EIO) {
+ dev_dbg(sfs_dev->dev,
+ "msg 0x%x failed with PSP error: 0x%x, extended status: 0x%x\n",
+ msg, sfs_dev->command_buf->hdr.status,
+ *(u32 *)sfs_dev->command_buf->buf);
+ }
+
+ return ret;
+}
+
+static int send_sfs_get_fw_versions(struct sfs_device *sfs_dev)
+{
+ /*
+ * SFS_GET_FW_VERSIONS command needs the output buffer to be
+ * initialized to 0xC7 in every byte.
+ */
+ memset(sfs_dev->command_buf->sfs_buffer, 0xc7, PAGE_SIZE);
+ sfs_dev->command_buf->hdr.payload_size = 2 * PAGE_SIZE;
+
+ return send_sfs_cmd(sfs_dev, PSP_SFS_GET_FW_VERSIONS);
+}
+
+static int send_sfs_update_package(struct sfs_device *sfs_dev, const char *payload_name)
+{
+ char payload_path[PAYLOAD_NAME_SIZE + sizeof("amd/")];
+ const struct firmware *firmware;
+ unsigned long package_size;
+ int ret;
+
+ /* Sanitize userspace provided payload name */
+ if (!strnchr(payload_name, PAYLOAD_NAME_SIZE, '\0'))
+ return -EINVAL;
+
+ snprintf(payload_path, sizeof(payload_path), "amd/%s", payload_name);
+
+ ret = firmware_request_nowarn(&firmware, payload_path, sfs_dev->dev);
+ if (ret < 0) {
+ dev_warn_ratelimited(sfs_dev->dev, "firmware request failed for %s (%d)\n",
+ payload_path, ret);
+ return -ENOENT;
+ }
+
+ /*
+	 * The SFS Update Package command's input buffer contains TEE_EXT_CMD_BUFFER
+	 * followed by the Update Package, and its total size must be 64KB aligned.
+ */
+ package_size = ALIGN(firmware->size + PAGE_SIZE, 0x10000U);
+
+ /*
+	 * The SFS command buffer is a pre-allocated 2MB buffer; fail the update
+	 * if the SFS payload is larger than that.
+ */
+ if (package_size > SFS_MAX_PAYLOAD_SIZE) {
+ dev_warn_ratelimited(sfs_dev->dev,
+				     "SFS payload size %lu larger than the maximum supported payload size of %u\n",
+ package_size, SFS_MAX_PAYLOAD_SIZE);
+ release_firmware(firmware);
+ return -E2BIG;
+ }
+
+ /*
+ * Copy firmware data to a HV_Fixed memory region.
+ */
+ memcpy(sfs_dev->command_buf->sfs_buffer, firmware->data, firmware->size);
+ sfs_dev->command_buf->hdr.payload_size = package_size;
+
+ release_firmware(firmware);
+
+ return send_sfs_cmd(sfs_dev, PSP_SFS_UPDATE);
+}
+
+static long sfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct sfs_user_get_fw_versions __user *sfs_get_fw_versions;
+ struct sfs_user_update_package __user *sfs_update_package;
+ struct psp_device *psp_master = psp_get_master_device();
+ char payload_name[PAYLOAD_NAME_SIZE];
+ struct sfs_device *sfs_dev;
+ int ret = 0;
+
+ if (!psp_master || !psp_master->sfs_data)
+ return -ENODEV;
+
+ sfs_dev = psp_master->sfs_data;
+
+ guard(mutex)(&sfs_ioctl_mutex);
+
+ switch (cmd) {
+ case SFSIOCFWVERS:
+ dev_dbg(sfs_dev->dev, "in SFSIOCFWVERS\n");
+
+ sfs_get_fw_versions = (struct sfs_user_get_fw_versions __user *)arg;
+
+ ret = send_sfs_get_fw_versions(sfs_dev);
+ if (ret && ret != -EIO)
+ return ret;
+
+ /*
+ * Return SFS status and extended status back to userspace
+ * if PSP status indicated success or command error.
+ */
+ if (copy_to_user(&sfs_get_fw_versions->blob, sfs_dev->command_buf->sfs_buffer,
+ PAGE_SIZE))
+ return -EFAULT;
+ if (copy_to_user(&sfs_get_fw_versions->sfs_status,
+ &sfs_dev->command_buf->hdr.status,
+ sizeof(sfs_get_fw_versions->sfs_status)))
+ return -EFAULT;
+ if (copy_to_user(&sfs_get_fw_versions->sfs_extended_status,
+ &sfs_dev->command_buf->buf,
+ sizeof(sfs_get_fw_versions->sfs_extended_status)))
+ return -EFAULT;
+ break;
+ case SFSIOCUPDATEPKG:
+ dev_dbg(sfs_dev->dev, "in SFSIOCUPDATEPKG\n");
+
+ sfs_update_package = (struct sfs_user_update_package __user *)arg;
+
+ if (copy_from_user(payload_name, sfs_update_package->payload_name,
+ PAYLOAD_NAME_SIZE))
+ return -EFAULT;
+
+ ret = send_sfs_update_package(sfs_dev, payload_name);
+ if (ret && ret != -EIO)
+ return ret;
+
+ /*
+ * Return SFS status and extended status back to userspace
+ * if PSP status indicated success or command error.
+ */
+ if (copy_to_user(&sfs_update_package->sfs_status,
+ &sfs_dev->command_buf->hdr.status,
+ sizeof(sfs_update_package->sfs_status)))
+ return -EFAULT;
+ if (copy_to_user(&sfs_update_package->sfs_extended_status,
+ &sfs_dev->command_buf->buf,
+ sizeof(sfs_update_package->sfs_extended_status)))
+ return -EFAULT;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct file_operations sfs_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sfs_ioctl,
+};
+
+static void sfs_exit(struct kref *ref)
+{
+ misc_deregister(&misc_dev->misc);
+ kfree(misc_dev);
+ misc_dev = NULL;
+}
+
+void sfs_dev_destroy(struct psp_device *psp)
+{
+ struct sfs_device *sfs_dev = psp->sfs_data;
+
+ if (!sfs_dev)
+ return;
+
+ /*
+ * Change SFS command buffer back to the default "Write-Back" type.
+ */
+ set_memory_wb((unsigned long)sfs_dev->command_buf, SFS_NUM_PAGES_CMDBUF);
+
+ snp_free_hv_fixed_pages(sfs_dev->page);
+
+ if (sfs_dev->misc)
+ kref_put(&misc_dev->refcount, sfs_exit);
+
+ psp->sfs_data = NULL;
+}
+
+/* Based on sev_misc_init() */
+static int sfs_misc_init(struct sfs_device *sfs)
+{
+ struct device *dev = sfs->dev;
+ int ret;
+
+ /*
+ * SFS feature support can be detected on multiple devices but the SFS
+	 * SFS feature support can be detected on multiple devices, but the SFS
+	 * FW commands must be issued on the master. During probe we do not yet
+	 * know the master, hence /dev/sfs is created on the first device probed.
+ if (!misc_dev) {
+ struct miscdevice *misc;
+
+ misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL);
+ if (!misc_dev)
+ return -ENOMEM;
+
+ misc = &misc_dev->misc;
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->name = "sfs";
+ misc->fops = &sfs_fops;
+ misc->mode = 0600;
+
+ ret = misc_register(misc);
+ if (ret)
+ return ret;
+
+ kref_init(&misc_dev->refcount);
+ } else {
+ kref_get(&misc_dev->refcount);
+ }
+
+ sfs->misc = misc_dev;
+ dev_dbg(dev, "registered SFS device\n");
+
+ return 0;
+}
+
+int sfs_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct sfs_device *sfs_dev;
+ struct page *page;
+ int ret = -ENOMEM;
+
+ sfs_dev = devm_kzalloc(dev, sizeof(*sfs_dev), GFP_KERNEL);
+ if (!sfs_dev)
+ return -ENOMEM;
+
+ /*
+	 * Pre-allocate a 2MB command buffer for all SFS commands using the
+	 * SNP HV_Fixed page allocator, which also transitions the buffer to
+	 * the HV_Fixed page state if SNP is enabled.
+ */
+ page = snp_alloc_hv_fixed_pages(SFS_NUM_2MB_PAGES_CMDBUF);
+ if (!page) {
+		dev_dbg(dev, "Command buffer HV_Fixed page allocation failed\n");
+ goto cleanup_dev;
+ }
+ sfs_dev->page = page;
+ sfs_dev->command_buf = page_address(page);
+
+ dev_dbg(dev, "Command buffer 0x%px to be marked as HV_Fixed\n", sfs_dev->command_buf);
+
+ /*
+ * SFS command buffer must be mapped as non-cacheable.
+ */
+ ret = set_memory_uc((unsigned long)sfs_dev->command_buf, SFS_NUM_PAGES_CMDBUF);
+ if (ret) {
+ dev_dbg(dev, "Set memory uc failed\n");
+ goto cleanup_cmd_buf;
+ }
+
+ dev_dbg(dev, "Command buffer 0x%px marked uncacheable\n", sfs_dev->command_buf);
+
+ psp->sfs_data = sfs_dev;
+ sfs_dev->dev = dev;
+ sfs_dev->psp = psp;
+
+ ret = sfs_misc_init(sfs_dev);
+ if (ret)
+ goto cleanup_mem_attr;
+
+ dev_notice(sfs_dev->dev, "SFS support is available\n");
+
+ return 0;
+
+cleanup_mem_attr:
+ set_memory_wb((unsigned long)sfs_dev->command_buf, SFS_NUM_PAGES_CMDBUF);
+
+cleanup_cmd_buf:
+ snp_free_hv_fixed_pages(page);
+
+cleanup_dev:
+ psp->sfs_data = NULL;
+ devm_kfree(dev, sfs_dev);
+
+ return ret;
+}
diff --git a/drivers/crypto/ccp/sfs.h b/drivers/crypto/ccp/sfs.h
new file mode 100644
index 000000000000..97704c210efd
--- /dev/null
+++ b/drivers/crypto/ccp/sfs.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Platform Security Processor (PSP) Seamless Firmware (SFS) Support.
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Author: Ashish Kalra <ashish.kalra@amd.com>
+ */
+
+#ifndef __SFS_H__
+#define __SFS_H__
+
+#include <uapi/linux/psp-sfs.h>
+
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/psp-sev.h>
+#include <linux/psp-platform-access.h>
+#include <linux/set_memory.h>
+
+#include "psp-dev.h"
+
+struct sfs_misc_dev {
+ struct kref refcount;
+ struct miscdevice misc;
+};
+
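+/*
+ * Layout of the 2MB HV_Fixed command buffer: the extended request header
+ * and status buffer fill the first page, the SFS payload area follows.
+ */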
+struct sfs_command {
+ struct psp_ext_req_buffer_hdr hdr;
+ u8 buf[PAGE_SIZE - sizeof(struct psp_ext_req_buffer_hdr)];
+ u8 sfs_buffer[];
+} __packed;
+
+struct sfs_device {
+ struct device *dev;
+ struct psp_device *psp;
+
+ struct page *page;
+ struct sfs_command *command_buf;
+
+ struct sfs_misc_dev *misc;
+};
+
+void sfs_dev_destroy(struct psp_device *psp);
+int sfs_dev_init(struct psp_device *psp);
+
+#endif /* __SFS_H__ */
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 6f9d7063257d..1335a83fe052 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -95,7 +95,7 @@ struct sp_device {
struct device *dev;
- struct sp_dev_vdata *dev_vdata;
+ const struct sp_dev_vdata *dev_vdata;
unsigned int ord;
char name[SP_MAX_NAME_LEN];
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index e1be2072d680..8891ceee1d7d 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -453,11 +453,23 @@ static const struct psp_vdata pspv6 = {
.cmdresp_reg = 0x10944, /* C2PMSG_17 */
.cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */
.cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10510, /* P2CMSG_INTEN */
.intsts_reg = 0x10514, /* P2CMSG_INTSTS */
};
+static const struct psp_vdata pspv7 = {
+ .tee = &teev2,
+ .cmdresp_reg = 0x10944, /* C2PMSG_17 */
+ .cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */
+ .cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
+ .feature_reg = 0x109fc, /* C2PMSG_63 */
+ .inten_reg = 0x10510, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10514, /* P2CMSG_INTSTS */
+};
+
#endif
static const struct sp_dev_vdata dev_vdata[] = {
@@ -524,6 +536,13 @@ static const struct sp_dev_vdata dev_vdata[] = {
.psp_vdata = &pspv6,
#endif
},
+ { /* 9 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv7,
+#endif
+ },
};
static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
@@ -538,6 +557,7 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
{ PCI_VDEVICE(AMD, 0x17D8), (kernel_ulong_t)&dev_vdata[8] },
+ { PCI_VDEVICE(AMD, 0x115A), (kernel_ulong_t)&dev_vdata[9] },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index 3933cac1694d..3f9843fa7782 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -52,24 +52,13 @@ static const struct of_device_id sp_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sp_of_match);
-static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
-{
- const struct of_device_id *match;
-
- match = of_match_node(sp_of_match, pdev->dev.of_node);
- if (match && match->data)
- return (struct sp_dev_vdata *)match->data;
-
- return NULL;
-}
-
-static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
+static const struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
{
const struct acpi_device_id *match;
match = acpi_match_device(sp_acpi_match, &pdev->dev);
if (match && match->driver_data)
- return (struct sp_dev_vdata *)match->driver_data;
+ return (const struct sp_dev_vdata *)match->driver_data;
return NULL;
}
@@ -123,7 +112,7 @@ static int sp_platform_probe(struct platform_device *pdev)
goto e_err;
sp->dev_specific = sp_platform;
- sp->dev_vdata = pdev->dev.of_node ? sp_get_of_version(pdev)
+ sp->dev_vdata = pdev->dev.of_node ? of_device_get_match_data(&pdev->dev)
: sp_get_acpi_version(pdev);
if (!sp->dev_vdata) {
ret = -ENODEV;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index bcca55bff910..dc7e0cd51c25 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -224,7 +224,7 @@ static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
/* Set MLLI size for the bypass operation */
mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
- dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
+ dev_dbg(dev, "MLLI params: virt_addr=%p dma_addr=%pad mlli_len=0x%X\n",
mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
mlli_params->mlli_len);
@@ -239,7 +239,7 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
{
unsigned int index = sgl_data->num_of_buffers;
- dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
+ dev_dbg(dev, "index=%u nents=%u sgl=%p data_len=0x%08X is_last=%d\n",
index, nents, sgl, data_len, is_last_table);
sgl_data->nents[index] = nents;
sgl_data->entry[index].sgl = sgl;
@@ -298,7 +298,7 @@ cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
dev_err(dev, "dma_map_sg() config buffer failed\n");
return -ENOMEM;
}
- dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%p offset=%u length=%u\n",
&sg_dma_address(&areq_ctx->ccm_adata_sg),
sg_page(&areq_ctx->ccm_adata_sg),
sg_virt(&areq_ctx->ccm_adata_sg),
@@ -323,7 +323,7 @@ static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
dev_err(dev, "dma_map_sg() src buffer failed\n");
return -ENOMEM;
}
- dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%p offset=%u length=%u\n",
&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
areq_ctx->buff_sg->length);
@@ -359,11 +359,11 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
if (src != dst) {
dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
- dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
- dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+ dev_dbg(dev, "Unmapped req->dst=%p\n", sg_virt(dst));
+ dev_dbg(dev, "Unmapped req->src=%p\n", sg_virt(src));
} else {
dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
- dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+ dev_dbg(dev, "Unmapped req->src=%p\n", sg_virt(src));
}
}
@@ -391,11 +391,11 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
- dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping iv %u B at va=%p for DMA failed\n",
ivsize, info);
return -ENOMEM;
}
- dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped iv %u B at va=%p to dma=%pad\n",
ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
} else {
req_ctx->gen_ctx.iv_dma_addr = 0;
@@ -506,7 +506,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
(areq_ctx->mlli_params.mlli_virt_addr)) {
- dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+ dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%p\n",
&areq_ctx->mlli_params.mlli_dma_addr,
areq_ctx->mlli_params.mlli_virt_addr);
dma_pool_free(areq_ctx->mlli_params.curr_pool,
@@ -514,13 +514,13 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
areq_ctx->mlli_params.mlli_dma_addr);
}
- dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
+ dev_dbg(dev, "Unmapping src sgl: req->src=%p areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
areq_ctx->assoclen, req->cryptlen);
dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
if (req->src != req->dst) {
- dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
+ dev_dbg(dev, "Unmapping dst sgl: req->dst=%p\n",
sg_virt(req->dst));
dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
}
@@ -566,7 +566,7 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
- dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping iv %u B at va=%p for DMA failed\n",
hw_iv_size, req->iv);
kfree_sensitive(areq_ctx->gen_ctx.iv);
areq_ctx->gen_ctx.iv = NULL;
@@ -574,7 +574,7 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
goto chain_iv_exit;
}
- dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped iv %u B at va=%p to dma=%pad\n",
hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
chain_iv_exit:
@@ -977,7 +977,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping mac_buf %u B at va=%p for DMA failed\n",
MAX_MAC_SIZE, areq_ctx->mac_buf);
rc = -ENOMEM;
goto aead_map_failure;
@@ -991,7 +991,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping mac_buf %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, addr);
areq_ctx->ccm_iv0_dma_addr = 0;
rc = -ENOMEM;
@@ -1009,7 +1009,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping hkey %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, areq_ctx->hkey);
rc = -ENOMEM;
goto aead_map_failure;
@@ -1019,7 +1019,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping gcm_len_block %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
rc = -ENOMEM;
goto aead_map_failure;
@@ -1030,7 +1030,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
AES_BLOCK_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
areq_ctx->gcm_iv_inc1_dma_addr = 0;
rc = -ENOMEM;
@@ -1042,7 +1042,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
AES_BLOCK_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%p for DMA failed\n",
AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
areq_ctx->gcm_iv_inc2_dma_addr = 0;
rc = -ENOMEM;
@@ -1152,7 +1152,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
u32 dummy = 0;
u32 mapped_nents = 0;
- dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
+ dev_dbg(dev, "final params : curr_buff=%p curr_buff_cnt=0x%X nbytes = 0x%X src=%p curr_index=%u\n",
curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
@@ -1235,8 +1235,9 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
int rc = 0;
u32 dummy = 0;
u32 mapped_nents = 0;
+ int sg_nents;
- dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
+ dev_dbg(dev, " update params : curr_buff=%p curr_buff_cnt=0x%X nbytes=0x%X src=%p curr_index=%u\n",
curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
@@ -1246,9 +1247,12 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
areq_ctx->in_nents = 0;
if (total_in_len < block_size) {
- dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
+ dev_dbg(dev, " less than one block: curr_buff=%p *curr_buff_cnt=0x%X copy_to=%p\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
- areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
+ sg_nents = sg_nents_for_len(src, nbytes);
+ if (sg_nents < 0)
+ return sg_nents;
+ areq_ctx->in_nents = sg_nents;
sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes;
@@ -1265,7 +1269,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
/* Copy the new residue to next buffer */
if (*next_buff_cnt) {
- dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
+ dev_dbg(dev, " handle residue: next buff %p skip data %u residue %u\n",
next_buff, (update_data_len - *curr_buff_cnt),
*next_buff_cnt);
cc_copy_sg_portion(dev, next_buff, src,
@@ -1338,7 +1342,7 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
*allocated and should be released
*/
if (areq_ctx->mlli_params.curr_pool) {
- dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+ dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%p\n",
&areq_ctx->mlli_params.mlli_dma_addr,
areq_ctx->mlli_params.mlli_virt_addr);
dma_pool_free(areq_ctx->mlli_params.curr_pool,
@@ -1347,14 +1351,14 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
}
if (src && areq_ctx->in_nents) {
- dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
+ dev_dbg(dev, "Unmapped sg src: virt=%p dma=%pad len=0x%X\n",
sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
dma_unmap_sg(dev, src,
areq_ctx->in_nents, DMA_TO_DEVICE);
}
if (*prev_len) {
- dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
+ dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%p dma=%pad len 0x%X\n",
sg_virt(areq_ctx->buff_sg),
&sg_dma_address(areq_ctx->buff_sg),
sg_dma_len(areq_ctx->buff_sg));
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index d39c067672fd..e2cbfdf7a0e4 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -211,11 +211,11 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
max_key_buf_size,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
- dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping Key %u B at va=%p for DMA failed\n",
max_key_buf_size, ctx_p->user.key);
goto free_key;
}
- dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped key %u B at va=%p to dma=%pad\n",
max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
return 0;
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index d0612bec4d58..c6d085c8ff79 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -125,7 +125,7 @@ static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
digestsize);
return -ENOMEM;
}
- dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped digest result buffer %u B at va=%p to dma=%pad\n",
digestsize, state->digest_result_buff,
&state->digest_result_dma_addr);
@@ -184,11 +184,11 @@ static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
dma_map_single(dev, state->digest_buff,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
- dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping digest len %d B at va=%p for DMA failed\n",
ctx->inter_digestsize, state->digest_buff);
return -EINVAL;
}
- dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped digest %d B at va=%p to dma=%pad\n",
ctx->inter_digestsize, state->digest_buff,
&state->digest_buff_dma_addr);
@@ -197,11 +197,11 @@ static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
dma_map_single(dev, state->digest_bytes_len,
HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
- dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping digest len %u B at va=%p for DMA failed\n",
HASH_MAX_LEN_SIZE, state->digest_bytes_len);
goto unmap_digest_buf;
}
- dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped digest len %u B at va=%p to dma=%pad\n",
HASH_MAX_LEN_SIZE, state->digest_bytes_len,
&state->digest_bytes_len_dma_addr);
}
@@ -212,12 +212,12 @@ static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
ctx->inter_digestsize,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
- dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping opad digest %d B at va=%p for DMA failed\n",
ctx->inter_digestsize,
state->opad_digest_buff);
goto unmap_digest_len;
}
- dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped opad digest %d B at va=%p to dma=%pad\n",
ctx->inter_digestsize, state->opad_digest_buff,
&state->opad_digest_dma_addr);
}
@@ -272,7 +272,7 @@ static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
if (state->digest_result_dma_addr) {
dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
DMA_BIDIRECTIONAL);
- dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
+ dev_dbg(dev, "unmpa digest result buffer va (%p) pa (%pad) len %u\n",
state->digest_result_buff,
&state->digest_result_dma_addr, digestsize);
memcpy(result, state->digest_result_buff, digestsize);
@@ -287,7 +287,7 @@ static void cc_update_complete(struct device *dev, void *cc_req, int err)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
- dev_dbg(dev, "req=%pK\n", req);
+ dev_dbg(dev, "req=%p\n", req);
if (err != -EINPROGRESS) {
/* Not a BACKLOG notification */
@@ -306,7 +306,7 @@ static void cc_digest_complete(struct device *dev, void *cc_req, int err)
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
- dev_dbg(dev, "req=%pK\n", req);
+ dev_dbg(dev, "req=%p\n", req);
if (err != -EINPROGRESS) {
/* Not a BACKLOG notification */
@@ -326,7 +326,7 @@ static void cc_hash_complete(struct device *dev, void *cc_req, int err)
struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
- dev_dbg(dev, "req=%pK\n", req);
+ dev_dbg(dev, "req=%p\n", req);
if (err != -EINPROGRESS) {
/* Not a BACKLOG notification */
@@ -1077,11 +1077,11 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
- dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping digest len %zu B at va=%p for DMA failed\n",
sizeof(ctx->digest_buff), ctx->digest_buff);
goto fail;
}
- dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped digest %zu B at va=%p to dma=%pad\n",
sizeof(ctx->digest_buff), ctx->digest_buff,
&ctx->digest_buff_dma_addr);
@@ -1090,12 +1090,12 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
sizeof(ctx->opad_tmp_keys_buff),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
- dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
+ dev_err(dev, "Mapping opad digest %zu B at va=%p for DMA failed\n",
sizeof(ctx->opad_tmp_keys_buff),
ctx->opad_tmp_keys_buff);
goto fail;
}
- dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%p to dma=%pad\n",
sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
&ctx->opad_tmp_keys_dma_addr);
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 6124fbbbed94..bbd118f8de0e 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -77,6 +77,5 @@ int cc_pm_get(struct device *dev)
void cc_pm_put_suspend(struct device *dev)
{
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 5dd3f6a4781a..37294bb74003 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -4,9 +4,9 @@ config CRYPTO_DEV_CHELSIO
depends on CHELSIO_T4
select CRYPTO_LIB_AES
select CRYPTO_LIB_GF128MUL
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- select CRYPTO_SHA512
+ select CRYPTO_LIB_SHA1
+ select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_SHA512
select CRYPTO_AUTHENC
help
The Chelsio Crypto Co-processor driver for T6 adapters.
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index af37477ffd8d..22cbc343198a 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -51,7 +51,6 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
-#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
@@ -277,88 +276,60 @@ static void get_aes_decrypt_key(unsigned char *dec_key,
}
}
-static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
+static int chcr_prepare_hmac_key(const u8 *raw_key, unsigned int raw_key_len,
+ int digestsize, void *istate, void *ostate)
{
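+	/*
+	 * The hardware consumes the HMAC inner/outer partial states in
+	 * big-endian word order while the HMAC library prepares them in
+	 * native CPU order, so byte-swap each word while copying out.
+	 */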
- struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
-
- switch (ds) {
+ __be32 *istate32 = istate, *ostate32 = ostate;
+ __be64 *istate64 = istate, *ostate64 = ostate;
+ union {
+ struct hmac_sha1_key sha1;
+ struct hmac_sha224_key sha224;
+ struct hmac_sha256_key sha256;
+ struct hmac_sha384_key sha384;
+ struct hmac_sha512_key sha512;
+ } k;
+
+ switch (digestsize) {
case SHA1_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha1", 0, 0);
+ hmac_sha1_preparekey(&k.sha1, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha1.istate.h); i++) {
+ istate32[i] = cpu_to_be32(k.sha1.istate.h[i]);
+ ostate32[i] = cpu_to_be32(k.sha1.ostate.h[i]);
+ }
break;
case SHA224_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha224", 0, 0);
+ hmac_sha224_preparekey(&k.sha224, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha224.key.istate.h); i++) {
+ istate32[i] = cpu_to_be32(k.sha224.key.istate.h[i]);
+ ostate32[i] = cpu_to_be32(k.sha224.key.ostate.h[i]);
+ }
break;
case SHA256_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha256", 0, 0);
+ hmac_sha256_preparekey(&k.sha256, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha256.key.istate.h); i++) {
+ istate32[i] = cpu_to_be32(k.sha256.key.istate.h[i]);
+ ostate32[i] = cpu_to_be32(k.sha256.key.ostate.h[i]);
+ }
break;
case SHA384_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha384", 0, 0);
+ hmac_sha384_preparekey(&k.sha384, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha384.key.istate.h); i++) {
+ istate64[i] = cpu_to_be64(k.sha384.key.istate.h[i]);
+ ostate64[i] = cpu_to_be64(k.sha384.key.ostate.h[i]);
+ }
break;
case SHA512_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha512", 0, 0);
+ hmac_sha512_preparekey(&k.sha512, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha512.key.istate.h); i++) {
+ istate64[i] = cpu_to_be64(k.sha512.key.istate.h[i]);
+ ostate64[i] = cpu_to_be64(k.sha512.key.ostate.h[i]);
+ }
break;
+ default:
+ return -EINVAL;
}
-
- return base_hash;
-}
-
-static int chcr_compute_partial_hash(struct shash_desc *desc,
- char *iopad, char *result_hash,
- int digest_size)
-{
- struct sha1_state sha1_st;
- struct sha256_state sha256_st;
- struct sha512_state sha512_st;
- int error;
-
- if (digest_size == SHA1_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha1_st);
- memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
- } else if (digest_size == SHA224_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha256_st);
- memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
-
- } else if (digest_size == SHA256_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha256_st);
- memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
-
- } else if (digest_size == SHA384_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha512_st);
- memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
-
- } else if (digest_size == SHA512_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha512_st);
- memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
- } else {
- error = -EINVAL;
- pr_err("Unknown digest size %d\n", digest_size);
- }
- return error;
-}
-
-static void chcr_change_order(char *buf, int ds)
-{
- int i;
-
- if (ds == SHA512_DIGEST_SIZE) {
- for (i = 0; i < (ds / sizeof(u64)); i++)
- *((__be64 *)buf + i) =
- cpu_to_be64(*((u64 *)buf + i));
- } else {
- for (i = 0; i < (ds / sizeof(u32)); i++)
- *((__be32 *)buf + i) =
- cpu_to_be32(*((u32 *)buf + i));
- }
+ memzero_explicit(&k, sizeof(k));
+ return 0;
}
static inline int is_hmac(struct crypto_tfm *tfm)
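
For reference, hmac_*_preparekey() performs the RFC 2104 precomputation that
the deleted chcr_alloc_shash()/chcr_compute_partial_hash() pair did by hand.
A userspace sketch of that step, assuming the SHA-256 block size and
omitting the hash-down of over-long keys:

	#include <stdint.h>
	#include <string.h>

	#define BLOCK 64	/* SHA-256 block size */

	static void hmac_pads(const uint8_t *key, size_t len,
			      uint8_t ipad[BLOCK], uint8_t opad[BLOCK])
	{
		uint8_t k[BLOCK] = { 0 };

		memcpy(k, key, len);		/* assumes len <= BLOCK */
		for (size_t i = 0; i < BLOCK; i++) {
			ipad[i] = k[i] ^ 0x36;	/* inner pad */
			opad[i] = k[i] ^ 0x5c;	/* outer pad */
		}
		/* One compression over each pad yields the istate/ostate
		 * midstates that the driver loads into the hardware. */
	}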
@@ -1547,11 +1518,6 @@ static int get_alg_config(struct algo_param *params,
return 0;
}
-static inline void chcr_free_shash(struct crypto_shash *base_hash)
-{
- crypto_free_shash(base_hash);
-}
-
/**
* create_hash_wr - Create hash work request
* @req: Cipher req base
@@ -2202,53 +2168,13 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
- unsigned int digestsize = crypto_ahash_digestsize(tfm);
- unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
- unsigned int i, err = 0, updated_digestsize;
-
- SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
/* use the key to calculate the ipad and opad. ipad will be sent with the
* first request's data. opad will be sent with the final hash result.
* ipad is kept in hmacctx->ipad and opad in hmacctx->opad.
*/
- shash->tfm = hmacctx->base_hash;
- if (keylen > bs) {
- err = crypto_shash_digest(shash, key, keylen,
- hmacctx->ipad);
- if (err)
- goto out;
- keylen = digestsize;
- } else {
- memcpy(hmacctx->ipad, key, keylen);
- }
- memset(hmacctx->ipad + keylen, 0, bs - keylen);
- unsafe_memcpy(hmacctx->opad, hmacctx->ipad, bs,
- "fortified memcpy causes -Wrestrict warning");
-
- for (i = 0; i < bs / sizeof(int); i++) {
- *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
- *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
- }
-
- updated_digestsize = digestsize;
- if (digestsize == SHA224_DIGEST_SIZE)
- updated_digestsize = SHA256_DIGEST_SIZE;
- else if (digestsize == SHA384_DIGEST_SIZE)
- updated_digestsize = SHA512_DIGEST_SIZE;
- err = chcr_compute_partial_hash(shash, hmacctx->ipad,
- hmacctx->ipad, digestsize);
- if (err)
- goto out;
- chcr_change_order(hmacctx->ipad, updated_digestsize);
-
- err = chcr_compute_partial_hash(shash, hmacctx->opad,
- hmacctx->opad, digestsize);
- if (err)
- goto out;
- chcr_change_order(hmacctx->opad, updated_digestsize);
-out:
- return err;
+ return chcr_prepare_hmac_key(key, keylen, crypto_ahash_digestsize(tfm),
+ hmacctx->ipad, hmacctx->opad);
}
static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
@@ -2344,30 +2270,11 @@ static int chcr_hmac_init(struct ahash_request *areq)
static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
- unsigned int digestsize =
- crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
-
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct chcr_ahash_req_ctx));
- hmacctx->base_hash = chcr_alloc_shash(digestsize);
- if (IS_ERR(hmacctx->base_hash))
- return PTR_ERR(hmacctx->base_hash);
return chcr_device_init(crypto_tfm_ctx(tfm));
}
-static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
-{
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
-
- if (hmacctx->base_hash) {
- chcr_free_shash(hmacctx->base_hash);
- hmacctx->base_hash = NULL;
- }
-}
-
inline void chcr_aead_common_exit(struct aead_request *req)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
@@ -3557,15 +3464,12 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
/* it contains auth and cipher key both*/
struct crypto_authenc_keys keys;
- unsigned int bs, subtype;
+ unsigned int subtype;
unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
- int err = 0, i, key_ctx_len = 0;
+ int err = 0, key_ctx_len = 0;
unsigned char ck_size = 0;
- unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
- struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
struct algo_param param;
int align;
- u8 *o_ptr = NULL;
crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
@@ -3613,68 +3517,26 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
aeadctx->enckey_len << 3);
}
- base_hash = chcr_alloc_shash(max_authsize);
- if (IS_ERR(base_hash)) {
- pr_err("Base driver cannot be loaded\n");
+
+ align = KEYCTX_ALIGN_PAD(max_authsize);
+ err = chcr_prepare_hmac_key(keys.authkey, keys.authkeylen, max_authsize,
+ actx->h_iopad,
+ actx->h_iopad + param.result_size + align);
+ if (err)
goto out;
- }
- {
- SHASH_DESC_ON_STACK(shash, base_hash);
-
- shash->tfm = base_hash;
- bs = crypto_shash_blocksize(base_hash);
- align = KEYCTX_ALIGN_PAD(max_authsize);
- o_ptr = actx->h_iopad + param.result_size + align;
-
- if (keys.authkeylen > bs) {
- err = crypto_shash_digest(shash, keys.authkey,
- keys.authkeylen,
- o_ptr);
- if (err) {
- pr_err("Base driver cannot be loaded\n");
- goto out;
- }
- keys.authkeylen = max_authsize;
- } else
- memcpy(o_ptr, keys.authkey, keys.authkeylen);
-
- /* Compute the ipad-digest*/
- memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
- memcpy(pad, o_ptr, keys.authkeylen);
- for (i = 0; i < bs >> 2; i++)
- *((unsigned int *)pad + i) ^= IPAD_DATA;
-
- if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
- max_authsize))
- goto out;
- /* Compute the opad-digest */
- memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
- memcpy(pad, o_ptr, keys.authkeylen);
- for (i = 0; i < bs >> 2; i++)
- *((unsigned int *)pad + i) ^= OPAD_DATA;
- if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
- goto out;
+ key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16) +
+ (param.result_size + align) * 2;
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size, 0, 1,
+ key_ctx_len >> 4);
+ actx->auth_mode = param.auth_mode;
+
+ memzero_explicit(&keys, sizeof(keys));
+ return 0;
- /* convert the ipad and opad digest to network order */
- chcr_change_order(actx->h_iopad, param.result_size);
- chcr_change_order(o_ptr, param.result_size);
- key_ctx_len = sizeof(struct _key_ctx) +
- roundup(keys.enckeylen, 16) +
- (param.result_size + align) * 2;
- aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
- 0, 1, key_ctx_len >> 4);
- actx->auth_mode = param.auth_mode;
- chcr_free_shash(base_hash);
-
- memzero_explicit(&keys, sizeof(keys));
- return 0;
- }
out:
aeadctx->enckey_len = 0;
memzero_explicit(&keys, sizeof(keys));
- if (!IS_ERR(base_hash))
- chcr_free_shash(base_hash);
return -EINVAL;
}
@@ -4490,7 +4352,6 @@ static int chcr_register_alg(void)
if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
a_hash->halg.base.cra_init = chcr_hmac_cra_init;
- a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
a_hash->init = chcr_hmac_init;
a_hash->setkey = chcr_ahash_setkey;
a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 1d693b8436e6..e1e79e5f01e7 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -241,7 +241,6 @@ struct chcr_aead_ctx {
};
struct hmac_ctx {
- struct crypto_shash *base_hash;
u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
};
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 925991526745..edf36f6add52 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -913,11 +913,10 @@ static void hifn_init_pll(struct hifn_device *dev)
else
pllcfg |= HIFN_PLL_REF_CLK_HBI;
- if (hifn_pll_ref[3] != '\0')
- freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
- else {
+ if (hifn_pll_ref[3] == '\0' ||
+ kstrtouint(hifn_pll_ref + 3, 10, &freq)) {
freq = 66;
- dev_info(&dev->pdev->dev, "assuming %uMHz clock speed, override with hifn_pll_ref=%.3s<frequency>\n",
+ dev_info(&dev->pdev->dev, "assuming %u MHz clock speed, override with hifn_pll_ref=%.3s<frequency>\n",
freq, hifn_pll_ref);
}
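
Unlike simple_strtoul(), kstrtouint() rejects empty strings, trailing
garbage and overflow with a negative errno, which is what makes the
fallback above trigger on any malformed hifn_pll_ref value. The calling
pattern, as an illustrative kernel-style fragment:

	unsigned int freq;

	/* 0 on success; freq is untouched on error, so the default
	 * assignment below stays explicit. */
	if (kstrtouint(str, 10, &freq))
		freq = 66;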
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 4137a8bf131f..4835bdebdbb3 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -69,7 +69,6 @@ config CRYPTO_DEV_HISI_HPRE
select CRYPTO_DEV_HISI_QM
select CRYPTO_DH
select CRYPTO_RSA
- select CRYPTO_CURVE25519
select CRYPTO_ECDH
help
Support for HiSilicon HPRE(High Performance RSA Engine)
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
index 45e130b901eb..17eb236e9ee4 100644
--- a/drivers/crypto/hisilicon/debugfs.c
+++ b/drivers/crypto/hisilicon/debugfs.c
@@ -888,6 +888,7 @@ static int qm_diff_regs_init(struct hisi_qm *qm,
dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
ret = PTR_ERR(qm->debug.acc_diff_regs);
qm->debug.acc_diff_regs = NULL;
+ qm->debug.qm_diff_regs = NULL;
return ret;
}
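
The added line follows the usual free-and-clear discipline: once the buffer
behind a cached pointer is released, the pointer itself is set to NULL so a
later uninit path neither double-frees nor dereferences it. Generic sketch
of the pattern:

	free(ctx->regs);
	ctx->regs = NULL;	/* later cleanup sees NULL and skips it */

	/* ... so that a second, guarded cleanup pass is harmless: */
	if (ctx->regs)
		free(ctx->regs);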
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 61b5e1c5d019..21ccf879f70c 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
-#include <crypto/curve25519.h>
#include <crypto/dh.h>
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>
@@ -106,16 +105,6 @@ struct hpre_ecdh_ctx {
dma_addr_t dma_g;
};
-struct hpre_curve25519_ctx {
- /* low address: p->a->k */
- unsigned char *p;
- dma_addr_t dma_p;
-
- /* gx coordinate */
- unsigned char *g;
- dma_addr_t dma_g;
-};
-
struct hpre_ctx {
struct hisi_qp *qp;
struct device *dev;
@@ -129,7 +118,6 @@ struct hpre_ctx {
struct hpre_rsa_ctx rsa;
struct hpre_dh_ctx dh;
struct hpre_ecdh_ctx ecdh;
- struct hpre_curve25519_ctx curve25519;
};
/* for ecc algorithms */
unsigned int curve_id;
@@ -146,7 +134,6 @@ struct hpre_asym_request {
struct akcipher_request *rsa;
struct kpp_request *dh;
struct kpp_request *ecdh;
- struct kpp_request *curve25519;
} areq;
int err;
int req_id;
@@ -1214,8 +1201,7 @@ static void hpre_key_to_big_end(u8 *data, int len)
}
}
-static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
- bool is_ecdh)
+static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
struct device *dev = ctx->dev;
unsigned int sz = ctx->key_sz;
@@ -1224,17 +1210,11 @@ static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
if (is_clear_all)
hisi_qm_stop_qp(ctx->qp);
- if (is_ecdh && ctx->ecdh.p) {
+ if (ctx->ecdh.p) {
/* ecdh: p->a->k->b */
memzero_explicit(ctx->ecdh.p + shift, sz);
dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
ctx->ecdh.p = NULL;
- } else if (!is_ecdh && ctx->curve25519.p) {
- /* curve25519: p->a->k */
- memzero_explicit(ctx->curve25519.p + shift, sz);
- dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
- ctx->curve25519.dma_p);
- ctx->curve25519.p = NULL;
}
hpre_ctx_clear(ctx, is_clear_all);
@@ -1432,7 +1412,7 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
return -EINVAL;
}
- hpre_ecc_clear_ctx(ctx, false, true);
+ hpre_ecc_clear_ctx(ctx, false);
ret = hpre_ecdh_set_param(ctx, &params);
if (ret < 0) {
@@ -1491,11 +1471,13 @@ static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+ /* Do unmap before data processing */
+ hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+
p = sg_virt(areq->dst);
memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
- hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
kpp_request_complete(areq, ret);
atomic64_inc(&dfx[HPRE_RECV_CNT].value);
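
The reordering enforces the streaming-DMA ownership rule: while the mapping
exists the device owns the buffer and CPU reads may see stale cache lines,
so the sg_virt()/memmove() fixups must happen only after the unmap hands
ownership back. Illustrative fragment:

	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE); /* CPU owns buf again */
	memmove(buf, buf + offset, n);                    /* now safe to touch */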
@@ -1681,335 +1663,7 @@ static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- hpre_ecc_clear_ctx(ctx, true, true);
-}
-
-static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
- unsigned int len)
-{
- u8 secret[CURVE25519_KEY_SIZE] = { 0 };
- unsigned int sz = ctx->key_sz;
- const struct ecc_curve *curve;
- unsigned int shift = sz << 1;
- void *p;
-
- /*
- * The key from 'buf' is in little-endian, we should preprocess it as
- * the description in rfc7748: "k[0] &= 248, k[31] &= 127, k[31] |= 64",
- * then convert it to big endian. Only in this way, the result can be
- * the same as the software curve-25519 that exists in crypto.
- */
- memcpy(secret, buf, len);
- curve25519_clamp_secret(secret);
- hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
-
- p = ctx->curve25519.p + sz - len;
-
- curve = ecc_get_curve25519();
-
- /* fill curve parameters */
- fill_curve_param(p, curve->p, len, curve->g.ndigits);
- fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
- memcpy(p + shift, secret, len);
- fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
- memzero_explicit(secret, CURVE25519_KEY_SIZE);
-}
-
-static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
- unsigned int len)
-{
- struct device *dev = ctx->dev;
- unsigned int sz = ctx->key_sz;
- unsigned int shift = sz << 1;
-
- /* p->a->k->gx */
- if (!ctx->curve25519.p) {
- ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
- &ctx->curve25519.dma_p,
- GFP_KERNEL);
- if (!ctx->curve25519.p)
- return -ENOMEM;
- }
-
- ctx->curve25519.g = ctx->curve25519.p + shift + sz;
- ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
-
- hpre_curve25519_fill_curve(ctx, buf, len);
-
- return 0;
-}
-
-static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
- unsigned int len)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = ctx->dev;
- int ret = -EINVAL;
-
- if (len != CURVE25519_KEY_SIZE ||
- !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
- dev_err(dev, "key is null or key len is not 32bytes!\n");
- return ret;
- }
-
- /* Free old secret if any */
- hpre_ecc_clear_ctx(ctx, false, false);
-
- ctx->key_sz = CURVE25519_KEY_SIZE;
- ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
- if (ret) {
- dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
- hpre_ecc_clear_ctx(ctx, false, false);
- return ret;
- }
-
- return 0;
-}
-
-static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
- struct hpre_asym_request *req,
- struct scatterlist *dst,
- struct scatterlist *src)
-{
- struct device *dev = ctx->dev;
- struct hpre_sqe *sqe = &req->req;
- dma_addr_t dma;
-
- dma = le64_to_cpu(sqe->in);
- if (unlikely(dma_mapping_error(dev, dma)))
- return;
-
- if (src && req->src)
- dma_free_coherent(dev, ctx->key_sz, req->src, dma);
-
- dma = le64_to_cpu(sqe->out);
- if (unlikely(dma_mapping_error(dev, dma)))
- return;
-
- if (req->dst)
- dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
- if (dst)
- dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
-}
-
-static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
-{
- struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
- struct hpre_asym_request *req = NULL;
- struct kpp_request *areq;
- u64 overtime_thrhld;
- int ret;
-
- ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
- areq = req->areq.curve25519;
- areq->dst_len = ctx->key_sz;
-
- overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
- if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
- atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
-
- hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
-
- hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
- kpp_request_complete(areq, ret);
-
- atomic64_inc(&dfx[HPRE_RECV_CNT].value);
-}
-
-static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
- struct kpp_request *req)
-{
- struct hpre_asym_request *h_req;
- struct hpre_sqe *msg;
- int req_id;
- void *tmp;
-
- if (unlikely(req->dst_len < ctx->key_sz)) {
- req->dst_len = ctx->key_sz;
- return -EINVAL;
- }
-
- tmp = kpp_request_ctx(req);
- h_req = PTR_ALIGN(tmp, hpre_align_sz());
- h_req->cb = hpre_curve25519_cb;
- h_req->areq.curve25519 = req;
- msg = &h_req->req;
- memset(msg, 0, sizeof(*msg));
- msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->key = cpu_to_le64(ctx->curve25519.dma_p);
-
- msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
- msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
- h_req->ctx = ctx;
-
- req_id = hpre_add_req_to_ctx(h_req);
- if (req_id < 0)
- return -EBUSY;
-
- msg->tag = cpu_to_le16((u16)req_id);
- return 0;
-}
-
-static void hpre_curve25519_src_modulo_p(u8 *ptr)
-{
- int i;
-
- for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
- ptr[i] = 0;
-
- /* The modulus is ptr's last byte minus '0xed'(last byte of p) */
- ptr[i] -= 0xed;
-}
-
-static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len)
-{
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- u8 p[CURVE25519_KEY_SIZE] = { 0 };
- const struct ecc_curve *curve;
- dma_addr_t dma = 0;
- u8 *ptr;
-
- if (len != CURVE25519_KEY_SIZE) {
- dev_err(dev, "sourc_data len is not 32bytes, len = %u!\n", len);
- return -EINVAL;
- }
-
- ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
- if (unlikely(!ptr))
- return -ENOMEM;
-
- scatterwalk_map_and_copy(ptr, data, 0, len, 0);
-
- if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
- dev_err(dev, "gx is null!\n");
- goto err;
- }
-
- /*
- * Src_data(gx) is in little-endian order, MSB in the final byte should
- * be masked as described in RFC7748, then transform it to big-endian
- * form, then hisi_hpre can use the data.
- */
- ptr[31] &= 0x7f;
- hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
-
- curve = ecc_get_curve25519();
-
- fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
-
- /*
- * When src_data equals (2^255 - 19) ~ (2^255 - 1), it is out of p,
- * we get its modulus to p, and then use it.
- */
- if (memcmp(ptr, p, ctx->key_sz) == 0) {
- dev_err(dev, "gx is p!\n");
- goto err;
- } else if (memcmp(ptr, p, ctx->key_sz) > 0) {
- hpre_curve25519_src_modulo_p(ptr);
- }
-
- hpre_req->src = ptr;
- msg->in = cpu_to_le64(dma);
- return 0;
-
-err:
- dma_free_coherent(dev, ctx->key_sz, ptr, dma);
- return -EINVAL;
-}
-
-static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len)
-{
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- dma_addr_t dma;
-
- if (!data || !sg_is_last(data) || len != ctx->key_sz) {
- dev_err(dev, "data or data length is illegal!\n");
- return -EINVAL;
- }
-
- hpre_req->dst = NULL;
- dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, dma))) {
- dev_err(dev, "dma map data err!\n");
- return -ENOMEM;
- }
-
- msg->out = cpu_to_le64(dma);
- return 0;
-}
-
-static int hpre_curve25519_compute_value(struct kpp_request *req)
-{
- struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = ctx->dev;
- void *tmp = kpp_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
- struct hpre_sqe *msg = &hpre_req->req;
- int ret;
-
- ret = hpre_curve25519_msg_request_set(ctx, req);
- if (unlikely(ret)) {
- dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
- return ret;
- }
-
- if (req->src) {
- ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
- if (unlikely(ret)) {
- dev_err(dev, "failed to init src data, ret = %d!\n",
- ret);
- goto clear_all;
- }
- } else {
- msg->in = cpu_to_le64(ctx->curve25519.dma_g);
- }
-
- ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
- if (unlikely(ret)) {
- dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
- goto clear_all;
- }
-
- msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
- ret = hpre_send(ctx, msg);
- if (likely(!ret))
- return -EINPROGRESS;
-
-clear_all:
- hpre_rm_req_from_ctx(hpre_req);
- hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
- return ret;
-}
-
-static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
-
- return ctx->key_sz;
-}
-
-static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
-
- kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
-
- return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
-}
-
-static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
-
- hpre_ecc_clear_ctx(ctx, true, false);
+ hpre_ecc_clear_ctx(ctx, true);
}
static struct akcipher_alg rsa = {
@@ -2091,22 +1745,6 @@ static struct kpp_alg ecdh_curves[] = {
}
};
-static struct kpp_alg curve25519_alg = {
- .set_secret = hpre_curve25519_set_secret,
- .generate_public_key = hpre_curve25519_compute_value,
- .compute_shared_secret = hpre_curve25519_compute_value,
- .max_size = hpre_curve25519_max_size,
- .init = hpre_curve25519_init_tfm,
- .exit = hpre_curve25519_exit_tfm,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "curve25519",
- .cra_driver_name = "hpre-curve25519",
- .cra_module = THIS_MODULE,
- },
-};
-
static int hpre_register_rsa(struct hisi_qm *qm)
{
int ret;
@@ -2188,28 +1826,6 @@ static void hpre_unregister_ecdh(struct hisi_qm *qm)
crypto_unregister_kpp(&ecdh_curves[i]);
}
-static int hpre_register_x25519(struct hisi_qm *qm)
-{
- int ret;
-
- if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
- return 0;
-
- ret = crypto_register_kpp(&curve25519_alg);
- if (ret)
- dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);
-
- return ret;
-}
-
-static void hpre_unregister_x25519(struct hisi_qm *qm)
-{
- if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
- return;
-
- crypto_unregister_kpp(&curve25519_alg);
-}
-
int hpre_algs_register(struct hisi_qm *qm)
{
int ret = 0;
@@ -2232,17 +1848,11 @@ int hpre_algs_register(struct hisi_qm *qm)
if (ret)
goto unreg_dh;
- ret = hpre_register_x25519(qm);
- if (ret)
- goto unreg_ecdh;
-
hpre_available_devs++;
mutex_unlock(&hpre_algs_lock);
return ret;
-unreg_ecdh:
- hpre_unregister_ecdh(qm);
unreg_dh:
hpre_unregister_dh(qm);
unreg_rsa:
@@ -2258,7 +1868,6 @@ void hpre_algs_unregister(struct hisi_qm *qm)
if (--hpre_available_devs)
goto unlock;
- hpre_unregister_x25519(qm);
hpre_unregister_ecdh(qm);
hpre_unregister_dh(qm);
hpre_unregister_rsa(qm);
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index f5b47e5ff48a..b94fecd765ee 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -39,6 +39,7 @@
#define HPRE_HAC_RAS_NFE_ENB 0x301414
#define HPRE_HAC_RAS_FE_ENB 0x301418
#define HPRE_HAC_INT_SET 0x301500
+#define HPRE_AXI_ERROR_MASK GENMASK(21, 10)
#define HPRE_RNG_TIMEOUT_NUM 0x301A34
#define HPRE_CORE_INT_ENABLE 0
#define HPRE_RDCHN_INI_ST 0x301a00
@@ -78,6 +79,11 @@
#define HPRE_PREFETCH_ENABLE (~(BIT(0) | BIT(30)))
#define HPRE_PREFETCH_DISABLE BIT(30)
#define HPRE_SVA_DISABLE_READY (BIT(4) | BIT(8))
+#define HPRE_SVA_PREFTCH_DFX4 0x301144
+#define HPRE_WAIT_SVA_READY 500000
+#define HPRE_READ_SVA_STATUS_TIMES 3
+#define HPRE_WAIT_US_MIN 10
+#define HPRE_WAIT_US_MAX 20
/* clock gate */
#define HPRE_CLKGATE_CTL 0x301a10
@@ -466,6 +472,33 @@ struct hisi_qp *hpre_create_qp(u8 type)
return NULL;
}
+static int hpre_wait_sva_ready(struct hisi_qm *qm)
+{
+ u32 val, try_times = 0;
+ u8 count = 0;
+
+ /*
+ * Read the register value every 10-20us. If the value is 0 for three
+ * consecutive times, the SVA module is ready.
+ */
+ do {
+ val = readl(qm->io_base + HPRE_SVA_PREFTCH_DFX4);
+ if (val)
+ count = 0;
+ else if (++count == HPRE_READ_SVA_STATUS_TIMES)
+ break;
+
+ usleep_range(HPRE_WAIT_US_MIN, HPRE_WAIT_US_MAX);
+ } while (++try_times < HPRE_WAIT_SVA_READY);
+
+ if (try_times == HPRE_WAIT_SVA_READY) {
+ pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static void hpre_config_pasid(struct hisi_qm *qm)
{
u32 val1, val2;
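
hpre_wait_sva_ready() above uses a small debounce: a single zero read is not
trusted, the register must read zero several times in a row. The same logic
in a self-contained userspace sketch:

	#include <stdbool.h>

	static bool wait_quiet(unsigned int (*read_status)(void),
			       unsigned int max_tries, unsigned int need)
	{
		unsigned int tries = 0, quiet = 0;

		do {
			if (read_status())
				quiet = 0;	/* any activity resets the run */
			else if (++quiet == need)
				return true;	/* N consecutive zero reads */
		} while (++tries < max_tries);

		return false;
	}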
@@ -563,7 +596,7 @@ static void disable_flr_of_bme(struct hisi_qm *qm)
writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
}
-static void hpre_open_sva_prefetch(struct hisi_qm *qm)
+static void hpre_close_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -571,20 +604,21 @@ static void hpre_open_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
- /* Enable prefetch */
val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
- val &= HPRE_PREFETCH_ENABLE;
+ val |= HPRE_PREFETCH_DISABLE;
writel(val, qm->io_base + HPRE_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
- val, !(val & HPRE_PREFETCH_DISABLE),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
+ val, !(val & HPRE_SVA_DISABLE_READY),
HPRE_REG_RD_INTVRL_US,
HPRE_REG_RD_TMOUT_US);
if (ret)
- pci_err(qm->pdev, "failed to open sva prefetch\n");
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
+
+ (void)hpre_wait_sva_ready(qm);
}
-static void hpre_close_sva_prefetch(struct hisi_qm *qm)
+static void hpre_open_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -592,16 +626,24 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
+ /* Enable prefetch */
val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
- val |= HPRE_PREFETCH_DISABLE;
+ val &= HPRE_PREFETCH_ENABLE;
writel(val, qm->io_base + HPRE_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
- val, !(val & HPRE_SVA_DISABLE_READY),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
+ val, !(val & HPRE_PREFETCH_DISABLE),
HPRE_REG_RD_INTVRL_US,
HPRE_REG_RD_TMOUT_US);
+ if (ret) {
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+ hpre_close_sva_prefetch(qm);
+ return;
+ }
+
+ ret = hpre_wait_sva_ready(qm);
if (ret)
- pci_err(qm->pdev, "failed to close sva prefetch\n");
+ hpre_close_sva_prefetch(qm);
}
static void hpre_enable_clock_gate(struct hisi_qm *qm)
@@ -721,6 +763,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
/* Config data buffer pasid needed by Kunpeng 920 */
hpre_config_pasid(qm);
+ hpre_open_sva_prefetch(qm);
hpre_enable_clock_gate(qm);
@@ -756,8 +799,7 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
if (enable) {
val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
- val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ val2 = qm->err_info.dev_err.shutdown_mask;
} else {
val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
val2 = 0x0;
@@ -771,38 +813,33 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void hpre_hw_error_disable(struct hisi_qm *qm)
{
- u32 ce, nfe;
-
- ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
- nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
/* disable hpre hw error interrupts */
- writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);
+ writel(err_mask, qm->io_base + HPRE_INT_MASK);
/* disable HPRE block master OOO when nfe occurs on Kunpeng930 */
hpre_master_ooo_ctrl(qm, false);
}
static void hpre_hw_error_enable(struct hisi_qm *qm)
{
- u32 ce, nfe, err_en;
-
- ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
- nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
/* clear HPRE hw error source if having */
- writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
+ writel(err_mask, qm->io_base + HPRE_HAC_SOURCE_INT);
/* configure error type */
- writel(ce, qm->io_base + HPRE_RAS_CE_ENB);
- writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
- writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
+ writel(dev_err->ce, qm->io_base + HPRE_RAS_CE_ENB);
+ writel(dev_err->nfe, qm->io_base + HPRE_RAS_NFE_ENB);
+ writel(dev_err->fe, qm->io_base + HPRE_RAS_FE_ENB);
/* enable HPRE block master OOO when nfe occurs on Kunpeng930 */
hpre_master_ooo_ctrl(qm, true);
/* enable hpre hw error interrupts */
- err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE;
- writel(~err_en, qm->io_base + HPRE_INT_MASK);
+ writel(~err_mask, qm->io_base + HPRE_INT_MASK);
}
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
@@ -1171,7 +1208,7 @@ static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
size_t i, size;
size = ARRAY_SIZE(hpre_cap_query_info);
- hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL);
+ hpre_cap = devm_kcalloc(dev, size, sizeof(*hpre_cap), GFP_KERNEL);
if (!hpre_cap)
return -ENOMEM;
@@ -1357,12 +1394,20 @@ static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
static void hpre_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
- u32 nfe_mask;
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
- nfe_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
writel(nfe_mask & (~err_type), qm->io_base + HPRE_RAS_NFE_ENB);
}
+static void hpre_enable_error_report(struct hisi_qm *qm)
+{
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
+ u32 ce_mask = qm->err_info.dev_err.ce;
+
+ writel(nfe_mask, qm->io_base + HPRE_RAS_NFE_ENB);
+ writel(ce_mask, qm->io_base + HPRE_RAS_CE_ENB);
+}
+
static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
{
u32 value;
@@ -1380,16 +1425,18 @@ static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
err_status = hpre_get_hw_err_status(qm);
if (err_status) {
- if (err_status & qm->err_info.ecc_2bits_mask)
+ if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
qm->err_status.is_dev_ecc_mbit = true;
hpre_log_hw_error(qm, err_status);
- if (err_status & qm->err_info.dev_reset_mask) {
+ if (err_status & qm->err_info.dev_err.reset_mask) {
/* Disable the same error reporting until device is recovered. */
hpre_disable_error_report(qm, err_status);
return ACC_ERR_NEED_RESET;
}
hpre_clear_hw_err_status(qm, err_status);
+ /* Avoid firmware disable error report, re-enable. */
+ hpre_enable_error_report(qm);
}
return ACC_ERR_RECOVERED;
@@ -1400,28 +1447,64 @@ static bool hpre_dev_is_abnormal(struct hisi_qm *qm)
u32 err_status;
err_status = hpre_get_hw_err_status(qm);
- if (err_status & qm->err_info.dev_shutdown_mask)
+ if (err_status & qm->err_info.dev_err.shutdown_mask)
return true;
return false;
}
+static void hpre_disable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+ u32 val;
+
+ val = ~(err_mask & (~HPRE_AXI_ERROR_MASK));
+ writel(val, qm->io_base + HPRE_INT_MASK);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask & (~HPRE_AXI_ERROR_MASK),
+ qm->io_base + HPRE_OOO_SHUTDOWN_SEL);
+}
+
+static void hpre_enable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
+ /* clear axi error source */
+ writel(HPRE_AXI_ERROR_MASK, qm->io_base + HPRE_HAC_SOURCE_INT);
+
+ writel(~err_mask, qm->io_base + HPRE_INT_MASK);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);
+}
+
static void hpre_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
+ struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
+ struct hisi_qm_err_mask *dev_err = &err_info->dev_err;
+
+ qm_err->fe = HPRE_HAC_RAS_FE_ENABLE;
+ qm_err->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
+ qm_err->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
+ qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ qm_err->reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
+ qm_err->ecc_2bits_mask = QM_ECC_MBIT;
+
+ dev_err->fe = HPRE_HAC_RAS_FE_ENABLE;
+ dev_err->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
+ dev_err->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ dev_err->reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_RESET_MASK_CAP, qm->cap_ver);
+ dev_err->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
- err_info->fe = HPRE_HAC_RAS_FE_ENABLE;
- err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
- err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
- err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
- err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
- err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = HPRE_WR_MSI_PORT;
err_info->acpi_rst = "HRST";
}
@@ -1439,6 +1522,8 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.err_info_init = hpre_err_info_init,
.get_err_result = hpre_get_err_result,
.dev_is_abnormal = hpre_dev_is_abnormal,
+ .disable_axi_error = hpre_disable_axi_error,
+ .enable_axi_error = hpre_enable_axi_error,
};
static int hpre_pf_probe_init(struct hpre *hpre)
@@ -1450,8 +1535,6 @@ static int hpre_pf_probe_init(struct hpre *hpre)
if (ret)
return ret;
- hpre_open_sva_prefetch(qm);
-
hisi_qm_dev_err_init(qm);
ret = hpre_show_last_regs_init(qm);
if (ret)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 7c41f9593d03..f8bfff5dd0bd 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -45,6 +45,8 @@
#define QM_SQ_TYPE_MASK GENMASK(3, 0)
#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc).w11) >> 6) & 0x1)
+#define QM_SQC_DISABLE_QP (1U << 6)
+#define QM_XQC_RANDOM_DATA 0xaaaa
/* cqc shift */
#define QM_CQ_HOP_NUM_SHIFT 0
@@ -62,10 +64,10 @@
#define QM_EQE_AEQE_SIZE (2UL << 12)
#define QM_EQC_PHASE_SHIFT 16
-#define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
+#define QM_EQE_PHASE(dw0) (((dw0) >> 16) & 0x1)
#define QM_EQE_CQN_MASK GENMASK(15, 0)
-#define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
+#define QM_AEQE_PHASE(dw0) (((dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT 17
#define QM_AEQE_TYPE_MASK 0xf
#define QM_AEQE_CQN_MASK GENMASK(15, 0)
@@ -145,9 +147,9 @@
#define QM_RAS_CE_TIMES_PER_IRQ 1
#define QM_OOO_SHUTDOWN_SEL 0x1040f8
#define QM_AXI_RRESP_ERR BIT(0)
-#define QM_ECC_MBIT BIT(2)
#define QM_DB_TIMEOUT BIT(10)
#define QM_OF_FIFO_OF BIT(11)
+#define QM_RAS_AXI_ERROR (BIT(0) | BIT(1) | BIT(12))
#define QM_RESET_WAIT_TIMEOUT 400
#define QM_PEH_VENDOR_ID 0x1000d8
@@ -163,7 +165,6 @@
#define ACC_MASTER_TRANS_RETURN 0x300150
#define ACC_MASTER_GLOBAL_CTRL 0x300000
#define ACC_AM_CFG_PORT_WR_EN 0x30001c
-#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
#define ACC_AM_ROB_ECC_INT_STS 0x300104
#define ACC_ROB_ECC_ERR_MULTPL BIT(1)
#define QM_MSI_CAP_ENABLE BIT(16)
@@ -520,7 +521,7 @@ static bool qm_check_dev_error(struct hisi_qm *qm)
return false;
err_status = qm_get_hw_error_status(pf_qm);
- if (err_status & pf_qm->err_info.qm_shutdown_mask)
+ if (err_status & pf_qm->err_info.qm_err.shutdown_mask)
return true;
if (pf_qm->err_ini->dev_is_abnormal)
@@ -912,7 +913,6 @@ static void qm_pm_put_sync(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return;
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
@@ -976,23 +976,23 @@ static void qm_get_complete_eqe_num(struct hisi_qm *qm)
{
struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
struct hisi_qm_poll_data *poll_data = NULL;
+ u32 dw0 = le32_to_cpu(eqe->dw0);
u16 eq_depth = qm->eq_depth;
u16 cqn, eqe_num = 0;
- if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) {
+ if (QM_EQE_PHASE(dw0) != qm->status.eqc_phase) {
atomic64_inc(&qm->debug.dfx.err_irq_cnt);
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
return;
}
- cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
+ cqn = dw0 & QM_EQE_CQN_MASK;
if (unlikely(cqn >= qm->qp_num))
return;
poll_data = &qm->poll_data[cqn];
- while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
- cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
- poll_data->qp_finish_id[eqe_num] = cqn;
+ while (QM_EQE_PHASE(dw0) == qm->status.eqc_phase) {
+ poll_data->qp_finish_id[eqe_num] = dw0 & QM_EQE_CQN_MASK;
eqe_num++;
if (qm->status.eq_head == eq_depth - 1) {
@@ -1006,6 +1006,8 @@ static void qm_get_complete_eqe_num(struct hisi_qm *qm)
if (eqe_num == (eq_depth >> 1) - 1)
break;
+
+ dw0 = le32_to_cpu(eqe->dw0);
}
poll_data->eqe_num = eqe_num;
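
Both event-queue loops rely on the usual phase-bit convention: an entry is
valid when its phase bit matches the consumer's expected phase, and the
expected phase flips each time the head wraps. A minimal userspace model:

	#include <stdbool.h>
	#include <stdint.h>

	#define DEPTH 8

	struct eqe { uint32_t dw0; };			/* bit 16 = phase */
	#define EQE_PHASE(dw0) (((dw0) >> 16) & 0x1)

	static int consume(struct eqe *ring, uint16_t *head, bool *phase)
	{
		int n = 0;
		uint32_t dw0 = ring[*head].dw0;

		while (EQE_PHASE(dw0) == *phase) {
			n++;				/* handle entry here */
			if (*head == DEPTH - 1) {
				*phase = !*phase;	/* wrap flips phase */
				*head = 0;
			} else {
				(*head)++;
			}
			dw0 = ring[*head].dw0;
		}
		return n;
	}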
@@ -1098,15 +1100,15 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
{
struct hisi_qm *qm = data;
struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
+ u32 dw0 = le32_to_cpu(aeqe->dw0);
u16 aeq_depth = qm->aeq_depth;
u32 type, qp_id;
atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
- while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
- type = (le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT) &
- QM_AEQE_TYPE_MASK;
- qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;
+ while (QM_AEQE_PHASE(dw0) == qm->status.aeqc_phase) {
+ type = (dw0 >> QM_AEQE_TYPE_SHIFT) & QM_AEQE_TYPE_MASK;
+ qp_id = dw0 & QM_AEQE_CQN_MASK;
switch (type) {
case QM_EQ_OVERFLOW:
@@ -1134,6 +1136,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
aeqe++;
qm->status.aeq_head++;
}
+ dw0 = le32_to_cpu(aeqe->dw0);
}
qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
@@ -1283,6 +1286,13 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
(factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
}
break;
+ /*
+ * Note: The current logic only needs to handle the above three types.
+ * If new types are added, they must be handled here as well, otherwise
+ * undefined behavior may occur.
+ */
+ default:
+ break;
}
}
@@ -1396,17 +1406,17 @@ static void qm_hw_error_init_v1(struct hisi_qm *qm)
static void qm_hw_error_cfg(struct hisi_qm *qm)
{
- struct hisi_qm_err_info *err_info = &qm->err_info;
+ struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
- qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
+ qm->error_mask = qm_err->nfe | qm_err->ce | qm_err->fe;
/* clear QM hw residual error source */
writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
/* configure error type */
- writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
+ writel(qm_err->ce, qm->io_base + QM_RAS_CE_ENABLE);
writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
- writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
- writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
+ writel(qm_err->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(qm_err->fe, qm->io_base + QM_RAS_FE_ENABLE);
}
static void qm_hw_error_init_v2(struct hisi_qm *qm)
@@ -1435,7 +1445,7 @@ static void qm_hw_error_init_v3(struct hisi_qm *qm)
qm_hw_error_cfg(qm);
/* enable close master ooo when hardware error happened */
- writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
+ writel(qm->err_info.qm_err.shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
irq_unmask = ~qm->error_mask;
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
@@ -1497,6 +1507,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
+ struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
u32 error_status;
error_status = qm_get_hw_error_status(qm);
@@ -1505,17 +1516,16 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
qm->err_status.is_qm_ecc_mbit = true;
qm_log_hw_error(qm, error_status);
- if (error_status & qm->err_info.qm_reset_mask) {
+ if (error_status & qm_err->reset_mask) {
/* Disable the same error reporting until device is recovered. */
- writel(qm->err_info.nfe & (~error_status),
- qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(qm_err->nfe & (~error_status), qm->io_base + QM_RAS_NFE_ENABLE);
return ACC_ERR_NEED_RESET;
}
/* Clear error source if not need reset. */
writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
- writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
- writel(qm->err_info.ce, qm->io_base + QM_RAS_CE_ENABLE);
+ writel(qm_err->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(qm_err->ce, qm->io_base + QM_RAS_CE_ENABLE);
}
return ACC_ERR_RECOVERED;
@@ -2652,10 +2662,10 @@ static int qm_hw_err_isolate(struct hisi_qm *qm)
}
}
list_add(&hw_err->list, &isolate->qm_hw_errs);
- mutex_unlock(&isolate->isolate_lock);
if (count >= isolate->err_threshold)
isolate->is_isolate = true;
+ mutex_unlock(&isolate->isolate_lock);
return 0;
}
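
Moving the unlock below the threshold check makes the list update and the
isolation decision a single atomic step; otherwise a concurrent
qm_hw_err_destroy() could empty the list between counting and setting
is_isolate. The shape of the fix, as a pthread-flavoured sketch
(add_error() is a stand-in helper):

	pthread_mutex_lock(&lock);
	count = add_error(&errs, err);	/* update ...                  */
	if (count >= threshold)
		isolated = true;	/* ... and decide under one lock */
	pthread_mutex_unlock(&lock);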
@@ -2664,12 +2674,10 @@ static void qm_hw_err_destroy(struct hisi_qm *qm)
{
struct qm_hw_err *err, *tmp;
- mutex_lock(&qm->isolate_data.isolate_lock);
list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) {
list_del(&err->list);
kfree(err);
}
- mutex_unlock(&qm->isolate_data.isolate_lock);
}
static enum uacce_dev_state hisi_qm_get_isolate_state(struct uacce_device *uacce)
@@ -2697,10 +2705,12 @@ static int hisi_qm_isolate_threshold_write(struct uacce_device *uacce, u32 num)
if (qm->isolate_data.is_isolate)
return -EPERM;
+ mutex_lock(&qm->isolate_data.isolate_lock);
qm->isolate_data.err_threshold = num;
/* After the policy is updated, need to reset the hardware err list */
qm_hw_err_destroy(qm);
+ mutex_unlock(&qm->isolate_data.isolate_lock);
return 0;
}
@@ -2737,12 +2747,36 @@ static void qm_remove_uacce(struct hisi_qm *qm)
struct uacce_device *uacce = qm->uacce;
if (qm->use_sva) {
+ mutex_lock(&qm->isolate_data.isolate_lock);
qm_hw_err_destroy(qm);
+ mutex_unlock(&qm->isolate_data.isolate_lock);
+
uacce_remove(uacce);
qm->uacce = NULL;
}
}
+static void qm_uacce_api_ver_init(struct hisi_qm *qm)
+{
+ struct uacce_device *uacce = qm->uacce;
+
+ switch (qm->ver) {
+ case QM_HW_V1:
+ uacce->api_ver = HISI_QM_API_VER_BASE;
+ break;
+ case QM_HW_V2:
+ uacce->api_ver = HISI_QM_API_VER2_BASE;
+ break;
+ case QM_HW_V3:
+ case QM_HW_V4:
+ uacce->api_ver = HISI_QM_API_VER3_BASE;
+ break;
+ default:
+ uacce->api_ver = HISI_QM_API_VER5_BASE;
+ break;
+ }
+}
+
static int qm_alloc_uacce(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -2777,13 +2811,6 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
uacce->priv = qm;
if (qm->ver == QM_HW_V1)
- uacce->api_ver = HISI_QM_API_VER_BASE;
- else if (qm->ver == QM_HW_V2)
- uacce->api_ver = HISI_QM_API_VER2_BASE;
- else
- uacce->api_ver = HISI_QM_API_VER3_BASE;
-
- if (qm->ver == QM_HW_V1)
mmio_page_nr = QM_DOORBELL_PAGE_NR;
else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
mmio_page_nr = QM_DOORBELL_PAGE_NR +
@@ -2802,6 +2829,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
qm->uacce = uacce;
+ qm_uacce_api_ver_init(qm);
INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs);
mutex_init(&qm->isolate_data.isolate_lock);
@@ -3004,11 +3032,36 @@ static void qm_put_pci_res(struct hisi_qm *qm)
pci_release_mem_regions(pdev);
}
+static void hisi_mig_region_clear(struct hisi_qm *qm)
+{
+ u32 val;
+
+ /* Clear migration region set of PF */
+ if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V3) {
+ val = readl(qm->io_base + QM_MIG_REGION_SEL);
+ val &= ~QM_MIG_REGION_EN;
+ writel(val, qm->io_base + QM_MIG_REGION_SEL);
+ }
+}
+
+static void hisi_mig_region_enable(struct hisi_qm *qm)
+{
+ u32 val;
+
+ /* Select migration region of PF */
+ if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V3) {
+ val = readl(qm->io_base + QM_MIG_REGION_SEL);
+ val |= QM_MIG_REGION_EN;
+ writel(val, qm->io_base + QM_MIG_REGION_SEL);
+ }
+}
+
static void hisi_qm_pci_uninit(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
pci_free_irq_vectors(pdev);
+ hisi_mig_region_clear(qm);
qm_put_pci_res(qm);
pci_disable_device(pdev);
}
@@ -3180,6 +3233,9 @@ static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
qm_init_eq_aeq_status(qm);
+ /* Before starting the dev, clear the memory and then configure it for device use. */
+ memset(qm->qdma.va, 0, qm->qdma.size);
+
ret = qm_eq_ctx_cfg(qm);
if (ret) {
dev_err(dev, "Set eqc failed!\n");
@@ -3191,9 +3247,13 @@ static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
static int __hisi_qm_start(struct hisi_qm *qm)
{
+ struct device *dev = &qm->pdev->dev;
int ret;
- WARN_ON(!qm->qdma.va);
+ if (!qm->qdma.va) {
+ dev_err(dev, "qm qdma is NULL!\n");
+ return -EINVAL;
+ }
if (qm->fun_type == QM_HW_PF) {
ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
@@ -3267,7 +3327,7 @@ static int qm_restart(struct hisi_qm *qm)
for (i = 0; i < qm->qp_num; i++) {
qp = &qm->qp_array[i];
if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
- qp->is_resetting == true) {
+ qp->is_resetting == true && qp->is_in_kernel == true) {
ret = qm_start_qp_nolock(qp, 0);
if (ret < 0) {
dev_err(dev, "Failed to start qp%d!\n", i);
@@ -3299,24 +3359,44 @@ static void qm_stop_started_qp(struct hisi_qm *qm)
}
/**
- * qm_clear_queues() - Clear all queues memory in a qm.
- * @qm: The qm in which the queues will be cleared.
+ * qm_invalid_queues() - invalidate all queues in use.
+ * @qm: The qm in which the queues will be invalidated.
*
- * This function clears all queues memory in a qm. Reset of accelerator can
- * use this to clear queues.
+ * This function invalidates all queues in use. If a doorbell command is
+ * sent to the device from user space after the device is reset, the
+ * device discards the doorbell command.
*/
-static void qm_clear_queues(struct hisi_qm *qm)
+static void qm_invalid_queues(struct hisi_qm *qm)
{
struct hisi_qp *qp;
+ struct qm_sqc *sqc;
+ struct qm_cqc *cqc;
int i;
+ /*
+ * After a normal stop the queues are no longer used and do not
+ * need to be invalidated.
+ */
+ if (qm->status.stop_reason == QM_NORMAL)
+ return;
+
+ if (qm->status.stop_reason == QM_DOWN)
+ hisi_qm_cache_wb(qm);
+
for (i = 0; i < qm->qp_num; i++) {
qp = &qm->qp_array[i];
- if (qp->is_in_kernel && qp->is_resetting)
+ if (!qp->is_resetting)
+ continue;
+
+ /* Write random data and set the sqc close bit to invalidate the queue. */
+ sqc = qm->sqc + i;
+ cqc = qm->cqc + i;
+ sqc->w8 = cpu_to_le16(QM_XQC_RANDOM_DATA);
+ sqc->w13 = cpu_to_le16(QM_SQC_DISABLE_QP);
+ cqc->w8 = cpu_to_le16(QM_XQC_RANDOM_DATA);
+ if (qp->is_in_kernel)
memset(qp->qdma.va, 0, qp->qdma.size);
}
-
- memset(qm->qdma.va, 0, qm->qdma.size);
}
/**
@@ -3373,7 +3453,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
}
}
- qm_clear_queues(qm);
+ qm_invalid_queues(qm);
qm->status.stop_reason = QM_NORMAL;
err_unlock:
@@ -3618,24 +3698,25 @@ static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
return 0;
}
-static int qm_clear_vft_config(struct hisi_qm *qm)
+static void qm_clear_vft_config(struct hisi_qm *qm)
{
- int ret;
u32 i;
- for (i = 1; i <= qm->vfs_num; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
- if (ret)
- return ret;
- }
- qm->vfs_num = 0;
+ /*
+ * When disabling SR-IOV, clear the configuration of each VF in the hardware
+ * sequentially. Failure to clear a single VF should not affect the clearing
+ * operation of other VFs.
+ */
+ for (i = 1; i <= qm->vfs_num; i++)
+ (void)hisi_qm_set_vft(qm, i, 0, 0);
- return 0;
+ qm->vfs_num = 0;
}
static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
{
struct device *dev = &qm->pdev->dev;
+ struct qm_shaper_factor t_factor;
u32 ir = qos * QM_QOS_RATE;
int ret, total_vfs, i;
@@ -3643,6 +3724,7 @@ static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
if (fun_index > total_vfs)
return -EINVAL;
+ memcpy(&t_factor, &qm->factor[fun_index], sizeof(t_factor));
qm->factor[fun_index].func_qos = qos;
ret = qm_get_shaper_para(ir, &qm->factor[fun_index]);
@@ -3656,11 +3738,21 @@ static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
if (ret) {
dev_err(dev, "type: %d, failed to set shaper vft!\n", i);
- return -EINVAL;
+ goto back_func_qos;
}
}
return 0;
+
+back_func_qos:
+ memcpy(&qm->factor[fun_index], &t_factor, sizeof(t_factor));
+ for (i--; i >= ALG_TYPE_0; i--) {
+ ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
+ if (ret)
+ dev_err(dev, "failed to restore shaper vft during rollback!\n");
+ }
+
+ return -EINVAL;
}
static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
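
The shaper path now unwinds on partial failure instead of returning with
half of the VFTs reprogrammed: the saved factor is restored and the
already-written entries are re-applied best-effort. The generic rollback
shape, as a sketch:

	for (i = 0; i < n; i++)
		if (apply_step(i))
			goto rollback;
	return 0;

rollback:
	while (--i >= 0)
		restore_step(i);	/* best effort; log failures, keep going */
	return -EINVAL;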
@@ -3827,8 +3919,14 @@ static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
}
pdev = container_of(dev, struct pci_dev, dev);
+ if (pci_physfn(pdev) != qm->pdev) {
+ pci_err(qm->pdev, "the pdev input does not match the pf!\n");
+ put_device(dev);
+ return -EINVAL;
+ }
*fun_index = pdev->devfn;
+ put_device(dev);
return 0;
}
@@ -3961,13 +4059,13 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
goto err_put_sync;
}
+ qm->vfs_num = num_vfs;
ret = pci_enable_sriov(pdev, num_vfs);
if (ret) {
pci_err(pdev, "Can't enable VF!\n");
qm_clear_vft_config(qm);
goto err_put_sync;
}
- qm->vfs_num = num_vfs;
pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
@@ -4002,11 +4100,10 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
}
pci_disable_sriov(pdev);
-
- qm->vfs_num = 0;
+ qm_clear_vft_config(qm);
qm_pm_put_sync(qm);
- return qm_clear_vft_config(qm);
+ return 0;
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
@@ -4180,9 +4277,9 @@ static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
!qm->err_status.is_qm_ecc_mbit &&
!qm->err_ini->close_axi_master_ooo) {
nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
- writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
+ writel(nfe_enb & ~qm->err_info.qm_err.ecc_2bits_mask,
qm->io_base + QM_RAS_NFE_ENABLE);
- writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
+ writel(qm->err_info.qm_err.ecc_2bits_mask, qm->io_base + QM_ABNORMAL_INT_SET);
}
}
@@ -4448,9 +4545,6 @@ static void qm_restart_prepare(struct hisi_qm *qm)
{
u32 value;
- if (qm->err_ini->open_sva_prefetch)
- qm->err_ini->open_sva_prefetch(qm);
-
if (qm->ver >= QM_HW_V3)
return;
@@ -4464,12 +4558,12 @@ static void qm_restart_prepare(struct hisi_qm *qm)
qm->io_base + ACC_AM_CFG_PORT_WR_EN);
/* clear dev ecc 2bit error source if having */
- value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
+ value = qm_get_dev_err_status(qm) & qm->err_info.dev_err.ecc_2bits_mask;
if (value && qm->err_ini->clear_dev_hw_err_status)
qm->err_ini->clear_dev_hw_err_status(qm, value);
/* clear QM ecc mbit error source */
- writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+ writel(qm->err_info.qm_err.ecc_2bits_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
/* clear AM Reorder Buffer ecc mbit source */
writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
@@ -4496,6 +4590,34 @@ clear_flags:
qm->err_status.is_dev_ecc_mbit = false;
}
+static void qm_disable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
+ u32 val;
+
+ val = ~(qm->error_mask & (~QM_RAS_AXI_ERROR));
+ writel(val, qm->io_base + QM_ABNORMAL_INT_MASK);
+ if (qm->ver > QM_HW_V2)
+ writel(qm_err->shutdown_mask & (~QM_RAS_AXI_ERROR),
+ qm->io_base + QM_OOO_SHUTDOWN_SEL);
+
+ if (qm->err_ini->disable_axi_error)
+ qm->err_ini->disable_axi_error(qm);
+}
+
+static void qm_enable_axi_error(struct hisi_qm *qm)
+{
+ /* clear axi error source */
+ writel(QM_RAS_AXI_ERROR, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+
+ writel(~qm->error_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
+ if (qm->ver > QM_HW_V2)
+ writel(qm->err_info.qm_err.shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
+
+ if (qm->err_ini->enable_axi_error)
+ qm->err_ini->enable_axi_error(qm);
+}
+
static int qm_controller_reset_done(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -4529,6 +4651,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
qm_restart_prepare(qm);
hisi_qm_dev_err_init(qm);
+ qm_disable_axi_error(qm);
if (qm->err_ini->open_axi_master_ooo)
qm->err_ini->open_axi_master_ooo(qm);
@@ -4551,7 +4674,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
ret = qm_wait_vf_prepare_finish(qm);
if (ret)
pci_err(pdev, "failed to start by vfs in soft reset!\n");
-
+ qm_enable_axi_error(qm);
qm_cmd_init(qm);
qm_restart_done(qm);
@@ -4732,6 +4855,15 @@ flr_done:
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
+static irqreturn_t qm_rsvd_irq(int irq, void *data)
+{
+ struct hisi_qm *qm = data;
+
+ dev_info(&qm->pdev->dev, "Reserved interrupt, ignore!\n");
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t qm_abnormal_irq(int irq, void *data)
{
struct hisi_qm *qm = data;
@@ -4761,8 +4893,6 @@ void hisi_qm_dev_shutdown(struct pci_dev *pdev)
ret = hisi_qm_stop(qm, QM_DOWN);
if (ret)
dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");
-
- hisi_qm_cache_wb(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
@@ -5015,7 +5145,7 @@ static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;
- if (qm->fun_type == QM_HW_VF)
+ if (qm->fun_type == QM_HW_VF && qm->ver < QM_HW_V3)
return;
val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
@@ -5032,17 +5162,28 @@ static int qm_register_abnormal_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;
- if (qm->fun_type == QM_HW_VF)
- return 0;
-
val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
return 0;
-
irq_vector = val & QM_IRQ_VECTOR_MASK;
+
+ /* For VF, this is a reserved interrupt in V3 version. */
+ if (qm->fun_type == QM_HW_VF) {
+ if (qm->ver < QM_HW_V3)
+ return 0;
+
+ ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_rsvd_irq,
+ IRQF_NO_AUTOEN, qm->dev_name, qm);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request reserved irq, ret = %d!\n", ret);
+ return ret;
+ }
+ return 0;
+ }
+
ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
if (ret)
- dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret);
+ dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d!\n", ret);
return ret;
}
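
For HW V3+ VFs the vector exists but should never fire, so it is requested
with IRQF_NO_AUTOEN: the handler is installed while the line stays disabled
until an explicit enable_irq(). Illustrative fragment:

	/* Installed but masked; qm_rsvd_irq() only runs if the line is
	 * ever enabled and actually triggers. */
	ret = request_irq(pci_irq_vector(pdev, vec), qm_rsvd_irq,
			  IRQF_NO_AUTOEN, qm->dev_name, qm);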
@@ -5408,6 +5549,12 @@ static int hisi_qm_pci_init(struct hisi_qm *qm)
pci_set_master(pdev);
num_vec = qm_get_irq_num(qm);
+ if (!num_vec) {
+ dev_err(dev, "Device irq num is zero!\n");
+ ret = -EINVAL;
+ goto err_get_pci_res;
+ }
+ num_vec = roundup_pow_of_two(num_vec);
ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
if (ret < 0) {
dev_err(dev, "Failed to enable MSI vectors!\n");
@@ -5630,6 +5777,7 @@ int hisi_qm_init(struct hisi_qm *qm)
goto err_free_qm_memory;
qm_cmd_init(qm);
+ hisi_mig_region_enable(qm);
return 0;
@@ -5768,6 +5916,7 @@ static int qm_rebuild_for_resume(struct hisi_qm *qm)
}
qm_cmd_init(qm);
+ hisi_mig_region_enable(qm);
hisi_qm_dev_err_init(qm);
/* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */
writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index ef0cb733c92c..129cb6faa0b7 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -922,7 +922,8 @@ static int sec_hw_init(struct sec_dev_info *info)
struct iommu_domain *domain;
u32 sec_ipv4_mask = 0;
u32 sec_ipv6_mask[10] = {};
- u32 i, ret;
+ int ret;
+ u32 i;
domain = iommu_get_domain_for_dev(info->dev);
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 703920b49c7c..81d0beda93b2 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -7,6 +7,12 @@
#include <linux/hisi_acc_qm.h>
#include "sec_crypto.h"
+#define SEC_PBUF_SZ 512
+#define SEC_MAX_MAC_LEN 64
+#define SEC_IV_SIZE 24
+#define SEC_SGE_NR_NUM 4
+#define SEC_SGL_ALIGN_SIZE 64
+
/* Algorithm resource per hardware SEC queue */
struct sec_alg_res {
u8 *pbuf;
@@ -20,6 +26,40 @@ struct sec_alg_res {
u16 depth;
};
+struct sec_hw_sge {
+ dma_addr_t buf;
+ void *page_ctrl;
+ __le32 len;
+ __le32 pad;
+ __le32 pad0;
+ __le32 pad1;
+};
+
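+/*
+ * Driver-built hardware SGL: a chain header plus up to SEC_SGE_NR_NUM data
+ * entries, aligned to SEC_SGL_ALIGN_SIZE bytes for the DMA engine.
+ */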
+struct sec_hw_sgl {
+ dma_addr_t next_dma;
+ __le16 entry_sum_in_chain;
+ __le16 entry_sum_in_sgl;
+ __le16 entry_length_in_sgl;
+ __le16 pad0;
+ __le64 pad1[5];
+ struct sec_hw_sgl *next;
+ struct sec_hw_sge sge_entries[SEC_SGE_NR_NUM];
+} __aligned(SEC_SGL_ALIGN_SIZE);
+
+struct sec_src_dst_buf {
+ struct sec_hw_sgl in;
+ struct sec_hw_sgl out;
+};
+
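+/* Per-request DMA buffers used when no per-queue resource is held (req_id < 0). */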
+struct sec_request_buf {
+ union {
+ struct sec_src_dst_buf data_buf;
+ __u8 pbuf[SEC_PBUF_SZ];
+ };
+ dma_addr_t in_dma;
+ dma_addr_t out_dma;
+};
+
/* Cipher request of SEC private */
struct sec_cipher_req {
struct hisi_acc_hw_sgl *c_out;
@@ -29,6 +69,7 @@ struct sec_cipher_req {
struct skcipher_request *sk_req;
u32 c_len;
bool encrypt;
+ __u8 c_ivin_buf[SEC_IV_SIZE];
};
struct sec_aead_req {
@@ -37,6 +78,13 @@ struct sec_aead_req {
u8 *a_ivin;
dma_addr_t a_ivin_dma;
struct aead_request *aead_req;
+ __u8 a_ivin_buf[SEC_IV_SIZE];
+ __u8 out_mac_buf[SEC_MAX_MAC_LEN];
+};
+
+struct sec_instance_backlog {
+ struct list_head list;
+ spinlock_t lock;
};
/* SEC request of Crypto */
@@ -55,15 +103,17 @@ struct sec_req {
dma_addr_t in_dma;
struct sec_cipher_req c_req;
struct sec_aead_req aead_req;
- struct list_head backlog_head;
+ struct crypto_async_request *base;
int err_type;
int req_id;
u32 flag;
- /* Status of the SEC request */
- bool fake_busy;
bool use_pbuf;
+
+ struct list_head list;
+ struct sec_instance_backlog *backlog;
+ struct sec_request_buf buf;
};
/**
@@ -119,9 +169,11 @@ struct sec_qp_ctx {
struct sec_alg_res *res;
struct sec_ctx *ctx;
spinlock_t req_lock;
- struct list_head backlog;
+ spinlock_t id_lock;
struct hisi_acc_sgl_pool *c_in_pool;
struct hisi_acc_sgl_pool *c_out_pool;
+ struct sec_instance_backlog backlog;
+ u16 send_head;
};
enum sec_alg_type {
@@ -139,9 +191,6 @@ struct sec_ctx {
/* Half queues for encipher, and half for decipher */
u32 hlf_q_num;
- /* Threshold for fake busy, trigger to return -EBUSY to user */
- u32 fake_req_limit;
-
/* Current cyclic index to select a queue for encipher */
atomic_t enc_qcyclic;
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 8ea5305bc320..31590d01139a 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -67,7 +67,6 @@
#define SEC_MAX_CCM_AAD_LEN 65279
#define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth))
-#define SEC_PBUF_SZ 512
#define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET (SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \
@@ -102,6 +101,8 @@
#define IV_LAST_BYTE_MASK 0xFF
#define IV_CTR_INIT 0x1
#define IV_BYTE_OFFSET 0x8
+#define SEC_GCM_MIN_AUTH_SZ 0x8
+#define SEC_RETRY_MAX_CNT 5U
static DEFINE_MUTEX(sec_algs_lock);
static unsigned int sec_available_devs;
@@ -116,40 +117,19 @@ struct sec_aead {
struct aead_alg alg;
};
-/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
-static inline u32 sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
-{
- if (req->c_req.encrypt)
- return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
- ctx->hlf_q_num;
-
- return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
- ctx->hlf_q_num;
-}
-
-static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
-{
- if (req->c_req.encrypt)
- atomic_dec(&ctx->enc_qcyclic);
- else
- atomic_dec(&ctx->dec_qcyclic);
-}
+static int sec_aead_soft_crypto(struct sec_ctx *ctx,
+ struct aead_request *aead_req,
+ bool encrypt);
+static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
+ struct skcipher_request *sreq, bool encrypt);
static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
int req_id;
- spin_lock_bh(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->id_lock);
req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
- spin_unlock_bh(&qp_ctx->req_lock);
- if (unlikely(req_id < 0)) {
- dev_err(req->ctx->dev, "alloc req id fail!\n");
- return req_id;
- }
-
- req->qp_ctx = qp_ctx;
- qp_ctx->req_list[req_id] = req;
-
+ spin_unlock_bh(&qp_ctx->id_lock);
return req_id;
}
@@ -163,12 +143,9 @@ static void sec_free_req_id(struct sec_req *req)
return;
}
- qp_ctx->req_list[req_id] = NULL;
- req->qp_ctx = NULL;
-
- spin_lock_bh(&qp_ctx->req_lock);
+ spin_lock_bh(&qp_ctx->id_lock);
idr_remove(&qp_ctx->req_idr, req_id);
- spin_unlock_bh(&qp_ctx->req_lock);
+ spin_unlock_bh(&qp_ctx->id_lock);
}
static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
@@ -229,6 +206,90 @@ static int sec_cb_status_check(struct sec_req *req,
return 0;
}
+static int qp_send_message(struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int ret;
+
+ if (atomic_read(&qp_ctx->qp->qp_status.used) == qp_ctx->qp->sq_depth - 1)
+ return -EBUSY;
+
+ spin_lock_bh(&qp_ctx->req_lock);
+ if (atomic_read(&qp_ctx->qp->qp_status.used) == qp_ctx->qp->sq_depth - 1) {
+ spin_unlock_bh(&qp_ctx->req_lock);
+ return -EBUSY;
+ }
+
+ if (qp_ctx->ctx->type_supported == SEC_BD_TYPE2) {
+ req->sec_sqe.type2.tag = cpu_to_le16((u16)qp_ctx->send_head);
+ qp_ctx->req_list[qp_ctx->send_head] = req;
+ }
+
+ ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
+ if (ret) {
+ spin_unlock_bh(&qp_ctx->req_lock);
+ return ret;
+ }
+ if (qp_ctx->ctx->type_supported == SEC_BD_TYPE2)
+ qp_ctx->send_head = (qp_ctx->send_head + 1) % qp_ctx->qp->sq_depth;
+
+ spin_unlock_bh(&qp_ctx->req_lock);
+
+ atomic64_inc(&req->ctx->sec->debug.dfx.send_cnt);
+ return -EINPROGRESS;
+}
+
+static void sec_alg_send_backlog_soft(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
+{
+ struct sec_req *req, *tmp;
+ int ret;
+
+ list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
+ list_del(&req->list);
+ ctx->req_op->buf_unmap(ctx, req);
+ if (req->req_id >= 0)
+ sec_free_req_id(req);
+
+ if (ctx->alg_type == SEC_AEAD)
+ ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req,
+ req->c_req.encrypt);
+ else
+ ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req,
+ req->c_req.encrypt);
+
+ /* Complete with -EINPROGRESS first to wake the waiting thread, then return the final errno. */
+ crypto_request_complete(req->base, -EINPROGRESS);
+ crypto_request_complete(req->base, ret);
+ }
+}
+
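+/*
+ * Retry backlogged requests against the hardware ring; stop on -EBUSY and
+ * fall back to software for everything left on any other send failure.
+ */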
+static void sec_alg_send_backlog(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
+{
+ struct sec_req *req, *tmp;
+ int ret;
+
+ spin_lock_bh(&qp_ctx->backlog.lock);
+ list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) {
+ ret = qp_send_message(req);
+ switch (ret) {
+ case -EINPROGRESS:
+ list_del(&req->list);
+ crypto_request_complete(req->base, -EINPROGRESS);
+ break;
+ case -EBUSY:
+ /* Device is busy; stop sending further requests. */
+ goto unlock;
+ default:
+ /* On other errors, release resources and complete all backlogged requests in software. */
+ sec_alg_send_backlog_soft(ctx, qp_ctx);
+ goto unlock;
+ }
+ }
+
+unlock:
+ spin_unlock_bh(&qp_ctx->backlog.lock);
+}
+
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
@@ -273,40 +334,54 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
ctx->req_op->callback(ctx, req, err);
}
-static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
+static int sec_alg_send_message_retry(struct sec_req *req)
{
- struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int ctr = 0;
int ret;
- if (ctx->fake_req_limit <=
- atomic_read(&qp_ctx->qp->qp_status.used) &&
- !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -EBUSY;
+ do {
+ ret = qp_send_message(req);
+ } while (ret == -EBUSY && ctr++ < SEC_RETRY_MAX_CNT);
- spin_lock_bh(&qp_ctx->req_lock);
- ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
- if (ctx->fake_req_limit <=
- atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
- list_add_tail(&req->backlog_head, &qp_ctx->backlog);
- atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
- atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
- spin_unlock_bh(&qp_ctx->req_lock);
+ return ret;
+}
+
+static int sec_alg_try_enqueue(struct sec_req *req)
+{
+ /* Check if any request is already backlogged */
+ if (!list_empty(&req->backlog->list))
return -EBUSY;
- }
- spin_unlock_bh(&qp_ctx->req_lock);
- if (unlikely(ret == -EBUSY))
- return -ENOBUFS;
+ /* Try to enqueue to HW ring */
+ return qp_send_message(req);
+}
- if (likely(!ret)) {
- ret = -EINPROGRESS;
- atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
- }
+
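+/*
+ * Try to enqueue to hardware; if the ring is busy, re-check under the
+ * backlog lock and park the request on the per-qp backlog list.
+ */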
+static int sec_alg_send_message_maybacklog(struct sec_req *req)
+{
+ int ret;
+
+ ret = sec_alg_try_enqueue(req);
+ if (ret != -EBUSY)
+ return ret;
+
+ spin_lock_bh(&req->backlog->lock);
+ ret = sec_alg_try_enqueue(req);
+ if (ret == -EBUSY)
+ list_add_tail(&req->list, &req->backlog->list);
+ spin_unlock_bh(&req->backlog->lock);
return ret;
}
-/* Get DMA memory resources */
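+/*
+ * Backloggable requests are parked on the per-qp backlog when the ring is
+ * full; all others retry the send up to SEC_RETRY_MAX_CNT times.
+ */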
+static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
+{
+ if (req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ return sec_alg_send_message_maybacklog(req);
+
+ return sec_alg_send_message_retry(req);
+}
+
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
u16 q_depth = res->depth;
@@ -558,7 +633,10 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id)
spin_lock_init(&qp_ctx->req_lock);
idr_init(&qp_ctx->req_idr);
- INIT_LIST_HEAD(&qp_ctx->backlog);
+ spin_lock_init(&qp_ctx->backlog.lock);
+ spin_lock_init(&qp_ctx->id_lock);
+ INIT_LIST_HEAD(&qp_ctx->backlog.list);
+ qp_ctx->send_head = 0;
ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx);
if (ret)
@@ -602,9 +680,6 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
ctx->hlf_q_num = sec->ctx_q_num >> 1;
ctx->pbuf_supported = ctx->sec->iommu_used;
-
- /* Half of queue depth is taken as fake requests limit in the queue. */
- ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1;
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
GFP_KERNEL);
if (!ctx->qp_ctx) {
@@ -706,7 +781,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
int ret;
ctx->alg_type = SEC_SKCIPHER;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
+ crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct sec_req));
ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
pr_err("get error skcipher iv size!\n");
@@ -883,24 +958,25 @@ GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
struct scatterlist *src)
{
- struct sec_aead_req *a_req = &req->aead_req;
- struct aead_request *aead_req = a_req->aead_req;
+ struct aead_request *aead_req = req->aead_req.aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct sec_request_buf *buf = &req->buf;
struct device *dev = ctx->dev;
int copy_size, pbuf_length;
int req_id = req->req_id;
struct crypto_aead *tfm;
+ u8 *mac_offset, *pbuf;
size_t authsize;
- u8 *mac_offset;
if (ctx->alg_type == SEC_AEAD)
copy_size = aead_req->cryptlen + aead_req->assoclen;
else
copy_size = c_req->c_len;
- pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
- qp_ctx->res[req_id].pbuf, copy_size);
+
+ pbuf = req->req_id < 0 ? buf->pbuf : qp_ctx->res[req_id].pbuf;
+ pbuf_length = sg_copy_to_buffer(src, sg_nents(src), pbuf, copy_size);
if (unlikely(pbuf_length != copy_size)) {
dev_err(dev, "copy src data to pbuf error!\n");
return -EINVAL;
@@ -908,8 +984,17 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
tfm = crypto_aead_reqtfm(aead_req);
authsize = crypto_aead_authsize(tfm);
- mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize;
- memcpy(a_req->out_mac, mac_offset, authsize);
+ mac_offset = pbuf + copy_size - authsize;
+ memcpy(req->aead_req.out_mac, mac_offset, authsize);
+ }
+
+ if (req->req_id < 0) {
+ buf->in_dma = dma_map_single(dev, buf->pbuf, SEC_PBUF_SZ, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, buf->in_dma)))
+ return -ENOMEM;
+
+ buf->out_dma = buf->in_dma;
+ return 0;
}
req->in_dma = qp_ctx->res[req_id].pbuf_dma;
@@ -924,6 +1009,7 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
struct aead_request *aead_req = req->aead_req.aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct sec_request_buf *buf = &req->buf;
int copy_size, pbuf_length;
int req_id = req->req_id;
@@ -932,10 +1018,16 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
else
copy_size = c_req->c_len;
- pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
- qp_ctx->res[req_id].pbuf, copy_size);
+ if (req->req_id < 0)
+ pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), buf->pbuf, copy_size);
+ else
+ pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), qp_ctx->res[req_id].pbuf,
+ copy_size);
if (unlikely(pbuf_length != copy_size))
dev_err(ctx->dev, "copy pbuf data to dst error!\n");
+
+ if (req->req_id < 0)
+ dma_unmap_single(ctx->dev, buf->in_dma, SEC_PBUF_SZ, DMA_BIDIRECTIONAL);
}
static int sec_aead_mac_init(struct sec_aead_req *req)
@@ -957,14 +1049,95 @@ static int sec_aead_mac_init(struct sec_aead_req *req)
return 0;
}
-static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
- struct scatterlist *src, struct scatterlist *dst)
+static void fill_sg_to_hw_sge(struct scatterlist *sgl, struct sec_hw_sge *hw_sge)
+{
+ hw_sge->buf = sg_dma_address(sgl);
+ hw_sge->len = cpu_to_le32(sg_dma_len(sgl));
+ hw_sge->page_ctrl = sg_virt(sgl);
+}
+
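+/*
+ * Map a scatterlist of at most SEC_SGE_NR_NUM entries into a driver-built
+ * hardware SGL, then map that SGL itself for DMA.
+ */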
+static int sec_cipher_to_hw_sgl(struct device *dev, struct scatterlist *src,
+ struct sec_hw_sgl *src_in, dma_addr_t *hw_sgl_dma,
+ int dma_dir)
+{
+ struct sec_hw_sge *curr_hw_sge = src_in->sge_entries;
+ u32 i, sg_n, sg_n_mapped;
+ struct scatterlist *sg;
+ u32 sge_var = 0;
+
+ sg_n = sg_nents(src);
+ sg_n_mapped = dma_map_sg(dev, src, sg_n, dma_dir);
+ if (unlikely(!sg_n_mapped)) {
+ dev_err(dev, "dma mapping for SG error!\n");
+ return -EINVAL;
+ } else if (unlikely(sg_n_mapped > SEC_SGE_NR_NUM)) {
+ dev_err(dev, "the number of entries in input scatterlist error!\n");
+ dma_unmap_sg(dev, src, sg_n, dma_dir);
+ return -EINVAL;
+ }
+
+ for_each_sg(src, sg, sg_n_mapped, i) {
+ fill_sg_to_hw_sge(sg, curr_hw_sge);
+ curr_hw_sge++;
+ sge_var++;
+ }
+
+ src_in->entry_sum_in_sgl = cpu_to_le16(sge_var);
+ src_in->entry_sum_in_chain = cpu_to_le16(SEC_SGE_NR_NUM);
+ src_in->entry_length_in_sgl = cpu_to_le16(SEC_SGE_NR_NUM);
+ *hw_sgl_dma = dma_map_single(dev, src_in, sizeof(struct sec_hw_sgl), dma_dir);
+ if (unlikely(dma_mapping_error(dev, *hw_sgl_dma))) {
+ dma_unmap_sg(dev, src, sg_n, dma_dir);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void sec_cipher_put_hw_sgl(struct device *dev, struct scatterlist *src,
+ dma_addr_t src_in, int dma_dir)
+{
+ dma_unmap_single(dev, src_in, sizeof(struct sec_hw_sgl), dma_dir);
+ dma_unmap_sg(dev, src, sg_nents(src), dma_dir);
+}
+
+static int sec_cipher_map_sgl(struct device *dev, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
+{
+ struct sec_hw_sgl *src_in = &req->buf.data_buf.in;
+ struct sec_hw_sgl *dst_out = &req->buf.data_buf.out;
+ int ret;
+
+ if (dst == src) {
+ ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma,
+ DMA_BIDIRECTIONAL);
+ req->buf.out_dma = req->buf.in_dma;
+ return ret;
+ }
+
+ ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma, DMA_TO_DEVICE);
+ if (unlikely(ret))
+ return ret;
+
+ ret = sec_cipher_to_hw_sgl(dev, dst, dst_out, &req->buf.out_dma,
+ DMA_FROM_DEVICE);
+ if (unlikely(ret)) {
+ sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sec_cipher_map_inner(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
{
struct sec_cipher_req *c_req = &req->c_req;
struct sec_aead_req *a_req = &req->aead_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
struct sec_alg_res *res = &qp_ctx->res[req->req_id];
struct device *dev = ctx->dev;
+ enum dma_data_direction src_direction;
int ret;
if (req->use_pbuf) {
@@ -977,10 +1150,9 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
a_req->out_mac_dma = res->pbuf_dma +
SEC_PBUF_MAC_OFFSET;
}
- ret = sec_cipher_pbuf_map(ctx, req, src);
-
- return ret;
+ return sec_cipher_pbuf_map(ctx, req, src);
}
+
c_req->c_ivin = res->c_ivin;
c_req->c_ivin_dma = res->c_ivin_dma;
if (ctx->alg_type == SEC_AEAD) {
@@ -990,10 +1162,11 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
a_req->out_mac_dma = res->out_mac_dma;
}
+ src_direction = dst == src ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
qp_ctx->c_in_pool,
req->req_id,
- &req->in_dma);
+ &req->in_dma, src_direction);
if (IS_ERR(req->in)) {
dev_err(dev, "fail to dma map input sgl buffers!\n");
return PTR_ERR(req->in);
@@ -1003,7 +1176,7 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
ret = sec_aead_mac_init(a_req);
if (unlikely(ret)) {
dev_err(dev, "fail to init mac data for ICV!\n");
- hisi_acc_sg_buf_unmap(dev, src, req->in);
+ hisi_acc_sg_buf_unmap(dev, src, req->in, src_direction);
return ret;
}
}
@@ -1015,11 +1188,12 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
qp_ctx->c_out_pool,
req->req_id,
- &c_req->c_out_dma);
+ &c_req->c_out_dma,
+ DMA_FROM_DEVICE);
if (IS_ERR(c_req->c_out)) {
dev_err(dev, "fail to dma map output sgl buffers!\n");
- hisi_acc_sg_buf_unmap(dev, src, req->in);
+ hisi_acc_sg_buf_unmap(dev, src, req->in, src_direction);
return PTR_ERR(c_req->c_out);
}
}
@@ -1027,19 +1201,108 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
return 0;
}
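+/*
+ * Requests holding a queue resource (req_id >= 0) use the pre-allocated
+ * pools; otherwise the per-request IV/MAC buffers and SGLs are mapped here.
+ */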
+static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
+{
+ struct sec_aead_req *a_req = &req->aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ bool is_aead = (ctx->alg_type == SEC_AEAD);
+ struct device *dev = ctx->dev;
+ int ret = -ENOMEM;
+
+ if (req->req_id >= 0)
+ return sec_cipher_map_inner(ctx, req, src, dst);
+
+ c_req->c_ivin = c_req->c_ivin_buf;
+ c_req->c_ivin_dma = dma_map_single(dev, c_req->c_ivin,
+ SEC_IV_SIZE, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, c_req->c_ivin_dma)))
+ return -ENOMEM;
+
+ if (is_aead) {
+ a_req->a_ivin = a_req->a_ivin_buf;
+ a_req->out_mac = a_req->out_mac_buf;
+ a_req->a_ivin_dma = dma_map_single(dev, a_req->a_ivin,
+ SEC_IV_SIZE, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, a_req->a_ivin_dma)))
+ goto free_c_ivin_dma;
+
+ a_req->out_mac_dma = dma_map_single(dev, a_req->out_mac,
+ SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, a_req->out_mac_dma)))
+ goto free_a_ivin_dma;
+ }
+ if (req->use_pbuf) {
+ ret = sec_cipher_pbuf_map(ctx, req, src);
+ if (unlikely(ret))
+ goto free_out_mac_dma;
+
+ return 0;
+ }
+
+ if (!c_req->encrypt && is_aead) {
+ ret = sec_aead_mac_init(a_req);
+ if (unlikely(ret)) {
+ dev_err(dev, "fail to init mac data for ICV!\n");
+ goto free_out_mac_dma;
+ }
+ }
+
+ ret = sec_cipher_map_sgl(dev, req, src, dst);
+ if (unlikely(ret)) {
+ dev_err(dev, "fail to dma map input sgl buffers!\n");
+ goto free_out_mac_dma;
+ }
+
+ return 0;
+
+free_out_mac_dma:
+ if (is_aead)
+ dma_unmap_single(dev, a_req->out_mac_dma, SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
+free_a_ivin_dma:
+ if (is_aead)
+ dma_unmap_single(dev, a_req->a_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
+free_c_ivin_dma:
+ dma_unmap_single(dev, c_req->c_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
+ return ret;
+}
+
static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
struct scatterlist *src, struct scatterlist *dst)
{
+ struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct device *dev = ctx->dev;
+ if (req->req_id >= 0) {
+ if (req->use_pbuf) {
+ sec_cipher_pbuf_unmap(ctx, req, dst);
+ } else {
+ if (dst != src) {
+ hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out, DMA_FROM_DEVICE);
+ hisi_acc_sg_buf_unmap(dev, src, req->in, DMA_TO_DEVICE);
+ } else {
+ hisi_acc_sg_buf_unmap(dev, src, req->in, DMA_BIDIRECTIONAL);
+ }
+ }
+ return;
+ }
+
if (req->use_pbuf) {
sec_cipher_pbuf_unmap(ctx, req, dst);
} else {
- if (dst != src)
- hisi_acc_sg_buf_unmap(dev, src, req->in);
+ if (dst != src) {
+ sec_cipher_put_hw_sgl(dev, dst, req->buf.out_dma, DMA_FROM_DEVICE);
+ sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE);
+ } else {
+ sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_BIDIRECTIONAL);
+ }
+ }
- hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
+ dma_unmap_single(dev, c_req->c_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
+ if (ctx->alg_type == SEC_AEAD) {
+ dma_unmap_single(dev, a_req->a_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE);
+ dma_unmap_single(dev, a_req->out_mac_dma, SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL);
}
}
@@ -1257,8 +1520,15 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
- sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
- sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+ if (req->req_id < 0) {
+ sec_sqe->type2.data_src_addr = cpu_to_le64(req->buf.in_dma);
+ sec_sqe->type2.data_dst_addr = cpu_to_le64(req->buf.out_dma);
+ } else {
+ sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
+ sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+ }
+ if (sec_sqe->type2.data_src_addr != sec_sqe->type2.data_dst_addr)
+ de = 0x1 << SEC_DE_OFFSET;
sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
SEC_CMODE_OFFSET);
@@ -1284,13 +1554,10 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
sec_sqe->sdm_addr_type |= da_type;
scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
- if (req->in_dma != c_req->c_out_dma)
- de = 0x1 << SEC_DE_OFFSET;
sec_sqe->sds_sa_type = (de | scene | sa_type);
sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
- sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
return 0;
}
@@ -1307,8 +1574,15 @@ static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
- sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
- sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+ if (req->req_id < 0) {
+ sec_sqe3->data_src_addr = cpu_to_le64(req->buf.in_dma);
+ sec_sqe3->data_dst_addr = cpu_to_le64(req->buf.out_dma);
+ } else {
+ sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
+ sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+ }
+ if (sec_sqe3->data_src_addr != sec_sqe3->data_dst_addr)
+ bd_param |= 0x1 << SEC_DE_OFFSET_V3;
sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
c_ctx->c_mode;
@@ -1334,8 +1608,6 @@ static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
}
bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;
- if (req->in_dma != c_req->c_out_dma)
- bd_param |= 0x1 << SEC_DE_OFFSET_V3;
bd_param |= SEC_BD_TYPE3;
sec_sqe3->bd_param = cpu_to_le32(bd_param);
@@ -1367,15 +1639,12 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
size_t sz;
u8 *iv;
- if (req->c_req.encrypt)
- sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
- else
- sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;
-
if (alg_type == SEC_SKCIPHER) {
+ sgl = req->c_req.encrypt ? sk_req->dst : sk_req->src;
iv = sk_req->iv;
cryptlen = sk_req->cryptlen;
} else {
+ sgl = req->c_req.encrypt ? aead_req->dst : aead_req->src;
iv = aead_req->iv;
cryptlen = aead_req->cryptlen;
}
@@ -1386,57 +1655,26 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
if (unlikely(sz != iv_size))
dev_err(req->ctx->dev, "copy output iv error!\n");
} else {
- sz = cryptlen / iv_size;
- if (cryptlen % iv_size)
- sz += 1;
+ sz = (cryptlen + iv_size - 1) / iv_size;
ctr_iv_inc(iv, iv_size, sz);
}
}
-static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
- struct sec_qp_ctx *qp_ctx)
-{
- struct sec_req *backlog_req = NULL;
-
- spin_lock_bh(&qp_ctx->req_lock);
- if (ctx->fake_req_limit >=
- atomic_read(&qp_ctx->qp->qp_status.used) &&
- !list_empty(&qp_ctx->backlog)) {
- backlog_req = list_first_entry(&qp_ctx->backlog,
- typeof(*backlog_req), backlog_head);
- list_del(&backlog_req->backlog_head);
- }
- spin_unlock_bh(&qp_ctx->req_lock);
-
- return backlog_req;
-}
-
static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
int err)
{
- struct skcipher_request *sk_req = req->c_req.sk_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct skcipher_request *backlog_sk_req;
- struct sec_req *backlog_req;
- sec_free_req_id(req);
+ if (req->req_id >= 0)
+ sec_free_req_id(req);
/* IV output at encryption of CBC/CTR mode */
if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
sec_update_iv(req, SEC_SKCIPHER);
- while (1) {
- backlog_req = sec_back_req_clear(ctx, qp_ctx);
- if (!backlog_req)
- break;
-
- backlog_sk_req = backlog_req->c_req.sk_req;
- skcipher_request_complete(backlog_sk_req, -EINPROGRESS);
- atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
- }
-
- skcipher_request_complete(sk_req, err);
+ crypto_request_complete(req->base, err);
+ sec_alg_send_backlog(ctx, qp_ctx);
}
static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
@@ -1675,21 +1913,14 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
struct aead_request *a_req = req->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
size_t authsize = crypto_aead_authsize(tfm);
- struct sec_aead_req *aead_req = &req->aead_req;
- struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct aead_request *backlog_aead_req;
- struct sec_req *backlog_req;
size_t sz;
- if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
- sec_update_iv(req, SEC_AEAD);
-
- /* Copy output mac */
- if (!err && c_req->encrypt) {
- struct scatterlist *sgl = a_req->dst;
+ if (!err && req->c_req.encrypt) {
+ if (c->c_ctx.c_mode == SEC_CMODE_CBC)
+ sec_update_iv(req, SEC_AEAD);
- sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac,
+ sz = sg_pcopy_from_buffer(a_req->dst, sg_nents(a_req->dst), req->aead_req.out_mac,
authsize, a_req->cryptlen + a_req->assoclen);
if (unlikely(sz != authsize)) {
dev_err(c->dev, "copy out mac err!\n");
@@ -1697,48 +1928,37 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
}
}
- sec_free_req_id(req);
-
- while (1) {
- backlog_req = sec_back_req_clear(c, qp_ctx);
- if (!backlog_req)
- break;
+ if (req->req_id >= 0)
+ sec_free_req_id(req);
- backlog_aead_req = backlog_req->aead_req.aead_req;
- aead_request_complete(backlog_aead_req, -EINPROGRESS);
- atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
- }
-
- aead_request_complete(a_req, err);
+ crypto_request_complete(req->base, err);
+ sec_alg_send_backlog(c, qp_ctx);
}
-static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
+static void sec_request_uninit(struct sec_req *req)
{
- sec_free_req_id(req);
- sec_free_queue_id(ctx, req);
+ if (req->req_id >= 0)
+ sec_free_req_id(req);
}
static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
struct sec_qp_ctx *qp_ctx;
- int queue_id;
+ int i = 0;
- /* To load balance */
- queue_id = sec_alloc_queue_id(ctx, req);
- qp_ctx = &ctx->qp_ctx[queue_id];
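+ /* Scan all qp contexts for a free request id; req_id stays negative if every ring is full. */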
+ do {
+ qp_ctx = &ctx->qp_ctx[i];
+ req->req_id = sec_alloc_req_id(req, qp_ctx);
+ } while (req->req_id < 0 && ++i < ctx->sec->ctx_q_num);
- req->req_id = sec_alloc_req_id(req, qp_ctx);
- if (unlikely(req->req_id < 0)) {
- sec_free_queue_id(ctx, req);
- return req->req_id;
- }
+ req->qp_ctx = qp_ctx;
+ req->backlog = &qp_ctx->backlog;
return 0;
}
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
- struct sec_cipher_req *c_req = &req->c_req;
int ret;
ret = sec_request_init(ctx, req);
@@ -1755,8 +1975,7 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
sec_update_iv(req, ctx->alg_type);
ret = ctx->req_op->bd_send(ctx, req);
- if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
- (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
+ if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
goto err_send_req;
}
@@ -1767,16 +1986,23 @@ err_send_req:
/* On failure, restore the IV from the user */
if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
if (ctx->alg_type == SEC_SKCIPHER)
- memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
+ memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin,
ctx->c_ctx.ivsize);
else
- memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
+ memcpy(req->aead_req.aead_req->iv, req->c_req.c_ivin,
ctx->c_ctx.ivsize);
}
sec_request_untransfer(ctx, req);
+
err_uninit_req:
- sec_request_uninit(ctx, req);
+ sec_request_uninit(req);
+ if (ctx->alg_type == SEC_AEAD)
+ ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req,
+ req->c_req.encrypt);
+ else
+ ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req,
+ req->c_req.encrypt);
return ret;
}
@@ -1850,7 +2076,7 @@ static int sec_aead_init(struct crypto_aead *tfm)
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
int ret;
- crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
+ crypto_aead_set_reqsize_dma(tfm, sizeof(struct sec_req));
ctx->alg_type = SEC_AEAD;
ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
@@ -2087,7 +2313,7 @@ static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
- struct sec_req *req = skcipher_request_ctx(sk_req);
+ struct sec_req *req = skcipher_request_ctx_dma(sk_req);
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
bool need_fallback = false;
int ret;
@@ -2102,6 +2328,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
req->c_req.sk_req = sk_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
+ req->base = &sk_req->base;
ret = sec_skcipher_param_check(ctx, req, &need_fallback);
if (unlikely(ret))
@@ -2236,6 +2463,9 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
return -EINVAL;
if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK))
return -EINVAL;
+ } else if (c_mode == SEC_CMODE_GCM) {
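+ /* Reject GCM auth tag sizes below the 8-byte minimum. */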
+ if (unlikely(sz < SEC_GCM_MIN_AUTH_SZ))
+ return -EINVAL;
}
return 0;
@@ -2309,7 +2539,7 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
- struct sec_req *req = aead_request_ctx(a_req);
+ struct sec_req *req = aead_request_ctx_dma(a_req);
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
size_t sz = crypto_aead_authsize(tfm);
bool need_fallback = false;
@@ -2319,6 +2549,7 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
req->aead_req.aead_req = a_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
+ req->base = &a_req->base;
req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);
ret = sec_aead_param_check(ctx, req, &need_fallback);
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 72cf48d1f3ab..5eb2d6820742 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -47,6 +47,8 @@
#define SEC_RAS_FE_ENB_MSK 0x0
#define SEC_OOO_SHUTDOWN_SEL 0x301014
#define SEC_RAS_DISABLE 0x0
+#define SEC_AXI_ERROR_MASK (BIT(0) | BIT(1))
+
#define SEC_MEM_START_INIT_REG 0x301100
#define SEC_MEM_INIT_DONE_REG 0x301104
@@ -93,6 +95,16 @@
#define SEC_PREFETCH_ENABLE (~(BIT(0) | BIT(1) | BIT(11)))
#define SEC_PREFETCH_DISABLE BIT(1)
#define SEC_SVA_DISABLE_READY (BIT(7) | BIT(11))
+#define SEC_SVA_PREFETCH_INFO 0x301ED4
+#define SEC_SVA_STALL_NUM GENMASK(23, 8)
+#define SEC_SVA_PREFETCH_NUM GENMASK(2, 0)
+#define SEC_WAIT_SVA_READY 500000
+#define SEC_READ_SVA_STATUS_TIMES 3
+#define SEC_WAIT_US_MIN 10
+#define SEC_WAIT_US_MAX 20
+#define SEC_WAIT_QP_US_MIN 1000
+#define SEC_WAIT_QP_US_MAX 2000
+#define SEC_MAX_WAIT_TIMES 2000
#define SEC_DELAY_10_US 10
#define SEC_POLL_TIMEOUT_US 1000
@@ -464,6 +476,81 @@ static void sec_set_endian(struct hisi_qm *qm)
writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
}
+static int sec_wait_sva_ready(struct hisi_qm *qm, __u32 offset, __u32 mask)
+{
+ u32 val, try_times = 0;
+ u8 count = 0;
+
+ /*
+ * Read the register value every 10-20us. If the value is 0 for three
+ * consecutive times, the SVA module is ready.
+ */
+ do {
+ val = readl(qm->io_base + offset);
+ if (val & mask)
+ count = 0;
+ else if (++count == SEC_READ_SVA_STATUS_TIMES)
+ break;
+
+ usleep_range(SEC_WAIT_US_MIN, SEC_WAIT_US_MAX);
+ } while (++try_times < SEC_WAIT_SVA_READY);
+
+ if (try_times == SEC_WAIT_SVA_READY) {
+ pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void sec_close_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
+ return;
+
+ val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+ val |= SEC_PREFETCH_DISABLE;
+ writel(val, qm->io_base + SEC_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
+ val, !(val & SEC_SVA_DISABLE_READY),
+ SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
+
+ (void)sec_wait_sva_ready(qm, SEC_SVA_PREFETCH_INFO, SEC_SVA_STALL_NUM);
+}
+
+static void sec_open_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
+ return;
+
+ /* Enable prefetch */
+ val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+ val &= SEC_PREFETCH_ENABLE;
+ writel(val, qm->io_base + SEC_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
+ val, !(val & SEC_PREFETCH_DISABLE),
+ SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+ if (ret) {
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+ sec_close_sva_prefetch(qm);
+ return;
+ }
+
+ ret = sec_wait_sva_ready(qm, SEC_SVA_TRANS, SEC_SVA_PREFETCH_NUM);
+ if (ret)
+ sec_close_sva_prefetch(qm);
+}
+
static void sec_engine_sva_config(struct hisi_qm *qm)
{
u32 reg;
@@ -497,45 +584,7 @@ static void sec_engine_sva_config(struct hisi_qm *qm)
writel_relaxed(reg, qm->io_base +
SEC_INTERFACE_USER_CTRL1_REG);
}
-}
-
-static void sec_open_sva_prefetch(struct hisi_qm *qm)
-{
- u32 val;
- int ret;
-
- if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
- return;
-
- /* Enable prefetch */
- val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
- val &= SEC_PREFETCH_ENABLE;
- writel(val, qm->io_base + SEC_PREFETCH_CFG);
-
- ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
- val, !(val & SEC_PREFETCH_DISABLE),
- SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
- if (ret)
- pci_err(qm->pdev, "failed to open sva prefetch\n");
-}
-
-static void sec_close_sva_prefetch(struct hisi_qm *qm)
-{
- u32 val;
- int ret;
-
- if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
- return;
-
- val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
- val |= SEC_PREFETCH_DISABLE;
- writel(val, qm->io_base + SEC_PREFETCH_CFG);
-
- ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
- val, !(val & SEC_SVA_DISABLE_READY),
- SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
- if (ret)
- pci_err(qm->pdev, "failed to close sva prefetch\n");
+ sec_open_sva_prefetch(qm);
}
static void sec_enable_clock_gate(struct hisi_qm *qm)
@@ -666,8 +715,7 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + SEC_CONTROL_REG);
if (enable) {
val1 |= SEC_AXI_SHUTDOWN_ENABLE;
- val2 = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ val2 = qm->err_info.dev_err.shutdown_mask;
} else {
val1 &= SEC_AXI_SHUTDOWN_DISABLE;
val2 = 0x0;
@@ -681,7 +729,8 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void sec_hw_error_enable(struct hisi_qm *qm)
{
- u32 ce, nfe;
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
if (qm->ver == QM_HW_V1) {
writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
@@ -689,22 +738,19 @@ static void sec_hw_error_enable(struct hisi_qm *qm)
return;
}
- ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver);
- nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
-
/* clear SEC hw error source if any */
- writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_SOURCE);
+ writel(err_mask, qm->io_base + SEC_CORE_INT_SOURCE);
/* enable RAS int */
- writel(ce, qm->io_base + SEC_RAS_CE_REG);
- writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
- writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
+ writel(dev_err->ce, qm->io_base + SEC_RAS_CE_REG);
+ writel(dev_err->fe, qm->io_base + SEC_RAS_FE_REG);
+ writel(dev_err->nfe, qm->io_base + SEC_RAS_NFE_REG);
/* enable SEC block master OOO when nfe occurs on Kunpeng930 */
sec_master_ooo_ctrl(qm, true);
/* enable SEC hw error interrupts */
- writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_MASK);
+ writel(err_mask, qm->io_base + SEC_CORE_INT_MASK);
}
static void sec_hw_error_disable(struct hisi_qm *qm)
@@ -1061,12 +1107,20 @@ static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
static void sec_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
- u32 nfe_mask;
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
- nfe_mask = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
writel(nfe_mask & (~err_type), qm->io_base + SEC_RAS_NFE_REG);
}
+static void sec_enable_error_report(struct hisi_qm *qm)
+{
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
+ u32 ce_mask = qm->err_info.dev_err.ce;
+
+ writel(nfe_mask, qm->io_base + SEC_RAS_NFE_REG);
+ writel(ce_mask, qm->io_base + SEC_RAS_CE_REG);
+}
+
static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
u32 val;
@@ -1082,16 +1136,18 @@ static enum acc_err_result sec_get_err_result(struct hisi_qm *qm)
err_status = sec_get_hw_err_status(qm);
if (err_status) {
- if (err_status & qm->err_info.ecc_2bits_mask)
+ if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
qm->err_status.is_dev_ecc_mbit = true;
sec_log_hw_error(qm, err_status);
- if (err_status & qm->err_info.dev_reset_mask) {
+ if (err_status & qm->err_info.dev_err.reset_mask) {
/* Disable the same error reporting until device is recovered. */
sec_disable_error_report(qm, err_status);
return ACC_ERR_NEED_RESET;
}
sec_clear_hw_err_status(qm, err_status);
+ /* The firmware may have disabled error reporting; re-enable it. */
+ sec_enable_error_report(qm);
}
return ACC_ERR_RECOVERED;
@@ -1102,28 +1158,62 @@ static bool sec_dev_is_abnormal(struct hisi_qm *qm)
u32 err_status;
err_status = sec_get_hw_err_status(qm);
- if (err_status & qm->err_info.dev_shutdown_mask)
+ if (err_status & qm->err_info.dev_err.shutdown_mask)
return true;
return false;
}
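+/*
+ * Mask AXI error reporting (and, on HW > V2, its OOO shutdown) around
+ * controller reset; sec_enable_axi_error() restores the masks.
+ */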
+static void sec_disable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
+ writel(err_mask & ~SEC_AXI_ERROR_MASK, qm->io_base + SEC_CORE_INT_MASK);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask & (~SEC_AXI_ERROR_MASK),
+ qm->io_base + SEC_OOO_SHUTDOWN_SEL);
+}
+
+static void sec_enable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
+ /* clear axi error source */
+ writel(SEC_AXI_ERROR_MASK, qm->io_base + SEC_CORE_INT_SOURCE);
+
+ writel(err_mask, qm->io_base + SEC_CORE_INT_MASK);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask, qm->io_base + SEC_OOO_SHUTDOWN_SEL);
+}
+
static void sec_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
+ struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
+ struct hisi_qm_err_mask *dev_err = &err_info->dev_err;
+
+ qm_err->fe = SEC_RAS_FE_ENB_MSK;
+ qm_err->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver);
+ qm_err->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver);
+ qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ qm_err->reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_QM_RESET_MASK_CAP, qm->cap_ver);
+ qm_err->ecc_2bits_mask = QM_ECC_MBIT;
+
+ dev_err->fe = SEC_RAS_FE_ENB_MSK;
+ dev_err->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver);
+ dev_err->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
+ dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ dev_err->reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_RESET_MASK_CAP, qm->cap_ver);
+ dev_err->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
- err_info->fe = SEC_RAS_FE_ENB_MSK;
- err_info->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver);
- err_info->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver);
- err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
- err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_QM_RESET_MASK_CAP, qm->cap_ver);
- err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = BIT(0);
err_info->acpi_rst = "SRST";
}
@@ -1141,6 +1231,8 @@ static const struct hisi_qm_err_ini sec_err_ini = {
.err_info_init = sec_err_info_init,
.get_err_result = sec_get_err_result,
.dev_is_abnormal = sec_dev_is_abnormal,
+ .disable_axi_error = sec_disable_axi_error,
+ .enable_axi_error = sec_enable_axi_error,
};
static int sec_pf_probe_init(struct sec_dev *sec)
@@ -1152,7 +1244,6 @@ static int sec_pf_probe_init(struct sec_dev *sec)
if (ret)
return ret;
- sec_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
sec_debug_regs_clear(qm);
ret = sec_show_last_regs_init(qm);
@@ -1169,7 +1260,7 @@ static int sec_pre_store_cap_reg(struct hisi_qm *qm)
size_t i, size;
size = ARRAY_SIZE(sec_cap_query_info);
- sec_cap = devm_kzalloc(&pdev->dev, sizeof(*sec_cap) * size, GFP_KERNEL);
+ sec_cap = devm_kcalloc(&pdev->dev, size, sizeof(*sec_cap), GFP_KERNEL);
if (!sec_cap)
return -ENOMEM;
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index c974f95cd126..24c7b6ab285b 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -210,15 +210,15 @@ static void clear_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
* @pool: Pool which hw sgl memory will be allocated in.
* @index: Index of hisi_acc_hw_sgl in pool.
* @hw_sgl_dma: The dma address of allocated hw sgl.
+ * @dir: DMA direction.
*
* This function builds hw sgl according input sgl, user can use hw_sgl_dma
* as src/dst in its BD. Only support single hw sgl currently.
*/
struct hisi_acc_hw_sgl *
-hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
- struct scatterlist *sgl,
- struct hisi_acc_sgl_pool *pool,
- u32 index, dma_addr_t *hw_sgl_dma)
+hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, struct scatterlist *sgl,
+ struct hisi_acc_sgl_pool *pool, u32 index,
+ dma_addr_t *hw_sgl_dma, enum dma_data_direction dir)
{
struct hisi_acc_hw_sgl *curr_hw_sgl;
unsigned int i, sg_n_mapped;
@@ -232,7 +232,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
sg_n = sg_nents(sgl);
- sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
+ sg_n_mapped = dma_map_sg(dev, sgl, sg_n, dir);
if (!sg_n_mapped) {
dev_err(dev, "DMA mapping for SG error!\n");
return ERR_PTR(-EINVAL);
@@ -245,11 +245,6 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
}
curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
- if (IS_ERR(curr_hw_sgl)) {
- dev_err(dev, "Get SGL error!\n");
- ret = -ENOMEM;
- goto err_unmap;
- }
curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
curr_hw_sge = curr_hw_sgl->sge_entries;
@@ -276,16 +271,17 @@ EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl);
* @dev: The device which hw sgl belongs to.
* @sgl: Related scatterlist.
* @hw_sgl: Virtual address of hw sgl.
+ * @dir: DMA direction.
*
* This function unmaps allocated hw sgl.
*/
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
- struct hisi_acc_hw_sgl *hw_sgl)
+ struct hisi_acc_hw_sgl *hw_sgl, enum dma_data_direction dir)
{
if (!dev || !sgl || !hw_sgl)
return;
- dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, sgl, sg_nents(sgl), dir);
clear_hw_sgl_sge(hw_sgl);
hw_sgl->entry_sum_in_chain = 0;
hw_sgl->entry_sum_in_sgl = 0;
diff --git a/drivers/crypto/hisilicon/zip/dae_main.c b/drivers/crypto/hisilicon/zip/dae_main.c
index 6f22e4c36e49..68aebd02fc84 100644
--- a/drivers/crypto/hisilicon/zip/dae_main.c
+++ b/drivers/crypto/hisilicon/zip/dae_main.c
@@ -15,6 +15,7 @@
#define DAE_REG_RD_TMOUT_US USEC_PER_SEC
#define DAE_ALG_NAME "hashagg"
+#define DAE_V5_ALG_NAME "hashagg\nudma\nhashjoin\ngather"
/* error */
#define DAE_AXI_CFG_OFFSET 0x331000
@@ -82,6 +83,7 @@ int hisi_dae_set_user_domain(struct hisi_qm *qm)
int hisi_dae_set_alg(struct hisi_qm *qm)
{
+ const char *alg_name;
size_t len;
if (!dae_is_support(qm))
@@ -90,9 +92,14 @@ int hisi_dae_set_alg(struct hisi_qm *qm)
if (!qm->uacce)
return 0;
+ if (qm->ver >= QM_HW_V5)
+ alg_name = DAE_V5_ALG_NAME;
+ else
+ alg_name = DAE_ALG_NAME;
+
len = strlen(qm->uacce->algs);
/* A line break may be required */
- if (len + strlen(DAE_ALG_NAME) + 1 >= QM_DEV_ALG_MAX_LEN) {
+ if (len + strlen(alg_name) + 1 >= QM_DEV_ALG_MAX_LEN) {
pci_err(qm->pdev, "algorithm name is too long!\n");
return -EINVAL;
}
@@ -100,7 +107,7 @@ int hisi_dae_set_alg(struct hisi_qm *qm)
if (len)
strcat((char *)qm->uacce->algs, "\n");
- strcat((char *)qm->uacce->algs, DAE_ALG_NAME);
+ strcat((char *)qm->uacce->algs, alg_name);
return 0;
}
@@ -168,6 +175,12 @@ static void hisi_dae_disable_error_report(struct hisi_qm *qm, u32 err_type)
writel(DAE_ERR_NFE_MASK & (~err_type), qm->io_base + DAE_ERR_NFE_OFFSET);
}
+static void hisi_dae_enable_error_report(struct hisi_qm *qm)
+{
+ writel(DAE_ERR_CE_MASK, qm->io_base + DAE_ERR_CE_OFFSET);
+ writel(DAE_ERR_NFE_MASK, qm->io_base + DAE_ERR_NFE_OFFSET);
+}
+
static void hisi_dae_log_hw_error(struct hisi_qm *qm, u32 err_type)
{
const struct hisi_dae_hw_error *err = dae_hw_error;
@@ -209,6 +222,8 @@ enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm)
return ACC_ERR_NEED_RESET;
}
hisi_dae_clear_hw_err_status(qm, err_status);
+ /* The firmware may have disabled error reporting; re-enable it. */
+ hisi_dae_enable_error_report(qm);
return ACC_ERR_RECOVERED;
}
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 7327f8f29b01..b97513981a3b 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -224,7 +224,8 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
return -EINVAL;
req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
- req->req_id << 1, &req->dma_src);
+ req->req_id << 1, &req->dma_src,
+ DMA_TO_DEVICE);
if (IS_ERR(req->hw_src)) {
dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
PTR_ERR(req->hw_src));
@@ -233,7 +234,7 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
(req->req_id << 1) + 1,
- &req->dma_dst);
+ &req->dma_dst, DMA_FROM_DEVICE);
if (IS_ERR(req->hw_dst)) {
ret = PTR_ERR(req->hw_dst);
dev_err(dev, "failed to map the dst buffer to hw slg (%d)!\n",
@@ -258,9 +259,9 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
return -EINPROGRESS;
err_unmap_output:
- hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
+ hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst, DMA_FROM_DEVICE);
err_unmap_input:
- hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
+ hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src, DMA_TO_DEVICE);
return ret;
}
@@ -303,8 +304,8 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
err = -EIO;
}
- hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
- hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst, DMA_FROM_DEVICE);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src, DMA_TO_DEVICE);
acomp_req->dlen = ops->get_dstlen(sqe);
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index d8ba23b7cc7d..4fcbe6bada06 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -65,6 +65,7 @@
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
#define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0)
+#define HZIP_AXI_ERROR_MASK (BIT(2) | BIT(3))
#define HZIP_SQE_SIZE 128
#define HZIP_PF_DEF_Q_NUM 64
#define HZIP_PF_DEF_Q_BASE 0
@@ -80,6 +81,7 @@
#define HZIP_ALG_GZIP_BIT GENMASK(3, 2)
#define HZIP_ALG_DEFLATE_BIT GENMASK(5, 4)
#define HZIP_ALG_LZ77_BIT GENMASK(7, 6)
+#define HZIP_ALG_LZ4_BIT GENMASK(9, 8)
#define HZIP_BUF_SIZE 22
#define HZIP_SQE_MASK_OFFSET 64
@@ -95,10 +97,16 @@
#define HZIP_PREFETCH_ENABLE (~(BIT(26) | BIT(17) | BIT(0)))
#define HZIP_SVA_PREFETCH_DISABLE BIT(26)
#define HZIP_SVA_DISABLE_READY (BIT(26) | BIT(30))
+#define HZIP_SVA_PREFETCH_NUM GENMASK(18, 16)
+#define HZIP_SVA_STALL_NUM GENMASK(15, 0)
#define HZIP_SHAPER_RATE_COMPRESS 750
#define HZIP_SHAPER_RATE_DECOMPRESS 140
-#define HZIP_DELAY_1_US 1
-#define HZIP_POLL_TIMEOUT_US 1000
+#define HZIP_DELAY_1_US 1
+#define HZIP_POLL_TIMEOUT_US 1000
+#define HZIP_WAIT_SVA_READY 500000
+#define HZIP_READ_SVA_STATUS_TIMES 3
+#define HZIP_WAIT_US_MIN 10
+#define HZIP_WAIT_US_MAX 20
/* clock gating */
#define HZIP_PEH_CFG_AUTO_GATE 0x3011A8
@@ -111,6 +119,9 @@
/* zip comp high performance */
#define HZIP_HIGH_PERF_OFFSET 0x301208
+#define HZIP_LIT_LEN_EN_OFFSET 0x301204
+#define HZIP_LIT_LEN_EN_EN BIT(4)
+
enum {
HZIP_HIGH_COMP_RATE,
HZIP_HIGH_COMP_PERF,
@@ -141,6 +152,12 @@ static const struct qm_dev_alg zip_dev_algs[] = { {
}, {
.alg_msk = HZIP_ALG_LZ77_BIT,
.alg = "lz77_zstd\n",
+ }, {
+ .alg_msk = HZIP_ALG_LZ77_BIT,
+ .alg = "lz77_only\n",
+ }, {
+ .alg_msk = HZIP_ALG_LZ4_BIT,
+ .alg = "lz4\n",
},
};
@@ -448,10 +465,23 @@ bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
return false;
}
-static int hisi_zip_set_high_perf(struct hisi_qm *qm)
+static void hisi_zip_literal_set(struct hisi_qm *qm)
+{
+ u32 val;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ val = readl_relaxed(qm->io_base + HZIP_LIT_LEN_EN_OFFSET);
+ val &= ~HZIP_LIT_LEN_EN_EN;
+
+ /* enable literal length in stream mode compression */
+ writel(val, qm->io_base + HZIP_LIT_LEN_EN_OFFSET);
+}
+
+static void hisi_zip_set_high_perf(struct hisi_qm *qm)
{
u32 val;
- int ret;
val = readl_relaxed(qm->io_base + HZIP_HIGH_PERF_OFFSET);
if (perf_mode == HZIP_HIGH_COMP_PERF)
@@ -461,16 +491,36 @@ static int hisi_zip_set_high_perf(struct hisi_qm *qm)
/* Set perf mode */
writel(val, qm->io_base + HZIP_HIGH_PERF_OFFSET);
- ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_HIGH_PERF_OFFSET,
- val, val == perf_mode, HZIP_DELAY_1_US,
- HZIP_POLL_TIMEOUT_US);
- if (ret)
- pci_err(qm->pdev, "failed to set perf mode\n");
+}
- return ret;
+static int hisi_zip_wait_sva_ready(struct hisi_qm *qm, __u32 offset, __u32 mask)
+{
+ u32 val, try_times = 0;
+ u8 count = 0;
+
+ /*
+ * Read the register value every 10-20us. If the value is 0 for three
+ * consecutive times, the SVA module is ready.
+ */
+ do {
+ val = readl(qm->io_base + offset);
+ if (val & mask)
+ count = 0;
+ else if (++count == HZIP_READ_SVA_STATUS_TIMES)
+ break;
+
+ usleep_range(HZIP_WAIT_US_MIN, HZIP_WAIT_US_MAX);
+ } while (++try_times < HZIP_WAIT_SVA_READY);
+
+ if (try_times == HZIP_WAIT_SVA_READY) {
+ pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
-static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
+static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -478,19 +528,20 @@ static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
- /* Enable prefetch */
val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
- val &= HZIP_PREFETCH_ENABLE;
+ val |= HZIP_SVA_PREFETCH_DISABLE;
writel(val, qm->io_base + HZIP_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG,
- val, !(val & HZIP_SVA_PREFETCH_DISABLE),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS,
+ val, !(val & HZIP_SVA_DISABLE_READY),
HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
if (ret)
- pci_err(qm->pdev, "failed to open sva prefetch\n");
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
+
+ (void)hisi_zip_wait_sva_ready(qm, HZIP_SVA_TRANS, HZIP_SVA_STALL_NUM);
}
-static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
+static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -498,15 +549,23 @@ static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
+ /* Enable prefetch */
val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
- val |= HZIP_SVA_PREFETCH_DISABLE;
+ val &= HZIP_PREFETCH_ENABLE;
writel(val, qm->io_base + HZIP_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS,
- val, !(val & HZIP_SVA_DISABLE_READY),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG,
+ val, !(val & HZIP_SVA_PREFETCH_DISABLE),
HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
+ if (ret) {
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+ hisi_zip_close_sva_prefetch(qm);
+ return;
+ }
+
+ ret = hisi_zip_wait_sva_ready(qm, HZIP_SVA_TRANS, HZIP_SVA_PREFETCH_NUM);
if (ret)
- pci_err(qm->pdev, "failed to close sva prefetch\n");
+ hisi_zip_close_sva_prefetch(qm);
}
static void hisi_zip_enable_clock_gate(struct hisi_qm *qm)
@@ -530,6 +589,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
void __iomem *base = qm->io_base;
u32 dcomp_bm, comp_bm;
u32 zip_core_en;
+ int ret;
/* qm user domain */
writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
@@ -565,6 +625,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
}
+ hisi_zip_open_sva_prefetch(qm);
/* let's open all compression/decompression cores */
@@ -580,9 +641,19 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
+ hisi_zip_set_high_perf(qm);
+ hisi_zip_literal_set(qm);
hisi_zip_enable_clock_gate(qm);
- return hisi_dae_set_user_domain(qm);
+ ret = hisi_dae_set_user_domain(qm);
+ if (ret)
+ goto close_sva_prefetch;
+
+ return 0;
+
+close_sva_prefetch:
+ hisi_zip_close_sva_prefetch(qm);
+ return ret;
}
static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
@@ -592,8 +663,7 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
if (enable) {
val1 |= HZIP_AXI_SHUTDOWN_ENABLE;
- val2 = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ val2 = qm->err_info.dev_err.shutdown_mask;
} else {
val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE;
val2 = 0x0;
@@ -607,7 +677,8 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
- u32 nfe, ce;
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_MASK_ALL,
@@ -616,33 +687,29 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
return;
}
- nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
- ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
-
	/* clear ZIP hw error source, if any */
- writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_SOURCE);
+ writel(err_mask, qm->io_base + HZIP_CORE_INT_SOURCE);
/* configure error type */
- writel(ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
- writel(HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
- writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ writel(dev_err->ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+ writel(dev_err->fe, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
+ writel(dev_err->nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
hisi_zip_master_ooo_ctrl(qm, true);
/* enable ZIP hw error interrupts */
- writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
+ writel(~err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG);
hisi_dae_hw_error_enable(qm);
}
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
- u32 nfe, ce;
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
/* disable ZIP hw error interrupts */
- nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
- ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
- writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG);
+ writel(err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG);
hisi_zip_master_ooo_ctrl(qm, false);
@@ -1116,12 +1183,20 @@ static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
static void hisi_zip_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
- u32 nfe_mask;
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
- nfe_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
writel(nfe_mask & (~err_type), qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
}
+static void hisi_zip_enable_error_report(struct hisi_qm *qm)
+{
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
+ u32 ce_mask = qm->err_info.dev_err.ce;
+
+ writel(nfe_mask, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ writel(ce_mask, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+}
+
static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
{
u32 val;
@@ -1160,16 +1235,18 @@ static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
/* Get device hardware new error status */
err_status = hisi_zip_get_hw_err_status(qm);
if (err_status) {
- if (err_status & qm->err_info.ecc_2bits_mask)
+ if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
qm->err_status.is_dev_ecc_mbit = true;
hisi_zip_log_hw_error(qm, err_status);
- if (err_status & qm->err_info.dev_reset_mask) {
+ if (err_status & qm->err_info.dev_err.reset_mask) {
/* Disable the same error reporting until device is recovered. */
hisi_zip_disable_error_report(qm, err_status);
- return ACC_ERR_NEED_RESET;
+ zip_result = ACC_ERR_NEED_RESET;
} else {
hisi_zip_clear_hw_err_status(qm, err_status);
+ /* The firmware may have disabled error reporting; re-enable it. */
+ hisi_zip_enable_error_report(qm);
}
}
@@ -1185,7 +1262,7 @@ static bool hisi_zip_dev_is_abnormal(struct hisi_qm *qm)
u32 err_status;
err_status = hisi_zip_get_hw_err_status(qm);
- if (err_status & qm->err_info.dev_shutdown_mask)
+ if (err_status & qm->err_info.dev_err.shutdown_mask)
return true;
return hisi_dae_dev_is_abnormal(qm);
@@ -1196,23 +1273,59 @@ static int hisi_zip_set_priv_status(struct hisi_qm *qm)
return hisi_dae_close_axi_master_ooo(qm);
}
+static void hisi_zip_disable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+ u32 val;
+
+ val = ~(err_mask & (~HZIP_AXI_ERROR_MASK));
+ writel(val, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask & (~HZIP_AXI_ERROR_MASK),
+ qm->io_base + HZIP_OOO_SHUTDOWN_SEL);
+}
+
+static void hisi_zip_enable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
+ /* clear axi error source */
+ writel(HZIP_AXI_ERROR_MASK, qm->io_base + HZIP_CORE_INT_SOURCE);
+
+ writel(~err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask, qm->io_base + HZIP_OOO_SHUTDOWN_SEL);
+}
+
static void hisi_zip_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
+ struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
+ struct hisi_qm_err_mask *dev_err = &err_info->dev_err;
+
+ qm_err->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
+ qm_err->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver);
+ qm_err->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_NFE_MASK_CAP, qm->cap_ver);
+ qm_err->ecc_2bits_mask = QM_ECC_MBIT;
+ qm_err->reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_RESET_MASK_CAP, qm->cap_ver);
+ qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+
+ dev_err->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
+ dev_err->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
+ dev_err->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+ dev_err->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
+ dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ dev_err->reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_RESET_MASK_CAP, qm->cap_ver);
- err_info->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
- err_info->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver);
- err_info->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_QM_NFE_MASK_CAP, qm->cap_ver);
- err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
- err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_QM_RESET_MASK_CAP, qm->cap_ver);
- err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = HZIP_WR_PORT;
err_info->acpi_rst = "ZRST";
}
@@ -1232,6 +1345,8 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.get_err_result = hisi_zip_get_err_result,
.set_priv_status = hisi_zip_set_priv_status,
.dev_is_abnormal = hisi_zip_dev_is_abnormal,
+ .disable_axi_error = hisi_zip_disable_axi_error,
+ .enable_axi_error = hisi_zip_enable_axi_error,
};
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
@@ -1251,11 +1366,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
if (ret)
return ret;
- ret = hisi_zip_set_high_perf(qm);
- if (ret)
- return ret;
-
- hisi_zip_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
hisi_zip_debug_regs_clear(qm);
@@ -1273,7 +1383,7 @@ static int zip_pre_store_cap_reg(struct hisi_qm *qm)
size_t i, size;
size = ARRAY_SIZE(zip_cap_query_info);
- zip_cap = devm_kzalloc(&pdev->dev, sizeof(*zip_cap) * size, GFP_KERNEL);
+ zip_cap = devm_kcalloc(&pdev->dev, size, sizeof(*zip_cap), GFP_KERNEL);
if (!zip_cap)
return -ENOMEM;
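
The reshuffled open/close helpers above share one shape: read-modify-write a control register, then poll a status register until the hardware acknowledges. A minimal sketch of that shape using the generic readl_relaxed_poll_timeout() helper from <linux/iopoll.h>; the register offsets and masks below are illustrative stand-ins, not the real HZIP layout.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define EXAMPLE_CFG		0x100	/* hypothetical control register */
#define EXAMPLE_STATUS		0x104	/* hypothetical status register */
#define EXAMPLE_DISABLE		BIT(0)
#define EXAMPLE_DELAY_US	1
#define EXAMPLE_TIMEOUT_US	1000

static int example_close_prefetch(void __iomem *io_base)
{
	u32 val;

	/* Set the disable bit ... */
	val = readl_relaxed(io_base + EXAMPLE_CFG);
	val |= EXAMPLE_DISABLE;
	writel(val, io_base + EXAMPLE_CFG);

	/* ... then poll until the hardware reports it has quiesced. */
	return readl_relaxed_poll_timeout(io_base + EXAMPLE_STATUS, val,
					  !(val & EXAMPLE_DISABLE),
					  EXAMPLE_DELAY_US,
					  EXAMPLE_TIMEOUT_US);
}
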
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index e050f5ff5efb..f22c12e36b56 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -436,7 +436,7 @@ static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
if (ctx->flags & DRIVER_FLAGS_SG)
- dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);
+ dma_unmap_sg(hdev->dev, ctx->sg, 1, DMA_TO_DEVICE);
return 0;
}
@@ -700,22 +700,22 @@ static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "md5-generic");
+ return img_hash_cra_init(tfm, "md5-lib");
}
static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "sha1-generic");
+ return img_hash_cra_init(tfm, "sha1-lib");
}
static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "sha224-generic");
+ return img_hash_cra_init(tfm, "sha224-lib");
}
static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "sha256-generic");
+ return img_hash_cra_init(tfm, "sha256-lib");
}
static void img_hash_cra_exit(struct crypto_tfm *tfm)
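
The dma_unmap_sg() fix in this file follows the DMA API contract: unmapping takes the nents value originally passed to dma_map_sg(), not the possibly smaller count it returned. A minimal sketch of the pairing; example_map() is a hypothetical caller, not part of the driver.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int example_map(struct device *dev, struct scatterlist *sg, int nents)
{
	int mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);

	if (!mapped)
		return -ENOMEM;

	/* Walk 'mapped' coalesced entries when programming the device ... */

	/* ... but tear down with the original 'nents'. */
	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
	return 0;
}
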
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 9ca80d082c4f..c3b2b22934b7 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1218,7 +1218,6 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_xts_aes,
&safexcel_alg_gcm,
&safexcel_alg_ccm,
- &safexcel_alg_crc32,
&safexcel_alg_cbcmac,
&safexcel_alg_xcbcmac,
&safexcel_alg_cmac,
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 0c79ad78d1c0..0f27367a85fa 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -959,7 +959,6 @@ extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes;
extern struct safexcel_alg_template safexcel_alg_xts_aes;
extern struct safexcel_alg_template safexcel_alg_gcm;
extern struct safexcel_alg_template safexcel_alg_ccm;
-extern struct safexcel_alg_template safexcel_alg_crc32;
extern struct safexcel_alg_template safexcel_alg_cbcmac;
extern struct safexcel_alg_template safexcel_alg_xcbcmac;
extern struct safexcel_alg_template safexcel_alg_cmac;
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index d2b632193beb..ef0ba4832928 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -249,7 +249,9 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
safexcel_complete(priv, ring);
if (sreq->nents) {
- dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, areq->src,
+ sg_nents_for_len(areq->src, areq->nbytes),
+ DMA_TO_DEVICE);
sreq->nents = 0;
}
@@ -289,14 +291,8 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
return 1;
}
- if (unlikely(sreq->digest == CONTEXT_CONTROL_DIGEST_XCM &&
- ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_CRC32)) {
- /* Undo final XOR with 0xffffffff ...*/
- *(__le32 *)areq->result = ~sreq->state[0];
- } else {
- memcpy(areq->result, sreq->state,
- crypto_ahash_digestsize(ahash));
- }
+ memcpy(areq->result, sreq->state,
+ crypto_ahash_digestsize(ahash));
}
cache_len = safexcel_queued_len(sreq);
@@ -497,7 +493,9 @@ unmap_result:
DMA_FROM_DEVICE);
unmap_sg:
if (req->nents) {
- dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
+ dma_unmap_sg(priv->dev, areq->src,
+ sg_nents_for_len(areq->src, areq->nbytes),
+ DMA_TO_DEVICE);
req->nents = 0;
}
cdesc_rollback:
@@ -1881,88 +1879,6 @@ struct safexcel_alg_template safexcel_alg_hmac_md5 = {
},
};
-static int safexcel_crc32_cra_init(struct crypto_tfm *tfm)
-{
- struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
- int ret = safexcel_ahash_cra_init(tfm);
-
- /* Default 'key' is all zeroes */
- memset(&ctx->base.ipad, 0, sizeof(u32));
- return ret;
-}
-
-static int safexcel_crc32_init(struct ahash_request *areq)
-{
- struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
-
- memset(req, 0, sizeof(*req));
-
- /* Start from loaded key */
- req->state[0] = cpu_to_le32(~ctx->base.ipad.word[0]);
- /* Set processed to non-zero to enable invalidation detection */
- req->len = sizeof(u32);
- req->processed = sizeof(u32);
-
- ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_CRC32;
- req->digest = CONTEXT_CONTROL_DIGEST_XCM;
- req->state_sz = sizeof(u32);
- req->digest_sz = sizeof(u32);
- req->block_sz = sizeof(u32);
-
- return 0;
-}
-
-static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
-
- if (keylen != sizeof(u32))
- return -EINVAL;
-
- memcpy(&ctx->base.ipad, key, sizeof(u32));
- return 0;
-}
-
-static int safexcel_crc32_digest(struct ahash_request *areq)
-{
- return safexcel_crc32_init(areq) ?: safexcel_ahash_finup(areq);
-}
-
-struct safexcel_alg_template safexcel_alg_crc32 = {
- .type = SAFEXCEL_ALG_TYPE_AHASH,
- .algo_mask = 0,
- .alg.ahash = {
- .init = safexcel_crc32_init,
- .update = safexcel_ahash_update,
- .final = safexcel_ahash_final,
- .finup = safexcel_ahash_finup,
- .digest = safexcel_crc32_digest,
- .setkey = safexcel_crc32_setkey,
- .export = safexcel_ahash_export,
- .import = safexcel_ahash_import,
- .halg = {
- .digestsize = sizeof(u32),
- .statesize = sizeof(struct safexcel_ahash_export_state),
- .base = {
- .cra_name = "crc32",
- .cra_driver_name = "safexcel-crc32",
- .cra_priority = SAFEXCEL_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
- .cra_init = safexcel_crc32_cra_init,
- .cra_exit = safexcel_ahash_cra_exit,
- .cra_module = THIS_MODULE,
- },
- },
- },
-};
-
static int safexcel_cbcmac_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
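
The deleted template implemented the usual CRC-32 convention in hardware: an all-ones seed and a final XOR with 0xffffffff (visible in the removed '~sreq->state[0]' line). For reference, a sketch of the same convention via the generic crc32_le() library call; example_crc32() is a stand-in, not driver code.

#include <linux/crc32.h>
#include <linux/types.h>

static u32 example_crc32(const u8 *data, size_t len)
{
	/* ~0 seed plus a final inversion gives the standard CRC-32. */
	return ~crc32_le(~0, data, len);
}
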
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 23f585219fb4..d0058757b000 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -805,7 +805,7 @@ static int save_iaa_wq(struct idxd_wq *wq)
if (!cpus_per_iaa)
cpus_per_iaa = 1;
out:
- return 0;
+ return ret;
}
static void remove_iaa_wq(struct idxd_wq *wq)
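
The one-line iaa change restores the usual goto-out convention: once errors funnel through a shared label, the label must return the recorded code rather than hard-coded success. A reduced sketch; example_save() is a hypothetical stand-in for save_iaa_wq().

#include <linux/errno.h>

static int example_save(void)
{
	int ret = 0;

	ret = -EINVAL;		/* stand-in for a failing setup step */
	if (ret)
		goto out;

	/* further work only on success ... */
out:
	return ret;		/* 'return 0' here would eat the error */
}
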
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
index 95dc8979918d..48281d882260 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
@@ -68,6 +68,7 @@ struct ocs_hcu_ctx {
* @sg_data_total: Total data in the SG list at any time.
* @sg_data_offset: Offset into the data of the current individual SG node.
* @sg_dma_nents: Number of sg entries mapped in dma_list.
+ * @nents: Number of entries in the scatterlist.
*/
struct ocs_hcu_rctx {
struct ocs_hcu_dev *hcu_dev;
@@ -91,6 +92,7 @@ struct ocs_hcu_rctx {
unsigned int sg_data_total;
unsigned int sg_data_offset;
unsigned int sg_dma_nents;
+ unsigned int nents;
};
/**
@@ -199,7 +201,7 @@ static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,
/* Unmap req->src (if mapped). */
if (rctx->sg_dma_nents) {
- dma_unmap_sg(dev, req->src, rctx->sg_dma_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->src, rctx->nents, DMA_TO_DEVICE);
rctx->sg_dma_nents = 0;
}
@@ -230,7 +232,7 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
struct device *dev = rctx->hcu_dev->dev;
unsigned int remainder = 0;
unsigned int total;
- size_t nents;
+ int nents;
size_t count;
int rc;
int i;
@@ -251,6 +253,9 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
/* Determine the number of scatter gather list entries to process. */
nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder);
+ if (nents < 0)
+ return nents;
+
/* If there are entries to process, map them. */
if (nents) {
rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents,
@@ -260,6 +265,10 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
rc = -ENOMEM;
goto cleanup;
}
+
+ /* Save the value of nents to pass to dma_unmap_sg. */
+ rctx->nents = nents;
+
/*
* The value returned by dma_map_sg() can be < nents; so update
* nents accordingly.
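
sg_nents_for_len() returns a negative errno when the scatterlist is shorter than the requested length, so the result must be checked before it reaches dma_map_sg(); the count is then saved because dma_unmap_sg() needs that same value later. A condensed sketch of the pattern introduced above (example_prepare() is hypothetical):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int example_prepare(struct device *dev, struct scatterlist *sg,
			   unsigned int total, int *saved_nents)
{
	int nents = sg_nents_for_len(sg, total);

	if (nents < 0)		/* list shorter than 'total' */
		return nents;

	if (!dma_map_sg(dev, sg, nents, DMA_TO_DEVICE))
		return -ENOMEM;

	*saved_nents = nents;	/* reuse verbatim at unmap time */
	return 0;
}
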
diff --git a/drivers/crypto/intel/keembay/ocs-aes.c b/drivers/crypto/intel/keembay/ocs-aes.c
index be9f32fc8f42..bb6f33f6b4d3 100644
--- a/drivers/crypto/intel/keembay/ocs-aes.c
+++ b/drivers/crypto/intel/keembay/ocs-aes.c
@@ -7,6 +7,7 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/swab.h>
@@ -1473,8 +1474,7 @@ int ocs_create_linked_list_from_sg(const struct ocs_aes_dev *aes_dev,
ll = dll_desc->vaddr;
for (i = 0; i < dma_nents; i++, sg = sg_next(sg)) {
ll[i].src_addr = sg_dma_address(sg) + data_offset;
- ll[i].src_len = (sg_dma_len(sg) - data_offset) < data_size ?
- (sg_dma_len(sg) - data_offset) : data_size;
+ ll[i].src_len = min(sg_dma_len(sg) - data_offset, data_size);
data_offset = 0;
data_size -= ll[i].src_len;
/* Current element points to the DMA address of the next one. */
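
Replacing the open-coded ternary with min() is behavior-preserving; min() from <linux/minmax.h> additionally warns at build time when its operands have mismatched types. A trivial sketch:

#include <linux/minmax.h>
#include <linux/types.h>

static u32 example_chunk(u32 remaining, u32 limit)
{
	return min(remaining, limit);	/* same result as the old ?: form */
}
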
diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
index 359c61f0c8a1..4b4861460dd4 100644
--- a/drivers/crypto/intel/qat/Kconfig
+++ b/drivers/crypto/intel/qat/Kconfig
@@ -6,12 +6,11 @@ config CRYPTO_DEV_QAT
select CRYPTO_SKCIPHER
select CRYPTO_AKCIPHER
select CRYPTO_DH
- select CRYPTO_HMAC
select CRYPTO_RSA
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- select CRYPTO_SHA512
select CRYPTO_LIB_AES
+ select CRYPTO_LIB_SHA1
+ select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_SHA512
select FW_LOADER
select CRC8
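
Switching the selects from CRYPTO_SHA* to CRYPTO_LIB_SHA* trades the allocated ahash/shash transforms for direct library calls. A minimal sketch, assuming the one-shot sha256() helper from <crypto/sha2.h>:

#include <crypto/sha2.h>

static void example_digest(const u8 *data, size_t len,
			   u8 out[SHA256_DIGEST_SIZE])
{
	sha256(data, len, out);	/* no tfm allocation, cannot fail */
}
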
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 7c3c0f561c95..53fa91d577ed 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -191,7 +191,6 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
ICP_ACCEL_CAPABILITIES_SM4 |
ICP_ACCEL_CAPABILITIES_AES_V2 |
ICP_ACCEL_CAPABILITIES_ZUC |
- ICP_ACCEL_CAPABILITIES_ZUC_256 |
ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT |
ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN;
@@ -223,17 +222,11 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
if (fusectl1 & ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
- capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT;
}
- if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE)
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
- capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
- }
-
- if (fusectl1 & ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE)
- capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
ICP_ACCEL_CAPABILITIES_SM2 |
@@ -303,11 +296,13 @@ static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
rl_data->pcie_scale_div = ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV;
rl_data->pcie_scale_mul = ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL;
rl_data->dcpr_correction = ADF_420XX_RL_DCPR_CORRECTION;
- rl_data->max_tp[ADF_SVC_ASYM] = ADF_420XX_RL_MAX_TP_ASYM;
- rl_data->max_tp[ADF_SVC_SYM] = ADF_420XX_RL_MAX_TP_SYM;
- rl_data->max_tp[ADF_SVC_DC] = ADF_420XX_RL_MAX_TP_DC;
+ rl_data->max_tp[SVC_ASYM] = ADF_420XX_RL_MAX_TP_ASYM;
+ rl_data->max_tp[SVC_SYM] = ADF_420XX_RL_MAX_TP_SYM;
+ rl_data->max_tp[SVC_DC] = ADF_420XX_RL_MAX_TP_DC;
rl_data->scan_interval = ADF_420XX_RL_SCANS_PER_SEC;
rl_data->scale_ref = ADF_420XX_RL_SLICE_REF;
+
+ adf_gen4_init_num_svc_aes(rl_data);
}
static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask)
@@ -473,6 +468,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_420XX_AE_FREQ;
hw_data->services_supported = adf_gen4_services_supported;
+ hw_data->get_svc_slice_cnt = adf_gen4_get_svc_slice_cnt;
adf_gen4_set_err_mask(&hw_data->dev_err_mask);
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index bd0b1b1015c0..740f68a36ac5 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -3,6 +3,7 @@
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_admin.h>
+#include <adf_bank_state.h>
#include <adf_cfg.h>
#include <adf_cfg_services.h>
#include <adf_clock.h>
@@ -221,11 +222,13 @@ static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
rl_data->pcie_scale_div = ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV;
rl_data->pcie_scale_mul = ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL;
rl_data->dcpr_correction = ADF_4XXX_RL_DCPR_CORRECTION;
- rl_data->max_tp[ADF_SVC_ASYM] = ADF_4XXX_RL_MAX_TP_ASYM;
- rl_data->max_tp[ADF_SVC_SYM] = ADF_4XXX_RL_MAX_TP_SYM;
- rl_data->max_tp[ADF_SVC_DC] = ADF_4XXX_RL_MAX_TP_DC;
+ rl_data->max_tp[SVC_ASYM] = ADF_4XXX_RL_MAX_TP_ASYM;
+ rl_data->max_tp[SVC_SYM] = ADF_4XXX_RL_MAX_TP_SYM;
+ rl_data->max_tp[SVC_DC] = ADF_4XXX_RL_MAX_TP_DC;
rl_data->scan_interval = ADF_4XXX_RL_SCANS_PER_SEC;
rl_data->scale_ref = ADF_4XXX_RL_SLICE_REF;
+
+ adf_gen4_init_num_svc_aes(rl_data);
}
static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
@@ -448,8 +451,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map;
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
- hw_data->bank_state_save = adf_gen4_bank_state_save;
- hw_data->bank_state_restore = adf_gen4_bank_state_restore;
+ hw_data->bank_state_save = adf_bank_state_save;
+ hw_data->bank_state_restore = adf_bank_state_restore;
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;
@@ -459,6 +462,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
hw_data->services_supported = adf_gen4_services_supported;
+ hw_data->get_svc_slice_cnt = adf_gen4_get_svc_slice_cnt;
adf_gen4_set_err_mask(&hw_data->dev_err_mask);
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
index 359a6447ccb8..bed88d3ce8ca 100644
--- a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
@@ -10,6 +10,7 @@
#include <adf_accel_devices.h>
#include <adf_admin.h>
+#include <adf_bank_state.h>
#include <adf_cfg.h>
#include <adf_cfg_services.h>
#include <adf_clock.h>
@@ -18,6 +19,7 @@
#include <adf_gen6_pm.h>
#include <adf_gen6_ras.h>
#include <adf_gen6_shared.h>
+#include <adf_gen6_tl.h>
#include <adf_timer.h>
#include "adf_6xxx_hw_data.h"
#include "icp_qat_fw_comp.h"
@@ -76,6 +78,10 @@ static const unsigned long thrd_mask_dcc[ADF_6XXX_MAX_ACCELENGINES] = {
0x00, 0x00, 0x00, 0x00, 0x07, 0x07, 0x03, 0x03, 0x00
};
+static const unsigned long thrd_mask_dcpr[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x00
+};
+
static const char *const adf_6xxx_fw_objs[] = {
[ADF_FW_CY_OBJ] = ADF_6XXX_CY_OBJ,
[ADF_FW_DC_OBJ] = ADF_6XXX_DC_OBJ,
@@ -97,7 +103,7 @@ static bool services_supported(unsigned long mask)
{
int num_svc;
- if (mask >= BIT(SVC_BASE_COUNT))
+ if (mask >= BIT(SVC_COUNT))
return false;
num_svc = hweight_long(mask);
@@ -126,10 +132,13 @@ static int get_service(unsigned long *mask)
if (test_and_clear_bit(SVC_DCC, mask))
return SVC_DCC;
+ if (test_and_clear_bit(SVC_DECOMP, mask))
+ return SVC_DECOMP;
+
return -EINVAL;
}
-static enum adf_cfg_service_type get_ring_type(enum adf_services service)
+static enum adf_cfg_service_type get_ring_type(unsigned int service)
{
switch (service) {
case SVC_SYM:
@@ -139,12 +148,14 @@ static enum adf_cfg_service_type get_ring_type(enum adf_services service)
case SVC_DC:
case SVC_DCC:
return COMP;
+ case SVC_DECOMP:
+ return DECOMP;
default:
return UNUSED;
}
}
-static const unsigned long *get_thrd_mask(enum adf_services service)
+static const unsigned long *get_thrd_mask(unsigned int service)
{
switch (service) {
case SVC_SYM:
@@ -155,6 +166,8 @@ static const unsigned long *get_thrd_mask(enum adf_services service)
return thrd_mask_cpr;
case SVC_DCC:
return thrd_mask_dcc;
+ case SVC_DECOMP:
+ return thrd_mask_dcpr;
default:
return NULL;
}
@@ -511,6 +524,55 @@ static int adf_gen6_init_thd2arb_map(struct adf_accel_dev *accel_dev)
return 0;
}
+static void init_num_svc_aes(struct adf_rl_hw_data *device_data)
+{
+ enum adf_fw_objs obj_type, obj_iter;
+ unsigned int svc, i, num_grp;
+ u32 ae_mask;
+
+ for (svc = 0; svc < SVC_BASE_COUNT; svc++) {
+ switch (svc) {
+ case SVC_SYM:
+ case SVC_ASYM:
+ obj_type = ADF_FW_CY_OBJ;
+ break;
+ case SVC_DC:
+ case SVC_DECOMP:
+ obj_type = ADF_FW_DC_OBJ;
+ break;
+ }
+
+ num_grp = ARRAY_SIZE(adf_default_fw_config);
+ for (i = 0; i < num_grp; i++) {
+ obj_iter = adf_default_fw_config[i].obj;
+ if (obj_iter == obj_type) {
+ ae_mask = adf_default_fw_config[i].ae_mask;
+ device_data->svc_ae_mask[svc] = hweight32(ae_mask);
+ break;
+ }
+ }
+ }
+}
+
+static u32 adf_gen6_get_svc_slice_cnt(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc)
+{
+ struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
+
+ switch (svc) {
+ case SVC_SYM:
+ return device_data->slices.cph_cnt;
+ case SVC_ASYM:
+ return device_data->slices.pke_cnt;
+ case SVC_DC:
+ return device_data->slices.cpr_cnt + device_data->slices.dcpr_cnt;
+ case SVC_DECOMP:
+ return device_data->slices.dcpr_cnt;
+ default:
+ return 0;
+ }
+}
+
static void set_vc_csr_for_bank(void __iomem *csr, u32 bank_number)
{
u32 value;
@@ -520,8 +582,8 @@ static void set_vc_csr_for_bank(void __iomem *csr, u32 bank_number)
* driver must program the ringmodectl CSRs.
*/
value = ADF_CSR_RD(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number));
- value |= FIELD_PREP(ADF_GEN6_RINGMODECTL_TC_MASK, ADF_GEN6_RINGMODECTL_TC_DEFAULT);
- value |= FIELD_PREP(ADF_GEN6_RINGMODECTL_TC_EN_MASK, ADF_GEN6_RINGMODECTL_TC_EN_OP1);
+ FIELD_MODIFY(ADF_GEN6_RINGMODECTL_TC_MASK, &value, ADF_GEN6_RINGMODECTL_TC_DEFAULT);
+ FIELD_MODIFY(ADF_GEN6_RINGMODECTL_TC_EN_MASK, &value, ADF_GEN6_RINGMODECTL_TC_EN_OP1);
ADF_CSR_WR(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number), value);
}
@@ -537,7 +599,7 @@ static int set_vc_config(struct adf_accel_dev *accel_dev)
* Read PVC0CTL then write the masked values.
*/
pci_read_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, &value);
- value |= FIELD_PREP(ADF_GEN6_PVC0CTL_TCVCMAP_MASK, ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT);
+ FIELD_MODIFY(ADF_GEN6_PVC0CTL_TCVCMAP_MASK, &value, ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT);
err = pci_write_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, value);
if (err) {
dev_err(&GET_DEV(accel_dev), "pci write to PVC0CTL failed\n");
@@ -546,8 +608,8 @@ static int set_vc_config(struct adf_accel_dev *accel_dev)
/* Read PVC1CTL then write masked values */
pci_read_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, &value);
- value |= FIELD_PREP(ADF_GEN6_PVC1CTL_TCVCMAP_MASK, ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT);
- value |= FIELD_PREP(ADF_GEN6_PVC1CTL_VCEN_MASK, ADF_GEN6_PVC1CTL_VCEN_ON);
+ FIELD_MODIFY(ADF_GEN6_PVC1CTL_TCVCMAP_MASK, &value, ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT);
+ FIELD_MODIFY(ADF_GEN6_PVC1CTL_VCEN_MASK, &value, ADF_GEN6_PVC1CTL_VCEN_ON);
err = pci_write_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, value);
if (err)
dev_err(&GET_DEV(accel_dev), "pci write to PVC1CTL failed\n");
@@ -618,7 +680,6 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
- capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
if (fusectl1 & ICP_ACCEL_GEN6_MASK_AUTH_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
@@ -627,7 +688,15 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
- capabilities_asym = 0;
+ capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_SM2 |
+ ICP_ACCEL_CAPABILITIES_ECEDMONT;
+
+ if (fusectl1 & ICP_ACCEL_GEN6_MASK_PKE_SLICE) {
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
+ }
capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
@@ -648,7 +717,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
caps |= capabilities_asym;
if (test_bit(SVC_SYM, &mask))
caps |= capabilities_sym;
- if (test_bit(SVC_DC, &mask))
+ if (test_bit(SVC_DC, &mask) || test_bit(SVC_DECOMP, &mask))
caps |= capabilities_dc;
if (test_bit(SVC_DCC, &mask)) {
/*
@@ -744,7 +813,16 @@ static int adf_init_device(struct adf_accel_dev *accel_dev)
static int enable_pm(struct adf_accel_dev *accel_dev)
{
- return adf_init_admin_pm(accel_dev, ADF_GEN6_PM_DEFAULT_IDLE_FILTER);
+ int ret;
+
+ ret = adf_init_admin_pm(accel_dev, ADF_GEN6_PM_DEFAULT_IDLE_FILTER);
+ if (ret)
+ return ret;
+
+ /* Initialize PM internal data */
+ adf_gen6_init_dev_pm_data(accel_dev);
+
+ return 0;
}
static int dev_config(struct adf_accel_dev *accel_dev)
@@ -776,6 +854,25 @@ static int dev_config(struct adf_accel_dev *accel_dev)
return ret;
}
+static void adf_gen6_init_rl_data(struct adf_rl_hw_data *rl_data)
+{
+ rl_data->pciout_tb_offset = ADF_GEN6_RL_TOKEN_PCIEOUT_BUCKET_OFFSET;
+ rl_data->pciin_tb_offset = ADF_GEN6_RL_TOKEN_PCIEIN_BUCKET_OFFSET;
+ rl_data->r2l_offset = ADF_GEN6_RL_R2L_OFFSET;
+ rl_data->l2c_offset = ADF_GEN6_RL_L2C_OFFSET;
+ rl_data->c2s_offset = ADF_GEN6_RL_C2S_OFFSET;
+ rl_data->pcie_scale_div = ADF_6XXX_RL_PCIE_SCALE_FACTOR_DIV;
+ rl_data->pcie_scale_mul = ADF_6XXX_RL_PCIE_SCALE_FACTOR_MUL;
+ rl_data->max_tp[SVC_ASYM] = ADF_6XXX_RL_MAX_TP_ASYM;
+ rl_data->max_tp[SVC_SYM] = ADF_6XXX_RL_MAX_TP_SYM;
+ rl_data->max_tp[SVC_DC] = ADF_6XXX_RL_MAX_TP_DC;
+ rl_data->max_tp[SVC_DECOMP] = ADF_6XXX_RL_MAX_TP_DECOMP;
+ rl_data->scan_interval = ADF_6XXX_RL_SCANS_PER_SEC;
+ rl_data->scale_ref = ADF_6XXX_RL_SLICE_REF;
+
+ init_num_svc_aes(rl_data);
+}
+
void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &adf_6xxx_class;
@@ -824,6 +921,8 @@ void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = ring_pair_reset;
hw_data->dev_config = dev_config;
+ hw_data->bank_state_save = adf_bank_state_save;
+ hw_data->bank_state_restore = adf_bank_state_restore;
hw_data->get_hb_clock = get_heartbeat_clock;
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->start_timer = adf_timer_start;
@@ -831,11 +930,17 @@ void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
hw_data->init_device = adf_init_device;
hw_data->enable_pm = enable_pm;
hw_data->services_supported = services_supported;
+ hw_data->num_rps = ADF_GEN6_ETR_MAX_BANKS;
+ hw_data->clock_frequency = ADF_6XXX_AE_FREQ;
+ hw_data->get_svc_slice_cnt = adf_gen6_get_svc_slice_cnt;
adf_gen6_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen6_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen6_init_dc_ops(&hw_data->dc_ops);
+ adf_gen6_init_vf_mig_ops(&hw_data->vfmig_ops);
adf_gen6_init_ras_ops(&hw_data->ras_ops);
+ adf_gen6_init_tl_data(&hw_data->tl_data);
+ adf_gen6_init_rl_data(&hw_data->rl_data);
}
void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data)
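
The PVC*CTL updates swap 'val |= FIELD_PREP(...)' for FIELD_MODIFY(), which clears the target field before inserting the new value, so bits left over from the register read cannot leak into the result. A sketch with an illustrative mask (not the real PVC layout):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_MAP_MASK	GENMASK(7, 1)	/* hypothetical field */

static u32 example_update(u32 reg_val, u32 map)
{
	/* reg_val |= FIELD_PREP(...) would keep stale bits in the field */
	FIELD_MODIFY(EXAMPLE_MAP_MASK, &reg_val, map);
	return reg_val;
}
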
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
index 78e2e2c5816e..d822911fe68c 100644
--- a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
@@ -99,7 +99,7 @@
#define ADF_GEN6_PVC0CTL_OFFSET 0x204
#define ADF_GEN6_PVC0CTL_TCVCMAP_OFFSET 1
#define ADF_GEN6_PVC0CTL_TCVCMAP_MASK GENMASK(7, 1)
-#define ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT 0x7F
+#define ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT 0x3F
/* VC1 Resource Control Register */
#define ADF_GEN6_PVC1CTL_OFFSET 0x210
@@ -122,6 +122,13 @@
/* Number of heartbeat counter pairs */
#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE
+/* Rate Limiting */
+#define ADF_GEN6_RL_R2L_OFFSET 0x508000
+#define ADF_GEN6_RL_L2C_OFFSET 0x509000
+#define ADF_GEN6_RL_C2S_OFFSET 0x508818
+#define ADF_GEN6_RL_TOKEN_PCIEIN_BUCKET_OFFSET 0x508800
+#define ADF_GEN6_RL_TOKEN_PCIEOUT_BUCKET_OFFSET 0x508804
+
/* Physical function fuses */
#define ADF_6XXX_ACCELENGINES_MASK GENMASK(8, 0)
#define ADF_6XXX_ADMIN_AE_MASK GENMASK(8, 8)
@@ -133,6 +140,19 @@
#define ADF_6XXX_DC_OBJ "qat_6xxx_dc.bin"
#define ADF_6XXX_ADMIN_OBJ "qat_6xxx_admin.bin"
+/* RL constants */
+#define ADF_6XXX_RL_PCIE_SCALE_FACTOR_DIV 100
+#define ADF_6XXX_RL_PCIE_SCALE_FACTOR_MUL 102
+#define ADF_6XXX_RL_SCANS_PER_SEC 954
+#define ADF_6XXX_RL_MAX_TP_ASYM 173750UL
+#define ADF_6XXX_RL_MAX_TP_SYM 95000UL
+#define ADF_6XXX_RL_MAX_TP_DC 40000UL
+#define ADF_6XXX_RL_MAX_TP_DECOMP 40000UL
+#define ADF_6XXX_RL_SLICE_REF 1000UL
+
+/* Clock frequency */
+#define ADF_6XXX_AE_FREQ (1000 * HZ_PER_MHZ)
+
enum icp_qat_gen6_slice_mask {
ICP_ACCEL_GEN6_MASK_UCS_SLICE = BIT(0),
ICP_ACCEL_GEN6_MASK_AUTH_SLICE = BIT(1),
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 66bb295ace28..89845754841b 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -4,6 +4,7 @@ ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CRYPTO_QAT"'
intel_qat-y := adf_accel_engine.o \
adf_admin.o \
adf_aer.o \
+ adf_bank_state.o \
adf_cfg.o \
adf_cfg_services.o \
adf_clock.o \
@@ -48,9 +49,12 @@ intel_qat-$(CONFIG_DEBUG_FS) += adf_cnv_dbgfs.o \
adf_fw_counters.o \
adf_gen4_pm_debugfs.o \
adf_gen4_tl.o \
+ adf_gen6_pm_dbgfs.o \
+ adf_gen6_tl.o \
adf_heartbeat_dbgfs.o \
adf_heartbeat.o \
adf_pm_dbgfs.o \
+ adf_pm_dbgfs_utils.o \
adf_telemetry.o \
adf_tl_debugfs.o \
adf_transport_debug.o
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 2ee526063213..9fe3239f0114 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -157,39 +157,7 @@ struct admin_info {
u32 mailbox_offset;
};
-struct ring_config {
- u64 base;
- u32 config;
- u32 head;
- u32 tail;
- u32 reserved0;
-};
-
-struct bank_state {
- u32 ringstat0;
- u32 ringstat1;
- u32 ringuostat;
- u32 ringestat;
- u32 ringnestat;
- u32 ringnfstat;
- u32 ringfstat;
- u32 ringcstat0;
- u32 ringcstat1;
- u32 ringcstat2;
- u32 ringcstat3;
- u32 iaintflagen;
- u32 iaintflagreg;
- u32 iaintflagsrcsel0;
- u32 iaintflagsrcsel1;
- u32 iaintcolen;
- u32 iaintcolctl;
- u32 iaintflagandcolen;
- u32 ringexpstat;
- u32 ringexpintenable;
- u32 ringsrvarben;
- u32 reserved0;
- struct ring_config rings[ADF_ETR_MAX_RINGS_PER_BANK];
-};
+struct adf_bank_state;
struct adf_hw_csr_ops {
u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
@@ -338,9 +306,9 @@ struct adf_hw_device_data {
void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
int (*bank_state_save)(struct adf_accel_dev *accel_dev, u32 bank_number,
- struct bank_state *state);
+ struct adf_bank_state *state);
int (*bank_state_restore)(struct adf_accel_dev *accel_dev,
- u32 bank_number, struct bank_state *state);
+ u32 bank_number, struct adf_bank_state *state);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
@@ -351,6 +319,8 @@ struct adf_hw_device_data {
u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
int (*dev_config)(struct adf_accel_dev *accel_dev);
bool (*services_supported)(unsigned long mask);
+ u32 (*get_svc_slice_cnt)(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc);
struct adf_pfvf_ops pfvf_ops;
struct adf_hw_csr_ops csr_ops;
struct adf_dc_ops dc_ops;
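
Replacing the struct definitions with a bare 'struct adf_bank_state;' works because this header only stores pointers to the type; the full layout can then live in adf_bank_state.h alone. A generic sketch of the idiom, with made-up names:

struct example_state;	/* defined elsewhere, size unknown here */

struct example_ops {
	int (*save)(struct example_state *state);	/* pointer: OK */
	int (*restore)(struct example_state *state);
};
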
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index 4cb8bd83f570..11728cf32653 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -105,7 +105,6 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev)
accel_dev->accel_id);
hw_device->reset_device(accel_dev);
pci_restore_state(pdev);
- pci_save_state(pdev);
}
}
@@ -204,7 +203,6 @@ static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
if (!pdev->is_busmaster)
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
res = adf_dev_up(accel_dev, false);
if (res && res != -EALREADY)
return PCI_ERS_RESULT_DISCONNECT;
@@ -229,7 +227,7 @@ const struct pci_error_handlers adf_err_handler = {
};
EXPORT_SYMBOL_GPL(adf_err_handler);
-int adf_dev_autoreset(struct adf_accel_dev *accel_dev)
+static int adf_dev_autoreset(struct adf_accel_dev *accel_dev)
{
if (accel_dev->autoreset_on_error)
return adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_ASYNC);
@@ -276,11 +274,11 @@ int adf_notify_fatal_error(struct adf_accel_dev *accel_dev)
int adf_init_aer(void)
{
device_reset_wq = alloc_workqueue("qat_device_reset_wq",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!device_reset_wq)
return -EFAULT;
- device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", 0, 0);
+ device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", WQ_PERCPU, 0);
if (!device_sriov_wq) {
destroy_workqueue(device_reset_wq);
device_reset_wq = NULL;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_bank_state.c b/drivers/crypto/intel/qat/qat_common/adf_bank_state.c
new file mode 100644
index 000000000000..225d55d56a4b
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_bank_state.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+
+#define pr_fmt(fmt) "QAT: " fmt
+
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/printk.h>
+#include "adf_accel_devices.h"
+#include "adf_bank_state.h"
+#include "adf_common_drv.h"
+
+/* Ring interrupt masks */
+#define ADF_RP_INT_SRC_SEL_F_RISE_MASK GENMASK(1, 0)
+#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0)
+#define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4
+
+static inline int check_stat(u32 (*op)(void __iomem *, u32), u32 expect_val,
+ const char *name, void __iomem *base, u32 bank)
+{
+ u32 actual_val = op(base, bank);
+
+ if (expect_val == actual_val)
+ return 0;
+
+ pr_err("Fail to restore %s register. Expected %#x, actual %#x\n",
+ name, expect_val, actual_val);
+
+ return -EINVAL;
+}
+
+static void bank_state_save(struct adf_hw_csr_ops *ops, void __iomem *base,
+ u32 bank, struct adf_bank_state *state, u32 num_rings)
+{
+ u32 i;
+
+ state->ringstat0 = ops->read_csr_stat(base, bank);
+ state->ringuostat = ops->read_csr_uo_stat(base, bank);
+ state->ringestat = ops->read_csr_e_stat(base, bank);
+ state->ringnestat = ops->read_csr_ne_stat(base, bank);
+ state->ringnfstat = ops->read_csr_nf_stat(base, bank);
+ state->ringfstat = ops->read_csr_f_stat(base, bank);
+ state->ringcstat0 = ops->read_csr_c_stat(base, bank);
+ state->iaintflagen = ops->read_csr_int_en(base, bank);
+ state->iaintflagreg = ops->read_csr_int_flag(base, bank);
+ state->iaintflagsrcsel0 = ops->read_csr_int_srcsel(base, bank);
+ state->iaintcolen = ops->read_csr_int_col_en(base, bank);
+ state->iaintcolctl = ops->read_csr_int_col_ctl(base, bank);
+ state->iaintflagandcolen = ops->read_csr_int_flag_and_col(base, bank);
+ state->ringexpstat = ops->read_csr_exp_stat(base, bank);
+ state->ringexpintenable = ops->read_csr_exp_int_en(base, bank);
+ state->ringsrvarben = ops->read_csr_ring_srv_arb_en(base, bank);
+
+ for (i = 0; i < num_rings; i++) {
+ state->rings[i].head = ops->read_csr_ring_head(base, bank, i);
+ state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i);
+ state->rings[i].config = ops->read_csr_ring_config(base, bank, i);
+ state->rings[i].base = ops->read_csr_ring_base(base, bank, i);
+ }
+}
+
+static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base,
+ u32 bank, struct adf_bank_state *state, u32 num_rings,
+ int tx_rx_gap)
+{
+ u32 val, tmp_val, i;
+ int ret;
+
+ for (i = 0; i < num_rings; i++)
+ ops->write_csr_ring_base(base, bank, i, state->rings[i].base);
+
+ for (i = 0; i < num_rings; i++)
+ ops->write_csr_ring_config(base, bank, i, state->rings[i].config);
+
+ for (i = 0; i < num_rings / 2; i++) {
+ int tx = i * (tx_rx_gap + 1);
+ int rx = tx + tx_rx_gap;
+
+ ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
+ ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail);
+
+ /*
+ * The TX ring head needs to be updated again to make sure that
+ * the HW will not consider the ring as full when it is empty
+ * and the correct state flags are set to match the recovered state.
+ */
+ if (state->ringestat & BIT(tx)) {
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+ ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
+ }
+
+ ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+
+ ops->write_csr_ring_head(base, bank, rx, state->rings[rx].head);
+ val = ops->read_csr_int_srcsel(base, bank);
+ val |= ADF_RP_INT_SRC_SEL_F_FALL_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
+ ops->write_csr_int_srcsel_w_val(base, bank, val);
+
+ /*
+ * The RX ring tail needs to be updated again to make sure that
+ * the HW will not consider the ring as empty when it is full
+ * and the correct state flags are set to match the recovered state.
+ */
+ if (state->ringfstat & BIT(rx))
+ ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
+ }
+
+ ops->write_csr_int_flag_and_col(base, bank, state->iaintflagandcolen);
+ ops->write_csr_int_en(base, bank, state->iaintflagen);
+ ops->write_csr_int_col_en(base, bank, state->iaintcolen);
+ ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0);
+ ops->write_csr_exp_int_en(base, bank, state->ringexpintenable);
+ ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl);
+
+ /*
+ * Verify whether any exceptions were raised during the bank save process.
+ * If exceptions occurred, the status and exception registers cannot
+ * be directly restored. Consequently, further restoration is not
+ * feasible, and the current state of the ring should be maintained.
+ */
+ val = state->ringexpstat;
+ if (val) {
+ pr_info("Bank %u state not fully restored due to exception in saved state (%#x)\n",
+ bank, val);
+ return 0;
+ }
+
+ /* Ensure that the restoration process completed without exceptions */
+ tmp_val = ops->read_csr_exp_stat(base, bank);
+ if (tmp_val) {
+ pr_err("Bank %u restored with exception: %#x\n", bank, tmp_val);
+ return -EFAULT;
+ }
+
+ ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben);
+
+ /* Check that all ring statuses match the saved state. */
+ ret = check_stat(ops->read_csr_stat, state->ringstat0, "ringstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_e_stat, state->ringestat, "ringestat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_ne_stat, state->ringnestat, "ringnestat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_nf_stat, state->ringnfstat, "ringnfstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_f_stat, state->ringfstat, "ringfstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ ret = check_stat(ops->read_csr_c_stat, state->ringcstat0, "ringcstat",
+ base, bank);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * adf_bank_state_save() - save state of bank-related registers
+ * @accel_dev: Pointer to the device structure
+ * @bank_number: Bank number
+ * @state: Pointer to bank state structure
+ *
+ * This function saves the state of a bank by reading the bank CSRs and
+ * writing them to the @state structure.
+ *
+ * Returns 0 on success, error code otherwise
+ */
+int adf_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct adf_bank_state *state)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ void __iomem *csr_base = adf_get_etr_base(accel_dev);
+
+ if (bank_number >= hw_data->num_banks || !state)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "Saving state of bank %d\n", bank_number);
+
+ bank_state_save(csr_ops, csr_base, bank_number, state,
+ hw_data->num_rings_per_bank);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_bank_state_save);
+
+/**
+ * adf_bank_state_restore() - restore state of bank-related registers
+ * @accel_dev: Pointer to the device structure
+ * @bank_number: Bank number
+ * @state: Pointer to bank state structure
+ *
+ * This function attempts to restore the state of a bank by writing the
+ * values from the @state structure back to the bank CSRs.
+ *
+ * Returns 0 on success, error code otherwise
+ */
+int adf_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct adf_bank_state *state)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ void __iomem *csr_base = adf_get_etr_base(accel_dev);
+ int ret;
+
+ if (bank_number >= hw_data->num_banks || !state)
+ return -EINVAL;
+
+ dev_dbg(&GET_DEV(accel_dev), "Restoring state of bank %d\n", bank_number);
+
+ ret = bank_state_restore(csr_ops, csr_base, bank_number, state,
+ hw_data->num_rings_per_bank, hw_data->tx_rx_gap);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev),
+ "Unable to restore state of bank %d\n", bank_number);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_bank_state_restore);
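
A hypothetical caller of the two new helpers, bracketing a ring-pair reset with save and restore; error handling is reduced to the essentials, and the state structure is heap-allocated because of its size. Assumes the adf_bank_state.h and accel-device headers above.

#include <linux/slab.h>

static int example_reset_bank(struct adf_accel_dev *accel_dev, u32 bank)
{
	struct adf_bank_state *state;
	int ret;

	state = kzalloc(sizeof(*state), GFP_KERNEL);	/* too big for the stack */
	if (!state)
		return -ENOMEM;

	ret = adf_bank_state_save(accel_dev, bank, state);
	if (ret)
		goto out;

	/* ... quiesce and reset the ring pair here ... */

	ret = adf_bank_state_restore(accel_dev, bank, state);
out:
	kfree(state);
	return ret;
}
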
diff --git a/drivers/crypto/intel/qat/qat_common/adf_bank_state.h b/drivers/crypto/intel/qat/qat_common/adf_bank_state.h
new file mode 100644
index 000000000000..48b573d692dd
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_bank_state.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_BANK_STATE_H_
+#define ADF_BANK_STATE_H_
+
+#include <linux/types.h>
+
+struct adf_accel_dev;
+
+struct ring_config {
+ u64 base;
+ u32 config;
+ u32 head;
+ u32 tail;
+ u32 reserved0;
+};
+
+struct adf_bank_state {
+ u32 ringstat0;
+ u32 ringstat1;
+ u32 ringuostat;
+ u32 ringestat;
+ u32 ringnestat;
+ u32 ringnfstat;
+ u32 ringfstat;
+ u32 ringcstat0;
+ u32 ringcstat1;
+ u32 ringcstat2;
+ u32 ringcstat3;
+ u32 iaintflagen;
+ u32 iaintflagreg;
+ u32 iaintflagsrcsel0;
+ u32 iaintflagsrcsel1;
+ u32 iaintcolen;
+ u32 iaintcolctl;
+ u32 iaintflagandcolen;
+ u32 ringexpstat;
+ u32 ringexpintenable;
+ u32 ringsrvarben;
+ u32 reserved0;
+ struct ring_config rings[ADF_ETR_MAX_RINGS_PER_BANK];
+};
+
+int adf_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct adf_bank_state *state);
+int adf_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct adf_bank_state *state);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
index 15fdf9854b81..81e9e9d7eccd 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
@@ -29,6 +29,7 @@ enum adf_cfg_service_type {
COMP,
SYM,
ASYM,
+ DECOMP,
USED
};
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
index c39871291da7..7d00bcb41ce7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
@@ -7,6 +7,7 @@
#include <linux/pci.h>
#include <linux/string.h>
#include "adf_cfg.h"
+#include "adf_cfg_common.h"
#include "adf_cfg_services.h"
#include "adf_cfg_strings.h"
@@ -15,13 +16,14 @@ static const char *const adf_cfg_services[] = {
[SVC_SYM] = ADF_CFG_SYM,
[SVC_DC] = ADF_CFG_DC,
[SVC_DCC] = ADF_CFG_DCC,
+ [SVC_DECOMP] = ADF_CFG_DECOMP,
};
/*
* Ensure that the size of the array matches the number of services,
- * SVC_BASE_COUNT, that is used to size the bitmap.
+ * SVC_COUNT, that is used to size the bitmap.
*/
-static_assert(ARRAY_SIZE(adf_cfg_services) == SVC_BASE_COUNT);
+static_assert(ARRAY_SIZE(adf_cfg_services) == SVC_COUNT);
/*
* Ensure that the maximum number of concurrent services that can be
@@ -34,7 +36,7 @@ static_assert(ARRAY_SIZE(adf_cfg_services) >= MAX_NUM_CONCURR_SVC);
* Ensure that the number of services fit a single unsigned long, as each
* service is represented by a bit in the mask.
*/
-static_assert(BITS_PER_LONG >= SVC_BASE_COUNT);
+static_assert(BITS_PER_LONG >= SVC_COUNT);
/*
* Ensure that size of the concatenation of all service strings is smaller
@@ -43,6 +45,7 @@ static_assert(BITS_PER_LONG >= SVC_BASE_COUNT);
static_assert(sizeof(ADF_CFG_SYM ADF_SERVICES_DELIMITER
ADF_CFG_ASYM ADF_SERVICES_DELIMITER
ADF_CFG_DC ADF_SERVICES_DELIMITER
+ ADF_CFG_DECOMP ADF_SERVICES_DELIMITER
ADF_CFG_DCC) < ADF_CFG_MAX_VAL_LEN_IN_BYTES);
static int adf_service_string_to_mask(struct adf_accel_dev *accel_dev, const char *buf,
@@ -88,7 +91,7 @@ static int adf_service_mask_to_string(unsigned long mask, char *buf, size_t len)
if (len < ADF_CFG_MAX_VAL_LEN_IN_BYTES)
return -ENOSPC;
- for_each_set_bit(bit, &mask, SVC_BASE_COUNT) {
+ for_each_set_bit(bit, &mask, SVC_COUNT) {
if (offset)
offset += scnprintf(buf + offset, len - offset,
ADF_SERVICES_DELIMITER);
@@ -167,9 +170,43 @@ int adf_get_service_enabled(struct adf_accel_dev *accel_dev)
if (test_bit(SVC_DC, &mask))
return SVC_DC;
+ if (test_bit(SVC_DECOMP, &mask))
+ return SVC_DECOMP;
+
if (test_bit(SVC_DCC, &mask))
return SVC_DCC;
return -EINVAL;
}
EXPORT_SYMBOL_GPL(adf_get_service_enabled);
+
+enum adf_cfg_service_type adf_srv_to_cfg_svc_type(enum adf_base_services svc)
+{
+ switch (svc) {
+ case SVC_ASYM:
+ return ASYM;
+ case SVC_SYM:
+ return SYM;
+ case SVC_DC:
+ return COMP;
+ case SVC_DECOMP:
+ return DECOMP;
+ default:
+ return UNUSED;
+ }
+}
+
+bool adf_is_service_enabled(struct adf_accel_dev *accel_dev, enum adf_base_services svc)
+{
+ enum adf_cfg_service_type arb_srv = adf_srv_to_cfg_svc_type(svc);
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ u8 rps_per_bundle = hw_data->num_banks_per_vf;
+ int i;
+
+ for (i = 0; i < rps_per_bundle; i++) {
+ if (GET_SRV_TYPE(accel_dev, i) == arb_srv)
+ return true;
+ }
+
+ return false;
+}
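
A sketch of how the new adf_is_service_enabled() query might gate setup work on the freshly added SVC_DECOMP service; example_init_decomp() is hypothetical, not part of the series.

static int example_init_decomp(struct adf_accel_dev *accel_dev)
{
	if (!adf_is_service_enabled(accel_dev, SVC_DECOMP))
		return 0;	/* nothing to do on this configuration */

	/* ... allocate decompression-only resources here ... */
	return 0;
}
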
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
index 3742c450878f..913d717280af 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
@@ -7,16 +7,21 @@
struct adf_accel_dev;
-enum adf_services {
+enum adf_base_services {
SVC_ASYM = 0,
SVC_SYM,
SVC_DC,
- SVC_DCC,
+ SVC_DECOMP,
SVC_BASE_COUNT
};
+enum adf_extended_services {
+ SVC_DCC = SVC_BASE_COUNT,
+ SVC_COUNT
+};
+
enum adf_composed_services {
- SVC_SYM_ASYM = SVC_BASE_COUNT,
+ SVC_SYM_ASYM = SVC_COUNT,
SVC_SYM_DC,
SVC_ASYM_DC,
};
@@ -33,5 +38,7 @@ int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in,
size_t in_len, char *out, size_t out_len);
int adf_get_service_enabled(struct adf_accel_dev *accel_dev);
int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask);
+enum adf_cfg_service_type adf_srv_to_cfg_svc_type(enum adf_base_services svc);
+bool adf_is_service_enabled(struct adf_accel_dev *accel_dev, enum adf_base_services svc);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
index b79982c4a856..30107a02ee7f 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
@@ -24,6 +24,7 @@
#define ADF_CY "Cy"
#define ADF_DC "Dc"
#define ADF_CFG_DC "dc"
+#define ADF_CFG_DECOMP "decomp"
#define ADF_CFG_CY "sym;asym"
#define ADF_CFG_SYM "sym"
#define ADF_CFG_ASYM "asym"
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index eaa6388a6678..6cf3a95489e8 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -86,7 +86,6 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev);
extern const struct pci_error_handlers adf_err_handler;
void adf_reset_sbr(struct adf_accel_dev *accel_dev);
void adf_reset_flr(struct adf_accel_dev *accel_dev);
-int adf_dev_autoreset(struct adf_accel_dev *accel_dev);
void adf_dev_restore(struct adf_accel_dev *accel_dev);
int adf_init_aer(void);
void adf_exit_aer(void);
@@ -189,6 +188,7 @@ void adf_exit_misc_wq(void);
bool adf_misc_wq_queue_work(struct work_struct *work);
bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
unsigned long delay);
+void adf_misc_wq_flush(void);
#if defined(CONFIG_PCI_IOV)
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
index 48c62a14a6a7..c2e6f0cb7480 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
@@ -89,26 +89,14 @@ err_chrdev_unreg:
return -EFAULT;
}
-static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
- unsigned long arg)
+static struct adf_user_cfg_ctl_data *adf_ctl_alloc_resources(unsigned long arg)
{
struct adf_user_cfg_ctl_data *cfg_data;
- cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
- if (!cfg_data)
- return -ENOMEM;
-
- /* Initialize device id to NO DEVICE as 0 is a valid device id */
- cfg_data->device_id = ADF_CFG_NO_DEVICE;
-
- if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
+ cfg_data = memdup_user((void __user *)arg, sizeof(*cfg_data));
+ if (IS_ERR(cfg_data))
pr_err("QAT: failed to copy from user cfg_data.\n");
- kfree(cfg_data);
- return -EIO;
- }
-
- *ctl_data = cfg_data;
- return 0;
+ return cfg_data;
}
static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
@@ -188,13 +176,13 @@ out_err:
static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
unsigned long arg)
{
- int ret;
struct adf_user_cfg_ctl_data *ctl_data;
struct adf_accel_dev *accel_dev;
+ int ret = 0;
- ret = adf_ctl_alloc_resources(&ctl_data, arg);
- if (ret)
- return ret;
+ ctl_data = adf_ctl_alloc_resources(arg);
+ if (IS_ERR(ctl_data))
+ return PTR_ERR(ctl_data);
accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
if (!accel_dev) {
@@ -267,9 +255,9 @@ static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
int ret;
struct adf_user_cfg_ctl_data *ctl_data;
- ret = adf_ctl_alloc_resources(&ctl_data, arg);
- if (ret)
- return ret;
+ ctl_data = adf_ctl_alloc_resources(arg);
+ if (IS_ERR(ctl_data))
+ return PTR_ERR(ctl_data);
if (adf_devmgr_verify_id(ctl_data->device_id)) {
pr_err("QAT: Device %d not found\n", ctl_data->device_id);
@@ -301,9 +289,9 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
struct adf_user_cfg_ctl_data *ctl_data;
struct adf_accel_dev *accel_dev;
- ret = adf_ctl_alloc_resources(&ctl_data, arg);
- if (ret)
- return ret;
+ ctl_data = adf_ctl_alloc_resources(arg);
+ if (IS_ERR(ctl_data))
+ return PTR_ERR(ctl_data);
ret = -ENODEV;
accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
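
memdup_user() collapses the old allocate-then-copy_from_user pair into one call and reports failure as an ERR_PTR, which is why the call sites above now check IS_ERR()/PTR_ERR(). A reduced sketch mirroring adf_ctl_alloc_resources(); it assumes the adf_user_cfg_ctl_data definition from the driver's uapi header.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static struct adf_user_cfg_ctl_data *example_copy(unsigned long arg)
{
	struct adf_user_cfg_ctl_data *cfg;

	cfg = memdup_user((void __user *)arg, sizeof(*cfg));
	if (IS_ERR(cfg))
		pr_err("QAT: failed to copy from user cfg_data\n");

	return cfg;	/* caller unwraps with IS_ERR()/PTR_ERR() */
}
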
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index 0406cb09c5bb..349fdb323763 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
+
+#define pr_fmt(fmt) "QAT: " fmt
+
#include <linux/bitops.h>
#include <linux/iopoll.h>
#include <asm/div64.h>
@@ -259,7 +262,10 @@ bool adf_gen4_services_supported(unsigned long mask)
{
unsigned long num_svc = hweight_long(mask);
- if (mask >= BIT(SVC_BASE_COUNT))
+ if (mask >= BIT(SVC_COUNT))
+ return false;
+
+ if (test_bit(SVC_DECOMP, &mask))
return false;
switch (num_svc) {
@@ -485,187 +491,6 @@ int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev,
return ret;
}
-static void bank_state_save(struct adf_hw_csr_ops *ops, void __iomem *base,
- u32 bank, struct bank_state *state, u32 num_rings)
-{
- u32 i;
-
- state->ringstat0 = ops->read_csr_stat(base, bank);
- state->ringuostat = ops->read_csr_uo_stat(base, bank);
- state->ringestat = ops->read_csr_e_stat(base, bank);
- state->ringnestat = ops->read_csr_ne_stat(base, bank);
- state->ringnfstat = ops->read_csr_nf_stat(base, bank);
- state->ringfstat = ops->read_csr_f_stat(base, bank);
- state->ringcstat0 = ops->read_csr_c_stat(base, bank);
- state->iaintflagen = ops->read_csr_int_en(base, bank);
- state->iaintflagreg = ops->read_csr_int_flag(base, bank);
- state->iaintflagsrcsel0 = ops->read_csr_int_srcsel(base, bank);
- state->iaintcolen = ops->read_csr_int_col_en(base, bank);
- state->iaintcolctl = ops->read_csr_int_col_ctl(base, bank);
- state->iaintflagandcolen = ops->read_csr_int_flag_and_col(base, bank);
- state->ringexpstat = ops->read_csr_exp_stat(base, bank);
- state->ringexpintenable = ops->read_csr_exp_int_en(base, bank);
- state->ringsrvarben = ops->read_csr_ring_srv_arb_en(base, bank);
-
- for (i = 0; i < num_rings; i++) {
- state->rings[i].head = ops->read_csr_ring_head(base, bank, i);
- state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i);
- state->rings[i].config = ops->read_csr_ring_config(base, bank, i);
- state->rings[i].base = ops->read_csr_ring_base(base, bank, i);
- }
-}
-
-#define CHECK_STAT(op, expect_val, name, args...) \
-({ \
- u32 __expect_val = (expect_val); \
- u32 actual_val = op(args); \
- (__expect_val == actual_val) ? 0 : \
- (pr_err("QAT: Fail to restore %s register. Expected 0x%x, actual 0x%x\n", \
- name, __expect_val, actual_val), -EINVAL); \
-})
-
-static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base,
- u32 bank, struct bank_state *state, u32 num_rings,
- int tx_rx_gap)
-{
- u32 val, tmp_val, i;
- int ret;
-
- for (i = 0; i < num_rings; i++)
- ops->write_csr_ring_base(base, bank, i, state->rings[i].base);
-
- for (i = 0; i < num_rings; i++)
- ops->write_csr_ring_config(base, bank, i, state->rings[i].config);
-
- for (i = 0; i < num_rings / 2; i++) {
- int tx = i * (tx_rx_gap + 1);
- int rx = tx + tx_rx_gap;
-
- ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
- ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail);
-
- /*
- * The TX ring head needs to be updated again to make sure that
- * the HW will not consider the ring as full when it is empty
- * and the correct state flags are set to match the recovered state.
- */
- if (state->ringestat & BIT(tx)) {
- val = ops->read_csr_int_srcsel(base, bank);
- val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK;
- ops->write_csr_int_srcsel_w_val(base, bank, val);
- ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
- }
-
- ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
- val = ops->read_csr_int_srcsel(base, bank);
- val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
- ops->write_csr_int_srcsel_w_val(base, bank, val);
-
- ops->write_csr_ring_head(base, bank, rx, state->rings[rx].head);
- val = ops->read_csr_int_srcsel(base, bank);
- val |= ADF_RP_INT_SRC_SEL_F_FALL_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
- ops->write_csr_int_srcsel_w_val(base, bank, val);
-
- /*
- * The RX ring tail needs to be updated again to make sure that
- * the HW will not consider the ring as empty when it is full
- * and the correct state flags are set to match the recovered state.
- */
- if (state->ringfstat & BIT(rx))
- ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
- }
-
- ops->write_csr_int_flag_and_col(base, bank, state->iaintflagandcolen);
- ops->write_csr_int_en(base, bank, state->iaintflagen);
- ops->write_csr_int_col_en(base, bank, state->iaintcolen);
- ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0);
- ops->write_csr_exp_int_en(base, bank, state->ringexpintenable);
- ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl);
- ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben);
-
- /* Check that all ring statuses match the saved state. */
- ret = CHECK_STAT(ops->read_csr_stat, state->ringstat0, "ringstat",
- base, bank);
- if (ret)
- return ret;
-
- ret = CHECK_STAT(ops->read_csr_e_stat, state->ringestat, "ringestat",
- base, bank);
- if (ret)
- return ret;
-
- ret = CHECK_STAT(ops->read_csr_ne_stat, state->ringnestat, "ringnestat",
- base, bank);
- if (ret)
- return ret;
-
- ret = CHECK_STAT(ops->read_csr_nf_stat, state->ringnfstat, "ringnfstat",
- base, bank);
- if (ret)
- return ret;
-
- ret = CHECK_STAT(ops->read_csr_f_stat, state->ringfstat, "ringfstat",
- base, bank);
- if (ret)
- return ret;
-
- ret = CHECK_STAT(ops->read_csr_c_stat, state->ringcstat0, "ringcstat",
- base, bank);
- if (ret)
- return ret;
-
- tmp_val = ops->read_csr_exp_stat(base, bank);
- val = state->ringexpstat;
- if (tmp_val && !val) {
- pr_err("QAT: Bank was restored with exception: 0x%x\n", val);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
- struct bank_state *state)
-{
- struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
- struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
- void __iomem *csr_base = adf_get_etr_base(accel_dev);
-
- if (bank_number >= hw_data->num_banks || !state)
- return -EINVAL;
-
- dev_dbg(&GET_DEV(accel_dev), "Saving state of bank %d\n", bank_number);
-
- bank_state_save(csr_ops, csr_base, bank_number, state,
- hw_data->num_rings_per_bank);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_bank_state_save);
-
-int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number,
- struct bank_state *state)
-{
- struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
- struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
- void __iomem *csr_base = adf_get_etr_base(accel_dev);
- int ret;
-
- if (bank_number >= hw_data->num_banks || !state)
- return -EINVAL;
-
- dev_dbg(&GET_DEV(accel_dev), "Restoring state of bank %d\n", bank_number);
-
- ret = bank_state_restore(csr_ops, csr_base, bank_number, state,
- hw_data->num_rings_per_bank, hw_data->tx_rx_gap);
- if (ret)
- dev_err(&GET_DEV(accel_dev),
- "Unable to restore state of bank %d\n", bank_number);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore);
-
static int adf_gen4_build_comp_block(void *ctx, enum adf_dc_algo algo)
{
struct icp_qat_fw_comp_req *req_tmpl = ctx;
@@ -733,3 +558,43 @@ void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
dc_ops->build_decomp_block = adf_gen4_build_decomp_block;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);
+
+void adf_gen4_init_num_svc_aes(struct adf_rl_hw_data *device_data)
+{
+ struct adf_hw_device_data *hw_data;
+ unsigned int i;
+ u32 ae_cnt;
+
+ hw_data = container_of(device_data, struct adf_hw_device_data, rl_data);
+ ae_cnt = hweight32(hw_data->get_ae_mask(hw_data));
+ if (!ae_cnt)
+ return;
+
+ for (i = 0; i < SVC_BASE_COUNT; i++)
+ device_data->svc_ae_mask[i] = ae_cnt - 1;
+
+ /*
+ * The decompression service is not supported on QAT GEN4 devices.
+ * Therefore, set svc_ae_mask to 0.
+ */
+ device_data->svc_ae_mask[SVC_DECOMP] = 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_num_svc_aes);
+
+u32 adf_gen4_get_svc_slice_cnt(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc)
+{
+ struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
+
+ switch (svc) {
+ case SVC_SYM:
+ return device_data->slices.cph_cnt;
+ case SVC_ASYM:
+ return device_data->slices.pke_cnt;
+ case SVC_DC:
+ return device_data->slices.dcpr_cnt;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_svc_slice_cnt);
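The per-service AE accounting added above reduces to simple arithmetic; a worked sketch with a hypothetical fuse mask (the admin-AE rationale is an inference, not stated in the code):

    u32 ae_mask = 0x1ff;               /* hypothetical: 9 engines fused in */
    u32 ae_cnt  = hweight32(ae_mask);  /* = 9 */
    /*
     * adf_gen4_init_num_svc_aes() then records ae_cnt - 1 = 8 engines for
     * each base service (presumably excluding the admin AE) and forces
     * svc_ae_mask[SVC_DECOMP] to 0, since GEN4 has no decompression
     * service.
     */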
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index e4f4d5fa616d..cd26b6724c43 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -84,9 +84,6 @@
#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
/* Ring interrupt */
-#define ADF_RP_INT_SRC_SEL_F_RISE_MASK GENMASK(1, 0)
-#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0)
-#define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4
#define ADF_COALESCED_POLL_TIMEOUT_US (1 * USEC_PER_SEC)
#define ADF_COALESCED_POLL_DELAY_US 1000
#define ADF_WQM_CSR_RPINTSOU(bank) (0x200000 + ((bank) << 12))
@@ -176,11 +173,10 @@ int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev,
u32 bank_number, int timeout_us);
void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev,
u32 bank_number);
-int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
- struct bank_state *state);
-int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev,
- u32 bank_number, struct bank_state *state);
bool adf_gen4_services_supported(unsigned long service_mask);
void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops);
+void adf_gen4_init_num_svc_aes(struct adf_rl_hw_data *device_data);
+u32 adf_gen4_get_svc_slice_cnt(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
index 2e4095c4c12c..b7e38842a46d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
@@ -1,47 +1,18 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */
#include <linux/dma-mapping.h>
-#include <linux/kernel.h>
#include <linux/string_helpers.h>
-#include <linux/stringify.h>
#include "adf_accel_devices.h"
#include "adf_admin.h"
#include "adf_common_drv.h"
#include "adf_gen4_pm.h"
+#include "adf_pm_dbgfs_utils.h"
#include "icp_qat_fw_init_admin.h"
-/*
- * This is needed because a variable is used to index the mask at
- * pm_scnprint_table(), making it not compile time constant, so the compile
- * asserts from FIELD_GET() or u32_get_bits() won't be fulfilled.
- */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-
-#define PM_INFO_MEMBER_OFF(member) \
- (offsetof(struct icp_qat_fw_init_admin_pm_info, member) / sizeof(u32))
-
-#define PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, _mask_) \
-{ \
- .reg_offset = PM_INFO_MEMBER_OFF(_reg_), \
- .key = __stringify(_field_), \
- .field_mask = _mask_, \
-}
-
-#define PM_INFO_REGSET_ENTRY32(_reg_, _field_) \
- PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, GENMASK(31, 0))
-
#define PM_INFO_REGSET_ENTRY(_reg_, _field_) \
PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, ADF_GEN4_PM_##_field_##_MASK)
-#define PM_INFO_MAX_KEY_LEN 21
-
-struct pm_status_row {
- int reg_offset;
- u32 field_mask;
- const char *key;
-};
-
static const struct pm_status_row pm_fuse_rows[] = {
PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM),
PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM_IDLE),
@@ -109,44 +80,6 @@ static const struct pm_status_row pm_csrs_rows[] = {
PM_INFO_REGSET_ENTRY32(pm.pwrreq, CPM_PM_PWRREQ),
};
-static int pm_scnprint_table(char *buff, const struct pm_status_row *table,
- u32 *pm_info_regs, size_t buff_size, int table_len,
- bool lowercase)
-{
- char key[PM_INFO_MAX_KEY_LEN];
- int wr = 0;
- int i;
-
- for (i = 0; i < table_len; i++) {
- if (lowercase)
- string_lower(key, table[i].key);
- else
- string_upper(key, table[i].key);
-
- wr += scnprintf(&buff[wr], buff_size - wr, "%s: %#x\n", key,
- field_get(table[i].field_mask,
- pm_info_regs[table[i].reg_offset]));
- }
-
- return wr;
-}
-
-static int pm_scnprint_table_upper_keys(char *buff, const struct pm_status_row *table,
- u32 *pm_info_regs, size_t buff_size,
- int table_len)
-{
- return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
- table_len, false);
-}
-
-static int pm_scnprint_table_lower_keys(char *buff, const struct pm_status_row *table,
- u32 *pm_info_regs, size_t buff_size,
- int table_len)
-{
- return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
- table_len, true);
-}
-
static_assert(sizeof(struct icp_qat_fw_init_admin_pm_info) < PAGE_SIZE);
static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev,
@@ -191,9 +124,9 @@ static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev,
/* Fusectl related */
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
"----------- PM Fuse info ---------\n");
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_fuse_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_fuse_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_fuse_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_fuse_rows));
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "max_pwrreq: %#x\n",
pm_info->max_pwrreq);
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "min_pwrreq: %#x\n",
@@ -204,28 +137,28 @@ static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev,
"------------ PM Info ------------\n");
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "power_level: %s\n",
pm_info->pwr_state == PM_SET_MIN ? "min" : "max");
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_info_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_info_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_info_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_info_rows));
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "pm_mode: STATIC\n");
/* SSM related */
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
"----------- SSM_PM Info ----------\n");
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_ssm_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_ssm_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_ssm_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_ssm_rows));
/* Log related */
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
"------------- PM Log -------------\n");
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_log_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_log_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_log_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_log_rows));
- len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_event_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_event_rows));
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_event_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_event_rows));
len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "idle_irq_count: %#x\n",
pm->idle_irq_counters);
@@ -241,9 +174,9 @@ static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev,
/* CSRs content */
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
"----------- HW PM CSRs -----------\n");
- len += pm_scnprint_table_upper_keys(&pm_kv[len], pm_csrs_rows,
- pm_info_regs, PAGE_SIZE - len,
- ARRAY_SIZE(pm_csrs_rows));
+ len += adf_pm_scnprint_table_upper_keys(&pm_kv[len], pm_csrs_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_csrs_rows));
val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG);
len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
index a62eb5e8dbe6..adb21656a3ba 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
@@ -9,6 +9,7 @@
#include <asm/errno.h>
#include "adf_accel_devices.h"
+#include "adf_bank_state.h"
#include "adf_common_drv.h"
#include "adf_gen4_hw_data.h"
#include "adf_gen4_pfvf.h"
@@ -358,7 +359,7 @@ static int adf_gen4_vfmig_load_etr_regs(struct adf_mstate_mgr *sub_mgr,
pf_bank_nr = vf_bank_info->bank_nr + vf_bank_info->vf_nr * hw_data->num_banks_per_vf;
ret = hw_data->bank_state_restore(accel_dev, pf_bank_nr,
- (struct bank_state *)state);
+ (struct adf_bank_state *)state);
if (ret) {
dev_err(&GET_DEV(accel_dev),
"Failed to load regs for vf%d bank%d\n",
@@ -585,7 +586,7 @@ static int adf_gen4_vfmig_save_etr_regs(struct adf_mstate_mgr *subs, u8 *state,
pf_bank_nr += vf_bank_info->vf_nr * hw_data->num_banks_per_vf;
ret = hw_data->bank_state_save(accel_dev, pf_bank_nr,
- (struct bank_state *)state);
+ (struct adf_bank_state *)state);
if (ret) {
dev_err(&GET_DEV(accel_dev),
"Failed to save regs for vf%d bank%d\n",
@@ -593,7 +594,7 @@ static int adf_gen4_vfmig_save_etr_regs(struct adf_mstate_mgr *subs, u8 *state,
return ret;
}
- return sizeof(struct bank_state);
+ return sizeof(struct adf_bank_state);
}
static int adf_gen4_vfmig_save_etr_bank(struct adf_accel_dev *accel_dev,
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h
index 9a5b995f7ada..4c0d576e8c21 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm.h
@@ -24,5 +24,29 @@ struct adf_accel_dev;
/* cpm_pm_status bitfields */
#define ADF_GEN6_PM_INIT_STATE BIT(21)
+#define ADF_GEN6_PM_CPM_PM_STATE_MASK GENMASK(22, 20)
+
+/* fusectl0 bitfields */
+#define ADF_GEN6_PM_ENABLE_PM_MASK BIT(21)
+#define ADF_GEN6_PM_ENABLE_PM_IDLE_MASK BIT(22)
+#define ADF_GEN6_PM_ENABLE_DEEP_PM_IDLE_MASK BIT(23)
+
+/* cpm_pm_fw_init bitfields */
+#define ADF_GEN6_PM_IDLE_FILTER_MASK GENMASK(5, 3)
+#define ADF_GEN6_PM_IDLE_ENABLE_MASK BIT(2)
+
+/* ssm_pm_enable bitfield */
+#define ADF_GEN6_PM_SSM_PM_ENABLE_MASK BIT(0)
+
+/* ssm_pm_domain_status bitfield */
+#define ADF_GEN6_PM_DOMAIN_POWERED_UP_MASK BIT(0)
+
+#ifdef CONFIG_DEBUG_FS
+void adf_gen6_init_dev_pm_data(struct adf_accel_dev *accel_dev);
+#else
+static inline void adf_gen6_init_dev_pm_data(struct adf_accel_dev *accel_dev)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
#endif /* ADF_GEN6_PM_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
new file mode 100644
index 000000000000..603aefba0fdb
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_pm_dbgfs.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/string_helpers.h>
+
+#include "adf_admin.h"
+#include "adf_common_drv.h"
+#include "adf_gen6_pm.h"
+#include "adf_pm_dbgfs_utils.h"
+#include "icp_qat_fw_init_admin.h"
+
+#define PM_INFO_REGSET_ENTRY(_reg_, _field_) \
+ PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, ADF_GEN6_PM_##_field_##_MASK)
+
+static struct pm_status_row pm_fuse_rows[] = {
+ PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM),
+ PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM_IDLE),
+ PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_DEEP_PM_IDLE),
+};
+
+static struct pm_status_row pm_info_rows[] = {
+ PM_INFO_REGSET_ENTRY(pm.status, CPM_PM_STATE),
+ PM_INFO_REGSET_ENTRY(pm.fw_init, IDLE_ENABLE),
+ PM_INFO_REGSET_ENTRY(pm.fw_init, IDLE_FILTER),
+};
+
+static struct pm_status_row pm_ssm_rows[] = {
+ PM_INFO_REGSET_ENTRY(ssm.pm_enable, SSM_PM_ENABLE),
+ PM_INFO_REGSET_ENTRY(ssm.pm_domain_status, DOMAIN_POWERED_UP),
+};
+
+static struct pm_status_row pm_csrs_rows[] = {
+ PM_INFO_REGSET_ENTRY32(pm.fw_init, CPM_PM_FW_INIT),
+ PM_INFO_REGSET_ENTRY32(pm.status, CPM_PM_STATUS),
+};
+
+static_assert(sizeof(struct icp_qat_fw_init_admin_pm_info) < PAGE_SIZE);
+
+static ssize_t adf_gen6_print_pm_status(struct adf_accel_dev *accel_dev,
+ char __user *buf, size_t count,
+ loff_t *pos)
+{
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ struct icp_qat_fw_init_admin_pm_info *pm_info;
+ dma_addr_t p_state_addr;
+ u32 *pm_info_regs;
+ size_t len = 0;
+ char *pm_kv;
+ u32 val;
+ int ret;
+
+ pm_info = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pm_info)
+ return -ENOMEM;
+
+ pm_kv = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pm_kv) {
+ kfree(pm_info);
+ return -ENOMEM;
+ }
+
+ p_state_addr = dma_map_single(&GET_DEV(accel_dev), pm_info, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ ret = dma_mapping_error(&GET_DEV(accel_dev), p_state_addr);
+ if (ret)
+ goto out_free;
+
+ /* Query power management information from QAT FW */
+ ret = adf_get_pm_info(accel_dev, p_state_addr, PAGE_SIZE);
+ dma_unmap_single(&GET_DEV(accel_dev), p_state_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (ret)
+ goto out_free;
+
+ pm_info_regs = (u32 *)pm_info;
+
+ /* Fuse control register */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "----------- PM Fuse info ---------\n");
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_fuse_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_fuse_rows));
+
+ /* Power management */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "----------- PM Info --------------\n");
+
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_info_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_info_rows));
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "pm_mode: ACTIVE\n");
+
+ /* Shared Slice Module */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "----------- SSM_PM Info ----------\n");
+ len += adf_pm_scnprint_table_lower_keys(&pm_kv[len], pm_ssm_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_ssm_rows));
+
+ /* Control status register content */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "----------- HW PM CSRs -----------\n");
+ len += adf_pm_scnprint_table_upper_keys(&pm_kv[len], pm_csrs_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_csrs_rows));
+
+ val = ADF_CSR_RD(pmisc, ADF_GEN6_PM_INTERRUPT);
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "CPM_PM_INTERRUPT: %#x\n", val);
+ ret = simple_read_from_buffer(buf, count, pos, pm_kv, len);
+
+out_free:
+ kfree(pm_info);
+ kfree(pm_kv);
+
+ return ret;
+}
+
+void adf_gen6_init_dev_pm_data(struct adf_accel_dev *accel_dev)
+{
+ accel_dev->power_management.print_pm_status = adf_gen6_print_pm_status;
+ accel_dev->power_management.present = true;
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_dev_pm_data);
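A hedged sketch of how the print_pm_status callback installed above is presumably consumed by the common PM debugfs code (the actual file_operations live elsewhere in qat_common; this reader is illustrative only):

    static ssize_t pm_status_read(struct file *f, char __user *buf,
    			      size_t count, loff_t *pos)
    {
    	struct adf_accel_dev *accel_dev = f->private_data;

    	return accel_dev->power_management.print_pm_status(accel_dev, buf,
    							   count, pos);
    }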
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
index 58a072e2f936..c9b151006dca 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
@@ -5,6 +5,7 @@
#include "adf_gen4_config.h"
#include "adf_gen4_hw_csr_data.h"
#include "adf_gen4_pfvf.h"
+#include "adf_gen4_vf_mig.h"
#include "adf_gen6_shared.h"
struct adf_accel_dev;
@@ -47,3 +48,9 @@ int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev)
return adf_no_dev_config(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_gen6_no_dev_config);
+
+void adf_gen6_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops)
+{
+ adf_gen4_init_vf_mig_ops(vfmig_ops);
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_vf_mig_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
index bc8e71e984fc..fc6fad029a70 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
@@ -4,6 +4,7 @@
#define ADF_GEN6_SHARED_H_
struct adf_hw_csr_ops;
+struct qat_migdev_ops;
struct adf_accel_dev;
struct adf_pfvf_ops;
@@ -12,4 +13,5 @@ void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev);
int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev);
int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev);
+void adf_gen6_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops);
#endif/* ADF_GEN6_SHARED_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c
new file mode 100644
index 000000000000..faa60b04c406
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2025 Intel Corporation. */
+#include <linux/export.h>
+
+#include "adf_gen6_tl.h"
+#include "adf_telemetry.h"
+#include "adf_tl_debugfs.h"
+#include "icp_qat_fw_init_admin.h"
+
+#define ADF_GEN6_TL_DEV_REG_OFF(reg) ADF_TL_DEV_REG_OFF(reg, gen6)
+
+#define ADF_GEN6_TL_RP_REG_OFF(reg) ADF_TL_RP_REG_OFF(reg, gen6)
+
+#define ADF_GEN6_TL_SL_UTIL_COUNTER(_name) \
+ ADF_TL_COUNTER("util_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_util, gen6))
+
+#define ADF_GEN6_TL_SL_EXEC_COUNTER(_name) \
+ ADF_TL_COUNTER("exec_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_exec_cnt, gen6))
+
+#define SLICE_IDX(sl) offsetof(struct icp_qat_fw_init_admin_slice_cnt, sl##_cnt)
+
+#define ADF_GEN6_TL_CMDQ_WAIT_COUNTER(_name) \
+ ADF_TL_COUNTER("cmdq_wait_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_CMDQ_REG_OFF(_name, reg_tm_cmdq_wait_cnt, gen6))
+#define ADF_GEN6_TL_CMDQ_EXEC_COUNTER(_name) \
+ ADF_TL_COUNTER("cmdq_exec_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_CMDQ_REG_OFF(_name, reg_tm_cmdq_exec_cnt, gen6))
+#define ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(_name) \
+ ADF_TL_COUNTER("cmdq_drain_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_CMDQ_REG_OFF(_name, reg_tm_cmdq_drain_cnt, \
+ gen6))
+
+#define CPR_QUEUE_COUNT 5
+#define DCPR_QUEUE_COUNT 3
+#define PKE_QUEUE_COUNT 1
+#define WAT_QUEUE_COUNT 7
+#define WCP_QUEUE_COUNT 7
+#define USC_QUEUE_COUNT 3
+#define ATH_QUEUE_COUNT 2
+
+/* Device level counters. */
+static const struct adf_tl_dbg_counter dev_counters[] = {
+ /* PCIe partial transactions. */
+ ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_prt_trans_cnt)),
+ /* Max read latency[ns]. */
+ ADF_TL_COUNTER(MAX_RD_LAT_NAME, ADF_TL_COUNTER_NS,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_rd_lat_max)),
+ /* Read latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(RD_LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_rd_lat_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_rd_cmpl_cnt)),
+ /* Max "get to put" latency[ns]. */
+ ADF_TL_COUNTER(MAX_LAT_NAME, ADF_TL_COUNTER_NS,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_gp_lat_max)),
+ /* "Get to put" latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_gp_lat_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_ae_put_cnt)),
+ /* PCIe write bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_bw_in)),
+ /* PCIe read bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_bw_out)),
+ /* Page request latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(PAGE_REQ_LAT_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_page_req_lat_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_page_req_cnt)),
+ /* Page translation latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(AT_TRANS_LAT_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_trans_lat_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_trans_lat_cnt)),
+ /* Maximum uTLB used. */
+ ADF_TL_COUNTER(AT_MAX_UTLB_USED_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_max_utlb_used)),
+ /* Ring Empty average[ns] across all rings */
+ ADF_TL_COUNTER_LATENCY(RE_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_re_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_re_cnt)),
+};
+
+/* Accelerator utilization counters */
+static const struct adf_tl_dbg_counter sl_util_counters[ADF_TL_SL_CNT_COUNT] = {
+ /* Compression accelerator utilization. */
+ [SLICE_IDX(cpr)] = ADF_GEN6_TL_SL_UTIL_COUNTER(cnv),
+ /* Decompression accelerator utilization. */
+ [SLICE_IDX(dcpr)] = ADF_GEN6_TL_SL_UTIL_COUNTER(dcprz),
+ /* PKE accelerator utilization. */
+ [SLICE_IDX(pke)] = ADF_GEN6_TL_SL_UTIL_COUNTER(pke),
+ /* Wireless Authentication accelerator utilization. */
+ [SLICE_IDX(wat)] = ADF_GEN6_TL_SL_UTIL_COUNTER(wat),
+ /* Wireless Cipher accelerator utilization. */
+ [SLICE_IDX(wcp)] = ADF_GEN6_TL_SL_UTIL_COUNTER(wcp),
+ /* UCS accelerator utilization. */
+ [SLICE_IDX(ucs)] = ADF_GEN6_TL_SL_UTIL_COUNTER(ucs),
+ /* Authentication accelerator utilization. */
+ [SLICE_IDX(ath)] = ADF_GEN6_TL_SL_UTIL_COUNTER(ath),
+};
+
+/* Accelerator execution counters */
+static const struct adf_tl_dbg_counter sl_exec_counters[ADF_TL_SL_CNT_COUNT] = {
+ /* Compression accelerator execution count. */
+ [SLICE_IDX(cpr)] = ADF_GEN6_TL_SL_EXEC_COUNTER(cnv),
+ /* Decompression accelerator execution count. */
+ [SLICE_IDX(dcpr)] = ADF_GEN6_TL_SL_EXEC_COUNTER(dcprz),
+ /* PKE execution count. */
+ [SLICE_IDX(pke)] = ADF_GEN6_TL_SL_EXEC_COUNTER(pke),
+ /* Wireless Authentication accelerator execution count. */
+ [SLICE_IDX(wat)] = ADF_GEN6_TL_SL_EXEC_COUNTER(wat),
+ /* Wireless Cipher accelerator execution count. */
+ [SLICE_IDX(wcp)] = ADF_GEN6_TL_SL_EXEC_COUNTER(wcp),
+ /* UCS accelerator execution count. */
+ [SLICE_IDX(ucs)] = ADF_GEN6_TL_SL_EXEC_COUNTER(ucs),
+ /* Authentication accelerator execution count. */
+ [SLICE_IDX(ath)] = ADF_GEN6_TL_SL_EXEC_COUNTER(ath),
+};
+
+static const struct adf_tl_dbg_counter cnv_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(cnv),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(cnv),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(cnv)
+};
+
+#define NUM_CMDQ_COUNTERS ARRAY_SIZE(cnv_cmdq_counters)
+
+static const struct adf_tl_dbg_counter dcprz_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(dcprz),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(dcprz),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(dcprz)
+};
+
+static_assert(ARRAY_SIZE(dcprz_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter pke_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(pke),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(pke),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(pke)
+};
+
+static_assert(ARRAY_SIZE(pke_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter wat_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(wat),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(wat),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(wat)
+};
+
+static_assert(ARRAY_SIZE(wat_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter wcp_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(wcp),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(wcp),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(wcp)
+};
+
+static_assert(ARRAY_SIZE(wcp_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter ucs_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(ucs),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(ucs),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(ucs)
+};
+
+static_assert(ARRAY_SIZE(ucs_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter ath_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(ath),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(ath),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(ath)
+};
+
+static_assert(ARRAY_SIZE(ath_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+/* Per-slice CMDQ counters (wait, execution and drain counts). */
+static const struct adf_tl_dbg_counter *cmdq_counters[ADF_TL_SL_CNT_COUNT] = {
+	/* Compression CMDQ counters. */
+	[SLICE_IDX(cpr)] = cnv_cmdq_counters,
+	/* Decompression CMDQ counters. */
+	[SLICE_IDX(dcpr)] = dcprz_cmdq_counters,
+	/* PKE CMDQ counters. */
+	[SLICE_IDX(pke)] = pke_cmdq_counters,
+	/* Wireless Authentication CMDQ counters. */
+	[SLICE_IDX(wat)] = wat_cmdq_counters,
+	/* Wireless Cipher CMDQ counters. */
+	[SLICE_IDX(wcp)] = wcp_cmdq_counters,
+	/* UCS CMDQ counters. */
+	[SLICE_IDX(ucs)] = ucs_cmdq_counters,
+	/* Authentication CMDQ counters. */
+	[SLICE_IDX(ath)] = ath_cmdq_counters,
+};
+
+/* Ring pair counters. */
+static const struct adf_tl_dbg_counter rp_counters[] = {
+ /* PCIe partial transactions. */
+ ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_prt_trans_cnt)),
+ /* "Get to put" latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_gp_lat_acc),
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_ae_put_cnt)),
+ /* PCIe write bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_bw_in)),
+ /* PCIe read bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_bw_out)),
+ /* Message descriptor DevTLB hit rate. */
+ ADF_TL_COUNTER(AT_GLOB_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_hit)),
+ /* Message descriptor DevTLB miss rate. */
+ ADF_TL_COUNTER(AT_GLOB_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_miss)),
+ /* Payload DevTLB hit rate. */
+ ADF_TL_COUNTER(AT_PAYLD_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_hit)),
+ /* Payload DevTLB miss rate. */
+ ADF_TL_COUNTER(AT_PAYLD_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_miss)),
+ /* Ring Empty average[ns]. */
+ ADF_TL_COUNTER_LATENCY(RE_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_re_acc),
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_re_cnt)),
+};
+
+void adf_gen6_init_tl_data(struct adf_tl_hw_data *tl_data)
+{
+ tl_data->layout_sz = ADF_GEN6_TL_LAYOUT_SZ;
+ tl_data->slice_reg_sz = ADF_GEN6_TL_SLICE_REG_SZ;
+ tl_data->cmdq_reg_sz = ADF_GEN6_TL_CMDQ_REG_SZ;
+ tl_data->rp_reg_sz = ADF_GEN6_TL_RP_REG_SZ;
+ tl_data->num_hbuff = ADF_GEN6_TL_NUM_HIST_BUFFS;
+ tl_data->max_rp = ADF_GEN6_TL_MAX_RP_NUM;
+ tl_data->msg_cnt_off = ADF_GEN6_TL_MSG_CNT_OFF;
+ tl_data->cpp_ns_per_cycle = ADF_GEN6_CPP_NS_PER_CYCLE;
+ tl_data->bw_units_to_bytes = ADF_GEN6_TL_BW_HW_UNITS_TO_BYTES;
+
+ tl_data->dev_counters = dev_counters;
+ tl_data->num_dev_counters = ARRAY_SIZE(dev_counters);
+ tl_data->sl_util_counters = sl_util_counters;
+ tl_data->sl_exec_counters = sl_exec_counters;
+ tl_data->cmdq_counters = cmdq_counters;
+ tl_data->num_cmdq_counters = NUM_CMDQ_COUNTERS;
+ tl_data->rp_counters = rp_counters;
+ tl_data->num_rp_counters = ARRAY_SIZE(rp_counters);
+ tl_data->max_sl_cnt = ADF_GEN6_TL_MAX_SLICES_PER_TYPE;
+
+ tl_data->multiplier.cpr_cnt = CPR_QUEUE_COUNT;
+ tl_data->multiplier.dcpr_cnt = DCPR_QUEUE_COUNT;
+ tl_data->multiplier.pke_cnt = PKE_QUEUE_COUNT;
+ tl_data->multiplier.wat_cnt = WAT_QUEUE_COUNT;
+ tl_data->multiplier.wcp_cnt = WCP_QUEUE_COUNT;
+ tl_data->multiplier.ucs_cnt = USC_QUEUE_COUNT;
+ tl_data->multiplier.ath_cnt = ATH_QUEUE_COUNT;
+}
+EXPORT_SYMBOL_GPL(adf_gen6_init_tl_data);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.h
new file mode 100644
index 000000000000..49db660b8eb9
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2025 Intel Corporation. */
+#ifndef ADF_GEN6_TL_H
+#define ADF_GEN6_TL_H
+
+#include <linux/types.h>
+
+struct adf_tl_hw_data;
+
+/* Computation constants. */
+#define ADF_GEN6_CPP_NS_PER_CYCLE 2
+#define ADF_GEN6_TL_BW_HW_UNITS_TO_BYTES 64
+
+/* Maximum aggregation time. Value is in milliseconds. */
+#define ADF_GEN6_TL_MAX_AGGR_TIME_MS 4000
+/* Number of buffers to store historic values. */
+#define ADF_GEN6_TL_NUM_HIST_BUFFS \
+ (ADF_GEN6_TL_MAX_AGGR_TIME_MS / ADF_TL_DATA_WR_INTERVAL_MS)
+
+/* Max number of HW resources of one type */
+#define ADF_GEN6_TL_MAX_SLICES_PER_TYPE 32
+#define MAX_ATH_SL_COUNT 7
+#define MAX_CNV_SL_COUNT 2
+#define MAX_DCPRZ_SL_COUNT 2
+#define MAX_PKE_SL_COUNT 32
+#define MAX_UCS_SL_COUNT 4
+#define MAX_WAT_SL_COUNT 5
+#define MAX_WCP_SL_COUNT 5
+
+#define MAX_ATH_CMDQ_COUNT 14
+#define MAX_CNV_CMDQ_COUNT 6
+#define MAX_DCPRZ_CMDQ_COUNT 6
+#define MAX_PKE_CMDQ_COUNT 32
+#define MAX_UCS_CMDQ_COUNT 12
+#define MAX_WAT_CMDQ_COUNT 35
+#define MAX_WCP_CMDQ_COUNT 35
+
+/* Max number of simultaneously monitored ring pairs. */
+#define ADF_GEN6_TL_MAX_RP_NUM 4
+
+/**
+ * struct adf_gen6_tl_slice_data_regs - HW slice data as populated by FW.
+ * @reg_tm_slice_exec_cnt: Slice execution count.
+ * @reg_tm_slice_util: Slice utilization.
+ */
+struct adf_gen6_tl_slice_data_regs {
+ __u32 reg_tm_slice_exec_cnt;
+ __u32 reg_tm_slice_util;
+};
+
+#define ADF_GEN6_TL_SLICE_REG_SZ sizeof(struct adf_gen6_tl_slice_data_regs)
+
+/**
+ * struct adf_gen6_tl_cmdq_data_regs - HW CMDQ data as populated by FW.
+ * @reg_tm_cmdq_wait_cnt: CMDQ wait count.
+ * @reg_tm_cmdq_exec_cnt: CMDQ execution count.
+ * @reg_tm_cmdq_drain_cnt: CMDQ drain count.
+ */
+struct adf_gen6_tl_cmdq_data_regs {
+ __u32 reg_tm_cmdq_wait_cnt;
+ __u32 reg_tm_cmdq_exec_cnt;
+ __u32 reg_tm_cmdq_drain_cnt;
+ __u32 reserved;
+};
+
+#define ADF_GEN6_TL_CMDQ_REG_SZ sizeof(struct adf_gen6_tl_cmdq_data_regs)
+
+/**
+ * struct adf_gen6_tl_device_data_regs - This structure stores device telemetry
+ * counter values as they are being populated periodically by the device.
+ * @reg_tl_rd_lat_acc: read latency accumulator
+ * @reg_tl_gp_lat_acc: "get to put" latency accumulator
+ * @reg_tl_at_page_req_lat_acc: AT/DevTLB page request latency accumulator
+ * @reg_tl_at_trans_lat_acc: DevTLB transaction latency accumulator
+ * @reg_tl_re_acc: accumulated ring empty time
+ * @reg_tl_prt_trans_cnt: PCIe partial transactions
+ * @reg_tl_rd_lat_max: maximum logged read latency
+ * @reg_tl_rd_cmpl_cnt: read requests completed count
+ * @reg_tl_gp_lat_max: maximum logged get to put latency
+ * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings
+ * @reg_tl_bw_in: PCIe write bandwidth
+ * @reg_tl_bw_out: PCIe read bandwidth
+ * @reg_tl_at_page_req_cnt: DevTLB page requests count
+ * @reg_tl_at_trans_lat_cnt: DevTLB transaction latency samples count
+ * @reg_tl_at_max_utlb_used: maximum uTLB used
+ * @reg_tl_re_cnt: ring empty time samples count
+ * @reserved: reserved
+ * @ath_slices: array of Authentication slices utilization registers
+ * @cnv_slices: array of Compression slices utilization registers
+ * @dcprz_slices: array of Decompression slices utilization registers
+ * @pke_slices: array of PKE slices utilization registers
+ * @ucs_slices: array of UCS slices utilization registers
+ * @wat_slices: array of Wireless Authentication slices utilization registers
+ * @wcp_slices: array of Wireless Cipher slices utilization registers
+ * @ath_cmdq: array of Authentication cmdq telemetry registers
+ * @cnv_cmdq: array of Compression cmdq telemetry registers
+ * @dcprz_cmdq: array of Decompression cmdq telemetry registers
+ * @pke_cmdq: array of PKE cmdq telemetry registers
+ * @ucs_cmdq: array of UCS cmdq telemetry registers
+ * @wat_cmdq: array of Wireless Authentication cmdq telemetry registers
+ * @wcp_cmdq: array of Wireless Cipher cmdq telemetry registers
+ */
+struct adf_gen6_tl_device_data_regs {
+ __u64 reg_tl_rd_lat_acc;
+ __u64 reg_tl_gp_lat_acc;
+ __u64 reg_tl_at_page_req_lat_acc;
+ __u64 reg_tl_at_trans_lat_acc;
+ __u64 reg_tl_re_acc;
+ __u32 reg_tl_prt_trans_cnt;
+ __u32 reg_tl_rd_lat_max;
+ __u32 reg_tl_rd_cmpl_cnt;
+ __u32 reg_tl_gp_lat_max;
+ __u32 reg_tl_ae_put_cnt;
+ __u32 reg_tl_bw_in;
+ __u32 reg_tl_bw_out;
+ __u32 reg_tl_at_page_req_cnt;
+ __u32 reg_tl_at_trans_lat_cnt;
+ __u32 reg_tl_at_max_utlb_used;
+ __u32 reg_tl_re_cnt;
+ __u32 reserved;
+ struct adf_gen6_tl_slice_data_regs ath_slices[MAX_ATH_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs cnv_slices[MAX_CNV_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs dcprz_slices[MAX_DCPRZ_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs pke_slices[MAX_PKE_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs ucs_slices[MAX_UCS_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs wat_slices[MAX_WAT_SL_COUNT];
+ struct adf_gen6_tl_slice_data_regs wcp_slices[MAX_WCP_SL_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs ath_cmdq[MAX_ATH_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs cnv_cmdq[MAX_CNV_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs dcprz_cmdq[MAX_DCPRZ_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs pke_cmdq[MAX_PKE_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs ucs_cmdq[MAX_UCS_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs wat_cmdq[MAX_WAT_CMDQ_COUNT];
+ struct adf_gen6_tl_cmdq_data_regs wcp_cmdq[MAX_WCP_CMDQ_COUNT];
+};
+
+/**
+ * struct adf_gen6_tl_ring_pair_data_regs - This structure stores ring pair
+ * telemetry counter values as they are being populated periodically by the device.
+ * @reg_tl_gp_lat_acc: get-put latency accumulator
+ * @reg_tl_re_acc: accumulated ring empty time
+ * @reg_tl_prt_trans_cnt: PCIe partial transactions
+ * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings
+ * @reg_tl_bw_in: PCIe write bandwidth
+ * @reg_tl_bw_out: PCIe read bandwidth
+ * @reg_tl_at_glob_devtlb_hit: Message descriptor DevTLB hit rate
+ * @reg_tl_at_glob_devtlb_miss: Message descriptor DevTLB miss rate
+ * @reg_tl_at_payld_devtlb_hit: Payload DevTLB hit rate
+ * @reg_tl_at_payld_devtlb_miss: Payload DevTLB miss rate
+ * @reg_tl_re_cnt: ring empty time samples count
+ * @reserved1: reserved
+ */
+struct adf_gen6_tl_ring_pair_data_regs {
+ __u64 reg_tl_gp_lat_acc;
+ __u64 reg_tl_re_acc;
+ __u32 reg_tl_prt_trans_cnt;
+ __u32 reg_tl_ae_put_cnt;
+ __u32 reg_tl_bw_in;
+ __u32 reg_tl_bw_out;
+ __u32 reg_tl_at_glob_devtlb_hit;
+ __u32 reg_tl_at_glob_devtlb_miss;
+ __u32 reg_tl_at_payld_devtlb_hit;
+ __u32 reg_tl_at_payld_devtlb_miss;
+ __u32 reg_tl_re_cnt;
+ __u32 reserved1;
+};
+
+#define ADF_GEN6_TL_RP_REG_SZ sizeof(struct adf_gen6_tl_ring_pair_data_regs)
+
+/**
+ * struct adf_gen6_tl_layout - This structure represents the entire telemetry
+ * counter data: device + 4 ring pairs, as they are being populated periodically
+ * by the device.
+ * @tl_device_data_regs: structure of device telemetry registers
+ * @tl_ring_pairs_data_regs: array of ring pairs telemetry registers
+ * @reg_tl_msg_cnt: telemetry message counter
+ * @reserved: reserved
+ */
+struct adf_gen6_tl_layout {
+ struct adf_gen6_tl_device_data_regs tl_device_data_regs;
+ struct adf_gen6_tl_ring_pair_data_regs
+ tl_ring_pairs_data_regs[ADF_GEN6_TL_MAX_RP_NUM];
+ __u32 reg_tl_msg_cnt;
+ __u32 reserved;
+};
+
+#define ADF_GEN6_TL_LAYOUT_SZ sizeof(struct adf_gen6_tl_layout)
+#define ADF_GEN6_TL_MSG_CNT_OFF \
+ offsetof(struct adf_gen6_tl_layout, reg_tl_msg_cnt)
+
+#ifdef CONFIG_DEBUG_FS
+void adf_gen6_init_tl_data(struct adf_tl_hw_data *tl_data);
+#else
+static inline void adf_gen6_init_tl_data(struct adf_tl_hw_data *tl_data)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+#endif /* ADF_GEN6_TL_H */
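The history depth falls out of the two constants at the top of this header. Assuming ADF_TL_DATA_WR_INTERVAL_MS, defined in the common telemetry headers, is 1000 (an assumption, not shown in this patch):

    /*
     * ADF_GEN6_TL_NUM_HIST_BUFFS = ADF_GEN6_TL_MAX_AGGR_TIME_MS
     *                            / ADF_TL_DATA_WR_INTERVAL_MS
     *                            = 4000 / 1000 = 4
     *
     * i.e. four historic telemetry snapshots are retained per device.
     */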
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index f189cce7d153..46491048e0bb 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -404,6 +404,7 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
hw_data->exit_admin_comms(accel_dev);
adf_cleanup_etr_data(accel_dev);
+ adf_misc_wq_flush();
adf_dev_restore(accel_dev);
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
index cae1aee5479a..4639d7fd93e6 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c
@@ -384,7 +384,8 @@ EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
*/
int __init adf_init_misc_wq(void)
{
- adf_misc_wq = alloc_workqueue("qat_misc_wq", WQ_MEM_RECLAIM, 0);
+ adf_misc_wq = alloc_workqueue("qat_misc_wq",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
return !adf_misc_wq ? -ENOMEM : 0;
}
@@ -407,3 +408,8 @@ bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
{
return queue_delayed_work(adf_misc_wq, work, delay);
}
+
+void adf_misc_wq_flush(void)
+{
+ flush_workqueue(adf_misc_wq);
+}
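The new flush hook pairs with the existing queueing helpers; a sketch of the intended ordering (the work item and the adf_misc_wq_queue_work() producer call are illustrative; only the flush wiring comes from this patch):

    static void example_fn(struct work_struct *w) { /* ... */ }
    static DECLARE_WORK(example_work, example_fn);

    adf_misc_wq_queue_work(&example_work);	/* producer path, elsewhere */
    /* ... */
    adf_misc_wq_flush();	/* teardown: waits for all work already queued,
    			 * so adf_dev_shutdown() cannot free state that a
    			 * pending work item still dereferences */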
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
new file mode 100644
index 000000000000..4ccc94ed9493
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/sprintf.h>
+#include <linux/string_helpers.h>
+
+#include "adf_pm_dbgfs_utils.h"
+
+#define PM_INFO_MAX_KEY_LEN 21
+
+static int pm_scnprint_table(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len,
+ bool lowercase)
+{
+ char key[PM_INFO_MAX_KEY_LEN];
+ int wr = 0;
+ int i;
+
+ for (i = 0; i < table_len; i++) {
+ if (lowercase)
+ string_lower(key, table[i].key);
+ else
+ string_upper(key, table[i].key);
+
+ wr += scnprintf(&buff[wr], buff_size - wr, "%s: %#x\n", key,
+ field_get(table[i].field_mask,
+ pm_info_regs[table[i].reg_offset]));
+ }
+
+ return wr;
+}
+
+int adf_pm_scnprint_table_upper_keys(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len)
+{
+ return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
+ table_len, false);
+}
+
+int adf_pm_scnprint_table_lower_keys(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len)
+{
+ return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
+ table_len, true);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.h b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.h
new file mode 100644
index 000000000000..854f058b35ed
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2025 Intel Corporation */
+#ifndef ADF_PM_DBGFS_UTILS_H_
+#define ADF_PM_DBGFS_UTILS_H_
+
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include "icp_qat_fw_init_admin.h"
+
+#define PM_INFO_MEMBER_OFF(member) \
+ (offsetof(struct icp_qat_fw_init_admin_pm_info, member) / sizeof(u32))
+
+#define PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, _mask_) \
+{ \
+ .reg_offset = PM_INFO_MEMBER_OFF(_reg_), \
+ .key = __stringify(_field_), \
+ .field_mask = _mask_, \
+}
+
+#define PM_INFO_REGSET_ENTRY32(_reg_, _field_) \
+ PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, GENMASK(31, 0))
+
+struct pm_status_row {
+ int reg_offset;
+ u32 field_mask;
+ const char *key;
+};
+
+int adf_pm_scnprint_table_upper_keys(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len);
+
+int adf_pm_scnprint_table_lower_keys(char *buff, const struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len);
+
+#endif /* ADF_PM_DBGFS_UTILS_H_ */
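Usage of the shared helpers is a table-plus-print pairing; a minimal sketch (the row mirrors the GEN6 file above, the surrounding function is illustrative):

    #include "adf_pm_dbgfs_utils.h"

    static const struct pm_status_row example_rows[] = {
    	PM_INFO_REGSET_ENTRY32(pm.status, CPM_PM_STATUS),
    };

    /* Emits one "key: 0x<val>" line per row, keys forced to lower case. */
    static int example_dump(char *buf, size_t sz, u32 *pm_info_regs)
    {
    	return adf_pm_scnprint_table_lower_keys(buf, example_rows,
    						pm_info_regs, sz,
    						ARRAY_SIZE(example_rows));
    }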
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c
index e782c23fc1bf..c6a54e465931 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c
@@ -13,6 +13,7 @@
#include <linux/units.h>
#include "adf_accel_devices.h"
+#include "adf_cfg_services.h"
#include "adf_common_drv.h"
#include "adf_rl_admin.h"
#include "adf_rl.h"
@@ -55,7 +56,7 @@ static int validate_user_input(struct adf_accel_dev *accel_dev,
}
}
- if (sla_in->srv >= ADF_SVC_NONE) {
+ if (sla_in->srv >= SVC_BASE_COUNT) {
dev_notice(&GET_DEV(accel_dev),
"Wrong service type\n");
return -EINVAL;
@@ -168,20 +169,6 @@ static struct rl_sla *find_parent(struct adf_rl *rl_data,
return NULL;
}
-static enum adf_cfg_service_type srv_to_cfg_svc_type(enum adf_base_services rl_srv)
-{
- switch (rl_srv) {
- case ADF_SVC_ASYM:
- return ASYM;
- case ADF_SVC_SYM:
- return SYM;
- case ADF_SVC_DC:
- return COMP;
- default:
- return UNUSED;
- }
-}
-
/**
* adf_rl_get_sla_arr_of_type() - Returns a pointer to SLA type specific array
* @rl_data: pointer to ratelimiting data
@@ -209,22 +196,6 @@ u32 adf_rl_get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type,
}
}
-static bool is_service_enabled(struct adf_accel_dev *accel_dev,
- enum adf_base_services rl_srv)
-{
- enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(rl_srv);
- struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
- u8 rps_per_bundle = hw_data->num_banks_per_vf;
- int i;
-
- for (i = 0; i < rps_per_bundle; i++) {
- if (GET_SRV_TYPE(accel_dev, i) == arb_srv)
- return true;
- }
-
- return false;
-}
-
/**
* prepare_rp_ids() - Creates an array of ring pair IDs from bitmask
* @accel_dev: pointer to acceleration device structure
@@ -243,7 +214,7 @@ static bool is_service_enabled(struct adf_accel_dev *accel_dev,
static int prepare_rp_ids(struct adf_accel_dev *accel_dev, struct rl_sla *sla,
const unsigned long rp_mask)
{
- enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(sla->srv);
+ enum adf_cfg_service_type arb_srv = adf_srv_to_cfg_svc_type(sla->srv);
u16 rps_per_bundle = GET_HW_DATA(accel_dev)->num_banks_per_vf;
bool *rp_in_use = accel_dev->rate_limiting->rp_in_use;
size_t rp_cnt_max = ARRAY_SIZE(sla->ring_pairs_ids);
@@ -558,21 +529,9 @@ u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val,
if (!sla_val)
return 0;
+ /* Handle generation specific slice count adjustment */
avail_slice_cycles = hw_data->clock_frequency;
-
- switch (svc_type) {
- case ADF_SVC_ASYM:
- avail_slice_cycles *= device_data->slices.pke_cnt;
- break;
- case ADF_SVC_SYM:
- avail_slice_cycles *= device_data->slices.cph_cnt;
- break;
- case ADF_SVC_DC:
- avail_slice_cycles *= device_data->slices.dcpr_cnt;
- break;
- default:
- break;
- }
+ avail_slice_cycles *= hw_data->get_svc_slice_cnt(accel_dev, svc_type);
do_div(avail_slice_cycles, device_data->scan_interval);
allocated_tokens = avail_slice_cycles * sla_val;
@@ -581,6 +540,17 @@ u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val,
return allocated_tokens;
}
+static u32 adf_rl_get_num_svc_aes(struct adf_accel_dev *accel_dev,
+ enum adf_base_services svc)
+{
+ struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
+
+ if (svc >= SVC_BASE_COUNT)
+ return 0;
+
+ return device_data->svc_ae_mask[svc];
+}
+
u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val,
enum adf_base_services svc_type)
{
@@ -592,7 +562,7 @@ u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val,
return 0;
avail_ae_cycles = hw_data->clock_frequency;
- avail_ae_cycles *= hw_data->get_num_aes(hw_data) - 1;
+ avail_ae_cycles *= adf_rl_get_num_svc_aes(accel_dev, svc_type);
do_div(avail_ae_cycles, device_data->scan_interval);
sla_val *= device_data->max_tp[svc_type];
@@ -617,9 +587,8 @@ u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val,
sla_to_bytes *= device_data->max_tp[svc_type];
do_div(sla_to_bytes, device_data->scale_ref);
- sla_to_bytes *= (svc_type == ADF_SVC_ASYM) ? RL_TOKEN_ASYM_SIZE :
- BYTES_PER_MBIT;
- if (svc_type == ADF_SVC_DC && is_bw_out)
+ sla_to_bytes *= (svc_type == SVC_ASYM) ? RL_TOKEN_ASYM_SIZE : BYTES_PER_MBIT;
+ if (svc_type == SVC_DC && is_bw_out)
sla_to_bytes *= device_data->slices.dcpr_cnt -
device_data->dcpr_correction;
@@ -660,7 +629,7 @@ static int add_new_sla_entry(struct adf_accel_dev *accel_dev,
}
*sla_out = sla;
- if (!is_service_enabled(accel_dev, sla_in->srv)) {
+ if (!adf_is_service_enabled(accel_dev, sla_in->srv)) {
dev_notice(&GET_DEV(accel_dev),
"Provided service is not enabled\n");
ret = -EINVAL;
@@ -730,8 +699,8 @@ static int initialize_default_nodes(struct adf_accel_dev *accel_dev)
sla_in.type = RL_ROOT;
sla_in.parent_id = RL_PARENT_DEFAULT_ID;
- for (i = 0; i < ADF_SVC_NONE; i++) {
- if (!is_service_enabled(accel_dev, i))
+ for (i = 0; i < SVC_BASE_COUNT; i++) {
+ if (!adf_is_service_enabled(accel_dev, i))
continue;
sla_in.cir = device_data->scale_ref;
@@ -745,10 +714,9 @@ static int initialize_default_nodes(struct adf_accel_dev *accel_dev)
/* Init default cluster for each root */
sla_in.type = RL_CLUSTER;
- for (i = 0; i < ADF_SVC_NONE; i++) {
+ for (i = 0; i < SVC_BASE_COUNT; i++) {
if (!rl_data->root[i])
continue;
-
sla_in.cir = rl_data->root[i]->cir;
sla_in.pir = sla_in.cir;
sla_in.srv = rl_data->root[i]->srv;
@@ -987,7 +955,7 @@ int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev,
struct rl_sla *sla = NULL;
int i;
- if (srv >= ADF_SVC_NONE)
+ if (srv >= SVC_BASE_COUNT)
return -EINVAL;
if (sla_id > RL_SLA_EMPTY_ID && !validate_sla_id(accel_dev, sla_id)) {
@@ -1086,9 +1054,9 @@ int adf_rl_init(struct adf_accel_dev *accel_dev)
int ret = 0;
/* Validate device parameters */
- if (RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_ASYM]) ||
- RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_SYM]) ||
- RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_DC]) ||
+ if (RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[SVC_ASYM]) ||
+ RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[SVC_SYM]) ||
+ RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[SVC_DC]) ||
RL_VALIDATE_NON_ZERO(rl_hw_data->scan_interval) ||
RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_div) ||
RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_mul) ||
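In symbolic form, the slice-token budget after this refactor (consolidating the hunk above; the final scale_ref division happens outside the hunk and is presumably unchanged):

    /*
     * tokens = (clock_frequency *
     *           hw_data->get_svc_slice_cnt(accel_dev, svc_type) /
     *           scan_interval) * sla_val
     *
     * A service's cycle budget therefore scales linearly with its slice
     * count, and the per-service switch statement disappears.
     */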
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h
index bfe750ea0e83..c1f3f9a51195 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h
@@ -7,6 +7,8 @@
#include <linux/mutex.h>
#include <linux/types.h>
+#include "adf_cfg_services.h"
+
struct adf_accel_dev;
#define RL_ROOT_MAX 4
@@ -24,13 +26,6 @@ enum rl_node_type {
RL_LEAF,
};
-enum adf_base_services {
- ADF_SVC_ASYM = 0,
- ADF_SVC_SYM,
- ADF_SVC_DC,
- ADF_SVC_NONE,
-};
-
/**
* struct adf_rl_sla_input_data - ratelimiting user input data structure
* @rp_mask: 64 bit bitmask of ring pair IDs which will be assigned to SLA.
@@ -73,6 +68,7 @@ struct rl_slice_cnt {
u8 dcpr_cnt;
u8 pke_cnt;
u8 cph_cnt;
+ u8 cpr_cnt;
};
struct adf_rl_interface_data {
@@ -94,6 +90,7 @@ struct adf_rl_hw_data {
u32 pcie_scale_div;
u32 dcpr_correction;
u32 max_tp[RL_ROOT_MAX];
+ u32 svc_ae_mask[SVC_BASE_COUNT];
struct rl_slice_cnt slices;
};
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c
index 698a14f4ce66..4a3e0591fdba 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c
@@ -63,6 +63,7 @@ int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev,
slices_int->pke_cnt = slices_resp.pke_cnt;
/* For symmetric crypto, slice tokens are relative to the UCS slice */
slices_int->cph_cnt = slices_resp.ucs_cnt;
+ slices_int->cpr_cnt = slices_resp.cpr_cnt;
return 0;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
index c75d0b6cb0ad..bb904ba4bf84 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -155,7 +155,6 @@ static int adf_do_enable_sriov(struct adf_accel_dev *accel_dev)
if (!device_iommu_mapped(&GET_DEV(accel_dev))) {
dev_warn(&GET_DEV(accel_dev),
"IOMMU should be enabled for SR-IOV to work correctly\n");
- return -EINVAL;
}
if (adf_dev_started(accel_dev)) {
@@ -300,7 +299,8 @@ EXPORT_SYMBOL_GPL(adf_sriov_configure);
int __init adf_init_pf_wq(void)
{
/* Workqueue for PF2VF responses */
- pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);
+ pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
return !pf2vf_resp_wq ? -ENOMEM : 0;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
index 6c39194647f0..79c63dfa8ff3 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
@@ -269,6 +269,8 @@ static ssize_t rp2srv_show(struct device *dev, struct device_attribute *attr,
return sysfs_emit(buf, "%s\n", ADF_CFG_SYM);
case ASYM:
return sysfs_emit(buf, "%s\n", ADF_CFG_ASYM);
+ case DECOMP:
+ return sysfs_emit(buf, "%s\n", ADF_CFG_DECOMP);
default:
break;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
index bedb514d4e30..f31556beed8b 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
@@ -32,9 +32,10 @@ enum rl_params {
};
static const char *const rl_services[] = {
- [ADF_SVC_ASYM] = "asym",
- [ADF_SVC_SYM] = "sym",
- [ADF_SVC_DC] = "dc",
+ [SVC_ASYM] = "asym",
+ [SVC_SYM] = "sym",
+ [SVC_DC] = "dc",
+ [SVC_DECOMP] = "decomp",
};
static const char *const rl_operations[] = {
@@ -282,7 +283,7 @@ static ssize_t srv_show(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- if (get == ADF_SVC_NONE)
+ if (get == SVC_BASE_COUNT)
return -EINVAL;
return sysfs_emit(buf, "%s\n", rl_services[get]);
@@ -291,14 +292,22 @@ static ssize_t srv_show(struct device *dev, struct device_attribute *attr,
static ssize_t srv_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct adf_accel_dev *accel_dev;
unsigned int val;
int ret;
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
ret = sysfs_match_string(rl_services, buf);
if (ret < 0)
return ret;
val = ret;
+ if (!adf_is_service_enabled(accel_dev, val))
+ return -EINVAL;
+
ret = set_param_u(dev, SRV, val);
if (ret)
return ret;
@@ -439,8 +448,8 @@ int adf_sysfs_rl_add(struct adf_accel_dev *accel_dev)
dev_err(&GET_DEV(accel_dev),
"Failed to create qat_rl attribute group\n");
- data->cap_rem_srv = ADF_SVC_NONE;
- data->input.srv = ADF_SVC_NONE;
+ data->cap_rem_srv = SVC_BASE_COUNT;
+ data->input.srv = SVC_BASE_COUNT;
data->sysfs_added = true;
return ret;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
index 74fb0c2ed241..b64142db1f0d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
@@ -212,6 +212,23 @@ int adf_tl_halt(struct adf_accel_dev *accel_dev)
return ret;
}
+static void adf_set_cmdq_cnt(struct adf_accel_dev *accel_dev,
+ struct adf_tl_hw_data *tl_data)
+{
+ struct icp_qat_fw_init_admin_slice_cnt *slice_cnt, *cmdq_cnt;
+
+ slice_cnt = &accel_dev->telemetry->slice_cnt;
+ cmdq_cnt = &accel_dev->telemetry->cmdq_cnt;
+
+ cmdq_cnt->cpr_cnt = slice_cnt->cpr_cnt * tl_data->multiplier.cpr_cnt;
+ cmdq_cnt->dcpr_cnt = slice_cnt->dcpr_cnt * tl_data->multiplier.dcpr_cnt;
+ cmdq_cnt->pke_cnt = slice_cnt->pke_cnt * tl_data->multiplier.pke_cnt;
+ cmdq_cnt->wat_cnt = slice_cnt->wat_cnt * tl_data->multiplier.wat_cnt;
+ cmdq_cnt->wcp_cnt = slice_cnt->wcp_cnt * tl_data->multiplier.wcp_cnt;
+ cmdq_cnt->ucs_cnt = slice_cnt->ucs_cnt * tl_data->multiplier.ucs_cnt;
+ cmdq_cnt->ath_cnt = slice_cnt->ath_cnt * tl_data->multiplier.ath_cnt;
+}
+
int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
{
struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
@@ -235,6 +252,8 @@ int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
return ret;
}
+ adf_set_cmdq_cnt(accel_dev, tl_data);
+
telemetry->hbuffs = state;
atomic_set(&telemetry->state, state);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
index e54a406cc1b4..02d75c3c214a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
@@ -28,19 +28,23 @@ struct dentry;
struct adf_tl_hw_data {
size_t layout_sz;
size_t slice_reg_sz;
+ size_t cmdq_reg_sz;
size_t rp_reg_sz;
size_t msg_cnt_off;
const struct adf_tl_dbg_counter *dev_counters;
const struct adf_tl_dbg_counter *sl_util_counters;
const struct adf_tl_dbg_counter *sl_exec_counters;
+ const struct adf_tl_dbg_counter **cmdq_counters;
const struct adf_tl_dbg_counter *rp_counters;
u8 num_hbuff;
u8 cpp_ns_per_cycle;
u8 bw_units_to_bytes;
u8 num_dev_counters;
u8 num_rp_counters;
+ u8 num_cmdq_counters;
u8 max_rp;
u8 max_sl_cnt;
+ struct icp_qat_fw_init_admin_slice_cnt multiplier;
};
struct adf_telemetry {
@@ -69,6 +73,7 @@ struct adf_telemetry {
struct mutex wr_lock;
struct delayed_work work_ctx;
struct icp_qat_fw_init_admin_slice_cnt slice_cnt;
+ struct icp_qat_fw_init_admin_slice_cnt cmdq_cnt;
};
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
index f20ae7e35a0d..b81f70576683 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
@@ -339,6 +339,48 @@ static int tl_calc_and_print_sl_counters(struct adf_accel_dev *accel_dev,
return 0;
}
+static int tl_print_cmdq_counter(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct seq_file *s, u8 cnt_id, u8 counter)
+{
+ size_t cmdq_regs_sz = GET_TL_DATA(telemetry->accel_dev).cmdq_reg_sz;
+ size_t offset_inc = cnt_id * cmdq_regs_sz;
+ struct adf_tl_dbg_counter slice_ctr;
+ char cnt_name[MAX_COUNT_NAME_SIZE];
+
+ slice_ctr = *(ctr + counter);
+ slice_ctr.offset1 += offset_inc;
+ snprintf(cnt_name, MAX_COUNT_NAME_SIZE, "%s%d", slice_ctr.name, cnt_id);
+
+ return tl_calc_and_print_counter(telemetry, s, &slice_ctr, cnt_name);
+}
+
+static int tl_calc_and_print_cmdq_counters(struct adf_accel_dev *accel_dev,
+ struct seq_file *s, u8 cnt_type,
+ u8 cnt_id)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ const struct adf_tl_dbg_counter **cmdq_tl_counters;
+ const struct adf_tl_dbg_counter *ctr;
+ u8 counter;
+ int ret;
+
+ cmdq_tl_counters = tl_data->cmdq_counters;
+ ctr = cmdq_tl_counters[cnt_type];
+
+ for (counter = 0; counter < tl_data->num_cmdq_counters; counter++) {
+ ret = tl_print_cmdq_counter(telemetry, ctr, s, cnt_id, counter);
+ if (ret) {
+ dev_notice(&GET_DEV(accel_dev),
+ "invalid slice utilization counter type\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static void tl_print_msg_cnt(struct seq_file *s, u32 msg_cnt)
{
seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, SNAPSHOT_CNT_MSG);
@@ -352,6 +394,7 @@ static int tl_print_dev_data(struct adf_accel_dev *accel_dev,
struct adf_telemetry *telemetry = accel_dev->telemetry;
const struct adf_tl_dbg_counter *dev_tl_counters;
u8 num_dev_counters = tl_data->num_dev_counters;
+ u8 *cmdq_cnt = (u8 *)&telemetry->cmdq_cnt;
u8 *sl_cnt = (u8 *)&telemetry->slice_cnt;
const struct adf_tl_dbg_counter *ctr;
-	unsigned int i;
+	unsigned int i, j;
@@ -387,6 +430,15 @@ static int tl_print_dev_data(struct adf_accel_dev *accel_dev,
}
}
+ /* Print per command queue telemetry. */
+ for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) {
+ for (j = 0; j < cmdq_cnt[i]; j++) {
+ ret = tl_calc_and_print_cmdq_counters(accel_dev, s, i, j);
+ if (ret)
+ return ret;
+ }
+ }
+
return 0;
}
@@ -538,6 +590,9 @@ static void tl_print_rp_srv(struct adf_accel_dev *accel_dev, struct seq_file *s,
case ASYM:
seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_ASYM);
break;
+ case DECOMP:
+ seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_DECOMP);
+ break;
default:
seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, TL_RP_SRV_UNKNOWN);
break;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
index 11cc9eae19b3..97c5eeaa1b17 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
@@ -17,6 +17,7 @@ struct adf_accel_dev;
#define LAT_ACC_NAME "gp_lat_acc_avg"
#define BW_IN_NAME "bw_in"
#define BW_OUT_NAME "bw_out"
+#define RE_ACC_NAME "re_acc_avg"
#define PAGE_REQ_LAT_NAME "at_page_req_lat_avg"
#define AT_TRANS_LAT_NAME "at_trans_lat_avg"
#define AT_MAX_UTLB_USED_NAME "at_max_tlb_used"
@@ -43,6 +44,10 @@ struct adf_accel_dev;
(ADF_TL_DEV_REG_OFF(slice##_slices[0], qat_gen) + \
offsetof(struct adf_##qat_gen##_tl_slice_data_regs, reg))
+#define ADF_TL_CMDQ_REG_OFF(slice, reg, qat_gen) \
+ (ADF_TL_DEV_REG_OFF(slice##_cmdq[0], qat_gen) + \
+ offsetof(struct adf_##qat_gen##_tl_cmdq_data_regs, reg))
+
#define ADF_TL_RP_REG_OFF(reg, qat_gen) \
(ADF_TL_DATA_REG_OFF(tl_ring_pairs_data_regs[0], qat_gen) + \
offsetof(struct adf_##qat_gen##_tl_ring_pair_data_regs, reg))
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
index e2dd568b87b5..6c22bc9b28e4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport_debug.c
@@ -10,16 +10,21 @@
static DEFINE_MUTEX(ring_read_lock);
static DEFINE_MUTEX(bank_read_lock);
+#define ADF_RING_NUM_MSGS(ring) \
+ (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) / \
+ ADF_MSG_SIZE_TO_BYTES(ring->msg_size))
+
static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
{
struct adf_etr_ring_data *ring = sfile->private;
+ unsigned int num_msg = ADF_RING_NUM_MSGS(ring);
+ loff_t val = *pos;
mutex_lock(&ring_read_lock);
- if (*pos == 0)
+ if (val == 0)
return SEQ_START_TOKEN;
- if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
- ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+ if (val >= num_msg)
return NULL;
return ring->base_addr +
@@ -29,13 +34,15 @@ static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
{
struct adf_etr_ring_data *ring = sfile->private;
+ unsigned int num_msg = ADF_RING_NUM_MSGS(ring);
+ loff_t val = *pos;
+
+ (*pos)++;
- if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
- ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+ if (val >= num_msg)
return NULL;
- return ring->base_addr +
- (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+ return ring->base_addr + (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * val);
}
static int adf_ring_show(struct seq_file *sfile, void *v)
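
[Note: the adf_ring_next() fix above caches *pos and advances it unconditionally, so the seq_file read loop always terminates. A self-contained sketch of the same iterator pattern, with a hypothetical fixed-size item array:]

#include <linux/seq_file.h>

#define EXAMPLE_NITEMS 16			/* hypothetical ring depth */
static u32 example_items[EXAMPLE_NITEMS];

static void *example_seq_start(struct seq_file *s, loff_t *pos)
{
	if (*pos == 0)
		return SEQ_START_TOKEN;		/* emit a header first */
	if (*pos > EXAMPLE_NITEMS)
		return NULL;
	return &example_items[*pos - 1];
}

static void *example_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	loff_t val = (*pos)++;		/* advance even when returning NULL */

	if (val >= EXAMPLE_NITEMS)
		return NULL;
	return &example_items[val];
}
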
diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
index a4636ec9f9ca..d0fef20a3df4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
@@ -299,7 +299,8 @@ EXPORT_SYMBOL_GPL(adf_flush_vf_wq);
*/
int __init adf_init_vf_wq(void)
{
- adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
+ adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
return !adf_vf_stop_wq ? -EFAULT : 0;
}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c
index 3c4bba4a8779..7f638a62e3ad 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c
@@ -9,8 +9,6 @@
#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
-#include <crypto/hash.h>
-#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
@@ -68,16 +66,10 @@ struct qat_alg_aead_ctx {
dma_addr_t dec_cd_paddr;
struct icp_qat_fw_la_bulk_req enc_fw_req;
struct icp_qat_fw_la_bulk_req dec_fw_req;
- struct crypto_shash *hash_tfm;
enum icp_qat_hw_auth_algo qat_hash_alg;
+ unsigned int hash_digestsize;
+ unsigned int hash_blocksize;
struct qat_crypto_instance *inst;
- union {
- struct sha1_state sha1;
- struct sha256_state sha256;
- struct sha512_state sha512;
- };
- char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
- char opad[SHA512_BLOCK_SIZE];
};
struct qat_alg_skcipher_ctx {
@@ -94,125 +86,57 @@ struct qat_alg_skcipher_ctx {
int mode;
};
-static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
-{
- switch (qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- return ICP_QAT_HW_SHA1_STATE1_SZ;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- return ICP_QAT_HW_SHA256_STATE1_SZ;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- return ICP_QAT_HW_SHA512_STATE1_SZ;
- default:
- return -EFAULT;
- }
-}
-
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
struct qat_alg_aead_ctx *ctx,
const u8 *auth_key,
unsigned int auth_keylen)
{
- SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
- int block_size = crypto_shash_blocksize(ctx->hash_tfm);
- int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
- __be32 *hash_state_out;
- __be64 *hash512_state_out;
- int i, offset;
-
- memset(ctx->ipad, 0, block_size);
- memset(ctx->opad, 0, block_size);
- shash->tfm = ctx->hash_tfm;
-
- if (auth_keylen > block_size) {
- int ret = crypto_shash_digest(shash, auth_key,
- auth_keylen, ctx->ipad);
- if (ret)
- return ret;
-
- memcpy(ctx->opad, ctx->ipad, digest_size);
- } else {
- memcpy(ctx->ipad, auth_key, auth_keylen);
- memcpy(ctx->opad, auth_key, auth_keylen);
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1: {
+ struct hmac_sha1_key key;
+ __be32 *istate = (__be32 *)hash->sha.state1;
+ __be32 *ostate = (__be32 *)(hash->sha.state1 +
+ round_up(sizeof(key.istate.h), 8));
+
+ hmac_sha1_preparekey(&key, auth_key, auth_keylen);
+ for (int i = 0; i < ARRAY_SIZE(key.istate.h); i++) {
+ istate[i] = cpu_to_be32(key.istate.h[i]);
+ ostate[i] = cpu_to_be32(key.ostate.h[i]);
+ }
+ memzero_explicit(&key, sizeof(key));
+ return 0;
}
-
- for (i = 0; i < block_size; i++) {
- char *ipad_ptr = ctx->ipad + i;
- char *opad_ptr = ctx->opad + i;
- *ipad_ptr ^= HMAC_IPAD_VALUE;
- *opad_ptr ^= HMAC_OPAD_VALUE;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256: {
+ struct hmac_sha256_key key;
+ __be32 *istate = (__be32 *)hash->sha.state1;
+ __be32 *ostate = (__be32 *)(hash->sha.state1 +
+ sizeof(key.key.istate.h));
+
+ hmac_sha256_preparekey(&key, auth_key, auth_keylen);
+ for (int i = 0; i < ARRAY_SIZE(key.key.istate.h); i++) {
+ istate[i] = cpu_to_be32(key.key.istate.h[i]);
+ ostate[i] = cpu_to_be32(key.key.ostate.h[i]);
+ }
+ memzero_explicit(&key, sizeof(key));
+ return 0;
}
-
- if (crypto_shash_init(shash))
- return -EFAULT;
-
- if (crypto_shash_update(shash, ctx->ipad, block_size))
- return -EFAULT;
-
- hash_state_out = (__be32 *)hash->sha.state1;
- hash512_state_out = (__be64 *)hash_state_out;
-
- switch (ctx->qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(shash, &ctx->sha1))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(shash, &ctx->sha256))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(shash, &ctx->sha512))
- return -EFAULT;
- for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
- *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
- break;
- default:
- return -EFAULT;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512: {
+ struct hmac_sha512_key key;
+ __be64 *istate = (__be64 *)hash->sha.state1;
+ __be64 *ostate = (__be64 *)(hash->sha.state1 +
+ sizeof(key.key.istate.h));
+
+ hmac_sha512_preparekey(&key, auth_key, auth_keylen);
+ for (int i = 0; i < ARRAY_SIZE(key.key.istate.h); i++) {
+ istate[i] = cpu_to_be64(key.key.istate.h[i]);
+ ostate[i] = cpu_to_be64(key.key.ostate.h[i]);
+ }
+ memzero_explicit(&key, sizeof(key));
+ return 0;
}
-
- if (crypto_shash_init(shash))
- return -EFAULT;
-
- if (crypto_shash_update(shash, ctx->opad, block_size))
- return -EFAULT;
-
- offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
- if (offset < 0)
- return -EFAULT;
-
- hash_state_out = (__be32 *)(hash->sha.state1 + offset);
- hash512_state_out = (__be64 *)hash_state_out;
-
- switch (ctx->qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(shash, &ctx->sha1))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(shash, &ctx->sha256))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(shash, &ctx->sha512))
- return -EFAULT;
- for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
- *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
- break;
default:
return -EFAULT;
}
- memzero_explicit(ctx->ipad, block_size);
- memzero_explicit(ctx->opad, block_size);
- return 0;
}
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
@@ -259,7 +183,7 @@ static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
ctx->qat_hash_alg, digestsize);
hash->sha.inner_setup.auth_counter.counter =
- cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+ cpu_to_be32(ctx->hash_blocksize);
if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
return -EFAULT;
@@ -326,7 +250,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
struct icp_qat_hw_cipher_algo_blk *cipher =
(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
sizeof(struct icp_qat_hw_auth_setup) +
- roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
+ roundup(ctx->hash_digestsize, 8) * 2);
struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
@@ -346,7 +270,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
ctx->qat_hash_alg,
digestsize);
hash->sha.inner_setup.auth_counter.counter =
- cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+ cpu_to_be32(ctx->hash_blocksize);
if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
return -EFAULT;
@@ -368,7 +292,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
cipher_cd_ctrl->cipher_cfg_offset =
(sizeof(struct icp_qat_hw_auth_setup) +
- roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
+ roundup(ctx->hash_digestsize, 8) * 2) >> 3;
ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
@@ -1150,32 +1074,35 @@ static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
}
static int qat_alg_aead_init(struct crypto_aead *tfm,
- enum icp_qat_hw_auth_algo hash,
- const char *hash_name)
+ enum icp_qat_hw_auth_algo hash_alg,
+ unsigned int hash_digestsize,
+ unsigned int hash_blocksize)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
- ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
- if (IS_ERR(ctx->hash_tfm))
- return PTR_ERR(ctx->hash_tfm);
- ctx->qat_hash_alg = hash;
+ ctx->qat_hash_alg = hash_alg;
+ ctx->hash_digestsize = hash_digestsize;
+ ctx->hash_blocksize = hash_blocksize;
crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
return 0;
}
static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
- return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1,
+ SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE);
}
static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
- return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256,
+ SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE);
}
static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
- return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512,
+ SHA512_DIGEST_SIZE, SHA512_BLOCK_SIZE);
}
static void qat_alg_aead_exit(struct crypto_aead *tfm)
@@ -1184,8 +1111,6 @@ static void qat_alg_aead_exit(struct crypto_aead *tfm)
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev;
- crypto_free_shash(ctx->hash_tfm);
-
if (!inst)
return;
@@ -1277,7 +1202,7 @@ static struct aead_alg qat_aeads[] = { {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha1",
- .cra_priority = 4001,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
@@ -1294,7 +1219,7 @@ static struct aead_alg qat_aeads[] = { {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha256",
- .cra_priority = 4001,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
@@ -1311,7 +1236,7 @@ static struct aead_alg qat_aeads[] = { {
.base = {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha512",
- .cra_priority = 4001,
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
@@ -1329,7 +1254,7 @@ static struct aead_alg qat_aeads[] = { {
static struct skcipher_alg qat_skciphers[] = { {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "qat_aes_cbc",
- .base.cra_priority = 4001,
+ .base.cra_priority = 100,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
@@ -1347,7 +1272,7 @@ static struct skcipher_alg qat_skciphers[] = { {
}, {
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "qat_aes_ctr",
- .base.cra_priority = 4001,
+ .base.cra_priority = 100,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
@@ -1365,7 +1290,7 @@ static struct skcipher_alg qat_skciphers[] = { {
}, {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "qat_aes_xts",
- .base.cra_priority = 4001,
+ .base.cra_priority = 100,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
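
[Note: the qat_algs.c rework above drops the allocated shash transform in favour of the library HMAC interface, which derives the precomputed inner/outer states directly. A minimal sketch of that derivation for SHA-256, assuming the hmac_sha256_preparekey() API and struct layout used in the hunk; the function name is hypothetical:]

#include <crypto/sha2.h>
#include <linux/kernel.h>
#include <linux/string.h>

/*
 * Sketch: derive the precomputed inner/outer SHA-256 states for HMAC
 * and serialize them big-endian, as hardware setup blocks expect.
 */
static void example_hmac256_precompute(u8 *istate_out, u8 *ostate_out,
				       const u8 *raw_key, size_t keylen)
{
	struct hmac_sha256_key key;
	__be32 *ist = (__be32 *)istate_out;
	__be32 *ost = (__be32 *)ostate_out;
	int i;

	/* Hashes overlong keys and absorbs the ipad/opad blocks. */
	hmac_sha256_preparekey(&key, raw_key, keylen);
	for (i = 0; i < ARRAY_SIZE(key.key.istate.h); i++) {
		ist[i] = cpu_to_be32(key.key.istate.h[i]);
		ost[i] = cpu_to_be32(key.key.ostate.h[i]);
	}
	memzero_explicit(&key, sizeof(key));	/* scrub key material */
}
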
diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c
index 5e4dad4693ca..9b2338f58d97 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_bl.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c
@@ -38,7 +38,7 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
for (i = 0; i < blout->num_mapped_bufs; i++) {
dma_unmap_single(dev, blout->buffers[i].addr,
blout->buffers[i].len,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
}
dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
@@ -162,7 +162,7 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
}
buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
sg->length - left,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
goto err_out;
buffers[y].len = sg->length;
@@ -204,7 +204,7 @@ err_out:
if (!dma_mapping_error(dev, buflout->buffers[i].addr))
dma_unmap_single(dev, buflout->buffers[i].addr,
buflout->buffers[i].len,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
}
if (!buf->sgl_dst_valid)
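
[Note: the qat_bl.c hunks switch output-buffer mappings to DMA_BIDIRECTIONAL, since in-place operations let the device both read and write the same buffer. A minimal mapping sketch under that assumption, hypothetical names:]

#include <linux/dma-mapping.h>

/* Sketch: map a buffer the device both reads and writes (in-place op). */
static int example_map_inout(struct device *dev, void *buf, size_t len,
			     dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;

	/* Unmap with the same length and direction once the op completes. */
	return 0;
}
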
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c
index c285b45b8679..53a4db5507ec 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_compression.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c
@@ -196,7 +196,7 @@ static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
struct adf_dc_data *dc_data = NULL;
u8 *obuff = NULL;
- dc_data = devm_kzalloc(dev, sizeof(*dc_data), GFP_KERNEL);
+ dc_data = kzalloc_node(sizeof(*dc_data), GFP_KERNEL, dev_to_node(dev));
if (!dc_data)
goto err;
@@ -204,7 +204,7 @@ static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
if (!obuff)
goto err;
- obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_FROM_DEVICE);
+ obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, obuff_p)))
goto err;
@@ -232,9 +232,9 @@ static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
return;
dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
kfree_sensitive(dc_data->ovf_buff);
- devm_kfree(dev, dc_data);
+ kfree(dc_data);
accel_dev->dc_data = NULL;
}
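
[Note: replacing devm_kzalloc() with kzalloc_node() above places the buffer on the accelerator's NUMA node but shifts lifetime management to the caller, hence the matching kfree() in the teardown path. A short sketch of the allocation side, hypothetical name:]

#include <linux/device.h>
#include <linux/slab.h>

/* Sketch: NUMA-local allocation tied to a device's node, freed manually. */
static void *example_alloc_on_dev_node(struct device *dev, size_t size)
{
	/* dev_to_node() falls back to NUMA_NO_NODE when unknown. */
	return kzalloc_node(size, GFP_KERNEL, dev_to_node(dev));
}
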
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index 21d652a1c8ef..06d49cb781ae 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -200,20 +200,12 @@ qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
static int qat_uclo_parse_num(char *str, unsigned int *num)
{
- char buf[16] = {0};
- unsigned long ae = 0;
- int i;
-
- strscpy(buf, str, sizeof(buf));
- for (i = 0; i < 16; i++) {
- if (!isdigit(buf[i])) {
- buf[i] = '\0';
- break;
- }
- }
- if ((kstrtoul(buf, 10, &ae)))
- return -EFAULT;
+ unsigned long long ae;
+ char *end;
+ ae = simple_strtoull(str, &end, 10);
+ if (ae > UINT_MAX || str == end || (end - str) > 19)
+ return -EINVAL;
*num = (unsigned int)ae;
return 0;
}
@@ -1900,7 +1892,7 @@ static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
if (sobj_hdr)
sobj_chunk_num = sobj_hdr->num_chunks;
- mobj_hdr = kzalloc((uobj_chunk_num + sobj_chunk_num) *
+ mobj_hdr = kcalloc(size_add(uobj_chunk_num, sobj_chunk_num),
sizeof(*mobj_hdr), GFP_KERNEL);
if (!mobj_hdr)
return -ENOMEM;
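
[Note: the qat_uclo.c allocation fix above guards the element-count arithmetic against integer overflow. A minimal sketch of the kcalloc()/size_add() idiom it uses; the struct is hypothetical:]

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_hdr { u32 id; };		/* hypothetical element type */

/* Sketch: overflow-safe allocation of a + b elements of one type. */
static struct example_hdr *example_alloc_hdrs(size_t a, size_t b)
{
	/*
	 * size_add() saturates at SIZE_MAX on overflow, so kcalloc()
	 * then fails cleanly instead of under-allocating.
	 */
	return kcalloc(size_add(a, b), sizeof(struct example_hdr),
		       GFP_KERNEL);
}
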
diff --git a/drivers/crypto/loongson/Kconfig b/drivers/crypto/loongson/Kconfig
new file mode 100644
index 000000000000..15475da8fc11
--- /dev/null
+++ b/drivers/crypto/loongson/Kconfig
@@ -0,0 +1,6 @@
+config CRYPTO_DEV_LOONGSON_RNG
+	tristate "Support for Loongson RNG Driver"
+	depends on MFD_LOONGSON_SE
+	help
+	  Support for the hardware random number generator provided by
+	  the Loongson Security Engine.
diff --git a/drivers/crypto/loongson/Makefile b/drivers/crypto/loongson/Makefile
new file mode 100644
index 000000000000..1ce5ec32b553
--- /dev/null
+++ b/drivers/crypto/loongson/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CRYPTO_DEV_LOONGSON_RNG) += loongson-rng.o
diff --git a/drivers/crypto/loongson/loongson-rng.c b/drivers/crypto/loongson/loongson-rng.c
new file mode 100644
index 000000000000..3a4940260f9e
--- /dev/null
+++ b/drivers/crypto/loongson/loongson-rng.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 HiSilicon Limited. */
+/* Copyright (c) 2025 Loongson Technology Corporation Limited. */
+
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mfd/loongson-se.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <crypto/internal/rng.h>
+
+#define SE_SEED_SIZE 32
+
+struct loongson_rng_list {
+ struct mutex lock;
+ struct list_head list;
+ int registered;
+};
+
+struct loongson_rng {
+ u32 used;
+ struct loongson_se_engine *engine;
+ struct list_head list;
+ struct mutex lock;
+};
+
+struct loongson_rng_ctx {
+ struct loongson_rng *rng;
+};
+
+struct loongson_rng_cmd {
+ u32 cmd_id;
+ union {
+ u32 len;
+ u32 ret;
+ } u;
+ u32 seed_off;
+ u32 out_off;
+ u32 pad[4];
+};
+
+static struct loongson_rng_list rng_devices = {
+ .lock = __MUTEX_INITIALIZER(rng_devices.lock),
+ .list = LIST_HEAD_INIT(rng_devices.list),
+};
+
+static int loongson_rng_generate(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *dstn, unsigned int dlen)
+{
+ struct loongson_rng_ctx *ctx = crypto_rng_ctx(tfm);
+ struct loongson_rng *rng = ctx->rng;
+ struct loongson_rng_cmd *cmd = rng->engine->command;
+ int err, len;
+
+ mutex_lock(&rng->lock);
+ cmd->seed_off = 0;
+ do {
+ len = min(dlen, rng->engine->buffer_size);
+ cmd = rng->engine->command;
+ cmd->u.len = len;
+ err = loongson_se_send_engine_cmd(rng->engine);
+ if (err)
+ break;
+
+ cmd = rng->engine->command_ret;
+ if (cmd->u.ret) {
+ err = -EIO;
+ break;
+ }
+
+ memcpy(dstn, rng->engine->data_buffer, len);
+ dlen -= len;
+ dstn += len;
+ } while (dlen > 0);
+ mutex_unlock(&rng->lock);
+
+ return err;
+}
+
+static int loongson_rng_init(struct crypto_tfm *tfm)
+{
+ struct loongson_rng_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct loongson_rng *rng;
+ u32 min_used = U32_MAX;
+
+ mutex_lock(&rng_devices.lock);
+ list_for_each_entry(rng, &rng_devices.list, list) {
+ if (rng->used < min_used) {
+ ctx->rng = rng;
+ min_used = rng->used;
+ }
+ }
+ ctx->rng->used++;
+ mutex_unlock(&rng_devices.lock);
+
+ return 0;
+}
+
+static void loongson_rng_exit(struct crypto_tfm *tfm)
+{
+ struct loongson_rng_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ mutex_lock(&rng_devices.lock);
+ ctx->rng->used--;
+ mutex_unlock(&rng_devices.lock);
+}
+
+static int loongson_rng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
+ struct loongson_rng_ctx *ctx = crypto_rng_ctx(tfm);
+ struct loongson_rng *rng = ctx->rng;
+ struct loongson_rng_cmd *cmd;
+ int err;
+
+ if (slen < SE_SEED_SIZE)
+ return -EINVAL;
+
+ slen = min(slen, rng->engine->buffer_size);
+
+ mutex_lock(&rng->lock);
+ cmd = rng->engine->command;
+ cmd->u.len = slen;
+ cmd->seed_off = rng->engine->buffer_off;
+ memcpy(rng->engine->data_buffer, seed, slen);
+ err = loongson_se_send_engine_cmd(rng->engine);
+ if (err)
+ goto out;
+
+ cmd = rng->engine->command_ret;
+ if (cmd->u.ret)
+ err = -EIO;
+out:
+ mutex_unlock(&rng->lock);
+
+ return err;
+}
+
+static struct rng_alg loongson_rng_alg = {
+ .generate = loongson_rng_generate,
+ .seed = loongson_rng_seed,
+ .seedsize = SE_SEED_SIZE,
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "loongson_stdrng",
+ .cra_priority = 300,
+ .cra_ctxsize = sizeof(struct loongson_rng_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = loongson_rng_init,
+ .cra_exit = loongson_rng_exit,
+ },
+};
+
+static int loongson_rng_probe(struct platform_device *pdev)
+{
+ struct loongson_rng_cmd *cmd;
+ struct loongson_rng *rng;
+ int ret = 0;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ rng->engine = loongson_se_init_engine(pdev->dev.parent, SE_ENGINE_RNG);
+ if (!rng->engine)
+ return -ENODEV;
+ cmd = rng->engine->command;
+ cmd->cmd_id = SE_CMD_RNG;
+ cmd->out_off = rng->engine->buffer_off;
+ mutex_init(&rng->lock);
+
+ mutex_lock(&rng_devices.lock);
+
+ if (!rng_devices.registered) {
+ ret = crypto_register_rng(&loongson_rng_alg);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register crypto(%d)\n", ret);
+ goto out;
+ }
+ rng_devices.registered = 1;
+ }
+
+ list_add_tail(&rng->list, &rng_devices.list);
+out:
+ mutex_unlock(&rng_devices.lock);
+
+ return ret;
+}
+
+static struct platform_driver loongson_rng_driver = {
+ .probe = loongson_rng_probe,
+ .driver = {
+ .name = "loongson-rng",
+ },
+};
+module_platform_driver(loongson_rng_driver);
+
+MODULE_ALIAS("platform:loongson-rng");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yinggang Gu <guyinggang@loongson.cn>");
+MODULE_AUTHOR("Qunqin Zhao <zhaoqunqin@loongson.cn>");
+MODULE_DESCRIPTION("Loongson Random Number Generator driver");
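
[Note: once registered under the generic "stdrng" name, the driver above is reachable through the crypto RNG API. A short consumer sketch using only standard kernel calls; the wrapper name is hypothetical:]

#include <crypto/rng.h>
#include <linux/err.h>

/* Sketch: consuming the registered "stdrng" via the crypto RNG API. */
static int example_get_random(u8 *out, unsigned int len)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = crypto_rng_get_bytes(rng, out, len);
	crypto_free_rng(rng);
	return ret;
}
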
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 9c21f5d835d2..301bdf239e7d 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -420,7 +420,6 @@ static int mv_cesa_probe(struct platform_device *pdev)
{
const struct mv_cesa_caps *caps = &orion_caps;
const struct mbus_dram_target_info *dram;
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct mv_cesa_dev *cesa;
struct mv_cesa_engine *engines;
@@ -433,11 +432,9 @@ static int mv_cesa_probe(struct platform_device *pdev)
}
if (dev->of_node) {
- match = of_match_node(mv_cesa_of_match_table, dev->of_node);
- if (!match || !match->data)
+ caps = of_device_get_match_data(dev);
+ if (!caps)
return -ENOTSUPP;
-
- caps = match->data;
}
cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
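
[Note: the cesa.c probe change collapses the explicit of_match_node() walk into of_device_get_match_data(). A minimal sketch of the idiom; the caps struct and probe are hypothetical:]

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_caps { int nengines; };	/* hypothetical per-compatible data */

/* Sketch: fetching per-compatible caps without an explicit table walk. */
static int example_probe(struct platform_device *pdev)
{
	const struct example_caps *caps;

	/* Returns the .data of the matched of_device_id, or NULL. */
	caps = of_device_get_match_data(&pdev->dev);
	if (!caps)
		return -ENODEV;

	/* ... configure the device from caps ... */
	return 0;
}
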
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index 48c5c8ea8c43..3fe0fd9226cf 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -75,9 +75,12 @@ mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct mv_cesa_engine *engine = creq->base.engine;
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_skcipher_dma_cleanup(req);
+
+ atomic_sub(req->cryptlen, &engine->load);
}
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
@@ -212,7 +215,6 @@ mv_cesa_skcipher_complete(struct crypto_async_request *req)
struct mv_cesa_engine *engine = creq->base.engine;
unsigned int ivsize;
- atomic_sub(skreq->cryptlen, &engine->load);
ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index 6815eddc9068..5103d36cdfdb 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -110,9 +110,12 @@ static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+ struct mv_cesa_engine *engine = creq->base.engine;
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ahash_dma_cleanup(req);
+
+ atomic_sub(req->nbytes, &engine->load);
}
static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
@@ -362,16 +365,13 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
(creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
CESA_TDMA_RESULT) {
- __le32 *data = NULL;
+ const void *data;
/*
* Result is already in the correct endianness when the SA is
* used
*/
data = creq->base.chain.last->op->ctx.hash.hash;
- for (i = 0; i < digsize / 4; i++)
- creq->state[i] = le32_to_cpu(data[i]);
-
memcpy(ahashreq->result, data, digsize);
} else {
for (i = 0; i < digsize / 4; i++)
@@ -395,8 +395,6 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
}
}
}
-
- atomic_sub(ahashreq->nbytes, &engine->load);
}
static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index d529bcb03775..062def303dce 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -18,9 +18,8 @@
#define OTX2_CPT_MAX_VFS_NUM 128
#define OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs) \
(((blk) << 20) | ((slot) << 12) | (offs))
-#define OTX2_CPT_RVU_PFFUNC(pf, func) \
- ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
- (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+
+#define OTX2_CPT_RVU_PFFUNC(pdev, pf, func) rvu_make_pcifunc(pdev, pf, func)
#define OTX2_CPT_INVALID_CRYPTO_ENG_GRP 0xFF
#define OTX2_CPT_NAME_LENGTH 64
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
index 215a1a8ba7e9..07a74f702c3a 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
@@ -24,7 +24,8 @@ static int otx2_cpt_dl_egrp_delete(struct devlink *dl, u32 id,
}
static int otx2_cpt_dl_uc_info(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
ctx->val.vstr[0] = '\0';
@@ -32,7 +33,8 @@ static int otx2_cpt_dl_uc_info(struct devlink *dl, u32 id,
}
static int otx2_cpt_dl_t106_mode_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
index e27e849b01df..e64ca30335de 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
@@ -34,6 +34,9 @@
#define SG_COMP_2 2
#define SG_COMP_1 1
+#define OTX2_CPT_DPTR_RPTR_ALIGN 8
+#define OTX2_CPT_RES_ADDR_ALIGN 32
+
union otx2_cpt_opcode {
u16 flags;
struct {
@@ -347,22 +350,48 @@ static inline struct otx2_cpt_inst_info *
cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
gfp_t gfp)
{
- u32 dlen = 0, g_len, sg_len, info_len;
- int align = OTX2_CPT_DMA_MINALIGN;
+ u32 dlen = 0, g_len, s_len, sg_len, info_len;
struct otx2_cpt_inst_info *info;
- u16 g_sz_bytes, s_sz_bytes;
u32 total_mem_len;
int i;
- g_sz_bytes = ((req->in_cnt + 2) / 3) *
- sizeof(struct cn10kb_cpt_sglist_component);
- s_sz_bytes = ((req->out_cnt + 2) / 3) *
- sizeof(struct cn10kb_cpt_sglist_component);
+	/* Allocate memory to meet the alignment requirements shown below:
+	 *  ------------------------------------
+	 * | struct otx2_cpt_inst_info          |
+	 * | (no alignment required)            |
+	 * |    --------------------------------|
+	 * |   | padding for ARCH_DMA_MINALIGN  |
+	 * |   | alignment                      |
+	 * |------------------------------------|
+	 * | SG list Gather/Input memory        |
+	 * | Length = multiple of 32 bytes      |
+	 * | Alignment = 8 bytes                |
+	 * |------------------------------------|
+	 * | SG list Scatter/Output memory      |
+	 * | Length = multiple of 32 bytes      |
+	 * | Alignment = 8 bytes                |
+	 * |    --------------------------------|
+	 * |   | padding for 32-byte alignment  |
+	 * |------------------------------------|
+	 * | Result response memory             |
+	 * | Alignment = 32 bytes               |
+	 *  ------------------------------------
+	 */
+
+ info_len = sizeof(*info);
+
+ g_len = ((req->in_cnt + 2) / 3) *
+ sizeof(struct cn10kb_cpt_sglist_component);
+ s_len = ((req->out_cnt + 2) / 3) *
+ sizeof(struct cn10kb_cpt_sglist_component);
+ sg_len = g_len + s_len;
- g_len = ALIGN(g_sz_bytes, align);
- sg_len = ALIGN(g_len + s_sz_bytes, align);
- info_len = ALIGN(sizeof(*info), align);
- total_mem_len = sg_len + info_len + sizeof(union otx2_cpt_res_s);
+ /* Allocate extra memory for SG and response address alignment */
+ total_mem_len = ALIGN(info_len, OTX2_CPT_DPTR_RPTR_ALIGN);
+ total_mem_len += (ARCH_DMA_MINALIGN - 1) &
+ ~(OTX2_CPT_DPTR_RPTR_ALIGN - 1);
+ total_mem_len += ALIGN(sg_len, OTX2_CPT_RES_ADDR_ALIGN);
+ total_mem_len += sizeof(union otx2_cpt_res_s);
info = kzalloc(total_mem_len, gfp);
if (unlikely(!info))
@@ -372,7 +401,8 @@ cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
dlen += req->in[i].size;
info->dlen = dlen;
- info->in_buffer = (u8 *)info + info_len;
+ info->in_buffer = PTR_ALIGN((u8 *)info + info_len, ARCH_DMA_MINALIGN);
+ info->out_buffer = info->in_buffer + g_len;
info->gthr_sz = req->in_cnt;
info->sctr_sz = req->out_cnt;
@@ -384,7 +414,7 @@ cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
}
if (sgv2io_components_setup(pdev, req->out, req->out_cnt,
- &info->in_buffer[g_len])) {
+ info->out_buffer)) {
dev_err(&pdev->dev, "Failed to setup scatter list\n");
goto destroy_info;
}
@@ -401,8 +431,10 @@ cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
* Get buffer for union otx2_cpt_res_s response
* structure and its physical address
*/
- info->completion_addr = info->in_buffer + sg_len;
- info->comp_baddr = info->dptr_baddr + sg_len;
+ info->completion_addr = PTR_ALIGN((info->in_buffer + sg_len),
+ OTX2_CPT_RES_ADDR_ALIGN);
+ info->comp_baddr = ALIGN((info->dptr_baddr + sg_len),
+ OTX2_CPT_RES_ADDR_ALIGN);
return info;
@@ -417,10 +449,9 @@ static inline struct otx2_cpt_inst_info *
otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
gfp_t gfp)
{
- int align = OTX2_CPT_DMA_MINALIGN;
struct otx2_cpt_inst_info *info;
- u32 dlen, align_dlen, info_len;
- u16 g_sz_bytes, s_sz_bytes;
+ u32 dlen, info_len;
+ u16 g_len, s_len;
u32 total_mem_len;
if (unlikely(req->in_cnt > OTX2_CPT_MAX_SG_IN_CNT ||
@@ -429,22 +460,54 @@ otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
return NULL;
}
- g_sz_bytes = ((req->in_cnt + 3) / 4) *
- sizeof(struct otx2_cpt_sglist_component);
- s_sz_bytes = ((req->out_cnt + 3) / 4) *
- sizeof(struct otx2_cpt_sglist_component);
+	/* Allocate memory to meet the alignment requirements shown below:
+	 *  ------------------------------------
+	 * | struct otx2_cpt_inst_info          |
+	 * | (no alignment required)            |
+	 * |    --------------------------------|
+	 * |   | padding for ARCH_DMA_MINALIGN  |
+	 * |   | alignment                      |
+	 * |------------------------------------|
+	 * | SG list header of 8 bytes          |
+	 * |------------------------------------|
+	 * | SG list Gather/Input memory        |
+	 * | Length = multiple of 32 bytes      |
+	 * | Alignment = 8 bytes                |
+	 * |------------------------------------|
+	 * | SG list Scatter/Output memory      |
+	 * | Length = multiple of 32 bytes      |
+	 * | Alignment = 8 bytes                |
+	 * |    --------------------------------|
+	 * |   | padding for 32-byte alignment  |
+	 * |------------------------------------|
+	 * | Result response memory             |
+	 * | Alignment = 32 bytes               |
+	 *  ------------------------------------
+	 */
+
+ info_len = sizeof(*info);
+
+ g_len = ((req->in_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+ s_len = ((req->out_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+
+ dlen = g_len + s_len + SG_LIST_HDR_SIZE;
- dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
- align_dlen = ALIGN(dlen, align);
- info_len = ALIGN(sizeof(*info), align);
- total_mem_len = align_dlen + info_len + sizeof(union otx2_cpt_res_s);
+ /* Allocate extra memory for SG and response address alignment */
+ total_mem_len = ALIGN(info_len, OTX2_CPT_DPTR_RPTR_ALIGN);
+ total_mem_len += (ARCH_DMA_MINALIGN - 1) &
+ ~(OTX2_CPT_DPTR_RPTR_ALIGN - 1);
+ total_mem_len += ALIGN(dlen, OTX2_CPT_RES_ADDR_ALIGN);
+ total_mem_len += sizeof(union otx2_cpt_res_s);
info = kzalloc(total_mem_len, gfp);
if (unlikely(!info))
return NULL;
info->dlen = dlen;
- info->in_buffer = (u8 *)info + info_len;
+ info->in_buffer = PTR_ALIGN((u8 *)info + info_len, ARCH_DMA_MINALIGN);
+ info->out_buffer = info->in_buffer + SG_LIST_HDR_SIZE + g_len;
((u16 *)info->in_buffer)[0] = req->out_cnt;
((u16 *)info->in_buffer)[1] = req->in_cnt;
@@ -460,7 +523,7 @@ otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
}
if (setup_sgio_components(pdev, req->out, req->out_cnt,
- &info->in_buffer[8 + g_sz_bytes])) {
+ info->out_buffer)) {
dev_err(&pdev->dev, "Failed to setup scatter list\n");
goto destroy_info;
}
@@ -476,8 +539,10 @@ otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
* Get buffer for union otx2_cpt_res_s response
* structure and its physical address
*/
- info->completion_addr = info->in_buffer + align_dlen;
- info->comp_baddr = info->dptr_baddr + align_dlen;
+ info->completion_addr = PTR_ALIGN((info->in_buffer + dlen),
+ OTX2_CPT_RES_ADDR_ALIGN);
+ info->comp_baddr = ALIGN((info->dptr_baddr + dlen),
+ OTX2_CPT_RES_ADDR_ALIGN);
return info;
@@ -490,6 +555,7 @@ struct otx2_cptlf_wqe;
int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
int cpu_num);
void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe);
-int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev);
+int otx2_cpt_get_eng_grp_num(struct pci_dev *pdev,
+			     enum otx2_cpt_eng_type eng_type);
#endif /* __OTX2_CPT_REQMGR_H */
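
[Note: both info_create() rewrites above carve differently aligned sub-buffers out of a single kzalloc() by over-allocating worst-case padding and then aligning pointers inside the block. A self-contained sketch of that pattern; names and sizes are hypothetical:]

#include <linux/align.h>
#include <linux/cache.h>
#include <linux/slab.h>

#define EX_RES_ALIGN	32	/* result buffer alignment */

/* Sketch: one allocation, two alignment domains carved out of it. */
static void *example_layout(size_t hdr_len, size_t sg_len, size_t res_len,
			    u8 **sg, u8 **res)
{
	u8 *base;

	/* Over-allocate by the worst-case padding before each boundary. */
	base = kzalloc(hdr_len + ARCH_DMA_MINALIGN - 1 +
		       sg_len + EX_RES_ALIGN - 1 + res_len, GFP_KERNEL);
	if (!base)
		return NULL;

	*sg = PTR_ALIGN(base + hdr_len, ARCH_DMA_MINALIGN);
	*res = PTR_ALIGN(*sg + sg_len, EX_RES_ALIGN);
	return base;	/* kfree(base) on teardown, not the aligned pointers */
}
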
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index 6e004a5568d8..1b9f75214d18 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -124,7 +124,8 @@ struct otx2_cptlfs_info {
struct cpt_hw_ops *ops;
u8 are_lfs_attached; /* Whether CPT LFs are attached */
u8 lfs_num; /* Number of CPT LFs */
- u8 kcrypto_eng_grp_num; /* Kernel crypto engine group number */
+ u8 kcrypto_se_eng_grp_num; /* Crypto symmetric engine group number */
+ u8 kcrypto_ae_eng_grp_num; /* Crypto asymmetric engine group number */
u8 kvf_limits; /* Kernel crypto limits */
atomic_t state; /* LF's state. started/reset */
int blkaddr; /* CPT blkaddr: BLKADDR_CPT0/BLKADDR_CPT1 */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
index 12c0e966fa65..b4b2d3d1cbc2 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -142,7 +142,7 @@ static int send_inline_ipsec_inbound_msg(struct otx2_cptpf_dev *cptpf,
memset(req, 0, sizeof(*req));
req->hdr.id = MBOX_MSG_CPT_INLINE_IPSEC_CFG;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
- req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
+ req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pdev, cptpf->pf_id, 0);
req->dir = CPT_INLINE_INBOUND;
req->slot = slot;
req->sso_pf_func_ovrd = cptpf->sso_pf_func_ovrd;
@@ -184,7 +184,8 @@ static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
nix_req->gen_cfg.opcode = cpt_inline_rx_opcode(pdev);
nix_req->gen_cfg.param1 = req->param1;
nix_req->gen_cfg.param2 = req->param2;
- nix_req->inst_qsel.cpt_pf_func = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
+ nix_req->inst_qsel.cpt_pf_func =
+ OTX2_CPT_RVU_PFFUNC(cptpf->pdev, cptpf->pf_id, 0);
nix_req->inst_qsel.cpt_slot = 0;
ret = otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
if (ret)
@@ -392,9 +393,8 @@ void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
/* Set which VF sent this message based on mbox IRQ */
- msg->pcifunc = ((u16)cptpf->pf_id << RVU_PFVF_PF_SHIFT) |
- ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
-
+ msg->pcifunc = rvu_make_pcifunc(cptpf->pdev, cptpf->pf_id,
+ (vf->vf_id + 1));
err = cptpf_handle_vf_req(cptpf, vf, msg,
msg->next_msgoff - offset);
/*
@@ -469,8 +469,7 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
switch (msg->id) {
case MBOX_MSG_READY:
- cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
- RVU_PFVF_PF_MASK;
+ cptpf->pf_id = rvu_get_pf(cptpf->pdev, msg->pcifunc);
break;
case MBOX_MSG_MSIX_OFFSET:
rsp_msix = (struct msix_offset_rsp *) msg;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 78367849c3d5..b5cc5401f704 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -3,6 +3,7 @@
#include <linux/ctype.h>
#include <linux/firmware.h>
+#include <linux/string.h>
#include <linux/string_choices.h>
#include "otx2_cptpf_ucode.h"
#include "otx2_cpt_common.h"
@@ -176,7 +177,9 @@ static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
/* Set PF number for microcode fetches */
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_PF_FUNC,
- cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
+ rvu_make_pcifunc(cptpf->pdev,
+ cptpf->pf_id, 0),
+ blkaddr);
if (ret)
return ret;
@@ -456,13 +459,13 @@ static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info,
u16 rid)
{
char filename[OTX2_CPT_NAME_LENGTH];
- char eng_type[8] = {0};
+ char eng_type[8];
int ret, e, i;
INIT_LIST_HEAD(&fw_info->ucodes);
for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
- strcpy(eng_type, get_eng_type_str(e));
+ strscpy(eng_type, get_eng_type_str(e));
for (i = 0; i < strlen(eng_type); i++)
eng_type[i] = tolower(eng_type[i]);
@@ -1491,11 +1494,13 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
union otx2_cpt_opcode opcode;
union otx2_cpt_res_s *result;
union otx2_cpt_inst_s inst;
+ dma_addr_t result_baddr;
dma_addr_t rptr_baddr;
struct pci_dev *pdev;
- u32 len, compl_rlen;
+ int timeout = 10000;
+ void *base, *rptr;
int ret, etype;
- void *rptr;
+ u32 len;
/*
* We don't get capabilities if it was already done
@@ -1518,22 +1523,28 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
if (ret)
goto delete_grps;
- compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
- len = compl_rlen + LOADFVC_RLEN;
+ /* Allocate extra memory for "rptr" and "result" pointer alignment */
+ len = LOADFVC_RLEN + ARCH_DMA_MINALIGN +
+ sizeof(union otx2_cpt_res_s) + OTX2_CPT_RES_ADDR_ALIGN;
- result = kzalloc(len, GFP_KERNEL);
- if (!result) {
+ base = kzalloc(len, GFP_KERNEL);
+ if (!base) {
ret = -ENOMEM;
goto lf_cleanup;
}
- rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
- DMA_BIDIRECTIONAL);
+
+ rptr = PTR_ALIGN(base, ARCH_DMA_MINALIGN);
+ rptr_baddr = dma_map_single(&pdev->dev, rptr, len, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
dev_err(&pdev->dev, "DMA mapping failed\n");
ret = -EFAULT;
- goto free_result;
+ goto free_rptr;
}
- rptr = (u8 *)result + compl_rlen;
+
+ result = (union otx2_cpt_res_s *)PTR_ALIGN(rptr + LOADFVC_RLEN,
+ OTX2_CPT_RES_ADDR_ALIGN);
+ result_baddr = ALIGN(rptr_baddr + LOADFVC_RLEN,
+ OTX2_CPT_RES_ADDR_ALIGN);
/* Fill in the command */
opcode.s.major = LOADFVC_MAJOR_OP;
@@ -1545,27 +1556,38 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
/* 64-bit swap for microcode data reads, not needed for addresses */
cpu_to_be64s(&iq_cmd.cmd.u);
iq_cmd.dptr = 0;
- iq_cmd.rptr = rptr_baddr + compl_rlen;
+ iq_cmd.rptr = rptr_baddr;
iq_cmd.cptr.u = 0;
for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
etype);
- otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
+ otx2_cpt_fill_inst(&inst, &iq_cmd, result_baddr);
lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);
+ timeout = 10000;
while (lfs->ops->cpt_get_compcode(result) ==
- OTX2_CPT_COMPLETION_CODE_INIT)
+ OTX2_CPT_COMPLETION_CODE_INIT) {
cpu_relax();
+ udelay(1);
+ timeout--;
+ if (!timeout) {
+ ret = -ENODEV;
+ cptpf->is_eng_caps_discovered = false;
+ dev_warn(&pdev->dev, "Timeout on CPT load_fvc completion poll\n");
+ goto error_no_response;
+ }
+ }
cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
}
- dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
cptpf->is_eng_caps_discovered = true;
-free_result:
- kfree(result);
+error_no_response:
+ dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
+free_rptr:
+ kfree(base);
lf_cleanup:
otx2_cptlf_shutdown(lfs);
delete_grps:
@@ -1594,7 +1616,7 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
return -EINVAL;
}
err_msg = "Invalid engine group format";
- strscpy(tmp_buf, ctx->val.vstr, strlen(ctx->val.vstr) + 1);
+ strscpy(tmp_buf, ctx->val.vstr);
start = tmp_buf;
has_se = has_ie = has_ae = false;
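
[Note: the ucode change above bounds a previously unbounded completion poll with a udelay()-per-iteration budget. A minimal sketch of that open-coded pattern; the completion word and sentinel are placeholders (read_poll_timeout() is the usual alternative when the condition fits its form):]

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/processor.h>

/* Sketch: bounding a busy-wait on a device-written completion word. */
static int example_wait_done(const volatile u8 *compcode)
{
	int timeout = 10000;	/* ~10 ms at 1 us per iteration */

	while (*compcode == 0 /* hypothetical COMPLETION_CODE_INIT */) {
		cpu_relax();
		udelay(1);
		if (!--timeout)
			return -ETIMEDOUT;
	}
	return 0;
}
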
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index 7eb0bc13994d..8d9f394d6b50 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -384,7 +384,8 @@ static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
req_info->req_type = OTX2_CPT_ENC_DEC_REQ;
req_info->is_enc = enc;
req_info->is_trunc_hmac = false;
- req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
+ req_info->ctrl.s.grp = otx2_cpt_get_eng_grp_num(pdev,
+ OTX2_CPT_SE_TYPES);
req_info->req.cptr = ctx->er_ctx.hw_ctx;
req_info->req.cptr_dma = ctx->er_ctx.cptr_dma;
@@ -1288,7 +1289,8 @@ static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
if (status)
return status;
- req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
+ req_info->ctrl.s.grp = otx2_cpt_get_eng_grp_num(pdev,
+ OTX2_CPT_SE_TYPES);
/*
* We perform an asynchronous send and once
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
index 56904bdfd6e8..c1c44a7b89fa 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -265,17 +265,33 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
u8 eng_grp_msk;
/* Get engine group number for symmetric crypto */
- cptvf->lfs.kcrypto_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
+ cptvf->lfs.kcrypto_se_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_SE_TYPES);
if (ret)
return ret;
- if (cptvf->lfs.kcrypto_eng_grp_num == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
- dev_err(dev, "Engine group for kernel crypto not available\n");
- ret = -ENOENT;
+ if (cptvf->lfs.kcrypto_se_eng_grp_num ==
+ OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
+ dev_err(dev,
+ "Symmetric Engine group for crypto not available\n");
+ return -ENOENT;
+ }
+
+ /* Get engine group number for asymmetric crypto */
+ cptvf->lfs.kcrypto_ae_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
+ ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_AE_TYPES);
+ if (ret)
return ret;
+
+ if (cptvf->lfs.kcrypto_ae_eng_grp_num ==
+ OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
+ dev_err(dev,
+ "Asymmetric Engine group for crypto not available\n");
+ return -ENOENT;
}
- eng_grp_msk = 1 << cptvf->lfs.kcrypto_eng_grp_num;
+
+ eng_grp_msk = BIT(cptvf->lfs.kcrypto_se_eng_grp_num) |
+ BIT(cptvf->lfs.kcrypto_ae_eng_grp_num);
ret = otx2_cptvf_send_kvf_limits_msg(cptvf);
if (ret)
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
index 931b72580fd9..5277bcfa275e 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -75,6 +75,7 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
struct otx2_cpt_caps_rsp *eng_caps;
struct cpt_rd_wr_reg_msg *rsp_reg;
struct msix_offset_rsp *rsp_msix;
+ u8 grp_num;
int i;
if (msg->id >= MBOX_MSG_MAX) {
@@ -122,7 +123,11 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
break;
case MBOX_MSG_GET_ENG_GRP_NUM:
rsp_grp = (struct otx2_cpt_egrp_num_rsp *) msg;
- cptvf->lfs.kcrypto_eng_grp_num = rsp_grp->eng_grp_num;
+ grp_num = rsp_grp->eng_grp_num;
+ if (rsp_grp->eng_type == OTX2_CPT_SE_TYPES)
+ cptvf->lfs.kcrypto_se_eng_grp_num = grp_num;
+ else if (rsp_grp->eng_type == OTX2_CPT_AE_TYPES)
+ cptvf->lfs.kcrypto_ae_eng_grp_num = grp_num;
break;
case MBOX_MSG_GET_KVF_LIMITS:
rsp_limits = (struct otx2_cpt_kvf_limits_rsp *) msg;
@@ -189,7 +194,7 @@ int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type)
}
req->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
- req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+ req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->pdev, cptvf->vf_id, 0);
req->eng_type = eng_type;
return otx2_cpt_send_mbox_msg(mbox, pdev);
@@ -210,7 +215,7 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
}
req->id = MBOX_MSG_GET_KVF_LIMITS;
req->sig = OTX2_MBOX_REQ_SIG;
- req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+ req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->pdev, cptvf->vf_id, 0);
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
@@ -230,7 +235,7 @@ int otx2_cptvf_send_caps_msg(struct otx2_cptvf_dev *cptvf)
}
req->id = MBOX_MSG_GET_CAPS;
req->sig = OTX2_MBOX_REQ_SIG;
- req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+ req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->pdev, cptvf->vf_id, 0);
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
index 426244107037..e71494486c64 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
@@ -391,9 +391,19 @@ void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe)
&wqe->lfs->lf[wqe->lf_num].pqueue);
}
-int otx2_cpt_get_kcrypto_eng_grp_num(struct pci_dev *pdev)
+int otx2_cpt_get_eng_grp_num(struct pci_dev *pdev,
+ enum otx2_cpt_eng_type eng_type)
{
struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
- return cptvf->lfs.kcrypto_eng_grp_num;
+ switch (eng_type) {
+ case OTX2_CPT_SE_TYPES:
+ return cptvf->lfs.kcrypto_se_eng_grp_num;
+ case OTX2_CPT_AE_TYPES:
+ return cptvf->lfs.kcrypto_ae_eng_grp_num;
+ default:
+		dev_err(&cptvf->pdev->dev, "Unsupported engine type\n");
+ break;
+ }
+ return -ENXIO;
}
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index fd0a98b2fb1b..0493041ea088 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -1043,8 +1043,10 @@ static struct scomp_alg nx842_powernv_alg = {
.base.cra_priority = 300,
.base.cra_module = THIS_MODULE,
- .alloc_ctx = nx842_powernv_crypto_alloc_ctx,
- .free_ctx = nx842_crypto_free_ctx,
+ .streams = {
+ .alloc_ctx = nx842_powernv_crypto_alloc_ctx,
+ .free_ctx = nx842_crypto_free_ctx,
+ },
.compress = nx842_crypto_compress,
.decompress = nx842_crypto_decompress,
};
diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c
index f528e072494a..fc0222ebe807 100644
--- a/drivers/crypto/nx/nx-common-pseries.c
+++ b/drivers/crypto/nx/nx-common-pseries.c
@@ -1020,8 +1020,10 @@ static struct scomp_alg nx842_pseries_alg = {
.base.cra_priority = 300,
.base.cra_module = THIS_MODULE,
- .alloc_ctx = nx842_pseries_crypto_alloc_ctx,
- .free_ctx = nx842_crypto_free_ctx,
+ .streams = {
+ .alloc_ctx = nx842_pseries_crypto_alloc_ctx,
+ .free_ctx = nx842_crypto_free_ctx,
+ },
.compress = nx842_crypto_compress,
.decompress = nx842_crypto_decompress,
};
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index c498950402e8..1f4586509ca4 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -38,7 +38,6 @@ static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
crypto_finalize_aead_request(dd->engine, req, ret);
- pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
}
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 1ecf5f6ac04e..3cc802622dd5 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -32,6 +32,7 @@
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include "omap-crypto.h"
#include "omap-aes.h"
@@ -221,7 +222,7 @@ static void omap_aes_dma_out_callback(void *data)
struct omap_aes_dev *dd = data;
/* dma_lch_out - completed */
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
}
static int omap_aes_dma_init(struct omap_aes_dev *dd)
@@ -400,7 +401,6 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
crypto_finalize_skcipher_request(dd->engine, req, err);
- pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
}
@@ -495,9 +495,9 @@ static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf)
((u32 *)ivbuf)[i] = omap_aes_read(dd, AES_REG_IV(dd, i));
}
-static void omap_aes_done_task(unsigned long data)
+static void omap_aes_done_task(struct work_struct *t)
{
- struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
+ struct omap_aes_dev *dd = from_work(dd, t, done_task);
pr_debug("enter done_task\n");
@@ -926,7 +926,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
if (!dd->total)
/* All bytes read! */
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
else
/* Enable DATA_IN interrupt for next block */
omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
@@ -1141,7 +1141,7 @@ static int omap_aes_probe(struct platform_device *pdev)
(reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
(reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
- tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
+ INIT_WORK(&dd->done_task, omap_aes_done_task);
err = omap_aes_dma_init(dd);
if (err == -EPROBE_DEFER) {
@@ -1230,7 +1230,7 @@ err_engine:
omap_aes_dma_cleanup(dd);
err_irq:
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
err_pm_disable:
pm_runtime_disable(dev);
err_res:
@@ -1265,7 +1265,7 @@ static void omap_aes_remove(struct platform_device *pdev)
crypto_engine_exit(dd->engine);
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
omap_aes_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
}
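omap-aes, omap-des, and omap-sham all apply the same mechanical conversion from tasklets to the BH workqueue. A minimal sketch of the pattern with illustrative names (from_work() and system_bh_wq are the real workqueue APIs used in these hunks):

	struct example_dev {
		struct work_struct done_task;	/* was: struct tasklet_struct */
	};

	static void example_done_task(struct work_struct *t)
	{
		/* replaces the (unsigned long)data cast of the tasklet era */
		struct example_dev *dd = from_work(dd, t, done_task);
		/* ... completion handling, still runs in softirq context ... */
	}

	/* setup:    INIT_WORK(&dd->done_task, example_done_task);
	 * schedule: queue_work(system_bh_wq, &dd->done_task);
	 * teardown: cancel_work_sync(&dd->done_task);
	 */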
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 41d67780fd45..99c36a777e97 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -159,7 +159,7 @@ struct omap_aes_dev {
unsigned long flags;
int err;
- struct tasklet_struct done_task;
+ struct work_struct done_task;
struct aead_queue aead_queue;
spinlock_t lock;
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index a099460d5f21..149ebd77710b 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -32,6 +32,7 @@
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include "omap-crypto.h"
@@ -130,7 +131,7 @@ struct omap_des_dev {
unsigned long flags;
int err;
- struct tasklet_struct done_task;
+ struct work_struct done_task;
struct skcipher_request *req;
struct crypto_engine *engine;
@@ -325,7 +326,7 @@ static void omap_des_dma_out_callback(void *data)
struct omap_des_dev *dd = data;
/* dma_lch_out - completed */
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
}
static int omap_des_dma_init(struct omap_des_dev *dd)
@@ -489,7 +490,6 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err)
crypto_finalize_skcipher_request(dd->engine, req, err);
- pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
}
@@ -581,9 +581,9 @@ static int omap_des_crypt_req(struct crypto_engine *engine,
omap_des_crypt_dma_start(dd);
}
-static void omap_des_done_task(unsigned long data)
+static void omap_des_done_task(struct work_struct *t)
{
- struct omap_des_dev *dd = (struct omap_des_dev *)data;
+ struct omap_des_dev *dd = from_work(dd, t, done_task);
int i;
pr_debug("enter done_task\n");
@@ -891,7 +891,7 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
if (!dd->total)
/* All bytes read! */
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
else
/* Enable DATA_IN interrupt for next block */
omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x2);
@@ -987,7 +987,7 @@ static int omap_des_probe(struct platform_device *pdev)
(reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
(reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
- tasklet_init(&dd->done_task, omap_des_done_task, (unsigned long)dd);
+ INIT_WORK(&dd->done_task, omap_des_done_task);
err = omap_des_dma_init(dd);
if (err == -EPROBE_DEFER) {
@@ -1054,7 +1054,7 @@ err_engine:
omap_des_dma_cleanup(dd);
err_irq:
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
err_get:
pm_runtime_disable(dev);
err_res:
@@ -1078,7 +1078,7 @@ static void omap_des_remove(struct platform_device *pdev)
crypto_engine_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
omap_des_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 56f192cb976d..ff8aac02994a 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -37,6 +37,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#define MD5_DIGEST_SIZE 16
@@ -217,7 +218,7 @@ struct omap_sham_dev {
int irq;
int err;
struct dma_chan *dma_lch;
- struct tasklet_struct done_task;
+ struct work_struct done_task;
u8 polling_mode;
u8 xmit_buf[BUFLEN] OMAP_ALIGNED;
@@ -561,7 +562,7 @@ static void omap_sham_dma_callback(void *param)
struct omap_sham_dev *dd = param;
set_bit(FLAGS_DMA_READY, &dd->flags);
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
}
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
@@ -1167,7 +1168,6 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
dd->flags &= ~(BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
- pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
ctx->offset = 0;
@@ -1704,9 +1704,9 @@ static struct ahash_engine_alg algs_sha384_sha512[] = {
},
};
-static void omap_sham_done_task(unsigned long data)
+static void omap_sham_done_task(struct work_struct *t)
{
- struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+ struct omap_sham_dev *dd = from_work(dd, t, done_task);
int err = 0;
dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);
@@ -1740,7 +1740,7 @@ finish:
static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
{
set_bit(FLAGS_OUTPUT_READY, &dd->flags);
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
return IRQ_HANDLED;
}
@@ -2060,7 +2060,7 @@ static int omap_sham_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dd);
INIT_LIST_HEAD(&dd->list);
- tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
+ INIT_WORK(&dd->done_task, omap_sham_done_task);
crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
@@ -2195,7 +2195,7 @@ static void omap_sham_remove(struct platform_device *pdev)
&dd->pdata->algs_info[i].algs_list[j]);
dd->pdata->algs_info[i].registered--;
}
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index e95e84486d9a..b966f3365b7d 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -21,7 +21,6 @@
#include "sha.h"
#include "aead.h"
-#define QCE_MAJOR_VERSION5 0x05
#define QCE_QUEUE_LENGTH 1
#define QCE_DEFAULT_MEM_BANDWIDTH 393600
@@ -161,7 +160,7 @@ static int qce_check_version(struct qce_device *qce)
* the driver does not support v5 with minor 0 because it has special
* alignment requirements.
*/
- if (major != QCE_MAJOR_VERSION5 || minor == 0)
+ if (major == 5 && minor == 0)
return -ENODEV;
qce->burst_size = QCE_BAM_BURST_SIZE;
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 1dec7aea852d..68cafd4741ad 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -24,11 +24,13 @@ int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma)
dma->txchan = dma_request_chan(dev, "tx");
if (IS_ERR(dma->txchan))
- return PTR_ERR(dma->txchan);
+ return dev_err_probe(dev, PTR_ERR(dma->txchan),
+ "Failed to get TX DMA channel\n");
dma->rxchan = dma_request_chan(dev, "rx");
if (IS_ERR(dma->rxchan)) {
- ret = PTR_ERR(dma->rxchan);
+ ret = dev_err_probe(dev, PTR_ERR(dma->rxchan),
+ "Failed to get RX DMA channel\n");
goto error_rx;
}
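A rough sketch of what the helper buys these call sites (not the exact implementation): it hands the errno back for a direct return, logs loudly for real failures, and stays quiet on probe deferral, so DMA channels that are not ready yet do not spam the log:

	static inline int example_err_probe(struct device *dev, int err,
					    const char *what)
	{
		if (err != -EPROBE_DEFER)
			dev_err(dev, "%s: error %d\n", what, err);
		/* the real dev_err_probe() also records a deferral reason */
		return err;
	}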
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index d6928ebe9526..b9f5a8b42e66 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -254,7 +254,7 @@ static void rk_hash_unprepare(struct crypto_engine *engine, void *breq)
struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
struct rk_crypto_info *rkc = rctx->dev;
- dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
+ dma_unmap_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
}
static int rk_hash_run(struct crypto_engine *engine, void *breq)
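The fix restores the DMA-API rule that dma_unmap_sg() takes the nents value originally passed to dma_map_sg(), not the (possibly smaller) mapped count it returned. A condensed sketch of the correct pairing:

	int nents = sg_nents(sg);		/* value passed to map */
	int mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);

	if (!mapped)
		return -ENOMEM;
	/* ... walk 'mapped' entries via sg_dma_address()/sg_dma_len() ... */
	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);	/* same 'nents', not 'mapped' */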
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index 9393e10671c2..e80f9148c012 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -321,8 +321,7 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
algt->stat_req++;
rkc->nreq++;
- ivsize = crypto_skcipher_ivsize(tfm);
- if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
+ if (areq->iv && ivsize > 0) {
if (rctx->mode & RK_CRYPTO_DEC) {
offset = areq->cryptlen - ivsize;
scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
diff --git a/drivers/crypto/starfive/jh7110-aes.c b/drivers/crypto/starfive/jh7110-aes.c
index 86a1a1fa9f8f..426b24889af8 100644
--- a/drivers/crypto/starfive/jh7110-aes.c
+++ b/drivers/crypto/starfive/jh7110-aes.c
@@ -511,8 +511,7 @@ static int starfive_aes_map_sg(struct starfive_cryp_dev *cryp,
stsg = sg_next(stsg), dtsg = sg_next(dtsg)) {
src_nents = dma_map_sg(cryp->dev, stsg, 1, DMA_BIDIRECTIONAL);
if (src_nents == 0)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "dma_map_sg error\n");
+ return -ENOMEM;
dst_nents = src_nents;
len = min(sg_dma_len(stsg), remain);
@@ -528,13 +527,11 @@ static int starfive_aes_map_sg(struct starfive_cryp_dev *cryp,
for (stsg = src, dtsg = dst;;) {
src_nents = dma_map_sg(cryp->dev, stsg, 1, DMA_TO_DEVICE);
if (src_nents == 0)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "dma_map_sg src error\n");
+ return -ENOMEM;
dst_nents = dma_map_sg(cryp->dev, dtsg, 1, DMA_FROM_DEVICE);
if (dst_nents == 0)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "dma_map_sg dst error\n");
+ return -ENOMEM;
len = min(sg_dma_len(stsg), sg_dma_len(dtsg));
len = min(len, remain);
@@ -669,8 +666,7 @@ static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq
if (cryp->assoclen) {
rctx->adata = kzalloc(cryp->assoclen + AES_BLOCK_SIZE, GFP_KERNEL);
if (!rctx->adata)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "Failed to alloc memory for adata");
+ return -ENOMEM;
if (sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, cryp->assoclen),
rctx->adata, cryp->assoclen) != cryp->assoclen)
diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c
index 2c60a1047bc3..54b7af4a7aee 100644
--- a/drivers/crypto/starfive/jh7110-hash.c
+++ b/drivers/crypto/starfive/jh7110-hash.c
@@ -229,8 +229,7 @@ static int starfive_hash_one_request(struct crypto_engine *engine, void *areq)
for_each_sg(rctx->in_sg, tsg, rctx->in_sg_len, i) {
src_nents = dma_map_sg(cryp->dev, tsg, 1, DMA_TO_DEVICE);
if (src_nents == 0)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "dma_map_sg error\n");
+ return -ENOMEM;
ret = starfive_hash_dma_xfer(cryp, tsg);
dma_unmap_sg(cryp->dev, tsg, 1, DMA_TO_DEVICE);
@@ -326,6 +325,7 @@ static int starfive_hash_digest(struct ahash_request *req)
struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
struct starfive_cryp_dev *cryp = ctx->cryp;
+ int sg_len;
memset(rctx, 0, sizeof(struct starfive_cryp_request_ctx));
@@ -334,7 +334,10 @@ static int starfive_hash_digest(struct ahash_request *req)
rctx->in_sg = req->src;
rctx->blksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
rctx->digsize = crypto_ahash_digestsize(tfm);
- rctx->in_sg_len = sg_nents_for_len(rctx->in_sg, rctx->total);
+ sg_len = sg_nents_for_len(rctx->in_sg, rctx->total);
+ if (sg_len < 0)
+ return sg_len;
+ rctx->in_sg_len = sg_len;
ctx->rctx = rctx;
return crypto_transfer_hash_request_to_engine(cryp->engine, req);
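The bug class fixed here: sg_nents_for_len() returns -EINVAL when the scatterlist is shorter than the requested length, and assigning that straight into the unsigned in_sg_len field would silently turn it into a huge entry count. Checking through a signed local first keeps the error visible:

	int sg_len = sg_nents_for_len(rctx->in_sg, rctx->total);

	if (sg_len < 0)
		return sg_len;		/* list shorter than 'total' */
	rctx->in_sg_len = sg_len;	/* store only once known non-negative */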
@@ -493,25 +496,25 @@ static int starfive_hash_setkey(struct crypto_ahash *hash,
static int starfive_sha224_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "sha224-generic",
+ return starfive_hash_init_tfm(hash, "sha224-lib",
STARFIVE_HASH_SHA224, 0);
}
static int starfive_sha256_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "sha256-generic",
+ return starfive_hash_init_tfm(hash, "sha256-lib",
STARFIVE_HASH_SHA256, 0);
}
static int starfive_sha384_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "sha384-generic",
+ return starfive_hash_init_tfm(hash, "sha384-lib",
STARFIVE_HASH_SHA384, 0);
}
static int starfive_sha512_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "sha512-generic",
+ return starfive_hash_init_tfm(hash, "sha512-lib",
STARFIVE_HASH_SHA512, 0);
}
@@ -523,25 +526,25 @@ static int starfive_sm3_init_tfm(struct crypto_ahash *hash)
static int starfive_hmac_sha224_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "hmac(sha224-generic)",
+ return starfive_hash_init_tfm(hash, "hmac-sha224-lib",
STARFIVE_HASH_SHA224, 1);
}
static int starfive_hmac_sha256_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "hmac(sha256-generic)",
+ return starfive_hash_init_tfm(hash, "hmac-sha256-lib",
STARFIVE_HASH_SHA256, 1);
}
static int starfive_hmac_sha384_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "hmac(sha384-generic)",
+ return starfive_hash_init_tfm(hash, "hmac-sha384-lib",
STARFIVE_HASH_SHA384, 1);
}
static int starfive_hmac_sha512_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "hmac(sha512-generic)",
+ return starfive_hash_init_tfm(hash, "hmac-sha512-lib",
STARFIVE_HASH_SHA512, 1);
}
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 49dfd161e9b9..d6dc848c82ee 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -1,13 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config CRYPTO_DEV_STM32_CRC
- tristate "Support for STM32 crc accelerators"
- depends on ARCH_STM32
- select CRYPTO_HASH
- select CRC32
- help
- This enables support for the CRC32 hw accelerator which can be found
- on STMicroelectronics STM32 SOC.
-
config CRYPTO_DEV_STM32_HASH
tristate "Support for STM32 hash accelerators"
depends on ARCH_STM32 || ARCH_U8500
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
index 518e0e0b11a9..c63004026afb 100644
--- a/drivers/crypto/stm32/Makefile
+++ b/drivers/crypto/stm32/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32-crc32.o
obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o
obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
deleted file mode 100644
index fd29785a3ecf..000000000000
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ /dev/null
@@ -1,480 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) STMicroelectronics SA 2017
- * Author: Fabien Dessenne <fabien.dessenne@st.com>
- */
-
-#include <linux/bitrev.h>
-#include <linux/clk.h>
-#include <linux/crc32.h>
-#include <linux/crc32poly.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-
-#include <crypto/internal/hash.h>
-
-#include <linux/unaligned.h>
-
-#define DRIVER_NAME "stm32-crc32"
-#define CHKSUM_DIGEST_SIZE 4
-#define CHKSUM_BLOCK_SIZE 1
-
-/* Registers */
-#define CRC_DR 0x00000000
-#define CRC_CR 0x00000008
-#define CRC_INIT 0x00000010
-#define CRC_POL 0x00000014
-
-/* Registers values */
-#define CRC_CR_RESET BIT(0)
-#define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5))
-#define CRC_CR_REV_IN_BYTE BIT(5)
-#define CRC_CR_REV_OUT BIT(7)
-#define CRC32C_INIT_DEFAULT 0xFFFFFFFF
-
-#define CRC_AUTOSUSPEND_DELAY 50
-
-static unsigned int burst_size;
-module_param(burst_size, uint, 0644);
-MODULE_PARM_DESC(burst_size, "Select burst byte size (0 unlimited)");
-
-struct stm32_crc {
- struct list_head list;
- struct device *dev;
- void __iomem *regs;
- struct clk *clk;
- spinlock_t lock;
-};
-
-struct stm32_crc_list {
- struct list_head dev_list;
- spinlock_t lock; /* protect dev_list */
-};
-
-static struct stm32_crc_list crc_list = {
- .dev_list = LIST_HEAD_INIT(crc_list.dev_list),
- .lock = __SPIN_LOCK_UNLOCKED(crc_list.lock),
-};
-
-struct stm32_crc_ctx {
- u32 key;
- u32 poly;
-};
-
-struct stm32_crc_desc_ctx {
- u32 partial; /* crc32c: partial in first 4 bytes of that struct */
-};
-
-static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
-{
- struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
-
- mctx->key = 0;
- mctx->poly = CRC32_POLY_LE;
- return 0;
-}
-
-static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
-{
- struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
-
- mctx->key = CRC32C_INIT_DEFAULT;
- mctx->poly = CRC32C_POLY_LE;
- return 0;
-}
-
-static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);
-
- if (keylen != sizeof(u32))
- return -EINVAL;
-
- mctx->key = get_unaligned_le32(key);
- return 0;
-}
-
-static struct stm32_crc *stm32_crc_get_next_crc(void)
-{
- struct stm32_crc *crc;
-
- spin_lock_bh(&crc_list.lock);
- crc = list_first_entry_or_null(&crc_list.dev_list, struct stm32_crc, list);
- if (crc)
- list_move_tail(&crc->list, &crc_list.dev_list);
- spin_unlock_bh(&crc_list.lock);
-
- return crc;
-}
-
-static int stm32_crc_init(struct shash_desc *desc)
-{
- struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
- struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
- struct stm32_crc *crc;
- unsigned long flags;
-
- crc = stm32_crc_get_next_crc();
- if (!crc)
- return -ENODEV;
-
- pm_runtime_get_sync(crc->dev);
-
- spin_lock_irqsave(&crc->lock, flags);
-
- /* Reset, set key, poly and configure in bit reverse mode */
- writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT);
- writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
- writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
-
- /* Store partial result */
- ctx->partial = readl_relaxed(crc->regs + CRC_DR);
-
- spin_unlock_irqrestore(&crc->lock, flags);
-
- pm_runtime_mark_last_busy(crc->dev);
- pm_runtime_put_autosuspend(crc->dev);
-
- return 0;
-}
-
-static int burst_update(struct shash_desc *desc, const u8 *d8,
- size_t length)
-{
- struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
- struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
- struct stm32_crc *crc;
-
- crc = stm32_crc_get_next_crc();
- if (!crc)
- return -ENODEV;
-
- pm_runtime_get_sync(crc->dev);
-
- if (!spin_trylock(&crc->lock)) {
- /* Hardware is busy, calculate crc32 by software */
- if (mctx->poly == CRC32_POLY_LE)
- ctx->partial = crc32_le(ctx->partial, d8, length);
- else
- ctx->partial = crc32c(ctx->partial, d8, length);
-
- goto pm_out;
- }
-
- /*
- * Restore previously calculated CRC for this context as init value
- * Restore polynomial configuration
- * Configure in register for word input data,
- * Configure out register in reversed bit mode data.
- */
- writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT);
- writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
- writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
-
- if (d8 != PTR_ALIGN(d8, sizeof(u32))) {
- /* Configure for byte data */
- writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
- while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) {
- writeb_relaxed(*d8++, crc->regs + CRC_DR);
- length--;
- }
- /* Configure for word data */
- writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
- }
-
- for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32))
- writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR);
-
- if (length) {
- /* Configure for byte data */
- writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
- while (length--)
- writeb_relaxed(*d8++, crc->regs + CRC_DR);
- }
-
- /* Store partial result */
- ctx->partial = readl_relaxed(crc->regs + CRC_DR);
-
- spin_unlock(&crc->lock);
-
-pm_out:
- pm_runtime_mark_last_busy(crc->dev);
- pm_runtime_put_autosuspend(crc->dev);
-
- return 0;
-}
-
-static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
- unsigned int length)
-{
- const unsigned int burst_sz = burst_size;
- unsigned int rem_sz;
- const u8 *cur;
- size_t size;
- int ret;
-
- if (!burst_sz)
- return burst_update(desc, d8, length);
-
- /* Digest first bytes not 32bit aligned at first pass in the loop */
- size = min_t(size_t, length, burst_sz + (size_t)d8 -
- ALIGN_DOWN((size_t)d8, sizeof(u32)));
- for (rem_sz = length, cur = d8; rem_sz;
- rem_sz -= size, cur += size, size = min(rem_sz, burst_sz)) {
- ret = burst_update(desc, cur, size);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int stm32_crc_final(struct shash_desc *desc, u8 *out)
-{
- struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
- struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
-
- /* Send computed CRC */
- put_unaligned_le32(mctx->poly == CRC32C_POLY_LE ?
- ~ctx->partial : ctx->partial, out);
-
- return 0;
-}
-
-static int stm32_crc_finup(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return stm32_crc_update(desc, data, length) ?:
- stm32_crc_final(desc, out);
-}
-
-static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
-}
-
-static unsigned int refcnt;
-static DEFINE_MUTEX(refcnt_lock);
-static struct shash_alg algs[] = {
- /* CRC-32 */
- {
- .setkey = stm32_crc_setkey,
- .init = stm32_crc_init,
- .update = stm32_crc_update,
- .final = stm32_crc_final,
- .finup = stm32_crc_finup,
- .digest = stm32_crc_digest,
- .descsize = sizeof(struct stm32_crc_desc_ctx),
- .digestsize = CHKSUM_DIGEST_SIZE,
- .base = {
- .cra_name = "crc32",
- .cra_driver_name = "stm32-crc32-crc32",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_crc_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = stm32_crc32_cra_init,
- }
- },
- /* CRC-32Castagnoli */
- {
- .setkey = stm32_crc_setkey,
- .init = stm32_crc_init,
- .update = stm32_crc_update,
- .final = stm32_crc_final,
- .finup = stm32_crc_finup,
- .digest = stm32_crc_digest,
- .descsize = sizeof(struct stm32_crc_desc_ctx),
- .digestsize = CHKSUM_DIGEST_SIZE,
- .base = {
- .cra_name = "crc32c",
- .cra_driver_name = "stm32-crc32-crc32c",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_crc_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = stm32_crc32c_cra_init,
- }
- }
-};
-
-static int stm32_crc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct stm32_crc *crc;
- int ret;
-
- crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
- if (!crc)
- return -ENOMEM;
-
- crc->dev = dev;
-
- crc->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(crc->regs)) {
- dev_err(dev, "Cannot map CRC IO\n");
- return PTR_ERR(crc->regs);
- }
-
- crc->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(crc->clk)) {
- dev_err(dev, "Could not get clock\n");
- return PTR_ERR(crc->clk);
- }
-
- ret = clk_prepare_enable(crc->clk);
- if (ret) {
- dev_err(crc->dev, "Failed to enable clock\n");
- return ret;
- }
-
- pm_runtime_set_autosuspend_delay(dev, CRC_AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(dev);
-
- pm_runtime_get_noresume(dev);
- pm_runtime_set_active(dev);
- pm_runtime_irq_safe(dev);
- pm_runtime_enable(dev);
-
- spin_lock_init(&crc->lock);
-
- platform_set_drvdata(pdev, crc);
-
- spin_lock(&crc_list.lock);
- list_add(&crc->list, &crc_list.dev_list);
- spin_unlock(&crc_list.lock);
-
- mutex_lock(&refcnt_lock);
- if (!refcnt) {
- ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
- if (ret) {
- mutex_unlock(&refcnt_lock);
- dev_err(dev, "Failed to register\n");
- clk_disable_unprepare(crc->clk);
- return ret;
- }
- }
- refcnt++;
- mutex_unlock(&refcnt_lock);
-
- dev_info(dev, "Initialized\n");
-
- pm_runtime_put_sync(dev);
-
- return 0;
-}
-
-static void stm32_crc_remove(struct platform_device *pdev)
-{
- struct stm32_crc *crc = platform_get_drvdata(pdev);
- int ret = pm_runtime_get_sync(crc->dev);
-
- spin_lock(&crc_list.lock);
- list_del(&crc->list);
- spin_unlock(&crc_list.lock);
-
- mutex_lock(&refcnt_lock);
- if (!--refcnt)
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
- mutex_unlock(&refcnt_lock);
-
- pm_runtime_disable(crc->dev);
- pm_runtime_put_noidle(crc->dev);
-
- if (ret >= 0)
- clk_disable(crc->clk);
- clk_unprepare(crc->clk);
-}
-
-static int __maybe_unused stm32_crc_suspend(struct device *dev)
-{
- struct stm32_crc *crc = dev_get_drvdata(dev);
- int ret;
-
- ret = pm_runtime_force_suspend(dev);
- if (ret)
- return ret;
-
- clk_unprepare(crc->clk);
-
- return 0;
-}
-
-static int __maybe_unused stm32_crc_resume(struct device *dev)
-{
- struct stm32_crc *crc = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_prepare(crc->clk);
- if (ret) {
- dev_err(crc->dev, "Failed to prepare clock\n");
- return ret;
- }
-
- return pm_runtime_force_resume(dev);
-}
-
-static int __maybe_unused stm32_crc_runtime_suspend(struct device *dev)
-{
- struct stm32_crc *crc = dev_get_drvdata(dev);
-
- clk_disable(crc->clk);
-
- return 0;
-}
-
-static int __maybe_unused stm32_crc_runtime_resume(struct device *dev)
-{
- struct stm32_crc *crc = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_enable(crc->clk);
- if (ret) {
- dev_err(crc->dev, "Failed to enable clock\n");
- return ret;
- }
-
- return 0;
-}
-
-static const struct dev_pm_ops stm32_crc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(stm32_crc_suspend,
- stm32_crc_resume)
- SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
- stm32_crc_runtime_resume, NULL)
-};
-
-static const struct of_device_id stm32_dt_ids[] = {
- { .compatible = "st,stm32f7-crc", },
- {},
-};
-MODULE_DEVICE_TABLE(of, stm32_dt_ids);
-
-static struct platform_driver stm32_crc_driver = {
- .probe = stm32_crc_probe,
- .remove = stm32_crc_remove,
- .driver = {
- .name = DRIVER_NAME,
- .pm = &stm32_crc_pm_ops,
- .of_match_table = stm32_dt_ids,
- },
-};
-
-module_platform_driver(stm32_crc_driver);
-
-MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
-MODULE_DESCRIPTION("STMicrolectronics STM32 CRC32 hardware driver");
-MODULE_LICENSE("GPL");
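Presumably the driver is removed in favor of the generic CRC32 library, which it already fell back to whenever the hardware was busy (see burst_update() above). Under that assumption, equivalent digests come straight from <linux/crc32.h>:

	#include <linux/crc32.h>

	u32 crc;

	crc = crc32_le(seed, buf, len);	/* CRC-32, polynomial 0xedb88320 */
	crc = crc32c(seed, buf, len);	/* CRC-32C (Castagnoli) */
	/* final-XOR conventions differ per user; the shash driver above
	 * inverted only the crc32c result (see stm32_crc_final()) */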
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 5ce88e7a8f65..5e82e8a1f71a 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -851,7 +851,6 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
if (!err && (!(is_gcm(cryp) || is_ccm(cryp) || is_ecb(cryp))))
stm32_cryp_get_iv(cryp);
- pm_runtime_mark_last_busy(cryp->dev);
pm_runtime_put_autosuspend(cryp->dev);
if (is_gcm(cryp) || is_ccm(cryp))
@@ -2782,5 +2781,5 @@ static struct platform_driver stm32_cryp_driver = {
module_platform_driver(stm32_cryp_driver);
MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
-MODULE_DESCRIPTION("STMicrolectronics STM32 CRYP hardware driver");
+MODULE_DESCRIPTION("STMicroelectronics STM32 CRYP hardware driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 768b27de4737..a4436728b0db 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -1373,7 +1373,6 @@ static void stm32_hash_unprepare_request(struct ahash_request *req)
*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
pm_runtime:
- pm_runtime_mark_last_busy(hdev->dev);
pm_runtime_put_autosuspend(hdev->dev);
}
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
index d09b4aaeecef..4a298ace6e9f 100644
--- a/drivers/crypto/tegra/tegra-se-hash.c
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -400,8 +400,9 @@ static int tegra_sha_do_update(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct tegra_se *se = ctx->se;
- unsigned int nblks, nresidue, size, ret;
+ unsigned int nblks, nresidue, size;
u32 *cpuvaddr = se->cmdbuf->addr;
+ int ret;
nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c
index 1c94f1de0546..7237f14eaf5a 100644
--- a/drivers/crypto/tegra/tegra-se-main.c
+++ b/drivers/crypto/tegra/tegra-se-main.c
@@ -310,7 +310,7 @@ static int tegra_se_probe(struct platform_device *pdev)
se->engine = crypto_engine_alloc_init(dev, 0);
if (!se->engine)
- return dev_err_probe(dev, -ENOMEM, "failed to init crypto engine\n");
+ return -ENOMEM;
ret = crypto_engine_start(se->engine);
if (ret) {
diff --git a/drivers/crypto/ti/Kconfig b/drivers/crypto/ti/Kconfig
new file mode 100644
index 000000000000..a3692ceec49b
--- /dev/null
+++ b/drivers/crypto/ti/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CRYPTO_DEV_TI_DTHEV2
+ tristate "Support for TI DTHE V2 cryptography engine"
+ depends on ARCH_K3 || COMPILE_TEST
+ select CRYPTO_ENGINE
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_XTS
+ help
+ This enables support for the TI DTHE V2 hardware cryptography
+ engine found on TI K3 SoCs. Selecting this offloads the
+ supported cryptographic algorithms to the hardware on these
+ devices, providing enhanced resistance against side-channel
+ attacks.
diff --git a/drivers/crypto/ti/Makefile b/drivers/crypto/ti/Makefile
new file mode 100644
index 000000000000..b883078f203d
--- /dev/null
+++ b/drivers/crypto/ti/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_TI_DTHEV2) += dthev2.o
+dthev2-objs := dthev2-common.o dthev2-aes.o
diff --git a/drivers/crypto/ti/dthev2-aes.c b/drivers/crypto/ti/dthev2-aes.c
new file mode 100644
index 000000000000..156729ccc50e
--- /dev/null
+++ b/drivers/crypto/ti/dthev2-aes.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * K3 DTHE V2 crypto accelerator driver
+ *
+ * Copyright (C) Texas Instruments 2025 - https://www.ti.com
+ * Author: T Pratham <t-pratham@ti.com>
+ */
+
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+
+#include "dthev2-common.h"
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/scatterlist.h>
+
+/* Registers */
+
+// AES Engine
+#define DTHE_P_AES_BASE 0x7000
+
+#define DTHE_P_AES_KEY1_0 0x0038
+#define DTHE_P_AES_KEY1_1 0x003C
+#define DTHE_P_AES_KEY1_2 0x0030
+#define DTHE_P_AES_KEY1_3 0x0034
+#define DTHE_P_AES_KEY1_4 0x0028
+#define DTHE_P_AES_KEY1_5 0x002C
+#define DTHE_P_AES_KEY1_6 0x0020
+#define DTHE_P_AES_KEY1_7 0x0024
+
+#define DTHE_P_AES_KEY2_0 0x0018
+#define DTHE_P_AES_KEY2_1 0x001C
+#define DTHE_P_AES_KEY2_2 0x0010
+#define DTHE_P_AES_KEY2_3 0x0014
+#define DTHE_P_AES_KEY2_4 0x0008
+#define DTHE_P_AES_KEY2_5 0x000C
+#define DTHE_P_AES_KEY2_6 0x0000
+#define DTHE_P_AES_KEY2_7 0x0004
+
+#define DTHE_P_AES_IV_IN_0 0x0040
+#define DTHE_P_AES_IV_IN_1 0x0044
+#define DTHE_P_AES_IV_IN_2 0x0048
+#define DTHE_P_AES_IV_IN_3 0x004C
+#define DTHE_P_AES_CTRL 0x0050
+#define DTHE_P_AES_C_LENGTH_0 0x0054
+#define DTHE_P_AES_C_LENGTH_1 0x0058
+#define DTHE_P_AES_AUTH_LENGTH 0x005C
+#define DTHE_P_AES_DATA_IN_OUT 0x0060
+
+#define DTHE_P_AES_SYSCONFIG 0x0084
+#define DTHE_P_AES_IRQSTATUS 0x008C
+#define DTHE_P_AES_IRQENABLE 0x0090
+
+/* Register write values and macros */
+
+enum aes_ctrl_mode_masks {
+ AES_CTRL_ECB_MASK = 0x00,
+ AES_CTRL_CBC_MASK = BIT(5),
+ AES_CTRL_XTS_MASK = BIT(12) | BIT(11),
+};
+
+#define DTHE_AES_CTRL_MODE_CLEAR_MASK (~GENMASK(28, 5))
+
+#define DTHE_AES_CTRL_DIR_ENC BIT(2)
+
+#define DTHE_AES_CTRL_KEYSIZE_16B BIT(3)
+#define DTHE_AES_CTRL_KEYSIZE_24B BIT(4)
+#define DTHE_AES_CTRL_KEYSIZE_32B (BIT(3) | BIT(4))
+
+#define DTHE_AES_CTRL_SAVE_CTX_SET BIT(29)
+
+#define DTHE_AES_CTRL_OUTPUT_READY BIT_MASK(0)
+#define DTHE_AES_CTRL_INPUT_READY BIT_MASK(1)
+#define DTHE_AES_CTRL_SAVED_CTX_READY BIT_MASK(30)
+#define DTHE_AES_CTRL_CTX_READY BIT_MASK(31)
+
+#define DTHE_AES_SYSCONFIG_DMA_DATA_IN_OUT_EN GENMASK(6, 5)
+#define DTHE_AES_IRQENABLE_EN_ALL GENMASK(3, 0)
+
+/* Misc */
+#define AES_IV_SIZE AES_BLOCK_SIZE
+#define AES_BLOCK_WORDS (AES_BLOCK_SIZE / sizeof(u32))
+#define AES_IV_WORDS AES_BLOCK_WORDS
+
+static int dthe_cipher_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+
+ ctx->dev_data = dev_data;
+ ctx->keylen = 0;
+
+ return 0;
+}
+
+static int dthe_cipher_xts_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+
+ ctx->dev_data = dev_data;
+ ctx->keylen = 0;
+
+ ctx->skcipher_fb = crypto_alloc_sync_skcipher("xts(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->skcipher_fb)) {
+ dev_err(dev_data->dev, "fallback driver xts(aes) couldn't be loaded\n");
+ return PTR_ERR(ctx->skcipher_fb);
+ }
+
+ return 0;
+}
+
+static void dthe_cipher_xts_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_sync_skcipher(ctx->skcipher_fb);
+}
+
+static int dthe_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ ctx->keylen = keylen;
+ memcpy(ctx->key, key, keylen);
+
+ return 0;
+}
+
+static int dthe_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ ctx->aes_mode = DTHE_AES_ECB;
+
+ return dthe_aes_setkey(tfm, key, keylen);
+}
+
+static int dthe_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ ctx->aes_mode = DTHE_AES_CBC;
+
+ return dthe_aes_setkey(tfm, key, keylen);
+}
+
+static int dthe_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (keylen != 2 * AES_KEYSIZE_128 &&
+ keylen != 2 * AES_KEYSIZE_192 &&
+ keylen != 2 * AES_KEYSIZE_256)
+ return -EINVAL;
+
+ ctx->aes_mode = DTHE_AES_XTS;
+ ctx->keylen = keylen / 2;
+ memcpy(ctx->key, key, keylen);
+
+ crypto_sync_skcipher_clear_flags(ctx->skcipher_fb, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ctx->skcipher_fb,
+ crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(ctx->skcipher_fb, key, keylen);
+}
+
+static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx,
+ struct dthe_aes_req_ctx *rctx,
+ u32 *iv_in)
+{
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+ void __iomem *aes_base_reg = dev_data->regs + DTHE_P_AES_BASE;
+ u32 ctrl_val = 0;
+
+ writel_relaxed(ctx->key[0], aes_base_reg + DTHE_P_AES_KEY1_0);
+ writel_relaxed(ctx->key[1], aes_base_reg + DTHE_P_AES_KEY1_1);
+ writel_relaxed(ctx->key[2], aes_base_reg + DTHE_P_AES_KEY1_2);
+ writel_relaxed(ctx->key[3], aes_base_reg + DTHE_P_AES_KEY1_3);
+
+ if (ctx->keylen > AES_KEYSIZE_128) {
+ writel_relaxed(ctx->key[4], aes_base_reg + DTHE_P_AES_KEY1_4);
+ writel_relaxed(ctx->key[5], aes_base_reg + DTHE_P_AES_KEY1_5);
+ }
+ if (ctx->keylen == AES_KEYSIZE_256) {
+ writel_relaxed(ctx->key[6], aes_base_reg + DTHE_P_AES_KEY1_6);
+ writel_relaxed(ctx->key[7], aes_base_reg + DTHE_P_AES_KEY1_7);
+ }
+
+ if (ctx->aes_mode == DTHE_AES_XTS) {
+ size_t key2_offset = ctx->keylen / sizeof(u32);
+
+ writel_relaxed(ctx->key[key2_offset + 0], aes_base_reg + DTHE_P_AES_KEY2_0);
+ writel_relaxed(ctx->key[key2_offset + 1], aes_base_reg + DTHE_P_AES_KEY2_1);
+ writel_relaxed(ctx->key[key2_offset + 2], aes_base_reg + DTHE_P_AES_KEY2_2);
+ writel_relaxed(ctx->key[key2_offset + 3], aes_base_reg + DTHE_P_AES_KEY2_3);
+
+ if (ctx->keylen > AES_KEYSIZE_128) {
+ writel_relaxed(ctx->key[key2_offset + 4], aes_base_reg + DTHE_P_AES_KEY2_4);
+ writel_relaxed(ctx->key[key2_offset + 5], aes_base_reg + DTHE_P_AES_KEY2_5);
+ }
+ if (ctx->keylen == AES_KEYSIZE_256) {
+ writel_relaxed(ctx->key[key2_offset + 6], aes_base_reg + DTHE_P_AES_KEY2_6);
+ writel_relaxed(ctx->key[key2_offset + 7], aes_base_reg + DTHE_P_AES_KEY2_7);
+ }
+ }
+
+ if (rctx->enc)
+ ctrl_val |= DTHE_AES_CTRL_DIR_ENC;
+
+ if (ctx->keylen == AES_KEYSIZE_128)
+ ctrl_val |= DTHE_AES_CTRL_KEYSIZE_16B;
+ else if (ctx->keylen == AES_KEYSIZE_192)
+ ctrl_val |= DTHE_AES_CTRL_KEYSIZE_24B;
+ else
+ ctrl_val |= DTHE_AES_CTRL_KEYSIZE_32B;
+
+ // Write AES mode
+ ctrl_val &= DTHE_AES_CTRL_MODE_CLEAR_MASK;
+ switch (ctx->aes_mode) {
+ case DTHE_AES_ECB:
+ ctrl_val |= AES_CTRL_ECB_MASK;
+ break;
+ case DTHE_AES_CBC:
+ ctrl_val |= AES_CTRL_CBC_MASK;
+ break;
+ case DTHE_AES_XTS:
+ ctrl_val |= AES_CTRL_XTS_MASK;
+ break;
+ }
+
+ if (iv_in) {
+ ctrl_val |= DTHE_AES_CTRL_SAVE_CTX_SET;
+ for (int i = 0; i < AES_IV_WORDS; ++i)
+ writel_relaxed(iv_in[i],
+ aes_base_reg + DTHE_P_AES_IV_IN_0 + (DTHE_REG_SIZE * i));
+ }
+
+ writel_relaxed(ctrl_val, aes_base_reg + DTHE_P_AES_CTRL);
+}
+
+static void dthe_aes_dma_in_callback(void *data)
+{
+ struct skcipher_request *req = (struct skcipher_request *)data;
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+
+ complete(&rctx->aes_compl);
+}
+
+static int dthe_aes_run(struct crypto_engine *engine, void *areq)
+{
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+
+ unsigned int len = req->cryptlen;
+ struct scatterlist *src = req->src;
+ struct scatterlist *dst = req->dst;
+
+ int src_nents = sg_nents_for_len(src, len);
+ int dst_nents;
+
+ int src_mapped_nents;
+ int dst_mapped_nents;
+
+ bool diff_dst;
+ enum dma_data_direction src_dir, dst_dir;
+
+ struct device *tx_dev, *rx_dev;
+ struct dma_async_tx_descriptor *desc_in, *desc_out;
+
+ int ret;
+
+ void __iomem *aes_base_reg = dev_data->regs + DTHE_P_AES_BASE;
+
+ u32 aes_irqenable_val = readl_relaxed(aes_base_reg + DTHE_P_AES_IRQENABLE);
+ u32 aes_sysconfig_val = readl_relaxed(aes_base_reg + DTHE_P_AES_SYSCONFIG);
+
+ aes_sysconfig_val |= DTHE_AES_SYSCONFIG_DMA_DATA_IN_OUT_EN;
+ writel_relaxed(aes_sysconfig_val, aes_base_reg + DTHE_P_AES_SYSCONFIG);
+
+ aes_irqenable_val |= DTHE_AES_IRQENABLE_EN_ALL;
+ writel_relaxed(aes_irqenable_val, aes_base_reg + DTHE_P_AES_IRQENABLE);
+
+ if (src == dst) {
+ diff_dst = false;
+ src_dir = DMA_BIDIRECTIONAL;
+ dst_dir = DMA_BIDIRECTIONAL;
+ } else {
+ diff_dst = true;
+ src_dir = DMA_TO_DEVICE;
+ dst_dir = DMA_FROM_DEVICE;
+ }
+
+ tx_dev = dmaengine_get_dma_device(dev_data->dma_aes_tx);
+ rx_dev = dmaengine_get_dma_device(dev_data->dma_aes_rx);
+
+ src_mapped_nents = dma_map_sg(tx_dev, src, src_nents, src_dir);
+ if (src_mapped_nents == 0) {
+ ret = -EINVAL;
+ goto aes_err;
+ }
+
+ if (!diff_dst) {
+ dst_nents = src_nents;
+ dst_mapped_nents = src_mapped_nents;
+ } else {
+ dst_nents = sg_nents_for_len(dst, len);
+ dst_mapped_nents = dma_map_sg(rx_dev, dst, dst_nents, dst_dir);
+ if (dst_mapped_nents == 0) {
+ dma_unmap_sg(tx_dev, src, src_nents, src_dir);
+ ret = -EINVAL;
+ goto aes_err;
+ }
+ }
+
+ desc_in = dmaengine_prep_slave_sg(dev_data->dma_aes_rx, dst, dst_mapped_nents,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_in) {
+ dev_err(dev_data->dev, "IN prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto aes_prep_err;
+ }
+
+ desc_out = dmaengine_prep_slave_sg(dev_data->dma_aes_tx, src, src_mapped_nents,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_out) {
+ dev_err(dev_data->dev, "OUT prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto aes_prep_err;
+ }
+
+ desc_in->callback = dthe_aes_dma_in_callback;
+ desc_in->callback_param = req;
+
+ init_completion(&rctx->aes_compl);
+
+ if (ctx->aes_mode == DTHE_AES_ECB)
+ dthe_aes_set_ctrl_key(ctx, rctx, NULL);
+ else
+ dthe_aes_set_ctrl_key(ctx, rctx, (u32 *)req->iv);
+
+ writel_relaxed(lower_32_bits(req->cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_0);
+ writel_relaxed(upper_32_bits(req->cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_1);
+
+ dmaengine_submit(desc_in);
+ dmaengine_submit(desc_out);
+
+ dma_async_issue_pending(dev_data->dma_aes_rx);
+ dma_async_issue_pending(dev_data->dma_aes_tx);
+
+ // Wait with a timeout so the request is still finalized if the DMA callback never fires
+ ret = wait_for_completion_timeout(&rctx->aes_compl, msecs_to_jiffies(DTHE_DMA_TIMEOUT_MS));
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ dmaengine_terminate_sync(dev_data->dma_aes_rx);
+ dmaengine_terminate_sync(dev_data->dma_aes_tx);
+
+ for (int i = 0; i < AES_BLOCK_WORDS; ++i)
+ readl_relaxed(aes_base_reg + DTHE_P_AES_DATA_IN_OUT + (DTHE_REG_SIZE * i));
+ } else {
+ ret = 0;
+ }
+
+ // For modes other than ECB, read IV_OUT
+ if (ctx->aes_mode != DTHE_AES_ECB) {
+ u32 *iv_out = (u32 *)req->iv;
+
+ for (int i = 0; i < AES_IV_WORDS; ++i)
+ iv_out[i] = readl_relaxed(aes_base_reg +
+ DTHE_P_AES_IV_IN_0 +
+ (DTHE_REG_SIZE * i));
+ }
+
+aes_prep_err:
+ dma_unmap_sg(tx_dev, src, src_nents, src_dir);
+ if (dst_dir != DMA_BIDIRECTIONAL)
+ dma_unmap_sg(rx_dev, dst, dst_nents, dst_dir);
+
+aes_err:
+ local_bh_disable();
+ crypto_finalize_skcipher_request(dev_data->engine, req, ret);
+ local_bh_enable();
+ return 0;
+}
+
+static int dthe_aes_crypt(struct skcipher_request *req)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+ struct crypto_engine *engine;
+
+ /*
+ * If data is not a multiple of AES_BLOCK_SIZE:
+ * - need to return -EINVAL for ECB, CBC as they are block ciphers
+ * - need to fallback to software as H/W doesn't support Ciphertext Stealing for XTS
+ */
+ if (req->cryptlen % AES_BLOCK_SIZE) {
+ if (ctx->aes_mode == DTHE_AES_XTS) {
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->skcipher_fb);
+
+ skcipher_request_set_callback(subreq, skcipher_request_flags(req),
+ req->base.complete, req->base.data);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+
+ return rctx->enc ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+ }
+ return -EINVAL;
+ }
+
+ /*
+ * If data length input is zero, no need to do any operation.
+ * Except for XTS mode, where data length should be non-zero.
+ */
+ if (req->cryptlen == 0) {
+ if (ctx->aes_mode == DTHE_AES_XTS)
+ return -EINVAL;
+ return 0;
+ }
+
+ engine = dev_data->engine;
+ return crypto_transfer_skcipher_request_to_engine(engine, req);
+}
+
+static int dthe_aes_encrypt(struct skcipher_request *req)
+{
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+
+ rctx->enc = 1;
+ return dthe_aes_crypt(req);
+}
+
+static int dthe_aes_decrypt(struct skcipher_request *req)
+{
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+
+ rctx->enc = 0;
+ return dthe_aes_crypt(req);
+}
+
+static struct skcipher_engine_alg cipher_algs[] = {
+ {
+ .base.init = dthe_cipher_init_tfm,
+ .base.setkey = dthe_aes_ecb_setkey,
+ .base.encrypt = dthe_aes_encrypt,
+ .base.decrypt = dthe_aes_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-dthev2",
+ .cra_priority = 299,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_alignmask = AES_BLOCK_SIZE - 1,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct dthe_tfm_ctx),
+ .cra_reqsize = sizeof(struct dthe_aes_req_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .op.do_one_request = dthe_aes_run,
+ }, /* ECB AES */
+ {
+ .base.init = dthe_cipher_init_tfm,
+ .base.setkey = dthe_aes_cbc_setkey,
+ .base.encrypt = dthe_aes_encrypt,
+ .base.decrypt = dthe_aes_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_IV_SIZE,
+ .base.base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-dthev2",
+ .cra_priority = 299,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_alignmask = AES_BLOCK_SIZE - 1,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct dthe_tfm_ctx),
+ .cra_reqsize = sizeof(struct dthe_aes_req_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .op.do_one_request = dthe_aes_run,
+ }, /* CBC AES */
+ {
+ .base.init = dthe_cipher_xts_init_tfm,
+ .base.exit = dthe_cipher_xts_exit_tfm,
+ .base.setkey = dthe_aes_xts_setkey,
+ .base.encrypt = dthe_aes_encrypt,
+ .base.decrypt = dthe_aes_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE * 2,
+ .base.max_keysize = AES_MAX_KEY_SIZE * 2,
+ .base.ivsize = AES_IV_SIZE,
+ .base.base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-dthev2",
+ .cra_priority = 299,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_alignmask = AES_BLOCK_SIZE - 1,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct dthe_tfm_ctx),
+ .cra_reqsize = sizeof(struct dthe_aes_req_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .op.do_one_request = dthe_aes_run,
+ }, /* XTS AES */
+};
+
+int dthe_register_aes_algs(void)
+{
+ return crypto_engine_register_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs));
+}
+
+void dthe_unregister_aes_algs(void)
+{
+ crypto_engine_unregister_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs));
+}
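A worked example of the control-word computation in dthe_aes_set_ctrl_key(), derived only from the mask definitions at the top of this file:

	/*
	 * CBC-AES-256 encryption with IV save-out sets
	 *     DTHE_AES_CTRL_DIR_ENC       BIT(2)         = 0x00000004
	 *   | DTHE_AES_CTRL_KEYSIZE_32B   BIT(3)|BIT(4)  = 0x00000018
	 *   | AES_CTRL_CBC_MASK           BIT(5)         = 0x00000020
	 *   | DTHE_AES_CTRL_SAVE_CTX_SET  BIT(29)        = 0x20000000
	 * so dthe_aes_set_ctrl_key() writes 0x2000003c to DTHE_P_AES_CTRL.
	 */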
diff --git a/drivers/crypto/ti/dthev2-common.c b/drivers/crypto/ti/dthev2-common.c
new file mode 100644
index 000000000000..c39d37933b9e
--- /dev/null
+++ b/drivers/crypto/ti/dthev2-common.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * K3 DTHE V2 crypto accelerator driver
+ *
+ * Copyright (C) Texas Instruments 2025 - https://www.ti.com
+ * Author: T Pratham <t-pratham@ti.com>
+ */
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+
+#include "dthev2-common.h"
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#define DRIVER_NAME "dthev2"
+
+static struct dthe_list dthe_dev_list = {
+ .dev_list = LIST_HEAD_INIT(dthe_dev_list.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(dthe_dev_list.lock),
+};
+
+struct dthe_data *dthe_get_dev(struct dthe_tfm_ctx *ctx)
+{
+ struct dthe_data *dev_data;
+
+ if (ctx->dev_data)
+ return ctx->dev_data;
+
+ spin_lock_bh(&dthe_dev_list.lock);
+ dev_data = list_first_entry_or_null(&dthe_dev_list.dev_list, struct dthe_data, list);
+ if (dev_data)
+ list_move_tail(&dev_data->list, &dthe_dev_list.dev_list);
+ spin_unlock_bh(&dthe_dev_list.lock);
+
+ return dev_data;
+}
+
+static int dthe_dma_init(struct dthe_data *dev_data)
+{
+ int ret;
+ struct dma_slave_config cfg;
+
+ dev_data->dma_aes_rx = NULL;
+ dev_data->dma_aes_tx = NULL;
+ dev_data->dma_sha_tx = NULL;
+
+ dev_data->dma_aes_rx = dma_request_chan(dev_data->dev, "rx");
+ if (IS_ERR(dev_data->dma_aes_rx)) {
+ return dev_err_probe(dev_data->dev, PTR_ERR(dev_data->dma_aes_rx),
+ "Unable to request rx DMA channel\n");
+ }
+
+ dev_data->dma_aes_tx = dma_request_chan(dev_data->dev, "tx1");
+ if (IS_ERR(dev_data->dma_aes_tx)) {
+ ret = dev_err_probe(dev_data->dev, PTR_ERR(dev_data->dma_aes_tx),
+ "Unable to request tx1 DMA channel\n");
+ goto err_dma_aes_tx;
+ }
+
+ dev_data->dma_sha_tx = dma_request_chan(dev_data->dev, "tx2");
+ if (IS_ERR(dev_data->dma_sha_tx)) {
+ ret = dev_err_probe(dev_data->dev, PTR_ERR(dev_data->dma_sha_tx),
+ "Unable to request tx2 DMA channel\n");
+ goto err_dma_sha_tx;
+ }
+
+ memzero_explicit(&cfg, sizeof(cfg));
+
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = 4;
+
+ ret = dmaengine_slave_config(dev_data->dma_aes_rx, &cfg);
+ if (ret) {
+ dev_err(dev_data->dev, "Can't configure IN dmaengine slave: %d\n", ret);
+ goto err_dma_config;
+ }
+
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_maxburst = 4;
+
+ ret = dmaengine_slave_config(dev_data->dma_aes_tx, &cfg);
+ if (ret) {
+ dev_err(dev_data->dev, "Can't configure OUT dmaengine slave: %d\n", ret);
+ goto err_dma_config;
+ }
+
+ return 0;
+
+err_dma_config:
+ dma_release_channel(dev_data->dma_sha_tx);
+err_dma_sha_tx:
+ dma_release_channel(dev_data->dma_aes_tx);
+err_dma_aes_tx:
+ dma_release_channel(dev_data->dma_aes_rx);
+
+ return ret;
+}
+
+static int dthe_register_algs(void)
+{
+ return dthe_register_aes_algs();
+}
+
+static void dthe_unregister_algs(void)
+{
+ dthe_unregister_aes_algs();
+}
+
+static int dthe_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dthe_data *dev_data;
+ int ret;
+
+ dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+
+ dev_data->dev = dev;
+ dev_data->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dev_data->regs))
+ return PTR_ERR(dev_data->regs);
+
+ platform_set_drvdata(pdev, dev_data);
+
+ spin_lock(&dthe_dev_list.lock);
+ list_add(&dev_data->list, &dthe_dev_list.dev_list);
+ spin_unlock(&dthe_dev_list.lock);
+
+ ret = dthe_dma_init(dev_data);
+ if (ret)
+ goto probe_dma_err;
+
+ dev_data->engine = crypto_engine_alloc_init(dev, 1);
+ if (!dev_data->engine) {
+ ret = -ENOMEM;
+ goto probe_engine_err;
+ }
+
+ ret = crypto_engine_start(dev_data->engine);
+ if (ret) {
+ dev_err(dev, "Failed to start crypto engine\n");
+ goto probe_engine_start_err;
+ }
+
+ ret = dthe_register_algs();
+ if (ret) {
+ dev_err(dev, "Failed to register algs\n");
+ goto probe_engine_start_err;
+ }
+
+ return 0;
+
+probe_engine_start_err:
+ crypto_engine_exit(dev_data->engine);
+probe_engine_err:
+ dma_release_channel(dev_data->dma_aes_rx);
+ dma_release_channel(dev_data->dma_aes_tx);
+ dma_release_channel(dev_data->dma_sha_tx);
+probe_dma_err:
+ spin_lock(&dthe_dev_list.lock);
+ list_del(&dev_data->list);
+ spin_unlock(&dthe_dev_list.lock);
+
+ return ret;
+}
+
+static void dthe_remove(struct platform_device *pdev)
+{
+ struct dthe_data *dev_data = platform_get_drvdata(pdev);
+
+ spin_lock(&dthe_dev_list.lock);
+ list_del(&dev_data->list);
+ spin_unlock(&dthe_dev_list.lock);
+
+ dthe_unregister_algs();
+
+ crypto_engine_exit(dev_data->engine);
+
+ dma_release_channel(dev_data->dma_aes_rx);
+ dma_release_channel(dev_data->dma_aes_tx);
+ dma_release_channel(dev_data->dma_sha_tx);
+}
+
+static const struct of_device_id dthe_of_match[] = {
+ { .compatible = "ti,am62l-dthev2", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dthe_of_match);
+
+static struct platform_driver dthe_driver = {
+ .probe = dthe_probe,
+ .remove = dthe_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = dthe_of_match,
+ },
+};
+
+module_platform_driver(dthe_driver);
+
+MODULE_AUTHOR("T Pratham <t-pratham@ti.com>");
+MODULE_DESCRIPTION("Texas Instruments DTHE V2 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/ti/dthev2-common.h b/drivers/crypto/ti/dthev2-common.h
new file mode 100644
index 000000000000..c7a06a4c353f
--- /dev/null
+++ b/drivers/crypto/ti/dthev2-common.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * K3 DTHE V2 crypto accelerator driver
+ *
+ * Copyright (C) Texas Instruments 2025 - https://www.ti.com
+ * Author: T Pratham <t-pratham@ti.com>
+ */
+
+#ifndef __TI_DTHEV2_H__
+#define __TI_DTHEV2_H__
+
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/hash.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/scatterlist.h>
+
+#define DTHE_REG_SIZE 4
+#define DTHE_DMA_TIMEOUT_MS 2000
+/*
+ * Size of the largest possible key (across all algorithms) stored in dthe_tfm_ctx.
+ * This is currently the key size of XTS-AES-256: 512 bits (64 bytes).
+ */
+#define DTHE_MAX_KEYSIZE (AES_MAX_KEY_SIZE * 2)
+
+enum dthe_aes_mode {
+ DTHE_AES_ECB = 0,
+ DTHE_AES_CBC,
+ DTHE_AES_XTS,
+};
+
+/* Driver specific struct definitions */
+
+/**
+ * struct dthe_data - DTHE_V2 driver instance data
+ * @dev: Device pointer
+ * @regs: Base address of the register space
+ * @list: list node for dev
+ * @engine: Crypto engine instance
+ * @dma_aes_rx: AES Rx DMA Channel
+ * @dma_aes_tx: AES Tx DMA Channel
+ * @dma_sha_tx: SHA Tx DMA Channel
+ */
+struct dthe_data {
+ struct device *dev;
+ void __iomem *regs;
+ struct list_head list;
+ struct crypto_engine *engine;
+
+ struct dma_chan *dma_aes_rx;
+ struct dma_chan *dma_aes_tx;
+
+ struct dma_chan *dma_sha_tx;
+};
+
+/**
+ * struct dthe_list - device data list head
+ * @dev_list: linked list head
+ * @lock: Spinlock protecting accesses to the list
+ */
+struct dthe_list {
+ struct list_head dev_list;
+ spinlock_t lock;
+};
+
+/**
+ * struct dthe_tfm_ctx - Transform ctx struct containing ctx for all sub-components of DTHE V2
+ * @dev_data: Device data struct pointer
+ * @keylen: AES key length
+ * @key: AES key
+ * @aes_mode: AES mode
+ * @skcipher_fb: Fallback crypto skcipher handle for AES-XTS mode
+ */
+struct dthe_tfm_ctx {
+ struct dthe_data *dev_data;
+ unsigned int keylen;
+ u32 key[DTHE_MAX_KEYSIZE / sizeof(u32)];
+ enum dthe_aes_mode aes_mode;
+ struct crypto_sync_skcipher *skcipher_fb;
+};
+
+/**
+ * struct dthe_aes_req_ctx - AES engine req ctx struct
+ * @enc: flag indicating encryption or decryption operation
+ * @aes_compl: Completion used to wait for the DMA callback, with a timeout in case it never fires
+ */
+struct dthe_aes_req_ctx {
+ int enc;
+ struct completion aes_compl;
+};
+
+/* Struct definitions end */
+
+struct dthe_data *dthe_get_dev(struct dthe_tfm_ctx *ctx);
+
+int dthe_register_aes_algs(void);
+void dthe_unregister_aes_algs(void);
+
+#endif
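The key layout the XTS paths assume, spelled out for AES-256-XTS (a 64-byte key, the DTHE_MAX_KEYSIZE case):

	/*
	 * dthe_aes_xts_setkey() stores the two half-keys back to back in
	 * ctx->key[], and dthe_aes_set_ctrl_key() indexes the second half
	 * at key2_offset = ctx->keylen / sizeof(u32). For AES-256-XTS
	 * (keylen = 32 per half):
	 *     ctx->key[0..7]  -> KEY1 registers (data key)
	 *     ctx->key[8..15] -> KEY2 registers (tweak key), offset 8
	 */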
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index 7059bbe5a2eb..19c934af3df6 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -113,8 +113,6 @@ struct virtio_crypto_request {
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
struct list_head *virtcrypto_devmgr_get_head(void);
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
-struct virtio_crypto *virtcrypto_devmgr_get_first(void);
-int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 0d522049f595..3d241446099c 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -139,7 +139,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
spin_lock_init(&vi->data_vq[i].lock);
vi->data_vq[i].vq = vqs[i];
/* Initialize crypto engine */
- vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true,
+ vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, true,
virtqueue_get_vring_size(vqs[i]));
if (!vi->data_vq[i].engine) {
ret = -ENOMEM;
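After this API change the helper appears to take four arguments, (dev, retry_support, rt, qlen), the do-batch callback slot (always NULL at this call site) having been dropped. A sketch under that assumption:

	engine = crypto_engine_alloc_init_and_set(dev, true, true, qlen);
	if (!engine)
		return -ENOMEM;	/* allocation failure, as handled above */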
diff --git a/drivers/crypto/virtio/virtio_crypto_mgr.c b/drivers/crypto/virtio/virtio_crypto_mgr.c
index bddbd8ebfebe..06c74fa132cd 100644
--- a/drivers/crypto/virtio/virtio_crypto_mgr.c
+++ b/drivers/crypto/virtio/virtio_crypto_mgr.c
@@ -82,42 +82,6 @@ void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev)
}
/*
- * virtcrypto_devmgr_get_first()
- *
- * Function returns the first virtio crypto device from the acceleration
- * framework.
- *
- * To be used by virtio crypto device specific drivers.
- *
- * Return: pointer to vcrypto_dev or NULL if not found.
- */
-struct virtio_crypto *virtcrypto_devmgr_get_first(void)
-{
- struct virtio_crypto *dev = NULL;
-
- mutex_lock(&table_lock);
- if (!list_empty(&virtio_crypto_table))
- dev = list_first_entry(&virtio_crypto_table,
- struct virtio_crypto,
- list);
- mutex_unlock(&table_lock);
- return dev;
-}
-
-/*
- * virtcrypto_dev_in_use() - Check whether vcrypto_dev is currently in use
- * @vcrypto_dev: Pointer to virtio crypto device.
- *
- * To be used by virtio crypto device specific drivers.
- *
- * Return: 1 when device is in use, 0 otherwise.
- */
-int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev)
-{
- return atomic_read(&vcrypto_dev->ref_count) != 0;
-}
-
-/*
* virtcrypto_dev_get() - Increment vcrypto_dev reference count
* @vcrypto_dev: Pointer to virtio crypto device.
*
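
With virtcrypto_devmgr_get_first() and virtcrypto_dev_in_use() removed, callers are expected to pin a device with the get/put pair that survives in the header. A minimal sketch of that idiom — run_request() is a hypothetical placeholder, and the int-returning virtcrypto_dev_get() is taken from the header declaration above:

/* Pin the device across an operation instead of polling "in use". */
static int do_crypto_op(struct virtio_crypto *vcrypto_dev)
{
	int ret;

	ret = virtcrypto_dev_get(vcrypto_dev);	/* take a reference */
	if (ret)
		return ret;

	ret = run_request(vcrypto_dev);		/* hypothetical work */

	virtcrypto_dev_put(vcrypto_dev);	/* drop the reference */
	return ret;
}
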
diff --git a/drivers/crypto/xilinx/Makefile b/drivers/crypto/xilinx/Makefile
index 730feff5b5f2..9b51636ef75e 100644
--- a/drivers/crypto/xilinx/Makefile
+++ b/drivers/crypto/xilinx/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_XILINX_TRNG) += xilinx-trng.o
obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += zynqmp-aes-gcm.o
obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_SHA3) += zynqmp-sha.o
diff --git a/drivers/crypto/xilinx/xilinx-trng.c b/drivers/crypto/xilinx/xilinx-trng.c
new file mode 100644
index 000000000000..db0fbb28ff32
--- /dev/null
+++ b/drivers/crypto/xilinx/xilinx-trng.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Versal True Random Number Generator driver
+ * Copyright (c) 2024 - 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <crypto/aes.h>
+#include <crypto/df_sp80090a.h>
+#include <crypto/internal/drbg.h>
+#include <crypto/internal/cipher.h>
+#include <crypto/internal/rng.h>
+
+/* TRNG Registers Offsets */
+#define TRNG_STATUS_OFFSET 0x4U
+#define TRNG_CTRL_OFFSET 0x8U
+#define TRNG_EXT_SEED_OFFSET 0x40U
+#define TRNG_PER_STRNG_OFFSET 0x80U
+#define TRNG_CORE_OUTPUT_OFFSET 0xC0U
+#define TRNG_RESET_OFFSET 0xD0U
+#define TRNG_OSC_EN_OFFSET 0xD4U
+
+/* Mask values */
+#define TRNG_RESET_VAL_MASK BIT(0)
+#define TRNG_OSC_EN_VAL_MASK BIT(0)
+#define TRNG_CTRL_PRNGSRST_MASK BIT(0)
+#define TRNG_CTRL_EUMODE_MASK BIT(8)
+#define TRNG_CTRL_TRSSEN_MASK BIT(2)
+#define TRNG_CTRL_PRNGSTART_MASK BIT(5)
+#define TRNG_CTRL_PRNGXS_MASK BIT(3)
+#define TRNG_CTRL_PRNGMODE_MASK BIT(7)
+#define TRNG_STATUS_DONE_MASK BIT(0)
+#define TRNG_STATUS_QCNT_MASK GENMASK(11, 9)
+#define TRNG_STATUS_QCNT_16_BYTES 0x800
+
+/* Sizes in bytes */
+#define TRNG_SEED_LEN_BYTES 48U
+#define TRNG_ENTROPY_SEED_LEN_BYTES 64U
+#define TRNG_SEC_STRENGTH_SHIFT 5U
+#define TRNG_SEC_STRENGTH_BYTES BIT(TRNG_SEC_STRENGTH_SHIFT)
+#define TRNG_BYTES_PER_REG 4U
+#define TRNG_RESET_DELAY 10
+#define TRNG_NUM_INIT_REGS 12U
+#define TRNG_READ_4_WORD 4
+#define TRNG_DATA_READ_DELAY 8000
+
+struct xilinx_rng {
+ void __iomem *rng_base;
+ struct device *dev;
+ unsigned char *scratchpadbuf;
+ struct crypto_aes_ctx *aesctx;
+ struct mutex lock; /* Protect access to TRNG device */
+ struct hwrng trng;
+};
+
+struct xilinx_rng_ctx {
+ struct xilinx_rng *rng;
+};
+
+static struct xilinx_rng *xilinx_rng_dev;
+
+static void xtrng_readwrite32(void __iomem *addr, u32 mask, u8 value)
+{
+ u32 val;
+
+ val = ioread32(addr);
+ val = (val & (~mask)) | (mask & value);
+ iowrite32(val, addr);
+}
+
+static void xtrng_trng_reset(void __iomem *addr)
+{
+ xtrng_readwrite32(addr + TRNG_RESET_OFFSET, TRNG_RESET_VAL_MASK, TRNG_RESET_VAL_MASK);
+ udelay(TRNG_RESET_DELAY);
+ xtrng_readwrite32(addr + TRNG_RESET_OFFSET, TRNG_RESET_VAL_MASK, 0);
+}
+
+static void xtrng_hold_reset(void __iomem *addr)
+{
+ xtrng_readwrite32(addr + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSRST_MASK,
+ TRNG_CTRL_PRNGSRST_MASK);
+ iowrite32(TRNG_RESET_VAL_MASK, addr + TRNG_RESET_OFFSET);
+ udelay(TRNG_RESET_DELAY);
+}
+
+static void xtrng_softreset(struct xilinx_rng *rng)
+{
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSRST_MASK,
+ TRNG_CTRL_PRNGSRST_MASK);
+ udelay(TRNG_RESET_DELAY);
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSRST_MASK, 0);
+}
+
+/* Returns the number of bytes read */
+static size_t xtrng_readblock32(void __iomem *rng_base, __be32 *buf, int blocks32, bool wait)
+{
+ int read = 0, ret;
+ int timeout = 1;
+ int i, idx;
+ u32 val;
+
+ if (wait)
+ timeout = TRNG_DATA_READ_DELAY;
+
+ for (i = 0; i < (blocks32 * 2); i++) {
+ /* The TRNG core generates data 16 bytes at a time; read twice to complete a 32-byte read */
+ ret = readl_poll_timeout(rng_base + TRNG_STATUS_OFFSET, val,
+ (val & TRNG_STATUS_QCNT_MASK) ==
+ TRNG_STATUS_QCNT_16_BYTES, !!wait, timeout);
+ if (ret)
+ break;
+
+ for (idx = 0; idx < TRNG_READ_4_WORD; idx++) {
+ *(buf + read) = cpu_to_be32(ioread32(rng_base + TRNG_CORE_OUTPUT_OFFSET));
+ read += 1;
+ }
+ }
+ return read * 4;
+}
+
+static int xtrng_collect_random_data(struct xilinx_rng *rng, u8 *rand_gen_buf,
+ int no_of_random_bytes, bool wait)
+{
+ u8 randbuf[TRNG_SEC_STRENGTH_BYTES];
+ int byteleft, blocks, count = 0;
+ int ret;
+
+ byteleft = no_of_random_bytes & (TRNG_SEC_STRENGTH_BYTES - 1);
+ blocks = no_of_random_bytes >> TRNG_SEC_STRENGTH_SHIFT;
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSTART_MASK,
+ TRNG_CTRL_PRNGSTART_MASK);
+ if (blocks) {
+ ret = xtrng_readblock32(rng->rng_base, (__be32 *)rand_gen_buf, blocks, wait);
+ if (!ret)
+ return 0;
+ count += ret;
+ }
+
+ if (byteleft) {
+ ret = xtrng_readblock32(rng->rng_base, (__be32 *)randbuf, 1, wait);
+ if (!ret)
+ return count;
+ memcpy(rand_gen_buf + (blocks * TRNG_SEC_STRENGTH_BYTES), randbuf, byteleft);
+ count += byteleft;
+ }
+
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET,
+ TRNG_CTRL_PRNGMODE_MASK | TRNG_CTRL_PRNGSTART_MASK, 0U);
+
+ return count;
+}
+
+static void xtrng_write_multiple_registers(void __iomem *base_addr, u32 *values, size_t n)
+{
+ void __iomem *reg_addr;
+ size_t i;
+
+ /* Write seed value into EXTERNAL_SEED Registers in big endian format */
+ for (i = 0; i < n; i++) {
+ reg_addr = (base_addr + ((n - 1 - i) * TRNG_BYTES_PER_REG));
+ iowrite32((u32 __force)(cpu_to_be32(values[i])), reg_addr);
+ }
+}
+
+static void xtrng_enable_entropy(struct xilinx_rng *rng)
+{
+ iowrite32(TRNG_OSC_EN_VAL_MASK, rng->rng_base + TRNG_OSC_EN_OFFSET);
+ xtrng_softreset(rng);
+ iowrite32(TRNG_CTRL_EUMODE_MASK | TRNG_CTRL_TRSSEN_MASK, rng->rng_base + TRNG_CTRL_OFFSET);
+}
+
+static int xtrng_reseed_internal(struct xilinx_rng *rng)
+{
+ u8 entropy[TRNG_ENTROPY_SEED_LEN_BYTES];
+ struct drbg_string data;
+ LIST_HEAD(seedlist);
+ u32 val;
+ int ret;
+
+ drbg_string_fill(&data, entropy, TRNG_SEED_LEN_BYTES);
+ list_add_tail(&data.list, &seedlist);
+ memset(entropy, 0, sizeof(entropy));
+ xtrng_enable_entropy(rng);
+
+ /* Collect random data to use as entropy (input to the derivation function) */
+ ret = xtrng_collect_random_data(rng, entropy, TRNG_SEED_LEN_BYTES, true);
+ if (ret != TRNG_SEED_LEN_BYTES)
+ return -EINVAL;
+ ret = crypto_drbg_ctr_df(rng->aesctx, rng->scratchpadbuf,
+ TRNG_SEED_LEN_BYTES, &seedlist, AES_BLOCK_SIZE,
+ TRNG_SEED_LEN_BYTES);
+ if (ret)
+ return ret;
+
+ xtrng_write_multiple_registers(rng->rng_base + TRNG_EXT_SEED_OFFSET,
+ (u32 *)rng->scratchpadbuf, TRNG_NUM_INIT_REGS);
+ /* select reseed operation */
+ iowrite32(TRNG_CTRL_PRNGXS_MASK, rng->rng_base + TRNG_CTRL_OFFSET);
+
+ /* Start the reseed operation with the above configuration and wait for the
+  * STATUS.DONE bit to be set. Monitor the STATUS.CERTF bit; if set, the
+  * SP800-90B entropy health test has failed.
+  */
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSTART_MASK,
+ TRNG_CTRL_PRNGSTART_MASK);
+
+ ret = readl_poll_timeout(rng->rng_base + TRNG_STATUS_OFFSET, val,
+ (val & TRNG_STATUS_DONE_MASK) == TRNG_STATUS_DONE_MASK,
+ 1U, 15000U);
+ if (ret)
+ return ret;
+
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSTART_MASK, 0U);
+
+ return 0;
+}
+
+static int xtrng_random_bytes_generate(struct xilinx_rng *rng, u8 *rand_buf_ptr,
+ u32 rand_buf_size, int wait)
+{
+ int nbytes;
+ int ret;
+
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET,
+ TRNG_CTRL_PRNGMODE_MASK | TRNG_CTRL_PRNGXS_MASK,
+ TRNG_CTRL_PRNGMODE_MASK | TRNG_CTRL_PRNGXS_MASK);
+ nbytes = xtrng_collect_random_data(rng, rand_buf_ptr, rand_buf_size, wait);
+
+ ret = xtrng_reseed_internal(rng);
+ if (ret) {
+ dev_err(rng->dev, "Re-seed fail\n");
+ return ret;
+ }
+
+ return nbytes;
+}
+
+static int xtrng_trng_generate(struct crypto_rng *tfm, const u8 *src, u32 slen,
+ u8 *dst, u32 dlen)
+{
+ struct xilinx_rng_ctx *ctx = crypto_rng_ctx(tfm);
+ int ret;
+
+ mutex_lock(&ctx->rng->lock);
+ ret = xtrng_random_bytes_generate(ctx->rng, dst, dlen, true);
+ mutex_unlock(&ctx->rng->lock);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int xtrng_trng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
+{
+ return 0;
+}
+
+static int xtrng_trng_init(struct crypto_tfm *rtfm)
+{
+ struct xilinx_rng_ctx *ctx = crypto_tfm_ctx(rtfm);
+
+ ctx->rng = xilinx_rng_dev;
+
+ return 0;
+}
+
+static struct rng_alg xtrng_trng_alg = {
+ .generate = xtrng_trng_generate,
+ .seed = xtrng_trng_seed,
+ .seedsize = 0,
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "xilinx-trng",
+ .cra_priority = 300,
+ .cra_ctxsize = sizeof(struct xilinx_rng_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = xtrng_trng_init,
+ },
+};
+
+static int xtrng_hwrng_trng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
+{
+ u8 buf[TRNG_SEC_STRENGTH_BYTES];
+ struct xilinx_rng *rng;
+ int ret = -EINVAL, i = 0;
+
+ rng = container_of(hwrng, struct xilinx_rng, trng);
+ /* Bail out if the lock is contended and the caller cannot wait. */
+ if (!mutex_trylock(&rng->lock)) {
+ 	if (!wait)
+ 		return 0;
+ 	mutex_lock(&rng->lock);
+ }
+
+ while (i < max) {
+ ret = xtrng_random_bytes_generate(rng, buf, TRNG_SEC_STRENGTH_BYTES, wait);
+ if (ret < 0)
+ break;
+
+ memcpy(data + i, buf, min_t(int, ret, (max - i)));
+ i += min_t(int, ret, (max - i));
+ }
+ mutex_unlock(&rng->lock);
+
+ return ret;
+}
+
+static int xtrng_hwrng_register(struct hwrng *trng)
+{
+ int ret;
+
+ trng->name = "Xilinx Versal Crypto Engine TRNG";
+ trng->read = xtrng_hwrng_trng_read;
+
+ ret = hwrng_register(trng);
+ if (ret)
+ pr_err("Fail to register the TRNG\n");
+
+ return ret;
+}
+
+static void xtrng_hwrng_unregister(struct hwrng *trng)
+{
+ hwrng_unregister(trng);
+}
+
+static int xtrng_probe(struct platform_device *pdev)
+{
+ struct xilinx_rng *rng;
+ size_t sb_size;
+ int ret;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ rng->dev = &pdev->dev;
+ rng->rng_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rng->rng_base)) {
+ dev_err(&pdev->dev, "Failed to map resource %pe\n", rng->rng_base);
+ return PTR_ERR(rng->rng_base);
+ }
+
+ rng->aesctx = devm_kzalloc(&pdev->dev, sizeof(*rng->aesctx), GFP_KERNEL);
+ if (!rng->aesctx)
+ return -ENOMEM;
+
+ sb_size = crypto_drbg_ctr_df_datalen(TRNG_SEED_LEN_BYTES, AES_BLOCK_SIZE);
+ rng->scratchpadbuf = devm_kzalloc(&pdev->dev, sb_size, GFP_KERNEL);
+ if (!rng->scratchpadbuf) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ xtrng_trng_reset(rng->rng_base);
+ ret = xtrng_reseed_internal(rng);
+ if (ret) {
+ dev_err(&pdev->dev, "TRNG Seed fail\n");
+ goto end;
+ }
+
+ xilinx_rng_dev = rng;
+ mutex_init(&rng->lock);
+ ret = crypto_register_rng(&xtrng_trng_alg);
+ if (ret) {
+ dev_err(&pdev->dev, "Crypto Random device registration failed: %d\n", ret);
+ goto end;
+ }
+
+ ret = xtrng_hwrng_register(&rng->trng);
+ if (ret) {
+ dev_err(&pdev->dev, "HWRNG device registration failed: %d\n", ret);
+ goto crypto_rng_free;
+ }
+ platform_set_drvdata(pdev, rng);
+
+ return 0;
+
+crypto_rng_free:
+ crypto_unregister_rng(&xtrng_trng_alg);
+
+end:
+ return ret;
+}
+
+static void xtrng_remove(struct platform_device *pdev)
+{
+ struct xilinx_rng *rng;
+ u32 zero[TRNG_NUM_INIT_REGS] = { };
+
+ rng = platform_get_drvdata(pdev);
+ xtrng_hwrng_unregister(&rng->trng);
+ crypto_unregister_rng(&xtrng_trng_alg);
+ xtrng_write_multiple_registers(rng->rng_base + TRNG_EXT_SEED_OFFSET, zero,
+ TRNG_NUM_INIT_REGS);
+ xtrng_write_multiple_registers(rng->rng_base + TRNG_PER_STRNG_OFFSET, zero,
+ TRNG_NUM_INIT_REGS);
+ xtrng_hold_reset(rng->rng_base);
+ xilinx_rng_dev = NULL;
+}
+
+static const struct of_device_id xtrng_of_match[] = {
+ { .compatible = "xlnx,versal-trng", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, xtrng_of_match);
+
+static struct platform_driver xtrng_driver = {
+ .driver = {
+ .name = "xlnx,versal-trng",
+ .of_match_table = xtrng_of_match,
+ },
+ .probe = xtrng_probe,
+ .remove = xtrng_remove,
+};
+
+module_platform_driver(xtrng_driver);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harsh Jain <h.jain@amd.com>");
+MODULE_AUTHOR("Mounika Botcha <mounika.botcha@amd.com>");
+MODULE_DESCRIPTION("True Random Number Generator Driver");
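
xtrng_collect_random_data() above splits a request into full 32-byte blocks plus a tail using mask-and-shift arithmetic on TRNG_SEC_STRENGTH_BYTES, which is a power of two. A small, runnable demonstration of that split:

#include <stdio.h>

#define SEC_STRENGTH_SHIFT 5
#define SEC_STRENGTH_BYTES (1 << SEC_STRENGTH_SHIFT)	/* 32 */

int main(void)
{
	int sizes[] = { 16, 32, 48, 100 };

	for (int i = 0; i < 4; i++) {
		int n = sizes[i];
		int tail = n & (SEC_STRENGTH_BYTES - 1);  /* n % 32 */
		int blocks = n >> SEC_STRENGTH_SHIFT;     /* n / 32 */

		/* e.g. n=100 -> 3 full blocks + 4 tail bytes */
		printf("%3d bytes -> %d blocks + %d tail\n", n, blocks, tail);
	}
	return 0;
}
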
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index a1a99ec3f12c..77ac940e3013 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -11,38 +11,52 @@
#include "cxlpci.h"
#include "cxl.h"
-struct cxl_cxims_data {
- int nr_maps;
- u64 xormaps[] __counted_by(nr_maps);
-};
-
static const guid_t acpi_cxl_qtg_id_guid =
GUID_INIT(0xF365F9A6, 0xA7DE, 0x4071,
0xA6, 0x6A, 0xB4, 0x0C, 0x0B, 0x4F, 0x8E, 0x52);
+#define HBIW_TO_NR_MAPS_SIZE (CXL_DECODER_MAX_INTERLEAVE + 1)
+static const int hbiw_to_nr_maps[HBIW_TO_NR_MAPS_SIZE] = {
+ [1] = 0, [2] = 1, [3] = 0, [4] = 2, [6] = 1, [8] = 3, [12] = 2, [16] = 4
+};
+
+static const int valid_hbiw[] = { 1, 2, 3, 4, 6, 8, 12, 16 };
-static u64 cxl_xor_hpa_to_spa(struct cxl_root_decoder *cxlrd, u64 hpa)
+u64 cxl_do_xormap_calc(struct cxl_cxims_data *cximsd, u64 addr, int hbiw)
{
- struct cxl_cxims_data *cximsd = cxlrd->platform_data;
- int hbiw = cxlrd->cxlsd.nr_targets;
+ int nr_maps_to_apply = -1;
u64 val;
int pos;
- /* No xormaps for host bridge interleave ways of 1 or 3 */
- if (hbiw == 1 || hbiw == 3)
- return hpa;
+ /*
+  * Strictly validate hbiw: this function is also exercised by test code,
+  * which nullifies any expectation that the parameters arrive from a
+  * trusted CXL region driver.
+  */
+ for (int i = 0; i < ARRAY_SIZE(valid_hbiw); i++) {
+ if (valid_hbiw[i] == hbiw) {
+ nr_maps_to_apply = hbiw_to_nr_maps[hbiw];
+ break;
+ }
+ }
+ if (nr_maps_to_apply == -1 || nr_maps_to_apply > cximsd->nr_maps)
+ return ULLONG_MAX;
/*
- * For root decoders using xormaps (hbiw: 2,4,6,8,12,16) restore
- * the position bit to its value before the xormap was applied at
- * HPA->DPA translation.
+ * In regions using XOR interleave arithmetic, the CXL HPA may not
+ * be the same as the SPA. This helper performs the SPA->CXL HPA
+ * or the CXL HPA->SPA translation. Since XOR is self-inverting,
+ * so is this function.
+ *
+ * For root decoders using xormaps (hbiw: 2,4,6,8,12,16) applying the
+ * xormaps will toggle a position bit.
*
* pos is the lowest set bit in an XORMAP
- * val is the XORALLBITS(HPA & XORMAP)
+ * val is the XORALLBITS(addr & XORMAP)
*
* XORALLBITS: The CXL spec (3.1 Table 9-22) defines XORALLBITS
* as an operation that outputs a single bit by XORing all the
- * bits in the input (hpa & xormap). Implement XORALLBITS using
+ * bits in the input (addr & xormap). Implement XORALLBITS using
* hweight64(). If the hamming weight is even the XOR of those
* bits results in val==0, if odd the XOR result is val==1.
*/
@@ -51,11 +65,19 @@ static u64 cxl_xor_hpa_to_spa(struct cxl_root_decoder *cxlrd, u64 hpa)
if (!cximsd->xormaps[i])
continue;
pos = __ffs(cximsd->xormaps[i]);
- val = (hweight64(hpa & cximsd->xormaps[i]) & 1);
- hpa = (hpa & ~(1ULL << pos)) | (val << pos);
+ val = (hweight64(addr & cximsd->xormaps[i]) & 1);
+ addr = (addr & ~(1ULL << pos)) | (val << pos);
}
- return hpa;
+ return addr;
+}
+EXPORT_SYMBOL_FOR_MODULES(cxl_do_xormap_calc, "cxl_translate");
+
+static u64 cxl_apply_xor_maps(struct cxl_root_decoder *cxlrd, u64 addr)
+{
+ struct cxl_cxims_data *cximsd = cxlrd->platform_data;
+
+ return cxl_do_xormap_calc(cximsd, addr, cxlrd->cxlsd.nr_targets);
}
struct cxl_cxims_context {
@@ -113,9 +135,9 @@ static unsigned long cfmws_to_decoder_flags(int restrictions)
{
unsigned long flags = CXL_DECODER_F_ENABLE;
- if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
+ if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_DEVMEM)
flags |= CXL_DECODER_F_TYPE2;
- if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
+ if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM)
flags |= CXL_DECODER_F_TYPE3;
if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
flags |= CXL_DECODER_F_RAM;
@@ -335,13 +357,66 @@ static int add_or_reset_cxl_resource(struct resource *parent, struct resource *r
return rc;
}
+static int cxl_acpi_set_cache_size(struct cxl_root_decoder *cxlrd)
+{
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
+ struct range *hpa = &cxld->hpa_range;
+ resource_size_t size = range_len(hpa);
+ resource_size_t start = hpa->start;
+ resource_size_t cache_size;
+ struct resource res;
+ int nid, rc;
+
+ res = DEFINE_RES_MEM(start, size);
+ nid = phys_to_target_node(start);
+
+ rc = hmat_get_extended_linear_cache_size(&res, nid, &cache_size);
+ if (rc)
+ return 0;
+
+ /*
+  * The cache range is expected to be within the CFMWS. Currently
+  * only cache_size == cxl_size is supported, in which case the CXL
+  * size is half of the total CFMWS window size.
+  */
+ size = size >> 1;
+ if (cache_size && size != cache_size) {
+ dev_warn(&cxld->dev,
+ "Extended Linear Cache size %pa != CXL size %pa. No Support!",
+ &cache_size, &size);
+ return -ENXIO;
+ }
+
+ cxlrd->cache_size = cache_size;
+
+ return 0;
+}
+
+static void cxl_setup_extended_linear_cache(struct cxl_root_decoder *cxlrd)
+{
+ int rc;
+
+ rc = cxl_acpi_set_cache_size(cxlrd);
+ if (rc) {
+ /*
+  * Failing to retrieve the extended linear cache size does not
+  * prevent the region from functioning; it only causes 'cxl list'
+  * to report an incorrect region size.
+  */
+ dev_warn(cxlrd->cxlsd.cxld.dev.parent,
+ "Extended linear cache retrieval failed rc:%d\n", rc);
+
+ /* Ignoring return code */
+ cxlrd->cache_size = 0;
+ }
+}
+
DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *,
if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
struct cxl_cfmws_context *ctx)
{
- int target_map[CXL_DECODER_MAX_INTERLEAVE];
struct cxl_port *root_port = ctx->root_port;
struct cxl_cxims_context cxims_ctx;
struct device *dev = ctx->dev;
@@ -359,8 +434,6 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
rc = eig_to_granularity(cfmws->granularity, &ig);
if (rc)
return rc;
- for (i = 0; i < ways; i++)
- target_map[i] = cfmws->interleave_targets[i];
struct resource *res __free(del_cxl_resource) = alloc_cxl_resource(
cfmws->base_hpa, cfmws->window_size, ctx->id++);
@@ -386,6 +459,8 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
.end = cfmws->base_hpa + cfmws->window_size - 1,
};
cxld->interleave_ways = ways;
+ for (i = 0; i < ways; i++)
+ cxld->target_map[i] = cfmws->interleave_targets[i];
/*
* Minimize the x1 granularity to advertise support for any
* valid region granularity
@@ -409,14 +484,15 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
return -EINVAL;
}
}
+ cxlrd->ops.hpa_to_spa = cxl_apply_xor_maps;
+ cxlrd->ops.spa_to_hpa = cxl_apply_xor_maps;
}
- cxlrd->qos_class = cfmws->qtg_id;
+ cxl_setup_extended_linear_cache(cxlrd);
- if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR)
- cxlrd->hpa_to_spa = cxl_xor_hpa_to_spa;
+ cxlrd->qos_class = cfmws->qtg_id;
- rc = cxl_decoder_add(cxld, target_map);
+ rc = cxl_decoder_add(cxld);
if (rc)
return rc;
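
The XORALLBITS comment in the acpi.c hunks above reduces to: take the parity of (addr & xormap) and substitute it into the lowest set bit of the map. Because XOR is self-inverting, applying the same maps twice restores the original address. A runnable demonstration under made-up xormap values (the real maps come from the CXIMS table):

#include <stdio.h>
#include <stdint.h>

static uint64_t apply_xormap(uint64_t addr, uint64_t map)
{
	int pos = __builtin_ctzll(map);			/* lowest set bit */
	uint64_t val = __builtin_popcountll(addr & map) & 1; /* XORALLBITS */

	return (addr & ~(1ULL << pos)) | (val << pos);
}

int main(void)
{
	uint64_t maps[2] = { 0x10100, 0x2200 };	/* made-up xormaps */
	uint64_t hpa = 0x123456789a, addr = hpa;

	for (int i = 0; i < 2; i++)
		addr = apply_xormap(addr, maps[i]);	/* HPA -> SPA */
	for (int i = 0; i < 2; i++)
		addr = apply_xormap(addr, maps[i]);	/* SPA -> HPA */

	printf("round-trip %s\n", addr == hpa ? "restored" : "BROKEN");
	return 0;
}
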
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 79e2ef81fde8..5ad8fef210b5 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -15,7 +15,6 @@ cxl_core-y += hdm.o
cxl_core-y += pmu.o
cxl_core-y += cdat.o
cxl_core-y += ras.o
-cxl_core-y += acpi.o
cxl_core-$(CONFIG_TRACING) += trace.o
cxl_core-$(CONFIG_CXL_REGION) += region.o
cxl_core-$(CONFIG_CXL_MCE) += mce.o
diff --git a/drivers/cxl/core/acpi.c b/drivers/cxl/core/acpi.c
deleted file mode 100644
index f13b4dae6ac5..000000000000
--- a/drivers/cxl/core/acpi.c
+++ /dev/null
@@ -1,11 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2024 Intel Corporation. All rights reserved. */
-#include <linux/acpi.h>
-#include "cxl.h"
-#include "core.h"
-
-int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
- int nid, resource_size_t *size)
-{
- return hmat_get_extended_linear_cache_size(backing_res, nid, size);
-}
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index 0ccef2f2a26a..7120b5f2e31f 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -336,9 +336,9 @@ static int match_cxlrd_hb(struct device *dev, void *data)
cxlrd = to_cxl_root_decoder(dev);
cxlsd = &cxlrd->cxlsd;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
for (int i = 0; i < cxlsd->nr_targets; i++) {
- if (host_bridge == cxlsd->target[i]->dport_dev)
+ if (cxlsd->target[i] && host_bridge == cxlsd->target[i]->dport_dev)
return 1;
}
@@ -440,8 +440,8 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
} *tbl = (struct acpi_cdat_sslbis_table *)header;
int size = sizeof(header->cdat) + sizeof(tbl->sslbis_header);
struct acpi_cdat_sslbis *sslbis;
- struct cxl_port *port = arg;
- struct device *dev = &port->dev;
+ struct cxl_dport *dport = arg;
+ struct device *dev = &dport->port->dev;
int remain, entries, i;
u16 len;
@@ -467,8 +467,6 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
u16 y = le16_to_cpu((__force __le16)tbl->entries[i].porty_id);
__le64 le_base;
__le16 le_val;
- struct cxl_dport *dport;
- unsigned long index;
u16 dsp_id;
u64 val;
@@ -499,28 +497,27 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
sslbis->data_type);
- xa_for_each(&port->dports, index, dport) {
- if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
- dsp_id == dport->port_id) {
- cxl_access_coordinate_set(dport->coord,
- sslbis->data_type,
- val);
- }
+ if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
+ dsp_id == dport->port_id) {
+ cxl_access_coordinate_set(dport->coord,
+ sslbis->data_type, val);
+ return 0;
}
}
return 0;
}
-void cxl_switch_parse_cdat(struct cxl_port *port)
+void cxl_switch_parse_cdat(struct cxl_dport *dport)
{
+ struct cxl_port *port = dport->port;
int rc;
if (!port->cdat.table)
return;
rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler,
- port, port->cdat.table, port->cdat.length);
+ dport, port->cdat.table, port->cdat.length);
rc = cdat_table_parse_output(rc);
if (rc)
dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
@@ -829,7 +826,7 @@ static struct xarray *cxl_switch_gather_bandwidth(struct cxl_region *cxlr,
cxl_coordinates_combine(coords, coords, ctx->coord);
/*
- * Take the min of the calculated bandwdith and the upstream
+ * Take the min of the calculated bandwidth and the upstream
* switch SSLBIS bandwidth if there's a parent switch
*/
if (!is_root)
@@ -952,7 +949,7 @@ static struct xarray *cxl_hb_gather_bandwidth(struct xarray *xa)
/**
* cxl_region_update_bandwidth - Update the bandwidth access coordinates of a region
* @cxlr: The region being operated on
- * @input_xa: xarray holds cxl_perf_ctx wht calculated bandwidth per ACPI0017 instance
+ * @input_xa: xarray holds cxl_perf_ctx with calculated bandwidth per ACPI0017 instance
*/
static void cxl_region_update_bandwidth(struct cxl_region *cxlr,
struct xarray *input_xa)
@@ -987,7 +984,7 @@ void cxl_region_shared_upstream_bandwidth_update(struct cxl_region *cxlr)
bool is_root;
int rc;
- lockdep_assert_held(&cxl_dpa_rwsem);
+ lockdep_assert_held(&cxl_rwsem.dpa);
struct xarray *usp_xa __free(free_perf_xa) =
kzalloc(sizeof(*usp_xa), GFP_KERNEL);
@@ -1057,7 +1054,7 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
{
struct cxl_dpa_perf *perf;
- lockdep_assert_held(&cxl_dpa_rwsem);
+ lockdep_assert_held(&cxl_rwsem.dpa);
perf = cxled_get_dpa_perf(cxled);
if (IS_ERR(perf))
@@ -1075,14 +1072,3 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
cxlr->coord[i].write_bandwidth += perf->coord[i].write_bandwidth;
}
}
-
-int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
- enum access_coordinate_class access)
-{
- return hmat_update_target_coordinates(nid, &cxlr->coord[access], access);
-}
-
-bool cxl_need_node_perf_attrs_update(int nid)
-{
- return !acpi_node_backed_by_real_pxm(nid);
-}
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 29b61828a847..1fb66132b777 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -5,6 +5,7 @@
#define __CXL_CORE_H__
#include <cxl/mailbox.h>
+#include <linux/rwsem.h>
extern const struct device_type cxl_nvdimm_bridge_type;
extern const struct device_type cxl_nvdimm_type;
@@ -12,6 +13,11 @@ extern const struct device_type cxl_pmu_type;
extern struct attribute_group cxl_base_attribute_group;
+enum cxl_detach_mode {
+ DETACH_ONLY,
+ DETACH_INVALIDATE,
+};
+
#ifdef CONFIG_CXL_REGION
extern struct device_attribute dev_attr_create_pmem_region;
extern struct device_attribute dev_attr_create_ram_region;
@@ -20,7 +26,11 @@ extern struct device_attribute dev_attr_region;
extern const struct device_type cxl_pmem_region_type;
extern const struct device_type cxl_dax_region_type;
extern const struct device_type cxl_region_type;
-void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled);
+
+int cxl_decoder_detach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos,
+ enum cxl_detach_mode mode);
+
#define CXL_REGION_ATTR(x) (&dev_attr_##x.attr)
#define CXL_REGION_TYPE(x) (&cxl_region_type)
#define SET_CXL_REGION_ATTR(x) (&dev_attr_##x.attr),
@@ -48,8 +58,11 @@ static inline int cxl_get_poison_by_endpoint(struct cxl_port *port)
{
return 0;
}
-static inline void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
+static inline int cxl_decoder_detach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled,
+ int pos, enum cxl_detach_mode mode)
{
+ return 0;
}
static inline int cxl_region_init(void)
{
@@ -80,6 +93,7 @@ int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size);
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
+bool cxl_resource_contains_addr(const struct resource *res, const resource_size_t addr);
enum cxl_rcrb {
CXL_RCRB_DOWNSTREAM,
@@ -96,8 +110,20 @@ u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb);
#define PCI_RCRB_CAP_HDR_NEXT_MASK GENMASK(15, 8)
#define PCI_CAP_EXP_SIZEOF 0x3c
-extern struct rw_semaphore cxl_dpa_rwsem;
-extern struct rw_semaphore cxl_region_rwsem;
+struct cxl_rwsem {
+ /*
+ * All changes to HPA (interleave configuration) occur with this
+ * lock held for write.
+ */
+ struct rw_semaphore region;
+ /*
+ * All changes to a device DPA space occur with this lock held
+ * for write.
+ */
+ struct rw_semaphore dpa;
+};
+
+extern struct cxl_rwsem cxl_rwsem;
int cxl_memdev_init(void);
void cxl_memdev_exit(void);
@@ -109,19 +135,23 @@ enum cxl_poison_trace_type {
CXL_POISON_TRACE_CLEAR,
};
+enum poison_cmd_enabled_bits;
+bool cxl_memdev_has_poison_cmd(struct cxl_memdev *cxlmd,
+ enum poison_cmd_enabled_bits cmd);
+
long cxl_pci_get_latency(struct pci_dev *pdev);
int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c);
-int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
- enum access_coordinate_class access);
-bool cxl_need_node_perf_attrs_update(int nid);
int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
struct access_coordinate *c);
int cxl_ras_init(void);
void cxl_ras_exit(void);
int cxl_gpf_port_setup(struct cxl_dport *dport);
-int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
- int nid, resource_size_t *size);
+
+struct cxl_hdm;
+int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
+ struct cxl_endpoint_dvsec_info *info);
+int cxl_port_get_possible_dports(struct cxl_port *port);
#ifdef CONFIG_CXL_FEATURES
struct cxl_feat_entry *
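
The cxl_rwsem struct above replaces the two free-standing semaphores, and later hunks in this section always take the pair in a fixed order: region first, then dpa. A condensed sketch of that nesting using the ACQUIRE()/ACQUIRE_ERR() conditional guards the series adopts — illustrative only, not a new kernel API:

/* Sketch of the lock nesting used throughout this series. */
static int do_translation_work(void)
{
	int rc;

	ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
		return rc;

	ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
	if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
		return rc;

	/* both locks held; released in reverse order on return */
	return 0;
}
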
diff --git a/drivers/cxl/core/edac.c b/drivers/cxl/core/edac.c
index 2cbc664e5d62..79994ca9bc9f 100644
--- a/drivers/cxl/core/edac.c
+++ b/drivers/cxl/core/edac.c
@@ -103,10 +103,10 @@ static int cxl_scrub_get_attrbs(struct cxl_patrol_scrub_context *cxl_ps_ctx,
u8 *cap, u16 *cycle, u8 *flags, u8 *min_cycle)
{
struct cxl_mailbox *cxl_mbox;
- u8 min_scrub_cycle = U8_MAX;
struct cxl_region_params *p;
struct cxl_memdev *cxlmd;
struct cxl_region *cxlr;
+ u8 min_scrub_cycle = 0;
int i, ret;
if (!cxl_ps_ctx->cxlr) {
@@ -115,10 +115,9 @@ static int cxl_scrub_get_attrbs(struct cxl_patrol_scrub_context *cxl_ps_ctx,
flags, min_cycle);
}
- struct rw_semaphore *region_lock __free(rwsem_read_release) =
- rwsem_read_intr_acquire(&cxl_region_rwsem);
- if (!region_lock)
- return -EINTR;
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
+ return ret;
cxlr = cxl_ps_ctx->cxlr;
p = &cxlr->params;
@@ -133,8 +132,12 @@ static int cxl_scrub_get_attrbs(struct cxl_patrol_scrub_context *cxl_ps_ctx,
if (ret)
return ret;
+ /*
+  * The min_scrub_cycle of a region is the maximum of the minimum
+  * scrub cycles supported by the memdevs that back the region.
+  */
if (min_cycle)
- min_scrub_cycle = min(*min_cycle, min_scrub_cycle);
+ min_scrub_cycle = max(*min_cycle, min_scrub_cycle);
}
if (min_cycle)
@@ -154,10 +157,9 @@ static int cxl_scrub_set_attrbs_region(struct device *dev,
struct cxl_region *cxlr;
int ret, i;
- struct rw_semaphore *region_lock __free(rwsem_read_release) =
- rwsem_read_intr_acquire(&cxl_region_rwsem);
- if (!region_lock)
- return -EINTR;
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
+ return ret;
cxlr = cxl_ps_ctx->cxlr;
p = &cxlr->params;
@@ -693,7 +695,7 @@ static int cxl_set_ecs_threshold(struct device *dev, u8 *log_cap, u16 *config,
ECS_THRESHOLD_IDX_4096);
break;
default:
- dev_dbg(dev, "Invalid CXL ECS threshold count(%d) to set\n",
+ dev_dbg(dev, "Invalid CXL ECS threshold count(%u) to set\n",
val);
dev_dbg(dev, "Supported ECS threshold counts: %u, %u, %u\n",
ECS_THRESHOLD_256, ECS_THRESHOLD_1024,
@@ -1099,8 +1101,10 @@ int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd, union cxl_event *evt)
old_rec = xa_store(&array_rec->rec_gen_media,
le64_to_cpu(rec->media_hdr.phys_addr), rec,
GFP_KERNEL);
- if (xa_is_err(old_rec))
+ if (xa_is_err(old_rec)) {
+ kfree(rec);
return xa_err(old_rec);
+ }
kfree(old_rec);
@@ -1127,8 +1131,10 @@ int cxl_store_rec_dram(struct cxl_memdev *cxlmd, union cxl_event *evt)
old_rec = xa_store(&array_rec->rec_dram,
le64_to_cpu(rec->media_hdr.phys_addr), rec,
GFP_KERNEL);
- if (xa_is_err(old_rec))
+ if (xa_is_err(old_rec)) {
+ kfree(rec);
return xa_err(old_rec);
+ }
kfree(old_rec);
@@ -1315,7 +1321,7 @@ cxl_mem_get_rec_dram(struct cxl_memdev *cxlmd,
attrbs.bank = ctx->bank;
break;
case EDAC_REPAIR_RANK_SPARING:
- attrbs.repair_type = CXL_BANK_SPARING;
+ attrbs.repair_type = CXL_RANK_SPARING;
break;
default:
return NULL;
@@ -1332,16 +1338,15 @@ cxl_mem_perform_sparing(struct device *dev,
struct cxl_memdev_sparing_in_payload sparing_pi;
struct cxl_event_dram *rec = NULL;
u16 validity_flags = 0;
+ int ret;
- struct rw_semaphore *region_lock __free(rwsem_read_release) =
- rwsem_read_intr_acquire(&cxl_region_rwsem);
- if (!region_lock)
- return -EINTR;
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return ret;
- struct rw_semaphore *dpa_lock __free(rwsem_read_release) =
- rwsem_read_intr_acquire(&cxl_dpa_rwsem);
- if (!dpa_lock)
- return -EINTR;
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return ret;
if (!cxl_sparing_ctx->cap_safe_when_in_use) {
/* Memory to repair must be offline */
@@ -1515,7 +1520,7 @@ static int cxl_mem_sparing_set_dpa(struct device *dev, void *drv_data, u64 dpa)
struct cxl_memdev *cxlmd = ctx->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end)
+ if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa))
return -EINVAL;
ctx->dpa = dpa;
@@ -1779,16 +1784,15 @@ static int cxl_mem_perform_ppr(struct cxl_ppr_context *cxl_ppr_ctx)
struct cxl_memdev_ppr_maintenance_attrbs maintenance_attrbs;
struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
struct cxl_mem_repair_attrbs attrbs = { 0 };
+ int ret;
- struct rw_semaphore *region_lock __free(rwsem_read_release) =
- rwsem_read_intr_acquire(&cxl_region_rwsem);
- if (!region_lock)
- return -EINTR;
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return ret;
- struct rw_semaphore *dpa_lock __free(rwsem_read_release) =
- rwsem_read_intr_acquire(&cxl_dpa_rwsem);
- if (!dpa_lock)
- return -EINTR;
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((ret = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return ret;
if (!cxl_ppr_ctx->media_accessible || !cxl_ppr_ctx->data_retained) {
/* Memory to repair must be offline */
@@ -1884,7 +1888,7 @@ static int cxl_ppr_set_dpa(struct device *dev, void *drv_data, u64 dpa)
struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end)
+ if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa))
return -EINVAL;
cxl_ppr_ctx->dpa = dpa;
@@ -1915,8 +1919,11 @@ static int cxl_ppr_set_nibble_mask(struct device *dev, void *drv_data,
static int cxl_do_ppr(struct device *dev, void *drv_data, u32 val)
{
struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
+ struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
- if (!cxl_ppr_ctx->dpa || val != EDAC_DO_MEM_REPAIR)
+ if (val != EDAC_DO_MEM_REPAIR ||
+ !cxl_resource_contains_addr(&cxlds->dpa_res, cxl_ppr_ctx->dpa))
return -EINVAL;
return cxl_mem_perform_ppr(cxl_ppr_ctx);
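
The corrected scrub-cycle comment above states the rule: a region's minimum scrub cycle is the maximum of the per-memdev minimums, because every backing device must be able to honor the chosen cycle. A runnable illustration with hypothetical values:

#include <stdio.h>

int main(void)
{
	/* hypothetical per-memdev minimum scrub cycles (hours) */
	unsigned char min_cycles[] = { 1, 12, 4 };
	unsigned char region_min = 0;

	for (int i = 0; i < 3; i++)
		if (min_cycles[i] > region_min)
			region_min = min_cycles[i];

	/* 12: any smaller cycle would be unsupported by memdev 1 */
	printf("region min scrub cycle: %u\n", region_min);
	return 0;
}
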
diff --git a/drivers/cxl/core/features.c b/drivers/cxl/core/features.c
index 6f2eae1eb126..4bc484b46f43 100644
--- a/drivers/cxl/core/features.c
+++ b/drivers/cxl/core/features.c
@@ -371,6 +371,9 @@ cxl_feature_info(struct cxl_features_state *cxlfs,
{
struct cxl_feat_entry *feat;
+ if (!cxlfs || !cxlfs->entries)
+ return ERR_PTR(-EOPNOTSUPP);
+
for (int i = 0; i < cxlfs->entries->num_features; i++) {
feat = &cxlfs->entries->ent[i];
if (uuid_equal(uuid, &feat->uuid))
@@ -544,7 +547,7 @@ static bool cxlctl_validate_set_features(struct cxl_features_state *cxlfs,
u32 flags;
if (rpc_in->op_size < sizeof(uuid_t))
- return ERR_PTR(-EINVAL);
+ return false;
feat = cxl_feature_info(cxlfs, &rpc_in->set_feat_in.uuid);
if (IS_ERR(feat))
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index ab1007495f6b..1c5d2022c87a 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -16,14 +16,16 @@
* for enumerating these registers and capabilities.
*/
-DECLARE_RWSEM(cxl_dpa_rwsem);
+struct cxl_rwsem cxl_rwsem = {
+ .region = __RWSEM_INITIALIZER(cxl_rwsem.region),
+ .dpa = __RWSEM_INITIALIZER(cxl_rwsem.dpa),
+};
-static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
- int *target_map)
+static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld)
{
int rc;
- rc = cxl_decoder_add_locked(cxld, target_map);
+ rc = cxl_decoder_add_locked(cxld);
if (rc) {
put_device(&cxld->dev);
dev_err(&port->dev, "Failed to add decoder\n");
@@ -47,12 +49,9 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
* are claimed and passed to the single dport. Disable the range until the first
* CXL region is enumerated / activated.
*/
-int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
+static int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
struct cxl_switch_decoder *cxlsd;
- struct cxl_dport *dport = NULL;
- int single_port_map[1];
- unsigned long index;
struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
/*
@@ -68,13 +67,8 @@ int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
device_lock_assert(&port->dev);
- xa_for_each(&port->dports, index, dport)
- break;
- single_port_map[0] = dport->port_id;
-
- return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
+ return add_hdm_decoder(port, &cxlsd->cxld);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, "CXL");
static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
@@ -144,8 +138,8 @@ static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
* @port: cxl_port to map
* @info: cached DVSEC range register info
*/
-struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
- struct cxl_endpoint_dvsec_info *info)
+static struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
+ struct cxl_endpoint_dvsec_info *info)
{
struct cxl_register_map *reg_map = &port->reg_map;
struct device *dev = &port->dev;
@@ -194,13 +188,12 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
*/
if (should_emulate_decoders(info)) {
dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
- info->ranges > 1 ? "s" : "");
+ str_plural(info->ranges));
cxlhdm->decoder_count = info->ranges;
}
return cxlhdm;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, "CXL");
static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
@@ -214,7 +207,7 @@ void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
struct resource *p1, *p2;
- guard(rwsem_read)(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
__cxl_dpa_debug(file, p1, 0);
for (p2 = p1->child; p2; p2 = p2->sibling)
@@ -266,7 +259,7 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
struct resource *res = cxled->dpa_res;
resource_size_t skip_start;
- lockdep_assert_held_write(&cxl_dpa_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.dpa);
/* save @skip_start, before @res is released */
skip_start = res->start - cxled->skip;
@@ -281,7 +274,7 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
static void cxl_dpa_release(void *cxled)
{
- guard(rwsem_write)(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.dpa);
__cxl_dpa_release(cxled);
}
@@ -293,7 +286,7 @@ static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
struct cxl_port *port = cxled_to_port(cxled);
- lockdep_assert_held_write(&cxl_dpa_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.dpa);
devm_remove_action(&port->dev, cxl_dpa_release, cxled);
__cxl_dpa_release(cxled);
}
@@ -361,7 +354,7 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
struct resource *res;
int rc;
- lockdep_assert_held_write(&cxl_dpa_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.dpa);
if (!len) {
dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
@@ -470,7 +463,7 @@ int cxl_dpa_setup(struct cxl_dev_state *cxlds, const struct cxl_dpa_info *info)
{
struct device *dev = cxlds->dev;
- guard(rwsem_write)(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.dpa);
if (cxlds->nr_partitions)
return -EBUSY;
@@ -516,9 +509,8 @@ int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
struct cxl_port *port = cxled_to_port(cxled);
int rc;
- down_write(&cxl_dpa_rwsem);
- rc = __cxl_dpa_reserve(cxled, base, len, skipped);
- up_write(&cxl_dpa_rwsem);
+ scoped_guard(rwsem_write, &cxl_rwsem.dpa)
+ rc = __cxl_dpa_reserve(cxled, base, len, skipped);
if (rc)
return rc;
@@ -529,7 +521,7 @@ EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, "CXL");
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
- guard(rwsem_read)(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
if (cxled->dpa_res)
return resource_size(cxled->dpa_res);
@@ -540,19 +532,26 @@ resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
resource_size_t base = -1;
- lockdep_assert_held(&cxl_dpa_rwsem);
+ lockdep_assert_held(&cxl_rwsem.dpa);
if (cxled->dpa_res)
base = cxled->dpa_res->start;
return base;
}
+bool cxl_resource_contains_addr(const struct resource *res, const resource_size_t addr)
+{
+ struct resource _addr = DEFINE_RES_MEM(addr, 1);
+
+ return resource_contains(res, &_addr);
+}
+
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
struct cxl_port *port = cxled_to_port(cxled);
struct device *dev = &cxled->cxld.dev;
- guard(rwsem_write)(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.dpa);
if (!cxled->dpa_res)
return 0;
if (cxled->cxld.region) {
@@ -582,7 +581,7 @@ int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
struct device *dev = &cxled->cxld.dev;
int part;
- guard(rwsem_write)(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.dpa);
if (cxled->cxld.flags & CXL_DECODER_F_ENABLE)
return -EBUSY;
@@ -614,7 +613,7 @@ static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
struct resource *p, *last;
int part;
- guard(rwsem_write)(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.dpa);
if (cxled->cxld.region) {
dev_dbg(dev, "decoder attached to %s\n",
dev_name(&cxled->cxld.region->dev));
@@ -764,46 +763,12 @@ static int cxld_await_commit(void __iomem *hdm, int id)
return -ETIMEDOUT;
}
-static int cxl_decoder_commit(struct cxl_decoder *cxld)
+static void setup_hw_decoder(struct cxl_decoder *cxld, void __iomem *hdm)
{
- struct cxl_port *port = to_cxl_port(cxld->dev.parent);
- struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
- void __iomem *hdm = cxlhdm->regs.hdm_decoder;
- int id = cxld->id, rc;
+ int id = cxld->id;
u64 base, size;
u32 ctrl;
- if (cxld->flags & CXL_DECODER_F_ENABLE)
- return 0;
-
- if (cxl_num_decoders_committed(port) != id) {
- dev_dbg(&port->dev,
- "%s: out of order commit, expected decoder%d.%d\n",
- dev_name(&cxld->dev), port->id,
- cxl_num_decoders_committed(port));
- return -EBUSY;
- }
-
- /*
- * For endpoint decoders hosted on CXL memory devices that
- * support the sanitize operation, make sure sanitize is not in-flight.
- */
- if (is_endpoint_decoder(&cxld->dev)) {
- struct cxl_endpoint_decoder *cxled =
- to_cxl_endpoint_decoder(&cxld->dev);
- struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- struct cxl_memdev_state *mds =
- to_cxl_memdev_state(cxlmd->cxlds);
-
- if (mds && mds->security.sanitize_active) {
- dev_dbg(&cxlmd->dev,
- "attempted to commit %s during sanitize\n",
- dev_name(&cxld->dev));
- return -EBUSY;
- }
- }
-
- down_read(&cxl_dpa_rwsem);
/* common decoder settings */
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
cxld_set_interleave(cxld, &ctrl);
@@ -837,7 +802,47 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
}
writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
- up_read(&cxl_dpa_rwsem);
+}
+
+static int cxl_decoder_commit(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ int id = cxld->id, rc;
+
+ if (cxld->flags & CXL_DECODER_F_ENABLE)
+ return 0;
+
+ if (cxl_num_decoders_committed(port) != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order commit, expected decoder%d.%d\n",
+ dev_name(&cxld->dev), port->id,
+ cxl_num_decoders_committed(port));
+ return -EBUSY;
+ }
+
+ /*
+ * For endpoint decoders hosted on CXL memory devices that
+ * support the sanitize operation, make sure sanitize is not in-flight.
+ */
+ if (is_endpoint_decoder(&cxld->dev)) {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_memdev_state *mds =
+ to_cxl_memdev_state(cxlmd->cxlds);
+
+ if (mds && mds->security.sanitize_active) {
+ dev_dbg(&cxlmd->dev,
+ "attempted to commit %s during sanitize\n",
+ dev_name(&cxld->dev));
+ return -EBUSY;
+ }
+ }
+
+ scoped_guard(rwsem_read, &cxl_rwsem.dpa)
+ setup_hw_decoder(cxld, hdm);
port->commit_end++;
rc = cxld_await_commit(hdm, cxld->id);
@@ -875,7 +880,7 @@ void cxl_port_commit_reap(struct cxl_decoder *cxld)
{
struct cxl_port *port = to_cxl_port(cxld->dev.parent);
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
/*
* Once the highest committed decoder is disabled, free any other
@@ -900,6 +905,9 @@ static void cxl_decoder_reset(struct cxl_decoder *cxld)
if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
return;
+ if (test_bit(CXL_DECODER_F_LOCK, &cxld->flags))
+ return;
+
if (port->commit_end == id)
cxl_port_commit_reap(cxld);
else
@@ -907,7 +915,6 @@ static void cxl_decoder_reset(struct cxl_decoder *cxld)
"%s: out of order reset, expected decoder%d.%d\n",
dev_name(&cxld->dev), port->id, port->commit_end);
- down_read(&cxl_dpa_rwsem);
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
@@ -916,7 +923,6 @@ static void cxl_decoder_reset(struct cxl_decoder *cxld)
writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
- up_read(&cxl_dpa_rwsem);
cxld->flags &= ~CXL_DECODER_F_ENABLE;
@@ -971,7 +977,7 @@ static int cxl_setup_hdm_decoder_from_dvsec(
}
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
- int *target_map, void __iomem *hdm, int which,
+ void __iomem *hdm, int which,
u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
struct cxl_endpoint_decoder *cxled = NULL;
@@ -1025,7 +1031,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
else
cxld->target_type = CXL_DECODER_DEVMEM;
- guard(rwsem_write)(&cxl_region_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.region);
if (cxld->id != cxl_num_decoders_committed(port)) {
dev_warn(&port->dev,
"decoder%d.%d: Committed out of order\n",
@@ -1090,7 +1096,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
target_list.value = (hi << 32) + lo;
for (i = 0; i < cxld->interleave_ways; i++)
- target_map[i] = target_list.target_id[i];
+ cxld->target_map[i] = target_list.target_id[i];
return 0;
}
@@ -1155,8 +1161,8 @@ static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
* @cxlhdm: Structure to populate with HDM capabilities
* @info: cached DVSEC range register info
*/
-int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info)
+static int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
+ struct cxl_endpoint_dvsec_info *info)
{
void __iomem *hdm = cxlhdm->regs.hdm_decoder;
struct cxl_port *port = cxlhdm->port;
@@ -1166,7 +1172,6 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
cxl_settle_decoders(cxlhdm);
for (i = 0; i < cxlhdm->decoder_count; i++) {
- int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
int rc, target_count = cxlhdm->target_count;
struct cxl_decoder *cxld;
@@ -1194,8 +1199,7 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
cxld = &cxlsd->cxld;
}
- rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
- &dpa_base, info);
+ rc = init_hdm_decoder(port, cxld, hdm, i, &dpa_base, info);
if (rc) {
dev_warn(&port->dev,
"Failed to initialize decoder%d.%d\n",
@@ -1203,7 +1207,7 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
put_device(&cxld->dev);
return rc;
}
- rc = add_hdm_decoder(port, cxld, target_map);
+ rc = add_hdm_decoder(port, cxld);
if (rc) {
dev_warn(&port->dev,
"Failed to add decoder%d.%d\n", port->id, i);
@@ -1213,4 +1217,71 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, "CXL");
+
+/**
+ * __devm_cxl_switch_port_decoders_setup - allocate and setup switch decoders
+ * @port: CXL port context
+ *
+ * Return: 0 on success or -errno on error
+ */
+int __devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
+{
+ struct cxl_hdm *cxlhdm;
+
+ if (is_cxl_root(port) || is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ cxlhdm = devm_cxl_setup_hdm(port, NULL);
+ if (!IS_ERR(cxlhdm))
+ return devm_cxl_enumerate_decoders(cxlhdm, NULL);
+
+ if (PTR_ERR(cxlhdm) != -ENODEV) {
+ dev_err(&port->dev, "Failed to map HDM decoder capability\n");
+ return PTR_ERR(cxlhdm);
+ }
+
+ if (cxl_port_get_possible_dports(port) == 1) {
+ dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
+ return devm_cxl_add_passthrough_decoder(port);
+ }
+
+ dev_err(&port->dev, "HDM decoder capability not found\n");
+ return -ENXIO;
+}
+EXPORT_SYMBOL_NS_GPL(__devm_cxl_switch_port_decoders_setup, "CXL");
+
+/**
+ * devm_cxl_endpoint_decoders_setup - allocate and setup endpoint decoders
+ * @port: CXL port context
+ *
+ * Return: 0 on success or -errno on error
+ */
+int devm_cxl_endpoint_decoders_setup(struct cxl_port *port)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+ struct cxl_endpoint_dvsec_info info = { .port = port };
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_hdm *cxlhdm;
+ int rc;
+
+ if (!is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ rc = cxl_dvsec_rr_decode(cxlds, &info);
+ if (rc < 0)
+ return rc;
+
+ cxlhdm = devm_cxl_setup_hdm(port, &info);
+ if (IS_ERR(cxlhdm)) {
+ if (PTR_ERR(cxlhdm) == -ENODEV)
+ dev_err(&port->dev, "HDM decoder registers not found\n");
+ return PTR_ERR(cxlhdm);
+ }
+
+ rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
+ if (rc)
+ return rc;
+
+ return devm_cxl_enumerate_decoders(cxlhdm, &info);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_endpoint_decoders_setup, "CXL");
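
cxl_resource_contains_addr(), added in the hdm.c hunks above, wraps the address in a one-byte resource so resource_contains() performs the bounds check; with struct resource's inclusive end, that reduces to a simple range test. A runnable equivalent in plain C:

#include <stdio.h>
#include <stdint.h>

struct res { uint64_t start, end; };	/* end is inclusive, as in struct resource */

static int contains_addr(const struct res *r, uint64_t addr)
{
	/* a 1-byte "resource" [addr, addr] fits iff addr lies in [start, end] */
	return addr >= r->start && addr <= r->end;
}

int main(void)
{
	struct res dpa = { 0x0, 0xFFFF };

	printf("%d %d %d\n", contains_addr(&dpa, 0x0),
	       contains_addr(&dpa, 0xFFFF), contains_addr(&dpa, 0x10000));
	return 0;	/* prints: 1 1 0 */
}
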
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 2689e6453c5a..fa6dd0c94656 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -899,6 +899,10 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
return;
}
+ if (event_type == CXL_CPER_EVENT_MEM_SPARING) {
+ trace_cxl_memory_sparing(cxlmd, type, &evt->mem_sparing);
+ return;
+ }
if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) {
u64 dpa, hpa = ULLONG_MAX, hpa_alias = ULLONG_MAX;
@@ -909,8 +913,8 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
* translations. Take topology mutation locks and lookup
* { HPA, REGION } from { DPA, MEMDEV } in the event record.
*/
- guard(rwsem_read)(&cxl_region_rwsem);
- guard(rwsem_read)(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
@@ -926,12 +930,30 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
if (cxl_store_rec_gen_media((struct cxl_memdev *)cxlmd, evt))
dev_dbg(&cxlmd->dev, "CXL store rec_gen_media failed\n");
+ if (evt->gen_media.media_hdr.descriptor &
+ CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
+ WARN_ON_ONCE((evt->gen_media.media_hdr.type &
+ CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE) &&
+ !get_unaligned_le24(evt->gen_media.cme_count));
+ else
+ WARN_ON_ONCE(evt->gen_media.media_hdr.type &
+ CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE);
+
trace_cxl_general_media(cxlmd, type, cxlr, hpa,
hpa_alias, &evt->gen_media);
} else if (event_type == CXL_CPER_EVENT_DRAM) {
if (cxl_store_rec_dram((struct cxl_memdev *)cxlmd, evt))
dev_dbg(&cxlmd->dev, "CXL store rec_dram failed\n");
+ if (evt->dram.media_hdr.descriptor &
+ CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
+ WARN_ON_ONCE((evt->dram.media_hdr.type &
+ CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE) &&
+ !get_unaligned_le24(evt->dram.cvme_count));
+ else
+ WARN_ON_ONCE(evt->dram.media_hdr.type &
+ CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE);
+
trace_cxl_dram(cxlmd, type, cxlr, hpa, hpa_alias,
&evt->dram);
}
@@ -952,6 +974,8 @@ static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
ev_type = CXL_CPER_EVENT_DRAM;
else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
ev_type = CXL_CPER_EVENT_MEM_MODULE;
+ else if (uuid_equal(uuid, &CXL_EVENT_MEM_SPARING_UUID))
+ ev_type = CXL_CPER_EVENT_MEM_SPARING;
cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
}
@@ -1265,7 +1289,7 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
/* synchronize with cxl_mem_probe() and decoder write operations */
guard(device)(&cxlmd->dev);
endpoint = cxlmd->endpoint;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
/*
* Require an endpoint to be safe otherwise the driver can not
* be sure that the device is unmapped.
@@ -1401,8 +1425,8 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
int nr_records = 0;
int rc;
- rc = mutex_lock_interruptible(&mds->poison.lock);
- if (rc)
+ ACQUIRE(mutex_intr, lock)(&mds->poison.mutex);
+ if ((rc = ACQUIRE_ERR(mutex_intr, &lock)))
return rc;
po = mds->poison.list_out;
@@ -1437,7 +1461,6 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
}
} while (po->flags & CXL_POISON_FLAG_MORE);
- mutex_unlock(&mds->poison.lock);
return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, "CXL");
@@ -1473,7 +1496,7 @@ int cxl_poison_state_init(struct cxl_memdev_state *mds)
return rc;
}
- mutex_init(&mds->poison.lock);
+ mutex_init(&mds->poison.mutex);
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, "CXL");
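
The new WARN_ON_ONCE checks in the mbox.c hunks above read 24-bit little-endian event counters with get_unaligned_le24(). A runnable userspace reimplementation of that accessor, for illustration:

#include <stdio.h>
#include <stdint.h>

/* userspace equivalent of the kernel's get_unaligned_le24() */
static uint32_t get_le24(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16);
}

int main(void)
{
	uint8_t cme_count[3] = { 0x02, 0x01, 0x00 };	/* 0x000102 LE */

	/* a threshold event with a zero counter would trip the WARN above */
	printf("count = %u\n", get_le24(cme_count));	/* 258 */
	return 0;
}
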
diff --git a/drivers/cxl/core/mce.h b/drivers/cxl/core/mce.h
index ace73424eeb6..ca272e8db6c7 100644
--- a/drivers/cxl/core/mce.h
+++ b/drivers/cxl/core/mce.h
@@ -7,7 +7,7 @@
#ifdef CONFIG_CXL_MCE
int devm_cxl_register_mce_notifier(struct device *dev,
- struct notifier_block *mce_notifer);
+ struct notifier_block *mce_notifier);
#else
static inline int
devm_cxl_register_mce_notifier(struct device *dev,
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index f88a13adf7fa..e370d733e440 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -200,6 +200,14 @@ static ssize_t security_erase_store(struct device *dev,
static struct device_attribute dev_attr_security_erase =
__ATTR(erase, 0200, NULL, security_erase_store);
+bool cxl_memdev_has_poison_cmd(struct cxl_memdev *cxlmd,
+ enum poison_cmd_enabled_bits cmd)
+{
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+ return test_bit(cmd, mds->poison.enabled_cmds);
+}
+
static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
@@ -232,15 +240,13 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
if (!port || !is_cxl_endpoint(port))
return -EINVAL;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
return rc;
- rc = down_read_interruptible(&cxl_dpa_rwsem);
- if (rc) {
- up_read(&cxl_region_rwsem);
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
return rc;
- }
if (cxl_num_decoders_committed(port) == 0) {
/* No regions mapped to this memdev */
@@ -249,8 +255,6 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
/* Regions mapped, collect poison by endpoint */
rc = cxl_get_poison_by_endpoint(port);
}
- up_read(&cxl_dpa_rwsem);
- up_read(&cxl_region_rwsem);
return rc;
}
@@ -267,7 +271,7 @@ static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
dev_dbg(cxlds->dev, "device has no dpa resource\n");
return -EINVAL;
}
- if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end) {
+ if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa)) {
dev_dbg(cxlds->dev, "dpa:0x%llx not in resource:%pR\n",
dpa, &cxlds->dpa_res);
return -EINVAL;
@@ -280,7 +284,7 @@ static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
return 0;
}
-int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
+int cxl_inject_poison_locked(struct cxl_memdev *cxlmd, u64 dpa)
{
struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_inject_poison inject;
@@ -292,19 +296,12 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
- return rc;
-
- rc = down_read_interruptible(&cxl_dpa_rwsem);
- if (rc) {
- up_read(&cxl_region_rwsem);
- return rc;
- }
+ lockdep_assert_held(&cxl_rwsem.dpa);
+ lockdep_assert_held(&cxl_rwsem.region);
rc = cxl_validate_poison_dpa(cxlmd, dpa);
if (rc)
- goto out;
+ return rc;
inject.address = cpu_to_le64(dpa);
mbox_cmd = (struct cxl_mbox_cmd) {
@@ -314,7 +311,7 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
};
rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
- goto out;
+ return rc;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
if (cxlr)
@@ -327,15 +324,27 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
.length = cpu_to_le32(1),
};
trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
-out:
- up_read(&cxl_dpa_rwsem);
- up_read(&cxl_region_rwsem);
- return rc;
+ return 0;
+}
+
+int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
+{
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return rc;
+
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return rc;
+
+ return cxl_inject_poison_locked(cxlmd, dpa);
}
EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, "CXL");
-int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
+int cxl_clear_poison_locked(struct cxl_memdev *cxlmd, u64 dpa)
{
struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_clear_poison clear;
@@ -347,19 +356,12 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
- return rc;
-
- rc = down_read_interruptible(&cxl_dpa_rwsem);
- if (rc) {
- up_read(&cxl_region_rwsem);
- return rc;
- }
+ lockdep_assert_held(&cxl_rwsem.dpa);
+ lockdep_assert_held(&cxl_rwsem.region);
rc = cxl_validate_poison_dpa(cxlmd, dpa);
if (rc)
- goto out;
+ return rc;
/*
* In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command
@@ -378,7 +380,7 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
if (rc)
- goto out;
+ return rc;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
if (cxlr)
@@ -391,11 +393,23 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
.length = cpu_to_le32(1),
};
trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
-out:
- up_read(&cxl_dpa_rwsem);
- up_read(&cxl_region_rwsem);
- return rc;
+ return 0;
+}
+
+int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
+{
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return rc;
+
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return rc;
+
+ return cxl_clear_poison_locked(cxlmd, dpa);
}
EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, "CXL");
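Splitting the inject/clear entry points into a thin locking wrapper plus a *_locked() body lets callers that already hold cxl_rwsem reuse the body, while lockdep_assert_held() keeps the contract checkable. The shape of the pattern, with hypothetical names:

    static int cxl_op_locked(struct cxl_memdev *cxlmd) /* hypothetical */
    {
        lockdep_assert_held(&cxl_rwsem.region);
        lockdep_assert_held(&cxl_rwsem.dpa);
        /* body uses plain returns; no unlock bookkeeping */
        return 0;
    }

    static int cxl_op(struct cxl_memdev *cxlmd) /* hypothetical */
    {
        int rc;

        ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
        if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
            return rc;
        ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
        if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
            return rc;
        return cxl_op_locked(cxlmd);
    }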
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index b50551601c2e..5b023a0178a4 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -24,84 +24,52 @@ static unsigned short media_ready_timeout = 60;
module_param(media_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(media_ready_timeout, "seconds to wait for media ready");
-struct cxl_walk_context {
- struct pci_bus *bus;
- struct cxl_port *port;
+static int pci_get_port_num(struct pci_dev *pdev)
+{
+ u32 lnkcap;
int type;
- int error;
- int count;
-};
-static int match_add_dports(struct pci_dev *pdev, void *data)
-{
- struct cxl_walk_context *ctx = data;
- struct cxl_port *port = ctx->port;
- int type = pci_pcie_type(pdev);
- struct cxl_register_map map;
- struct cxl_dport *dport;
- u32 lnkcap, port_num;
- int rc;
+ type = pci_pcie_type(pdev);
+ if (type != PCI_EXP_TYPE_DOWNSTREAM && type != PCI_EXP_TYPE_ROOT_PORT)
+ return -EINVAL;
- if (pdev->bus != ctx->bus)
- return 0;
- if (!pci_is_pcie(pdev))
- return 0;
- if (type != ctx->type)
- return 0;
if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
&lnkcap))
- return 0;
-
- rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
- if (rc)
- dev_dbg(&port->dev, "failed to find component registers\n");
-
- port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
- dport = devm_cxl_add_dport(port, &pdev->dev, port_num, map.resource);
- if (IS_ERR(dport)) {
- ctx->error = PTR_ERR(dport);
- return PTR_ERR(dport);
- }
- ctx->count++;
+ return -ENXIO;
- return 0;
+ return FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
}
/**
- * devm_cxl_port_enumerate_dports - enumerate downstream ports of the upstream port
- * @port: cxl_port whose ->uport_dev is the upstream of dports to be enumerated
+ * __devm_cxl_add_dport_by_dev - allocate a dport by dport device
+ * @port: cxl_port that hosts the dport
+ * @dport_dev: 'struct device' of the dport
*
- * Returns a positive number of dports enumerated or a negative error
- * code.
+ * Returns the allocated dport on success or ERR_PTR() of -errno on error
*/
-int devm_cxl_port_enumerate_dports(struct cxl_port *port)
+struct cxl_dport *__devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev)
{
- struct pci_bus *bus = cxl_port_to_pci_bus(port);
- struct cxl_walk_context ctx;
- int type;
+ struct cxl_register_map map;
+ struct pci_dev *pdev;
+ int port_num, rc;
- if (!bus)
- return -ENXIO;
+ if (!dev_is_pci(dport_dev))
+ return ERR_PTR(-EINVAL);
- if (pci_is_root_bus(bus))
- type = PCI_EXP_TYPE_ROOT_PORT;
- else
- type = PCI_EXP_TYPE_DOWNSTREAM;
+ pdev = to_pci_dev(dport_dev);
+ port_num = pci_get_port_num(pdev);
+ if (port_num < 0)
+ return ERR_PTR(port_num);
- ctx = (struct cxl_walk_context) {
- .port = port,
- .bus = bus,
- .type = type,
- };
- pci_walk_bus(bus, match_add_dports, &ctx);
+ rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
+ if (rc)
+ return ERR_PTR(rc);
- if (ctx.count == 0)
- return -ENODEV;
- if (ctx.error)
- return ctx.error;
- return ctx.count;
+ device_lock_assert(&port->dev);
+ return devm_cxl_add_dport(port, dport_dev, port_num, map.resource);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, "CXL");
+EXPORT_SYMBOL_NS_GPL(__devm_cxl_add_dport_by_dev, "CXL");
static int cxl_dvsec_mem_range_valid(struct cxl_dev_state *cxlds, int id)
{
@@ -1169,3 +1137,53 @@ int cxl_gpf_port_setup(struct cxl_dport *dport)
return 0;
}
+
+struct cxl_walk_context {
+ struct pci_bus *bus;
+ struct cxl_port *port;
+ int type;
+ int error;
+ int count;
+};
+
+static int count_dports(struct pci_dev *pdev, void *data)
+{
+ struct cxl_walk_context *ctx = data;
+ int type = pci_pcie_type(pdev);
+
+ if (pdev->bus != ctx->bus)
+ return 0;
+ if (!pci_is_pcie(pdev))
+ return 0;
+ if (type != ctx->type)
+ return 0;
+
+ ctx->count++;
+ return 0;
+}
+
+int cxl_port_get_possible_dports(struct cxl_port *port)
+{
+ struct pci_bus *bus = cxl_port_to_pci_bus(port);
+ struct cxl_walk_context ctx;
+ int type;
+
+ if (!bus) {
+ dev_err(&port->dev, "No PCI bus found for port %s\n",
+ dev_name(&port->dev));
+ return -ENXIO;
+ }
+
+ if (pci_is_root_bus(bus))
+ type = PCI_EXP_TYPE_ROOT_PORT;
+ else
+ type = PCI_EXP_TYPE_DOWNSTREAM;
+
+ ctx = (struct cxl_walk_context) {
+ .bus = bus,
+ .type = type,
+ };
+ pci_walk_bus(bus, count_dports, &ctx);
+
+ return ctx.count;
+}
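cxl_port_get_possible_dports() keeps the old walk-context shape, but the callback only counts matches; returning 0 from the callback keeps pci_walk_bus() iterating over the whole bus. A condensed usage sketch of the walk:

    struct cxl_walk_context ctx = {
        .bus = bus,
        .type = pci_is_root_bus(bus) ? PCI_EXP_TYPE_ROOT_PORT :
                                       PCI_EXP_TYPE_DOWNSTREAM,
    };

    pci_walk_bus(bus, count_dports, &ctx); /* non-zero return would stop the walk */
    /* ctx.count now holds the number of candidate dports */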
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index eb46c6764d20..fef3aa0c6680 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -30,18 +30,21 @@
* instantiated by the core.
*/
-/*
- * All changes to the interleave configuration occur with this lock held
- * for write.
- */
-DECLARE_RWSEM(cxl_region_rwsem);
-
static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);
+/*
+ * The terminal device in PCI is NULL and @platform_bus
+ * for platform devices (for cxl_test)
+ */
+static bool is_cxl_host_bridge(struct device *dev)
+{
+ return (!dev || dev == &platform_bus);
+}
+
int cxl_num_decoders_committed(struct cxl_port *port)
{
- lockdep_assert_held(&cxl_region_rwsem);
+ lockdep_assert_held(&cxl_rwsem.region);
return port->commit_end + 1;
}
@@ -176,7 +179,7 @@ static ssize_t target_list_show(struct device *dev,
ssize_t offset;
int rc;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
rc = emit_target_list(cxlsd, buf);
if (rc < 0)
return rc;
@@ -196,7 +199,7 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- /* without @cxl_dpa_rwsem, make sure @part is not reloaded */
+ /* without @cxl_rwsem.dpa, make sure @part is not reloaded */
int part = READ_ONCE(cxled->part);
const char *desc;
@@ -235,7 +238,7 @@ static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *at
{
struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
- guard(rwsem_read)(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
return sysfs_emit(buf, "%#llx\n", (u64)cxl_dpa_resource_start(cxled));
}
static DEVICE_ATTR_RO(dpa_resource);
@@ -560,7 +563,7 @@ static ssize_t decoders_committed_show(struct device *dev,
{
struct cxl_port *port = to_cxl_port(dev);
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
return sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
}
@@ -746,6 +749,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
xa_init(&port->dports);
xa_init(&port->endpoints);
xa_init(&port->regions);
+ port->component_reg_phys = CXL_RESOURCE_NONE;
device_initialize(dev);
lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
@@ -864,9 +868,7 @@ static int cxl_port_add(struct cxl_port *port,
if (rc)
return rc;
- rc = cxl_port_setup_regs(port, component_reg_phys);
- if (rc)
- return rc;
+ port->component_reg_phys = component_reg_phys;
} else {
rc = dev_set_name(dev, "root%d", port->id);
if (rc)
@@ -1179,6 +1181,20 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
if (rc)
return ERR_PTR(rc);
+ /*
+ * Set up the port registers when the first dport shows up. Having
+ * a dport also means that there is at least one active link.
+ */
+ if (port->nr_dports == 1 &&
+ port->component_reg_phys != CXL_RESOURCE_NONE) {
+ rc = cxl_port_setup_regs(port, port->component_reg_phys);
+ if (rc) {
+ xa_erase(&port->dports, (unsigned long)dport->dport_dev);
+ return ERR_PTR(rc);
+ }
+ port->component_reg_phys = CXL_RESOURCE_NONE;
+ }
+
get_device(dport_dev);
rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
if (rc)
@@ -1354,21 +1370,6 @@ static struct cxl_port *find_cxl_port(struct device *dport_dev,
return port;
}
-static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
- struct device *dport_dev,
- struct cxl_dport **dport)
-{
- struct cxl_find_port_ctx ctx = {
- .dport_dev = dport_dev,
- .parent_port = parent_port,
- .dport = dport,
- };
- struct cxl_port *port;
-
- port = __find_cxl_port(&ctx);
- return port;
-}
-
/*
* All users of grandparent() are using it to walk PCIe-like switch port
* hierarchy. A PCIe switch is comprised of a bridge device representing the
@@ -1429,7 +1430,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, "CXL");
* through ->remove(). This "bottom-up" removal selectively removes individual
* child ports manually. This depends on devm_cxl_add_port() to not change its
* devm action registration order, and for dports to have already been
- * destroyed by reap_dports().
+ * destroyed by del_dports().
*/
static void delete_switch_port(struct cxl_port *port)
{
@@ -1438,18 +1439,24 @@ static void delete_switch_port(struct cxl_port *port)
devm_release_action(port->dev.parent, unregister_port, port);
}
-static void reap_dports(struct cxl_port *port)
+static void del_dport(struct cxl_dport *dport)
+{
+ struct cxl_port *port = dport->port;
+
+ devm_release_action(&port->dev, cxl_dport_unlink, dport);
+ devm_release_action(&port->dev, cxl_dport_remove, dport);
+ devm_kfree(&port->dev, dport);
+}
+
+static void del_dports(struct cxl_port *port)
{
struct cxl_dport *dport;
unsigned long index;
device_lock_assert(&port->dev);
- xa_for_each(&port->dports, index, dport) {
- devm_release_action(&port->dev, cxl_dport_unlink, dport);
- devm_release_action(&port->dev, cxl_dport_remove, dport);
- devm_kfree(&port->dev, dport);
- }
+ xa_for_each(&port->dports, index, dport)
+ del_dport(dport);
}
struct detach_ctx {
@@ -1507,7 +1514,7 @@ static void cxl_detach_ep(void *data)
*/
died = true;
port->dead = true;
- reap_dports(port);
+ del_dports(port);
}
device_unlock(&port->dev);
@@ -1538,16 +1545,157 @@ static resource_size_t find_component_registers(struct device *dev)
return map.resource;
}
+static int match_port_by_uport(struct device *dev, const void *data)
+{
+ const struct device *uport_dev = data;
+ struct cxl_port *port;
+
+ if (!is_cxl_port(dev))
+ return 0;
+
+ port = to_cxl_port(dev);
+ return uport_dev == port->uport_dev;
+}
+
+/*
+ * Function takes a device reference on the port device. Caller should do a
+ * put_device() when done.
+ */
+static struct cxl_port *find_cxl_port_by_uport(struct device *uport_dev)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&cxl_bus_type, NULL, uport_dev, match_port_by_uport);
+ if (dev)
+ return to_cxl_port(dev);
+ return NULL;
+}
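As the comment above notes, find_cxl_port_by_uport() returns with a device reference held. Callers in this patch pair it with the put_cxl_port cleanup class so the put_device() happens automatically; a minimal caller sketch:

    struct cxl_port *port __free(put_cxl_port) =
        find_cxl_port_by_uport(uport_dev);
    if (!port)
        return -ENODEV;
    /* use port; the reference drops when it goes out of scope */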
+
+static int update_decoder_targets(struct device *dev, void *data)
+{
+ struct cxl_dport *dport = data;
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_decoder *cxld;
+ int i;
+
+ if (!is_switch_decoder(dev))
+ return 0;
+
+ cxlsd = to_cxl_switch_decoder(dev);
+ cxld = &cxlsd->cxld;
+ guard(rwsem_write)(&cxl_rwsem.region);
+
+ for (i = 0; i < cxld->interleave_ways; i++) {
+ if (cxld->target_map[i] == dport->port_id) {
+ cxlsd->target[i] = dport;
+ dev_dbg(dev, "dport%d found in target list, index %d\n",
+ dport->port_id, i);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+DEFINE_FREE(del_cxl_dport, struct cxl_dport *, if (!IS_ERR_OR_NULL(_T)) del_dport(_T))
+static struct cxl_dport *cxl_port_add_dport(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ struct cxl_dport *dport;
+ int rc;
+
+ device_lock_assert(&port->dev);
+ if (!port->dev.driver)
+ return ERR_PTR(-ENXIO);
+
+ dport = cxl_find_dport_by_dev(port, dport_dev);
+ if (dport) {
+ dev_dbg(&port->dev, "dport%d:%s already exists\n",
+ dport->port_id, dev_name(dport_dev));
+ return ERR_PTR(-EBUSY);
+ }
+
+ struct cxl_dport *new_dport __free(del_cxl_dport) =
+ devm_cxl_add_dport_by_dev(port, dport_dev);
+ if (IS_ERR(new_dport))
+ return new_dport;
+
+ cxl_switch_parse_cdat(new_dport);
+
+ if (ida_is_empty(&port->decoder_ida)) {
+ rc = devm_cxl_switch_port_decoders_setup(port);
+ if (rc)
+ return ERR_PTR(rc);
+ dev_dbg(&port->dev, "first dport%d:%s added with decoders\n",
+ new_dport->port_id, dev_name(dport_dev));
+ return no_free_ptr(new_dport);
+ }
+
+ /* New dport added, update the decoder targets */
+ device_for_each_child(&port->dev, new_dport, update_decoder_targets);
+
+ dev_dbg(&port->dev, "dport%d:%s added\n", new_dport->port_id,
+ dev_name(dport_dev));
+
+ return no_free_ptr(new_dport);
+}
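cxl_port_add_dport() leans on the cleanup.h ownership idiom: DEFINE_FREE() names a disposal routine for a pointer class, __free() arms it for scope exit, and no_free_ptr() disarms it to transfer ownership on success. Sketched in isolation with hypothetical make_dport()/setup() helpers:

    struct cxl_dport *dport __free(del_cxl_dport) = make_dport();
    if (IS_ERR(dport))
        return dport; /* del_cxl_dport skips ERR_PTR values */
    if (setup(dport))
        return ERR_PTR(-ENXIO); /* del_dport() runs on this exit */
    return no_free_ptr(dport); /* success: caller now owns the dport */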
+
+static struct cxl_dport *devm_cxl_create_port(struct device *ep_dev,
+ struct cxl_port *parent_port,
+ struct cxl_dport *parent_dport,
+ struct device *uport_dev,
+ struct device *dport_dev)
+{
+ resource_size_t component_reg_phys;
+
+ device_lock_assert(&parent_port->dev);
+ if (!parent_port->dev.driver) {
+ dev_warn(ep_dev,
+ "port %s:%s:%s disabled, failed to enumerate CXL.mem\n",
+ dev_name(&parent_port->dev), dev_name(uport_dev),
+ dev_name(dport_dev));
+ }
+
+ struct cxl_port *port __free(put_cxl_port) =
+ find_cxl_port_by_uport(uport_dev);
+ if (!port) {
+ component_reg_phys = find_component_registers(uport_dev);
+ port = devm_cxl_add_port(&parent_port->dev, uport_dev,
+ component_reg_phys, parent_dport);
+ if (IS_ERR(port))
+ return ERR_CAST(port);
+
+ /*
+ * retry to make sure a port is found. a port device
+ * reference is taken.
+ */
+ port = find_cxl_port_by_uport(uport_dev);
+ if (!port)
+ return ERR_PTR(-ENODEV);
+
+ dev_dbg(ep_dev, "created port %s:%s\n",
+ dev_name(&port->dev), dev_name(port->uport_dev));
+ } else {
+ /*
+ * Port was created right before this function is
+ * called. Signal the caller to deal with it.
+ */
+ return ERR_PTR(-EAGAIN);
+ }
+
+ guard(device)(&port->dev);
+ return cxl_port_add_dport(port, dport_dev);
+}
+
static int add_port_attach_ep(struct cxl_memdev *cxlmd,
struct device *uport_dev,
struct device *dport_dev)
{
struct device *dparent = grandparent(dport_dev);
struct cxl_dport *dport, *parent_dport;
- resource_size_t component_reg_phys;
int rc;
- if (!dparent) {
+ if (is_cxl_host_bridge(dparent)) {
/*
* The iteration reached the topology root without finding the
* CXL-root 'cxl_port' on a previous iteration, fail for now to
@@ -1559,42 +1707,31 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
}
struct cxl_port *parent_port __free(put_cxl_port) =
- find_cxl_port(dparent, &parent_dport);
+ find_cxl_port_by_uport(dparent->parent);
if (!parent_port) {
/* iterate to create this parent_port */
return -EAGAIN;
}
- /*
- * Definition with __free() here to keep the sequence of
- * dereferencing the device of the port before the parent_port releasing.
- */
- struct cxl_port *port __free(put_cxl_port) = NULL;
scoped_guard(device, &parent_port->dev) {
- if (!parent_port->dev.driver) {
- dev_warn(&cxlmd->dev,
- "port %s:%s disabled, failed to enumerate CXL.mem\n",
- dev_name(&parent_port->dev), dev_name(uport_dev));
- return -ENXIO;
+ parent_dport = cxl_find_dport_by_dev(parent_port, dparent);
+ if (!parent_dport) {
+ parent_dport = cxl_port_add_dport(parent_port, dparent);
+ if (IS_ERR(parent_dport))
+ return PTR_ERR(parent_dport);
}
- port = find_cxl_port_at(parent_port, dport_dev, &dport);
- if (!port) {
- component_reg_phys = find_component_registers(uport_dev);
- port = devm_cxl_add_port(&parent_port->dev, uport_dev,
- component_reg_phys, parent_dport);
- if (IS_ERR(port))
- return PTR_ERR(port);
-
- /* retry find to pick up the new dport information */
- port = find_cxl_port_at(parent_port, dport_dev, &dport);
- if (!port)
- return -ENXIO;
+ dport = devm_cxl_create_port(&cxlmd->dev, parent_port,
+ parent_dport, uport_dev,
+ dport_dev);
+ if (IS_ERR(dport)) {
+ /* Port already exists, restart iteration */
+ if (PTR_ERR(dport) == -EAGAIN)
+ return 0;
+ return PTR_ERR(dport);
}
}
- dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
- dev_name(&port->dev), dev_name(port->uport_dev));
rc = cxl_add_ep(dport, &cxlmd->dev);
if (rc == -EBUSY) {
/*
@@ -1607,6 +1744,25 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
return rc;
}
+static struct cxl_dport *find_or_add_dport(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ struct cxl_dport *dport;
+
+ device_lock_assert(&port->dev);
+ dport = cxl_find_dport_by_dev(port, dport_dev);
+ if (!dport) {
+ dport = cxl_port_add_dport(port, dport_dev);
+ if (IS_ERR(dport))
+ return dport;
+
+ /* New dport added, restart iteration */
+ return ERR_PTR(-EAGAIN);
+ }
+
+ return dport;
+}
+
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
struct device *dev = &cxlmd->dev;
@@ -1635,11 +1791,7 @@ retry:
struct device *uport_dev;
struct cxl_dport *dport;
- /*
- * The terminal "grandparent" in PCI is NULL and @platform_bus
- * for platform devices
- */
- if (!dport_dev || dport_dev == &platform_bus)
+ if (is_cxl_host_bridge(dport_dev))
return 0;
uport_dev = dport_dev->parent;
@@ -1653,12 +1805,26 @@ retry:
dev_name(iter), dev_name(dport_dev),
dev_name(uport_dev));
struct cxl_port *port __free(put_cxl_port) =
- find_cxl_port(dport_dev, &dport);
+ find_cxl_port_by_uport(uport_dev);
if (port) {
dev_dbg(&cxlmd->dev,
"found already registered port %s:%s\n",
dev_name(&port->dev),
dev_name(port->uport_dev));
+
+ /*
+ * RP port enumerated by cxl_acpi without dport will
+ * have the dport added here.
+ */
+ scoped_guard(device, &port->dev) {
+ dport = find_or_add_dport(port, dport_dev);
+ if (IS_ERR(dport)) {
+ if (PTR_ERR(dport) == -EAGAIN)
+ goto retry;
+ return PTR_ERR(dport);
+ }
+ }
+
rc = cxl_add_ep(dport, &cxlmd->dev);
/*
@@ -1710,24 +1876,24 @@ struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, "CXL");
static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
- struct cxl_port *port, int *target_map)
+ struct cxl_port *port)
{
+ struct cxl_decoder *cxld = &cxlsd->cxld;
int i;
- if (!target_map)
- return 0;
-
device_lock_assert(&port->dev);
if (xa_empty(&port->dports))
- return -EINVAL;
+ return 0;
- guard(rwsem_write)(&cxl_region_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.region);
for (i = 0; i < cxlsd->cxld.interleave_ways; i++) {
- struct cxl_dport *dport = find_dport(port, target_map[i]);
+ struct cxl_dport *dport = find_dport(port, cxld->target_map[i]);
- if (!dport)
- return -ENXIO;
+ if (!dport) {
+ /* dport may be activated later */
+ continue;
+ }
cxlsd->target[i] = dport;
}
@@ -1916,9 +2082,6 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, "CXL");
/**
* cxl_decoder_add_locked - Add a decoder with targets
* @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
- * @target_map: A list of downstream ports that this decoder can direct memory
- * traffic to. These numbers should correspond with the port number
- * in the PCIe Link Capabilities structure.
*
* Certain types of decoders may not have any targets. The main example of this
* is an endpoint device. A more awkward example is a hostbridge whose root
@@ -1932,7 +2095,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, "CXL");
* Return: Negative error code if the decoder wasn't properly configured; else
* returns 0.
*/
-int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
+int cxl_decoder_add_locked(struct cxl_decoder *cxld)
{
struct cxl_port *port;
struct device *dev;
@@ -1953,7 +2116,7 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
if (!is_endpoint_decoder(dev)) {
struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
- rc = decoder_populate_targets(cxlsd, port, target_map);
+ rc = decoder_populate_targets(cxlsd, port);
if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
dev_err(&port->dev,
"Failed to populate active decoder targets\n");
@@ -1972,9 +2135,6 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, "CXL");
/**
* cxl_decoder_add - Add a decoder with targets
* @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
- * @target_map: A list of downstream ports that this decoder can direct memory
- * traffic to. These numbers should correspond with the port number
- * in the PCIe Link Capabilities structure.
*
* This is the unlocked variant of cxl_decoder_add_locked().
* See cxl_decoder_add_locked().
@@ -1982,7 +2142,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, "CXL");
* Context: Process context. Takes and releases the device lock of the port that
* owns the @cxld.
*/
-int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
+int cxl_decoder_add(struct cxl_decoder *cxld)
{
struct cxl_port *port;
@@ -1995,18 +2155,15 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
port = to_cxl_port(cxld->dev.parent);
guard(device)(&port->dev);
- return cxl_decoder_add_locked(cxld, target_map);
+ return cxl_decoder_add_locked(cxld);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, "CXL");
static void cxld_unregister(void *dev)
{
- struct cxl_endpoint_decoder *cxled;
-
- if (is_endpoint_decoder(dev)) {
- cxled = to_cxl_endpoint_decoder(dev);
- cxl_decoder_kill_region(cxled);
- }
+ if (is_endpoint_decoder(dev))
+ cxl_decoder_detach(NULL, to_cxl_endpoint_decoder(dev), -1,
+ DETACH_INVALIDATE);
device_unregister(dev);
}
@@ -2293,7 +2450,7 @@ static const struct attribute_group *cxl_bus_attribute_groups[] = {
NULL,
};
-struct bus_type cxl_bus_type = {
+const struct bus_type cxl_bus_type = {
.name = "cxl",
.uevent = cxl_bus_uevent,
.match = cxl_bus_match,
diff --git a/drivers/cxl/core/ras.c b/drivers/cxl/core/ras.c
index 485a831695c7..2731ba3a0799 100644
--- a/drivers/cxl/core/ras.c
+++ b/drivers/cxl/core/ras.c
@@ -31,40 +31,38 @@ static void cxl_cper_trace_uncorr_port_prot_err(struct pci_dev *pdev,
ras_cap.header_log);
}
-static void cxl_cper_trace_corr_prot_err(struct pci_dev *pdev,
- struct cxl_ras_capability_regs ras_cap)
+static void cxl_cper_trace_corr_prot_err(struct cxl_memdev *cxlmd,
+ struct cxl_ras_capability_regs ras_cap)
{
u32 status = ras_cap.cor_status & ~ras_cap.cor_mask;
- struct cxl_dev_state *cxlds;
- cxlds = pci_get_drvdata(pdev);
- if (!cxlds)
- return;
-
- trace_cxl_aer_correctable_error(cxlds->cxlmd, status);
+ trace_cxl_aer_correctable_error(cxlmd, status);
}
-static void cxl_cper_trace_uncorr_prot_err(struct pci_dev *pdev,
- struct cxl_ras_capability_regs ras_cap)
+static void
+cxl_cper_trace_uncorr_prot_err(struct cxl_memdev *cxlmd,
+ struct cxl_ras_capability_regs ras_cap)
{
u32 status = ras_cap.uncor_status & ~ras_cap.uncor_mask;
- struct cxl_dev_state *cxlds;
u32 fe;
- cxlds = pci_get_drvdata(pdev);
- if (!cxlds)
- return;
-
if (hweight32(status) > 1)
fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
ras_cap.cap_control));
else
fe = status;
- trace_cxl_aer_uncorrectable_error(cxlds->cxlmd, status, fe,
+ trace_cxl_aer_uncorrectable_error(cxlmd, status, fe,
ras_cap.header_log);
}
+static int match_memdev_by_parent(struct device *dev, const void *uport)
+{
+ if (is_cxl_memdev(dev) && dev->parent == uport)
+ return 1;
+ return 0;
+}
+
static void cxl_cper_handle_prot_err(struct cxl_cper_prot_err_work_data *data)
{
unsigned int devfn = PCI_DEVFN(data->prot_err.agent_addr.device,
@@ -73,13 +71,12 @@ static void cxl_cper_handle_prot_err(struct cxl_cper_prot_err_work_data *data)
pci_get_domain_bus_and_slot(data->prot_err.agent_addr.segment,
data->prot_err.agent_addr.bus,
devfn);
+ struct cxl_memdev *cxlmd;
int port_type;
if (!pdev)
return;
- guard(device)(&pdev->dev);
-
port_type = pci_pcie_type(pdev);
if (port_type == PCI_EXP_TYPE_ROOT_PORT ||
port_type == PCI_EXP_TYPE_DOWNSTREAM ||
@@ -92,10 +89,20 @@ static void cxl_cper_handle_prot_err(struct cxl_cper_prot_err_work_data *data)
return;
}
+ guard(device)(&pdev->dev);
+ if (!pdev->dev.driver)
+ return;
+
+ struct device *mem_dev __free(put_device) = bus_find_device(
+ &cxl_bus_type, NULL, pdev, match_memdev_by_parent);
+ if (!mem_dev)
+ return;
+
+ cxlmd = to_cxl_memdev(mem_dev);
if (data->severity == AER_CORRECTABLE)
- cxl_cper_trace_corr_prot_err(pdev, data->ras_cap);
+ cxl_cper_trace_corr_prot_err(cxlmd, data->ras_cap);
else
- cxl_cper_trace_uncorr_prot_err(pdev, data->ras_cap);
+ cxl_cper_trace_uncorr_prot_err(cxlmd, data->ras_cap);
}
static void cxl_cper_prot_err_work_fn(struct work_struct *work)
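The protocol-error path now resolves the memdev through the CXL bus rather than PCI driver data, and bus_find_device() returns a referenced device; __free(put_device) guarantees the reference drops on every exit. A condensed sketch of the lookup:

    struct device *mem_dev __free(put_device) = bus_find_device(
        &cxl_bus_type, NULL, pdev, match_memdev_by_parent);
    if (!mem_dev)
        return; /* no memdev registered on top of this pdev */

    struct cxl_memdev *cxlmd = to_cxl_memdev(mem_dev);
    /* use cxlmd; put_device(mem_dev) is implicit at scope exit */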
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 6e5e1460068d..ae899f68551f 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/memregion.h>
#include <linux/genalloc.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/memory.h>
@@ -10,6 +11,7 @@
#include <linux/sort.h>
#include <linux/idr.h>
#include <linux/memory-tiers.h>
+#include <linux/string_choices.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
@@ -30,6 +32,12 @@
* 3. Decoder targets
*/
+/*
+ * nodemask with a bit set per node once the access_coordinates for that
+ * node have been updated by the CXL memory hotplug notifier.
+ */
+static nodemask_t nodemask_region_seen = NODE_MASK_NONE;
+
static struct cxl_region *to_cxl_region(struct device *dev);
#define __ACCESS_ATTR_RO(_level, _name) { \
@@ -141,16 +149,12 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
struct cxl_region_params *p = &cxlr->params;
ssize_t rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
return rc;
if (cxlr->mode != CXL_PARTMODE_PMEM)
- rc = sysfs_emit(buf, "\n");
- else
- rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%pUb\n", &p->uuid);
}
static int is_dup(struct device *match, void *data)
@@ -162,7 +166,7 @@ static int is_dup(struct device *match, void *data)
if (!is_cxl_region(match))
return 0;
- lockdep_assert_held(&cxl_region_rwsem);
+ lockdep_assert_held(&cxl_rwsem.region);
cxlr = to_cxl_region(match);
p = &cxlr->params;
@@ -192,27 +196,22 @@ static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
if (uuid_is_null(&temp))
return -EINVAL;
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_write_kill, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &region_rwsem)))
return rc;
if (uuid_equal(&p->uuid, &temp))
- goto out;
+ return len;
- rc = -EBUSY;
if (p->state >= CXL_CONFIG_ACTIVE)
- goto out;
+ return -EBUSY;
rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
if (rc < 0)
- goto out;
+ return rc;
uuid_copy(&p->uuid, &temp);
-out:
- up_write(&cxl_region_rwsem);
- if (rc)
- return rc;
return len;
}
static DEVICE_ATTR_RW(uuid);
@@ -237,7 +236,10 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
return -ENXIO;
}
- cpu_cache_invalidate_memregion(IORES_DESC_CXL);
+ if (!cxlr->params.res)
+ return -ENXIO;
+ cpu_cache_invalidate_memregion(cxlr->params.res->start,
+ resource_size(cxlr->params.res));
return 0;
}
@@ -246,6 +248,9 @@ static void cxl_region_decode_reset(struct cxl_region *cxlr, int count)
struct cxl_region_params *p = &cxlr->params;
int i;
+ if (test_bit(CXL_REGION_F_LOCK, &cxlr->flags))
+ return;
+
/*
* Before region teardown, attempt to flush and evict any data cached for
* this region, or scream loudly about missing arch / platform support
@@ -349,33 +354,40 @@ err:
return rc;
}
-static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len)
+static int queue_reset(struct cxl_region *cxlr)
{
- struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
- bool commit;
- ssize_t rc;
+ int rc;
- rc = kstrtobool(buf, &commit);
- if (rc)
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ /* Already in the requested state? */
+ if (p->state < CXL_CONFIG_COMMIT)
+ return 0;
+
+ p->state = CXL_CONFIG_RESET_PENDING;
+
+ return 0;
+}
+
+static int __commit(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int rc;
+
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
/* Already in the requested state? */
- if (commit && p->state >= CXL_CONFIG_COMMIT)
- goto out;
- if (!commit && p->state < CXL_CONFIG_COMMIT)
- goto out;
+ if (p->state >= CXL_CONFIG_COMMIT)
+ return 0;
/* Not ready to commit? */
- if (commit && p->state < CXL_CONFIG_ACTIVE) {
- rc = -ENXIO;
- goto out;
- }
+ if (p->state < CXL_CONFIG_ACTIVE)
+ return -ENXIO;
/*
* Invalidate caches before region setup to drop any speculative
@@ -383,33 +395,64 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
*/
rc = cxl_region_invalidate_memregion(cxlr);
if (rc)
- goto out;
+ return rc;
- if (commit) {
- rc = cxl_region_decode_commit(cxlr);
- if (rc == 0)
- p->state = CXL_CONFIG_COMMIT;
- } else {
- p->state = CXL_CONFIG_RESET_PENDING;
- up_write(&cxl_region_rwsem);
- device_release_driver(&cxlr->dev);
- down_write(&cxl_region_rwsem);
+ rc = cxl_region_decode_commit(cxlr);
+ if (rc)
+ return rc;
- /*
- * The lock was dropped, so need to revalidate that the reset is
- * still pending.
- */
- if (p->state == CXL_CONFIG_RESET_PENDING) {
- cxl_region_decode_reset(cxlr, p->interleave_ways);
- p->state = CXL_CONFIG_ACTIVE;
- }
+ p->state = CXL_CONFIG_COMMIT;
+
+ return 0;
+}
+
+static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ bool commit;
+ ssize_t rc;
+
+ rc = kstrtobool(buf, &commit);
+ if (rc)
+ return rc;
+
+ if (commit) {
+ rc = __commit(cxlr);
+ if (rc)
+ return rc;
+ return len;
}
-out:
- up_write(&cxl_region_rwsem);
+ if (test_bit(CXL_REGION_F_LOCK, &cxlr->flags))
+ return -EPERM;
+ rc = queue_reset(cxlr);
if (rc)
return rc;
+
+ /*
+ * Unmap the region and depend on the reset-pending state to ensure
+ * it does not go active again until after the reset.
+ */
+ device_release_driver(&cxlr->dev);
+
+ /*
+ * With the reset pending, take cxl_rwsem.region unconditionally
+ * to ensure the reset gets handled before returning.
+ */
+ guard(rwsem_write)(&cxl_rwsem.region);
+
+ /*
+ * Revalidate that the reset is still pending in case another
+ * thread already handled this reset.
+ */
+ if (p->state == CXL_CONFIG_RESET_PENDING) {
+ cxl_region_decode_reset(cxlr, p->interleave_ways);
+ p->state = CXL_CONFIG_ACTIVE;
+ }
+
return len;
}
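The rewritten reset side of commit_store() never holds cxl_rwsem.region across device_release_driver(), which is what forces the three-phase shape: mark the reset pending under a killable lock, release the driver unlocked, then re-take the lock unconditionally and revalidate. A sketch of the handshake:

    rc = queue_reset(cxlr); /* killable write lock inside */
    if (rc)
        return rc;
    device_release_driver(&cxlr->dev); /* unlocked: ->remove() may run */

    guard(rwsem_write)(&cxl_rwsem.region); /* must finish, so unconditional */
    if (p->state == CXL_CONFIG_RESET_PENDING) {
        /* no other thread handled it while the lock was dropped */
        cxl_region_decode_reset(cxlr, p->interleave_ways);
        p->state = CXL_CONFIG_ACTIVE;
    }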
@@ -420,45 +463,24 @@ static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
struct cxl_region_params *p = &cxlr->params;
ssize_t rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
- rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
}
static DEVICE_ATTR_RW(commit);
-static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
- int n)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct cxl_region *cxlr = to_cxl_region(dev);
-
- /*
- * Support tooling that expects to find a 'uuid' attribute for all
- * regions regardless of mode.
- */
- if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_PARTMODE_PMEM)
- return 0444;
- return a->mode;
-}
-
static ssize_t interleave_ways_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
- ssize_t rc;
+ int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
- rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%d\n", p->interleave_ways);
}
static const struct attribute_group *get_cxl_region_target_group(void);
@@ -493,23 +515,21 @@ static ssize_t interleave_ways_store(struct device *dev,
return -EINVAL;
}
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
- if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
- rc = -EBUSY;
- goto out;
- }
+
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
+ return -EBUSY;
save = p->interleave_ways;
p->interleave_ways = val;
rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
- if (rc)
+ if (rc) {
p->interleave_ways = save;
-out:
- up_write(&cxl_region_rwsem);
- if (rc)
return rc;
+ }
+
return len;
}
static DEVICE_ATTR_RW(interleave_ways);
@@ -520,15 +540,12 @@ static ssize_t interleave_granularity_show(struct device *dev,
{
struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
- ssize_t rc;
+ int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
- rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%d\n", p->interleave_granularity);
}
static ssize_t interleave_granularity_store(struct device *dev,
@@ -561,19 +578,15 @@ static ssize_t interleave_granularity_store(struct device *dev,
if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
return -EINVAL;
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
- if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
- rc = -EBUSY;
- goto out;
- }
+
+ if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
+ return -EBUSY;
p->interleave_granularity = val;
-out:
- up_write(&cxl_region_rwsem);
- if (rc)
- return rc;
+
return len;
}
static DEVICE_ATTR_RW(interleave_granularity);
@@ -584,17 +597,15 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
u64 resource = -1ULL;
- ssize_t rc;
+ int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
+
if (p->res)
resource = p->res->start;
- rc = sysfs_emit(buf, "%#llx\n", resource);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%#llx\n", resource);
}
static DEVICE_ATTR_RO(resource);
@@ -622,7 +633,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
struct resource *res;
u64 remainder = 0;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
/* Nothing to do... */
if (p->res && resource_size(p->res) == size)
@@ -664,7 +675,7 @@ static void cxl_region_iomem_release(struct cxl_region *cxlr)
struct cxl_region_params *p = &cxlr->params;
if (device_is_registered(&cxlr->dev))
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
if (p->res) {
/*
* Autodiscovered regions may not have been able to insert their
@@ -681,7 +692,7 @@ static int free_hpa(struct cxl_region *cxlr)
{
struct cxl_region_params *p = &cxlr->params;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
if (!p->res)
return 0;
@@ -705,15 +716,14 @@ static ssize_t size_store(struct device *dev, struct device_attribute *attr,
if (rc)
return rc;
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
return rc;
if (val)
rc = alloc_hpa(cxlr, val);
else
rc = free_hpa(cxlr);
- up_write(&cxl_region_rwsem);
if (rc)
return rc;
@@ -729,18 +739,30 @@ static ssize_t size_show(struct device *dev, struct device_attribute *attr,
u64 size = 0;
ssize_t rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
if (p->res)
size = resource_size(p->res);
- rc = sysfs_emit(buf, "%#llx\n", size);
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%#llx\n", size);
}
static DEVICE_ATTR_RW(size);
+static ssize_t extended_linear_cache_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ ssize_t rc;
+
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
+ return rc;
+ return sysfs_emit(buf, "%#llx\n", p->cache_size);
+}
+static DEVICE_ATTR_RO(extended_linear_cache_size);
+
static struct attribute *cxl_region_attrs[] = {
&dev_attr_uuid.attr,
&dev_attr_commit.attr,
@@ -749,9 +771,34 @@ static struct attribute *cxl_region_attrs[] = {
&dev_attr_resource.attr,
&dev_attr_size.attr,
&dev_attr_mode.attr,
+ &dev_attr_extended_linear_cache_size.attr,
NULL,
};
+static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
+ int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_region *cxlr = to_cxl_region(dev);
+
+ /*
+ * Support tooling that expects to find a 'uuid' attribute for all
+ * regions regardless of mode.
+ */
+ if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_PARTMODE_PMEM)
+ return 0444;
+
+ /*
+ * Don't display extended linear cache attribute if there is no
+ * extended linear cache.
+ */
+ if (a == &dev_attr_extended_linear_cache_size.attr &&
+ cxlr->params.cache_size == 0)
+ return 0;
+
+ return a->mode;
+}
+
static const struct attribute_group cxl_region_group = {
.attrs = cxl_region_attrs,
.is_visible = cxl_region_visible,
@@ -763,26 +810,20 @@ static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
struct cxl_endpoint_decoder *cxled;
int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
if (pos >= p->interleave_ways) {
dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
p->interleave_ways);
- rc = -ENXIO;
- goto out;
+ return -ENXIO;
}
cxled = p->targets[pos];
if (!cxled)
- rc = sysfs_emit(buf, "\n");
- else
- rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
-out:
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
}
static int check_commit_order(struct device *dev, void *data)
@@ -831,16 +872,16 @@ static int match_free_decoder(struct device *dev, const void *data)
return 1;
}
-static bool region_res_match_cxl_range(const struct cxl_region_params *p,
- struct range *range)
+static bool spa_maps_hpa(const struct cxl_region_params *p,
+ const struct range *range)
{
if (!p->res)
return false;
/*
- * If an extended linear cache region then the CXL range is assumed
- * to be fronted by the DRAM range in current known implementation.
- * This assumption will be made until a variant implementation exists.
+ * The extended linear cache region is constructed with a 1:1 ratio
+ * where the SPA maps equal amounts of DRAM and CXL HPA capacity with
+ * CXL decoders at the high end of the SPA range.
*/
return p->res->start + p->cache_size == range->start &&
p->res->end == range->end;
@@ -858,7 +899,7 @@ static int match_auto_decoder(struct device *dev, const void *data)
cxld = to_cxl_decoder(dev);
r = &cxld->hpa_range;
- if (region_res_match_cxl_range(p, r))
+ if (spa_maps_hpa(p, r))
return 1;
return 0;
@@ -897,7 +938,7 @@ cxl_port_pick_region_decoder(struct cxl_port *port,
/*
* This decoder is pinned registered as long as the endpoint decoder is
* registered, and endpoint decoder unregistration holds the
- * cxl_region_rwsem over unregister events, so no need to hold on to
+ * cxl_rwsem.region over unregister events, so no need to hold on to
* this extra reference.
*/
put_device(dev);
@@ -1052,6 +1093,16 @@ static int cxl_rr_assign_decoder(struct cxl_port *port, struct cxl_region *cxlr,
return 0;
}
+static void cxl_region_set_lock(struct cxl_region *cxlr,
+ struct cxl_decoder *cxld)
+{
+ if (!test_bit(CXL_DECODER_F_LOCK, &cxld->flags))
+ return;
+
+ set_bit(CXL_REGION_F_LOCK, &cxlr->flags);
+ clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
+}
+
/**
* cxl_port_attach_region() - track a region's interest in a port by endpoint
* @port: port to add a new region reference 'struct cxl_region_ref'
@@ -1088,7 +1139,7 @@ static int cxl_port_attach_region(struct cxl_port *port,
unsigned long index;
int rc = -EBUSY;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
cxl_rr = cxl_rr_load(port, cxlr);
if (cxl_rr) {
@@ -1163,6 +1214,8 @@ static int cxl_port_attach_region(struct cxl_port *port,
}
}
+ cxl_region_set_lock(cxlr, cxld);
+
rc = cxl_rr_ep_add(cxl_rr, cxled);
if (rc) {
dev_dbg(&cxlr->dev,
@@ -1198,7 +1251,7 @@ static void cxl_port_detach_region(struct cxl_port *port,
struct cxl_region_ref *cxl_rr;
struct cxl_ep *ep = NULL;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
cxl_rr = cxl_rr_load(port, cxlr);
if (!cxl_rr)
@@ -1321,7 +1374,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
struct cxl_endpoint_decoder *cxled)
{
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
- int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
+ int parent_iw, parent_ig, ig, iw, rc, pos = cxled->pos;
struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
@@ -1458,7 +1511,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
if (cxld->interleave_ways != iw ||
(iw > 1 && cxld->interleave_granularity != ig) ||
- !region_res_match_cxl_range(p, &cxld->hpa_range) ||
+ !spa_maps_hpa(p, &cxld->hpa_range) ||
((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
dev_err(&cxlr->dev,
"%s:%s %s expected iw: %d ig: %d %pr\n",
@@ -1469,9 +1522,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
dev_name(port->uport_dev), dev_name(&port->dev),
__func__, cxld->interleave_ways,
cxld->interleave_granularity,
- (cxld->flags & CXL_DECODER_F_ENABLE) ?
- "enabled" :
- "disabled",
+ str_enabled_disabled(cxld->flags & CXL_DECODER_F_ENABLE),
cxld->hpa_range.start, cxld->hpa_range.end);
return -ENXIO;
}
@@ -1511,11 +1562,12 @@ add_target:
cxl_rr->nr_targets_set);
return -ENXIO;
}
- } else
+ } else {
cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
- inc = 1;
+ cxlsd->cxld.target_map[cxl_rr->nr_targets_set] = ep->dport->port_id;
+ }
+ cxl_rr->nr_targets_set++;
out_target_set:
- cxl_rr->nr_targets_set += inc;
dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
dev_name(port->uport_dev), dev_name(&port->dev),
cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport_dev),
@@ -2094,27 +2146,43 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return 0;
}
-static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
+static struct cxl_region *
+__cxl_decoder_detach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos,
+ enum cxl_detach_mode mode)
{
- struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
- struct cxl_region *cxlr = cxled->cxld.region;
struct cxl_region_params *p;
- int rc = 0;
- lockdep_assert_held_write(&cxl_region_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.region);
- if (!cxlr)
- return 0;
+ if (!cxled) {
+ p = &cxlr->params;
- p = &cxlr->params;
- get_device(&cxlr->dev);
+ if (pos >= p->interleave_ways) {
+ dev_dbg(&cxlr->dev, "position %d out of range %d\n",
+ pos, p->interleave_ways);
+ return NULL;
+ }
+
+ if (!p->targets[pos])
+ return NULL;
+ cxled = p->targets[pos];
+ } else {
+ cxlr = cxled->cxld.region;
+ if (!cxlr)
+ return NULL;
+ p = &cxlr->params;
+ }
+
+ if (mode == DETACH_INVALIDATE)
+ cxled->part = -1;
if (p->state > CXL_CONFIG_ACTIVE) {
cxl_region_decode_reset(cxlr, p->interleave_ways);
p->state = CXL_CONFIG_ACTIVE;
}
- for (iter = ep_port; !is_cxl_root(iter);
+ for (struct cxl_port *iter = cxled_to_port(cxled); !is_cxl_root(iter);
iter = to_cxl_port(iter->dev.parent))
cxl_port_detach_region(iter, cxlr, cxled);
@@ -2125,7 +2193,7 @@ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
cxled->pos);
- goto out;
+ return NULL;
}
if (p->state == CXL_CONFIG_ACTIVE) {
@@ -2139,74 +2207,79 @@ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
.end = -1,
};
- /* notify the region driver that one of its targets has departed */
- up_write(&cxl_region_rwsem);
- device_release_driver(&cxlr->dev);
- down_write(&cxl_region_rwsem);
-out:
- put_device(&cxlr->dev);
- return rc;
+ get_device(&cxlr->dev);
+ return cxlr;
+}
+
+/*
+ * Clean up a decoder's interest in a region. There are 2 cases to
+ * handle: removing an unknown @cxled from a known position in a region
+ * (detach_target()) or removing a known @cxled from an unknown @cxlr
+ * (cxld_unregister()).
+ *
+ * When the detachment finds a region, release the region driver.
+ */
+int cxl_decoder_detach(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos,
+ enum cxl_detach_mode mode)
+{
+ struct cxl_region *detach;
+
+ /* when the decoder is being destroyed, lock unconditionally */
+ if (mode == DETACH_INVALIDATE) {
+ guard(rwsem_write)(&cxl_rwsem.region);
+ detach = __cxl_decoder_detach(cxlr, cxled, pos, mode);
+ } else {
+ int rc;
+
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
+ return rc;
+ detach = __cxl_decoder_detach(cxlr, cxled, pos, mode);
+ }
+
+ if (detach) {
+ device_release_driver(&detach->dev);
+ put_device(&detach->dev);
+ }
+ return 0;
}
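cxl_decoder_detach() picks the lock flavor by caller context: decoder teardown (DETACH_INVALIDATE) must not fail, so it takes an unconditional guard, while the sysfs-driven path stays killable. The two shapes, sketched with a hypothetical do_detach():

    /* teardown path: failure is not an option */
    guard(rwsem_write)(&cxl_rwsem.region);
    do_detach();

    /* user-driven path: a fatal signal aborts cleanly */
    ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
    if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
        return rc;
    do_detach();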
-void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
+static int __attach_target(struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled, int pos,
+ unsigned int state)
{
- down_write(&cxl_region_rwsem);
- cxled->part = -1;
- cxl_region_detach(cxled);
- up_write(&cxl_region_rwsem);
+ int rc;
+
+ if (state == TASK_INTERRUPTIBLE) {
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
+ return rc;
+ guard(rwsem_read)(&cxl_rwsem.dpa);
+ return cxl_region_attach(cxlr, cxled, pos);
+ }
+ guard(rwsem_write)(&cxl_rwsem.region);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
+ return cxl_region_attach(cxlr, cxled, pos);
}
static int attach_target(struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled, int pos,
unsigned int state)
{
- int rc = 0;
+ int rc = __attach_target(cxlr, cxled, pos, state);
- if (state == TASK_INTERRUPTIBLE)
- rc = down_write_killable(&cxl_region_rwsem);
- else
- down_write(&cxl_region_rwsem);
- if (rc)
- return rc;
-
- down_read(&cxl_dpa_rwsem);
- rc = cxl_region_attach(cxlr, cxled, pos);
- up_read(&cxl_dpa_rwsem);
- up_write(&cxl_region_rwsem);
-
- if (rc)
- dev_warn(cxled->cxld.dev.parent,
- "failed to attach %s to %s: %d\n",
- dev_name(&cxled->cxld.dev), dev_name(&cxlr->dev), rc);
+ if (rc == 0)
+ return 0;
+ dev_warn(cxled->cxld.dev.parent, "failed to attach %s to %s: %d\n",
+ dev_name(&cxled->cxld.dev), dev_name(&cxlr->dev), rc);
return rc;
}
static int detach_target(struct cxl_region *cxlr, int pos)
{
- struct cxl_region_params *p = &cxlr->params;
- int rc;
-
- rc = down_write_killable(&cxl_region_rwsem);
- if (rc)
- return rc;
-
- if (pos >= p->interleave_ways) {
- dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
- p->interleave_ways);
- rc = -ENXIO;
- goto out;
- }
-
- if (!p->targets[pos]) {
- rc = 0;
- goto out;
- }
-
- rc = cxl_region_detach(p->targets[pos]);
-out:
- up_write(&cxl_region_rwsem);
- return rc;
+ return cxl_decoder_detach(cxlr, NULL, pos, DETACH_ONLY);
}
static size_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
@@ -2411,6 +2484,7 @@ static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int i
dev->bus = &cxl_bus_type;
dev->type = &cxl_region_type;
cxlr->id = id;
+ cxl_region_set_lock(cxlr, &cxlrd->cxlsd.cxld);
return cxlr;
}
@@ -2422,14 +2496,8 @@ static bool cxl_region_update_coordinates(struct cxl_region *cxlr, int nid)
for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
if (cxlr->coord[i].read_bandwidth) {
- rc = 0;
- if (cxl_need_node_perf_attrs_update(nid))
- node_set_perf_attrs(nid, &cxlr->coord[i], i);
- else
- rc = cxl_update_hmat_access_coordinates(nid, cxlr, i);
-
- if (rc == 0)
- cset++;
+ node_update_perf_attrs(nid, &cxlr->coord[i], i);
+ cset++;
}
}
@@ -2451,22 +2519,26 @@ static int cxl_region_perf_attrs_callback(struct notifier_block *nb,
unsigned long action, void *arg)
{
struct cxl_region *cxlr = container_of(nb, struct cxl_region,
- memory_notifier);
- struct memory_notify *mnb = arg;
- int nid = mnb->status_change_nid;
+ node_notifier);
+ struct node_notify *nn = arg;
+ int nid = nn->nid;
int region_nid;
- if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
+ if (action != NODE_ADDED_FIRST_MEMORY)
return NOTIFY_DONE;
/*
- * No need to hold cxl_region_rwsem; region parameters are stable
+ * No need to hold cxl_rwsem.region; region parameters are stable
* within the cxl_region driver.
*/
region_nid = phys_to_target_node(cxlr->params.res->start);
if (nid != region_nid)
return NOTIFY_DONE;
+ /* No action needed if node bit already set */
+ if (node_test_and_set(nid, nodemask_region_seen))
+ return NOTIFY_DONE;
+
if (!cxl_region_update_coordinates(cxlr, nid))
return NOTIFY_DONE;
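nodemask_region_seen turns repeated first-memory notifications into a once-per-node update: node_test_and_set() atomically returns the old bit while setting it, so only the first notifier invocation for a given nid proceeds. The idiom in isolation:

    static nodemask_t seen = NODE_MASK_NONE; /* illustrative copy */

    if (node_test_and_set(nid, seen))
        return NOTIFY_DONE; /* this node was already handled */
    /* first notification for nid: update coordinates exactly once */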
@@ -2483,7 +2555,7 @@ static int cxl_region_calculate_adistance(struct notifier_block *nb,
int region_nid;
/*
- * No need to hold cxl_region_rwsem; region parameters are stable
+ * No need to hold cxl_rwsem.region; region parameters are stable
* within the cxl_region driver.
*/
region_nid = phys_to_target_node(cxlr->params.res->start);
@@ -2632,17 +2704,13 @@ static ssize_t region_show(struct device *dev, struct device_attribute *attr,
struct cxl_decoder *cxld = to_cxl_decoder(dev);
ssize_t rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc)
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
if (cxld->region)
- rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
- else
- rc = sysfs_emit(buf, "\n");
- up_read(&cxl_region_rwsem);
-
- return rc;
+ return sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
+ return sysfs_emit(buf, "\n");
}
DEVICE_ATTR_RO(region);
@@ -2847,7 +2915,7 @@ static int __cxl_dpa_to_region(struct device *dev, void *arg)
if (!cxled || !cxled->dpa_res || !resource_size(cxled->dpa_res))
return 0;
- if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
+ if (!cxl_resource_contains_addr(cxled->dpa_res, dpa))
return 0;
/*
@@ -2902,28 +2970,119 @@ static bool cxl_is_hpa_in_chunk(u64 hpa, struct cxl_region *cxlr, int pos)
return false;
}
-u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
- u64 dpa)
+#define CXL_POS_ZERO 0
+/**
+ * cxl_validate_translation_params
+ * @eiw: encoded interleave ways
+ * @eig: encoded interleave granularity
+ * @pos: position in interleave
+ *
+ * Callers pass CXL_POS_ZERO when no position parameter needs validating.
+ *
+ * Returns: 0 on success, -EINVAL on first invalid parameter
+ */
+int cxl_validate_translation_params(u8 eiw, u16 eig, int pos)
{
- struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
- u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa;
- struct cxl_region_params *p = &cxlr->params;
- struct cxl_endpoint_decoder *cxled = NULL;
- u16 eig = 0;
- u8 eiw = 0;
- int pos;
+ int ways, gran;
- for (int i = 0; i < p->nr_targets; i++) {
- cxled = p->targets[i];
- if (cxlmd == cxled_to_memdev(cxled))
- break;
+ if (eiw_to_ways(eiw, &ways)) {
+ pr_debug("%s: invalid eiw=%u\n", __func__, eiw);
+ return -EINVAL;
+ }
+ if (eig_to_granularity(eig, &gran)) {
+ pr_debug("%s: invalid eig=%u\n", __func__, eig);
+ return -EINVAL;
+ }
+ if (pos < 0 || pos >= ways) {
+ pr_debug("%s: invalid pos=%d for ways=%u\n", __func__, pos,
+ ways);
+ return -EINVAL;
}
- if (!cxled || cxlmd != cxled_to_memdev(cxled))
+
+ return 0;
+}
+EXPORT_SYMBOL_FOR_MODULES(cxl_validate_translation_params, "cxl_translate");
+
+u64 cxl_calculate_dpa_offset(u64 hpa_offset, u8 eiw, u16 eig)
+{
+ u64 dpa_offset, bits_lower, bits_upper, temp;
+ int ret;
+
+ ret = cxl_validate_translation_params(eiw, eig, CXL_POS_ZERO);
+ if (ret)
return ULLONG_MAX;
- pos = cxled->pos;
- ways_to_eiw(p->interleave_ways, &eiw);
- granularity_to_eig(p->interleave_granularity, &eig);
+ /*
+ * DPA offset: CXL Spec 3.2 Section 8.2.4.20.13
+ * Lower bits [IG+7:0] pass through unchanged
+ * (eiw < 8)
+ * Per spec: DPAOffset[51:IG+8] = (HPAOffset[51:IG+IW+8] >> IW)
+ * Clear the position bits to isolate upper section, then
+ * reverse the left shift by eiw that occurred during DPA->HPA
+ * (eiw >= 8)
+ * Per spec: DPAOffset[51:IG+8] = HPAOffset[51:IG+IW] / 3
+ * Extract upper bits from the correct bit range and divide by 3
+ * to recover the original DPA upper bits
+ */
+ bits_lower = hpa_offset & GENMASK_ULL(eig + 7, 0);
+ if (eiw < 8) {
+ temp = hpa_offset & ~GENMASK_ULL(eig + eiw + 8 - 1, 0);
+ dpa_offset = temp >> eiw;
+ } else {
+ bits_upper = div64_u64(hpa_offset >> (eig + eiw), 3);
+ dpa_offset = bits_upper << (eig + 8);
+ }
+ dpa_offset |= bits_lower;
+
+ return dpa_offset;
+}
+EXPORT_SYMBOL_FOR_MODULES(cxl_calculate_dpa_offset, "cxl_translate");
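A worked example helps check the eiw < 8 bit surgery: with a 2-way interleave (eiw = 1) at 256-byte granularity (eig = 0), the position lives in bit 8, so hpa_offset 0x300 decodes back to dpa_offset 0x100:

    /* eiw = 1, eig = 0, hpa_offset = 0x300 */
    bits_lower = 0x300 & GENMASK_ULL(7, 0);  /* = 0x000 */
    temp       = 0x300 & ~GENMASK_ULL(8, 0); /* = 0x200, bit 8 (pos) cleared */
    dpa_offset = (temp >> 1) | bits_lower;   /* = 0x100 */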
+
+int cxl_calculate_position(u64 hpa_offset, u8 eiw, u16 eig)
+{
+ unsigned int ways = 0;
+ u64 shifted, rem;
+ int pos, ret;
+
+ ret = cxl_validate_translation_params(eiw, eig, CXL_POS_ZERO);
+ if (ret)
+ return ret;
+
+ if (!eiw)
+ /* position is 0 if no interleaving */
+ return 0;
+
+ /*
+ * Interleave position: CXL Spec 3.2 Section 8.2.4.20.13
+ * eiw < 8
+ * Position is in the IW bits at HPA_OFFSET[IG+8+IW-1:IG+8].
+ * Per spec "remove IW bits starting with bit position IG+8"
+ * eiw >= 8
+ * Position is not explicitly stored in HPA_OFFSET bits. It is
+ * derived from the modulo operation of the upper bits using
+ * the total number of interleave ways.
+ */
+ if (eiw < 8) {
+ pos = (hpa_offset >> (eig + 8)) & GENMASK(eiw - 1, 0);
+ } else {
+ shifted = hpa_offset >> (eig + 8);
+ eiw_to_ways(eiw, &ways);
+ div64_u64_rem(shifted, ways, &rem);
+ pos = rem;
+ }
+
+ return pos;
+}
+EXPORT_SYMBOL_FOR_MODULES(cxl_calculate_position, "cxl_translate");
+
+u64 cxl_calculate_hpa_offset(u64 dpa_offset, int pos, u8 eiw, u16 eig)
+{
+ u64 mask_upper, hpa_offset, bits_upper;
+ int ret;
+
+ ret = cxl_validate_translation_params(eiw, eig, pos);
+ if (ret)
+ return ULLONG_MAX;
/*
* The device position in the region interleave set was removed
@@ -2935,9 +3094,6 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
* 8.2.4.19.13 Implementation Note: Device Decode Logic
*/
- /* Remove the dpa base */
- dpa_offset = dpa - cxl_dpa_resource_start(cxled);
-
mask_upper = GENMASK_ULL(51, eig + 8);
if (eiw < 8) {
@@ -2952,26 +3108,115 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
/* The lower bits remain unchanged */
hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
+ return hpa_offset;
+}
+EXPORT_SYMBOL_FOR_MODULES(cxl_calculate_hpa_offset, "cxl_translate");
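
On the plain modulo decode path these three helpers are mutual inverses. A
hedged sketch of the round-trip property one would expect, assuming the
helpers are built standalone with u64 mapped to uint64_t (illustrative
values; not kernel code):

	#include <assert.h>
	#include <stdint.h>

	/* prototypes as in drivers/cxl/cxl.h, with u64 -> uint64_t */
	uint64_t cxl_calculate_hpa_offset(uint64_t dpa_offset, int pos,
					  uint8_t eiw, uint16_t eig);
	uint64_t cxl_calculate_dpa_offset(uint64_t hpa_offset, uint8_t eiw,
					  uint16_t eig);
	int cxl_calculate_position(uint64_t hpa_offset, uint8_t eiw, uint16_t eig);

	void check_round_trip(void)
	{
		uint64_t dpa_offset = 0x5000;
		int pos = 1;		/* device position in the interleave */
		uint8_t eiw = 1;	/* 2 ways */
		uint16_t eig = 0;	/* 256-byte granularity */
		uint64_t hpa_offset;

		/* expected: 0x5000 << 1 with pos in bit 8, i.e. 0xa100 */
		hpa_offset = cxl_calculate_hpa_offset(dpa_offset, pos, eiw, eig);
		assert(cxl_calculate_position(hpa_offset, eiw, eig) == pos);
		assert(cxl_calculate_dpa_offset(hpa_offset, eiw, eig) == dpa_offset);
	}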
+
+u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
+ u64 dpa)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled = NULL;
+ u64 dpa_offset, hpa_offset, hpa;
+ u16 eig = 0;
+ u8 eiw = 0;
+ int pos;
+
+ for (int i = 0; i < p->nr_targets; i++) {
+ if (cxlmd == cxled_to_memdev(p->targets[i])) {
+ cxled = p->targets[i];
+ break;
+ }
+ }
+ if (!cxled)
+ return ULLONG_MAX;
+
+ pos = cxled->pos;
+ ways_to_eiw(p->interleave_ways, &eiw);
+ granularity_to_eig(p->interleave_granularity, &eig);
+
+ dpa_offset = dpa - cxl_dpa_resource_start(cxled);
+ hpa_offset = cxl_calculate_hpa_offset(dpa_offset, pos, eiw, eig);
+
/* Apply the hpa_offset to the region base address */
hpa = hpa_offset + p->res->start + p->cache_size;
/* Root decoder translation overrides typical modulo decode */
- if (cxlrd->hpa_to_spa)
- hpa = cxlrd->hpa_to_spa(cxlrd, hpa);
+ if (cxlrd->ops.hpa_to_spa)
+ hpa = cxlrd->ops.hpa_to_spa(cxlrd, hpa);
- if (hpa < p->res->start || hpa > p->res->end) {
+ if (!cxl_resource_contains_addr(p->res, hpa)) {
dev_dbg(&cxlr->dev,
"Addr trans fail: hpa 0x%llx not in region\n", hpa);
return ULLONG_MAX;
}
/* Simple chunk check, by pos & gran, only applies to modulo decodes */
- if (!cxlrd->hpa_to_spa && (!cxl_is_hpa_in_chunk(hpa, cxlr, pos)))
+ if (!cxlrd->ops.hpa_to_spa && !cxl_is_hpa_in_chunk(hpa, cxlr, pos))
return ULLONG_MAX;
return hpa;
}
+struct dpa_result {
+ struct cxl_memdev *cxlmd;
+ u64 dpa;
+};
+
+static int region_offset_to_dpa_result(struct cxl_region *cxlr, u64 offset,
+ struct dpa_result *result)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_endpoint_decoder *cxled;
+ u64 hpa, hpa_offset, dpa_offset;
+ u16 eig = 0;
+ u8 eiw = 0;
+ int pos;
+
+ lockdep_assert_held(&cxl_rwsem.region);
+ lockdep_assert_held(&cxl_rwsem.dpa);
+
+ /* Input validation ensures valid ways and gran */
+ granularity_to_eig(p->interleave_granularity, &eig);
+ ways_to_eiw(p->interleave_ways, &eiw);
+
+ /*
+ * If the root decoder has SPA to CXL HPA callback, use it. Otherwise
+ * CXL HPA is assumed to equal SPA.
+ */
+ if (cxlrd->ops.spa_to_hpa) {
+ hpa = cxlrd->ops.spa_to_hpa(cxlrd, p->res->start + offset);
+ hpa_offset = hpa - p->res->start;
+ } else {
+ hpa_offset = offset;
+ }
+
+ pos = cxl_calculate_position(hpa_offset, eiw, eig);
+ if (pos < 0 || pos >= p->nr_targets) {
+ dev_dbg(&cxlr->dev, "Invalid position %d for %d targets\n",
+ pos, p->nr_targets);
+ return -ENXIO;
+ }
+
+ dpa_offset = cxl_calculate_dpa_offset(hpa_offset, eiw, eig);
+
+ /* Look-up and return the result: a memdev and a DPA */
+ for (int i = 0; i < p->nr_targets; i++) {
+ cxled = p->targets[i];
+ if (cxled->pos != pos)
+ continue;
+ result->cxlmd = cxled_to_memdev(cxled);
+ result->dpa = cxl_dpa_resource_start(cxled) + dpa_offset;
+
+ return 0;
+ }
+ dev_err(&cxlr->dev, "No device found for position %d\n", pos);
+
+ return -ENXIO;
+}
+
static struct lock_class_key cxl_pmem_region_key;
static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
@@ -2981,7 +3226,7 @@ static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
struct device *dev;
int i;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
if (p->state != CXL_CONFIG_COMMIT)
return -ENXIO;
@@ -2993,7 +3238,7 @@ static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
cxlr_pmem->hpa_range.start = p->res->start;
cxlr_pmem->hpa_range.end = p->res->end;
- /* Snapshot the region configuration underneath the cxl_region_rwsem */
+ /* Snapshot the region configuration underneath the cxl_rwsem.region */
cxlr_pmem->nr_mappings = p->nr_targets;
for (i = 0; i < p->nr_targets; i++) {
struct cxl_endpoint_decoder *cxled = p->targets[i];
@@ -3070,7 +3315,7 @@ static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
struct cxl_dax_region *cxlr_dax;
struct device *dev;
- guard(rwsem_read)(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.region);
if (p->state != CXL_CONFIG_COMMIT)
return ERR_PTR(-ENXIO);
@@ -3270,11 +3515,8 @@ static int match_region_by_range(struct device *dev, const void *data)
cxlr = to_cxl_region(dev);
p = &cxlr->params;
- guard(rwsem_read)(&cxl_region_rwsem);
- if (p->res && p->res->start == r->start && p->res->end == r->end)
- return 1;
-
- return 0;
+ guard(rwsem_read)(&cxl_rwsem.region);
+ return spa_maps_hpa(p, r);
}
static int cxl_extended_linear_cache_resize(struct cxl_region *cxlr,
@@ -3282,15 +3524,10 @@ static int cxl_extended_linear_cache_resize(struct cxl_region *cxlr,
{
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
struct cxl_region_params *p = &cxlr->params;
- int nid = phys_to_target_node(res->start);
resource_size_t size = resource_size(res);
resource_size_t cache_size, start;
- int rc;
-
- rc = cxl_acpi_get_extended_linear_cache_size(res, nid, &cache_size);
- if (rc)
- return rc;
+ cache_size = cxlrd->cache_size;
if (!cache_size)
return 0;
@@ -3330,7 +3567,7 @@ static int __construct_region(struct cxl_region *cxlr,
struct resource *res;
int rc;
- guard(rwsem_write)(&cxl_region_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.region);
p = &cxlr->params;
if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_err(cxlmd->dev.parent,
@@ -3360,6 +3597,10 @@ static int __construct_region(struct cxl_region *cxlr,
"Extended linear cache calculation failed rc:%d\n", rc);
}
+ rc = sysfs_update_group(&cxlr->dev.kobj, &cxl_region_group);
+ if (rc)
+ return rc;
+
rc = insert_resource(cxlrd->res, res);
if (rc) {
/*
@@ -3466,10 +3707,10 @@ int cxl_add_to_region(struct cxl_endpoint_decoder *cxled)
attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);
- down_read(&cxl_region_rwsem);
- p = &cxlr->params;
- attach = p->state == CXL_CONFIG_COMMIT;
- up_read(&cxl_region_rwsem);
+ scoped_guard(rwsem_read, &cxl_rwsem.region) {
+ p = &cxlr->params;
+ attach = p->state == CXL_CONFIG_COMMIT;
+ }
if (attach) {
/*
@@ -3494,12 +3735,12 @@ u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa)
if (!endpoint)
return ~0ULL;
- guard(rwsem_write)(&cxl_region_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.region);
xa_for_each(&endpoint->regions, index, iter) {
struct cxl_region_params *p = &iter->region->params;
- if (p->res->start <= spa && spa <= p->res->end) {
+ if (cxl_resource_contains_addr(p->res, spa)) {
if (!p->cache_size)
return ~0ULL;
@@ -3527,48 +3768,155 @@ static void shutdown_notifiers(void *_cxlr)
{
struct cxl_region *cxlr = _cxlr;
- unregister_memory_notifier(&cxlr->memory_notifier);
+ unregister_node_notifier(&cxlr->node_notifier);
unregister_mt_adistance_algorithm(&cxlr->adist_notifier);
}
-static int cxl_region_probe(struct device *dev)
+static void remove_debugfs(void *dentry)
+{
+ debugfs_remove_recursive(dentry);
+}
+
+static int validate_region_offset(struct cxl_region *cxlr, u64 offset)
{
- struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
+ resource_size_t region_size;
+ u64 hpa;
+
+ if (offset < p->cache_size) {
+ dev_err(&cxlr->dev,
+ "Offset %#llx is within extended linear cache %pa\n",
+ offset, &p->cache_size);
+ return -EINVAL;
+ }
+
+ region_size = resource_size(p->res);
+ if (offset >= region_size) {
+ dev_err(&cxlr->dev, "Offset %#llx exceeds region size %pa\n",
+ offset, &region_size);
+ return -EINVAL;
+ }
+
+ hpa = p->res->start + offset;
+ if (hpa < p->res->start || hpa > p->res->end) {
+ dev_err(&cxlr->dev, "HPA %#llx not in region %pr\n", hpa,
+ p->res);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cxl_region_debugfs_poison_inject(void *data, u64 offset)
+{
+ struct dpa_result result = { .dpa = ULLONG_MAX, .cxlmd = NULL };
+ struct cxl_region *cxlr = data;
int rc;
- rc = down_read_interruptible(&cxl_region_rwsem);
- if (rc) {
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return rc;
+
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return rc;
+
+ if (validate_region_offset(cxlr, offset))
+ return -EINVAL;
+
+ offset -= cxlr->params.cache_size;
+ rc = region_offset_to_dpa_result(cxlr, offset, &result);
+ if (rc || !result.cxlmd || result.dpa == ULLONG_MAX) {
+ dev_dbg(&cxlr->dev,
+ "Failed to resolve DPA for region offset %#llx rc %d\n",
+ offset, rc);
+
+ return rc ? rc : -EINVAL;
+ }
+
+ return cxl_inject_poison_locked(result.cxlmd, result.dpa);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_inject_fops, NULL,
+ cxl_region_debugfs_poison_inject, "%llx\n");
+
+static int cxl_region_debugfs_poison_clear(void *data, u64 offset)
+{
+ struct dpa_result result = { .dpa = ULLONG_MAX, .cxlmd = NULL };
+ struct cxl_region *cxlr = data;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return rc;
+
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return rc;
+
+ if (validate_region_offset(cxlr, offset))
+ return -EINVAL;
+
+ offset -= cxlr->params.cache_size;
+ rc = region_offset_to_dpa_result(cxlr, offset, &result);
+ if (rc || !result.cxlmd || result.dpa == ULLONG_MAX) {
+ dev_dbg(&cxlr->dev,
+ "Failed to resolve DPA for region offset %#llx rc %d\n",
+ offset, rc);
+
+ return rc ? rc : -EINVAL;
+ }
+
+ return cxl_clear_poison_locked(result.cxlmd, result.dpa);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_clear_fops, NULL,
+ cxl_region_debugfs_poison_clear, "%llx\n");
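
With both attributes registered (see cxl_region_probe() below), poison can be
exercised from userspace by writing a hex region offset, e.g. writing 1000 to
/sys/kernel/debug/cxl/regionN/inject_poison injects poison at region offset
0x1000 (the "%llx" format parses input as hex; the path assumes the usual cxl
debugfs root created by cxl_debugfs_create_dir()). The offset is resolved to a
memdev and DPA via region_offset_to_dpa_result() before the poison command is
issued.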
+
+static int cxl_region_can_probe(struct cxl_region *cxlr)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem))) {
dev_dbg(&cxlr->dev, "probe interrupted\n");
return rc;
}
if (p->state < CXL_CONFIG_COMMIT) {
dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
- rc = -ENXIO;
- goto out;
+ return -ENXIO;
}
if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) {
dev_err(&cxlr->dev,
"failed to activate, re-commit region and retry\n");
- rc = -ENXIO;
- goto out;
+ return -ENXIO;
}
+ return 0;
+}
+
+static int cxl_region_probe(struct device *dev)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ bool poison_supported = true;
+ int rc;
+
+ rc = cxl_region_can_probe(cxlr);
+ if (rc)
+ return rc;
+
/*
* From this point on any path that changes the region's state away from
* CXL_CONFIG_COMMIT is also responsible for releasing the driver.
*/
-out:
- up_read(&cxl_region_rwsem);
- if (rc)
- return rc;
-
- cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback;
- cxlr->memory_notifier.priority = CXL_CALLBACK_PRI;
- register_memory_notifier(&cxlr->memory_notifier);
+ cxlr->node_notifier.notifier_call = cxl_region_perf_attrs_callback;
+ cxlr->node_notifier.priority = CXL_CALLBACK_PRI;
+ register_node_notifier(&cxlr->node_notifier);
cxlr->adist_notifier.notifier_call = cxl_region_calculate_adistance;
cxlr->adist_notifier.priority = 100;
@@ -3578,6 +3926,31 @@ out:
if (rc)
return rc;
+ /* Create poison attributes if all memdevs support the capabilities */
+ for (int i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+
+ if (!cxl_memdev_has_poison_cmd(cxlmd, CXL_POISON_ENABLED_INJECT) ||
+ !cxl_memdev_has_poison_cmd(cxlmd, CXL_POISON_ENABLED_CLEAR)) {
+ poison_supported = false;
+ break;
+ }
+ }
+
+ if (poison_supported) {
+ struct dentry *dentry;
+
+ dentry = cxl_debugfs_create_dir(dev_name(dev));
+ debugfs_create_file("inject_poison", 0200, dentry, cxlr,
+ &cxl_poison_inject_fops);
+ debugfs_create_file("clear_poison", 0200, dentry, cxlr,
+ &cxl_poison_clear_fops);
+ rc = devm_add_action_or_reset(dev, remove_debugfs, dentry);
+ if (rc)
+ return rc;
+ }
+
switch (cxlr->mode) {
case CXL_PARTMODE_PMEM:
rc = devm_cxl_region_edac_register(cxlr);
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index 25ebfbc1616c..a972e4ef1936 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -214,12 +214,16 @@ TRACE_EVENT(cxl_overflow,
#define CXL_EVENT_RECORD_FLAG_PERF_DEGRADED BIT(4)
#define CXL_EVENT_RECORD_FLAG_HW_REPLACE BIT(5)
#define CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID BIT(6)
+#define CXL_EVENT_RECORD_FLAG_LD_ID_VALID BIT(7)
+#define CXL_EVENT_RECORD_FLAG_HEAD_ID_VALID BIT(8)
#define show_hdr_flags(flags) __print_flags(flags, " | ", \
{ CXL_EVENT_RECORD_FLAG_PERMANENT, "PERMANENT_CONDITION" }, \
{ CXL_EVENT_RECORD_FLAG_MAINT_NEEDED, "MAINTENANCE_NEEDED" }, \
{ CXL_EVENT_RECORD_FLAG_PERF_DEGRADED, "PERFORMANCE_DEGRADED" }, \
{ CXL_EVENT_RECORD_FLAG_HW_REPLACE, "HARDWARE_REPLACEMENT_NEEDED" }, \
- { CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID, "MAINT_OP_SUB_CLASS_VALID" } \
+ { CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID, "MAINT_OP_SUB_CLASS_VALID" }, \
+ { CXL_EVENT_RECORD_FLAG_LD_ID_VALID, "LD_ID_VALID" }, \
+ { CXL_EVENT_RECORD_FLAG_HEAD_ID_VALID, "HEAD_ID_VALID" } \
)
/*
@@ -247,7 +251,9 @@ TRACE_EVENT(cxl_overflow,
__field(u64, hdr_timestamp) \
__field(u8, hdr_length) \
__field(u8, hdr_maint_op_class) \
- __field(u8, hdr_maint_op_sub_class)
+ __field(u8, hdr_maint_op_sub_class) \
+ __field(u16, hdr_ld_id) \
+ __field(u8, hdr_head_id)
#define CXL_EVT_TP_fast_assign(cxlmd, l, hdr) \
__assign_str(memdev); \
@@ -260,18 +266,22 @@ TRACE_EVENT(cxl_overflow,
__entry->hdr_related_handle = le16_to_cpu((hdr).related_handle); \
__entry->hdr_timestamp = le64_to_cpu((hdr).timestamp); \
__entry->hdr_maint_op_class = (hdr).maint_op_class; \
- __entry->hdr_maint_op_sub_class = (hdr).maint_op_sub_class
+ __entry->hdr_maint_op_sub_class = (hdr).maint_op_sub_class; \
+ __entry->hdr_ld_id = le16_to_cpu((hdr).ld_id); \
+ __entry->hdr_head_id = (hdr).head_id
#define CXL_EVT_TP_printk(fmt, ...) \
TP_printk("memdev=%s host=%s serial=%lld log=%s : time=%llu uuid=%pUb " \
"len=%d flags='%s' handle=%x related_handle=%x " \
- "maint_op_class=%u maint_op_sub_class=%u : " fmt, \
+ "maint_op_class=%u maint_op_sub_class=%u " \
+ "ld_id=%x head_id=%x : " fmt, \
__get_str(memdev), __get_str(host), __entry->serial, \
cxl_event_log_type_str(__entry->log), \
__entry->hdr_timestamp, &__entry->hdr_uuid, __entry->hdr_length,\
show_hdr_flags(__entry->hdr_flags), __entry->hdr_handle, \
__entry->hdr_related_handle, __entry->hdr_maint_op_class, \
__entry->hdr_maint_op_sub_class, \
+ __entry->hdr_ld_id, __entry->hdr_head_id, \
##__VA_ARGS__)
TRACE_EVENT(cxl_generic_event,
@@ -496,7 +506,10 @@ TRACE_EVENT(cxl_general_media,
uuid_copy(&__entry->region_uuid, &uuid_null);
}
__entry->cme_threshold_ev_flags = rec->cme_threshold_ev_flags;
- __entry->cme_count = get_unaligned_le24(rec->cme_count);
+ if (rec->media_hdr.descriptor & CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
+ __entry->cme_count = get_unaligned_le24(rec->cme_count);
+ else
+ __entry->cme_count = 0;
),
CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \
@@ -648,7 +661,10 @@ TRACE_EVENT(cxl_dram,
CXL_EVENT_GEN_MED_COMP_ID_SIZE);
__entry->sub_channel = rec->sub_channel;
__entry->cme_threshold_ev_flags = rec->cme_threshold_ev_flags;
- __entry->cvme_count = get_unaligned_le24(rec->cvme_count);
+ if (rec->media_hdr.descriptor & CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
+ __entry->cvme_count = get_unaligned_le24(rec->cvme_count);
+ else
+ __entry->cvme_count = 0;
),
CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' sub_type='%s' " \
@@ -871,6 +887,111 @@ TRACE_EVENT(cxl_memory_module,
)
);
+/*
+ * Memory Sparing Event Record - MSER
+ *
+ * CXL rev 3.2 section 8.2.10.2.1.4; Table 8-60
+ */
+#define CXL_MSER_QUERY_RESOURCE_FLAG BIT(0)
+#define CXL_MSER_HARD_SPARING_FLAG BIT(1)
+#define CXL_MSER_DEV_INITED_FLAG BIT(2)
+#define show_mem_sparing_flags(flags) __print_flags(flags, "|", \
+ { CXL_MSER_QUERY_RESOURCE_FLAG, "Query Resources" }, \
+ { CXL_MSER_HARD_SPARING_FLAG, "Hard Sparing" }, \
+ { CXL_MSER_DEV_INITED_FLAG, "Device Initiated Sparing" } \
+)
+
+#define CXL_MSER_VALID_CHANNEL BIT(0)
+#define CXL_MSER_VALID_RANK BIT(1)
+#define CXL_MSER_VALID_NIBBLE BIT(2)
+#define CXL_MSER_VALID_BANK_GROUP BIT(3)
+#define CXL_MSER_VALID_BANK BIT(4)
+#define CXL_MSER_VALID_ROW BIT(5)
+#define CXL_MSER_VALID_COLUMN BIT(6)
+#define CXL_MSER_VALID_COMPONENT_ID BIT(7)
+#define CXL_MSER_VALID_COMPONENT_ID_FORMAT BIT(8)
+#define CXL_MSER_VALID_SUB_CHANNEL BIT(9)
+#define show_mem_sparing_valid_flags(flags) __print_flags(flags, "|", \
+ { CXL_MSER_VALID_CHANNEL, "CHANNEL" }, \
+ { CXL_MSER_VALID_RANK, "RANK" }, \
+ { CXL_MSER_VALID_NIBBLE, "NIBBLE" }, \
+ { CXL_MSER_VALID_BANK_GROUP, "BANK GROUP" }, \
+ { CXL_MSER_VALID_BANK, "BANK" }, \
+ { CXL_MSER_VALID_ROW, "ROW" }, \
+ { CXL_MSER_VALID_COLUMN, "COLUMN" }, \
+ { CXL_MSER_VALID_COMPONENT_ID, "COMPONENT ID" }, \
+ { CXL_MSER_VALID_COMPONENT_ID_FORMAT, "COMPONENT ID PLDM FORMAT" }, \
+ { CXL_MSER_VALID_SUB_CHANNEL, "SUB CHANNEL" } \
+)
+
+TRACE_EVENT(cxl_memory_sparing,
+
+ TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
+ struct cxl_event_mem_sparing *rec),
+
+ TP_ARGS(cxlmd, log, rec),
+
+ TP_STRUCT__entry(
+ CXL_EVT_TP_entry
+
+ /* Memory Sparing Event */
+ __field(u8, flags)
+ __field(u8, result)
+ __field(u16, validity_flags)
+ __field(u16, res_avail)
+ __field(u8, channel)
+ __field(u8, rank)
+ __field(u32, nibble_mask)
+ __field(u8, bank_group)
+ __field(u8, bank)
+ __field(u32, row)
+ __field(u16, column)
+ __field(u8, sub_channel)
+ __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
+ ),
+
+ TP_fast_assign(
+ CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
+ __entry->hdr_uuid = CXL_EVENT_MEM_SPARING_UUID;
+
+ /* Memory Sparing Event */
+ __entry->flags = rec->flags;
+ __entry->result = rec->result;
+ __entry->validity_flags = le16_to_cpu(rec->validity_flags);
+ __entry->res_avail = le16_to_cpu(rec->res_avail);
+ __entry->channel = rec->channel;
+ __entry->rank = rec->rank;
+ __entry->nibble_mask = get_unaligned_le24(rec->nibble_mask);
+ __entry->bank_group = rec->bank_group;
+ __entry->bank = rec->bank;
+ __entry->row = get_unaligned_le24(rec->row);
+ __entry->column = le16_to_cpu(rec->column);
+ __entry->sub_channel = rec->sub_channel;
+ memcpy(__entry->comp_id, &rec->component_id,
+ CXL_EVENT_GEN_MED_COMP_ID_SIZE);
+ ),
+
+ CXL_EVT_TP_printk("flags='%s' result=%u validity_flags='%s' " \
+ "spare resource avail=%u channel=%u rank=%u " \
+ "nibble_mask=%x bank_group=%u bank=%u " \
+ "row=%u column=%u sub_channel=%u " \
+ "comp_id=%s comp_id_pldm_valid_flags='%s' " \
+ "pldm_entity_id=%s pldm_resource_id=%s",
+ show_mem_sparing_flags(__entry->flags),
+ __entry->result,
+ show_mem_sparing_valid_flags(__entry->validity_flags),
+ __entry->res_avail, __entry->channel, __entry->rank,
+ __entry->nibble_mask, __entry->bank_group, __entry->bank,
+ __entry->row, __entry->column, __entry->sub_channel,
+ __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
+ show_comp_id_pldm_flags(__entry->comp_id[0]),
+ show_pldm_entity_id(__entry->validity_flags, CXL_MSER_VALID_COMPONENT_ID,
+ CXL_MSER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ show_pldm_resource_id(__entry->validity_flags, CXL_MSER_VALID_COMPONENT_ID,
+ CXL_MSER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id)
+ )
+);
+
#define show_poison_trace_type(type) \
__print_symbolic(type, \
{ CXL_POISON_TRACE_LIST, "List" }, \
@@ -947,7 +1068,7 @@ TRACE_EVENT(cxl_poison,
__entry->hpa = cxl_dpa_to_hpa(cxlr, cxlmd,
__entry->dpa);
if (__entry->hpa != ULLONG_MAX && cxlr->params.cache_size)
- __entry->hpa_alias0 = __entry->hpa +
+ __entry->hpa_alias0 = __entry->hpa -
cxlr->params.cache_size;
else
__entry->hpa_alias0 = ULLONG_MAX;
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 3f1695c96abc..ba17fa86d249 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -11,6 +11,7 @@
#include <linux/log2.h>
#include <linux/node.h>
#include <linux/io.h>
+#include <linux/range.h>
extern const struct nvdimm_security_ops *cxl_security_ops;
@@ -356,6 +357,9 @@ enum cxl_decoder_type {
* @target_type: accelerator vs expander (type2 vs type3) selector
* @region: currently assigned region for this decoder
* @flags: memory type capabilities and locking
+ * @target_map: cached copy of the hardware port-id list, available at init
+ * before all @dport objects have been instantiated. While a
+ * dport id is 8-bit, CFMWS interleave targets are 32-bit.
* @commit: device/decoder-type specific callback to commit settings to hw
* @reset: device/decoder-type specific callback to reset hw settings
*/
@@ -368,6 +372,7 @@ struct cxl_decoder {
enum cxl_decoder_type target_type;
struct cxl_region *region;
unsigned long flags;
+ u32 target_map[CXL_DECODER_MAX_INTERLEAVE];
int (*commit)(struct cxl_decoder *cxld);
void (*reset)(struct cxl_decoder *cxld);
};
@@ -418,25 +423,35 @@ struct cxl_switch_decoder {
};
struct cxl_root_decoder;
-typedef u64 (*cxl_hpa_to_spa_fn)(struct cxl_root_decoder *cxlrd, u64 hpa);
+/**
+ * struct cxl_rd_ops - CXL root decoder callback operations
+ * @hpa_to_spa: Convert host physical address to system physical address
+ * @spa_to_hpa: Convert system physical address to host physical address
+ */
+struct cxl_rd_ops {
+ u64 (*hpa_to_spa)(struct cxl_root_decoder *cxlrd, u64 hpa);
+ u64 (*spa_to_hpa)(struct cxl_root_decoder *cxlrd, u64 spa);
+};
/**
* struct cxl_root_decoder - Static platform CXL address decoder
* @res: host / parent resource for region allocations
+ * @cache_size: extended linear cache size if present, otherwise zero.
* @region_id: region id for next region provisioning event
- * @hpa_to_spa: translate CXL host-physical-address to Platform system-physical-address
* @platform_data: platform specific configuration data
* @range_lock: sync region autodiscovery by address range
* @qos_class: QoS performance class cookie
+ * @ops: CXL root decoder operations
* @cxlsd: base cxl switch decoder
*/
struct cxl_root_decoder {
struct resource *res;
+ resource_size_t cache_size;
atomic_t region_id;
- cxl_hpa_to_spa_fn hpa_to_spa;
void *platform_data;
struct mutex range_lock;
int qos_class;
+ struct cxl_rd_ops ops;
struct cxl_switch_decoder cxlsd;
};
@@ -469,7 +484,7 @@ enum cxl_config_state {
* @nr_targets: number of targets
* @cache_size: extended linear cache size if exists, otherwise zero.
*
- * State transitions are protected by the cxl_region_rwsem
+ * State transitions are protected by cxl_rwsem.region
*/
struct cxl_region_params {
enum cxl_config_state state;
@@ -502,6 +517,14 @@ enum cxl_partition_mode {
*/
#define CXL_REGION_F_NEEDS_RESET 1
+/*
+ * Indicate that this region is locked because one or more of its decoders
+ * are locked. The locked attribute is all or nothing: if any decoder is
+ * locked, the whole region is treated as locked. CXL_REGION_F_NEEDS_RESET
+ * should not be set while this flag is set.
+ */
+#define CXL_REGION_F_LOCK 2
+
/**
* struct cxl_region - CXL region
* @dev: This region's device
@@ -513,7 +536,7 @@ enum cxl_partition_mode {
* @flags: Region state flags
* @params: active + config params for the region
* @coord: QoS access coordinates for the region
- * @memory_notifier: notifier for setting the access coordinates to node
+ * @node_notifier: notifier for setting the access coordinates to node
* @adist_notifier: notifier for calculating the abstract distance of node
*/
struct cxl_region {
@@ -526,7 +549,7 @@ struct cxl_region {
unsigned long flags;
struct cxl_region_params params;
struct access_coordinate coord[ACCESS_COORDINATE_MAX];
- struct notifier_block memory_notifier;
+ struct notifier_block node_notifier;
struct notifier_block adist_notifier;
};
@@ -592,6 +615,7 @@ struct cxl_dax_region {
* @cdat: Cached CDAT data
* @cdat_available: Should a CDAT attribute be available in sysfs
* @pci_latency: Upstream latency in picoseconds
+ * @component_reg_phys: Physical address of the component registers
*/
struct cxl_port {
struct device dev;
@@ -615,6 +639,7 @@ struct cxl_port {
} cdat;
bool cdat_available;
long pci_latency;
+ resource_size_t component_reg_phys;
};
/**
@@ -721,6 +746,25 @@ static inline bool is_cxl_root(struct cxl_port *port)
return port->uport_dev == port->dev.parent;
}
+/* Address translation functions exported to cxl_translate test module only */
+int cxl_validate_translation_params(u8 eiw, u16 eig, int pos);
+u64 cxl_calculate_hpa_offset(u64 dpa_offset, int pos, u8 eiw, u16 eig);
+u64 cxl_calculate_dpa_offset(u64 hpa_offset, u8 eiw, u16 eig);
+int cxl_calculate_position(u64 hpa_offset, u8 eiw, u16 eig);
+struct cxl_cxims_data {
+ int nr_maps;
+ u64 xormaps[] __counted_by(nr_maps);
+};
+
+#if IS_ENABLED(CONFIG_CXL_ACPI)
+u64 cxl_do_xormap_calc(struct cxl_cxims_data *cximsd, u64 addr, int hbiw);
+#else
+static inline u64 cxl_do_xormap_calc(struct cxl_cxims_data *cximsd, u64 addr, int hbiw)
+{
+ return ULLONG_MAX;
+}
+#endif
+
int cxl_num_decoders_committed(struct cxl_port *port);
bool is_cxl_port(const struct device *dev);
struct cxl_port *to_cxl_port(const struct device *dev);
@@ -778,9 +822,9 @@ struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
unsigned int nr_targets);
struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
unsigned int nr_targets);
-int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
+int cxl_decoder_add(struct cxl_decoder *cxld);
struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port);
-int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map);
+int cxl_decoder_add_locked(struct cxl_decoder *cxld);
int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
static inline int cxl_root_decoder_autoremove(struct device *host,
struct cxl_root_decoder *cxlrd)
@@ -803,19 +847,17 @@ struct cxl_endpoint_dvsec_info {
struct range dvsec_range[2];
};
-struct cxl_hdm;
-struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
- struct cxl_endpoint_dvsec_info *info);
-int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info);
-int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
+int devm_cxl_switch_port_decoders_setup(struct cxl_port *port);
+int __devm_cxl_switch_port_decoders_setup(struct cxl_port *port);
+int devm_cxl_endpoint_decoders_setup(struct cxl_port *port);
+
struct cxl_dev_state;
int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
struct cxl_endpoint_dvsec_info *info);
bool is_cxl_region(struct device *dev);
-extern struct bus_type cxl_bus_type;
+extern const struct bus_type cxl_bus_type;
struct cxl_driver {
const char *name;
@@ -887,7 +929,7 @@ static inline u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint,
#endif
void cxl_endpoint_parse_cdat(struct cxl_port *port);
-void cxl_switch_parse_cdat(struct cxl_port *port);
+void cxl_switch_parse_cdat(struct cxl_dport *dport);
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord);
@@ -902,6 +944,10 @@ void cxl_coordinates_combine(struct access_coordinate *out,
struct access_coordinate *c2);
bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
+struct cxl_dport *devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev);
+struct cxl_dport *__devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev);
/*
* Unit test builds overrides this to __weak, find the 'strong' version
@@ -913,14 +959,20 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
u16 cxl_gpf_get_dvsec(struct device *dev);
-static inline struct rw_semaphore *rwsem_read_intr_acquire(struct rw_semaphore *rwsem)
-{
- if (down_read_interruptible(rwsem))
- return NULL;
-
- return rwsem;
-}
-
-DEFINE_FREE(rwsem_read_release, struct rw_semaphore *, if (_T) up_read(_T))
+/*
+ * Declarations for functions called by cxl_core that are mocked by
+ * cxl_test. The respective functions are defined as __foo() and called by
+ * cxl_core as foo(). The macros below ensure that those functions exist
+ * as foo(). See tools/testing/cxl/cxl_core_exports.c and
+ * tools/testing/cxl/exports.h for setting up the mock functions. The dance
+ * avoids a circular dependency where cxl_core calls a function that ends
+ * up being a mock, crosses into cxl_test, and from there calls back into a
+ * cxl_core function.
+ */
+#ifndef CXL_TEST_ENABLE
+#define DECLARE_TESTABLE(x) __##x
+#define devm_cxl_add_dport_by_dev DECLARE_TESTABLE(devm_cxl_add_dport_by_dev)
+#define devm_cxl_switch_port_decoders_setup DECLARE_TESTABLE(devm_cxl_switch_port_decoders_setup)
+#endif
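
For example, in a regular (non-CXL_TEST_ENABLE) build a call site written as
devm_cxl_add_dport_by_dev() compiles to __devm_cxl_add_dport_by_dev(), while
the cxl_test build leaves the name alone so its mock
devm_cxl_add_dport_by_dev() can intercept the call.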
#endif /* __CXL_H__ */
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 551b0ba2caa1..434031a0c1f7 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -254,7 +254,7 @@ enum security_cmd_enabled_bits {
* @max_errors: Maximum media error records held in device cache
* @enabled_cmds: All poison commands enabled in the CEL
* @list_out: The poison list payload returned by device
- * @lock: Protect reads of the poison list
+ * @mutex: Protect reads of the poison list
*
* Reads of the poison list are synchronized to ensure that a reader
* does not get an incomplete list because their request overlapped
@@ -265,7 +265,7 @@ struct cxl_poison_state {
u32 max_errors;
DECLARE_BITMAP(enabled_cmds, CXL_POISON_ENABLED_MAX);
struct cxl_mbox_poison_out *list_out;
- struct mutex lock; /* Protect reads of poison list */
+ struct mutex mutex; /* Protect reads of poison list */
};
/*
@@ -634,6 +634,14 @@ struct cxl_mbox_identify {
0x13, 0xb7, 0x74)
/*
+ * Memory Sparing Event Record UUID
+ * CXL rev 3.2 section 8.2.10.2.1.4: Table 8-60
+ */
+#define CXL_EVENT_MEM_SPARING_UUID \
+ UUID_INIT(0xe71f3a40, 0x2d29, 0x4092, 0x8a, 0x39, 0x4d, 0x1c, 0x96, \
+ 0x6c, 0x7c, 0x65)
+
+/*
* Get Event Records output payload
* CXL rev 3.0 section 8.2.9.2.2; Table 8-50
*/
@@ -861,6 +869,8 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
int cxl_trigger_poison_list(struct cxl_memdev *cxlmd);
int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa);
int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa);
+int cxl_inject_poison_locked(struct cxl_memdev *cxlmd, u64 dpa);
+int cxl_clear_poison_locked(struct cxl_memdev *cxlmd, u64 dpa);
#ifdef CONFIG_CXL_EDAC_MEM_FEATURES
int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd);
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 54e219b0049e..1d526bea8431 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -127,10 +127,7 @@ static inline bool cxl_pci_flit_256(struct pci_dev *pdev)
return lnksta2 & PCI_EXP_LNKSTA2_FLIT;
}
-int devm_cxl_port_enumerate_dports(struct cxl_port *port);
struct cxl_dev_state;
-int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info);
void read_cdat_data(struct cxl_port *port);
void cxl_cor_error_detected(struct pci_dev *pdev);
pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 785aa2af5eaa..0be4e508affe 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -136,7 +136,7 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
if (opcode == CXL_MBOX_OP_SANITIZE) {
mutex_lock(&cxl_mbox->mbox_mutex);
if (mds->security.sanitize_node)
- mod_delayed_work(system_wq, &mds->security.poll_dwork, 0);
+ mod_delayed_work(system_percpu_wq, &mds->security.poll_dwork, 0);
mutex_unlock(&cxl_mbox->mbox_mutex);
} else {
/* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
@@ -379,7 +379,7 @@ static int cxl_pci_mbox_send(struct cxl_mailbox *cxl_mbox,
{
int rc;
- mutex_lock_io(&cxl_mbox->mbox_mutex);
+ mutex_lock(&cxl_mbox->mbox_mutex);
rc = __cxl_pci_mbox_send_cmd(cxl_mbox, cmd);
mutex_unlock(&cxl_mbox->mbox_mutex);
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index fe4b593331da..51c8f2f84717 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -59,55 +59,20 @@ static int discover_region(struct device *dev, void *unused)
static int cxl_switch_port_probe(struct cxl_port *port)
{
- struct cxl_hdm *cxlhdm;
- int rc;
+ /* Reset nr_dports for rebind of driver */
+ port->nr_dports = 0;
/* Cache the data early to ensure is_visible() works */
read_cdat_data(port);
- rc = devm_cxl_port_enumerate_dports(port);
- if (rc < 0)
- return rc;
-
- cxl_switch_parse_cdat(port);
-
- cxlhdm = devm_cxl_setup_hdm(port, NULL);
- if (!IS_ERR(cxlhdm))
- return devm_cxl_enumerate_decoders(cxlhdm, NULL);
-
- if (PTR_ERR(cxlhdm) != -ENODEV) {
- dev_err(&port->dev, "Failed to map HDM decoder capability\n");
- return PTR_ERR(cxlhdm);
- }
-
- if (rc == 1) {
- dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
- return devm_cxl_add_passthrough_decoder(port);
- }
-
- dev_err(&port->dev, "HDM decoder capability not found\n");
- return -ENXIO;
+ return 0;
}
static int cxl_endpoint_port_probe(struct cxl_port *port)
{
- struct cxl_endpoint_dvsec_info info = { .port = port };
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_hdm *cxlhdm;
int rc;
- rc = cxl_dvsec_rr_decode(cxlds, &info);
- if (rc < 0)
- return rc;
-
- cxlhdm = devm_cxl_setup_hdm(port, &info);
- if (IS_ERR(cxlhdm)) {
- if (PTR_ERR(cxlhdm) == -ENODEV)
- dev_err(&port->dev, "HDM decoder registers not found\n");
- return PTR_ERR(cxlhdm);
- }
-
/* Cache the data early to ensure is_visible() works */
read_cdat_data(port);
cxl_endpoint_parse_cdat(port);
@@ -117,11 +82,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
if (rc)
return rc;
- rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
- if (rc)
- return rc;
-
- rc = devm_cxl_enumerate_decoders(cxlhdm, &info);
+ rc = devm_cxl_endpoint_decoders_setup(port);
if (rc)
return rc;
@@ -181,7 +142,7 @@ static const struct bin_attribute *const cxl_cdat_bin_attributes[] = {
};
static const struct attribute_group cxl_cdat_attribute_group = {
- .bin_attrs_new = cxl_cdat_bin_attributes,
+ .bin_attrs = cxl_cdat_bin_attributes,
.is_bin_visible = cxl_port_bin_attr_is_visible,
};
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 328231cfb028..22999a402e02 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -4,7 +4,6 @@
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
@@ -14,8 +13,9 @@
#include "dax-private.h"
#include "bus.h"
-static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
- const char *func)
+static int __check_vma(struct dev_dax *dev_dax, vm_flags_t vm_flags,
+ unsigned long start, unsigned long end, struct file *file,
+ const char *func)
{
struct device *dev = &dev_dax->dev;
unsigned long mask;
@@ -24,7 +24,7 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
return -ENXIO;
/* prevent private mappings from being established */
- if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
+ if ((vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
dev_info_ratelimited(dev,
"%s: %s: fail, attempted private mapping\n",
current->comm, func);
@@ -32,15 +32,15 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
}
mask = dev_dax->align - 1;
- if (vma->vm_start & mask || vma->vm_end & mask) {
+ if (start & mask || end & mask) {
dev_info_ratelimited(dev,
"%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
- current->comm, func, vma->vm_start, vma->vm_end,
+ current->comm, func, start, end,
mask);
return -EINVAL;
}
- if (!vma_is_dax(vma)) {
+ if (!file_is_dax(file)) {
dev_info_ratelimited(dev,
"%s: %s: fail, vma is not DAX capable\n",
current->comm, func);
@@ -50,6 +50,13 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
return 0;
}
+static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
+ const char *func)
+{
+ return __check_vma(dev_dax, vma->vm_flags, vma->vm_start, vma->vm_end,
+ vma->vm_file, func);
+}
+
/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
unsigned long size)
@@ -73,7 +80,7 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
return -1;
}
-static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
+static void dax_set_mapping(struct vm_fault *vmf, unsigned long pfn,
unsigned long fault_size)
{
unsigned long i, nr_pages = fault_size / PAGE_SIZE;
@@ -89,7 +96,7 @@ static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
ALIGN_DOWN(vmf->address, fault_size));
for (i = 0; i < nr_pages; i++) {
- struct folio *folio = pfn_folio(pfn_t_to_pfn(pfn) + i);
+ struct folio *folio = pfn_folio(pfn + i);
if (folio->mapping)
continue;
@@ -104,7 +111,7 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
{
struct device *dev = &dev_dax->dev;
phys_addr_t phys;
- pfn_t pfn;
+ unsigned long pfn;
unsigned int fault_size = PAGE_SIZE;
if (check_vma(dev_dax, vmf->vma, __func__))
@@ -125,11 +132,11 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- pfn = phys_to_pfn_t(phys, 0);
+ pfn = PHYS_PFN(phys);
dax_set_mapping(vmf, pfn, fault_size);
- return vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn),
+ return vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn),
vmf->flags & FAULT_FLAG_WRITE);
}
@@ -140,7 +147,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
struct device *dev = &dev_dax->dev;
phys_addr_t phys;
pgoff_t pgoff;
- pfn_t pfn;
+ unsigned long pfn;
unsigned int fault_size = PMD_SIZE;
if (check_vma(dev_dax, vmf->vma, __func__))
@@ -169,11 +176,11 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- pfn = phys_to_pfn_t(phys, 0);
+ pfn = PHYS_PFN(phys);
dax_set_mapping(vmf, pfn, fault_size);
- return vmf_insert_folio_pmd(vmf, page_folio(pfn_t_to_page(pfn)),
+ return vmf_insert_folio_pmd(vmf, page_folio(pfn_to_page(pfn)),
vmf->flags & FAULT_FLAG_WRITE);
}
@@ -185,7 +192,7 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
struct device *dev = &dev_dax->dev;
phys_addr_t phys;
pgoff_t pgoff;
- pfn_t pfn;
+ unsigned long pfn;
unsigned int fault_size = PUD_SIZE;
@@ -215,11 +222,11 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- pfn = phys_to_pfn_t(phys, 0);
+ pfn = PHYS_PFN(phys);
dax_set_mapping(vmf, pfn, fault_size);
- return vmf_insert_folio_pud(vmf, page_folio(pfn_t_to_page(pfn)),
+ return vmf_insert_folio_pud(vmf, page_folio(pfn_to_page(pfn)),
vmf->flags & FAULT_FLAG_WRITE);
}
#else
@@ -286,8 +293,9 @@ static const struct vm_operations_struct dax_vm_ops = {
.pagesize = dev_dax_pagesize,
};
-static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
+static int dax_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *filp = desc->file;
struct dev_dax *dev_dax = filp->private_data;
int rc, id;
@@ -298,13 +306,14 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
* fault time.
*/
id = dax_read_lock();
- rc = check_vma(dev_dax, vma, __func__);
+ rc = __check_vma(dev_dax, desc->vm_flags, desc->start, desc->end, filp,
+ __func__);
dax_read_unlock(id);
if (rc)
return rc;
- vma->vm_ops = &dax_vm_ops;
- vm_flags_set(vma, VM_HUGEPAGE);
+ desc->vm_ops = &dax_vm_ops;
+ desc->vm_flags |= VM_HUGEPAGE;
return 0;
}
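
The conversion above follows the ->mmap_prepare file operation, where a driver
validates and configures the mapping through struct vm_area_desc before the
VMA is created, rather than mutating a live VMA in ->mmap(). A minimal sketch
of the pattern for a hypothetical driver, using only the desc fields exercised
in this patch (file, start, end, vm_flags, vm_ops):

	#include <linux/fs.h>
	#include <linux/mm.h>

	static const struct vm_operations_struct foo_vm_ops;

	static int foo_mmap_prepare(struct vm_area_desc *desc)
	{
		/* reject private mappings, as the dax driver does */
		if ((desc->vm_flags & VM_MAYSHARE) != VM_MAYSHARE)
			return -EINVAL;

		/* configure the not-yet-created VMA via the descriptor */
		desc->vm_ops = &foo_vm_ops;
		desc->vm_flags |= VM_HUGEPAGE;
		return 0;
	}

	static const struct file_operations foo_fops = {
		.mmap_prepare = foo_mmap_prepare,
	};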
@@ -331,14 +340,13 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
if ((off + len_align) < off)
goto out;
- addr_align = mm_get_unmapped_area(current->mm, filp, addr, len_align,
- pgoff, flags);
+ addr_align = mm_get_unmapped_area(filp, addr, len_align, pgoff, flags);
if (!IS_ERR_VALUE(addr_align)) {
addr_align += (off - addr_align) & (align - 1);
return addr_align;
}
out:
- return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
+ return mm_get_unmapped_area(filp, addr, len, pgoff, flags);
}
static const struct address_space_operations dev_dax_aops = {
@@ -378,7 +386,7 @@ static const struct file_operations dax_fops = {
.open = dax_open,
.release = dax_release,
.get_unmapped_area = dax_get_unmapped_area,
- .mmap = dax_mmap,
+ .mmap_prepare = dax_mmap_prepare,
.fop_flags = FOP_MMAP_SYNC,
};
diff --git a/drivers/dax/hmem/hmem.c b/drivers/dax/hmem/hmem.c
index 5e7c53f18491..c18451a37e4f 100644
--- a/drivers/dax/hmem/hmem.c
+++ b/drivers/dax/hmem/hmem.c
@@ -2,7 +2,6 @@
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/module.h>
-#include <linux/pfn_t.h>
#include <linux/dax.h>
#include "../bus.h"
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index 584c70a34b52..c036e4d0b610 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -5,7 +5,6 @@
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index c8ebf4e281f2..bee93066a849 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -2,7 +2,6 @@
/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/module.h>
-#include <linux/pfn_t.h>
#include "../nvdimm/pfn.h"
#include "../nvdimm/nd.h"
#include "bus.h"
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index e16d1d40d773..c00b9dff4a06 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -7,7 +7,6 @@
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
-#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/uio.h>
@@ -148,7 +147,7 @@ enum dax_device_flags {
* pages accessible at the device relative @pgoff.
*/
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
- enum dax_access_mode mode, void **kaddr, pfn_t *pfn)
+ enum dax_access_mode mode, void **kaddr, unsigned long *pfn)
{
long avail;
@@ -389,7 +388,7 @@ static const struct super_operations dax_sops = {
.alloc_inode = dax_alloc_inode,
.destroy_inode = dax_destroy_inode,
.free_inode = dax_free_inode,
- .drop_inode = generic_delete_inode,
+ .drop_inode = inode_just_drop,
};
static int dax_init_fs_context(struct fs_context *fc)
@@ -434,7 +433,7 @@ static struct dax_device *dax_dev_get(dev_t devt)
return NULL;
dax_dev = to_dax_dev(inode);
- if (inode->i_state & I_NEW) {
+ if (inode_state_read_once(inode) & I_NEW) {
set_bit(DAXDEV_ALIVE, &dax_dev->flags);
inode->i_cdev = &dax_dev->cdev;
inode->i_mode = S_IFCHR;
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 3c4862a752b5..c999c4a1e567 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -90,6 +90,17 @@ config ARM_EXYNOS_BUS_DEVFREQ
and adjusts the operating frequencies and voltages with OPP support.
This does not yet operate with optimal voltages.
+config ARM_HISI_UNCORE_DEVFREQ
+ tristate "HiSilicon uncore DEVFREQ Driver"
+ depends on ACPI && ACPI_PPTT && PCC
+ select DEVFREQ_GOV_PERFORMANCE
+ select DEVFREQ_GOV_USERSPACE
+ help
+ This adds a DEVFREQ driver that manages uncore frequency scaling for
+ HiSilicon Kunpeng SoCs, enabling runtime control of the uncore
+ frequency from both the kernel and userspace. The uncore domain
+ contains the system interconnects and the L3 cache.
+
config ARM_IMX_BUS_DEVFREQ
tristate "i.MX Generic Bus DEVFREQ Driver"
depends on ARCH_MXC || COMPILE_TEST
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index bf40d04928d0..404179d79a9d 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o
# DEVFREQ Drivers
obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o
+obj-$(CONFIG_ARM_HISI_UNCORE_DEVFREQ) += hisi_uncore_freq.o
obj-$(CONFIG_ARM_IMX_BUS_DEVFREQ) += imx-bus.o
obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ) += imx8m-ddrc.o
obj-$(CONFIG_ARM_MEDIATEK_CCI_DEVFREQ) += mtk-cci-devfreq.o
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 98657d3b9435..00979f2e0e27 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -20,6 +20,7 @@
#include <linux/stat.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
@@ -28,7 +29,6 @@
#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/units.h>
-#include "governor.h"
#define CREATE_TRACE_POINTS
#include <trace/events/devfreq.h>
@@ -152,11 +152,8 @@ void devfreq_get_freq_range(struct devfreq *devfreq,
(unsigned long)HZ_PER_KHZ * qos_max_freq);
/* Apply constraints from OPP interface */
- *min_freq = max(*min_freq, devfreq->scaling_min_freq);
- *max_freq = min(*max_freq, devfreq->scaling_max_freq);
-
- if (*min_freq > *max_freq)
- *min_freq = *max_freq;
+ *max_freq = clamp(*max_freq, devfreq->scaling_min_freq, devfreq->scaling_max_freq);
+ *min_freq = clamp(*min_freq, devfreq->scaling_min_freq, *max_freq);
}
EXPORT_SYMBOL(devfreq_get_freq_range);
@@ -807,7 +804,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
{
struct devfreq *devfreq;
struct devfreq_governor *governor;
- unsigned long min_freq, max_freq;
int err = 0;
if (!dev || !profile || !governor_name) {
@@ -835,6 +831,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
mutex_lock(&devfreq->lock);
devfreq->dev.parent = dev;
devfreq->dev.class = devfreq_class;
+ devfreq->dev.groups = profile->dev_groups;
devfreq->dev.release = devfreq_dev_release;
INIT_LIST_HEAD(&devfreq->node);
devfreq->profile = profile;
@@ -875,8 +872,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
goto err_dev;
}
- devfreq_get_freq_range(devfreq, &min_freq, &max_freq);
-
devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
devfreq->opp_table = dev_pm_opp_get_opp_table(dev);
if (IS_ERR(devfreq->opp_table))
@@ -1382,15 +1377,11 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
int ret;
struct device *dev = devfreq->dev.parent;
+ if (!devfreq->governor)
+ continue;
+
if (!strncmp(devfreq->governor->name, governor->name,
DEVFREQ_NAME_LEN)) {
- /* we should have a devfreq governor! */
- if (!devfreq->governor) {
- dev_warn(dev, "%s: Governor %s NOT present\n",
- __func__, governor->name);
- continue;
- /* Fall through */
- }
ret = devfreq->governor->event_handler(devfreq,
DEVFREQ_GOV_STOP, NULL);
if (ret) {
@@ -1743,7 +1734,7 @@ static ssize_t trans_stat_show(struct device *dev,
for (i = 0; i < max_state; i++) {
if (len >= PAGE_SIZE - 1)
break;
- if (df->freq_table[2] == df->previous_freq)
+ if (df->freq_table[i] == df->previous_freq)
len += sysfs_emit_at(buf, len, "*");
else
len += sysfs_emit_at(buf, len, " ");
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 0470d7c175f4..5e6e7e900bda 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/bitfield.h>
+#include <linux/hw_bitfield.h>
#include <linux/bits.h>
#include <linux/perf_event.h>
@@ -30,19 +31,16 @@
#define DMC_MAX_CHANNELS 4
-#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16)
-
/* DDRMON_CTRL */
#define DDRMON_CTRL 0x04
+#define DDRMON_CTRL_LPDDR5 BIT(6)
#define DDRMON_CTRL_DDR4 BIT(5)
#define DDRMON_CTRL_LPDDR4 BIT(4)
#define DDRMON_CTRL_HARDWARE_EN BIT(3)
#define DDRMON_CTRL_LPDDR23 BIT(2)
#define DDRMON_CTRL_SOFTWARE_EN BIT(1)
#define DDRMON_CTRL_TIMER_CNT_EN BIT(0)
-#define DDRMON_CTRL_DDR_TYPE_MASK (DDRMON_CTRL_DDR4 | \
- DDRMON_CTRL_LPDDR4 | \
- DDRMON_CTRL_LPDDR23)
+#define DDRMON_CTRL_LP5_BANK_MODE_MASK GENMASK(8, 7)
#define DDRMON_CH0_WR_NUM 0x20
#define DDRMON_CH0_RD_NUM 0x24
@@ -116,12 +114,63 @@ struct rockchip_dfi {
int buswidth[DMC_MAX_CHANNELS];
int ddrmon_stride;
bool ddrmon_ctrl_single;
+ u32 lp5_bank_mode;
+ bool lp5_ckr; /* true if in 4:1 command-to-data clock ratio mode */
+ unsigned int count_multiplier; /* number of data clocks per count */
};
+static int rockchip_dfi_ddrtype_to_ctrl(struct rockchip_dfi *dfi, u32 *ctrl)
+{
+ u32 ddrmon_ver;
+
+ switch (dfi->ddr_type) {
+ case ROCKCHIP_DDRTYPE_LPDDR2:
+ case ROCKCHIP_DDRTYPE_LPDDR3:
+ *ctrl = FIELD_PREP_WM16(DDRMON_CTRL_LPDDR23, 1) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LPDDR4, 0) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LPDDR5, 0);
+ break;
+ case ROCKCHIP_DDRTYPE_LPDDR4:
+ case ROCKCHIP_DDRTYPE_LPDDR4X:
+ *ctrl = FIELD_PREP_WM16(DDRMON_CTRL_LPDDR23, 0) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LPDDR4, 1) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LPDDR5, 0);
+ break;
+ case ROCKCHIP_DDRTYPE_LPDDR5:
+ ddrmon_ver = readl_relaxed(dfi->regs);
+ if (ddrmon_ver < 0x40) {
+ *ctrl = FIELD_PREP_WM16(DDRMON_CTRL_LPDDR23, 0) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LPDDR4, 0) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LPDDR5, 1) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LP5_BANK_MODE_MASK,
+ dfi->lp5_bank_mode);
+ break;
+ }
+
+ /*
+ * As it is unknown whether the unpleasant special case
+ * behaviour used by the vendor kernel is needed for any
+ * shipping hardware, ask users to report if they have
+ * some of that hardware.
+ */
+ dev_err(&dfi->edev->dev,
+ "unsupported DDRMON version 0x%04X, please let linux-rockchip know!\n",
+ ddrmon_ver);
+ return -EOPNOTSUPP;
+ default:
+ dev_err(&dfi->edev->dev, "unsupported memory type 0x%X\n",
+ dfi->ddr_type);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
{
void __iomem *dfi_regs = dfi->regs;
int i, ret = 0;
+ u32 ctrl;
mutex_lock(&dfi->mutex);
@@ -135,36 +184,26 @@ static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
goto out;
}
+ ret = rockchip_dfi_ddrtype_to_ctrl(dfi, &ctrl);
+ if (ret)
+ goto out;
+
for (i = 0; i < dfi->max_channels; i++) {
- u32 ctrl = 0;
if (!(dfi->channel_mask & BIT(i)))
continue;
/* clear DDRMON_CTRL setting */
- writel_relaxed(HIWORD_UPDATE(0, DDRMON_CTRL_TIMER_CNT_EN |
- DDRMON_CTRL_SOFTWARE_EN | DDRMON_CTRL_HARDWARE_EN),
+ writel_relaxed(FIELD_PREP_WM16(DDRMON_CTRL_TIMER_CNT_EN, 0) |
+ FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 0) |
+ FIELD_PREP_WM16(DDRMON_CTRL_HARDWARE_EN, 0),
dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
- /* set ddr type to dfi */
- switch (dfi->ddr_type) {
- case ROCKCHIP_DDRTYPE_LPDDR2:
- case ROCKCHIP_DDRTYPE_LPDDR3:
- ctrl = DDRMON_CTRL_LPDDR23;
- break;
- case ROCKCHIP_DDRTYPE_LPDDR4:
- case ROCKCHIP_DDRTYPE_LPDDR4X:
- ctrl = DDRMON_CTRL_LPDDR4;
- break;
- default:
- break;
- }
-
- writel_relaxed(HIWORD_UPDATE(ctrl, DDRMON_CTRL_DDR_TYPE_MASK),
- dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
+ writel_relaxed(ctrl, dfi_regs + i * dfi->ddrmon_stride +
+ DDRMON_CTRL);
/* enable count, use software mode */
- writel_relaxed(HIWORD_UPDATE(DDRMON_CTRL_SOFTWARE_EN, DDRMON_CTRL_SOFTWARE_EN),
+ writel_relaxed(FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 1),
dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
if (dfi->ddrmon_ctrl_single)
@@ -194,8 +233,8 @@ static void rockchip_dfi_disable(struct rockchip_dfi *dfi)
if (!(dfi->channel_mask & BIT(i)))
continue;
- writel_relaxed(HIWORD_UPDATE(0, DDRMON_CTRL_SOFTWARE_EN),
- dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
+ writel_relaxed(FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 0),
+ dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
if (dfi->ddrmon_ctrl_single)
break;
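
FIELD_PREP_WM16() comes from the newly included linux/hw_bitfield.h and
replaces the local HIWORD_UPDATE() macro. A hedged note on its assumed
semantics, as C comments:

	/*
	 * On these Rockchip registers the upper 16 bits are a write-enable
	 * mask selecting which of the lower 16 bits a write actually
	 * updates. Assuming FIELD_PREP_WM16() composes the field value with
	 * its write-enable mask, roughly:
	 *
	 *   FIELD_PREP_WM16(mask, val) == FIELD_PREP(mask, val) | ((mask) << 16)
	 *
	 * then e.g. disabling software mode writes only bit 1:
	 *
	 *   FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 0) == 0x00020000
	 *
	 * leaving every other DDRMON_CTRL bit untouched by the hardware.
	 */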
@@ -435,7 +474,7 @@ static u64 rockchip_ddr_perf_event_get_count(struct perf_event *event)
switch (event->attr.config) {
case PERF_EVENT_CYCLES:
- count = total.c[0].clock_cycles;
+ count = total.c[0].clock_cycles * dfi->count_multiplier;
break;
case PERF_EVENT_READ_BYTES:
for (i = 0; i < dfi->max_channels; i++)
@@ -651,10 +690,14 @@ static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
break;
case ROCKCHIP_DDRTYPE_LPDDR4:
case ROCKCHIP_DDRTYPE_LPDDR4X:
+ case ROCKCHIP_DDRTYPE_LPDDR5:
dfi->burst_len = 16;
break;
}
+ if (!dfi->count_multiplier)
+ dfi->count_multiplier = 1;
+
ret = perf_pmu_register(pmu, "rockchip_ddr", -1);
if (ret)
return ret;
@@ -726,7 +769,7 @@ static int rk3568_dfi_init(struct rockchip_dfi *dfi)
static int rk3588_dfi_init(struct rockchip_dfi *dfi)
{
struct regmap *regmap_pmu = dfi->regmap_pmu;
- u32 reg2, reg3, reg4;
+ u32 reg2, reg3, reg4, reg6;
regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG2, &reg2);
regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG3, &reg3);
@@ -751,6 +794,15 @@ static int rk3588_dfi_init(struct rockchip_dfi *dfi)
dfi->max_channels = 4;
dfi->ddrmon_stride = 0x4000;
+ dfi->count_multiplier = 2;
+
+ if (dfi->ddr_type == ROCKCHIP_DDRTYPE_LPDDR5) {
+ regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG6, &reg6);
+ dfi->lp5_bank_mode = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_BANK_MODE, reg6) << 7;
+ dfi->lp5_ckr = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_CKR, reg6);
+ if (dfi->lp5_ckr)
+ dfi->count_multiplier *= 2;
+ }
return 0;
};
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
deleted file mode 100644
index 0adfebc0467a..000000000000
--- a/drivers/devfreq/governor.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * governor.h - internal header for devfreq governors.
- *
- * Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This header is for devfreq governors in drivers/devfreq/
- */
-
-#ifndef _GOVERNOR_H
-#define _GOVERNOR_H
-
-#include <linux/devfreq.h>
-
-#define DEVFREQ_NAME_LEN 16
-
-#define to_devfreq(DEV) container_of((DEV), struct devfreq, dev)
-
-/* Devfreq events */
-#define DEVFREQ_GOV_START 0x1
-#define DEVFREQ_GOV_STOP 0x2
-#define DEVFREQ_GOV_UPDATE_INTERVAL 0x3
-#define DEVFREQ_GOV_SUSPEND 0x4
-#define DEVFREQ_GOV_RESUME 0x5
-
-#define DEVFREQ_MIN_FREQ 0
-#define DEVFREQ_MAX_FREQ ULONG_MAX
-
-/*
- * Definition of the governor feature flags
- * - DEVFREQ_GOV_FLAG_IMMUTABLE
- * : This governor is never changeable to other governors.
- * - DEVFREQ_GOV_FLAG_IRQ_DRIVEN
- * : The devfreq won't schedule the work for this governor.
- */
-#define DEVFREQ_GOV_FLAG_IMMUTABLE BIT(0)
-#define DEVFREQ_GOV_FLAG_IRQ_DRIVEN BIT(1)
-
-/*
- * Definition of governor attribute flags except for common sysfs attributes
- * - DEVFREQ_GOV_ATTR_POLLING_INTERVAL
- * : Indicate polling_interval sysfs attribute
- * - DEVFREQ_GOV_ATTR_TIMER
- * : Indicate timer sysfs attribute
- */
-#define DEVFREQ_GOV_ATTR_POLLING_INTERVAL BIT(0)
-#define DEVFREQ_GOV_ATTR_TIMER BIT(1)
-
-/**
- * struct devfreq_cpu_data - Hold the per-cpu data
- * @node: list node
- * @dev: reference to cpu device.
- * @first_cpu: the cpumask of the first cpu of a policy.
- * @opp_table: reference to cpu opp table.
- * @cur_freq: the current frequency of the cpu.
- * @min_freq: the min frequency of the cpu.
- * @max_freq: the max frequency of the cpu.
- *
- * This structure stores the required cpu_data of a cpu.
- * This is auto-populated by the governor.
- */
-struct devfreq_cpu_data {
- struct list_head node;
-
- struct device *dev;
- unsigned int first_cpu;
-
- struct opp_table *opp_table;
- unsigned int cur_freq;
- unsigned int min_freq;
- unsigned int max_freq;
-};
-
-/**
- * struct devfreq_governor - Devfreq policy governor
- * @node: list node - contains registered devfreq governors
- * @name: Governor's name
- * @attrs: Governor's sysfs attribute flags
- * @flags: Governor's feature flags
- * @get_target_freq: Returns desired operating frequency for the device.
- * Basically, get_target_freq will run
- * devfreq_dev_profile.get_dev_status() to get the
- * status of the device (load = busy_time / total_time).
- * @event_handler: Callback for devfreq core framework to notify events
- * to governors. Events include per device governor
- * init and exit, opp changes out of devfreq, suspend
- * and resume of per device devfreq during device idle.
- *
- * Note that the callbacks are called with devfreq->lock locked by devfreq.
- */
-struct devfreq_governor {
- struct list_head node;
-
- const char name[DEVFREQ_NAME_LEN];
- const u64 attrs;
- const u64 flags;
- int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
- int (*event_handler)(struct devfreq *devfreq,
- unsigned int event, void *data);
-};
-
-void devfreq_monitor_start(struct devfreq *devfreq);
-void devfreq_monitor_stop(struct devfreq *devfreq);
-void devfreq_monitor_suspend(struct devfreq *devfreq);
-void devfreq_monitor_resume(struct devfreq *devfreq);
-void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay);
-
-int devfreq_add_governor(struct devfreq_governor *governor);
-int devfreq_remove_governor(struct devfreq_governor *governor);
-
-int devm_devfreq_add_governor(struct device *dev,
- struct devfreq_governor *governor);
-
-int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
-int devfreq_update_target(struct devfreq *devfreq, unsigned long freq);
-void devfreq_get_freq_range(struct devfreq *devfreq, unsigned long *min_freq,
- unsigned long *max_freq);
-
-static inline int devfreq_update_stats(struct devfreq *df)
-{
- if (!df->profile->get_dev_status)
- return -EINVAL;
-
- return df->profile->get_dev_status(df->dev.parent, &df->last_status);
-}
-#endif /* _GOVERNOR_H */
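
With governor.h deleted, the declarations above move to the public header <linux/devfreq-governor.h>, which the include changes below switch to. A minimal sketch of a governor module built against the relocated header (the governor name and its fixed-max policy are illustrative, not part of this series):

#include <linux/devfreq.h>
#include <linux/devfreq-governor.h>
#include <linux/module.h>

/* Toy policy: always request the ceiling. */
static int demo_get_target_freq(struct devfreq *df, unsigned long *freq)
{
	*freq = DEVFREQ_MAX_FREQ;
	return 0;
}

static int demo_event_handler(struct devfreq *devfreq, unsigned int event,
			      void *data)
{
	return 0;	/* no per-event work needed for this toy policy */
}

static struct devfreq_governor demo_governor = {
	.name = "demo_max",
	.get_target_freq = demo_get_target_freq,
	.event_handler = demo_event_handler,
};

static int __init demo_init(void)
{
	return devfreq_add_governor(&demo_governor);
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	devfreq_remove_governor(&demo_governor);
}
module_exit(demo_exit);

MODULE_DESCRIPTION("Illustrative devfreq governor");
MODULE_LICENSE("GPL");
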
diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
index 953cf9a1e9f7..8cd6f9a59f64 100644
--- a/drivers/devfreq/governor_passive.c
+++ b/drivers/devfreq/governor_passive.c
@@ -14,8 +14,33 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/units.h>
-#include "governor.h"
+
+/**
+ * struct devfreq_cpu_data - Hold the per-cpu data
+ * @node: list node
+ * @dev: reference to cpu device.
+ * @first_cpu: the cpumask of the first cpu of a policy.
+ * @opp_table: reference to cpu opp table.
+ * @cur_freq: the current frequency of the cpu.
+ * @min_freq: the min frequency of the cpu.
+ * @max_freq: the max frequency of the cpu.
+ *
+ * This structure stores the required cpu_data of a cpu.
+ * This is auto-populated by the governor.
+ */
+struct devfreq_cpu_data {
+ struct list_head node;
+
+ struct device *dev;
+ unsigned int first_cpu;
+
+ struct opp_table *opp_table;
+ unsigned int cur_freq;
+ unsigned int min_freq;
+ unsigned int max_freq;
+};
static struct devfreq_cpu_data *
get_parent_cpu_data(struct devfreq_passive_data *p_data,
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
index 2e4e981446fa..fdb22bf512cf 100644
--- a/drivers/devfreq/governor_performance.c
+++ b/drivers/devfreq/governor_performance.c
@@ -7,8 +7,8 @@
*/
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/module.h>
-#include "governor.h"
static int devfreq_performance_func(struct devfreq *df,
unsigned long *freq)
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
index f059e8814804..ee2d6ec8a512 100644
--- a/drivers/devfreq/governor_powersave.c
+++ b/drivers/devfreq/governor_powersave.c
@@ -7,8 +7,8 @@
*/
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/module.h>
-#include "governor.h"
static int devfreq_powersave_func(struct devfreq *df,
unsigned long *freq)
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index c23435736367..ac9c5e9e51a4 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -9,12 +9,12 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/math64.h>
-#include "governor.h"
/* Default constants for DevFreq-Simple-Ondemand (DFSO) */
#define DFSO_UPTHRESHOLD (90)
-#define DFSO_DOWNDIFFERENCTIAL (5)
+#define DFSO_DOWNDIFFERENTIAL (5)
static int devfreq_simple_ondemand_func(struct devfreq *df,
unsigned long *freq)
{
@@ -22,7 +22,7 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
struct devfreq_dev_status *stat;
unsigned long long a, b;
unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD;
- unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
+ unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENTIAL;
struct devfreq_simple_ondemand_data *data = df->data;
err = devfreq_update_stats(df);
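
For context on the constants whose spelling is corrected above: ignoring integer rounding details, simple-ondemand pins the device to its maximum frequency once load exceeds DFSO_UPTHRESHOLD, and otherwise requests the frequency at which the current workload would land just under that threshold. A worked example with illustrative numbers:

/*
 * Illustrative numbers, not from this patch:
 *   busy_time = 600, total_time = 1000      ->  load = 60%
 *   current_frequency = 400 MHz, upthreshold = 90, downdifferential = 5
 *
 * 60% < 90%, so the device is not pinned to max; the requested target is
 *   target = busy * cur / total * 100 / (upthreshold - downdifferential / 2)
 *          = 600 * 400 / 1000 * 100 / 88  ~= 272 MHz,
 * the frequency at which the same amount of work would run at ~88% load.
 */
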
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index d1aa6806b683..395174f93960 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -9,10 +9,11 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
+#include <linux/kstrtox.h>
#include <linux/pm.h>
#include <linux/mutex.h>
#include <linux/module.h>
-#include "governor.h"
struct userspace_data {
unsigned long user_frequency;
@@ -39,10 +40,13 @@ static ssize_t set_freq_store(struct device *dev, struct device_attribute *attr,
unsigned long wanted;
int err = 0;
+ err = kstrtoul(buf, 0, &wanted);
+ if (err)
+ return err;
+
mutex_lock(&devfreq->lock);
data = devfreq->governor_data;
- sscanf(buf, "%lu", &wanted);
data->user_frequency = wanted;
data->valid = true;
err = update_devfreq(devfreq);
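
The sscanf() to kstrtoul() conversion above means malformed input now fails the write before the lock is taken, instead of silently leaving 'wanted' unset. The general store pattern the conversion follows, as a sketch (function and variable names hypothetical):

static ssize_t demo_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	unsigned long val;
	int err;

	/* kstrtoul() rejects trailing garbage and reports overflow */
	err = kstrtoul(buf, 0, &val);
	if (err)
		return err;	/* propagate -EINVAL/-ERANGE to the writer */

	/* ... take the relevant lock and apply 'val' ... */
	return count;
}
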
diff --git a/drivers/devfreq/hisi_uncore_freq.c b/drivers/devfreq/hisi_uncore_freq.c
new file mode 100644
index 000000000000..4d00d813c8ac
--- /dev/null
+++ b/drivers/devfreq/hisi_uncore_freq.c
@@ -0,0 +1,658 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HiSilicon uncore frequency scaling driver
+ *
+ * Copyright (c) 2025 HiSilicon Co., Ltd
+ */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
+#include <linux/device.h>
+#include <linux/dev_printk.h>
+#include <linux/errno.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/property.h>
+#include <linux/topology.h>
+#include <linux/units.h>
+#include <acpi/pcc.h>
+
+struct hisi_uncore_pcc_data {
+ u16 status;
+ u16 resv;
+ u32 data;
+};
+
+struct hisi_uncore_pcc_shmem {
+ struct acpi_pcct_shared_memory head;
+ struct hisi_uncore_pcc_data pcc_data;
+};
+
+enum hisi_uncore_pcc_cmd_type {
+ HUCF_PCC_CMD_GET_CAP = 0,
+ HUCF_PCC_CMD_GET_FREQ,
+ HUCF_PCC_CMD_SET_FREQ,
+ HUCF_PCC_CMD_GET_MODE,
+ HUCF_PCC_CMD_SET_MODE,
+ HUCF_PCC_CMD_GET_PLAT_FREQ_NUM,
+ HUCF_PCC_CMD_GET_PLAT_FREQ_BY_IDX,
+ HUCF_PCC_CMD_MAX = 256
+};
+
+static int hisi_platform_gov_usage;
+static DEFINE_MUTEX(hisi_platform_gov_usage_lock);
+
+enum hisi_uncore_freq_mode {
+ HUCF_MODE_PLATFORM = 0,
+ HUCF_MODE_OS,
+ HUCF_MODE_MAX
+};
+
+#define HUCF_CAP_PLATFORM_CTRL BIT(0)
+
+/**
+ * struct hisi_uncore_freq - hisi uncore frequency scaling device data
+ * @dev: device of this frequency scaling driver
+ * @cl: mailbox client object
+ * @pchan: PCC mailbox channel
+ * @chan_id: PCC channel ID
+ * @last_cmd_cmpl_time: timestamp of the last completed PCC command
+ * @pcc_lock: PCC channel lock
+ * @devfreq: devfreq data of this hisi_uncore_freq device
+ * @related_cpus: CPUs whose performance is majorly affected by this
+ * uncore frequency domain
+ * @cap: capability flag
+ */
+struct hisi_uncore_freq {
+ struct device *dev;
+ struct mbox_client cl;
+ struct pcc_mbox_chan *pchan;
+ int chan_id;
+ ktime_t last_cmd_cmpl_time;
+ struct mutex pcc_lock;
+ struct devfreq *devfreq;
+ struct cpumask related_cpus;
+ u32 cap;
+};
+
+/* PCC channel timeout = PCC nominal latency * NUM */
+#define HUCF_PCC_POLL_TIMEOUT_NUM 1000
+#define HUCF_PCC_POLL_INTERVAL_US 5
+
+/* Default polling interval in ms for devfreq governors */
+#define HUCF_DEFAULT_POLLING_MS 100
+
+static void hisi_uncore_free_pcc_chan(struct hisi_uncore_freq *uncore)
+{
+ guard(mutex)(&uncore->pcc_lock);
+ pcc_mbox_free_channel(uncore->pchan);
+ uncore->pchan = NULL;
+}
+
+static void devm_hisi_uncore_free_pcc_chan(void *data)
+{
+ hisi_uncore_free_pcc_chan(data);
+}
+
+static int hisi_uncore_request_pcc_chan(struct hisi_uncore_freq *uncore)
+{
+ struct device *dev = uncore->dev;
+ struct pcc_mbox_chan *pcc_chan;
+
+ uncore->cl = (struct mbox_client) {
+ .dev = dev,
+ .tx_block = false,
+ .knows_txdone = true,
+ };
+
+ pcc_chan = pcc_mbox_request_channel(&uncore->cl, uncore->chan_id);
+ if (IS_ERR(pcc_chan))
+ return dev_err_probe(dev, PTR_ERR(pcc_chan),
+ "Failed to request PCC channel %u\n", uncore->chan_id);
+
+ if (!pcc_chan->shmem_base_addr) {
+ pcc_mbox_free_channel(pcc_chan);
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid PCC shared memory address\n");
+ }
+
+ if (pcc_chan->shmem_size < sizeof(struct hisi_uncore_pcc_shmem)) {
+ pcc_mbox_free_channel(pcc_chan);
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid PCC shared memory size (%lluB)\n",
+ pcc_chan->shmem_size);
+ }
+
+ uncore->pchan = pcc_chan;
+
+ return devm_add_action_or_reset(uncore->dev,
+ devm_hisi_uncore_free_pcc_chan, uncore);
+}
+
+static acpi_status hisi_uncore_pcc_reg_scan(struct acpi_resource *res,
+ void *ctx)
+{
+ struct acpi_resource_generic_register *reg;
+ struct hisi_uncore_freq *uncore;
+
+ if (!res || res->type != ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
+ return AE_OK;
+
+ reg = &res->data.generic_reg;
+ if (reg->space_id != ACPI_ADR_SPACE_PLATFORM_COMM)
+ return AE_OK;
+
+ if (!ctx)
+ return AE_ERROR;
+
+ uncore = ctx;
+ /* PCC subspace ID stored in Access Size */
+ uncore->chan_id = reg->access_size;
+
+ return AE_CTRL_TERMINATE;
+}
+
+static int hisi_uncore_init_pcc_chan(struct hisi_uncore_freq *uncore)
+{
+ acpi_handle handle = ACPI_HANDLE(uncore->dev);
+ acpi_status status;
+ int rc;
+
+ uncore->chan_id = -1;
+ status = acpi_walk_resources(handle, METHOD_NAME__CRS,
+ hisi_uncore_pcc_reg_scan, uncore);
+ if (ACPI_FAILURE(status) || uncore->chan_id < 0)
+ return dev_err_probe(uncore->dev, -ENODEV,
+ "Failed to get a PCC channel\n");
+
+ rc = devm_mutex_init(uncore->dev, &uncore->pcc_lock);
+ if (rc)
+ return rc;
+
+ return hisi_uncore_request_pcc_chan(uncore);
+}
+
+static int hisi_uncore_cmd_send(struct hisi_uncore_freq *uncore,
+ u8 cmd, u32 *data)
+{
+ struct hisi_uncore_pcc_shmem __iomem *addr;
+ struct hisi_uncore_pcc_shmem shmem;
+ struct pcc_mbox_chan *pchan;
+ unsigned int mrtt;
+ s64 time_delta;
+ u16 status;
+ int rc;
+
+ guard(mutex)(&uncore->pcc_lock);
+
+ pchan = uncore->pchan;
+ if (!pchan)
+ return -ENODEV;
+
+ addr = (struct hisi_uncore_pcc_shmem __iomem *)pchan->shmem;
+ if (!addr)
+ return -EINVAL;
+
+ /* Handle the Minimum Request Turnaround Time (MRTT) */
+ mrtt = pchan->min_turnaround_time;
+ time_delta = ktime_us_delta(ktime_get(), uncore->last_cmd_cmpl_time);
+ if (mrtt > time_delta)
+ udelay(mrtt - time_delta);
+
+ /* Copy data */
+ shmem.head = (struct acpi_pcct_shared_memory) {
+ .signature = PCC_SIGNATURE | uncore->chan_id,
+ .command = cmd,
+ };
+ shmem.pcc_data.data = *data;
+ memcpy_toio(addr, &shmem, sizeof(shmem));
+
+ /* Ring doorbell */
+ rc = mbox_send_message(pchan->mchan, &cmd);
+ if (rc < 0) {
+ dev_err(uncore->dev, "Failed to send mbox message, %d\n", rc);
+ return rc;
+ }
+
+ /* Wait status */
+ rc = readw_poll_timeout(&addr->head.status, status,
+ status & (PCC_STATUS_CMD_COMPLETE |
+ PCC_STATUS_ERROR),
+ HUCF_PCC_POLL_INTERVAL_US,
+ pchan->latency * HUCF_PCC_POLL_TIMEOUT_NUM);
+ if (rc) {
+ dev_err(uncore->dev, "PCC channel response timeout, cmd=%u\n", cmd);
+ } else if (status & PCC_STATUS_ERROR) {
+ dev_err(uncore->dev, "PCC cmd error, cmd=%u\n", cmd);
+ rc = -EIO;
+ }
+
+ uncore->last_cmd_cmpl_time = ktime_get();
+
+ /* Copy data back */
+ memcpy_fromio(data, &addr->pcc_data.data, sizeof(*data));
+
+ /* Clear mailbox active req */
+ mbox_client_txdone(pchan->mchan, rc);
+
+ return rc;
+}
+
+static int hisi_uncore_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct hisi_uncore_freq *uncore = dev_get_drvdata(dev);
+ struct dev_pm_opp *opp;
+ u32 data;
+
+ if (WARN_ON(!uncore || !uncore->pchan))
+ return -ENODEV;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "Failed to get opp for freq %lu hz\n", *freq);
+ return PTR_ERR(opp);
+ }
+
+ data = (u32)(dev_pm_opp_get_freq(opp) / HZ_PER_MHZ);
+
+ dev_pm_opp_put(opp);
+
+ return hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_FREQ, &data);
+}
+
+static int hisi_uncore_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ /* Not used */
+ return 0;
+}
+
+static int hisi_uncore_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct hisi_uncore_freq *uncore = dev_get_drvdata(dev);
+ u32 data = 0;
+ int rc;
+
+ if (WARN_ON(!uncore || !uncore->pchan))
+ return -ENODEV;
+
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_FREQ, &data);
+
+ /*
+ * Upon a failure, 'data' remains 0 and 'freq' is set to 0 rather than a
+ * random value. devfreq shouldn't use 'freq' in that case though.
+ */
+ *freq = data * HZ_PER_MHZ;
+
+ return rc;
+}
+
+static void devm_hisi_uncore_remove_opp(void *data)
+{
+ struct hisi_uncore_freq *uncore = data;
+
+ dev_pm_opp_remove_all_dynamic(uncore->dev);
+}
+
+static int hisi_uncore_init_opp(struct hisi_uncore_freq *uncore)
+{
+ struct device *dev = uncore->dev;
+ unsigned long freq_mhz;
+ u32 num, index;
+ u32 data = 0;
+ int rc;
+
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_PLAT_FREQ_NUM,
+ &data);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get plat freq num\n");
+
+ num = data;
+
+ for (index = 0; index < num; index++) {
+ data = index;
+ rc = hisi_uncore_cmd_send(uncore,
+ HUCF_PCC_CMD_GET_PLAT_FREQ_BY_IDX,
+ &data);
+ if (rc) {
+ dev_pm_opp_remove_all_dynamic(dev);
+ return dev_err_probe(dev, rc,
+ "Failed to get plat freq at index %u\n", index);
+ }
+ freq_mhz = data;
+
+ /* The OPP voltage is unused; take 1 V as a default */
+ rc = dev_pm_opp_add(dev, freq_mhz * HZ_PER_MHZ, 1000000);
+ if (rc) {
+ dev_pm_opp_remove_all_dynamic(dev);
+ return dev_err_probe(dev, rc,
+ "Add OPP %lu failed\n", freq_mhz);
+ }
+ }
+
+ return devm_add_action_or_reset(dev, devm_hisi_uncore_remove_opp,
+ uncore);
+}
+
+static int hisi_platform_gov_func(struct devfreq *df, unsigned long *freq)
+{
+ /*
+ * Platform-controlled mode ignores the frequency issued by devfreq,
+ * so just pick the max freq.
+ */
+ *freq = DEVFREQ_MAX_FREQ;
+
+ return 0;
+}
+
+static int hisi_platform_gov_handler(struct devfreq *df, unsigned int event,
+ void *val)
+{
+ struct hisi_uncore_freq *uncore = dev_get_drvdata(df->dev.parent);
+ int rc = 0;
+ u32 data;
+
+ if (WARN_ON(!uncore || !uncore->pchan))
+ return -ENODEV;
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ data = HUCF_MODE_PLATFORM;
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_MODE, &data);
+ if (rc)
+ dev_err(uncore->dev, "Failed to set platform mode (%d)\n", rc);
+ break;
+ case DEVFREQ_GOV_STOP:
+ data = HUCF_MODE_OS;
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_MODE, &data);
+ if (rc)
+ dev_err(uncore->dev, "Failed to set os mode (%d)\n", rc);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+/*
+ * In the platform-controlled mode, the platform decides the uncore frequency
+ * and ignores the frequency issued from the driver.
+ * Thus, create a pseudo 'hisi_platform' governor that stops devfreq monitor
+ * from working so as to save meaningless overhead.
+ */
+static struct devfreq_governor hisi_platform_governor = {
+ .name = "hisi_platform",
+ /*
+ * Set DEVFREQ_GOV_FLAG_IRQ_DRIVEN to skip the devfreq monitor
+ * mechanism, even though this governor is not actually interrupt-driven.
+ */
+ .flags = DEVFREQ_GOV_FLAG_IRQ_DRIVEN,
+ .get_target_freq = hisi_platform_gov_func,
+ .event_handler = hisi_platform_gov_handler,
+};
+
+static void hisi_uncore_remove_platform_gov(struct hisi_uncore_freq *uncore)
+{
+ u32 data = HUCF_MODE_PLATFORM;
+ int rc;
+
+ if (!(uncore->cap & HUCF_CAP_PLATFORM_CTRL))
+ return;
+
+ guard(mutex)(&hisi_platform_gov_usage_lock);
+
+ if (--hisi_platform_gov_usage == 0) {
+ rc = devfreq_remove_governor(&hisi_platform_governor);
+ if (rc)
+ dev_err(uncore->dev, "Failed to remove hisi_platform gov (%d)\n", rc);
+ }
+
+ /*
+ * Switch back to platform-controlled mode on exit if supported, so
+ * that behaviour is well defined once the driver is detached.
+ */
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_MODE, &data);
+ if (rc)
+ dev_err(uncore->dev, "Failed to set platform mode on exit (%d)\n", rc);
+}
+
+static void devm_hisi_uncore_remove_platform_gov(void *data)
+{
+ hisi_uncore_remove_platform_gov(data);
+}
+
+static int hisi_uncore_add_platform_gov(struct hisi_uncore_freq *uncore)
+{
+ if (!(uncore->cap & HUCF_CAP_PLATFORM_CTRL))
+ return 0;
+
+ guard(mutex)(&hisi_platform_gov_usage_lock);
+
+ if (hisi_platform_gov_usage == 0) {
+ int rc = devfreq_add_governor(&hisi_platform_governor);
+ if (rc)
+ return rc;
+ }
+ hisi_platform_gov_usage++;
+
+ return devm_add_action_or_reset(uncore->dev,
+ devm_hisi_uncore_remove_platform_gov,
+ uncore);
+}
+
+/*
+ * Returns:
+ * 0 on success; uncore->related_cpus is set.
+ * -EINVAL if the property is not found, the property has no elements,
+ * or invalid arguments are passed to any of the subroutines.
+ * Other error codes if something else goes wrong.
+ */
+static int hisi_uncore_mark_related_cpus(struct hisi_uncore_freq *uncore,
+ char *property, int (*get_topo_id)(int cpu),
+ const struct cpumask *(*get_cpumask)(int cpu))
+{
+ unsigned int i, cpu;
+ size_t len;
+ int rc;
+
+ rc = device_property_count_u32(uncore->dev, property);
+ if (rc < 0)
+ return rc;
+ if (rc == 0)
+ return -EINVAL;
+
+ len = rc;
+ u32 *num __free(kfree) = kcalloc(len, sizeof(*num), GFP_KERNEL);
+ if (!num)
+ return -ENOMEM;
+
+ rc = device_property_read_u32_array(uncore->dev, property, num, len);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < len; i++) {
+ for_each_possible_cpu(cpu) {
+ if (get_topo_id(cpu) != num[i])
+ continue;
+
+ cpumask_or(&uncore->related_cpus,
+ &uncore->related_cpus, get_cpumask(cpu));
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int get_package_id(int cpu)
+{
+ return topology_physical_package_id(cpu);
+}
+
+static const struct cpumask *get_package_cpumask(int cpu)
+{
+ return topology_core_cpumask(cpu);
+}
+
+static int get_cluster_id(int cpu)
+{
+ return topology_cluster_id(cpu);
+}
+
+static const struct cpumask *get_cluster_cpumask(int cpu)
+{
+ return topology_cluster_cpumask(cpu);
+}
+
+static int hisi_uncore_mark_related_cpus_wrap(struct hisi_uncore_freq *uncore)
+{
+ int rc;
+
+ cpumask_clear(&uncore->related_cpus);
+
+ rc = hisi_uncore_mark_related_cpus(uncore, "related-package",
+ get_package_id,
+ get_package_cpumask);
+ /* Success, or firmware probably broken */
+ if (!rc || rc != -EINVAL)
+ return rc;
+
+ /* Try another property name if rc == -EINVAL */
+ return hisi_uncore_mark_related_cpus(uncore, "related-cluster",
+ get_cluster_id,
+ get_cluster_cpumask);
+}
+
+static ssize_t related_cpus_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hisi_uncore_freq *uncore = dev_get_drvdata(dev->parent);
+
+ return cpumap_print_to_pagebuf(true, buf, &uncore->related_cpus);
+}
+
+static DEVICE_ATTR_RO(related_cpus);
+
+static struct attribute *hisi_uncore_freq_attrs[] = {
+ &dev_attr_related_cpus.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(hisi_uncore_freq);
+
+static int hisi_uncore_devfreq_register(struct hisi_uncore_freq *uncore)
+{
+ struct devfreq_dev_profile *profile;
+ struct device *dev = uncore->dev;
+ unsigned long freq;
+ u32 data = 0;
+ int rc;
+
+ rc = hisi_uncore_get_cur_freq(dev, &freq);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get plat init freq\n");
+
+ profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
+ if (!profile)
+ return -ENOMEM;
+
+ *profile = (struct devfreq_dev_profile) {
+ .initial_freq = freq,
+ .polling_ms = HUCF_DEFAULT_POLLING_MS,
+ .timer = DEVFREQ_TIMER_DELAYED,
+ .target = hisi_uncore_target,
+ .get_dev_status = hisi_uncore_get_dev_status,
+ .get_cur_freq = hisi_uncore_get_cur_freq,
+ .dev_groups = hisi_uncore_freq_groups,
+ };
+
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_MODE, &data);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get operate mode\n");
+
+ if (data == HUCF_MODE_PLATFORM)
+ uncore->devfreq = devm_devfreq_add_device(dev, profile,
+ hisi_platform_governor.name, NULL);
+ else
+ uncore->devfreq = devm_devfreq_add_device(dev, profile,
+ DEVFREQ_GOV_PERFORMANCE, NULL);
+ if (IS_ERR(uncore->devfreq))
+ return dev_err_probe(dev, PTR_ERR(uncore->devfreq),
+ "Failed to add devfreq device\n");
+
+ return 0;
+}
+
+static int hisi_uncore_freq_probe(struct platform_device *pdev)
+{
+ struct hisi_uncore_freq *uncore;
+ struct device *dev = &pdev->dev;
+ u32 cap = 0;
+ int rc;
+
+ uncore = devm_kzalloc(dev, sizeof(*uncore), GFP_KERNEL);
+ if (!uncore)
+ return -ENOMEM;
+
+ uncore->dev = dev;
+ platform_set_drvdata(pdev, uncore);
+
+ rc = hisi_uncore_init_pcc_chan(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to init PCC channel\n");
+
+ rc = hisi_uncore_init_opp(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to init OPP\n");
+
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_CAP, &cap);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get capability\n");
+
+ uncore->cap = cap;
+
+ rc = hisi_uncore_add_platform_gov(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to add hisi_platform governor\n");
+
+ rc = hisi_uncore_mark_related_cpus_wrap(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to mark related cpus\n");
+
+ rc = hisi_uncore_devfreq_register(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to register devfreq\n");
+
+ return 0;
+}
+
+static const struct acpi_device_id hisi_uncore_freq_acpi_match[] = {
+ { "HISI04F1", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_uncore_freq_acpi_match);
+
+static struct platform_driver hisi_uncore_freq_drv = {
+ .probe = hisi_uncore_freq_probe,
+ .driver = {
+ .name = "hisi_uncore_freq",
+ .acpi_match_table = hisi_uncore_freq_acpi_match,
+ },
+};
+module_platform_driver(hisi_uncore_freq_drv);
+
+MODULE_DESCRIPTION("HiSilicon uncore frequency scaling driver");
+MODULE_AUTHOR("Jie Zhan <zhanjie9@hisilicon.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c
index 22fe9e631f8a..4c22be728f6a 100644
--- a/drivers/devfreq/mtk-cci-devfreq.c
+++ b/drivers/devfreq/mtk-cci-devfreq.c
@@ -86,7 +86,7 @@ static int mtk_ccifreq_set_voltage(struct mtk_ccifreq_drv *drv, int new_voltage)
soc_data->sram_max_volt);
return ret;
}
- } else if (pre_voltage > new_voltage) {
+ } else {
voltage = max(new_voltage,
pre_vsram - soc_data->max_volt_shift);
ret = regulator_set_voltage(drv->proc_reg, voltage,
@@ -386,7 +386,8 @@ out_disable_cci_clk:
out_free_resources:
if (regulator_is_enabled(drv->proc_reg))
regulator_disable(drv->proc_reg);
- if (drv->sram_reg && regulator_is_enabled(drv->sram_reg))
+ if (!IS_ERR_OR_NULL(drv->sram_reg) &&
+ regulator_is_enabled(drv->sram_reg))
regulator_disable(drv->sram_reg);
return ret;
diff --git a/drivers/devfreq/sun8i-a33-mbus.c b/drivers/devfreq/sun8i-a33-mbus.c
index 7c6ae91ede1f..4bd5657558d6 100644
--- a/drivers/devfreq/sun8i-a33-mbus.c
+++ b/drivers/devfreq/sun8i-a33-mbus.c
@@ -360,7 +360,7 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev)
if (IS_ERR(priv->reg_mbus))
return PTR_ERR(priv->reg_mbus);
- priv->clk_bus = devm_clk_get(dev, "bus");
+ priv->clk_bus = devm_clk_get_enabled(dev, "bus");
if (IS_ERR(priv->clk_bus))
return dev_err_probe(dev, PTR_ERR(priv->clk_bus),
"failed to get bus clock\n");
@@ -375,24 +375,15 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(priv->clk_mbus),
"failed to get mbus clock\n");
- ret = clk_prepare_enable(priv->clk_bus);
- if (ret)
- return dev_err_probe(dev, ret,
- "failed to enable bus clock\n");
-
/* Lock the DRAM clock rate to keep priv->nominal_bw in sync. */
- ret = clk_rate_exclusive_get(priv->clk_dram);
- if (ret) {
- err = "failed to lock dram clock rate\n";
- goto err_disable_bus;
- }
+ ret = devm_clk_rate_exclusive_get(dev, priv->clk_dram);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to lock dram clock rate\n");
/* Lock the MBUS clock rate to keep MBUS_TMR_PERIOD in sync. */
- ret = clk_rate_exclusive_get(priv->clk_mbus);
- if (ret) {
- err = "failed to lock mbus clock rate\n";
- goto err_unlock_dram;
- }
+ ret = devm_clk_rate_exclusive_get(dev, priv->clk_mbus);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to lock mbus clock rate\n");
priv->gov_data.upthreshold = 10;
priv->gov_data.downdifferential = 5;
@@ -405,10 +396,8 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev)
priv->profile.max_state = max_state;
ret = devm_pm_opp_set_clkname(dev, "dram");
- if (ret) {
- err = "failed to add OPP table\n";
- goto err_unlock_mbus;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add OPP table\n");
base_freq = clk_get_rate(clk_get_parent(priv->clk_dram));
for (i = 0; i < max_state; ++i) {
@@ -448,12 +437,6 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev)
err_remove_opps:
dev_pm_opp_remove_all_dynamic(dev);
-err_unlock_mbus:
- clk_rate_exclusive_put(priv->clk_mbus);
-err_unlock_dram:
- clk_rate_exclusive_put(priv->clk_dram);
-err_disable_bus:
- clk_disable_unprepare(priv->clk_bus);
return dev_err_probe(dev, ret, err);
}
@@ -472,9 +455,6 @@ static void sun8i_a33_mbus_remove(struct platform_device *pdev)
dev_warn(dev, "failed to restore DRAM frequency: %d\n", ret);
dev_pm_opp_remove_all_dynamic(dev);
- clk_rate_exclusive_put(priv->clk_mbus);
- clk_rate_exclusive_put(priv->clk_dram);
- clk_disable_unprepare(priv->clk_bus);
}
static const struct sun8i_a33_mbus_variant sun50i_a64_mbus = {
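
The conversion above is the usual devres pattern: devm_clk_get_enabled() bundles get plus prepare_enable with automatic disable on unbind, and devm_clk_rate_exclusive_get() pairs the lock with an automatic clk_rate_exclusive_put(), run in reverse registration order, which is what lets the err_unlock_*/err_disable_bus labels disappear. Where no devm_* wrapper exists, the same effect can be had with devm_add_action_or_reset(); a sketch (probe-body excerpt, names illustrative):

static void demo_rate_exclusive_put(void *clk)
{
	clk_rate_exclusive_put(clk);
}

/* inside probe(): */
ret = clk_rate_exclusive_get(priv->clk_dram);
if (ret)
	return ret;
/* runs the action immediately if the registration itself fails */
ret = devm_add_action_or_reset(dev, demo_rate_exclusive_put, priv->clk_dram);
if (ret)
	return ret;
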
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index 4a4f0106ab9d..8b57194ac698 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -9,9 +9,11 @@
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -21,8 +23,6 @@
#include <soc/tegra/fuse.h>
-#include "governor.h"
-
#define ACTMON_GLB_STATUS 0x0
#define ACTMON_GLB_PERIOD_CTRL 0x4
@@ -326,14 +326,9 @@ static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
unsigned int i;
const struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
- for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
- if (cpu_freq >= ratio->cpu_freq) {
- if (ratio->emc_freq >= tegra->max_freq)
- return tegra->max_freq;
- else
- return ratio->emc_freq;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++)
+ if (cpu_freq >= ratio->cpu_freq)
+ return min(ratio->emc_freq, tegra->max_freq);
return 0;
}
diff --git a/drivers/dibs/Kconfig b/drivers/dibs/Kconfig
new file mode 100644
index 000000000000..5dc347b9b235
--- /dev/null
+++ b/drivers/dibs/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+config DIBS
+ tristate "DIBS support"
+ default n
+ help
+ Direct Internal Buffer Sharing (DIBS) is a communication method that
+ uses common physical (internal) memory for synchronous direct access
+ into a remote buffer.
+
+ Select this option to provide the abstraction layer between
+ dibs devices and dibs clients like the SMC protocol.
+ The module name is dibs.
+
+config DIBS_LO
+ bool "intra-OS shortcut with dibs loopback"
+ depends on DIBS
+ default n
+ help
+ DIBS_LO enables the creation of a software-emulated dibs device
+ named lo, which can be used for transferring data when communication
+ occurs within the same OS. This makes it convenient to test dibs
+ clients, since dibs loopback is independent of architecture and
+ hardware.
diff --git a/drivers/dibs/Makefile b/drivers/dibs/Makefile
new file mode 100644
index 000000000000..85805490c77f
--- /dev/null
+++ b/drivers/dibs/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# DIBS class module
+#
+
+dibs-y += dibs_main.o
+obj-$(CONFIG_DIBS) += dibs.o
+dibs-$(CONFIG_DIBS_LO) += dibs_loopback.o
\ No newline at end of file
diff --git a/drivers/dibs/dibs_loopback.c b/drivers/dibs/dibs_loopback.c
new file mode 100644
index 000000000000..aa029e29c6b2
--- /dev/null
+++ b/drivers/dibs/dibs_loopback.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions for dibs loopback/loopback-ism device.
+ *
+ * Copyright (c) 2024, Alibaba Inc.
+ *
+ * Author: Wen Gu <guwen@linux.alibaba.com>
+ * Tony Lu <tonylu@linux.alibaba.com>
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/dibs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "dibs_loopback.h"
+
+#define DIBS_LO_SUPPORT_NOCOPY 0x1
+#define DIBS_DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+static const char dibs_lo_dev_name[] = "lo";
+/* global loopback device */
+static struct dibs_lo_dev *lo_dev;
+
+static u16 dibs_lo_get_fabric_id(struct dibs_dev *dibs)
+{
+ return DIBS_LOOPBACK_FABRIC;
+}
+
+static int dibs_lo_query_rgid(struct dibs_dev *dibs, const uuid_t *rgid,
+ u32 vid_valid, u32 vid)
+{
+ /* rgid should be the same as lgid */
+ if (!uuid_equal(rgid, &dibs->gid))
+ return -ENETUNREACH;
+ return 0;
+}
+
+static int dibs_lo_max_dmbs(void)
+{
+ return DIBS_LO_MAX_DMBS;
+}
+
+static int dibs_lo_register_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb,
+ struct dibs_client *client)
+{
+ struct dibs_lo_dmb_node *dmb_node, *tmp_node;
+ struct dibs_lo_dev *ldev;
+ struct folio *folio;
+ unsigned long flags;
+ int sba_idx, rc;
+
+ ldev = dibs->drv_priv;
+ sba_idx = dmb->idx;
+ /* check space for new dmb */
+ for_each_clear_bit(sba_idx, ldev->sba_idx_mask, DIBS_LO_MAX_DMBS) {
+ if (!test_and_set_bit(sba_idx, ldev->sba_idx_mask))
+ break;
+ }
+ if (sba_idx == DIBS_LO_MAX_DMBS)
+ return -ENOSPC;
+
+ dmb_node = kzalloc(sizeof(*dmb_node), GFP_KERNEL);
+ if (!dmb_node) {
+ rc = -ENOMEM;
+ goto err_bit;
+ }
+
+ dmb_node->sba_idx = sba_idx;
+ dmb_node->len = dmb->dmb_len;
+
+ /* not critical; fail under memory pressure and fallback to TCP */
+ folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
+ __GFP_NORETRY | __GFP_ZERO,
+ get_order(dmb_node->len));
+ if (!folio) {
+ rc = -ENOMEM;
+ goto err_node;
+ }
+ dmb_node->cpu_addr = folio_address(folio);
+ dmb_node->dma_addr = DIBS_DMA_ADDR_INVALID;
+ refcount_set(&dmb_node->refcnt, 1);
+
+again:
+ /* add new dmb into hash table */
+ get_random_bytes(&dmb_node->token, sizeof(dmb_node->token));
+ write_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_node->token) {
+ if (tmp_node->token == dmb_node->token) {
+ write_unlock_bh(&ldev->dmb_ht_lock);
+ goto again;
+ }
+ }
+ hash_add(ldev->dmb_ht, &dmb_node->list, dmb_node->token);
+ write_unlock_bh(&ldev->dmb_ht_lock);
+ atomic_inc(&ldev->dmb_cnt);
+
+ dmb->idx = dmb_node->sba_idx;
+ dmb->dmb_tok = dmb_node->token;
+ dmb->cpu_addr = dmb_node->cpu_addr;
+ dmb->dma_addr = dmb_node->dma_addr;
+ dmb->dmb_len = dmb_node->len;
+
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->dmb_clientid_arr[sba_idx] = client->id;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+
+ return 0;
+
+err_node:
+ kfree(dmb_node);
+err_bit:
+ clear_bit(sba_idx, ldev->sba_idx_mask);
+ return rc;
+}
+
+static void __dibs_lo_unregister_dmb(struct dibs_lo_dev *ldev,
+ struct dibs_lo_dmb_node *dmb_node)
+{
+ /* remove dmb from hash table */
+ write_lock_bh(&ldev->dmb_ht_lock);
+ hash_del(&dmb_node->list);
+ write_unlock_bh(&ldev->dmb_ht_lock);
+
+ clear_bit(dmb_node->sba_idx, ldev->sba_idx_mask);
+ folio_put(virt_to_folio(dmb_node->cpu_addr));
+ kfree(dmb_node);
+
+ if (atomic_dec_and_test(&ldev->dmb_cnt))
+ wake_up(&ldev->ldev_release);
+}
+
+static int dibs_lo_unregister_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb)
+{
+ struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+ unsigned long flags;
+
+ ldev = dibs->drv_priv;
+
+ /* find dmb from hash table */
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
+ if (tmp_node->token == dmb->dmb_tok) {
+ dmb_node = tmp_node;
+ break;
+ }
+ }
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ if (!dmb_node)
+ return -EINVAL;
+
+ if (refcount_dec_and_test(&dmb_node->refcnt)) {
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->dmb_clientid_arr[dmb_node->sba_idx] = NO_DIBS_CLIENT;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+
+ __dibs_lo_unregister_dmb(ldev, dmb_node);
+ }
+ return 0;
+}
+
+static int dibs_lo_support_dmb_nocopy(struct dibs_dev *dibs)
+{
+ return DIBS_LO_SUPPORT_NOCOPY;
+}
+
+static int dibs_lo_attach_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb)
+{
+ struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+
+ ldev = dibs->drv_priv;
+
+ /* find dmb_node according to dmb->dmb_tok */
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
+ if (tmp_node->token == dmb->dmb_tok) {
+ dmb_node = tmp_node;
+ break;
+ }
+ }
+ if (!dmb_node) {
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ return -EINVAL;
+ }
+ read_unlock_bh(&ldev->dmb_ht_lock);
+
+ /* the dmb is being unregistered, but has not yet been
+ * removed from the hash table.
+ */
+ if (!refcount_inc_not_zero(&dmb_node->refcnt))
+ return -EINVAL;
+
+ /* provide dmb information */
+ dmb->idx = dmb_node->sba_idx;
+ dmb->dmb_tok = dmb_node->token;
+ dmb->cpu_addr = dmb_node->cpu_addr;
+ dmb->dma_addr = dmb_node->dma_addr;
+ dmb->dmb_len = dmb_node->len;
+ return 0;
+}
+
+static int dibs_lo_detach_dmb(struct dibs_dev *dibs, u64 token)
+{
+ struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+
+ ldev = dibs->drv_priv;
+
+ /* find dmb_node according to dmb->dmb_tok */
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, token) {
+ if (tmp_node->token == token) {
+ dmb_node = tmp_node;
+ break;
+ }
+ }
+ if (!dmb_node) {
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ return -EINVAL;
+ }
+ read_unlock_bh(&ldev->dmb_ht_lock);
+
+ if (refcount_dec_and_test(&dmb_node->refcnt))
+ __dibs_lo_unregister_dmb(ldev, dmb_node);
+ return 0;
+}
+
+static int dibs_lo_move_data(struct dibs_dev *dibs, u64 dmb_tok,
+ unsigned int idx, bool sf, unsigned int offset,
+ void *data, unsigned int size)
+{
+ struct dibs_lo_dmb_node *rmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+ u16 s_mask;
+ u8 client_id;
+ u32 sba_idx;
+
+ ldev = dibs->drv_priv;
+
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_tok) {
+ if (tmp_node->token == dmb_tok) {
+ rmb_node = tmp_node;
+ break;
+ }
+ }
+ if (!rmb_node) {
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ return -EINVAL;
+ }
+ memcpy((char *)rmb_node->cpu_addr + offset, data, size);
+ sba_idx = rmb_node->sba_idx;
+ read_unlock_bh(&ldev->dmb_ht_lock);
+
+ if (!sf)
+ return 0;
+
+ spin_lock(&dibs->lock);
+ client_id = dibs->dmb_clientid_arr[sba_idx];
+ s_mask = ror16(0x1000, idx);
+ if (likely(client_id != NO_DIBS_CLIENT && dibs->subs[client_id]))
+ dibs->subs[client_id]->ops->handle_irq(dibs, sba_idx, s_mask);
+ spin_unlock(&dibs->lock);
+
+ return 0;
+}
+
+static const struct dibs_dev_ops dibs_lo_ops = {
+ .get_fabric_id = dibs_lo_get_fabric_id,
+ .query_remote_gid = dibs_lo_query_rgid,
+ .max_dmbs = dibs_lo_max_dmbs,
+ .register_dmb = dibs_lo_register_dmb,
+ .unregister_dmb = dibs_lo_unregister_dmb,
+ .move_data = dibs_lo_move_data,
+ .support_mmapped_rdmb = dibs_lo_support_dmb_nocopy,
+ .attach_dmb = dibs_lo_attach_dmb,
+ .detach_dmb = dibs_lo_detach_dmb,
+};
+
+static void dibs_lo_dev_init(struct dibs_lo_dev *ldev)
+{
+ rwlock_init(&ldev->dmb_ht_lock);
+ hash_init(ldev->dmb_ht);
+ atomic_set(&ldev->dmb_cnt, 0);
+ init_waitqueue_head(&ldev->ldev_release);
+}
+
+static void dibs_lo_dev_exit(struct dibs_lo_dev *ldev)
+{
+ if (atomic_read(&ldev->dmb_cnt))
+ wait_event(ldev->ldev_release, !atomic_read(&ldev->dmb_cnt));
+}
+
+static int dibs_lo_dev_probe(void)
+{
+ struct dibs_lo_dev *ldev;
+ struct dibs_dev *dibs;
+ int ret;
+
+ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+ if (!ldev)
+ return -ENOMEM;
+
+ dibs = dibs_dev_alloc();
+ if (!dibs) {
+ kfree(ldev);
+ return -ENOMEM;
+ }
+
+ ldev->dibs = dibs;
+ dibs->drv_priv = ldev;
+ dibs_lo_dev_init(ldev);
+ uuid_gen(&dibs->gid);
+ dibs->ops = &dibs_lo_ops;
+
+ dibs->dev.parent = NULL;
+ dev_set_name(&dibs->dev, "%s", dibs_lo_dev_name);
+
+ ret = dibs_dev_add(dibs);
+ if (ret)
+ goto err_reg;
+ lo_dev = ldev;
+ return 0;
+
+err_reg:
+ kfree(dibs->dmb_clientid_arr);
+ /* pairs with dibs_dev_alloc() */
+ put_device(&dibs->dev);
+ kfree(ldev);
+
+ return ret;
+}
+
+static void dibs_lo_dev_remove(void)
+{
+ if (!lo_dev)
+ return;
+
+ dibs_dev_del(lo_dev->dibs);
+ dibs_lo_dev_exit(lo_dev);
+ /* pairs with dibs_dev_alloc() */
+ put_device(&lo_dev->dibs->dev);
+ kfree(lo_dev);
+ lo_dev = NULL;
+}
+
+int dibs_loopback_init(void)
+{
+ return dibs_lo_dev_probe();
+}
+
+void dibs_loopback_exit(void)
+{
+ dibs_lo_dev_remove();
+}
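
The register/attach/detach paths above share a single refcount per DMB, so the backing folio is freed by whichever side drops the last reference, regardless of ordering. A worked trace of the nocopy case (illustrative sequence):

/*
 * register_dmb()     refcnt = 1        owner allocates the buffer
 * attach_dmb()       refcnt 1 -> 2     peer maps the same memory (nocopy)
 * unregister_dmb()   refcnt 2 -> 1     owner is gone, buffer survives
 * detach_dmb()       refcnt 1 -> 0     last user: __dibs_lo_unregister_dmb()
 *                                      unhashes and frees the buffer
 */
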
diff --git a/drivers/dibs/dibs_loopback.h b/drivers/dibs/dibs_loopback.h
new file mode 100644
index 000000000000..0664f6a8e662
--- /dev/null
+++ b/drivers/dibs/dibs_loopback.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * dibs loopback (aka loopback-ism) device structure definitions.
+ *
+ * Copyright (c) 2024, Alibaba Inc.
+ *
+ * Author: Wen Gu <guwen@linux.alibaba.com>
+ * Tony Lu <tonylu@linux.alibaba.com>
+ *
+ */
+
+#ifndef _DIBS_LOOPBACK_H
+#define _DIBS_LOOPBACK_H
+
+#include <linux/dibs.h>
+#include <linux/hashtable.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#if IS_ENABLED(CONFIG_DIBS_LO)
+#define DIBS_LO_DMBS_HASH_BITS 12
+#define DIBS_LO_MAX_DMBS 5000
+
+struct dibs_lo_dmb_node {
+ struct hlist_node list;
+ u64 token;
+ u32 len;
+ u32 sba_idx;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ refcount_t refcnt;
+};
+
+struct dibs_lo_dev {
+ struct dibs_dev *dibs;
+ atomic_t dmb_cnt;
+ rwlock_t dmb_ht_lock;
+ DECLARE_BITMAP(sba_idx_mask, DIBS_LO_MAX_DMBS);
+ DECLARE_HASHTABLE(dmb_ht, DIBS_LO_DMBS_HASH_BITS);
+ wait_queue_head_t ldev_release;
+};
+
+int dibs_loopback_init(void);
+void dibs_loopback_exit(void);
+#else
+static inline int dibs_loopback_init(void)
+{
+ return 0;
+}
+
+static inline void dibs_loopback_exit(void)
+{
+}
+#endif
+
+#endif /* _DIBS_LOOPBACK_H */
diff --git a/drivers/dibs/dibs_main.c b/drivers/dibs/dibs_main.c
new file mode 100644
index 000000000000..b8c16586706c
--- /dev/null
+++ b/drivers/dibs/dibs_main.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DIBS - Direct Internal Buffer Sharing
+ *
+ * Implementation of the DIBS class module
+ *
+ * Copyright IBM Corp. 2025
+ */
+#define pr_fmt(fmt) "dibs: " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dibs.h>
+
+#include "dibs_loopback.h"
+
+MODULE_DESCRIPTION("Direct Internal Buffer Sharing class");
+MODULE_LICENSE("GPL");
+
+static struct class *dibs_class;
+
+/* use an array rather than a list for fast mapping: */
+static struct dibs_client *clients[MAX_DIBS_CLIENTS];
+static u8 max_client;
+static DEFINE_MUTEX(clients_lock);
+struct dibs_dev_list {
+ struct list_head list;
+ struct mutex mutex; /* protects dibs device list */
+};
+
+static struct dibs_dev_list dibs_dev_list = {
+ .list = LIST_HEAD_INIT(dibs_dev_list.list),
+ .mutex = __MUTEX_INITIALIZER(dibs_dev_list.mutex),
+};
+
+static void dibs_setup_forwarding(struct dibs_client *client,
+ struct dibs_dev *dibs)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->subs[client->id] = client;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+}
+
+int dibs_register_client(struct dibs_client *client)
+{
+ struct dibs_dev *dibs;
+ int i, rc = -ENOSPC;
+
+ mutex_lock(&dibs_dev_list.mutex);
+ mutex_lock(&clients_lock);
+ for (i = 0; i < MAX_DIBS_CLIENTS; ++i) {
+ if (!clients[i]) {
+ clients[i] = client;
+ client->id = i;
+ if (i == max_client)
+ max_client++;
+ rc = 0;
+ break;
+ }
+ }
+ mutex_unlock(&clients_lock);
+
+ if (i < MAX_DIBS_CLIENTS) {
+ /* initialize with all devices that we got so far */
+ list_for_each_entry(dibs, &dibs_dev_list.list, list) {
+ dibs->priv[i] = NULL;
+ client->ops->add_dev(dibs);
+ dibs_setup_forwarding(client, dibs);
+ }
+ }
+ mutex_unlock(&dibs_dev_list.mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dibs_register_client);
+
+int dibs_unregister_client(struct dibs_client *client)
+{
+ struct dibs_dev *dibs;
+ unsigned long flags;
+ int max_dmbs;
+ int rc = 0;
+
+ mutex_lock(&dibs_dev_list.mutex);
+ list_for_each_entry(dibs, &dibs_dev_list.list, list) {
+ spin_lock_irqsave(&dibs->lock, flags);
+ max_dmbs = dibs->ops->max_dmbs();
+ for (int i = 0; i < max_dmbs; ++i) {
+ if (dibs->dmb_clientid_arr[i] == client->id) {
+ WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
+ __func__, client->name);
+ rc = -EBUSY;
+ goto err_reg_dmb;
+ }
+ }
+ /* Stop forwarding IRQs and events */
+ dibs->subs[client->id] = NULL;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+ clients[client->id]->ops->del_dev(dibs);
+ dibs->priv[client->id] = NULL;
+ }
+
+ mutex_lock(&clients_lock);
+ clients[client->id] = NULL;
+ if (client->id + 1 == max_client)
+ max_client--;
+ mutex_unlock(&clients_lock);
+
+ mutex_unlock(&dibs_dev_list.mutex);
+ return rc;
+
+err_reg_dmb:
+ spin_unlock_irqrestore(&dibs->lock, flags);
+ mutex_unlock(&dibs_dev_list.mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dibs_unregister_client);
+
+static void dibs_dev_release(struct device *dev)
+{
+ struct dibs_dev *dibs;
+
+ dibs = container_of(dev, struct dibs_dev, dev);
+
+ kfree(dibs);
+}
+
+struct dibs_dev *dibs_dev_alloc(void)
+{
+ struct dibs_dev *dibs;
+
+ dibs = kzalloc(sizeof(*dibs), GFP_KERNEL);
+ if (!dibs)
+ return dibs;
+ dibs->dev.release = dibs_dev_release;
+ dibs->dev.class = dibs_class;
+ device_initialize(&dibs->dev);
+
+ return dibs;
+}
+EXPORT_SYMBOL_GPL(dibs_dev_alloc);
+
+static ssize_t gid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dibs_dev *dibs;
+
+ dibs = container_of(dev, struct dibs_dev, dev);
+
+ return sysfs_emit(buf, "%pUb\n", &dibs->gid);
+}
+static DEVICE_ATTR_RO(gid);
+
+static ssize_t fabric_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dibs_dev *dibs;
+ u16 fabric_id;
+
+ dibs = container_of(dev, struct dibs_dev, dev);
+ fabric_id = dibs->ops->get_fabric_id(dibs);
+
+ return sysfs_emit(buf, "0x%04x\n", fabric_id);
+}
+static DEVICE_ATTR_RO(fabric_id);
+
+static struct attribute *dibs_dev_attrs[] = {
+ &dev_attr_gid.attr,
+ &dev_attr_fabric_id.attr,
+ NULL,
+};
+
+static const struct attribute_group dibs_dev_attr_group = {
+ .attrs = dibs_dev_attrs,
+};
+
+int dibs_dev_add(struct dibs_dev *dibs)
+{
+ int max_dmbs;
+ int i, ret;
+
+ max_dmbs = dibs->ops->max_dmbs();
+ spin_lock_init(&dibs->lock);
+ dibs->dmb_clientid_arr = kzalloc(max_dmbs, GFP_KERNEL);
+ if (!dibs->dmb_clientid_arr)
+ return -ENOMEM;
+ memset(dibs->dmb_clientid_arr, NO_DIBS_CLIENT, max_dmbs);
+
+ ret = device_add(&dibs->dev);
+ if (ret)
+ goto free_client_arr;
+
+ ret = sysfs_create_group(&dibs->dev.kobj, &dibs_dev_attr_group);
+ if (ret) {
+ dev_err(&dibs->dev, "sysfs_create_group failed for dibs_dev\n");
+ goto err_device_del;
+ }
+ mutex_lock(&dibs_dev_list.mutex);
+ mutex_lock(&clients_lock);
+ for (i = 0; i < max_client; ++i) {
+ if (clients[i]) {
+ clients[i]->ops->add_dev(dibs);
+ dibs_setup_forwarding(clients[i], dibs);
+ }
+ }
+ mutex_unlock(&clients_lock);
+ list_add(&dibs->list, &dibs_dev_list.list);
+ mutex_unlock(&dibs_dev_list.mutex);
+
+ return 0;
+
+err_device_del:
+ device_del(&dibs->dev);
+free_client_arr:
+ kfree(dibs->dmb_clientid_arr);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dibs_dev_add);
+
+void dibs_dev_del(struct dibs_dev *dibs)
+{
+ unsigned long flags;
+ int i;
+
+ sysfs_remove_group(&dibs->dev.kobj, &dibs_dev_attr_group);
+
+ spin_lock_irqsave(&dibs->lock, flags);
+ for (i = 0; i < MAX_DIBS_CLIENTS; ++i)
+ dibs->subs[i] = NULL;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+
+ mutex_lock(&dibs_dev_list.mutex);
+ mutex_lock(&clients_lock);
+ for (i = 0; i < max_client; ++i) {
+ if (clients[i])
+ clients[i]->ops->del_dev(dibs);
+ }
+ mutex_unlock(&clients_lock);
+ list_del_init(&dibs->list);
+ mutex_unlock(&dibs_dev_list.mutex);
+
+ device_del(&dibs->dev);
+ kfree(dibs->dmb_clientid_arr);
+}
+EXPORT_SYMBOL_GPL(dibs_dev_del);
+
+static int __init dibs_init(void)
+{
+ int rc;
+
+ dibs_class = class_create("dibs");
+ if (IS_ERR(dibs_class))
+ return PTR_ERR(dibs_class);
+
+ rc = dibs_loopback_init();
+ if (rc)
+ pr_err("%s fails with %d\n", __func__, rc);
+
+ return rc;
+}
+
+static void __exit dibs_exit(void)
+{
+ dibs_loopback_exit();
+ class_destroy(dibs_class);
+}
+
+subsys_initcall(dibs_init);
+module_exit(dibs_exit);
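
From a client's point of view, the registration API above pairs with a struct dibs_client whose ops are invoked as seen here: add_dev()/del_dev() when devices come and go, handle_irq() when a DMB is signalled. A hypothetical skeleton, with the ops signatures inferred from the call sites in this patch:

static void demo_add_dev(struct dibs_dev *dibs)
{
	/* called under dibs_dev_list.mutex; set up per-device state */
}

static void demo_del_dev(struct dibs_dev *dibs)
{
	/* IRQ forwarding is already stopped; tear down per-device state */
}

static void demo_handle_irq(struct dibs_dev *dibs, unsigned int idx,
			    u16 dmbemask)
{
	/* DMB 'idx' was signalled, e.g. by dibs_lo_move_data() above */
}

static const struct dibs_client_ops demo_client_ops = {
	.add_dev = demo_add_dev,
	.del_dev = demo_del_dev,
	.handle_irq = demo_handle_irq,
};

static struct dibs_client demo_client = {
	.name = "demo",
	.ops = &demo_client_ops,
};

/* module init: dibs_register_client(&demo_client);   */
/* module exit: dibs_unregister_client(&demo_client); */
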
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index fee04fdb0822..b46eb8a552d7 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -36,7 +36,6 @@ config UDMABUF
depends on DMA_SHARED_BUFFER
depends on MEMFD_CREATE || COMPILE_TEST
depends on MMU
- select VMAP_PFN
help
A driver to let userspace turn memfd regions into dma-bufs.
Qemu can use this to create host dmabufs for guest framebuffers.
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 70ec901edf2c..2008fb7481b3 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
- dma-fence-unwrap.o dma-resv.o
+ dma-fence-unwrap.o dma-resv.o dma-buf-mapping.o
obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o
obj-$(CONFIG_DMABUF_HEAPS) += heaps/
obj-$(CONFIG_SYNC_FILE) += sync_file.o
diff --git a/drivers/dma-buf/dma-buf-mapping.c b/drivers/dma-buf/dma-buf-mapping.c
new file mode 100644
index 000000000000..b7352e609fbd
--- /dev/null
+++ b/drivers/dma-buf/dma-buf-mapping.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DMA BUF Mapping Helpers
+ *
+ */
+#include <linux/dma-buf-mapping.h>
+#include <linux/dma-resv.h>
+
+static struct scatterlist *fill_sg_entry(struct scatterlist *sgl, size_t length,
+ dma_addr_t addr)
+{
+ unsigned int len, nents;
+ int i;
+
+ nents = DIV_ROUND_UP(length, UINT_MAX);
+ for (i = 0; i < nents; i++) {
+ len = min_t(size_t, length, UINT_MAX);
+ length -= len;
+ /*
+ * DMABUF abuses scatterlist to create a scatterlist
+ * that does not have any CPU list, only the DMA list.
+ * Always set the page related values to NULL to ensure
+ * importers can't use it. The phys_addr based DMA API
+ * does not require the CPU list for mapping or unmapping.
+ */
+ sg_set_page(sgl, NULL, 0, 0);
+ sg_dma_address(sgl) = addr + (dma_addr_t)i * UINT_MAX;
+ sg_dma_len(sgl) = len;
+ sgl = sg_next(sgl);
+ }
+
+ return sgl;
+}
+
+static unsigned int calc_sg_nents(struct dma_iova_state *state,
+ struct dma_buf_phys_vec *phys_vec,
+ size_t nr_ranges, size_t size)
+{
+ unsigned int nents = 0;
+ size_t i;
+
+ if (!state || !dma_use_iova(state)) {
+ for (i = 0; i < nr_ranges; i++)
+ nents += DIV_ROUND_UP(phys_vec[i].len, UINT_MAX);
+ } else {
+ /*
+ * In the IOVA case a single mapping spans the whole IOVA range,
+ * but each SG entry's length is capped at UINT_MAX, so more than
+ * one entry may be needed.
+ */
+ nents = DIV_ROUND_UP(size, UINT_MAX);
+ }
+
+ return nents;
+}
+
+/**
+ * struct dma_buf_dma - holds DMA mapping information
+ * @sgt: Scatter-gather table
+ * @state: DMA IOVA state relevant in IOMMU-based DMA
+ * @size: Total size of DMA transfer
+ */
+struct dma_buf_dma {
+ struct sg_table sgt;
+ struct dma_iova_state *state;
+ size_t size;
+};
+
+/**
+ * dma_buf_phys_vec_to_sgt - Returns the scatterlist table of the attachment
+ * from arrays of physical vectors. This function is intended for MMIO memory
+ * only.
+ * @attach: [in] attachment whose scatterlist is to be returned
+ * @provider: [in] p2pdma provider
+ * @phys_vec: [in] array of physical vectors
+ * @nr_ranges: [in] number of entries in phys_vec array
+ * @size: [in] total size of phys_vec
+ * @dir: [in] direction of DMA transfer
+ *
+ * Returns an sg_table containing the scatterlist on success; returns ERR_PTR
+ * on error. May return -EINTR if it is interrupted by a signal.
+ *
+ * On success, the DMA addresses and lengths in the returned scatterlist are
+ * PAGE_SIZE aligned.
+ *
+ * A mapping must be unmapped by using dma_buf_free_sgt().
+ *
+ * NOTE: This function is intended for exporters. If direct traffic routing
+ * is mandatory, the exporter should check the mapping type with
+ * pci_p2pdma_map_type() before calling this function.
+ */
+struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach,
+ struct p2pdma_provider *provider,
+ struct dma_buf_phys_vec *phys_vec,
+ size_t nr_ranges, size_t size,
+ enum dma_data_direction dir)
+{
+ unsigned int nents, mapped_len = 0;
+ struct dma_buf_dma *dma;
+ struct scatterlist *sgl;
+ dma_addr_t addr;
+ size_t i;
+ int ret;
+
+ /* This function is supposed to work on MMIO memory only */
+ if (WARN_ON(!attach || !attach->dmabuf || !provider))
+ return ERR_PTR(-EINVAL);
+
+ dma_resv_assert_held(attach->dmabuf->resv);
+
+ dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return ERR_PTR(-ENOMEM);
+
+ switch (pci_p2pdma_map_type(provider, attach->dev)) {
+ case PCI_P2PDMA_MAP_BUS_ADDR:
+ /*
+ * There is no need in IOVA at all for this flow.
+ */
+ break;
+ case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+ dma->state = kzalloc(sizeof(*dma->state), GFP_KERNEL);
+ if (!dma->state) {
+ ret = -ENOMEM;
+ goto err_free_dma;
+ }
+
+ dma_iova_try_alloc(attach->dev, dma->state, 0, size);
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_free_dma;
+ }
+
+ nents = calc_sg_nents(dma->state, phys_vec, nr_ranges, size);
+ ret = sg_alloc_table(&dma->sgt, nents, GFP_KERNEL | __GFP_ZERO);
+ if (ret)
+ goto err_free_state;
+
+ sgl = dma->sgt.sgl;
+
+ for (i = 0; i < nr_ranges; i++) {
+ if (!dma->state) {
+ addr = pci_p2pdma_bus_addr_map(provider,
+ phys_vec[i].paddr);
+ } else if (dma_use_iova(dma->state)) {
+ ret = dma_iova_link(attach->dev, dma->state,
+ phys_vec[i].paddr, 0,
+ phys_vec[i].len, dir,
+ DMA_ATTR_MMIO);
+ if (ret)
+ goto err_unmap_dma;
+
+ mapped_len += phys_vec[i].len;
+ } else {
+ addr = dma_map_phys(attach->dev, phys_vec[i].paddr,
+ phys_vec[i].len, dir,
+ DMA_ATTR_MMIO);
+ ret = dma_mapping_error(attach->dev, addr);
+ if (ret)
+ goto err_unmap_dma;
+ }
+
+ if (!dma->state || !dma_use_iova(dma->state))
+ sgl = fill_sg_entry(sgl, phys_vec[i].len, addr);
+ }
+
+ if (dma->state && dma_use_iova(dma->state)) {
+ WARN_ON_ONCE(mapped_len != size);
+ ret = dma_iova_sync(attach->dev, dma->state, 0, mapped_len);
+ if (ret)
+ goto err_unmap_dma;
+
+ sgl = fill_sg_entry(sgl, mapped_len, dma->state->addr);
+ }
+
+ dma->size = size;
+
+ /*
+ * No CPU list is included; set orig_nents = 0 so importers can
+ * detect this via the SG table and use nents only.
+ */
+ dma->sgt.orig_nents = 0;
+
+ /*
+ * sgl must be NULL here: the loop consumed exactly the number of
+ * entries allocated in sg_alloc_table().
+ */
+ WARN_ON_ONCE(sgl);
+ return &dma->sgt;
+
+err_unmap_dma:
+ if (!i || !dma->state) {
+ ; /* Do nothing */
+ } else if (dma_use_iova(dma->state)) {
+ dma_iova_destroy(attach->dev, dma->state, mapped_len, dir,
+ DMA_ATTR_MMIO);
+ } else {
+ for_each_sgtable_dma_sg(&dma->sgt, sgl, i)
+ dma_unmap_phys(attach->dev, sg_dma_address(sgl),
+ sg_dma_len(sgl), dir, DMA_ATTR_MMIO);
+ }
+ sg_free_table(&dma->sgt);
+err_free_state:
+ kfree(dma->state);
+err_free_dma:
+ kfree(dma);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_phys_vec_to_sgt, "DMA_BUF");
+
+/**
+ * dma_buf_free_sgt - unmaps the buffer
+ * @attach: [in] attachment to unmap buffer from
+ * @sgt: [in] scatterlist info of the buffer to unmap
+ * @dir: [in] direction of DMA transfer
+ *
+ * This unmaps a DMA mapping for @attach obtained
+ * by dma_buf_phys_vec_to_sgt().
+ */
+void dma_buf_free_sgt(struct dma_buf_attachment *attach, struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ struct dma_buf_dma *dma = container_of(sgt, struct dma_buf_dma, sgt);
+ int i;
+
+ dma_resv_assert_held(attach->dmabuf->resv);
+
+ if (!dma->state) {
+ ; /* Do nothing */
+ } else if (dma_use_iova(dma->state)) {
+ dma_iova_destroy(attach->dev, dma->state, dma->size, dir,
+ DMA_ATTR_MMIO);
+ } else {
+ struct scatterlist *sgl;
+
+ for_each_sgtable_dma_sg(sgt, sgl, i)
+ dma_unmap_phys(attach->dev, sg_dma_address(sgl),
+ sg_dma_len(sgl), dir, DMA_ATTR_MMIO);
+ }
+
+ sg_free_table(sgt);
+ kfree(dma->state);
+ kfree(dma);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_free_sgt, "DMA_BUF");
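
A hypothetical exporter wiring the two helpers into its attachment callbacks; struct demo_exporter and its fields are assumptions for illustration, not part of this patch. Note the importer-facing consequence of orig_nents == 0: these tables carry no CPU list and must only be walked with the DMA-side iterators (for_each_sgtable_dma_sg()).

struct demo_exporter {				/* hypothetical exporter state */
	struct p2pdma_provider *provider;
	struct dma_buf_phys_vec *phys_vec;
	size_t nr_ranges;
	size_t size;
};

static struct sg_table *demo_map_dma_buf(struct dma_buf_attachment *attach,
					 enum dma_data_direction dir)
{
	struct demo_exporter *ex = attach->dmabuf->priv;

	/* called with the reservation lock held, as asserted above */
	return dma_buf_phys_vec_to_sgt(attach, ex->provider, ex->phys_vec,
				       ex->nr_ranges, ex->size, dir);
}

static void demo_unmap_dma_buf(struct dma_buf_attachment *attach,
			       struct sg_table *sgt,
			       enum dma_data_direction dir)
{
	dma_buf_free_sgt(attach, sgt, dir);
}
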
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 890ecac04dac..edaa9e4ee4ae 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -768,18 +768,10 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_export, "DMA_BUF");
*/
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
- int fd;
-
if (!dmabuf || !dmabuf->file)
return -EINVAL;
- fd = get_unused_fd_flags(flags);
- if (fd < 0)
- return fd;
-
- fd_install(fd, dmabuf->file);
-
- return fd;
+ return FD_ADD(flags, dmabuf->file);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_fd, "DMA_BUF");
@@ -1118,7 +1110,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
* Catch exporters making buffers inaccessible even when
* attachments preventing that exist.
*/
- WARN_ON_ONCE(ret == EBUSY);
+ WARN_ON_ONCE(ret == -EBUSY);
if (ret)
return ERR_PTR(ret);
}
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index 9663ba1bb6ac..a8a90acf4f34 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -218,7 +218,6 @@ static void dma_fence_chain_set_deadline(struct dma_fence *fence,
}
const struct dma_fence_ops dma_fence_chain_ops = {
- .use_64bit_seqno = true,
.get_driver_name = dma_fence_chain_get_driver_name,
.get_timeline_name = dma_fence_chain_get_timeline_name,
.enable_signaling = dma_fence_chain_enable_signaling,
@@ -252,7 +251,7 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
chain->prev_seqno = 0;
/* Try to reuse the context of the previous chain node. */
- if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
+ if (prev_chain && __dma_fence_is_later(prev, seqno, prev->seqno)) {
context = prev->context;
chain->prev_seqno = prev->seqno;
} else {
@@ -262,8 +261,8 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
seqno = max(prev->seqno, seqno);
}
- dma_fence_init(&chain->base, &dma_fence_chain_ops,
- &chain->lock, context, seqno);
+ dma_fence_init64(&chain->base, &dma_fence_chain_ops, &chain->lock,
+ context, seqno);
/*
* Chaining dma_fence_chain container together is only allowed through
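
The chain code above is the first conversion to dma_fence_init64() (added in dma-fence.c below), which replaces the old .use_64bit_seqno ops flag; note that __dma_fence_is_later() now takes the fence itself so it can consult per-fence state. For a driver, the conversion is a one-liner (fence and ops names illustrative):

/* before: 64-bit seqno handling selected through the ops flag
 *   .use_64bit_seqno = true in my_fence_ops, then
 *   dma_fence_init(&f->base, &my_fence_ops, &f->lock, ctx, seqno);
 *
 * after: selected at init time, the ops can stay flag-free
 */
dma_fence_init64(&f->base, &my_fence_ops, &f->lock, ctx, seqno);
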
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index f0cdd3e99d36..b4f5c8635276 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -121,29 +121,27 @@ static const struct dma_fence_ops dma_fence_stub_ops = {
.get_timeline_name = dma_fence_stub_get_name,
};
+static int __init dma_fence_init_stub(void)
+{
+ dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
+ &dma_fence_stub_lock, 0, 0);
+
+ set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &dma_fence_stub.flags);
+
+ dma_fence_signal(&dma_fence_stub);
+ return 0;
+}
+subsys_initcall(dma_fence_init_stub);
+
/**
* dma_fence_get_stub - return a signaled fence
*
- * Return a stub fence which is already signaled. The fence's
- * timestamp corresponds to the first time after boot this
- * function is called.
+ * Return a stub fence which is already signaled. The fence's timestamp
+ * corresponds to the initialisation time of the linux kernel.
*/
struct dma_fence *dma_fence_get_stub(void)
{
- spin_lock(&dma_fence_stub_lock);
- if (!dma_fence_stub.ops) {
- dma_fence_init(&dma_fence_stub,
- &dma_fence_stub_ops,
- &dma_fence_stub_lock,
- 0, 0);
-
- set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &dma_fence_stub.flags);
-
- dma_fence_signal_locked(&dma_fence_stub);
- }
- spin_unlock(&dma_fence_stub_lock);
-
return dma_fence_get(&dma_fence_stub);
}
EXPORT_SYMBOL(dma_fence_get_stub);
@@ -511,12 +509,20 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
dma_fence_enable_sw_signaling(fence);
- trace_dma_fence_wait_start(fence);
+ if (trace_dma_fence_wait_start_enabled()) {
+ rcu_read_lock();
+ trace_dma_fence_wait_start(fence);
+ rcu_read_unlock();
+ }
if (fence->ops->wait)
ret = fence->ops->wait(fence, intr, timeout);
else
ret = dma_fence_default_wait(fence, intr, timeout);
- trace_dma_fence_wait_end(fence);
+ if (trace_dma_fence_wait_end_enabled()) {
+ rcu_read_lock();
+ trace_dma_fence_wait_end(fence);
+ rcu_read_unlock();
+ }
return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);
@@ -533,16 +539,23 @@ void dma_fence_release(struct kref *kref)
struct dma_fence *fence =
container_of(kref, struct dma_fence, refcount);
+ rcu_read_lock();
trace_dma_fence_destroy(fence);
- if (WARN(!list_empty(&fence->cb_list) &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
- "Fence %s:%s:%llx:%llx released with pending signals!\n",
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence),
- fence->context, fence->seqno)) {
+ if (!list_empty(&fence->cb_list) &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ const char __rcu *timeline;
+ const char __rcu *driver;
unsigned long flags;
+ driver = dma_fence_driver_name(fence);
+ timeline = dma_fence_timeline_name(fence);
+
+ WARN(1,
+ "Fence %s:%s:%llx:%llx released with pending signals!\n",
+ rcu_dereference(driver), rcu_dereference(timeline),
+ fence->context, fence->seqno);
+
/*
* Failed to signal before release, likely a refcounting issue.
*
@@ -556,6 +569,8 @@ void dma_fence_release(struct kref *kref)
spin_unlock_irqrestore(fence->lock, flags);
}
+ rcu_read_unlock();
+
if (fence->ops->release)
fence->ops->release(fence);
else
@@ -982,13 +997,45 @@ EXPORT_SYMBOL(dma_fence_set_deadline);
*/
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
{
- seq_printf(seq, "%s %s seq %llu %ssignalled\n",
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence), fence->seqno,
- dma_fence_is_signaled(fence) ? "" : "un");
+ const char __rcu *timeline = "";
+ const char __rcu *driver = "";
+ const char *signaled = "";
+
+ rcu_read_lock();
+
+ if (!dma_fence_is_signaled(fence)) {
+ timeline = dma_fence_timeline_name(fence);
+ driver = dma_fence_driver_name(fence);
+ signaled = "un";
+ }
+
+ seq_printf(seq, "%llu:%llu %s %s %ssignalled\n",
+ fence->context, fence->seqno, timeline, driver,
+ signaled);
+
+ rcu_read_unlock();
}
EXPORT_SYMBOL(dma_fence_describe);
+static void
+__dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, u64 seqno, unsigned long flags)
+{
+ BUG_ON(!lock);
+ BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
+
+ kref_init(&fence->refcount);
+ fence->ops = ops;
+ INIT_LIST_HEAD(&fence->cb_list);
+ fence->lock = lock;
+ fence->context = context;
+ fence->seqno = seqno;
+ fence->flags = flags;
+ fence->error = 0;
+
+ trace_dma_fence_init(fence);
+}
+
/**
* dma_fence_init - Initialize a custom fence.
* @fence: the fence to initialize
@@ -1008,18 +1055,94 @@ void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, u64 seqno)
{
- BUG_ON(!lock);
- BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
+ __dma_fence_init(fence, ops, lock, context, seqno, 0UL);
+}
+EXPORT_SYMBOL(dma_fence_init);
- kref_init(&fence->refcount);
- fence->ops = ops;
- INIT_LIST_HEAD(&fence->cb_list);
- fence->lock = lock;
- fence->context = context;
- fence->seqno = seqno;
- fence->flags = 0UL;
- fence->error = 0;
+/**
+ * dma_fence_init64 - Initialize a custom fence with 64-bit seqno support.
+ * @fence: the fence to initialize
+ * @ops: the dma_fence_ops for operations on this fence
+ * @lock: the irqsafe spinlock to use for locking this fence
+ * @context: the execution context this fence is run on
+ * @seqno: a linear increasing sequence number for this context
+ *
+ * Initializes an allocated fence. The caller doesn't have to keep a
+ * refcount after committing with this fence, but will need to hold one
+ * again if &dma_fence_ops.enable_signaling gets called.
+ *
+ * Context and seqno are used for easy comparison between fences, allowing
+ * you to check which fence is later by simply using dma_fence_later().
+ */
+void
+dma_fence_init64(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, u64 seqno)
+{
+ __dma_fence_init(fence, ops, lock, context, seqno,
+ BIT(DMA_FENCE_FLAG_SEQNO64_BIT));
+}
+EXPORT_SYMBOL(dma_fence_init64);
- trace_dma_fence_init(fence);
+/**
+ * dma_fence_driver_name - Access the driver name
+ * @fence: the fence to query
+ *
+ * Returns a driver name backing the dma-fence implementation.
+ *
+ * IMPORTANT CONSIDERATION:
+ * Dma-fence contract stipulates that access to driver provided data (data not
+ * directly embedded into the object itself), such as the &dma_fence.lock and
+ * memory potentially accessed by the &dma_fence.ops functions, is forbidden
+ * after the fence has been signalled. Drivers are allowed to free that data,
+ * and some do.
+ *
+ * To allow safe access, drivers are mandated to guarantee an RCU grace
+ * period between signalling the fence and freeing said data.
+ *
+ * As such, access to the driver name is only valid inside an RCU locked
+ * section. The pointer MUST be both queried and USED ONLY WITHIN a SINGLE
+ * block guarded by the &rcu_read_lock and &rcu_read_unlock pair.
+ */
+const char __rcu *dma_fence_driver_name(struct dma_fence *fence)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "RCU protection is required for safe access to returned string");
+
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return fence->ops->get_driver_name(fence);
+ else
+ return "detached-driver";
}
-EXPORT_SYMBOL(dma_fence_init);
+EXPORT_SYMBOL(dma_fence_driver_name);
+
+/**
+ * dma_fence_timeline_name - Access the timeline name
+ * @fence: the fence to query
+ *
+ * Returns a timeline name provided by the dma-fence implementation.
+ *
+ * IMPORTANT CONSIDERATION:
+ * Dma-fence contract stipulates that access to driver provided data (data not
+ * directly embedded into the object itself), such as the &dma_fence.lock and
+ * memory potentially accessed by the &dma_fence.ops functions, is forbidden
+ * after the fence has been signalled. Drivers are allowed to free that data,
+ * and some do.
+ *
+ * To allow safe access, drivers are mandated to guarantee an RCU grace
+ * period between signalling the fence and freeing said data.
+ *
+ * As such, access to the timeline name is only valid inside an RCU locked
+ * section. The pointer MUST be both queried and USED ONLY WITHIN a SINGLE
+ * block guarded by the &rcu_read_lock and &rcu_read_unlock pair.
+ */
+const char __rcu *dma_fence_timeline_name(struct dma_fence *fence)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "RCU protection is required for safe access to returned string");
+
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return fence->ops->get_timeline_name(fence);
+ else
+ return "signaled-timeline";
+}
+EXPORT_SYMBOL(dma_fence_timeline_name);
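
[Editor's sketch] A usage sketch of the access rule both helpers document: query and consume the names within a single RCU read-side critical section, never caching the pointers past rcu_read_unlock(). The logging helper itself is hypothetical:

#include <linux/dma-fence.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>

static void example_log_fence(struct dma_fence *fence)
{
	const char __rcu *driver;
	const char __rcu *timeline;

	rcu_read_lock();
	driver = dma_fence_driver_name(fence);
	timeline = dma_fence_timeline_name(fence);
	pr_info("fence %s:%s:%llu:%llu\n",
		rcu_dereference(driver), rcu_dereference(timeline),
		fence->context, fence->seqno);
	rcu_read_unlock();
}
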
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index 3cbe87d4a464..8ab49924f8b7 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -11,6 +11,7 @@
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/nospec.h>
#include <linux/syscalls.h>
@@ -202,6 +203,7 @@ void *dma_heap_get_drvdata(struct dma_heap *heap)
{
return heap->priv;
}
+EXPORT_SYMBOL_NS_GPL(dma_heap_get_drvdata, "DMA_BUF_HEAP");
/**
* dma_heap_get_name - get heap name
@@ -214,6 +216,7 @@ const char *dma_heap_get_name(struct dma_heap *heap)
{
return heap->name;
}
+EXPORT_SYMBOL_NS_GPL(dma_heap_get_name, "DMA_BUF_HEAP");
/**
* dma_heap_add - adds a heap to dmabuf heaps
@@ -303,6 +306,7 @@ err0:
kfree(heap);
return err_ret;
}
+EXPORT_SYMBOL_NS_GPL(dma_heap_add, "DMA_BUF_HEAP");
static char *dma_heap_devnode(const struct device *dev, umode_t *mode)
{
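
[Editor's sketch] These namespaced exports imply that a heap provider built as a module must import the namespace before the symbols resolve. A minimal sketch, assuming the current string-literal MODULE_IMPORT_NS syntax:

#include <linux/dma-heap.h>
#include <linux/module.h>

MODULE_IMPORT_NS("DMA_BUF_HEAP");

/* After the import, dma_heap_add() links as usual for the module. */
static struct dma_heap *example_register(struct dma_heap_export_info *exp_info)
{
	return dma_heap_add(exp_info);	/* returns ERR_PTR() on failure */
}
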
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index b1ef4546346d..bea3e9858aca 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -685,11 +685,13 @@ long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
dma_resv_iter_begin(&cursor, obj, usage);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
- ret = dma_fence_wait_timeout(fence, intr, ret);
- if (ret <= 0) {
- dma_resv_iter_end(&cursor);
- return ret;
- }
+ ret = dma_fence_wait_timeout(fence, intr, timeout);
+ if (ret <= 0)
+ break;
+
+		/* Even for a zero timeout the return value is 1 */
+ if (timeout)
+ timeout = ret;
}
dma_resv_iter_end(&cursor);
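
[Editor's sketch] The fix keeps a running budget: each fence is now waited on with the remaining timeout, and a zero (polling) timeout no longer inflates it. A hedged caller sketch:

#include <linux/dma-resv.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int example_wait_readers(struct dma_resv *resv)
{
	long ret;

	ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ, true,
				    msecs_to_jiffies(100));
	if (!ret)
		return -ETIMEDOUT;	/* budget exhausted */

	return ret < 0 ? ret : 0;	/* -ERESTARTSYS etc., or done */
}
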
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 9512d050563a..42f88193eab9 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -9,8 +9,12 @@
* Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*/
+
+#define pr_fmt(fmt) "cma_heap: " fmt
+
#include <linux/cma.h>
#include <linux/dma-buf.h>
+#include <linux/dma-buf/heaps/cma.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
@@ -18,10 +22,26 @@
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#define DEFAULT_CMA_NAME "default_cma_region"
+
+static struct cma *dma_areas[MAX_CMA_AREAS] __initdata;
+static unsigned int dma_areas_num __initdata;
+
+int __init dma_heap_cma_register_heap(struct cma *cma)
+{
+ if (dma_areas_num >= ARRAY_SIZE(dma_areas))
+ return -EINVAL;
+
+ dma_areas[dma_areas_num++] = cma;
+
+ return 0;
+}
struct cma_heap {
struct dma_heap *heap;
@@ -366,17 +386,17 @@ static const struct dma_heap_ops cma_heap_ops = {
.allocate = cma_heap_allocate,
};
-static int __init __add_cma_heap(struct cma *cma, void *data)
+static int __init __add_cma_heap(struct cma *cma, const char *name)
{
- struct cma_heap *cma_heap;
struct dma_heap_export_info exp_info;
+ struct cma_heap *cma_heap;
cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
if (!cma_heap)
return -ENOMEM;
cma_heap->cma = cma;
- exp_info.name = cma_get_name(cma);
+ exp_info.name = name;
exp_info.ops = &cma_heap_ops;
exp_info.priv = cma_heap;
@@ -391,15 +411,30 @@ static int __init __add_cma_heap(struct cma *cma, void *data)
return 0;
}
-static int __init add_default_cma_heap(void)
+static int __init add_cma_heaps(void)
{
struct cma *default_cma = dev_get_cma_area(NULL);
- int ret = 0;
+ unsigned int i;
+ int ret;
+
+ if (default_cma) {
+ ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME);
+ if (ret)
+ return ret;
+ }
- if (default_cma)
- ret = __add_cma_heap(default_cma, NULL);
+ for (i = 0; i < dma_areas_num; i++) {
+ struct cma *cma = dma_areas[i];
- return ret;
+ ret = __add_cma_heap(cma, cma_get_name(cma));
+ if (ret) {
+			pr_warn("Failed to add CMA heap %s\n", cma_get_name(cma));
+ continue;
+ }
+
+ }
+
+ return 0;
}
-module_init(add_default_cma_heap);
+module_init(add_cma_heaps);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
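
[Editor's sketch] The new registration hook targets early code that owns a struct cma, for example a reserved-memory setup path. A hedged sketch of such a caller; the hook itself is hypothetical, only dma_heap_cma_register_heap() comes from this patch:

#include <linux/cma.h>
#include <linux/dma-buf/heaps/cma.h>

static int __init example_export_region(struct cma *cma)
{
	int ret = dma_heap_cma_register_heap(cma);

	if (ret)	/* table full: MAX_CMA_AREAS regions already queued */
		pr_warn("CMA region %s not exported as a heap\n",
			cma_get_name(cma));
	return ret;
}
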
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 82b1b714300d..4c782fe33fd4 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -33,7 +33,7 @@ struct system_heap_buffer {
struct dma_heap_attachment {
struct device *dev;
- struct sg_table *table;
+ struct sg_table table;
struct list_head list;
bool mapped;
};
@@ -52,29 +52,22 @@ static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)
-static struct sg_table *dup_sg_table(struct sg_table *table)
+static int dup_sg_table(struct sg_table *from, struct sg_table *to)
{
- struct sg_table *new_table;
- int ret, i;
struct scatterlist *sg, *new_sg;
+ int ret, i;
- new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
- if (!new_table)
- return ERR_PTR(-ENOMEM);
-
- ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
- if (ret) {
- kfree(new_table);
- return ERR_PTR(-ENOMEM);
- }
+ ret = sg_alloc_table(to, from->orig_nents, GFP_KERNEL);
+ if (ret)
+ return ret;
- new_sg = new_table->sgl;
- for_each_sgtable_sg(table, sg, i) {
+ new_sg = to->sgl;
+ for_each_sgtable_sg(from, sg, i) {
sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
new_sg = sg_next(new_sg);
}
- return new_table;
+ return 0;
}
static int system_heap_attach(struct dma_buf *dmabuf,
@@ -82,19 +75,18 @@ static int system_heap_attach(struct dma_buf *dmabuf,
{
struct system_heap_buffer *buffer = dmabuf->priv;
struct dma_heap_attachment *a;
- struct sg_table *table;
+ int ret;
a = kzalloc(sizeof(*a), GFP_KERNEL);
if (!a)
return -ENOMEM;
- table = dup_sg_table(&buffer->sg_table);
- if (IS_ERR(table)) {
+ ret = dup_sg_table(&buffer->sg_table, &a->table);
+ if (ret) {
kfree(a);
- return -ENOMEM;
+ return ret;
}
- a->table = table;
a->dev = attachment->dev;
INIT_LIST_HEAD(&a->list);
a->mapped = false;
@@ -118,8 +110,7 @@ static void system_heap_detach(struct dma_buf *dmabuf,
list_del(&a->list);
mutex_unlock(&buffer->lock);
- sg_free_table(a->table);
- kfree(a->table);
+ sg_free_table(&a->table);
kfree(a);
}
@@ -127,7 +118,7 @@ static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attac
enum dma_data_direction direction)
{
struct dma_heap_attachment *a = attachment->priv;
- struct sg_table *table = a->table;
+ struct sg_table *table = &a->table;
int ret;
ret = dma_map_sgtable(attachment->dev, table, direction, 0);
@@ -162,7 +153,7 @@ static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
list_for_each_entry(a, &buffer->attachments, list) {
if (!a->mapped)
continue;
- dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
+ dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
}
mutex_unlock(&buffer->lock);
@@ -183,7 +174,7 @@ static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
list_for_each_entry(a, &buffer->attachments, list) {
if (!a->mapped)
continue;
- dma_sync_sgtable_for_device(a->dev, a->table, direction);
+ dma_sync_sgtable_for_device(a->dev, &a->table, direction);
}
mutex_unlock(&buffer->lock);
@@ -195,20 +186,35 @@ static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
struct system_heap_buffer *buffer = dmabuf->priv;
struct sg_table *table = &buffer->sg_table;
unsigned long addr = vma->vm_start;
- struct sg_page_iter piter;
- int ret;
+ unsigned long pgoff = vma->vm_pgoff;
+ struct scatterlist *sg;
+ int i, ret;
+
+ for_each_sgtable_sg(table, sg, i) {
+ unsigned long n = sg->length >> PAGE_SHIFT;
+
+ if (pgoff < n)
+ break;
+ pgoff -= n;
+ }
- for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
- struct page *page = sg_page_iter_page(&piter);
+ for (; sg && addr < vma->vm_end; sg = sg_next(sg)) {
+ unsigned long n = (sg->length >> PAGE_SHIFT) - pgoff;
+ struct page *page = sg_page(sg) + pgoff;
+ unsigned long size = n << PAGE_SHIFT;
- ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
- vma->vm_page_prot);
+ if (addr + size > vma->vm_end)
+ size = vma->vm_end - addr;
+
+ ret = remap_pfn_range(vma, addr, page_to_pfn(page),
+ size, vma->vm_page_prot);
if (ret)
return ret;
- addr += PAGE_SIZE;
- if (addr >= vma->vm_end)
- return 0;
+
+ addr += size;
+ pgoff = 0;
}
+
return 0;
}
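
[Editor's sketch] The rewritten mmap walks the scatterlist manually; the first loop reduces vm_pgoff to an in-entry page offset before the partial remap loop runs. The same arithmetic in isolation, assuming page-aligned entry lengths as the system heap guarantees:

#include <linux/scatterlist.h>

static struct scatterlist *example_skip_pages(struct sg_table *table,
					      unsigned long *pgoff)
{
	struct scatterlist *sg;
	int i;

	for_each_sgtable_sg(table, sg, i) {
		unsigned long n = sg->length >> PAGE_SHIFT;

		if (*pgoff < n)
			return sg;	/* mapping starts inside this entry */
		*pgoff -= n;		/* skip the whole entry */
	}

	return NULL;			/* offset lies past the buffer */
}
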
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 4f27ee93a00c..6f09d13be6b6 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -8,6 +8,7 @@
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
+#include <linux/panic.h>
#include <linux/slab.h>
#include <linux/sync_file.h>
@@ -170,7 +171,7 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
{
struct sync_timeline *parent = dma_fence_parent(fence);
- return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
+ return !__dma_fence_is_later(fence, fence->seqno, parent->value);
}
static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
@@ -349,6 +350,9 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
struct sync_file *sync_file;
struct sw_sync_create_fence_data data;
+	/* SW sync fences are inherently unsafe and can deadlock the kernel */
+ add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
+
if (fd < 0)
return fd;
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 67cd69551e42..9e5d662cd4e8 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -59,7 +59,7 @@ static void sync_print_fence(struct seq_file *s,
struct timespec64 ts64 =
ktime_to_timespec64(fence->timestamp);
- seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
+ seq_printf(s, "@%ptSp", &ts64);
}
seq_printf(s, ": %lld", fence->seqno);
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index d9b1c1b2a72b..747e377fb954 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -135,12 +135,18 @@ char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len)
strscpy(buf, sync_file->user_name, len);
} else {
struct dma_fence *fence = sync_file->fence;
+ const char __rcu *timeline;
+ const char __rcu *driver;
+ rcu_read_lock();
+ driver = dma_fence_driver_name(fence);
+ timeline = dma_fence_timeline_name(fence);
snprintf(buf, len, "%s-%s%llu-%lld",
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence),
+ rcu_dereference(driver),
+ rcu_dereference(timeline),
fence->context,
fence->seqno);
+ rcu_read_unlock();
}
return buf;
@@ -262,9 +268,17 @@ err_put_fd:
static int sync_fill_fence_info(struct dma_fence *fence,
struct sync_fence_info *info)
{
- strscpy(info->obj_name, fence->ops->get_timeline_name(fence),
+ const char __rcu *timeline;
+ const char __rcu *driver;
+
+ rcu_read_lock();
+
+ driver = dma_fence_driver_name(fence);
+ timeline = dma_fence_timeline_name(fence);
+
+ strscpy(info->obj_name, rcu_dereference(timeline),
sizeof(info->obj_name));
- strscpy(info->driver_name, fence->ops->get_driver_name(fence),
+ strscpy(info->driver_name, rcu_dereference(driver),
sizeof(info->driver_name));
info->status = dma_fence_get_status(fence);
@@ -273,6 +287,8 @@ static int sync_fill_fence_info(struct dma_fence *fence,
ktime_to_ns(dma_fence_timestamp(fence)) :
ktime_set(0, 0);
+ rcu_read_unlock();
+
return info->status;
}
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 7eee3eb47a8e..40399c26e6be 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -109,29 +109,22 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
struct udmabuf *ubuf = buf->priv;
- unsigned long *pfns;
+ struct page **pages;
void *vaddr;
pgoff_t pg;
dma_resv_assert_held(buf->resv);
- /**
- * HVO may free tail pages, so just use pfn to map each folio
- * into vmalloc area.
- */
- pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL);
- if (!pfns)
+ pages = kvmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
return -ENOMEM;
- for (pg = 0; pg < ubuf->pagecount; pg++) {
- unsigned long pfn = folio_pfn(ubuf->folios[pg]);
-
- pfn += ubuf->offsets[pg] >> PAGE_SHIFT;
- pfns[pg] = pfn;
- }
+ for (pg = 0; pg < ubuf->pagecount; pg++)
+ pages[pg] = folio_page(ubuf->folios[pg],
+ ubuf->offsets[pg] >> PAGE_SHIFT);
- vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL);
- kvfree(pfns);
+ vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
+ kvfree(pages);
if (!vaddr)
return -EINVAL;
@@ -264,8 +257,7 @@ static int begin_cpu_udmabuf(struct dma_buf *buf,
ubuf->sg = NULL;
}
} else {
- dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
- direction);
+ dma_sync_sgtable_for_cpu(dev, ubuf->sg, direction);
}
return ret;
@@ -280,7 +272,7 @@ static int end_cpu_udmabuf(struct dma_buf *buf,
if (!ubuf->sg)
return -EINVAL;
- dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+ dma_sync_sgtable_for_device(dev, ubuf->sg, direction);
return 0;
}
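
[Editor's sketch] vm_map_ram() has a matching teardown primitive that takes the same page count; a pairing sketch for the vunmap side, with udmabuf-specific names assumed:

#include <linux/vmalloc.h>

static void example_vunmap(void *vaddr, unsigned int pagecount)
{
	/* Must mirror the count passed to vm_map_ram() in vmap_udmabuf() */
	vm_unmap_ram(vaddr, pagecount);
}
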
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index db87dd2a07f7..8bb0a119ecd4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -89,7 +89,6 @@ config APPLE_ADMAC
tristate "Apple ADMAC support"
depends on ARCH_APPLE || COMPILE_TEST
select DMA_ENGINE
- default ARCH_APPLE
help
Enable support for Audio DMA Controller found on Apple Silicon SoCs.
@@ -103,7 +102,7 @@ config ARM_DMA350
config AT_HDMAC
tristate "Atmel AHB DMA support"
- depends on ARCH_AT91
+ depends on ARCH_AT91 || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -111,7 +110,7 @@ config AT_HDMAC
config AT_XDMAC
tristate "Atmel XDMA support"
- depends on ARCH_AT91
+ depends on ARCH_MICROCHIP
select DMA_ENGINE
help
Support the Atmel XDMA controller.
@@ -144,7 +143,7 @@ config BCM_SBA_RAID
config DMA_BCM2835
tristate "BCM2835 DMA engine support"
- depends on ARCH_BCM2835
+ depends on ARCH_BCM2835 || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
@@ -451,7 +450,7 @@ config MILBEAUT_XDMAC
config MMP_PDMA
tristate "MMP PDMA support"
- depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
+ depends on ARCH_MMP || ARCH_PXA || ARCH_SPACEMIT || COMPILE_TEST
select DMA_ENGINE
help
Support the MMP PDMA engine for PXA and MMP platform.
@@ -572,6 +571,15 @@ config PLX_DMA
These are exposed via extra functions on the switch's
upstream port. Each function exposes one DMA channel.
+config SOPHGO_CV1800B_DMAMUX
+ tristate "Sophgo CV1800/SG2000 series SoC DMA multiplexer support"
+ depends on MFD_SYSCON
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ Support for the DMA multiplexer on Sophgo CV1800/SG2000
+ series SoCs.
+	  Say Y here if your board has this SoC.
+
config STE_DMA40
bool "ST-Ericsson DMA40 support"
depends on ARCH_U8500
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index ba9732644752..a54d7688392b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -71,6 +71,7 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SF_PDMA) += sf-pdma/
+obj-$(CONFIG_SOPHGO_CV1800B_DMAMUX) += cv1800b-dmamux.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_SPRD_DMA) += sprd-dma.o
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 2d147712cbc6..7d226453961f 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -887,7 +887,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
first = xt->sgl;
dev_info(chan2dev(chan),
- "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
+ "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
__func__, &xt->src_start, &xt->dst_start, xt->numf,
xt->frame_size, flags);
@@ -1174,7 +1174,7 @@ atc_prep_dma_memset_sg(struct dma_chan *chan,
int i;
int ret;
- dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%zx f0x%lx\n", __func__,
+ dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%x f0x%lx\n", __func__,
value, sg_len, flags);
if (unlikely(!sgl || !sg_len)) {
@@ -1503,7 +1503,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
unsigned int periods = buf_len / period_len;
unsigned int i;
- dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
+ dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%zu/%zu)\n",
direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
&buf_addr,
periods, buf_len, period_len);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 0117bb2e8591..321748e2983e 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -1060,7 +1060,6 @@ static struct platform_driver bcm2835_dma_driver = {
module_platform_driver(bcm2835_dma_driver);
-MODULE_ALIAS("platform:bcm2835-dma");
MODULE_DESCRIPTION("BCM2835 DMA engine driver");
MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/cv1800b-dmamux.c b/drivers/dma/cv1800b-dmamux.c
new file mode 100644
index 000000000000..e900d6595617
--- /dev/null
+++ b/drivers/dma/cv1800b-dmamux.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/llist.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
+
+#define REG_DMA_CHANNEL_REMAP0 0x154
+#define REG_DMA_CHANNEL_REMAP1 0x158
+#define REG_DMA_INT_MUX 0x298
+
+#define DMAMUX_NCELLS 2
+#define MAX_DMA_MAPPING_ID 42
+#define MAX_DMA_CPU_ID 2
+#define MAX_DMA_CH_ID 7
+
+#define DMAMUX_INTMUX_REGISTER_LEN 4
+#define DMAMUX_NR_CH_PER_REGISTER 4
+#define DMAMUX_BIT_PER_CH 8
+#define DMAMUX_CH_MASk GENMASK(5, 0)
+#define DMAMUX_INT_BIT_PER_CPU 10
+#define DMAMUX_CH_UPDATE_BIT BIT(31)
+
+#define DMAMUX_CH_REGPOS(chid) \
+ ((chid) / DMAMUX_NR_CH_PER_REGISTER)
+#define DMAMUX_CH_REGOFF(chid) \
+ ((chid) % DMAMUX_NR_CH_PER_REGISTER)
+#define DMAMUX_CH_REG(chid) \
+ ((DMAMUX_CH_REGPOS(chid) * sizeof(u32)) + \
+ REG_DMA_CHANNEL_REMAP0)
+#define DMAMUX_CH_SET(chid, val) \
+ (((val) << (DMAMUX_CH_REGOFF(chid) * DMAMUX_BIT_PER_CH)) | \
+ DMAMUX_CH_UPDATE_BIT)
+#define DMAMUX_CH_MASK(chid) \
+ DMAMUX_CH_SET(chid, DMAMUX_CH_MASk)
+
+#define DMAMUX_INT_BIT(chid, cpuid) \
+ BIT((cpuid) * DMAMUX_INT_BIT_PER_CPU + (chid))
+#define DMAMUX_INTEN_BIT(cpuid) \
+ DMAMUX_INT_BIT(8, cpuid)
+#define DMAMUX_INT_CH_BIT(chid, cpuid) \
+ (DMAMUX_INT_BIT(chid, cpuid) | DMAMUX_INTEN_BIT(cpuid))
+#define DMAMUX_INT_MASK(chid) \
+ (DMAMUX_INT_BIT(chid, 0) | \
+ DMAMUX_INT_BIT(chid, 1) | \
+ DMAMUX_INT_BIT(chid, 2))
+#define DMAMUX_INT_CH_MASK(chid, cpuid) \
+ (DMAMUX_INT_MASK(chid) | DMAMUX_INTEN_BIT(cpuid))
+
+struct cv1800_dmamux_data {
+ struct dma_router dmarouter;
+ struct regmap *regmap;
+ spinlock_t lock;
+ struct llist_head free_maps;
+ struct llist_head reserve_maps;
+ DECLARE_BITMAP(mapped_peripherals, MAX_DMA_MAPPING_ID);
+};
+
+struct cv1800_dmamux_map {
+ struct llist_node node;
+ unsigned int channel;
+ unsigned int peripheral;
+ unsigned int cpu;
+};
+
+static void cv1800_dmamux_free(struct device *dev, void *route_data)
+{
+ struct cv1800_dmamux_data *dmamux = dev_get_drvdata(dev);
+ struct cv1800_dmamux_map *map = route_data;
+
+ guard(spinlock_irqsave)(&dmamux->lock);
+
+ regmap_update_bits(dmamux->regmap,
+ DMAMUX_CH_REG(map->channel),
+ DMAMUX_CH_MASK(map->channel),
+ DMAMUX_CH_UPDATE_BIT);
+
+ regmap_update_bits(dmamux->regmap, REG_DMA_INT_MUX,
+ DMAMUX_INT_CH_MASK(map->channel, map->cpu),
+ DMAMUX_INTEN_BIT(map->cpu));
+
+ dev_dbg(dev, "free channel %u for req %u (cpu %u)\n",
+ map->channel, map->peripheral, map->cpu);
+}
+
+static void *cv1800_dmamux_route_allocate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+ struct cv1800_dmamux_data *dmamux = platform_get_drvdata(pdev);
+ struct cv1800_dmamux_map *map;
+ struct llist_node *node;
+ unsigned long flags;
+ unsigned int chid, devid, cpuid;
+ int ret;
+
+ if (dma_spec->args_count != DMAMUX_NCELLS) {
+ dev_err(&pdev->dev, "invalid number of dma mux args\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ devid = dma_spec->args[0];
+ cpuid = dma_spec->args[1];
+ dma_spec->args_count = 1;
+
+ if (devid > MAX_DMA_MAPPING_ID) {
+ dev_err(&pdev->dev, "invalid device id: %u\n", devid);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (cpuid > MAX_DMA_CPU_ID) {
+ dev_err(&pdev->dev, "invalid cpu id: %u\n", cpuid);
+ return ERR_PTR(-EINVAL);
+ }
+
+ dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
+ if (!dma_spec->np) {
+ dev_err(&pdev->dev, "can't get dma master\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ spin_lock_irqsave(&dmamux->lock, flags);
+
+ if (test_bit(devid, dmamux->mapped_peripherals)) {
+ llist_for_each_entry(map, dmamux->reserve_maps.first, node) {
+ if (map->peripheral == devid && map->cpu == cpuid)
+ goto found;
+ }
+
+ ret = -EINVAL;
+ goto failed;
+ } else {
+ node = llist_del_first(&dmamux->free_maps);
+ if (!node) {
+ ret = -ENODEV;
+ goto failed;
+ }
+
+ map = llist_entry(node, struct cv1800_dmamux_map, node);
+ llist_add(&map->node, &dmamux->reserve_maps);
+ set_bit(devid, dmamux->mapped_peripherals);
+ }
+
+found:
+ chid = map->channel;
+ map->peripheral = devid;
+ map->cpu = cpuid;
+
+ regmap_set_bits(dmamux->regmap,
+ DMAMUX_CH_REG(chid),
+ DMAMUX_CH_SET(chid, devid));
+
+ regmap_update_bits(dmamux->regmap, REG_DMA_INT_MUX,
+ DMAMUX_INT_CH_MASK(chid, cpuid),
+ DMAMUX_INT_CH_BIT(chid, cpuid));
+
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+
+ dma_spec->args[0] = chid;
+
+ dev_dbg(&pdev->dev, "register channel %u for req %u (cpu %u)\n",
+ chid, devid, cpuid);
+
+ return map;
+
+failed:
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+ of_node_put(dma_spec->np);
+ dev_err(&pdev->dev, "errno %d\n", ret);
+ return ERR_PTR(ret);
+}
+
+static int cv1800_dmamux_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *mux_node = dev->of_node;
+ struct cv1800_dmamux_data *data;
+ struct cv1800_dmamux_map *tmp;
+ struct device *parent = dev->parent;
+ struct regmap *regmap = NULL;
+ unsigned int i;
+
+ if (!parent)
+ return -ENODEV;
+
+ regmap = device_node_to_regmap(parent->of_node);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock_init(&data->lock);
+ init_llist_head(&data->free_maps);
+ init_llist_head(&data->reserve_maps);
+
+ for (i = 0; i <= MAX_DMA_CH_ID; i++) {
+ tmp = devm_kmalloc(dev, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+			/* It is OK if not all channels are allocated */
+			dev_warn(dev, "cannot allocate channel %u\n", i);
+ continue;
+ }
+
+ init_llist_node(&tmp->node);
+ tmp->channel = i;
+ llist_add(&tmp->node, &data->free_maps);
+ }
+
+	/* If no channels were allocated, the probe must fail */
+ if (llist_empty(&data->free_maps))
+ return -ENOMEM;
+
+ data->regmap = regmap;
+ data->dmarouter.dev = dev;
+ data->dmarouter.route_free = cv1800_dmamux_free;
+
+ platform_set_drvdata(pdev, data);
+
+ return of_dma_router_register(mux_node,
+ cv1800_dmamux_route_allocate,
+ &data->dmarouter);
+}
+
+static void cv1800_dmamux_remove(struct platform_device *pdev)
+{
+ of_dma_controller_free(pdev->dev.of_node);
+}
+
+static const struct of_device_id cv1800_dmamux_ids[] = {
+ { .compatible = "sophgo,cv1800b-dmamux", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cv1800_dmamux_ids);
+
+static struct platform_driver cv1800_dmamux_driver = {
+ .probe = cv1800_dmamux_probe,
+ .remove = cv1800_dmamux_remove,
+ .driver = {
+ .name = "cv1800-dmamux",
+ .of_match_table = cv1800_dmamux_ids,
+ },
+};
+module_platform_driver(cv1800_dmamux_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo CV1800/SG2000 Series SoC DMAMUX driver");
+MODULE_LICENSE("GPL");
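
[Editor's sketch] From a consumer's point of view the mux is transparent: a peripheral driver on these SoCs requests a channel by name, and the router rewrites the two-cell spec (request id, cpu id) into a real channel id. A hedged sketch with an assumed "rx" channel name:

#include <linux/dmaengine.h>

static struct dma_chan *example_get_rx_chan(struct device *dev)
{
	/* Resolved through cv1800_dmamux_route_allocate() above */
	return dma_request_chan(dev, "rx");	/* ERR_PTR() on failure */
}
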
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 36943b0c6d60..5b06b0dc67ee 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -6,6 +6,7 @@
* Author: Lars-Peter Clausen <lars@metafoo.de>
*/
+#include <linux/adi-axi-common.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
@@ -22,7 +23,6 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/fpga/adi-axi-common.h>
#include <dt-bindings/dma/axi-dmac.h>
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 758fcd0546d8..ca13cd39330b 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -926,6 +926,36 @@ void dma_release_channel(struct dma_chan *chan)
}
EXPORT_SYMBOL_GPL(dma_release_channel);
+static void dmaenginem_release_channel(void *chan)
+{
+ dma_release_channel(chan);
+}
+
+/**
+ * devm_dma_request_chan - try to allocate an exclusive slave channel
+ * @dev: pointer to client device structure
+ * @name: slave channel name
+ *
+ * Returns pointer to appropriate DMA channel on success or an error pointer.
+ *
+ * The operation is managed and will be undone on driver detach.
+ */
+
+struct dma_chan *devm_dma_request_chan(struct device *dev, const char *name)
+{
+ struct dma_chan *chan = dma_request_chan(dev, name);
+ int ret = 0;
+
+ if (!IS_ERR(chan))
+ ret = devm_add_action_or_reset(dev, dmaenginem_release_channel, chan);
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(devm_dma_request_chan);
+
/**
* dmaengine_get - register interest in dma_channels
*/
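
[Editor's sketch] A probe-time sketch of the new managed helper: no dma_release_channel() is needed on error paths or in remove, since the devm action added above handles it. The surrounding driver is hypothetical:

#include <linux/dmaengine.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct dma_chan *chan;

	chan = devm_dma_request_chan(&pdev->dev, "tx");
	if (IS_ERR(chan))
		return dev_err_probe(&pdev->dev, PTR_ERR(chan),
				     "no tx channel\n");

	/* chan is released automatically on driver detach */
	return 0;
}
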
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index c2b88cc99e5d..8e5f7defa6b6 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -24,18 +24,6 @@
#include "../virt-dma.h"
static inline
-struct device *dchan2dev(struct dma_chan *dchan)
-{
- return &dchan->dev->device;
-}
-
-static inline
-struct device *chan2dev(struct dw_edma_chan *chan)
-{
- return &chan->vc.chan.dev->device;
-}
-
-static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
return container_of(vd, struct dw_edma_desc, vd);
@@ -596,6 +584,25 @@ dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
return dw_edma_device_transfer(&xfer);
}
+static void dw_hdma_set_callback_result(struct virt_dma_desc *vd,
+ enum dmaengine_tx_result result)
+{
+ u32 residue = 0;
+ struct dw_edma_desc *desc;
+ struct dmaengine_result *res;
+
+ if (!vd->tx.callback_result)
+ return;
+
+ desc = vd2dw_edma_desc(vd);
+ if (desc)
+ residue = desc->alloc_sz - desc->xfer_sz;
+
+ res = &vd->tx_result;
+ res->result = result;
+ res->residue = residue;
+}
+
static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
struct dw_edma_desc *desc;
@@ -609,6 +616,8 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
case EDMA_REQ_NONE:
desc = vd2dw_edma_desc(vd);
if (!desc->chunks_alloc) {
+ dw_hdma_set_callback_result(vd,
+ DMA_TRANS_NOERROR);
list_del(&vd->node);
vchan_cookie_complete(vd);
}
@@ -645,6 +654,7 @@ static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
spin_lock_irqsave(&chan->vc.lock, flags);
vd = vchan_next_desc(&chan->vc);
if (vd) {
+ dw_hdma_set_callback_result(vd, DMA_TRANS_ABORTED);
list_del(&vd->node);
vchan_cookie_complete(vd);
}
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 49f09998e5c0..3371e0a76d3c 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -161,12 +161,16 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
const struct pci_device_id *pid)
{
struct dw_edma_pcie_data *pdata = (void *)pid->driver_data;
- struct dw_edma_pcie_data vsec_data;
+ struct dw_edma_pcie_data *vsec_data __free(kfree) = NULL;
struct device *dev = &pdev->dev;
struct dw_edma_chip *chip;
int err, nr_irqs;
int i, mask;
+ vsec_data = kmalloc(sizeof(*vsec_data), GFP_KERNEL);
+ if (!vsec_data)
+ return -ENOMEM;
+
/* Enable PCI device */
err = pcim_enable_device(pdev);
if (err) {
@@ -174,23 +178,23 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return err;
}
- memcpy(&vsec_data, pdata, sizeof(struct dw_edma_pcie_data));
+ memcpy(vsec_data, pdata, sizeof(struct dw_edma_pcie_data));
/*
* Tries to find if exists a PCIe Vendor-Specific Extended Capability
* for the DMA, if one exists, then reconfigures it.
*/
- dw_edma_pcie_get_vsec_dma_data(pdev, &vsec_data);
+ dw_edma_pcie_get_vsec_dma_data(pdev, vsec_data);
/* Mapping PCI BAR regions */
- mask = BIT(vsec_data.rg.bar);
- for (i = 0; i < vsec_data.wr_ch_cnt; i++) {
- mask |= BIT(vsec_data.ll_wr[i].bar);
- mask |= BIT(vsec_data.dt_wr[i].bar);
+ mask = BIT(vsec_data->rg.bar);
+ for (i = 0; i < vsec_data->wr_ch_cnt; i++) {
+ mask |= BIT(vsec_data->ll_wr[i].bar);
+ mask |= BIT(vsec_data->dt_wr[i].bar);
}
- for (i = 0; i < vsec_data.rd_ch_cnt; i++) {
- mask |= BIT(vsec_data.ll_rd[i].bar);
- mask |= BIT(vsec_data.dt_rd[i].bar);
+ for (i = 0; i < vsec_data->rd_ch_cnt; i++) {
+ mask |= BIT(vsec_data->ll_rd[i].bar);
+ mask |= BIT(vsec_data->dt_rd[i].bar);
}
err = pcim_iomap_regions(pdev, mask, pci_name(pdev));
if (err) {
@@ -213,7 +217,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return -ENOMEM;
/* IRQs allocation */
- nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs,
+ nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data->irqs,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (nr_irqs < 1) {
pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n",
@@ -224,22 +228,22 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
/* Data structure initialization */
chip->dev = dev;
- chip->mf = vsec_data.mf;
+ chip->mf = vsec_data->mf;
chip->nr_irqs = nr_irqs;
chip->ops = &dw_edma_pcie_plat_ops;
- chip->ll_wr_cnt = vsec_data.wr_ch_cnt;
- chip->ll_rd_cnt = vsec_data.rd_ch_cnt;
+ chip->ll_wr_cnt = vsec_data->wr_ch_cnt;
+ chip->ll_rd_cnt = vsec_data->rd_ch_cnt;
- chip->reg_base = pcim_iomap_table(pdev)[vsec_data.rg.bar];
+ chip->reg_base = pcim_iomap_table(pdev)[vsec_data->rg.bar];
if (!chip->reg_base)
return -ENOMEM;
for (i = 0; i < chip->ll_wr_cnt; i++) {
struct dw_edma_region *ll_region = &chip->ll_region_wr[i];
struct dw_edma_region *dt_region = &chip->dt_region_wr[i];
- struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
- struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
+ struct dw_edma_block *ll_block = &vsec_data->ll_wr[i];
+ struct dw_edma_block *dt_block = &vsec_data->dt_wr[i];
ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
if (!ll_region->vaddr.io)
@@ -263,8 +267,8 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
for (i = 0; i < chip->ll_rd_cnt; i++) {
struct dw_edma_region *ll_region = &chip->ll_region_rd[i];
struct dw_edma_region *dt_region = &chip->dt_region_rd[i];
- struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
- struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
+ struct dw_edma_block *ll_block = &vsec_data->ll_rd[i];
+ struct dw_edma_block *dt_block = &vsec_data->dt_rd[i];
ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
if (!ll_region->vaddr.io)
@@ -298,31 +302,31 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf);
pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p)\n",
- vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz,
+ vsec_data->rg.bar, vsec_data->rg.off, vsec_data->rg.sz,
chip->reg_base);
for (i = 0; i < chip->ll_wr_cnt; i++) {
pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.ll_wr[i].bar,
- vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz,
+ i, vsec_data->ll_wr[i].bar,
+ vsec_data->ll_wr[i].off, chip->ll_region_wr[i].sz,
chip->ll_region_wr[i].vaddr.io, &chip->ll_region_wr[i].paddr);
pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.dt_wr[i].bar,
- vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz,
+ i, vsec_data->dt_wr[i].bar,
+ vsec_data->dt_wr[i].off, chip->dt_region_wr[i].sz,
chip->dt_region_wr[i].vaddr.io, &chip->dt_region_wr[i].paddr);
}
for (i = 0; i < chip->ll_rd_cnt; i++) {
pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.ll_rd[i].bar,
- vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz,
+ i, vsec_data->ll_rd[i].bar,
+ vsec_data->ll_rd[i].off, chip->ll_region_rd[i].sz,
chip->ll_region_rd[i].vaddr.io, &chip->ll_region_rd[i].paddr);
pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.dt_rd[i].bar,
- vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz,
+ i, vsec_data->dt_rd[i].bar,
+ vsec_data->dt_rd[i].off, chip->dt_region_rd[i].sz,
chip->dt_region_rd[i].vaddr.io, &chip->dt_region_rd[i].paddr);
}
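
[Editor's sketch] The conversion leans on the scope-based cleanup helpers; in isolation the pattern looks like this minimal sketch, which is why the probe above could drop any manual frees on its error paths:

#include <linux/cleanup.h>
#include <linux/slab.h>

static int example_alloc(void)
{
	u32 *buf __free(kfree) = kmalloc(64 * sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* ... use buf ... */

	return 0;	/* buf is kfree()d automatically on every return */
}
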
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index cee56cd31a61..c63fa52036d7 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -21,8 +21,6 @@
#include "internal.h"
-#define DRV_NAME "dw_dmac"
-
static int dw_probe(struct platform_device *pdev)
{
const struct dw_dma_chip_pdata *match;
@@ -190,7 +188,7 @@ static struct platform_driver dw_driver = {
.remove = dw_remove,
.shutdown = dw_shutdown,
.driver = {
- .name = DRV_NAME,
+ .name = "dw_dmac",
.pm = pm_sleep_ptr(&dw_dev_pm_ops),
.of_match_table = of_match_ptr(dw_dma_of_id_table),
.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
@@ -211,4 +209,3 @@ module_exit(dw_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/dma/dw/rzn1-dmamux.c b/drivers/dma/dw/rzn1-dmamux.c
index 4fb8508419db..deadf135681b 100644
--- a/drivers/dma/dw/rzn1-dmamux.c
+++ b/drivers/dma/dw/rzn1-dmamux.c
@@ -48,12 +48,16 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
u32 mask;
int ret;
- if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS)
- return ERR_PTR(-EINVAL);
+ if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS) {
+ ret = -EINVAL;
+ goto put_device;
+ }
map = kzalloc(sizeof(*map), GFP_KERNEL);
- if (!map)
- return ERR_PTR(-ENOMEM);
+ if (!map) {
+ ret = -ENOMEM;
+ goto put_device;
+ }
chan = dma_spec->args[0];
map->req_idx = dma_spec->args[4];
@@ -94,12 +98,15 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
if (ret)
goto clear_bitmap;
+ put_device(&pdev->dev);
return map;
clear_bitmap:
clear_bit(map->req_idx, dmamux->used_chans);
free_map:
kfree(map);
+put_device:
+ put_device(&pdev->dev);
return ERR_PTR(ret);
}
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
index b4323d243d6d..4be81db24a19 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
@@ -48,11 +48,6 @@ struct dpdmai_cmd_destroy {
__le32 dpdmai_id;
} __packed;
-static inline u64 mc_enc(int lsoffset, int width, u64 val)
-{
- return (val & MAKE_UMASK64(width)) << lsoffset;
-}
-
/**
* dpdmai_open() - Open a control session for the specified object
* @mc_io: Pointer to MC portal's I/O object
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 4976d7dde080..a59212758029 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -206,15 +206,19 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
-static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
+static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth src_addr_width,
+ enum dma_slave_buswidth dst_addr_width)
{
- u32 val;
+ u32 src_val, dst_val;
- if (addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
- addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- val = ffs(addr_width) - 1;
- return val | (val << 8);
+ src_val = ffs(src_addr_width) - 1;
+ dst_val = ffs(dst_addr_width) - 1;
+ return dst_val | (src_val << 8);
}
void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
@@ -612,13 +616,19 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
dma_buf_next = dma_addr;
if (direction == DMA_MEM_TO_DEV) {
+ if (!fsl_chan->cfg.src_addr_width)
+ fsl_chan->cfg.src_addr_width = fsl_chan->cfg.dst_addr_width;
fsl_chan->attr =
- fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
+ fsl_chan->cfg.dst_addr_width);
nbytes = fsl_chan->cfg.dst_addr_width *
fsl_chan->cfg.dst_maxburst;
} else {
+ if (!fsl_chan->cfg.dst_addr_width)
+ fsl_chan->cfg.dst_addr_width = fsl_chan->cfg.src_addr_width;
fsl_chan->attr =
- fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
+ fsl_chan->cfg.dst_addr_width);
nbytes = fsl_chan->cfg.src_addr_width *
fsl_chan->cfg.src_maxburst;
}
@@ -689,13 +699,19 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
fsl_desc->dirn = direction;
if (direction == DMA_MEM_TO_DEV) {
+ if (!fsl_chan->cfg.src_addr_width)
+ fsl_chan->cfg.src_addr_width = fsl_chan->cfg.dst_addr_width;
fsl_chan->attr =
- fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
+ fsl_chan->cfg.dst_addr_width);
nbytes = fsl_chan->cfg.dst_addr_width *
fsl_chan->cfg.dst_maxburst;
} else {
+ if (!fsl_chan->cfg.dst_addr_width)
+ fsl_chan->cfg.dst_addr_width = fsl_chan->cfg.src_addr_width;
fsl_chan->attr =
- fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
+ fsl_chan->cfg.dst_addr_width);
nbytes = fsl_chan->cfg.src_addr_width *
fsl_chan->cfg.src_maxburst;
}
@@ -766,6 +782,10 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
struct fsl_edma_desc *fsl_desc;
+ u32 src_bus_width, dst_bus_width;
+
+ src_bus_width = min_t(u32, DMA_SLAVE_BUSWIDTH_32_BYTES, 1 << (ffs(dma_src) - 1));
+ dst_bus_width = min_t(u32, DMA_SLAVE_BUSWIDTH_32_BYTES, 1 << (ffs(dma_dst) - 1));
fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
if (!fsl_desc)
@@ -778,8 +798,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
/* To match with copy_align and max_seg_size so 1 tcd is enough */
fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
- fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
- 32, len, 0, 1, 1, 32, 0, true, true, false);
+ fsl_edma_get_tcd_attr(src_bus_width, dst_bus_width),
+ src_bus_width, len, 0, 1, 1, dst_bus_width, 0, true,
+ true, false);
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
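
[Editor's sketch] A worked example of the new encoding, assuming a 2-byte source and 4-byte destination: ffs(2) - 1 = 1 and ffs(4) - 1 = 2, so the returned attr is 0x0102 instead of both fields being forced to a single width:

#include <linux/bitops.h>
#include <linux/dmaengine.h>

static unsigned int example_tcd_attr(void)
{
	unsigned int src = ffs(DMA_SLAVE_BUSWIDTH_2_BYTES) - 1; /* SSIZE = 1 */
	unsigned int dst = ffs(DMA_SLAVE_BUSWIDTH_4_BYTES) - 1; /* DSIZE = 2 */

	return dst | (src << 8);	/* 0x0102 */
}
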
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 97583c7d51a2..a753b7cbfa7a 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -999,6 +999,5 @@ static void __exit fsl_edma_exit(void)
}
module_exit(fsl_edma_exit);
-MODULE_ALIAS("platform:fsl-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 823f5c6bc2e1..6ace5bf80c40 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -148,6 +148,9 @@
* @__reserved1: Reserved field.
* @cfg8b_w1: Compound descriptor command queue origin produced
* by qDMA and dynamic debug field.
+ * @__reserved2: Reserved field.
+ * @cmd: Command for QDMA (see FSL_QDMA_CMD_RWTTYPE and
+ * others).
* @data: Pointer to the memory 40-bit address, describes DMA
* source information and DMA destination information.
*/
@@ -1293,6 +1296,5 @@ static struct platform_driver fsl_qdma_driver = {
module_platform_driver(fsl_qdma_driver);
-MODULE_ALIAS("platform:fsl-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");
diff --git a/drivers/dma/idxd/defaults.c b/drivers/dma/idxd/defaults.c
index c607ae8dd12c..2bbbcd02a0da 100644
--- a/drivers/dma/idxd/defaults.c
+++ b/drivers/dma/idxd/defaults.c
@@ -36,12 +36,10 @@ int idxd_load_iaa_device_defaults(struct idxd_device *idxd)
group->num_wqs++;
/* set name to "iaa_crypto" */
- memset(wq->name, 0, WQ_NAME_SIZE + 1);
- strscpy(wq->name, "iaa_crypto", WQ_NAME_SIZE + 1);
+ strscpy_pad(wq->name, "iaa_crypto");
/* set driver_name to "crypto" */
- memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1);
- strscpy(wq->driver_name, "crypto", DRIVER_NAME_SIZE + 1);
+ strscpy_pad(wq->driver_name, "crypto");
engine = idxd->engines[0];
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 5cf419fe6b46..c2cdf41b6e57 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -16,6 +16,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
u32 *status);
static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
+static int idxd_wq_config_write(struct idxd_wq *wq);
/* Interrupt control bits */
void idxd_unmask_error_interrupts(struct idxd_device *idxd)
@@ -215,14 +216,28 @@ int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
return 0;
}
+ /*
+	 * Disabling a WQ does not drain pending address translations. If WQ
+	 * attributes are changed before the translations are drained, the
+	 * pending translations can be issued with the updated WQ attributes,
+	 * resulting in invalid translations being cached in the device
+	 * translation cache.
+ *
+ * To make sure pending translations are drained before WQ
+ * attributes are changed, we use a WQ Drain followed by WQ Reset and
+ * then restore the WQ configuration.
+ */
+ idxd_wq_drain(wq);
+
operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
- idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);
+ idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, &status);
if (status != IDXD_CMDSTS_SUCCESS) {
- dev_dbg(dev, "WQ disable failed: %#x\n", status);
+ dev_dbg(dev, "WQ reset failed: %#x\n", status);
return -ENXIO;
}
+ idxd_wq_config_write(wq);
+
if (reset_config)
idxd_wq_disable_cleanup(wq);
clear_bit(wq->id, idxd->wq_enable_map);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 80355d03004d..2acc34b3daff 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -80,6 +80,8 @@ static struct pci_device_id idxd_pci_tbl[] = {
{ PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) },
/* IAA PTL platforms */
{ PCI_DEVICE_DATA(INTEL, IAA_PTL, &idxd_driver_data[IDXD_TYPE_IAX]) },
+ /* IAA WCL platforms */
+ { PCI_DEVICE_DATA(INTEL, IAA_WCL, &idxd_driver_data[IDXD_TYPE_IAX]) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
@@ -189,27 +191,30 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
if (!idxd->wq_enable_map) {
rc = -ENOMEM;
- goto err_bitmap;
+ goto err_free_wqs;
}
for (i = 0; i < idxd->max_wqs; i++) {
wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
if (!wq) {
rc = -ENOMEM;
- goto err;
+ goto err_unwind;
}
idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
conf_dev = wq_confdev(wq);
wq->id = i;
wq->idxd = idxd;
- device_initialize(wq_confdev(wq));
+ device_initialize(conf_dev);
conf_dev->parent = idxd_confdev(idxd);
conf_dev->bus = &dsa_bus_type;
conf_dev->type = &idxd_wq_device_type;
rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
- if (rc < 0)
- goto err;
+ if (rc < 0) {
+ put_device(conf_dev);
+ kfree(wq);
+ goto err_unwind;
+ }
mutex_init(&wq->wq_lock);
init_waitqueue_head(&wq->err_queue);
@@ -220,15 +225,20 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
if (!wq->wqcfg) {
+ put_device(conf_dev);
+ kfree(wq);
rc = -ENOMEM;
- goto err;
+ goto err_unwind;
}
if (idxd->hw.wq_cap.op_config) {
wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
if (!wq->opcap_bmap) {
+ kfree(wq->wqcfg);
+ put_device(conf_dev);
+ kfree(wq);
rc = -ENOMEM;
- goto err_opcap_bmap;
+ goto err_unwind;
}
bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}
@@ -239,13 +249,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
return 0;
-err_opcap_bmap:
- kfree(wq->wqcfg);
-
-err:
- put_device(conf_dev);
- kfree(wq);
-
+err_unwind:
while (--i >= 0) {
wq = idxd->wqs[i];
if (idxd->hw.wq_cap.op_config)
@@ -254,11 +258,10 @@ err:
conf_dev = wq_confdev(wq);
put_device(conf_dev);
kfree(wq);
-
}
bitmap_free(idxd->wq_enable_map);
-err_bitmap:
+err_free_wqs:
kfree(idxd->wqs);
return rc;
@@ -1036,7 +1039,6 @@ static void idxd_reset_prepare(struct pci_dev *pdev)
const char *idxd_name;
int rc;
- dev = &idxd->pdev->dev;
idxd_name = dev_name(idxd_confdev(idxd));
struct idxd_saved_states *idxd_saved __free(kfree) =
@@ -1292,10 +1294,12 @@ static void idxd_remove(struct pci_dev *pdev)
device_unregister(idxd_confdev(idxd));
idxd_shutdown(pdev);
idxd_device_remove_debugfs(idxd);
- idxd_cleanup(idxd);
+ perfmon_pmu_remove(idxd);
+ idxd_cleanup_interrupts(idxd);
+ if (device_pasid_enabled(idxd))
+ idxd_disable_system_pasid(idxd);
pci_iounmap(pdev, idxd->reg_base);
put_device(idxd_confdev(idxd));
- idxd_free(idxd);
pci_disable_device(pdev);
}
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 006ba206ab1b..8dc2e8bca779 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -3,13 +3,18 @@
#ifndef _IDXD_REGISTERS_H_
#define _IDXD_REGISTERS_H_
+#ifdef __KERNEL__
#include <uapi/linux/idxd.h>
+#else
+#include <linux/idxd.h>
+#endif
/* PCI Config */
#define PCI_DEVICE_ID_INTEL_DSA_GNRD 0x11fb
#define PCI_DEVICE_ID_INTEL_DSA_DMR 0x1212
#define PCI_DEVICE_ID_INTEL_IAA_DMR 0x1216
#define PCI_DEVICE_ID_INTEL_IAA_PTL 0xb02d
+#define PCI_DEVICE_ID_INTEL_IAA_WCL 0xfd2d
#define DEVICE_VERSION_1 0x100
#define DEVICE_VERSION_2 0x200
@@ -45,7 +50,7 @@ union gen_cap_reg {
u64 rsvd3:32;
};
u64 bits;
-} __packed;
+};
#define IDXD_GENCAP_OFFSET 0x10
union wq_cap_reg {
@@ -65,7 +70,7 @@ union wq_cap_reg {
u64 rsvd4:8;
};
u64 bits;
-} __packed;
+};
#define IDXD_WQCAP_OFFSET 0x20
#define IDXD_WQCFG_MIN 5
@@ -79,7 +84,7 @@ union group_cap_reg {
u64 rsvd:45;
};
u64 bits;
-} __packed;
+};
#define IDXD_GRPCAP_OFFSET 0x30
union engine_cap_reg {
@@ -88,7 +93,7 @@ union engine_cap_reg {
u64 rsvd:56;
};
u64 bits;
-} __packed;
+};
#define IDXD_ENGCAP_OFFSET 0x38
@@ -114,7 +119,7 @@ union offsets_reg {
u64 rsvd:48;
};
u64 bits[2];
-} __packed;
+};
#define IDXD_TABLE_MULT 0x100
@@ -128,7 +133,7 @@ union gencfg_reg {
u32 rsvd2:18;
};
u32 bits;
-} __packed;
+};
#define IDXD_GENCTRL_OFFSET 0x88
union genctrl_reg {
@@ -139,7 +144,7 @@ union genctrl_reg {
u32 rsvd:29;
};
u32 bits;
-} __packed;
+};
#define IDXD_GENSTATS_OFFSET 0x90
union gensts_reg {
@@ -149,7 +154,7 @@ union gensts_reg {
u32 rsvd:28;
};
u32 bits;
-} __packed;
+};
enum idxd_device_status_state {
IDXD_DEVICE_STATE_DISABLED = 0,
@@ -183,7 +188,7 @@ union idxd_command_reg {
u32 int_req:1;
};
u32 bits;
-} __packed;
+};
enum idxd_cmd {
IDXD_CMD_ENABLE_DEVICE = 1,
@@ -213,7 +218,7 @@ union cmdsts_reg {
u8 active:1;
};
u32 bits;
-} __packed;
+};
#define IDXD_CMDSTS_ACTIVE 0x80000000
#define IDXD_CMDSTS_ERR_MASK 0xff
#define IDXD_CMDSTS_RES_SHIFT 8
@@ -284,7 +289,7 @@ union sw_err_reg {
u64 rsvd5;
};
u64 bits[4];
-} __packed;
+};
union iaa_cap_reg {
struct {
@@ -303,7 +308,7 @@ union iaa_cap_reg {
u64 rsvd:52;
};
u64 bits;
-} __packed;
+};
#define IDXD_IAACAP_OFFSET 0x180
@@ -320,7 +325,7 @@ union evlcfg_reg {
u64 rsvd2:28;
};
u64 bits[2];
-} __packed;
+};
#define IDXD_EVL_SIZE_MIN 0x0040
#define IDXD_EVL_SIZE_MAX 0xffff
@@ -334,7 +339,7 @@ union msix_perm {
u32 pasid:20;
};
u32 bits;
-} __packed;
+};
union group_flags {
struct {
@@ -352,13 +357,13 @@ union group_flags {
u64 rsvd5:26;
};
u64 bits;
-} __packed;
+};
struct grpcfg {
u64 wqs[4];
u64 engines;
union group_flags flags;
-} __packed;
+};
union wqcfg {
struct {
@@ -410,7 +415,7 @@ union wqcfg {
u64 op_config[4];
};
u32 bits[16];
-} __packed;
+};
#define WQCFG_PASID_IDX 2
#define WQCFG_PRIVL_IDX 2
@@ -474,7 +479,7 @@ union idxd_perfcap {
u64 rsvd3:8;
};
u64 bits;
-} __packed;
+};
#define IDXD_EVNTCAP_OFFSET 0x80
union idxd_evntcap {
@@ -483,7 +488,7 @@ union idxd_evntcap {
u64 rsvd:36;
};
u64 bits;
-} __packed;
+};
struct idxd_event {
union {
@@ -493,7 +498,7 @@ struct idxd_event {
};
u32 val;
};
-} __packed;
+};
#define IDXD_CNTRCAP_OFFSET 0x800
struct idxd_cntrcap {
@@ -506,7 +511,7 @@ struct idxd_cntrcap {
u32 val;
};
struct idxd_event events[];
-} __packed;
+};
#define IDXD_PERFRST_OFFSET 0x10
union idxd_perfrst {
@@ -516,7 +521,7 @@ union idxd_perfrst {
u32 rsvd:30;
};
u32 val;
-} __packed;
+};
#define IDXD_OVFSTATUS_OFFSET 0x30
#define IDXD_PERFFRZ_OFFSET 0x20
@@ -533,7 +538,7 @@ union idxd_cntrcfg {
u64 rsvd3:4;
};
u64 val;
-} __packed;
+};
#define IDXD_FLTCFG_OFFSET 0x300
@@ -543,7 +548,7 @@ union idxd_cntrdata {
u64 event_count_value;
};
u64 val;
-} __packed;
+};
union event_cfg {
struct {
@@ -551,7 +556,7 @@ union event_cfg {
u64 event_enc:28;
};
u64 val;
-} __packed;
+};
union filter_cfg {
struct {
@@ -562,7 +567,7 @@ union filter_cfg {
u64 eng:8;
};
u64 val;
-} __packed;
+};
#define IDXD_EVLSTATUS_OFFSET 0xf0
@@ -580,7 +585,7 @@ union evl_status_reg {
u32 bits_upper32;
};
u64 bits;
-} __packed;
+};
#define IDXD_MAX_BATCH_IDENT 256
@@ -620,17 +625,17 @@ struct __evl_entry {
};
u64 fault_addr;
u64 rsvd5;
-} __packed;
+};
struct dsa_evl_entry {
struct __evl_entry e;
struct dsa_completion_record cr;
-} __packed;
+};
struct iax_evl_entry {
struct __evl_entry e;
u64 rsvd[4];
struct iax_completion_record cr;
-} __packed;
+};
#endif
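
[Editor's sketch] Dropping __packed is safe here because every union overlays naturally aligned bitfields on u32/u64 words, so the layout is unchanged without the attribute. A build-time sketch of that assumption:

#include <linux/build_bug.h>

static_assert(sizeof(union gen_cap_reg) == sizeof(u64));
static_assert(sizeof(union gencfg_reg) == sizeof(u32));
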
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 02a85d6f1bea..ed9e56de5a9b 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -256,7 +256,7 @@ struct sdma_script_start_addrs {
/* End of v3 array */
union { s32 v3_end; s32 mcu_2_zqspi_addr; };
/* End of v4 array */
- s32 v4_end[0];
+ s32 v4_end[];
};
/*
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index a180171087a8..12a4a4860a74 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -19,6 +19,8 @@
#define IOAT_DMA_DCA_ANY_CPU ~0
+int system_has_dca_enabled(struct pci_dev *pdev);
+
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 79e4e4c09c18..0373c48520c9 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -63,9 +63,6 @@
#define IOAT_VER_3_3 0x33 /* Version 3.3 */
#define IOAT_VER_3_4 0x34 /* Version 3.4 */
-
-int system_has_dca_enabled(struct pci_dev *pdev);
-
#define IOAT_DESC_SZ 64
struct ioat_dma_descriptor {
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 02f68b328511..227398673b73 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -1286,7 +1286,6 @@ static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
pci_wake_from_d3(pdev, false);
}
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index acc2983e28e0..0f9cd7815f88 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -1034,5 +1034,4 @@ static struct platform_driver k3_pdma_driver = {
module_platform_driver(k3_pdma_driver);
MODULE_DESCRIPTION("HiSilicon k3 DMA Driver");
-MODULE_ALIAS("platform:k3dma");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 47c8adfdc155..9f0c41ca7770 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -449,9 +449,9 @@ static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c,
return ret;
spin_lock_irqsave(&cvc->pc->lock, flags);
- spin_lock_irqsave(&cvc->vc.lock, flags);
+ spin_lock(&cvc->vc.lock);
vd = mtk_cqdma_find_active_desc(c, cookie);
- spin_unlock_irqrestore(&cvc->vc.lock, flags);
+ spin_unlock(&cvc->vc.lock);
spin_unlock_irqrestore(&cvc->pc->lock, flags);
if (vd) {
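
The locking change above matters because the inner spin_lock_irqsave() reused the same `flags` variable as the outer one, clobbering the saved IRQ state before the outer unlock could restore it. Since the outer lock has already disabled interrupts, the inner lock only needs the plain variants. The safe shape (lock names hypothetical):

unsigned long flags;

spin_lock_irqsave(&pc_lock, flags);	/* disables IRQs, saves state */
spin_lock(&vc_lock);			/* IRQs already disabled */
/* ... look up the descriptor ... */
spin_unlock(&vc_lock);
spin_unlock_irqrestore(&pc_lock, flags);
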
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index a95d31103d30..d07229a74886 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -15,6 +15,8 @@
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
#include <linux/of_dma.h>
#include <linux/of.h>
@@ -23,9 +25,12 @@
#define DCSR 0x0000
#define DALGN 0x00a0
#define DINT 0x00f0
-#define DDADR 0x0200
+#define DDADR(n) (0x0200 + ((n) << 4))
#define DSADR(n) (0x0204 + ((n) << 4))
#define DTADR(n) (0x0208 + ((n) << 4))
+#define DDADRH(n) (0x0300 + ((n) << 4))
+#define DSADRH(n) (0x0304 + ((n) << 4))
+#define DTADRH(n) (0x0308 + ((n) << 4))
#define DCMD 0x020c
#define DCSR_RUN BIT(31) /* Run Bit (read / write) */
@@ -42,6 +47,7 @@
#define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */
#define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */
#define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */
+#define DCSR_LPAEEN BIT(21) /* Long Physical Address Extension Enable */
#define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */
#define DCSR_EORINTR BIT(9) /* The end of Receive */
@@ -74,6 +80,16 @@ struct mmp_pdma_desc_hw {
u32 dsadr; /* DSADR value for the current transfer */
u32 dtadr; /* DTADR value for the current transfer */
u32 dcmd; /* DCMD value for the current transfer */
+ /*
+ * The following 32-bit words are only used in 64-bit, i.e. LPAE
+ * (Long Physical Address Extension), mode.

+ * They are used to specify the high 32 bits of the descriptor's
+ * addresses.
+ */
+ u32 ddadrh; /* High 32-bit of DDADR */
+ u32 dsadrh; /* High 32-bit of DSADR */
+ u32 dtadrh; /* High 32-bit of DTADR */
+ u32 rsvd; /* reserved */
} __aligned(32);
struct mmp_pdma_desc_sw {
@@ -118,12 +134,55 @@ struct mmp_pdma_phy {
struct mmp_pdma_chan *vchan;
};
+/**
+ * struct mmp_pdma_ops - Operations for the MMP PDMA controller
+ *
+ * Hardware Register Operations (read/write hardware registers):
+ * @write_next_addr: Function to program address of next descriptor into
+ * DDADR/DDADRH
+ * @read_src_addr: Function to read the source address from DSADR/DSADRH
+ * @read_dst_addr: Function to read the destination address from DTADR/DTADRH
+ *
+ * Descriptor Memory Operations (manipulate descriptor structs in memory):
+ * @set_desc_next_addr: Function to set next descriptor address in descriptor
+ * @set_desc_src_addr: Function to set the source address in descriptor
+ * @set_desc_dst_addr: Function to set the destination address in descriptor
+ * @get_desc_src_addr: Function to get the source address from descriptor
+ * @get_desc_dst_addr: Function to get the destination address from descriptor
+ *
+ * Controller Configuration:
+ * @run_bits: Control bits in DCSR register for channel start/stop
+ * @dma_mask: DMA addressing capability of controller. 0 to use OF/platform
+ * settings, or explicit mask like DMA_BIT_MASK(32/64)
+ */
+struct mmp_pdma_ops {
+ /* Hardware Register Operations */
+ void (*write_next_addr)(struct mmp_pdma_phy *phy, dma_addr_t addr);
+ u64 (*read_src_addr)(struct mmp_pdma_phy *phy);
+ u64 (*read_dst_addr)(struct mmp_pdma_phy *phy);
+
+ /* Descriptor Memory Operations */
+ void (*set_desc_next_addr)(struct mmp_pdma_desc_hw *desc,
+ dma_addr_t addr);
+ void (*set_desc_src_addr)(struct mmp_pdma_desc_hw *desc,
+ dma_addr_t addr);
+ void (*set_desc_dst_addr)(struct mmp_pdma_desc_hw *desc,
+ dma_addr_t addr);
+ u64 (*get_desc_src_addr)(const struct mmp_pdma_desc_hw *desc);
+ u64 (*get_desc_dst_addr)(const struct mmp_pdma_desc_hw *desc);
+
+ /* Controller Configuration */
+ u32 run_bits;
+ u64 dma_mask;
+};
+
struct mmp_pdma_device {
int dma_channels;
void __iomem *base;
struct device *dev;
struct dma_device device;
struct mmp_pdma_phy *phy;
+ const struct mmp_pdma_ops *ops;
spinlock_t phy_lock; /* protect alloc/free phy channels */
};
@@ -136,24 +195,112 @@ struct mmp_pdma_device {
#define to_mmp_pdma_dev(dmadev) \
container_of(dmadev, struct mmp_pdma_device, device)
-static int mmp_pdma_config_write(struct dma_chan *dchan,
- struct dma_slave_config *cfg,
- enum dma_transfer_direction direction);
+/* For 32-bit PDMA */
+static void write_next_addr_32(struct mmp_pdma_phy *phy, dma_addr_t addr)
+{
+ writel(addr, phy->base + DDADR(phy->idx));
+}
+
+static u64 read_src_addr_32(struct mmp_pdma_phy *phy)
+{
+ return readl(phy->base + DSADR(phy->idx));
+}
+
+static u64 read_dst_addr_32(struct mmp_pdma_phy *phy)
+{
+ return readl(phy->base + DTADR(phy->idx));
+}
+
+static void set_desc_next_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->ddadr = addr;
+}
+
+static void set_desc_src_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dsadr = addr;
+}
+
+static void set_desc_dst_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dtadr = addr;
+}
+
+static u64 get_desc_src_addr_32(const struct mmp_pdma_desc_hw *desc)
+{
+ return desc->dsadr;
+}
+
+static u64 get_desc_dst_addr_32(const struct mmp_pdma_desc_hw *desc)
+{
+ return desc->dtadr;
+}
+
+/* For 64-bit PDMA */
+static void write_next_addr_64(struct mmp_pdma_phy *phy, dma_addr_t addr)
+{
+ writel(lower_32_bits(addr), phy->base + DDADR(phy->idx));
+ writel(upper_32_bits(addr), phy->base + DDADRH(phy->idx));
+}
+
+static u64 read_src_addr_64(struct mmp_pdma_phy *phy)
+{
+ u32 low = readl(phy->base + DSADR(phy->idx));
+ u32 high = readl(phy->base + DSADRH(phy->idx));
+
+ return ((u64)high << 32) | low;
+}
-static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
+static u64 read_dst_addr_64(struct mmp_pdma_phy *phy)
{
- u32 reg = (phy->idx << 4) + DDADR;
+ u32 low = readl(phy->base + DTADR(phy->idx));
+ u32 high = readl(phy->base + DTADRH(phy->idx));
- writel(addr, phy->base + reg);
+ return ((u64)high << 32) | low;
}
+static void set_desc_next_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->ddadr = lower_32_bits(addr);
+ desc->ddadrh = upper_32_bits(addr);
+}
+
+static void set_desc_src_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dsadr = lower_32_bits(addr);
+ desc->dsadrh = upper_32_bits(addr);
+}
+
+static void set_desc_dst_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dtadr = lower_32_bits(addr);
+ desc->dtadrh = upper_32_bits(addr);
+}
+
+static u64 get_desc_src_addr_64(const struct mmp_pdma_desc_hw *desc)
+{
+ return ((u64)desc->dsadrh << 32) | desc->dsadr;
+}
+
+static u64 get_desc_dst_addr_64(const struct mmp_pdma_desc_hw *desc)
+{
+ return ((u64)desc->dtadrh << 32) | desc->dtadr;
+}
+
+static int mmp_pdma_config_write(struct dma_chan *dchan,
+ struct dma_slave_config *cfg,
+ enum dma_transfer_direction direction);
+
static void enable_chan(struct mmp_pdma_phy *phy)
{
u32 reg, dalgn;
+ struct mmp_pdma_device *pdev;
if (!phy->vchan)
return;
+ pdev = to_mmp_pdma_dev(phy->vchan->chan.device);
+
reg = DRCMR(phy->vchan->drcmr);
writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
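
The 64-bit accessors above split and rejoin addresses with lower_32_bits()/upper_32_bits(); a quick round trip shows the invariant they rely on (value illustrative, assumes a 64-bit dma_addr_t):

dma_addr_t addr = 0x0000123456789abcULL;
u32 lo = lower_32_bits(addr);	/* 0x56789abc */
u32 hi = upper_32_bits(addr);	/* 0x00001234 */

WARN_ON((((u64)hi << 32) | lo) != addr);	/* never fires */
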
@@ -165,18 +312,29 @@ static void enable_chan(struct mmp_pdma_phy *phy)
writel(dalgn, phy->base + DALGN);
reg = (phy->idx << 2) + DCSR;
- writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
+ writel(readl(phy->base + reg) | pdev->ops->run_bits,
+ phy->base + reg);
}
static void disable_chan(struct mmp_pdma_phy *phy)
{
- u32 reg;
+ u32 reg, dcsr;
if (!phy)
return;
reg = (phy->idx << 2) + DCSR;
- writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
+ dcsr = readl(phy->base + reg);
+
+ if (phy->vchan) {
+ struct mmp_pdma_device *pdev;
+
+ pdev = to_mmp_pdma_dev(phy->vchan->chan.device);
+ writel(dcsr & ~pdev->ops->run_bits, phy->base + reg);
+ } else {
+ /* If no vchan, just clear the RUN bit */
+ writel(dcsr & ~DCSR_RUN, phy->base + reg);
+ }
}
static int clear_chan_irq(struct mmp_pdma_phy *phy)
@@ -295,6 +453,7 @@ static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
struct mmp_pdma_desc_sw *desc;
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
/* still in running, irq will start the pending list */
if (!chan->idle) {
@@ -329,7 +488,7 @@ static void start_pending_queue(struct mmp_pdma_chan *chan)
* Program the descriptor's address into the DMA controller,
* then start the DMA transaction
*/
- set_desc(chan->phy, desc->async_tx.phys);
+ pdev->ops->write_next_addr(chan->phy, desc->async_tx.phys);
enable_chan(chan->phy);
chan->idle = false;
}
@@ -445,15 +604,14 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
size_t len, unsigned long flags)
{
struct mmp_pdma_chan *chan;
+ struct mmp_pdma_device *pdev;
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
size_t copy = 0;
- if (!dchan)
- return NULL;
-
- if (!len)
+ if (!dchan || !len)
return NULL;
+ pdev = to_mmp_pdma_dev(dchan->device);
chan = to_mmp_pdma_chan(dchan);
chan->byte_align = false;
@@ -476,13 +634,14 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
chan->byte_align = true;
new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
- new->desc.dsadr = dma_src;
- new->desc.dtadr = dma_dst;
+ pdev->ops->set_desc_src_addr(&new->desc, dma_src);
+ pdev->ops->set_desc_dst_addr(&new->desc, dma_dst);
if (!first)
first = new;
else
- prev->desc.ddadr = new->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&prev->desc,
+ new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
@@ -526,6 +685,7 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
unsigned long flags, void *context)
{
struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(dchan->device);
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
size_t len, avail;
struct scatterlist *sg;
@@ -557,17 +717,18 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
if (dir == DMA_MEM_TO_DEV) {
- new->desc.dsadr = addr;
+ pdev->ops->set_desc_src_addr(&new->desc, addr);
new->desc.dtadr = chan->dev_addr;
} else {
new->desc.dsadr = chan->dev_addr;
- new->desc.dtadr = addr;
+ pdev->ops->set_desc_dst_addr(&new->desc, addr);
}
if (!first)
first = new;
else
- prev->desc.ddadr = new->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&prev->desc,
+ new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
@@ -607,12 +768,15 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
unsigned long flags)
{
struct mmp_pdma_chan *chan;
+ struct mmp_pdma_device *pdev;
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
dma_addr_t dma_src, dma_dst;
if (!dchan || !len || !period_len)
return NULL;
+ pdev = to_mmp_pdma_dev(dchan->device);
+
/* the buffer length must be a multiple of period_len */
if (len % period_len != 0)
return NULL;
@@ -649,13 +813,14 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
(DCMD_LENGTH & period_len));
- new->desc.dsadr = dma_src;
- new->desc.dtadr = dma_dst;
+ pdev->ops->set_desc_src_addr(&new->desc, dma_src);
+ pdev->ops->set_desc_dst_addr(&new->desc, dma_dst);
if (!first)
first = new;
else
- prev->desc.ddadr = new->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&prev->desc,
+ new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
@@ -676,7 +841,7 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
first->async_tx.cookie = -EBUSY;
/* make the cyclic link */
- new->desc.ddadr = first->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&new->desc, first->async_tx.phys);
chan->cyclic_first = first;
return &first->async_tx;
@@ -762,7 +927,9 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
dma_cookie_t cookie)
{
struct mmp_pdma_desc_sw *sw;
- u32 curr, residue = 0;
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
+ u64 curr;
+ u32 residue = 0;
bool passed = false;
bool cyclic = chan->cyclic_first != NULL;
@@ -774,17 +941,18 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
return 0;
if (chan->dir == DMA_DEV_TO_MEM)
- curr = readl(chan->phy->base + DTADR(chan->phy->idx));
+ curr = pdev->ops->read_dst_addr(chan->phy);
else
- curr = readl(chan->phy->base + DSADR(chan->phy->idx));
+ curr = pdev->ops->read_src_addr(chan->phy);
list_for_each_entry(sw, &chan->chain_running, node) {
- u32 start, end, len;
+ u64 start, end;
+ u32 len;
if (chan->dir == DMA_DEV_TO_MEM)
- start = sw->desc.dtadr;
+ start = pdev->ops->get_desc_dst_addr(&sw->desc);
else
- start = sw->desc.dsadr;
+ start = pdev->ops->get_desc_src_addr(&sw->desc);
len = sw->desc.dcmd & DCMD_LENGTH;
end = start + len;
@@ -800,7 +968,7 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
if (passed) {
residue += len;
} else if (curr >= start && curr <= end) {
- residue += end - curr;
+ residue += (u32)(end - curr);
passed = true;
}
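
The type widening above keeps the residue walk correct for LPAE addresses: completed descriptors contribute nothing, the in-flight one contributes end - curr (which fits in u32 because a single descriptor length is bounded by DCMD_LENGTH), and every descriptor after it contributes its full length. Condensed, with curr read via the ops->read_*_addr hooks shown earlier (helper name hypothetical):

u32 residue = 0;
bool passed = false;

list_for_each_entry(sw, &chan->chain_running, node) {
	u64 start = desc_start_addr(sw);	/* hypothetical helper */
	u32 len = sw->desc.dcmd & DCMD_LENGTH;
	u64 end = start + len;

	if (passed)
		residue += len;
	else if (curr >= start && curr <= end) {
		residue += (u32)(end - curr);
		passed = true;
	}
}
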
@@ -994,9 +1162,42 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
return 0;
}
+static const struct mmp_pdma_ops marvell_pdma_v1_ops = {
+ .write_next_addr = write_next_addr_32,
+ .read_src_addr = read_src_addr_32,
+ .read_dst_addr = read_dst_addr_32,
+ .set_desc_next_addr = set_desc_next_addr_32,
+ .set_desc_src_addr = set_desc_src_addr_32,
+ .set_desc_dst_addr = set_desc_dst_addr_32,
+ .get_desc_src_addr = get_desc_src_addr_32,
+ .get_desc_dst_addr = get_desc_dst_addr_32,
+ .run_bits = (DCSR_RUN),
+ .dma_mask = 0, /* let OF/platform set DMA mask */
+};
+
+static const struct mmp_pdma_ops spacemit_k1_pdma_ops = {
+ .write_next_addr = write_next_addr_64,
+ .read_src_addr = read_src_addr_64,
+ .read_dst_addr = read_dst_addr_64,
+ .set_desc_next_addr = set_desc_next_addr_64,
+ .set_desc_src_addr = set_desc_src_addr_64,
+ .set_desc_dst_addr = set_desc_dst_addr_64,
+ .get_desc_src_addr = get_desc_src_addr_64,
+ .get_desc_dst_addr = get_desc_dst_addr_64,
+ .run_bits = (DCSR_RUN | DCSR_LPAEEN),
+ .dma_mask = DMA_BIT_MASK(64), /* force 64-bit DMA addr capability */
+};
+
static const struct of_device_id mmp_pdma_dt_ids[] = {
- { .compatible = "marvell,pdma-1.0", },
- {}
+ {
+ .compatible = "marvell,pdma-1.0",
+ .data = &marvell_pdma_v1_ops
+ }, {
+ .compatible = "spacemit,k1-pdma",
+ .data = &spacemit_k1_pdma_ops
+ }, {
+ /* sentinel */
+ }
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
@@ -1019,6 +1220,8 @@ static int mmp_pdma_probe(struct platform_device *op)
{
struct mmp_pdma_device *pdev;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
+ struct clk *clk;
+ struct reset_control *rst;
int i, ret, irq = 0;
int dma_channels = 0, irq_num = 0;
const enum dma_slave_buswidth widths =
@@ -1037,6 +1240,19 @@ static int mmp_pdma_probe(struct platform_device *op)
if (IS_ERR(pdev->base))
return PTR_ERR(pdev->base);
+ clk = devm_clk_get_optional_enabled(pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rst = devm_reset_control_get_optional_exclusive_deasserted(pdev->dev,
+ NULL);
+ if (IS_ERR(rst))
+ return PTR_ERR(rst);
+
+ pdev->ops = of_device_get_match_data(&op->dev);
+ if (!pdev->ops)
+ return -ENODEV;
+
if (pdev->dev->of_node) {
/* Parse new and deprecated dma-channels properties */
if (of_property_read_u32(pdev->dev->of_node, "dma-channels",
@@ -1098,7 +1314,10 @@ static int mmp_pdma_probe(struct platform_device *op)
pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
- if (pdev->dev->coherent_dma_mask)
+ /* Set DMA mask based on ops->dma_mask, or OF/platform */
+ if (pdev->ops->dma_mask)
+ dma_set_mask(pdev->dev, pdev->ops->dma_mask);
+ else if (pdev->dev->coherent_dma_mask)
dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
else
dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
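
The probe logic above gives the per-compatible ops first say over the DMA mask: spacemit,k1-pdma forces DMA_BIT_MASK(64) to match its LPAE descriptors, while marvell,pdma-1.0 leaves ops->dma_mask at 0 and inherits whatever OF/platform code established. The precedence, condensed:

if (pdev->ops->dma_mask)			/* controller mandates a mask */
	dma_set_mask(pdev->dev, pdev->ops->dma_mask);
else if (pdev->dev->coherent_dma_mask)		/* firmware/platform set one */
	dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
else
	dma_set_mask(pdev->dev, DMA_BIT_MASK(64));	/* last resort */
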
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index c8dc504510f1..ba03321eeff7 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -554,8 +554,7 @@ static void mmp_tdma_issue_pending(struct dma_chan *chan)
static void mmp_tdma_remove(struct platform_device *pdev)
{
- if (pdev->dev.of_node)
- of_dma_controller_free(pdev->dev.of_node);
+ of_dma_controller_free(pdev->dev.of_node);
}
static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
@@ -641,7 +640,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
int chan_num = TDMA_CHANNEL_NUM;
struct gen_pool *pool = NULL;
- type = (enum mmp_tdma_type)device_get_match_data(&pdev->dev);
+ type = (kernel_ulong_t)device_get_match_data(&pdev->dev);
/* always have couple channels */
tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
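
The cast change above silences clang's -Wvoid-pointer-to-enum-cast: device_get_match_data() returns the match table's .data pointer, so the conversion should go through the pointer-sized kernel_ulong_t and then narrow on assignment. The full pattern (names hypothetical):

static const struct of_device_id example_ids[] = {
	{ .compatible = "vendor,example", .data = (void *)EXAMPLE_TYPE_A },
	{ /* sentinel */ }
};

/* at probe time */
enum example_type type;

type = (kernel_ulong_t)device_get_match_data(&pdev->dev);
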
@@ -743,6 +742,5 @@ module_platform_driver(mmp_tdma_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMP Two-Channel DMA Driver");
-MODULE_ALIAS("platform:mmp-tdma");
MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>");
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index fa6e4646fdc2..5e8386296046 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1013,7 +1013,7 @@ static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
dma_async_device_unregister(&mv_chan->dmadev);
- dma_free_coherent(dev, MV_XOR_POOL_SIZE,
+ dma_free_wc(dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
dma_unmap_single(dev, mv_chan->dummy_src_addr,
MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
@@ -1061,8 +1061,16 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
*/
mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_src_addr))
+ return ERR_PTR(-ENOMEM);
+
mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_dst_addr)) {
+ ret = -ENOMEM;
+ goto err_unmap_src;
+ }
+
/* allocate coherent memory for hardware descriptors
* note: writecombine gives slightly better performance, but
@@ -1071,8 +1079,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
mv_chan->dma_desc_pool_virt =
dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
GFP_KERNEL);
- if (!mv_chan->dma_desc_pool_virt)
- return ERR_PTR(-ENOMEM);
+ if (!mv_chan->dma_desc_pool_virt) {
+ ret = -ENOMEM;
+ goto err_unmap_dst;
+ }
/* discover transaction capabilities from the platform data */
dma_dev->cap_mask = cap_mask;
@@ -1153,8 +1163,15 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
err_free_irq:
free_irq(mv_chan->irq, mv_chan);
err_free_dma:
- dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
+ dma_free_wc(&pdev->dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
+err_unmap_dst:
+ dma_unmap_single(dma_dev->dev, mv_chan->dummy_dst_addr,
+ MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
+err_unmap_src:
+ dma_unmap_single(dma_dev->dev, mv_chan->dummy_src_addr,
+ MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
+
return ERR_PTR(ret);
}
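
The new labels above follow the standard kernel unwind convention: resources are released in reverse order of acquisition, and each goto target undoes exactly what had succeeded before the failure. The skeleton of that shape (names hypothetical):

int example_setup(struct device *dev)
{
	int ret;

	a = acquire_a(dev);		/* hypothetical helpers */
	if (!a)
		return -ENOMEM;

	b = acquire_b(dev);
	if (!b) {
		ret = -ENOMEM;
		goto err_release_a;
	}

	c = acquire_c(dev);
	if (!c) {
		ret = -ENOMEM;
		goto err_release_b;
	}
	return 0;

err_release_b:
	release_b(b);
err_release_a:
	release_a(a);
	return ret;
}
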
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 0d6324c4e2be..334425faac00 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -711,6 +711,9 @@ static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
list_add_tail(&ldesc->node, &lhead);
ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev,
hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE);
+ if (dma_mapping_error(dchan->device->dev,
+ ldesc->hwdesc_dma_addr))
+ goto unmap_error;
dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__,
hwdesc, &ldesc->hwdesc_dma_addr);
@@ -737,6 +740,16 @@ static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
spin_unlock_irq(&chan->lock);
return ARRAY_SIZE(dpage->desc);
+
+unmap_error:
+ while (i--) {
+ ldesc--;
+ hwdesc--;
+
+ dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr,
+ sizeof(*hwdesc), DMA_TO_DEVICE);
+ }
+
+ return -ENOMEM;
}
static void nbpf_desc_put(struct nbpf_desc *desc)
@@ -1351,7 +1364,7 @@ static int nbpf_probe(struct platform_device *pdev)
if (irqs == 1) {
eirq = irqbuf[0];
- for (i = 0; i <= num_channels; i++)
+ for (i = 0; i < num_channels; i++)
nbpf->chan[i].irq = irqbuf[0];
} else {
eirq = platform_get_irq_byname(pdev, "error");
@@ -1361,16 +1374,15 @@ static int nbpf_probe(struct platform_device *pdev)
if (irqs == num_channels + 1) {
struct nbpf_channel *chan;
- for (i = 0, chan = nbpf->chan; i <= num_channels;
+ for (i = 0, chan = nbpf->chan; i < num_channels;
i++, chan++) {
/* Skip the error IRQ */
if (irqbuf[i] == eirq)
i++;
+ if (i >= ARRAY_SIZE(irqbuf))
+ return -EINVAL;
chan->irq = irqbuf[i];
}
-
- if (chan != nbpf->chan + num_channels)
- return -EINVAL;
} else {
/* 2 IRQs and more than one channel */
if (irqbuf[0] == eirq)
@@ -1378,7 +1390,7 @@ static int nbpf_probe(struct platform_device *pdev)
else
irq = irqbuf[0];
- for (i = 0; i <= num_channels; i++)
+ for (i = 0; i < num_channels; i++)
nbpf->chan[i].irq = irq;
}
}
@@ -1488,7 +1500,6 @@ static const struct platform_device_id nbpf_ids[] = {
};
MODULE_DEVICE_TABLE(platform, nbpf_ids);
-#ifdef CONFIG_PM
static int nbpf_runtime_suspend(struct device *dev)
{
struct nbpf_device *nbpf = dev_get_drvdata(dev);
@@ -1501,17 +1512,16 @@ static int nbpf_runtime_resume(struct device *dev)
struct nbpf_device *nbpf = dev_get_drvdata(dev);
return clk_prepare_enable(nbpf->clk);
}
-#endif
static const struct dev_pm_ops nbpf_pm_ops = {
- SET_RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL)
+ RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL)
};
static struct platform_driver nbpf_driver = {
.driver = {
.name = "dma-nbpf",
.of_match_table = nbpf_match,
- .pm = &nbpf_pm_ops,
+ .pm = pm_ptr(&nbpf_pm_ops),
},
.id_table = nbpf_ids,
.probe = nbpf_probe,
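
The conversion above (repeated below for rcar-dmac and usb-dmac) is the modern PM idiom: RUNTIME_PM_OPS() always compiles the callbacks so they cannot bit-rot, and pm_ptr() evaluates to NULL when CONFIG_PM is off so the whole ops structure is discarded by the linker, making the old #ifdef CONFIG_PM guards unnecessary. Minimal sketch:

static int example_runtime_suspend(struct device *dev) { return 0; }
static int example_runtime_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops example_pm_ops = {
	RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
};

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		.pm = pm_ptr(&example_pm_ops),	/* NULL if !CONFIG_PM */
	},
};
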
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 9d2a5a967a99..61500ad7c850 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -874,7 +874,7 @@ static int ppc440spe_dma2_pq_slot_count(dma_addr_t *srcs,
pr_err("%s: src_cnt=%d, state=%d, addr_count=%d, order=%lld\n",
__func__, src_cnt, state, addr_count, order);
for (i = 0; i < src_cnt; i++)
- pr_err("\t[%d] 0x%llx \n", i, srcs[i]);
+ pr_err("\t[%d] 0x%llx\n", i, srcs[i]);
BUG();
}
@@ -3636,7 +3636,7 @@ static void ppc440spe_adma_issue_pending(struct dma_chan *chan)
ppc440spe_chan = to_ppc440spe_adma_chan(chan);
dev_dbg(ppc440spe_chan->device->common.dev,
- "ppc440spe adma%d: %s %d \n", ppc440spe_chan->device->id,
+ "ppc440spe adma%d: %s %d\n", ppc440spe_chan->device->id,
__func__, ppc440spe_chan->pending);
if (ppc440spe_chan->pending) {
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index bbc3276992bb..2cf060174795 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -1283,13 +1283,17 @@ static int bam_dma_probe(struct platform_device *pdev)
if (!bdev->bamclk) {
ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
&bdev->num_channels);
- if (ret)
+ if (ret) {
dev_err(bdev->dev, "num-channels unspecified in dt\n");
+ return ret;
+ }
ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
&bdev->num_ees);
- if (ret)
+ if (ret) {
dev_err(bdev->dev, "num-ees unspecified in dt\n");
+ return ret;
+ }
}
ret = clk_prepare_enable(bdev->bamclk);
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index b1f0001cc99c..66bfea1f156d 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -569,17 +569,6 @@ static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
writel_relaxed(val, addr);
}
-/* gpi_write_reg_field - write to specific bit field */
-static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr,
- u32 mask, u32 shift, u32 val)
-{
- u32 tmp = gpi_read_reg(gpii, addr);
-
- tmp &= ~mask;
- val = tmp | ((val << shift) & mask);
- gpi_write_reg(gpii, addr, val);
-}
-
static __always_inline void
gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
{
@@ -1630,7 +1619,8 @@ gpi_peripheral_config(struct dma_chan *chan, struct dma_slave_config *config)
}
static int gpi_create_i2c_tre(struct gchan *chan, struct gpi_desc *desc,
- struct scatterlist *sgl, enum dma_transfer_direction direction)
+ struct scatterlist *sgl, enum dma_transfer_direction direction,
+ unsigned long flags)
{
struct gpi_i2c_config *i2c = chan->config;
struct device *dev = chan->gpii->gpi_dev->dev;
@@ -1695,6 +1685,9 @@ static int gpi_create_i2c_tre(struct gchan *chan, struct gpi_desc *desc,
tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
+
+ if (!(flags & DMA_PREP_INTERRUPT))
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_BEI);
}
for (i = 0; i < tre_idx; i++)
@@ -1838,6 +1831,9 @@ gpi_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return NULL;
}
+ if (!(flags & DMA_PREP_INTERRUPT) && (nr - nr_tre < 2))
+ return NULL;
+
gpi_desc = kzalloc(sizeof(*gpi_desc), GFP_NOWAIT);
if (!gpi_desc)
return NULL;
@@ -1846,7 +1842,7 @@ gpi_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (gchan->protocol == QCOM_GPI_SPI) {
i = gpi_create_spi_tre(gchan, gpi_desc, sgl, direction);
} else if (gchan->protocol == QCOM_GPI_I2C) {
- i = gpi_create_i2c_tre(gchan, gpi_desc, sgl, direction);
+ i = gpi_create_i2c_tre(gchan, gpi_desc, sgl, direction, flags);
} else {
dev_err(dev, "invalid peripheral: %d\n", gchan->protocol);
kfree(gpi_desc);
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 6ea5a880b433..a16c7e83bd14 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -16,7 +16,7 @@ config SH_DMAE_BASE
depends on SUPERH || COMPILE_TEST
depends on !SUPERH || SH_DMA
depends on !SH_DMA_API
- default y
+ default SUPERH || SH_DMA
select RENESAS_DMA
help
Enable support for the Renesas SuperH DMA controllers.
@@ -50,7 +50,7 @@ config RENESAS_USB_DMAC
config RZ_DMAC
tristate "Renesas RZ DMA Controller"
- depends on ARCH_R7S72100 || ARCH_RZG2L || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
select RENESAS_DMA
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 0c45ce8c74aa..475a347cae1b 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1728,19 +1728,12 @@ static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
* Power management
*/
-#ifdef CONFIG_PM
-static int rcar_dmac_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
static int rcar_dmac_runtime_resume(struct device *dev)
{
struct rcar_dmac *dmac = dev_get_drvdata(dev);
return rcar_dmac_init(dmac);
}
-#endif
static const struct dev_pm_ops rcar_dmac_pm = {
/*
@@ -1748,10 +1741,9 @@ static const struct dev_pm_ops rcar_dmac_pm = {
* - Wait for the current transfer to complete and stop the device,
* - Resume transfers, if any.
*/
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
- NULL)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ RUNTIME_PM_OPS(NULL, rcar_dmac_runtime_resume, NULL)
};
/* -----------------------------------------------------------------------------
@@ -2036,7 +2028,7 @@ MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
static struct platform_driver rcar_dmac_driver = {
.driver = {
- .pm = &rcar_dmac_pm,
+ .pm = pm_ptr(&rcar_dmac_pm),
.name = "rcar-dmac",
.of_match_table = rcar_dmac_of_ids,
},
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 6b4fce453c85..834741adadaa 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -129,12 +129,25 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
const struct shdma_ops *ops = sdev->ops;
dev_dbg(schan->dev, "Bring up channel %d\n",
schan->id);
- /*
- * TODO: .xfer_setup() might fail on some platforms.
- * Make it int then, on error remove chunks from the
- * queue again
- */
- ops->setup_xfer(schan, schan->slave_id);
+
+ ret = ops->setup_xfer(schan, schan->slave_id);
+ if (ret < 0) {
+ dev_err(schan->dev, "setup_xfer failed: %d\n", ret);
+
+ /* Remove chunks from the queue and mark them as idle */
+ list_for_each_entry_safe(chunk, c, &schan->ld_queue, node) {
+ if (chunk->cookie == cookie) {
+ chunk->mark = DESC_IDLE;
+ list_move(&chunk->node, &schan->ld_free);
+ }
+ }
+
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
+ ret = pm_runtime_put(schan->dev);
+
+ spin_unlock_irq(&schan->chan_lock);
+ return ret;
+ }
if (schan->pm_state == SHDMA_PM_PENDING)
shdma_chan_xfer_ld_queue(schan);
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 093e449e19ee..603e15102e45 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -300,21 +300,30 @@ static bool sh_dmae_channel_busy(struct shdma_chan *schan)
return dmae_is_busy(sh_chan);
}
-static void sh_dmae_setup_xfer(struct shdma_chan *schan,
- int slave_id)
+static int sh_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
{
struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
shdma_chan);
+ int ret = 0;
if (slave_id >= 0) {
const struct sh_dmae_slave_config *cfg =
sh_chan->config;
- dmae_set_dmars(sh_chan, cfg->mid_rid);
- dmae_set_chcr(sh_chan, cfg->chcr);
+ ret = dmae_set_dmars(sh_chan, cfg->mid_rid);
+ if (ret < 0)
+ goto end;
+
+ ret = dmae_set_chcr(sh_chan, cfg->chcr);
+ if (ret < 0)
+ goto end;
+
} else {
dmae_init(sh_chan);
}
+
+end:
+ return ret;
}
/*
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 7e2b6c97fa2f..b42e5a66fd95 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -670,7 +670,6 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
* Power management
*/
-#ifdef CONFIG_PM
static int usb_dmac_runtime_suspend(struct device *dev)
{
struct usb_dmac *dmac = dev_get_drvdata(dev);
@@ -691,13 +690,11 @@ static int usb_dmac_runtime_resume(struct device *dev)
return usb_dmac_init(dmac);
}
-#endif /* CONFIG_PM */
static const struct dev_pm_ops usb_dmac_pm = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
- NULL)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume, NULL)
};
/* -----------------------------------------------------------------------------
@@ -894,7 +891,7 @@ MODULE_DEVICE_TABLE(of, usb_dmac_of_ids);
static struct platform_driver usb_dmac_driver = {
.driver = {
- .pm = &usb_dmac_pm,
+ .pm = pm_ptr(&usb_dmac_pm),
.name = "usb-dmac",
.of_match_table = usb_dmac_of_ids,
},
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 187a090463ce..6207e0b185e1 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -1311,4 +1311,3 @@ MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DMA driver for Spreadtrum");
MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
MODULE_AUTHOR("Eric Long <eric.long@spreadtrum.com>");
-MODULE_ALIAS("platform:sprd-dma");
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
index c65ee0c7bfbd..dc2ab7d16cf2 100644
--- a/drivers/dma/st_fdma.c
+++ b/drivers/dma/st_fdma.c
@@ -866,4 +866,3 @@ MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
-MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/dma/stm32/stm32-dma.c b/drivers/dma/stm32/stm32-dma.c
index 917f8e922373..04389936c8a6 100644
--- a/drivers/dma/stm32/stm32-dma.c
+++ b/drivers/dma/stm32/stm32-dma.c
@@ -613,7 +613,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
reg->dma_scr |= STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
- dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
@@ -676,7 +676,7 @@ static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan)
chan->status = DMA_PAUSED;
- dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: paused\n", &chan->vchan);
}
static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
@@ -728,7 +728,7 @@ static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
dma_scr |= STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
- dev_dbg(chan2dev(chan), "vchan %pK: reconfigured after pause/resume\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: reconfigured after pause/resume\n", &chan->vchan);
}
static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
@@ -744,7 +744,7 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
/* cyclic while CIRC/DBM disable => post resume reconfiguration needed */
if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)))
stm32_dma_post_resume_reconfigure(chan);
- else if (scr & STM32_DMA_SCR_DBM)
+ else if ((scr & STM32_DMA_SCR_DBM) && chan->desc->num_sgs > 2)
stm32_dma_configure_next_sg(chan);
} else {
chan->busy = false;
@@ -820,7 +820,7 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
spin_lock_irqsave(&chan->vchan.lock, flags);
if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
- dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
stm32_dma_start_transfer(chan);
}
@@ -922,7 +922,7 @@ static int stm32_dma_resume(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
- dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: resumed\n", &chan->vchan);
return 0;
}
diff --git a/drivers/dma/stm32/stm32-dma3.c b/drivers/dma/stm32/stm32-dma3.c
index 0c6c4258b195..50e7106c5cb7 100644
--- a/drivers/dma/stm32/stm32-dma3.c
+++ b/drivers/dma/stm32/stm32-dma3.c
@@ -801,7 +801,7 @@ static void stm32_dma3_chan_start(struct stm32_dma3_chan *chan)
chan->dma_status = DMA_IN_PROGRESS;
- dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
static int stm32_dma3_chan_suspend(struct stm32_dma3_chan *chan, bool susp)
@@ -1452,7 +1452,7 @@ static int stm32_dma3_pause(struct dma_chan *c)
chan->dma_status = DMA_PAUSED;
- dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: paused\n", &chan->vchan);
return 0;
}
@@ -1465,7 +1465,7 @@ static int stm32_dma3_resume(struct dma_chan *c)
chan->dma_status = DMA_IN_PROGRESS;
- dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: resumed\n", &chan->vchan);
return 0;
}
@@ -1490,7 +1490,7 @@ static int stm32_dma3_terminate_all(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
vchan_dma_desc_free_list(&chan->vchan, &head);
- dev_dbg(chan2dev(chan), "vchan %pK: terminated\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: terminated\n", &chan->vchan);
return 0;
}
@@ -1543,7 +1543,7 @@ static void stm32_dma3_issue_pending(struct dma_chan *c)
spin_lock_irqsave(&chan->vchan.lock, flags);
if (vchan_issue_pending(&chan->vchan) && !chan->swdesc) {
- dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
stm32_dma3_chan_start(chan);
}
diff --git a/drivers/dma/stm32/stm32-mdma.c b/drivers/dma/stm32/stm32-mdma.c
index e6d525901de7..080c1c725216 100644
--- a/drivers/dma/stm32/stm32-mdma.c
+++ b/drivers/dma/stm32/stm32-mdma.c
@@ -1187,7 +1187,7 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
chan->busy = true;
- dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
}
static void stm32_mdma_issue_pending(struct dma_chan *c)
@@ -1200,7 +1200,7 @@ static void stm32_mdma_issue_pending(struct dma_chan *c)
if (!vchan_issue_pending(&chan->vchan))
goto end;
- dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
if (!chan->desc && !chan->busy)
stm32_mdma_start_transfer(chan);
@@ -1220,7 +1220,7 @@ static int stm32_mdma_pause(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
if (!ret)
- dev_dbg(chan2dev(chan), "vchan %pK: pause\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: pause\n", &chan->vchan);
return ret;
}
@@ -1261,7 +1261,7 @@ static int stm32_mdma_resume(struct dma_chan *c)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
- dev_dbg(chan2dev(chan), "vchan %pK: resume\n", &chan->vchan);
+ dev_dbg(chan2dev(chan), "vchan %p: resume\n", &chan->vchan);
return 0;
}
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 24796aaaddfa..00d2fd38d17f 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -1249,11 +1249,10 @@ static int sun4i_dma_probe(struct platform_device *pdev)
if (priv->irq < 0)
return priv->irq;
- priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "No clock specified\n");
- return PTR_ERR(priv->clk);
- }
+ priv->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
+ "Couldn't start the clock\n");
if (priv->cfg->has_reset) {
priv->rst = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
@@ -1328,12 +1327,6 @@ static int sun4i_dma_probe(struct platform_device *pdev)
vchan_init(&vchan->vc, &priv->slave);
}
- ret = clk_prepare_enable(priv->clk);
- if (ret) {
- dev_err(&pdev->dev, "Couldn't enable the clock\n");
- return ret;
- }
-
/*
* Make sure the IRQs are all disabled and accounted for. The bootloader
* likes to leave these dirty
@@ -1343,33 +1336,23 @@ static int sun4i_dma_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
0, dev_name(&pdev->dev), priv);
- if (ret) {
- dev_err(&pdev->dev, "Cannot request IRQ\n");
- goto err_clk_disable;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Cannot request IRQ\n");
- ret = dma_async_device_register(&priv->slave);
- if (ret) {
- dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
- goto err_clk_disable;
- }
+ ret = dmaenginem_async_device_register(&priv->slave);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register DMA engine device\n");
ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
priv);
- if (ret) {
- dev_err(&pdev->dev, "of_dma_controller_register failed\n");
- goto err_dma_unregister;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register translation function\n");
dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");
return 0;
-
-err_dma_unregister:
- dma_async_device_unregister(&priv->slave);
-err_clk_disable:
- clk_disable_unprepare(priv->clk);
- return ret;
}
static void sun4i_dma_remove(struct platform_device *pdev)
@@ -1380,9 +1363,6 @@ static void sun4i_dma_remove(struct platform_device *pdev)
disable_irq(priv->irq);
of_dma_controller_free(pdev->dev.of_node);
- dma_async_device_unregister(&priv->slave);
-
- clk_disable_unprepare(priv->clk);
}
static struct sun4i_dma_config sun4i_a10_dma_cfg = {
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index fad896ff29a2..d0e8bb27a03b 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -1230,7 +1230,6 @@ static struct platform_driver tegra_admac_driver = {
module_platform_driver(tegra_admac_driver);
-MODULE_ALIAS("platform:tegra210-adma");
MODULE_DESCRIPTION("NVIDIA Tegra ADMA driver");
MODULE_AUTHOR("Dara Ramesh <dramesh@nvidia.com>");
MODULE_AUTHOR("Jon Hunter <jonathanh@nvidia.com>");
diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig
index 2adc2cca10e9..dbf168146d35 100644
--- a/drivers/dma/ti/Kconfig
+++ b/drivers/dma/ti/Kconfig
@@ -17,7 +17,7 @@ config TI_EDMA
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select TI_DMA_CROSSBAR if (ARCH_OMAP || COMPILE_TEST)
- default y
+ default ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE
help
Enable support for the TI EDMA (Enhanced DMA) controller. This DMA
engine is found on TI DaVinci, AM33xx, AM43xx, DRA7xx and Keystone 2
@@ -29,7 +29,7 @@ config DMA_OMAP
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select TI_DMA_CROSSBAR if (SOC_DRA7XX || COMPILE_TEST)
- default y
+ default ARCH_OMAP
help
Enable support for the TI sDMA (System DMA or DMA4) controller. This
DMA engine is found on OMAP and DRA7xx parts.
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 3ed406f08c44..552be71db6c4 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -2064,8 +2064,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
* priority. So Q0 is the highest priority queue and the last queue has
* the lowest priority.
*/
- queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
- GFP_KERNEL);
+ queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
+ sizeof(*queue_priority_map), GFP_KERNEL);
if (!queue_priority_map)
return -ENOMEM;
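
The devm_kcalloc() change above applies the preferred kernel style of sizing an allocation by the dereferenced pointer rather than a hard-coded type, so the two stay in sync if the element type ever changes. Compare:

s8 *map;

map = devm_kcalloc(dev, n, sizeof(s8), GFP_KERNEL);	/* breaks silently if map changes type */
map = devm_kcalloc(dev, n, sizeof(*map), GFP_KERNEL);	/* always matches */
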
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index a34d8f0ceed8..fabff602065f 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2173,6 +2173,99 @@ error:
}
/**
+ * xilinx_dma_prep_peripheral_dma_vec - prepare descriptors for a DMA_SLAVE
+ * transaction from DMA vectors
+ * @dchan: DMA channel
+ * @vecs: Array of DMA vectors that should be transferred
+ * @nb: number of entries in @vecs
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_peripheral_dma_vec(
+ struct dma_chan *dchan, const struct dma_vec *vecs, size_t nb,
+ enum dma_transfer_direction direction, unsigned long flags)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_axidma_tx_segment *segment, *head, *prev = NULL;
+ size_t copy;
+ size_t sg_used;
+ unsigned int i;
+
+ if (!is_slave_direction(direction) || direction != chan->direction)
+ return NULL;
+
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ /* Build transactions using information from DMA vectors */
+ for (i = 0; i < nb; i++) {
+ sg_used = 0;
+
+ /* Loop until the entire dma_vec entry is used */
+ while (sg_used < vecs[i].len) {
+ struct xilinx_axidma_desc_hw *hw;
+
+ /* Get a free segment */
+ segment = xilinx_axidma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it is less than the hw limit
+ */
+ copy = xilinx_dma_calc_copysize(chan, vecs[i].len,
+ sg_used);
+ hw = &segment->hw;
+
+ /* Fill in the descriptor */
+ xilinx_axidma_buf(chan, hw, vecs[i].addr, sg_used, 0);
+ hw->control = copy;
+
+ if (prev)
+ prev->hw.next_desc = segment->phys;
+
+ prev = segment;
+ sg_used += copy;
+
+ /*
+ * Insert the segment into the descriptor segments
+ * list.
+ */
+ list_add_tail(&segment->node, &desc->segments);
+ }
+ }
+
+ head = list_first_entry(&desc->segments, struct xilinx_axidma_tx_segment, node);
+ desc->async_tx.phys = head->phys;
+
+ /* For the last DMA_MEM_TO_DEV transfer, set EOP */
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ segment->hw.control |= XILINX_DMA_BD_SOP;
+ segment = list_last_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ segment->hw.control |= XILINX_DMA_BD_EOP;
+ }
+
+ if (chan->xdev->has_axistream_connected)
+ desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops;
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
+
+/**
* xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
* @dchan: DMA channel
* @sgl: scatterlist to transfer to/from
@@ -3180,6 +3273,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->common.device_config = xilinx_dma_device_config;
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
+ xdev->common.device_prep_peripheral_dma_vec = xilinx_dma_prep_peripheral_dma_vec;
xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
xdev->common.device_prep_dma_cyclic =
xilinx_dma_prep_dma_cyclic;
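
With device_prep_peripheral_dma_vec wired up above, a dmaengine client can hand the AXI DMA an array of pre-mapped vectors instead of a scatterlist. A hedged client-side sketch (buffer variables hypothetical):

struct dma_vec vecs[2] = {
	{ .addr = buf0_dma, .len = buf0_len },
	{ .addr = buf1_dma, .len = buf1_len },
};
struct dma_async_tx_descriptor *tx;

tx = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (tx)
	dmaengine_submit(tx);
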
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index d05fc5fcc77d..f7e584de4335 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -1173,9 +1173,9 @@ static void zynqmp_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&zdev->common);
zynqmp_dma_chan_remove(zdev->chan);
- pm_runtime_disable(zdev->dev);
- if (!pm_runtime_enabled(zdev->dev))
+ if (pm_runtime_active(zdev->dev))
zynqmp_dma_runtime_suspend(zdev->dev);
+ pm_runtime_disable(zdev->dev);
}
static const struct of_device_id zynqmp_dma_of_match[] = {
@@ -1193,6 +1193,7 @@ static struct platform_driver zynqmp_dma_driver = {
},
.probe = zynqmp_dma_probe,
.remove = zynqmp_dma_remove,
+ .shutdown = zynqmp_dma_remove,
};
module_platform_driver(zynqmp_dma_driver);
diff --git a/drivers/dpll/Kconfig b/drivers/dpll/Kconfig
index 20607ed54243..ade872c915ac 100644
--- a/drivers/dpll/Kconfig
+++ b/drivers/dpll/Kconfig
@@ -3,5 +3,11 @@
# Generic DPLL drivers configuration
#
+menu "DPLL device support"
+
config DPLL
bool
+
+source "drivers/dpll/zl3073x/Kconfig"
+
+endmenu
diff --git a/drivers/dpll/Makefile b/drivers/dpll/Makefile
index 2e5b27850110..9e7a3a3e592e 100644
--- a/drivers/dpll/Makefile
+++ b/drivers/dpll/Makefile
@@ -7,3 +7,5 @@ obj-$(CONFIG_DPLL) += dpll.o
dpll-y += dpll_core.o
dpll-y += dpll_netlink.o
dpll-y += dpll_nl.o
+
+obj-$(CONFIG_ZL3073X) += zl3073x/
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index 20bdc52f63a5..a461095efd8a 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -506,6 +506,7 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
refcount_set(&pin->refcount, 1);
xa_init_flags(&pin->dpll_refs, XA_FLAGS_ALLOC);
xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
+ xa_init_flags(&pin->ref_sync_pins, XA_FLAGS_ALLOC);
ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b,
&dpll_pin_xa_id, GFP_KERNEL);
if (ret < 0)
@@ -514,6 +515,7 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
err_xa_alloc:
xa_destroy(&pin->dpll_refs);
xa_destroy(&pin->parent_refs);
+ xa_destroy(&pin->ref_sync_pins);
dpll_pin_prop_free(&pin->prop);
err_pin_prop:
kfree(pin);
@@ -595,6 +597,7 @@ void dpll_pin_put(struct dpll_pin *pin)
xa_erase(&dpll_pin_xa, pin->id);
xa_destroy(&pin->dpll_refs);
xa_destroy(&pin->parent_refs);
+ xa_destroy(&pin->ref_sync_pins);
dpll_pin_prop_free(&pin->prop);
kfree_rcu(pin, rcu);
}
@@ -659,11 +662,26 @@ dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
}
EXPORT_SYMBOL_GPL(dpll_pin_register);
+static void dpll_pin_ref_sync_pair_del(u32 ref_sync_pin_id)
+{
+ struct dpll_pin *pin, *ref_sync_pin;
+ unsigned long i;
+
+ xa_for_each(&dpll_pin_xa, i, pin) {
+ ref_sync_pin = xa_load(&pin->ref_sync_pins, ref_sync_pin_id);
+ if (ref_sync_pin) {
+ xa_erase(&pin->ref_sync_pins, ref_sync_pin_id);
+ __dpll_pin_change_ntf(pin);
+ }
+ }
+}
+
static void
__dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
const struct dpll_pin_ops *ops, void *priv, void *cookie)
{
ASSERT_DPLL_PIN_REGISTERED(pin);
+ dpll_pin_ref_sync_pair_del(pin->id);
dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv, cookie);
if (xa_empty(&pin->dpll_refs))
@@ -783,6 +801,33 @@ void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
}
EXPORT_SYMBOL_GPL(dpll_pin_on_pin_unregister);
+/**
+ * dpll_pin_ref_sync_pair_add - create a reference sync signal pin pair
+ * @pin: pin which produces the base frequency
+ * @ref_sync_pin: pin which produces the sync signal
+ *
+ * Once the pins are paired, user space can configure the reference sync
+ * pair.
+ * Context: Acquires a lock (dpll_lock)
+ * Return:
+ * * 0 on success
+ * * negative - error value
+ */
+int dpll_pin_ref_sync_pair_add(struct dpll_pin *pin,
+ struct dpll_pin *ref_sync_pin)
+{
+ int ret;
+
+ mutex_lock(&dpll_lock);
+ ret = xa_insert(&pin->ref_sync_pins, ref_sync_pin->id,
+ ref_sync_pin, GFP_KERNEL);
+ __dpll_pin_change_ntf(pin);
+ mutex_unlock(&dpll_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dpll_pin_ref_sync_pair_add);
+
static struct dpll_device_registration *
dpll_device_registration_first(struct dpll_device *dpll)
{
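
A driver that registered both pins would pair them once at setup so user space can then toggle the reference sync state over netlink. Hedged driver-side sketch (pin variables hypothetical):

/* base_pin outputs the base frequency, sync_pin the sync signal */
err = dpll_pin_ref_sync_pair_add(base_pin, sync_pin);
if (err)
	dev_err(dev, "failed to pair reference sync pins: %d\n", err);
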
diff --git a/drivers/dpll/dpll_core.h b/drivers/dpll/dpll_core.h
index 2b6d8ef1cdf3..8ce969bbeb64 100644
--- a/drivers/dpll/dpll_core.h
+++ b/drivers/dpll/dpll_core.h
@@ -44,8 +44,8 @@ struct dpll_device {
* @module: module of creator
* @dpll_refs: hold referencees to dplls pin was registered with
* @parent_refs: hold references to parent pins pin was registered with
+ * @ref_sync_pins: hold references to pins for Reference SYNC feature
* @prop: pin properties copied from the registerer
- * @rclk_dev_name: holds name of device when pin can recover clock from it
* @refcount: refcount
* @rcu: rcu_head for kfree_rcu()
**/
@@ -56,6 +56,7 @@ struct dpll_pin {
struct module *module;
struct xarray dpll_refs;
struct xarray parent_refs;
+ struct xarray ref_sync_pins;
struct dpll_pin_properties prop;
refcount_t refcount;
struct rcu_head rcu;
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index c130f87147fa..64944f601ee5 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -48,6 +48,24 @@ dpll_msg_add_dev_parent_handle(struct sk_buff *msg, u32 id)
return 0;
}
+static bool dpll_pin_available(struct dpll_pin *pin)
+{
+ struct dpll_pin_ref *par_ref;
+ unsigned long i;
+
+ if (!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED))
+ return false;
+ xa_for_each(&pin->parent_refs, i, par_ref)
+ if (xa_get_mark(&dpll_pin_xa, par_ref->pin->id,
+ DPLL_REGISTERED))
+ return true;
+ xa_for_each(&pin->dpll_refs, i, par_ref)
+ if (xa_get_mark(&dpll_device_xa, par_ref->dpll->id,
+ DPLL_REGISTERED))
+ return true;
+ return false;
+}
+
/**
* dpll_msg_add_pin_handle - attach pin handle attribute to a given message
* @msg: pointer to sk_buff message to attach a pin handle
@@ -127,6 +145,47 @@ dpll_msg_add_mode_supported(struct sk_buff *msg, struct dpll_device *dpll,
}
static int
+dpll_msg_add_phase_offset_monitor(struct sk_buff *msg, struct dpll_device *dpll,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ enum dpll_feature_state state;
+ int ret;
+
+ if (ops->phase_offset_monitor_set && ops->phase_offset_monitor_get) {
+ ret = ops->phase_offset_monitor_get(dpll, dpll_priv(dpll),
+ &state, extack);
+ if (ret)
+ return ret;
+ if (nla_put_u32(msg, DPLL_A_PHASE_OFFSET_MONITOR, state))
+ return -EMSGSIZE;
+ }
+
+ return 0;
+}
+
+static int
+dpll_msg_add_phase_offset_avg_factor(struct sk_buff *msg,
+ struct dpll_device *dpll,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ u32 factor;
+ int ret;
+
+ if (ops->phase_offset_avg_factor_get) {
+ ret = ops->phase_offset_avg_factor_get(dpll, dpll_priv(dpll),
+ &factor, extack);
+ if (ret)
+ return ret;
+ if (nla_put_u32(msg, DPLL_A_PHASE_OFFSET_AVG_FACTOR, factor))
+ return -EMSGSIZE;
+ }
+
+ return 0;
+}
+
+static int
dpll_msg_add_lock_status(struct sk_buff *msg, struct dpll_device *dpll,
struct netlink_ext_ack *extack)
{
@@ -173,8 +232,8 @@ static int
dpll_msg_add_clock_quality_level(struct sk_buff *msg, struct dpll_device *dpll,
struct netlink_ext_ack *extack)
{
+ DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1) = { 0 };
const struct dpll_device_ops *ops = dpll_device_ops(dpll);
- DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX) = { 0 };
enum dpll_clock_quality_level ql;
int ret;
@@ -183,7 +242,7 @@ dpll_msg_add_clock_quality_level(struct sk_buff *msg, struct dpll_device *dpll,
ret = ops->clock_quality_level_get(dpll, dpll_priv(dpll), qls, extack);
if (ret)
return ret;
- for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX)
+ for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1)
if (nla_put_u32(msg, DPLL_A_CLOCK_QUALITY_LEVEL, ql))
return -EMSGSIZE;
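
The off-by-one fix above accounts for for_each_set_bit()'s size argument being exclusive: a bitmap sized DPLL_CLOCK_QUALITY_LEVEL_MAX could never report the highest quality level. Sketch:

DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1) = { 0 };

set_bit(DPLL_CLOCK_QUALITY_LEVEL_MAX, qls);
/* visited only because the bound below is MAX + 1 (exclusive) */
for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1)
	pr_info("quality level %d set\n", ql);
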
@@ -408,6 +467,47 @@ nest_cancel:
return -EMSGSIZE;
}
+static int
+dpll_msg_add_pin_ref_sync(struct sk_buff *msg, struct dpll_pin *pin,
+ struct dpll_pin_ref *ref,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
+ struct dpll_device *dpll = ref->dpll;
+ void *pin_priv, *ref_sync_pin_priv;
+ struct dpll_pin *ref_sync_pin;
+ enum dpll_pin_state state;
+ struct nlattr *nest;
+ unsigned long index;
+ int ret;
+
+ pin_priv = dpll_pin_on_dpll_priv(dpll, pin);
+ xa_for_each(&pin->ref_sync_pins, index, ref_sync_pin) {
+ if (!dpll_pin_available(ref_sync_pin))
+ continue;
+ ref_sync_pin_priv = dpll_pin_on_dpll_priv(dpll, ref_sync_pin);
+ if (WARN_ON(!ops->ref_sync_get))
+ return -EOPNOTSUPP;
+ ret = ops->ref_sync_get(pin, pin_priv, ref_sync_pin,
+ ref_sync_pin_priv, &state, extack);
+ if (ret)
+ return ret;
+ nest = nla_nest_start(msg, DPLL_A_PIN_REFERENCE_SYNC);
+ if (!nest)
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, DPLL_A_PIN_ID, ref_sync_pin->id))
+ goto nest_cancel;
+ if (nla_put_u32(msg, DPLL_A_PIN_STATE, state))
+ goto nest_cancel;
+ nla_nest_end(msg, nest);
+ }
+ return 0;
+
+nest_cancel:
+ nla_nest_cancel(msg, nest);
+ return -EMSGSIZE;
+}
+
static bool dpll_pin_is_freq_supported(struct dpll_pin *pin, u32 freq)
{
int fs;
@@ -537,6 +637,10 @@ dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin,
ret = dpll_msg_add_pin_freq(msg, pin, ref, extack);
if (ret)
return ret;
+ if (prop->phase_gran &&
+ nla_put_u32(msg, DPLL_A_PIN_PHASE_ADJUST_GRAN,
+ prop->phase_gran))
+ return -EMSGSIZE;
if (nla_put_s32(msg, DPLL_A_PIN_PHASE_ADJUST_MIN,
prop->phase_range.min))
return -EMSGSIZE;
@@ -552,6 +656,10 @@ dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin,
ret = dpll_msg_add_pin_esync(msg, pin, ref, extack);
if (ret)
return ret;
+ if (!xa_empty(&pin->ref_sync_pins))
+ ret = dpll_msg_add_pin_ref_sync(msg, pin, ref, extack);
+ if (ret)
+ return ret;
if (xa_empty(&pin->parent_refs))
ret = dpll_msg_add_pin_dplls(msg, pin, extack);
else
@@ -591,6 +699,12 @@ dpll_device_get_one(struct dpll_device *dpll, struct sk_buff *msg,
return ret;
if (nla_put_u32(msg, DPLL_A_TYPE, dpll->type))
return -EMSGSIZE;
+ ret = dpll_msg_add_phase_offset_monitor(msg, dpll, extack);
+ if (ret)
+ return ret;
+ ret = dpll_msg_add_phase_offset_avg_factor(msg, dpll, extack);
+ if (ret)
+ return ret;
return 0;
}
@@ -642,24 +756,6 @@ __dpll_device_change_ntf(struct dpll_device *dpll)
return dpll_device_event_send(DPLL_CMD_DEVICE_CHANGE_NTF, dpll);
}
-static bool dpll_pin_available(struct dpll_pin *pin)
-{
- struct dpll_pin_ref *par_ref;
- unsigned long i;
-
- if (!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED))
- return false;
- xa_for_each(&pin->parent_refs, i, par_ref)
- if (xa_get_mark(&dpll_pin_xa, par_ref->pin->id,
- DPLL_REGISTERED))
- return true;
- xa_for_each(&pin->dpll_refs, i, par_ref)
- if (xa_get_mark(&dpll_device_xa, par_ref->dpll->id,
- DPLL_REGISTERED))
- return true;
- return false;
-}
-
/**
* dpll_device_change_ntf - notify that the dpll device has been changed
* @dpll: registered dpll pointer
@@ -722,7 +818,7 @@ int dpll_pin_delete_ntf(struct dpll_pin *pin)
return dpll_pin_event_send(DPLL_CMD_PIN_DELETE_NTF, pin);
}
-static int __dpll_pin_change_ntf(struct dpll_pin *pin)
+int __dpll_pin_change_ntf(struct dpll_pin *pin)
{
return dpll_pin_event_send(DPLL_CMD_PIN_CHANGE_NTF, pin);
}
@@ -747,6 +843,48 @@ int dpll_pin_change_ntf(struct dpll_pin *pin)
EXPORT_SYMBOL_GPL(dpll_pin_change_ntf);
static int
+dpll_phase_offset_monitor_set(struct dpll_device *dpll, struct nlattr *a,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ enum dpll_feature_state state = nla_get_u32(a), old_state;
+ int ret;
+
+ if (!(ops->phase_offset_monitor_set && ops->phase_offset_monitor_get)) {
+ NL_SET_ERR_MSG_ATTR(extack, a, "dpll device not capable of phase offset monitor");
+ return -EOPNOTSUPP;
+ }
+ ret = ops->phase_offset_monitor_get(dpll, dpll_priv(dpll), &old_state,
+ extack);
+ if (ret) {
+ NL_SET_ERR_MSG(extack, "unable to get current state of phase offset monitor");
+ return ret;
+ }
+ if (state == old_state)
+ return 0;
+
+ return ops->phase_offset_monitor_set(dpll, dpll_priv(dpll), state,
+ extack);
+}
+
+static int
+dpll_phase_offset_avg_factor_set(struct dpll_device *dpll, struct nlattr *a,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ u32 factor = nla_get_u32(a);
+
+ if (!ops->phase_offset_avg_factor_set) {
+ NL_SET_ERR_MSG_ATTR(extack, a,
+ "device not capable of changing phase offset average factor");
+ return -EOPNOTSUPP;
+ }
+
+ return ops->phase_offset_avg_factor_set(dpll, dpll_priv(dpll), factor,
+ extack);
+}
+
+static int
dpll_pin_freq_set(struct dpll_pin *pin, struct nlattr *a,
struct netlink_ext_ack *extack)
{
@@ -888,6 +1026,108 @@ rollback:
}
static int
+dpll_pin_ref_sync_state_set(struct dpll_pin *pin,
+ unsigned long ref_sync_pin_idx,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct dpll_pin_ref *ref, *failed;
+ const struct dpll_pin_ops *ops;
+ enum dpll_pin_state old_state;
+ struct dpll_pin *ref_sync_pin;
+ struct dpll_device *dpll;
+ unsigned long i;
+ int ret;
+
+ ref_sync_pin = xa_find(&pin->ref_sync_pins, &ref_sync_pin_idx,
+ ULONG_MAX, XA_PRESENT);
+ if (!ref_sync_pin) {
+ NL_SET_ERR_MSG(extack, "reference sync pin not found");
+ return -EINVAL;
+ }
+ if (!dpll_pin_available(ref_sync_pin)) {
+ NL_SET_ERR_MSG(extack, "reference sync pin not available");
+ return -EINVAL;
+ }
+ ref = dpll_xa_ref_dpll_first(&pin->dpll_refs);
+ ASSERT_NOT_NULL(ref);
+ ops = dpll_pin_ops(ref);
+ if (!ops->ref_sync_set || !ops->ref_sync_get) {
+ NL_SET_ERR_MSG(extack, "reference sync not supported by this pin");
+ return -EOPNOTSUPP;
+ }
+ dpll = ref->dpll;
+ ret = ops->ref_sync_get(pin, dpll_pin_on_dpll_priv(dpll, pin),
+ ref_sync_pin,
+ dpll_pin_on_dpll_priv(dpll, ref_sync_pin),
+ &old_state, extack);
+ if (ret) {
+ NL_SET_ERR_MSG(extack, "unable to get old reference sync state");
+ return ret;
+ }
+ if (state == old_state)
+ return 0;
+ xa_for_each(&pin->dpll_refs, i, ref) {
+ ops = dpll_pin_ops(ref);
+ dpll = ref->dpll;
+ ret = ops->ref_sync_set(pin, dpll_pin_on_dpll_priv(dpll, pin),
+ ref_sync_pin,
+ dpll_pin_on_dpll_priv(dpll,
+ ref_sync_pin),
+ state, extack);
+ if (ret) {
+ failed = ref;
+ NL_SET_ERR_MSG_FMT(extack, "reference sync set failed for dpll_id:%u",
+ dpll->id);
+ goto rollback;
+ }
+ }
+ __dpll_pin_change_ntf(pin);
+
+ return 0;
+
+rollback:
+ xa_for_each(&pin->dpll_refs, i, ref) {
+ if (ref == failed)
+ break;
+ ops = dpll_pin_ops(ref);
+ dpll = ref->dpll;
+ if (ops->ref_sync_set(pin, dpll_pin_on_dpll_priv(dpll, pin),
+ ref_sync_pin,
+ dpll_pin_on_dpll_priv(dpll, ref_sync_pin),
+ old_state, extack))
+ NL_SET_ERR_MSG(extack, "set reference sync rollback failed");
+ }
+ return ret;
+}
+
+static int
+dpll_pin_ref_sync_set(struct dpll_pin *pin, struct nlattr *nest,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[DPLL_A_PIN_MAX + 1];
+ enum dpll_pin_state state;
+ u32 sync_pin_id;
+
+ nla_parse_nested(tb, DPLL_A_PIN_MAX, nest,
+ dpll_reference_sync_nl_policy, extack);
+ if (!tb[DPLL_A_PIN_ID]) {
+ NL_SET_ERR_MSG(extack, "sync pin id expected");
+ return -EINVAL;
+ }
+ sync_pin_id = nla_get_u32(tb[DPLL_A_PIN_ID]);
+
+ if (!tb[DPLL_A_PIN_STATE]) {
+ NL_SET_ERR_MSG(extack, "sync pin state expected");
+ return -EINVAL;
+ }
+ state = nla_get_u32(tb[DPLL_A_PIN_STATE]);
+
+ return dpll_pin_ref_sync_state_set(pin, sync_pin_id, state, extack);
+}
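+/* For illustration only: the nested attribute parsed above is expected to
+ * carry two attributes validated by dpll_reference_sync_nl_policy:
+ *
+ *   DPLL_A_PIN_REFERENCE_SYNC (nest)
+ *     DPLL_A_PIN_ID (u32) - id of the reference sync pin
+ *     DPLL_A_PIN_STATE (u32) - enum dpll_pin_state to be set
+ */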
+
+static int
dpll_pin_on_pin_state_set(struct dpll_pin *pin, u32 parent_idx,
enum dpll_pin_state state,
struct netlink_ext_ack *extack)
@@ -1025,7 +1265,13 @@ dpll_pin_phase_adj_set(struct dpll_pin *pin, struct nlattr *phase_adj_attr,
if (phase_adj > pin->prop.phase_range.max ||
phase_adj < pin->prop.phase_range.min) {
NL_SET_ERR_MSG_ATTR(extack, phase_adj_attr,
- "phase adjust value not supported");
+ "phase adjust value of out range");
+ return -EINVAL;
+ }
+ if (pin->prop.phase_gran && phase_adj % (s32)pin->prop.phase_gran) {
+ NL_SET_ERR_MSG_ATTR_FMT(extack, phase_adj_attr,
+ "phase adjust value not multiple of %u",
+ pin->prop.phase_gran);
return -EINVAL;
}
@@ -1193,6 +1439,11 @@ dpll_pin_set_from_nlattr(struct dpll_pin *pin, struct genl_info *info)
if (ret)
return ret;
break;
+ case DPLL_A_PIN_REFERENCE_SYNC:
+ ret = dpll_pin_ref_sync_set(pin, a, info->extack);
+ if (ret)
+ return ret;
+ break;
}
}
@@ -1318,16 +1569,18 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
return -EMSGSIZE;
}
pin = dpll_pin_find_from_nlattr(info);
- if (!IS_ERR(pin)) {
- if (!dpll_pin_available(pin)) {
- nlmsg_free(msg);
- return -ENODEV;
- }
- ret = dpll_msg_add_pin_handle(msg, pin);
- if (ret) {
- nlmsg_free(msg);
- return ret;
- }
+ if (IS_ERR(pin)) {
+ nlmsg_free(msg);
+ return PTR_ERR(pin);
+ }
+ if (!dpll_pin_available(pin)) {
+ nlmsg_free(msg);
+ return -ENODEV;
+ }
+ ret = dpll_msg_add_pin_handle(msg, pin);
+ if (ret) {
+ nlmsg_free(msg);
+ return ret;
}
genlmsg_end(msg, hdr);
@@ -1494,12 +1747,14 @@ int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info)
}
dpll = dpll_device_find_from_nlattr(info);
- if (!IS_ERR(dpll)) {
- ret = dpll_msg_add_dev_handle(msg, dpll);
- if (ret) {
- nlmsg_free(msg);
- return ret;
- }
+ if (IS_ERR(dpll)) {
+ nlmsg_free(msg);
+ return PTR_ERR(dpll);
+ }
+ ret = dpll_msg_add_dev_handle(msg, dpll);
+ if (ret) {
+ nlmsg_free(msg);
+ return ret;
}
genlmsg_end(msg, hdr);
@@ -1533,12 +1788,40 @@ int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info)
return genlmsg_reply(msg, info);
}
-int dpll_nl_device_set_doit(struct sk_buff *skb, struct genl_info *info)
+static int
+dpll_set_from_nlattr(struct dpll_device *dpll, struct genl_info *info)
{
- /* placeholder for set command */
+ struct nlattr *a;
+ int rem, ret;
+
+ nla_for_each_attr(a, genlmsg_data(info->genlhdr),
+ genlmsg_len(info->genlhdr), rem) {
+ switch (nla_type(a)) {
+ case DPLL_A_PHASE_OFFSET_MONITOR:
+ ret = dpll_phase_offset_monitor_set(dpll, a,
+ info->extack);
+ if (ret)
+ return ret;
+ break;
+ case DPLL_A_PHASE_OFFSET_AVG_FACTOR:
+ ret = dpll_phase_offset_avg_factor_set(dpll, a,
+ info->extack);
+ if (ret)
+ return ret;
+ break;
+ }
+ }
+
return 0;
}
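+/* For illustration only, a DPLL_CMD_DEVICE_SET request handled by the loop
+ * above may carry (sketch): DPLL_A_ID (u32) to select the device (consumed
+ * by the pre-doit lookup), plus any of DPLL_A_PHASE_OFFSET_MONITOR (u32,
+ * enum dpll_feature_state) and DPLL_A_PHASE_OFFSET_AVG_FACTOR (u32).
+ */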
+int dpll_nl_device_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct dpll_device *dpll = info->user_ptr[0];
+
+ return dpll_set_from_nlattr(dpll, info);
+}
+
int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
struct dpll_dump_ctx *ctx = dpll_dump_context(cb);
diff --git a/drivers/dpll/dpll_netlink.h b/drivers/dpll/dpll_netlink.h
index a9cfd55f57fc..dd28b56d27c5 100644
--- a/drivers/dpll/dpll_netlink.h
+++ b/drivers/dpll/dpll_netlink.h
@@ -11,3 +11,5 @@ int dpll_device_delete_ntf(struct dpll_device *dpll);
int dpll_pin_create_ntf(struct dpll_pin *pin);
int dpll_pin_delete_ntf(struct dpll_pin *pin);
+
+int __dpll_pin_change_ntf(struct dpll_pin *pin);
diff --git a/drivers/dpll/dpll_nl.c b/drivers/dpll/dpll_nl.c
index fe9b6893d261..36d11ff195df 100644
--- a/drivers/dpll/dpll_nl.c
+++ b/drivers/dpll/dpll_nl.c
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/dpll.yaml */
/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#include <net/netlink.h>
#include <net/genetlink.h>
@@ -24,6 +25,11 @@ const struct nla_policy dpll_pin_parent_pin_nl_policy[DPLL_A_PIN_STATE + 1] = {
[DPLL_A_PIN_STATE] = NLA_POLICY_RANGE(NLA_U32, 1, 3),
};
+const struct nla_policy dpll_reference_sync_nl_policy[DPLL_A_PIN_STATE + 1] = {
+ [DPLL_A_PIN_ID] = { .type = NLA_U32, },
+ [DPLL_A_PIN_STATE] = NLA_POLICY_RANGE(NLA_U32, 1, 3),
+};
+
/* DPLL_CMD_DEVICE_ID_GET - do */
static const struct nla_policy dpll_device_id_get_nl_policy[DPLL_A_TYPE + 1] = {
[DPLL_A_MODULE_NAME] = { .type = NLA_NUL_STRING, },
@@ -37,8 +43,10 @@ static const struct nla_policy dpll_device_get_nl_policy[DPLL_A_ID + 1] = {
};
/* DPLL_CMD_DEVICE_SET - do */
-static const struct nla_policy dpll_device_set_nl_policy[DPLL_A_ID + 1] = {
+static const struct nla_policy dpll_device_set_nl_policy[DPLL_A_PHASE_OFFSET_AVG_FACTOR + 1] = {
[DPLL_A_ID] = { .type = NLA_U32, },
+ [DPLL_A_PHASE_OFFSET_MONITOR] = NLA_POLICY_MAX(NLA_U32, 1),
+ [DPLL_A_PHASE_OFFSET_AVG_FACTOR] = { .type = NLA_U32, },
};
/* DPLL_CMD_PIN_ID_GET - do */
@@ -62,7 +70,7 @@ static const struct nla_policy dpll_pin_get_dump_nl_policy[DPLL_A_PIN_ID + 1] =
};
/* DPLL_CMD_PIN_SET - do */
-static const struct nla_policy dpll_pin_set_nl_policy[DPLL_A_PIN_ESYNC_FREQUENCY + 1] = {
+static const struct nla_policy dpll_pin_set_nl_policy[DPLL_A_PIN_REFERENCE_SYNC + 1] = {
[DPLL_A_PIN_ID] = { .type = NLA_U32, },
[DPLL_A_PIN_FREQUENCY] = { .type = NLA_U64, },
[DPLL_A_PIN_DIRECTION] = NLA_POLICY_RANGE(NLA_U32, 1, 2),
@@ -72,6 +80,7 @@ static const struct nla_policy dpll_pin_set_nl_policy[DPLL_A_PIN_ESYNC_FREQUENCY
[DPLL_A_PIN_PARENT_PIN] = NLA_POLICY_NESTED(dpll_pin_parent_pin_nl_policy),
[DPLL_A_PIN_PHASE_ADJUST] = { .type = NLA_S32, },
[DPLL_A_PIN_ESYNC_FREQUENCY] = { .type = NLA_U64, },
+ [DPLL_A_PIN_REFERENCE_SYNC] = NLA_POLICY_NESTED(dpll_reference_sync_nl_policy),
};
/* Ops table for dpll */
@@ -105,7 +114,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
.doit = dpll_nl_device_set_doit,
.post_doit = dpll_post_doit,
.policy = dpll_device_set_nl_policy,
- .maxattr = DPLL_A_ID,
+ .maxattr = DPLL_A_PHASE_OFFSET_AVG_FACTOR,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
{
@@ -139,7 +148,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
.doit = dpll_nl_pin_set_doit,
.post_doit = dpll_pin_post_doit,
.policy = dpll_pin_set_nl_policy,
- .maxattr = DPLL_A_PIN_ESYNC_FREQUENCY,
+ .maxattr = DPLL_A_PIN_REFERENCE_SYNC,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
};
diff --git a/drivers/dpll/dpll_nl.h b/drivers/dpll/dpll_nl.h
index f491262bee4f..7419679b6977 100644
--- a/drivers/dpll/dpll_nl.h
+++ b/drivers/dpll/dpll_nl.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/dpll.yaml */
/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _LINUX_DPLL_GEN_H
#define _LINUX_DPLL_GEN_H
@@ -14,6 +15,7 @@
/* Common nested types */
extern const struct nla_policy dpll_pin_parent_device_nl_policy[DPLL_A_PIN_PHASE_OFFSET + 1];
extern const struct nla_policy dpll_pin_parent_pin_nl_policy[DPLL_A_PIN_STATE + 1];
+extern const struct nla_policy dpll_reference_sync_nl_policy[DPLL_A_PIN_STATE + 1];
int dpll_lock_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
struct genl_info *info);
diff --git a/drivers/dpll/zl3073x/Kconfig b/drivers/dpll/zl3073x/Kconfig
new file mode 100644
index 000000000000..5bbca1400581
--- /dev/null
+++ b/drivers/dpll/zl3073x/Kconfig
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config ZL3073X
+ tristate "Microchip Azurite DPLL/PTP/SyncE devices" if COMPILE_TEST
+ depends on NET
+ select DPLL
+ select NET_DEVLINK
+ select REGMAP
+ help
+ This driver supports Microchip Azurite family DPLL/PTP/SyncE
+ devices that support up to 5 independent DPLL channels,
+ 10 input pins and up to 20 output pins.
+
+ To compile this driver as a module, choose M here. The module
+ will be called zl3073x.
+
+config ZL3073X_I2C
+ tristate "I2C bus implementation for Microchip Azurite devices"
+ depends on I2C && NET
+ select REGMAP_I2C
+ select ZL3073X
+ help
+ This is the I2C bus implementation for Microchip Azurite DPLL/PTP/SyncE
+ devices.
+
+ To compile this driver as a module, choose M here: the module will
+ be called zl3073x_i2c.
+
+config ZL3073X_SPI
+ tristate "SPI bus implementation for Microchip Azurite devices"
+ depends on NET && SPI
+ select REGMAP_SPI
+ select ZL3073X
+ help
+ This is the SPI bus implementation for Microchip Azurite DPLL/PTP/SyncE
+ devices.
+
+ To compile this driver as a module, choose M here: the module will
+ be called zl3073x_spi.
diff --git a/drivers/dpll/zl3073x/Makefile b/drivers/dpll/zl3073x/Makefile
new file mode 100644
index 000000000000..bd324c7fe710
--- /dev/null
+++ b/drivers/dpll/zl3073x/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_ZL3073X) += zl3073x.o
+zl3073x-objs := core.o devlink.o dpll.o flash.o fw.o \
+ out.o prop.o ref.o synth.o
+
+obj-$(CONFIG_ZL3073X_I2C) += zl3073x_i2c.o
+zl3073x_i2c-objs := i2c.o
+
+obj-$(CONFIG_ZL3073X_SPI) += zl3073x_spi.o
+zl3073x_spi-objs := spi.o
diff --git a/drivers/dpll/zl3073x/core.c b/drivers/dpll/zl3073x/core.c
new file mode 100644
index 000000000000..383e2397dd03
--- /dev/null
+++ b/drivers/dpll/zl3073x/core.c
@@ -0,0 +1,1080 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/regmap.h>
+#include <linux/sprintf.h>
+#include <linux/string_choices.h>
+#include <linux/unaligned.h>
+#include <net/devlink.h>
+
+#include "core.h"
+#include "devlink.h"
+#include "dpll.h"
+#include "regs.h"
+
+/* Chip IDs for zl30731 */
+static const u16 zl30731_ids[] = {
+ 0x0E93,
+ 0x1E93,
+ 0x2E93,
+};
+
+const struct zl3073x_chip_info zl30731_chip_info = {
+ .ids = zl30731_ids,
+ .num_ids = ARRAY_SIZE(zl30731_ids),
+ .num_channels = 1,
+};
+EXPORT_SYMBOL_NS_GPL(zl30731_chip_info, "ZL3073X");
+
+/* Chip IDs for zl30732 */
+static const u16 zl30732_ids[] = {
+ 0x0E30,
+ 0x0E94,
+ 0x1E94,
+ 0x1F60,
+ 0x2E94,
+ 0x3FC4,
+};
+
+const struct zl3073x_chip_info zl30732_chip_info = {
+ .ids = zl30732_ids,
+ .num_ids = ARRAY_SIZE(zl30732_ids),
+ .num_channels = 2,
+};
+EXPORT_SYMBOL_NS_GPL(zl30732_chip_info, "ZL3073X");
+
+/* Chip IDs for zl30733 */
+static const u16 zl30733_ids[] = {
+ 0x0E95,
+ 0x1E95,
+ 0x2E95,
+};
+
+const struct zl3073x_chip_info zl30733_chip_info = {
+ .ids = zl30733_ids,
+ .num_ids = ARRAY_SIZE(zl30733_ids),
+ .num_channels = 3,
+};
+EXPORT_SYMBOL_NS_GPL(zl30733_chip_info, "ZL3073X");
+
+/* Chip IDs for zl30734 */
+static const u16 zl30734_ids[] = {
+ 0x0E96,
+ 0x1E96,
+ 0x2E96,
+};
+
+const struct zl3073x_chip_info zl30734_chip_info = {
+ .ids = zl30734_ids,
+ .num_ids = ARRAY_SIZE(zl30734_ids),
+ .num_channels = 4,
+};
+EXPORT_SYMBOL_NS_GPL(zl30734_chip_info, "ZL3073X");
+
+/* Chip IDs for zl30735 */
+static const u16 zl30735_ids[] = {
+ 0x0E97,
+ 0x1E97,
+ 0x2E97,
+};
+
+const struct zl3073x_chip_info zl30735_chip_info = {
+ .ids = zl30735_ids,
+ .num_ids = ARRAY_SIZE(zl30735_ids),
+ .num_channels = 5,
+};
+EXPORT_SYMBOL_NS_GPL(zl30735_chip_info, "ZL3073X");
+
+#define ZL_RANGE_OFFSET 0x80
+#define ZL_PAGE_SIZE 0x80
+#define ZL_NUM_PAGES 256
+#define ZL_PAGE_SEL 0x7F
+#define ZL_PAGE_SEL_MASK GENMASK(7, 0)
+#define ZL_NUM_REGS (ZL_NUM_PAGES * ZL_PAGE_SIZE)
+
+/* Regmap range configuration */
+static const struct regmap_range_cfg zl3073x_regmap_range = {
+ .range_min = ZL_RANGE_OFFSET,
+ .range_max = ZL_RANGE_OFFSET + ZL_NUM_REGS - 1,
+ .selector_reg = ZL_PAGE_SEL,
+ .selector_mask = ZL_PAGE_SEL_MASK,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = ZL_PAGE_SIZE,
+};
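+/* Worked example: device register 0x0123 is accessed through the regmap as
+ * virtual address ZL_RANGE_OFFSET + 0x0123. The range config above then
+ * writes page 0x0123 / ZL_PAGE_SIZE = 2 to the ZL_PAGE_SEL selector and
+ * accesses window offset 0x0123 % ZL_PAGE_SIZE = 0x23.
+ */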
+
+static bool
+zl3073x_is_volatile_reg(struct device *dev __maybe_unused, unsigned int reg)
+{
+ /* Only page selector is non-volatile */
+ return reg != ZL_PAGE_SEL;
+}
+
+const struct regmap_config zl3073x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = ZL_RANGE_OFFSET + ZL_NUM_REGS - 1,
+ .ranges = &zl3073x_regmap_range,
+ .num_ranges = 1,
+ .cache_type = REGCACHE_MAPLE,
+ .volatile_reg = zl3073x_is_volatile_reg,
+};
+EXPORT_SYMBOL_NS_GPL(zl3073x_regmap_config, "ZL3073X");
+
+static bool
+zl3073x_check_reg(struct zl3073x_dev *zldev, unsigned int reg, size_t size)
+{
+ /* Check that multiop lock is held when accessing registers
+ * from page 10 and above except the page 255 that does not
+ * need this protection.
+ */
+ if (ZL_REG_PAGE(reg) >= 10 && ZL_REG_PAGE(reg) < 255)
+ lockdep_assert_held(&zldev->multiop_lock);
+
+ /* Check the index is in valid range for indexed register */
+ if (ZL_REG_OFFSET(reg) > ZL_REG_MAX_OFFSET(reg)) {
+ dev_err(zldev->dev, "Index out of range for reg 0x%04lx\n",
+ ZL_REG_ADDR(reg));
+ return false;
+ }
+ /* Check the requested size corresponds to register size */
+ if (ZL_REG_SIZE(reg) != size) {
+ dev_err(zldev->dev, "Invalid size %zu for reg 0x%04lx\n",
+ size, ZL_REG_ADDR(reg));
+ return false;
+ }
+
+ return true;
+}
+
+static int
+zl3073x_read_reg(struct zl3073x_dev *zldev, unsigned int reg, void *val,
+ size_t size)
+{
+ int rc;
+
+ if (!zl3073x_check_reg(zldev, reg, size))
+ return -EINVAL;
+
+ /* Map the register address to virtual range */
+ reg = ZL_REG_ADDR(reg) + ZL_RANGE_OFFSET;
+
+ rc = regmap_bulk_read(zldev->regmap, reg, val, size);
+ if (rc) {
+ dev_err(zldev->dev, "Failed to read reg 0x%04x: %pe\n", reg,
+ ERR_PTR(rc));
+ return rc;
+ }
+
+ return 0;
+}
+
+static int
+zl3073x_write_reg(struct zl3073x_dev *zldev, unsigned int reg, const void *val,
+ size_t size)
+{
+ int rc;
+
+ if (!zl3073x_check_reg(zldev, reg, size))
+ return -EINVAL;
+
+ /* Map the register address to virtual range */
+ reg = ZL_REG_ADDR(reg) + ZL_RANGE_OFFSET;
+
+ rc = regmap_bulk_write(zldev->regmap, reg, val, size);
+ if (rc) {
+ dev_err(zldev->dev, "Failed to write reg 0x%04x: %pe\n", reg,
+ ERR_PTR(rc));
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * zl3073x_read_u8 - read value from 8bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to read from
+ * @val: place to store the read value
+ *
+ * Reads value from given 8bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_read_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 *val)
+{
+ return zl3073x_read_reg(zldev, reg, val, sizeof(*val));
+}
+
+/**
+ * zl3073x_write_u8 - write value to 8bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to write to
+ * @val: value to write
+ *
+ * Writes value into given 8bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_write_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 val)
+{
+ return zl3073x_write_reg(zldev, reg, &val, sizeof(val));
+}
+
+/**
+ * zl3073x_read_u16 - read value from 16bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to read from
+ * @val: place to store the read value
+ *
+ * Reads value from given 16bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_read_u16(struct zl3073x_dev *zldev, unsigned int reg, u16 *val)
+{
+ int rc;
+
+ rc = zl3073x_read_reg(zldev, reg, val, sizeof(*val));
+ if (!rc)
+ be16_to_cpus(val);
+
+ return rc;
+}
+
+/**
+ * zl3073x_write_u16 - write value to 16bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to write to
+ * @val: value to write
+ *
+ * Writes value into given 16bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_write_u16(struct zl3073x_dev *zldev, unsigned int reg, u16 val)
+{
+ cpu_to_be16s(&val);
+
+ return zl3073x_write_reg(zldev, reg, &val, sizeof(val));
+}
+
+/**
+ * zl3073x_read_u32 - read value from 32bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to read from
+ * @val: place to store the read value
+ *
+ * Reads value from given 32bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_read_u32(struct zl3073x_dev *zldev, unsigned int reg, u32 *val)
+{
+ int rc;
+
+ rc = zl3073x_read_reg(zldev, reg, val, sizeof(*val));
+ if (!rc)
+ be32_to_cpus(val);
+
+ return rc;
+}
+
+/**
+ * zl3073x_write_u32 - write value to 32bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to write to
+ * @val: value to write
+ *
+ * Writes value into given 32bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_write_u32(struct zl3073x_dev *zldev, unsigned int reg, u32 val)
+{
+ cpu_to_be32s(&val);
+
+ return zl3073x_write_reg(zldev, reg, &val, sizeof(val));
+}
+
+/**
+ * zl3073x_read_u48 - read value from 48bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to read from
+ * @val: place to store the read value
+ *
+ * Reads value from given 48bit register.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_read_u48(struct zl3073x_dev *zldev, unsigned int reg, u64 *val)
+{
+ u8 buf[6];
+ int rc;
+
+ rc = zl3073x_read_reg(zldev, reg, buf, sizeof(buf));
+ if (!rc)
+ *val = get_unaligned_be48(buf);
+
+ return rc;
+}
+
+/**
+ * zl3073x_write_u48 - write value to 48bit register
+ * @zldev: zl3073x device pointer
+ * @reg: register to write to
+ * @val: value to write
+ *
+ * Writes value into given 48bit register.
+ * The value must lie in the interval <S48_MIN, U48_MAX>.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_write_u48(struct zl3073x_dev *zldev, unsigned int reg, u64 val)
+{
+ u8 buf[6];
+
+ /* Check the value belongs to <S48_MIN, U48_MAX>.
+ * Any negative value >= S48_MIN has all of bits 47..63 set
+ * when stored in an u64.
+ */
+ if (val > GENMASK_ULL(47, 0) && val < GENMASK_ULL(63, 47)) {
+ dev_err(zldev->dev, "Value 0x%0llx out of range\n", val);
+ return -EINVAL;
+ }
+
+ put_unaligned_be48(val, buf);
+
+ return zl3073x_write_reg(zldev, reg, buf, sizeof(buf));
+}
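+/* Worked example of the range check above: a negative s48 value such as -2
+ * arrives as 0xfffffffffffffffe, which has all of bits 47..63 set and is
+ * accepted, while 0x0001000000000000 (bit 48 only) is neither a valid u48
+ * nor a sign-extended s48 and is rejected with -EINVAL.
+ */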
+
+/**
+ * zl3073x_poll_zero_u8 - wait for register to be cleared by device
+ * @zldev: zl3073x device pointer
+ * @reg: register to poll (has to be 8bit register)
+ * @mask: bit mask for polling
+ *
+ * Waits for bits specified by @mask in register @reg value to be cleared
+ * by the device.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_poll_zero_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 mask)
+{
+ /* Register polling sleep & timeout */
+#define ZL_POLL_SLEEP_US 10
+#define ZL_POLL_TIMEOUT_US 2000000
+ unsigned int val;
+
+ /* Check the register is 8bit */
+ if (ZL_REG_SIZE(reg) != 1) {
+ dev_err(zldev->dev, "Invalid reg 0x%04lx size for polling\n",
+ ZL_REG_ADDR(reg));
+ return -EINVAL;
+ }
+
+ /* Map the register address to virtual range */
+ reg = ZL_REG_ADDR(reg) + ZL_RANGE_OFFSET;
+
+ return regmap_read_poll_timeout(zldev->regmap, reg, val, !(val & mask),
+ ZL_POLL_SLEEP_US, ZL_POLL_TIMEOUT_US);
+}
+
+int zl3073x_mb_op(struct zl3073x_dev *zldev, unsigned int op_reg, u8 op_val,
+ unsigned int mask_reg, u16 mask_val)
+{
+ int rc;
+
+ /* Set mask for the operation */
+ rc = zl3073x_write_u16(zldev, mask_reg, mask_val);
+ if (rc)
+ return rc;
+
+ /* Trigger the operation */
+ rc = zl3073x_write_u8(zldev, op_reg, op_val);
+ if (rc)
+ return rc;
+
+ /* Wait for the operation to actually finish */
+ return zl3073x_poll_zero_u8(zldev, op_reg, op_val);
+}
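+/* Usage sketch (hypothetical register names): latching the REF3
+ * configuration mailbox for reading could look like
+ *
+ *   rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ *                      ZL_REG_REF_MB_MASK, BIT(3));
+ *
+ * i.e. write the reference mask, trigger the read semaphore and poll until
+ * the device clears it.
+ */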
+
+/**
+ * zl3073x_do_hwreg_op - Perform HW register read/write operation
+ * @zldev: zl3073x device pointer
+ * @op: operation to perform
+ *
+ * Performs requested operation and waits for its completion.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_do_hwreg_op(struct zl3073x_dev *zldev, u8 op)
+{
+ int rc;
+
+ /* Set requested operation and set pending bit */
+ rc = zl3073x_write_u8(zldev, ZL_REG_HWREG_OP, op | ZL_HWREG_OP_PENDING);
+ if (rc)
+ return rc;
+
+ /* Poll for completion - pending bit cleared */
+ return zl3073x_poll_zero_u8(zldev, ZL_REG_HWREG_OP,
+ ZL_HWREG_OP_PENDING);
+}
+
+/**
+ * zl3073x_read_hwreg - Read HW register
+ * @zldev: zl3073x device pointer
+ * @addr: HW register address
+ * @value: Value of the HW register
+ *
+ * Reads HW register value and stores it into @value.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_read_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 *value)
+{
+ int rc;
+
+ /* Set address to read data from */
+ rc = zl3073x_write_u32(zldev, ZL_REG_HWREG_ADDR, addr);
+ if (rc)
+ return rc;
+
+ /* Perform the read operation */
+ rc = zl3073x_do_hwreg_op(zldev, ZL_HWREG_OP_READ);
+ if (rc)
+ return rc;
+
+ /* Read the received data */
+ return zl3073x_read_u32(zldev, ZL_REG_HWREG_READ_DATA, value);
+}
+
+/**
+ * zl3073x_write_hwreg - Write value to HW register
+ * @zldev: zl3073x device pointer
+ * @addr: HW registers address
+ * @value: Value to be written to HW register
+ *
+ * Stores the requested value into HW register.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_write_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value)
+{
+ int rc;
+
+ /* Set address to write data to */
+ rc = zl3073x_write_u32(zldev, ZL_REG_HWREG_ADDR, addr);
+ if (rc)
+ return rc;
+
+ /* Set data to be written */
+ rc = zl3073x_write_u32(zldev, ZL_REG_HWREG_WRITE_DATA, value);
+ if (rc)
+ return rc;
+
+ /* Perform the write operation */
+ return zl3073x_do_hwreg_op(zldev, ZL_HWREG_OP_WRITE);
+}
+
+/**
+ * zl3073x_update_hwreg - Update certain bits in HW register
+ * @zldev: zl3073x device pointer
+ * @addr: HW register address
+ * @value: Value to be written into HW register
+ * @mask: Bitmask indicating bits to be updated
+ *
+ * Reads the given HW register, updates the bits specified by @value and
+ * @mask, and writes the result back to the HW register.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_update_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value,
+ u32 mask)
+{
+ u32 tmp;
+ int rc;
+
+ rc = zl3073x_read_hwreg(zldev, addr, &tmp);
+ if (rc)
+ return rc;
+
+ tmp &= ~mask;
+ tmp |= value & mask;
+
+ return zl3073x_write_hwreg(zldev, addr, tmp);
+}
+
+/**
+ * zl3073x_write_hwreg_seq - Write HW registers sequence
+ * @zldev: pointer to device structure
+ * @seq: pointer to first sequence item
+ * @num_items: number of items in sequence
+ *
+ * Writes the given sequence of HW register operations.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_write_hwreg_seq(struct zl3073x_dev *zldev,
+ const struct zl3073x_hwreg_seq_item *seq,
+ size_t num_items)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_items; i++) {
+ dev_dbg(zldev->dev, "Write 0x%0x [0x%0x] to 0x%0x",
+ seq[i].value, seq[i].mask, seq[i].addr);
+
+ if (seq[i].mask == U32_MAX)
+ /* Write value directly */
+ rc = zl3073x_write_hwreg(zldev, seq[i].addr,
+ seq[i].value);
+ else
+ /* Update only bits specified by the mask */
+ rc = zl3073x_update_hwreg(zldev, seq[i].addr,
+ seq[i].value, seq[i].mask);
+ if (rc)
+ return rc;
+
+ if (seq[i].wait)
+ msleep(seq[i].wait);
+ }
+
+ return rc;
+}
+
+static int
+zl3073x_dev_state_fetch(struct zl3073x_dev *zldev)
+{
+ int rc;
+ u8 i;
+
+ for (i = 0; i < ZL3073X_NUM_REFS; i++) {
+ rc = zl3073x_ref_state_fetch(zldev, i);
+ if (rc) {
+ dev_err(zldev->dev,
+ "Failed to fetch input state: %pe\n",
+ ERR_PTR(rc));
+ return rc;
+ }
+ }
+
+ for (i = 0; i < ZL3073X_NUM_SYNTHS; i++) {
+ rc = zl3073x_synth_state_fetch(zldev, i);
+ if (rc) {
+ dev_err(zldev->dev,
+ "Failed to fetch synth state: %pe\n",
+ ERR_PTR(rc));
+ return rc;
+ }
+ }
+
+ for (i = 0; i < ZL3073X_NUM_OUTS; i++) {
+ rc = zl3073x_out_state_fetch(zldev, i);
+ if (rc) {
+ dev_err(zldev->dev,
+ "Failed to fetch output state: %pe\n",
+ ERR_PTR(rc));
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static void
+zl3073x_dev_ref_status_update(struct zl3073x_dev *zldev)
+{
+ int i, rc;
+
+ for (i = 0; i < ZL3073X_NUM_REFS; i++) {
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(i),
+ &zldev->ref[i].mon_status);
+ if (rc)
+ dev_warn(zldev->dev,
+ "Failed to get REF%u status: %pe\n", i,
+ ERR_PTR(rc));
+ }
+}
+
+/**
+ * zl3073x_ref_phase_offsets_update - update reference phase offsets
+ * @zldev: pointer to zl3073x_dev structure
+ * @channel: DPLL channel number or -1
+ *
+ * The function asks the device to update the phase offset latch registers
+ * with the latest measured values. There are 2 sets of latch registers:
+ *
+ * 1) Up to 5 DPLL-to-connected-ref registers that contain phase offset
+ * values between a particular DPLL channel and its *connected* input
+ * reference.
+ *
+ * 2) 10 selected-DPLL-to-all-ref registers that contain phase offset values
+ * between the selected DPLL channel and all input references.
+ *
+ * If the caller is interested in 2) then it has to pass the DPLL channel
+ * number in the @channel parameter. If it is interested only in 1) then it
+ * should pass -1 in @channel.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_ref_phase_offsets_update(struct zl3073x_dev *zldev, int channel)
+{
+ int rc;
+
+ /* Per datasheet we have to wait for 'dpll_ref_phase_err_rqst_rd'
+ * to be zero to ensure that the measured data are coherent.
+ */
+ rc = zl3073x_poll_zero_u8(zldev, ZL_REG_REF_PHASE_ERR_READ_RQST,
+ ZL_REF_PHASE_ERR_READ_RQST_RD);
+ if (rc)
+ return rc;
+
+ /* Select DPLL channel if it is specified */
+ if (channel != -1) {
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MEAS_IDX, channel);
+ if (rc)
+ return rc;
+ }
+
+ /* Request to update phase offsets measurement values */
+ rc = zl3073x_write_u8(zldev, ZL_REG_REF_PHASE_ERR_READ_RQST,
+ ZL_REF_PHASE_ERR_READ_RQST_RD);
+ if (rc)
+ return rc;
+
+ /* Wait for finish */
+ return zl3073x_poll_zero_u8(zldev, ZL_REG_REF_PHASE_ERR_READ_RQST,
+ ZL_REF_PHASE_ERR_READ_RQST_RD);
+}
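+/* Usage sketch: the periodic monitor below calls
+ * zl3073x_ref_phase_offsets_update(zldev, -1) to refresh only set 1),
+ * while DPLL code interested in set 2) would pass its channel number,
+ * e.g. zl3073x_ref_phase_offsets_update(zldev, zldpll->id).
+ */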
+
+/**
+ * zl3073x_ref_ffo_update - update reference fractional frequency offsets
+ * @zldev: pointer to zl3073x_dev structure
+ *
+ * The function asks the device to update the fractional frequency offset
+ * latch registers with the latest measured values, then reads them back and
+ * stores the results in zldev->ref[].ffo.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_ref_ffo_update(struct zl3073x_dev *zldev)
+{
+ int i, rc;
+
+ /* Per datasheet we have to wait for 'ref_freq_meas_ctrl' to be zero
+ * to ensure that the measured data are coherent.
+ */
+ rc = zl3073x_poll_zero_u8(zldev, ZL_REG_REF_FREQ_MEAS_CTRL,
+ ZL_REF_FREQ_MEAS_CTRL);
+ if (rc)
+ return rc;
+
+ /* Select all references for measurement */
+ rc = zl3073x_write_u8(zldev, ZL_REG_REF_FREQ_MEAS_MASK_3_0,
+ GENMASK(7, 0)); /* REF0P..REF3N */
+ if (rc)
+ return rc;
+ rc = zl3073x_write_u8(zldev, ZL_REG_REF_FREQ_MEAS_MASK_4,
+ GENMASK(1, 0)); /* REF4P..REF4N */
+ if (rc)
+ return rc;
+
+ /* Request frequency offset measurement */
+ rc = zl3073x_write_u8(zldev, ZL_REG_REF_FREQ_MEAS_CTRL,
+ ZL_REF_FREQ_MEAS_CTRL_REF_FREQ_OFF);
+ if (rc)
+ return rc;
+
+ /* Wait for finish */
+ rc = zl3073x_poll_zero_u8(zldev, ZL_REG_REF_FREQ_MEAS_CTRL,
+ ZL_REF_FREQ_MEAS_CTRL);
+ if (rc)
+ return rc;
+
+ /* Read DPLL-to-REFx frequency offset measurements */
+ for (i = 0; i < ZL3073X_NUM_REFS; i++) {
+ u32 value;
+
+ /* Read value stored in units of 2^-32 signed */
+ rc = zl3073x_read_u32(zldev, ZL_REG_REF_FREQ(i), &value);
+ if (rc)
+ return rc;
+
+ /* Convert to ppm -> ffo = (10^6 * (s32)value) / 2^32 */
+ zldev->ref[i].ffo = mul_s64_u64_shr((s32)value, 1000000, 32);
+ }
+
+ return 0;
+}
+
+static void
+zl3073x_dev_periodic_work(struct kthread_work *work)
+{
+ struct zl3073x_dev *zldev = container_of(work, struct zl3073x_dev,
+ work.work);
+ struct zl3073x_dpll *zldpll;
+ int rc;
+
+ /* Update input references status */
+ zl3073x_dev_ref_status_update(zldev);
+
+ /* Update DPLL-to-connected-ref phase offsets registers */
+ rc = zl3073x_ref_phase_offsets_update(zldev, -1);
+ if (rc)
+ dev_warn(zldev->dev, "Failed to update phase offsets: %pe\n",
+ ERR_PTR(rc));
+
+ /* Update references' fractional frequency offsets */
+ rc = zl3073x_ref_ffo_update(zldev);
+ if (rc)
+ dev_warn(zldev->dev,
+ "Failed to update fractional frequency offsets: %pe\n",
+ ERR_PTR(rc));
+
+ list_for_each_entry(zldpll, &zldev->dplls, list)
+ zl3073x_dpll_changes_check(zldpll);
+
+ /* Run twice a second */
+ kthread_queue_delayed_work(zldev->kworker, &zldev->work,
+ msecs_to_jiffies(500));
+}
+
+int zl3073x_dev_phase_avg_factor_set(struct zl3073x_dev *zldev, u8 factor)
+{
+ u8 dpll_meas_ctrl, value;
+ int rc;
+
+ /* Read DPLL phase measurement control register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, &dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Convert requested factor to register value */
+ value = (factor + 1) & 0x0f;
+
+ /* Update phase measurement control register */
+ dpll_meas_ctrl &= ~ZL_DPLL_MEAS_CTRL_AVG_FACTOR;
+ dpll_meas_ctrl |= FIELD_PREP(ZL_DPLL_MEAS_CTRL_AVG_FACTOR, value);
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Save the new factor */
+ zldev->phase_avg_factor = factor;
+
+ return 0;
+}
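+/* Worked example: with the driver default factor of 2 (set in
+ * zl3073x_dev_probe) the AVG_FACTOR field above is written as
+ * (2 + 1) & 0x0f = 3; a factor of 15 would wrap to (15 + 1) & 0x0f = 0,
+ * so factors are presumably expected to stay below 15.
+ */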
+
+/**
+ * zl3073x_dev_phase_meas_setup - setup phase offset measurement
+ * @zldev: pointer to zl3073x_dev structure
+ *
+ * Enable phase offset measurement block, set measurement averaging factor
+ * and enable DPLL-to-its-ref phase measurement for all DPLLs.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+static int
+zl3073x_dev_phase_meas_setup(struct zl3073x_dev *zldev)
+{
+ struct zl3073x_dpll *zldpll;
+ u8 dpll_meas_ctrl, mask = 0;
+ int rc;
+
+ /* Setup phase measurement averaging factor */
+ rc = zl3073x_dev_phase_avg_factor_set(zldev, zldev->phase_avg_factor);
+ if (rc)
+ return rc;
+
+ /* Read DPLL phase measurement control register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, &dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Enable DPLL measurement block */
+ dpll_meas_ctrl |= ZL_DPLL_MEAS_CTRL_EN;
+
+ /* Update phase measurement control register */
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Enable DPLL-to-connected-ref measurement for each channel */
+ list_for_each_entry(zldpll, &zldev->dplls, list)
+ mask |= BIT(zldpll->id);
+
+ return zl3073x_write_u8(zldev, ZL_REG_DPLL_PHASE_ERR_READ_MASK, mask);
+}
+
+/**
+ * zl3073x_dev_start - Start normal operation
+ * @zldev: zl3073x device pointer
+ * @full: perform full initialization
+ *
+ * The function starts normal operation, which means registering all DPLLs and
+ * their pins, and starting monitoring. If full initialization is requested,
+ * the function additionally initializes the phase offset measurement block and
+ * fetches hardware-invariant parameters.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_dev_start(struct zl3073x_dev *zldev, bool full)
+{
+ struct zl3073x_dpll *zldpll;
+ u8 info;
+ int rc;
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_INFO, &info);
+ if (rc) {
+ dev_err(zldev->dev, "Failed to read device status info\n");
+ return rc;
+ }
+
+ if (!FIELD_GET(ZL_INFO_READY, info)) {
+ /* The ready bit indicates that the firmware was successfully
+ * configured and is ready for normal operation. If it is
+ * cleared then the configuration stored in flash is wrong
+ * or missing. In this situation the driver will expose
+ * only the devlink interface to give an opportunity to flash
+ * the correct config.
+ */
+ dev_info(zldev->dev,
+ "FW not fully ready - missing or corrupted config\n");
+
+ return 0;
+ }
+
+ if (full) {
+ /* Fetch device state */
+ rc = zl3073x_dev_state_fetch(zldev);
+ if (rc)
+ return rc;
+
+ /* Setup phase offset measurement block */
+ rc = zl3073x_dev_phase_meas_setup(zldev);
+ if (rc) {
+ dev_err(zldev->dev,
+ "Failed to setup phase measurement\n");
+ return rc;
+ }
+ }
+
+ /* Register all DPLLs */
+ list_for_each_entry(zldpll, &zldev->dplls, list) {
+ rc = zl3073x_dpll_register(zldpll);
+ if (rc) {
+ dev_err_probe(zldev->dev, rc,
+ "Failed to register DPLL%u\n",
+ zldpll->id);
+ return rc;
+ }
+ }
+
+ /* Perform initial firmware fine phase correction */
+ rc = zl3073x_dpll_init_fine_phase_adjust(zldev);
+ if (rc) {
+ dev_err_probe(zldev->dev, rc,
+ "Failed to init fine phase correction\n");
+ return rc;
+ }
+
+ /* Start monitoring */
+ kthread_queue_delayed_work(zldev->kworker, &zldev->work, 0);
+
+ return 0;
+}
+
+/**
+ * zl3073x_dev_stop - Stop normal operation
+ * @zldev: zl3073x device pointer
+ *
+ * The function stops normal operation, which means deregistering all DPLLs
+ * and their pins and stopping the monitoring work.
+ */
+void zl3073x_dev_stop(struct zl3073x_dev *zldev)
+{
+ struct zl3073x_dpll *zldpll;
+
+ /* Stop monitoring */
+ kthread_cancel_delayed_work_sync(&zldev->work);
+
+ /* Unregister all DPLLs */
+ list_for_each_entry(zldpll, &zldev->dplls, list) {
+ if (zldpll->dpll_dev)
+ zl3073x_dpll_unregister(zldpll);
+ }
+}
+
+static void zl3073x_dev_dpll_fini(void *ptr)
+{
+ struct zl3073x_dpll *zldpll, *next;
+ struct zl3073x_dev *zldev = ptr;
+
+ /* Stop monitoring and unregister DPLLs */
+ zl3073x_dev_stop(zldev);
+
+ /* Destroy monitoring thread */
+ if (zldev->kworker) {
+ kthread_destroy_worker(zldev->kworker);
+ zldev->kworker = NULL;
+ }
+
+ /* Free all DPLLs */
+ list_for_each_entry_safe(zldpll, next, &zldev->dplls, list) {
+ list_del(&zldpll->list);
+ zl3073x_dpll_free(zldpll);
+ }
+}
+
+static int
+zl3073x_devm_dpll_init(struct zl3073x_dev *zldev, u8 num_dplls)
+{
+ struct kthread_worker *kworker;
+ struct zl3073x_dpll *zldpll;
+ unsigned int i;
+ int rc;
+
+ INIT_LIST_HEAD(&zldev->dplls);
+
+ /* Allocate all DPLLs */
+ for (i = 0; i < num_dplls; i++) {
+ zldpll = zl3073x_dpll_alloc(zldev, i);
+ if (IS_ERR(zldpll)) {
+ dev_err_probe(zldev->dev, PTR_ERR(zldpll),
+ "Failed to alloc DPLL%u\n", i);
+ rc = PTR_ERR(zldpll);
+ goto error;
+ }
+
+ list_add_tail(&zldpll->list, &zldev->dplls);
+ }
+
+ /* Initialize monitoring thread */
+ kthread_init_delayed_work(&zldev->work, zl3073x_dev_periodic_work);
+ kworker = kthread_run_worker(0, "zl3073x-%s", dev_name(zldev->dev));
+ if (IS_ERR(kworker)) {
+ rc = PTR_ERR(kworker);
+ goto error;
+ }
+ zldev->kworker = kworker;
+
+ /* Start normal operation */
+ rc = zl3073x_dev_start(zldev, true);
+ if (rc) {
+ dev_err_probe(zldev->dev, rc, "Failed to start device\n");
+ goto error;
+ }
+
+ /* Add devres action to release DPLL related resources */
+ rc = devm_add_action_or_reset(zldev->dev, zl3073x_dev_dpll_fini, zldev);
+ if (rc)
+ goto error;
+
+ return 0;
+
+error:
+ zl3073x_dev_dpll_fini(zldev);
+
+ return rc;
+}
+
+/**
+ * zl3073x_dev_probe - initialize zl3073x device
+ * @zldev: pointer to zl3073x device
+ * @chip_info: chip info based on compatible
+ *
+ * Common initialization of zl3073x device structure.
+ *
+ * Returns: 0 on success, <0 on error
+ */
+int zl3073x_dev_probe(struct zl3073x_dev *zldev,
+ const struct zl3073x_chip_info *chip_info)
+{
+ u16 id, revision, fw_ver;
+ unsigned int i;
+ u32 cfg_ver;
+ int rc;
+
+ /* Read chip ID */
+ rc = zl3073x_read_u16(zldev, ZL_REG_ID, &id);
+ if (rc)
+ return rc;
+
+ /* Check it matches */
+ for (i = 0; i < chip_info->num_ids; i++) {
+ if (id == chip_info->ids[i])
+ break;
+ }
+
+ if (i == chip_info->num_ids) {
+ return dev_err_probe(zldev->dev, -ENODEV,
+ "Unknown or non-match chip ID: 0x%0x\n",
+ id);
+ }
+
+ /* Read revision, firmware version and custom config version */
+ rc = zl3073x_read_u16(zldev, ZL_REG_REVISION, &revision);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u16(zldev, ZL_REG_FW_VER, &fw_ver);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u32(zldev, ZL_REG_CUSTOM_CONFIG_VER, &cfg_ver);
+ if (rc)
+ return rc;
+
+ dev_dbg(zldev->dev, "ChipID(%X), ChipRev(%X), FwVer(%u)\n", id,
+ revision, fw_ver);
+ dev_dbg(zldev->dev, "Custom config version: %lu.%lu.%lu.%lu\n",
+ FIELD_GET(GENMASK(31, 24), cfg_ver),
+ FIELD_GET(GENMASK(23, 16), cfg_ver),
+ FIELD_GET(GENMASK(15, 8), cfg_ver),
+ FIELD_GET(GENMASK(7, 0), cfg_ver));
+
+ /* Generate a random clock ID as the device has no such property that
+ * could be used for this purpose. A user can later change this value
+ * using devlink.
+ */
+ zldev->clock_id = get_random_u64();
+
+ /* Default phase offset averaging factor */
+ zldev->phase_avg_factor = 2;
+
+ /* Initialize mutex for operations where multiple reads, writes
+ * and/or polls are required to be done atomically.
+ */
+ rc = devm_mutex_init(zldev->dev, &zldev->multiop_lock);
+ if (rc)
+ return dev_err_probe(zldev->dev, rc,
+ "Failed to initialize mutex\n");
+
+ /* Register DPLL channels */
+ rc = zl3073x_devm_dpll_init(zldev, chip_info->num_channels);
+ if (rc)
+ return rc;
+
+ /* Register the devlink instance and parameters */
+ rc = zl3073x_devlink_register(zldev);
+ if (rc)
+ return dev_err_probe(zldev->dev, rc,
+ "Failed to register devlink instance\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(zl3073x_dev_probe, "ZL3073X");
+
+MODULE_AUTHOR("Ivan Vecera <ivecera@redhat.com>");
+MODULE_DESCRIPTION("Microchip ZL3073x core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dpll/zl3073x/core.h b/drivers/dpll/zl3073x/core.h
new file mode 100644
index 000000000000..09bca2d0926d
--- /dev/null
+++ b/drivers/dpll/zl3073x/core.h
@@ -0,0 +1,367 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_CORE_H
+#define _ZL3073X_CORE_H
+
+#include <linux/bitfield.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "out.h"
+#include "ref.h"
+#include "regs.h"
+#include "synth.h"
+
+struct device;
+struct regmap;
+struct zl3073x_dpll;
+
+/*
+ * Hardware limits for ZL3073x chip family
+ */
+#define ZL3073X_MAX_CHANNELS 5
+#define ZL3073X_NUM_REFS 10
+#define ZL3073X_NUM_OUTS 10
+#define ZL3073X_NUM_SYNTHS 5
+#define ZL3073X_NUM_INPUT_PINS ZL3073X_NUM_REFS
+#define ZL3073X_NUM_OUTPUT_PINS (ZL3073X_NUM_OUTS * 2)
+#define ZL3073X_NUM_PINS (ZL3073X_NUM_INPUT_PINS + \
+ ZL3073X_NUM_OUTPUT_PINS)
+
+/**
+ * struct zl3073x_dev - zl3073x device
+ * @dev: pointer to device
+ * @regmap: regmap to access device registers
+ * @multiop_lock: to serialize multiple register operations
+ * @ref: array of input references' invariants
+ * @out: array of outs' invariants
+ * @synth: array of synths' invariants
+ * @dplls: list of DPLLs
+ * @kworker: thread for periodic work
+ * @work: periodic work
+ * @clock_id: clock id of the device
+ * @phase_avg_factor: phase offset measurement averaging factor
+ */
+struct zl3073x_dev {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex multiop_lock;
+
+ /* Invariants */
+ struct zl3073x_ref ref[ZL3073X_NUM_REFS];
+ struct zl3073x_out out[ZL3073X_NUM_OUTS];
+ struct zl3073x_synth synth[ZL3073X_NUM_SYNTHS];
+
+ /* DPLL channels */
+ struct list_head dplls;
+
+ /* Monitor */
+ struct kthread_worker *kworker;
+ struct kthread_delayed_work work;
+
+ /* Devlink parameters */
+ u64 clock_id;
+ u8 phase_avg_factor;
+};
+
+struct zl3073x_chip_info {
+ const u16 *ids;
+ size_t num_ids;
+ int num_channels;
+};
+
+extern const struct zl3073x_chip_info zl30731_chip_info;
+extern const struct zl3073x_chip_info zl30732_chip_info;
+extern const struct zl3073x_chip_info zl30733_chip_info;
+extern const struct zl3073x_chip_info zl30734_chip_info;
+extern const struct zl3073x_chip_info zl30735_chip_info;
+extern const struct regmap_config zl3073x_regmap_config;
+
+struct zl3073x_dev *zl3073x_devm_alloc(struct device *dev);
+int zl3073x_dev_probe(struct zl3073x_dev *zldev,
+ const struct zl3073x_chip_info *chip_info);
+
+int zl3073x_dev_start(struct zl3073x_dev *zldev, bool full);
+void zl3073x_dev_stop(struct zl3073x_dev *zldev);
+
+static inline u8 zl3073x_dev_phase_avg_factor_get(struct zl3073x_dev *zldev)
+{
+ return zldev->phase_avg_factor;
+}
+
+int zl3073x_dev_phase_avg_factor_set(struct zl3073x_dev *zldev, u8 factor);
+
+/**********************
+ * Registers operations
+ **********************/
+
+/**
+ * struct zl3073x_hwreg_seq_item - HW register write sequence item
+ * @addr: HW register to be written
+ * @value: value to be written to HW register
+ * @mask: bitmask indicating bits to be updated
+ * @wait: number of ms to wait after register write
+ */
+struct zl3073x_hwreg_seq_item {
+ u32 addr;
+ u32 value;
+ u32 mask;
+ u32 wait;
+};
+
+#define HWREG_SEQ_ITEM(_addr, _value, _mask, _wait) \
+{ \
+ .addr = _addr, \
+ .value = FIELD_PREP_CONST(_mask, _value), \
+ .mask = _mask, \
+ .wait = _wait, \
+}
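+/* Usage sketch (illustrative addresses and values): a sequence that updates
+ * bits 7..4 of HW register 0x1000 to 0x3, waits 10 ms, and then fully
+ * overwrites HW register 0x1004:
+ *
+ *   static const struct zl3073x_hwreg_seq_item seq[] = {
+ *           HWREG_SEQ_ITEM(0x1000, 0x3, GENMASK(7, 4), 10),
+ *           HWREG_SEQ_ITEM(0x1004, 0x12345678, U32_MAX, 0),
+ *   };
+ *
+ *   rc = zl3073x_write_hwreg_seq(zldev, seq, ARRAY_SIZE(seq));
+ */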
+
+int zl3073x_mb_op(struct zl3073x_dev *zldev, unsigned int op_reg, u8 op_val,
+ unsigned int mask_reg, u16 mask_val);
+int zl3073x_poll_zero_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 mask);
+int zl3073x_read_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 *val);
+int zl3073x_read_u16(struct zl3073x_dev *zldev, unsigned int reg, u16 *val);
+int zl3073x_read_u32(struct zl3073x_dev *zldev, unsigned int reg, u32 *val);
+int zl3073x_read_u48(struct zl3073x_dev *zldev, unsigned int reg, u64 *val);
+int zl3073x_write_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 val);
+int zl3073x_write_u16(struct zl3073x_dev *zldev, unsigned int reg, u16 val);
+int zl3073x_write_u32(struct zl3073x_dev *zldev, unsigned int reg, u32 val);
+int zl3073x_write_u48(struct zl3073x_dev *zldev, unsigned int reg, u64 val);
+int zl3073x_read_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 *value);
+int zl3073x_write_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value);
+int zl3073x_update_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value,
+ u32 mask);
+int zl3073x_write_hwreg_seq(struct zl3073x_dev *zldev,
+ const struct zl3073x_hwreg_seq_item *seq,
+ size_t num_items);
+
+/*****************
+ * Misc operations
+ *****************/
+
+int zl3073x_ref_phase_offsets_update(struct zl3073x_dev *zldev, int channel);
+
+static inline bool
+zl3073x_is_n_pin(u8 id)
+{
+ /* P-pins ids are even while N-pins are odd */
+ return id & 1;
+}
+
+static inline bool
+zl3073x_is_p_pin(u8 id)
+{
+ return !zl3073x_is_n_pin(id);
+}
+
+/**
+ * zl3073x_input_pin_ref_get - get reference for given input pin
+ * @id: input pin id
+ *
+ * Return: reference id for the given input pin
+ */
+static inline u8
+zl3073x_input_pin_ref_get(u8 id)
+{
+ return id;
+}
+
+/**
+ * zl3073x_output_pin_out_get - get output for the given output pin
+ * @id: output pin id
+ *
+ * Return: output id for the given output pin
+ */
+static inline u8
+zl3073x_output_pin_out_get(u8 id)
+{
+ /* Output pin pair shares the single output */
+ return id / 2;
+}
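+/* Worked example: output pin id 7 is an N-pin (7 & 1 == 1) and maps to
+ * output 7 / 2 = 3, i.e. it is the N half of the OUT3 P/N pair.
+ */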
+
+/**
+ * zl3073x_dev_ref_freq_get - get input reference frequency
+ * @zldev: pointer to zl3073x device
+ * @index: input reference index
+ *
+ * Return: frequency of given input reference
+ */
+static inline u32
+zl3073x_dev_ref_freq_get(struct zl3073x_dev *zldev, u8 index)
+{
+ const struct zl3073x_ref *ref = zl3073x_ref_state_get(zldev, index);
+
+ return zl3073x_ref_freq_get(ref);
+}
+
+/**
+ * zl3073x_dev_ref_is_diff - check if the given input reference is differential
+ * @zldev: pointer to zl3073x device
+ * @index: input reference index
+ *
+ * Return: true if reference is differential, false if reference is single-ended
+ */
+static inline bool
+zl3073x_dev_ref_is_diff(struct zl3073x_dev *zldev, u8 index)
+{
+ const struct zl3073x_ref *ref = zl3073x_ref_state_get(zldev, index);
+
+ return zl3073x_ref_is_diff(ref);
+}
+
+/*
+ * zl3073x_dev_ref_is_status_ok - check the given input reference status
+ * @zldev: pointer to zl3073x device
+ * @index: input reference index
+ *
+ * Return: true if the status is ok, false otherwise
+ */
+static inline bool
+zl3073x_dev_ref_is_status_ok(struct zl3073x_dev *zldev, u8 index)
+{
+ const struct zl3073x_ref *ref = zl3073x_ref_state_get(zldev, index);
+
+ return zl3073x_ref_is_status_ok(ref);
+}
+
+/**
+ * zl3073x_dev_synth_freq_get - get synth current freq
+ * @zldev: pointer to zl3073x device
+ * @index: synth index
+ *
+ * Return: frequency of the given synthesizer
+ */
+static inline u32
+zl3073x_dev_synth_freq_get(struct zl3073x_dev *zldev, u8 index)
+{
+ const struct zl3073x_synth *synth;
+
+ synth = zl3073x_synth_state_get(zldev, index);
+ return zl3073x_synth_freq_get(synth);
+}
+
+/**
+ * zl3073x_dev_out_synth_get - get synth connected to given output
+ * @zldev: pointer to zl3073x device
+ * @index: output index
+ *
+ * Return: index of synth connected to given output.
+ */
+static inline u8
+zl3073x_dev_out_synth_get(struct zl3073x_dev *zldev, u8 index)
+{
+ const struct zl3073x_out *out = zl3073x_out_state_get(zldev, index);
+
+ return zl3073x_out_synth_get(out);
+}
+
+/**
+ * zl3073x_dev_out_is_enabled - check if the given output is enabled
+ * @zldev: pointer to zl3073x device
+ * @index: output index
+ *
+ * Return: true if the output is enabled, false otherwise
+ */
+static inline bool
+zl3073x_dev_out_is_enabled(struct zl3073x_dev *zldev, u8 index)
+{
+ const struct zl3073x_out *out = zl3073x_out_state_get(zldev, index);
+ const struct zl3073x_synth *synth;
+ u8 synth_id;
+
+ /* Output is enabled only if associated synth is enabled */
+ synth_id = zl3073x_out_synth_get(out);
+ synth = zl3073x_synth_state_get(zldev, synth_id);
+
+ return zl3073x_synth_is_enabled(synth) && zl3073x_out_is_enabled(out);
+}
+
+/**
+ * zl3073x_dev_out_dpll_get - get DPLL ID the output is driven by
+ * @zldev: pointer to zl3073x device
+ * @index: output index
+ *
+ * Return: ID of DPLL the given output is driven by
+ */
+static inline
+u8 zl3073x_dev_out_dpll_get(struct zl3073x_dev *zldev, u8 index)
+{
+ const struct zl3073x_out *out = zl3073x_out_state_get(zldev, index);
+ const struct zl3073x_synth *synth;
+ u8 synth_id;
+
+ /* Get synthesizer connected to given output */
+ synth_id = zl3073x_out_synth_get(out);
+ synth = zl3073x_synth_state_get(zldev, synth_id);
+
+ /* Return DPLL that drives the synth */
+ return zl3073x_synth_dpll_get(synth);
+}
+
+/**
+ * zl3073x_dev_out_is_diff - check if the given output is differential
+ * @zldev: pointer to zl3073x device
+ * @index: output index
+ *
+ * Return: true if output is differential, false if output is single-ended
+ */
+static inline bool
+zl3073x_dev_out_is_diff(struct zl3073x_dev *zldev, u8 index)
+{
+ const struct zl3073x_out *out = zl3073x_out_state_get(zldev, index);
+
+ return zl3073x_out_is_diff(out);
+}
+
+/**
+ * zl3073x_dev_output_pin_is_enabled - check if the given output pin is enabled
+ * @zldev: pointer to zl3073x device
+ * @id: output pin id
+ *
+ * Checks that the output associated with the given output pin is enabled
+ * and that the output's signal format enables the given pin as well.
+ *
+ * Return: true if output pin is enabled, false if output pin is disabled
+ */
+static inline bool
+zl3073x_dev_output_pin_is_enabled(struct zl3073x_dev *zldev, u8 id)
+{
+ u8 out_id = zl3073x_output_pin_out_get(id);
+ const struct zl3073x_out *out;
+
+ out = zl3073x_out_state_get(zldev, out_id);
+
+ /* Check if the output is enabled - call _dev_ helper that
+ * additionally checks for attached synth enablement.
+ */
+ if (!zl3073x_dev_out_is_enabled(zldev, out_id))
+ return false;
+
+ /* Check signal format */
+ switch (zl3073x_out_signal_format_get(out)) {
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_DISABLED:
+ /* Both output pins are disabled by signal format */
+ return false;
+
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_1P:
+ /* Output is one single ended P-pin output */
+ if (zl3073x_is_n_pin(id))
+ return false;
+ break;
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_1N:
+ /* Output is one single ended N-pin output */
+ if (zl3073x_is_p_pin(id))
+ return false;
+ break;
+ default:
+ /* For other format both pins are enabled */
+ break;
+ }
+
+ return true;
+}
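+/* Worked example: for output pin id 5 the associated output is OUT2 and the
+ * pin is the N half of the pair; if OUT2 and its synth are enabled but the
+ * signal format is ZL_OUTPUT_MODE_SIGNAL_FORMAT_1P, the helper returns
+ * false because only the P-pin is driven.
+ */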
+
+#endif /* _ZL3073X_CORE_H */
diff --git a/drivers/dpll/zl3073x/devlink.c b/drivers/dpll/zl3073x/devlink.c
new file mode 100644
index 000000000000..ccc22332b346
--- /dev/null
+++ b/drivers/dpll/zl3073x/devlink.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/device/devres.h>
+#include <linux/netlink.h>
+#include <linux/sprintf.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+
+#include "core.h"
+#include "devlink.h"
+#include "dpll.h"
+#include "flash.h"
+#include "fw.h"
+#include "regs.h"
+
+/**
+ * zl3073x_devlink_info_get - Devlink device info callback
+ * @devlink: devlink structure pointer
+ * @req: devlink request pointer to store information
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dev *zldev = devlink_priv(devlink);
+ u16 id, revision, fw_ver;
+ char buf[16];
+ u32 cfg_ver;
+ int rc;
+
+ rc = zl3073x_read_u16(zldev, ZL_REG_ID, &id);
+ if (rc)
+ return rc;
+
+ snprintf(buf, sizeof(buf), "%X", id);
+ rc = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ buf);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u16(zldev, ZL_REG_REVISION, &revision);
+ if (rc)
+ return rc;
+
+ snprintf(buf, sizeof(buf), "%X", revision);
+ rc = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_REV,
+ buf);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u16(zldev, ZL_REG_FW_VER, &fw_ver);
+ if (rc)
+ return rc;
+
+ snprintf(buf, sizeof(buf), "%u", fw_ver);
+ rc = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_CUSTOM_CONFIG_VER, &cfg_ver);
+ if (rc)
+ return rc;
+
+ /* No custom config version */
+ if (cfg_ver == U32_MAX)
+ return 0;
+
+ snprintf(buf, sizeof(buf), "%lu.%lu.%lu.%lu",
+ FIELD_GET(GENMASK(31, 24), cfg_ver),
+ FIELD_GET(GENMASK(23, 16), cfg_ver),
+ FIELD_GET(GENMASK(15, 8), cfg_ver),
+ FIELD_GET(GENMASK(7, 0), cfg_ver));
+
+ return devlink_info_version_running_put(req, "custom_cfg", buf);
+}
+
+static int
+zl3073x_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dev *zldev = devlink_priv(devlink);
+
+ if (action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
+ return -EOPNOTSUPP;
+
+ /* Stop normal operation */
+ zl3073x_dev_stop(zldev);
+
+ return 0;
+}
+
+static int
+zl3073x_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dev *zldev = devlink_priv(devlink);
+ union devlink_param_value val;
+ int rc;
+
+ if (action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
+ return -EOPNOTSUPP;
+
+ rc = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_CLOCK_ID,
+ &val);
+ if (rc)
+ return rc;
+
+ if (zldev->clock_id != val.vu64) {
+ dev_dbg(zldev->dev,
+ "'clock_id' changed to %016llx\n", val.vu64);
+ zldev->clock_id = val.vu64;
+ }
+
+ /* Restart normal operation */
+ rc = zl3073x_dev_start(zldev, false);
+ if (rc)
+ dev_warn(zldev->dev, "Failed to re-start normal operation\n");
+
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+
+ return 0;
+}
+
+void zl3073x_devlink_flash_notify(struct zl3073x_dev *zldev, const char *msg,
+ const char *component, u32 done, u32 total)
+{
+ struct devlink *devlink = priv_to_devlink(zldev);
+
+ devlink_flash_update_status_notify(devlink, msg, component, done,
+ total);
+}
+
+/**
+ * zl3073x_devlink_flash_prepare - Prepare and enter flash mode
+ * @zldev: zl3073x device pointer
+ * @zlfw: pointer to loaded firmware
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function stops normal operation and switches the device to flash mode.
+ * If an error occurs the normal operation is resumed.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_devlink_flash_prepare(struct zl3073x_dev *zldev,
+ struct zl3073x_fw *zlfw,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_fw_component *util;
+ int rc;
+
+ util = zlfw->component[ZL_FW_COMPONENT_UTIL];
+ if (!util) {
+ zl3073x_devlink_flash_notify(zldev,
+ "Utility is missing in firmware",
+ NULL, 0, 0);
+ return -ENOEXEC;
+ }
+
+ /* Stop normal operation prior entering flash mode */
+ zl3073x_dev_stop(zldev);
+
+ rc = zl3073x_flash_mode_enter(zldev, util->data, util->size, extack);
+ if (rc) {
+ zl3073x_devlink_flash_notify(zldev,
+ "Failed to enter flash mode",
+ NULL, 0, 0);
+
+ /* Resume normal operation */
+ zl3073x_dev_start(zldev, true);
+
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * zl3073x_devlink_flash_finish - Leave flash mode and resume normal operation
+ * @zldev: zl3073x device pointer
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function switches the device back to standard mode and resumes normal
+ * operation.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_devlink_flash_finish(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Reset device CPU to normal mode */
+ zl3073x_flash_mode_leave(zldev, extack);
+
+ /* Resume normal operation */
+ rc = zl3073x_dev_start(zldev, true);
+ if (rc)
+ zl3073x_devlink_flash_notify(zldev,
+ "Failed to start normal operation",
+ NULL, 0, 0);
+
+ return rc;
+}
+
+/**
+ * zl3073x_devlink_flash_update - Devlink flash update callback
+ * @devlink: devlink structure pointer
+ * @params: flashing parameters pointer
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dev *zldev = devlink_priv(devlink);
+ struct zl3073x_fw *zlfw;
+ int rc = 0;
+
+ zlfw = zl3073x_fw_load(zldev, params->fw->data, params->fw->size,
+ extack);
+ if (IS_ERR(zlfw)) {
+ zl3073x_devlink_flash_notify(zldev, "Failed to load firmware",
+ NULL, 0, 0);
+ rc = PTR_ERR(zlfw);
+ goto finish;
+ }
+
+ /* Stop normal operation and enter flash mode */
+ rc = zl3073x_devlink_flash_prepare(zldev, zlfw, extack);
+ if (rc)
+ goto finish;
+
+ rc = zl3073x_fw_flash(zldev, zlfw, extack);
+ if (rc) {
+ zl3073x_devlink_flash_finish(zldev, extack);
+ goto finish;
+ }
+
+ /* Resume normal mode */
+ rc = zl3073x_devlink_flash_finish(zldev, extack);
+
+finish:
+ if (!IS_ERR(zlfw))
+ zl3073x_fw_free(zlfw);
+
+ zl3073x_devlink_flash_notify(zldev,
+ rc ? "Flashing failed" : "Flashing done",
+ NULL, 0, 0);
+
+ return rc;
+}
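+
+/* Flashing is driven from userspace via devlink; e.g. (hypothetical device
+ * handle and firmware file name):
+ *
+ *	devlink dev flash platform/zl3073x.0 file zl3073x_fw.bin
+ */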
+
+static const struct devlink_ops zl3073x_devlink_ops = {
+ .info_get = zl3073x_devlink_info_get,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ .reload_down = zl3073x_devlink_reload_down,
+ .reload_up = zl3073x_devlink_reload_up,
+ .flash_update = zl3073x_devlink_flash_update,
+};
+
+static void
+zl3073x_devlink_free(void *ptr)
+{
+ devlink_free(ptr);
+}
+
+/**
+ * zl3073x_devm_alloc - allocates zl3073x device structure
+ * @dev: pointer to device structure
+ *
+ * Allocates the zl3073x device structure as a device resource.
+ *
+ * Return: pointer to zl3073x device on success, error pointer on error
+ */
+struct zl3073x_dev *zl3073x_devm_alloc(struct device *dev)
+{
+ struct zl3073x_dev *zldev;
+ struct devlink *devlink;
+ int rc;
+
+ devlink = devlink_alloc(&zl3073x_devlink_ops, sizeof(*zldev), dev);
+ if (!devlink)
+ return ERR_PTR(-ENOMEM);
+
+ /* Add devres action to free devlink device */
+ rc = devm_add_action_or_reset(dev, zl3073x_devlink_free, devlink);
+ if (rc)
+ return ERR_PTR(rc);
+
+ zldev = devlink_priv(devlink);
+ zldev->dev = dev;
+ dev_set_drvdata(zldev->dev, zldev);
+
+ return zldev;
+}
+EXPORT_SYMBOL_NS_GPL(zl3073x_devm_alloc, "ZL3073X");
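+
+/* A minimal usage sketch (hypothetical bus driver probe); the devlink
+ * instance is allocated first and registered once the device is set up:
+ *
+ *	static int example_probe(struct i2c_client *client)
+ *	{
+ *		struct zl3073x_dev *zldev;
+ *
+ *		zldev = zl3073x_devm_alloc(&client->dev);
+ *		if (IS_ERR(zldev))
+ *			return PTR_ERR(zldev);
+ *
+ *		return zl3073x_devlink_register(zldev);
+ *	}
+ */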
+
+static int
+zl3073x_devlink_param_clock_id_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (!val.vu64) {
+ NL_SET_ERR_MSG_MOD(extack, "'clock_id' must be non-zero");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param zl3073x_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(CLOCK_ID, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL,
+ zl3073x_devlink_param_clock_id_validate),
+};
+
+static void
+zl3073x_devlink_unregister(void *ptr)
+{
+ struct devlink *devlink = priv_to_devlink(ptr);
+
+ devl_lock(devlink);
+
+ /* Unregister devlink params */
+ devl_params_unregister(devlink, zl3073x_devlink_params,
+ ARRAY_SIZE(zl3073x_devlink_params));
+
+ /* Unregister devlink instance */
+ devl_unregister(devlink);
+
+ devl_unlock(devlink);
+}
+
+/**
+ * zl3073x_devlink_register - register devlink instance and params
+ * @zldev: zl3073x device to register the devlink for
+ *
+ * Register the devlink instance and parameters associated with the device.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_devlink_register(struct zl3073x_dev *zldev)
+{
+ struct devlink *devlink = priv_to_devlink(zldev);
+ union devlink_param_value value;
+ int rc;
+
+ devl_lock(devlink);
+
+ /* Register devlink params */
+ rc = devl_params_register(devlink, zl3073x_devlink_params,
+ ARRAY_SIZE(zl3073x_devlink_params));
+ if (rc) {
+ devl_unlock(devlink);
+
+ return rc;
+ }
+
+ value.vu64 = zldev->clock_id;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_CLOCK_ID,
+ value);
+
+ /* Register devlink instance */
+ devl_register(devlink);
+
+ devl_unlock(devlink);
+
+ /* Add devres action to unregister devlink device */
+ return devm_add_action_or_reset(zldev->dev, zl3073x_devlink_unregister,
+ zldev);
+}
diff --git a/drivers/dpll/zl3073x/devlink.h b/drivers/dpll/zl3073x/devlink.h
new file mode 100644
index 000000000000..63dfd6fa1cd6
--- /dev/null
+++ b/drivers/dpll/zl3073x/devlink.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_DEVLINK_H
+#define _ZL3073X_DEVLINK_H
+
+struct zl3073x_dev;
+
+struct zl3073x_dev *zl3073x_devm_alloc(struct device *dev);
+
+int zl3073x_devlink_register(struct zl3073x_dev *zldev);
+
+void zl3073x_devlink_flash_notify(struct zl3073x_dev *zldev, const char *msg,
+ const char *component, u32 done, u32 total);
+
+#endif /* _ZL3073X_DEVLINK_H */
diff --git a/drivers/dpll/zl3073x/dpll.c b/drivers/dpll/zl3073x/dpll.c
new file mode 100644
index 000000000000..9879d85d29af
--- /dev/null
+++ b/drivers/dpll/zl3073x/dpll.c
@@ -0,0 +1,1938 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/bug.h>
+#include <linux/container_of.h>
+#include <linux/dev_printk.h>
+#include <linux/dpll.h>
+#include <linux/err.h>
+#include <linux/kthread.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sprintf.h>
+
+#include "core.h"
+#include "dpll.h"
+#include "prop.h"
+#include "regs.h"
+
+#define ZL3073X_DPLL_REF_NONE ZL3073X_NUM_REFS
+#define ZL3073X_DPLL_REF_IS_VALID(_ref) ((_ref) != ZL3073X_DPLL_REF_NONE)
+
+/**
+ * struct zl3073x_dpll_pin - DPLL pin
+ * @list: this DPLL pin list entry
+ * @dpll: DPLL the pin is registered to
+ * @dpll_pin: pointer to registered dpll_pin
+ * @label: package label
+ * @dir: pin direction
+ * @id: pin id
+ * @prio: pin priority <0, 14>
+ * @selectable: pin is selectable in automatic mode
+ * @esync_control: embedded sync is controllable
+ * @phase_gran: phase adjustment granularity
+ * @pin_state: last saved pin state
+ * @phase_offset: last saved pin phase offset
+ * @freq_offset: last saved fractional frequency offset
+ */
+struct zl3073x_dpll_pin {
+ struct list_head list;
+ struct zl3073x_dpll *dpll;
+ struct dpll_pin *dpll_pin;
+ char label[8];
+ enum dpll_pin_direction dir;
+ u8 id;
+ u8 prio;
+ bool selectable;
+ bool esync_control;
+ s32 phase_gran;
+ enum dpll_pin_state pin_state;
+ s64 phase_offset;
+ s64 freq_offset;
+};
+
+/*
+ * Supported esync ranges for input and for output per output pair type
+ */
+static const struct dpll_pin_frequency esync_freq_ranges[] = {
+ DPLL_PIN_FREQUENCY_RANGE(0, 1),
+};
+
+/**
+ * zl3073x_dpll_is_input_pin - check if the pin is an input pin
+ * @pin: pin to check
+ *
+ * Return: true if pin is input, false if pin is output.
+ */
+static bool
+zl3073x_dpll_is_input_pin(struct zl3073x_dpll_pin *pin)
+{
+ return pin->dir == DPLL_PIN_DIRECTION_INPUT;
+}
+
+/**
+ * zl3073x_dpll_is_p_pin - check if the pin is P-pin
+ * @pin: pin to check
+ *
+ * Return: true if the pin is P-pin, false if it is N-pin
+ */
+static bool
+zl3073x_dpll_is_p_pin(struct zl3073x_dpll_pin *pin)
+{
+ return zl3073x_is_p_pin(pin->id);
+}
+
+static int
+zl3073x_dpll_pin_direction_get(const struct dpll_pin *dpll_pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll_pin *pin = pin_priv;
+
+ *direction = pin->dir;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_input_pin_esync_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ const struct zl3073x_ref *ref;
+ u8 ref_id;
+
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = zl3073x_ref_state_get(zldev, ref_id);
+
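+	/* In the 50/50 esync 25/75 mode the embedded sync pulse is fixed
+	 * at 25% of the period, hence the constant below.
+	 */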
+ switch (FIELD_GET(ZL_REF_SYNC_CTRL_MODE, ref->sync_ctrl)) {
+ case ZL_REF_SYNC_CTRL_MODE_50_50_ESYNC_25_75:
+ esync->freq = ref->esync_n_div == ZL_REF_ESYNC_DIV_1HZ ? 1 : 0;
+ esync->pulse = 25;
+ break;
+ default:
+ esync->freq = 0;
+ esync->pulse = 0;
+ break;
+ }
+
+ /* If the pin supports esync control expose its range but only
+ * if the current reference frequency is > 1 Hz.
+ */
+ if (pin->esync_control && zl3073x_ref_freq_get(ref) > 1) {
+ esync->range = esync_freq_ranges;
+ esync->range_num = ARRAY_SIZE(esync_freq_ranges);
+ } else {
+ esync->range = NULL;
+ esync->range_num = 0;
+ }
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_input_pin_esync_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 freq,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ struct zl3073x_ref ref;
+ u8 ref_id, sync_mode;
+
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = *zl3073x_ref_state_get(zldev, ref_id);
+
+ /* Use freq == 0 to disable esync */
+ if (!freq)
+ sync_mode = ZL_REF_SYNC_CTRL_MODE_REFSYNC_PAIR_OFF;
+ else
+ sync_mode = ZL_REF_SYNC_CTRL_MODE_50_50_ESYNC_25_75;
+
+ ref.sync_ctrl &= ~ZL_REF_SYNC_CTRL_MODE;
+ ref.sync_ctrl |= FIELD_PREP(ZL_REF_SYNC_CTRL_MODE, sync_mode);
+
+ if (freq) {
+		/* 1 Hz is the only supported frequency for now */
+ ref.esync_n_div = ZL_REF_ESYNC_DIV_1HZ;
+ }
+
+ /* Update reference configuration */
+ return zl3073x_ref_state_set(zldev, ref_id, &ref);
+}
+
+static int
+zl3073x_dpll_input_pin_ffo_get(const struct dpll_pin *dpll_pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s64 *ffo, struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll_pin *pin = pin_priv;
+
+ *ffo = pin->freq_offset;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_input_pin_frequency_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 *frequency,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ u8 ref_id;
+
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ *frequency = zl3073x_dev_ref_freq_get(zldpll->dev, ref_id);
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_input_pin_frequency_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 frequency,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ struct zl3073x_ref ref;
+ u8 ref_id;
+
+ /* Get reference state */
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = *zl3073x_ref_state_get(zldev, ref_id);
+
+ /* Update frequency */
+ zl3073x_ref_freq_set(&ref, frequency);
+
+ /* Commit reference state */
+ return zl3073x_ref_state_set(zldev, ref_id, &ref);
+}
+
+/**
+ * zl3073x_dpll_selected_ref_get - get currently selected reference
+ * @zldpll: pointer to zl3073x_dpll
+ * @ref: place to store selected reference
+ *
+ * Checks which reference is currently selected for the DPLL to lock to
+ * and stores its index in @ref.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_selected_ref_get(struct zl3073x_dpll *zldpll, u8 *ref)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 state, value;
+ int rc;
+
+ switch (zldpll->refsel_mode) {
+ case ZL_DPLL_MODE_REFSEL_MODE_AUTO:
+ /* For automatic mode read refsel_status register */
+ rc = zl3073x_read_u8(zldev,
+ ZL_REG_DPLL_REFSEL_STATUS(zldpll->id),
+ &value);
+ if (rc)
+ return rc;
+
+ /* Extract reference state */
+ state = FIELD_GET(ZL_DPLL_REFSEL_STATUS_STATE, value);
+
+ /* Return the reference only if the DPLL is locked to it */
+ if (state == ZL_DPLL_REFSEL_STATUS_STATE_LOCK)
+ *ref = FIELD_GET(ZL_DPLL_REFSEL_STATUS_REFSEL, value);
+ else
+ *ref = ZL3073X_DPLL_REF_NONE;
+ break;
+ case ZL_DPLL_MODE_REFSEL_MODE_REFLOCK:
+ /* For manual mode return stored value */
+ *ref = zldpll->forced_ref;
+ break;
+ default:
+ /* For other modes like NCO, freerun... there is no input ref */
+ *ref = ZL3073X_DPLL_REF_NONE;
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * zl3073x_dpll_selected_ref_set - select reference in manual mode
+ * @zldpll: pointer to zl3073x_dpll
+ * @ref: input reference to be selected
+ *
+ * Selects the given reference that the DPLL channel should be
+ * locked to.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_selected_ref_set(struct zl3073x_dpll *zldpll, u8 ref)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 mode, mode_refsel;
+ int rc;
+
+ mode = zldpll->refsel_mode;
+
+ switch (mode) {
+ case ZL_DPLL_MODE_REFSEL_MODE_REFLOCK:
+ /* Manual mode with ref selected */
+ if (ref == ZL3073X_DPLL_REF_NONE) {
+ switch (zldpll->lock_status) {
+ case DPLL_LOCK_STATUS_LOCKED_HO_ACQ:
+ case DPLL_LOCK_STATUS_HOLDOVER:
+ /* Switch to forced holdover */
+ mode = ZL_DPLL_MODE_REFSEL_MODE_HOLDOVER;
+ break;
+ default:
+ /* Switch to freerun */
+ mode = ZL_DPLL_MODE_REFSEL_MODE_FREERUN;
+ break;
+ }
+ /* Keep selected reference */
+ ref = zldpll->forced_ref;
+ } else if (ref == zldpll->forced_ref) {
+ /* No register update - same mode and same ref */
+ return 0;
+ }
+ break;
+ case ZL_DPLL_MODE_REFSEL_MODE_FREERUN:
+ case ZL_DPLL_MODE_REFSEL_MODE_HOLDOVER:
+		/* Manual mode with no ref selected */
+ if (ref == ZL3073X_DPLL_REF_NONE)
+ /* No register update - keep current mode */
+ return 0;
+
+ /* Switch to reflock mode and update ref selection */
+ mode = ZL_DPLL_MODE_REFSEL_MODE_REFLOCK;
+ break;
+ default:
+		/* In other modes like automatic or NCO the reference cannot
+		 * be selected manually
+ */
+ return -EOPNOTSUPP;
+ }
+
+ /* Build mode_refsel value */
+ mode_refsel = FIELD_PREP(ZL_DPLL_MODE_REFSEL_MODE, mode) |
+ FIELD_PREP(ZL_DPLL_MODE_REFSEL_REF, ref);
+
+ /* Update dpll_mode_refsel register */
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MODE_REFSEL(zldpll->id),
+ mode_refsel);
+ if (rc)
+ return rc;
+
+ /* Store new mode and forced reference */
+ zldpll->refsel_mode = mode;
+ zldpll->forced_ref = ref;
+
+ return rc;
+}
+
+/**
+ * zl3073x_dpll_connected_ref_get - get currently connected reference
+ * @zldpll: pointer to zl3073x_dpll
+ * @ref: place to store selected reference
+ *
+ * Looks for the reference the DPLL is currently connected (locked) to
+ * and stores its index in @ref.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_connected_ref_get(struct zl3073x_dpll *zldpll, u8 *ref)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ int rc;
+
+ /* Get currently selected input reference */
+ rc = zl3073x_dpll_selected_ref_get(zldpll, ref);
+ if (rc)
+ return rc;
+
+ /* If the monitor indicates an error nothing is connected */
+ if (ZL3073X_DPLL_REF_IS_VALID(*ref) &&
+ !zl3073x_dev_ref_is_status_ok(zldev, *ref))
+ *ref = ZL3073X_DPLL_REF_NONE;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_input_pin_phase_offset_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, s64 *phase_offset,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ const struct zl3073x_ref *ref;
+ u8 conn_id, ref_id;
+ s64 ref_phase;
+ int rc;
+
+ /* Get currently connected reference */
+ rc = zl3073x_dpll_connected_ref_get(zldpll, &conn_id);
+ if (rc)
+ return rc;
+
+	/* If the phase monitor feature is disabled, report the phase offset
+	 * only for the currently connected pin, and in either case only if
+	 * the input pin signal is present.
+ */
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = zl3073x_ref_state_get(zldev, ref_id);
+ if ((!zldpll->phase_monitor && ref_id != conn_id) ||
+ !zl3073x_ref_is_status_ok(ref)) {
+ *phase_offset = 0;
+ return 0;
+ }
+
+ ref_phase = pin->phase_offset;
+
+	/* If the DPLL is locked to a reference running at a higher frequency
+	 * than the given one, reduce the phase offset modulo the period of
+	 * the signal the DPLL is locked to.
+	 */
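+	/* Illustrative (hypothetical) numbers: with conn_freq = 10 Hz the
+	 * connected period is 100000000000 ps, so a ref_phase of
+	 * 250000000000 ps is reduced to 50000000000 ps.
+	 */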
+ if (ZL3073X_DPLL_REF_IS_VALID(conn_id) && conn_id != ref_id) {
+ u32 conn_freq, ref_freq;
+
+ /* Get frequency of connected and given ref */
+ conn_freq = zl3073x_dev_ref_freq_get(zldev, conn_id);
+ ref_freq = zl3073x_ref_freq_get(ref);
+
+ if (conn_freq > ref_freq) {
+ s64 conn_period, div_factor;
+
+ conn_period = div_s64(PSEC_PER_SEC, conn_freq);
+ div_factor = div64_s64(ref_phase, conn_period);
+ ref_phase -= conn_period * div_factor;
+ }
+ }
+
+ *phase_offset = ref_phase * DPLL_PHASE_OFFSET_DIVIDER;
+
+ return rc;
+}
+
+static int
+zl3073x_dpll_input_pin_phase_adjust_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ s32 *phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ const struct zl3073x_ref *ref;
+ s64 phase_comp;
+ u8 ref_id;
+
+ /* Read reference configuration */
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = zl3073x_ref_state_get(zldev, ref_id);
+
+ /* Perform sign extension for 48bit signed value */
+ phase_comp = sign_extend64(ref->phase_comp, 47);
+
+ /* Reverse two's complement negation applied during set and convert
+ * to 32bit signed int
+ */
+ *phase_adjust = (s32)-phase_comp;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_input_pin_phase_adjust_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ s32 phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ struct zl3073x_ref ref;
+ u8 ref_id;
+
+ /* Read reference configuration */
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = *zl3073x_ref_state_get(zldev, ref_id);
+
+ /* The value in the register is stored as two's complement negation
+ * of requested value.
+ */
+ ref.phase_comp = -phase_adjust;
+
+ /* Update reference configuration */
+ return zl3073x_ref_state_set(zldev, ref_id, &ref);
+}
+
+/**
+ * zl3073x_dpll_ref_prio_get - get priority for given input pin
+ * @pin: pointer to pin
+ * @prio: place to store priority
+ *
+ * Reads current priority for the given input pin and stores the value
+ * to @prio.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_ref_prio_get(struct zl3073x_dpll_pin *pin, u8 *prio)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 ref, ref_prio;
+ int rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read DPLL configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_DPLL_MB_SEM, ZL_DPLL_MB_SEM_RD,
+ ZL_REG_DPLL_MB_MASK, BIT(zldpll->id));
+ if (rc)
+ return rc;
+
+ /* Read reference priority - one value for P&N pins (4 bits/pin) */
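+	/* E.g. refs 4 and 5 (hypothetically the REF2 P/N pair) share the
+	 * priority register at index ref / 2 = 2, one 4-bit nibble each.
+	 */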
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_REF_PRIO(ref / 2),
+ &ref_prio);
+ if (rc)
+ return rc;
+
+	/* Select nibble according to pin type */
+ if (zl3073x_dpll_is_p_pin(pin))
+ *prio = FIELD_GET(ZL_DPLL_REF_PRIO_REF_P, ref_prio);
+ else
+ *prio = FIELD_GET(ZL_DPLL_REF_PRIO_REF_N, ref_prio);
+
+ return rc;
+}
+
+/**
+ * zl3073x_dpll_ref_prio_set - set priority for given input pin
+ * @pin: pointer to pin
+ * @prio: priority to be set
+ *
+ * Sets priority for the given input pin.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_ref_prio_set(struct zl3073x_dpll_pin *pin, u8 prio)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 ref, ref_prio;
+ int rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read DPLL configuration into mailbox */
+ rc = zl3073x_mb_op(zldev, ZL_REG_DPLL_MB_SEM, ZL_DPLL_MB_SEM_RD,
+ ZL_REG_DPLL_MB_MASK, BIT(zldpll->id));
+ if (rc)
+ return rc;
+
+ /* Read reference priority - one value shared between P&N pins */
+ ref = zl3073x_input_pin_ref_get(pin->id);
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_REF_PRIO(ref / 2), &ref_prio);
+ if (rc)
+ return rc;
+
+	/* Update nibble according to pin type */
+ if (zl3073x_dpll_is_p_pin(pin)) {
+ ref_prio &= ~ZL_DPLL_REF_PRIO_REF_P;
+ ref_prio |= FIELD_PREP(ZL_DPLL_REF_PRIO_REF_P, prio);
+ } else {
+ ref_prio &= ~ZL_DPLL_REF_PRIO_REF_N;
+ ref_prio |= FIELD_PREP(ZL_DPLL_REF_PRIO_REF_N, prio);
+ }
+
+ /* Update reference priority */
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_REF_PRIO(ref / 2), ref_prio);
+ if (rc)
+ return rc;
+
+ /* Commit configuration */
+ return zl3073x_mb_op(zldev, ZL_REG_DPLL_MB_SEM, ZL_DPLL_MB_SEM_WR,
+ ZL_REG_DPLL_MB_MASK, BIT(zldpll->id));
+}
+
+/**
+ * zl3073x_dpll_ref_state_get - get status for given input pin
+ * @pin: pointer to pin
+ * @state: place to store status
+ *
+ * Checks current status for the given input pin and stores the value
+ * to @state.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_ref_state_get(struct zl3073x_dpll_pin *pin,
+ enum dpll_pin_state *state)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 ref, ref_conn;
+ int rc;
+
+ ref = zl3073x_input_pin_ref_get(pin->id);
+
+ /* Get currently connected reference */
+ rc = zl3073x_dpll_connected_ref_get(zldpll, &ref_conn);
+ if (rc)
+ return rc;
+
+ if (ref == ref_conn) {
+ *state = DPLL_PIN_STATE_CONNECTED;
+ return 0;
+ }
+
+	/* If the DPLL is running in automatic mode, the reference is
+	 * selectable and its monitor does not report any error, then report
+	 * the pin as selectable.
+ */
+ if (zldpll->refsel_mode == ZL_DPLL_MODE_REFSEL_MODE_AUTO &&
+ zl3073x_dev_ref_is_status_ok(zldev, ref) && pin->selectable) {
+ *state = DPLL_PIN_STATE_SELECTABLE;
+ return 0;
+ }
+
+ /* Otherwise report the pin as disconnected */
+ *state = DPLL_PIN_STATE_DISCONNECTED;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_input_pin_state_on_dpll_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll_pin *pin = pin_priv;
+
+ return zl3073x_dpll_ref_state_get(pin, state);
+}
+
+static int
+zl3073x_dpll_input_pin_state_on_dpll_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ u8 new_ref;
+ int rc;
+
+ switch (zldpll->refsel_mode) {
+ case ZL_DPLL_MODE_REFSEL_MODE_REFLOCK:
+ case ZL_DPLL_MODE_REFSEL_MODE_FREERUN:
+ case ZL_DPLL_MODE_REFSEL_MODE_HOLDOVER:
+ if (state == DPLL_PIN_STATE_CONNECTED) {
+ /* Choose the pin as new selected reference */
+ new_ref = zl3073x_input_pin_ref_get(pin->id);
+ } else if (state == DPLL_PIN_STATE_DISCONNECTED) {
+ /* No reference */
+ new_ref = ZL3073X_DPLL_REF_NONE;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid pin state for manual mode");
+ return -EINVAL;
+ }
+
+ rc = zl3073x_dpll_selected_ref_set(zldpll, new_ref);
+ break;
+
+ case ZL_DPLL_MODE_REFSEL_MODE_AUTO:
+ if (state == DPLL_PIN_STATE_SELECTABLE) {
+ if (pin->selectable)
+ return 0; /* Pin is already selectable */
+
+ /* Restore pin priority in HW */
+ rc = zl3073x_dpll_ref_prio_set(pin, pin->prio);
+ if (rc)
+ return rc;
+
+ /* Mark pin as selectable */
+ pin->selectable = true;
+ } else if (state == DPLL_PIN_STATE_DISCONNECTED) {
+ if (!pin->selectable)
+ return 0; /* Pin is already disconnected */
+
+ /* Set pin priority to none in HW */
+ rc = zl3073x_dpll_ref_prio_set(pin,
+ ZL_DPLL_REF_PRIO_NONE);
+ if (rc)
+ return rc;
+
+ /* Mark pin as non-selectable */
+ pin->selectable = false;
+ } else {
+ NL_SET_ERR_MSG(extack,
+ "Invalid pin state for automatic mode");
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ /* In other modes we cannot change input reference */
+ NL_SET_ERR_MSG(extack,
+ "Pin state cannot be changed in current mode");
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ return rc;
+}
+
+static int
+zl3073x_dpll_input_pin_prio_get(const struct dpll_pin *dpll_pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 *prio, struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll_pin *pin = pin_priv;
+
+ *prio = pin->prio;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_input_pin_prio_set(const struct dpll_pin *dpll_pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 prio, struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ int rc;
+
+ if (prio > ZL_DPLL_REF_PRIO_MAX)
+ return -EINVAL;
+
+ /* If the pin is selectable then update HW registers */
+ if (pin->selectable) {
+ rc = zl3073x_dpll_ref_prio_set(pin, prio);
+ if (rc)
+ return rc;
+ }
+
+ /* Save priority */
+ pin->prio = prio;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_output_pin_esync_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ const struct zl3073x_synth *synth;
+ const struct zl3073x_out *out;
+ u8 clock_type, out_id;
+ u32 synth_freq;
+
+ out_id = zl3073x_output_pin_out_get(pin->id);
+ out = zl3073x_out_state_get(zldev, out_id);
+
+ /* If N-division is enabled, esync is not supported. The register used
+ * for N-division is also used for the esync divider so both cannot
+ * be used.
+ */
+ switch (zl3073x_out_signal_format_get(out)) {
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV:
+ return -EOPNOTSUPP;
+ default:
+ break;
+ }
+
+ /* Get attached synth frequency */
+ synth = zl3073x_synth_state_get(zldev, zl3073x_out_synth_get(out));
+ synth_freq = zl3073x_synth_freq_get(synth);
+
+ clock_type = FIELD_GET(ZL_OUTPUT_MODE_CLOCK_TYPE, out->mode);
+ if (clock_type != ZL_OUTPUT_MODE_CLOCK_TYPE_ESYNC) {
+ /* No need to read esync data if it is not enabled */
+ esync->freq = 0;
+ esync->pulse = 0;
+
+ goto finish;
+ }
+
+ /* Compute esync frequency */
+ esync->freq = synth_freq / out->div / out->esync_n_period;
+
+	/* By comparing esync_n_width to half of the output period the esync
+	 * pulse percentage can be determined.
+	 * Note that the half period is in units of half synth cycles, which
+	 * is why it reduces down to out->div.
+ */
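+	/* E.g. (hypothetical values) out->div = 100 and out->esync_n_width
+	 * = 50 yield a 25% pulse: (50 * 50) / 100 = 25.
+	 */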
+ esync->pulse = (50 * out->esync_n_width) / out->div;
+
+finish:
+ /* Set supported esync ranges if the pin supports esync control and
+ * if the output frequency is > 1 Hz.
+ */
+ if (pin->esync_control && (synth_freq / out->div) > 1) {
+ esync->range = esync_freq_ranges;
+ esync->range_num = ARRAY_SIZE(esync_freq_ranges);
+ } else {
+ esync->range = NULL;
+ esync->range_num = 0;
+ }
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_output_pin_esync_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 freq,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ const struct zl3073x_synth *synth;
+ struct zl3073x_out out;
+ u8 clock_type, out_id;
+ u32 synth_freq;
+
+ out_id = zl3073x_output_pin_out_get(pin->id);
+ out = *zl3073x_out_state_get(zldev, out_id);
+
+ /* If N-division is enabled, esync is not supported. The register used
+ * for N-division is also used for the esync divider so both cannot
+ * be used.
+ */
+ switch (zl3073x_out_signal_format_get(&out)) {
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV:
+ return -EOPNOTSUPP;
+ default:
+ break;
+ }
+
+ /* Select clock type */
+ if (freq)
+ clock_type = ZL_OUTPUT_MODE_CLOCK_TYPE_ESYNC;
+ else
+ clock_type = ZL_OUTPUT_MODE_CLOCK_TYPE_NORMAL;
+
+ /* Update clock type in output mode */
+ out.mode &= ~ZL_OUTPUT_MODE_CLOCK_TYPE;
+ out.mode |= FIELD_PREP(ZL_OUTPUT_MODE_CLOCK_TYPE, clock_type);
+
+ /* If esync is being disabled just write mailbox and finish */
+ if (!freq)
+ goto write_mailbox;
+
+ /* Get attached synth frequency */
+ synth = zl3073x_synth_state_get(zldev, zl3073x_out_synth_get(&out));
+ synth_freq = zl3073x_synth_freq_get(synth);
+
+ /* Compute and update esync period */
+ out.esync_n_period = synth_freq / (u32)freq / out.div;
+
+ /* Half of the period in units of 1/2 synth cycle can be represented by
+ * the output_div. To get the supported esync pulse width of 25% of the
+ * period the output_div can just be divided by 2. Note that this
+ * assumes that output_div is even, otherwise some resolution will be
+ * lost.
+ */
+ out.esync_n_width = out.div / 2;
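+	/* E.g. (hypothetical values) a 10 MHz synth with out.div = 100000
+	 * (100 Hz output) and freq = 1 Hz gives esync_n_period = 100 and
+	 * esync_n_width = 50000, i.e. a 25% esync pulse.
+	 */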
+
+write_mailbox:
+ /* Commit output configuration */
+ return zl3073x_out_state_set(zldev, out_id, &out);
+}
+
+static int
+zl3073x_dpll_output_pin_frequency_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 *frequency,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ const struct zl3073x_synth *synth;
+ const struct zl3073x_out *out;
+ u32 synth_freq;
+ u8 out_id;
+
+ out_id = zl3073x_output_pin_out_get(pin->id);
+ out = zl3073x_out_state_get(zldev, out_id);
+
+ /* Get attached synth frequency */
+ synth = zl3073x_synth_state_get(zldev, zl3073x_out_synth_get(out));
+ synth_freq = zl3073x_synth_freq_get(synth);
+
+ switch (zl3073x_out_signal_format_get(out)) {
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV:
+		/* In the case of a divided format we have to distinguish
+		 * between the output pin types.
+ *
+ * For P-pin the resulting frequency is computed as simple
+ * division of synth frequency and output divisor.
+ *
+ * For N-pin we have to divide additionally by divisor stored
+ * in esync_n_period output mailbox register that is used as
+ * N-pin divisor for these modes.
+ */
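+		/* E.g. (hypothetical values) synth_freq = 10 MHz,
+		 * out->div = 10 and out->esync_n_period = 4: the P-pin runs
+		 * at 1 MHz and the N-pin at 250 kHz.
+		 */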
+ *frequency = synth_freq / out->div;
+
+ if (!zl3073x_dpll_is_p_pin(pin))
+ *frequency = (u32)*frequency / out->esync_n_period;
+
+ break;
+ default:
+ /* In other modes the resulting frequency is computed as
+ * division of synth frequency and output divisor.
+ */
+ *frequency = synth_freq / out->div;
+ break;
+ }
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_output_pin_frequency_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, u64 frequency,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ const struct zl3073x_synth *synth;
+ u8 out_id, signal_format;
+ u32 new_div, synth_freq;
+ struct zl3073x_out out;
+
+ out_id = zl3073x_output_pin_out_get(pin->id);
+ out = *zl3073x_out_state_get(zldev, out_id);
+
+ /* Get attached synth frequency and compute new divisor */
+ synth = zl3073x_synth_state_get(zldev, zl3073x_out_synth_get(&out));
+ synth_freq = zl3073x_synth_freq_get(synth);
+ new_div = synth_freq / (u32)frequency;
+
+ /* Get used signal format for the given output */
+ signal_format = zl3073x_out_signal_format_get(&out);
+
+ /* Check signal format */
+ if (signal_format != ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV &&
+ signal_format != ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV) {
+ /* For non N-divided signal formats the frequency is computed
+ * as division of synth frequency and output divisor.
+ */
+ out.div = new_div;
+
+ /* For 50/50 duty cycle the divisor is equal to width */
+ out.width = new_div;
+
+ /* Commit output configuration */
+ return zl3073x_out_state_set(zldev, out_id, &out);
+ }
+
+ if (zl3073x_dpll_is_p_pin(pin)) {
+		/* We are changing the output frequency of the P-pin. Update
+		 * the N-pin divisor so that the N-pin frequency is kept
+		 * unchanged. If the requested P-pin frequency is lower than
+		 * the current N-pin frequency, no such divisor exists and a
+		 * failure is indicated.
+ */
+ out.esync_n_period = (out.esync_n_period * out.div) / new_div;
+ if (!out.esync_n_period)
+ return -EINVAL;
+
+ /* Update the output divisor */
+ out.div = new_div;
+
+ /* For 50/50 duty cycle the divisor is equal to width */
+ out.width = out.div;
+ } else {
+		/* We are changing the frequency of the N-pin. Update the
+		 * N-pin divisor. If the requested frequency is greater than
+		 * or equal to the P-pin frequency of the output pair, no
+		 * divisor for the N-pin can be computed and a failure is
+		 * indicated.
+ */
+ out.esync_n_period = div64_u64(synth_freq, frequency * out.div);
+ if (!out.esync_n_period)
+ return -EINVAL;
+ }
+
+ /* For 50/50 duty cycle the divisor is equal to width */
+ out.esync_n_width = out.esync_n_period;
+
+ /* Commit output configuration */
+ return zl3073x_out_state_set(zldev, out_id, &out);
+}
+
+static int
+zl3073x_dpll_output_pin_phase_adjust_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ s32 *phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ const struct zl3073x_out *out;
+ u8 out_id;
+
+ out_id = zl3073x_output_pin_out_get(pin->id);
+ out = zl3073x_out_state_get(zldev, out_id);
+
+ /* Convert value to ps and reverse two's complement negation applied
+ * during 'set'
+ */
+ *phase_adjust = -out->phase_comp * pin->phase_gran;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_output_pin_phase_adjust_set(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ s32 phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ struct zl3073x_dpll_pin *pin = pin_priv;
+ struct zl3073x_out out;
+ u8 out_id;
+
+ out_id = zl3073x_output_pin_out_get(pin->id);
+ out = *zl3073x_out_state_get(zldev, out_id);
+
+ /* The value in the register is stored as two's complement negation
+ * of requested value and expressed in half synth clock cycles.
+ */
+ out.phase_comp = -phase_adjust / pin->phase_gran;
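+	/* E.g. (hypothetical) with phase_gran = 500 ps, a requested
+	 * phase_adjust of -2000 ps is stored as 4 half-synth-cycle steps.
+	 */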
+
+	/* Commit output configuration */
+ return zl3073x_out_state_set(zldev, out_id, &out);
+}
+
+static int
+zl3073x_dpll_output_pin_state_on_dpll_get(const struct dpll_pin *dpll_pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ /* If the output pin is registered then it is always connected */
+ *state = DPLL_PIN_STATE_CONNECTED;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 mon_status, state;
+ int rc;
+
+ switch (zldpll->refsel_mode) {
+ case ZL_DPLL_MODE_REFSEL_MODE_FREERUN:
+ case ZL_DPLL_MODE_REFSEL_MODE_NCO:
+ /* In FREERUN and NCO modes the DPLL is always unlocked */
+ *status = DPLL_LOCK_STATUS_UNLOCKED;
+
+ return 0;
+ default:
+ break;
+ }
+
+ /* Read DPLL monitor status */
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MON_STATUS(zldpll->id),
+ &mon_status);
+ if (rc)
+ return rc;
+ state = FIELD_GET(ZL_DPLL_MON_STATUS_STATE, mon_status);
+
+ switch (state) {
+ case ZL_DPLL_MON_STATUS_STATE_LOCK:
+ if (FIELD_GET(ZL_DPLL_MON_STATUS_HO_READY, mon_status))
+ *status = DPLL_LOCK_STATUS_LOCKED_HO_ACQ;
+ else
+ *status = DPLL_LOCK_STATUS_LOCKED;
+ break;
+ case ZL_DPLL_MON_STATUS_STATE_HOLDOVER:
+ case ZL_DPLL_MON_STATUS_STATE_ACQUIRING:
+ *status = DPLL_LOCK_STATUS_HOLDOVER;
+ break;
+ default:
+ dev_warn(zldev->dev, "Unknown DPLL monitor status: 0x%02x\n",
+ mon_status);
+ *status = DPLL_LOCK_STATUS_UNLOCKED;
+ break;
+ }
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_mode_get(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_mode *mode, struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+
+ switch (zldpll->refsel_mode) {
+ case ZL_DPLL_MODE_REFSEL_MODE_FREERUN:
+ case ZL_DPLL_MODE_REFSEL_MODE_HOLDOVER:
+ case ZL_DPLL_MODE_REFSEL_MODE_NCO:
+ case ZL_DPLL_MODE_REFSEL_MODE_REFLOCK:
+ /* Use MANUAL for device FREERUN, HOLDOVER, NCO and
+ * REFLOCK modes
+ */
+ *mode = DPLL_MODE_MANUAL;
+ break;
+ case ZL_DPLL_MODE_REFSEL_MODE_AUTO:
+ /* Use AUTO for device AUTO mode */
+ *mode = DPLL_MODE_AUTOMATIC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_phase_offset_avg_factor_get(const struct dpll_device *dpll,
+ void *dpll_priv, u32 *factor,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+
+ *factor = zl3073x_dev_phase_avg_factor_get(zldpll->dev);
+
+ return 0;
+}
+
+static void
+zl3073x_dpll_change_work(struct work_struct *work)
+{
+ struct zl3073x_dpll *zldpll;
+
+ zldpll = container_of(work, struct zl3073x_dpll, change_work);
+ dpll_device_change_ntf(zldpll->dpll_dev);
+}
+
+static int
+zl3073x_dpll_phase_offset_avg_factor_set(const struct dpll_device *dpll,
+ void *dpll_priv, u32 factor,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *item, *zldpll = dpll_priv;
+ int rc;
+
+ if (factor > 15) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Phase offset average factor has to be from range <0,15>");
+ return -EINVAL;
+ }
+
+ rc = zl3073x_dev_phase_avg_factor_set(zldpll->dev, factor);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Failed to set phase offset averaging factor");
+ return rc;
+ }
+
+	/* The averaging factor is common to all DPLL channels, so after a
+	 * change we have to send a notification for the other DPLL devices.
+ */
+ list_for_each_entry(item, &zldpll->dev->dplls, list) {
+ if (item != zldpll)
+ schedule_work(&item->change_work);
+ }
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_phase_offset_monitor_get(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+
+ if (zldpll->phase_monitor)
+ *state = DPLL_FEATURE_STATE_ENABLE;
+ else
+ *state = DPLL_FEATURE_STATE_DISABLE;
+
+ return 0;
+}
+
+static int
+zl3073x_dpll_phase_offset_monitor_set(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+
+ zldpll->phase_monitor = (state == DPLL_FEATURE_STATE_ENABLE);
+
+ return 0;
+}
+
+static const struct dpll_pin_ops zl3073x_dpll_input_pin_ops = {
+ .direction_get = zl3073x_dpll_pin_direction_get,
+ .esync_get = zl3073x_dpll_input_pin_esync_get,
+ .esync_set = zl3073x_dpll_input_pin_esync_set,
+ .ffo_get = zl3073x_dpll_input_pin_ffo_get,
+ .frequency_get = zl3073x_dpll_input_pin_frequency_get,
+ .frequency_set = zl3073x_dpll_input_pin_frequency_set,
+ .phase_offset_get = zl3073x_dpll_input_pin_phase_offset_get,
+ .phase_adjust_get = zl3073x_dpll_input_pin_phase_adjust_get,
+ .phase_adjust_set = zl3073x_dpll_input_pin_phase_adjust_set,
+ .prio_get = zl3073x_dpll_input_pin_prio_get,
+ .prio_set = zl3073x_dpll_input_pin_prio_set,
+ .state_on_dpll_get = zl3073x_dpll_input_pin_state_on_dpll_get,
+ .state_on_dpll_set = zl3073x_dpll_input_pin_state_on_dpll_set,
+};
+
+static const struct dpll_pin_ops zl3073x_dpll_output_pin_ops = {
+ .direction_get = zl3073x_dpll_pin_direction_get,
+ .esync_get = zl3073x_dpll_output_pin_esync_get,
+ .esync_set = zl3073x_dpll_output_pin_esync_set,
+ .frequency_get = zl3073x_dpll_output_pin_frequency_get,
+ .frequency_set = zl3073x_dpll_output_pin_frequency_set,
+ .phase_adjust_get = zl3073x_dpll_output_pin_phase_adjust_get,
+ .phase_adjust_set = zl3073x_dpll_output_pin_phase_adjust_set,
+ .state_on_dpll_get = zl3073x_dpll_output_pin_state_on_dpll_get,
+};
+
+static const struct dpll_device_ops zl3073x_dpll_device_ops = {
+ .lock_status_get = zl3073x_dpll_lock_status_get,
+ .mode_get = zl3073x_dpll_mode_get,
+ .phase_offset_avg_factor_get = zl3073x_dpll_phase_offset_avg_factor_get,
+ .phase_offset_avg_factor_set = zl3073x_dpll_phase_offset_avg_factor_set,
+ .phase_offset_monitor_get = zl3073x_dpll_phase_offset_monitor_get,
+ .phase_offset_monitor_set = zl3073x_dpll_phase_offset_monitor_set,
+};
+
+/**
+ * zl3073x_dpll_pin_alloc - allocate DPLL pin
+ * @zldpll: pointer to zl3073x_dpll
+ * @dir: pin direction
+ * @id: pin id
+ *
+ * Allocates and initializes zl3073x_dpll_pin structure for given
+ * pin id and direction.
+ *
+ * Return: pointer to allocated structure on success, error pointer on error
+ */
+static struct zl3073x_dpll_pin *
+zl3073x_dpll_pin_alloc(struct zl3073x_dpll *zldpll, enum dpll_pin_direction dir,
+ u8 id)
+{
+ struct zl3073x_dpll_pin *pin;
+
+ pin = kzalloc(sizeof(*pin), GFP_KERNEL);
+ if (!pin)
+ return ERR_PTR(-ENOMEM);
+
+ pin->dpll = zldpll;
+ pin->dir = dir;
+ pin->id = id;
+
+ return pin;
+}
+
+/**
+ * zl3073x_dpll_pin_free - deallocate DPLL pin
+ * @pin: pin to free
+ *
+ * Deallocates DPLL pin previously allocated by @zl3073x_dpll_pin_alloc.
+ */
+static void
+zl3073x_dpll_pin_free(struct zl3073x_dpll_pin *pin)
+{
+ WARN(pin->dpll_pin, "DPLL pin is still registered\n");
+
+ kfree(pin);
+}
+
+/**
+ * zl3073x_dpll_pin_register - register DPLL pin
+ * @pin: pointer to DPLL pin
+ * @index: absolute pin index for registration
+ *
+ * Registers given DPLL pin into DPLL sub-system.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_pin_register(struct zl3073x_dpll_pin *pin, u32 index)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ struct zl3073x_pin_props *props;
+ const struct dpll_pin_ops *ops;
+ int rc;
+
+ /* Get pin properties */
+ props = zl3073x_pin_props_get(zldpll->dev, pin->dir, pin->id);
+ if (IS_ERR(props))
+ return PTR_ERR(props);
+
+ /* Save package label, esync capability and phase adjust granularity */
+ strscpy(pin->label, props->package_label);
+ pin->esync_control = props->esync_control;
+ pin->phase_gran = props->dpll_props.phase_gran;
+
+ if (zl3073x_dpll_is_input_pin(pin)) {
+ rc = zl3073x_dpll_ref_prio_get(pin, &pin->prio);
+ if (rc)
+ goto err_prio_get;
+
+ if (pin->prio == ZL_DPLL_REF_PRIO_NONE) {
+ /* Clamp prio to max value & mark pin non-selectable */
+ pin->prio = ZL_DPLL_REF_PRIO_MAX;
+ pin->selectable = false;
+ } else {
+ /* Mark pin as selectable */
+ pin->selectable = true;
+ }
+ }
+
+ /* Create or get existing DPLL pin */
+ pin->dpll_pin = dpll_pin_get(zldpll->dev->clock_id, index, THIS_MODULE,
+ &props->dpll_props);
+ if (IS_ERR(pin->dpll_pin)) {
+ rc = PTR_ERR(pin->dpll_pin);
+ goto err_pin_get;
+ }
+
+ if (zl3073x_dpll_is_input_pin(pin))
+ ops = &zl3073x_dpll_input_pin_ops;
+ else
+ ops = &zl3073x_dpll_output_pin_ops;
+
+ /* Register the pin */
+ rc = dpll_pin_register(zldpll->dpll_dev, pin->dpll_pin, ops, pin);
+ if (rc)
+ goto err_register;
+
+ /* Free pin properties */
+ zl3073x_pin_props_put(props);
+
+ return 0;
+
+err_register:
+ dpll_pin_put(pin->dpll_pin);
+err_prio_get:
+ pin->dpll_pin = NULL;
+err_pin_get:
+ zl3073x_pin_props_put(props);
+
+ return rc;
+}
+
+/**
+ * zl3073x_dpll_pin_unregister - unregister DPLL pin
+ * @pin: pointer to DPLL pin
+ *
+ * Unregisters pin previously registered by @zl3073x_dpll_pin_register.
+ */
+static void
+zl3073x_dpll_pin_unregister(struct zl3073x_dpll_pin *pin)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ const struct dpll_pin_ops *ops;
+
+ WARN(!pin->dpll_pin, "DPLL pin is not registered\n");
+
+ if (zl3073x_dpll_is_input_pin(pin))
+ ops = &zl3073x_dpll_input_pin_ops;
+ else
+ ops = &zl3073x_dpll_output_pin_ops;
+
+ /* Unregister the pin */
+ dpll_pin_unregister(zldpll->dpll_dev, pin->dpll_pin, ops, pin);
+
+ dpll_pin_put(pin->dpll_pin);
+ pin->dpll_pin = NULL;
+}
+
+/**
+ * zl3073x_dpll_pins_unregister - unregister all registered DPLL pins
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Enumerates all DPLL pins registered to given DPLL device and
+ * unregisters them.
+ */
+static void
+zl3073x_dpll_pins_unregister(struct zl3073x_dpll *zldpll)
+{
+ struct zl3073x_dpll_pin *pin, *next;
+
+ list_for_each_entry_safe(pin, next, &zldpll->pins, list) {
+ zl3073x_dpll_pin_unregister(pin);
+ list_del(&pin->list);
+ zl3073x_dpll_pin_free(pin);
+ }
+}
+
+/**
+ * zl3073x_dpll_pin_is_registrable - check if the pin is registrable
+ * @zldpll: pointer to zl3073x_dpll structure
+ * @dir: pin direction
+ * @index: pin index
+ *
+ * Checks if the given pin can be registered to given DPLL. For both
+ * directions the pin can be registered if it is enabled. In case of
+ * differential signal type only P-pin is reported as registrable.
+ * And additionally for the output pin, the pin can be registered only
+ * if it is connected to synthesizer that is driven by given DPLL.
+ *
+ * Return: true if the pin is registrable, false if not
+ */
+static bool
+zl3073x_dpll_pin_is_registrable(struct zl3073x_dpll *zldpll,
+ enum dpll_pin_direction dir, u8 index)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ bool is_diff, is_enabled;
+ const char *name;
+
+ if (dir == DPLL_PIN_DIRECTION_INPUT) {
+ u8 ref_id = zl3073x_input_pin_ref_get(index);
+ const struct zl3073x_ref *ref;
+
+ /* Skip the pin if the DPLL is running in NCO mode */
+ if (zldpll->refsel_mode == ZL_DPLL_MODE_REFSEL_MODE_NCO)
+ return false;
+
+ name = "REF";
+ ref = zl3073x_ref_state_get(zldev, ref_id);
+ is_diff = zl3073x_ref_is_diff(ref);
+ is_enabled = zl3073x_ref_is_enabled(ref);
+ } else {
+ /* Output P&N pair shares single HW output */
+ u8 out = zl3073x_output_pin_out_get(index);
+
+ /* Skip the pin if it is connected to different DPLL channel */
+ if (zl3073x_dev_out_dpll_get(zldev, out) != zldpll->id) {
+ dev_dbg(zldev->dev,
+ "OUT%u is driven by different DPLL\n", out);
+
+ return false;
+ }
+
+ name = "OUT";
+ is_diff = zl3073x_dev_out_is_diff(zldev, out);
+ is_enabled = zl3073x_dev_output_pin_is_enabled(zldev, index);
+ }
+
+ /* Skip N-pin if the corresponding input/output is differential */
+ if (is_diff && zl3073x_is_n_pin(index)) {
+ dev_dbg(zldev->dev, "%s%u is differential, skipping N-pin\n",
+ name, index / 2);
+
+ return false;
+ }
+
+ /* Skip the pin if it is disabled */
+ if (!is_enabled) {
+ dev_dbg(zldev->dev, "%s%u%c is disabled\n", name, index / 2,
+ zl3073x_is_p_pin(index) ? 'P' : 'N');
+
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * zl3073x_dpll_pins_register - register all registerable DPLL pins
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Enumerates all possible input/output pins and registers all of them
+ * that are registrable.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_pins_register(struct zl3073x_dpll *zldpll)
+{
+ struct zl3073x_dpll_pin *pin;
+ enum dpll_pin_direction dir;
+ u8 id, index;
+ int rc;
+
+ /* Process input pins */
+ for (index = 0; index < ZL3073X_NUM_PINS; index++) {
+ /* First input pins and then output pins */
+ if (index < ZL3073X_NUM_INPUT_PINS) {
+ id = index;
+ dir = DPLL_PIN_DIRECTION_INPUT;
+ } else {
+ id = index - ZL3073X_NUM_INPUT_PINS;
+ dir = DPLL_PIN_DIRECTION_OUTPUT;
+ }
+
+		/* Check if the pin is registrable to this DPLL */
+ if (!zl3073x_dpll_pin_is_registrable(zldpll, dir, id))
+ continue;
+
+ pin = zl3073x_dpll_pin_alloc(zldpll, dir, id);
+ if (IS_ERR(pin)) {
+ rc = PTR_ERR(pin);
+ goto error;
+ }
+
+ rc = zl3073x_dpll_pin_register(pin, index);
+ if (rc)
+ goto error;
+
+ list_add(&pin->list, &zldpll->pins);
+ }
+
+ return 0;
+
+error:
+ zl3073x_dpll_pins_unregister(zldpll);
+
+ return rc;
+}
+
+/**
+ * zl3073x_dpll_device_register - register DPLL device
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Registers given DPLL device into DPLL sub-system.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dpll_device_register(struct zl3073x_dpll *zldpll)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ u8 dpll_mode_refsel;
+ int rc;
+
+ /* Read DPLL mode and forcibly selected reference */
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MODE_REFSEL(zldpll->id),
+ &dpll_mode_refsel);
+ if (rc)
+ return rc;
+
+ /* Extract mode and selected input reference */
+ zldpll->refsel_mode = FIELD_GET(ZL_DPLL_MODE_REFSEL_MODE,
+ dpll_mode_refsel);
+ zldpll->forced_ref = FIELD_GET(ZL_DPLL_MODE_REFSEL_REF,
+ dpll_mode_refsel);
+
+ zldpll->dpll_dev = dpll_device_get(zldev->clock_id, zldpll->id,
+ THIS_MODULE);
+ if (IS_ERR(zldpll->dpll_dev)) {
+ rc = PTR_ERR(zldpll->dpll_dev);
+ zldpll->dpll_dev = NULL;
+
+ return rc;
+ }
+
+ rc = dpll_device_register(zldpll->dpll_dev,
+ zl3073x_prop_dpll_type_get(zldev, zldpll->id),
+ &zl3073x_dpll_device_ops, zldpll);
+ if (rc) {
+ dpll_device_put(zldpll->dpll_dev);
+ zldpll->dpll_dev = NULL;
+ }
+
+ return rc;
+}
+
+/**
+ * zl3073x_dpll_device_unregister - unregister DPLL device
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Unregisters given DPLL device from DPLL sub-system previously registered
+ * by @zl3073x_dpll_device_register.
+ */
+static void
+zl3073x_dpll_device_unregister(struct zl3073x_dpll *zldpll)
+{
+ WARN(!zldpll->dpll_dev, "DPLL device is not registered\n");
+
+ cancel_work_sync(&zldpll->change_work);
+
+ dpll_device_unregister(zldpll->dpll_dev, &zl3073x_dpll_device_ops,
+ zldpll);
+ dpll_device_put(zldpll->dpll_dev);
+ zldpll->dpll_dev = NULL;
+}
+
+/**
+ * zl3073x_dpll_pin_phase_offset_check - check for pin phase offset change
+ * @pin: pin to check
+ *
+ * Checks whether the phase offset between the DPLL and the given pin
+ * has changed.
+ *
+ * Return: true on phase offset change, false otherwise
+ */
+static bool
+zl3073x_dpll_pin_phase_offset_check(struct zl3073x_dpll_pin *pin)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ unsigned int reg;
+ s64 phase_offset;
+ u8 ref_id;
+ int rc;
+
+ /* No phase offset if the ref monitor reports signal errors */
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ if (!zl3073x_dev_ref_is_status_ok(zldev, ref_id))
+ return false;
+
+ /* Select register to read phase offset value depending on pin and
+ * phase monitor state:
+ * 1) For connected pin use dpll_phase_err_data register
+ * 2) For other pins use appropriate ref_phase register if the phase
+ * monitor feature is enabled.
+ */
+ if (pin->pin_state == DPLL_PIN_STATE_CONNECTED)
+ reg = ZL_REG_DPLL_PHASE_ERR_DATA(zldpll->id);
+ else if (zldpll->phase_monitor)
+ reg = ZL_REG_REF_PHASE(ref_id);
+ else
+ return false;
+
+ /* Read measured phase offset value */
+ rc = zl3073x_read_u48(zldev, reg, &phase_offset);
+ if (rc) {
+ dev_err(zldev->dev, "Failed to read ref phase offset: %pe\n",
+ ERR_PTR(rc));
+
+ return false;
+ }
+
+	/* Sign-extend the 48-bit value and convert to ps */
+ phase_offset = div_s64(sign_extend64(phase_offset, 47), 100);
+
+ /* Compare with previous value */
+ if (phase_offset != pin->phase_offset) {
+ dev_dbg(zldev->dev, "%s phase offset changed: %lld -> %lld\n",
+ pin->label, pin->phase_offset, phase_offset);
+ pin->phase_offset = phase_offset;
+
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * zl3073x_dpll_pin_ffo_check - check for pin fractional frequency offset change
+ * @pin: pin to check
+ *
+ * Checks whether the given pin's fractional frequency offset has changed.
+ *
+ * Return: true on fractional frequency offset change, false otherwise
+ */
+static bool
+zl3073x_dpll_pin_ffo_check(struct zl3073x_dpll_pin *pin)
+{
+ struct zl3073x_dpll *zldpll = pin->dpll;
+ struct zl3073x_dev *zldev = zldpll->dev;
+ const struct zl3073x_ref *ref;
+ u8 ref_id;
+
+ /* Get reference monitor status */
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = zl3073x_ref_state_get(zldev, ref_id);
+
+	/* Do not report ffo changes if the reference monitor reports errors */
+ if (!zl3073x_ref_is_status_ok(ref))
+ return false;
+
+ /* Compare with previous value */
+ if (pin->freq_offset != ref->ffo) {
+ dev_dbg(zldev->dev, "%s freq offset changed: %lld -> %lld\n",
+ pin->label, pin->freq_offset, ref->ffo);
+ pin->freq_offset = ref->ffo;
+
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * zl3073x_dpll_changes_check - check for changes and send notifications
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Checks for changes on given DPLL device and its registered DPLL pins
+ * and sends notifications about them.
+ *
+ * This function is periodically called from @zl3073x_dev_periodic_work.
+ */
+void
+zl3073x_dpll_changes_check(struct zl3073x_dpll *zldpll)
+{
+ struct zl3073x_dev *zldev = zldpll->dev;
+ enum dpll_lock_status lock_status;
+ struct device *dev = zldev->dev;
+ struct zl3073x_dpll_pin *pin;
+ int rc;
+
+ zldpll->check_count++;
+
+ /* Get current lock status for the DPLL */
+ rc = zl3073x_dpll_lock_status_get(zldpll->dpll_dev, zldpll,
+ &lock_status, NULL, NULL);
+ if (rc) {
+ dev_err(dev, "Failed to get DPLL%u lock status: %pe\n",
+ zldpll->id, ERR_PTR(rc));
+ return;
+ }
+
+ /* If lock status was changed then notify DPLL core */
+ if (zldpll->lock_status != lock_status) {
+ zldpll->lock_status = lock_status;
+ dpll_device_change_ntf(zldpll->dpll_dev);
+ }
+
+	/* Input pin monitoring only makes sense in automatic
+	 * or forced reference lock modes.
+ */
+ if (zldpll->refsel_mode != ZL_DPLL_MODE_REFSEL_MODE_AUTO &&
+ zldpll->refsel_mode != ZL_DPLL_MODE_REFSEL_MODE_REFLOCK)
+ return;
+
+ /* Update phase offset latch registers for this DPLL if the phase
+ * offset monitor feature is enabled.
+ */
+ if (zldpll->phase_monitor) {
+ rc = zl3073x_ref_phase_offsets_update(zldev, zldpll->id);
+ if (rc) {
+ dev_err(zldev->dev,
+ "Failed to update phase offsets: %pe\n",
+ ERR_PTR(rc));
+ return;
+ }
+ }
+
+ list_for_each_entry(pin, &zldpll->pins, list) {
+ enum dpll_pin_state state;
+ bool pin_changed = false;
+
+		/* Change checks are not necessary for output pins because
+		 * their states are constant.
+ */
+ if (!zl3073x_dpll_is_input_pin(pin))
+ continue;
+
+ rc = zl3073x_dpll_ref_state_get(pin, &state);
+ if (rc) {
+ dev_err(dev,
+ "Failed to get %s on DPLL%u state: %pe\n",
+ pin->label, zldpll->id, ERR_PTR(rc));
+ return;
+ }
+
+ if (state != pin->pin_state) {
+ dev_dbg(dev, "%s state changed: %u->%u\n", pin->label,
+ pin->pin_state, state);
+ pin->pin_state = state;
+ pin_changed = true;
+ }
+
+ /* Check for phase offset and ffo change once per second */
+ if (zldpll->check_count % 2 == 0) {
+ if (zl3073x_dpll_pin_phase_offset_check(pin))
+ pin_changed = true;
+
+ if (zl3073x_dpll_pin_ffo_check(pin))
+ pin_changed = true;
+ }
+
+ if (pin_changed)
+ dpll_pin_change_ntf(pin->dpll_pin);
+ }
+}
+
+/**
+ * zl3073x_dpll_init_fine_phase_adjust - do initial fine phase adjustments
+ * @zldev: pointer to zl3073x device
+ *
+ * Performs initial fine phase adjustments needed per datasheet.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int
+zl3073x_dpll_init_fine_phase_adjust(struct zl3073x_dev *zldev)
+{
+ int rc;
+
+ rc = zl3073x_write_u8(zldev, ZL_REG_SYNTH_PHASE_SHIFT_MASK, 0x1f);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_write_u8(zldev, ZL_REG_SYNTH_PHASE_SHIFT_INTVL, 0x01);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_write_u16(zldev, ZL_REG_SYNTH_PHASE_SHIFT_DATA, 0xffff);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_write_u8(zldev, ZL_REG_SYNTH_PHASE_SHIFT_CTRL, 0x01);
+ if (rc)
+ return rc;
+
+ return rc;
+}
+
+/**
+ * zl3073x_dpll_alloc - allocate DPLL device
+ * @zldev: pointer to zl3073x device
+ * @ch: DPLL channel number
+ *
+ * Allocates DPLL device structure for given DPLL channel.
+ *
+ * Return: pointer to DPLL device on success, error pointer on error
+ */
+struct zl3073x_dpll *
+zl3073x_dpll_alloc(struct zl3073x_dev *zldev, u8 ch)
+{
+ struct zl3073x_dpll *zldpll;
+
+ zldpll = kzalloc(sizeof(*zldpll), GFP_KERNEL);
+ if (!zldpll)
+ return ERR_PTR(-ENOMEM);
+
+ zldpll->dev = zldev;
+ zldpll->id = ch;
+ INIT_LIST_HEAD(&zldpll->pins);
+ INIT_WORK(&zldpll->change_work, zl3073x_dpll_change_work);
+
+ return zldpll;
+}
+
+/**
+ * zl3073x_dpll_free - free DPLL device
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Deallocates given DPLL device previously allocated by @zl3073x_dpll_alloc.
+ */
+void
+zl3073x_dpll_free(struct zl3073x_dpll *zldpll)
+{
+ WARN(zldpll->dpll_dev, "DPLL device is still registered\n");
+
+ kfree(zldpll);
+}
+
+/**
+ * zl3073x_dpll_register - register DPLL device and all its pins
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Registers given DPLL device and all its pins into DPLL sub-system.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int
+zl3073x_dpll_register(struct zl3073x_dpll *zldpll)
+{
+ int rc;
+
+ rc = zl3073x_dpll_device_register(zldpll);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_dpll_pins_register(zldpll);
+ if (rc) {
+ zl3073x_dpll_device_unregister(zldpll);
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * zl3073x_dpll_unregister - unregister DPLL device and its pins
+ * @zldpll: pointer to zl3073x_dpll structure
+ *
+ * Unregisters the given DPLL device and all its pins from the DPLL
+ * sub-system previously registered by zl3073x_dpll_register().
+ */
+void
+zl3073x_dpll_unregister(struct zl3073x_dpll *zldpll)
+{
+ /* Unregister all pins and dpll */
+ zl3073x_dpll_pins_unregister(zldpll);
+ zl3073x_dpll_device_unregister(zldpll);
+}
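+
+/*
+ * Typical DPLL channel lifecycle (illustrative sketch, not taken verbatim
+ * from a caller):
+ *
+ *	zldpll = zl3073x_dpll_alloc(zldev, ch);
+ *	if (IS_ERR(zldpll))
+ *		return PTR_ERR(zldpll);
+ *	rc = zl3073x_dpll_register(zldpll);
+ *	...
+ *	zl3073x_dpll_unregister(zldpll);
+ *	zl3073x_dpll_free(zldpll);
+ */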
diff --git a/drivers/dpll/zl3073x/dpll.h b/drivers/dpll/zl3073x/dpll.h
new file mode 100644
index 000000000000..e8c39b44b356
--- /dev/null
+++ b/drivers/dpll/zl3073x/dpll.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_DPLL_H
+#define _ZL3073X_DPLL_H
+
+#include <linux/dpll.h>
+#include <linux/list.h>
+
+#include "core.h"
+
+/**
+ * struct zl3073x_dpll - ZL3073x DPLL sub-device structure
+ * @list: this DPLL list entry
+ * @dev: pointer to multi-function parent device
+ * @id: DPLL index
+ * @refsel_mode: reference selection mode
+ * @forced_ref: selected reference in forced reference lock mode
+ * @check_count: periodic check counter
+ * @phase_monitor: is phase offset monitor enabled
+ * @dpll_dev: pointer to registered DPLL device
+ * @lock_status: last saved DPLL lock status
+ * @pins: list of pins
+ * @change_work: device change notification work
+ */
+struct zl3073x_dpll {
+ struct list_head list;
+ struct zl3073x_dev *dev;
+ u8 id;
+ u8 refsel_mode;
+ u8 forced_ref;
+ u8 check_count;
+ bool phase_monitor;
+ struct dpll_device *dpll_dev;
+ enum dpll_lock_status lock_status;
+ struct list_head pins;
+ struct work_struct change_work;
+};
+
+struct zl3073x_dpll *zl3073x_dpll_alloc(struct zl3073x_dev *zldev, u8 ch);
+void zl3073x_dpll_free(struct zl3073x_dpll *zldpll);
+
+int zl3073x_dpll_register(struct zl3073x_dpll *zldpll);
+void zl3073x_dpll_unregister(struct zl3073x_dpll *zldpll);
+
+int zl3073x_dpll_init_fine_phase_adjust(struct zl3073x_dev *zldev);
+void zl3073x_dpll_changes_check(struct zl3073x_dpll *zldpll);
+
+#endif /* _ZL3073X_DPLL_H */
diff --git a/drivers/dpll/zl3073x/flash.c b/drivers/dpll/zl3073x/flash.c
new file mode 100644
index 000000000000..83452a77e3e9
--- /dev/null
+++ b/drivers/dpll/zl3073x/flash.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/minmax.h>
+#include <linux/netlink.h>
+#include <linux/sched/signal.h>
+#include <linux/sizes.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+#include <net/devlink.h>
+
+#include "core.h"
+#include "devlink.h"
+#include "flash.h"
+
+#define ZL_FLASH_ERR_PFX "FW update failed: "
+#define ZL_FLASH_ERR_MSG(_extack, _msg, ...) \
+ NL_SET_ERR_MSG_FMT_MOD((_extack), ZL_FLASH_ERR_PFX _msg, \
+ ## __VA_ARGS__)
+
+/**
+ * zl3073x_flash_download - Download image block to device memory
+ * @zldev: zl3073x device structure
+ * @component: name of the component to be downloaded
+ * @addr: device memory target address
+ * @data: pointer to data to download
+ * @size: size of data to download
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_download(struct zl3073x_dev *zldev, const char *component,
+ u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack)
+{
+#define ZL_CHECK_DELAY	5000	/* Check for interrupt every 5 seconds */
+ unsigned long check_time;
+ const void *ptr, *end;
+ int rc = 0;
+
+	dev_dbg(zldev->dev, "Downloading %zu bytes to device memory at 0x%x\n",
+ size, addr);
+
+ check_time = jiffies + msecs_to_jiffies(ZL_CHECK_DELAY);
+
+ for (ptr = data, end = data + size; ptr < end; ptr += 4, addr += 4) {
+ /* Write current word to HW memory */
+ rc = zl3073x_write_hwreg(zldev, addr,
+ get_unaligned((u32 *)ptr));
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack,
+					 "failed to write to memory at 0x%x",
+ addr);
+ return rc;
+ }
+
+ if (time_is_before_jiffies(check_time)) {
+ if (signal_pending(current)) {
+ ZL_FLASH_ERR_MSG(extack,
+ "Flashing interrupted");
+ return -EINTR;
+ }
+
+ check_time = jiffies + msecs_to_jiffies(ZL_CHECK_DELAY);
+ }
+
+		/* Report status after each 1 kB block */
+ if ((ptr - data) % 1024 == 0)
+ zl3073x_devlink_flash_notify(zldev, "Downloading image",
+ component, ptr - data,
+ size);
+ }
+
+ zl3073x_devlink_flash_notify(zldev, "Downloading image", component,
+ ptr - data, size);
+
+ dev_dbg(zldev->dev, "%zu bytes downloaded to device memory\n", size);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_error_check - Check for flash utility errors
+ * @zldev: zl3073x device structure
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function checks for errors detected by the flash utility and
+ * reports them if any were found.
+ *
+ * Return: 0 on success, -EIO when errors are detected
+ */
+static int
+zl3073x_flash_error_check(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack)
+{
+ u32 count, cause;
+ int rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_ERROR_COUNT, &count);
+ if (rc)
+ return rc;
+ else if (!count)
+ return 0; /* No error */
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_ERROR_CAUSE, &cause);
+ if (rc)
+ return rc;
+
+ /* Report errors */
+ ZL_FLASH_ERR_MSG(extack,
+ "utility error occurred: count=%u cause=0x%x", count,
+ cause);
+
+ return -EIO;
+}
+
+/**
+ * zl3073x_flash_wait_ready - Wait for the flash utility to be ready
+ * @zldev: zl3073x device structure
+ * @timeout_ms: maximum time in milliseconds to wait
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_wait_ready(struct zl3073x_dev *zldev, unsigned int timeout_ms)
+{
+#define ZL_FLASH_POLL_DELAY_MS 100
+ unsigned long timeout;
+ int rc, i;
+
+ dev_dbg(zldev->dev, "Waiting for flashing to be ready\n");
+
+ timeout = jiffies + msecs_to_jiffies(timeout_ms);
+
+ for (i = 0; time_is_after_jiffies(timeout); i++) {
+ u8 value;
+
+		/* Check for a pending signal every 1 s (10 polls) */
+ if (i > 9) {
+ if (signal_pending(current))
+ return -EINTR;
+ i = 0;
+ }
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_WRITE_FLASH, &value);
+ if (rc)
+ return rc;
+
+ value = FIELD_GET(ZL_WRITE_FLASH_OP, value);
+
+ if (value == ZL_WRITE_FLASH_OP_DONE)
+ return 0; /* Successfully done */
+
+ msleep(ZL_FLASH_POLL_DELAY_MS);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * zl3073x_flash_cmd_wait - Perform flash operation and wait for finish
+ * @zldev: zl3073x device structure
+ * @operation: operation to perform
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_cmd_wait(struct zl3073x_dev *zldev, u32 operation,
+ struct netlink_ext_ack *extack)
+{
+#define ZL_FLASH_PHASE1_TIMEOUT_MS 60000 /* up to 1 minute */
+#define ZL_FLASH_PHASE2_TIMEOUT_MS 120000 /* up to 2 minutes */
+ u8 value;
+ int rc;
+
+ dev_dbg(zldev->dev, "Sending flash command: 0x%x\n", operation);
+
+ rc = zl3073x_flash_wait_ready(zldev, ZL_FLASH_PHASE1_TIMEOUT_MS);
+ if (rc)
+ return rc;
+
+ /* Issue the requested operation */
+ rc = zl3073x_read_u8(zldev, ZL_REG_WRITE_FLASH, &value);
+ if (rc)
+ return rc;
+
+ value &= ~ZL_WRITE_FLASH_OP;
+ value |= FIELD_PREP(ZL_WRITE_FLASH_OP, operation);
+
+ rc = zl3073x_write_u8(zldev, ZL_REG_WRITE_FLASH, value);
+ if (rc)
+ return rc;
+
+ /* Wait for command completion */
+ rc = zl3073x_flash_wait_ready(zldev, ZL_FLASH_PHASE2_TIMEOUT_MS);
+ if (rc)
+ return rc;
+
+ return zl3073x_flash_error_check(zldev, extack);
+}
+
+/**
+ * zl3073x_flash_get_sector_size - Get flash sector size
+ * @zldev: zl3073x device structure
+ * @sector_size: sector size returned by the function
+ *
+ * The function reads the flash sector size detected by flash utility and
+ * stores it into @sector_size.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_get_sector_size(struct zl3073x_dev *zldev, size_t *sector_size)
+{
+ u8 flash_info;
+ int rc;
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_FLASH_INFO, &flash_info);
+ if (rc)
+ return rc;
+
+ switch (FIELD_GET(ZL_FLASH_INFO_SECTOR_SIZE, flash_info)) {
+ case ZL_FLASH_INFO_SECTOR_4K:
+ *sector_size = SZ_4K;
+ break;
+ case ZL_FLASH_INFO_SECTOR_64K:
+ *sector_size = SZ_64K;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_block - Download and flash memory block
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @operation: flash operation to perform
+ * @page: destination flash page
+ * @addr: device memory address to load data
+ * @data: pointer to data to be flashed
+ * @size: size of data
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function downloads the memory block given by the @data pointer and
+ * the size @size and flashes it into internal memory on flash page @page.
+ * The internal flash operation performed by the firmware is specified by
+ * the @operation parameter.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_block(struct zl3073x_dev *zldev, const char *component,
+ u32 operation, u32 page, u32 addr, const void *data,
+ size_t size, struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Download block to device memory */
+ rc = zl3073x_flash_download(zldev, component, addr, data, size, extack);
+ if (rc)
+ return rc;
+
+ /* Set address to flash from */
+ rc = zl3073x_write_u32(zldev, ZL_REG_IMAGE_START_ADDR, addr);
+ if (rc)
+ return rc;
+
+ /* Set size of block to flash */
+ rc = zl3073x_write_u32(zldev, ZL_REG_IMAGE_SIZE, size);
+ if (rc)
+ return rc;
+
+ /* Set destination page to flash */
+ rc = zl3073x_write_u32(zldev, ZL_REG_FLASH_INDEX_WRITE, page);
+ if (rc)
+ return rc;
+
+ /* Set filling pattern */
+ rc = zl3073x_write_u32(zldev, ZL_REG_FILL_PATTERN, U32_MAX);
+ if (rc)
+ return rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Flashing image", component, 0,
+ size);
+
+ dev_dbg(zldev->dev, "Flashing %zu bytes to page %u\n", size, page);
+
+ /* Execute sectors flash operation */
+ rc = zl3073x_flash_cmd_wait(zldev, operation, extack);
+ if (rc)
+ return rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Flashing image", component, size,
+ size);
+
+ return 0;
+}
+
+/**
+ * zl3073x_flash_sectors - Flash sectors
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @page: destination flash page
+ * @addr: device memory address to load data
+ * @data: pointer to data to be flashed
+ * @size: size of data
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function flashes given @data with size of @size to the internal flash
+ * memory block starting from page @page. The function uses sector flash
+ * method and has to take into account the flash sector size reported by
+ * the flashing utility. Input data are split into blocks according to
+ * this sector size and each block is flashed separately.
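+ *
+ * For example (illustrative figures): with a 64 KiB sector size the
+ * 0x1E000-byte block limit is aligned down to 0x10000 bytes, so a
+ * 0x28000-byte image is flashed as three blocks of 0x10000, 0x10000
+ * and 0x8000 bytes, reported as "<component>-part1".."-part3".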
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_sectors(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack)
+{
+#define ZL_FLASH_MAX_BLOCK_SIZE 0x0001E000
+#define ZL_FLASH_PAGE_SIZE 256
+ size_t max_block_size, block_size, sector_size;
+ const void *ptr, *end;
+ int rc;
+
+ /* Get flash sector size */
+ rc = zl3073x_flash_get_sector_size(zldev, &sector_size);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "Failed to get flash sector size");
+ return rc;
+ }
+
+ /* Determine max block size depending on sector size */
+ max_block_size = ALIGN_DOWN(ZL_FLASH_MAX_BLOCK_SIZE, sector_size);
+
+ for (ptr = data, end = data + size; ptr < end; ptr += block_size) {
+ char comp_str[32];
+
+ block_size = min_t(size_t, max_block_size, end - ptr);
+
+ /* Add suffix '-partN' if the requested component size is
+ * greater than max_block_size.
+ */
+ if (max_block_size < size)
+ snprintf(comp_str, sizeof(comp_str), "%s-part%zu",
+ component, (ptr - data) / max_block_size + 1);
+ else
+ strscpy(comp_str, component);
+
+ /* Flash the memory block */
+ rc = zl3073x_flash_block(zldev, comp_str,
+ ZL_WRITE_FLASH_OP_SECTORS, page, addr,
+ ptr, block_size, extack);
+ if (rc)
+ goto finish;
+
+ /* Move to next page */
+ page += block_size / ZL_FLASH_PAGE_SIZE;
+ }
+
+finish:
+ zl3073x_devlink_flash_notify(zldev,
+ rc ? "Flashing failed" : "Flashing done",
+ component, 0, 0);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_page - Flash page
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @page: destination flash page
+ * @addr: device memory address to load data
+ * @data: pointer to data to be flashed
+ * @size: size of data
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function flashes given @data with size of @size to the internal flash
+ * memory block starting with page @page.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_page(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Flash the memory block */
+ rc = zl3073x_flash_block(zldev, component, ZL_WRITE_FLASH_OP_PAGE, page,
+ addr, data, size, extack);
+
+ zl3073x_devlink_flash_notify(zldev,
+ rc ? "Flashing failed" : "Flashing done",
+ component, 0, 0);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_page_copy - Copy flash page
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @src_page: source page to copy
+ * @dst_page: destination page
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function copies one flash page specified by @src_page into the flash
+ * page specified by @dst_page.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_page_copy(struct zl3073x_dev *zldev, const char *component,
+ u32 src_page, u32 dst_page,
+ struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Set source page to be copied */
+ rc = zl3073x_write_u32(zldev, ZL_REG_FLASH_INDEX_READ, src_page);
+ if (rc)
+ return rc;
+
+ /* Set destination page for the copy */
+ rc = zl3073x_write_u32(zldev, ZL_REG_FLASH_INDEX_WRITE, dst_page);
+ if (rc)
+ return rc;
+
+ /* Perform copy operation */
+ rc = zl3073x_flash_cmd_wait(zldev, ZL_WRITE_FLASH_OP_COPY_PAGE, extack);
+ if (rc)
+ ZL_FLASH_ERR_MSG(extack, "Failed to copy page %u to page %u",
+ src_page, dst_page);
+
+ return rc;
+}
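+
+/*
+ * Used e.g. by fw.c to duplicate ZL3073X_FLASH_TYPE_PAGE_AND_COPY
+ * components: the image is first flashed to its destination page and
+ * then copied to the component's copy page.
+ */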
+
+/**
+ * zl3073x_flash_mode_verify - Check that the flash utility is running
+ * @zldev: zl3073x device structure
+ *
+ * Return: 0 if the flash utility is ready, <0 on error
+ */
+static int
+zl3073x_flash_mode_verify(struct zl3073x_dev *zldev)
+{
+ u8 family, release;
+ u32 hash;
+ int rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_FLASH_HASH, &hash);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_FLASH_FAMILY, &family);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_FLASH_RELEASE, &release);
+ if (rc)
+ return rc;
+
+ dev_dbg(zldev->dev,
+ "Flash utility check: hash 0x%08x, fam 0x%02x, rel 0x%02x\n",
+ hash, family, release);
+
+ /* Return success for correct family */
+ return (family == 0x21) ? 0 : -ENODEV;
+}
+
+static int
+zl3073x_flash_host_ctrl_enable(struct zl3073x_dev *zldev)
+{
+ u8 host_ctrl;
+ int rc;
+
+ /* Enable host control */
+ rc = zl3073x_read_u8(zldev, ZL_REG_HOST_CONTROL, &host_ctrl);
+ if (rc)
+ return rc;
+
+ host_ctrl |= ZL_HOST_CONTROL_ENABLE;
+
+ return zl3073x_write_u8(zldev, ZL_REG_HOST_CONTROL, host_ctrl);
+}
+
+/**
+ * zl3073x_flash_mode_enter - Switch the device to flash mode
+ * @zldev: zl3073x device structure
+ * @util_ptr: buffer with flash utility
+ * @util_size: size of buffer with flash utility
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function prepares and switches the device into flash mode.
+ *
+ * The procedure:
+ * 1) Stop device CPU by specific HW register sequence
+ * 2) Download flash utility to device memory
+ * 3) Resume device CPU by specific HW register sequence
+ * 4) Check communication with flash utility
+ * 5) Enable host control necessary to access flash API
+ * 6) Check for potential error detected by the utility
+ *
+ * The API provided by the normal firmware is not available in flash mode
+ * so the caller has to ensure that this API is not used in this mode.
+ *
+ * After performing the flash operation the caller should call
+ * zl3073x_flash_mode_leave() to return back to normal operation.
+ *
+ * Return: 0 on success, <0 on error.
+ */
+int zl3073x_flash_mode_enter(struct zl3073x_dev *zldev, const void *util_ptr,
+ size_t util_size, struct netlink_ext_ack *extack)
+{
+	/* Sequence to be written prior to utility download */
+ static const struct zl3073x_hwreg_seq_item pre_seq[] = {
+ HWREG_SEQ_ITEM(0x80000400, 1, BIT(0), 0),
+ HWREG_SEQ_ITEM(0x80206340, 1, BIT(4), 0),
+ HWREG_SEQ_ITEM(0x10000000, 1, BIT(2), 0),
+ HWREG_SEQ_ITEM(0x10000024, 0x00000001, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000020, 0x00000001, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000000, 1, BIT(10), 1000),
+ };
+ /* Sequence to be written after utility download */
+ static const struct zl3073x_hwreg_seq_item post_seq[] = {
+ HWREG_SEQ_ITEM(0x10400004, 0x000000C0, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10400008, 0x00000000, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10400010, 0x20000000, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10400014, 0x20000004, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000000, 1, GENMASK(10, 9), 0),
+ HWREG_SEQ_ITEM(0x10000020, 0x00000000, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000000, 0, BIT(0), 1000),
+ };
+ int rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Prepare flash mode", "utility",
+ 0, 0);
+
+	/* Execute pre-load sequence */
+ rc = zl3073x_write_hwreg_seq(zldev, pre_seq, ARRAY_SIZE(pre_seq));
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot execute pre-load sequence");
+ goto error;
+ }
+
+ /* Download utility image to device memory */
+ rc = zl3073x_flash_download(zldev, "utility", 0x20000000, util_ptr,
+ util_size, extack);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot download flash utility");
+ goto error;
+ }
+
+ /* Execute post-load sequence */
+ rc = zl3073x_write_hwreg_seq(zldev, post_seq, ARRAY_SIZE(post_seq));
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot execute post-load sequence");
+ goto error;
+ }
+
+ /* Check that utility identifies itself correctly */
+ rc = zl3073x_flash_mode_verify(zldev);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "flash utility check failed");
+ goto error;
+ }
+
+ /* Enable host control */
+ rc = zl3073x_flash_host_ctrl_enable(zldev);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot enable host control");
+ goto error;
+ }
+
+ zl3073x_devlink_flash_notify(zldev, "Flash mode enabled", "utility",
+ 0, 0);
+
+ return 0;
+
+error:
+ zl3073x_flash_mode_leave(zldev, extack);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_mode_leave - Leave flash mode
+ * @zldev: zl3073x device structure
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function instructs the device to leave the flash mode and
+ * to return back to normal operation.
+ *
+ * The procedure:
+ * 1) Set reset flag
+ * 2) Reset the device CPU by specific HW register sequence
+ * 3) Wait for the device to be ready
+ * 4) Check the reset flag was cleared
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_mode_leave(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack)
+{
+ /* Sequence to be written after flash */
+ static const struct zl3073x_hwreg_seq_item fw_reset_seq[] = {
+ HWREG_SEQ_ITEM(0x80000404, 1, BIT(0), 0),
+ HWREG_SEQ_ITEM(0x80000410, 1, BIT(0), 0),
+ };
+ u8 reset_status;
+ int rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Leaving flash mode", "utility",
+ 0, 0);
+
+ /* Read reset status register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_RESET_STATUS, &reset_status);
+ if (rc)
+ return rc;
+
+ /* Set reset bit */
+ reset_status |= ZL_REG_RESET_STATUS_RESET;
+
+ /* Update reset status register */
+ rc = zl3073x_write_u8(zldev, ZL_REG_RESET_STATUS, reset_status);
+ if (rc)
+ return rc;
+
+	/* We do not check the return value here as the sequence resets
+	 * the device CPU and the last write always returns an error.
+	 */
+ zl3073x_write_hwreg_seq(zldev, fw_reset_seq, ARRAY_SIZE(fw_reset_seq));
+
+ /* Wait for the device to be ready */
+ msleep(500);
+
+ /* Read again the reset status register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_RESET_STATUS, &reset_status);
+ if (rc)
+ return rc;
+
+ /* Check the reset bit was cleared */
+ if (reset_status & ZL_REG_RESET_STATUS_RESET) {
+ dev_err(zldev->dev,
+ "Reset not confirmed after switch to normal mode\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
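+
+/*
+ * Typical flashing flow (illustrative sketch, error handling partly
+ * elided; see the component table in fw.c for real page/address values):
+ *
+ *	rc = zl3073x_flash_mode_enter(zldev, util, util_size, extack);
+ *	if (rc)
+ *		return rc;
+ *	rc = zl3073x_flash_sectors(zldev, "firmware1", page, addr, data,
+ *				   size, extack);
+ *	zl3073x_flash_mode_leave(zldev, extack);
+ */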
diff --git a/drivers/dpll/zl3073x/flash.h b/drivers/dpll/zl3073x/flash.h
new file mode 100644
index 000000000000..effe1b16b359
--- /dev/null
+++ b/drivers/dpll/zl3073x/flash.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __ZL3073X_FLASH_H
+#define __ZL3073X_FLASH_H
+
+#include <linux/types.h>
+
+struct netlink_ext_ack;
+struct zl3073x_dev;
+
+int zl3073x_flash_mode_enter(struct zl3073x_dev *zldev, const void *util_ptr,
+ size_t util_size, struct netlink_ext_ack *extack);
+
+int zl3073x_flash_mode_leave(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack);
+
+int zl3073x_flash_page(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack);
+
+int zl3073x_flash_page_copy(struct zl3073x_dev *zldev, const char *component,
+ u32 src_page, u32 dst_page,
+ struct netlink_ext_ack *extack);
+
+int zl3073x_flash_sectors(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack);
+
+#endif /* __ZL3073X_FLASH_H */
diff --git a/drivers/dpll/zl3073x/fw.c b/drivers/dpll/zl3073x/fw.c
new file mode 100644
index 000000000000..55b638247f4b
--- /dev/null
+++ b/drivers/dpll/zl3073x/fw.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/array_size.h>
+#include <linux/build_bug.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/netlink.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "core.h"
+#include "flash.h"
+#include "fw.h"
+
+#define ZL3073X_FW_ERR_PFX "FW load failed: "
+#define ZL3073X_FW_ERR_MSG(_extack, _msg, ...) \
+ NL_SET_ERR_MSG_FMT_MOD((_extack), ZL3073X_FW_ERR_PFX _msg, \
+ ## __VA_ARGS__)
+
+enum zl3073x_flash_type {
+ ZL3073X_FLASH_TYPE_NONE = 0,
+ ZL3073X_FLASH_TYPE_SECTORS,
+ ZL3073X_FLASH_TYPE_PAGE,
+ ZL3073X_FLASH_TYPE_PAGE_AND_COPY,
+};
+
+struct zl3073x_fw_component_info {
+ const char *name;
+ size_t max_size;
+ enum zl3073x_flash_type flash_type;
+ u32 load_addr;
+ u32 dest_page;
+ u32 copy_page;
+};
+
+static const struct zl3073x_fw_component_info component_info[] = {
+ [ZL_FW_COMPONENT_UTIL] = {
+ .name = "utility",
+ .max_size = 0x4000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_NONE,
+ },
+ [ZL_FW_COMPONENT_FW1] = {
+ .name = "firmware1",
+ .max_size = 0x35000,
+ .load_addr = 0x20002000,
+ .flash_type = ZL3073X_FLASH_TYPE_SECTORS,
+ .dest_page = 0x020,
+ },
+ [ZL_FW_COMPONENT_FW2] = {
+ .name = "firmware2",
+ .max_size = 0x0040,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE_AND_COPY,
+ .dest_page = 0x3e0,
+ .copy_page = 0x000,
+ },
+ [ZL_FW_COMPONENT_FW3] = {
+ .name = "firmware3",
+ .max_size = 0x0248,
+ .load_addr = 0x20000400,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE_AND_COPY,
+ .dest_page = 0x3e4,
+ .copy_page = 0x004,
+ },
+ [ZL_FW_COMPONENT_CFG0] = {
+ .name = "config0",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3d0,
+ },
+ [ZL_FW_COMPONENT_CFG1] = {
+ .name = "config1",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3c0,
+ },
+ [ZL_FW_COMPONENT_CFG2] = {
+ .name = "config2",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3b0,
+ },
+ [ZL_FW_COMPONENT_CFG3] = {
+ .name = "config3",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3a0,
+ },
+ [ZL_FW_COMPONENT_CFG4] = {
+ .name = "config4",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x390,
+ },
+ [ZL_FW_COMPONENT_CFG5] = {
+ .name = "config5",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x380,
+ },
+ [ZL_FW_COMPONENT_CFG6] = {
+ .name = "config6",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x370,
+ },
+};
+
+/* Sanity check */
+static_assert(ARRAY_SIZE(component_info) == ZL_FW_NUM_COMPONENTS);
+
+/**
+ * zl3073x_fw_component_alloc - Alloc structure to hold firmware component
+ * @size: size of buffer to store data
+ *
+ * Return: pointer to allocated component structure or NULL on error.
+ */
+static struct zl3073x_fw_component *
+zl3073x_fw_component_alloc(size_t size)
+{
+ struct zl3073x_fw_component *comp;
+
+ comp = kzalloc(sizeof(*comp), GFP_KERNEL);
+ if (!comp)
+ return NULL;
+
+ comp->size = size;
+ comp->data = kzalloc(size, GFP_KERNEL);
+ if (!comp->data) {
+ kfree(comp);
+ return NULL;
+ }
+
+ return comp;
+}
+
+/**
+ * zl3073x_fw_component_free - Free allocated component structure
+ * @comp: pointer to allocated component
+ */
+static void
+zl3073x_fw_component_free(struct zl3073x_fw_component *comp)
+{
+ if (comp)
+ kfree(comp->data);
+
+ kfree(comp);
+}
+
+/**
+ * zl3073x_fw_component_id_get - Get ID for firmware component name
+ * @name: input firmware component name
+ *
+ * Return:
+ * - ZL_FW_COMPONENT_* ID for a known component name
+ * - ZL_FW_COMPONENT_INVALID if the given name is unknown
+ */
+static enum zl3073x_fw_component_id
+zl3073x_fw_component_id_get(const char *name)
+{
+ enum zl3073x_fw_component_id id;
+
+ for (id = 0; id < ZL_FW_NUM_COMPONENTS; id++)
+ if (!strcasecmp(name, component_info[id].name))
+ return id;
+
+ return ZL_FW_COMPONENT_INVALID;
+}
+
+/**
+ * zl3073x_fw_component_load - Load component from firmware source
+ * @zldev: zl3073x device structure
+ * @pcomp: pointer to loaded component
+ * @psrc: data pointer to load component from
+ * @psize: remaining bytes in buffer
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function allocates a single firmware component and loads the data from
+ * the buffer specified by @psrc and @psize. A pointer to the allocated
+ * component is stored in the output @pcomp. The source data pointer @psrc
+ * and remaining bytes @psize are updated accordingly.
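+ *
+ * The expected textual form of a component is (illustrative sketch):
+ *
+ *	<component-name> <word-count>
+ *	<hex-word> <hex-word> ...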
+ *
+ * Return:
+ * * 1 when component was allocated and loaded
+ * * 0 when there is no component to load
+ * * <0 on error
+ */
+static ssize_t
+zl3073x_fw_component_load(struct zl3073x_dev *zldev,
+ struct zl3073x_fw_component **pcomp,
+ const char **psrc, size_t *psize,
+ struct netlink_ext_ack *extack)
+{
+ const struct zl3073x_fw_component_info *info;
+ struct zl3073x_fw_component *comp = NULL;
+ struct device *dev = zldev->dev;
+ enum zl3073x_fw_component_id id;
+ char buf[32], name[16];
+ u32 count, size, *dest;
+ int pos, rc;
+
+ /* Fetch image name and size from input */
+ strscpy(buf, *psrc, min(sizeof(buf), *psize));
+ rc = sscanf(buf, "%15s %u %n", name, &count, &pos);
+ if (!rc) {
+ /* No more data */
+ return 0;
+ } else if (rc == 1 || count > U32_MAX / sizeof(u32)) {
+ ZL3073X_FW_ERR_MSG(extack, "invalid component size");
+ return -EINVAL;
+ }
+ *psrc += pos;
+ *psize -= pos;
+
+ dev_dbg(dev, "Firmware component '%s' found\n", name);
+
+ id = zl3073x_fw_component_id_get(name);
+ if (id == ZL_FW_COMPONENT_INVALID) {
+ ZL3073X_FW_ERR_MSG(extack, "unknown component type '%s'", name);
+ return -EINVAL;
+ }
+
+ info = &component_info[id];
+ size = count * sizeof(u32); /* get size in bytes */
+
+ /* Check image size validity */
+ if (size > component_info[id].max_size) {
+ ZL3073X_FW_ERR_MSG(extack,
+				   "[%s] component is too big (%u bytes)",
+ info->name, size);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "Indicated component image size: %u bytes\n", size);
+
+ /* Alloc component */
+ comp = zl3073x_fw_component_alloc(size);
+ if (!comp) {
+ ZL3073X_FW_ERR_MSG(extack, "failed to alloc memory");
+ return -ENOMEM;
+ }
+ comp->id = id;
+
+ /* Load component data from firmware source */
+ for (dest = comp->data; count; count--, dest++) {
+ strscpy(buf, *psrc, min(sizeof(buf), *psize));
+ rc = sscanf(buf, "%x %n", dest, &pos);
+ if (!rc)
+ goto err_data;
+
+ *psrc += pos;
+ *psize -= pos;
+ }
+
+ *pcomp = comp;
+
+ return 1;
+
+err_data:
+ ZL3073X_FW_ERR_MSG(extack, "[%s] invalid or missing data", info->name);
+
+ zl3073x_fw_component_free(comp);
+
+ return -ENODATA;
+}
+
+/**
+ * zl3073x_fw_free - Free allocated firmware
+ * @fw: firmware pointer
+ *
+ * The function frees the firmware structure previously allocated by
+ * zl3073x_fw_load().
+ */
+void zl3073x_fw_free(struct zl3073x_fw *fw)
+{
+ size_t i;
+
+ if (!fw)
+ return;
+
+ for (i = 0; i < ZL_FW_NUM_COMPONENTS; i++)
+ zl3073x_fw_component_free(fw->component[i]);
+
+ kfree(fw);
+}
+
+/**
+ * zl3073x_fw_load - Load all components from source
+ * @zldev: zl3073x device structure
+ * @data: source buffer pointer
+ * @size: size of source buffer
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function allocates a firmware structure and loads all components
+ * from the given buffer specified by @data and @size.
+ *
+ * Return: pointer to firmware on success, error pointer on error
+ */
+struct zl3073x_fw *zl3073x_fw_load(struct zl3073x_dev *zldev, const char *data,
+ size_t size, struct netlink_ext_ack *extack)
+{
+ struct zl3073x_fw_component *comp;
+ enum zl3073x_fw_component_id id;
+ struct zl3073x_fw *fw;
+ ssize_t rc;
+
+ /* Allocate firmware structure */
+ fw = kzalloc(sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return ERR_PTR(-ENOMEM);
+
+ do {
+ /* Load single component */
+ rc = zl3073x_fw_component_load(zldev, &comp, &data, &size,
+ extack);
+ if (rc <= 0)
+ /* Everything was read or error occurred */
+ break;
+
+ id = comp->id;
+
+ /* Report error if the given component is present twice
+ * or more.
+ */
+ if (fw->component[id]) {
+ ZL3073X_FW_ERR_MSG(extack,
+ "duplicate component '%s' detected",
+ component_info[id].name);
+ zl3073x_fw_component_free(comp);
+ rc = -EINVAL;
+ break;
+ }
+
+ fw->component[id] = comp;
+ } while (true);
+
+ if (rc) {
+ /* Free allocated firmware in case of error */
+ zl3073x_fw_free(fw);
+ return ERR_PTR(rc);
+ }
+
+ return fw;
+}
+
+/**
+ * zl3073x_fw_component_flash - Flash a single firmware component
+ * @zldev: zl3073x device structure
+ * @comp: pointer to the component to flash
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 in case of success or negative number otherwise.
+ */
+static int
+zl3073x_fw_component_flash(struct zl3073x_dev *zldev,
+ struct zl3073x_fw_component *comp,
+ struct netlink_ext_ack *extack)
+{
+ const struct zl3073x_fw_component_info *info;
+ int rc;
+
+ info = &component_info[comp->id];
+
+ switch (info->flash_type) {
+ case ZL3073X_FLASH_TYPE_NONE:
+ /* Non-flashable component - used for utility */
+ return 0;
+ case ZL3073X_FLASH_TYPE_SECTORS:
+ rc = zl3073x_flash_sectors(zldev, info->name, info->dest_page,
+ info->load_addr, comp->data,
+ comp->size, extack);
+ break;
+ case ZL3073X_FLASH_TYPE_PAGE:
+ rc = zl3073x_flash_page(zldev, info->name, info->dest_page,
+ info->load_addr, comp->data, comp->size,
+ extack);
+ break;
+ case ZL3073X_FLASH_TYPE_PAGE_AND_COPY:
+ rc = zl3073x_flash_page(zldev, info->name, info->dest_page,
+ info->load_addr, comp->data, comp->size,
+ extack);
+ if (!rc)
+ rc = zl3073x_flash_page_copy(zldev, info->name,
+ info->dest_page,
+ info->copy_page, extack);
+ break;
+ }
+ if (rc)
+ ZL3073X_FW_ERR_MSG(extack, "Failed to flash component '%s'",
+ info->name);
+
+ return rc;
+}
+
+int zl3073x_fw_flash(struct zl3073x_dev *zldev, struct zl3073x_fw *zlfw,
+ struct netlink_ext_ack *extack)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < ZL_FW_NUM_COMPONENTS; i++) {
+ if (!zlfw->component[i])
+ continue; /* Component is not present */
+
+ rc = zl3073x_fw_component_flash(zldev, zlfw->component[i],
+ extack);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
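+
+/*
+ * Illustrative usage sketch (error handling elided):
+ *
+ *	fw = zl3073x_fw_load(zldev, data, size, extack);
+ *	if (IS_ERR(fw))
+ *		return PTR_ERR(fw);
+ *	rc = zl3073x_fw_flash(zldev, fw, extack);
+ *	zl3073x_fw_free(fw);
+ */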
diff --git a/drivers/dpll/zl3073x/fw.h b/drivers/dpll/zl3073x/fw.h
new file mode 100644
index 000000000000..fcaa89ab075e
--- /dev/null
+++ b/drivers/dpll/zl3073x/fw.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_FW_H
+#define _ZL3073X_FW_H
+
+/*
+ * enum zl3073x_fw_component_id - Identifiers for possible flash components
+ */
+enum zl3073x_fw_component_id {
+ ZL_FW_COMPONENT_INVALID = -1,
+ ZL_FW_COMPONENT_UTIL = 0,
+ ZL_FW_COMPONENT_FW1,
+ ZL_FW_COMPONENT_FW2,
+ ZL_FW_COMPONENT_FW3,
+ ZL_FW_COMPONENT_CFG0,
+ ZL_FW_COMPONENT_CFG1,
+ ZL_FW_COMPONENT_CFG2,
+ ZL_FW_COMPONENT_CFG3,
+ ZL_FW_COMPONENT_CFG4,
+ ZL_FW_COMPONENT_CFG5,
+ ZL_FW_COMPONENT_CFG6,
+ ZL_FW_NUM_COMPONENTS
+};
+
+/**
+ * struct zl3073x_fw_component - Firmware component
+ * @id: Flash component ID
+ * @size: Size of the buffer
+ * @data: Pointer to buffer with component data
+ */
+struct zl3073x_fw_component {
+ enum zl3073x_fw_component_id id;
+ size_t size;
+ void *data;
+};
+
+/**
+ * struct zl3073x_fw - Firmware bundle
+ * @component: firmware components array
+ */
+struct zl3073x_fw {
+ struct zl3073x_fw_component *component[ZL_FW_NUM_COMPONENTS];
+};
+
+struct zl3073x_fw *zl3073x_fw_load(struct zl3073x_dev *zldev, const char *data,
+ size_t size, struct netlink_ext_ack *extack);
+void zl3073x_fw_free(struct zl3073x_fw *fw);
+
+int zl3073x_fw_flash(struct zl3073x_dev *zldev, struct zl3073x_fw *zlfw,
+ struct netlink_ext_ack *extack);
+
+#endif /* _ZL3073X_FW_H */
diff --git a/drivers/dpll/zl3073x/i2c.c b/drivers/dpll/zl3073x/i2c.c
new file mode 100644
index 000000000000..7bbfdd4ed867
--- /dev/null
+++ b/drivers/dpll/zl3073x/i2c.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "core.h"
+
+static int zl3073x_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct zl3073x_dev *zldev;
+
+ zldev = zl3073x_devm_alloc(dev);
+ if (IS_ERR(zldev))
+ return PTR_ERR(zldev);
+
+ zldev->regmap = devm_regmap_init_i2c(client, &zl3073x_regmap_config);
+ if (IS_ERR(zldev->regmap))
+ return dev_err_probe(dev, PTR_ERR(zldev->regmap),
+ "Failed to initialize regmap\n");
+
+ return zl3073x_dev_probe(zldev, i2c_get_match_data(client));
+}
+
+static const struct i2c_device_id zl3073x_i2c_id[] = {
+ {
+ .name = "zl30731",
+ .driver_data = (kernel_ulong_t)&zl30731_chip_info,
+ },
+ {
+ .name = "zl30732",
+ .driver_data = (kernel_ulong_t)&zl30732_chip_info,
+ },
+ {
+ .name = "zl30733",
+ .driver_data = (kernel_ulong_t)&zl30733_chip_info,
+ },
+ {
+ .name = "zl30734",
+ .driver_data = (kernel_ulong_t)&zl30734_chip_info,
+ },
+ {
+ .name = "zl30735",
+ .driver_data = (kernel_ulong_t)&zl30735_chip_info,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, zl3073x_i2c_id);
+
+static const struct of_device_id zl3073x_i2c_of_match[] = {
+ { .compatible = "microchip,zl30731", .data = &zl30731_chip_info },
+ { .compatible = "microchip,zl30732", .data = &zl30732_chip_info },
+ { .compatible = "microchip,zl30733", .data = &zl30733_chip_info },
+ { .compatible = "microchip,zl30734", .data = &zl30734_chip_info },
+ { .compatible = "microchip,zl30735", .data = &zl30735_chip_info },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, zl3073x_i2c_of_match);
+
+static struct i2c_driver zl3073x_i2c_driver = {
+ .driver = {
+ .name = "zl3073x-i2c",
+ .of_match_table = zl3073x_i2c_of_match,
+ },
+ .probe = zl3073x_i2c_probe,
+ .id_table = zl3073x_i2c_id,
+};
+module_i2c_driver(zl3073x_i2c_driver);
+
+MODULE_AUTHOR("Ivan Vecera <ivecera@redhat.com>");
+MODULE_DESCRIPTION("Microchip ZL3073x I2C driver");
+MODULE_IMPORT_NS("ZL3073X");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dpll/zl3073x/out.c b/drivers/dpll/zl3073x/out.c
new file mode 100644
index 000000000000..86829a0c1c02
--- /dev/null
+++ b/drivers/dpll/zl3073x/out.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/dev_printk.h>
+#include <linux/string.h>
+#include <linux/string_choices.h>
+#include <linux/types.h>
+
+#include "core.h"
+#include "out.h"
+
+/**
+ * zl3073x_out_state_fetch - fetch output state from hardware
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: output index to fetch state for
+ *
+ * Function fetches state of the given output from hardware and stores it
+ * for later use.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_out_state_fetch(struct zl3073x_dev *zldev, u8 index)
+{
+ struct zl3073x_out *out = &zldev->out[index];
+ int rc;
+
+ /* Read output configuration */
+ rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_CTRL(index), &out->ctrl);
+ if (rc)
+ return rc;
+
+ dev_dbg(zldev->dev, "OUT%u is %s and connected to SYNTH%u\n", index,
+ str_enabled_disabled(zl3073x_out_is_enabled(out)),
+ zl3073x_out_synth_get(out));
+
+ guard(mutex)(&zldev->multiop_lock);
+
+	/* Read output configuration into mailbox */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* Read output mode */
+ rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_MODE, &out->mode);
+ if (rc)
+ return rc;
+
+ dev_dbg(zldev->dev, "OUT%u has signal format 0x%02x\n", index,
+ zl3073x_out_signal_format_get(out));
+
+ /* Read output divisor */
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_DIV, &out->div);
+ if (rc)
+ return rc;
+
+ if (!out->div) {
+		dev_err(zldev->dev, "Got zero divisor for OUT%u from device\n",
+ index);
+ return -EINVAL;
+ }
+
+ dev_dbg(zldev->dev, "OUT%u divisor: %u\n", index, out->div);
+
+ /* Read output width */
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_WIDTH, &out->width);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD,
+ &out->esync_n_period);
+ if (rc)
+ return rc;
+
+ if (!out->esync_n_period) {
+ dev_err(zldev->dev,
+			"Got zero esync divisor for OUT%u from device\n",
+ index);
+ return -EINVAL;
+ }
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_WIDTH,
+ &out->esync_n_width);
+ if (rc)
+ return rc;
+
+	return zl3073x_read_u32(zldev, ZL_REG_OUTPUT_PHASE_COMP,
+				&out->phase_comp);
+}
+
+/**
+ * zl3073x_out_state_get - get current output state
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: output index to get state for
+ *
+ * Return: pointer to given output state
+ */
+const struct zl3073x_out *zl3073x_out_state_get(struct zl3073x_dev *zldev,
+ u8 index)
+{
+ return &zldev->out[index];
+}
+
+int zl3073x_out_state_set(struct zl3073x_dev *zldev, u8 index,
+ const struct zl3073x_out *out)
+{
+ struct zl3073x_out *dout = &zldev->out[index];
+ int rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read output configuration into mailbox */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* Update mailbox with changed values */
+ if (dout->div != out->div)
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_DIV, out->div);
+ if (!rc && dout->width != out->width)
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_WIDTH, out->width);
+ if (!rc && dout->esync_n_period != out->esync_n_period)
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD,
+ out->esync_n_period);
+ if (!rc && dout->esync_n_width != out->esync_n_width)
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_WIDTH,
+ out->esync_n_width);
+ if (!rc && dout->mode != out->mode)
+ rc = zl3073x_write_u8(zldev, ZL_REG_OUTPUT_MODE, out->mode);
+ if (!rc && dout->phase_comp != out->phase_comp)
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_PHASE_COMP,
+ out->phase_comp);
+ if (rc)
+ return rc;
+
+ /* Commit output configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_WR,
+ ZL_REG_OUTPUT_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* After successful commit store new state */
+ dout->div = out->div;
+ dout->width = out->width;
+ dout->esync_n_period = out->esync_n_period;
+ dout->esync_n_width = out->esync_n_width;
+ dout->mode = out->mode;
+ dout->phase_comp = out->phase_comp;
+
+ return 0;
+}
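+
+/*
+ * Callers are expected to do a read-modify-write (illustrative sketch,
+ * new_div being a hypothetical value):
+ *
+ *	struct zl3073x_out out = *zl3073x_out_state_get(zldev, index);
+ *
+ *	out.div = new_div;
+ *	rc = zl3073x_out_state_set(zldev, index, &out);
+ */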
diff --git a/drivers/dpll/zl3073x/out.h b/drivers/dpll/zl3073x/out.h
new file mode 100644
index 000000000000..e8ea7a0e0f07
--- /dev/null
+++ b/drivers/dpll/zl3073x/out.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_OUT_H
+#define _ZL3073X_OUT_H
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+
+#include "regs.h"
+
+struct zl3073x_dev;
+
+/**
+ * struct zl3073x_out - output state
+ * @div: output divisor
+ * @width: output pulse width
+ * @esync_n_period: embedded sync or n-pin period (for n-div formats)
+ * @esync_n_width: embedded sync or n-pin pulse width
+ * @phase_comp: phase compensation
+ * @ctrl: output control
+ * @mode: output mode
+ */
+struct zl3073x_out {
+ u32 div;
+ u32 width;
+ u32 esync_n_period;
+ u32 esync_n_width;
+ s32 phase_comp;
+ u8 ctrl;
+ u8 mode;
+};
+
+int zl3073x_out_state_fetch(struct zl3073x_dev *zldev, u8 index);
+const struct zl3073x_out *zl3073x_out_state_get(struct zl3073x_dev *zldev,
+ u8 index);
+
+int zl3073x_out_state_set(struct zl3073x_dev *zldev, u8 index,
+ const struct zl3073x_out *out);
+
+/**
+ * zl3073x_out_signal_format_get - get output signal format
+ * @out: pointer to out state
+ *
+ * Return: signal format of given output
+ */
+static inline u8 zl3073x_out_signal_format_get(const struct zl3073x_out *out)
+{
+ return FIELD_GET(ZL_OUTPUT_MODE_SIGNAL_FORMAT, out->mode);
+}
+
+/**
+ * zl3073x_out_is_diff - check if the given output is differential
+ * @out: pointer to out state
+ *
+ * Return: true if output is differential, false if output is single-ended
+ */
+static inline bool zl3073x_out_is_diff(const struct zl3073x_out *out)
+{
+ switch (zl3073x_out_signal_format_get(out)) {
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_LVDS:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_DIFF:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_LOWVCM:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/**
+ * zl3073x_out_is_enabled - check if the given output is enabled
+ * @out: pointer to out state
+ *
+ * Return: true if output is enabled, false if output is disabled
+ */
+static inline bool zl3073x_out_is_enabled(const struct zl3073x_out *out)
+{
+ return !!FIELD_GET(ZL_OUTPUT_CTRL_EN, out->ctrl);
+}
+
+/**
+ * zl3073x_out_synth_get - get synth connected to given output
+ * @out: pointer to out state
+ *
+ * Return: index of synth connected to given output.
+ */
+static inline u8 zl3073x_out_synth_get(const struct zl3073x_out *out)
+{
+ return FIELD_GET(ZL_OUTPUT_CTRL_SYNTH_SEL, out->ctrl);
+}
+
+#endif /* _ZL3073X_OUT_H */
diff --git a/drivers/dpll/zl3073x/prop.c b/drivers/dpll/zl3073x/prop.c
new file mode 100644
index 000000000000..4ed153087570
--- /dev/null
+++ b/drivers/dpll/zl3073x/prop.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/array_size.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fwnode.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "core.h"
+#include "prop.h"
+
+/**
+ * zl3073x_pin_check_freq - verify frequency for given pin
+ * @zldev: pointer to zl3073x device
+ * @dir: pin direction
+ * @id: pin index
+ * @freq: frequency to check
+ *
+ * The function checks that the given frequency is valid for the device. For
+ * input pins it checks that the frequency can be factorized using supported
+ * base frequencies. For output pins it checks that the frequency divides the
+ * connected synth frequency without remainder.
+ *
+ * Return: true if the frequency is valid, false if not.
+ */
+static bool
+zl3073x_pin_check_freq(struct zl3073x_dev *zldev, enum dpll_pin_direction dir,
+ u8 id, u64 freq)
+{
+ if (freq > U32_MAX)
+ goto err_inv_freq;
+
+ if (dir == DPLL_PIN_DIRECTION_INPUT) {
+ int rc;
+
+ /* Check if the frequency can be factorized */
+ rc = zl3073x_ref_freq_factorize(freq, NULL, NULL);
+ if (rc)
+ goto err_inv_freq;
+ } else {
+ u32 synth_freq;
+ u8 out, synth;
+
+ /* Get output pin synthesizer */
+ out = zl3073x_output_pin_out_get(id);
+ synth = zl3073x_dev_out_synth_get(zldev, out);
+
+ /* Get synth frequency */
+ synth_freq = zl3073x_dev_synth_freq_get(zldev, synth);
+
+ /* Check the frequency divides synth frequency */
+ if (synth_freq % (u32)freq)
+ goto err_inv_freq;
+ }
+
+ return true;
+
+err_inv_freq:
+ dev_warn(zldev->dev,
+ "Unsupported frequency %llu Hz in firmware node\n", freq);
+
+ return false;
+}
+
+/**
+ * zl3073x_prop_pin_package_label_set - generate package label for the pin
+ * @zldev: pointer to zl3073x device
+ * @props: pointer to pin properties
+ * @dir: pin direction
+ * @id: pin index
+ *
+ * Generates package label string and stores it into pin properties structure.
+ *
+ * Possible formats:
+ * REF<n> - differential input reference
+ * REF<n>P & REF<n>N - single-ended input reference (P or N pin)
+ * OUT<n> - differential output
+ * OUT<n>P & OUT<n>N - single-ended output (P or N pin)
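+ *
+ * For example (assuming even pin indexes are the P pins), single-ended
+ * input pin index 5 would be labeled "REF2N", while a differential pair
+ * at the same position is labeled just "REF2".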
+ */
+static void
+zl3073x_prop_pin_package_label_set(struct zl3073x_dev *zldev,
+ struct zl3073x_pin_props *props,
+ enum dpll_pin_direction dir, u8 id)
+{
+ const char *prefix, *suffix;
+ bool is_diff;
+
+ if (dir == DPLL_PIN_DIRECTION_INPUT) {
+ u8 ref;
+
+ prefix = "REF";
+ ref = zl3073x_input_pin_ref_get(id);
+ is_diff = zl3073x_dev_ref_is_diff(zldev, ref);
+ } else {
+ u8 out;
+
+ prefix = "OUT";
+ out = zl3073x_output_pin_out_get(id);
+ is_diff = zl3073x_dev_out_is_diff(zldev, out);
+ }
+
+ if (!is_diff)
+ suffix = zl3073x_is_p_pin(id) ? "P" : "N";
+ else
+		suffix = ""; /* No suffix for differential pins */
+
+ snprintf(props->package_label, sizeof(props->package_label), "%s%u%s",
+ prefix, id / 2, suffix);
+
+ /* Set package_label pointer in DPLL core properties to generated
+ * string.
+ */
+ props->dpll_props.package_label = props->package_label;
+}
+
+/**
+ * zl3073x_prop_pin_fwnode_get - get fwnode for given pin
+ * @zldev: pointer to zl3073x device
+ * @props: pointer to pin properties
+ * @dir: pin direction
+ * @id: pin index
+ *
+ * Return: 0 on success, -ENOENT if the firmware node does not exist
+ */
+static int
+zl3073x_prop_pin_fwnode_get(struct zl3073x_dev *zldev,
+ struct zl3073x_pin_props *props,
+ enum dpll_pin_direction dir, u8 id)
+{
+ struct fwnode_handle *pins_node, *pin_node;
+ const char *node_name;
+
+ if (dir == DPLL_PIN_DIRECTION_INPUT)
+ node_name = "input-pins";
+ else
+ node_name = "output-pins";
+
+ /* Get node containing input or output pins */
+ pins_node = device_get_named_child_node(zldev->dev, node_name);
+ if (!pins_node) {
+ dev_dbg(zldev->dev, "'%s' sub-node is missing\n", node_name);
+ return -ENOENT;
+ }
+
+ /* Enumerate child pin nodes and find the requested one */
+ fwnode_for_each_child_node(pins_node, pin_node) {
+ u32 reg;
+
+ if (fwnode_property_read_u32(pin_node, "reg", &reg))
+ continue;
+
+ if (id == reg)
+ break;
+ }
+
+ /* Release pin parent node */
+ fwnode_handle_put(pins_node);
+
+ /* Save found node */
+ props->fwnode = pin_node;
+
+ dev_dbg(zldev->dev, "Firmware node for %s %sfound\n",
+ props->package_label, pin_node ? "" : "NOT ");
+
+ return pin_node ? 0 : -ENOENT;
+}
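+
+/*
+ * Example firmware description (illustrative DT sketch; the values are
+ * assumptions, only the property names are taken from this driver):
+ *
+ *	input-pins {
+ *		pin@0 {
+ *			reg = <0>;
+ *			label = "ref0";
+ *			connection-type = "ext";
+ *			esync-control;
+ *		};
+ *	};
+ */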
+
+/**
+ * zl3073x_pin_props_get - get pin properties
+ * @zldev: pointer to zl3073x device
+ * @dir: pin direction
+ * @index: pin index
+ *
+ * The function looks for the firmware node of the given pin if it is provided
+ * by the system firmware (DT or ACPI), allocates a pin properties structure,
+ * generates the package label string according to the pin type and optionally
+ * fetches board label, connection type, supported frequencies and esync
+ * capability from the firmware node if it exists.
+ *
+ * The pointer returned by this function should be freed using
+ * zl3073x_pin_props_put().
+ *
+ * Return:
+ * * pointer to allocated pin properties structure on success
+ * * error pointer in case of error
+ */
+struct zl3073x_pin_props *zl3073x_pin_props_get(struct zl3073x_dev *zldev,
+ enum dpll_pin_direction dir,
+ u8 index)
+{
+ struct dpll_pin_frequency *ranges;
+ struct zl3073x_pin_props *props;
+ int i, j, num_freqs, rc;
+ const char *type;
+ u64 *freqs;
+
+ props = kzalloc(sizeof(*props), GFP_KERNEL);
+ if (!props)
+ return ERR_PTR(-ENOMEM);
+
+ /* Set default pin type and capabilities */
+ if (dir == DPLL_PIN_DIRECTION_INPUT) {
+ props->dpll_props.type = DPLL_PIN_TYPE_EXT;
+ props->dpll_props.capabilities =
+ DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
+ DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ } else {
+ u8 out, synth;
+ u32 f;
+
+ props->dpll_props.type = DPLL_PIN_TYPE_GNSS;
+
+		/* The output pin phase adjustment granularity is half of
+		 * the synth clock period.
+		 */
+ out = zl3073x_output_pin_out_get(index);
+ synth = zl3073x_dev_out_synth_get(zldev, out);
+ f = 2 * zl3073x_dev_synth_freq_get(zldev, synth);
+ props->dpll_props.phase_gran = f ? div_u64(PSEC_PER_SEC, f) : 1;
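+		/* Illustrative figures: a 156.25 MHz synth gives
+		 * 10^12 / (2 * 156250000) = 3200 ps of granularity.
+		 */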
+ }
+
+ props->dpll_props.phase_range.min = S32_MIN;
+ props->dpll_props.phase_range.max = S32_MAX;
+
+ zl3073x_prop_pin_package_label_set(zldev, props, dir, index);
+
+ /* Get firmware node for the given pin */
+ rc = zl3073x_prop_pin_fwnode_get(zldev, props, dir, index);
+ if (rc)
+ return props; /* Return if it does not exist */
+
+ /* Look for label property and store the value as board label */
+ fwnode_property_read_string(props->fwnode, "label",
+ &props->dpll_props.board_label);
+
+ /* Look for pin type property and translate its value to DPLL
+ * pin type enum if it is present.
+ */
+ if (!fwnode_property_read_string(props->fwnode, "connection-type",
+ &type)) {
+ if (!strcmp(type, "ext"))
+ props->dpll_props.type = DPLL_PIN_TYPE_EXT;
+ else if (!strcmp(type, "gnss"))
+ props->dpll_props.type = DPLL_PIN_TYPE_GNSS;
+ else if (!strcmp(type, "int"))
+ props->dpll_props.type = DPLL_PIN_TYPE_INT_OSCILLATOR;
+ else if (!strcmp(type, "synce"))
+ props->dpll_props.type = DPLL_PIN_TYPE_SYNCE_ETH_PORT;
+ else
+ dev_warn(zldev->dev,
+ "Unknown or unsupported pin type '%s'\n",
+ type);
+ }
+
+ /* Check if the pin supports embedded sync control */
+ props->esync_control = fwnode_property_read_bool(props->fwnode,
+ "esync-control");
+
+ /* Read supported frequencies property if it is specified */
+ num_freqs = fwnode_property_count_u64(props->fwnode,
+ "supported-frequencies-hz");
+ if (num_freqs <= 0)
+		/* Return if the property does not exist or the list is empty */
+ return props;
+
+ /* The firmware node specifies list of supported frequencies while
+ * DPLL core pin properties requires list of frequency ranges.
+ * So read the frequency list into temporary array.
+ */
+ freqs = kcalloc(num_freqs, sizeof(*freqs), GFP_KERNEL);
+ if (!freqs) {
+ rc = -ENOMEM;
+ goto err_alloc_freqs;
+ }
+
+ /* Read frequencies list from firmware node */
+ fwnode_property_read_u64_array(props->fwnode,
+ "supported-frequencies-hz", freqs,
+ num_freqs);
+
+ /* Allocate frequency ranges list and fill it */
+ ranges = kcalloc(num_freqs, sizeof(*ranges), GFP_KERNEL);
+ if (!ranges) {
+ rc = -ENOMEM;
+ goto err_alloc_ranges;
+ }
+
+	/* Convert the list of frequencies to a list of frequency ranges but
+	 * filter out frequencies that are not representable by the device.
+	 */
+ for (i = 0, j = 0; i < num_freqs; i++) {
+ struct dpll_pin_frequency freq = DPLL_PIN_FREQUENCY(freqs[i]);
+
+ if (zl3073x_pin_check_freq(zldev, dir, index, freqs[i])) {
+ ranges[j] = freq;
+ j++;
+ }
+ }
+
+ /* Save number of freq ranges and pointer to them into pin properties */
+ props->dpll_props.freq_supported = ranges;
+ props->dpll_props.freq_supported_num = j;
+
+ /* Free temporary array */
+ kfree(freqs);
+
+ return props;
+
+err_alloc_ranges:
+ kfree(freqs);
+err_alloc_freqs:
+ fwnode_handle_put(props->fwnode);
+ kfree(props);
+
+ return ERR_PTR(rc);
+}
+
+/**
+ * zl3073x_pin_props_put - release pin properties
+ * @props: pin properties to free
+ *
+ * The function deallocates given pin properties structure.
+ */
+void zl3073x_pin_props_put(struct zl3073x_pin_props *props)
+{
+ /* Free supported frequency ranges list if it is present */
+ kfree(props->dpll_props.freq_supported);
+
+ /* Put firmware handle if it is present */
+ if (props->fwnode)
+ fwnode_handle_put(props->fwnode);
+
+ kfree(props);
+}
+
+/**
+ * zl3073x_prop_dpll_type_get - get DPLL channel type
+ * @zldev: pointer to zl3073x device
+ * @index: DPLL channel index
+ *
+ * Return: DPLL type for given DPLL channel
+ */
+enum dpll_type
+zl3073x_prop_dpll_type_get(struct zl3073x_dev *zldev, u8 index)
+{
+ const char *types[ZL3073X_MAX_CHANNELS];
+ int count;
+
+ /* Read dpll types property from firmware */
+ count = device_property_read_string_array(zldev->dev, "dpll-types",
+ types, ARRAY_SIZE(types));
+
+ /* Return default if property or entry for given channel is missing */
+ if (index >= count)
+ return DPLL_TYPE_PPS;
+
+ if (!strcmp(types[index], "pps"))
+ return DPLL_TYPE_PPS;
+ else if (!strcmp(types[index], "eec"))
+ return DPLL_TYPE_EEC;
+
+ dev_info(zldev->dev, "Unknown DPLL type '%s', using default\n",
+ types[index]);
+
+ return DPLL_TYPE_PPS; /* Default */
+}
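+
+/*
+ * Example firmware property (illustrative sketch): a two-channel device
+ * with a PPS and an EEC DPLL would use:
+ *
+ *	dpll-types = "pps", "eec";
+ */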
diff --git a/drivers/dpll/zl3073x/prop.h b/drivers/dpll/zl3073x/prop.h
new file mode 100644
index 000000000000..721a18f05938
--- /dev/null
+++ b/drivers/dpll/zl3073x/prop.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_PROP_H
+#define _ZL3073X_PROP_H
+
+#include <linux/dpll.h>
+
+#include "core.h"
+
+struct fwnode_handle;
+
+/**
+ * struct zl3073x_pin_props - pin properties
+ * @fwnode: pin firmware node
+ * @dpll_props: DPLL core pin properties
+ * @package_label: pin package label
+ * @esync_control: embedded sync support
+ */
+struct zl3073x_pin_props {
+ struct fwnode_handle *fwnode;
+ struct dpll_pin_properties dpll_props;
+ char package_label[8];
+ bool esync_control;
+};
+
+enum dpll_type zl3073x_prop_dpll_type_get(struct zl3073x_dev *zldev, u8 index);
+
+struct zl3073x_pin_props *zl3073x_pin_props_get(struct zl3073x_dev *zldev,
+ enum dpll_pin_direction dir,
+ u8 index);
+
+void zl3073x_pin_props_put(struct zl3073x_pin_props *props);
+
+#endif /* _ZL3073X_PROP_H */
diff --git a/drivers/dpll/zl3073x/ref.c b/drivers/dpll/zl3073x/ref.c
new file mode 100644
index 000000000000..aa2de13effa8
--- /dev/null
+++ b/drivers/dpll/zl3073x/ref.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/dev_printk.h>
+#include <linux/string.h>
+#include <linux/string_choices.h>
+#include <linux/types.h>
+
+#include "core.h"
+#include "ref.h"
+
+/**
+ * zl3073x_ref_freq_factorize - factorize given frequency
+ * @freq: input frequency
+ * @base: base frequency
+ * @mult: multiplier
+ *
+ * Checks if the given frequency can be factorized using one of the
+ * supported base frequencies. If so, the base frequency and multiplier
+ * are stored into the appropriate parameters if they are not NULL.
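+ *
+ * For example, 10 MHz is factorized as 160 * 62500: 160 Hz is the first
+ * base frequency in the table for which the multiplier both fits into
+ * u16 and leaves no remainder.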
+ *
+ * Return: 0 on success, -EINVAL if the frequency cannot be factorized
+ */
+int
+zl3073x_ref_freq_factorize(u32 freq, u16 *base, u16 *mult)
+{
+ static const u16 base_freqs[] = {
+ 1, 2, 4, 5, 8, 10, 16, 20, 25, 32, 40, 50, 64, 80, 100, 125,
+ 128, 160, 200, 250, 256, 320, 400, 500, 625, 640, 800, 1000,
+ 1250, 1280, 1600, 2000, 2500, 3125, 3200, 4000, 5000, 6250,
+ 6400, 8000, 10000, 12500, 15625, 16000, 20000, 25000, 31250,
+ 32000, 40000, 50000, 62500,
+ };
+ u32 div;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(base_freqs); i++) {
+ div = freq / base_freqs[i];
+
+ if (div <= U16_MAX && (freq % base_freqs[i]) == 0) {
+ if (base)
+ *base = base_freqs[i];
+ if (mult)
+ *mult = div;
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * zl3073x_ref_state_fetch - fetch input reference state from hardware
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: input reference index to fetch state for
+ *
+ * Function fetches state for the given input reference from hardware and
+ * stores it for later use.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_ref_state_fetch(struct zl3073x_dev *zldev, u8 index)
+{
+ struct zl3073x_ref *ref = &zldev->ref[index];
+ int rc;
+
+ /* For differential type inputs the N-pin reference shares
+ * part of the configuration with the P-pin counterpart.
+ */
+ if (zl3073x_is_n_pin(index) && zl3073x_ref_is_diff(ref - 1)) {
+		struct zl3073x_ref *p_ref = ref - 1; /* P-pin counterpart */
+
+ /* Copy the shared items from the P-pin */
+ ref->config = p_ref->config;
+ ref->esync_n_div = p_ref->esync_n_div;
+ ref->freq_base = p_ref->freq_base;
+ ref->freq_mult = p_ref->freq_mult;
+ ref->freq_ratio_m = p_ref->freq_ratio_m;
+ ref->freq_ratio_n = p_ref->freq_ratio_n;
+ ref->phase_comp = p_ref->phase_comp;
+ ref->sync_ctrl = p_ref->sync_ctrl;
+
+ return 0; /* Finish - no non-shared items for now */
+ }
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read reference configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* Read ref_config register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_CONFIG, &ref->config);
+ if (rc)
+ return rc;
+
+ /* Read frequency related registers */
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_FREQ_BASE, &ref->freq_base);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_FREQ_MULT, &ref->freq_mult);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_RATIO_M, &ref->freq_ratio_m);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_RATIO_N, &ref->freq_ratio_n);
+ if (rc)
+ return rc;
+
+ /* Read eSync and N-div related registers */
+ rc = zl3073x_read_u32(zldev, ZL_REG_REF_ESYNC_DIV, &ref->esync_n_div);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_SYNC_CTRL, &ref->sync_ctrl);
+ if (rc)
+ return rc;
+
+ /* Read phase compensation register */
+ rc = zl3073x_read_u48(zldev, ZL_REG_REF_PHASE_OFFSET_COMP,
+ &ref->phase_comp);
+ if (rc)
+ return rc;
+
+ dev_dbg(zldev->dev, "REF%u is %s and configured as %s\n", index,
+ str_enabled_disabled(zl3073x_ref_is_enabled(ref)),
+ zl3073x_ref_is_diff(ref) ? "differential" : "single-ended");
+
+ return rc;
+}
+
+/**
+ * zl3073x_ref_state_get - get current input reference state
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: input reference index to get state for
+ *
+ * Return: pointer to given input reference state
+ */
+const struct zl3073x_ref *
+zl3073x_ref_state_get(struct zl3073x_dev *zldev, u8 index)
+{
+ return &zldev->ref[index];
+}
+
+int zl3073x_ref_state_set(struct zl3073x_dev *zldev, u8 index,
+ const struct zl3073x_ref *ref)
+{
+ struct zl3073x_ref *dref = &zldev->ref[index];
+ int rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read reference configuration into mailbox */
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* Update mailbox with changed values */
+ if (dref->freq_base != ref->freq_base)
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_FREQ_BASE,
+ ref->freq_base);
+ if (!rc && dref->freq_mult != ref->freq_mult)
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_FREQ_MULT,
+ ref->freq_mult);
+ if (!rc && dref->freq_ratio_m != ref->freq_ratio_m)
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_RATIO_M,
+ ref->freq_ratio_m);
+ if (!rc && dref->freq_ratio_n != ref->freq_ratio_n)
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_RATIO_N,
+ ref->freq_ratio_n);
+ if (!rc && dref->esync_n_div != ref->esync_n_div)
+ rc = zl3073x_write_u32(zldev, ZL_REG_REF_ESYNC_DIV,
+ ref->esync_n_div);
+ if (!rc && dref->sync_ctrl != ref->sync_ctrl)
+ rc = zl3073x_write_u8(zldev, ZL_REG_REF_SYNC_CTRL,
+ ref->sync_ctrl);
+ if (!rc && dref->phase_comp != ref->phase_comp)
+ rc = zl3073x_write_u48(zldev, ZL_REG_REF_PHASE_OFFSET_COMP,
+ ref->phase_comp);
+ if (rc)
+ return rc;
+
+ /* Commit reference configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_WR,
+ ZL_REG_REF_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* After successful commit store new state */
+ dref->freq_base = ref->freq_base;
+ dref->freq_mult = ref->freq_mult;
+ dref->freq_ratio_m = ref->freq_ratio_m;
+ dref->freq_ratio_n = ref->freq_ratio_n;
+ dref->esync_n_div = ref->esync_n_div;
+ dref->sync_ctrl = ref->sync_ctrl;
+ dref->phase_comp = ref->phase_comp;
+
+ return 0;
+}
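
A hypothetical caller (illustrative, not part of the patch; the helper name
example_change_ref_freq is made up) would copy the current state, adjust it,
and write it back, letting zl3073x_ref_state_set() touch only the registers
that actually changed:

    static int example_change_ref_freq(struct zl3073x_dev *zldev, u8 index,
                                       u32 freq)
    {
            struct zl3073x_ref ref = *zl3073x_ref_state_get(zldev, index);
            int rc;

            /* zl3073x_ref_freq_set() (see ref.h below) updates base & mult */
            rc = zl3073x_ref_freq_set(&ref, freq);
            if (rc)
                    return rc;

            /* Writes only the changed registers, then commits via mailbox */
            return zl3073x_ref_state_set(zldev, index, &ref);
    }
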
diff --git a/drivers/dpll/zl3073x/ref.h b/drivers/dpll/zl3073x/ref.h
new file mode 100644
index 000000000000..efc7f59cd9f9
--- /dev/null
+++ b/drivers/dpll/zl3073x/ref.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_REF_H
+#define _ZL3073X_REF_H
+
+#include <linux/bitfield.h>
+#include <linux/math64.h>
+#include <linux/types.h>
+
+#include "regs.h"
+
+struct zl3073x_dev;
+
+/**
+ * struct zl3073x_ref - input reference state
+ * @ffo: current fractional frequency offset
+ * @phase_comp: phase compensation
+ * @esync_n_div: divisor for embedded sync or n-divided signal formats
+ * @freq_base: frequency base
+ * @freq_mult: frequency multiplier
+ * @freq_ratio_m: FEC mode multiplier
+ * @freq_ratio_n: FEC mode divisor
+ * @config: reference config
+ * @sync_ctrl: reference sync control
+ * @mon_status: reference monitor status
+ */
+struct zl3073x_ref {
+ s64 ffo;
+ u64 phase_comp;
+ u32 esync_n_div;
+ u16 freq_base;
+ u16 freq_mult;
+ u16 freq_ratio_m;
+ u16 freq_ratio_n;
+ u8 config;
+ u8 sync_ctrl;
+ u8 mon_status;
+};
+
+int zl3073x_ref_state_fetch(struct zl3073x_dev *zldev, u8 index);
+
+const struct zl3073x_ref *zl3073x_ref_state_get(struct zl3073x_dev *zldev,
+ u8 index);
+
+int zl3073x_ref_state_set(struct zl3073x_dev *zldev, u8 index,
+ const struct zl3073x_ref *ref);
+
+int zl3073x_ref_freq_factorize(u32 freq, u16 *base, u16 *mult);
+
+/**
+ * zl3073x_ref_ffo_get - get current fractional frequency offset
+ * @ref: pointer to ref state
+ *
+ * Return: the latest measured fractional frequency offset
+ */
+static inline s64
+zl3073x_ref_ffo_get(const struct zl3073x_ref *ref)
+{
+ return ref->ffo;
+}
+
+/**
+ * zl3073x_ref_freq_get - get given input reference frequency
+ * @ref: pointer to ref state
+ *
+ * Return: frequency of the given input reference
+ */
+static inline u32
+zl3073x_ref_freq_get(const struct zl3073x_ref *ref)
+{
+ return mul_u64_u32_div(ref->freq_base * ref->freq_mult,
+ ref->freq_ratio_m, ref->freq_ratio_n);
+}
+
+/**
+ * zl3073x_ref_freq_set - set given input reference frequency
+ * @ref: pointer to ref state
+ * @freq: frequency to be set
+ *
+ * Return: 0 on success, <0 when frequency cannot be factorized
+ */
+static inline int
+zl3073x_ref_freq_set(struct zl3073x_ref *ref, u32 freq)
+{
+ u16 base, mult;
+ int rc;
+
+ rc = zl3073x_ref_freq_factorize(freq, &base, &mult);
+ if (rc)
+ return rc;
+
+ ref->freq_base = base;
+ ref->freq_mult = mult;
+
+ return 0;
+}
+
+/**
+ * zl3073x_ref_is_diff - check if the given input reference is differential
+ * @ref: pointer to ref state
+ *
+ * Return: true if reference is differential, false if reference is single-ended
+ */
+static inline bool
+zl3073x_ref_is_diff(const struct zl3073x_ref *ref)
+{
+ return !!FIELD_GET(ZL_REF_CONFIG_DIFF_EN, ref->config);
+}
+
+/**
+ * zl3073x_ref_is_enabled - check if the given input reference is enabled
+ * @ref: pointer to ref state
+ *
+ * Return: true if the input reference is enabled, false otherwise
+ */
+static inline bool
+zl3073x_ref_is_enabled(const struct zl3073x_ref *ref)
+{
+ return !!FIELD_GET(ZL_REF_CONFIG_ENABLE, ref->config);
+}
+
+/**
+ * zl3073x_ref_is_status_ok - check the given input reference status
+ * @ref: pointer to ref state
+ *
+ * Return: true if the status is ok, false otherwise
+ */
+static inline bool
+zl3073x_ref_is_status_ok(const struct zl3073x_ref *ref)
+{
+ return ref->mon_status == ZL_REF_MON_STATUS_OK;
+}
+
+#endif /* _ZL3073X_REF_H */
diff --git a/drivers/dpll/zl3073x/regs.h b/drivers/dpll/zl3073x/regs.h
new file mode 100644
index 000000000000..d837bee72b17
--- /dev/null
+++ b/drivers/dpll/zl3073x/regs.h
@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_REGS_H
+#define _ZL3073X_REGS_H
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
+/*
+ * Register address structure:
+ * ===========================
+ * 25 19 18 16 15 7 6 0
+ * +------------------------------------------+
+ * | max_offset | size | page | page_offset |
+ * +------------------------------------------+
+ *
+ * page_offset ... <0x00..0x7F>
+ * page .......... HW page number
+ * size .......... register byte size (1, 2, 4 or 6)
+ * max_offset .... maximal offset for indexed registers
+ * (for non-indexed regs max_offset == page_offset)
+ */
+
+#define ZL_REG_OFFSET_MASK GENMASK(6, 0)
+#define ZL_REG_PAGE_MASK GENMASK(15, 7)
+#define ZL_REG_SIZE_MASK GENMASK(18, 16)
+#define ZL_REG_MAX_OFFSET_MASK GENMASK(25, 19)
+#define ZL_REG_ADDR_MASK GENMASK(15, 0)
+
+#define ZL_REG_OFFSET(_reg) FIELD_GET(ZL_REG_OFFSET_MASK, _reg)
+#define ZL_REG_PAGE(_reg) FIELD_GET(ZL_REG_PAGE_MASK, _reg)
+#define ZL_REG_MAX_OFFSET(_reg) FIELD_GET(ZL_REG_MAX_OFFSET_MASK, _reg)
+#define ZL_REG_SIZE(_reg) FIELD_GET(ZL_REG_SIZE_MASK, _reg)
+#define ZL_REG_ADDR(_reg) FIELD_GET(ZL_REG_ADDR_MASK, _reg)
+
+/**
+ * ZL_REG_IDX - define indexed register
+ * @_idx: index of register to access
+ * @_page: register page
+ * @_offset: register offset in page
+ * @_size: register byte size (1, 2, 4 or 6)
+ * @_items: number of register indices
+ * @_stride: stride between items in bytes
+ *
+ * All parameters except @_idx should be constant.
+ */
+#define ZL_REG_IDX(_idx, _page, _offset, _size, _items, _stride) \
+ (FIELD_PREP(ZL_REG_OFFSET_MASK, \
+ (_offset) + (_idx) * (_stride)) | \
+ FIELD_PREP_CONST(ZL_REG_PAGE_MASK, _page) | \
+ FIELD_PREP_CONST(ZL_REG_SIZE_MASK, _size) | \
+ FIELD_PREP_CONST(ZL_REG_MAX_OFFSET_MASK, \
+ (_offset) + ((_items) - 1) * (_stride)))
+
+/**
+ * ZL_REG - define simple (non-indexed) register
+ * @_page: register page
+ * @_offset: register offset in page
+ * @_size: register byte size (1, 2, 4 or 6)
+ *
+ * All parameters should be constant.
+ */
+#define ZL_REG(_page, _offset, _size) \
+ ZL_REG_IDX(0, _page, _offset, _size, 1, 0)
+
+/**************************
+ * Register Page 0, General
+ **************************/
+
+#define ZL_REG_INFO ZL_REG(0, 0x00, 1)
+#define ZL_INFO_READY BIT(7)
+
+#define ZL_REG_ID ZL_REG(0, 0x01, 2)
+#define ZL_REG_REVISION ZL_REG(0, 0x03, 2)
+#define ZL_REG_FW_VER ZL_REG(0, 0x05, 2)
+#define ZL_REG_CUSTOM_CONFIG_VER ZL_REG(0, 0x07, 4)
+
+#define ZL_REG_RESET_STATUS ZL_REG(0, 0x18, 1)
+#define ZL_REG_RESET_STATUS_RESET BIT(0)
+
+/*************************
+ * Register Page 2, Status
+ *************************/
+
+#define ZL_REG_REF_MON_STATUS(_idx) \
+ ZL_REG_IDX(_idx, 2, 0x02, 1, ZL3073X_NUM_REFS, 1)
+#define ZL_REF_MON_STATUS_OK 0 /* all bits zeroed */
+
+#define ZL_REG_DPLL_MON_STATUS(_idx) \
+ ZL_REG_IDX(_idx, 2, 0x10, 1, ZL3073X_MAX_CHANNELS, 1)
+#define ZL_DPLL_MON_STATUS_STATE GENMASK(1, 0)
+#define ZL_DPLL_MON_STATUS_STATE_ACQUIRING 0
+#define ZL_DPLL_MON_STATUS_STATE_LOCK 1
+#define ZL_DPLL_MON_STATUS_STATE_HOLDOVER 2
+#define ZL_DPLL_MON_STATUS_HO_READY BIT(2)
+
+#define ZL_REG_DPLL_REFSEL_STATUS(_idx) \
+ ZL_REG_IDX(_idx, 2, 0x30, 1, ZL3073X_MAX_CHANNELS, 1)
+#define ZL_DPLL_REFSEL_STATUS_REFSEL GENMASK(3, 0)
+#define ZL_DPLL_REFSEL_STATUS_STATE GENMASK(6, 4)
+#define ZL_DPLL_REFSEL_STATUS_STATE_LOCK 4
+
+#define ZL_REG_REF_FREQ(_idx) \
+ ZL_REG_IDX(_idx, 2, 0x44, 4, ZL3073X_NUM_REFS, 4)
+
+/**********************
+ * Register Page 4, Ref
+ **********************/
+
+#define ZL_REG_REF_PHASE_ERR_READ_RQST ZL_REG(4, 0x0f, 1)
+#define ZL_REF_PHASE_ERR_READ_RQST_RD BIT(0)
+
+#define ZL_REG_REF_FREQ_MEAS_CTRL ZL_REG(4, 0x1c, 1)
+#define ZL_REF_FREQ_MEAS_CTRL GENMASK(1, 0)
+#define ZL_REF_FREQ_MEAS_CTRL_REF_FREQ 1
+#define ZL_REF_FREQ_MEAS_CTRL_REF_FREQ_OFF 2
+#define ZL_REF_FREQ_MEAS_CTRL_DPLL_FREQ_OFF 3
+
+#define ZL_REG_REF_FREQ_MEAS_MASK_3_0 ZL_REG(4, 0x1d, 1)
+#define ZL_REF_FREQ_MEAS_MASK_3_0(_ref) BIT(_ref)
+
+#define ZL_REG_REF_FREQ_MEAS_MASK_4 ZL_REG(4, 0x1e, 1)
+#define ZL_REF_FREQ_MEAS_MASK_4(_ref) BIT((_ref) - 8)
+
+#define ZL_REG_DPLL_MEAS_REF_FREQ_CTRL ZL_REG(4, 0x1f, 1)
+#define ZL_DPLL_MEAS_REF_FREQ_CTRL_EN BIT(0)
+#define ZL_DPLL_MEAS_REF_FREQ_CTRL_IDX GENMASK(6, 4)
+
+#define ZL_REG_REF_PHASE(_idx) \
+ ZL_REG_IDX(_idx, 4, 0x20, 6, ZL3073X_NUM_REFS, 6)
+
+/***********************
+ * Register Page 5, DPLL
+ ***********************/
+
+#define ZL_REG_DPLL_MODE_REFSEL(_idx) \
+ ZL_REG_IDX(_idx, 5, 0x04, 1, ZL3073X_MAX_CHANNELS, 4)
+#define ZL_DPLL_MODE_REFSEL_MODE GENMASK(2, 0)
+#define ZL_DPLL_MODE_REFSEL_MODE_FREERUN 0
+#define ZL_DPLL_MODE_REFSEL_MODE_HOLDOVER 1
+#define ZL_DPLL_MODE_REFSEL_MODE_REFLOCK 2
+#define ZL_DPLL_MODE_REFSEL_MODE_AUTO 3
+#define ZL_DPLL_MODE_REFSEL_MODE_NCO 4
+#define ZL_DPLL_MODE_REFSEL_REF GENMASK(7, 4)
+
+#define ZL_REG_DPLL_MEAS_CTRL ZL_REG(5, 0x50, 1)
+#define ZL_DPLL_MEAS_CTRL_EN BIT(0)
+#define ZL_DPLL_MEAS_CTRL_AVG_FACTOR GENMASK(7, 4)
+
+#define ZL_REG_DPLL_MEAS_IDX ZL_REG(5, 0x51, 1)
+#define ZL_DPLL_MEAS_IDX GENMASK(2, 0)
+
+#define ZL_REG_DPLL_PHASE_ERR_READ_MASK ZL_REG(5, 0x54, 1)
+
+#define ZL_REG_DPLL_PHASE_ERR_DATA(_idx) \
+ ZL_REG_IDX(_idx, 5, 0x55, 6, ZL3073X_MAX_CHANNELS, 6)
+
+/***********************************
+ * Register Page 9, Synth and Output
+ ***********************************/
+
+#define ZL_REG_SYNTH_CTRL(_idx) \
+ ZL_REG_IDX(_idx, 9, 0x00, 1, ZL3073X_NUM_SYNTHS, 1)
+#define ZL_SYNTH_CTRL_EN BIT(0)
+#define ZL_SYNTH_CTRL_DPLL_SEL GENMASK(6, 4)
+
+#define ZL_REG_SYNTH_PHASE_SHIFT_CTRL ZL_REG(9, 0x1e, 1)
+#define ZL_REG_SYNTH_PHASE_SHIFT_MASK ZL_REG(9, 0x1f, 1)
+#define ZL_REG_SYNTH_PHASE_SHIFT_INTVL ZL_REG(9, 0x20, 1)
+#define ZL_REG_SYNTH_PHASE_SHIFT_DATA ZL_REG(9, 0x21, 2)
+
+#define ZL_REG_OUTPUT_CTRL(_idx) \
+ ZL_REG_IDX(_idx, 9, 0x28, 1, ZL3073X_NUM_OUTS, 1)
+#define ZL_OUTPUT_CTRL_EN BIT(0)
+#define ZL_OUTPUT_CTRL_SYNTH_SEL GENMASK(6, 4)
+
+/*******************************
+ * Register Page 10, Ref Mailbox
+ *******************************/
+
+#define ZL_REG_REF_MB_MASK ZL_REG(10, 0x02, 2)
+
+#define ZL_REG_REF_MB_SEM ZL_REG(10, 0x04, 1)
+#define ZL_REF_MB_SEM_WR BIT(0)
+#define ZL_REF_MB_SEM_RD BIT(1)
+
+#define ZL_REG_REF_FREQ_BASE ZL_REG(10, 0x05, 2)
+#define ZL_REG_REF_FREQ_MULT ZL_REG(10, 0x07, 2)
+#define ZL_REG_REF_RATIO_M ZL_REG(10, 0x09, 2)
+#define ZL_REG_REF_RATIO_N ZL_REG(10, 0x0b, 2)
+
+#define ZL_REG_REF_CONFIG ZL_REG(10, 0x0d, 1)
+#define ZL_REF_CONFIG_ENABLE BIT(0)
+#define ZL_REF_CONFIG_DIFF_EN BIT(2)
+
+#define ZL_REG_REF_PHASE_OFFSET_COMP ZL_REG(10, 0x28, 6)
+
+#define ZL_REG_REF_SYNC_CTRL ZL_REG(10, 0x2e, 1)
+#define ZL_REF_SYNC_CTRL_MODE GENMASK(2, 0)
+#define ZL_REF_SYNC_CTRL_MODE_REFSYNC_PAIR_OFF 0
+#define ZL_REF_SYNC_CTRL_MODE_50_50_ESYNC_25_75 2
+
+#define ZL_REG_REF_ESYNC_DIV ZL_REG(10, 0x30, 4)
+#define ZL_REF_ESYNC_DIV_1HZ 0
+
+/********************************
+ * Register Page 12, DPLL Mailbox
+ ********************************/
+
+#define ZL_REG_DPLL_MB_MASK ZL_REG(12, 0x02, 2)
+
+#define ZL_REG_DPLL_MB_SEM ZL_REG(12, 0x04, 1)
+#define ZL_DPLL_MB_SEM_WR BIT(0)
+#define ZL_DPLL_MB_SEM_RD BIT(1)
+
+#define ZL_REG_DPLL_REF_PRIO(_idx) \
+ ZL_REG_IDX(_idx, 12, 0x52, 1, ZL3073X_NUM_REFS / 2, 1)
+#define ZL_DPLL_REF_PRIO_REF_P GENMASK(3, 0)
+#define ZL_DPLL_REF_PRIO_REF_N GENMASK(7, 4)
+#define ZL_DPLL_REF_PRIO_MAX 14
+#define ZL_DPLL_REF_PRIO_NONE 15
+
+/*********************************
+ * Register Page 13, Synth Mailbox
+ *********************************/
+
+#define ZL_REG_SYNTH_MB_MASK ZL_REG(13, 0x02, 2)
+
+#define ZL_REG_SYNTH_MB_SEM ZL_REG(13, 0x04, 1)
+#define ZL_SYNTH_MB_SEM_WR BIT(0)
+#define ZL_SYNTH_MB_SEM_RD BIT(1)
+
+#define ZL_REG_SYNTH_FREQ_BASE ZL_REG(13, 0x06, 2)
+#define ZL_REG_SYNTH_FREQ_MULT ZL_REG(13, 0x08, 4)
+#define ZL_REG_SYNTH_FREQ_M ZL_REG(13, 0x0c, 2)
+#define ZL_REG_SYNTH_FREQ_N ZL_REG(13, 0x0e, 2)
+
+/**********************************
+ * Register Page 14, Output Mailbox
+ **********************************/
+#define ZL_REG_OUTPUT_MB_MASK ZL_REG(14, 0x02, 2)
+
+#define ZL_REG_OUTPUT_MB_SEM ZL_REG(14, 0x04, 1)
+#define ZL_OUTPUT_MB_SEM_WR BIT(0)
+#define ZL_OUTPUT_MB_SEM_RD BIT(1)
+
+#define ZL_REG_OUTPUT_MODE ZL_REG(14, 0x05, 1)
+#define ZL_OUTPUT_MODE_CLOCK_TYPE GENMASK(2, 0)
+#define ZL_OUTPUT_MODE_CLOCK_TYPE_NORMAL 0
+#define ZL_OUTPUT_MODE_CLOCK_TYPE_ESYNC 1
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT GENMASK(7, 4)
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_DISABLED 0
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_LVDS 1
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_DIFF 2
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_LOWVCM 3
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_2 4
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_1P 5
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_1N 6
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_INV 7
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV 12
+#define ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV 15
+
+#define ZL_REG_OUTPUT_DIV ZL_REG(14, 0x0c, 4)
+#define ZL_REG_OUTPUT_WIDTH ZL_REG(14, 0x10, 4)
+#define ZL_REG_OUTPUT_ESYNC_PERIOD ZL_REG(14, 0x14, 4)
+#define ZL_REG_OUTPUT_ESYNC_WIDTH ZL_REG(14, 0x18, 4)
+#define ZL_REG_OUTPUT_PHASE_COMP ZL_REG(14, 0x20, 4)
+
+/*
+ * Register Page 255 - HW registers access
+ */
+#define ZL_REG_HWREG_OP ZL_REG(0xff, 0x00, 1)
+#define ZL_HWREG_OP_WRITE 0x28
+#define ZL_HWREG_OP_READ 0x29
+#define ZL_HWREG_OP_PENDING BIT(1)
+
+#define ZL_REG_HWREG_ADDR ZL_REG(0xff, 0x04, 4)
+#define ZL_REG_HWREG_WRITE_DATA ZL_REG(0xff, 0x08, 4)
+#define ZL_REG_HWREG_READ_DATA ZL_REG(0xff, 0x0c, 4)
+
+/*
+ * Registers available in flash mode
+ */
+#define ZL_REG_FLASH_HASH ZL_REG(0, 0x78, 4)
+#define ZL_REG_FLASH_FAMILY ZL_REG(0, 0x7c, 1)
+#define ZL_REG_FLASH_RELEASE ZL_REG(0, 0x7d, 1)
+
+#define ZL_REG_HOST_CONTROL ZL_REG(1, 0x02, 1)
+#define ZL_HOST_CONTROL_ENABLE BIT(0)
+
+#define ZL_REG_IMAGE_START_ADDR ZL_REG(1, 0x04, 4)
+#define ZL_REG_IMAGE_SIZE ZL_REG(1, 0x08, 4)
+#define ZL_REG_FLASH_INDEX_READ ZL_REG(1, 0x0c, 4)
+#define ZL_REG_FLASH_INDEX_WRITE ZL_REG(1, 0x10, 4)
+#define ZL_REG_FILL_PATTERN ZL_REG(1, 0x14, 4)
+
+#define ZL_REG_WRITE_FLASH ZL_REG(1, 0x18, 1)
+#define ZL_WRITE_FLASH_OP GENMASK(2, 0)
+#define ZL_WRITE_FLASH_OP_DONE 0x0
+#define ZL_WRITE_FLASH_OP_SECTORS 0x2
+#define ZL_WRITE_FLASH_OP_PAGE 0x3
+#define ZL_WRITE_FLASH_OP_COPY_PAGE 0x4
+
+#define ZL_REG_FLASH_INFO ZL_REG(2, 0x00, 1)
+#define ZL_FLASH_INFO_SECTOR_SIZE GENMASK(3, 0)
+#define ZL_FLASH_INFO_SECTOR_4K 0
+#define ZL_FLASH_INFO_SECTOR_64K 1
+
+#define ZL_REG_ERROR_COUNT ZL_REG(2, 0x04, 4)
+#define ZL_REG_ERROR_CAUSE ZL_REG(2, 0x08, 4)
+
+#define ZL_REG_OP_STATE ZL_REG(2, 0x14, 1)
+#define ZL_OP_STATE_NO_COMMAND 0
+#define ZL_OP_STATE_PENDING 1
+#define ZL_OP_STATE_DONE 2
+
+#endif /* _ZL3073X_REGS_H */
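
For illustration (not part of the patch), unpacking one of the constants
above, ZL_REG_REF_FREQ_BASE == ZL_REG(10, 0x05, 2), with plain shifts shows
how the 16-bit bus address is formed from page and offset:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* ZL_REG(10, 0x05, 2): offset 0x05, page 10, size 2, max_offset 0x05 */
            uint32_t reg = (0x05u << 0) | (10u << 7) | (2u << 16) | (0x05u << 19);

            /* prints: page=10 offset=0x05 size=2 addr=0x0505 */
            printf("page=%u offset=0x%02x size=%u addr=0x%04x\n",
                   (unsigned)((reg >> 7) & 0x1ff),  /* ZL_REG_PAGE_MASK   */
                   (unsigned)(reg & 0x7f),          /* ZL_REG_OFFSET_MASK */
                   (unsigned)((reg >> 16) & 0x7),   /* ZL_REG_SIZE_MASK   */
                   (unsigned)(reg & 0xffff));       /* ZL_REG_ADDR_MASK   */
            return 0;
    }
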
diff --git a/drivers/dpll/zl3073x/spi.c b/drivers/dpll/zl3073x/spi.c
new file mode 100644
index 000000000000..af901b4d6dda
--- /dev/null
+++ b/drivers/dpll/zl3073x/spi.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "core.h"
+
+static int zl3073x_spi_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct zl3073x_dev *zldev;
+
+ zldev = zl3073x_devm_alloc(dev);
+ if (IS_ERR(zldev))
+ return PTR_ERR(zldev);
+
+ zldev->regmap = devm_regmap_init_spi(spi, &zl3073x_regmap_config);
+ if (IS_ERR(zldev->regmap))
+ return dev_err_probe(dev, PTR_ERR(zldev->regmap),
+ "Failed to initialize regmap\n");
+
+ return zl3073x_dev_probe(zldev, spi_get_device_match_data(spi));
+}
+
+static const struct spi_device_id zl3073x_spi_id[] = {
+ {
+ .name = "zl30731",
+ .driver_data = (kernel_ulong_t)&zl30731_chip_info
+ },
+ {
+ .name = "zl30732",
+ .driver_data = (kernel_ulong_t)&zl30732_chip_info,
+ },
+ {
+ .name = "zl30733",
+ .driver_data = (kernel_ulong_t)&zl30733_chip_info,
+ },
+ {
+ .name = "zl30734",
+ .driver_data = (kernel_ulong_t)&zl30734_chip_info,
+ },
+ {
+ .name = "zl30735",
+ .driver_data = (kernel_ulong_t)&zl30735_chip_info,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, zl3073x_spi_id);
+
+static const struct of_device_id zl3073x_spi_of_match[] = {
+ { .compatible = "microchip,zl30731", .data = &zl30731_chip_info },
+ { .compatible = "microchip,zl30732", .data = &zl30732_chip_info },
+ { .compatible = "microchip,zl30733", .data = &zl30733_chip_info },
+ { .compatible = "microchip,zl30734", .data = &zl30734_chip_info },
+ { .compatible = "microchip,zl30735", .data = &zl30735_chip_info },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, zl3073x_spi_of_match);
+
+static struct spi_driver zl3073x_spi_driver = {
+ .driver = {
+ .name = "zl3073x-spi",
+ .of_match_table = zl3073x_spi_of_match,
+ },
+ .probe = zl3073x_spi_probe,
+ .id_table = zl3073x_spi_id,
+};
+module_spi_driver(zl3073x_spi_driver);
+
+MODULE_AUTHOR("Ivan Vecera <ivecera@redhat.com>");
+MODULE_DESCRIPTION("Microchip ZL3073x SPI driver");
+MODULE_IMPORT_NS("ZL3073X");
+MODULE_LICENSE("GPL");
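
The SPI glue is thin by design: all controller logic sits behind
zl3073x_devm_alloc() and zl3073x_dev_probe(). A hypothetical I2C variant
(illustrative sketch only, not part of this patch; driver struct and id
tables omitted) would differ just in the regmap initializer and the
match-data lookup:

    static int zl3073x_i2c_probe(struct i2c_client *client)
    {
            struct device *dev = &client->dev;
            struct zl3073x_dev *zldev;

            zldev = zl3073x_devm_alloc(dev);
            if (IS_ERR(zldev))
                    return PTR_ERR(zldev);

            zldev->regmap = devm_regmap_init_i2c(client, &zl3073x_regmap_config);
            if (IS_ERR(zldev->regmap))
                    return dev_err_probe(dev, PTR_ERR(zldev->regmap),
                                         "Failed to initialize regmap\n");

            return zl3073x_dev_probe(zldev, i2c_get_match_data(client));
    }
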
diff --git a/drivers/dpll/zl3073x/synth.c b/drivers/dpll/zl3073x/synth.c
new file mode 100644
index 000000000000..da839572dab2
--- /dev/null
+++ b/drivers/dpll/zl3073x/synth.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/dev_printk.h>
+#include <linux/string.h>
+#include <linux/string_choices.h>
+#include <linux/types.h>
+
+#include "core.h"
+#include "synth.h"
+
+/**
+ * zl3073x_synth_state_fetch - fetch synth state from hardware
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: synth index to fetch state for
+ *
+ * Function fetches state of the given synthesizer from the hardware and
+ * stores it for later use.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_synth_state_fetch(struct zl3073x_dev *zldev, u8 index)
+{
+ struct zl3073x_synth *synth = &zldev->synth[index];
+ int rc;
+
+ /* Read synth control register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_SYNTH_CTRL(index), &synth->ctrl);
+ if (rc)
+ return rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read synth configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_SYNTH_MB_SEM, ZL_SYNTH_MB_SEM_RD,
+ ZL_REG_SYNTH_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* The output frequency is determined by the following formula:
+ * base * multiplier * numerator / denominator
+ *
+ * Read the registers holding these values.
+ */
+ rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_BASE, &synth->freq_base);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_SYNTH_FREQ_MULT, &synth->freq_mult);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_M, &synth->freq_m);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_N, &synth->freq_n);
+ if (rc)
+ return rc;
+
+ /* Check denominator for zero to avoid div by 0 */
+ if (!synth->freq_n) {
+ dev_err(zldev->dev,
+ "Zero divisor for SYNTH%u retrieved from device\n",
+ index);
+ return -EINVAL;
+ }
+
+ dev_dbg(zldev->dev, "SYNTH%u frequency: %u Hz\n", index,
+ zl3073x_synth_freq_get(synth));
+
+ return rc;
+}
+
+/**
+ * zl3073x_synth_state_get - get current synth state
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: synth index to get state for
+ *
+ * Return: pointer to given synth state
+ */
+const struct zl3073x_synth *zl3073x_synth_state_get(struct zl3073x_dev *zldev,
+ u8 index)
+{
+ return &zldev->synth[index];
+}
diff --git a/drivers/dpll/zl3073x/synth.h b/drivers/dpll/zl3073x/synth.h
new file mode 100644
index 000000000000..6c55eb8a888c
--- /dev/null
+++ b/drivers/dpll/zl3073x/synth.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_SYNTH_H
+#define _ZL3073X_SYNTH_H
+
+#include <linux/bitfield.h>
+#include <linux/math64.h>
+#include <linux/types.h>
+
+#include "regs.h"
+
+struct zl3073x_dev;
+
+/**
+ * struct zl3073x_synth - synthesizer state
+ * @freq_mult: frequency multiplier
+ * @freq_base: frequency base
+ * @freq_m: frequency numerator
+ * @freq_n: frequency denominator
+ * @ctrl: synth control
+ */
+struct zl3073x_synth {
+ u32 freq_mult;
+ u16 freq_base;
+ u16 freq_m;
+ u16 freq_n;
+ u8 ctrl;
+};
+
+int zl3073x_synth_state_fetch(struct zl3073x_dev *zldev, u8 synth_id);
+
+const struct zl3073x_synth *zl3073x_synth_state_get(struct zl3073x_dev *zldev,
+ u8 synth_id);
+
+int zl3073x_synth_state_set(struct zl3073x_dev *zldev, u8 synth_id,
+ const struct zl3073x_synth *synth);
+
+/**
+ * zl3073x_synth_dpll_get - get DPLL ID the synth is driven by
+ * @synth: pointer to synth state
+ *
+ * Return: ID of the DPLL the given synthesizer is driven by
+ */
+static inline u8 zl3073x_synth_dpll_get(const struct zl3073x_synth *synth)
+{
+ return FIELD_GET(ZL_SYNTH_CTRL_DPLL_SEL, synth->ctrl);
+}
+
+/**
+ * zl3073x_synth_freq_get - get current synth frequency
+ * @synth: pointer to synth state
+ *
+ * Return: frequency of the given synthesizer
+ */
+static inline u32 zl3073x_synth_freq_get(const struct zl3073x_synth *synth)
+{
+ return mul_u64_u32_div(synth->freq_base * synth->freq_m,
+ synth->freq_mult, synth->freq_n);
+}
+
+/**
+ * zl3073x_synth_is_enabled - check if the given synth is enabled
+ * @synth: pointer to synth state
+ *
+ * Return: true if synth is enabled, false otherwise
+ */
+static inline bool zl3073x_synth_is_enabled(const struct zl3073x_synth *synth)
+{
+ return FIELD_GET(ZL_SYNTH_CTRL_EN, synth->ctrl);
+}
+
+#endif /* _ZL3073X_SYNTH_H */
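
For reference, zl3073x_synth_freq_get() above computes base * M * mult / N
with a 64-bit intermediate product, which is what mul_u64_u32_div() provides.
A plain-C equivalent (illustrative values, not taken from the datasheet):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t base = 1000, m = 25, n = 100;
            uint32_t mult = 625;

            /* mul_u64_u32_div(base * m, mult, n): product stays in 64 bits */
            uint64_t freq = (uint64_t)(base * m) * mult / n;

            printf("%llu Hz\n", (unsigned long long)freq);  /* 156250 Hz */
            return 0;
    }
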
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 19ad3c3b675d..81e40543ffd8 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -23,14 +23,6 @@ menuconfig EDAC
if EDAC
-config EDAC_LEGACY_SYSFS
- bool "EDAC legacy sysfs"
- default y
- help
- Enable the compatibility sysfs nodes.
- Use 'Y' if your edac utilities aren't ported to work with the newer
- structures.
-
config EDAC_DEBUG
bool "Debugging"
select DEBUG_FS
@@ -291,6 +283,18 @@ config EDAC_I10NM
system has non-volatile DIMMs you should also manually
select CONFIG_ACPI_NFIT.
+config EDAC_IMH
+ tristate "Intel Integrated Memory/IO Hub MC"
+ depends on X86_64 && X86_MCE_INTEL && ACPI
+ depends on ACPI_NFIT || !ACPI_NFIT # if ACPI_NFIT=m, EDAC_IMH can't be y
+ select DMI
+ select ACPI_ADXL
+ help
+ Support for error detection and correction on the Intel
+ Integrated Memory/IO Hub Memory Controller. This MC IP is
+ first used on Diamond Rapids servers but may appear on
+ others in the future.
+
config EDAC_PND2
tristate "Intel Pondicherry2"
depends on PCI && X86_64 && X86_MCE_INTEL
@@ -576,4 +580,20 @@ config EDAC_LOONGSON
errors (CE) only. Loongson-3A5000/3C5000/3D5000/3A6000/3C6000
are compatible.
+config EDAC_CORTEX_A72
+ tristate "ARM Cortex A72"
+ depends on ARM64
+ help
+ Support for L1/L2 cache error detection on the ARM Cortex A72
+ processor. Errors are detected and reported by reading the CPU
+ and L2 memory error syndrome registers.
+
+config EDAC_VERSALNET
+ tristate "AMD VersalNET DDR Controller"
+ depends on CDX_CONTROLLER && ARCH_ZYNQMP
+ help
+ Support for single-bit error correction, double-bit error detection
+ and reporting of other system errors from IP subsystems such as the
+ RPU, NoCs, HNICX and PL on the AMD Versal NET DDR memory controller.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index a8f2d8f6c894..8429b1e856bc 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -65,6 +65,9 @@ obj-$(CONFIG_EDAC_SKX) += skx_edac.o skx_edac_common.o
i10nm_edac-y := i10nm_base.o
obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o skx_edac_common.o
+imh_edac-y := imh_base.o
+obj-$(CONFIG_EDAC_IMH) += imh_edac.o skx_edac_common.o
+
obj-$(CONFIG_EDAC_HIGHBANK_MC) += highbank_mc_edac.o
obj-$(CONFIG_EDAC_HIGHBANK_L2) += highbank_l2_edac.o
@@ -88,3 +91,5 @@ obj-$(CONFIG_EDAC_NPCM) += npcm_edac.o
obj-$(CONFIG_EDAC_ZYNQMP) += zynqmp_edac.o
obj-$(CONFIG_EDAC_VERSAL) += versal_edac.o
obj-$(CONFIG_EDAC_LOONGSON) += loongson_edac.o
+obj-$(CONFIG_EDAC_VERSALNET) += versalnet_edac.o
+obj-$(CONFIG_EDAC_CORTEX_A72) += a72_edac.o
diff --git a/drivers/edac/a72_edac.c b/drivers/edac/a72_edac.c
new file mode 100644
index 000000000000..9262d75c3855
--- /dev/null
+++ b/drivers/edac/a72_edac.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cortex A72 EDAC L1 and L2 cache error detection
+ *
+ * Copyright (c) 2020 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (c) 2025 Microsoft Corporation, <vijayb@linux.microsoft.com>
+ *
+ * Based on Code from:
+ * Copyright (c) 2018, NXP Semiconductor
+ * Author: York Sun <york.sun@nxp.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/bitfield.h>
+#include <asm/smp_plat.h>
+
+#include "edac_module.h"
+
+#define DRVNAME "a72-edac"
+
+#define SYS_CPUMERRSR_EL1 sys_reg(3, 1, 15, 2, 2)
+#define SYS_L2MERRSR_EL1 sys_reg(3, 1, 15, 2, 3)
+
+#define CPUMERRSR_EL1_RAMID GENMASK(30, 24)
+#define L2MERRSR_EL1_CPUID_WAY GENMASK(21, 18)
+
+#define CPUMERRSR_EL1_VALID BIT(31)
+#define CPUMERRSR_EL1_FATAL BIT(63)
+#define L2MERRSR_EL1_VALID BIT(31)
+#define L2MERRSR_EL1_FATAL BIT(63)
+
+#define L1_I_TAG_RAM 0x00
+#define L1_I_DATA_RAM 0x01
+#define L1_D_TAG_RAM 0x08
+#define L1_D_DATA_RAM 0x09
+#define TLB_RAM 0x18
+
+#define MESSAGE_SIZE 64
+
+struct mem_err_synd_reg {
+ u64 cpu_mesr;
+ u64 l2_mesr;
+};
+
+static struct cpumask compat_mask;
+
+static void report_errors(struct edac_device_ctl_info *edac_ctl, int cpu,
+ struct mem_err_synd_reg *mesr)
+{
+ u64 cpu_mesr = mesr->cpu_mesr;
+ u64 l2_mesr = mesr->l2_mesr;
+ char msg[MESSAGE_SIZE];
+
+ if (cpu_mesr & CPUMERRSR_EL1_VALID) {
+ const char *str;
+ bool fatal = cpu_mesr & CPUMERRSR_EL1_FATAL;
+
+ switch (FIELD_GET(CPUMERRSR_EL1_RAMID, cpu_mesr)) {
+ case L1_I_TAG_RAM:
+ str = "L1-I Tag RAM";
+ break;
+ case L1_I_DATA_RAM:
+ str = "L1-I Data RAM";
+ break;
+ case L1_D_TAG_RAM:
+ str = "L1-D Tag RAM";
+ break;
+ case L1_D_DATA_RAM:
+ str = "L1-D Data RAM";
+ break;
+ case TLB_RAM:
+ str = "TLB RAM";
+ break;
+ default:
+ str = "Unspecified";
+ break;
+ }
+
+ snprintf(msg, MESSAGE_SIZE, "%s %s error(s) on CPU %d",
+ str, fatal ? "fatal" : "correctable", cpu);
+
+ if (fatal)
+ edac_device_handle_ue(edac_ctl, cpu, 0, msg);
+ else
+ edac_device_handle_ce(edac_ctl, cpu, 0, msg);
+ }
+
+ if (l2_mesr & L2MERRSR_EL1_VALID) {
+ bool fatal = l2_mesr & L2MERRSR_EL1_FATAL;
+
+ snprintf(msg, MESSAGE_SIZE, "L2 %s error(s) on CPU %d CPUID/WAY 0x%lx",
+ fatal ? "fatal" : "correctable", cpu,
+ FIELD_GET(L2MERRSR_EL1_CPUID_WAY, l2_mesr));
+ if (fatal)
+ edac_device_handle_ue(edac_ctl, cpu, 1, msg);
+ else
+ edac_device_handle_ce(edac_ctl, cpu, 1, msg);
+ }
+}
+
+static void read_errors(void *data)
+{
+ struct mem_err_synd_reg *mesr = data;
+
+ mesr->cpu_mesr = read_sysreg_s(SYS_CPUMERRSR_EL1);
+ if (mesr->cpu_mesr & CPUMERRSR_EL1_VALID) {
+ write_sysreg_s(0, SYS_CPUMERRSR_EL1);
+ isb();
+ }
+ mesr->l2_mesr = read_sysreg_s(SYS_L2MERRSR_EL1);
+ if (mesr->l2_mesr & L2MERRSR_EL1_VALID) {
+ write_sysreg_s(0, SYS_L2MERRSR_EL1);
+ isb();
+ }
+}
+
+static void a72_edac_check(struct edac_device_ctl_info *edac_ctl)
+{
+ struct mem_err_synd_reg mesr;
+ int cpu;
+
+ cpus_read_lock();
+ for_each_cpu_and(cpu, cpu_online_mask, &compat_mask) {
+ smp_call_function_single(cpu, read_errors, &mesr, true);
+ report_errors(edac_ctl, cpu, &mesr);
+ }
+ cpus_read_unlock();
+}
+
+static int a72_edac_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_ctl;
+ struct device *dev = &pdev->dev;
+ int rc;
+
+ edac_ctl = edac_device_alloc_ctl_info(0, "cpu",
+ num_possible_cpus(), "L", 2, 1,
+ edac_device_alloc_index());
+ if (!edac_ctl)
+ return -ENOMEM;
+
+ edac_ctl->edac_check = a72_edac_check;
+ edac_ctl->dev = dev;
+ edac_ctl->mod_name = dev_name(dev);
+ edac_ctl->dev_name = dev_name(dev);
+ edac_ctl->ctl_name = DRVNAME;
+ dev_set_drvdata(dev, edac_ctl);
+
+ rc = edac_device_add_device(edac_ctl);
+ if (rc)
+ goto out_dev;
+
+ return 0;
+
+out_dev:
+ edac_device_free_ctl_info(edac_ctl);
+
+ return rc;
+}
+
+static void a72_edac_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_ctl = dev_get_drvdata(&pdev->dev);
+
+ edac_device_del_device(edac_ctl->dev);
+ edac_device_free_ctl_info(edac_ctl);
+}
+
+static const struct of_device_id cortex_arm64_edac_of_match[] = {
+ { .compatible = "arm,cortex-a72" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cortex_arm64_edac_of_match);
+
+static struct platform_driver a72_edac_driver = {
+ .probe = a72_edac_probe,
+ .remove = a72_edac_remove,
+ .driver = {
+ .name = DRVNAME,
+ },
+};
+
+static struct platform_device *a72_pdev;
+
+static int __init a72_edac_driver_init(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
+
+ if (np) {
+ if (of_match_node(cortex_arm64_edac_of_match, np) &&
+ of_property_read_bool(np, "edac-enabled")) {
+ cpumask_set_cpu(cpu, &compat_mask);
+ }
+ } else {
+ pr_warn("failed to find device node for CPU %d\n", cpu);
+ }
+ }
+
+ if (cpumask_empty(&compat_mask))
+ return 0;
+
+ a72_pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0);
+ if (IS_ERR(a72_pdev)) {
+ pr_err("failed to register A72 EDAC device\n");
+ return PTR_ERR(a72_pdev);
+ }
+
+ return platform_driver_register(&a72_edac_driver);
+}
+
+static void __exit a72_edac_driver_exit(void)
+{
+ platform_device_unregister(a72_pdev);
+ platform_driver_unregister(&a72_edac_driver);
+}
+
+module_init(a72_edac_driver_init);
+module_exit(a72_edac_driver_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_DESCRIPTION("Cortex A72 L1 and L2 cache EDAC driver");
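
For illustration (not part of the patch), the syndrome-register decoding that
report_errors() performs can be sketched in plain C using the same field
positions as the CPUMERRSR_EL1_* defines above:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* valid bit set, RAMID = L1-D Data RAM (0x09), not fatal */
            uint64_t cpumerrsr = (1ull << 31) | (0x09ull << 24);

            int valid = (cpumerrsr >> 31) & 1;              /* CPUMERRSR_EL1_VALID */
            int fatal = (cpumerrsr >> 63) & 1;              /* CPUMERRSR_EL1_FATAL */
            unsigned int ramid = (cpumerrsr >> 24) & 0x7f;  /* CPUMERRSR_EL1_RAMID */

            /* prints: valid=1 fatal=0 ramid=0x09 */
            printf("valid=%d fatal=%d ramid=0x%02x\n", valid, fatal, ramid);
            return 0;
    }
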
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index cae52c654a15..0c5b94e64ea1 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -128,7 +128,6 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
ptemp = dma_alloc_coherent(mci->pdev, 16, &dma_handle, GFP_KERNEL);
if (!ptemp) {
- dma_free_coherent(mci->pdev, 16, ptemp, dma_handle);
edac_printk(KERN_ERR, EDAC_MC,
"Inject: Buffer Allocation error\n");
return -ENOMEM;
@@ -1185,10 +1184,22 @@ altr_check_ocram_deps_init(struct altr_edac_device_dev *device)
if (ret)
return ret;
- /* Verify OCRAM has been initialized */
+ /*
+ * Verify that OCRAM has been initialized.
+ * During a warm reset, OCRAM contents are retained, but the control
+ * and status registers are reset to their default values. Therefore,
+ * ECC must be explicitly re-enabled in the control register.
+ * Error condition: if INITCOMPLETEA is clear and ECC_EN is already set.
+ */
if (!ecc_test_bits(ALTR_A10_ECC_INITCOMPLETEA,
- (base + ALTR_A10_ECC_INITSTAT_OFST)))
- return -ENODEV;
+ (base + ALTR_A10_ECC_INITSTAT_OFST))) {
+ if (!ecc_test_bits(ALTR_A10_ECC_EN,
+ (base + ALTR_A10_ECC_CTRL_OFST)))
+ ecc_set_bits(ALTR_A10_ECC_EN,
+ (base + ALTR_A10_ECC_CTRL_OFST));
+ else
+ return -ENODEV;
+ }
/* Enable IRQ on Single Bit Error */
writel(ALTR_A10_ECC_SERRINTEN, (base + ALTR_A10_ECC_ERRINTENS_OFST));
@@ -1358,7 +1369,7 @@ static const struct edac_device_prv_data a10_enetecc_data = {
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
- .inject_fops = &altr_edac_a10_device_inject2_fops,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
};
#endif /* CONFIG_EDAC_ALTERA_ETHERNET */
@@ -1448,7 +1459,7 @@ static const struct edac_device_prv_data a10_usbecc_data = {
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
- .inject_fops = &altr_edac_a10_device_inject2_fops,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
};
#endif /* CONFIG_EDAC_ALTERA_USB */
@@ -2131,8 +2142,8 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
edac->irq_chip.name = pdev->dev.of_node->name;
edac->irq_chip.irq_mask = a10_eccmgr_irq_mask;
edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask;
- edac->domain = irq_domain_create_linear(of_fwnode_handle(pdev->dev.of_node),
- 64, &a10_eccmgr_ic_ops, edac);
+ edac->domain = irq_domain_create_linear(dev_fwnode(&pdev->dev), 64, &a10_eccmgr_ic_ops,
+ edac);
if (!edac->domain) {
dev_err(&pdev->dev, "Error adding IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 58b1482a0fbb..2391f3469961 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1209,7 +1209,9 @@ static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
cs_mode |= CS_ODD_PRIMARY;
- /* Asymmetric dual-rank DIMM support. */
+ if (csrow_sec_enabled(2 * dimm, ctrl, pvt))
+ cs_mode |= CS_EVEN_SECONDARY;
+
if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
cs_mode |= CS_ODD_SECONDARY;
@@ -1230,12 +1232,13 @@ static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
return cs_mode;
}
-static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
- int csrow_nr, int dimm)
+static int calculate_cs_size(u32 mask, unsigned int cs_mode)
{
- u32 msb, weight, num_zero_bits;
- u32 addr_mask_deinterleaved;
- int size = 0;
+ int msb, weight, num_zero_bits;
+ u32 deinterleaved_mask;
+
+ if (!mask)
+ return 0;
/*
* The number of zero bits in the mask is equal to the number of bits
@@ -1248,19 +1251,30 @@ static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
* without swapping with the most significant bit. This can be handled
* by keeping the MSB where it is and ignoring the single zero bit.
*/
- msb = fls(addr_mask_orig) - 1;
- weight = hweight_long(addr_mask_orig);
+ msb = fls(mask) - 1;
+ weight = hweight_long(mask);
num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
/* Take the number of zero bits off from the top of the mask. */
- addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
+ deinterleaved_mask = GENMASK(msb - num_zero_bits, 1);
+ edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", deinterleaved_mask);
+
+ return (deinterleaved_mask >> 2) + 1;
+}
+
+static int __addr_mask_to_cs_size(u32 addr_mask, u32 addr_mask_sec,
+ unsigned int cs_mode, int csrow_nr, int dimm)
+{
+ int size;
edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
- edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
- edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
+ edac_dbg(1, " Primary AddrMask: 0x%x\n", addr_mask);
/* Register [31:1] = Address [39:9]. Size is in kBs here. */
- size = (addr_mask_deinterleaved >> 2) + 1;
+ size = calculate_cs_size(addr_mask, cs_mode);
+
+ edac_dbg(1, " Secondary AddrMask: 0x%x\n", addr_mask_sec);
+ size += calculate_cs_size(addr_mask_sec, cs_mode);
/* Return size in MBs. */
return size >> 10;
@@ -1269,8 +1283,8 @@ static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
unsigned int cs_mode, int csrow_nr)
{
+ u32 addr_mask = 0, addr_mask_sec = 0;
int cs_mask_nr = csrow_nr;
- u32 addr_mask_orig;
int dimm, size = 0;
/* No Chip Selects are enabled. */
@@ -1308,13 +1322,13 @@ static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
if (!pvt->flags.zn_regs_v2)
cs_mask_nr >>= 1;
- /* Asymmetric dual-rank DIMM support. */
- if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
- addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
- else
- addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
+ if (cs_mode & (CS_EVEN_PRIMARY | CS_ODD_PRIMARY))
+ addr_mask = pvt->csels[umc].csmasks[cs_mask_nr];
- return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, dimm);
+ if (cs_mode & (CS_EVEN_SECONDARY | CS_ODD_SECONDARY))
+ addr_mask_sec = pvt->csels[umc].csmasks_sec[cs_mask_nr];
+
+ return __addr_mask_to_cs_size(addr_mask, addr_mask_sec, cs_mode, csrow_nr, dimm);
}
static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
@@ -3512,9 +3526,10 @@ static void gpu_get_err_info(struct mce *m, struct err_info *err)
static int gpu_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
unsigned int cs_mode, int csrow_nr)
{
- u32 addr_mask_orig = pvt->csels[umc].csmasks[csrow_nr];
+ u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr];
+ u32 addr_mask_sec = pvt->csels[umc].csmasks_sec[csrow_nr];
- return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, csrow_nr >> 1);
+ return __addr_mask_to_cs_size(addr_mask, addr_mask_sec, cs_mode, csrow_nr, csrow_nr >> 1);
}
static void gpu_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
@@ -3717,6 +3732,7 @@ static void hw_info_put(struct amd64_pvt *pvt)
pci_dev_put(pvt->F1);
pci_dev_put(pvt->F2);
kfree(pvt->umc);
+ kfree(pvt->csels);
}
static struct low_ops umc_ops = {
@@ -3751,6 +3767,7 @@ static int per_family_init(struct amd64_pvt *pvt)
pvt->stepping = boot_cpu_data.x86_stepping;
pvt->model = boot_cpu_data.x86_model;
pvt->fam = boot_cpu_data.x86;
+ char *tmp_name = NULL;
pvt->max_mcs = 2;
/*
@@ -3764,7 +3781,7 @@ static int per_family_init(struct amd64_pvt *pvt)
switch (pvt->fam) {
case 0xf:
- pvt->ctl_name = (pvt->ext_model >= K8_REV_F) ?
+ tmp_name = (pvt->ext_model >= K8_REV_F) ?
"K8 revF or later" : "K8 revE or earlier";
pvt->f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP;
pvt->f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL;
@@ -3773,7 +3790,6 @@ static int per_family_init(struct amd64_pvt *pvt)
break;
case 0x10:
- pvt->ctl_name = "F10h";
pvt->f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP;
pvt->f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM;
pvt->ops->dbam_to_cs = f10_dbam_to_chip_select;
@@ -3782,12 +3798,10 @@ static int per_family_init(struct amd64_pvt *pvt)
case 0x15:
switch (pvt->model) {
case 0x30:
- pvt->ctl_name = "F15h_M30h";
pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2;
break;
case 0x60:
- pvt->ctl_name = "F15h_M60h";
pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2;
pvt->ops->dbam_to_cs = f15_m60h_dbam_to_chip_select;
@@ -3796,7 +3810,6 @@ static int per_family_init(struct amd64_pvt *pvt)
/* Richland is only client */
return -ENODEV;
default:
- pvt->ctl_name = "F15h";
pvt->f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2;
pvt->ops->dbam_to_cs = f15_dbam_to_chip_select;
@@ -3807,12 +3820,10 @@ static int per_family_init(struct amd64_pvt *pvt)
case 0x16:
switch (pvt->model) {
case 0x30:
- pvt->ctl_name = "F16h_M30h";
pvt->f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2;
break;
default:
- pvt->ctl_name = "F16h";
pvt->f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2;
break;
@@ -3821,75 +3832,51 @@ static int per_family_init(struct amd64_pvt *pvt)
case 0x17:
switch (pvt->model) {
- case 0x10 ... 0x2f:
- pvt->ctl_name = "F17h_M10h";
- break;
case 0x30 ... 0x3f:
- pvt->ctl_name = "F17h_M30h";
pvt->max_mcs = 8;
break;
- case 0x60 ... 0x6f:
- pvt->ctl_name = "F17h_M60h";
- break;
- case 0x70 ... 0x7f:
- pvt->ctl_name = "F17h_M70h";
- break;
default:
- pvt->ctl_name = "F17h";
break;
}
break;
case 0x18:
- pvt->ctl_name = "F18h";
break;
case 0x19:
switch (pvt->model) {
case 0x00 ... 0x0f:
- pvt->ctl_name = "F19h";
pvt->max_mcs = 8;
break;
case 0x10 ... 0x1f:
- pvt->ctl_name = "F19h_M10h";
pvt->max_mcs = 12;
pvt->flags.zn_regs_v2 = 1;
break;
- case 0x20 ... 0x2f:
- pvt->ctl_name = "F19h_M20h";
- break;
case 0x30 ... 0x3f:
if (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) {
- pvt->ctl_name = "MI200";
+ tmp_name = "MI200";
pvt->max_mcs = 4;
pvt->dram_type = MEM_HBM2;
pvt->gpu_umc_base = 0x50000;
pvt->ops = &gpu_ops;
} else {
- pvt->ctl_name = "F19h_M30h";
pvt->max_mcs = 8;
}
break;
- case 0x50 ... 0x5f:
- pvt->ctl_name = "F19h_M50h";
- break;
case 0x60 ... 0x6f:
- pvt->ctl_name = "F19h_M60h";
pvt->flags.zn_regs_v2 = 1;
break;
case 0x70 ... 0x7f:
- pvt->ctl_name = "F19h_M70h";
+ pvt->max_mcs = 4;
pvt->flags.zn_regs_v2 = 1;
break;
case 0x90 ... 0x9f:
- pvt->ctl_name = "F19h_M90h";
pvt->max_mcs = 4;
pvt->dram_type = MEM_HBM3;
pvt->gpu_umc_base = 0x90000;
pvt->ops = &gpu_ops;
break;
case 0xa0 ... 0xaf:
- pvt->ctl_name = "F19h_MA0h";
pvt->max_mcs = 12;
pvt->flags.zn_regs_v2 = 1;
break;
@@ -3899,12 +3886,20 @@ static int per_family_init(struct amd64_pvt *pvt)
case 0x1A:
switch (pvt->model) {
case 0x00 ... 0x1f:
- pvt->ctl_name = "F1Ah";
pvt->max_mcs = 12;
pvt->flags.zn_regs_v2 = 1;
break;
case 0x40 ... 0x4f:
- pvt->ctl_name = "F1Ah_M40h";
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0x50 ... 0x57:
+ case 0xc0 ... 0xc7:
+ pvt->max_mcs = 16;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0x90 ... 0x9f:
+ case 0xa0 ... 0xaf:
+ pvt->max_mcs = 8;
pvt->flags.zn_regs_v2 = 1;
break;
}
@@ -3915,6 +3910,16 @@ static int per_family_init(struct amd64_pvt *pvt)
return -ENODEV;
}
+ if (tmp_name)
+ scnprintf(pvt->ctl_name, sizeof(pvt->ctl_name), "%s", tmp_name);
+ else
+ scnprintf(pvt->ctl_name, sizeof(pvt->ctl_name), "F%02Xh_M%02Xh",
+ pvt->fam, pvt->model);
+
+ pvt->csels = kcalloc(pvt->max_mcs, sizeof(*pvt->csels), GFP_KERNEL);
+ if (!pvt->csels)
+ return -ENOMEM;
+
return 0;
}
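
For illustration (not part of the patch), the deinterleaving arithmetic in
calculate_cs_size() can be reproduced in userspace; per the Register
[31:1] = Address [39:9] mapping, the result is in kB, and clearing one
interleave bit from the address mask halves the per-chip-select size:

    #include <stdio.h>
    #include <stdint.h>

    static int cs_size_kb(uint32_t mask, int interleaved_3r)
    {
            if (!mask)
                    return 0;

            int msb = 31 - __builtin_clz(mask);        /* fls(mask) - 1  */
            int weight = __builtin_popcount(mask);     /* hweight_long() */
            int zeros = msb - weight - !!interleaved_3r;
            /* GENMASK(msb - zeros, 1) */
            uint32_t deint = ((1u << (msb - zeros + 1)) - 1) & ~1u;

            return (deint >> 2) + 1;                   /* size in kB */
    }

    int main(void)
    {
            printf("%d kB\n", cs_size_kb(0xfffe, 0));  /* 16384 kB */
            printf("%d kB\n", cs_size_kb(0xfefe, 0));  /*  8192 kB */
            return 0;
    }
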
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 17228d07de4c..1757c1b99fc8 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -96,11 +96,12 @@
/* Hardware limit on ChipSelect rows per MC and processors per system */
#define NUM_CHIPSELECTS 8
#define DRAM_RANGES 8
-#define NUM_CONTROLLERS 12
#define ON true
#define OFF false
+#define MAX_CTL_NAMELEN 19
+
/*
* PCI-defined configuration space registers
*/
@@ -346,7 +347,7 @@ struct amd64_pvt {
u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
/* one for each DCT/UMC */
- struct chip_select csels[NUM_CONTROLLERS];
+ struct chip_select *csels;
/* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
struct dram_range ranges[DRAM_RANGES];
@@ -362,7 +363,7 @@ struct amd64_pvt {
/* x4, x8, or x16 syndromes in use */
u8 ecc_sym_sz;
- const char *ctl_name;
+ char ctl_name[MAX_CTL_NAMELEN];
u16 f1_id, f2_id;
/* Maximum number of memory controllers per die/node. */
u8 max_mcs;
diff --git a/drivers/edac/ecs.c b/drivers/edac/ecs.c
index 1d51838a60c1..51c451c7f0f0 100755..100644
--- a/drivers/edac/ecs.c
+++ b/drivers/edac/ecs.c
@@ -170,8 +170,10 @@ static int ecs_create_desc(struct device *ecs_dev, const struct attribute_group
fru_ctx->dev_attr[ECS_RESET] = EDAC_ECS_ATTR_WO(reset, fru);
fru_ctx->dev_attr[ECS_THRESHOLD] = EDAC_ECS_ATTR_RW(threshold, fru);
- for (i = 0; i < ECS_MAX_ATTRS; i++)
+ for (i = 0; i < ECS_MAX_ATTRS; i++) {
+ sysfs_attr_init(&fru_ctx->dev_attr[i].dev_attr.attr);
fru_ctx->ecs_attrs[i] = &fru_ctx->dev_attr[i].dev_attr.attr;
+ }
sprintf(fru_ctx->name, "%s%d", EDAC_ECS_FRU_NAME, fru);
group->name = fru_ctx->name;
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 0f338adf7d93..091cc6aae8a9 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -115,377 +115,6 @@ static const char * const edac_caps[] = {
[EDAC_S16ECD16ED] = "S16ECD16ED"
};
-#ifdef CONFIG_EDAC_LEGACY_SYSFS
-/*
- * EDAC sysfs CSROW data structures and methods
- */
-
-#define to_csrow(k) container_of(k, struct csrow_info, dev)
-
-/*
- * We need it to avoid namespace conflicts between the legacy API
- * and the per-dimm/per-rank one
- */
-#define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
- static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)
-
-struct dev_ch_attribute {
- struct device_attribute attr;
- unsigned int channel;
-};
-
-#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
- static struct dev_ch_attribute dev_attr_legacy_##_name = \
- { __ATTR(_name, _mode, _show, _store), (_var) }
-
-#define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel)
-
-/* Set of more default csrow<id> attribute show/store functions */
-static ssize_t csrow_ue_count_show(struct device *dev,
- struct device_attribute *mattr, char *data)
-{
- struct csrow_info *csrow = to_csrow(dev);
-
- return sysfs_emit(data, "%u\n", csrow->ue_count);
-}
-
-static ssize_t csrow_ce_count_show(struct device *dev,
- struct device_attribute *mattr, char *data)
-{
- struct csrow_info *csrow = to_csrow(dev);
-
- return sysfs_emit(data, "%u\n", csrow->ce_count);
-}
-
-static ssize_t csrow_size_show(struct device *dev,
- struct device_attribute *mattr, char *data)
-{
- struct csrow_info *csrow = to_csrow(dev);
- int i;
- u32 nr_pages = 0;
-
- for (i = 0; i < csrow->nr_channels; i++)
- nr_pages += csrow->channels[i]->dimm->nr_pages;
- return sysfs_emit(data, "%u\n", PAGES_TO_MiB(nr_pages));
-}
-
-static ssize_t csrow_mem_type_show(struct device *dev,
- struct device_attribute *mattr, char *data)
-{
- struct csrow_info *csrow = to_csrow(dev);
-
- return sysfs_emit(data, "%s\n", edac_mem_types[csrow->channels[0]->dimm->mtype]);
-}
-
-static ssize_t csrow_dev_type_show(struct device *dev,
- struct device_attribute *mattr, char *data)
-{
- struct csrow_info *csrow = to_csrow(dev);
-
- return sysfs_emit(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
-}
-
-static ssize_t csrow_edac_mode_show(struct device *dev,
- struct device_attribute *mattr,
- char *data)
-{
- struct csrow_info *csrow = to_csrow(dev);
-
- return sysfs_emit(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
-}
-
-/* show/store functions for DIMM Label attributes */
-static ssize_t channel_dimm_label_show(struct device *dev,
- struct device_attribute *mattr,
- char *data)
-{
- struct csrow_info *csrow = to_csrow(dev);
- unsigned int chan = to_channel(mattr);
- struct rank_info *rank = csrow->channels[chan];
-
- /* if field has not been initialized, there is nothing to send */
- if (!rank->dimm->label[0])
- return 0;
-
- return sysfs_emit(data, "%s\n", rank->dimm->label);
-}
-
-static ssize_t channel_dimm_label_store(struct device *dev,
- struct device_attribute *mattr,
- const char *data, size_t count)
-{
- struct csrow_info *csrow = to_csrow(dev);
- unsigned int chan = to_channel(mattr);
- struct rank_info *rank = csrow->channels[chan];
- size_t copy_count = count;
-
- if (count == 0)
- return -EINVAL;
-
- if (data[count - 1] == '\0' || data[count - 1] == '\n')
- copy_count -= 1;
-
- if (copy_count == 0 || copy_count >= sizeof(rank->dimm->label))
- return -EINVAL;
-
- memcpy(rank->dimm->label, data, copy_count);
- rank->dimm->label[copy_count] = '\0';
-
- return count;
-}
-
-/* show function for dynamic chX_ce_count attribute */
-static ssize_t channel_ce_count_show(struct device *dev,
- struct device_attribute *mattr, char *data)
-{
- struct csrow_info *csrow = to_csrow(dev);
- unsigned int chan = to_channel(mattr);
- struct rank_info *rank = csrow->channels[chan];
-
- return sysfs_emit(data, "%u\n", rank->ce_count);
-}
-
-/* cwrow<id>/attribute files */
-DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL);
-DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL);
-DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL);
-DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL);
-DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL);
-DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL);
-
-/* default attributes of the CSROW<id> object */
-static struct attribute *csrow_attrs[] = {
- &dev_attr_legacy_dev_type.attr,
- &dev_attr_legacy_mem_type.attr,
- &dev_attr_legacy_edac_mode.attr,
- &dev_attr_legacy_size_mb.attr,
- &dev_attr_legacy_ue_count.attr,
- &dev_attr_legacy_ce_count.attr,
- NULL,
-};
-
-static const struct attribute_group csrow_attr_grp = {
- .attrs = csrow_attrs,
-};
-
-static const struct attribute_group *csrow_attr_groups[] = {
- &csrow_attr_grp,
- NULL
-};
-
-static const struct device_type csrow_attr_type = {
- .groups = csrow_attr_groups,
-};
-
-/*
- * possible dynamic channel DIMM Label attribute files
- *
- */
-DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 0);
-DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 1);
-DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 2);
-DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 3);
-DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 4);
-DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 5);
-DEVICE_CHANNEL(ch6_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 6);
-DEVICE_CHANNEL(ch7_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 7);
-DEVICE_CHANNEL(ch8_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 8);
-DEVICE_CHANNEL(ch9_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 9);
-DEVICE_CHANNEL(ch10_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 10);
-DEVICE_CHANNEL(ch11_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 11);
-
-/* Total possible dynamic DIMM Label attribute file table */
-static struct attribute *dynamic_csrow_dimm_attr[] = {
- &dev_attr_legacy_ch0_dimm_label.attr.attr,
- &dev_attr_legacy_ch1_dimm_label.attr.attr,
- &dev_attr_legacy_ch2_dimm_label.attr.attr,
- &dev_attr_legacy_ch3_dimm_label.attr.attr,
- &dev_attr_legacy_ch4_dimm_label.attr.attr,
- &dev_attr_legacy_ch5_dimm_label.attr.attr,
- &dev_attr_legacy_ch6_dimm_label.attr.attr,
- &dev_attr_legacy_ch7_dimm_label.attr.attr,
- &dev_attr_legacy_ch8_dimm_label.attr.attr,
- &dev_attr_legacy_ch9_dimm_label.attr.attr,
- &dev_attr_legacy_ch10_dimm_label.attr.attr,
- &dev_attr_legacy_ch11_dimm_label.attr.attr,
- NULL
-};
-
-/* possible dynamic channel ce_count attribute files */
-DEVICE_CHANNEL(ch0_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 0);
-DEVICE_CHANNEL(ch1_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 1);
-DEVICE_CHANNEL(ch2_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 2);
-DEVICE_CHANNEL(ch3_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 3);
-DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 4);
-DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 5);
-DEVICE_CHANNEL(ch6_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 6);
-DEVICE_CHANNEL(ch7_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 7);
-DEVICE_CHANNEL(ch8_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 8);
-DEVICE_CHANNEL(ch9_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 9);
-DEVICE_CHANNEL(ch10_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 10);
-DEVICE_CHANNEL(ch11_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 11);
-
-/* Total possible dynamic ce_count attribute file table */
-static struct attribute *dynamic_csrow_ce_count_attr[] = {
- &dev_attr_legacy_ch0_ce_count.attr.attr,
- &dev_attr_legacy_ch1_ce_count.attr.attr,
- &dev_attr_legacy_ch2_ce_count.attr.attr,
- &dev_attr_legacy_ch3_ce_count.attr.attr,
- &dev_attr_legacy_ch4_ce_count.attr.attr,
- &dev_attr_legacy_ch5_ce_count.attr.attr,
- &dev_attr_legacy_ch6_ce_count.attr.attr,
- &dev_attr_legacy_ch7_ce_count.attr.attr,
- &dev_attr_legacy_ch8_ce_count.attr.attr,
- &dev_attr_legacy_ch9_ce_count.attr.attr,
- &dev_attr_legacy_ch10_ce_count.attr.attr,
- &dev_attr_legacy_ch11_ce_count.attr.attr,
- NULL
-};
-
-static umode_t csrow_dev_is_visible(struct kobject *kobj,
- struct attribute *attr, int idx)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
-
- if (idx >= csrow->nr_channels)
- return 0;
-
- if (idx >= ARRAY_SIZE(dynamic_csrow_ce_count_attr) - 1) {
- WARN_ONCE(1, "idx: %d\n", idx);
- return 0;
- }
-
- /* Only expose populated DIMMs */
- if (!csrow->channels[idx]->dimm->nr_pages)
- return 0;
-
- return attr->mode;
-}
-
-
-static const struct attribute_group csrow_dev_dimm_group = {
- .attrs = dynamic_csrow_dimm_attr,
- .is_visible = csrow_dev_is_visible,
-};
-
-static const struct attribute_group csrow_dev_ce_count_group = {
- .attrs = dynamic_csrow_ce_count_attr,
- .is_visible = csrow_dev_is_visible,
-};
-
-static const struct attribute_group *csrow_dev_groups[] = {
- &csrow_dev_dimm_group,
- &csrow_dev_ce_count_group,
- NULL
-};
-
-static void csrow_release(struct device *dev)
-{
- /*
- * Nothing to do, just unregister sysfs here. The mci
- * device owns the data and will also release it.
- */
-}
-
-static inline int nr_pages_per_csrow(struct csrow_info *csrow)
-{
- int chan, nr_pages = 0;
-
- for (chan = 0; chan < csrow->nr_channels; chan++)
- nr_pages += csrow->channels[chan]->dimm->nr_pages;
-
- return nr_pages;
-}
-
-/* Create a CSROW object under specified edac_mc_device */
-static int edac_create_csrow_object(struct mem_ctl_info *mci,
- struct csrow_info *csrow, int index)
-{
- int err;
-
- csrow->dev.type = &csrow_attr_type;
- csrow->dev.groups = csrow_dev_groups;
- csrow->dev.release = csrow_release;
- device_initialize(&csrow->dev);
- csrow->dev.parent = &mci->dev;
- csrow->mci = mci;
- dev_set_name(&csrow->dev, "csrow%d", index);
- dev_set_drvdata(&csrow->dev, csrow);
-
- err = device_add(&csrow->dev);
- if (err) {
- edac_dbg(1, "failure: create device %s\n", dev_name(&csrow->dev));
- put_device(&csrow->dev);
- return err;
- }
-
- edac_dbg(0, "device %s created\n", dev_name(&csrow->dev));
-
- return 0;
-}
-
-/* Create a CSROW object under specified edac_mc_device */
-static int edac_create_csrow_objects(struct mem_ctl_info *mci)
-{
- int err, i;
- struct csrow_info *csrow;
-
- for (i = 0; i < mci->nr_csrows; i++) {
- csrow = mci->csrows[i];
- if (!nr_pages_per_csrow(csrow))
- continue;
- err = edac_create_csrow_object(mci, mci->csrows[i], i);
- if (err < 0)
- goto error;
- }
- return 0;
-
-error:
- for (--i; i >= 0; i--) {
- if (device_is_registered(&mci->csrows[i]->dev))
- device_unregister(&mci->csrows[i]->dev);
- }
-
- return err;
-}
-
-static void edac_delete_csrow_objects(struct mem_ctl_info *mci)
-{
- int i;
-
- for (i = 0; i < mci->nr_csrows; i++) {
- if (device_is_registered(&mci->csrows[i]->dev))
- device_unregister(&mci->csrows[i]->dev);
- }
-}
-
-#endif
-
/*
* Per-dimm (or per-rank) devices
*/
@@ -965,12 +594,6 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
goto fail;
}
-#ifdef CONFIG_EDAC_LEGACY_SYSFS
- err = edac_create_csrow_objects(mci);
- if (err < 0)
- goto fail;
-#endif
-
edac_create_debugfs_nodes(mci);
return 0;
@@ -995,9 +618,6 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
#ifdef CONFIG_EDAC_DEBUG
edac_debugfs_remove_recursive(mci->debugfs);
#endif
-#ifdef CONFIG_EDAC_LEGACY_SYSFS
- edac_delete_csrow_objects(mci);
-#endif
mci_for_each_dimm(mci, dimm) {
if (!device_is_registered(&dimm->dev))
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 1eb0136c6fbd..d80c88818691 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -15,6 +15,7 @@
#include "edac_module.h"
#include <ras/ras_event.h>
#include <linux/notifier.h>
+#include <linux/string.h>
#define OTHER_DETAIL_LEN 400
@@ -332,7 +333,7 @@ static int ghes_edac_report_mem_error(struct notifier_block *nb,
p = pvt->msg;
p += snprintf(p, sizeof(pvt->msg), "%s", cper_mem_err_type_str(etype));
} else {
- strcpy(pvt->msg, "unknown error");
+ strscpy(pvt->msg, "unknown error");
}
/* Error address */
@@ -357,14 +358,14 @@ static int ghes_edac_report_mem_error(struct notifier_block *nb,
dimm = find_dimm_by_handle(mci, mem_err->mem_dev_handle);
if (dimm) {
e->top_layer = dimm->idx;
- strcpy(e->label, dimm->label);
+ strscpy(e->label, dimm->label);
}
}
if (p > e->location)
*(p - 1) = '\0';
if (!*e->label)
- strcpy(e->label, "unknown memory");
+ strscpy(e->label, "unknown memory");
/* All other fields are mapped on e->other_detail */
p = pvt->other_detail;
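
The strcpy() conversions above use the kernel's two-argument strscpy() form, which infers the destination bound from sizeof() when the destination is a fixed-size array and always NUL-terminates. A minimal sketch of the difference, assuming the destination is such an array:

    char msg[80];

    strcpy(msg, src);                /* no bound: overruns if src is long  */
    strscpy(msg, src, sizeof(msg));  /* classic three-argument form        */
    strscpy(msg, src);               /* bound inferred from the array type */
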
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index a3fca2567752..89b3e8cc38b1 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -62,6 +62,7 @@
((GET_BITFIELD(reg, 0, 10) << 12) + 0x140000)
#define I10NM_GNR_IMC_MMIO_OFFSET 0x24c000
+#define I10NM_GNR_D_IMC_MMIO_OFFSET 0x206000
#define I10NM_GNR_IMC_MMIO_SIZE 0x4000
#define I10NM_HBM_IMC_MMIO_SIZE 0x9000
#define I10NM_DDR_IMC_CH_CNT(reg) GET_BITFIELD(reg, 21, 24)
@@ -343,7 +344,7 @@ static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
status_mask = rrl->over_mask | rrl->uc_mask | rrl->v_mask;
- n = snprintf(msg, len, " retry_rd_err_log[");
+ n = scnprintf(msg, len, " retry_rd_err_log[");
for (i = 0; i < rrl->set_num; i++) {
scrub = (rrl->modes[i] == FRE_SCRUB || rrl->modes[i] == LRE_SCRUB);
if (scrub_err != scrub)
@@ -355,9 +356,9 @@ static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
log = read_imc_reg(imc, ch, offset, width);
if (width == 4)
- n += snprintf(msg + n, len - n, "%.8llx ", log);
+ n += scnprintf(msg + n, len - n, "%.8llx ", log);
else
- n += snprintf(msg + n, len - n, "%.16llx ", log);
+ n += scnprintf(msg + n, len - n, "%.16llx ", log);
/* Clear RRL status if RRL in Linux control mode. */
if (retry_rd_err_log == 2 && !j && (log & status_mask))
@@ -367,10 +368,10 @@ static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
/* Move back one space. */
n--;
- n += snprintf(msg + n, len - n, "]");
+ n += scnprintf(msg + n, len - n, "]");
if (len - n > 0) {
- n += snprintf(msg + n, len - n, " correrrcnt[");
+ n += scnprintf(msg + n, len - n, " correrrcnt[");
for (i = 0; i < rrl->cecnt_num && len - n > 0; i++) {
offset = rrl->cecnt_offsets[i];
width = rrl->cecnt_widths[i];
@@ -378,20 +379,20 @@ static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
/* CPUs {ICX,SPR} encode two counters per 4-byte CORRERRCNT register. */
if (res_cfg->type <= SPR) {
- n += snprintf(msg + n, len - n, "%.4llx %.4llx ",
+ n += scnprintf(msg + n, len - n, "%.4llx %.4llx ",
corr & 0xffff, corr >> 16);
} else {
/* CPUs {GNR} encode one counter per CORRERRCNT register. */
if (width == 4)
- n += snprintf(msg + n, len - n, "%.8llx ", corr);
+ n += scnprintf(msg + n, len - n, "%.8llx ", corr);
else
- n += snprintf(msg + n, len - n, "%.16llx ", corr);
+ n += scnprintf(msg + n, len - n, "%.16llx ", corr);
}
}
/* Move back one space. */
n--;
- n += snprintf(msg + n, len - n, "]");
+ n += scnprintf(msg + n, len - n, "]");
}
}
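
The snprintf() -> scnprintf() swap matters because the two return different lengths: snprintf() reports what would have been written without truncation, so a running offset built from its return value can walk past the buffer, while scnprintf() reports what was actually stored. A small userspace illustration of the failure mode:

    #include <stdio.h>

    int main(void)
    {
            char buf[8];
            int n = 0;

            /* Returns 12 (the untruncated length), not the 7 bytes stored. */
            n += snprintf(buf + n, sizeof(buf) - n, "hello world!");

            /* sizeof(buf) - n is now negative; converted to size_t it
             * becomes huge, so a further snprintf() may overrun buf.
             * The kernel's scnprintf() returns the stored length and
             * avoids this. */
            return n;
    }
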
@@ -467,17 +468,18 @@ static int i10nm_get_imc_num(struct res_config *cfg)
return -ENODEV;
}
- if (imc_num > I10NM_NUM_DDR_IMC) {
- i10nm_printk(KERN_ERR, "Need to make I10NM_NUM_DDR_IMC >= %d\n", imc_num);
- return -EINVAL;
- }
-
if (cfg->ddr_imc_num != imc_num) {
/*
- * Store the number of present DDR memory controllers.
+ * Update the configuration data to reflect the number of
+ * present DDR memory controllers.
*/
cfg->ddr_imc_num = imc_num;
edac_dbg(2, "Set DDR MC number: %d", imc_num);
+
+ /* Release and reallocate skx_dev list with the updated number. */
+ skx_remove();
+ if (skx_get_all_bus_mappings(cfg, &i10nm_edac_list) <= 0)
+ return -ENODEV;
}
return 0;
@@ -687,6 +689,14 @@ static struct pci_dev *get_gnr_mdev(struct skx_dev *d, int logical_idx, int *phy
return NULL;
}
+static u32 get_gnr_imc_mmio_offset(void)
+{
+ if (boot_cpu_data.x86_vfm == INTEL_GRANITERAPIDS_D)
+ return I10NM_GNR_D_IMC_MMIO_OFFSET;
+
+ return I10NM_GNR_IMC_MMIO_OFFSET;
+}
+
/**
* get_ddr_munit() - Get the resource of the i-th DDR memory controller.
*
@@ -715,7 +725,7 @@ static struct pci_dev *get_ddr_munit(struct skx_dev *d, int i, u32 *offset, unsi
return NULL;
*offset = I10NM_GET_IMC_MMIO_OFFSET(reg) +
- I10NM_GNR_IMC_MMIO_OFFSET +
+ get_gnr_imc_mmio_offset() +
physical_idx * I10NM_GNR_IMC_MMIO_SIZE;
*size = I10NM_GNR_IMC_MMIO_SIZE;
@@ -1030,6 +1040,7 @@ static const struct x86_cpu_id i10nm_cpuids[] = {
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_cfg),
X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &spr_cfg),
X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &gnr_cfg),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, &gnr_cfg),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &gnr_cfg),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &gnr_cfg),
X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, &gnr_cfg),
@@ -1047,6 +1058,15 @@ static bool i10nm_check_ecc(struct skx_imc *imc, int chan)
return !!GET_BITFIELD(mcmtr, 2, 2);
}
+static bool i10nm_channel_disabled(struct skx_imc *imc, int chan)
+{
+ u32 mcmtr = I10NM_GET_MCMTR(imc, chan);
+
+ edac_dbg(1, "mc%d ch%d mcmtr reg %x\n", imc->mc, chan, mcmtr);
+
+ return (mcmtr == ~0 || GET_BITFIELD(mcmtr, 18, 18));
+}
+
static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
struct res_config *cfg)
{
@@ -1060,6 +1080,11 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
if (!imc->mbase)
continue;
+ if (i10nm_channel_disabled(imc, i)) {
+ edac_dbg(1, "mc%d ch%d is disabled.\n", imc->mc, i);
+ continue;
+ }
+
ndimms = 0;
if (res_cfg->type != GNR)
@@ -1173,7 +1198,8 @@ static int __init i10nm_init(void)
d->imc[i].num_dimms = cfg->ddr_dimm_num;
}
- rc = skx_register_mci(&d->imc[i], d->imc[i].mdev,
+ rc = skx_register_mci(&d->imc[i], &d->imc[i].mdev->dev,
+ pci_name(d->imc[i].mdev),
"Intel_10nm Socket", EDAC_MOD_STR,
i10nm_get_dimm_config, cfg);
if (rc < 0)
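
skx_register_mci() now takes a generic struct device plus a display name instead of a struct pci_dev, so controllers that are not PCI functions can register too. Usage sketch from the two caller styles in this series:

    /* PCI-backed controller (skx, i10nm): */
    skx_register_mci(imc, &pdev->dev, pci_name(pdev), ...);

    /* MMIO-described controller with a driver-created device (imh): */
    skx_register_mci(imc, imc->dev, dev_name(imc->dev), ...);
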
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index a53612be4b2f..eaab6af143e1 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -44,6 +44,7 @@
* but lo_hi_readq() ensures that we are safe across all e3-1200 processors.
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
@@ -87,13 +88,32 @@
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_10 0x3eca
/* Raptor Lake-S */
-#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1 0xa703
-#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2 0x4640
-#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3 0x4630
-#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_4 0xa700
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1 0xa703 /* 8P+8E, e.g. i7-13700 */
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2 0x4640 /* 6P+8E, e.g. i5-13500, i5-13600, i5-14500 */
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3 0x4630 /* 4P+0E, e.g. i3-13100E */
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_4 0xa700 /* 8P+16E, e.g. i9-13900, i9-14900 */
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_5 0xa740 /* 8P+12E, e.g. i7-14700 */
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_6 0xa704 /* 6P+8E, e.g. i5-14600 */
+
+/* Raptor Lake-HX */
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_HX_1 0xa702 /* 8P+16E, e.g. i9-13950HX */
/* Alder Lake-S */
#define PCI_DEVICE_ID_INTEL_IE31200_ADL_S_1 0x4660
+#define PCI_DEVICE_ID_INTEL_IE31200_ADL_S_2 0x4668 /* 8P+4E, e.g. i7-12700K */
+#define PCI_DEVICE_ID_INTEL_IE31200_ADL_S_3 0x4648 /* 6P+4E, e.g. i5-12600K */
+
+/* Bartlett Lake-S */
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_1 0x4639
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_2 0x463c
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_3 0x4642
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_4 0x4643
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_5 0xa731
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_6 0xa732
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_7 0xa733
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_8 0xa741
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_9 0xa744
+#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_10 0xa745
#define IE31200_RANKS_PER_CHANNEL 8
#define IE31200_DIMMS_PER_CHANNEL 2
@@ -120,9 +140,6 @@
#define IE31200_CAPID0_DDPCD BIT(6)
#define IE31200_CAPID0_ECC BIT(1)
-/* Non-constant mask variant of FIELD_GET() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-
static int nr_channels;
static struct pci_dev *mci_pdev;
static int ie31200_registered = 1;
@@ -507,6 +524,7 @@ static int ie31200_register_mci(struct pci_dev *pdev, struct res_config *cfg, in
ie31200_pvt.priv[mc] = priv;
return 0;
fail_unmap:
+ put_device(&priv->dev);
iounmap(window);
fail_free:
edac_mc_free(mci);
@@ -579,6 +597,7 @@ static void ie31200_unregister_mcis(void)
mci = priv->mci;
edac_mc_del_mc(mci->pdev);
iounmap(priv->window);
+ put_device(&priv->dev);
edac_mc_free(mci);
}
}
@@ -740,7 +759,22 @@ static const struct pci_device_id ie31200_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_4), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_5), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_6), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_HX_1), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_ADL_S_1), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_ADL_S_2), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_ADL_S_3), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_1), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_2), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_3), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_4), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_5), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_6), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_7), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_8), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_9), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_10), (kernel_ulong_t)&rpl_s_cfg},
{ 0, } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, ie31200_pci_tbl);
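
The added put_device() calls follow the driver-model refcount rule: once a struct device has been through device_initialize(), it must be released with put_device() rather than freed directly, on error paths included. A generic sketch of the rule (not this driver's exact flow):

    device_initialize(&priv->dev);

    err = device_add(&priv->dev);
    if (err) {
            put_device(&priv->dev);  /* drops the initial reference */
            return err;
    }

    /* ... normal teardown later ... */
    device_unregister(&priv->dev);   /* device_del() + put_device() */
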
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index 1930dc00c791..553c31a2d922 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -125,7 +125,7 @@
#define MEM_SLICE_HASH_MASK(v) (GET_BITFIELD(v, 6, 19) << 6)
#define MEM_SLICE_HASH_LSB_MASK_BIT(v) GET_BITFIELD(v, 24, 26)
-static const struct res_config {
+static struct res_config {
bool machine_check;
/* The number of present memory controllers. */
int num_imc;
@@ -275,6 +275,9 @@ static struct work_struct ecclog_work;
#define DID_PTL_H_SKU2 0xb001
#define DID_PTL_H_SKU3 0xb002
+/* Compute die IDs for Wildcat Lake with IBECC */
+#define DID_WCL_SKU1 0xfd00
+
static int get_mchbar(struct pci_dev *pdev, u64 *mchbar)
{
union {
@@ -479,7 +482,7 @@ static u64 rpl_p_err_addr(u64 ecclog)
return ECC_ERROR_LOG_ADDR45(ecclog);
}
-static const struct res_config ehl_cfg = {
+static struct res_config ehl_cfg = {
.num_imc = 1,
.imc_base = 0x5000,
.ibecc_base = 0xdc00,
@@ -489,7 +492,7 @@ static const struct res_config ehl_cfg = {
.err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
};
-static const struct res_config icl_cfg = {
+static struct res_config icl_cfg = {
.num_imc = 1,
.imc_base = 0x5000,
.ibecc_base = 0xd800,
@@ -499,7 +502,7 @@ static const struct res_config icl_cfg = {
.err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
};
-static const struct res_config tgl_cfg = {
+static struct res_config tgl_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0x5000,
@@ -513,7 +516,7 @@ static const struct res_config tgl_cfg = {
.err_addr_to_imc_addr = tgl_err_addr_to_imc_addr,
};
-static const struct res_config adl_cfg = {
+static struct res_config adl_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -524,7 +527,7 @@ static const struct res_config adl_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static const struct res_config adl_n_cfg = {
+static struct res_config adl_n_cfg = {
.machine_check = true,
.num_imc = 1,
.imc_base = 0xd800,
@@ -535,7 +538,7 @@ static const struct res_config adl_n_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static const struct res_config rpl_p_cfg = {
+static struct res_config rpl_p_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -547,7 +550,7 @@ static const struct res_config rpl_p_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static const struct res_config mtl_ps_cfg = {
+static struct res_config mtl_ps_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -558,7 +561,7 @@ static const struct res_config mtl_ps_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static const struct res_config mtl_p_cfg = {
+static struct res_config mtl_p_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -569,7 +572,18 @@ static const struct res_config mtl_p_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static const struct pci_device_id igen6_pci_tbl[] = {
+static struct res_config wcl_cfg = {
+ .machine_check = true,
+ .num_imc = 1,
+ .imc_base = 0xd800,
+ .ibecc_base = 0xd400,
+ .ibecc_error_log_offset = 0x170,
+ .ibecc_available = mtl_p_ibecc_available,
+ .err_addr_to_sys_addr = adl_err_addr_to_sys_addr,
+ .err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
+};
+
+static struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_EHL_SKU5), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU6), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU7), (kernel_ulong_t)&ehl_cfg },
@@ -622,6 +636,7 @@ static const struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_PTL_H_SKU1), (kernel_ulong_t)&mtl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_PTL_H_SKU2), (kernel_ulong_t)&mtl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_PTL_H_SKU3), (kernel_ulong_t)&mtl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_WCL_SKU1), (kernel_ulong_t)&wcl_cfg },
{ },
};
MODULE_DEVICE_TABLE(pci, igen6_pci_tbl);
@@ -1285,6 +1300,7 @@ static int igen6_register_mci(int mc, void __iomem *window, struct pci_dev *pdev
imc->mci = mci;
return 0;
fail3:
+ put_device(&imc->dev);
mci->pvt_info = NULL;
kfree(mci->ctl_name);
fail2:
@@ -1311,6 +1327,7 @@ static void igen6_unregister_mcis(void)
kfree(mci->ctl_name);
mci->pvt_info = NULL;
edac_mc_free(mci);
+ put_device(&imc->dev);
iounmap(imc->window);
}
}
@@ -1350,9 +1367,11 @@ static int igen6_register_mcis(struct pci_dev *pdev, u64 mchbar)
return -ENODEV;
}
- if (lmc < res_cfg->num_imc)
- igen6_printk(KERN_WARNING, "Expected %d mcs, but only %d detected.",
+ if (lmc < res_cfg->num_imc) {
+ igen6_printk(KERN_DEBUG, "Expected %d mcs, but only %d detected.",
res_cfg->num_imc, lmc);
+ res_cfg->num_imc = lmc;
+ }
return 0;
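
Dropping const from the res_config tables lets the driver reconcile the table with reality at probe time: when fewer controllers respond than the table promises, num_imc is trimmed in place (and the warning demoted to a debug message). The trade-off is that the tables no longer live in read-only memory. Pattern sketch with hypothetical names:

    static struct res_config demo_cfg = { .num_imc = 2 };

    static int demo_probe(int detected)
    {
            if (detected < demo_cfg.num_imc)
                    demo_cfg.num_imc = detected;  /* keep only what exists */
            return 0;
    }
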
diff --git a/drivers/edac/imh_base.c b/drivers/edac/imh_base.c
new file mode 100644
index 000000000000..4348b3883b45
--- /dev/null
+++ b/drivers/edac/imh_base.c
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Intel(R) servers with Integrated Memory/IO Hub-based memory controller.
+ * Copyright (c) 2025, Intel Corporation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/mce.h>
+#include <asm/cpu.h>
+#include "edac_module.h"
+#include "skx_common.h"
+
+#define IMH_REVISION "v0.0.1"
+#define EDAC_MOD_STR "imh_edac"
+
+/* Debug macros */
+#define imh_printk(level, fmt, arg...) \
+ edac_printk(level, "imh", fmt, ##arg)
+
+/* Configuration Agent (Ubox) */
+#define MMIO_BASE_H(reg) (((u64)GET_BITFIELD(reg, 0, 29)) << 23)
+#define SOCKET_ID(reg) GET_BITFIELD(reg, 0, 3)
+
+/* PUNIT */
+#define DDR_IMC_BITMAP(reg) GET_BITFIELD(reg, 23, 30)
+
+/* Memory Controller */
+#define ECC_ENABLED(reg) GET_BITFIELD(reg, 2, 2)
+#define DIMM_POPULATED(reg) GET_BITFIELD(reg, 15, 15)
+
+/* System Cache Agent (SCA) */
+#define TOLM(reg) (((u64)GET_BITFIELD(reg, 16, 31)) << 16)
+#define TOHM(reg) (((u64)GET_BITFIELD(reg, 16, 51)) << 16)
+
+/* Home Agent (HA) */
+#define NMCACHING(reg) GET_BITFIELD(reg, 8, 8)
+
+/**
+ * struct local_reg - A register as described in the local package view.
+ *
+ * @pkg: (input) The package where the register is located.
+ * @pbase: (input) The IP MMIO base physical address in the local package view.
+ * @size: (input) The IP MMIO size.
+ * @offset: (input) The register offset from the IP MMIO base @pbase.
+ * @width: (input) The register width in bytes.
+ * @vbase: (internal) The IP MMIO base virtual address.
+ * @val: (output) The register value.
+ */
+struct local_reg {
+ int pkg;
+ u64 pbase;
+ u32 size;
+ u32 offset;
+ u8 width;
+ void __iomem *vbase;
+ u64 val;
+};
+
+#define DEFINE_LOCAL_REG(name, cfg, package, north, ip_name, ip_idx, reg_name) \
+ struct local_reg name = { \
+ .pkg = package, \
+ .pbase = (north ? (cfg)->mmio_base_l_north : \
+ (cfg)->mmio_base_l_south) + \
+ (cfg)->ip_name##_base + \
+ (cfg)->ip_name##_size * (ip_idx), \
+ .size = (cfg)->ip_name##_size, \
+ .offset = (cfg)->ip_name##_reg_##reg_name##_offset, \
+ .width = (cfg)->ip_name##_reg_##reg_name##_width, \
+ }
+
+static u64 readx(void __iomem *addr, u8 width)
+{
+ switch (width) {
+ case 1:
+ return readb(addr);
+ case 2:
+ return readw(addr);
+ case 4:
+ return readl(addr);
+ case 8:
+ return readq(addr);
+ default:
+ imh_printk(KERN_ERR, "Invalid reg 0x%p width %d\n", addr, width);
+ return 0;
+ }
+}
+
+static void __read_local_reg(void *reg)
+{
+ struct local_reg *r = (struct local_reg *)reg;
+
+ r->val = readx(r->vbase + r->offset, r->width);
+}
+
+/* Read a local-view register. */
+static bool read_local_reg(struct local_reg *reg)
+{
+ int cpu;
+
+ /* Get the target CPU in the package @reg->pkg. */
+ for_each_online_cpu(cpu) {
+ if (reg->pkg == topology_physical_package_id(cpu))
+ break;
+ }
+
+ if (cpu >= nr_cpu_ids)
+ return false;
+
+ reg->vbase = ioremap(reg->pbase, reg->size);
+ if (!reg->vbase) {
+ imh_printk(KERN_ERR, "Failed to ioremap 0x%llx\n", reg->pbase);
+ return false;
+ }
+
+ /* Get the target CPU to read the register. */
+ smp_call_function_single(cpu, __read_local_reg, reg, 1);
+ iounmap(reg->vbase);
+
+ return true;
+}
+
+/* Get the bitmap of memory controller instances in package @pkg. */
+static u32 get_imc_bitmap(struct res_config *cfg, int pkg, bool north)
+{
+ DEFINE_LOCAL_REG(reg, cfg, pkg, north, pcu, 0, capid3);
+
+ if (!read_local_reg(&reg))
+ return 0;
+
+ edac_dbg(2, "Pkg%d %s mc instances bitmap 0x%llx (reg 0x%llx)\n",
+ pkg, north ? "north" : "south",
+ DDR_IMC_BITMAP(reg.val), reg.val);
+
+ return DDR_IMC_BITMAP(reg.val);
+}
+
+static void imc_release(struct device *dev)
+{
+ edac_dbg(2, "imc device %s released\n", dev_name(dev));
+ kfree(dev);
+}
+
+static int __get_ddr_munits(struct res_config *cfg, struct skx_dev *d,
+ bool north, int lmc)
+{
+ unsigned long size = cfg->ddr_chan_mmio_sz * cfg->ddr_chan_num;
+ unsigned long bitmap = get_imc_bitmap(cfg, d->pkg, north);
+ void __iomem *mbase;
+ struct device *dev;
+ int i, rc, pmc;
+ u64 base;
+
+ for_each_set_bit(i, &bitmap, sizeof(bitmap) * 8) {
+ base = north ? d->mmio_base_h_north : d->mmio_base_h_south;
+ base += cfg->ddr_imc_base + size * i;
+
+ edac_dbg(2, "Pkg%d mc%d mmio base 0x%llx size 0x%lx\n",
+ d->pkg, lmc, base, size);
+
+ /* Set up the imc MMIO. */
+ mbase = ioremap(base, size);
+ if (!mbase) {
+ imh_printk(KERN_ERR, "Failed to ioremap 0x%llx\n", base);
+ return -ENOMEM;
+ }
+
+ d->imc[lmc].mbase = mbase;
+ d->imc[lmc].lmc = lmc;
+
+ /* Create the imc device instance. */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->release = imc_release;
+ device_initialize(dev);
+ rc = dev_set_name(dev, "0x%llx", base);
+ if (rc) {
+ imh_printk(KERN_ERR, "Failed to set dev name\n");
+ put_device(dev);
+ return rc;
+ }
+
+ d->imc[lmc].dev = dev;
+
+ /* Set up the imc index mapping. */
+ pmc = north ? i : 8 + i;
+ skx_set_mc_mapping(d, pmc, lmc);
+
+ lmc++;
+ }
+
+ return lmc;
+}
+
+static bool get_ddr_munits(struct res_config *cfg, struct skx_dev *d)
+{
+ int lmc = __get_ddr_munits(cfg, d, true, 0);
+
+ if (lmc < 0)
+ return false;
+
+ lmc = __get_ddr_munits(cfg, d, false, lmc);
+ if (lmc <= 0)
+ return false;
+
+ return true;
+}
+
+static bool get_socket_id(struct res_config *cfg, struct skx_dev *d)
+{
+ DEFINE_LOCAL_REG(reg, cfg, d->pkg, true, ubox, 0, socket_id);
+ u8 src_id;
+ int i;
+
+ if (!read_local_reg(&reg))
+ return false;
+
+ src_id = SOCKET_ID(reg.val);
+ edac_dbg(2, "socket id 0x%x (reg 0x%llx)\n", src_id, reg.val);
+
+ for (i = 0; i < cfg->ddr_imc_num; i++)
+ d->imc[i].src_id = src_id;
+
+ return true;
+}
+
+/* Get TOLM (Top Of Low Memory) and TOHM (Top Of High Memory) parameters. */
+static bool imh_get_tolm_tohm(struct res_config *cfg, u64 *tolm, u64 *tohm)
+{
+ DEFINE_LOCAL_REG(reg, cfg, 0, true, sca, 0, tolm);
+
+ if (!read_local_reg(&reg))
+ return false;
+
+ *tolm = TOLM(reg.val);
+ edac_dbg(2, "tolm 0x%llx (reg 0x%llx)\n", *tolm, reg.val);
+
+ DEFINE_LOCAL_REG(reg2, cfg, 0, true, sca, 0, tohm);
+
+ if (!read_local_reg(&reg2))
+ return false;
+
+ *tohm = TOHM(reg2.val);
+ edac_dbg(2, "tohm 0x%llx (reg 0x%llx)\n", *tohm, reg2.val);
+
+ return true;
+}
+
+/* Get the system-view MMIO_BASE_H for {north,south}-IMH. */
+static int imh_get_all_mmio_base_h(struct res_config *cfg, struct list_head *edac_list)
+{
+ int i, n = topology_max_packages(), imc_num = cfg->ddr_imc_num + cfg->hbm_imc_num;
+ struct skx_dev *d;
+
+ for (i = 0; i < n; i++) {
+ d = kzalloc(struct_size(d, imc, imc_num), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ DEFINE_LOCAL_REG(reg, cfg, i, true, ubox, 0, mmio_base);
+
+ /* Get MMIO_BASE_H for the north-IMH. */
+ if (!read_local_reg(&reg) || !reg.val) {
+ kfree(d);
+ imh_printk(KERN_ERR, "Pkg%d has no north mmio_base_h\n", i);
+ return -ENODEV;
+ }
+
+ d->mmio_base_h_north = MMIO_BASE_H(reg.val);
+ edac_dbg(2, "Pkg%d north mmio_base_h 0x%llx (reg 0x%llx)\n",
+ i, d->mmio_base_h_north, reg.val);
+
+ /* Get MMIO_BASE_H for the south-IMH (optional). */
+ DEFINE_LOCAL_REG(reg2, cfg, i, false, ubox, 0, mmio_base);
+
+ if (read_local_reg(&reg2)) {
+ d->mmio_base_h_south = MMIO_BASE_H(reg2.val);
+ edac_dbg(2, "Pkg%d south mmio_base_h 0x%llx (reg 0x%llx)\n",
+ i, d->mmio_base_h_south, reg2.val);
+ }
+
+ d->pkg = i;
+ d->num_imc = imc_num;
+ skx_init_mc_mapping(d);
+ list_add_tail(&d->list, edac_list);
+ }
+
+ return 0;
+}
+
+/* Get the number of per-package memory controllers. */
+static int imh_get_imc_num(struct res_config *cfg)
+{
+ int imc_num = hweight32(get_imc_bitmap(cfg, 0, true)) +
+ hweight32(get_imc_bitmap(cfg, 0, false));
+
+ if (!imc_num) {
+ imh_printk(KERN_ERR, "Invalid mc number\n");
+ return -ENODEV;
+ }
+
+ if (cfg->ddr_imc_num != imc_num) {
+ /*
+ * Update the configuration data to reflect the number of
+ * present DDR memory controllers.
+ */
+ cfg->ddr_imc_num = imc_num;
+ edac_dbg(2, "Set ddr mc number %d\n", imc_num);
+ }
+
+ return 0;
+}
+
+/* Get all memory controllers' parameters. */
+static int imh_get_munits(struct res_config *cfg, struct list_head *edac_list)
+{
+ struct skx_imc *imc;
+ struct skx_dev *d;
+ u8 mc = 0;
+ int i;
+
+ list_for_each_entry(d, edac_list, list) {
+ if (!get_ddr_munits(cfg, d)) {
+ imh_printk(KERN_ERR, "No mc found\n");
+ return -ENODEV;
+ }
+
+ if (!get_socket_id(cfg, d)) {
+ imh_printk(KERN_ERR, "Failed to get socket id\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < cfg->ddr_imc_num; i++) {
+ imc = &d->imc[i];
+ if (!imc->mbase)
+ continue;
+
+ imc->chan_mmio_sz = cfg->ddr_chan_mmio_sz;
+ imc->num_channels = cfg->ddr_chan_num;
+ imc->num_dimms = cfg->ddr_dimm_num;
+ imc->mc = mc++;
+ }
+ }
+
+ return 0;
+}
+
+static bool check_2lm_enabled(struct res_config *cfg, struct skx_dev *d, int ha_idx)
+{
+ DEFINE_LOCAL_REG(reg, cfg, d->pkg, true, ha, ha_idx, mode);
+
+ if (!read_local_reg(&reg))
+ return false;
+
+ if (!NMCACHING(reg.val))
+ return false;
+
+ edac_dbg(2, "2-level memory configuration (reg 0x%llx, ha idx %d)\n", reg.val, ha_idx);
+ return true;
+}
+
+/* Check whether the system has a 2-level memory configuration. */
+static bool imh_2lm_enabled(struct res_config *cfg, struct list_head *head)
+{
+ struct skx_dev *d;
+ int i;
+
+ list_for_each_entry(d, head, list) {
+ for (i = 0; i < cfg->ddr_imc_num; i++)
+ if (check_2lm_enabled(cfg, d, i))
+ return true;
+ }
+
+ return false;
+}
+
+/* Helpers to read memory controller registers */
+static u64 read_imc_reg(struct skx_imc *imc, int chan, u32 offset, u8 width)
+{
+ return readx(imc->mbase + imc->chan_mmio_sz * chan + offset, width);
+}
+
+static u32 read_imc_mcmtr(struct res_config *cfg, struct skx_imc *imc, int chan)
+{
+ return (u32)read_imc_reg(imc, chan, cfg->ddr_reg_mcmtr_offset, cfg->ddr_reg_mcmtr_width);
+}
+
+static u32 read_imc_dimmmtr(struct res_config *cfg, struct skx_imc *imc, int chan, int dimm)
+{
+ return (u32)read_imc_reg(imc, chan, cfg->ddr_reg_dimmmtr_offset +
+ cfg->ddr_reg_dimmmtr_width * dimm,
+ cfg->ddr_reg_dimmmtr_width);
+}
+
+static bool ecc_enabled(u32 mcmtr)
+{
+ return (bool)ECC_ENABLED(mcmtr);
+}
+
+static bool dimm_populated(u32 dimmmtr)
+{
+ return (bool)DIMM_POPULATED(dimmmtr);
+}
+
+/* Get the configuration of each DIMM on memory controller @mci. */
+static int imh_get_dimm_config(struct mem_ctl_info *mci, struct res_config *cfg)
+{
+ struct skx_pvt *pvt = mci->pvt_info;
+ struct skx_imc *imc = pvt->imc;
+ struct dimm_info *dimm;
+ u32 mcmtr, dimmmtr;
+ int i, j, ndimms;
+
+ for (i = 0; i < imc->num_channels; i++) {
+ if (!imc->mbase)
+ continue;
+
+ mcmtr = read_imc_mcmtr(cfg, imc, i);
+
+ for (ndimms = 0, j = 0; j < imc->num_dimms; j++) {
+ dimmmtr = read_imc_dimmmtr(cfg, imc, i, j);
+ edac_dbg(1, "mcmtr 0x%x dimmmtr 0x%x (mc%d ch%d dimm%d)\n",
+ mcmtr, dimmmtr, imc->mc, i, j);
+
+ if (!dimm_populated(dimmmtr))
+ continue;
+
+ dimm = edac_get_dimm(mci, i, j, 0);
+ ndimms += skx_get_dimm_info(dimmmtr, 0, 0, dimm,
+ imc, i, j, cfg);
+ }
+
+ if (ndimms && !ecc_enabled(mcmtr)) {
+ imh_printk(KERN_ERR, "ECC is disabled on mc%d ch%d\n",
+ imc->mc, i);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+/* Register all memory controllers with the EDAC core. */
+static int imh_register_mci(struct res_config *cfg, struct list_head *edac_list)
+{
+ struct skx_imc *imc;
+ struct skx_dev *d;
+ int i, rc;
+
+ list_for_each_entry(d, edac_list, list) {
+ for (i = 0; i < cfg->ddr_imc_num; i++) {
+ imc = &d->imc[i];
+ if (!imc->mbase)
+ continue;
+
+ rc = skx_register_mci(imc, imc->dev,
+ dev_name(imc->dev),
+ "Intel IMH-based Socket",
+ EDAC_MOD_STR,
+ imh_get_dimm_config, cfg);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static struct res_config dmr_cfg = {
+ .type = DMR,
+ .support_ddr5 = true,
+ .mmio_base_l_north = 0xf6800000,
+ .mmio_base_l_south = 0xf6000000,
+ .ddr_chan_num = 1,
+ .ddr_dimm_num = 2,
+ .ddr_imc_base = 0x39b000,
+ .ddr_chan_mmio_sz = 0x8000,
+ .ddr_reg_mcmtr_offset = 0x360,
+ .ddr_reg_mcmtr_width = 4,
+ .ddr_reg_dimmmtr_offset = 0x370,
+ .ddr_reg_dimmmtr_width = 4,
+ .ubox_base = 0x0,
+ .ubox_size = 0x2000,
+ .ubox_reg_mmio_base_offset = 0x580,
+ .ubox_reg_mmio_base_width = 4,
+ .ubox_reg_socket_id_offset = 0x1080,
+ .ubox_reg_socket_id_width = 4,
+ .pcu_base = 0x3000,
+ .pcu_size = 0x10000,
+ .pcu_reg_capid3_offset = 0x290,
+ .pcu_reg_capid3_width = 4,
+ .sca_base = 0x24c000,
+ .sca_size = 0x2500,
+ .sca_reg_tolm_offset = 0x2100,
+ .sca_reg_tolm_width = 8,
+ .sca_reg_tohm_offset = 0x2108,
+ .sca_reg_tohm_width = 8,
+ .ha_base = 0x3eb000,
+ .ha_size = 0x1000,
+ .ha_reg_mode_offset = 0x4a0,
+ .ha_reg_mode_width = 4,
+};
+
+static const struct x86_cpu_id imh_cpuids[] = {
+ X86_MATCH_VFM(INTEL_DIAMONDRAPIDS_X, &dmr_cfg),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, imh_cpuids);
+
+static struct notifier_block imh_mce_dec = {
+ .notifier_call = skx_mce_check_error,
+ .priority = MCE_PRIO_EDAC,
+};
+
+static int __init imh_init(void)
+{
+ const struct x86_cpu_id *id;
+ struct list_head *edac_list;
+ struct res_config *cfg;
+ const char *owner;
+ u64 tolm, tohm;
+ int rc;
+
+ edac_dbg(2, "\n");
+
+ if (ghes_get_devices())
+ return -EBUSY;
+
+ owner = edac_get_owner();
+ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ return -EBUSY;
+
+ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
+ return -ENODEV;
+
+ id = x86_match_cpu(imh_cpuids);
+ if (!id)
+ return -ENODEV;
+ cfg = (struct res_config *)id->driver_data;
+ skx_set_res_cfg(cfg);
+
+ if (!imh_get_tolm_tohm(cfg, &tolm, &tohm))
+ return -ENODEV;
+
+ skx_set_hi_lo(tolm, tohm);
+
+ rc = imh_get_imc_num(cfg);
+ if (rc < 0)
+ goto fail;
+
+ edac_list = skx_get_edac_list();
+
+ rc = imh_get_all_mmio_base_h(cfg, edac_list);
+ if (rc)
+ goto fail;
+
+ rc = imh_get_munits(cfg, edac_list);
+ if (rc)
+ goto fail;
+
+ skx_set_mem_cfg(imh_2lm_enabled(cfg, edac_list));
+
+ rc = imh_register_mci(cfg, edac_list);
+ if (rc)
+ goto fail;
+
+ rc = skx_adxl_get();
+ if (rc)
+ goto fail;
+
+ opstate_init();
+ mce_register_decode_chain(&imh_mce_dec);
+ skx_setup_debug("imh_test");
+
+ imh_printk(KERN_INFO, "%s\n", IMH_REVISION);
+
+ return 0;
+fail:
+ skx_remove();
+ return rc;
+}
+
+static void __exit imh_exit(void)
+{
+ edac_dbg(2, "\n");
+
+ skx_teardown_debug();
+ mce_unregister_decode_chain(&imh_mce_dec);
+ skx_adxl_put();
+ skx_remove();
+}
+
+module_init(imh_init);
+module_exit(imh_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Qiuxu Zhuo");
+MODULE_DESCRIPTION("MC Driver for Intel servers using IMH-based memory controller");
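
imh_base reads package-local registers by composing an address from the config table and forcing the load onto a CPU inside the target package, since the local MMIO view decodes relative to the issuing package. For dmr_cfg, DEFINE_LOCAL_REG(reg, cfg, 0, true, sca, 0, tolm) fills the descriptor roughly as follows (values taken from the dmr_cfg table above; a sketch, not generated code):

    struct local_reg reg = {
            .pkg    = 0,
            .pbase  = 0xf6800000 + 0x24c000,  /* north base + sca_base */
            .size   = 0x2500,                 /* sca_size              */
            .offset = 0x2100,                 /* sca_reg_tolm_offset   */
            .width  = 8,                      /* sca_reg_tolm_width    */
    };

read_local_reg() then ioremaps .pbase and uses smp_call_function_single() to run the read on a CPU whose physical package ID matches .pkg.
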
diff --git a/drivers/edac/mem_repair.c b/drivers/edac/mem_repair.c
index d1a8caa85369..108d69209146 100755..100644
--- a/drivers/edac/mem_repair.c
+++ b/drivers/edac/mem_repair.c
@@ -286,17 +286,26 @@ static umode_t mem_repair_attr_visible(struct kobject *kobj, struct attribute *a
return 0;
}
-#define MR_ATTR_RO(_name, _instance) \
- ((struct edac_mem_repair_dev_attr) { .dev_attr = __ATTR_RO(_name), \
- .instance = _instance })
-
-#define MR_ATTR_WO(_name, _instance) \
- ((struct edac_mem_repair_dev_attr) { .dev_attr = __ATTR_WO(_name), \
- .instance = _instance })
-
-#define MR_ATTR_RW(_name, _instance) \
- ((struct edac_mem_repair_dev_attr) { .dev_attr = __ATTR_RW(_name), \
- .instance = _instance })
+static const struct device_attribute mem_repair_dev_attr[] = {
+ [MR_TYPE] = __ATTR_RO(repair_type),
+ [MR_PERSIST_MODE] = __ATTR_RW(persist_mode),
+ [MR_SAFE_IN_USE] = __ATTR_RO(repair_safe_when_in_use),
+ [MR_HPA] = __ATTR_RW(hpa),
+ [MR_MIN_HPA] = __ATTR_RO(min_hpa),
+ [MR_MAX_HPA] = __ATTR_RO(max_hpa),
+ [MR_DPA] = __ATTR_RW(dpa),
+ [MR_MIN_DPA] = __ATTR_RO(min_dpa),
+ [MR_MAX_DPA] = __ATTR_RO(max_dpa),
+ [MR_NIBBLE_MASK] = __ATTR_RW(nibble_mask),
+ [MR_BANK_GROUP] = __ATTR_RW(bank_group),
+ [MR_BANK] = __ATTR_RW(bank),
+ [MR_RANK] = __ATTR_RW(rank),
+ [MR_ROW] = __ATTR_RW(row),
+ [MR_COLUMN] = __ATTR_RW(column),
+ [MR_CHANNEL] = __ATTR_RW(channel),
+ [MR_SUB_CHANNEL] = __ATTR_RW(sub_channel),
+ [MEM_DO_REPAIR] = __ATTR_WO(repair)
+};
static int mem_repair_create_desc(struct device *dev,
const struct attribute_group **attr_groups,
@@ -305,34 +314,14 @@ static int mem_repair_create_desc(struct device *dev,
struct edac_mem_repair_context *ctx;
struct attribute_group *group;
int i;
- struct edac_mem_repair_dev_attr dev_attr[] = {
- [MR_TYPE] = MR_ATTR_RO(repair_type, instance),
- [MR_PERSIST_MODE] = MR_ATTR_RW(persist_mode, instance),
- [MR_SAFE_IN_USE] = MR_ATTR_RO(repair_safe_when_in_use, instance),
- [MR_HPA] = MR_ATTR_RW(hpa, instance),
- [MR_MIN_HPA] = MR_ATTR_RO(min_hpa, instance),
- [MR_MAX_HPA] = MR_ATTR_RO(max_hpa, instance),
- [MR_DPA] = MR_ATTR_RW(dpa, instance),
- [MR_MIN_DPA] = MR_ATTR_RO(min_dpa, instance),
- [MR_MAX_DPA] = MR_ATTR_RO(max_dpa, instance),
- [MR_NIBBLE_MASK] = MR_ATTR_RW(nibble_mask, instance),
- [MR_BANK_GROUP] = MR_ATTR_RW(bank_group, instance),
- [MR_BANK] = MR_ATTR_RW(bank, instance),
- [MR_RANK] = MR_ATTR_RW(rank, instance),
- [MR_ROW] = MR_ATTR_RW(row, instance),
- [MR_COLUMN] = MR_ATTR_RW(column, instance),
- [MR_CHANNEL] = MR_ATTR_RW(channel, instance),
- [MR_SUB_CHANNEL] = MR_ATTR_RW(sub_channel, instance),
- [MEM_DO_REPAIR] = MR_ATTR_WO(repair, instance)
- };
-
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
for (i = 0; i < MR_MAX_ATTRS; i++) {
- memcpy(&ctx->mem_repair_dev_attr[i],
- &dev_attr[i], sizeof(dev_attr[i]));
+ ctx->mem_repair_dev_attr[i].dev_attr = mem_repair_dev_attr[i];
+ ctx->mem_repair_dev_attr[i].instance = instance;
+ sysfs_attr_init(&ctx->mem_repair_dev_attr[i].dev_attr.attr);
ctx->mem_repair_attrs[i] =
&ctx->mem_repair_dev_attr[i].dev_attr.attr;
}
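
The rework replaces a per-call array of attribute templates with one shared const table: each instance copies the template, stamps its instance number, and runs sysfs_attr_init() so the dynamically allocated attribute gets a valid lockdep key. The same pattern in miniature, with a hypothetical attribute:

    static ssize_t demo_show(struct device *dev,
                             struct device_attribute *attr, char *buf);

    static const struct device_attribute demo_tmpl = __ATTR_RO(demo);

    struct demo_inst_attr {
            struct device_attribute dev_attr;
            u8 instance;
    };

    static void demo_init(struct demo_inst_attr *a, u8 inst)
    {
            a->dev_attr = demo_tmpl;            /* copy const template */
            a->instance = inst;                 /* per-instance state  */
            sysfs_attr_init(&a->dev_attr.attr); /* lockdep key init    */
    }
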
diff --git a/drivers/edac/scrub.c b/drivers/edac/scrub.c
index e421d3ebd959..f9d02af2fc3a 100755..100644
--- a/drivers/edac/scrub.c
+++ b/drivers/edac/scrub.c
@@ -176,6 +176,7 @@ static int scrub_create_desc(struct device *scrub_dev,
group = &scrub_ctx->group;
for (i = 0; i < SCRUB_MAX_ATTRS; i++) {
memcpy(&scrub_ctx->scrub_dev_attr[i], &dev_attr[i], sizeof(dev_attr[i]));
+ sysfs_attr_init(&scrub_ctx->scrub_dev_attr[i].dev_attr.attr);
scrub_ctx->scrub_attrs[i] = &scrub_ctx->scrub_dev_attr[i].dev_attr.attr;
}
sprintf(scrub_ctx->name, "%s%d", "scrub", instance);
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 29897b21fb8e..aa6593ccda2d 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -33,6 +33,15 @@ static unsigned int nvdimm_count;
#define MASK26 0x3FFFFFF /* Mask for 2^26 */
#define MASK29 0x1FFFFFFF /* Mask for 2^29 */
+static struct res_config skx_cfg = {
+ .type = SKX,
+ .decs_did = 0x2016,
+ .busno_cfg_offset = 0xcc,
+ .ddr_imc_num = 2,
+ .ddr_chan_num = 3,
+ .ddr_dimm_num = 2,
+};
+
static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx)
{
struct skx_dev *d;
@@ -52,7 +61,7 @@ enum munittype {
struct munit {
u16 did;
- u16 devfn[SKX_NUM_IMC];
+ u16 devfn[2];
u8 busidx;
u8 per_socket;
enum munittype mtype;
@@ -89,11 +98,11 @@ static int get_all_munits(const struct munit *m)
if (!pdev)
break;
ndev++;
- if (m->per_socket == SKX_NUM_IMC) {
- for (i = 0; i < SKX_NUM_IMC; i++)
+ if (m->per_socket == skx_cfg.ddr_imc_num) {
+ for (i = 0; i < skx_cfg.ddr_imc_num; i++)
if (m->devfn[i] == pdev->devfn)
break;
- if (i == SKX_NUM_IMC)
+ if (i == skx_cfg.ddr_imc_num)
goto fail;
}
d = get_skx_dev(pdev->bus, m->busidx);
@@ -157,12 +166,6 @@ fail:
return -ENODEV;
}
-static struct res_config skx_cfg = {
- .type = SKX,
- .decs_did = 0x2016,
- .busno_cfg_offset = 0xcc,
-};
-
static const struct x86_cpu_id skx_cpuids[] = {
X86_MATCH_VFM(INTEL_SKYLAKE_X, &skx_cfg),
{ }
@@ -186,11 +189,11 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci, struct res_config *cfg)
/* Only the mcmtr on the first channel is effective */
pci_read_config_dword(imc->chan[0].cdev, 0x87c, &mcmtr);
- for (i = 0; i < SKX_NUM_CHANNELS; i++) {
+ for (i = 0; i < cfg->ddr_chan_num; i++) {
ndimms = 0;
pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
pci_read_config_dword(imc->chan[i].cdev, 0x400, &mcddrtcfg);
- for (j = 0; j < SKX_NUM_DIMMS; j++) {
+ for (j = 0; j < cfg->ddr_dimm_num; j++) {
dimm = edac_get_dimm(mci, i, j, 0);
pci_read_config_dword(imc->chan[i].cdev,
0x80 + 4 * j, &mtr);
@@ -620,6 +623,7 @@ static int __init skx_init(void)
return -ENODEV;
cfg = (struct res_config *)id->driver_data;
+ skx_set_res_cfg(cfg);
rc = skx_get_hi_lo(0x2034, off, &skx_tolm, &skx_tohm);
if (rc)
@@ -652,11 +656,14 @@ static int __init skx_init(void)
goto fail;
edac_dbg(2, "src_id = %d\n", src_id);
- for (i = 0; i < SKX_NUM_IMC; i++) {
+ for (i = 0; i < cfg->ddr_imc_num; i++) {
d->imc[i].mc = mc++;
d->imc[i].lmc = i;
d->imc[i].src_id = src_id;
- rc = skx_register_mci(&d->imc[i], d->imc[i].chan[0].cdev,
+ d->imc[i].num_channels = cfg->ddr_chan_num;
+ d->imc[i].num_dimms = cfg->ddr_dimm_num;
+ rc = skx_register_mci(&d->imc[i], &d->imc[i].chan[0].cdev->dev,
+ pci_name(d->imc[i].chan[0].cdev),
"Skylake Socket", EDAC_MOD_STR,
skx_get_dimm_config, cfg);
if (rc < 0)
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index c9ade45c1a99..3276afe43922 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -14,9 +14,11 @@
* Copyright (c) 2018, Intel Corporation.
*/
+#include <linux/topology.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/adxl.h>
+#include <linux/overflow.h>
#include <acpi/nfit.h>
#include <asm/mce.h>
#include <asm/uv/uv.h>
@@ -122,7 +124,7 @@ void skx_adxl_put(void)
}
EXPORT_SYMBOL_GPL(skx_adxl_put);
-static void skx_init_mc_mapping(struct skx_dev *d)
+void skx_init_mc_mapping(struct skx_dev *d)
{
/*
* By default, the BIOS presents all memory controllers within each
* socket to the EDAC driver. The physical indices are the same as
* the logical indices of the memory controllers enumerated by the
* the logical indices of the memory controllers enumerated by the
* EDAC driver.
*/
- for (int i = 0; i < NUM_IMC; i++)
- d->mc_mapping[i] = i;
+ for (int i = 0; i < d->num_imc; i++)
+ d->imc[i].mc_mapping = i;
}
+EXPORT_SYMBOL_GPL(skx_init_mc_mapping);
void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc)
{
edac_dbg(0, "Set the mapping of mc phy idx to logical idx: %02d -> %02d\n",
pmc, lmc);
- d->mc_mapping[pmc] = lmc;
+ d->imc[lmc].mc_mapping = pmc;
}
EXPORT_SYMBOL_GPL(skx_set_mc_mapping);
-static u8 skx_get_mc_mapping(struct skx_dev *d, u8 pmc)
+static int skx_get_mc_mapping(struct skx_dev *d, u8 pmc)
{
- edac_dbg(0, "Get the mapping of mc phy idx to logical idx: %02d -> %02d\n",
- pmc, d->mc_mapping[pmc]);
+ for (int lmc = 0; lmc < d->num_imc; lmc++) {
+ if (d->imc[lmc].mc_mapping == pmc) {
+ edac_dbg(0, "Get the mapping of mc phy idx to logical idx: %02d -> %02d\n",
+ pmc, lmc);
- return d->mc_mapping[pmc];
+ return lmc;
+ }
+ }
+
+ return -1;
}
static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
{
+ int i, lmc, len = 0;
struct skx_dev *d;
- int i, len = 0;
if (res->addr >= skx_tohm || (res->addr >= skx_tolm &&
res->addr < BIT_ULL(32))) {
@@ -200,7 +209,7 @@ static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
res->cs = (int)adxl_values[component_indices[INDEX_CS]];
}
- if (res->imc > NUM_IMC - 1 || res->imc < 0) {
+ if (res->imc < 0) {
skx_printk(KERN_ERR, "Bad imc %d\n", res->imc);
return false;
}
@@ -218,7 +227,13 @@ static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
return false;
}
- res->imc = skx_get_mc_mapping(d, res->imc);
+ lmc = skx_get_mc_mapping(d, res->imc);
+ if (lmc < 0) {
+ skx_printk(KERN_ERR, "No lmc for imc %d\n", res->imc);
+ return false;
+ }
+
+ res->imc = lmc;
for (i = 0; i < adxl_component_count; i++) {
if (adxl_values[i] == ~0x0ull)
@@ -265,7 +280,7 @@ static int skx_get_pkg_id(struct skx_dev *d, u8 *id)
struct cpuinfo_x86 *c = &cpu_data(cpu);
if (c->initialized && cpu_to_node(cpu) == node) {
- *id = c->topo.pkg_id;
+ *id = topology_physical_package_id(cpu);
return 0;
}
}
@@ -320,10 +335,10 @@ static int get_width(u32 mtr)
*/
int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
{
+ int ndev = 0, imc_num = cfg->ddr_imc_num + cfg->hbm_imc_num;
struct pci_dev *pdev, *prev;
struct skx_dev *d;
u32 reg;
- int ndev = 0;
prev = NULL;
for (;;) {
@@ -331,7 +346,7 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
if (!pdev)
break;
ndev++;
- d = kzalloc(sizeof(*d), GFP_KERNEL);
+ d = kzalloc(struct_size(d, imc, imc_num), GFP_KERNEL);
if (!d) {
pci_dev_put(pdev);
return -ENOMEM;
@@ -354,8 +369,10 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
d->seg = GET_BITFIELD(reg, 16, 23);
}
- edac_dbg(2, "busses: 0x%x, 0x%x, 0x%x, 0x%x\n",
- d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
+ d->num_imc = imc_num;
+
+ edac_dbg(2, "busses: 0x%x, 0x%x, 0x%x, 0x%x, imcs %d\n",
+ d->bus[0], d->bus[1], d->bus[2], d->bus[3], imc_num);
list_add_tail(&d->list, &dev_edac_list);
prev = pdev;
@@ -368,6 +385,12 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
}
EXPORT_SYMBOL_GPL(skx_get_all_bus_mappings);
+struct list_head *skx_get_edac_list(void)
+{
+ return &dev_edac_list;
+}
+EXPORT_SYMBOL_GPL(skx_get_edac_list);
+
int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
{
struct pci_dev *pdev;
@@ -408,6 +431,13 @@ fail:
}
EXPORT_SYMBOL_GPL(skx_get_hi_lo);
+void skx_set_hi_lo(u64 tolm, u64 tohm)
+{
+ skx_tolm = tolm;
+ skx_tohm = tohm;
+}
+EXPORT_SYMBOL_GPL(skx_set_hi_lo);
+
static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add,
int minval, int maxval, const char *name)
{
@@ -421,7 +451,7 @@ static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add,
}
#define numrank(reg) skx_get_dimm_attr(reg, 12, 13, 0, 0, 2, "ranks")
-#define numrow(reg) skx_get_dimm_attr(reg, 2, 4, 12, 1, 6, "rows")
+#define numrow(reg) skx_get_dimm_attr(reg, 2, 4, 12, 1, 7, "rows")
#define numcol(reg) skx_get_dimm_attr(reg, 0, 1, 10, 0, 2, "cols")
int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
@@ -529,9 +559,9 @@ unknown_size:
}
EXPORT_SYMBOL_GPL(skx_get_nvdimm_info);
-int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
- const char *ctl_name, const char *mod_str,
- get_dimm_config_f get_dimm_config,
+int skx_register_mci(struct skx_imc *imc, struct device *dev,
+ const char *dev_name, const char *ctl_name,
+ const char *mod_str, get_dimm_config_f get_dimm_config,
struct res_config *cfg)
{
struct mem_ctl_info *mci;
@@ -541,10 +571,10 @@ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
/* Allocate a new MC control structure */
layers[0].type = EDAC_MC_LAYER_CHANNEL;
- layers[0].size = NUM_CHANNELS;
+ layers[0].size = imc->num_channels;
layers[0].is_virt_csrow = false;
layers[1].type = EDAC_MC_LAYER_SLOT;
- layers[1].size = NUM_DIMMS;
+ layers[1].size = imc->num_dimms;
layers[1].is_virt_csrow = true;
mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers,
sizeof(struct skx_pvt));
@@ -572,7 +602,7 @@ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = mod_str;
- mci->dev_name = pci_name(pdev);
+ mci->dev_name = dev_name;
mci->ctl_page_to_phys = NULL;
rc = get_dimm_config(mci, cfg);
@@ -580,7 +610,7 @@ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
goto fail;
/* Record ptr to the generic device */
- mci->pdev = &pdev->dev;
+ mci->pdev = dev;
/* Add this new MC control structure to EDAC's list of MCs */
if (unlikely(edac_mc_add_mc(mci))) {
@@ -670,12 +700,12 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
}
if (res->decoded_by_adxl) {
- len = snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s",
+ len = scnprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
mscod, errcode, adxl_msg);
} else {
- len = snprintf(skx_msg, MSG_SIZE,
+ len = scnprintf(skx_msg, MSG_SIZE,
"%s%s err_code:0x%04x:0x%04x ProcessorSocketId:0x%x MemoryControllerId:0x%x PhysicalRankId:0x%x Row:0x%x Column:0x%x Bank:0x%x BankGroup:0x%x",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
@@ -784,7 +814,7 @@ void skx_remove(void)
list_for_each_entry_safe(d, tmp, &dev_edac_list, list) {
list_del(&d->list);
- for (i = 0; i < NUM_IMC; i++) {
+ for (i = 0; i < d->num_imc; i++) {
if (d->imc[i].mci)
skx_unregister_mci(&d->imc[i]);
@@ -794,7 +824,10 @@ void skx_remove(void)
if (d->imc[i].mbase)
iounmap(d->imc[i].mbase);
- for (j = 0; j < NUM_CHANNELS; j++) {
+ if (d->imc[i].dev)
+ put_device(d->imc[i].dev);
+
+ for (j = 0; j < d->imc[i].num_channels; j++) {
if (d->imc[i].chan[j].cdev)
pci_dev_put(d->imc[i].chan[j].cdev);
}
@@ -817,7 +850,7 @@ EXPORT_SYMBOL_GPL(skx_remove);
/*
* Debug feature.
* Exercise the address decode logic by writing an address to
- * /sys/kernel/debug/edac/{skx,i10nm}_test/addr.
+ * /sys/kernel/debug/edac/{skx,i10nm,imh}_test/addr.
*/
static struct dentry *skx_test;
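
The mapping table also changes shape: instead of a fixed array indexed by physical controller (d->mc_mapping[pmc] = lmc), each logical controller now records its own physical index (d->imc[lmc].mc_mapping = pmc), which shrinks naturally with the flexible imc[] tail. Lookup becomes a linear reverse search that can report a miss. For example, if only physical controllers 0 and 2 are exposed:

    d->imc[0].mc_mapping = 0;
    d->imc[1].mc_mapping = 2;
    /* skx_get_mc_mapping(d, 2) == 1; skx_get_mc_mapping(d, 1) == -1 */
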
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index ec4966f7ea40..f88038e5b18c 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -29,23 +29,18 @@
#define GET_BITFIELD(v, lo, hi) \
(((v) & GENMASK_ULL((hi), (lo))) >> (lo))
-#define SKX_NUM_IMC 2 /* Memory controllers per socket */
#define SKX_NUM_CHANNELS 3 /* Channels per memory controller */
#define SKX_NUM_DIMMS 2 /* Max DIMMS per channel */
-#define I10NM_NUM_DDR_IMC 12
#define I10NM_NUM_DDR_CHANNELS 2
#define I10NM_NUM_DDR_DIMMS 2
-#define I10NM_NUM_HBM_IMC 16
#define I10NM_NUM_HBM_CHANNELS 2
#define I10NM_NUM_HBM_DIMMS 1
-#define I10NM_NUM_IMC (I10NM_NUM_DDR_IMC + I10NM_NUM_HBM_IMC)
#define I10NM_NUM_CHANNELS MAX(I10NM_NUM_DDR_CHANNELS, I10NM_NUM_HBM_CHANNELS)
#define I10NM_NUM_DIMMS MAX(I10NM_NUM_DDR_DIMMS, I10NM_NUM_HBM_DIMMS)
-#define NUM_IMC MAX(SKX_NUM_IMC, I10NM_NUM_IMC)
#define NUM_CHANNELS MAX(SKX_NUM_CHANNELS, I10NM_NUM_CHANNELS)
#define NUM_DIMMS MAX(SKX_NUM_DIMMS, I10NM_NUM_DIMMS)
@@ -126,35 +121,49 @@ struct reg_rrl {
* memory controllers on the die.
*/
struct skx_dev {
- struct list_head list;
+ /* {skx,i10nm}_edac */
u8 bus[4];
int seg;
struct pci_dev *sad_all;
struct pci_dev *util_all;
- struct pci_dev *uracu; /* for i10nm CPU */
- struct pci_dev *pcu_cr3; /* for HBM memory detection */
+ struct pci_dev *uracu;
+ struct pci_dev *pcu_cr3;
u32 mcroute;
- /*
- * Some server BIOS may hide certain memory controllers, and the
- * EDAC driver skips those hidden memory controllers. However, the
- * ADXL still decodes memory error address using physical memory
- * controller indices. The mapping table is used to convert the
- * physical indices (reported by ADXL) to the logical indices
- * (used the EDAC driver) of present memory controllers during the
- * error handling process.
- */
- u8 mc_mapping[NUM_IMC];
+
+ /* imh_edac */
+ /* System-view MMIO base physical addresses. */
+ u64 mmio_base_h_north;
+ u64 mmio_base_h_south;
+ int pkg;
+
+ int num_imc;
+ struct list_head list;
struct skx_imc {
+ /* i10nm_edac */
+ struct pci_dev *mdev;
+
+ /* imh_edac */
+ struct device *dev;
+
struct mem_ctl_info *mci;
- struct pci_dev *mdev; /* for i10nm CPU */
- void __iomem *mbase; /* for i10nm CPU */
- int chan_mmio_sz; /* for i10nm CPU */
+ void __iomem *mbase;
+ int chan_mmio_sz;
int num_channels; /* channels per memory controller */
int num_dimms; /* dimms per channel */
bool hbm_mc;
u8 mc; /* system wide mc# */
u8 lmc; /* socket relative mc# */
u8 src_id;
+ /*
+ * Some server BIOS may hide certain memory controllers, and the
+ * EDAC driver skips those hidden memory controllers. However, the
+ * ADXL still decodes memory error address using physical memory
+ * controller indices. The mapping table is used to convert the
+ * physical indices (reported by ADXL) to the logical indices
+ * (used by the EDAC driver) of present memory controllers during the
+ * error handling process.
+ */
+ u8 mc_mapping;
struct skx_channel {
struct pci_dev *cdev;
struct pci_dev *edev;
@@ -171,7 +180,7 @@ struct skx_dev {
u8 colbits;
} dimms[NUM_DIMMS];
} chan[NUM_CHANNELS];
- } imc[NUM_IMC];
+ } imc[];
};
struct skx_pvt {
@@ -182,7 +191,8 @@ enum type {
SKX,
I10NM,
SPR,
- GNR
+ GNR,
+ DMR,
};
enum {
@@ -241,10 +251,6 @@ struct pci_bdf {
struct res_config {
enum type type;
- /* Configuration agent device ID */
- unsigned int decs_did;
- /* Default bus number configuration register offset */
- int busno_cfg_offset;
/* DDR memory controllers per socket */
int ddr_imc_num;
/* DDR channels per DDR memory controller */
@@ -262,23 +268,57 @@ struct res_config {
/* Per HBM channel memory-mapped I/O size */
int hbm_chan_mmio_sz;
bool support_ddr5;
- /* SAD device BDF */
- struct pci_bdf sad_all_bdf;
- /* PCU device BDF */
- struct pci_bdf pcu_cr3_bdf;
- /* UTIL device BDF */
- struct pci_bdf util_all_bdf;
- /* URACU device BDF */
- struct pci_bdf uracu_bdf;
- /* DDR mdev device BDF */
- struct pci_bdf ddr_mdev_bdf;
- /* HBM mdev device BDF */
- struct pci_bdf hbm_mdev_bdf;
- int sad_all_offset;
/* RRL register sets per DDR channel */
struct reg_rrl *reg_rrl_ddr;
/* RRL register sets per HBM channel */
struct reg_rrl *reg_rrl_hbm[2];
+ union {
+ /* {skx,i10nm}_edac */
+ struct {
+ /* Configuration agent device ID */
+ unsigned int decs_did;
+ /* Default bus number configuration register offset */
+ int busno_cfg_offset;
+ struct pci_bdf sad_all_bdf;
+ struct pci_bdf pcu_cr3_bdf;
+ struct pci_bdf util_all_bdf;
+ struct pci_bdf uracu_bdf;
+ struct pci_bdf ddr_mdev_bdf;
+ struct pci_bdf hbm_mdev_bdf;
+ int sad_all_offset;
+ };
+ /* imh_edac */
+ struct {
+ /* MMIO base physical address in local package view */
+ u64 mmio_base_l_north;
+ u64 mmio_base_l_south;
+ u64 ddr_imc_base;
+ u64 ddr_reg_mcmtr_offset;
+ u8 ddr_reg_mcmtr_width;
+ u64 ddr_reg_dimmmtr_offset;
+ u8 ddr_reg_dimmmtr_width;
+ u64 ubox_base;
+ u32 ubox_size;
+ u32 ubox_reg_mmio_base_offset;
+ u8 ubox_reg_mmio_base_width;
+ u32 ubox_reg_socket_id_offset;
+ u8 ubox_reg_socket_id_width;
+ u64 pcu_base;
+ u32 pcu_size;
+ u32 pcu_reg_capid3_offset;
+ u8 pcu_reg_capid3_width;
+ u64 sca_base;
+ u32 sca_size;
+ u32 sca_reg_tolm_offset;
+ u8 sca_reg_tolm_width;
+ u32 sca_reg_tohm_offset;
+ u8 sca_reg_tohm_width;
+ u64 ha_base;
+ u32 ha_size;
+ u32 ha_reg_mode_offset;
+ u8 ha_reg_mode_width;
+ };
+ };
};
typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci,
@@ -291,13 +331,17 @@ void skx_adxl_put(void);
void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log);
void skx_set_mem_cfg(bool mem_cfg_2lm);
void skx_set_res_cfg(struct res_config *cfg);
+void skx_init_mc_mapping(struct skx_dev *d);
void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc);
int skx_get_src_id(struct skx_dev *d, int off, u8 *id);
int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list);
+struct list_head *skx_get_edac_list(void);
+
int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm);
+void skx_set_hi_lo(u64 tolm, u64 tohm);
int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
struct skx_imc *imc, int chan, int dimmno,
@@ -306,7 +350,7 @@ int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,
int chan, int dimmno, const char *mod_str);
-int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
+int skx_register_mci(struct skx_imc *imc, struct device *dev, const char *dev_name,
const char *ctl_name, const char *mod_str,
get_dimm_config_f get_dimm_config,
struct res_config *cfg);
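
With the per-generation controller maxima gone, struct skx_dev ends in a flexible array member sized at allocation time, which is why skx_get_all_bus_mappings() and imh_get_all_mmio_base_h() switched to struct_size(). Allocation sketch, assuming imc_num was just derived from the config:

    d = kzalloc(struct_size(d, imc, imc_num), GFP_KERNEL);
    if (!d)
            return -ENOMEM;
    d->num_imc = imc_num;  /* bound for every later imc[] walk */
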
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index 5ed32a3299c4..51143b3257de 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -332,20 +332,26 @@ struct synps_edac_priv {
#endif
};
+enum synps_platform_type {
+ ZYNQ,
+ ZYNQMP,
+ SYNPS,
+};
+
/**
* struct synps_platform_data - synps platform data structure.
+ * @platform: Identifies the target hardware platform
* @get_error_info: Get EDAC error info.
* @get_mtype: Get mtype.
* @get_dtype: Get dtype.
- * @get_ecc_state: Get ECC state.
* @get_mem_info: Get EDAC memory info
* @quirks: To differentiate IPs.
*/
struct synps_platform_data {
+ enum synps_platform_type platform;
int (*get_error_info)(struct synps_edac_priv *priv);
enum mem_type (*get_mtype)(const void __iomem *base);
enum dev_type (*get_dtype)(const void __iomem *base);
- bool (*get_ecc_state)(void __iomem *base);
#ifdef CONFIG_EDAC_DEBUG
u64 (*get_mem_info)(struct synps_edac_priv *priv);
#endif
@@ -720,51 +726,38 @@ static enum dev_type zynqmp_get_dtype(const void __iomem *base)
return dt;
}
-/**
- * zynq_get_ecc_state - Return the controller ECC enable/disable status.
- * @base: DDR memory controller base address.
- *
- * Get the ECC enable/disable status of the controller.
- *
- * Return: true if enabled, otherwise false.
- */
-static bool zynq_get_ecc_state(void __iomem *base)
+static bool get_ecc_state(struct synps_edac_priv *priv)
{
+ u32 ecctype, clearval;
enum dev_type dt;
- u32 ecctype;
-
- dt = zynq_get_dtype(base);
- if (dt == DEV_UNKNOWN)
- return false;
- ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK;
- if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2))
- return true;
-
- return false;
-}
-
-/**
- * zynqmp_get_ecc_state - Return the controller ECC enable/disable status.
- * @base: DDR memory controller base address.
- *
- * Get the ECC enable/disable status for the controller.
- *
- * Return: a ECC status boolean i.e true/false - enabled/disabled.
- */
-static bool zynqmp_get_ecc_state(void __iomem *base)
-{
- enum dev_type dt;
- u32 ecctype;
-
- dt = zynqmp_get_dtype(base);
- if (dt == DEV_UNKNOWN)
- return false;
-
- ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
- if ((ecctype == SCRUB_MODE_SECDED) &&
- ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8)))
- return true;
+ if (priv->p_data->platform == ZYNQ) {
+ dt = zynq_get_dtype(priv->baseaddr);
+ if (dt == DEV_UNKNOWN)
+ return false;
+
+ ecctype = readl(priv->baseaddr + SCRUB_OFST) & SCRUB_MODE_MASK;
+ if (ecctype == SCRUB_MODE_SECDED && dt == DEV_X2) {
+ clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_UE_ERR;
+ writel(clearval, priv->baseaddr + ECC_CTRL_OFST);
+ writel(0x0, priv->baseaddr + ECC_CTRL_OFST);
+ return true;
+ }
+ } else {
+ dt = zynqmp_get_dtype(priv->baseaddr);
+ if (dt == DEV_UNKNOWN)
+ return false;
+
+ ecctype = readl(priv->baseaddr + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
+ if (ecctype == SCRUB_MODE_SECDED &&
+ (dt == DEV_X2 || dt == DEV_X4 || dt == DEV_X8)) {
+ clearval = readl(priv->baseaddr + ECC_CLR_OFST) |
+ ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT |
+ ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
+ writel(clearval, priv->baseaddr + ECC_CLR_OFST);
+ return true;
+ }
+ }
return false;
}
@@ -934,18 +927,18 @@ static int setup_irq(struct mem_ctl_info *mci,
}
static const struct synps_platform_data zynq_edac_def = {
+ .platform = ZYNQ,
.get_error_info = zynq_get_error_info,
.get_mtype = zynq_get_mtype,
.get_dtype = zynq_get_dtype,
- .get_ecc_state = zynq_get_ecc_state,
.quirks = 0,
};
static const struct synps_platform_data zynqmp_edac_def = {
+ .platform = ZYNQMP,
.get_error_info = zynqmp_get_error_info,
.get_mtype = zynqmp_get_mtype,
.get_dtype = zynqmp_get_dtype,
- .get_ecc_state = zynqmp_get_ecc_state,
#ifdef CONFIG_EDAC_DEBUG
.get_mem_info = zynqmp_get_mem_info,
#endif
@@ -957,10 +950,10 @@ static const struct synps_platform_data zynqmp_edac_def = {
};
static const struct synps_platform_data synopsys_edac_def = {
+ .platform = SYNPS,
.get_error_info = zynqmp_get_error_info,
.get_mtype = zynqmp_get_mtype,
.get_dtype = zynqmp_get_dtype,
- .get_ecc_state = zynqmp_get_ecc_state,
.quirks = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
#ifdef CONFIG_EDAC_DEBUG
| DDR_ECC_DATA_POISON_SUPPORT
@@ -1390,10 +1383,6 @@ static int mc_probe(struct platform_device *pdev)
if (!p_data)
return -ENODEV;
- if (!p_data->get_ecc_state(baseaddr)) {
- edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
- return -ENXIO;
- }
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = SYNPS_EDAC_NR_CSROWS;
@@ -1413,6 +1402,12 @@ static int mc_probe(struct platform_device *pdev)
priv = mci->pvt_info;
priv->baseaddr = baseaddr;
priv->p_data = p_data;
+ if (!get_ecc_state(priv)) {
+ edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
+ rc = -ENODEV;
+ goto free_edac_mc;
+ }
+
spin_lock_init(&priv->reglock);
mc_init(mci, pdev);
diff --git a/drivers/edac/versalnet_edac.c b/drivers/edac/versalnet_edac.c
new file mode 100644
index 000000000000..1a1092793092
--- /dev/null
+++ b/drivers/edac/versalnet_edac.c
@@ -0,0 +1,962 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Versal NET memory controller driver
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/cdx/edac_cdx_pcol.h>
+#include <linux/edac.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/ras.h>
+#include <linux/remoteproc.h>
+#include <linux/rpmsg.h>
+#include <linux/sizes.h>
+#include <ras/ras_event.h>
+
+#include "edac_module.h"
+
+/* Granularity of reported error in bytes */
+#define MC5_ERR_GRAIN 1
+#define MC_GET_DDR_CONFIG_IN_LEN 4
+
+#define MC5_IRQ_CE_MASK GENMASK(18, 15)
+#define MC5_IRQ_UE_MASK GENMASK(14, 11)
+
+#define MC5_RANK_1_MASK GENMASK(11, 6)
+#define MASK_24 GENMASK(29, 24)
+#define MASK_0 GENMASK(5, 0)
+
+#define MC5_LRANK_1_MASK GENMASK(11, 6)
+#define MC5_LRANK_2_MASK GENMASK(17, 12)
+#define MC5_BANK1_MASK GENMASK(11, 6)
+#define MC5_GRP_0_MASK GENMASK(17, 12)
+#define MC5_GRP_1_MASK GENMASK(23, 18)
+
+#define MC5_REGHI_ROW 7
+#define MC5_EACHBIT 1
+#define MC5_ERR_TYPE_CE 0
+#define MC5_ERR_TYPE_UE 1
+#define MC5_HIGH_MEM_EN BIT(20)
+#define MC5_MEM_MASK GENMASK(19, 0)
+#define MC5_X16_BASE 256
+#define MC5_X16_ECC 32
+#define MC5_X16_SIZE (MC5_X16_BASE + MC5_X16_ECC)
+#define MC5_X32_SIZE 576
+#define MC5_HIMEM_BASE (256 * SZ_1M)
+#define MC5_ILC_HIMEM_EN BIT(28)
+#define MC5_ILC_MEM GENMASK(27, 0)
+#define MC5_INTERLEAVE_SEL GENMASK(3, 0)
+#define MC5_BUS_WIDTH_MASK GENMASK(19, 18)
+#define MC5_NUM_CHANS_MASK BIT(17)
+#define MC5_RANK_MASK GENMASK(15, 14)
+
+#define ERROR_LEVEL 2
+#define ERROR_ID 3
+#define TOTAL_ERR_LENGTH 5
+#define MSG_ERR_OFFSET 8
+#define MSG_ERR_LENGTH 9
+#define ERROR_DATA 10
+#define MCDI_RESPONSE 0xFF
+
+#define REG_MAX 152
+#define ADEC_MAX 152
+#define NUM_CONTROLLERS 8
+#define REGS_PER_CONTROLLER 19
+#define ADEC_NUM 19
+#define BUFFER_SZ 80
+
+#define XDDR5_BUS_WIDTH_64 0
+#define XDDR5_BUS_WIDTH_32 1
+#define XDDR5_BUS_WIDTH_16 2
+
+/**
+ * union ecc_error_info - ECC error log information.
+ * @burstpos: Burst position.
+ * @lrank: Logical Rank number.
+ * @rank: Rank number.
+ * @group: Group number.
+ * @bank: Bank number.
+ * @col: Column number.
+ * @row: Row number.
+ * @rowhi: Row number higher bits.
+ * @i: Combined ECC error vector containing encoded values of burst position,
+ * rank, bank, column, and row information.
+ */
+union ecc_error_info {
+ struct {
+ u32 burstpos:3;
+ u32 lrank:4;
+ u32 rank:2;
+ u32 group:3;
+ u32 bank:2;
+ u32 col:11;
+ u32 row:7;
+ u32 rowhi;
+ };
+ u64 i;
+} __packed;
+
+/* Row and column bit positions in the address decoder (ADEC) registers. */
+union row_col_mapping {
+ struct {
+ u32 row0:6;
+ u32 row1:6;
+ u32 row2:6;
+ u32 row3:6;
+ u32 row4:6;
+ u32 reserved:2;
+ };
+ struct {
+ u32 col1:6;
+ u32 col2:6;
+ u32 col3:6;
+ u32 col4:6;
+ u32 col5:6;
+ u32 reservedcol:2;
+ };
+ u32 i;
+} __packed;
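+
+/*
+ * Example: with cols.i = error_data[ADEC10], the five 6-bit column bit
+ * positions are available as cols.col1..cols.col5 without any manual
+ * shifting or masking.
+ */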
+
+/**
+ * struct ecc_status - ECC status information to report.
+ * @ceinfo: Correctable errors.
+ * @ueinfo: Uncorrected errors.
+ * @channel: Channel number.
+ * @error_type: Error type.
+ */
+struct ecc_status {
+ union ecc_error_info ceinfo[2];
+ union ecc_error_info ueinfo[2];
+ u8 channel;
+ u8 error_type;
+};
+
+/**
+ * struct mc_priv - DDR memory controller private instance data.
+ * @message: Buffer for framing the event specific info.
+ * @stat: ECC status information.
+ * @error_id: The error id.
+ * @error_level: The error level.
+ * @dwidth: Width of data bus excluding ECC bits.
+ * @part_len: Length of the partial message received so far.
+ * @regs: The controller registers received over rpmsg.
+ * @adec: Address decode registers.
+ * @mci: Memory controller interface.
+ * @ept: rpmsg endpoint.
+ * @mcdi: The mcdi handle.
+ */
+struct mc_priv {
+ char message[256];
+ struct ecc_status stat;
+ u32 error_id;
+ u32 error_level;
+ u32 dwidth;
+ u32 part_len;
+ u32 regs[REG_MAX];
+ u32 adec[ADEC_MAX];
+ struct mem_ctl_info *mci[NUM_CONTROLLERS];
+ struct rpmsg_endpoint *ept;
+ struct cdx_mcdi *mcdi;
+};
+
+/*
+ * Address decoder (ADEC) registers to match the order in which the register
+ * information is received from the firmware.
+ */
+enum adec_info {
+ CONF = 0,
+ ADEC0,
+ ADEC1,
+ ADEC2,
+ ADEC3,
+ ADEC4,
+ ADEC5,
+ ADEC6,
+ ADEC7,
+ ADEC8,
+ ADEC9,
+ ADEC10,
+ ADEC11,
+ ADEC12,
+ ADEC13,
+ ADEC14,
+ ADEC15,
+ ADEC16,
+ ADECILC,
+};
+
+enum reg_info {
+ ISR = 0,
+ IMR,
+ ECCR0_ERR_STATUS,
+ ECCR0_ADDR_LO,
+ ECCR0_ADDR_HI,
+ ECCR0_DATA_LO,
+ ECCR0_DATA_HI,
+ ECCR0_PAR,
+ ECCR1_ERR_STATUS,
+ ECCR1_ADDR_LO,
+ ECCR1_ADDR_HI,
+ ECCR1_DATA_LO,
+ ECCR1_DATA_HI,
+ ECCR1_PAR,
+ XMPU_ERR,
+ XMPU_ERR_ADDR_L0,
+ XMPU_ERR_ADDR_HI,
+ XMPU_ERR_AXI_ID,
+ ADEC_CHK_ERR_LOG,
+};
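+
+/*
+ * Each controller reports REGS_PER_CONTROLLER (19) registers in the order
+ * above, so REG_MAX covers NUM_CONTROLLERS * REGS_PER_CONTROLLER = 152
+ * registers in total.
+ */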
+
+static bool get_ddr_info(u32 *error_data, struct mc_priv *priv)
+{
+ u32 reglo, reghi, parity, eccr0_val, eccr1_val, isr;
+ struct ecc_status *p;
+
+ isr = error_data[ISR];
+
+ if (!(isr & (MC5_IRQ_UE_MASK | MC5_IRQ_CE_MASK)))
+ return false;
+
+ eccr0_val = error_data[ECCR0_ERR_STATUS];
+ eccr1_val = error_data[ECCR1_ERR_STATUS];
+
+ if (!eccr0_val && !eccr1_val)
+ return false;
+
+ p = &priv->stat;
+
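+	/* If channel 0 logged nothing, the error was reported by channel 1. */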
+ if (!eccr0_val)
+ p->channel = 1;
+ else
+ p->channel = 0;
+
+ reglo = error_data[ECCR0_ADDR_LO];
+ reghi = error_data[ECCR0_ADDR_HI];
+ if (isr & MC5_IRQ_CE_MASK)
+ p->ceinfo[0].i = reglo | (u64)reghi << 32;
+ else if (isr & MC5_IRQ_UE_MASK)
+ p->ueinfo[0].i = reglo | (u64)reghi << 32;
+
+ parity = error_data[ECCR0_PAR];
+ edac_dbg(2, "ERR DATA: 0x%08X%08X PARITY: 0x%08X\n",
+ reghi, reglo, parity);
+
+ reglo = error_data[ECCR1_ADDR_LO];
+ reghi = error_data[ECCR1_ADDR_HI];
+ if (isr & MC5_IRQ_CE_MASK)
+ p->ceinfo[1].i = reglo | (u64)reghi << 32;
+ else if (isr & MC5_IRQ_UE_MASK)
+ p->ueinfo[1].i = reglo | (u64)reghi << 32;
+
+ parity = error_data[ECCR1_PAR];
+ edac_dbg(2, "ERR DATA: 0x%08X%08X PARITY: 0x%08X\n",
+ reghi, reglo, parity);
+
+ return true;
+}
+
+/**
+ * convert_to_physical - Convert @error_data to a physical address.
+ * @priv: DDR memory controller private instance data.
+ * @pinf: ECC error info structure.
+ * @controller: Controller number of the MC5.
+ * @error_data: The DDRMC5 ADEC address decoder register data.
+ *
+ * Return: physical address of the DDR memory.
+ */
+static unsigned long convert_to_physical(struct mc_priv *priv,
+ union ecc_error_info pinf,
+ int controller, int *error_data)
+{
+ u32 row, blk, rsh_req_addr, interleave, ilc_base_ctrl_add, ilc_himem_en, reg, offset;
+ u64 high_mem_base, high_mem_offset, low_mem_offset, ilcmem_base;
+ unsigned long err_addr = 0, addr;
+ union row_col_mapping cols;
+ union row_col_mapping rows;
+ u32 col_bit_0;
+
+ row = pinf.rowhi << MC5_REGHI_ROW | pinf.row;
+ offset = controller * ADEC_NUM;
+
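+	/*
+	 * Scatter each decoded row/column/bank/rank bit into the physical
+	 * address bit position programmed by the ADEC registers.
+	 */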
+ reg = error_data[ADEC6];
+ rows.i = reg;
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row3;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row4;
+ row >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC7];
+ rows.i = reg;
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row3;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row4;
+ row >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC8];
+ rows.i = reg;
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row3;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row4;
+
+ reg = error_data[ADEC9];
+ rows.i = reg;
+
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+
+ col_bit_0 = FIELD_GET(MASK_24, error_data[ADEC9]);
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << col_bit_0;
+
+ cols.i = error_data[ADEC10];
+ err_addr |= (pinf.col & 1) << cols.col1;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col2;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col3;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col4;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col5;
+ pinf.col >>= 1;
+
+ cols.i = error_data[ADEC11];
+ err_addr |= (pinf.col & 1) << cols.col1;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col2;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col3;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col4;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col5;
+ pinf.col >>= 1;
+
+ reg = error_data[ADEC12];
+ err_addr |= (pinf.bank & BIT(0)) << (reg & MASK_0);
+ pinf.bank >>= MC5_EACHBIT;
+ err_addr |= (pinf.bank & BIT(0)) << FIELD_GET(MC5_BANK1_MASK, reg);
+ pinf.bank >>= MC5_EACHBIT;
+
+	err_addr |= (pinf.group & BIT(0)) << FIELD_GET(MC5_GRP_0_MASK, reg);
+	pinf.group >>= MC5_EACHBIT;
+	err_addr |= (pinf.group & BIT(0)) << FIELD_GET(MC5_GRP_1_MASK, reg);
+	pinf.group >>= MC5_EACHBIT;
+	err_addr |= (pinf.group & BIT(0)) << FIELD_GET(MASK_24, reg);
+	pinf.group >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC4];
+ err_addr |= (pinf.rank & BIT(0)) << (reg & MASK_0);
+ pinf.rank >>= MC5_EACHBIT;
+ err_addr |= (pinf.rank & BIT(0)) << FIELD_GET(MC5_RANK_1_MASK, reg);
+ pinf.rank >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC5];
+ err_addr |= (pinf.lrank & BIT(0)) << (reg & MASK_0);
+ pinf.lrank >>= MC5_EACHBIT;
+ err_addr |= (pinf.lrank & BIT(0)) << FIELD_GET(MC5_LRANK_1_MASK, reg);
+ pinf.lrank >>= MC5_EACHBIT;
+ err_addr |= (pinf.lrank & BIT(0)) << FIELD_GET(MC5_LRANK_2_MASK, reg);
+ pinf.lrank >>= MC5_EACHBIT;
+ err_addr |= (pinf.lrank & BIT(0)) << FIELD_GET(MASK_24, reg);
+ pinf.lrank >>= MC5_EACHBIT;
+
+ high_mem_base = (priv->adec[ADEC2 + offset] & MC5_MEM_MASK) * MC5_HIMEM_BASE;
+ interleave = priv->adec[ADEC13 + offset] & MC5_INTERLEAVE_SEL;
+
+ high_mem_offset = priv->adec[ADEC3 + offset] & MC5_MEM_MASK;
+ low_mem_offset = priv->adec[ADEC1 + offset] & MC5_MEM_MASK;
+ reg = priv->adec[ADEC14 + offset];
+ ilc_himem_en = !!(reg & MC5_ILC_HIMEM_EN);
+ ilcmem_base = (reg & MC5_ILC_MEM) * SZ_1M;
+ if (ilc_himem_en)
+ ilc_base_ctrl_add = ilcmem_base - high_mem_offset;
+ else
+ ilc_base_ctrl_add = ilcmem_base - low_mem_offset;
+
+ if (priv->dwidth == DEV_X16) {
+ blk = err_addr / MC5_X16_SIZE;
+ rsh_req_addr = (blk << 8) + ilc_base_ctrl_add;
+ err_addr = rsh_req_addr * interleave * 2;
+ } else {
+ blk = err_addr / MC5_X32_SIZE;
+ rsh_req_addr = (blk << 9) + ilc_base_ctrl_add;
+ err_addr = rsh_req_addr * interleave * 2;
+ }
+
+ if ((priv->adec[ADEC2 + offset] & MC5_HIGH_MEM_EN) && err_addr >= high_mem_base)
+ addr = err_addr - high_mem_offset;
+ else
+ addr = err_addr - low_mem_offset;
+
+ return addr;
+}
+
+/**
+ * handle_error - Handle errors.
+ * @priv: DDR memory controller private instance data.
+ * @stat: ECC status structure.
+ * @ctl_num: Controller number of the MC5.
+ * @error_data: The MC5 ADEC address decoder register data.
+ *
+ * Handles ECC correctable and uncorrectable errors.
+ */
+static void handle_error(struct mc_priv *priv, struct ecc_status *stat,
+ int ctl_num, int *error_data)
+{
+ union ecc_error_info pinf;
+ struct mem_ctl_info *mci;
+ unsigned long pa;
+ phys_addr_t pfn;
+ int err;
+
+ if (WARN_ON_ONCE(ctl_num >= NUM_CONTROLLERS))
+ return;
+
+ mci = priv->mci[ctl_num];
+
+ if (stat->error_type == MC5_ERR_TYPE_CE) {
+ pinf = stat->ceinfo[stat->channel];
+ snprintf(priv->message, sizeof(priv->message),
+ "Error type:%s Controller %d Addr at %lx\n",
+ "CE", ctl_num, convert_to_physical(priv, pinf, ctl_num, error_data));
+
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 1, 0, 0, 0, 0, 0, -1,
+ priv->message, "");
+ }
+
+	if (stat->error_type == MC5_ERR_TYPE_UE) {
+		pinf = stat->ueinfo[stat->channel];
+		pa = convert_to_physical(priv, pinf, ctl_num, error_data);
+		snprintf(priv->message, sizeof(priv->message),
+			 "Error type:%s Controller %d Addr at %lx\n",
+			 "UE", ctl_num, pa);
+
+		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+				     1, 0, 0, 0, 0, 0, -1,
+				     priv->message, "");
+		pfn = PHYS_PFN(pa);
+
+ if (IS_ENABLED(CONFIG_MEMORY_FAILURE)) {
+ err = memory_failure(pfn, MF_ACTION_REQUIRED);
+ if (err)
+ edac_dbg(2, "memory_failure() error: %d", err);
+ else
+ edac_dbg(2, "Poison page at PA 0x%lx\n", pa);
+ }
+ }
+}
+
+static void mc_init(struct mem_ctl_info *mci, struct device *dev)
+{
+ struct mc_priv *priv = mci->pvt_info;
+ struct csrow_info *csi;
+ struct dimm_info *dimm;
+ u32 row;
+ int ch;
+
+ /* Initialize controller capabilities and configuration */
+ mci->mtype_cap = MEM_FLAG_DDR5;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->scrub_cap = SCRUB_HW_SRC;
+ mci->scrub_mode = SCRUB_NONE;
+
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->ctl_name = "VersalNET DDR5";
+ mci->dev_name = dev_name(dev);
+ mci->mod_name = "versalnet_edac";
+
+ edac_op_state = EDAC_OPSTATE_INT;
+
+ for (row = 0; row < mci->nr_csrows; row++) {
+ csi = mci->csrows[row];
+ for (ch = 0; ch < csi->nr_channels; ch++) {
+ dimm = csi->channels[ch]->dimm;
+ dimm->edac_mode = EDAC_SECDED;
+ dimm->mtype = MEM_DDR5;
+ dimm->grain = MC5_ERR_GRAIN;
+ dimm->dtype = priv->dwidth;
+ }
+ }
+}
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
+static unsigned int mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd)
+{
+ return MCDI_RPC_TIMEOUT;
+}
+
+static void mcdi_request(struct cdx_mcdi *cdx,
+ const struct cdx_dword *hdr, size_t hdr_len,
+ const struct cdx_dword *sdu, size_t sdu_len)
+{
+ void *send_buf;
+ int ret;
+
+ send_buf = kzalloc(hdr_len + sdu_len, GFP_KERNEL);
+ if (!send_buf)
+ return;
+
+ memcpy(send_buf, hdr, hdr_len);
+ memcpy(send_buf + hdr_len, sdu, sdu_len);
+
+ ret = rpmsg_send(cdx->ept, send_buf, hdr_len + sdu_len);
+ if (ret)
+ dev_err(&cdx->rpdev->dev, "Failed to send rpmsg data: %d\n", ret);
+
+ kfree(send_buf);
+}
+
+static const struct cdx_mcdi_ops mcdi_ops = {
+ .mcdi_rpc_timeout = mcdi_rpc_timeout,
+ .mcdi_request = mcdi_request,
+};
+
+static void get_ddr_config(u32 index, u32 *buffer, struct cdx_mcdi *amd_mcdi)
+{
+ size_t outlen;
+ int ret;
+
+ MCDI_DECLARE_BUF(inbuf, MC_GET_DDR_CONFIG_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, BUFFER_SZ);
+
+ MCDI_SET_DWORD(inbuf, EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX, index);
+
+ ret = cdx_mcdi_rpc(amd_mcdi, MC_CMD_EDAC_GET_DDR_CONFIG, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
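+	/* On success the response carries ADEC_NUM (19) 32-bit registers. */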
+ if (!ret)
+ memcpy(buffer, MCDI_PTR(outbuf, GET_DDR_CONFIG),
+ (ADEC_NUM * 4));
+}
+
+static int setup_mcdi(struct mc_priv *mc_priv)
+{
+ struct cdx_mcdi *amd_mcdi;
+ int ret, i;
+
+ amd_mcdi = kzalloc(sizeof(*amd_mcdi), GFP_KERNEL);
+ if (!amd_mcdi)
+ return -ENOMEM;
+
+ amd_mcdi->mcdi_ops = &mcdi_ops;
+ ret = cdx_mcdi_init(amd_mcdi);
+ if (ret) {
+ kfree(amd_mcdi);
+ return ret;
+ }
+
+ amd_mcdi->ept = mc_priv->ept;
+ mc_priv->mcdi = amd_mcdi;
+
+ for (i = 0; i < NUM_CONTROLLERS; i++)
+ get_ddr_config(i, &mc_priv->adec[ADEC_NUM * i], amd_mcdi);
+
+ return 0;
+}
+
+static const guid_t amd_versalnet_guid = GUID_INIT(0x82678888, 0xa556, 0x44f2,
+ 0xb8, 0xb4, 0x45, 0x56, 0x2e,
+ 0x8c, 0x5b, 0xec);
+
+static int rpmsg_cb(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ struct mc_priv *mc_priv = dev_get_drvdata(&rpdev->dev);
+ const guid_t *sec_type = &guid_null;
+ u32 length, offset, error_id;
+ u32 *result = (u32 *)data;
+ struct ecc_status *p;
+ int i, j, k, sec_sev;
+ const char *err_str;
+ u32 *adec_data;
+
+ if (*(u8 *)data == MCDI_RESPONSE) {
+ cdx_mcdi_process_cmd(mc_priv->mcdi, (struct cdx_dword *)data, len);
+ return 0;
+ }
+
+ sec_sev = result[ERROR_LEVEL];
+ error_id = result[ERROR_ID];
+ length = result[MSG_ERR_LENGTH];
+ offset = result[MSG_ERR_OFFSET];
+
+ /*
+ * The data can come in two stretches. Construct the regs from two
+ * messages. The offset indicates the offset from which the data is to
+ * be taken.
+ */
+	for (i = 0; i < length; i++) {
+ k = offset + i;
+ j = ERROR_DATA + i;
+ mc_priv->regs[k] = result[j];
+ }
+
+ if (result[TOTAL_ERR_LENGTH] > length) {
+ if (!mc_priv->part_len)
+ mc_priv->part_len = length;
+ else
+ mc_priv->part_len += length;
+
+ if (mc_priv->part_len < result[TOTAL_ERR_LENGTH])
+ return 0;
+ mc_priv->part_len = 0;
+ }
+
+ mc_priv->error_id = error_id;
+ mc_priv->error_level = result[ERROR_LEVEL];
+
+ switch (error_id) {
+ case 5: err_str = "General Software Non-Correctable error"; break;
+ case 6: err_str = "CFU error"; break;
+ case 7: err_str = "CFRAME error"; break;
+ case 10: err_str = "DDRMC Microblaze Correctable ECC error"; break;
+ case 11: err_str = "DDRMC Microblaze Non-Correctable ECC error"; break;
+ case 15: err_str = "MMCM error"; break;
+ case 16: err_str = "HNICX Correctable error"; break;
+ case 17: err_str = "HNICX Non-Correctable error"; break;
+
+	case 18:
+	case 19:
+		p = &mc_priv->stat;
+		memset(p, 0, sizeof(*p));
+		p->error_type = (error_id == 18) ? MC5_ERR_TYPE_CE : MC5_ERR_TYPE_UE;
+		for (i = 0; i < NUM_CONTROLLERS; i++) {
+			if (get_ddr_info(&mc_priv->regs[i * REGS_PER_CONTROLLER], mc_priv)) {
+				adec_data = mc_priv->adec + ADEC_NUM * i;
+				handle_error(mc_priv, &mc_priv->stat, i, adec_data);
+			}
+		}
+		return 0;
+
+ case 21: err_str = "GT Non-Correctable error"; break;
+ case 22: err_str = "PL Sysmon Correctable error"; break;
+ case 23: err_str = "PL Sysmon Non-Correctable error"; break;
+ case 111: err_str = "LPX unexpected dfx activation error"; break;
+ case 114: err_str = "INT_LPD Non-Correctable error"; break;
+ case 116: err_str = "INT_OCM Non-Correctable error"; break;
+ case 117: err_str = "INT_FPD Correctable error"; break;
+ case 118: err_str = "INT_FPD Non-Correctable error"; break;
+ case 120: err_str = "INT_IOU Non-Correctable error"; break;
+ case 123: err_str = "err_int_irq from APU GIC Distributor"; break;
+	case 124: err_str = "fault_int_irq from APU GIC Distributor"; break;
+ case 132 ... 139: err_str = "FPX SPLITTER error"; break;
+ case 140: err_str = "APU Cluster 0 error"; break;
+ case 141: err_str = "APU Cluster 1 error"; break;
+ case 142: err_str = "APU Cluster 2 error"; break;
+ case 143: err_str = "APU Cluster 3 error"; break;
+ case 145: err_str = "WWDT1 LPX error"; break;
+ case 147: err_str = "IPI error"; break;
+ case 152 ... 153: err_str = "AFIFS error"; break;
+ case 154 ... 155: err_str = "LPX glitch error"; break;
+ case 185 ... 186: err_str = "FPX AFIFS error"; break;
+ case 195 ... 199: err_str = "AFIFM error"; break;
+ case 108: err_str = "PSM Correctable error"; break;
+ case 59: err_str = "PMC correctable error"; break;
+	case 60: err_str = "PMC Uncorrectable error"; break;
+ case 43 ... 47: err_str = "PMC Sysmon error"; break;
+ case 163 ... 184: err_str = "RPU error"; break;
+ case 148: err_str = "OCM0 correctable error"; break;
+ case 149: err_str = "OCM1 correctable error"; break;
+ case 150: err_str = "OCM0 Un-correctable error"; break;
+ case 151: err_str = "OCM1 Un-correctable error"; break;
+ case 189: err_str = "PSX_CMN_3 PD block consolidated error"; break;
+ case 191: err_str = "FPD_INT_WRAP PD block consolidated error"; break;
+ case 232: err_str = "CRAM Un-Correctable error"; break;
+	default: err_str = "Unknown error"; break;
+ }
+
+ snprintf(mc_priv->message,
+ sizeof(mc_priv->message),
+ "[VERSAL_EDAC_ERR_ID: %d] Error type: %s", error_id, err_str);
+
+ /* Convert to bytes */
+ length = result[TOTAL_ERR_LENGTH] * 4;
+ log_non_standard_event(sec_type, &amd_versalnet_guid, mc_priv->message,
+ sec_sev, (void *)&mc_priv->regs, length);
+
+ return 0;
+}
+
+static struct rpmsg_device_id amd_rpmsg_id_table[] = {
+ { .name = "error_ipc" },
+ { },
+};
+MODULE_DEVICE_TABLE(rpmsg, amd_rpmsg_id_table);
+
+static int rpmsg_probe(struct rpmsg_device *rpdev)
+{
+ struct rpmsg_channel_info chinfo;
+ struct mc_priv *pg;
+
+ pg = (struct mc_priv *)amd_rpmsg_id_table[0].driver_data;
+ chinfo.src = RPMSG_ADDR_ANY;
+ chinfo.dst = rpdev->dst;
+	strscpy(chinfo.name, amd_rpmsg_id_table[0].name, sizeof(chinfo.name));
+
+ pg->ept = rpmsg_create_ept(rpdev, rpmsg_cb, NULL, chinfo);
+ if (!pg->ept)
+ return dev_err_probe(&rpdev->dev, -ENXIO, "Failed to create ept for channel %s\n",
+ chinfo.name);
+
+ dev_set_drvdata(&rpdev->dev, pg);
+
+ return 0;
+}
+
+static void rpmsg_remove(struct rpmsg_device *rpdev)
+{
+ struct mc_priv *mc_priv = dev_get_drvdata(&rpdev->dev);
+
+ rpmsg_destroy_ept(mc_priv->ept);
+ dev_set_drvdata(&rpdev->dev, NULL);
+}
+
+static struct rpmsg_driver amd_rpmsg_driver = {
+ .drv.name = KBUILD_MODNAME,
+ .probe = rpmsg_probe,
+ .remove = rpmsg_remove,
+ .callback = rpmsg_cb,
+ .id_table = amd_rpmsg_id_table,
+};
+
+static void versal_edac_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static int init_versalnet(struct mc_priv *priv, struct platform_device *pdev)
+{
+ u32 num_chans, rank, dwidth, config;
+ struct edac_mc_layer layers[2];
+ struct mem_ctl_info *mci;
+ struct device *dev;
+ enum dev_type dt;
+ char *name;
+ int rc, i;
+
+ for (i = 0; i < NUM_CONTROLLERS; i++) {
+ config = priv->adec[CONF + i * ADEC_NUM];
+ num_chans = FIELD_GET(MC5_NUM_CHANS_MASK, config);
+ rank = 1 << FIELD_GET(MC5_RANK_MASK, config);
+ dwidth = FIELD_GET(MC5_BUS_WIDTH_MASK, config);
+
+ switch (dwidth) {
+ case XDDR5_BUS_WIDTH_16:
+ dt = DEV_X16;
+ break;
+ case XDDR5_BUS_WIDTH_32:
+ dt = DEV_X32;
+ break;
+ case XDDR5_BUS_WIDTH_64:
+ dt = DEV_X64;
+ break;
+ default:
+ dt = DEV_UNKNOWN;
+ }
+
+ if (dt == DEV_UNKNOWN)
+ continue;
+
+ /* Find the first enabled device and register that one. */
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = rank;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = num_chans;
+ layers[1].is_virt_csrow = false;
+
+ rc = -ENOMEM;
+ mci = edac_mc_alloc(i, ARRAY_SIZE(layers), layers,
+ sizeof(struct mc_priv));
+ if (!mci) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed memory allocation for MC%d\n", i);
+ goto err_alloc;
+ }
+
+ priv->mci[i] = mci;
+ priv->dwidth = dt;
+
+		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+		if (!dev)
+			goto err_alloc;
+		dev->release = versal_edac_release;
+		name = kasprintf(GFP_KERNEL, "versal-net-ddrmc5-edac-%d", i);
+		if (!name) {
+			kfree(dev);
+			goto err_alloc;
+		}
+		dev->init_name = name;
+ rc = device_register(dev);
+ if (rc)
+ goto err_alloc;
+
+ mci->pdev = dev;
+
+ platform_set_drvdata(pdev, priv);
+
+ mc_init(mci, dev);
+ rc = edac_mc_add_mc(mci);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed to register MC%d with EDAC core\n", i);
+ goto err_alloc;
+ }
+ }
+ return 0;
+
+err_alloc:
+ while (i--) {
+ mci = priv->mci[i];
+ if (!mci)
+ continue;
+
+		if (mci->pdev) {
+			edac_mc_del_mc(mci->pdev);
+			device_unregister(mci->pdev);
+		}
+
+ edac_mc_free(mci);
+ }
+
+ return rc;
+}
+
+static void remove_versalnet(struct mc_priv *priv)
+{
+ struct mem_ctl_info *mci;
+ int i;
+
+	for (i = 0; i < NUM_CONTROLLERS; i++) {
+		if (!priv->mci[i])
+			continue;
+
+		mci = edac_mc_del_mc(priv->mci[i]->pdev);
+		if (!mci)
+			continue;
+
+		device_unregister(mci->pdev);
+		edac_mc_free(mci);
+	}
+}
+
+static int mc_probe(struct platform_device *pdev)
+{
+ struct device_node *r5_core_node;
+ struct mc_priv *priv;
+ struct rproc *rp;
+ int rc;
+
+ r5_core_node = of_parse_phandle(pdev->dev.of_node, "amd,rproc", 0);
+ if (!r5_core_node) {
+ dev_err(&pdev->dev, "amd,rproc: invalid phandle\n");
+ return -EINVAL;
+ }
+
+	rp = rproc_get_by_phandle(r5_core_node->phandle);
+	of_node_put(r5_core_node);
+	if (!rp)
+		return -EPROBE_DEFER;
+
+ rc = rproc_boot(rp);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to attach to remote processor\n");
+ goto err_rproc_boot;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ rc = -ENOMEM;
+ goto err_alloc;
+ }
+
+ amd_rpmsg_id_table[0].driver_data = (kernel_ulong_t)priv;
+
+ rc = register_rpmsg_driver(&amd_rpmsg_driver);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed to register RPMsg driver: %d\n", rc);
+ goto err_alloc;
+ }
+
+ rc = setup_mcdi(priv);
+ if (rc)
+ goto err_unreg;
+
+ priv->mcdi->r5_rproc = rp;
+
+ rc = init_versalnet(priv, pdev);
+ if (rc)
+ goto err_init;
+
+ return 0;
+
+err_init:
+ cdx_mcdi_finish(priv->mcdi);
+
+err_unreg:
+ unregister_rpmsg_driver(&amd_rpmsg_driver);
+
+err_alloc:
+ rproc_shutdown(rp);
+
+err_rproc_boot:
+ rproc_put(rp);
+
+ return rc;
+}
+
+static void mc_remove(struct platform_device *pdev)
+{
+ struct mc_priv *priv = platform_get_drvdata(pdev);
+
+ unregister_rpmsg_driver(&amd_rpmsg_driver);
+ remove_versalnet(priv);
+ rproc_shutdown(priv->mcdi->r5_rproc);
+ cdx_mcdi_finish(priv->mcdi);
+}
+
+static const struct of_device_id amd_edac_match[] = {
+ { .compatible = "xlnx,versal-net-ddrmc5", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, amd_edac_match);
+
+static struct platform_driver amd_ddr_edac_mc_driver = {
+ .driver = {
+ .name = "versal-net-edac",
+ .of_match_table = amd_edac_match,
+ },
+ .probe = mc_probe,
+ .remove = mc_remove,
+};
+
+module_platform_driver(amd_ddr_edac_mc_driver);
+
+MODULE_AUTHOR("AMD Inc");
+MODULE_DESCRIPTION("Versal NET EDAC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
index edceea083b98..bd76d599109c 100644
--- a/drivers/eisa/eisa-bus.c
+++ b/drivers/eisa/eisa-bus.c
@@ -135,7 +135,7 @@ static int eisa_bus_uevent(const struct device *dev, struct kobj_uevent_env *env
return 0;
}
-struct bus_type eisa_bus_type = {
+const struct bus_type eisa_bus_type = {
.name = "eisa",
.match = eisa_bus_match,
.uevent = eisa_bus_uevent,
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index a6f6d467aacf..aec46bf03302 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -134,6 +134,19 @@ config EXTCON_MAX8997
Maxim MAX8997 PMIC. The MAX8997 MUIC is a USB port accessory
detector and switch.
+config EXTCON_MAX14526
+ tristate "Maxim MAX14526 EXTCON Support"
+ depends on I2C
+ select IRQ_DOMAIN
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the Maxim MAX14526
+ MUIC device. The MAX14526 MUIC is a USB port accessory
+ detector and switch. The MAX14526 is designed to simplify
+ interface requirements on portable devices by multiplexing
+ common inputs (USB, UART, Microphone, Stereo Audio and
+ Composite Video) on a single micro/mini USB connector.
+
config EXTCON_PALMAS
tristate "Palmas USB EXTCON support"
depends on MFD_PALMAS
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 0d6d23faf748..6482f2bfd661 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_EXTCON_MAX3355) += extcon-max3355.o
obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
obj-$(CONFIG_EXTCON_MAX77843) += extcon-max77843.o
obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o
+obj-$(CONFIG_EXTCON_MAX14526) += extcon-max14526.o
obj-$(CONFIG_EXTCON_PALMAS) += extcon-palmas.o
obj-$(CONFIG_EXTCON_PTN5150) += extcon-ptn5150.o
obj-$(CONFIG_EXTCON_QCOM_SPMI_MISC) += extcon-qcom-spmi-misc.o
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 46c40d85c2ac..7e3c9f38297b 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -164,6 +164,8 @@ static void adc_jack_remove(struct platform_device *pdev)
{
struct adc_jack_data *data = platform_get_drvdata(pdev);
+ if (data->wakeup_source)
+ device_init_wakeup(&pdev->dev, false);
free_irq(data->irq, data);
cancel_work_sync(&data->handler.work);
}
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index d3bcbe839c09..19856dddade6 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -470,7 +470,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- device_init_wakeup(dev, true);
+ devm_device_init_wakeup(dev);
platform_set_drvdata(pdev, info);
return 0;
diff --git a/drivers/extcon/extcon-fsa9480.c b/drivers/extcon/extcon-fsa9480.c
index b11b43171063..a031eb0914a0 100644
--- a/drivers/extcon/extcon-fsa9480.c
+++ b/drivers/extcon/extcon-fsa9480.c
@@ -317,7 +317,7 @@ static int fsa9480_probe(struct i2c_client *client)
return ret;
}
- device_init_wakeup(info->dev, true);
+ devm_device_init_wakeup(info->dev);
fsa9480_detect_dev(info);
return 0;
diff --git a/drivers/extcon/extcon-max14526.c b/drivers/extcon/extcon-max14526.c
new file mode 100644
index 000000000000..3750a5c20612
--- /dev/null
+++ b/drivers/extcon/extcon-max14526.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/device.h>
+#include <linux/devm-helpers.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/extcon-provider.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+
+/* MUIC internal register addresses and expected device ID */
+#define MAX14526_DEVICE_ID 0x00
+#define MAX14526_ID 0x02
+
+/* CONTROL_1 register masks */
+#define MAX14526_CONTROL_1 0x01
+#define ID_2P2 BIT(6)
+#define ID_620 BIT(5)
+#define ID_200 BIT(4)
+#define VLDO BIT(3)
+#define SEMREN BIT(2)
+#define ADC_EN BIT(1)
+#define CP_EN BIT(0)
+
+/* CONTROL_2 register masks */
+#define MAX14526_CONTROL_2 0x02
+#define INTPOL BIT(7)
+#define INT_EN BIT(6)
+#define MIC_LP BIT(5)
+#define CP_AUD BIT(4)
+#define CHG_TYPE BIT(1)
+#define USB_DET_DIS BIT(0)
+
+/* SW_CONTROL register masks */
+#define MAX14526_SW_CONTROL 0x03
+#define SW_DATA 0x00
+#define SW_UART 0x01
+#define SW_AUDIO 0x02
+#define SW_OPEN 0x07
+
+/* INT_STATUS register masks */
+#define MAX14526_INT_STAT 0x04
+#define CHGDET BIT(7)
+#define MR_COMP BIT(6)
+#define SENDEND BIT(5)
+#define V_VBUS BIT(4)
+
+/* STATUS register masks */
+#define MAX14526_STATUS 0x05
+#define CPORT BIT(7)
+#define CHPORT BIT(6)
+#define C1COMP BIT(0)
+
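+/* ADC codes reported for the resistance detected on the USB ID pin. */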
+enum max14526_idno_resistance {
+ MAX14526_GND,
+ MAX14526_24KOHM,
+ MAX14526_56KOHM,
+ MAX14526_100KOHM,
+ MAX14526_130KOHM,
+ MAX14526_180KOHM,
+ MAX14526_240KOHM,
+ MAX14526_330KOHM,
+ MAX14526_430KOHM,
+ MAX14526_620KOHM,
+ MAX14526_910KOHM,
+ MAX14526_OPEN
+};
+
+enum max14526_field_idx {
+ VENDOR_ID, CHIP_REV, /* DEVID */
+ DM, DP, /* SW_CONTROL */
+ MAX14526_N_REGMAP_FIELDS
+};
+
+static const struct reg_field max14526_reg_field[MAX14526_N_REGMAP_FIELDS] = {
+ [VENDOR_ID] = REG_FIELD(MAX14526_DEVICE_ID, 4, 7),
+ [CHIP_REV] = REG_FIELD(MAX14526_DEVICE_ID, 0, 3),
+ [DM] = REG_FIELD(MAX14526_SW_CONTROL, 0, 2),
+ [DP] = REG_FIELD(MAX14526_SW_CONTROL, 3, 5),
+};
+
+struct max14526_data {
+ struct i2c_client *client;
+ struct extcon_dev *edev;
+
+ struct regmap *regmap;
+ struct regmap_field *rfield[MAX14526_N_REGMAP_FIELDS];
+
+ int last_state;
+ int cable;
+};
+
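+/*
+ * Cable states matched against the raw INT_STAT value: the low bits hold
+ * the ID-pin ADC code while V_VBUS and CHGDET flag VBUS presence and
+ * charger detection.
+ */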
+enum max14526_muic_modes {
+ MAX14526_OTG = MAX14526_GND, /* no power */
+ MAX14526_MHL = MAX14526_56KOHM, /* no power */
+ MAX14526_OTG_Y = MAX14526_GND | V_VBUS,
+ MAX14526_MHL_CHG = MAX14526_GND | V_VBUS | CHGDET,
+ MAX14526_NONE = MAX14526_OPEN,
+ MAX14526_USB = MAX14526_OPEN | V_VBUS,
+ MAX14526_CHG = MAX14526_OPEN | V_VBUS | CHGDET,
+};
+
+static const unsigned int max14526_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_CHG_USB_FAST,
+ EXTCON_DISP_MHL,
+ EXTCON_NONE,
+};
+
+static int max14526_ap_usb_mode(struct max14526_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ /* Enable USB Path */
+ ret = regmap_field_write(priv->rfield[DM], SW_DATA);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(priv->rfield[DP], SW_DATA);
+ if (ret)
+ return ret;
+
+ /* Enable 200K, Charger Pump and ADC */
+ ret = regmap_write(priv->regmap, MAX14526_CONTROL_1,
+ ID_200 | ADC_EN | CP_EN);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "AP USB mode set\n");
+
+ return 0;
+}
+
+static irqreturn_t max14526_interrupt(int irq, void *dev_id)
+{
+ struct max14526_data *priv = dev_id;
+ struct device *dev = &priv->client->dev;
+ int state, ret;
+
+ /*
+ * Upon an MUIC IRQ (MUIC_INT_N falls), wait at least 70ms
+ * before reading INT_STAT and STATUS. After the reads,
+ * MUIC_INT_N returns to high (but the INT_STAT and STATUS
+ * contents will be held).
+ */
+ msleep(100);
+
+	ret = regmap_read(priv->regmap, MAX14526_INT_STAT, &state);
+	if (ret) {
+		dev_err(dev, "failed to read MUIC state %d\n", ret);
+		return IRQ_HANDLED;
+	}
+
+ if (state == priv->last_state)
+ return IRQ_HANDLED;
+
+ /* Detach previous device */
+ extcon_set_state_sync(priv->edev, priv->cable, false);
+
+ switch (state) {
+ case MAX14526_USB:
+ priv->cable = EXTCON_USB;
+ break;
+
+ case MAX14526_CHG:
+ priv->cable = EXTCON_CHG_USB_FAST;
+ break;
+
+ case MAX14526_OTG:
+ case MAX14526_OTG_Y:
+ priv->cable = EXTCON_USB_HOST;
+ break;
+
+ case MAX14526_MHL:
+ case MAX14526_MHL_CHG:
+ priv->cable = EXTCON_DISP_MHL;
+ break;
+
+ case MAX14526_NONE:
+ default:
+ priv->cable = EXTCON_NONE;
+ break;
+ }
+
+ extcon_set_state_sync(priv->edev, priv->cable, true);
+
+ priv->last_state = state;
+
+ return IRQ_HANDLED;
+}
+
+static const struct regmap_config max14526_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX14526_STATUS,
+};
+
+static int max14526_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct max14526_data *priv;
+ int ret, dev_id, rev, i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+ i2c_set_clientdata(client, priv);
+
+ priv->regmap = devm_regmap_init_i2c(client, &max14526_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return dev_err_probe(dev, PTR_ERR(priv->regmap), "cannot allocate regmap\n");
+
+ for (i = 0; i < MAX14526_N_REGMAP_FIELDS; i++) {
+ priv->rfield[i] = devm_regmap_field_alloc(dev, priv->regmap,
+ max14526_reg_field[i]);
+ if (IS_ERR(priv->rfield[i]))
+ return dev_err_probe(dev, PTR_ERR(priv->rfield[i]),
+ "cannot allocate regmap field\n");
+ }
+
+ /* Detect if MUIC version is supported */
+ ret = regmap_field_read(priv->rfield[VENDOR_ID], &dev_id);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to read MUIC ID\n");
+
+ regmap_field_read(priv->rfield[CHIP_REV], &rev);
+
+ if (dev_id == MAX14526_ID)
+ dev_info(dev, "detected MAX14526 MUIC with id 0x%x, rev 0x%x\n", dev_id, rev);
+ else
+		return dev_err_probe(dev, -EINVAL,
+				     "MUIC vendor id 0x%X is not recognized\n", dev_id);
+
+ priv->edev = devm_extcon_dev_allocate(dev, max14526_extcon_cable);
+ if (IS_ERR(priv->edev))
+		return dev_err_probe(dev, PTR_ERR(priv->edev),
+				     "failed to allocate extcon device\n");
+
+ ret = devm_extcon_dev_register(dev, priv->edev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to register extcon device\n");
+
+ ret = max14526_ap_usb_mode(priv);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to set AP USB mode\n");
+
+ regmap_write_bits(priv->regmap, MAX14526_CONTROL_2, INT_EN, INT_EN);
+	regmap_write_bits(priv->regmap, MAX14526_CONTROL_2, USB_DET_DIS, 0);
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL, &max14526_interrupt,
+ IRQF_ONESHOT | IRQF_SHARED, client->name, priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register IRQ\n");
+
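+	/* Kick the IRQ thread once so the initial cable state gets reported. */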
+ irq_wake_thread(client->irq, priv);
+
+ return 0;
+}
+
+static int max14526_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max14526_data *priv = i2c_get_clientdata(client);
+
+ irq_wake_thread(client->irq, priv);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(max14526_pm_ops, NULL, max14526_resume);
+
+static const struct of_device_id max14526_match[] = {
+ { .compatible = "maxim,max14526" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, max14526_match);
+
+static const struct i2c_device_id max14526_id[] = {
+ { "max14526" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max14526_id);
+
+static struct i2c_driver max14526_driver = {
+ .driver = {
+ .name = "max14526",
+ .of_match_table = max14526_match,
+ .pm = &max14526_pm_ops,
+ },
+ .probe = max14526_probe,
+ .id_table = max14526_id,
+};
+module_i2c_driver(max14526_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("MAX14526 extcon driver to support MUIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
index 53de581a393a..afaba5685c3d 100644
--- a/drivers/extcon/extcon-qcom-spmi-misc.c
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -155,7 +155,7 @@ static int qcom_usb_extcon_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, info);
- device_init_wakeup(dev, 1);
+ devm_device_init_wakeup(dev);
/* Perform initial detection */
qcom_usb_extcon_detect_cable(&info->wq_detcable.work);
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 01354b9de8b2..0462d7b9e547 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -86,8 +86,6 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
*/
#define DEFAULT_SPLIT_TIMEOUT (2 * 8000)
-#define CANON_OUI 0x000085
-
static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
{
struct fw_descriptor *desc;
@@ -229,22 +227,21 @@ void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
/* Use an arbitrary short delay to combine multiple reset requests. */
fw_card_get(card);
- if (!queue_delayed_work(fw_workqueue, &card->br_work,
- delayed ? DIV_ROUND_UP(HZ, 100) : 0))
+ if (!queue_delayed_work(fw_workqueue, &card->br_work, delayed ? msecs_to_jiffies(10) : 0))
fw_card_put(card);
}
EXPORT_SYMBOL(fw_schedule_bus_reset);
static void br_work(struct work_struct *work)
{
- struct fw_card *card = container_of(work, struct fw_card, br_work.work);
+ struct fw_card *card = from_work(card, work, br_work.work);
/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
if (card->reset_jiffies != 0 &&
- time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
+ time_is_after_jiffies64(card->reset_jiffies + secs_to_jiffies(2))) {
trace_bus_reset_postpone(card->index, card->generation, card->br_short);
- if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
+ if (!queue_delayed_work(fw_workqueue, &card->br_work, secs_to_jiffies(2)))
fw_card_put(card);
return;
}
@@ -273,10 +270,6 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation)
fw_device_set_broadcast_channel);
}
-static const char gap_count_table[] = {
- 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
-};
-
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
fw_card_get(card);
@@ -284,222 +277,249 @@ void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
fw_card_put(card);
}
-static void bm_work(struct work_struct *work)
-{
- struct fw_card *card = container_of(work, struct fw_card, bm_work.work);
- struct fw_device *root_device, *irm_device;
- struct fw_node *root_node;
- int root_id, new_root_id, irm_id, bm_id, local_id;
- int gap_count, generation, grace, rcode;
- bool do_reset = false;
- bool root_device_is_running;
- bool root_device_is_cmc;
- bool irm_is_1394_1995_only;
- bool keep_this_irm;
- __be32 transaction_data[2];
-
- spin_lock_irq(&card->lock);
+enum bm_contention_outcome {
+ // The bus management contention window is not expired.
+ BM_CONTENTION_OUTCOME_WITHIN_WINDOW = 0,
+ // The IRM node has link off.
+ BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF,
+	// The IRM node complies with IEEE 1394:1995 only.
+ BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY,
+ // Another bus reset, BM work has been rescheduled.
+ BM_CONTENTION_OUTCOME_AT_NEW_GENERATION,
+ // We have been unable to send the lock request to IRM node due to some local problem.
+ BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION,
+ // The lock request failed, maybe the IRM isn't really IRM capable after all.
+ BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM,
+ // Somebody else is BM.
+ BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM,
+	// The local node won the contention and is now the bus manager.
+ BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM,
+};
- if (card->local_node == NULL) {
- spin_unlock_irq(&card->lock);
- goto out_put_card;
+static enum bm_contention_outcome contend_for_bm(struct fw_card *card)
+__must_hold(&card->lock)
+{
+ int generation = card->generation;
+ int local_id = card->local_node->node_id;
+ __be32 data[2] = {
+ cpu_to_be32(BUS_MANAGER_ID_NOT_REGISTERED),
+ cpu_to_be32(local_id),
+ };
+ bool grace = time_is_before_jiffies64(card->reset_jiffies + msecs_to_jiffies(125));
+ struct fw_node *irm_node;
+ struct fw_device *irm_device;
+ int irm_node_id, irm_device_quirks = 0;
+ int rcode;
+
+ lockdep_assert_held(&card->lock);
+
+ if (!grace) {
+ if (!is_next_generation(generation, card->bm_generation) || card->bm_abdicate)
+ return BM_CONTENTION_OUTCOME_WITHIN_WINDOW;
}
- generation = card->generation;
-
- root_node = card->root_node;
- fw_node_get(root_node);
- root_device = root_node->data;
- root_device_is_running = root_device &&
- atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
- root_device_is_cmc = root_device && root_device->cmc;
+ irm_node = card->irm_node;
+ if (!irm_node->link_on) {
+ fw_notice(card, "IRM has link off, making local node (%02x) root\n", local_id);
+ return BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF;
+ }
- irm_device = card->irm_node->data;
- irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
- (irm_device->config_rom[2] & 0x000000f0) == 0;
+ // NOTE: It is likely that the quirk detection for IRM device has not done yet.
+ irm_device = fw_node_get_device(irm_node);
+ if (irm_device)
+ irm_device_quirks = READ_ONCE(irm_device->quirks);
+ if ((irm_device_quirks & FW_DEVICE_QUIRK_IRM_IS_1394_1995_ONLY) &&
+ !(irm_device_quirks & FW_DEVICE_QUIRK_IRM_IGNORES_BUS_MANAGER)) {
+ fw_notice(card, "IRM is not 1394a compliant, making local node (%02x) root\n",
+ local_id);
+ return BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY;
+ }
- /* Canon MV5i works unreliably if it is not root node. */
- keep_this_irm = irm_device && irm_device->config_rom &&
- irm_device->config_rom[3] >> 8 == CANON_OUI;
+ irm_node_id = irm_node->node_id;
- root_id = root_node->node_id;
- irm_id = card->irm_node->node_id;
- local_id = card->local_node->node_id;
+ spin_unlock_irq(&card->lock);
- grace = time_after64(get_jiffies_64(),
- card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
+ rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, irm_node_id, generation,
+ SCODE_100, CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, data,
+ sizeof(data));
- if ((is_next_generation(generation, card->bm_generation) &&
- !card->bm_abdicate) ||
- (card->bm_generation != generation && grace)) {
- /*
- * This first step is to figure out who is IRM and
- * then try to become bus manager. If the IRM is not
- * well defined (e.g. does not have an active link
- * layer or does not responds to our lock request, we
- * will have to do a little vigilante bus management.
- * In that case, we do a goto into the gap count logic
- * so that when we do the reset, we still optimize the
- * gap count. That could well save a reset in the
- * next generation.
- */
+ spin_lock_irq(&card->lock);
- if (!card->irm_node->link_on) {
- new_root_id = local_id;
- fw_notice(card, "%s, making local node (%02x) root\n",
- "IRM has link off", new_root_id);
- goto pick_me;
+ switch (rcode) {
+ case RCODE_GENERATION:
+ return BM_CONTENTION_OUTCOME_AT_NEW_GENERATION;
+ case RCODE_SEND_ERROR:
+ return BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION;
+ case RCODE_COMPLETE:
+ {
+ int bm_id = be32_to_cpu(data[0]);
+
+		// Used by cdev layer for "struct fw_cdev_event_bus_reset".
+		if (bm_id != BUS_MANAGER_ID_NOT_REGISTERED) {
+			/* node_id = local bus ID (0xffc0) | physical ID */
+			card->bm_node_id = 0xffc0 | bm_id;
+			return BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM;
+		}
+		card->bm_node_id = local_id;
+		return BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM;
+ }
+ default:
+ if (!(irm_device_quirks & FW_DEVICE_QUIRK_IRM_IGNORES_BUS_MANAGER)) {
+ fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
+ fw_rcode_string(rcode), local_id);
+ return BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY;
+ } else {
+ return BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM;
}
+ }
+}
- if (irm_is_1394_1995_only && !keep_this_irm) {
- new_root_id = local_id;
- fw_notice(card, "%s, making local node (%02x) root\n",
- "IRM is not 1394a compliant", new_root_id);
- goto pick_me;
- }
+DEFINE_FREE(node_unref, struct fw_node *, if (_T) fw_node_put(_T))
+DEFINE_FREE(card_unref, struct fw_card *, if (_T) fw_card_put(_T))
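+
+/*
+ * Pointers declared with __free(node_unref) or __free(card_unref) drop
+ * their reference automatically when they go out of scope.
+ */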
- transaction_data[0] = cpu_to_be32(0x3f);
- transaction_data[1] = cpu_to_be32(local_id);
+static void bm_work(struct work_struct *work)
+{
+ static const char gap_count_table[] = {
+ 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
+ };
+ struct fw_card *card __free(card_unref) = from_work(card, work, bm_work.work);
+ struct fw_node *root_node __free(node_unref) = NULL;
+ int root_id, new_root_id, irm_id, local_id;
+ int expected_gap_count, generation;
+ bool stand_for_root = false;
+ spin_lock_irq(&card->lock);
+
+ if (card->local_node == NULL) {
spin_unlock_irq(&card->lock);
+ return;
+ }
- rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
- irm_id, generation, SCODE_100,
- CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
- transaction_data, 8);
+ generation = card->generation;
- if (rcode == RCODE_GENERATION)
- /* Another bus reset, BM work has been rescheduled. */
- goto out;
+ root_node = fw_node_get(card->root_node);
- bm_id = be32_to_cpu(transaction_data[0]);
+ root_id = root_node->node_id;
+ irm_id = card->irm_node->node_id;
+ local_id = card->local_node->node_id;
- scoped_guard(spinlock_irq, &card->lock) {
- if (rcode == RCODE_COMPLETE && generation == card->generation)
- card->bm_node_id =
- bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
- }
+ if (card->bm_generation != generation) {
+ enum bm_contention_outcome result = contend_for_bm(card);
- if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
- /* Somebody else is BM. Only act as IRM. */
- if (local_id == irm_id)
+ switch (result) {
+ case BM_CONTENTION_OUTCOME_WITHIN_WINDOW:
+ spin_unlock_irq(&card->lock);
+ fw_schedule_bm_work(card, msecs_to_jiffies(125));
+ return;
+ case BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF:
+ stand_for_root = true;
+ break;
+ case BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY:
+ stand_for_root = true;
+ break;
+ case BM_CONTENTION_OUTCOME_AT_NEW_GENERATION:
+ // BM work has been rescheduled.
+ spin_unlock_irq(&card->lock);
+ return;
+ case BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION:
+ // Let's try again later and hope that the local problem has gone away by
+ // then.
+ spin_unlock_irq(&card->lock);
+ fw_schedule_bm_work(card, msecs_to_jiffies(125));
+ return;
+ case BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM:
+ // Let's do a bus reset and pick the local node as root, and thus, IRM.
+ stand_for_root = true;
+ break;
+ case BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM:
+ if (local_id == irm_id) {
+ // Only acts as IRM.
+ spin_unlock_irq(&card->lock);
allocate_broadcast_channel(card, generation);
-
- goto out;
+ spin_lock_irq(&card->lock);
+ }
+ fallthrough;
+ case BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM:
+ default:
+ card->bm_generation = generation;
+ break;
}
+ }
- if (rcode == RCODE_SEND_ERROR) {
- /*
- * We have been unable to send the lock request due to
- * some local problem. Let's try again later and hope
- * that the problem has gone away by then.
- */
- fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
- goto out;
- }
-
- spin_lock_irq(&card->lock);
-
- if (rcode != RCODE_COMPLETE && !keep_this_irm) {
- /*
- * The lock request failed, maybe the IRM
- * isn't really IRM capable after all. Let's
- * do a bus reset and pick the local node as
- * root, and thus, IRM.
- */
- new_root_id = local_id;
- fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
- fw_rcode_string(rcode), new_root_id);
- goto pick_me;
+ // We're bus manager for this generation, so next step is to make sure we have an active
+ // cycle master and do gap count optimization.
+ if (!stand_for_root) {
+ if (card->gap_count == GAP_COUNT_MISMATCHED) {
+ // If self IDs have inconsistent gap counts, do a
+ // bus reset ASAP. The config rom read might never
+ // complete, so don't wait for it. However, still
+ // send a PHY configuration packet prior to the
+ // bus reset. The PHY configuration packet might
+ // fail, but 1394-2008 8.4.5.2 explicitly permits
+ // it in this case, so it should be safe to try.
+ stand_for_root = true;
+
+ // We must always send a bus reset if the gap count
+ // is inconsistent, so bypass the 5-reset limit.
+ card->bm_retries = 0;
+ } else {
+ // Now investigate root node.
+ struct fw_device *root_device = fw_node_get_device(root_node);
+
+ if (root_device == NULL) {
+ // Either link_on is false, or we failed to read the
+ // config rom. In either case, pick another root.
+ stand_for_root = true;
+ } else {
+ bool root_device_is_running =
+ atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
+
+ if (!root_device_is_running) {
+ // If we haven't probed this device yet, bail out now
+ // and let's try again once that's done.
+ spin_unlock_irq(&card->lock);
+ return;
+ } else if (!root_device->cmc) {
+ // Current root has an active link layer and we
+ // successfully read the config rom, but it's not
+ // cycle master capable.
+ stand_for_root = true;
+ }
+ }
}
- } else if (card->bm_generation != generation) {
- /*
- * We weren't BM in the last generation, and the last
- * bus reset is less than 125ms ago. Reschedule this job.
- */
- spin_unlock_irq(&card->lock);
- fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
- goto out;
}
- /*
- * We're bus manager for this generation, so next step is to
- * make sure we have an active cycle master and do gap count
- * optimization.
- */
- card->bm_generation = generation;
-
- if (card->gap_count == 0) {
- /*
- * If self IDs have inconsistent gap counts, do a
- * bus reset ASAP. The config rom read might never
- * complete, so don't wait for it. However, still
- * send a PHY configuration packet prior to the
- * bus reset. The PHY configuration packet might
- * fail, but 1394-2008 8.4.5.2 explicitly permits
- * it in this case, so it should be safe to try.
- */
+ if (stand_for_root) {
new_root_id = local_id;
- /*
- * We must always send a bus reset if the gap count
- * is inconsistent, so bypass the 5-reset limit.
- */
- card->bm_retries = 0;
- } else if (root_device == NULL) {
- /*
- * Either link_on is false, or we failed to read the
- * config rom. In either case, pick another root.
- */
- new_root_id = local_id;
- } else if (!root_device_is_running) {
- /*
- * If we haven't probed this device yet, bail out now
- * and let's try again once that's done.
- */
- spin_unlock_irq(&card->lock);
- goto out;
- } else if (root_device_is_cmc) {
- /*
- * We will send out a force root packet for this
- * node as part of the gap count optimization.
- */
- new_root_id = root_id;
} else {
- /*
- * Current root has an active link layer and we
- * successfully read the config rom, but it's not
- * cycle master capable.
- */
- new_root_id = local_id;
+ // We will send out a force root packet for this node as part of the gap count
+ // optimization on behalf of the node.
+ new_root_id = root_id;
}
- pick_me:
/*
* Pick a gap count from 1394a table E-1. The table doesn't cover
* the typically much larger 1394b beta repeater delays though.
*/
if (!card->beta_repeaters_present &&
root_node->max_hops < ARRAY_SIZE(gap_count_table))
- gap_count = gap_count_table[root_node->max_hops];
+ expected_gap_count = gap_count_table[root_node->max_hops];
else
- gap_count = 63;
+ expected_gap_count = 63;
- /*
- * Finally, figure out if we should do a reset or not. If we have
- * done less than 5 resets with the same physical topology and we
- * have either a new root or a new gap count setting, let's do it.
- */
-
- if (card->bm_retries++ < 5 &&
- (card->gap_count != gap_count || new_root_id != root_id))
- do_reset = true;
+ // Finally, figure out if we should do a reset or not. If we have done less than 5 resets
+ // with the same physical topology and we have either a new root or a new gap count
+ // setting, let's do it.
+	if (card->bm_retries++ < 5 &&
+	    (card->gap_count != expected_gap_count || new_root_id != root_id)) {
+ int card_gap_count = card->gap_count;
- spin_unlock_irq(&card->lock);
+ spin_unlock_irq(&card->lock);
- if (do_reset) {
fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
- new_root_id, gap_count);
- fw_send_phy_config(card, new_root_id, generation, gap_count);
+ new_root_id, expected_gap_count);
+ fw_send_phy_config(card, new_root_id, generation, expected_gap_count);
/*
* Where possible, use a short bus reset to minimize
* disruption to isochronous transfers. But in the event
@@ -512,31 +532,27 @@ static void bm_work(struct work_struct *work)
* may treat it as two, causing a gap count inconsistency
* again. Using a long bus reset prevents this.
*/
- reset_bus(card, card->gap_count != 0);
+ reset_bus(card, card_gap_count != 0);
/* Will allocate broadcast channel after the reset. */
- goto out;
- }
+ } else {
+ struct fw_device *root_device = fw_node_get_device(root_node);
- if (root_device_is_cmc) {
- /*
- * Make sure that the cycle master sends cycle start packets.
- */
- transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
- rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
- root_id, generation, SCODE_100,
- CSR_REGISTER_BASE + CSR_STATE_SET,
- transaction_data, 4);
- if (rcode == RCODE_GENERATION)
- goto out;
- }
+ spin_unlock_irq(&card->lock);
- if (local_id == irm_id)
- allocate_broadcast_channel(card, generation);
+ if (root_device && root_device->cmc) {
+ // Make sure that the cycle master sends cycle start packets.
+ __be32 data = cpu_to_be32(CSR_STATE_BIT_CMSTR);
+ int rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
+ root_id, generation, SCODE_100,
+ CSR_REGISTER_BASE + CSR_STATE_SET,
+ &data, sizeof(data));
+ if (rcode == RCODE_GENERATION)
+ return;
+ }
- out:
- fw_node_put(root_node);
- out_put_card:
- fw_card_put(card);
+ if (local_id == irm_id)
+ allocate_broadcast_channel(card, generation);
+ }
}
void fw_card_initialize(struct fw_card *card,
@@ -548,20 +564,26 @@ void fw_card_initialize(struct fw_card *card,
card->index = atomic_inc_return(&index);
card->driver = driver;
card->device = device;
- card->current_tlabel = 0;
- card->tlabel_mask = 0;
- card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
- card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
- card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
- card->split_timeout_jiffies =
- DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
+
+ card->transactions.current_tlabel = 0;
+ card->transactions.tlabel_mask = 0;
+ INIT_LIST_HEAD(&card->transactions.list);
+ spin_lock_init(&card->transactions.lock);
+
+ spin_lock_init(&card->topology_map.lock);
+
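+	/* DEFAULT_SPLIT_TIMEOUT is expressed in isochronous cycles (1/8000 s). */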
+ card->split_timeout.hi = DEFAULT_SPLIT_TIMEOUT / 8000;
+ card->split_timeout.lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
+ card->split_timeout.cycles = DEFAULT_SPLIT_TIMEOUT;
+ card->split_timeout.jiffies = isoc_cycles_to_jiffies(DEFAULT_SPLIT_TIMEOUT);
+ spin_lock_init(&card->split_timeout.lock);
+
card->color = 0;
card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
kref_init(&card->kref);
init_completion(&card->done);
- INIT_LIST_HEAD(&card->transaction_list);
- INIT_LIST_HEAD(&card->phy_receiver_list);
+
spin_lock_init(&card->lock);
card->local_node = NULL;
@@ -571,10 +593,13 @@ void fw_card_initialize(struct fw_card *card,
}
EXPORT_SYMBOL(fw_card_initialize);
+DEFINE_FREE(workqueue_destroy, struct workqueue_struct *, if (_T) destroy_workqueue(_T))
+
int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
unsigned int supported_isoc_contexts)
{
- struct workqueue_struct *isoc_wq;
+ struct workqueue_struct *isoc_wq __free(workqueue_destroy) = NULL;
+ struct workqueue_struct *async_wq __free(workqueue_destroy) = NULL;
int ret;
// This workqueue should be:
@@ -595,22 +620,41 @@ int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
if (!isoc_wq)
return -ENOMEM;
+ // This workqueue should be:
+ // * != WQ_BH Sleepable.
+ // * == WQ_UNBOUND Any core can process data for asynchronous context.
+ // * == WQ_MEM_RECLAIM Used for any backend of block device.
+ // * == WQ_FREEZABLE The target device would not be available while frozen.
+ // * == WQ_HIGHPRI High priority to process semi-realtime timestamped data.
+ // * == WQ_SYSFS Parameters are available via sysfs.
+ // * max_active == 4 A hardIRQ could notify events for a pair of request and
+ // response AR/AT contexts.
+ async_wq = alloc_workqueue("firewire-async-card%u",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
+ 4, card->index);
+ if (!async_wq)
+ return -ENOMEM;
+
+ card->isoc_wq = isoc_wq;
+ card->async_wq = async_wq;
card->max_receive = max_receive;
card->link_speed = link_speed;
card->guid = guid;
- guard(mutex)(&card_mutex);
+ scoped_guard(mutex, &card_mutex) {
+ generate_config_rom(card, tmp_config_rom);
+ ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
+ if (ret < 0) {
+ card->isoc_wq = NULL;
+ card->async_wq = NULL;
+ return ret;
+ }
+ retain_and_null_ptr(isoc_wq);
+ retain_and_null_ptr(async_wq);
- generate_config_rom(card, tmp_config_rom);
- ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
- if (ret < 0) {
- destroy_workqueue(isoc_wq);
- return ret;
+ list_add_tail(&card->link, &card_list);
}
- card->isoc_wq = isoc_wq;
- list_add_tail(&card->link, &card_list);
-
return 0;
}
EXPORT_SYMBOL(fw_card_add);
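
The error paths above lean on scope-based cleanup: each workqueue is owned by its __free(workqueue_destroy) pointer until retain_and_null_ptr() hands ownership to the card. Below is a minimal userspace model of that idiom using the same GCC/Clang cleanup attribute the kernel macros expand to; every name in it is invented for illustration.

/* Userspace model of DEFINE_FREE()/__free()/retain_and_null_ptr().
 * Build: cc -o cleanup_demo cleanup_demo.c
 */
#include <stdio.h>
#include <stdlib.h>

static void free_buf(char **p)
{
	if (*p) {
		puts("cleanup: freeing buffer");
		free(*p);
	}
}

/* Transfer ownership out of the scope: cleanup sees NULL and does nothing. */
static char *retain(char **p)
{
	char *tmp = *p;

	*p = NULL;
	return tmp;
}

static char *make_buffer(int fail)
{
	char *buf __attribute__((cleanup(free_buf))) = malloc(64);

	if (!buf || fail)
		return NULL;	/* buf is freed automatically on this path */

	return retain(&buf);	/* success: the caller now owns the buffer */
}

int main(void)
{
	char *ok = make_buffer(0);
	char *bad = make_buffer(1);	/* prints "cleanup: freeing buffer" */

	printf("ok=%p bad=%p\n", (void *)ok, (void *)bad);
	free(ok);
	return 0;
}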
@@ -742,8 +786,13 @@ void fw_core_remove_card(struct fw_card *card)
/* Switch off most of the card driver interface. */
dummy_driver.free_iso_context = card->driver->free_iso_context;
dummy_driver.stop_iso = card->driver->stop_iso;
+ dummy_driver.disable = card->driver->disable;
card->driver = &dummy_driver;
+
drain_workqueue(card->isoc_wq);
+ drain_workqueue(card->async_wq);
+ card->driver->disable(card);
+ fw_cancel_pending_transactions(card);
scoped_guard(spinlock_irqsave, &card->lock)
fw_destroy_nodes(card);
@@ -753,8 +802,9 @@ void fw_core_remove_card(struct fw_card *card)
wait_for_completion(&card->done);
destroy_workqueue(card->isoc_wq);
+ destroy_workqueue(card->async_wq);
- WARN_ON(!list_empty(&card->transaction_list));
+ WARN_ON(!list_empty(&card->transactions.list));
}
EXPORT_SYMBOL(fw_core_remove_card);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index bd04980009a4..49dc1612c691 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -41,12 +41,15 @@
/*
* ABI version history is documented in linux/firewire-cdev.h.
*/
-#define FW_CDEV_KERNEL_VERSION 5
+#define FW_CDEV_KERNEL_VERSION 6
#define FW_CDEV_VERSION_EVENT_REQUEST2 4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
#define FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP 6
+static DEFINE_SPINLOCK(phy_receiver_list_lock);
+static LIST_HEAD(phy_receiver_list);
+
struct client {
u32 version;
struct fw_device *device;
@@ -937,11 +940,12 @@ static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
if (a->length > 256)
return -EINVAL;
- r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
+ r = kmalloc(struct_size(r, data, a->length), GFP_KERNEL);
if (r == NULL)
return -ENOMEM;
- if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
+ if (copy_from_user(r->data, u64_to_uptr(a->data),
+ flex_array_size(r, data, a->length))) {
ret = -EFAULT;
goto failed;
}
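
The struct_size()/flex_array_size() conversion replaces the open-coded "sizeof(*r) + a->length * 4" arithmetic with overflow-checked sizing. A self-contained sketch of the same calculation, with a hand-rolled guard standing in for the kernel macros:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct descriptor {
	size_t length;
	uint32_t data[];	/* flexible array member */
};

/* Returns SIZE_MAX on overflow, mirroring the kernel helpers' behavior. */
static size_t sized_alloc(size_t count, size_t elem, size_t header)
{
	if (count && elem > (SIZE_MAX - header) / count)
		return SIZE_MAX;
	return header + count * elem;
}

int main(void)
{
	size_t n = 256;
	size_t bytes = sized_alloc(n, sizeof(uint32_t), sizeof(struct descriptor));
	struct descriptor *r;

	if (bytes == SIZE_MAX)
		return 1;
	r = malloc(bytes);
	if (!r)
		return 1;
	r->length = n;
	printf("allocated %zu bytes for %zu quadlets\n", bytes, n);
	free(r);
	return 0;
}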
@@ -1313,8 +1317,7 @@ static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
static void iso_resource_work(struct work_struct *work)
{
struct iso_resource_event *e;
- struct iso_resource *r =
- container_of(work, struct iso_resource, work.work);
+ struct iso_resource *r = from_work(r, work, work.work);
struct client *client = r->client;
unsigned long index = r->resource.handle;
int generation, channel, bandwidth, todo;
@@ -1325,8 +1328,8 @@ static void iso_resource_work(struct work_struct *work)
todo = r->todo;
// Allow 1000ms grace period for other reallocations.
if (todo == ISO_RES_ALLOC &&
- time_before64(get_jiffies_64(), client->device->card->reset_jiffies + HZ)) {
- schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
+ time_is_after_jiffies64(client->device->card->reset_jiffies + secs_to_jiffies(1))) {
+ schedule_iso_resource(r, msecs_to_jiffies(333));
skip = true;
} else {
// We could be called twice within the same generation.
@@ -1670,15 +1673,16 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
- struct fw_card *card = client->device->card;
/* Access policy: Allow this ioctl only on local nodes' device files. */
if (!client->device->is_local)
return -ENOSYS;
- guard(spinlock_irq)(&card->lock);
+ // NOTE: This can be without irq when we can guarantee that __fw_send_request() for local
+ // destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irq, &phy_receiver_list_lock)
+ list_move_tail(&client->phy_receiver_link, &phy_receiver_list);
- list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
client->phy_receiver_closure = a->closure;
return 0;
@@ -1688,10 +1692,17 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
struct client *client;
- guard(spinlock_irqsave)(&card->lock);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for local
+ // destination never runs in any type of IRQ context.
+ guard(spinlock_irqsave)(&phy_receiver_list_lock);
+
+ list_for_each_entry(client, &phy_receiver_list, phy_receiver_link) {
+ struct inbound_phy_packet_event *e;
+
+ if (client->device->card != card)
+ continue;
- list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
- struct inbound_phy_packet_event *e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
+ e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
if (e == NULL)
break;
@@ -1858,7 +1869,9 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
struct client_resource *resource;
unsigned long index;
- scoped_guard(spinlock_irq, &client->device->card->lock)
+ // NOTE: This can be without irq when we can guarantee that __fw_send_request() for local
+ // destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irq, &phy_receiver_list_lock)
list_del(&client->phy_receiver_link);
scoped_guard(mutex, &client->device->client_list_mutex)
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index ec3e21ad2025..9b0080397154 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -542,8 +542,83 @@ static struct device_attribute fw_device_attributes[] = {
__ATTR_NULL,
};
-static int read_rom(struct fw_device *device,
- int generation, int index, u32 *data)
+#define CANON_OUI 0x000085
+
+static int detect_quirks_by_bus_information_block(const u32 *bus_information_block)
+{
+ int quirks = 0;
+
+ if ((bus_information_block[2] & 0x000000f0) == 0)
+ quirks |= FW_DEVICE_QUIRK_IRM_IS_1394_1995_ONLY;
+
+ if ((bus_information_block[3] >> 8) == CANON_OUI)
+ quirks |= FW_DEVICE_QUIRK_IRM_IGNORES_BUS_MANAGER;
+
+ return quirks;
+}
+
+struct entry_match {
+ unsigned int index;
+ u32 value;
+};
+
+static const struct entry_match motu_audio_express_matches[] = {
+ { 1, 0x030001f2 },
+ { 3, 0xd1000002 },
+ { 4, 0x8d000005 },
+ { 6, 0x120001f2 },
+ { 7, 0x13000033 },
+ { 8, 0x17104800 },
+};
+
+static const struct entry_match tascam_fw_series_matches[] = {
+ { 1, 0x0300022e },
+ { 3, 0x8d000006 },
+ { 4, 0xd1000001 },
+ { 6, 0x1200022e },
+ { 8, 0xd4000004 },
+};
+
+static int detect_quirks_by_root_directory(const u32 *root_directory, unsigned int length)
+{
+ static const struct {
+ enum fw_device_quirk quirk;
+ const struct entry_match *matches;
+ unsigned int match_count;
+ } *entry, entries[] = {
+ {
+ .quirk = FW_DEVICE_QUIRK_ACK_PACKET_WITH_INVALID_PENDING_CODE,
+ .matches = motu_audio_express_matches,
+ .match_count = ARRAY_SIZE(motu_audio_express_matches),
+ },
+ {
+ .quirk = FW_DEVICE_QUIRK_UNSTABLE_AT_S400,
+ .matches = tascam_fw_series_matches,
+ .match_count = ARRAY_SIZE(tascam_fw_series_matches),
+ },
+ };
+ int quirks = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(entries); ++i) {
+ int j;
+
+ entry = entries + i;
+ for (j = 0; j < entry->match_count; ++j) {
+ unsigned int index = entry->matches[j].index;
+ unsigned int value = entry->matches[j].value;
+
+ if ((index >= length) || (root_directory[index] != value))
+ break;
+ }
+ if (j == entry->match_count)
+ quirks |= entry->quirk;
+ }
+
+ return quirks;
+}
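
A compilable model of this table-driven matching, exercised against a fabricated root directory; the quirk bit and the ROM values are invented for the demo and do not correspond to a real device:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define QUIRK_DEVICE_A (1 << 0)

struct entry_match { unsigned int index; uint32_t value; };

static const struct entry_match device_a_matches[] = {
	{ 1, 0x030001f2 },
	{ 3, 0xd1000002 },
};

static int detect_quirks(const uint32_t *dir, unsigned int length)
{
	unsigned int j;
	int quirks = 0;

	for (j = 0; j < ARRAY_SIZE(device_a_matches); ++j) {
		unsigned int index = device_a_matches[j].index;

		/* Every listed entry must be in range and match exactly. */
		if (index >= length || dir[index] != device_a_matches[j].value)
			break;
	}
	if (j == ARRAY_SIZE(device_a_matches))
		quirks |= QUIRK_DEVICE_A;
	return quirks;
}

int main(void)
{
	const uint32_t dir[] = { 0, 0x030001f2, 0, 0xd1000002, 0 };

	printf("quirks=%#x\n", detect_quirks(dir, ARRAY_SIZE(dir)));
	return 0;
}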
+
+static int read_rom(struct fw_device *device, int generation, int speed, int index, u32 *data)
{
u64 offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4;
int i, rcode;
@@ -554,7 +629,7 @@ static int read_rom(struct fw_device *device,
for (i = 10; i < 100; i += 10) {
rcode = fw_run_transaction(device->card,
TCODE_READ_QUADLET_REQUEST, device->node_id,
- generation, device->max_speed, offset, data, 4);
+ generation, speed, offset, data, 4);
if (rcode != RCODE_BUSY)
break;
msleep(i);
@@ -578,10 +653,11 @@ static int read_rom(struct fw_device *device,
static int read_config_rom(struct fw_device *device, int generation)
{
struct fw_card *card = device->card;
- const u32 *old_rom, *new_rom;
- u32 *rom, *stack;
+ const u32 *new_rom, *old_rom __free(kfree) = NULL;
+ u32 *stack, *rom __free(kfree) = NULL;
u32 sp, key;
- int i, end, length, ret;
+ int i, end, length, ret, speed;
+ int quirks;
rom = kmalloc(sizeof(*rom) * MAX_CONFIG_ROM_SIZE +
sizeof(*stack) * MAX_CONFIG_ROM_SIZE, GFP_KERNEL);
@@ -591,13 +667,13 @@ static int read_config_rom(struct fw_device *device, int generation)
stack = &rom[MAX_CONFIG_ROM_SIZE];
memset(rom, 0, sizeof(*rom) * MAX_CONFIG_ROM_SIZE);
- device->max_speed = SCODE_100;
+ speed = SCODE_100;
/* First read the bus info block. */
for (i = 0; i < 5; i++) {
- ret = read_rom(device, generation, i, &rom[i]);
+ ret = read_rom(device, generation, speed, i, &rom[i]);
if (ret != RCODE_COMPLETE)
- goto out;
+ return ret;
/*
* As per IEEE1212 7.2, during initialization, devices can
* reply with a 0 for the first quadlet of the config
@@ -606,39 +682,14 @@ static int read_config_rom(struct fw_device *device, int generation)
* harddisk). In that case we just fail, and the
* retry mechanism will try again later.
*/
- if (i == 0 && rom[i] == 0) {
- ret = RCODE_BUSY;
- goto out;
- }
+ if (i == 0 && rom[i] == 0)
+ return RCODE_BUSY;
}
- device->max_speed = device->node->max_speed;
-
- /*
- * Determine the speed of
- * - devices with link speed less than PHY speed,
- * - devices with 1394b PHY (unless only connected to 1394a PHYs),
- * - all devices if there are 1394b repeaters.
- * Note, we cannot use the bus info block's link_spd as starting point
- * because some buggy firmwares set it lower than necessary and because
- * 1394-1995 nodes do not have the field.
- */
- if ((rom[2] & 0x7) < device->max_speed ||
- device->max_speed == SCODE_BETA ||
- card->beta_repeaters_present) {
- u32 dummy;
-
- /* for S1600 and S3200 */
- if (device->max_speed == SCODE_BETA)
- device->max_speed = card->link_speed;
+ quirks = detect_quirks_by_bus_information_block(rom);
- while (device->max_speed > SCODE_100) {
- if (read_rom(device, generation, 0, &dummy) ==
- RCODE_COMPLETE)
- break;
- device->max_speed--;
- }
- }
+ // Just to prevent torn writes/reads.
+ WRITE_ONCE(device->quirks, quirks);
/*
* Now parse the config rom. The config rom is a recursive
@@ -659,15 +710,13 @@ static int read_config_rom(struct fw_device *device, int generation)
*/
key = stack[--sp];
i = key & 0xffffff;
- if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE)) {
- ret = -ENXIO;
- goto out;
- }
+ if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE))
+ return -ENXIO;
/* Read header quadlet for the block to get the length. */
- ret = read_rom(device, generation, i, &rom[i]);
+ ret = read_rom(device, generation, speed, i, &rom[i]);
if (ret != RCODE_COMPLETE)
- goto out;
+ return ret;
end = i + (rom[i] >> 16) + 1;
if (end > MAX_CONFIG_ROM_SIZE) {
/*
@@ -689,9 +738,9 @@ static int read_config_rom(struct fw_device *device, int generation)
* it references another block, and push it in that case.
*/
for (; i < end; i++) {
- ret = read_rom(device, generation, i, &rom[i]);
+ ret = read_rom(device, generation, speed, i, &rom[i]);
if (ret != RCODE_COMPLETE)
- goto out;
+ return ret;
if ((key >> 30) != 3 || (rom[i] >> 30) < 2)
continue;
@@ -716,27 +765,54 @@ static int read_config_rom(struct fw_device *device, int generation)
length = i;
}
+ quirks |= detect_quirks_by_root_directory(rom + ROOT_DIR_OFFSET, length - ROOT_DIR_OFFSET);
+
+ // Just to prevent torn writes/reads.
+ WRITE_ONCE(device->quirks, quirks);
+
+ if (unlikely(quirks & FW_DEVICE_QUIRK_UNSTABLE_AT_S400))
+ speed = SCODE_200;
+ else
+ speed = device->node->max_speed;
+
+ // Determine the speed of
+ // - devices with link speed less than PHY speed,
+ // - devices with 1394b PHY (unless only connected to 1394a PHYs),
+ // - all devices if there are 1394b repeaters.
+ // Note, we cannot use the bus info block's link_spd as starting point because some buggy
+ // firmwares set it lower than necessary and because 1394-1995 nodes do not have the field.
+ if ((rom[2] & 0x7) < speed || speed == SCODE_BETA || card->beta_repeaters_present) {
+ u32 dummy;
+
+ // for S1600 and S3200.
+ if (speed == SCODE_BETA)
+ speed = card->link_speed;
+
+ while (speed > SCODE_100) {
+ if (read_rom(device, generation, speed, 0, &dummy) ==
+ RCODE_COMPLETE)
+ break;
+ --speed;
+ }
+ }
+
+ device->max_speed = speed;
+
old_rom = device->config_rom;
new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
- if (new_rom == NULL) {
- ret = -ENOMEM;
- goto out;
- }
+ if (new_rom == NULL)
+ return -ENOMEM;
scoped_guard(rwsem_write, &fw_device_rwsem) {
device->config_rom = new_rom;
device->config_rom_length = length;
}
- kfree(old_rom);
- ret = RCODE_COMPLETE;
device->max_rec = rom[2] >> 12 & 0xf;
device->cmc = rom[2] >> 30 & 1;
device->irmc = rom[2] >> 31 & 1;
- out:
- kfree(rom);
- return ret;
+ return RCODE_COMPLETE;
}
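
With this refactoring, the probe speed stays in a local variable and device->max_speed is published only once the loop settles. The downgrade loop itself reduces to the following sketch, where a stub stands in for read_rom():

#include <stdio.h>

enum { SCODE_100, SCODE_200, SCODE_400, SCODE_800 };

static int try_read_quadlet(int speed)
{
	return speed <= SCODE_200;	/* pretend the device is flaky above S200 */
}

static int probe_max_speed(int candidate)
{
	/* Step down one speed code at a time until a read succeeds. */
	while (candidate > SCODE_100) {
		if (try_read_quadlet(candidate))
			break;
		--candidate;
	}
	return candidate;
}

int main(void)
{
	printf("negotiated speed code: %d\n", probe_max_speed(SCODE_800));
	return 0;
}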
static void fw_unit_release(struct device *dev)
@@ -847,17 +923,15 @@ static void fw_schedule_device_work(struct fw_device *device,
*/
#define MAX_RETRIES 10
-#define RETRY_DELAY (3 * HZ)
-#define INITIAL_DELAY (HZ / 2)
-#define SHUTDOWN_DELAY (2 * HZ)
+#define RETRY_DELAY secs_to_jiffies(3)
+#define INITIAL_DELAY msecs_to_jiffies(500)
+#define SHUTDOWN_DELAY secs_to_jiffies(2)
static void fw_device_shutdown(struct work_struct *work)
{
- struct fw_device *device =
- container_of(work, struct fw_device, work.work);
+ struct fw_device *device = from_work(device, work, work.work);
- if (time_before64(get_jiffies_64(),
- device->card->reset_jiffies + SHUTDOWN_DELAY)
+ if (time_is_after_jiffies64(device->card->reset_jiffies + SHUTDOWN_DELAY)
&& !list_empty(&device->card->link)) {
fw_schedule_device_work(device, SHUTDOWN_DELAY);
return;
@@ -888,7 +962,7 @@ static void fw_device_release(struct device *dev)
* bus manager work looks at this node.
*/
scoped_guard(spinlock_irqsave, &card->lock)
- device->node->data = NULL;
+ fw_node_set_device(device->node, NULL);
fw_node_put(device->node);
kfree(device->config_rom);
@@ -921,8 +995,7 @@ static int update_unit(struct device *dev, void *data)
static void fw_device_update(struct work_struct *work)
{
- struct fw_device *device =
- container_of(work, struct fw_device, work.work);
+ struct fw_device *device = from_work(device, work, work.work);
fw_device_cdev_update(device);
device_for_each_child(&device->device, NULL, update_unit);
@@ -1002,15 +1075,14 @@ static int compare_configuration_rom(struct device *dev, const void *data)
static void fw_device_init(struct work_struct *work)
{
- struct fw_device *device =
- container_of(work, struct fw_device, work.work);
+ struct fw_device *device = from_work(device, work, work.work);
struct fw_card *card = device->card;
struct device *found;
u32 minor;
int ret;
/*
- * All failure paths here set node->data to NULL, so that we
+ * All failure paths here call fw_node_set_device(node, NULL), so that we
* don't try to do device_for_each_child() on a kfree()'d
* device.
*/
@@ -1054,9 +1126,9 @@ static void fw_device_init(struct work_struct *work)
struct fw_node *obsolete_node = reused->node;
device->node = obsolete_node;
- device->node->data = device;
+ fw_node_set_device(device->node, device);
reused->node = current_node;
- reused->node->data = reused;
+ fw_node_set_device(reused->node, reused);
reused->max_speed = device->max_speed;
reused->node_id = current_node->node_id;
@@ -1126,10 +1198,10 @@ static void fw_device_init(struct work_struct *work)
device->workfn = fw_device_shutdown;
fw_schedule_device_work(device, SHUTDOWN_DELAY);
} else {
- fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
+ fw_notice(card, "created device %s: GUID %08x%08x, S%d00, quirks %08x\n",
dev_name(&device->device),
device->config_rom[3], device->config_rom[4],
- 1 << device->max_speed);
+ 1 << device->max_speed, device->quirks);
device->config_rom_retries = 0;
set_broadcast_channel(device, device->generation);
@@ -1164,7 +1236,7 @@ static int reread_config_rom(struct fw_device *device, int generation,
int i, rcode;
for (i = 0; i < 6; i++) {
- rcode = read_rom(device, generation, i, &q);
+ rcode = read_rom(device, generation, device->max_speed, i, &q);
if (rcode != RCODE_COMPLETE)
return rcode;
@@ -1184,8 +1256,7 @@ static int reread_config_rom(struct fw_device *device, int generation,
static void fw_device_refresh(struct work_struct *work)
{
- struct fw_device *device =
- container_of(work, struct fw_device, work.work);
+ struct fw_device *device = from_work(device, work, work.work);
struct fw_card *card = device->card;
int ret, node_id = device->node_id;
bool changed;
@@ -1251,8 +1322,7 @@ static void fw_device_refresh(struct work_struct *work)
static void fw_device_workfn(struct work_struct *work)
{
- struct fw_device *device = container_of(to_delayed_work(work),
- struct fw_device, work);
+ struct fw_device *device = from_work(device, to_delayed_work(work), work);
device->workfn(work);
}
@@ -1297,7 +1367,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
* FW_NODE_UPDATED callbacks can update the node_id
* and generation for the device.
*/
- node->data = device;
+ fw_node_set_device(node, device);
/*
* Many devices are slow to respond after bus resets,
@@ -1312,7 +1382,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
case FW_NODE_INITIATED_RESET:
case FW_NODE_LINK_ON:
- device = node->data;
+ device = fw_node_get_device(node);
if (device == NULL)
goto create;
@@ -1329,7 +1399,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
break;
case FW_NODE_UPDATED:
- device = node->data;
+ device = fw_node_get_device(node);
if (device == NULL)
break;
@@ -1344,7 +1414,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
case FW_NODE_DESTROYED:
case FW_NODE_LINK_OFF:
- if (!node->data)
+ if (!fw_node_get_device(node))
break;
/*
@@ -1359,7 +1429,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
* the device in shutdown state to have that code fail
* to create the device.
*/
- device = node->data;
+ device = fw_node_get_device(node);
if (atomic_xchg(&device->state,
FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
device->workfn = fw_device_shutdown;
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 74a6aa7d8cc9..ed3ae8cdb0cd 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -241,7 +241,7 @@ static struct fw_node *build_tree(struct fw_card *card, const u32 *sid, int self
// If PHYs report different gap counts, set an invalid count which will force a gap
// count reconfiguration and a reset.
if (phy_packet_self_id_zero_get_gap_count(self_id_sequence[0]) != gap_count)
- gap_count = 0;
+ gap_count = GAP_COUNT_MISMATCHED;
update_hop_count(node);
@@ -325,9 +325,11 @@ static void report_found_node(struct fw_card *card,
card->bm_retries = 0;
}
-/* Must be called with card->lock held */
void fw_destroy_nodes(struct fw_card *card)
+__must_hold(&card->lock)
{
+ lockdep_assert_held(&card->lock);
+
card->color++;
if (card->local_node != NULL)
for_each_fw_node(card, card->local_node, report_lost_node);
@@ -435,20 +437,23 @@ static void update_tree(struct fw_card *card, struct fw_node *root)
}
}
-static void update_topology_map(struct fw_card *card,
- u32 *self_ids, int self_id_count)
+static void update_topology_map(__be32 *buffer, size_t buffer_size, int root_node_id,
+ const u32 *self_ids, int self_id_count)
{
- int node_count = (card->root_node->node_id & 0x3f) + 1;
- __be32 *map = card->topology_map;
+ __be32 *map = buffer;
+ u32 next_generation = be32_to_cpu(buffer[1]) + 1;
+ int node_count = (root_node_id & 0x3f) + 1;
+
+ memset(map, 0, buffer_size);
*map++ = cpu_to_be32((self_id_count + 2) << 16);
- *map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1);
+ *map++ = cpu_to_be32(next_generation);
*map++ = cpu_to_be32((node_count << 16) | self_id_count);
while (self_id_count--)
*map++ = cpu_to_be32p(self_ids++);
- fw_compute_block_crc(card->topology_map);
+ fw_compute_block_crc(buffer);
}
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
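
For reference, the TOPOLOGY_MAP image that update_topology_map() assembles can be modeled in plain C as below; byte-order conversion and the real fw_compute_block_crc() are deliberately omitted, so this is a layout sketch rather than the kernel routine:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void build_topology_map(uint32_t *map, size_t words, int root_node_id,
			       const uint32_t *self_ids, int self_id_count)
{
	uint32_t next_generation = map[1] + 1;	/* read before clearing */
	int node_count = (root_node_id & 0x3f) + 1;
	int i;

	memset(map, 0, words * sizeof(*map));
	map[0] = (uint32_t)(self_id_count + 2) << 16;	/* length; CRC fills low bits */
	map[1] = next_generation;
	map[2] = ((uint32_t)node_count << 16) | self_id_count;
	for (i = 0; i < self_id_count; ++i)
		map[3 + i] = self_ids[i];
	/* fw_compute_block_crc(map) would compute the CRC over the block. */
}

int main(void)
{
	uint32_t map[8] = { 0, 41, 0 };	/* previous generation was 41 */
	uint32_t self_ids[] = { 0x807f8000, 0x817f8000 };

	build_topology_map(map, 8, 0x3f01, self_ids, 2);
	printf("len=%u gen=%u nodes=%u\n", (unsigned)(map[0] >> 16),
	       (unsigned)map[1], (unsigned)(map[2] >> 16));
	return 0;
}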
@@ -458,46 +463,45 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
trace_bus_reset_handle(card->index, generation, node_id, bm_abdicate, self_ids, self_id_count);
- guard(spinlock_irqsave)(&card->lock);
-
- /*
- * If the selfID buffer is not the immediate successor of the
- * previously processed one, we cannot reliably compare the
- * old and new topologies.
- */
- if (!is_next_generation(generation, card->generation) &&
- card->local_node != NULL) {
- fw_destroy_nodes(card);
- card->bm_retries = 0;
+ scoped_guard(spinlock, &card->lock) {
+ // If the selfID buffer is not the immediate successor of the
+ // previously processed one, we cannot reliably compare the
+ // old and new topologies.
+ if (!is_next_generation(generation, card->generation) && card->local_node != NULL) {
+ fw_destroy_nodes(card);
+ card->bm_retries = 0;
+ }
+ card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
+ card->node_id = node_id;
+ // Update node_id before generation to prevent anybody from using
+ // a stale node_id together with a current generation.
+ smp_wmb();
+ card->generation = generation;
+ card->reset_jiffies = get_jiffies_64();
+ card->bm_node_id = 0xffff;
+ card->bm_abdicate = bm_abdicate;
+
+ local_node = build_tree(card, self_ids, self_id_count, generation);
+
+ card->color++;
+
+ if (local_node == NULL) {
+ fw_err(card, "topology build failed\n");
+ // FIXME: We need to issue a bus reset in this case.
+ } else if (card->local_node == NULL) {
+ card->local_node = local_node;
+ for_each_fw_node(card, local_node, report_found_node);
+ } else {
+ update_tree(card, local_node);
+ }
}
- card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
- card->node_id = node_id;
- /*
- * Update node_id before generation to prevent anybody from using
- * a stale node_id together with a current generation.
- */
- smp_wmb();
- card->generation = generation;
- card->reset_jiffies = get_jiffies_64();
- card->bm_node_id = 0xffff;
- card->bm_abdicate = bm_abdicate;
fw_schedule_bm_work(card, 0);
- local_node = build_tree(card, self_ids, self_id_count, generation);
-
- update_topology_map(card, self_ids, self_id_count);
-
- card->color++;
-
- if (local_node == NULL) {
- fw_err(card, "topology build failed\n");
- /* FIXME: We need to issue a bus reset in this case. */
- } else if (card->local_node == NULL) {
- card->local_node = local_node;
- for_each_fw_node(card, local_node, report_found_node);
- } else {
- update_tree(card, local_node);
+ // Only used by the transaction layer.
+ scoped_guard(spinlock, &card->topology_map.lock) {
+ update_topology_map(card->topology_map.buffer, sizeof(card->topology_map.buffer),
+ card->root_node->node_id, self_ids, self_id_count);
}
}
EXPORT_SYMBOL(fw_core_handle_bus_reset);
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 2bd5deb9054e..7fea11a5e359 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -44,26 +44,68 @@ static int try_cancel_split_timeout(struct fw_transaction *t)
return 1;
}
-static int close_transaction(struct fw_transaction *transaction, struct fw_card *card, int rcode,
- u32 response_tstamp)
+// card->transactions.lock must be acquired in advance.
+static void remove_transaction_entry(struct fw_card *card, struct fw_transaction *entry)
{
- struct fw_transaction *t = NULL, *iter;
+ list_del_init(&entry->link);
+ card->transactions.tlabel_mask &= ~(1ULL << entry->tlabel);
+}
- scoped_guard(spinlock_irqsave, &card->lock) {
- list_for_each_entry(iter, &card->transaction_list, link) {
- if (iter == transaction) {
- if (try_cancel_split_timeout(iter)) {
- list_del_init(&iter->link);
- card->tlabel_mask &= ~(1ULL << iter->tlabel);
- t = iter;
- }
- break;
- }
+// Must be called without holding card->transactions.lock.
+void fw_cancel_pending_transactions(struct fw_card *card)
+{
+ struct fw_transaction *t, *tmp;
+ LIST_HEAD(pending_list);
+
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock) {
+ list_for_each_entry_safe(t, tmp, &card->transactions.list, link) {
+ if (try_cancel_split_timeout(t))
+ list_move(&t->link, &pending_list);
+ }
+ }
+
+ list_for_each_entry_safe(t, tmp, &pending_list, link) {
+ list_del(&t->link);
+
+ if (!t->with_tstamp) {
+ t->callback.without_tstamp(card, RCODE_CANCELLED, NULL, 0,
+ t->callback_data);
+ } else {
+ t->callback.with_tstamp(card, RCODE_CANCELLED, t->packet.timestamp, 0,
+ NULL, 0, t->callback_data);
}
}
+}
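
fw_cancel_pending_transactions() is an instance of a common pattern: detach entries to a private list under the lock, then invoke callbacks with no lock held. A condensed userspace model (the real code additionally filters each entry through try_cancel_split_timeout()); build with -lpthread:

#include <pthread.h>
#include <stdio.h>

struct txn {
	struct txn *next;
	int tlabel;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct txn *pending;

static void cancel_all(void (*cb)(struct txn *))
{
	struct txn *local, *t;

	/* Detach the whole list while holding the lock... */
	pthread_mutex_lock(&lock);
	local = pending;
	pending = NULL;
	pthread_mutex_unlock(&lock);

	/* ...then run the callbacks with no lock held, so they may sleep
	 * or re-enter the submission path without deadlocking. */
	while ((t = local)) {
		local = t->next;
		cb(t);
	}
}

static void report(struct txn *t)
{
	printf("cancelled tlabel %d\n", t->tlabel);
}

int main(void)
{
	struct txn a = { NULL, 7 }, b = { &a, 9 };

	pending = &b;
	cancel_all(report);
	return 0;
}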
- if (!t)
- return -ENOENT;
+// card->transactions.lock must be acquired in advance.
+#define find_and_pop_transaction_entry(card, condition) \
+({ \
+ struct fw_transaction *iter, *t = NULL; \
+ list_for_each_entry(iter, &card->transactions.list, link) { \
+ if (condition) { \
+ t = iter; \
+ break; \
+ } \
+ } \
+ if (t && try_cancel_split_timeout(t)) \
+ remove_transaction_entry(card, t); \
+ t; \
+})
+
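
find_and_pop_transaction_entry() is a GNU statement-expression macro, so the condition argument can textually reference the loop cursor iter and the ({ ... }) block yields the match as its value. The same trick in a standalone program (compiles with GCC/Clang extensions):

#include <stdio.h>

struct item { struct item *next; int key; };

#define find_first(head, condition)		\
({						\
	struct item *it, *found = NULL;		\
	for (it = (head); it; it = it->next) {	\
		if (condition) {		\
			found = it;		\
			break;			\
		}				\
	}					\
	found;					\
})

int main(void)
{
	struct item c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
	struct item *hit = find_first(&a, it->key == 2);

	printf("found key %d\n", hit ? hit->key : -1);
	return 0;
}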
+static int close_transaction(struct fw_transaction *transaction, struct fw_card *card, int rcode,
+ u32 response_tstamp)
+{
+ struct fw_transaction *t;
+
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock) {
+ t = find_and_pop_transaction_entry(card, iter == transaction);
+ if (!t)
+ return -ENOENT;
+ }
if (!t->with_tstamp) {
t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data);
@@ -117,11 +159,10 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
struct fw_transaction *t = timer_container_of(t, timer, split_timeout_timer);
struct fw_card *card = t->card;
- scoped_guard(spinlock_irqsave, &card->lock) {
+ scoped_guard(spinlock_irqsave, &card->transactions.lock) {
if (list_empty(&t->link))
return;
- list_del(&t->link);
- card->tlabel_mask &= ~(1ULL << t->tlabel);
+ remove_transaction_entry(card, t);
}
if (!t->with_tstamp) {
@@ -135,14 +176,18 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
static void start_split_transaction_timeout(struct fw_transaction *t,
struct fw_card *card)
{
- guard(spinlock_irqsave)(&card->lock);
+ unsigned long delta;
if (list_empty(&t->link) || WARN_ON(t->is_split_transaction))
return;
t->is_split_transaction = true;
- mod_timer(&t->split_timeout_timer,
- jiffies + card->split_timeout_jiffies);
+
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock)
+ delta = card->split_timeout.jiffies;
+ mod_timer(&t->split_timeout_timer, jiffies + delta);
}
static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp);
@@ -162,8 +207,12 @@ static void transmit_complete_callback(struct fw_packet *packet,
break;
case ACK_PENDING:
{
- t->split_timeout_cycle =
- compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff;
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
+ t->split_timeout_cycle =
+ compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff;
+ }
start_split_transaction_timeout(t, card);
break;
}
@@ -259,18 +308,21 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
}
static int allocate_tlabel(struct fw_card *card)
+__must_hold(&card->transactions.lock)
{
int tlabel;
- tlabel = card->current_tlabel;
- while (card->tlabel_mask & (1ULL << tlabel)) {
+ lockdep_assert_held(&card->transactions.lock);
+
+ tlabel = card->transactions.current_tlabel;
+ while (card->transactions.tlabel_mask & (1ULL << tlabel)) {
tlabel = (tlabel + 1) & 0x3f;
- if (tlabel == card->current_tlabel)
+ if (tlabel == card->transactions.current_tlabel)
return -EBUSY;
}
- card->current_tlabel = (tlabel + 1) & 0x3f;
- card->tlabel_mask |= 1ULL << tlabel;
+ card->transactions.current_tlabel = (tlabel + 1) & 0x3f;
+ card->transactions.tlabel_mask |= 1ULL << tlabel;
return tlabel;
}
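
The allocator scans a 64-entry bitmask round-robin, starting just past the previous allocation so labels are recycled as late as possible. It ports almost verbatim to userspace:

#include <stdint.h>
#include <stdio.h>

static uint64_t tlabel_mask;
static int current_tlabel;

static int allocate_tlabel(void)
{
	int tlabel = current_tlabel;

	/* Walk forward until a free bit is found or we wrap around. */
	while (tlabel_mask & (1ULL << tlabel)) {
		tlabel = (tlabel + 1) & 0x3f;
		if (tlabel == current_tlabel)
			return -1;	/* all 64 labels in flight */
	}
	current_tlabel = (tlabel + 1) & 0x3f;
	tlabel_mask |= 1ULL << tlabel;
	return tlabel;
}

static void release_tlabel(int tlabel)
{
	tlabel_mask &= ~(1ULL << tlabel);
}

int main(void)
{
	int a = allocate_tlabel();	/* 0 */
	int b = allocate_tlabel();	/* 1 */

	release_tlabel(a);
	/* The next allocation is 2, not the just-released 0. */
	printf("a=%d b=%d next=%d\n", a, b, allocate_tlabel());
	return 0;
}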
@@ -331,7 +383,6 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
void *payload, size_t length, union fw_transaction_callback callback,
bool with_tstamp, void *callback_data)
{
- unsigned long flags;
int tlabel;
/*
@@ -339,11 +390,11 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
* the list while holding the card spinlock.
*/
- spin_lock_irqsave(&card->lock, flags);
-
- tlabel = allocate_tlabel(card);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock)
+ tlabel = allocate_tlabel(card);
if (tlabel < 0) {
- spin_unlock_irqrestore(&card->lock, flags);
if (!with_tstamp) {
callback.without_tstamp(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
} else {
@@ -368,15 +419,22 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
t->callback = callback;
t->with_tstamp = with_tstamp;
t->callback_data = callback_data;
-
- fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id, generation,
- speed, offset, payload, length);
t->packet.callback = transmit_complete_callback;
- list_add_tail(&t->link, &card->transaction_list);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->lock) {
+ // The node_id field of fw_card can be updated when handling SelfIDComplete.
+ fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id,
+ generation, speed, offset, payload, length);
+ }
- spin_unlock_irqrestore(&card->lock, flags);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock)
+ list_add_tail(&t->link, &card->transactions.list);
+ // Safe with no lock, since the index field of fw_card is immutable once assigned.
trace_async_request_outbound_initiate((uintptr_t)t, card->index, generation, speed,
t->packet.header, payload,
tcode_is_read_request(tcode) ? 0 : length / 4);
@@ -458,7 +516,7 @@ static struct fw_packet phy_config_packet = {
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count)
{
- long timeout = DIV_ROUND_UP(HZ, 10);
+ long timeout = msecs_to_jiffies(100);
u32 data = 0;
phy_packet_set_packet_identifier(&data, PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG);
@@ -550,6 +608,23 @@ const struct fw_address_region fw_unit_space_region =
{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif /* 0 */
+static void complete_address_handler(struct kref *kref)
+{
+ struct fw_address_handler *handler = container_of(kref, struct fw_address_handler, kref);
+
+ complete(&handler->done);
+}
+
+static void get_address_handler(struct fw_address_handler *handler)
+{
+ kref_get(&handler->kref);
+}
+
+static int put_address_handler(struct fw_address_handler *handler)
+{
+ return kref_put(&handler->kref, complete_address_handler);
+}
+
/**
* fw_core_add_address_handler() - register for incoming requests
* @handler: callback
@@ -557,9 +632,10 @@ const struct fw_address_region fw_unit_space_region =
*
* region->start, ->end, and handler->length have to be quadlet-aligned.
*
- * When a request is received that falls within the specified address range,
- * the specified callback is invoked. The parameters passed to the callback
- * give the details of the particular request.
+ * When a request is received that falls within the specified address range, the specified callback
+ * is invoked. The parameters passed to the callback give the details of the particular request.
+ * The callback is invoked in the workqueue context in most cases. However, if the request is
+ * initiated by the local node, the callback is invoked in the initiator's context.
*
* To be called in process context.
* Return value: 0 on success, non-zero otherwise.
@@ -595,6 +671,8 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
if (other != NULL) {
handler->offset += other->length;
} else {
+ init_completion(&handler->done);
+ kref_init(&handler->kref);
list_add_tail_rcu(&handler->link, &address_handler_list);
ret = 0;
break;
@@ -620,6 +698,9 @@ void fw_core_remove_address_handler(struct fw_address_handler *handler)
list_del_rcu(&handler->link);
synchronize_rcu();
+
+ if (!put_address_handler(handler))
+ wait_for_completion(&handler->done);
}
EXPORT_SYMBOL(fw_core_remove_address_handler);
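
Teardown here combines a reference count with a completion: the remover drops the list's reference and, if an in-flight callback still holds one, blocks until the final put signals done. A pthread model of the same shape, with a mutex/condvar pair standing in for kref and struct completion; build with -lpthread:

#include <pthread.h>
#include <stdio.h>

struct handler {
	int refs;
	int done;
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

static void handler_get(struct handler *h)
{
	pthread_mutex_lock(&h->lock);
	h->refs++;
	pthread_mutex_unlock(&h->lock);
}

/* Returns nonzero if this was the final reference. */
static int handler_put(struct handler *h)
{
	int last;

	pthread_mutex_lock(&h->lock);
	last = (--h->refs == 0);
	if (last) {
		h->done = 1;
		pthread_cond_signal(&h->cond);
	}
	pthread_mutex_unlock(&h->lock);
	return last;
}

static void handler_remove(struct handler *h)
{
	if (!handler_put(h)) {
		pthread_mutex_lock(&h->lock);
		while (!h->done)
			pthread_cond_wait(&h->cond, &h->lock);
		pthread_mutex_unlock(&h->lock);
	}
	puts("handler fully quiesced; safe to free");
}

int main(void)
{
	struct handler h = { 1, 0, PTHREAD_MUTEX_INITIALIZER,
			     PTHREAD_COND_INITIALIZER };

	handler_get(&h);	/* an in-flight callback takes a reference */
	handler_put(&h);	/* the callback finishes */
	handler_remove(&h);	/* the initial list reference goes last */
	return 0;
}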
@@ -756,11 +837,14 @@ EXPORT_SYMBOL(fw_fill_response);
static u32 compute_split_timeout_timestamp(struct fw_card *card,
u32 request_timestamp)
+__must_hold(&card->split_timeout.lock)
{
unsigned int cycles;
u32 timestamp;
- cycles = card->split_timeout_cycles;
+ lockdep_assert_held(&card->split_timeout.lock);
+
+ cycles = card->split_timeout.cycles;
cycles += request_timestamp & 0x1fff;
timestamp = request_timestamp & ~0x1fff;
@@ -811,9 +895,12 @@ static struct fw_request *allocate_request(struct fw_card *card,
return NULL;
kref_init(&request->kref);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock)
+ request->response.timestamp = compute_split_timeout_timestamp(card, p->timestamp);
+
request->response.speed = p->speed;
- request->response.timestamp =
- compute_split_timeout_timestamp(card, p->timestamp);
request->response.generation = p->generation;
request->response.ack = 0;
request->response.callback = free_response_callback;
@@ -913,22 +1000,31 @@ static void handle_exclusive_region_request(struct fw_card *card,
handler = lookup_enclosing_address_handler(&address_handler_list, offset,
request->length);
if (handler)
- handler->address_callback(card, request, tcode, destination, source,
- p->generation, offset, request->data,
- request->length, handler->callback_data);
+ get_address_handler(handler);
}
- if (!handler)
+ if (!handler) {
fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+ return;
+ }
+
+ // Outside the RCU read-side critical section. Without spinlock. With reference count.
+ handler->address_callback(card, request, tcode, destination, source, p->generation, offset,
+ request->data, request->length, handler->callback_data);
+ put_address_handler(handler);
}
+// To use the kmalloc allocator efficiently, this should be a power of two.
+#define BUFFER_ON_KERNEL_STACK_SIZE 4
+
static void handle_fcp_region_request(struct fw_card *card,
struct fw_packet *p,
struct fw_request *request,
unsigned long long offset)
{
- struct fw_address_handler *handler;
- int tcode, destination, source;
+ struct fw_address_handler *buffer_on_kernel_stack[BUFFER_ON_KERNEL_STACK_SIZE];
+ struct fw_address_handler *handler, **handlers;
+ int tcode, destination, source, i, count, buffer_size;
if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
@@ -949,15 +1045,55 @@ static void handle_fcp_region_request(struct fw_card *card,
return;
}
+ count = 0;
+ handlers = buffer_on_kernel_stack;
+ buffer_size = ARRAY_SIZE(buffer_on_kernel_stack);
scoped_guard(rcu) {
list_for_each_entry_rcu(handler, &address_handler_list, link) {
- if (is_enclosing_handler(handler, offset, request->length))
- handler->address_callback(card, request, tcode, destination, source,
- p->generation, offset, request->data,
- request->length, handler->callback_data);
+ if (is_enclosing_handler(handler, offset, request->length)) {
+ if (count >= buffer_size) {
+ int next_size = buffer_size * 2;
+ struct fw_address_handler **buffer_on_kernel_heap;
+
+ if (handlers == buffer_on_kernel_stack)
+ buffer_on_kernel_heap = NULL;
+ else
+ buffer_on_kernel_heap = handlers;
+
+ buffer_on_kernel_heap =
+ krealloc_array(buffer_on_kernel_heap, next_size,
+ sizeof(*buffer_on_kernel_heap), GFP_ATOMIC);
+ // FCP is used for purposes unrelated to significant system
+ // resources (e.g. storage or networking), so allocation
+ // failures are not considered critical.
+ if (!buffer_on_kernel_heap)
+ break;
+
+ if (handlers == buffer_on_kernel_stack) {
+ memcpy(buffer_on_kernel_heap, buffer_on_kernel_stack,
+ sizeof(buffer_on_kernel_stack));
+ }
+
+ handlers = buffer_on_kernel_heap;
+ buffer_size = next_size;
+ }
+ get_address_handler(handler);
+ handlers[count++] = handler;
+ }
}
}
+ for (i = 0; i < count; ++i) {
+ handler = handlers[i];
+ handler->address_callback(card, request, tcode, destination, source,
+ p->generation, offset, request->data,
+ request->length, handler->callback_data);
+ put_address_handler(handler);
+ }
+
+ if (handlers != buffer_on_kernel_stack)
+ kfree(handlers);
+
fw_send_response(card, request, RCODE_COMPLETE);
}
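
The handler collection above uses a small on-stack array and falls back to a doubling heap buffer only when needed, which keeps the common case allocation-free inside the RCU read-side section. The pattern in isolation, with realloc() standing in for krealloc_array():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define STACK_SLOTS 4

int main(void)
{
	int on_stack[STACK_SLOTS];
	int *slots = on_stack;
	size_t capacity = STACK_SLOTS, count = 0;
	int value;

	for (value = 0; value < 20; ++value) {
		if (value % 2)
			continue;	/* collect even values only */
		if (count >= capacity) {
			size_t next = capacity * 2;
			int *heap = (slots == on_stack) ? NULL : slots;

			heap = realloc(heap, next * sizeof(*heap));
			if (!heap)
				break;	/* degrade gracefully, as the driver does */
			/* First growth: copy the stack contents over. */
			if (slots == on_stack)
				memcpy(heap, on_stack, sizeof(on_stack));
			slots = heap;
			capacity = next;
		}
		slots[count++] = value;
	}

	printf("collected %zu values, spilled to heap: %s\n", count,
	       slots == on_stack ? "no" : "yes");
	if (slots != on_stack)
		free(slots);
	return 0;
}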
@@ -1000,7 +1136,7 @@ EXPORT_SYMBOL(fw_core_handle_request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
- struct fw_transaction *t = NULL, *iter;
+ struct fw_transaction *t = NULL;
u32 *data;
size_t data_length;
int tcode, tlabel, source, rcode;
@@ -1039,17 +1175,11 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
break;
}
- scoped_guard(spinlock_irqsave, &card->lock) {
- list_for_each_entry(iter, &card->transaction_list, link) {
- if (iter->node_id == source && iter->tlabel == tlabel) {
- if (try_cancel_split_timeout(iter)) {
- list_del_init(&iter->link);
- card->tlabel_mask &= ~(1ULL << iter->tlabel);
- t = iter;
- }
- break;
- }
- }
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock) {
+ t = find_and_pop_transaction_entry(card,
+ iter->node_id == source && iter->tlabel == tlabel);
}
trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack,
@@ -1124,7 +1254,11 @@ static void handle_topology_map(struct fw_card *card, struct fw_request *request
}
start = (offset - topology_map_region.start) / 4;
- memcpy(payload, &card->topology_map[start], length);
+
+ // NOTE: This can be without irqsave when we can guarantee that fw_send_request() for local
+ // destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->topology_map.lock)
+ memcpy(payload, &card->topology_map.buffer[start], length);
fw_send_response(card, request, RCODE_COMPLETE);
}
@@ -1139,16 +1273,17 @@ static const struct fw_address_region registers_region =
.end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
static void update_split_timeout(struct fw_card *card)
+__must_hold(&card->split_timeout.lock)
{
unsigned int cycles;
- cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);
+ cycles = card->split_timeout.hi * 8000 + (card->split_timeout.lo >> 19);
/* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
cycles = clamp(cycles, 800u, 3u * 8000u);
- card->split_timeout_cycles = cycles;
- card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
+ card->split_timeout.cycles = cycles;
+ card->split_timeout.jiffies = isoc_cycles_to_jiffies(cycles);
}
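
Numerically: SPLIT_TIMEOUT_HI holds seconds, the top 13 bits of SPLIT_TIMEOUT_LO hold 125-microsecond isochronous cycles, and the sum is clamped to between 100 ms (800 cycles) and 3 s worth of cycles. A sketch of the arithmetic, with microseconds standing in for jiffies:

#include <stdint.h>
#include <stdio.h>

#define CYCLES_PER_SEC 8000u

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	uint32_t split_timeout_hi = 0;		/* 0 seconds */
	uint32_t split_timeout_lo = 0x06400000;	/* top 13 bits = 200 cycles */
	unsigned int cycles;
	uint64_t usec;

	cycles = split_timeout_hi * CYCLES_PER_SEC + (split_timeout_lo >> 19);
	/* 200 cycles is below the 800-cycle floor, so the clamp raises it. */
	cycles = clamp_uint(cycles, 800u, 3u * CYCLES_PER_SEC);

	usec = (uint64_t)cycles * 1000000u / CYCLES_PER_SEC;
	printf("split timeout: %u cycles = %llu us\n",
	       cycles, (unsigned long long)usec);
	return 0;
}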
static void handle_registers(struct fw_card *card, struct fw_request *request,
@@ -1198,12 +1333,15 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
case CSR_SPLIT_TIMEOUT_HI:
if (tcode == TCODE_READ_QUADLET_REQUEST) {
- *data = cpu_to_be32(card->split_timeout_hi);
+ *data = cpu_to_be32(card->split_timeout.hi);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
- guard(spinlock_irqsave)(&card->lock);
-
- card->split_timeout_hi = be32_to_cpu(*data) & 7;
- update_split_timeout(card);
+ // NOTE: This can be without irqsave when we can guarantee that
+ // __fw_send_request() for local destination never runs in any type of IRQ
+ // context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
+ card->split_timeout.hi = be32_to_cpu(*data) & 7;
+ update_split_timeout(card);
+ }
} else {
rcode = RCODE_TYPE_ERROR;
}
@@ -1211,12 +1349,15 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
case CSR_SPLIT_TIMEOUT_LO:
if (tcode == TCODE_READ_QUADLET_REQUEST) {
- *data = cpu_to_be32(card->split_timeout_lo);
+ *data = cpu_to_be32(card->split_timeout.lo);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
- guard(spinlock_irqsave)(&card->lock);
-
- card->split_timeout_lo = be32_to_cpu(*data) & 0xfff80000;
- update_split_timeout(card);
+ // NOTE: This can be without irqsave when we can guarantee that
+ // __fw_send_request() for local destination never runs in any type of IRQ
+ // context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
+ card->split_timeout.lo = be32_to_cpu(*data) & 0xfff80000;
+ update_split_timeout(card);
+ }
} else {
rcode = RCODE_TYPE_ERROR;
}
@@ -1327,7 +1468,8 @@ static int __init fw_core_init(void)
{
int ret;
- fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM, 0);
+ fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
if (!fw_workqueue)
return -ENOMEM;
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 9b298af1cac0..41fb39d9a4e6 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -27,6 +27,11 @@ struct fw_packet;
/* -card */
+// This is the arbitrary value we use to indicate a mismatched gap count.
+#define GAP_COUNT_MISMATCHED 0
+
+#define isoc_cycles_to_jiffies(cycles) usecs_to_jiffies((u32)div_u64((u64)cycles * USEC_PER_SEC, 8000))
+
extern __printf(2, 3)
void fw_err(const struct fw_card *card, const char *fmt, ...);
extern __printf(2, 3)
@@ -60,6 +65,9 @@ struct fw_card_driver {
int (*enable)(struct fw_card *card,
const __be32 *config_rom, size_t length);
+ // After this call returns, no function is triggered anymore to handle hardware events.
+ void (*disable)(struct fw_card *card);
+
int (*read_phy_reg)(struct fw_card *card, int address);
int (*update_phy_reg)(struct fw_card *card, int address,
int clear_bits, int set_bits);
@@ -167,6 +175,9 @@ static inline void fw_iso_context_init_work(struct fw_iso_context *ctx, work_fun
/* -topology */
+// The initial value of the BUS_MANAGER_ID register, indicating that nothing is registered.
+#define BUS_MANAGER_ID_NOT_REGISTERED 0x3f
+
enum {
FW_NODE_CREATED,
FW_NODE_UPDATED,
@@ -194,8 +205,8 @@ struct fw_node {
/* For serializing node topology into a list. */
struct list_head link;
- /* Upper layer specific data. */
- void *data;
+ // The associated device if already bound, else NULL.
+ struct fw_device *device;
struct fw_node *ports[] __counted_by(port_count);
};
@@ -219,6 +230,16 @@ static inline void fw_node_put(struct fw_node *node)
kref_put(&node->kref, release_node);
}
+static inline struct fw_device *fw_node_get_device(struct fw_node *node)
+{
+ return node->device;
+}
+
+static inline void fw_node_set_device(struct fw_node *node, struct fw_device *device)
+{
+ node->device = device;
+}
+
void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
int generation, int self_id_count, u32 *self_ids, bool bm_abdicate);
void fw_destroy_nodes(struct fw_card *card);
@@ -266,6 +287,8 @@ void fw_fill_response(struct fw_packet *response, u32 *request_header,
void fw_request_get(struct fw_request *request);
void fw_request_put(struct fw_request *request);
+void fw_cancel_pending_transactions(struct fw_card *card);
+
// Convert the value of IEEE 1394 CYCLE_TIME register to the format of timeStamp field in
// descriptors of 1394 OHCI.
static inline u32 cycle_time_to_ohci_tstamp(u32 tstamp)
diff --git a/drivers/firewire/init_ohci1394_dma.c b/drivers/firewire/init_ohci1394_dma.c
index 48b879e9e831..121f0c2f6401 100644
--- a/drivers/firewire/init_ohci1394_dma.c
+++ b/drivers/firewire/init_ohci1394_dma.c
@@ -167,6 +167,7 @@ static inline void __init init_ohci1394_initialize(struct ohci *ohci)
/**
* init_ohci1394_wait_for_busresets - wait until bus resets are completed
+ * @ohci: Pointer to the OHCI-1394 controller structure
*
* OHCI1394 initialization itself and any device going on- or offline
* and any cable issue cause a IEEE1394 bus reset. The OHCI1394 spec
@@ -189,6 +190,8 @@ static inline void __init init_ohci1394_wait_for_busresets(struct ohci *ohci)
/**
* init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging
+ * @ohci: Pointer to the OHCI-1394 controller structure
+ *
* This enables remote DMA access over IEEE1394 from every host for the low
* 4GB of address space. DMA accesses above 4GB are not available currently.
*/
@@ -201,6 +204,8 @@ static inline void __init init_ohci1394_enable_physical_dma(struct ohci *ohci)
/**
* init_ohci1394_reset_and_init_dma - init controller and enable DMA
+ * @ohci: Pointer to the OHCI-1394 controller structure
+ *
* This initializes the given controller and enables physical DMA engine in it.
*/
static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
@@ -230,6 +235,10 @@ static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
/**
* init_ohci1394_controller - Map the registers of the controller and init DMA
+ * @num: PCI bus number
+ * @slot: PCI device number
+ * @func: PCI function number
+ *
* This maps the registers of the specified controller and initializes it
*/
static inline void __init init_ohci1394_controller(int num, int slot, int func)
@@ -284,6 +293,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
/**
* setup_ohci1394_dma - enables early OHCI1394 DMA initialization
+ * @opt: Kernel command line parameter string
*/
static int __init setup_ohci1394_dma(char *opt)
{
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 1bf0e15c1540..6d6446713539 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -1007,7 +1007,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
spin_lock_irqsave(&dev->lock, flags);
- /* If the AT tasklet already ran, we may be last user. */
+ /* If the AT work item already ran, we may be last user. */
free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
if (!free)
ptask->enqueued = true;
@@ -1026,7 +1026,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
spin_lock_irqsave(&dev->lock, flags);
- /* If the AT tasklet already ran, we may be last user. */
+ /* If the AT work item already ran, we may be last user. */
free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
if (!free)
ptask->enqueued = true;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index edaedd156a6d..e3e78dc42530 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -101,7 +101,7 @@ struct ar_context {
void *pointer;
unsigned int last_buffer_index;
u32 regs;
- struct tasklet_struct tasklet;
+ struct work_struct work;
};
struct context;
@@ -128,7 +128,6 @@ struct context {
int total_allocation;
u32 current_bus;
bool running;
- bool flushing;
/*
* List of page-sized buffers for storing DMA descriptors.
@@ -157,8 +156,12 @@ struct context {
int prev_z;
descriptor_callback_t callback;
+};
- struct tasklet_struct tasklet;
+struct at_context {
+ struct context context;
+ struct work_struct work;
+ bool flushing;
};
struct iso_context {
@@ -204,8 +207,8 @@ struct fw_ohci {
struct ar_context ar_request_ctx;
struct ar_context ar_response_ctx;
- struct context at_request_ctx;
- struct context at_response_ctx;
+ struct at_context at_request_ctx;
+ struct at_context at_response_ctx;
u32 it_context_support;
u32 it_context_mask; /* unoccupied IT contexts */
@@ -225,13 +228,10 @@ struct fw_ohci {
__le32 *self_id;
dma_addr_t self_id_bus;
- struct work_struct bus_reset_work;
u32 self_id_buffer[512];
};
-static struct workqueue_struct *selfid_workqueue;
-
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
return container_of(card, struct fw_ohci, card);
@@ -390,225 +390,10 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
")");
-#define OHCI_PARAM_DEBUG_AT_AR 1
-#define OHCI_PARAM_DEBUG_SELFIDS 2
-#define OHCI_PARAM_DEBUG_IRQS 4
-
-static int param_debug;
-module_param_named(debug, param_debug, int, 0644);
-MODULE_PARM_DESC(debug, "Verbose logging, deprecated in v6.11 kernel or later. (default = 0"
- ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
- ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
- ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
- ", or a combination, or all = -1)");
-
static bool param_remote_dma;
module_param_named(remote_dma, param_remote_dma, bool, 0444);
MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");
-static void log_irqs(struct fw_ohci *ohci, u32 evt)
-{
- if (likely(!(param_debug & OHCI_PARAM_DEBUG_IRQS)))
- return;
-
- ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
- evt & OHCI1394_selfIDComplete ? " selfID" : "",
- evt & OHCI1394_RQPkt ? " AR_req" : "",
- evt & OHCI1394_RSPkt ? " AR_resp" : "",
- evt & OHCI1394_reqTxComplete ? " AT_req" : "",
- evt & OHCI1394_respTxComplete ? " AT_resp" : "",
- evt & OHCI1394_isochRx ? " IR" : "",
- evt & OHCI1394_isochTx ? " IT" : "",
- evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
- evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
- evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
- evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
- evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
- evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "",
- evt & OHCI1394_busReset ? " busReset" : "",
- evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
- OHCI1394_RSPkt | OHCI1394_reqTxComplete |
- OHCI1394_respTxComplete | OHCI1394_isochRx |
- OHCI1394_isochTx | OHCI1394_postedWriteErr |
- OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
- OHCI1394_cycleInconsistent |
- OHCI1394_regAccessFail | OHCI1394_busReset)
- ? " ?" : "");
-}
-
-static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
-{
- static const char *const speed[] = {
- [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
- };
- static const char *const power[] = {
- [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
- [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
- };
- static const char port[] = {
- [PHY_PACKET_SELF_ID_PORT_STATUS_NONE] = '.',
- [PHY_PACKET_SELF_ID_PORT_STATUS_NCONN] = '-',
- [PHY_PACKET_SELF_ID_PORT_STATUS_PARENT] = 'p',
- [PHY_PACKET_SELF_ID_PORT_STATUS_CHILD] = 'c',
- };
- struct self_id_sequence_enumerator enumerator = {
- .cursor = ohci->self_id_buffer,
- .quadlet_count = self_id_count,
- };
-
- if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
- return;
-
- ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
- self_id_count, generation, ohci->node_id);
-
- while (enumerator.quadlet_count > 0) {
- unsigned int quadlet_count;
- unsigned int port_index;
- const u32 *s;
- int i;
-
- s = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
- if (IS_ERR(s))
- break;
-
- ohci_notice(ohci,
- "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
- *s,
- phy_packet_self_id_get_phy_id(*s),
- port[self_id_sequence_get_port_status(s, quadlet_count, 0)],
- port[self_id_sequence_get_port_status(s, quadlet_count, 1)],
- port[self_id_sequence_get_port_status(s, quadlet_count, 2)],
- speed[*s >> 14 & 3], *s >> 16 & 63,
- power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
- *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
-
- port_index = 3;
- for (i = 1; i < quadlet_count; ++i) {
- ohci_notice(ohci,
- "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
- s[i],
- phy_packet_self_id_get_phy_id(s[i]),
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 1)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 2)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 3)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 4)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 5)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 6)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 7)]
- );
-
- port_index += 8;
- }
- }
-}
-
-static const char *evts[] = {
- [0x00] = "evt_no_status", [0x01] = "-reserved-",
- [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
- [0x04] = "evt_underrun", [0x05] = "evt_overrun",
- [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
- [0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
- [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
- [0x0c] = "-reserved-", [0x0d] = "-reserved-",
- [0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
- [0x10] = "-reserved-", [0x11] = "ack_complete",
- [0x12] = "ack_pending ", [0x13] = "-reserved-",
- [0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
- [0x16] = "ack_busy_B", [0x17] = "-reserved-",
- [0x18] = "-reserved-", [0x19] = "-reserved-",
- [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
- [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
- [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
- [0x20] = "pending/cancelled",
-};
-
-static void log_ar_at_event(struct fw_ohci *ohci,
- char dir, int speed, u32 *header, int evt)
-{
- static const char *const tcodes[] = {
- [TCODE_WRITE_QUADLET_REQUEST] = "QW req",
- [TCODE_WRITE_BLOCK_REQUEST] = "BW req",
- [TCODE_WRITE_RESPONSE] = "W resp",
- [0x3] = "-reserved-",
- [TCODE_READ_QUADLET_REQUEST] = "QR req",
- [TCODE_READ_BLOCK_REQUEST] = "BR req",
- [TCODE_READ_QUADLET_RESPONSE] = "QR resp",
- [TCODE_READ_BLOCK_RESPONSE] = "BR resp",
- [TCODE_CYCLE_START] = "cycle start",
- [TCODE_LOCK_REQUEST] = "Lk req",
- [TCODE_STREAM_DATA] = "async stream packet",
- [TCODE_LOCK_RESPONSE] = "Lk resp",
- [0xc] = "-reserved-",
- [0xd] = "-reserved-",
- [TCODE_LINK_INTERNAL] = "link internal",
- [0xf] = "-reserved-",
- };
- int tcode = async_header_get_tcode(header);
- char specific[12];
-
- if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
- return;
-
- if (unlikely(evt >= ARRAY_SIZE(evts)))
- evt = 0x1f;
-
- if (evt == OHCI1394_evt_bus_reset) {
- ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
- dir, (header[2] >> 16) & 0xff);
- return;
- }
-
- switch (tcode) {
- case TCODE_WRITE_QUADLET_REQUEST:
- case TCODE_READ_QUADLET_RESPONSE:
- case TCODE_CYCLE_START:
- snprintf(specific, sizeof(specific), " = %08x",
- be32_to_cpu((__force __be32)header[3]));
- break;
- case TCODE_WRITE_BLOCK_REQUEST:
- case TCODE_READ_BLOCK_REQUEST:
- case TCODE_READ_BLOCK_RESPONSE:
- case TCODE_LOCK_REQUEST:
- case TCODE_LOCK_RESPONSE:
- snprintf(specific, sizeof(specific), " %x,%x",
- async_header_get_data_length(header),
- async_header_get_extended_tcode(header));
- break;
- default:
- specific[0] = '\0';
- }
-
- switch (tcode) {
- case TCODE_STREAM_DATA:
- ohci_notice(ohci, "A%c %s, %s\n",
- dir, evts[evt], tcodes[tcode]);
- break;
- case TCODE_LINK_INTERNAL:
- ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
- dir, evts[evt], header[1], header[2]);
- break;
- case TCODE_WRITE_QUADLET_REQUEST:
- case TCODE_WRITE_BLOCK_REQUEST:
- case TCODE_READ_QUADLET_REQUEST:
- case TCODE_READ_BLOCK_REQUEST:
- case TCODE_LOCK_REQUEST:
- ohci_notice(ohci,
- "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %012llx%s\n",
- dir, speed, async_header_get_tlabel(header),
- async_header_get_source(header), async_header_get_destination(header),
- evts[evt], tcodes[tcode], async_header_get_offset(header), specific);
- break;
- default:
- ohci_notice(ohci,
- "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
- dir, speed, async_header_get_tlabel(header),
- async_header_get_source(header), async_header_get_destination(header),
- evts[evt], tcodes[tcode], specific);
- }
-}
-
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
writel(data, ohci->registers + offset);
@@ -954,8 +739,6 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
p.timestamp = status & 0xffff;
p.generation = ohci->request_generation;
- log_ar_at_event(ohci, 'R', p.speed, p.header, evt);
-
/*
* Several controllers, notably from NEC and VIA, forget to
* write ack_complete status at PHY packet reception.
@@ -974,7 +757,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
*
* Alas some chips sometimes emit bus reset packets with a
* wrong generation. We set the correct generation for these
- * at a slightly incorrect time (in bus_reset_work).
+ * at a slightly incorrect time (in handle_selfid_complete_event).
*/
if (evt == OHCI1394_evt_bus_reset) {
if (!(ohci->quirks & QUIRK_RESET_PACKET))
@@ -1016,9 +799,9 @@ static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
}
}
-static void ar_context_tasklet(unsigned long data)
+static void ohci_ar_context_work(struct work_struct *work)
{
- struct ar_context *ctx = (struct ar_context *)data;
+ struct ar_context *ctx = from_work(ctx, work, work);
unsigned int end_buffer_index, end_buffer_offset;
void *p, *end;
@@ -1026,23 +809,19 @@ static void ar_context_tasklet(unsigned long data)
if (!p)
return;
- end_buffer_index = ar_search_last_active_buffer(ctx,
- &end_buffer_offset);
+ end_buffer_index = ar_search_last_active_buffer(ctx, &end_buffer_offset);
ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;
if (end_buffer_index < ar_first_buffer_index(ctx)) {
- /*
- * The filled part of the overall buffer wraps around; handle
- * all packets up to the buffer end here. If the last packet
- * wraps around, its tail will be visible after the buffer end
- * because the buffer start pages are mapped there again.
- */
+ // The filled part of the overall buffer wraps around; handle all packets up to the
+ // buffer end here. If the last packet wraps around, its tail will be visible after
+ // the buffer end because the buffer start pages are mapped there again.
void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
p = handle_ar_packets(ctx, p, buffer_end);
if (p < buffer_end)
goto error;
- /* adjust p to point back into the actual buffer */
+ // adjust p to point back into the actual buffer
p -= AR_BUFFERS * PAGE_SIZE;
}
@@ -1057,7 +836,6 @@ static void ar_context_tasklet(unsigned long data)
ar_recycle_buffers(ctx, end_buffer_index);
return;
-
error:
ctx->pointer = NULL;
}
@@ -1073,7 +851,7 @@ static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
ctx->regs = regs;
ctx->ohci = ohci;
- tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
+ INIT_WORK(&ctx->work, ohci_ar_context_work);
for (i = 0; i < AR_BUFFERS; i++) {
ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr,
@@ -1181,16 +959,16 @@ static void context_retire_descriptors(struct context *ctx)
}
}
-static void context_tasklet(unsigned long data)
+static void ohci_at_context_work(struct work_struct *work)
{
- struct context *ctx = (struct context *) data;
+ struct at_context *ctx = from_work(ctx, work, work);
- context_retire_descriptors(ctx);
+ context_retire_descriptors(&ctx->context);
}
static void ohci_isoc_context_work(struct work_struct *work)
{
- struct fw_iso_context *base = container_of(work, struct fw_iso_context, work);
+ struct fw_iso_context *base = from_work(base, work, work);
struct iso_context *isoc_ctx = container_of(base, struct iso_context, base);
context_retire_descriptors(&isoc_ctx->context);
@@ -1248,7 +1026,6 @@ static int context_init(struct context *ctx, struct fw_ohci *ohci,
ctx->buffer_tail = list_entry(ctx->buffer_list.next,
struct descriptor_buffer, list);
- tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
ctx->callback = callback;
/*
@@ -1388,17 +1165,17 @@ struct driver_data {
 * Must always be called with the ohci->lock held to ensure proper
* generation handling and locking around packet queue manipulation.
*/
-static int at_context_queue_packet(struct context *ctx,
- struct fw_packet *packet)
+static int at_context_queue_packet(struct at_context *ctx, struct fw_packet *packet)
{
- struct fw_ohci *ohci = ctx->ohci;
+ struct context *context = &ctx->context;
+ struct fw_ohci *ohci = context->ohci;
dma_addr_t d_bus, payload_bus;
struct driver_data *driver_data;
struct descriptor *d, *last;
__le32 *header;
int z, tcode;
- d = context_get_descriptors(ctx, 4, &d_bus);
+ d = context_get_descriptors(context, 4, &d_bus);
if (d == NULL) {
packet->ack = RCODE_SEND_ERROR;
return -1;
@@ -1428,7 +1205,7 @@ static int at_context_queue_packet(struct context *ctx,
ohci1394_at_data_set_destination_id(header,
async_header_get_destination(packet->header));
- if (ctx == &ctx->ohci->at_response_ctx) {
+ if (ctx == &ohci->at_response_ctx) {
ohci1394_at_data_set_rcode(header, async_header_get_rcode(packet->header));
} else {
ohci1394_at_data_set_destination_offset(header,
@@ -1517,37 +1294,50 @@ static int at_context_queue_packet(struct context *ctx,
return -1;
}
- context_append(ctx, d, z, 4 - z);
+ context_append(context, d, z, 4 - z);
- if (ctx->running)
- reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+ if (context->running)
+ reg_write(ohci, CONTROL_SET(context->regs), CONTEXT_WAKE);
else
- context_run(ctx, 0);
+ context_run(context, 0);
return 0;
}
-static void at_context_flush(struct context *ctx)
+static void at_context_flush(struct at_context *ctx)
{
- tasklet_disable(&ctx->tasklet);
+ // Avoid deadlock due to a programming mistake.
+ if (WARN_ON_ONCE(current_work() == &ctx->work))
+ return;
+
+ disable_work_sync(&ctx->work);
+
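+ // ctx->flushing is read concurrently by handle_at_packet(); the ONCE accessors
+ // below avoid load/store tearing.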
+ WRITE_ONCE(ctx->flushing, true);
+ ohci_at_context_work(&ctx->work);
+ WRITE_ONCE(ctx->flushing, false);
+
+ enable_work(&ctx->work);
+}
- ctx->flushing = true;
- context_tasklet((unsigned long)ctx);
- ctx->flushing = false;
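+/* Match a child fw_device by generation and node ID, packed into @data. */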
+static int find_fw_device(struct device *dev, const void *data)
+{
+ struct fw_device *device = fw_device(dev);
+ const u32 *params = data;
- tasklet_enable(&ctx->tasklet);
+ return (device->generation == params[0]) && (device->node_id == params[1]);
}
static int handle_at_packet(struct context *context,
struct descriptor *d,
struct descriptor *last)
{
+ struct at_context *ctx = container_of(context, struct at_context, context);
+ struct fw_ohci *ohci = ctx->context.ohci;
struct driver_data *driver_data;
struct fw_packet *packet;
- struct fw_ohci *ohci = context->ohci;
int evt;
- if (last->transfer_status == 0 && !context->flushing)
+ if (last->transfer_status == 0 && !READ_ONCE(ctx->flushing))
/* This descriptor isn't done yet, stop iteration. */
return 0;
@@ -1564,8 +1354,6 @@ static int handle_at_packet(struct context *context,
evt = le16_to_cpu(last->transfer_status) & 0x1f;
packet->timestamp = le16_to_cpu(last->res_count);
- log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);
-
switch (evt) {
case OHCI1394_evt_timeout:
/* Async response transmit timed out. */
@@ -1581,7 +1369,7 @@ static int handle_at_packet(struct context *context,
break;
case OHCI1394_evt_missing_ack:
- if (context->flushing)
+ if (READ_ONCE(ctx->flushing))
packet->ack = RCODE_GENERATION;
else {
/*
@@ -1603,13 +1391,34 @@ static int handle_at_packet(struct context *context,
break;
case OHCI1394_evt_no_status:
- if (context->flushing) {
+ if (READ_ONCE(ctx->flushing)) {
packet->ack = RCODE_GENERATION;
break;
}
fallthrough;
default:
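+ // evt 0x10 is a reserved code, but some devices emit it in place of
+ // ack_pending; honour it when the device carries the matching quirk.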
+ if (unlikely(evt == 0x10)) {
+ u32 params[2] = {
+ packet->generation,
+ async_header_get_destination(packet->header),
+ };
+ struct device *dev;
+
+ fw_card_get(&ohci->card);
+ dev = device_find_child(ohci->card.device, (const void *)params, find_fw_device);
+ fw_card_put(&ohci->card);
+ if (dev) {
+ struct fw_device *device = fw_device(dev);
+ int quirks = READ_ONCE(device->quirks);
+
+ put_device(dev);
+ if (quirks & FW_DEVICE_QUIRK_ACK_PACKET_WITH_INVALID_PENDING_CODE) {
+ packet->ack = ACK_PENDING;
+ break;
+ }
+ }
+ }
packet->ack = RCODE_SEND_ERROR;
break;
}
@@ -1700,13 +1509,14 @@ static void handle_local_lock(struct fw_ohci *ohci,
fw_core_handle_response(&ohci->card, &response);
}
-static void handle_local_request(struct context *ctx, struct fw_packet *packet)
+static void handle_local_request(struct at_context *ctx, struct fw_packet *packet)
{
+ struct fw_ohci *ohci = ctx->context.ohci;
u64 offset, csr;
- if (ctx == &ctx->ohci->at_request_ctx) {
+ if (ctx == &ohci->at_request_ctx) {
packet->ack = ACK_PENDING;
- packet->callback(packet, &ctx->ohci->card, packet->ack);
+ packet->callback(packet, &ohci->card, packet->ack);
}
offset = async_header_get_offset(packet->header);
@@ -1714,60 +1524,80 @@ static void handle_local_request(struct context *ctx, struct fw_packet *packet)
/* Handle config rom reads. */
if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
- handle_local_rom(ctx->ohci, packet, csr);
+ handle_local_rom(ohci, packet, csr);
else switch (csr) {
case CSR_BUS_MANAGER_ID:
case CSR_BANDWIDTH_AVAILABLE:
case CSR_CHANNELS_AVAILABLE_HI:
case CSR_CHANNELS_AVAILABLE_LO:
- handle_local_lock(ctx->ohci, packet, csr);
+ handle_local_lock(ohci, packet, csr);
break;
default:
- if (ctx == &ctx->ohci->at_request_ctx)
- fw_core_handle_request(&ctx->ohci->card, packet);
+ if (ctx == &ohci->at_request_ctx)
+ fw_core_handle_request(&ohci->card, packet);
else
- fw_core_handle_response(&ctx->ohci->card, packet);
+ fw_core_handle_response(&ohci->card, packet);
break;
}
- if (ctx == &ctx->ohci->at_response_ctx) {
+ if (ctx == &ohci->at_response_ctx) {
packet->ack = ACK_COMPLETE;
- packet->callback(packet, &ctx->ohci->card, packet->ack);
+ packet->callback(packet, &ohci->card, packet->ack);
}
}
-static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
+static void at_context_transmit(struct at_context *ctx, struct fw_packet *packet)
{
+ struct fw_ohci *ohci = ctx->context.ohci;
unsigned long flags;
int ret;
- spin_lock_irqsave(&ctx->ohci->lock, flags);
+ spin_lock_irqsave(&ohci->lock, flags);
- if (async_header_get_destination(packet->header) == ctx->ohci->node_id &&
- ctx->ohci->generation == packet->generation) {
- spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+ if (async_header_get_destination(packet->header) == ohci->node_id &&
+ ohci->generation == packet->generation) {
+ spin_unlock_irqrestore(&ohci->lock, flags);
// Timestamping on behalf of the hardware.
- packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));
+ packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
handle_local_request(ctx, packet);
return;
}
ret = at_context_queue_packet(ctx, packet);
- spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+ spin_unlock_irqrestore(&ohci->lock, flags);
if (ret < 0) {
// Timestamping on behalf of the hardware.
- packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));
+ packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
- packet->callback(packet, &ctx->ohci->card, packet->ack);
+ packet->callback(packet, &ohci->card, packet->ack);
}
}
static void detect_dead_context(struct fw_ohci *ohci,
const char *name, unsigned int regs)
{
+ static const char *const evts[] = {
+ [0x00] = "evt_no_status", [0x01] = "-reserved-",
+ [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
+ [0x04] = "evt_underrun", [0x05] = "evt_overrun",
+ [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
+ [0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
+ [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
+ [0x0c] = "-reserved-", [0x0d] = "-reserved-",
+ [0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
+ [0x10] = "-reserved-", [0x11] = "ack_complete",
+ [0x12] = "ack_pending ", [0x13] = "-reserved-",
+ [0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
+ [0x16] = "ack_busy_B", [0x17] = "-reserved-",
+ [0x18] = "-reserved-", [0x19] = "-reserved-",
+ [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
+ [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
+ [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
+ [0x20] = "pending/cancelled",
+ };
u32 ctl;
ctl = reg_read(ohci, CONTROL_SET(regs));
@@ -2026,10 +1856,9 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
return self_id_count;
}
-static void bus_reset_work(struct work_struct *work)
+static irqreturn_t handle_selfid_complete_event(int irq, void *data)
{
- struct fw_ohci *ohci =
- container_of(work, struct fw_ohci, bus_reset_work);
+ struct fw_ohci *ohci = data;
int self_id_count, generation, new_generation, i, j;
u32 reg, quadlet;
void *free_rom = NULL;
@@ -2040,11 +1869,11 @@ static void bus_reset_work(struct work_struct *work)
if (!(reg & OHCI1394_NodeID_idValid)) {
ohci_notice(ohci,
"node ID not valid, new bus reset in progress\n");
- return;
+ goto end;
}
if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
ohci_notice(ohci, "malconfigured bus\n");
- return;
+ goto end;
}
ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
OHCI1394_NodeID_nodeNumber);
@@ -2058,8 +1887,11 @@ static void bus_reset_work(struct work_struct *work)
reg = reg_read(ohci, OHCI1394_SelfIDCount);
if (ohci1394_self_id_count_is_error(reg)) {
ohci_notice(ohci, "self ID receive error\n");
- return;
+ goto end;
}
+
+ trace_self_id_complete(ohci->card.index, reg, ohci->self_id, has_be_header_quirk(ohci));
+
/*
* The count in the SelfIDCount register is the number of
* bytes in the self ID receive buffer. Since we also receive
@@ -2070,7 +1902,7 @@ static void bus_reset_work(struct work_struct *work)
if (self_id_count > 252) {
ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
- return;
+ goto end;
}
quadlet = cond_le32_to_cpu(ohci->self_id[0], has_be_header_quirk(ohci));
@@ -2097,7 +1929,7 @@ static void bus_reset_work(struct work_struct *work)
ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
j, self_id_count, id, id2);
- return;
+ goto end;
}
ohci->self_id_buffer[j] = id;
}
@@ -2107,13 +1939,13 @@ static void bus_reset_work(struct work_struct *work)
if (self_id_count < 0) {
ohci_notice(ohci,
"could not construct local self ID\n");
- return;
+ goto end;
}
}
if (self_id_count == 0) {
ohci_notice(ohci, "no self IDs\n");
- return;
+ goto end;
}
rmb();
@@ -2135,14 +1967,14 @@ static void bus_reset_work(struct work_struct *work)
new_generation = ohci1394_self_id_count_get_generation(reg);
if (new_generation != generation) {
ohci_notice(ohci, "new bus reset, discarding self ids\n");
- return;
+ goto end;
}
// FIXME: Document how the locking works.
scoped_guard(spinlock_irq, &ohci->lock) {
ohci->generation = -1; // prevent AT packet queueing
- context_stop(&ohci->at_request_ctx);
- context_stop(&ohci->at_response_ctx);
+ context_stop(&ohci->at_request_ctx.context);
+ context_stop(&ohci->at_response_ctx.context);
}
/*
@@ -2192,12 +2024,12 @@ static void bus_reset_work(struct work_struct *work)
if (free_rom)
dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus);
- log_selfids(ohci, generation, self_id_count);
-
fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
self_id_count, ohci->self_id_buffer,
ohci->csr_state_setclear_abdicate);
ohci->csr_state_setclear_abdicate = false;
+end:
+ return IRQ_HANDLED;
}
static irqreturn_t irq_handler(int irq, void *data)
@@ -2211,11 +2043,6 @@ static irqreturn_t irq_handler(int irq, void *data)
if (!event || !~event)
return IRQ_NONE;
- if (unlikely(param_debug > 0)) {
- dev_notice_ratelimited(ohci->card.device,
- "The debug parameter is superseded by tracepoints events, and deprecated.");
- }
-
/*
* busReset and postedWriteErr events must not be cleared yet
* (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
@@ -2223,32 +2050,22 @@ static irqreturn_t irq_handler(int irq, void *data)
reg_write(ohci, OHCI1394_IntEventClear,
event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
trace_irqs(ohci->card.index, event);
- log_irqs(ohci, event);
- // The flag is masked again at bus_reset_work() scheduled by selfID event.
+
+ // The flag is masked again in handle_selfid_complete_event(), scheduled by the selfID event.
if (event & OHCI1394_busReset)
reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
- if (event & OHCI1394_selfIDComplete) {
- if (trace_self_id_complete_enabled()) {
- u32 reg = reg_read(ohci, OHCI1394_SelfIDCount);
-
- trace_self_id_complete(ohci->card.index, reg, ohci->self_id,
- has_be_header_quirk(ohci));
- }
- queue_work(selfid_workqueue, &ohci->bus_reset_work);
- }
-
if (event & OHCI1394_RQPkt)
- tasklet_schedule(&ohci->ar_request_ctx.tasklet);
+ queue_work(ohci->card.async_wq, &ohci->ar_request_ctx.work);
if (event & OHCI1394_RSPkt)
- tasklet_schedule(&ohci->ar_response_ctx.tasklet);
+ queue_work(ohci->card.async_wq, &ohci->ar_response_ctx.work);
if (event & OHCI1394_reqTxComplete)
- tasklet_schedule(&ohci->at_request_ctx.tasklet);
+ queue_work(ohci->card.async_wq, &ohci->at_request_ctx.work);
if (event & OHCI1394_respTxComplete)
- tasklet_schedule(&ohci->at_response_ctx.tasklet);
+ queue_work(ohci->card.async_wq, &ohci->at_response_ctx.work);
if (event & OHCI1394_isochRx) {
iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
@@ -2308,7 +2125,10 @@ static irqreturn_t irq_handler(int irq, void *data)
} else
flush_writes(ohci);
- return IRQ_HANDLED;
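+ // Defer the heavy self-ID processing to the threaded handler registered via
+ // request_threaded_irq() in pci_probe().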
+ if (event & OHCI1394_selfIDComplete)
+ return IRQ_WAKE_THREAD;
+ else
+ return IRQ_HANDLED;
}
static int software_reset(struct fw_ohci *ohci)
@@ -2528,7 +2348,7 @@ static int ohci_enable(struct fw_card *card,
* They shouldn't do that in this initial case where the link
* isn't enabled. This means we have to use the same
* workaround here, setting the bus header to 0 and then write
- * the right values in the bus reset tasklet.
+ * the right values in the bus reset work item.
*/
if (config_rom) {
@@ -2588,6 +2408,41 @@ static int ohci_enable(struct fw_card *card,
return 0;
}
+static void ohci_disable(struct fw_card *card)
+{
+ struct pci_dev *pdev = to_pci_dev(card->device);
+ struct fw_ohci *ohci = pci_get_drvdata(pdev);
+ int i, irq = pci_irq_vector(pdev, 0);
+
+ // If the removal is happening from the suspend state, LPS won't be enabled and host
+ // registers (e.g., IntMaskClear) won't be accessible.
+ if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS))
+ return;
+
+ reg_write(ohci, OHCI1394_IntMaskClear, ~0);
+ flush_writes(ohci);
+
+ if (irq >= 0)
+ synchronize_irq(irq);
+
+ flush_work(&ohci->ar_request_ctx.work);
+ flush_work(&ohci->ar_response_ctx.work);
+ flush_work(&ohci->at_request_ctx.work);
+ flush_work(&ohci->at_response_ctx.work);
+
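+ // Flush only the isochronous contexts in use; a set mask bit means the
+ // context is still free.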
+ for (i = 0; i < ohci->n_ir; ++i) {
+ if (!(ohci->ir_context_mask & BIT(i)))
+ flush_work(&ohci->ir_context_list[i].base.work);
+ }
+ for (i = 0; i < ohci->n_it; ++i) {
+ if (!(ohci->it_context_mask & BIT(i)))
+ flush_work(&ohci->it_context_list[i].base.work);
+ }
+
+ at_context_flush(&ohci->at_request_ctx);
+ at_context_flush(&ohci->at_response_ctx);
+}
+
static int ohci_set_config_rom(struct fw_card *card,
const __be32 *config_rom, size_t length)
{
@@ -2617,11 +2472,11 @@ static int ohci_set_config_rom(struct fw_card *card,
* during the atomic update, even on little endian
* architectures. The workaround we use is to put a 0 in the
* header quadlet; 0 is endian agnostic and means that the
- * config rom isn't ready yet. In the bus reset tasklet we
+ * config rom isn't ready yet. In the bus reset work item we
* then set up the real values for the two registers.
*
* We use ohci->lock to avoid racing with the code that sets
- * ohci->next_config_rom to NULL (see bus_reset_work).
+ * ohci->next_config_rom to NULL (see handle_selfid_complete_event).
*/
next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
@@ -2659,7 +2514,7 @@ static int ohci_set_config_rom(struct fw_card *card,
/*
* Now initiate a bus reset to have the changes take
* effect. We clean up the old config rom memory and DMA
- * mappings in the bus reset tasklet, since the OHCI
+ * mappings in the bus reset work item, since the OHCI
* controller could need to access it before the bus reset
* takes effect.
*/
@@ -2686,11 +2541,14 @@ static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
struct fw_ohci *ohci = fw_ohci(card);
- struct context *ctx = &ohci->at_request_ctx;
+ struct at_context *ctx = &ohci->at_request_ctx;
struct driver_data *driver_data = packet->driver_data;
int ret = -ENOENT;
- tasklet_disable_in_atomic(&ctx->tasklet);
+ // Avoid deadlock due to a programming mistake.
+ if (WARN_ON_ONCE(current_work() == &ctx->work))
+ return 0;
+ disable_work_sync(&ctx->work);
if (packet->ack != 0)
goto out;
@@ -2699,7 +2557,6 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
dma_unmap_single(ohci->card.device, packet->payload_bus,
packet->payload_length, DMA_TO_DEVICE);
- log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20);
driver_data->packet = NULL;
packet->ack = RCODE_CANCELLED;
@@ -2709,7 +2566,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
packet->callback(packet, &ohci->card, packet->ack);
ret = 0;
out:
- tasklet_enable(&ctx->tasklet);
+ enable_work(&ctx->work);
return ret;
}
@@ -3620,6 +3477,7 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
static const struct fw_card_driver ohci_driver = {
.enable = ohci_enable,
+ .disable = ohci_disable,
.read_phy_reg = ohci_read_phy_reg,
.update_phy_reg = ohci_update_phy_reg,
.set_config_rom = ohci_set_config_rom,
@@ -3689,7 +3547,6 @@ static int pci_probe(struct pci_dev *dev,
u32 bus_options, max_receive, link_speed, version;
u64 guid;
int i, flags, irq, err;
- size_t size;
if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
@@ -3716,8 +3573,6 @@ static int pci_probe(struct pci_dev *dev,
spin_lock_init(&ohci->lock);
mutex_init(&ohci->phy_reg_mutex);
- INIT_WORK(&ohci->bus_reset_work, bus_reset_work);
-
if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
ohci_err(ohci, "invalid MMIO resource\n");
@@ -3767,15 +3622,17 @@ static int pci_probe(struct pci_dev *dev,
if (err < 0)
return err;
- err = context_init(&ohci->at_request_ctx, ohci,
+ err = context_init(&ohci->at_request_ctx.context, ohci,
OHCI1394_AsReqTrContextControlSet, handle_at_packet);
if (err < 0)
return err;
+ INIT_WORK(&ohci->at_request_ctx.work, ohci_at_context_work);
- err = context_init(&ohci->at_response_ctx, ohci,
+ err = context_init(&ohci->at_response_ctx.context, ohci,
OHCI1394_AsRspTrContextControlSet, handle_at_packet);
if (err < 0)
return err;
+ INIT_WORK(&ohci->at_response_ctx.work, ohci_at_context_work);
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
ohci->ir_context_channels = ~0ULL;
@@ -3783,8 +3640,7 @@ static int pci_probe(struct pci_dev *dev,
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
ohci->ir_context_mask = ohci->ir_context_support;
ohci->n_ir = hweight32(ohci->ir_context_mask);
- size = sizeof(struct iso_context) * ohci->n_ir;
- ohci->ir_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
+ ohci->ir_context_list = devm_kcalloc(&dev->dev, ohci->n_ir, sizeof(struct iso_context), GFP_KERNEL);
if (!ohci->ir_context_list)
return -ENOMEM;
@@ -3798,8 +3654,7 @@ static int pci_probe(struct pci_dev *dev,
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
ohci->it_context_mask = ohci->it_context_support;
ohci->n_it = hweight32(ohci->it_context_mask);
- size = sizeof(struct iso_context) * ohci->n_it;
- ohci->it_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
+ ohci->it_context_list = devm_kcalloc(&dev->dev, ohci->n_it, sizeof(struct iso_context), GFP_KERNEL);
if (!ohci->it_context_list)
return -ENOMEM;
@@ -3824,7 +3679,9 @@ static int pci_probe(struct pci_dev *dev,
goto fail_msi;
}
- err = request_threaded_irq(irq, irq_handler, NULL,
+ // IRQF_ONESHOT is not applied so that other events are still handled by the hardIRQ
+ // handler while the threaded IRQ handler runs for the SelfIDComplete event.
+ err = request_threaded_irq(irq, irq_handler, handle_selfid_complete_event,
pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name,
ohci);
if (err < 0) {
@@ -3860,22 +3717,8 @@ static void pci_remove(struct pci_dev *dev)
struct fw_ohci *ohci = pci_get_drvdata(dev);
int irq;
- /*
- * If the removal is happening from the suspend state, LPS won't be
- * enabled and host registers (eg., IntMaskClear) won't be accessible.
- */
- if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) {
- reg_write(ohci, OHCI1394_IntMaskClear, ~0);
- flush_writes(ohci);
- }
- cancel_work_sync(&ohci->bus_reset_work);
fw_core_remove_card(&ohci->card);
- /*
- * FIXME: Fail all pending packets here, now that the upper
- * layers can't queue any more.
- */
-
software_reset(ohci);
irq = pci_irq_vector(dev, 0);
@@ -3941,17 +3784,12 @@ static struct pci_driver fw_ohci_pci_driver = {
static int __init fw_ohci_init(void)
{
- selfid_workqueue = alloc_workqueue(KBUILD_MODNAME, WQ_MEM_RECLAIM, 0);
- if (!selfid_workqueue)
- return -ENOMEM;
-
return pci_register_driver(&fw_ohci_pci_driver);
}
static void __exit fw_ohci_cleanup(void)
{
pci_unregister_driver(&fw_ohci_pci_driver);
- destroy_workqueue(selfid_workqueue);
}
module_init(fw_ohci_init);
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index fe55613a8ea9..c72ee4756585 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -110,7 +110,7 @@ struct ffa_drv_info {
struct work_struct sched_recv_irq_work;
struct xarray partition_info;
DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
- struct mutex notify_lock; /* lock to protect notifier hashtable */
+ rwlock_t notify_lock; /* lock to protect notifier hashtable */
};
static struct ffa_drv_info *drv_info;
@@ -649,6 +649,26 @@ static u16 ffa_memory_attributes_get(u32 func_id)
return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
}
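+/* Copy the implementation-defined EMAD value only for FF-A versions whose
+ * endpoint memory access descriptor defines that field.
+ */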
+static void ffa_emad_impdef_value_init(u32 version, void *dst, void *src)
+{
+ struct ffa_mem_region_attributes *ep_mem_access;
+
+ if (FFA_EMAD_HAS_IMPDEF_FIELD(version))
+ memcpy(dst, src, sizeof(ep_mem_access->impdef_val));
+}
+
+static void
+ffa_mem_region_additional_setup(u32 version, struct ffa_mem_region *mem_region)
+{
+ if (!FFA_MEM_REGION_HAS_EP_MEM_OFFSET(version)) {
+ mem_region->ep_mem_size = 0;
+ } else {
+ mem_region->ep_mem_size = ffa_emad_size_get(version);
+ mem_region->ep_mem_offset = sizeof(*mem_region);
+ memset(mem_region->reserved, 0, 12);
+ }
+}
+
static int
ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
struct ffa_mem_ops_args *args)
@@ -667,27 +687,24 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
mem_region->flags = args->flags;
mem_region->sender_id = drv_info->vm_id;
mem_region->attributes = ffa_memory_attributes_get(func_id);
- ep_mem_access = buffer +
- ffa_mem_desc_offset(buffer, 0, drv_info->version);
composite_offset = ffa_mem_desc_offset(buffer, args->nattrs,
drv_info->version);
- for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
+ for (idx = 0; idx < args->nattrs; idx++) {
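+		/* Recompute the descriptor offset each iteration: its stride
+		 * depends on the negotiated FF-A version.
+		 */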
+ ep_mem_access = buffer +
+ ffa_mem_desc_offset(buffer, idx, drv_info->version);
ep_mem_access->receiver = args->attrs[idx].receiver;
ep_mem_access->attrs = args->attrs[idx].attrs;
ep_mem_access->composite_off = composite_offset;
ep_mem_access->flag = 0;
ep_mem_access->reserved = 0;
+ ffa_emad_impdef_value_init(drv_info->version,
+ ep_mem_access->impdef_val,
+ args->attrs[idx].impdef_val);
}
mem_region->handle = 0;
mem_region->ep_count = args->nattrs;
- if (drv_info->version <= FFA_VERSION_1_0) {
- mem_region->ep_mem_size = 0;
- } else {
- mem_region->ep_mem_size = sizeof(*ep_mem_access);
- mem_region->ep_mem_offset = sizeof(*mem_region);
- memset(mem_region->reserved, 0, 12);
- }
+ ffa_mem_region_additional_setup(drv_info->version, mem_region);
composite = buffer + composite_offset;
composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
@@ -1250,13 +1267,12 @@ notifier_hnode_get_by_type(u16 notify_id, enum notify_type type)
return NULL;
}
-static int
-update_notifier_cb(struct ffa_device *dev, int notify_id, void *cb,
- void *cb_data, bool is_registration, bool is_framework)
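+/* A non-NULL @cb registers the preallocated entry; a NULL @cb unregisters and
+ * frees the existing one.
+ */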
+static int update_notifier_cb(struct ffa_device *dev, int notify_id,
+ struct notifier_cb_info *cb, bool is_framework)
{
struct notifier_cb_info *cb_info = NULL;
enum notify_type type = ffa_notify_type_get(dev->vm_id);
- bool cb_found;
+ bool cb_found, is_registration = !!cb;
if (is_framework)
cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, dev->vm_id,
@@ -1270,20 +1286,10 @@ update_notifier_cb(struct ffa_device *dev, int notify_id, void *cb,
return -EINVAL;
if (is_registration) {
- cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL);
- if (!cb_info)
- return -ENOMEM;
-
- cb_info->dev = dev;
- cb_info->cb_data = cb_data;
- if (is_framework)
- cb_info->fwk_cb = cb;
- else
- cb_info->cb = cb;
-
- hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id);
+ hash_add(drv_info->notifier_hash, &cb->hnode, notify_id);
} else {
hash_del(&cb_info->hnode);
+ kfree(cb_info);
}
return 0;
@@ -1300,20 +1306,19 @@ static int __ffa_notify_relinquish(struct ffa_device *dev, int notify_id,
if (notify_id >= FFA_MAX_NOTIFICATIONS)
return -EINVAL;
- mutex_lock(&drv_info->notify_lock);
+ write_lock(&drv_info->notify_lock);
- rc = update_notifier_cb(dev, notify_id, NULL, NULL, false,
- is_framework);
+ rc = update_notifier_cb(dev, notify_id, NULL, is_framework);
if (rc) {
pr_err("Could not unregister notification callback\n");
- mutex_unlock(&drv_info->notify_lock);
+ write_unlock(&drv_info->notify_lock);
return rc;
}
if (!is_framework)
rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
- mutex_unlock(&drv_info->notify_lock);
+ write_unlock(&drv_info->notify_lock);
return rc;
}
@@ -1334,6 +1339,7 @@ static int __ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
{
int rc;
u32 flags = 0;
+ struct notifier_cb_info *cb_info = NULL;
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
@@ -1341,28 +1347,40 @@ static int __ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
if (notify_id >= FFA_MAX_NOTIFICATIONS)
return -EINVAL;
- mutex_lock(&drv_info->notify_lock);
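+	/* Allocate before taking the lock: notify_lock is now a non-sleeping rwlock. */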
+ cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL);
+ if (!cb_info)
+ return -ENOMEM;
+
+ cb_info->dev = dev;
+ cb_info->cb_data = cb_data;
+ if (is_framework)
+ cb_info->fwk_cb = cb;
+ else
+ cb_info->cb = cb;
+
+ write_lock(&drv_info->notify_lock);
if (!is_framework) {
if (is_per_vcpu)
flags = PER_VCPU_NOTIFICATION_FLAG;
rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
- if (rc) {
- mutex_unlock(&drv_info->notify_lock);
- return rc;
- }
+ if (rc)
+ goto out_unlock_free;
}
- rc = update_notifier_cb(dev, notify_id, cb, cb_data, true,
- is_framework);
+ rc = update_notifier_cb(dev, notify_id, cb_info, is_framework);
if (rc) {
pr_err("Failed to register callback for %d - %d\n",
notify_id, rc);
if (!is_framework)
ffa_notification_unbind(dev->vm_id, BIT(notify_id));
}
- mutex_unlock(&drv_info->notify_lock);
+
+out_unlock_free:
+ write_unlock(&drv_info->notify_lock);
+ if (rc)
+ kfree(cb_info);
return rc;
}
@@ -1406,9 +1424,9 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
if (!(bitmap & 1))
continue;
- mutex_lock(&drv_info->notify_lock);
+ read_lock(&drv_info->notify_lock);
cb_info = notifier_hnode_get_by_type(notify_id, type);
- mutex_unlock(&drv_info->notify_lock);
+ read_unlock(&drv_info->notify_lock);
if (cb_info && cb_info->cb)
cb_info->cb(notify_id, cb_info->cb_data);
@@ -1446,9 +1464,9 @@ static void handle_fwk_notif_callbacks(u32 bitmap)
ffa_rx_release();
- mutex_lock(&drv_info->notify_lock);
+ read_lock(&drv_info->notify_lock);
cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, target, &uuid);
- mutex_unlock(&drv_info->notify_lock);
+ read_unlock(&drv_info->notify_lock);
if (cb_info && cb_info->fwk_cb)
cb_info->fwk_cb(notify_id, cb_info->cb_data, buf);
@@ -1973,7 +1991,7 @@ static void ffa_notifications_setup(void)
goto cleanup;
hash_init(drv_info->notifier_hash);
- mutex_init(&drv_info->notify_lock);
+ rwlock_init(&drv_info->notify_lock);
drv_info->notif_enabled = true;
return;
@@ -2058,7 +2076,7 @@ free_drv_info:
kfree(drv_info);
return ret;
}
-module_init(ffa_init);
+rootfs_initcall(ffa_init);
static void __exit ffa_exit(void)
{
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 1adef0389475..c7698cfaa4e8 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -323,6 +323,31 @@ static struct attribute *scmi_device_attributes_attrs[] = {
};
ATTRIBUTE_GROUPS(scmi_device_attributes);
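+/* Forward system sleep transitions to the bound SCMI driver's dev_pm_ops, if any. */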
+static int scmi_pm_suspend(struct device *dev)
+{
+ const struct device_driver *drv = dev->driver;
+
+ if (drv && drv->pm && drv->pm->suspend)
+ return drv->pm->suspend(dev);
+
+ return 0;
+}
+
+static int scmi_pm_resume(struct device *dev)
+{
+ const struct device_driver *drv = dev->driver;
+
+ if (drv && drv->pm && drv->pm->resume)
+ return drv->pm->resume(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops scmi_dev_pm_ops = {
+ .suspend = pm_sleep_ptr(scmi_pm_suspend),
+ .resume = pm_sleep_ptr(scmi_pm_resume),
+};
+
const struct bus_type scmi_bus_type = {
.name = "scmi_protocol",
.match = scmi_dev_match,
@@ -330,6 +355,7 @@ const struct bus_type scmi_bus_type = {
.remove = scmi_dev_remove,
.uevent = scmi_device_uevent,
.dev_groups = scmi_device_attributes_groups,
+ .pm = &scmi_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(scmi_bus_type);
@@ -375,8 +401,8 @@ static void scmi_device_release(struct device *dev)
static void __scmi_device_destroy(struct scmi_device *scmi_dev)
{
- pr_debug("(%s) Destroying SCMI device '%s' for protocol 0x%x (%s)\n",
- of_node_full_name(scmi_dev->dev.parent->of_node),
+ pr_debug("(%pOF) Destroying SCMI device '%s' for protocol 0x%x (%s)\n",
+ scmi_dev->dev.parent->of_node,
dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
scmi_dev->name);
@@ -448,9 +474,8 @@ __scmi_device_create(struct device_node *np, struct device *parent,
if (retval)
goto put_dev;
- pr_debug("(%s) Created SCMI device '%s' for protocol 0x%x (%s)\n",
- of_node_full_name(parent->of_node),
- dev_name(&scmi_dev->dev), protocol, name);
+ pr_debug("(%pOF) Created SCMI device '%s' for protocol 0x%x (%s)\n",
+ parent->of_node, dev_name(&scmi_dev->dev), protocol, name);
return scmi_dev;
put_dev:
@@ -467,8 +492,8 @@ _scmi_device_create(struct device_node *np, struct device *parent,
sdev = __scmi_device_create(np, parent, protocol, name);
if (!sdev)
- pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n",
- of_node_full_name(parent->of_node), protocol, name);
+ pr_err("(%pOF) Failed to create device for protocol 0x%x (%s)\n",
+ parent->of_node, protocol, name);
return sdev;
}
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index dab758c5fdea..7c35c95fddba 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -305,13 +305,40 @@ enum debug_counters {
ERR_MSG_INVALID,
ERR_MSG_NOMEM,
ERR_PROTOCOL,
+ XFERS_INFLIGHT,
SCMI_DEBUG_COUNTERS_LAST
};
-static inline void scmi_inc_count(atomic_t *arr, int stat)
+/**
+ * struct scmi_debug_info - Debug common info
+ * @top_dentry: A reference to the top debugfs dentry
+ * @name: Name of this SCMI instance
+ * @type: Type of this SCMI instance
+ * @is_atomic: Flag to state if the transport of this instance is atomic
+ * @counters: An array of atomic_t's used for tracking statistics (if enabled)
+ */
+struct scmi_debug_info {
+ struct dentry *top_dentry;
+ const char *name;
+ const char *type;
+ bool is_atomic;
+ atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
+};
+
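+/* These helpers compile away unless CONFIG_ARM_SCMI_DEBUG_COUNTERS is enabled,
+ * and tolerate a NULL @dbg so callers need no extra checks.
+ */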
+static inline void scmi_inc_count(struct scmi_debug_info *dbg, int stat)
+{
+ if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
+ if (dbg)
+ atomic_inc(&dbg->counters[stat]);
+ }
+}
+
+static inline void scmi_dec_count(struct scmi_debug_info *dbg, int stat)
{
- if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
- atomic_inc(&arr[stat]);
+ if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
+ if (dbg)
+ atomic_dec(&dbg->counters[stat]);
+ }
}
enum scmi_bad_msg {
@@ -498,4 +525,5 @@ static struct platform_driver __drv = { \
void scmi_notification_instance_data_set(const struct scmi_handle *handle,
void *priv);
void *scmi_notification_instance_data_get(const struct scmi_handle *handle);
+int scmi_inflight_count(const struct scmi_handle *handle);
#endif /* _SCMI_COMMON_H */
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 395fe9289035..5caa9191a8d1 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -116,22 +116,6 @@ struct scmi_protocol_instance {
#define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
/**
- * struct scmi_debug_info - Debug common info
- * @top_dentry: A reference to the top debugfs dentry
- * @name: Name of this SCMI instance
- * @type: Type of this SCMI instance
- * @is_atomic: Flag to state if the transport of this instance is atomic
- * @counters: An array of atomic_c's used for tracking statistics (if enabled)
- */
-struct scmi_debug_info {
- struct dentry *top_dentry;
- const char *name;
- const char *type;
- bool is_atomic;
- atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
-};
-
-/**
* struct scmi_info - Structure representing a SCMI instance
*
* @id: A sequence number starting from zero identifying this instance
@@ -190,6 +174,7 @@ struct scmi_info {
};
#define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
+#define tx_minfo_to_scmi_info(h) container_of(h, struct scmi_info, tx_minfo)
#define bus_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, bus_nb)
#define req_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, dev_req_nb)
@@ -603,9 +588,14 @@ static inline void
scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
struct scmi_xfers_info *minfo)
{
+ /* In this context minfo is always tx_minfo, since only TX xfers can be pending */
+ struct scmi_info *info = tx_minfo_to_scmi_info(minfo);
+
/* Set in-flight */
set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
+ scmi_inc_count(info->dbg, XFERS_INFLIGHT);
+
xfer->pending = true;
}
@@ -807,10 +797,15 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
spin_lock_irqsave(&minfo->xfer_lock, flags);
if (refcount_dec_and_test(&xfer->users)) {
if (xfer->pending) {
+ struct scmi_info *info = tx_minfo_to_scmi_info(minfo);
+
scmi_xfer_token_clear(minfo, xfer);
hash_del(&xfer->node);
xfer->pending = false;
+
+ scmi_dec_count(info->dbg, XFERS_INFLIGHT);
}
+ xfer->flags = 0;
hlist_add_head(&xfer->node, &minfo->free_xfers);
}
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
@@ -829,8 +824,6 @@ void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
struct scmi_info *info = handle_to_scmi_info(handle);
- xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
- xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
return __scmi_xfer_put(&info->tx_minfo, xfer);
}
@@ -1024,7 +1017,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED);
- scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED);
+ scmi_inc_count(info->dbg, ERR_MSG_UNEXPECTED);
return xfer;
}
@@ -1052,7 +1045,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
msg_type, xfer_id, msg_hdr, xfer->state);
scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID);
- scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID);
+ scmi_inc_count(info->dbg, ERR_MSG_INVALID);
/* On error the refcount incremented above has to be dropped */
__scmi_xfer_put(minfo, xfer);
@@ -1097,7 +1090,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
PTR_ERR(xfer));
scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM);
- scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM);
+ scmi_inc_count(info->dbg, ERR_MSG_NOMEM);
scmi_clear_channel(info, cinfo);
return;
@@ -1113,7 +1106,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
xfer->hdr.id, "NOTI", xfer->hdr.seq,
xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
- scmi_inc_count(info->dbg->counters, NOTIFICATION_OK);
+ scmi_inc_count(info->dbg, NOTIFICATION_OK);
scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
@@ -1173,10 +1166,10 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
scmi_clear_channel(info, cinfo);
complete(xfer->async_done);
- scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK);
+ scmi_inc_count(info->dbg, DELAYED_RESPONSE_OK);
} else {
complete(&xfer->done);
- scmi_inc_count(info->dbg->counters, RESPONSE_OK);
+ scmi_inc_count(info->dbg, RESPONSE_OK);
}
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
@@ -1286,7 +1279,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
"timed out in resp(caller: %pS) - polling\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
- scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT);
+ scmi_inc_count(info->dbg, XFERS_RESPONSE_POLLED_TIMEOUT);
}
}
@@ -1311,7 +1304,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
"RESP" : "resp",
xfer->hdr.seq, xfer->hdr.status,
xfer->rx.buf, xfer->rx.len);
- scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK);
+ scmi_inc_count(info->dbg, RESPONSE_POLLED_OK);
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
scmi_raw_message_report(info->raw, xfer,
@@ -1326,7 +1319,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
dev_err(dev, "timed out in resp(caller: %pS)\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
- scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT);
+ scmi_inc_count(info->dbg, XFERS_RESPONSE_TIMEOUT);
}
}
@@ -1410,13 +1403,13 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
!is_transport_polling_capable(info->desc)) {
dev_warn_once(dev,
"Polling mode is not supported by transport.\n");
- scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED);
+ scmi_inc_count(info->dbg, SENT_FAIL_POLLING_UNSUPPORTED);
return -EINVAL;
}
cinfo = idr_find(&info->tx_idr, pi->proto->id);
if (unlikely(!cinfo)) {
- scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND);
+ scmi_inc_count(info->dbg, SENT_FAIL_CHANNEL_NOT_FOUND);
return -EINVAL;
}
/* True ONLY if also supported by transport. */
@@ -1433,7 +1426,8 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
- xfer->hdr.poll_completion);
+ xfer->hdr.poll_completion,
+ scmi_inflight_count(&info->handle));
/* Clear any stale status */
xfer->hdr.status = SCMI_SUCCESS;
@@ -1450,26 +1444,27 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
ret = info->desc->ops->send_message(cinfo, xfer);
if (ret < 0) {
dev_dbg(dev, "Failed to send message %d\n", ret);
- scmi_inc_count(info->dbg->counters, SENT_FAIL);
+ scmi_inc_count(info->dbg, SENT_FAIL);
return ret;
}
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
xfer->hdr.id, "CMND", xfer->hdr.seq,
xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
- scmi_inc_count(info->dbg->counters, SENT_OK);
+ scmi_inc_count(info->dbg, SENT_OK);
ret = scmi_wait_for_message_response(cinfo, xfer);
if (!ret && xfer->hdr.status) {
ret = scmi_to_linux_errno(xfer->hdr.status);
- scmi_inc_count(info->dbg->counters, ERR_PROTOCOL);
+ scmi_inc_count(info->dbg, ERR_PROTOCOL);
}
if (info->desc->ops->mark_txdone)
info->desc->ops->mark_txdone(cinfo, ret, xfer);
trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
- xfer->hdr.protocol_id, xfer->hdr.seq, ret);
+ xfer->hdr.protocol_id, xfer->hdr.seq, ret,
+ scmi_inflight_count(&info->handle));
return ret;
}
@@ -2912,6 +2907,7 @@ static const char * const dbg_counter_strs[] = {
"err_msg_invalid",
"err_msg_nomem",
"err_protocol",
+ "xfers_inflight",
};
static ssize_t reset_all_on_write(struct file *filp, const char __user *buf,
@@ -3031,9 +3027,6 @@ static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
u8 channels[SCMI_MAX_CHANNELS] = {};
DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
- if (!info->dbg)
- return -EINVAL;
-
/* Enumerate all channels to collect their ids */
idr_for_each_entry(&info->tx_idr, cinfo, id) {
/*
@@ -3205,7 +3198,7 @@ static int scmi_probe(struct platform_device *pdev)
if (!info->dbg)
dev_warn(dev, "Failed to setup SCMI debugfs.\n");
- if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ if (info->dbg && IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
ret = scmi_debugfs_raw_mode_setup(info);
if (!coex) {
if (ret)
@@ -3405,6 +3398,20 @@ static struct dentry *scmi_debugfs_init(void)
return d;
}
+int scmi_inflight_count(const struct scmi_handle *handle)
+{
+ if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ if (!info->dbg)
+ return 0;
+
+ return atomic_read(&info->dbg->counters[XFERS_INFLIGHT]);
+ } else {
+ return 0;
+ }
+}
+
static int __init scmi_driver_init(void)
{
scmi_quirks_initialize();
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
index e160ecb22948..dee9f238f6fd 100644
--- a/drivers/firmware/arm_scmi/notify.c
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -318,6 +318,9 @@ struct scmi_registered_events_desc {
* customized event report
* @num_sources: The number of possible sources for this event as stated at
* events' registration time
+ * @not_supported_by_platform: A flag indicating that none of this event's
+ *                             sources is supported by the platform
* @sources: A reference to a dynamically allocated array used to refcount the
* events' enable requests for all the existing sources
* @sources_mtx: A mutex to serialize the access to @sources
@@ -334,6 +337,7 @@ struct scmi_registered_event {
const struct scmi_event *evt;
void *report;
u32 num_sources;
+ bool not_supported_by_platform;
refcount_t *sources;
/* locking to serialize the access to sources */
struct mutex sources_mtx;
@@ -811,10 +815,19 @@ int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
if (!r_evt->report)
return -ENOMEM;
- for (id = 0; id < r_evt->num_sources; id++)
- if (ee->ops->is_notify_supported &&
- !ee->ops->is_notify_supported(ph, r_evt->evt->id, id))
- refcount_set(&r_evt->sources[id], NOTIF_UNSUPP);
+ if (ee->ops->is_notify_supported) {
+ int supported = 0;
+
+ for (id = 0; id < r_evt->num_sources; id++) {
+ if (!ee->ops->is_notify_supported(ph, r_evt->evt->id, id))
+ refcount_set(&r_evt->sources[id], NOTIF_UNSUPP);
+ else
+ supported++;
+ }
+
+ /* Not even one source has been found to be supported */
+ r_evt->not_supported_by_platform = !supported;
+ }
pd->registered_events[i] = r_evt;
/* Ensure events are updated */
@@ -936,6 +949,11 @@ static inline int scmi_bind_event_handler(struct scmi_notify_instance *ni,
* of protocol instance.
*/
hash_del(&hndl->hash);
+
+ /* Bailout if event is not supported at all */
+ if (r_evt->not_supported_by_platform)
+ return -EOPNOTSUPP;
+
/*
* Acquire protocols only for NON pending handlers, so as NOT to trigger
* protocol initialization when a notifier is registered against a still
@@ -1060,6 +1078,9 @@ __scmi_event_handler_get_ops(struct scmi_notify_instance *ni,
r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
KEY_XTRACT_EVT_ID(evt_key));
+ if (r_evt && r_evt->not_supported_by_platform)
+ return ERR_PTR(-EOPNOTSUPP);
+
mutex_lock(&ni->pending_mtx);
/* Search registered events at first ... if possible at all */
if (r_evt) {
@@ -1087,7 +1108,7 @@ __scmi_event_handler_get_ops(struct scmi_notify_instance *ni,
hndl->key);
/* this hndl can be only a pending one */
scmi_put_handler_unlocked(ni, hndl);
- hndl = NULL;
+ hndl = ERR_PTR(-EINVAL);
}
}
mutex_unlock(&ni->pending_mtx);
@@ -1370,8 +1391,8 @@ static int scmi_notifier_register(const struct scmi_handle *handle,
evt_key = MAKE_HASH_KEY(proto_id, evt_id,
src_id ? *src_id : SRC_ID_MASK);
hndl = scmi_get_or_create_handler(ni, evt_key);
- if (!hndl)
- return -EINVAL;
+ if (IS_ERR(hndl))
+ return PTR_ERR(hndl);
blocking_notifier_chain_register(&hndl->chain, nb);
@@ -1416,8 +1437,8 @@ static int scmi_notifier_unregister(const struct scmi_handle *handle,
evt_key = MAKE_HASH_KEY(proto_id, evt_id,
src_id ? *src_id : SRC_ID_MASK);
hndl = scmi_get_handler(ni, evt_key);
- if (!hndl)
- return -EINVAL;
+ if (IS_ERR(hndl))
+ return PTR_ERR(hndl);
/*
* Note that this chain unregistration call is safe on its own
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index c7e5a34b254b..683fd9b85c5c 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -892,7 +892,7 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
freq = dom->opp[idx].indicative_freq * dom->mult_factor;
/* All OPPs above the sustained frequency are treated as turbo */
- data.turbo = freq > dom->sustained_freq_khz * 1000;
+ data.turbo = freq > dom->sustained_freq_khz * 1000UL;
data.level = dom->opp[idx].perf;
data.freq = freq;
diff --git a/drivers/firmware/arm_scmi/quirks.c b/drivers/firmware/arm_scmi/quirks.c
index 03960aca3610..03848283c2a0 100644
--- a/drivers/firmware/arm_scmi/quirks.c
+++ b/drivers/firmware/arm_scmi/quirks.c
@@ -71,6 +71,7 @@
*/
#include <linux/ctype.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/hashtable.h>
@@ -89,9 +90,9 @@
struct scmi_quirk {
bool enabled;
const char *name;
- char *vendor;
- char *sub_vendor_id;
- char *impl_ver_range;
+ const char *vendor;
+ const char *sub_vendor_id;
+ const char *impl_ver_range;
u32 start_range;
u32 end_range;
struct static_key_false *key;
@@ -217,7 +218,7 @@ static unsigned int scmi_quirk_signature(const char *vend, const char *sub_vend)
static int scmi_quirk_range_parse(struct scmi_quirk *quirk)
{
- const char *last, *first = quirk->impl_ver_range;
+ const char *last, *first __free(kfree) = NULL;
size_t len;
char *sep;
int ret;
@@ -228,8 +229,12 @@ static int scmi_quirk_range_parse(struct scmi_quirk *quirk)
if (!len)
return 0;
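+	/* Duplicate the now-const range string so the '-' separator can be
+	 * overwritten in place below.
+	 */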
+ first = kmemdup(quirk->impl_ver_range, len + 1, GFP_KERNEL);
+ if (!first)
+ return -ENOMEM;
+
last = first + len - 1;
- sep = strchr(quirk->impl_ver_range, '-');
+ sep = strchr(first, '-');
if (sep)
*sep = '\0';
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index 3d543b1d8947..73db5492ab44 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -475,7 +475,8 @@ static void scmi_xfer_raw_worker(struct work_struct *work)
raw->desc->ops->mark_txdone(rw->cinfo, ret, xfer);
trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
- xfer->hdr.protocol_id, xfer->hdr.seq, ret);
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ ret, scmi_inflight_count(raw->handle));
/* Wait also for an async delayed response if needed */
if (!ret && xfer->async_done) {
@@ -642,7 +643,8 @@ static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw,
trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
- xfer->hdr.poll_completion);
+ xfer->hdr.poll_completion,
+ scmi_inflight_count(raw->handle));
ret = raw->desc->ops->send_message(rw->cinfo, xfer);
if (ret) {
diff --git a/drivers/firmware/arm_scmi/scmi_power_control.c b/drivers/firmware/arm_scmi/scmi_power_control.c
index 21f467a92942..955736336061 100644
--- a/drivers/firmware/arm_scmi/scmi_power_control.c
+++ b/drivers/firmware/arm_scmi/scmi_power_control.c
@@ -46,6 +46,7 @@
#include <linux/math.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/pm.h>
#include <linux/printk.h>
#include <linux/reboot.h>
#include <linux/scmi_protocol.h>
@@ -324,12 +325,7 @@ static int scmi_userspace_notifier(struct notifier_block *nb,
static void scmi_suspend_work_func(struct work_struct *work)
{
- struct scmi_syspower_conf *sc =
- container_of(work, struct scmi_syspower_conf, suspend_work);
-
pm_suspend(PM_SUSPEND_MEM);
-
- sc->state = SCMI_SYSPOWER_IDLE;
}
static int scmi_syspower_probe(struct scmi_device *sdev)
@@ -354,6 +350,7 @@ static int scmi_syspower_probe(struct scmi_device *sdev)
sc->required_transition = SCMI_SYSTEM_MAX;
sc->userspace_nb.notifier_call = &scmi_userspace_notifier;
sc->dev = &sdev->dev;
+ dev_set_drvdata(&sdev->dev, sc);
INIT_WORK(&sc->suspend_work, scmi_suspend_work_func);
@@ -363,6 +360,18 @@ static int scmi_syspower_probe(struct scmi_device *sdev)
NULL, &sc->userspace_nb);
}
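+/* Return the syspower state machine to IDLE on resume; this replaces the reset
+ * formerly done at the end of the suspend work item.
+ */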
+static int scmi_system_power_resume(struct device *dev)
+{
+ struct scmi_syspower_conf *sc = dev_get_drvdata(dev);
+
+ sc->state = SCMI_SYSPOWER_IDLE;
+ return 0;
+}
+
+static const struct dev_pm_ops scmi_system_power_pmops = {
+ SYSTEM_SLEEP_PM_OPS(NULL, scmi_system_power_resume)
+};
+
static const struct scmi_device_id scmi_id_table[] = {
{ SCMI_PROTOCOL_SYSTEM, "syspower" },
{ },
@@ -370,6 +379,9 @@ static const struct scmi_device_id scmi_id_table[] = {
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
static struct scmi_driver scmi_system_power_driver = {
+ .driver = {
+ .pm = pm_sleep_ptr(&scmi_system_power_pmops),
+ },
.name = "scmi-system-power",
.probe = scmi_syspower_probe,
.id_table = scmi_id_table,
diff --git a/drivers/firmware/arm_scmi/transports/mailbox.c b/drivers/firmware/arm_scmi/transports/mailbox.c
index bd041c99b92b..ae0f67e6cc45 100644
--- a/drivers/firmware/arm_scmi/transports/mailbox.c
+++ b/drivers/firmware/arm_scmi/transports/mailbox.c
@@ -127,8 +127,8 @@ static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan,
(num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2) ||
(num_mb == 4 && num_sh != 2)) {
dev_warn(cdev,
- "Invalid channel descriptor for '%s' - mbs:%d shm:%d\n",
- of_node_full_name(np), num_mb, num_sh);
+ "Invalid channel descriptor for '%pOF' - mbs:%d shm:%d\n",
+ np, num_mb, num_sh);
return -EINVAL;
}
@@ -140,8 +140,7 @@ static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan,
of_parse_phandle(np, "shmem", 1);
if (!np_tx || !np_rx || np_tx == np_rx) {
- dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
- of_node_full_name(np));
+ dev_warn(cdev, "Invalid shmem descriptor for '%pOF'\n", np);
ret = -EINVAL;
}
}
diff --git a/drivers/firmware/arm_scmi/transports/optee.c b/drivers/firmware/arm_scmi/transports/optee.c
index 3949a877e17d..dc0f46340153 100644
--- a/drivers/firmware/arm_scmi/transports/optee.c
+++ b/drivers/firmware/arm_scmi/transports/optee.c
@@ -498,7 +498,7 @@ static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret,
mutex_unlock(&channel->mu);
}
-static struct scmi_transport_ops scmi_optee_ops = {
+static const struct scmi_transport_ops scmi_optee_ops = {
.chan_available = scmi_optee_chan_available,
.chan_setup = scmi_optee_chan_setup,
.chan_free = scmi_optee_chan_free,
diff --git a/drivers/firmware/arm_scmi/transports/virtio.c b/drivers/firmware/arm_scmi/transports/virtio.c
index cb934db9b2b4..326c4a93e44b 100644
--- a/drivers/firmware/arm_scmi/transports/virtio.c
+++ b/drivers/firmware/arm_scmi/transports/virtio.c
@@ -871,6 +871,9 @@ static int scmi_vio_probe(struct virtio_device *vdev)
/* Ensure initialized scmi_vdev is visible */
smp_store_mb(scmi_vdev, vdev);
+ /* Set device ready */
+ virtio_device_ready(vdev);
+
ret = platform_driver_register(&scmi_virtio_driver);
if (ret) {
vdev->priv = NULL;
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
index a8915d3b4df5..700a3f24f4ef 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
@@ -25,7 +25,10 @@
enum scmi_imx_misc_protocol_cmd {
SCMI_IMX_MISC_CTRL_SET = 0x3,
SCMI_IMX_MISC_CTRL_GET = 0x4,
+ SCMI_IMX_MISC_DISCOVER_BUILD_INFO = 0x6,
SCMI_IMX_MISC_CTRL_NOTIFY = 0x8,
+ SCMI_IMX_MISC_CFG_INFO_GET = 0xC,
+ SCMI_IMX_MISC_BOARD_INFO = 0xE,
};
struct scmi_imx_misc_info {
@@ -65,6 +68,27 @@ struct scmi_imx_misc_ctrl_get_out {
__le32 val[];
};
+struct scmi_imx_misc_buildinfo_out {
+ __le32 buildnum;
+ __le32 buildcommit;
+#define MISC_MAX_BUILDDATE 16
+ u8 builddate[MISC_MAX_BUILDDATE];
+#define MISC_MAX_BUILDTIME 16
+ u8 buildtime[MISC_MAX_BUILDTIME];
+};
+
+struct scmi_imx_misc_board_info_out {
+ __le32 attributes;
+#define MISC_MAX_BRDNAME 16
+ u8 brdname[MISC_MAX_BRDNAME];
+};
+
+struct scmi_imx_misc_cfg_info_out {
+ __le32 msel;
+#define MISC_MAX_CFGNAME 16
+ u8 cfgname[MISC_MAX_CFGNAME];
+};
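+
+/*
+ * The fixed-width name/date/time fields above are not guaranteed to be
+ * NUL-terminated by the firmware, so readers copy them out with strscpy()
+ * into local buffers before printing.
+ */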
+
static int scmi_imx_misc_attributes_get(const struct scmi_protocol_handle *ph,
struct scmi_imx_misc_info *mi)
{
@@ -272,6 +296,81 @@ static int scmi_imx_misc_ctrl_set(const struct scmi_protocol_handle *ph,
return ret;
}
+static int scmi_imx_misc_build_info_discover(const struct scmi_protocol_handle *ph)
+{
+ char date[MISC_MAX_BUILDDATE], time[MISC_MAX_BUILDTIME];
+ struct scmi_imx_misc_buildinfo_out *out;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_DISCOVER_BUILD_INFO, 0,
+ sizeof(*out), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(date, out->builddate, MISC_MAX_BUILDDATE);
+ strscpy(time, out->buildtime, MISC_MAX_BUILDTIME);
+ dev_info(ph->dev, "SM Version\t= Build %u, Commit %08x %s %s\n",
+ le32_to_cpu(out->buildnum), le32_to_cpu(out->buildcommit),
+ date, time);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_misc_board_info(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_misc_board_info_out *out;
+ char name[MISC_MAX_BRDNAME];
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_BOARD_INFO, 0, sizeof(*out), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(name, out->brdname, MISC_MAX_BRDNAME);
+ dev_info(ph->dev, "Board\t\t= %s, attr=0x%08x\n",
+ name, le32_to_cpu(out->attributes));
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_misc_cfg_info_get(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_misc_cfg_info_out *out;
+ char name[MISC_MAX_CFGNAME];
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CFG_INFO_GET, 0, sizeof(*out), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(name, out->cfgname, MISC_MAX_CFGNAME);
+ dev_info(ph->dev, "SM Config\t= %s, mSel = %u\n",
+ name, le32_to_cpu(out->msel));
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
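+/*
+ * The helpers above are informational only: they log what the SM firmware
+ * reports, and protocol init below treats NOT_SUPPORTED as non-fatal for
+ * firmware that does not implement these optional messages.
+ */
+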
static const struct scmi_imx_misc_proto_ops scmi_imx_misc_proto_ops = {
.misc_ctrl_set = scmi_imx_misc_ctrl_set,
.misc_ctrl_get = scmi_imx_misc_ctrl_get,
@@ -299,6 +398,18 @@ static int scmi_imx_misc_protocol_init(const struct scmi_protocol_handle *ph)
if (ret)
return ret;
+ ret = scmi_imx_misc_build_info_discover(ph);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ ret = scmi_imx_misc_board_info(ph);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ ret = scmi_imx_misc_cfg_info_get(ph);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
return ph->set_priv(ph, minfo, version);
}
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
index 4e246a78a042..741f4eace350 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
@@ -1660,6 +1660,7 @@ protocol_id: 0x84
|Name |Description |
+--------------------+---------------------------------------------------------+
|int32 status |SUCCESS: system log return |
+| |NOT_SUPPORTED: system log not available |
+--------------------+---------------------------------------------------------+
|uint32 numLogflags |Descriptor for the log data returned by this call. |
| |Bits[31:20] Number of remaining log words. |
@@ -1670,6 +1671,30 @@ protocol_id: 0x84
|uint32 syslog[N] |Log data array, N is defined in bits[11:0] of numLogflags|
+--------------------+---------------------------------------------------------+
+MISC_BOARD_INFO
+~~~~~~~~~~~~~~~
+
+message_id: 0xE
+protocol_id: 0x84
+
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status |SUCCESS: board name return |
+| |NOT_SUPPORTED: board name not available |
++--------------------+---------------------------------------------------------+
+|uint32 attributes |Board-specific attributes reserved for future expansion |
+| |without breaking backwards compatibility. The firmware |
+| |sets the value to 0 |
++--------------------+---------------------------------------------------------+
+|uint8 boardname[16] |Board name. NULL terminated ASCII string, up to 16 bytes |
+| |in length. This is the board name exported by the System |
+| |Manager (SM) firmware and may not match the board name in |
+| |the device tree. |
++--------------------+---------------------------------------------------------+
+
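+Linux issues this message once at MISC protocol initialization and logs the
+returned board name and attributes; NOT_SUPPORTED is treated as non-fatal.
+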
NEGOTIATE_PROTOCOL_VERSION
~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c
index fda6a1573609..17127880e10a 100644
--- a/drivers/firmware/arm_scmi/voltage.c
+++ b/drivers/firmware/arm_scmi/voltage.c
@@ -393,7 +393,7 @@ static int scmi_voltage_domains_num_get(const struct scmi_protocol_handle *ph)
return vinfo->num_domains;
}
-static struct scmi_voltage_proto_ops voltage_proto_ops = {
+static const struct scmi_voltage_proto_ops voltage_proto_ops = {
.num_domains_get = scmi_voltage_domains_num_get,
.info_get = scmi_voltage_info_get,
.config_set = scmi_voltage_config_set,
diff --git a/drivers/firmware/broadcom/bcm47xx_sprom.c b/drivers/firmware/broadcom/bcm47xx_sprom.c
index 14fbcd11657c..fdcd3a07abcd 100644
--- a/drivers/firmware/broadcom/bcm47xx_sprom.c
+++ b/drivers/firmware/broadcom/bcm47xx_sprom.c
@@ -404,7 +404,7 @@ static void bcm47xx_sprom_fill_auto(struct ssb_sprom *sprom,
ENTRY(0x00000700, u8, pre, "noiselvl5gua1", noiselvl5gua[1], 0, fb);
ENTRY(0x00000700, u8, pre, "noiselvl5gua2", noiselvl5gua[2], 0, fb);
}
-#undef ENTRY /* It's specififc, uses local variable, don't use it (again). */
+#undef ENTRY /* It's specific, uses local variable, don't use it (again). */
static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom,
const char *prefix, bool fallback)
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 560724ce21aa..525ac0f0a75d 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -9,9 +9,11 @@
* Cirrus Logic International Semiconductor Ltd.
*/
+#include <linux/cleanup.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -311,44 +313,11 @@ static const struct cs_dsp_ops cs_dsp_adsp2_ops[];
static const struct cs_dsp_ops cs_dsp_halo_ops;
static const struct cs_dsp_ops cs_dsp_halo_ao_ops;
-struct cs_dsp_buf {
+struct cs_dsp_alg_region_list_item {
struct list_head list;
- void *buf;
+ struct cs_dsp_alg_region alg_region;
};
-static struct cs_dsp_buf *cs_dsp_buf_alloc(const void *src, size_t len,
- struct list_head *list)
-{
- struct cs_dsp_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
-
- if (buf == NULL)
- return NULL;
-
- buf->buf = vmalloc(len);
- if (!buf->buf) {
- kfree(buf);
- return NULL;
- }
- memcpy(buf->buf, src, len);
-
- if (list)
- list_add_tail(&buf->list, list);
-
- return buf;
-}
-
-static void cs_dsp_buf_free(struct list_head *list)
-{
- while (!list_empty(list)) {
- struct cs_dsp_buf *buf = list_first_entry(list,
- struct cs_dsp_buf,
- list);
- list_del(&buf->list);
- vfree(buf->buf);
- kfree(buf);
- }
-}
-
/**
* cs_dsp_mem_region_name() - Return a name string for a memory type
* @type: the memory type to match
@@ -383,18 +352,14 @@ EXPORT_SYMBOL_NS_GPL(cs_dsp_mem_region_name, "FW_CS_DSP");
#ifdef CONFIG_DEBUG_FS
static void cs_dsp_debugfs_save_wmfwname(struct cs_dsp *dsp, const char *s)
{
- char *tmp = kasprintf(GFP_KERNEL, "%s\n", s);
-
kfree(dsp->wmfw_file_name);
- dsp->wmfw_file_name = tmp;
+ dsp->wmfw_file_name = kstrdup(s, GFP_KERNEL);
}
static void cs_dsp_debugfs_save_binname(struct cs_dsp *dsp, const char *s)
{
- char *tmp = kasprintf(GFP_KERNEL, "%s\n", s);
-
kfree(dsp->bin_file_name);
- dsp->bin_file_name = tmp;
+ dsp->bin_file_name = kstrdup(s, GFP_KERNEL);
}
static void cs_dsp_debugfs_clear(struct cs_dsp *dsp)
@@ -405,24 +370,33 @@ static void cs_dsp_debugfs_clear(struct cs_dsp *dsp)
dsp->bin_file_name = NULL;
}
+static ssize_t cs_dsp_debugfs_string_read(struct cs_dsp *dsp,
+ char __user *user_buf,
+ size_t count, loff_t *ppos,
+ const char **pstr)
+{
+ const char *str __free(kfree) = NULL;
+
+ scoped_guard(mutex, &dsp->pwr_lock) {
+ if (!*pstr)
+ return 0;
+
+ /* Copy under the lock; the user copy happens after it drops */
+ str = kasprintf(GFP_KERNEL, "%s\n", *pstr);
+ }
+
+ if (!str)
+ return -ENOMEM;
+
+ return simple_read_from_buffer(user_buf, count, ppos, str, strlen(str));
+}
+
static ssize_t cs_dsp_debugfs_wmfw_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct cs_dsp *dsp = file->private_data;
- ssize_t ret;
-
- mutex_lock(&dsp->pwr_lock);
-
- if (!dsp->wmfw_file_name || !dsp->booted)
- ret = 0;
- else
- ret = simple_read_from_buffer(user_buf, count, ppos,
- dsp->wmfw_file_name,
- strlen(dsp->wmfw_file_name));
- mutex_unlock(&dsp->pwr_lock);
- return ret;
+ return cs_dsp_debugfs_string_read(dsp, user_buf, count, ppos,
+ &dsp->wmfw_file_name);
}
static ssize_t cs_dsp_debugfs_bin_read(struct file *file,
@@ -430,19 +404,9 @@ static ssize_t cs_dsp_debugfs_bin_read(struct file *file,
size_t count, loff_t *ppos)
{
struct cs_dsp *dsp = file->private_data;
- ssize_t ret;
-
- mutex_lock(&dsp->pwr_lock);
- if (!dsp->bin_file_name || !dsp->booted)
- ret = 0;
- else
- ret = simple_read_from_buffer(user_buf, count, ppos,
- dsp->bin_file_name,
- strlen(dsp->bin_file_name));
-
- mutex_unlock(&dsp->pwr_lock);
- return ret;
+ return cs_dsp_debugfs_string_read(dsp, user_buf, count, ppos,
+ &dsp->bin_file_name);
}
static const struct {
@@ -474,9 +438,11 @@ static int cs_dsp_debugfs_read_controls_show(struct seq_file *s, void *ignored)
struct cs_dsp_coeff_ctl *ctl;
unsigned int reg;
+ guard(mutex)(&dsp->pwr_lock);
+
list_for_each_entry(ctl, &dsp->ctl_list, list) {
cs_dsp_coeff_base_reg(ctl, &reg, 0);
- seq_printf(s, "%22.*s: %#8zx %s:%08x %#8x %s %#8x %#4x %c%c%c%c %s %s\n",
+ seq_printf(s, "%22.*s: %#8x %s:%08x %#8x %s %#8x %#4x %c%c%c%c %s %s\n",
ctl->subname_len, ctl->subname, ctl->len,
cs_dsp_mem_region_name(ctl->alg_region.type),
ctl->offset, reg, ctl->fw_name, ctl->alg_region.alg, ctl->type,
@@ -1023,7 +989,7 @@ static void cs_dsp_signal_event_controls(struct cs_dsp *dsp,
static void cs_dsp_free_ctl_blk(struct cs_dsp_coeff_ctl *ctl)
{
- kfree(ctl->cache);
+ kvfree(ctl->cache);
kfree(ctl->subname);
kfree(ctl);
}
@@ -1073,7 +1039,7 @@ static int cs_dsp_create_control(struct cs_dsp *dsp,
ctl->type = type;
ctl->offset = offset;
ctl->len = len;
- ctl->cache = kzalloc(ctl->len, GFP_KERNEL);
+ ctl->cache = kvzalloc(ctl->len, GFP_KERNEL);
if (!ctl->cache) {
ret = -ENOMEM;
goto err_ctl_subname;
@@ -1091,7 +1057,7 @@ static int cs_dsp_create_control(struct cs_dsp *dsp,
err_list_del:
list_del(&ctl->list);
- kfree(ctl->cache);
+ kvfree(ctl->cache);
err_ctl_subname:
kfree(ctl->subname);
err_ctl:
@@ -1480,7 +1446,9 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
const struct wmfw_region *region;
const struct cs_dsp_region *mem;
const char *region_name;
- struct cs_dsp_buf *buf;
+ u8 *buf __free(kfree) = NULL;
+ size_t buf_len = 0;
+ size_t region_len;
unsigned int reg;
int regions = 0;
int ret, offset, type;
@@ -1600,23 +1568,23 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
region_name);
if (reg) {
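+ /*
+ * A single scratch buffer is reused across regions and only
+ * regrown, in page-sized steps, when a larger region is
+ * encountered, avoiding one allocation per region.
+ */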
- buf = cs_dsp_buf_alloc(region->data,
- le32_to_cpu(region->len),
- &buf_list);
- if (!buf) {
- cs_dsp_err(dsp, "Out of memory\n");
- ret = -ENOMEM;
- goto out_fw;
+ region_len = le32_to_cpu(region->len);
+ if (region_len > buf_len) {
+ buf_len = round_up(region_len, PAGE_SIZE);
+ kfree(buf);
+ buf = kmalloc(buf_len, GFP_KERNEL | GFP_DMA);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out_fw;
+ }
}
- ret = regmap_raw_write(regmap, reg, buf->buf,
- le32_to_cpu(region->len));
+ memcpy(buf, region->data, region_len);
+ ret = regmap_raw_write(regmap, reg, buf, region_len);
if (ret != 0) {
cs_dsp_err(dsp,
- "%s.%d: Failed to write %d bytes at %d in %s: %d\n",
- file, regions,
- le32_to_cpu(region->len), offset,
- region_name, ret);
+ "%s.%d: Failed to write %zu bytes at %d in %s: %d\n",
+ file, regions, region_len, offset, region_name, ret);
goto out_fw;
}
}
@@ -1633,8 +1601,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
ret = 0;
out_fw:
- cs_dsp_buf_free(&buf_list);
-
if (ret == -EOVERFLOW)
cs_dsp_err(dsp, "%s: file content overflows file data\n", file);
@@ -1752,13 +1718,13 @@ static void *cs_dsp_read_algs(struct cs_dsp *dsp, size_t n_algs,
struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp,
int type, unsigned int id)
{
- struct cs_dsp_alg_region *alg_region;
+ struct cs_dsp_alg_region_list_item *item;
lockdep_assert_held(&dsp->pwr_lock);
- list_for_each_entry(alg_region, &dsp->alg_regions, list) {
- if (id == alg_region->alg && type == alg_region->type)
- return alg_region;
+ list_for_each_entry(item, &dsp->alg_regions, list) {
+ if (id == item->alg_region.alg && type == item->alg_region.type)
+ return &item->alg_region;
}
return NULL;
@@ -1769,35 +1735,35 @@ static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp,
int type, __be32 id,
__be32 ver, __be32 base)
{
- struct cs_dsp_alg_region *alg_region;
+ struct cs_dsp_alg_region_list_item *item;
- alg_region = kzalloc(sizeof(*alg_region), GFP_KERNEL);
- if (!alg_region)
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item)
return ERR_PTR(-ENOMEM);
- alg_region->type = type;
- alg_region->alg = be32_to_cpu(id);
- alg_region->ver = be32_to_cpu(ver);
- alg_region->base = be32_to_cpu(base);
+ item->alg_region.type = type;
+ item->alg_region.alg = be32_to_cpu(id);
+ item->alg_region.ver = be32_to_cpu(ver);
+ item->alg_region.base = be32_to_cpu(base);
- list_add_tail(&alg_region->list, &dsp->alg_regions);
+ list_add_tail(&item->list, &dsp->alg_regions);
if (dsp->wmfw_ver > 0)
- cs_dsp_ctl_fixup_base(dsp, alg_region);
+ cs_dsp_ctl_fixup_base(dsp, &item->alg_region);
- return alg_region;
+ return &item->alg_region;
}
static void cs_dsp_free_alg_regions(struct cs_dsp *dsp)
{
- struct cs_dsp_alg_region *alg_region;
+ struct cs_dsp_alg_region_list_item *item;
while (!list_empty(&dsp->alg_regions)) {
- alg_region = list_first_entry(&dsp->alg_regions,
- struct cs_dsp_alg_region,
- list);
- list_del(&alg_region->list);
- kfree(alg_region);
+ item = list_first_entry(&dsp->alg_regions,
+ struct cs_dsp_alg_region_list_item,
+ list);
+ list_del(&item->list);
+ kfree(item);
}
}
@@ -2166,7 +2132,9 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
struct cs_dsp_alg_region *alg_region;
const char *region_name;
int ret, pos, blocks, type, offset, reg, version;
- struct cs_dsp_buf *buf;
+ u8 *buf __free(kfree) = NULL;
+ size_t buf_len = 0;
+ size_t region_len;
if (!firmware)
return 0;
@@ -2308,20 +2276,22 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
}
if (reg) {
- buf = cs_dsp_buf_alloc(blk->data,
- le32_to_cpu(blk->len),
- &buf_list);
- if (!buf) {
- cs_dsp_err(dsp, "Out of memory\n");
- ret = -ENOMEM;
- goto out_fw;
+ region_len = le32_to_cpu(blk->len);
+ if (region_len > buf_len) {
+ buf_len = round_up(region_len, PAGE_SIZE);
+ kfree(buf);
+ buf = kmalloc(buf_len, GFP_KERNEL | GFP_DMA);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out_fw;
+ }
}
- cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n",
- file, blocks, le32_to_cpu(blk->len),
- reg);
- ret = regmap_raw_write(regmap, reg, buf->buf,
- le32_to_cpu(blk->len));
+ memcpy(buf, blk->data, region_len);
+
+ cs_dsp_dbg(dsp, "%s.%d: Writing %zu bytes at %x\n",
+ file, blocks, region_len, reg);
+ ret = regmap_raw_write(regmap, reg, buf, region_len);
if (ret != 0) {
cs_dsp_err(dsp,
"%s.%d: Failed to write to %x in %s: %d\n",
@@ -2341,8 +2311,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
ret = 0;
out_fw:
- cs_dsp_buf_free(&buf_list);
-
if (ret == -EOVERFLOW)
cs_dsp_err(dsp, "%s: file content overflows file data\n", file);
@@ -2361,6 +2329,9 @@ static int cs_dsp_create_name(struct cs_dsp *dsp)
return 0;
}
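+/*
+ * Substituted when a client registers without supplying any ops, so the
+ * core can always dereference dsp->client_ops; the individual callbacks
+ * remain optional.
+ */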
+static const struct cs_dsp_client_ops cs_dsp_default_client_ops = {
+};
+
static int cs_dsp_common_init(struct cs_dsp *dsp)
{
int ret;
@@ -2374,6 +2345,9 @@ static int cs_dsp_common_init(struct cs_dsp *dsp)
mutex_init(&dsp->pwr_lock);
+ if (!dsp->client_ops)
+ dsp->client_ops = &cs_dsp_default_client_ops;
+
#ifdef CONFIG_DEBUG_FS
/* Ensure this is invalid if client never provides a debugfs root */
dsp->debugfs_root = ERR_PTR(-ENODEV);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c
index 8a9b66a3b7d3..e5a389808e5f 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c
@@ -600,6 +600,7 @@ KUNIT_ARRAY_PARAM(cs_dsp_callbacks_ops,
static const struct cs_dsp_callbacks_test_param cs_dsp_no_callbacks_cases[] = {
{ .ops = &cs_dsp_callback_test_empty_client_ops, .case_name = "empty ops" },
+ { .ops = NULL, .case_name = "NULL ops" },
};
KUNIT_ARRAY_PARAM(cs_dsp_no_callbacks,
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index db8c5c03d3a2..29e0729299f5 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -263,6 +263,15 @@ config EFI_COCO_SECRET
virt/coco/efi_secret module to access the secrets, which in turn
allows userspace programs to access the injected secrets.
+config OVMF_DEBUG_LOG
+ bool "Expose OVMF firmware debug log via sysfs"
+ depends on EFI
+ help
+ Recent versions of the Open Virtual Machine Firmware
+ (edk2-stable202508 and newer) can write their debug log to a memory
+ buffer. This driver exposes the log content via sysfs
+ (/sys/firmware/efi/ovmf_debug_log).
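+ The log can then be read from the guest, for example with
+ "cat /sys/firmware/efi/ovmf_debug_log".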
+
config UNACCEPTED_MEMORY
bool
depends on EFI_STUB
@@ -286,7 +295,7 @@ config EFI_SBAT
config EFI_SBAT_FILE
string "Embedded SBAT section file path"
- depends on EFI_ZBOOT
+ depends on EFI_ZBOOT || (EFI_STUB && X86)
help
SBAT section provides a way to improve SecureBoot revocations of UEFI
binaries by introducing a generation-based mechanism. With SBAT, older
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index a2d0009560d0..8efbcf699e4f 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o
obj-$(CONFIG_EFI_RCI2_TABLE) += rci2-table.o
obj-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += embedded-firmware.o
obj-$(CONFIG_LOAD_UEFI_KEYS) += mokvar-table.o
+obj-$(CONFIG_OVMF_DEBUG_LOG) += ovmf-debug-log.o
obj-$(CONFIG_SYSFB) += sysfb_efi.o
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 83092d93f36a..53a5336cde5a 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -12,18 +12,18 @@
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
+#include <linux/pgalloc.h>
+#include <linux/pgtable.h>
#include <linux/preempt.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/efi.h>
#include <asm/mmu.h>
-#include <asm/pgalloc.h>
#if defined(CONFIG_PTDUMP_DEBUGFS) || defined(CONFIG_ARM_PTDUMP_DEBUGFS)
#include <asm/ptdump.h>
diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c
index f0a63d09d3c4..76542a53e202 100644
--- a/drivers/firmware/efi/cper-arm.c
+++ b/drivers/firmware/efi/cper-arm.c
@@ -93,15 +93,11 @@ static void cper_print_arm_err_info(const char *pfx, u32 type,
bool proc_context_corrupt, corrected, precise_pc, restartable_pc;
bool time_out, access_mode;
- /* If the type is unknown, bail. */
- if (type > CPER_ARM_MAX_TYPE)
- return;
-
/*
* Vendor type errors have error information values that are vendor
* specific.
*/
- if (type == CPER_ARM_VENDOR_ERROR)
+ if (type & CPER_ARM_VENDOR_ERROR)
return;
if (error_info & CPER_ARM_ERR_VALID_TRANSACTION_TYPE) {
@@ -116,43 +112,38 @@ static void cper_print_arm_err_info(const char *pfx, u32 type,
if (error_info & CPER_ARM_ERR_VALID_OPERATION_TYPE) {
op_type = ((error_info >> CPER_ARM_ERR_OPERATION_SHIFT)
& CPER_ARM_ERR_OPERATION_MASK);
- switch (type) {
- case CPER_ARM_CACHE_ERROR:
+ if (type & CPER_ARM_CACHE_ERROR) {
if (op_type < ARRAY_SIZE(arm_cache_err_op_strs)) {
- printk("%soperation type: %s\n", pfx,
+ printk("%scache error, operation type: %s\n", pfx,
arm_cache_err_op_strs[op_type]);
}
- break;
- case CPER_ARM_TLB_ERROR:
+ }
+ if (type & CPER_ARM_TLB_ERROR) {
if (op_type < ARRAY_SIZE(arm_tlb_err_op_strs)) {
- printk("%soperation type: %s\n", pfx,
+ printk("%sTLB error, operation type: %s\n", pfx,
arm_tlb_err_op_strs[op_type]);
}
- break;
- case CPER_ARM_BUS_ERROR:
+ }
+ if (type & CPER_ARM_BUS_ERROR) {
if (op_type < ARRAY_SIZE(arm_bus_err_op_strs)) {
- printk("%soperation type: %s\n", pfx,
+ printk("%sbus error, operation type: %s\n", pfx,
arm_bus_err_op_strs[op_type]);
}
- break;
}
}
if (error_info & CPER_ARM_ERR_VALID_LEVEL) {
level = ((error_info >> CPER_ARM_ERR_LEVEL_SHIFT)
& CPER_ARM_ERR_LEVEL_MASK);
- switch (type) {
- case CPER_ARM_CACHE_ERROR:
+ if (type & CPER_ARM_CACHE_ERROR)
printk("%scache level: %d\n", pfx, level);
- break;
- case CPER_ARM_TLB_ERROR:
+
+ if (type & CPER_ARM_TLB_ERROR)
printk("%sTLB level: %d\n", pfx, level);
- break;
- case CPER_ARM_BUS_ERROR:
+
+ if (type & CPER_ARM_BUS_ERROR)
printk("%saffinity level at which the bus error occurred: %d\n",
pfx, level);
- break;
- }
}
if (error_info & CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT) {
@@ -240,7 +231,8 @@ void cper_print_proc_arm(const char *pfx,
int i, len, max_ctx_type;
struct cper_arm_err_info *err_info;
struct cper_arm_ctx_info *ctx_info;
- char newpfx[64], infopfx[64];
+ char newpfx[64], infopfx[ARRAY_SIZE(newpfx) + 1];
+ char error_type[120];
printk("%sMIDR: 0x%016llx\n", pfx, proc->midr);
@@ -289,9 +281,15 @@ void cper_print_proc_arm(const char *pfx,
newpfx);
}
- printk("%serror_type: %d, %s\n", newpfx, err_info->type,
- err_info->type < ARRAY_SIZE(cper_proc_error_type_strs) ?
- cper_proc_error_type_strs[err_info->type] : "unknown");
+ cper_bits_to_str(error_type, sizeof(error_type),
+ FIELD_GET(CPER_ARM_ERR_TYPE_MASK, err_info->type),
+ cper_proc_error_type_strs,
+ ARRAY_SIZE(cper_proc_error_type_strs));
+
+ printk("%serror_type: 0x%02x: %s%s\n", newpfx, err_info->type,
+ error_type,
+ (err_info->type & ~CPER_ARM_ERR_TYPE_MASK) ? " with reserved bit(s)" : "");
+
if (err_info->validation_bits & CPER_ARM_INFO_VALID_ERR_INFO) {
printk("%serror_info: 0x%016llx\n", newpfx,
err_info->error_info);
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 928409199a1a..0232bd040f61 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -12,6 +12,7 @@
* Specification version 2.4.
*/
+#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/time.h>
@@ -69,7 +70,7 @@ const char *cper_severity_str(unsigned int severity)
}
EXPORT_SYMBOL_GPL(cper_severity_str);
-/*
+/**
* cper_print_bits - print strings for set bits
* @pfx: prefix for each line, including log level and prefix string
* @bits: bit mask
@@ -106,6 +107,65 @@ void cper_print_bits(const char *pfx, unsigned int bits,
printk("%s\n", buf);
}
+/**
+ * cper_bits_to_str - return a string for set bits
+ * @buf: buffer to store the output string
+ * @buf_size: size of the output string buffer
+ * @bits: bit mask
+ * @strs: string array, indexed by bit position
+ * @strs_size: number of entries in @strs
+ *
+ * For each set bit in @bits, append the corresponding string describing the
+ * bit from @strs to @buf, separating consecutive entries with '|'.
+ *
+ * A typical example is::
+ *
+ * const char * const bits[] = {
+ * "bit 3 name",
+ * "bit 4 name",
+ * "bit 5 name",
+ * };
+ * char str[120];
+ * unsigned int bitmask = BIT(3) | BIT(5);
+ * #define MASK GENMASK(5,3)
+ *
+ * cper_bits_to_str(str, sizeof(str), FIELD_GET(MASK, bitmask),
+ * bits, ARRAY_SIZE(bits));
+ *
+ * The above code fills the string ``str`` with ``bit 3 name|bit 5 name``.
+ *
+ * Return: number of bytes stored in @buf, or a negative error code.
+ */
+int cper_bits_to_str(char *buf, int buf_size, unsigned long bits,
+ const char * const strs[], unsigned int strs_size)
+{
+ int len = buf_size;
+ char *str = buf;
+ int i, size;
+
+ *buf = '\0';
+
+ for_each_set_bit(i, &bits, strs_size) {
+ if (*buf && len > 0) {
+ *str = '|';
+ len--;
+ str++;
+ }
+
+ size = strscpy(str, strs[i], len);
+ if (size < 0)
+ return size;
+
+ len -= size;
+ str += size;
+ }
+ return buf_size - len;
+}
+EXPORT_SYMBOL_GPL(cper_bits_to_str);
+
static const char * const proc_type_strs[] = {
"IA32/X64",
"IA64",
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index a00e07b853f2..a65c2d5b9e7b 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -12,6 +12,7 @@
#include <linux/efi.h>
#include <linux/fwnode.h>
#include <linux/init.h>
+#include <linux/kexec_handover.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/of.h>
@@ -164,12 +165,32 @@ static __init void reserve_regions(void)
pr_info("Processing EFI memory map:\n");
/*
- * Discard memblocks discovered so far: if there are any at this
- * point, they originate from memory nodes in the DT, and UEFI
- * uses its own memory map instead.
+ * Discard memblocks discovered so far except for KHO scratch
+ * regions. Most memblocks at this point originate from memory nodes
+ * in the DT and UEFI uses its own memory map instead. However, if
+ * KHO is enabled, scratch regions, which are known good memory,
+ * must be preserved.
*/
memblock_dump_all();
- memblock_remove(0, PHYS_ADDR_MAX);
+
+ if (is_kho_boot()) {
+ struct memblock_region *r;
+
+ /* Remove all non-KHO regions */
+ for_each_mem_region(r) {
+ if (!memblock_is_kho_scratch(r)) {
+ memblock_remove(r->base, r->size);
+ r--;
+ }
+ }
+ } else {
+ /*
+ * KHO is disabled. Discard memblocks discovered so far:
+ * if there are any at this point, they originate from memory
+ * nodes in the DT, and UEFI uses its own memory map instead.
+ */
+ memblock_remove(0, PHYS_ADDR_MAX);
+ }
for_each_efi_memory_desc(md) {
paddr = md->phys_addr;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index e57bff702b5f..a9070d00b833 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -45,6 +45,7 @@ struct efi __read_mostly efi = {
.esrt = EFI_INVALID_TABLE_ADDR,
.tpm_log = EFI_INVALID_TABLE_ADDR,
.tpm_final_log = EFI_INVALID_TABLE_ADDR,
+ .ovmf_debug_log = EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
.mokvar_table = EFI_INVALID_TABLE_ADDR,
#endif
@@ -73,6 +74,9 @@ struct mm_struct efi_mm = {
.page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
.cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
+#ifdef CONFIG_SCHED_MM_CID
+ .mm_cid.lock = __RAW_SPIN_LOCK_UNLOCKED(efi_mm.mm_cid.lock),
+#endif
};
struct workqueue_struct *efi_rts_wq;
@@ -473,6 +477,10 @@ static int __init efisubsys_init(void)
platform_device_register_simple("efi_secret", 0, NULL, 0);
#endif
+ if (IS_ENABLED(CONFIG_OVMF_DEBUG_LOG) &&
+ efi.ovmf_debug_log != EFI_INVALID_TABLE_ADDR)
+ ovmf_log_probe(efi.ovmf_debug_log);
+
return 0;
err_remove_group:
@@ -617,6 +625,9 @@ static const efi_config_table_type_t common_tables[] __initconst = {
{LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
{LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" },
{EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
+#ifdef CONFIG_OVMF_DEBUG_LOG
+ {OVMF_MEMORY_LOG_TABLE_GUID, &efi.ovmf_debug_log, "OvmfDebugLog" },
+#endif
#ifdef CONFIG_EFI_RCI2_TABLE
{DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
#endif
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 939a4955e00b..7d15a85d579f 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -11,27 +11,27 @@ cflags-y := $(KBUILD_CFLAGS)
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
-cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -std=gnu11 \
+cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -std=gnu11 -fms-extensions \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse -fshort-wchar \
-Wno-pointer-sign \
$(call cc-disable-warning, address-of-packed-member) \
- $(call cc-disable-warning, gnu) \
+ $(if $(CONFIG_CC_IS_CLANG),-Wno-gnu -Wno-microsoft-anon-tag) \
-fno-asynchronous-unwind-tables \
$(CLANG_FLAGS)
# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
# disable the stackleak plugin
-cflags-$(CONFIG_ARM64) += -fpie $(DISABLE_STACKLEAK_PLUGIN) \
+cflags-$(CONFIG_ARM64) += -fpie $(DISABLE_KSTACK_ERASE) \
-fno-unwind-tables -fno-asynchronous-unwind-tables
cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
-DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
-DEFI_HAVE_STRCMP -fno-builtin -fpic \
$(call cc-option,-mno-single-pic-base) \
- $(DISABLE_STACKLEAK_PLUGIN)
+ $(DISABLE_KSTACK_ERASE)
cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax \
- $(DISABLE_STACKLEAK_PLUGIN)
-cflags-$(CONFIG_LOONGARCH) += -fpie $(DISABLE_STACKLEAK_PLUGIN)
+ $(DISABLE_KSTACK_ERASE)
+cflags-$(CONFIG_LOONGARCH) += -fpie $(DISABLE_KSTACK_ERASE)
cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt
diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot
index 92e3c73502ba..832deee36e48 100644
--- a/drivers/firmware/efi/libstub/Makefile.zboot
+++ b/drivers/firmware/efi/libstub/Makefile.zboot
@@ -36,7 +36,7 @@ aflags-zboot-header-$(EFI_ZBOOT_FORWARD_CFI) := \
-DPE_DLL_CHAR_EX=IMAGE_DLLCHARACTERISTICS_EX_FORWARD_CFI_COMPAT
AFLAGS_zboot-header.o += -DMACHINE_TYPE=IMAGE_FILE_MACHINE_$(EFI_ZBOOT_MACH_TYPE) \
- -DZBOOT_EFI_PATH="\"$(realpath $(obj)/vmlinuz.efi.elf)\"" \
+ -DZBOOT_EFI_PATH="\"$(abspath $(obj)/vmlinuz.efi.elf)\"" \
-DZBOOT_SIZE_LEN=$(zboot-size-len-y) \
-DCOMP_TYPE="\"$(comp-type-y)\"" \
$(aflags-zboot-header-y)
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index 874f63b4a383..9cb814c5ba1b 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -56,7 +56,7 @@ static struct screen_info *setup_graphics(void)
{
struct screen_info *si, tmp = {};
- if (efi_setup_gop(&tmp) != EFI_SUCCESS)
+ if (efi_setup_graphics(&tmp, NULL) != EFI_SUCCESS)
return NULL;
si = alloc_screen_info();
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index f5ba032863a9..b2fb0c3fa721 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -34,6 +34,9 @@
#define EFI_ALLOC_LIMIT ULONG_MAX
#endif
+struct edid_info;
+struct screen_info;
+
extern bool efi_no5lvl;
extern bool efi_nochunk;
extern bool efi_nokaslr;
@@ -578,6 +581,32 @@ union efi_graphics_output_protocol {
} mixed_mode;
};
+typedef union efi_edid_discovered_protocol efi_edid_discovered_protocol_t;
+
+union efi_edid_discovered_protocol {
+ struct {
+ u32 size_of_edid;
+ u8 *edid;
+ };
+ struct {
+ u32 size_of_edid;
+ u32 edid;
+ } mixed_mode;
+};
+
+typedef union efi_edid_active_protocol efi_edid_active_protocol_t;
+
+union efi_edid_active_protocol {
+ struct {
+ u32 size_of_edid;
+ u8 *edid;
+ };
+ struct {
+ u32 size_of_edid;
+ u32 edid;
+ } mixed_mode;
+};
+
typedef union {
struct {
u32 revision;
@@ -1085,7 +1114,7 @@ efi_status_t efi_parse_options(char const *cmdline);
void efi_parse_option_graphics(char *option);
-efi_status_t efi_setup_gop(struct screen_info *si);
+efi_status_t efi_setup_graphics(struct screen_info *si, struct edid_info *edid);
efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
const efi_char16_t *optstr,
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index 3785fb4986b4..72d74436a7a4 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <asm/efi.h>
#include <asm/setup.h>
+#include <video/edid.h>
#include "efistub.h"
@@ -367,24 +368,31 @@ static void find_bits(u32 mask, u8 *pos, u8 *size)
*size = __fls(mask) - *pos + 1;
}
-static void
-setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
- efi_pixel_bitmask_t pixel_info, int pixel_format)
+static void setup_screen_info(struct screen_info *si, const efi_graphics_output_protocol_t *gop)
{
- if (pixel_format == PIXEL_BIT_MASK) {
- find_bits(pixel_info.red_mask,
- &si->red_pos, &si->red_size);
- find_bits(pixel_info.green_mask,
- &si->green_pos, &si->green_size);
- find_bits(pixel_info.blue_mask,
- &si->blue_pos, &si->blue_size);
- find_bits(pixel_info.reserved_mask,
- &si->rsvd_pos, &si->rsvd_size);
- si->lfb_depth = si->red_size + si->green_size +
- si->blue_size + si->rsvd_size;
- si->lfb_linelength = (pixels_per_scan_line * si->lfb_depth) / 8;
+ const efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode);
+ const efi_graphics_output_mode_info_t *info = efi_table_attr(mode, info);
+
+ si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+ si->lfb_width = info->horizontal_resolution;
+ si->lfb_height = info->vertical_resolution;
+
+ efi_set_u64_split(efi_table_attr(mode, frame_buffer_base),
+ &si->lfb_base, &si->ext_lfb_base);
+ if (si->ext_lfb_base)
+ si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+ si->pages = 1;
+
+ if (info->pixel_format == PIXEL_BIT_MASK) {
+ find_bits(info->pixel_information.red_mask, &si->red_pos, &si->red_size);
+ find_bits(info->pixel_information.green_mask, &si->green_pos, &si->green_size);
+ find_bits(info->pixel_information.blue_mask, &si->blue_pos, &si->blue_size);
+ find_bits(info->pixel_information.reserved_mask, &si->rsvd_pos, &si->rsvd_size);
+ si->lfb_depth = si->red_size + si->green_size + si->blue_size + si->rsvd_size;
+ si->lfb_linelength = (info->pixels_per_scan_line * si->lfb_depth) / 8;
} else {
- if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
+ if (info->pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
si->red_pos = 0;
si->blue_pos = 16;
} else /* PIXEL_BGR_RESERVED_8BIT_PER_COLOR */ {
@@ -394,20 +402,33 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
si->green_pos = 8;
si->rsvd_pos = 24;
- si->red_size = si->green_size =
- si->blue_size = si->rsvd_size = 8;
-
+ si->red_size = 8;
+ si->green_size = 8;
+ si->blue_size = 8;
+ si->rsvd_size = 8;
si->lfb_depth = 32;
- si->lfb_linelength = pixels_per_scan_line * 4;
+ si->lfb_linelength = info->pixels_per_scan_line * 4;
}
+
+ si->lfb_size = si->lfb_linelength * si->lfb_height;
+ si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
}
-static efi_graphics_output_protocol_t *find_gop(unsigned long num,
- const efi_handle_t handles[])
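+/*
+ * A valid EDID base block is 128 bytes; treat anything shorter as no EDID
+ * and hand the kernel a zeroed edid_info instead.
+ */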
+static void setup_edid_info(struct edid_info *edid, u32 gop_size_of_edid, u8 *gop_edid)
+{
+ if (!gop_edid || gop_size_of_edid < 128)
+ memset(edid->dummy, 0, sizeof(edid->dummy));
+ else
+ memcpy(edid->dummy, gop_edid, min(gop_size_of_edid, sizeof(edid->dummy)));
+}
+
+static efi_handle_t find_handle_with_primary_gop(unsigned long num, const efi_handle_t handles[],
+ efi_graphics_output_protocol_t **found_gop)
{
efi_graphics_output_protocol_t *first_gop;
- efi_handle_t h;
+ efi_handle_t h, first_gop_handle;
+ first_gop_handle = NULL;
first_gop = NULL;
for_each_efi_handle(h, handles, num) {
@@ -442,21 +463,25 @@ static efi_graphics_output_protocol_t *find_gop(unsigned long num,
*/
status = efi_bs_call(handle_protocol, h,
&EFI_CONSOLE_OUT_DEVICE_GUID, &dummy);
- if (status == EFI_SUCCESS)
- return gop;
-
- if (!first_gop)
+ if (status == EFI_SUCCESS) {
+ if (found_gop)
+ *found_gop = gop;
+ return h;
+ } else if (!first_gop_handle) {
+ first_gop_handle = h;
first_gop = gop;
+ }
}
- return first_gop;
+ if (found_gop)
+ *found_gop = first_gop;
+ return first_gop_handle;
}
-efi_status_t efi_setup_gop(struct screen_info *si)
+efi_status_t efi_setup_graphics(struct screen_info *si, struct edid_info *edid)
{
efi_handle_t *handles __free(efi_pool) = NULL;
- efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
+ efi_handle_t handle;
efi_graphics_output_protocol_t *gop;
efi_status_t status;
unsigned long num;
@@ -467,35 +492,41 @@ efi_status_t efi_setup_gop(struct screen_info *si)
if (status != EFI_SUCCESS)
return status;
- gop = find_gop(num, handles);
- if (!gop)
+ handle = find_handle_with_primary_gop(num, handles, &gop);
+ if (!handle)
return EFI_NOT_FOUND;
/* Change mode if requested */
set_mode(gop);
/* EFI framebuffer */
- mode = efi_table_attr(gop, mode);
- info = efi_table_attr(mode, info);
-
- si->orig_video_isVGA = VIDEO_TYPE_EFI;
-
- si->lfb_width = info->horizontal_resolution;
- si->lfb_height = info->vertical_resolution;
-
- efi_set_u64_split(efi_table_attr(mode, frame_buffer_base),
- &si->lfb_base, &si->ext_lfb_base);
- if (si->ext_lfb_base)
- si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
-
- si->pages = 1;
-
- setup_pixel_info(si, info->pixels_per_scan_line,
- info->pixel_information, info->pixel_format);
-
- si->lfb_size = si->lfb_linelength * si->lfb_height;
+ if (si)
+ setup_screen_info(si, gop);
+
+ /* Display EDID for primary GOP */
+ if (edid) {
+ efi_edid_discovered_protocol_t *discovered_edid;
+ efi_edid_active_protocol_t *active_edid;
+ u32 gop_size_of_edid = 0;
+ u8 *gop_edid = NULL;
+
+ status = efi_bs_call(handle_protocol, handle, &EFI_EDID_ACTIVE_PROTOCOL_GUID,
+ (void **)&active_edid);
+ if (status == EFI_SUCCESS) {
+ gop_size_of_edid = active_edid->size_of_edid;
+ gop_edid = active_edid->edid;
+ } else {
+ status = efi_bs_call(handle_protocol, handle,
+ &EFI_EDID_DISCOVERED_PROTOCOL_GUID,
+ (void **)&discovered_edid);
+ if (status == EFI_SUCCESS) {
+ gop_size_of_edid = discovered_edid->size_of_edid;
+ gop_edid = discovered_edid->edid;
+ }
+ }
- si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
+ setup_edid_info(edid, gop_size_of_edid, gop_edid);
+ }
return EFI_SUCCESS;
}
diff --git a/drivers/firmware/efi/libstub/printk.c b/drivers/firmware/efi/libstub/printk.c
index 3a67a2cea7bd..bc599212c05d 100644
--- a/drivers/firmware/efi/libstub/printk.c
+++ b/drivers/firmware/efi/libstub/printk.c
@@ -5,13 +5,13 @@
#include <linux/ctype.h>
#include <linux/efi.h>
#include <linux/kernel.h>
-#include <linux/printk.h> /* For CONSOLE_LOGLEVEL_* */
+#include <linux/kern_levels.h>
#include <asm/efi.h>
#include <asm/setup.h>
#include "efistub.h"
-int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
+int efi_loglevel = LOGLEVEL_NOTICE;
/**
* efi_char16_puts() - Write a UCS-2 encoded string to the console
diff --git a/drivers/firmware/efi/libstub/x86-5lvl.c b/drivers/firmware/efi/libstub/x86-5lvl.c
index f1c5fb45d5f7..c00d0ae7ed5d 100644
--- a/drivers/firmware/efi/libstub/x86-5lvl.c
+++ b/drivers/firmware/efi/libstub/x86-5lvl.c
@@ -66,7 +66,7 @@ void efi_5level_switch(void)
bool have_la57 = native_read_cr4() & X86_CR4_LA57;
bool need_toggle = want_la57 ^ have_la57;
u64 *pgt = (void *)la57_toggle + PAGE_SIZE;
- u64 *cr3 = (u64 *)__native_read_cr3();
+ pgd_t *cr3 = (pgd_t *)native_read_cr3_pa();
u64 *new_cr3;
if (!la57_toggle || !need_toggle)
@@ -82,7 +82,7 @@ void efi_5level_switch(void)
new_cr3[0] = (u64)cr3 | _PAGE_TABLE_NOENC;
} else {
/* take the new root table pointer from the current entry #0 */
- new_cr3 = (u64 *)(cr3[0] & PAGE_MASK);
+ new_cr3 = (u64 *)(native_pgd_val(cr3[0]) & PTE_PFN_MASK);
/* copy the new root table if it is not 32-bit addressable */
if ((u64)new_cr3 > U32_MAX)
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index cafc90d4caaf..cef32e2c82d8 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -203,6 +203,104 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
}
}
+struct smbios_entry_point {
+ u8 anchor[4];
+ u8 ep_checksum;
+ u8 ep_length;
+ u8 major_version;
+ u8 minor_version;
+ u16 max_size_entry;
+ u8 ep_rev;
+ u8 reserved[5];
+
+ struct __packed {
+ u8 anchor[5];
+ u8 checksum;
+ u16 st_length;
+ u32 st_address;
+ u16 number_of_entries;
+ u8 bcd_rev;
+ } intm;
+};
+
+static bool verify_ep_checksum(const void *ptr, int length)
+{
+ u8 sum = 0;
+
+ for (int i = 0; i < length; i++)
+ sum += ((u8 *)ptr)[i];
+
+ return sum == 0;
+}
+
+static bool verify_ep_integrity(const struct smbios_entry_point *ep)
+{
+ if (memcmp(ep->anchor, "_SM_", sizeof(ep->anchor)) != 0)
+ return false;
+
+ if (memcmp(ep->intm.anchor, "_DMI_", sizeof(ep->intm.anchor)) != 0)
+ return false;
+
+ if (!verify_ep_checksum(ep, ep->ep_length) ||
+ !verify_ep_checksum(&ep->intm, sizeof(ep->intm)))
+ return false;
+
+ return true;
+}
+
+static const struct efi_smbios_record *search_record(void *table, u32 length,
+ u8 type)
+{
+ const u8 *p, *end;
+
+ p = (u8 *)table;
+ end = p + length;
+
+ while (p + sizeof(struct efi_smbios_record) < end) {
+ const struct efi_smbios_record *hdr =
+ (struct efi_smbios_record *)p;
+ const u8 *next;
+
+ if (hdr->type == type)
+ return hdr;
+
+ /* Type 127 = End-of-Table */
+ if (hdr->type == 0x7F)
+ return NULL;
+
+ /* Jump to the unformed (string) section */
+ next = p + hdr->length;
+
+ /* Unformed section ends with 0000h */
+ while (next + 1 < end && (next[0] != 0 || next[1] != 0))
+ next++;
+
+ next += 2;
+ p = next;
+ }
+
+ return NULL;
+}
+
+static const struct efi_smbios_record *get_table_record(u8 type)
+{
+ const struct smbios_entry_point *ep;
+
+ /*
+ * Locate the legacy 32-bit SMBIOS entrypoint in memory, and parse it
+ * directly. Needed by some Macs that do not implement the EFI protocol.
+ */
+ ep = get_efi_config_table(SMBIOS_TABLE_GUID);
+ if (!ep)
+ return NULL;
+
+ if (!verify_ep_integrity(ep))
+ return NULL;
+
+ return search_record((void *)(unsigned long)ep->intm.st_address,
+ ep->intm.st_length, type);
+}
+
static bool apple_match_product_name(void)
{
static const char type1_product_matches[][15] = {
@@ -218,7 +316,8 @@ static bool apple_match_product_name(void)
const struct efi_smbios_type1_record *record;
const u8 *product;
- record = (struct efi_smbios_type1_record *)efi_get_smbios_record(1);
+ record = (struct efi_smbios_type1_record *)
+ (efi_get_smbios_record(1) ?: get_table_record(1));
if (!record)
return false;
@@ -300,7 +399,7 @@ efi_status_t efi_adjust_memory_range_protection(unsigned long start,
return EFI_SUCCESS;
/*
- * Don't modify memory region attributes, they are
+ * Don't modify memory region attributes if they are
* already suitable, to lower the possibility to
* encounter firmware bugs.
*/
@@ -315,11 +414,13 @@ efi_status_t efi_adjust_memory_range_protection(unsigned long start,
next = desc.base_address + desc.length;
/*
- * Only system memory is suitable for trampoline/kernel image placement,
- * so only this type of memory needs its attributes to be modified.
+ * Only system memory and more reliable memory are suitable for
+ * trampoline/kernel image placement. So only those memory types
+ * may need to have attributes modified.
*/
- if (desc.gcd_memory_type != EfiGcdMemoryTypeSystemMemory ||
+ if ((desc.gcd_memory_type != EfiGcdMemoryTypeSystemMemory &&
+ desc.gcd_memory_type != EfiGcdMemoryTypeMoreReliable) ||
(desc.attributes & (EFI_MEMORY_RO | EFI_MEMORY_XP)) == 0)
continue;
@@ -386,8 +487,9 @@ static void setup_quirks(struct boot_params *boot_params)
static void setup_graphics(struct boot_params *boot_params)
{
struct screen_info *si = memset(&boot_params->screen_info, 0, sizeof(*si));
+ struct edid_info *edid = memset(&boot_params->edid_info, 0, sizeof(*edid));
- efi_setup_gop(si);
+ efi_setup_graphics(si, edid);
}
static void __noreturn efi_exit(efi_handle_t handle, efi_status_t status)
@@ -788,7 +890,9 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry,
*kernel_entry = addr + entry;
- return efi_adjust_memory_range_protection(addr, kernel_text_size);
+ return efi_adjust_memory_range_protection(addr, kernel_text_size) ?:
+ efi_adjust_memory_range_protection(addr + kernel_inittext_offset,
+ kernel_inittext_size);
}
static void __noreturn enter_kernel(unsigned long kernel_addr,
diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds
index c3a166675450..367907eb7d86 100644
--- a/drivers/firmware/efi/libstub/zboot.lds
+++ b/drivers/firmware/efi/libstub/zboot.lds
@@ -29,14 +29,12 @@ SECTIONS
. = _etext;
}
-#ifdef CONFIG_EFI_SBAT
.sbat : ALIGN(4096) {
_sbat = .;
*(.sbat)
_esbat = ALIGN(4096);
. = _esbat;
}
-#endif
.data : ALIGN(4096) {
_data = .;
@@ -60,6 +58,6 @@ SECTIONS
PROVIDE(__efistub__gzdata_size =
ABSOLUTE(__efistub__gzdata_end - __efistub__gzdata_start));
-PROVIDE(__data_rawsize = ABSOLUTE(_edata - _etext));
-PROVIDE(__data_size = ABSOLUTE(_end - _etext));
+PROVIDE(__data_rawsize = ABSOLUTE(_edata - _data));
+PROVIDE(__data_size = ABSOLUTE(_end - _data));
PROVIDE(__sbat_size = ABSOLUTE(_esbat - _sbat));
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index c38b1a335590..e727cc5909cb 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -19,19 +19,19 @@ unsigned long __ro_after_init efi_mem_attr_table = EFI_INVALID_TABLE_ADDR;
* Reserve the memory associated with the Memory Attributes configuration
* table, if it exists.
*/
-int __init efi_memattr_init(void)
+void __init efi_memattr_init(void)
{
efi_memory_attributes_table_t *tbl;
unsigned long size;
if (efi_mem_attr_table == EFI_INVALID_TABLE_ADDR)
- return 0;
+ return;
tbl = early_memremap(efi_mem_attr_table, sizeof(*tbl));
if (!tbl) {
pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
efi_mem_attr_table);
- return -ENOMEM;
+ return;
}
if (tbl->version > 2) {
@@ -61,7 +61,6 @@ int __init efi_memattr_init(void)
unmap:
early_memunmap(tbl, sizeof(*tbl));
- return 0;
}
/*
diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c
index 0a856c3f69a3..aedbbd627706 100644
--- a/drivers/firmware/efi/mokvar-table.c
+++ b/drivers/firmware/efi/mokvar-table.c
@@ -340,7 +340,7 @@ static int __init efi_mokvar_sysfs_init(void)
mokvar_sysfs->bin_attr.attr.name = mokvar_entry->name;
mokvar_sysfs->bin_attr.attr.mode = 0400;
mokvar_sysfs->bin_attr.size = mokvar_entry->data_size;
- mokvar_sysfs->bin_attr.read_new = efi_mokvar_sysfs_read;
+ mokvar_sysfs->bin_attr.read = efi_mokvar_sysfs_read;
err = sysfs_create_bin_file(mokvar_kobj,
&mokvar_sysfs->bin_attr);
diff --git a/drivers/firmware/efi/ovmf-debug-log.c b/drivers/firmware/efi/ovmf-debug-log.c
new file mode 100644
index 000000000000..5b2471ffaeed
--- /dev/null
+++ b/drivers/firmware/efi/ovmf-debug-log.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+
+#define OVMF_DEBUG_LOG_MAGIC1 0x3167646d666d766f // "ovmfmdg1"
+#define OVMF_DEBUG_LOG_MAGIC2 0x3267646d666d766f // "ovmfmdg2"
+
+struct ovmf_debug_log_header {
+ u64 magic1;
+ u64 magic2;
+ u64 hdr_size;
+ u64 log_size;
+ u64 lock; // edk2 spinlock
+ u64 head_off;
+ u64 tail_off;
+ u64 truncated;
+ u8 fw_version[128];
+};
+
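+/*
+ * The log area following the header is a ring buffer: head_off and tail_off
+ * are offsets into it, and ovmf_log_read() unwraps the [head, tail) window
+ * when it crosses the end of the buffer.
+ */
+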
+static struct ovmf_debug_log_header *hdr;
+static u8 *logbuf;
+static u64 logbufsize;
+
+static ssize_t ovmf_log_read(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
+{
+ u64 start, end;
+
+ start = hdr->head_off + offset;
+ if (hdr->head_off > hdr->tail_off && start >= hdr->log_size)
+ start -= hdr->log_size;
+
+ end = start + count;
+ if (start > hdr->tail_off) {
+ if (end > hdr->log_size)
+ end = hdr->log_size;
+ } else {
+ if (end > hdr->tail_off)
+ end = hdr->tail_off;
+ }
+
+ if (start > logbufsize || end > logbufsize)
+ return 0;
+ if (start >= end)
+ return 0;
+
+ memcpy(buf, logbuf + start, end - start);
+ return end - start;
+}
+
+static struct bin_attribute ovmf_log_bin_attr = {
+ .attr = {
+ .name = "ovmf_debug_log",
+ .mode = 0444,
+ },
+ .read = ovmf_log_read,
+};
+
+int __init ovmf_log_probe(unsigned long ovmf_debug_log_table)
+{
+ int ret = -EINVAL;
+ u64 size;
+
+ /* map + verify header */
+ hdr = memremap(ovmf_debug_log_table, sizeof(*hdr), MEMREMAP_WB);
+ if (!hdr) {
+ pr_err("OVMF debug log: header map failed\n");
+ return -EINVAL;
+ }
+
+ if (hdr->magic1 != OVMF_DEBUG_LOG_MAGIC1 ||
+ hdr->magic2 != OVMF_DEBUG_LOG_MAGIC2) {
+ pr_err("OVMF debug log: magic mismatch\n");
+ goto err_unmap;
+ }
+
+ size = hdr->hdr_size + hdr->log_size;
+ pr_info("OVMF debug log: firmware version: \"%s\"\n", hdr->fw_version);
+ pr_info("OVMF debug log: buffer size: %lluk\n", size / 1024);
+
+ /* map complete log buffer */
+ memunmap(hdr);
+ hdr = memremap(ovmf_debug_log_table, size, MEMREMAP_WB);
+ if (!hdr) {
+ pr_err("OVMF debug log: buffer map failed\n");
+ return -EINVAL;
+ }
+ logbuf = (void *)hdr + hdr->hdr_size;
+ logbufsize = hdr->log_size;
+
+ ovmf_log_bin_attr.size = size;
+ ret = sysfs_create_bin_file(efi_kobj, &ovmf_log_bin_attr);
+ if (ret != 0) {
+ pr_err("OVMF debug log: sysfs register failed\n");
+ goto err_unmap;
+ }
+
+ return 0;
+
+err_unmap:
+ memunmap(hdr);
+ return ret;
+}
diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c
index fa71cd898120..66f584a228d0 100644
--- a/drivers/firmware/efi/riscv-runtime.c
+++ b/drivers/firmware/efi/riscv-runtime.c
@@ -14,18 +14,18 @@
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
+#include <linux/pgalloc.h>
+#include <linux/pgtable.h>
#include <linux/preempt.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/efi.h>
#include <asm/mmu.h>
-#include <asm/pgalloc.h>
static bool __init efi_virtmap_init(void)
{
@@ -36,20 +36,12 @@ static bool __init efi_virtmap_init(void)
init_new_context(NULL, &efi_mm);
for_each_efi_memory_desc(md) {
- phys_addr_t phys = md->phys_addr;
- int ret;
-
if (!(md->attribute & EFI_MEMORY_RUNTIME))
continue;
if (md->virt_addr == U64_MAX)
return false;
- ret = efi_create_mapping(&efi_mm, md);
- if (ret) {
- pr_warn(" EFI remap %pa: failed to create mapping (%d)\n",
- &phys, ret);
- return false;
- }
+ efi_create_mapping(&efi_mm, md);
}
if (efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions))
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 708b777857d3..da8d29621644 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -202,6 +202,8 @@ void efi_call_virt_check_flags(unsigned long flags, const void *caller)
*/
static DEFINE_SEMAPHORE(efi_runtime_lock, 1);
+static struct task_struct *efi_runtime_lock_owner;
+
/*
* Expose the EFI runtime lock to the UV platform
*/
@@ -219,6 +221,8 @@ static void __nocfi efi_call_rts(struct work_struct *work)
efi_status_t status = EFI_NOT_FOUND;
unsigned long flags;
+ efi_runtime_lock_owner = current;
+
arch_efi_call_virt_setup();
flags = efi_call_virt_save_flags();
@@ -310,6 +314,7 @@ static void __nocfi efi_call_rts(struct work_struct *work)
efi_rts_work.status = status;
complete(&efi_rts_work.efi_rts_comp);
+ efi_runtime_lock_owner = NULL;
}
static efi_status_t __efi_queue_work(enum efi_rts_ids id,
@@ -444,8 +449,10 @@ virt_efi_set_variable_nb(efi_char16_t *name, efi_guid_t *vendor, u32 attr,
if (down_trylock(&efi_runtime_lock))
return EFI_NOT_READY;
+ efi_runtime_lock_owner = current;
status = efi_call_virt_pointer(efi.runtime, set_variable, name, vendor,
attr, data_size, data);
+ efi_runtime_lock_owner = NULL;
up(&efi_runtime_lock);
return status;
}
@@ -481,9 +488,11 @@ virt_efi_query_variable_info_nb(u32 attr, u64 *storage_space,
if (down_trylock(&efi_runtime_lock))
return EFI_NOT_READY;
+ efi_runtime_lock_owner = current;
status = efi_call_virt_pointer(efi.runtime, query_variable_info, attr,
storage_space, remaining_space,
max_variable_size);
+ efi_runtime_lock_owner = NULL;
up(&efi_runtime_lock);
return status;
}
@@ -509,12 +518,13 @@ virt_efi_reset_system(int reset_type, efi_status_t status,
return;
}
+ efi_runtime_lock_owner = current;
arch_efi_call_virt_setup();
efi_rts_work.efi_rts_id = EFI_RESET_SYSTEM;
arch_efi_call_virt(efi.runtime, reset_system, reset_type, status,
data_size, data);
arch_efi_call_virt_teardown();
-
+ efi_runtime_lock_owner = NULL;
up(&efi_runtime_lock);
}
@@ -587,3 +597,8 @@ efi_call_acpi_prm_handler(efi_status_t (__efiapi *handler_addr)(u64, void *),
}
#endif
+
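+/*
+ * Sanity helper for paths that must only run while the current task holds
+ * efi_runtime_lock (tracked via efi_runtime_lock_owner above).
+ */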
+void efi_runtime_assert_lock_held(void)
+{
+ WARN_ON(efi_runtime_lock_owner != current);
+}
diff --git a/drivers/firmware/efi/stmm/mm_communication.h b/drivers/firmware/efi/stmm/mm_communication.h
index 52a1f32cd1eb..06e7663f96dc 100644
--- a/drivers/firmware/efi/stmm/mm_communication.h
+++ b/drivers/firmware/efi/stmm/mm_communication.h
@@ -32,7 +32,7 @@
/**
* struct efi_mm_communicate_header - Header used for SMM variable communication
-
+ *
* @header_guid: header use for disambiguation of content
* @message_len: length of the message. Does not include the size of the
* header
@@ -111,7 +111,7 @@ struct efi_mm_communicate_header {
/**
* struct smm_variable_communicate_header - Used for SMM variable communication
-
+ *
* @function: function to call in Smm.
* @ret_status: return status
* @data: payload
@@ -128,7 +128,7 @@ struct smm_variable_communicate_header {
/**
* struct smm_variable_access - Used to communicate with StMM by
* SetVariable and GetVariable.
-
+ *
* @guid: vendor GUID
* @data_size: size of EFI variable data
* @name_size: size of EFI name
diff --git a/drivers/firmware/efi/stmm/tee_stmm_efi.c b/drivers/firmware/efi/stmm/tee_stmm_efi.c
index f741ca279052..65c0fe1ba275 100644
--- a/drivers/firmware/efi/stmm/tee_stmm_efi.c
+++ b/drivers/firmware/efi/stmm/tee_stmm_efi.c
@@ -143,6 +143,10 @@ static efi_status_t mm_communicate(u8 *comm_buf, size_t payload_size)
return var_hdr->ret_status;
}
+#define COMM_BUF_SIZE(__payload_size) (MM_COMMUNICATE_HEADER_SIZE + \
+ MM_VARIABLE_COMMUNICATE_SIZE + \
+ (__payload_size))
+
/**
* setup_mm_hdr() - Allocate a buffer for StandAloneMM and initialize the
* header data.
@@ -150,11 +154,9 @@ static efi_status_t mm_communicate(u8 *comm_buf, size_t payload_size)
* @dptr: pointer address to store allocated buffer
* @payload_size: payload size
* @func: standAloneMM function number
- * @ret: EFI return code
* Return: pointer to corresponding StandAloneMM function buffer or NULL
*/
-static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func,
- efi_status_t *ret)
+static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func)
{
const efi_guid_t mm_var_guid = EFI_MM_VARIABLE_GUID;
struct efi_mm_communicate_header *mm_hdr;
@@ -169,17 +171,13 @@ static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func,
if (max_buffer_size &&
max_buffer_size < (MM_COMMUNICATE_HEADER_SIZE +
MM_VARIABLE_COMMUNICATE_SIZE + payload_size)) {
- *ret = EFI_INVALID_PARAMETER;
return NULL;
}
- comm_buf = kzalloc(MM_COMMUNICATE_HEADER_SIZE +
- MM_VARIABLE_COMMUNICATE_SIZE + payload_size,
- GFP_KERNEL);
- if (!comm_buf) {
- *ret = EFI_OUT_OF_RESOURCES;
+ comm_buf = alloc_pages_exact(COMM_BUF_SIZE(payload_size),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!comm_buf)
return NULL;
- }
mm_hdr = (struct efi_mm_communicate_header *)comm_buf;
memcpy(&mm_hdr->header_guid, &mm_var_guid, sizeof(mm_hdr->header_guid));
@@ -187,9 +185,7 @@ static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func,
var_hdr = (struct smm_variable_communicate_header *)mm_hdr->data;
var_hdr->function = func;
- if (dptr)
- *dptr = comm_buf;
- *ret = EFI_SUCCESS;
+ *dptr = comm_buf;
return var_hdr->data;
}
@@ -212,10 +208,9 @@ static efi_status_t get_max_payload(size_t *size)
payload_size = sizeof(*var_payload);
var_payload = setup_mm_hdr(&comm_buf, payload_size,
- SMM_VARIABLE_FUNCTION_GET_PAYLOAD_SIZE,
- &ret);
+ SMM_VARIABLE_FUNCTION_GET_PAYLOAD_SIZE);
if (!var_payload)
- return EFI_OUT_OF_RESOURCES;
+ return EFI_DEVICE_ERROR;
ret = mm_communicate(comm_buf, payload_size);
if (ret != EFI_SUCCESS)
@@ -239,7 +234,7 @@ static efi_status_t get_max_payload(size_t *size)
*/
*size -= 2;
out:
- kfree(comm_buf);
+ free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
@@ -259,9 +254,9 @@ static efi_status_t get_property_int(u16 *name, size_t name_size,
smm_property = setup_mm_hdr(
&comm_buf, payload_size,
- SMM_VARIABLE_FUNCTION_VAR_CHECK_VARIABLE_PROPERTY_GET, &ret);
+ SMM_VARIABLE_FUNCTION_VAR_CHECK_VARIABLE_PROPERTY_GET);
if (!smm_property)
- return EFI_OUT_OF_RESOURCES;
+ return EFI_DEVICE_ERROR;
memcpy(&smm_property->guid, vendor, sizeof(smm_property->guid));
smm_property->name_size = name_size;
@@ -282,7 +277,7 @@ static efi_status_t get_property_int(u16 *name, size_t name_size,
memcpy(var_property, &smm_property->property, sizeof(*var_property));
out:
- kfree(comm_buf);
+ free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
@@ -315,9 +310,9 @@ static efi_status_t tee_get_variable(u16 *name, efi_guid_t *vendor,
payload_size = MM_VARIABLE_ACCESS_HEADER_SIZE + name_size + tmp_dsize;
var_acc = setup_mm_hdr(&comm_buf, payload_size,
- SMM_VARIABLE_FUNCTION_GET_VARIABLE, &ret);
+ SMM_VARIABLE_FUNCTION_GET_VARIABLE);
if (!var_acc)
- return EFI_OUT_OF_RESOURCES;
+ return EFI_DEVICE_ERROR;
/* Fill in contents */
memcpy(&var_acc->guid, vendor, sizeof(var_acc->guid));
@@ -347,7 +342,7 @@ static efi_status_t tee_get_variable(u16 *name, efi_guid_t *vendor,
memcpy(data, (u8 *)var_acc->name + var_acc->name_size,
var_acc->data_size);
out:
- kfree(comm_buf);
+ free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
@@ -380,10 +375,9 @@ static efi_status_t tee_get_next_variable(unsigned long *name_size,
payload_size = MM_VARIABLE_GET_NEXT_HEADER_SIZE + out_name_size;
var_getnext = setup_mm_hdr(&comm_buf, payload_size,
- SMM_VARIABLE_FUNCTION_GET_NEXT_VARIABLE_NAME,
- &ret);
+ SMM_VARIABLE_FUNCTION_GET_NEXT_VARIABLE_NAME);
if (!var_getnext)
- return EFI_OUT_OF_RESOURCES;
+ return EFI_DEVICE_ERROR;
/* Fill in contents */
memcpy(&var_getnext->guid, guid, sizeof(var_getnext->guid));
@@ -404,7 +398,7 @@ static efi_status_t tee_get_next_variable(unsigned long *name_size,
memcpy(name, var_getnext->name, var_getnext->name_size);
out:
- kfree(comm_buf);
+ free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
@@ -437,9 +431,9 @@ static efi_status_t tee_set_variable(efi_char16_t *name, efi_guid_t *vendor,
* the properties, if the allocation fails
*/
var_acc = setup_mm_hdr(&comm_buf, payload_size,
- SMM_VARIABLE_FUNCTION_SET_VARIABLE, &ret);
+ SMM_VARIABLE_FUNCTION_SET_VARIABLE);
if (!var_acc)
- return EFI_OUT_OF_RESOURCES;
+ return EFI_DEVICE_ERROR;
/*
* The API has the ability to override RO flags. If no RO check was
@@ -467,7 +461,7 @@ static efi_status_t tee_set_variable(efi_char16_t *name, efi_guid_t *vendor,
ret = mm_communicate(comm_buf, payload_size);
dev_dbg(pvt_data.dev, "Set Variable %s %d %lx\n", __FILE__, __LINE__, ret);
out:
- kfree(comm_buf);
+ free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
@@ -492,10 +486,9 @@ static efi_status_t tee_query_variable_info(u32 attributes,
payload_size = sizeof(*mm_query_info);
mm_query_info = setup_mm_hdr(&comm_buf, payload_size,
- SMM_VARIABLE_FUNCTION_QUERY_VARIABLE_INFO,
- &ret);
+ SMM_VARIABLE_FUNCTION_QUERY_VARIABLE_INFO);
if (!mm_query_info)
- return EFI_OUT_OF_RESOURCES;
+ return EFI_DEVICE_ERROR;
mm_query_info->attr = attributes;
ret = mm_communicate(comm_buf, payload_size);
@@ -507,7 +500,7 @@ static efi_status_t tee_query_variable_info(u32 attributes,
*max_variable_size = mm_query_info->max_variable_size;
out:
- kfree(comm_buf);
+ free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
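
Note on the hunks above: switching from kzalloc()/kfree() to alloc_pages_exact()/free_pages_exact() makes the communication buffer page-aligned, but it also means the exact allocation size must be reproduced at free time, which is the whole point of the shared COMM_BUF_SIZE() macro. A condensed sketch of the resulting alloc/use/free pairing, with illustrative sizes and error handling trimmed:

	size_t name_size = 16, data_size = 32;	/* illustrative */
	size_t payload_size = MM_VARIABLE_ACCESS_HEADER_SIZE + name_size + data_size;
	struct smm_variable_access *var_acc;
	u8 *comm_buf;

	var_acc = setup_mm_hdr(&comm_buf, payload_size,
			       SMM_VARIABLE_FUNCTION_GET_VARIABLE);
	if (!var_acc)
		return EFI_DEVICE_ERROR;

	/* ... fill var_acc, then mm_communicate(comm_buf, payload_size) ... */

	/* Must match the size used by alloc_pages_exact() in setup_mm_hdr(). */
	free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
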
diff --git a/drivers/firmware/google/cbmem.c b/drivers/firmware/google/cbmem.c
index 773d05078e0a..54c3b8b05e5d 100644
--- a/drivers/firmware/google/cbmem.c
+++ b/drivers/firmware/google/cbmem.c
@@ -86,7 +86,7 @@ static const struct bin_attribute *const bin_attrs[] = {
static const struct attribute_group cbmem_entry_group = {
.attrs = attrs,
- .bin_attrs_new = bin_attrs,
+ .bin_attrs = bin_attrs,
};
static const struct attribute_group *dev_groups[] = {
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index e8fb00dcaf65..0ceccde5a302 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -530,7 +530,7 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
static const struct bin_attribute eventlog_bin_attr = {
.attr = {.name = "append_to_eventlog", .mode = 0200},
- .write_new = eventlog_write,
+ .write = eventlog_write,
};
static ssize_t gsmi_clear_eventlog_store(struct kobject *kobj,
diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
index d957af6f9349..6138a1653ec5 100644
--- a/drivers/firmware/google/memconsole.c
+++ b/drivers/firmware/google/memconsole.c
@@ -28,7 +28,7 @@ static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
static struct bin_attribute memconsole_bin_attr = {
.attr = {.name = "log", .mode = 0444},
- .read_new = memconsole_read,
+ .read = memconsole_read,
};
void memconsole_setup(ssize_t (*read_func)(char *, loff_t, size_t))
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 254ac6545d68..339a3f74b247 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -121,7 +121,7 @@ static int vpd_section_attrib_add(const u8 *key, u32 key_len,
info->bin_attr.attr.name = info->key;
info->bin_attr.attr.mode = 0444;
info->bin_attr.size = value_len;
- info->bin_attr.read_new = vpd_attrib_read;
+ info->bin_attr.read = vpd_attrib_read;
info->bin_attr.private = info;
info->value = value;
@@ -201,7 +201,7 @@ static int vpd_section_init(const char *name, struct vpd_section *sec,
sec->bin_attr.attr.name = sec->raw_name;
sec->bin_attr.attr.mode = 0444;
sec->bin_attr.size = size;
- sec->bin_attr.read_new = vpd_section_read;
+ sec->bin_attr.read = vpd_section_read;
sec->bin_attr.private = sec;
err = sysfs_create_bin_file(vpd_kobj, &sec->bin_attr);
diff --git a/drivers/firmware/imx/imx-scu-irq.c b/drivers/firmware/imx/imx-scu-irq.c
index 6125cccc9ba7..a68d38f89254 100644
--- a/drivers/firmware/imx/imx-scu-irq.c
+++ b/drivers/firmware/imx/imx-scu-irq.c
@@ -203,6 +203,18 @@ int imx_scu_enable_general_irq_channel(struct device *dev)
struct mbox_chan *ch;
int ret = 0, i = 0;
+ if (!of_parse_phandle_with_args(dev->of_node, "mboxes",
+ "#mbox-cells", 0, &spec)) {
+ i = of_alias_get_id(spec.np, "mu");
+ of_node_put(spec.np);
+ }
+
+ /* use MU1 as the general MU irq channel if the alias lookup failed */
+ if (i < 0)
+ i = 1;
+
+ mu_resource_id = IMX_SC_R_MU_0A + i;
+
ret = imx_scu_get_handle(&imx_sc_irq_ipc_handle);
if (ret)
return ret;
@@ -214,27 +226,16 @@ int imx_scu_enable_general_irq_channel(struct device *dev)
cl->dev = dev;
cl->rx_callback = imx_scu_irq_callback;
+ INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler);
+
/* SCU general IRQ uses general interrupt channel 3 */
ch = mbox_request_channel_byname(cl, "gip3");
if (IS_ERR(ch)) {
ret = PTR_ERR(ch);
dev_err(dev, "failed to request mbox chan gip3, ret %d\n", ret);
- devm_kfree(dev, cl);
- return ret;
+ goto free_cl;
}
- INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler);
-
- if (!of_parse_phandle_with_args(dev->of_node, "mboxes",
- "#mbox-cells", 0, &spec))
- i = of_alias_get_id(spec.np, "mu");
-
- /* use mu1 as general mu irq channel if failed */
- if (i < 0)
- i = 1;
-
- mu_resource_id = IMX_SC_R_MU_0A + i;
-
/* Create directory under /sysfs/firmware */
wakeup_obj = kobject_create_and_add("scu_wakeup_source", firmware_kobj);
if (!wakeup_obj) {
@@ -253,7 +254,8 @@ int imx_scu_enable_general_irq_channel(struct device *dev)
free_ch:
mbox_free_channel(ch);
+free_cl:
+ devm_kfree(dev, cl);
return ret;
}
-EXPORT_SYMBOL(imx_scu_enable_general_irq_channel);
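
The reordering above resolves the MU alias and computes mu_resource_id before the mailbox channel (and therefore the IRQ path) goes live. The lookup pattern, shown in isolation; note the of_node_put() balancing the reference taken by of_parse_phandle_with_args():

	struct of_phandle_args spec;
	int i = 0;

	if (!of_parse_phandle_with_args(dev->of_node, "mboxes",
					"#mbox-cells", 0, &spec)) {
		i = of_alias_get_id(spec.np, "mu");
		of_node_put(spec.np);	/* drop the node reference from the lookup */
	}

	if (i < 0)
		i = 1;			/* no usable alias: fall back to MU1 */

	mu_resource_id = IMX_SC_R_MU_0A + i;
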
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index 8c28e25ddc8a..67b267a7408a 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -73,9 +73,9 @@ static int imx_sc_linux_errmap[IMX_SC_ERR_LAST] = {
-EACCES, /* IMX_SC_ERR_NOACCESS */
-EACCES, /* IMX_SC_ERR_LOCKED */
-ERANGE, /* IMX_SC_ERR_UNAVAILABLE */
- -EEXIST, /* IMX_SC_ERR_NOTFOUND */
- -EPERM, /* IMX_SC_ERR_NOPOWER */
- -EPIPE, /* IMX_SC_ERR_IPC */
+ -ENOENT, /* IMX_SC_ERR_NOTFOUND */
+ -ENODEV, /* IMX_SC_ERR_NOPOWER */
+ -ECOMM, /* IMX_SC_ERR_IPC */
-EBUSY, /* IMX_SC_ERR_BUSY */
-EIO, /* IMX_SC_ERR_FAIL */
};
@@ -324,7 +324,9 @@ static int imx_scu_probe(struct platform_device *pdev)
}
sc_ipc->dev = dev;
- mutex_init(&sc_ipc->lock);
+ ret = devm_mutex_init(dev, &sc_ipc->lock);
+ if (ret)
+ return ret;
init_completion(&sc_ipc->done);
imx_sc_ipc_handle = sc_ipc;
@@ -352,6 +354,7 @@ static struct platform_driver imx_scu_driver = {
.driver = {
.name = "imx-scu",
.of_match_table = imx_scu_match,
+ .suppress_bind_attrs = true,
},
.probe = imx_scu_probe,
};
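
The errmap change above picks errnos that match the firmware semantics (-ENOENT for "not found", -ENODEV for "no power", -ECOMM for an IPC fault). A hedged sketch of the translation helper that consumes such a table; the function name mirrors the driver's existing translation path but is shown here only as an illustration:

	static int imx_sc_to_linux_errno(int errcode)
	{
		/* Reject codes outside the table rather than indexing past it. */
		if (errcode < 0 || errcode >= IMX_SC_ERR_LAST)
			return -EIO;
		return imx_sc_linux_errmap[errcode];
	}
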
diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig
index f2fdd3756648..179f5d46d8dd 100644
--- a/drivers/firmware/meson/Kconfig
+++ b/drivers/firmware/meson/Kconfig
@@ -5,7 +5,7 @@
config MESON_SM
tristate "Amlogic Secure Monitor driver"
depends on ARCH_MESON || COMPILE_TEST
- default y
+ default ARCH_MESON
depends on ARM64_4K_PAGES
help
Say y here to enable the Amlogic secure monitor driver
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
index f25a9746249b..3ab67aaa9e5d 100644
--- a/drivers/firmware/meson/meson_sm.c
+++ b/drivers/firmware/meson/meson_sm.c
@@ -232,11 +232,16 @@ EXPORT_SYMBOL(meson_sm_call_write);
struct meson_sm_firmware *meson_sm_get(struct device_node *sm_node)
{
struct platform_device *pdev = of_find_device_by_node(sm_node);
+ struct meson_sm_firmware *fw;
if (!pdev)
return NULL;
- return platform_get_drvdata(pdev);
+ fw = platform_get_drvdata(pdev);
+
+ put_device(&pdev->dev);
+
+ return fw;
}
EXPORT_SYMBOL_GPL(meson_sm_get);
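
of_find_device_by_node() takes a reference on the returned platform device; the old body returned drvdata directly and leaked that reference on every meson_sm_get() call. For clarity, the consolidated result of the hunk above, assuming (as the driver does) that DT dependencies keep the provider bound while consumers use it:

	struct platform_device *pdev = of_find_device_by_node(sm_node);
	struct meson_sm_firmware *fw;

	if (!pdev)
		return NULL;

	fw = platform_get_drvdata(pdev);

	/* Balance the reference taken by of_find_device_by_node(). */
	put_device(&pdev->dev);

	return fw;
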
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index f63b716be5b0..1a6f85e463e0 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -1119,7 +1119,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
if (ret) {
dev_err(__scm->dev,
"Assign memory protection call failed %d\n", ret);
- return -EINVAL;
+ return ret;
}
*srcvm = next_vm;
@@ -1603,7 +1603,13 @@ bool qcom_scm_lmh_dcvsh_available(void)
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);
-int qcom_scm_shm_bridge_enable(void)
+/*
+ * This is only supposed to be called once, by the TZMem module. It takes the
+ * SCM struct device as argument and uses it to make the call because, at the
+ * time the SHM Bridge is enabled, the SCM is not yet fully set up and doesn't
+ * accept global user calls. Don't try to use the __scm pointer here.
+ */
+int qcom_scm_shm_bridge_enable(struct device *scm_dev)
{
int ret;
@@ -1615,11 +1621,11 @@ int qcom_scm_shm_bridge_enable(void)
struct qcom_scm_res res;
- if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
+ if (!__qcom_scm_is_call_available(scm_dev, QCOM_SCM_SVC_MP,
QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
return -EOPNOTSUPP;
- ret = qcom_scm_call(__scm->dev, &desc, &res);
+ ret = qcom_scm_call(scm_dev, &desc, &res);
if (ret)
return ret;
@@ -1631,7 +1637,7 @@ int qcom_scm_shm_bridge_enable(void)
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);
-int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags,
+int qcom_scm_shm_bridge_create(u64 pfn_and_ns_perm_flags,
u64 ipfn_and_s_perm_flags, u64 size_and_flags,
u64 ns_vmids, u64 *handle)
{
@@ -1659,7 +1665,7 @@ int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags,
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);
-int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle)
+int qcom_scm_shm_bridge_delete(u64 handle)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_MP,
@@ -1988,11 +1994,14 @@ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
{ .compatible = "asus,vivobook-s15" },
{ .compatible = "asus,zenbook-a14-ux3407qa" },
{ .compatible = "asus,zenbook-a14-ux3407ra" },
+ { .compatible = "dell,inspiron-14-plus-7441" },
+ { .compatible = "dell,latitude-7455" },
{ .compatible = "dell,xps13-9345" },
{ .compatible = "hp,elitebook-ultra-g1q" },
{ .compatible = "hp,omnibook-x14" },
{ .compatible = "huawei,gaokun3" },
{ .compatible = "lenovo,flex-5g" },
+ { .compatible = "lenovo,thinkbook-16" },
{ .compatible = "lenovo,thinkpad-t14s" },
{ .compatible = "lenovo,thinkpad-x13s", },
{ .compatible = "lenovo,yoga-slim7x" },
@@ -2000,6 +2009,7 @@ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
{ .compatible = "microsoft,blackrock" },
{ .compatible = "microsoft,romulus13", },
{ .compatible = "microsoft,romulus15", },
+ { .compatible = "qcom,hamoa-iot-evk" },
{ .compatible = "qcom,sc8180x-primus" },
{ .compatible = "qcom,x1e001de-devkit" },
{ .compatible = "qcom,x1e80100-crd" },
@@ -2008,21 +2018,6 @@ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
{ }
};
-static bool qcom_scm_qseecom_machine_is_allowed(void)
-{
- struct device_node *np;
- bool match;
-
- np = of_find_node_by_path("/");
- if (!np)
- return false;
-
- match = of_match_node(qcom_scm_qseecom_allowlist, np);
- of_node_put(np);
-
- return match;
-}
-
static void qcom_scm_qseecom_free(void *data)
{
struct platform_device *qseecom_dev = data;
@@ -2054,7 +2049,7 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm)
dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);
- if (!qcom_scm_qseecom_machine_is_allowed()) {
+ if (!of_machine_device_match(qcom_scm_qseecom_allowlist)) {
dev_info(scm->dev, "qseecom: untested machine, skipping\n");
return 0;
}
@@ -2088,6 +2083,122 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm)
#endif /* CONFIG_QCOM_QSEECOM */
/**
+ * qcom_scm_qtee_invoke_smc() - Invoke a QTEE object.
+ * @inbuf: start address of memory area used for inbound buffer.
+ * @inbuf_size: size of the memory area used for inbound buffer.
+ * @outbuf: start address of memory area used for outbound buffer.
+ * @outbuf_size: size of the memory area used for outbound buffer.
+ * @result: result of QTEE object invocation.
+ * @response_type: response type returned by QTEE.
+ *
+ * @response_type determines how the contents of @inbuf and @outbuf
+ * should be processed.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int qcom_scm_qtee_invoke_smc(phys_addr_t inbuf, size_t inbuf_size,
+ phys_addr_t outbuf, size_t outbuf_size,
+ u64 *result, u64 *response_type)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_SMCINVOKE,
+ .cmd = QCOM_SCM_SMCINVOKE_INVOKE,
+ .owner = ARM_SMCCC_OWNER_TRUSTED_OS,
+ .args[0] = inbuf,
+ .args[1] = inbuf_size,
+ .args[2] = outbuf,
+ .args[3] = outbuf_size,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ if (ret)
+ return ret;
+
+ if (response_type)
+ *response_type = res.result[0];
+
+ if (result)
+ *result = res.result[1];
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_scm_qtee_invoke_smc);
+
+/**
+ * qcom_scm_qtee_callback_response() - Submit response for callback request.
+ * @buf: start address of memory area used for outbound buffer.
+ * @buf_size: size of the memory area used for outbound buffer.
+ * @result: Result of QTEE object invocation.
+ * @response_type: Response type returned by QTEE.
+ *
+ * @response_type determines how the contents of @buf should be processed.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int qcom_scm_qtee_callback_response(phys_addr_t buf, size_t buf_size,
+ u64 *result, u64 *response_type)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_SMCINVOKE,
+ .cmd = QCOM_SCM_SMCINVOKE_CB_RSP,
+ .owner = ARM_SMCCC_OWNER_TRUSTED_OS,
+ .args[0] = buf,
+ .args[1] = buf_size,
+ .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ if (ret)
+ return ret;
+
+ if (response_type)
+ *response_type = res.result[0];
+
+ if (result)
+ *result = res.result[1];
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_scm_qtee_callback_response);
+
+static void qcom_scm_qtee_free(void *data)
+{
+ struct platform_device *qtee_dev = data;
+
+ platform_device_unregister(qtee_dev);
+}
+
+static void qcom_scm_qtee_init(struct qcom_scm *scm)
+{
+ struct platform_device *qtee_dev;
+ u64 result, response_type;
+ int ret;
+
+ /*
+ * Probe for smcinvoke support. The call fails because the buffers are
+ * invalid, but the QTEE syscall handler first checks whether the call
+ * itself is supported; if it is not, -EIO is returned.
+ */
+ ret = qcom_scm_qtee_invoke_smc(0, 0, 0, 0, &result, &response_type);
+ if (ret == -EIO)
+ return;
+
+ /* Setup QTEE interface device. */
+ qtee_dev = platform_device_register_data(scm->dev, "qcomtee",
+ PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(qtee_dev))
+ return;
+
+ devm_add_action_or_reset(scm->dev, qcom_scm_qtee_free, qtee_dev);
+}
+
+/**
* qcom_scm_is_available() - Checks if SCM is available
*/
bool qcom_scm_is_available(void)
@@ -2250,24 +2361,47 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Paired with smp_load_acquire() in qcom_scm_is_available(). */
- smp_store_release(&__scm, scm);
+ ret = of_reserved_mem_device_init(scm->dev);
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(scm->dev, ret,
+ "Failed to setup the reserved memory region for TZ mem\n");
+
+ ret = qcom_tzmem_enable(scm->dev);
+ if (ret)
+ return dev_err_probe(scm->dev, ret,
+ "Failed to enable the TrustZone memory allocator\n");
+
+ memset(&pool_config, 0, sizeof(pool_config));
+ pool_config.initial_size = 0;
+ pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
+ pool_config.max_size = SZ_256K;
+
+ scm->mempool = devm_qcom_tzmem_pool_new(scm->dev, &pool_config);
+ if (IS_ERR(scm->mempool))
+ return dev_err_probe(scm->dev, PTR_ERR(scm->mempool),
+ "Failed to create the SCM memory pool\n");
irq = platform_get_irq_optional(pdev, 0);
if (irq < 0) {
- if (irq != -ENXIO) {
- ret = irq;
- goto err;
- }
+ if (irq != -ENXIO)
+ return irq;
} else {
- ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
- IRQF_ONESHOT, "qcom-scm", __scm);
- if (ret < 0) {
- dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
- goto err;
- }
+ ret = devm_request_threaded_irq(scm->dev, irq, NULL, qcom_scm_irq_handler,
+ IRQF_ONESHOT, "qcom-scm", scm);
+ if (ret < 0)
+ return dev_err_probe(scm->dev, ret,
+ "Failed to request qcom-scm irq\n");
}
+ /*
+ * Paired with smp_load_acquire() in qcom_scm_is_available().
+ *
+ * This marks the SCM API as ready to accept user calls and can only
+ * be called after the TrustZone memory pool is initialized and the
+ * waitqueue interrupt requested.
+ */
+ smp_store_release(&__scm, scm);
+
__get_convention();
/*
@@ -2283,32 +2417,6 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode)
qcom_scm_disable_sdi();
- ret = of_reserved_mem_device_init(__scm->dev);
- if (ret && ret != -ENODEV) {
- dev_err_probe(__scm->dev, ret,
- "Failed to setup the reserved memory region for TZ mem\n");
- goto err;
- }
-
- ret = qcom_tzmem_enable(__scm->dev);
- if (ret) {
- dev_err_probe(__scm->dev, ret,
- "Failed to enable the TrustZone memory allocator\n");
- goto err;
- }
-
- memset(&pool_config, 0, sizeof(pool_config));
- pool_config.initial_size = 0;
- pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
- pool_config.max_size = SZ_256K;
-
- __scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
- if (IS_ERR(__scm->mempool)) {
- ret = dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
- "Failed to create the SCM memory pool\n");
- goto err;
- }
-
/*
* Initialize the QSEECOM interface.
*
@@ -2322,13 +2430,10 @@ static int qcom_scm_probe(struct platform_device *pdev)
ret = qcom_scm_qseecom_init(scm);
WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);
- return 0;
-
-err:
- /* Paired with smp_load_acquire() in qcom_scm_is_available(). */
- smp_store_release(&__scm, NULL);
+ /* Initialize the QTEE object interface. */
+ qcom_scm_qtee_init(scm);
- return ret;
+ return 0;
}
static void qcom_scm_shutdown(struct platform_device *pdev)
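
On the QTEE additions above: qcom_scm_qtee_init() deliberately issues an invocation with all-zero buffers, because the SMC dispatcher rejects an unimplemented function ID with -EIO before buffer contents are ever examined. The same probe idiom from a hypothetical caller's perspective:

	u64 result, response_type;
	int ret;

	/* Zero-length buffers: only "is smcinvoke implemented?" is answered. */
	ret = qcom_scm_qtee_invoke_smc(0, 0, 0, 0, &result, &response_type);
	if (ret == -EIO)
		return -EOPNOTSUPP;	/* firmware has no QTEE smcinvoke support */
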
diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h
index 3133d826f5fa..a56c8212cc0c 100644
--- a/drivers/firmware/qcom/qcom_scm.h
+++ b/drivers/firmware/qcom/qcom_scm.h
@@ -83,6 +83,7 @@ int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
struct qcom_scm_res *res);
struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void);
+int qcom_scm_shm_bridge_enable(struct device *scm_dev);
#define QCOM_SCM_SVC_BOOT 0x01
#define QCOM_SCM_BOOT_SET_ADDR 0x01
@@ -155,6 +156,13 @@ struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void);
#define QCOM_SCM_SVC_GPU 0x28
#define QCOM_SCM_SVC_GPU_INIT_REGS 0x01
+/* ARM_SMCCC_OWNER_TRUSTED_OS calls */
+
+#define QCOM_SCM_SVC_SMCINVOKE 0x06
+#define QCOM_SCM_SMCINVOKE_INVOKE_LEGACY 0x00
+#define QCOM_SCM_SMCINVOKE_CB_RSP 0x01
+#define QCOM_SCM_SMCINVOKE_INVOKE 0x02
+
/* common error codes */
#define QCOM_SCM_V2_EBUSY -12
#define QCOM_SCM_ENOMEM -5
diff --git a/drivers/firmware/qcom/qcom_tzmem.c b/drivers/firmware/qcom/qcom_tzmem.c
index 94196ad87105..9f232e53115e 100644
--- a/drivers/firmware/qcom/qcom_tzmem.c
+++ b/drivers/firmware/qcom/qcom_tzmem.c
@@ -20,6 +20,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
+#include "qcom_scm.h"
#include "qcom_tzmem.h"
struct qcom_tzmem_area {
@@ -76,6 +77,7 @@ static bool qcom_tzmem_using_shm_bridge;
/* List of machines that are known to not support SHM bridge correctly. */
static const char *const qcom_tzmem_blacklist[] = {
+ "qcom,sc7180", /* hang in rmtfs memory assignment */
"qcom,sc8180x",
"qcom,sdm670", /* failure in GPU firmware loading */
"qcom,sdm845", /* reset in rmtfs memory assignment */
@@ -94,7 +96,7 @@ static int qcom_tzmem_init(void)
goto notsupp;
}
- ret = qcom_scm_shm_bridge_enable();
+ ret = qcom_scm_shm_bridge_enable(qcom_tzmem_dev);
if (ret == -EOPNOTSUPP)
goto notsupp;
@@ -108,7 +110,19 @@ notsupp:
return 0;
}
-static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
+/**
+ * qcom_tzmem_shm_bridge_create() - Create a SHM bridge.
+ * @paddr: Physical address of the memory to share.
+ * @size: Size of the memory to share.
+ * @handle: Handle to the SHM bridge.
+ *
+ * On platforms that support SHM bridge, this function creates a SHM bridge
+ * for the given memory region with QTEE. The handle returned by this function
+ * must be passed to qcom_tzmem_shm_bridge_delete() to free the SHM bridge.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int qcom_tzmem_shm_bridge_create(phys_addr_t paddr, size_t size, u64 *handle)
{
u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags;
int ret;
@@ -116,17 +130,49 @@ static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
if (!qcom_tzmem_using_shm_bridge)
return 0;
- pfn_and_ns_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
- ipfn_and_s_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
- size_and_flags = area->size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);
+ pfn_and_ns_perm = paddr | QCOM_SCM_PERM_RW;
+ ipfn_and_s_perm = paddr | QCOM_SCM_PERM_RW;
+ size_and_flags = size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);
+
+ ret = qcom_scm_shm_bridge_create(pfn_and_ns_perm, ipfn_and_s_perm,
+ size_and_flags, QCOM_SCM_VMID_HLOS,
+ handle);
+ if (ret) {
+ dev_err(qcom_tzmem_dev,
+ "SHM Bridge failed: ret %d paddr 0x%pa, size %zu\n",
+ ret, &paddr, size);
+
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_shm_bridge_create);
+
+/**
+ * qcom_tzmem_shm_bridge_delete() - Delete a SHM bridge.
+ * @handle: Handle to the SHM bridge.
+ *
+ * On platforms that support SHM bridge, this function deletes the SHM bridge
+ * for the given memory region. The handle must be the same as the one
+ * returned by qcom_tzmem_shm_bridge_create().
+ */
+void qcom_tzmem_shm_bridge_delete(u64 handle)
+{
+ if (qcom_tzmem_using_shm_bridge)
+ qcom_scm_shm_bridge_delete(handle);
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_shm_bridge_delete);
+
+static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
+{
+ int ret;
u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
- ret = qcom_scm_shm_bridge_create(qcom_tzmem_dev, pfn_and_ns_perm,
- ipfn_and_s_perm, size_and_flags,
- QCOM_SCM_VMID_HLOS, handle);
+ ret = qcom_tzmem_shm_bridge_create(area->paddr, area->size, handle);
if (ret)
return ret;
@@ -139,10 +185,7 @@ static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{
u64 *handle = area->priv;
- if (!qcom_tzmem_using_shm_bridge)
- return;
-
- qcom_scm_shm_bridge_delete(qcom_tzmem_dev, *handle);
+ qcom_tzmem_shm_bridge_delete(*handle);
kfree(handle);
}
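
The newly exported qcom_tzmem_shm_bridge_create()/_delete() pair lets other drivers bridge arbitrary memory to QTEE without going through the tzmem allocator, and both degrade to no-ops on platforms running without SHM bridge. A minimal, hedged usage sketch:

	void *vaddr;
	u64 handle;
	int ret;

	vaddr = (void *)__get_free_page(GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	ret = qcom_tzmem_shm_bridge_create(virt_to_phys(vaddr), PAGE_SIZE, &handle);
	if (ret) {
		free_page((unsigned long)vaddr);
		return ret;
	}

	/* ... hand the bridged region to the secure world ... */

	qcom_tzmem_shm_bridge_delete(handle);
	free_page((unsigned long)vaddr);
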
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 2615fb780e3c..0eebd572f9a5 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -476,7 +476,7 @@ static ssize_t fw_cfg_sysfs_read_raw(struct file *filp, struct kobject *kobj,
static const struct bin_attribute fw_cfg_sysfs_attr_raw = {
.attr = { .name = "raw", .mode = S_IRUSR },
- .read_new = fw_cfg_sysfs_read_raw,
+ .read = fw_cfg_sysfs_read_raw,
};
/*
diff --git a/drivers/firmware/samsung/Makefile b/drivers/firmware/samsung/Makefile
index 7b4c9f6f34f5..80d4f89b33a9 100644
--- a/drivers/firmware/samsung/Makefile
+++ b/drivers/firmware/samsung/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-acpm-protocol-objs := exynos-acpm.o exynos-acpm-pmic.o
+acpm-protocol-objs := exynos-acpm.o
+acpm-protocol-objs += exynos-acpm-pmic.o
+acpm-protocol-objs += exynos-acpm-dvfs.o
obj-$(CONFIG_EXYNOS_ACPM_PROTOCOL) += acpm-protocol.o
diff --git a/drivers/firmware/samsung/exynos-acpm-dvfs.c b/drivers/firmware/samsung/exynos-acpm-dvfs.c
new file mode 100644
index 000000000000..1c5b2b143bcc
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm-dvfs.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2025 Linaro Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/firmware/samsung/exynos-acpm-protocol.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include "exynos-acpm.h"
+#include "exynos-acpm-dvfs.h"
+
+#define ACPM_DVFS_ID GENMASK(11, 0)
+#define ACPM_DVFS_REQ_TYPE GENMASK(15, 0)
+
+#define ACPM_DVFS_FREQ_REQ 0
+#define ACPM_DVFS_FREQ_GET 1
+
+static void acpm_dvfs_set_xfer(struct acpm_xfer *xfer, u32 *cmd, size_t cmdlen,
+ unsigned int acpm_chan_id, bool response)
+{
+ xfer->acpm_chan_id = acpm_chan_id;
+ xfer->txd = cmd;
+ xfer->txlen = cmdlen;
+
+ if (response) {
+ xfer->rxd = cmd;
+ xfer->rxlen = cmdlen;
+ }
+}
+
+static void acpm_dvfs_init_set_rate_cmd(u32 cmd[4], unsigned int clk_id,
+ unsigned long rate)
+{
+ cmd[0] = FIELD_PREP(ACPM_DVFS_ID, clk_id);
+ cmd[1] = rate / HZ_PER_KHZ;
+ cmd[2] = FIELD_PREP(ACPM_DVFS_REQ_TYPE, ACPM_DVFS_FREQ_REQ);
+ cmd[3] = ktime_to_ms(ktime_get());
+}
+
+int acpm_dvfs_set_rate(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, unsigned int clk_id,
+ unsigned long rate)
+{
+ struct acpm_xfer xfer = {0};
+ u32 cmd[4];
+
+ acpm_dvfs_init_set_rate_cmd(cmd, clk_id, rate);
+ acpm_dvfs_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id, false);
+
+ return acpm_do_xfer(handle, &xfer);
+}
+
+static void acpm_dvfs_init_get_rate_cmd(u32 cmd[4], unsigned int clk_id)
+{
+ cmd[0] = FIELD_PREP(ACPM_DVFS_ID, clk_id);
+ cmd[2] = FIELD_PREP(ACPM_DVFS_REQ_TYPE, ACPM_DVFS_FREQ_GET);
+ cmd[3] = ktime_to_ms(ktime_get());
+}
+
+unsigned long acpm_dvfs_get_rate(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, unsigned int clk_id)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ acpm_dvfs_init_get_rate_cmd(cmd, clk_id);
+ acpm_dvfs_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id, true);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return 0;
+
+ return xfer.rxd[1] * HZ_PER_KHZ;
+}
diff --git a/drivers/firmware/samsung/exynos-acpm-dvfs.h b/drivers/firmware/samsung/exynos-acpm-dvfs.h
new file mode 100644
index 000000000000..9f2778e649c9
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm-dvfs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2025 Linaro Ltd.
+ */
+#ifndef __EXYNOS_ACPM_DVFS_H__
+#define __EXYNOS_ACPM_DVFS_H__
+
+#include <linux/types.h>
+
+struct acpm_handle;
+
+int acpm_dvfs_set_rate(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, unsigned int id,
+ unsigned long rate);
+unsigned long acpm_dvfs_get_rate(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id,
+ unsigned int clk_id);
+
+#endif /* __EXYNOS_ACPM_DVFS_H__ */
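
Consumers reach these through the dvfs_ops installed in acpm_setup_ops() further below rather than calling the functions directly. A hedged sketch of a clock-driver caller; the channel and clock IDs are illustrative, and note that get_rate() folds transfer failures into a 0 return:

	const struct acpm_handle *handle;
	unsigned long rate;
	int ret;

	handle = devm_acpm_get_by_node(dev, acpm_np);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* Request 1 GHz on clock 3 via ACPM channel 2 (IDs illustrative). */
	ret = handle->ops.dvfs_ops.set_rate(handle, 2, 3, 1000 * HZ_PER_MHZ);
	if (ret)
		return ret;

	rate = handle->ops.dvfs_ops.get_rate(handle, 2, 3);
	if (!rate)
		return -EIO;	/* 0 doubles as the transfer-failure indicator */
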
diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.c b/drivers/firmware/samsung/exynos-acpm-pmic.c
index 39b33a356ebd..961d7599e422 100644
--- a/drivers/firmware/samsung/exynos-acpm-pmic.c
+++ b/drivers/firmware/samsung/exynos-acpm-pmic.c
@@ -4,7 +4,9 @@
* Copyright 2020 Google LLC.
* Copyright 2024 Linaro Ltd.
*/
+#include <linux/array_size.h>
#include <linux/bitfield.h>
+#include <linux/errno.h>
#include <linux/firmware/samsung/exynos-acpm-protocol.h>
#include <linux/ktime.h>
#include <linux/types.h>
@@ -33,6 +35,19 @@ enum exynos_acpm_pmic_func {
ACPM_PMIC_BULK_WRITE,
};
+static const int acpm_pmic_linux_errmap[] = {
+ [0] = 0, /* ACPM_PMIC_SUCCESS */
+ [1] = -EACCES, /* register could not be read: inaccessible or access error */
+ [2] = -EACCES, /* register could not be written: inaccessible or access error */
+};
+
+static int acpm_pmic_to_linux_err(int err)
+{
+ if (err >= 0 && err < ARRAY_SIZE(acpm_pmic_linux_errmap))
+ return acpm_pmic_linux_errmap[err];
+ return -EIO;
+}
+
static inline u32 acpm_pmic_set_bulk(u32 data, unsigned int i)
{
return (data & ACPM_PMIC_BULK_MASK) << (ACPM_PMIC_BULK_SHIFT * i);
@@ -79,7 +94,7 @@ int acpm_pmic_read_reg(const struct acpm_handle *handle,
*buf = FIELD_GET(ACPM_PMIC_VALUE, xfer.rxd[1]);
- return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
}
static void acpm_pmic_init_bulk_read_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
@@ -110,7 +125,7 @@ int acpm_pmic_bulk_read(const struct acpm_handle *handle,
if (ret)
return ret;
- ret = FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ ret = acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
if (ret)
return ret;
@@ -150,7 +165,7 @@ int acpm_pmic_write_reg(const struct acpm_handle *handle,
if (ret)
return ret;
- return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
}
static void acpm_pmic_init_bulk_write_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
@@ -190,7 +205,7 @@ int acpm_pmic_bulk_write(const struct acpm_handle *handle,
if (ret)
return ret;
- return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
}
static void acpm_pmic_init_update_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
@@ -220,5 +235,5 @@ int acpm_pmic_update_reg(const struct acpm_handle *handle,
if (ret)
return ret;
- return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
}
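
With acpm_pmic_to_linux_err() in place, every accessor returns a standard errno (or -EIO for unknown firmware codes) instead of the raw ACPM status, so callers can test the result directly. A brief sketch through the ops table; the parameter list follows the driver's internal header and is shown illustratively:

	unsigned int acpm_chan_id = 2;		/* illustrative */
	u8 type = 0, reg = 0x10, chan = 0;	/* illustrative register address */
	u8 val;
	int ret;

	ret = handle->ops.pmic_ops.read_reg(handle, acpm_chan_id,
					    type, reg, chan, &val);
	if (ret)	/* now -EACCES / -EIO, never a raw ACPM code */
		return ret;
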
diff --git a/drivers/firmware/samsung/exynos-acpm.c b/drivers/firmware/samsung/exynos-acpm.c
index e02f14f4bd7c..0cb269c70460 100644
--- a/drivers/firmware/samsung/exynos-acpm.c
+++ b/drivers/firmware/samsung/exynos-acpm.c
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include "exynos-acpm.h"
+#include "exynos-acpm-dvfs.h"
#include "exynos-acpm-pmic.h"
#define ACPM_PROTOCOL_SEQNUM GENMASK(21, 16)
@@ -176,9 +177,11 @@ struct acpm_info {
/**
* struct acpm_match_data - of_device_id data.
* @initdata_base: offset in SRAM where the channels configuration resides.
+ * @acpm_clk_dev_name: name of the ACPM clocks platform device to register.
*/
struct acpm_match_data {
loff_t initdata_base;
+ const char *acpm_clk_dev_name;
};
#define client_to_acpm_chan(c) container_of(c, struct acpm_chan, cl)
@@ -430,6 +433,9 @@ int acpm_do_xfer(const struct acpm_handle *handle, const struct acpm_xfer *xfer)
return -EOPNOTSUPP;
}
+ msg.chan_id = xfer->acpm_chan_id;
+ msg.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL;
+
scoped_guard(mutex, &achan->tx_lock) {
tx_front = readl(achan->tx.front);
idx = (tx_front + 1) % achan->qlen;
@@ -446,25 +452,15 @@ int acpm_do_xfer(const struct acpm_handle *handle, const struct acpm_xfer *xfer)
/* Advance TX front. */
writel(idx, achan->tx.front);
- }
-
- msg.chan_id = xfer->acpm_chan_id;
- msg.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL;
- ret = mbox_send_message(achan->chan, (void *)&msg);
- if (ret < 0)
- return ret;
- ret = acpm_wait_for_message_response(achan, xfer);
+ ret = mbox_send_message(achan->chan, (void *)&msg);
+ if (ret < 0)
+ return ret;
- /*
- * NOTE: we might prefer not to need the mailbox ticker to manage the
- * transfer queueing since the protocol layer queues things by itself.
- * Unfortunately, we have to kick the mailbox framework after we have
- * received our message.
- */
- mbox_client_txdone(achan->chan, ret);
+ mbox_client_txdone(achan->chan, 0);
+ }
- return ret;
+ return acpm_wait_for_message_response(achan, xfer);
}
/**
@@ -597,8 +593,12 @@ static int acpm_channels_init(struct acpm_info *acpm)
*/
static void acpm_setup_ops(struct acpm_info *acpm)
{
+ struct acpm_dvfs_ops *dvfs_ops = &acpm->handle.ops.dvfs_ops;
struct acpm_pmic_ops *pmic_ops = &acpm->handle.ops.pmic_ops;
+ dvfs_ops->set_rate = acpm_dvfs_set_rate;
+ dvfs_ops->get_rate = acpm_dvfs_get_rate;
+
pmic_ops->read_reg = acpm_pmic_read_reg;
pmic_ops->bulk_read = acpm_pmic_bulk_read;
pmic_ops->write_reg = acpm_pmic_write_reg;
@@ -606,9 +606,15 @@ static void acpm_setup_ops(struct acpm_info *acpm)
pmic_ops->update_reg = acpm_pmic_update_reg;
}
+static void acpm_clk_pdev_unregister(void *data)
+{
+ platform_device_unregister(data);
+}
+
static int acpm_probe(struct platform_device *pdev)
{
const struct acpm_match_data *match_data;
+ struct platform_device *acpm_clk_pdev;
struct device *dev = &pdev->dev;
struct device_node *shmem;
struct acpm_info *acpm;
@@ -649,6 +655,18 @@ static int acpm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, acpm);
+ acpm_clk_pdev = platform_device_register_data(dev,
+ match_data->acpm_clk_dev_name,
+ PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(acpm_clk_pdev))
+ return dev_err_probe(dev, PTR_ERR(acpm_clk_pdev),
+ "Failed to register ACPM clocks device.\n");
+
+ ret = devm_add_action_or_reset(dev, acpm_clk_pdev_unregister,
+ acpm_clk_pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add devm action.\n");
+
return devm_of_platform_populate(dev);
}
@@ -748,6 +766,7 @@ EXPORT_SYMBOL_GPL(devm_acpm_get_by_node);
static const struct acpm_match_data acpm_gs101 = {
.initdata_base = ACPM_GS101_INITDATA_BASE,
+ .acpm_clk_dev_name = "gs101-acpm-clk",
};
static const struct of_device_id acpm_match[] = {
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
index cd65b434dc6e..bdee057db2fd 100644
--- a/drivers/firmware/smccc/smccc.c
+++ b/drivers/firmware/smccc/smccc.c
@@ -72,10 +72,7 @@ bool arm_smccc_hypervisor_has_uuid(const uuid_t *hyp_uuid)
struct arm_smccc_res res = {};
uuid_t uuid;
- if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_HVC)
- return false;
-
- arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);
if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
return false;
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index 1ea39a0a76c7..41da07c445a6 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2019, Intel Corporation
+ * Copyright (C) 2025, Altera Corporation
*/
#include <linux/arm-smccc.h>
@@ -14,11 +15,9 @@
#include <linux/firmware/intel/stratix10-svc-client.h>
#include <linux/string.h>
#include <linux/sysfs.h>
+#include <linux/delay.h>
-#define RSU_STATE_MASK GENMASK_ULL(31, 0)
-#define RSU_VERSION_MASK GENMASK_ULL(63, 32)
-#define RSU_ERROR_LOCATION_MASK GENMASK_ULL(31, 0)
-#define RSU_ERROR_DETAIL_MASK GENMASK_ULL(63, 32)
+#define RSU_ERASE_SIZE_MASK GENMASK_ULL(63, 32)
#define RSU_DCMF0_MASK GENMASK_ULL(31, 0)
#define RSU_DCMF1_MASK GENMASK_ULL(63, 32)
#define RSU_DCMF2_MASK GENMASK_ULL(31, 0)
@@ -35,7 +34,8 @@
#define INVALID_DCMF_STATUS 0xFFFFFFFF
#define INVALID_SPT_ADDRESS 0x0
-#define RSU_GET_SPT_CMD 0x5A
+#define RSU_RETRY_SLEEP_MS (1U)
+#define RSU_ASYNC_MSG_RETRY (3U)
#define RSU_GET_SPT_RESP_LEN (4 * sizeof(unsigned int))
typedef void (*rsu_callback)(struct stratix10_svc_client *client,
@@ -64,7 +64,6 @@ typedef void (*rsu_callback)(struct stratix10_svc_client *client,
* @max_retry: the preset max retry value
* @spt0_address: address of spt0
* @spt1_address: address of spt1
- * @get_spt_response_buf: response from sdm for get_spt command
*/
struct stratix10_rsu_priv {
struct stratix10_svc_chan *chan;
@@ -99,47 +98,32 @@ struct stratix10_rsu_priv {
unsigned long spt0_address;
unsigned long spt1_address;
-
- unsigned int *get_spt_response_buf;
};
+typedef void (*rsu_async_callback)(struct device *dev,
+ struct stratix10_rsu_priv *priv, struct stratix10_svc_cb_data *data);
+
/**
- * rsu_status_callback() - Status callback from Intel Service Layer
- * @client: pointer to service client
+ * rsu_async_status_callback() - Status callback from rsu_send_async_msg()
+ * @dev: pointer to device object
+ * @priv: pointer to priv object
* @data: pointer to callback data structure
*
- * Callback from Intel service layer for RSU status request. Status is
- * only updated after a system reboot, so a get updated status call is
- * made during driver probe.
+ * Callback from rsu_send_async_msg() to retrieve the system RSU status.
*/
-static void rsu_status_callback(struct stratix10_svc_client *client,
- struct stratix10_svc_cb_data *data)
+static void rsu_async_status_callback(struct device *dev,
+ struct stratix10_rsu_priv *priv,
+ struct stratix10_svc_cb_data *data)
{
- struct stratix10_rsu_priv *priv = client->priv;
- struct arm_smccc_res *res = (struct arm_smccc_res *)data->kaddr1;
-
- if (data->status == BIT(SVC_STATUS_OK)) {
- priv->status.version = FIELD_GET(RSU_VERSION_MASK,
- res->a2);
- priv->status.state = FIELD_GET(RSU_STATE_MASK, res->a2);
- priv->status.fail_image = res->a1;
- priv->status.current_image = res->a0;
- priv->status.error_location =
- FIELD_GET(RSU_ERROR_LOCATION_MASK, res->a3);
- priv->status.error_details =
- FIELD_GET(RSU_ERROR_DETAIL_MASK, res->a3);
- } else {
- dev_err(client->dev, "COMMAND_RSU_STATUS returned 0x%lX\n",
- res->a0);
- priv->status.version = 0;
- priv->status.state = 0;
- priv->status.fail_image = 0;
- priv->status.current_image = 0;
- priv->status.error_location = 0;
- priv->status.error_details = 0;
- }
-
- complete(&priv->completion);
+ struct arm_smccc_1_2_regs *res = (struct arm_smccc_1_2_regs *)data->kaddr1;
+
+ priv->status.current_image = res->a2;
+ priv->status.fail_image = res->a3;
+ priv->status.state = res->a4;
+ priv->status.version = res->a5;
+ priv->status.error_location = res->a7;
+ priv->status.error_details = res->a8;
+ priv->retry_counter = res->a9;
}
/**
@@ -163,32 +147,6 @@ static void rsu_command_callback(struct stratix10_svc_client *client,
complete(&priv->completion);
}
-/**
- * rsu_retry_callback() - Callback from Intel service layer for getting
- * the current image's retry counter from the firmware
- * @client: pointer to client
- * @data: pointer to callback data structure
- *
- * Callback from Intel service layer for retry counter, which is used by
- * user to know how many times the images is still allowed to reload
- * itself before giving up and starting RSU fail-over flow.
- */
-static void rsu_retry_callback(struct stratix10_svc_client *client,
- struct stratix10_svc_cb_data *data)
-{
- struct stratix10_rsu_priv *priv = client->priv;
- unsigned int *counter = (unsigned int *)data->kaddr1;
-
- if (data->status == BIT(SVC_STATUS_OK))
- priv->retry_counter = *counter;
- else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
- dev_warn(client->dev, "Secure FW doesn't support retry\n");
- else
- dev_err(client->dev, "Failed to get retry counter %lu\n",
- BIT(data->status));
-
- complete(&priv->completion);
-}
/**
* rsu_max_retry_callback() - Callback from Intel service layer for getting
@@ -270,34 +228,19 @@ static void rsu_dcmf_status_callback(struct stratix10_svc_client *client,
complete(&priv->completion);
}
-static void rsu_get_spt_callback(struct stratix10_svc_client *client,
- struct stratix10_svc_cb_data *data)
+/**
+ * rsu_async_get_spt_table_callback() - Callback used by rsu_send_async_msg()
+ * to retrieve the SPT table information.
+ * @dev: pointer to device object
+ * @priv: pointer to priv object
+ * @data: pointer to callback data structure
+ */
+static void rsu_async_get_spt_table_callback(struct device *dev,
+ struct stratix10_rsu_priv *priv,
+ struct stratix10_svc_cb_data *data)
{
- struct stratix10_rsu_priv *priv = client->priv;
- unsigned long *mbox_err = (unsigned long *)data->kaddr1;
- unsigned long *resp_len = (unsigned long *)data->kaddr2;
-
- if (data->status != BIT(SVC_STATUS_OK) || (*mbox_err) ||
- (*resp_len != RSU_GET_SPT_RESP_LEN))
- goto error;
-
- priv->spt0_address = priv->get_spt_response_buf[0];
- priv->spt0_address <<= 32;
- priv->spt0_address |= priv->get_spt_response_buf[1];
-
- priv->spt1_address = priv->get_spt_response_buf[2];
- priv->spt1_address <<= 32;
- priv->spt1_address |= priv->get_spt_response_buf[3];
-
- goto complete;
-
-error:
- dev_err(client->dev, "failed to get SPTs\n");
-
-complete:
- stratix10_svc_free_memory(priv->chan, priv->get_spt_response_buf);
- priv->get_spt_response_buf = NULL;
- complete(&priv->completion);
+ priv->spt0_address = *((unsigned long *)data->kaddr1);
+ priv->spt1_address = *((unsigned long *)data->kaddr2);
}
/**
@@ -329,14 +272,6 @@ static int rsu_send_msg(struct stratix10_rsu_priv *priv,
if (arg)
msg.arg[0] = arg;
- if (command == COMMAND_MBOX_SEND_CMD) {
- msg.arg[1] = 0;
- msg.payload = NULL;
- msg.payload_length = 0;
- msg.payload_output = priv->get_spt_response_buf;
- msg.payload_length_output = RSU_GET_SPT_RESP_LEN;
- }
-
ret = stratix10_svc_send(priv->chan, &msg);
if (ret < 0)
goto status_done;
@@ -362,6 +297,95 @@ status_done:
return ret;
}
+/**
+ * soc64_async_callback() - Callback from Intel service layer for async requests
+ * @ptr: pointer to the completion object
+ */
+static void soc64_async_callback(void *ptr)
+{
+ if (ptr)
+ complete(ptr);
+}
+
+/**
+ * rsu_send_async_msg() - send an async message to Intel service layer
+ * @dev: pointer to device object
+ * @priv: pointer to rsu private data
+ * @command: RSU status or update command
+ * @arg: the request argument (for example, the notify status value)
+ * @callback: function pointer for the callback (status or update)
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int rsu_send_async_msg(struct device *dev, struct stratix10_rsu_priv *priv,
+ enum stratix10_svc_command_code command,
+ unsigned long arg,
+ rsu_async_callback callback)
+{
+ struct stratix10_svc_client_msg msg = {0};
+ struct stratix10_svc_cb_data data = {0};
+ struct completion completion;
+ int status, index, ret;
+ void *handle = NULL;
+
+ msg.command = command;
+ msg.arg[0] = arg;
+
+ init_completion(&completion);
+
+ for (index = 0; index < RSU_ASYNC_MSG_RETRY; index++) {
+ status = stratix10_svc_async_send(priv->chan, &msg,
+ &handle, soc64_async_callback,
+ &completion);
+ if (status == 0)
+ break;
+ dev_warn(dev, "Failed to send async message, retrying\n");
+ msleep(RSU_RETRY_SLEEP_MS);
+ }
+
+ if (status && !handle) {
+ dev_err(dev, "Failed to send async message\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = wait_for_completion_io_timeout(&completion, RSU_TIMEOUT);
+ if (ret > 0)
+ dev_dbg(dev, "Received async interrupt\n");
+ else if (ret == 0)
+ dev_dbg(dev, "Timeout occurred. Trying to poll the response\n");
+
+ for (index = 0; index < RSU_ASYNC_MSG_RETRY; index++) {
+ status = stratix10_svc_async_poll(priv->chan, handle, &data);
+ if (status == -EAGAIN) {
+ dev_dbg(dev, "Async message is still in progress\n");
+ } else if (status < 0) {
+ dev_alert(dev, "Failed to poll async message\n");
+ ret = -ETIMEDOUT;
+ } else if (status == 0) {
+ ret = 0;
+ break;
+ }
+ msleep(RSU_RETRY_SLEEP_MS);
+ }
+
+ if (ret) {
+ dev_err(dev, "Failed to get async response\n");
+ goto status_done;
+ }
+
+ if (data.status == 0) {
+ ret = 0;
+ if (callback)
+ callback(dev, priv, &data);
+ } else {
+ dev_err(dev, "%s returned 0x%x from SDM\n", __func__,
+ data.status);
+ ret = -EFAULT;
+ }
+
+status_done:
+ stratix10_svc_async_done(priv->chan, handle);
+ return ret;
+}
+
/*
* This driver exposes some optional features of the Intel Stratix 10 SoC FPGA.
* The sysfs interfaces exposed here are FPGA Remote System Update (RSU)
@@ -454,8 +478,7 @@ static ssize_t max_retry_show(struct device *dev,
if (!priv)
return -ENODEV;
- return scnprintf(buf, sizeof(priv->max_retry),
- "0x%08x\n", priv->max_retry);
+ return sysfs_emit(buf, "0x%08x\n", priv->max_retry);
}
static ssize_t dcmf0_show(struct device *dev,
@@ -597,27 +620,20 @@ static ssize_t notify_store(struct device *dev,
if (ret)
return ret;
- ret = rsu_send_msg(priv, COMMAND_RSU_NOTIFY,
- status, rsu_command_callback);
+ ret = rsu_send_async_msg(dev, priv, COMMAND_RSU_NOTIFY, status, NULL);
if (ret) {
dev_err(dev, "Error, RSU notify returned %i\n", ret);
return ret;
}
/* to get the updated state */
- ret = rsu_send_msg(priv, COMMAND_RSU_STATUS,
- 0, rsu_status_callback);
+ ret = rsu_send_async_msg(dev, priv, COMMAND_RSU_STATUS, 0,
+ rsu_async_status_callback);
if (ret) {
dev_err(dev, "Error, getting RSU status %i\n", ret);
return ret;
}
- ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
- if (ret) {
- dev_err(dev, "Error, getting RSU retry %i\n", ret);
- return ret;
- }
-
return count;
}
@@ -632,7 +648,7 @@ static ssize_t spt0_address_show(struct device *dev,
if (priv->spt0_address == INVALID_SPT_ADDRESS)
return -EIO;
- return scnprintf(buf, PAGE_SIZE, "0x%08lx\n", priv->spt0_address);
+ return sysfs_emit(buf, "0x%08lx\n", priv->spt0_address);
}
static ssize_t spt1_address_show(struct device *dev,
@@ -646,7 +662,7 @@ static ssize_t spt1_address_show(struct device *dev,
if (priv->spt1_address == INVALID_SPT_ADDRESS)
return -EIO;
- return scnprintf(buf, PAGE_SIZE, "0x%08lx\n", priv->spt1_address);
+ return sysfs_emit(buf, "0x%08lx\n", priv->spt1_address);
}
static DEVICE_ATTR_RO(current_image);
@@ -737,12 +753,19 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
return PTR_ERR(priv->chan);
}
+ ret = stratix10_svc_add_async_client(priv->chan, false);
+ if (ret) {
+ dev_err(dev, "failed to add async client\n");
+ stratix10_svc_free_channel(priv->chan);
+ return ret;
+ }
+
init_completion(&priv->completion);
platform_set_drvdata(pdev, priv);
/* get the initial state from firmware */
- ret = rsu_send_msg(priv, COMMAND_RSU_STATUS,
- 0, rsu_status_callback);
+ ret = rsu_send_async_msg(dev, priv, COMMAND_RSU_STATUS, 0,
+ rsu_async_status_callback);
if (ret) {
dev_err(dev, "Error, getting RSU status %i\n", ret);
stratix10_svc_free_channel(priv->chan);
@@ -763,12 +786,6 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
stratix10_svc_free_channel(priv->chan);
}
- ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
- if (ret) {
- dev_err(dev, "Error, getting RSU retry %i\n", ret);
- stratix10_svc_free_channel(priv->chan);
- }
-
ret = rsu_send_msg(priv, COMMAND_RSU_MAX_RETRY, 0,
rsu_max_retry_callback);
if (ret) {
@@ -776,18 +793,12 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
stratix10_svc_free_channel(priv->chan);
}
- priv->get_spt_response_buf =
- stratix10_svc_allocate_memory(priv->chan, RSU_GET_SPT_RESP_LEN);
- if (IS_ERR(priv->get_spt_response_buf)) {
- dev_err(dev, "failed to allocate get spt buffer\n");
- } else {
- ret = rsu_send_msg(priv, COMMAND_MBOX_SEND_CMD,
- RSU_GET_SPT_CMD, rsu_get_spt_callback);
- if (ret) {
- dev_err(dev, "Error, getting SPT table %i\n", ret);
- stratix10_svc_free_channel(priv->chan);
- }
+ ret = rsu_send_async_msg(dev, priv, COMMAND_RSU_GET_SPT_TABLE, 0,
+ rsu_async_get_spt_table_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting SPT table %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
}
return ret;
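
rsu_send_async_msg() above is a complete template for the new asynchronous service API: send with a completion callback (retried), a bounded wait, poll until the response lands, then release the handle. A condensed, hedged sketch of the same lifecycle for a hypothetical client, with the retry loops collapsed:

	/* Hypothetical client built on the same primitives; names illustrative. */
	static void my_async_cb(void *arg)
	{
		complete(arg);		/* wake the requester below */
	}

	static int my_async_query(struct stratix10_svc_chan *chan)
	{
		struct stratix10_svc_client_msg msg = { .command = COMMAND_RSU_STATUS };
		struct stratix10_svc_cb_data data = {0};
		DECLARE_COMPLETION_ONSTACK(done);
		void *handle = NULL;
		int ret;

		ret = stratix10_svc_async_send(chan, &msg, &handle, my_async_cb, &done);
		if (ret)
			return ret;

		/* EL3 signals completion via the callback; poll for the payload. */
		wait_for_completion_timeout(&done, msecs_to_jiffies(1000));

		ret = stratix10_svc_async_poll(chan, handle, &data);
		/* -EAGAIN means still in flight; real callers retry with msleep(). */

		stratix10_svc_async_done(chan, handle);
		return ret;
	}
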
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index e3f990d888d7..515b948ff320 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -1,11 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017-2018, Intel Corporation
+ * Copyright (C) 2025, Altera Corporation
*/
+#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
+#include <linux/hashtable.h>
+#include <linux/idr.h>
#include <linux/io.h>
#include <linux/kfifo.h>
#include <linux/kthread.h>
@@ -34,7 +38,7 @@
* timeout is set to 30 seconds (30 * 1000) at Intel Stratix10 SoC.
*/
#define SVC_NUM_DATA_IN_FIFO 32
-#define SVC_NUM_CHANNEL 3
+#define SVC_NUM_CHANNEL 4
#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200
#define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30
#define BYTE_TO_WORD_SIZE 4
@@ -43,6 +47,55 @@
#define STRATIX10_RSU "stratix10-rsu"
#define INTEL_FCS "intel-fcs"
+/* Maximum number of SDM client IDs. */
+#define MAX_SDM_CLIENT_IDS 16
+/* Client ID for SIP Service Version 1. */
+#define SIP_SVC_V1_CLIENT_ID 0x1
+/* Maximum number of SDM job IDs. */
+#define MAX_SDM_JOB_IDS 16
+/* Number of bits used for asynchronous transaction hashing. */
+#define ASYNC_TRX_HASH_BITS 3
+/*
+ * Total number of transaction IDs, which is a combination of
+ * client ID and job ID.
+ */
+#define TOTAL_TRANSACTION_IDS \
+ (MAX_SDM_CLIENT_IDS * MAX_SDM_JOB_IDS)
+
+/* Minimum major version of the ATF for Asynchronous transactions. */
+#define ASYNC_ATF_MINIMUM_MAJOR_VERSION 0x3
+/* Minimum minor version of the ATF for Asynchronous transactions.*/
+#define ASYNC_ATF_MINIMUM_MINOR_VERSION 0x0
+
+/* Job ID field in the transaction ID */
+#define STRATIX10_JOB_FIELD GENMASK(3, 0)
+/* Client ID field in the transaction ID */
+#define STRATIX10_CLIENT_FIELD GENMASK(7, 4)
+/* Transaction ID mask for Stratix10 service layer */
+#define STRATIX10_TRANS_ID_FIELD GENMASK(7, 0)
+
+/* Macro to extract the job ID from a transaction ID. */
+#define STRATIX10_GET_JOBID(transaction_id) \
+ (FIELD_GET(STRATIX10_JOB_FIELD, transaction_id))
+/* Macro to set the job ID in a transaction ID. */
+#define STRATIX10_SET_JOBID(jobid) \
+ (FIELD_PREP(STRATIX10_JOB_FIELD, jobid))
+/* Macro to set the client ID in a transaction ID. */
+#define STRATIX10_SET_CLIENTID(clientid) \
+ (FIELD_PREP(STRATIX10_CLIENT_FIELD, clientid))
+/* Macro to set a transaction ID using a client ID and a job ID. */
+#define STRATIX10_SET_TRANSACTIONID(clientid, jobid) \
+ (STRATIX10_SET_CLIENTID(clientid) | STRATIX10_SET_JOBID(jobid))
+/* Macro to set a transaction ID for SIP SMC Async transactions */
+#define STRATIX10_SIP_SMC_SET_TRANSACTIONID_X1(transaction_id) \
+ (FIELD_PREP(STRATIX10_TRANS_ID_FIELD, transaction_id))
+
+/* 10-bit mask for extracting the SDM status code */
+#define STRATIX10_SDM_STATUS_MASK GENMASK(9, 0)
+/* Macro to get the SDM mailbox error status */
+#define STRATIX10_GET_SDM_STATUS_CODE(status) \
+ (FIELD_GET(STRATIX10_SDM_STATUS_MASK, status))
+
typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long,
@@ -52,6 +105,7 @@ struct stratix10_svc_chan;
/**
* struct stratix10_svc - svc private data
* @stratix10_svc_rsu: pointer to stratix10 RSU device
+ * @intel_svc_fcs: pointer to the FCS device
*/
struct stratix10_svc {
struct platform_device *stratix10_svc_rsu;
@@ -63,7 +117,7 @@ struct stratix10_svc {
* @sync_complete: state for a completion
* @addr: physical address of shared memory block
* @size: size of shared memory block
- * @invoke_fn: function to issue secure monitor or hypervisor call
+ * @invoke_fn: function used by service clients to issue secure monitor or hypervisor calls
*
* This struct is used to save physical address and size of shared memory
* block. The shared memory blocked is allocated by secure monitor software
@@ -122,6 +176,74 @@ struct stratix10_svc_data {
};
/**
+ * struct stratix10_svc_async_handler - Asynchronous handler for Stratix10
+ * service layer
+ * @transaction_id: Unique identifier for the transaction
+ * @achan: Pointer to the asynchronous channel structure
+ * @cb_arg: Argument to be passed to the callback function
+ * @cb: Callback function to be called upon completion
+ * @msg: Pointer to the client message structure
+ * @next: Node in the hash list
+ * @res: Response structure to store result from the secure firmware
+ *
+ * This structure is used to handle asynchronous transactions in the
+ * Stratix10 service layer. It maintains the necessary information
+ * for processing and completing asynchronous requests.
+ */
+struct stratix10_svc_async_handler {
+ u8 transaction_id;
+ struct stratix10_async_chan *achan;
+ void *cb_arg;
+ async_callback_t cb;
+ struct stratix10_svc_client_msg *msg;
+ struct hlist_node next;
+ struct arm_smccc_1_2_regs res;
+};
+
+/**
+ * struct stratix10_async_chan - Structure representing an asynchronous channel
+ * @async_client_id: Unique client identifier for the asynchronous operation
+ * @job_id_pool: Pointer to the job ID pool associated with this channel
+
+struct stratix10_async_chan {
+ unsigned long async_client_id;
+ struct ida job_id_pool;
+};
+
+/**
+ * struct stratix10_async_ctrl - Control structure for Stratix10
+ * asynchronous operations
+ * @initialized: Flag indicating whether the control structure has
+ * been initialized
+ * @invoke_fn: Function pointer for invoking Stratix10 service calls
+ * to EL3 secure firmware
+ * @async_id_pool: Pointer to the ID pool used for asynchronous
+ * operations
+ * @common_achan_refcount: Atomic reference count for the common
+ * asynchronous channel usage
+ * @common_async_chan: Pointer to the common asynchronous channel
+ * structure
+ * @trx_list_lock: Spinlock for protecting the transaction list
+ * operations
+ * @trx_list: Hash table for managing asynchronous transactions
+ */
+struct stratix10_async_ctrl {
+ bool initialized;
+ void (*invoke_fn)(struct stratix10_async_ctrl *actrl,
+ const struct arm_smccc_1_2_regs *args,
+ struct arm_smccc_1_2_regs *res);
+ struct ida async_id_pool;
+ atomic_t common_achan_refcount;
+ struct stratix10_async_chan *common_async_chan;
+ /* spinlock to protect trx_list hash table */
+ spinlock_t trx_list_lock;
+ DECLARE_HASHTABLE(trx_list, ASYNC_TRX_HASH_BITS);
+};
+
+/**
* struct stratix10_svc_controller - service controller
* @dev: device
* @chans: array of service channels
@@ -134,6 +256,8 @@ struct stratix10_svc_data {
* @complete_status: state for completion
* @svc_fifo_lock: protect access to service message data queue
* @invoke_fn: function to issue secure monitor call or hypervisor call
+ * @svc: manages the list of client svc drivers
+ * @actrl: async control structure
*
* This struct is used to create communication channels for service clients, to
* handle secure monitor or hypervisor call.
@@ -150,6 +274,8 @@ struct stratix10_svc_controller {
struct completion complete_status;
spinlock_t svc_fifo_lock;
svc_invoke_fn *invoke_fn;
+ struct stratix10_svc *svc;
+ struct stratix10_async_ctrl actrl;
};
/**
@@ -158,20 +284,28 @@ struct stratix10_svc_controller {
* @scl: pointer to service client which owns the channel
* @name: service client name associated with the channel
* @lock: protect access to the channel
+ * @async_chan: reference to asynchronous channel object for this channel
*
- * This struct is used by service client to communicate with service layer, each
- * service client has its own channel created by service controller.
+ * This struct is used by service client to communicate with service layer.
+ * Each service client has its own channel created by service controller.
*/
struct stratix10_svc_chan {
struct stratix10_svc_controller *ctrl;
struct stratix10_svc_client *scl;
char *name;
spinlock_t lock;
+ struct stratix10_async_chan *async_chan;
};
static LIST_HEAD(svc_ctrl);
static LIST_HEAD(svc_data_mem);
+/*
+ * svc_mem_lock protects access to the svc_data_mem list for
+ * concurrent multi-client operations
+ */
+static DEFINE_MUTEX(svc_mem_lock);
+
/**
* svc_pa_to_va() - translate physical address to virtual address
* @addr: to be translated physical address
@@ -184,6 +318,7 @@ static void *svc_pa_to_va(unsigned long addr)
struct stratix10_svc_data_mem *pmem;
pr_debug("claim back P-addr=0x%016x\n", (unsigned int)addr);
+ guard(mutex)(&svc_mem_lock);
list_for_each_entry(pmem, &svc_data_mem, node)
if (pmem->paddr == addr)
return pmem->vaddr;
@@ -341,6 +476,8 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
case COMMAND_RSU_MAX_RETRY:
case COMMAND_RSU_DCMF_STATUS:
case COMMAND_FIRMWARE_VERSION:
+ case COMMAND_HWMON_READTEMP:
+ case COMMAND_HWMON_READVOLT:
cb_data->status = BIT(SVC_STATUS_OK);
cb_data->kaddr1 = &res.a1;
break;
@@ -525,7 +662,17 @@ static int svc_normal_to_secure_thread(void *data)
a1 = (unsigned long)pdata->paddr;
a2 = 0;
break;
-
+ /* for HWMON */
+ case COMMAND_HWMON_READTEMP:
+ a0 = INTEL_SIP_SMC_HWMON_READTEMP;
+ a1 = pdata->arg[0];
+ a2 = 0;
+ break;
+ case COMMAND_HWMON_READVOLT:
+ a0 = INTEL_SIP_SMC_HWMON_READVOLT;
+ a1 = pdata->arg[0];
+ a2 = 0;
+ break;
/* for polling */
case COMMAND_POLL_SERVICE_STATUS:
a0 = INTEL_SIP_SMC_SERVICE_COMPLETED;
@@ -923,6 +1070,591 @@ struct stratix10_svc_chan *stratix10_svc_request_channel_byname(
EXPORT_SYMBOL_GPL(stratix10_svc_request_channel_byname);
/**
+ * stratix10_svc_add_async_client - Add an asynchronous client to the
+ * Stratix10 service channel.
+ * @chan: Pointer to the Stratix10 service channel structure.
+ * @use_unique_clientid: Boolean flag indicating whether to use a
+ * unique client ID.
+ *
+ * This function adds an asynchronous client to the specified
+ * Stratix10 service channel. If the `use_unique_clientid` flag is
+ * set to true, a unique client ID is allocated for the asynchronous
+ * channel. Otherwise, a common asynchronous channel is used.
+ *
+ * Return: 0 on success, or a negative error code on failure:
+ * -EINVAL if the channel is NULL or the async controller is
+ * not initialized.
+ * -EALREADY if the async channel is already allocated.
+ * -ENOMEM if memory allocation fails.
+ * Other negative values if ID allocation fails.
+ */
+int stratix10_svc_add_async_client(struct stratix10_svc_chan *chan,
+ bool use_unique_clientid)
+{
+ struct stratix10_svc_controller *ctrl;
+ struct stratix10_async_ctrl *actrl;
+ struct stratix10_async_chan *achan;
+ int ret = 0;
+
+ if (!chan)
+ return -EINVAL;
+
+ ctrl = chan->ctrl;
+ actrl = &ctrl->actrl;
+
+ if (!actrl->initialized) {
+ dev_err(ctrl->dev, "Async controller not initialized\n");
+ return -EINVAL;
+ }
+
+ if (chan->async_chan) {
+ dev_err(ctrl->dev, "async channel already allocated\n");
+ return -EALREADY;
+ }
+
+	if (!use_unique_clientid &&
+	    atomic_read(&actrl->common_achan_refcount) > 0) {
+		chan->async_chan = actrl->common_async_chan;
+		atomic_inc(&actrl->common_achan_refcount);
+		return 0;
+	}
+
+ achan = kzalloc(sizeof(*achan), GFP_KERNEL);
+ if (!achan)
+ return -ENOMEM;
+
+ ida_init(&achan->job_id_pool);
+
+ ret = ida_alloc_max(&actrl->async_id_pool, MAX_SDM_CLIENT_IDS,
+ GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(ctrl->dev,
+ "Failed to allocate async client id\n");
+ ida_destroy(&achan->job_id_pool);
+ kfree(achan);
+ return ret;
+ }
+
+ achan->async_client_id = ret;
+ chan->async_chan = achan;
+
+	if (!use_unique_clientid &&
+	    atomic_read(&actrl->common_achan_refcount) == 0) {
+ actrl->common_async_chan = achan;
+ atomic_inc(&actrl->common_achan_refcount);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_add_async_client);
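+
+/*
+ * Illustrative sketch (editor's example, not part of this patch): a client
+ * that already owns a service channel could attach to the shared async
+ * channel like this; the function name is hypothetical. The inverse
+ * operation is stratix10_svc_remove_async_client().
+ */
+static int example_enable_async(struct stratix10_svc_chan *chan)
+{
+	/* false: share the common async channel with other clients */
+	return stratix10_svc_add_async_client(chan, false);
+}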
+
+/**
+ * stratix10_svc_remove_async_client - Remove an asynchronous client
+ * from the Stratix10 service
+ * channel.
+ * @chan: Pointer to the Stratix10 service channel structure.
+ *
+ * This function removes an asynchronous client associated with the
+ * given service channel. It checks if the channel and the
+ * asynchronous channel are valid, and then proceeds to decrement
+ * the reference count for the common asynchronous channel if
+ * applicable. If the reference count reaches zero, it destroys the
+ * job ID pool and deallocates the asynchronous client ID. For
+ * non-common asynchronous channels, it directly destroys the job ID
+ * pool, deallocates the asynchronous client ID, and frees the
+ * memory allocated for the asynchronous channel.
+ *
+ * Return: 0 on success, -EINVAL if the channel or asynchronous
+ * channel is invalid.
+ */
+int stratix10_svc_remove_async_client(struct stratix10_svc_chan *chan)
+{
+ struct stratix10_svc_controller *ctrl;
+ struct stratix10_async_ctrl *actrl;
+ struct stratix10_async_chan *achan;
+
+ if (!chan)
+ return -EINVAL;
+
+ ctrl = chan->ctrl;
+ actrl = &ctrl->actrl;
+ achan = chan->async_chan;
+
+ if (!achan) {
+ dev_err(ctrl->dev, "async channel not allocated\n");
+ return -EINVAL;
+ }
+
+ if (achan == actrl->common_async_chan) {
+ atomic_dec(&actrl->common_achan_refcount);
+ if (atomic_read(&actrl->common_achan_refcount) == 0) {
+ ida_destroy(&achan->job_id_pool);
+ ida_free(&actrl->async_id_pool,
+ achan->async_client_id);
+ kfree(achan);
+ actrl->common_async_chan = NULL;
+ }
+ } else {
+ ida_destroy(&achan->job_id_pool);
+ ida_free(&actrl->async_id_pool, achan->async_client_id);
+ kfree(achan);
+ }
+ chan->async_chan = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_remove_async_client);
+
+/**
+ * stratix10_svc_async_send - Send an asynchronous message to the
+ * Stratix10 service
+ * @chan: Pointer to the service channel structure
+ * @msg: Pointer to the message to be sent
+ * @handler: Pointer to the handler for the asynchronous message
+ * used by caller for later reference.
+ * @cb: Callback function to be called upon completion
+ * @cb_arg: Argument to be passed to the callback function
+ *
+ * This function sends an asynchronous message to the SDM mailbox in
+ * EL3 secure firmware. It performs various checks and setups,
+ * including allocating a job ID, composing the transaction ID and
+ * dispatching the request to EL3 firmware. The function handles different
+ * commands by setting up the appropriate arguments for the SMC call.
+ * If the SMC call is successful, the handler is set up and the
+ * function returns 0. If the SMC call fails, appropriate error
+ * handling is performed along with cleanup of resources.
+ *
+ * Return: 0 on success, -EINVAL for invalid argument, -ENOMEM if
+ * memory is not available, -EAGAIN if EL3 firmware is busy, -EBADF
+ * if the message is rejected by EL3 firmware and -EIO on other
+ * errors from EL3 firmware.
+ */
+int stratix10_svc_async_send(struct stratix10_svc_chan *chan, void *msg,
+ void **handler, async_callback_t cb, void *cb_arg)
+{
+ struct arm_smccc_1_2_regs args = { 0 }, res = { 0 };
+ struct stratix10_svc_async_handler *handle = NULL;
+ struct stratix10_svc_client_msg *p_msg =
+ (struct stratix10_svc_client_msg *)msg;
+ struct stratix10_svc_controller *ctrl;
+ struct stratix10_async_ctrl *actrl;
+ struct stratix10_async_chan *achan;
+ int ret = 0;
+
+ if (!chan || !msg || !handler)
+ return -EINVAL;
+
+ achan = chan->async_chan;
+ ctrl = chan->ctrl;
+ actrl = &ctrl->actrl;
+
+ if (!actrl->initialized) {
+ dev_err(ctrl->dev, "Async controller not initialized\n");
+ return -EINVAL;
+ }
+
+ if (!achan) {
+ dev_err(ctrl->dev, "Async channel not allocated\n");
+ return -EINVAL;
+ }
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+	ret = ida_alloc_max(&achan->job_id_pool, MAX_SDM_JOB_IDS,
+			    GFP_KERNEL);
+	if (ret < 0) {
+		dev_err(ctrl->dev, "Failed to allocate job id\n");
+		kfree(handle);
+		return ret;
+	}
+
+ handle->transaction_id =
+ STRATIX10_SET_TRANSACTIONID(achan->async_client_id, ret);
+ handle->cb = cb;
+ handle->msg = p_msg;
+ handle->cb_arg = cb_arg;
+ handle->achan = achan;
+
+	/* set the transaction job id in args.a1 */
+ args.a1 =
+ STRATIX10_SIP_SMC_SET_TRANSACTIONID_X1(handle->transaction_id);
+
+ switch (p_msg->command) {
+ case COMMAND_RSU_GET_SPT_TABLE:
+ args.a0 = INTEL_SIP_SMC_ASYNC_RSU_GET_SPT;
+ break;
+ case COMMAND_RSU_STATUS:
+ args.a0 = INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS;
+ break;
+ case COMMAND_RSU_NOTIFY:
+ args.a0 = INTEL_SIP_SMC_ASYNC_RSU_NOTIFY;
+ args.a2 = p_msg->arg[0];
+ break;
+ default:
+ dev_err(ctrl->dev, "Invalid command ,%d\n", p_msg->command);
+ ret = -EINVAL;
+ goto deallocate_id;
+ }
+
+	/*
+	 * The completion interrupt for this transaction may arrive on
+	 * another core while async_send() is still executing, so add the
+	 * handle to the hash table before issuing the SMC call. If the
+	 * call comes back busy or rejected, remove the handle again so
+	 * the client can retry.
+	 */
+ scoped_guard(spinlock_bh, &actrl->trx_list_lock) {
+ hash_add(actrl->trx_list, &handle->next,
+ handle->transaction_id);
+ }
+
+ actrl->invoke_fn(actrl, &args, &res);
+
+ switch (res.a0) {
+ case INTEL_SIP_SMC_STATUS_OK:
+ dev_dbg(ctrl->dev,
+ "Async message sent with transaction_id 0x%02x\n",
+ handle->transaction_id);
+ *handler = handle;
+ return 0;
+ case INTEL_SIP_SMC_STATUS_BUSY:
+ dev_warn(ctrl->dev, "Mailbox is busy, try after some time\n");
+ ret = -EAGAIN;
+ break;
+ case INTEL_SIP_SMC_STATUS_REJECTED:
+ dev_err(ctrl->dev, "Async message rejected\n");
+ ret = -EBADF;
+ break;
+ default:
+		dev_err(ctrl->dev,
+			"Failed to send async message, got status %ld\n",
+			res.a0);
+ ret = -EIO;
+ }
+
+ scoped_guard(spinlock_bh, &actrl->trx_list_lock) {
+ hash_del(&handle->next);
+ }
+
+deallocate_id:
+ ida_free(&achan->job_id_pool,
+ STRATIX10_GET_JOBID(handle->transaction_id));
+ kfree(handle);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_async_send);
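+
+/*
+ * Illustrative sketch (editor's example, not part of this patch): issuing
+ * COMMAND_RSU_NOTIFY asynchronously. It assumes async_callback_t receives
+ * the cb_arg pointer registered here; the names are hypothetical. Note the
+ * msg must stay valid until the transaction completes, since the handler
+ * keeps a pointer to it for response preparation.
+ */
+static void example_notify_cb(void *cb_arg)
+{
+	complete((struct completion *)cb_arg);
+}
+
+static int example_rsu_notify(struct stratix10_svc_chan *chan,
+			      struct stratix10_svc_client_msg *msg,
+			      struct completion *done, void **handle)
+{
+	msg->command = COMMAND_RSU_NOTIFY;
+	msg->arg[0] = 0;
+
+	return stratix10_svc_async_send(chan, msg, handle,
+					example_notify_cb, done);
+}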
+
+/**
+ * stratix10_svc_async_prepare_response - Prepare the response data for
+ * an asynchronous transaction.
+ * @chan: Pointer to the service channel structure.
+ * @handle: Pointer to the asynchronous handler structure.
+ * @data: Pointer to the callback data structure.
+ *
+ * This function prepares the response data for an asynchronous transaction. It
+ * extracts the response data from the SMC response structure and stores it in
+ * the callback data structure. The function also logs the completion of the
+ * asynchronous transaction.
+ *
+ * Return: 0 on success, -ENOENT if the command is invalid
+ */
+static int stratix10_svc_async_prepare_response(struct stratix10_svc_chan *chan,
+ struct stratix10_svc_async_handler *handle,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_svc_client_msg *p_msg =
+ (struct stratix10_svc_client_msg *)handle->msg;
+ struct stratix10_svc_controller *ctrl = chan->ctrl;
+
+ data->status = STRATIX10_GET_SDM_STATUS_CODE(handle->res.a1);
+
+ switch (p_msg->command) {
+ case COMMAND_RSU_NOTIFY:
+ break;
+ case COMMAND_RSU_GET_SPT_TABLE:
+ data->kaddr1 = (void *)&handle->res.a2;
+ data->kaddr2 = (void *)&handle->res.a3;
+ break;
+ case COMMAND_RSU_STATUS:
+		/*
+		 * COMMAND_RSU_STATUS returns more elements than cb_data can
+		 * accommodate, so pass the whole response structure for the
+		 * client to unpack before it runs the done command.
+		 */
+		data->kaddr1 = (void *)&handle->res;
+		break;
+	default:
+ dev_alert(ctrl->dev, "Invalid command\n ,%d", p_msg->command);
+ return -ENOENT;
+ }
+ dev_dbg(ctrl->dev, "Async message completed transaction_id 0x%02x\n",
+ handle->transaction_id);
+ return 0;
+}
+
+/**
+ * stratix10_svc_async_poll - Polls the status of an asynchronous
+ * transaction.
+ * @chan: Pointer to the service channel structure.
+ * @tx_handle: Handle to the transaction being polled.
+ * @data: Pointer to the callback data structure.
+ *
+ * This function polls the status of an asynchronous transaction
+ * identified by the given transaction handle. It ensures that the
+ * necessary structures are initialized and valid before proceeding
+ * with the poll operation. The function sets up the necessary
+ * arguments for the SMC call, invokes the call, and prepares the
+ * response data if the call succeeds; on failure, the EL3 status is
+ * mapped to a negative errno and returned.
+ *
+ * Return: 0 on success, -EINVAL if any input parameter or the
+ *         transaction handle is invalid or EL3 firmware reports a
+ *         failure, or -EAGAIN if the transaction is still in progress.
+ */
+int stratix10_svc_async_poll(struct stratix10_svc_chan *chan,
+ void *tx_handle,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_svc_async_handler *handle;
+ struct arm_smccc_1_2_regs args = { 0 };
+ struct stratix10_svc_controller *ctrl;
+ struct stratix10_async_ctrl *actrl;
+ struct stratix10_async_chan *achan;
+ int ret;
+
+ if (!chan || !tx_handle || !data)
+ return -EINVAL;
+
+ ctrl = chan->ctrl;
+ actrl = &ctrl->actrl;
+ achan = chan->async_chan;
+
+ if (!achan) {
+ dev_err(ctrl->dev, "Async channel not allocated\n");
+ return -EINVAL;
+ }
+
+ handle = (struct stratix10_svc_async_handler *)tx_handle;
+ scoped_guard(spinlock_bh, &actrl->trx_list_lock) {
+ if (!hash_hashed(&handle->next)) {
+ dev_err(ctrl->dev, "Invalid transaction handler");
+ return -EINVAL;
+ }
+ }
+
+ args.a0 = INTEL_SIP_SMC_ASYNC_POLL;
+ args.a1 =
+ STRATIX10_SIP_SMC_SET_TRANSACTIONID_X1(handle->transaction_id);
+
+ actrl->invoke_fn(actrl, &args, &handle->res);
+
+	/* clear data for response */
+ memset(data, 0, sizeof(*data));
+
+ if (handle->res.a0 == INTEL_SIP_SMC_STATUS_OK) {
+ ret = stratix10_svc_async_prepare_response(chan, handle, data);
+ if (ret) {
+ dev_err(ctrl->dev, "Error in preparation of response,%d\n", ret);
+ WARN_ON_ONCE(1);
+ }
+ return 0;
+ } else if (handle->res.a0 == INTEL_SIP_SMC_STATUS_BUSY) {
+ dev_dbg(ctrl->dev, "async message is still in progress\n");
+ return -EAGAIN;
+ }
+
+	dev_err(ctrl->dev,
+		"Failed to poll async message, got status %ld\n",
+		handle->res.a0);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_async_poll);
+
+/**
+ * stratix10_svc_async_done - Completes an asynchronous transaction.
+ * @chan: Pointer to the service channel structure.
+ * @tx_handle: Handle to the transaction being completed.
+ *
+ * This function completes an asynchronous transaction identified by
+ * the given transaction handle. It ensures that the necessary
+ * structures are initialized and valid before proceeding with the
+ * completion operation. The function deallocates the transaction ID,
+ * frees the memory allocated for the handler, and removes the handler
+ * from the transaction list.
+ *
+ * Return: 0 on success, -EINVAL if any input parameter or the
+ *         transaction handle is invalid.
+ */
+int stratix10_svc_async_done(struct stratix10_svc_chan *chan, void *tx_handle)
+{
+ struct stratix10_svc_async_handler *handle;
+ struct stratix10_svc_controller *ctrl;
+ struct stratix10_async_chan *achan;
+ struct stratix10_async_ctrl *actrl;
+
+ if (!chan || !tx_handle)
+ return -EINVAL;
+
+ ctrl = chan->ctrl;
+ achan = chan->async_chan;
+ actrl = &ctrl->actrl;
+
+ if (!achan) {
+ dev_err(ctrl->dev, "async channel not allocated\n");
+ return -EINVAL;
+ }
+
+ handle = (struct stratix10_svc_async_handler *)tx_handle;
+ scoped_guard(spinlock_bh, &actrl->trx_list_lock) {
+ if (!hash_hashed(&handle->next)) {
+ dev_err(ctrl->dev, "Invalid transaction handle");
+ return -EINVAL;
+ }
+ hash_del(&handle->next);
+ }
+ ida_free(&achan->job_id_pool,
+ STRATIX10_GET_JOBID(handle->transaction_id));
+ kfree(handle);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_async_done);
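+
+/*
+ * Illustrative sketch (editor's example, not part of this patch): driving
+ * a transaction to completion by polling. Assumes the handle came from a
+ * successful stratix10_svc_async_send(); the sleep interval is arbitrary.
+ */
+static int example_wait_async(struct stratix10_svc_chan *chan, void *handle)
+{
+	struct stratix10_svc_cb_data data;
+	int ret;
+
+	do {
+		ret = stratix10_svc_async_poll(chan, handle, &data);
+		if (ret == -EAGAIN)
+			usleep_range(1000, 2000);
+	} while (ret == -EAGAIN);
+
+	/* Release the job id and handler once polling has finished */
+	stratix10_svc_async_done(chan, handle);
+	return ret;
+}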
+
+static inline void stratix10_smc_1_2(struct stratix10_async_ctrl *actrl,
+ const struct arm_smccc_1_2_regs *args,
+ struct arm_smccc_1_2_regs *res)
+{
+ arm_smccc_1_2_smc(args, res);
+}
+
+/**
+ * stratix10_svc_async_init - Initialize the Stratix10 service
+ * controller for asynchronous operations.
+ * @controller: Pointer to the Stratix10 service controller structure.
+ *
+ * This function initializes the asynchronous service controller by
+ * setting up the necessary data structures and initializing the
+ * transaction list.
+ *
+ * Return: 0 on success, -EINVAL if the controller is NULL, already
+ *         initialized, or the EL3 firmware version is incompatible,
+ *         or -EADDRINUSE if the V1 client ID cannot be reserved.
+ */
+static int stratix10_svc_async_init(struct stratix10_svc_controller *controller)
+{
+ struct stratix10_async_ctrl *actrl;
+ struct arm_smccc_res res;
+ struct device *dev;
+ int ret;
+
+ if (!controller)
+ return -EINVAL;
+
+ actrl = &controller->actrl;
+
+ if (actrl->initialized)
+ return -EINVAL;
+
+ dev = controller->dev;
+
+ controller->invoke_fn(INTEL_SIP_SMC_SVC_VERSION, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 != INTEL_SIP_SMC_STATUS_OK ||
+ !(res.a1 > ASYNC_ATF_MINIMUM_MAJOR_VERSION ||
+ (res.a1 == ASYNC_ATF_MINIMUM_MAJOR_VERSION &&
+ res.a2 >= ASYNC_ATF_MINIMUM_MINOR_VERSION))) {
+ dev_err(dev,
+ "Intel Service Layer Driver: ATF version is not compatible for async operation\n");
+ return -EINVAL;
+ }
+
+ actrl->invoke_fn = stratix10_smc_1_2;
+
+ ida_init(&actrl->async_id_pool);
+
+	/*
+	 * SIP_SVC_V1_CLIENT_ID is used by V1/stratix10_svc_send() clients
+	 * for communicating with SDM synchronously. Reserve it here so
+	 * that V3/stratix10_svc_async_send() never allocates it, allowing
+	 * EL3 firmware to distinguish V1 from V3 messages.
+	 */
+ ret = ida_alloc_range(&actrl->async_id_pool, SIP_SVC_V1_CLIENT_ID,
+ SIP_SVC_V1_CLIENT_ID, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(dev,
+ "Intel Service Layer Driver: Error on reserving SIP_SVC_V1_CLIENT_ID\n");
+ ida_destroy(&actrl->async_id_pool);
+ actrl->invoke_fn = NULL;
+ return -EADDRINUSE;
+ }
+
+ spin_lock_init(&actrl->trx_list_lock);
+ hash_init(actrl->trx_list);
+ atomic_set(&actrl->common_achan_refcount, 0);
+
+ actrl->initialized = true;
+ return 0;
+}
+
+/**
+ * stratix10_svc_async_exit - Clean up and exit the asynchronous
+ * service controller
+ * @ctrl: Pointer to the stratix10_svc_controller structure
+ *
+ * This function performs the necessary cleanup for the asynchronous
+ * service controller. It checks if the controller is valid and if it
+ * has been initialized. It then locks the transaction list and safely
+ * removes and deallocates each handler in the list. The function also
+ * removes any asynchronous clients associated with the controller's
+ * channels. Finally, it destroys the asynchronous ID pool and clears
+ * the invoke function pointer.
+ *
+ * Return: 0 on success, -EINVAL if the controller is invalid or not
+ * initialized.
+ */
+static int stratix10_svc_async_exit(struct stratix10_svc_controller *ctrl)
+{
+ struct stratix10_svc_async_handler *handler;
+ struct stratix10_async_ctrl *actrl;
+ struct hlist_node *tmp;
+ int i;
+
+ if (!ctrl)
+ return -EINVAL;
+
+ actrl = &ctrl->actrl;
+
+ if (!actrl->initialized)
+ return -EINVAL;
+
+ actrl->initialized = false;
+
+ scoped_guard(spinlock_bh, &actrl->trx_list_lock) {
+ hash_for_each_safe(actrl->trx_list, i, tmp, handler, next) {
+ ida_free(&handler->achan->job_id_pool,
+ STRATIX10_GET_JOBID(handler->transaction_id));
+ hash_del(&handler->next);
+ kfree(handler);
+ }
+ }
+
+ for (i = 0; i < SVC_NUM_CHANNEL; i++) {
+ if (ctrl->chans[i].async_chan) {
+ stratix10_svc_remove_async_client(&ctrl->chans[i]);
+ ctrl->chans[i].async_chan = NULL;
+ }
+ }
+
+ ida_destroy(&actrl->async_id_pool);
+ actrl->invoke_fn = NULL;
+
+ return 0;
+}
+
+/**
* stratix10_svc_free_channel() - free service channel
* @chan: service channel to be freed
*
@@ -990,6 +1722,7 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg)
p_data->flag = ct->flags;
}
} else {
+ guard(mutex)(&svc_mem_lock);
list_for_each_entry(p_mem, &svc_data_mem, node)
if (p_mem->vaddr == p_msg->payload) {
p_data->paddr = p_mem->paddr;
@@ -1072,6 +1805,7 @@ void *stratix10_svc_allocate_memory(struct stratix10_svc_chan *chan,
if (!pmem)
return ERR_PTR(-ENOMEM);
+ guard(mutex)(&svc_mem_lock);
va = gen_pool_alloc(genpool, s);
if (!va)
return ERR_PTR(-ENOMEM);
@@ -1100,6 +1834,7 @@ EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory);
void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr)
{
struct stratix10_svc_data_mem *pmem;
+ guard(mutex)(&svc_mem_lock);
list_for_each_entry(pmem, &svc_data_mem, node)
if (pmem->vaddr == kaddr) {
@@ -1174,11 +1909,18 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
controller->invoke_fn = invoke_fn;
init_completion(&controller->complete_status);
+ ret = stratix10_svc_async_init(controller);
+ if (ret) {
+ dev_dbg(dev, "Intel Service Layer Driver: Error on stratix10_svc_async_init %d\n",
+ ret);
+ goto err_destroy_pool;
+ }
+
fifo_size = sizeof(struct stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO;
ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL);
if (ret) {
dev_err(dev, "failed to allocate FIFO\n");
- goto err_destroy_pool;
+ goto err_async_exit;
}
spin_lock_init(&controller->svc_fifo_lock);
@@ -1197,6 +1939,11 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
chans[2].name = SVC_CLIENT_FCS;
spin_lock_init(&chans[2].lock);
+ chans[3].scl = NULL;
+ chans[3].ctrl = controller;
+ chans[3].name = SVC_CLIENT_HWMON;
+ spin_lock_init(&chans[3].lock);
+
list_add_tail(&controller->node, &svc_ctrl);
platform_set_drvdata(pdev, controller);
@@ -1206,6 +1953,7 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err_free_kfifo;
}
+ controller->svc = svc;
svc->stratix10_svc_rsu = platform_device_alloc(STRATIX10_RSU, 0);
if (!svc->stratix10_svc_rsu) {
@@ -1237,8 +1985,6 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
if (ret)
goto err_unregister_fcs_dev;
- dev_set_drvdata(dev, svc);
-
pr_info("Intel Service Layer Driver Initialized\n");
return 0;
@@ -1249,6 +1995,8 @@ err_unregister_rsu_dev:
platform_device_unregister(svc->stratix10_svc_rsu);
err_free_kfifo:
kfifo_free(&controller->svc_fifo);
+err_async_exit:
+ stratix10_svc_async_exit(controller);
err_destroy_pool:
gen_pool_destroy(genpool);
return ret;
@@ -1256,8 +2004,10 @@ err_destroy_pool:
static void stratix10_svc_drv_remove(struct platform_device *pdev)
{
- struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
+ struct stratix10_svc *svc = ctrl->svc;
+
+ stratix10_svc_async_exit(ctrl);
of_platform_depopulate(ctrl->dev);
diff --git a/drivers/firmware/tegra/Kconfig b/drivers/firmware/tegra/Kconfig
index cde1ab8bd9d1..91f2320c0d0f 100644
--- a/drivers/firmware/tegra/Kconfig
+++ b/drivers/firmware/tegra/Kconfig
@@ -2,7 +2,7 @@
menu "Tegra firmware driver"
config TEGRA_IVC
- bool "Tegra IVC protocol"
+ bool "Tegra IVC protocol" if COMPILE_TEST
depends on ARCH_TEGRA
help
IVC (Inter-VM Communication) protocol is part of the IPC
@@ -13,8 +13,9 @@ config TEGRA_IVC
config TEGRA_BPMP
bool "Tegra BPMP driver"
- depends on ARCH_TEGRA && TEGRA_HSP_MBOX && TEGRA_IVC
+ depends on ARCH_TEGRA && TEGRA_HSP_MBOX
depends on !CPU_BIG_ENDIAN
+ select TEGRA_IVC
help
BPMP (Boot and Power Management Processor) is designed to off-loading
the PM functions which include clock/DVFS/thermal/power from the CPU.
diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile
index 620cf3fdd607..41e2e4dc31d6 100644
--- a/drivers/firmware/tegra/Makefile
+++ b/drivers/firmware/tegra/Makefile
@@ -4,6 +4,7 @@ tegra-bpmp-$(CONFIG_ARCH_TEGRA_210_SOC) += bpmp-tegra210.o
tegra-bpmp-$(CONFIG_ARCH_TEGRA_186_SOC) += bpmp-tegra186.o
tegra-bpmp-$(CONFIG_ARCH_TEGRA_194_SOC) += bpmp-tegra186.o
tegra-bpmp-$(CONFIG_ARCH_TEGRA_234_SOC) += bpmp-tegra186.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_264_SOC) += bpmp-tegra186.o
tegra-bpmp-$(CONFIG_DEBUG_FS) += bpmp-debugfs.o
obj-$(CONFIG_TEGRA_BPMP) += tegra-bpmp.o
obj-$(CONFIG_TEGRA_IVC) += ivc.o
diff --git a/drivers/firmware/tegra/bpmp-private.h b/drivers/firmware/tegra/bpmp-private.h
index 182bfe396516..07c3d46abb87 100644
--- a/drivers/firmware/tegra/bpmp-private.h
+++ b/drivers/firmware/tegra/bpmp-private.h
@@ -23,13 +23,7 @@ struct tegra_bpmp_ops {
int (*resume)(struct tegra_bpmp *bpmp);
};
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
- IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
- IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
extern const struct tegra_bpmp_ops tegra186_bpmp_ops;
-#endif
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
extern const struct tegra_bpmp_ops tegra210_bpmp_ops;
-#endif
#endif
diff --git a/drivers/firmware/tegra/bpmp-tegra186.c b/drivers/firmware/tegra/bpmp-tegra186.c
index 6f0d0511b486..64863db7a715 100644
--- a/drivers/firmware/tegra/bpmp-tegra186.c
+++ b/drivers/firmware/tegra/bpmp-tegra186.c
@@ -6,7 +6,7 @@
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <soc/tegra/bpmp.h>
@@ -192,18 +192,16 @@ static void tegra186_bpmp_teardown_channels(struct tegra_bpmp *bpmp)
static int tegra186_bpmp_dram_init(struct tegra_bpmp *bpmp)
{
struct tegra186_bpmp *priv = bpmp->priv;
- struct device_node *np;
struct resource res;
size_t size;
int err;
- np = of_parse_phandle(bpmp->dev->of_node, "memory-region", 0);
- if (!np)
- return -ENODEV;
-
- err = of_address_to_resource(np, 0, &res);
+ err = of_reserved_mem_region_to_resource(bpmp->dev->of_node, 0, &res);
if (err < 0) {
- dev_warn(bpmp->dev, "failed to parse memory region: %d\n", err);
+ if (err != -ENODEV)
+ dev_warn(bpmp->dev,
+ "failed to parse memory region: %d\n", err);
+
return err;
}
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index c3a1dc344961..e74bba7ccc44 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -836,7 +836,8 @@ static const struct dev_pm_ops tegra_bpmp_pm_ops = {
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
- IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_264_SOC)
static const struct tegra_bpmp_soc tegra186_soc = {
.channels = {
.cpu_tx = {
@@ -884,7 +885,8 @@ static const struct tegra_bpmp_soc tegra210_soc = {
static const struct of_device_id tegra_bpmp_match[] = {
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
- IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_264_SOC)
{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index ae5fd1936ad3..e027a2bd8f26 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -398,6 +398,9 @@ static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
static inline int ti_sci_do_xfer(struct ti_sci_info *info,
struct ti_sci_xfer *xfer)
{
+ struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+ bool response_expected = !!(hdr->flags & (TI_SCI_FLAG_REQ_ACK_ON_PROCESSED |
+ TI_SCI_FLAG_REQ_ACK_ON_RECEIVED));
int ret;
int timeout;
struct device *dev = info->dev;
@@ -409,12 +412,12 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,
ret = 0;
- if (system_state <= SYSTEM_RUNNING) {
+ if (response_expected && system_state <= SYSTEM_RUNNING) {
/* And we wait for the response. */
timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
if (!wait_for_completion_timeout(&xfer->done, timeout))
ret = -ETIMEDOUT;
- } else {
+ } else if (response_expected) {
/*
* If we are !running, we cannot use wait_for_completion_timeout
* during noirq phase, so we must manually poll the completion.
@@ -1670,6 +1673,9 @@ fail:
static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode,
u32 ctx_lo, u32 ctx_hi, u32 debug_flags)
{
+ u32 msg_flags = mode == TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO ?
+ TI_SCI_FLAG_REQ_GENERIC_NORESPONSE :
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED;
struct ti_sci_info *info;
struct ti_sci_msg_req_prepare_sleep *req;
struct ti_sci_msg_hdr *resp;
@@ -1686,7 +1692,7 @@ static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode,
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PREPARE_SLEEP,
- TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ msg_flags,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
@@ -1706,11 +1712,12 @@ static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode,
goto fail;
}
- resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
-
- if (!ti_sci_is_response_ack(resp)) {
- dev_err(dev, "Failed to prepare sleep\n");
- ret = -ENODEV;
+ if (msg_flags == TI_SCI_FLAG_REQ_ACK_ON_PROCESSED) {
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to prepare sleep\n");
+ ret = -ENODEV;
+ }
}
fail:
@@ -2015,6 +2022,47 @@ fail:
return ret;
}
+/**
+ * ti_sci_cmd_lpm_abort() - Abort entry to LPM by clearing selection of LPM to enter
+ * @dev: Device pointer corresponding to the SCI entity
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_lpm_abort(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+ struct ti_sci_msg_hdr *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ int ret = 0;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_ABORT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
struct ti_sci_info *info;
@@ -3623,6 +3671,78 @@ devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
+/*
+ * Iterate over all device nodes that have a wakeup-source property and check
+ * whether one of the referenced phandles points to a Partial-IO system state.
+ * If it does, resolve the device node to an actual device and check whether
+ * wakeup is enabled.
+ */
+static bool ti_sci_partial_io_wakeup_enabled(struct ti_sci_info *info)
+{
+ struct device_node *wakeup_node = NULL;
+
+ for_each_node_with_property(wakeup_node, "wakeup-source") {
+ struct of_phandle_iterator it;
+ int err;
+
+ of_for_each_phandle(&it, err, wakeup_node, "wakeup-source", NULL, 0) {
+ struct platform_device *pdev;
+ bool may_wakeup;
+
+ /*
+ * Continue if idle-state-name is not off-wake. Return
+ * value is the index of the string which should be 0 if
+ * off-wake is present.
+ */
+ if (of_property_match_string(it.node, "idle-state-name", "off-wake"))
+ continue;
+
+ pdev = of_find_device_by_node(wakeup_node);
+ if (!pdev)
+ continue;
+
+ may_wakeup = device_may_wakeup(&pdev->dev);
+ put_device(&pdev->dev);
+
+ if (may_wakeup) {
+ dev_dbg(info->dev, "%pOF identified as wakeup source for Partial-IO\n",
+ wakeup_node);
+ of_node_put(it.node);
+ of_node_put(wakeup_node);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
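+
+/*
+ * Illustrative sketch (editor's example, not part of this patch): for the
+ * scan above to pick a device up, its node needs a wakeup-source phandle
+ * to a state node whose idle-state-name is "off-wake", and the device
+ * itself must have wakeup enabled, e.g. from its driver's probe():
+ */
+static void example_mark_wakeup_capable(struct device *dev)
+{
+	device_init_wakeup(dev, true);
+}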
+
+static int ti_sci_sys_off_handler(struct sys_off_data *data)
+{
+ struct ti_sci_info *info = data->cb_data;
+ const struct ti_sci_handle *handle = &info->handle;
+ bool enter_partial_io = ti_sci_partial_io_wakeup_enabled(info);
+ int ret;
+
+ if (!enter_partial_io)
+ return NOTIFY_DONE;
+
+ dev_info(info->dev, "Entering Partial-IO because a powered wakeup-enabled device was found.\n");
+
+ ret = ti_sci_cmd_prepare_sleep(handle, TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO, 0, 0, 0);
+ if (ret) {
+ dev_err(info->dev,
+ "Failed to enter Partial-IO %pe, trying to do an emergency restart\n",
+ ERR_PTR(ret));
+ emergency_restart();
+ }
+
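+
+	/* Firmware should power the system off; restart if we are still running */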
+ mdelay(5000);
+ emergency_restart();
+
+ return NOTIFY_DONE;
+}
+
static int tisci_reboot_handler(struct sys_off_data *data)
{
struct ti_sci_info *info = data->cb_data;
@@ -3665,7 +3785,7 @@ static int ti_sci_prepare_system_suspend(struct ti_sci_info *info)
}
}
-static int __maybe_unused ti_sci_suspend(struct device *dev)
+static int ti_sci_suspend(struct device *dev)
{
struct ti_sci_info *info = dev_get_drvdata(dev);
struct device *cpu_dev, *cpu_dev_max = NULL;
@@ -3705,19 +3825,21 @@ static int __maybe_unused ti_sci_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused ti_sci_suspend_noirq(struct device *dev)
+static int ti_sci_suspend_noirq(struct device *dev)
{
struct ti_sci_info *info = dev_get_drvdata(dev);
int ret = 0;
- ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE);
- if (ret)
- return ret;
+ if (info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION) {
+ ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE);
+ if (ret)
+ return ret;
+ }
return 0;
}
-static int __maybe_unused ti_sci_resume_noirq(struct device *dev)
+static int ti_sci_resume_noirq(struct device *dev)
{
struct ti_sci_info *info = dev_get_drvdata(dev);
int ret = 0;
@@ -3726,9 +3848,11 @@ static int __maybe_unused ti_sci_resume_noirq(struct device *dev)
u8 pin;
u8 mode;
- ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE);
- if (ret)
- return ret;
+ if (info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION) {
+ ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE);
+ if (ret)
+ return ret;
+ }
ret = ti_sci_msg_cmd_lpm_wake_reason(&info->handle, &source, &time, &pin, &mode);
/* Do not fail to resume on error as the wake reason is not critical */
@@ -3739,12 +3863,21 @@ static int __maybe_unused ti_sci_resume_noirq(struct device *dev)
return 0;
}
+static void ti_sci_pm_complete(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+
+ if (info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT) {
+ if (ti_sci_cmd_lpm_abort(dev))
+ dev_err(dev, "LPM clear selection failed.\n");
+ }
+}
+
static const struct dev_pm_ops ti_sci_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
- .suspend = ti_sci_suspend,
- .suspend_noirq = ti_sci_suspend_noirq,
- .resume_noirq = ti_sci_resume_noirq,
-#endif
+ .suspend = pm_sleep_ptr(ti_sci_suspend),
+ .suspend_noirq = pm_sleep_ptr(ti_sci_suspend_noirq),
+ .resume_noirq = pm_sleep_ptr(ti_sci_resume_noirq),
+ .complete = pm_sleep_ptr(ti_sci_pm_complete),
};
/* Description for K2G */
@@ -3876,10 +4009,12 @@ static int ti_sci_probe(struct platform_device *pdev)
}
ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps);
- dev_dbg(dev, "Detected firmware capabilities: %s%s%s\n",
+ dev_dbg(dev, "Detected firmware capabilities: %s%s%s%s%s\n",
info->fw_caps & MSG_FLAG_CAPS_GENERIC ? "Generic" : "",
info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO ? " Partial-IO" : "",
- info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : ""
+ info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : "",
+ info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT ? " LPM-Abort" : "",
+ info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION ? " IO-Isolation" : ""
);
ti_sci_setup_ops(info);
@@ -3890,6 +4025,19 @@ static int ti_sci_probe(struct platform_device *pdev)
goto out;
}
+ if (info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO) {
+ ret = devm_register_sys_off_handler(dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_FIRMWARE,
+ ti_sci_sys_off_handler,
+ info);
+ if (ret) {
+ dev_err(dev, "Failed to register sys_off_handler %pe\n",
+ ERR_PTR(ret));
+ goto out;
+ }
+ }
+
dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
info->handle.version.abi_major, info->handle.version.abi_minor,
info->handle.version.firmware_revision,
@@ -3899,7 +4047,13 @@ static int ti_sci_probe(struct platform_device *pdev)
list_add_tail(&info->node, &ti_sci_list);
mutex_unlock(&ti_sci_list_mutex);
- return of_platform_populate(dev->of_node, NULL, NULL, dev);
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "platform_populate failed %pe\n", ERR_PTR(ret));
+ goto out;
+ }
+ return 0;
+
out:
if (!IS_ERR(info->chan_tx))
mbox_free_channel(info->chan_tx);
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index 053387d7baa0..91f234550c43 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -42,6 +42,7 @@
#define TI_SCI_MSG_SET_IO_ISOLATION 0x0307
#define TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT 0x0309
#define TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT 0x030A
+#define TI_SCI_MSG_LPM_ABORT 0x0311
/* Resource Management Requests */
#define TI_SCI_MSG_GET_RESOURCE_RANGE 0x1500
@@ -147,6 +148,8 @@ struct ti_sci_msg_req_reboot {
* MSG_FLAG_CAPS_GENERIC: Generic capability (LPM not supported)
* MSG_FLAG_CAPS_LPM_PARTIAL_IO: Partial IO in LPM
* MSG_FLAG_CAPS_LPM_DM_MANAGED: LPM can be managed by DM
+ * MSG_FLAG_CAPS_IO_ISOLATION: IO Isolation support
+ * MSG_FLAG_CAPS_LPM_ABORT: Abort entry to LPM
*
* Response to a generic message with message type TI_SCI_MSG_QUERY_FW_CAPS
* providing currently available SOC/firmware capabilities. SoC that don't
@@ -157,6 +160,8 @@ struct ti_sci_msg_resp_query_fw_caps {
#define MSG_FLAG_CAPS_GENERIC TI_SCI_MSG_FLAG(0)
#define MSG_FLAG_CAPS_LPM_PARTIAL_IO TI_SCI_MSG_FLAG(4)
#define MSG_FLAG_CAPS_LPM_DM_MANAGED TI_SCI_MSG_FLAG(5)
+#define MSG_FLAG_CAPS_IO_ISOLATION	TI_SCI_MSG_FLAG(7)
+#define MSG_FLAG_CAPS_LPM_ABORT		TI_SCI_MSG_FLAG(9)
#define MSG_MASK_CAPS_LPM GENMASK_ULL(4, 1)
u64 fw_caps;
} __packed;
@@ -592,6 +597,11 @@ struct ti_sci_msg_resp_get_clock_freq {
struct ti_sci_msg_req_prepare_sleep {
struct ti_sci_msg_hdr hdr;
+/*
+ * When sending prepare_sleep with MODE_PARTIAL_IO no response will be sent,
+ * no further steps are required.
+ */
+#define TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO 0x03
#define TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED 0xfd
u8 mode;
u32 ctx_lo;
diff --git a/drivers/firmware/xilinx/Makefile b/drivers/firmware/xilinx/Makefile
index 875a53703c82..70f8f02f14a3 100644
--- a/drivers/firmware/xilinx/Makefile
+++ b/drivers/firmware/xilinx/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Xilinx firmwares
-obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o
+obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o zynqmp-ufs.o
obj-$(CONFIG_ZYNQMP_FIRMWARE_DEBUG) += zynqmp-debug.o
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
index 22853ae0efdf..36efb827f3da 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.c
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -3,6 +3,7 @@
* Xilinx Zynq MPSoC Firmware layer for debugfs APIs
*
* Copyright (C) 2014-2018 Xilinx, Inc.
+ * Copyright (C) 2022 - 2025 Advanced Micro Devices, Inc.
*
* Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -38,6 +39,7 @@ static struct pm_api_info pm_api_list[] = {
PM_API(PM_RELEASE_NODE),
PM_API(PM_SET_REQUIREMENT),
PM_API(PM_GET_API_VERSION),
+ PM_API(PM_GET_NODE_STATUS),
PM_API(PM_REGISTER_NOTIFIER),
PM_API(PM_RESET_ASSERT),
PM_API(PM_RESET_GET_STATUS),
@@ -167,6 +169,17 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
pm_api_arg[3] ? pm_api_arg[3] :
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
break;
+ case PM_GET_NODE_STATUS:
+ ret = zynqmp_pm_get_node_status(pm_api_arg[0],
+ &pm_api_ret[0],
+ &pm_api_ret[1],
+ &pm_api_ret[2]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "GET_NODE_STATUS:\n\tNodeId: %llu\n\tStatus: %u\n\tRequirements: %u\n\tUsage: %u\n",
+ pm_api_arg[0], pm_api_ret[0],
+ pm_api_ret[1], pm_api_ret[2]);
+ break;
case PM_REGISTER_NOTIFIER:
ret = zynqmp_pm_register_notifier(pm_api_arg[0],
pm_api_arg[1] ?
diff --git a/drivers/firmware/xilinx/zynqmp-ufs.c b/drivers/firmware/xilinx/zynqmp-ufs.c
new file mode 100644
index 000000000000..85da8a822f3a
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp-ufs.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Firmware Layer for UFS APIs
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/module.h>
+
+/* Register Node IDs */
+#define PM_REGNODE_PMC_IOU_SLCR 0x30000002 /* PMC IOU SLCR */
+#define PM_REGNODE_EFUSE_CACHE 0x30000003 /* EFUSE Cache */
+
+/* Register Offsets for PMC IOU SLCR */
+#define SRAM_CSR_OFFSET 0x104C /* SRAM Control and Status */
+#define TXRX_CFGRDY_OFFSET 0x1054 /* M-PHY TX-RX Config ready */
+
+/* Masks for SRAM Control and Status Register */
+#define SRAM_CSR_INIT_DONE_MASK BIT(0) /* SRAM initialization done */
+#define SRAM_CSR_EXT_LD_DONE_MASK BIT(1) /* SRAM External load done */
+#define SRAM_CSR_BYPASS_MASK BIT(2) /* Bypass SRAM interface */
+
+/* Mask to check M-PHY TX-RX configuration readiness */
+#define TX_RX_CFG_RDY_MASK GENMASK(3, 0)
+
+/* Register Offsets for EFUSE Cache */
+#define UFS_CAL_1_OFFSET 0xBE8 /* UFS Calibration Value */
+
+/**
+ * zynqmp_pm_is_mphy_tx_rx_config_ready - check M-PHY TX-RX config readiness
+ * @is_ready: Store output status (true/false)
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready)
+{
+ u32 regval;
+ int ret;
+
+ if (!is_ready)
+ return -EINVAL;
+
+ ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, TXRX_CFGRDY_OFFSET, &regval);
+ if (ret)
+ return ret;
+
+	*is_ready = !!(regval & TX_RX_CFG_RDY_MASK);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_is_mphy_tx_rx_config_ready);
+
+/**
+ * zynqmp_pm_is_sram_init_done - check SRAM initialization
+ * @is_done: Store output status (true/false)
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_is_sram_init_done(bool *is_done)
+{
+ u32 regval;
+ int ret;
+
+ if (!is_done)
+ return -EINVAL;
+
+ ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, &regval);
+ if (ret)
+ return ret;
+
+	*is_done = !!(regval & SRAM_CSR_INIT_DONE_MASK);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_is_sram_init_done);
+
+/**
+ * zynqmp_pm_set_sram_bypass - Set SRAM bypass Control
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_set_sram_bypass(void)
+{
+ u32 sram_csr;
+ int ret;
+
+ ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, &sram_csr);
+ if (ret)
+ return ret;
+
+ sram_csr &= ~SRAM_CSR_EXT_LD_DONE_MASK;
+ sram_csr |= SRAM_CSR_BYPASS_MASK;
+
+ return zynqmp_pm_sec_mask_write_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET,
+ GENMASK(2, 1), sram_csr);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_sram_bypass);
+
+/**
+ * zynqmp_pm_get_ufs_calibration_values - Read UFS calibration values
+ * @val: Store the calibration value
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_get_ufs_calibration_values(u32 *val)
+{
+ return zynqmp_pm_sec_read_reg(PM_REGNODE_EFUSE_CACHE, UFS_CAL_1_OFFSET, val);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_ufs_calibration_values);
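+
+/*
+ * Illustrative sketch (editor's example, not part of this patch): one way
+ * a UFS host driver might combine these helpers during PHY setup. The
+ * ordering is an assumption drawn from the register descriptions above.
+ */
+static int example_ufs_phy_setup(u32 *calibration)
+{
+	bool ready, done;
+	int ret;
+
+	ret = zynqmp_pm_is_mphy_tx_rx_config_ready(&ready);
+	if (ret || !ready)
+		return ret ? ret : -EBUSY;
+
+	ret = zynqmp_pm_is_sram_init_done(&done);
+	if (ret)
+		return ret;
+
+	/* Bypass the SRAM interface if it never completed initialization */
+	if (!done) {
+		ret = zynqmp_pm_set_sram_bypass();
+		if (ret)
+			return ret;
+	}
+
+	return zynqmp_pm_get_ufs_calibration_values(calibration);
+}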
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 7356e860e65c..ad811f40e059 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -3,7 +3,7 @@
* Xilinx Zynq MPSoC Firmware layer
*
* Copyright (C) 2014-2022 Xilinx, Inc.
- * Copyright (C) 2022 - 2024, Advanced Micro Devices, Inc.
+ * Copyright (C) 2022 - 2025 Advanced Micro Devices, Inc.
*
* Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/hashtable.h>
@@ -71,6 +72,15 @@ struct pm_api_feature_data {
struct hlist_node hentry;
};
+struct platform_fw_data {
+ /*
+ * Family code for platform.
+ */
+ const u32 family_code;
+};
+
+static struct platform_fw_data *active_platform_fw_data;
+
static const struct mfd_cell firmware_devs[] = {
{
.name = "zynqmp_power_controller",
@@ -463,8 +473,6 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...)
static u32 pm_api_version;
static u32 pm_tz_version;
-static u32 pm_family_code;
-static u32 pm_sub_family_code;
int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset)
{
@@ -531,32 +539,18 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_chipid);
/**
* zynqmp_pm_get_family_info() - Get family info of platform
* @family: Returned family code value
- * @subfamily: Returned sub-family code value
*
* Return: Returns status, either success or error+reason
*/
-int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
+int zynqmp_pm_get_family_info(u32 *family)
{
- u32 ret_payload[PAYLOAD_ARG_CNT];
- u32 idcode;
- int ret;
+ if (!active_platform_fw_data)
+ return -ENODEV;
- /* Check is family or sub-family code already received */
- if (pm_family_code && pm_sub_family_code) {
- *family = pm_family_code;
- *subfamily = pm_sub_family_code;
- return 0;
- }
-
- ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, ret_payload, 0);
- if (ret < 0)
- return ret;
+ if (!family)
+ return -EINVAL;
- idcode = ret_payload[1];
- pm_family_code = FIELD_GET(FAMILY_CODE_MASK, idcode);
- pm_sub_family_code = FIELD_GET(SUB_FAMILY_CODE_MASK, idcode);
- *family = pm_family_code;
- *subfamily = pm_sub_family_code;
+ *family = active_platform_fw_data->family_code;
return 0;
}
@@ -1237,8 +1231,13 @@ int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
u32 value)
{
int ret;
+ u32 pm_family_code;
+
+ ret = zynqmp_pm_get_family_info(&pm_family_code);
+ if (ret)
+ return ret;
- if (pm_family_code == ZYNQMP_FAMILY_CODE &&
+ if (pm_family_code == PM_ZYNQMP_FAMILY_CODE &&
param == PM_PINCTRL_CONFIG_TRI_STATE) {
ret = zynqmp_pm_feature(PM_PINCTRL_CONFIG_PARAM_SET);
if (ret < PM_PINCTRL_PARAM_SET_VERSION) {
@@ -1299,11 +1298,10 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_bootmode_write);
* This API function is to be used for notify the power management controller
* about the completed power management initialization.
*/
-int zynqmp_pm_init_finalize(void)
+static int zynqmp_pm_init_finalize(void)
{
return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, NULL, 0);
}
-EXPORT_SYMBOL_GPL(zynqmp_pm_init_finalize);
/**
* zynqmp_pm_set_suspend_mode() - Set system suspend mode
@@ -1414,6 +1412,45 @@ int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode)
EXPORT_SYMBOL_GPL(zynqmp_pm_set_tcm_config);
/**
+ * zynqmp_pm_get_node_status - PM call to request a node's current power state
+ * @node: ID of the component or sub-system in question
+ * @status: Current operating state of the requested node
+ * @requirements: Current requirements asserted on the node,
+ * used for slave nodes only.
+ * @usage: Usage information, used for slave nodes only:
+ * PM_USAGE_NO_MASTER - No master is currently using
+ * the node
+ * PM_USAGE_CURRENT_MASTER - Only requesting master is
+ * currently using the node
+ * PM_USAGE_OTHER_MASTER - Only other masters are
+ * currently using the node
+ * PM_USAGE_BOTH_MASTERS - Both the current and at least
+ * one other master is currently
+ * using the node
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_get_node_status(const u32 node, u32 *const status,
+ u32 *const requirements, u32 *const usage)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!status || !requirements || !usage)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_GET_NODE_STATUS, ret_payload, 1, node);
+ if (ret_payload[0] == XST_PM_SUCCESS) {
+ *status = ret_payload[1];
+ *requirements = ret_payload[2];
+ *usage = ret_payload[3];
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_node_status);
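+
+/*
+ * Illustrative sketch (editor's example, not part of this patch): querying
+ * a node's state; all three output pointers must be supplied.
+ */
+static void example_log_node_status(u32 node)
+{
+	u32 status, requirements, usage;
+
+	if (!zynqmp_pm_get_node_status(node, &status, &requirements, &usage))
+		pr_debug("node %u: status %u req %u usage %u\n",
+			 node, status, requirements, usage);
+}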
+
+/**
* zynqmp_pm_force_pwrdwn - PM call to request for another PU or subsystem to
* be powered down forcefully
* @node: Node ID of the targeted PU or subsystem
@@ -1617,6 +1654,52 @@ int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
}
/**
+ * zynqmp_pm_sec_read_reg - PM call to securely read from given offset
+ * of the node
+ * @node_id: Node Id of the device
+ * @offset: Offset to be used (20-bit)
+ * @ret_value: Output data read from the given offset after
+ * firmware access policy is successfully enforced
+ *
+ * Return: Returns 0 on success or error value on failure
+ */
+int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ u32 count = 1;
+ int ret;
+
+ if (!ret_value)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_IOCTL, ret_payload, 4, node_id, IOCTL_READ_REG,
+ offset, count);
+
+ *ret_value = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sec_read_reg);
+
+/**
+ * zynqmp_pm_sec_mask_write_reg - PM call to securely write to given offset
+ * of the node
+ * @node_id: Node Id of the device
+ * @offset: Offset to be used (20-bit)
+ * @mask: Mask to be used
+ * @value: Value to be written
+ *
+ * Return: Returns 0 on success or error value on failure
+ */
+int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset, u32 mask,
+ u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 5, node_id, IOCTL_MASK_WRITE_REG,
+ offset, mask, value);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sec_mask_write_reg);
+
+/**
* zynqmp_pm_set_sd_config - PM call to set value of SD config registers
* @node: SD node ID
* @config: The config type of SD registers
@@ -2007,12 +2090,18 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct zynqmp_devinfo *devinfo;
+ u32 pm_family_code;
int ret;
ret = get_set_conduit_method(dev->of_node);
if (ret)
return ret;
+ /* Get platform-specific firmware data from device tree match */
+ active_platform_fw_data = (struct platform_fw_data *)device_get_match_data(dev);
+ if (!active_platform_fw_data)
+ return -EINVAL;
+
/* Get SiP SVC version number */
ret = zynqmp_pm_get_sip_svc_version(&sip_svc_version);
if (ret)
@@ -2045,8 +2134,8 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
pr_info("%s Platform Management API v%d.%d\n", __func__,
pm_api_version >> 16, pm_api_version & 0xFFFF);
- /* Get the Family code and sub family code of platform */
- ret = zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code);
+ /* Get the Family code of platform */
+ ret = zynqmp_pm_get_family_info(&pm_family_code);
if (ret < 0)
return ret;
@@ -2073,7 +2162,7 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
zynqmp_pm_api_debugfs_init();
- if (pm_family_code == VERSAL_FAMILY_CODE) {
+ if (pm_family_code != PM_ZYNQMP_FAMILY_CODE) {
em_dev = platform_device_register_data(&pdev->dev, "xlnx_event_manager",
-1, NULL, 0);
if (IS_ERR(em_dev))
@@ -2100,9 +2189,35 @@ static void zynqmp_firmware_remove(struct platform_device *pdev)
platform_device_unregister(em_dev);
}
+static void zynqmp_firmware_sync_state(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+
+ if (!of_device_is_compatible(np, "xlnx,zynqmp-firmware"))
+ return;
+
+ of_genpd_sync_state(np);
+
+ if (zynqmp_pm_init_finalize())
+ dev_warn(dev, "failed to release power management to firmware\n");
+}
+
+static const struct platform_fw_data platform_fw_data_versal = {
+ .family_code = PM_VERSAL_FAMILY_CODE,
+};
+
+static const struct platform_fw_data platform_fw_data_versal_net = {
+ .family_code = PM_VERSAL_NET_FAMILY_CODE,
+};
+
+static const struct platform_fw_data platform_fw_data_zynqmp = {
+ .family_code = PM_ZYNQMP_FAMILY_CODE,
+};
+
static const struct of_device_id zynqmp_firmware_of_match[] = {
- {.compatible = "xlnx,zynqmp-firmware"},
- {.compatible = "xlnx,versal-firmware"},
+ {.compatible = "xlnx,zynqmp-firmware", .data = &platform_fw_data_zynqmp},
+ {.compatible = "xlnx,versal-firmware", .data = &platform_fw_data_versal},
+ {.compatible = "xlnx,versal-net-firmware", .data = &platform_fw_data_versal_net},
{},
};
MODULE_DEVICE_TABLE(of, zynqmp_firmware_of_match);
@@ -2112,6 +2227,7 @@ static struct platform_driver zynqmp_firmware_driver = {
.name = "zynqmp_firmware",
.of_match_table = zynqmp_firmware_of_match,
.dev_groups = zynqmp_firmware_groups,
+ .sync_state = zynqmp_firmware_sync_state,
},
.probe = zynqmp_firmware_probe,
.remove = zynqmp_firmware_remove,
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 5af0bd33890c..44badfd11e1b 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -22,9 +22,6 @@
#define TIMEOUT_US 2000 /* CVP STATUS timeout for USERMODE polling */
/* Vendor Specific Extended Capability Registers */
-#define VSE_PCIE_EXT_CAP_ID 0x0
-#define VSE_PCIE_EXT_CAP_ID_VAL 0x000b /* 16bit */
-
#define VSE_CVP_STATUS 0x1c /* 32bit */
#define VSE_CVP_STATUS_CFG_RDY BIT(18) /* CVP_CONFIG_READY */
#define VSE_CVP_STATUS_CFG_ERR BIT(19) /* CVP_CONFIG_ERROR */
@@ -577,25 +574,18 @@ static int altera_cvp_probe(struct pci_dev *pdev,
{
struct altera_cvp_conf *conf;
struct fpga_manager *mgr;
- int ret, offset;
- u16 cmd, val;
+ u16 cmd, offset;
u32 regval;
-
- /* Discover the Vendor Specific Offset for this device */
- offset = pci_find_next_ext_capability(pdev, 0, PCI_EXT_CAP_ID_VNDR);
- if (!offset) {
- dev_err(&pdev->dev, "No Vendor Specific Offset.\n");
- return -ENODEV;
- }
+ int ret;
/*
* First check if this is the expected FPGA device. PCI config
* space access works without enabling the PCI device, memory
* space access is enabled further down.
*/
- pci_read_config_word(pdev, offset + VSE_PCIE_EXT_CAP_ID, &val);
- if (val != VSE_PCIE_EXT_CAP_ID_VAL) {
- dev_err(&pdev->dev, "Wrong EXT_CAP_ID value 0x%x\n", val);
+ offset = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_ALTERA, 0x1172);
+ if (!offset) {
+ dev_err(&pdev->dev, "Wrong VSEC ID value\n");
return -ENODEV;
}
diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
index 8756504340de..e294e3a6cc03 100644
--- a/drivers/fpga/xilinx-spi.c
+++ b/drivers/fpga/xilinx-spi.c
@@ -57,6 +57,12 @@ static int xilinx_spi_probe(struct spi_device *spi)
return xilinx_core_probe(core);
}
+static const struct spi_device_id xilinx_spi_ids[] = {
+ { "fpga-slave-serial" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, xilinx_spi_ids);
+
#ifdef CONFIG_OF
static const struct of_device_id xlnx_spi_of_match[] = {
{
@@ -73,6 +79,7 @@ static struct spi_driver xilinx_slave_spi_driver = {
.of_match_table = of_match_ptr(xlnx_spi_of_match),
},
.probe = xilinx_spi_probe,
+ .id_table = xilinx_spi_ids,
};
module_spi_driver(xilinx_slave_spi_driver)
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index f7e08f7ea9ef..b7629a0e4813 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -405,12 +405,12 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
}
}
- priv->dma_nelms =
- dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
- if (priv->dma_nelms == 0) {
+ err = dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0);
+ if (err) {
dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
- return -ENOMEM;
+ return err;
}
+ priv->dma_nelms = sgt->nents;
/* enable clock */
err = clk_enable(priv->clk);
@@ -478,7 +478,7 @@ out_clk:
clk_disable(priv->clk);
out_free:
- dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+ dma_unmap_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0);
return err;
}
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 50e8736039fe..c6c115993ebc 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -613,8 +613,8 @@ static const struct bin_attribute fsi_slave_raw_attr = {
.mode = 0600,
},
.size = 0,
- .read_new = fsi_slave_sysfs_raw_read,
- .write_new = fsi_slave_sysfs_raw_write,
+ .read = fsi_slave_sysfs_raw_read,
+ .write = fsi_slave_sysfs_raw_write,
};
static void fsi_slave_release(struct device *dev)
@@ -1404,7 +1404,7 @@ void fsi_driver_unregister(struct fsi_driver *fsi_drv)
}
EXPORT_SYMBOL_GPL(fsi_driver_unregister);
-struct bus_type fsi_bus_type = {
+const struct bus_type fsi_bus_type = {
.name = "fsi",
.match = fsi_bus_match,
};
diff --git a/drivers/fsi/fsi-master-ast-cf.c b/drivers/fsi/fsi-master-ast-cf.c
index 9f2fd444ceb6..e67d7cd30fca 100644
--- a/drivers/fsi/fsi-master-ast-cf.c
+++ b/drivers/fsi/fsi-master-ast-cf.c
@@ -13,13 +13,13 @@
#include <linux/irqflags.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/regmap.h>
#include <linux/firmware.h>
#include <linux/gpio/aspeed.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_address.h>
#include <linux/genalloc.h>
#include "fsi-master.h"
@@ -1285,14 +1285,7 @@ static int fsi_master_acf_probe(struct platform_device *pdev)
master->gpio_mux = gpio;
/* Grab the reserved memory region (use DMA API instead ?) */
- np = of_parse_phandle(mnode, "memory-region", 0);
- if (!np) {
- dev_err(&pdev->dev, "Didn't find reserved memory\n");
- rc = -EINVAL;
- goto err_free;
- }
- rc = of_address_to_resource(np, 0, &res);
- of_node_put(np);
+ rc = of_reserved_mem_region_to_resource(mnode, 0, &res);
if (rc) {
dev_err(&pdev->dev, "Couldn't address to resource for reserved memory\n");
rc = -ENOMEM;
diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c
index d3e6bf37878a..e41ef12fa095 100644
--- a/drivers/fsi/fsi-occ.c
+++ b/drivers/fsi/fsi-occ.c
@@ -22,9 +22,9 @@
#include <linux/uaccess.h>
#include <linux/unaligned.h>
-#define OCC_SRAM_BYTES 4096
-#define OCC_CMD_DATA_BYTES 4090
-#define OCC_RESP_DATA_BYTES 4089
+#define OCC_SRAM_BYTES 8192
+#define OCC_CMD_DATA_BYTES 8186
+#define OCC_RESP_DATA_BYTES 8185
#define OCC_P9_SRAM_CMD_ADDR 0xFFFBE000
#define OCC_P9_SRAM_RSP_ADDR 0xFFFBF000
@@ -86,7 +86,7 @@ static int occ_open(struct inode *inode, struct file *file)
if (!client)
return -ENOMEM;
- client->buffer = (u8 *)__get_free_page(GFP_KERNEL);
+ client->buffer = kvmalloc(OCC_SRAM_BYTES, GFP_KERNEL);
if (!client->buffer) {
kfree(client);
return -ENOMEM;
@@ -97,10 +97,6 @@ static int occ_open(struct inode *inode, struct file *file)
file->private_data = client;
get_device(occ->dev);
- /* We allocate a 1-page buffer, make sure it all fits */
- BUILD_BUG_ON((OCC_CMD_DATA_BYTES + 3) > PAGE_SIZE);
- BUILD_BUG_ON((OCC_RESP_DATA_BYTES + 7) > PAGE_SIZE);
-
return 0;
}
@@ -176,7 +172,7 @@ static ssize_t occ_write(struct file *file, const char __user *buf,
}
/* Submit command; 4 bytes before the data and 2 bytes after */
- rlen = PAGE_SIZE;
+ rlen = OCC_SRAM_BYTES;
rc = fsi_occ_submit(client->occ->dev, cmd, data_length + 6, cmd,
&rlen);
if (rc)
@@ -200,7 +196,7 @@ static int occ_release(struct inode *inode, struct file *file)
struct occ_client *client = file->private_data;
put_device(client->occ->dev);
- free_page((unsigned long)client->buffer);
+ kvfree(client->buffer);
kfree(client);
return 0;
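
The fsi-occ buffer change above follows the usual kvmalloc()/kvfree() pairing for allocations that may exceed one page; a minimal sketch under that assumption (the variable names are placeholders, not from this patch):

#include <linux/slab.h>

/* Sketch only: kvmalloc() tries kmalloc() first and falls back to
 * vmalloc(), so an 8 KiB buffer no longer depends on PAGE_SIZE the way
 * __get_free_page() did.
 */
u8 *buf = kvmalloc(8192, GFP_KERNEL);
if (!buf)
	return -ENOMEM;
/* ... fill and submit buf ... */
kvfree(buf);
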
diff --git a/drivers/fwctl/mlx5/main.c b/drivers/fwctl/mlx5/main.c
index f93aa0cecdb9..3dacccf7855c 100644
--- a/drivers/fwctl/mlx5/main.c
+++ b/drivers/fwctl/mlx5/main.c
@@ -58,6 +58,9 @@ enum {
MLX5_CMD_OP_QUERY_DC_CNAK_TRACE = 0x716,
MLX5_CMD_OP_QUERY_NVMF_BACKEND_CONTROLLER = 0x722,
MLX5_CMD_OP_QUERY_NVMF_NAMESPACE_CONTEXT = 0x728,
+ MLX5_CMD_OP_QUERY_ADJACENT_FUNCTIONS_ID = 0x730,
+ MLX5_CMD_OP_DELEGATE_VHCA_MANAGEMENT = 0x731,
+ MLX5_CMD_OP_QUERY_DELEGATED_VHCA = 0x732,
MLX5_CMD_OP_QUERY_BURST_SIZE = 0x813,
MLX5_CMD_OP_QUERY_DIAGNOSTIC_PARAMS = 0x819,
MLX5_CMD_OP_SET_DIAGNOSTIC_PARAMS = 0x820,
@@ -188,6 +191,7 @@ static bool mlx5ctl_validate_rpc(const void *in, enum fwctl_rpc_scope scope)
* filter commands manually for now.
*/
switch (opcode) {
+ case MLX5_CMD_OP_MODIFY_CONG_STATUS:
case MLX5_CMD_OP_POSTPONE_CONNECTED_QP_TIMEOUT:
case MLX5_CMD_OP_QUERY_ADAPTER:
case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
@@ -196,6 +200,7 @@ static bool mlx5ctl_validate_rpc(const void *in, enum fwctl_rpc_scope scope)
case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP:
case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
case MLX5_CMD_OPCODE_QUERY_VUID:
+ case MLX5_CMD_OP_DELEGATE_VHCA_MANAGEMENT:
/*
* FW limits SET_HCA_CAP on the tools UID to only the other function
* mode which is used for function pre-configuration
@@ -281,6 +286,8 @@ static bool mlx5ctl_validate_rpc(const void *in, enum fwctl_rpc_scope scope)
case MLX5_CMD_OP_QUERY_XRQ:
case MLX5_CMD_OP_USER_QUERY_XRQ_DC_PARAMS_ENTRY:
case MLX5_CMD_OP_USER_QUERY_XRQ_ERROR_PARAMS:
+ case MLX5_CMD_OP_QUERY_ADJACENT_FUNCTIONS_ID:
+ case MLX5_CMD_OP_QUERY_DELEGATED_VHCA:
return scope >= FWCTL_RPC_DEBUG_READ_ONLY;
case MLX5_CMD_OP_SET_DIAGNOSTIC_PARAMS:
@@ -345,7 +352,7 @@ static void *mlx5ctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
*/
if (ret && ret != -EREMOTEIO) {
if (rpc_out != rpc_in)
- kfree(rpc_out);
+ kvfree(rpc_out);
return ERR_PTR(ret);
}
return rpc_out;
diff --git a/drivers/fwctl/pds/main.c b/drivers/fwctl/pds/main.c
index 9b9d1f6b5556..1809853f6353 100644
--- a/drivers/fwctl/pds/main.c
+++ b/drivers/fwctl/pds/main.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/bitfield.h>
+#include <linux/string.h>
#include <uapi/fwctl/fwctl.h>
#include <uapi/fwctl/pds.h>
@@ -366,18 +367,10 @@ static void *pdsfc_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
return ERR_PTR(err);
if (rpc->in.len > 0) {
- in_payload = kzalloc(rpc->in.len, GFP_KERNEL);
- if (!in_payload) {
- dev_err(dev, "Failed to allocate in_payload\n");
- err = -ENOMEM;
- goto err_out;
- }
-
- if (copy_from_user(in_payload, u64_to_user_ptr(rpc->in.payload),
- rpc->in.len)) {
+ in_payload = memdup_user(u64_to_user_ptr(rpc->in.payload), rpc->in.len);
+ if (IS_ERR(in_payload)) {
dev_dbg(dev, "Failed to copy in_payload from user\n");
- err = -EFAULT;
- goto err_in_payload;
+ return in_payload;
}
in_payload_dma_addr = dma_map_single(dev->parent, in_payload,
@@ -453,7 +446,6 @@ err_out_payload:
rpc->in.len, DMA_TO_DEVICE);
err_in_payload:
kfree(in_payload);
-err_out:
if (err)
return ERR_PTR(err);
@@ -481,7 +473,7 @@ static int pdsfc_probe(struct auxiliary_device *adev,
pdsfc = fwctl_alloc_device(&padev->vf_pdev->dev, &pdsfc_ops,
struct pdsfc_dev, fwctl);
if (!pdsfc)
- return dev_err_probe(dev, -ENOMEM, "Failed to allocate fwctl device struct\n");
+ return -ENOMEM;
pdsfc->padev = padev;
err = pdsfc_identify(pdsfc);
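
The pds/main.c change above is the standard memdup_user() simplification: it folds the allocate-then-copy_from_user pair into one call that returns an ERR_PTR on failure. A minimal sketch ("user_ptr" and "len" are placeholder names):

#include <linux/string.h>
#include <linux/err.h>

void *payload = memdup_user(user_ptr, len);
if (IS_ERR(payload))
	return PTR_ERR(payload);	/* -ENOMEM or -EFAULT */
/* ... DMA-map or otherwise consume payload ... */
kfree(payload);
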
diff --git a/drivers/gnss/ubx.c b/drivers/gnss/ubx.c
index 92402f6082c4..23894ff75ff9 100644
--- a/drivers/gnss/ubx.c
+++ b/drivers/gnss/ubx.c
@@ -66,6 +66,7 @@ static const struct gnss_serial_ops ubx_gserial_ops = {
static int ubx_probe(struct serdev_device *serdev)
{
struct gnss_serial *gserial;
+ struct gpio_desc *safeboot;
struct gpio_desc *reset;
struct ubx_data *data;
int ret;
@@ -92,6 +93,13 @@ static int ubx_probe(struct serdev_device *serdev)
if (ret < 0 && ret != -ENODEV)
goto err_free_gserial;
+ /* Deassert safeboot */
+ safeboot = devm_gpiod_get_optional(&serdev->dev, "safeboot", GPIOD_OUT_LOW);
+ if (IS_ERR(safeboot)) {
+ ret = PTR_ERR(safeboot);
+ goto err_free_gserial;
+ }
+
/* Deassert reset */
reset = devm_gpiod_get_optional(&serdev->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(reset)) {
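
The safeboot handling above uses the usual optional-GPIO idiom; a sketch with placeholder names ("dev", "foo"): devm_gpiod_get_optional() returns NULL when the line is absent, which callers may ignore, and an ERR_PTR only on genuine errors, while GPIOD_OUT_LOW requests the line as an output driven to logical 0 (deasserted, with any active-low polarity from the firmware description already applied).

#include <linux/gpio/consumer.h>

struct gpio_desc *g;

/* NULL when no "foo-gpios" property exists; ERR_PTR on real errors. */
g = devm_gpiod_get_optional(dev, "foo", GPIOD_OUT_LOW);
if (IS_ERR(g))
	return PTR_ERR(g);
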
diff --git a/drivers/staging/gpib/Kconfig b/drivers/gpib/Kconfig
index aa01538d5beb..eeb50956ce85 100644
--- a/drivers/staging/gpib/Kconfig
+++ b/drivers/gpib/Kconfig
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
menuconfig GPIB
- tristate "Linux GPIB drivers"
+ tristate "GPIB drivers"
help
- Enable support for GPIB cards and dongles for Linux. GPIB
- is the General Purpose Interface Bus which conforms to the
- IEEE488 standard.
+ Enable support for GPIB cards and dongles. GPIB is the
+ General Purpose Interface Bus which conforms to the IEEE488
+ standard.
This set of drivers can be used with the corresponding user
space library that can be found on Sourceforge under linux-gpib.
diff --git a/drivers/staging/gpib/Makefile b/drivers/gpib/Makefile
index d0e88f5c0844..2d44fed2a743 100644
--- a/drivers/staging/gpib/Makefile
+++ b/drivers/gpib/Makefile
@@ -1,5 +1,5 @@
-subdir-ccflags-y += -I$(src)/include -I$(src)/uapi
+subdir-ccflags-y += -I$(src)/include
obj-$(CONFIG_GPIB_AGILENT_82350B) += agilent_82350b/
obj-$(CONFIG_GPIB_AGILENT_82357A) += agilent_82357a/
diff --git a/drivers/gpib/TODO b/drivers/gpib/TODO
new file mode 100644
index 000000000000..ac07dd90b4ef
--- /dev/null
+++ b/drivers/gpib/TODO
@@ -0,0 +1,10 @@
+TODO:
+- checkpatch.pl fixes
+ These checks should be ignored:
+ CHECK:ALLOC_SIZEOF_STRUCT: Prefer kmalloc(sizeof(*board->private_data)...) over kmalloc(sizeof(struct xxx_priv)...)
+ ./gpio/gpib_bitbang.c:50: ERROR:COMPLEX_MACRO: Macros with complex values should be enclosed in parentheses
+ This warning will be addressed later: WARNING:UNDOCUMENTED_DT_STRING: DT compatible string
+- resolve XXX notes where possible
+- fix FIXME notes
+- clean-up commented-out code
+- fix typos
diff --git a/drivers/staging/gpib/agilent_82350b/Makefile b/drivers/gpib/agilent_82350b/Makefile
index f24e1e713a63..f24e1e713a63 100644
--- a/drivers/staging/gpib/agilent_82350b/Makefile
+++ b/drivers/gpib/agilent_82350b/Makefile
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c b/drivers/gpib/agilent_82350b/agilent_82350b.c
index 94bbb3b6576d..01a5bb43cd2d 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
+++ b/drivers/gpib/agilent_82350b/agilent_82350b.c
@@ -182,10 +182,12 @@ static int agilent_82350b_accel_write(struct gpib_board *board, u8 *buffer,
return retval;
#endif
- retval = agilent_82350b_write(board, buffer, 1, 0, &num_bytes);
- *bytes_written += num_bytes;
- if (retval < 0)
- return retval;
+ if (fifotransferlength > 0) {
+ retval = agilent_82350b_write(board, buffer, 1, 0, &num_bytes);
+ *bytes_written += num_bytes;
+ if (retval < 0)
+ return retval;
+ }
write_byte(tms_priv, tms_priv->imr0_bits & ~HR_BOIE, IMR0);
for (i = 1; i < fifotransferlength;) {
@@ -217,7 +219,7 @@ static int agilent_82350b_accel_write(struct gpib_board *board, u8 *buffer,
break;
}
write_byte(tms_priv, tms_priv->imr0_bits, IMR0);
- if (retval)
+ if (retval < 0)
return retval;
if (send_eoi) {
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.h b/drivers/gpib/agilent_82350b/agilent_82350b.h
index ef841957297f..ef841957297f 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.h
+++ b/drivers/gpib/agilent_82350b/agilent_82350b.h
diff --git a/drivers/staging/gpib/agilent_82357a/Makefile b/drivers/gpib/agilent_82357a/Makefile
index 81a55c257a6e..81a55c257a6e 100644
--- a/drivers/staging/gpib/agilent_82357a/Makefile
+++ b/drivers/gpib/agilent_82357a/Makefile
diff --git a/drivers/staging/gpib/agilent_82357a/agilent_82357a.c b/drivers/gpib/agilent_82357a/agilent_82357a.c
index b923dc606d1d..77c8e549b208 100644
--- a/drivers/staging/gpib/agilent_82357a/agilent_82357a.c
+++ b/drivers/gpib/agilent_82357a/agilent_82357a.c
@@ -449,8 +449,8 @@ static int agilent_82357a_read(struct gpib_board *board, u8 *buffer, size_t leng
if (!out_data)
return -ENOMEM;
out_data[i++] = DATA_PIPE_CMD_READ;
- out_data[i++] = 0; //primary address when ARF_NO_ADDR is not set
- out_data[i++] = 0; //secondary address when ARF_NO_ADDR is not set
+ out_data[i++] = 0; // primary address when ARF_NO_ADDR is not set
+ out_data[i++] = 0; // secondary address when ARF_NO_ADDR is not set
out_data[i] = ARF_NO_ADDRESS | ARF_END_ON_EOI;
if (a_priv->eos_mode & REOS)
out_data[i] |= ARF_END_ON_EOS_CHAR;
@@ -532,7 +532,7 @@ static int agilent_82357a_read(struct gpib_board *board, u8 *buffer, size_t leng
*/
agilent_82357a_take_control_internal(board, 0);
- //FIXME check trailing flags for error
+ // FIXME check trailing flags for error
return retval;
}
@@ -966,7 +966,7 @@ static int agilent_82357a_parallel_poll(struct gpib_board *board, u8 *result)
dev_err(&usb_dev->dev, "write_registers() returned error\n");
return retval;
}
- udelay(2); //silly, since usb write will take way longer
+ udelay(2); // silly, since usb write will take way longer
read.address = CPTR;
retval = agilent_82357a_read_registers(a_priv, &read, 1, 1);
if (retval) {
@@ -989,31 +989,31 @@ static int agilent_82357a_parallel_poll(struct gpib_board *board, u8 *result)
static void agilent_82357a_parallel_poll_configure(struct gpib_board *board, u8 config)
{
- //board can only be system controller
+ // board can only be system controller
return;// 0;
}
static void agilent_82357a_parallel_poll_response(struct gpib_board *board, int ist)
{
- //board can only be system controller
+ // board can only be system controller
return;// 0;
}
static void agilent_82357a_serial_poll_response(struct gpib_board *board, u8 status)
{
- //board can only be system controller
+ // board can only be system controller
return;// 0;
}
static u8 agilent_82357a_serial_poll_status(struct gpib_board *board)
{
- //board can only be system controller
+ // board can only be system controller
return 0;
}
static void agilent_82357a_return_to_local(struct gpib_board *board)
{
- //board can only be system controller
+ // board can only be system controller
return;// 0;
}
diff --git a/drivers/staging/gpib/agilent_82357a/agilent_82357a.h b/drivers/gpib/agilent_82357a/agilent_82357a.h
index 23aa4799eb86..33ac558e5552 100644
--- a/drivers/staging/gpib/agilent_82357a/agilent_82357a.h
+++ b/drivers/gpib/agilent_82357a/agilent_82357a.h
@@ -20,7 +20,7 @@ enum usb_vendor_ids {
enum usb_device_ids {
USB_DEVICE_ID_AGILENT_82357A = 0x0107,
USB_DEVICE_ID_AGILENT_82357A_PREINIT = 0x0007, // device id before firmware is loaded
- USB_DEVICE_ID_AGILENT_82357B = 0x0718, // device id before firmware is loaded
+ USB_DEVICE_ID_AGILENT_82357B = 0x0718, // device id before firmware is loaded
USB_DEVICE_ID_AGILENT_82357B_PREINIT = 0x0518, // device id before firmware is loaded
};
@@ -129,10 +129,10 @@ struct agilent_82357a_priv {
struct urb *bulk_urb;
struct urb *interrupt_urb;
u8 *interrupt_buffer;
- struct mutex bulk_transfer_lock; // bulk transfer lock
- struct mutex bulk_alloc_lock; // bulk transfer allocation lock
- struct mutex interrupt_alloc_lock; // interrupt allocation lock
- struct mutex control_alloc_lock; // control message allocation lock
+ struct mutex bulk_transfer_lock; // bulk transfer lock
+ struct mutex bulk_alloc_lock; // bulk transfer allocation lock
+ struct mutex interrupt_alloc_lock; // interrupt allocation lock
+ struct mutex control_alloc_lock; // control message allocation lock
struct timer_list bulk_timer;
struct agilent_82357a_urb_ctx context;
unsigned int bulk_out_endpoint;
diff --git a/drivers/staging/gpib/cb7210/Makefile b/drivers/gpib/cb7210/Makefile
index d239ae80b415..d239ae80b415 100644
--- a/drivers/staging/gpib/cb7210/Makefile
+++ b/drivers/gpib/cb7210/Makefile
diff --git a/drivers/staging/gpib/cb7210/cb7210.c b/drivers/gpib/cb7210/cb7210.c
index 298ed306189d..24c61b151071 100644
--- a/drivers/staging/gpib/cb7210/cb7210.c
+++ b/drivers/gpib/cb7210/cb7210.c
@@ -1184,8 +1184,7 @@ struct local_info {
static int cb_gpib_probe(struct pcmcia_device *link)
{
struct local_info *info;
-
-// int ret, i;
+ int ret;
/* Allocate space for private device-specific data */
info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -1211,8 +1210,16 @@ static int cb_gpib_probe(struct pcmcia_device *link)
/* Register with Card Services */
curr_dev = link;
- return cb_gpib_config(link);
-} /* gpib_attach */
+ ret = cb_gpib_config(link);
+ if (ret)
+ goto free_info;
+
+ return 0;
+
+free_info:
+ kfree(info);
+ return ret;
+}
/*
* This deletes a driver "instance". The device is de-registered
@@ -1283,26 +1290,14 @@ static void cb_gpib_release(struct pcmcia_device *link)
static int cb_gpib_suspend(struct pcmcia_device *link)
{
- //struct local_info *info = link->priv;
- //struct struct gpib_board *dev = info->dev;
-
if (link->open)
dev_warn(&link->dev, "Device still open\n");
- //netif_device_detach(dev);
return 0;
}
static int cb_gpib_resume(struct pcmcia_device *link)
{
- //struct local_info *info = link->priv;
- //struct struct gpib_board *dev = info->dev;
-
- /*if (link->open) {
- * ni_gpib_probe(dev); / really?
- * //netif_device_attach(dev);
- *
- */
return cb_gpib_config(link);
}
diff --git a/drivers/staging/gpib/cb7210/cb7210.h b/drivers/gpib/cb7210/cb7210.h
index 13f127563ab3..ddc841ff87ae 100644
--- a/drivers/staging/gpib/cb7210/cb7210.h
+++ b/drivers/gpib/cb7210/cb7210.h
@@ -56,10 +56,10 @@ enum cb7210_page_in {
};
enum hs_regs {
- //write registers
+ // write registers
HS_MODE = 0x8, /* HS_MODE register */
HS_INT_LEVEL = 0x9, /* HS_INT_LEVEL register */
- //read registers
+ // read registers
HS_STATUS = 0x8, /* HS_STATUS register */
};
diff --git a/drivers/staging/gpib/cec/Makefile b/drivers/gpib/cec/Makefile
index b7141e23d4e0..b7141e23d4e0 100644
--- a/drivers/staging/gpib/cec/Makefile
+++ b/drivers/gpib/cec/Makefile
diff --git a/drivers/staging/gpib/cec/cec.h b/drivers/gpib/cec/cec.h
index 3ce2869c7429..3ce2869c7429 100644
--- a/drivers/staging/gpib/cec/cec.h
+++ b/drivers/gpib/cec/cec.h
diff --git a/drivers/staging/gpib/cec/cec_gpib.c b/drivers/gpib/cec/cec_gpib.c
index e8736cbf50e3..dbf9b95baabc 100644
--- a/drivers/staging/gpib/cec/cec_gpib.c
+++ b/drivers/gpib/cec/cec_gpib.c
@@ -206,7 +206,7 @@ static struct gpib_interface cec_pci_interface = {
.parallel_poll_configure = cec_parallel_poll_configure,
.parallel_poll_response = cec_parallel_poll_response,
.local_parallel_poll_mode = NULL, // XXX
- .line_status = NULL, //XXX
+ .line_status = NULL, // XXX
.update_status = cec_update_status,
.primary_address = cec_primary_address,
.secondary_address = cec_secondary_address,
@@ -302,7 +302,7 @@ static int cec_pci_attach(struct gpib_board *board, const struct gpib_board_conf
return -EBUSY;
cec_priv->plx_iobase = pci_resource_start(cec_priv->pci_device, 1);
- nec_priv->iobase = pci_resource_start(cec_priv->pci_device, 3);
+ nec_priv->iobase = pci_resource_start(cec_priv->pci_device, 3);
isr_flags |= IRQF_SHARED;
if (request_irq(cec_priv->pci_device->irq, cec_interrupt, isr_flags, DRV_NAME, board)) {
diff --git a/drivers/staging/gpib/common/Makefile b/drivers/gpib/common/Makefile
index 460586edb574..460586edb574 100644
--- a/drivers/staging/gpib/common/Makefile
+++ b/drivers/gpib/common/Makefile
diff --git a/drivers/staging/gpib/common/gpib_os.c b/drivers/gpib/common/gpib_os.c
index a193d64db033..9dbbac8b8436 100644
--- a/drivers/staging/gpib/common/gpib_os.c
+++ b/drivers/gpib/common/gpib_os.c
@@ -326,7 +326,7 @@ static int setup_serial_poll(struct gpib_board *board, unsigned int usec_timeout
cmd_string[i++] = MLA(board->pad); /* controller's listen address */
if (board->sad >= 0)
cmd_string[i++] = MSA(board->sad);
- cmd_string[i++] = SPE; //serial poll enable
+ cmd_string[i++] = SPE; // serial poll enable
ret = board->interface->command(board, cmd_string, i, &bytes_written);
if (ret < 0 || bytes_written < i) {
@@ -610,7 +610,7 @@ int ibclose(struct inode *inode, struct file *filep)
long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
- unsigned int minor = iminor(filep->f_path.dentry->d_inode);
+ unsigned int minor = iminor(file_inode(filep));
struct gpib_board *board;
struct gpib_file_private *file_priv = filep->private_data;
long retval = -ENOTTY;
@@ -831,7 +831,7 @@ static int board_type_ioctl(struct gpib_file_private *file_priv,
retval = copy_from_user(&cmd, (void __user *)arg,
sizeof(struct gpib_board_type_ioctl));
if (retval)
- return retval;
+ return -EFAULT;
for (list_ptr = registered_drivers.next; list_ptr != &registered_drivers;
list_ptr = list_ptr->next) {
@@ -1774,7 +1774,7 @@ static int query_board_rsv_ioctl(struct gpib_board *board, unsigned long arg)
static int board_info_ioctl(const struct gpib_board *board, unsigned long arg)
{
- struct gpib_board_info_ioctl info;
+ struct gpib_board_info_ioctl info = { };
int retval;
info.pad = board->pad;
diff --git a/drivers/staging/gpib/common/iblib.c b/drivers/gpib/common/iblib.c
index 549280d9a6e9..7cbb6a467177 100644
--- a/drivers/staging/gpib/common/iblib.c
+++ b/drivers/gpib/common/iblib.c
@@ -608,7 +608,7 @@ static int wait_satisfied(struct wait_info *winfo, struct gpib_status_queue *sta
*status = temp_status;
return 1;
}
-//XXX does wait for END work?
+// XXX does wait for END work?
return 0;
}
diff --git a/drivers/staging/gpib/common/ibsys.h b/drivers/gpib/common/ibsys.h
index e5a148f513a8..e5a148f513a8 100644
--- a/drivers/staging/gpib/common/ibsys.h
+++ b/drivers/gpib/common/ibsys.h
diff --git a/drivers/staging/gpib/eastwood/Makefile b/drivers/gpib/eastwood/Makefile
index 384825195f77..384825195f77 100644
--- a/drivers/staging/gpib/eastwood/Makefile
+++ b/drivers/gpib/eastwood/Makefile
diff --git a/drivers/staging/gpib/eastwood/fluke_gpib.c b/drivers/gpib/eastwood/fluke_gpib.c
index 491356433249..3ae848e3f738 100644
--- a/drivers/staging/gpib/eastwood/fluke_gpib.c
+++ b/drivers/gpib/eastwood/fluke_gpib.c
@@ -507,7 +507,7 @@ static int fluke_accel_write(struct gpib_board *board, u8 *buffer, size_t length
}
if (retval < 0)
return retval;
- //handle sending of last byte with eoi
+ // handle sending of last byte with eoi
if (send_eoi) {
size_t num_bytes;
diff --git a/drivers/staging/gpib/eastwood/fluke_gpib.h b/drivers/gpib/eastwood/fluke_gpib.h
index 493c200d0bbf..493c200d0bbf 100644
--- a/drivers/staging/gpib/eastwood/fluke_gpib.h
+++ b/drivers/gpib/eastwood/fluke_gpib.h
diff --git a/drivers/staging/gpib/fmh_gpib/Makefile b/drivers/gpib/fmh_gpib/Makefile
index cc4d9e7cd5cd..cc4d9e7cd5cd 100644
--- a/drivers/staging/gpib/fmh_gpib/Makefile
+++ b/drivers/gpib/fmh_gpib/Makefile
diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c b/drivers/gpib/fmh_gpib/fmh_gpib.c
index 4138f3d2bae7..f7bfb4a8e553 100644
--- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
+++ b/drivers/gpib/fmh_gpib/fmh_gpib.c
@@ -523,7 +523,7 @@ static int fmh_gpib_accel_write(struct gpib_board *board, u8 *buffer,
}
if (retval < 0)
return retval;
- //handle sending of last byte with eoi
+ // handle sending of last byte with eoi
if (send_eoi) {
size_t num_bytes;
@@ -1517,6 +1517,11 @@ void fmh_gpib_detach(struct gpib_board *board)
resource_size(e_priv->gpib_iomem_res));
}
fmh_gpib_generic_detach(board);
+
+ if (board->dev) {
+ put_device(board->dev);
+ board->dev = NULL;
+ }
}
static int fmh_gpib_pci_attach_impl(struct gpib_board *board,
diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.h b/drivers/gpib/fmh_gpib/fmh_gpib.h
index e7602d7e1401..e7602d7e1401 100644
--- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.h
+++ b/drivers/gpib/fmh_gpib/fmh_gpib.h
diff --git a/drivers/staging/gpib/gpio/Makefile b/drivers/gpib/gpio/Makefile
index 00ea52abdda7..00ea52abdda7 100644
--- a/drivers/staging/gpib/gpio/Makefile
+++ b/drivers/gpib/gpio/Makefile
diff --git a/drivers/staging/gpib/gpio/gpib_bitbang.c b/drivers/gpib/gpio/gpib_bitbang.c
index 625fef24a0bf..374cd61355e9 100644
--- a/drivers/staging/gpib/gpio/gpib_bitbang.c
+++ b/drivers/gpib/gpio/gpib_bitbang.c
@@ -169,7 +169,7 @@ static struct gpio_desc *all_descriptors[GPIB_PINS + SN7516X_PINS];
#define TE all_descriptors[18]
#define ACT_LED all_descriptors[19]
-/* YOGA dapter uses a global enable for the buffer chips, re-using the TE pin */
+/* YOGA adapter uses a global enable for the buffer chips, re-using the TE pin */
#define YOGA_ENABLE TE
static int gpios_vector[] = {
@@ -277,8 +277,8 @@ struct bb_priv {
int ndac_mode; /* nrfd interrupt mode 0/1 -> edge/levels */
int dav_tx; /* keep trace of DAV status while sending */
int dav_rx; /* keep trace of DAV status while receiving */
- u8 eos; // eos character
- short eos_flags; // eos mode
+ u8 eos; /* eos character */
+ short eos_flags; /* eos mode */
short eos_check; /* eos check required in current operation ... */
short eos_check_8; /* ... with byte comparison */
short eos_mask_7; /* ... with 7 bit masked character */
@@ -290,14 +290,14 @@ struct bb_priv {
u8 *rbuf;
u8 *wbuf;
int end_flag;
- int r_busy; /* 0==idle 1==busy */
+ int r_busy; /* 0==idle 1==busy */
int w_busy;
int write_done;
- int cmd; /* 1 = cmd write in progress */
+ int cmd; /* 1 = cmd write in progress */
size_t w_cnt;
size_t length;
u8 *w_buf;
- spinlock_t rw_lock; // protect mods to rw_lock
+ spinlock_t rw_lock; /* protect mods to rw_lock */
int phase;
int ndac_idle;
int ndac_seq;
@@ -726,7 +726,7 @@ static irqreturn_t bb_SRQ_interrupt(int irq, void *arg)
static int bb_command(struct gpib_board *board, u8 *buffer,
size_t length, size_t *bytes_written)
{
- size_t ret;
+ int ret;
struct bb_priv *priv = board->private_data;
int i;
@@ -1462,8 +1462,8 @@ static inline void SET_DIR_READ(struct bb_priv *priv)
gpiod_set_value(TE, 0); /* set NDAC and NRFD to transmit and DAV to receive */
}
- gpiod_direction_output(NRFD, 0); // hold off the talker
- gpiod_direction_output(NDAC, 0); // data not accepted
+ gpiod_direction_output(NRFD, 0); /* hold off the talker */
+ gpiod_direction_output(NDAC, 0); /* data not accepted */
priv->direction = DIR_READ;
}
diff --git a/drivers/staging/gpib/hp_82335/Makefile b/drivers/gpib/hp_82335/Makefile
index 305ce44ee48a..305ce44ee48a 100644
--- a/drivers/staging/gpib/hp_82335/Makefile
+++ b/drivers/gpib/hp_82335/Makefile
diff --git a/drivers/staging/gpib/hp_82335/hp82335.c b/drivers/gpib/hp_82335/hp82335.c
index d0e47ef77c87..d0e47ef77c87 100644
--- a/drivers/staging/gpib/hp_82335/hp82335.c
+++ b/drivers/gpib/hp_82335/hp82335.c
diff --git a/drivers/staging/gpib/hp_82335/hp82335.h b/drivers/gpib/hp_82335/hp82335.h
index 0c252a712ec9..0c252a712ec9 100644
--- a/drivers/staging/gpib/hp_82335/hp82335.h
+++ b/drivers/gpib/hp_82335/hp82335.h
diff --git a/drivers/staging/gpib/hp_82341/Makefile b/drivers/gpib/hp_82341/Makefile
index 21367310a17e..21367310a17e 100644
--- a/drivers/staging/gpib/hp_82341/Makefile
+++ b/drivers/gpib/hp_82341/Makefile
diff --git a/drivers/staging/gpib/hp_82341/hp_82341.c b/drivers/gpib/hp_82341/hp_82341.c
index 1b0822b2a3b8..1a2ad0560e14 100644
--- a/drivers/staging/gpib/hp_82341/hp_82341.c
+++ b/drivers/gpib/hp_82341/hp_82341.c
@@ -38,7 +38,7 @@ static int hp_82341_accel_read(struct gpib_board *board, u8 *buffer, size_t leng
unsigned short event_status;
int i;
int num_fifo_bytes;
- //hardware doesn't support checking for end-of-string character when using fifo
+ // hardware doesn't support checking for end-of-string character when using fifo
if (tms_priv->eos_flags & REOS)
return tms9914_read(board, tms_priv, buffer, length, end, bytes_read);
@@ -49,7 +49,7 @@ static int hp_82341_accel_read(struct gpib_board *board, u8 *buffer, size_t leng
*bytes_read = 0;
if (length == 0)
return 0;
- //disable fifo for the moment
+ // disable fifo for the moment
outb(DIRECTION_GPIB_TO_HOST_BIT, hp_priv->iobase[3] + BUFFER_CONTROL_REG);
/*
* Handle corner case of board not in holdoff and one byte has slipped in already.
@@ -79,10 +79,7 @@ static int hp_82341_accel_read(struct gpib_board *board, u8 *buffer, size_t leng
int j;
int count;
- if (num_fifo_bytes - i < hp_82341_fifo_size)
- block_size = num_fifo_bytes - i;
- else
- block_size = hp_82341_fifo_size;
+ block_size = min(num_fifo_bytes - i, hp_82341_fifo_size);
set_transfer_counter(hp_priv, block_size);
outb(ENABLE_TI_BUFFER_BIT | DIRECTION_GPIB_TO_HOST_BIT, hp_priv->iobase[3] +
BUFFER_CONTROL_REG);
@@ -157,7 +154,7 @@ static int restart_write_fifo(struct gpib_board *board, struct hp_82341_priv *hp
while (1) {
int status;
- //restart doesn't work if data holdoff is in effect
+ // restart doesn't work if data holdoff is in effect
status = tms9914_line_status(board, tms_priv);
if ((status & BUS_NRFD) == 0) {
outb(RESTART_STREAM_BIT, hp_priv->iobase[0] + STREAM_STATUS_REG);
@@ -195,10 +192,7 @@ static int hp_82341_accel_write(struct gpib_board *board, u8 *buffer, size_t len
for (i = 0; i < fifo_xfer_len;) {
int block_size;
- if (fifo_xfer_len - i < hp_82341_fifo_size)
- block_size = fifo_xfer_len - i;
- else
- block_size = hp_82341_fifo_size;
+ block_size = min(fifo_xfer_len - i, hp_82341_fifo_size);
set_transfer_counter(hp_priv, block_size);
// load data into board's fifo
for (j = 0; j < block_size;) {
@@ -770,7 +764,7 @@ static int hp_82341_attach(struct gpib_board *board, const struct gpib_board_con
ENABLE_TI_INTERRUPT_EVENT_BIT, hp_priv->iobase[0] + EVENT_ENABLE_REG);
outb(ENABLE_BUFFER_END_INTERRUPT_BIT | ENABLE_TERMINAL_COUNT_INTERRUPT_BIT |
ENABLE_TI_INTERRUPT_BIT, hp_priv->iobase[0] + INTERRUPT_ENABLE_REG);
- //write clear event register
+ // write clear event register
outb((TI_INTERRUPT_EVENT_BIT | POINTERS_EQUAL_EVENT_BIT |
BUFFER_END_EVENT_BIT | TERMINAL_COUNT_EVENT_BIT),
hp_priv->iobase[0] + EVENT_STATUS_REG);
@@ -873,7 +867,7 @@ static irqreturn_t hp_82341_interrupt(int irq, void *arg)
event_status = inb(hp_priv->iobase[0] + EVENT_STATUS_REG);
if (event_status & INTERRUPT_PENDING_EVENT_BIT)
retval = IRQ_HANDLED;
- //write-clear status bits
+ // write-clear status bits
if (event_status & (TI_INTERRUPT_EVENT_BIT | POINTERS_EQUAL_EVENT_BIT |
BUFFER_END_EVENT_BIT | TERMINAL_COUNT_EVENT_BIT)) {
outb(event_status & (TI_INTERRUPT_EVENT_BIT | POINTERS_EQUAL_EVENT_BIT |
@@ -907,7 +901,7 @@ static void set_transfer_counter(struct hp_82341_priv *hp_priv, int count)
outb(complement & 0xff, hp_priv->iobase[1] + TRANSFER_COUNT_LOW_REG);
outb((complement >> 8) & 0xff, hp_priv->iobase[1] + TRANSFER_COUNT_MID_REG);
- //I don't think the hi count reg is even used, but oh well
+ // I don't think the hi count reg is even used, but oh well
outb((complement >> 16) & 0xf, hp_priv->iobase[1] + TRANSFER_COUNT_HIGH_REG);
}
diff --git a/drivers/staging/gpib/hp_82341/hp_82341.h b/drivers/gpib/hp_82341/hp_82341.h
index 370a3d4576eb..859ef2899acb 100644
--- a/drivers/staging/gpib/hp_82341/hp_82341.h
+++ b/drivers/gpib/hp_82341/hp_82341.h
@@ -65,7 +65,7 @@ enum config_control_status_bits {
IRQ_SELECT_MASK = 0x7,
DMA_CONFIG_MASK = 0x18,
ENABLE_DMA_CONFIG_BIT = 0x20,
- XILINX_READY_BIT = 0x40, //read only
+ XILINX_READY_BIT = 0x40, // read only
DONE_PGL_BIT = 0x80
};
@@ -94,7 +94,7 @@ static inline unsigned int IRQ_SELECT_BITS(int irq)
};
enum mode_control_status_bits {
- SLOT8_BIT = 0x1, // read only
+ SLOT8_BIT = 0x1, // read only
ACTIVE_CONTROLLER_BIT = 0x2, // read only
ENABLE_DMA_BIT = 0x4,
SYSTEM_CONTROLLER_BIT = 0x8,
@@ -106,12 +106,12 @@ enum mode_control_status_bits {
enum monitor_bits {
MONITOR_INTERRUPT_PENDING_BIT = 0x1, // read only
MONITOR_CLEAR_HOLDOFF_BIT = 0x2, // write only
- MONITOR_PPOLL_BIT = 0x4, // write clear
- MONITOR_SRQ_BIT = 0x8, // write clear
- MONITOR_IFC_BIT = 0x10, // write clear
- MONITOR_REN_BIT = 0x20, // write clear
- MONITOR_END_BIT = 0x40, // write clear
- MONITOR_DAV_BIT = 0x80 // write clear
+ MONITOR_PPOLL_BIT = 0x4, // write clear
+ MONITOR_SRQ_BIT = 0x8, // write clear
+ MONITOR_IFC_BIT = 0x10, // write clear
+ MONITOR_REN_BIT = 0x20, // write clear
+ MONITOR_END_BIT = 0x40, // write clear
+ MONITOR_DAV_BIT = 0x80 // write clear
};
enum interrupt_enable_bits {
@@ -123,36 +123,36 @@ enum interrupt_enable_bits {
};
enum event_status_bits {
- TI_INTERRUPT_EVENT_BIT = 0x1, //write clear
+ TI_INTERRUPT_EVENT_BIT = 0x1, // write clear
INTERRUPT_PENDING_EVENT_BIT = 0x2, // read only
- POINTERS_EQUAL_EVENT_BIT = 0x4, //write clear
- BUFFER_END_EVENT_BIT = 0x10, //write clear
+ POINTERS_EQUAL_EVENT_BIT = 0x4, // write clear
+ BUFFER_END_EVENT_BIT = 0x10, // write clear
TERMINAL_COUNT_EVENT_BIT = 0x20, // write clear
DMA_TERMINAL_COUNT_EVENT_BIT = 0x80, // write clear
};
enum event_enable_bits {
- ENABLE_TI_INTERRUPT_EVENT_BIT = 0x1, //write clear
- ENABLE_POINTERS_EQUAL_EVENT_BIT = 0x4, //write clear
- ENABLE_BUFFER_END_EVENT_BIT = 0x10, //write clear
- ENABLE_TERMINAL_COUNT_EVENT_BIT = 0x20, // write clear
+ ENABLE_TI_INTERRUPT_EVENT_BIT = 0x1, // write clear
+ ENABLE_POINTERS_EQUAL_EVENT_BIT = 0x4, // write clear
+ ENABLE_BUFFER_END_EVENT_BIT = 0x10, // write clear
+ ENABLE_TERMINAL_COUNT_EVENT_BIT = 0x20, // write clear
ENABLE_DMA_TERMINAL_COUNT_EVENT_BIT = 0x80, // write clear
};
enum stream_status_bits {
- HALTED_STATUS_BIT = 0x1, //read
- RESTART_STREAM_BIT = 0x1 //write
+ HALTED_STATUS_BIT = 0x1, // read
+ RESTART_STREAM_BIT = 0x1 // write
};
enum buffer_control_bits {
DIRECTION_GPIB_TO_HOST_BIT = 0x20, // transfer direction (set for gpib to host)
- ENABLE_TI_BUFFER_BIT = 0x40, //enable fifo
- FAST_WR_EN_BIT = 0x80, // 350 ns t1 delay?
+ ENABLE_TI_BUFFER_BIT = 0x40, // enable fifo
+ FAST_WR_EN_BIT = 0x80, // 350 ns t1 delay?
};
// registers accessible through isapnp chip on 82341d
enum hp_82341d_pnp_registers {
- PIO_DATA_REG = 0x20, //read/write pio data lines
+ PIO_DATA_REG = 0x20, // read/write pio data lines
PIO_DIRECTION_REG = 0x21, // set pio data line directions (set for input)
};
diff --git a/drivers/staging/gpib/include/amcc5920.h b/drivers/gpib/include/amcc5920.h
index 7a88bd282feb..7a88bd282feb 100644
--- a/drivers/staging/gpib/include/amcc5920.h
+++ b/drivers/gpib/include/amcc5920.h
diff --git a/drivers/staging/gpib/include/amccs5933.h b/drivers/gpib/include/amccs5933.h
index 4de0f6797458..d7f63c795096 100644
--- a/drivers/staging/gpib/include/amccs5933.h
+++ b/drivers/gpib/include/amccs5933.h
@@ -24,7 +24,7 @@ extern inline int INCOMING_MAILBOX_REG(unsigned int mailbox)
enum {
OUTBOX_EMPTY_INTR_BIT = 0x10, // enable outbox empty interrupt
INBOX_FULL_INTR_BIT = 0x1000, // enable inbox full interrupt
- INBOX_INTR_CS_BIT = 0x20000, // read, or write clear inbox full interrupt
+ INBOX_INTR_CS_BIT = 0x20000, // read, or write clear inbox full interrupt
INTR_ASSERTED_BIT = 0x800000, // read only, interrupt asserted
};
@@ -52,7 +52,7 @@ extern inline int OUTBOX_SELECT_BITS(unsigned int mailbox)
return (mailbox & 0x3) << 2;
};
-//BMCSR bits
+// BMCSR bits
enum {
MBOX_FLAGS_RESET_BIT = 0x08000000, // resets mailbox empty/full flags
};
diff --git a/drivers/staging/gpib/include/gpibP.h b/drivers/gpib/include/gpibP.h
index 0af72934ce24..e3938ada3e0d 100644
--- a/drivers/staging/gpib/include/gpibP.h
+++ b/drivers/gpib/include/gpibP.h
@@ -11,8 +11,9 @@
#include "gpib_types.h"
#include "gpib_proto.h"
-#include "gpib.h"
-#include "gpib_ioctl.h"
+#include "gpib_cmd.h"
+#include <linux/gpib.h>
+#include <linux/gpib_ioctl.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
diff --git a/drivers/gpib/include/gpib_cmd.h b/drivers/gpib/include/gpib_cmd.h
new file mode 100644
index 000000000000..9e96a3bfa22d
--- /dev/null
+++ b/drivers/gpib/include/gpib_cmd.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _GPIB_CMD_H
+#define _GPIB_CMD_H
+
+#include <linux/types.h>
+
+/* Command byte definitions, tests and functions */
+
+/* mask of bits that actually matter in a command byte */
+enum {
+ gpib_command_mask = 0x7f,
+};
+
+/* Possible GPIB command messages */
+
+enum cmd_byte {
+ GTL = 0x1, /* go to local */
+ SDC = 0x4, /* selected device clear */
+ PP_CONFIG = 0x5,
+ GET = 0x8, /* group execute trigger */
+ TCT = 0x9, /* take control */
+ LLO = 0x11, /* local lockout */
+ DCL = 0x14, /* device clear */
+ PPU = 0x15, /* parallel poll unconfigure */
+ SPE = 0x18, /* serial poll enable */
+ SPD = 0x19, /* serial poll disable */
+ CFE = 0x1f, /* configure enable */
+ LAD = 0x20, /* value to be 'ored' in to obtain listen address */
+ UNL = 0x3F, /* unlisten */
+ TAD = 0x40, /* value to be 'ored' in to obtain talk address */
+ UNT = 0x5F, /* untalk */
+ SAD = 0x60, /* my secondary address (base) */
+ PPE = 0x60, /* parallel poll enable (base) */
+ PPD = 0x70 /* parallel poll disable */
+};
+
+/* confine address to range 0 to 30. */
+static inline unsigned int gpib_address_restrict(u32 addr)
+{
+ addr &= 0x1f;
+ if (addr == 0x1f)
+ addr = 0;
+ return addr;
+}
+
+static inline u8 MLA(u32 addr)
+{
+ return gpib_address_restrict(addr) | LAD;
+}
+
+static inline u8 MTA(u32 addr)
+{
+ return gpib_address_restrict(addr) | TAD;
+}
+
+static inline u8 MSA(u32 addr)
+{
+ return (addr & 0x1f) | SAD;
+}
+
+static inline s32 gpib_address_equal(u32 pad1, s32 sad1, u32 pad2, s32 sad2)
+{
+ if (pad1 == pad2) {
+ if (sad1 == sad2)
+ return 1;
+ if (sad1 < 0 && sad2 < 0)
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline s32 is_PPE(u8 command)
+{
+ return (command & 0x70) == 0x60;
+}
+
+static inline s32 is_PPD(u8 command)
+{
+ return (command & 0x70) == 0x70;
+}
+
+static inline s32 in_addressed_command_group(u8 command)
+{
+ return (command & 0x70) == 0x0;
+}
+
+static inline s32 in_universal_command_group(u8 command)
+{
+ return (command & 0x70) == 0x10;
+}
+
+static inline s32 in_listen_address_group(u8 command)
+{
+ return (command & 0x60) == 0x20;
+}
+
+static inline s32 in_talk_address_group(u8 command)
+{
+ return (command & 0x60) == 0x40;
+}
+
+static inline s32 in_primary_command_group(u8 command)
+{
+ return in_addressed_command_group(command) ||
+ in_universal_command_group(command) ||
+ in_listen_address_group(command) ||
+ in_talk_address_group(command);
+}
+
+#endif /* _GPIB_CMD_H */
diff --git a/drivers/staging/gpib/include/gpib_pci_ids.h b/drivers/gpib/include/gpib_pci_ids.h
index 52dcab07a7d1..52dcab07a7d1 100644
--- a/drivers/staging/gpib/include/gpib_pci_ids.h
+++ b/drivers/gpib/include/gpib_pci_ids.h
diff --git a/drivers/staging/gpib/include/gpib_proto.h b/drivers/gpib/include/gpib_proto.h
index 42e736e3b7cd..42e736e3b7cd 100644
--- a/drivers/staging/gpib/include/gpib_proto.h
+++ b/drivers/gpib/include/gpib_proto.h
diff --git a/drivers/staging/gpib/include/gpib_state_machines.h b/drivers/gpib/include/gpib_state_machines.h
index 7488c00f191e..7488c00f191e 100644
--- a/drivers/staging/gpib/include/gpib_state_machines.h
+++ b/drivers/gpib/include/gpib_state_machines.h
diff --git a/drivers/staging/gpib/include/gpib_types.h b/drivers/gpib/include/gpib_types.h
index db040c80d778..5a0978ae27e7 100644
--- a/drivers/staging/gpib/include/gpib_types.h
+++ b/drivers/gpib/include/gpib_types.h
@@ -8,7 +8,7 @@
#define _GPIB_TYPES_H
#ifdef __KERNEL__
-#include "gpib.h"
+#include <linux/gpib.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/mutex.h>
@@ -273,7 +273,8 @@ struct gpib_board {
struct mutex big_gpib_mutex;
/* pid of last process to lock the board mutex */
pid_t locking_pid;
- spinlock_t locking_pid_spinlock; // lock for setting locking pid
+ /* lock for setting locking pid */
+ spinlock_t locking_pid_spinlock;
/* Spin lock for dealing with races with the interrupt handler */
spinlock_t spinlock;
/* Watchdog timer to enable timeouts */
diff --git a/drivers/staging/gpib/include/nec7210.h b/drivers/gpib/include/nec7210.h
index 312217b4580e..9835aa5ef4ff 100644
--- a/drivers/staging/gpib/include/nec7210.h
+++ b/drivers/gpib/include/nec7210.h
@@ -22,18 +22,18 @@ struct nec7210_priv {
u32 iobase;
#endif
void __iomem *mmiobase;
- unsigned int offset; // offset between successive nec7210 io addresses
+ unsigned int offset; // offset between successive nec7210 io addresses
unsigned int dma_channel;
u8 *dma_buffer;
unsigned int dma_buffer_length; // length of dma buffer
dma_addr_t dma_buffer_addr; // bus address of board->buffer for use with dma
// software copy of bits written to registers
u8 reg_bits[8];
- u8 auxa_bits; // bits written to auxiliary register A
- u8 auxb_bits; // bits written to auxiliary register B
+ u8 auxa_bits; // bits written to auxiliary register A
+ u8 auxb_bits; // bits written to auxiliary register B
// used to keep track of board's state, bit definitions given below
unsigned long state;
- /* lock for chips that extend the nec7210 registers by paging in alternate regs */
+ // lock for chips that extend the nec7210 registers by paging in alternate regs
spinlock_t register_page_lock;
// wrappers for outb, inb, readb, or writeb
u8 (*read_byte)(struct nec7210_priv *priv, unsigned int register_number);
@@ -64,17 +64,17 @@ static inline void write_byte(struct nec7210_priv *priv, u8 byte, unsigned int r
// struct nec7210_priv.state bit numbers
enum {
- PIO_IN_PROGRESS_BN, // pio transfer in progress
+ PIO_IN_PROGRESS_BN, // pio transfer in progress
DMA_READ_IN_PROGRESS_BN, // dma read transfer in progress
DMA_WRITE_IN_PROGRESS_BN, // dma write transfer in progress
- READ_READY_BN, // board has data byte available to read
- WRITE_READY_BN, // board is ready to send a data byte
- COMMAND_READY_BN, // board is ready to send a command byte
- RECEIVED_END_BN, // received END
- BUS_ERROR_BN, // output error has occurred
- RFD_HOLDOFF_BN, // rfd holdoff in effect
- DEV_CLEAR_BN, // device clear received
- ADR_CHANGE_BN, // address state change occurred
+ READ_READY_BN, // board has data byte available to read
+ WRITE_READY_BN, // board is ready to send a data byte
+ COMMAND_READY_BN, // board is ready to send a command byte
+ RECEIVED_END_BN, // received END
+ BUS_ERROR_BN, // output error has occurred
+ RFD_HOLDOFF_BN, // rfd holdoff in effect
+ DEV_CLEAR_BN, // device clear received
+ ADR_CHANGE_BN, // address state change occurred
};
// interface functions
diff --git a/drivers/staging/gpib/include/nec7210_registers.h b/drivers/gpib/include/nec7210_registers.h
index 97c53ac8e805..067983d7a07f 100644
--- a/drivers/staging/gpib/include/nec7210_registers.h
+++ b/drivers/gpib/include/nec7210_registers.h
@@ -11,7 +11,7 @@ enum nec7210_chipset {
NEC7210, // The original
TNT4882, // NI
NAT4882, // NI
- CB7210, // measurement computing
+ CB7210, // measurement computing
IOT7210, // iotech
IGPIB7210, // Ines
TNT5004, // NI (minor differences to TNT4882)
@@ -48,7 +48,7 @@ enum nec7210_read_regs {
ADR1, // address 2
};
-//bit definitions common to nec-7210 compatible registers
+// bit definitions common to nec-7210 compatible registers
// ISR1: interrupt status register 1
enum isr1_bits {
diff --git a/drivers/staging/gpib/include/plx9050.h b/drivers/gpib/include/plx9050.h
index 66c56335f5c0..c911b285a0ca 100644
--- a/drivers/staging/gpib/include/plx9050.h
+++ b/drivers/gpib/include/plx9050.h
@@ -23,10 +23,10 @@ enum plx9050_intcsr_bits {
PLX9050_LINTR2_STATUS_BIT = 0x20,
PLX9050_PCI_INTR_EN_BIT = 0x40,
PLX9050_SOFT_INTR_BIT = 0x80,
- PLX9050_LINTR1_SELECT_ENABLE_BIT = 0x100, //9052 extension
- PLX9050_LINTR2_SELECT_ENABLE_BIT = 0x200, //9052 extension
- PLX9050_LINTR1_EDGE_CLEAR_BIT = 0x400, //9052 extension
- PLX9050_LINTR2_EDGE_CLEAR_BIT = 0x800, //9052 extension
+ PLX9050_LINTR1_SELECT_ENABLE_BIT = 0x100, // 9052 extension
+ PLX9050_LINTR2_SELECT_ENABLE_BIT = 0x200, // 9052 extension
+ PLX9050_LINTR1_EDGE_CLEAR_BIT = 0x400, // 9052 extension
+ PLX9050_LINTR2_EDGE_CLEAR_BIT = 0x800, // 9052 extension
};
enum plx9050_cntrl_bits {
diff --git a/drivers/staging/gpib/include/quancom_pci.h b/drivers/gpib/include/quancom_pci.h
index cdaf0d056be9..cdaf0d056be9 100644
--- a/drivers/staging/gpib/include/quancom_pci.h
+++ b/drivers/gpib/include/quancom_pci.h
diff --git a/drivers/staging/gpib/include/tms9914.h b/drivers/gpib/include/tms9914.h
index 50a9d3b22619..e66b75e0fda8 100644
--- a/drivers/staging/gpib/include/tms9914.h
+++ b/drivers/gpib/include/tms9914.h
@@ -30,10 +30,10 @@ struct tms9914_priv {
u8 imr0_bits, imr1_bits;
// bits written to address mode register
u8 admr_bits;
- u8 auxa_bits; // bits written to auxiliary register A
+ u8 auxa_bits; // bits written to auxiliary register A
// used to keep track of board's state, bit definitions given below
unsigned long state;
- u8 eos; // eos character
+ u8 eos; // eos character
short eos_flags;
u8 spoll_status;
enum tms9914_holdoff_mode holdoff_mode;
@@ -67,15 +67,15 @@ static inline void write_byte(struct tms9914_priv *priv, u8 byte, unsigned int r
// struct tms9914_priv.state bit numbers
enum {
- PIO_IN_PROGRESS_BN, // pio transfer in progress
+ PIO_IN_PROGRESS_BN, // pio transfer in progress
DMA_READ_IN_PROGRESS_BN, // dma read transfer in progress
DMA_WRITE_IN_PROGRESS_BN, // dma write transfer in progress
- READ_READY_BN, // board has data byte available to read
- WRITE_READY_BN, // board is ready to send a data byte
- COMMAND_READY_BN, // board is ready to send a command byte
- RECEIVED_END_BN, // received END
- BUS_ERROR_BN, // bus error
- DEV_CLEAR_BN, // device clear received
+ READ_READY_BN, // board has data byte available to read
+ WRITE_READY_BN, // board is ready to send a data byte
+ COMMAND_READY_BN, // board is ready to send a command byte
+ RECEIVED_END_BN, // received END
+ BUS_ERROR_BN, // bus error
+ DEV_CLEAR_BN, // device clear received
};
// interface functions
@@ -150,23 +150,23 @@ enum {
IMR0 = 0, /* interrupt mask 0 */
IMR1 = 1, /* interrupt mask 1 */
AUXCR = 3, /* auxiliary command */
- ADR = 4, // address register
- SPMR = 5, // serial poll mode register
+ ADR = 4, /* address register */
+ SPMR = 5, /* serial poll mode register */
PPR = 6, /* parallel poll */
CDOR = 7, /* data out register */
};
// read registers
enum {
- ISR0 = 0, /* interrupt status 0 */
- ISR1 = 1, /* interrupt status 1 */
- ADSR = 2, /* address status */
- BSR = 3, /* bus status */
- CPTR = 6, /* command pass thru */
- DIR = 7, /* data in register */
+ ISR0 = 0, /* interrupt status 0 */
+ ISR1 = 1, /* interrupt status 1 */
+ ADSR = 2, /* address status */
+ BSR = 3, /* bus status */
+ CPTR = 6, /* command pass thru */
+ DIR = 7, /* data in register */
};
-//bit definitions common to tms9914 compatible registers
+// bit definitions common to tms9914 compatible registers
/* ISR0 - Register bits */
enum isr0_bits {
@@ -248,33 +248,33 @@ enum bus_status_bits {
/*---------------------------------------------------------*/
enum aux_cmd_bits {
- AUX_CS = 0x80, /* set bit instead of clearing it, used with commands marked 'd' below */
- AUX_CHIP_RESET = 0x0, /* d Chip reset */
- AUX_INVAL = 0x1, // release dac holdoff, invalid command byte
- AUX_VAL = (AUX_INVAL | AUX_CS), // release dac holdoff, valid command byte
- AUX_RHDF = 0x2, /* X Release RFD holdoff */
- AUX_HLDA = 0x3, /* d holdoff on all data */
- AUX_HLDE = 0x4, /* d holdoff on EOI only */
- AUX_NBAF = 0x5, /* X Set new byte available false */
- AUX_FGET = 0x6, /* d force GET */
- AUX_RTL = 0x7, /* d return to local */
- AUX_SEOI = 0x8, /* X send EOI with next byte */
- AUX_LON = 0x9, /* d Listen only */
- AUX_TON = 0xa, /* d Talk only */
- AUX_GTS = 0xb, /* X goto standby */
- AUX_TCA = 0xc, /* X take control asynchronously */
- AUX_TCS = 0xd, /* X take " synchronously */
- AUX_RPP = 0xe, /* d Request parallel poll */
- AUX_SIC = 0xf, /* d send interface clear */
- AUX_SRE = 0x10, /* d send remote enable */
- AUX_RQC = 0x11, /* X request control */
- AUX_RLC = 0x12, /* X release control */
- AUX_DAI = 0x13, /* d disable all interrupts */
- AUX_PTS = 0x14, /* X pass through next secondary */
- AUX_STDL = 0x15, /* d short T1 delay */
- AUX_SHDW = 0x16, /* d shadow handshake */
- AUX_VSTDL = 0x17, /* d very short T1 delay (smj9914 extension) */
- AUX_RSV2 = 0x18, /* d request service bit 2 (smj9914 extension) */
+ AUX_CS = 0x80, /* set bit instead of clearing it, used with commands marked 'd' below */
+ AUX_CHIP_RESET = 0x0, /* d Chip reset */
+ AUX_INVAL = 0x1, /* release dac holdoff, invalid command byte */
+ AUX_VAL = (AUX_INVAL | AUX_CS), /* release dac holdoff, valid command byte */
+ AUX_RHDF = 0x2, /* X Release RFD holdoff */
+ AUX_HLDA = 0x3, /* d holdoff on all data */
+ AUX_HLDE = 0x4, /* d holdoff on EOI only */
+ AUX_NBAF = 0x5, /* X Set new byte available false */
+ AUX_FGET = 0x6, /* d force GET */
+ AUX_RTL = 0x7, /* d return to local */
+ AUX_SEOI = 0x8, /* X send EOI with next byte */
+ AUX_LON = 0x9, /* d Listen only */
+ AUX_TON = 0xa, /* d Talk only */
+ AUX_GTS = 0xb, /* X goto standby */
+ AUX_TCA = 0xc, /* X take control asynchronously */
+ AUX_TCS = 0xd, /* X take " synchronously */
+ AUX_RPP = 0xe, /* d Request parallel poll */
+ AUX_SIC = 0xf, /* d send interface clear */
+ AUX_SRE = 0x10, /* d send remote enable */
+ AUX_RQC = 0x11, /* X request control */
+ AUX_RLC = 0x12, /* X release control */
+ AUX_DAI = 0x13, /* d disable all interrupts */
+ AUX_PTS = 0x14, /* X pass through next secondary */
+ AUX_STDL = 0x15, /* d short T1 delay */
+ AUX_SHDW = 0x16, /* d shadow handshake */
+ AUX_VSTDL = 0x17, /* d very short T1 delay (smj9914 extension) */
+ AUX_RSV2 = 0x18, /* d request service bit 2 (smj9914 extension) */
};
#endif //_TMS9914_H
diff --git a/drivers/staging/gpib/include/tnt4882_registers.h b/drivers/gpib/include/tnt4882_registers.h
index 1b1441cd03d5..d54c4cc61168 100644
--- a/drivers/staging/gpib/include/tnt4882_registers.h
+++ b/drivers/gpib/include/tnt4882_registers.h
@@ -32,11 +32,11 @@ enum {
CMDR = 0x1c, // command register
TIMER = 0x1e, // timer register
- STS1 = 0x10, /* T488 Status Register 1 */
- STS2 = 0x1c, /* T488 Status Register 2 */
+ STS1 = 0x10, // T488 Status Register 1
+ STS2 = 0x1c, // T488 Status Register 2
ISR0 = IMR0,
- ISR3 = 0x1a, /* T488 Interrupt Status Register 3 */
- BCR = 0x1f, /* bus control/status register */
+ ISR3 = 0x1a, // T488 Interrupt Status Register 3
+ BCR = 0x1f, // bus control/status register
BSR = BCR,
};
@@ -107,11 +107,11 @@ enum imr0_bits {
/* ISR0 -- Interrupt Status Register 0 */
enum isr0_bits {
- TNT_SYNC_BIT = 0x1, /* handshake sync */
- TNT_TO_BIT = 0x2, /* timeout */
- TNT_ATNI_BIT = 0x4, /* ATN interrupt */
+ TNT_SYNC_BIT = 0x1, /* handshake sync */
+ TNT_TO_BIT = 0x2, /* timeout */
+ TNT_ATNI_BIT = 0x4, /* ATN interrupt */
TNT_IFCI_BIT = 0x8, /* interface clear interrupt */
- TNT_EOS_BIT = 0x10, /* end of string */
+ TNT_EOS_BIT = 0x10, /* end of string */
TNT_NL_BIT = 0x20, /* new line receive */
TNT_STBO_BIT = 0x40, /* status byte out */
TNT_NBA_BIT = 0x80, /* new byte available */
@@ -129,7 +129,7 @@ enum isr3_bits {
};
enum keyreg_bits {
- MSTD = 0x20, // enable 350ns T1 delay
+ MSTD = 0x20, /* enable 350ns T1 delay */
};
/* STS1 -- Status Register 1 (read only) */
@@ -157,7 +157,7 @@ enum tnt4882_aux_cmds {
AUX_9914 = 0x15, // switch to 9914 mode
AUX_REQT = 0x18,
AUX_REQF = 0x19,
- AUX_PAGEIN = 0x50, /* page in alternate registers */
+ AUX_PAGEIN = 0x50, // page in alternate registers
AUX_HLDI = 0x51, // rfd holdoff immediately
AUX_CLEAR_END = 0x55,
AUX_7210 = 0x99, // switch to 7210 mode
@@ -183,7 +183,7 @@ enum auxi_bits {
enum sasr_bits {
ACRDY_BIT = 0x4, /* acceptor ready state */
- ADHS_BIT = 0x8, /* acceptor data holdoff state */
+ ADHS_BIT = 0x8, /* acceptor data holdoff state */
ANHS2_BIT = 0x10, /* acceptor not ready holdoff immediately state */
ANHS1_BIT = 0x20, /* acceptor not ready holdoff state */
AEHS_BIT = 0x40, /* acceptor end holdoff state */
diff --git a/drivers/staging/gpib/ines/Makefile b/drivers/gpib/ines/Makefile
index 88241f15ecea..88241f15ecea 100644
--- a/drivers/staging/gpib/ines/Makefile
+++ b/drivers/gpib/ines/Makefile
diff --git a/drivers/staging/gpib/ines/ines.h b/drivers/gpib/ines/ines.h
index f0210ce2470d..6ad57e9a1216 100644
--- a/drivers/staging/gpib/ines/ines.h
+++ b/drivers/gpib/ines/ines.h
@@ -97,9 +97,9 @@ enum extend_mode_bits {
TR3_TRIG_ENABLE_BIT = 0x1, // enable generation of trigger pulse T/R3 pin
// clear message available status bit when chip writes byte with EOI true
MAV_ENABLE_BIT = 0x2,
- EOS1_ENABLE_BIT = 0x4, // enable eos register 1
- EOS2_ENABLE_BIT = 0x8, // enable eos register 2
- EOIDIS_BIT = 0x10, // disable EOI interrupt when doing rfd holdoff on end?
+ EOS1_ENABLE_BIT = 0x4, // enable eos register 1
+ EOS2_ENABLE_BIT = 0x8, // enable eos register 2
+ EOIDIS_BIT = 0x10, // disable EOI interrupt when doing rfd holdoff on end?
XFER_COUNTER_ENABLE_BIT = 0x20,
XFER_COUNTER_OUTPUT_BIT = 0x40, // use counter for output, clear for input
// when xfer counter hits 0, assert EOI on write or RFD holdoff on read
@@ -121,10 +121,10 @@ enum ines_admr_bits {
};
enum xdma_control_bits {
- DMA_OUTPUT_BIT = 0x1, // use dma for output, clear for input
+ DMA_OUTPUT_BIT = 0x1, // use dma for output, clear for input
ENABLE_SYNC_DMA_BIT = 0x2,
- DMA_ACCESS_EVERY_CYCLE = 0x4,// dma accesses fifo every cycle, clear for every other cycle
- DMA_16BIT = 0x8, // clear for 8 bit transfers
+ DMA_ACCESS_EVERY_CYCLE = 0x4, // dma accesses fifo every cycle, clear for every other cycle
+ DMA_16BIT = 0x8, // clear for 8 bit transfers
};
enum bus_control_monitor_bits {
diff --git a/drivers/staging/gpib/ines/ines_gpib.c b/drivers/gpib/ines/ines_gpib.c
index c851fd014f48..a3cf846fd0f9 100644
--- a/drivers/staging/gpib/ines/ines_gpib.c
+++ b/drivers/gpib/ines/ines_gpib.c
@@ -152,7 +152,7 @@ static int ines_accel_read(struct gpib_board *board, u8 *buffer,
write_byte(nec_priv, INES_RFD_HLD_IMMEDIATE, AUXMR);
- //clear in fifo
+ // clear in fifo
nec7210_set_reg_bits(nec_priv, ADMR, IN_FIFO_ENABLE_BIT, 0);
nec7210_set_reg_bits(nec_priv, ADMR, IN_FIFO_ENABLE_BIT, IN_FIFO_ENABLE_BIT);
@@ -225,7 +225,7 @@ static int ines_accel_write(struct gpib_board *board, u8 *buffer, size_t length,
unsigned int num_bytes, i;
*bytes_written = 0;
- //clear out fifo
+ // clear out fifo
nec7210_set_reg_bits(nec_priv, ADMR, OUT_FIFO_ENABLE_BIT, 0);
nec7210_set_reg_bits(nec_priv, ADMR, OUT_FIFO_ENABLE_BIT, OUT_FIFO_ENABLE_BIT);
diff --git a/drivers/staging/gpib/lpvo_usb_gpib/Makefile b/drivers/gpib/lpvo_usb_gpib/Makefile
index 360553488e6d..360553488e6d 100644
--- a/drivers/staging/gpib/lpvo_usb_gpib/Makefile
+++ b/drivers/gpib/lpvo_usb_gpib/Makefile
diff --git a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c b/drivers/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
index 3cf5037c0cd2..dd68c4843490 100644
--- a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
+++ b/drivers/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
@@ -791,7 +791,6 @@ static int usb_gpib_read(struct gpib_board *board,
return -EIO;
else
return -ETIME;
- return 0;
}
/* allocate buffer for multibyte read */
diff --git a/drivers/staging/gpib/nec7210/Makefile b/drivers/gpib/nec7210/Makefile
index 64330f2e89d1..64330f2e89d1 100644
--- a/drivers/staging/gpib/nec7210/Makefile
+++ b/drivers/gpib/nec7210/Makefile
diff --git a/drivers/staging/gpib/nec7210/board.h b/drivers/gpib/nec7210/board.h
index ac3fe38ade57..ac3fe38ade57 100644
--- a/drivers/staging/gpib/nec7210/board.h
+++ b/drivers/gpib/nec7210/board.h
diff --git a/drivers/staging/gpib/nec7210/nec7210.c b/drivers/gpib/nec7210/nec7210.c
index 34a1cae4f486..bbf39367f5e4 100644
--- a/drivers/staging/gpib/nec7210/nec7210.c
+++ b/drivers/gpib/nec7210/nec7210.c
@@ -779,10 +779,10 @@ int nec7210_write(struct gpib_board *board, struct nec7210_priv *priv,
*bytes_written = 0;
- clear_bit(DEV_CLEAR_BN, &priv->state); //XXX
+ clear_bit(DEV_CLEAR_BN, &priv->state); // XXX
if (send_eoi)
- length-- ; /* save the last byte for sending EOI */
+ length-- ; // save the last byte for sending EOI
if (length > 0) {
// isa dma transfer
@@ -1005,7 +1005,7 @@ void nec7210_board_online(struct nec7210_priv *priv, const struct gpib_board *bo
nec7210_primary_address(board, priv, board->pad);
nec7210_secondary_address(board, priv, board->sad, board->sad >= 0);
- // enable interrupts
+ /* enable interrupts */
priv->reg_bits[IMR1] = HR_ERRIE | HR_DECIE | HR_ENDIE |
HR_DETIE | HR_CPTIE | HR_DOIE | HR_DIIE;
priv->reg_bits[IMR2] = IMR2_ENABLE_INTR_MASK;
diff --git a/drivers/staging/gpib/ni_usb/Makefile b/drivers/gpib/ni_usb/Makefile
index 469c5d16add3..469c5d16add3 100644
--- a/drivers/staging/gpib/ni_usb/Makefile
+++ b/drivers/gpib/ni_usb/Makefile
diff --git a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c b/drivers/gpib/ni_usb/ni_usb_gpib.c
index 7cf25c95787f..1f8412de9fa3 100644
--- a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
+++ b/drivers/gpib/ni_usb/ni_usb_gpib.c
@@ -29,7 +29,7 @@ static void ni_usb_stop(struct ni_usb_priv *ni_priv);
static DEFINE_MUTEX(ni_usb_hotplug_lock);
-//calculates a reasonable timeout in that can be passed to usb functions
+// calculates a reasonable timeout that can be passed to usb functions
static inline unsigned long ni_usb_timeout_msecs(unsigned int usec)
{
if (usec == 0)
@@ -327,7 +327,10 @@ static void ni_usb_soft_update_status(struct gpib_board *board, unsigned int ni_
board->status &= ~clear_mask;
board->status &= ~ni_usb_ibsta_mask;
board->status |= ni_usb_ibsta & ni_usb_ibsta_mask;
- //FIXME should generate events on DTAS and DCAS
+ if (ni_usb_ibsta & DCAS)
+ push_gpib_event(board, EVENT_DEV_CLR);
+ if (ni_usb_ibsta & DTAS)
+ push_gpib_event(board, EVENT_DEV_TRG);
spin_lock_irqsave(&board->spinlock, flags);
/* remove set status bits from monitored set why ?***/
@@ -569,7 +572,7 @@ static int ni_usb_write_registers(struct ni_usb_priv *ni_priv,
mutex_unlock(&ni_priv->addressed_transfer_lock);
ni_usb_parse_reg_write_status_block(in_data, &status, &reg_writes_completed);
- //FIXME parse extra 09 status bits and termination
+ // FIXME parse extra 09 status bits and termination
kfree(in_data);
if (status.id != NIUSB_REG_WRITE_ID) {
dev_err(&usb_dev->dev, "parse error, id=0x%x != NIUSB_REG_WRITE_ID\n", status.id);
@@ -694,8 +697,12 @@ static int ni_usb_read(struct gpib_board *board, u8 *buffer, size_t length,
*/
break;
case NIUSB_ATN_STATE_ERROR:
- retval = -EIO;
- dev_err(&usb_dev->dev, "read when ATN set\n");
+ if (status.ibsta & DCAS) {
+ retval = -EINTR;
+ } else {
+ retval = -EIO;
+ dev_dbg(&usb_dev->dev, "read when ATN set stat: 0x%06x\n", status.ibsta);
+ }
break;
case NIUSB_ADDRESSING_ERROR:
retval = -EIO;
@@ -1106,7 +1113,7 @@ static int ni_usb_request_system_control(struct gpib_board *board, int request_c
return 0;
}
-//FIXME maybe the interface should have a "pulse interface clear" function that can return an error?
+// FIXME maybe the interface should have a "pulse interface clear" function that can return an error?
static void ni_usb_interface_clear(struct gpib_board *board, int assert)
{
int retval;
@@ -1363,7 +1370,7 @@ static int ni_usb_parallel_poll(struct gpib_board *board, u8 *result)
return -ENOMEM;
out_data[i++] = NIUSB_IBRPP_ID;
- out_data[i++] = 0xf0; //FIXME: this should be the parallel poll timeout code
+ out_data[i++] = 0xf0; // FIXME: this should be the parallel poll timeout code
out_data[i++] = 0x0;
out_data[i++] = 0x0;
i += ni_usb_bulk_termination(&out_data[i]);
@@ -2079,10 +2086,10 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
}
if (buffer[++j] != 0x0) { // [6]
ready = 1;
- // NI-USB-HS+ sends 0xf here
+ // NI-USB-HS+ sends 0xf or 0x19 here
if (buffer[j] != 0x2 && buffer[j] != 0xe && buffer[j] != 0xf &&
- buffer[j] != 0x16) {
- dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x2, 0xe, 0xf or 0x16\n",
+ buffer[j] != 0x16 && buffer[j] != 0x19) {
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x2, 0xe, 0xf, 0x16 or 0x19\n",
j, (int)buffer[j]);
unexpected = 1;
}
@@ -2110,11 +2117,11 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
j, (int)buffer[j]);
unexpected = 1;
}
- if (buffer[++j] != 0x0) {
+ if (buffer[++j] != 0x0) { // [10] MC usb-488 sends 0x7 here, new HS+ sends 0x59
ready = 1;
- if (buffer[j] != 0x96 && buffer[j] != 0x7 && buffer[j] != 0x6e) {
-// [10] MC usb-488 sends 0x7 here
- dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x96, 0x07 or 0x6e\n",
+ if (buffer[j] != 0x96 && buffer[j] != 0x7 && buffer[j] != 0x6e &&
+ buffer[j] != 0x59) {
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x96, 0x07, 0x6e or 0x59\n",
j, (int)buffer[j]);
unexpected = 1;
}
diff --git a/drivers/staging/gpib/ni_usb/ni_usb_gpib.h b/drivers/gpib/ni_usb/ni_usb_gpib.h
index b011e131201c..688f5e08792f 100644
--- a/drivers/staging/gpib/ni_usb/ni_usb_gpib.h
+++ b/drivers/gpib/ni_usb/ni_usb_gpib.h
@@ -72,10 +72,10 @@ struct ni_usb_priv {
struct urb *bulk_urb;
struct urb *interrupt_urb;
u8 interrupt_buffer[0x11];
- struct mutex addressed_transfer_lock; // protect transfer lock
- struct mutex bulk_transfer_lock; // protect bulk message sends
- struct mutex control_transfer_lock; // protect control messages
- struct mutex interrupt_transfer_lock; // protect interrupt messages
+ struct mutex addressed_transfer_lock; // protect transfer lock
+ struct mutex bulk_transfer_lock; // protect bulk message sends
+ struct mutex control_transfer_lock; // protect control messages
+ struct mutex interrupt_transfer_lock; // protect interrupt messages
struct timer_list bulk_timer;
struct ni_usb_urb_ctx context;
int product_id;
@@ -145,7 +145,7 @@ enum ni_usb_error_codes {
* CIC with no listener
*/
NIUSB_NO_LISTENER_ERROR = 8,
- // get NIUSB_TIMEOUT_ERROR on board read/write timeout
+ /* get NIUSB_TIMEOUT_ERROR on board read/write timeout */
NIUSB_TIMEOUT_ERROR = 10,
};
diff --git a/drivers/staging/gpib/pc2/Makefile b/drivers/gpib/pc2/Makefile
index 481ee4296e1b..481ee4296e1b 100644
--- a/drivers/staging/gpib/pc2/Makefile
+++ b/drivers/gpib/pc2/Makefile
diff --git a/drivers/staging/gpib/pc2/pc2_gpib.c b/drivers/gpib/pc2/pc2_gpib.c
index 2282492025b7..9f3943d1df66 100644
--- a/drivers/staging/gpib/pc2/pc2_gpib.c
+++ b/drivers/gpib/pc2/pc2_gpib.c
@@ -36,7 +36,7 @@ static const int pc2_2a_iosize = 16;
static const int pc2a_reg_offset = 0x400;
static const int pc2_reg_offset = 1;
-//interrupt service routine
+// interrupt service routine
static irqreturn_t pc2_interrupt(int irq, void *arg);
static irqreturn_t pc2a_interrupt(int irq, void *arg);
@@ -593,7 +593,7 @@ static struct gpib_interface pc2a_cb7210_interface = {
.parallel_poll_configure = pc2_parallel_poll_configure,
.parallel_poll_response = pc2_parallel_poll_response,
.local_parallel_poll_mode = NULL, // XXX
- .line_status = NULL, //XXX
+ .line_status = NULL, // XXX
.update_status = pc2_update_status,
.primary_address = pc2_primary_address,
.secondary_address = pc2_secondary_address,
diff --git a/drivers/staging/gpib/tms9914/Makefile b/drivers/gpib/tms9914/Makefile
index 4705ab07f413..4705ab07f413 100644
--- a/drivers/staging/gpib/tms9914/Makefile
+++ b/drivers/gpib/tms9914/Makefile
diff --git a/drivers/staging/gpib/tms9914/tms9914.c b/drivers/gpib/tms9914/tms9914.c
index 04d57108efc7..72a11596a35e 100644
--- a/drivers/staging/gpib/tms9914/tms9914.c
+++ b/drivers/gpib/tms9914/tms9914.c
@@ -535,7 +535,7 @@ int tms9914_read(struct gpib_board *board, struct tms9914_priv *priv, u8 *buffer
buffer += num_bytes;
length -= num_bytes;
}
- // read last bytes if we havn't received an END yet
+ // read last bytes if we haven't received an END yet
if (*end == 0) {
// make sure we holdoff after last byte read
tms9914_set_holdoff_mode(priv, TMS9914_HOLDOFF_ALL);
@@ -647,7 +647,7 @@ static void check_my_address_state(struct gpib_board *board,
} else if (cmd_byte == MTA(board->pad)) {
priv->primary_talk_addressed = 1;
if (board->sad < 0)
- //make active talker
+ // make active talker
write_byte(priv, AUX_TON | AUX_CS, AUXCR);
} else if (board->sad >= 0 && priv->primary_talk_addressed &&
cmd_byte == MSA(board->sad)) {
@@ -730,7 +730,7 @@ irqreturn_t tms9914_interrupt_have_status(struct gpib_board *board, struct tms99
if (status0 & HR_SPAS) {
priv->spoll_status &= ~request_service_bit;
write_byte(priv, priv->spoll_status, SPMR);
- //FIXME: set SPOLL status bit
+ // FIXME: set SPOLL status bit
}
// record service request in status
if (status1 & HR_SRQ)
@@ -841,7 +841,7 @@ void tms9914_board_reset(struct tms9914_priv *priv)
/* parallel poll unconfigure */
write_byte(priv, 0, PPR);
- // request for data holdoff
+ /* request for data holdoff */
tms9914_set_holdoff_mode(priv, TMS9914_HOLDOFF_ALL);
}
EXPORT_SYMBOL_GPL(tms9914_board_reset);
@@ -852,7 +852,7 @@ void tms9914_online(struct gpib_board *board, struct tms9914_priv *priv)
tms9914_primary_address(board, priv, board->pad);
tms9914_secondary_address(board, priv, board->sad, board->sad >= 0);
- // enable tms9914 interrupts
+ /* enable tms9914 interrupts */
priv->imr0_bits |= HR_MACIE | HR_RLCIE | HR_ENDIE | HR_BOIE | HR_BIIE |
HR_SPASIE;
priv->imr1_bits |= HR_MAIE | HR_SRQIE | HR_UNCIE | HR_ERRIE | HR_IFCIE |
@@ -861,7 +861,7 @@ void tms9914_online(struct gpib_board *board, struct tms9914_priv *priv)
write_byte(priv, priv->imr1_bits, IMR1);
write_byte(priv, AUX_DAI, AUXCR);
- // turn off reset state
+ /* turn off reset state */
write_byte(priv, AUX_CHIP_RESET, AUXCR);
}
EXPORT_SYMBOL_GPL(tms9914_online);
diff --git a/drivers/staging/gpib/tnt4882/Makefile b/drivers/gpib/tnt4882/Makefile
index fa1687ad0d1b..fa1687ad0d1b 100644
--- a/drivers/staging/gpib/tnt4882/Makefile
+++ b/drivers/gpib/tnt4882/Makefile
diff --git a/drivers/staging/gpib/tnt4882/mite.c b/drivers/gpib/tnt4882/mite.c
index 847b96f411bd..847b96f411bd 100644
--- a/drivers/staging/gpib/tnt4882/mite.c
+++ b/drivers/gpib/tnt4882/mite.c
diff --git a/drivers/staging/gpib/tnt4882/mite.h b/drivers/gpib/tnt4882/mite.h
index 522d6b56cb7d..a1fdba9672a0 100644
--- a/drivers/staging/gpib/tnt4882/mite.h
+++ b/drivers/gpib/tnt4882/mite.h
@@ -219,15 +219,15 @@ void mite_list_devices(void);
#define MITE_AMHOST_A24_BLOCK 0x3b
enum mite_registers {
- MITE_IODWBSR = 0xc0, //IO Device Window Base Size Register
- MITE_CSIGR = 0x460, //chip signature
- MITE_IODWBSR_1 = 0xc4, // IO Device Window Base Size Register 1 (used by 6602 boards)
+ MITE_IODWBSR = 0xc0, // IO Device Window Base Size Register
+ MITE_CSIGR = 0x460, // chip signature
+ MITE_IODWBSR_1 = 0xc4, // IO Device Window Base Size Register 1 (used by 6602 boards)
MITE_IODWCR_1 = 0xf4
};
enum MITE_IODWBSR_bits {
- WENAB = 0x80, // window enable
- WENAB_6602 = 0x8c // window enable for 6602 boards
+ WENAB = 0x80, // window enable
+ WENAB_6602 = 0x8c // window enable for 6602 boards
};
#endif
diff --git a/drivers/staging/gpib/tnt4882/tnt4882_gpib.c b/drivers/gpib/tnt4882/tnt4882_gpib.c
index a17b69e34986..c03a976b7380 100644
--- a/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
+++ b/drivers/gpib/tnt4882/tnt4882_gpib.c
@@ -570,7 +570,7 @@ static irqreturn_t tnt4882_internal_interrupt(struct gpib_board *board)
if (isr0_bits & TNT_IFCI_BIT)
push_gpib_event(board, EVENT_IFC);
- //XXX don't need this wakeup, one below should do?
+ // XXX don't need this wakeup, one below should do?
// wake_up_interruptible(&board->wait);
if (isr3_bits & HR_NFF)
@@ -730,7 +730,7 @@ static int tnt4882_parallel_poll(struct gpib_board *board, u8 *result)
if (tnt_priv->nec7210_priv.type != NEC7210) {
tnt_priv->auxg_bits |= RPP2_BIT;
write_byte(&tnt_priv->nec7210_priv, tnt_priv->auxg_bits, AUXMR);
- udelay(2); //FIXME use parallel poll timeout
+ udelay(2); // FIXME use parallel poll timeout
*result = read_byte(&tnt_priv->nec7210_priv, CPTR);
tnt_priv->auxg_bits &= ~RPP2_BIT;
write_byte(&tnt_priv->nec7210_priv, tnt_priv->auxg_bits, AUXMR);
@@ -1522,7 +1522,6 @@ static void __exit tnt4882_exit_module(void)
#include <linux/moduleparam.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
-#include <linux/ioport.h>
#include <linux/io.h>
#include <pcmcia/cistpl.h>
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 44f922e10db2..c74da29253e8 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -3,6 +3,12 @@
# GPIO infrastructure and drivers
#
+config GPIOLIB_LEGACY
+ def_bool y
+
+config HAVE_SHARED_GPIOS
+ bool
+
menuconfig GPIOLIB
bool "GPIO Support"
help
@@ -39,13 +45,10 @@ config GPIOLIB_IRQCHIP
select IRQ_DOMAIN
bool
-config OF_GPIO_MM_GPIOCHIP
- bool
- help
- This adds support for the legacy 'struct of_mm_gpio_chip' interface
- from PowerPC. Existing drivers using this interface need to select
- this symbol, but new drivers should use the generic gpio-regmap
- infrastructure instead.
+config GPIO_SHARED
+ def_bool y
+ depends on HAVE_SHARED_GPIOS || COMPILE_TEST
+ select AUXILIARY_BUS
config DEBUG_GPIO
bool "Debug GPIO calls"
@@ -69,6 +72,14 @@ config GPIO_SYSFS
use the character device /dev/gpiochipN with the appropriate
ioctl() operations instead.
+config GPIO_SYSFS_LEGACY
+ bool "Enable legacy functionalities of the sysfs interface"
+ depends on GPIO_SYSFS
+ default y if GPIO_SYSFS
+ help
+ Say Y here if you want to enable the legacy, global GPIO
+ numberspace-based functionalities of the sysfs interface.
+
config GPIO_CDEV
bool "Character device (/dev/gpiochipN) support" if EXPERT
default y
@@ -292,7 +303,7 @@ config GPIO_EN7523
config GPIO_EP93XX
def_bool y
- depends on ARCH_EP93XX
+ depends on ARCH_EP93XX || COMPILE_TEST
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
@@ -397,8 +408,7 @@ config GPIO_IMX_SCU
config GPIO_IXP4XX
bool "Intel IXP4xx GPIO"
- depends on ARCH_IXP4XX
- depends on OF
+ depends on (ARCH_IXP4XX && OF) || COMPILE_TEST
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
@@ -426,6 +436,7 @@ config GPIO_LOONGSON_64BIT
depends on LOONGARCH || COMPILE_TEST
depends on OF_GPIO
select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
help
Say yes here to support the GPIO functionality of a number of
Loongson series of chips. The Loongson GPIO controller supports
@@ -465,7 +476,6 @@ config GPIO_MENZ127
config GPIO_MM_LANTIQ
bool "Lantiq Memory mapped GPIOs"
depends on LANTIQ && SOC_XWAY
- select OF_GPIO_MM_GPIOCHIP
help
This enables support for memory mapped GPIOs on the External Bus Unit
(EBU) found on Lantiq SoCs. The GPIOs are output only as they are
@@ -474,7 +484,6 @@ config GPIO_MM_LANTIQ
config GPIO_MPC5200
def_bool y
depends on PPC_MPC52xx
- select OF_GPIO_MM_GPIOCHIP
config GPIO_MPC8XXX
bool "MPC512x/MPC8xxx/QorIQ GPIO support"
@@ -724,7 +733,8 @@ config GPIO_TANGIER
If built as a module its name will be gpio-tangier.
config GPIO_TB10X
- bool
+ bool "Abilis Systems TB10x GPIO controller"
+ depends on ARC_PLAT_TB10X || COMPILE_TEST
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
select OF_GPIO
@@ -873,7 +883,7 @@ config GPIO_ZYNQMP_MODEPIN
config GPIO_LOONGSON1
tristate "Loongson1 GPIO support"
- depends on MACH_LOONGSON32
+ depends on MACH_LOONGSON32 || COMPILE_TEST
select GPIO_GENERIC
help
Say Y or M here to support GPIO on Loongson1 SoCs.
@@ -1183,14 +1193,18 @@ config GPIO_PCA953X
4 bits: pca9536, pca9537
8 bits: max7310, max7315, pca6107, pca9534, pca9538, pca9554,
- pca9556, pca9557, pca9574, tca6408, tca9554, xra1202
+ pca9556, pca9557, pca9574, tca6408, tca9554, xra1202,
+ pcal6408, pcal9554b, tca9538
16 bits: max7312, max7313, pca9535, pca9539, pca9555, pca9575,
- tca6416
+ tca6416, pca6416, pcal6416, pcal9535, pcal9555a, max7318,
+ tca9539
+
+ 18 bits: tca6418
- 24 bits: tca6424
+ 24 bits: tca6424, pcal6524
- 40 bits: pca9505, pca9698
+ 40 bits: pca9505, pca9698, pca9506
config GPIO_PCA953X_IRQ
bool "Interrupt controller support for PCA953x"
@@ -1263,6 +1277,7 @@ config GPIO_ADP5520
config GPIO_ADP5585
tristate "GPIO Support for ADP5585"
depends on MFD_ADP5585
+ select GPIOLIB_IRQCHIP
help
This option enables support for the GPIO function found in the Analog
Devices ADP5585.
@@ -1397,7 +1412,7 @@ config HTC_EGPIO
config GPIO_ELKHARTLAKE
tristate "Intel Elkhart Lake PSE GPIO support"
- depends on X86 || COMPILE_TEST
+ depends on INTEL_EHL_PSE_IO
select GPIO_TANGIER
help
Select this option to enable GPIO support for Intel Elkhart Lake
@@ -1464,12 +1479,34 @@ config GPIO_LP87565
This driver can also be built as a module. If so, the module will be
called gpio-lp87565.
+config GPIO_MACSMC
+ tristate "Apple Mac SMC GPIO"
+ depends on MFD_MACSMC
+ help
+ Support for GPIOs controlled by the SMC microcontroller on Apple Mac
+ systems.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-macsmc.
+
config GPIO_MADERA
tristate "Cirrus Logic Madera class codecs"
depends on PINCTRL_MADERA
help
Support for GPIOs on Cirrus Logic Madera class codecs.
+config GPIO_MAX7360
+ tristate "MAX7360 GPIO support"
+ depends on MFD_MAX7360
+ select GPIO_REGMAP
+ select REGMAP_IRQ
+ help
+ Allows using MAX7360 I/O Expander PWM lines as GPIO and keypad COL
+ lines as GPO.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-max7360.
+
config GPIO_MAX77620
tristate "GPIO support for PMIC MAX77620 and MAX20024"
depends on MFD_MAX77620
@@ -1500,8 +1537,20 @@ config GPIO_MAX77759
This driver can also be built as a module. If so, the module will be
called gpio-max77759.
+config GPIO_NCT6694
+ tristate "Nuvoton NCT6694 GPIO controller support"
+ depends on MFD_NCT6694
+ select GENERIC_IRQ_CHIP
+ select GPIOLIB_IRQCHIP
+ help
+ This driver supports 8 GPIO pins per bank that can all be interrupt
+ sources.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-nct6694.
+
config GPIO_PALMAS
- bool "TI PALMAS series PMICs GPIO"
+ tristate "TI PALMAS series PMICs GPIO"
depends on MFD_PALMAS
help
Select this option to enable GPIO driver for the TI PALMAS
@@ -1515,6 +1564,15 @@ config GPIO_PMIC_EIC_SPRD
help
Say yes here to support Spreadtrum PMIC EIC device.
+config GPIO_QIXIS_FPGA
+ tristate "NXP QIXIS FPGA GPIO support"
+ depends on MFD_SIMPLE_MFD_I2C || COMPILE_TEST
+ select GPIO_REGMAP
+ help
+ This enables support for the GPIOs found in the QIXIS FPGA which is
+ integrated on some NXP Layerscape boards such as LX2160ARDB and
+ LS1046AQDS.
+
config GPIO_RC5T583
bool "RICOH RC5T583 GPIO"
depends on MFD_RC5T583
@@ -1537,7 +1595,7 @@ config GPIO_SL28CPLD
called gpio-sl28cpld.
config GPIO_STMPE
- bool "STMPE GPIOs"
+ tristate "STMPE GPIOs"
depends on MFD_STMPE
depends on OF_GPIO
select GPIOLIB_IRQCHIP
@@ -1901,6 +1959,17 @@ config GPIO_MPSSE
GPIO driver for FTDI's MPSSE interface. These can do input and
output. Each MPSSE provides 16 IO pins.
+config GPIO_USBIO
+ tristate "Intel USBIO GPIO support"
+ depends on USB_USBIO
+ default USB_USBIO
+ help
+ Select this option to enable GPIO driver for the INTEL
+ USBIO driver stack.
+
+ This driver can also be built as a module. If so, the module
+ will be called gpio_usbio.
+
endmenu
menu "Virtual GPIO drivers"
@@ -1956,6 +2025,15 @@ config GPIO_SIM
This enables the GPIO simulator - a configfs-based GPIO testing
driver.
+config GPIO_SHARED_PROXY
+ tristate "Proxy driver for non-exclusive GPIOs"
+ default m
+ depends on GPIO_SHARED || COMPILE_TEST
+ select AUXILIARY_BUS
+ help
+ This enables the GPIO shared proxy driver - an abstraction layer
+ for GPIO pins that are shared by multiple devices.
+
endmenu
menu "GPIO Debugging utilities"
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 88dedd298256..2421a8fd3733 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -5,13 +5,14 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
obj-$(CONFIG_GPIOLIB) += gpiolib.o
obj-$(CONFIG_GPIOLIB) += gpiolib-devres.o
-obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o
+obj-$(CONFIG_GPIOLIB_LEGACY) += gpiolib-legacy.o
obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
obj-$(CONFIG_GPIO_CDEV) += gpiolib-cdev.o
obj-$(CONFIG_GPIO_SYSFS) += gpiolib-sysfs.o
obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
gpiolib-acpi-y := gpiolib-acpi-core.o gpiolib-acpi-quirks.o
obj-$(CONFIG_GPIOLIB) += gpiolib-swnode.o
+obj-$(CONFIG_GPIO_SHARED) += gpiolib-shared.o
# Device drivers. Generally keep list sorted alphabetically
obj-$(CONFIG_GPIO_REGMAP) += gpio-regmap.o
@@ -99,12 +100,14 @@ obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o
obj-$(CONFIG_GPIO_LP87565) += gpio-lp87565.o
obj-$(CONFIG_GPIO_LPC18XX) += gpio-lpc18xx.o
obj-$(CONFIG_GPIO_LPC32XX) += gpio-lpc32xx.o
+obj-$(CONFIG_GPIO_MACSMC) += gpio-macsmc.o
obj-$(CONFIG_GPIO_MADERA) += gpio-madera.o
obj-$(CONFIG_GPIO_MAX3191X) += gpio-max3191x.o
obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
obj-$(CONFIG_GPIO_MAX732X) += gpio-max732x.o
+obj-$(CONFIG_GPIO_MAX7360) += gpio-max7360.o
obj-$(CONFIG_GPIO_MAX77620) += gpio-max77620.o
obj-$(CONFIG_GPIO_MAX77650) += gpio-max77650.o
obj-$(CONFIG_GPIO_MAX77759) += gpio-max77759.o
@@ -127,6 +130,7 @@ obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o
obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
+obj-$(CONFIG_GPIO_NCT6694) += gpio-nct6694.o
obj-$(CONFIG_GPIO_NOMADIK) += gpio-nomadik.o
obj-$(CONFIG_GPIO_NPCM_SGPIO) += gpio-npcm-sgpio.o
obj-$(CONFIG_GPIO_OCTEON) += gpio-octeon.o
@@ -143,6 +147,7 @@ obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
obj-$(CONFIG_GPIO_PMIC_EIC_SPRD) += gpio-pmic-eic-sprd.o
obj-$(CONFIG_GPIO_POLARFIRE_SOC) += gpio-mpfs.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
+obj-$(CONFIG_GPIO_QIXIS_FPGA) += gpio-qixis-fpga.o
obj-$(CONFIG_GPIO_RASPBERRYPI_EXP) += gpio-raspberrypi-exp.o
obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
obj-$(CONFIG_GPIO_RCAR) += gpio-rcar.o
@@ -156,6 +161,7 @@ obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
obj-$(CONFIG_GPIO_SAMA5D2_PIOBU) += gpio-sama5d2-piobu.o
obj-$(CONFIG_GPIO_SCH311X) += gpio-sch311x.o
obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
+obj-$(CONFIG_GPIO_SHARED_PROXY) += gpio-shared-proxy.o
obj-$(CONFIG_GPIO_SIFIVE) += gpio-sifive.o
obj-$(CONFIG_GPIO_SIM) += gpio-sim.o
obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o
@@ -191,6 +197,7 @@ obj-$(CONFIG_GPIO_TS5500) += gpio-ts5500.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
obj-$(CONFIG_GPIO_UNIPHIER) += gpio-uniphier.o
+obj-$(CONFIG_GPIO_USBIO) += gpio-usbio.o
obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VIRTUSER) += gpio-virtuser.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 4a8b349f2483..5acaeab029ec 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -86,17 +86,6 @@ Work items:
-------------------------------------------------------------------------------
-Get rid of <linux/gpio/legacy-of-mm-gpiochip.h>
-
-Work items:
-
-- Get rid of struct of_mm_gpio_chip altogether: use the generic MMIO
- GPIO for all current users (see below). Delete struct of_mm_gpio_chip,
- to_of_mm_gpio_chip(), of_mm_gpiochip_add_data(), of_mm_gpiochip_remove(),
- CONFIG_OF_GPIO_MM_GPIOCHIP from the kernel.
-
--------------------------------------------------------------------------------
-
Collect drivers
Collect GPIO drivers from arch/* and other places that should be placed
@@ -171,28 +160,12 @@ cannot be converted yet, but watch this space!
-------------------------------------------------------------------------------
-Convert all GPIO chips to using the new, value returning line setters
-
-struct gpio_chip's set() and set_multiple() callbacks are now deprecated. They
-return void and thus do not allow drivers to indicate failure to set the line
-value back to the caller.
-
-We've now added new variants - set_rv() and set_multiple_rv() that return an
-integer. Let's convert all GPIO drivers treewide to use the new callbacks,
-remove the old ones and finally rename the new ones back to the old names.
-
--------------------------------------------------------------------------------
-
-Extend the sysfs ABI to allow exporting lines by their HW offsets
-
-The need to support the sysfs GPIO class is one of the main obstacles to
-removing the global GPIO numberspace from the kernel. In order to wean users
-off using global numbers from user-space, extend the existing interface with
-new per-gpiochip export/unexport attributes that allow to refer to GPIOs using
-their hardware offsets within the chip.
+Remove legacy sysfs features
-Encourage users to switch to using them and eventually remove the existing
-global export/unexport attribues.
+We have two parallel per-chip class devices and per-exported-line attribute
+groups in sysfs. One uses the obsolete global GPIO numberspace, the other
+relies on hardware offsets of pins within the chip. Remove the former
+once user-space has switched to using the latter.
-------------------------------------------------------------------------------
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index ffe7e1cb6b23..fe5c10cd5c32 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -59,6 +59,7 @@ static const struct regmap_config idio_16_regmap_config = {
.reg_stride = 1,
.val_bits = 8,
.io_port = true,
+ .max_register = 0x5,
.wr_table = &idio_16_wr_table,
.rd_table = &idio_16_rd_table,
.volatile_table = &idio_16_rd_table,
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 4dd5c2c330bb..c226524efeba 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -141,8 +141,8 @@ static int gen_74x164_probe(struct spi_device *spi)
chip->gpio_chip.label = spi->modalias;
chip->gpio_chip.direction_output = gen_74x164_direction_output;
chip->gpio_chip.get = gen_74x164_get_value;
- chip->gpio_chip.set_rv = gen_74x164_set_value;
- chip->gpio_chip.set_multiple_rv = gen_74x164_set_multiple;
+ chip->gpio_chip.set = gen_74x164_set_value;
+ chip->gpio_chip.set_multiple = gen_74x164_set_multiple;
chip->gpio_chip.base = -1;
chip->gpio_chip.ngpio = GEN_74X164_NUMBER_GPIOS * chip->registers;
chip->gpio_chip.can_sleep = true;
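The hunks above are part of the treewide rename finishing the set_rv() transition: the integer-returning setters take over the plain set()/set_multiple() names. A minimal sketch of the resulting callback shape (hypothetical "foo" driver, placeholder register offset):

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/regmap.h>

#define FOO_DATA_OUT	0x00	/* placeholder register */

struct foo_gpio {
	struct gpio_chip gc;
	struct regmap *regmap;
};

/* The renamed .set() callback returns an int so bus errors propagate. */
static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct foo_gpio *foo = gpiochip_get_data(gc);

	return regmap_update_bits(foo->regmap, FOO_DATA_OUT,
				  BIT(offset), value ? BIT(offset) : 0);
}

static void foo_gpio_init_chip(struct foo_gpio *foo)
{
	foo->gc.set = foo_gpio_set;	/* was .set_rv before the rename */
}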
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index c7ac5a9ffb1f..bd2cc5f4f851 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -8,6 +8,7 @@
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -18,8 +19,8 @@
#define MMIO_74XX_BIT_CNT(x) ((x) & GENMASK(7, 0))
struct mmio_74xx_gpio_priv {
- struct gpio_chip gc;
- unsigned flags;
+ struct gpio_generic_chip gen_gc;
+ unsigned int flags;
};
static const struct of_device_id mmio_74xx_gpio_ids[] = {
@@ -99,16 +100,15 @@ static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct mmio_74xx_gpio_priv *priv = gpiochip_get_data(gc);
- if (priv->flags & MMIO_74XX_DIR_OUT) {
- gc->set(gc, gpio, val);
- return 0;
- }
+ if (priv->flags & MMIO_74XX_DIR_OUT)
+ return gpio_generic_chip_set(&priv->gen_gc, gpio, val);
return -ENOTSUPP;
}
static int mmio_74xx_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config = { };
struct mmio_74xx_gpio_priv *priv;
void __iomem *dat;
int err;
@@ -123,19 +123,21 @@ static int mmio_74xx_gpio_probe(struct platform_device *pdev)
if (IS_ERR(dat))
return PTR_ERR(dat);
- err = bgpio_init(&priv->gc, &pdev->dev,
- DIV_ROUND_UP(MMIO_74XX_BIT_CNT(priv->flags), 8),
- dat, NULL, NULL, NULL, NULL, 0);
+ config.dev = &pdev->dev;
+ config.sz = DIV_ROUND_UP(MMIO_74XX_BIT_CNT(priv->flags), 8);
+ config.dat = dat;
+
+ err = gpio_generic_chip_init(&priv->gen_gc, &config);
if (err)
return err;
- priv->gc.direction_input = mmio_74xx_dir_in;
- priv->gc.direction_output = mmio_74xx_dir_out;
- priv->gc.get_direction = mmio_74xx_get_direction;
- priv->gc.ngpio = MMIO_74XX_BIT_CNT(priv->flags);
- priv->gc.owner = THIS_MODULE;
+ priv->gen_gc.gc.direction_input = mmio_74xx_dir_in;
+ priv->gen_gc.gc.direction_output = mmio_74xx_dir_out;
+ priv->gen_gc.gc.get_direction = mmio_74xx_get_direction;
+ priv->gen_gc.gc.ngpio = MMIO_74XX_BIT_CNT(priv->flags);
+ priv->gen_gc.gc.owner = THIS_MODULE;
- return devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
+ return devm_gpiochip_add_data(&pdev->dev, &priv->gen_gc.gc, priv);
}
static struct platform_driver mmio_74xx_gpio_driver = {
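The mmio-74xx conversion above is a template for moving off bgpio_init(): fill a gpio_generic_chip_config, call gpio_generic_chip_init(), and register the embedded gpio_chip. A reduced sketch, assuming only the config fields this patch uses (dev, sz, dat) and hypothetical "foo" names elsewhere:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/generic.h>
#include <linux/platform_device.h>

static int foo_gpio_probe(struct platform_device *pdev)
{
	struct gpio_generic_chip_config config = { };
	struct gpio_generic_chip *gen_gc;
	void __iomem *dat;
	int err;

	gen_gc = devm_kzalloc(&pdev->dev, sizeof(*gen_gc), GFP_KERNEL);
	if (!gen_gc)
		return -ENOMEM;

	dat = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dat))
		return PTR_ERR(dat);

	config.dev = &pdev->dev;	/* owning device */
	config.sz = 4;			/* register width in bytes */
	config.dat = dat;		/* data register */

	err = gpio_generic_chip_init(gen_gc, &config);
	if (err)
		return err;

	/* Registration still goes through the embedded gpio_chip. */
	return devm_gpiochip_add_data(&pdev->dev, &gen_gc->gc, gen_gc);
}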
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index dc2b941c3726..e5ac2d211013 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -430,7 +430,7 @@ static int adnp_gpio_setup(struct adnp *adnp, unsigned int num_gpios,
chip->direction_input = adnp_gpio_direction_input;
chip->direction_output = adnp_gpio_direction_output;
chip->get = adnp_gpio_get;
- chip->set_rv = adnp_gpio_set;
+ chip->set = adnp_gpio_set;
chip->can_sleep = true;
if (IS_ENABLED(CONFIG_DEBUG_FS))
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index 57d12c10cbda..6305c8b7dc05 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -122,7 +122,7 @@ static int adp5520_gpio_probe(struct platform_device *pdev)
gc->direction_input = adp5520_gpio_direction_input;
gc->direction_output = adp5520_gpio_direction_output;
gc->get = adp5520_gpio_get_value;
- gc->set_rv = adp5520_gpio_set_value;
+ gc->set = adp5520_gpio_set_value;
gc->can_sleep = true;
gc->base = pdata->gpio_start;
diff --git a/drivers/gpio/gpio-adp5585.c b/drivers/gpio/gpio-adp5585.c
index d5c0f1b267c8..0fd3cc26d017 100644
--- a/drivers/gpio/gpio-adp5585.c
+++ b/drivers/gpio/gpio-adp5585.c
@@ -4,67 +4,131 @@
*
* Copyright 2022 NXP
* Copyright 2024 Ideas on Board Oy
+ * Copyright 2025 Analog Devices, Inc.
*/
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/mfd/adp5585.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/types.h>
-#define ADP5585_GPIO_MAX 11
+/*
+ * Bank 0 covers pins "GPIO 1/R0" to "GPIO 6/R5", numbered 0 to 5 by the
+ * driver, and bank 1 covers pins "GPIO 7/C0" to "GPIO 11/C4", numbered 6 to
+ * 10. Some variants of the ADP5585 don't support "GPIO 6/R5". As the driver
+ * uses identical GPIO numbering for all variants to avoid confusion, GPIO 5 is
+ * marked as reserved in the device tree for variants that don't support it.
+ */
+#define ADP5585_BANK(n) ((n) >= 6 ? 1 : 0)
+#define ADP5585_BIT(n) ((n) >= 6 ? BIT((n) - 6) : BIT(n))
+
+/*
+ * Bank 0 covers pins "GPIO 1/R0" to "GPIO 8/R7", numbered 0 to 7 by the
+ * driver, bank 1 covers pins "GPIO 9/C0" to "GPIO 16/C7", numbered 8 to
+ * 15 and bank 3 covers pins "GPIO 17/C8" to "GPIO 19/C10", numbered 16 to 18.
+ */
+#define ADP5589_BANK(n) ((n) >> 3)
+#define ADP5589_BIT(n) BIT((n) & 0x7)
+
+struct adp5585_gpio_chip {
+ int (*bank)(unsigned int off);
+ int (*bit)(unsigned int off);
+ unsigned int debounce_dis_a;
+ unsigned int rpull_cfg_a;
+ unsigned int gpo_data_a;
+ unsigned int gpo_out_a;
+ unsigned int gpio_dir_a;
+ unsigned int gpi_stat_a;
+ unsigned int gpi_int_lvl_a;
+ unsigned int gpi_ev_a;
+ unsigned int gpi_ev_min;
+ unsigned int gpi_ev_max;
+ bool has_bias_hole;
+};
struct adp5585_gpio_dev {
struct gpio_chip gpio_chip;
+ struct notifier_block nb;
+ const struct adp5585_gpio_chip *info;
struct regmap *regmap;
+ unsigned long irq_mask;
+ unsigned long irq_en;
+ unsigned long irq_active_high;
+ /* used for irqchip bus locking */
+ struct mutex bus_lock;
};
+static int adp5585_gpio_bank(unsigned int off)
+{
+ return ADP5585_BANK(off);
+}
+
+static int adp5585_gpio_bit(unsigned int off)
+{
+ return ADP5585_BIT(off);
+}
+
+static int adp5589_gpio_bank(unsigned int off)
+{
+ return ADP5589_BANK(off);
+}
+
+static int adp5589_gpio_bit(unsigned int off)
+{
+ return ADP5589_BIT(off);
+}
+
static int adp5585_gpio_get_direction(struct gpio_chip *chip, unsigned int off)
{
struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
unsigned int val;
- regmap_read(adp5585_gpio->regmap, ADP5585_GPIO_DIRECTION_A + bank, &val);
+ regmap_read(adp5585_gpio->regmap, info->gpio_dir_a + info->bank(off), &val);
- return val & bit ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+ return val & info->bit(off) ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
}
static int adp5585_gpio_direction_input(struct gpio_chip *chip, unsigned int off)
{
struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
- return regmap_clear_bits(adp5585_gpio->regmap,
- ADP5585_GPIO_DIRECTION_A + bank, bit);
+ return regmap_clear_bits(adp5585_gpio->regmap, info->gpio_dir_a + info->bank(off),
+ info->bit(off));
}
static int adp5585_gpio_direction_output(struct gpio_chip *chip, unsigned int off, int val)
{
struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ unsigned int bank = info->bank(off);
+ unsigned int bit = info->bit(off);
int ret;
- ret = regmap_update_bits(adp5585_gpio->regmap,
- ADP5585_GPO_DATA_OUT_A + bank, bit,
- val ? bit : 0);
+ ret = regmap_update_bits(adp5585_gpio->regmap, info->gpo_data_a + bank,
+ bit, val ? bit : 0);
if (ret)
return ret;
- return regmap_set_bits(adp5585_gpio->regmap,
- ADP5585_GPIO_DIRECTION_A + bank, bit);
+ return regmap_set_bits(adp5585_gpio->regmap, info->gpio_dir_a + bank,
+ bit);
}
static int adp5585_gpio_get_value(struct gpio_chip *chip, unsigned int off)
{
struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ unsigned int bank = info->bank(off);
+ unsigned int bit = info->bit(off);
unsigned int reg;
unsigned int val;
@@ -79,8 +143,8 @@ static int adp5585_gpio_get_value(struct gpio_chip *chip, unsigned int off)
* .direction_input(), .direction_output() or .set() operations racing
* with this.
*/
- regmap_read(adp5585_gpio->regmap, ADP5585_GPIO_DIRECTION_A + bank, &val);
- reg = val & bit ? ADP5585_GPO_DATA_OUT_A : ADP5585_GPI_STATUS_A;
+ regmap_read(adp5585_gpio->regmap, info->gpio_dir_a + bank, &val);
+ reg = val & bit ? info->gpo_data_a : info->gpi_stat_a;
regmap_read(adp5585_gpio->regmap, reg + bank, &val);
return !!(val & bit);
@@ -90,17 +154,17 @@ static int adp5585_gpio_set_value(struct gpio_chip *chip, unsigned int off,
int val)
{
struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ unsigned int bit = adp5585_gpio->info->bit(off);
- return regmap_update_bits(adp5585_gpio->regmap,
- ADP5585_GPO_DATA_OUT_A + bank,
+ return regmap_update_bits(adp5585_gpio->regmap, info->gpo_data_a + info->bank(off),
bit, val ? bit : 0);
}
static int adp5585_gpio_set_bias(struct adp5585_gpio_dev *adp5585_gpio,
unsigned int off, unsigned int bias)
{
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
unsigned int bit, reg, mask, val;
/*
@@ -108,8 +172,10 @@ static int adp5585_gpio_set_bias(struct adp5585_gpio_dev *adp5585_gpio,
* consecutive registers ADP5585_RPULL_CONFIG_*, with a hole of 4 bits
* after R5.
*/
- bit = off * 2 + (off > 5 ? 4 : 0);
- reg = ADP5585_RPULL_CONFIG_A + bit / 8;
+ bit = off * 2;
+ if (info->has_bias_hole)
+ bit += (off > 5 ? 4 : 0);
+ reg = info->rpull_cfg_a + bit / 8;
mask = ADP5585_Rx_PULL_CFG_MASK << (bit % 8);
val = bias << (bit % 8);
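Working the new bias math through both variants makes the "hole" visible (assuming 2 configuration bits per pin, which is what ADP5585_Rx_PULL_CFG_MASK covers):

/*
 * GPIO offset 7, ADP5585 (has_bias_hole):
 *   bit = 7 * 2 + 4 = 18
 *   reg = rpull_cfg_a + 18 / 8 = rpull_cfg_a + 2
 *   field sits at bits 2..3 of that register (18 % 8 = 2)
 *
 * GPIO offset 7, ADP5589 (no hole):
 *   bit = 7 * 2 = 14
 *   reg = rpull_cfg_a + 1, field at bits 6..7
 */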
@@ -119,22 +185,22 @@ static int adp5585_gpio_set_bias(struct adp5585_gpio_dev *adp5585_gpio,
static int adp5585_gpio_set_drive(struct adp5585_gpio_dev *adp5585_gpio,
unsigned int off, enum pin_config_param drive)
{
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ unsigned int bit = adp5585_gpio->info->bit(off);
return regmap_update_bits(adp5585_gpio->regmap,
- ADP5585_GPO_OUT_MODE_A + bank, bit,
+ info->gpo_out_a + info->bank(off), bit,
drive == PIN_CONFIG_DRIVE_OPEN_DRAIN ? bit : 0);
}
static int adp5585_gpio_set_debounce(struct adp5585_gpio_dev *adp5585_gpio,
unsigned int off, unsigned int debounce)
{
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ unsigned int bit = adp5585_gpio->info->bit(off);
return regmap_update_bits(adp5585_gpio->regmap,
- ADP5585_DEBOUNCE_DIS_A + bank, bit,
+ info->debounce_dis_a + info->bank(off), bit,
debounce ? 0 : bit);
}
@@ -172,11 +238,175 @@ static int adp5585_gpio_set_config(struct gpio_chip *chip, unsigned int off,
};
}
+static int adp5585_gpio_request(struct gpio_chip *chip, unsigned int off)
+{
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ struct device *dev = chip->parent;
+ struct adp5585_dev *adp5585 = dev_get_drvdata(dev->parent);
+ const struct adp5585_regs *regs = adp5585->regs;
+ int ret;
+
+ ret = test_and_set_bit(off, adp5585->pin_usage);
+ if (ret)
+ return -EBUSY;
+
+ /* make sure it's configured for GPIO */
+ return regmap_clear_bits(adp5585_gpio->regmap,
+ regs->pin_cfg_a + info->bank(off),
+ info->bit(off));
+}
+
+static void adp5585_gpio_free(struct gpio_chip *chip, unsigned int off)
+{
+ struct device *dev = chip->parent;
+ struct adp5585_dev *adp5585 = dev_get_drvdata(dev->parent);
+
+ clear_bit(off, adp5585->pin_usage);
+}
+
+static int adp5585_gpio_key_event(struct notifier_block *nb, unsigned long key,
+ void *data)
+{
+ struct adp5585_gpio_dev *adp5585_gpio = container_of(nb, struct adp5585_gpio_dev, nb);
+ struct device *dev = adp5585_gpio->gpio_chip.parent;
+ unsigned long key_press = (unsigned long)data;
+ unsigned int irq, irq_type;
+ struct irq_data *irqd;
+ bool active_high;
+ unsigned int off;
+
+ /* make sure the event is for me */
+ if (key < adp5585_gpio->info->gpi_ev_min || key > adp5585_gpio->info->gpi_ev_max)
+ return NOTIFY_DONE;
+
+ off = key - adp5585_gpio->info->gpi_ev_min;
+ active_high = test_bit(off, &adp5585_gpio->irq_active_high);
+
+ irq = irq_find_mapping(adp5585_gpio->gpio_chip.irq.domain, off);
+ if (!irq)
+ return NOTIFY_BAD;
+
+ irqd = irq_get_irq_data(irq);
+ if (!irqd) {
+ dev_err(dev, "Could not get irq(%u) data\n", irq);
+ return NOTIFY_BAD;
+ }
+
+ dev_dbg_ratelimited(dev, "gpio-keys event(%u) press=%lu, a_high=%u\n",
+ off, key_press, active_high);
+
+ if (!active_high)
+ key_press = !key_press;
+
+ irq_type = irqd_get_trigger_type(irqd);
+
+ if ((irq_type & IRQ_TYPE_EDGE_RISING && key_press) ||
+ (irq_type & IRQ_TYPE_EDGE_FALLING && !key_press))
+ handle_nested_irq(irq);
+
+ return NOTIFY_STOP;
+}
+
+static void adp5585_irq_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(gc);
+
+ mutex_lock(&adp5585_gpio->bus_lock);
+}
+
+static void adp5585_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ bool active_high = test_bit(hwirq, &adp5585_gpio->irq_active_high);
+ bool enabled = test_bit(hwirq, &adp5585_gpio->irq_en);
+ bool masked = test_bit(hwirq, &adp5585_gpio->irq_mask);
+ unsigned int bank = adp5585_gpio->info->bank(hwirq);
+ unsigned int bit = adp5585_gpio->info->bit(hwirq);
+
+ if (masked && !enabled)
+ goto out_unlock;
+ if (!masked && enabled)
+ goto out_unlock;
+
+ regmap_update_bits(adp5585_gpio->regmap, info->gpi_int_lvl_a + bank, bit,
+ active_high ? bit : 0);
+ regmap_update_bits(adp5585_gpio->regmap, info->gpi_ev_a + bank, bit,
+ masked ? 0 : bit);
+ assign_bit(hwirq, &adp5585_gpio->irq_en, !masked);
+
+out_unlock:
+ mutex_unlock(&adp5585_gpio->bus_lock);
+}
+
+static void adp5585_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ __set_bit(hwirq, &adp5585_gpio->irq_mask);
+ gpiochip_disable_irq(gc, hwirq);
+}
+
+static void adp5585_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(gc, hwirq);
+ __clear_bit(hwirq, &adp5585_gpio->irq_mask);
+}
+
+static int adp5585_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ if (!(type & IRQ_TYPE_EDGE_BOTH))
+ return -EINVAL;
+
+ assign_bit(hwirq, &adp5585_gpio->irq_active_high,
+ type == IRQ_TYPE_EDGE_RISING);
+
+ irq_set_handler_locked(d, handle_edge_irq);
+ return 0;
+}
+
+static const struct irq_chip adp5585_irq_chip = {
+ .name = "adp5585",
+ .irq_mask = adp5585_irq_mask,
+ .irq_unmask = adp5585_irq_unmask,
+ .irq_bus_lock = adp5585_irq_bus_lock,
+ .irq_bus_sync_unlock = adp5585_irq_bus_sync_unlock,
+ .irq_set_type = adp5585_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static void adp5585_gpio_unreg_notifier(void *data)
+{
+ struct adp5585_gpio_dev *adp5585_gpio = data;
+ struct device *dev = adp5585_gpio->gpio_chip.parent;
+ struct adp5585_dev *adp5585 = dev_get_drvdata(dev->parent);
+
+ blocking_notifier_chain_unregister(&adp5585->event_notifier,
+ &adp5585_gpio->nb);
+}
+
static int adp5585_gpio_probe(struct platform_device *pdev)
{
struct adp5585_dev *adp5585 = dev_get_drvdata(pdev->dev.parent);
+ const struct platform_device_id *id = platform_get_device_id(pdev);
struct adp5585_gpio_dev *adp5585_gpio;
struct device *dev = &pdev->dev;
+ struct gpio_irq_chip *girq;
struct gpio_chip *gc;
int ret;
@@ -186,6 +416,10 @@ static int adp5585_gpio_probe(struct platform_device *pdev)
adp5585_gpio->regmap = adp5585->regmap;
+ adp5585_gpio->info = (const struct adp5585_gpio_chip *)id->driver_data;
+ if (!adp5585_gpio->info)
+ return -ENODEV;
+
device_set_of_node_from_dev(dev, dev->parent);
gc = &adp5585_gpio->gpio_chip;
@@ -194,15 +428,45 @@ static int adp5585_gpio_probe(struct platform_device *pdev)
gc->direction_input = adp5585_gpio_direction_input;
gc->direction_output = adp5585_gpio_direction_output;
gc->get = adp5585_gpio_get_value;
- gc->set_rv = adp5585_gpio_set_value;
+ gc->set = adp5585_gpio_set_value;
gc->set_config = adp5585_gpio_set_config;
+ gc->request = adp5585_gpio_request;
+ gc->free = adp5585_gpio_free;
gc->can_sleep = true;
gc->base = -1;
- gc->ngpio = ADP5585_GPIO_MAX;
+ gc->ngpio = adp5585->n_pins;
gc->label = pdev->name;
gc->owner = THIS_MODULE;
+ if (device_property_present(dev->parent, "interrupt-controller")) {
+ if (!adp5585->irq)
+ return dev_err_probe(dev, -EINVAL,
+ "Unable to serve as interrupt controller without IRQ\n");
+
+ girq = &adp5585_gpio->gpio_chip.irq;
+ gpio_irq_chip_set_chip(girq, &adp5585_irq_chip);
+ girq->handler = handle_bad_irq;
+ girq->threaded = true;
+
+ adp5585_gpio->nb.notifier_call = adp5585_gpio_key_event;
+ ret = blocking_notifier_chain_register(&adp5585->event_notifier,
+ &adp5585_gpio->nb);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, adp5585_gpio_unreg_notifier,
+ adp5585_gpio);
+ if (ret)
+ return ret;
+ }
+
+ /* everything masked by default */
+ adp5585_gpio->irq_mask = ~0UL;
+
+ ret = devm_mutex_init(dev, &adp5585_gpio->bus_lock);
+ if (ret)
+ return ret;
ret = devm_gpiochip_add_data(dev, &adp5585_gpio->gpio_chip,
adp5585_gpio);
if (ret)
@@ -211,8 +475,40 @@ static int adp5585_gpio_probe(struct platform_device *pdev)
return 0;
}
+static const struct adp5585_gpio_chip adp5585_gpio_chip_info = {
+ .bank = adp5585_gpio_bank,
+ .bit = adp5585_gpio_bit,
+ .debounce_dis_a = ADP5585_DEBOUNCE_DIS_A,
+ .rpull_cfg_a = ADP5585_RPULL_CONFIG_A,
+ .gpo_data_a = ADP5585_GPO_DATA_OUT_A,
+ .gpo_out_a = ADP5585_GPO_OUT_MODE_A,
+ .gpio_dir_a = ADP5585_GPIO_DIRECTION_A,
+ .gpi_stat_a = ADP5585_GPI_STATUS_A,
+ .has_bias_hole = true,
+ .gpi_ev_min = ADP5585_GPI_EVENT_START,
+ .gpi_ev_max = ADP5585_GPI_EVENT_END,
+ .gpi_int_lvl_a = ADP5585_GPI_INT_LEVEL_A,
+ .gpi_ev_a = ADP5585_GPI_EVENT_EN_A,
+};
+
+static const struct adp5585_gpio_chip adp5589_gpio_chip_info = {
+ .bank = adp5589_gpio_bank,
+ .bit = adp5589_gpio_bit,
+ .debounce_dis_a = ADP5589_DEBOUNCE_DIS_A,
+ .rpull_cfg_a = ADP5589_RPULL_CONFIG_A,
+ .gpo_data_a = ADP5589_GPO_DATA_OUT_A,
+ .gpo_out_a = ADP5589_GPO_OUT_MODE_A,
+ .gpio_dir_a = ADP5589_GPIO_DIRECTION_A,
+ .gpi_stat_a = ADP5589_GPI_STATUS_A,
+ .gpi_ev_min = ADP5589_GPI_EVENT_START,
+ .gpi_ev_max = ADP5589_GPI_EVENT_END,
+ .gpi_int_lvl_a = ADP5589_GPI_INT_LEVEL_A,
+ .gpi_ev_a = ADP5589_GPI_EVENT_EN_A,
+};
+
static const struct platform_device_id adp5585_gpio_id_table[] = {
- { "adp5585-gpio" },
+ { "adp5585-gpio", (kernel_ulong_t)&adp5585_gpio_chip_info },
+ { "adp5589-gpio", (kernel_ulong_t)&adp5589_gpio_chip_info },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(platform, adp5585_gpio_id_table);
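The id table now carries the per-variant description in driver_data, and every register access goes through the info callbacks. A trimmed sketch of the lookup, with a worked example of the two banking schemes in the comment (struct names per this patch, the "foo" probe hypothetical):

#include <linux/platform_device.h>

static int foo_gpio_probe(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);
	const struct adp5585_gpio_chip *info;

	info = (const struct adp5585_gpio_chip *)id->driver_data;
	if (!info)
		return -ENODEV;

	/*
	 * Example, GPIO offset 8:
	 *   adp5585: bank = 1 (off >= 6), bit = BIT(8 - 6) = BIT(2)
	 *   adp5589: bank = 8 >> 3 = 1,   bit = BIT(8 & 0x7) = BIT(0)
	 */
	return 0;	/* real probe wires up info->bank()/info->bit() users */
}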
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index 6f941db02c04..416f265d09d0 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -12,6 +12,7 @@
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -28,6 +29,7 @@
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/forwarder.h>
#include <linux/gpio/machine.h>
#include "dev-sync-probe.h"
@@ -244,18 +246,34 @@ struct gpiochip_fwd {
spinlock_t slock; /* protects tmp[] if !can_sleep */
};
struct gpiochip_fwd_timing *delay_timings;
+ void *data;
+ unsigned long *valid_mask;
unsigned long tmp[]; /* values and descs for multiple ops */
};
-#define fwd_tmp_values(fwd) &(fwd)->tmp[0]
-#define fwd_tmp_descs(fwd) (void *)&(fwd)->tmp[BITS_TO_LONGS((fwd)->chip.ngpio)]
+#define fwd_tmp_values(fwd) (&(fwd)->tmp[0])
+#define fwd_tmp_descs(fwd) ((void *)&(fwd)->tmp[BITS_TO_LONGS((fwd)->chip.ngpio)])
#define fwd_tmp_size(ngpios) (BITS_TO_LONGS((ngpios)) + (ngpios))
+static int gpio_fwd_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return test_bit(offset, fwd->valid_mask) ? 0 : -ENODEV;
+}
+
static int gpio_fwd_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+ /*
+ * get_direction() is called during gpiochip registration, return
+ * -ENODEV if there is no GPIO desc for the line.
+ */
+ if (!test_bit(offset, fwd->valid_mask))
+ return -ENODEV;
+
return gpiod_get_direction(fwd->descs[offset]);
}
@@ -453,10 +471,11 @@ static int gpiochip_fwd_delay_of_xlate(struct gpio_chip *chip,
return line;
}
-static int gpiochip_fwd_setup_delay_line(struct device *dev, struct gpio_chip *chip,
- struct gpiochip_fwd *fwd)
+static int gpiochip_fwd_setup_delay_line(struct gpiochip_fwd *fwd)
{
- fwd->delay_timings = devm_kcalloc(dev, chip->ngpio,
+ struct gpio_chip *chip = &fwd->chip;
+
+ fwd->delay_timings = devm_kcalloc(chip->parent, chip->ngpio,
sizeof(*fwd->delay_timings),
GFP_KERNEL);
if (!fwd->delay_timings)
@@ -468,91 +487,368 @@ static int gpiochip_fwd_setup_delay_line(struct device *dev, struct gpio_chip *c
return 0;
}
#else
-static int gpiochip_fwd_setup_delay_line(struct device *dev, struct gpio_chip *chip,
- struct gpiochip_fwd *fwd)
+static int gpiochip_fwd_setup_delay_line(struct gpiochip_fwd *fwd)
{
return 0;
}
#endif /* !CONFIG_OF_GPIO */
/**
- * gpiochip_fwd_create() - Create a new GPIO forwarder
- * @dev: Parent device pointer
- * @ngpios: Number of GPIOs in the forwarder.
- * @descs: Array containing the GPIO descriptors to forward to.
- * This array must contain @ngpios entries, and must not be deallocated
- * before the forwarder has been destroyed again.
- * @features: Bitwise ORed features as defined with FWD_FEATURE_*.
+ * gpiochip_fwd_get_gpiochip - Get the GPIO chip for the GPIO forwarder
+ * @fwd: GPIO forwarder
*
- * This function creates a new gpiochip, which forwards all GPIO operations to
- * the passed GPIO descriptors.
+ * Returns: The GPIO chip for the GPIO forwarder
+ */
+struct gpio_chip *gpiochip_fwd_get_gpiochip(struct gpiochip_fwd *fwd)
+{
+ return &fwd->chip;
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_get_gpiochip, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_get_data - Get driver-private data for the GPIO forwarder
+ * @fwd: GPIO forwarder
*
- * Return: An opaque object pointer, or an ERR_PTR()-encoded negative error
- * code on failure.
+ * Returns: The driver-private data for the GPIO forwarder
*/
-static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
- unsigned int ngpios,
- struct gpio_desc *descs[],
- unsigned long features)
+void *gpiochip_fwd_get_data(struct gpiochip_fwd *fwd)
+{
+ return fwd->data;
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_get_data, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_request - Request a line of the GPIO forwarder
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line to request
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_request(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_request(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_request, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_get_direction - Return the current direction of a GPIO forwarder line
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ *
+ * Returns: 0 for output, 1 for input, or a negative error code on failure.
+ */
+int gpiochip_fwd_gpio_get_direction(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_get_direction(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get_direction, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_direction_output - Set a GPIO forwarder line direction to
+ * output
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ * @value: value to set
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_direction_output(struct gpiochip_fwd *fwd, unsigned int offset,
+ int value)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_direction_output(gc, offset, value);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_direction_output, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_direction_input - Set a GPIO forwarder line direction to input
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_direction_input(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_direction_input(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_direction_input, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_get - Return a GPIO forwarder line's value
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ *
+ * Returns: The GPIO's logical value, i.e. taking the ACTIVE_LOW status into
+ * account, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_get(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_get(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_get_multiple - Get values for multiple GPIO forwarder lines
+ * @fwd: GPIO forwarder
+ * @mask: bit mask array; one bit per line; BITS_PER_LONG bits per word defines
+ * which lines are to be read
+ * @bits: bit value array; one bit per line; BITS_PER_LONG bits per word will
+ * contain the read values for the lines specified by mask
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_get_multiple_locked(gc, mask, bits);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get_multiple, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_set - Assign value to a GPIO forwarder line.
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ * @value: value to set
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_set(struct gpiochip_fwd *fwd, unsigned int offset, int value)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_set(gc, offset, value);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_set_multiple - Assign values to multiple GPIO forwarder lines
+ * @fwd: GPIO forwarder
+ * @mask: bit mask array; one bit per output; BITS_PER_LONG bits per word
+ * defines which outputs are to be changed
+ * @bits: bit value array; one bit per output; BITS_PER_LONG bits per word
+ * defines the values the outputs specified by mask are to be set to
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_set_multiple_locked(gc, mask, bits);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set_multiple, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_set_config - Set @config for a GPIO forwarder line
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ * @config: Same packed config format as generic pinconf
+ *
+ * Returns: 0 on success, %-ENOTSUPP if the controller doesn't support setting
+ * the configuration.
+ */
+int gpiochip_fwd_gpio_set_config(struct gpiochip_fwd *fwd, unsigned int offset,
+ unsigned long config)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_set_config(gc, offset, config);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set_config, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_to_irq - Return the IRQ corresponding to a GPIO forwarder line
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ *
+ * Returns: The Linux IRQ corresponding to the passed line, or a negative
+ * error code on failure.
+ */
+int gpiochip_fwd_gpio_to_irq(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_to_irq(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_to_irq, "GPIO_FORWARDER");
+
+/**
+ * devm_gpiochip_fwd_alloc - Allocate and initialize a new GPIO forwarder
+ * @dev: Parent device pointer
+ * @ngpios: Number of GPIOs in the forwarder
+ *
+ * Returns: An opaque object pointer, or an ERR_PTR()-encoded negative error
+ * code on failure.
+ */
+struct gpiochip_fwd *devm_gpiochip_fwd_alloc(struct device *dev,
+ unsigned int ngpios)
{
- const char *label = dev_name(dev);
struct gpiochip_fwd *fwd;
struct gpio_chip *chip;
- unsigned int i;
- int error;
- fwd = devm_kzalloc(dev, struct_size(fwd, tmp, fwd_tmp_size(ngpios)),
- GFP_KERNEL);
+ fwd = devm_kzalloc(dev, struct_size(fwd, tmp, fwd_tmp_size(ngpios)), GFP_KERNEL);
if (!fwd)
return ERR_PTR(-ENOMEM);
- chip = &fwd->chip;
-
- /*
- * If any of the GPIO lines are sleeping, then the entire forwarder
- * will be sleeping.
- * If any of the chips support .set_config(), then the forwarder will
- * support setting configs.
- */
- for (i = 0; i < ngpios; i++) {
- struct gpio_chip *parent = gpiod_to_chip(descs[i]);
+ fwd->descs = devm_kcalloc(dev, ngpios, sizeof(*fwd->descs), GFP_KERNEL);
+ if (!fwd->descs)
+ return ERR_PTR(-ENOMEM);
- dev_dbg(dev, "%u => gpio %d irq %d\n", i,
- desc_to_gpio(descs[i]), gpiod_to_irq(descs[i]));
+ fwd->valid_mask = devm_bitmap_zalloc(dev, ngpios, GFP_KERNEL);
+ if (!fwd->valid_mask)
+ return ERR_PTR(-ENOMEM);
- if (gpiod_cansleep(descs[i]))
- chip->can_sleep = true;
- if (parent && parent->set_config)
- chip->set_config = gpio_fwd_set_config;
- }
+ chip = &fwd->chip;
- chip->label = label;
+ chip->label = dev_name(dev);
chip->parent = dev;
chip->owner = THIS_MODULE;
+ chip->request = gpio_fwd_request;
chip->get_direction = gpio_fwd_get_direction;
chip->direction_input = gpio_fwd_direction_input;
chip->direction_output = gpio_fwd_direction_output;
chip->get = gpio_fwd_get;
chip->get_multiple = gpio_fwd_get_multiple_locked;
- chip->set_rv = gpio_fwd_set;
- chip->set_multiple_rv = gpio_fwd_set_multiple_locked;
+ chip->set = gpio_fwd_set;
+ chip->set_multiple = gpio_fwd_set_multiple_locked;
+ chip->set_config = gpio_fwd_set_config;
chip->to_irq = gpio_fwd_to_irq;
chip->base = -1;
chip->ngpio = ngpios;
- fwd->descs = descs;
+
+ return fwd;
+}
+EXPORT_SYMBOL_NS_GPL(devm_gpiochip_fwd_alloc, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_desc_add - Add a GPIO desc in the forwarder
+ * @fwd: GPIO forwarder
+ * @desc: GPIO descriptor to register
+ * @offset: offset for the GPIO in the forwarder
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_desc_add(struct gpiochip_fwd *fwd, struct gpio_desc *desc,
+ unsigned int offset)
+{
+ struct gpio_chip *chip = &fwd->chip;
+
+ if (offset >= chip->ngpio)
+ return -EINVAL;
+
+ if (test_and_set_bit(offset, fwd->valid_mask))
+ return -EEXIST;
+
+ /*
+ * If any of the GPIO lines are sleeping, then the entire forwarder
+ * will be sleeping.
+ */
+ if (gpiod_cansleep(desc))
+ chip->can_sleep = true;
+
+ fwd->descs[offset] = desc;
+
+ dev_dbg(chip->parent, "%u => gpio %d irq %d\n", offset,
+ desc_to_gpio(desc), gpiod_to_irq(desc));
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_desc_add, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_desc_free - Remove a GPIO desc from the forwarder
+ * @fwd: GPIO forwarder
+ * @offset: offset of GPIO desc to remove
+ */
+void gpiochip_fwd_desc_free(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ if (test_and_clear_bit(offset, fwd->valid_mask))
+ gpiod_put(fwd->descs[offset]);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_desc_free, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_register - Register a GPIO forwarder
+ * @fwd: GPIO forwarder
+ * @data: driver-private data associated with this forwarder
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_register(struct gpiochip_fwd *fwd, void *data)
+{
+ struct gpio_chip *chip = &fwd->chip;
+
+ /*
+ * Some GPIO descs have not been registered yet. They may be added at
+ * runtime, so we must assume they can sleep.
+ */
+ if (!bitmap_full(fwd->valid_mask, chip->ngpio))
+ chip->can_sleep = true;
if (chip->can_sleep)
mutex_init(&fwd->mlock);
else
spin_lock_init(&fwd->slock);
+ fwd->data = data;
+
+ return devm_gpiochip_add_data(chip->parent, chip, fwd);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_register, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_create() - Create a new GPIO forwarder
+ * @dev: Parent device pointer
+ * @ngpios: Number of GPIOs in the forwarder.
+ * @descs: Array containing the GPIO descriptors to forward to.
+ * This array must contain @ngpios entries, and can be deallocated
+ * since the forwarder keeps its own copy.
+ * @features: Bitwise ORed features as defined with FWD_FEATURE_*.
+ *
+ * This function creates a new gpiochip, which forwards all GPIO operations to
+ * the passed GPIO descriptors.
+ *
+ * Return: An opaque object pointer, or an ERR_PTR()-encoded negative error
+ * code on failure.
+ */
+static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
+ unsigned int ngpios,
+ struct gpio_desc *descs[],
+ unsigned long features)
+{
+ struct gpiochip_fwd *fwd;
+ unsigned int i;
+ int error;
+
+ fwd = devm_gpiochip_fwd_alloc(dev, ngpios);
+ if (IS_ERR(fwd))
+ return fwd;
+
+ for (i = 0; i < ngpios; i++) {
+ error = gpiochip_fwd_desc_add(fwd, descs[i], i);
+ if (error)
+ return ERR_PTR(error);
+ }
+
if (features & FWD_FEATURE_DELAY) {
- error = gpiochip_fwd_setup_delay_line(dev, chip, fwd);
+ error = gpiochip_fwd_setup_delay_line(fwd);
if (error)
return ERR_PTR(error);
}
- error = devm_gpiochip_add_data(dev, chip, fwd);
+ error = gpiochip_fwd_register(fwd, NULL);
if (error)
return ERR_PTR(error);
@@ -1334,6 +1630,7 @@ static int gpio_aggregator_probe(struct platform_device *pdev)
return PTR_ERR(fwd);
platform_set_drvdata(pdev, fwd);
+ devm_kfree(dev, descs);
return 0;
}
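gpiochip_fwd_create() is now just a sequence of the exported primitives, so other drivers can build forwarders directly (importing the GPIO_FORWARDER symbol namespace). A minimal consumer sketch with placeholder device and descriptor:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/forwarder.h>
#include <linux/module.h>

MODULE_IMPORT_NS("GPIO_FORWARDER");

static int foo_make_forwarder(struct device *dev, struct gpio_desc *desc)
{
	struct gpiochip_fwd *fwd;
	int ret;

	fwd = devm_gpiochip_fwd_alloc(dev, 1);
	if (IS_ERR(fwd))
		return PTR_ERR(fwd);

	ret = gpiochip_fwd_desc_add(fwd, desc, 0);
	if (ret)
		return ret;

	/*
	 * Lines whose desc is added later stay invalid: request() returns
	 * -ENODEV for them until gpiochip_fwd_desc_add() fills the slot.
	 */
	return gpiochip_fwd_register(fwd, NULL);
}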
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 77a674cf99e4..4524c18a87e7 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -69,7 +69,7 @@ static const struct gpio_chip altr_a10sr_gc = {
.label = "altr_a10sr_gpio",
.owner = THIS_MODULE,
.get = altr_a10sr_gpio_get,
- .set_rv = altr_a10sr_gpio_set,
+ .set = altr_a10sr_gpio_set,
.direction_input = altr_a10sr_gpio_direction_input,
.direction_output = altr_a10sr_gpio_direction_output,
.can_sleep = true,
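This is the first of many mechanical .set_rv to .set renames in this series: the int-returning setter that lived under the transitional set_rv name now occupies the set slot. A hedged sketch of such a callback for a hypothetical regmap-backed driver (the struct, register and names are invented for illustration):

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/regmap.h>

struct my_gpio {			/* hypothetical driver state */
	struct regmap *regmap;
};

#define MY_GPIO_OUT_REG	0x04		/* hypothetical output register */

static int my_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct my_gpio *priv = gpiochip_get_data(gc);

	/* Bus errors now propagate to gpiod_set_value() callers. */
	return regmap_update_bits(priv->regmap, MY_GPIO_OUT_REG,
				  BIT(offset), value ? BIT(offset) : 0);
}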
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 1b28525726d7..9508d764cce4 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -259,7 +259,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
altera_gc->gc.direction_input = altera_gpio_direction_input;
altera_gc->gc.direction_output = altera_gpio_direction_output;
altera_gc->gc.get = altera_gpio_get;
- altera_gc->gc.set_rv = altera_gpio_set;
+ altera_gc->gc.set = altera_gpio_set;
altera_gc->gc.owner = THIS_MODULE;
altera_gc->gc.parent = &pdev->dev;
altera_gc->gc.base = -1;
diff --git a/drivers/gpio/gpio-amd-fch.c b/drivers/gpio/gpio-amd-fch.c
index f8d0cea46049..e6c6c3ec7656 100644
--- a/drivers/gpio/gpio-amd-fch.c
+++ b/drivers/gpio/gpio-amd-fch.c
@@ -165,7 +165,7 @@ static int amd_fch_gpio_probe(struct platform_device *pdev)
priv->gc.direction_output = amd_fch_gpio_direction_output;
priv->gc.get_direction = amd_fch_gpio_get_direction;
priv->gc.get = amd_fch_gpio_get;
- priv->gc.set_rv = amd_fch_gpio_set;
+ priv->gc.set = amd_fch_gpio_set;
spin_lock_init(&priv->lock);
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index 425d8472f744..15fd5e210d74 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -165,7 +165,7 @@ static struct amd_gpio gp = {
.ngpio = 32,
.request = amd_gpio_request,
.free = amd_gpio_free,
- .set_rv = amd_gpio_set,
+ .set = amd_gpio_set,
.get = amd_gpio_get,
.direction_output = amd_gpio_dirout,
.direction_input = amd_gpio_dirin,
diff --git a/drivers/gpio/gpio-amdpt.c b/drivers/gpio/gpio-amdpt.c
index b70036587d9c..8458a6949c65 100644
--- a/drivers/gpio/gpio-amdpt.c
+++ b/drivers/gpio/gpio-amdpt.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/spinlock.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
@@ -24,54 +25,50 @@
#define PT_SYNC_REG 0x28
struct pt_gpio_chip {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *reg_base;
};
static int pt_gpio_request(struct gpio_chip *gc, unsigned offset)
{
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct pt_gpio_chip *pt_gpio = gpiochip_get_data(gc);
- unsigned long flags;
u32 using_pins;
dev_dbg(gc->parent, "pt_gpio_request offset=%x\n", offset);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
using_pins = readl(pt_gpio->reg_base + PT_SYNC_REG);
if (using_pins & BIT(offset)) {
dev_warn(gc->parent, "PT GPIO pin %x reconfigured\n",
offset);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return -EINVAL;
}
writel(using_pins | BIT(offset), pt_gpio->reg_base + PT_SYNC_REG);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
return 0;
}
static void pt_gpio_free(struct gpio_chip *gc, unsigned offset)
{
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct pt_gpio_chip *pt_gpio = gpiochip_get_data(gc);
- unsigned long flags;
u32 using_pins;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
using_pins = readl(pt_gpio->reg_base + PT_SYNC_REG);
using_pins &= ~BIT(offset);
writel(using_pins, pt_gpio->reg_base + PT_SYNC_REG);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
dev_dbg(gc->parent, "pt_gpio_free offset=%x\n", offset);
}
static int pt_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct pt_gpio_chip *pt_gpio;
int ret = 0;
@@ -91,22 +88,27 @@ static int pt_gpio_probe(struct platform_device *pdev)
return PTR_ERR(pt_gpio->reg_base);
}
- ret = bgpio_init(&pt_gpio->gc, dev, 4,
- pt_gpio->reg_base + PT_INPUTDATA_REG,
- pt_gpio->reg_base + PT_OUTPUTDATA_REG, NULL,
- pt_gpio->reg_base + PT_DIRECTION_REG, NULL,
- BGPIOF_READ_OUTPUT_REG_SET);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = pt_gpio->reg_base + PT_INPUTDATA_REG,
+ .set = pt_gpio->reg_base + PT_OUTPUTDATA_REG,
+ .dirout = pt_gpio->reg_base + PT_DIRECTION_REG,
+ .flags = GPIO_GENERIC_READ_OUTPUT_REG_SET,
+ };
+
+ ret = gpio_generic_chip_init(&pt_gpio->chip, &config);
if (ret) {
- dev_err(dev, "bgpio_init failed\n");
+ dev_err(dev, "failed to initialize the generic GPIO chip\n");
return ret;
}
- pt_gpio->gc.owner = THIS_MODULE;
- pt_gpio->gc.request = pt_gpio_request;
- pt_gpio->gc.free = pt_gpio_free;
- pt_gpio->gc.ngpio = (uintptr_t)device_get_match_data(dev);
+ pt_gpio->chip.gc.owner = THIS_MODULE;
+ pt_gpio->chip.gc.request = pt_gpio_request;
+ pt_gpio->chip.gc.free = pt_gpio_free;
+ pt_gpio->chip.gc.ngpio = (uintptr_t)device_get_match_data(dev);
- ret = devm_gpiochip_add_data(dev, &pt_gpio->gc, pt_gpio);
+ ret = devm_gpiochip_add_data(dev, &pt_gpio->chip.gc, pt_gpio);
if (ret) {
dev_err(dev, "Failed to register GPIO lib\n");
return ret;
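The conversion above is the template repeated across this series: bgpio_init()'s long positional argument list, where unused registers were passed as NULL, becomes a gpio_generic_chip_config with named fields. As a standalone sketch under made-up register offsets:

#include <linux/gpio/generic.h>

#define MY_IN_REG	0x00	/* hypothetical register layout */
#define MY_OUT_REG	0x04
#define MY_DIR_REG	0x08

static int my_generic_init(struct device *dev, void __iomem *base,
			   struct gpio_generic_chip *chip)
{
	struct gpio_generic_chip_config config = {
		.dev = dev,
		.sz = 4,		/* 32-bit wide registers */
		.dat = base + MY_IN_REG,
		.set = base + MY_OUT_REG,
		.dirout = base + MY_DIR_REG,
	};

	/* Unused registers are simply left out instead of passed as NULL. */
	return gpio_generic_chip_init(chip, &config);
}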
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index e530c94dcce8..a7e98d395d8e 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -39,7 +39,6 @@ static int arizona_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
return ret;
if (change && persistent) {
- pm_runtime_mark_last_busy(chip->parent);
pm_runtime_put_autosuspend(chip->parent);
}
@@ -82,7 +81,6 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
return ret;
}
- pm_runtime_mark_last_busy(chip->parent);
pm_runtime_put_autosuspend(chip->parent);
}
@@ -140,7 +138,7 @@ static const struct gpio_chip template_chip = {
.direction_input = arizona_gpio_direction_in,
.get = arizona_gpio_get,
.direction_output = arizona_gpio_direction_out,
- .set_rv = arizona_gpio_set,
+ .set = arizona_gpio_set,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 00b31497ecff..7622f9e9f54a 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -596,7 +596,7 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
gpio->chip.request = NULL;
gpio->chip.free = NULL;
gpio->chip.get = aspeed_sgpio_get;
- gpio->chip.set_rv = aspeed_sgpio_set;
+ gpio->chip.set = aspeed_sgpio_set;
gpio->chip.set_config = aspeed_sgpio_set_config;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 2d340a343a17..cbdf781994dc 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -5,6 +5,7 @@
* Joel Stanley <joel@jms.id.au>
*/
+#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/gpio/aspeed.h>
@@ -24,16 +25,11 @@
/*
* These two headers aren't meant to be used by GPIO drivers. We need
- * them in order to access gpio_chip_hwgpio() which we need to implement
+ * them in order to access gpiod_hwgpio() which we need to implement
* the aspeed specific API which allows the coprocessor to request
* access to some GPIOs and to arbitrate between coprocessor and ARM.
*/
#include <linux/gpio/consumer.h>
-#include "gpiolib.h"
-
-/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
#define GPIO_G7_IRQ_STS_BASE 0x100
#define GPIO_G7_IRQ_STS_OFFSET(x) (GPIO_G7_IRQ_STS_BASE + (x) * 0x4)
@@ -942,7 +938,7 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
{
struct gpio_chip *chip = gpiod_to_chip(desc);
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
- int rc = 0, bindex, offset = gpio_chip_hwgpio(desc);
+ int rc = 0, bindex, offset = gpiod_hwgpio(desc);
const struct aspeed_gpio_bank *bank = to_bank(offset);
if (!aspeed_gpio_support_copro(gpio))
@@ -987,7 +983,7 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc)
{
struct gpio_chip *chip = gpiod_to_chip(desc);
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
- int rc = 0, bindex, offset = gpio_chip_hwgpio(desc);
+ int rc = 0, bindex, offset = gpiod_hwgpio(desc);
if (!aspeed_gpio_support_copro(gpio))
return -EOPNOTSUPP;
@@ -1352,7 +1348,7 @@ static int aspeed_gpio_probe(struct platform_device *pdev)
gpio->chip.request = aspeed_gpio_request;
gpio->chip.free = aspeed_gpio_free;
gpio->chip.get = aspeed_gpio_get;
- gpio->chip.set_rv = aspeed_gpio_set;
+ gpio->chip.set = aspeed_gpio_set;
gpio->chip.set_config = aspeed_gpio_set_config;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index de4cc12e5e03..2ad9f6ac6636 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mod_devicetable.h>
@@ -28,17 +29,17 @@
#define AR71XX_GPIO_REG_INT_MASK 0x24
struct ath79_gpio_ctrl {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *base;
- raw_spinlock_t lock;
unsigned long both_edges;
};
static struct ath79_gpio_ctrl *irq_data_to_ath79_gpio(struct irq_data *data)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
- return container_of(gc, struct ath79_gpio_ctrl, gc);
+ return container_of(gen_gc, struct ath79_gpio_ctrl, chip);
}
static u32 ath79_gpio_read(struct ath79_gpio_ctrl *ctrl, unsigned reg)
@@ -70,48 +71,43 @@ static void ath79_gpio_irq_unmask(struct irq_data *data)
{
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
- unsigned long flags;
- gpiochip_enable_irq(&ctrl->gc, irqd_to_hwirq(data));
- raw_spin_lock_irqsave(&ctrl->lock, flags);
+ gpiochip_enable_irq(&ctrl->chip.gc, irqd_to_hwirq(data));
+
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
+
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, mask);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
static void ath79_gpio_irq_mask(struct irq_data *data)
{
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
- unsigned long flags;
- raw_spin_lock_irqsave(&ctrl->lock, flags);
- ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
- gpiochip_disable_irq(&ctrl->gc, irqd_to_hwirq(data));
+ scoped_guard(gpio_generic_lock_irqsave, &ctrl->chip)
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0);
+
+ gpiochip_disable_irq(&ctrl->chip.gc, irqd_to_hwirq(data));
}
static void ath79_gpio_irq_enable(struct irq_data *data)
{
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
- unsigned long flags;
- raw_spin_lock_irqsave(&ctrl->lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, mask);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, mask);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
static void ath79_gpio_irq_disable(struct irq_data *data)
{
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
- unsigned long flags;
- raw_spin_lock_irqsave(&ctrl->lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, 0);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
static int ath79_gpio_irq_set_type(struct irq_data *data,
@@ -120,7 +116,6 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
u32 type = 0, polarity = 0;
- unsigned long flags;
bool disabled;
switch (flow_type) {
@@ -142,7 +137,7 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
return -EINVAL;
}
- raw_spin_lock_irqsave(&ctrl->lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
if (flow_type == IRQ_TYPE_EDGE_BOTH) {
ctrl->both_edges |= mask;
@@ -167,8 +162,6 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
ath79_gpio_update_bits(
ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, mask);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
-
return 0;
}
@@ -187,28 +180,27 @@ static void ath79_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct ath79_gpio_ctrl *ctrl =
- container_of(gc, struct ath79_gpio_ctrl, gc);
- unsigned long flags, pending;
+ container_of(gen_gc, struct ath79_gpio_ctrl, chip);
+ unsigned long pending;
u32 both_edges, state;
int irq;
chained_irq_enter(irqchip, desc);
- raw_spin_lock_irqsave(&ctrl->lock, flags);
-
- pending = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_INT_PENDING);
+ scoped_guard(gpio_generic_lock_irqsave, &ctrl->chip) {
+ pending = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_INT_PENDING);
- /* Update the polarity of the both edges irqs */
- both_edges = ctrl->both_edges & pending;
- if (both_edges) {
- state = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_IN);
- ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_POLARITY,
- both_edges, ~state);
+ /* Update the polarity of the both edges irqs */
+ both_edges = ctrl->both_edges & pending;
+ if (both_edges) {
+ state = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_IN);
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_POLARITY,
+ both_edges, ~state);
+ }
}
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
-
for_each_set_bit(irq, &pending, gc->ngpio)
generic_handle_domain_irq(gc->irq.domain, irq);
@@ -224,6 +216,7 @@ MODULE_DEVICE_TABLE(of, ath79_gpio_of_match);
static int ath79_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct ath79_gpio_ctrl *ctrl;
struct gpio_irq_chip *girq;
@@ -252,22 +245,25 @@ static int ath79_gpio_probe(struct platform_device *pdev)
if (IS_ERR(ctrl->base))
return PTR_ERR(ctrl->base);
- raw_spin_lock_init(&ctrl->lock);
- err = bgpio_init(&ctrl->gc, dev, 4,
- ctrl->base + AR71XX_GPIO_REG_IN,
- ctrl->base + AR71XX_GPIO_REG_SET,
- ctrl->base + AR71XX_GPIO_REG_CLEAR,
- oe_inverted ? NULL : ctrl->base + AR71XX_GPIO_REG_OE,
- oe_inverted ? ctrl->base + AR71XX_GPIO_REG_OE : NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = ctrl->base + AR71XX_GPIO_REG_IN,
+ .set = ctrl->base + AR71XX_GPIO_REG_SET,
+ .clr = ctrl->base + AR71XX_GPIO_REG_CLEAR,
+ .dirout = oe_inverted ? NULL : ctrl->base + AR71XX_GPIO_REG_OE,
+ .dirin = oe_inverted ? ctrl->base + AR71XX_GPIO_REG_OE : NULL,
+ };
+
+ err = gpio_generic_chip_init(&ctrl->chip, &config);
if (err) {
- dev_err(dev, "bgpio_init failed\n");
+ dev_err(dev, "failed to initialize generic GPIO chip\n");
return err;
}
/* Optional interrupt setup */
if (device_property_read_bool(dev, "interrupt-controller")) {
- girq = &ctrl->gc.irq;
+ girq = &ctrl->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &ath79_gpio_irqchip);
girq->parent_handler = ath79_gpio_irq_handler;
girq->num_parents = 1;
@@ -280,7 +276,7 @@ static int ath79_gpio_probe(struct platform_device *pdev)
girq->handler = handle_simple_irq;
}
- return devm_gpiochip_add_data(dev, &ctrl->gc, ctrl);
+ return devm_gpiochip_add_data(dev, &ctrl->chip.gc, ctrl);
}
static struct platform_driver ath79_gpio_driver = {
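With gpio_chip now embedded inside struct gpio_generic_chip, drivers like ath79 recover their private state in two hops: to_gpio_generic_chip() first, then container_of() up to the driver struct. A sketch with a hypothetical driver struct:

#include <linux/container_of.h>
#include <linux/gpio/generic.h>

struct my_ctrl {			/* hypothetical driver state */
	struct gpio_generic_chip chip;
	void __iomem *base;
};

static struct my_ctrl *to_my_ctrl(struct gpio_chip *gc)
{
	/* gc is &chip.gc, so walk up through the generic-chip wrapper. */
	return container_of(to_gpio_generic_chip(gc), struct my_ctrl, chip);
}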
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 8f22cb36004d..208b71c59d58 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -339,7 +339,7 @@ static const struct gpio_chip template_chip = {
.direction_input = bcm_kona_gpio_direction_input,
.get = bcm_kona_gpio_get,
.direction_output = bcm_kona_gpio_direction_output,
- .set_rv = bcm_kona_gpio_set,
+ .set = bcm_kona_gpio_set,
.set_config = bcm_kona_gpio_set_config,
.to_irq = bcm_kona_gpio_to_irq,
.base = 0,
diff --git a/drivers/gpio/gpio-bd71815.c b/drivers/gpio/gpio-bd71815.c
index 36701500925e..afb18a5a9d79 100644
--- a/drivers/gpio/gpio-bd71815.c
+++ b/drivers/gpio/gpio-bd71815.c
@@ -85,7 +85,7 @@ static const struct gpio_chip bd71815gpo_chip = {
.owner = THIS_MODULE,
.get = bd71815gpo_get,
.get_direction = bd71815gpo_direction_get,
- .set_rv = bd71815gpo_set,
+ .set = bd71815gpo_set,
.set_config = bd71815_gpio_set_config,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-bd71828.c b/drivers/gpio/gpio-bd71828.c
index 4ba151e5cf25..e439dbfffc62 100644
--- a/drivers/gpio/gpio-bd71828.c
+++ b/drivers/gpio/gpio-bd71828.c
@@ -109,7 +109,7 @@ static int bd71828_probe(struct platform_device *pdev)
bdgpio->gpio.set_config = bd71828_gpio_set_config;
bdgpio->gpio.can_sleep = true;
bdgpio->gpio.get = bd71828_gpio_get;
- bdgpio->gpio.set_rv = bd71828_gpio_set;
+ bdgpio->gpio.set = bd71828_gpio_set;
bdgpio->gpio.base = -1;
/*
diff --git a/drivers/gpio/gpio-bd9571mwv.c b/drivers/gpio/gpio-bd9571mwv.c
index 8df1361e3e84..7c95bb36511e 100644
--- a/drivers/gpio/gpio-bd9571mwv.c
+++ b/drivers/gpio/gpio-bd9571mwv.c
@@ -88,7 +88,7 @@ static const struct gpio_chip template_chip = {
.direction_input = bd9571mwv_gpio_direction_input,
.direction_output = bd9571mwv_gpio_direction_output,
.get = bd9571mwv_gpio_get,
- .set_rv = bd9571mwv_gpio_set,
+ .set = bd9571mwv_gpio_set,
.base = -1,
.ngpio = 2,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-blzp1600.c b/drivers/gpio/gpio-blzp1600.c
index 055cb296ae54..0f8c826ba876 100644
--- a/drivers/gpio/gpio-blzp1600.c
+++ b/drivers/gpio/gpio-blzp1600.c
@@ -6,6 +6,7 @@
#include <linux/errno.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -36,7 +37,7 @@
struct blzp1600_gpio {
void __iomem *base;
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
int irq;
};
@@ -76,7 +77,7 @@ static void blzp1600_gpio_irq_mask(struct irq_data *d)
{
struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
- guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
blzp1600_gpio_rmw(chip->base + GPIO_IM_REG, BIT(d->hwirq), 1);
}
@@ -84,7 +85,7 @@ static void blzp1600_gpio_irq_unmask(struct irq_data *d)
{
struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
- guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
blzp1600_gpio_rmw(chip->base + GPIO_IM_REG, BIT(d->hwirq), 0);
}
@@ -99,9 +100,9 @@ static void blzp1600_gpio_irq_enable(struct irq_data *d)
{
struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
- gpiochip_enable_irq(&chip->gc, irqd_to_hwirq(d));
+ gpiochip_enable_irq(&chip->gen_gc.gc, irqd_to_hwirq(d));
- guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
blzp1600_gpio_rmw(chip->base + GPIO_DIR_REG, BIT(d->hwirq), 0);
blzp1600_gpio_rmw(chip->base + GPIO_IEN_REG, BIT(d->hwirq), 1);
}
@@ -110,9 +111,9 @@ static void blzp1600_gpio_irq_disable(struct irq_data *d)
{
struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
- guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
blzp1600_gpio_rmw(chip->base + GPIO_IEN_REG, BIT(d->hwirq), 0);
- gpiochip_disable_irq(&chip->gc, irqd_to_hwirq(d));
+ gpiochip_disable_irq(&chip->gen_gc.gc, irqd_to_hwirq(d));
}
static int blzp1600_gpio_irq_set_type(struct irq_data *d, u32 type)
@@ -121,7 +122,7 @@ static int blzp1600_gpio_irq_set_type(struct irq_data *d, u32 type)
u32 edge_level, single_both, fall_rise;
int mask = BIT(d->hwirq);
- guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
edge_level = blzp1600_gpio_read(chip, GPIO_IS_REG);
single_both = blzp1600_gpio_read(chip, GPIO_IBE_REG);
fall_rise = blzp1600_gpio_read(chip, GPIO_IEV_REG);
@@ -186,8 +187,8 @@ static void blzp1600_gpio_irqhandler(struct irq_desc *desc)
chained_irq_enter(irqchip, desc);
irq_status = blzp1600_gpio_read(gpio, GPIO_RIS_REG);
- for_each_set_bit(hwirq, &irq_status, gpio->gc.ngpio)
- generic_handle_domain_irq(gpio->gc.irq.domain, hwirq);
+ for_each_set_bit(hwirq, &irq_status, gpio->gen_gc.gc.ngpio)
+ generic_handle_domain_irq(gpio->gen_gc.gc.irq.domain, hwirq);
chained_irq_exit(irqchip, desc);
}
@@ -197,7 +198,7 @@ static int blzp1600_gpio_set_debounce(struct gpio_chip *gc, unsigned int offset,
{
struct blzp1600_gpio *chip = gpiochip_get_data(gc);
- guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
blzp1600_gpio_rmw(chip->base + GPIO_DB_REG, BIT(offset), debounce);
return 0;
@@ -216,6 +217,7 @@ static int blzp1600_gpio_set_config(struct gpio_chip *gc, unsigned int offset, u
static int blzp1600_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct blzp1600_gpio *chip;
struct gpio_chip *gc;
int ret;
@@ -228,14 +230,21 @@ static int blzp1600_gpio_probe(struct platform_device *pdev)
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
- ret = bgpio_init(&chip->gc, &pdev->dev, 4, chip->base + GPIO_IDATA_REG,
- chip->base + GPIO_SET_REG, chip->base + GPIO_CLR_REG,
- chip->base + GPIO_DIR_REG, NULL, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = chip->base + GPIO_IDATA_REG,
+ .set = chip->base + GPIO_SET_REG,
+ .clr = chip->base + GPIO_CLR_REG,
+ .dirout = chip->base + GPIO_DIR_REG,
+ };
+
+ ret = gpio_generic_chip_init(&chip->gen_gc, &config);
if (ret)
return dev_err_probe(&pdev->dev, ret, "Failed to register generic gpio\n");
/* configure the gpio chip */
- gc = &chip->gc;
+ gc = &chip->gen_gc.gc;
gc->set_config = blzp1600_gpio_set_config;
if (device_property_present(&pdev->dev, "interrupt-controller")) {
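The locking changes in blzp1600 and ath79 follow one rule: guard(gpio_generic_lock_irqsave) holds the generic chip's lock until the enclosing scope ends, while scoped_guard() bounds it to a single statement or block so that calls which must run unlocked, such as gpiochip_disable_irq(), can follow. A sketch of the mask-side pattern (function and register are hypothetical):

#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/generic.h>
#include <linux/io.h>

static void my_irq_mask(struct gpio_generic_chip *gen_gc,
			void __iomem *mask_reg, unsigned int hwirq)
{
	/* Hold the lock only across the register update... */
	scoped_guard(gpio_generic_lock_irqsave, gen_gc)
		writel(readl(mask_reg) | BIT(hwirq), mask_reg);

	/* ...so this call runs with the lock already dropped. */
	gpiochip_disable_irq(&gen_gc->gc, hwirq);
}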
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index e7671bcd5c07..af9287ff5dc4 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -3,6 +3,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/irqdomain.h>
@@ -37,7 +38,7 @@ enum gio_reg_index {
struct brcmstb_gpio_bank {
struct list_head node;
int id;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct brcmstb_gpio_priv *parent_priv;
u32 width;
u32 wake_active;
@@ -72,19 +73,18 @@ __brcmstb_gpio_get_active_irqs(struct brcmstb_gpio_bank *bank)
{
void __iomem *reg_base = bank->parent_priv->reg_base;
- return bank->gc.read_reg(reg_base + GIO_STAT(bank->id)) &
- bank->gc.read_reg(reg_base + GIO_MASK(bank->id));
+ return gpio_generic_read_reg(&bank->chip, reg_base + GIO_STAT(bank->id)) &
+ gpio_generic_read_reg(&bank->chip, reg_base + GIO_MASK(bank->id));
}
static unsigned long
brcmstb_gpio_get_active_irqs(struct brcmstb_gpio_bank *bank)
{
unsigned long status;
- unsigned long flags;
- raw_spin_lock_irqsave(&bank->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&bank->chip);
+
status = __brcmstb_gpio_get_active_irqs(bank);
- raw_spin_unlock_irqrestore(&bank->gc.bgpio_lock, flags);
return status;
}
@@ -92,26 +92,26 @@ brcmstb_gpio_get_active_irqs(struct brcmstb_gpio_bank *bank)
static int brcmstb_gpio_hwirq_to_offset(irq_hw_number_t hwirq,
struct brcmstb_gpio_bank *bank)
{
- return hwirq - bank->gc.offset;
+ return hwirq - bank->chip.gc.offset;
}
static void brcmstb_gpio_set_imask(struct brcmstb_gpio_bank *bank,
unsigned int hwirq, bool enable)
{
- struct gpio_chip *gc = &bank->gc;
struct brcmstb_gpio_priv *priv = bank->parent_priv;
u32 mask = BIT(brcmstb_gpio_hwirq_to_offset(hwirq, bank));
u32 imask;
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- imask = gc->read_reg(priv->reg_base + GIO_MASK(bank->id));
+ guard(gpio_generic_lock_irqsave)(&bank->chip);
+
+ imask = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_MASK(bank->id));
if (enable)
imask |= mask;
else
imask &= ~mask;
- gc->write_reg(priv->reg_base + GIO_MASK(bank->id), imask);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_MASK(bank->id), imask);
}
static int brcmstb_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -150,7 +150,8 @@ static void brcmstb_gpio_irq_ack(struct irq_data *d)
struct brcmstb_gpio_priv *priv = bank->parent_priv;
u32 mask = BIT(brcmstb_gpio_hwirq_to_offset(d->hwirq, bank));
- gc->write_reg(priv->reg_base + GIO_STAT(bank->id), mask);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_STAT(bank->id), mask);
}
static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
@@ -162,7 +163,6 @@ static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
u32 edge_insensitive, iedge_insensitive;
u32 edge_config, iedge_config;
u32 level, ilevel;
- unsigned long flags;
switch (type) {
case IRQ_TYPE_LEVEL_LOW:
@@ -194,23 +194,25 @@ static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&bank->gc.bgpio_lock, flags);
-
- iedge_config = bank->gc.read_reg(priv->reg_base +
- GIO_EC(bank->id)) & ~mask;
- iedge_insensitive = bank->gc.read_reg(priv->reg_base +
- GIO_EI(bank->id)) & ~mask;
- ilevel = bank->gc.read_reg(priv->reg_base +
- GIO_LEVEL(bank->id)) & ~mask;
+ guard(gpio_generic_lock_irqsave)(&bank->chip);
+
+ iedge_config = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_EC(bank->id)) & ~mask;
+ iedge_insensitive = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_EI(bank->id)) & ~mask;
+ ilevel = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_LEVEL(bank->id)) & ~mask;
+
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_EC(bank->id),
+ iedge_config | edge_config);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_EI(bank->id),
+ iedge_insensitive | edge_insensitive);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_LEVEL(bank->id),
+ ilevel | level);
- bank->gc.write_reg(priv->reg_base + GIO_EC(bank->id),
- iedge_config | edge_config);
- bank->gc.write_reg(priv->reg_base + GIO_EI(bank->id),
- iedge_insensitive | edge_insensitive);
- bank->gc.write_reg(priv->reg_base + GIO_LEVEL(bank->id),
- ilevel | level);
-
- raw_spin_unlock_irqrestore(&bank->gc.bgpio_lock, flags);
return 0;
}
@@ -263,7 +265,7 @@ static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank)
{
struct brcmstb_gpio_priv *priv = bank->parent_priv;
struct irq_domain *domain = priv->irq_domain;
- int hwbase = bank->gc.offset;
+ int hwbase = bank->chip.gc.offset;
unsigned long status;
while ((status = brcmstb_gpio_get_active_irqs(bank))) {
@@ -303,7 +305,7 @@ static struct brcmstb_gpio_bank *brcmstb_gpio_hwirq_to_bank(
/* banks are in descending order */
list_for_each_entry_reverse(bank, &priv->bank_list, node) {
- i += bank->gc.ngpio;
+ i += bank->chip.gc.ngpio;
if (hwirq < i)
return bank;
}
@@ -332,7 +334,7 @@ static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
dev_dbg(&pdev->dev, "Mapping irq %d for gpio line %d (bank %d)\n",
irq, (int)hwirq, bank->id);
- ret = irq_set_chip_data(irq, &bank->gc);
+ ret = irq_set_chip_data(irq, &bank->chip.gc);
if (ret < 0)
return ret;
irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class,
@@ -394,7 +396,7 @@ static void brcmstb_gpio_remove(struct platform_device *pdev)
* more important to actually perform all of the steps.
*/
list_for_each_entry(bank, &priv->bank_list, node)
- gpiochip_remove(&bank->gc);
+ gpiochip_remove(&bank->chip.gc);
}
static int brcmstb_gpio_of_xlate(struct gpio_chip *gc,
@@ -412,7 +414,7 @@ static int brcmstb_gpio_of_xlate(struct gpio_chip *gc,
if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
return -EINVAL;
- offset = gpiospec->args[0] - bank->gc.offset;
+ offset = gpiospec->args[0] - bank->chip.gc.offset;
if (offset >= gc->ngpio || offset < 0)
return -EINVAL;
@@ -436,10 +438,8 @@ static int brcmstb_gpio_irq_setup(struct platform_device *pdev,
struct device_node *np = dev->of_node;
int err;
- priv->irq_domain =
- irq_domain_create_linear(of_fwnode_handle(np), priv->num_gpios,
- &brcmstb_gpio_irq_domain_ops,
- priv);
+ priv->irq_domain = irq_domain_create_linear(dev_fwnode(dev), priv->num_gpios,
+ &brcmstb_gpio_irq_domain_ops, priv);
if (!priv->irq_domain) {
dev_err(dev, "Couldn't allocate IRQ domain\n");
return -ENXIO;
@@ -495,19 +495,17 @@ out_free_domain:
static void brcmstb_gpio_bank_save(struct brcmstb_gpio_priv *priv,
struct brcmstb_gpio_bank *bank)
{
- struct gpio_chip *gc = &bank->gc;
unsigned int i;
for (i = 0; i < GIO_REG_STAT; i++)
- bank->saved_regs[i] = gc->read_reg(priv->reg_base +
- GIO_BANK_OFF(bank->id, i));
+ bank->saved_regs[i] = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_BANK_OFF(bank->id, i));
}
static void brcmstb_gpio_quiesce(struct device *dev, bool save)
{
struct brcmstb_gpio_priv *priv = dev_get_drvdata(dev);
struct brcmstb_gpio_bank *bank;
- struct gpio_chip *gc;
u32 imask;
/* disable non-wake interrupt */
@@ -515,8 +513,6 @@ static void brcmstb_gpio_quiesce(struct device *dev, bool save)
disable_irq(priv->parent_irq);
list_for_each_entry(bank, &priv->bank_list, node) {
- gc = &bank->gc;
-
if (save)
brcmstb_gpio_bank_save(priv, bank);
@@ -525,8 +521,9 @@ static void brcmstb_gpio_quiesce(struct device *dev, bool save)
imask = bank->wake_active;
else
imask = 0;
- gc->write_reg(priv->reg_base + GIO_MASK(bank->id),
- imask);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_MASK(bank->id),
+ imask);
}
}
@@ -536,16 +533,15 @@ static void brcmstb_gpio_shutdown(struct platform_device *pdev)
brcmstb_gpio_quiesce(&pdev->dev, false);
}
-#ifdef CONFIG_PM_SLEEP
static void brcmstb_gpio_bank_restore(struct brcmstb_gpio_priv *priv,
struct brcmstb_gpio_bank *bank)
{
- struct gpio_chip *gc = &bank->gc;
unsigned int i;
for (i = 0; i < GIO_REG_STAT; i++)
- gc->write_reg(priv->reg_base + GIO_BANK_OFF(bank->id, i),
- bank->saved_regs[i]);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_BANK_OFF(bank->id, i),
+ bank->saved_regs[i]);
}
static int brcmstb_gpio_suspend(struct device *dev)
@@ -575,18 +571,14 @@ static int brcmstb_gpio_resume(struct device *dev)
return 0;
}
-#else
-#define brcmstb_gpio_suspend NULL
-#define brcmstb_gpio_resume NULL
-#endif /* CONFIG_PM_SLEEP */
-
static const struct dev_pm_ops brcmstb_gpio_pm_ops = {
- .suspend_noirq = brcmstb_gpio_suspend,
- .resume_noirq = brcmstb_gpio_resume,
+ .suspend_noirq = pm_sleep_ptr(brcmstb_gpio_suspend),
+ .resume_noirq = pm_sleep_ptr(brcmstb_gpio_resume),
};
static int brcmstb_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
void __iomem *reg_base;
@@ -632,7 +624,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
* else leave I/O in little endian mode.
*/
#if defined(CONFIG_MIPS) && defined(__BIG_ENDIAN)
- flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+ flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
#endif
of_property_for_each_u32(np, "brcm,gpio-bank-widths", bank_width) {
@@ -667,17 +659,24 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
bank->width = bank_width;
}
+ gc = &bank->chip.gc;
+
/*
* Regs are 4 bytes wide, have data reg, no set/clear regs,
* and direction bits have 0 = output and 1 = input
*/
- gc = &bank->gc;
- err = bgpio_init(gc, dev, 4,
- reg_base + GIO_DATA(bank->id),
- NULL, NULL, NULL,
- reg_base + GIO_IODIR(bank->id), flags);
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = reg_base + GIO_DATA(bank->id),
+ .dirin = reg_base + GIO_IODIR(bank->id),
+ .flags = flags,
+ };
+
+ err = gpio_generic_chip_init(&bank->chip, &config);
if (err) {
- dev_err(dev, "bgpio_init() failed\n");
+ dev_err(dev, "failed to initialize generic GPIO chip\n");
goto fail;
}
@@ -702,7 +701,8 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
* be retained from S5 cold boot
*/
need_wakeup_event |= !!__brcmstb_gpio_get_active_irqs(bank);
- gc->write_reg(reg_base + GIO_MASK(bank->id), 0);
+ gpio_generic_write_reg(&bank->chip,
+ reg_base + GIO_MASK(bank->id), 0);
err = gpiochip_add_data(gc, bank);
if (err) {
@@ -749,7 +749,7 @@ static struct platform_driver brcmstb_gpio_driver = {
.driver = {
.name = "brcmstb-gpio",
.of_match_table = brcmstb_gpio_of_match,
- .pm = &brcmstb_gpio_pm_ops,
+ .pm = pm_sleep_ptr(&brcmstb_gpio_pm_ops),
},
.probe = brcmstb_gpio_probe,
.remove = brcmstb_gpio_remove,
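brcmstb shows the accessor side of the generic-chip rework: the read_reg()/write_reg() function pointers no longer hang off gpio_chip and are reached through helpers that take the generic chip instead. In sketch form (the helper name is hypothetical):

#include <linux/gpio/generic.h>

static u32 my_set_bits(struct gpio_generic_chip *chip, void __iomem *reg,
		       u32 bits)
{
	/* Helpers replace the old gc->read_reg()/gc->write_reg() calls. */
	u32 val = gpio_generic_read_reg(chip, reg);

	gpio_generic_write_reg(chip, reg, val | bits);

	return val;
}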
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index 7c9e81fea37a..324eeb77dbd5 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -52,10 +52,8 @@ struct bt8xxgpio {
struct pci_dev *pdev;
struct gpio_chip gpio;
-#ifdef CONFIG_PM
u32 saved_outen;
u32 saved_data;
-#endif
};
#define bgwrite(dat, adr) writel((dat), bg->mmio+(adr))
@@ -145,7 +143,7 @@ static void bt8xxgpio_gpio_setup(struct bt8xxgpio *bg)
c->direction_input = bt8xxgpio_gpio_direction_input;
c->get = bt8xxgpio_gpio_get;
c->direction_output = bt8xxgpio_gpio_direction_output;
- c->set_rv = bt8xxgpio_gpio_set;
+ c->set = bt8xxgpio_gpio_set;
c->dbg_show = NULL;
c->base = modparam_gpiobase;
c->ngpio = BT8XXGPIO_NR_GPIOS;
@@ -224,9 +222,10 @@ static void bt8xxgpio_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-#ifdef CONFIG_PM
-static int bt8xxgpio_suspend(struct pci_dev *pdev, pm_message_t state)
+
+static int bt8xxgpio_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct bt8xxgpio *bg = pci_get_drvdata(pdev);
scoped_guard(spinlock_irqsave, &bg->lock) {
@@ -238,23 +237,13 @@ static int bt8xxgpio_suspend(struct pci_dev *pdev, pm_message_t state)
bgwrite(0x0, BT848_GPIO_OUT_EN);
}
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
return 0;
}
-static int bt8xxgpio_resume(struct pci_dev *pdev)
+static int bt8xxgpio_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct bt8xxgpio *bg = pci_get_drvdata(pdev);
- int err;
-
- pci_set_power_state(pdev, PCI_D0);
- err = pci_enable_device(pdev);
- if (err)
- return err;
- pci_restore_state(pdev);
guard(spinlock_irqsave)(&bg->lock);
@@ -267,10 +256,8 @@ static int bt8xxgpio_resume(struct pci_dev *pdev)
return 0;
}
-#else
-#define bt8xxgpio_suspend NULL
-#define bt8xxgpio_resume NULL
-#endif /* CONFIG_PM */
+
+static DEFINE_SIMPLE_DEV_PM_OPS(bt8xxgpio_pm_ops, bt8xxgpio_suspend, bt8xxgpio_resume);
static const struct pci_device_id bt8xxgpio_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT848) },
@@ -286,8 +273,7 @@ static struct pci_driver bt8xxgpio_pci_driver = {
.id_table = bt8xxgpio_pci_tbl,
.probe = bt8xxgpio_probe,
.remove = bt8xxgpio_remove,
- .suspend = bt8xxgpio_suspend,
- .resume = bt8xxgpio_resume,
+ .driver.pm = &bt8xxgpio_pm_ops,
};
module_pci_driver(bt8xxgpio_pci_driver);
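The bt8xx change retires the legacy PCI suspend/resume hooks in favor of dev_pm_ops: the PCI core now handles power-state transitions itself, and DEFINE_SIMPLE_DEV_PM_OPS() lets the compiler discard the callbacks when CONFIG_PM_SLEEP is off, replacing the old #ifdef blocks and NULL fallbacks. A skeletal sketch (names hypothetical, bodies elided):

#include <linux/pm.h>

static int my_suspend(struct device *dev)
{
	/* Save device context; PCI power-state handling is the core's job. */
	return 0;
}

static int my_resume(struct device *dev)
{
	/* Restore device context. */
	return 0;
}

/* Compiles away when CONFIG_PM_SLEEP=n; no #ifdef or NULL stubs needed. */
static DEFINE_SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);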
diff --git a/drivers/gpio/gpio-cadence.c b/drivers/gpio/gpio-cadence.c
index e9dd2564c54f..b75734ca22dd 100644
--- a/drivers/gpio/gpio-cadence.c
+++ b/drivers/gpio/gpio-cadence.c
@@ -8,9 +8,11 @@
* Boris Brezillon <boris.brezillon@free-electrons.com>
*/
-#include <linux/gpio/driver.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
+#include <linux/gpio/generic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -30,7 +32,7 @@
#define CDNS_GPIO_IRQ_ANY_EDGE 0x2c
struct cdns_gpio_chip {
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
void __iomem *regs;
u32 bypass_orig;
};
@@ -38,29 +40,24 @@ struct cdns_gpio_chip {
static int cdns_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip);
- unsigned long flags;
- raw_spin_lock_irqsave(&chip->bgpio_lock, flags);
+ guard(gpio_generic_lock)(&cgpio->gen_gc);
iowrite32(ioread32(cgpio->regs + CDNS_GPIO_BYPASS_MODE) & ~BIT(offset),
cgpio->regs + CDNS_GPIO_BYPASS_MODE);
- raw_spin_unlock_irqrestore(&chip->bgpio_lock, flags);
return 0;
}
static void cdns_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip);
- unsigned long flags;
- raw_spin_lock_irqsave(&chip->bgpio_lock, flags);
+ guard(gpio_generic_lock)(&cgpio->gen_gc);
iowrite32(ioread32(cgpio->regs + CDNS_GPIO_BYPASS_MODE) |
(BIT(offset) & cgpio->bypass_orig),
cgpio->regs + CDNS_GPIO_BYPASS_MODE);
-
- raw_spin_unlock_irqrestore(&chip->bgpio_lock, flags);
}
static void cdns_gpio_irq_mask(struct irq_data *d)
@@ -85,13 +82,12 @@ static int cdns_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip);
- unsigned long flags;
u32 int_value;
u32 int_type;
u32 mask = BIT(d->hwirq);
int ret = 0;
- raw_spin_lock_irqsave(&chip->bgpio_lock, flags);
+ guard(gpio_generic_lock)(&cgpio->gen_gc);
int_value = ioread32(cgpio->regs + CDNS_GPIO_IRQ_VALUE) & ~mask;
int_type = ioread32(cgpio->regs + CDNS_GPIO_IRQ_TYPE) & ~mask;
@@ -108,15 +104,12 @@ static int cdns_gpio_irq_set_type(struct irq_data *d, unsigned int type)
} else if (type == IRQ_TYPE_LEVEL_LOW) {
int_type |= mask;
} else {
- ret = -EINVAL;
- goto err_irq_type;
+ return -EINVAL;
}
iowrite32(int_value, cgpio->regs + CDNS_GPIO_IRQ_VALUE);
iowrite32(int_type, cgpio->regs + CDNS_GPIO_IRQ_TYPE);
-err_irq_type:
- raw_spin_unlock_irqrestore(&chip->bgpio_lock, flags);
return ret;
}
@@ -150,6 +143,7 @@ static const struct irq_chip cdns_gpio_irqchip = {
static int cdns_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config = { };
struct cdns_gpio_chip *cgpio;
int ret, irq;
u32 dir_prev;
@@ -176,32 +170,33 @@ static int cdns_gpio_probe(struct platform_device *pdev)
* gpiochip_lock_as_irq:
* tried to flag a GPIO set as output for IRQ
* Generic GPIO driver stores the direction value internally,
- * so it needs to be changed before bgpio_init() is called.
+ * so it needs to be changed before gpio_generic_chip_init() is called.
*/
dir_prev = ioread32(cgpio->regs + CDNS_GPIO_DIRECTION_MODE);
iowrite32(GENMASK(num_gpios - 1, 0),
cgpio->regs + CDNS_GPIO_DIRECTION_MODE);
- ret = bgpio_init(&cgpio->gc, &pdev->dev, 4,
- cgpio->regs + CDNS_GPIO_INPUT_VALUE,
- cgpio->regs + CDNS_GPIO_OUTPUT_VALUE,
- NULL,
- NULL,
- cgpio->regs + CDNS_GPIO_DIRECTION_MODE,
- BGPIOF_READ_OUTPUT_REG_SET);
+ config.dev = &pdev->dev;
+ config.sz = 4;
+ config.dat = cgpio->regs + CDNS_GPIO_INPUT_VALUE;
+ config.set = cgpio->regs + CDNS_GPIO_OUTPUT_VALUE;
+ config.dirin = cgpio->regs + CDNS_GPIO_DIRECTION_MODE;
+ config.flags = GPIO_GENERIC_READ_OUTPUT_REG_SET;
+
+ ret = gpio_generic_chip_init(&cgpio->gen_gc, &config);
if (ret) {
dev_err(&pdev->dev, "Failed to register generic gpio, %d\n",
ret);
goto err_revert_dir;
}
- cgpio->gc.label = dev_name(&pdev->dev);
- cgpio->gc.ngpio = num_gpios;
- cgpio->gc.parent = &pdev->dev;
- cgpio->gc.base = -1;
- cgpio->gc.owner = THIS_MODULE;
- cgpio->gc.request = cdns_gpio_request;
- cgpio->gc.free = cdns_gpio_free;
+ cgpio->gen_gc.gc.label = dev_name(&pdev->dev);
+ cgpio->gen_gc.gc.ngpio = num_gpios;
+ cgpio->gen_gc.gc.parent = &pdev->dev;
+ cgpio->gen_gc.gc.base = -1;
+ cgpio->gen_gc.gc.owner = THIS_MODULE;
+ cgpio->gen_gc.gc.request = cdns_gpio_request;
+ cgpio->gen_gc.gc.free = cdns_gpio_free;
clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
@@ -218,7 +213,7 @@ static int cdns_gpio_probe(struct platform_device *pdev)
if (irq >= 0) {
struct gpio_irq_chip *girq;
- girq = &cgpio->gc.irq;
+ girq = &cgpio->gen_gc.gc.irq;
gpio_irq_chip_set_chip(girq, &cdns_gpio_irqchip);
girq->parent_handler = cdns_gpio_irq_handler;
girq->num_parents = 1;
@@ -234,7 +229,7 @@ static int cdns_gpio_probe(struct platform_device *pdev)
girq->handler = handle_level_irq;
}
- ret = devm_gpiochip_add_data(&pdev->dev, &cgpio->gc, cgpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &cgpio->gen_gc.gc, cgpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
goto err_revert_dir;
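The cdns_gpio_irq_set_type() rework above shows the other payoff of guard(): error paths can return directly instead of jumping to an unlock label, which is why the err_irq_type goto disappears. A reduced sketch of the same shape (registers and names hypothetical):

#include <linux/cleanup.h>
#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/irq.h>

static int my_set_type(struct gpio_generic_chip *gen_gc, void __iomem *reg,
		       u32 mask, unsigned int type)
{
	u32 val;

	guard(gpio_generic_lock)(gen_gc);

	val = readl(reg) & ~mask;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		break;
	case IRQ_TYPE_EDGE_FALLING:
		val |= mask;
		break;
	default:
		/* Direct return; the guard drops the lock, no goto needed. */
		return -EINVAL;
	}

	writel(val, reg);

	return 0;
}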
diff --git a/drivers/gpio/gpio-cgbc.c b/drivers/gpio/gpio-cgbc.c
index 1495bec62456..0efa1b61001a 100644
--- a/drivers/gpio/gpio-cgbc.c
+++ b/drivers/gpio/gpio-cgbc.c
@@ -171,7 +171,7 @@ static int cgbc_gpio_probe(struct platform_device *pdev)
chip->direction_output = cgbc_gpio_direction_output;
chip->get_direction = cgbc_gpio_get_direction;
chip->get = cgbc_gpio_get;
- chip->set_rv = cgbc_gpio_set;
+ chip->set = cgbc_gpio_set;
chip->ngpio = CGBC_GPIO_NGPIO;
ret = devm_mutex_init(dev, &gpio->lock);
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index d69a24dd4828..24ff2347d599 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -8,13 +8,15 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/platform_device.h>
static int clps711x_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config = { };
struct device_node *np = pdev->dev.of_node;
+ struct gpio_generic_chip *gen_gc;
void __iomem *dat, *dir;
- struct gpio_chip *gc;
int err, id;
if (!np)
@@ -24,8 +26,8 @@ static int clps711x_gpio_probe(struct platform_device *pdev)
if ((id < 0) || (id > 4))
return -ENODEV;
- gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
- if (!gc)
+ gen_gc = devm_kzalloc(&pdev->dev, sizeof(*gen_gc), GFP_KERNEL);
+ if (!gen_gc)
return -ENOMEM;
dat = devm_platform_ioremap_resource(pdev, 0);
@@ -36,35 +38,37 @@ static int clps711x_gpio_probe(struct platform_device *pdev)
if (IS_ERR(dir))
return PTR_ERR(dir);
+ config.dev = &pdev->dev;
+ config.sz = 1;
+ config.dat = dat;
+
switch (id) {
case 3:
/* PORTD is inverted logic for direction register */
- err = bgpio_init(gc, &pdev->dev, 1, dat, NULL, NULL,
- NULL, dir, 0);
+ config.dirin = dir;
break;
default:
- err = bgpio_init(gc, &pdev->dev, 1, dat, NULL, NULL,
- dir, NULL, 0);
+ config.dirout = dir;
break;
}
+ err = gpio_generic_chip_init(gen_gc, &config);
if (err)
return err;
switch (id) {
case 4:
/* PORTE is 3 lines only */
- gc->ngpio = 3;
+ gen_gc->gc.ngpio = 3;
break;
default:
break;
}
- gc->base = -1;
- gc->owner = THIS_MODULE;
- platform_set_drvdata(pdev, gc);
+ gen_gc->gc.base = -1;
+ gen_gc->gc.owner = THIS_MODULE;
- return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
+ return devm_gpiochip_add_data(&pdev->dev, &gen_gc->gc, NULL);
}
static const struct of_device_id clps711x_gpio_ids[] = {
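clps711x illustrates how direction-register polarity maps onto the config struct: dirout points at a register where 1 means output, dirin at one where 1 means input, so PORTD's inverted logic simply selects the other field. A tiny sketch of that choice (helper hypothetical):

static void my_pick_dir_reg(struct gpio_generic_chip_config *config,
			    void __iomem *dir, bool inverted)
{
	/* 1 = input in the inverted case, 1 = output otherwise. */
	if (inverted)
		config->dirin = dir;
	else
		config->dirout = dir;
}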
diff --git a/drivers/gpio/gpio-creg-snps.c b/drivers/gpio/gpio-creg-snps.c
index 8b49f02c7896..f8ea961fa1de 100644
--- a/drivers/gpio/gpio-creg-snps.c
+++ b/drivers/gpio/gpio-creg-snps.c
@@ -167,7 +167,7 @@ static int creg_gpio_probe(struct platform_device *pdev)
hcg->gc.label = dev_name(dev);
hcg->gc.base = -1;
hcg->gc.ngpio = ngpios;
- hcg->gc.set_rv = creg_gpio_set;
+ hcg->gc.set = creg_gpio_set;
hcg->gc.direction_output = creg_gpio_dir_out;
ret = devm_gpiochip_add_data(dev, &hcg->gc, hcg);
diff --git a/drivers/gpio/gpio-cros-ec.c b/drivers/gpio/gpio-cros-ec.c
index 53cd5ff6247b..435483826c6e 100644
--- a/drivers/gpio/gpio-cros-ec.c
+++ b/drivers/gpio/gpio-cros-ec.c
@@ -188,7 +188,7 @@ static int cros_ec_gpio_probe(struct platform_device *pdev)
gc->can_sleep = true;
gc->label = dev_name(dev);
gc->base = -1;
- gc->set_rv = cros_ec_gpio_set;
+ gc->set = cros_ec_gpio_set;
gc->get = cros_ec_gpio_get;
gc->get_direction = cros_ec_gpio_get_direction;
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 8db7cca3a060..0fb5c06d0886 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -349,7 +349,7 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
cg->chip.direction_input = crystalcove_gpio_dir_in;
cg->chip.direction_output = crystalcove_gpio_dir_out;
cg->chip.get = crystalcove_gpio_get;
- cg->chip.set_rv = crystalcove_gpio_set;
+ cg->chip.set = crystalcove_gpio_set;
cg->chip.base = -1;
cg->chip.ngpio = CRYSTALCOVE_VGPIO_NUM;
cg->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index 143d1f4173a6..8affe4e9f90e 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -296,7 +296,7 @@ static struct cs5535_gpio_chip cs5535_gpio_chip = {
.request = chip_gpio_request,
.get = chip_gpio_get,
- .set_rv = chip_gpio_set,
+ .set = chip_gpio_set,
.direction_input = chip_direction_input,
.direction_output = chip_direction_output,
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 6482c5b267db..495f0ee58505 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -172,7 +172,7 @@ static const struct gpio_chip reference_gp = {
.label = "da9052-gpio",
.owner = THIS_MODULE,
.get = da9052_gpio_get,
- .set_rv = da9052_gpio_set,
+ .set = da9052_gpio_set,
.direction_input = da9052_gpio_direction_input,
.direction_output = da9052_gpio_direction_output,
.to_irq = da9052_gpio_to_irq,
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index 3d9d0c700100..a09bd6eb93cf 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -116,7 +116,7 @@ static const struct gpio_chip reference_gp = {
.label = "da9055-gpio",
.owner = THIS_MODULE,
.get = da9055_gpio_get,
- .set_rv = da9055_gpio_set,
+ .set = da9055_gpio_set,
.direction_input = da9055_gpio_direction_input,
.direction_output = da9055_gpio_direction_output,
.to_irq = da9055_gpio_to_irq,
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 80a82492171e..538f27209ce7 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -202,7 +202,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
chips->chip.direction_input = davinci_direction_in;
chips->chip.get = davinci_gpio_get;
chips->chip.direction_output = davinci_direction_out;
- chips->chip.set_rv = davinci_gpio_set;
+ chips->chip.set = davinci_gpio_set;
chips->chip.ngpio = ngpio;
chips->chip.base = -1;
@@ -478,7 +478,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
return irq;
}
- irq_domain = irq_domain_create_legacy(of_fwnode_handle(dev->of_node), ngpio, irq, 0,
+ irq_domain = irq_domain_create_legacy(dev_fwnode(dev), ngpio, irq, 0,
&davinci_gpio_irq_ops, chips);
if (!irq_domain) {
dev_err(dev, "Couldn't register an IRQ domain\n");
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 4bd3c47eaf93..4670ffd7ea7f 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -469,7 +469,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
dln2->gpio.base = -1;
dln2->gpio.ngpio = pins;
dln2->gpio.can_sleep = true;
- dln2->gpio.set_rv = dln2_gpio_set;
+ dln2->gpio.set = dln2_gpio_set;
dln2->gpio.get = dln2_gpio_get;
dln2->gpio.request = dln2_gpio_request;
dln2->gpio.free = dln2_gpio_free;
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 43b667b41f5d..4986c465c9a8 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -78,7 +79,6 @@ struct dwapb_platform_data {
unsigned int nports;
};
-#ifdef CONFIG_PM_SLEEP
/* Store GPIO context across system-wide suspend/resume transitions */
struct dwapb_context {
u32 data;
@@ -91,7 +91,6 @@ struct dwapb_context {
u32 int_deb;
u32 wake_en;
};
-#endif
struct dwapb_gpio_port_irqchip {
unsigned int nr_irqs;
@@ -99,16 +98,18 @@ struct dwapb_gpio_port_irqchip {
};
struct dwapb_gpio_port {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct dwapb_gpio_port_irqchip *pirq;
struct dwapb_gpio *gpio;
-#ifdef CONFIG_PM_SLEEP
struct dwapb_context *ctx;
-#endif
unsigned int idx;
};
-#define to_dwapb_gpio(_gc) \
- (container_of(_gc, struct dwapb_gpio_port, gc)->gpio)
+
+static inline struct dwapb_gpio *to_dwapb_gpio(struct gpio_chip *gc)
+{
+ return container_of(to_gpio_generic_chip(gc),
+ struct dwapb_gpio_port, chip)->gpio;
+}
struct dwapb_gpio {
struct device *dev;
@@ -148,19 +149,19 @@ static inline u32 gpio_reg_convert(struct dwapb_gpio *gpio, unsigned int offset)
static inline u32 dwapb_read(struct dwapb_gpio *gpio, unsigned int offset)
{
- struct gpio_chip *gc = &gpio->ports[0].gc;
- void __iomem *reg_base = gpio->regs;
+ struct gpio_generic_chip *chip = &gpio->ports[0].chip;
+ void __iomem *reg_base = gpio->regs;
- return gc->read_reg(reg_base + gpio_reg_convert(gpio, offset));
+ return gpio_generic_read_reg(chip, reg_base + gpio_reg_convert(gpio, offset));
}
static inline void dwapb_write(struct dwapb_gpio *gpio, unsigned int offset,
u32 val)
{
- struct gpio_chip *gc = &gpio->ports[0].gc;
- void __iomem *reg_base = gpio->regs;
+ struct gpio_generic_chip *chip = &gpio->ports[0].chip;
+ void __iomem *reg_base = gpio->regs;
- gc->write_reg(reg_base + gpio_reg_convert(gpio, offset), val);
+ gpio_generic_write_reg(chip, reg_base + gpio_reg_convert(gpio, offset), val);
}
static struct dwapb_gpio_port *dwapb_offs_to_port(struct dwapb_gpio *gpio, unsigned int offs)
@@ -186,7 +187,7 @@ static void dwapb_toggle_trigger(struct dwapb_gpio *gpio, unsigned int offs)
if (!port)
return;
- gc = &port->gc;
+ gc = &port->chip.gc;
pol = dwapb_read(gpio, GPIO_INT_POLARITY);
/* Just read the current value right out of the data register */
@@ -201,13 +202,13 @@ static void dwapb_toggle_trigger(struct dwapb_gpio *gpio, unsigned int offs)
static u32 dwapb_do_irq(struct dwapb_gpio *gpio)
{
- struct gpio_chip *gc = &gpio->ports[0].gc;
+ struct gpio_generic_chip *gen_gc = &gpio->ports[0].chip;
unsigned long irq_status;
irq_hw_number_t hwirq;
irq_status = dwapb_read(gpio, GPIO_INTSTATUS);
for_each_set_bit(hwirq, &irq_status, DWAPB_MAX_GPIOS) {
- int gpio_irq = irq_find_mapping(gc->irq.domain, hwirq);
+ int gpio_irq = irq_find_mapping(gen_gc->gc.irq.domain, hwirq);
u32 irq_type = irq_get_trigger_type(gpio_irq);
generic_handle_irq(gpio_irq);
@@ -237,27 +238,27 @@ static irqreturn_t dwapb_irq_handler_mfd(int irq, void *dev_id)
static void dwapb_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
u32 val = BIT(irqd_to_hwirq(d));
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
dwapb_write(gpio, GPIO_PORTA_EOI, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static void dwapb_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
- dwapb_write(gpio, GPIO_INTMASK, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, gen_gc) {
+ val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
+ dwapb_write(gpio, GPIO_INTMASK, val);
+ }
gpiochip_disable_irq(gc, hwirq);
}
@@ -265,59 +266,61 @@ static void dwapb_irq_mask(struct irq_data *d)
static void dwapb_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long flags;
u32 val;
gpiochip_enable_irq(gc, hwirq);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq);
dwapb_write(gpio, GPIO_INTMASK, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static void dwapb_irq_enable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
val = dwapb_read(gpio, GPIO_INTEN) | BIT(hwirq);
dwapb_write(gpio, GPIO_INTEN, val);
val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq);
dwapb_write(gpio, GPIO_INTMASK, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static void dwapb_irq_disable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
dwapb_write(gpio, GPIO_INTMASK, val);
val = dwapb_read(gpio, GPIO_INTEN) & ~BIT(hwirq);
dwapb_write(gpio, GPIO_INTEN, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static int dwapb_irq_set_type(struct irq_data *d, u32 type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t bit = irqd_to_hwirq(d);
- unsigned long level, polarity, flags;
+ unsigned long level, polarity;
+
+ guard(gpio_generic_lock_irqsave)(gen_gc);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
level = dwapb_read(gpio, GPIO_INTTYPE_LEVEL);
polarity = dwapb_read(gpio, GPIO_INT_POLARITY);
@@ -352,12 +355,10 @@ static int dwapb_irq_set_type(struct irq_data *d, u32 type)
dwapb_write(gpio, GPIO_INTTYPE_LEVEL, level);
if (type != IRQ_TYPE_EDGE_BOTH)
dwapb_write(gpio, GPIO_INT_POLARITY, polarity);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int dwapb_irq_set_wake(struct irq_data *d, unsigned int enable)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -372,9 +373,6 @@ static int dwapb_irq_set_wake(struct irq_data *d, unsigned int enable)
return 0;
}
-#else
-#define dwapb_irq_set_wake NULL
-#endif
static const struct irq_chip dwapb_irq_chip = {
.name = DWAPB_DRIVER_NAME,
@@ -384,7 +382,7 @@ static const struct irq_chip dwapb_irq_chip = {
.irq_set_type = dwapb_irq_set_type,
.irq_enable = dwapb_irq_enable,
.irq_disable = dwapb_irq_disable,
- .irq_set_wake = dwapb_irq_set_wake,
+ .irq_set_wake = pm_sleep_ptr(dwapb_irq_set_wake),
.flags = IRQCHIP_IMMUTABLE,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
@@ -393,11 +391,12 @@ static int dwapb_gpio_set_debounce(struct gpio_chip *gc,
unsigned offset, unsigned debounce)
{
struct dwapb_gpio_port *port = gpiochip_get_data(gc);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = port->gpio;
- unsigned long flags, val_deb;
+ unsigned long val_deb;
unsigned long mask = BIT(offset);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
val_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
if (debounce)
@@ -406,8 +405,6 @@ static int dwapb_gpio_set_debounce(struct gpio_chip *gc,
val_deb &= ~mask;
dwapb_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
return 0;
}
@@ -445,7 +442,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
struct dwapb_port_property *pp)
{
struct dwapb_gpio_port_irqchip *pirq;
- struct gpio_chip *gc = &port->gc;
+ struct gpio_chip *gc = &port->chip.gc;
struct gpio_irq_chip *girq;
int err;
@@ -501,6 +498,7 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
struct dwapb_port_property *pp,
unsigned int offs)
{
+ struct gpio_generic_chip_config config;
struct dwapb_gpio_port *port;
void __iomem *dat, *set, *dirout;
int err;
@@ -519,32 +517,39 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
set = gpio->regs + GPIO_SWPORTA_DR + pp->idx * GPIO_SWPORT_DR_STRIDE;
dirout = gpio->regs + GPIO_SWPORTA_DDR + pp->idx * GPIO_SWPORT_DDR_STRIDE;
+ config = (struct gpio_generic_chip_config) {
+ .dev = gpio->dev,
+ .sz = 4,
+ .dat = dat,
+ .set = set,
+ .dirout = dirout,
+ };
+
/* This registers 32 GPIO lines per port */
- err = bgpio_init(&port->gc, gpio->dev, 4, dat, set, NULL, dirout,
- NULL, 0);
+ err = gpio_generic_chip_init(&port->chip, &config);
if (err) {
dev_err(gpio->dev, "failed to init gpio chip for port%d\n",
port->idx);
return err;
}
- port->gc.fwnode = pp->fwnode;
- port->gc.ngpio = pp->ngpio;
- port->gc.base = pp->gpio_base;
- port->gc.request = gpiochip_generic_request;
- port->gc.free = gpiochip_generic_free;
+ port->chip.gc.fwnode = pp->fwnode;
+ port->chip.gc.ngpio = pp->ngpio;
+ port->chip.gc.base = pp->gpio_base;
+ port->chip.gc.request = gpiochip_generic_request;
+ port->chip.gc.free = gpiochip_generic_free;
/* Only port A supports debounce */
if (pp->idx == 0)
- port->gc.set_config = dwapb_gpio_set_config;
+ port->chip.gc.set_config = dwapb_gpio_set_config;
else
- port->gc.set_config = gpiochip_generic_config;
+ port->chip.gc.set_config = gpiochip_generic_config;
/* Only port A can provide interrupts in all configurations of the IP */
if (pp->idx == 0)
dwapb_configure_irqs(gpio, port, pp);
- err = devm_gpiochip_add_data(gpio->dev, &port->gc, port);
+ err = devm_gpiochip_add_data(gpio->dev, &port->chip.gc, port);
if (err) {
dev_err(gpio->dev, "failed to register gpiochip for port%d\n",
port->idx);
@@ -746,42 +751,40 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int dwapb_gpio_suspend(struct device *dev)
{
struct dwapb_gpio *gpio = dev_get_drvdata(dev);
- struct gpio_chip *gc = &gpio->ports[0].gc;
- unsigned long flags;
+ struct gpio_generic_chip *gen_gc = &gpio->ports[0].chip;
int i;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- for (i = 0; i < gpio->nr_ports; i++) {
- unsigned int offset;
- unsigned int idx = gpio->ports[i].idx;
- struct dwapb_context *ctx = gpio->ports[i].ctx;
+ scoped_guard(gpio_generic_lock_irqsave, gen_gc) {
+ for (i = 0; i < gpio->nr_ports; i++) {
+ unsigned int offset;
+ unsigned int idx = gpio->ports[i].idx;
+ struct dwapb_context *ctx = gpio->ports[i].ctx;
- offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE;
- ctx->dir = dwapb_read(gpio, offset);
+ offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE;
+ ctx->dir = dwapb_read(gpio, offset);
- offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE;
- ctx->data = dwapb_read(gpio, offset);
+ offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE;
+ ctx->data = dwapb_read(gpio, offset);
- offset = GPIO_EXT_PORTA + idx * GPIO_EXT_PORT_STRIDE;
- ctx->ext = dwapb_read(gpio, offset);
+ offset = GPIO_EXT_PORTA + idx * GPIO_EXT_PORT_STRIDE;
+ ctx->ext = dwapb_read(gpio, offset);
- /* Only port A can provide interrupts */
- if (idx == 0) {
- ctx->int_mask = dwapb_read(gpio, GPIO_INTMASK);
- ctx->int_en = dwapb_read(gpio, GPIO_INTEN);
- ctx->int_pol = dwapb_read(gpio, GPIO_INT_POLARITY);
- ctx->int_type = dwapb_read(gpio, GPIO_INTTYPE_LEVEL);
- ctx->int_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
-
- /* Mask out interrupts */
- dwapb_write(gpio, GPIO_INTMASK, ~ctx->wake_en);
+ /* Only port A can provide interrupts */
+ if (idx == 0) {
+ ctx->int_mask = dwapb_read(gpio, GPIO_INTMASK);
+ ctx->int_en = dwapb_read(gpio, GPIO_INTEN);
+ ctx->int_pol = dwapb_read(gpio, GPIO_INT_POLARITY);
+ ctx->int_type = dwapb_read(gpio, GPIO_INTTYPE_LEVEL);
+ ctx->int_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
+
+ /* Mask out interrupts */
+ dwapb_write(gpio, GPIO_INTMASK, ~ctx->wake_en);
+ }
}
}
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
clk_bulk_disable_unprepare(DWAPB_NR_CLOCKS, gpio->clks);
@@ -791,8 +794,8 @@ static int dwapb_gpio_suspend(struct device *dev)
static int dwapb_gpio_resume(struct device *dev)
{
struct dwapb_gpio *gpio = dev_get_drvdata(dev);
- struct gpio_chip *gc = &gpio->ports[0].gc;
- unsigned long flags;
+ struct gpio_chip *gc = &gpio->ports[0].chip.gc;
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
int i, err;
err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks);
@@ -801,7 +804,8 @@ static int dwapb_gpio_resume(struct device *dev)
return err;
}
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
for (i = 0; i < gpio->nr_ports; i++) {
unsigned int offset;
unsigned int idx = gpio->ports[i].idx;
@@ -828,19 +832,17 @@ static int dwapb_gpio_resume(struct device *dev)
dwapb_write(gpio, GPIO_PORTA_EOI, 0xffffffff);
}
}
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(dwapb_gpio_pm_ops, dwapb_gpio_suspend,
- dwapb_gpio_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(dwapb_gpio_pm_ops,
+ dwapb_gpio_suspend, dwapb_gpio_resume);
static struct platform_driver dwapb_gpio_driver = {
.driver = {
.name = DWAPB_DRIVER_NAME,
- .pm = &dwapb_gpio_pm_ops,
+ .pm = pm_sleep_ptr(&dwapb_gpio_pm_ops),
.of_match_table = dwapb_of_match,
.acpi_match_table = dwapb_acpi_match,
},
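
The dwapb hunks above are representative of the whole series: every open-coded
raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pair on gc->bgpio_lock
becomes a scope-based guard on the embedded gpio_generic_chip, so every early
return drops the lock automatically. A minimal sketch of the pattern, with a
hypothetical foo driver and register offset standing in for the real ones:

#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/gpio/generic.h>
#include <linux/io.h>

#define FOO_INT_POLARITY	0x0c	/* hypothetical register offset */

struct foo_gpio {
	struct gpio_generic_chip chip;
	void __iomem *regs;
};

static int foo_irq_set_polarity(struct foo_gpio *foo, unsigned int bit,
				bool active_high)
{
	u32 polarity;

	/* Lock dropped and IRQ flags restored when the scope ends. */
	guard(gpio_generic_lock_irqsave)(&foo->chip);

	polarity = readl(foo->regs + FOO_INT_POLARITY);
	if (active_high)
		polarity |= BIT(bit);
	else
		polarity &= ~BIT(bit);
	writel(polarity, foo->regs + FOO_INT_POLARITY);

	return 0;
}

The explicit gpio_generic_chip_lock_irqsave()/_unlock_irqrestore() helpers
remain available where a guard scope does not fit; the grgpio hunks further
down use them.
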
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index f2973d0b7138..50fafeda8d7e 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -663,7 +663,7 @@ static int sprd_eic_probe(struct platform_device *pdev)
sprd_eic->chip.request = sprd_eic_request;
sprd_eic->chip.free = sprd_eic_free;
sprd_eic->chip.set_config = sprd_eic_set_config;
- sprd_eic->chip.set_rv = sprd_eic_set;
+ sprd_eic->chip.set = sprd_eic_set;
fallthrough;
case SPRD_EIC_ASYNC:
case SPRD_EIC_SYNC:
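
The set_rv to set renames here and throughout the rest of this series are
mechanical: the int-returning setter, introduced under the temporary set_rv
name, has taken over the original set slot in struct gpio_chip. A hedged
sketch of the callback shape a converted driver provides (foo names and the
register offset are hypothetical):

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>

#define FOO_DATA_OUT	0x00	/* hypothetical register offset */

struct foo_gpio {
	void __iomem *regs;
};

/* Setters can now report failures instead of returning void. */
static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct foo_gpio *foo = gpiochip_get_data(gc);
	u32 reg = readl(foo->regs + FOO_DATA_OUT);

	if (value)
		reg |= BIT(offset);
	else
		reg &= ~BIT(offset);
	writel(reg, foo->regs + FOO_DATA_OUT);

	return 0;
}

Assigned as gc->set = foo_gpio_set. MMIO drivers like this one always return
0; drivers whose setter can fail (I2C expanders, firmware mailboxes) return
the error instead of swallowing it.
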
diff --git a/drivers/gpio/gpio-elkhartlake.c b/drivers/gpio/gpio-elkhartlake.c
index 95de52d2cc63..b96e7928b6e5 100644
--- a/drivers/gpio/gpio-elkhartlake.c
+++ b/drivers/gpio/gpio-elkhartlake.c
@@ -2,43 +2,46 @@
/*
* Intel Elkhart Lake PSE GPIO driver
*
- * Copyright (c) 2023 Intel Corporation.
+ * Copyright (c) 2023, 2025 Intel Corporation.
*
* Authors: Pandith N <pandith.n@intel.com>
* Raag Jadav <raag.jadav@intel.com>
*/
+#include <linux/auxiliary_bus.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/ehl_pse_io_aux.h>
+
#include "gpio-tangier.h"
/* Each Intel EHL PSE GPIO Controller has 30 GPIO pins */
#define EHL_PSE_NGPIO 30
-static int ehl_gpio_probe(struct platform_device *pdev)
+static int ehl_gpio_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = &adev->dev;
+ struct ehl_pse_io_data *data;
struct tng_gpio *priv;
- int irq, ret;
+ int ret;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
+ data = dev_get_platdata(dev);
+ if (!data)
+ return -ENODATA;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ priv->reg_base = devm_ioremap_resource(dev, &data->mem);
if (IS_ERR(priv->reg_base))
return PTR_ERR(priv->reg_base);
priv->dev = dev;
- priv->irq = irq;
+ priv->irq = data->irq;
priv->info.base = -1;
priv->info.ngpio = EHL_PSE_NGPIO;
@@ -51,25 +54,24 @@ static int ehl_gpio_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "tng_gpio_probe error\n");
- platform_set_drvdata(pdev, priv);
+ auxiliary_set_drvdata(adev, priv);
return 0;
}
-static const struct platform_device_id ehl_gpio_ids[] = {
- { "gpio-elkhartlake" },
+static const struct auxiliary_device_id ehl_gpio_ids[] = {
+ { EHL_PSE_IO_NAME "." EHL_PSE_GPIO_NAME },
{ }
};
-MODULE_DEVICE_TABLE(platform, ehl_gpio_ids);
+MODULE_DEVICE_TABLE(auxiliary, ehl_gpio_ids);
-static struct platform_driver ehl_gpio_driver = {
+static struct auxiliary_driver ehl_gpio_driver = {
.driver = {
- .name = "gpio-elkhartlake",
.pm = pm_sleep_ptr(&tng_gpio_pm_ops),
},
.probe = ehl_gpio_probe,
.id_table = ehl_gpio_ids,
};
-module_platform_driver(ehl_gpio_driver);
+module_auxiliary_driver(ehl_gpio_driver);
MODULE_AUTHOR("Pandith N <pandith.n@intel.com>");
MODULE_AUTHOR("Raag Jadav <raag.jadav@intel.com>");
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index a5e6e446f39c..a214b0672726 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -306,7 +306,7 @@ static int em_gio_probe(struct platform_device *pdev)
gpio_chip->direction_input = em_gio_direction_input;
gpio_chip->get = em_gio_get;
gpio_chip->direction_output = em_gio_direction_output;
- gpio_chip->set_rv = em_gio_set;
+ gpio_chip->set = em_gio_set;
gpio_chip->to_irq = em_gio_to_irq;
gpio_chip->request = pinctrl_gpio_request;
gpio_chip->free = em_gio_free;
@@ -325,8 +325,7 @@ static int em_gio_probe(struct platform_device *pdev)
irq_chip->irq_release_resources = em_gio_irq_relres;
irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;
- p->irq_domain = irq_domain_create_simple(of_fwnode_handle(dev->of_node),
- ngpios, 0,
+ p->irq_domain = irq_domain_create_simple(dev_fwnode(dev), ngpios, 0,
&em_gio_irq_domain_ops, p);
if (!p->irq_domain) {
dev_err(dev, "cannot initialize irq domain\n");
diff --git a/drivers/gpio/gpio-en7523.c b/drivers/gpio/gpio-en7523.c
index 69834db2c1cf..cf47afc578a9 100644
--- a/drivers/gpio/gpio-en7523.c
+++ b/drivers/gpio/gpio-en7523.c
@@ -4,6 +4,7 @@
#include <linux/io.h>
#include <linux/bits.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -13,28 +14,23 @@
/**
* struct airoha_gpio_ctrl - Airoha GPIO driver data
- * @gc: Associated gpio_chip instance.
+ * @gen_gc: Associated gpio_generic_chip instance.
* @data: The data register.
* @dir: [0] The direction register for the lower 16 pins.
* [1] The direction register for the higher 16 pins.
* @output: The output enable register.
*/
struct airoha_gpio_ctrl {
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
void __iomem *data;
void __iomem *dir[2];
void __iomem *output;
};
-static struct airoha_gpio_ctrl *gc_to_ctrl(struct gpio_chip *gc)
-{
- return container_of(gc, struct airoha_gpio_ctrl, gc);
-}
-
static int airoha_dir_set(struct gpio_chip *gc, unsigned int gpio,
int val, int out)
{
- struct airoha_gpio_ctrl *ctrl = gc_to_ctrl(gc);
+ struct airoha_gpio_ctrl *ctrl = gpiochip_get_data(gc);
u32 dir = ioread32(ctrl->dir[gpio / 16]);
u32 output = ioread32(ctrl->output);
u32 mask = BIT((gpio % 16) * 2);
@@ -50,7 +46,7 @@ static int airoha_dir_set(struct gpio_chip *gc, unsigned int gpio,
iowrite32(dir, ctrl->dir[gpio / 16]);
if (out)
- gc->set(gc, gpio, val);
+ gpio_generic_chip_set(&ctrl->gen_gc, gpio, val);
iowrite32(output, ctrl->output);
@@ -70,7 +66,7 @@ static int airoha_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int airoha_get_dir(struct gpio_chip *gc, unsigned int gpio)
{
- struct airoha_gpio_ctrl *ctrl = gc_to_ctrl(gc);
+ struct airoha_gpio_ctrl *ctrl = gpiochip_get_data(gc);
u32 dir = ioread32(ctrl->dir[gpio / 16]);
u32 mask = BIT((gpio % 16) * 2);
@@ -79,6 +75,7 @@ static int airoha_get_dir(struct gpio_chip *gc, unsigned int gpio)
static int airoha_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config = { };
struct device *dev = &pdev->dev;
struct airoha_gpio_ctrl *ctrl;
int err;
@@ -103,18 +100,21 @@ static int airoha_gpio_probe(struct platform_device *pdev)
if (IS_ERR(ctrl->output))
return PTR_ERR(ctrl->output);
- err = bgpio_init(&ctrl->gc, dev, 4, ctrl->data, NULL,
- NULL, NULL, NULL, 0);
+ config.dev = dev;
+ config.sz = 4;
+ config.dat = ctrl->data;
+
+ err = gpio_generic_chip_init(&ctrl->gen_gc, &config);
if (err)
return dev_err_probe(dev, err, "unable to init generic GPIO");
- ctrl->gc.ngpio = AIROHA_GPIO_MAX;
- ctrl->gc.owner = THIS_MODULE;
- ctrl->gc.direction_output = airoha_dir_out;
- ctrl->gc.direction_input = airoha_dir_in;
- ctrl->gc.get_direction = airoha_get_dir;
+ ctrl->gen_gc.gc.ngpio = AIROHA_GPIO_MAX;
+ ctrl->gen_gc.gc.owner = THIS_MODULE;
+ ctrl->gen_gc.gc.direction_output = airoha_dir_out;
+ ctrl->gen_gc.gc.direction_input = airoha_dir_in;
+ ctrl->gen_gc.gc.get_direction = airoha_get_dir;
- return devm_gpiochip_add_data(dev, &ctrl->gc, ctrl);
+ return devm_gpiochip_add_data(dev, &ctrl->gen_gc.gc, ctrl);
}
static const struct of_device_id airoha_gpio_of_match[] = {
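
en7523 shows the core mechanical step of the series: bgpio_init()'s seven
positional arguments become named fields of gpio_generic_chip_config, unused
register slots are simply left out, and the gpio_chip is reached through
chip.gc afterwards. A condensed sketch (foo names and offsets hypothetical):

#include <linux/gpio/driver.h>
#include <linux/gpio/generic.h>
#include <linux/module.h>

#define FOO_DATA_IN	0x00	/* hypothetical register offsets */
#define FOO_DATA_SET	0x04
#define FOO_DATA_CLR	0x08
#define FOO_DIR		0x0c

struct foo_gpio {
	struct gpio_generic_chip chip;
	void __iomem *regs;
};

static int foo_gpio_setup(struct device *dev, struct foo_gpio *foo)
{
	struct gpio_generic_chip_config config = {
		.dev = dev,
		.sz = 4,	/* 32-bit registers */
		.dat = foo->regs + FOO_DATA_IN,
		.set = foo->regs + FOO_DATA_SET,
		.clr = foo->regs + FOO_DATA_CLR,
		.dirout = foo->regs + FOO_DIR,
	};
	int ret;

	ret = gpio_generic_chip_init(&foo->chip, &config);
	if (ret)
		return ret;

	foo->chip.gc.owner = THIS_MODULE;

	return devm_gpiochip_add_data(dev, &foo->chip.gc, foo);
}
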
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 58d2464c07bc..1f56e44ffc9a 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -9,16 +9,17 @@
* linux/arch/arm/mach-ep93xx/core.c
*/
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/slab.h>
-#include <linux/gpio/driver.h>
-#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
struct ep93xx_gpio_irq_chip {
void __iomem *base;
@@ -31,11 +32,14 @@ struct ep93xx_gpio_irq_chip {
struct ep93xx_gpio_chip {
void __iomem *base;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct ep93xx_gpio_irq_chip *eic;
};
-#define to_ep93xx_gpio_chip(x) container_of(x, struct ep93xx_gpio_chip, gc)
+static struct ep93xx_gpio_chip *to_ep93xx_gpio_chip(struct gpio_chip *gc)
+{
+ return container_of(to_gpio_generic_chip(gc), struct ep93xx_gpio_chip, chip);
+}
static struct ep93xx_gpio_irq_chip *to_ep93xx_gpio_irq_chip(struct gpio_chip *gc)
{
@@ -267,7 +271,7 @@ static const struct irq_chip gpio_eic_irq_chip = {
static int ep93xx_setup_irqs(struct platform_device *pdev,
struct ep93xx_gpio_chip *egc)
{
- struct gpio_chip *gc = &egc->gc;
+ struct gpio_chip *gc = &egc->chip.gc;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq = &gc->irq;
int ret, irq, i;
@@ -327,6 +331,7 @@ static int ep93xx_setup_irqs(struct platform_device *pdev,
static int ep93xx_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct ep93xx_gpio_chip *egc;
struct gpio_chip *gc;
void __iomem *data;
@@ -345,8 +350,16 @@ static int ep93xx_gpio_probe(struct platform_device *pdev)
if (IS_ERR(dir))
return PTR_ERR(dir);
- gc = &egc->gc;
- ret = bgpio_init(gc, &pdev->dev, 1, data, NULL, NULL, dir, NULL, 0);
+ gc = &egc->chip.gc;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 1,
+ .dat = data,
+ .dirout = dir,
+ };
+
+ ret = gpio_generic_chip_init(&egc->chip, &config);
if (ret)
return dev_err_probe(&pdev->dev, ret, "unable to init generic GPIO\n");
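
The ep93xx change to the container_of helper is worth calling out: gpio_chip
is no longer the struct embedded directly in the driver data, so the lookup
now goes gpio_chip to gpio_generic_chip (via to_gpio_generic_chip()) to the
driver struct. A sketch, assuming the chip member is named as in these
drivers:

#include <linux/gpio/driver.h>
#include <linux/gpio/generic.h>

struct foo_gpio {
	void __iomem *base;
	struct gpio_generic_chip chip;
};

/* Two-step recovery of the driver data from a bare gpio_chip pointer. */
static struct foo_gpio *to_foo_gpio(struct gpio_chip *gc)
{
	return container_of(to_gpio_generic_chip(gc), struct foo_gpio, chip);
}

The hisi hunks below apply the same two-step container_of inline.
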
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index beb98286d13e..9053662f1817 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -211,7 +211,7 @@ static int gpio_exar_probe(struct platform_device *pdev)
exar_gpio->gpio_chip.direction_input = exar_direction_input;
exar_gpio->gpio_chip.get_direction = exar_get_direction;
exar_gpio->gpio_chip.get = exar_get_value;
- exar_gpio->gpio_chip.set_rv = exar_set_value;
+ exar_gpio->gpio_chip.set = exar_set_value;
exar_gpio->gpio_chip.base = -1;
exar_gpio->gpio_chip.ngpio = ngpios;
exar_gpio->index = index;
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index dfcd3634f279..4d5b927ad70f 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -173,7 +173,7 @@ static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
.direction_input = f7188x_gpio_direction_in, \
.get = f7188x_gpio_get, \
.direction_output = f7188x_gpio_direction_out, \
- .set_rv = f7188x_gpio_set, \
+ .set = f7188x_gpio_set, \
.set_config = f7188x_gpio_set_config, \
.base = -1, \
.ngpio = _ngpio, \
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index c35eaa2851d8..11e6907c3b54 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -10,12 +10,14 @@
* MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
* Copyright 2008 Juergen Beisert, kernel@pengutronix.de
*/
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/gpio/driver.h>
-#include <linux/io.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
-#include <linux/bitops.h>
-#include <linux/clk.h>
/* GPIO registers definition */
#define GPIO_DATA_OUT 0x00
@@ -40,13 +42,13 @@
/**
* struct ftgpio_gpio - Gemini GPIO state container
* @dev: containing device for this instance
- * @gc: gpiochip for this instance
+ * @chip: generic GPIO chip for this instance
* @base: remapped I/O-memory base
* @clk: silicon clock
*/
struct ftgpio_gpio {
struct device *dev;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *base;
struct clk *clk;
};
@@ -233,6 +235,7 @@ static const struct irq_chip ftgpio_irq_chip = {
static int ftgpio_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct ftgpio_gpio *g;
struct gpio_irq_chip *girq;
@@ -261,27 +264,30 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
*/
return PTR_ERR(g->clk);
- ret = bgpio_init(&g->gc, dev, 4,
- g->base + GPIO_DATA_IN,
- g->base + GPIO_DATA_SET,
- g->base + GPIO_DATA_CLR,
- g->base + GPIO_DIR,
- NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = g->base + GPIO_DATA_IN,
+ .set = g->base + GPIO_DATA_SET,
+ .clr = g->base + GPIO_DATA_CLR,
+ .dirout = g->base + GPIO_DIR,
+ };
+
+ ret = gpio_generic_chip_init(&g->chip, &config);
if (ret)
return dev_err_probe(dev, ret, "unable to init generic GPIO\n");
- g->gc.label = dev_name(dev);
- g->gc.base = -1;
- g->gc.parent = dev;
- g->gc.owner = THIS_MODULE;
- /* ngpio is set by bgpio_init() */
+ g->chip.gc.label = dev_name(dev);
+ g->chip.gc.base = -1;
+ g->chip.gc.parent = dev;
+ g->chip.gc.owner = THIS_MODULE;
+ /* ngpio is set by gpio_generic_chip_init() */
/* We need a silicon clock to do debounce */
if (!IS_ERR(g->clk))
- g->gc.set_config = ftgpio_gpio_set_config;
+ g->chip.gc.set_config = ftgpio_gpio_set_config;
- girq = &g->gc.irq;
+ girq = &g->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &ftgpio_irq_chip);
girq->parent_handler = ftgpio_gpio_irq_handler;
girq->num_parents = 1;
@@ -302,7 +308,7 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
/* Clear any use of debounce */
writel(0x0, g->base + GPIO_DEBOUNCE_EN);
- return devm_gpiochip_add_data(dev, &g->gc, g);
+ return devm_gpiochip_add_data(dev, &g->chip.gc, g);
}
static const struct of_device_id ftgpio_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-fxl6408.c b/drivers/gpio/gpio-fxl6408.c
index 86ebc66b1104..afc1b8461dab 100644
--- a/drivers/gpio/gpio-fxl6408.c
+++ b/drivers/gpio/gpio-fxl6408.c
@@ -123,6 +123,8 @@ static int fxl6408_probe(struct i2c_client *client)
if (ret)
return ret;
+ i2c_set_clientdata(client, gpio_config.regmap);
+
/* Disable High-Z of outputs, so that our OUTPUT updates actually take effect. */
ret = regmap_write(gpio_config.regmap, FXL6408_REG_OUTPUT_HIGH_Z, 0);
if (ret)
@@ -131,6 +133,16 @@ static int fxl6408_probe(struct i2c_client *client)
return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
}
+static int fxl6408_resume(struct device *dev)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+
+ regcache_mark_dirty(regmap);
+ return regcache_sync(regmap);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(fxl6408_pm_ops, NULL, fxl6408_resume);
+
static const __maybe_unused struct of_device_id fxl6408_dt_ids[] = {
{ .compatible = "fcs,fxl6408" },
{ }
@@ -146,6 +158,7 @@ MODULE_DEVICE_TABLE(i2c, fxl6408_id);
static struct i2c_driver fxl6408_driver = {
.driver = {
.name = "fxl6408",
+ .pm = pm_sleep_ptr(&fxl6408_pm_ops),
.of_match_table = fxl6408_dt_ids,
},
.probe = fxl6408_probe,
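
The fxl6408 additions give the expander simple resume support: the regmap
cache is assumed to hold the last-written state, so marking it dirty and
syncing replays every register into a chip that may have lost power. This
only works if the regmap was created with a cache enabled; a hedged sketch of
the same flow with hypothetical names:

#include <linux/pm.h>
#include <linux/regmap.h>

static int foo_resume(struct device *dev)
{
	struct regmap *regmap = dev_get_drvdata(dev);

	/* Force every cached register to be rewritten to the hardware. */
	regcache_mark_dirty(regmap);
	return regcache_sync(regmap);
}

/* No suspend hook needed: the cache is kept up to date at write time. */
static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, NULL, foo_resume);
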
diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c
index 5dc49648d8e3..66bdff36eb61 100644
--- a/drivers/gpio/gpio-ge.c
+++ b/drivers/gpio/gpio-ge.c
@@ -16,6 +16,7 @@
*/
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
@@ -51,24 +52,36 @@ MODULE_DEVICE_TABLE(of, gef_gpio_ids);
static int __init gef_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
+ struct gpio_generic_chip *chip;
struct gpio_chip *gc;
void __iomem *regs;
int ret;
- gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
- if (!gc)
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
return -ENOMEM;
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
- ret = bgpio_init(gc, dev, 4, regs + GEF_GPIO_IN, regs + GEF_GPIO_OUT,
- NULL, NULL, regs + GEF_GPIO_DIRECT,
- BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = regs + GEF_GPIO_IN,
+ .set = regs + GEF_GPIO_OUT,
+ .dirin = regs + GEF_GPIO_DIRECT,
+ .flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER,
+ };
+
+ ret = gpio_generic_chip_init(chip, &config);
if (ret)
- return dev_err_probe(dev, ret, "bgpio_init failed\n");
+ return dev_err_probe(dev, ret,
+ "failed to initialize the generic GPIO chip\n");
+
+ gc = &chip->gc;
/* Setup pointers to chip functions */
gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", dev_fwnode(dev));
diff --git a/drivers/gpio/gpio-graniterapids.c b/drivers/gpio/gpio-graniterapids.c
index f25283e5239d..121bf29a27f5 100644
--- a/drivers/gpio/gpio-graniterapids.c
+++ b/drivers/gpio/gpio-graniterapids.c
@@ -159,7 +159,7 @@ static const struct gpio_chip gnr_gpio_chip = {
.owner = THIS_MODULE,
.request = gnr_gpio_request,
.get = gnr_gpio_get,
- .set_rv = gnr_gpio_set,
+ .set = gnr_gpio_set,
.get_direction = gnr_gpio_get_direction,
.direction_input = gnr_gpio_direction_input,
.direction_output = gnr_gpio_direction_output,
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index d38a2d9854ca..e4fa84e22726 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -19,6 +19,7 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -45,7 +46,7 @@
/* Structure for an irq of the core - called an underlying irq */
struct grgpio_uirq {
- u8 refcnt; /* Reference counter to manage requesting/freeing of uirq */
+ atomic_t refcnt; /* Reference counter to manage requesting/freeing of uirq */
u8 uirq; /* Underlying irq of the gpio driver */
};
@@ -59,7 +60,7 @@ struct grgpio_lirq {
};
struct grgpio_priv {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *regs;
struct device *dev;
@@ -91,13 +92,12 @@ struct grgpio_priv {
static void grgpio_set_imask(struct grgpio_priv *priv, unsigned int offset,
int val)
{
- struct gpio_chip *gc = &priv->gc;
-
if (val)
priv->imask |= BIT(offset);
else
priv->imask &= ~BIT(offset);
- gc->write_reg(priv->regs + GRGPIO_IMASK, priv->imask);
+
+ gpio_generic_write_reg(&priv->chip, priv->regs + GRGPIO_IMASK, priv->imask);
}
static int grgpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -118,7 +118,6 @@ static int grgpio_to_irq(struct gpio_chip *gc, unsigned offset)
static int grgpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
- unsigned long flags;
u32 mask = BIT(d->hwirq);
u32 ipol;
u32 iedge;
@@ -146,15 +145,13 @@ static int grgpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&priv->chip);
- ipol = priv->gc.read_reg(priv->regs + GRGPIO_IPOL) & ~mask;
- iedge = priv->gc.read_reg(priv->regs + GRGPIO_IEDGE) & ~mask;
+ ipol = gpio_generic_read_reg(&priv->chip, priv->regs + GRGPIO_IPOL) & ~mask;
+ iedge = gpio_generic_read_reg(&priv->chip, priv->regs + GRGPIO_IEDGE) & ~mask;
- priv->gc.write_reg(priv->regs + GRGPIO_IPOL, ipol | pol);
- priv->gc.write_reg(priv->regs + GRGPIO_IEDGE, iedge | edge);
-
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ gpio_generic_write_reg(&priv->chip, priv->regs + GRGPIO_IPOL, ipol | pol);
+ gpio_generic_write_reg(&priv->chip, priv->regs + GRGPIO_IEDGE, iedge | edge);
return 0;
}
@@ -163,29 +160,23 @@ static void grgpio_irq_mask(struct irq_data *d)
{
struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
int offset = d->hwirq;
- unsigned long flags;
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &priv->chip)
+ grgpio_set_imask(priv, offset, 0);
- grgpio_set_imask(priv, offset, 0);
-
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
-
- gpiochip_disable_irq(&priv->gc, d->hwirq);
+ gpiochip_disable_irq(&priv->chip.gc, d->hwirq);
}
static void grgpio_irq_unmask(struct irq_data *d)
{
struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
int offset = d->hwirq;
- unsigned long flags;
- gpiochip_enable_irq(&priv->gc, d->hwirq);
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ gpiochip_enable_irq(&priv->chip.gc, d->hwirq);
- grgpio_set_imask(priv, offset, 1);
+ guard(gpio_generic_lock_irqsave)(&priv->chip);
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ grgpio_set_imask(priv, offset, 1);
}
static const struct irq_chip grgpio_irq_chip = {
@@ -200,12 +191,11 @@ static const struct irq_chip grgpio_irq_chip = {
static irqreturn_t grgpio_irq_handler(int irq, void *dev)
{
struct grgpio_priv *priv = dev;
- int ngpio = priv->gc.ngpio;
- unsigned long flags;
+ int ngpio = priv->chip.gc.ngpio;
int i;
int match = 0;
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&priv->chip);
/*
* For each gpio line, call its interrupt handler if its underlying
@@ -221,8 +211,6 @@ static irqreturn_t grgpio_irq_handler(int irq, void *dev)
}
}
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
-
if (!match)
dev_warn(priv->dev, "No gpio line matched irq %d\n", irq);
@@ -253,26 +241,23 @@ static int grgpio_irq_map(struct irq_domain *d, unsigned int irq,
dev_dbg(priv->dev, "Mapping irq %d for gpio line %d\n",
irq, offset);
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
-
- /* Request underlying irq if not already requested */
+ gpio_generic_chip_lock_irqsave(&priv->chip, flags);
lirq->irq = irq;
uirq = &priv->uirqs[lirq->index];
- if (uirq->refcnt == 0) {
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ gpio_generic_chip_unlock_irqrestore(&priv->chip, flags);
+
+ /* Request underlying irq if not already requested */
+ if (atomic_fetch_add(1, &uirq->refcnt) == 0) {
ret = request_irq(uirq->uirq, grgpio_irq_handler, 0,
dev_name(priv->dev), priv);
if (ret) {
dev_err(priv->dev,
"Could not request underlying irq %d\n",
uirq->uirq);
+ atomic_dec(&uirq->refcnt); /* roll back the refcount on failure */
return ret;
}
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
}
- uirq->refcnt++;
-
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
/* Setup irq */
irq_set_chip_data(irq, priv);
@@ -290,13 +275,13 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
struct grgpio_lirq *lirq;
struct grgpio_uirq *uirq;
unsigned long flags;
- int ngpio = priv->gc.ngpio;
+ int ngpio = priv->chip.gc.ngpio;
int i;
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ gpio_generic_chip_lock_irqsave(&priv->chip, flags);
/* Free underlying irq if last user unmapped */
index = -1;
@@ -313,15 +298,14 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
if (index >= 0) {
uirq = &priv->uirqs[lirq->index];
- uirq->refcnt--;
- if (uirq->refcnt == 0) {
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ if (atomic_dec_and_test(&uirq->refcnt)) {
+ gpio_generic_chip_unlock_irqrestore(&priv->chip, flags);
free_irq(uirq->uirq, priv);
return;
}
}
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ gpio_generic_chip_unlock_irqrestore(&priv->chip, flags);
}
static void grgpio_irq_domain_remove(void *data)
@@ -341,6 +325,7 @@ static const struct irq_domain_ops grgpio_irq_domain_ops = {
static int grgpio_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
+ struct gpio_generic_chip_config config;
struct device *dev = &ofdev->dev;
void __iomem *regs;
struct gpio_chip *gc;
@@ -359,17 +344,24 @@ static int grgpio_probe(struct platform_device *ofdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
- gc = &priv->gc;
- err = bgpio_init(gc, dev, 4, regs + GRGPIO_DATA,
- regs + GRGPIO_OUTPUT, NULL, regs + GRGPIO_DIR, NULL,
- BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = regs + GRGPIO_DATA,
+ .set = regs + GRGPIO_OUTPUT,
+ .dirout = regs + GRGPIO_DIR,
+ .flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER,
+ };
+
+ gc = &priv->chip.gc;
+ err = gpio_generic_chip_init(&priv->chip, &config);
if (err) {
- dev_err(dev, "bgpio_init() failed\n");
+ dev_err(dev, "failed to initialize the generic GPIO chip\n");
return err;
}
priv->regs = regs;
- priv->imask = gc->read_reg(regs + GRGPIO_IMASK);
+ priv->imask = gpio_generic_read_reg(&priv->chip, regs + GRGPIO_IMASK);
priv->dev = dev;
gc->owner = THIS_MODULE;
@@ -402,9 +394,8 @@ static int grgpio_probe(struct platform_device *ofdev)
return -EINVAL;
}
- priv->domain = irq_domain_create_linear(of_fwnode_handle(np), gc->ngpio,
- &grgpio_irq_domain_ops,
- priv);
+ priv->domain = irq_domain_create_linear(dev_fwnode(&ofdev->dev), gc->ngpio,
+ &grgpio_irq_domain_ops, priv);
if (!priv->domain) {
dev_err(dev, "Could not add irq domain\n");
return -EINVAL;
@@ -434,6 +425,7 @@ static int grgpio_probe(struct platform_device *ofdev)
continue;
}
priv->uirqs[lirq->index].uirq = ret;
+ atomic_set(&priv->uirqs[lirq->index].refcnt, 0);
}
}
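
The grgpio rework above solves a locking problem rather than a style one:
request_irq() can sleep, so it must not run under the raw generic-chip
spinlock, and turning the u8 refcount into an atomic_t lets the request
happen outside the critical section. A reduced sketch of the
first-user-requests pattern (foo names hypothetical):

#include <linux/atomic.h>
#include <linux/interrupt.h>

struct foo_uirq {
	atomic_t refcnt;
	unsigned int irq;
};

static irqreturn_t foo_handler(int irq, void *cookie)
{
	return IRQ_HANDLED;
}

static int foo_get_parent_irq(struct foo_uirq *uirq, void *cookie)
{
	int ret;

	/* Only the mapper that takes the count from 0 to 1 requests. */
	if (atomic_fetch_add(1, &uirq->refcnt) == 0) {
		ret = request_irq(uirq->irq, foo_handler, 0, "foo", cookie);
		if (ret) {
			atomic_dec(&uirq->refcnt);	/* roll back */
			return ret;
		}
	}

	return 0;
}

The same hunk also replaces direct gc->read_reg()/write_reg() calls with the
gpio_generic_read_reg()/gpio_generic_write_reg() accessors, keeping the
driver off gpio_chip internals.
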
diff --git a/drivers/gpio/gpio-gw-pld.c b/drivers/gpio/gpio-gw-pld.c
index a40ba99a3aea..2e5d97b7363f 100644
--- a/drivers/gpio/gpio-gw-pld.c
+++ b/drivers/gpio/gpio-gw-pld.c
@@ -86,7 +86,7 @@ static int gw_pld_probe(struct i2c_client *client)
gw->chip.direction_input = gw_pld_input8;
gw->chip.get = gw_pld_get8;
gw->chip.direction_output = gw_pld_output8;
- gw->chip.set_rv = gw_pld_set8;
+ gw->chip.set = gw_pld_set8;
gw->client = client;
/*
diff --git a/drivers/gpio/gpio-hisi.c b/drivers/gpio/gpio-hisi.c
index ef5cc654a24e..d26298c8351b 100644
--- a/drivers/gpio/gpio-hisi.c
+++ b/drivers/gpio/gpio-hisi.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 HiSilicon Limited. */
+
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
@@ -33,7 +35,7 @@
#define HISI_GPIO_DRIVER_NAME "gpio-hisi"
struct hisi_gpio {
- struct gpio_chip chip;
+ struct gpio_generic_chip chip;
struct device *dev;
void __iomem *reg_base;
unsigned int line_num;
@@ -43,8 +45,8 @@ struct hisi_gpio {
static inline u32 hisi_gpio_read_reg(struct gpio_chip *chip,
unsigned int off)
{
- struct hisi_gpio *hisi_gpio =
- container_of(chip, struct hisi_gpio, chip);
+ struct hisi_gpio *hisi_gpio = container_of(to_gpio_generic_chip(chip),
+ struct hisi_gpio, chip);
void __iomem *reg = hisi_gpio->reg_base + off;
return readl(reg);
@@ -53,8 +55,8 @@ static inline u32 hisi_gpio_read_reg(struct gpio_chip *chip,
static inline void hisi_gpio_write_reg(struct gpio_chip *chip,
unsigned int off, u32 val)
{
- struct hisi_gpio *hisi_gpio =
- container_of(chip, struct hisi_gpio, chip);
+ struct hisi_gpio *hisi_gpio = container_of(to_gpio_generic_chip(chip),
+ struct hisi_gpio, chip);
void __iomem *reg = hisi_gpio->reg_base + off;
writel(val, reg);
@@ -180,14 +182,14 @@ static void hisi_gpio_irq_disable(struct irq_data *d)
static void hisi_gpio_irq_handler(struct irq_desc *desc)
{
struct hisi_gpio *hisi_gpio = irq_desc_get_handler_data(desc);
- unsigned long irq_msk = hisi_gpio_read_reg(&hisi_gpio->chip,
+ unsigned long irq_msk = hisi_gpio_read_reg(&hisi_gpio->chip.gc,
HISI_GPIO_INTSTATUS_WX);
struct irq_chip *irq_c = irq_desc_get_chip(desc);
int hwirq;
chained_irq_enter(irq_c, desc);
for_each_set_bit(hwirq, &irq_msk, HISI_GPIO_LINE_NUM_MAX)
- generic_handle_domain_irq(hisi_gpio->chip.irq.domain,
+ generic_handle_domain_irq(hisi_gpio->chip.gc.irq.domain,
hwirq);
chained_irq_exit(irq_c, desc);
}
@@ -206,7 +208,7 @@ static const struct irq_chip hisi_gpio_irq_chip = {
static void hisi_gpio_init_irq(struct hisi_gpio *hisi_gpio)
{
- struct gpio_chip *chip = &hisi_gpio->chip;
+ struct gpio_chip *chip = &hisi_gpio->chip.gc;
struct gpio_irq_chip *girq_chip = &chip->irq;
gpio_irq_chip_set_chip(girq_chip, &hisi_gpio_irq_chip);
@@ -264,6 +266,7 @@ static void hisi_gpio_get_pdata(struct device *dev,
static int hisi_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct hisi_gpio *hisi_gpio;
int port_num;
@@ -289,27 +292,32 @@ static int hisi_gpio_probe(struct platform_device *pdev)
hisi_gpio->dev = dev;
- ret = bgpio_init(&hisi_gpio->chip, hisi_gpio->dev, 0x4,
- hisi_gpio->reg_base + HISI_GPIO_EXT_PORT_WX,
- hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_SET_WX,
- hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_CLR_WX,
- hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_SET_WX,
- hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_CLR_WX,
- BGPIOF_NO_SET_ON_INPUT);
+ config = (struct gpio_generic_chip_config) {
+ .dev = hisi_gpio->dev,
+ .sz = 4,
+ .dat = hisi_gpio->reg_base + HISI_GPIO_EXT_PORT_WX,
+ .set = hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_SET_WX,
+ .clr = hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_CLR_WX,
+ .dirout = hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_SET_WX,
+ .dirin = hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_CLR_WX,
+ .flags = GPIO_GENERIC_NO_SET_ON_INPUT |
+ GPIO_GENERIC_UNREADABLE_REG_DIR,
+ };
+
+ ret = gpio_generic_chip_init(&hisi_gpio->chip, &config);
if (ret) {
dev_err(dev, "failed to init, ret = %d\n", ret);
return ret;
}
- hisi_gpio->chip.set_config = hisi_gpio_set_config;
- hisi_gpio->chip.ngpio = hisi_gpio->line_num;
- hisi_gpio->chip.bgpio_dir_unreadable = 1;
- hisi_gpio->chip.base = -1;
+ hisi_gpio->chip.gc.set_config = hisi_gpio_set_config;
+ hisi_gpio->chip.gc.ngpio = hisi_gpio->line_num;
+ hisi_gpio->chip.gc.base = -1;
if (hisi_gpio->irq > 0)
hisi_gpio_init_irq(hisi_gpio);
- ret = devm_gpiochip_add_data(dev, &hisi_gpio->chip, hisi_gpio);
+ ret = devm_gpiochip_add_data(dev, &hisi_gpio->chip.gc, hisi_gpio);
if (ret) {
dev_err(dev, "failed to register gpiochip, ret = %d\n", ret);
return ret;
diff --git a/drivers/gpio/gpio-hlwd.c b/drivers/gpio/gpio-hlwd.c
index 0580f6712bea..043ce5ef3b07 100644
--- a/drivers/gpio/gpio-hlwd.c
+++ b/drivers/gpio/gpio-hlwd.c
@@ -6,6 +6,7 @@
// Nintendo Wii (Hollywood) GPIO driver
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -48,7 +49,7 @@
#define HW_GPIO_OWNER 0x3c
struct hlwd_gpio {
- struct gpio_chip gpioc;
+ struct gpio_generic_chip gpioc;
struct device *dev;
void __iomem *regs;
int irq;
@@ -61,45 +62,44 @@ static void hlwd_gpio_irqhandler(struct irq_desc *desc)
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_desc_get_handler_data(desc));
struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned long flags;
unsigned long pending;
int hwirq;
u32 emulated_pending;
- raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
- pending = ioread32be(hlwd->regs + HW_GPIOB_INTFLAG);
- pending &= ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
+ scoped_guard(gpio_generic_lock_irqsave, &hlwd->gpioc) {
+ pending = ioread32be(hlwd->regs + HW_GPIOB_INTFLAG);
+ pending &= ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
- /* Treat interrupts due to edge trigger emulation separately */
- emulated_pending = hlwd->edge_emulation & pending;
- pending &= ~emulated_pending;
- if (emulated_pending) {
- u32 level, rising, falling;
+ /* Treat interrupts due to edge trigger emulation separately */
+ emulated_pending = hlwd->edge_emulation & pending;
+ pending &= ~emulated_pending;
+ if (emulated_pending) {
+ u32 level, rising, falling;
- level = ioread32be(hlwd->regs + HW_GPIOB_INTLVL);
- rising = level & emulated_pending;
- falling = ~level & emulated_pending;
+ level = ioread32be(hlwd->regs + HW_GPIOB_INTLVL);
+ rising = level & emulated_pending;
+ falling = ~level & emulated_pending;
- /* Invert the levels */
- iowrite32be(level ^ emulated_pending,
- hlwd->regs + HW_GPIOB_INTLVL);
+ /* Invert the levels */
+ iowrite32be(level ^ emulated_pending,
+ hlwd->regs + HW_GPIOB_INTLVL);
- /* Ack all emulated-edge interrupts */
- iowrite32be(emulated_pending, hlwd->regs + HW_GPIOB_INTFLAG);
+ /* Ack all emulated-edge interrupts */
+ iowrite32be(emulated_pending, hlwd->regs + HW_GPIOB_INTFLAG);
- /* Signal interrupts only on the correct edge */
- rising &= hlwd->rising_edge;
- falling &= hlwd->falling_edge;
+ /* Signal interrupts only on the correct edge */
+ rising &= hlwd->rising_edge;
+ falling &= hlwd->falling_edge;
- /* Mark emulated interrupts as pending */
- pending |= rising | falling;
+ /* Mark emulated interrupts as pending */
+ pending |= rising | falling;
+ }
}
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
chained_irq_enter(chip, desc);
for_each_set_bit(hwirq, &pending, 32)
- generic_handle_domain_irq(hlwd->gpioc.irq.domain, hwirq);
+ generic_handle_domain_irq(hlwd->gpioc.gc.irq.domain, hwirq);
chained_irq_exit(chip, desc);
}
@@ -116,30 +116,29 @@ static void hlwd_gpio_irq_mask(struct irq_data *data)
{
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- unsigned long flags;
u32 mask;
- raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
- mask = ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
- mask &= ~BIT(data->hwirq);
- iowrite32be(mask, hlwd->regs + HW_GPIOB_INTMASK);
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
- gpiochip_disable_irq(&hlwd->gpioc, irqd_to_hwirq(data));
+ scoped_guard(gpio_generic_lock_irqsave, &hlwd->gpioc) {
+ mask = ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
+ mask &= ~BIT(data->hwirq);
+ iowrite32be(mask, hlwd->regs + HW_GPIOB_INTMASK);
+ }
+ gpiochip_disable_irq(&hlwd->gpioc.gc, irqd_to_hwirq(data));
}
static void hlwd_gpio_irq_unmask(struct irq_data *data)
{
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- unsigned long flags;
u32 mask;
- gpiochip_enable_irq(&hlwd->gpioc, irqd_to_hwirq(data));
- raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
+ gpiochip_enable_irq(&hlwd->gpioc.gc, irqd_to_hwirq(data));
+
+ guard(gpio_generic_lock_irqsave)(&hlwd->gpioc);
+
mask = ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
mask |= BIT(data->hwirq);
iowrite32be(mask, hlwd->regs + HW_GPIOB_INTMASK);
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
}
static void hlwd_gpio_irq_enable(struct irq_data *data)
@@ -173,10 +172,9 @@ static int hlwd_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- unsigned long flags;
u32 level;
- raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&hlwd->gpioc);
hlwd->edge_emulation &= ~BIT(data->hwirq);
@@ -197,11 +195,9 @@ static int hlwd_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
hlwd_gpio_irq_setup_emulation(hlwd, data->hwirq, flow_type);
break;
default:
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
return -EINVAL;
}
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
return 0;
}
@@ -225,6 +221,7 @@ static const struct irq_chip hlwd_gpio_irq_chip = {
static int hlwd_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct hlwd_gpio *hlwd;
u32 ngpios;
int res;
@@ -244,25 +241,31 @@ static int hlwd_gpio_probe(struct platform_device *pdev)
* systems where the AHBPROT memory firewall hasn't been configured to
* permit PPC access to HW_GPIO_*.
*
- * Note that this has to happen before bgpio_init reads the
- * HW_GPIOB_OUT and HW_GPIOB_DIR, because otherwise it reads the wrong
- * values.
+ * Note that this has to happen before gpio_generic_chip_init() reads
+ * the HW_GPIOB_OUT and HW_GPIOB_DIR, because otherwise it reads the
+ * wrong values.
*/
iowrite32be(0xffffffff, hlwd->regs + HW_GPIO_OWNER);
- res = bgpio_init(&hlwd->gpioc, &pdev->dev, 4,
- hlwd->regs + HW_GPIOB_IN, hlwd->regs + HW_GPIOB_OUT,
- NULL, hlwd->regs + HW_GPIOB_DIR, NULL,
- BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = hlwd->regs + HW_GPIOB_IN,
+ .set = hlwd->regs + HW_GPIOB_OUT,
+ .dirout = hlwd->regs + HW_GPIOB_DIR,
+ .flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER,
+ };
+
+ res = gpio_generic_chip_init(&hlwd->gpioc, &config);
if (res < 0) {
- dev_warn(&pdev->dev, "bgpio_init failed: %d\n", res);
+ dev_warn(&pdev->dev, "failed to initialize generic GPIO chip: %d\n", res);
return res;
}
res = of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios);
if (res)
ngpios = 32;
- hlwd->gpioc.ngpio = ngpios;
+ hlwd->gpioc.gc.ngpio = ngpios;
/* Mask and ack all interrupts */
iowrite32be(0, hlwd->regs + HW_GPIOB_INTMASK);
@@ -282,7 +285,7 @@ static int hlwd_gpio_probe(struct platform_device *pdev)
return hlwd->irq;
}
- girq = &hlwd->gpioc.irq;
+ girq = &hlwd->gpioc.gc.irq;
gpio_irq_chip_set_chip(girq, &hlwd_gpio_irq_chip);
girq->parent_handler = hlwd_gpio_irqhandler;
girq->num_parents = 1;
@@ -296,7 +299,7 @@ static int hlwd_gpio_probe(struct platform_device *pdev)
girq->handler = handle_level_irq;
}
- return devm_gpiochip_add_data(&pdev->dev, &hlwd->gpioc, hlwd);
+ return devm_gpiochip_add_data(&pdev->dev, &hlwd->gpioc.gc, hlwd);
}
static const struct of_device_id hlwd_gpio_match[] = {
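
hlwd demonstrates the scoped_guard() variant: the register reads that need
the lock sit inside the braces, and the chained-IRQ dispatch runs with the
lock already released, which the old code achieved with a manual unlock
half-way through the function. A sketch under the same assumptions as the
earlier guard example (foo names and offsets hypothetical):

#include <linux/bitops.h>
#include <linux/cleanup.h>
#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/irqdesc.h>

#define FOO_INTFLAG	0x10	/* hypothetical register offsets */
#define FOO_INTMASK	0x14

struct foo_gpio {
	struct gpio_generic_chip chip;
	void __iomem *regs;
};

static void foo_dispatch_pending(struct foo_gpio *foo)
{
	unsigned long pending;
	int hwirq;

	/* The critical section is exactly the braces, nothing more. */
	scoped_guard(gpio_generic_lock_irqsave, &foo->chip) {
		pending = ioread32be(foo->regs + FOO_INTFLAG);
		pending &= ioread32be(foo->regs + FOO_INTMASK);
	}

	/* Dispatch with the lock released. */
	for_each_set_bit(hwirq, &pending, 32)
		generic_handle_domain_irq(foo->chip.gc.irq.domain, hwirq);
}
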
diff --git a/drivers/gpio/gpio-htc-egpio.c b/drivers/gpio/gpio-htc-egpio.c
index b1844a676c7c..72935d6dbebf 100644
--- a/drivers/gpio/gpio-htc-egpio.c
+++ b/drivers/gpio/gpio-htc-egpio.c
@@ -324,7 +324,7 @@ static int __init egpio_probe(struct platform_device *pdev)
chip->parent = &pdev->dev;
chip->owner = THIS_MODULE;
chip->get = egpio_get;
- chip->set_rv = egpio_set;
+ chip->set = egpio_set;
chip->direction_input = egpio_direction_input;
chip->direction_output = egpio_direction_output;
chip->get_direction = egpio_get_direction;
@@ -364,21 +364,20 @@ static int __init egpio_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int egpio_suspend(struct platform_device *pdev, pm_message_t state)
+static int egpio_suspend(struct device *dev)
{
- struct egpio_info *ei = platform_get_drvdata(pdev);
+ struct egpio_info *ei = dev_get_drvdata(dev);
- if (ei->chained_irq && device_may_wakeup(&pdev->dev))
+ if (ei->chained_irq && device_may_wakeup(dev))
enable_irq_wake(ei->chained_irq);
return 0;
}
-static int egpio_resume(struct platform_device *pdev)
+static int egpio_resume(struct device *dev)
{
- struct egpio_info *ei = platform_get_drvdata(pdev);
+ struct egpio_info *ei = dev_get_drvdata(dev);
- if (ei->chained_irq && device_may_wakeup(&pdev->dev))
+ if (ei->chained_irq && device_may_wakeup(dev))
disable_irq_wake(ei->chained_irq);
/* Update registers from the cache, in case
@@ -386,19 +385,15 @@ static int egpio_resume(struct platform_device *pdev)
egpio_write_cache(ei);
return 0;
}
-#else
-#define egpio_suspend NULL
-#define egpio_resume NULL
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(egpio_pm_ops, egpio_suspend, egpio_resume);
static struct platform_driver egpio_driver = {
.driver = {
.name = "htc-egpio",
.suppress_bind_attrs = true,
+ .pm = pm_sleep_ptr(&egpio_pm_ops),
},
- .suspend = egpio_suspend,
- .resume = egpio_resume,
};
static int __init egpio_init(void)
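
htc-egpio drops the last legacy platform_driver suspend/resume hooks in this
batch. The replacement callbacks take a struct device, fetch drvdata with
dev_get_drvdata(), and are wrapped so the whole block compiles out when
CONFIG_PM_SLEEP is off, which the removed #ifdef used to do by hand. A sketch
of the converted shape (foo names hypothetical, .probe elided):

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

struct foo_priv {
	int wake_irq;
};

static int foo_suspend(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(priv->wake_irq);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(priv->wake_irq);
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		/* NULL when CONFIG_PM_SLEEP is disabled. */
		.pm = pm_sleep_ptr(&foo_pm_ops),
	},
};
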
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 67089b2423d8..1802c9116ffe 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -273,7 +273,7 @@ static void ichx_gpiolib_setup(struct gpio_chip *chip)
chip->get = ichx_priv.desc->get ?
ichx_priv.desc->get : ichx_gpio_get;
- chip->set_rv = ichx_gpio_set;
+ chip->set = ichx_gpio_set;
chip->get_direction = ichx_gpio_get_direction;
chip->direction_input = ichx_gpio_direction_input;
chip->direction_output = ichx_gpio_direction_output;
diff --git a/drivers/gpio/gpio-idio-16.c b/drivers/gpio/gpio-idio-16.c
index 0103be977c66..4fbae6f6a497 100644
--- a/drivers/gpio/gpio-idio-16.c
+++ b/drivers/gpio/gpio-idio-16.c
@@ -6,6 +6,7 @@
#define DEFAULT_SYMBOL_NAMESPACE "GPIO_IDIO_16"
+#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -107,6 +108,7 @@ int devm_idio_16_regmap_register(struct device *const dev,
struct idio_16_data *data;
struct regmap_irq_chip *chip;
struct regmap_irq_chip_data *chip_data;
+ DECLARE_BITMAP(fixed_direction_output, IDIO_16_NGPIO);
if (!config->parent)
return -EINVAL;
@@ -164,6 +166,9 @@ int devm_idio_16_regmap_register(struct device *const dev,
gpio_config.irq_domain = regmap_irq_get_domain(chip_data);
gpio_config.reg_mask_xlate = idio_16_reg_mask_xlate;
+ bitmap_from_u64(fixed_direction_output, GENMASK_U64(15, 0));
+ gpio_config.fixed_direction_output = fixed_direction_output;
+
return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
}
EXPORT_SYMBOL_GPL(devm_idio_16_regmap_register);
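
The idio-16 change teaches gpio-regmap about lines whose direction is fixed
in hardware: a bitmap of permanently-output lines is handed to the config
instead of faking a direction register. A sketch, assuming (as the stack
allocation above implies) that gpio-regmap copies the bitmap during
registration; foo names are hypothetical:

#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/regmap.h>

#define FOO_NGPIO 16

static int foo_register(struct device *dev, struct gpio_regmap_config *cfg)
{
	DECLARE_BITMAP(fixed_direction_output, FOO_NGPIO);

	/* All 16 lines are hardwired as outputs on this device. */
	bitmap_from_u64(fixed_direction_output, GENMASK_U64(15, 0));
	cfg->fixed_direction_output = fixed_direction_output;

	return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, cfg));
}
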
diff --git a/drivers/gpio/gpio-idt3243x.c b/drivers/gpio/gpio-idt3243x.c
index 535f25514455..56f1f1e57b69 100644
--- a/drivers/gpio/gpio-idt3243x.c
+++ b/drivers/gpio/gpio-idt3243x.c
@@ -3,6 +3,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -18,7 +19,7 @@
#define IDT_GPIO_ISTAT 0x0C
struct idt_gpio_ctrl {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *pic;
void __iomem *gpio;
u32 mask_cache;
@@ -50,14 +51,13 @@ static int idt_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc);
unsigned int sense = flow_type & IRQ_TYPE_SENSE_MASK;
- unsigned long flags;
u32 ilevel;
/* hardware only supports level triggered */
if (sense == IRQ_TYPE_NONE || (sense & IRQ_TYPE_EDGE_BOTH))
return -EINVAL;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
ilevel = readl(ctrl->gpio + IDT_GPIO_ILEVEL);
if (sense & IRQ_TYPE_LEVEL_HIGH)
@@ -68,7 +68,6 @@ static int idt_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
writel(ilevel, ctrl->gpio + IDT_GPIO_ILEVEL);
irq_set_handler_locked(d, handle_level_irq);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return 0;
}
@@ -84,14 +83,11 @@ static void idt_gpio_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc);
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
-
- ctrl->mask_cache |= BIT(d->hwirq);
- writel(ctrl->mask_cache, ctrl->pic + IDT_PIC_IRQ_MASK);
-
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &ctrl->chip) {
+ ctrl->mask_cache |= BIT(d->hwirq);
+ writel(ctrl->mask_cache, ctrl->pic + IDT_PIC_IRQ_MASK);
+ }
gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
@@ -100,15 +96,13 @@ static void idt_gpio_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc);
- unsigned long flags;
gpiochip_enable_irq(gc, irqd_to_hwirq(d));
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
ctrl->mask_cache &= ~BIT(d->hwirq);
writel(ctrl->mask_cache, ctrl->pic + IDT_PIC_IRQ_MASK);
-
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static int idt_gpio_irq_init_hw(struct gpio_chip *gc)
@@ -134,6 +128,7 @@ static const struct irq_chip idt_gpio_irqchip = {
static int idt_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
struct idt_gpio_ctrl *ctrl;
@@ -150,18 +145,24 @@ static int idt_gpio_probe(struct platform_device *pdev)
if (IS_ERR(ctrl->gpio))
return PTR_ERR(ctrl->gpio);
- ctrl->gc.parent = dev;
+ ctrl->chip.gc.parent = dev;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = ctrl->gpio + IDT_GPIO_DATA,
+ .dirout = ctrl->gpio + IDT_GPIO_DIR,
+ };
- ret = bgpio_init(&ctrl->gc, &pdev->dev, 4, ctrl->gpio + IDT_GPIO_DATA,
- NULL, NULL, ctrl->gpio + IDT_GPIO_DIR, NULL, 0);
+ ret = gpio_generic_chip_init(&ctrl->chip, &config);
if (ret) {
- dev_err(dev, "bgpio_init failed\n");
+ dev_err(dev, "failed to initialize the generic GPIO chip\n");
return ret;
}
ret = device_property_read_u32(dev, "ngpios", &ngpios);
if (!ret)
- ctrl->gc.ngpio = ngpios;
+ ctrl->chip.gc.ngpio = ngpios;
if (device_property_read_bool(dev, "interrupt-controller")) {
ctrl->pic = devm_platform_ioremap_resource_byname(pdev, "pic");
@@ -172,7 +173,7 @@ static int idt_gpio_probe(struct platform_device *pdev)
if (parent_irq < 0)
return parent_irq;
- girq = &ctrl->gc.irq;
+ girq = &ctrl->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &idt_gpio_irqchip);
girq->init_hw = idt_gpio_irq_init_hw;
girq->parent_handler = idt_gpio_dispatch;
@@ -188,7 +189,7 @@ static int idt_gpio_probe(struct platform_device *pdev)
girq->handler = handle_bad_irq;
}
- return devm_gpiochip_add_data(&pdev->dev, &ctrl->gc, ctrl);
+ return devm_gpiochip_add_data(&pdev->dev, &ctrl->chip.gc, ctrl);
}
static const struct of_device_id idt_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-imx-scu.c b/drivers/gpio/gpio-imx-scu.c
index 1693dbf1b777..0a75afecf9f8 100644
--- a/drivers/gpio/gpio-imx-scu.c
+++ b/drivers/gpio/gpio-imx-scu.c
@@ -102,7 +102,7 @@ static int imx_scu_gpio_probe(struct platform_device *pdev)
gc->ngpio = ARRAY_SIZE(scu_rsrc_arr);
gc->label = dev_name(dev);
gc->get = imx_scu_gpio_get;
- gc->set_rv = imx_scu_gpio_set;
+ gc->set = imx_scu_gpio_set;
gc->get_direction = imx_scu_gpio_get_direction;
platform_set_drvdata(pdev, priv);
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index d8184b527bac..5d677bcfccf2 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -267,7 +267,7 @@ static const struct gpio_chip it87_template_chip = {
.request = it87_gpio_request,
.get = it87_gpio_get,
.direction_input = it87_gpio_direction_in,
- .set_rv = it87_gpio_set,
+ .set = it87_gpio_set,
.direction_output = it87_gpio_direction_out,
.base = -1
};
diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
index 28a8a6a8f05f..f34d87869c8b 100644
--- a/drivers/gpio/gpio-ixp4xx.c
+++ b/drivers/gpio/gpio-ixp4xx.c
@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
@@ -53,14 +54,14 @@
/**
* struct ixp4xx_gpio - IXP4 GPIO state container
+ * @chip: generic GPIO chip for this instance
* @dev: containing device for this instance
- * @gc: gpiochip for this instance
* @base: remapped I/O-memory base
* @irq_edge: Each bit represents an IRQ: 1: edge-triggered,
* 0: level triggered
*/
struct ixp4xx_gpio {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct device *dev;
void __iomem *base;
unsigned long long irq_edge;
@@ -100,7 +101,6 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct ixp4xx_gpio *g = gpiochip_get_data(gc);
int line = d->hwirq;
- unsigned long flags;
u32 int_style;
u32 int_reg;
u32 val;
@@ -144,26 +144,24 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
int_reg = IXP4XX_REG_GPIT1;
}
- raw_spin_lock_irqsave(&g->gc.bgpio_lock, flags);
-
- /* Clear the style for the appropriate pin */
- val = __raw_readl(g->base + int_reg);
- val &= ~(IXP4XX_GPIO_STYLE_MASK << (line * IXP4XX_GPIO_STYLE_SIZE));
- __raw_writel(val, g->base + int_reg);
-
- __raw_writel(BIT(line), g->base + IXP4XX_REG_GPIS);
+ scoped_guard(gpio_generic_lock_irqsave, &g->chip) {
+ /* Clear the style for the appropriate pin */
+ val = __raw_readl(g->base + int_reg);
+ val &= ~(IXP4XX_GPIO_STYLE_MASK << (line * IXP4XX_GPIO_STYLE_SIZE));
+ __raw_writel(val, g->base + int_reg);
- /* Set the new style */
- val = __raw_readl(g->base + int_reg);
- val |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
- __raw_writel(val, g->base + int_reg);
+ __raw_writel(BIT(line), g->base + IXP4XX_REG_GPIS);
- /* Force-configure this line as an input */
- val = __raw_readl(g->base + IXP4XX_REG_GPOE);
- val |= BIT(d->hwirq);
- __raw_writel(val, g->base + IXP4XX_REG_GPOE);
+ /* Set the new style */
+ val = __raw_readl(g->base + int_reg);
+ val |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
+ __raw_writel(val, g->base + int_reg);
- raw_spin_unlock_irqrestore(&g->gc.bgpio_lock, flags);
+ /* Force-configure this line as an input */
+ val = __raw_readl(g->base + IXP4XX_REG_GPOE);
+ val |= BIT(d->hwirq);
+ __raw_writel(val, g->base + IXP4XX_REG_GPOE);
+ }
/* This parent only accepts level high (asserted) */
return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
@@ -206,6 +204,7 @@ static int ixp4xx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
static int ixp4xx_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
unsigned long flags;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
@@ -290,35 +289,38 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
* for big endian.
*/
#if defined(CONFIG_CPU_BIG_ENDIAN)
- flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+ flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
#else
flags = 0;
#endif
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = g->base + IXP4XX_REG_GPIN,
+ .set = g->base + IXP4XX_REG_GPOUT,
+ .dirin = g->base + IXP4XX_REG_GPOE,
+ .flags = flags,
+ };
+
/* Populate and register gpio chip */
- ret = bgpio_init(&g->gc, dev, 4,
- g->base + IXP4XX_REG_GPIN,
- g->base + IXP4XX_REG_GPOUT,
- NULL,
- NULL,
- g->base + IXP4XX_REG_GPOE,
- flags);
+ ret = gpio_generic_chip_init(&g->chip, &config);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
}
- g->gc.ngpio = 16;
- g->gc.label = "IXP4XX_GPIO_CHIP";
+ g->chip.gc.ngpio = 16;
+ g->chip.gc.label = "IXP4XX_GPIO_CHIP";
/*
* TODO: when we have migrated to device tree and all GPIOs
* are fetched using phandles, set this to -1 to get rid of
* the fixed gpiochip base.
*/
- g->gc.base = 0;
- g->gc.parent = &pdev->dev;
- g->gc.owner = THIS_MODULE;
+ g->chip.gc.base = 0;
+ g->chip.gc.parent = &pdev->dev;
+ g->chip.gc.owner = THIS_MODULE;
- girq = &g->gc.irq;
+ girq = &g->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &ixp4xx_gpio_irqchip);
girq->fwnode = dev_fwnode(dev);
girq->parent_domain = parent;
@@ -326,7 +328,7 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
girq->handler = handle_bad_irq;
girq->default_type = IRQ_TYPE_NONE;
- ret = devm_gpiochip_add_data(dev, &g->gc, g);
+ ret = devm_gpiochip_add_data(dev, &g->chip.gc, g);
if (ret) {
dev_err(dev, "failed to add SoC gpiochip\n");
return ret;
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index 9f548eda3888..b0c4a3346e7d 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -171,7 +171,7 @@ static int ttl_probe(struct platform_device *pdev)
gpio->parent = &pdev->dev;
gpio->label = pdev->name;
gpio->get = ttl_get_value;
- gpio->set_rv = ttl_set_value;
+ gpio->set = ttl_set_value;
gpio->owner = THIS_MODULE;
/* request dynamic allocation */
diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
index e38e604baa22..923aad3ab4d4 100644
--- a/drivers/gpio/gpio-kempld.c
+++ b/drivers/gpio/gpio-kempld.c
@@ -169,7 +169,7 @@ static int kempld_gpio_probe(struct platform_device *pdev)
chip->direction_output = kempld_gpio_direction_output;
chip->get_direction = kempld_gpio_get_direction;
chip->get = kempld_gpio_get;
- chip->set_rv = kempld_gpio_set;
+ chip->set = kempld_gpio_set;
chip->ngpio = kempld_gpio_pincount(pld);
if (chip->ngpio == 0) {
dev_err(dev, "No GPIO pins detected\n");
diff --git a/drivers/gpio/gpio-latch.c b/drivers/gpio/gpio-latch.c
index 3d0ff09284fb..452a9ce61488 100644
--- a/drivers/gpio/gpio-latch.c
+++ b/drivers/gpio/gpio-latch.c
@@ -48,8 +48,6 @@
#include <linux/property.h>
#include <linux/delay.h>
-#include "gpiolib.h"
-
struct gpio_latch_priv {
struct gpio_chip gc;
struct gpio_descs *clk_gpios;
@@ -166,11 +164,11 @@ static int gpio_latch_probe(struct platform_device *pdev)
if (gpio_latch_can_sleep(priv, n_latches)) {
priv->gc.can_sleep = true;
- priv->gc.set_rv = gpio_latch_set_can_sleep;
+ priv->gc.set = gpio_latch_set_can_sleep;
mutex_init(&priv->mutex);
} else {
priv->gc.can_sleep = false;
- priv->gc.set_rv = gpio_latch_set;
+ priv->gc.set = gpio_latch_set;
spin_lock_init(&priv->spinlock);
}
diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c
index 61524a9ba765..f32d1d237795 100644
--- a/drivers/gpio/gpio-ljca.c
+++ b/drivers/gpio/gpio-ljca.c
@@ -286,22 +286,14 @@ static void ljca_gpio_event_cb(void *context, u8 cmd, const void *evt_data,
{
const struct ljca_gpio_packet *packet = evt_data;
struct ljca_gpio_dev *ljca_gpio = context;
- int i, irq;
+ int i;
if (cmd != LJCA_GPIO_INT_EVENT)
return;
for (i = 0; i < packet->num; i++) {
- irq = irq_find_mapping(ljca_gpio->gc.irq.domain,
- packet->item[i].index);
- if (!irq) {
- dev_err(ljca_gpio->gc.parent,
- "gpio_id %u does not mapped to IRQ yet\n",
- packet->item[i].index);
- return;
- }
-
- generic_handle_domain_irq(ljca_gpio->gc.irq.domain, irq);
+ generic_handle_domain_irq(ljca_gpio->gc.irq.domain,
+ packet->item[i].index);
set_bit(packet->item[i].index, ljca_gpio->reenable_irqs);
}
@@ -437,7 +429,7 @@ static int ljca_gpio_probe(struct auxiliary_device *auxdev,
ljca_gpio->gc.direction_output = ljca_gpio_direction_output;
ljca_gpio->gc.get_direction = ljca_gpio_get_direction;
ljca_gpio->gc.get = ljca_gpio_get_value;
- ljca_gpio->gc.set_rv = ljca_gpio_set_value;
+ ljca_gpio->gc.set = ljca_gpio_set_value;
ljca_gpio->gc.set_config = ljca_gpio_set_config;
ljca_gpio->gc.init_valid_mask = ljca_gpio_init_valid_mask;
ljca_gpio->gc.can_sleep = true;
diff --git a/drivers/gpio/gpio-logicvc.c b/drivers/gpio/gpio-logicvc.c
index 19cd2847467c..cb9dbcc290ad 100644
--- a/drivers/gpio/gpio-logicvc.c
+++ b/drivers/gpio/gpio-logicvc.c
@@ -134,7 +134,7 @@ static int logicvc_gpio_probe(struct platform_device *pdev)
logicvc->chip.ngpio = LOGICVC_CTRL_GPIO_BITS +
LOGICVC_POWER_CTRL_GPIO_BITS;
logicvc->chip.get = logicvc_gpio_get;
- logicvc->chip.set_rv = logicvc_gpio_set;
+ logicvc->chip.set = logicvc_gpio_set;
logicvc->chip.direction_output = logicvc_gpio_direction_output;
return devm_gpiochip_add_data(dev, &logicvc->chip, logicvc);
diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c
index 26227669f026..77d07e31366f 100644
--- a/drivers/gpio/gpio-loongson-64bit.c
+++ b/drivers/gpio/gpio-loongson-64bit.c
@@ -7,12 +7,16 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>
+#include <linux/reset.h>
#include <asm/types.h>
enum loongson_gpio_mode {
@@ -27,10 +31,18 @@ struct loongson_gpio_chip_data {
unsigned int out_offset;
unsigned int in_offset;
unsigned int inten_offset;
+ unsigned int intpol_offset;
+ unsigned int intedge_offset;
+ unsigned int intclr_offset;
+ unsigned int intsts_offset;
+ unsigned int intdual_offset;
+ unsigned int intr_num;
+ irq_flow_handler_t irq_handler;
+ const struct irq_chip *girqchip;
};
struct loongson_gpio_chip {
- struct gpio_chip chip;
+ struct gpio_generic_chip chip;
spinlock_t lock;
void __iomem *reg_base;
const struct loongson_gpio_chip_data *chip_data;
@@ -38,7 +50,8 @@ struct loongson_gpio_chip {
static inline struct loongson_gpio_chip *to_loongson_gpio_chip(struct gpio_chip *chip)
{
- return container_of(chip, struct loongson_gpio_chip, chip);
+ return container_of(to_gpio_generic_chip(chip),
+ struct loongson_gpio_chip, chip);
}
static inline void loongson_commit_direction(struct loongson_gpio_chip *lgpio, unsigned int pin,
@@ -135,39 +148,185 @@ static int loongson_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
return platform_get_irq(pdev, offset);
}
-static int loongson_gpio_init(struct device *dev, struct loongson_gpio_chip *lgpio,
+static void loongson_gpio_irq_ack(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+
+ writeb(0x1, lgpio->reg_base + lgpio->chip_data->intclr_offset + hwirq);
+}
+
+static void loongson_gpio_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+
+ writeb(0x0, lgpio->reg_base + lgpio->chip_data->inten_offset + hwirq);
+}
+
+static void loongson_gpio_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+
+ writeb(0x1, lgpio->reg_base + lgpio->chip_data->inten_offset + hwirq);
+}
+
+static int loongson_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+ u8 pol = 0, edge = 0, dual = 0;
+
+ if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ edge = 1;
+ dual = 1;
+ irq_set_handler_locked(data, handle_edge_irq);
+ } else {
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ pol = 1;
+ fallthrough;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_set_handler_locked(data, handle_level_irq);
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ pol = 1;
+ fallthrough;
+ case IRQ_TYPE_EDGE_FALLING:
+ edge = 1;
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ writeb(pol, lgpio->reg_base + lgpio->chip_data->intpol_offset + hwirq);
+ writeb(edge, lgpio->reg_base + lgpio->chip_data->intedge_offset + hwirq);
+ writeb(dual, lgpio->reg_base + lgpio->chip_data->intdual_offset + hwirq);
+
+ return 0;
+}
+
+static void loongson_gpio_ls2k0300_irq_handler(struct irq_desc *desc)
+{
+ struct loongson_gpio_chip *lgpio = irq_desc_get_handler_data(desc);
+ struct irq_chip *girqchip = irq_desc_get_chip(desc);
+ int i;
+
+ chained_irq_enter(girqchip, desc);
+
+ for (i = 0; i < lgpio->chip.gc.ngpio; i++) {
+ /*
+ * On the LS2K0300 GPIO controller, interrupt status bits may
+ * be spuriously set even when the corresponding interrupt is
+ * disabled. The interrupt enable bits are therefore checked
+ * along with the status bits to detect interrupts reliably.
+ */
+ if (readb(lgpio->reg_base + lgpio->chip_data->intsts_offset + i) &&
+ readb(lgpio->reg_base + lgpio->chip_data->inten_offset + i))
+ generic_handle_domain_irq(lgpio->chip.gc.irq.domain, i);
+ }
+
+ chained_irq_exit(girqchip, desc);
+}
+
+static const struct irq_chip loongson_gpio_ls2k0300_irqchip = {
+ .irq_ack = loongson_gpio_irq_ack,
+ .irq_mask = loongson_gpio_irq_mask,
+ .irq_unmask = loongson_gpio_irq_unmask,
+ .irq_set_type = loongson_gpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_SKIP_SET_WAKE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static int loongson_gpio_init_irqchip(struct platform_device *pdev,
+ struct loongson_gpio_chip *lgpio)
+{
+ const struct loongson_gpio_chip_data *data = lgpio->chip_data;
+ struct gpio_chip *chip = &lgpio->chip.gc;
+ int i;
+
+ chip->irq.default_type = IRQ_TYPE_NONE;
+ chip->irq.handler = handle_bad_irq;
+ chip->irq.parent_handler = data->irq_handler;
+ chip->irq.parent_handler_data = lgpio;
+ gpio_irq_chip_set_chip(&chip->irq, data->girqchip);
+
+ chip->irq.num_parents = data->intr_num;
+ chip->irq.parents = devm_kcalloc(&pdev->dev, data->intr_num,
+ sizeof(*chip->irq.parents), GFP_KERNEL);
+ if (!chip->irq.parents)
+ return -ENOMEM;
+
+ for (i = 0; i < data->intr_num; i++) {
+ int ret;
+
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to get IRQ %d\n", i);
+ chip->irq.parents[i] = ret;
+ }
+
+ for (i = 0; i < data->intr_num; i++) {
+ writeb(0x0, lgpio->reg_base + data->inten_offset + i);
+ writeb(0x1, lgpio->reg_base + data->intclr_offset + i);
+ }
+
+ return 0;
+}
+
+static int loongson_gpio_init(struct platform_device *pdev, struct loongson_gpio_chip *lgpio,
void __iomem *reg_base)
{
+ struct gpio_generic_chip_config config;
int ret;
lgpio->reg_base = reg_base;
if (lgpio->chip_data->mode == BIT_CTRL_MODE) {
- ret = bgpio_init(&lgpio->chip, dev, 8,
- lgpio->reg_base + lgpio->chip_data->in_offset,
- lgpio->reg_base + lgpio->chip_data->out_offset,
- NULL, NULL,
- lgpio->reg_base + lgpio->chip_data->conf_offset,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 8,
+ .dat = lgpio->reg_base + lgpio->chip_data->in_offset,
+ .set = lgpio->reg_base + lgpio->chip_data->out_offset,
+ .dirin = lgpio->reg_base + lgpio->chip_data->conf_offset,
+ };
+
+ ret = gpio_generic_chip_init(&lgpio->chip, &config);
if (ret) {
- dev_err(dev, "unable to init generic GPIO\n");
+ dev_err(&pdev->dev, "unable to init generic GPIO\n");
return ret;
}
} else {
- lgpio->chip.direction_input = loongson_gpio_direction_input;
- lgpio->chip.get = loongson_gpio_get;
- lgpio->chip.get_direction = loongson_gpio_get_direction;
- lgpio->chip.direction_output = loongson_gpio_direction_output;
- lgpio->chip.set_rv = loongson_gpio_set;
- lgpio->chip.parent = dev;
+ lgpio->chip.gc.direction_input = loongson_gpio_direction_input;
+ lgpio->chip.gc.get = loongson_gpio_get;
+ lgpio->chip.gc.get_direction = loongson_gpio_get_direction;
+ lgpio->chip.gc.direction_output = loongson_gpio_direction_output;
+ lgpio->chip.gc.set = loongson_gpio_set;
+ lgpio->chip.gc.parent = &pdev->dev;
+ lgpio->chip.gc.base = -1;
spin_lock_init(&lgpio->lock);
}
- lgpio->chip.label = lgpio->chip_data->label;
- lgpio->chip.can_sleep = false;
- if (lgpio->chip_data->inten_offset)
- lgpio->chip.to_irq = loongson_gpio_to_irq;
+ lgpio->chip.gc.label = lgpio->chip_data->label;
+ lgpio->chip.gc.can_sleep = false;
+ if (lgpio->chip_data->girqchip) {
+ ret = loongson_gpio_init_irqchip(pdev, lgpio);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to initialize irqchip\n");
+ } else if (lgpio->chip_data->inten_offset) {
+ lgpio->chip.gc.to_irq = loongson_gpio_to_irq;
+ }
- return devm_gpiochip_add_data(dev, &lgpio->chip, lgpio);
+ return devm_gpiochip_add_data(&pdev->dev, &lgpio->chip.gc, lgpio);
}
static int loongson_gpio_probe(struct platform_device *pdev)
@@ -175,6 +334,7 @@ static int loongson_gpio_probe(struct platform_device *pdev)
void __iomem *reg_base;
struct loongson_gpio_chip *lgpio;
struct device *dev = &pdev->dev;
+ struct reset_control *rst;
lgpio = devm_kzalloc(dev, sizeof(*lgpio), GFP_KERNEL);
if (!lgpio)
@@ -186,7 +346,11 @@ static int loongson_gpio_probe(struct platform_device *pdev)
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
- return loongson_gpio_init(dev, lgpio, reg_base);
+ rst = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev, NULL);
+ if (IS_ERR(rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rst), "failed to get reset control\n");
+
+ return loongson_gpio_init(pdev, lgpio, reg_base);
}
static const struct loongson_gpio_chip_data loongson_gpio_ls2k_data = {
@@ -198,6 +362,23 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls2k_data = {
.inten_offset = 0x30,
};
+static const struct loongson_gpio_chip_data loongson_gpio_ls2k0300_data = {
+ .label = "ls2k0300_gpio",
+ .mode = BYTE_CTRL_MODE,
+ .conf_offset = 0x800,
+ .in_offset = 0xa00,
+ .out_offset = 0x900,
+ .inten_offset = 0xb00,
+ .intpol_offset = 0xc00,
+ .intedge_offset = 0xd00,
+ .intclr_offset = 0xe00,
+ .intsts_offset = 0xf00,
+ .intdual_offset = 0xf80,
+ .intr_num = 7,
+ .irq_handler = loongson_gpio_ls2k0300_irq_handler,
+ .girqchip = &loongson_gpio_ls2k0300_irqchip,
+};
+
static const struct loongson_gpio_chip_data loongson_gpio_ls2k0500_data0 = {
.label = "ls2k0500_gpio",
.mode = BIT_CTRL_MODE,
@@ -222,14 +403,16 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data0 = {
.conf_offset = 0x0,
.in_offset = 0xc,
.out_offset = 0x8,
+ .inten_offset = 0x14,
};
static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data1 = {
.label = "ls2k2000_gpio",
- .mode = BIT_CTRL_MODE,
- .conf_offset = 0x0,
- .in_offset = 0x20,
- .out_offset = 0x10,
+ .mode = BYTE_CTRL_MODE,
+ .conf_offset = 0x800,
+ .in_offset = 0xa00,
+ .out_offset = 0x900,
+ .inten_offset = 0xb00,
};
static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data2 = {
@@ -246,6 +429,7 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls3a5000_data = {
.conf_offset = 0x0,
.in_offset = 0xc,
.out_offset = 0x8,
+ .inten_offset = 0x14,
};
static const struct loongson_gpio_chip_data loongson_gpio_ls7a_data = {
@@ -254,6 +438,7 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls7a_data = {
.conf_offset = 0x800,
.in_offset = 0xa00,
.out_offset = 0x900,
+ .inten_offset = 0xb00,
};
/* LS7A2000 chipset GPIO */
@@ -263,12 +448,13 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls7a2000_data0 = {
.conf_offset = 0x800,
.in_offset = 0xa00,
.out_offset = 0x900,
+ .inten_offset = 0xb00,
};
/* LS7A2000 ACPI GPIO */
static const struct loongson_gpio_chip_data loongson_gpio_ls7a2000_data1 = {
.label = "ls7a2000_gpio",
- .mode = BYTE_CTRL_MODE,
+ .mode = BIT_CTRL_MODE,
.conf_offset = 0x4,
.in_offset = 0x8,
.out_offset = 0x0,
@@ -281,6 +467,7 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls3a6000_data = {
.conf_offset = 0x0,
.in_offset = 0xc,
.out_offset = 0x8,
+ .inten_offset = 0x14,
};
static const struct of_device_id loongson_gpio_of_match[] = {
@@ -289,6 +476,10 @@ static const struct of_device_id loongson_gpio_of_match[] = {
.data = &loongson_gpio_ls2k_data,
},
{
+ .compatible = "loongson,ls2k0300-gpio",
+ .data = &loongson_gpio_ls2k0300_data,
+ },
+ {
.compatible = "loongson,ls2k0500-gpio0",
.data = &loongson_gpio_ls2k0500_data0,
},
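
The converted to_loongson_gpio_chip() above goes through two container_of() steps: the gpio_chip is embedded in a gpio_generic_chip, which in turn is embedded in the driver structure. A sketch of the layout this assumes; the body of to_gpio_generic_chip() is an assumption inferred from how the hunk uses it, not a quote from <linux/gpio/generic.h>:

    struct gpio_generic_chip {
        struct gpio_chip gc;    /* the embedded gpiolib chip */
        /* lock, register accessors, ... */
    };

    /* assumed helper, presumably provided by <linux/gpio/generic.h> */
    static inline struct gpio_generic_chip *to_gpio_generic_chip(struct gpio_chip *gc)
    {
        return container_of(gc, struct gpio_generic_chip, gc);
    }

    /* the driver-side lookup then needs only one more container_of() */
    struct loongson_gpio_chip *lgpio =
        container_of(to_gpio_generic_chip(chip), struct loongson_gpio_chip, chip);
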
diff --git a/drivers/gpio/gpio-loongson.c b/drivers/gpio/gpio-loongson.c
index 8f3668169ebf..f3e0559f969d 100644
--- a/drivers/gpio/gpio-loongson.c
+++ b/drivers/gpio/gpio-loongson.c
@@ -106,7 +106,7 @@ static int loongson_gpio_probe(struct platform_device *pdev)
gc->base = 0;
gc->ngpio = LOONGSON_N_GPIO;
gc->get = loongson_gpio_get_value;
- gc->set_rv = loongson_gpio_set_value;
+ gc->set = loongson_gpio_set_value;
gc->direction_input = loongson_gpio_direction_input;
gc->direction_output = loongson_gpio_direction_output;
diff --git a/drivers/gpio/gpio-loongson1.c b/drivers/gpio/gpio-loongson1.c
index 6ca3b969db4d..9750a7a17508 100644
--- a/drivers/gpio/gpio-loongson1.c
+++ b/drivers/gpio/gpio-loongson1.c
@@ -5,10 +5,11 @@
* Copyright (C) 2015-2023 Keguang Zhang <keguang.zhang@gmail.com>
*/
+#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/platform_device.h>
-#include <linux/bitops.h>
/* Loongson 1 GPIO Register Definitions */
#define GPIO_CFG 0x0
@@ -17,19 +18,18 @@
#define GPIO_OUTPUT 0x30
struct ls1x_gpio_chip {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *reg_base;
};
static int ls1x_gpio_request(struct gpio_chip *gc, unsigned int offset)
{
struct ls1x_gpio_chip *ls1x_gc = gpiochip_get_data(gc);
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ls1x_gc->chip);
+
__raw_writel(__raw_readl(ls1x_gc->reg_base + GPIO_CFG) | BIT(offset),
ls1x_gc->reg_base + GPIO_CFG);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return 0;
}
@@ -37,16 +37,16 @@ static int ls1x_gpio_request(struct gpio_chip *gc, unsigned int offset)
static void ls1x_gpio_free(struct gpio_chip *gc, unsigned int offset)
{
struct ls1x_gpio_chip *ls1x_gc = gpiochip_get_data(gc);
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ls1x_gc->chip);
+
__raw_writel(__raw_readl(ls1x_gc->reg_base + GPIO_CFG) & ~BIT(offset),
ls1x_gc->reg_base + GPIO_CFG);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static int ls1x_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct ls1x_gpio_chip *ls1x_gc;
int ret;
@@ -59,29 +59,35 @@ static int ls1x_gpio_probe(struct platform_device *pdev)
if (IS_ERR(ls1x_gc->reg_base))
return PTR_ERR(ls1x_gc->reg_base);
- ret = bgpio_init(&ls1x_gc->gc, dev, 4, ls1x_gc->reg_base + GPIO_DATA,
- ls1x_gc->reg_base + GPIO_OUTPUT, NULL,
- NULL, ls1x_gc->reg_base + GPIO_DIR, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = ls1x_gc->reg_base + GPIO_DATA,
+ .set = ls1x_gc->reg_base + GPIO_OUTPUT,
+ .dirin = ls1x_gc->reg_base + GPIO_DIR,
+ };
+
+ ret = gpio_generic_chip_init(&ls1x_gc->chip, &config);
if (ret)
goto err;
- ls1x_gc->gc.owner = THIS_MODULE;
- ls1x_gc->gc.request = ls1x_gpio_request;
- ls1x_gc->gc.free = ls1x_gpio_free;
+ ls1x_gc->chip.gc.owner = THIS_MODULE;
+ ls1x_gc->chip.gc.request = ls1x_gpio_request;
+ ls1x_gc->chip.gc.free = ls1x_gpio_free;
/*
* Clear ngpio to let gpiolib get the correct number
* by reading ngpios property
*/
- ls1x_gc->gc.ngpio = 0;
+ ls1x_gc->chip.gc.ngpio = 0;
- ret = devm_gpiochip_add_data(dev, &ls1x_gc->gc, ls1x_gc);
+ ret = devm_gpiochip_add_data(dev, &ls1x_gc->chip.gc, ls1x_gc);
if (ret)
goto err;
platform_set_drvdata(pdev, ls1x_gc);
dev_info(dev, "GPIO controller registered with %d pins\n",
- ls1x_gc->gc.ngpio);
+ ls1x_gc->chip.gc.ngpio);
return 0;
err:
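
The ls1x request/free conversions above swap an explicit raw_spin_lock_irqsave()/unlock pair for a scope-based guard(). A side-by-side sketch of the equivalence, using the register names from the hunk:

    /* before: manual pairing, easy to miss on an early return */
    unsigned long flags;

    raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
    __raw_writel(__raw_readl(reg_base + GPIO_CFG) | BIT(offset),
                 reg_base + GPIO_CFG);
    raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);

    /* after: the guard releases the lock when it goes out of scope,
     * removing unlock-on-error-path bugs by construction */
    guard(gpio_generic_lock_irqsave)(&ls1x_gc->chip);

    __raw_writel(__raw_readl(reg_base + GPIO_CFG) | BIT(offset),
                 reg_base + GPIO_CFG);
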
diff --git a/drivers/gpio/gpio-lp3943.c b/drivers/gpio/gpio-lp3943.c
index 52ab3ac4844c..e8e00daff7df 100644
--- a/drivers/gpio/gpio-lp3943.c
+++ b/drivers/gpio/gpio-lp3943.c
@@ -184,7 +184,7 @@ static const struct gpio_chip lp3943_gpio_chip = {
.direction_input = lp3943_gpio_direction_input,
.get = lp3943_gpio_get,
.direction_output = lp3943_gpio_direction_output,
- .set_rv = lp3943_gpio_set,
+ .set = lp3943_gpio_set,
.base = -1,
.ngpio = LP3943_MAX_GPIO,
.can_sleep = 1,
diff --git a/drivers/gpio/gpio-lp873x.c b/drivers/gpio/gpio-lp873x.c
index 1908ed302e92..5376708a81bf 100644
--- a/drivers/gpio/gpio-lp873x.c
+++ b/drivers/gpio/gpio-lp873x.c
@@ -124,7 +124,7 @@ static const struct gpio_chip template_chip = {
.direction_input = lp873x_gpio_direction_input,
.direction_output = lp873x_gpio_direction_output,
.get = lp873x_gpio_get,
- .set_rv = lp873x_gpio_set,
+ .set = lp873x_gpio_set,
.set_config = lp873x_gpio_set_config,
.base = -1,
.ngpio = 2,
diff --git a/drivers/gpio/gpio-lp87565.c b/drivers/gpio/gpio-lp87565.c
index 8ea687d5d028..0f337c1283b2 100644
--- a/drivers/gpio/gpio-lp87565.c
+++ b/drivers/gpio/gpio-lp87565.c
@@ -139,7 +139,7 @@ static const struct gpio_chip template_chip = {
.direction_input = lp87565_gpio_direction_input,
.direction_output = lp87565_gpio_direction_output,
.get = lp87565_gpio_get,
- .set_rv = lp87565_gpio_set,
+ .set = lp87565_gpio_set,
.set_config = lp87565_gpio_set_config,
.base = -1,
.ngpio = 3,
diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c
index b0a8da5c058d..37a2342eb2e6 100644
--- a/drivers/gpio/gpio-lpc18xx.c
+++ b/drivers/gpio/gpio-lpc18xx.c
@@ -249,8 +249,8 @@ static int lpc18xx_gpio_pin_ic_probe(struct lpc18xx_gpio_chip *gc)
raw_spin_lock_init(&ic->lock);
ic->domain = irq_domain_create_hierarchy(parent_domain, 0, NR_LPC18XX_GPIO_PIN_IC_IRQS,
- of_fwnode_handle(dev->of_node),
- &lpc18xx_gpio_pin_ic_domain_ops, ic);
+ dev_fwnode(dev), &lpc18xx_gpio_pin_ic_domain_ops,
+ ic);
if (!ic->domain) {
pr_err("unable to add irq domain\n");
ret = -ENODEV;
@@ -327,7 +327,7 @@ static const struct gpio_chip lpc18xx_chip = {
.free = gpiochip_generic_free,
.direction_input = lpc18xx_gpio_direction_input,
.direction_output = lpc18xx_gpio_direction_output,
- .set_rv = lpc18xx_gpio_set,
+ .set = lpc18xx_gpio_set,
.get = lpc18xx_gpio_get,
.ngpio = LPC18XX_MAX_PORTS * LPC18XX_PINS_PER_PORT,
.owner = THIS_MODULE,
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 6668b8bd9f1e..37fc54fc7385 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -407,7 +407,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set_rv = lpc32xx_gpio_set_value_p012,
+ .set = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_p01,
.base = LPC32XX_GPIO_P0_GRP,
@@ -423,7 +423,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set_rv = lpc32xx_gpio_set_value_p012,
+ .set = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_p01,
.base = LPC32XX_GPIO_P1_GRP,
@@ -439,7 +439,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set_rv = lpc32xx_gpio_set_value_p012,
+ .set = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.base = LPC32XX_GPIO_P2_GRP,
.ngpio = LPC32XX_GPIO_P2_MAX,
@@ -454,7 +454,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p3,
.get = lpc32xx_gpio_get_value_p3,
.direction_output = lpc32xx_gpio_dir_output_p3,
- .set_rv = lpc32xx_gpio_set_value_p3,
+ .set = lpc32xx_gpio_set_value_p3,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_gpio_p3,
.base = LPC32XX_GPIO_P3_GRP,
@@ -482,7 +482,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.chip = {
.label = "gpo_p3",
.direction_output = lpc32xx_gpio_dir_out_always,
- .set_rv = lpc32xx_gpo_set_value,
+ .set = lpc32xx_gpo_set_value,
.get = lpc32xx_gpo_get_value,
.request = lpc32xx_gpio_request,
.base = LPC32XX_GPO_P3_GRP,
diff --git a/drivers/gpio/gpio-macsmc.c b/drivers/gpio/gpio-macsmc.c
new file mode 100644
index 000000000000..30ef258e7655
--- /dev/null
+++ b/drivers/gpio/gpio-macsmc.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC GPIO driver
+ * Copyright The Asahi Linux Contributors
+ *
+ * This driver implements basic SMC PMU GPIO support that can read inputs
+ * and write outputs. Mode changes and IRQ config are not yet implemented.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/macsmc.h>
+
+#define MAX_GPIO 64
+
+/*
+ * Commands 0-6 are, presumably, the intended API.
+ * Command 0xff lets you get/set the pin configuration in detail directly,
+ * but the bit meanings seem not to be stable between devices/PMU hardware
+ * versions.
+ *
+ * We're going to try to make do with the low-numbered commands for now.
+ * We don't implement pin mode changes at this time.
+ */
+
+#define CMD_ACTION (0 << 24)
+#define CMD_OUTPUT (1 << 24)
+#define CMD_INPUT (2 << 24)
+#define CMD_PINMODE (3 << 24)
+#define CMD_IRQ_ENABLE (4 << 24)
+#define CMD_IRQ_ACK (5 << 24)
+#define CMD_IRQ_MODE (6 << 24)
+#define CMD_CONFIG (0xff << 24)
+
+#define MODE_INPUT 0
+#define MODE_OUTPUT 1
+#define MODE_VALUE_0 0
+#define MODE_VALUE_1 2
+
+#define IRQ_MODE_HIGH 0
+#define IRQ_MODE_LOW 1
+#define IRQ_MODE_RISING 2
+#define IRQ_MODE_FALLING 3
+#define IRQ_MODE_BOTH 4
+
+#define CONFIG_MASK GENMASK(23, 16)
+#define CONFIG_VAL GENMASK(7, 0)
+
+#define CONFIG_OUTMODE GENMASK(7, 6)
+#define CONFIG_IRQMODE GENMASK(5, 3)
+#define CONFIG_PULLDOWN BIT(2)
+#define CONFIG_PULLUP BIT(1)
+#define CONFIG_OUTVAL BIT(0)
+
+/*
+ * Output modes seem to differ depending on the PMU in use... ?
+ * j274 / M1 (Sera PMU):
+ * 0 = input
+ * 1 = output
+ * 2 = open drain
+ * 3 = disable
+ * j314 / M1Pro (Maverick PMU):
+ * 0 = input
+ * 1 = open drain
+ * 2 = output
+ * 3 = ?
+ */
+
+struct macsmc_gpio {
+ struct device *dev;
+ struct apple_smc *smc;
+ struct gpio_chip gc;
+
+ int first_index;
+};
+
+static int macsmc_gpio_nr(smc_key key)
+{
+ int low = hex_to_bin(key & 0xff);
+ int high = hex_to_bin((key >> 8) & 0xff);
+
+ if (low < 0 || high < 0)
+ return -1;
+
+ return low | (high << 4);
+}
+
+static int macsmc_gpio_key(unsigned int offset)
+{
+ return _SMC_KEY("gP\0\0") | hex_asc_hi(offset) << 8 | hex_asc_lo(offset);
+}
+
+static int macsmc_gpio_find_first_gpio_index(struct macsmc_gpio *smcgp)
+{
+ struct apple_smc *smc = smcgp->smc;
+ smc_key key = macsmc_gpio_key(0);
+ smc_key first_key, last_key;
+ int start, count, ret;
+
+ /* Return early if the key is out of bounds */
+ ret = apple_smc_get_key_by_index(smc, 0, &first_key);
+ if (ret)
+ return ret;
+ if (key <= first_key)
+ return -ENODEV;
+
+ ret = apple_smc_get_key_by_index(smc, smc->key_count - 1, &last_key);
+ if (ret)
+ return ret;
+ if (key > last_key)
+ return -ENODEV;
+
+ /* Binary search to find the index of the first SMC key greater than or equal to key */
+ start = 0;
+ count = smc->key_count;
+ while (count > 1) {
+ smc_key pkey;
+ int pivot = start + ((count - 1) >> 1);
+
+ ret = apple_smc_get_key_by_index(smc, pivot, &pkey);
+ if (ret < 0)
+ return ret;
+
+ if (pkey == key)
+ return pivot;
+
+ pivot++;
+
+ if (pkey < key) {
+ count -= pivot - start;
+ start = pivot;
+ } else {
+ count = pivot - start;
+ }
+ }
+
+ return start;
+}
+
+static int macsmc_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+ smc_key key = macsmc_gpio_key(offset);
+ u32 val;
+ int ret;
+
+ /* First try reading the explicit pin mode register */
+ ret = apple_smc_rw_u32(smcgp->smc, key, CMD_PINMODE, &val);
+ if (!ret)
+ return (val & MODE_OUTPUT) ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+
+ /*
+ * Less common IRQ configs cause CMD_PINMODE to fail, and so does open drain mode.
+ * Fall back to reading IRQ mode, which will only succeed for inputs.
+ */
+ ret = apple_smc_rw_u32(smcgp->smc, key, CMD_IRQ_MODE, &val);
+ return ret ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+}
+
+static int macsmc_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+ smc_key key = macsmc_gpio_key(offset);
+ u32 cmd, val;
+ int ret;
+
+ ret = macsmc_gpio_get_direction(gc, offset);
+ if (ret < 0)
+ return ret;
+
+ if (ret == GPIO_LINE_DIRECTION_OUT)
+ cmd = CMD_OUTPUT;
+ else
+ cmd = CMD_INPUT;
+
+ ret = apple_smc_rw_u32(smcgp->smc, key, cmd, &val);
+ if (ret < 0)
+ return ret;
+
+ return val ? 1 : 0;
+}
+
+static int macsmc_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+ smc_key key = macsmc_gpio_key(offset);
+ int ret;
+
+ value |= CMD_OUTPUT;
+ ret = apple_smc_write_u32(smcgp->smc, key, value);
+ if (ret < 0)
+ dev_err(smcgp->dev, "GPIO set failed %p4ch = 0x%x\n",
+ &key, value);
+
+ return ret;
+}
+
+static int macsmc_gpio_init_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask, unsigned int ngpios)
+{
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+ int count;
+ int i;
+
+ count = min(smcgp->smc->key_count, MAX_GPIO);
+
+ bitmap_zero(valid_mask, ngpios);
+
+ for (i = 0; i < count; i++) {
+ int ret, gpio_nr;
+ smc_key key;
+
+ ret = apple_smc_get_key_by_index(smcgp->smc, smcgp->first_index + i, &key);
+ if (ret < 0)
+ return ret;
+
+ if (key > SMC_KEY(gPff))
+ break;
+
+ gpio_nr = macsmc_gpio_nr(key);
+ if (gpio_nr < 0 || gpio_nr >= MAX_GPIO) {
+ dev_err(smcgp->dev, "Bad GPIO key %p4ch\n", &key);
+ continue;
+ }
+
+ set_bit(gpio_nr, valid_mask);
+ }
+
+ return 0;
+}
+
+static int macsmc_gpio_probe(struct platform_device *pdev)
+{
+ struct macsmc_gpio *smcgp;
+ struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent);
+ smc_key key;
+ int ret;
+
+ smcgp = devm_kzalloc(&pdev->dev, sizeof(*smcgp), GFP_KERNEL);
+ if (!smcgp)
+ return -ENOMEM;
+
+ smcgp->dev = &pdev->dev;
+ smcgp->smc = smc;
+
+ smcgp->first_index = macsmc_gpio_find_first_gpio_index(smcgp);
+ if (smcgp->first_index < 0)
+ return smcgp->first_index;
+
+ ret = apple_smc_get_key_by_index(smc, smcgp->first_index, &key);
+ if (ret < 0)
+ return ret;
+
+ if (key > macsmc_gpio_key(MAX_GPIO - 1))
+ return -ENODEV;
+
+ dev_info(smcgp->dev, "First GPIO key: %p4ch\n", &key);
+
+ smcgp->gc.label = "macsmc-pmu-gpio";
+ smcgp->gc.owner = THIS_MODULE;
+ smcgp->gc.get = macsmc_gpio_get;
+ smcgp->gc.set = macsmc_gpio_set;
+ smcgp->gc.get_direction = macsmc_gpio_get_direction;
+ smcgp->gc.init_valid_mask = macsmc_gpio_init_valid_mask;
+ smcgp->gc.can_sleep = true;
+ smcgp->gc.ngpio = MAX_GPIO;
+ smcgp->gc.base = -1;
+ smcgp->gc.parent = &pdev->dev;
+
+ return devm_gpiochip_add_data(&pdev->dev, &smcgp->gc, smcgp);
+}
+
+static const struct of_device_id macsmc_gpio_of_table[] = {
+ { .compatible = "apple,smc-gpio", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, macsmc_gpio_of_table);
+
+static struct platform_driver macsmc_gpio_driver = {
+ .driver = {
+ .name = "macsmc-gpio",
+ .of_match_table = macsmc_gpio_of_table,
+ },
+ .probe = macsmc_gpio_probe,
+};
+module_platform_driver(macsmc_gpio_driver);
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple SMC GPIO driver");
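
The new driver above derives SMC keys from GPIO offsets and back: keys have the form 'gP' followed by two lowercase hex digits. A short sketch of the round trip using the file's own helpers (the offset value is illustrative):

    /* offset 0x2a maps to the four-character key "gP2a" ... */
    smc_key key = macsmc_gpio_key(0x2a);    /* == _SMC_KEY("gP2a") */

    /* ... and macsmc_gpio_nr() recovers the offset from the key */
    int nr = macsmc_gpio_nr(key);           /* == 0x2a */

    /* non-hex digits make macsmc_gpio_nr() return -1, which
     * init_valid_mask uses to skip malformed keys */
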
diff --git a/drivers/gpio/gpio-madera.c b/drivers/gpio/gpio-madera.c
index e73e72d62bc8..551faf9655b2 100644
--- a/drivers/gpio/gpio-madera.c
+++ b/drivers/gpio/gpio-madera.c
@@ -109,7 +109,7 @@ static const struct gpio_chip madera_gpio_chip = {
.direction_input = madera_gpio_direction_in,
.get = madera_gpio_get,
.direction_output = madera_gpio_direction_out,
- .set_rv = madera_gpio_set,
+ .set = madera_gpio_set,
.set_config = gpiochip_generic_config,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index 75d414d8c992..84c7c2dca822 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -188,7 +188,7 @@ int __max730x_probe(struct max7301 *ts)
ts->chip.direction_input = max7301_direction_input;
ts->chip.get = max7301_get;
ts->chip.direction_output = max7301_direction_output;
- ts->chip.set_rv = max7301_set;
+ ts->chip.set = max7301_set;
ts->chip.ngpio = PIN_NUMBER;
ts->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index d5ffedb086af..a61d670ceeda 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -585,8 +585,8 @@ static int max732x_setup_gpio(struct max732x_chip *chip,
gc->direction_input = max732x_gpio_direction_input;
if (chip->dir_output) {
gc->direction_output = max732x_gpio_direction_output;
- gc->set_rv = max732x_gpio_set_value;
- gc->set_multiple_rv = max732x_gpio_set_multiple;
+ gc->set = max732x_gpio_set_value;
+ gc->set_multiple = max732x_gpio_set_multiple;
}
gc->get = max732x_gpio_get_value;
gc->can_sleep = true;
diff --git a/drivers/gpio/gpio-max7360.c b/drivers/gpio/gpio-max7360.c
new file mode 100644
index 000000000000..db92a43776a9
--- /dev/null
+++ b/drivers/gpio/gpio-max7360.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2025 Bootlin
+ *
+ * Author: Kamel BOUHARA <kamel.bouhara@bootlin.com>
+ * Author: Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/regmap.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max7360.h>
+#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define MAX7360_GPIO_PORT 1
+#define MAX7360_GPIO_COL 2
+
+struct max7360_gpio_plat_data {
+ unsigned int function;
+};
+
+static const struct max7360_gpio_plat_data max7360_gpio_port_plat = { .function = MAX7360_GPIO_PORT };
+static const struct max7360_gpio_plat_data max7360_gpio_col_plat = { .function = MAX7360_GPIO_COL };
+
+static int max7360_get_available_gpos(struct device *dev, unsigned int *available_gpios)
+{
+ u32 columns;
+ int ret;
+
+ ret = device_property_read_u32(dev->parent, "keypad,num-columns", &columns);
+ if (ret) {
+ dev_err(dev, "Failed to read columns count\n");
+ return ret;
+ }
+
+ *available_gpios = min(MAX7360_MAX_GPO, MAX7360_MAX_KEY_COLS - columns);
+
+ return 0;
+}
+
+static int max7360_gpo_init_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ unsigned int available_gpios;
+ int ret;
+
+ ret = max7360_get_available_gpos(gc->parent, &available_gpios);
+ if (ret)
+ return ret;
+
+ bitmap_clear(valid_mask, 0, MAX7360_MAX_KEY_COLS - available_gpios);
+
+ return 0;
+}
+
+static int max7360_set_gpos_count(struct device *dev, struct regmap *regmap)
+{
+ /*
+ * MAX7360 COL0 to COL7 pins can be used either as keypad columns,
+ * as general-purpose outputs, or as a mix of both.
+ * By default all pins are used for the keypad; here we update this
+ * configuration to allow using some of them as GPIOs.
+ */
+ unsigned int available_gpios;
+ unsigned int val;
+ int ret;
+
+ ret = max7360_get_available_gpos(dev, &available_gpios);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure which pins will be used for the keypad.
+ * MAX7360_REG_DEBOUNCE holds both the keypad debounce timing
+ * configuration and the GPO/keypad column partitioning. Only the
+ * latter is modified here.
+ */
+ val = FIELD_PREP(MAX7360_PORTS, available_gpios);
+ ret = regmap_write_bits(regmap, MAX7360_REG_DEBOUNCE, MAX7360_PORTS, val);
+ if (ret)
+ dev_err(dev, "Failed to write max7360 columns/gpos configuration\n");
+
+ return ret;
+}
+
+static int max7360_gpio_reg_mask_xlate(struct gpio_regmap *gpio,
+ unsigned int base, unsigned int offset,
+ unsigned int *reg, unsigned int *mask)
+{
+ if (base == MAX7360_REG_PWMBASE) {
+ /*
+ * GPIO output is using PWM duty cycle registers: one register
+ * per line, with value being either 0 or 255.
+ */
+ *reg = base + offset;
+ *mask = GENMASK(7, 0);
+ } else {
+ *reg = base;
+ *mask = BIT(offset);
+ }
+
+ return 0;
+}
+
+static const struct regmap_irq max7360_regmap_irqs[MAX7360_MAX_GPIO] = {
+ REGMAP_IRQ_REG(0, 0, BIT(0)),
+ REGMAP_IRQ_REG(1, 0, BIT(1)),
+ REGMAP_IRQ_REG(2, 0, BIT(2)),
+ REGMAP_IRQ_REG(3, 0, BIT(3)),
+ REGMAP_IRQ_REG(4, 0, BIT(4)),
+ REGMAP_IRQ_REG(5, 0, BIT(5)),
+ REGMAP_IRQ_REG(6, 0, BIT(6)),
+ REGMAP_IRQ_REG(7, 0, BIT(7)),
+};
+
+static int max7360_handle_mask_sync(const int index,
+ const unsigned int mask_buf_def,
+ const unsigned int mask_buf,
+ void *const irq_drv_data)
+{
+ struct regmap *regmap = irq_drv_data;
+ int ret;
+
+ for (unsigned int i = 0; i < MAX7360_MAX_GPIO; i++) {
+ ret = regmap_assign_bits(regmap, MAX7360_REG_PWMCFG(i),
+ MAX7360_PORT_CFG_INTERRUPT_MASK, mask_buf & BIT(i));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int max7360_gpio_probe(struct platform_device *pdev)
+{
+ const struct max7360_gpio_plat_data *plat_data;
+ struct gpio_regmap_config gpio_config = { };
+ struct regmap_irq_chip *irq_chip;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ unsigned int outconf;
+ int ret;
+
+ regmap = dev_get_regmap(dev->parent, NULL);
+ if (!regmap)
+ return dev_err_probe(dev, -ENODEV, "could not get parent regmap\n");
+
+ plat_data = device_get_match_data(dev);
+ if (plat_data->function == MAX7360_GPIO_PORT) {
+ if (device_property_read_bool(dev, "interrupt-controller")) {
+ /*
+ * Port GPIOs with interrupt-controller property: add IRQ
+ * controller.
+ */
+ gpio_config.regmap_irq_flags = IRQF_ONESHOT | IRQF_SHARED;
+ gpio_config.regmap_irq_line =
+ fwnode_irq_get_byname(dev_fwnode(dev->parent), "inti");
+ if (gpio_config.regmap_irq_line < 0)
+ return dev_err_probe(dev, gpio_config.regmap_irq_line,
+ "Failed to get IRQ\n");
+
+ /* Create custom IRQ configuration. */
+ irq_chip = devm_kzalloc(dev, sizeof(*irq_chip), GFP_KERNEL);
+ gpio_config.regmap_irq_chip = irq_chip;
+ if (!irq_chip)
+ return -ENOMEM;
+
+ irq_chip->name = dev_name(dev);
+ irq_chip->status_base = MAX7360_REG_GPIOIN;
+ irq_chip->status_is_level = true;
+ irq_chip->num_regs = 1;
+ irq_chip->num_irqs = MAX7360_MAX_GPIO;
+ irq_chip->irqs = max7360_regmap_irqs;
+ irq_chip->handle_mask_sync = max7360_handle_mask_sync;
+ irq_chip->irq_drv_data = regmap;
+
+ for (unsigned int i = 0; i < MAX7360_MAX_GPIO; i++) {
+ ret = regmap_write_bits(regmap, MAX7360_REG_PWMCFG(i),
+ MAX7360_PORT_CFG_INTERRUPT_EDGES,
+ MAX7360_PORT_CFG_INTERRUPT_EDGES);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to enable interrupts\n");
+ }
+ }
+
+ /*
+ * Port GPIOs: set output mode configuration (constant-current or not).
+ * This property is optional.
+ */
+ ret = device_property_read_u32(dev, "maxim,constant-current-disable", &outconf);
+ if (!ret) {
+ ret = regmap_write(regmap, MAX7360_REG_GPIOOUTM, outconf);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to set constant-current configuration\n");
+ }
+ }
+
+ /* Add gpio device. */
+ gpio_config.parent = dev;
+ gpio_config.regmap = regmap;
+ if (plat_data->function == MAX7360_GPIO_PORT) {
+ gpio_config.ngpio = MAX7360_MAX_GPIO;
+ gpio_config.reg_dat_base = GPIO_REGMAP_ADDR(MAX7360_REG_GPIOIN);
+ gpio_config.reg_set_base = GPIO_REGMAP_ADDR(MAX7360_REG_PWMBASE);
+ gpio_config.reg_dir_out_base = GPIO_REGMAP_ADDR(MAX7360_REG_GPIOCTRL);
+ gpio_config.ngpio_per_reg = MAX7360_MAX_GPIO;
+ gpio_config.reg_mask_xlate = max7360_gpio_reg_mask_xlate;
+ } else {
+ ret = max7360_set_gpos_count(dev, regmap);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set GPOS pin count\n");
+
+ gpio_config.reg_set_base = GPIO_REGMAP_ADDR(MAX7360_REG_PORTS);
+ gpio_config.ngpio = MAX7360_MAX_KEY_COLS;
+ gpio_config.init_valid_mask = max7360_gpo_init_valid_mask;
+ }
+
+ return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
+}
+
+static const struct of_device_id max7360_gpio_of_match[] = {
+ {
+ .compatible = "maxim,max7360-gpo",
+ .data = &max7360_gpio_col_plat
+ }, {
+ .compatible = "maxim,max7360-gpio",
+ .data = &max7360_gpio_port_plat
+ }, {
+ }
+};
+MODULE_DEVICE_TABLE(of, max7360_gpio_of_match);
+
+static struct platform_driver max7360_gpio_driver = {
+ .driver = {
+ .name = "max7360-gpio",
+ .of_match_table = max7360_gpio_of_match,
+ },
+ .probe = max7360_gpio_probe,
+};
+module_platform_driver(max7360_gpio_driver);
+
+MODULE_DESCRIPTION("MAX7360 GPIO driver");
+MODULE_AUTHOR("Kamel BOUHARA <kamel.bouhara@bootlin.com>");
+MODULE_AUTHOR("Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>");
+MODULE_LICENSE("GPL");
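
In the MAX7360 driver above, output lines are driven through per-line PWM duty-cycle registers rather than a shared data register, which is what the custom reg_mask_xlate callback encodes. Conceptually, a write to line n then becomes a full-byte update of that line's own register; this is a sketch of the effect, not the literal gpio-regmap internals:

    /* line n high: duty 0xff; line n low: duty 0x00 */
    regmap_update_bits(regmap, MAX7360_REG_PWMBASE + n,
                       GENMASK(7, 0), value ? 0xff : 0x00);
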
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index af7af8e40afe..02eca400b307 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -311,7 +311,7 @@ static int max77620_gpio_probe(struct platform_device *pdev)
mgpio->gpio_chip.direction_input = max77620_gpio_dir_input;
mgpio->gpio_chip.get = max77620_gpio_get;
mgpio->gpio_chip.direction_output = max77620_gpio_dir_output;
- mgpio->gpio_chip.set_rv = max77620_gpio_set;
+ mgpio->gpio_chip.set = max77620_gpio_set;
mgpio->gpio_chip.set_config = max77620_gpio_set_config;
mgpio->gpio_chip.ngpio = MAX77620_GPIO_NR;
mgpio->gpio_chip.can_sleep = 1;
diff --git a/drivers/gpio/gpio-max77650.c b/drivers/gpio/gpio-max77650.c
index a553e141059f..4540da4c1418 100644
--- a/drivers/gpio/gpio-max77650.c
+++ b/drivers/gpio/gpio-max77650.c
@@ -166,7 +166,7 @@ static int max77650_gpio_probe(struct platform_device *pdev)
chip->gc.direction_input = max77650_gpio_direction_input;
chip->gc.direction_output = max77650_gpio_direction_output;
- chip->gc.set_rv = max77650_gpio_set_value;
+ chip->gc.set = max77650_gpio_set_value;
chip->gc.get = max77650_gpio_get_value;
chip->gc.get_direction = max77650_gpio_get_direction;
chip->gc.set_config = max77650_gpio_set_config;
diff --git a/drivers/gpio/gpio-max77759.c b/drivers/gpio/gpio-max77759.c
index 7fe8e6f697d0..5e48eb03e7b3 100644
--- a/drivers/gpio/gpio-max77759.c
+++ b/drivers/gpio/gpio-max77759.c
@@ -469,7 +469,7 @@ static int max77759_gpio_probe(struct platform_device *pdev)
chip->gc.direction_input = max77759_gpio_direction_input;
chip->gc.direction_output = max77759_gpio_direction_output;
chip->gc.get = max77759_gpio_get_value;
- chip->gc.set_rv = max77759_gpio_set_value;
+ chip->gc.set = max77759_gpio_set_value;
girq = &chip->gc.irq;
gpio_irq_chip_set_chip(girq, &max77759_gpio_irq_chip);
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 5ee2991ecdfd..581a71872eab 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -180,7 +180,7 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
gchip->gc.request = mb86s70_gpio_request;
gchip->gc.free = mb86s70_gpio_free;
gchip->gc.get = mb86s70_gpio_get;
- gchip->gc.set_rv = mb86s70_gpio_set;
+ gchip->gc.set = mb86s70_gpio_set;
gchip->gc.to_irq = mb86s70_gpio_to_irq;
gchip->gc.label = dev_name(&pdev->dev);
gchip->gc.ngpio = 32;
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index e68956104161..9a40e9579e95 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -103,7 +103,7 @@ static int mc33880_probe(struct spi_device *spi)
mc->spi = spi;
mc->chip.label = DRIVER_NAME;
- mc->chip.set_rv = mc33880_set;
+ mc->chip.set = mc33880_set;
mc->chip.base = pdata->base;
mc->chip.ngpio = PIN_NUMBER;
mc->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index ebe5da4933bc..52b13c6ae496 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -12,6 +12,7 @@
#include <linux/mcb.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#define MEN_Z127_CTRL 0x00
#define MEN_Z127_PSR 0x04
@@ -23,6 +24,11 @@
#define MEN_Z127_ODER 0x1C
#define GPIO_TO_DBCNT_REG(gpio) ((gpio * 4) + 0x80)
+/* MEN Z127 supported model IDs */
+#define MEN_Z127_ID 0x7f
+#define MEN_Z034_ID 0x22
+#define MEN_Z037_ID 0x25
+
#define MEN_Z127_DB_MIN_US 50
/* 16 bit compare register. Each bit represents 50us */
#define MEN_Z127_DB_MAX_US (0xffff * MEN_Z127_DB_MIN_US)
@@ -30,7 +36,7 @@
(db <= MEN_Z127_DB_MAX_US))
struct men_z127_gpio {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *reg_base;
struct resource *mem;
};
@@ -64,7 +70,7 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
debounce /= 50;
}
- raw_spin_lock(&gc->bgpio_lock);
+ guard(gpio_generic_lock)(&priv->chip);
db_en = readl(priv->reg_base + MEN_Z127_DBER);
@@ -79,8 +85,6 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
writel(db_en, priv->reg_base + MEN_Z127_DBER);
writel(db_cnt, priv->reg_base + GPIO_TO_DBCNT_REG(gpio));
- raw_spin_unlock(&gc->bgpio_lock);
-
return 0;
}
@@ -91,7 +95,8 @@ static int men_z127_set_single_ended(struct gpio_chip *gc,
struct men_z127_gpio *priv = gpiochip_get_data(gc);
u32 od_en;
- raw_spin_lock(&gc->bgpio_lock);
+ guard(gpio_generic_lock)(&priv->chip);
+
od_en = readl(priv->reg_base + MEN_Z127_ODER);
if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN)
@@ -101,7 +106,6 @@ static int men_z127_set_single_ended(struct gpio_chip *gc,
od_en &= ~BIT(offset);
writel(od_en, priv->reg_base + MEN_Z127_ODER);
- raw_spin_unlock(&gc->bgpio_lock);
return 0;
}
@@ -137,9 +141,11 @@ static void men_z127_release_mem(void *data)
static int men_z127_probe(struct mcb_device *mdev,
const struct mcb_device_id *id)
{
+ struct gpio_generic_chip_config config;
struct men_z127_gpio *men_z127_gpio;
struct device *dev = &mdev->dev;
int ret;
+ unsigned long sz;
men_z127_gpio = devm_kzalloc(dev, sizeof(struct men_z127_gpio),
GFP_KERNEL);
@@ -163,18 +169,33 @@ static int men_z127_probe(struct mcb_device *mdev,
mcb_set_drvdata(mdev, men_z127_gpio);
- ret = bgpio_init(&men_z127_gpio->gc, &mdev->dev, 4,
- men_z127_gpio->reg_base + MEN_Z127_PSR,
- men_z127_gpio->reg_base + MEN_Z127_CTRL,
- NULL,
- men_z127_gpio->reg_base + MEN_Z127_GPIODR,
- NULL, 0);
+ switch (mdev->id) {
+ case MEN_Z127_ID:
+ sz = 4;
+ break;
+ case MEN_Z034_ID:
+ case MEN_Z037_ID:
+ sz = 1;
+ break;
+ default:
+ return dev_err_probe(&mdev->dev, -EINVAL, "no size found for id %d\n", mdev->id);
+ }
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = &mdev->dev,
+ .sz = sz,
+ .dat = men_z127_gpio->reg_base + MEN_Z127_PSR,
+ .set = men_z127_gpio->reg_base + MEN_Z127_CTRL,
+ .dirout = men_z127_gpio->reg_base + MEN_Z127_GPIODR,
+ };
+
+ ret = gpio_generic_chip_init(&men_z127_gpio->chip, &config);
if (ret)
return ret;
- men_z127_gpio->gc.set_config = men_z127_set_config;
+ men_z127_gpio->chip.gc.set_config = men_z127_set_config;
- ret = devm_gpiochip_add_data(dev, &men_z127_gpio->gc, men_z127_gpio);
+ ret = devm_gpiochip_add_data(dev, &men_z127_gpio->chip.gc, men_z127_gpio);
if (ret)
return dev_err_probe(dev, ret,
"failed to register MEN 16Z127 GPIO controller");
@@ -183,7 +204,9 @@ static int men_z127_probe(struct mcb_device *mdev,
}
static const struct mcb_device_id men_z127_ids[] = {
- { .device = 0x7f },
+ { .device = MEN_Z127_ID },
+ { .device = MEN_Z034_ID },
+ { .device = MEN_Z037_ID },
{ }
};
MODULE_DEVICE_TABLE(mcb, men_z127_ids);
@@ -198,7 +221,7 @@ static struct mcb_driver men_z127_driver = {
module_mcb_driver(men_z127_driver);
MODULE_AUTHOR("Andreas Werner <andreas.werner@men.de>");
-MODULE_DESCRIPTION("MEN 16z127 GPIO Controller");
+MODULE_DESCRIPTION("MEN GPIO Controller");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("mcb:16z127");
MODULE_IMPORT_NS("MCB");
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 12cf36f9ca63..6576e5dcb0ee 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -160,7 +160,7 @@ static int ioh_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
/*
* Save register configuration and disable interrupts.
*/
-static void __maybe_unused ioh_gpio_save_reg_conf(struct ioh_gpio *chip)
+static void ioh_gpio_save_reg_conf(struct ioh_gpio *chip)
{
int i;
@@ -186,7 +186,7 @@ static void __maybe_unused ioh_gpio_save_reg_conf(struct ioh_gpio *chip)
/*
* This function restores the register configuration of the GPIO device.
*/
-static void __maybe_unused ioh_gpio_restore_reg_conf(struct ioh_gpio *chip)
+static void ioh_gpio_restore_reg_conf(struct ioh_gpio *chip)
{
int i;
@@ -224,7 +224,7 @@ static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
gpio->direction_input = ioh_gpio_direction_input;
gpio->get = ioh_gpio_get;
gpio->direction_output = ioh_gpio_direction_output;
- gpio->set_rv = ioh_gpio_set;
+ gpio->set = ioh_gpio_set;
gpio->dbg_show = NULL;
gpio->base = -1;
gpio->ngpio = num_port;
@@ -479,7 +479,7 @@ static int ioh_gpio_probe(struct pci_dev *pdev,
return 0;
}
-static int __maybe_unused ioh_gpio_suspend(struct device *dev)
+static int ioh_gpio_suspend(struct device *dev)
{
struct ioh_gpio *chip = dev_get_drvdata(dev);
unsigned long flags;
@@ -491,7 +491,7 @@ static int __maybe_unused ioh_gpio_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused ioh_gpio_resume(struct device *dev)
+static int ioh_gpio_resume(struct device *dev)
{
struct ioh_gpio *chip = dev_get_drvdata(dev);
unsigned long flags;
@@ -505,7 +505,7 @@ static int __maybe_unused ioh_gpio_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(ioh_gpio_pm_ops, ioh_gpio_suspend, ioh_gpio_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ioh_gpio_pm_ops, ioh_gpio_suspend, ioh_gpio_resume);
static const struct pci_device_id ioh_gpio_pcidev_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) },
@@ -518,7 +518,7 @@ static struct pci_driver ioh_gpio_driver = {
.id_table = ioh_gpio_pcidev_id,
.probe = ioh_gpio_probe,
.driver = {
- .pm = &ioh_gpio_pm_ops,
+ .pm = pm_sleep_ptr(&ioh_gpio_pm_ops),
},
};
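
The ml-ioh changes above can drop the __maybe_unused annotations because the DEFINE_SIMPLE_DEV_PM_OPS()/pm_sleep_ptr() pair keeps the callbacks referenced at compile time and lets the compiler discard them when CONFIG_PM_SLEEP is off. The minimal pattern, with hypothetical foo_* names:

    static int foo_suspend(struct device *dev)
    {
        /* save registers, mask interrupts, ... */
        return 0;
    }

    static int foo_resume(struct device *dev)
    {
        /* restore the saved register configuration */
        return 0;
    }

    static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static struct pci_driver foo_driver = {
        .driver = {
            /* evaluates to NULL when CONFIG_PM_SLEEP is disabled */
            .pm = pm_sleep_ptr(&foo_pm_ops),
        },
    };
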
diff --git a/drivers/gpio/gpio-mlxbf.c b/drivers/gpio/gpio-mlxbf.c
index 1fa9973f55b9..a18fedbc463e 100644
--- a/drivers/gpio/gpio-mlxbf.c
+++ b/drivers/gpio/gpio-mlxbf.c
@@ -4,6 +4,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -37,7 +38,7 @@ struct mlxbf_gpio_context_save_regs {
/* Device state structure. */
struct mlxbf_gpio_state {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
/* Memory Address */
void __iomem *base;
@@ -49,6 +50,7 @@ struct mlxbf_gpio_state {
static int mlxbf_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct mlxbf_gpio_state *gs;
struct device *dev = &pdev->dev;
struct gpio_chip *gc;
@@ -62,21 +64,24 @@ static int mlxbf_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gs->base))
return PTR_ERR(gs->base);
- gc = &gs->gc;
- ret = bgpio_init(gc, dev, 8,
- gs->base + MLXBF_GPIO_PIN_STATE,
- NULL,
- NULL,
- gs->base + MLXBF_GPIO_PIN_DIR_O,
- gs->base + MLXBF_GPIO_PIN_DIR_I,
- 0);
+ gc = &gs->chip.gc;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 8,
+ .dat = gs->base + MLXBF_GPIO_PIN_STATE,
+ .dirout = gs->base + MLXBF_GPIO_PIN_DIR_O,
+ .dirin = gs->base + MLXBF_GPIO_PIN_DIR_I,
+ };
+
+ ret = gpio_generic_chip_init(&gs->chip, &config);
if (ret)
return -ENODEV;
gc->owner = THIS_MODULE;
gc->ngpio = MLXBF_GPIO_NR;
- ret = devm_gpiochip_add_data(dev, &gs->gc, gs);
+ ret = devm_gpiochip_add_data(dev, &gs->chip.gc, gs);
if (ret) {
dev_err(&pdev->dev, "Failed adding memory mapped gpiochip\n");
return ret;
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
index 6f3dda6b635f..6668686a28ff 100644
--- a/drivers/gpio/gpio-mlxbf2.c
+++ b/drivers/gpio/gpio-mlxbf2.c
@@ -6,8 +6,10 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
@@ -65,7 +67,7 @@ struct mlxbf2_gpio_context_save_regs {
/* BlueField-2 gpio block context structure. */
struct mlxbf2_gpio_context {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
/* YU GPIO blocks address */
void __iomem *gpio_io;
@@ -132,7 +134,7 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
u32 arm_gpio_lock_val;
mutex_lock(yu_arm_gpio_lock_param.lock);
- raw_spin_lock(&gs->gc.bgpio_lock);
+ gpio_generic_chip_lock(&gs->chip);
arm_gpio_lock_val = readl(yu_arm_gpio_lock_param.io);
@@ -140,7 +142,7 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
* When lock active bit[31] is set, ModeX is write enabled
*/
if (YU_LOCK_ACTIVE_BIT(arm_gpio_lock_val)) {
- raw_spin_unlock(&gs->gc.bgpio_lock);
+ gpio_generic_chip_unlock(&gs->chip);
mutex_unlock(yu_arm_gpio_lock_param.lock);
return -EINVAL;
}
@@ -154,11 +156,11 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
* Release the YU arm_gpio_lock after changing the direction mode.
*/
static void mlxbf2_gpio_lock_release(struct mlxbf2_gpio_context *gs)
- __releases(&gs->gc.bgpio_lock)
+ __releases(&gs->chip.lock)
__releases(yu_arm_gpio_lock_param.lock)
{
writel(YU_ARM_GPIO_LOCK_RELEASE, yu_arm_gpio_lock_param.io);
- raw_spin_unlock(&gs->gc.bgpio_lock);
+ gpio_generic_chip_unlock(&gs->chip);
mutex_unlock(yu_arm_gpio_lock_param.lock);
}
@@ -235,11 +237,10 @@ static void mlxbf2_gpio_irq_enable(struct irq_data *irqd)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf2_gpio_context *gs = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
gpiochip_enable_irq(gc, irqd_to_hwirq(irqd));
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&gs->chip);
val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_CLRCAUSE);
val |= BIT(offset);
writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_CLRCAUSE);
@@ -247,7 +248,6 @@ static void mlxbf2_gpio_irq_enable(struct irq_data *irqd)
val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
val |= BIT(offset);
writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
}
static void mlxbf2_gpio_irq_disable(struct irq_data *irqd)
@@ -255,21 +255,21 @@ static void mlxbf2_gpio_irq_disable(struct irq_data *irqd)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf2_gpio_context *gs = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
- val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
- val &= ~BIT(offset);
- writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &gs->chip) {
+ val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
+ val &= ~BIT(offset);
+ writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
+ }
+
gpiochip_disable_irq(gc, irqd_to_hwirq(irqd));
}
static irqreturn_t mlxbf2_gpio_irq_handler(int irq, void *ptr)
{
struct mlxbf2_gpio_context *gs = ptr;
- struct gpio_chip *gc = &gs->gc;
+ struct gpio_chip *gc = &gs->chip.gc;
unsigned long pending;
u32 level;
@@ -288,7 +288,6 @@ mlxbf2_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf2_gpio_context *gs = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(irqd);
- unsigned long flags;
bool fall = false;
bool rise = false;
u32 val;
@@ -308,7 +307,8 @@ mlxbf2_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&gs->chip);
+
if (fall) {
val = readl(gs->gpio_io + YU_GPIO_CAUSE_FALL_EN);
val |= BIT(offset);
@@ -320,7 +320,6 @@ mlxbf2_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
val |= BIT(offset);
writel(val, gs->gpio_io + YU_GPIO_CAUSE_RISE_EN);
}
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
return 0;
}
@@ -347,6 +346,7 @@ static const struct irq_chip mlxbf2_gpio_irq_chip = {
static int
mlxbf2_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct mlxbf2_gpio_context *gs;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
@@ -369,37 +369,34 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
return PTR_ERR(gs->gpio_io);
ret = mlxbf2_gpio_get_lock_res(pdev);
- if (ret) {
- dev_err(dev, "Failed to get yu_arm_gpio_lock resource\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get yu_arm_gpio_lock resource\n");
if (device_property_read_u32(dev, "npins", &npins))
npins = MLXBF2_GPIO_MAX_PINS_PER_BLOCK;
- gc = &gs->gc;
+ gc = &gs->chip.gc;
- ret = bgpio_init(gc, dev, 4,
- gs->gpio_io + YU_GPIO_DATAIN,
- gs->gpio_io + YU_GPIO_DATASET,
- gs->gpio_io + YU_GPIO_DATACLEAR,
- NULL,
- NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = gs->gpio_io + YU_GPIO_DATAIN,
+ .set = gs->gpio_io + YU_GPIO_DATASET,
+ .clr = gs->gpio_io + YU_GPIO_DATACLEAR,
+ };
- if (ret) {
- dev_err(dev, "bgpio_init failed\n");
- return ret;
- }
+ ret = gpio_generic_chip_init(&gs->chip, &config);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize the generic GPIO chip\n");
gc->direction_input = mlxbf2_gpio_direction_input;
gc->direction_output = mlxbf2_gpio_direction_output;
gc->ngpio = npins;
gc->owner = THIS_MODULE;
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq >= 0) {
- girq = &gs->gc.irq;
+ girq = &gs->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &mlxbf2_gpio_irq_chip);
girq->handler = handle_simple_irq;
girq->default_type = IRQ_TYPE_NONE;
@@ -414,24 +411,20 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
*/
ret = devm_request_irq(dev, irq, mlxbf2_gpio_irq_handler,
IRQF_SHARED, name, gs);
- if (ret) {
- dev_err(dev, "failed to request IRQ");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request IRQ\n");
}
platform_set_drvdata(pdev, gs);
- ret = devm_gpiochip_add_data(dev, &gs->gc, gs);
- if (ret) {
- dev_err(dev, "Failed adding memory mapped gpiochip\n");
- return ret;
- }
+ ret = devm_gpiochip_add_data(dev, &gs->chip.gc, gs);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed adding memory mapped gpiochip\n");
return 0;
}
-static int __maybe_unused mlxbf2_gpio_suspend(struct device *dev)
+static int mlxbf2_gpio_suspend(struct device *dev)
{
struct mlxbf2_gpio_context *gs = dev_get_drvdata(dev);
@@ -443,7 +436,7 @@ static int __maybe_unused mlxbf2_gpio_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused mlxbf2_gpio_resume(struct device *dev)
+static int mlxbf2_gpio_resume(struct device *dev)
{
struct mlxbf2_gpio_context *gs = dev_get_drvdata(dev);
@@ -454,7 +447,7 @@ static int __maybe_unused mlxbf2_gpio_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(mlxbf2_pm_ops, mlxbf2_gpio_suspend, mlxbf2_gpio_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(mlxbf2_pm_ops, mlxbf2_gpio_suspend, mlxbf2_gpio_resume);
static const struct acpi_device_id __maybe_unused mlxbf2_gpio_acpi_match[] = {
{ "MLNXBF22", 0 },
@@ -466,7 +459,7 @@ static struct platform_driver mlxbf2_gpio_driver = {
.driver = {
.name = "mlxbf2_gpio",
.acpi_match_table = mlxbf2_gpio_acpi_match,
- .pm = &mlxbf2_pm_ops,
+ .pm = pm_sleep_ptr(&mlxbf2_pm_ops),
},
.probe = mlxbf2_gpio_probe,
};
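
The mlxbf2 irq_disable and set_type conversions above use scoped_guard() rather than guard() so the critical section ends before the trailing call instead of extending to the end of the function. The general shape, with names taken from the hunks:

    scoped_guard(gpio_generic_lock_irqsave, &gs->chip) {
        val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
        val &= ~BIT(offset);
        writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
    }   /* lock dropped here */

    /* runs after the lock is released, keeping the critical
     * section as small as possible */
    gpiochip_disable_irq(gc, irqd_to_hwirq(irqd));
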
diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c
index 10ea71273c89..4770578269ba 100644
--- a/drivers/gpio/gpio-mlxbf3.c
+++ b/drivers/gpio/gpio-mlxbf3.c
@@ -6,6 +6,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -42,7 +43,7 @@
#define MLXBF_GPIO_CLR_ALL_INTS GENMASK(31, 0)
struct mlxbf3_gpio_context {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
/* YU GPIO block address */
void __iomem *gpio_set_io;
@@ -58,18 +59,17 @@ static void mlxbf3_gpio_irq_enable(struct irq_data *irqd)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf3_gpio_context *gs = gpiochip_get_data(gc);
irq_hw_number_t offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
gpiochip_enable_irq(gc, offset);
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&gs->chip);
+
writel(BIT(offset), gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
val = readl(gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
val |= BIT(offset);
writel(val, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
}
static void mlxbf3_gpio_irq_disable(struct irq_data *irqd)
@@ -77,16 +77,15 @@ static void mlxbf3_gpio_irq_disable(struct irq_data *irqd)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf3_gpio_context *gs = gpiochip_get_data(gc);
irq_hw_number_t offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
- val = readl(gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
- val &= ~BIT(offset);
- writel(val, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
+ scoped_guard(gpio_generic_lock_irqsave, &gs->chip) {
+ val = readl(gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
+ val &= ~BIT(offset);
+ writel(val, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
- writel(BIT(offset), gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
+ writel(BIT(offset), gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
+ }
gpiochip_disable_irq(gc, offset);
}
@@ -94,7 +93,7 @@ static void mlxbf3_gpio_irq_disable(struct irq_data *irqd)
static irqreturn_t mlxbf3_gpio_irq_handler(int irq, void *ptr)
{
struct mlxbf3_gpio_context *gs = ptr;
- struct gpio_chip *gc = &gs->gc;
+ struct gpio_chip *gc = &gs->chip.gc;
unsigned long pending;
u32 level;
@@ -113,37 +112,33 @@ mlxbf3_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf3_gpio_context *gs = gpiochip_get_data(gc);
irq_hw_number_t offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
-
- switch (type & IRQ_TYPE_SENSE_MASK) {
- case IRQ_TYPE_EDGE_BOTH:
- val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
- val |= BIT(offset);
- writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
- val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
- val |= BIT(offset);
- writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
- break;
- case IRQ_TYPE_EDGE_RISING:
- val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
- val |= BIT(offset);
- writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
- break;
- case IRQ_TYPE_EDGE_FALLING:
- val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
- val |= BIT(offset);
- writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
- break;
- default:
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
- return -EINVAL;
+ scoped_guard(gpio_generic_lock_irqsave, &gs->chip) {
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_BOTH:
+ val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
+ val |= BIT(offset);
+ writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
+ val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
+ val |= BIT(offset);
+ writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
+ val |= BIT(offset);
+ writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
+ val |= BIT(offset);
+ writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
+ break;
+ default:
+ return -EINVAL;
+ }
}
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
-
irq_set_handler_locked(irqd, handle_edge_irq);
return 0;
@@ -186,6 +181,7 @@ static int mlxbf3_gpio_add_pin_ranges(struct gpio_chip *chip)
static int mlxbf3_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct mlxbf3_gpio_context *gs;
struct gpio_irq_chip *girq;
@@ -211,25 +207,32 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
gs->gpio_clr_io = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(gs->gpio_clr_io))
return PTR_ERR(gs->gpio_clr_io);
- gc = &gs->gc;
-
- ret = bgpio_init(gc, dev, 4,
- gs->gpio_io + MLXBF_GPIO_READ_DATA_IN,
- gs->gpio_set_io + MLXBF_GPIO_FW_DATA_OUT_SET,
- gs->gpio_clr_io + MLXBF_GPIO_FW_DATA_OUT_CLEAR,
- gs->gpio_set_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_SET,
- gs->gpio_clr_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_CLEAR, 0);
+ gc = &gs->chip.gc;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = gs->gpio_io + MLXBF_GPIO_READ_DATA_IN,
+ .set = gs->gpio_set_io + MLXBF_GPIO_FW_DATA_OUT_SET,
+ .clr = gs->gpio_clr_io + MLXBF_GPIO_FW_DATA_OUT_CLEAR,
+ .dirout = gs->gpio_set_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_SET,
+ .dirin = gs->gpio_clr_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_CLEAR,
+ };
+
+ ret = gpio_generic_chip_init(&gs->chip, &config);
if (ret)
- return dev_err_probe(dev, ret, "%s: bgpio_init() failed", __func__);
+ return dev_err_probe(dev, ret,
+ "%s: failed to initialize the generic GPIO chip",
+ __func__);
gc->request = gpiochip_generic_request;
gc->free = gpiochip_generic_free;
gc->owner = THIS_MODULE;
gc->add_pin_ranges = mlxbf3_gpio_add_pin_ranges;
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq >= 0) {
- girq = &gs->gc.irq;
+ girq = &gs->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
girq->default_type = IRQ_TYPE_NONE;
/* This will let us handle the parent IRQ in the driver */
@@ -250,7 +253,7 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gs);
- ret = devm_gpiochip_add_data(dev, &gs->gc, gs);
+ ret = devm_gpiochip_add_data(dev, gc, gs);
if (ret)
return dev_err_probe(dev, ret, "Failed adding memory mapped gpiochip\n");
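
The mlxbf3 hunks replace open-coded raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs on the former gc->bgpio_lock with cleanup.h guards on the generic chip. A hedged sketch of the two forms used above; 'reg' and 'bit' stand in for real hardware details:

/* Sketch only; not part of the patch. */
#include <linux/cleanup.h>
#include <linux/gpio/generic.h>
#include <linux/io.h>

static void my_rmw_sketch(struct gpio_generic_chip *chip,
			  void __iomem *reg, u32 bit)
{
	/* Lock held until the end of the function, released automatically. */
	guard(gpio_generic_lock_irqsave)(chip);

	writel(readl(reg) | bit, reg);
}

static int my_checked_rmw_sketch(struct gpio_generic_chip *chip,
				 void __iomem *reg, u32 bit, bool valid)
{
	/*
	 * scoped_guard() confines the critical section to the block, so the
	 * early return below drops the lock without an explicit unlock --
	 * the same shape as the -EINVAL path in mlxbf3_gpio_irq_set_type().
	 */
	scoped_guard(gpio_generic_lock_irqsave, chip) {
		if (!valid)
			return -EINVAL;
		writel(readl(reg) | bit, reg);
	}

	return 0;
}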
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
index 14ae25783438..1bd98c50a459 100644
--- a/drivers/gpio/gpio-mm-lantiq.c
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -10,7 +10,6 @@
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/gpio/driver.h>
-#include <linux/gpio/legacy-of-mm-gpiochip.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -27,7 +26,8 @@
#define LTQ_EBU_WP 0x80000000 /* write protect bit */
struct ltq_mm {
- struct of_mm_gpio_chip mmchip;
+ struct gpio_chip gc;
+ void __iomem *regs;
u16 shadow; /* shadow the latches state */
};
@@ -44,7 +44,7 @@ static void ltq_mm_apply(struct ltq_mm *chip)
spin_lock_irqsave(&ebu_lock, flags);
ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1);
- __raw_writew(chip->shadow, chip->mmchip.regs);
+ __raw_writew(chip->shadow, chip->regs);
ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
spin_unlock_irqrestore(&ebu_lock, flags);
}
@@ -52,12 +52,12 @@ static void ltq_mm_apply(struct ltq_mm *chip)
/**
* ltq_mm_set() - gpio_chip->set - set gpios.
* @gc: Pointer to gpio_chip device structure.
- * @gpio: GPIO signal number.
- * @val: Value to be written to specified signal.
+ * @offset: GPIO signal number.
+ * @value: Value to be written to specified signal.
*
- * Set the shadow value and call ltq_mm_apply.
+ * Set the shadow value and call ltq_mm_apply. Always returns 0.
*/
-static void ltq_mm_set(struct gpio_chip *gc, unsigned offset, int value)
+static int ltq_mm_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct ltq_mm *chip = gpiochip_get_data(gc);
@@ -66,40 +66,40 @@ static void ltq_mm_set(struct gpio_chip *gc, unsigned offset, int value)
else
chip->shadow &= ~(1 << offset);
ltq_mm_apply(chip);
+
+ return 0;
}
/**
* ltq_mm_dir_out() - gpio_chip->dir_out - set gpio direction.
* @gc: Pointer to gpio_chip device structure.
- * @gpio: GPIO signal number.
- * @val: Value to be written to specified signal.
+ * @offset: GPIO signal number.
+ * @value: Value to be written to specified signal.
*
* Same as ltq_mm_set, always returns 0.
*/
static int ltq_mm_dir_out(struct gpio_chip *gc, unsigned offset, int value)
{
- ltq_mm_set(gc, offset, value);
-
- return 0;
+ return ltq_mm_set(gc, offset, value);
}
/**
* ltq_mm_save_regs() - Set initial values of GPIO pins
- * @mm_gc: pointer to memory mapped GPIO chip structure
+ * @chip: Pointer to our private data structure.
*/
-static void ltq_mm_save_regs(struct of_mm_gpio_chip *mm_gc)
+static void ltq_mm_save_regs(struct ltq_mm *chip)
{
- struct ltq_mm *chip =
- container_of(mm_gc, struct ltq_mm, mmchip);
-
/* tell the ebu controller which memory address we will be using */
- ltq_ebu_w32(CPHYSADDR(chip->mmchip.regs) | 0x1, LTQ_EBU_ADDRSEL1);
+ ltq_ebu_w32(CPHYSADDR((__force void *)chip->regs) | 0x1, LTQ_EBU_ADDRSEL1);
ltq_mm_apply(chip);
}
static int ltq_mm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct gpio_chip *gc;
struct ltq_mm *chip;
u32 shadow;
@@ -107,25 +107,29 @@ static int ltq_mm_probe(struct platform_device *pdev)
if (!chip)
return -ENOMEM;
- platform_set_drvdata(pdev, chip);
+ gc = &chip->gc;
+
+ gc->base = -1;
+ gc->ngpio = 16;
+ gc->direction_output = ltq_mm_dir_out;
+ gc->set = ltq_mm_set;
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+ if (!gc->label)
+ return -ENOMEM;
+
+ chip->regs = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(chip->regs))
+ return PTR_ERR(chip->regs);
- chip->mmchip.gc.ngpio = 16;
- chip->mmchip.gc.direction_output = ltq_mm_dir_out;
- chip->mmchip.gc.set = ltq_mm_set;
- chip->mmchip.save_regs = ltq_mm_save_regs;
+ ltq_mm_save_regs(chip);
/* store the shadow value if one was passed by the devicetree */
if (!of_property_read_u32(pdev->dev.of_node, "lantiq,shadow", &shadow))
chip->shadow = shadow;
- return of_mm_gpiochip_add_data(pdev->dev.of_node, &chip->mmchip, chip);
-}
-
-static void ltq_mm_remove(struct platform_device *pdev)
-{
- struct ltq_mm *chip = platform_get_drvdata(pdev);
-
- of_mm_gpiochip_remove(&chip->mmchip);
+ return devm_gpiochip_add_data(dev, gc, chip);
}
static const struct of_device_id ltq_mm_match[] = {
@@ -136,7 +140,6 @@ MODULE_DEVICE_TABLE(of, ltq_mm_match);
static struct platform_driver ltq_mm_driver = {
.probe = ltq_mm_probe,
- .remove = ltq_mm_remove,
.driver = {
.name = "gpio-mm-ltq",
.of_match_table = ltq_mm_match,
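
The lantiq conversion drops <linux/gpio/legacy-of-mm-gpiochip.h>: instead of of_mm_gpiochip_add_data() plus a matching of_mm_gpiochip_remove() in .remove, the probe maps the registers and registers the chip with devm helpers. A minimal sketch of the replacement skeleton; struct my_chip is a hypothetical driver context:

/* Sketch only; not part of the patch. */
#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>

struct my_chip {
	struct gpio_chip gc;
	void __iomem *regs;
};

static int my_probe_sketch(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct my_chip *chip;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	/* The label and register mapping were previously done by the of_mm helper. */
	chip->gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
	if (!chip->gc.label)
		return -ENOMEM;

	chip->regs = devm_of_iomap(dev, np, 0, NULL);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	chip->gc.base = -1;
	/* ...set ngpio and the line callbacks as in the hunks above... */

	/* devm unwinding makes the old .remove callback unnecessary. */
	return devm_gpiochip_add_data(dev, &chip->gc, chip);
}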
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index 4841e4ebe7a6..b3a26a06260b 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -41,6 +41,7 @@ o ` ~~~~\___/~~~~ ` controller in FPGA is ,.`
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -57,136 +58,145 @@ o ` ~~~~\___/~~~~ ` controller in FPGA is ,.`
#include <linux/types.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include "gpiolib.h"
-static void bgpio_write8(void __iomem *reg, unsigned long data)
+static void gpio_mmio_write8(void __iomem *reg, unsigned long data)
{
writeb(data, reg);
}
-static unsigned long bgpio_read8(void __iomem *reg)
+static unsigned long gpio_mmio_read8(void __iomem *reg)
{
return readb(reg);
}
-static void bgpio_write16(void __iomem *reg, unsigned long data)
+static void gpio_mmio_write16(void __iomem *reg, unsigned long data)
{
writew(data, reg);
}
-static unsigned long bgpio_read16(void __iomem *reg)
+static unsigned long gpio_mmio_read16(void __iomem *reg)
{
return readw(reg);
}
-static void bgpio_write32(void __iomem *reg, unsigned long data)
+static void gpio_mmio_write32(void __iomem *reg, unsigned long data)
{
writel(data, reg);
}
-static unsigned long bgpio_read32(void __iomem *reg)
+static unsigned long gpio_mmio_read32(void __iomem *reg)
{
return readl(reg);
}
#if BITS_PER_LONG >= 64
-static void bgpio_write64(void __iomem *reg, unsigned long data)
+static void gpio_mmio_write64(void __iomem *reg, unsigned long data)
{
writeq(data, reg);
}
-static unsigned long bgpio_read64(void __iomem *reg)
+static unsigned long gpio_mmio_read64(void __iomem *reg)
{
return readq(reg);
}
#endif /* BITS_PER_LONG >= 64 */
-static void bgpio_write16be(void __iomem *reg, unsigned long data)
+static void gpio_mmio_write16be(void __iomem *reg, unsigned long data)
{
iowrite16be(data, reg);
}
-static unsigned long bgpio_read16be(void __iomem *reg)
+static unsigned long gpio_mmio_read16be(void __iomem *reg)
{
return ioread16be(reg);
}
-static void bgpio_write32be(void __iomem *reg, unsigned long data)
+static void gpio_mmio_write32be(void __iomem *reg, unsigned long data)
{
iowrite32be(data, reg);
}
-static unsigned long bgpio_read32be(void __iomem *reg)
+static unsigned long gpio_mmio_read32be(void __iomem *reg)
{
return ioread32be(reg);
}
-static unsigned long bgpio_line2mask(struct gpio_chip *gc, unsigned int line)
+static unsigned long gpio_mmio_line2mask(struct gpio_chip *gc, unsigned int line)
{
- if (gc->be_bits)
- return BIT(gc->bgpio_bits - 1 - line);
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ if (chip->be_bits)
+ return BIT(chip->bits - 1 - line);
return BIT(line);
}
-static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
+static int gpio_mmio_get_set(struct gpio_chip *gc, unsigned int gpio)
{
- unsigned long pinmask = bgpio_line2mask(gc, gpio);
- bool dir = !!(gc->bgpio_dir & pinmask);
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+ unsigned long pinmask = gpio_mmio_line2mask(gc, gpio);
+ bool dir = !!(chip->sdir & pinmask);
if (dir)
- return !!(gc->read_reg(gc->reg_set) & pinmask);
- else
- return !!(gc->read_reg(gc->reg_dat) & pinmask);
+ return !!(chip->read_reg(chip->reg_set) & pinmask);
+
+ return !!(chip->read_reg(chip->reg_dat) & pinmask);
}
/*
* This assumes that the bits in the GPIO register are in native endianness.
* We only assign the function pointer if we have that.
*/
-static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int gpio_mmio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
- unsigned long get_mask = 0;
- unsigned long set_mask = 0;
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+ unsigned long get_mask = 0, set_mask = 0;
/* Make sure we first clear any bits that are zero when we read the register */
*bits &= ~*mask;
- set_mask = *mask & gc->bgpio_dir;
- get_mask = *mask & ~gc->bgpio_dir;
+ set_mask = *mask & chip->sdir;
+ get_mask = *mask & ~chip->sdir;
if (set_mask)
- *bits |= gc->read_reg(gc->reg_set) & set_mask;
+ *bits |= chip->read_reg(chip->reg_set) & set_mask;
if (get_mask)
- *bits |= gc->read_reg(gc->reg_dat) & get_mask;
+ *bits |= chip->read_reg(chip->reg_dat) & get_mask;
return 0;
}
-static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
+static int gpio_mmio_get(struct gpio_chip *gc, unsigned int gpio)
{
- return !!(gc->read_reg(gc->reg_dat) & bgpio_line2mask(gc, gpio));
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ return !!(chip->read_reg(chip->reg_dat) & gpio_mmio_line2mask(gc, gpio));
}
/*
* This only works if the bits in the GPIO register are in native endianness.
*/
-static int bgpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int gpio_mmio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
/* Make sure we first clear any bits that are zero when we read the register */
*bits &= ~*mask;
- *bits |= gc->read_reg(gc->reg_dat) & *mask;
+ *bits |= chip->read_reg(chip->reg_dat) & *mask;
return 0;
}
/*
* With big endian mirrored bit order it becomes more tedious.
*/
-static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int gpio_mmio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long readmask = 0;
unsigned long val;
int bit;
@@ -196,137 +206,155 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
/* Create a mirrored mask */
for_each_set_bit(bit, mask, gc->ngpio)
- readmask |= bgpio_line2mask(gc, bit);
+ readmask |= gpio_mmio_line2mask(gc, bit);
/* Read the register */
- val = gc->read_reg(gc->reg_dat) & readmask;
+ val = chip->read_reg(chip->reg_dat) & readmask;
/*
* Mirror the result into the "bits" result, this will give line 0
* in bit 0 ... line 31 in bit 31 for a 32bit register.
*/
for_each_set_bit(bit, &val, gc->ngpio)
- *bits |= bgpio_line2mask(gc, bit);
+ *bits |= gpio_mmio_line2mask(gc, bit);
return 0;
}
-static void bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val)
+static int gpio_mmio_set_none(struct gpio_chip *gc, unsigned int gpio, int val)
{
+ return 0;
}
-static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int gpio_mmio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- unsigned long mask = bgpio_line2mask(gc, gpio);
- unsigned long flags;
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+ unsigned long mask = gpio_mmio_line2mask(gc, gpio);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(raw_spinlock)(&chip->lock);
if (val)
- gc->bgpio_data |= mask;
+ chip->sdata |= mask;
else
- gc->bgpio_data &= ~mask;
+ chip->sdata &= ~mask;
- gc->write_reg(gc->reg_dat, gc->bgpio_data);
+ chip->write_reg(chip->reg_dat, chip->sdata);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ return 0;
}
-static void bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio,
- int val)
+static int gpio_mmio_set_with_clear(struct gpio_chip *gc, unsigned int gpio,
+ int val)
{
- unsigned long mask = bgpio_line2mask(gc, gpio);
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+ unsigned long mask = gpio_mmio_line2mask(gc, gpio);
if (val)
- gc->write_reg(gc->reg_set, mask);
+ chip->write_reg(chip->reg_set, mask);
else
- gc->write_reg(gc->reg_clr, mask);
+ chip->write_reg(chip->reg_clr, mask);
+
+ return 0;
}
-static void bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int gpio_mmio_set_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- unsigned long mask = bgpio_line2mask(gc, gpio);
- unsigned long flags;
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+ unsigned long mask = gpio_mmio_line2mask(gc, gpio);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(raw_spinlock)(&chip->lock);
if (val)
- gc->bgpio_data |= mask;
+ chip->sdata |= mask;
else
- gc->bgpio_data &= ~mask;
+ chip->sdata &= ~mask;
- gc->write_reg(gc->reg_set, gc->bgpio_data);
+ chip->write_reg(chip->reg_set, chip->sdata);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ return 0;
}
-static void bgpio_multiple_get_masks(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits,
- unsigned long *set_mask,
- unsigned long *clear_mask)
+static void gpio_mmio_multiple_get_masks(struct gpio_chip *gc,
+ unsigned long *mask,
+ unsigned long *bits,
+ unsigned long *set_mask,
+ unsigned long *clear_mask)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
int i;
*set_mask = 0;
*clear_mask = 0;
- for_each_set_bit(i, mask, gc->bgpio_bits) {
+ for_each_set_bit(i, mask, chip->bits) {
if (test_bit(i, bits))
- *set_mask |= bgpio_line2mask(gc, i);
+ *set_mask |= gpio_mmio_line2mask(gc, i);
else
- *clear_mask |= bgpio_line2mask(gc, i);
+ *clear_mask |= gpio_mmio_line2mask(gc, i);
}
}
-static void bgpio_set_multiple_single_reg(struct gpio_chip *gc,
- unsigned long *mask,
- unsigned long *bits,
- void __iomem *reg)
+static void gpio_mmio_set_multiple_single_reg(struct gpio_chip *gc,
+ unsigned long *mask,
+ unsigned long *bits,
+ void __iomem *reg)
{
- unsigned long flags;
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long set_mask, clear_mask;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
-
- bgpio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask);
+ guard(raw_spinlock)(&chip->lock);
- gc->bgpio_data |= set_mask;
- gc->bgpio_data &= ~clear_mask;
+ gpio_mmio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask);
- gc->write_reg(reg, gc->bgpio_data);
+ chip->sdata |= set_mask;
+ chip->sdata &= ~clear_mask;
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ chip->write_reg(reg, chip->sdata);
}
-static void bgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int gpio_mmio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
- bgpio_set_multiple_single_reg(gc, mask, bits, gc->reg_dat);
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ gpio_mmio_set_multiple_single_reg(gc, mask, bits, chip->reg_dat);
+
+ return 0;
}
-static void bgpio_set_multiple_set(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int gpio_mmio_set_multiple_set(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
- bgpio_set_multiple_single_reg(gc, mask, bits, gc->reg_set);
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ gpio_mmio_set_multiple_single_reg(gc, mask, bits, chip->reg_set);
+
+ return 0;
}
-static void bgpio_set_multiple_with_clear(struct gpio_chip *gc,
- unsigned long *mask,
- unsigned long *bits)
+static int gpio_mmio_set_multiple_with_clear(struct gpio_chip *gc,
+ unsigned long *mask,
+ unsigned long *bits)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long set_mask, clear_mask;
- bgpio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask);
+ gpio_mmio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask);
if (set_mask)
- gc->write_reg(gc->reg_set, set_mask);
+ chip->write_reg(chip->reg_set, set_mask);
if (clear_mask)
- gc->write_reg(gc->reg_clr, clear_mask);
+ chip->write_reg(chip->reg_clr, clear_mask);
+
+ return 0;
}
-static int bgpio_dir_return(struct gpio_chip *gc, unsigned int gpio, bool dir_out)
+static int gpio_mmio_dir_return(struct gpio_chip *gc, unsigned int gpio,
+ bool dir_out)
{
- if (!gc->bgpio_pinctrl)
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ if (!chip->pinctrl)
return 0;
if (dir_out)
@@ -335,123 +363,125 @@ static int bgpio_dir_return(struct gpio_chip *gc, unsigned int gpio, bool dir_ou
return pinctrl_gpio_direction_input(gc, gpio);
}
-static int bgpio_simple_dir_in(struct gpio_chip *gc, unsigned int gpio)
+static int gpio_mmio_dir_in_err(struct gpio_chip *gc, unsigned int gpio)
{
- return bgpio_dir_return(gc, gpio, false);
+ return -EINVAL;
}
-static int bgpio_dir_out_err(struct gpio_chip *gc, unsigned int gpio,
- int val)
+static int gpio_mmio_simple_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+ return gpio_mmio_dir_return(gc, gpio, false);
+}
+
+static int gpio_mmio_dir_out_err(struct gpio_chip *gc, unsigned int gpio,
+ int val)
{
return -EINVAL;
}
-static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio,
- int val)
+static int gpio_mmio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio,
+ int val)
{
gc->set(gc, gpio, val);
- return bgpio_dir_return(gc, gpio, true);
+ return gpio_mmio_dir_return(gc, gpio, true);
}
-static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+static int gpio_mmio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
-
- gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
- if (gc->reg_dir_in)
- gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
- if (gc->reg_dir_out)
- gc->write_reg(gc->reg_dir_out, gc->bgpio_dir);
+ scoped_guard(raw_spinlock, &chip->lock) {
+ chip->sdir &= ~gpio_mmio_line2mask(gc, gpio);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ if (chip->reg_dir_in)
+ chip->write_reg(chip->reg_dir_in, ~chip->sdir);
+ if (chip->reg_dir_out)
+ chip->write_reg(chip->reg_dir_out, chip->sdir);
+ }
- return bgpio_dir_return(gc, gpio, false);
+ return gpio_mmio_dir_return(gc, gpio, false);
}
-static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
+static int gpio_mmio_get_dir(struct gpio_chip *gc, unsigned int gpio)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
/* Return 0 if output, 1 if input */
- if (gc->bgpio_dir_unreadable) {
- if (gc->bgpio_dir & bgpio_line2mask(gc, gpio))
+ if (chip->dir_unreadable) {
+ if (chip->sdir & gpio_mmio_line2mask(gc, gpio))
return GPIO_LINE_DIRECTION_OUT;
return GPIO_LINE_DIRECTION_IN;
}
- if (gc->reg_dir_out) {
- if (gc->read_reg(gc->reg_dir_out) & bgpio_line2mask(gc, gpio))
+ if (chip->reg_dir_out) {
+ if (chip->read_reg(chip->reg_dir_out) & gpio_mmio_line2mask(gc, gpio))
return GPIO_LINE_DIRECTION_OUT;
return GPIO_LINE_DIRECTION_IN;
}
- if (gc->reg_dir_in)
- if (!(gc->read_reg(gc->reg_dir_in) & bgpio_line2mask(gc, gpio)))
+ if (chip->reg_dir_in)
+ if (!(chip->read_reg(chip->reg_dir_in) & gpio_mmio_line2mask(gc, gpio)))
return GPIO_LINE_DIRECTION_OUT;
return GPIO_LINE_DIRECTION_IN;
}
-static void bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+static void gpio_mmio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
- unsigned long flags;
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(raw_spinlock)(&chip->lock);
- gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
+ chip->sdir |= gpio_mmio_line2mask(gc, gpio);
- if (gc->reg_dir_in)
- gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
- if (gc->reg_dir_out)
- gc->write_reg(gc->reg_dir_out, gc->bgpio_dir);
-
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ if (chip->reg_dir_in)
+ chip->write_reg(chip->reg_dir_in, ~chip->sdir);
+ if (chip->reg_dir_out)
+ chip->write_reg(chip->reg_dir_out, chip->sdir);
}
-static int bgpio_dir_out_dir_first(struct gpio_chip *gc, unsigned int gpio,
- int val)
+static int gpio_mmio_dir_out_dir_first(struct gpio_chip *gc, unsigned int gpio,
+ int val)
{
- bgpio_dir_out(gc, gpio, val);
+ gpio_mmio_dir_out(gc, gpio, val);
gc->set(gc, gpio, val);
- return bgpio_dir_return(gc, gpio, true);
+ return gpio_mmio_dir_return(gc, gpio, true);
}
-static int bgpio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio,
- int val)
+static int gpio_mmio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio,
+ int val)
{
gc->set(gc, gpio, val);
- bgpio_dir_out(gc, gpio, val);
- return bgpio_dir_return(gc, gpio, true);
+ gpio_mmio_dir_out(gc, gpio, val);
+ return gpio_mmio_dir_return(gc, gpio, true);
}
-static int bgpio_setup_accessors(struct device *dev,
- struct gpio_chip *gc,
- bool byte_be)
+static int gpio_mmio_setup_accessors(struct device *dev,
+ struct gpio_generic_chip *chip,
+ bool byte_be)
{
-
- switch (gc->bgpio_bits) {
+ switch (chip->bits) {
case 8:
- gc->read_reg = bgpio_read8;
- gc->write_reg = bgpio_write8;
+ chip->read_reg = gpio_mmio_read8;
+ chip->write_reg = gpio_mmio_write8;
break;
case 16:
if (byte_be) {
- gc->read_reg = bgpio_read16be;
- gc->write_reg = bgpio_write16be;
+ chip->read_reg = gpio_mmio_read16be;
+ chip->write_reg = gpio_mmio_write16be;
} else {
- gc->read_reg = bgpio_read16;
- gc->write_reg = bgpio_write16;
+ chip->read_reg = gpio_mmio_read16;
+ chip->write_reg = gpio_mmio_write16;
}
break;
case 32:
if (byte_be) {
- gc->read_reg = bgpio_read32be;
- gc->write_reg = bgpio_write32be;
+ chip->read_reg = gpio_mmio_read32be;
+ chip->write_reg = gpio_mmio_write32be;
} else {
- gc->read_reg = bgpio_read32;
- gc->write_reg = bgpio_write32;
+ chip->read_reg = gpio_mmio_read32;
+ chip->write_reg = gpio_mmio_write32;
}
break;
#if BITS_PER_LONG >= 64
@@ -461,13 +491,13 @@ static int bgpio_setup_accessors(struct device *dev,
"64 bit big endian byte order unsupported\n");
return -EINVAL;
} else {
- gc->read_reg = bgpio_read64;
- gc->write_reg = bgpio_write64;
+ chip->read_reg = gpio_mmio_read64;
+ chip->write_reg = gpio_mmio_write64;
}
break;
#endif /* BITS_PER_LONG >= 64 */
default:
- dev_err(dev, "unsupported data width %u bits\n", gc->bgpio_bits);
+ dev_err(dev, "unsupported data width %u bits\n", chip->bits);
return -EINVAL;
}
@@ -496,39 +526,37 @@ static int bgpio_setup_accessors(struct device *dev,
* - an input direction register (named "dirin") where a 1 bit indicates
* the GPIO is an input.
*/
-static int bgpio_setup_io(struct gpio_chip *gc,
- void __iomem *dat,
- void __iomem *set,
- void __iomem *clr,
- unsigned long flags)
+static int gpio_mmio_setup_io(struct gpio_generic_chip *chip,
+ const struct gpio_generic_chip_config *cfg)
{
+ struct gpio_chip *gc = &chip->gc;
- gc->reg_dat = dat;
- if (!gc->reg_dat)
+ chip->reg_dat = cfg->dat;
+ if (!chip->reg_dat)
return -EINVAL;
- if (set && clr) {
- gc->reg_set = set;
- gc->reg_clr = clr;
- gc->set = bgpio_set_with_clear;
- gc->set_multiple = bgpio_set_multiple_with_clear;
- } else if (set && !clr) {
- gc->reg_set = set;
- gc->set = bgpio_set_set;
- gc->set_multiple = bgpio_set_multiple_set;
- } else if (flags & BGPIOF_NO_OUTPUT) {
- gc->set = bgpio_set_none;
+ if (cfg->set && cfg->clr) {
+ chip->reg_set = cfg->set;
+ chip->reg_clr = cfg->clr;
+ gc->set = gpio_mmio_set_with_clear;
+ gc->set_multiple = gpio_mmio_set_multiple_with_clear;
+ } else if (cfg->set && !cfg->clr) {
+ chip->reg_set = cfg->set;
+ gc->set = gpio_mmio_set_set;
+ gc->set_multiple = gpio_mmio_set_multiple_set;
+ } else if (cfg->flags & GPIO_GENERIC_NO_OUTPUT) {
+ gc->set = gpio_mmio_set_none;
gc->set_multiple = NULL;
} else {
- gc->set = bgpio_set;
- gc->set_multiple = bgpio_set_multiple;
+ gc->set = gpio_mmio_set;
+ gc->set_multiple = gpio_mmio_set_multiple;
}
- if (!(flags & BGPIOF_UNREADABLE_REG_SET) &&
- (flags & BGPIOF_READ_OUTPUT_REG_SET)) {
- gc->get = bgpio_get_set;
- if (!gc->be_bits)
- gc->get_multiple = bgpio_get_set_multiple;
+ if (!(cfg->flags & GPIO_GENERIC_UNREADABLE_REG_SET) &&
+ (cfg->flags & GPIO_GENERIC_READ_OUTPUT_REG_SET)) {
+ gc->get = gpio_mmio_get_set;
+ if (!chip->be_bits)
+ gc->get_multiple = gpio_mmio_get_set_multiple;
/*
* We deliberately avoid assigning the ->get_multiple() call
* for big endian mirrored registers which are ALSO reflecting
@@ -537,158 +565,145 @@ static int bgpio_setup_io(struct gpio_chip *gc,
* reading each line individually in that fringe case.
*/
} else {
- gc->get = bgpio_get;
- if (gc->be_bits)
- gc->get_multiple = bgpio_get_multiple_be;
+ gc->get = gpio_mmio_get;
+ if (chip->be_bits)
+ gc->get_multiple = gpio_mmio_get_multiple_be;
else
- gc->get_multiple = bgpio_get_multiple;
+ gc->get_multiple = gpio_mmio_get_multiple;
}
return 0;
}
-static int bgpio_setup_direction(struct gpio_chip *gc,
- void __iomem *dirout,
- void __iomem *dirin,
- unsigned long flags)
+static int gpio_mmio_setup_direction(struct gpio_generic_chip *chip,
+ const struct gpio_generic_chip_config *cfg)
{
- if (dirout || dirin) {
- gc->reg_dir_out = dirout;
- gc->reg_dir_in = dirin;
- if (flags & BGPIOF_NO_SET_ON_INPUT)
- gc->direction_output = bgpio_dir_out_dir_first;
+ struct gpio_chip *gc = &chip->gc;
+
+ if (cfg->dirout || cfg->dirin) {
+ chip->reg_dir_out = cfg->dirout;
+ chip->reg_dir_in = cfg->dirin;
+ if (cfg->flags & GPIO_GENERIC_NO_SET_ON_INPUT)
+ gc->direction_output = gpio_mmio_dir_out_dir_first;
else
- gc->direction_output = bgpio_dir_out_val_first;
- gc->direction_input = bgpio_dir_in;
- gc->get_direction = bgpio_get_dir;
+ gc->direction_output = gpio_mmio_dir_out_val_first;
+ gc->direction_input = gpio_mmio_dir_in;
+ gc->get_direction = gpio_mmio_get_dir;
} else {
- if (flags & BGPIOF_NO_OUTPUT)
- gc->direction_output = bgpio_dir_out_err;
+ if (cfg->flags & GPIO_GENERIC_NO_OUTPUT)
+ gc->direction_output = gpio_mmio_dir_out_err;
+ else
+ gc->direction_output = gpio_mmio_simple_dir_out;
+
+ if (cfg->flags & GPIO_GENERIC_NO_INPUT)
+ gc->direction_input = gpio_mmio_dir_in_err;
else
- gc->direction_output = bgpio_simple_dir_out;
- gc->direction_input = bgpio_simple_dir_in;
+ gc->direction_input = gpio_mmio_simple_dir_in;
}
return 0;
}
-static int bgpio_request(struct gpio_chip *chip, unsigned gpio_pin)
+static int gpio_mmio_request(struct gpio_chip *gc, unsigned int gpio_pin)
{
- if (gpio_pin >= chip->ngpio)
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ if (gpio_pin >= gc->ngpio)
return -EINVAL;
- if (chip->bgpio_pinctrl)
- return gpiochip_generic_request(chip, gpio_pin);
+ if (chip->pinctrl)
+ return gpiochip_generic_request(gc, gpio_pin);
return 0;
}
/**
- * bgpio_init() - Initialize generic GPIO accessor functions
- * @gc: the GPIO chip to set up
- * @dev: the parent device of the new GPIO chip (compulsory)
- * @sz: the size (width) of the MMIO registers in bytes, typically 1, 2 or 4
- * @dat: MMIO address for the register to READ the value of the GPIO lines, it
- * is expected that a 1 in the corresponding bit in this register means the
- * line is asserted
- * @set: MMIO address for the register to SET the value of the GPIO lines, it is
- * expected that we write the line with 1 in this register to drive the GPIO line
- * high.
- * @clr: MMIO address for the register to CLEAR the value of the GPIO lines, it is
- * expected that we write the line with 1 in this register to drive the GPIO line
- * low. It is allowed to leave this address as NULL, in that case the SET register
- * will be assumed to also clear the GPIO lines, by actively writing the line
- * with 0.
- * @dirout: MMIO address for the register to set the line as OUTPUT. It is assumed
- * that setting a line to 1 in this register will turn that line into an
- * output line. Conversely, setting the line to 0 will turn that line into
- * an input.
- * @dirin: MMIO address for the register to set this line as INPUT. It is assumed
- * that setting a line to 1 in this register will turn that line into an
- * input line. Conversely, setting the line to 0 will turn that line into
- * an output.
- * @flags: Different flags that will affect the behaviour of the device, such as
- * endianness etc.
+ * gpio_generic_chip_init() - Initialize a generic GPIO chip.
+ * @chip: Generic GPIO chip to set up.
+ * @cfg: Generic GPIO chip configuration.
+ *
+ * Returns 0 on success, negative error number on failure.
*/
-int bgpio_init(struct gpio_chip *gc, struct device *dev,
- unsigned long sz, void __iomem *dat, void __iomem *set,
- void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
- unsigned long flags)
+int gpio_generic_chip_init(struct gpio_generic_chip *chip,
+ const struct gpio_generic_chip_config *cfg)
{
+ struct gpio_chip *gc = &chip->gc;
+ unsigned long flags = cfg->flags;
+ struct device *dev = cfg->dev;
int ret;
- if (!is_power_of_2(sz))
+ if (!is_power_of_2(cfg->sz))
return -EINVAL;
- gc->bgpio_bits = sz * 8;
- if (gc->bgpio_bits > BITS_PER_LONG)
+ chip->bits = cfg->sz * 8;
+ if (chip->bits > BITS_PER_LONG)
return -EINVAL;
- raw_spin_lock_init(&gc->bgpio_lock);
+ raw_spin_lock_init(&chip->lock);
gc->parent = dev;
gc->label = dev_name(dev);
gc->base = -1;
- gc->request = bgpio_request;
- gc->be_bits = !!(flags & BGPIOF_BIG_ENDIAN);
+ gc->request = gpio_mmio_request;
+ chip->be_bits = !!(flags & GPIO_GENERIC_BIG_ENDIAN);
ret = gpiochip_get_ngpios(gc, dev);
if (ret)
- gc->ngpio = gc->bgpio_bits;
+ gc->ngpio = chip->bits;
- ret = bgpio_setup_io(gc, dat, set, clr, flags);
+ ret = gpio_mmio_setup_io(chip, cfg);
if (ret)
return ret;
- ret = bgpio_setup_accessors(dev, gc, flags & BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ ret = gpio_mmio_setup_accessors(dev, chip,
+ flags & GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER);
if (ret)
return ret;
- ret = bgpio_setup_direction(gc, dirout, dirin, flags);
+ ret = gpio_mmio_setup_direction(chip, cfg);
if (ret)
return ret;
- if (flags & BGPIOF_PINCTRL_BACKEND) {
- gc->bgpio_pinctrl = true;
+ if (flags & GPIO_GENERIC_PINCTRL_BACKEND) {
+ chip->pinctrl = true;
/* Currently this callback is only used for pincontrol */
gc->free = gpiochip_generic_free;
}
- gc->bgpio_data = gc->read_reg(gc->reg_dat);
- if (gc->set == bgpio_set_set &&
- !(flags & BGPIOF_UNREADABLE_REG_SET))
- gc->bgpio_data = gc->read_reg(gc->reg_set);
+ chip->sdata = chip->read_reg(chip->reg_dat);
+ if (gc->set == gpio_mmio_set_set &&
+ !(flags & GPIO_GENERIC_UNREADABLE_REG_SET))
+ chip->sdata = chip->read_reg(chip->reg_set);
- if (flags & BGPIOF_UNREADABLE_REG_DIR)
- gc->bgpio_dir_unreadable = true;
+ if (flags & GPIO_GENERIC_UNREADABLE_REG_DIR)
+ chip->dir_unreadable = true;
/*
* Inspect hardware to find initial direction setting.
*/
- if ((gc->reg_dir_out || gc->reg_dir_in) &&
- !(flags & BGPIOF_UNREADABLE_REG_DIR)) {
- if (gc->reg_dir_out)
- gc->bgpio_dir = gc->read_reg(gc->reg_dir_out);
- else if (gc->reg_dir_in)
- gc->bgpio_dir = ~gc->read_reg(gc->reg_dir_in);
+ if ((chip->reg_dir_out || chip->reg_dir_in) &&
+ !(flags & GPIO_GENERIC_UNREADABLE_REG_DIR)) {
+ if (chip->reg_dir_out)
+ chip->sdir = chip->read_reg(chip->reg_dir_out);
+ else if (chip->reg_dir_in)
+ chip->sdir = ~chip->read_reg(chip->reg_dir_in);
/*
* If we have two direction registers, synchronise
* input setting to output setting, the library
* can not handle a line being input and output at
* the same time.
*/
- if (gc->reg_dir_out && gc->reg_dir_in)
- gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
+ if (chip->reg_dir_out && chip->reg_dir_in)
+ chip->write_reg(chip->reg_dir_in, ~chip->sdir);
}
return ret;
}
-EXPORT_SYMBOL_GPL(bgpio_init);
+EXPORT_SYMBOL_GPL(gpio_generic_chip_init);
#if IS_ENABLED(CONFIG_GPIO_GENERIC_PLATFORM)
-static void __iomem *bgpio_map(struct platform_device *pdev,
- const char *name,
- resource_size_t sane_sz)
+static void __iomem *gpio_mmio_map(struct platform_device *pdev,
+ const char *name, resource_size_t sane_sz)
{
struct resource *r;
resource_size_t sz;
@@ -704,38 +719,19 @@ static void __iomem *bgpio_map(struct platform_device *pdev,
return devm_ioremap_resource(&pdev->dev, r);
}
-static const struct of_device_id bgpio_of_match[] = {
+static const struct of_device_id gpio_mmio_of_match[] = {
{ .compatible = "brcm,bcm6345-gpio" },
{ .compatible = "wd,mbl-gpio" },
{ .compatible = "ni,169445-nand-gpio" },
+ { .compatible = "intel,ixp4xx-expansion-bus-mmio-gpio" },
{ }
};
-MODULE_DEVICE_TABLE(of, bgpio_of_match);
+MODULE_DEVICE_TABLE(of, gpio_mmio_of_match);
-static struct bgpio_pdata *bgpio_parse_fw(struct device *dev, unsigned long *flags)
-{
- struct bgpio_pdata *pdata;
-
- if (!dev_fwnode(dev))
- return NULL;
-
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->base = -1;
-
- if (device_is_big_endian(dev))
- *flags |= BGPIOF_BIG_ENDIAN_BYTE_ORDER;
-
- if (device_property_read_bool(dev, "no-output"))
- *flags |= BGPIOF_NO_OUTPUT;
-
- return pdata;
-}
-
-static int bgpio_pdev_probe(struct platform_device *pdev)
+static int gpio_mmio_pdev_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
+ struct gpio_generic_chip *gen_gc;
struct device *dev = &pdev->dev;
struct resource *r;
void __iomem *dat;
@@ -745,18 +741,9 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
void __iomem *dirin;
unsigned long sz;
unsigned long flags = 0;
+ unsigned int base;
int err;
- struct gpio_chip *gc;
- struct bgpio_pdata *pdata;
-
- pdata = bgpio_parse_fw(dev, &flags);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
-
- if (!pdata) {
- pdata = dev_get_platdata(dev);
- flags = pdev->id_entry->driver_data;
- }
+ const char *label;
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
if (!r)
@@ -764,69 +751,87 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
sz = resource_size(r);
- dat = bgpio_map(pdev, "dat", sz);
+ dat = gpio_mmio_map(pdev, "dat", sz);
if (IS_ERR(dat))
return PTR_ERR(dat);
- set = bgpio_map(pdev, "set", sz);
+ set = gpio_mmio_map(pdev, "set", sz);
if (IS_ERR(set))
return PTR_ERR(set);
- clr = bgpio_map(pdev, "clr", sz);
+ clr = gpio_mmio_map(pdev, "clr", sz);
if (IS_ERR(clr))
return PTR_ERR(clr);
- dirout = bgpio_map(pdev, "dirout", sz);
+ dirout = gpio_mmio_map(pdev, "dirout", sz);
if (IS_ERR(dirout))
return PTR_ERR(dirout);
- dirin = bgpio_map(pdev, "dirin", sz);
+ dirin = gpio_mmio_map(pdev, "dirin", sz);
if (IS_ERR(dirin))
return PTR_ERR(dirin);
- gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
- if (!gc)
+ gen_gc = devm_kzalloc(&pdev->dev, sizeof(*gen_gc), GFP_KERNEL);
+ if (!gen_gc)
return -ENOMEM;
- err = bgpio_init(gc, dev, sz, dat, set, clr, dirout, dirin, flags);
+ if (device_is_big_endian(dev))
+ flags |= GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
+
+ if (device_property_read_bool(dev, "no-output"))
+ flags |= GPIO_GENERIC_NO_OUTPUT;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = sz,
+ .dat = dat,
+ .set = set,
+ .clr = clr,
+ .dirout = dirout,
+ .dirin = dirin,
+ .flags = flags,
+ };
+
+ err = gpio_generic_chip_init(gen_gc, &config);
if (err)
return err;
- if (pdata) {
- if (pdata->label)
- gc->label = pdata->label;
- gc->base = pdata->base;
- if (pdata->ngpio > 0)
- gc->ngpio = pdata->ngpio;
- }
+ err = device_property_read_string(dev, "label", &label);
+ if (!err)
+ gen_gc->gc.label = label;
+
+ /*
+ * This property *must not* be used in device-tree sources, it's only
+ * meant to be passed to the driver from board files and MFD core.
+ */
+ err = device_property_read_u32(dev, "gpio-mmio,base", &base);
+ if (!err && base <= INT_MAX)
+ gen_gc->gc.base = base;
- platform_set_drvdata(pdev, gc);
+ platform_set_drvdata(pdev, &gen_gc->gc);
- return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
+ return devm_gpiochip_add_data(&pdev->dev, &gen_gc->gc, NULL);
}
-static const struct platform_device_id bgpio_id_table[] = {
+static const struct platform_device_id gpio_mmio_id_table[] = {
{
.name = "basic-mmio-gpio",
.driver_data = 0,
- }, {
- .name = "basic-mmio-gpio-be",
- .driver_data = BGPIOF_BIG_ENDIAN,
},
{ }
};
-MODULE_DEVICE_TABLE(platform, bgpio_id_table);
+MODULE_DEVICE_TABLE(platform, gpio_mmio_id_table);
-static struct platform_driver bgpio_driver = {
+static struct platform_driver gpio_mmio_driver = {
.driver = {
.name = "basic-mmio-gpio",
- .of_match_table = bgpio_of_match,
+ .of_match_table = gpio_mmio_of_match,
},
- .id_table = bgpio_id_table,
- .probe = bgpio_pdev_probe,
+ .id_table = gpio_mmio_id_table,
+ .probe = gpio_mmio_pdev_probe,
};
-module_platform_driver(bgpio_driver);
+module_platform_driver(gpio_mmio_driver);
#endif /* CONFIG_GPIO_GENERIC_PLATFORM */
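
In the rewritten gpio-mmio.c, the generic state (register addresses, accessors, shadow data, lock) moves out of struct gpio_chip into the enclosing struct gpio_generic_chip, and every callback recovers it via to_gpio_generic_chip(). A sketch of that container pattern; the trivial getter body is illustrative:

/* Sketch only; mirrors the accessor pattern of the gpio_mmio_*() helpers. */
#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/generic.h>

static int my_get_sketch(struct gpio_chip *gc, unsigned int gpio)
{
	/*
	 * gc is embedded in the generic chip; state that used to live in
	 * gc->bgpio_* (reg_dat, read_reg, ...) now lives alongside it.
	 */
	struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);

	return !!(chip->read_reg(chip->reg_dat) & BIT(gpio));
}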
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 266c0953d914..a7d69f3835c1 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -449,9 +449,9 @@ static int gpio_mockup_probe(struct platform_device *pdev)
gc->owner = THIS_MODULE;
gc->parent = dev;
gc->get = gpio_mockup_get;
- gc->set_rv = gpio_mockup_set;
+ gc->set = gpio_mockup_set;
gc->get_multiple = gpio_mockup_get_multiple;
- gc->set_multiple_rv = gpio_mockup_set_multiple;
+ gc->set_multiple = gpio_mockup_set_multiple;
gc->direction_output = gpio_mockup_dirout;
gc->direction_input = gpio_mockup_dirin;
gc->get_direction = gpio_mockup_get_direction;
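
The mockup change reflects the series' end state: the int-returning setters, previously staged under the set_rv/set_multiple_rv names, now occupy the regular set/set_multiple slots. The expected signatures, sketched:

/* Sketch of the setter signatures gc->set/gc->set_multiple now take
 * (both previously returned void). */
#include <linux/gpio/driver.h>

static int my_set_sketch(struct gpio_chip *gc, unsigned int offset, int value)
{
	/* ...drive the line; failures can now be reported... */
	return 0;
}

static int my_set_multiple_sketch(struct gpio_chip *gc, unsigned long *mask,
				  unsigned long *bits)
{
	/* ...apply 'bits' to the lines selected by 'mask'... */
	return 0;
}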
diff --git a/drivers/gpio/gpio-moxtet.c b/drivers/gpio/gpio-moxtet.c
index 61f9efd6c64f..4eb9f1a2779b 100644
--- a/drivers/gpio/gpio-moxtet.c
+++ b/drivers/gpio/gpio-moxtet.c
@@ -52,15 +52,15 @@ static int moxtet_gpio_get_value(struct gpio_chip *gc, unsigned int offset)
return !!(ret & BIT(offset));
}
-static void moxtet_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
- int val)
+static int moxtet_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
+ int val)
{
struct moxtet_gpio_chip *chip = gpiochip_get_data(gc);
int state;
state = moxtet_device_written(chip->dev);
if (state < 0)
- return;
+ return state;
offset -= MOXTET_GPIO_INPUTS;
@@ -69,7 +69,7 @@ static void moxtet_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
else
state &= ~BIT(offset);
- moxtet_device_write(chip->dev, state);
+ return moxtet_device_write(chip->dev, state);
}
static int moxtet_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
@@ -104,13 +104,11 @@ static int moxtet_gpio_direction_output(struct gpio_chip *gc,
struct moxtet_gpio_chip *chip = gpiochip_get_data(gc);
if (chip->desc->out_mask & BIT(offset))
- moxtet_gpio_set_value(gc, offset, val);
+ return moxtet_gpio_set_value(gc, offset, val);
else if (chip->desc->in_mask & BIT(offset))
return -ENOTSUPP;
- else
- return -EINVAL;
- return 0;
+ return -EINVAL;
}
static int moxtet_gpio_probe(struct device *dev)
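
The moxtet hunks show why the int-returning setter matters on a slow bus: a failed bus read or write now propagates to the caller instead of being silently dropped. A sketch of that pattern; my_bus_read()/my_bus_write() are hypothetical helpers:

/* Sketch only; my_bus_read()/my_bus_write() stand in for real bus I/O. */
#include <linux/bits.h>
#include <linux/gpio/driver.h>

int my_bus_read(struct gpio_chip *gc);
int my_bus_write(struct gpio_chip *gc, int state);

static int my_set_value_sketch(struct gpio_chip *gc, unsigned int offset,
			       int val)
{
	int state = my_bus_read(gc);

	if (state < 0)
		return state;	/* used to be a bare "return;" */

	if (val)
		state |= BIT(offset);
	else
		state &= ~BIT(offset);

	return my_bus_write(gc, state);
}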
diff --git a/drivers/gpio/gpio-mpc5200.c b/drivers/gpio/gpio-mpc5200.c
index 091d96f2d682..00f209157fd0 100644
--- a/drivers/gpio/gpio-mpc5200.c
+++ b/drivers/gpio/gpio-mpc5200.c
@@ -8,7 +8,7 @@
#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/gpio/legacy-of-mm-gpiochip.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/module.h>
@@ -19,7 +19,8 @@
static DEFINE_SPINLOCK(gpio_lock);
struct mpc52xx_gpiochip {
- struct of_mm_gpio_chip mmchip;
+ struct gpio_chip gc;
+ void __iomem *regs;
unsigned int shadow_dvo;
unsigned int shadow_gpioe;
unsigned int shadow_ddr;
@@ -43,8 +44,8 @@ struct mpc52xx_gpiochip {
*/
static int mpc52xx_wkup_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
unsigned int ret;
ret = (in_8(&regs->wkup_ival) >> (7 - gpio)) & 1;
@@ -57,9 +58,8 @@ static int mpc52xx_wkup_gpio_get(struct gpio_chip *gc, unsigned int gpio)
static inline void
__mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
if (val)
chip->shadow_dvo |= 1 << (7 - gpio);
@@ -69,7 +69,7 @@ __mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
out_8(&regs->wkup_dvo, chip->shadow_dvo);
}
-static void
+static int
mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
@@ -81,13 +81,14 @@ mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
spin_unlock_irqrestore(&gpio_lock, flags);
pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+
+ return 0;
}
static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -108,9 +109,8 @@ static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int
mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -134,30 +134,41 @@ mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
static int mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = dev->of_node;
struct mpc52xx_gpiochip *chip;
struct mpc52xx_gpio_wkup __iomem *regs;
struct gpio_chip *gc;
int ret;
- chip = devm_kzalloc(&ofdev->dev, sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
platform_set_drvdata(ofdev, chip);
- gc = &chip->mmchip.gc;
+ gc = &chip->gc;
+ gc->base = -1;
gc->ngpio = 8;
gc->direction_input = mpc52xx_wkup_gpio_dir_in;
gc->direction_output = mpc52xx_wkup_gpio_dir_out;
gc->get = mpc52xx_wkup_gpio_get;
gc->set = mpc52xx_wkup_gpio_set;
- ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip);
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+ if (!gc->label)
+ return -ENOMEM;
+
+ chip->regs = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(chip->regs))
+ return PTR_ERR(chip->regs);
+
+ ret = devm_gpiochip_add_data(dev, gc, chip);
if (ret)
return ret;
- regs = chip->mmchip.regs;
+ regs = chip->regs;
chip->shadow_gpioe = in_8(&regs->wkup_gpioe);
chip->shadow_ddr = in_8(&regs->wkup_ddr);
chip->shadow_dvo = in_8(&regs->wkup_dvo);
@@ -165,13 +176,6 @@ static int mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
return 0;
}
-static void mpc52xx_gpiochip_remove(struct platform_device *ofdev)
-{
- struct mpc52xx_gpiochip *chip = platform_get_drvdata(ofdev);
-
- of_mm_gpiochip_remove(&chip->mmchip);
-}
-
static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = {
{ .compatible = "fsl,mpc5200-gpio-wkup", },
{}
@@ -183,7 +187,6 @@ static struct platform_driver mpc52xx_wkup_gpiochip_driver = {
.of_match_table = mpc52xx_wkup_gpiochip_match,
},
.probe = mpc52xx_wkup_gpiochip_probe,
- .remove = mpc52xx_gpiochip_remove,
};
/*
@@ -205,8 +208,8 @@ static struct platform_driver mpc52xx_wkup_gpiochip_driver = {
*/
static int mpc52xx_simple_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
unsigned int ret;
ret = (in_be32(&regs->simple_ival) >> (31 - gpio)) & 1;
@@ -217,9 +220,8 @@ static int mpc52xx_simple_gpio_get(struct gpio_chip *gc, unsigned int gpio)
static inline void
__mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
if (val)
chip->shadow_dvo |= 1 << (31 - gpio);
@@ -228,7 +230,7 @@ __mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
out_be32(&regs->simple_dvo, chip->shadow_dvo);
}
-static void
+static int
mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
@@ -240,13 +242,14 @@ mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
spin_unlock_irqrestore(&gpio_lock, flags);
pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+
+ return 0;
}
static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -267,9 +270,8 @@ static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int
mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -294,30 +296,41 @@ mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
static int mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev)
{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = dev->of_node;
struct mpc52xx_gpiochip *chip;
struct gpio_chip *gc;
struct mpc52xx_gpio __iomem *regs;
int ret;
- chip = devm_kzalloc(&ofdev->dev, sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
platform_set_drvdata(ofdev, chip);
- gc = &chip->mmchip.gc;
+ gc = &chip->gc;
+ gc->base = -1;
gc->ngpio = 32;
gc->direction_input = mpc52xx_simple_gpio_dir_in;
gc->direction_output = mpc52xx_simple_gpio_dir_out;
gc->get = mpc52xx_simple_gpio_get;
gc->set = mpc52xx_simple_gpio_set;
- ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip);
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+ if (!gc->label)
+ return -ENOMEM;
+
+ chip->regs = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(chip->regs))
+ return PTR_ERR(chip->regs);
+
+ ret = devm_gpiochip_add_data(dev, gc, chip);
if (ret)
return ret;
- regs = chip->mmchip.regs;
+ regs = chip->regs;
chip->shadow_gpioe = in_be32(&regs->simple_gpioe);
chip->shadow_ddr = in_be32(&regs->simple_ddr);
chip->shadow_dvo = in_be32(&regs->simple_dvo);
@@ -336,7 +349,6 @@ static struct platform_driver mpc52xx_simple_gpiochip_driver = {
.of_match_table = mpc52xx_simple_gpiochip_match,
},
.probe = mpc52xx_simple_gpiochip_probe,
- .remove = mpc52xx_gpiochip_remove,
};
static struct platform_driver * const drivers[] = {
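
Like the lantiq driver, the mpc5200 conversion keeps its shadow copies of write-only latches; only the of_mm plumbing goes away. The shadow pattern itself, sketched (the driver uses the powerpc out_8()/out_be32() accessors; writeb() stands in here, and struct my_shadow_chip is hypothetical):

/* Sketch only; shadow-register read-modify-write under a spinlock. */
#include <linux/io.h>
#include <linux/spinlock.h>

struct my_shadow_chip {
	spinlock_t lock;
	u8 shadow;	/* cached value of a write-only latch */
};

static int my_shadow_set_sketch(struct my_shadow_chip *chip,
				unsigned int gpio, int val, u8 __iomem *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&chip->lock, flags);
	if (val)
		chip->shadow |= 1 << (7 - gpio);	/* MSB-first, as in the wkup bank */
	else
		chip->shadow &= ~(1 << (7 - gpio));
	writeb(chip->shadow, reg);
	spin_unlock_irqrestore(&chip->lock, flags);

	return 0;
}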
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 121efdd71e45..bfe828734ee1 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -9,6 +9,7 @@
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -34,7 +35,7 @@
#define GPIO_IBE 0x18
struct mpc8xxx_gpio_chip {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *regs;
raw_spinlock_t lock;
@@ -66,9 +67,11 @@ static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio)
struct mpc8xxx_gpio_chip *mpc8xxx_gc = gpiochip_get_data(gc);
u32 out_mask, out_shadow;
- out_mask = gc->read_reg(mpc8xxx_gc->regs + GPIO_DIR);
- val = gc->read_reg(mpc8xxx_gc->regs + GPIO_DAT) & ~out_mask;
- out_shadow = gc->bgpio_data & out_mask;
+ out_mask = gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_DIR);
+ val = gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_DAT) & ~out_mask;
+ out_shadow = mpc8xxx_gc->chip.sdata & out_mask;
return !!((val | out_shadow) & mpc_pin2mask(gpio));
}
@@ -108,12 +111,13 @@ static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
static irqreturn_t mpc8xxx_gpio_irq_cascade(int irq, void *data)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = data;
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long mask;
int i;
- mask = gc->read_reg(mpc8xxx_gc->regs + GPIO_IER)
- & gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR);
+ mask = gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IER) &
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR);
for_each_set_bit(i, &mask, 32)
generic_handle_domain_irq(mpc8xxx_gc->irq, 31 - i);
@@ -124,15 +128,17 @@ static void mpc8xxx_irq_unmask(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
+ struct gpio_chip *gc = &mpc8xxx_gc->chip.gc;
unsigned long flags;
gpiochip_enable_irq(gc, hwirq);
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR,
- gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR)
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR,
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR)
| mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
@@ -142,13 +148,14 @@ static void mpc8xxx_irq_mask(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
+ struct gpio_chip *gc = &mpc8xxx_gc->chip.gc;
unsigned long flags;
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR,
- gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR)
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, mpc8xxx_gc->regs + GPIO_IMR,
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR)
& ~mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
@@ -159,32 +166,34 @@ static void mpc8xxx_irq_mask(struct irq_data *d)
static void mpc8xxx_irq_ack(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IER,
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, mpc8xxx_gc->regs + GPIO_IER,
mpc_pin2mask(irqd_to_hwirq(d)));
}
static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long flags;
switch (flow_type) {
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_LEVEL_LOW:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
- gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_ICR,
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_ICR)
| mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
case IRQ_TYPE_EDGE_BOTH:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
- gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_ICR,
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_ICR)
& ~mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -199,7 +208,6 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long gpio = irqd_to_hwirq(d);
void __iomem *reg;
unsigned int shift;
@@ -217,7 +225,9 @@ static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_LEVEL_LOW:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(reg, (gc->read_reg(reg) & ~(3 << shift))
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, reg,
+ (gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ reg) & ~(3 << shift))
| (2 << shift));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -225,14 +235,18 @@ static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_LEVEL_HIGH:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(reg, (gc->read_reg(reg) & ~(3 << shift))
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, reg,
+ (gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ reg) & ~(3 << shift))
| (1 << shift));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
case IRQ_TYPE_EDGE_BOTH:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(reg, (gc->read_reg(reg) & ~(3 << shift)));
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, reg,
+ (gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ reg) & ~(3 << shift)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -309,6 +323,7 @@ static const struct of_device_id mpc8xxx_gpio_ids[] = {
static int mpc8xxx_probe(struct platform_device *pdev)
{
const struct mpc8xxx_gpio_devtype *devtype = NULL;
+ struct gpio_generic_chip_config config;
struct mpc8xxx_gpio_chip *mpc8xxx_gc;
struct device *dev = &pdev->dev;
struct fwnode_handle *fwnode;
@@ -327,26 +342,28 @@ static int mpc8xxx_probe(struct platform_device *pdev)
if (IS_ERR(mpc8xxx_gc->regs))
return PTR_ERR(mpc8xxx_gc->regs);
- gc = &mpc8xxx_gc->gc;
+ gc = &mpc8xxx_gc->chip.gc;
gc->parent = dev;
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = mpc8xxx_gc->regs + GPIO_DAT,
+ .dirout = mpc8xxx_gc->regs + GPIO_DIR,
+		.flags = GPIO_GENERIC_BIG_ENDIAN,
+ };
+
if (device_property_read_bool(dev, "little-endian")) {
- ret = bgpio_init(gc, dev, 4, mpc8xxx_gc->regs + GPIO_DAT,
- NULL, NULL, mpc8xxx_gc->regs + GPIO_DIR,
- NULL, BGPIOF_BIG_ENDIAN);
- if (ret)
- return ret;
dev_dbg(dev, "GPIO registers are LITTLE endian\n");
} else {
- ret = bgpio_init(gc, dev, 4, mpc8xxx_gc->regs + GPIO_DAT,
- NULL, NULL, mpc8xxx_gc->regs + GPIO_DIR,
- NULL, BGPIOF_BIG_ENDIAN
- | BGPIOF_BIG_ENDIAN_BYTE_ORDER);
- if (ret)
- return ret;
+ config.flags |= GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
dev_dbg(dev, "GPIO registers are BIG endian\n");
}
+ ret = gpio_generic_chip_init(&mpc8xxx_gc->chip, &config);
+ if (ret)
+ return ret;
+
mpc8xxx_gc->direction_output = gc->direction_output;
devtype = device_get_match_data(dev);
@@ -379,10 +396,14 @@ static int mpc8xxx_probe(struct platform_device *pdev)
device_is_compatible(dev, "fsl,ls1028a-gpio") ||
device_is_compatible(dev, "fsl,ls1088a-gpio") ||
is_acpi_node(fwnode)) {
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
/* Also, latch state of GPIOs configured as output by bootloader. */
- gc->bgpio_data = gc->read_reg(mpc8xxx_gc->regs + GPIO_DAT) &
- gc->read_reg(mpc8xxx_gc->regs + GPIO_DIR);
+ mpc8xxx_gc->chip.sdata =
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_DAT) &
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_DIR);
}
ret = devm_gpiochip_add_data(dev, gc, mpc8xxx_gc);
@@ -405,8 +426,10 @@ static int mpc8xxx_probe(struct platform_device *pdev)
return 0;
/* ack and mask all irqs */
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IER, 0xffffffff);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR, 0);
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IER, 0xffffffff);
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR, 0);
ret = devm_request_irq(dev, mpc8xxx_gc->irqn,
mpc8xxx_gpio_irq_cascade,
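
/*
 * Editor's recap of the conversion pattern this series applies to every
 * driver below (field and function names are taken from the hunks above,
 * not from a verified header): the positional bgpio_init() arguments become
 * named members of struct gpio_generic_chip_config, and raw accesses via
 * gc->read_reg()/gc->write_reg() become gpio_generic_read_reg() and
 * gpio_generic_write_reg() on the embedded generic chip.  A minimal sketch:
 */
#include <linux/gpio/generic.h>

static int example_generic_init(struct gpio_generic_chip *chip,
				struct device *dev, void __iomem *base)
{
	struct gpio_generic_chip_config config = {
		.dev = dev,
		.sz = 4,			/* 32-bit registers */
		.dat = base + 0x8,		/* data register (example offset) */
		.dirout = base + 0x0,		/* direction register (example) */
		.flags = GPIO_GENERIC_BIG_ENDIAN,
	};

	return gpio_generic_chip_init(chip, &config);
}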
diff --git a/drivers/gpio/gpio-mpfs.c b/drivers/gpio/gpio-mpfs.c
index 561a961c97a6..9468795b9634 100644
--- a/drivers/gpio/gpio-mpfs.c
+++ b/drivers/gpio/gpio-mpfs.c
@@ -69,7 +69,7 @@ static int mpfs_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio_in
struct mpfs_gpio_chip *mpfs_gpio = gpiochip_get_data(gc);
regmap_update_bits(mpfs_gpio->regs, MPFS_GPIO_CTRL(gpio_index),
- MPFS_GPIO_DIR_MASK, MPFS_GPIO_EN_IN);
+ MPFS_GPIO_DIR_MASK, MPFS_GPIO_EN_OUT | MPFS_GPIO_EN_OUT_BUF);
regmap_update_bits(mpfs_gpio->regs, mpfs_gpio->offsets->outp, BIT(gpio_index),
value << gpio_index);
@@ -99,16 +99,19 @@ static int mpfs_gpio_get(struct gpio_chip *gc, unsigned int gpio_index)
return regmap_test_bits(mpfs_gpio->regs, mpfs_gpio->offsets->inp, BIT(gpio_index));
}
-static void mpfs_gpio_set(struct gpio_chip *gc, unsigned int gpio_index, int value)
+static int mpfs_gpio_set(struct gpio_chip *gc, unsigned int gpio_index, int value)
{
struct mpfs_gpio_chip *mpfs_gpio = gpiochip_get_data(gc);
+ int ret;
mpfs_gpio_get(gc, gpio_index);
- regmap_update_bits(mpfs_gpio->regs, mpfs_gpio->offsets->outp, BIT(gpio_index),
- value << gpio_index);
+ ret = regmap_update_bits(mpfs_gpio->regs, mpfs_gpio->offsets->outp,
+ BIT(gpio_index), value << gpio_index);
mpfs_gpio_get(gc, gpio_index);
+
+ return ret;
}
static int mpfs_gpio_probe(struct platform_device *pdev)
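
/*
 * Editor's note: the mpfs hunks follow the tree-wide conversion of the
 * gpio_chip .set()/.set_multiple() callbacks from void to int, so errors
 * from fallible buses (regmap here, USB/I2C elsewhere in this series)
 * reach the caller instead of being dropped.  A minimal sketch of the new
 * shape; EXAMPLE_OUT_REG and struct example_chip are stand-ins:
 */
#include <linux/gpio/driver.h>
#include <linux/regmap.h>

#define EXAMPLE_OUT_REG		0x04	/* hypothetical output register */

struct example_chip {
	struct regmap *regmap;
};

static int example_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct example_chip *chip = gpiochip_get_data(gc);

	/* propagate the register-write status instead of discarding it */
	return regmap_update_bits(chip->regmap, EXAMPLE_OUT_REG,
				  BIT(offset), value ? BIT(offset) : 0);
}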
diff --git a/drivers/gpio/gpio-mpsse.c b/drivers/gpio/gpio-mpsse.c
index 3ea32c5e33d1..ace652ba4df1 100644
--- a/drivers/gpio/gpio-mpsse.c
+++ b/drivers/gpio/gpio-mpsse.c
@@ -10,6 +10,7 @@
#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <linux/usb.h>
struct mpsse_priv {
@@ -17,8 +18,10 @@ struct mpsse_priv {
struct usb_device *udev; /* USB device encompassing all MPSSEs */
struct usb_interface *intf; /* USB interface for this MPSSE */
u8 intf_id; /* USB interface number for this MPSSE */
- struct work_struct irq_work; /* polling work thread */
+ struct list_head workers; /* polling work threads */
struct mutex irq_mutex; /* lock over irq_data */
+ struct mutex irq_race; /* race for polling worker teardown */
+ raw_spinlock_t irq_spin; /* protects worker list */
atomic_t irq_type[16]; /* pin -> edge detection type */
atomic_t irq_enabled;
int id;
@@ -26,6 +29,9 @@ struct mpsse_priv {
u8 gpio_outputs[2]; /* Output states for GPIOs [L, H] */
u8 gpio_dir[2]; /* Directions for GPIOs [L, H] */
+ unsigned long dir_in; /* Bitmask of valid input pins */
+ unsigned long dir_out; /* Bitmask of valid output pins */
+
u8 *bulk_in_buf; /* Extra recv buffer to grab status bytes */
struct usb_endpoint_descriptor *bulk_in;
@@ -34,6 +40,14 @@ struct mpsse_priv {
struct mutex io_mutex; /* sync I/O with disconnect */
};
+struct mpsse_worker {
+ struct mpsse_priv *priv;
+ struct work_struct work;
+ atomic_t cancelled;
+ struct list_head list; /* linked list */
+ struct list_head destroy; /* teardown linked list */
+};
+
struct bulk_desc {
bool tx; /* direction of bulk transfer */
u8 *data; /* input (tx) or output (rx) */
@@ -43,8 +57,27 @@ struct bulk_desc {
int timeout;
};
+#define MPSSE_NGPIO 16
+
+struct mpsse_quirk {
+ const char *names[MPSSE_NGPIO]; /* Pin names, if applicable */
+ unsigned long dir_in; /* Bitmask of valid input pins */
+ unsigned long dir_out; /* Bitmask of valid output pins */
+};
+
+static const struct mpsse_quirk bryx_brik_quirk = {
+ .names = {
+ [3] = "Push to Talk",
+ [5] = "Channel Activity",
+ },
+ .dir_out = BIT(3), /* Push to Talk */
+ .dir_in = BIT(5), /* Channel Activity */
+};
+
static const struct usb_device_id gpio_mpsse_table[] = {
{ USB_DEVICE(0x0c52, 0xa064) }, /* SeaLevel Systems, Inc. */
+ { USB_DEVICE(0x0403, 0x6988), /* FTDI, assigned to Bryx */
+ .driver_info = (kernel_ulong_t)&bryx_brik_quirk},
{ } /* Terminating entry */
};
@@ -160,13 +193,43 @@ static int gpio_mpsse_get_bank(struct mpsse_priv *priv, u8 bank)
return buf;
}
-static void gpio_mpsse_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int mpsse_ensure_supported(struct gpio_chip *chip,
+ unsigned long mask, int direction)
+{
+ unsigned long supported, unsupported;
+	const char *type = "input";
+ struct mpsse_priv *priv = gpiochip_get_data(chip);
+
+ supported = priv->dir_in;
+ if (direction == GPIO_LINE_DIRECTION_OUT) {
+ supported = priv->dir_out;
+ type = "output";
+ }
+
+ /* An invalid bit was in the provided mask */
+ unsupported = mask & ~supported;
+ if (unsupported) {
+ dev_err(&priv->udev->dev,
+ "mpsse: GPIO %lu doesn't support %s\n",
+ find_first_bit(&unsupported, sizeof(unsupported) * 8),
+ type);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int gpio_mpsse_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
unsigned long i, bank, bank_mask, bank_bits;
int ret;
struct mpsse_priv *priv = gpiochip_get_data(chip);
+ ret = mpsse_ensure_supported(chip, *mask, GPIO_LINE_DIRECTION_OUT);
+ if (ret)
+ return ret;
+
guard(mutex)(&priv->io_mutex);
for_each_set_clump8(i, bank_mask, mask, chip->ngpio) {
bank = i / 8;
@@ -180,11 +243,11 @@ static void gpio_mpsse_set_multiple(struct gpio_chip *chip, unsigned long *mask,
ret = gpio_mpsse_set_bank(priv, bank);
if (ret)
- dev_err(&priv->intf->dev,
- "Couldn't set values for bank %ld!",
- bank);
+ return ret;
}
}
+
+ return 0;
}
static int gpio_mpsse_get_multiple(struct gpio_chip *chip, unsigned long *mask,
@@ -194,6 +257,10 @@ static int gpio_mpsse_get_multiple(struct gpio_chip *chip, unsigned long *mask,
int ret;
struct mpsse_priv *priv = gpiochip_get_data(chip);
+ ret = mpsse_ensure_supported(chip, *mask, GPIO_LINE_DIRECTION_IN);
+ if (ret)
+ return ret;
+
guard(mutex)(&priv->io_mutex);
for_each_set_clump8(i, bank_mask, mask, chip->ngpio) {
bank = i / 8;
@@ -227,7 +294,7 @@ static int gpio_mpsse_gpio_get(struct gpio_chip *chip, unsigned int offset)
return 0;
}
-static void gpio_mpsse_gpio_set(struct gpio_chip *chip, unsigned int offset,
+static int gpio_mpsse_gpio_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
unsigned long mask = 0, bits = 0;
@@ -236,36 +303,43 @@ static void gpio_mpsse_gpio_set(struct gpio_chip *chip, unsigned int offset,
if (value)
__set_bit(offset, &bits);
- gpio_mpsse_set_multiple(chip, &mask, &bits);
+ return gpio_mpsse_set_multiple(chip, &mask, &bits);
}
static int gpio_mpsse_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
+ int ret;
struct mpsse_priv *priv = gpiochip_get_data(chip);
int bank = (offset & 8) >> 3;
int bank_offset = offset & 7;
+ ret = mpsse_ensure_supported(chip, BIT(offset), GPIO_LINE_DIRECTION_OUT);
+ if (ret)
+ return ret;
+
scoped_guard(mutex, &priv->io_mutex)
priv->gpio_dir[bank] |= BIT(bank_offset);
- gpio_mpsse_gpio_set(chip, offset, value);
-
- return 0;
+ return gpio_mpsse_gpio_set(chip, offset, value);
}
static int gpio_mpsse_direction_input(struct gpio_chip *chip,
unsigned int offset)
{
+ int ret;
struct mpsse_priv *priv = gpiochip_get_data(chip);
int bank = (offset & 8) >> 3;
int bank_offset = offset & 7;
+ ret = mpsse_ensure_supported(chip, BIT(offset), GPIO_LINE_DIRECTION_IN);
+ if (ret)
+ return ret;
+
guard(mutex)(&priv->io_mutex);
priv->gpio_dir[bank] &= ~BIT(bank_offset);
- gpio_mpsse_set_bank(priv, bank);
- return 0;
+ return gpio_mpsse_set_bank(priv, bank);
}
static int gpio_mpsse_get_direction(struct gpio_chip *chip,
@@ -286,18 +360,62 @@ static int gpio_mpsse_get_direction(struct gpio_chip *chip,
return ret;
}
-static void gpio_mpsse_poll(struct work_struct *work)
+/*
+ * Stops all workers except `my_worker`.
+ * Safe to call only when `irq_race` is held.
+ */
+static void gpio_mpsse_stop_all_except(struct mpsse_priv *priv,
+ struct mpsse_worker *my_worker)
+{
+ struct mpsse_worker *worker, *worker_tmp;
+	LIST_HEAD(destructors);
+
+ scoped_guard(raw_spinlock_irqsave, &priv->irq_spin) {
+ list_for_each_entry_safe(worker, worker_tmp,
+ &priv->workers, list) {
+ /* Don't stop ourselves */
+ if (worker == my_worker)
+ continue;
+
+ list_del(&worker->list);
+
+ /* Give worker a chance to terminate itself */
+ atomic_set(&worker->cancelled, 1);
+ /* Keep track of stuff to cancel */
+ INIT_LIST_HEAD(&worker->destroy);
+ list_add(&worker->destroy, &destructors);
+ }
+ }
+
+ list_for_each_entry_safe(worker, worker_tmp,
+ &destructors, destroy) {
+ list_del(&worker->destroy);
+ cancel_work_sync(&worker->work);
+ kfree(worker);
+ }
+}
+
+static void gpio_mpsse_poll(struct work_struct *my_work)
{
unsigned long pin_mask, pin_states, flags;
int irq_enabled, offset, err, value, fire_irq,
irq, old_value[16], irq_type[16];
- struct mpsse_priv *priv = container_of(work, struct mpsse_priv,
- irq_work);
+ struct mpsse_worker *my_worker = container_of(my_work, struct mpsse_worker, work);
+ struct mpsse_priv *priv = my_worker->priv;
for (offset = 0; offset < priv->gpio.ngpio; ++offset)
old_value[offset] = -1;
- while ((irq_enabled = atomic_read(&priv->irq_enabled))) {
+ /*
+	 * We only want one worker. Workers race to acquire irq_race and tear
+	 * down all other workers. This is a conditional guard so that we don't
+	 * deadlock against a worker that is busy cancelling us.
+ */
+ scoped_cond_guard(mutex_try, return, &priv->irq_race)
+ gpio_mpsse_stop_all_except(priv, my_worker);
+
+ while ((irq_enabled = atomic_read(&priv->irq_enabled)) &&
+ !atomic_read(&my_worker->cancelled)) {
usleep_range(MPSSE_POLL_INTERVAL, MPSSE_POLL_INTERVAL + 1000);
/* Cleanup will trigger at the end of the loop */
guard(mutex)(&priv->irq_mutex);
@@ -372,21 +490,45 @@ static int gpio_mpsse_set_irq_type(struct irq_data *irqd, unsigned int type)
static void gpio_mpsse_irq_disable(struct irq_data *irqd)
{
+ struct mpsse_worker *worker;
struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);
atomic_and(~BIT(irqd->hwirq), &priv->irq_enabled);
gpiochip_disable_irq(&priv->gpio, irqd->hwirq);
+
+ /*
+ * Can't actually do teardown in IRQ context (it blocks).
+	 * As a result, these workers stick around until the IRQ is re-enabled
+	 * or the device is disconnected.
+ */
+ scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
+ list_for_each_entry(worker, &priv->workers, list)
+ atomic_set(&worker->cancelled, 1);
}
static void gpio_mpsse_irq_enable(struct irq_data *irqd)
{
+ struct mpsse_worker *worker;
struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);
gpiochip_enable_irq(&priv->gpio, irqd->hwirq);
/* If no-one else was using the IRQ, enable it */
if (!atomic_fetch_or(BIT(irqd->hwirq), &priv->irq_enabled)) {
- INIT_WORK(&priv->irq_work, gpio_mpsse_poll);
- schedule_work(&priv->irq_work);
+ /*
+ * Can't be devm because it uses a non-raw spinlock (illegal in
+ * this context, where a raw spinlock is held by our caller)
+ */
+ worker = kzalloc(sizeof(*worker), GFP_NOWAIT);
+ if (!worker)
+ return;
+
+ worker->priv = priv;
+ INIT_LIST_HEAD(&worker->list);
+ INIT_WORK(&worker->work, gpio_mpsse_poll);
+ schedule_work(&worker->work);
+
+ scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
+ list_add(&worker->list, &priv->workers);
}
}
@@ -406,18 +548,49 @@ static void gpio_mpsse_ida_remove(void *data)
ida_free(&gpio_mpsse_ida, priv->id);
}
+static int mpsse_init_valid_mask(struct gpio_chip *chip,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct mpsse_priv *priv = gpiochip_get_data(chip);
+
+ if (WARN_ON(priv == NULL))
+ return -ENODEV;
+
+ *valid_mask = priv->dir_in | priv->dir_out;
+
+ return 0;
+}
+
+static void mpsse_irq_init_valid_mask(struct gpio_chip *chip,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct mpsse_priv *priv = gpiochip_get_data(chip);
+
+ if (WARN_ON(priv == NULL))
+ return;
+
+	/* Can only use IRQs on input-capable pins */
+ *valid_mask = priv->dir_in;
+}
+
static int gpio_mpsse_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct mpsse_priv *priv;
struct device *dev;
+	const char *serial;
int err;
+	const struct mpsse_quirk *quirk = (const void *)id->driver_info;
dev = &interface->dev;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ INIT_LIST_HEAD(&priv->workers);
+
priv->udev = usb_get_dev(interface_to_usbdev(interface));
priv->intf = interface;
priv->intf_id = interface->cur_altsetting->desc.bInterfaceNumber;
@@ -438,9 +611,21 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
if (err)
return err;
+ err = devm_mutex_init(dev, &priv->irq_race);
+ if (err)
+ return err;
+
+ raw_spin_lock_init(&priv->irq_spin);
+
+ serial = priv->udev->serial;
+ if (!serial)
+ serial = "NONE";
+
priv->gpio.label = devm_kasprintf(dev, GFP_KERNEL,
- "gpio-mpsse.%d.%d",
- priv->id, priv->intf_id);
+ "MPSSE%04x:%04x.%d.%d.%s",
+ id->idVendor, id->idProduct,
+ priv->intf_id, priv->id,
+ serial);
if (!priv->gpio.label)
return -ENOMEM;
@@ -454,10 +639,20 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
priv->gpio.get_multiple = gpio_mpsse_get_multiple;
priv->gpio.set_multiple = gpio_mpsse_set_multiple;
priv->gpio.base = -1;
- priv->gpio.ngpio = 16;
+ priv->gpio.ngpio = MPSSE_NGPIO;
priv->gpio.offset = priv->intf_id * priv->gpio.ngpio;
priv->gpio.can_sleep = 1;
+ if (quirk) {
+ priv->dir_out = quirk->dir_out;
+ priv->dir_in = quirk->dir_in;
+ priv->gpio.names = quirk->names;
+ priv->gpio.init_valid_mask = mpsse_init_valid_mask;
+ } else {
+ priv->dir_in = U16_MAX;
+ priv->dir_out = U16_MAX;
+ }
+
err = usb_find_common_endpoints(interface->cur_altsetting,
&priv->bulk_in, &priv->bulk_out,
NULL, NULL);
@@ -496,6 +691,7 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
priv->gpio.irq.parents = NULL;
priv->gpio.irq.default_type = IRQ_TYPE_NONE;
priv->gpio.irq.handler = handle_simple_irq;
+ priv->gpio.irq.init_valid_mask = mpsse_irq_init_valid_mask;
err = devm_gpiochip_add_data(dev, &priv->gpio, priv);
if (err)
@@ -508,6 +704,13 @@ static void gpio_mpsse_disconnect(struct usb_interface *intf)
{
struct mpsse_priv *priv = usb_get_intfdata(intf);
+ /*
+	 * The lock prevents a double-free of workers between this path and
+	 * the teardown step at the beginning of gpio_mpsse_poll().
+ */
+ scoped_guard(mutex, &priv->irq_race)
+ gpio_mpsse_stop_all_except(priv, NULL);
+
priv->intf = NULL;
usb_set_intfdata(intf, NULL);
usb_put_dev(priv->udev);
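
/*
 * Editor's sketch of the single-poller election added above (simplified;
 * the driver's version uses scoped_cond_guard() and also tears workers
 * down from disconnect).  Each new worker tries to take irq_race without
 * blocking: the winner cancels every other worker and polls; a loser
 * returns immediately, since the winner is about to cancel it anyway, and
 * blocking here could deadlock against cancel_work_sync().
 */
static void example_poll(struct work_struct *work)
{
	struct mpsse_worker *me = container_of(work, struct mpsse_worker, work);
	struct mpsse_priv *priv = me->priv;

	if (!mutex_trylock(&priv->irq_race))
		return;				/* another worker owns polling */

	gpio_mpsse_stop_all_except(priv, me);
	mutex_unlock(&priv->irq_race);

	while (!atomic_read(&me->cancelled)) {
		/* ... poll the pins over USB and fire nested IRQs ... */
	}
}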
diff --git a/drivers/gpio/gpio-msc313.c b/drivers/gpio/gpio-msc313.c
index 6db9e469e0dc..7345afdc78de 100644
--- a/drivers/gpio/gpio-msc313.c
+++ b/drivers/gpio/gpio-msc313.c
@@ -486,7 +486,7 @@ struct msc313_gpio {
u8 *saved;
};
-static void msc313_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int msc313_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct msc313_gpio *gpio = gpiochip_get_data(chip);
u8 gpioreg = readb_relaxed(gpio->base + gpio->gpio_data->offsets[offset]);
@@ -497,6 +497,8 @@ static void msc313_gpio_set(struct gpio_chip *chip, unsigned int offset, int val
gpioreg &= ~MSC313_GPIO_OUT;
writeb_relaxed(gpioreg, gpio->base + gpio->gpio_data->offsets[offset]);
+
+ return 0;
}
static int msc313_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -692,7 +694,7 @@ static const struct of_device_id msc313_gpio_of_match[] = {
* SoC goes into suspend to memory mode so we need to save some
 * of the register bits before suspending and put them back when resuming
*/
-static int __maybe_unused msc313_gpio_suspend(struct device *dev)
+static int msc313_gpio_suspend(struct device *dev)
{
struct msc313_gpio *gpio = dev_get_drvdata(dev);
int i;
@@ -703,7 +705,7 @@ static int __maybe_unused msc313_gpio_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused msc313_gpio_resume(struct device *dev)
+static int msc313_gpio_resume(struct device *dev)
{
struct msc313_gpio *gpio = dev_get_drvdata(dev);
int i;
@@ -714,13 +716,13 @@ static int __maybe_unused msc313_gpio_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(msc313_gpio_ops, msc313_gpio_suspend, msc313_gpio_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(msc313_gpio_ops, msc313_gpio_suspend, msc313_gpio_resume);
static struct platform_driver msc313_gpio_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = msc313_gpio_of_match,
- .pm = &msc313_gpio_ops,
+ .pm = pm_sleep_ptr(&msc313_gpio_ops),
},
.probe = msc313_gpio_probe,
};
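
/*
 * Editor's note on the msc313 PM conversion above:
 * DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr() replace the older
 * __maybe_unused + SIMPLE_DEV_PM_OPS() idiom.  With CONFIG_PM_SLEEP
 * disabled, pm_sleep_ptr() evaluates to NULL and the callbacks are
 * discarded by the compiler, so no __maybe_unused annotation is needed.
 * The whole pattern:
 */
#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	return 0;	/* save hardware state here */
}

static int example_resume(struct device *dev)
{
	return 0;	/* restore hardware state here */
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend,
				example_resume);

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		.pm = pm_sleep_ptr(&example_pm_ops),
	},
};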
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index 93facbebb80e..91230be51587 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -6,11 +6,11 @@
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
#define MTK_BANK_CNT 3
#define MTK_BANK_WIDTH 32
@@ -30,8 +30,7 @@
struct mtk_gc {
struct irq_chip irq_chip;
- struct gpio_chip chip;
- spinlock_t lock;
+ struct gpio_generic_chip chip;
int bank;
u32 rising;
u32 falling;
@@ -59,27 +58,29 @@ struct mtk {
static inline struct mtk_gc *
to_mediatek_gpio(struct gpio_chip *chip)
{
- return container_of(chip, struct mtk_gc, chip);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(chip);
+
+ return container_of(gen_gc, struct mtk_gc, chip);
}
static inline void
mtk_gpio_w32(struct mtk_gc *rg, u32 offset, u32 val)
{
- struct gpio_chip *gc = &rg->chip;
+ struct gpio_chip *gc = &rg->chip.gc;
struct mtk *mtk = gpiochip_get_data(gc);
offset = (rg->bank * GPIO_BANK_STRIDE) + offset;
- gc->write_reg(mtk->base + offset, val);
+ gpio_generic_write_reg(&rg->chip, mtk->base + offset, val);
}
static inline u32
mtk_gpio_r32(struct mtk_gc *rg, u32 offset)
{
- struct gpio_chip *gc = &rg->chip;
+ struct gpio_chip *gc = &rg->chip.gc;
struct mtk *mtk = gpiochip_get_data(gc);
offset = (rg->bank * GPIO_BANK_STRIDE) + offset;
- return gc->read_reg(mtk->base + offset);
+ return gpio_generic_read_reg(&rg->chip, mtk->base + offset);
}
static irqreturn_t
@@ -108,12 +109,12 @@ mediatek_gpio_irq_unmask(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct mtk_gc *rg = to_mediatek_gpio(gc);
int pin = d->hwirq;
- unsigned long flags;
u32 rise, fall, high, low;
gpiochip_enable_irq(gc, d->hwirq);
- spin_lock_irqsave(&rg->lock, flags);
+ guard(gpio_generic_lock_irqsave)(&rg->chip);
+
rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
high = mtk_gpio_r32(rg, GPIO_REG_HLVL);
@@ -122,7 +123,6 @@ mediatek_gpio_irq_unmask(struct irq_data *d)
mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall | (BIT(pin) & rg->falling));
mtk_gpio_w32(rg, GPIO_REG_HLVL, high | (BIT(pin) & rg->hlevel));
mtk_gpio_w32(rg, GPIO_REG_LLVL, low | (BIT(pin) & rg->llevel));
- spin_unlock_irqrestore(&rg->lock, flags);
}
static void
@@ -131,19 +131,18 @@ mediatek_gpio_irq_mask(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct mtk_gc *rg = to_mediatek_gpio(gc);
int pin = d->hwirq;
- unsigned long flags;
u32 rise, fall, high, low;
- spin_lock_irqsave(&rg->lock, flags);
- rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
- fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
- high = mtk_gpio_r32(rg, GPIO_REG_HLVL);
- low = mtk_gpio_r32(rg, GPIO_REG_LLVL);
- mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall & ~BIT(pin));
- mtk_gpio_w32(rg, GPIO_REG_REDGE, rise & ~BIT(pin));
- mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
- mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
- spin_unlock_irqrestore(&rg->lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &rg->chip) {
+ rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
+ fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
+ high = mtk_gpio_r32(rg, GPIO_REG_HLVL);
+ low = mtk_gpio_r32(rg, GPIO_REG_LLVL);
+ mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_REDGE, rise & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
+ }
gpiochip_disable_irq(gc, d->hwirq);
}
@@ -220,6 +219,7 @@ static const struct irq_chip mt7621_irq_chip = {
static int
mediatek_gpio_bank_probe(struct device *dev, int bank)
{
+ struct gpio_generic_chip_config config;
struct mtk *mtk = dev_get_drvdata(dev);
struct mtk_gc *rg;
void __iomem *dat, *set, *ctrl, *diro;
@@ -228,7 +228,6 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
rg = &mtk->gc_map[bank];
memset(rg, 0, sizeof(*rg));
- spin_lock_init(&rg->lock);
rg->bank = bank;
dat = mtk->base + GPIO_REG_DATA + (rg->bank * GPIO_BANK_STRIDE);
@@ -236,21 +235,30 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
ctrl = mtk->base + GPIO_REG_DCLR + (rg->bank * GPIO_BANK_STRIDE);
diro = mtk->base + GPIO_REG_CTRL + (rg->bank * GPIO_BANK_STRIDE);
- ret = bgpio_init(&rg->chip, dev, 4, dat, set, ctrl, diro, NULL,
- BGPIOF_NO_SET_ON_INPUT);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = dat,
+ .set = set,
+ .clr = ctrl,
+ .dirout = diro,
+ .flags = GPIO_GENERIC_NO_SET_ON_INPUT,
+ };
+
+ ret = gpio_generic_chip_init(&rg->chip, &config);
if (ret) {
- dev_err(dev, "bgpio_init() failed\n");
+ dev_err(dev, "failed to initialize generic GPIO chip\n");
return ret;
}
- rg->chip.of_gpio_n_cells = 2;
- rg->chip.of_xlate = mediatek_gpio_xlate;
- rg->chip.label = devm_kasprintf(dev, GFP_KERNEL, "%s-bank%d",
+ rg->chip.gc.of_gpio_n_cells = 2;
+ rg->chip.gc.of_xlate = mediatek_gpio_xlate;
+ rg->chip.gc.label = devm_kasprintf(dev, GFP_KERNEL, "%s-bank%d",
dev_name(dev), bank);
- if (!rg->chip.label)
+ if (!rg->chip.gc.label)
return -ENOMEM;
- rg->chip.offset = bank * MTK_BANK_WIDTH;
+ rg->chip.gc.offset = bank * MTK_BANK_WIDTH;
if (mtk->gpio_irq) {
struct gpio_irq_chip *girq;
@@ -261,7 +269,7 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
*/
ret = devm_request_irq(dev, mtk->gpio_irq,
mediatek_gpio_irq_handler, IRQF_SHARED,
- rg->chip.label, &rg->chip);
+ rg->chip.gc.label, &rg->chip.gc);
if (ret) {
dev_err(dev, "Error requesting IRQ %d: %d\n",
@@ -269,7 +277,7 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
return ret;
}
- girq = &rg->chip.irq;
+ girq = &rg->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &mt7621_irq_chip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
@@ -279,17 +287,17 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
girq->handler = handle_simple_irq;
}
- ret = devm_gpiochip_add_data(dev, &rg->chip, mtk);
+ ret = devm_gpiochip_add_data(dev, &rg->chip.gc, mtk);
if (ret < 0) {
dev_err(dev, "Could not register gpio %d, ret=%d\n",
- rg->chip.ngpio, ret);
+ rg->chip.gc.ngpio, ret);
return ret;
}
/* set polarity to low for all gpios */
mtk_gpio_w32(rg, GPIO_REG_POL, 0);
- dev_info(dev, "registering %d gpios\n", rg->chip.ngpio);
+ dev_info(dev, "registering %d gpios\n", rg->chip.gc.ngpio);
return 0;
}
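
/*
 * Editor's note on the mt7621 locking conversion above: guard() releases
 * the generic-chip lock automatically when the enclosing scope ends, and
 * scoped_guard() confines the critical section to its braced block, which
 * is why the explicit spin_unlock_irqrestore() calls disappear.  A sketch,
 * assuming the gpio_generic_lock_irqsave guard class introduced by this
 * series:
 */
#include <linux/cleanup.h>
#include <linux/gpio/generic.h>
#include <linux/io.h>

static void example_clear_bit(struct gpio_generic_chip *chip,
			      void __iomem *reg, u32 bit)
{
	scoped_guard(gpio_generic_lock_irqsave, chip) {
		u32 val = readl(reg);

		writel(val & ~bit, reg);
	}	/* lock dropped here, before any further work */
}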
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 57633a7b4270..22c36b79e249 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -573,11 +573,10 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
for (i = 0; i < mvchip->chip.ngpio; i++) {
int irq;
- irq = irq_find_mapping(mvchip->domain, i);
-
if (!(cause & BIT(i)))
continue;
+ irq = irq_find_mapping(mvchip->domain, i);
type = irq_get_trigger_type(irq);
if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
/* Swap polarity (race with GPIO line) */
@@ -602,7 +601,6 @@ static const struct regmap_config mvebu_gpio_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .fast_io = true,
};
/*
@@ -899,7 +897,7 @@ static void mvebu_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
msk = BIT(i);
is_out = !(io_conf & msk);
- seq_printf(s, " gpio-%-3d (%-20.20s)", chip->base + i, label);
+ seq_printf(s, " gpio-%-3d (%-20.20s)", i, label);
if (is_out) {
seq_printf(s, " out %s %s\n",
@@ -1168,7 +1166,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->chip.direction_input = mvebu_gpio_direction_input;
mvchip->chip.get = mvebu_gpio_get;
mvchip->chip.direction_output = mvebu_gpio_direction_output;
- mvchip->chip.set_rv = mvebu_gpio_set;
+ mvchip->chip.set = mvebu_gpio_set;
if (have_irqs)
mvchip->chip.to_irq = mvebu_gpio_to_irq;
mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
@@ -1236,8 +1234,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
if (!have_irqs)
return 0;
- mvchip->domain =
- irq_domain_create_linear(of_fwnode_handle(np), ngpios, &irq_generic_chip_ops, NULL);
+ mvchip->domain = irq_domain_create_linear(dev_fwnode(&pdev->dev), ngpios,
+ &irq_generic_chip_ops, NULL);
if (!mvchip->domain) {
dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
mvchip->chip.label);
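
/*
 * Editor's note: the mvebu handler reorder above defers the reverse-mapping
 * lookup until a line is known to have a pending cause bit, saving an
 * irq_find_mapping() call per idle line on every interrupt:
 */
static void example_handler(struct irq_domain *domain, u32 cause, int ngpio)
{
	int i;

	for (i = 0; i < ngpio; i++) {
		unsigned int irq;

		if (!(cause & BIT(i)))
			continue;	/* no lookup for idle lines */

		irq = irq_find_mapping(domain, i);
		generic_handle_irq(irq);
	}
}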
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index fae1a30f8ae6..d7666fe9dbf8 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -7,6 +7,7 @@
// Authors: Daniel Mack, Juergen Beisert.
// Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -22,6 +23,7 @@
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/of.h>
#include <linux/bug.h>
@@ -64,7 +66,7 @@ struct mxc_gpio_port {
int irq_high;
void (*mx_irq_handler)(struct irq_desc *desc);
struct irq_domain *domain;
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
struct device *dev;
u32 both_edges;
struct mxc_gpio_reg_saved gpio_saved_reg;
@@ -161,7 +163,6 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mxc_gpio_port *port = gc->private;
- unsigned long flags;
u32 bit, val;
u32 gpio_idx = d->hwirq;
int edge;
@@ -179,7 +180,7 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
if (GPIO_EDGE_SEL >= 0) {
edge = GPIO_INT_BOTH_EDGES;
} else {
- val = port->gc.get(&port->gc, gpio_idx);
+ val = port->gen_gc.gc.get(&port->gen_gc.gc, gpio_idx);
if (val) {
edge = GPIO_INT_LOW_LEV;
pr_debug("mxc: set GPIO %d to low trigger\n", gpio_idx);
@@ -200,41 +201,38 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &port->gen_gc) {
+ if (GPIO_EDGE_SEL >= 0) {
+ val = readl(port->base + GPIO_EDGE_SEL);
+ if (edge == GPIO_INT_BOTH_EDGES)
+ writel(val | (1 << gpio_idx),
+ port->base + GPIO_EDGE_SEL);
+ else
+ writel(val & ~(1 << gpio_idx),
+ port->base + GPIO_EDGE_SEL);
+ }
- if (GPIO_EDGE_SEL >= 0) {
- val = readl(port->base + GPIO_EDGE_SEL);
- if (edge == GPIO_INT_BOTH_EDGES)
- writel(val | (1 << gpio_idx),
- port->base + GPIO_EDGE_SEL);
- else
- writel(val & ~(1 << gpio_idx),
- port->base + GPIO_EDGE_SEL);
- }
+ if (edge != GPIO_INT_BOTH_EDGES) {
+ reg += GPIO_ICR1 + ((gpio_idx & 0x10) >> 2); /* lower or upper register */
+ bit = gpio_idx & 0xf;
+ val = readl(reg) & ~(0x3 << (bit << 1));
+ writel(val | (edge << (bit << 1)), reg);
+ }
- if (edge != GPIO_INT_BOTH_EDGES) {
- reg += GPIO_ICR1 + ((gpio_idx & 0x10) >> 2); /* lower or upper register */
- bit = gpio_idx & 0xf;
- val = readl(reg) & ~(0x3 << (bit << 1));
- writel(val | (edge << (bit << 1)), reg);
+ writel(1 << gpio_idx, port->base + GPIO_ISR);
+ port->pad_type[gpio_idx] = type;
}
- writel(1 << gpio_idx, port->base + GPIO_ISR);
- port->pad_type[gpio_idx] = type;
-
- raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
-
- return port->gc.direction_input(&port->gc, gpio_idx);
+ return port->gen_gc.gc.direction_input(&port->gen_gc.gc, gpio_idx);
}
static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
{
void __iomem *reg = port->base;
- unsigned long flags;
u32 bit, val;
int edge;
- raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&port->gen_gc);
reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
bit = gpio & 0xf;
@@ -250,12 +248,9 @@ static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
} else {
pr_err("mxc: invalid configuration for GPIO %d: %x\n",
gpio, edge);
- goto unlock;
+ return;
}
writel(val | (edge << (bit << 1)), reg);
-
-unlock:
- raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
}
/* handle 32 interrupts in one status register */
@@ -420,6 +415,7 @@ static void mxc_update_irq_chained_handler(struct mxc_gpio_port *port, bool enab
static int mxc_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config = { };
struct device_node *np = pdev->dev.of_node;
struct mxc_gpio_port *port;
int irq_count;
@@ -479,27 +475,31 @@ static int mxc_gpio_probe(struct platform_device *pdev)
port->mx_irq_handler = mx3_gpio_irq_handler;
mxc_update_irq_chained_handler(port, true);
- err = bgpio_init(&port->gc, &pdev->dev, 4,
- port->base + GPIO_PSR,
- port->base + GPIO_DR, NULL,
- port->base + GPIO_GDIR, NULL,
- BGPIOF_READ_OUTPUT_REG_SET);
+
+ config.dev = &pdev->dev;
+ config.sz = 4;
+ config.dat = port->base + GPIO_PSR;
+ config.set = port->base + GPIO_DR;
+ config.dirout = port->base + GPIO_GDIR;
+ config.flags = GPIO_GENERIC_READ_OUTPUT_REG_SET;
+
+ err = gpio_generic_chip_init(&port->gen_gc, &config);
if (err)
goto out_bgio;
- port->gc.request = mxc_gpio_request;
- port->gc.free = mxc_gpio_free;
- port->gc.to_irq = mxc_gpio_to_irq;
+ port->gen_gc.gc.request = mxc_gpio_request;
+ port->gen_gc.gc.free = mxc_gpio_free;
+ port->gen_gc.gc.to_irq = mxc_gpio_to_irq;
/*
	 * The driver is DT-only, so a fixed base only needs to be maintained
	 * for legacy userspace using the sysfs interface.
*/
if (IS_ENABLED(CONFIG_GPIO_SYSFS))
- port->gc.base = of_alias_get_id(np, "gpio") * 32;
+ port->gen_gc.gc.base = of_alias_get_id(np, "gpio") * 32;
else /* silence boot time warning */
- port->gc.base = -1;
+ port->gen_gc.gc.base = -1;
- err = devm_gpiochip_add_data(&pdev->dev, &port->gc, port);
+ err = devm_gpiochip_add_data(&pdev->dev, &port->gen_gc.gc, port);
if (err)
goto out_bgio;
@@ -509,8 +509,8 @@ static int mxc_gpio_probe(struct platform_device *pdev)
goto out_bgio;
}
- port->domain = irq_domain_create_legacy(of_fwnode_handle(np), 32, irq_base, 0,
- &irq_domain_simple_ops, NULL);
+ port->domain = irq_domain_create_legacy(dev_fwnode(&pdev->dev), 32, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
if (!port->domain) {
err = -ENODEV;
goto out_bgio;
@@ -573,7 +573,8 @@ static bool mxc_gpio_generic_config(struct mxc_gpio_port *port,
if (of_device_is_compatible(np, "fsl,imx8dxl-gpio") ||
of_device_is_compatible(np, "fsl,imx8qxp-gpio") ||
of_device_is_compatible(np, "fsl,imx8qm-gpio"))
- return (gpiochip_generic_config(&port->gc, offset, conf) == 0);
+ return (gpiochip_generic_config(&port->gen_gc.gc,
+ offset, conf) == 0);
return false;
}
@@ -666,7 +667,7 @@ static const struct dev_pm_ops mxc_gpio_dev_pm_ops = {
RUNTIME_PM_OPS(mxc_gpio_runtime_suspend, mxc_gpio_runtime_resume, NULL)
};
-static int mxc_gpio_syscore_suspend(void)
+static int mxc_gpio_syscore_suspend(void *data)
{
struct mxc_gpio_port *port;
int ret;
@@ -683,7 +684,7 @@ static int mxc_gpio_syscore_suspend(void)
return 0;
}
-static void mxc_gpio_syscore_resume(void)
+static void mxc_gpio_syscore_resume(void *data)
{
struct mxc_gpio_port *port;
int ret;
@@ -700,11 +701,15 @@ static void mxc_gpio_syscore_resume(void)
}
}
-static struct syscore_ops mxc_gpio_syscore_ops = {
+static const struct syscore_ops mxc_gpio_syscore_ops = {
.suspend = mxc_gpio_syscore_suspend,
.resume = mxc_gpio_syscore_resume,
};
+static struct syscore mxc_gpio_syscore = {
+ .ops = &mxc_gpio_syscore_ops,
+};
+
static struct platform_driver mxc_gpio_driver = {
.driver = {
.name = "gpio-mxc",
@@ -717,7 +722,7 @@ static struct platform_driver mxc_gpio_driver = {
static int __init gpio_mxc_init(void)
{
- register_syscore_ops(&mxc_gpio_syscore_ops);
+ register_syscore(&mxc_gpio_syscore);
return platform_driver_register(&mxc_gpio_driver);
}
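
/*
 * Editor's sketch of the syscore registration shape implied by the mxc
 * hunks above (API names are taken from this patch and not verified
 * against a header): the callbacks gain a void *data cookie and the ops
 * table can become const because the mutable list linkage now lives in a
 * separate struct syscore.
 */
static int example_syscore_suspend(void *data)
{
	return 0;	/* quiesce hardware here */
}

static void example_syscore_resume(void *data)
{
	/* restore hardware here */
}

static const struct syscore_ops example_syscore_ops = {
	.suspend = example_syscore_suspend,
	.resume = example_syscore_resume,
};

static struct syscore example_syscore = {
	.ops = &example_syscore_ops,
};

/* at init time: register_syscore(&example_syscore); */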
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index b418fbccb26c..5635694bf9f4 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -7,17 +7,18 @@
// Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/gpio/driver.h>
-#include <linux/module.h>
#define MXS_SET 0x4
#define MXS_CLR 0x8
@@ -48,7 +49,7 @@ struct mxs_gpio_port {
int id;
int irq;
struct irq_domain *domain;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct device *dev;
enum mxs_gpio_id devid;
u32 both_edges;
@@ -258,6 +259,7 @@ MODULE_DEVICE_TABLE(of, mxs_gpio_dt_ids);
static int mxs_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct gpio_generic_chip_config config;
struct device_node *parent;
static void __iomem *base;
struct mxs_gpio_port *port;
@@ -303,7 +305,7 @@ static int mxs_gpio_probe(struct platform_device *pdev)
goto out_iounmap;
}
- port->domain = irq_domain_create_legacy(of_fwnode_handle(np), 32, irq_base, 0,
+ port->domain = irq_domain_create_legacy(dev_fwnode(&pdev->dev), 32, irq_base, 0,
&irq_domain_simple_ops, NULL);
if (!port->domain) {
err = -ENODEV;
@@ -319,19 +321,24 @@ static int mxs_gpio_probe(struct platform_device *pdev)
irq_set_chained_handler_and_data(port->irq, mxs_gpio_irq_handler,
port);
- err = bgpio_init(&port->gc, &pdev->dev, 4,
- port->base + PINCTRL_DIN(port),
- port->base + PINCTRL_DOUT(port) + MXS_SET,
- port->base + PINCTRL_DOUT(port) + MXS_CLR,
- port->base + PINCTRL_DOE(port), NULL, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = port->base + PINCTRL_DIN(port),
+ .set = port->base + PINCTRL_DOUT(port) + MXS_SET,
+ .clr = port->base + PINCTRL_DOUT(port) + MXS_CLR,
+ .dirout = port->base + PINCTRL_DOE(port),
+ };
+
+ err = gpio_generic_chip_init(&port->chip, &config);
if (err)
goto out_irqdomain_remove;
- port->gc.to_irq = mxs_gpio_to_irq;
- port->gc.get_direction = mxs_gpio_get_direction;
- port->gc.base = port->id * 32;
+ port->chip.gc.to_irq = mxs_gpio_to_irq;
+ port->chip.gc.get_direction = mxs_gpio_get_direction;
+ port->chip.gc.base = port->id * 32;
- err = gpiochip_add_data(&port->gc, port);
+ err = gpiochip_add_data(&port->chip.gc, port);
if (err)
goto out_irqdomain_remove;
diff --git a/drivers/gpio/gpio-nct6694.c b/drivers/gpio/gpio-nct6694.c
new file mode 100644
index 000000000000..a8607f0d9915
--- /dev/null
+++ b/drivers/gpio/gpio-nct6694.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nuvoton NCT6694 GPIO controller driver based on USB interface.
+ *
+ * Copyright (C) 2025 Nuvoton Technology Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/gpio/driver.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/nct6694.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+/*
+ * USB command module type for the NCT6694 GPIO controller, used in the
+ * command headers of all messages sent to the device over USB.
+ */
+#define NCT6694_GPIO_MOD 0xFF
+
+#define NCT6694_GPIO_VER 0x90
+#define NCT6694_GPIO_VALID 0x110
+#define NCT6694_GPI_DATA 0x120
+#define NCT6694_GPO_DIR 0x170
+#define NCT6694_GPO_TYPE 0x180
+#define NCT6694_GPO_DATA 0x190
+
+#define NCT6694_GPI_STS 0x130
+#define NCT6694_GPI_CLR 0x140
+#define NCT6694_GPI_FALLING 0x150
+#define NCT6694_GPI_RISING 0x160
+
+#define NCT6694_NR_GPIO 8
+
+struct nct6694_gpio_data {
+ struct nct6694 *nct6694;
+ struct gpio_chip gpio;
+ struct mutex lock;
+ /* Protect irq operation */
+ struct mutex irq_lock;
+
+ unsigned char reg_val;
+ unsigned char irq_trig_falling;
+ unsigned char irq_trig_rising;
+
+ /* Current gpio group */
+ unsigned char group;
+ int irq;
+};
+
+static int nct6694_get_direction(struct gpio_chip *gpio, unsigned int offset)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DIR + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ return !(BIT(offset) & data->reg_val);
+}
+
+static int nct6694_direction_input(struct gpio_chip *gpio, unsigned int offset)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DIR + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ data->reg_val &= ~BIT(offset);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+}
+
+static int nct6694_direction_output(struct gpio_chip *gpio,
+ unsigned int offset, int val)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DIR + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ /* Set direction to output */
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ data->reg_val |= BIT(offset);
+ ret = nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ /* Then set output level */
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPO_DATA + data->group);
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ if (val)
+ data->reg_val |= BIT(offset);
+ else
+ data->reg_val &= ~BIT(offset);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+}
+
+static int nct6694_get_value(struct gpio_chip *gpio, unsigned int offset)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DIR + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ if (BIT(offset) & data->reg_val) {
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPO_DATA + data->group);
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ return !!(BIT(offset) & data->reg_val);
+ }
+
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPI_DATA + data->group);
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ return !!(BIT(offset) & data->reg_val);
+}
+
+static int nct6694_set_value(struct gpio_chip *gpio, unsigned int offset,
+ int val)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DATA + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ if (val)
+ data->reg_val |= BIT(offset);
+ else
+ data->reg_val &= ~BIT(offset);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+}
+
+static int nct6694_set_config(struct gpio_chip *gpio, unsigned int offset,
+ unsigned long config)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_TYPE + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ data->reg_val |= BIT(offset);
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ data->reg_val &= ~BIT(offset);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+}
+
+static int nct6694_init_valid_mask(struct gpio_chip *gpio,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPIO_VALID + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ *valid_mask = data->reg_val;
+
+ return ret;
+}
+
+static irqreturn_t nct6694_irq_handler(int irq, void *priv)
+{
+ struct nct6694_gpio_data *data = priv;
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPI_STS + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ unsigned char status;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret)
+ return IRQ_NONE;
+
+ status = data->reg_val;
+
+ while (status) {
+ int bit = __ffs(status);
+
+ data->reg_val = BIT(bit);
+ handle_nested_irq(irq_find_mapping(data->gpio.irq.domain, bit));
+ status &= ~BIT(bit);
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPI_CLR + data->group);
+ nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int nct6694_get_irq_trig(struct nct6694_gpio_data *data)
+{
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPI_FALLING + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->irq_trig_falling);
+ if (ret)
+ return ret;
+
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPI_RISING + data->group);
+ return nct6694_read_msg(data->nct6694, &cmd_hd, &data->irq_trig_rising);
+}
+
+static void nct6694_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_disable_irq(gpio, hwirq);
+}
+
+static void nct6694_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(gpio, hwirq);
+}
+
+static int nct6694_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ guard(mutex)(&data->lock);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ data->irq_trig_rising |= BIT(hwirq);
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ data->irq_trig_falling |= BIT(hwirq);
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ data->irq_trig_rising |= BIT(hwirq);
+ data->irq_trig_falling |= BIT(hwirq);
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static void nct6694_irq_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+
+ mutex_lock(&data->irq_lock);
+}
+
+static void nct6694_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPI_FALLING + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+
+ scoped_guard(mutex, &data->lock) {
+ nct6694_write_msg(data->nct6694, &cmd_hd, &data->irq_trig_falling);
+
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPI_RISING + data->group);
+ nct6694_write_msg(data->nct6694, &cmd_hd, &data->irq_trig_rising);
+ }
+
+ mutex_unlock(&data->irq_lock);
+}
+
+static const struct irq_chip nct6694_irq_chip = {
+ .name = "gpio-nct6694",
+ .irq_mask = nct6694_irq_mask,
+ .irq_unmask = nct6694_irq_unmask,
+ .irq_set_type = nct6694_irq_set_type,
+ .irq_bus_lock = nct6694_irq_bus_lock,
+ .irq_bus_sync_unlock = nct6694_irq_bus_sync_unlock,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static void nct6694_irq_dispose_mapping(void *d)
+{
+ struct nct6694_gpio_data *data = d;
+
+ irq_dispose_mapping(data->irq);
+}
+
+static void nct6694_gpio_ida_free(void *d)
+{
+ struct nct6694_gpio_data *data = d;
+ struct nct6694 *nct6694 = data->nct6694;
+
+ ida_free(&nct6694->gpio_ida, data->group);
+}
+
+static int nct6694_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct nct6694 *nct6694 = dev_get_drvdata(dev->parent);
+ struct nct6694_gpio_data *data;
+ struct gpio_irq_chip *girq;
+ int ret, i;
+ char **names;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->nct6694 = nct6694;
+
+ ret = ida_alloc(&nct6694->gpio_ida, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ data->group = ret;
+
+ ret = devm_add_action_or_reset(dev, nct6694_gpio_ida_free, data);
+ if (ret)
+ return ret;
+
+ names = devm_kcalloc(dev, NCT6694_NR_GPIO, sizeof(char *),
+ GFP_KERNEL);
+ if (!names)
+ return -ENOMEM;
+
+ for (i = 0; i < NCT6694_NR_GPIO; i++) {
+ names[i] = devm_kasprintf(dev, GFP_KERNEL, "GPIO%X%d",
+ data->group, i);
+ if (!names[i])
+ return -ENOMEM;
+ }
+
+ data->irq = irq_create_mapping(nct6694->domain,
+ NCT6694_IRQ_GPIO0 + data->group);
+ if (!data->irq)
+ return -EINVAL;
+
+ ret = devm_add_action_or_reset(dev, nct6694_irq_dispose_mapping, data);
+ if (ret)
+ return ret;
+
+	data->gpio.names = (const char * const *)names;
+ data->gpio.label = pdev->name;
+ data->gpio.direction_input = nct6694_direction_input;
+ data->gpio.get = nct6694_get_value;
+ data->gpio.direction_output = nct6694_direction_output;
+ data->gpio.set = nct6694_set_value;
+ data->gpio.get_direction = nct6694_get_direction;
+ data->gpio.set_config = nct6694_set_config;
+ data->gpio.init_valid_mask = nct6694_init_valid_mask;
+ data->gpio.base = -1;
+ data->gpio.can_sleep = false;
+ data->gpio.owner = THIS_MODULE;
+ data->gpio.ngpio = NCT6694_NR_GPIO;
+
+ platform_set_drvdata(pdev, data);
+
+ ret = devm_mutex_init(dev, &data->lock);
+ if (ret)
+ return ret;
+
+ ret = devm_mutex_init(dev, &data->irq_lock);
+ if (ret)
+ return ret;
+
+ ret = nct6694_get_irq_trig(data);
+	if (ret)
+		return dev_err_probe(dev, ret, "Failed to get irq trigger type\n");
+
+ girq = &data->gpio.irq;
+ gpio_irq_chip_set_chip(girq, &nct6694_irq_chip);
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+ girq->threaded = true;
+
+ ret = devm_request_threaded_irq(dev, data->irq, NULL, nct6694_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "gpio-nct6694", data);
+	if (ret)
+		return dev_err_probe(dev, ret, "Failed to request irq\n");
+
+ return devm_gpiochip_add_data(dev, &data->gpio, data);
+}
+
+static struct platform_driver nct6694_gpio_driver = {
+ .driver = {
+ .name = "nct6694-gpio",
+ },
+ .probe = nct6694_gpio_probe,
+};
+
+module_platform_driver(nct6694_gpio_driver);
+
+MODULE_DESCRIPTION("USB-GPIO controller driver for NCT6694");
+MODULE_AUTHOR("Ming Yu <tmyu0@nuvoton.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nct6694-gpio");
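
/*
 * Editor's note: every pin operation in the driver above is a USB
 * read-modify-write of one 8-bit register, serialized by guard(mutex) so
 * that concurrent callers cannot interleave their transactions.  A sketch
 * of that pattern using the message helpers from <linux/mfd/nct6694.h> as
 * this file uses them:
 */
static int example_rmw_bit(struct nct6694 *bus, struct mutex *lock,
			   const struct nct6694_cmd_header *hd,
			   unsigned char *val, unsigned int bit, bool on)
{
	int ret;

	guard(mutex)(lock);		/* one USB transaction at a time */

	ret = nct6694_read_msg(bus, hd, val);	/* fetch the current byte */
	if (ret < 0)
		return ret;

	if (on)
		*val |= BIT(bit);
	else
		*val &= ~BIT(bit);

	return nct6694_write_msg(bus, hd, val);	/* push it back */
}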
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index fa19a44943fd..97c5cd33279d 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -20,6 +20,7 @@
*/
#include <linux/cleanup.h>
#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -347,8 +348,8 @@ static int nmk_gpio_get_input(struct gpio_chip *chip, unsigned int offset)
return value;
}
-static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int nmk_gpio_set_output(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
@@ -357,6 +358,8 @@ static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned int offset,
__nmk_gpio_set_output(nmk_chip, offset, val);
clk_disable(nmk_chip->clk);
+
+ return 0;
}
static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned int offset,
@@ -394,10 +397,12 @@ static int nmk_gpio_get_mode(struct nmk_gpio_chip *nmk_chip, int offset)
}
void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
- struct gpio_chip *chip, unsigned int offset,
- unsigned int gpio)
+ struct gpio_chip *chip, unsigned int offset)
{
struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+#ifdef CONFIG_PINCTRL_NOMADIK
+ struct gpio_desc *desc;
+#endif
int mode;
bool is_out;
bool data_out;
@@ -423,15 +428,15 @@ void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
data_out = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & BIT(offset));
mode = nmk_gpio_get_mode(nmk_chip, offset);
#ifdef CONFIG_PINCTRL_NOMADIK
- if (mode == NMK_GPIO_ALT_C && pctldev)
- mode = nmk_prcm_gpiocr_get_mode(pctldev, gpio);
+ if (mode == NMK_GPIO_ALT_C && pctldev) {
+ desc = gpio_device_get_desc(chip->gpiodev, offset);
+ mode = nmk_prcm_gpiocr_get_mode(pctldev, desc_to_gpio(desc));
+ }
#endif
if (is_out) {
seq_printf(s, " gpio-%-3d (%-20.20s) out %s %s",
- gpio,
- label ?: "(none)",
- str_hi_lo(data_out),
+ offset, label ?: "(none)", str_hi_lo(data_out),
(mode < 0) ? "unknown" : modes[mode]);
} else {
int irq = chip->to_irq(chip, offset);
@@ -443,9 +448,7 @@ void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
};
seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
- gpio,
- label ?: "(none)",
- pulls[pullidx],
+ offset, label ?: "(none)", pulls[pullidx],
(mode < 0) ? "unknown" : modes[mode]);
val = nmk_gpio_get_input(chip, offset);
@@ -477,10 +480,10 @@ void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
static void nmk_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
- unsigned int i, gpio = chip->base;
+ unsigned int i;
- for (i = 0; i < chip->ngpio; i++, gpio++) {
- nmk_gpio_dbg_show_one(s, NULL, chip, i, gpio);
+ for (i = 0; i < chip->ngpio; i++) {
+ nmk_gpio_dbg_show_one(s, NULL, chip, i);
seq_puts(s, "\n");
}
}
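
/*
 * Editor's note: the nomadik dbg_show path above stops threading a global
 * GPIO number through the call chain and instead derives it on demand from
 * the descriptor.  Sketch of that lookup (illustrative):
 */
static int example_global_number(struct gpio_chip *chip, unsigned int offset)
{
	struct gpio_desc *desc = gpio_device_get_desc(chip->gpiodev, offset);

	if (IS_ERR(desc))
		return PTR_ERR(desc);

	return desc_to_gpio(desc);
}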
diff --git a/drivers/gpio/gpio-npcm-sgpio.c b/drivers/gpio/gpio-npcm-sgpio.c
index 260570614543..83c77a2c0623 100644
--- a/drivers/gpio/gpio-npcm-sgpio.c
+++ b/drivers/gpio/gpio-npcm-sgpio.c
@@ -211,9 +211,7 @@ static int npcm_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset)
static int npcm_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val)
{
- gc->set(gc, offset, val);
-
- return 0;
+ return gc->set(gc, offset, val);
}
static int npcm_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset)
@@ -226,7 +224,7 @@ static int npcm_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset)
return GPIO_LINE_DIRECTION_IN;
}
-static void npcm_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+static int npcm_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct npcm_sgpio *gpio = gpiochip_get_data(gc);
const struct npcm_sgpio_bank *bank = offset_to_bank(offset);
@@ -242,6 +240,8 @@ static void npcm_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
reg &= ~BIT(GPIO_BIT(offset));
iowrite8(reg, addr);
+
+ return 0;
}
static int npcm_sgpio_get(struct gpio_chip *gc, unsigned int offset)
diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c
index afb0e8a791e5..777e20c608dc 100644
--- a/drivers/gpio/gpio-octeon.c
+++ b/drivers/gpio/gpio-octeon.c
@@ -47,12 +47,15 @@ static int octeon_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
return 0;
}
-static void octeon_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int octeon_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct octeon_gpio *gpio = gpiochip_get_data(chip);
u64 mask = 1ull << offset;
u64 reg = gpio->register_base + (value ? TX_SET : TX_CLEAR);
cvmx_write_csr(reg, mask);
+
+ return 0;
}
static int octeon_gpio_dir_out(struct gpio_chip *chip, unsigned offset,
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 54c4bfdccf56..e136e81794df 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -953,7 +953,7 @@ static int omap_gpio_set_config(struct gpio_chip *chip, unsigned offset,
return ret;
}
-static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int omap_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct gpio_bank *bank;
unsigned long flags;
@@ -962,10 +962,12 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
raw_spin_lock_irqsave(&bank->lock, flags);
bank->set_dataout(bank, offset, value);
raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
}
-static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
struct gpio_bank *bank = gpiochip_get_data(chip);
void __iomem *reg = bank->base + bank->regs->dataout;
@@ -977,6 +979,8 @@ static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
writel_relaxed(l, reg);
bank->context.dataout = l;
raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
}
/*---------------------------------------------------------------------*/
@@ -1499,7 +1503,7 @@ static void omap_gpio_remove(struct platform_device *pdev)
clk_unprepare(bank->dbck);
}
-static int __maybe_unused omap_gpio_runtime_suspend(struct device *dev)
+static int omap_gpio_runtime_suspend(struct device *dev)
{
struct gpio_bank *bank = dev_get_drvdata(dev);
unsigned long flags;
@@ -1512,7 +1516,7 @@ static int __maybe_unused omap_gpio_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
+static int omap_gpio_runtime_resume(struct device *dev)
{
struct gpio_bank *bank = dev_get_drvdata(dev);
unsigned long flags;
@@ -1525,7 +1529,7 @@ static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
return 0;
}
-static int __maybe_unused omap_gpio_suspend(struct device *dev)
+static int omap_gpio_suspend(struct device *dev)
{
struct gpio_bank *bank = dev_get_drvdata(dev);
@@ -1537,7 +1541,7 @@ static int __maybe_unused omap_gpio_suspend(struct device *dev)
return omap_gpio_runtime_suspend(dev);
}
-static int __maybe_unused omap_gpio_resume(struct device *dev)
+static int omap_gpio_resume(struct device *dev)
{
struct gpio_bank *bank = dev_get_drvdata(dev);
@@ -1550,9 +1554,8 @@ static int __maybe_unused omap_gpio_resume(struct device *dev)
}
static const struct dev_pm_ops gpio_pm_ops = {
- SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
- NULL)
- SET_LATE_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
+ RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume, NULL)
+ LATE_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
};
static struct platform_driver omap_gpio_driver = {
@@ -1560,7 +1563,7 @@ static struct platform_driver omap_gpio_driver = {
.remove = omap_gpio_remove,
.driver = {
.name = "omap_gpio",
- .pm = &gpio_pm_ops,
+ .pm = pm_ptr(&gpio_pm_ops),
.of_match_table = omap_gpio_match,
},
};
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index 28dba7048509..e377f6dd4ccf 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -54,12 +54,11 @@ static int palmas_gpio_get(struct gpio_chip *gc, unsigned offset)
return !!(val & BIT(offset));
}
-static void palmas_gpio_set(struct gpio_chip *gc, unsigned offset,
- int value)
+static int palmas_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct palmas_gpio *pg = gpiochip_get_data(gc);
struct palmas *palmas = pg->palmas;
- int ret;
unsigned int reg;
int gpio16 = (offset/8);
@@ -71,9 +70,7 @@ static void palmas_gpio_set(struct gpio_chip *gc, unsigned offset,
reg = (value) ?
PALMAS_GPIO_SET_DATA_OUT : PALMAS_GPIO_CLEAR_DATA_OUT;
- ret = palmas_write(palmas, PALMAS_GPIO_BASE, reg, BIT(offset));
- if (ret < 0)
- dev_err(gc->parent, "Reg 0x%02x write failed, %d\n", reg, ret);
+ return palmas_write(palmas, PALMAS_GPIO_BASE, reg, BIT(offset));
}
static int palmas_gpio_output(struct gpio_chip *gc, unsigned offset,
@@ -89,7 +86,9 @@ static int palmas_gpio_output(struct gpio_chip *gc, unsigned offset,
reg = (gpio16) ? PALMAS_GPIO_DATA_DIR2 : PALMAS_GPIO_DATA_DIR;
/* Set the initial value */
- palmas_gpio_set(gc, offset, value);
+ ret = palmas_gpio_set(gc, offset, value);
+ if (ret)
+ return ret;
ret = palmas_update_bits(palmas, PALMAS_GPIO_BASE, reg,
BIT(offset), BIT(offset));
@@ -140,6 +139,7 @@ static const struct of_device_id of_palmas_gpio_match[] = {
{ .compatible = "ti,tps80036-gpio", .data = &tps80036_dev_data,},
{ },
};
+MODULE_DEVICE_TABLE(of, of_palmas_gpio_match);
static int palmas_gpio_probe(struct platform_device *pdev)
{
@@ -197,3 +197,13 @@ static int __init palmas_gpio_init(void)
return platform_driver_register(&palmas_gpio_driver);
}
subsys_initcall(palmas_gpio_init);
+
+static void __exit palmas_gpio_exit(void)
+{
+ platform_driver_unregister(&palmas_gpio_driver);
+}
+module_exit(palmas_gpio_exit);
+
+MODULE_DESCRIPTION("TI PALMAS series GPIO driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index b852e4997629..0a3916cc2772 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -38,6 +38,10 @@
#define PCA953X_INVERT 0x02
#define PCA953X_DIRECTION 0x03
+#define TCA6418_INPUT 0x14
+#define TCA6418_OUTPUT 0x17
+#define TCA6418_DIRECTION 0x23
+
#define REG_ADDR_MASK GENMASK(5, 0)
#define REG_ADDR_EXT BIT(6)
#define REG_ADDR_AI BIT(7)
@@ -76,7 +80,8 @@
#define PCA953X_TYPE BIT(12)
#define PCA957X_TYPE BIT(13)
#define PCAL653X_TYPE BIT(14)
-#define PCA_TYPE_MASK GENMASK(15, 12)
+#define TCA6418_TYPE BIT(16)
+#define PCA_TYPE_MASK GENMASK(16, 12)
#define PCA_CHIP_TYPE(x) ((x) & PCA_TYPE_MASK)
@@ -115,6 +120,7 @@ static const struct i2c_device_id pca953x_id[] = {
{ "pca6107", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca6408", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
+ { "tca6418", 18 | TCA6418_TYPE | PCA_INT, },
{ "tca6424", 24 | PCA953X_TYPE | PCA_INT, },
{ "tca9538", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca9539", 16 | PCA953X_TYPE | PCA_INT, },
@@ -204,6 +210,13 @@ static const struct pca953x_reg_config pca957x_regs = {
.invert = PCA957X_INVRT,
};
+static const struct pca953x_reg_config tca6418_regs = {
+ .direction = TCA6418_DIRECTION,
+ .output = TCA6418_OUTPUT,
+ .input = TCA6418_INPUT,
+ .invert = 0xFF, /* Does not apply */
+};
+
struct pca953x_chip {
unsigned gpio_start;
struct mutex i2c_lock;
@@ -237,6 +250,22 @@ static int pca953x_bank_shift(struct pca953x_chip *chip)
return fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
}
+/*
+ * Helper function to get the correct bit mask for a given offset and chip type.
+ * The TCA6418's input, output, and direction banks have a peculiar bit order:
+ * the first byte uses reversed bit order, while the second byte uses standard order.
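+ * For example, with BANK_SZ == 8: offset 0 maps to BIT(7) and offset 7 to
+ * BIT(0) in the first bank, while offset 8 maps straight to BIT(0).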
+ */
+static inline u8 pca953x_get_bit_mask(struct pca953x_chip *chip, unsigned int offset)
+{
+ unsigned int bit_pos_in_bank = offset % BANK_SZ;
+ int msb = BANK_SZ - 1;
+
+ if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE && offset <= msb)
+ return BIT(msb - bit_pos_in_bank);
+
+ return BIT(bit_pos_in_bank);
+}
+
#define PCA953x_BANK_INPUT BIT(0)
#define PCA953x_BANK_OUTPUT BIT(1)
#define PCA953x_BANK_POLARITY BIT(2)
@@ -277,7 +306,7 @@ static int pca953x_bank_shift(struct pca953x_chip *chip)
* Interrupt mask register 0x40 + 5 * bank_size RW
* Interrupt status register 0x40 + 6 * bank_size R
*
- * - Registers with bit 0x80 set, the AI bit
+ * - Registers with bit 0x80 set, the AI bit (auto increment)
* The bit is cleared and the registers fall into one of the
* categories above.
*/
@@ -353,18 +382,43 @@ static bool pcal6534_check_register(struct pca953x_chip *chip, unsigned int reg,
return true;
}
+/* TCA6418 breaks the PCA953x register order rule */
+static bool tca6418_check_register(struct pca953x_chip *chip, unsigned int reg,
+ u32 access_type_mask)
+{
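+ /*
+ * access_type_mask encodes which regmap callback is asking:
+ * BIT(0) = readable, BIT(1) = writeable, BIT(2) = volatile.
+ */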
+ /* Input registers are readable and volatile */
+ if (reg >= TCA6418_INPUT && reg < (TCA6418_INPUT + NBANK(chip)))
+ return (access_type_mask & (BIT(0) | BIT(2)));
+
+ /* Output registers are readable and writeable */
+ if (reg >= TCA6418_OUTPUT && reg < (TCA6418_OUTPUT + NBANK(chip)))
+ return (access_type_mask & (BIT(0) | BIT(1)));
+
+ /* Direction registers are readable and writeable */
+ if (reg >= TCA6418_DIRECTION && reg < (TCA6418_DIRECTION + NBANK(chip)))
+ return (access_type_mask & (BIT(0) | BIT(1)));
+
+ return false;
+}
+
static bool pca953x_readable_register(struct device *dev, unsigned int reg)
{
struct pca953x_chip *chip = dev_get_drvdata(dev);
u32 bank;
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
+ switch (PCA_CHIP_TYPE(chip->driver_data)) {
+ case PCA957X_TYPE:
bank = PCA957x_BANK_INPUT | PCA957x_BANK_OUTPUT |
PCA957x_BANK_POLARITY | PCA957x_BANK_CONFIG |
PCA957x_BANK_BUSHOLD;
- } else {
+ break;
+ case TCA6418_TYPE:
+ /* BIT(0) to indicate read access */
+ return tca6418_check_register(chip, reg, BIT(0));
+ default:
bank = PCA953x_BANK_INPUT | PCA953x_BANK_OUTPUT |
PCA953x_BANK_POLARITY | PCA953x_BANK_CONFIG;
+ break;
}
if (chip->driver_data & PCA_PCAL) {
@@ -381,12 +435,18 @@ static bool pca953x_writeable_register(struct device *dev, unsigned int reg)
struct pca953x_chip *chip = dev_get_drvdata(dev);
u32 bank;
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
+ switch (PCA_CHIP_TYPE(chip->driver_data)) {
+ case PCA957X_TYPE:
bank = PCA957x_BANK_OUTPUT | PCA957x_BANK_POLARITY |
PCA957x_BANK_CONFIG | PCA957x_BANK_BUSHOLD;
- } else {
+ break;
+ case TCA6418_TYPE:
+ /* BIT(1) for write access */
+ return tca6418_check_register(chip, reg, BIT(1));
+ default:
bank = PCA953x_BANK_OUTPUT | PCA953x_BANK_POLARITY |
PCA953x_BANK_CONFIG;
+ break;
}
if (chip->driver_data & PCA_PCAL)
@@ -401,10 +461,17 @@ static bool pca953x_volatile_register(struct device *dev, unsigned int reg)
struct pca953x_chip *chip = dev_get_drvdata(dev);
u32 bank;
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE)
+ switch (PCA_CHIP_TYPE(chip->driver_data)) {
+ case PCA957X_TYPE:
bank = PCA957x_BANK_INPUT;
- else
+ break;
+ case TCA6418_TYPE:
+ /* BIT(2) for volatile access */
+ return tca6418_check_register(chip, reg, BIT(2));
+ default:
bank = PCA953x_BANK_INPUT;
+ break;
+ }
if (chip->driver_data & PCA_PCAL)
bank |= PCAL9xxx_BANK_IRQ_STAT;
@@ -489,6 +556,16 @@ static u8 pcal6534_recalc_addr(struct pca953x_chip *chip, int reg, int off)
return pinctrl + addr + (off / BANK_SZ);
}
+static u8 tca6418_recalc_addr(struct pca953x_chip *chip, int reg_base, int offset)
+{
+ /*
+ * reg_base will be TCA6418_INPUT, TCA6418_OUTPUT, or TCA6418_DIRECTION
+ * offset is the global GPIO line offset (0-17)
+ * BANK_SZ is 8 for TCA6418 (8 bits per register bank)
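+ * e.g. offset 10 with reg_base TCA6418_OUTPUT (0x17) resolves to 0x18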
+ */
+ return reg_base + (offset / BANK_SZ);
+}
+
static int pca953x_write_regs(struct pca953x_chip *chip, int reg, unsigned long *val)
{
u8 regaddr = chip->recalc_addr(chip, reg, 0);
@@ -529,11 +606,14 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off);
- u8 bit = BIT(off % BANK_SZ);
+ u8 bit = pca953x_get_bit_mask(chip, off);
guard(mutex)(&chip->i2c_lock);
- return regmap_write_bits(chip->regmap, dirreg, bit, bit);
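+ /* Direction bit polarity is inverted on the TCA6418: 0 means input */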
+ if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE)
+ return regmap_update_bits(chip->regmap, dirreg, bit, 0);
+
+ return regmap_update_bits(chip->regmap, dirreg, bit, bit);
}
static int pca953x_gpio_direction_output(struct gpio_chip *gc,
@@ -542,25 +622,31 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off);
u8 outreg = chip->recalc_addr(chip, chip->regs->output, off);
- u8 bit = BIT(off % BANK_SZ);
+ u8 bit = pca953x_get_bit_mask(chip, off);
int ret;
guard(mutex)(&chip->i2c_lock);
/* set output level */
- ret = regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
+ ret = regmap_update_bits(chip->regmap, outreg, bit, val ? bit : 0);
if (ret)
return ret;
- /* then direction */
- return regmap_write_bits(chip->regmap, dirreg, bit, 0);
+ /*
+ * Then set the direction. Note that the direction bit
+ * polarity is inverted on the TCA6418.
+ */
+ if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE)
+ return regmap_update_bits(chip->regmap, dirreg, bit, bit);
+
+ return regmap_update_bits(chip->regmap, dirreg, bit, 0);
}
static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 inreg = chip->recalc_addr(chip, chip->regs->input, off);
- u8 bit = BIT(off % BANK_SZ);
+ u8 bit = pca953x_get_bit_mask(chip, off);
u32 reg_val;
int ret;
@@ -577,18 +663,18 @@ static int pca953x_gpio_set_value(struct gpio_chip *gc, unsigned int off,
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 outreg = chip->recalc_addr(chip, chip->regs->output, off);
- u8 bit = BIT(off % BANK_SZ);
+ u8 bit = pca953x_get_bit_mask(chip, off);
guard(mutex)(&chip->i2c_lock);
- return regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
+ return regmap_update_bits(chip->regmap, outreg, bit, val ? bit : 0);
}
static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off);
- u8 bit = BIT(off % BANK_SZ);
+ u8 bit = pca953x_get_bit_mask(chip, off);
u32 reg_val;
int ret;
@@ -597,7 +683,14 @@ static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
if (ret < 0)
return ret;
- if (reg_val & bit)
+ /* (in/out logic is inverted on TCA6418) */
+ if (reg_val & bit) {
+ if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+ }
+ if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE)
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
@@ -658,9 +751,9 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
/* Configure pull-up/pull-down */
if (param == PIN_CONFIG_BIAS_PULL_UP)
- ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, bit);
+ ret = regmap_update_bits(chip->regmap, pull_sel_reg, bit, bit);
else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
- ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, 0);
+ ret = regmap_update_bits(chip->regmap, pull_sel_reg, bit, 0);
else
ret = 0;
if (ret)
@@ -668,9 +761,9 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
/* Disable/Enable pull-up/pull-down */
if (param == PIN_CONFIG_BIAS_DISABLE)
- return regmap_write_bits(chip->regmap, pull_en_reg, bit, 0);
+ return regmap_update_bits(chip->regmap, pull_en_reg, bit, 0);
else
- return regmap_write_bits(chip->regmap, pull_en_reg, bit, bit);
+ return regmap_update_bits(chip->regmap, pull_en_reg, bit, bit);
}
static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
@@ -696,10 +789,10 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
gc->direction_input = pca953x_gpio_direction_input;
gc->direction_output = pca953x_gpio_direction_output;
gc->get = pca953x_gpio_get_value;
- gc->set_rv = pca953x_gpio_set_value;
+ gc->set = pca953x_gpio_set_value;
gc->get_direction = pca953x_gpio_get_direction;
gc->get_multiple = pca953x_gpio_get_multiple;
- gc->set_multiple_rv = pca953x_gpio_set_multiple;
+ gc->set_multiple = pca953x_gpio_set_multiple;
gc->set_config = pca953x_gpio_set_config;
gc->can_sleep = true;
@@ -761,10 +854,13 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
int level;
if (chip->driver_data & PCA_PCAL) {
+ DECLARE_BITMAP(latched_inputs, MAX_LINE);
guard(mutex)(&chip->i2c_lock);
- /* Enable latch on interrupt-enabled inputs */
- pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask);
+ /* Enable latch on edge-triggered interrupt-enabled inputs */
+ bitmap_or(latched_inputs, chip->irq_trig_fall, chip->irq_trig_raise, gc->ngpio);
+ bitmap_and(latched_inputs, latched_inputs, chip->irq_mask, gc->ngpio);
+ pca953x_write_regs(chip, PCAL953X_IN_LATCH, latched_inputs);
bitmap_complement(irq_mask, chip->irq_mask, gc->ngpio);
@@ -974,7 +1070,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
IRQF_ONESHOT | IRQF_SHARED, dev_name(dev),
chip);
if (ret)
- return dev_err_probe(dev, client->irq, "failed to request irq\n");
+ return dev_err_probe(dev, ret, "failed to request irq\n");
return 0;
}
@@ -1110,19 +1206,29 @@ static int pca953x_probe(struct i2c_client *client)
pca953x_setup_gpio(chip, chip->driver_data & PCA_GPIO_MASK);
if (NBANK(chip) > 2 || PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
- dev_info(dev, "using AI\n");
+ dev_info(dev, "using auto increment\n");
regmap_config = &pca953x_ai_i2c_regmap;
} else {
- dev_info(dev, "using no AI\n");
+ dev_info(dev, "using no auto increment\n");
regmap_config = &pca953x_i2c_regmap;
}
- if (PCA_CHIP_TYPE(chip->driver_data) == PCAL653X_TYPE) {
+ switch (PCA_CHIP_TYPE(chip->driver_data)) {
+ case PCAL653X_TYPE:
chip->recalc_addr = pcal6534_recalc_addr;
chip->check_reg = pcal6534_check_register;
- } else {
+ break;
+ case TCA6418_TYPE:
+ chip->recalc_addr = tca6418_recalc_addr;
+ /*
+ * chip->check_reg is intentionally left unset here: the
+ * readable/writeable/volatile wrappers dispatch to
+ * tca6418_check_register() based on PCA_CHIP_TYPE instead.
+ */
+ break;
+ default:
chip->recalc_addr = pca953x_recalc_addr;
chip->check_reg = pca953x_check_register;
+ break;
}
chip->regmap = devm_regmap_init_i2c(client, regmap_config);
@@ -1151,15 +1257,22 @@ static int pca953x_probe(struct i2c_client *client)
lockdep_set_subclass(&chip->i2c_lock,
i2c_adapter_depth(client->adapter));
- /* initialize cached registers from their original values.
+ /*
+ * initialize cached registers from their original values.
* we can't share this chip with another i2c master.
*/
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
+ switch (PCA_CHIP_TYPE(chip->driver_data)) {
+ case PCA957X_TYPE:
chip->regs = &pca957x_regs;
ret = device_pca957x_init(chip);
- } else {
+ break;
+ case TCA6418_TYPE:
+ /* No extra device init required */
+ chip->regs = &tca6418_regs;
+ ret = 0;
+ break;
+ default:
chip->regs = &pca953x_regs;
ret = device_pca95xx_init(chip);
+ break;
}
if (ret)
return ret;
@@ -1325,6 +1438,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "ti,pca9536", .data = OF_953X( 4, 0), },
{ .compatible = "ti,tca6408", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), },
+ { .compatible = "ti,tca6418", .data = (void *)(18 | TCA6418_TYPE | PCA_INT), },
{ .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), },
{ .compatible = "ti,tca9535", .data = OF_953X(16, PCA_INT), },
{ .compatible = "ti,tca9538", .data = OF_953X( 8, PCA_INT), },
@@ -1355,7 +1469,9 @@ static int __init pca953x_init(void)
{
return i2c_add_driver(&pca953x_driver);
}
-/* register after i2c postcore initcall and before
+
+/*
+ * register after i2c postcore initcall and before
* subsys initcalls that may rely on these GPIOs
*/
subsys_initcall(pca953x_init);
diff --git a/drivers/gpio/gpio-pca9570.c b/drivers/gpio/gpio-pca9570.c
index d37ba4049368..c5a1287079a0 100644
--- a/drivers/gpio/gpio-pca9570.c
+++ b/drivers/gpio/gpio-pca9570.c
@@ -88,7 +88,7 @@ static int pca9570_get(struct gpio_chip *chip, unsigned offset)
return !!(buffer & BIT(offset));
}
-static void pca9570_set(struct gpio_chip *chip, unsigned offset, int value)
+static int pca9570_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct pca9570 *gpio = gpiochip_get_data(chip);
u8 buffer;
@@ -110,6 +110,7 @@ static void pca9570_set(struct gpio_chip *chip, unsigned offset, int value)
out:
mutex_unlock(&gpio->lock);
+ return ret;
}
static int pca9570_probe(struct i2c_client *client)
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 2e5f5d7f8865..3b9de8c3d924 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -171,21 +171,24 @@ static int pcf857x_output(struct gpio_chip *chip, unsigned int offset, int value
return status;
}
-static void pcf857x_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int pcf857x_set(struct gpio_chip *chip, unsigned int offset, int value)
{
- pcf857x_output(chip, offset, value);
+ return pcf857x_output(chip, offset, value);
}
-static void pcf857x_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int pcf857x_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
struct pcf857x *gpio = gpiochip_get_data(chip);
+ int status;
mutex_lock(&gpio->lock);
gpio->out &= ~*mask;
gpio->out |= *bits & *mask;
- gpio->write(gpio->client, gpio->out);
+ status = gpio->write(gpio->client, gpio->out);
mutex_unlock(&gpio->lock);
+
+ return status;
}
/*-------------------------------------------------------------------------*/
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 63f25c72eac2..4ffa0955a9e3 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -99,7 +99,7 @@ struct pch_gpio {
spinlock_t spinlock;
};
-static void pch_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
+static int pch_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
{
u32 reg_val;
struct pch_gpio *chip = gpiochip_get_data(gpio);
@@ -114,6 +114,8 @@ static void pch_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
iowrite32(reg_val, &chip->reg->po);
spin_unlock_irqrestore(&chip->spinlock, flags);
+
+ return 0;
}
static int pch_gpio_get(struct gpio_chip *gpio, unsigned int nr)
@@ -169,7 +171,7 @@ static int pch_gpio_direction_input(struct gpio_chip *gpio, unsigned int nr)
/*
* Save register configuration and disable interrupts.
*/
-static void __maybe_unused pch_gpio_save_reg_conf(struct pch_gpio *chip)
+static void pch_gpio_save_reg_conf(struct pch_gpio *chip)
{
chip->pch_gpio_reg.ien_reg = ioread32(&chip->reg->ien);
chip->pch_gpio_reg.imask_reg = ioread32(&chip->reg->imask);
@@ -185,7 +187,7 @@ static void __maybe_unused pch_gpio_save_reg_conf(struct pch_gpio *chip)
/*
* This function restores the register configuration of the GPIO device.
*/
-static void __maybe_unused pch_gpio_restore_reg_conf(struct pch_gpio *chip)
+static void pch_gpio_restore_reg_conf(struct pch_gpio *chip)
{
iowrite32(chip->pch_gpio_reg.ien_reg, &chip->reg->ien);
iowrite32(chip->pch_gpio_reg.imask_reg, &chip->reg->imask);
@@ -400,7 +402,7 @@ static int pch_gpio_probe(struct pci_dev *pdev,
return pch_gpio_alloc_generic_chip(chip, irq_base, gpio_pins[chip->ioh]);
}
-static int __maybe_unused pch_gpio_suspend(struct device *dev)
+static int pch_gpio_suspend(struct device *dev)
{
struct pch_gpio *chip = dev_get_drvdata(dev);
unsigned long flags;
@@ -412,7 +414,7 @@ static int __maybe_unused pch_gpio_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused pch_gpio_resume(struct device *dev)
+static int pch_gpio_resume(struct device *dev)
{
struct pch_gpio *chip = dev_get_drvdata(dev);
unsigned long flags;
@@ -426,7 +428,7 @@ static int __maybe_unused pch_gpio_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(pch_gpio_pm_ops, pch_gpio_suspend, pch_gpio_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(pch_gpio_pm_ops, pch_gpio_suspend, pch_gpio_resume);
static const struct pci_device_id pch_gpio_pcidev_id[] = {
{ PCI_DEVICE_DATA(INTEL, EG20T_PCH, INTEL_EG20T_PCH) },
@@ -442,7 +444,7 @@ static struct pci_driver pch_gpio_driver = {
.id_table = pch_gpio_pcidev_id,
.probe = pch_gpio_probe,
.driver = {
- .pm = &pch_gpio_pm_ops,
+ .pm = pm_sleep_ptr(&pch_gpio_pm_ops),
},
};
diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c
index 476cea1b5ed7..9d28ca8e1d6f 100644
--- a/drivers/gpio/gpio-pci-idio-16.c
+++ b/drivers/gpio/gpio-pci-idio-16.c
@@ -41,6 +41,7 @@ static const struct regmap_config idio_16_regmap_config = {
.reg_stride = 1,
.val_bits = 8,
.io_port = true,
+ .max_register = 0x7,
.wr_table = &idio_16_wr_table,
.rd_table = &idio_16_rd_table,
.volatile_table = &idio_16_rd_table,
diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c
index e3013e778e15..7ec6a46ed600 100644
--- a/drivers/gpio/gpio-pisosr.c
+++ b/drivers/gpio/gpio-pisosr.c
@@ -67,13 +67,6 @@ static int pisosr_gpio_direction_input(struct gpio_chip *chip,
return 0;
}
-static int pisosr_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- /* This device is input only */
- return -EINVAL;
-}
-
static int pisosr_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct pisosr_gpio *gpio = gpiochip_get_data(chip);
@@ -108,7 +101,6 @@ static const struct gpio_chip template_chip = {
.owner = THIS_MODULE,
.get_direction = pisosr_gpio_get_direction,
.direction_input = pisosr_gpio_direction_input,
- .direction_output = pisosr_gpio_direction_output,
.get = pisosr_gpio_get,
.get_multiple = pisosr_gpio_get_multiple,
.base = -1,
@@ -116,11 +108,6 @@ static const struct gpio_chip template_chip = {
.can_sleep = true,
};
-static void pisosr_mutex_destroy(void *lock)
-{
- mutex_destroy(lock);
-}
-
static int pisosr_gpio_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
@@ -147,8 +134,7 @@ static int pisosr_gpio_probe(struct spi_device *spi)
return dev_err_probe(dev, PTR_ERR(gpio->load_gpio),
"Unable to allocate load GPIO\n");
- mutex_init(&gpio->lock);
- ret = devm_add_action_or_reset(dev, pisosr_mutex_destroy, &gpio->lock);
+ ret = devm_mutex_init(dev, &gpio->lock);
if (ret)
return ret;
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 1c273727ffa3..919cf86fd590 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -37,7 +37,6 @@
#define PL061_GPIO_NR 8
-#ifdef CONFIG_PM
struct pl061_context_save_regs {
u8 gpio_data;
u8 gpio_dir;
@@ -46,7 +45,6 @@ struct pl061_context_save_regs {
u8 gpio_iev;
u8 gpio_ie;
};
-#endif
struct pl061 {
raw_spinlock_t lock;
@@ -55,9 +53,7 @@ struct pl061 {
struct gpio_chip gc;
int parent_irq;
-#ifdef CONFIG_PM
struct pl061_context_save_regs csave_regs;
-#endif
};
static int pl061_get_direction(struct gpio_chip *gc, unsigned offset)
@@ -115,11 +111,13 @@ static int pl061_get_value(struct gpio_chip *gc, unsigned offset)
return !!readb(pl061->base + (BIT(offset + 2)));
}
-static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value)
+static int pl061_set_value(struct gpio_chip *gc, unsigned int offset, int value)
{
struct pl061 *pl061 = gpiochip_get_data(gc);
writeb(!!value << offset, pl061->base + (BIT(offset + 2)));
+
+ return 0;
}
static int pl061_irq_type(struct irq_data *d, unsigned trigger)
@@ -365,7 +363,6 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
}
-#ifdef CONFIG_PM
static int pl061_suspend(struct device *dev)
{
struct pl061 *pl061 = dev_get_drvdata(dev);
@@ -409,13 +406,7 @@ static int pl061_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops pl061_dev_pm_ops = {
- .suspend = pl061_suspend,
- .resume = pl061_resume,
- .freeze = pl061_suspend,
- .restore = pl061_resume,
-};
-#endif
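+/* DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr() replace the CONFIG_PM #ifdefs */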
+static DEFINE_SIMPLE_DEV_PM_OPS(pl061_dev_pm_ops, pl061_suspend, pl061_resume);
static const struct amba_id pl061_ids[] = {
{
@@ -429,9 +420,7 @@ MODULE_DEVICE_TABLE(amba, pl061_ids);
static struct amba_driver pl061_gpio_driver = {
.drv = {
.name = "pl061_gpio",
-#ifdef CONFIG_PM
- .pm = &pl061_dev_pm_ops,
-#endif
+ .pm = pm_sleep_ptr(&pl061_dev_pm_ops),
},
.id_table = pl061_ids,
.probe = pl061_probe,
diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c
index d9b228bea42e..cb015fb5c946 100644
--- a/drivers/gpio/gpio-pmic-eic-sprd.c
+++ b/drivers/gpio/gpio-pmic-eic-sprd.c
@@ -109,12 +109,6 @@ static int sprd_pmic_eic_direction_input(struct gpio_chip *chip,
return 0;
}
-static void sprd_pmic_eic_set(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
- /* EICs are always input, nothing need to do here. */
-}
-
static int sprd_pmic_eic_set_debounce(struct gpio_chip *chip,
unsigned int offset,
unsigned int debounce)
@@ -351,7 +345,6 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev)
pmic_eic->chip.request = sprd_pmic_eic_request;
pmic_eic->chip.free = sprd_pmic_eic_free;
pmic_eic->chip.set_config = sprd_pmic_eic_set_config;
- pmic_eic->chip.set = sprd_pmic_eic_set;
pmic_eic->chip.get = sprd_pmic_eic_get;
pmic_eic->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index aead35ea090e..664cf1eef494 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -315,12 +315,14 @@ static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(gplr & GPIO_bit(offset));
}
-static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int pxa_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
void __iomem *base = gpio_bank_base(chip, offset);
writel_relaxed(GPIO_bit(offset),
base + (value ? GPSR_OFFSET : GPCR_OFFSET));
+
+ return 0;
}
#ifdef CONFIG_OF_GPIO
@@ -497,8 +499,6 @@ static void pxa_mask_muxed_gpio(struct irq_data *d)
gfer = readl_relaxed(base + GFER_OFFSET) & ~GPIO_bit(gpio);
writel_relaxed(grer, base + GRER_OFFSET);
writel_relaxed(gfer, base + GFER_OFFSET);
-
- gpiochip_disable_irq(&pchip->chip, gpio);
}
static int pxa_gpio_set_wake(struct irq_data *d, unsigned int on)
@@ -518,21 +518,17 @@ static void pxa_unmask_muxed_gpio(struct irq_data *d)
unsigned int gpio = irqd_to_hwirq(d);
struct pxa_gpio_bank *c = gpio_to_pxabank(&pchip->chip, gpio);
- gpiochip_enable_irq(&pchip->chip, gpio);
-
c->irq_mask |= GPIO_bit(gpio);
update_edge_detect(c);
}
-static const struct irq_chip pxa_muxed_gpio_chip = {
+static struct irq_chip pxa_muxed_gpio_chip = {
.name = "GPIO",
.irq_ack = pxa_ack_muxed_gpio,
.irq_mask = pxa_mask_muxed_gpio,
.irq_unmask = pxa_unmask_muxed_gpio,
.irq_set_type = pxa_gpio_irq_type,
.irq_set_wake = pxa_gpio_set_wake,
- .flags = IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int pxa_gpio_nums(struct platform_device *pdev)
@@ -642,9 +638,8 @@ static int pxa_gpio_probe(struct platform_device *pdev)
if (!pxa_last_gpio)
return -EINVAL;
- pchip->irqdomain = irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node),
- pxa_last_gpio + 1, irq_base, 0,
- &pxa_irq_domain_ops, pchip);
+ pchip->irqdomain = irq_domain_create_legacy(dev_fwnode(&pdev->dev), pxa_last_gpio + 1,
+ irq_base, 0, &pxa_irq_domain_ops, pchip);
if (!pchip->irqdomain)
return -ENOMEM;
@@ -752,7 +747,7 @@ static int __init pxa_gpio_dt_init(void)
device_initcall(pxa_gpio_dt_init);
#ifdef CONFIG_PM
-static int pxa_gpio_suspend(void)
+static int pxa_gpio_suspend(void *data)
{
struct pxa_gpio_chip *pchip = pxa_gpio_chip;
struct pxa_gpio_bank *c;
@@ -773,7 +768,7 @@ static int pxa_gpio_suspend(void)
return 0;
}
-static void pxa_gpio_resume(void)
+static void pxa_gpio_resume(void *data)
{
struct pxa_gpio_chip *pchip = pxa_gpio_chip;
struct pxa_gpio_bank *c;
@@ -797,14 +792,18 @@ static void pxa_gpio_resume(void)
#define pxa_gpio_resume NULL
#endif
-static struct syscore_ops pxa_gpio_syscore_ops = {
+static const struct syscore_ops pxa_gpio_syscore_ops = {
.suspend = pxa_gpio_suspend,
.resume = pxa_gpio_resume,
};
+static struct syscore pxa_gpio_syscore = {
+ .ops = &pxa_gpio_syscore_ops,
+};
+
static int __init pxa_gpio_sysinit(void)
{
- register_syscore_ops(&pxa_gpio_syscore_ops);
+ register_syscore(&pxa_gpio_syscore);
return 0;
}
postcore_initcall(pxa_gpio_sysinit);
diff --git a/drivers/gpio/gpio-qixis-fpga.c b/drivers/gpio/gpio-qixis-fpga.c
new file mode 100644
index 000000000000..6e67f43ac0bd
--- /dev/null
+++ b/drivers/gpio/gpio-qixis-fpga.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Layerscape GPIO QIXIS FPGA driver
+ *
+ * Copyright 2025 NXP
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/regmap.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
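+/*
+ * Per-board configuration: output_lines is a bitmap of the GPIO lines
+ * whose direction is fixed to output in the FPGA register.
+ */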
+struct qixis_cpld_gpio_config {
+ u64 output_lines;
+};
+
+static const struct qixis_cpld_gpio_config lx2160ardb_sfp_cfg = {
+ .output_lines = BIT(0),
+};
+
+static const struct qixis_cpld_gpio_config ls1046aqds_stat_pres2_cfg = {
+ .output_lines = 0x0,
+};
+
+static const struct regmap_config regmap_config_8r_8v = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int qixis_cpld_gpio_probe(struct platform_device *pdev)
+{
+ DECLARE_BITMAP(fixed_direction_output, 8);
+ const struct qixis_cpld_gpio_config *cfg;
+ struct gpio_regmap_config config = {0};
+ struct regmap *regmap;
+ void __iomem *reg;
+ u32 base;
+ int ret;
+
+ if (!pdev->dev.parent)
+ return -ENODEV;
+
+ cfg = device_get_match_data(&pdev->dev);
+
+ ret = device_property_read_u32(&pdev->dev, "reg", &base);
+ if (ret)
+ return ret;
+
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap) {
+ /*
+ * In case there is no regmap configured by the parent device,
+ * create our own from the MMIO space.
+ */
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ regmap = devm_regmap_init_mmio(&pdev->dev, reg, &regmap_config_8r_8v);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * In this case, the offset of our register is 0 inside the
+ * regmap area that we just created.
+ */
+ base = 0;
+ }
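+
+ /*
+ * The CPLD exposes pin state and output value through the same
+ * register, so the data and set bases are identical.
+ */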
+ config.reg_dat_base = GPIO_REGMAP_ADDR(base);
+ config.reg_set_base = GPIO_REGMAP_ADDR(base);
+
+ config.drvdata = (void *)cfg;
+ config.regmap = regmap;
+ config.parent = &pdev->dev;
+ config.ngpio_per_reg = 8;
+ config.ngpio = 8;
+
+ bitmap_from_u64(fixed_direction_output, cfg->output_lines);
+ config.fixed_direction_output = fixed_direction_output;
+
+ return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(&pdev->dev, &config));
+}
+
+static const struct of_device_id qixis_cpld_gpio_of_match[] = {
+ {
+ .compatible = "fsl,lx2160ardb-fpga-gpio-sfp",
+ .data = &lx2160ardb_sfp_cfg,
+ },
+ {
+ .compatible = "fsl,ls1046aqds-fpga-gpio-stat-pres2",
+ .data = &ls1046aqds_stat_pres2_cfg,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qixis_cpld_gpio_of_match);
+
+static struct platform_driver qixis_cpld_gpio_driver = {
+ .probe = qixis_cpld_gpio_probe,
+ .driver = {
+ .name = "gpio-qixis-cpld",
+ .of_match_table = qixis_cpld_gpio_of_match,
+ },
+};
+module_platform_driver(qixis_cpld_gpio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ioana Ciornei <ioana.ciornei@nxp.com>");
+MODULE_DESCRIPTION("Layerscape GPIO QIXIS FPGA driver");
diff --git a/drivers/gpio/gpio-raspberrypi-exp.c b/drivers/gpio/gpio-raspberrypi-exp.c
index 9d1b95e429f1..40413e06b69c 100644
--- a/drivers/gpio/gpio-raspberrypi-exp.c
+++ b/drivers/gpio/gpio-raspberrypi-exp.c
@@ -175,7 +175,7 @@ static int rpi_exp_gpio_get(struct gpio_chip *gc, unsigned int off)
return !!get.state;
}
-static void rpi_exp_gpio_set(struct gpio_chip *gc, unsigned int off, int val)
+static int rpi_exp_gpio_set(struct gpio_chip *gc, unsigned int off, int val)
{
struct rpi_exp_gpio *gpio;
struct gpio_get_set_state set;
@@ -188,10 +188,14 @@ static void rpi_exp_gpio_set(struct gpio_chip *gc, unsigned int off, int val)
ret = rpi_firmware_property(gpio->fw, RPI_FIRMWARE_SET_GPIO_STATE,
&set, sizeof(set));
- if (ret || set.gpio != 0)
+ if (ret || set.gpio != 0) {
dev_err(gc->parent,
"Failed to set GPIO %u state (%d %x)\n", off, ret,
set.gpio);
+ return ret ? ret : -EIO;
+ }
+
+ return 0;
}
static int rpi_exp_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index c34dcadaee36..5a69e4534591 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -35,14 +35,20 @@ static int rc5t583_gpio_get(struct gpio_chip *gc, unsigned int offset)
return !!(val & BIT(offset));
}
-static void rc5t583_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+static int rc5t583_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct rc5t583_gpio *rc5t583_gpio = gpiochip_get_data(gc);
struct device *parent = rc5t583_gpio->rc5t583->dev;
+ int ret;
+
if (val)
- rc5t583_set_bits(parent, RC5T583_GPIO_IOOUT, BIT(offset));
+ ret = rc5t583_set_bits(parent, RC5T583_GPIO_IOOUT,
+ BIT(offset));
else
- rc5t583_clear_bits(parent, RC5T583_GPIO_IOOUT, BIT(offset));
+ ret = rc5t583_clear_bits(parent, RC5T583_GPIO_IOOUT,
+ BIT(offset));
+
+ return ret;
}
static int rc5t583_gpio_dir_input(struct gpio_chip *gc, unsigned int offset)
@@ -66,7 +72,10 @@ static int rc5t583_gpio_dir_output(struct gpio_chip *gc, unsigned offset,
struct device *parent = rc5t583_gpio->rc5t583->dev;
int ret;
- rc5t583_gpio_set(gc, offset, value);
+ ret = rc5t583_gpio_set(gc, offset, value);
+ if (ret)
+ return ret;
+
ret = rc5t583_set_bits(parent, RC5T583_GPIO_IOSEL, BIT(offset));
if (ret < 0)
return ret;
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 18c965ee02c8..86777e097fd8 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -331,14 +331,11 @@ static int gpio_rcar_get(struct gpio_chip *chip, unsigned offset)
static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
+ u32 bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
- u32 bankmask, outputs, m, val = 0;
+ u32 outputs, m, val = 0;
unsigned long flags;
- bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
- if (!bankmask)
- return 0;
-
if (p->info.has_always_in) {
bits[0] = gpio_rcar_read(p, INDT) & bankmask;
return 0;
@@ -359,7 +356,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
return 0;
}
-static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value)
+static int gpio_rcar_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
unsigned long flags;
@@ -367,18 +364,17 @@ static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value)
raw_spin_lock_irqsave(&p->lock, flags);
gpio_rcar_modify_bit(p, OUTDT, offset, value);
raw_spin_unlock_irqrestore(&p->lock, flags);
+
+ return 0;
}
-static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
+ u32 bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
unsigned long flags;
- u32 val, bankmask;
-
- bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
- if (!bankmask)
- return;
+ u32 val;
raw_spin_lock_irqsave(&p->lock, flags);
val = gpio_rcar_read(p, OUTDT);
@@ -386,6 +382,8 @@ static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
val |= (bankmask & bits[0]);
gpio_rcar_write(p, OUTDT, val);
raw_spin_unlock_irqrestore(&p->lock, flags);
+
+ return 0;
}
static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
@@ -594,7 +592,6 @@ static void gpio_rcar_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-#ifdef CONFIG_PM_SLEEP
static int gpio_rcar_suspend(struct device *dev)
{
struct gpio_rcar_priv *p = dev_get_drvdata(dev);
@@ -653,16 +650,16 @@ static int gpio_rcar_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP*/
-static SIMPLE_DEV_PM_OPS(gpio_rcar_pm_ops, gpio_rcar_suspend, gpio_rcar_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(gpio_rcar_pm_ops, gpio_rcar_suspend,
+ gpio_rcar_resume);
static struct platform_driver gpio_rcar_device_driver = {
.probe = gpio_rcar_probe,
.remove = gpio_rcar_remove,
.driver = {
.name = "gpio_rcar",
- .pm = &gpio_rcar_pm_ops,
+ .pm = pm_sleep_ptr(&gpio_rcar_pm_ops),
.of_match_table = gpio_rcar_of_table,
}
};
diff --git a/drivers/gpio/gpio-rda.c b/drivers/gpio/gpio-rda.c
index cb2f63eee2aa..7bbc6f0ce4c8 100644
--- a/drivers/gpio/gpio-rda.c
+++ b/drivers/gpio/gpio-rda.c
@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -35,7 +36,7 @@
#define RDA_GPIO_BANK_NR 32
struct rda_gpio {
- struct gpio_chip chip;
+ struct gpio_generic_chip chip;
void __iomem *base;
spinlock_t lock;
int irq;
@@ -208,6 +209,7 @@ static const struct irq_chip rda_gpio_irq_chip = {
static int rda_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
struct rda_gpio *rda_gpio;
@@ -235,24 +237,29 @@ static int rda_gpio_probe(struct platform_device *pdev)
spin_lock_init(&rda_gpio->lock);
- ret = bgpio_init(&rda_gpio->chip, dev, 4,
- rda_gpio->base + RDA_GPIO_VAL,
- rda_gpio->base + RDA_GPIO_SET,
- rda_gpio->base + RDA_GPIO_CLR,
- rda_gpio->base + RDA_GPIO_OEN_SET_OUT,
- rda_gpio->base + RDA_GPIO_OEN_SET_IN,
- BGPIOF_READ_OUTPUT_REG_SET);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = rda_gpio->base + RDA_GPIO_VAL,
+ .set = rda_gpio->base + RDA_GPIO_SET,
+ .clr = rda_gpio->base + RDA_GPIO_CLR,
+ .dirout = rda_gpio->base + RDA_GPIO_OEN_SET_OUT,
+ .dirin = rda_gpio->base + RDA_GPIO_OEN_SET_IN,
+ .flags = GPIO_GENERIC_READ_OUTPUT_REG_SET,
+ };
+
+ ret = gpio_generic_chip_init(&rda_gpio->chip, &config);
if (ret) {
- dev_err(dev, "bgpio_init failed\n");
+ dev_err(dev, "failed to initialize the generic GPIO chip\n");
return ret;
}
- rda_gpio->chip.label = dev_name(dev);
- rda_gpio->chip.ngpio = ngpios;
- rda_gpio->chip.base = -1;
+ rda_gpio->chip.gc.label = dev_name(dev);
+ rda_gpio->chip.gc.ngpio = ngpios;
+ rda_gpio->chip.gc.base = -1;
if (rda_gpio->irq >= 0) {
- girq = &rda_gpio->chip.irq;
+ girq = &rda_gpio->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &rda_gpio_irq_chip);
girq->handler = handle_bad_irq;
girq->default_type = IRQ_TYPE_NONE;
@@ -269,7 +276,7 @@ static int rda_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rda_gpio);
- return devm_gpiochip_add_data(dev, &rda_gpio->chip, rda_gpio);
+ return devm_gpiochip_add_data(dev, &rda_gpio->chip.gc, rda_gpio);
}
static const struct of_device_id rda_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index ec7fb9220a47..ba62b81aa8ae 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -64,8 +64,8 @@ static void rdc_gpio_set_value_impl(struct gpio_chip *chip,
}
/* set GPIO pin to value */
-static void rdc_gpio_set_value(struct gpio_chip *chip,
- unsigned gpio, int value)
+static int rdc_gpio_set_value(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
struct rdc321x_gpio *gpch;
@@ -73,6 +73,8 @@ static void rdc_gpio_set_value(struct gpio_chip *chip,
spin_lock(&gpch->lock);
rdc_gpio_set_value_impl(chip, gpio, value);
spin_unlock(&gpch->lock);
+
+ return 0;
}
static int rdc_gpio_config(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-realtek-otto.c b/drivers/gpio/gpio-realtek-otto.c
index d6418f89d3f6..de527f4fc6c2 100644
--- a/drivers/gpio/gpio-realtek-otto.c
+++ b/drivers/gpio/gpio-realtek-otto.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/gpio/driver.h>
#include <linux/cpumask.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/irq.h>
#include <linux/minmax.h>
#include <linux/mod_devicetable.h>
@@ -41,7 +42,7 @@
/**
* realtek_gpio_ctrl - Realtek Otto GPIO driver data
*
- * @gc: Associated gpio_chip instance
+ * @chip: Associated gpio_generic_chip instance
* @base: Base address of the register block for a GPIO bank
* @lock: Lock for accessing the IRQ registers and values
* @intr_mask: Mask for interrupts lines
@@ -64,7 +65,7 @@
* IMR on changes.
*/
struct realtek_gpio_ctrl {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *base;
void __iomem *cpumask_base;
struct cpumask cpu_irq_maskable;
@@ -101,7 +102,7 @@ static struct realtek_gpio_ctrl *irq_data_to_ctrl(struct irq_data *data)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
- return container_of(gc, struct realtek_gpio_ctrl, gc);
+ return container_of(to_gpio_generic_chip(gc), struct realtek_gpio_ctrl, chip);
}
/*
@@ -194,7 +195,7 @@ static void realtek_gpio_irq_unmask(struct irq_data *data)
unsigned int line = irqd_to_hwirq(data);
unsigned long flags;
- gpiochip_enable_irq(&ctrl->gc, line);
+ gpiochip_enable_irq(&ctrl->chip.gc, line);
raw_spin_lock_irqsave(&ctrl->lock, flags);
ctrl->intr_mask[line] = REALTEK_GPIO_IMR_LINE_MASK;
@@ -213,7 +214,7 @@ static void realtek_gpio_irq_mask(struct irq_data *data)
realtek_gpio_update_line_imr(ctrl, line);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
- gpiochip_disable_irq(&ctrl->gc, line);
+ gpiochip_disable_irq(&ctrl->chip.gc, line);
}
static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
@@ -356,8 +357,9 @@ MODULE_DEVICE_TABLE(of, realtek_gpio_of_match);
static int realtek_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
- unsigned long bgpio_flags;
+ unsigned long gen_gc_flags;
unsigned int dev_flags;
struct gpio_irq_chip *girq;
struct realtek_gpio_ctrl *ctrl;
@@ -388,32 +390,37 @@ static int realtek_gpio_probe(struct platform_device *pdev)
raw_spin_lock_init(&ctrl->lock);
if (dev_flags & GPIO_PORTS_REVERSED) {
- bgpio_flags = 0;
+ gen_gc_flags = 0;
ctrl->bank_read = realtek_gpio_bank_read;
ctrl->bank_write = realtek_gpio_bank_write;
ctrl->line_imr_pos = realtek_gpio_line_imr_pos;
} else {
- bgpio_flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+ gen_gc_flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
ctrl->bank_read = realtek_gpio_bank_read_swapped;
ctrl->bank_write = realtek_gpio_bank_write_swapped;
ctrl->line_imr_pos = realtek_gpio_line_imr_pos_swapped;
}
- err = bgpio_init(&ctrl->gc, dev, 4,
- ctrl->base + REALTEK_GPIO_REG_DATA, NULL, NULL,
- ctrl->base + REALTEK_GPIO_REG_DIR, NULL,
- bgpio_flags);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = ctrl->base + REALTEK_GPIO_REG_DATA,
+ .dirout = ctrl->base + REALTEK_GPIO_REG_DIR,
+ .flags = gen_gc_flags,
+ };
+
+ err = gpio_generic_chip_init(&ctrl->chip, &config);
if (err) {
dev_err(dev, "unable to init generic GPIO");
return err;
}
- ctrl->gc.ngpio = ngpios;
- ctrl->gc.owner = THIS_MODULE;
+ ctrl->chip.gc.ngpio = ngpios;
+ ctrl->chip.gc.owner = THIS_MODULE;
irq = platform_get_irq_optional(pdev, 0);
if (!(dev_flags & GPIO_INTERRUPTS_DISABLED) && irq > 0) {
- girq = &ctrl->gc.irq;
+ girq = &ctrl->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &realtek_gpio_irq_chip);
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
@@ -442,7 +449,7 @@ static int realtek_gpio_probe(struct platform_device *pdev)
cpumask_set_cpu(cpu, &ctrl->cpu_irq_maskable);
}
- return devm_gpiochip_add_data(dev, &ctrl->gc, ctrl);
+ return devm_gpiochip_add_data(dev, &ctrl->chip.gc, ctrl);
}
static struct platform_driver realtek_gpio_driver = {
diff --git a/drivers/gpio/gpio-reg.c b/drivers/gpio/gpio-reg.c
index 73c7260d89c0..f2238196faf1 100644
--- a/drivers/gpio/gpio-reg.c
+++ b/drivers/gpio/gpio-reg.c
@@ -57,7 +57,7 @@ static int gpio_reg_direction_input(struct gpio_chip *gc, unsigned offset)
return r->direction & BIT(offset) ? 0 : -ENOTSUPP;
}
-static void gpio_reg_set(struct gpio_chip *gc, unsigned offset, int value)
+static int gpio_reg_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct gpio_reg *r = to_gpio_reg(gc);
unsigned long flags;
@@ -72,6 +72,8 @@ static void gpio_reg_set(struct gpio_chip *gc, unsigned offset, int value)
r->out = val;
writel_relaxed(val, r->reg);
spin_unlock_irqrestore(&r->lock, flags);
+
+ return 0;
}
static int gpio_reg_get(struct gpio_chip *gc, unsigned offset)
@@ -92,8 +94,8 @@ static int gpio_reg_get(struct gpio_chip *gc, unsigned offset)
return !!(val & mask);
}
-static void gpio_reg_set_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int gpio_reg_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
struct gpio_reg *r = to_gpio_reg(gc);
unsigned long flags;
@@ -102,6 +104,8 @@ static void gpio_reg_set_multiple(struct gpio_chip *gc, unsigned long *mask,
r->out = (r->out & ~*mask) | (*bits & *mask);
writel_relaxed(r->out, r->reg);
spin_unlock_irqrestore(&r->lock, flags);
+
+ return 0;
}
static int gpio_reg_to_irq(struct gpio_chip *gc, unsigned offset)
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index 87c4225784cf..e5ba38e65c10 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -31,6 +31,12 @@ struct gpio_regmap {
unsigned int reg_clr_base;
unsigned int reg_dir_in_base;
unsigned int reg_dir_out_base;
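+ /* Optional bitmap of lines whose direction is fixed to output */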
+ unsigned long *fixed_direction_output;
+
+#ifdef CONFIG_REGMAP_IRQ
+ int regmap_irq_line;
+ struct regmap_irq_chip_data *irq_chip_data;
+#endif
int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base,
unsigned int offset, unsigned int *reg,
@@ -76,7 +82,11 @@ static int gpio_regmap_get(struct gpio_chip *chip, unsigned int offset)
if (ret)
return ret;
- ret = regmap_read(gpio->regmap, reg, &val);
+ /* ensure we don't spoil any register cache with pin input values */
+ if (gpio->reg_dat_base == gpio->reg_set_base)
+ ret = regmap_read_bypassed(gpio->regmap, reg, &val);
+ else
+ ret = regmap_read(gpio->regmap, reg, &val);
if (ret)
return ret;
@@ -88,7 +98,7 @@ static int gpio_regmap_set(struct gpio_chip *chip, unsigned int offset,
{
struct gpio_regmap *gpio = gpiochip_get_data(chip);
unsigned int base = gpio_regmap_addr(gpio->reg_set_base);
- unsigned int reg, mask;
+ unsigned int reg, mask, mask_val;
int ret;
ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
@@ -96,9 +106,15 @@ static int gpio_regmap_set(struct gpio_chip *chip, unsigned int offset,
return ret;
if (val)
- ret = regmap_update_bits(gpio->regmap, reg, mask, mask);
+ mask_val = mask;
else
- ret = regmap_update_bits(gpio->regmap, reg, mask, 0);
+ mask_val = 0;
+
+ /*
+ * Ignore input values which shadow the old output value:
+ * regmap_write_bits() always performs the bus write, so a stale
+ * read-back of the pin state cannot suppress the update.
+ */
+ if (gpio->reg_dat_base == gpio->reg_set_base)
+ ret = regmap_write_bits(gpio->regmap, reg, mask, mask_val);
+ else
+ ret = regmap_update_bits(gpio->regmap, reg, mask, mask_val);
return ret;
}
@@ -129,6 +145,13 @@ static int gpio_regmap_get_direction(struct gpio_chip *chip,
unsigned int base, val, reg, mask;
int invert, ret;
+ if (gpio->fixed_direction_output) {
+ if (test_bit(offset, gpio->fixed_direction_output))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+ }
+
if (gpio->reg_dat_base && !gpio->reg_set_base)
return GPIO_LINE_DIRECTION_IN;
if (gpio->reg_set_base && !gpio->reg_dat_base)
@@ -215,6 +238,7 @@ EXPORT_SYMBOL_GPL(gpio_regmap_get_drvdata);
*/
struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config)
{
+ struct irq_domain *irq_domain;
struct gpio_regmap *gpio;
struct gpio_chip *chip;
int ret;
@@ -255,14 +279,15 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
chip->names = config->names;
chip->label = config->label ?: dev_name(config->parent);
chip->can_sleep = regmap_might_sleep(config->regmap);
+ chip->init_valid_mask = config->init_valid_mask;
chip->request = gpiochip_generic_request;
chip->free = gpiochip_generic_free;
chip->get = gpio_regmap_get;
if (gpio->reg_set_base && gpio->reg_clr_base)
- chip->set_rv = gpio_regmap_set_with_clear;
+ chip->set = gpio_regmap_set_with_clear;
else if (gpio->reg_set_base)
- chip->set_rv = gpio_regmap_set;
+ chip->set = gpio_regmap_set;
chip->get_direction = gpio_regmap_get_direction;
if (gpio->reg_dir_in_base || gpio->reg_dir_out_base) {
@@ -274,7 +299,18 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
if (!chip->ngpio) {
ret = gpiochip_get_ngpios(chip, chip->parent);
if (ret)
- return ERR_PTR(ret);
+ goto err_free_gpio;
+ }
+
+ if (config->fixed_direction_output) {
+ gpio->fixed_direction_output = bitmap_alloc(chip->ngpio,
+ GFP_KERNEL);
+ if (!gpio->fixed_direction_output) {
+ ret = -ENOMEM;
+ goto err_free_gpio;
+ }
+ bitmap_copy(gpio->fixed_direction_output,
+ config->fixed_direction_output, chip->ngpio);
}
/* if not set, assume there is only one register */
@@ -293,10 +329,24 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
ret = gpiochip_add_data(chip, gpio);
if (ret < 0)
- goto err_free_gpio;
+ goto err_free_bitmap;
+
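+ /*
+ * If the caller supplied a regmap IRQ chip, register it and use the
+ * resulting IRQ domain; otherwise fall back to config->irq_domain.
+ */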
+#ifdef CONFIG_REGMAP_IRQ
+ if (config->regmap_irq_chip) {
+ gpio->regmap_irq_line = config->regmap_irq_line;
+ ret = regmap_add_irq_chip_fwnode(dev_fwnode(config->parent), config->regmap,
+ config->regmap_irq_line, config->regmap_irq_flags,
+ 0, config->regmap_irq_chip, &gpio->irq_chip_data);
+ if (ret)
+ goto err_free_bitmap;
+
+ irq_domain = regmap_irq_get_domain(gpio->irq_chip_data);
+ } else
+#endif
+ irq_domain = config->irq_domain;
- if (config->irq_domain) {
- ret = gpiochip_irqchip_add_domain(chip, config->irq_domain);
+ if (irq_domain) {
+ ret = gpiochip_irqchip_add_domain(chip, irq_domain);
if (ret)
goto err_remove_gpiochip;
}
@@ -305,6 +355,8 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
err_remove_gpiochip:
gpiochip_remove(chip);
+err_free_bitmap:
+ bitmap_free(gpio->fixed_direction_output);
err_free_gpio:
kfree(gpio);
return ERR_PTR(ret);
@@ -317,7 +369,13 @@ EXPORT_SYMBOL_GPL(gpio_regmap_register);
*/
void gpio_regmap_unregister(struct gpio_regmap *gpio)
{
+#ifdef CONFIG_REGMAP_IRQ
+ if (gpio->irq_chip_data)
+ regmap_del_irq_chip(gpio->regmap_irq_line, gpio->irq_chip_data);
+#endif
+
gpiochip_remove(&gpio->gpio_chip);
+ bitmap_free(gpio->fixed_direction_output);
kfree(gpio);
}
EXPORT_SYMBOL_GPL(gpio_regmap_unregister);
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index c63352f2f1ec..47174eb3ba76 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -177,8 +177,8 @@ static int rockchip_gpio_set_direction(struct gpio_chip *chip,
return 0;
}
-static void rockchip_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static int rockchip_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
unsigned long flags;
@@ -186,6 +186,8 @@ static void rockchip_gpio_set(struct gpio_chip *gc, unsigned int offset,
raw_spin_lock_irqsave(&bank->slock, flags);
rockchip_gpio_writel_bit(bank, offset, value, bank->gpio_regs->port_dr);
raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+ return 0;
}
static int rockchip_gpio_get(struct gpio_chip *gc, unsigned int offset)
@@ -521,8 +523,8 @@ static int rockchip_interrupts_register(struct rockchip_pin_bank *bank)
struct irq_chip_generic *gc;
int ret;
- bank->domain = irq_domain_create_linear(of_fwnode_handle(bank->of_node), 32,
- &irq_generic_chip_ops, NULL);
+ bank->domain = irq_domain_create_linear(dev_fwnode(bank->dev), 32, &irq_generic_chip_ops,
+ NULL);
if (!bank->domain) {
dev_warn(bank->dev, "could not init irq domain for bank %s\n",
bank->name);
@@ -767,7 +769,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
list_del(&cfg->head);
switch (cfg->param) {
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg);
if (ret)
dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin,
diff --git a/drivers/gpio/gpio-rtd.c b/drivers/gpio/gpio-rtd.c
index bf7f008f58d7..d46b40dd5283 100644
--- a/drivers/gpio/gpio-rtd.c
+++ b/drivers/gpio/gpio-rtd.c
@@ -275,7 +275,7 @@ static int rtd_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
}
}
-static void rtd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int rtd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct rtd_gpio *data = gpiochip_get_data(chip);
u32 mask = BIT(offset % 32);
@@ -292,6 +292,8 @@ static void rtd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
else
val &= ~mask;
writel_relaxed(val, data->base + dato_reg_offset);
+
+ return 0;
}
static int rtd_gpio_get(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 3f3ee36bc3cb..1938ffa2f4f3 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -43,11 +43,14 @@ static int sa1100_gpio_get(struct gpio_chip *chip, unsigned offset)
BIT(offset);
}
-static void sa1100_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int sa1100_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
int reg = value ? R_GPSR : R_GPCR;
writel_relaxed(BIT(offset), sa1100_gpio_chip(chip)->membase + reg);
+
+ return 0;
}
static int sa1100_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -253,7 +256,7 @@ static void sa1100_gpio_handler(struct irq_desc *desc)
} while (mask);
}
-static int sa1100_gpio_suspend(void)
+static int sa1100_gpio_suspend(void *data)
{
struct sa1100_gpio_chip *sgc = &sa1100_gpio_chip;
@@ -272,19 +275,23 @@ static int sa1100_gpio_suspend(void)
return 0;
}
-static void sa1100_gpio_resume(void)
+static void sa1100_gpio_resume(void *data)
{
sa1100_update_edge_regs(&sa1100_gpio_chip);
}
-static struct syscore_ops sa1100_gpio_syscore_ops = {
+static const struct syscore_ops sa1100_gpio_syscore_ops = {
.suspend = sa1100_gpio_suspend,
.resume = sa1100_gpio_resume,
};
+static struct syscore sa1100_gpio_syscore = {
+ .ops = &sa1100_gpio_syscore_ops,
+};
+
static int __init sa1100_gpio_init_devicefs(void)
{
- register_syscore_ops(&sa1100_gpio_syscore_ops);
+ register_syscore(&sa1100_gpio_syscore);
return 0;
}
diff --git a/drivers/gpio/gpio-sama5d2-piobu.c b/drivers/gpio/gpio-sama5d2-piobu.c
index d770a6f3d846..5005688f6e67 100644
--- a/drivers/gpio/gpio-sama5d2-piobu.c
+++ b/drivers/gpio/gpio-sama5d2-piobu.c
@@ -169,15 +169,15 @@ static int sama5d2_piobu_get(struct gpio_chip *chip, unsigned int pin)
/*
* sama5d2_piobu_set() - gpiochip set
*/
-static void sama5d2_piobu_set(struct gpio_chip *chip, unsigned int pin,
- int value)
+static int sama5d2_piobu_set(struct gpio_chip *chip, unsigned int pin,
+ int value)
{
if (!value)
value = PIOBU_LOW;
else
value = PIOBU_HIGH;
- sama5d2_piobu_write_value(chip, pin, PIOBU_SOD, value);
+ return sama5d2_piobu_write_value(chip, pin, PIOBU_SOD, value);
}
static int sama5d2_piobu_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index ff0341b1222f..966d16a6d515 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -117,7 +117,7 @@ static int sch_gpio_get(struct gpio_chip *gc, unsigned int gpio_num)
return sch_gpio_reg_get(sch, gpio_num, GLV);
}
-static void sch_gpio_set(struct gpio_chip *gc, unsigned int gpio_num, int val)
+static int sch_gpio_set(struct gpio_chip *gc, unsigned int gpio_num, int val)
{
struct sch_gpio *sch = gpiochip_get_data(gc);
unsigned long flags;
@@ -125,6 +125,8 @@ static void sch_gpio_set(struct gpio_chip *gc, unsigned int gpio_num, int val)
spin_lock_irqsave(&sch->lock, flags);
sch_gpio_reg_set(sch, gpio_num, GLV, val);
spin_unlock_irqrestore(&sch->lock, flags);
+
+ return 0;
}
static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned int gpio_num,
@@ -146,8 +148,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned int gpio_num,
* But we cannot prevent a short low pulse if direction is set to high
* and an external pull-up is connected.
*/
- sch_gpio_set(gc, gpio_num, val);
- return 0;
+ return sch_gpio_set(gc, gpio_num, val);
}
static int sch_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio_num)
diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c
index ba4fccf3cc94..f95566998d30 100644
--- a/drivers/gpio/gpio-sch311x.c
+++ b/drivers/gpio/gpio-sch311x.c
@@ -178,14 +178,16 @@ static void __sch311x_gpio_set(struct sch311x_gpio_block *block,
outb(data, block->runtime_reg + block->data_reg);
}
-static void sch311x_gpio_set(struct gpio_chip *chip, unsigned offset,
- int value)
+static int sch311x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct sch311x_gpio_block *block = gpiochip_get_data(chip);
spin_lock(&block->lock);
__sch311x_gpio_set(block, offset, value);
spin_unlock(&block->lock);
+
+ return 0;
}
static int sch311x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-shared-proxy.c b/drivers/gpio/gpio-shared-proxy.c
new file mode 100644
index 000000000000..29d7d2e4dfc0
--- /dev/null
+++ b/drivers/gpio/gpio-shared-proxy.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Linaro Ltd.
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/string_choices.h>
+#include <linux/types.h>
+
+#include "gpiolib-shared.h"
+
+struct gpio_shared_proxy_data {
+ struct gpio_chip gc;
+ struct gpio_shared_desc *shared_desc;
+ struct device *dev;
+ bool voted_high;
+};
+
+static int
+gpio_shared_proxy_set_unlocked(struct gpio_shared_proxy_data *proxy,
+ int (*set_func)(struct gpio_desc *desc, int value),
+ int value)
+{
+ struct gpio_shared_desc *shared_desc = proxy->shared_desc;
+ struct gpio_desc *desc = shared_desc->desc;
+ int ret = 0;
+
+ gpio_shared_lockdep_assert(shared_desc);
+
+ if (value) {
+ /* User wants to set value to high. */
+ if (proxy->voted_high)
+ /* Already voted for high, nothing to do. */
+ goto out;
+
+ /* Haven't voted for high yet. */
+ if (!shared_desc->highcnt) {
+ /*
+ * Current value is low, need to actually set value
+ * to high.
+ */
+ ret = set_func(desc, 1);
+ if (ret)
+ goto out;
+ }
+
+ shared_desc->highcnt++;
+ proxy->voted_high = true;
+
+ goto out;
+ }
+
+ /* Desired value is low. */
+ if (!proxy->voted_high)
+ /* We didn't vote for high, nothing to do. */
+ goto out;
+
+ /* We previously voted for high. */
+ if (shared_desc->highcnt == 1) {
+ /* This is the last remaining vote for high, set value to low. */
+ ret = set_func(desc, 0);
+ if (ret)
+ goto out;
+ }
+
+ shared_desc->highcnt--;
+ proxy->voted_high = false;
+
+out:
+ if (shared_desc->highcnt)
+ dev_dbg(proxy->dev,
+ "Voted for value '%s', effective value is 'high', number of votes for 'high': %u\n",
+ str_high_low(value), shared_desc->highcnt);
+ else
+ dev_dbg(proxy->dev, "Voted for value 'low', effective value is 'low'\n");
+
+ return ret;
+}
+
+static int gpio_shared_proxy_request(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+ struct gpio_shared_desc *shared_desc = proxy->shared_desc;
+
+ guard(gpio_shared_desc_lock)(shared_desc);
+
+ proxy->shared_desc->usecnt++;
+
+ dev_dbg(proxy->dev, "Shared GPIO requested, number of users: %u\n",
+ proxy->shared_desc->usecnt);
+
+ return 0;
+}
+
+static void gpio_shared_proxy_free(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+ struct gpio_shared_desc *shared_desc = proxy->shared_desc;
+
+ guard(gpio_shared_desc_lock)(shared_desc);
+
+ proxy->shared_desc->usecnt--;
+
+ dev_dbg(proxy->dev, "Shared GPIO freed, number of users: %u\n",
+ proxy->shared_desc->usecnt);
+}
+
+static int gpio_shared_proxy_set_config(struct gpio_chip *gc,
+ unsigned int offset, unsigned long cfg)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+ struct gpio_shared_desc *shared_desc = proxy->shared_desc;
+ struct gpio_desc *desc = shared_desc->desc;
+ int ret;
+
+ guard(gpio_shared_desc_lock)(shared_desc);
+
+ if (shared_desc->usecnt > 1) {
+ if (shared_desc->cfg != cfg) {
+ dev_dbg(proxy->dev,
+ "Shared GPIO's configuration already set, accepting changes but users may conflict!!\n");
+ } else {
+ dev_dbg(proxy->dev, "Equal config requested, nothing to do\n");
+ return 0;
+ }
+ }
+
+ ret = gpiod_set_config(desc, cfg);
+ if (ret && ret != -ENOTSUPP)
+ return ret;
+
+ shared_desc->cfg = cfg;
+ return 0;
+}
+
+static int gpio_shared_proxy_direction_input(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+ struct gpio_shared_desc *shared_desc = proxy->shared_desc;
+ struct gpio_desc *desc = shared_desc->desc;
+ int dir;
+
+ guard(gpio_shared_desc_lock)(shared_desc);
+
+ if (shared_desc->usecnt == 1) {
+ dev_dbg(proxy->dev,
+ "Only one user of this shared GPIO, allowing to set direction to input\n");
+
+ return gpiod_direction_input(desc);
+ }
+
+ dir = gpiod_get_direction(desc);
+ if (dir < 0)
+ return dir;
+
+ if (dir == GPIO_LINE_DIRECTION_OUT) {
+ dev_dbg(proxy->dev,
+ "Shared GPIO's direction already set to output, refusing to change\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int gpio_shared_proxy_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+ struct gpio_shared_desc *shared_desc = proxy->shared_desc;
+ struct gpio_desc *desc = shared_desc->desc;
+ int ret, dir;
+
+ guard(gpio_shared_desc_lock)(shared_desc);
+
+ if (shared_desc->usecnt == 1) {
+ dev_dbg(proxy->dev,
+ "Only one user of this shared GPIO, allowing to set direction to output with value '%s'\n",
+ str_high_low(value));
+
+ ret = gpiod_direction_output(desc, value);
+ if (ret)
+ return ret;
+
+ if (value) {
+ proxy->voted_high = true;
+ shared_desc->highcnt = 1;
+ } else {
+ proxy->voted_high = false;
+ shared_desc->highcnt = 0;
+ }
+
+ return 0;
+ }
+
+ dir = gpiod_get_direction(desc);
+ if (dir < 0)
+ return dir;
+
+ if (dir == GPIO_LINE_DIRECTION_IN) {
+ dev_dbg(proxy->dev,
+ "Shared GPIO's direction already set to input, refusing to change\n");
+ return -EPERM;
+ }
+
+ return gpio_shared_proxy_set_unlocked(proxy, gpiod_direction_output, value);
+}
+
+static int gpio_shared_proxy_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+
+ return gpiod_get_value(proxy->shared_desc->desc);
+}
+
+static int gpio_shared_proxy_get_cansleep(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+
+ return gpiod_get_value_cansleep(proxy->shared_desc->desc);
+}
+
+static int gpio_shared_proxy_do_set(struct gpio_shared_proxy_data *proxy,
+ int (*set_func)(struct gpio_desc *desc, int value),
+ int value)
+{
+ guard(gpio_shared_desc_lock)(proxy->shared_desc);
+
+ return gpio_shared_proxy_set_unlocked(proxy, set_func, value);
+}
+
+static int gpio_shared_proxy_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+
+ return gpio_shared_proxy_do_set(proxy, gpiod_set_value, value);
+}
+
+static int gpio_shared_proxy_set_cansleep(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+
+ return gpio_shared_proxy_do_set(proxy, gpiod_set_value_cansleep, value);
+}
+
+static int gpio_shared_proxy_get_direction(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+
+ return gpiod_get_direction(proxy->shared_desc->desc);
+}
+
+static int gpio_shared_proxy_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+
+ return gpiod_to_irq(proxy->shared_desc->desc);
+}
+
+static int gpio_shared_proxy_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct gpio_shared_proxy_data *proxy;
+ struct gpio_shared_desc *shared_desc;
+ struct device *dev = &adev->dev;
+ struct gpio_chip *gc;
+
+ shared_desc = devm_gpiod_shared_get(dev);
+ if (IS_ERR(shared_desc))
+ return PTR_ERR(shared_desc);
+
+ proxy = devm_kzalloc(dev, sizeof(*proxy), GFP_KERNEL);
+ if (!proxy)
+ return -ENOMEM;
+
+ proxy->shared_desc = shared_desc;
+ proxy->dev = dev;
+
+ gc = &proxy->gc;
+ gc->base = -1;
+ gc->ngpio = 1;
+ gc->label = dev_name(dev);
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+ gc->can_sleep = shared_desc->can_sleep;
+
+ gc->request = gpio_shared_proxy_request;
+ gc->free = gpio_shared_proxy_free;
+ gc->set_config = gpio_shared_proxy_set_config;
+ gc->direction_input = gpio_shared_proxy_direction_input;
+ gc->direction_output = gpio_shared_proxy_direction_output;
+ if (gc->can_sleep) {
+ gc->set = gpio_shared_proxy_set_cansleep;
+ gc->get = gpio_shared_proxy_get_cansleep;
+ } else {
+ gc->set = gpio_shared_proxy_set;
+ gc->get = gpio_shared_proxy_get;
+ }
+ gc->get_direction = gpio_shared_proxy_get_direction;
+ gc->to_irq = gpio_shared_proxy_to_irq;
+
+ return devm_gpiochip_add_data(dev, &proxy->gc, proxy);
+}
+
+static const struct auxiliary_device_id gpio_shared_proxy_id_table[] = {
+ { .name = "gpiolib_shared.proxy" },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, gpio_shared_proxy_id_table);
+
+static struct auxiliary_driver gpio_shared_proxy_driver = {
+ .driver = {
+ .name = "gpio-shared-proxy",
+ .suppress_bind_attrs = true,
+ },
+ .probe = gpio_shared_proxy_probe,
+ .id_table = gpio_shared_proxy_id_table,
+};
+module_auxiliary_driver(gpio_shared_proxy_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
+MODULE_DESCRIPTION("Shared GPIO mux driver.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
index 067c8edb62e2..94ef2efbd14f 100644
--- a/drivers/gpio/gpio-sifive.c
+++ b/drivers/gpio/gpio-sifive.c
@@ -7,6 +7,7 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -32,7 +33,7 @@
struct sifive_gpio {
void __iomem *base;
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
struct regmap *regs;
unsigned long irq_state;
unsigned int trigger[SIFIVE_GPIO_MAX];
@@ -41,10 +42,10 @@ struct sifive_gpio {
static void sifive_gpio_set_ie(struct sifive_gpio *chip, unsigned int offset)
{
- unsigned long flags;
unsigned int trigger;
- raw_spin_lock_irqsave(&chip->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
+
trigger = (chip->irq_state & BIT(offset)) ? chip->trigger[offset] : 0;
regmap_update_bits(chip->regs, SIFIVE_GPIO_RISE_IE, BIT(offset),
(trigger & IRQ_TYPE_EDGE_RISING) ? BIT(offset) : 0);
@@ -54,7 +55,6 @@ static void sifive_gpio_set_ie(struct sifive_gpio *chip, unsigned int offset)
(trigger & IRQ_TYPE_LEVEL_HIGH) ? BIT(offset) : 0);
regmap_update_bits(chip->regs, SIFIVE_GPIO_LOW_IE, BIT(offset),
(trigger & IRQ_TYPE_LEVEL_LOW) ? BIT(offset) : 0);
- raw_spin_unlock_irqrestore(&chip->gc.bgpio_lock, flags);
}
static int sifive_gpio_irq_set_type(struct irq_data *d, unsigned int trigger)
@@ -72,13 +72,12 @@ static int sifive_gpio_irq_set_type(struct irq_data *d, unsigned int trigger)
}
static void sifive_gpio_irq_enable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct sifive_gpio *chip = gpiochip_get_data(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
int offset = hwirq % SIFIVE_GPIO_MAX;
u32 bit = BIT(offset);
- unsigned long flags;
gpiochip_enable_irq(gc, hwirq);
irq_chip_enable_parent(d);
@@ -86,13 +85,13 @@ static void sifive_gpio_irq_enable(struct irq_data *d)
/* Switch to input */
gc->direction_input(gc, offset);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- /* Clear any sticky pending interrupts */
- regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &chip->gen_gc) {
+ /* Clear any sticky pending interrupts */
+ regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
+ }
/* Enable interrupts */
assign_bit(offset, &chip->irq_state, 1);
@@ -118,15 +117,14 @@ static void sifive_gpio_irq_eoi(struct irq_data *d)
struct sifive_gpio *chip = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX;
u32 bit = BIT(offset);
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- /* Clear all pending interrupts */
- regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &chip->gen_gc) {
+ /* Clear all pending interrupts */
+ regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
+ }
irq_chip_eoi_parent(d);
}
@@ -174,12 +172,12 @@ static const struct regmap_config sifive_gpio_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .fast_io = true,
.disable_locking = true,
};
static int sifive_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct irq_domain *parent;
struct gpio_irq_chip *girq;
@@ -218,13 +216,17 @@ static int sifive_gpio_probe(struct platform_device *pdev)
*/
parent = irq_get_irq_data(chip->irq_number[0])->domain;
- ret = bgpio_init(&chip->gc, dev, 4,
- chip->base + SIFIVE_GPIO_INPUT_VAL,
- chip->base + SIFIVE_GPIO_OUTPUT_VAL,
- NULL,
- chip->base + SIFIVE_GPIO_OUTPUT_EN,
- chip->base + SIFIVE_GPIO_INPUT_EN,
- BGPIOF_READ_OUTPUT_REG_SET);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = chip->base + SIFIVE_GPIO_INPUT_VAL,
+ .set = chip->base + SIFIVE_GPIO_OUTPUT_VAL,
+ .dirout = chip->base + SIFIVE_GPIO_OUTPUT_EN,
+ .dirin = chip->base + SIFIVE_GPIO_INPUT_EN,
+ .flags = GPIO_GENERIC_READ_OUTPUT_REG_SET,
+ };
+
+ ret = gpio_generic_chip_init(&chip->gen_gc, &config);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
@@ -237,12 +239,12 @@ static int sifive_gpio_probe(struct platform_device *pdev)
regmap_write(chip->regs, SIFIVE_GPIO_LOW_IE, 0);
chip->irq_state = 0;
- chip->gc.base = -1;
- chip->gc.ngpio = ngpio;
- chip->gc.label = dev_name(dev);
- chip->gc.parent = dev;
- chip->gc.owner = THIS_MODULE;
- girq = &chip->gc.irq;
+ chip->gen_gc.gc.base = -1;
+ chip->gen_gc.gc.ngpio = ngpio;
+ chip->gen_gc.gc.label = dev_name(dev);
+ chip->gen_gc.gc.parent = dev;
+ chip->gen_gc.gc.owner = THIS_MODULE;
+ girq = &chip->gen_gc.gc.irq;
gpio_irq_chip_set_chip(girq, &sifive_gpio_irqchip);
girq->fwnode = dev_fwnode(dev);
girq->parent_domain = parent;
@@ -250,7 +252,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
girq->handler = handle_bad_irq;
girq->default_type = IRQ_TYPE_NONE;
- return gpiochip_add_data(&chip->gc, chip);
+ return gpiochip_add_data(&chip->gen_gc.gc, chip);
}
static const struct of_device_id sifive_gpio_match[] = {
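The sifive change is the first of several conversions in this series from the positional bgpio_init() call to the gpio_generic_chip wrapper: the MMIO layout moves into a designated-initializer config and the gpio_chip is reached through the embedded .gc member. A minimal sketch of the pattern as it appears in these diffs, assuming a controller with only data and direction-out registers (my_gpio and the 0x00/0x04 offsets are illustrative):

struct my_gpio {
	void __iomem *base;
	struct gpio_generic_chip chip;
};

static int my_gpio_setup(struct device *dev, struct my_gpio *mg)
{
	struct gpio_generic_chip_config config = {
		.dev = dev,
		.sz = 4,			/* 32-bit registers */
		.dat = mg->base + 0x00,		/* data register */
		.dirout = mg->base + 0x04,	/* 1 = output */
	};
	int ret;

	ret = gpio_generic_chip_init(&mg->chip, &config);
	if (ret)
		return ret;

	/* Chip-level fields now live in the embedded gpio_chip. */
	mg->chip.gc.base = -1;
	mg->chip.gc.owner = THIS_MODULE;

	return devm_gpiochip_add_data(dev, &mg->chip.gc, mg);
}

The old open-coded bgpio_lock likewise moves behind the wrapper and is taken via guard(gpio_generic_lock_irqsave), as sifive_gpio_set_ie() above shows.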
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index f638219a7c4f..a83f5238427c 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -39,7 +39,7 @@
#include "dev-sync-probe.h"
#define GPIO_SIM_NGPIO_MAX 1024
-#define GPIO_SIM_PROP_MAX 4 /* Max 3 properties + sentinel. */
+#define GPIO_SIM_PROP_MAX 5 /* Max 4 properties + sentinel. */
#define GPIO_SIM_NUM_ATTRS 3 /* value, pull and sentinel */
static DEFINE_IDA(gpio_sim_ida);
@@ -262,8 +262,7 @@ static void gpio_sim_dbg_show(struct seq_file *seq, struct gpio_chip *gc)
guard(mutex)(&chip->lock);
for_each_hwgpio(gc, i, label)
- seq_printf(seq, " gpio-%-3d (%s) %s,%s\n",
- gc->base + i,
+ seq_printf(seq, " gpio-%-3d (%s) %s,%s\n", i,
label ?: "<unused>",
test_bit(i, chip->direction_map) ? "input" :
test_bit(i, chip->value_map) ? "output-high" :
@@ -486,9 +485,9 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
gc->parent = dev;
gc->fwnode = swnode;
gc->get = gpio_sim_get;
- gc->set_rv = gpio_sim_set;
+ gc->set = gpio_sim_set;
gc->get_multiple = gpio_sim_get_multiple;
- gc->set_multiple_rv = gpio_sim_set_multiple;
+ gc->set_multiple = gpio_sim_set_multiple;
gc->direction_output = gpio_sim_direction_output;
gc->direction_input = gpio_sim_direction_input;
gc->get_direction = gpio_sim_get_direction;
@@ -629,6 +628,7 @@ struct gpio_sim_line {
unsigned int offset;
char *name;
+ bool valid;
/* There can only be one hog per line. */
struct gpio_sim_hog *hog;
@@ -744,6 +744,36 @@ gpio_sim_set_line_names(struct gpio_sim_bank *bank, char **line_names)
}
}
+static unsigned int gpio_sim_get_reserved_ranges_size(struct gpio_sim_bank *bank)
+{
+ struct gpio_sim_line *line;
+ unsigned int size = 0;
+
+ list_for_each_entry(line, &bank->line_list, siblings) {
+ if (line->valid)
+ continue;
+
+ size += 2;
+ }
+
+ return size;
+}
+
+static void gpio_sim_set_reserved_ranges(struct gpio_sim_bank *bank,
+ u32 *ranges)
+{
+ struct gpio_sim_line *line;
+ int i = 0;
+
+ list_for_each_entry(line, &bank->line_list, siblings) {
+ if (line->valid)
+ continue;
+
+ ranges[i++] = line->offset;
+ ranges[i++] = 1;
+ }
+}
+
static void gpio_sim_remove_hogs(struct gpio_sim_device *dev)
{
struct gpiod_hog *hog;
@@ -844,9 +874,10 @@ static struct fwnode_handle *
gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank,
struct fwnode_handle *parent)
{
+ unsigned int prop_idx = 0, line_names_size, ranges_size;
struct property_entry properties[GPIO_SIM_PROP_MAX];
- unsigned int prop_idx = 0, line_names_size;
char **line_names __free(kfree) = NULL;
+ u32 *ranges __free(kfree) = NULL;
memset(properties, 0, sizeof(properties));
@@ -870,6 +901,19 @@ gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank,
line_names, line_names_size);
}
+ ranges_size = gpio_sim_get_reserved_ranges_size(bank);
+ if (ranges_size) {
+ ranges = kcalloc(ranges_size, sizeof(u32), GFP_KERNEL);
+ if (!ranges)
+ return ERR_PTR(-ENOMEM);
+
+ gpio_sim_set_reserved_ranges(bank, ranges);
+
+ properties[prop_idx++] = PROPERTY_ENTRY_U32_ARRAY_LEN(
+ "gpio-reserved-ranges",
+ ranges, ranges_size);
+ }
+
return fwnode_create_software_node(properties, parent);
}
@@ -1189,8 +1233,41 @@ static ssize_t gpio_sim_line_config_name_store(struct config_item *item,
CONFIGFS_ATTR(gpio_sim_line_config_, name);
+static ssize_t
+gpio_sim_line_config_valid_show(struct config_item *item, char *page)
+{
+ struct gpio_sim_line *line = to_gpio_sim_line(item);
+ struct gpio_sim_device *dev = gpio_sim_line_get_device(line);
+
+ guard(mutex)(&dev->lock);
+
+ return sprintf(page, "%c\n", line->valid ? '1' : '0');
+}
+
+static ssize_t gpio_sim_line_config_valid_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct gpio_sim_line *line = to_gpio_sim_line(item);
+ struct gpio_sim_device *dev = gpio_sim_line_get_device(line);
+ bool valid;
+ int ret;
+
+ ret = kstrtobool(page, &valid);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&dev->lock);
+
+ line->valid = valid;
+
+ return count;
+}
+
+CONFIGFS_ATTR(gpio_sim_line_config_, valid);
+
static struct configfs_attribute *gpio_sim_line_config_attrs[] = {
&gpio_sim_line_config_attr_name,
+ &gpio_sim_line_config_attr_valid,
NULL
};
@@ -1399,6 +1476,7 @@ gpio_sim_bank_config_make_line_group(struct config_group *group,
line->parent = bank;
line->offset = offset;
+ line->valid = true;
list_add_tail(&line->siblings, &bank->line_list);
return &line->group;
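Lines marked invalid through the new configfs attribute are exported via the standard gpio-reserved-ranges property, one (offset, length) pair per invalid line. As a worked example (values illustrative, following gpio_sim_set_reserved_ranges() above): marking lines 2 and 5 of a bank invalid yields

	u32 ranges[] = { 2, 1, 5, 1 };	/* two offset/length pairs */

	properties[prop_idx++] = PROPERTY_ENTRY_U32_ARRAY_LEN(
			"gpio-reserved-ranges", ranges, ARRAY_SIZE(ranges));

which gpiolib then uses to clear those offsets from the chip's valid mask.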
diff --git a/drivers/gpio/gpio-siox.c b/drivers/gpio/gpio-siox.c
index 051bc99bdfb2..958034b9f3f3 100644
--- a/drivers/gpio/gpio-siox.c
+++ b/drivers/gpio/gpio-siox.c
@@ -160,8 +160,8 @@ static int gpio_siox_get(struct gpio_chip *chip, unsigned int offset)
return ret;
}
-static void gpio_siox_set(struct gpio_chip *chip,
- unsigned int offset, int value)
+static int gpio_siox_set(struct gpio_chip *chip,
+ unsigned int offset, int value)
{
struct gpio_siox_ddata *ddata = gpiochip_get_data(chip);
u8 mask = 1 << (19 - offset);
@@ -174,6 +174,8 @@ static void gpio_siox_set(struct gpio_chip *chip,
ddata->setdata[0] &= ~mask;
mutex_unlock(&ddata->lock);
+
+ return 0;
}
static int gpio_siox_direction_input(struct gpio_chip *chip,
@@ -191,8 +193,7 @@ static int gpio_siox_direction_output(struct gpio_chip *chip,
if (offset < 12)
return -EINVAL;
- gpio_siox_set(chip, offset, value);
- return 0;
+ return gpio_siox_set(chip, offset, value);
}
static int gpio_siox_get_direction(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/gpio/gpio-sloppy-logic-analyzer.c b/drivers/gpio/gpio-sloppy-logic-analyzer.c
index 8cf3b171c599..969dddd3d6fa 100644
--- a/drivers/gpio/gpio-sloppy-logic-analyzer.c
+++ b/drivers/gpio/gpio-sloppy-logic-analyzer.c
@@ -306,7 +306,7 @@ static void gpio_la_poll_remove(struct platform_device *pdev)
}
static const struct of_device_id gpio_la_poll_of_match[] = {
- { .compatible = GPIO_LA_NAME },
+ { .compatible = "gpio-sloppy-logic-analyzer" },
{ }
};
MODULE_DEVICE_TABLE(of, gpio_la_poll_of_match);
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index 6a3c4c625138..37c133837729 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -9,6 +9,7 @@
#include <linux/errno.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -39,7 +40,7 @@ struct sdv_gpio_chip_data {
void __iomem *gpio_pub_base;
struct irq_domain *id;
struct irq_chip_generic *gc;
- struct gpio_chip chip;
+ struct gpio_generic_chip gen_gc;
};
static int sdv_gpio_pub_set_type(struct irq_data *d, unsigned int type)
@@ -169,8 +170,8 @@ static int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST,
IRQ_LEVEL | IRQ_NOPROBE);
- sd->id = irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node), SDV_NUM_PUB_GPIOS,
- sd->irq_base, 0, &irq_domain_sdv_ops, sd);
+ sd->id = irq_domain_create_legacy(dev_fwnode(&pdev->dev), SDV_NUM_PUB_GPIOS, sd->irq_base,
+ 0, &irq_domain_sdv_ops, sd);
if (!sd->id)
return -ENODEV;
@@ -180,6 +181,7 @@ static int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
static int sdv_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
+ struct gpio_generic_chip_config config;
struct sdv_gpio_chip_data *sd;
int ret;
u32 mux_val;
@@ -206,15 +208,21 @@ static int sdv_gpio_probe(struct pci_dev *pdev,
if (!ret)
writel(mux_val, sd->gpio_pub_base + GPMUXCTL);
- ret = bgpio_init(&sd->chip, &pdev->dev, 4,
- sd->gpio_pub_base + GPINR, sd->gpio_pub_base + GPOUTR,
- NULL, sd->gpio_pub_base + GPOER, NULL, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = sd->gpio_pub_base + GPINR,
+ .set = sd->gpio_pub_base + GPOUTR,
+ .dirout = sd->gpio_pub_base + GPOER,
+ };
+
+ ret = gpio_generic_chip_init(&sd->gen_gc, &config);
if (ret)
return ret;
- sd->chip.ngpio = SDV_NUM_PUB_GPIOS;
+ sd->gen_gc.gc.ngpio = SDV_NUM_PUB_GPIOS;
- ret = devm_gpiochip_add_data(&pdev->dev, &sd->chip, sd);
+ ret = devm_gpiochip_add_data(&pdev->dev, &sd->gen_gc.gc, sd);
if (ret < 0) {
dev_err(&pdev->dev, "gpiochip_add() failed.\n");
return ret;
diff --git a/drivers/gpio/gpio-spacemit-k1.c b/drivers/gpio/gpio-spacemit-k1.c
index f027066365ff..eb66a15c002f 100644
--- a/drivers/gpio/gpio-spacemit-k1.c
+++ b/drivers/gpio/gpio-spacemit-k1.c
@@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -38,7 +39,7 @@
struct spacemit_gpio;
struct spacemit_gpio_bank {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct spacemit_gpio *sg;
void __iomem *base;
u32 irq_mask;
@@ -72,7 +73,7 @@ static irqreturn_t spacemit_gpio_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
for_each_set_bit(n, &pending, BITS_PER_LONG)
- handle_nested_irq(irq_find_mapping(gb->gc.irq.domain, n));
+ handle_nested_irq(irq_find_mapping(gb->chip.gc.irq.domain, n));
return IRQ_HANDLED;
}
@@ -143,7 +144,7 @@ static void spacemit_gpio_irq_print_chip(struct irq_data *data, struct seq_file
{
struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(data);
- seq_printf(p, "%s-%d", dev_name(gb->gc.parent), spacemit_gpio_bank_index(gb));
+ seq_printf(p, "%s-%d", dev_name(gb->chip.gc.parent), spacemit_gpio_bank_index(gb));
}
static struct irq_chip spacemit_gpio_chip = {
@@ -165,7 +166,7 @@ static bool spacemit_of_node_instance_match(struct gpio_chip *gc, unsigned int i
if (i >= SPACEMIT_NR_BANKS)
return false;
- return (gc == &sg->sgb[i].gc);
+ return (gc == &sg->sgb[i].chip.gc);
}
static int spacemit_gpio_add_bank(struct spacemit_gpio *sg,
@@ -173,7 +174,8 @@ static int spacemit_gpio_add_bank(struct spacemit_gpio *sg,
int index, int irq)
{
struct spacemit_gpio_bank *gb = &sg->sgb[index];
- struct gpio_chip *gc = &gb->gc;
+ struct gpio_generic_chip_config config;
+ struct gpio_chip *gc = &gb->chip.gc;
struct device *dev = sg->dev;
struct gpio_irq_chip *girq;
void __iomem *dat, *set, *clr, *dirin, *dirout;
@@ -187,9 +189,20 @@ static int spacemit_gpio_add_bank(struct spacemit_gpio *sg,
dirin = gb->base + SPACEMIT_GCDR;
dirout = gb->base + SPACEMIT_GSDR;
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = dat,
+ .set = set,
+ .clr = clr,
+ .dirout = dirout,
+ .dirin = dirin,
+ .flags = GPIO_GENERIC_UNREADABLE_REG_SET |
+ GPIO_GENERIC_UNREADABLE_REG_DIR,
+ };
+
/* This registers 32 GPIO lines per bank */
- ret = bgpio_init(gc, dev, 4, dat, set, clr, dirout, dirin,
- BGPIOF_UNREADABLE_REG_SET | BGPIOF_UNREADABLE_REG_DIR);
+ ret = gpio_generic_chip_init(&gb->chip, &config);
if (ret)
return dev_err_probe(dev, ret, "failed to init gpio chip\n");
@@ -221,7 +234,7 @@ static int spacemit_gpio_add_bank(struct spacemit_gpio *sg,
ret = devm_request_threaded_irq(dev, irq, NULL,
spacemit_gpio_irq_handler,
IRQF_ONESHOT | IRQF_SHARED,
- gb->gc.label, gb);
+ gb->chip.gc.label, gb);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to register IRQ\n");
@@ -278,6 +291,7 @@ static const struct of_device_id spacemit_gpio_dt_ids[] = {
{ .compatible = "spacemit,k1-gpio" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, spacemit_gpio_dt_ids);
static struct platform_driver spacemit_gpio_driver = {
.probe = spacemit_gpio_probe,
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index 51539185400d..96a0e1211500 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -51,13 +51,8 @@ struct spear_spics {
struct gpio_chip chip;
};
-/* gpio framework specific routines */
-static int spics_get_value(struct gpio_chip *chip, unsigned offset)
-{
- return -ENXIO;
-}
-
-static void spics_set_value(struct gpio_chip *chip, unsigned offset, int value)
+static int spics_set_value(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct spear_spics *spics = gpiochip_get_data(chip);
u32 tmp;
@@ -74,18 +69,14 @@ static void spics_set_value(struct gpio_chip *chip, unsigned offset, int value)
tmp &= ~(0x1 << spics->cs_value_bit);
tmp |= value << spics->cs_value_bit;
writel_relaxed(tmp, spics->base + spics->perip_cfg);
-}
-static int spics_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- return -ENXIO;
+ return 0;
}
static int spics_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
- spics_set_value(chip, offset, value);
- return 0;
+ return spics_set_value(chip, offset, value);
}
static int spics_request(struct gpio_chip *chip, unsigned offset)
@@ -148,9 +139,7 @@ static int spics_gpio_probe(struct platform_device *pdev)
spics->chip.base = -1;
spics->chip.request = spics_request;
spics->chip.free = spics_free;
- spics->chip.direction_input = spics_direction_input;
spics->chip.direction_output = spics_direction_output;
- spics->chip.get = spics_get_value;
spics->chip.set = spics_set_value;
spics->chip.label = dev_name(&pdev->dev);
spics->chip.parent = &pdev->dev;
diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c
index c117c11bfb29..413bcd0a4240 100644
--- a/drivers/gpio/gpio-sprd.c
+++ b/drivers/gpio/gpio-sprd.c
@@ -108,10 +108,12 @@ static int sprd_gpio_get(struct gpio_chip *chip, unsigned int offset)
return sprd_gpio_read(chip, offset, SPRD_GPIO_DATA);
}
-static void sprd_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int sprd_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
sprd_gpio_update(chip, offset, SPRD_GPIO_DATA, value);
+
+ return 0;
}
static void sprd_gpio_irq_mask(struct irq_data *data)
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index dce8ff322e47..6faf30347a36 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -54,7 +54,7 @@ static int stmpe_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(ret & mask);
}
-static void stmpe_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static int stmpe_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(chip);
struct stmpe *stmpe = stmpe_gpio->stmpe;
@@ -67,9 +67,9 @@ static void stmpe_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
* For them we need to write 0 to clear and 1 to set.
*/
if (stmpe->regs[STMPE_IDX_GPSR_LSB] == stmpe->regs[STMPE_IDX_GPCR_LSB])
- stmpe_set_bits(stmpe, reg, mask, val ? mask : 0);
- else
- stmpe_reg_write(stmpe, reg, mask);
+ return stmpe_set_bits(stmpe, reg, mask, val ? mask : 0);
+
+ return stmpe_reg_write(stmpe, reg, mask);
}
static int stmpe_gpio_get_direction(struct gpio_chip *chip,
@@ -98,8 +98,11 @@ static int stmpe_gpio_direction_output(struct gpio_chip *chip,
struct stmpe *stmpe = stmpe_gpio->stmpe;
u8 reg = stmpe->regs[STMPE_IDX_GPDR_LSB + (offset / 8)];
u8 mask = BIT(offset % 8);
+ int ret;
- stmpe_gpio_set(chip, offset, val);
+ ret = stmpe_gpio_set(chip, offset, val);
+ if (ret)
+ return ret;
return stmpe_set_bits(stmpe, reg, mask, mask);
}
@@ -259,9 +262,8 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d)
stmpe_gpio->regs[REG_IE][regoffset] |= mask;
}
-static void stmpe_dbg_show_one(struct seq_file *s,
- struct gpio_chip *gc,
- unsigned offset, unsigned gpio)
+static void stmpe_dbg_show_one(struct seq_file *s, struct gpio_chip *gc,
+ unsigned int offset)
{
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
struct stmpe *stmpe = stmpe_gpio->stmpe;
@@ -283,7 +285,7 @@ static void stmpe_dbg_show_one(struct seq_file *s,
if (dir) {
seq_printf(s, " gpio-%-3d (%-20.20s) out %s",
- gpio, label ?: "(none)", str_hi_lo(val));
+ offset, label ?: "(none)", str_hi_lo(val));
} else {
u8 edge_det_reg;
u8 rise_reg;
@@ -351,7 +353,7 @@ static void stmpe_dbg_show_one(struct seq_file *s,
irqen = !!(ret & mask);
seq_printf(s, " gpio-%-3d (%-20.20s) in %s %13s %13s %25s %25s",
- gpio, label ?: "(none)",
+ offset, label ?: "(none)",
str_hi_lo(val),
edge_det_values[edge_det],
irqen ? "IRQ-enabled" : "IRQ-disabled",
@@ -363,10 +365,9 @@ static void stmpe_dbg_show_one(struct seq_file *s,
static void stmpe_dbg_show(struct seq_file *s, struct gpio_chip *gc)
{
unsigned i;
- unsigned gpio = gc->base;
- for (i = 0; i < gc->ngpio; i++, gpio++) {
- stmpe_dbg_show_one(s, gc, i, gpio);
+ for (i = 0; i < gc->ngpio; i++) {
+ stmpe_dbg_show_one(s, gc, i);
seq_putc(s, '\n');
}
}
@@ -531,10 +532,16 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
return devm_gpiochip_add_data(dev, &stmpe_gpio->chip, stmpe_gpio);
}
+static const struct of_device_id stmpe_gpio_of_matches[] = {
+ { .compatible = "st,stmpe-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, stmpe_gpio_of_matches);
+
static struct platform_driver stmpe_gpio_driver = {
.driver = {
- .suppress_bind_attrs = true,
- .name = "stmpe-gpio",
+ .name = "stmpe-gpio",
+ .of_match_table = stmpe_gpio_of_matches,
},
.probe = stmpe_gpio_probe,
};
@@ -544,3 +551,13 @@ static int __init stmpe_gpio_init(void)
return platform_driver_register(&stmpe_gpio_driver);
}
subsys_initcall(stmpe_gpio_init);
+
+static void __exit stmpe_gpio_exit(void)
+{
+ platform_driver_unregister(&stmpe_gpio_driver);
+}
+module_exit(stmpe_gpio_exit);
+
+MODULE_DESCRIPTION("STMPE expander GPIO");
+MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index 5a6406d1f03a..493c027afdd6 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -113,7 +113,7 @@ static int xway_stp_get(struct gpio_chip *gc, unsigned int gpio)
*
* Set the shadow value and call ltq_ebu_apply.
*/
-static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val)
+static int xway_stp_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct xway_stp *chip = gpiochip_get_data(gc);
@@ -124,6 +124,8 @@ static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val)
xway_stp_w32(chip->virt, chip->shadow, XWAY_STP_CPU0);
if (!chip->reserved)
xway_stp_w32_mask(chip->virt, 0, XWAY_STP_CON_SWU, XWAY_STP_CON0);
+
+ return 0;
}
/**
@@ -136,9 +138,7 @@ static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val)
*/
static int xway_stp_dir_out(struct gpio_chip *gc, unsigned gpio, int val)
{
- xway_stp_set(gc, gpio, val);
-
- return 0;
+ return xway_stp_set(gc, gpio, val);
}
/**
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index 5ab394ec81e6..40064d4cf47f 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -40,8 +40,8 @@ struct syscon_gpio_data {
unsigned int bit_count;
unsigned int dat_bit_offset;
unsigned int dir_bit_offset;
- void (*set)(struct gpio_chip *chip,
- unsigned offset, int value);
+ int (*set)(struct gpio_chip *chip, unsigned int offset,
+ int value);
};
struct syscon_gpio_priv {
@@ -68,17 +68,17 @@ static int syscon_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(val & BIT(offs % SYSCON_REG_BITS));
}
-static void syscon_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static int syscon_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
struct syscon_gpio_priv *priv = gpiochip_get_data(chip);
unsigned int offs;
offs = priv->dreg_offset + priv->data->dat_bit_offset + offset;
- regmap_update_bits(priv->syscon,
- (offs / SYSCON_REG_BITS) * SYSCON_REG_SIZE,
- BIT(offs % SYSCON_REG_BITS),
- val ? BIT(offs % SYSCON_REG_BITS) : 0);
+ return regmap_update_bits(priv->syscon,
+ (offs / SYSCON_REG_BITS) * SYSCON_REG_SIZE,
+ BIT(offs % SYSCON_REG_BITS),
+ val ? BIT(offs % SYSCON_REG_BITS) : 0);
}
static int syscon_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
@@ -115,9 +115,7 @@ static int syscon_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int val)
BIT(offs % SYSCON_REG_BITS));
}
- chip->set(chip, offset, val);
-
- return 0;
+ return chip->set(chip, offset, val);
}
static const struct syscon_gpio_data clps711x_mctrl_gpio = {
@@ -127,8 +125,8 @@ static const struct syscon_gpio_data clps711x_mctrl_gpio = {
.dat_bit_offset = 0x40 * 8 + 8,
};
-static void rockchip_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int rockchip_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct syscon_gpio_priv *priv = gpiochip_get_data(chip);
unsigned int offs;
@@ -144,6 +142,8 @@ static void rockchip_gpio_set(struct gpio_chip *chip, unsigned int offset,
data);
if (ret < 0)
dev_err(chip->parent, "gpio write failed ret(%d)\n", ret);
+
+ return ret;
}
static const struct syscon_gpio_data rockchip_rk3328_gpio_mute = {
@@ -156,7 +156,8 @@ static const struct syscon_gpio_data rockchip_rk3328_gpio_mute = {
#define KEYSTONE_LOCK_BIT BIT(0)
-static void keystone_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static int keystone_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct syscon_gpio_priv *priv = gpiochip_get_data(chip);
unsigned int offs;
@@ -165,7 +166,7 @@ static void keystone_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
offs = priv->dreg_offset + priv->data->dat_bit_offset + offset;
if (!val)
- return;
+ return 0;
ret = regmap_update_bits(
priv->syscon,
@@ -174,6 +175,8 @@ static void keystone_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
BIT(offs % SYSCON_REG_BITS) | KEYSTONE_LOCK_BIT);
if (ret < 0)
dev_err(chip->parent, "gpio write failed ret(%d)\n", ret);
+
+ return ret;
}
static const struct syscon_gpio_data keystone_dsp_gpio = {
diff --git a/drivers/gpio/gpio-tangier.c b/drivers/gpio/gpio-tangier.c
index a415e6d36173..ba5a8ede8912 100644
--- a/drivers/gpio/gpio-tangier.c
+++ b/drivers/gpio/gpio-tangier.c
@@ -90,7 +90,7 @@ static int tng_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(readl(gplr) & BIT(shift));
}
-static void tng_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int tng_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct tng_gpio *priv = gpiochip_get_data(chip);
void __iomem *reg;
@@ -101,6 +101,8 @@ static void tng_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
guard(raw_spinlock_irqsave)(&priv->lock);
writel(BIT(shift), reg);
+
+ return 0;
}
static int tng_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 8cf676fd0a0b..3c8fd322a713 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -7,20 +7,20 @@
* Christian Ruppert <christian.ruppert@abilis.com>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/bitops.h>
#include <linux/gpio/driver.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/spinlock.h>
-#include <linux/bitops.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
#define TB10X_GPIO_DIR_IN (0x00000000)
#define TB10X_GPIO_DIR_OUT (0x00000001)
@@ -36,13 +36,13 @@
* @base: register base address
* @domain: IRQ domain of GPIO generated interrupts managed by this controller
* @irq: Interrupt line of parent interrupt controller
- * @gc: gpio_chip structure associated to this GPIO controller
+ * @chip: Generic GPIO chip structure associated with this GPIO controller
*/
struct tb10x_gpio {
void __iomem *base;
struct irq_domain *domain;
int irq;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
};
static inline u32 tb10x_reg_read(struct tb10x_gpio *gpio, unsigned int offs)
@@ -50,28 +50,6 @@ static inline u32 tb10x_reg_read(struct tb10x_gpio *gpio, unsigned int offs)
return ioread32(gpio->base + offs);
}
-static inline void tb10x_reg_write(struct tb10x_gpio *gpio, unsigned int offs,
- u32 val)
-{
- iowrite32(val, gpio->base + offs);
-}
-
-static inline void tb10x_set_bits(struct tb10x_gpio *gpio, unsigned int offs,
- u32 mask, u32 val)
-{
- u32 r;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&gpio->gc.bgpio_lock, flags);
-
- r = tb10x_reg_read(gpio, offs);
- r = (r & ~mask) | (val & mask);
-
- tb10x_reg_write(gpio, offs, r);
-
- raw_spin_unlock_irqrestore(&gpio->gc.bgpio_lock, flags);
-}
-
static int tb10x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct tb10x_gpio *tb10x_gpio = gpiochip_get_data(chip);
@@ -107,6 +85,7 @@ static irqreturn_t tb10x_gpio_irq_cascade(int irq, void *data)
static int tb10x_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct tb10x_gpio *tb10x_gpio;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
@@ -127,9 +106,9 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (IS_ERR(tb10x_gpio->base))
return PTR_ERR(tb10x_gpio->base);
- tb10x_gpio->gc.label =
+ tb10x_gpio->chip.gc.label =
devm_kasprintf(dev, GFP_KERNEL, "%pOF", pdev->dev.of_node);
- if (!tb10x_gpio->gc.label)
+ if (!tb10x_gpio->chip.gc.label)
return -ENOMEM;
/*
@@ -137,29 +116,30 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
* the lines, no special set or clear registers and a data direction register
* where 1 means "output".
*/
- ret = bgpio_init(&tb10x_gpio->gc, dev, 4,
- tb10x_gpio->base + OFFSET_TO_REG_DATA,
- NULL,
- NULL,
- tb10x_gpio->base + OFFSET_TO_REG_DDR,
- NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = tb10x_gpio->base + OFFSET_TO_REG_DATA,
+ .dirout = tb10x_gpio->base + OFFSET_TO_REG_DDR,
+ };
+
+ ret = gpio_generic_chip_init(&tb10x_gpio->chip, &config);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
}
- tb10x_gpio->gc.base = -1;
- tb10x_gpio->gc.parent = dev;
- tb10x_gpio->gc.owner = THIS_MODULE;
+ tb10x_gpio->chip.gc.base = -1;
+ tb10x_gpio->chip.gc.parent = dev;
+ tb10x_gpio->chip.gc.owner = THIS_MODULE;
/*
- * ngpio is set by bgpio_init() but we override it, this .request()
- * callback also overrides the one set up by generic GPIO.
+ * ngpio is set by gpio_generic_chip_init() but we override it, this
+ * .request() callback also overrides the one set up by generic GPIO.
*/
- tb10x_gpio->gc.ngpio = ngpio;
- tb10x_gpio->gc.request = gpiochip_generic_request;
- tb10x_gpio->gc.free = gpiochip_generic_free;
+ tb10x_gpio->chip.gc.ngpio = ngpio;
+ tb10x_gpio->chip.gc.request = gpiochip_generic_request;
+ tb10x_gpio->chip.gc.free = gpiochip_generic_free;
- ret = devm_gpiochip_add_data(dev, &tb10x_gpio->gc, tb10x_gpio);
+ ret = devm_gpiochip_add_data(dev, &tb10x_gpio->chip.gc, tb10x_gpio);
if (ret < 0) {
dev_err(dev, "Could not add gpiochip.\n");
return ret;
@@ -174,7 +154,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- tb10x_gpio->gc.to_irq = tb10x_gpio_to_irq;
+ tb10x_gpio->chip.gc.to_irq = tb10x_gpio_to_irq;
tb10x_gpio->irq = ret;
ret = devm_request_irq(dev, ret, tb10x_gpio_irq_cascade,
@@ -183,15 +163,15 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (ret != 0)
return ret;
- tb10x_gpio->domain = irq_domain_create_linear(of_fwnode_handle(np),
- tb10x_gpio->gc.ngpio,
- &irq_generic_chip_ops, NULL);
+ tb10x_gpio->domain = irq_domain_create_linear(dev_fwnode(dev),
+ tb10x_gpio->chip.gc.ngpio,
+ &irq_generic_chip_ops, NULL);
if (!tb10x_gpio->domain) {
return -ENOMEM;
}
ret = irq_alloc_domain_generic_chips(tb10x_gpio->domain,
- tb10x_gpio->gc.ngpio, 1, tb10x_gpio->gc.label,
+ tb10x_gpio->chip.gc.ngpio, 1, tb10x_gpio->chip.gc.label,
handle_edge_irq, IRQ_NOREQUEST, IRQ_NOPROBE,
IRQ_GC_INIT_MASK_CACHE);
if (ret)
@@ -219,9 +199,9 @@ static void tb10x_gpio_remove(struct platform_device *pdev)
{
struct tb10x_gpio *tb10x_gpio = platform_get_drvdata(pdev);
- if (tb10x_gpio->gc.to_irq) {
+ if (tb10x_gpio->chip.gc.to_irq) {
irq_remove_generic_chip(tb10x_gpio->domain->gc->gc[0],
- BIT(tb10x_gpio->gc.ngpio) - 1, 0, 0);
+ BIT(tb10x_gpio->chip.gc.ngpio) - 1, 0, 0);
kfree(tb10x_gpio->domain->gc);
irq_domain_remove(tb10x_gpio->domain);
}
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index e62ee7e56908..90d048f9da08 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -49,7 +49,7 @@ static int tc3589x_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(ret & mask);
}
-static void tc3589x_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
+static int tc3589x_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
@@ -57,7 +57,7 @@ static void tc3589x_gpio_set(struct gpio_chip *chip, unsigned int offset, int va
unsigned int pos = offset % 8;
u8 data[] = {val ? BIT(pos) : 0, BIT(pos)};
- tc3589x_block_write(tc3589x, reg, ARRAY_SIZE(data), data);
+ return tc3589x_block_write(tc3589x, reg, ARRAY_SIZE(data), data);
}
static int tc3589x_gpio_direction_output(struct gpio_chip *chip,
@@ -67,8 +67,11 @@ static int tc3589x_gpio_direction_output(struct gpio_chip *chip,
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
u8 reg = TC3589x_GPIODIR0 + offset / 8;
unsigned int pos = offset % 8;
+ int ret;
- tc3589x_gpio_set(chip, offset, val);
+ ret = tc3589x_gpio_set(chip, offset, val);
+ if (ret)
+ return ret;
return tc3589x_set_bits(tc3589x, reg, BIT(pos), BIT(pos));
}
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 9ad286adf263..15a5762a82c2 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -146,12 +146,14 @@ static void tegra_gpio_free(struct gpio_chip *chip, unsigned int offset)
tegra_gpio_disable(tgi, offset);
}
-static void tegra_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int tegra_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
tegra_gpio_mask_write(tgi, GPIO_MSK_OUT(tgi, offset), offset, value);
+
+ return 0;
}
static int tegra_gpio_get(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index d27bfac6c9f5..b1498b59a921 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2016-2022 NVIDIA Corporation
+ * Copyright (c) 2016-2025 NVIDIA Corporation
*
* Author: Thierry Reding <treding@nvidia.com>
* Dipen Patel <dpatel@nvidia.com>
@@ -20,6 +20,7 @@
#include <dt-bindings/gpio/tegra194-gpio.h>
#include <dt-bindings/gpio/tegra234-gpio.h>
#include <dt-bindings/gpio/tegra241-gpio.h>
+#include <dt-bindings/gpio/tegra256-gpio.h>
/* security registers */
#define TEGRA186_GPIO_CTL_SCR 0x0c
@@ -68,6 +69,30 @@
#define TEGRA186_GPIO_INTERRUPT_STATUS(x) (0x100 + (x) * 4)
+/* Tegra410 GPIOs implemented by the COMPUTE GPIO controller */
+#define TEGRA410_COMPUTE_GPIO_PORT_A 0
+#define TEGRA410_COMPUTE_GPIO_PORT_B 1
+#define TEGRA410_COMPUTE_GPIO_PORT_C 2
+#define TEGRA410_COMPUTE_GPIO_PORT_D 3
+#define TEGRA410_COMPUTE_GPIO_PORT_E 4
+
+/* Tegra410 GPIOs implemented by the SYSTEM GPIO controller */
+#define TEGRA410_SYSTEM_GPIO_PORT_A 0
+#define TEGRA410_SYSTEM_GPIO_PORT_B 1
+#define TEGRA410_SYSTEM_GPIO_PORT_C 2
+#define TEGRA410_SYSTEM_GPIO_PORT_D 3
+#define TEGRA410_SYSTEM_GPIO_PORT_E 4
+#define TEGRA410_SYSTEM_GPIO_PORT_I 5
+#define TEGRA410_SYSTEM_GPIO_PORT_J 6
+#define TEGRA410_SYSTEM_GPIO_PORT_K 7
+#define TEGRA410_SYSTEM_GPIO_PORT_L 8
+#define TEGRA410_SYSTEM_GPIO_PORT_M 9
+#define TEGRA410_SYSTEM_GPIO_PORT_N 10
+#define TEGRA410_SYSTEM_GPIO_PORT_P 11
+#define TEGRA410_SYSTEM_GPIO_PORT_Q 12
+#define TEGRA410_SYSTEM_GPIO_PORT_R 13
+#define TEGRA410_SYSTEM_GPIO_PORT_V 14
+
struct tegra_gpio_port {
const char *name;
unsigned int bank;
@@ -84,6 +109,7 @@ struct tegra_gpio_soc {
const struct tegra_gpio_port *ports;
unsigned int num_ports;
const char *name;
+ const char *prefix;
unsigned int instance;
unsigned int num_irqs_per_bank;
@@ -202,6 +228,28 @@ static int tegra186_init_valid_mask(struct gpio_chip *chip,
return 0;
}
+static int tegra186_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int level)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ void __iomem *base;
+ u32 value;
+
+ base = tegra186_gpio_get_base(gpio, offset);
+ if (WARN_ON(base == NULL))
+ return -ENODEV;
+
+ value = readl(base + TEGRA186_GPIO_OUTPUT_VALUE);
+ if (level == 0)
+ value &= ~TEGRA186_GPIO_OUTPUT_VALUE_HIGH;
+ else
+ value |= TEGRA186_GPIO_OUTPUT_VALUE_HIGH;
+
+ writel(value, base + TEGRA186_GPIO_OUTPUT_VALUE);
+
+ return 0;
+}
+
static int tegra186_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
@@ -249,9 +297,12 @@ static int tegra186_gpio_direction_output(struct gpio_chip *chip,
struct tegra_gpio *gpio = gpiochip_get_data(chip);
void __iomem *base;
u32 value;
+ int ret;
/* configure output level first */
- chip->set(chip, offset, level);
+ ret = tegra186_gpio_set(chip, offset, level);
+ if (ret)
+ return ret;
base = tegra186_gpio_get_base(gpio, offset);
if (WARN_ON(base == NULL))
@@ -359,26 +410,6 @@ static int tegra186_gpio_get(struct gpio_chip *chip, unsigned int offset)
return value & BIT(0);
}
-static void tegra186_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int level)
-{
- struct tegra_gpio *gpio = gpiochip_get_data(chip);
- void __iomem *base;
- u32 value;
-
- base = tegra186_gpio_get_base(gpio, offset);
- if (WARN_ON(base == NULL))
- return;
-
- value = readl(base + TEGRA186_GPIO_OUTPUT_VALUE);
- if (level == 0)
- value &= ~TEGRA186_GPIO_OUTPUT_VALUE_HIGH;
- else
- value |= TEGRA186_GPIO_OUTPUT_VALUE_HIGH;
-
- writel(value, base + TEGRA186_GPIO_OUTPUT_VALUE);
-}
-
static int tegra186_gpio_set_config(struct gpio_chip *chip,
unsigned int offset,
unsigned long config)
@@ -910,8 +941,12 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
char *name;
for (j = 0; j < port->pins; j++) {
- name = devm_kasprintf(gpio->gpio.parent, GFP_KERNEL,
- "P%s.%02x", port->name, j);
+ if (gpio->soc->prefix)
+ name = devm_kasprintf(gpio->gpio.parent, GFP_KERNEL, "%s-P%s.%02x",
+ gpio->soc->prefix, port->name, j);
+ else
+ name = devm_kasprintf(gpio->gpio.parent, GFP_KERNEL, "P%s.%02x",
+ port->name, j);
if (!name)
return -ENOMEM;
@@ -996,14 +1031,17 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
return devm_gpiochip_add_data(&pdev->dev, &gpio->gpio, gpio);
}
-#define TEGRA186_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
- [TEGRA186_MAIN_GPIO_PORT_##_name] = { \
- .name = #_name, \
- .bank = _bank, \
- .port = _port, \
- .pins = _pins, \
+#define TEGRA_GPIO_PORT(_prefix, _name, _bank, _port, _pins) \
+ [_prefix##_GPIO_PORT_##_name] = { \
+ .name = #_name, \
+ .bank = _bank, \
+ .port = _port, \
+ .pins = _pins, \
}
+#define TEGRA186_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA186_MAIN, _name, _bank, _port, _pins)
+
static const struct tegra_gpio_port tegra186_main_ports[] = {
TEGRA186_MAIN_GPIO_PORT( A, 2, 0, 7),
TEGRA186_MAIN_GPIO_PORT( B, 3, 0, 7),
@@ -1039,13 +1077,8 @@ static const struct tegra_gpio_soc tegra186_main_soc = {
.has_vm_support = false,
};
-#define TEGRA186_AON_GPIO_PORT(_name, _bank, _port, _pins) \
- [TEGRA186_AON_GPIO_PORT_##_name] = { \
- .name = #_name, \
- .bank = _bank, \
- .port = _port, \
- .pins = _pins, \
- }
+#define TEGRA186_AON_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA186_AON, _name, _bank, _port, _pins)
static const struct tegra_gpio_port tegra186_aon_ports[] = {
TEGRA186_AON_GPIO_PORT( S, 0, 1, 5),
@@ -1067,13 +1100,8 @@ static const struct tegra_gpio_soc tegra186_aon_soc = {
.has_vm_support = false,
};
-#define TEGRA194_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
- [TEGRA194_MAIN_GPIO_PORT_##_name] = { \
- .name = #_name, \
- .bank = _bank, \
- .port = _port, \
- .pins = _pins, \
- }
+#define TEGRA194_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA194_MAIN, _name, _bank, _port, _pins)
static const struct tegra_gpio_port tegra194_main_ports[] = {
TEGRA194_MAIN_GPIO_PORT( A, 1, 2, 8),
@@ -1123,13 +1151,8 @@ static const struct tegra_gpio_soc tegra194_main_soc = {
.has_vm_support = true,
};
-#define TEGRA194_AON_GPIO_PORT(_name, _bank, _port, _pins) \
- [TEGRA194_AON_GPIO_PORT_##_name] = { \
- .name = #_name, \
- .bank = _bank, \
- .port = _port, \
- .pins = _pins, \
- }
+#define TEGRA194_AON_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA194_AON, _name, _bank, _port, _pins)
static const struct tegra_gpio_port tegra194_aon_ports[] = {
TEGRA194_AON_GPIO_PORT(AA, 0, 3, 8),
@@ -1149,13 +1172,8 @@ static const struct tegra_gpio_soc tegra194_aon_soc = {
.has_vm_support = false,
};
-#define TEGRA234_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
- [TEGRA234_MAIN_GPIO_PORT_##_name] = { \
- .name = #_name, \
- .bank = _bank, \
- .port = _port, \
- .pins = _pins, \
- }
+#define TEGRA234_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA234_MAIN, _name, _bank, _port, _pins)
static const struct tegra_gpio_port tegra234_main_ports[] = {
TEGRA234_MAIN_GPIO_PORT( A, 0, 0, 8),
@@ -1194,13 +1212,8 @@ static const struct tegra_gpio_soc tegra234_main_soc = {
.has_vm_support = true,
};
-#define TEGRA234_AON_GPIO_PORT(_name, _bank, _port, _pins) \
- [TEGRA234_AON_GPIO_PORT_##_name] = { \
- .name = #_name, \
- .bank = _bank, \
- .port = _port, \
- .pins = _pins, \
- }
+#define TEGRA234_AON_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA234_AON, _name, _bank, _port, _pins)
static const struct tegra_gpio_port tegra234_aon_ports[] = {
TEGRA234_AON_GPIO_PORT(AA, 0, 4, 8),
@@ -1221,13 +1234,8 @@ static const struct tegra_gpio_soc tegra234_aon_soc = {
.has_vm_support = false,
};
-#define TEGRA241_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
- [TEGRA241_MAIN_GPIO_PORT_##_name] = { \
- .name = #_name, \
- .bank = _bank, \
- .port = _port, \
- .pins = _pins, \
- }
+#define TEGRA241_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA241_MAIN, _name, _bank, _port, _pins)
static const struct tegra_gpio_port tegra241_main_ports[] = {
TEGRA241_MAIN_GPIO_PORT(A, 0, 0, 8),
@@ -1252,13 +1260,8 @@ static const struct tegra_gpio_soc tegra241_main_soc = {
.has_vm_support = false,
};
-#define TEGRA241_AON_GPIO_PORT(_name, _bank, _port, _pins) \
- [TEGRA241_AON_GPIO_PORT_##_name] = { \
- .name = #_name, \
- .bank = _bank, \
- .port = _port, \
- .pins = _pins, \
- }
+#define TEGRA241_AON_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA241_AON, _name, _bank, _port, _pins)
static const struct tegra_gpio_port tegra241_aon_ports[] = {
TEGRA241_AON_GPIO_PORT(AA, 0, 0, 8),
@@ -1274,6 +1277,75 @@ static const struct tegra_gpio_soc tegra241_aon_soc = {
.has_vm_support = false,
};
+#define TEGRA256_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA256_MAIN, _name, _bank, _port, _pins)
+
+static const struct tegra_gpio_port tegra256_main_ports[] = {
+ TEGRA256_MAIN_GPIO_PORT(A, 0, 0, 8),
+ TEGRA256_MAIN_GPIO_PORT(B, 0, 1, 8),
+ TEGRA256_MAIN_GPIO_PORT(C, 0, 2, 8),
+ TEGRA256_MAIN_GPIO_PORT(D, 0, 3, 8),
+};
+
+static const struct tegra_gpio_soc tegra256_main_soc = {
+ .num_ports = ARRAY_SIZE(tegra256_main_ports),
+ .ports = tegra256_main_ports,
+ .name = "tegra256-gpio-main",
+ .instance = 1,
+ .num_irqs_per_bank = 8,
+ .has_vm_support = true,
+};
+
+#define TEGRA410_COMPUTE_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA410_COMPUTE, _name, _bank, _port, _pins)
+
+static const struct tegra_gpio_port tegra410_compute_ports[] = {
+ TEGRA410_COMPUTE_GPIO_PORT(A, 0, 0, 3),
+ TEGRA410_COMPUTE_GPIO_PORT(B, 1, 0, 8),
+ TEGRA410_COMPUTE_GPIO_PORT(C, 1, 1, 3),
+ TEGRA410_COMPUTE_GPIO_PORT(D, 2, 0, 8),
+ TEGRA410_COMPUTE_GPIO_PORT(E, 2, 1, 8),
+};
+
+static const struct tegra_gpio_soc tegra410_compute_soc = {
+ .num_ports = ARRAY_SIZE(tegra410_compute_ports),
+ .ports = tegra410_compute_ports,
+ .name = "tegra410-gpio-compute",
+ .prefix = "COMPUTE",
+ .num_irqs_per_bank = 8,
+ .instance = 0,
+};
+
+#define TEGRA410_SYSTEM_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA410_SYSTEM, _name, _bank, _port, _pins)
+
+static const struct tegra_gpio_port tegra410_system_ports[] = {
+ TEGRA410_SYSTEM_GPIO_PORT(A, 0, 0, 7),
+ TEGRA410_SYSTEM_GPIO_PORT(B, 0, 1, 8),
+ TEGRA410_SYSTEM_GPIO_PORT(C, 0, 2, 8),
+ TEGRA410_SYSTEM_GPIO_PORT(D, 0, 3, 8),
+ TEGRA410_SYSTEM_GPIO_PORT(E, 0, 4, 6),
+ TEGRA410_SYSTEM_GPIO_PORT(I, 1, 0, 8),
+ TEGRA410_SYSTEM_GPIO_PORT(J, 1, 1, 7),
+ TEGRA410_SYSTEM_GPIO_PORT(K, 1, 2, 7),
+ TEGRA410_SYSTEM_GPIO_PORT(L, 1, 3, 7),
+ TEGRA410_SYSTEM_GPIO_PORT(M, 2, 0, 7),
+ TEGRA410_SYSTEM_GPIO_PORT(N, 2, 1, 6),
+ TEGRA410_SYSTEM_GPIO_PORT(P, 2, 2, 8),
+ TEGRA410_SYSTEM_GPIO_PORT(Q, 2, 3, 3),
+ TEGRA410_SYSTEM_GPIO_PORT(R, 2, 4, 2),
+ TEGRA410_SYSTEM_GPIO_PORT(V, 1, 4, 2),
+};
+
+static const struct tegra_gpio_soc tegra410_system_soc = {
+ .num_ports = ARRAY_SIZE(tegra410_system_ports),
+ .ports = tegra410_system_ports,
+ .name = "tegra410-gpio-system",
+ .prefix = "SYSTEM",
+ .num_irqs_per_bank = 8,
+ .instance = 0,
+};
+
static const struct of_device_id tegra186_gpio_of_match[] = {
{
.compatible = "nvidia,tegra186-gpio",
@@ -1294,6 +1366,9 @@ static const struct of_device_id tegra186_gpio_of_match[] = {
.compatible = "nvidia,tegra234-gpio-aon",
.data = &tegra234_aon_soc
}, {
+ .compatible = "nvidia,tegra256-gpio",
+ .data = &tegra256_main_soc
+ }, {
/* sentinel */
}
};
@@ -1306,6 +1381,8 @@ static const struct acpi_device_id tegra186_gpio_acpi_match[] = {
{ .id = "NVDA0408", .driver_data = (kernel_ulong_t)&tegra194_aon_soc },
{ .id = "NVDA0508", .driver_data = (kernel_ulong_t)&tegra241_main_soc },
{ .id = "NVDA0608", .driver_data = (kernel_ulong_t)&tegra241_aon_soc },
+ { .id = "NVDA0708", .driver_data = (kernel_ulong_t)&tegra410_compute_soc },
+ { .id = "NVDA0808", .driver_data = (kernel_ulong_t)&tegra410_system_soc },
{}
};
MODULE_DEVICE_TABLE(acpi, tegra186_gpio_acpi_match);
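Note: the per-SoC port macros deleted above all expand to the same designated-initializer shape, so each becomes a one-line wrapper around a shared TEGRA_GPIO_PORT helper defined earlier in this patch. A reconstruction of that helper, inferred from the deleted bodies (the actual definition is not shown in this excerpt):

#define TEGRA_GPIO_PORT(_prefix, _name, _bank, _port, _pins)	\
	[_prefix##_GPIO_PORT_##_name] = {			\
		.name = #_name,					\
		.bank = _bank,					\
		.port = _port,					\
		.pins = _pins,					\
	}

With this in place, TEGRA186_AON_GPIO_PORT(S, 0, 1, 5) still expands to
[TEGRA186_AON_GPIO_PORT_S] = { .name = "S", .bank = 0, .port = 1, .pins = 5 },
exactly as before the cleanup.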
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index 5b851e904c11..be96853063ba 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -116,8 +116,8 @@ static int thunderx_gpio_dir_in(struct gpio_chip *chip, unsigned int line)
return 0;
}
-static void thunderx_gpio_set(struct gpio_chip *chip, unsigned int line,
- int value)
+static int thunderx_gpio_set(struct gpio_chip *chip, unsigned int line,
+ int value)
{
struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
int bank = line / 64;
@@ -127,6 +127,8 @@ static void thunderx_gpio_set(struct gpio_chip *chip, unsigned int line,
(bank * GPIO_2ND_BANK) + (value ? GPIO_TX_SET : GPIO_TX_CLR);
writeq(BIT_ULL(bank_bit), reg);
+
+ return 0;
}
static int thunderx_gpio_dir_out(struct gpio_chip *chip, unsigned int line,
@@ -269,9 +271,9 @@ static int thunderx_gpio_get(struct gpio_chip *chip, unsigned int line)
return masked_bits != 0;
}
-static void thunderx_gpio_set_multiple(struct gpio_chip *chip,
- unsigned long *mask,
- unsigned long *bits)
+static int thunderx_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits)
{
int bank;
u64 set_bits, clear_bits;
@@ -283,6 +285,8 @@ static void thunderx_gpio_set_multiple(struct gpio_chip *chip,
writeq(set_bits, txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_SET);
writeq(clear_bits, txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_CLR);
}
+
+ return 0;
}
static void thunderx_gpio_irq_ack(struct irq_data *d)
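The thunderx changes follow the tree-wide conversion of the gpio_chip .set/.set_multiple callbacks from void to int, so drivers can propagate hardware access failures to the GPIO core. A minimal sketch of the new contract, with a hypothetical example_write_hw() standing in for the real register access:

static int example_gpio_set(struct gpio_chip *chip, unsigned int offset,
			    int value)
{
	int ret;

	/* bus-attached chips (I2C/SPI/USB) can fail here ... */
	ret = example_write_hw(chip, offset, value);
	if (ret)
		return ret;	/* ... and the error now reaches the core */

	return 0;	/* memory-mapped chips simply return 0 */
}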
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index cb303a26f4d3..f488939dd00a 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -80,10 +80,9 @@ static int timbgpio_gpio_direction_output(struct gpio_chip *gpio,
return timbgpio_update_bit(gpio, nr, TGPIODIR, false);
}
-static void timbgpio_gpio_set(struct gpio_chip *gpio,
- unsigned nr, int val)
+static int timbgpio_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
{
- timbgpio_update_bit(gpio, nr, TGPIOVAL, val != 0);
+ return timbgpio_update_bit(gpio, nr, TGPIOVAL, val != 0);
}
static int timbgpio_to_irq(struct gpio_chip *gpio, unsigned offset)
@@ -138,7 +137,7 @@ static int timbgpio_irq_type(struct irq_data *d, unsigned trigger)
u32 ver;
int ret = 0;
- if (offset < 0 || offset > tgpio->gpio.ngpio)
+ if (offset < 0 || offset >= tgpio->gpio.ngpio)
return -EINVAL;
ver = ioread32(tgpio->membase + TGPIO_VER);
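The timberdale hunk also fixes an off-by-one in the bounds check: valid offsets run from 0 to ngpio - 1, so the old "> ngpio" comparison wrongly accepted offset == ngpio. The corrected check is:

	/* offset == ngpio is one past the last line and must be rejected */
	if (offset < 0 || offset >= tgpio->gpio.ngpio)
		return -EINVAL;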
diff --git a/drivers/gpio/gpio-tpic2810.c b/drivers/gpio/gpio-tpic2810.c
index effb7b8ff81f..866ff2d436d5 100644
--- a/drivers/gpio/gpio-tpic2810.c
+++ b/drivers/gpio/gpio-tpic2810.c
@@ -25,7 +25,7 @@ struct tpic2810 {
struct mutex lock;
};
-static void tpic2810_set(struct gpio_chip *chip, unsigned offset, int value);
+static int tpic2810_set(struct gpio_chip *chip, unsigned int offset, int value);
static int tpic2810_get_direction(struct gpio_chip *chip,
unsigned offset)
@@ -34,19 +34,11 @@ static int tpic2810_get_direction(struct gpio_chip *chip,
return GPIO_LINE_DIRECTION_OUT;
}
-static int tpic2810_direction_input(struct gpio_chip *chip,
- unsigned offset)
-{
- /* This device is output only */
- return -EINVAL;
-}
-
static int tpic2810_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
	/* This device always outputs */
- tpic2810_set(chip, offset, value);
- return 0;
+ return tpic2810_set(chip, offset, value);
}
static void tpic2810_set_mask_bits(struct gpio_chip *chip, u8 mask, u8 bits)
@@ -68,22 +60,25 @@ static void tpic2810_set_mask_bits(struct gpio_chip *chip, u8 mask, u8 bits)
mutex_unlock(&gpio->lock);
}
-static void tpic2810_set(struct gpio_chip *chip, unsigned offset, int value)
+static int tpic2810_set(struct gpio_chip *chip, unsigned int offset, int value)
{
tpic2810_set_mask_bits(chip, BIT(offset), value ? BIT(offset) : 0);
+
+ return 0;
}
-static void tpic2810_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int tpic2810_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
tpic2810_set_mask_bits(chip, *mask, *bits);
+
+ return 0;
}
static const struct gpio_chip template_chip = {
.label = "tpic2810",
.owner = THIS_MODULE,
.get_direction = tpic2810_get_direction,
- .direction_input = tpic2810_direction_input,
.direction_output = tpic2810_direction_output,
.set = tpic2810_set,
.set_multiple = tpic2810_set_multiple,
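Dropping the tpic2810_direction_input() stub relies on the GPIO core rejecting input requests itself when a chip provides no .direction_input op. Roughly (a paraphrase of the gpiolib check, not the verbatim core code):

	if (!gc->direction_input)
		return -EIO;	/* line cannot be switched to input */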
diff --git a/drivers/gpio/gpio-tps65086.c b/drivers/gpio/gpio-tps65086.c
index 8f5827554e1e..84b17b83476f 100644
--- a/drivers/gpio/gpio-tps65086.c
+++ b/drivers/gpio/gpio-tps65086.c
@@ -37,10 +37,8 @@ static int tps65086_gpio_direction_output(struct gpio_chip *chip,
struct tps65086_gpio *gpio = gpiochip_get_data(chip);
/* Set the initial value */
- regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL,
- BIT(4 + offset), value ? BIT(4 + offset) : 0);
-
- return 0;
+ return regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL,
+ BIT(4 + offset), value ? BIT(4 + offset) : 0);
}
static int tps65086_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -55,13 +53,13 @@ static int tps65086_gpio_get(struct gpio_chip *chip, unsigned offset)
return val & BIT(4 + offset);
}
-static void tps65086_gpio_set(struct gpio_chip *chip, unsigned offset,
- int value)
+static int tps65086_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct tps65086_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL,
- BIT(4 + offset), value ? BIT(4 + offset) : 0);
+ return regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL,
+ BIT(4 + offset), value ? BIT(4 + offset) : 0);
}
static const struct gpio_chip template_chip = {
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
index d7d9d50dcddf..3b4c41f5ef55 100644
--- a/drivers/gpio/gpio-tps65218.c
+++ b/drivers/gpio/gpio-tps65218.c
@@ -34,34 +34,28 @@ static int tps65218_gpio_get(struct gpio_chip *gc, unsigned offset)
return !!(val & (TPS65218_ENABLE2_GPIO1 << offset));
}
-static void tps65218_gpio_set(struct gpio_chip *gc, unsigned offset,
- int value)
+static int tps65218_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct tps65218_gpio *tps65218_gpio = gpiochip_get_data(gc);
struct tps65218 *tps65218 = tps65218_gpio->tps65218;
if (value)
- tps65218_set_bits(tps65218, TPS65218_REG_ENABLE2,
- TPS65218_ENABLE2_GPIO1 << offset,
- TPS65218_ENABLE2_GPIO1 << offset,
- TPS65218_PROTECT_L1);
- else
- tps65218_clear_bits(tps65218, TPS65218_REG_ENABLE2,
- TPS65218_ENABLE2_GPIO1 << offset,
- TPS65218_PROTECT_L1);
+ return tps65218_set_bits(tps65218, TPS65218_REG_ENABLE2,
+ TPS65218_ENABLE2_GPIO1 << offset,
+ TPS65218_ENABLE2_GPIO1 << offset,
+ TPS65218_PROTECT_L1);
+
+ return tps65218_clear_bits(tps65218, TPS65218_REG_ENABLE2,
+ TPS65218_ENABLE2_GPIO1 << offset,
+ TPS65218_PROTECT_L1);
}
static int tps65218_gpio_output(struct gpio_chip *gc, unsigned offset,
int value)
{
/* Only drives GPOs */
- tps65218_gpio_set(gc, offset, value);
- return 0;
-}
-
-static int tps65218_gpio_input(struct gpio_chip *gc, unsigned offset)
-{
- return -EPERM;
+ return tps65218_gpio_set(gc, offset, value);
}
static int tps65218_gpio_request(struct gpio_chip *gc, unsigned offset)
@@ -174,7 +168,6 @@ static const struct gpio_chip template_chip = {
.owner = THIS_MODULE,
.request = tps65218_gpio_request,
.direction_output = tps65218_gpio_output,
- .direction_input = tps65218_gpio_input,
.get = tps65218_gpio_get,
.set = tps65218_gpio_set,
.set_config = tps65218_gpio_set_config,
diff --git a/drivers/gpio/gpio-tps65219.c b/drivers/gpio/gpio-tps65219.c
index 526640c39a11..158f63bcf10c 100644
--- a/drivers/gpio/gpio-tps65219.c
+++ b/drivers/gpio/gpio-tps65219.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * GPIO driver for TI TPS65219 PMICs
+ * GPIO driver for TI TPS65214/TPS65215/TPS65219 PMICs
*
- * Copyright (C) 2022 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2022, 2025 Texas Instruments Incorporated - http://www.ti.com/
*/
#include <linux/bits.h>
@@ -13,20 +13,48 @@
#include <linux/regmap.h>
#define TPS65219_GPIO0_DIR_MASK BIT(3)
-#define TPS65219_GPIO0_OFFSET 2
-#define TPS65219_GPIO0_IDX 0
+#define TPS65214_GPIO0_DIR_MASK BIT(1)
+#define TPS6521X_GPIO0_OFFSET 2
+#define TPS6521X_GPIO0_IDX 0
+
+/*
+ * TPS65214 GPIO mapping
+ * Linux gpio offset 0 -> GPIO (pin16) -> bit_offset 2
+ * Linux gpio offset 1 -> GPO1 (pin9 ) -> bit_offset 0
+ *
+ * TPS65215 & TPS65219 GPIO mapping
+ * Linux gpio offset 0 -> GPIO (pin16) -> bit_offset 2
+ * Linux gpio offset 1 -> GPO1 (pin8 ) -> bit_offset 0
+ * Linux gpio offset 2 -> GPO2 (pin17) -> bit_offset 1
+ */
struct tps65219_gpio {
+ int (*change_dir)(struct gpio_chip *gc, unsigned int offset, unsigned int dir);
struct gpio_chip gpio_chip;
struct tps65219 *tps;
};
+static int tps65214_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct tps65219_gpio *gpio = gpiochip_get_data(gc);
+ int ret, val;
+
+ if (offset != TPS6521X_GPIO0_IDX)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ ret = regmap_read(gpio->tps->regmap, TPS65219_REG_GENERAL_CONFIG, &val);
+ if (ret)
+ return ret;
+
+ return !(val & TPS65214_GPIO0_DIR_MASK);
+}
+
static int tps65219_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
{
struct tps65219_gpio *gpio = gpiochip_get_data(gc);
int ret, val;
- if (offset != TPS65219_GPIO0_IDX)
+ if (offset != TPS6521X_GPIO0_IDX)
return GPIO_LINE_DIRECTION_OUT;
ret = regmap_read(gpio->tps->regmap, TPS65219_REG_MFP_1_CONFIG, &val);
@@ -42,7 +70,7 @@ static int tps65219_gpio_get(struct gpio_chip *gc, unsigned int offset)
struct device *dev = gpio->tps->dev;
int ret, val;
- if (offset != TPS65219_GPIO0_IDX) {
+ if (offset != TPS6521X_GPIO0_IDX) {
dev_err(dev, "GPIO%d is output only, cannot get\n", offset);
return -ENOTSUPP;
}
@@ -65,19 +93,18 @@ static int tps65219_gpio_get(struct gpio_chip *gc, unsigned int offset)
return ret;
}
-static void tps65219_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+static int tps65219_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct tps65219_gpio *gpio = gpiochip_get_data(gc);
- struct device *dev = gpio->tps->dev;
int v, mask, bit;
- bit = (offset == TPS65219_GPIO0_IDX) ? TPS65219_GPIO0_OFFSET : offset - 1;
+ bit = (offset == TPS6521X_GPIO0_IDX) ? TPS6521X_GPIO0_OFFSET : offset - 1;
mask = BIT(bit);
v = value ? mask : 0;
- if (regmap_update_bits(gpio->tps->regmap, TPS65219_REG_GENERAL_CONFIG, mask, v))
- dev_err(dev, "GPIO%d, set to value %d failed.\n", offset, value);
+ return regmap_update_bits(gpio->tps->regmap,
+ TPS65219_REG_GENERAL_CONFIG, mask, v);
}
static int tps65219_gpio_change_direction(struct gpio_chip *gc, unsigned int offset,
@@ -112,12 +139,39 @@ static int tps65219_gpio_change_direction(struct gpio_chip *gc, unsigned int off
return -ENOTSUPP;
}
+static int tps65214_gpio_change_direction(struct gpio_chip *gc, unsigned int offset,
+ unsigned int direction)
+{
+ struct tps65219_gpio *gpio = gpiochip_get_data(gc);
+ struct device *dev = gpio->tps->dev;
+ int val, ret;
+
+	/*
+	 * Whether the pin is GPIO or GPO was already verified by the caller.
+	 * Masked value: 0 = GPIO, 1 = VSEL
+	 */
+ ret = regmap_read(gpio->tps->regmap, TPS65219_REG_MFP_1_CONFIG, &val);
+ if (ret)
+ return ret;
+
+ ret = !!(val & BIT(TPS65219_GPIO0_DIR_MASK));
+ if (ret)
+ dev_err(dev, "GPIO%d configured as VSEL, not GPIO\n", offset);
+
+ ret = regmap_update_bits(gpio->tps->regmap, TPS65219_REG_GENERAL_CONFIG,
+ TPS65214_GPIO0_DIR_MASK, direction);
+ if (ret)
+		dev_err(dev, "Failed to change direction to %u for GPIO%d.\n", direction, offset);
+
+ return ret;
+}
+
static int tps65219_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
{
struct tps65219_gpio *gpio = gpiochip_get_data(gc);
struct device *dev = gpio->tps->dev;
- if (offset != TPS65219_GPIO0_IDX) {
+ if (offset != TPS6521X_GPIO0_IDX) {
dev_err(dev, "GPIO%d is output only, cannot change to input\n", offset);
return -ENOTSUPP;
}
@@ -125,21 +179,36 @@ static int tps65219_gpio_direction_input(struct gpio_chip *gc, unsigned int offs
if (tps65219_gpio_get_direction(gc, offset) == GPIO_LINE_DIRECTION_IN)
return 0;
- return tps65219_gpio_change_direction(gc, offset, GPIO_LINE_DIRECTION_IN);
+ return gpio->change_dir(gc, offset, GPIO_LINE_DIRECTION_IN);
}
static int tps65219_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value)
{
+ struct tps65219_gpio *gpio = gpiochip_get_data(gc);
+
tps65219_gpio_set(gc, offset, value);
- if (offset != TPS65219_GPIO0_IDX)
+ if (offset != TPS6521X_GPIO0_IDX)
return 0;
if (tps65219_gpio_get_direction(gc, offset) == GPIO_LINE_DIRECTION_OUT)
return 0;
- return tps65219_gpio_change_direction(gc, offset, GPIO_LINE_DIRECTION_OUT);
+ return gpio->change_dir(gc, offset, GPIO_LINE_DIRECTION_OUT);
}
+static const struct gpio_chip tps65214_template_chip = {
+ .label = "tps65214-gpio",
+ .owner = THIS_MODULE,
+ .get_direction = tps65214_gpio_get_direction,
+ .direction_input = tps65219_gpio_direction_input,
+ .direction_output = tps65219_gpio_direction_output,
+ .get = tps65219_gpio_get,
+ .set = tps65219_gpio_set,
+ .base = -1,
+ .ngpio = 2,
+ .can_sleep = true,
+};
+
static const struct gpio_chip tps65219_template_chip = {
.label = "tps65219-gpio",
.owner = THIS_MODULE,
@@ -155,6 +224,7 @@ static const struct gpio_chip tps65219_template_chip = {
static int tps65219_gpio_probe(struct platform_device *pdev)
{
+ enum pmic_id chip = platform_get_device_id(pdev)->driver_data;
struct tps65219 *tps = dev_get_drvdata(pdev->dev.parent);
struct tps65219_gpio *gpio;
@@ -162,22 +232,38 @@ static int tps65219_gpio_probe(struct platform_device *pdev)
if (!gpio)
return -ENOMEM;
+ if (chip == TPS65214) {
+ gpio->gpio_chip = tps65214_template_chip;
+ gpio->change_dir = tps65214_gpio_change_direction;
+ } else if (chip == TPS65219) {
+ gpio->gpio_chip = tps65219_template_chip;
+ gpio->change_dir = tps65219_gpio_change_direction;
+ } else {
+ return -ENODATA;
+ }
+
gpio->tps = tps;
- gpio->gpio_chip = tps65219_template_chip;
gpio->gpio_chip.parent = tps->dev;
return devm_gpiochip_add_data(&pdev->dev, &gpio->gpio_chip, gpio);
}
+static const struct platform_device_id tps6521x_gpio_id_table[] = {
+ { "tps65214-gpio", TPS65214 },
+ { "tps65219-gpio", TPS65219 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps6521x_gpio_id_table);
+
static struct platform_driver tps65219_gpio_driver = {
.driver = {
.name = "tps65219-gpio",
},
.probe = tps65219_gpio_probe,
+ .id_table = tps6521x_gpio_id_table,
};
module_platform_driver(tps65219_gpio_driver);
-MODULE_ALIAS("platform:tps65219-gpio");
MODULE_AUTHOR("Jonathan Cormier <jcormier@criticallink.com>");
-MODULE_DESCRIPTION("TPS65219 GPIO driver");
+MODULE_DESCRIPTION("TPS65214/TPS65215/TPS65219 GPIO driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index d277aa951143..aaacbb54bf5d 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -40,13 +40,13 @@ static int tps6586x_gpio_get(struct gpio_chip *gc, unsigned offset)
return !!(val & (1 << offset));
}
-static void tps6586x_gpio_set(struct gpio_chip *gc, unsigned offset,
- int value)
+static int tps6586x_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct tps6586x_gpio *tps6586x_gpio = gpiochip_get_data(gc);
- tps6586x_update(tps6586x_gpio->parent, TPS6586X_GPIOSET2,
- value << offset, 1 << offset);
+ return tps6586x_update(tps6586x_gpio->parent, TPS6586X_GPIOSET2,
+ value << offset, 1 << offset);
}
static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
@@ -54,8 +54,11 @@ static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
{
struct tps6586x_gpio *tps6586x_gpio = gpiochip_get_data(gc);
uint8_t val, mask;
+ int ret;
- tps6586x_gpio_set(gc, offset, value);
+ ret = tps6586x_gpio_set(gc, offset, value);
+ if (ret)
+ return ret;
val = 0x1 << (offset * 2);
mask = 0x3 << (offset * 2);
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index 187d21580573..25e9f41efe78 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -36,18 +36,18 @@ static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
return 0;
}
-static void tps65910_gpio_set(struct gpio_chip *gc, unsigned offset,
- int value)
+static int tps65910_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct tps65910_gpio *tps65910_gpio = gpiochip_get_data(gc);
struct tps65910 *tps65910 = tps65910_gpio->tps65910;
if (value)
- regmap_set_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
- GPIO_SET_MASK);
- else
- regmap_clear_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
- GPIO_SET_MASK);
+ return regmap_set_bits(tps65910->regmap,
+ TPS65910_GPIO0 + offset, GPIO_SET_MASK);
+
+ return regmap_clear_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
+ GPIO_SET_MASK);
}
static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
@@ -55,9 +55,12 @@ static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
{
struct tps65910_gpio *tps65910_gpio = gpiochip_get_data(gc);
struct tps65910 *tps65910 = tps65910_gpio->tps65910;
+ int ret;
/* Set the initial value */
- tps65910_gpio_set(gc, offset, value);
+ ret = tps65910_gpio_set(gc, offset, value);
+ if (ret)
+ return ret;
return regmap_set_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
GPIO_CFG_MASK);
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index fab771cb6a87..7a2c5685c2fd 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -49,10 +49,13 @@ static int tps65912_gpio_direction_output(struct gpio_chip *gc,
unsigned offset, int value)
{
struct tps65912_gpio *gpio = gpiochip_get_data(gc);
+ int ret;
/* Set the initial value */
- regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
- GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
+ ret = regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
+ GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
+ if (ret)
+ return ret;
return regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
GPIO_CFG_MASK, GPIO_CFG_MASK);
@@ -73,13 +76,13 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
return 0;
}
-static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
- int value)
+static int tps65912_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct tps65912_gpio *gpio = gpiochip_get_data(gc);
- regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
- GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
+ return regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
+ GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
}
static const struct gpio_chip template_chip = {
diff --git a/drivers/gpio/gpio-tps68470.c b/drivers/gpio/gpio-tps68470.c
index 532deaddfd4e..d4fbdf90e190 100644
--- a/drivers/gpio/gpio-tps68470.c
+++ b/drivers/gpio/gpio-tps68470.c
@@ -70,8 +70,8 @@ static int tps68470_gpio_get_direction(struct gpio_chip *gc,
GPIO_LINE_DIRECTION_IN;
}
-static void tps68470_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static int tps68470_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
struct regmap *regmap = tps68470_gpio->tps68470_regmap;
@@ -82,7 +82,8 @@ static void tps68470_gpio_set(struct gpio_chip *gc, unsigned int offset,
offset -= TPS68470_N_REGULAR_GPIO;
}
- regmap_update_bits(regmap, reg, BIT(offset), value ? BIT(offset) : 0);
+ return regmap_update_bits(regmap, reg, BIT(offset),
+ value ? BIT(offset) : 0);
}
static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset,
@@ -90,9 +91,12 @@ static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset,
{
struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
struct regmap *regmap = tps68470_gpio->tps68470_regmap;
+ int ret;
/* Set the initial value */
- tps68470_gpio_set(gc, offset, value);
+ ret = tps68470_gpio_set(gc, offset, value);
+ if (ret)
+ return ret;
/* rest are always outputs */
if (offset >= TPS68470_N_REGULAR_GPIO)
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index 18f523a15b3c..eedfc0e371e3 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -93,14 +93,16 @@ static void _tqmx86_gpio_set(struct tqmx86_gpio_data *gpio, unsigned int offset,
tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD);
}
-static void tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
guard(raw_spinlock_irqsave)(&gpio->spinlock);
_tqmx86_gpio_set(gpio, offset, value);
+
+ return 0;
}
static int tqmx86_gpio_direction_input(struct gpio_chip *chip,
@@ -277,19 +279,18 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
}
/* Minimal runtime PM is needed by the IRQ subsystem */
-static int __maybe_unused tqmx86_gpio_runtime_suspend(struct device *dev)
+static int tqmx86_gpio_runtime_suspend(struct device *dev)
{
return 0;
}
-static int __maybe_unused tqmx86_gpio_runtime_resume(struct device *dev)
+static int tqmx86_gpio_runtime_resume(struct device *dev)
{
return 0;
}
static const struct dev_pm_ops tqmx86_gpio_dev_pm_ops = {
- SET_RUNTIME_PM_OPS(tqmx86_gpio_runtime_suspend,
- tqmx86_gpio_runtime_resume, NULL)
+ RUNTIME_PM_OPS(tqmx86_gpio_runtime_suspend, tqmx86_gpio_runtime_resume, NULL)
};
static void tqmx86_init_irq_valid_mask(struct gpio_chip *chip,
@@ -423,7 +424,7 @@ out_pm_dis:
static struct platform_driver tqmx86_gpio_driver = {
.driver = {
.name = "tqmx86-gpio",
- .pm = &tqmx86_gpio_dev_pm_ops,
+ .pm = pm_ptr(&tqmx86_gpio_dev_pm_ops),
},
.probe = tqmx86_gpio_probe,
};
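The tqmx86 PM conversion swaps __maybe_unused + SET_RUNTIME_PM_OPS() for the modern RUNTIME_PM_OPS() + pm_ptr() pair: the callbacks are now referenced unconditionally, and pm_ptr() compiles the pointer down to NULL when CONFIG_PM is disabled, so the attribute is no longer needed. The same shape in a hypothetical driver "foo":

static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = pm_ptr(&foo_pm_ops),	/* NULL if !CONFIG_PM */
	},
};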
diff --git a/drivers/gpio/gpio-ts4800.c b/drivers/gpio/gpio-ts4800.c
index 4748e3d47106..992ee231db9f 100644
--- a/drivers/gpio/gpio-ts4800.c
+++ b/drivers/gpio/gpio-ts4800.c
@@ -6,9 +6,10 @@
*/
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#define DEFAULT_PIN_NUMBER 16
#define INPUT_REG_OFFSET 0x00
@@ -17,13 +18,14 @@
static int ts4800_gpio_probe(struct platform_device *pdev)
{
- struct device_node *node;
- struct gpio_chip *chip;
+ struct gpio_generic_chip_config config;
+ struct device *dev = &pdev->dev;
+ struct gpio_generic_chip *chip;
void __iomem *base_addr;
int retval;
u32 ngpios;
- chip = devm_kzalloc(&pdev->dev, sizeof(struct gpio_chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
@@ -31,29 +33,28 @@ static int ts4800_gpio_probe(struct platform_device *pdev)
if (IS_ERR(base_addr))
return PTR_ERR(base_addr);
- node = pdev->dev.of_node;
- if (!node)
- return -EINVAL;
-
- retval = of_property_read_u32(node, "ngpios", &ngpios);
+ retval = device_property_read_u32(dev, "ngpios", &ngpios);
if (retval == -EINVAL)
ngpios = DEFAULT_PIN_NUMBER;
else if (retval)
return retval;
- retval = bgpio_init(chip, &pdev->dev, 2, base_addr + INPUT_REG_OFFSET,
- base_addr + OUTPUT_REG_OFFSET, NULL,
- base_addr + DIRECTION_REG_OFFSET, NULL, 0);
- if (retval) {
- dev_err(&pdev->dev, "bgpio_init failed\n");
- return retval;
- }
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 2,
+ .dat = base_addr + INPUT_REG_OFFSET,
+ .set = base_addr + OUTPUT_REG_OFFSET,
+ .dirout = base_addr + DIRECTION_REG_OFFSET,
+ };
- chip->ngpio = ngpios;
+ retval = gpio_generic_chip_init(chip, &config);
+ if (retval)
+ return dev_err_probe(dev, retval,
+ "failed to initialize the generic GPIO chip\n");
- platform_set_drvdata(pdev, chip);
+ chip->gc.ngpio = ngpios;
- return devm_gpiochip_add_data(&pdev->dev, chip, NULL);
+ return devm_gpiochip_add_data(dev, &chip->gc, NULL);
}
static const struct of_device_id ts4800_gpio_of_match[] = {
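ts4800 is another bgpio_init() to gpio_generic_chip_init() conversion: the long positional argument list becomes named config fields. The correspondence, roughly (only the fields this driver uses appear in the patch; .clr, .dirin and .flags are presumed to follow the same naming):

	/*
	 * bgpio_init(gc, dev, sz, dat, set, clr, dirout, dirin, flags)
	 * maps onto gpio_generic_chip_config as:
	 */
	struct gpio_generic_chip_config config = {
		.dev	= dev,					/* parent device */
		.sz	= 2,					/* register width, bytes */
		.dat	= base_addr + INPUT_REG_OFFSET,		/* data-in register */
		.set	= base_addr + OUTPUT_REG_OFFSET,	/* data-out register */
		.dirout	= base_addr + DIRECTION_REG_OFFSET,	/* direction register */
		/* unused fields default to 0/NULL */
	};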
diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c
index 5c806140fdf0..d9ee8fc77ccd 100644
--- a/drivers/gpio/gpio-ts4900.c
+++ b/drivers/gpio/gpio-ts4900.c
@@ -95,16 +95,16 @@ static int ts4900_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(reg & priv->input_bit);
}
-static void ts4900_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int ts4900_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
if (value)
- regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OUT,
- TS4900_GPIO_OUT);
- else
- regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OUT, 0);
+ return regmap_update_bits(priv->regmap, offset,
+ TS4900_GPIO_OUT, TS4900_GPIO_OUT);
+
+ return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OUT, 0);
}
static const struct regmap_config ts4900_regmap_config = {
diff --git a/drivers/gpio/gpio-ts5500.c b/drivers/gpio/gpio-ts5500.c
index 61cbec5c06a7..3c7f2efe10fd 100644
--- a/drivers/gpio/gpio-ts5500.c
+++ b/drivers/gpio/gpio-ts5500.c
@@ -244,7 +244,7 @@ static int ts5500_gpio_output(struct gpio_chip *chip, unsigned offset, int val)
return 0;
}
-static void ts5500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static int ts5500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
{
struct ts5500_priv *priv = gpiochip_get_data(chip);
const struct ts5500_dio line = priv->pinout[offset];
@@ -256,6 +256,8 @@ static void ts5500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
else
ts5500_clear_mask(line.value_mask, line.value_addr);
spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
}
static int ts5500_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index 0d17985a5fdc..a851702befde 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -120,7 +120,7 @@ static u8 cached_leden;
* external pullup is needed. We could also expose the integrated PWM
* as a LED brightness control; we initialize it as "always on".
*/
-static void twl4030_led_set_value(int led, int value)
+static int twl4030_led_set_value(int led, int value)
{
u8 mask = LEDEN_LEDAON | LEDEN_LEDAPWM;
@@ -132,8 +132,8 @@ static void twl4030_led_set_value(int led, int value)
else
cached_leden |= mask;
- WARN_ON_ONCE(twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
- TWL4030_LED_LEDEN_REG));
+ return twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
+ TWL4030_LED_LEDEN_REG);
}
static int twl4030_set_gpio_direction(int gpio, int is_input)
@@ -278,7 +278,7 @@ static void twl_free(struct gpio_chip *chip, unsigned offset)
mutex_lock(&priv->mutex);
if (offset >= TWL4030_GPIO_MAX) {
- twl4030_led_set_value(offset - TWL4030_GPIO_MAX, 1);
+ WARN_ON_ONCE(twl4030_led_set_value(offset - TWL4030_GPIO_MAX, 1));
goto out;
}
@@ -334,15 +334,16 @@ out:
return ret;
}
-static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
+static int twl_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct gpio_twl4030_priv *priv = gpiochip_get_data(chip);
+ int ret;
mutex_lock(&priv->mutex);
if (offset < TWL4030_GPIO_MAX)
- twl4030_set_gpio_dataout(offset, value);
+ ret = twl4030_set_gpio_dataout(offset, value);
else
- twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
+ ret = twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
if (value)
priv->out_state |= BIT(offset);
@@ -350,6 +351,8 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
priv->out_state &= ~BIT(offset);
mutex_unlock(&priv->mutex);
+
+ return ret;
}
static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
@@ -373,9 +376,7 @@ static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
priv->direction |= BIT(offset);
mutex_unlock(&priv->mutex);
- twl_set(chip, offset, value);
-
- return ret;
+ return twl_set(chip, offset, value);
}
static int twl_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -523,7 +524,7 @@ static int gpio_twl4030_probe(struct platform_device *pdev)
return irq_base;
}
- irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node), TWL4030_GPIO_MAX, irq_base, 0,
+ irq_domain_create_legacy(dev_fwnode(&pdev->dev), TWL4030_GPIO_MAX, irq_base, 0,
&irq_domain_simple_ops, NULL);
ret = twl4030_sih_setup(&pdev->dev, TWL4030_MODULE_GPIO, irq_base);
@@ -596,9 +597,7 @@ no_irqs:
ret = devm_add_action_or_reset(&pdev->dev, gpio_twl4030_power_off_action, d);
if (ret)
- return dev_err_probe(&pdev->dev, ret,
- "failed to install power off handler\n");
-
+ return ret;
}
return 0;
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index b9171bf66168..4ec9bcd40439 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -37,14 +37,8 @@ static int twl6040gpo_get_direction(struct gpio_chip *chip, unsigned offset)
return GPIO_LINE_DIRECTION_OUT;
}
-static int twl6040gpo_direction_out(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- /* This only drives GPOs, and can't change direction */
- return 0;
-}
-
-static void twl6040gpo_set(struct gpio_chip *chip, unsigned offset, int value)
+static int twl6040gpo_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct twl6040 *twl6040 = gpiochip_get_data(chip);
int ret;
@@ -52,14 +46,21 @@ static void twl6040gpo_set(struct gpio_chip *chip, unsigned offset, int value)
ret = twl6040_reg_read(twl6040, TWL6040_REG_GPOCTL);
if (ret < 0)
- return;
+ return ret;
if (value)
gpoctl = ret | BIT(offset);
else
gpoctl = ret & ~BIT(offset);
- twl6040_reg_write(twl6040, TWL6040_REG_GPOCTL, gpoctl);
+ return twl6040_reg_write(twl6040, TWL6040_REG_GPOCTL, gpoctl);
+}
+
+static int twl6040gpo_direction_out(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ /* This only drives GPOs, and can't change direction */
+ return twl6040gpo_set(chip, offset, value);
}
static struct gpio_chip twl6040gpo_chip = {
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index d738da8718f9..0574dde5b5bb 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -138,14 +138,16 @@ static int uniphier_gpio_get(struct gpio_chip *chip, unsigned int offset)
return uniphier_gpio_offset_read(chip, offset, UNIPHIER_GPIO_PORT_DATA);
}
-static void uniphier_gpio_set(struct gpio_chip *chip,
- unsigned int offset, int val)
+static int uniphier_gpio_set(struct gpio_chip *chip,
+ unsigned int offset, int val)
{
uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DATA, val);
+
+ return 0;
}
-static void uniphier_gpio_set_multiple(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
+static int uniphier_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
{
unsigned long i, bank, bank_mask, bank_bits;
@@ -156,6 +158,8 @@ static void uniphier_gpio_set_multiple(struct gpio_chip *chip,
uniphier_gpio_bank_write(chip, bank, UNIPHIER_GPIO_PORT_DATA,
bank_mask, bank_bits);
}
+
+ return 0;
}
static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
@@ -422,7 +426,7 @@ static void uniphier_gpio_remove(struct platform_device *pdev)
irq_domain_remove(priv->domain);
}
-static int __maybe_unused uniphier_gpio_suspend(struct device *dev)
+static int uniphier_gpio_suspend(struct device *dev)
{
struct uniphier_gpio_priv *priv = dev_get_drvdata(dev);
unsigned int nbanks = uniphier_gpio_get_nbanks(priv->chip.ngpio);
@@ -444,7 +448,7 @@ static int __maybe_unused uniphier_gpio_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused uniphier_gpio_resume(struct device *dev)
+static int uniphier_gpio_resume(struct device *dev)
{
struct uniphier_gpio_priv *priv = dev_get_drvdata(dev);
unsigned int nbanks = uniphier_gpio_get_nbanks(priv->chip.ngpio);
@@ -469,8 +473,7 @@ static int __maybe_unused uniphier_gpio_resume(struct device *dev)
}
static const struct dev_pm_ops uniphier_gpio_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(uniphier_gpio_suspend,
- uniphier_gpio_resume)
+ LATE_SYSTEM_SLEEP_PM_OPS(uniphier_gpio_suspend, uniphier_gpio_resume)
};
static const struct of_device_id uniphier_gpio_match[] = {
@@ -485,7 +488,7 @@ static struct platform_driver uniphier_gpio_driver = {
.driver = {
.name = "uniphier-gpio",
.of_match_table = uniphier_gpio_match,
- .pm = &uniphier_gpio_pm_ops,
+ .pm = pm_sleep_ptr(&uniphier_gpio_pm_ops),
},
};
module_platform_driver(uniphier_gpio_driver);
diff --git a/drivers/gpio/gpio-usbio.c b/drivers/gpio/gpio-usbio.c
new file mode 100644
index 000000000000..34d42c743d5b
--- /dev/null
+++ b/drivers/gpio/gpio-usbio.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025 Intel Corporation.
+ * Copyright (c) 2025 Red Hat, Inc.
+ */
+
+#include <linux/acpi.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/usb/usbio.h>
+
+struct usbio_gpio_bank {
+ u8 config[USBIO_GPIOSPERBANK];
+ u32 bitmap;
+};
+
+struct usbio_gpio {
+ struct mutex config_mutex; /* Protects banks[x].config */
+ struct usbio_gpio_bank banks[USBIO_MAX_GPIOBANKS];
+ struct gpio_chip gc;
+ struct auxiliary_device *adev;
+};
+
+static const struct acpi_device_id usbio_gpio_acpi_hids[] = {
+ { "INTC1007" }, /* MTL */
+ { "INTC10B2" }, /* ARL */
+ { "INTC10B5" }, /* LNL */
+ { "INTC10D1" }, /* MTL-CVF */
+ { "INTC10E2" }, /* PTL */
+ { }
+};
+
+static void usbio_gpio_get_bank_and_pin(struct gpio_chip *gc, unsigned int offset,
+ struct usbio_gpio_bank **bank_ret,
+ unsigned int *pin_ret)
+{
+ struct usbio_gpio *gpio = gpiochip_get_data(gc);
+ struct device *dev = &gpio->adev->dev;
+ struct usbio_gpio_bank *bank;
+ unsigned int pin;
+
+ bank = &gpio->banks[offset / USBIO_GPIOSPERBANK];
+ pin = offset % USBIO_GPIOSPERBANK;
+ if (~bank->bitmap & BIT(pin)) {
+		/* The FW bitmap is sometimes invalid; warn and continue */
+ dev_warn_once(dev, FW_BUG "GPIO %u is not in FW pins bitmap\n", offset);
+ }
+
+ *bank_ret = bank;
+ *pin_ret = pin;
+}
+
+static int usbio_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct usbio_gpio_bank *bank;
+ unsigned int pin;
+ u8 cfg;
+
+ usbio_gpio_get_bank_and_pin(gc, offset, &bank, &pin);
+
+ cfg = bank->config[pin] & USBIO_GPIO_PINMOD_MASK;
+
+ return (cfg == USBIO_GPIO_PINMOD_OUTPUT) ?
+ GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+}
+
+static int usbio_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct usbio_gpio *gpio = gpiochip_get_data(gc);
+ struct usbio_gpio_bank *bank;
+ struct usbio_gpio_rw gbuf;
+ unsigned int pin;
+ int ret;
+
+ usbio_gpio_get_bank_and_pin(gc, offset, &bank, &pin);
+
+ gbuf.bankid = offset / USBIO_GPIOSPERBANK;
+ gbuf.pincount = 1;
+ gbuf.pin = pin;
+
+ ret = usbio_control_msg(gpio->adev, USBIO_PKTTYPE_GPIO, USBIO_GPIOCMD_READ,
+ &gbuf, sizeof(gbuf) - sizeof(gbuf.value),
+ &gbuf, sizeof(gbuf));
+ if (ret != sizeof(gbuf))
+ return (ret < 0) ? ret : -EPROTO;
+
+ return (le32_to_cpu(gbuf.value) >> pin) & 1;
+}
+
+static int usbio_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct usbio_gpio *gpio = gpiochip_get_data(gc);
+ struct usbio_gpio_bank *bank;
+ struct usbio_gpio_rw gbuf;
+ unsigned int pin;
+
+ usbio_gpio_get_bank_and_pin(gc, offset, &bank, &pin);
+
+ gbuf.bankid = offset / USBIO_GPIOSPERBANK;
+ gbuf.pincount = 1;
+ gbuf.pin = pin;
+ gbuf.value = cpu_to_le32(value << pin);
+
+ return usbio_control_msg(gpio->adev, USBIO_PKTTYPE_GPIO, USBIO_GPIOCMD_WRITE,
+ &gbuf, sizeof(gbuf), NULL, 0);
+}
+
+static int usbio_gpio_update_config(struct gpio_chip *gc, unsigned int offset,
+ u8 mask, u8 value)
+{
+ struct usbio_gpio *gpio = gpiochip_get_data(gc);
+ struct usbio_gpio_bank *bank;
+ struct usbio_gpio_init gbuf;
+ unsigned int pin;
+
+ usbio_gpio_get_bank_and_pin(gc, offset, &bank, &pin);
+
+ guard(mutex)(&gpio->config_mutex);
+
+ bank->config[pin] &= ~mask;
+ bank->config[pin] |= value;
+
+ gbuf.bankid = offset / USBIO_GPIOSPERBANK;
+ gbuf.config = bank->config[pin];
+ gbuf.pincount = 1;
+ gbuf.pin = pin;
+
+ return usbio_control_msg(gpio->adev, USBIO_PKTTYPE_GPIO, USBIO_GPIOCMD_INIT,
+ &gbuf, sizeof(gbuf), NULL, 0);
+}
+
+static int usbio_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+ return usbio_gpio_update_config(gc, offset, USBIO_GPIO_PINMOD_MASK,
+ USBIO_GPIO_SET_PINMOD(USBIO_GPIO_PINMOD_INPUT));
+}
+
+static int usbio_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ int ret;
+
+ ret = usbio_gpio_update_config(gc, offset, USBIO_GPIO_PINMOD_MASK,
+ USBIO_GPIO_SET_PINMOD(USBIO_GPIO_PINMOD_OUTPUT));
+ if (ret)
+ return ret;
+
+ return usbio_gpio_set(gc, offset, value);
+}
+
+static int usbio_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
+{
+ u8 value;
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ value = USBIO_GPIO_SET_PINCFG(USBIO_GPIO_PINCFG_DEFAULT);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ value = USBIO_GPIO_SET_PINCFG(USBIO_GPIO_PINCFG_PULLUP);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ value = USBIO_GPIO_SET_PINCFG(USBIO_GPIO_PINCFG_PULLDOWN);
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ value = USBIO_GPIO_SET_PINCFG(USBIO_GPIO_PINCFG_PUSHPULL);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return usbio_gpio_update_config(gc, offset, USBIO_GPIO_PINCFG_MASK, value);
+}
+
+static int usbio_gpio_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *adev_id)
+{
+ struct usbio_gpio_bank_desc *bank_desc;
+ struct device *dev = &adev->dev;
+ struct usbio_gpio *gpio;
+ int bank, ret;
+
+ bank_desc = dev_get_platdata(dev);
+ if (!bank_desc)
+ return -EINVAL;
+
+ gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ ret = devm_mutex_init(dev, &gpio->config_mutex);
+ if (ret)
+ return ret;
+
+ gpio->adev = adev;
+
+ usbio_acpi_bind(gpio->adev, usbio_gpio_acpi_hids);
+
+ for (bank = 0; bank < USBIO_MAX_GPIOBANKS && bank_desc[bank].bmap; bank++)
+ gpio->banks[bank].bitmap = le32_to_cpu(bank_desc[bank].bmap);
+
+ gpio->gc.label = ACPI_COMPANION(dev) ?
+ acpi_dev_name(ACPI_COMPANION(dev)) : dev_name(dev);
+ gpio->gc.parent = dev;
+ gpio->gc.owner = THIS_MODULE;
+ gpio->gc.get_direction = usbio_gpio_get_direction;
+ gpio->gc.direction_input = usbio_gpio_direction_input;
+ gpio->gc.direction_output = usbio_gpio_direction_output;
+ gpio->gc.get = usbio_gpio_get;
+ gpio->gc.set = usbio_gpio_set;
+ gpio->gc.set_config = usbio_gpio_set_config;
+ gpio->gc.base = -1;
+ gpio->gc.ngpio = bank * USBIO_GPIOSPERBANK;
+ gpio->gc.can_sleep = true;
+
+ ret = devm_gpiochip_add_data(dev, &gpio->gc, gpio);
+ if (ret)
+ return ret;
+
+ if (has_acpi_companion(dev))
+ acpi_dev_clear_dependencies(ACPI_COMPANION(dev));
+
+ return 0;
+}
+
+static const struct auxiliary_device_id usbio_gpio_id_table[] = {
+ { "usbio.usbio-gpio" },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, usbio_gpio_id_table);
+
+static struct auxiliary_driver usbio_gpio_driver = {
+ .name = USBIO_GPIO_CLIENT,
+ .probe = usbio_gpio_probe,
+ .id_table = usbio_gpio_id_table
+};
+module_auxiliary_driver(usbio_gpio_driver);
+
+MODULE_DESCRIPTION("Intel USBIO GPIO driver");
+MODULE_AUTHOR("Israel Cepeda <israel.a.cepeda.lopez@intel.com>");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("USBIO");
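Throughout gpio-usbio.c a flat GPIO offset is split into a bank id and a pin within that bank. A worked example, assuming USBIO_GPIOSPERBANK is 32 (the real value comes from <linux/usb/usbio.h>):

	/* offset 37 with 32 pins per bank: */
	unsigned int offset = 37;
	unsigned int bankid = offset / 32;	/* 1 -> second bank */
	unsigned int pin = offset % 32;		/* 5 -> sixth pin of that bank */

	/* the read path then extracts the level with:
	 * (le32_to_cpu(gbuf.value) >> pin) & 1
	 */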
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 7de0d5b53d56..aa8586d8a787 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -28,7 +29,7 @@ struct fsl_gpio_soc_data {
};
struct vf610_gpio_port {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *base;
void __iomem *gpio_base;
const struct fsl_gpio_soc_data *sdata;
@@ -108,7 +109,7 @@ static void vf610_gpio_irq_handler(struct irq_desc *desc)
for_each_set_bit(pin, &irq_isfr, VF610_GPIO_PER_PORT) {
vf610_gpio_writel(BIT(pin), port->base + PORT_ISFR);
- generic_handle_domain_irq(port->gc.irq.domain, pin);
+ generic_handle_domain_irq(port->chip.gc.irq.domain, pin);
}
chained_irq_exit(chip, desc);
@@ -214,6 +215,7 @@ static void vf610_gpio_disable_clk(void *data)
static int vf610_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct vf610_gpio_port *port;
struct gpio_chip *gc;
@@ -293,22 +295,27 @@ static int vf610_gpio_probe(struct platform_device *pdev)
return ret;
}
- gc = &port->gc;
- flags = BGPIOF_PINCTRL_BACKEND;
+ gc = &port->chip.gc;
+ flags = GPIO_GENERIC_PINCTRL_BACKEND;
/*
* We only read the output register for current value on output
* lines if the direction register is available so we can switch
* direction.
*/
if (port->sdata->have_paddr)
- flags |= BGPIOF_READ_OUTPUT_REG_SET;
- ret = bgpio_init(gc, dev, 4,
- port->gpio_base + GPIO_PDIR,
- port->gpio_base + GPIO_PDOR,
- NULL,
- port->sdata->have_paddr ? port->gpio_base + GPIO_PDDR : NULL,
- NULL,
- flags);
+ flags |= GPIO_GENERIC_READ_OUTPUT_REG_SET;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = port->gpio_base + GPIO_PDIR,
+ .set = port->gpio_base + GPIO_PDOR,
+ .dirout = port->sdata->have_paddr ?
+ port->gpio_base + GPIO_PDDR : NULL,
+ .flags = flags,
+ };
+
+ ret = gpio_generic_chip_init(&port->chip, &config);
if (ret)
return dev_err_probe(dev, ret, "unable to init generic GPIO\n");
gc->label = dev_name(dev);
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
index e55d28a8a66f..15e495c109d2 100644
--- a/drivers/gpio/gpio-viperboard.c
+++ b/drivers/gpio/gpio-viperboard.c
@@ -128,45 +128,50 @@ static int vprbrd_gpioa_get(struct gpio_chip *chip,
return answer;
}
-static void vprbrd_gpioa_set(struct gpio_chip *chip,
- unsigned int offset, int value)
+static int vprbrd_gpioa_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
- int ret;
+ int ret = 0;
struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
struct vprbrd *vb = gpio->vb;
struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
- if (gpio->gpioa_out & (1 << offset)) {
- if (value)
- gpio->gpioa_val |= (1 << offset);
- else
- gpio->gpioa_val &= ~(1 << offset);
-
- mutex_lock(&vb->lock);
-
- gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT;
- gamsg->clk = 0x00;
- gamsg->offset = offset;
- gamsg->t1 = 0x00;
- gamsg->t2 = 0x00;
- gamsg->invert = 0x00;
- gamsg->pwmlevel = 0x00;
- gamsg->outval = value;
- gamsg->risefall = 0x00;
- gamsg->answer = 0x00;
- gamsg->__fill = 0x00;
-
- ret = usb_control_msg(vb->usb_dev,
- usb_sndctrlpipe(vb->usb_dev, 0),
- VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT,
- 0x0000, 0x0000, gamsg,
- sizeof(struct vprbrd_gpioa_msg), VPRBRD_USB_TIMEOUT_MS);
-
- mutex_unlock(&vb->lock);
-
- if (ret != sizeof(struct vprbrd_gpioa_msg))
- dev_err(chip->parent, "usb error setting pin value\n");
+ if (!(gpio->gpioa_out & (1 << offset)))
+ return 0;
+
+ if (value)
+ gpio->gpioa_val |= (1 << offset);
+ else
+ gpio->gpioa_val &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT;
+ gamsg->clk = 0x00;
+ gamsg->offset = offset;
+ gamsg->t1 = 0x00;
+ gamsg->t2 = 0x00;
+ gamsg->invert = 0x00;
+ gamsg->pwmlevel = 0x00;
+ gamsg->outval = value;
+ gamsg->risefall = 0x00;
+ gamsg->answer = 0x00;
+ gamsg->__fill = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT,
+ 0x0000, 0x0000, gamsg,
+ sizeof(struct vprbrd_gpioa_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpioa_msg)) {
+ dev_err(chip->parent, "usb error setting pin value\n");
+ return -EREMOTEIO;
}
+
+ return 0;
}
static int vprbrd_gpioa_direction_input(struct gpio_chip *chip,
@@ -304,37 +309,42 @@ static int vprbrd_gpiob_get(struct gpio_chip *chip,
return (gpio->gpiob_val >> offset) & 0x1;
}
-static void vprbrd_gpiob_set(struct gpio_chip *chip,
- unsigned int offset, int value)
+static int vprbrd_gpiob_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
int ret;
struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
struct vprbrd *vb = gpio->vb;
struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
- if (gpio->gpiob_out & (1 << offset)) {
- if (value)
- gpio->gpiob_val |= (1 << offset);
- else
- gpio->gpiob_val &= ~(1 << offset);
+ if (!(gpio->gpiob_out & (1 << offset)))
+ return 0;
+
+ if (value)
+ gpio->gpiob_val |= (1 << offset);
+ else
+ gpio->gpiob_val &= ~(1 << offset);
- mutex_lock(&vb->lock);
+ mutex_lock(&vb->lock);
- gbmsg->cmd = VPRBRD_GPIOB_CMD_SETVAL;
- gbmsg->val = cpu_to_be16(value << offset);
- gbmsg->mask = cpu_to_be16(0x0001 << offset);
+ gbmsg->cmd = VPRBRD_GPIOB_CMD_SETVAL;
+ gbmsg->val = cpu_to_be16(value << offset);
+ gbmsg->mask = cpu_to_be16(0x0001 << offset);
- ret = usb_control_msg(vb->usb_dev,
- usb_sndctrlpipe(vb->usb_dev, 0),
- VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT,
- 0x0000, 0x0000, gbmsg,
- sizeof(struct vprbrd_gpiob_msg), VPRBRD_USB_TIMEOUT_MS);
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT,
+ 0x0000, 0x0000, gbmsg,
+ sizeof(struct vprbrd_gpiob_msg),
+ VPRBRD_USB_TIMEOUT_MS);
- mutex_unlock(&vb->lock);
+ mutex_unlock(&vb->lock);
- if (ret != sizeof(struct vprbrd_gpiob_msg))
- dev_err(chip->parent, "usb error setting pin value\n");
+ if (ret != sizeof(struct vprbrd_gpiob_msg)) {
+ dev_err(chip->parent, "usb error setting pin value\n");
+ return -EREMOTEIO;
}
+
+ return 0;
}
static int vprbrd_gpiob_direction_input(struct gpio_chip *chip,
@@ -368,16 +378,14 @@ static int vprbrd_gpiob_direction_output(struct gpio_chip *chip,
gpio->gpiob_out |= (1 << offset);
mutex_lock(&vb->lock);
-
ret = vprbrd_gpiob_setdir(vb, offset, 1);
- if (ret)
- dev_err(chip->parent, "usb error setting pin to output\n");
-
mutex_unlock(&vb->lock);
+ if (ret) {
+ dev_err(chip->parent, "usb error setting pin to output\n");
+ return ret;
+ }
- vprbrd_gpiob_set(chip, offset, value);
-
- return ret;
+ return vprbrd_gpiob_set(chip, offset, value);
}
/* ----- end of gpio b chip ---------------------------------------------- */
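Both viperboard setters now flatten the nested if into an early return and report short USB control transfers as errors. A hypothetical helper capturing the convention (the patch open-codes it, mapping both short and failed transfers to -EREMOTEIO):

static int vprbrd_check_xfer(int ret, size_t expected)
{
	/* usb_control_msg() returns bytes transferred or a negative errno */
	if (ret != expected)
		return -EREMOTEIO;	/* short or failed transfer */

	return 0;
}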
diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c
index ac39da17a29b..17e040991e46 100644
--- a/drivers/gpio/gpio-virtio.c
+++ b/drivers/gpio/gpio-virtio.c
@@ -194,11 +194,12 @@ static int virtio_gpio_get(struct gpio_chip *gc, unsigned int gpio)
return ret ? ret : value;
}
-static void virtio_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+static int virtio_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
struct virtio_gpio *vgpio = gpiochip_get_data(gc);
- virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_VALUE, gpio, value, NULL);
+ return virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_VALUE, gpio, value,
+ NULL);
}
/* Interrupt handling */
@@ -526,7 +527,6 @@ static const char **virtio_gpio_get_names(struct virtio_gpio *vgpio,
static int virtio_gpio_probe(struct virtio_device *vdev)
{
- struct virtio_gpio_config config;
struct device *dev = &vdev->dev;
struct virtio_gpio *vgpio;
struct irq_chip *gpio_irq_chip;
@@ -539,9 +539,11 @@ static int virtio_gpio_probe(struct virtio_device *vdev)
return -ENOMEM;
/* Read configuration */
- virtio_cread_bytes(vdev, 0, &config, sizeof(config));
- gpio_names_size = le32_to_cpu(config.gpio_names_size);
- ngpio = le16_to_cpu(config.ngpio);
+ gpio_names_size =
+ virtio_cread32(vdev, offsetof(struct virtio_gpio_config,
+ gpio_names_size));
+ ngpio = virtio_cread16(vdev, offsetof(struct virtio_gpio_config,
+ ngpio));
if (!ngpio) {
dev_err(dev, "Number of GPIOs can't be zero\n");
return -EINVAL;
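The virtio probe switches from one bulk virtio_cread_bytes() of the whole config structure to typed per-field reads. virtio_cread32()/virtio_cread16() perform the transport-appropriate byte swapping internally, which is why the manual le32_to_cpu()/le16_to_cpu() calls disappear. The shape of the new reads:

	u32 gpio_names_size = virtio_cread32(vdev,
			offsetof(struct virtio_gpio_config, gpio_names_size));
	u16 ngpio = virtio_cread16(vdev,
			offsetof(struct virtio_gpio_config, ngpio));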
diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c
index eab6726953b4..37f2ce20f1ae 100644
--- a/drivers/gpio/gpio-virtuser.c
+++ b/drivers/gpio/gpio-virtuser.c
@@ -215,9 +215,7 @@ static int gpio_virtuser_set_array_value(struct gpio_descs *descs,
struct gpio_virtuser_irq_work_context ctx;
if (!atomic)
- return gpiod_set_array_value_cansleep(descs->ndescs,
- descs->desc,
- descs->info, values);
+ return gpiod_multi_set_value_cansleep(descs, values);
gpio_virtuser_init_irq_work_context(&ctx);
ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_set_value_array_atomic);
@@ -502,9 +500,7 @@ static int gpio_virtuser_value_set(void *data, u64 val)
if (val > 1)
return -EINVAL;
- gpiod_set_value_cansleep(ld->ad.desc, (int)val);
-
- return 0;
+ return gpiod_set_value_cansleep(ld->ad.desc, (int)val);
}
DEFINE_DEBUGFS_ATTRIBUTE(gpio_virtuser_value_fops,
@@ -545,7 +541,7 @@ static void gpio_virtuser_set_value_atomic(struct irq_work *work)
struct gpio_virtuser_irq_work_context *ctx =
to_gpio_virtuser_irq_work_context(work);
- gpiod_set_value(ctx->desc, ctx->val);
+ ctx->ret = gpiod_set_value(ctx->desc, ctx->val);
complete(&ctx->work_completion);
}
@@ -564,7 +560,7 @@ static int gpio_virtuser_value_atomic_set(void *data, u64 val)
gpio_virtuser_irq_work_queue_sync(&ctx);
- return 0;
+ return ctx.ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(gpio_virtuser_value_atomic_fops,
diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c
index 5bd965c18a46..6d5d829634ad 100644
--- a/drivers/gpio/gpio-visconti.c
+++ b/drivers/gpio/gpio-visconti.c
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -32,7 +33,7 @@
struct visconti_gpio {
void __iomem *base;
spinlock_t lock; /* protect gpio register */
- struct gpio_chip gpio_chip;
+ struct gpio_generic_chip chip;
struct device *dev;
};
@@ -158,6 +159,7 @@ static const struct irq_chip visconti_gpio_irq_chip = {
static int visconti_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct visconti_gpio *priv;
struct gpio_irq_chip *girq;
@@ -189,19 +191,22 @@ static int visconti_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
- ret = bgpio_init(&priv->gpio_chip, dev, 4,
- priv->base + GPIO_IDATA,
- priv->base + GPIO_OSET,
- priv->base + GPIO_OCLR,
- priv->base + GPIO_DIR,
- NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = priv->base + GPIO_IDATA,
+ .set = priv->base + GPIO_OSET,
+ .clr = priv->base + GPIO_OCLR,
+ .dirout = priv->base + GPIO_DIR,
+ };
+
+ ret = gpio_generic_chip_init(&priv->chip, &config);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
}
- girq = &priv->gpio_chip.irq;
+ girq = &priv->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &visconti_gpio_irq_chip);
girq->fwnode = dev_fwnode(dev);
girq->parent_domain = parent;
@@ -210,7 +215,7 @@ static int visconti_gpio_probe(struct platform_device *pdev)
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
- return devm_gpiochip_add_data(dev, &priv->gpio_chip, priv);
+ return devm_gpiochip_add_data(dev, &priv->chip.gc, priv);
}
static const struct of_device_id visconti_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 8fd6c3913d69..84b3a973a503 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -127,8 +127,7 @@ static int vx855gpio_get(struct gpio_chip *gpio, unsigned int nr)
return ret;
}
-static void vx855gpio_set(struct gpio_chip *gpio, unsigned int nr,
- int val)
+static int vx855gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
{
struct vx855_gpio *vg = gpiochip_get_data(gpio);
unsigned long flags;
@@ -136,7 +135,7 @@ static void vx855gpio_set(struct gpio_chip *gpio, unsigned int nr,
/* True GPI cannot be switched to output mode */
if (nr < NR_VX855_GPI)
- return;
+ return -EPERM;
spin_lock_irqsave(&vg->lock, flags);
reg_out = inl(vg->io_gpo);
@@ -153,6 +152,8 @@ static void vx855gpio_set(struct gpio_chip *gpio, unsigned int nr,
}
outl(reg_out, vg->io_gpo);
spin_unlock_irqrestore(&vg->lock, flags);
+
+ return 0;
}
static int vx855gpio_direction_output(struct gpio_chip *gpio,
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
index 2bba27b13947..572b85e77370 100644
--- a/drivers/gpio/gpio-wcd934x.c
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -46,9 +46,12 @@ static int wcd_gpio_direction_output(struct gpio_chip *chip, unsigned int pin,
int val)
{
struct wcd_gpio_data *data = gpiochip_get_data(chip);
+ int ret;
- regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET,
- WCD_PIN_MASK(pin), WCD_PIN_MASK(pin));
+ ret = regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET,
+ WCD_PIN_MASK(pin), WCD_PIN_MASK(pin));
+ if (ret)
+ return ret;
return regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET,
WCD_PIN_MASK(pin),
@@ -65,12 +68,13 @@ static int wcd_gpio_get(struct gpio_chip *chip, unsigned int pin)
return !!(value & WCD_PIN_MASK(pin));
}
-static void wcd_gpio_set(struct gpio_chip *chip, unsigned int pin, int val)
+static int wcd_gpio_set(struct gpio_chip *chip, unsigned int pin, int val)
{
struct wcd_gpio_data *data = gpiochip_get_data(chip);
- regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET,
- WCD_PIN_MASK(pin), val ? WCD_PIN_MASK(pin) : 0);
+ return regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET,
+ WCD_PIN_MASK(pin),
+ val ? WCD_PIN_MASK(pin) : 0);
}
static int wcd_gpio_probe(struct platform_device *pdev)
@@ -99,7 +103,7 @@ static int wcd_gpio_probe(struct platform_device *pdev)
chip->base = -1;
chip->ngpio = WCD934X_NPINS;
chip->label = dev_name(dev);
- chip->can_sleep = false;
+ chip->can_sleep = true;
return devm_gpiochip_add_data(dev, chip, data);
}
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index 1ec24f6f9300..4a5e20e936a9 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -200,18 +200,15 @@ static int wcove_gpio_get(struct gpio_chip *chip, unsigned int gpio)
return val & 0x1;
}
-static void wcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
+static int wcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
{
struct wcove_gpio *wg = gpiochip_get_data(chip);
int reg = to_reg(gpio, CTRL_OUT);
if (reg < 0)
- return;
+ return 0;
- if (value)
- regmap_set_bits(wg->regmap, reg, 1);
- else
- regmap_clear_bits(wg->regmap, reg, 1);
+ return regmap_assign_bits(wg->regmap, reg, 1, value);
}
static int wcove_gpio_set_config(struct gpio_chip *chip, unsigned int gpio,
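The wcove hunk above also swaps an open-coded if/else over regmap_set_bits()/regmap_clear_bits() for regmap_assign_bits(), which sets or clears the masked bits according to a boolean and returns the regmap error. Its open-coded equivalent, for reference:

#include <linux/regmap.h>

/* regmap_assign_bits(map, reg, mask, value) is shorthand for: */
static int assign_bits_open_coded(struct regmap *map, unsigned int reg,
                                  unsigned int mask, bool value)
{
        if (value)
                return regmap_set_bits(map, reg, mask);

        return regmap_clear_bits(map, reg, mask);
}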
diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c
index 4b61d975cc0e..dcfda738fd69 100644
--- a/drivers/gpio/gpio-winbond.c
+++ b/drivers/gpio/gpio-winbond.c
@@ -458,17 +458,19 @@ static int winbond_gpio_direction_out(struct gpio_chip *gc,
return 0;
}
-static void winbond_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int val)
+static int winbond_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int val)
{
unsigned long *base = gpiochip_get_data(gc);
const struct winbond_gpio_info *info;
+ int ret;
if (!winbond_gpio_get_info(&offset, &info))
- return;
+ return -EACCES;
- if (winbond_sio_enter(*base) != 0)
- return;
+ ret = winbond_sio_enter(*base);
+ if (ret)
+ return ret;
winbond_sio_select_logical(*base, info->dev);
@@ -481,6 +483,8 @@ static void winbond_gpio_set(struct gpio_chip *gc, unsigned int offset,
winbond_sio_reg_bclear(*base, info->datareg, offset);
winbond_sio_leave(*base);
+
+ return 0;
}
static struct gpio_chip winbond_gpio_chip = {
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index 61bb83a1e8ae..489479d6f32b 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -58,13 +58,14 @@ static int wm831x_gpio_get(struct gpio_chip *chip, unsigned offset)
return 0;
}
-static void wm831x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int wm831x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct wm831x_gpio *wm831x_gpio = gpiochip_get_data(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
- wm831x_set_bits(wm831x, WM831X_GPIO_LEVEL, 1 << offset,
- value << offset);
+ return wm831x_set_bits(wm831x, WM831X_GPIO_LEVEL, 1 << offset,
+ value << offset);
}
static int wm831x_gpio_direction_out(struct gpio_chip *chip,
@@ -85,9 +86,7 @@ static int wm831x_gpio_direction_out(struct gpio_chip *chip,
return ret;
/* Can only set GPIO state once it's in output mode */
- wm831x_gpio_set(chip, offset, value);
-
- return 0;
+ return wm831x_gpio_set(chip, offset, value);
}
static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -160,7 +159,6 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
int i, tristated;
for (i = 0; i < chip->ngpio; i++) {
- int gpio = i + chip->base;
int reg;
const char *pull, *powerdomain;
@@ -176,13 +174,13 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
}
seq_printf(s, " gpio-%-3d (%-20.20s) ",
- gpio, label ?: "Unrequested");
+ i, label ?: "Unrequested");
reg = wm831x_reg_read(wm831x, WM831X_GPIO1_CONTROL + i);
if (reg < 0) {
dev_err(wm831x->dev,
"GPIO control %d read failed: %d\n",
- gpio, reg);
+ i, reg);
seq_putc(s, '\n');
continue;
}
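The wm831x dbg_show change (repeated for wm8994 below) drops chip->base from the printed numbers: GPIO bases are dynamically allocated these days, so the debugfs output now reports the stable hardware offset within the chip. Reduced to its essentials, with a hypothetical qux_* driver:

#include <linux/gpio/driver.h>
#include <linux/seq_file.h>

static void qux_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
        unsigned int i;

        for (i = 0; i < chip->ngpio; i++)
                seq_printf(s, " gpio-%-3u\n", i);       /* was: chip->base + i */
}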
diff --git a/drivers/gpio/gpio-wm8350.c b/drivers/gpio/gpio-wm8350.c
index 2421cf606ed6..46923b23a72e 100644
--- a/drivers/gpio/gpio-wm8350.c
+++ b/drivers/gpio/gpio-wm8350.c
@@ -48,15 +48,16 @@ static int wm8350_gpio_get(struct gpio_chip *chip, unsigned offset)
return 0;
}
-static void wm8350_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int wm8350_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct wm8350_gpio_data *wm8350_gpio = gpiochip_get_data(chip);
struct wm8350 *wm8350 = wm8350_gpio->wm8350;
if (value)
- wm8350_set_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset);
- else
- wm8350_clear_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset);
+ return wm8350_set_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset);
+
+ return wm8350_clear_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset);
}
static int wm8350_gpio_direction_out(struct gpio_chip *chip,
@@ -72,9 +73,7 @@ static int wm8350_gpio_direction_out(struct gpio_chip *chip,
return ret;
/* Don't have an atomic direction/value setup */
- wm8350_gpio_set(chip, offset, value);
-
- return 0;
+ return wm8350_gpio_set(chip, offset, value);
}
static int wm8350_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index bf05c9b5882b..a0665cf3ff2f 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -89,7 +89,8 @@ static int wm8994_gpio_direction_out(struct gpio_chip *chip,
WM8994_GPN_DIR | WM8994_GPN_LVL, value);
}
-static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int wm8994_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct wm8994_gpio *wm8994_gpio = gpiochip_get_data(chip);
struct wm8994 *wm8994 = wm8994_gpio->wm8994;
@@ -97,7 +98,8 @@ static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
if (value)
value = WM8994_GPN_LVL;
- wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset, WM8994_GPN_LVL, value);
+ return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset, WM8994_GPN_LVL,
+ value);
}
static int wm8994_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
@@ -192,7 +194,6 @@ static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
int i;
for (i = 0; i < chip->ngpio; i++) {
- int gpio = i + chip->base;
int reg;
/* We report the GPIO even if it's not requested since
@@ -206,14 +207,13 @@ static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
continue;
}
- seq_printf(s, " gpio-%-3d (%-20.20s) ", gpio,
+ seq_printf(s, " gpio-%-3d (%-20.20s) ", i,
label ?: "Unrequested");
reg = wm8994_reg_read(wm8994, WM8994_GPIO_1 + i);
if (reg < 0) {
dev_err(wm8994->dev,
- "GPIO control %d read failed: %d\n",
- gpio, reg);
+ "GPIO control %d read failed: %d\n", i, reg);
seq_printf(s, "\n");
continue;
}
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index b51b1fa726bb..661259f026e1 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include "gpiolib-acpi.h"
@@ -40,7 +41,7 @@
/**
* struct xgene_gpio_sb - GPIO-Standby private data structure.
- * @gc: memory-mapped GPIO controllers.
+ * @chip: Generic GPIO chip data
* @regs: GPIO register base offset
* @irq_domain: GPIO interrupt domain
* @irq_start: first GPIO pin that supports interrupts
@@ -48,7 +49,7 @@
* @parent_irq_base: Start parent HWIRQ
*/
struct xgene_gpio_sb {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *regs;
struct irq_domain *irq_domain;
u16 irq_start;
@@ -62,14 +63,15 @@ struct xgene_gpio_sb {
static void xgene_gpio_set_bit(struct gpio_chip *gc,
void __iomem *reg, u32 gpio, int val)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
u32 data;
- data = gc->read_reg(reg);
+ data = gpio_generic_read_reg(chip, reg);
if (val)
data |= GPIO_MASK(gpio);
else
data &= ~GPIO_MASK(gpio);
- gc->write_reg(reg, data);
+ gpio_generic_write_reg(chip, reg, data);
}
static int xgene_gpio_sb_irq_set_type(struct irq_data *d, unsigned int type)
@@ -91,9 +93,9 @@ static int xgene_gpio_sb_irq_set_type(struct irq_data *d, unsigned int type)
break;
}
- xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
+ xgene_gpio_set_bit(&priv->chip.gc, priv->regs + MPA_GPIO_SEL_LO,
gpio * 2, 1);
- xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_INT_LVL,
+ xgene_gpio_set_bit(&priv->chip.gc, priv->regs + MPA_GPIO_INT_LVL,
d->hwirq, lvl_type);
/* Propagate IRQ type setting to parent */
@@ -109,14 +111,14 @@ static void xgene_gpio_sb_irq_mask(struct irq_data *d)
irq_chip_mask_parent(d);
- gpiochip_disable_irq(&priv->gc, d->hwirq);
+ gpiochip_disable_irq(&priv->chip.gc, d->hwirq);
}
static void xgene_gpio_sb_irq_unmask(struct irq_data *d)
{
struct xgene_gpio_sb *priv = irq_data_get_irq_chip_data(d);
- gpiochip_enable_irq(&priv->gc, d->hwirq);
+ gpiochip_enable_irq(&priv->chip.gc, d->hwirq);
irq_chip_unmask_parent(d);
}
@@ -155,15 +157,15 @@ static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
int ret;
- ret = gpiochip_lock_as_irq(&priv->gc, gpio);
+ ret = gpiochip_lock_as_irq(&priv->chip.gc, gpio);
if (ret) {
- dev_err(priv->gc.parent,
+ dev_err(priv->chip.gc.parent,
"Unable to configure XGene GPIO standby pin %d as IRQ\n",
gpio);
return ret;
}
- xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
+ xgene_gpio_set_bit(&priv->chip.gc, priv->regs + MPA_GPIO_SEL_LO,
gpio * 2, 1);
return 0;
}
@@ -174,8 +176,8 @@ static void xgene_gpio_sb_domain_deactivate(struct irq_domain *d,
struct xgene_gpio_sb *priv = d->host_data;
u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
- gpiochip_unlock_as_irq(&priv->gc, gpio);
- xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
+ gpiochip_unlock_as_irq(&priv->chip.gc, gpio);
+ xgene_gpio_set_bit(&priv->chip.gc, priv->regs + MPA_GPIO_SEL_LO,
gpio * 2, 0);
}
@@ -237,6 +239,7 @@ static const struct irq_domain_ops xgene_gpio_sb_domain_ops = {
static int xgene_gpio_sb_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct xgene_gpio_sb *priv;
int ret;
void __iomem *regs;
@@ -263,14 +266,19 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
return -ENODEV;
}
- ret = bgpio_init(&priv->gc, &pdev->dev, 4,
- regs + MPA_GPIO_IN_ADDR,
- regs + MPA_GPIO_OUT_ADDR, NULL,
- regs + MPA_GPIO_OE_ADDR, NULL, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = regs + MPA_GPIO_IN_ADDR,
+ .set = regs + MPA_GPIO_OUT_ADDR,
+ .dirout = regs + MPA_GPIO_OE_ADDR,
+ };
+
+ ret = gpio_generic_chip_init(&priv->chip, &config);
if (ret)
return ret;
- priv->gc.to_irq = xgene_gpio_sb_to_irq;
+ priv->chip.gc.to_irq = xgene_gpio_sb_to_irq;
/* Retrieve start irq pin, use default if property not found */
priv->irq_start = XGENE_DFLT_IRQ_START_PIN;
@@ -283,12 +291,12 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
priv->nirq = val32;
/* Retrieve number gpio, use default if property not found */
- priv->gc.ngpio = XGENE_DFLT_MAX_NGPIO;
+ priv->chip.gc.ngpio = XGENE_DFLT_MAX_NGPIO;
if (!device_property_read_u32(&pdev->dev, "apm,nr-gpios", &val32))
- priv->gc.ngpio = val32;
+ priv->chip.gc.ngpio = val32;
dev_info(&pdev->dev, "Support %d gpios, %d irqs start from pin %d\n",
- priv->gc.ngpio, priv->nirq, priv->irq_start);
+ priv->chip.gc.ngpio, priv->nirq, priv->irq_start);
platform_set_drvdata(pdev, priv);
@@ -298,9 +306,9 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
if (!priv->irq_domain)
return -ENODEV;
- priv->gc.irq.domain = priv->irq_domain;
+ priv->chip.gc.irq.domain = priv->irq_domain;
- ret = devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
+ ret = devm_gpiochip_add_data(&pdev->dev, &priv->chip.gc, priv);
if (ret) {
dev_err(&pdev->dev,
"failed to register X-Gene GPIO Standby driver\n");
@@ -311,7 +319,7 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "X-Gene GPIO Standby driver registered\n");
/* Register interrupt handlers for GPIO signaled ACPI Events */
- acpi_gpiochip_request_interrupts(&priv->gc);
+ acpi_gpiochip_request_interrupts(&priv->chip.gc);
return ret;
}
@@ -320,7 +328,7 @@ static void xgene_gpio_sb_remove(struct platform_device *pdev)
{
struct xgene_gpio_sb *priv = platform_get_drvdata(pdev);
- acpi_gpiochip_free_interrupts(&priv->gc);
+ acpi_gpiochip_free_interrupts(&priv->chip.gc);
irq_domain_remove(priv->irq_domain);
}
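The xgene-sb read-modify-write helper is worth noting: with the generic-chip conversion, drivers no longer poke gc->read_reg/gc->write_reg directly but recover the wrapper via to_gpio_generic_chip() and use the gpio_generic_read_reg()/gpio_generic_write_reg() accessors. Condensed into a sketch (quux_* names and the reg/bit parameters are hypothetical):

#include <linux/bits.h>
#include <linux/gpio/generic.h>

static void quux_rmw_bit(struct gpio_chip *gc, void __iomem *reg,
                         unsigned int bit, bool set)
{
        struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
        unsigned long data;

        data = gpio_generic_read_reg(chip, reg);
        if (set)
                data |= BIT(bit);
        else
                data &= ~BIT(bit);
        gpio_generic_write_reg(chip, reg, data);
}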
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index fb4b0c67aeef..809668449dbe 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -62,7 +62,7 @@ static void __xgene_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
iowrite32(setval, chip->base + bank_offset);
}
-static void xgene_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+static int xgene_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct xgene_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
@@ -70,6 +70,8 @@ static void xgene_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
spin_lock_irqsave(&chip->lock, flags);
__xgene_gpio_set(gc, offset, val);
spin_unlock_irqrestore(&chip->lock, flags);
+
+ return 0;
}
static int xgene_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
@@ -128,7 +130,7 @@ static int xgene_gpio_dir_out(struct gpio_chip *gc,
return 0;
}
-static __maybe_unused int xgene_gpio_suspend(struct device *dev)
+static int xgene_gpio_suspend(struct device *dev)
{
struct xgene_gpio *gpio = dev_get_drvdata(dev);
unsigned long bank_offset;
@@ -141,7 +143,7 @@ static __maybe_unused int xgene_gpio_suspend(struct device *dev)
return 0;
}
-static __maybe_unused int xgene_gpio_resume(struct device *dev)
+static int xgene_gpio_resume(struct device *dev)
{
struct xgene_gpio *gpio = dev_get_drvdata(dev);
unsigned long bank_offset;
@@ -154,7 +156,7 @@ static __maybe_unused int xgene_gpio_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume);
static int xgene_gpio_probe(struct platform_device *pdev)
{
@@ -202,7 +204,7 @@ static struct platform_driver xgene_gpio_driver = {
.name = "xgene-gpio",
.of_match_table = xgene_gpio_of_match,
.acpi_match_table = ACPI_PTR(xgene_gpio_acpi_match),
- .pm = &xgene_gpio_pm,
+ .pm = pm_sleep_ptr(&xgene_gpio_pm),
},
.probe = xgene_gpio_probe,
};
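The xgene PM rework is the standard modernization: DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr() keep the callbacks visible to the compiler even with CONFIG_PM_SLEEP=n, so they are dead-code-eliminated rather than hidden behind __maybe_unused. The resulting wiring, sketched with a hypothetical corge driver:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int corge_suspend(struct device *dev)
{
        return 0;       /* save state */
}

static int corge_resume(struct device *dev)
{
        return 0;       /* restore state */
}

static DEFINE_SIMPLE_DEV_PM_OPS(corge_pm, corge_suspend, corge_resume);

static struct platform_driver corge_driver = {
        .driver = {
                .name = "corge",
                /* NULL when CONFIG_PM_SLEEP=n; callbacks get dead-stripped. */
                .pm = pm_sleep_ptr(&corge_pm),
        },
};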
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
index 93544e98ccbd..77eb29dcc217 100644
--- a/drivers/gpio/gpio-xgs-iproc.c
+++ b/drivers/gpio/gpio-xgs-iproc.c
@@ -3,11 +3,12 @@
* Copyright (C) 2017 Broadcom
*/
-#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -28,7 +29,7 @@
#define IPROC_GPIO_CCA_INT_EDGE 0x24
struct iproc_gpio_chip {
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
spinlock_t lock;
struct device *dev;
void __iomem *base;
@@ -38,7 +39,7 @@ struct iproc_gpio_chip {
static inline struct iproc_gpio_chip *
to_iproc_gpio(struct gpio_chip *gc)
{
- return container_of(gc, struct iproc_gpio_chip, gc);
+ return container_of(to_gpio_generic_chip(gc), struct iproc_gpio_chip, gen_gc);
}
static void iproc_gpio_irq_ack(struct irq_data *d)
@@ -213,6 +214,7 @@ static const struct irq_chip iproc_gpio_irq_chip = {
static int iproc_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct device_node *dn = pdev->dev.of_node;
struct iproc_gpio_chip *chip;
@@ -231,21 +233,23 @@ static int iproc_gpio_probe(struct platform_device *pdev)
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
- ret = bgpio_init(&chip->gc, dev, 4,
- chip->base + IPROC_GPIO_CCA_DIN,
- chip->base + IPROC_GPIO_CCA_DOUT,
- NULL,
- chip->base + IPROC_GPIO_CCA_OUT_EN,
- NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = chip->base + IPROC_GPIO_CCA_DIN,
+ .set = chip->base + IPROC_GPIO_CCA_DOUT,
+ .dirout = chip->base + IPROC_GPIO_CCA_OUT_EN,
+ };
+
+ ret = gpio_generic_chip_init(&chip->gen_gc, &config);
if (ret) {
dev_err(dev, "unable to init GPIO chip\n");
return ret;
}
- chip->gc.label = dev_name(dev);
+ chip->gen_gc.gc.label = dev_name(dev);
if (!of_property_read_u32(dn, "ngpios", &num_gpios))
- chip->gc.ngpio = num_gpios;
+ chip->gen_gc.gc.ngpio = num_gpios;
irq = platform_get_irq(pdev, 0);
if (irq > 0) {
@@ -266,13 +270,13 @@ static int iproc_gpio_probe(struct platform_device *pdev)
* a flow-handler because the irq is shared.
*/
ret = devm_request_irq(dev, irq, iproc_gpio_irq_handler,
- IRQF_SHARED, chip->gc.label, &chip->gc);
+ IRQF_SHARED, chip->gen_gc.gc.label, &chip->gen_gc.gc);
if (ret) {
dev_err(dev, "Fail to request IRQ%d: %d\n", irq, ret);
return ret;
}
- girq = &chip->gc.irq;
+ girq = &chip->gen_gc.gc.irq;
gpio_irq_chip_set_chip(girq, &iproc_gpio_irq_chip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
@@ -282,7 +286,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
girq->handler = handle_simple_irq;
}
- ret = devm_gpiochip_add_data(dev, &chip->gc, chip);
+ ret = devm_gpiochip_add_data(dev, &chip->gen_gc.gc, chip);
if (ret) {
dev_err(dev, "unable to add GPIO chip\n");
return ret;
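Note how to_iproc_gpio() changes above: with struct gpio_chip now nested one level deeper inside struct gpio_generic_chip, container_of() has to go through to_gpio_generic_chip() first. The general shape, using a hypothetical grault_* driver:

#include <linux/gpio/generic.h>

struct grault_gpio {
        struct gpio_generic_chip gen_gc;
        /* driver-private fields ... */
};

static inline struct grault_gpio *to_grault_gpio(struct gpio_chip *gc)
{
        /* gc -> enclosing gpio_generic_chip -> enclosing driver struct */
        return container_of(to_gpio_generic_chip(gc),
                            struct grault_gpio, gen_gc);
}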
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index c58a7e1349b4..be4b4d730547 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -148,7 +148,7 @@ static int xgpio_get(struct gpio_chip *gc, unsigned int gpio)
* This function writes the specified value in to the specified signal of the
* GPIO device.
*/
-static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
@@ -162,6 +162,8 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
xgpio_write_ch(chip, XGPIO_DATA_OFFSET, bit, chip->state);
raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
+
+ return 0;
}
/**
@@ -173,8 +175,8 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
* This function writes the specified values into the specified signals of the
* GPIO devices.
*/
-static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
DECLARE_BITMAP(hw_mask, 64);
DECLARE_BITMAP(hw_bits, 64);
@@ -194,6 +196,8 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
bitmap_copy(chip->state, state, 64);
raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
+
+ return 0;
}
/**
@@ -282,7 +286,7 @@ static void xgpio_free(struct gpio_chip *chip, unsigned int offset)
pm_runtime_put(chip->parent);
}
-static int __maybe_unused xgpio_suspend(struct device *dev)
+static int xgpio_suspend(struct device *dev)
{
struct xgpio_instance *gpio = dev_get_drvdata(dev);
struct irq_data *data = irq_get_irq_data(gpio->irq);
@@ -323,7 +327,7 @@ static void xgpio_irq_ack(struct irq_data *irq_data)
{
}
-static int __maybe_unused xgpio_resume(struct device *dev)
+static int xgpio_resume(struct device *dev)
{
struct xgpio_instance *gpio = dev_get_drvdata(dev);
struct irq_data *data = irq_get_irq_data(gpio->irq);
@@ -339,7 +343,7 @@ static int __maybe_unused xgpio_resume(struct device *dev)
return 0;
}
-static int __maybe_unused xgpio_runtime_suspend(struct device *dev)
+static int xgpio_runtime_suspend(struct device *dev)
{
struct xgpio_instance *gpio = dev_get_drvdata(dev);
@@ -348,7 +352,7 @@ static int __maybe_unused xgpio_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused xgpio_runtime_resume(struct device *dev)
+static int xgpio_runtime_resume(struct device *dev)
{
struct xgpio_instance *gpio = dev_get_drvdata(dev);
@@ -356,9 +360,8 @@ static int __maybe_unused xgpio_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops xgpio_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(xgpio_suspend, xgpio_resume)
- SET_RUNTIME_PM_OPS(xgpio_runtime_suspend,
- xgpio_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(xgpio_suspend, xgpio_resume)
+ RUNTIME_PM_OPS(xgpio_runtime_suspend, xgpio_runtime_resume, NULL)
};
/**
@@ -678,7 +681,7 @@ static struct platform_driver xgpio_plat_driver = {
.driver = {
.name = "gpio-xilinx",
.of_match_table = xgpio_of_match,
- .pm = &xgpio_dev_pm_ops,
+ .pm = pm_ptr(&xgpio_dev_pm_ops),
},
};
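For drivers with both system-sleep and runtime PM (xilinx above, zynq below), the equivalent conversion uses SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS() and references the table through pm_ptr(), which compiles to NULL when CONFIG_PM=n. Sketch with hypothetical stub callbacks:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int garply_suspend(struct device *dev)
{
        return 0;       /* save context */
}

static int garply_resume(struct device *dev)
{
        return 0;       /* restore context */
}

static int garply_rt_suspend(struct device *dev)
{
        return 0;       /* gate clocks */
}

static int garply_rt_resume(struct device *dev)
{
        return 0;       /* ungate clocks */
}

static const struct dev_pm_ops garply_pm_ops = {
        SYSTEM_SLEEP_PM_OPS(garply_suspend, garply_resume)
        RUNTIME_PM_OPS(garply_rt_suspend, garply_rt_resume, NULL)
};

/* In the driver struct: .pm = pm_ptr(&garply_pm_ops) -- NULL when CONFIG_PM=n. */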
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index b4b52213bcd9..aede6324387f 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -206,7 +206,6 @@ static int xlp_gpio_dir_output(struct gpio_chip *gc, unsigned gpio, int state)
{
struct xlp_gpio_priv *priv = gpiochip_get_data(gc);
- BUG_ON(gpio >= gc->ngpio);
xlp_gpio_set_reg(priv->gpio_out_en, gpio, 0x1);
return 0;
@@ -216,7 +215,6 @@ static int xlp_gpio_dir_input(struct gpio_chip *gc, unsigned gpio)
{
struct xlp_gpio_priv *priv = gpiochip_get_data(gc);
- BUG_ON(gpio >= gc->ngpio);
xlp_gpio_set_reg(priv->gpio_out_en, gpio, 0x0);
return 0;
@@ -226,16 +224,16 @@ static int xlp_gpio_get(struct gpio_chip *gc, unsigned gpio)
{
struct xlp_gpio_priv *priv = gpiochip_get_data(gc);
- BUG_ON(gpio >= gc->ngpio);
return xlp_gpio_get_reg(priv->gpio_paddrv, gpio);
}
-static void xlp_gpio_set(struct gpio_chip *gc, unsigned gpio, int state)
+static int xlp_gpio_set(struct gpio_chip *gc, unsigned int gpio, int state)
{
struct xlp_gpio_priv *priv = gpiochip_get_data(gc);
- BUG_ON(gpio >= gc->ngpio);
xlp_gpio_set_reg(priv->gpio_paddrv, gpio, state);
+
+ return 0;
}
static int xlp_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-xra1403.c b/drivers/gpio/gpio-xra1403.c
index 842cf875bb92..7f3c98f9f902 100644
--- a/drivers/gpio/gpio-xra1403.c
+++ b/drivers/gpio/gpio-xra1403.c
@@ -102,16 +102,13 @@ static int xra1403_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(offset % 8));
}
-static void xra1403_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int xra1403_set(struct gpio_chip *chip, unsigned int offset, int value)
{
- int ret;
struct xra1403 *xra = gpiochip_get_data(chip);
- ret = regmap_update_bits(xra->regmap, to_reg(XRA_OCR, offset),
- BIT(offset % 8), value ? BIT(offset % 8) : 0);
- if (ret)
- dev_err(chip->parent, "Failed to set pin: %d, ret: %d\n",
- offset, ret);
+ return regmap_update_bits(xra->regmap, to_reg(XRA_OCR, offset),
+ BIT(offset % 8),
+ value ? BIT(offset % 8) : 0);
}
#ifdef CONFIG_DEBUG_FS
@@ -138,8 +135,7 @@ static void xra1403_dbg_show(struct seq_file *s, struct gpio_chip *chip)
gcr = value[XRA_GCR + 1] << 8 | value[XRA_GCR];
gsr = value[XRA_GSR + 1] << 8 | value[XRA_GSR];
for_each_requested_gpio(chip, i, label) {
- seq_printf(s, " gpio-%-3d (%-12s) %s %s\n",
- chip->base + i, label,
+ seq_printf(s, " gpio-%-3d (%-12s) %s %s\n", i, label,
(gcr & BIT(i)) ? "in" : "out",
str_hi_lo(gsr & BIT(i)));
}
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index c8af34a6368f..4418947a10e5 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -86,12 +86,6 @@ static int xtensa_impwire_get_value(struct gpio_chip *gc, unsigned offset)
return !!(impwire & BIT(offset));
}
-static void xtensa_impwire_set_value(struct gpio_chip *gc, unsigned offset,
- int value)
-{
- BUG(); /* output only; should never be called */
-}
-
static int xtensa_expstate_get_direction(struct gpio_chip *gc, unsigned offset)
{
return GPIO_LINE_DIRECTION_OUT; /* output only */
@@ -109,7 +103,7 @@ static int xtensa_expstate_get_value(struct gpio_chip *gc, unsigned offset)
return !!(expstate & BIT(offset));
}
-static void xtensa_expstate_set_value(struct gpio_chip *gc, unsigned offset,
+static int xtensa_expstate_set_value(struct gpio_chip *gc, unsigned int offset,
int value)
{
unsigned long flags, saved_cpenable;
@@ -120,6 +114,8 @@ static void xtensa_expstate_set_value(struct gpio_chip *gc, unsigned offset,
__asm__ __volatile__("wrmsk_expstate %0, %1"
:: "a" (val), "a" (mask));
disable_cp(flags, saved_cpenable);
+
+ return 0;
}
static struct gpio_chip impwire_chip = {
@@ -128,7 +124,6 @@ static struct gpio_chip impwire_chip = {
.ngpio = 32,
.get_direction = xtensa_impwire_get_direction,
.get = xtensa_impwire_get_value,
- .set = xtensa_impwire_set_value,
};
static struct gpio_chip expstate_chip = {
diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
index d7230fd83f5d..29375bea2289 100644
--- a/drivers/gpio/gpio-zevio.c
+++ b/drivers/gpio/gpio-zevio.c
@@ -91,7 +91,7 @@ static int zevio_gpio_get(struct gpio_chip *chip, unsigned pin)
return (val >> ZEVIO_GPIO_BIT(pin)) & 0x1;
}
-static void zevio_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
+static int zevio_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
{
struct zevio_gpio *controller = gpiochip_get_data(chip);
u32 val;
@@ -105,6 +105,8 @@ static void zevio_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
zevio_gpio_port_set(controller, pin, ZEVIO_GPIO_OUTPUT, val);
spin_unlock(&controller->lock);
+
+ return 0;
}
static int zevio_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 3dae63f3ea21..97780c57ab56 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -265,8 +265,8 @@ static int zynq_gpio_get_value(struct gpio_chip *chip, unsigned int pin)
* upper 16 bits) based on the given pin number and sets the state of a
* gpio pin to the specified value. The state is either 0 or non-zero.
*/
-static void zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
- int state)
+static int zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
+ int state)
{
unsigned int reg_offset, bank_num, bank_pin_num;
struct zynq_gpio *gpio = gpiochip_get_data(chip);
@@ -290,6 +290,8 @@ static void zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
((state << bank_pin_num) | ZYNQ_GPIO_UPPER_MASK);
writel_relaxed(state, gpio->base_addr + reg_offset);
+
+ return 0;
}
/**
@@ -733,7 +735,7 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
}
}
-static int __maybe_unused zynq_gpio_suspend(struct device *dev)
+static int zynq_gpio_suspend(struct device *dev)
{
struct zynq_gpio *gpio = dev_get_drvdata(dev);
struct irq_data *data = irq_get_irq_data(gpio->irq);
@@ -754,7 +756,7 @@ static int __maybe_unused zynq_gpio_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused zynq_gpio_resume(struct device *dev)
+static int zynq_gpio_resume(struct device *dev)
{
struct zynq_gpio *gpio = dev_get_drvdata(dev);
struct irq_data *data = irq_get_irq_data(gpio->irq);
@@ -777,7 +779,7 @@ static int __maybe_unused zynq_gpio_resume(struct device *dev)
return 0;
}
-static int __maybe_unused zynq_gpio_runtime_suspend(struct device *dev)
+static int zynq_gpio_runtime_suspend(struct device *dev)
{
struct zynq_gpio *gpio = dev_get_drvdata(dev);
@@ -786,7 +788,7 @@ static int __maybe_unused zynq_gpio_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused zynq_gpio_runtime_resume(struct device *dev)
+static int zynq_gpio_runtime_resume(struct device *dev)
{
struct zynq_gpio *gpio = dev_get_drvdata(dev);
@@ -812,9 +814,8 @@ static void zynq_gpio_free(struct gpio_chip *chip, unsigned int offset)
}
static const struct dev_pm_ops zynq_gpio_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(zynq_gpio_suspend, zynq_gpio_resume)
- SET_RUNTIME_PM_OPS(zynq_gpio_runtime_suspend,
- zynq_gpio_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(zynq_gpio_suspend, zynq_gpio_resume)
+ RUNTIME_PM_OPS(zynq_gpio_runtime_suspend, zynq_gpio_runtime_resume, NULL)
};
static const struct zynq_platform_data versal_gpio_def = {
@@ -1020,7 +1021,7 @@ static void zynq_gpio_remove(struct platform_device *pdev)
static struct platform_driver zynq_gpio_driver = {
.driver = {
.name = DRIVER_NAME,
- .pm = &zynq_gpio_dev_pm_ops,
+ .pm = pm_ptr(&zynq_gpio_dev_pm_ops),
.of_match_table = zynq_gpio_of_match,
},
.probe = zynq_gpio_probe,
diff --git a/drivers/gpio/gpio-zynqmp-modepin.c b/drivers/gpio/gpio-zynqmp-modepin.c
index 2f3c9ebfa78d..5e651482e985 100644
--- a/drivers/gpio/gpio-zynqmp-modepin.c
+++ b/drivers/gpio/gpio-zynqmp-modepin.c
@@ -57,8 +57,8 @@ static int modepin_gpio_get_value(struct gpio_chip *chip, unsigned int pin)
*
* Return: 0 on success, negative error code otherwise.
*/
-static void modepin_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
- int state)
+static int modepin_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
+ int state)
{
u32 bootpin_val = 0;
int ret;
@@ -77,6 +77,8 @@ static void modepin_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
ret = zynqmp_pm_bootmode_write(bootpin_val);
if (ret)
pr_err("modepin: set value error %d for pin %d\n", ret, pin);
+
+ return ret;
}
/**
@@ -102,7 +104,7 @@ static int modepin_gpio_dir_in(struct gpio_chip *chip, unsigned int pin)
static int modepin_gpio_dir_out(struct gpio_chip *chip, unsigned int pin,
int state)
{
- return 0;
+ return modepin_gpio_set_value(chip, pin, state);
}
/**
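The modepin change above is a behavioral fix rather than churn: gpiod_direction_output() must leave the line at the requested level, so the direction_output callback now delegates to the value setter instead of returning success without touching the hardware. Generic form, with hypothetical plugh_* names:

#include <linux/gpio/driver.h>

static int plugh_gpio_set(struct gpio_chip *gc, unsigned int offset, int value);

/* direction_output must also drive the requested level. */
static int plugh_gpio_dir_out(struct gpio_chip *gc, unsigned int offset,
                              int value)
{
        return plugh_gpio_set(gc, offset, value);
}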
diff --git a/drivers/gpio/gpiolib-acpi-core.c b/drivers/gpio/gpiolib-acpi-core.c
index 12b24a717e43..83dd227dbbec 100644
--- a/drivers/gpio/gpiolib-acpi-core.c
+++ b/drivers/gpio/gpiolib-acpi-core.c
@@ -291,6 +291,19 @@ acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio, int polarity)
return GPIOD_ASIS;
}
+static void acpi_gpio_set_debounce_timeout(struct gpio_desc *desc,
+ unsigned int acpi_debounce)
+{
+ int ret;
+
+ /* ACPI uses hundredths of milliseconds units */
+ acpi_debounce *= 10;
+ ret = gpio_set_debounce_timeout(desc, acpi_debounce);
+ if (ret)
+ gpiod_warn(desc, "Failed to set debounce-timeout %u: %d\n",
+ acpi_debounce, ret);
+}
+
static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
struct acpi_resource_gpio *agpio,
unsigned int index,
@@ -300,18 +313,12 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio, polarity);
unsigned int pin = agpio->pin_table[index];
struct gpio_desc *desc;
- int ret;
desc = gpiochip_request_own_desc(chip, pin, label, polarity, flags);
if (IS_ERR(desc))
return desc;
- /* ACPI uses hundredths of milliseconds units */
- ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10);
- if (ret)
- dev_warn(chip->parent,
- "Failed to set debounce-timeout for pin 0x%04X, err %d\n",
- pin, ret);
+ acpi_gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
return desc;
}
@@ -375,8 +382,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
desc = acpi_request_own_gpiod(chip, agpio, 0, "ACPI:Event");
if (IS_ERR(desc)) {
dev_err(chip->parent,
- "Failed to request GPIO for pin 0x%04X, err %ld\n",
- pin, PTR_ERR(desc));
+ "Failed to request GPIO for pin 0x%04X, err %pe\n",
+ pin, desc);
return AE_OK;
}
@@ -942,7 +949,7 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
{
struct acpi_device *adev = to_acpi_device_node(fwnode);
bool can_fallback = acpi_can_fallback_to_crs(adev, con_id);
- struct acpi_gpio_info info;
+ struct acpi_gpio_info info = {};
struct gpio_desc *desc;
desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info);
@@ -957,6 +964,9 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
acpi_gpio_update_gpiod_flags(dflags, &info);
acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info);
+
+ acpi_gpio_set_debounce_timeout(desc, info.debounce);
+
return desc;
}
@@ -992,7 +1002,7 @@ int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id,
int ret;
for (i = 0, idx = 0; idx <= index; i++) {
- struct acpi_gpio_info info;
+ struct acpi_gpio_info info = {};
struct gpio_desc *desc;
/* Ignore -EPROBE_DEFER, it only matters if idx matches */
@@ -1089,7 +1099,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
return AE_BAD_PARAMETER;
}
- length = min_t(u16, agpio->pin_table_length, pin_index + bits);
+ length = min(agpio->pin_table_length, pin_index + bits);
for (i = pin_index; i < length; ++i) {
unsigned int pin = agpio->pin_table[i];
struct acpi_gpio_connection *conn;
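Three small things in the gpiolib-acpi-core hunks: debounce-timeout programming is factored into acpi_gpio_set_debounce_timeout() so acpi_find_gpio() can apply it too, struct acpi_gpio_info is now zero-initialized before use, and the error print switches to %pe, which renders an ERR_PTR as a symbolic errno name (with CONFIG_SYMBOLIC_ERRNAME=y) instead of a bare number. The min_t(u16, ...) also becomes a type-checked min(), avoiding a silent truncation of pin_index + bits. A tiny %pe demonstration:

#include <linux/err.h>
#include <linux/printk.h>

static void pe_demo(void)
{
        const void *err = ERR_PTR(-EPROBE_DEFER);

        pr_err("request failed, err %pe\n", err);       /* "... err -EPROBE_DEFER" */
}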
diff --git a/drivers/gpio/gpiolib-acpi-quirks.c b/drivers/gpio/gpiolib-acpi-quirks.c
index 219667315b2c..7b95d1b03361 100644
--- a/drivers/gpio/gpiolib-acpi-quirks.c
+++ b/drivers/gpio/gpiolib-acpi-quirks.c
@@ -319,6 +319,18 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
},
{
/*
+ * Same as G1619-04. New model.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1619-05"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "PNP0C50:00@8",
+ },
+ },
+ {
+ /*
* Spurious wakeups from GPIO 11
* Found in BIOS 1.04
* https://gitlab.freedesktop.org/drm/amd/-/issues/3954
@@ -331,6 +343,33 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_interrupt = "AMDI0030:00@11",
},
},
+ {
+ /*
+ * Wakeup only works when keyboard backlight is turned off
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/4169
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 15"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "AMDI0030:00@8",
+ },
+ },
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 5.35
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/4482
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ProArt PX13"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "ASCP1A00:00@8",
+ },
+ },
{} /* Terminating entry */
};
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index e6a289fa0f8f..3735c9fe1502 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -144,17 +144,17 @@ static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
{
unsigned long flags = READ_ONCE(*flagsp);
- assign_bit(FLAG_ACTIVE_LOW, &flags,
+ assign_bit(GPIOD_FLAG_ACTIVE_LOW, &flags,
lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
- assign_bit(FLAG_OPEN_DRAIN, &flags,
+ assign_bit(GPIOD_FLAG_OPEN_DRAIN, &flags,
lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
- assign_bit(FLAG_OPEN_SOURCE, &flags,
+ assign_bit(GPIOD_FLAG_OPEN_SOURCE, &flags,
lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
- assign_bit(FLAG_PULL_UP, &flags,
+ assign_bit(GPIOD_FLAG_PULL_UP, &flags,
lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
- assign_bit(FLAG_PULL_DOWN, &flags,
+ assign_bit(GPIOD_FLAG_PULL_DOWN, &flags,
lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
- assign_bit(FLAG_BIAS_DISABLE, &flags,
+ assign_bit(GPIOD_FLAG_BIAS_DISABLE, &flags,
lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
WRITE_ONCE(*flagsp, flags);
@@ -238,7 +238,7 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd,
* All line descriptors were created at once with the same
* flags so just check if the first one is really output.
*/
- if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
+ if (!test_bit(GPIOD_FLAG_IS_OUT, &lh->descs[0]->flags))
return -EPERM;
if (copy_from_user(&ghd, ip, sizeof(ghd)))
@@ -298,12 +298,13 @@ static const struct file_operations linehandle_fileops = {
#endif
};
+DEFINE_FREE(linehandle_free, struct linehandle_state *, if (!IS_ERR_OR_NULL(_T)) linehandle_free(_T))
+
static int linehandle_create(struct gpio_device *gdev, void __user *ip)
{
struct gpiohandle_request handlereq;
- struct linehandle_state *lh;
- struct file *file;
- int fd, i, ret;
+ struct linehandle_state *lh __free(linehandle_free) = NULL;
+ int i, ret;
u32 lflags;
if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
@@ -327,10 +328,8 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
lh->label = kstrndup(handlereq.consumer_label,
sizeof(handlereq.consumer_label) - 1,
GFP_KERNEL);
- if (!lh->label) {
- ret = -ENOMEM;
- goto out_free_lh;
- }
+ if (!lh->label)
+ return -ENOMEM;
}
lh->num_descs = handlereq.lines;
@@ -340,20 +339,18 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
u32 offset = handlereq.lineoffsets[i];
struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);
- if (IS_ERR(desc)) {
- ret = PTR_ERR(desc);
- goto out_free_lh;
- }
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
ret = gpiod_request_user(desc, lh->label);
if (ret)
- goto out_free_lh;
+ return ret;
lh->descs[i] = desc;
linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);
ret = gpiod_set_transitory(desc, false);
if (ret < 0)
- goto out_free_lh;
+ return ret;
/*
* Lines have to be requested explicitly for input
@@ -364,11 +361,11 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
ret = gpiod_direction_output_nonotify(desc, val);
if (ret)
- goto out_free_lh;
+ return ret;
} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
ret = gpiod_direction_input_nonotify(desc);
if (ret)
- goto out_free_lh;
+ return ret;
}
gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
@@ -377,44 +374,23 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
offset);
}
- fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
- if (fd < 0) {
- ret = fd;
- goto out_free_lh;
- }
-
- file = anon_inode_getfile("gpio-linehandle",
- &linehandle_fileops,
- lh,
- O_RDONLY | O_CLOEXEC);
- if (IS_ERR(file)) {
- ret = PTR_ERR(file);
- goto out_put_unused_fd;
- }
+ FD_PREPARE(fdf, O_RDONLY | O_CLOEXEC,
+ anon_inode_getfile("gpio-linehandle", &linehandle_fileops,
+ lh, O_RDONLY | O_CLOEXEC));
+ if (fdf.err)
+ return fdf.err;
+ retain_and_null_ptr(lh);
- handlereq.fd = fd;
- if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
- /*
- * fput() will trigger the release() callback, so do not go onto
- * the regular error cleanup path here.
- */
- fput(file);
- put_unused_fd(fd);
+ handlereq.fd = fd_prepare_fd(fdf);
+ if (copy_to_user(ip, &handlereq, sizeof(handlereq)))
return -EFAULT;
- }
- fd_install(fd, file);
+ fd_publish(fdf);
dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
lh->num_descs);
return 0;
-
-out_put_unused_fd:
- put_unused_fd(fd);
-out_free_lh:
- linehandle_free(lh);
- return ret;
}
#endif /* CONFIG_GPIO_CDEV_V1 */
@@ -599,10 +575,10 @@ static void linereq_put_event(struct linereq *lr,
static u64 line_event_timestamp(struct line *line)
{
- if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
+ if (test_bit(GPIOD_FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
return ktime_get_real_ns();
else if (IS_ENABLED(CONFIG_HTE) &&
- test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
+ test_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
return line->timestamp_ns;
return ktime_get_ns();
@@ -676,7 +652,7 @@ static enum hte_return process_hw_ts_thread(void *p)
}
le.line_seqno = line->line_seqno;
le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
- le.offset = gpio_chip_hwgpio(line->desc);
+ le.offset = gpiod_hwgpio(line->desc);
linereq_put_event(lr, &le);
@@ -700,7 +676,7 @@ static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
if (READ_ONCE(line->sw_debounced)) {
line->total_discard_seq++;
line->last_seqno = ts->seq;
- mod_delayed_work(system_wq, &line->work,
+ mod_delayed_work(system_percpu_wq, &line->work,
usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
} else {
if (unlikely(ts->seq < line->line_seqno))
@@ -725,11 +701,11 @@ static int hte_edge_setup(struct line *line, u64 eflags)
struct hte_ts_desc *hdesc = &line->hdesc;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
- flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ flags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
HTE_FALLING_EDGE_TS :
HTE_RISING_EDGE_TS;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
- flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ flags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
HTE_RISING_EDGE_TS :
HTE_FALLING_EDGE_TS;
@@ -793,7 +769,7 @@ static irqreturn_t edge_irq_thread(int irq, void *p)
line->line_seqno++;
le.line_seqno = line->line_seqno;
le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
- le.offset = gpio_chip_hwgpio(line->desc);
+ le.offset = gpiod_hwgpio(line->desc);
linereq_put_event(lr, &le);
@@ -831,7 +807,7 @@ static bool debounced_value(struct line *line)
*/
value = READ_ONCE(line->level);
- if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags))
value = !value;
return value;
@@ -841,7 +817,7 @@ static irqreturn_t debounce_irq_handler(int irq, void *p)
{
struct line *line = p;
- mod_delayed_work(system_wq, &line->work,
+ mod_delayed_work(system_percpu_wq, &line->work,
usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
return IRQ_HANDLED;
@@ -891,7 +867,7 @@ static void debounce_work_func(struct work_struct *work)
lr = line->req;
le.timestamp_ns = line_event_timestamp(line);
- le.offset = gpio_chip_hwgpio(line->desc);
+ le.offset = gpiod_hwgpio(line->desc);
#ifdef CONFIG_HTE
if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
/* discard events except the last one */
@@ -939,7 +915,7 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
return level;
if (!(IS_ENABLED(CONFIG_HTE) &&
- test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
+ test_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
irq = gpiod_to_irq(line->desc);
if (irq < 0)
return -ENXIO;
@@ -1061,10 +1037,10 @@ static int edge_detector_setup(struct line *line,
return -ENXIO;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
- irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
- irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
@@ -1237,34 +1213,34 @@ static void gpio_v2_line_config_flags_to_desc_flags(u64 lflags,
{
unsigned long flags = READ_ONCE(*flagsp);
- assign_bit(FLAG_ACTIVE_LOW, &flags,
+ assign_bit(GPIOD_FLAG_ACTIVE_LOW, &flags,
lflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
if (lflags & GPIO_V2_LINE_FLAG_OUTPUT)
- set_bit(FLAG_IS_OUT, &flags);
+ set_bit(GPIOD_FLAG_IS_OUT, &flags);
else if (lflags & GPIO_V2_LINE_FLAG_INPUT)
- clear_bit(FLAG_IS_OUT, &flags);
+ clear_bit(GPIOD_FLAG_IS_OUT, &flags);
- assign_bit(FLAG_EDGE_RISING, &flags,
+ assign_bit(GPIOD_FLAG_EDGE_RISING, &flags,
lflags & GPIO_V2_LINE_FLAG_EDGE_RISING);
- assign_bit(FLAG_EDGE_FALLING, &flags,
+ assign_bit(GPIOD_FLAG_EDGE_FALLING, &flags,
lflags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
- assign_bit(FLAG_OPEN_DRAIN, &flags,
+ assign_bit(GPIOD_FLAG_OPEN_DRAIN, &flags,
lflags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
- assign_bit(FLAG_OPEN_SOURCE, &flags,
+ assign_bit(GPIOD_FLAG_OPEN_SOURCE, &flags,
lflags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
- assign_bit(FLAG_PULL_UP, &flags,
+ assign_bit(GPIOD_FLAG_PULL_UP, &flags,
lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
- assign_bit(FLAG_PULL_DOWN, &flags,
+ assign_bit(GPIOD_FLAG_PULL_DOWN, &flags,
lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
- assign_bit(FLAG_BIAS_DISABLE, &flags,
+ assign_bit(GPIOD_FLAG_BIAS_DISABLE, &flags,
lflags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
- assign_bit(FLAG_EVENT_CLOCK_REALTIME, &flags,
+ assign_bit(GPIOD_FLAG_EVENT_CLOCK_REALTIME, &flags,
lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
- assign_bit(FLAG_EVENT_CLOCK_HTE, &flags,
+ assign_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &flags,
lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
WRITE_ONCE(*flagsp, flags);
@@ -1591,7 +1567,7 @@ static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
for (i = 0; i < lr->num_lines; i++)
seq_printf(out, "gpio-line:\t%d\n",
- gpio_chip_hwgpio(lr->lines[i].desc));
+ gpiod_hwgpio(lr->lines[i].desc));
}
#endif
@@ -2115,10 +2091,10 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}
if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
- irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
- irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
@@ -2244,7 +2220,7 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
return;
memset(info, 0, sizeof(*info));
- info->offset = gpio_chip_hwgpio(desc);
+ info->offset = gpiod_hwgpio(desc);
if (desc->name)
strscpy(info->name, desc->name, sizeof(info->name));
@@ -2253,7 +2229,7 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
scoped_guard(srcu, &desc->gdev->desc_srcu) {
label = gpiod_get_label(desc);
- if (label && test_bit(FLAG_REQUESTED, &dflags))
+ if (label && test_bit(GPIOD_FLAG_REQUESTED, &dflags))
strscpy(info->consumer, label,
sizeof(info->consumer));
}
@@ -2270,10 +2246,10 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
* The definitive test that a line is available to userspace is to
* request it.
*/
- if (test_bit(FLAG_REQUESTED, &dflags) ||
- test_bit(FLAG_IS_HOGGED, &dflags) ||
- test_bit(FLAG_EXPORT, &dflags) ||
- test_bit(FLAG_SYSFS, &dflags) ||
+ if (test_bit(GPIOD_FLAG_REQUESTED, &dflags) ||
+ test_bit(GPIOD_FLAG_IS_HOGGED, &dflags) ||
+ test_bit(GPIOD_FLAG_EXPORT, &dflags) ||
+ test_bit(GPIOD_FLAG_SYSFS, &dflags) ||
!gpiochip_line_is_valid(guard.gc, info->offset)) {
info->flags |= GPIO_V2_LINE_FLAG_USED;
} else if (!atomic) {
@@ -2281,34 +2257,34 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
info->flags |= GPIO_V2_LINE_FLAG_USED;
}
- if (test_bit(FLAG_IS_OUT, &dflags))
+ if (test_bit(GPIOD_FLAG_IS_OUT, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
else
info->flags |= GPIO_V2_LINE_FLAG_INPUT;
- if (test_bit(FLAG_ACTIVE_LOW, &dflags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
- if (test_bit(FLAG_OPEN_DRAIN, &dflags))
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
- if (test_bit(FLAG_OPEN_SOURCE, &dflags))
+ if (test_bit(GPIOD_FLAG_OPEN_SOURCE, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
- if (test_bit(FLAG_BIAS_DISABLE, &dflags))
+ if (test_bit(GPIOD_FLAG_BIAS_DISABLE, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
- if (test_bit(FLAG_PULL_DOWN, &dflags))
+ if (test_bit(GPIOD_FLAG_PULL_DOWN, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
- if (test_bit(FLAG_PULL_UP, &dflags))
+ if (test_bit(GPIOD_FLAG_PULL_UP, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
- if (test_bit(FLAG_EDGE_RISING, &dflags))
+ if (test_bit(GPIOD_FLAG_EDGE_RISING, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
- if (test_bit(FLAG_EDGE_FALLING, &dflags))
+ if (test_bit(GPIOD_FLAG_EDGE_FALLING, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
- if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &dflags))
+ if (test_bit(GPIOD_FLAG_EVENT_CLOCK_REALTIME, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
- else if (test_bit(FLAG_EVENT_CLOCK_HTE, &dflags))
+ else if (test_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
debounce_period_us = READ_ONCE(desc->debounce_period_us);
@@ -2548,8 +2524,15 @@ static int lineinfo_changed_notify(struct notifier_block *nb,
container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
struct lineinfo_changed_ctx *ctx;
struct gpio_desc *desc = data;
+ struct file *fp;
+
+ if (!test_bit(gpiod_hwgpio(desc), cdev->watched_lines))
+ return NOTIFY_DONE;
- if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
+ /* Keep the file descriptor alive for the duration of the notification. */
+ fp = get_file_active(&cdev->fp);
+ if (!fp)
+ /* Chardev file descriptor was or is being released. */
return NOTIFY_DONE;
/*
@@ -2575,8 +2558,6 @@ static int lineinfo_changed_notify(struct notifier_block *nb,
/* Keep the GPIO device alive until we emit the event. */
ctx->gdev = gpio_device_get(desc->gdev);
ctx->cdev = cdev;
- /* Keep the file descriptor alive too. */
- get_file(ctx->cdev->fp);
INIT_WORK(&ctx->work, lineinfo_changed_func);
queue_work(ctx->gdev->line_state_wq, &ctx->work);
@@ -2823,7 +2804,7 @@ int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
if (!gc)
return -ENODEV;
- chip_dbg(gc, "added GPIO chardev (%d:%d)\n", MAJOR(devt), gdev->id);
+ gpiochip_dbg(gc, "added GPIO chardev (%d:%d)\n", MAJOR(devt), gdev->id);
return 0;
}
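The linehandle_create() rewrite above leans on the kernel's scope-based cleanup helpers: DEFINE_FREE() ties linehandle_free() to a __free() variable so every early return releases the state, retain_and_null_ptr() disarms the cleanup once ownership moves to the prepared file, and fd_publish() installs the descriptor only after copy_to_user() can no longer fail. A minimal sketch of the __free() idiom, with a hypothetical waldo_* state object:

#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/slab.h>

struct waldo_state {
        int resource;
};

static void waldo_free(struct waldo_state *st)
{
        kfree(st);
}
DEFINE_FREE(waldo_free, struct waldo_state *, if (!IS_ERR_OR_NULL(_T)) waldo_free(_T))

static int waldo_create(void)
{
        struct waldo_state *st __free(waldo_free) =
                        kzalloc(sizeof(*st), GFP_KERNEL);

        if (!st)
                return -ENOMEM;

        /* ... any early return above this point frees st automatically ... */

        /* On success, ownership passes elsewhere (e.g. a file), so disarm. */
        retain_and_null_ptr(st);
        return 0;
}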
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 4d5f83b17624..72422c5db364 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(devm_gpiod_unhinge);
*/
void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs)
{
- devm_remove_action(dev, devm_gpiod_release_array, descs);
+ devm_release_action(dev, devm_gpiod_release_array, descs);
}
EXPORT_SYMBOL_GPL(devm_gpiod_put_array);
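The devres one-liner above is a real fix: devm_remove_action() only drops the devres entry, so the array would have lingered until driver detach, while devm_release_action() drops the entry and runs the release callback immediately, which is what an explicit put is for. The two halves of the pattern, with hypothetical fred_* names:

#include <linux/device.h>
#include <linux/slab.h>

static void fred_release(void *data)
{
        kfree(data);
}

static int fred_probe(struct device *dev)
{
        void *buf = kzalloc(16, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        /* Registers fred_release() to run at driver detach. */
        return devm_add_action_or_reset(dev, fred_release, buf);
}

static void fred_put_early(struct device *dev, void *buf)
{
        /*
         * Drops the devres entry AND calls fred_release(buf) right away;
         * devm_remove_action() would only drop the entry, leaking buf
         * until detach.
         */
        devm_release_action(dev, fred_release, buf);
}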
diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c
index aeae6df8bec9..ef3f2ef30cf2 100644
--- a/drivers/gpio/gpiolib-legacy.c
+++ b/drivers/gpio/gpiolib-legacy.c
@@ -34,30 +34,20 @@ EXPORT_SYMBOL_GPL(gpio_free);
*/
int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
{
- struct gpio_desc *desc;
int err;
- /* Compatibility: assume unavailable "valid" GPIOs will appear later */
- desc = gpio_to_desc(gpio);
- if (!desc)
- return -EPROBE_DEFER;
-
- err = gpiod_request(desc, label);
+ err = gpio_request(gpio, label);
if (err)
return err;
if (flags & GPIOF_IN)
- err = gpiod_direction_input(desc);
+ err = gpio_direction_input(gpio);
else
- err = gpiod_direction_output_raw(desc, !!(flags & GPIOF_OUT_INIT_HIGH));
+ err = gpio_direction_output(gpio, !!(flags & GPIOF_OUT_INIT_HIGH));
if (err)
- goto free_gpio;
-
- return 0;
+ gpio_free(gpio);
- free_gpio:
- gpiod_free(desc);
return err;
}
EXPORT_SYMBOL_GPL(gpio_request_one);
@@ -78,50 +68,10 @@ int gpio_request(unsigned gpio, const char *label)
}
EXPORT_SYMBOL_GPL(gpio_request);
-static void devm_gpio_release(struct device *dev, void *res)
-{
- unsigned *gpio = res;
-
- gpio_free(*gpio);
-}
-
-/**
- * devm_gpio_request - request a GPIO for a managed device
- * @dev: device to request the GPIO for
- * @gpio: GPIO to allocate
- * @label: the name of the requested GPIO
- *
- * Except for the extra @dev argument, this function takes the
- * same arguments and performs the same function as gpio_request().
- * GPIOs requested with this function will be automatically freed
- * on driver detach.
- *
- * **DEPRECATED** This function is deprecated and must not be used in new code.
- *
- * Returns:
- * 0 on success, or negative errno on failure.
- */
-int devm_gpio_request(struct device *dev, unsigned gpio, const char *label)
+static void devm_gpio_release(void *gpio)
{
- unsigned *dr;
- int rc;
-
- dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL);
- if (!dr)
- return -ENOMEM;
-
- rc = gpio_request(gpio, label);
- if (rc) {
- devres_free(dr);
- return rc;
- }
-
- *dr = gpio;
- devres_add(dev, dr);
-
- return 0;
+ gpio_free((unsigned)(unsigned long)gpio);
}
-EXPORT_SYMBOL_GPL(devm_gpio_request);
/**
* devm_gpio_request_one - request a single GPIO with initial setup
@@ -138,22 +88,22 @@ EXPORT_SYMBOL_GPL(devm_gpio_request);
int devm_gpio_request_one(struct device *dev, unsigned gpio,
unsigned long flags, const char *label)
{
- unsigned *dr;
int rc;
- dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL);
- if (!dr)
- return -ENOMEM;
+ rc = gpio_request(gpio, label);
+ if (rc)
+ return rc;
+
+ if (flags & GPIOF_IN)
+ rc = gpio_direction_input(gpio);
+ else
+ rc = gpio_direction_output(gpio, !!(flags & GPIOF_OUT_INIT_HIGH));
- rc = gpio_request_one(gpio, flags, label);
if (rc) {
- devres_free(dr);
+ gpio_free(gpio);
return rc;
}
- *dr = gpio;
- devres_add(dev, dr);
-
- return 0;
+ return devm_add_action_or_reset(dev, devm_gpio_release, (void *)(unsigned long)gpio);
}
EXPORT_SYMBOL_GPL(devm_gpio_request_one);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 73ba73b31cb1..8657379e9165 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -708,7 +708,7 @@ struct gpio_desc *of_find_gpio(struct device_node *np, const char *con_id,
unsigned int idx, unsigned long *flags)
{
char propname[32]; /* 32 is max size of property name */
- enum of_gpio_flags of_flags;
+ enum of_gpio_flags of_flags = 0;
const of_find_gpio_quirk *q;
struct gpio_desc *desc;
@@ -878,7 +878,7 @@ static void of_gpiochip_remove_hog(struct gpio_chip *chip,
{
struct gpio_desc *desc;
- for_each_gpio_desc_with_flag(chip, desc, FLAG_IS_HOGGED)
+ for_each_gpio_desc_with_flag(chip, desc, GPIOD_FLAG_IS_HOGGED)
if (READ_ONCE(desc->hog) == hog)
gpiochip_free_own_desc(desc);
}
@@ -1031,85 +1031,6 @@ static int of_gpio_threecell_xlate(struct gpio_chip *gc,
return gpiospec->args[1];
}
-#if IS_ENABLED(CONFIG_OF_GPIO_MM_GPIOCHIP)
-#include <linux/gpio/legacy-of-mm-gpiochip.h>
-/**
- * of_mm_gpiochip_add_data - Add memory mapped GPIO chip (bank)
- * @np: device node of the GPIO chip
- * @mm_gc: pointer to the of_mm_gpio_chip allocated structure
- * @data: driver data to store in the struct gpio_chip
- *
- * To use this function you should allocate and fill mm_gc with:
- *
- * 1) In the gpio_chip structure:
- * - all the callbacks
- * - of_gpio_n_cells
- * - of_xlate callback (optional)
- *
- * 3) In the of_mm_gpio_chip structure:
- * - save_regs callback (optional)
- *
- * If succeeded, this function will map bank's memory and will
- * do all necessary work for you. Then you'll able to use .regs
- * to manage GPIOs from the callbacks.
- *
- * Returns:
- * 0 on success, or negative errno on failure.
- */
-int of_mm_gpiochip_add_data(struct device_node *np,
- struct of_mm_gpio_chip *mm_gc,
- void *data)
-{
- int ret = -ENOMEM;
- struct gpio_chip *gc = &mm_gc->gc;
-
- gc->label = kasprintf(GFP_KERNEL, "%pOF", np);
- if (!gc->label)
- goto err0;
-
- mm_gc->regs = of_iomap(np, 0);
- if (!mm_gc->regs)
- goto err1;
-
- gc->base = -1;
-
- if (mm_gc->save_regs)
- mm_gc->save_regs(mm_gc);
-
- fwnode_handle_put(mm_gc->gc.fwnode);
- mm_gc->gc.fwnode = fwnode_handle_get(of_fwnode_handle(np));
-
- ret = gpiochip_add_data(gc, data);
- if (ret)
- goto err2;
-
- return 0;
-err2:
- of_node_put(np);
- iounmap(mm_gc->regs);
-err1:
- kfree(gc->label);
-err0:
- pr_err("%pOF: GPIO chip registration failed with status %d\n", np, ret);
- return ret;
-}
-EXPORT_SYMBOL_GPL(of_mm_gpiochip_add_data);
-
-/**
- * of_mm_gpiochip_remove - Remove memory mapped GPIO chip (bank)
- * @mm_gc: pointer to the of_mm_gpio_chip allocated structure
- */
-void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc)
-{
- struct gpio_chip *gc = &mm_gc->gc;
-
- gpiochip_remove(gc);
- iounmap(mm_gc->regs);
- kfree(gc->label);
-}
-EXPORT_SYMBOL_GPL(of_mm_gpiochip_remove);
-#endif
-
#ifdef CONFIG_PINCTRL
static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
{
diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h
index 3eebfac290c5..2257f7a498a1 100644
--- a/drivers/gpio/gpiolib-of.h
+++ b/drivers/gpio/gpiolib-of.h
@@ -8,7 +8,7 @@
#include <linux/notifier.h>
-struct device;
+struct device_node;
struct fwnode_handle;
struct gpio_chip;
diff --git a/drivers/gpio/gpiolib-shared.c b/drivers/gpio/gpiolib-shared.c
new file mode 100644
index 000000000000..8bdd107b1ad1
--- /dev/null
+++ b/drivers/gpio/gpiolib-shared.c
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Linaro Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/auxiliary_bus.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/fwnode.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/idr.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/overflow.h>
+#include <linux/printk.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "gpiolib.h"
+#include "gpiolib-shared.h"
+
+/* Represents a single reference to a GPIO pin. */
+struct gpio_shared_ref {
+ struct list_head list;
+ /* Firmware node associated with this GPIO's consumer. */
+ struct fwnode_handle *fwnode;
+ /* GPIO flags this consumer uses for the request. */
+ enum gpiod_flags flags;
+ char *con_id;
+ int dev_id;
+ struct auxiliary_device adev;
+ struct gpiod_lookup_table *lookup;
+};
+
+/* Represents a single GPIO pin. */
+struct gpio_shared_entry {
+ struct list_head list;
+ /* Firmware node associated with the GPIO controller. */
+ struct fwnode_handle *fwnode;
+ /* Hardware offset of the GPIO within its chip. */
+ unsigned int offset;
+ /* Index in the property value array. */
+ size_t index;
+ struct mutex lock;
+ struct gpio_shared_desc *shared_desc;
+ struct kref ref;
+ struct list_head refs;
+};
+
+static LIST_HEAD(gpio_shared_list);
+static DEFINE_MUTEX(gpio_shared_lock);
+static DEFINE_IDA(gpio_shared_ida);
+
+#if IS_ENABLED(CONFIG_OF)
+static struct gpio_shared_entry *
+gpio_shared_find_entry(struct fwnode_handle *controller_node,
+ unsigned int offset)
+{
+ struct gpio_shared_entry *entry;
+
+ list_for_each_entry(entry, &gpio_shared_list, list) {
+ if (entry->fwnode == controller_node && entry->offset == offset)
+ return entry;
+ }
+
+ return NULL;
+}
+
+/* Handle all special nodes that we should ignore. */
+static bool gpio_shared_of_node_ignore(struct device_node *node)
+{
+ /*
+ * __symbols__ is a special, internal node and should not be considered
+ * when scanning for shared GPIOs.
+ */
+ if (of_node_name_eq(node, "__symbols__"))
+ return true;
+
+ /*
+ * GPIO hogs have a "gpios" property which is not a phandle and can't
+ * possibly refer to a shared GPIO.
+ */
+ if (of_property_present(node, "gpio-hog"))
+ return true;
+
+ return false;
+}
+
+static int gpio_shared_of_traverse(struct device_node *curr)
+{
+ struct gpio_shared_entry *entry;
+ size_t con_id_len, suffix_len;
+ struct fwnode_handle *fwnode;
+ struct of_phandle_args args;
+ struct property *prop;
+ unsigned int offset;
+ const char *suffix;
+ int ret, count, i;
+
+ if (gpio_shared_of_node_ignore(curr))
+ return 0;
+
+ for_each_property_of_node(curr, prop) {
+ /*
+ * The standard name for a GPIO property is "foo-gpios"
+ * or "foo-gpio". Some bindings also use "gpios" or "gpio".
+ * There are some legacy device-trees which have a different
+ * naming convention and for which we have rename quirks in
+ * place in gpiolib-of.c. I don't think any of them require
+ * support for shared GPIOs, so for now let's just ignore
+ * them. We can always just export the quirk list and
+ * iterate over it here.
+ */
+ if (!strends(prop->name, "-gpios") &&
+ !strends(prop->name, "-gpio") &&
+ strcmp(prop->name, "gpios") != 0 &&
+ strcmp(prop->name, "gpio") != 0)
+ continue;
+
+ count = of_count_phandle_with_args(curr, prop->name,
+ "#gpio-cells");
+ if (count <= 0)
+ continue;
+
+ for (i = 0; i < count; i++) {
+ struct device_node *np __free(device_node) = NULL;
+
+ ret = of_parse_phandle_with_args(curr, prop->name,
+ "#gpio-cells", i,
+ &args);
+ if (ret)
+ continue;
+
+ np = args.np;
+
+ if (!of_property_present(np, "gpio-controller"))
+ continue;
+
+ /*
+ * We support 1, 2 and 3 cell GPIO bindings in the
+ * kernel currently. There's only one old MIPS dts that
+ * has a one-cell binding but there's no associated
+ * consumer so it may as well be an error. There don't
+ * seem to be any 3-cell users of non-exclusive GPIOs,
+ * so we can skip this as well. Let's occupy ourselves
+ * with the predominant 2-cell binding with the first
+ * cell indicating the hardware offset of the GPIO and
+ * the second defining the GPIO flags of the request.
+ */
+ if (args.args_count != 2)
+ continue;
+
+ fwnode = of_fwnode_handle(args.np);
+ offset = args.args[0];
+
+ entry = gpio_shared_find_entry(fwnode, offset);
+ if (!entry) {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->fwnode = fwnode_handle_get(fwnode);
+ entry->offset = offset;
+ entry->index = i;
+ INIT_LIST_HEAD(&entry->refs);
+ mutex_init(&entry->lock);
+
+ list_add_tail(&entry->list, &gpio_shared_list);
+ }
+
+ struct gpio_shared_ref *ref __free(kfree) =
+ kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref)
+ return -ENOMEM;
+
+ ref->fwnode = fwnode_handle_get(of_fwnode_handle(curr));
+ ref->flags = args.args[1];
+
+ if (strends(prop->name, "gpios"))
+ suffix = "-gpios";
+ else if (strends(prop->name, "gpio"))
+ suffix = "-gpio";
+ else
+ suffix = NULL;
+ if (!suffix)
+ continue;
+
+ /* We only set con_id if there's actually one. */
+ if (strcmp(prop->name, "gpios") && strcmp(prop->name, "gpio")) {
+ ref->con_id = kstrdup(prop->name, GFP_KERNEL);
+ if (!ref->con_id)
+ return -ENOMEM;
+
+ con_id_len = strlen(ref->con_id);
+ suffix_len = strlen(suffix);
+
+ ref->con_id[con_id_len - suffix_len] = '\0';
+ }
+
+ ref->dev_id = ida_alloc(&gpio_shared_ida, GFP_KERNEL);
+ if (ref->dev_id < 0) {
+ fwnode_handle_put(ref->fwnode);
+ kfree(ref->con_id);
+ return ref->dev_id;
+ }
+
+ if (!list_empty(&entry->refs))
+ pr_debug("GPIO %u at %s is shared by multiple firmware nodes\n",
+ entry->offset, fwnode_get_name(entry->fwnode));
+
+ list_add_tail(&no_free_ptr(ref)->list, &entry->refs);
+ }
+ }
+
+ for_each_child_of_node_scoped(curr, child) {
+ ret = gpio_shared_of_traverse(child);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
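For reference, a hypothetical sketch (not taken from this patch) of the firmware shape the traversal above detects: two consumer nodes referencing the same controller line.

/*
 * Hypothetical device-tree fragment:
 *
 *	pwrseq { reset-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>; };
 *	wifi@1 { reset-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>; };
 *
 * Both references become gpio_shared_ref instances hanging off a single
 * gpio_shared_entry keyed by (&gpio2's fwnode, offset 7).
 */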
+
+static int gpio_shared_of_scan(void)
+{
+ if (of_root)
+ return gpio_shared_of_traverse(of_root);
+
+ return 0;
+}
+#else
+static int gpio_shared_of_scan(void)
+{
+ return 0;
+}
+#endif /* CONFIG_OF */
+
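+/*
+ * The auxiliary device is embedded in struct gpio_shared_ref whose lifetime
+ * is managed by this module, so there is nothing to free on release.
+ */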
+static void gpio_shared_adev_release(struct device *dev)
+{
+}
+
+static int gpio_shared_make_adev(struct gpio_device *gdev,
+ struct gpio_shared_entry *entry,
+ struct gpio_shared_ref *ref)
+{
+ struct auxiliary_device *adev = &ref->adev;
+ int ret;
+
+ lockdep_assert_held(&gpio_shared_lock);
+
+ memset(adev, 0, sizeof(*adev));
+
+ adev->id = ref->dev_id;
+ adev->name = "proxy";
+ adev->dev.parent = gdev->dev.parent;
+ adev->dev.platform_data = entry;
+ adev->dev.release = gpio_shared_adev_release;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ return ret;
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ pr_debug("Created an auxiliary GPIO proxy %s for GPIO device %s\n",
+ dev_name(&adev->dev), gpio_device_get_label(gdev));
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_RESET_GPIO)
+/*
+ * Special case: reset-gpio is an auxiliary device that's created dynamically
+ * and put in between the GPIO controller and consumers of shared GPIOs
+ * referred to by the "reset-gpios" property.
+ *
+ * If the supposed consumer of a shared GPIO didn't match any of the mappings
+ * we created when scanning the firmware nodes, it's still possible that it's
+ * the reset-gpio device which didn't exist at the time of the scan.
+ *
+ * This function verifies this and returns true if that's the case.
+ */
+static bool gpio_shared_dev_is_reset_gpio(struct device *consumer,
+ struct gpio_shared_entry *entry,
+ struct gpio_shared_ref *ref)
+{
+ struct fwnode_handle *reset_fwnode = dev_fwnode(consumer);
+ struct fwnode_reference_args ref_args, aux_args;
+ struct device *parent = consumer->parent;
+ bool match;
+ int ret;
+
+ /* The reset-gpio device must have a parent AND a firmware node. */
+ if (!parent || !reset_fwnode)
+ return false;
+
+ /*
+ * FIXME: use device_is_compatible() once the reset-gpio driver gains
+ * a compatible string which it currently does not have.
+ */
+ if (!strstarts(dev_name(consumer), "reset.gpio."))
+ return false;
+
+ /*
+ * Parent of the reset-gpio auxiliary device is the GPIO chip whose
+ * fwnode we stored in the entry structure.
+ */
+ if (!device_match_fwnode(parent, entry->fwnode))
+ return false;
+
+ /*
+ * The device associated with the shared reference's firmware node is
+ * the consumer of the reset control exposed by the reset-gpio device.
+ * It must have a "reset-gpios" property that's referencing the entry's
+ * firmware node.
+ *
+ * The reference args must agree between the real consumer and the
+ * auxiliary reset-gpio device.
+ */
+ ret = fwnode_property_get_reference_args(ref->fwnode, "reset-gpios",
+ NULL, 2, 0, &ref_args);
+ if (ret)
+ return false;
+
+ ret = fwnode_property_get_reference_args(reset_fwnode, "reset-gpios",
+ NULL, 2, 0, &aux_args);
+ if (ret) {
+ fwnode_handle_put(ref_args.fwnode);
+ return false;
+ }
+
+ match = ((ref_args.fwnode == entry->fwnode) &&
+ (aux_args.fwnode == entry->fwnode) &&
+ (ref_args.args[0] == aux_args.args[0]));
+
+ fwnode_handle_put(ref_args.fwnode);
+ fwnode_handle_put(aux_args.fwnode);
+ return match;
+}
+#else
+static bool gpio_shared_dev_is_reset_gpio(struct device *consumer,
+ struct gpio_shared_entry *entry,
+ struct gpio_shared_ref *ref)
+{
+ return false;
+}
+#endif /* CONFIG_RESET_GPIO */
+
+int gpio_shared_add_proxy_lookup(struct device *consumer, unsigned long lflags)
+{
+ const char *dev_id = dev_name(consumer);
+ struct gpio_shared_entry *entry;
+ struct gpio_shared_ref *ref;
+
+ struct gpiod_lookup_table *lookup __free(kfree) =
+ kzalloc(struct_size(lookup, table, 2), GFP_KERNEL);
+ if (!lookup)
+ return -ENOMEM;
+
+ guard(mutex)(&gpio_shared_lock);
+
+ list_for_each_entry(entry, &gpio_shared_list, list) {
+ list_for_each_entry(ref, &entry->refs, list) {
+ if (!device_match_fwnode(consumer, ref->fwnode) &&
+ !gpio_shared_dev_is_reset_gpio(consumer, entry, ref))
+ continue;
+
+ /* We've already done that on a previous request. */
+ if (ref->lookup)
+ return 0;
+
+ char *key __free(kfree) =
+ kasprintf(GFP_KERNEL,
+ KBUILD_MODNAME ".proxy.%u",
+ ref->adev.id);
+ if (!key)
+ return -ENOMEM;
+
+ pr_debug("Adding machine lookup entry for a shared GPIO for consumer %s, with key '%s' and con_id '%s'\n",
+ dev_id, key, ref->con_id ?: "none");
+
+ lookup->dev_id = dev_id;
+ lookup->table[0] = GPIO_LOOKUP(no_free_ptr(key), 0,
+ ref->con_id, lflags);
+
+ ref->lookup = no_free_ptr(lookup);
+ gpiod_add_lookup_table(ref->lookup);
+
+ return 0;
+ }
+ }
+
+ /* We warn here because this can only happen due to a programming error. */
+ WARN_ON(1);
+ return -ENOENT;
+}
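For illustration, the machine table built above is equivalent to a static one like this; the key and consumer name are made up, since the real key is generated from KBUILD_MODNAME and the IDA-allocated proxy ID.

#include <linux/gpio/machine.h>

/* Hypothetical equivalent of the dynamically built lookup table. */
static struct gpiod_lookup_table gpio_proxy_lookup = {
	.dev_id = "foo.0",		/* assumed consumer device name */
	.table = {
		/* Line 0 of the single-line proxy chip, under the generated key. */
		GPIO_LOOKUP("gpiolib.proxy.0", 0, "reset", GPIO_ACTIVE_HIGH),
		{ },
	},
};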
+
+static void gpio_shared_remove_adev(struct auxiliary_device *adev)
+{
+ lockdep_assert_held(&gpio_shared_lock);
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+int gpio_device_setup_shared(struct gpio_device *gdev)
+{
+ struct gpio_shared_entry *entry;
+ struct gpio_shared_ref *ref;
+ unsigned long *flags;
+ int ret;
+
+ guard(mutex)(&gpio_shared_lock);
+
+ list_for_each_entry(entry, &gpio_shared_list, list) {
+ list_for_each_entry(ref, &entry->refs, list) {
+ if (gdev->dev.parent == &ref->adev.dev) {
+ /*
+ * This is a shared GPIO proxy. Mark its
+ * descriptor as such and return here.
+ */
+ __set_bit(GPIOD_FLAG_SHARED_PROXY,
+ &gdev->descs[0].flags);
+ return 0;
+ }
+ }
+ }
+
+ /*
+ * This is not a shared GPIO proxy but it still may be the device
+ * exposing shared pins. Find them and create the proxy devices.
+ */
+ list_for_each_entry(entry, &gpio_shared_list, list) {
+ if (!device_match_fwnode(&gdev->dev, entry->fwnode))
+ continue;
+
+ if (list_count_nodes(&entry->refs) <= 1)
+ continue;
+
+ flags = &gdev->descs[entry->offset].flags;
+
+ __set_bit(GPIOD_FLAG_SHARED, flags);
+ /*
+ * Shared GPIOs are not requested via the normal path. Make
+ * them inaccessible to anyone even before we register the
+ * chip.
+ */
+ __set_bit(GPIOD_FLAG_REQUESTED, flags);
+
+ pr_debug("GPIO %u owned by %s is shared by multiple consumers\n",
+ entry->offset, gpio_device_get_label(gdev));
+
+ list_for_each_entry(ref, &entry->refs, list) {
+ pr_debug("Setting up a shared GPIO entry for %s\n",
+ fwnode_get_name(ref->fwnode));
+
+ ret = gpio_shared_make_adev(gdev, entry, ref);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void gpio_device_teardown_shared(struct gpio_device *gdev)
+{
+ struct gpio_shared_entry *entry;
+ struct gpio_shared_ref *ref;
+
+ guard(mutex)(&gpio_shared_lock);
+
+ list_for_each_entry(entry, &gpio_shared_list, list) {
+ if (!device_match_fwnode(&gdev->dev, entry->fwnode))
+ continue;
+
+ list_for_each_entry(ref, &entry->refs, list) {
+ if (ref->lookup) {
+ gpiod_remove_lookup_table(ref->lookup);
+ kfree(ref->lookup->table[0].key);
+ kfree(ref->lookup);
+ ref->lookup = NULL;
+ }
+ gpio_shared_remove_adev(&ref->adev);
+ }
+ }
+}
+
+static void gpio_shared_release(struct kref *kref)
+{
+ struct gpio_shared_entry *entry =
+ container_of(kref, struct gpio_shared_entry, ref);
+ struct gpio_shared_desc *shared_desc;
+
+ guard(mutex)(&entry->lock);
+
+ shared_desc = entry->shared_desc;
+ gpio_device_put(shared_desc->desc->gdev);
+ if (shared_desc->can_sleep)
+ mutex_destroy(&shared_desc->mutex);
+ kfree(shared_desc);
+ entry->shared_desc = NULL;
+}
+
+static void gpiod_shared_put(void *data)
+{
+ struct gpio_shared_entry *entry = data;
+
+ lockdep_assert_not_held(&gpio_shared_lock);
+
+ kref_put(&entry->ref, gpio_shared_release);
+}
+
+static struct gpio_shared_desc *
+gpiod_shared_desc_create(struct gpio_shared_entry *entry)
+{
+ struct gpio_shared_desc *shared_desc;
+ struct gpio_device *gdev;
+
+ lockdep_assert_held(&entry->lock);
+
+ shared_desc = kzalloc(sizeof(*shared_desc), GFP_KERNEL);
+ if (!shared_desc)
+ return ERR_PTR(-ENOMEM);
+
+ gdev = gpio_device_find_by_fwnode(entry->fwnode);
+ if (!gdev) {
+ kfree(shared_desc);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ shared_desc->desc = &gdev->descs[entry->offset];
+ shared_desc->can_sleep = gpiod_cansleep(shared_desc->desc);
+ if (shared_desc->can_sleep)
+ mutex_init(&shared_desc->mutex);
+ else
+ spin_lock_init(&shared_desc->spinlock);
+
+ return shared_desc;
+}
+
+struct gpio_shared_desc *devm_gpiod_shared_get(struct device *dev)
+{
+ struct gpio_shared_desc *shared_desc;
+ struct gpio_shared_entry *entry;
+ int ret;
+
+ lockdep_assert_not_held(&gpio_shared_lock);
+
+ entry = dev_get_platdata(dev);
+ if (WARN_ON(!entry))
+ /* Programmer bug */
+ return ERR_PTR(-ENOENT);
+
+ scoped_guard(mutex, &entry->lock) {
+ if (entry->shared_desc) {
+ kref_get(&entry->ref);
+ shared_desc = entry->shared_desc;
+ } else {
+ shared_desc = gpiod_shared_desc_create(entry);
+ if (IS_ERR(shared_desc))
+ return ERR_CAST(shared_desc);
+
+ kref_init(&entry->ref);
+ entry->shared_desc = shared_desc;
+ }
+
+ pr_debug("Device %s acquired a reference to the shared GPIO %u owned by %s\n",
+ dev_name(dev), gpiod_hwgpio(shared_desc->desc),
+ gpio_device_get_label(shared_desc->desc->gdev));
+ }
+
+ ret = devm_add_action_or_reset(dev, gpiod_shared_put, entry);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return shared_desc;
+}
+EXPORT_SYMBOL_GPL(devm_gpiod_shared_get);
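As a usage illustration, a minimal sketch of a proxy consumer's probe path; the driver shape and the usecnt bookkeeping are assumptions, not part of this patch.

#include <linux/auxiliary_bus.h>
#include <linux/err.h>

#include "gpiolib-shared.h"

/* Hypothetical auxiliary driver bound to the "proxy" devices created above. */
static int gpio_proxy_probe(struct auxiliary_device *adev,
			    const struct auxiliary_device_id *id)
{
	struct gpio_shared_desc *shared_desc;

	shared_desc = devm_gpiod_shared_get(&adev->dev);
	if (IS_ERR(shared_desc))
		return PTR_ERR(shared_desc);

	/* Serialize against the other consumers of the same physical line. */
	scoped_guard(gpio_shared_desc_lock, shared_desc)
		shared_desc->usecnt++;

	return 0;
}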
+
+static void gpio_shared_drop_ref(struct gpio_shared_ref *ref)
+{
+ list_del(&ref->list);
+ kfree(ref->con_id);
+ ida_free(&gpio_shared_ida, ref->dev_id);
+ fwnode_handle_put(ref->fwnode);
+ kfree(ref);
+}
+
+static void gpio_shared_drop_entry(struct gpio_shared_entry *entry)
+{
+ list_del(&entry->list);
+ mutex_destroy(&entry->lock);
+ fwnode_handle_put(entry->fwnode);
+ kfree(entry);
+}
+
+/*
+ * This is only called if gpio_shared_init() fails, so it's in fact __init
+ * and not __exit.
+ */
+static void __init gpio_shared_teardown(void)
+{
+ struct gpio_shared_entry *entry, *epos;
+ struct gpio_shared_ref *ref, *rpos;
+
+ list_for_each_entry_safe(entry, epos, &gpio_shared_list, list) {
+ list_for_each_entry_safe(ref, rpos, &entry->refs, list)
+ gpio_shared_drop_ref(ref);
+
+ gpio_shared_drop_entry(entry);
+ }
+}
+
+static void gpio_shared_free_exclusive(void)
+{
+ struct gpio_shared_entry *entry, *epos;
+
+ list_for_each_entry_safe(entry, epos, &gpio_shared_list, list) {
+ if (list_count_nodes(&entry->refs) > 1)
+ continue;
+
+ gpio_shared_drop_ref(list_first_entry(&entry->refs,
+ struct gpio_shared_ref,
+ list));
+ gpio_shared_drop_entry(entry);
+ }
+}
+
+static int __init gpio_shared_init(void)
+{
+ int ret;
+
+ /* Right now, we only support OF-based systems. */
+ ret = gpio_shared_of_scan();
+ if (ret) {
+ gpio_shared_teardown();
+ pr_err("Failed to scan OF nodes for shared GPIOs: %d\n", ret);
+ return ret;
+ }
+
+ gpio_shared_free_exclusive();
+
+ pr_debug("Finished scanning firmware nodes for shared GPIOs\n");
+ return 0;
+}
+postcore_initcall(gpio_shared_init);
diff --git a/drivers/gpio/gpiolib-shared.h b/drivers/gpio/gpiolib-shared.h
new file mode 100644
index 000000000000..667dbdff3585
--- /dev/null
+++ b/drivers/gpio/gpiolib-shared.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_GPIO_SHARED_H
+#define __LINUX_GPIO_SHARED_H
+
+#include <linux/cleanup.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+struct gpio_device;
+struct gpio_desc;
+struct device;
+
+#if IS_ENABLED(CONFIG_GPIO_SHARED)
+
+int gpio_device_setup_shared(struct gpio_device *gdev);
+void gpio_device_teardown_shared(struct gpio_device *gdev);
+int gpio_shared_add_proxy_lookup(struct device *consumer, unsigned long lflags);
+
+#else
+
+static inline int gpio_device_setup_shared(struct gpio_device *gdev)
+{
+ return 0;
+}
+
+static inline void gpio_device_teardown_shared(struct gpio_device *gdev) { }
+
+static inline int gpio_shared_add_proxy_lookup(struct device *consumer,
+ unsigned long lflags)
+{
+ return 0;
+}
+
+#endif /* CONFIG_GPIO_SHARED */
+
+struct gpio_shared_desc {
+ struct gpio_desc *desc;
+ bool can_sleep;
+ unsigned long cfg;
+ unsigned int usecnt;
+ unsigned int highcnt;
+ union {
+ struct mutex mutex;
+ spinlock_t spinlock;
+ };
+};
+
+struct gpio_shared_desc *devm_gpiod_shared_get(struct device *dev);
+
+DEFINE_LOCK_GUARD_1(gpio_shared_desc_lock, struct gpio_shared_desc,
+ if (_T->lock->can_sleep)
+ mutex_lock(&_T->lock->mutex);
+ else
+ spin_lock_irqsave(&_T->lock->spinlock, _T->flags),
+ if (_T->lock->can_sleep)
+ mutex_unlock(&_T->lock->mutex);
+ else
+ spin_unlock_irqrestore(&_T->lock->spinlock, _T->flags),
+ unsigned long flags)
+
+static inline void gpio_shared_lockdep_assert(struct gpio_shared_desc *shared_desc)
+{
+ if (shared_desc->can_sleep)
+ lockdep_assert_held(&shared_desc->mutex);
+ else
+ lockdep_assert_held(&shared_desc->spinlock);
+}
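A short usage sketch for the guard defined above: callers need not know whether a mutex or a spinlock backs the descriptor. The highcnt increment is only an assumed example of the bookkeeping these fields exist for.

/* Minimal sketch, assuming highcnt counts users driving the line high. */
static inline void gpio_shared_desc_example(struct gpio_shared_desc *shared_desc)
{
	guard(gpio_shared_desc_lock)(shared_desc);

	gpio_shared_lockdep_assert(shared_desc);
	shared_desc->highcnt++;
}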
+
+#endif /* __LINUX_GPIO_SHARED_H */
diff --git a/drivers/gpio/gpiolib-swnode.c b/drivers/gpio/gpiolib-swnode.c
index f21dbc28cf2c..b44f35d68459 100644
--- a/drivers/gpio/gpiolib-swnode.c
+++ b/drivers/gpio/gpiolib-swnode.c
@@ -31,7 +31,7 @@ static struct gpio_device *swnode_get_gpio_device(struct fwnode_handle *fwnode)
gdev_node = to_software_node(fwnode);
if (!gdev_node || !gdev_node->name)
- return ERR_PTR(-EINVAL);
+ goto fwnode_lookup;
/*
* Check for a special node that identifies undefined GPIOs, this is
@@ -41,7 +41,8 @@ static struct gpio_device *swnode_get_gpio_device(struct fwnode_handle *fwnode)
!strcmp(gdev_node->name, GPIOLIB_SWNODE_UNDEFINED_NAME))
return ERR_PTR(-ENOENT);
- gdev = gpio_device_find_by_label(gdev_node->name);
+fwnode_lookup:
+ gdev = gpio_device_find_by_fwnode(fwnode);
return gdev ?: ERR_PTR(-EPROBE_DEFER);
}
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 4a3aa09dad9d..cd553acf3055 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -3,7 +3,6 @@
#include <linux/bitops.h>
#include <linux/cleanup.h>
#include <linux/device.h>
-#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kdev_t.h>
@@ -12,7 +11,6 @@
#include <linux/mutex.h>
#include <linux/printk.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/srcu.h>
#include <linux/sysfs.h>
@@ -26,6 +24,8 @@
#include "gpiolib.h"
#include "gpiolib-sysfs.h"
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+
struct kernfs_node;
#define GPIO_IRQF_TRIGGER_NONE 0
@@ -34,15 +34,64 @@ struct kernfs_node;
#define GPIO_IRQF_TRIGGER_BOTH (GPIO_IRQF_TRIGGER_FALLING | \
GPIO_IRQF_TRIGGER_RISING)
+enum {
+ GPIO_SYSFS_LINE_CLASS_ATTR_DIRECTION = 0,
+ GPIO_SYSFS_LINE_CLASS_ATTR_VALUE,
+ GPIO_SYSFS_LINE_CLASS_ATTR_EDGE,
+ GPIO_SYSFS_LINE_CLASS_ATTR_ACTIVE_LOW,
+ GPIO_SYSFS_LINE_CLASS_ATTR_SENTINEL,
+ GPIO_SYSFS_LINE_CLASS_ATTR_SIZE,
+};
+
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+
+enum {
+ GPIO_SYSFS_LINE_CHIP_ATTR_DIRECTION = 0,
+ GPIO_SYSFS_LINE_CHIP_ATTR_VALUE,
+ GPIO_SYSFS_LINE_CHIP_ATTR_SENTINEL,
+ GPIO_SYSFS_LINE_CHIP_ATTR_SIZE,
+};
+
struct gpiod_data {
+ struct list_head list;
+
struct gpio_desc *desc;
+ struct device *dev;
struct mutex mutex;
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
struct kernfs_node *value_kn;
int irq;
unsigned char irq_flags;
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
bool direction_can_change;
+
+ struct kobject *parent;
+ struct device_attribute dir_attr;
+ struct device_attribute val_attr;
+
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ struct device_attribute edge_attr;
+ struct device_attribute active_low_attr;
+
+ struct attribute *class_attrs[GPIO_SYSFS_LINE_CLASS_ATTR_SIZE];
+ struct attribute_group class_attr_group;
+ const struct attribute_group *class_attr_groups[2];
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+
+ struct attribute *chip_attrs[GPIO_SYSFS_LINE_CHIP_ATTR_SIZE];
+ struct attribute_group chip_attr_group;
+ const struct attribute_group *chip_attr_groups[2];
+};
+
+struct gpiodev_data {
+ struct list_head exported_lines;
+ struct gpio_device *gdev;
+ struct device *cdev_id; /* Class device by GPIO device ID */
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ struct device *cdev_base; /* Class device by GPIO base */
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
};
/*
@@ -73,26 +122,29 @@ static DEFINE_MUTEX(sysfs_lock);
*/
static ssize_t direction_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ dir_attr);
struct gpio_desc *desc = data->desc;
int value;
scoped_guard(mutex, &data->mutex) {
gpiod_get_direction(desc);
- value = !!test_bit(FLAG_IS_OUT, &desc->flags);
+ value = !!test_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
}
return sysfs_emit(buf, "%s\n", value ? "out" : "in");
}
static ssize_t direction_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
+ struct device_attribute *attr, const char *buf,
+ size_t size)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ dir_attr);
struct gpio_desc *desc = data->desc;
- ssize_t status;
+ ssize_t status;
guard(mutex)(&data->mutex);
@@ -107,14 +159,14 @@ static ssize_t direction_store(struct device *dev,
return status ? : size;
}
-static DEVICE_ATTR_RW(direction);
-static ssize_t value_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t value_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ val_attr);
struct gpio_desc *desc = data->desc;
- ssize_t status;
+ ssize_t status;
scoped_guard(mutex, &data->mutex)
status = gpiod_get_value_cansleep(desc);
@@ -125,10 +177,11 @@ static ssize_t value_show(struct device *dev,
return sysfs_emit(buf, "%zd\n", status);
}
-static ssize_t value_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t value_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ val_attr);
struct gpio_desc *desc = data->desc;
ssize_t status;
long value;
@@ -145,8 +198,8 @@ static ssize_t value_store(struct device *dev,
return size;
}
-static DEVICE_ATTR_PREALLOC(value, S_IWUSR | S_IRUGO, value_show, value_store);
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
{
struct gpiod_data *data = priv;
@@ -157,9 +210,8 @@ static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
}
/* Caller holds gpiod-data mutex. */
-static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags)
+static int gpio_sysfs_request_irq(struct gpiod_data *data, unsigned char flags)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
struct gpio_desc *desc = data->desc;
unsigned long irq_flags;
int ret;
@@ -172,33 +224,29 @@ static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags)
if (data->irq < 0)
return -EIO;
- data->value_kn = sysfs_get_dirent(dev->kobj.sd, "value");
- if (!data->value_kn)
- return -ENODEV;
-
irq_flags = IRQF_SHARED;
if (flags & GPIO_IRQF_TRIGGER_FALLING) {
- irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
- IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
- set_bit(FLAG_EDGE_FALLING, &desc->flags);
+ irq_flags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
+ set_bit(GPIOD_FLAG_EDGE_FALLING, &desc->flags);
}
if (flags & GPIO_IRQF_TRIGGER_RISING) {
- irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
- IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
- set_bit(FLAG_EDGE_RISING, &desc->flags);
+ irq_flags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
+ set_bit(GPIOD_FLAG_EDGE_RISING, &desc->flags);
}
/*
* FIXME: This should be done in the irq_request_resources callback
- * when the irq is requested, but a few drivers currently fail
- * to do so.
+ * when the irq is requested, but a few drivers currently fail to do
+ * so.
*
- * Remove this redundant call (along with the corresponding
- * unlock) when those drivers have been fixed.
+ * Remove this redundant call (along with the corresponding unlock)
+ * when those drivers have been fixed.
*/
- ret = gpiochip_lock_as_irq(guard.gc, gpio_chip_hwgpio(desc));
+ ret = gpiochip_lock_as_irq(guard.gc, gpiod_hwgpio(desc));
if (ret < 0)
- goto err_put_kn;
+ goto err_clr_bits;
ret = request_any_context_irq(data->irq, gpio_sysfs_irq, irq_flags,
"gpiolib", data);
@@ -210,11 +258,10 @@ static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags)
return 0;
err_unlock:
- gpiochip_unlock_as_irq(guard.gc, gpio_chip_hwgpio(desc));
-err_put_kn:
- clear_bit(FLAG_EDGE_RISING, &desc->flags);
- clear_bit(FLAG_EDGE_FALLING, &desc->flags);
- sysfs_put(data->value_kn);
+ gpiochip_unlock_as_irq(guard.gc, gpiod_hwgpio(desc));
+err_clr_bits:
+ clear_bit(GPIOD_FLAG_EDGE_RISING, &desc->flags);
+ clear_bit(GPIOD_FLAG_EDGE_FALLING, &desc->flags);
return ret;
}
@@ -223,9 +270,8 @@ err_put_kn:
* Caller holds gpiod-data mutex (unless called after class-device
* deregistration).
*/
-static void gpio_sysfs_free_irq(struct device *dev)
+static void gpio_sysfs_free_irq(struct gpiod_data *data)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
struct gpio_desc *desc = data->desc;
CLASS(gpio_chip_guard, guard)(desc);
@@ -234,23 +280,23 @@ static void gpio_sysfs_free_irq(struct device *dev)
data->irq_flags = 0;
free_irq(data->irq, data);
- gpiochip_unlock_as_irq(guard.gc, gpio_chip_hwgpio(desc));
- clear_bit(FLAG_EDGE_RISING, &desc->flags);
- clear_bit(FLAG_EDGE_FALLING, &desc->flags);
- sysfs_put(data->value_kn);
+ gpiochip_unlock_as_irq(guard.gc, gpiod_hwgpio(desc));
+ clear_bit(GPIOD_FLAG_EDGE_RISING, &desc->flags);
+ clear_bit(GPIOD_FLAG_EDGE_FALLING, &desc->flags);
}
-static const char * const trigger_names[] = {
+static const char *const trigger_names[] = {
[GPIO_IRQF_TRIGGER_NONE] = "none",
[GPIO_IRQF_TRIGGER_FALLING] = "falling",
[GPIO_IRQF_TRIGGER_RISING] = "rising",
[GPIO_IRQF_TRIGGER_BOTH] = "both",
};
-static ssize_t edge_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t edge_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ edge_attr);
int flags;
scoped_guard(mutex, &data->mutex)
@@ -262,10 +308,11 @@ static ssize_t edge_show(struct device *dev,
return sysfs_emit(buf, "%s\n", trigger_names[flags]);
}
-static ssize_t edge_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t edge_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ edge_attr);
ssize_t status = size;
int flags;
@@ -279,12 +326,12 @@ static ssize_t edge_store(struct device *dev,
return size;
if (data->irq_flags)
- gpio_sysfs_free_irq(dev);
+ gpio_sysfs_free_irq(data);
if (!flags)
return size;
- status = gpio_sysfs_request_irq(dev, flags);
+ status = gpio_sysfs_request_irq(data, flags);
if (status)
return status;
@@ -292,27 +339,24 @@ static ssize_t edge_store(struct device *dev,
return size;
}
-static DEVICE_ATTR_RW(edge);
/* Caller holds gpiod-data mutex. */
-static int gpio_sysfs_set_active_low(struct device *dev, int value)
+static int gpio_sysfs_set_active_low(struct gpiod_data *data, int value)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
unsigned int flags = data->irq_flags;
struct gpio_desc *desc = data->desc;
int status = 0;
-
- if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value)
+ if (!!test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) == !!value)
return 0;
- assign_bit(FLAG_ACTIVE_LOW, &desc->flags, value);
+ assign_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags, value);
/* reconfigure poll(2) support if enabled on one edge only */
if (flags == GPIO_IRQF_TRIGGER_FALLING ||
- flags == GPIO_IRQF_TRIGGER_RISING) {
- gpio_sysfs_free_irq(dev);
- status = gpio_sysfs_request_irq(dev, flags);
+ flags == GPIO_IRQF_TRIGGER_RISING) {
+ gpio_sysfs_free_irq(data);
+ status = gpio_sysfs_request_irq(data, flags);
}
gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
@@ -321,22 +365,25 @@ static int gpio_sysfs_set_active_low(struct device *dev, int value)
}
static ssize_t active_low_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ active_low_attr);
struct gpio_desc *desc = data->desc;
int value;
scoped_guard(mutex, &data->mutex)
- value = !!test_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ value = !!test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags);
return sysfs_emit(buf, "%d\n", value);
}
static ssize_t active_low_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
+ struct device_attribute *attr,
+ const char *buf, size_t size)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ active_low_attr);
ssize_t status;
long value;
@@ -346,84 +393,189 @@ static ssize_t active_low_store(struct device *dev,
guard(mutex)(&data->mutex);
- return gpio_sysfs_set_active_low(dev, value) ?: size;
+ return gpio_sysfs_set_active_low(data, value) ?: size;
}
-static DEVICE_ATTR_RW(active_low);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr,
int n)
{
- struct device *dev = kobj_to_dev(kobj);
- struct gpiod_data *data = dev_get_drvdata(dev);
- struct gpio_desc *desc = data->desc;
+ struct device_attribute *dev_attr = container_of(attr,
+ struct device_attribute, attr);
umode_t mode = attr->mode;
- bool show_direction = data->direction_can_change;
+ struct gpiod_data *data;
+
+ if (strcmp(attr->name, "direction") == 0) {
+ data = container_of(dev_attr, struct gpiod_data, dir_attr);
- if (attr == &dev_attr_direction.attr) {
- if (!show_direction)
+ if (!data->direction_can_change)
mode = 0;
- } else if (attr == &dev_attr_edge.attr) {
- if (gpiod_to_irq(desc) < 0)
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ } else if (strcmp(attr->name, "edge") == 0) {
+ data = container_of(dev_attr, struct gpiod_data, edge_attr);
+
+ if (gpiod_to_irq(data->desc) < 0)
mode = 0;
- if (!show_direction && test_bit(FLAG_IS_OUT, &desc->flags))
+
+ if (!data->direction_can_change &&
+ test_bit(GPIOD_FLAG_IS_OUT, &data->desc->flags))
mode = 0;
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
}
return mode;
}
-static struct attribute *gpio_attrs[] = {
- &dev_attr_direction.attr,
- &dev_attr_edge.attr,
- &dev_attr_value.attr,
- &dev_attr_active_low.attr,
- NULL,
-};
-
-static const struct attribute_group gpio_group = {
- .attrs = gpio_attrs,
- .is_visible = gpio_is_visible,
-};
-
-static const struct attribute_group *gpio_groups[] = {
- &gpio_group,
- NULL
-};
-
/*
* /sys/class/gpio/gpiochipN/
* /base ... matching gpio_chip.base (N)
* /label ... matching gpio_chip.label
* /ngpio ... matching gpio_chip.ngpio
+ *
+ * AND
+ *
+ * /sys/class/gpio/chipX/
+ * /export ... export GPIO at given offset
+ * /unexport ... unexport GPIO at given offset
+ * /label ... matching gpio_chip.label
+ * /ngpio ... matching gpio_chip.ngpio
*/
-static ssize_t base_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+static ssize_t base_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- const struct gpio_device *gdev = dev_get_drvdata(dev);
+ const struct gpiodev_data *data = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%u\n", gdev->base);
+ return sysfs_emit(buf, "%u\n", data->gdev->base);
}
static DEVICE_ATTR_RO(base);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
-static ssize_t label_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t label_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- const struct gpio_device *gdev = dev_get_drvdata(dev);
+ const struct gpiodev_data *data = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%s\n", gdev->label);
+ return sysfs_emit(buf, "%s\n", data->gdev->label);
}
static DEVICE_ATTR_RO(label);
-static ssize_t ngpio_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t ngpio_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- const struct gpio_device *gdev = dev_get_drvdata(dev);
+ const struct gpiodev_data *data = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%u\n", gdev->ngpio);
+ return sysfs_emit(buf, "%u\n", data->gdev->ngpio);
}
static DEVICE_ATTR_RO(ngpio);
+static int export_gpio_desc(struct gpio_desc *desc)
+{
+ int offset, ret;
+
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return -ENODEV;
+
+ offset = gpiod_hwgpio(desc);
+ if (!gpiochip_line_is_valid(guard.gc, offset)) {
+ pr_debug_ratelimited("%s: GPIO %d masked\n", __func__, offset);
+ return -EINVAL;
+ }
+
+ /*
+ * No extra locking here; GPIOD_FLAG_SYSFS just signifies that the
+ * request and export were done on behalf of userspace, so
+ * they may be undone on its behalf too.
+ */
+
+ ret = gpiod_request_user(desc, "sysfs");
+ if (ret)
+ return ret;
+
+ ret = gpiod_set_transitory(desc, false);
+ if (ret) {
+ gpiod_free(desc);
+ return ret;
+ }
+
+ ret = gpiod_export(desc, true);
+ if (ret < 0) {
+ gpiod_free(desc);
+ } else {
+ set_bit(GPIOD_FLAG_SYSFS, &desc->flags);
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
+ }
+
+ return ret;
+}
+
+static int unexport_gpio_desc(struct gpio_desc *desc)
+{
+ /*
+ * No extra locking here; GPIOD_FLAG_SYSFS just signifies that the
+ * request and export were done on behalf of userspace, so
+ * they may be undone on its behalf too.
+ */
+ if (!test_and_clear_bit(GPIOD_FLAG_SYSFS, &desc->flags))
+ return -EINVAL;
+
+ gpiod_unexport(desc);
+ gpiod_free(desc);
+
+ return 0;
+}
+
+static ssize_t do_chip_export_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, ssize_t size,
+ int (*handler)(struct gpio_desc *desc))
+{
+ struct gpiodev_data *data = dev_get_drvdata(dev);
+ struct gpio_device *gdev = data->gdev;
+ struct gpio_desc *desc;
+ unsigned int gpio;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &gpio);
+ if (ret)
+ return ret;
+
+ desc = gpio_device_get_desc(gdev, gpio);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ ret = handler(desc);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t chip_export_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ return do_chip_export_store(dev, attr, buf, size, export_gpio_desc);
+}
+
+static struct device_attribute dev_attr_export = __ATTR(export, 0200, NULL,
+ chip_export_store);
+
+static ssize_t chip_unexport_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ return do_chip_export_store(dev, attr, buf, size, unexport_gpio_desc);
+}
+
+static struct device_attribute dev_attr_unexport = __ATTR(unexport, 0200,
+ NULL,
+ chip_unexport_store);
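For illustration, a small userspace sketch of driving the new per-chip export node; the chip index and line offset are made up.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical: export line 4 of the chip registered as chip0. */
int main(void)
{
	int fd = open("/sys/class/gpio/chip0/export", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "4", 1) != 1) {
		perror("write");
		close(fd);
		return 1;
	}
	/* The line's attributes now appear in the gpio4/ group under chip0. */
	return close(fd);
}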
+
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
static struct attribute *gpiochip_attrs[] = {
&dev_attr_base.attr,
&dev_attr_label.attr,
@@ -431,7 +583,18 @@ static struct attribute *gpiochip_attrs[] = {
NULL,
};
ATTRIBUTE_GROUPS(gpiochip);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+
+static struct attribute *gpiochip_ext_attrs[] = {
+ &dev_attr_label.attr,
+ &dev_attr_ngpio.attr,
+ &dev_attr_export.attr,
+ &dev_attr_unexport.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(gpiochip_ext);
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
/*
* /sys/class/gpio/export ... write-only
* integer N ... number of GPIO to export (full access)
@@ -439,11 +602,11 @@ ATTRIBUTE_GROUPS(gpiochip);
* integer N ... number of GPIO to unexport
*/
static ssize_t export_store(const struct class *class,
- const struct class_attribute *attr,
- const char *buf, size_t len)
+ const struct class_attribute *attr,
+ const char *buf, size_t len)
{
struct gpio_desc *desc;
- int status, offset;
+ int status;
long gpio;
status = kstrtol(buf, 0, &gpio);
@@ -457,40 +620,7 @@ static ssize_t export_store(const struct class *class,
return -EINVAL;
}
- CLASS(gpio_chip_guard, guard)(desc);
- if (!guard.gc)
- return -ENODEV;
-
- offset = gpio_chip_hwgpio(desc);
- if (!gpiochip_line_is_valid(guard.gc, offset)) {
- pr_debug_ratelimited("%s: GPIO %ld masked\n", __func__, gpio);
- return -EINVAL;
- }
-
- /* No extra locking here; FLAG_SYSFS just signifies that the
- * request and export were done by on behalf of userspace, so
- * they may be undone on its behalf too.
- */
-
- status = gpiod_request_user(desc, "sysfs");
- if (status)
- goto done;
-
- status = gpiod_set_transitory(desc, false);
- if (status) {
- gpiod_free(desc);
- goto done;
- }
-
- status = gpiod_export(desc, true);
- if (status < 0) {
- gpiod_free(desc);
- } else {
- set_bit(FLAG_SYSFS, &desc->flags);
- gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
- }
-
-done:
+ status = export_gpio_desc(desc);
if (status)
pr_debug("%s: status %d\n", __func__, status);
return status ? : len;
@@ -498,8 +628,8 @@ done:
static CLASS_ATTR_WO(export);
static ssize_t unexport_store(const struct class *class,
- const struct class_attribute *attr,
- const char *buf, size_t len)
+ const struct class_attribute *attr,
+ const char *buf, size_t len)
{
struct gpio_desc *desc;
int status;
@@ -507,7 +637,7 @@ static ssize_t unexport_store(const struct class *class,
status = kstrtol(buf, 0, &gpio);
if (status < 0)
- goto done;
+ return status;
desc = gpio_to_desc(gpio);
/* reject bogus commands (gpiod_unexport() ignores them) */
@@ -516,18 +646,7 @@ static ssize_t unexport_store(const struct class *class,
return -EINVAL;
}
- status = -EINVAL;
-
- /* No extra locking here; FLAG_SYSFS just signifies that the
- * request and export were done by on behalf of userspace, so
- * they may be undone on its behalf too.
- */
- if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) {
- gpiod_unexport(desc);
- gpiod_free(desc);
- status = 0;
- }
-done:
+ status = unexport_gpio_desc(desc);
if (status)
pr_debug("%s: status %d\n", __func__, status);
return status ? : len;
@@ -540,12 +659,55 @@ static struct attribute *gpio_class_attrs[] = {
NULL,
};
ATTRIBUTE_GROUPS(gpio_class);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
static const struct class gpio_class = {
.name = "gpio",
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
.class_groups = gpio_class_groups,
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+};
+
+static int match_gdev(struct device *dev, const void *desc)
+{
+ struct gpiodev_data *data = dev_get_drvdata(dev);
+ const struct gpio_device *gdev = desc;
+
+ return data && data->gdev == gdev;
+}
+
+static struct gpiodev_data *
+gdev_get_data(struct gpio_device *gdev) __must_hold(&sysfs_lock)
+{
+ /*
+ * Find the first device in GPIO class that matches. Whether that's
+ * the one indexed by GPIO base or device ID doesn't matter, it has
+ * the same address set as driver data.
+ */
+ struct device *cdev __free(put_device) = class_find_device(&gpio_class,
+ NULL, gdev,
+ match_gdev);
+ if (!cdev)
+ return NULL;
+
+ return dev_get_drvdata(cdev);
};
+static void gpiod_attr_init(struct device_attribute *dev_attr, const char *name,
+ ssize_t (*show)(struct device *dev,
+ struct device_attribute *attr,
+ char *buf),
+ ssize_t (*store)(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count))
+{
+ sysfs_attr_init(&dev_attr->attr);
+ dev_attr->attr.name = name;
+ dev_attr->attr.mode = 0644;
+ dev_attr->show = show;
+ dev_attr->store = store;
+}
+
/**
* gpiod_export - export a GPIO through sysfs
* @desc: GPIO to make available, already requested
@@ -564,9 +726,11 @@ static const struct class gpio_class = {
*/
int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
{
+ char *path __free(kfree) = NULL;
+ struct gpiodev_data *gdev_data;
+ struct gpiod_data *desc_data;
struct gpio_device *gdev;
- struct gpiod_data *data;
- struct device *dev;
+ struct attribute **attrs;
int status;
/* can't export until sysfs is available ... */
@@ -584,63 +748,138 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
if (!guard.gc)
return -ENODEV;
- if (test_and_set_bit(FLAG_EXPORT, &desc->flags))
+ if (test_and_set_bit(GPIOD_FLAG_EXPORT, &desc->flags))
return -EPERM;
gdev = desc->gdev;
guard(mutex)(&sysfs_lock);
- /* check if chip is being removed */
- if (!gdev->mockdev) {
- status = -ENODEV;
- goto err_clear_bit;
- }
-
- if (!test_bit(FLAG_REQUESTED, &desc->flags)) {
+ if (!test_bit(GPIOD_FLAG_REQUESTED, &desc->flags)) {
gpiod_dbg(desc, "%s: unavailable (not requested)\n", __func__);
status = -EPERM;
goto err_clear_bit;
}
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data) {
+ desc_data = kzalloc(sizeof(*desc_data), GFP_KERNEL);
+ if (!desc_data) {
status = -ENOMEM;
goto err_clear_bit;
}
- data->desc = desc;
- mutex_init(&data->mutex);
+ desc_data->desc = desc;
+ mutex_init(&desc_data->mutex);
if (guard.gc->direction_input && guard.gc->direction_output)
- data->direction_can_change = direction_may_change;
+ desc_data->direction_can_change = direction_may_change;
else
- data->direction_can_change = false;
+ desc_data->direction_can_change = false;
+
+ gpiod_attr_init(&desc_data->dir_attr, "direction",
+ direction_show, direction_store);
+ gpiod_attr_init(&desc_data->val_attr, "value", value_show, value_store);
- dev = device_create_with_groups(&gpio_class, &gdev->dev,
- MKDEV(0, 0), data, gpio_groups,
- "gpio%u", desc_to_gpio(desc));
- if (IS_ERR(dev)) {
- status = PTR_ERR(dev);
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ gpiod_attr_init(&desc_data->edge_attr, "edge", edge_show, edge_store);
+ gpiod_attr_init(&desc_data->active_low_attr, "active_low",
+ active_low_show, active_low_store);
+
+ attrs = desc_data->class_attrs;
+ desc_data->class_attr_group.is_visible = gpio_is_visible;
+ attrs[GPIO_SYSFS_LINE_CLASS_ATTR_DIRECTION] = &desc_data->dir_attr.attr;
+ attrs[GPIO_SYSFS_LINE_CLASS_ATTR_VALUE] = &desc_data->val_attr.attr;
+ attrs[GPIO_SYSFS_LINE_CLASS_ATTR_EDGE] = &desc_data->edge_attr.attr;
+ attrs[GPIO_SYSFS_LINE_CLASS_ATTR_ACTIVE_LOW] = &desc_data->active_low_attr.attr;
+
+ desc_data->class_attr_group.attrs = desc_data->class_attrs;
+ desc_data->class_attr_groups[0] = &desc_data->class_attr_group;
+
+ /*
+ * Note: we need to continue passing desc_data here as there's still
+ * at least one known user of gpiod_export_link() in the tree. This
+ * function still uses class_find_device() internally.
+ */
+ desc_data->dev = device_create_with_groups(&gpio_class, &gdev->dev,
+ MKDEV(0, 0), desc_data,
+ desc_data->class_attr_groups,
+ "gpio%u",
+ desc_to_gpio(desc));
+ if (IS_ERR(desc_data->dev)) {
+ status = PTR_ERR(desc_data->dev);
goto err_free_data;
}
+ desc_data->value_kn = sysfs_get_dirent(desc_data->dev->kobj.sd,
+ "value");
+ if (!desc_data->value_kn) {
+ status = -ENODEV;
+ goto err_unregister_device;
+ }
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+
+ gdev_data = gdev_get_data(gdev);
+ if (!gdev_data) {
+ status = -ENODEV;
+ goto err_put_dirent;
+ }
+
+ desc_data->chip_attr_group.name = kasprintf(GFP_KERNEL, "gpio%u",
+ gpiod_hwgpio(desc));
+ if (!desc_data->chip_attr_group.name) {
+ status = -ENOMEM;
+ goto err_put_dirent;
+ }
+
+ attrs = desc_data->chip_attrs;
+ desc_data->chip_attr_group.is_visible = gpio_is_visible;
+ attrs[GPIO_SYSFS_LINE_CHIP_ATTR_DIRECTION] = &desc_data->dir_attr.attr;
+ attrs[GPIO_SYSFS_LINE_CHIP_ATTR_VALUE] = &desc_data->val_attr.attr;
+
+ desc_data->chip_attr_group.attrs = attrs;
+ desc_data->chip_attr_groups[0] = &desc_data->chip_attr_group;
+
+ desc_data->parent = &gdev_data->cdev_id->kobj;
+ status = sysfs_create_groups(desc_data->parent,
+ desc_data->chip_attr_groups);
+ if (status)
+ goto err_free_name;
+
+ path = kasprintf(GFP_KERNEL, "gpio%u/value", gpiod_hwgpio(desc));
+ if (!path) {
+ status = -ENOMEM;
+ goto err_remove_groups;
+ }
+
+ list_add(&desc_data->list, &gdev_data->exported_lines);
+
return 0;
+err_remove_groups:
+ sysfs_remove_groups(desc_data->parent, desc_data->chip_attr_groups);
+err_free_name:
+ kfree(desc_data->chip_attr_group.name);
+err_put_dirent:
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ sysfs_put(desc_data->value_kn);
+err_unregister_device:
+ device_unregister(desc_data->dev);
err_free_data:
- kfree(data);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+ kfree(desc_data);
err_clear_bit:
- clear_bit(FLAG_EXPORT, &desc->flags);
+ clear_bit(GPIOD_FLAG_EXPORT, &desc->flags);
gpiod_dbg(desc, "%s: status %d\n", __func__, status);
return status;
}
EXPORT_SYMBOL_GPL(gpiod_export);
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
static int match_export(struct device *dev, const void *desc)
{
struct gpiod_data *data = dev_get_drvdata(dev);
- return data->desc == desc;
+ return gpiod_is_equal(data->desc, desc);
}
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
/**
* gpiod_export_link - create a sysfs link to an exported GPIO node
@@ -657,6 +896,7 @@ static int match_export(struct device *dev, const void *desc)
int gpiod_export_link(struct device *dev, const char *name,
struct gpio_desc *desc)
{
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
struct device *cdev;
int ret;
@@ -673,6 +913,9 @@ int gpiod_export_link(struct device *dev, const char *name,
put_device(cdev);
return ret;
+#else
+ return -EOPNOTSUPP;
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
}
EXPORT_SYMBOL_GPL(gpiod_export_link);
@@ -684,8 +927,9 @@ EXPORT_SYMBOL_GPL(gpiod_export_link);
*/
void gpiod_unexport(struct gpio_desc *desc)
{
- struct gpiod_data *data;
- struct device *dev;
+ struct gpiod_data *tmp, *desc_data = NULL;
+ struct gpiodev_data *gdev_data;
+ struct gpio_device *gdev;
if (!desc) {
pr_warn("%s: invalid GPIO\n", __func__);
@@ -693,35 +937,53 @@ void gpiod_unexport(struct gpio_desc *desc)
}
scoped_guard(mutex, &sysfs_lock) {
- if (!test_bit(FLAG_EXPORT, &desc->flags))
+ if (!test_bit(GPIOD_FLAG_EXPORT, &desc->flags))
+ return;
+
+ gdev = gpiod_to_gpio_device(desc);
+ gdev_data = gdev_get_data(gdev);
+ if (!gdev_data)
return;
- dev = class_find_device(&gpio_class, NULL, desc, match_export);
- if (!dev)
+ list_for_each_entry(tmp, &gdev_data->exported_lines, list) {
+ if (gpiod_is_equal(desc, tmp->desc)) {
+ desc_data = tmp;
+ break;
+ }
+ }
+
+ if (!desc_data)
return;
- data = dev_get_drvdata(dev);
- clear_bit(FLAG_EXPORT, &desc->flags);
- device_unregister(dev);
+ list_del(&desc_data->list);
+ clear_bit(GPIOD_FLAG_EXPORT, &desc->flags);
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ sysfs_put(desc_data->value_kn);
+ device_unregister(desc_data->dev);
/*
* Release irq after deregistration to prevent race with
* edge_store.
*/
- if (data->irq_flags)
- gpio_sysfs_free_irq(dev);
+ if (desc_data->irq_flags)
+ gpio_sysfs_free_irq(desc_data);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+
+ sysfs_remove_groups(desc_data->parent,
+ desc_data->chip_attr_groups);
}
- put_device(dev);
- kfree(data);
+ mutex_destroy(&desc_data->mutex);
+ kfree(desc_data);
}
EXPORT_SYMBOL_GPL(gpiod_unexport);
int gpiochip_sysfs_register(struct gpio_device *gdev)
{
+ struct gpiodev_data *data;
struct gpio_chip *chip;
struct device *parent;
- struct device *dev;
+ int err;
/*
* Many systems add gpio chips for SOC support very early,
@@ -747,32 +1009,61 @@ int gpiochip_sysfs_register(struct gpio_device *gdev)
else
parent = &gdev->dev;
- /* use chip->base for the ID; it's already known to be unique */
- dev = device_create_with_groups(&gpio_class, parent, MKDEV(0, 0), gdev,
- gpiochip_groups, GPIOCHIP_NAME "%d",
- chip->base);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->gdev = gdev;
+ INIT_LIST_HEAD(&data->exported_lines);
guard(mutex)(&sysfs_lock);
- gdev->mockdev = dev;
+
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ /* use chip->base for the ID; it's already known to be unique */
+ data->cdev_base = device_create_with_groups(&gpio_class, parent,
+ MKDEV(0, 0), data,
+ gpiochip_groups,
+ GPIOCHIP_NAME "%d",
+ chip->base);
+ if (IS_ERR(data->cdev_base)) {
+ err = PTR_ERR(data->cdev_base);
+ kfree(data);
+ return err;
+ }
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+
+ data->cdev_id = device_create_with_groups(&gpio_class, parent,
+ MKDEV(0, 0), data,
+ gpiochip_ext_groups,
+ "chip%d", gdev->id);
+ if (IS_ERR(data->cdev_id)) {
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ device_unregister(data->cdev_base);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+ err = PTR_ERR(data->cdev_id);
+ kfree(data);
+ return err;
+ }
return 0;
}
void gpiochip_sysfs_unregister(struct gpio_device *gdev)
{
+ struct gpiodev_data *data;
struct gpio_desc *desc;
struct gpio_chip *chip;
scoped_guard(mutex, &sysfs_lock) {
- if (!gdev->mockdev)
+ data = gdev_get_data(gdev);
+ if (!data)
return;
- device_unregister(gdev->mockdev);
-
- /* prevent further gpiod exports */
- gdev->mockdev = NULL;
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ device_unregister(data->cdev_base);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+ device_unregister(data->cdev_id);
+ kfree(data);
}
guard(srcu)(&gdev->srcu);
@@ -782,7 +1073,7 @@ void gpiochip_sysfs_unregister(struct gpio_device *gdev)
return;
/* unregister gpiod class devices owned by sysfs */
- for_each_gpio_desc_with_flag(chip, desc, FLAG_SYSFS) {
+ for_each_gpio_desc_with_flag(chip, desc, GPIOD_FLAG_SYSFS) {
gpiod_unexport(desc);
gpiod_free(desc);
}
@@ -798,12 +1089,9 @@ static int gpiofind_sysfs_register(struct gpio_chip *gc, const void *data)
struct gpio_device *gdev = gc->gpiodev;
int ret;
- if (gdev->mockdev)
- return 0;
-
ret = gpiochip_sysfs_register(gdev);
if (ret)
- chip_err(gc, "failed to register the sysfs entry: %d\n", ret);
+ gpiochip_err(gc, "failed to register the sysfs entry: %d\n", ret);
return 0;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index fdafa0df1b43..91e0c384f34a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -37,6 +37,7 @@
#include "gpiolib-acpi.h"
#include "gpiolib-cdev.h"
#include "gpiolib-of.h"
+#include "gpiolib-shared.h"
#include "gpiolib-swnode.h"
#include "gpiolib-sysfs.h"
#include "gpiolib.h"
@@ -75,6 +76,19 @@ static const struct bus_type gpio_bus_type = {
};
/*
+ * Eventually we want all GPIOs to be dynamically allocated from 0.
+ * However, some legacy drivers still perform fixed allocation.
+ * Until they are all fixed, leave 0-512 space for them.
+ */
+#define GPIO_DYNAMIC_BASE 512
+/*
+ * Define the maximum number of possible GPIOs in the global numberspace.
+ * While GPIO bases and numbers are positive, we limit them to the signed
+ * maximum as a lot of code uses negative values for special cases.
+ */
+#define GPIO_DYNAMIC_MAX INT_MAX
+
+/*
* Number of GPIOs to use for the fast path in set array
*/
#define FASTPATH_NGPIO CONFIG_GPIOLIB_FASTPATH_LIMIT
@@ -114,10 +128,10 @@ const char *gpiod_get_label(struct gpio_desc *desc)
label = srcu_dereference_check(desc->label, &desc->gdev->desc_srcu,
srcu_read_lock_held(&desc->gdev->desc_srcu));
- if (test_bit(FLAG_USED_AS_IRQ, &flags))
+ if (test_bit(GPIOD_FLAG_USED_AS_IRQ, &flags))
return label ? label->str : "interrupt";
- if (!test_bit(FLAG_REQUESTED, &flags))
+ if (!test_bit(GPIOD_FLAG_REQUESTED, &flags))
return NULL;
return label ? label->str : NULL;
@@ -222,6 +236,19 @@ int desc_to_gpio(const struct gpio_desc *desc)
}
EXPORT_SYMBOL_GPL(desc_to_gpio);
+/**
+ * gpiod_hwgpio - Return the GPIO number of the passed descriptor relative to
+ * its chip.
+ * @desc: GPIO descriptor
+ *
+ * Returns:
+ * Hardware offset of the GPIO represented by the descriptor.
+ */
+int gpiod_hwgpio(const struct gpio_desc *desc)
+{
+ return desc - &desc->gdev->descs[0];
+}
+EXPORT_SYMBOL_GPL(gpiod_hwgpio);
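A quick illustration of how this differs from desc_to_gpio(); the base value here is assumed.

/*
 * Example: for a chip with base 512, the fourth line's descriptor yields
 * desc_to_gpio(desc) == 515 (global number) while gpiod_hwgpio(desc) == 3
 * (chip-relative offset).
 */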
/**
* gpiod_to_chip - Return the GPIO chip to which a GPIO descriptor belongs
@@ -266,20 +293,6 @@ struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc)
EXPORT_SYMBOL_GPL(gpiod_to_gpio_device);
/**
- * gpiod_is_equal() - Check if two GPIO descriptors refer to the same pin.
- * @desc: Descriptor to compare.
- * @other: The second descriptor to compare against.
- *
- * Returns:
- * True if the descriptors refer to the same physical pin. False otherwise.
- */
-bool gpiod_is_equal(struct gpio_desc *desc, struct gpio_desc *other)
-{
- return desc == other;
-}
-EXPORT_SYMBOL_GPL(gpiod_is_equal);
-
-/**
* gpio_device_get_base() - Get the base GPIO number allocated by this device
* @gdev: GPIO device
*
@@ -387,6 +400,21 @@ static int validate_desc(const struct gpio_desc *desc, const char *func)
return; \
} while (0)
+/**
+ * gpiod_is_equal() - Check if two GPIO descriptors refer to the same pin.
+ * @desc: Descriptor to compare.
+ * @other: The second descriptor to compare against.
+ *
+ * Returns:
+ * True if the descriptors refer to the same physical pin. False otherwise.
+ */
+bool gpiod_is_equal(const struct gpio_desc *desc, const struct gpio_desc *other)
+{
+ return validate_desc(desc, __func__) > 0 &&
+ !IS_ERR_OR_NULL(other) && desc == other;
+}
+EXPORT_SYMBOL_GPL(gpiod_is_equal);
+
static int gpiochip_get_direction(struct gpio_chip *gc, unsigned int offset)
{
int ret;
@@ -429,15 +457,15 @@ int gpiod_get_direction(struct gpio_desc *desc)
if (!guard.gc)
return -ENODEV;
- offset = gpio_chip_hwgpio(desc);
+ offset = gpiod_hwgpio(desc);
flags = READ_ONCE(desc->flags);
/*
* Open drain emulation using input mode may incorrectly report
* input here, fix that up.
*/
- if (test_bit(FLAG_OPEN_DRAIN, &flags) &&
- test_bit(FLAG_IS_OUT, &flags))
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &flags) &&
+ test_bit(GPIOD_FLAG_IS_OUT, &flags))
return 0;
if (!guard.gc->get_direction)
@@ -454,7 +482,7 @@ int gpiod_get_direction(struct gpio_desc *desc)
if (ret > 0)
ret = 1;
- assign_bit(FLAG_IS_OUT, &flags, !ret);
+ assign_bit(GPIOD_FLAG_IS_OUT, &flags, !ret);
WRITE_ONCE(desc->flags, flags);
return ret;
@@ -832,7 +860,7 @@ static void gpiochip_free_remaining_irqs(struct gpio_chip *gc)
{
struct gpio_desc *desc;
- for_each_gpio_desc_with_flag(gc, desc, FLAG_USED_AS_IRQ)
+ for_each_gpio_desc_with_flag(gc, desc, GPIOD_FLAG_USED_AS_IRQ)
gpiod_free_irqs(desc);
}
@@ -907,8 +935,8 @@ static void gpiochip_machine_hog(struct gpio_chip *gc, struct gpiod_hog *hog)
desc = gpiochip_get_desc(gc, hog->chip_hwnum);
if (IS_ERR(desc)) {
- chip_err(gc, "%s: unable to get GPIO desc: %ld\n", __func__,
- PTR_ERR(desc));
+ gpiochip_err(gc, "%s: unable to get GPIO desc: %ld\n",
+ __func__, PTR_ERR(desc));
return;
}
@@ -1023,11 +1051,6 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
int base = 0;
int ret;
- /* Only allow one set() and one set_multiple(). */
- if ((gc->set && gc->set_rv) ||
- (gc->set_multiple && gc->set_multiple_rv))
- return -EINVAL;
-
/*
* First: allocate and populate the internal stat container, and
* set up the struct device.
@@ -1115,7 +1138,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
ret = gpiodev_add_to_list_unlocked(gdev);
if (ret) {
- chip_err(gc, "GPIO integer space overlap, cannot add chip\n");
+ gpiochip_err(gc, "GPIO integer space overlap, cannot add chip\n");
goto err_free_label;
}
}
@@ -1160,10 +1183,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
* lock here.
*/
if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index))
- assign_bit(FLAG_IS_OUT, &desc->flags,
+ assign_bit(GPIOD_FLAG_IS_OUT, &desc->flags,
!gc->get_direction(gc, desc_index));
else
- assign_bit(FLAG_IS_OUT,
+ assign_bit(GPIOD_FLAG_IS_OUT,
&desc->flags, !gc->direction_input);
}
@@ -1191,6 +1214,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
if (ret)
goto err_remove_irqchip_mask;
+ ret = gpio_device_setup_shared(gdev);
+ if (ret)
+ goto err_remove_irqchip;
+
/*
* By first adding the chardev, and then adding the device,
* we get a device node entry in sysfs under
@@ -1202,10 +1229,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
if (gpiolib_initialized) {
ret = gpiochip_setup_dev(gdev);
if (ret)
- goto err_remove_irqchip;
+ goto err_teardown_shared;
}
+
return 0;
+err_teardown_shared:
+ gpio_device_teardown_shared(gdev);
err_remove_irqchip:
gpiochip_irqchip_remove(gc);
err_remove_irqchip_mask:
@@ -1274,6 +1304,7 @@ void gpiochip_remove(struct gpio_chip *gc)
/* Numb the device, cancelling all outstanding operations */
rcu_assign_pointer(gdev->chip, NULL);
synchronize_srcu(&gdev->srcu);
+ gpio_device_teardown_shared(gdev);
gpiochip_irqchip_remove(gc);
acpi_gpiochip_remove(gc);
of_gpiochip_remove(gc);
@@ -1519,8 +1550,7 @@ static void gpiochip_set_hierarchical_irqchip(struct gpio_chip *gc,
&parent_hwirq,
&parent_type);
if (ret) {
- chip_err(gc, "skip set-up on hwirq %d\n",
- i);
+ gpiochip_err(gc, "skip set-up on hwirq %d\n", i);
continue;
}
@@ -1533,15 +1563,14 @@ static void gpiochip_set_hierarchical_irqchip(struct gpio_chip *gc,
ret = irq_domain_alloc_irqs(gc->irq.domain, 1,
NUMA_NO_NODE, &fwspec);
if (ret < 0) {
- chip_err(gc,
- "can not allocate irq for GPIO line %d parent hwirq %d in hierarchy domain: %d\n",
- i, parent_hwirq,
- ret);
+ gpiochip_err(gc,
+ "can not allocate irq for GPIO line %d parent hwirq %d in hierarchy domain: %d\n",
+ i, parent_hwirq, ret);
}
}
}
- chip_err(gc, "%s unknown fwnode type proceed anyway\n", __func__);
+ gpiochip_err(gc, "%s unknown fwnode type proceed anyway\n", __func__);
return;
}
@@ -1593,15 +1622,15 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
if (ret)
return ret;
- chip_dbg(gc, "allocate IRQ %d, hwirq %lu\n", irq, hwirq);
+ gpiochip_dbg(gc, "allocate IRQ %d, hwirq %lu\n", irq, hwirq);
ret = girq->child_to_parent_hwirq(gc, hwirq, type,
&parent_hwirq, &parent_type);
if (ret) {
- chip_err(gc, "can't look up hwirq %lu\n", hwirq);
+ gpiochip_err(gc, "can't look up hwirq %lu\n", hwirq);
return ret;
}
- chip_dbg(gc, "found parent hwirq %u\n", parent_hwirq);
+ gpiochip_dbg(gc, "found parent hwirq %u\n", parent_hwirq);
/*
* We set handle_bad_irq because the .set_type() should
@@ -1622,8 +1651,8 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
if (ret)
return ret;
- chip_dbg(gc, "alloc_irqs_parent for %d parent hwirq %d\n",
- irq, parent_hwirq);
+ gpiochip_dbg(gc, "alloc_irqs_parent for %d parent hwirq %d\n",
+ irq, parent_hwirq);
irq_set_lockdep_class(irq, gc->irq.lock_key, gc->irq.request_key);
ret = irq_domain_alloc_irqs_parent(d, irq, 1, &gpio_parent_fwspec);
/*
@@ -1633,9 +1662,9 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
if (irq_domain_is_msi(d->parent) && (ret == -EEXIST))
ret = 0;
if (ret)
- chip_err(gc,
- "failed to allocate parent hwirq %d for hwirq %lu\n",
- parent_hwirq, hwirq);
+ gpiochip_err(gc,
+ "failed to allocate parent hwirq %d for hwirq %lu\n",
+ parent_hwirq, hwirq);
return ret;
}
@@ -1711,7 +1740,7 @@ static struct irq_domain *gpiochip_hierarchy_create_domain(struct gpio_chip *gc)
if (!gc->irq.child_to_parent_hwirq ||
!gc->irq.fwnode) {
- chip_err(gc, "missing irqdomain vital data\n");
+ gpiochip_err(gc, "missing irqdomain vital data\n");
return ERR_PTR(-EINVAL);
}
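/*
 * Hedged sketch, not part of this patch: the "vital data" checked above is
 * the driver-supplied translation callback plus the fwnode. A minimal
 * child_to_parent_hwirq() for a hierarchy where GPIO line n maps to parent
 * hwirq n + 32 (the offset is hypothetical) could look like:
 */
static int foo_child_to_parent_hwirq(struct gpio_chip *gc,
				     unsigned int child,
				     unsigned int child_type,
				     unsigned int *parent,
				     unsigned int *parent_type)
{
	/* Fixed linear translation; real hardware may need a table lookup. */
	*parent = child + 32;
	*parent_type = child_type;
	return 0;
}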
@@ -1984,7 +2013,7 @@ static void gpiochip_set_irq_hooks(struct gpio_chip *gc)
if (irqchip->flags & IRQCHIP_IMMUTABLE)
return;
- chip_warn(gc, "not an immutable chip, please consider fixing it!\n");
+ gpiochip_warn(gc, "not an immutable chip, please consider fixing it!\n");
if (!irqchip->irq_request_resources &&
!irqchip->irq_release_resources) {
@@ -2000,8 +2029,8 @@ static void gpiochip_set_irq_hooks(struct gpio_chip *gc)
* ...and if so, give a gentle warning that this is bad
* practice.
*/
- chip_info(gc,
- "detected irqchip that is shared with multiple gpiochips: please fix the driver.\n");
+ gpiochip_info(gc,
+ "detected irqchip that is shared with multiple gpiochips: please fix the driver.\n");
return;
}
@@ -2030,7 +2059,8 @@ static int gpiochip_irqchip_add_allocated_domain(struct gpio_chip *gc,
return -EINVAL;
if (gc->to_irq)
- chip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n", __func__);
+ gpiochip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n",
+ __func__);
gc->to_irq = gpiochip_to_irq;
gc->irq.domain = domain;
@@ -2071,7 +2101,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
return 0;
if (gc->irq.parent_handler && gc->can_sleep) {
- chip_err(gc, "you cannot have chained interrupts on a chip that may sleep\n");
+ gpiochip_err(gc, "you cannot have chained interrupts on a chip that may sleep\n");
return -EINVAL;
}
@@ -2307,10 +2337,8 @@ int gpiochip_add_pingroup_range(struct gpio_chip *gc,
int ret;
pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL);
- if (!pin_range) {
- chip_err(gc, "failed to allocate pin ranges\n");
+ if (!pin_range)
return -ENOMEM;
- }
/* Use local offset as range ID */
pin_range->range.id = gpio_offset;
@@ -2329,7 +2357,7 @@ int gpiochip_add_pingroup_range(struct gpio_chip *gc,
pinctrl_add_gpio_range(pctldev, &pin_range->range);
- chip_dbg(gc, "created GPIO range %d->%d ==> %s PINGRP %s\n",
+ gpiochip_dbg(gc, "created GPIO range %d->%d ==> %s PINGRP %s\n",
gpio_offset, gpio_offset + pin_range->range.npins - 1,
pinctrl_dev_get_devname(pctldev), pin_group);
@@ -2340,11 +2368,13 @@ int gpiochip_add_pingroup_range(struct gpio_chip *gc,
EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range);
/**
- * gpiochip_add_pin_range() - add a range for GPIO <-> pin mapping
+ * gpiochip_add_pin_range_with_pins() - add a range for GPIO <-> pin mapping
* @gc: the gpiochip to add the range for
* @pinctl_name: the dev_name() of the pin controller to map to
* @gpio_offset: the start offset in the current gpio_chip number space
* @pin_offset: the start offset in the pin controller number space
+ * @pins: the list of non-consecutive pins to accumulate in this range (if
+ * not NULL, pin_offset is ignored by the pinctrl core)
* @npins: the number of pins from the offset of each pin space (GPIO and
* pin controller) to accumulate in this range
*
@@ -2356,19 +2386,20 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range);
* Returns:
* 0 on success, or a negative errno on failure.
*/
-int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins)
+int gpiochip_add_pin_range_with_pins(struct gpio_chip *gc,
+ const char *pinctl_name,
+ unsigned int gpio_offset,
+ unsigned int pin_offset,
+ unsigned int const *pins,
+ unsigned int npins)
{
struct gpio_pin_range *pin_range;
struct gpio_device *gdev = gc->gpiodev;
int ret;
pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL);
- if (!pin_range) {
- chip_err(gc, "failed to allocate pin ranges\n");
+ if (!pin_range)
return -ENOMEM;
- }
/* Use local offset as range ID */
pin_range->range.id = gpio_offset;
@@ -2376,25 +2407,30 @@ int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
pin_range->range.name = gc->label;
pin_range->range.base = gdev->base + gpio_offset;
pin_range->range.pin_base = pin_offset;
+ pin_range->range.pins = pins;
pin_range->range.npins = npins;
pin_range->pctldev = pinctrl_find_and_add_gpio_range(pinctl_name,
&pin_range->range);
if (IS_ERR(pin_range->pctldev)) {
ret = PTR_ERR(pin_range->pctldev);
- chip_err(gc, "could not create pin range\n");
+ gpiochip_err(gc, "could not create pin range\n");
kfree(pin_range);
return ret;
}
- chip_dbg(gc, "created GPIO range %d->%d ==> %s PIN %d->%d\n",
- gpio_offset, gpio_offset + npins - 1,
- pinctl_name,
- pin_offset, pin_offset + npins - 1);
+ if (pin_range->range.pins)
+ gpiochip_dbg(gc, "created GPIO range %d->%d ==> %s %d sparse PIN range { %d, ... }",
+ gpio_offset, gpio_offset + npins - 1,
+ pinctl_name, npins, pins[0]);
+ else
+ gpiochip_dbg(gc, "created GPIO range %d->%d ==> %s PIN %d->%d\n",
+ gpio_offset, gpio_offset + npins - 1, pinctl_name,
+ pin_offset, pin_offset + npins - 1);
list_add_tail(&pin_range->node, &gdev->pin_ranges);
return 0;
}
-EXPORT_SYMBOL_GPL(gpiochip_add_pin_range);
+EXPORT_SYMBOL_GPL(gpiochip_add_pin_range_with_pins);
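/*
 * Hedged usage sketch, not part of this patch: registering a sparse range
 * where GPIO offsets 0..3 map to the non-consecutive pins 4, 7, 12 and 13
 * of a pin controller. The "pinctrl-foo" name and the pin numbers are
 * hypothetical; with a non-NULL pins array the pin_offset argument (0 here)
 * is ignored by the pinctrl core.
 */
static const unsigned int foo_gpio_pins[] = { 4, 7, 12, 13 };

static int foo_add_pin_ranges(struct gpio_chip *gc)
{
	return gpiochip_add_pin_range_with_pins(gc, "pinctrl-foo", 0, 0,
						foo_gpio_pins,
						ARRAY_SIZE(foo_gpio_pins));
}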
/**
* gpiochip_remove_pin_ranges() - remove all the GPIO <-> pin mappings
@@ -2429,10 +2465,10 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
if (!guard.gc)
return -ENODEV;
- if (test_and_set_bit(FLAG_REQUESTED, &desc->flags))
+ if (test_and_set_bit(GPIOD_FLAG_REQUESTED, &desc->flags))
return -EBUSY;
- offset = gpio_chip_hwgpio(desc);
+ offset = gpiod_hwgpio(desc);
if (!gpiochip_line_is_valid(guard.gc, offset))
return -EINVAL;
@@ -2458,7 +2494,7 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
return 0;
out_clear_bit:
- clear_bit(FLAG_REQUESTED, &desc->flags);
+ clear_bit(GPIOD_FLAG_REQUESTED, &desc->flags);
return ret;
}
@@ -2492,20 +2528,20 @@ static void gpiod_free_commit(struct gpio_desc *desc)
flags = READ_ONCE(desc->flags);
- if (guard.gc && test_bit(FLAG_REQUESTED, &flags)) {
+ if (guard.gc && test_bit(GPIOD_FLAG_REQUESTED, &flags)) {
if (guard.gc->free)
- guard.gc->free(guard.gc, gpio_chip_hwgpio(desc));
-
- clear_bit(FLAG_ACTIVE_LOW, &flags);
- clear_bit(FLAG_REQUESTED, &flags);
- clear_bit(FLAG_OPEN_DRAIN, &flags);
- clear_bit(FLAG_OPEN_SOURCE, &flags);
- clear_bit(FLAG_PULL_UP, &flags);
- clear_bit(FLAG_PULL_DOWN, &flags);
- clear_bit(FLAG_BIAS_DISABLE, &flags);
- clear_bit(FLAG_EDGE_RISING, &flags);
- clear_bit(FLAG_EDGE_FALLING, &flags);
- clear_bit(FLAG_IS_HOGGED, &flags);
+ guard.gc->free(guard.gc, gpiod_hwgpio(desc));
+
+ clear_bit(GPIOD_FLAG_ACTIVE_LOW, &flags);
+ clear_bit(GPIOD_FLAG_REQUESTED, &flags);
+ clear_bit(GPIOD_FLAG_OPEN_DRAIN, &flags);
+ clear_bit(GPIOD_FLAG_OPEN_SOURCE, &flags);
+ clear_bit(GPIOD_FLAG_PULL_UP, &flags);
+ clear_bit(GPIOD_FLAG_PULL_DOWN, &flags);
+ clear_bit(GPIOD_FLAG_BIAS_DISABLE, &flags);
+ clear_bit(GPIOD_FLAG_EDGE_RISING, &flags);
+ clear_bit(GPIOD_FLAG_EDGE_FALLING, &flags);
+ clear_bit(GPIOD_FLAG_IS_HOGGED, &flags);
#ifdef CONFIG_OF_DYNAMIC
WRITE_ONCE(desc->hog, NULL);
#endif
@@ -2548,7 +2584,7 @@ char *gpiochip_dup_line_label(struct gpio_chip *gc, unsigned int offset)
if (IS_ERR(desc))
return NULL;
- if (!test_bit(FLAG_REQUESTED, &desc->flags))
+ if (!test_bit(GPIOD_FLAG_REQUESTED, &desc->flags))
return NULL;
guard(srcu)(&desc->gdev->desc_srcu);
@@ -2598,7 +2634,7 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
int ret;
if (IS_ERR(desc)) {
- chip_err(gc, "failed to get GPIO %s descriptor\n", name);
+ gpiochip_err(gc, "failed to get GPIO %s descriptor\n", name);
return desc;
}
@@ -2609,7 +2645,7 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
ret = gpiod_configure_flags(desc, label, lflags, dflags);
if (ret) {
gpiod_free_commit(desc);
- chip_err(gc, "setup of own GPIO %s failed\n", name);
+ gpiochip_err(gc, "setup of own GPIO %s failed\n", name);
return ERR_PTR(ret);
}
@@ -2654,7 +2690,7 @@ int gpio_do_set_config(struct gpio_desc *desc, unsigned long config)
if (!guard.gc->set_config)
return -ENOTSUPP;
- ret = guard.gc->set_config(guard.gc, gpio_chip_hwgpio(desc), config);
+ ret = guard.gc->set_config(guard.gc, gpiod_hwgpio(desc), config);
if (ret > 0)
ret = -EBADE;
@@ -2685,7 +2721,7 @@ static int gpio_set_config_with_argument_optional(struct gpio_desc *desc,
u32 argument)
{
struct device *dev = &desc->gdev->dev;
- int gpio = gpio_chip_hwgpio(desc);
+ int gpio = gpiod_hwgpio(desc);
int ret;
ret = gpio_set_config_with_argument(desc, mode, argument);
@@ -2716,11 +2752,11 @@ static int gpio_set_bias(struct gpio_desc *desc)
flags = READ_ONCE(desc->flags);
- if (test_bit(FLAG_BIAS_DISABLE, &flags))
+ if (test_bit(GPIOD_FLAG_BIAS_DISABLE, &flags))
bias = PIN_CONFIG_BIAS_DISABLE;
- else if (test_bit(FLAG_PULL_UP, &flags))
+ else if (test_bit(GPIOD_FLAG_PULL_UP, &flags))
bias = PIN_CONFIG_BIAS_PULL_UP;
- else if (test_bit(FLAG_PULL_DOWN, &flags))
+ else if (test_bit(GPIOD_FLAG_PULL_DOWN, &flags))
bias = PIN_CONFIG_BIAS_PULL_DOWN;
else
return 0;
@@ -2848,9 +2884,9 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc)
*/
if (guard.gc->direction_input) {
ret = gpiochip_direction_input(guard.gc,
- gpio_chip_hwgpio(desc));
+ gpiod_hwgpio(desc));
} else if (guard.gc->get_direction) {
- dir = gpiochip_get_direction(guard.gc, gpio_chip_hwgpio(desc));
+ dir = gpiochip_get_direction(guard.gc, gpiod_hwgpio(desc));
if (dir < 0)
return dir;
@@ -2862,7 +2898,7 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc)
}
}
if (ret == 0) {
- clear_bit(FLAG_IS_OUT, &desc->flags);
+ clear_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
ret = gpio_set_bias(desc);
}
@@ -2877,19 +2913,14 @@ static int gpiochip_set(struct gpio_chip *gc, unsigned int offset, int value)
lockdep_assert_held(&gc->gpiodev->srcu);
- if (WARN_ON(unlikely(!gc->set && !gc->set_rv)))
+ if (WARN_ON(unlikely(!gc->set)))
return -EOPNOTSUPP;
- if (gc->set_rv) {
- ret = gc->set_rv(gc, offset, value);
- if (ret > 0)
- ret = -EBADE;
-
- return ret;
- }
+ ret = gc->set(gc, offset, value);
+ if (ret > 0)
+ ret = -EBADE;
- gc->set(gc, offset, value);
- return 0;
+ return ret;
}
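/*
 * Hedged sketch, not part of this patch: with the void variants gone, a
 * driver's set() callback returns 0 or a negative errno; gpiolib converts
 * any positive return into -EBADE as above. The private structure and
 * register offsets below are hypothetical.
 */
#define FOO_GPIO_SET_REG	0x10
#define FOO_GPIO_CLR_REG	0x14

struct foo_gpio {
	void __iomem *base;
};

static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct foo_gpio *priv = gpiochip_get_data(gc);

	/* Write-1-to-set / write-1-to-clear style output registers. */
	if (value)
		writel(BIT(offset), priv->base + FOO_GPIO_SET_REG);
	else
		writel(BIT(offset), priv->base + FOO_GPIO_CLR_REG);

	return 0;
}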
static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
@@ -2905,7 +2936,7 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
* output-only, but if there is then not even a .set() operation it
* is pretty tricky to drive the output line.
*/
- if (!guard.gc->set && !guard.gc->set_rv && !guard.gc->direction_output) {
+ if (!guard.gc->set && !guard.gc->direction_output) {
gpiod_warn(desc,
"%s: missing set() and direction_output() operations\n",
__func__);
@@ -2914,12 +2945,12 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
if (guard.gc->direction_output) {
ret = gpiochip_direction_output(guard.gc,
- gpio_chip_hwgpio(desc), val);
+ gpiod_hwgpio(desc), val);
} else {
/* Check that we are in output mode if we can */
if (guard.gc->get_direction) {
dir = gpiochip_get_direction(guard.gc,
- gpio_chip_hwgpio(desc));
+ gpiod_hwgpio(desc));
if (dir < 0)
return dir;
@@ -2934,13 +2965,13 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
* If we can't actively set the direction, we are some
* output-only chip, so just drive the output as desired.
*/
- ret = gpiochip_set(guard.gc, gpio_chip_hwgpio(desc), val);
+ ret = gpiochip_set(guard.gc, gpiod_hwgpio(desc), val);
if (ret)
return ret;
}
if (!ret)
- set_bit(FLAG_IS_OUT, &desc->flags);
+ set_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
trace_gpio_value(desc_to_gpio(desc), 0, val);
trace_gpio_direction(desc_to_gpio(desc), 0, ret);
return ret;
@@ -3006,21 +3037,21 @@ int gpiod_direction_output_nonotify(struct gpio_desc *desc, int value)
flags = READ_ONCE(desc->flags);
- if (test_bit(FLAG_ACTIVE_LOW, &flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &flags))
value = !value;
else
value = !!value;
/* GPIOs used for enabled IRQs shall not be set as output */
- if (test_bit(FLAG_USED_AS_IRQ, &flags) &&
- test_bit(FLAG_IRQ_IS_ENABLED, &flags)) {
+ if (test_bit(GPIOD_FLAG_USED_AS_IRQ, &flags) &&
+ test_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &flags)) {
gpiod_err(desc,
"%s: tried to set a GPIO tied to an IRQ as output\n",
__func__);
return -EIO;
}
- if (test_bit(FLAG_OPEN_DRAIN, &flags)) {
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &flags)) {
/* First see if we can enable open drain in hardware */
ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_DRAIN);
if (!ret)
@@ -3028,7 +3059,7 @@ int gpiod_direction_output_nonotify(struct gpio_desc *desc, int value)
/* Emulate open drain by not actively driving the line high */
if (value)
goto set_output_flag;
- } else if (test_bit(FLAG_OPEN_SOURCE, &flags)) {
+ } else if (test_bit(GPIOD_FLAG_OPEN_SOURCE, &flags)) {
ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_SOURCE);
if (!ret)
goto set_output_value;
@@ -3055,7 +3086,7 @@ set_output_flag:
* set the IS_OUT flag or otherwise we won't be able to set the line
* value anymore.
*/
- set_bit(FLAG_IS_OUT, &desc->flags);
+ set_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
return 0;
}
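/*
 * Hedged illustration, not part of this patch: the emulation above never
 * actively drives an open-drain line high; it relies on the external
 * pull-up instead:
 *
 *   value == 0: direction_output(low)  - line actively driven low
 *   value == 1: line left as input     - high impedance, pulled up
 *                                        externally (set_output_flag path)
 */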
@@ -3085,7 +3116,7 @@ int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
}
ret = guard.gc->en_hw_timestamp(guard.gc,
- gpio_chip_hwgpio(desc), flags);
+ gpiod_hwgpio(desc), flags);
if (ret)
gpiod_warn(desc, "%s: hw ts request failed\n", __func__);
@@ -3117,7 +3148,7 @@ int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
return -ENOTSUPP;
}
- ret = guard.gc->dis_hw_timestamp(guard.gc, gpio_chip_hwgpio(desc),
+ ret = guard.gc->dis_hw_timestamp(guard.gc, gpiod_hwgpio(desc),
flags);
if (ret)
gpiod_warn(desc, "%s: hw ts release failed\n", __func__);
@@ -3195,10 +3226,10 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
{
VALIDATE_DESC(desc);
/*
- * Handle FLAG_TRANSITORY first, enabling queries to gpiolib for
+ * Handle GPIOD_FLAG_TRANSITORY first, enabling queries to gpiolib for
* persistence state.
*/
- assign_bit(FLAG_TRANSITORY, &desc->flags, transitory);
+ assign_bit(GPIOD_FLAG_TRANSITORY, &desc->flags, transitory);
/* If the driver supports it, set the persistence state now */
return gpio_set_config_with_argument_optional(desc,
@@ -3216,7 +3247,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
int gpiod_is_active_low(const struct gpio_desc *desc)
{
VALIDATE_DESC(desc);
- return test_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ return test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags);
}
EXPORT_SYMBOL_GPL(gpiod_is_active_low);
@@ -3227,7 +3258,7 @@ EXPORT_SYMBOL_GPL(gpiod_is_active_low);
void gpiod_toggle_active_low(struct gpio_desc *desc)
{
VALIDATE_DESC_VOID(desc);
- change_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ change_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags);
gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
}
EXPORT_SYMBOL_GPL(gpiod_toggle_active_low);
@@ -3248,7 +3279,7 @@ static int gpiochip_get(struct gpio_chip *gc, unsigned int offset)
static int gpio_chip_get_value(struct gpio_chip *gc, const struct gpio_desc *desc)
{
- return gc->get ? gpiochip_get(gc, gpio_chip_hwgpio(desc)) : -EIO;
+ return gc->get ? gpiochip_get(gc, gpiod_hwgpio(desc)) : -EIO;
}
/* I/O calls are only valid after configuration completed; the relevant
@@ -3297,14 +3328,15 @@ static int gpiod_get_raw_value_commit(const struct gpio_desc *desc)
static int gpio_chip_get_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
- int ret;
-
lockdep_assert_held(&gc->gpiodev->srcu);
if (gc->get_multiple) {
+ int ret;
+
ret = gc->get_multiple(gc, mask, bits);
if (ret > 0)
return -EBADE;
+ return ret;
}
if (gc->get) {
@@ -3407,7 +3439,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
first = i;
do {
const struct gpio_desc *desc = desc_array[i];
- int hwgpio = gpio_chip_hwgpio(desc);
+ int hwgpio = gpiod_hwgpio(desc);
__set_bit(hwgpio, mask);
i++;
@@ -3429,10 +3461,10 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
for (j = first; j < i; ) {
const struct gpio_desc *desc = desc_array[j];
- int hwgpio = gpio_chip_hwgpio(desc);
+ int hwgpio = gpiod_hwgpio(desc);
int value = test_bit(hwgpio, bits);
- if (!raw && test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ if (!raw && test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
__assign_bit(j, value_bitmap, value);
trace_gpio_value(desc_to_gpio(desc), 1, value);
@@ -3494,7 +3526,7 @@ int gpiod_get_value(const struct gpio_desc *desc)
if (value < 0)
return value;
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
return value;
@@ -3566,7 +3598,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_value);
*/
static int gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
{
- int ret = 0, offset = gpio_chip_hwgpio(desc);
+ int ret = 0, offset = gpiod_hwgpio(desc);
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
@@ -3577,7 +3609,7 @@ static int gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
} else {
ret = gpiochip_direction_output(guard.gc, offset, 0);
if (!ret)
- set_bit(FLAG_IS_OUT, &desc->flags);
+ set_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
}
trace_gpio_direction(desc_to_gpio(desc), value, ret);
if (ret < 0)
@@ -3595,7 +3627,7 @@ static int gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
*/
static int gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
{
- int ret = 0, offset = gpio_chip_hwgpio(desc);
+ int ret = 0, offset = gpiod_hwgpio(desc);
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
@@ -3604,7 +3636,7 @@ static int gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
if (value) {
ret = gpiochip_direction_output(guard.gc, offset, 1);
if (!ret)
- set_bit(FLAG_IS_OUT, &desc->flags);
+ set_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
} else {
ret = gpiochip_direction_input(guard.gc, offset);
}
@@ -3619,7 +3651,7 @@ static int gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
static int gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
{
- if (unlikely(!test_bit(FLAG_IS_OUT, &desc->flags)))
+ if (unlikely(!test_bit(GPIOD_FLAG_IS_OUT, &desc->flags)))
return -EPERM;
CLASS(gpio_chip_guard, guard)(desc);
@@ -3627,7 +3659,7 @@ static int gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
return -ENODEV;
trace_gpio_value(desc_to_gpio(desc), 0, value);
- return gpiochip_set(guard.gc, gpio_chip_hwgpio(desc), value);
+ return gpiochip_set(guard.gc, gpiod_hwgpio(desc), value);
}
/*
@@ -3650,19 +3682,14 @@ static int gpiochip_set_multiple(struct gpio_chip *gc,
lockdep_assert_held(&gc->gpiodev->srcu);
- if (gc->set_multiple_rv) {
- ret = gc->set_multiple_rv(gc, mask, bits);
+ if (gc->set_multiple) {
+ ret = gc->set_multiple(gc, mask, bits);
if (ret > 0)
ret = -EBADE;
return ret;
}
- if (gc->set_multiple) {
- gc->set_multiple(gc, mask, bits);
- return 0;
- }
-
/* set outputs if the corresponding mask bit is set */
for_each_set_bit(i, mask, gc->ngpio) {
ret = gpiochip_set(gc, i, test_bit(i, bits));
@@ -3694,7 +3721,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
WARN_ON(array_info->gdev->can_sleep);
for (i = 0; i < array_size; i++) {
- if (unlikely(!test_bit(FLAG_IS_OUT,
+ if (unlikely(!test_bit(GPIOD_FLAG_IS_OUT,
&desc_array[i]->flags)))
return -EPERM;
}
@@ -3755,10 +3782,10 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
do {
struct gpio_desc *desc = desc_array[i];
- int hwgpio = gpio_chip_hwgpio(desc);
+ int hwgpio = gpiod_hwgpio(desc);
int value = test_bit(i, value_bitmap);
- if (unlikely(!test_bit(FLAG_IS_OUT, &desc->flags)))
+ if (unlikely(!test_bit(GPIOD_FLAG_IS_OUT, &desc->flags)))
return -EPERM;
/*
@@ -3768,16 +3795,16 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
*/
if (!raw && !(array_info &&
test_bit(i, array_info->invert_mask)) &&
- test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
trace_gpio_value(desc_to_gpio(desc), 0, value);
/*
* collect all normal outputs belonging to the same chip
* open drain and open source outputs are set individually
*/
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) && !raw) {
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags) && !raw) {
gpio_set_open_drain_value_commit(desc, value);
- } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags) && !raw) {
+ } else if (test_bit(GPIOD_FLAG_OPEN_SOURCE, &desc->flags) && !raw) {
gpio_set_open_source_value_commit(desc, value);
} else {
__set_bit(hwgpio, mask);
@@ -3843,12 +3870,12 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
*/
static int gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
{
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags))
return gpio_set_open_drain_value_commit(desc, value);
- else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+ else if (test_bit(GPIOD_FLAG_OPEN_SOURCE, &desc->flags))
return gpio_set_open_source_value_commit(desc, value);
return gpiod_set_raw_value_commit(desc, value);
@@ -3971,6 +3998,26 @@ int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
EXPORT_SYMBOL_GPL(gpiod_set_consumer_name);
/**
+ * gpiod_is_shared() - check if this GPIO can be shared by multiple consumers
+ * @desc: GPIO to inspect
+ *
+ * Returns:
+ * True if this GPIO can be shared by multiple consumers at once. False if it's
+ * a regular, exclusive GPIO.
+ *
+ * Note:
+ * This function returning true does not mean that this GPIO is currently
+ * being shared. It means the firmware configuration marks the line as
+ * shareable by multiple consumers and that the GPIO core is in charge of
+ * arbitrating access to it.
+ */
+bool gpiod_is_shared(const struct gpio_desc *desc)
+{
+ return test_bit(GPIOD_FLAG_SHARED_PROXY, &desc->flags);
+}
+EXPORT_SYMBOL_GPL(gpiod_is_shared);
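/*
 * Hedged consumer-side sketch, not part of this patch: a driver can use the
 * new helper to adjust its expectations when the line it obtained is a
 * shared proxy. The "reset" con_id and the probe function are hypothetical.
 */
static int foo_probe(struct device *dev)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	if (gpiod_is_shared(reset))
		dev_dbg(dev, "reset line is shared, gpiolib arbitrates access\n");

	return 0;
}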
+
+/**
* gpiod_to_irq() - return the IRQ corresponding to a GPIO
* @desc: gpio whose IRQ will be returned (already requested)
*
@@ -3995,7 +4042,7 @@ int gpiod_to_irq(const struct gpio_desc *desc)
if (!gc)
return -ENODEV;
- offset = gpio_chip_hwgpio(desc);
+ offset = gpiod_hwgpio(desc);
if (gc->to_irq) {
ret = gc->to_irq(gc, offset);
if (ret)
@@ -4045,23 +4092,23 @@ int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset)
int dir = gpiod_get_direction(desc);
if (dir < 0) {
- chip_err(gc, "%s: cannot get GPIO direction\n",
- __func__);
+ gpiochip_err(gc, "%s: cannot get GPIO direction\n",
+ __func__);
return dir;
}
}
/* To be valid for IRQ the line needs to be input or open drain */
- if (test_bit(FLAG_IS_OUT, &desc->flags) &&
- !test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
- chip_err(gc,
- "%s: tried to flag a GPIO set as output for IRQ\n",
- __func__);
+ if (test_bit(GPIOD_FLAG_IS_OUT, &desc->flags) &&
+ !test_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags)) {
+ gpiochip_err(gc,
+ "%s: tried to flag a GPIO set as output for IRQ\n",
+ __func__);
return -EIO;
}
- set_bit(FLAG_USED_AS_IRQ, &desc->flags);
- set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
+ set_bit(GPIOD_FLAG_USED_AS_IRQ, &desc->flags);
+ set_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &desc->flags);
return 0;
}
@@ -4083,8 +4130,8 @@ void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset)
if (IS_ERR(desc))
return;
- clear_bit(FLAG_USED_AS_IRQ, &desc->flags);
- clear_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
+ clear_bit(GPIOD_FLAG_USED_AS_IRQ, &desc->flags);
+ clear_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &desc->flags);
}
EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq);
@@ -4093,8 +4140,8 @@ void gpiochip_disable_irq(struct gpio_chip *gc, unsigned int offset)
struct gpio_desc *desc = gpiochip_get_desc(gc, offset);
if (!IS_ERR(desc) &&
- !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags)))
- clear_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
+ !WARN_ON(!test_bit(GPIOD_FLAG_USED_AS_IRQ, &desc->flags)))
+ clear_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &desc->flags);
}
EXPORT_SYMBOL_GPL(gpiochip_disable_irq);
@@ -4103,14 +4150,14 @@ void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset)
struct gpio_desc *desc = gpiochip_get_desc(gc, offset);
if (!IS_ERR(desc) &&
- !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) {
+ !WARN_ON(!test_bit(GPIOD_FLAG_USED_AS_IRQ, &desc->flags))) {
/*
* We must not be output when using IRQ UNLESS we are
* open drain.
*/
- WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags) &&
- !test_bit(FLAG_OPEN_DRAIN, &desc->flags));
- set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
+ WARN_ON(test_bit(GPIOD_FLAG_IS_OUT, &desc->flags) &&
+ !test_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags));
+ set_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &desc->flags);
}
}
EXPORT_SYMBOL_GPL(gpiochip_enable_irq);
@@ -4120,7 +4167,7 @@ bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset)
if (offset >= gc->ngpio)
return false;
- return test_bit(FLAG_USED_AS_IRQ, &gc->gpiodev->descs[offset].flags);
+ return test_bit(GPIOD_FLAG_USED_AS_IRQ, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_irq);
@@ -4133,7 +4180,7 @@ int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset)
ret = gpiochip_lock_as_irq(gc, offset);
if (ret) {
- chip_err(gc, "unable to lock HW IRQ %u for IRQ\n", offset);
+ gpiochip_err(gc, "unable to lock HW IRQ %u for IRQ\n", offset);
module_put(gc->gpiodev->owner);
return ret;
}
@@ -4153,7 +4200,7 @@ bool gpiochip_line_is_open_drain(struct gpio_chip *gc, unsigned int offset)
if (offset >= gc->ngpio)
return false;
- return test_bit(FLAG_OPEN_DRAIN, &gc->gpiodev->descs[offset].flags);
+ return test_bit(GPIOD_FLAG_OPEN_DRAIN, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_open_drain);
@@ -4162,7 +4209,7 @@ bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset)
if (offset >= gc->ngpio)
return false;
- return test_bit(FLAG_OPEN_SOURCE, &gc->gpiodev->descs[offset].flags);
+ return test_bit(GPIOD_FLAG_OPEN_SOURCE, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_open_source);
@@ -4171,7 +4218,7 @@ bool gpiochip_line_is_persistent(struct gpio_chip *gc, unsigned int offset)
if (offset >= gc->ngpio)
return false;
- return !test_bit(FLAG_TRANSITORY, &gc->gpiodev->descs[offset].flags);
+ return !test_bit(GPIOD_FLAG_TRANSITORY, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_persistent);
@@ -4213,7 +4260,7 @@ int gpiod_get_value_cansleep(const struct gpio_desc *desc)
if (value < 0)
return value;
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
return value;
@@ -4604,6 +4651,23 @@ static struct gpio_desc *gpiod_find_by_fwnode(struct fwnode_handle *fwnode,
return desc;
}
+static struct gpio_desc *gpiod_fwnode_lookup(struct fwnode_handle *fwnode,
+ struct device *consumer,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags *flags,
+ unsigned long *lookupflags)
+{
+ struct gpio_desc *desc;
+
+ desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx, flags, lookupflags);
+ if (gpiod_not_found(desc) && !IS_ERR_OR_NULL(fwnode))
+ desc = gpiod_find_by_fwnode(fwnode->secondary, consumer, con_id,
+ idx, flags, lookupflags);
+
+ return desc;
+}
+
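/*
 * Hedged note, not part of this patch: the fall-through to fwnode->secondary
 * above is what makes GPIO lookup work when the primary DT/ACPI node lacks
 * the property but a software node attached to the device (e.g. via
 * device_create_managed_software_node()) provides it.
 */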
struct gpio_desc *gpiod_find_and_request(struct device *consumer,
struct fwnode_handle *fwnode,
const char *con_id,
@@ -4622,13 +4686,31 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
int ret = 0;
scoped_guard(srcu, &gpio_devices_srcu) {
- desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx,
- &flags, &lookupflags);
+ desc = gpiod_fwnode_lookup(fwnode, consumer, con_id, idx,
+ &flags, &lookupflags);
+ if (!IS_ERR_OR_NULL(desc) &&
+ test_bit(GPIOD_FLAG_SHARED, &desc->flags)) {
+ /*
+ * We're dealing with a GPIO shared by multiple
+ * consumers. This is the moment to add the machine
+ * lookup table for the proxy device as previously
+ * we only knew the consumer's fwnode.
+ */
+ ret = gpio_shared_add_proxy_lookup(consumer, lookupflags);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* Trigger platform lookup for shared GPIO proxy. */
+ desc = ERR_PTR(-ENOENT);
+ /* Trigger it even for fwnode-only gpiod_get(). */
+ platform_lookup_allowed = true;
+ }
+
if (gpiod_not_found(desc) && platform_lookup_allowed) {
/*
* Either we are not using DT or ACPI, or their lookup
- * did not return a result. In that case, use platform
- * lookup as a fallback.
+ * did not return a result, or this is a shared GPIO. In
+ * that case, use platform lookup as a fallback.
*/
dev_dbg(consumer,
"using lookup tables for GPIO lookup\n");
@@ -4651,14 +4733,19 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
return ERR_PTR(ret);
/*
- * This happens when there are several consumers for
- * the same GPIO line: we just return here without
- * further initialization. It is a bit of a hack.
- * This is necessary to support fixed regulators.
+ * This happens when there are several consumers for the same
+ * GPIO line: we just return here without further
+ * initialization. It's a hack introduced long ago to support
+ * fixed regulators. We now have a better solution with
+ * automated scanning where affected platforms just need to
+ * select the provided Kconfig option.
*
- * FIXME: Make this more sane and safe.
+ * FIXME: Remove the GPIOD_FLAGS_BIT_NONEXCLUSIVE flag after
+ * making sure all platforms use the new mechanism.
*/
- dev_info(consumer, "nonexclusive access to GPIO for %s\n", name);
+ dev_info(consumer,
+ "nonexclusive access to GPIO for %s, consider updating your code to using gpio-shared-proxy\n",
+ name);
return desc;
}
@@ -4795,10 +4882,10 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
int ret;
if (lflags & GPIO_ACTIVE_LOW)
- set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ set_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags);
if (lflags & GPIO_OPEN_DRAIN)
- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ set_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags);
else if (dflags & GPIOD_FLAGS_BIT_OPEN_DRAIN) {
/*
* This enforces open drain mode from the consumer side.
@@ -4806,13 +4893,13 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
* should *REALLY* have specified them as open drain in the
* first place, so print a little warning here.
*/
- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ set_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags);
gpiod_warn(desc,
"enforced open drain please flag it properly in DT/ACPI DSDT/board file\n");
}
if (lflags & GPIO_OPEN_SOURCE)
- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+ set_bit(GPIOD_FLAG_OPEN_SOURCE, &desc->flags);
if (((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DOWN)) ||
((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DISABLE)) ||
@@ -4823,11 +4910,11 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
}
if (lflags & GPIO_PULL_UP)
- set_bit(FLAG_PULL_UP, &desc->flags);
+ set_bit(GPIOD_FLAG_PULL_UP, &desc->flags);
else if (lflags & GPIO_PULL_DOWN)
- set_bit(FLAG_PULL_DOWN, &desc->flags);
+ set_bit(GPIOD_FLAG_PULL_DOWN, &desc->flags);
else if (lflags & GPIO_PULL_DISABLE)
- set_bit(FLAG_BIAS_DISABLE, &desc->flags);
+ set_bit(GPIOD_FLAG_BIAS_DISABLE, &desc->flags);
ret = gpiod_set_transitory(desc, (lflags & GPIO_TRANSITORY));
if (ret < 0)
@@ -4932,15 +5019,15 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
if (!guard.gc)
return -ENODEV;
- if (test_and_set_bit(FLAG_IS_HOGGED, &desc->flags))
+ if (test_and_set_bit(GPIOD_FLAG_IS_HOGGED, &desc->flags))
return 0;
- hwnum = gpio_chip_hwgpio(desc);
+ hwnum = gpiod_hwgpio(desc);
local_desc = gpiochip_request_own_desc(guard.gc, hwnum, name,
lflags, dflags);
if (IS_ERR(local_desc)) {
- clear_bit(FLAG_IS_HOGGED, &desc->flags);
+ clear_bit(GPIOD_FLAG_IS_HOGGED, &desc->flags);
ret = PTR_ERR(local_desc);
pr_err("requesting hog GPIO %s (chip %s, offset %d) failed, %d\n",
name, gdev->label, hwnum, ret);
@@ -4963,7 +5050,7 @@ static void gpiochip_free_hogs(struct gpio_chip *gc)
{
struct gpio_desc *desc;
- for_each_gpio_desc_with_flag(gc, desc, FLAG_IS_HOGGED)
+ for_each_gpio_desc_with_flag(gc, desc, GPIOD_FLAG_IS_HOGGED)
gpiochip_free_own_desc(desc);
}
@@ -5016,7 +5103,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
* If pin hardware number of array member 0 is also 0, select
* its chip as a candidate for fast bitmap processing path.
*/
- if (descs->ndescs == 0 && gpio_chip_hwgpio(desc) == 0) {
+ if (descs->ndescs == 0 && gpiod_hwgpio(desc) == 0) {
struct gpio_descs *array;
bitmap_size = BITS_TO_LONGS(gdev->ngpio > count ?
@@ -5061,7 +5148,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
* Detect array members which belong to the 'fast' chip
* but their pins are not in hardware order.
*/
- else if (gpio_chip_hwgpio(desc) != descs->ndescs) {
+ else if (gpiod_hwgpio(desc) != descs->ndescs) {
/*
* Don't use fast path if all array members processed so
* far belong to the same chip as this one but its pin
@@ -5078,8 +5165,8 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
} else {
dflags = READ_ONCE(desc->flags);
/* Exclude open drain or open source from fast output */
- if (test_bit(FLAG_OPEN_DRAIN, &dflags) ||
- test_bit(FLAG_OPEN_SOURCE, &dflags))
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &dflags) ||
+ test_bit(GPIOD_FLAG_OPEN_SOURCE, &dflags))
__clear_bit(descs->ndescs,
array_info->set_mask);
/* Identify 'fast' pins which require inversion */
@@ -5220,8 +5307,8 @@ core_initcall(gpiolib_dev_init);
static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
{
bool active_low, is_irq, is_out;
- unsigned int gpio = gdev->base;
struct gpio_desc *desc;
+ unsigned int gpio = 0;
struct gpio_chip *gc;
unsigned long flags;
int value;
@@ -5237,12 +5324,12 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
for_each_gpio_desc(gc, desc) {
guard(srcu)(&desc->gdev->desc_srcu);
flags = READ_ONCE(desc->flags);
- is_irq = test_bit(FLAG_USED_AS_IRQ, &flags);
- if (is_irq || test_bit(FLAG_REQUESTED, &flags)) {
+ is_irq = test_bit(GPIOD_FLAG_USED_AS_IRQ, &flags);
+ if (is_irq || test_bit(GPIOD_FLAG_REQUESTED, &flags)) {
gpiod_get_direction(desc);
- is_out = test_bit(FLAG_IS_OUT, &flags);
+ is_out = test_bit(GPIOD_FLAG_IS_OUT, &flags);
value = gpio_chip_get_value(gc, desc);
- active_low = test_bit(FLAG_ACTIVE_LOW, &flags);
+ active_low = test_bit(GPIOD_FLAG_ACTIVE_LOW, &flags);
seq_printf(s, " gpio-%-3u (%-20.20s|%-20.20s) %s %s %s%s\n",
gpio, desc->name ?: "", gpiod_get_label(desc),
is_out ? "out" : "in ",
@@ -5268,6 +5355,8 @@ static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos)
struct gpio_device *gdev;
loff_t index = *pos;
+ s->private = NULL;
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return NULL;
@@ -5301,7 +5390,11 @@ static void *gpiolib_seq_next(struct seq_file *s, void *v, loff_t *pos)
static void gpiolib_seq_stop(struct seq_file *s, void *v)
{
- struct gpiolib_seq_priv *priv = s->private;
+ struct gpiolib_seq_priv *priv;
+
+ priv = s->private;
+ if (!priv)
+ return;
srcu_read_unlock(&gpio_devices_srcu, priv->idx);
kfree(priv);
@@ -5325,8 +5418,7 @@ static int gpiolib_seq_show(struct seq_file *s, void *v)
return 0;
}
- seq_printf(s, "%s: GPIOs %u-%u", dev_name(&gdev->dev), gdev->base,
- gdev->base + gdev->ngpio - 1);
+ seq_printf(s, "%s: %u GPIOs", dev_name(&gdev->dev), gdev->ngpio);
parent = gc->parent;
if (parent)
seq_printf(s, ", parent: %s/%s",
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 58f64056de77..77f6f2936dc2 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -27,8 +27,6 @@
* @dev: the GPIO device struct
* @chrdev: character device for the GPIO device
* @id: numerical ID number for the GPIO chip
- * @mockdev: class device used by the deprecated sysfs interface (may be
- * NULL)
* @owner: helps prevent removal of modules exporting active GPIOs
* @chip: pointer to the corresponding gpiochip, holding static
* data for this device
@@ -65,7 +63,6 @@ struct gpio_device {
struct device dev;
struct cdev chrdev;
int id;
- struct device *mockdev;
struct module *owner;
struct gpio_chip __rcu *chip;
struct gpio_desc *descs;
@@ -189,24 +186,26 @@ struct gpio_desc {
struct gpio_device *gdev;
unsigned long flags;
/* flag symbols are bit numbers */
-#define FLAG_REQUESTED 0
-#define FLAG_IS_OUT 1
-#define FLAG_EXPORT 2 /* protected by sysfs_lock */
-#define FLAG_SYSFS 3 /* exported via /sys/class/gpio/control */
-#define FLAG_ACTIVE_LOW 6 /* value has active low */
-#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */
-#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
-#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
-#define FLAG_IRQ_IS_ENABLED 10 /* GPIO is connected to an enabled IRQ */
-#define FLAG_IS_HOGGED 11 /* GPIO is hogged */
-#define FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */
-#define FLAG_PULL_UP 13 /* GPIO has pull up enabled */
-#define FLAG_PULL_DOWN 14 /* GPIO has pull down enabled */
-#define FLAG_BIAS_DISABLE 15 /* GPIO has pull disabled */
-#define FLAG_EDGE_RISING 16 /* GPIO CDEV detects rising edge events */
-#define FLAG_EDGE_FALLING 17 /* GPIO CDEV detects falling edge events */
-#define FLAG_EVENT_CLOCK_REALTIME 18 /* GPIO CDEV reports REALTIME timestamps in events */
-#define FLAG_EVENT_CLOCK_HTE 19 /* GPIO CDEV reports hardware timestamps in events */
+#define GPIOD_FLAG_REQUESTED 0 /* GPIO is in use */
+#define GPIOD_FLAG_IS_OUT 1 /* GPIO is in output mode */
+#define GPIOD_FLAG_EXPORT 2 /* GPIO is exported to user-space */
+#define GPIOD_FLAG_SYSFS 3 /* GPIO is exported via /sys/class/gpio */
+#define GPIOD_FLAG_ACTIVE_LOW 6 /* GPIO is active-low */
+#define GPIOD_FLAG_OPEN_DRAIN 7 /* GPIO is open drain type */
+#define GPIOD_FLAG_OPEN_SOURCE 8 /* GPIO is open source type */
+#define GPIOD_FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
+#define GPIOD_FLAG_IRQ_IS_ENABLED 10 /* GPIO is connected to an enabled IRQ */
+#define GPIOD_FLAG_IS_HOGGED 11 /* GPIO is hogged */
+#define GPIOD_FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */
+#define GPIOD_FLAG_PULL_UP 13 /* GPIO has pull up enabled */
+#define GPIOD_FLAG_PULL_DOWN 14 /* GPIO has pull down enabled */
+#define GPIOD_FLAG_BIAS_DISABLE 15 /* GPIO has pull disabled */
+#define GPIOD_FLAG_EDGE_RISING 16 /* GPIO CDEV detects rising edge events */
+#define GPIOD_FLAG_EDGE_FALLING 17 /* GPIO CDEV detects falling edge events */
+#define GPIOD_FLAG_EVENT_CLOCK_REALTIME 18 /* GPIO CDEV reports REALTIME timestamps in events */
+#define GPIOD_FLAG_EVENT_CLOCK_HTE 19 /* GPIO CDEV reports hardware timestamps in events */
+#define GPIOD_FLAG_SHARED 20 /* GPIO is shared by multiple consumers */
+#define GPIOD_FLAG_SHARED_PROXY 21 /* GPIO is a virtual proxy to a physically shared pin */
/* Connection label */
struct gpio_desc_label __rcu *label;
@@ -276,49 +275,30 @@ int gpiochip_get_ngpios(struct gpio_chip *gc, struct device *dev);
struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, unsigned int hwnum);
const char *gpiod_get_label(struct gpio_desc *desc);
-/*
- * Return the GPIO number of the passed descriptor relative to its chip
- */
-static inline int gpio_chip_hwgpio(const struct gpio_desc *desc)
-{
- return desc - &desc->gdev->descs[0];
-}
-
/* With descriptor prefix */
-#define gpiod_err(desc, fmt, ...) \
+#define __gpiod_pr(level, desc, fmt, ...) \
do { \
scoped_guard(srcu, &desc->gdev->desc_srcu) { \
- pr_err("gpio-%d (%s): " fmt, desc_to_gpio(desc), \
- gpiod_get_label(desc) ? : "?", ##__VA_ARGS__); \
+ pr_##level("gpio-%d (%s): " fmt, desc_to_gpio(desc), \
+ gpiod_get_label(desc) ?: "?", ##__VA_ARGS__); \
} \
} while (0)
-#define gpiod_warn(desc, fmt, ...) \
-do { \
- scoped_guard(srcu, &desc->gdev->desc_srcu) { \
- pr_warn("gpio-%d (%s): " fmt, desc_to_gpio(desc), \
- gpiod_get_label(desc) ? : "?", ##__VA_ARGS__); \
- } \
-} while (0)
+#define gpiod_err(desc, fmt, ...) __gpiod_pr(err, desc, fmt, ##__VA_ARGS__)
+#define gpiod_warn(desc, fmt, ...) __gpiod_pr(warn, desc, fmt, ##__VA_ARGS__)
+#define gpiod_dbg(desc, fmt, ...) __gpiod_pr(debug, desc, fmt, ##__VA_ARGS__)
+
+/* With chip prefix */
-#define gpiod_dbg(desc, fmt, ...) \
+#define __gpiochip_pr(level, gc, fmt, ...) \
do { \
- scoped_guard(srcu, &desc->gdev->desc_srcu) { \
- pr_debug("gpio-%d (%s): " fmt, desc_to_gpio(desc), \
- gpiod_get_label(desc) ? : "?", ##__VA_ARGS__); \
- } \
+ dev_##level(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__); \
} while (0)
-/* With chip prefix */
-
-#define chip_err(gc, fmt, ...) \
- dev_err(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
-#define chip_warn(gc, fmt, ...) \
- dev_warn(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
-#define chip_info(gc, fmt, ...) \
- dev_info(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
-#define chip_dbg(gc, fmt, ...) \
- dev_dbg(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
+#define gpiochip_err(gc, fmt, ...) __gpiochip_pr(err, gc, fmt, ##__VA_ARGS__)
+#define gpiochip_warn(gc, fmt, ...) __gpiochip_pr(warn, gc, fmt, ##__VA_ARGS__)
+#define gpiochip_info(gc, fmt, ...) __gpiochip_pr(info, gc, fmt, ##__VA_ARGS__)
+#define gpiochip_dbg(gc, fmt, ...) __gpiochip_pr(dbg, gc, fmt, ##__VA_ARGS__)
#endif /* GPIOLIB_H */
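/*
 * Hedged illustration, not part of this patch: with the consolidated
 * helpers, every level shares one prefix format. For a chip labelled
 * "foo-gpio":
 *
 *   gpiochip_warn(gc, "line %u busy\n", 3);
 *
 * expands (via __gpiochip_pr(warn, ...)) to:
 *
 *   dev_warn(&gc->gpiodev->dev, "(%s): line %u busy\n", gc->label, 3);
 */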
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f7ea8e895c0c..7e6bc0b3a589 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -396,9 +396,11 @@ source "drivers/gpu/drm/sprd/Kconfig"
source "drivers/gpu/drm/imagination/Kconfig"
+source "drivers/gpu/drm/tyr/Kconfig"
+
config DRM_HYPERV
tristate "DRM Support for Hyper-V synthetic video device"
- depends on DRM && PCI && HYPERV
+ depends on DRM && PCI && HYPERV_VMBUS
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
diff --git a/drivers/gpu/drm/Kconfig.debug b/drivers/gpu/drm/Kconfig.debug
index fa6ee76f4d3c..05dc43c0b8c5 100644
--- a/drivers/gpu/drm/Kconfig.debug
+++ b/drivers/gpu/drm/Kconfig.debug
@@ -70,6 +70,7 @@ config DRM_KUNIT_TEST
select DRM_GEM_SHMEM_HELPER
select DRM_KUNIT_TEST_HELPERS
select DRM_LIB_RANDOM
+ select DRM_SYSFB_HELPER
select PRIME_NUMBERS
default KUNIT_ALL_TESTS
help
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 5050ac32bba2..0e1c668b46d2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -6,7 +6,7 @@
CFLAGS-$(CONFIG_DRM_USE_DYNAMIC_DEBUG) += -DDYNAMIC_DEBUG_MODULE
# Unconditionally enable W=1 warnings locally
-# --- begin copy-paste W=1 warnings from scripts/Makefile.extrawarn
+# --- begin copy-paste W=1 warnings from scripts/Makefile.warn
subdir-ccflags-y += -Wextra -Wunused -Wno-unused-parameter
subdir-ccflags-y += $(call cc-option, -Wrestrict)
subdir-ccflags-y += -Wmissing-format-attribute
@@ -41,6 +41,7 @@ drm-y := \
drm_bridge.o \
drm_cache.o \
drm_color_mgmt.o \
+ drm_colorop.o \
drm_connector.o \
drm_crtc.o \
drm_displayid.o \
@@ -76,7 +77,8 @@ drm-y := \
drm-$(CONFIG_DRM_CLIENT) += \
drm_client.o \
drm_client_event.o \
- drm_client_modeset.o
+ drm_client_modeset.o \
+ drm_client_sysrq.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
@@ -104,7 +106,11 @@ obj-$(CONFIG_DRM_PANEL_BACKLIGHT_QUIRKS) += drm_panel_backlight_quirks.o
#
obj-$(CONFIG_DRM_EXEC) += drm_exec.o
obj-$(CONFIG_DRM_GPUVM) += drm_gpuvm.o
-obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm.o
+
+drm_gpusvm_helper-y := \
drm_gpusvm.o \
+ drm_pagemap.o
+obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm_helper.o
obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
@@ -146,7 +152,8 @@ drm_kms_helper-y := \
drm_plane_helper.o \
drm_probe_helper.o \
drm_self_refresh_helper.o \
- drm_simple_kms_helper.o
+ drm_simple_kms_helper.o \
+ drm_vblank_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -216,6 +223,7 @@ obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_DRM_LIMA) += lima/
obj-$(CONFIG_DRM_PANFROST) += panfrost/
obj-$(CONFIG_DRM_PANTHOR) += panthor/
+obj-$(CONFIG_DRM_TYR) += tyr/
obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
obj-$(CONFIG_DRM_MCDE) += mcde/
obj-$(CONFIG_DRM_TIDSS) += tidss/
@@ -240,7 +248,7 @@ always-$(CONFIG_DRM_HEADER_TEST) += \
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
cmd_hdrtest = \
$(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \
- PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
+ PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
touch $@
$(obj)/%.hdrtest: $(src)/%.h FORCE
diff --git a/drivers/gpu/drm/adp/adp-mipi.c b/drivers/gpu/drm/adp/adp-mipi.c
index 2b60128e2c69..cba7d32150a9 100644
--- a/drivers/gpu/drm/adp/adp-mipi.c
+++ b/drivers/gpu/drm/adp/adp-mipi.c
@@ -229,9 +229,10 @@ static int adp_mipi_probe(struct platform_device *pdev)
{
struct adp_mipi_drv_private *adp;
- adp = devm_kzalloc(&pdev->dev, sizeof(*adp), GFP_KERNEL);
- if (!adp)
- return -ENOMEM;
+ adp = devm_drm_bridge_alloc(&pdev->dev, struct adp_mipi_drv_private,
+ bridge, &adp_dsi_bridge_funcs);
+ if (IS_ERR(adp))
+ return PTR_ERR(adp);
adp->mipi = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adp->mipi)) {
@@ -241,7 +242,6 @@ static int adp_mipi_probe(struct platform_device *pdev)
adp->dsi.dev = &pdev->dev;
adp->dsi.ops = &adp_dsi_host_ops;
- adp->bridge.funcs = &adp_dsi_bridge_funcs;
adp->bridge.of_node = pdev->dev.of_node;
adp->bridge.type = DRM_MODE_CONNECTOR_DSI;
dev_set_drvdata(&pdev->dev, adp);
diff --git a/drivers/gpu/drm/adp/adp_drv.c b/drivers/gpu/drm/adp/adp_drv.c
index 54cde090c3f4..4554cf75565e 100644
--- a/drivers/gpu/drm/adp/adp_drv.c
+++ b/drivers/gpu/drm/adp/adp_drv.c
@@ -16,6 +16,7 @@
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 1acfed2f92ef..7f515be5185d 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -43,14 +43,16 @@ config DRM_AMDGPU_SI
bool "Enable amdgpu support for SI parts"
depends on DRM_AMDGPU
help
- Choose this option if you want to enable experimental support
+ Choose this option if you want to enable support
for SI (Southern Islands) asics.
- SI is already supported in radeon. Experimental support for SI
- in amdgpu will be disabled by default and is still provided by
- radeon. Use module options to override this:
+ SI (Southern Islands) parts are first-generation GCN GPUs,
+ supported by both drivers: radeon (old) and amdgpu (new).
+ By default, dedicated SI GPUs are supported by amdgpu.
- radeon.si_support=0 amdgpu.si_support=1
+ Use module options to override this:
+ To use radeon for SI,
+ radeon.si_support=1 amdgpu.si_support=0
config DRM_AMDGPU_CIK
bool "Enable amdgpu support for CIK parts"
@@ -59,11 +61,17 @@ config DRM_AMDGPU_CIK
Choose this option if you want to enable support for CIK (Sea
Islands) asics.
- CIK is already supported in radeon. Support for CIK in amdgpu
- will be disabled by default and is still provided by radeon.
- Use module options to override this:
+ CIK (Sea Islands) parts are second-generation GCN GPUs,
+ supported by both drivers: radeon (old) and amdgpu (new).
+ By default, dedicated CIK GPUs are supported by amdgpu and
+ CIK APUs are supported by radeon.
+ Use module options to override this:
+ To use amdgpu for CIK,
radeon.cik_support=0 amdgpu.cik_support=1
+ To use radeon for CIK,
+ radeon.cik_support=1 amdgpu.cik_support=0
config DRM_AMDGPU_USERPTR
bool "Always enable userptr write support"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 87080c06e5fc..c88760fb52ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -37,7 +37,8 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_DISPLAY_PATH)/modules/inc \
-I$(FULL_AMD_DISPLAY_PATH)/dc \
-I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
- -I$(FULL_AMD_PATH)/amdkfd
+ -I$(FULL_AMD_PATH)/amdkfd \
+ -I$(FULL_AMD_PATH)/ras/ras_mgr
# Locally disable W=1 warnings enabled in drm subsystem Makefile
subdir-ccflags-y += -Wno-override-init
@@ -66,7 +67,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o \
amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o \
- amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o
+ amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o amdgpu_ip.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
@@ -77,14 +78,15 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o \
dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o \
- uvd_v3_1.o
+ uvd_v3_1.o vce_v1_0.o
amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o soc24.o \
sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \
- nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o
+ nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o \
+ cyan_skillfish_reg_init.o
# add DF block
amdgpu-y += \
@@ -137,7 +139,6 @@ amdgpu-y += \
# add DCE block
amdgpu-y += \
dce_v10_0.o \
- dce_v11_0.o \
amdgpu_vkms.o
# add GFX block
@@ -324,4 +325,9 @@ amdgpu-y += \
isp_v4_1_1.o
endif
+AMD_GPU_RAS_PATH := ../ras
+AMD_GPU_RAS_FULL_PATH := $(FULL_AMD_PATH)/ras
+include $(AMD_GPU_RAS_FULL_PATH)/Makefile
+amdgpu-y += $(AMD_GPU_RAS_FILES)
+
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index e13fbd974141..daa7b23bc775 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -71,18 +71,33 @@ aldebaran_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
return NULL;
}
+static inline uint32_t aldebaran_get_ip_block_mask(struct amdgpu_device *adev)
+{
+ uint32_t ip_block_mask = BIT(AMD_IP_BLOCK_TYPE_GFX) |
+ BIT(AMD_IP_BLOCK_TYPE_SDMA);
+
+ if (adev->aid_mask)
+ ip_block_mask |= BIT(AMD_IP_BLOCK_TYPE_IH);
+
+ return ip_block_mask;
+}
+
static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
{
+ uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev);
+ uint32_t ip_block;
int r, i;
+ /* Skip suspend of SDMA IP versions >= 4.4.2. They are multi-aid */
+ if (adev->aid_mask)
+ ip_block_mask &= ~BIT(AMD_IP_BLOCK_TYPE_SDMA);
+
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!(adev->ip_blocks[i].version->type ==
- AMD_IP_BLOCK_TYPE_GFX ||
- adev->ip_blocks[i].version->type ==
- AMD_IP_BLOCK_TYPE_SDMA))
+ ip_block = BIT(adev->ip_blocks[i].version->type);
+ if (!(ip_block_mask & ip_block))
continue;
r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
@@ -200,8 +215,10 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
{
struct amdgpu_firmware_info *ucode_list[AMDGPU_UCODE_ID_MAXIMUM];
+ uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev);
struct amdgpu_firmware_info *ucode;
struct amdgpu_ip_block *cmn_block;
+ struct amdgpu_ip_block *ih_block;
int ucode_count = 0;
int i, r;
@@ -243,6 +260,18 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
if (r)
return r;
+ if (ip_block_mask & BIT(AMD_IP_BLOCK_TYPE_IH)) {
+ ih_block = amdgpu_device_ip_get_ip_block(adev,
+ AMD_IP_BLOCK_TYPE_IH);
+ if (unlikely(!ih_block)) {
+ dev_err(adev->dev, "Failed to get IH handle\n");
+ return -EINVAL;
+ }
+ r = amdgpu_ip_block_resume(ih_block);
+ if (r)
+ return r;
+ }
+
/* Reinit GFXHUB */
adev->gfxhub.funcs->init(adev);
r = adev->gfxhub.funcs->gart_enable(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a5ccd0ada16a..9f9774f58ce1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -63,6 +63,7 @@
#include "kgd_pp_interface.h"
#include "amd_shared.h"
+#include "amdgpu_utils.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
@@ -371,13 +372,15 @@ void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
u64 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
+bool amdgpu_device_ip_is_hw(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block);
int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block);
-#define AMDGPU_MAX_IP_NUM 16
+#define AMDGPU_MAX_IP_NUM AMD_IP_BLOCK_TYPE_NUM
struct amdgpu_ip_block_status {
bool valid;
@@ -434,7 +437,6 @@ struct amdgpu_clock {
uint32_t default_mclk;
uint32_t default_sclk;
uint32_t default_dispclk;
- uint32_t current_dispclk;
uint32_t dp_extclk;
uint32_t max_pixel_clock;
};
@@ -470,9 +472,6 @@ struct amdgpu_sa_manager {
void *cpu_ptr;
};
-int amdgpu_fence_slab_init(void);
-void amdgpu_fence_slab_fini(void);
-
/*
* IRQS.
*/
@@ -548,7 +547,7 @@ struct amdgpu_wb {
* this value can be accessed directly by using the offset as an index.
* For the GPU address, it is necessary to use gpu_addr and the offset.
*/
- volatile uint32_t *wb;
+ uint32_t *wb;
/**
* @gpu_addr:
@@ -724,7 +723,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_mem_scratch {
struct amdgpu_bo *robj;
- volatile uint32_t *ptr;
+ uint32_t *ptr;
u64 gpu_addr;
};
@@ -755,6 +754,7 @@ typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, u
struct amdgpu_mmio_remap {
u32 reg_offset;
resource_size_t bus_addr;
+ struct amdgpu_bo *bo;
};
/* Define the HW IP blocks will be used in driver , add more if necessary */
@@ -822,13 +822,25 @@ struct amdgpu_ip_map_info {
uint32_t mask);
};
+enum amdgpu_uid_type {
+ AMDGPU_UID_TYPE_XCD,
+ AMDGPU_UID_TYPE_AID,
+ AMDGPU_UID_TYPE_SOC,
+ AMDGPU_UID_TYPE_MAX
+};
+
+#define AMDGPU_UID_INST_MAX 8 /* max number of instances for each UID type */
+
+struct amdgpu_uid {
+ uint64_t uid[AMDGPU_UID_TYPE_MAX][AMDGPU_UID_INST_MAX];
+ struct amdgpu_device *adev;
+};
+
struct amd_powerplay {
void *pp_handle;
const struct amd_pm_funcs *pp_funcs;
};
-struct ip_discovery_top;
-
/* polaris10 kickers */
#define ASICID_IS_P20(did, rid) (((did == 0x67DF) && \
((rid == 0xE3) || \
@@ -886,6 +898,7 @@ struct amdgpu_mqd_prop {
uint64_t csa_addr;
uint64_t fence_address;
bool tmz_queue;
+ bool kernel_queue;
};
struct amdgpu_mqd {
@@ -898,6 +911,9 @@ struct amdgpu_pcie_reset_ctx {
bool in_link_reset;
bool occurs_dpc;
bool audio_suspended;
+ struct pci_dev *swus;
+ struct pci_saved_state *swus_pcistate;
+ struct pci_saved_state *swds_pcistate;
};
/*
@@ -931,12 +947,6 @@ enum amdgpu_enforce_isolation_mode {
AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER = 3,
};
-
-/*
- * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
- */
-#define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)
-
struct amdgpu_device {
struct device *dev;
struct pci_dev *pdev;
@@ -962,8 +972,7 @@ struct amdgpu_device {
struct notifier_block acpi_nb;
struct notifier_block pm_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
- struct debugfs_blob_wrapper debugfs_vbios_blob;
- struct debugfs_blob_wrapper debugfs_discovery_blob;
+ struct debugfs_blob_wrapper debugfs_vbios_blob;
struct mutex srbm_mutex;
/* GRBM index mutex. Protects concurrent access to GRBM index */
struct mutex grbm_idx_mutex;
@@ -1053,6 +1062,9 @@ struct amdgpu_device {
u32 log2_max_MBps;
} mm_stats;
+ /* discovery */
+ struct amdgpu_discovery_info discovery;
+
/* display */
bool enable_virtual_display;
struct amdgpu_vkms_output *amdgpu_vkms_output;
@@ -1140,9 +1152,6 @@ struct amdgpu_device {
/* for userq and VM fences */
struct amdgpu_seq64 seq64;
- /* KFD */
- struct amdgpu_kfd_dev kfd;
-
/* UMC */
struct amdgpu_umc umc;
@@ -1167,6 +1176,12 @@ struct amdgpu_device {
* queue fence.
*/
struct xarray userq_xa;
+ /**
+ * @userq_doorbell_xa: Global user queue map (doorbell index → queue)
+ * Key: doorbell_index (unique global identifier for the queue)
+ * Value: struct amdgpu_usermode_queue
+ */
+ struct xarray userq_doorbell_xa;
/* df */
struct amdgpu_df df;
@@ -1258,8 +1273,6 @@ struct amdgpu_device {
struct list_head ras_list;
- struct ip_discovery_top *ip_top;
-
struct amdgpu_reset_domain *reset_domain;
struct mutex benchmark_mutex;
@@ -1282,6 +1295,8 @@ struct amdgpu_device {
bool debug_exp_resets;
bool debug_disable_gpu_ring_reset;
bool debug_vm_userptr;
+ bool debug_disable_ce_logs;
+ bool debug_enable_ce_cs;
/* Protection for the following isolation structure */
struct mutex enforce_isolation_mutex;
@@ -1300,9 +1315,14 @@ struct amdgpu_device {
*/
bool apu_prefer_gtt;
- struct list_head userq_mgr_list;
- struct mutex userq_mutex;
bool userq_halt_for_enforce_isolation;
+ struct work_struct userq_reset_work;
+ struct amdgpu_uid *uid_info;
+
+ /* KFD
+ * Must be last -- ends in a flexible-array member.
+ */
+ struct amdgpu_kfd_dev kfd;
};
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1336,6 +1356,11 @@ static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
return container_of(bdev, struct amdgpu_device, mman.bdev);
}
+static inline bool amdgpu_is_multi_aid(struct amdgpu_device *adev)
+{
+ return !!adev->aid_mask;
+}
+
int amdgpu_device_init(struct amdgpu_device *adev,
uint32_t flags);
void amdgpu_device_fini_hw(struct amdgpu_device *adev);
@@ -1387,7 +1412,8 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
u64 reg_addr, u64 reg_data);
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
-bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
+bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
+ enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);
@@ -1514,11 +1540,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
-#define amdgpu_asic_flush_hdp(adev, r) \
- ((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r)))
-#define amdgpu_asic_invalidate_hdp(adev, r) \
- ((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : \
- ((adev)->hdp.funcs->invalidate_hdp ? (adev)->hdp.funcs->invalidate_hdp((adev), (r)) : (void)0))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
@@ -1558,16 +1579,16 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
int amdgpu_device_link_reset(struct amdgpu_device *adev);
-bool amdgpu_device_supports_atpx(struct drm_device *dev);
-bool amdgpu_device_supports_px(struct drm_device *dev);
-bool amdgpu_device_supports_boco(struct drm_device *dev);
-bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
-int amdgpu_device_supports_baco(struct drm_device *dev);
+bool amdgpu_device_supports_atpx(struct amdgpu_device *adev);
+bool amdgpu_device_supports_px(struct amdgpu_device *adev);
+bool amdgpu_device_supports_boco(struct amdgpu_device *adev);
+bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev);
+int amdgpu_device_supports_baco(struct amdgpu_device *adev);
void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
-int amdgpu_device_baco_enter(struct drm_device *dev);
-int amdgpu_device_baco_exit(struct drm_device *dev);
+int amdgpu_device_baco_enter(struct amdgpu_device *adev);
+int amdgpu_device_baco_exit(struct amdgpu_device *adev);
void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
@@ -1617,8 +1638,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
void amdgpu_driver_release_kms(struct drm_device *dev);
-int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_prepare(struct drm_device *dev);
+void amdgpu_device_complete(struct drm_device *dev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
@@ -1669,7 +1690,8 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
u8 perf_req, bool advertise);
int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
u8 dev_state, bool drv_state);
-int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
+int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
u64 *tmr_size);
@@ -1700,8 +1722,11 @@ static inline void amdgpu_acpi_release(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
u8 dev_state, bool drv_state) { return 0; }
-static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
- enum amdgpu_ss ss_state) { return 0; }
+static inline int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state)
+{
+ return 0;
+}
static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { }
#endif
@@ -1714,7 +1739,7 @@ static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return
#endif
#if defined(CONFIG_DRM_AMD_ISP)
-int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN]);
+int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev);
#endif
void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
@@ -1760,4 +1785,24 @@ extern const struct attribute_group amdgpu_flash_attr_group;
void amdgpu_set_init_level(struct amdgpu_device *adev,
enum amdgpu_init_lvl_id lvl);
+
+static inline int amdgpu_device_bus_status_check(struct amdgpu_device *adev)
+{
+ u32 status;
+ int r;
+
+ r = pci_read_config_dword(adev->pdev, PCI_COMMAND, &status);
+ if (r || PCI_POSSIBLE_ERROR(status)) {
+ dev_err(adev->dev, "device lost from bus!");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst,
+ uint64_t uid);
+uint64_t amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst);
#endif
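Note: amdgpu_device_bus_status_check() above leans on the PCI convention that config reads from a device that has dropped off the bus return all-ones, which is what PCI_POSSIBLE_ERROR() tests for. A hedged userspace model of the check:

#include <stdint.h>
#include <stdio.h>

/* Model of PCI_POSSIBLE_ERROR(): config reads from a lost device float to all-ones. */
#define PCI_POSSIBLE_ERROR(val) ((val) == 0xffffffffu)

static int bus_status_check(uint32_t cmd_reg)
{
	if (PCI_POSSIBLE_ERROR(cmd_reg)) {
		fprintf(stderr, "device lost from bus!\n");
		return -1; /* -ENODEV in the driver */
	}
	return 0;
}

int main(void)
{
	printf("healthy: %d\n", bus_status_check(0x0407));     /* plausible PCI_COMMAND value */
	printf("lost:    %d\n", bus_status_check(0xffffffff)); /* error response */
	return 0;
}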
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index 3835f2592914..9b3180449150 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -76,6 +76,7 @@ static void aca_banks_release(struct aca_banks *banks)
list_for_each_entry_safe(node, tmp, &banks->list, node) {
list_del(&node->node);
kvfree(node);
+ banks->nr_banks--;
}
}
@@ -115,6 +116,11 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st
u64 event_id = qctx ? qctx->evid.event_id : RAS_EVENT_INVALID_ID;
int i;
+ if (adev->debug_disable_ce_logs &&
+ bank->smu_err_type == ACA_SMU_TYPE_CE &&
+ !ACA_BANK_ERR_IS_DEFFERED(bank))
+ return;
+
RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
/* plus 1 for output format, e.g: ACA[08/08]: xxxx */
for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
@@ -125,6 +131,27 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st
RAS_EVENT_LOG(adev, event_id, HW_ERR "hardware error logged by the scrubber\n");
}
+static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type)
+{
+ struct aca_hwip *hwip;
+ int hwid, mcatype;
+ u64 ipid;
+
+ if (!bank || type == ACA_HWIP_TYPE_UNKNOW)
+ return false;
+
+ hwip = &aca_hwid_mcatypes[type];
+ if (!hwip->hwid)
+ return false;
+
+ ipid = bank->regs[ACA_REG_IDX_IPID];
+ hwid = ACA_REG__IPID__HARDWAREID(ipid);
+ mcatype = ACA_REG__IPID__MCATYPE(ipid);
+
+ return hwip->hwid == hwid && hwip->mcatype == mcatype;
+}
+
static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type,
int start, int count,
struct aca_banks *banks, struct ras_query_context *qctx)
@@ -163,6 +190,15 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
bank.smu_err_type = type;
+ /*
+ * Skip unexpected UE-type banks: when a UE is injected while background
+ * workloads are running, poison may be consumed by blocks other than UMC.
+ */
+ if (type == ACA_SMU_TYPE_UE &&
+ ACA_REG__STATUS__POISON(bank.regs[ACA_REG_IDX_STATUS]) &&
+ !aca_bank_hwip_is_matched(&bank, ACA_HWIP_TYPE_UMC))
+ continue;
+
aca_smu_bank_dump(adev, i, count, &bank, qctx);
ret = aca_banks_add_bank(banks, &bank);
@@ -173,27 +209,6 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
return 0;
}
-static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type)
-{
-
- struct aca_hwip *hwip;
- int hwid, mcatype;
- u64 ipid;
-
- if (!bank || type == ACA_HWIP_TYPE_UNKNOW)
- return false;
-
- hwip = &aca_hwid_mcatypes[type];
- if (!hwip->hwid)
- return false;
-
- ipid = bank->regs[ACA_REG_IDX_IPID];
- hwid = ACA_REG__IPID__HARDWAREID(ipid);
- mcatype = ACA_REG__IPID__MCATYPE(ipid);
-
- return hwip->hwid == hwid && hwip->mcatype == mcatype;
-}
-
static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type)
{
const struct aca_bank_ops *bank_ops = handle->bank_ops;
@@ -224,6 +239,7 @@ static struct aca_bank_error *new_bank_error(struct aca_error *aerr, struct aca_
mutex_lock(&aerr->lock);
list_add_tail(&bank_error->node, &aerr->list);
+ aerr->nr_errors++;
mutex_unlock(&aerr->lock);
return bank_error;
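Note: aca_bank_hwip_is_matched() is moved ahead of its new caller so that UE-typed banks carrying the poison bit which do not decode to UMC can be skipped. A toy decode of the same matching idea; the field offsets below are illustrative assumptions, not the real ACA_REG__IPID__* layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative field positions; the real ACA_REG__IPID__* macros differ. */
#define IPID_HWID(ipid)    ((uint32_t)((ipid) >> 32) & 0xfff)
#define IPID_MCATYPE(ipid) ((uint32_t)(ipid) & 0xffff)

struct hwip { uint32_t hwid, mcatype; };

static bool bank_matches(uint64_t ipid, const struct hwip *hw)
{
	return hw->hwid && IPID_HWID(ipid) == hw->hwid &&
	       IPID_MCATYPE(ipid) == hw->mcatype;
}

int main(void)
{
	const struct hwip umc = { .hwid = 0x96, .mcatype = 0x0 };
	uint64_t ipid = (0x96ULL << 32) | 0x0; /* a bank reporting as UMC */
	bool poison = true, is_ue = true;

	/* UE-typed poison banks from anything other than UMC are skipped. */
	if (is_ue && poison && !bank_matches(ipid, &umc))
		puts("skip bank");
	else
		puts("log bank");
	return 0;
}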
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 4926996f94da..381ef205b0df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -302,17 +302,19 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block)
adev->acp.acp_res[2].end = adev->acp.acp_res[2].start;
adev->acp.acp_cell[0].name = "acp_audio_dma";
+ adev->acp.acp_cell[0].id = 0;
adev->acp.acp_cell[0].num_resources = 3;
adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
adev->acp.acp_cell[0].platform_data = &adev->asic_type;
adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
adev->acp.acp_cell[1].name = "designware-i2s";
+ adev->acp.acp_cell[1].id = 1;
adev->acp.acp_cell[1].num_resources = 1;
adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
- r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, 2);
+ r = mfd_add_devices(adev->acp.parent, 0, adev->acp.acp_cell, 2, NULL, 0, NULL);
if (r)
goto failure;
r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
@@ -410,30 +412,34 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block)
adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;
adev->acp.acp_cell[0].name = "acp_audio_dma";
+ adev->acp.acp_cell[0].id = 0;
adev->acp.acp_cell[0].num_resources = 5;
adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
adev->acp.acp_cell[0].platform_data = &adev->asic_type;
adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
adev->acp.acp_cell[1].name = "designware-i2s";
+ adev->acp.acp_cell[1].id = 1;
adev->acp.acp_cell[1].num_resources = 1;
adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
adev->acp.acp_cell[2].name = "designware-i2s";
+ adev->acp.acp_cell[2].id = 2;
adev->acp.acp_cell[2].num_resources = 1;
adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
adev->acp.acp_cell[3].name = "designware-i2s";
+ adev->acp.acp_cell[3].id = 3;
adev->acp.acp_cell[3].num_resources = 1;
adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);
- r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, ACP_DEVS);
+ r = mfd_add_devices(adev->acp.parent, 0, adev->acp.acp_cell, ACP_DEVS, NULL, 0, NULL);
if (r)
goto failure;
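Note: replacing mfd_add_hotplug_devices() with mfd_add_devices(parent, 0, ...) makes the explicit .id assignments above matter: with several cells sharing the "designware-i2s" name, distinct cell ids are what keep the resulting platform device names unique (roughly "<name>.<base + id>", as I understand the mfd core). A stand-alone model of that naming, using a simplified stand-in for struct mfd_cell:

#include <stdio.h>

/* Simplified stand-in for struct mfd_cell: name + id together
 * determine the platform device name when ids don't collide.
 */
struct cell { const char *name; int id; };

int main(void)
{
	struct cell cells[] = {
		{ "acp_audio_dma",  0 },
		{ "designware-i2s", 1 },
		{ "designware-i2s", 2 },
		{ "designware-i2s", 3 },
	};

	for (unsigned int i = 0; i < sizeof(cells) / sizeof(cells[0]); i++)
		printf("%s.%d\n", cells[i].name, cells[i].id);
	return 0;
}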
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index f5466c592d94..d31460a9e958 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -507,7 +507,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
pm_runtime_get_sync(adev_to_drm(adev)->dev);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(adev_to_drm(adev));
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
}
}
@@ -811,18 +810,18 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
/**
* amdgpu_acpi_smart_shift_update - update dGPU device state to SBIOS
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
* @ss_state: current smart shift event
*
* returns 0 on success,
* otherwise return error number.
*/
-int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state)
+int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
int r;
- if (!amdgpu_device_supports_smart_shift(dev))
+ if (!amdgpu_device_supports_smart_shift(adev))
return 0;
switch (ss_state) {
@@ -1545,7 +1544,7 @@ static int isp_match_acpi_device_ids(struct device *dev, const void *data)
return acpi_match_device(data, dev) ? 1 : 0;
}
-int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN])
+int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev)
{
struct device *pdev __free(put_device) = NULL;
struct acpi_device *acpi_pdev;
@@ -1559,7 +1558,7 @@ int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN])
if (!acpi_pdev)
return -ENODEV;
- strscpy(*hid, acpi_device_hid(acpi_pdev));
+ *dev = acpi_pdev;
return 0;
}
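Note: the dropped pm_runtime_mark_last_busy() calls in this file (and in amdgpu_connectors.c below) track the runtime-PM rework in recent kernels where, if I read it correctly, pm_runtime_put_autosuspend() updates the last-busy timestamp itself. A sketch of the resulting pattern, assuming that behavior:

#include <linux/pm_runtime.h>

/* Sketch only: assumes pm_runtime_put_autosuspend() marks last-busy
 * internally, so no explicit pm_runtime_mark_last_busy() is needed.
 */
static void example_event_handler(struct device *dev)
{
	pm_runtime_get_sync(dev);        /* wake the device */
	/* ... handle the event ... */
	pm_runtime_put_autosuspend(dev); /* drop ref, restart autosuspend timer */
}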
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index d8ac4b1051a8..a2879d2b7c8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -248,18 +248,42 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc)
+{
+ if (adev->kfd.dev) {
+ if (adev->in_s0ix)
+ kgd2kfd_stop_sched_all_nodes(adev->kfd.dev);
+ else
+ kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
+ }
+}
+
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc)
+{
+ int r = 0;
+
+ if (adev->kfd.dev) {
+ if (adev->in_s0ix)
+ r = kgd2kfd_start_sched_all_nodes(adev->kfd.dev);
+ else
+ r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+ }
+
+ return r;
+}
+
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev)
{
if (adev->kfd.dev)
- kgd2kfd_suspend(adev->kfd.dev, run_pm);
+ kgd2kfd_suspend_process(adev->kfd.dev);
}
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev)
{
int r = 0;
if (adev->kfd.dev)
- r = kgd2kfd_resume(adev->kfd.dev, run_pm);
+ r = kgd2kfd_resume_process(adev->kfd.dev);
return r;
}
@@ -642,7 +666,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
goto err;
}
- ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
+ ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job, 0);
if (ret)
goto err;
@@ -749,12 +773,12 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
{
- return kgd2kfd_check_and_lock_kfd();
+ return kgd2kfd_check_and_lock_kfd(adev->kfd.dev);
}
void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev)
{
- kgd2kfd_unlock_kfd();
+ kgd2kfd_unlock_kfd(adev->kfd.dev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index b6ca41859b53..8bdfcde2029b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -71,7 +71,7 @@ struct kgd_mem {
struct mutex lock;
struct amdgpu_bo *bo;
struct dma_buf *dmabuf;
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
struct list_head attachments;
/* protected by amdkfd_process_info.lock */
struct list_head validate_list;
@@ -107,11 +107,13 @@ struct amdgpu_kfd_dev {
bool init_complete;
struct work_struct reset_work;
- /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
- struct dev_pagemap pgmap;
-
/* Client for KFD BO GEM handle allocations */
struct drm_client_dev client;
+
+ /* HMM page migration MEMORY_DEVICE_PRIVATE mapping
+ * Must be last -- ends in a flexible-array member.
+ */
+ struct dev_pagemap pgmap;
};
enum kgd_engine_type {
@@ -154,8 +156,10 @@ struct amdkfd_process_info {
int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc);
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc);
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev);
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
@@ -411,18 +415,22 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc);
+int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc);
+void kgd2kfd_suspend_process(struct kfd_dev *kfd);
+int kgd2kfd_resume_process(struct kfd_dev *kfd);
int kgd2kfd_pre_reset(struct kfd_dev *kfd,
struct amdgpu_reset_context *reset_context);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
-int kgd2kfd_check_and_lock_kfd(void);
-void kgd2kfd_unlock_kfd(void);
+int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd);
+void kgd2kfd_unlock_kfd(struct kfd_dev *kfd);
int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd);
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd);
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
bool retry_fault);
@@ -454,11 +462,20 @@ static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}
-static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
{
}
-static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
+{
+ return 0;
+}
+
+static inline void kgd2kfd_suspend_process(struct kfd_dev *kfd)
+{
+}
+
+static inline int kgd2kfd_resume_process(struct kfd_dev *kfd)
{
return 0;
}
@@ -489,12 +506,12 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
}
-static inline int kgd2kfd_check_and_lock_kfd(void)
+static inline int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
{
return 0;
}
-static inline void kgd2kfd_unlock_kfd(void)
+static inline void kgd2kfd_unlock_kfd(struct kfd_dev *kfd)
{
}
@@ -503,11 +520,21 @@ static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
return 0;
}
+static inline int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
return 0;
}
+static inline int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
return false;
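Note: each new kgd2kfd entry point above comes paired with a static-inline no-op stub, so amdgpu builds unchanged when KFD is configured out. The generic shape of the pattern (guard macro as used in this header):

#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_example(struct kfd_dev *kfd);
#else
static inline int kgd2kfd_example(struct kfd_dev *kfd)
{
	return 0; /* benign default when KFD is not built */
}
#endif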
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index ffbaa8bc5eea..1105a09e55dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -320,7 +320,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai
if (!down_read_trylock(&adev->reset_domain->sem))
return;
- amdgpu_amdkfd_suspend(adev, false);
+ amdgpu_amdkfd_suspend(adev, true);
if (suspend_resume_compute_scheduler(adev, true))
goto out;
@@ -333,7 +333,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai
out:
suspend_resume_compute_scheduler(adev, false);
- amdgpu_amdkfd_resume(adev, false);
+ amdgpu_amdkfd_resume(adev, true);
up_read(&adev->reset_domain->sem);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 04ef0ca10541..0239114fb6c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -352,7 +352,7 @@ static int kgd_hqd_dump(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -449,7 +449,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
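Note: the dump buffers in these hunks are arrays of [register, value] pairs, so with a parameter of type uint32_t (**dump)[2], sizeof(**dump) is already 2 * sizeof(uint32_t); kmalloc_array() then adds the multiplication overflow check that the open-coded HQD_N_REGS*2*sizeof(uint32_t) lacked. A userspace demonstration of the sizeof arithmetic (single pointer here instead of the driver's double pointer):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define HQD_N_REGS 56

int main(void)
{
	uint32_t (*dump)[2]; /* array of [reg, value] pairs */

	/* sizeof(*dump) == sizeof(uint32_t[2]) == 8: one pair per element */
	printf("element size: %zu\n", sizeof(*dump));

	/* calloc(), like kmalloc_array(), checks n * size for overflow */
	dump = calloc(HQD_N_REGS, sizeof(*dump));
	if (!dump)
		return 1;
	dump[0][0] = 0x1234; /* register offset */
	dump[0][1] = 0xdead; /* register value */
	free(dump);
	return 0;
}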
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index 6d08bc2781a3..f2278a0937ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -338,7 +338,7 @@ static int hqd_dump_v10_3(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -435,7 +435,7 @@ static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+12)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
index e0e6a6a49d90..aaccf0b9947d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
@@ -323,7 +323,7 @@ static int hqd_dump_v11(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -420,7 +420,7 @@ static int hqd_sdma_dump_v11(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (7+11+1+12+12)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
index 6f0dc23c901b..e0ceab400b2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
@@ -115,7 +115,7 @@ static int hqd_dump_v12(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -146,7 +146,7 @@ static int hqd_sdma_dump_v12(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (last_reg - first_reg + 1)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ca4a6b82817f..df77558e03ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -561,6 +561,13 @@ static uint32_t read_vmid_from_vmfault_reg(struct amdgpu_device *adev)
return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
}
+static uint32_t kgd_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -578,4 +585,5 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
.set_scratch_backing_va = set_scratch_backing_va,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
+ .hqd_sdma_get_doorbell = kgd_hqd_sdma_get_doorbell,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 0f3e2944edd7..e68c0fa8d751 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -582,6 +582,13 @@ static void set_vm_context_page_table_base(struct amdgpu_device *adev,
lower_32_bits(page_table_base));
}
+static uint32_t kgd_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -599,4 +606,5 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
get_atc_vmid_pasid_mapping_info,
.set_scratch_backing_va = set_scratch_backing_va,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .hqd_sdma_get_doorbell = kgd_hqd_sdma_get_doorbell,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 260165bbe373..b1c24c8fa686 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -213,19 +213,35 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
spin_lock(&kfd_mem_limit.mem_limit_lock);
if (kfd_mem_limit.system_mem_used + system_mem_needed >
- kfd_mem_limit.max_system_mem_limit)
+ kfd_mem_limit.max_system_mem_limit) {
pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
+ if (!no_system_mem_limit) {
+ ret = -ENOMEM;
+ goto release;
+ }
+ }
- if ((kfd_mem_limit.system_mem_used + system_mem_needed >
- kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
- (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
- kfd_mem_limit.max_ttm_mem_limit) ||
- (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >
- vram_size - reserved_for_pt - reserved_for_ras - atomic64_read(&adev->vram_pin_size))) {
+ if (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
+ kfd_mem_limit.max_ttm_mem_limit) {
ret = -ENOMEM;
goto release;
}
+ /* If is_app_apu is false and apu_prefer_gtt is true, this is an APU with
+ * carve-out < GTT. In that case, VRAM allocations go to the GTT domain; skip
+ * the VRAM check since the ttm_mem_limit check already covers this allocation.
+ */
+
+ if (adev && xcp_id >= 0 && (!adev->apu_prefer_gtt || adev->gmc.is_app_apu)) {
+ uint64_t vram_available =
+ vram_size - reserved_for_pt - reserved_for_ras -
+ atomic64_read(&adev->vram_pin_size);
+ if (adev->kfd.vram_used[xcp_id] + vram_needed > vram_available) {
+ ret = -ENOMEM;
+ goto release;
+ }
+ }
+
/* Update memory accounting by decreasing available system
* memory, TTM memory and GPU memory as computed above
*/
@@ -494,7 +510,8 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
return amdgpu_sync_fence(sync, vm->last_update, GFP_KERNEL);
}
-static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
+static uint64_t get_pte_flags(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct kgd_mem *mem)
{
uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE |
AMDGPU_VM_MTYPE_DEFAULT;
@@ -504,7 +521,7 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
- return amdgpu_gem_va_map_flags(adev, mapping_flags);
+ return mapping_flags;
}
/**
@@ -961,7 +978,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
goto unwind;
}
attachment[i]->va = va;
- attachment[i]->pte_flags = get_pte_flags(adev, mem);
+ attachment[i]->pte_flags = get_pte_flags(adev, vm, mem);
attachment[i]->adev = adev;
list_add(&attachment[i]->list, &mem->attachments);
@@ -1040,7 +1057,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
struct amdkfd_process_info *process_info = mem->process_info;
struct amdgpu_bo *bo = mem->bo;
struct ttm_operation_ctx ctx = { true, false };
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
int ret = 0;
mutex_lock(&process_info->lock);
@@ -1072,8 +1089,15 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
return 0;
}
- ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
+ range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!range)) {
+ ret = -ENOMEM;
+ goto unregister_out;
+ }
+
+ ret = amdgpu_ttm_tt_get_user_pages(bo, range);
if (ret) {
+ amdgpu_hmm_range_free(range);
if (ret == -EAGAIN)
pr_debug("Failed to get user pages, try again\n");
else
@@ -1086,6 +1110,9 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
pr_err("%s: Failed to reserve BO\n", __func__);
goto release_out;
}
+
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
amdgpu_bo_placement_from_domain(bo, mem->domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
@@ -1093,7 +1120,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
amdgpu_bo_unreserve(bo);
release_out:
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
+ amdgpu_hmm_range_free(range);
unregister_out:
if (ret)
amdgpu_hmm_unregister(bo);
@@ -1247,6 +1274,10 @@ static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
(void)amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
+ /* The VM entity is stopped if the process was killed; don't clear freed PT BOs */
+ if (!amdgpu_vm_ready(vm))
+ return 0;
+
(void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
(void)amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);
@@ -1626,11 +1657,15 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
uint64_t vram_available, system_mem_available, ttm_mem_available;
spin_lock(&kfd_mem_limit.mem_limit_lock);
- vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
- - adev->kfd.vram_used_aligned[xcp_id]
- - atomic64_read(&adev->vram_pin_size)
- - reserved_for_pt
- - reserved_for_ras;
+ if (adev->apu_prefer_gtt && !adev->gmc.is_app_apu)
+ vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
+ - adev->kfd.vram_used_aligned[xcp_id];
+ else
+ vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
+ - adev->kfd.vram_used_aligned[xcp_id]
+ - atomic64_read(&adev->vram_pin_size)
+ - reserved_for_pt
+ - reserved_for_ras;
if (adev->apu_prefer_gtt) {
system_mem_available = no_system_mem_limit ?
@@ -1892,7 +1927,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
amdgpu_hmm_unregister(mem->bo);
mutex_lock(&process_info->notifier_lock);
- amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
+ amdgpu_hmm_range_free(mem->range);
mutex_unlock(&process_info->notifier_lock);
}
@@ -1930,9 +1965,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
*/
if (size) {
if (!is_imported &&
- (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
- (adev->apu_prefer_gtt &&
- mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
+ mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
*size = bo_size;
else
*size = 0;
@@ -2305,10 +2338,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
struct kfd_vm_fault_info *mem)
{
- if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
+ if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated) == 1) {
*mem = *adev->gmc.vm_fault_info;
- mb(); /* make sure read happened */
- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
}
return 0;
}
@@ -2519,7 +2551,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
bo = mem->bo;
- amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
+ amdgpu_hmm_range_free(mem->range);
mem->range = NULL;
/* BO reservations and getting user pages (hmm_range_fault)
@@ -2543,10 +2575,14 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
}
}
+ mem->range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!mem->range))
+ return -ENOMEM;
/* Get updated user pages */
- ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
- &mem->range);
+ ret = amdgpu_ttm_tt_get_user_pages(bo, mem->range);
if (ret) {
+ amdgpu_hmm_range_free(mem->range);
+ mem->range = NULL;
pr_debug("Failed %d to get user pages\n", ret);
/* Return -EFAULT bad address error as success. It will
@@ -2563,17 +2599,24 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
* from the KFD, trigger a segmentation fault in VM debug mode.
*/
if (amdgpu_ttm_adev(bo->tbo.bdev)->debug_vm_userptr) {
+ struct kfd_process *p;
+
pr_err("Pid %d unmapped memory before destroying userptr at GPU addr 0x%llx\n",
pid_nr(process_info->pid), mem->va);
// Send GPU VM fault to user space
- kfd_signal_vm_fault_event_with_userptr(kfd_lookup_process_by_pid(process_info->pid),
- mem->va);
+ p = kfd_lookup_process_by_pid(process_info->pid);
+ if (p) {
+ kfd_signal_vm_fault_event_with_userptr(p, mem->va);
+ kfd_unref_process(p);
+ }
}
ret = 0;
}
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->range);
+
mutex_lock(&process_info->notifier_lock);
/* Mark the BO as valid unless it was invalidated
@@ -2712,8 +2755,8 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i
continue;
/* Only check mem with hmm range associated */
- valid = amdgpu_ttm_tt_get_user_pages_done(
- mem->bo->tbo.ttm, mem->range);
+ valid = amdgpu_hmm_range_valid(mem->range);
+ amdgpu_hmm_range_free(mem->range);
mem->range = NULL;
if (!valid) {
@@ -2969,9 +3012,22 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
struct amdgpu_device *adev = amdgpu_ttm_adev(
peer_vm->root.bo->tbo.bdev);
+ struct amdgpu_fpriv *fpriv =
+ container_of(peer_vm, struct amdgpu_fpriv, vm);
+
+ ret = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
+ if (ret) {
+ dev_dbg(adev->dev,
+ "Memory eviction: handle PRT moved failed, pid %8d. Try again.\n",
+ pid_nr(process_info->pid));
+ goto validate_map_fail;
+ }
+
ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket);
if (ret) {
- pr_debug("Memory eviction: handle moved failed. Try again\n");
+ dev_dbg(adev->dev,
+ "Memory eviction: handle moved failed, pid %8d. Try again.\n",
+ pid_nr(process_info->pid));
goto validate_map_fail;
}
}
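Note: the userptr flow in this file now owns an explicitly allocated HMM range object: allocate it, fault the user pages, publish them to the TTM object, and free the range on every exit path. Condensed from the init_user_pages() hunks above (driver-internal helpers as they appear in this patch, not a standalone program):

/* Condensed from the init_user_pages() changes above; error labels elided. */
range = amdgpu_hmm_range_alloc(NULL);
if (!range)
	return -ENOMEM;

ret = amdgpu_ttm_tt_get_user_pages(bo, range);    /* faults the user pages */
if (ret) {
	amdgpu_hmm_range_free(range);             /* range is freed on every path */
	return ret;
}

amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range); /* publish pages to the TT */
/* ... validate the BO ... */
amdgpu_hmm_range_free(range);                     /* done: drop the snapshot */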
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index e476e45b996a..763f2b8dcf13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -706,7 +706,6 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
}
adev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
- adev->clock.current_dispclk = adev->clock.default_dispclk;
adev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
if (adev->clock.max_pixel_clock == 0)
@@ -1816,16 +1815,43 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
return sysfs_emit(buf, "%s\n", ctx->vbios_pn);
}
+static ssize_t amdgpu_atombios_get_vbios_build(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ return sysfs_emit(buf, "%s\n", ctx->build_num);
+}
+
static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
NULL);
+static DEVICE_ATTR(vbios_build, 0444, amdgpu_atombios_get_vbios_build, NULL);
static struct attribute *amdgpu_vbios_version_attrs[] = {
- &dev_attr_vbios_version.attr,
- NULL
+ &dev_attr_vbios_version.attr, &dev_attr_vbios_build.attr, NULL
};
+static umode_t amdgpu_vbios_version_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ if (attr == &dev_attr_vbios_build.attr && !strlen(ctx->build_num))
+ return 0;
+
+ return attr->mode;
+}
+
const struct attribute_group amdgpu_vbios_version_attr_group = {
- .attrs = amdgpu_vbios_version_attrs
+ .attrs = amdgpu_vbios_version_attrs,
+ .is_visible = amdgpu_vbios_version_attrs_is_visible,
};
int amdgpu_atombios_sysfs_init(struct amdgpu_device *adev)
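Note: exposing vbios_build through an attribute group with an .is_visible hook hides the file per-device instead of returning errors at read time; the attribute simply vanishes on VBIOSes without a build string. Condensed shape of the gating (kernel idiom, names taken from the hunk above; ctx is obtained from kobj as in the full version):

static umode_t example_attrs_is_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	/* Hide vbios_build when the VBIOS carries no build string. */
	if (attr == &dev_attr_vbios_build.attr && !strlen(ctx->build_num))
		return 0;
	return attr->mode; /* otherwise keep the declared mode */
}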
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index c7d32fb216e4..636385c80f64 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -181,19 +181,22 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
u8 frev, crev;
int usage_bytes = 0;
- if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
- if (frev == 2 && crev == 1) {
- fw_usage_v2_1 =
- (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
- amdgpu_atomfirmware_allocate_fb_v2_1(adev,
- fw_usage_v2_1,
- &usage_bytes);
- } else if (frev >= 2 && crev >= 2) {
- fw_usage_v2_2 =
- (struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
- amdgpu_atomfirmware_allocate_fb_v2_2(adev,
- fw_usage_v2_2,
- &usage_bytes);
+ /* Skip atomfirmware allocation for SR-IOV VFs when the dynamic critical region is enabled */
+ if (!(amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled)) {
+ if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
+ if (frev == 2 && crev == 1) {
+ fw_usage_v2_1 =
+ (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
+ amdgpu_atomfirmware_allocate_fb_v2_1(adev,
+ fw_usage_v2_1,
+ &usage_bytes);
+ } else if (frev >= 2 && crev >= 2) {
+ fw_usage_v2_2 =
+ (struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
+ amdgpu_atomfirmware_allocate_fb_v2_2(adev,
+ fw_usage_v2_2,
+ &usage_bytes);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 00e96419fcda..35d04e69aec0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -96,13 +96,14 @@ void amdgpu_bios_release(struct amdgpu_device *adev)
* part of the system bios. On boot, the system bios puts a
* copy of the igp rom at the start of vram if a discrete card is
* present.
- * For SR-IOV, the vbios image is also put in VRAM in the VF.
+ * For SR-IOV, if dynamic critical region is not enabled,
+ * the vbios image is also put at the start of VRAM in the VF.
*/
static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
{
- uint8_t __iomem *bios;
+ uint8_t __iomem *bios = NULL;
resource_size_t vram_base;
- resource_size_t size = 256 * 1024; /* ??? */
+ u32 size = 256U * 1024U; /* ??? */
if (!(adev->flags & AMD_IS_APU))
if (amdgpu_device_need_post(adev))
@@ -114,18 +115,33 @@ static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
adev->bios = NULL;
vram_base = pci_resource_start(adev->pdev, 0);
- bios = ioremap_wc(vram_base, size);
- if (!bios)
- return false;
adev->bios = kmalloc(size, GFP_KERNEL);
- if (!adev->bios) {
- iounmap(bios);
+ if (!adev->bios)
return false;
+
+ /* For SR-IOV, when the dynamic critical region is enabled,
+ * the vbios image is put at a dynamic offset of VRAM in the VF.
+ * If the dynamic critical region is disabled, follow the existing logic as on bare metal.
+ */
+ if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
+ if (amdgpu_virt_get_dynamic_data_info(adev,
+ AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID, adev->bios, &size)) {
+ amdgpu_bios_release(adev);
+ return false;
+ }
+ } else {
+ bios = ioremap_wc(vram_base, size);
+ if (!bios) {
+ amdgpu_bios_release(adev);
+ return false;
+ }
+
+ memcpy_fromio(adev->bios, bios, size);
+ iounmap(bios);
}
+
adev->bios_size = size;
- memcpy_fromio(adev->bios, bios, size);
- iounmap(bios);
if (!check_atom_bios(adev, size)) {
amdgpu_bios_release(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 702f6610d024..66fb37b64388 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -184,43 +184,36 @@ void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
struct drm_amdgpu_bo_list_entry **info_param)
{
- const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+ const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
+ const uint32_t bo_info_size = in->bo_info_size;
+ const uint32_t bo_number = in->bo_number;
struct drm_amdgpu_bo_list_entry *info;
- int r;
-
- info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
- if (!info)
- return -ENOMEM;
/* copy the handle array from userspace to a kernel buffer */
- r = -EFAULT;
- if (likely(info_size == in->bo_info_size)) {
- unsigned long bytes = in->bo_number *
- in->bo_info_size;
-
- if (copy_from_user(info, uptr, bytes))
- goto error_free;
-
+ if (likely(info_size == bo_info_size)) {
+ info = vmemdup_array_user(uptr, bo_number, info_size);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
} else {
- unsigned long bytes = min(in->bo_info_size, info_size);
+ const uint32_t bytes = min(bo_info_size, info_size);
unsigned i;
- memset(info, 0, in->bo_number * info_size);
- for (i = 0; i < in->bo_number; ++i) {
- if (copy_from_user(&info[i], uptr, bytes))
- goto error_free;
+ info = kvmalloc_array(bo_number, info_size, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- uptr += in->bo_info_size;
+ memset(info, 0, bo_number * info_size);
+ for (i = 0; i < bo_number; ++i, uptr += bo_info_size) {
+ if (copy_from_user(&info[i], uptr, bytes)) {
+ kvfree(info);
+ return -EFAULT;
+ }
}
}
*info_param = info;
return 0;
-
-error_free:
- kvfree(info);
- return r;
}
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
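Note: vmemdup_array_user() collapses the old kvmalloc_array() + copy_from_user() dance into a single overflow-checked call that returns an ERR_PTR() on failure; the result is freed with kvfree(). The caller shape adopted above (and again in amdgpu_cs.c below with memdup_array_user()/vmemdup_array_user()):

/* Shape of the vmemdup_array_user() pattern adopted in this hunk. */
info = vmemdup_array_user(uptr, bo_number, info_size);
if (IS_ERR(info))
	return PTR_ERR(info); /* -EFAULT or -ENOMEM; nothing to unwind */
/* ... consume info[0 .. bo_number - 1] ... */
kvfree(info);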
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index 555cd6d877c3..2b5e7c46a39d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -38,8 +38,7 @@ struct amdgpu_bo_list_entry {
struct amdgpu_bo *bo;
struct amdgpu_bo_va *bo_va;
uint32_t priority;
- struct page **user_pages;
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
bool user_invalidated;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 5e375e9c4f5d..9f96d568acf2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -398,30 +398,28 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
struct drm_display_mode *mode = NULL;
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
int i;
- static const struct mode_size {
+ int n;
+ struct mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
int w;
int h;
- } common_modes[17] = {
- { 640, 480},
- { 720, 480},
- { 800, 600},
- { 848, 480},
- {1024, 768},
- {1152, 768},
- {1280, 720},
- {1280, 800},
- {1280, 854},
- {1280, 960},
- {1280, 1024},
- {1440, 900},
- {1400, 1050},
- {1680, 1050},
- {1600, 1200},
- {1920, 1080},
- {1920, 1200}
+ } common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
};
- for (i = 0; i < 17; i++) {
+ n = ARRAY_SIZE(common_modes);
+
+ for (i = 0; i < n; i++) {
if (amdgpu_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
if (common_modes[i].w > 1024 ||
common_modes[i].h > 768)
@@ -434,12 +432,11 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
common_modes[i].h == native_mode->vdisplay))
continue;
}
- if (common_modes[i].w < 320 || common_modes[i].h < 200)
- continue;
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
if (!mode)
return;
+ strscpy(mode->name, common_modes[i].name, DRM_DISPLAY_MODE_LEN);
drm_mode_probed_add(connector, mode);
}
@@ -737,10 +734,8 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -922,10 +917,8 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -1149,10 +1142,8 @@ out:
amdgpu_connector_update_scratch_regs(connector, ret);
exit:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -1195,29 +1186,69 @@ static void amdgpu_connector_dvi_force(struct drm_connector *connector)
amdgpu_connector->use_digital = true;
}
+/**
+ * amdgpu_max_hdmi_pixel_clock - Return max supported HDMI (TMDS) pixel clock
+ * @adev: pointer to amdgpu_device
+ *
+ * Return: maximum supported HDMI (TMDS) pixel clock in kHz.
+ */
+static int amdgpu_max_hdmi_pixel_clock(const struct amdgpu_device *adev)
+{
+ if (adev->asic_type >= CHIP_POLARIS10)
+ return 600000;
+ else if (adev->asic_type >= CHIP_TONGA)
+ return 300000;
+ else
+ return 297000;
+}
+
+/**
+ * amdgpu_connector_dvi_mode_valid - Validate a mode on DVI/HDMI connectors
+ * @connector: DRM connector to validate the mode on
+ * @mode: display mode to validate
+ *
+ * Validate the given display mode on DVI and HDMI connectors, including
+ * analog signals on DVI-I.
+ *
+ * Return: drm_mode_status indicating whether the mode is valid.
+ */
static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+ const int max_hdmi_pixel_clock = amdgpu_max_hdmi_pixel_clock(adev);
+ const int max_dvi_single_link_pixel_clock = 165000;
+ int max_digital_pixel_clock_khz;
/* XXX check mode bandwidth */
- if (amdgpu_connector->use_digital && (mode->clock > 165000)) {
- if ((amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
- (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
- (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) {
- return MODE_OK;
- } else if (connector->display_info.is_hdmi) {
- /* HDMI 1.3+ supports max clock of 340 Mhz */
- if (mode->clock > 340000)
- return MODE_CLOCK_HIGH;
- else
- return MODE_OK;
- } else {
- return MODE_CLOCK_HIGH;
+ if (amdgpu_connector->use_digital) {
+ switch (amdgpu_connector->connector_object_id) {
+ case CONNECTOR_OBJECT_ID_HDMI_TYPE_A:
+ max_digital_pixel_clock_khz = max_hdmi_pixel_clock;
+ break;
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I:
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D:
+ max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock;
+ break;
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I:
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D:
+ case CONNECTOR_OBJECT_ID_HDMI_TYPE_B:
+ max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock * 2;
+ break;
}
+
+ /* When the display EDID claims that it's an HDMI display,
+ * we use the HDMI encoder mode of the display HW,
+ * so we should verify against the max HDMI clock here.
+ */
+ if (connector->display_info.is_hdmi)
+ max_digital_pixel_clock_khz = max_hdmi_pixel_clock;
+
+ if (mode->clock > max_digital_pixel_clock_khz)
+ return MODE_CLOCK_HIGH;
}
/* check against the max pixel clock */
@@ -1449,10 +1480,8 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index 5a234eadae8b..425a3e564360 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
@@ -68,7 +68,6 @@ void amdgpu_cper_entry_fill_hdr(struct amdgpu_device *adev,
hdr->error_severity = sev;
hdr->valid_bits.platform_id = 1;
- hdr->valid_bits.partition_id = 1;
hdr->valid_bits.timestamp = 1;
amdgpu_cper_get_timestamp(&hdr->timestamp);
@@ -174,7 +173,7 @@ int amdgpu_cper_entry_fill_runtime_section(struct amdgpu_device *adev,
struct cper_sec_nonstd_err *section;
bool poison;
- poison = (sev == CPER_SEV_NON_FATAL_CORRECTED) ? false : true;
+ poison = sev != CPER_SEV_NON_FATAL_CORRECTED;
section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
@@ -206,24 +205,31 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
{
struct cper_sec_desc *section_desc;
struct cper_sec_nonstd_err *section;
+ uint32_t socket_id;
section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
amdgpu_cper_entry_fill_section_desc(adev, section_desc, true, false,
- CPER_SEV_NUM, RUNTIME, NONSTD_SEC_LEN,
+ CPER_SEV_FATAL, RUNTIME, NONSTD_SEC_LEN,
NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
section->hdr.valid_bits.err_info_cnt = 1;
section->hdr.valid_bits.err_context_cnt = 1;
section->info.error_type = RUNTIME;
+ section->info.valid_bits.ms_chk = 1;
section->info.ms_chk_bits.err_type_valid = 1;
+ section->info.ms_chk_bits.err_type = 1;
+ section->info.ms_chk_bits.pcc = 1;
section->ctx.reg_ctx_type = CPER_CTX_TYPE_CRASH;
section->ctx.reg_arr_size = sizeof(section->ctx.reg_dump);
/* Hardcoded Reg dump for bad page threshold CPER */
+ socket_id = (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
+ adev->smuio.funcs->get_socket_id(adev) :
+ 0;
section->ctx.reg_dump[CPER_ACA_REG_CTL_LO] = 0x1;
section->ctx.reg_dump[CPER_ACA_REG_CTL_HI] = 0x0;
section->ctx.reg_dump[CPER_ACA_REG_STATUS_LO] = 0x137;
@@ -234,8 +240,8 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
section->ctx.reg_dump[CPER_ACA_REG_MISC0_HI] = 0x0;
section->ctx.reg_dump[CPER_ACA_REG_CONFIG_LO] = 0x2;
section->ctx.reg_dump[CPER_ACA_REG_CONFIG_HI] = 0x1ff;
- section->ctx.reg_dump[CPER_ACA_REG_IPID_LO] = 0x0;
- section->ctx.reg_dump[CPER_ACA_REG_IPID_HI] = 0x96;
+ section->ctx.reg_dump[CPER_ACA_REG_IPID_LO] = (socket_id / 4) & 0x01;
+ section->ctx.reg_dump[CPER_ACA_REG_IPID_HI] = 0x096 | (((socket_id % 4) & 0x3) << 12);
section->ctx.reg_dump[CPER_ACA_REG_SYND_LO] = 0x0;
section->ctx.reg_dump[CPER_ACA_REG_SYND_HI] = 0x0;
@@ -326,7 +332,9 @@ int amdgpu_cper_generate_bp_threshold_record(struct amdgpu_device *adev)
return -ENOMEM;
}
- amdgpu_cper_entry_fill_hdr(adev, bp_threshold, AMDGPU_CPER_TYPE_BP_THRESHOLD, CPER_SEV_NUM);
+ amdgpu_cper_entry_fill_hdr(adev, bp_threshold,
+ AMDGPU_CPER_TYPE_BP_THRESHOLD,
+ CPER_SEV_FATAL);
ret = amdgpu_cper_entry_fill_bad_page_threshold_section(adev, bp_threshold, 0);
if (ret)
return ret;
@@ -457,7 +465,7 @@ calc:
void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count)
{
- u64 pos, wptr_old, rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
+ u64 pos, wptr_old, rptr;
int rec_cnt_dw = count >> 2;
u32 chunk, ent_sz;
u8 *s = (u8 *)src;
@@ -470,9 +478,11 @@ void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count)
return;
}
+ mutex_lock(&ring->adev->cper.ring_lock);
+
wptr_old = ring->wptr;
+ rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
- mutex_lock(&ring->adev->cper.ring_lock);
while (count) {
ent_sz = amdgpu_cper_ring_get_ent_sz(ring, ring->wptr);
chunk = umin(ent_sz, count);
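/*
 * Editor's note: the reordering above is the point of this hunk - wptr_old
 * and the rptr snapshot are only meaningful while cper.ring_lock is held,
 * so both reads now happen after mutex_lock() rather than before it,
 * closing a race with concurrent writers advancing the ring.
 */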
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
index bcb97d245673..353421807387 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9ea0d9b71f48..ecdfe6cb36cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -40,6 +40,7 @@
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"
+#include "amdgpu_hmm.h"
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
struct amdgpu_device *adev,
@@ -178,25 +179,17 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
struct amdgpu_vm *vm = &fpriv->vm;
- uint64_t *chunk_array_user;
uint64_t *chunk_array;
uint32_t uf_offset = 0;
size_t size;
int ret;
int i;
- chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
- GFP_KERNEL);
- if (!chunk_array)
- return -ENOMEM;
-
- /* get chunks */
- chunk_array_user = u64_to_user_ptr(cs->in.chunks);
- if (copy_from_user(chunk_array, chunk_array_user,
- sizeof(uint64_t)*cs->in.num_chunks)) {
- ret = -EFAULT;
- goto free_chunk;
- }
+ chunk_array = memdup_array_user(u64_to_user_ptr(cs->in.chunks),
+ cs->in.num_chunks,
+ sizeof(uint64_t));
+ if (IS_ERR(chunk_array))
+ return PTR_ERR(chunk_array);
p->nchunks = cs->in.num_chunks;
p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
@@ -209,7 +202,6 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
for (i = 0; i < p->nchunks; i++) {
struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
struct drm_amdgpu_cs_chunk user_chunk;
- uint32_t __user *cdata;
chunk_ptr = u64_to_user_ptr(chunk_array[i]);
if (copy_from_user(&user_chunk, chunk_ptr,
@@ -222,20 +214,16 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
p->chunks[i].length_dw = user_chunk.length_dw;
size = p->chunks[i].length_dw;
- cdata = u64_to_user_ptr(user_chunk.chunk_data);
- p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
- GFP_KERNEL);
- if (p->chunks[i].kdata == NULL) {
- ret = -ENOMEM;
+ p->chunks[i].kdata = vmemdup_array_user(u64_to_user_ptr(user_chunk.chunk_data),
+ size,
+ sizeof(uint32_t));
+ if (IS_ERR(p->chunks[i].kdata)) {
+ ret = PTR_ERR(p->chunks[i].kdata);
i--;
goto free_partial_kdata;
}
size *= sizeof(uint32_t);
- if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
- ret = -EFAULT;
- goto free_partial_kdata;
- }
/* Assume the worst on the following checks */
ret = -EINVAL;
@@ -286,14 +274,15 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
}
}
- if (!p->gang_size) {
+ if (!p->gang_size || (amdgpu_sriov_vf(p->adev) && p->gang_size > 1)) {
ret = -EINVAL;
goto free_all_kdata;
}
for (i = 0; i < p->gang_size; ++i) {
ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
- num_ibs[i], &p->jobs[i]);
+ num_ibs[i], &p->jobs[i],
+ p->filp->client_id);
if (ret)
goto free_all_kdata;
switch (p->adev->enforce_isolation[fpriv->xcp_id]) {
@@ -375,6 +364,12 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
if (p->uf_bo && ring->funcs->no_user_fence)
return -EINVAL;
+ if (!p->adev->debug_enable_ce_cs &&
+ chunk_ib->flags & AMDGPU_IB_FLAG_CE) {
+ dev_err_ratelimited(p->adev->dev, "CE CS is blocked, use debug=0x400 to override\n");
+ return -EINVAL;
+ }
+
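/*
 * Editor's note: 0x400 in the message above is assumed to be the
 * amdgpu.debug module-parameter bit (BIT(10)) that sets
 * adev->debug_enable_ce_cs; the flag definition is not visible in this
 * hunk.
 */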
if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
@@ -395,7 +390,7 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
chunk_ib->ib_bytes : 0,
AMDGPU_IB_POOL_DELAYED, ib);
if (r) {
- DRM_ERROR("Failed to get ib !\n");
+ drm_err(adev_to_drm(p->adev), "Failed to get ib !\n");
return r;
}
@@ -467,7 +462,7 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
if (r) {
- DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
+ drm_err(adev_to_drm(p->adev), "syncobj %u failed to find fence @ %llu (%d)!\n",
handle, point, r);
return r;
}
@@ -713,7 +708,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
*/
const s64 us_upper_bound = 200000;
- if (!adev->mm_stats.log2_max_MBps) {
+ if ((!adev->mm_stats.log2_max_MBps) || !ttm_resource_manager_used(&adev->mman.vram_mgr.manager)) {
*max_bytes = 0;
*max_vis_bytes = 0;
return;
@@ -895,26 +890,18 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
bool userpage_invalidated = false;
struct amdgpu_bo *bo = e->bo;
- int i;
-
- e->user_pages = kvcalloc(bo->tbo.ttm->num_pages,
- sizeof(struct page *),
- GFP_KERNEL);
- if (!e->user_pages) {
- DRM_ERROR("kvmalloc_array failure\n");
- r = -ENOMEM;
- goto out_free_user_pages;
- }
- r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
- if (r) {
- kvfree(e->user_pages);
- e->user_pages = NULL;
+ e->range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!e->range))
+ return -ENOMEM;
+
+ r = amdgpu_ttm_tt_get_user_pages(bo, e->range);
+ if (r)
goto out_free_user_pages;
- }
for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
- if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
+ if (bo->tbo.ttm->pages[i] !=
+ hmm_pfn_to_page(e->range->hmm_range.hmm_pfns[i])) {
userpage_invalidated = true;
break;
}
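/*
 * Editor's sketch (not part of the patch): the comparison above relies on
 * hmm_pfn_to_page() mapping a populated HMM pfn back to its struct page.
 * A minimal validity walk over a just-faulted range might look like this;
 * everything except the hmm_* helpers and struct ttm_tt is hypothetical.
 */
static bool pages_match_hmm_range(struct ttm_tt *ttm,
				  const unsigned long *hmm_pfns)
{
	unsigned long i;

	for (i = 0; i < ttm->num_pages; i++) {
		/* an unpopulated entry means the snapshot is stale */
		if (!(hmm_pfns[i] & HMM_PFN_VALID))
			return false;
		if (ttm->pages[i] != hmm_pfn_to_page(hmm_pfns[i]))
			return false;
	}
	return true;
}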
@@ -958,7 +945,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) &&
- e->user_invalidated && e->user_pages) {
+ e->user_invalidated) {
amdgpu_bo_placement_from_domain(e->bo,
AMDGPU_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement,
@@ -967,11 +954,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto out_free_user_pages;
amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm,
- e->user_pages);
+ e->range);
}
-
- kvfree(e->user_pages);
- e->user_pages = NULL;
}
amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
@@ -982,7 +966,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL,
amdgpu_cs_bo_validate, p);
if (r) {
- DRM_ERROR("amdgpu_vm_validate() failed.\n");
+ drm_err(adev_to_drm(p->adev), "amdgpu_vm_validate() failed.\n");
goto out_free_user_pages;
}
@@ -1011,13 +995,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
out_free_user_pages:
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- struct amdgpu_bo *bo = e->bo;
-
- if (!e->user_pages)
- continue;
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
- kvfree(e->user_pages);
- e->user_pages = NULL;
+ amdgpu_hmm_range_free(e->range);
e->range = NULL;
}
mutex_unlock(&p->bo_list->bo_list_mutex);
@@ -1060,13 +1038,13 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
if (r) {
- DRM_ERROR("IB va_start is invalid\n");
+ drm_err(adev_to_drm(p->adev), "IB va_start is invalid\n");
return r;
}
if ((va_start + ib->length_dw * 4) >
(m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
- DRM_ERROR("IB va_start+ib_bytes is invalid\n");
+ drm_err(adev_to_drm(p->adev), "IB va_start+ib_bytes is invalid\n");
return -EINVAL;
}
@@ -1138,6 +1116,9 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
}
}
+ if (!amdgpu_vm_ready(vm))
+ return -EINVAL;
+
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
@@ -1234,7 +1215,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
if (r) {
if (r != -ERESTARTSYS)
- DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
+ drm_err(adev_to_drm(p->adev), "amdgpu_ctx_wait_prev_fence failed.\n");
return r;
}
@@ -1345,8 +1326,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
*/
r = 0;
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm,
- e->range);
+ r |= !amdgpu_hmm_range_valid(e->range);
+ amdgpu_hmm_range_free(e->range);
e->range = NULL;
}
if (r) {
@@ -1447,7 +1428,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_init(&parser, adev, filp, data);
if (r) {
- DRM_ERROR_RATELIMITED("Failed to initialize parser %d!\n", r);
+ drm_err_ratelimited(dev, "Failed to initialize parser %d!\n", r);
return r;
}
@@ -1462,9 +1443,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_bos(&parser, data);
if (r) {
if (r == -ENOMEM)
- DRM_ERROR("Not enough memory for command submission!\n");
+ drm_err(dev, "Not enough memory for command submission!\n");
else if (r != -ERESTARTSYS && r != -EAGAIN)
- DRM_DEBUG("Failed to process the buffer list %d!\n", r);
+ drm_dbg(dev, "Failed to process the buffer list %d!\n", r);
goto error_fini;
}
@@ -1763,30 +1744,21 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
{
struct amdgpu_device *adev = drm_to_adev(dev);
union drm_amdgpu_wait_fences *wait = data;
- uint32_t fence_count = wait->in.fence_count;
- struct drm_amdgpu_fence *fences_user;
struct drm_amdgpu_fence *fences;
int r;
/* Get the fences from userspace */
- fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
- GFP_KERNEL);
- if (fences == NULL)
- return -ENOMEM;
-
- fences_user = u64_to_user_ptr(wait->in.fences);
- if (copy_from_user(fences, fences_user,
- sizeof(struct drm_amdgpu_fence) * fence_count)) {
- r = -EFAULT;
- goto err_free_fences;
- }
+ fences = memdup_array_user(u64_to_user_ptr(wait->in.fences),
+ wait->in.fence_count,
+ sizeof(struct drm_amdgpu_fence));
+ if (IS_ERR(fences))
+ return PTR_ERR(fences);
if (wait->in.wait_all)
r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
else
r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
-err_free_fences:
kfree(fences);
return r;
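/*
 * Editor's sketch (not part of the patch): both conversions in this file
 * follow the same shape - memdup_array_user()/vmemdup_array_user() replace
 * an open-coded allocation plus copy_from_user() and fold the multiply
 * overflow check into the helper. The asymmetry to keep in mind is the
 * free routine: memdup_array_user() memory is kfree()d, while
 * vmemdup_array_user() memory must be kvfree()d. The function below is a
 * hypothetical illustration of the pattern.
 */
static int copy_user_fences_example(u64 user_ptr, u32 count)
{
	struct drm_amdgpu_fence *fences;

	fences = memdup_array_user(u64_to_user_ptr(user_ptr), count,
				   sizeof(*fences));
	if (IS_ERR(fences))
		return PTR_ERR(fences);

	/* ... consume the copied array ... */

	kfree(fences);
	return 0;
}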
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 85567d0d9545..afedea02188d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -236,7 +236,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
r = amdgpu_xcp_select_scheds(adev, hw_ip, hw_prio, fpriv,
&num_scheds, &scheds);
if (r)
- goto cleanup_entity;
+ goto error_free_entity;
}
/* disable load balance if the hw engine retains context among dependent jobs */
@@ -944,6 +944,7 @@ static void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
drm_sched_entity_fini(entity);
}
}
+ kref_put(&ctx->refcount, amdgpu_ctx_fini);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 8e626f50b362..62d43b8cbe58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -129,7 +129,6 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
(se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return -EINVAL;
@@ -179,7 +178,6 @@ end:
if (pm_pg_lock)
mutex_unlock(&adev->pm.mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
@@ -255,7 +253,6 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off
if (rd->id.use_grbm) {
if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) ||
(rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
mutex_unlock(&rd->lock);
@@ -310,7 +307,6 @@ end:
mutex_unlock(&rd->lock);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
@@ -446,7 +442,6 @@ static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, siz
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, rd->id.xcc_id);
mutex_unlock(&adev->grbm_idx_mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (!x) {
@@ -557,7 +552,6 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -617,7 +611,6 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -676,7 +669,6 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -736,7 +728,6 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -795,7 +786,6 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -855,7 +845,6 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -1003,7 +992,6 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (r) {
@@ -1094,7 +1082,6 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
mutex_unlock(&adev->grbm_idx_mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (!x) {
@@ -1192,7 +1179,6 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
mutex_unlock(&adev->grbm_idx_mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
while (size) {
@@ -1266,7 +1252,6 @@ static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1315,7 +1300,6 @@ static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1365,7 +1349,6 @@ static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1414,7 +1397,6 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1460,7 +1442,6 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1501,7 +1482,6 @@ static ssize_t amdgpu_debugfs_gfxoff_status_read(struct file *f, char __user *bu
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1701,7 +1681,6 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
up_write(&adev->reset_domain->sem);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
@@ -1721,7 +1700,6 @@ static int amdgpu_debugfs_evict_vram(void *data, u64 *val)
*val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
@@ -1742,7 +1720,6 @@ static int amdgpu_debugfs_evict_gtt(void *data, u64 *val)
*val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
@@ -1762,7 +1739,6 @@ static int amdgpu_debugfs_benchmark(void *data, u64 val)
r = amdgpu_benchmark(adev, val);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return r;
@@ -1786,7 +1762,7 @@ static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
ti = amdgpu_vm_get_task_info_vm(vm);
if (ti) {
- seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->pid, ti->process_name);
+ seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->task.pid, ti->process_name);
amdgpu_vm_put_task_info(ti);
}
@@ -1902,7 +1878,7 @@ no_preempt:
continue;
}
job = to_amdgpu_job(s_job);
- if (preempted && (&job->hw_fence) == fence)
+ if (preempted && (&job->hw_fence->base) == fence)
/* mark the job as preempted */
job->preemption_status |= AMDGPU_IB_PREEMPTED;
}
@@ -2014,7 +1990,6 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
ret = -EINVAL;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return ret;
@@ -2123,14 +2098,68 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
debugfs_create_blob("amdgpu_vbios", 0444, root,
&adev->debugfs_vbios_blob);
- adev->debugfs_discovery_blob.data = adev->mman.discovery_bin;
- adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size;
- debugfs_create_blob("amdgpu_discovery", 0444, root,
- &adev->debugfs_discovery_blob);
+ if (adev->discovery.debugfs_blob.size)
+ debugfs_create_blob("amdgpu_discovery", 0444, root,
+ &adev->discovery.debugfs_blob);
return 0;
}
+static int amdgpu_pt_info_read(struct seq_file *m, void *unused)
+{
+ struct drm_file *file;
+ struct amdgpu_fpriv *fpriv;
+ struct amdgpu_bo *root_bo;
+ struct amdgpu_device *adev;
+ int r;
+
+ file = m->private;
+ if (!file)
+ return -EINVAL;
+
+ adev = drm_to_adev(file->minor->dev);
+ fpriv = file->driver_priv;
+ if (!fpriv || !fpriv->vm.root.bo)
+ return -ENODEV;
+
+ root_bo = amdgpu_bo_ref(fpriv->vm.root.bo);
+ r = amdgpu_bo_reserve(root_bo, true);
+ if (r) {
+ amdgpu_bo_unref(&root_bo);
+ return -EINVAL;
+ }
+
+ seq_printf(m, "pd_address: 0x%llx\n", amdgpu_gmc_pd_addr(fpriv->vm.root.bo));
+ seq_printf(m, "max_pfn: 0x%llx\n", adev->vm_manager.max_pfn);
+ seq_printf(m, "num_level: 0x%x\n", adev->vm_manager.num_level);
+ seq_printf(m, "block_size: 0x%x\n", adev->vm_manager.block_size);
+ seq_printf(m, "fragment_size: 0x%x\n", adev->vm_manager.fragment_size);
+
+ amdgpu_bo_unreserve(root_bo);
+ amdgpu_bo_unref(&root_bo);
+
+ return 0;
+}
+
+static int amdgpu_pt_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, amdgpu_pt_info_read, inode->i_private);
+}
+
+static const struct file_operations amdgpu_pt_info_fops = {
+ .owner = THIS_MODULE,
+ .open = amdgpu_pt_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void amdgpu_debugfs_vm_init(struct drm_file *file)
+{
+ debugfs_create_file("vm_pagetable_info", 0444, file->debugfs_client, file,
+ &amdgpu_pt_info_fops);
+}
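/*
 * Editor's note: since the file is registered on file->debugfs_client, the
 * page-table geometry should be readable per DRM client from userspace,
 * presumably at a path along the lines of
 * /sys/kernel/debug/dri/<minor>/clients/<client>/vm_pagetable_info.
 */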
+
#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
@@ -2140,4 +2169,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
return 0;
}
+void amdgpu_debugfs_vm_init(struct drm_file *file)
+{
+}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index 0425432d8659..e7b3c38e5186 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -33,4 +33,5 @@ void amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_vm_init(struct drm_file *file);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index 7b50741dc097..4e2fe6674db8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -217,13 +217,12 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
drm_printf(&p, "version: " AMDGPU_COREDUMP_VERSION "\n");
drm_printf(&p, "kernel: " UTS_RELEASE "\n");
drm_printf(&p, "module: " KBUILD_MODNAME "\n");
- drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec,
- coredump->reset_time.tv_nsec);
+ drm_printf(&p, "time: %ptSp\n", &coredump->reset_time);
- if (coredump->reset_task_info.pid)
+ if (coredump->reset_task_info.task.pid)
drm_printf(&p, "process_name: %s PID: %d\n",
coredump->reset_task_info.process_name,
- coredump->reset_task_info.pid);
+ coredump->reset_task_info.task.pid);
/* SOC Information */
drm_printf(&p, "\nSOC Information\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e1bab6a96cb6..58c3ffe707d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -71,6 +71,7 @@
#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
+#include "amdgpu_ras_mgr.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"
@@ -95,6 +96,7 @@ MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/cyan_skillfish_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
#define AMDGPU_MAX_RETRY_LIMIT 2
@@ -178,6 +180,12 @@ struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
BIT(AMD_IP_BLOCK_TYPE_PSP)
};
+static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev);
+static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev);
+static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev);
+
+static void amdgpu_device_load_switch_state(struct amdgpu_device *adev);
+
static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
enum amd_ip_block_type block)
{
@@ -232,7 +240,7 @@ static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
{
int ret = 0;
- if (!amdgpu_sriov_vf(adev))
+ if (amdgpu_nbio_is_replay_cnt_supported(adev))
ret = sysfs_create_file(&adev->dev->kobj,
&dev_attr_pcie_replay_count.attr);
@@ -241,7 +249,7 @@ static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
{
- if (!amdgpu_sriov_vf(adev))
+ if (amdgpu_nbio_is_replay_cnt_supported(adev))
sysfs_remove_file(&adev->dev->kobj,
&dev_attr_pcie_replay_count.attr);
}
@@ -411,19 +419,16 @@ static const struct attribute_group amdgpu_board_attrs_group = {
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
-
/**
* amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Returns true if the device is a dGPU with ATPX power control,
* otherwise return false.
*/
-bool amdgpu_device_supports_px(struct drm_device *dev)
+bool amdgpu_device_supports_px(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
-
if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
return true;
return false;
@@ -432,15 +437,13 @@ bool amdgpu_device_supports_px(struct drm_device *dev)
/**
* amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Returns true if the device is a dGPU with ACPI power control,
* otherwise return false.
*/
-bool amdgpu_device_supports_boco(struct drm_device *dev)
+bool amdgpu_device_supports_boco(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
-
if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
return false;
@@ -453,29 +456,24 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
/**
* amdgpu_device_supports_baco - Does the device support BACO
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Return:
* 1 if the device supports BACO;
* 3 if the device supports MACO (only works if BACO is supported)
* otherwise return 0.
*/
-int amdgpu_device_supports_baco(struct drm_device *dev)
+int amdgpu_device_supports_baco(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
-
return amdgpu_asic_supports_baco(adev);
}
void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
{
- struct drm_device *dev;
int bamaco_support;
- dev = adev_to_drm(adev);
-
adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
- bamaco_support = amdgpu_device_supports_baco(dev);
+ bamaco_support = amdgpu_device_supports_baco(adev);
switch (amdgpu_runtime_pm) {
case 2:
@@ -495,10 +493,12 @@ void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
break;
case -1:
case -2:
- if (amdgpu_device_supports_px(dev)) { /* enable PX as runtime mode */
+ if (amdgpu_device_supports_px(adev)) {
+ /* enable PX as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
dev_info(adev->dev, "Using ATPX for runtime pm\n");
- } else if (amdgpu_device_supports_boco(dev)) { /* enable boco as runtime mode */
+ } else if (amdgpu_device_supports_boco(adev)) {
+ /* enable boco as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
dev_info(adev->dev, "Using BOCO for runtime pm\n");
} else {
@@ -547,14 +547,14 @@ no_runtime_pm:
* amdgpu_device_supports_smart_shift - Is the device dGPU with
* smart shift support
*
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
*
* Returns true if the device is a dGPU with Smart Shift support,
* otherwise returns false.
*/
-bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
+bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev)
{
- return (amdgpu_device_supports_boco(dev) &&
+ return (amdgpu_device_supports_boco(adev) &&
amdgpu_acpi_is_power_shift_control_supported());
}
@@ -1288,14 +1288,14 @@ u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
*/
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
+ dev_err(adev->dev, "Invalid callback to read register 0x%04X\n", reg);
BUG();
return 0;
}
static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
+ dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
BUG();
return 0;
}
@@ -1312,15 +1312,17 @@ static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg
*/
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
- DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write register 0x%04X with 0x%08X\n", reg,
+ v);
BUG();
}
static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
{
- DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write register 0x%llX with 0x%08X\n", reg,
+ v);
BUG();
}
@@ -1336,14 +1338,15 @@ static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, ui
*/
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
+ dev_err(adev->dev, "Invalid callback to read 64 bit register 0x%04X\n",
+ reg);
BUG();
return 0;
}
static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
+ dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
BUG();
return 0;
}
@@ -1360,15 +1363,17 @@ static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t r
*/
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
- DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
+ reg, v);
BUG();
}
static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
{
- DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
+ reg, v);
BUG();
}
@@ -1386,8 +1391,9 @@ static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg,
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
uint32_t block, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
- reg, block);
+ dev_err(adev->dev,
+ "Invalid callback to read register 0x%04X in block 0x%04X\n",
+ reg, block);
BUG();
return 0;
}
@@ -1407,8 +1413,9 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
uint32_t block,
uint32_t reg, uint32_t v)
{
- DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
- reg, block, v);
+ dev_err(adev->dev,
+ "Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
+ reg, block, v);
BUG();
}
@@ -1671,9 +1678,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
struct pci_bus *root;
struct resource *res;
+ int max_size, r;
unsigned int i;
u16 cmd;
- int r;
if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
return 0;
@@ -1694,7 +1701,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
- DRM_WARN("System can't access extended configuration space, please check!!\n");
+ dev_warn(
+ adev->dev,
+ "System can't access extended configuration space, please check!!\n");
/* skip if the bios has already enabled large BAR */
if (adev->gmc.real_vram_size &&
@@ -1717,28 +1726,27 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
return 0;
/* Limit the BAR size to what is available */
- rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
- rbar_size);
+ max_size = pci_rebar_get_max_size(adev->pdev, 0);
+ if (max_size < 0)
+ return 0;
+ rbar_size = min(max_size, rbar_size);
/* Disable memory decoding while we change the BAR addresses and size */
pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
pci_write_config_word(adev->pdev, PCI_COMMAND,
cmd & ~PCI_COMMAND_MEMORY);
- /* Free the VRAM and doorbell BAR, we most likely need to move both. */
+ /* Tear down doorbell as resizing will release BARs */
amdgpu_doorbell_fini(adev);
- if (adev->asic_type >= CHIP_BONAIRE)
- pci_release_resource(adev->pdev, 2);
- pci_release_resource(adev->pdev, 0);
-
- r = pci_resize_resource(adev->pdev, 0, rbar_size);
+ r = pci_resize_resource(adev->pdev, 0, rbar_size,
+ (adev->asic_type >= CHIP_BONAIRE) ? 1 << 5
+ : 1 << 2);
if (r == -ENOSPC)
- DRM_INFO("Not enough PCI address space for a large BAR.");
+ dev_info(adev->dev,
+ "Not enough PCI address space for a large BAR.");
else if (r && r != -ENOTSUPP)
- DRM_ERROR("Problem resizing BAR0 (%d).", r);
-
- pci_assign_unassigned_bus_resources(adev->pdev->bus);
+ dev_err(adev->dev, "Problem resizing BAR0 (%d).", r);
/* When the doorbell or fb BAR isn't available we have no chance of
* using the device.
@@ -1838,8 +1846,8 @@ bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
case 0:
return false;
default:
- DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
- amdgpu_seamless);
+ dev_err(adev->dev, "Invalid value for amdgpu.seamless: %d\n",
+ amdgpu_seamless);
return false;
}
@@ -1877,6 +1885,13 @@ static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device
static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)
{
+ /* Enabling ASPM causes random hangs on Tahiti and Oland on Zen4.
+ * It's unclear if this is a platform-specific or GPU-specific issue.
+ * Disable ASPM on SI for the time being.
+ */
+ if (adev->family == AMDGPU_FAMILY_SI)
+ return true;
+
#if IS_ENABLED(CONFIG_X86)
struct cpuinfo_x86 *c = &cpu_data(0);
@@ -2015,7 +2030,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
return;
if (!is_os_64) {
- DRM_WARN("Not 64-bit OS, feature not supported\n");
+ dev_warn(adev->dev, "Not 64-bit OS, feature not supported\n");
goto def_value;
}
si_meminfo(&si);
@@ -2030,7 +2045,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
if (total_memory < dram_size_seven_GB)
goto def_value1;
} else {
- DRM_WARN("Smu memory pool size not supported\n");
+ dev_warn(adev->dev, "Smu memory pool size not supported\n");
goto def_value;
}
adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
@@ -2038,7 +2053,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
return;
def_value1:
- DRM_WARN("No enough system memory\n");
+ dev_warn(adev->dev, "Not enough system memory\n");
def_value:
adev->pm.smu_prv_buffer_size = 0;
}
@@ -2190,7 +2205,8 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
struct drm_device *dev = pci_get_drvdata(pdev);
int r;
- if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
+ if (amdgpu_device_supports_px(drm_to_adev(dev)) &&
+ state == VGA_SWITCHEROO_OFF)
return;
if (state == VGA_SWITCHEROO_ON) {
@@ -2202,12 +2218,13 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
amdgpu_device_load_pci_state(pdev);
r = pci_enable_device(pdev);
if (r)
- DRM_WARN("pci_enable_device failed (%d)\n", r);
+ dev_warn(&pdev->dev, "pci_enable_device failed (%d)\n",
+ r);
amdgpu_device_resume(dev, true);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
- pr_info("switched off\n");
+ dev_info(&pdev->dev, "switched off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
amdgpu_device_prepare(dev);
amdgpu_device_suspend(dev, true);
@@ -2274,8 +2291,9 @@ int amdgpu_device_ip_set_clockgating_state(void *dev,
r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
&adev->ip_blocks[i], state);
if (r)
- DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_clockgating_state of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
return r;
}
@@ -2308,8 +2326,9 @@ int amdgpu_device_ip_set_powergating_state(void *dev,
r = adev->ip_blocks[i].version->funcs->set_powergating_state(
&adev->ip_blocks[i], state);
if (r)
- DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_powergating_state of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
return r;
}
@@ -2371,7 +2390,7 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
}
/**
- * amdgpu_device_ip_is_valid - is the hardware IP enabled
+ * amdgpu_device_ip_is_hw - is the hardware IP enabled
*
* @adev: amdgpu_device pointer
* @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
@@ -2379,6 +2398,27 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
* Check if the hardware IP is enabled or not.
* Returns true if the IP is enabled, false if not.
*/
+bool amdgpu_device_ip_is_hw(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
+{
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (adev->ip_blocks[i].version->type == block_type)
+ return adev->ip_blocks[i].status.hw;
+ }
+ return false;
+}
+
+/**
+ * amdgpu_device_ip_is_valid - is the hardware IP valid
+ *
+ * @adev: amdgpu_device pointer
+ * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ *
+ * Check if the hardware IP is valid or not.
+ * Returns true if the IP is valid, false if not.
+ */
bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
enum amd_ip_block_type block_type)
{
@@ -2439,6 +2479,34 @@ int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
return 1;
}
+static const char *ip_block_names[] = {
+ [AMD_IP_BLOCK_TYPE_COMMON] = "common",
+ [AMD_IP_BLOCK_TYPE_GMC] = "gmc",
+ [AMD_IP_BLOCK_TYPE_IH] = "ih",
+ [AMD_IP_BLOCK_TYPE_SMC] = "smu",
+ [AMD_IP_BLOCK_TYPE_PSP] = "psp",
+ [AMD_IP_BLOCK_TYPE_DCE] = "dce",
+ [AMD_IP_BLOCK_TYPE_GFX] = "gfx",
+ [AMD_IP_BLOCK_TYPE_SDMA] = "sdma",
+ [AMD_IP_BLOCK_TYPE_UVD] = "uvd",
+ [AMD_IP_BLOCK_TYPE_VCE] = "vce",
+ [AMD_IP_BLOCK_TYPE_ACP] = "acp",
+ [AMD_IP_BLOCK_TYPE_VCN] = "vcn",
+ [AMD_IP_BLOCK_TYPE_MES] = "mes",
+ [AMD_IP_BLOCK_TYPE_JPEG] = "jpeg",
+ [AMD_IP_BLOCK_TYPE_VPE] = "vpe",
+ [AMD_IP_BLOCK_TYPE_UMSCH_MM] = "umsch_mm",
+ [AMD_IP_BLOCK_TYPE_ISP] = "isp",
+ [AMD_IP_BLOCK_TYPE_RAS] = "ras",
+};
+
+static const char *ip_block_name(struct amdgpu_device *adev, enum amd_ip_block_type type)
+{
+ int idx = (int)type;
+
+ return idx < ARRAY_SIZE(ip_block_names) ? ip_block_names[idx] : "unknown";
+}
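/*
 * Editor's note: the designated-initializer table above assumes the
 * amd_ip_block_type enum is dense. A gap in the enum would leave a NULL
 * slot that this helper returns as-is; a defensive variant (sketch only)
 * would also test the slot:
 *
 *	if (idx < 0 || idx >= ARRAY_SIZE(ip_block_names) ||
 *	    !ip_block_names[idx])
 *		return "unknown";
 *	return ip_block_names[idx];
 */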
+
/**
* amdgpu_device_ip_block_add
*
@@ -2467,8 +2535,13 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
break;
}
- dev_info(adev->dev, "detected ip block number %d <%s>\n",
- adev->num_ip_blocks, ip_block_version->funcs->name);
+ dev_info(adev->dev, "detected ip block number %d <%s_v%d_%d_%d> (%s)\n",
+ adev->num_ip_blocks,
+ ip_block_name(adev, ip_block_version->type),
+ ip_block_version->major,
+ ip_block_version->minor,
+ ip_block_version->rev,
+ ip_block_version->funcs->name);
adev->ip_blocks[adev->num_ip_blocks].adev = adev;
@@ -2525,9 +2598,11 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
}
}
- DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
- amdgpu_virtual_display, pci_address_name,
- adev->enable_virtual_display, adev->mode_info.num_crtc);
+ dev_info(
+ adev->dev,
+ "virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
+ amdgpu_virtual_display, pci_address_name,
+ adev->enable_virtual_display, adev->mode_info.num_crtc);
kfree(pciaddstr);
}
@@ -2538,8 +2613,9 @@ void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
adev->mode_info.num_crtc = 1;
adev->enable_virtual_display = true;
- DRM_INFO("virtual_display:%d, num_crtc:%d\n",
- adev->enable_virtual_display, adev->mode_info.num_crtc);
+ dev_info(adev->dev, "virtual_display:%d, num_crtc:%d\n",
+ adev->enable_virtual_display,
+ adev->mode_info.num_crtc);
}
}
@@ -2561,9 +2637,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
- if (adev->mman.discovery_bin)
- return 0;
-
switch (adev->asic_type) {
default:
return 0;
@@ -2585,8 +2658,15 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_NAVI12:
+ if (adev->discovery.bin)
+ return 0;
chip_name = "navi12";
break;
+ case CHIP_CYAN_SKILLFISH:
+ if (adev->discovery.bin)
+ return 0;
+ chip_name = "cyan_skillfish";
+ break;
}
err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
@@ -2666,6 +2746,24 @@ out:
return err;
}
+static void amdgpu_uid_init(struct amdgpu_device *adev)
+{
+ /* Initialize the UID for the device */
+ adev->uid_info = kzalloc(sizeof(struct amdgpu_uid), GFP_KERNEL);
+ if (!adev->uid_info) {
+ dev_warn(adev->dev, "Failed to allocate memory for UID\n");
+ return;
+ }
+ adev->uid_info->adev = adev;
+}
+
+static void amdgpu_uid_fini(struct amdgpu_device *adev)
+{
+ /* Free the UID memory */
+ kfree(adev->uid_info);
+ adev->uid_info = NULL;
+}
+
/**
* amdgpu_device_ip_early_init - run early init for hardware IPs
*
@@ -2690,6 +2788,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
r = amdgpu_virt_request_full_gpu(adev, true);
if (r)
return r;
+
+ r = amdgpu_virt_init_critical_region(adev);
+ if (r)
+ return r;
}
switch (adev->asic_type) {
@@ -2773,21 +2875,29 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
+ adev->virt.is_xgmi_node_migrate_enabled = false;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->virt.is_xgmi_node_migrate_enabled =
+ amdgpu_ip_version((adev), GC_HWIP, 0) == IP_VERSION(9, 4, 4);
+ }
+
total = true;
for (i = 0; i < adev->num_ip_blocks; i++) {
ip_block = &adev->ip_blocks[i];
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
- DRM_WARN("disabled ip block: %d <%s>\n",
- i, adev->ip_blocks[i].version->funcs->name);
+ dev_warn(adev->dev, "disabled ip block: %d <%s>\n", i,
+ adev->ip_blocks[i].version->funcs->name);
adev->ip_blocks[i].status.valid = false;
} else if (ip_block->version->funcs->early_init) {
r = ip_block->version->funcs->early_init(ip_block);
if (r == -ENOENT) {
adev->ip_blocks[i].status.valid = false;
} else if (r) {
- DRM_ERROR("early_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "early_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
total = false;
} else {
adev->ip_blocks[i].status.valid = true;
@@ -2841,6 +2951,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (adev->gmc.xgmi.supported)
amdgpu_xgmi_early_init(adev);
+ if (amdgpu_is_multi_aid(adev))
+ amdgpu_uid_init(adev);
ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
if (ip_block->status.valid != false)
amdgpu_amdkfd_device_probe(adev);
@@ -2868,8 +2980,10 @@ static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -2893,8 +3007,9 @@ static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
continue;
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -2932,8 +3047,11 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
} else {
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i]
+ .version->funcs->name,
+ r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -2988,25 +3106,29 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
r = drm_sched_init(&ring->sched, &args);
if (r) {
- DRM_ERROR("Failed to create scheduler on ring %s.\n",
- ring->name);
+ dev_err(adev->dev,
+ "Failed to create scheduler on ring %s.\n",
+ ring->name);
return r;
}
r = amdgpu_uvd_entity_init(adev, ring);
if (r) {
- DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
- ring->name);
+ dev_err(adev->dev,
+ "Failed to create UVD scheduling entity on ring %s.\n",
+ ring->name);
return r;
}
r = amdgpu_vce_entity_init(adev, ring);
if (r) {
- DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
- ring->name);
+ dev_err(adev->dev,
+ "Failed to create VCE scheduling entity on ring %s.\n",
+ ring->name);
return r;
}
}
- amdgpu_xcp_update_partition_sched_list(adev);
+ if (adev->xcp_mgr)
+ amdgpu_xcp_update_partition_sched_list(adev);
return 0;
}
@@ -3038,8 +3160,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->funcs->sw_init) {
r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("sw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "sw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
goto init_failed;
}
}
@@ -3053,7 +3177,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
/* need to do common hw init early so everything is set up for gmc */
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init %d failed %d\n", i, r);
+ dev_err(adev->dev, "hw_init %d failed %d\n", i,
+ r);
goto init_failed;
}
adev->ip_blocks[i].status.hw = true;
@@ -3065,17 +3190,21 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
r = amdgpu_device_mem_scratch_init(adev);
if (r) {
- DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
+ dev_err(adev->dev,
+ "amdgpu_mem_scratch_init failed %d\n",
+ r);
goto init_failed;
}
r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("hw_init %d failed %d\n", i, r);
+ dev_err(adev->dev, "hw_init %d failed %d\n", i,
+ r);
goto init_failed;
}
r = amdgpu_device_wb_init(adev);
if (r) {
- DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
+ dev_err(adev->dev,
+ "amdgpu_device_wb_init failed %d\n", r);
goto init_failed;
}
adev->ip_blocks[i].status.hw = true;
@@ -3087,14 +3216,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_CSA_SIZE);
if (r) {
- DRM_ERROR("allocate CSA failed %d\n", r);
+ dev_err(adev->dev,
+ "allocate CSA failed %d\n", r);
goto init_failed;
}
}
r = amdgpu_seq64_init(adev);
if (r) {
- DRM_ERROR("allocate seq64 failed %d\n", r);
+ dev_err(adev->dev, "allocate seq64 failed %d\n",
+ r);
goto init_failed;
}
}
@@ -3235,6 +3366,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
* always assumed to be lost.
*/
switch (amdgpu_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_LEGACY:
case AMD_RESET_METHOD_LINK:
case AMD_RESET_METHOD_BACO:
case AMD_RESET_METHOD_MODE1:
@@ -3284,8 +3416,10 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i],
state);
if (r) {
- DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_clockgating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
@@ -3311,18 +3445,21 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
continue;
- /* skip CG for VCE/UVD, it's handled specially */
+ /* skip CG for VCE/UVD/VPE, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_powergating_state) {
/* enable powergating to save power */
r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i],
state);
if (r) {
- DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_powergating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
@@ -3349,7 +3486,7 @@ static int amdgpu_device_enable_mgpu_fan_boost(void)
for (i = 0; i < mgpu_info.num_dgpu; i++) {
gpu_ins = &(mgpu_info.gpu_ins[i]);
adev = gpu_ins->adev;
- if (!(adev->flags & AMD_IS_APU) &&
+ if (!(adev->flags & AMD_IS_APU || amdgpu_sriov_multi_vf_mode(adev)) &&
!gpu_ins->mgpu_fan_enabled) {
ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
if (ret)
@@ -3388,8 +3525,10 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->funcs->late_init) {
r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("late_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "late_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
@@ -3398,7 +3537,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
r = amdgpu_ras_late_init(adev);
if (r) {
- DRM_ERROR("amdgpu_ras_late_init failed %d", r);
+ dev_err(adev->dev, "amdgpu_ras_late_init failed %d", r);
return r;
}
@@ -3412,7 +3551,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
r = amdgpu_device_enable_mgpu_fan_boost();
if (r)
- DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+ dev_err(adev->dev, "enable mgpu fan boost failed (%d).\n", r);
/* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
if (amdgpu_passthrough(adev) &&
@@ -3445,7 +3584,9 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
AMDGPU_XGMI_PSTATE_MIN);
if (r) {
- DRM_ERROR("pstate setting failed (%d).\n", r);
+ dev_err(adev->dev,
+ "pstate setting failed (%d).\n",
+ r);
break;
}
}
@@ -3459,17 +3600,19 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block)
{
+ struct amdgpu_device *adev = ip_block->adev;
int r;
if (!ip_block->version->funcs->hw_fini) {
- DRM_ERROR("hw_fini of IP block <%s> not defined\n",
- ip_block->version->funcs->name);
+ dev_err(adev->dev, "hw_fini of IP block <%s> not defined\n",
+ ip_block->version->funcs->name);
} else {
r = ip_block->version->funcs->hw_fini(ip_block);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- ip_block->version->funcs->name, r);
+ dev_dbg(adev->dev,
+ "hw_fini of IP block <%s> failed %d\n",
+ ip_block->version->funcs->name, r);
}
}
@@ -3510,15 +3653,16 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]);
if (r) {
- DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_dbg(adev->dev,
+ "early_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
}
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
- amdgpu_amdkfd_suspend(adev, false);
+ amdgpu_amdkfd_suspend(adev, true);
amdgpu_userq_suspend(adev);
/* Workaround for ASICs need to disable SMC first */
@@ -3533,7 +3677,22 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev)) {
if (amdgpu_virt_release_full_gpu(adev, false))
- DRM_ERROR("failed to release exclusive mode on fini\n");
+ dev_err(adev->dev,
+ "failed to release exclusive mode on fini\n");
+ }
+
+ /*
+ * Driver reload on the APU can fail due to firmware validation because
+ * the PSP is always running, as it is shared across the whole SoC.
+ * This same issue does not occur on dGPU because it has a mechanism
+ * that checks whether the PSP is running. A solution for those issues
+ * in the APU is to trigger a GPU reset, but this should be done during
+ * the unload phase to avoid adding boot latency and screen flicker.
+ */
+ if ((adev->flags & AMD_IS_APU) && !adev->gmc.is_app_apu) {
+ r = amdgpu_asic_reset(adev);
+ if (r)
+ dev_err(adev->dev, "asic reset on %s failed\n", __func__);
}
return 0;
@@ -3581,8 +3740,10 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_dbg(adev->dev,
+ "sw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
}
}
adev->ip_blocks[i].status.sw = false;
@@ -3598,6 +3759,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
}
amdgpu_ras_fini(adev);
+ amdgpu_uid_fini(adev);
return 0;
}
@@ -3615,7 +3777,7 @@ static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
r = amdgpu_ib_ring_tests(adev);
if (r)
- DRM_ERROR("ib ring test failed (%d).\n", r);
+ dev_err(adev->dev, "ib ring test failed (%d).\n", r);
}
static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
@@ -3643,7 +3805,7 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
*/
static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
- int i, r;
+ int i, r, rec;
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
@@ -3664,13 +3826,25 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
continue;
- /* XXX handle errors */
r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
if (r)
- return r;
+ goto unwind;
}
return 0;
+unwind:
+ rec = amdgpu_device_ip_resume_phase3(adev);
+ if (rec)
+ dev_err(adev->dev,
+ "amdgpu_device_ip_resume_phase3 failed during unwind: %d\n",
+ rec);
+
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW);
+
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
+
+ return r;
}
/**
@@ -3686,7 +3860,7 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
*/
static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
{
- int i, r;
+ int i, r, rec;
if (adev->in_s0ix)
amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
@@ -3747,24 +3921,52 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
continue;
- /* XXX handle errors */
r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
- adev->ip_blocks[i].status.hw = false;
+ if (r)
+ goto unwind;
/* handle putting the SMC in the appropriate state */
if (!amdgpu_sriov_vf(adev)) {
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
if (r) {
- DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
- adev->mp1_state, r);
- return r;
+ dev_err(adev->dev,
+ "SMC failed to set mp1 state %d, %d\n",
+ adev->mp1_state, r);
+ goto unwind;
}
}
}
}
return 0;
+unwind:
+ /* suspend phase 2 = resume phase 1 + resume phase 2 */
+ rec = amdgpu_device_ip_resume_phase1(adev);
+ if (rec) {
+ dev_err(adev->dev,
+ "amdgpu_device_ip_resume_phase1 failed during unwind: %d\n",
+ rec);
+ return r;
+ }
+
+ rec = amdgpu_device_fw_loading(adev);
+ if (rec) {
+ dev_err(adev->dev,
+ "amdgpu_device_fw_loading failed during unwind: %d\n",
+ rec);
+ return r;
+ }
+
+ rec = amdgpu_device_ip_resume_phase2(adev);
+ if (rec) {
+ dev_err(adev->dev,
+ "amdgpu_device_ip_resume_phase2 failed during unwind: %d\n",
+ rec);
+ return r;
+ }
+
+ return r;
}
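/*
 * Editor's note: the two unwind paths added above mirror the resume
 * sequence - a failed phase 1 suspend is rolled back with resume phase 3
 * (DCE), while a failed phase 2 suspend is rolled back with resume phase 1,
 * firmware loading and resume phase 2. In both cases the original suspend
 * error 'r' is returned and the recovery result 'rec' is only logged.
 */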
/**
@@ -3778,7 +3980,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
* in each IP into a state suitable for suspend.
* Returns 0 on success, negative error code on failure.
*/
-int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+static int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
int r;
@@ -4041,12 +4243,14 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
/**
* amdgpu_device_asic_has_dc_support - determine if DC supports the asic
*
+ * @pdev : pci device context
* @asic_type: AMD asic type
*
* Check if there is DC (new modesetting infrastructure) support for an asic.
* returns true if DC has support, false if not.
*/
-bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
+bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
+ enum amd_asic_type asic_type)
{
switch (asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
@@ -4060,25 +4264,13 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
- /*
- * We have systems in the wild with these ASICs that require
- * LVDS and VGA support which is not supported with DC.
- *
- * Fallback to the non-DC driver here by default so as not to
- * cause regressions.
- */
-#if defined(CONFIG_DRM_AMD_DC_SI)
- return amdgpu_dc > 0;
-#else
- return false;
-#endif
- case CHIP_BONAIRE:
+ return amdgpu_dc != 0 && IS_ENABLED(CONFIG_DRM_AMD_DC_SI);
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_MULLINS:
/*
* We have systems in the wild with these ASICs that require
- * VGA support which is not supported with DC.
+ * TRAVIS and NUTMEG support, which DC does not provide.
*
* Fallback to the non-DC driver here by default so as not to
* cause regressions.
@@ -4089,7 +4281,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
#else
default:
if (amdgpu_dc > 0)
- DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
+ dev_info_once(&pdev->dev,
+ "Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
return false;
#endif
}
@@ -4108,7 +4302,7 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
(adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
return false;
- return amdgpu_device_asic_has_dc_support(adev->asic_type);
+ return amdgpu_device_asic_has_dc_support(adev->pdev, adev->asic_type);
}
static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
@@ -4130,13 +4324,13 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
task_barrier_enter(&hive->tb);
- adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
+ adev->asic_reset_res = amdgpu_device_baco_enter(adev);
if (adev->asic_reset_res)
goto fail;
task_barrier_exit(&hive->tb);
- adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
+ adev->asic_reset_res = amdgpu_device_baco_exit(adev);
if (adev->asic_reset_res)
goto fail;
@@ -4150,7 +4344,8 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
fail:
if (adev->asic_reset_res)
- DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
+ dev_warn(adev->dev,
+ "ASIC reset failed with error, %d for drm dev, %s",
adev->asic_reset_res, adev_to_drm(adev)->unique);
amdgpu_put_xgmi_hive(hive);
}
@@ -4163,66 +4358,53 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
long timeout;
int ret = 0;
- /*
- * By default timeout for non compute jobs is 10000
- * and 60000 for compute jobs.
- * In SR-IOV or passthrough mode, timeout for compute
- * jobs are 60000 by default.
- */
- adev->gfx_timeout = msecs_to_jiffies(10000);
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- if (amdgpu_sriov_vf(adev))
- adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
- msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
- else
- adev->compute_timeout = msecs_to_jiffies(60000);
+ /* By default, the timeout for all queues is 2 seconds */
+ adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
+ adev->video_timeout = msecs_to_jiffies(2000);
- if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
- while ((timeout_setting = strsep(&input, ",")) &&
- strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
- ret = kstrtol(timeout_setting, 0, &timeout);
- if (ret)
- return ret;
+ if (!strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH))
+ return 0;
- if (timeout == 0) {
- index++;
- continue;
- } else if (timeout < 0) {
- timeout = MAX_SCHEDULE_TIMEOUT;
- dev_warn(adev->dev, "lockup timeout disabled");
- add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
- } else {
- timeout = msecs_to_jiffies(timeout);
- }
+ while ((timeout_setting = strsep(&input, ",")) &&
+ strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
+ ret = kstrtol(timeout_setting, 0, &timeout);
+ if (ret)
+ return ret;
- switch (index++) {
- case 0:
- adev->gfx_timeout = timeout;
- break;
- case 1:
- adev->compute_timeout = timeout;
- break;
- case 2:
- adev->sdma_timeout = timeout;
- break;
- case 3:
- adev->video_timeout = timeout;
- break;
- default:
- break;
- }
+ if (timeout == 0) {
+ index++;
+ continue;
+ } else if (timeout < 0) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ dev_warn(adev->dev, "lockup timeout disabled");
+ add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
+ } else {
+ timeout = msecs_to_jiffies(timeout);
}
- /*
- * There is only one value specified and
- * it should apply to all non-compute jobs.
- */
- if (index == 1) {
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
- adev->compute_timeout = adev->gfx_timeout;
+
+ switch (index++) {
+ case 0:
+ adev->gfx_timeout = timeout;
+ break;
+ case 1:
+ adev->compute_timeout = timeout;
+ break;
+ case 2:
+ adev->sdma_timeout = timeout;
+ break;
+ case 3:
+ adev->video_timeout = timeout;
+ break;
+ default:
+ break;
}
}
+ /* When only one value is specified, apply it to all queues. */
+ if (index == 1)
+ adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
+ adev->video_timeout = timeout;
+
return ret;
}
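+/*
+ * Example of the parsing above (the parameter is amdgpu.lockup_timeout):
+ * up to four comma-separated values are applied in the order
+ * gfx,compute,sdma,video, e.g.
+ *
+ *   modprobe amdgpu lockup_timeout=10000,60000,10000,10000
+ *
+ * A value of 0 keeps the 2 second default for that queue, a negative
+ * value disables the lockup timeout, and a single value is applied to
+ * all four queues.
+ */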
@@ -4274,7 +4456,56 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
adev->gfx.mcbp = true;
if (adev->gfx.mcbp)
- DRM_INFO("MCBP is enabled\n");
+ dev_info(adev->dev, "MCBP is enabled\n");
+}
+
+static int amdgpu_device_sys_interface_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_atombios_sysfs_init(adev);
+ if (r)
+ drm_err(&adev->ddev,
+ "registering atombios sysfs failed (%d).\n", r);
+
+ r = amdgpu_pm_sysfs_init(adev);
+ if (r)
+ dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r);
+
+ r = amdgpu_ucode_sysfs_init(adev);
+ if (r) {
+ adev->ucode_sysfs_en = false;
+ dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r);
+ } else {
+ adev->ucode_sysfs_en = true;
+ }
+
+ r = amdgpu_device_attr_sysfs_init(adev);
+ if (r)
+ dev_err(adev->dev, "Could not create amdgpu device attr\n");
+
+ r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
+ if (r)
+ dev_err(adev->dev,
+ "Could not create amdgpu board attributes\n");
+
+ amdgpu_fru_sysfs_init(adev);
+ amdgpu_reg_state_sysfs_init(adev);
+ amdgpu_xcp_sysfs_init(adev);
+
+ return r;
+}
+
+static void amdgpu_device_sys_interface_fini(struct amdgpu_device *adev)
+{
+ if (adev->pm.sysfs_initialized)
+ amdgpu_pm_sysfs_fini(adev);
+ if (adev->ucode_sysfs_en)
+ amdgpu_ucode_sysfs_fini(adev);
+ amdgpu_device_attr_sysfs_fini(adev);
+ amdgpu_fru_sysfs_fini(adev);
+
+ amdgpu_reg_state_sysfs_fini(adev);
+ amdgpu_xcp_sysfs_fini(adev);
}
/**
@@ -4290,7 +4521,6 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
int amdgpu_device_init(struct amdgpu_device *adev,
uint32_t flags)
{
- struct drm_device *ddev = adev_to_drm(adev);
struct pci_dev *pdev = adev->pdev;
int r, i;
bool px = false;
@@ -4342,9 +4572,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
- DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
- amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
- pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
+ dev_info(adev->dev,
+ "initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+ amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
/* mutex initialization are all done here so we
* can recall function without having locking issues
@@ -4375,7 +4607,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->gfx.userq_sch_mutex);
mutex_init(&adev->gfx.workload_profile_mutex);
mutex_init(&adev->vcn.workload_profile_mutex);
- mutex_init(&adev->userq_mutex);
amdgpu_device_init_apu_flags(adev);
@@ -4403,7 +4634,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&adev->pm.od_kobj_list);
- INIT_LIST_HEAD(&adev->userq_mgr_list);
+ xa_init(&adev->userq_doorbell_xa);
INIT_DELAYED_WORK(&adev->delayed_init_work,
amdgpu_device_delayed_init_work_handler);
@@ -4426,6 +4657,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
+ INIT_WORK(&adev->userq_reset_work, amdgpu_userq_reset_work);
adev->gfx.gfx_off_req_count = 1;
adev->gfx.gfx_off_residency = 0;
@@ -4461,8 +4693,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (!adev->rmmio)
return -ENOMEM;
- DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
- DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
+ dev_info(adev->dev, "register mmio base: 0x%08X\n",
+ (uint32_t)adev->rmmio_base);
+ dev_info(adev->dev, "register mmio size: %u\n",
+ (unsigned int)adev->rmmio_size);
/*
* Reset domain needs to be present early, before XGMI hive discovered
@@ -4599,7 +4833,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = -EINVAL;
goto failed;
}
- DRM_INFO("GPU posting now...\n");
+ dev_info(adev->dev, "GPU posting now...\n");
r = amdgpu_device_asic_init(adev);
if (r) {
dev_err(adev->dev, "gpu post error!\n");
@@ -4697,39 +4931,14 @@ fence_driver_init:
flush_delayed_work(&adev->delayed_init_work);
}
+ if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
+ amdgpu_xgmi_reset_on_init(adev);
/*
* Place those sysfs registering after `late_init`. As some of those
* operations performed in `late_init` might affect the sysfs
* interfaces creating.
*/
- r = amdgpu_atombios_sysfs_init(adev);
- if (r)
- drm_err(&adev->ddev,
- "registering atombios sysfs failed (%d).\n", r);
-
- r = amdgpu_pm_sysfs_init(adev);
- if (r)
- DRM_ERROR("registering pm sysfs failed (%d).\n", r);
-
- r = amdgpu_ucode_sysfs_init(adev);
- if (r) {
- adev->ucode_sysfs_en = false;
- DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
- } else
- adev->ucode_sysfs_en = true;
-
- r = amdgpu_device_attr_sysfs_init(adev);
- if (r)
- dev_err(adev->dev, "Could not create amdgpu device attr\n");
-
- r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
- if (r)
- dev_err(adev->dev,
- "Could not create amdgpu board attributes\n");
-
- amdgpu_fru_sysfs_init(adev);
- amdgpu_reg_state_sysfs_init(adev);
- amdgpu_xcp_sysfs_init(adev);
+ r = amdgpu_device_sys_interface_init(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
r = amdgpu_pmu_init(adev);
@@ -4747,7 +4956,7 @@ fence_driver_init:
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
- px = amdgpu_device_supports_px(ddev);
+ px = amdgpu_device_supports_px(adev);
if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL)))
@@ -4757,9 +4966,6 @@ fence_driver_init:
if (px)
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
- if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
- amdgpu_xgmi_reset_on_init(adev);
-
amdgpu_device_check_iommu_direct_map(adev);
adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
@@ -4851,15 +5057,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
}
amdgpu_fence_driver_hw_fini(adev);
- if (adev->pm.sysfs_initialized)
- amdgpu_pm_sysfs_fini(adev);
- if (adev->ucode_sysfs_en)
- amdgpu_ucode_sysfs_fini(adev);
- amdgpu_device_attr_sysfs_fini(adev);
- amdgpu_fru_sysfs_fini(adev);
-
- amdgpu_reg_state_sysfs_fini(adev);
- amdgpu_xcp_sysfs_fini(adev);
+ amdgpu_device_sys_interface_fini(adev);
/* disable ras feature must before hw fini */
amdgpu_ras_pre_fini(adev);
@@ -4913,7 +5111,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
kfree(adev->xcp_mgr);
adev->xcp_mgr = NULL;
- px = amdgpu_device_supports_px(adev_to_drm(adev));
+ px = amdgpu_device_supports_px(adev);
if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL)))
@@ -4934,14 +5132,15 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
- if (adev->mman.discovery_bin)
+ if (adev->discovery.bin)
amdgpu_discovery_fini(adev);
amdgpu_reset_put_reset_domain(adev->reset_domain);
adev->reset_domain = NULL;
kfree(adev->pci_state);
-
+ kfree(adev->pcie_reset_ctx.swds_pcistate);
+ kfree(adev->pcie_reset_ctx.swus_pcistate);
}
/**
@@ -4961,9 +5160,21 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
if (!adev->in_s4 && (adev->flags & AMD_IS_APU))
return 0;
+ /* No need to evict when going to S5 through S4 callbacks */
+ if (system_state == SYSTEM_POWER_OFF)
+ return 0;
+
ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
- if (ret)
- DRM_WARN("evicting device resources failed\n");
+ if (ret) {
+ dev_warn(adev->dev, "evicting device resources failed\n");
+ return ret;
+ }
+
+ if (adev->in_s4) {
+ ret = ttm_device_prepare_hibernation(&adev->mman.bdev);
+ if (ret)
+ dev_err(adev->dev, "prepare hibernation failed, %d\n", ret);
+ }
return ret;
}
@@ -5035,6 +5246,28 @@ int amdgpu_device_prepare(struct drm_device *dev)
}
/**
+ * amdgpu_device_complete - complete power state transition
+ *
+ * @dev: drm dev pointer
+ *
+ * Undo the changes from amdgpu_device_prepare. This will be
+ * called on all resume transitions, including those that failed.
+ */
+void amdgpu_device_complete(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ if (!adev->ip_blocks[i].version->funcs->complete)
+ continue;
+ adev->ip_blocks[i].version->funcs->complete(&adev->ip_blocks[i]);
+ }
+}
+
+/**
* amdgpu_device_suspend - initiate device suspend
*
* @dev: drm dev pointer
@@ -5047,7 +5280,7 @@ int amdgpu_device_prepare(struct drm_device *dev)
int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
{
struct amdgpu_device *adev = drm_to_adev(dev);
- int r = 0;
+ int r, rec;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -5055,46 +5288,125 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
adev->in_suspend = true;
if (amdgpu_sriov_vf(adev)) {
+ if (!adev->in_runpm)
+ amdgpu_amdkfd_suspend_process(adev);
amdgpu_virt_fini_data_exchange(adev);
r = amdgpu_virt_request_full_gpu(adev, false);
if (r)
return r;
}
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
- DRM_WARN("smart shift update failed\n");
+ r = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3);
+ if (r)
+ goto unwind_sriov;
if (notify_clients)
- drm_client_dev_suspend(adev_to_drm(adev), false);
+ drm_client_dev_suspend(adev_to_drm(adev));
cancel_delayed_work_sync(&adev->delayed_init_work);
amdgpu_ras_suspend(adev);
- amdgpu_device_ip_suspend_phase1(adev);
+ r = amdgpu_device_ip_suspend_phase1(adev);
+ if (r)
+ goto unwind_smartshift;
- if (!adev->in_s0ix) {
- amdgpu_amdkfd_suspend(adev, adev->in_runpm);
- amdgpu_userq_suspend(adev);
- }
+ amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ r = amdgpu_userq_suspend(adev);
+ if (r)
+ goto unwind_ip_phase1;
r = amdgpu_device_evict_resources(adev);
if (r)
- return r;
+ goto unwind_userq;
amdgpu_ttm_set_buffer_funcs_status(adev, false);
amdgpu_fence_driver_hw_fini(adev);
- amdgpu_device_ip_suspend_phase2(adev);
+ r = amdgpu_device_ip_suspend_phase2(adev);
+ if (r)
+ goto unwind_evict;
if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, false);
- r = amdgpu_dpm_notify_rlc_state(adev, false);
+ return 0;
+
+unwind_evict:
+ if (adev->mman.buffer_funcs_ring->sched.ready)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ amdgpu_fence_driver_hw_init(adev);
+
+unwind_userq:
+ rec = amdgpu_userq_resume(adev);
+ if (rec) {
+ dev_warn(adev->dev, "failed to re-initialize user queues: %d\n", rec);
+ return r;
+ }
+ rec = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ if (rec) {
+ dev_warn(adev->dev, "failed to re-initialize kfd: %d\n", rec);
+ return r;
+ }
+
+unwind_ip_phase1:
+ /* suspend phase 1 = resume phase 3 */
+ rec = amdgpu_device_ip_resume_phase3(adev);
+ if (rec) {
+ dev_warn(adev->dev, "failed to re-initialize IPs phase1: %d\n", rec);
+ return r;
+ }
+
+unwind_smartshift:
+ rec = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0);
+ if (rec) {
+ dev_warn(adev->dev, "failed to re-update smart shift: %d\n", rec);
+ return r;
+ }
+
+ if (notify_clients)
+ drm_client_dev_resume(adev_to_drm(adev));
+
+ amdgpu_ras_resume(adev);
+
+unwind_sriov:
+ if (amdgpu_sriov_vf(adev)) {
+ rec = amdgpu_virt_request_full_gpu(adev, true);
+ if (rec) {
+ dev_warn(adev->dev, "failed to reinitialize sriov: %d\n", rec);
+ return r;
+ }
+ }
+
+ adev->in_suspend = adev->in_s0ix = adev->in_s3 = false;
+
+ return r;
+}
+
+static inline int amdgpu_virt_resume(struct amdgpu_device *adev)
+{
+ int r;
+ unsigned int prev_physical_node_id = adev->gmc.xgmi.physical_node_id;
+
+ /* During VM resume, QEMU's programming of the VF MSIX table (register
+ * GFXMSIX_VECT0_ADDR_LO) may not work: the access can be blocked by
+ * nBIF protection while the VF is not in exclusive access mode. By this
+ * point exclusive access is enabled, so disable/enable MSIX so that
+ * QEMU reprograms the MSIX table.
+ */
+ amdgpu_restore_msix(adev);
+
+ r = adev->gfxhub.funcs->get_xgmi_info(adev);
if (r)
return r;
+ dev_info(adev->dev, "xgmi node, old id %d, new id %d\n",
+ prev_physical_node_id, adev->gmc.xgmi.physical_node_id);
+
+ adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
+ adev->vm_manager.vram_base_offset +=
+ adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
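+ /* i.e. vram_base_offset = mc_fb_offset +
+ * physical_node_id * node_segment_size, recomputed because the
+ * physical node id may have changed across the VF migration.
+ */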
+
return 0;
}
@@ -5119,6 +5431,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
return r;
}
+ if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
+ r = amdgpu_virt_resume(adev);
+ if (r)
+ goto exit;
+ }
+
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -5139,15 +5457,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
goto exit;
}
- if (!adev->in_s0ix) {
- r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
- if (r)
- goto exit;
+ r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ if (r)
+ goto exit;
- r = amdgpu_userq_resume(adev);
- if (r)
- goto exit;
- }
+ r = amdgpu_userq_resume(adev);
+ if (r)
+ goto exit;
r = amdgpu_device_ip_late_init(adev);
if (r)
@@ -5159,6 +5475,9 @@ exit:
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
+
+ if (!r && !adev->in_runpm)
+ r = amdgpu_amdkfd_resume_process(adev);
}
if (r)
@@ -5168,7 +5487,7 @@ exit:
flush_delayed_work(&adev->delayed_init_work);
if (notify_clients)
- drm_client_dev_resume(adev_to_drm(adev), false);
+ drm_client_dev_resume(adev_to_drm(adev));
amdgpu_ras_resume(adev);
@@ -5193,10 +5512,12 @@ exit:
dev->dev->power.disable_depth--;
#endif
}
+
+ amdgpu_vram_mgr_clear_reset_blocks(adev);
adev->in_suspend = false;
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
- DRM_WARN("smart shift update failed\n");
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0))
+ dev_warn(adev->dev, "smart shift update failed\n");
return 0;
}
@@ -5581,7 +5902,7 @@ int amdgpu_device_link_reset(struct amdgpu_device *adev)
dev_info(adev->dev, "GPU link reset\n");
- if (!adev->pcie_reset_ctx.occurs_dpc)
+ if (!amdgpu_reset_in_dpc(adev))
ret = amdgpu_dpm_link_reset(adev);
if (ret)
@@ -5622,11 +5943,6 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (!amdgpu_ring_sched_ready(ring))
continue;
- /* Clear job fence from fence drv to avoid force_completion
- * leave NULL and vm flush fence in fence drv
- */
- amdgpu_fence_driver_clear_job_fences(ring);
-
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
}
@@ -5710,6 +6026,7 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
amdgpu_set_init_level(tmp_adev, init_level);
if (full_reset) {
/* post card */
+ amdgpu_reset_set_dpc_status(tmp_adev, false);
amdgpu_ras_clear_err_state(tmp_adev);
r = amdgpu_device_asic_init(tmp_adev);
if (r) {
@@ -5727,7 +6044,9 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
if (vram_lost) {
- DRM_INFO("VRAM is lost due to GPU reset!\n");
+ dev_info(tmp_adev->dev,
+ "VRAM is lost due to GPU reset!\n");
amdgpu_inc_vram_lost(tmp_adev);
}
@@ -5768,7 +6087,11 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
if (r)
goto out;
- drm_client_dev_resume(adev_to_drm(tmp_adev), false);
+ r = amdgpu_userq_post_reset(tmp_adev, vram_lost);
+ if (r)
+ goto out;
+
+ drm_client_dev_resume(adev_to_drm(tmp_adev));
/*
* The GPU enters bad state once faulty pages
@@ -5990,6 +6313,7 @@ static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev))
cancel_work(&adev->reset_work);
#endif
+ cancel_work(&adev->userq_reset_work);
if (adev->kfd.dev)
cancel_work(&adev->kfd.reset_work);
@@ -6006,29 +6330,19 @@ static int amdgpu_device_health_check(struct list_head *device_list_handle)
{
struct amdgpu_device *tmp_adev;
int ret = 0;
- u32 status;
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- pci_read_config_dword(tmp_adev->pdev, PCI_COMMAND, &status);
- if (PCI_POSSIBLE_ERROR(status)) {
- dev_err(tmp_adev->dev, "device lost from bus!");
- ret = -ENODEV;
- }
+ ret |= amdgpu_device_bus_status_check(tmp_adev);
}
return ret;
}
-static int amdgpu_device_halt_activities(struct amdgpu_device *adev,
- struct amdgpu_job *job,
- struct amdgpu_reset_context *reset_context,
- struct list_head *device_list,
- struct amdgpu_hive_info *hive,
- bool need_emergency_restart)
+static void amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ struct amdgpu_hive_info *hive)
{
- struct list_head *device_list_handle = NULL;
struct amdgpu_device *tmp_adev = NULL;
- int i, r = 0;
/*
* Build list of devices to reset.
@@ -6040,31 +6354,52 @@ static int amdgpu_device_halt_activities(struct amdgpu_device *adev,
list_add_tail(&tmp_adev->reset_list, device_list);
if (adev->shutdown)
tmp_adev->shutdown = true;
- if (adev->pcie_reset_ctx.occurs_dpc)
+ if (amdgpu_reset_in_dpc(adev))
tmp_adev->pcie_reset_ctx.in_link_reset = true;
}
if (!list_is_first(&adev->reset_list, device_list))
list_rotate_to_front(&adev->reset_list, device_list);
- device_list_handle = device_list;
} else {
list_add_tail(&adev->reset_list, device_list);
- device_list_handle = device_list;
}
+}
- if (!amdgpu_sriov_vf(adev) && (!adev->pcie_reset_ctx.occurs_dpc)) {
- r = amdgpu_device_health_check(device_list_handle);
- if (r)
- return r;
- }
+static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
- /* We need to lock reset domain only once both for XGMI and single device */
- tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
- reset_list);
+ if (list_empty(device_list))
+ return;
+ tmp_adev =
+ list_first_entry(device_list, struct amdgpu_device, reset_list);
amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
+}
- /* block all schedulers and reset given job's ring */
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+static void amdgpu_device_recovery_put_reset_lock(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ if (list_empty(device_list))
+ return;
+ tmp_adev =
+ list_first_entry(device_list, struct amdgpu_device, reset_list);
+ amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+}
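+
+/*
+ * Locking via the first list entry is sufficient: all devices in an
+ * XGMI hive share a single reset domain, and a standalone device is
+ * the only entry in its list (see the "lock reset domain only once"
+ * comment at the call site).
+ */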
+
+static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ struct amdgpu_reset_context *reset_context,
+ struct list_head *device_list,
+ struct amdgpu_hive_info *hive,
+ bool need_emergency_restart)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int i;
+
+ /* block all schedulers and reset given job's ring */
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
amdgpu_device_set_mp1_state(tmp_adev);
/*
@@ -6092,14 +6427,15 @@ static int amdgpu_device_halt_activities(struct amdgpu_device *adev,
*/
amdgpu_unregister_gpu_instance(tmp_adev);
- drm_client_dev_suspend(adev_to_drm(tmp_adev), false);
+ drm_client_dev_suspend(adev_to_drm(tmp_adev));
/* disable ras on ALL IPs */
- if (!need_emergency_restart &&
- (!adev->pcie_reset_ctx.occurs_dpc) &&
- amdgpu_device_ip_need_full_reset(tmp_adev))
+ if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) &&
+ amdgpu_device_ip_need_full_reset(tmp_adev))
amdgpu_ras_suspend(tmp_adev);
+ amdgpu_userq_pre_reset(tmp_adev);
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
@@ -6113,8 +6449,6 @@ static int amdgpu_device_halt_activities(struct amdgpu_device *adev,
}
atomic_inc(&tmp_adev->gpu_reset_counter);
}
-
- return r;
}
static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
@@ -6127,11 +6461,7 @@ static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
retry: /* Rest of adevs pre asic reset from XGMI hive. */
list_for_each_entry(tmp_adev, device_list, reset_list) {
- if (adev->pcie_reset_ctx.occurs_dpc)
- tmp_adev->no_hw_access = true;
r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
- if (adev->pcie_reset_ctx.occurs_dpc)
- tmp_adev->no_hw_access = false;
/*TODO Should we stop ?*/
if (r) {
dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
@@ -6202,25 +6532,32 @@ static int amdgpu_device_sched_resume(struct list_head *device_list,
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
- if (tmp_adev->asic_reset_res)
- r = tmp_adev->asic_reset_res;
-
- tmp_adev->asic_reset_res = 0;
-
- if (r) {
+ if (tmp_adev->asic_reset_res) {
/* bad news, how to tell it to userspace ?
* for ras error, we should report GPU bad status instead of
* reset failure
*/
if (reset_context->src != AMDGPU_RESET_SRC_RAS ||
!amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
- dev_info(tmp_adev->dev, "GPU reset(%d) failed\n",
- atomic_read(&tmp_adev->gpu_reset_counter));
- amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+ dev_info(tmp_adev->dev,
+ "GPU reset(%d) failed with error %d\n",
+ atomic_read(&tmp_adev->gpu_reset_counter),
+ tmp_adev->asic_reset_res);
+ amdgpu_vf_error_put(tmp_adev,
+ AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0,
+ tmp_adev->asic_reset_res);
+ if (!r)
+ r = tmp_adev->asic_reset_res;
+ tmp_adev->asic_reset_res = 0;
} else {
- dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
- if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
- DRM_WARN("smart shift update failed\n");
+ dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n",
+ atomic_read(&tmp_adev->gpu_reset_counter));
+ if (amdgpu_acpi_smart_shift_update(tmp_adev,
+ AMDGPU_SS_DEV_D0))
+ dev_warn(tmp_adev->dev,
+ "smart shift update failed\n");
}
}
@@ -6252,11 +6589,6 @@ static void amdgpu_device_gpu_resume(struct amdgpu_device *adev,
amdgpu_ras_set_error_query_ready(tmp_adev, true);
}
-
- tmp_adev = list_first_entry(device_list, struct amdgpu_device,
- reset_list);
- amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
-
}
@@ -6306,14 +6638,15 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*/
if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
amdgpu_ras_get_context(adev)->reboot) {
- DRM_WARN("Emergency reboot.");
+ dev_warn(adev->dev, "Emergency reboot.");
ksys_sync_helper();
emergency_restart();
}
- dev_info(adev->dev, "GPU %s begin!\n",
- need_emergency_restart ? "jobs stop":"reset");
+ dev_info(adev->dev, "GPU %s begin!. Source: %d\n",
+ need_emergency_restart ? "jobs stop" : "reset",
+ reset_context->src);
if (!amdgpu_sriov_vf(adev))
hive = amdgpu_get_xgmi_hive(adev);
@@ -6324,11 +6657,22 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
reset_context->hive = hive;
INIT_LIST_HEAD(&device_list);
- r = amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
- hive, need_emergency_restart);
- if (r)
- goto end_reset;
+ amdgpu_device_recovery_prepare(adev, &device_list, hive);
+ if (!amdgpu_sriov_vf(adev)) {
+ r = amdgpu_device_health_check(&device_list);
+ if (r)
+ goto end_reset;
+ }
+
+ /* Cannot be called after locking reset domain */
+ amdgpu_ras_pre_reset(adev, &device_list);
+
+ /* We need to lock reset domain only once both for XGMI and single device */
+ amdgpu_device_recovery_get_reset_lock(adev, &device_list);
+
+ amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
+ hive, need_emergency_restart);
if (need_emergency_restart)
goto skip_sched_resume;
/*
@@ -6337,7 +6681,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*
* job->base holds a reference to parent fence
*/
- if (job && dma_fence_is_signaled(&job->hw_fence)) {
+ if (job && dma_fence_is_signaled(&job->hw_fence->base)) {
job_signaled = true;
dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
goto skip_hw_reset;
@@ -6345,13 +6689,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
r = amdgpu_device_asic_reset(adev, &device_list, reset_context);
if (r)
- goto end_reset;
+ goto reset_unlock;
skip_hw_reset:
r = amdgpu_device_sched_resume(&device_list, reset_context, job_signaled);
if (r)
- goto end_reset;
+ goto reset_unlock;
skip_sched_resume:
amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
+reset_unlock:
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
+ amdgpu_ras_post_reset(adev, &device_list);
end_reset:
if (hive) {
mutex_unlock(&hive->hive_lock);
@@ -6363,8 +6710,17 @@ end_reset:
atomic_set(&adev->reset_domain->reset_res, r);
- if (!r)
- drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE);
+ if (!r) {
+ struct amdgpu_task_info *ti = NULL;
+
+ if (job)
+ ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
+
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE,
+ ti ? &ti->task : NULL);
+
+ amdgpu_vm_put_task_info(ti);
+ }
return r;
}
@@ -6683,12 +7039,11 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
#endif
}
-int amdgpu_device_baco_enter(struct drm_device *dev)
+int amdgpu_device_baco_enter(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- if (!amdgpu_device_supports_baco(dev))
+ if (!amdgpu_device_supports_baco(adev))
return -ENOTSUPP;
if (ras && adev->ras_enabled &&
@@ -6698,13 +7053,12 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
return amdgpu_dpm_baco_enter(adev);
}
-int amdgpu_device_baco_exit(struct drm_device *dev)
+int amdgpu_device_baco_exit(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int ret = 0;
- if (!amdgpu_device_supports_baco(dev))
+ if (!amdgpu_device_supports_baco(adev))
return -ENOTSUPP;
ret = amdgpu_dpm_baco_exit(adev);
@@ -6735,18 +7089,13 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ struct amdgpu_hive_info *hive __free(xgmi_put_hive) =
+ amdgpu_get_xgmi_hive(adev);
struct amdgpu_reset_context reset_context;
struct list_head device_list;
- int r = 0;
dev_info(adev->dev, "PCI error: detected callback!!\n");
- if (!amdgpu_dpm_is_link_reset_supported(adev)) {
- dev_warn(adev->dev, "No support for XGMI hive yet...\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
adev->pci_channel_state = state;
switch (state) {
@@ -6756,21 +7105,32 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
case pci_channel_io_frozen:
/* Fatal error, prepare for slot reset */
dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state);
+ if (hive) {
+ /* Hive devices must support FW-based link reset
+ * on the other devices; if not, bail out.
+ */
+ if (!amdgpu_dpm_is_link_reset_supported(adev)) {
+ dev_warn(adev->dev,
+ "No support for XGMI hive yet...\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ /* Set the DPC status only if the device is part of
+ * a hive. Non-hive devices should be able to recover
+ * after a link reset.
+ */
+ amdgpu_reset_set_dpc_status(adev, true);
- if (hive)
mutex_lock(&hive->hive_lock);
- adev->pcie_reset_ctx.occurs_dpc = true;
+ }
memset(&reset_context, 0, sizeof(reset_context));
INIT_LIST_HEAD(&device_list);
- r = amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
- hive, false);
- if (hive) {
+ amdgpu_device_recovery_prepare(adev, &device_list, hive);
+ amdgpu_device_recovery_get_reset_lock(adev, &device_list);
+ amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
+ hive, false);
+ if (hive)
mutex_unlock(&hive->hive_lock);
- amdgpu_put_xgmi_hive(hive);
- }
- if (r)
- return PCI_ERS_RESULT_DISCONNECT;
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
/* Permanent error, prepare for device removal */
@@ -6818,22 +7178,34 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
struct amdgpu_device *tmp_adev;
struct amdgpu_hive_info *hive;
struct list_head device_list;
- int r = 0, i;
+ struct pci_dev *link_dev;
+ int r = 0, i, timeout;
u32 memsize;
-
- /* PCI error slot reset should be skipped During RAS recovery */
- if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
- amdgpu_ras_in_recovery(adev))
- return PCI_ERS_RESULT_RECOVERED;
+ u16 status;
dev_info(adev->dev, "PCI error: slot reset callback!!\n");
memset(&reset_context, 0, sizeof(reset_context));
- /* wait for asic to come out of reset */
- msleep(700);
+ if (adev->pcie_reset_ctx.swus)
+ link_dev = adev->pcie_reset_ctx.swus;
+ else
+ link_dev = adev->pdev;
+ /* wait for asic to come out of reset, timeout = 10s */
+ timeout = 10000;
+ do {
+ usleep_range(10000, 10500);
+ r = pci_read_config_word(link_dev, PCI_VENDOR_ID, &status);
+ timeout -= 10;
+ } while (timeout > 0 && (status != PCI_VENDOR_ID_ATI) &&
+ (status != PCI_VENDOR_ID_AMD));
+
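+ /* An absent device reads back all 1s in config space, so keep
+ * polling until the vendor ID reports ATI or AMD again (the IDs
+ * these GPUs and their bridges enumerate with), within the 10s
+ * budget above.
+ */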
+ if ((status != PCI_VENDOR_ID_ATI) && (status != PCI_VENDOR_ID_AMD)) {
+ r = -ETIME;
+ goto out;
+ }
+ amdgpu_device_load_switch_state(adev);
/* Restore PCI confspace */
amdgpu_device_load_pci_state(pdev);
@@ -6880,8 +7252,8 @@ out:
if (hive) {
list_for_each_entry(tmp_adev, &device_list, reset_list)
amdgpu_device_unset_mp1_state(tmp_adev);
- amdgpu_device_unlock_reset_domain(adev->reset_domain);
}
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
}
if (hive) {
@@ -6927,7 +7299,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
amdgpu_device_sched_resume(&device_list, NULL, NULL);
amdgpu_device_gpu_resume(adev, &device_list, false);
- adev->pcie_reset_ctx.occurs_dpc = false;
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
if (hive) {
mutex_unlock(&hive->hive_lock);
@@ -6935,6 +7307,65 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
}
}
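+
+/*
+ * The helpers below save and restore the config space of the PCIe
+ * switch above the GPU (SWUS/SWDS: the switch upstream/downstream
+ * ports of AMD's on-board bridge), so a DPC-triggered link reset can
+ * restore the whole sub-hierarchy rather than just the endpoint.
+ */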
+static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev)
+{
+ struct pci_dev *swus, *swds;
+ int r;
+
+ swds = pci_upstream_bridge(adev->pdev);
+ if (!swds || swds->vendor != PCI_VENDOR_ID_ATI ||
+ pci_pcie_type(swds) != PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+ swus = pci_upstream_bridge(swds);
+ if (!swus ||
+ (swus->vendor != PCI_VENDOR_ID_ATI &&
+ swus->vendor != PCI_VENDOR_ID_AMD) ||
+ pci_pcie_type(swus) != PCI_EXP_TYPE_UPSTREAM)
+ return;
+
+ /* If already saved, return */
+ if (adev->pcie_reset_ctx.swus)
+ return;
+ /* Upstream bridge is ATI, assume it's SWUS/DS architecture */
+ r = pci_save_state(swds);
+ if (r)
+ return;
+ adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(swds);
+
+ r = pci_save_state(swus);
+ if (r)
+ return;
+ adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(swus);
+
+ adev->pcie_reset_ctx.swus = swus;
+}
+
+static void amdgpu_device_load_switch_state(struct amdgpu_device *adev)
+{
+ struct pci_dev *pdev;
+ int r;
+
+ if (!adev->pcie_reset_ctx.swds_pcistate ||
+ !adev->pcie_reset_ctx.swus_pcistate)
+ return;
+
+ pdev = adev->pcie_reset_ctx.swus;
+ r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swus_pcistate);
+ if (!r) {
+ pci_restore_state(pdev);
+ } else {
+ dev_warn(adev->dev, "Failed to load SWUS state, err:%d\n", r);
+ return;
+ }
+
+ pdev = pci_upstream_bridge(adev->pdev);
+ r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swds_pcistate);
+ if (!r)
+ pci_restore_state(pdev);
+ else
+ dev_warn(adev->dev, "Failed to load SWDS state, err:%d\n", r);
+}
+
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -6951,14 +7382,16 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
adev->pci_state = pci_store_saved_state(pdev);
if (!adev->pci_state) {
- DRM_ERROR("Failed to store PCI saved state");
+ dev_err(adev->dev, "Failed to store PCI saved state");
return false;
}
} else {
- DRM_WARN("Failed to save PCI state, err:%d\n", r);
+ dev_warn(adev->dev, "Failed to save PCI state, err:%d\n", r);
return false;
}
+ amdgpu_device_cache_switch_state(adev);
+
return true;
}
@@ -6976,7 +7409,7 @@ bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
if (!r) {
pci_restore_state(pdev);
} else {
- DRM_WARN("Failed to load PCI state, err:%d\n", r);
+ dev_warn(adev->dev, "Failed to load PCI state, err:%d\n", r);
return false;
}
@@ -6993,10 +7426,17 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
if (adev->gmc.xgmi.connected_to_cpu)
return;
- if (ring && ring->funcs->emit_hdp_flush)
+ if (ring && ring->funcs->emit_hdp_flush) {
amdgpu_ring_emit_hdp_flush(ring);
- else
- amdgpu_asic_flush_hdp(adev, ring);
+ return;
+ }
+
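+ /* No ring to emit on: a VF in SR-IOV runtime mode cannot touch the
+ * HDP registers directly, so try the KIQ-based flush first and fall
+ * back to the direct HDP callback only if that fails.
+ */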
+ if (!ring && amdgpu_sriov_runtime(adev)) {
+ if (!amdgpu_kiq_hdp_flush(adev))
+ return;
+ }
+
+ amdgpu_hdp_flush(adev, ring);
}
void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
@@ -7009,7 +7449,7 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
if (adev->gmc.xgmi.connected_to_cpu)
return;
- amdgpu_asic_invalidate_hdp(adev, ring);
+ amdgpu_hdp_invalidate(adev, ring);
}
int amdgpu_in_reset(struct amdgpu_device *adev)
@@ -7222,7 +7662,7 @@ struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
dep = amdgpu_sync_peek_fence(&isolation->prev, ring);
r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT);
if (r)
- DRM_WARN("OOM tracking isolation\n");
+ dev_warn(adev->dev, "OOM tracking isolation\n");
out_grab_ref:
dma_fence_get(dep);
@@ -7290,9 +7730,11 @@ uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
tmp_ = RREG32(reg_addr);
loop--;
if (!loop) {
- DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn",
- inst, reg_name, (uint32_t)expected_value,
- (uint32_t)(tmp_ & (mask)));
+ dev_warn(adev->dev,
+ "Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n",
+ inst, reg_name, (uint32_t)expected_value,
+ (uint32_t)(tmp_ & (mask)));
ret = -ETIMEDOUT;
break;
}
@@ -7343,3 +7785,53 @@ ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset)
size += sysfs_emit_at(buf, size, "\n");
return size;
}
+
+void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst,
+ uint64_t uid)
+{
+ if (!uid_info)
+ return;
+
+ if (type >= AMDGPU_UID_TYPE_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
+ type);
+ return;
+ }
+
+ if (inst >= AMDGPU_UID_INST_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
+ inst);
+ return;
+ }
+
+ if (uid_info->uid[type][inst] != 0) {
+ dev_warn_once(uid_info->adev->dev,
+ "Overwriting existing UID %llu for type %d instance %d\n",
+ uid_info->uid[type][inst], type, inst);
+ }
+
+ uid_info->uid[type][inst] = uid;
+}
+
+u64 amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst)
+{
+ if (!uid_info)
+ return 0;
+
+ if (type >= AMDGPU_UID_TYPE_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
+ type);
+ return 0;
+ }
+
+ if (inst >= AMDGPU_UID_INST_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
+ inst);
+ return 0;
+ }
+
+ return uid_info->uid[type][inst];
+}
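+
+/*
+ * Illustrative pairing of the two helpers above ("type" and "inst"
+ * stand in for any valid enum amdgpu_uid_type value and instance
+ * index):
+ *
+ *   amdgpu_device_set_uid(uid_info, type, inst, uid);
+ *   ...
+ *   uid = amdgpu_device_get_uid(uid_info, type, inst);
+ *
+ * Both helpers treat a NULL uid_info as "no UID support" and do their
+ * own bounds checking, so callers do not have to.
+ */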
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index a0e9bf9b2710..fa2a22dfa048 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -107,6 +107,7 @@
#include "vcn_v5_0_1.h"
#include "jpeg_v5_0_0.h"
#include "jpeg_v5_0_1.h"
+#include "amdgpu_ras_mgr.h"
#include "amdgpu_vpe.h"
#if defined(CONFIG_DRM_AMD_ISP)
@@ -254,9 +255,9 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
/* This region is read-only and reserved from system use */
- discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
+ discv_regn = memremap(pos, adev->discovery.size, MEMREMAP_WC);
if (discv_regn) {
- memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
+ memcpy(binary, discv_regn, adev->discovery.size);
memunmap(discv_regn);
return 0;
}
@@ -276,7 +277,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
u32 msg;
if (!amdgpu_sriov_vf(adev)) {
- /* It can take up to a second for IFWI init to complete on some dGPUs,
+ /* It can take up to two seconds for IFWI init to complete on some dGPUs,
* but generally it should be in the 60-100ms range. Normally this starts
* as soon as the device gets power so by the time the OS loads this has long
* completed. However, when a card is hotplugged via e.g., USB4, we need to
@@ -284,7 +285,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
* continue.
*/
- for (i = 0; i < 1000; i++) {
+ for (i = 0; i < 2000; i++) {
msg = RREG32(mmMP0_SMN_C2PMSG_33);
if (msg & 0x80000000)
break;
@@ -298,10 +299,31 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
else
vram_size <<= 20;
+ /*
+ * If the discovery TMR is in VRAM, it is marked for reservation. If it is in
+ * system memory, it does not need to be reserved.
+ */
if (sz_valid) {
- uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
- amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
- adev->mman.discovery_tmr_size, false);
+ if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
+ /* For SR-IOV VFs with the dynamic critical region
+ * enabled, fetch the IPD binary via the call below.
+ * If it is disabled, fall through to the normal
+ * sequence.
+ */
+ if (amdgpu_virt_get_dynamic_data_info(adev,
+ AMD_SRIOV_MSG_IPD_TABLE_ID, binary,
+ &adev->discovery.size)) {
+ dev_err(adev->dev,
+ "failed to read discovery info from dynamic critical region.");
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
+
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
+ adev->discovery.size, false);
+ adev->discovery.reserve_tmr = true;
+ }
} else {
ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
}
@@ -310,7 +332,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
dev_err(adev->dev,
"failed to read discovery info from memory, vram size read: %llx",
vram_size);
-
+exit:
return ret;
}
@@ -321,10 +343,12 @@ static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
const struct firmware *fw;
int r;
- r = request_firmware(&fw, fw_name, adev->dev);
+ r = firmware_request_nowarn(&fw, fw_name, adev->dev);
if (r) {
- dev_err(adev->dev, "can't load firmware \"%s\"\n",
- fw_name);
+ if (amdgpu_discovery == 2)
+ dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name);
+ else
+ drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name);
return r;
}
@@ -387,6 +411,7 @@ static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
struct binary_header *bhdr)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct table_info *info;
uint16_t checksum;
uint16_t offset;
@@ -396,14 +421,14 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
checksum = le16_to_cpu(info->checksum);
struct nps_info_header *nhdr =
- (struct nps_info_header *)(adev->mman.discovery_bin + offset);
+ (struct nps_info_header *)(discovery_bin + offset);
if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
return -EINVAL;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
le32_to_cpu(nhdr->size_bytes),
checksum)) {
dev_dbg(adev->dev, "invalid nps info data table checksum\n");
@@ -415,8 +440,11 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
{
- if (amdgpu_discovery == 2)
+ if (amdgpu_discovery == 2) {
+ /* Assume there is a valid discovery TMR in VRAM even if the binary is sideloaded */
+ adev->discovery.reserve_tmr = true;
return "amdgpu/ip_discovery.bin";
+ }
switch (adev->asic_type) {
case CHIP_VEGA10:
@@ -445,53 +473,53 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
struct table_info *info;
struct binary_header *bhdr;
+ uint8_t *discovery_bin;
const char *fw_name;
uint16_t offset;
uint16_t size;
uint16_t checksum;
int r;
- adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
- adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
- if (!adev->mman.discovery_bin)
+ adev->discovery.bin = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
+ if (!adev->discovery.bin)
return -ENOMEM;
+ adev->discovery.size = DISCOVERY_TMR_SIZE;
+ adev->discovery.debugfs_blob.data = adev->discovery.bin;
+ adev->discovery.debugfs_blob.size = adev->discovery.size;
+ discovery_bin = adev->discovery.bin;
/* Read from file if it is the preferred option */
fw_name = amdgpu_discovery_get_fw_name(adev);
if (fw_name != NULL) {
- dev_info(adev->dev, "use ip discovery information from file");
- r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);
-
- if (r) {
- dev_err(adev->dev, "failed to read ip discovery binary from file\n");
- r = -EINVAL;
+ drm_dbg(&adev->ddev, "use ip discovery information from file");
+ r = amdgpu_discovery_read_binary_from_file(adev, discovery_bin,
+ fw_name);
+ if (r)
goto out;
- }
-
} else {
- r = amdgpu_discovery_read_binary_from_mem(
- adev, adev->mman.discovery_bin);
+ drm_dbg(&adev->ddev, "use ip discovery information from memory");
+ r = amdgpu_discovery_read_binary_from_mem(adev, discovery_bin);
if (r)
goto out;
}
/* check the ip discovery binary signature */
- if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
+ if (!amdgpu_discovery_verify_binary_signature(discovery_bin)) {
dev_err(adev->dev,
"get invalid ip discovery binary signature\n");
r = -EINVAL;
goto out;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = offsetof(struct binary_header, binary_checksum) +
sizeof(bhdr->binary_checksum);
size = le16_to_cpu(bhdr->binary_size) - offset;
checksum = le16_to_cpu(bhdr->binary_checksum);
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- size, checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, size,
+ checksum)) {
dev_err(adev->dev, "invalid ip discovery binary checksum\n");
r = -EINVAL;
goto out;
@@ -503,15 +531,16 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct ip_discovery_header *ihdr =
- (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
+ (struct ip_discovery_header *)(discovery_bin + offset);
if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
dev_err(adev->dev, "invalid ip discovery data table signature\n");
r = -EINVAL;
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le16_to_cpu(ihdr->size), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
+ le16_to_cpu(ihdr->size),
+ checksum)) {
dev_err(adev->dev, "invalid ip discovery data table checksum\n");
r = -EINVAL;
goto out;
@@ -524,7 +553,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct gpu_info_header *ghdr =
- (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
+ (struct gpu_info_header *)(discovery_bin + offset);
if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery gc table id\n");
@@ -532,8 +561,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le32_to_cpu(ghdr->size), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
+ le32_to_cpu(ghdr->size),
+ checksum)) {
dev_err(adev->dev, "invalid gc data table checksum\n");
r = -EINVAL;
goto out;
@@ -546,7 +576,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct harvest_info_header *hhdr =
- (struct harvest_info_header *)(adev->mman.discovery_bin + offset);
+ (struct harvest_info_header *)(discovery_bin + offset);
if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
@@ -554,8 +584,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- sizeof(struct harvest_table), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
+ sizeof(struct harvest_table), checksum)) {
dev_err(adev->dev, "invalid harvest data table checksum\n");
r = -EINVAL;
goto out;
@@ -568,7 +599,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct vcn_info_header *vhdr =
- (struct vcn_info_header *)(adev->mman.discovery_bin + offset);
+ (struct vcn_info_header *)(discovery_bin + offset);
if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery vcn table id\n");
@@ -576,8 +607,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le32_to_cpu(vhdr->size_bytes), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
+ le32_to_cpu(vhdr->size_bytes), checksum)) {
dev_err(adev->dev, "invalid vcn data table checksum\n");
r = -EINVAL;
goto out;
@@ -590,7 +622,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct mall_info_header *mhdr =
- (struct mall_info_header *)(adev->mman.discovery_bin + offset);
+ (struct mall_info_header *)(discovery_bin + offset);
if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery mall table id\n");
@@ -598,8 +630,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le32_to_cpu(mhdr->size_bytes), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
+ le32_to_cpu(mhdr->size_bytes), checksum)) {
dev_err(adev->dev, "invalid mall data table checksum\n");
r = -EINVAL;
goto out;
@@ -609,8 +642,8 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
return 0;
out:
- kfree(adev->mman.discovery_bin);
- adev->mman.discovery_bin = NULL;
+ kfree(adev->discovery.bin);
+ adev->discovery.bin = NULL;
if ((amdgpu_discovery != 2) &&
(RREG32(mmIP_DISCOVERY_VERSION) == 4))
amdgpu_ras_query_boot_status(adev, 4);
@@ -622,8 +655,8 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
amdgpu_discovery_sysfs_fini(adev);
- kfree(adev->mman.discovery_bin);
- adev->mman.discovery_bin = NULL;
+ kfree(adev->discovery.bin);
+ adev->discovery.bin = NULL;
}
static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
@@ -648,6 +681,7 @@ static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
uint32_t *vcn_harvest_count)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
@@ -657,21 +691,21 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
uint8_t inst;
int i, j;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ bhdr = (struct binary_header *)discovery_bin;
+ ihdr = (struct ip_discovery_header *)(discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
/* scan harvest bit of all IP data structures */
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ dhdr = (struct die_header *)(discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip *)(adev->mman.discovery_bin +
- ip_offset);
+ ip = (struct ip *)(discovery_bin + ip_offset);
inst = ip->number_instance;
hw_id = le16_to_cpu(ip->hw_id);
if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
@@ -713,13 +747,14 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
uint32_t *vcn_harvest_count,
uint32_t *umc_harvest_count)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
struct harvest_table *harvest_info;
u16 offset;
int i;
uint32_t umc_harvest_config = 0;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
if (!offset) {
@@ -727,7 +762,7 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
return;
}
- harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
+ harvest_info = (struct harvest_table *)(discovery_bin + offset);
for (i = 0; i < 32; i++) {
if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
@@ -1023,8 +1058,8 @@ static void ip_disc_release(struct kobject *kobj)
kobj);
struct amdgpu_device *adev = ip_top->adev;
- adev->ip_top = NULL;
kfree(ip_top);
+ adev->discovery.ip_top = NULL;
}
static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
@@ -1035,7 +1070,9 @@ static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
/* Until a uniform way is figured, get mask based on hwid */
switch (hw_id) {
case VCN_HWID:
- harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
+ /* VCN vs UVD+VCE */
+ if (!amdgpu_ip_version(adev, VCE_HWIP, 0))
+ harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
break;
case DMU_HWID:
if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
@@ -1062,6 +1099,7 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
const size_t _ip_offset, const int num_ips,
bool reg_base_64)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
int ii, jj, kk, res;
uint16_t hw_id;
uint8_t inst;
@@ -1079,7 +1117,7 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
struct ip_v4 *ip;
struct ip_hw_instance *ip_hw_instance;
- ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+ ip = (struct ip_v4 *)(discovery_bin + ip_offset);
inst = ip->instance_number;
hw_id = le16_to_cpu(ip->hw_id);
if (amdgpu_discovery_validate_ip(adev, inst, hw_id) ||
@@ -1166,17 +1204,20 @@ next_ip:
static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
+ struct ip_discovery_top *ip_top = adev->discovery.ip_top;
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
- struct kset *die_kset = &adev->ip_top->die_kset;
+ struct kset *die_kset = &ip_top->die_kset;
u16 num_dies, die_offset, num_ips;
size_t ip_offset;
int ii, res;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ bhdr = (struct binary_header *)discovery_bin;
+ ihdr = (struct ip_discovery_header *)(discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
DRM_DEBUG("number of dies: %d\n", num_dies);
@@ -1185,7 +1226,7 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
struct ip_die_entry *ip_die_entry;
die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
- dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ dhdr = (struct die_header *)(discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
@@ -1219,30 +1260,32 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
+ struct ip_discovery_top *ip_top;
struct kset *die_kset;
int res, ii;
- if (!adev->mman.discovery_bin)
+ if (!discovery_bin)
return -EINVAL;
- adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
- if (!adev->ip_top)
+ ip_top = kzalloc(sizeof(*ip_top), GFP_KERNEL);
+ if (!ip_top)
return -ENOMEM;
- adev->ip_top->adev = adev;
-
- res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
+ ip_top->adev = adev;
+ adev->discovery.ip_top = ip_top;
+ res = kobject_init_and_add(&ip_top->kobj, &ip_discovery_ktype,
&adev->dev->kobj, "ip_discovery");
if (res) {
DRM_ERROR("Couldn't init and add ip_discovery/");
goto Err;
}
- die_kset = &adev->ip_top->die_kset;
+ die_kset = &ip_top->die_kset;
kobject_set_name(&die_kset->kobj, "%s", "die");
- die_kset->kobj.parent = &adev->ip_top->kobj;
+ die_kset->kobj.parent = &ip_top->kobj;
die_kset->kobj.ktype = &die_kobj_ktype;
- res = kset_register(&adev->ip_top->die_kset);
+ res = kset_register(&ip_top->die_kset);
if (res) {
DRM_ERROR("Couldn't register die_kset");
goto Err;
@@ -1256,7 +1299,7 @@ static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
return res;
Err:
- kobject_put(&adev->ip_top->kobj);
+ kobject_put(&ip_top->kobj);
return res;
}
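[Editor's sketch, not part of the patch] The error path above follows the standard kobject contract: once kobject_init_and_add() has run, the object is refcounted and must be released with kobject_put() — which invokes the ktype's release (here ip_disc_release, freeing the allocation and clearing adev->discovery.ip_top) — never with a bare kfree(). A minimal form of the pattern:

struct example_node {
	struct kobject kobj;
};

static int example_add_node(struct kobject *parent,
			    const struct kobj_type *ktype)
{
	struct example_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
	int res;

	if (!node)
		return -ENOMEM;

	res = kobject_init_and_add(&node->kobj, ktype, parent, "example");
	if (res)
		kobject_put(&node->kobj); /* ktype->release() frees node */
	return res;
}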
@@ -1301,10 +1344,11 @@ static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
+ struct ip_discovery_top *ip_top = adev->discovery.ip_top;
struct list_head *el, *tmp;
struct kset *die_kset;
- die_kset = &adev->ip_top->die_kset;
+ die_kset = &ip_top->die_kset;
spin_lock(&die_kset->list_lock);
list_for_each_prev_safe(el, tmp, &die_kset->list) {
list_del_init(el);
@@ -1313,8 +1357,8 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
spin_lock(&die_kset->list_lock);
}
spin_unlock(&die_kset->list_lock);
- kobject_put(&adev->ip_top->die_kset.kobj);
- kobject_put(&adev->ip_top->kobj);
+ kobject_put(&ip_top->die_kset.kobj);
+ kobject_put(&ip_top->kobj);
}
/* ================================================== */
@@ -1325,6 +1369,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
+ uint8_t *discovery_bin;
struct ip_v4 *ip;
uint16_t die_offset;
uint16_t ip_offset;
@@ -1338,26 +1383,25 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
int r;
r = amdgpu_discovery_init(adev);
- if (r) {
- DRM_ERROR("amdgpu_discovery_init failed\n");
+ if (r)
return r;
- }
-
+ discovery_bin = adev->discovery.bin;
wafl_ver = 0;
adev->gfx.xcc_mask = 0;
adev->sdma.sdma_mask = 0;
adev->vcn.inst_mask = 0;
adev->jpeg.inst_mask = 0;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ bhdr = (struct binary_header *)discovery_bin;
+ ihdr = (struct ip_discovery_header *)(discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
DRM_DEBUG("number of dies: %d\n", num_dies);
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ dhdr = (struct die_header *)(discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
@@ -1371,7 +1415,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
le16_to_cpu(dhdr->die_id), num_ips);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+ ip = (struct ip_v4 *)(discovery_bin + ip_offset);
inst = ip->instance_number;
hw_id = le16_to_cpu(ip->hw_id);
@@ -1521,16 +1565,16 @@ next_ip:
static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct ip_discovery_header *ihdr;
struct binary_header *bhdr;
int vcn_harvest_count = 0;
int umc_harvest_count = 0;
uint16_t offset, ihdr_ver;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset);
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- offset);
+ ihdr = (struct ip_discovery_header *)(discovery_bin + offset);
ihdr_ver = le16_to_cpu(ihdr->version);
/*
* Harvest table does not fit Navi1x and legacy GPUs,
@@ -1577,22 +1621,23 @@ union gc_info {
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
union gc_info *gc_info;
u16 offset;
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[GC].offset);
if (!offset)
return 0;
- gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
+ gc_info = (union gc_info *)(discovery_bin + offset);
switch (le16_to_cpu(gc_info->v1.header.version_major)) {
case 1:
@@ -1685,24 +1730,25 @@ union mall_info {
static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
union mall_info *mall_info;
u32 u, mall_size_per_umc, m_s_present, half_use;
u64 mall_size;
u16 offset;
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
if (!offset)
return 0;
- mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
+ mall_info = (union mall_info *)(discovery_bin + offset);
switch (le16_to_cpu(mall_info->v1.header.version_major)) {
case 1:
@@ -1741,12 +1787,13 @@ union vcn_info {
static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
union vcn_info *vcn_info;
u16 offset;
int v;
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
@@ -1761,13 +1808,13 @@ static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
if (!offset)
return 0;
- vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
+ vcn_info = (union vcn_info *)(discovery_bin + offset);
switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
case 1:
@@ -1827,6 +1874,7 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
struct amdgpu_gmc_memrange **ranges,
int *range_cnt, bool refresh)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct amdgpu_gmc_memrange *mem_ranges;
struct binary_header *bhdr;
union nps_info *nps_info;
@@ -1843,13 +1891,13 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
return r;
nps_info = &nps_data;
} else {
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
dev_err(adev->dev,
"fetch mem range failed, ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
if (!offset)
@@ -1859,8 +1907,7 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
return -ENOENT;
- nps_info =
- (union nps_info *)(adev->mman.discovery_bin + offset);
+ nps_info = (union nps_info *)(discovery_bin + offset);
}
switch (le16_to_cpu(nps_info->v1.header.version_major)) {
@@ -2128,7 +2175,6 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 7):
- case IP_VERSION(11, 0, 8):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
@@ -2136,6 +2182,10 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 2):
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
break;
+ case IP_VERSION(11, 0, 8):
+ if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ break;
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
@@ -2360,6 +2410,21 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_version(adev, SDMA0_HWIP, 0));
return -EINVAL;
}
+
+ return 0;
+}
+
+static int amdgpu_discovery_set_ras_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
+ case IP_VERSION(13, 0, 14):
+ amdgpu_device_ip_block_add(adev, &ras_v1_0_ip_block);
+ break;
+ default:
+ break;
+ }
return 0;
}
@@ -2559,41 +2624,16 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_RAVEN:
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
- /* this is not fatal. We have a fallback below
- * if the new firmwares are not present. some of
- * this will be overridden below to keep things
- * consistent with the current behavior.
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
*/
- r = amdgpu_discovery_reg_base_init(adev);
- if (!r) {
- amdgpu_discovery_harvest_ip(adev);
- amdgpu_discovery_get_gfx_info(adev);
- amdgpu_discovery_get_mall_info(adev);
- amdgpu_discovery_get_vcn_info(adev);
- }
- break;
- default:
- r = amdgpu_discovery_reg_base_init(adev);
- if (r)
- return -EINVAL;
-
- amdgpu_discovery_harvest_ip(adev);
- amdgpu_discovery_get_gfx_info(adev);
- amdgpu_discovery_get_mall_info(adev);
- amdgpu_discovery_get_vcn_info(adev);
- break;
- }
-
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
adev->gmc.num_umc = 4;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
@@ -2613,9 +2653,16 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
break;
case CHIP_VEGA12:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
adev->gmc.num_umc = 4;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
@@ -2635,10 +2682,17 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
break;
case CHIP_RAVEN:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 1;
+ adev->sdma.sdma_mask = 1;
adev->vcn.num_vcn_inst = 1;
adev->gmc.num_umc = 2;
+ adev->gfx.xcc_mask = 1;
if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
@@ -2676,9 +2730,16 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
}
break;
case CHIP_VEGA20:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega20_reg_base_init(adev);
adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
adev->gmc.num_umc = 8;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
@@ -2699,10 +2760,17 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
break;
case CHIP_ARCTURUS:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
arct_reg_base_init(adev);
adev->sdma.num_instances = 8;
+ adev->sdma.sdma_mask = 0xff;
adev->vcn.num_vcn_inst = 2;
adev->gmc.num_umc = 8;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
@@ -2727,10 +2795,17 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
break;
case CHIP_ALDEBARAN:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
aldebaran_reg_base_init(adev);
adev->sdma.num_instances = 5;
+ adev->sdma.sdma_mask = 0x1f;
adev->vcn.num_vcn_inst = 2;
adev->gmc.num_umc = 4;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
@@ -2752,7 +2827,49 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
break;
+ case CHIP_CYAN_SKILLFISH:
+ if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r)
+ return -EINVAL;
+
+ amdgpu_discovery_harvest_ip(adev);
+ amdgpu_discovery_get_gfx_info(adev);
+ amdgpu_discovery_get_mall_info(adev);
+ amdgpu_discovery_get_vcn_info(adev);
+ } else {
+ cyan_skillfish_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
+ adev->gfx.xcc_mask = 1;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 5, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(2, 1, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(8, 1, 1);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 1);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(10, 1, 3);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 0, 3);
+ }
+ break;
default:
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r) {
+ drm_err(&adev->ddev, "discovery failed: %d\n", r);
+ return r;
+ }
+
+ amdgpu_discovery_harvest_ip(adev);
+ amdgpu_discovery_get_gfx_info(adev);
+ amdgpu_discovery_get_mall_info(adev);
+ amdgpu_discovery_get_vcn_info(adev);
break;
}
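[Editor's sketch, not part of the patch] After this rework the default case is the one common discovery path; a hypothetical helper makes the sequencing explicit — reg_base_init is mandatory, while the info tables are best-effort and simply return when their table_list offset is zero:

static int example_discovery_probe(struct amdgpu_device *adev)
{
	int r = amdgpu_discovery_reg_base_init(adev);

	if (r)
		return r;

	/* Optional tables; each bails out quietly when absent. */
	amdgpu_discovery_harvest_ip(adev);
	amdgpu_discovery_get_gfx_info(adev);
	amdgpu_discovery_get_mall_info(adev);
	amdgpu_discovery_get_vcn_info(adev);
	return 0;
}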
@@ -3088,6 +3205,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
if (r)
return r;
+ r = amdgpu_discovery_set_ras_ip_blocks(adev);
+ if (r)
+ return r;
+
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
!amdgpu_sriov_vf(adev)) ||
(adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index b44d56465c5b..4ce04486cc31 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -24,9 +24,21 @@
#ifndef __AMDGPU_DISCOVERY__
#define __AMDGPU_DISCOVERY__
+#include <linux/debugfs.h>
+
#define DISCOVERY_TMR_SIZE (10 << 10)
#define DISCOVERY_TMR_OFFSET (64 << 10)
+struct ip_discovery_top;
+
+struct amdgpu_discovery_info {
+ struct debugfs_blob_wrapper debugfs_blob;
+ struct ip_discovery_top *ip_top;
+ uint32_t size;
+ uint8_t *bin;
+ bool reserve_tmr;
+};
+
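[Editor's note, not part of the patch] This struct is why every hunk above begins by caching a local pointer: discovery state that used to live in scattered fields (adev->mman.discovery_bin, adev->ip_top) is now reached through one container, e.g.:

	uint8_t *discovery_bin = adev->discovery.bin;
	struct ip_discovery_top *ip_top = adev->discovery.ip_top;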
void amdgpu_discovery_fini(struct amdgpu_device *adev);
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 35c778426a7c..b5d34797d606 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -332,8 +332,6 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
if (crtc->enabled)
active = true;
- pm_runtime_mark_last_busy(dev->dev);
-
adev = drm_to_adev(dev);
/* if we have active crtcs and we don't have a power ref,
* take the current one
@@ -1196,13 +1194,14 @@ static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb
static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
struct amdgpu_framebuffer *rfb,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
rfb->base.obj[0] = obj;
- drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &rfb->base, info, mode_cmd);
/* Verify that the modifier is supported. */
if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
@@ -1297,6 +1296,7 @@ static int amdgpu_display_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct amdgpu_framebuffer *amdgpu_fb;
@@ -1317,7 +1317,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
bo = gem_to_amdgpu_bo(obj);
domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
- if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
+ if (drm_gem_is_imported(obj) && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
drm_gem_object_put(obj);
return ERR_PTR(-EINVAL);
@@ -1330,7 +1330,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
}
ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
- mode_cmd, obj);
+ info, mode_cmd, obj);
if (ret) {
kfree(amdgpu_fb);
drm_gem_object_put(obj);
@@ -1363,6 +1363,64 @@ static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};
+/**
+ * DOC: property for adaptive backlight modulation
+ *
+ * The 'adaptive backlight modulation' property is used for the compositor to
+ * directly control the adaptive backlight modulation power savings feature
+ * that is part of DCN hardware.
+ *
+ * The property will be attached specifically to eDP panels that support it.
+ *
+ * The property defaults to 'sysfs', which allows the sysfs file
+ * 'panel_power_savings' to control it.
+ * If set to 'off' the compositor will ensure it stays off.
+ * The other values 'min', 'bias min', 'bias max', and 'max' will control the
+ * intensity of the power savings.
+ *
+ * Modifying this value can have implications for color accuracy, so tread
+ * carefully.
+ */
+static int amdgpu_display_setup_abm_prop(struct amdgpu_device *adev)
+{
+ const struct drm_prop_enum_list props[] = {
+ { ABM_SYSFS_CONTROL, "sysfs" },
+ { ABM_LEVEL_OFF, "off" },
+ { ABM_LEVEL_MIN, "min" },
+ { ABM_LEVEL_BIAS_MIN, "bias min" },
+ { ABM_LEVEL_BIAS_MAX, "bias max" },
+ { ABM_LEVEL_MAX, "max" },
+ };
+ struct drm_property *prop;
+ int i;
+
+ if (!adev->dc_enabled)
+ return 0;
+
+ prop = drm_property_create(adev_to_drm(adev), DRM_MODE_PROP_ENUM,
+ "adaptive backlight modulation",
+ 6);
+ if (!prop)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ int ret;
+
+ ret = drm_property_add_enum(prop, props[i].type,
+ props[i].name);
+
+ if (ret) {
+ drm_property_destroy(adev_to_drm(adev), prop);
+
+ return ret;
+ }
+ }
+
+ adev->mode_info.abm_level_property = prop;
+
+ return 0;
+}
+
int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
{
int sz;
@@ -1409,7 +1467,7 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
"dither",
amdgpu_dither_enum_list, sz);
- return 0;
+ return amdgpu_display_setup_abm_prop(adev);
}
void amdgpu_display_update_priority(struct amdgpu_device *adev)
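[Editor's sketch, not part of the patch] One way a compositor could drive the new connector property through libdrm; conn_id and prop_id are assumed to have been discovered via drmModeObjectGetProperties(), and the value 0 matches the "off" entry (ABM_LEVEL_OFF) registered above:

#include <xf86drmMode.h>

static int example_set_abm_off(int fd, uint32_t conn_id, uint32_t prop_id)
{
	return drmModeObjectSetProperty(fd, conn_id,
					DRM_MODE_OBJECT_CONNECTOR,
					prop_id, 0 /* ABM_LEVEL_OFF */);
}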
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index dfa0d642ac16..49a29bf47a37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -44,6 +44,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier);
@@ -54,4 +55,11 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev);
int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
struct drm_scanout_buffer *sb);
+#define ABM_SYSFS_CONTROL -1
+#define ABM_LEVEL_OFF 0
+#define ABM_LEVEL_MIN 1
+#define ABM_LEVEL_BIAS_MIN 2
+#define ABM_LEVEL_BIAS_MAX 3
+#define ABM_LEVEL_MAX 4
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 44e120f9f764..e22cfa7c6d32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -81,13 +81,44 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct drm_gem_object *obj = dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ int r;
+
+ /*
+ * Disable peer-to-peer access for DCC-enabled VRAM surfaces on GFX12+.
+ * Such buffers cannot be safely accessed over P2P due to device-local
+ * compression metadata. Fall back to the system-memory path instead.
+ * This applies when the device supports GFX12 (GC 12.x or newer) and
+ * the BO was created with the AMDGPU_GEM_CREATE_GFX12_DCC flag.
+ */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0) &&
+ bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
+ attach->peer2peer = false;
+
if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
+ r = dma_resv_lock(bo->tbo.base.resv, NULL);
+ if (r)
+ return r;
+
amdgpu_vm_bo_update_shared(bo);
+ dma_resv_unlock(bo->tbo.base.resv);
+
return 0;
}
@@ -285,6 +316,36 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
}
+static int amdgpu_dma_buf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ int ret;
+
+ /*
+ * Pin to keep buffer in place while it's vmap'ed. The actual
+ * domain is not that important as long as it's mapable. Using
+ * GTT and VRAM should be compatible with most use cases.
+ */
+ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM);
+ if (ret)
+ return ret;
+ ret = drm_gem_dmabuf_vmap(dma_buf, map);
+ if (ret)
+ amdgpu_bo_unpin(bo);
+
+ return ret;
+}
+
+static void amdgpu_dma_buf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ drm_gem_dmabuf_vunmap(dma_buf, map);
+ amdgpu_bo_unpin(bo);
+}
+
const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_dma_buf_attach,
.pin = amdgpu_dma_buf_pin,
@@ -294,8 +355,8 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
.mmap = drm_gem_dmabuf_mmap,
- .vmap = drm_gem_dmabuf_vmap,
- .vunmap = drm_gem_dmabuf_vunmap,
+ .vmap = amdgpu_dma_buf_vmap,
+ .vunmap = amdgpu_dma_buf_vunmap,
};
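[Editor's sketch, not part of the patch] From the importer's point of view nothing changes — the pin now simply happens implicitly inside the exporter's .vmap. A minimal consumer, using the unlocked dma-buf helpers:

static int example_peek_first_byte(struct dma_buf *buf, u8 *out)
{
	struct iosys_map map;
	int ret;

	ret = dma_buf_vmap_unlocked(buf, &map); /* pins + maps the BO */
	if (ret)
		return ret;

	*out = map.is_iomem ? readb(map.vaddr_iomem) : *(u8 *)map.vaddr;

	dma_buf_vunmap_unlocked(buf, &map); /* unmaps + unpins */
	return 0;
}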
/**
@@ -313,11 +374,23 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
struct dma_buf *buf;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = true,
+ /* We opt to avoid OOM on system page allocations */
+ .gfp_retry_mayfail = true,
+ .allow_res_evict = false,
+ };
+ int ret;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
return ERR_PTR(-EPERM);
+ ret = ttm_bo_setup_export(&bo->tbo, &ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
buf = drm_gem_prime_export(gobj, flags);
if (!IS_ERR(buf))
buf->ops = &amdgpu_dmabuf_ops;
@@ -513,7 +586,7 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
if (!adev)
return false;
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
struct dma_buf *dma_buf = obj->import_attach->dmabuf;
if (dma_buf->ops != &amdgpu_dmabuf_ops)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
index 3f3662e8b871..3040437d99c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
@@ -41,7 +41,8 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
if (index < adev->doorbell.num_kernel_doorbells)
return readl(adev->doorbell.cpu_addr + index);
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n",
+ index);
return 0;
}
@@ -63,7 +64,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
if (index < adev->doorbell.num_kernel_doorbells)
writel(v, adev->doorbell.cpu_addr + index);
else
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev,
+ "writing beyond doorbell aperture: 0x%08x!\n", index);
}
/**
@@ -83,7 +85,8 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
if (index < adev->doorbell.num_kernel_doorbells)
return atomic64_read((atomic64_t *)(adev->doorbell.cpu_addr + index));
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n",
+ index);
return 0;
}
@@ -105,7 +108,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
if (index < adev->doorbell.num_kernel_doorbells)
atomic64_set((atomic64_t *)(adev->doorbell.cpu_addr + index), v);
else
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+ dev_err(adev->dev,
+ "writing beyond doorbell aperture: 0x%08x!\n", index);
}
/**
@@ -166,7 +170,8 @@ int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev)
NULL,
(void **)&adev->doorbell.cpu_addr);
if (r) {
- DRM_ERROR("Failed to allocate kernel doorbells, err=%d\n", r);
+ dev_err(adev->dev,
+ "Failed to allocate kernel doorbells, err=%d\n", r);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 4db92e0a60da..2dfbddcef9ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -144,6 +144,8 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6),
AMDGPU_DEBUG_SMU_POOL = BIT(7),
AMDGPU_DEBUG_VM_USERPTR = BIT(8),
+ AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9),
+ AMDGPU_DEBUG_ENABLE_CE_CS = BIT(10),
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -310,7 +312,7 @@ module_param_named(moverate, amdgpu_moverate, int, 0600);
* DOC: audio (int)
* Set HDMI/DPAudio. Only affects non-DC display handling. The default is -1 (Enabled), set 0 to disabled it.
*/
-MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
+MODULE_PARM_DESC(audio, "HDMI/DP Audio enable for non DC displays (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(audio, amdgpu_audio, int, 0444);
/**
@@ -352,22 +354,16 @@ module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint
* DOC: lockup_timeout (string)
* Set GPU scheduler timeout value in ms.
*
- * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is there can be one or
- * multiple values specified. 0 and negative values are invalidated. They will be adjusted
- * to the default timeout.
+ * The format can be [single value] for setting all timeouts at once or
+ * [GFX,Compute,SDMA,Video] to set individual timeouts.
+ * Negative values mean infinity.
*
- * - With one value specified, the setting will apply to all non-compute jobs.
- * - With multiple values specified, the first one will be for GFX.
- * The second one is for Compute. The third and fourth ones are
- * for SDMA and Video.
- *
- * By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video)
- * jobs is 10000. The timeout for compute is 60000.
+ * By default (with no lockup_timeout settings), the timeout for all queues is 2000 ms.
*/
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; "
- "for passthrough or sriov, 10000 for all jobs. 0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
- "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
-module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
+MODULE_PARM_DESC(lockup_timeout,
+ "GPU lockup timeout in ms (default: 2000. 0: keep default value. negative: infinity timeout), format: [single value for all] or [GFX,Compute,SDMA,Video].");
+module_param_string(lockup_timeout, amdgpu_lockup_timeout,
+ sizeof(amdgpu_lockup_timeout), 0444);
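[Editor's sketch, not part of the patch] The two documented formats can be told apart purely by token count; a simplified parser under that assumption (the in-tree parser in amdgpu_device.c may differ in detail):

static void example_parse_lockup_timeout(char *input, long timeout[4])
{
	char *tok;
	int i = 0;

	while (i < 4 && (tok = strsep(&input, ","))) {
		if (kstrtol(tok, 0, &timeout[i]))
			continue; /* bad token: keep the default */
		i++;
	}

	/* A single value applies to GFX, Compute, SDMA and Video alike. */
	if (i == 1)
		timeout[1] = timeout[2] = timeout[3] = timeout[0];
}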
/**
* DOC: dpm (int)
@@ -622,39 +618,39 @@ module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);
/**
* DOC: si_support (int)
- * Set SI support driver. This parameter works after set config CONFIG_DRM_AMDGPU_SI. For SI asic, when radeon driver is enabled,
- * set value 0 to use radeon driver, while set value 1 to use amdgpu driver. The default is using radeon driver when it available,
- * otherwise using amdgpu driver.
- */
+ * 1 = enabled, 0 = disabled, -1 = default
+ *
+ * SI (Southern Islands) are first generation GCN GPUs, supported by both
+ * drivers: radeon (old) and amdgpu (new). This parameter controls whether
+ * amdgpu should support SI.
+ * By default, SI dedicated GPUs are supported by amdgpu.
+ * Only relevant when CONFIG_DRM_AMDGPU_SI is enabled to build SI support in amdgpu.
+ * See also radeon.si_support which should be disabled when amdgpu.si_support is
+ * enabled, and vice versa.
+ */
+int amdgpu_si_support = -1;
#ifdef CONFIG_DRM_AMDGPU_SI
-
-#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
-int amdgpu_si_support;
-MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))");
-#else
-int amdgpu_si_support = 1;
-MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)");
-#endif
-
+MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled, -1 = default)");
module_param_named(si_support, amdgpu_si_support, int, 0444);
#endif
/**
* DOC: cik_support (int)
- * Set CIK support driver. This parameter works after set config CONFIG_DRM_AMDGPU_CIK. For CIK asic, when radeon driver is enabled,
- * set value 0 to use radeon driver, while set value 1 to use amdgpu driver. The default is using radeon driver when it available,
- * otherwise using amdgpu driver.
- */
+ * 1 = enabled, 0 = disabled, -1 = default
+ *
+ * CIK (Sea Islands) are second generation GCN GPUs, supported by both
+ * drivers: radeon (old) and amdgpu (new). This parameter controls whether
+ * amdgpu should support CIK.
+ * By default:
+ * - CIK dedicated GPUs are supported by amdgpu.
+ * - CIK APUs are supported by radeon (except when radeon is not built).
+ * Only relevant when CONFIG_DRM_AMDGPU_CIK is enabled to build CIK support in amdgpu.
+ * See also radeon.cik_support which should be disabled when amdgpu.cik_support is
+ * enabled, and vice versa.
+ */
+int amdgpu_cik_support = -1;
#ifdef CONFIG_DRM_AMDGPU_CIK
-
-#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
-int amdgpu_cik_support;
-MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
-#else
-int amdgpu_cik_support = 1;
-MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)");
-#endif
-
+MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled, -1 = default)");
module_param_named(cik_support, amdgpu_cik_support, int, 0444);
#endif
@@ -885,7 +881,7 @@ module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
/**
* DOC: dcdebugmask (uint)
- * Override display features enabled. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * Display debug options. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
*/
MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default))");
module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);
@@ -959,7 +955,7 @@ module_param_named(tmz, amdgpu_tmz, int, 0444);
*/
MODULE_PARM_DESC(
freesync_video,
- "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
+ "Adds additional modes via VRR for refresh changes without a full modeset (0 = off (default), 1 = on)");
module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
/**
@@ -2171,6 +2167,11 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
/* CYAN_SKILLFISH */
+ {0x1002, 0x13DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13F9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
{0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
{0x1002, 0x143F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
@@ -2227,7 +2228,6 @@ static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev)
adev->pdev->bus->number, i);
if (p) {
pm_runtime_get_sync(&p->dev);
- pm_runtime_mark_last_busy(&p->dev);
pm_runtime_put_autosuspend(&p->dev);
pci_dev_put(p);
}
@@ -2278,6 +2278,16 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: VM mode debug for userptr is enabled\n");
adev->debug_vm_userptr = true;
}
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_DISABLE_RAS_CE_LOG) {
+ pr_info("debug: disable kernel logs of correctable errors\n");
+ adev->debug_disable_ce_logs = true;
+ }
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_ENABLE_CE_CS) {
+ pr_info("debug: allowing command submission to CE engine\n");
+ adev->debug_enable_ce_cs = true;
+ }
}
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2296,6 +2306,72 @@ static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long fl
return flags;
}
+static bool amdgpu_support_enabled(struct device *dev,
+ const enum amd_asic_type family)
+{
+ const char *gen;
+ const char *param;
+ int module_param = -1;
+ bool radeon_support_built = IS_ENABLED(CONFIG_DRM_RADEON);
+ bool amdgpu_support_built = false;
+ bool support_by_default = false;
+
+ switch (family) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+ gen = "SI";
+ param = "si_support";
+ module_param = amdgpu_si_support;
+ amdgpu_support_built = IS_ENABLED(CONFIG_DRM_AMDGPU_SI);
+ support_by_default = true;
+ break;
+
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ support_by_default = true;
+ fallthrough;
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ gen = "CIK";
+ param = "cik_support";
+ module_param = amdgpu_cik_support;
+ amdgpu_support_built = IS_ENABLED(CONFIG_DRM_AMDGPU_CIK);
+ break;
+
+ default:
+ /* All other chips are supported by amdgpu only */
+ return true;
+ }
+
+ if (!amdgpu_support_built) {
+ dev_info(dev, "amdgpu built without %s support\n", gen);
+ return false;
+ }
+
+ if ((module_param == -1 && (support_by_default || !radeon_support_built)) ||
+ module_param == 1) {
+ if (radeon_support_built)
+ dev_info(dev, "%s support provided by amdgpu.\n"
+ "Use radeon.%s=1 amdgpu.%s=0 to override.\n",
+ gen, param, param);
+
+ return true;
+ }
+
+ if (radeon_support_built)
+ dev_info(dev, "%s support provided by radeon.\n"
+ "Use radeon.%s=0 amdgpu.%s=1 to override.\n",
+ gen, param, param);
+ else if (module_param == 0)
+ dev_info(dev, "%s support disabled by module param\n", gen);
+
+ return false;
+}
+
static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -2321,7 +2397,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
amdgpu_aspm = 0;
if (amdgpu_virtual_display ||
- amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
+ amdgpu_device_asic_has_dc_support(pdev, flags & AMD_ASIC_MASK))
supports_atomic = true;
if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
@@ -2343,48 +2419,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENOTSUPP;
}
- switch (flags & AMD_ASIC_MASK) {
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- case CHIP_HAINAN:
-#ifdef CONFIG_DRM_AMDGPU_SI
- if (!amdgpu_si_support) {
- dev_info(&pdev->dev,
- "SI support provided by radeon.\n");
- dev_info(&pdev->dev,
- "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
- );
- return -ENODEV;
- }
- break;
-#else
- dev_info(&pdev->dev, "amdgpu is built without SI support.\n");
- return -ENODEV;
-#endif
- case CHIP_KAVERI:
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- case CHIP_KABINI:
- case CHIP_MULLINS:
-#ifdef CONFIG_DRM_AMDGPU_CIK
- if (!amdgpu_cik_support) {
- dev_info(&pdev->dev,
- "CIK support provided by radeon.\n");
- dev_info(&pdev->dev,
- "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
- );
- return -ENODEV;
- }
- break;
-#else
- dev_info(&pdev->dev, "amdgpu is built without CIK support.\n");
+ if (!amdgpu_support_enabled(&pdev->dev, flags & AMD_ASIC_MASK))
return -ENODEV;
-#endif
- default:
- break;
- }
adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
if (IS_ERR(adev))
@@ -2451,10 +2487,10 @@ retry_init:
if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
/* only need to skip on ATPX */
- if (amdgpu_device_supports_px(ddev))
+ if (amdgpu_device_supports_px(adev))
dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
/* we want direct complete for BOCO */
- if (amdgpu_device_supports_boco(ddev))
+ if (amdgpu_device_supports_boco(adev))
dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_SMART_PREPARE |
DPM_FLAG_SMART_SUSPEND |
DPM_FLAG_MAY_SKIP_RESUME);
@@ -2463,7 +2499,6 @@ retry_init:
pm_runtime_allow(ddev->dev);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
pci_wake_from_d3(pdev, TRUE);
@@ -2487,9 +2522,9 @@ retry_init:
* into D0 state. Then there will be a PMFW-aware D-state
* transition(D0->D3) on runpm suspend.
*/
- if (amdgpu_device_supports_baco(ddev) &&
+ if (amdgpu_device_supports_baco(adev) &&
!(adev->flags & AMD_IS_APU) &&
- (adev->asic_type >= CHIP_NAVI10))
+ adev->asic_type >= CHIP_NAVI10)
amdgpu_get_secondary_funcs(adev);
}
@@ -2506,6 +2541,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
+ amdgpu_ras_eeprom_check_and_recover(adev);
amdgpu_xcp_dev_unplug(adev);
amdgpu_gmc_prepare_nps_mode_change(adev);
drm_dev_unplug(dev);
@@ -2535,6 +2571,10 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
if (amdgpu_ras_intr_triggered())
return;
+ /* the device may not be resumed here; return immediately in this case */
+ if (adev->in_s4 && adev->in_suspend)
+ return;
+
/* if we are running in a VM, make sure the device
* torn down properly on reboot/shutdown.
* unfortunately we can't detect certain
@@ -2542,7 +2582,8 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
*/
if (!amdgpu_passthrough(adev))
adev->mp1_state = PP_MP1_STATE_UNLOAD;
- amdgpu_device_ip_suspend(adev);
+ amdgpu_device_prepare(dev);
+ amdgpu_device_suspend(dev, true);
adev->mp1_state = PP_MP1_STATE_NONE;
}
@@ -2551,11 +2592,14 @@ static int amdgpu_pmops_prepare(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ /* the device may not be resumed here; return immediately in this case */
+ if (adev->in_s4 && adev->in_suspend)
+ return 0;
+
/* Return a positive number here so
* DPM_FLAG_SMART_SUSPEND works properly
*/
- if (amdgpu_device_supports_boco(drm_dev) &&
- pm_runtime_suspended(dev))
+ if (amdgpu_device_supports_boco(adev) && pm_runtime_suspended(dev))
return 1;
/* if we will not support s3 or s2i for the device
@@ -2570,7 +2614,7 @@ static int amdgpu_pmops_prepare(struct device *dev)
static void amdgpu_pmops_complete(struct device *dev)
{
- /* nothing to do */
+ amdgpu_device_complete(dev_get_drvdata(dev));
}
static int amdgpu_pmops_suspend(struct device *dev)
@@ -2583,6 +2627,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
else if (amdgpu_acpi_is_s3_active(adev))
adev->in_s3 = true;
if (!adev->in_s0ix && !adev->in_s3) {
+#if IS_ENABLED(CONFIG_SUSPEND)
/* don't allow going deep first time followed by s2idle the next time */
if (adev->last_suspend_state != PM_SUSPEND_ON &&
adev->last_suspend_state != pm_suspend_target_state) {
@@ -2590,11 +2635,14 @@ static int amdgpu_pmops_suspend(struct device *dev)
pm_suspend_target_state);
return -EINVAL;
}
+#endif
return 0;
}
+#if IS_ENABLED(CONFIG_SUSPEND)
/* cache the state last used for suspend */
adev->last_suspend_state = pm_suspend_target_state;
+#endif
return amdgpu_device_suspend(drm_dev, true);
}
@@ -2603,9 +2651,14 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int r;
- if (amdgpu_acpi_should_gpu_reset(adev))
- return amdgpu_asic_reset(adev);
+ if (amdgpu_acpi_should_gpu_reset(adev)) {
+ amdgpu_device_lock_reset_domain(adev->reset_domain);
+ r = amdgpu_asic_reset(adev);
+ amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ return r;
+ }
return 0;
}
@@ -2650,12 +2703,21 @@ static int amdgpu_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ /* do not resume device if it's normal hibernation */
+ if (!pm_hibernate_is_recovering() && !pm_hibernation_mode_is_suspend())
+ return 0;
+
return amdgpu_device_resume(drm_dev, true);
}
static int amdgpu_pmops_poweroff(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ /* the device may not be resumed here; return immediately in this case */
+ if (adev->in_s4 && adev->in_suspend)
+ return 0;
return amdgpu_device_suspend(drm_dev, true);
}
@@ -2739,22 +2801,8 @@ static int amdgpu_runtime_idle_check_userq(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
- struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
- int ret = 0;
-
- mutex_lock(&adev->userq_mutex);
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- ret = -EBUSY;
- goto done;
- }
- }
-done:
- mutex_unlock(&adev->userq_mutex);
- return ret;
+ return xa_empty(&adev->userq_doorbell_xa) ? 0 : -EBUSY;
}
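[Editor's note, not part of the patch] The rewrite assumes every live user queue holds an entry in adev->userq_doorbell_xa, so the old locked walk over userq_mgr_list collapses to a lockless emptiness test:

	if (!xa_empty(&adev->userq_doorbell_xa))
		return -EBUSY; /* some user queue still owns a doorbell */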
static int amdgpu_pmops_runtime_suspend(struct device *dev)
@@ -2828,7 +2876,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
/* nothing to do */
} else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
- amdgpu_device_baco_enter(drm_dev);
+ amdgpu_device_baco_enter(adev);
}
dev_dbg(&pdev->dev, "asic/device is runtime suspended\n");
@@ -2869,7 +2917,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
} else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
- amdgpu_device_baco_exit(drm_dev);
+ amdgpu_device_baco_exit(adev);
}
ret = amdgpu_device_resume(drm_dev, false);
if (ret) {
@@ -2901,7 +2949,6 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
ret = amdgpu_runtime_idle_check_userq(dev);
done:
- pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
return ret;
}
@@ -2910,11 +2957,14 @@ static int amdgpu_drm_release(struct inode *inode, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+ struct drm_device *dev = file_priv->minor->dev;
+ int idx;
- if (fpriv) {
+ if (fpriv && drm_dev_enter(dev, &idx)) {
fpriv->evf_mgr.fd_closing = true;
amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
+ drm_dev_exit(idx);
}
return drm_release(inode, filp);
@@ -2934,22 +2984,21 @@ long amdgpu_drm_ioctl(struct file *filp,
ret = drm_ioctl(filp, cmd, arg);
- pm_runtime_mark_last_busy(dev->dev);
out:
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
static const struct dev_pm_ops amdgpu_pm_ops = {
- .prepare = amdgpu_pmops_prepare,
- .complete = amdgpu_pmops_complete,
- .suspend = amdgpu_pmops_suspend,
- .suspend_noirq = amdgpu_pmops_suspend_noirq,
- .resume = amdgpu_pmops_resume,
- .freeze = amdgpu_pmops_freeze,
- .thaw = amdgpu_pmops_thaw,
- .poweroff = amdgpu_pmops_poweroff,
- .restore = amdgpu_pmops_restore,
+ .prepare = pm_sleep_ptr(amdgpu_pmops_prepare),
+ .complete = pm_sleep_ptr(amdgpu_pmops_complete),
+ .suspend = pm_sleep_ptr(amdgpu_pmops_suspend),
+ .suspend_noirq = pm_sleep_ptr(amdgpu_pmops_suspend_noirq),
+ .resume = pm_sleep_ptr(amdgpu_pmops_resume),
+ .freeze = pm_sleep_ptr(amdgpu_pmops_freeze),
+ .thaw = pm_sleep_ptr(amdgpu_pmops_thaw),
+ .poweroff = pm_sleep_ptr(amdgpu_pmops_poweroff),
+ .restore = pm_sleep_ptr(amdgpu_pmops_restore),
.runtime_suspend = amdgpu_pmops_runtime_suspend,
.runtime_resume = amdgpu_pmops_runtime_resume,
.runtime_idle = amdgpu_pmops_runtime_idle,
@@ -3021,6 +3070,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_LIST_HANDLES, amdgpu_gem_list_handles_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct drm_driver amdgpu_kms_driver = {
@@ -3094,7 +3144,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
.probe = amdgpu_pci_probe,
.remove = amdgpu_pci_remove,
.shutdown = amdgpu_pci_shutdown,
- .driver.pm = &amdgpu_pm_ops,
+ .driver.pm = pm_ptr(&amdgpu_pm_ops),
.err_handler = &amdgpu_pci_err_handler,
.dev_groups = amdgpu_sysfs_groups,
};
@@ -3107,10 +3157,6 @@ static int __init amdgpu_init(void)
if (r)
goto error_sync;
- r = amdgpu_fence_slab_init();
- if (r)
- goto error_fence;
-
r = amdgpu_userq_fence_slab_init();
if (r)
goto error_fence;
@@ -3145,7 +3191,6 @@ static void __exit amdgpu_exit(void)
amdgpu_unregister_atpx_handler();
amdgpu_acpi_release();
amdgpu_sync_fini();
- amdgpu_fence_slab_fini();
amdgpu_userq_fence_slab_fini();
mmu_notifier_synchronize();
amdgpu_xcp_drv_release();
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
index 8b919ad3af29..23d7d0b0d625 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
@@ -143,7 +143,6 @@ static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f)
}
static const struct dma_fence_ops amdgpu_eviction_fence_ops = {
- .use_64bit_seqno = true,
.get_driver_name = amdgpu_eviction_fence_get_driver_name,
.get_timeline_name = amdgpu_eviction_fence_get_timeline_name,
.enable_signaling = amdgpu_eviction_fence_enable_signaling,
@@ -169,9 +168,9 @@ amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr)
ev_fence->evf_mgr = evf_mgr;
get_task_comm(ev_fence->timeline_name, current);
spin_lock_init(&ev_fence->lock);
- dma_fence_init(&ev_fence->base, &amdgpu_eviction_fence_ops,
- &ev_fence->lock, evf_mgr->ev_fence_ctx,
- atomic_inc_return(&evf_mgr->ev_fence_seq));
+ dma_fence_init64(&ev_fence->base, &amdgpu_eviction_fence_ops,
+ &ev_fence->lock, evf_mgr->ev_fence_ctx,
+ atomic_inc_return(&evf_mgr->ev_fence_seq));
return ev_fence;
}
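[Editor's note, not part of the patch] dma_fence_init64() declares the fence's seqno as a full 64-bit sequence at init time, replacing the per-ops .use_64bit_seqno flag removed above; the call shape is otherwise identical to dma_fence_init():

	dma_fence_init64(&f->base, &my_fence_ops, &f->lock,
			 context, atomic_inc_return(&seq));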
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index 91d638098889..b349bb3676d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -70,6 +70,7 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
[AMDGPU_PL_GWS] = "gws",
[AMDGPU_PL_OA] = "oa",
[AMDGPU_PL_DOORBELL] = "doorbell",
+ [AMDGPU_PL_MMIO_REMAP] = "mmioremap",
};
unsigned int hw_ip, i;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 8cecf25996ed..c7843e336310 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -42,50 +42,14 @@
#include "amdgpu_reset.h"
/*
- * Fences mark an event in the GPUs pipeline and are used
- * for GPU/CPU synchronization. When the fence is written,
- * it is expected that all buffers associated with that fence
- * are no longer in use by the associated ring on the GPU and
- * that the relevant GPU caches have been flushed.
- */
-
-struct amdgpu_fence {
- struct dma_fence base;
-
- /* RB, DMA, etc. */
- struct amdgpu_ring *ring;
- ktime_t start_timestamp;
-};
-
-static struct kmem_cache *amdgpu_fence_slab;
-
-int amdgpu_fence_slab_init(void)
-{
- amdgpu_fence_slab = KMEM_CACHE(amdgpu_fence, SLAB_HWCACHE_ALIGN);
- if (!amdgpu_fence_slab)
- return -ENOMEM;
- return 0;
-}
-
-void amdgpu_fence_slab_fini(void)
-{
- rcu_barrier();
- kmem_cache_destroy(amdgpu_fence_slab);
-}
-/*
* Cast helper
*/
static const struct dma_fence_ops amdgpu_fence_ops;
-static const struct dma_fence_ops amdgpu_job_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
- if (__f->base.ops == &amdgpu_fence_ops ||
- __f->base.ops == &amdgpu_job_fence_ops)
- return __f;
-
- return NULL;
+ return __f;
}
/**
@@ -129,57 +93,32 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
* amdgpu_fence_emit - emit a fence on the requested ring
*
* @ring: ring the fence is associated with
- * @f: resulting fence object
- * @job: job the fence is embedded in
+ * @af: amdgpu fence input
* @flags: flags to pass into the subordinate .emit_fence() call
*
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
unsigned int flags)
{
struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
- struct amdgpu_fence *am_fence;
struct dma_fence __rcu **ptr;
uint32_t seq;
int r;
- if (job == NULL) {
- /* create a sperate hw fence */
- am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
- if (am_fence == NULL)
- return -ENOMEM;
- fence = &am_fence->base;
- am_fence->ring = ring;
- } else {
- /* take use of job-embedded fence */
- fence = &job->hw_fence;
- }
+ fence = &af->base;
+ af->ring = ring;
seq = ++ring->fence_drv.sync_seq;
- if (job && job->job_run_counter) {
- /* reinit seq for resubmitted jobs */
- fence->seqno = seq;
- /* TO be inline with external fence creation and other drivers */
- dma_fence_get(fence);
- } else {
- if (job) {
- dma_fence_init(fence, &amdgpu_job_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx, seq);
- /* Against remove in amdgpu_job_{free, free_cb} */
- dma_fence_get(fence);
- } else {
- dma_fence_init(fence, &amdgpu_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx, seq);
- }
- }
+ dma_fence_init(fence, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
+ amdgpu_fence_save_wptr(af);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
@@ -204,8 +143,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
*/
rcu_assign_pointer(*ptr, dma_fence_get(fence));
- *f = fence;
-
return 0;
}
@@ -292,6 +229,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
do {
struct dma_fence *fence, **ptr;
+ struct amdgpu_fence *am_fence;
++last_seq;
last_seq &= drv->num_fences_mask;
@@ -304,9 +242,14 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;
+ /* Save the wptr in the fence driver so we know what the last processed
+ * wptr was. This is required for re-emitting the ring state for
+ * queues that are reset but are not guilty and thus have no guilty fence.
+ */
+ am_fence = container_of(fence, struct amdgpu_fence, base);
+ drv->signalled_wptr = am_fence->wptr;
dma_fence_signal(fence);
dma_fence_put(fence);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
} while (last_seq != seq);
@@ -326,7 +269,9 @@ static void amdgpu_fence_fallback(struct timer_list *t)
fence_drv.fallback_timer);
if (amdgpu_fence_process(ring))
- DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
+ dev_warn(ring->adev->dev,
+ "Fence fallback timer expired on ring %s\n",
+ ring->name);
}
/**
@@ -698,36 +643,6 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
- *
- * @ring: fence of the ring to be cleared
- *
- */
-void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
-{
- int i;
- struct dma_fence *old, **ptr;
-
- for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
- ptr = &ring->fence_drv.fences[i];
- old = rcu_dereference_protected(*ptr, 1);
- if (old && old->ops == &amdgpu_job_fence_ops) {
- struct amdgpu_job *job;
-
- /* For non-scheduler bad job, i.e. failed ib test, we need to signal
- * it right here or we won't be able to track them in fence_drv
- * and they will remain unsignaled during sa_bo free.
- */
- job = container_of(old, struct amdgpu_job, hw_fence);
- if (!job->base.s_fence && !dma_fence_is_signaled(old))
- dma_fence_signal(old);
- RCU_INIT_POINTER(*ptr, NULL);
- dma_fence_put(old);
- }
- }
-}
-
-/**
* amdgpu_fence_driver_set_error - set error code on fences
* @ring: the ring which contains the fences
* @error: the error code to set
@@ -764,6 +679,120 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
amdgpu_fence_process(ring);
}
+
+/*
+ * Kernel queue reset handling
+ *
+ * The driver can reset individual queues for most engines, but those queues
+ * may contain work from multiple contexts. Resetting the queue will
+ * lose all of that state. In order to minimize the collateral damage, the
+ * driver saves the ring contents which are not associated with the guilty
+ * context prior to resetting the queue. After resetting the queue, the
+ * contents from the other contexts are re-emitted to the rings so that they
+ * can be processed by the engine. To handle this, we save the queue's write
+ * pointer (wptr) in the fences associated with each context. If we get a
+ * queue timeout, we can then use the wptrs from the fences to determine
+ * which data needs to be saved out of the queue's ring buffer.
+ */
+
+/**
+ * amdgpu_fence_driver_guilty_force_completion - force completion of a guilty fence
+ *
+ * @af: the guilty fence to force-complete
+ *
+ */
+void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
+{
+ struct dma_fence *unprocessed;
+ struct dma_fence __rcu **ptr;
+ struct amdgpu_fence *fence;
+ struct amdgpu_ring *ring = af->ring;
+ unsigned long flags;
+ u32 seq, last_seq;
+
+ last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
+ seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
+
+ /* mark all fences from the guilty context with an error */
+ spin_lock_irqsave(&ring->fence_drv.lock, flags);
+ do {
+ last_seq++;
+ last_seq &= ring->fence_drv.num_fences_mask;
+
+ ptr = &ring->fence_drv.fences[last_seq];
+ rcu_read_lock();
+ unprocessed = rcu_dereference(*ptr);
+
+ if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) {
+ fence = container_of(unprocessed, struct amdgpu_fence, base);
+
+ if (fence == af)
+ dma_fence_set_error(&fence->base, -ETIME);
+ else if (fence->context == af->context)
+ dma_fence_set_error(&fence->base, -ECANCELED);
+ }
+ rcu_read_unlock();
+ } while (last_seq != seq);
+ spin_unlock_irqrestore(&ring->fence_drv.lock, flags);
+ /* signal the guilty fence */
+ amdgpu_fence_write(ring, (u32)af->base.seqno);
+ amdgpu_fence_process(ring);
+}
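
A waiter can later tell the guilty job apart from innocent ones cancelled
alongside it by inspecting the fence status; a minimal sketch using the
generic dma_fence API (illustrative, not part of this patch):

    int status = dma_fence_get_status(fence);

    /* status > 0: signaled without error
     * status == -ETIME: fence of the guilty context
     * status == -ECANCELED: innocent fence cancelled with it
     */
    if (status == -ECANCELED)
        resubmit_job();    /* hypothetical recovery helper */
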
+
+void amdgpu_fence_save_wptr(struct amdgpu_fence *af)
+{
+ af->wptr = af->ring->wptr;
+}
+
+static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
+ u64 start_wptr, u32 end_wptr)
+{
+ unsigned int first_idx = start_wptr & ring->buf_mask;
+ unsigned int last_idx = end_wptr & ring->buf_mask;
+ unsigned int i;
+
+ /* Backup the contents of the ring buffer. */
+ for (i = first_idx; i != last_idx; ++i, i &= ring->buf_mask)
+ ring->ring_backup[ring->ring_backup_entries_to_copy++] = ring->ring[i];
+}
+
+void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ struct dma_fence *unprocessed;
+ struct dma_fence __rcu **ptr;
+ struct amdgpu_fence *fence;
+ u64 wptr;
+ u32 seq, last_seq;
+
+ last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
+ seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
+ wptr = ring->fence_drv.signalled_wptr;
+ ring->ring_backup_entries_to_copy = 0;
+
+ do {
+ last_seq++;
+ last_seq &= ring->fence_drv.num_fences_mask;
+
+ ptr = &ring->fence_drv.fences[last_seq];
+ rcu_read_lock();
+ unprocessed = rcu_dereference(*ptr);
+
+ if (unprocessed && !dma_fence_is_signaled(unprocessed)) {
+ fence = container_of(unprocessed, struct amdgpu_fence, base);
+
+ /* save everything if the ring is not guilty, otherwise
+ * just save the content from other contexts.
+ */
+ if (!guilty_fence || (fence->context != guilty_fence->context))
+ amdgpu_ring_backup_unprocessed_command(ring, wptr,
+ fence->wptr);
+ wptr = fence->wptr;
+ }
+ rcu_read_unlock();
+ } while (last_seq != seq);
+}
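
The wrap-around copy above relies on the ring size being a power of two so
that "index & buf_mask" wraps. A standalone sketch with illustrative names
(not driver code): for a 16-entry ring, buf_mask = 15, start_wptr = 14 and
end_wptr = 18 copy indices 14, 15, 0 and 1.

    static unsigned int ring_copy(const u32 *ring, u32 *backup,
                                  u32 buf_mask, u64 start_wptr,
                                  u32 end_wptr)
    {
        unsigned int i = start_wptr & buf_mask;
        unsigned int last = end_wptr & buf_mask;
        unsigned int n = 0;

        while (i != last) {
            backup[n++] = ring[i];
            i = (i + 1) & buf_mask;    /* wrap at the end of the ring */
        }
        return n;
    }
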
+
/*
* Common fence implementation
*/
@@ -778,13 +807,6 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
return (const char *)to_amdgpu_fence(f)->ring->name;
}
-static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
-{
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
-
- return (const char *)to_amdgpu_ring(job->base.sched)->name;
-}
-
/**
* amdgpu_fence_enable_signaling - enable signalling on fence
* @f: fence
@@ -802,23 +824,6 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
}
/**
- * amdgpu_job_fence_enable_signaling - enable signalling on job fence
- * @f: fence
- *
- * This is the simliar function with amdgpu_fence_enable_signaling above, it
- * only handles the job embedded fence.
- */
-static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
-{
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
-
- if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
- amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
-
- return true;
-}
-
-/**
* amdgpu_fence_free - free up the fence memory
*
* @rcu: RCU callback head
@@ -830,22 +835,7 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
/* free fence_slab if it's separated fence*/
- kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
-}
-
-/**
- * amdgpu_job_fence_free - free up the job with embedded fence
- *
- * @rcu: RCU callback head
- *
- * Free up the job with embedded fence after the RCU grace period.
- */
-static void amdgpu_job_fence_free(struct rcu_head *rcu)
-{
- struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
-
- /* free job if fence has a parent job */
- kfree(container_of(f, struct amdgpu_job, hw_fence));
+ kfree(to_amdgpu_fence(f));
}
/**
@@ -861,19 +851,6 @@ static void amdgpu_fence_release(struct dma_fence *f)
call_rcu(&f->rcu, amdgpu_fence_free);
}
-/**
- * amdgpu_job_fence_release - callback that job embedded fence can be freed
- *
- * @f: fence
- *
- * This is the simliar function with amdgpu_fence_release above, it
- * only handles the job embedded fence.
- */
-static void amdgpu_job_fence_release(struct dma_fence *f)
-{
- call_rcu(&f->rcu, amdgpu_job_fence_free);
-}
-
static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
@@ -881,13 +858,6 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
.release = amdgpu_fence_release,
};
-static const struct dma_fence_ops amdgpu_job_fence_ops = {
- .get_driver_name = amdgpu_fence_get_driver_name,
- .get_timeline_name = amdgpu_job_fence_get_timeline_name,
- .enable_signaling = amdgpu_job_fence_enable_signaling,
- .release = amdgpu_job_fence_release,
-};
-
/*
* Fence debugfs
*/
@@ -957,7 +927,6 @@ static int gpu_recover_get(void *data, u64 *val)
*val = atomic_read(&adev->reset_domain->reset_res);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index 1ae88c459da5..b0082aa7f3c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -144,7 +144,8 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
/* If algo exists, it means that the i2c_adapter's initialized */
if (!adev->pm.fru_eeprom_i2c_bus || !adev->pm.fru_eeprom_i2c_bus->algo) {
- DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
+ dev_warn(adev->dev,
+ "Cannot access FRU, EEPROM accessor not initialized");
return -ENODEV;
}
@@ -152,19 +153,22 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, fru_addr, buf,
sizeof(buf));
if (len != 8) {
- DRM_ERROR("Couldn't read the IPMI Common Header: %d", len);
+ dev_err(adev->dev, "Couldn't read the IPMI Common Header: %d",
+ len);
return len < 0 ? len : -EIO;
}
if (buf[0] != 1) {
- DRM_ERROR("Bad IPMI Common Header version: 0x%02x", buf[0]);
+ dev_err(adev->dev, "Bad IPMI Common Header version: 0x%02x",
+ buf[0]);
return -EIO;
}
for (csum = 0; len > 0; len--)
csum += buf[len - 1];
if (csum) {
- DRM_ERROR("Bad IPMI Common Header checksum: 0x%02x", csum);
+ dev_err(adev->dev, "Bad IPMI Common Header checksum: 0x%02x",
+ csum);
return -EIO;
}
@@ -179,12 +183,14 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
/* Read the header of the PIA. */
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, buf, 3);
if (len != 3) {
- DRM_ERROR("Couldn't read the Product Info Area header: %d", len);
+ dev_err(adev->dev,
+ "Couldn't read the Product Info Area header: %d", len);
return len < 0 ? len : -EIO;
}
if (buf[0] != 1) {
- DRM_ERROR("Bad IPMI Product Info Area version: 0x%02x", buf[0]);
+ dev_err(adev->dev, "Bad IPMI Product Info Area version: 0x%02x",
+ buf[0]);
return -EIO;
}
@@ -197,14 +203,16 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, pia, size);
if (len != size) {
kfree(pia);
- DRM_ERROR("Couldn't read the Product Info Area: %d", len);
+ dev_err(adev->dev, "Couldn't read the Product Info Area: %d",
+ len);
return len < 0 ? len : -EIO;
}
for (csum = 0; size > 0; size--)
csum += pia[size - 1];
if (csum) {
- DRM_ERROR("Bad Product Info Area checksum: 0x%02x", csum);
+ dev_err(adev->dev, "Bad Product Info Area checksum: 0x%02x",
+ csum);
kfree(pia);
return -EIO;
}
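
The checks above all use the IPMI zero-checksum convention: the final byte
of each block is chosen so that the 8-bit sum of the whole block is zero,
which is why simply summing the bytes and comparing against zero suffices.
A minimal standalone sketch (illustrative, not driver code):

    static bool ipmi_block_valid(const u8 *buf, size_t len)
    {
        u8 csum = 0;

        while (len--)
            csum += buf[len];    /* u8 arithmetic wraps mod 256 */
        return csum == 0;        /* any non-zero sum means corruption */
    }
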
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index b2033f8352f5..d2237ce9da70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -302,7 +302,6 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages)
{
unsigned t;
- unsigned p;
int i, j;
u64 page_base;
/* Starting from VEGA10, system bit must be 0 to mean invalid. */
@@ -316,8 +315,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
return;
t = offset / AMDGPU_GPU_PAGE_SIZE;
- p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
- for (i = 0; i < pages; i++, p++) {
+ for (i = 0; i < pages; i++) {
page_base = adev->dummy_page_addr;
if (!adev->gart.ptr)
continue;
@@ -370,6 +368,42 @@ void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
}
/**
+ * amdgpu_gart_map_vram_range - map VRAM pages into the GART page table
+ *
+ * @adev: amdgpu_device pointer
+ * @pa: physical address of the first page to be mapped
+ * @start_page: first page to map in the GART aperture
+ * @num_pages: number of pages to be mapped
+ * @flags: page table entry flags
+ * @dst: CPU address of the GART table
+ *
+ * Binds a BO that is allocated in VRAM to the GART page table
+ * (all ASICs).
+ *
+ * Useful when a kernel BO is located in VRAM but
+ * needs to be accessed from the GART address space.
+ */
+void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa,
+ uint64_t start_page, uint64_t num_pages,
+ uint64_t flags, void *dst)
+{
+ u32 i, idx;
+
+ /* The SYSTEM flag indicates the pages aren't in VRAM. */
+ WARN_ON_ONCE(flags & AMDGPU_PTE_SYSTEM);
+
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return;
+
+ for (i = 0; i < num_pages; ++i) {
+ amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
+ start_page + i, pa + AMDGPU_GPU_PAGE_SIZE * i, flags);
+ }
+
+ drm_dev_exit(idx);
+}
+
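
A hedged usage sketch for the new helper (the BO, GART offset and PTE flags
are assumptions for illustration): map a contiguous VRAM BO into the GART
aperture so it can be reached through GART addresses.

    u64 pa = amdgpu_gmc_vram_pa(adev, bo);    /* VRAM physical address */
    u64 first_page = gart_offset >> AMDGPU_GPU_PAGE_SHIFT;
    u64 npages = amdgpu_bo_size(bo) >> AMDGPU_GPU_PAGE_SHIFT;

    amdgpu_gart_map_vram_range(adev, pa, first_page, npages,
                               AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE |
                               AMDGPU_PTE_WRITEABLE, adev->gart.ptr);
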
+/**
* amdgpu_gart_bind - bind pages into the gart page table
*
* @adev: amdgpu_device pointer
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 7cc980bf4725..d3118275ddae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -64,5 +64,8 @@ void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
void *dst);
void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, dma_addr_t *dma_addr, uint64_t flags);
+void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa,
+ uint64_t start_page, uint64_t num_pages,
+ uint64_t flags, void *dst);
void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 0ecc88df7208..3e38c5db2987 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -198,7 +198,7 @@ static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj);
amdgpu_hmm_unregister(aobj);
- ttm_bo_put(&aobj->tbo);
+ ttm_bo_fini(&aobj->tbo);
}
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
@@ -317,7 +317,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
*/
if (!vm->is_compute_context || !vm->process_info)
return 0;
- if (!obj->import_attach ||
+ if (!drm_gem_is_imported(obj) ||
!dma_buf_is_dynamic(obj->import_attach->dmabuf))
return 0;
mutex_lock_nested(&vm->process_info->lock, 1);
@@ -329,7 +329,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
dev_warn(adev->dev, "validate_and_fence failed: %d\n", r);
if (ti) {
- dev_warn(adev->dev, "pid %d\n", ti->pid);
+ dev_warn(adev->dev, "pid %d\n", ti->task.pid);
amdgpu_vm_put_task_info(ti);
}
}
@@ -443,15 +443,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
int r;
/* reject invalid gem flags */
- if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_VRAM_CLEARED |
- AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
- AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
- AMDGPU_GEM_CREATE_ENCRYPTED |
- AMDGPU_GEM_CREATE_GFX12_DCC |
- AMDGPU_GEM_CREATE_DISCARDABLE))
+ if (flags & ~AMDGPU_GEM_CREATE_SETTABLE_MASK)
return -EINVAL;
/* reject invalid gem domains */
@@ -466,6 +458,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
/* always clear VRAM */
flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
+ if (args->in.domains & AMDGPU_GEM_DOMAIN_MMIO_REMAP)
+ return -EINVAL;
+
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
@@ -536,7 +531,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_amdgpu_gem_userptr *args = data;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct drm_gem_object *gobj;
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
struct amdgpu_bo *bo;
uint32_t handle;
int r;
@@ -577,15 +572,20 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
- r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
- &range);
- if (r)
+ range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!range))
+ return -ENOMEM;
+ r = amdgpu_ttm_tt_get_user_pages(bo, range);
+ if (r) {
+ amdgpu_hmm_range_free(range);
goto release_object;
-
+ }
r = amdgpu_bo_reserve(bo, true);
if (r)
goto user_pages_done;
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
@@ -601,8 +601,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
user_pages_done:
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
-
+ amdgpu_hmm_range_free(range);
release_object:
drm_gem_object_put(gobj);
@@ -791,36 +790,6 @@ error:
return fence;
}
-/**
- * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
- *
- * @adev: amdgpu_device pointer
- * @flags: GEM UAPI flags
- *
- * Returns the GEM UAPI flags mapped into hardware for the ASIC.
- */
-uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT_FLAG(adev);
- if (flags & AMDGPU_VM_PAGE_NOALLOC)
- pte_flag |= AMDGPU_PTE_NOALLOC;
-
- if (adev->gmc.gmc_funcs->map_mtype)
- pte_flag |= amdgpu_gmc_map_mtype(adev,
- flags & AMDGPU_VM_MTYPE_MASK);
-
- return pte_flag;
-}
-
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -841,7 +810,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct dma_fence_chain *timeline_chain = NULL;
struct dma_fence *fence;
struct drm_exec exec;
- uint64_t va_flags;
uint64_t vm_size;
int r = 0;
@@ -945,10 +913,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
- va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
- va_flags);
+ args->flags);
break;
case AMDGPU_VA_OP_UNMAP:
r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
@@ -960,10 +927,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->map_size);
break;
case AMDGPU_VA_OP_REPLACE:
- va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
- va_flags);
+ args->flags);
break;
default:
break;
@@ -997,17 +963,34 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *gobj;
struct amdgpu_vm_bo_base *base;
struct amdgpu_bo *robj;
+ struct drm_exec exec;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
int r;
+ if (args->padding)
+ return -EINVAL;
+
gobj = drm_gem_object_lookup(filp, args->handle);
if (!gobj)
return -ENOENT;
robj = gem_to_amdgpu_bo(gobj);
- r = amdgpu_bo_reserve(robj, false);
- if (unlikely(r))
- goto out;
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_lock_obj(&exec, gobj);
+ drm_exec_retry_on_contention(&exec);
+ if (r)
+ goto out_exec;
+
+ if (args->op == AMDGPU_GEM_OP_GET_MAPPING_INFO) {
+ r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (r)
+ goto out_exec;
+ }
+ }
switch (args->op) {
case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
@@ -1018,29 +1001,26 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
info.domains = robj->preferred_domains;
info.domain_flags = robj->flags;
- amdgpu_bo_unreserve(robj);
+ drm_exec_fini(&exec);
if (copy_to_user(out, &info, sizeof(info)))
r = -EFAULT;
break;
}
case AMDGPU_GEM_OP_SET_PLACEMENT:
- if (robj->tbo.base.import_attach &&
+ if (drm_gem_is_imported(&robj->tbo.base) &&
args->value & AMDGPU_GEM_DOMAIN_VRAM) {
r = -EINVAL;
- amdgpu_bo_unreserve(robj);
- break;
+ goto out_exec;
}
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
r = -EPERM;
- amdgpu_bo_unreserve(robj);
- break;
+ goto out_exec;
}
for (base = robj->vm_bo; base; base = base->next)
if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
r = -EINVAL;
- amdgpu_bo_unreserve(robj);
- goto out;
+ goto out_exec;
}
@@ -1053,17 +1033,146 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
amdgpu_vm_bo_invalidate(robj, true);
+ drm_exec_fini(&exec);
+ break;
+ case AMDGPU_GEM_OP_GET_MAPPING_INFO: {
+ struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_find(&fpriv->vm, robj);
+ struct drm_amdgpu_gem_vm_entry *vm_entries;
+ struct amdgpu_bo_va_mapping *mapping;
+ int num_mappings = 0;
+ /*
+	 * On input, num_entries is the size of the user-allocated array of
+	 * drm_amdgpu_gem_vm_entry stored at args->value.
+	 * On output, num_entries is the number of mappings the bo has.
+ * If that number is larger than the size of the array, the ioctl must
+ * be retried.
+ */
+ vm_entries = kvcalloc(args->num_entries, sizeof(*vm_entries), GFP_KERNEL);
+ if (!vm_entries)
+ return -ENOMEM;
+
+ amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) {
+ if (num_mappings < args->num_entries) {
+ vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].offset = mapping->offset;
+ vm_entries[num_mappings].flags = mapping->flags;
+ }
+ num_mappings += 1;
+ }
+
+ amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) {
+ if (num_mappings < args->num_entries) {
+ vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].offset = mapping->offset;
+ vm_entries[num_mappings].flags = mapping->flags;
+ }
+ num_mappings += 1;
+ }
- amdgpu_bo_unreserve(robj);
+ drm_exec_fini(&exec);
+
+ if (num_mappings > 0 && num_mappings <= args->num_entries)
+ if (copy_to_user(u64_to_user_ptr(args->value), vm_entries, num_mappings * sizeof(*vm_entries)))
+ r = -EFAULT;
+
+ args->num_entries = num_mappings;
+
+ kvfree(vm_entries);
break;
+ }
default:
- amdgpu_bo_unreserve(robj);
+ drm_exec_fini(&exec);
r = -EINVAL;
}
-out:
drm_gem_object_put(gobj);
return r;
+out_exec:
+ drm_exec_fini(&exec);
+ drm_gem_object_put(gobj);
+ return r;
+}
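
The locking above follows the standard drm_exec pattern; its minimal shape
for a single GEM object looks like this (illustrative sketch):

    struct drm_exec exec;
    int ret;

    drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
    drm_exec_until_all_locked(&exec) {
        ret = drm_exec_lock_obj(&exec, gobj);
        /* on contention everything is unlocked and the loop restarts */
        drm_exec_retry_on_contention(&exec);
        if (ret)
            break;
    }
    /* ... operate on the locked object(s) ... */
    drm_exec_fini(&exec);    /* drops all held locks */
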
+
+/**
+ * amdgpu_gem_list_handles_ioctl - get information about a process' buffer objects
+ *
+ * @dev: drm device pointer
+ * @data: drm_amdgpu_gem_list_handles
+ * @filp: drm file pointer
+ *
+ * On input, num_entries is the size of the entries array.
+ * On output, num_entries is the number of bos in the process.
+ * If that number is larger than the size of the array, the ioctl must
+ * be retried.
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
+ */
+int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_amdgpu_gem_list_handles *args = data;
+ struct drm_amdgpu_gem_list_handles_entry *bo_entries;
+ struct drm_gem_object *gobj;
+ int id, ret = 0;
+ int bo_index = 0;
+ int num_bos = 0;
+
+ spin_lock(&filp->table_lock);
+ idr_for_each_entry(&filp->object_idr, gobj, id)
+ num_bos += 1;
+ spin_unlock(&filp->table_lock);
+
+ if (args->num_entries < num_bos) {
+ args->num_entries = num_bos;
+ return 0;
+ }
+
+ if (num_bos == 0) {
+ args->num_entries = 0;
+ return 0;
+ }
+
+ bo_entries = kvcalloc(num_bos, sizeof(*bo_entries), GFP_KERNEL);
+ if (!bo_entries)
+ return -ENOMEM;
+
+ spin_lock(&filp->table_lock);
+ idr_for_each_entry(&filp->object_idr, gobj, id) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+ struct drm_amdgpu_gem_list_handles_entry *bo_entry;
+
+ if (bo_index >= num_bos) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ bo_entry = &bo_entries[bo_index];
+
+ bo_entry->size = amdgpu_bo_size(bo);
+ bo_entry->alloc_flags = bo->flags & AMDGPU_GEM_CREATE_SETTABLE_MASK;
+ bo_entry->preferred_domains = bo->preferred_domains;
+ bo_entry->gem_handle = id;
+ bo_entry->alignment = bo->tbo.page_alignment;
+
+ if (bo->tbo.base.import_attach)
+ bo_entry->flags |= AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT;
+
+ bo_index += 1;
+ }
+ spin_unlock(&filp->table_lock);
+
+ args->num_entries = bo_index;
+
+ if (!ret)
+ if (copy_to_user(u64_to_user_ptr(args->entries), bo_entries, num_bos * sizeof(*bo_entries)))
+ ret = -EFAULT;
+
+ kvfree(bo_entries);
+
+ return ret;
}
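
A hedged userspace sketch of the query/retry contract described above (the
ioctl macro name follows the usual DRM_IOCTL_AMDGPU_* convention and is an
assumption here; headers and error handling trimmed):

    struct drm_amdgpu_gem_list_handles args = {0};
    struct drm_amdgpu_gem_list_handles_entry *entries = NULL;

    for (;;) {
        uint32_t n = args.num_entries;

        entries = calloc(n ? n : 1, sizeof(*entries));
        args.entries = (uintptr_t)entries;
        if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES, &args))
            break;              /* hard error */
        if (args.num_entries <= n)
            break;              /* entries[] now holds the snapshot */
        free(entries);          /* BO count grew in between: retry larger */
    }
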
static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index 3a8f57900a3a..b558336bc4c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -63,13 +63,28 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+#define AMDGPU_GEM_CREATE_SETTABLE_MASK (AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | \
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS | \
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC | \
+ AMDGPU_GEM_CREATE_VRAM_CLEARED | \
+ AMDGPU_GEM_CREATE_VM_ALWAYS_VALID | \
+ AMDGPU_GEM_CREATE_EXPLICIT_SYNC | \
+ AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE | \
+ AMDGPU_GEM_CREATE_ENCRYPTED | \
+ AMDGPU_GEM_CREATE_GFX12_DCC | \
+ AMDGPU_GEM_CREATE_DISCARDABLE | \
+ AMDGPU_GEM_CREATE_COHERENT | \
+ AMDGPU_GEM_CREATE_UNCACHED | \
+ AMDGPU_GEM_CREATE_EXT_COHERENT)
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index c5646af055ab..8b118c53f351 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -33,6 +33,7 @@
#include "amdgpu_reset.h"
#include "amdgpu_xcp.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_mes.h"
#include "nvd.h"
/* delay 0.1 second to enable gfx off feature */
@@ -149,7 +150,7 @@ static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
{
if (amdgpu_compute_multipipe != -1) {
- DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
+ dev_info(adev->dev, "amdgpu: forcing compute pipe policy %d\n",
amdgpu_compute_multipipe);
return amdgpu_compute_multipipe == 1;
}
@@ -674,7 +675,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
* generation exposes more than 64 queues. If so, the
* definition of queue_mask needs updating */
if (WARN_ON(i > (sizeof(queue_mask)*8))) {
- DRM_ERROR("Invalid KCQ enabled: %d\n", i);
+ dev_err(adev->dev, "Invalid KCQ enabled: %d\n", i);
break;
}
@@ -683,15 +684,15 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
amdgpu_device_flush_hdp(adev, NULL);
- DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
- kiq_ring->queue);
+ dev_info(adev->dev, "kiq ring mec %d pipe %d q %d\n", kiq_ring->me,
+ kiq_ring->pipe, kiq_ring->queue);
spin_lock(&kiq->ring_lock);
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_compute_rings +
kiq->pmf->set_resources_size);
if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
spin_unlock(&kiq->ring_lock);
return r;
}
@@ -712,7 +713,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
- DRM_ERROR("KCQ enable failed\n");
+ dev_err(adev->dev, "KCQ enable failed\n");
return r;
}
@@ -734,7 +735,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_mes_map_legacy_queue(adev,
&adev->gfx.gfx_ring[j]);
if (r) {
- DRM_ERROR("failed to map gfx queue\n");
+ dev_err(adev->dev, "failed to map gfx queue\n");
return r;
}
}
@@ -748,7 +749,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_gfx_rings);
if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
spin_unlock(&kiq->ring_lock);
return r;
}
@@ -769,7 +770,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
- DRM_ERROR("KGQ enable failed\n");
+ dev_err(adev->dev, "KGQ enable failed\n");
return r;
}
@@ -1030,7 +1031,7 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
ih_data.head = *ras_if;
- DRM_ERROR("CP ECC ERROR IRQ\n");
+ dev_err(adev->dev, "CP ECC ERROR IRQ\n");
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
@@ -1102,6 +1103,9 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_
might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ if (amdgpu_in_reset(adev))
+ goto failed_kiq_read;
+
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
}
@@ -1171,6 +1175,8 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3
might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ if (amdgpu_in_reset(adev))
+ goto failed_kiq_write;
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
@@ -1189,6 +1195,75 @@ failed_kiq_write:
dev_err(adev->dev, "failed to write reg:%x\n", reg);
}
+int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev)
+{
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ if (amdgpu_device_skip_hw_access(adev))
+ return 0;
+
+ if (adev->enable_mes_kiq && adev->mes.ring[0].sched.ready)
+ return amdgpu_mes_hdp_flush(adev);
+
+	if (!ring->funcs->emit_hdp_flush)
+		return -EOPNOTSUPP;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ r = amdgpu_ring_alloc(ring, 32);
+ if (r)
+ goto failed_unlock;
+
+ amdgpu_ring_emit_hdp_flush(ring);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+	/* Don't wait any longer if a GPU reset is in progress, as that may
+	 * block the gpu_recover() routine forever, e.g. if this flush is
+	 * triggered from TTM, ttm_bo_lock_delayed_workqueue() will never
+	 * return while we keep waiting here, hanging gpu_recover().
+	 *
+	 * Also don't wait when called from IRQ context.
+	 */
+ if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
+ goto failed_kiq_hdp_flush;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ if (amdgpu_in_reset(adev))
+ goto failed_kiq_hdp_flush;
+
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY) {
+ dev_err(adev->dev, "failed to flush HDP via KIQ timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+
+failed_undo:
+ amdgpu_ring_undo(ring);
+failed_unlock:
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+failed_kiq_hdp_flush:
+ dev_err(adev->dev, "failed to flush HDP via KIQ\n");
+ return r < 0 ? r : -EIO;
+}
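
A hedged caller-side sketch (the fallback choice is an assumption, not
mandated by this patch): flush through the KIQ where no regular ring is
usable, falling back to the direct path when the ring cannot emit the flush.

    if (amdgpu_kiq_hdp_flush(adev) == -EOPNOTSUPP)
        amdgpu_hdp_flush(adev, NULL);    /* direct register path */
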
+
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
{
if (amdgpu_num_kcq == -1) {
@@ -1474,7 +1549,8 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
owner = (void *)(unsigned long)atomic_inc_return(&counter);
r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
- 64, 0, &job);
+ 64, 0, &job,
+ AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER);
if (r)
goto err;
@@ -1594,7 +1670,6 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
ret = amdgpu_gfx_run_cleaner_shader(adev, value);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
if (ret)
@@ -2279,7 +2354,7 @@ void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring)
* Return:
* return the latest index.
*/
-u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer)
+u32 amdgpu_gfx_csb_preamble_start(u32 *buffer)
{
u32 count = 0;
@@ -2303,7 +2378,7 @@ u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer)
* Return:
* return the latest index.
*/
-u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count)
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count)
{
const struct cs_section_def *sect = NULL;
const struct cs_extent_def *ext = NULL;
@@ -2330,7 +2405,7 @@ u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer,
* @buffer: This is an output variable that gets the PACKET3 preamble end.
 * @count: Index at which to set the preamble end.
*/
-void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count)
+void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count)
{
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
@@ -2479,3 +2554,4 @@ void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev)
&amdgpu_debugfs_compute_sched_mask_fops);
#endif
}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 08f268dab8f5..efd61a1ccc66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -615,6 +615,7 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id);
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id);
+int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev);
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);
@@ -642,9 +643,9 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work);
void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring);
-u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer);
-u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count);
-void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count);
+u32 amdgpu_gfx_csb_preamble_start(u32 *buffer);
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count);
+void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count);
void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev);
void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 6b0fbbb91e57..869bceb0fe2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -38,6 +38,13 @@
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>
+static const u64 four_gb = 0x100000000ULL;
+
+bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev)
+{
+ return adev->gmc.xgmi.connected_to_cpu || amdgpu_virt_xgmi_migrate_enabled(adev);
+}
+
/**
* amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
*
@@ -251,10 +258,20 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc
u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;
mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
- mc->gart_start = hive_vram_end + 1;
+	/* node_segment_size may not be 4GB aligned on SRIOV, so align up. */
+ mc->gart_start = ALIGN(hive_vram_end + 1, four_gb);
mc->gart_end = mc->gart_start + mc->gart_size - 1;
- mc->fb_start = hive_vram_start;
- mc->fb_end = hive_vram_end;
+ if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
+ /* set mc->vram_start to 0 to switch the returned GPU address of
+ * amdgpu_bo_create_reserved() from FB aperture to GART aperture.
+ */
+ mc->vram_start = 0;
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ mc->visible_vram_size = min(mc->visible_vram_size, mc->real_vram_size);
+ } else {
+ mc->fb_start = hive_vram_start;
+ mc->fb_end = hive_vram_end;
+ }
dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
@@ -276,7 +293,6 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc
void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
enum amdgpu_gart_placement gart_placement)
{
- const uint64_t four_gb = 0x100000000ULL;
u64 size_af, size_bf;
/*To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START*/
u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);
@@ -581,6 +597,9 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
/* reserve engine 5 for firmware */
if (adev->enable_mes)
vm_inv_engs[i] &= ~(1 << 5);
+ /* reserve engine 6 for uni mes */
+ if (adev->enable_uni_mes)
+ vm_inv_engs[i] &= ~(1 << 6);
/* reserve mmhub engine 3 for firmware */
if (adev->enable_umsch_mm)
vm_inv_engs[i] &= ~(1 << 3);
@@ -674,7 +693,7 @@ void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB);
if (r)
goto error_alloc;
@@ -1041,9 +1060,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
*/
u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21;
- u64 vram_addr = adev->vm_manager.vram_base_offset -
- adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
- u64 vram_end = vram_addr + vram_size;
+ u64 vram_addr, vram_end;
u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
int idx;
@@ -1056,6 +1073,11 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
flags |= AMDGPU_PDE_PTE_FLAG(adev);
+ vram_addr = adev->vm_manager.vram_base_offset;
+ if (!amdgpu_virt_xgmi_migrate_enabled(adev))
+ vram_addr -= adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+ vram_end = vram_addr + vram_size;
+
/* The first n PDE0 entries are used as PTE,
* pointing to vram
*/
@@ -1429,3 +1451,232 @@ bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev)
return false;
}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev)
+{
+ switch (adev->gmc.num_mem_partitions) {
+ case 0:
+ return UNKNOWN_MEMORY_PARTITION_MODE;
+ case 1:
+ return AMDGPU_NPS1_PARTITION_MODE;
+ case 2:
+ return AMDGPU_NPS2_PARTITION_MODE;
+ case 4:
+ return AMDGPU_NPS4_PARTITION_MODE;
+ case 8:
+ return AMDGPU_NPS8_PARTITION_MODE;
+ default:
+ return AMDGPU_NPS1_PARTITION_MODE;
+ }
+}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
+{
+ enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
+
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->get_memory_partition_mode)
+ mode = adev->nbio.funcs->get_memory_partition_mode(adev,
+ supp_modes);
+ else
+ dev_warn(adev->dev, "memory partition mode query is not supported\n");
+
+ return mode;
+}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ return amdgpu_gmc_get_vf_memory_partition(adev);
+ else
+ return amdgpu_gmc_get_memory_partition(adev, NULL);
+}
+
+static bool amdgpu_gmc_validate_partition_info(struct amdgpu_device *adev)
+{
+ enum amdgpu_memory_partition mode;
+ u32 supp_modes;
+ bool valid;
+
+ mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes);
+
+ /* Mode detected by hardware not present in supported modes */
+ if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
+ !(BIT(mode - 1) & supp_modes))
+ return false;
+
+ switch (mode) {
+ case UNKNOWN_MEMORY_PARTITION_MODE:
+ case AMDGPU_NPS1_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 1);
+ break;
+ case AMDGPU_NPS2_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 2);
+ break;
+ case AMDGPU_NPS4_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 3 ||
+ adev->gmc.num_mem_partitions == 4);
+ break;
+ case AMDGPU_NPS8_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 8);
+ break;
+ default:
+ valid = false;
+ }
+
+ return valid;
+}
+
+static bool amdgpu_gmc_is_node_present(int *node_ids, int num_ids, int nid)
+{
+ int i;
+
+ /* Check if node with id 'nid' is present in 'node_ids' array */
+ for (i = 0; i < num_ids; ++i)
+ if (node_ids[i] == nid)
+ return true;
+
+ return false;
+}
+
+static void
+amdgpu_gmc_init_acpi_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges)
+{
+ struct amdgpu_numa_info numa_info;
+ int node_ids[AMDGPU_MAX_MEM_RANGES];
+ int num_ranges = 0, ret;
+ int num_xcc, xcc_id;
+ uint32_t xcc_mask;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ xcc_mask = (1U << num_xcc) - 1;
+
+ for_each_inst(xcc_id, xcc_mask) {
+ ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
+ if (ret)
+ continue;
+
+ if (numa_info.nid == NUMA_NO_NODE) {
+ mem_ranges[0].size = numa_info.size;
+ mem_ranges[0].numa.node = numa_info.nid;
+ num_ranges = 1;
+ break;
+ }
+
+ if (amdgpu_gmc_is_node_present(node_ids, num_ranges,
+ numa_info.nid))
+ continue;
+
+ node_ids[num_ranges] = numa_info.nid;
+ mem_ranges[num_ranges].numa.node = numa_info.nid;
+ mem_ranges[num_ranges].size = numa_info.size;
+ ++num_ranges;
+ }
+
+ adev->gmc.num_mem_partitions = num_ranges;
+}
+
+void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges)
+{
+ enum amdgpu_memory_partition mode;
+ u32 start_addr = 0, size;
+ int i, r, l;
+
+ mode = amdgpu_gmc_query_memory_partition(adev);
+
+ switch (mode) {
+ case UNKNOWN_MEMORY_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 0;
+ break;
+ case AMDGPU_NPS1_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 1;
+ break;
+ case AMDGPU_NPS2_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 2;
+ break;
+ case AMDGPU_NPS4_PARTITION_MODE:
+ if (adev->flags & AMD_IS_APU)
+ adev->gmc.num_mem_partitions = 3;
+ else
+ adev->gmc.num_mem_partitions = 4;
+ break;
+ case AMDGPU_NPS8_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 8;
+ break;
+ default:
+ adev->gmc.num_mem_partitions = 1;
+ break;
+ }
+
+ /* Use NPS range info, if populated */
+ r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges,
+ &adev->gmc.num_mem_partitions);
+ if (!r) {
+ l = 0;
+ for (i = 1; i < adev->gmc.num_mem_partitions; ++i) {
+ if (mem_ranges[i].range.lpfn >
+ mem_ranges[i - 1].range.lpfn)
+ l = i;
+ }
+
+ } else {
+ if (!adev->gmc.num_mem_partitions) {
+ dev_warn(adev->dev,
+ "Not able to detect NPS mode, fall back to NPS1\n");
+ adev->gmc.num_mem_partitions = 1;
+ }
+ /* Fallback to sw based calculation */
+ size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
+ size /= adev->gmc.num_mem_partitions;
+
+ for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
+ mem_ranges[i].range.fpfn = start_addr;
+ mem_ranges[i].size =
+ ((u64)size << AMDGPU_GPU_PAGE_SHIFT);
+ mem_ranges[i].range.lpfn = start_addr + size - 1;
+ start_addr += size;
+ }
+
+ l = adev->gmc.num_mem_partitions - 1;
+ }
+
+ /* Adjust the last one */
+ mem_ranges[l].range.lpfn =
+ (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
+ mem_ranges[l].size =
+ adev->gmc.real_vram_size -
+ ((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT);
+}
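
A standalone sketch of the software fallback split above, with the last
range clamped so rounding never maps past the end of VRAM (illustrative
names, not driver code):

    static void split_vram(u64 real_vram_size, u32 parts, u32 page_shift,
                           u64 *fpfn, u64 *lpfn)
    {
        u64 size = ((real_vram_size + SZ_16M) >> page_shift) / parts;
        u64 start = 0;
        u32 i;

        for (i = 0; i < parts; i++) {
            fpfn[i] = start;
            lpfn[i] = start + size - 1;
            start += size;
        }
        /* clamp the final range to the true end of VRAM */
        lpfn[parts - 1] = (real_vram_size >> page_shift) - 1;
    }
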
+
+int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev)
+{
+ bool valid;
+
+ adev->gmc.mem_partitions = kcalloc(AMDGPU_MAX_MEM_RANGES,
+ sizeof(struct amdgpu_mem_partition_info),
+ GFP_KERNEL);
+ if (!adev->gmc.mem_partitions)
+ return -ENOMEM;
+
+ if (adev->gmc.is_app_apu)
+ amdgpu_gmc_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
+ else
+ amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
+
+ if (amdgpu_sriov_vf(adev))
+ valid = true;
+ else
+ valid = amdgpu_gmc_validate_partition_info(adev);
+ if (!valid) {
+ /* TODO: handle invalid case */
+ dev_warn(adev->dev,
+ "Mem ranges not matching with hardware config\n");
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 80fa29c26e9e..727342689d4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -84,6 +84,13 @@ enum amdgpu_memory_partition {
#define AMDGPU_GMC_INIT_RESET_NPS BIT(0)
+#define AMDGPU_MAX_MEM_RANGES 8
+
+#define AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY 0x80
+#define AMDGPU_GMC9_FAULT_SOURCE_DATA_READ 0x40
+#define AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE 0x20
+#define AMDGPU_GMC9_FAULT_SOURCE_DATA_EXE 0x10
+
/*
* GMC page fault information
*/
@@ -152,15 +159,15 @@ struct amdgpu_gmc_funcs {
unsigned pasid);
/* enable/disable PRT support */
void (*set_prt)(struct amdgpu_device *adev, bool enable);
- /* map mtype to hardware flags */
- uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags);
/* get the pde for a given mc addr */
void (*get_vm_pde)(struct amdgpu_device *adev, int level,
u64 *dst, u64 *flags);
- /* get the pte flags to use for a BO VA mapping */
+ /* get the pte flags to use for PTEs */
void (*get_vm_pte)(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
- uint64_t *flags);
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
+ uint64_t *pte_flags);
/* override per-page pte flags */
void (*override_vm_pte_flags)(struct amdgpu_device *dev,
struct amdgpu_vm *vm,
@@ -354,9 +361,10 @@ struct amdgpu_gmc {
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
-#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
-#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
+#define amdgpu_gmc_get_vm_pte(adev, vm, bo, vm_flags, pte_flags) \
+ ((adev)->gmc.gmc_funcs->get_vm_pte((adev), (vm), (bo), (vm_flags), \
+ (pte_flags)))
#define amdgpu_gmc_override_vm_pte_flags(adev, vm, addr, pte_flags) \
(adev)->gmc.gmc_funcs->override_vm_pte_flags \
((adev), (vm), (addr), (pte_flags))
@@ -394,6 +402,7 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
return addr;
}
+bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev);
int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev);
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
uint64_t *addr, uint64_t *flags);
@@ -455,5 +464,13 @@ int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev,
int nps_mode);
void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev);
bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev);
-
+enum amdgpu_memory_partition
+amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev);
+enum amdgpu_memory_partition
+amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes);
+enum amdgpu_memory_partition
+amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev);
+int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev);
+void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 0760e70402ec..895c1e4c6747 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -284,6 +284,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
ttm_resource_manager_init(man, &adev->mman.bdev, gtt_size);
start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
+ start += amdgpu_vce_required_gart_pages(adev);
size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
drm_mm_init(&mgr->mm, start, size);
spin_lock_init(&mgr->lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
index 6e02fb9ac2f6..5a60d69a3e1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
@@ -66,3 +66,19 @@ void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
0);
}
}
+
+void amdgpu_hdp_invalidate(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ if (adev->asic_funcs && adev->asic_funcs->invalidate_hdp)
+ adev->asic_funcs->invalidate_hdp(adev, ring);
+ else if (adev->hdp.funcs && adev->hdp.funcs->invalidate_hdp)
+ adev->hdp.funcs->invalidate_hdp(adev, ring);
+}
+
+void amdgpu_hdp_flush(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ if (adev->asic_funcs && adev->asic_funcs->flush_hdp)
+ adev->asic_funcs->flush_hdp(adev, ring);
+ else if (adev->hdp.funcs && adev->hdp.funcs->flush_hdp)
+ adev->hdp.funcs->flush_hdp(adev, ring);
+}
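
With these wrappers, callers no longer probe the function pointers
themselves; a hedged usage sketch (the ring == NULL register-write
convention matches existing amdgpu practice but is an assumption here):

    amdgpu_hdp_flush(adev, NULL);         /* immediate MMIO flush */
    amdgpu_hdp_invalidate(adev, ring);    /* emitted on the given ring */
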
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
index 4cfd932b7e91..d9f488fa76b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -46,4 +46,8 @@ struct amdgpu_hdp {
int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev);
void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
+void amdgpu_hdp_invalidate(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+void amdgpu_hdp_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
#endif /* __AMDGPU_HDP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index e36fede7f74c..90d26d820bac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -167,19 +167,14 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo)
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
- void *owner, struct page **pages,
- struct hmm_range **phmm_range)
+ void *owner,
+ struct amdgpu_hmm_range *range)
{
- struct hmm_range *hmm_range;
unsigned long end;
unsigned long timeout;
- unsigned long i;
unsigned long *pfns;
int r = 0;
-
- hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
- if (unlikely(!hmm_range))
- return -ENOMEM;
+ struct hmm_range *hmm_range = &range->hmm_range;
pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
if (unlikely(!pfns)) {
@@ -222,36 +217,77 @@ retry:
hmm_range->start = start;
hmm_range->hmm_pfns = pfns;
- /*
- * Due to default_flags, all pages are HMM_PFN_VALID or
- * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
- * the notifier_lock, and mmu_interval_read_retry() must be done first.
- */
- for (i = 0; pages && i < npages; i++)
- pages[i] = hmm_pfn_to_page(pfns[i]);
-
- *phmm_range = hmm_range;
-
return 0;
out_free_pfns:
kvfree(pfns);
+ hmm_range->hmm_pfns = NULL;
out_free_range:
- kfree(hmm_range);
-
if (r == -EBUSY)
r = -EAGAIN;
return r;
}
-bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
+/**
+ * amdgpu_hmm_range_valid - check if an HMM range is still valid
+ * @range: pointer to the &struct amdgpu_hmm_range to validate
+ *
+ * Determines whether the given HMM range @range is still valid by
+ * checking for invalidations via the MMU notifier sequence. This is
+ * typically used to verify that the range has not been invalidated
+ * by concurrent address space updates before it is accessed.
+ *
+ * Return:
+ * * true if @range is valid and can be used safely
+ * * false if @range is NULL or has been invalidated
+ */
+bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range)
{
- bool r;
+ if (!range)
+ return false;
- r = mmu_interval_read_retry(hmm_range->notifier,
- hmm_range->notifier_seq);
- kvfree(hmm_range->hmm_pfns);
- kfree(hmm_range);
+ return !mmu_interval_read_retry(range->hmm_range.notifier,
+ range->hmm_range.notifier_seq);
+}
- return r;
+/**
+ * amdgpu_hmm_range_alloc - allocate and initialize an AMDGPU HMM range
+ * @bo: optional buffer object to associate with this HMM range
+ *
+ * Allocates memory for amdgpu_hmm_range and associates it with the @bo passed.
+ * The reference count of the @bo is incremented.
+ *
+ * Return:
+ * Pointer to a newly allocated struct amdgpu_hmm_range on success,
+ * or NULL if memory allocation fails.
+ */
+struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo)
+{
+ struct amdgpu_hmm_range *range;
+
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (!range)
+ return NULL;
+
+ range->bo = amdgpu_bo_ref(bo);
+ return range;
+}
+
+/**
+ * amdgpu_hmm_range_free - release an AMDGPU HMM range
+ * @range: pointer to the range object to free
+ *
+ * Releases all resources held by @range, including the associated
+ * hmm_pfns, and drops the reference on the associated bo, if any.
+ *
+ * Return: void
+ */
+void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range)
+{
+ if (!range)
+ return;
+
+ kvfree(range->hmm_range.hmm_pfns);
+ amdgpu_bo_unref(&range->bo);
+ kfree(range);
}
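
A hedged lifecycle sketch for the reworked range API (the caller context
and the notifier field are assumptions for illustration):

    struct amdgpu_hmm_range *range;
    int r;

    range = amdgpu_hmm_range_alloc(bo);    /* takes a bo reference */
    if (!range)
        return -ENOMEM;

    r = amdgpu_hmm_range_get_pages(&bo->notifier, start, npages,
                                   readonly, owner, range);
    if (r) {
        amdgpu_hmm_range_free(range);
        return r;
    }

    /* ... consume range->hmm_range.hmm_pfns under the notifier lock ... */

    if (!amdgpu_hmm_range_valid(range))
        r = -EAGAIN;    /* invalidated concurrently, caller retries */

    amdgpu_hmm_range_free(range);    /* frees pfns, drops the bo ref */
    return r;
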
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
index e2edcd010ccc..140bc9cd57b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
@@ -31,13 +31,20 @@
#include <linux/interval_tree.h>
#include <linux/mmu_notifier.h>
+struct amdgpu_hmm_range {
+ struct hmm_range hmm_range;
+ struct amdgpu_bo *bo;
+};
+
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
- void *owner, struct page **pages,
- struct hmm_range **phmm_range);
-bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
+ void *owner,
+ struct amdgpu_hmm_range *range);
#if defined(CONFIG_HMM_MIRROR)
+bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range);
+struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo);
+void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range);
int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_hmm_unregister(struct amdgpu_bo *bo);
#else
@@ -47,7 +54,20 @@ static inline int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
"add CONFIG_ZONE_DEVICE=y in config file to fix this\n");
return -ENODEV;
}
+
static inline void amdgpu_hmm_unregister(struct amdgpu_bo *bo) {}
+
+static inline bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range)
+{
+ return false;
+}
+
+static inline struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo)
+{
+ return NULL;
+}
+
+static inline void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range) {}
#endif
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 8179d0814db9..9cb72f0c5277 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -24,7 +24,6 @@
* Alex Deucher
*/
-#include <linux/export.h>
#include <linux/pci.h>
#include <drm/drm_edid.h>
@@ -185,7 +184,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"AMDGPU i2c hw bus %s", name);
i2c->adapter.algo = &amdgpu_atombios_i2c_algo;
- ret = i2c_add_adapter(&i2c->adapter);
+ ret = devm_i2c_add_adapter(dev->dev, &i2c->adapter);
if (ret)
goto out_free;
} else {
@@ -216,15 +215,6 @@ out_free:
}
-void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c)
-{
- if (!i2c)
- return;
- WARN_ON(i2c->has_aux);
- i2c_del_adapter(&i2c->adapter);
- kfree(i2c);
-}
-
void amdgpu_i2c_init(struct amdgpu_device *adev)
{
if (!adev->is_atom_fw) {
@@ -249,12 +239,9 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
- if (adev->i2c_bus[i]) {
- amdgpu_i2c_destroy(adev->i2c_bus[i]);
+ for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++)
+ if (adev->i2c_bus[i])
adev->i2c_bus[i] = NULL;
- }
- }
}
/* looks up bus based on id */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 802743efa3b3..586a58facca1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -128,6 +128,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
struct dma_fence *tmp = NULL;
+ struct amdgpu_fence *af;
bool need_ctx_switch;
struct amdgpu_vm *vm;
uint64_t fence_ctx;
@@ -138,7 +139,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
int vmid = AMDGPU_JOB_GET_VMID(job);
bool need_pipe_sync = false;
unsigned int cond_exec;
-
unsigned int i;
int r = 0;
@@ -149,11 +149,19 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
if (job) {
vm = job->vm;
fence_ctx = job->base.s_fence ?
- job->base.s_fence->scheduled.context : 0;
+ job->base.s_fence->finished.context : 0;
shadow_va = job->shadow_va;
csa_va = job->csa_va;
gds_va = job->gds_va;
init_shadow = job->init_shadow;
+ af = job->hw_fence;
+ /* Save the context of the job for reset handling.
+ * The driver needs this so it can skip the ring
+ * contents for guilty contexts.
+ */
+ af->context = fence_ctx;
+ /* the vm fence is also part of the job's context */
+ job->hw_vm_fence->context = fence_ctx;
} else {
vm = NULL;
fence_ctx = 0;
@@ -161,22 +169,28 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
csa_va = 0;
gds_va = 0;
init_shadow = false;
+ af = kzalloc(sizeof(*af), GFP_ATOMIC);
+ if (!af)
+ return -ENOMEM;
}
if (!ring->sched.ready) {
dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
- return -EINVAL;
+ r = -EINVAL;
+ goto free_fence;
}
if (vm && !job->vmid) {
dev_err(adev->dev, "VM IB without ID\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto free_fence;
}
if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
(!ring->funcs->secure_submission_supported)) {
dev_err(adev->dev, "secure submissions not supported on ring <%s>\n", ring->name);
- return -EINVAL;
+ r = -EINVAL;
+ goto free_fence;
}
alloc_size = ring->funcs->emit_frame_size + num_ibs *
@@ -185,7 +199,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
r = amdgpu_ring_alloc(ring, alloc_size);
if (r) {
dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
- return r;
+ goto free_fence;
}
need_ctx_switch = ring->current_ctx != fence_ctx;
@@ -282,7 +296,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
}
- r = amdgpu_fence_emit(ring, f, job, fence_flags);
+ r = amdgpu_fence_emit(ring, af, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vmid)
@@ -290,6 +304,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_undo(ring);
return r;
}
+ *f = &af->base;
+ /* get a ref for the job */
+ if (job)
+ dma_fence_get(*f);
if (ring->funcs->insert_end)
ring->funcs->insert_end(ring);
@@ -304,9 +322,23 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
ring->funcs->emit_wave_limit(ring, false);
+	/* Save the wptr associated with this fence.
+	 * This must be done last so that resets work properly,
+	 * since we need the wptr of this fence to know which
+	 * ring contents to back up after we reset the queue.
+	 */
+ amdgpu_fence_save_wptr(af);
+
amdgpu_ring_ib_end(ring);
amdgpu_ring_commit(ring);
+
return 0;
+
+free_fence:
+ if (!job)
+ kfree(af);
+ return r;
}
/**
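A minimal standalone sketch of the ownership rule the amdgpu_ib.c hunks above establish: job submissions reuse the preallocated job->hw_fence, while jobless (direct) submissions allocate the fence locally and must free it on every early-exit path. Names and error codes are illustrative, not part of the patch.

#include <stdlib.h>
#include <stdbool.h>

struct demo_fence { unsigned long context; };

/* Only the locally allocated fence is freed on error paths; a
 * job-owned fence is left for the job's own release path. */
static int demo_submit(bool have_job, struct demo_fence *job_fence,
		       bool validation_fails)
{
	struct demo_fence *af;
	int r = 0;

	if (have_job) {
		af = job_fence;			/* preallocated by the job */
	} else {
		af = calloc(1, sizeof(*af));	/* direct submission path */
		if (!af)
			return -1;		/* stands in for -ENOMEM */
	}

	if (validation_fails) {
		r = -2;				/* stands in for -EINVAL */
		goto free_fence;
	}

	/* ... emit the fence; ownership passes to the ring ... */
	if (!have_job)
		free(af);			/* demo only: nothing emits it here */
	return 0;

free_fence:
	if (!have_job)
		free(af);	/* never free a fence the job owns */
	return r;
}

int main(void)
{
	struct demo_fence jf = { .context = 42 };

	return (demo_submit(true, &jf, true) == -2 &&
		demo_submit(false, NULL, false) == 0) ? 0 : 1;
}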
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 5dd78a9cb12d..9cab36322c16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -201,58 +201,34 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->vm_hub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- struct dma_fence **fences;
- unsigned i;
+	/* If anybody is waiting for a VMID, let everybody wait for fairness */
if (!dma_fence_is_signaled(ring->vmid_wait)) {
*fence = dma_fence_get(ring->vmid_wait);
return 0;
}
- fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_NOWAIT);
- if (!fences)
- return -ENOMEM;
-
/* Check if we have an idle VMID */
- i = 0;
- list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
+ list_for_each_entry_reverse((*idle), &id_mgr->ids_lru, list) {
/* Don't use per engine and per process VMID at the same time */
struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
NULL : ring;
- fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
- if (!fences[i])
- break;
- ++i;
+ *fence = amdgpu_sync_peek_fence(&(*idle)->active, r);
+ if (!(*fence))
+ return 0;
}
- /* If we can't find a idle VMID to use, wait till one becomes available */
- if (&(*idle)->list == &id_mgr->ids_lru) {
- u64 fence_context = adev->vm_manager.fence_context + ring->idx;
- unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
- struct dma_fence_array *array;
- unsigned j;
-
- *idle = NULL;
- for (j = 0; j < i; ++j)
- dma_fence_get(fences[j]);
-
- array = dma_fence_array_create(i, fences, fence_context,
- seqno, true);
- if (!array) {
- for (j = 0; j < i; ++j)
- dma_fence_put(fences[j]);
- kfree(fences);
- return -ENOMEM;
- }
-
- *fence = dma_fence_get(&array->base);
- dma_fence_put(ring->vmid_wait);
- ring->vmid_wait = &array->base;
- return 0;
- }
- kfree(fences);
+ /*
+	 * If we can't find an idle VMID to use, wait on a fence from the least
+ * recently used in the hope that it will be available soon.
+ */
+ *idle = NULL;
+ dma_fence_put(ring->vmid_wait);
+ ring->vmid_wait = dma_fence_get(*fence);
+ /* This is the reference we return */
+ dma_fence_get(*fence);
return 0;
}
@@ -275,13 +251,12 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->vm_hub;
- struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
uint64_t fence_context = adev->fence_context + ring->idx;
bool needs_flush = vm->use_cpu_for_update;
uint64_t updates = amdgpu_vm_tlb_seq(vm);
int r;
- *id = id_mgr->reserved;
+ *id = vm->reserved_vmid[vmhub];
if ((*id)->owner != vm->immediate.fence_context ||
!amdgpu_vmid_compatible(*id, job) ||
(*id)->flushed_updates < updates ||
@@ -314,7 +289,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
* user of the VMID.
*/
r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished,
- GFP_NOWAIT);
+ GFP_ATOMIC);
if (r)
return r;
@@ -374,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
*/
r = amdgpu_sync_fence(&(*id)->active,
&job->base.s_fence->finished,
- GFP_NOWAIT);
+ GFP_ATOMIC);
if (r)
return r;
@@ -427,7 +402,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Remember this submission as user of the VMID */
r = amdgpu_sync_fence(&id->active,
&job->base.s_fence->finished,
- GFP_NOWAIT);
+ GFP_ATOMIC);
if (r)
goto error;
@@ -474,40 +449,61 @@ bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
return vm->reserved_vmid[vmhub];
}
-int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+/*
+ * amdgpu_vmid_alloc_reserved - reserve a specific VMID for this vm
+ * @adev: amdgpu device structure
+ * @vm: the VM to reserve an ID for
+ * @vmhub: the VMHUB which should be used
+ *
+ * Mostly used to have a reserved VMID for debugging and SPM.
+ *
+ * Returns: 0 for success, -ENOENT if an ID is already reserved.
+ */
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned vmhub)
{
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vmid *id;
+ int r = 0;
mutex_lock(&id_mgr->lock);
-
- ++id_mgr->reserved_use_count;
- if (!id_mgr->reserved) {
- struct amdgpu_vmid *id;
-
- id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
- list);
- /* Remove from normal round robin handling */
- list_del_init(&id->list);
- id_mgr->reserved = id;
+ if (vm->reserved_vmid[vmhub])
+ goto unlock;
+ if (id_mgr->reserved_vmid) {
+ r = -ENOENT;
+ goto unlock;
}
-
+ /* Remove from normal round robin handling */
+ id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
+ list_del_init(&id->list);
+ vm->reserved_vmid[vmhub] = id;
+ id_mgr->reserved_vmid = true;
mutex_unlock(&id_mgr->lock);
+
return 0;
+unlock:
+ mutex_unlock(&id_mgr->lock);
+ return r;
}
-void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+/*
+ * amdgpu_vmid_free_reserved - free up a reserved VMID again
+ * @adev: amdgpu device structure
+ * @vm: the VM with the reserved ID
+ * @vmhub: the VMHUB which should be used
+ */
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned vmhub)
{
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
mutex_lock(&id_mgr->lock);
- if (!--id_mgr->reserved_use_count) {
- /* give the reserved ID back to normal round robin */
- list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
- id_mgr->reserved = NULL;
+ if (vm->reserved_vmid[vmhub]) {
+ list_add(&vm->reserved_vmid[vmhub]->list,
+ &id_mgr->ids_lru);
+ vm->reserved_vmid[vmhub] = NULL;
+ id_mgr->reserved_vmid = false;
}
-
mutex_unlock(&id_mgr->lock);
}
@@ -574,7 +570,6 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
mutex_init(&id_mgr->lock);
INIT_LIST_HEAD(&id_mgr->ids_lru);
- id_mgr->reserved_use_count = 0;
/* for GC <10, SDMA uses MMHUB so use first_kfd_vmid for both GC and MM */
if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0))
@@ -594,11 +589,6 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
}
}
- /* alloc a default reserved vmid to enforce isolation */
- for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
- if (adev->enforce_isolation[i] != AMDGPU_ENFORCE_ISOLATION_DISABLE)
- amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
- }
}
/**
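The grab-idle rewrite above drops the per-ID dma_fence_array: the LRU list is walked in reverse and, when every VMID is busy, the ring waits on the fence of the least recently used entry. A standalone sketch of that selection logic with the list modelled as an array (illustrative only; index 0 plays the LRU head):

#include <stdio.h>

#define NUM_IDS 4

/* 0 == idle; nonzero stands in for that VMID's pending "active" fence */
static int active_fence[NUM_IDS] = { 3, 7, 5, 2 };

/* Walk from most recently used (tail) to least recently used (head);
 * report an idle id via *idle, or leave *wait_fence holding the fence
 * of the LRU entry when everything is busy. */
static void demo_grab_idle(int *idle, int *wait_fence)
{
	int fence = 0;
	int i;

	for (i = NUM_IDS - 1; i >= 0; i--) {
		fence = active_fence[i];
		if (!fence) {
			*idle = i;
			return;
		}
	}
	*idle = -1;
	*wait_fence = fence;	/* fence of the least recently used id */
}

int main(void)
{
	int idle = -1, wait_fence = 0;

	demo_grab_idle(&idle, &wait_fence);
	if (idle < 0)
		printf("all busy, wait on fence %d\n", wait_fence);
	else
		printf("idle VMID %d\n", idle);
	return 0;
}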
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 240fa6751260..b3649cd3af56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -67,8 +67,7 @@ struct amdgpu_vmid_mgr {
unsigned num_ids;
struct list_head ids_lru;
struct amdgpu_vmid ids[AMDGPU_NUM_VMID];
- struct amdgpu_vmid *reserved;
- unsigned int reserved_use_count;
+ bool reserved_vmid;
};
int amdgpu_pasid_alloc(unsigned int bits);
@@ -79,10 +78,10 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id);
bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
-int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
- unsigned vmhub);
-void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
- unsigned vmhub);
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned vmhub);
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned vmhub);
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_job *job, struct dma_fence **fence);
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 30f16968b578..a6419246e9c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -218,7 +218,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
restart_ih:
count = AMDGPU_IH_MAX_NUM_IVS;
- DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
+ dev_dbg(adev->dev, "%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
rmb();
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 7f7ea046e209..f58b6be7fccc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -56,14 +56,14 @@ struct amdgpu_ih_ring {
bool use_bus_addr;
struct amdgpu_bo *ring_obj;
- volatile uint32_t *ring;
+ uint32_t *ring;
uint64_t gpu_addr;
uint64_t wptr_addr;
- volatile uint32_t *wptr_cpu;
+ uint32_t *wptr_cpu;
uint64_t rptr_addr;
- volatile uint32_t *rptr_cpu;
+ uint32_t *rptr_cpu;
bool enabled;
unsigned rptr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c
new file mode 100644
index 000000000000..99e1cf4fc955
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ip.h"
+
+static int8_t amdgpu_logical_to_dev_inst(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ int8_t inst)
+{
+ int8_t dev_inst;
+
+ switch (block) {
+ case GC_HWIP:
+ case SDMA0_HWIP:
+ /* Both JPEG and VCN as JPEG is only alias of VCN */
+ case VCN_HWIP:
+ dev_inst = adev->ip_map.dev_inst[block][inst];
+ break;
+ default:
+		/* For the rest of the IPs, no lookup required.
+		 * Assume 'logical instance == physical instance' for all configs. */
+ dev_inst = inst;
+ break;
+ }
+
+ return dev_inst;
+}
+
+static uint32_t amdgpu_logical_to_dev_mask(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ uint32_t mask)
+{
+ uint32_t dev_mask = 0;
+ int8_t log_inst, dev_inst;
+
+ while (mask) {
+ log_inst = ffs(mask) - 1;
+ dev_inst = amdgpu_logical_to_dev_inst(adev, block, log_inst);
+ dev_mask |= (1 << dev_inst);
+ mask &= ~(1 << log_inst);
+ }
+
+ return dev_mask;
+}
+
+static void amdgpu_populate_ip_map(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type ip_block,
+ uint32_t inst_mask)
+{
+ int l = 0, i;
+
+ while (inst_mask) {
+ i = ffs(inst_mask) - 1;
+ adev->ip_map.dev_inst[ip_block][l++] = i;
+ inst_mask &= ~(1 << i);
+ }
+ for (; l < HWIP_MAX_INSTANCE; l++)
+ adev->ip_map.dev_inst[ip_block][l] = -1;
+}
+
+void amdgpu_ip_map_init(struct amdgpu_device *adev)
+{
+ u32 ip_map[][2] = {
+ { GC_HWIP, adev->gfx.xcc_mask },
+ { SDMA0_HWIP, adev->sdma.sdma_mask },
+ { VCN_HWIP, adev->vcn.inst_mask },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
+ amdgpu_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);
+
+ adev->ip_map.logical_to_dev_inst = amdgpu_logical_to_dev_inst;
+ adev->ip_map.logical_to_dev_mask = amdgpu_logical_to_dev_mask;
+}
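A standalone demonstration of the translation amdgpu_logical_to_dev_mask() performs: each set bit of the logical mask is looked up through the per-IP instance table and re-set at its device position. The table values are hypothetical, not from the patch.

#include <stdio.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

#define MAX_INST 8

/* e.g. logical instances 0..2 live at device instances 1, 3 and 4 */
static int8_t dev_inst[MAX_INST] = { 1, 3, 4, -1, -1, -1, -1, -1 };

static uint32_t logical_to_dev_mask(uint32_t mask)
{
	uint32_t dev_mask = 0;

	while (mask) {
		int log = ffs(mask) - 1;	/* lowest set logical bit */
		dev_mask |= 1u << dev_inst[log];
		mask &= ~(1u << log);
	}
	return dev_mask;
}

int main(void)
{
	/* logical mask 0b111 -> device mask 0b11010 (0x1a) */
	printf("0x%x\n", logical_to_dev_mask(0x7));
	return 0;
}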
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h
new file mode 100644
index 000000000000..2490fd322aec
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_IP_H__
+#define __AMDGPU_IP_H__
+
+void amdgpu_ip_map_init(struct amdgpu_device *adev);
+
+#endif /* __AMDGPU_IP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 13c60cac4261..8112ffc85995 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -142,8 +142,9 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
r = src->funcs->set(adev, src, k,
AMDGPU_IRQ_STATE_DISABLE);
if (r)
- DRM_ERROR("error disabling interrupt (%d)\n",
- r);
+ dev_err(adev->dev,
+ "error disabling interrupt (%d)\n",
+ r);
}
}
}
@@ -242,7 +243,7 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
return true;
}
-static void amdgpu_restore_msix(struct amdgpu_device *adev)
+void amdgpu_restore_msix(struct amdgpu_device *adev)
{
u16 ctrl;
@@ -315,7 +316,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
adev->irq.irq = irq;
adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
- DRM_DEBUG("amdgpu: irq initialized.\n");
+ dev_dbg(adev->dev, "amdgpu: irq initialized.\n");
return 0;
free_vectors:
@@ -461,10 +462,10 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
src_id = entry.src_id;
if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
- DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
+ dev_dbg(adev->dev, "Invalid client_id in IV: %d\n", client_id);
} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
- DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
+ dev_dbg(adev->dev, "Invalid src_id in IV: %d\n", src_id);
} else if (((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) ||
(client_id == SOC15_IH_CLIENTID_ISP)) &&
@@ -472,18 +473,21 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
generic_handle_domain_irq(adev->irq.domain, src_id);
} else if (!adev->irq.client[client_id].sources) {
- DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
- client_id, src_id);
+ dev_dbg(adev->dev,
+ "Unregistered interrupt client_id: %d src_id: %d\n",
+ client_id, src_id);
} else if ((src = adev->irq.client[client_id].sources[src_id])) {
r = src->funcs->process(adev, src, &entry);
if (r < 0)
- DRM_ERROR("error processing interrupt (%d)\n", r);
+ dev_err(adev->dev, "error processing interrupt (%d)\n",
+ r);
else if (r)
handled = true;
} else {
- DRM_DEBUG("Unregistered interrupt src_id: %d of client_id:%d\n",
+ dev_dbg(adev->dev,
+ "Unregistered interrupt src_id: %d of client_id:%d\n",
src_id, client_id);
}
@@ -620,7 +624,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned int type)
{
/* When the threshold is reached, the interrupt source may not be enabled. Return -EINVAL */
- if (amdgpu_ras_is_rma(adev))
+ if (amdgpu_ras_is_rma(adev) && !amdgpu_irq_enabled(adev, src, type))
return -EINVAL;
if (!adev->irq.installed)
@@ -732,7 +736,7 @@ int amdgpu_irq_add_domain(struct amdgpu_device *adev)
adev->irq.domain = irq_domain_create_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
&amdgpu_hw_irqdomain_ops, adev);
if (!adev->irq.domain) {
- DRM_ERROR("GPU irq add domain failed\n");
+ dev_err(adev->dev, "GPU irq add domain failed\n");
return -ENODEV;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 04c0b4fa17a4..9f0417456abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -146,5 +146,6 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev);
int amdgpu_irq_add_domain(struct amdgpu_device *adev);
void amdgpu_irq_remove_domain(struct amdgpu_device *adev);
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id);
+void amdgpu_restore_msix(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
index 43fc941dfa57..37270c4dab8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -33,6 +33,8 @@
#include "isp_v4_1_0.h"
#include "isp_v4_1_1.h"
+#define ISP_MC_ADDR_ALIGN (1024 * 32)
+
/**
* isp_hw_init - start and test isp block
*
@@ -141,6 +143,181 @@ static int isp_set_powergating_state(struct amdgpu_ip_block *ip_block,
return 0;
}
+static int is_valid_isp_device(struct device *isp_parent, struct device *amdgpu_dev)
+{
+ if (isp_parent != amdgpu_dev)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * isp_user_buffer_alloc - create user buffer object (BO) for isp
+ *
+ * @dev: isp device handle
+ * @dmabuf: DMABUF handle for isp buffer allocated in system memory
+ * @buf_obj: GPU buffer object handle to initialize
+ * @buf_addr: GPU addr of the pinned BO to initialize
+ *
+ * Imports the isp DMABUF to allocate and pin a user BO for isp internal use. It
+ * performs a GART allocation to generate a GPU address for the BO, making it
+ * accessible to the ISP HW through the GART aperture.
+ *
+ * This function is exported to allow the V4L2 isp device external to drm device
+ * to create and access the isp user BO.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int isp_user_buffer_alloc(struct device *dev, void *dmabuf,
+ void **buf_obj, u64 *buf_addr)
+{
+ struct platform_device *ispdev = to_platform_device(dev);
+ const struct isp_platform_data *isp_pdata;
+ struct amdgpu_device *adev;
+ struct mfd_cell *mfd_cell;
+ struct amdgpu_bo *bo;
+ u64 gpu_addr;
+ int ret;
+
+ if (WARN_ON(!ispdev))
+ return -ENODEV;
+
+ if (WARN_ON(!buf_obj))
+ return -EINVAL;
+
+ if (WARN_ON(!buf_addr))
+ return -EINVAL;
+
+ mfd_cell = &ispdev->mfd_cell[0];
+ if (!mfd_cell)
+ return -ENODEV;
+
+ isp_pdata = mfd_cell->platform_data;
+ adev = isp_pdata->adev;
+
+ ret = is_valid_isp_device(ispdev->dev.parent, adev->dev);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_bo_create_isp_user(adev, dmabuf,
+ AMDGPU_GEM_DOMAIN_GTT, &bo, &gpu_addr);
+ if (ret) {
+ drm_err(&adev->ddev, "failed to alloc gart user buffer (%d)", ret);
+ return ret;
+ }
+
+ *buf_obj = (void *)bo;
+ *buf_addr = gpu_addr;
+
+ return 0;
+}
+EXPORT_SYMBOL(isp_user_buffer_alloc);
+
+/**
+ * isp_user_buffer_free - free isp user buffer object (BO)
+ *
+ * @buf_obj: amdgpu isp user BO to free
+ *
+ * Unpins and unrefs the BO for isp internal use.
+ *
+ * This function is exported to allow the V4L2 isp device
+ * external to drm device to free the isp user BO.
+ */
+void isp_user_buffer_free(void *buf_obj)
+{
+ amdgpu_bo_free_isp_user(buf_obj);
+}
+EXPORT_SYMBOL(isp_user_buffer_free);
+
+/**
+ * isp_kernel_buffer_alloc - create kernel buffer object (BO) for isp
+ *
+ * @dev: isp device handle
+ * @size: size for the new BO
+ * @buf_obj: GPU BO handle to initialize
+ * @gpu_addr: GPU addr of the pinned BO
+ * @cpu_addr: CPU address mapping of BO
+ *
+ * Allocates and pins a kernel BO for internal isp firmware use.
+ *
+ * This function is exported to allow the V4L2 isp device
+ * external to drm device to create and access the kernel BO.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int isp_kernel_buffer_alloc(struct device *dev, u64 size,
+ void **buf_obj, u64 *gpu_addr, void **cpu_addr)
+{
+ struct platform_device *ispdev = to_platform_device(dev);
+ struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj;
+ const struct isp_platform_data *isp_pdata;
+ struct amdgpu_device *adev;
+ struct mfd_cell *mfd_cell;
+ int ret;
+
+ if (WARN_ON(!ispdev))
+ return -ENODEV;
+
+ if (WARN_ON(!buf_obj))
+ return -EINVAL;
+
+ if (WARN_ON(!gpu_addr))
+ return -EINVAL;
+
+ if (WARN_ON(!cpu_addr))
+ return -EINVAL;
+
+ mfd_cell = &ispdev->mfd_cell[0];
+ if (!mfd_cell)
+ return -ENODEV;
+
+ isp_pdata = mfd_cell->platform_data;
+ adev = isp_pdata->adev;
+
+ ret = is_valid_isp_device(ispdev->dev.parent, adev->dev);
+ if (ret)
+ return ret;
+
+ /* Ensure *bo is NULL so a new BO will be created */
+ *bo = NULL;
+ ret = amdgpu_bo_create_kernel(adev,
+ size,
+ ISP_MC_ADDR_ALIGN,
+ AMDGPU_GEM_DOMAIN_GTT,
+ bo,
+ gpu_addr,
+ cpu_addr);
+ if (!cpu_addr || ret) {
+ drm_err(&adev->ddev, "failed to alloc gart kernel buffer (%d)", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(isp_kernel_buffer_alloc);
+
+/**
+ * isp_kernel_buffer_free - free isp kernel buffer object (BO)
+ *
+ * @buf_obj: amdgpu isp user BO to free
+ * @gpu_addr: GPU addr of isp kernel BO
+ * @cpu_addr: CPU addr of isp kernel BO
+ *
+ * Unmaps and unpins an isp kernel BO.
+ *
+ * This function is exported to allow the V4L2 isp device
+ * external to drm device to free the kernel BO.
+ */
+void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr)
+{
+ struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj;
+
+ amdgpu_bo_free_kernel(bo, gpu_addr, cpu_addr);
+}
+EXPORT_SYMBOL(isp_kernel_buffer_free);
+
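A hedged usage sketch of the two kernel-buffer exports above, as an external V4L2 isp driver might pair them; the function name, SZ_1M size and error handling are assumptions for illustration, not part of the patch.

/* Hypothetical caller: allocate, program, then release an isp
 * firmware buffer using the helpers exported above. */
static int example_isp_fw_buf_roundtrip(struct device *dev)
{
	void *bo = NULL;
	u64 gpu_addr = 0;
	void *cpu_addr = NULL;
	int ret;

	ret = isp_kernel_buffer_alloc(dev, SZ_1M, &bo, &gpu_addr, &cpu_addr);
	if (ret)
		return ret;

	/* ... hand gpu_addr to the isp firmware, fill via cpu_addr ... */

	isp_kernel_buffer_free(&bo, &gpu_addr, &cpu_addr);
	return 0;
}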
static const struct amd_ip_funcs isp_ip_funcs = {
.name = "isp_ip",
.early_init = isp_early_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
index 4f3b7b5d9c1f..d6f4ffa4c97c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
@@ -28,16 +28,13 @@
#ifndef __AMDGPU_ISP_H__
#define __AMDGPU_ISP_H__
+#include <drm/amd/isp.h>
+#include <linux/pm_domain.h>
+
#define ISP_REGS_OFFSET_END 0x629A4
struct amdgpu_isp;
-struct isp_platform_data {
- void *adev;
- u32 asic_type;
- resource_size_t base_rmmio_size;
-};
-
struct isp_funcs {
int (*hw_init)(struct amdgpu_isp *isp);
int (*hw_fini)(struct amdgpu_isp *isp);
@@ -54,6 +51,7 @@ struct amdgpu_isp {
struct isp_platform_data *isp_pdata;
unsigned int harvest_config;
const struct firmware *fw;
+ struct generic_pm_domain ispgpd;
};
extern const struct amdgpu_ip_block_version isp_v4_1_0_ip_block;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index acb21fc8b3ce..0a0dcbf0798d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -89,10 +89,10 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
- struct amdgpu_task_info *ti;
+ struct drm_wedge_task_info *info = NULL;
+ struct amdgpu_task_info *ti = NULL;
struct amdgpu_device *adev = ring->adev;
- int idx;
- int r;
+ int idx, r;
if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
dev_info(adev->dev, "%s - device unplugged skipping recovery on scheduler:%s",
@@ -112,6 +112,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
amdgpu_job_core_dump(adev, job);
if (amdgpu_gpu_recovery &&
+ amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_SOFT_RESET) &&
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
s_job->sched->name);
@@ -124,53 +125,28 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
if (ti) {
- dev_err(adev->dev,
- "Process information: process %s pid %d thread %s pid %d\n",
- ti->process_name, ti->tgid, ti->task_name, ti->pid);
- amdgpu_vm_put_task_info(ti);
+ amdgpu_vm_print_task_info(adev, ti);
+ info = &ti->task;
}
/* attempt a per ring reset */
- if (unlikely(adev->debug_disable_gpu_ring_reset)) {
- dev_err(adev->dev, "Ring reset disabled by debug mask\n");
- } else if (amdgpu_gpu_recovery && ring->funcs->reset) {
- bool is_guilty;
-
- dev_err(adev->dev, "Starting %s ring reset\n", s_job->sched->name);
- /* stop the scheduler, but don't mess with the
- * bad job yet because if ring reset fails
- * we'll fall back to full GPU reset.
- */
- drm_sched_wqueue_stop(&ring->sched);
-
- /* for engine resets, we need to reset the engine,
- * but individual queues may be unaffected.
- * check here to make sure the accounting is correct.
- */
- if (ring->funcs->is_guilty)
- is_guilty = ring->funcs->is_guilty(ring);
- else
- is_guilty = true;
-
- if (is_guilty)
- dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
-
- r = amdgpu_ring_reset(ring, job->vmid);
+ if (amdgpu_gpu_recovery &&
+ amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) &&
+ ring->funcs->reset) {
+ dev_err(adev->dev, "Starting %s ring reset\n",
+ s_job->sched->name);
+ r = amdgpu_ring_reset(ring, job->vmid, job->hw_fence);
if (!r) {
- if (amdgpu_ring_sched_ready(ring))
- drm_sched_stop(&ring->sched, s_job);
- if (is_guilty) {
- atomic_inc(&ring->adev->gpu_reset_counter);
- amdgpu_fence_driver_force_completion(ring);
- }
- if (amdgpu_ring_sched_ready(ring))
- drm_sched_start(&ring->sched, 0);
- dev_err(adev->dev, "Ring %s reset succeeded\n", ring->sched.name);
- drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE);
+ atomic_inc(&ring->adev->gpu_reset_counter);
+ dev_err(adev->dev, "Ring %s reset succeeded\n",
+ ring->sched.name);
+ drm_dev_wedged_event(adev_to_drm(adev),
+ DRM_WEDGE_RECOVERY_NONE, info);
goto exit;
}
- dev_err(adev->dev, "Ring %s reset failure\n", ring->sched.name);
+ dev_err(adev->dev, "Ring %s reset failed\n", ring->sched.name);
}
+
dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
if (amdgpu_device_should_recover_gpu(ring->adev)) {
@@ -198,14 +174,19 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
}
exit:
+ amdgpu_vm_put_task_info(ti);
drm_dev_exit(idx);
- return DRM_GPU_SCHED_STAT_NOMINAL;
+ return DRM_GPU_SCHED_STAT_RESET;
}
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_sched_entity *entity, void *owner,
- unsigned int num_ibs, struct amdgpu_job **job)
+ unsigned int num_ibs, struct amdgpu_job **job,
+ u64 drm_client_id)
{
+ struct amdgpu_fence *af;
+ int r;
+
if (num_ibs == 0)
return -EINVAL;
@@ -213,6 +194,20 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!*job)
return -ENOMEM;
+ af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+ if (!af) {
+ r = -ENOMEM;
+ goto err_job;
+ }
+ (*job)->hw_fence = af;
+
+ af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+ if (!af) {
+ r = -ENOMEM;
+ goto err_fence;
+ }
+ (*job)->hw_vm_fence = af;
+
(*job)->vm = vm;
amdgpu_sync_create(&(*job)->explicit_sync);
@@ -222,17 +217,27 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!entity)
return 0;
- return drm_sched_job_init(&(*job)->base, entity, 1, owner);
+ return drm_sched_job_init(&(*job)->base, entity, 1, owner,
+ drm_client_id);
+
+err_fence:
+ kfree((*job)->hw_fence);
+err_job:
+ kfree(*job);
+ *job = NULL;
+
+ return r;
}
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
struct drm_sched_entity *entity, void *owner,
size_t size, enum amdgpu_ib_pool_type pool_type,
- struct amdgpu_job **job)
+ struct amdgpu_job **job, u64 k_job_id)
{
int r;
- r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
+ r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job,
+ k_job_id);
if (r)
return r;
@@ -241,7 +246,10 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
if (r) {
if (entity)
drm_sched_job_cleanup(&(*job)->base);
+ kfree((*job)->hw_vm_fence);
+ kfree((*job)->hw_fence);
kfree(*job);
+ *job = NULL;
}
return r;
@@ -269,11 +277,11 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
struct dma_fence *f;
unsigned i;
- /* Check if any fences where initialized */
+ /* Check if any fences were initialized */
if (job->base.s_fence && job->base.s_fence->finished.ops)
f = &job->base.s_fence->finished;
- else if (job->hw_fence.ops)
- f = &job->hw_fence;
+ else if (job->hw_fence && job->hw_fence->base.ops)
+ f = &job->hw_fence->base;
else
f = NULL;
@@ -289,11 +297,16 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
amdgpu_sync_free(&job->explicit_sync);
- /* only put the hw fence if has embedded fence */
- if (!job->hw_fence.ops)
- kfree(job);
+ if (job->hw_fence->base.ops)
+ dma_fence_put(&job->hw_fence->base);
else
- dma_fence_put(&job->hw_fence);
+ kfree(job->hw_fence);
+ if (job->hw_vm_fence->base.ops)
+ dma_fence_put(&job->hw_vm_fence->base);
+ else
+ kfree(job->hw_vm_fence);
+
+ kfree(job);
}
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
@@ -322,10 +335,16 @@ void amdgpu_job_free(struct amdgpu_job *job)
if (job->gang_submit != &job->base.s_fence->scheduled)
dma_fence_put(job->gang_submit);
- if (!job->hw_fence.ops)
- kfree(job);
+ if (job->hw_fence->base.ops)
+ dma_fence_put(&job->hw_fence->base);
+ else
+ kfree(job->hw_fence);
+ if (job->hw_vm_fence->base.ops)
+ dma_fence_put(&job->hw_vm_fence->base);
else
- dma_fence_put(&job->hw_fence);
+ kfree(job->hw_vm_fence);
+
+ kfree(job);
}
struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
@@ -384,13 +403,6 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
goto error;
}
- /*
- * The VM structure might be released after the VMID is
- * assigned, we had multiple problems with people trying to use
- * the VM pointer so better set it to NULL.
- */
- if (!fence)
- job->vm = NULL;
return fence;
}
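Every free path above repeats one rule: an amdgpu_fence whose base.ops is set was initialized by dma_fence_init() at emit time and is reference-counted, so it must be dropped with dma_fence_put(); one that never reached emit is a bare kzalloc() to kfree(). An illustrative helper capturing that rule (hypothetical, not part of the patch):

/* Hypothetical helper: release a job fence according to whether it
 * was ever initialized (base.ops set) or is still a bare allocation. */
static void example_put_or_free(struct amdgpu_fence *af)
{
	if (!af)
		return;

	if (af->base.ops)
		dma_fence_put(&af->base);	/* refcounted after init */
	else
		kfree(af);			/* never initialized */
}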
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index f2c049129661..7abf069d17d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -44,11 +44,28 @@
struct amdgpu_fence;
enum amdgpu_ib_pool_type;
+/* Internal kernel job ids. (decreasing values, starting from U64_MAX). */
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE (18446744073709551615ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES (18446744073709551614ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE (18446744073709551613ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR (18446744073709551612ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER (18446744073709551611ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA (18446744073709551610ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER (18446744073709551609ULL)
+#define AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE (18446744073709551608ULL)
+#define AMDGPU_KERNEL_JOB_ID_MOVE_BLIT (18446744073709551607ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER (18446744073709551606ULL)
+#define AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER (18446744073709551605ULL)
+#define AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB (18446744073709551604ULL)
+#define AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP (18446744073709551603ULL)
+#define AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST (18446744073709551602ULL)
+
struct amdgpu_job {
struct drm_sched_job base;
struct amdgpu_vm *vm;
struct amdgpu_sync explicit_sync;
- struct dma_fence hw_fence;
+ struct amdgpu_fence *hw_fence;
+ struct amdgpu_fence *hw_vm_fence;
struct dma_fence *gang_submit;
uint32_t preamble_status;
uint32_t preemption_status;
@@ -91,11 +108,13 @@ static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_sched_entity *entity, void *owner,
- unsigned int num_ibs, struct amdgpu_job **job);
+ unsigned int num_ibs, struct amdgpu_job **job,
+ u64 drm_client_id);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
struct drm_sched_entity *entity, void *owner,
size_t size, enum amdgpu_ib_pool_type pool_type,
- struct amdgpu_job **job);
+ struct amdgpu_job **job,
+ u64 k_job_id);
void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
struct amdgpu_bo *gws, struct amdgpu_bo *oa);
void amdgpu_job_free_resources(struct amdgpu_job *job);
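As the comment above says, the job-id literals are simply U64_MAX counting down. A standalone spot-check of the first, second and last values (illustrative only):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	assert(18446744073709551615ULL == UINT64_MAX);	    /* VM_UPDATE */
	assert(18446744073709551614ULL == UINT64_MAX - 1);  /* VM_UPDATE_PDES */
	assert(18446744073709551602ULL == UINT64_MAX - 13); /* VCN_RING_TEST */
	return 0;
}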
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index dda29132dfb2..63ee6ba6a931 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -121,10 +121,12 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work)
fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec[j]);
}
- if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt))
+ if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt)) {
+ mutex_lock(&adev->jpeg.jpeg_pg_lock);
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
AMD_PG_STATE_GATE);
- else
+ mutex_unlock(&adev->jpeg.jpeg_pg_lock);
+ } else
schedule_delayed_work(&adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
}
@@ -194,7 +196,8 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -368,7 +371,7 @@ static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val)
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
- if (val & (1 << ((i * adev->jpeg.num_jpeg_rings) + j)))
+ if (val & (BIT_ULL((i * adev->jpeg.num_jpeg_rings) + j)))
ring->sched.ready = true;
else
ring->sched.ready = false;
@@ -463,7 +466,8 @@ int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev,
adev->jpeg.ip_dump = kcalloc(adev->jpeg.num_jpeg_inst * count,
sizeof(uint32_t), GFP_KERNEL);
if (!adev->jpeg.ip_dump) {
- DRM_ERROR("Failed to allocate memory for JPEG IP Dump\n");
+ dev_err(adev->dev,
+ "Failed to allocate memory for JPEG IP Dump\n");
return -ENOMEM;
}
adev->jpeg.reg_list = reg;
@@ -536,3 +540,68 @@ void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_pri
drm_printf(p, "\nInactive Instance:JPEG%d\n", i);
}
}
+
+static inline bool amdgpu_jpeg_reg_valid(u32 reg)
+{
+ if (reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END ||
+ (reg >= JPEG_ATOMIC_RANGE_START && reg <= JPEG_ATOMIC_RANGE_END))
+ return false;
+ else
+ return true;
+}
+
+/**
+ * amdgpu_jpeg_dec_parse_cs - command submission parser
+ *
+ * @parser: Command submission parser context
+ * @job: the job to parse
+ * @ib: the IB to parse
+ *
+ * Parse the command stream, return -EINVAL for invalid packet,
+ * 0 otherwise
+ */
+int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
+{
+ u32 i, reg, res, cond, type;
+ struct amdgpu_device *adev = parser->adev;
+
+ for (i = 0; i < ib->length_dw ; i += 2) {
+ reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
+ res = CP_PACKETJ_GET_RES(ib->ptr[i]);
+ cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
+ type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
+
+ if (res) /* only support 0 at the moment */
+ return -EINVAL;
+
+ switch (type) {
+ case PACKETJ_TYPE0:
+ if (cond != PACKETJ_CONDITION_CHECK0 ||
+ !amdgpu_jpeg_reg_valid(reg)) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE3:
+ if (cond != PACKETJ_CONDITION_CHECK3 ||
+ !amdgpu_jpeg_reg_valid(reg)) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE6:
+ if (ib->ptr[i] == CP_PACKETJ_NOP)
+ continue;
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ default:
+ dev_err(adev->dev, "Unknown packet type %d !\n", type);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
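A standalone demonstration of the window enforced by amdgpu_jpeg_reg_valid(): a register must lie within [0x4000, 0x41c2] while avoiding the atomic range [0x4120, 0x412a]. The constants mirror the amdgpu_jpeg.h hunk that follows; the demo itself is illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REG_START    0x4000
#define REG_END      0x41c2
#define ATOMIC_START 0x4120
#define ATOMIC_END   0x412A

static bool jpeg_reg_valid(uint32_t reg)
{
	if (reg < REG_START || reg > REG_END ||
	    (reg >= ATOMIC_START && reg <= ATOMIC_END))
		return false;
	return true;
}

int main(void)
{
	printf("%d %d %d\n",
	       jpeg_reg_valid(0x4100),	/* 1: inside the window */
	       jpeg_reg_valid(0x4125),	/* 0: atomic range */
	       jpeg_reg_valid(0x5000));	/* 0: above the window */
	return 0;
}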
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index 4f0775e39b54..346ae0ab09d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -25,11 +25,18 @@
#define __AMDGPU_JPEG_H__
#include "amdgpu_ras.h"
+#include "amdgpu_cs.h"
#define AMDGPU_MAX_JPEG_INSTANCES 4
#define AMDGPU_MAX_JPEG_RINGS 10
#define AMDGPU_MAX_JPEG_RINGS_4_0_3 8
+#define JPEG_REG_RANGE_START 0x4000
+#define JPEG_REG_RANGE_END 0x41c2
+#define JPEG_ATOMIC_RANGE_START 0x4120
+#define JPEG_ATOMIC_RANGE_END 0x412A
+
#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
@@ -170,5 +177,8 @@ int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev,
const struct amdgpu_hwip_reg_entry *reg, u32 count);
void amdgpu_jpeg_dump_ip_state(struct amdgpu_ip_block *ip_block);
void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
+int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
#endif /*__AMDGPU_JPEG_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d2ce7d86dbc8..6ee77f431d56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -91,7 +91,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
if (adev->rmmio == NULL)
return;
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD))
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_UNLOAD))
DRM_WARN("smart shift update failed\n");
amdgpu_acpi_fini(adev);
@@ -161,7 +161,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
if (acpi_status)
dev_dbg(dev->dev, "Error during ACPI methods call\n");
- if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_LOAD))
DRM_WARN("smart shift update failed\n");
out:
@@ -399,6 +399,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
uint32_t ib_size_alignment = 0;
enum amd_ip_block_type type;
unsigned int num_rings = 0;
+ uint32_t num_slots = 0;
unsigned int i, j;
if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
@@ -411,6 +412,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->gfx.gfx_ring[i].sched.ready &&
!adev->gfx.gfx_ring[i].no_user_submission)
++num_rings;
+
+ if (!adev->gfx.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
+ num_slots += hweight32(adev->mes.gfx_hqd_mask[i]);
+ }
+
ib_start_alignment = 32;
ib_size_alignment = 32;
break;
@@ -420,6 +427,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->gfx.compute_ring[i].sched.ready &&
!adev->gfx.compute_ring[i].no_user_submission)
++num_rings;
+
+		if (!adev->gfx.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++)
+ num_slots += hweight32(adev->mes.compute_hqd_mask[i]);
+ }
+
ib_start_alignment = 32;
ib_size_alignment = 32;
break;
@@ -429,6 +442,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->sdma.instance[i].ring.sched.ready &&
!adev->sdma.instance[i].ring.no_user_submission)
++num_rings;
+
+		if (!adev->sdma.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++)
+ num_slots += hweight32(adev->mes.sdma_hqd_mask[i]);
+ }
+
ib_start_alignment = 256;
ib_size_alignment = 4;
break;
@@ -570,6 +589,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
}
result->capabilities_flags = 0;
result->available_rings = (1 << num_rings) - 1;
+ result->userq_num_slots = num_slots;
result->ib_start_alignment = ib_start_alignment;
result->ib_size_alignment = ib_size_alignment;
return 0;
@@ -738,7 +758,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VRAM_USAGE:
- ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
+ ui64 = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
+ ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) : 0;
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VIS_VRAM_USAGE:
ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
@@ -784,8 +805,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
mem.vram.usable_heap_size = adev->gmc.real_vram_size -
atomic64_read(&adev->vram_pin_size) -
AMDGPU_VM_RESERVED_VRAM;
- mem.vram.heap_usage =
- ttm_resource_manager_usage(vram_man);
+ mem.vram.heap_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
+ ttm_resource_manager_usage(vram_man) : 0;
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
@@ -919,6 +940,10 @@ out:
if (adev->gfx.config.ta_cntl2_truncate_coord_mode)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;
+ /* Gang submit is not supported under SRIOV currently */
+ if (!amdgpu_sriov_vf(adev))
+ dev_info->ids_flags |= AMDGPU_IDS_FLAGS_GANG_SUBMIT;
+
if (amdgpu_passthrough(adev))
dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_PT <<
AMDGPU_IDS_FLAGS_MODE_SHIFT) &
@@ -1395,13 +1420,11 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (r)
goto error_pasid;
- r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
- if (r)
- goto error_pasid;
+ amdgpu_debugfs_vm_init(file_priv);
- r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
+ r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id, pasid);
if (r)
- goto error_vm;
+ goto error_pasid;
fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
if (!fpriv->prt_va) {
@@ -1442,15 +1465,12 @@ error_vm:
amdgpu_vm_fini(adev, &fpriv->vm);
error_pasid:
- if (pasid) {
+ if (pasid)
amdgpu_pasid_free(pasid);
- amdgpu_vm_set_pasid(adev, &fpriv->vm, 0);
- }
kfree(fpriv);
out_suspend:
- pm_runtime_mark_last_busy(dev->dev);
pm_put:
pm_runtime_put_autosuspend(dev->dev);
@@ -1518,7 +1538,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
kfree(fpriv);
file_priv->driver_priv = NULL;
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
}
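The userq_num_slots accounting added to amdgpu_hw_ip_info() sums the population count (hweight32) of each pipe's HQD mask. A standalone demonstration with hypothetical masks:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's hweight32() */
static unsigned int hweight32_demo(uint32_t w)
{
	return (unsigned int)__builtin_popcount(w);
}

int main(void)
{
	uint32_t gfx_hqd_mask[2] = { 0xff, 0x0f };	/* hypothetical */
	unsigned int num_slots = 0;
	int i;

	for (i = 0; i < 2; i++)
		num_slots += hweight32_demo(gfx_hqd_mask[i]);

	printf("userq_num_slots = %u\n", num_slots);	/* 12 */
	return 0;
}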
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 6fa9fa11c8f3..9c182ce501af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -47,7 +47,7 @@ static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
/* Bitmap for dynamic allocation of kernel doorbells */
mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
if (!mes->doorbell_bitmap) {
- DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
+ dev_err(adev->dev, "Failed to allocate MES doorbell bitmap\n");
return -ENOMEM;
}
@@ -105,8 +105,8 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
spin_lock_init(&adev->mes.ring_lock[i]);
adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
- adev->mes.vmid_mask_mmhub = 0xffffff00;
- adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xfffffffe : 0xffffff00;
+ adev->mes.vmid_mask_mmhub = 0xFF00;
+ adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xFFFE : 0xFF00;
num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me;
if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES)
@@ -191,6 +191,20 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
if (r)
goto error_doorbell;
+ if (adev->mes.hung_queue_db_array_size) {
+ r = amdgpu_bo_create_kernel(adev,
+ adev->mes.hung_queue_db_array_size * sizeof(u32),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->mes.hung_queue_db_array_gpu_obj,
+ &adev->mes.hung_queue_db_array_gpu_addr,
+ &adev->mes.hung_queue_db_array_cpu_addr);
+ if (r) {
+ dev_warn(adev->dev, "failed to create MES hung db array buffer (%d)", r);
+ goto error_doorbell;
+ }
+ }
+
return 0;
error_doorbell:
@@ -216,6 +230,10 @@ void amdgpu_mes_fini(struct amdgpu_device *adev)
{
int i;
+ amdgpu_bo_free_kernel(&adev->mes.hung_queue_db_array_gpu_obj,
+ &adev->mes.hung_queue_db_array_gpu_addr,
+ &adev->mes.hung_queue_db_array_cpu_addr);
+
amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
&adev->mes.event_log_cpu_addr);
@@ -256,7 +274,7 @@ int amdgpu_mes_suspend(struct amdgpu_device *adev)
r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to suspend all gangs");
+ dev_err(adev->dev, "failed to suspend all gangs");
return r;
}
@@ -280,7 +298,7 @@ int amdgpu_mes_resume(struct amdgpu_device *adev)
r = adev->mes.funcs->resume_gang(&adev->mes, &input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to resume all gangs");
+ dev_err(adev->dev, "failed to resume all gangs");
return r;
}
@@ -304,7 +322,7 @@ int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to map legacy queue\n");
+ dev_err(adev->dev, "failed to map legacy queue\n");
return r;
}
@@ -329,7 +347,7 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to unmap legacy queue\n");
+ dev_err(adev->dev, "failed to unmap legacy queue\n");
return r;
}
@@ -361,7 +379,59 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes);
if (r)
- DRM_ERROR("failed to reset legacy queue\n");
+ dev_err(adev->dev, "failed to reset legacy queue\n");
+
+ return r;
+}
+
+int amdgpu_mes_get_hung_queue_db_array_size(struct amdgpu_device *adev)
+{
+ return adev->mes.hung_queue_db_array_size;
+}
+
+int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
+ int queue_type,
+ bool detect_only,
+ unsigned int *hung_db_num,
+ u32 *hung_db_array)
+
+{
+ struct mes_detect_and_reset_queue_input input;
+ u32 *db_array = adev->mes.hung_queue_db_array_cpu_addr;
+ int r, i;
+
+ if (!hung_db_num || !hung_db_array)
+ return -EINVAL;
+
+ if ((queue_type != AMDGPU_RING_TYPE_GFX) &&
+ (queue_type != AMDGPU_RING_TYPE_COMPUTE) &&
+ (queue_type != AMDGPU_RING_TYPE_SDMA))
+ return -EINVAL;
+
+ /* Clear the doorbell array before detection */
+ memset(adev->mes.hung_queue_db_array_cpu_addr, AMDGPU_MES_INVALID_DB_OFFSET,
+ adev->mes.hung_queue_db_array_size * sizeof(u32));
+ input.queue_type = queue_type;
+ input.detect_only = detect_only;
+
+ r = adev->mes.funcs->detect_and_reset_hung_queues(&adev->mes,
+ &input);
+ if (r) {
+ dev_err(adev->dev, "failed to detect and reset\n");
+ } else {
+ *hung_db_num = 0;
+ for (i = 0; i < adev->mes.hung_queue_hqd_info_offset; i++) {
+ if (db_array[i] != AMDGPU_MES_INVALID_DB_OFFSET) {
+ hung_db_array[i] = db_array[i];
+ *hung_db_num += 1;
+ }
+ }
+
+		/*
+		 * TODO: return HQD info for MES-scheduled user compute queue
+		 * reset cases, stored in hung_db_array from the hqd info
+		 * offset up to the full array size
+		 */
+ }
return r;
}
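Two notes on the detection helper above. First, memset() truncates its fill value to a byte, so passing AMDGPU_MES_INVALID_DB_OFFSET (0xffffffff) fills 0xff bytes, which still yields 0xffffffff in every u32 slot. Second, a hedged sketch of a detect-only scan from a hypothetical caller; the stack array bound is an assumption, and real callers should size it via amdgpu_mes_get_hung_queue_db_array_size().

/* Hypothetical caller: log hung compute queues without resetting them. */
static void example_scan_hung_compute_queues(struct amdgpu_device *adev)
{
	u32 hung_db[16];	/* assumed upper bound for illustration */
	unsigned int hung_db_num = 0;
	int r;

	r = amdgpu_mes_detect_and_reset_hung_queues(adev,
						    AMDGPU_RING_TYPE_COMPUTE,
						    true /* detect only */,
						    &hung_db_num, hung_db);
	if (!r && hung_db_num)
		dev_info(adev->dev, "%u hung queue doorbell(s)\n",
			 hung_db_num);
}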
@@ -458,6 +528,18 @@ error:
return r;
}
+int amdgpu_mes_hdp_flush(struct amdgpu_device *adev)
+{
+ uint32_t hdp_flush_req_offset, hdp_flush_done_offset, ref_and_mask;
+
+ hdp_flush_req_offset = adev->nbio.funcs->get_hdp_flush_req_offset(adev);
+ hdp_flush_done_offset = adev->nbio.funcs->get_hdp_flush_done_offset(adev);
+ ref_and_mask = adev->nbio.hdp_flush_reg->ref_and_mask_cp0;
+
+ return amdgpu_mes_reg_write_reg_wait(adev, hdp_flush_req_offset, hdp_flush_done_offset,
+ ref_and_mask, ref_and_mask);
+}
+
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
uint64_t process_context_addr,
uint32_t spi_gdbg_per_vmid_cntl,
@@ -469,7 +551,8 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
int r;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes set shader debugger is not supported!\n");
+ dev_err(adev->dev,
+ "mes set shader debugger is not supported!\n");
return -EINVAL;
}
@@ -493,7 +576,7 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to set_shader_debugger\n");
+ dev_err(adev->dev, "failed to set_shader_debugger\n");
amdgpu_mes_unlock(&adev->mes);
@@ -507,7 +590,8 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
int r;
if (!adev->mes.funcs->misc_op) {
- DRM_ERROR("mes flush shader debugger is not supported!\n");
+ dev_err(adev->dev,
+ "mes flush shader debugger is not supported!\n");
return -EINVAL;
}
@@ -519,7 +603,7 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
if (r)
- DRM_ERROR("failed to set_shader_debugger\n");
+ dev_err(adev->dev, "failed to set_shader_debugger\n");
amdgpu_mes_unlock(&adev->mes);
@@ -619,14 +703,11 @@ out:
bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
{
uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
- bool is_supported = false;
-
- if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
- mes_rev >= 0x63)
- is_supported = true;
- return is_supported;
+ return ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
+ mes_rev >= 0x63) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0));
}
/* Fix me -- node_id is used to identify the correct MES instances in the future */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index c0d2c195fe2e..e989225b354b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -41,6 +41,7 @@
#define AMDGPU_MES_API_VERSION_MASK 0x00fff000
#define AMDGPU_MES_FEAT_VERSION_MASK 0xff000000
#define AMDGPU_MES_MSCRATCH_SIZE 0x40000
+#define AMDGPU_MES_INVALID_DB_OFFSET 0xffffffff
enum amdgpu_mes_priority_level {
AMDGPU_MES_PRIORITY_LEVEL_LOW = 0,
@@ -147,6 +148,11 @@ struct amdgpu_mes {
uint64_t resource_1_gpu_addr[AMDGPU_MAX_MES_PIPES];
void *resource_1_addr[AMDGPU_MAX_MES_PIPES];
+ int hung_queue_db_array_size;
+ int hung_queue_hqd_info_offset;
+ struct amdgpu_bo *hung_queue_db_array_gpu_obj;
+ uint64_t hung_queue_db_array_gpu_addr;
+ void *hung_queue_db_array_cpu_addr;
};
struct amdgpu_mes_gang {
@@ -233,6 +239,7 @@ struct mes_add_queue_input {
struct mes_remove_queue_input {
uint32_t doorbell_offset;
uint64_t gang_context_addr;
+ bool remove_queue_after_reset;
};
struct mes_map_legacy_queue_input {
@@ -280,6 +287,18 @@ struct mes_reset_queue_input {
bool is_kq;
};
+struct mes_detect_and_reset_queue_input {
+ uint32_t queue_type;
+ bool detect_only;
+};
+
+struct mes_inv_tlbs_pasid_input {
+ uint32_t xcc_id;
+ uint16_t pasid;
+ uint8_t hub_id;
+ uint8_t flush_type;
+};
+
enum mes_misc_opcode {
MES_MISC_OP_WRITE_REG,
MES_MISC_OP_READ_REG,
@@ -367,6 +386,13 @@ struct amdgpu_mes_funcs {
int (*reset_hw_queue)(struct amdgpu_mes *mes,
struct mes_reset_queue_input *input);
+
+ int (*detect_and_reset_hung_queues)(struct amdgpu_mes *mes,
+ struct mes_detect_and_reset_queue_input *input);
+
+ int (*invalidate_tlbs_pasid)(struct amdgpu_mes *mes,
+ struct mes_inv_tlbs_pasid_input *input);
};
#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
@@ -390,12 +416,20 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
unsigned int vmid,
bool use_mmio);
+int amdgpu_mes_get_hung_queue_db_array_size(struct amdgpu_device *adev);
+int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
+ int queue_type,
+ bool detect_only,
+ unsigned int *hung_db_num,
+ u32 *hung_db_array);
+
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
int amdgpu_mes_wreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t val);
int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask);
+int amdgpu_mes_hdp_flush(struct amdgpu_device *adev);
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
uint64_t process_context_addr,
uint32_t spi_gdbg_per_vmid_cntl,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 6da4f946cac0..dc8d2f52c7d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -326,6 +326,8 @@ struct amdgpu_mode_info {
struct drm_property *audio_property;
/* FMT dithering */
struct drm_property *dither_property;
+ /* Adaptive Backlight Modulation (power feature) */
+ struct drm_property *abm_level_property;
/* hardcoded DFP edid from BIOS */
const struct drm_edid *bios_hardcoded_edid;
@@ -496,8 +498,6 @@ struct amdgpu_crtc {
struct drm_connector *connector;
/* for dpm */
u32 line_time;
- u32 wm_low;
- u32 wm_high;
u32 lb_vblank_lead_lines;
struct drm_display_mode hw_mode;
/* for virtual dce */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index d085687a47ea..a974265837f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -53,6 +53,16 @@ u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev)
return 0;
}
+bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev) || !adev->asic_funcs ||
+ !adev->asic_funcs->get_pcie_replay_count ||
+ (!adev->nbio.funcs || !adev->nbio.funcs->get_pcie_replay_count))
+ return false;
+
+ return true;
+}
+
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index 79c2f807b9fe..b528de6a01f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -119,4 +119,6 @@ int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev);
+bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 73403744331a..e08f58de4b17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -32,6 +32,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
+#include <linux/export.h>
#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
@@ -62,7 +63,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
amdgpu_bo_kunmap(bo);
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
drm_gem_object_release(&bo->tbo.base);
amdgpu_bo_unref(&bo->parent);
@@ -152,6 +153,14 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
c++;
}
+ if (domain & AMDGPU_GEM_DOMAIN_MMIO_REMAP) {
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].mem_type = AMDGPU_PL_MMIO_REMAP;
+ places[c].flags = 0;
+ c++;
+ }
+
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
places[c].lpfn = 0;
@@ -351,7 +360,6 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
return 0;
}
-EXPORT_SYMBOL(amdgpu_bo_create_kernel);
/**
* amdgpu_bo_create_isp_user - create user BO for isp
@@ -420,7 +428,6 @@ error_unreserve:
return r;
}
-EXPORT_SYMBOL(amdgpu_bo_create_isp_user);
/**
* amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
@@ -524,7 +531,6 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
if (cpu_addr)
*cpu_addr = NULL;
}
-EXPORT_SYMBOL(amdgpu_bo_free_kernel);
/**
* amdgpu_bo_free_isp_user - free BO for isp use
@@ -547,7 +553,6 @@ void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo)
}
amdgpu_bo_unref(&bo);
}
-EXPORT_SYMBOL(amdgpu_bo_free_isp_user);
/* Validate that the bo size is a bit bigger than what the requested domain needs */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
@@ -939,7 +944,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
domain = bo->preferred_domains & domain;
/* A shared bo cannot be migrated to VRAM */
- if (bo->tbo.base.import_attach) {
+ if (drm_gem_is_imported(&bo->tbo.base)) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
domain = AMDGPU_GEM_DOMAIN_GTT;
else
@@ -967,7 +972,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
*/
domain = amdgpu_bo_get_preferred_domain(adev, domain);
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
dma_buf_pin(bo->tbo.base.import_attach);
/* force to pin into visible video ram */
@@ -1018,7 +1023,7 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo)
if (bo->tbo.pin_count)
return;
- if (bo->tbo.base.import_attach)
+ if (drm_gem_is_imported(&bo->tbo.base))
dma_buf_unpin(bo->tbo.base.import_attach);
if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
@@ -1263,7 +1268,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
amdgpu_bo_kunmap(abo);
- if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
+ if (abo->tbo.base.dma_buf && !drm_gem_is_imported(&abo->tbo.base) &&
old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
dma_buf_move_notify(abo->tbo.base.dma_buf);
@@ -1316,7 +1321,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (r)
goto out;
- r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true);
+ r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true,
+ AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
if (WARN_ON(r))
goto out;
@@ -1473,6 +1479,26 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
}
/**
+ * amdgpu_bo_fb_aper_addr - return FB aperture GPU offset of the VRAM bo
+ * @bo: amdgpu VRAM buffer object for which we query the offset
+ *
+ * Returns:
+ * current FB aperture GPU offset of the object.
+ */
+u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ uint64_t offset, fb_base;
+
+ WARN_ON_ONCE(bo->tbo.resource->mem_type != TTM_PL_VRAM);
+
+ fb_base = adev->gmc.fb_start;
+ fb_base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+ offset = (bo->tbo.resource->start << PAGE_SHIFT) + fb_base;
+ return amdgpu_gmc_sign_extend(offset);
+}
+
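/*
 * Worked example with illustrative numbers (not from the patch): with
 * gmc.fb_start = 0x8000000000, node_segment_size = 0x400000000 and
 * physical_node_id = 2, a bo whose resource starts at VRAM page 0 resolves
 * to 0x8000000000 + 2 * 0x400000000 = 0x8800000000 before sign extension.
 */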
+/**
* amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
* @bo: amdgpu object for which we query the offset
*
@@ -1528,6 +1554,8 @@ uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo)
return AMDGPU_PL_OA;
case AMDGPU_GEM_DOMAIN_DOORBELL:
return AMDGPU_PL_DOORBELL;
+ case AMDGPU_GEM_DOMAIN_MMIO_REMAP:
+ return AMDGPU_PL_MMIO_REMAP;
default:
return TTM_PL_SYSTEM;
}
@@ -1611,6 +1639,9 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
case AMDGPU_PL_DOORBELL:
placement = "DOORBELL";
break;
+ case AMDGPU_PL_MMIO_REMAP:
+ placement = "MMIO REMAP";
+ break;
case TTM_PL_SYSTEM:
default:
placement = "CPU";
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 375448627f7b..52c2d1731aab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -69,7 +69,7 @@ struct amdgpu_bo_va_mapping {
uint64_t last;
uint64_t __subtree_last;
uint64_t offset;
- uint64_t flags;
+ uint32_t flags;
};
/* User space allocated BO in a VM */
@@ -96,6 +96,7 @@ struct amdgpu_bo_va {
* if non-zero, cannot unmap from GPU because user queues may still access it
*/
unsigned int queue_refcount;
+ atomic_t userq_va_mapped;
};
struct amdgpu_bo {
@@ -167,6 +168,8 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
return AMDGPU_GEM_DOMAIN_OA;
case AMDGPU_PL_DOORBELL:
return AMDGPU_GEM_DOMAIN_DOORBELL;
+ case AMDGPU_PL_MMIO_REMAP:
+ return AMDGPU_GEM_DOMAIN_MMIO_REMAP;
default:
break;
}
@@ -304,6 +307,7 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
+u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index e6f0b035e20b..0b10497d487c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -252,6 +252,7 @@ static int psp_early_init(struct amdgpu_ip_block *ip_block)
break;
case IP_VERSION(14, 0, 2):
case IP_VERSION(14, 0, 3):
+ adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
psp_v14_0_set_psp_funcs(psp);
break;
case IP_VERSION(14, 0, 5):
@@ -447,7 +448,7 @@ static int psp_sw_init(struct amdgpu_ip_block *ip_block)
psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!psp->cmd) {
dev_err(adev->dev, "Failed to allocate memory for the command buffer!\n");
- ret = -ENOMEM;
+ return -ENOMEM;
}
adev->psp.xgmi_context.supports_extended_data =
@@ -574,9 +575,11 @@ static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
return 0;
}
-int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
- uint32_t reg_val, uint32_t mask, bool check_changed)
+int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val,
+ uint32_t mask, uint32_t flags)
{
+ bool check_changed = flags & PSP_WAITREG_CHANGED;
+ bool verbose = !(flags & PSP_WAITREG_NOVERBOSE);
uint32_t val;
int i;
struct amdgpu_device *adev = psp->adev;
@@ -596,6 +599,11 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
udelay(1);
}
+ if (verbose)
+ dev_err(adev->dev,
+			"psp reg (0x%x) wait timed out, mask: %x, read: %x, exp: %x\n",
+ reg_index, mask, val, reg_val);
+
return -ETIME;
}
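/*
 * Hedged example of a call site after this change (the exact callers are
 * outside this hunk): the old 'check_changed' bool becomes one flag among
 * several, so behaviours can now be combined:
 *
 *	ret = psp_wait_for(psp, reg_index, exp_val, mask,
 *			   PSP_WAITREG_CHANGED | PSP_WAITREG_NOVERBOSE);
 */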
@@ -654,6 +662,14 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
return "BOOT_CFG";
case GFX_CMD_ID_CONFIG_SQ_PERFMON:
return "CONFIG_SQ_PERFMON";
+ case GFX_CMD_ID_FB_FW_RESERV_ADDR:
+ return "FB_FW_RESERV_ADDR";
+ case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
+ return "FB_FW_RESERV_EXT_ADDR";
+ case GFX_CMD_ID_SRIOV_SPATIAL_PART:
+ return "SPATIAL_PARTITION";
+ case GFX_CMD_ID_FB_NPS_MODE:
+ return "NPS_MODE_CHANGE";
default:
return "UNKNOWN CMD";
}
@@ -865,12 +881,12 @@ static int psp_tmr_init(struct psp_context *psp)
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
PSP_TMR_ALIGNMENT,
- AMDGPU_HAS_VRAM(psp->adev) ?
- AMDGPU_GEM_DOMAIN_VRAM :
- AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr,
pptr);
}
+ if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo)
+ psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo);
return ret;
}
@@ -984,6 +1000,106 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp,
return ret;
}
+static int psp_get_fw_reservation_info(struct psp_context *psp,
+ uint32_t cmd_id,
+ uint64_t *addr,
+ uint32_t *size)
+{
+ int ret;
+ uint32_t status;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = cmd_id;
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+ if (ret) {
+ release_psp_cmd_buf(psp);
+ return ret;
+ }
+
+ status = cmd->resp.status;
+ if (status == PSP_ERR_UNKNOWN_COMMAND) {
+ release_psp_cmd_buf(psp);
+ *addr = 0;
+ *size = 0;
+ return 0;
+ }
+
+ *addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 |
+ cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo;
+ *size = cmd->resp.uresp.fw_reserve_info.reserve_size;
+
+ release_psp_cmd_buf(psp);
+
+ return 0;
+}
+
+int psp_update_fw_reservation(struct psp_context *psp)
+{
+ int ret;
+ uint64_t reserv_addr, reserv_addr_ext;
+ uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;
+ struct amdgpu_device *adev = psp->adev;
+
+ mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ switch (mp0_ip_ver) {
+ case IP_VERSION(14, 0, 2):
+ if (adev->psp.sos.fw_version < 0x3b0e0d)
+ return 0;
+ break;
+
+ case IP_VERSION(14, 0, 3):
+ if (adev->psp.sos.fw_version < 0x3a0e14)
+ return 0;
+ break;
+
+ default:
+ return 0;
+ }
+
+ ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
+ if (ret)
+ return ret;
+ ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext);
+ if (ret)
+ return ret;
+
+ if (reserv_addr != adev->gmc.real_vram_size - reserv_size) {
+		dev_warn(adev->dev, "reserved fw region is not valid!\n");
+ return 0;
+ }
+
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
+
+ reserv_size = roundup(reserv_size, SZ_1M);
+
+ ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL);
+ if (ret) {
+		dev_err(adev->dev, "reserving fw region failed (%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
+ return ret;
+ }
+
+ reserv_size_ext = roundup(reserv_size_ext, SZ_1M);
+
+ ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext,
+ &adev->mman.fw_reserved_memory_extend, NULL);
+ if (ret) {
+		dev_err(adev->dev, "reserving extended fw region failed (%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL);
+ return ret;
+ }
+
+ return 0;
+}
+
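/*
 * Worked example with assumed numbers: on a 16 GiB board reporting a 4 MiB
 * firmware reserve, PSP must return reserv_addr == 16 GiB - 4 MiB (the top
 * of VRAM minus the reserve size); any other address is rejected above.
 */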
static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
{
struct psp_context *psp = &adev->psp;
@@ -1270,6 +1386,11 @@ int psp_ta_load(struct psp_context *psp, struct ta_context *context)
psp_copy_fw(psp, context->bin_desc.start_addr,
context->bin_desc.size_bytes);
+ if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) &&
+ context->mem_context.shared_bo)
+ context->mem_context.shared_mc_addr =
+ amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo);
+
psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
@@ -1418,6 +1539,7 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
uint64_t dst_node_id = node_info.node_id;
uint8_t dst_num_hops = node_info.num_hops;
+ uint8_t dst_is_sharing_enabled = node_info.is_sharing_enabled;
uint8_t dst_num_links = node_info.num_links;
hive = amdgpu_get_xgmi_hive(psp->adev);
@@ -1437,13 +1559,20 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
continue;
mirror_top_info->nodes[j].num_hops = dst_num_hops;
- /*
- * prevent 0 num_links value re-reflection since reflection
+ mirror_top_info->nodes[j].is_sharing_enabled = dst_is_sharing_enabled;
+	/* prevent re-reflection of a 0 num_links value since the
	 * reflection criterion is based on num_hops (direct or indirect).
- *
*/
- if (dst_num_links)
+ if (dst_num_links) {
mirror_top_info->nodes[j].num_links = dst_num_links;
+ /* swap src and dst due to frame of reference */
+ for (int k = 0; k < dst_num_links; k++) {
+ mirror_top_info->nodes[j].port_num[k].src_xgmi_port_num =
+ node_info.port_num[k].dst_xgmi_port_num;
+ mirror_top_info->nodes[j].port_num[k].dst_xgmi_port_num =
+ node_info.port_num[k].src_xgmi_port_num;
+ }
+ }
break;
}
@@ -1518,9 +1647,10 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
IP_VERSION(13, 0, 6) ||
amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
- IP_VERSION(13, 0, 14);
- bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
- psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
+ IP_VERSION(13, 0, 14) ||
+ amdgpu_sriov_vf(psp->adev);
+ bool ta_port_num_support = psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG ||
+ amdgpu_sriov_xgmi_ta_ext_peer_link_en(psp->adev);
/* populate the shared output buffer rather than the cmd input buffer
* with node_ids as the input for GET_PEER_LINKS command execution.
@@ -2231,11 +2361,14 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
}
ret = psp_ta_load(psp, &psp->securedisplay_context.context);
- if (!ret) {
+ if (!ret && !psp->securedisplay_context.context.resp_status) {
psp->securedisplay_context.context.initialized = true;
mutex_init(&psp->securedisplay_context.mutex);
- } else
+ } else {
+ /* don't try again */
+ psp->securedisplay_context.context.bin_desc.size_bytes = 0;
return ret;
+ }
mutex_lock(&psp->securedisplay_context.mutex);
@@ -2337,11 +2470,27 @@ bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
return false;
}
+static void psp_update_gpu_addresses(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+
+ if (psp->cmd_buf_bo && psp->cmd_buf_mem) {
+ psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo);
+ psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo);
+ psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo);
+ }
+ if (adev->firmware.rbuf && psp->km_ring.ring_mem)
+ psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf);
+}
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
int ret;
+ if (amdgpu_virt_xgmi_migrate_enabled(adev))
+ psp_update_gpu_addresses(adev);
+
if (!amdgpu_sriov_vf(adev)) {
if ((is_psp_fw_valid(psp->kdb)) &&
(psp->funcs->bootloader_load_kdb != NULL)) {
@@ -2440,6 +2589,14 @@ static int psp_hw_start(struct psp_context *psp)
return ret;
}
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ ret = psp_update_fw_reservation(psp);
+ if (ret) {
+ dev_err(adev->dev, "update fw reservation failed!\n");
+ return ret;
+ }
+ }
+
if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
goto skip_pin_bo;
@@ -3522,8 +3679,12 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
uint8_t *ucode_array_start_addr;
int err = 0;
- err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_sos.bin", chip_name);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sos_kicker.bin", chip_name);
+ else
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sos.bin", chip_name);
if (err)
goto out;
@@ -3799,8 +3960,12 @@ int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
struct amdgpu_device *adev = psp->adev;
int err;
- err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_ta.bin", chip_name);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_ta_kicker.bin", chip_name);
+ else
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_ta.bin", chip_name);
if (err)
return err;
@@ -4117,8 +4282,8 @@ rel_buf:
static const struct bin_attribute psp_vbflash_bin_attr = {
.attr = {.name = "psp_vbflash", .mode = 0660},
.size = 0,
- .write_new = amdgpu_psp_vbflash_write,
- .read_new = amdgpu_psp_vbflash_read,
+ .write = amdgpu_psp_vbflash_write,
+ .read = amdgpu_psp_vbflash_read,
};
/**
@@ -4181,7 +4346,7 @@ static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
const struct attribute_group amdgpu_flash_attr_group = {
.attrs = flash_attrs,
- .bin_attrs_new = bin_flash_attrs,
+ .bin_attrs = bin_flash_attrs,
.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
.is_visible = amdgpu_flash_attr_is_visible,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 428adc7f741d..237b624aa51c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -51,6 +51,17 @@
#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI 0x10
#define C2PMSG_CMD_SPI_GET_FLASH_IMAGE 0x11
+/* Command register bit 31 is set to indicate readiness */
+#define MBOX_TOS_READY_FLAG (GFX_FLAG_RESPONSE)
+#define MBOX_TOS_READY_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK)
+
+/* Values to check for a successful GFX_CMD response wait. Check against
+ * both the status bits and the response state - this helps to detect a
+ * command failure or other unexpected cases such as a dropped device
+ * reading back all 0xFFs.
+ */
+#define MBOX_TOS_RESP_FLAG (GFX_FLAG_RESPONSE)
+#define MBOX_TOS_RESP_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK)
+
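/*
 * Assumed pairing (the call sites are outside this hunk): the ready and
 * response values are meant to feed psp_wait_for(), e.g.
 *
 *	ret = psp_wait_for(psp, reg_index, MBOX_TOS_READY_FLAG,
 *			   MBOX_TOS_READY_MASK, 0);
 *
 * so an all-0xFF read from a dropped device fails the mask check instead
 * of passing as a valid response.
 */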
extern const struct attribute_group amdgpu_flash_attr_group;
enum psp_shared_mem_size {
@@ -123,6 +134,9 @@ enum psp_reg_prog_id {
PSP_REG_LAST
};
+#define PSP_WAITREG_CHANGED BIT(0) /* check if the value has changed */
+#define PSP_WAITREG_NOVERBOSE BIT(1) /* No error verbose */
+
struct psp_funcs {
int (*init_microcode)(struct psp_context *psp);
int (*wait_for_bootloader)(struct psp_context *psp);
@@ -521,8 +535,8 @@ extern const struct amdgpu_ip_block_version psp_v13_0_ip_block;
extern const struct amdgpu_ip_block_version psp_v13_0_4_ip_block;
extern const struct amdgpu_ip_block_version psp_v14_0_ip_block;
-extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
- uint32_t field_val, uint32_t mask, bool check_changed);
+int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
+ uint32_t field_val, uint32_t mask, uint32_t flags);
extern int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
uint32_t field_val, uint32_t mask, uint32_t msec_timeout);
@@ -588,7 +602,7 @@ int psp_init_cap_microcode(struct psp_context *psp,
const char *chip_name);
int psp_get_fw_attestation_records_addr(struct psp_context *psp,
uint64_t *output_ptr);
-
+int psp_update_fw_reservation(struct psp_context *psp);
int psp_load_fw_list(struct psp_context *psp,
struct amdgpu_firmware_info **ucode_list, int ucode_count);
void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
index 38face981c3e..6e8aad91bcd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
@@ -171,13 +171,9 @@ static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t
copy_pos += sizeof(uint32_t);
- ta_bin = kzalloc(ta_bin_len, GFP_KERNEL);
- if (!ta_bin)
- return -ENOMEM;
- if (copy_from_user((void *)ta_bin, &buf[copy_pos], ta_bin_len)) {
- ret = -EFAULT;
- goto err_free_bin;
- }
+ ta_bin = memdup_user(&buf[copy_pos], ta_bin_len);
+ if (IS_ERR(ta_bin))
+ return PTR_ERR(ta_bin);
/* Set TA context and functions */
set_ta_context_funcs(psp, ta_type, &context);
@@ -327,13 +323,9 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
return -EFAULT;
copy_pos += sizeof(uint32_t);
- shared_buf = kzalloc(shared_buf_len, GFP_KERNEL);
- if (!shared_buf)
- return -ENOMEM;
- if (copy_from_user((void *)shared_buf, &buf[copy_pos], shared_buf_len)) {
- ret = -EFAULT;
- goto err_free_shared_buf;
- }
+ shared_buf = memdup_user(&buf[copy_pos], shared_buf_len);
+ if (IS_ERR(shared_buf))
+ return PTR_ERR(shared_buf);
set_ta_context_funcs(psp, ta_type, &context);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
index 123bcf5c2bb1..bacf888735db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
@@ -101,7 +101,6 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
}
amdgpu_gfx_off_ctrl(adev, true);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index de0944947eaf..2a6cf7963dde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -41,6 +41,7 @@
#include "atom.h"
#include "amdgpu_reset.h"
#include "amdgpu_psp.h"
+#include "amdgpu_ras_mgr.h"
#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>
@@ -122,12 +123,15 @@ const char *get_ras_block_str(struct ras_common_if *ras_block)
/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL)
-#define MAX_UMC_POISON_POLLING_TIME_ASYNC 300 //ms
+#define MAX_UMC_POISON_POLLING_TIME_ASYNC 10 /* retries, 100 ms apart */
#define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100 //ms
#define MAX_FLUSH_RETIRE_DWORK_TIMES 100
+#define BYPASS_ALLOCATED_ADDRESS 0x0
+#define BYPASS_INITIALIZATION_ADDRESS 0x1
+
enum amdgpu_ras_retire_page_reservation {
AMDGPU_RAS_RETIRE_PAGE_RESERVED,
AMDGPU_RAS_RETIRE_PAGE_PENDING,
@@ -136,12 +140,18 @@ enum amdgpu_ras_retire_page_reservation {
atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
-static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
+static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
uint64_t addr);
-static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr);
+
+static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev);
+static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev);
+
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
+static void
+amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
struct amdgpu_device *devs[MAX_GPU_INSTANCE];
int num_gpu;
@@ -169,18 +179,16 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
struct eeprom_table_record err_rec;
int ret;
- if ((address >= adev->gmc.mc_vram_size) ||
- (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
+ ret = amdgpu_ras_check_bad_page(adev, address);
+ if (ret == -EINVAL) {
dev_warn(adev->dev,
- "RAS WARN: input address 0x%llx is invalid.\n",
- address);
+ "RAS WARN: input address 0x%llx is invalid.\n",
+ address);
return -EINVAL;
- }
-
- if (amdgpu_ras_check_bad_page(adev, address)) {
+ } else if (ret == 1) {
dev_warn(adev->dev,
- "RAS WARN: 0x%llx has already been marked as bad page!\n",
- address);
+ "RAS WARN: 0x%llx has already been marked as bad page!\n",
+ address);
return 0;
}
@@ -207,6 +215,56 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
return 0;
}
+static int amdgpu_check_address_validity(struct amdgpu_device *adev,
+ uint64_t address, uint64_t flags)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_vram_block_info blk_info;
+ uint64_t page_pfns[32] = {0};
+ int i, ret, count;
+ bool hit = false;
+
+ if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0))
+ return 0;
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_virt_check_vf_critical_region(adev, address, &hit))
+ return -EPERM;
+ return hit ? -EACCES : 0;
+ }
+
+ if ((address >= adev->gmc.mc_vram_size) ||
+ (address >= RAS_UMC_INJECT_ADDR_LIMIT))
+ return -EFAULT;
+
+ count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
+ address, page_pfns, ARRAY_SIZE(page_pfns));
+ if (count <= 0)
+ return -EPERM;
+
+ for (i = 0; i < count; i++) {
+ memset(&blk_info, 0, sizeof(blk_info));
+ ret = amdgpu_vram_mgr_query_address_block_info(&adev->mman.vram_mgr,
+ page_pfns[i] << AMDGPU_GPU_PAGE_SHIFT, &blk_info);
+ if (!ret) {
+			/* The input address being checked was allocated by the
+			 * calling process itself, so the calling process must
+			 * be excluded from the match.
+ */
+ if ((flags == BYPASS_ALLOCATED_ADDRESS) &&
+ ((blk_info.task.pid != task_pid_nr(current)) ||
+ strncmp(blk_info.task.comm, current->comm, TASK_COMM_LEN)))
+ return -EACCES;
+ else if ((flags == BYPASS_INITIALIZATION_ADDRESS) &&
+ (blk_info.task.pid == con->init_task_pid) &&
+ !strncmp(blk_info.task.comm, con->init_task_comm, TASK_COMM_LEN))
+ return -EACCES;
+ }
+ }
+
+ return 0;
+}
+
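/*
 * Illustrative debugfs usage for the new op (the node path is an
 * assumption, not taken from this patch):
 *
 *	echo "check_address 0x200000000 0x0" > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * where the trailing value selects BYPASS_ALLOCATED_ADDRESS (0x0) or
 * BYPASS_INITIALIZATION_ADDRESS (0x1), matching the parser below.
 */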
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -297,6 +355,8 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
op = 2;
else if (strstr(str, "retire_page") != NULL)
op = 3;
+ else if (strstr(str, "check_address") != NULL)
+ op = 4;
else if (str[0] && str[1] && str[2] && str[3])
/* ascii string, but commands are not matched. */
return -EINVAL;
@@ -311,6 +371,15 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
data->inject.address = address;
return 0;
+ } else if (op == 4) {
+ if (sscanf(str, "%*s 0x%llx 0x%llx", &address, &value) != 2 &&
+ sscanf(str, "%*s %llu %llu", &address, &value) != 2)
+ return -EINVAL;
+
+ data->op = op;
+ data->inject.address = address;
+ data->inject.value = value;
+ return 0;
}
if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
@@ -500,6 +569,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
return size;
else
return ret;
+ } else if (data.op == 4) {
+ ret = amdgpu_check_address_validity(adev, data.inject.address, data.inject.value);
+ return ret ? ret : size;
}
if (!amdgpu_ras_is_supported(adev, data.head.block))
@@ -513,22 +585,16 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
break;
case 2:
- if ((data.inject.address >= adev->gmc.mc_vram_size &&
- adev->gmc.mc_vram_size) ||
- (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
- dev_warn(adev->dev, "RAS WARN: input address "
- "0x%llx is invalid.",
+ /* umc ce/ue error injection for a bad page is not allowed */
+ if (data.head.block == AMDGPU_RAS_BLOCK__UMC)
+ ret = amdgpu_ras_check_bad_page(adev, data.inject.address);
+ if (ret == -EINVAL) {
+ dev_warn(adev->dev, "RAS WARN: input address 0x%llx is invalid.",
data.inject.address);
- ret = -EINVAL;
break;
- }
-
- /* umc ce/ue error injection for a bad page is not allowed */
- if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
- amdgpu_ras_check_bad_page(adev, data.inject.address)) {
- dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
- "already been marked as bad!\n",
- data.inject.address);
+ } else if (ret == 1) {
+ dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
+ data.inject.address);
break;
}
@@ -548,6 +614,8 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
return size;
}
+static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev);
+
/**
* DOC: AMDGPU RAS debugfs EEPROM table reset interface
*
@@ -572,6 +640,11 @@ static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
(struct amdgpu_device *)file_inode(f)->i_private;
int ret;
+ if (amdgpu_uniras_enabled(adev)) {
+ ret = amdgpu_uniras_clear_badpages_info(adev);
+ return ret ? ret : size;
+ }
+
ret = amdgpu_ras_eeprom_reset_table(
&(amdgpu_ras_get_context(adev)->eeprom_control));
@@ -1107,6 +1180,9 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
err_info->de_count, blk_name);
}
} else {
+ if (adev->debug_disable_ce_logs)
+ return;
+
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
@@ -1476,9 +1552,51 @@ out_fini_err_data:
return ret;
}
+static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev)
+{
+ struct ras_cmd_dev_handle req = {0};
+ int ret;
+
+ ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__CLEAR_BAD_PAGE_INFO,
+ &req, sizeof(req), NULL, 0);
+ if (ret) {
+ dev_err(adev->dev, "Failed to clear bad pages info, ret: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int amdgpu_uniras_query_block_ecc(struct amdgpu_device *adev,
+ struct ras_query_if *info)
+{
+ struct ras_cmd_block_ecc_info_req req = {0};
+ struct ras_cmd_block_ecc_info_rsp rsp = {0};
+ int ret;
+
+ if (!info)
+ return -EINVAL;
+
+ req.block_id = info->head.block;
+ req.subblock_id = info->head.sub_block_index;
+
+ ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BLOCK_ECC_STATUS,
+ &req, sizeof(req), &rsp, sizeof(rsp));
+ if (!ret) {
+ info->ce_count = rsp.ce_count;
+ info->ue_count = rsp.ue_count;
+ info->de_count = rsp.de_count;
+ }
+
+ return ret;
+}
+
int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
{
- return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_uniras_query_block_ecc(adev, info);
+ else
+ return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
}
int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
@@ -1530,6 +1648,27 @@ int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
return 0;
}
+static int amdgpu_uniras_error_inject(struct amdgpu_device *adev,
+ struct ras_inject_if *info)
+{
+ struct ras_cmd_inject_error_req inject_req;
+ struct ras_cmd_inject_error_rsp rsp;
+
+ if (!info)
+ return -EINVAL;
+
+ memset(&inject_req, 0, sizeof(inject_req));
+ inject_req.block_id = info->head.block;
+ inject_req.subblock_id = info->head.sub_block_index;
+ inject_req.address = info->address;
+ inject_req.error_type = info->head.type;
+ inject_req.instance_mask = info->instance_mask;
+ inject_req.method = info->value;
+
+ return amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__INJECT_ERROR,
+ &inject_req, sizeof(inject_req), &rsp, sizeof(rsp));
+}
+
/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
struct ras_inject_if *info)
@@ -1547,6 +1686,9 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
info->head.block,
info->head.sub_block_index);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_uniras_error_inject(adev, info);
+
/* inject on guest isn't allowed, return success directly */
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1691,7 +1833,9 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
/* sysfs begin */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
- struct ras_badpage **bps, unsigned int *count);
+ struct ras_badpage *bps, uint32_t count, uint32_t start);
+static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev,
+ struct ras_badpage *bps, uint32_t count, uint32_t start);
static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
@@ -1749,19 +1893,50 @@ static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
unsigned int end = div64_ul(ppos + count - 1, element_size);
ssize_t s = 0;
struct ras_badpage *bps = NULL;
- unsigned int bps_count = 0;
+ int bps_count = 0, i, status;
+ uint64_t address;
memset(buf, 0, count);
- if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
+ bps_count = end - start;
+ bps = kmalloc_array(bps_count, sizeof(*bps), GFP_KERNEL);
+ if (!bps)
return 0;
- for (; start < end && start < bps_count; start++)
+ memset(bps, 0, sizeof(*bps) * bps_count);
+
+ if (amdgpu_uniras_enabled(adev))
+ bps_count = amdgpu_uniras_badpages_read(adev, bps, bps_count, start);
+ else
+ bps_count = amdgpu_ras_badpages_read(adev, bps, bps_count, start);
+
+ if (bps_count <= 0) {
+ kfree(bps);
+ return 0;
+ }
+
+ for (i = 0; i < bps_count; i++) {
+ address = ((uint64_t)bps[i].bp) << AMDGPU_GPU_PAGE_SHIFT;
+ if (amdgpu_ras_check_critical_address(adev, address))
+ continue;
+
+ bps[i].size = AMDGPU_GPU_PAGE_SIZE;
+
+ status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
+ address);
+ if (status == -EBUSY)
+ bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
+ else if (status == -ENOENT)
+ bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
+ else
+ bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED;
+
s += scnprintf(&buf[s], element_size + 1,
"0x%08x : 0x%08x : %1s\n",
- bps[start].bp,
- bps[start].size,
- amdgpu_ras_badpage_flags_str(bps[start].flags));
+ bps[i].bp,
+ bps[i].size,
+ amdgpu_ras_badpage_flags_str(bps[i].flags));
+ }
kfree(bps);
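/*
 * Sketch of the resulting sysfs layout (addresses illustrative), one
 * element per retired page with the flag letter supplied by
 * amdgpu_ras_badpage_flags_str():
 *
 *	0x00001000 : 0x00001000 : R
 *	0x00002000 : 0x00001000 : P
 */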
@@ -1777,12 +1952,42 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
}
+static bool amdgpu_ras_get_version_info(struct amdgpu_device *adev, u32 *major,
+ u32 *minor, u32 *rev)
+{
+ int i;
+
+ if (!adev || !major || !minor || !rev || !amdgpu_uniras_enabled(adev))
+ return false;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_RAS) {
+ *major = adev->ip_blocks[i].version->major;
+ *minor = adev->ip_blocks[i].version->minor;
+ *rev = adev->ip_blocks[i].version->rev;
+ return true;
+ }
+ }
+
+ return false;
+}
+
static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct amdgpu_ras *con =
container_of(attr, struct amdgpu_ras, version_attr);
- return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
+ u32 major, minor, rev;
+ ssize_t size = 0;
+
+ size += sysfs_emit_at(buf, size, "table version: 0x%x\n",
+ con->eeprom_control.tbl_hdr.version);
+
+ if (amdgpu_ras_get_version_info(con->adev, &major, &minor, &rev))
+ size += sysfs_emit_at(buf, size, "ras version: %u.%u.%u\n",
+ major, minor, rev);
+
+ return size;
}
static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
@@ -2124,7 +2329,7 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
con->badpages_attr = bin_attr_gpu_vram_bad_pages;
sysfs_bin_attr_init(&con->badpages_attr);
bin_attrs[0] = &con->badpages_attr;
- group.bin_attrs_new = bin_attrs;
+ group.bin_attrs = bin_attrs;
}
r = sysfs_create_group(&adev->dev->kobj, &group);
@@ -2175,6 +2380,11 @@ void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
return;
+ if (amdgpu_uniras_enabled(adev)) {
+ amdgpu_ras_mgr_handle_fatal_interrupt(adev, NULL);
+ return;
+ }
+
if (adev->nbio.ras &&
adev->nbio.ras->handle_ras_controller_intr_no_bifring)
adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
@@ -2345,6 +2555,16 @@ int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
struct ras_manager *obj;
struct ras_ih_data *data;
+ if (amdgpu_uniras_enabled(adev)) {
+ struct ras_ih_info ih_info;
+
+ memset(&ih_info, 0, sizeof(ih_info));
+ ih_info.block = info->head.block;
+ memcpy(&ih_info.iv_entry, info->entry, sizeof(struct amdgpu_iv_entry));
+
+ return amdgpu_ras_mgr_handle_controller_interrupt(adev, &ih_info);
+ }
+
obj = amdgpu_ras_find_obj(adev, &info->head);
if (!obj)
return -EINVAL;
@@ -2539,54 +2759,83 @@ static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
}
}
-/* recovery begin */
-
-/* return 0 on success.
- * caller need free bps.
- */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
- struct ras_badpage **bps, unsigned int *count)
+ struct ras_badpage *bps, uint32_t count, uint32_t start)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
- int i = 0;
- int ret = 0, status;
+ int r = 0;
+ uint32_t i;
if (!con || !con->eh_data || !bps || !count)
return -EINVAL;
mutex_lock(&con->recovery_lock);
data = con->eh_data;
- if (!data || data->count == 0) {
- *bps = NULL;
- ret = -EINVAL;
- goto out;
+ if (start < data->count) {
+ for (i = start; i < data->count; i++) {
+ if (!data->bps[i].ts)
+ continue;
+
+ bps[r].bp = data->bps[i].retired_page;
+ r++;
+ if (r >= count)
+ break;
+ }
}
+ mutex_unlock(&con->recovery_lock);
- *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
- if (!*bps) {
- ret = -ENOMEM;
- goto out;
- }
+ return r;
+}
- for (; i < data->count; i++) {
- (*bps)[i] = (struct ras_badpage){
- .bp = data->bps[i].retired_page,
- .size = AMDGPU_GPU_PAGE_SIZE,
- .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
- };
- status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
- data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
- if (status == -EBUSY)
- (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
- else if (status == -ENOENT)
- (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
+static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev,
+ struct ras_badpage *bps, uint32_t count, uint32_t start)
+{
+ struct ras_cmd_bad_pages_info_req cmd_input;
+ struct ras_cmd_bad_pages_info_rsp *output;
+ uint32_t group, start_group, end_group;
+ uint32_t pos, pos_in_group;
+ int r = 0, i;
+
+ if (!bps || !count)
+ return -EINVAL;
+
+ output = kmalloc(sizeof(*output), GFP_KERNEL);
+ if (!output)
+ return -ENOMEM;
+
+ memset(&cmd_input, 0, sizeof(cmd_input));
+
+ start_group = start / RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+ end_group = (start + count + RAS_CMD_MAX_BAD_PAGES_PER_GROUP - 1) /
+ RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+
+ pos = start;
+ for (group = start_group; group < end_group; group++) {
+ memset(output, 0, sizeof(*output));
+ cmd_input.group_index = group;
+ if (amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BAD_PAGES,
+ &cmd_input, sizeof(cmd_input), output, sizeof(*output)))
+ goto out;
+
+ if (pos >= output->bp_total_cnt)
+ goto out;
+
+ pos_in_group = pos - group * RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+ for (i = pos_in_group; i < output->bp_in_group; i++, pos++) {
+ if (!output->records[i].ts)
+ continue;
+
+ bps[r].bp = output->records[i].retired_page;
+ r++;
+ if (r >= count)
+ goto out;
+ }
}
- *count = data->count;
out:
- mutex_unlock(&con->recovery_lock);
- return ret;
+ kfree(output);
+ return r;
}
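/*
 * Worked example of the paging math above, with an assumed group size of
 * RAS_CMD_MAX_BAD_PAGES_PER_GROUP = 64: start = 100 gives start_group = 1
 * and pos_in_group = 100 - 1 * 64 = 36, so the copy resumes at record 36
 * of the second group.
 */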
static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
@@ -2635,6 +2884,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_device *adev = ras->adev;
struct list_head device_list, *device_list_handle = NULL;
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ unsigned int error_query_mode;
enum ras_event_type type;
if (hive) {
@@ -2663,11 +2913,22 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
device_list_handle = &device_list;
}
+ if (amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) {
+ if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY) {
+ /* wait 500ms to ensure pmfw polling mca bank info done */
+ msleep(500);
+ }
+ }
+
type = amdgpu_ras_get_fatal_error_event(adev);
list_for_each_entry(remote_adev,
device_list_handle, gmc.xgmi.head) {
- amdgpu_ras_query_err_status(remote_adev);
- amdgpu_ras_log_on_err_counter(remote_adev, type);
+ if (amdgpu_uniras_enabled(remote_adev)) {
+ amdgpu_ras_mgr_update_ras_ecc(remote_adev);
+ } else {
+ amdgpu_ras_query_err_status(remote_adev);
+ amdgpu_ras_log_on_err_counter(remote_adev, type);
+ }
}
}
@@ -2719,7 +2980,7 @@ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
unsigned int old_space = data->count + data->space_left;
unsigned int new_space = old_space + pages;
unsigned int align_space = ALIGN(new_space, 512);
- void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
+ void *bps = kmalloc_array(align_space, sizeof(*data->bps), GFP_KERNEL);
if (!bps) {
return -ENOMEM;
@@ -2755,8 +3016,13 @@ static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev,
addr_in.ma.err_addr = bps->address;
addr_in.ma.socket_id = socket;
addr_in.ma.ch_inst = bps->mem_channel;
- /* tell RAS TA the node instance is not used */
- addr_in.ma.node_inst = TA_RAS_INV_NODE;
+ if (!amdgpu_ras_smu_eeprom_supported(adev)) {
+ /* tell RAS TA the node instance is not used */
+ addr_in.ma.node_inst = TA_RAS_INV_NODE;
+ } else {
+ addr_in.ma.umc_inst = bps->mcumc_id;
+ addr_in.ma.node_inst = bps->cu;
+ }
if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
ret = adev->umc.ras->convert_ras_err_addr(adev, err_data,
@@ -2811,8 +3077,11 @@ static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
for (j = 0; j < count; j++) {
if (amdgpu_ras_check_bad_page_unlock(con,
- bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT)) {
+ data->count++;
+ data->space_left--;
continue;
+ }
if (!data->space_left &&
amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
@@ -2825,6 +3094,7 @@ static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
sizeof(struct eeprom_table_record));
data->count++;
data->space_left--;
+ con->bad_page_num++;
}
return 0;
@@ -2854,6 +3124,13 @@ static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev,
if (amdgpu_umc_pages_in_a_row(adev, err_data,
bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
+ for (i = 0; i < adev->umc.retire_unit; i++) {
+ err_data->err_addr[i].address = bps[0].address;
+ err_data->err_addr[i].mem_channel = bps[0].mem_channel;
+ err_data->err_addr[i].bank = bps[0].bank;
+ err_data->err_addr[i].err_type = bps[0].err_type;
+ err_data->err_addr[i].mcumc_id = bps[0].mcumc_id;
+ }
} else {
if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data))
return -EINVAL;
@@ -2885,15 +3162,31 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
struct eeprom_table_record *bps, struct ras_err_data *err_data,
enum amdgpu_memory_partition nps)
{
+ int i = 0;
enum amdgpu_memory_partition save_nps;
- save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
- bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
+ if (!amdgpu_ras_smu_eeprom_supported(adev)) {
+ save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
+ bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
+ } else {
+		/* If pmfw manages the eeprom, save_nps is not stored on it, so
+		 * the mca address should always be converted into a physical
+		 * address; make save_nps differ from nps to force that.
+		 */
+ save_nps = nps + 1;
+ }
if (save_nps == nps) {
if (amdgpu_umc_pages_in_a_row(adev, err_data,
bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
+ for (i = 0; i < adev->umc.retire_unit; i++) {
+ err_data->err_addr[i].address = bps->address;
+ err_data->err_addr[i].mem_channel = bps->mem_channel;
+ err_data->err_addr[i].bank = bps->bank;
+ err_data->err_addr[i].err_type = bps->err_type;
+ err_data->err_addr[i].mcumc_id = bps->mcumc_id;
+ }
} else {
if (bps->address) {
if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
@@ -2947,7 +3240,8 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
if (from_rom) {
/* there are no pa recs in V3, so skip pa recs processing */
- if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+ if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) &&
+ !amdgpu_ras_smu_eeprom_supported(adev)) {
for (i = 0; i < pages; i++) {
if (control->ras_num_recs - i >= adev->umc.retire_unit) {
if ((bps[i].address == bps[i + 1].address) &&
@@ -2956,7 +3250,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
&bps[i], &err_data, nps);
if (ret)
- control->ras_num_bad_pages -= adev->umc.retire_unit;
+ con->bad_page_num -= adev->umc.retire_unit;
i += (adev->umc.retire_unit - 1);
} else {
break;
@@ -2970,8 +3264,10 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
ret = __amdgpu_ras_convert_rec_from_rom(adev,
&bps[i], &err_data, nps);
if (ret)
- control->ras_num_bad_pages -= adev->umc.retire_unit;
+ con->bad_page_num -= adev->umc.retire_unit;
}
+
+ con->eh_data->count_saved = con->eh_data->count;
} else {
ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
}
@@ -2994,7 +3290,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
struct amdgpu_ras_eeprom_control *control;
- int save_count, unit_num, bad_page_num, i;
+ int save_count, unit_num, i;
if (!con || !con->eh_data) {
if (new_cnt)
@@ -3003,30 +3299,44 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
return 0;
}
+ if (!con->eeprom_control.is_eeprom_valid) {
+ dev_warn(adev->dev,
+ "Failed to save EEPROM table data because of EEPROM data corruption!");
+ if (new_cnt)
+ *new_cnt = 0;
+
+ return 0;
+ }
+
mutex_lock(&con->recovery_lock);
control = &con->eeprom_control;
data = con->eh_data;
- bad_page_num = control->ras_num_bad_pages;
- save_count = data->count - bad_page_num;
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ unit_num = control->ras_num_recs -
+ control->ras_num_recs_old;
+ else
+ unit_num = data->count / adev->umc.retire_unit -
+ control->ras_num_recs;
+
+ save_count = con->bad_page_num - control->ras_num_bad_pages;
mutex_unlock(&con->recovery_lock);
- unit_num = save_count / adev->umc.retire_unit;
if (new_cnt)
*new_cnt = unit_num;
/* only new entries are saved */
- if (save_count > 0) {
+ if (unit_num && save_count) {
/* old asics only save pa to eeprom like before */
if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
if (amdgpu_ras_eeprom_append(control,
- &data->bps[bad_page_num], save_count)) {
+ &data->bps[data->count_saved], unit_num)) {
dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
}
} else {
for (i = 0; i < unit_num; i++) {
if (amdgpu_ras_eeprom_append(control,
- &data->bps[bad_page_num +
+ &data->bps[data->count_saved +
i * adev->umc.retire_unit], 1)) {
dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
@@ -3035,6 +3345,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
}
dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
+ data->count_saved = data->count;
}
return 0;
@@ -3067,7 +3378,8 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
/* In V3, there are no pa recs, and some cases (when address == 0) may be
 * parsed as pa recs, so add a version check to avoid it.
 */
- if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+ if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) &&
+ !amdgpu_ras_smu_eeprom_supported(adev)) {
for (i = 0; i < control->ras_num_recs; i++) {
if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
if ((bps[i].address == bps[i + 1].address) &&
@@ -3089,17 +3401,17 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
}
}
+ ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
+ if (ret)
+ goto out;
+
ret = amdgpu_ras_eeprom_check(control);
if (ret)
goto out;
/* HW not usable */
- if (amdgpu_ras_is_rma(adev)) {
+ if (amdgpu_ras_is_rma(adev))
ret = -EHWPOISON;
- goto out;
- }
-
- ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
}
out:
@@ -3107,18 +3419,24 @@ out:
return ret;
}
-static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
+static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
uint64_t addr)
{
struct ras_err_handler_data *data = con->eh_data;
+ struct amdgpu_device *adev = con->adev;
int i;
+ if ((addr >= adev->gmc.mc_vram_size &&
+ adev->gmc.mc_vram_size) ||
+ (addr >= RAS_UMC_INJECT_ADDR_LIMIT))
+ return -EINVAL;
+
addr >>= AMDGPU_GPU_PAGE_SHIFT;
for (i = 0; i < data->count; i++)
if (addr == data->bps[i].retired_page)
- return true;
+ return 1;
- return false;
+ return 0;
}
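/*
 * Note on the new return convention (matching the callers earlier in this
 * patch): the bad-page checks are now tri-state -- -EINVAL for an
 * out-of-range address, 1 for a known bad page, 0 for a clean page.
 */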
/*
@@ -3126,11 +3444,11 @@ static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
*
* Note: this check is only for umc block
*/
-static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- bool ret = false;
+ int ret = 0;
if (!con || !con->eh_data)
return ret;
@@ -3214,7 +3532,7 @@ static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
ecc_log->de_queried_count = 0;
- ecc_log->prev_de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
}
static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
@@ -3234,7 +3552,7 @@ static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
mutex_destroy(&ecc_log->lock);
ecc_log->de_queried_count = 0;
- ecc_log->prev_de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
}
static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
@@ -3260,7 +3578,6 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
page_retirement_dwork.work);
struct amdgpu_device *adev = con->adev;
struct ras_err_data err_data;
- unsigned long err_cnt;
/* If gpu reset is ongoing, delay retiring the bad pages */
if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
@@ -3272,13 +3589,9 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
amdgpu_ras_error_data_init(&err_data);
amdgpu_umc_handle_bad_pages(adev, &err_data);
- err_cnt = err_data.err_addr_cnt;
amdgpu_ras_error_data_fini(&err_data);
- if (err_cnt && amdgpu_ras_is_rma(adev))
- amdgpu_ras_reset_gpu(adev);
-
amdgpu_ras_schedule_retirement_dwork(con,
AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
}
@@ -3289,58 +3602,39 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
int ret = 0;
struct ras_ecc_log_info *ecc_log;
struct ras_query_if info;
- uint32_t timeout = 0;
+ u32 timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- uint64_t de_queried_count;
- uint32_t new_detect_count, total_detect_count;
- uint32_t need_query_count = poison_creation_count;
- bool query_data_timeout = false;
+ u64 de_queried_count;
+ u64 consumption_q_count;
enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
memset(&info, 0, sizeof(info));
info.head.block = AMDGPU_RAS_BLOCK__UMC;
ecc_log = &ras->umc_ecc_log;
- total_detect_count = 0;
+ ecc_log->de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
+
do {
ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
if (ret)
return ret;
de_queried_count = ecc_log->de_queried_count;
- if (de_queried_count > ecc_log->prev_de_queried_count) {
- new_detect_count = de_queried_count - ecc_log->prev_de_queried_count;
- ecc_log->prev_de_queried_count = de_queried_count;
- timeout = 0;
- } else {
- new_detect_count = 0;
- }
-
- if (new_detect_count) {
- total_detect_count += new_detect_count;
- } else {
- if (!timeout && need_query_count)
- timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
+ consumption_q_count = ecc_log->consumption_q_count;
- if (timeout) {
- if (!--timeout) {
- query_data_timeout = true;
- break;
- }
- msleep(1);
- }
- }
- } while (total_detect_count < need_query_count);
+ if (de_queried_count && consumption_q_count)
+ break;
- if (query_data_timeout) {
- dev_warn(adev->dev, "Can't find deferred error! count: %u\n",
- (need_query_count - total_detect_count));
- return -ENOENT;
- }
+ msleep(100);
+ } while (--timeout);
- if (total_detect_count)
+ if (de_queried_count)
schedule_delayed_work(&ras->page_retirement_dwork, 0);
+ if (amdgpu_ras_is_rma(adev) && atomic_cmpxchg(&ras->rma_in_recovery, 0, 1) == 0)
+ amdgpu_ras_reset_gpu(adev);
+
return 0;
}
@@ -3376,6 +3670,12 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
reset_flags |= msg.reset;
}
+ /*
+	 * Try to ensure the poison creation handler completes first, so that
+	 * rma is set if the bad page count exceeds the threshold.
+ */
+ flush_delayed_work(&con->page_retirement_dwork);
+
/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
if (reset_flags && !amdgpu_ras_is_rma(adev)) {
if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
@@ -3385,8 +3685,6 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
else
reset = reset_flags;
- flush_delayed_work(&con->page_retirement_dwork);
-
con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
@@ -3416,6 +3714,7 @@ static int amdgpu_ras_page_retirement_thread(void *param)
if (kthread_should_stop())
break;
+ mutex_lock(&con->poison_lock);
gpu_reset = 0;
do {
@@ -3428,7 +3727,8 @@ static int amdgpu_ras_page_retirement_thread(void *param)
atomic_sub(poison_creation_count, &con->poison_creation_count);
atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
}
- } while (atomic_read(&con->poison_creation_count));
+ } while (atomic_read(&con->poison_creation_count) &&
+ !atomic_read(&con->poison_consumption_count));
if (ret != -EIO) {
msg_count = kfifo_len(&con->poison_fifo);
@@ -3445,6 +3745,7 @@ static int amdgpu_ras_page_retirement_thread(void *param)
/* gpu mode-1 reset is ongoing or just completed ras mode-1 reset */
/* Clear poison creation request */
atomic_set(&con->poison_creation_count, 0);
+ atomic_set(&con->poison_consumption_count, 0);
/* Clear poison fifo */
amdgpu_ras_clear_poison_fifo(adev);
@@ -3469,9 +3770,12 @@ static int amdgpu_ras_page_retirement_thread(void *param)
atomic_sub(msg_count, &con->page_retirement_req_cnt);
}
+ atomic_set(&con->poison_consumption_count, 0);
+
/* Wake up work to save bad pages to eeprom */
schedule_delayed_work(&con->page_retirement_dwork, 0);
}
+ mutex_unlock(&con->poison_lock);
}
return 0;
@@ -3486,10 +3790,14 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
if (!con || amdgpu_sriov_vf(adev))
return 0;
+ if (amdgpu_uniras_enabled(adev))
+ return 0;
+
control = &con->eeprom_control;
+ con->ras_smu_drv = amdgpu_dpm_get_ras_smu_driver(adev);
+
ret = amdgpu_ras_eeprom_init(control);
- if (ret)
- return ret;
+ control->is_eeprom_valid = !ret;
if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
control->ras_num_pa_recs = control->ras_num_recs;
@@ -3498,10 +3806,12 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
adev->umc.ras->get_retire_flip_bits)
adev->umc.ras->get_retire_flip_bits(adev);
- if (control->ras_num_recs) {
+ if (control->ras_num_recs && control->is_eeprom_valid) {
ret = amdgpu_ras_load_bad_pages(adev);
- if (ret)
- return ret;
+ if (ret) {
+ control->is_eeprom_valid = false;
+ return 0;
+ }
amdgpu_dpm_send_hbm_bad_pages_num(
adev, control->ras_num_bad_pages);
@@ -3520,7 +3830,7 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 format!\n");
}
- return ret;
+ return 0;
}
int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
@@ -3551,8 +3861,10 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
}
mutex_init(&con->recovery_lock);
+ mutex_init(&con->poison_lock);
INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
atomic_set(&con->in_recovery, 0);
+ atomic_set(&con->rma_in_recovery, 0);
con->eeprom_control.bad_channel_bitmap = 0;
max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
@@ -3570,6 +3882,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
init_waitqueue_head(&con->page_retirement_wq);
atomic_set(&con->page_retirement_req_cnt, 0);
atomic_set(&con->poison_creation_count, 0);
+ atomic_set(&con->poison_consumption_count, 0);
con->page_retirement_thread =
kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
if (IS_ERR(con->page_retirement_thread)) {
@@ -3642,6 +3955,10 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
kfree(data);
mutex_unlock(&con->recovery_lock);
+ amdgpu_ras_critical_region_init(adev);
+#ifdef CONFIG_X86_MCE_AMD
+ amdgpu_unregister_bad_pages_mca_notifier(adev);
+#endif
return 0;
}
/* recovery end */
@@ -3865,7 +4182,6 @@ static void amdgpu_ras_counte_dw(struct work_struct *work)
atomic_set(&con->ras_ue_count, ue_count);
}
- pm_runtime_mark_last_busy(dev->dev);
Out:
pm_runtime_put_autosuspend(dev->dev);
}
@@ -4068,6 +4384,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
goto release_con;
}
+ con->init_task_pid = task_pid_nr(current);
+ get_task_comm(con->init_task_comm, current);
+
+ mutex_init(&con->critical_region_lock);
+ INIT_LIST_HEAD(&con->critical_region_head);
+
dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
adev->ras_hw_enabled, adev->ras_enabled);
@@ -4347,6 +4669,9 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
if (!adev->ras_enabled || !con)
return 0;
+ amdgpu_ras_critical_region_fini(adev);
+ mutex_destroy(&con->critical_region_lock);
+
list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
if (ras_node->ras_obj) {
obj = ras_node->ras_obj;
@@ -4414,8 +4739,10 @@ void amdgpu_ras_clear_err_state(struct amdgpu_device *adev)
struct amdgpu_ras *ras;
ras = amdgpu_ras_get_context(adev);
- if (ras)
+ if (ras) {
ras->ras_err_state = 0;
+ ras->gpu_reset_flags = 0;
+ }
}
void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
@@ -4463,6 +4790,9 @@ int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_
struct ras_event_state *event_state;
int ret = 0;
+ if (amdgpu_uniras_enabled(adev))
+ return 0;
+
if (type >= RAS_EVENT_TYPE_COUNT) {
ret = -EINVAL;
goto out;
@@ -4513,20 +4843,18 @@ u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type
return id;
}
-void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
+int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
- u64 event_id;
+ u64 event_id = RAS_EVENT_INVALID_ID;
- if (amdgpu_ras_mark_ras_event(adev, type)) {
- dev_err(adev->dev,
- "uncorrectable hardware error (ERREVENT_ATHUB_INTERRUPT) detected!\n");
- return;
- }
+ if (amdgpu_uniras_enabled(adev))
+ return 0;
- event_id = amdgpu_ras_acquire_event_id(adev, type);
+ if (!amdgpu_ras_mark_ras_event(adev, type))
+ event_id = amdgpu_ras_acquire_event_id(adev, type);
RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error "
"(ERREVENT_ATHUB_INTERRUPT) detected!\n");
@@ -4535,6 +4863,8 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
amdgpu_ras_reset_gpu(adev);
}
+
+ return -EBUSY;
}
bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
@@ -4662,6 +4992,28 @@ static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
notifier_registered = true;
}
}
+static void amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ if (!notifier_registered && !mce_adev_list.num_gpu)
+ return;
+ for (i = 0, j = 0; i < mce_adev_list.num_gpu; i++) {
+ if (mce_adev_list.devs[i] == adev)
+ mce_adev_list.devs[i] = NULL;
+ if (!mce_adev_list.devs[i])
+ ++j;
+ }
+
+ if (j == mce_adev_list.num_gpu) {
+ mce_adev_list.num_gpu = 0;
+ /* Unregister the x86 notifier from the MCE subsystem. */
+ if (notifier_registered) {
+ mce_unregister_decode_chain(&amdgpu_bad_page_nb);
+ notifier_registered = false;
+ }
+ }
+}
#endif
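The unregister path above clears the departing device's slot and only unhooks the shared MCE notifier once every slot is empty. A stand-alone, user-space sketch of that teardown rule (types and values are illustrative, not from this patch):

#include <stdbool.h>
#include <stdio.h>

#define MAX_GPU 4

static void *devs[MAX_GPU] = { (void *)1, (void *)2 };
static int num_gpu = 2;
static bool registered = true;

static void unregister_dev(void *dev)
{
	int empty = 0, i;

	for (i = 0; i < num_gpu; i++) {
		if (devs[i] == dev)
			devs[i] = NULL;
		if (!devs[i])
			empty++;
	}
	/* Last one out unhooks the shared notifier. */
	if (empty == num_gpu) {
		num_gpu = 0;
		registered = false;	/* stands in for mce_unregister_decode_chain() */
	}
}

int main(void)
{
	unregister_dev((void *)1);
	printf("registered=%d\n", registered);	/* still 1 */
	unregister_dev((void *)2);
	printf("registered=%d\n", registered);	/* now 0 */
	return 0;
}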
struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
@@ -5253,6 +5605,9 @@ int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
int ret = 0;
+ if (amdgpu_ras_check_critical_address(adev, start))
+ return 0;
+
mutex_lock(&con->page_rsv_lock);
ret = amdgpu_vram_mgr_query_page_status(mgr, start);
if (ret == -ENOENT)
@@ -5284,8 +5639,110 @@ bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_ras_mgr_is_rma(adev);
+
if (!con)
return false;
return con->is_rma;
}
+
+int amdgpu_ras_add_critical_region(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_vram_mgr_resource *vres;
+ struct ras_critical_region *region;
+ struct drm_buddy_block *block;
+ int ret = 0;
+
+ if (!bo || !bo->tbo.resource)
+ return -EINVAL;
+
+ vres = to_amdgpu_vram_mgr_resource(bo->tbo.resource);
+
+ mutex_lock(&con->critical_region_lock);
+
+ /* Check if the bo has already been recorded */
+ list_for_each_entry(region, &con->critical_region_head, node)
+ if (region->bo == bo)
+ goto out;
+
+ /* Record new critical amdgpu bo */
+ list_for_each_entry(block, &vres->blocks, link) {
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ region->bo = bo;
+ region->start = amdgpu_vram_mgr_block_start(block);
+ region->size = amdgpu_vram_mgr_block_size(block);
+ list_add_tail(&region->node, &con->critical_region_head);
+ }
+
+out:
+ mutex_unlock(&con->critical_region_lock);
+
+ return ret;
+}
+
+static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev)
+{
+ amdgpu_ras_add_critical_region(adev, adev->mman.fw_reserved_memory);
+}
+
+static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_critical_region *region, *tmp;
+
+ mutex_lock(&con->critical_region_lock);
+ list_for_each_entry_safe(region, tmp, &con->critical_region_head, node) {
+ list_del(&region->node);
+ kfree(region);
+ }
+ mutex_unlock(&con->critical_region_lock);
+}
+
+bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_critical_region *region;
+ bool ret = false;
+
+ mutex_lock(&con->critical_region_lock);
+ list_for_each_entry(region, &con->critical_region_head, node) {
+ if ((region->start <= addr) &&
+ (addr < (region->start + region->size))) {
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&con->critical_region_lock);
+
+ return ret;
+}
+
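A minimal user-space sketch of the half-open interval test that amdgpu_ras_check_critical_address() performs, with made-up region values; an address hits a region when start <= addr < start + size:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct region { uint64_t start, size; };

static bool hits(const struct region *r, size_t n, uint64_t addr)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (r[i].start <= addr && addr < r[i].start + r[i].size)
			return true;
	return false;
}

int main(void)
{
	const struct region fw_reserved[] = { { 0x100000, 0x2000 } };

	printf("%d %d %d\n",
	       hits(fw_reserved, 1, 0x100000),	/* 1: first byte */
	       hits(fw_reserved, 1, 0x101fff),	/* 1: last byte */
	       hits(fw_reserved, 1, 0x102000));	/* 0: one past the end */
	return 0;
}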
+void amdgpu_ras_pre_reset(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
+ if (amdgpu_uniras_enabled(tmp_adev))
+ amdgpu_ras_mgr_pre_reset(tmp_adev);
+ }
+}
+
+void amdgpu_ras_post_reset(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
+ if (amdgpu_uniras_enabled(tmp_adev))
+ amdgpu_ras_mgr_post_reset(tmp_adev);
+ }
+}
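A hedged sketch of how a reset path might bracket a device-list reset with the two hooks above; the surrounding function and example_do_asic_reset() are assumptions, not code from this patch:

static int example_reset_device_list(struct amdgpu_device *adev,
				     struct list_head *device_list)
{
	int r;

	amdgpu_ras_pre_reset(adev, device_list);
	r = example_do_asic_reset(device_list);	/* hypothetical reset step */
	amdgpu_ras_post_reset(adev, device_list);

	return r;
}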
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 927d6bff734a..ff44190d7d98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -492,11 +492,45 @@ struct ras_ecc_err {
struct ras_ecc_log_info {
struct mutex lock;
struct radix_tree_root de_page_tree;
- uint64_t de_queried_count;
- uint64_t prev_de_queried_count;
+ uint64_t de_queried_count;
+ uint64_t consumption_q_count;
+};
+
+struct ras_critical_region {
+ struct list_head node;
+ struct amdgpu_bo *bo;
+ uint64_t start;
+ uint64_t size;
+};
+
+struct ras_eeprom_table_version {
+ uint32_t minor : 16;
+ uint32_t major : 16;
+};
+
+struct ras_eeprom_smu_funcs {
+ int (*get_ras_table_version)(struct amdgpu_device *adev,
+ uint32_t *table_version);
+ int (*get_badpage_count)(struct amdgpu_device *adev, uint32_t *count, uint32_t timeout);
+ int (*get_badpage_mca_addr)(struct amdgpu_device *adev, uint16_t index, uint64_t *mca_addr);
+ int (*set_timestamp)(struct amdgpu_device *adev, uint64_t timestamp);
+ int (*get_timestamp)(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *timestamp);
+ int (*get_badpage_ipid)(struct amdgpu_device *adev, uint16_t index, uint64_t *ipid);
+ int (*erase_ras_table)(struct amdgpu_device *adev, uint32_t *result);
+};
+
+enum ras_smu_feature_flags {
+ RAS_SMU_FEATURE_BIT__RAS_EEPROM = BIT_ULL(0),
+};
+
+struct ras_smu_drv {
+ const struct ras_eeprom_smu_funcs *smu_eeprom_funcs;
+ void (*ras_smu_feature_flags)(struct amdgpu_device *adev, uint64_t *flags);
};
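A sketch of how an SMU backend might wire up the new ops tables; every my_smu_* symbol is hypothetical and stands in for a real PMFW message wrapper:

static const struct ras_eeprom_smu_funcs my_smu_eeprom_funcs = {
	.get_ras_table_version = my_smu_get_table_version,
	.get_badpage_count = my_smu_get_badpage_count,
	.get_badpage_mca_addr = my_smu_get_badpage_mca_addr,
	.set_timestamp = my_smu_set_timestamp,
	.get_timestamp = my_smu_get_timestamp,
	.get_badpage_ipid = my_smu_get_badpage_ipid,
	.erase_ras_table = my_smu_erase_ras_table,
};

static void my_smu_feature_flags(struct amdgpu_device *adev, uint64_t *flags)
{
	*flags = RAS_SMU_FEATURE_BIT__RAS_EEPROM;	/* PMFW owns the EEPROM */
}

static const struct ras_smu_drv my_ras_smu_drv = {
	.smu_eeprom_funcs = &my_smu_eeprom_funcs,
	.ras_smu_feature_flags = my_smu_feature_flags,
};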
struct amdgpu_ras {
+ void *ras_mgr;
/* ras infrastructure */
/* for ras itself. */
uint32_t features;
@@ -515,6 +549,7 @@ struct amdgpu_ras {
/* gpu recovery */
struct work_struct recovery_work;
atomic_t in_recovery;
+ atomic_t rma_in_recovery;
struct amdgpu_device *adev;
/* error handler data */
struct ras_err_handler_data *eh_data;
@@ -557,6 +592,7 @@ struct amdgpu_ras {
struct mutex page_retirement_lock;
atomic_t page_retirement_req_cnt;
atomic_t poison_creation_count;
+ atomic_t poison_consumption_count;
struct mutex page_rsv_lock;
DECLARE_KFIFO(poison_fifo, struct ras_poison_msg, 128);
struct ras_ecc_log_info umc_ecc_log;
@@ -570,6 +606,21 @@ struct amdgpu_ras {
struct ras_event_manager *event_mgr;
uint64_t reserved_pages_in_bytes;
+
+ pid_t init_task_pid;
+ char init_task_comm[TASK_COMM_LEN];
+
+ int bad_page_num;
+
+ struct list_head critical_region_head;
+ struct mutex critical_region_lock;
+
+ /* Protect poison injection */
+ struct mutex poison_lock;
+
+ /* Disable/Enable uniras switch */
+ bool uniras_enabled;
+ const struct ras_smu_drv *ras_smu_drv;
};
struct ras_fs_data {
@@ -608,6 +659,7 @@ struct ras_err_handler_data {
struct eeprom_table_record *bps;
/* the count of entries */
int count;
+ int count_saved;
/* the space can place new entries */
int space_left;
};
@@ -888,7 +940,7 @@ static inline void amdgpu_ras_intr_cleared(void)
atomic_set(&amdgpu_ras_in_intr, 0);
}
-void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
+int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready);
@@ -973,6 +1025,9 @@ int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_
int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn);
+int amdgpu_ras_add_critical_region(struct amdgpu_device *adev, struct amdgpu_bo *bo);
+bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr);
+
int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
enum amdgpu_ras_block block, uint16_t pasid,
pasid_notify pasid_fn, void *data, uint32_t reset);
@@ -984,4 +1039,9 @@ void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
const char *fmt, ...);
bool amdgpu_ras_is_rma(struct amdgpu_device *adev);
+
+void amdgpu_ras_pre_reset(struct amdgpu_device *adev,
+ struct list_head *device_list);
+void amdgpu_ras_post_reset(struct amdgpu_device *adev,
+ struct list_head *device_list);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 2c58e09e56f9..64dd7a81bff5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -32,6 +32,7 @@
#include <linux/uaccess.h>
#include "amdgpu_reset.h"
+#include "amdgpu_ras_mgr.h"
/* These are memory addresses as would be seen by one or more EEPROM
* chips strung on the I2C bus, usually by manipulating pins 1-3 of a
@@ -123,6 +124,8 @@
RAS_TABLE_V2_1_INFO_SIZE) \
/ RAS_TABLE_RECORD_SIZE)
+#define RAS_SMU_MESSAGE_TIMEOUT_MS 1000 /* 1s */
+
/* Given a zero-based index of an EEPROM RAS record, yields the EEPROM
* offset off of RAS_TABLE_START. That is, this is something you can
* add to control->i2c_address, and then tell I2C layer to read
@@ -277,10 +280,11 @@ static int __write_table_header(struct amdgpu_ras_eeprom_control *control)
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Failed to write EEPROM table header:%d", res);
+ dev_err(adev->dev, "Failed to write EEPROM table header:%d",
+ res);
} else if (res < RAS_TABLE_HEADER_SIZE) {
- DRM_ERROR("Short write:%d out of %d\n",
- res, RAS_TABLE_HEADER_SIZE);
+ dev_err(adev->dev, "Short write:%d out of %d\n", res,
+ RAS_TABLE_HEADER_SIZE);
res = -EIO;
} else {
res = 0;
@@ -323,7 +327,8 @@ static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control)
buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("Failed to alloc buf to write table ras info\n");
+ dev_err(adev->dev,
+ "Failed to alloc buf to write table ras info\n");
return -ENOMEM;
}
@@ -338,10 +343,11 @@ static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control)
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Failed to write EEPROM table ras info:%d", res);
+ dev_err(adev->dev, "Failed to write EEPROM table ras info:%d",
+ res);
} else if (res < RAS_TABLE_V2_1_INFO_SIZE) {
- DRM_ERROR("Short write:%d out of %d\n",
- res, RAS_TABLE_V2_1_INFO_SIZE);
+ dev_err(adev->dev, "Short write:%d out of %d\n", res,
+ RAS_TABLE_V2_1_INFO_SIZE);
res = -EIO;
} else {
res = 0;
@@ -440,42 +446,61 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
struct amdgpu_ras_eeprom_table_ras_info *rai = &control->tbl_rai;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ u32 erase_res = 0;
u8 csum;
int res;
mutex_lock(&control->ras_tbl_mutex);
- hdr->header = RAS_TABLE_HDR_VAL;
- amdgpu_ras_set_eeprom_table_version(control);
+ if (!amdgpu_ras_smu_eeprom_supported(adev)) {
+ hdr->header = RAS_TABLE_HDR_VAL;
+ amdgpu_ras_set_eeprom_table_version(control);
- if (hdr->version >= RAS_TABLE_VER_V2_1) {
- hdr->first_rec_offset = RAS_RECORD_START_V2_1;
- hdr->tbl_size = RAS_TABLE_HEADER_SIZE +
- RAS_TABLE_V2_1_INFO_SIZE;
- rai->rma_status = GPU_HEALTH_USABLE;
- /**
- * GPU health represented as a percentage.
- * 0 means worst health, 100 means fully health.
- */
- rai->health_percent = 100;
- /* ecc_page_threshold = 0 means disable bad page retirement */
- rai->ecc_page_threshold = con->bad_page_cnt_threshold;
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ hdr->first_rec_offset = RAS_RECORD_START_V2_1;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE;
+ rai->rma_status = GPU_HEALTH_USABLE;
+
+ control->ras_record_offset = RAS_RECORD_START_V2_1;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1;
+ /**
+ * GPU health represented as a percentage.
+ * 0 means worst health, 100 means full health.
+ */
+ rai->health_percent = 100;
+ /* ecc_page_threshold = 0 means disable bad page retirement */
+ rai->ecc_page_threshold = con->bad_page_cnt_threshold;
+ } else {
+ hdr->first_rec_offset = RAS_RECORD_START;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE;
+
+ control->ras_record_offset = RAS_RECORD_START;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT;
+ }
+
+ csum = __calc_hdr_byte_sum(control);
+ if (hdr->version >= RAS_TABLE_VER_V2_1)
+ csum += __calc_ras_info_byte_sum(control);
+ csum = -csum;
+ hdr->checksum = csum;
+ res = __write_table_header(control);
+ if (!res && hdr->version > RAS_TABLE_VER_V1)
+ res = __write_table_ras_info(control);
} else {
- hdr->first_rec_offset = RAS_RECORD_START;
- hdr->tbl_size = RAS_TABLE_HEADER_SIZE;
+ res = amdgpu_ras_smu_erase_ras_table(adev, &erase_res);
+ if (res || erase_res) {
+ dev_warn(adev->dev, "RAS EEPROM reset failed, res:%d result:%d",
+ res, erase_res);
+ if (!res)
+ res = -EIO;
+ }
}
- csum = __calc_hdr_byte_sum(control);
- if (hdr->version >= RAS_TABLE_VER_V2_1)
- csum += __calc_ras_info_byte_sum(control);
- csum = -csum;
- hdr->checksum = csum;
- res = __write_table_header(control);
- if (!res && hdr->version > RAS_TABLE_VER_V1)
- res = __write_table_ras_info(control);
-
control->ras_num_recs = 0;
control->ras_num_bad_pages = 0;
+ control->ras_num_mca_recs = 0;
+ control->ras_num_pa_recs = 0;
control->ras_fri = 0;
amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_bad_pages);
@@ -551,6 +576,9 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_ras_mgr_check_eeprom_safety_watermark(adev);
+
if (!__is_ras_eeprom_supported(adev) ||
!amdgpu_bad_page_threshold)
return false;
@@ -607,13 +635,13 @@ static int __amdgpu_ras_eeprom_write(struct amdgpu_ras_eeprom_control *control,
buf, buf_size);
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Writing %d EEPROM table records error:%d",
- num, res);
+ dev_err(adev->dev, "Writing %d EEPROM table records error:%d",
+ num, res);
} else if (res < buf_size) {
/* Short write, return error.
*/
- DRM_ERROR("Wrote %d records out of %d",
- res / RAS_TABLE_RECORD_SIZE, num);
+ dev_err(adev->dev, "Wrote %d records out of %d",
+ res / RAS_TABLE_RECORD_SIZE, num);
res = -EIO;
} else {
res = 0;
@@ -738,8 +766,7 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
else
control->ras_num_mca_recs += num;
- control->ras_num_bad_pages = control->ras_num_pa_recs +
- control->ras_num_mca_recs * adev->umc.retire_unit;
+ control->ras_num_bad_pages = con->bad_page_num;
Out:
kfree(buf);
return res;
@@ -761,15 +788,20 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
dev_warn(adev->dev,
"Saved bad pages %d reaches threshold value %d\n",
control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
- control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
- if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) {
- control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
- control->tbl_rai.health_percent = 0;
- }
+
+ if (adev->cper.enabled && !amdgpu_uniras_enabled(adev) &&
+ amdgpu_cper_generate_bp_threshold_record(adev))
+ dev_warn(adev->dev, "fail to generate bad page threshold cper records\n");
if ((amdgpu_bad_page_threshold != -1) &&
- (amdgpu_bad_page_threshold != -2))
+ (amdgpu_bad_page_threshold != -2)) {
+ control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) {
+ control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
+ control->tbl_rai.health_percent = 0;
+ }
ras->is_rma = true;
+ }
/* ignore the -ENOTSUPP return value */
amdgpu_dpm_send_rma_reason(adev);
@@ -787,8 +819,9 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("allocating memory for table of size %d bytes failed\n",
- control->tbl_hdr.tbl_size);
+ dev_err(adev->dev,
+ "allocating memory for table of size %d bytes failed\n",
+ control->tbl_hdr.tbl_size);
res = -ENOMEM;
goto Out;
}
@@ -800,12 +833,11 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
buf, buf_size);
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("EEPROM failed reading records:%d\n",
- res);
+ dev_err(adev->dev, "EEPROM failed reading records:%d\n", res);
goto Out;
} else if (res < buf_size) {
- DRM_ERROR("EEPROM read %d out of %d bytes\n",
- res, buf_size);
+ dev_err(adev->dev, "EEPROM read %d out of %d bytes\n", res,
+ buf_size);
res = -EIO;
goto Out;
}
@@ -841,6 +873,71 @@ Out:
return res;
}
+int amdgpu_ras_eeprom_update_record_num(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ int ret, retry = 20;
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return 0;
+
+ control->ras_num_recs_old = control->ras_num_recs;
+
+ do {
+ /* A 1000 ms timeout is long enough that smu_get_badpage_count
+ * won't return -EBUSY before it expires.
+ */
+ ret = amdgpu_ras_smu_get_badpage_count(adev,
+ &(control->ras_num_recs), RAS_SMU_MESSAGE_TIMEOUT_MS);
+ if (!ret &&
+ (control->ras_num_recs_old == control->ras_num_recs)) {
+ /* The record count update in PMFW takes some time, so
+ * smu_get_badpage_count may return before the count has been
+ * refreshed; sleep for a while and retry.
+ */
+ msleep(50);
+ retry--;
+ } else {
+ break;
+ }
+ } while (retry);
+
+ /* An unchanged record count is not a real failure,
+ * so don't print a warning here
+ */
+ if (!ret && (control->ras_num_recs_old == control->ras_num_recs))
+ ret = -EINVAL;
+
+ return ret;
+}
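A stand-alone sketch of the bounded poll above, retrying while the observed count is unchanged for up to 20 x 50 ms; the fake counter is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int fake_get_count(uint32_t *count)
{
	static int calls;

	*count = (++calls >= 3) ? 8 : 5;	/* the update "lands" on call 3 */
	return 0;
}

int main(void)
{
	uint32_t old = 5, cur = old;
	int retry = 20, ret;

	do {
		ret = fake_get_count(&cur);
		if (!ret && cur == old) {
			usleep(50 * 1000);	/* msleep(50) equivalent */
			retry--;
		} else {
			break;
		}
	} while (retry);

	printf("ret=%d count=%u updated=%d\n", ret, cur, !ret && cur != old);
	return 0;
}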
+
+static int amdgpu_ras_smu_eeprom_append(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev) || !con)
+ return 0;
+
+ control->ras_num_bad_pages = con->bad_page_num;
+
+ if (amdgpu_bad_page_threshold != 0 &&
+ control->ras_num_bad_pages > con->bad_page_cnt_threshold) {
+ dev_warn(adev->dev,
+ "Saved bad pages %d reaches threshold value %d\n",
+ control->ras_num_bad_pages, con->bad_page_cnt_threshold);
+
+ if (adev->cper.enabled && amdgpu_cper_generate_bp_threshold_record(adev))
+ dev_warn(adev->dev, "fail to generate bad page threshold cper records\n");
+
+ if ((amdgpu_bad_page_threshold != -1) &&
+ (amdgpu_bad_page_threshold != -2))
+ con->is_rma = true;
+ }
+
+ return 0;
+}
+
/**
* amdgpu_ras_eeprom_append -- append records to the EEPROM RAS table
* @control: pointer to control structure
@@ -865,12 +962,16 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
if (!__is_ras_eeprom_supported(adev))
return 0;
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return amdgpu_ras_smu_eeprom_append(control);
+
if (num == 0) {
- DRM_ERROR("will not append 0 records\n");
+ dev_err(adev->dev, "will not append 0 records\n");
return -EINVAL;
} else if (num > control->ras_max_record_count) {
- DRM_ERROR("cannot append %d records than the size of table %d\n",
- num, control->ras_max_record_count);
+ dev_err(adev->dev,
+ "cannot append %d records than the size of table %d\n",
+ num, control->ras_max_record_count);
return -EINVAL;
}
@@ -924,13 +1025,13 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
buf, buf_size);
up_read(&adev->reset_domain->sem);
if (res < 0) {
- DRM_ERROR("Reading %d EEPROM table records error:%d",
- num, res);
+ dev_err(adev->dev, "Reading %d EEPROM table records error:%d",
+ num, res);
} else if (res < buf_size) {
/* Short read, return error.
*/
- DRM_ERROR("Read %d records out of %d",
- res / RAS_TABLE_RECORD_SIZE, num);
+ dev_err(adev->dev, "Read %d records out of %d",
+ res / RAS_TABLE_RECORD_SIZE, num);
res = -EIO;
} else {
res = 0;
@@ -939,6 +1040,50 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
return res;
}
+int amdgpu_ras_eeprom_read_idx(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *record, u32 rec_idx,
+ const u32 num)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ uint64_t ts, end_idx;
+ int i, ret;
+ u64 mca, ipid;
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return 0;
+
+ if (!adev->umc.ras || !adev->umc.ras->mca_ipid_parse)
+ return -EOPNOTSUPP;
+
+ end_idx = rec_idx + num;
+ for (i = rec_idx; i < end_idx; i++) {
+ ret = amdgpu_ras_smu_get_badpage_mca_addr(adev, i, &mca);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_ras_smu_get_badpage_ipid(adev, i, &ipid);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_ras_smu_get_timestamp(adev, i, &ts);
+ if (ret)
+ return ret;
+
+ record[i - rec_idx].address = mca;
+ /* retired_page (pa) is unused now */
+ record[i - rec_idx].retired_page = 0x1ULL;
+ record[i - rec_idx].ts = ts;
+ record[i - rec_idx].err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+
+ adev->umc.ras->mca_ipid_parse(adev, ipid,
+ (uint32_t *)&(record[i - rec_idx].cu),
+ (uint32_t *)&(record[i - rec_idx].mem_channel),
+ (uint32_t *)&(record[i - rec_idx].mcumc_id), NULL);
+ }
+
+ return 0;
+}
+
/**
* amdgpu_ras_eeprom_read -- read EEPROM
* @control: pointer to control structure
@@ -960,15 +1105,18 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
u8 *buf, *pp;
u32 g0, g1;
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return amdgpu_ras_eeprom_read_idx(control, record, 0, num);
+
if (!__is_ras_eeprom_supported(adev))
return 0;
if (num == 0) {
- DRM_ERROR("will not read 0 records\n");
+ dev_err(adev->dev, "will not read 0 records\n");
return -EINVAL;
} else if (num > control->ras_num_recs) {
- DRM_ERROR("too many records to read:%d available:%d\n",
- num, control->ras_num_recs);
+ dev_err(adev->dev, "too many records to read:%d available:%d\n",
+ num, control->ras_num_recs);
return -EINVAL;
}
@@ -1131,6 +1279,10 @@ static ssize_t amdgpu_ras_debugfs_table_read(struct file *f, char __user *buf,
int res = -EFAULT;
size_t data_len;
+ /* PMFW manages the EEPROM data by itself */
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return 0;
+
mutex_lock(&control->ras_tbl_mutex);
/* We want *pos - data_len > 0, which means there's
@@ -1300,7 +1452,8 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("Out of memory checking RAS table checksum.\n");
+ dev_err(adev->dev,
+ "Out of memory checking RAS table checksum.\n");
return -ENOMEM;
}
@@ -1309,7 +1462,7 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control
control->ras_header_offset,
buf, buf_size);
if (res < buf_size) {
- DRM_ERROR("Partial read for checksum, res:%d\n", res);
+ dev_err(adev->dev, "Partial read for checksum, res:%d\n", res);
/* On partial reads, return -EIO.
*/
if (res >= 0)
@@ -1334,7 +1487,8 @@ static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control)
buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
if (!buf) {
- DRM_ERROR("Failed to alloc buf to read EEPROM table ras info\n");
+ dev_err(adev->dev,
+ "Failed to alloc buf to read EEPROM table ras info\n");
return -ENOMEM;
}
@@ -1346,7 +1500,8 @@ static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control)
control->i2c_address + control->ras_info_offset,
buf, RAS_TABLE_V2_1_INFO_SIZE);
if (res < RAS_TABLE_V2_1_INFO_SIZE) {
- DRM_ERROR("Failed to read EEPROM table ras info, res:%d", res);
+ dev_err(adev->dev,
+ "Failed to read EEPROM table ras info, res:%d", res);
res = res >= 0 ? -EIO : res;
goto Out;
}
@@ -1358,6 +1513,42 @@ Out:
return res == RAS_TABLE_V2_1_INFO_SIZE ? 0 : res;
}
+static int amdgpu_ras_smu_eeprom_init(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ uint64_t local_time;
+ int res;
+
+ ras->is_rma = false;
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+ mutex_init(&control->ras_tbl_mutex);
+
+ res = amdgpu_ras_smu_get_table_version(adev, &(hdr->version));
+ if (res)
+ return res;
+
+ res = amdgpu_ras_smu_get_badpage_count(adev,
+ &(control->ras_num_recs), 100);
+ if (res)
+ return res;
+
+ local_time = (uint64_t)ktime_get_real_seconds();
+ res = amdgpu_ras_smu_set_timestamp(adev, local_time);
+ if (res)
+ return res;
+
+ control->ras_max_record_count = 4000;
+
+ control->ras_num_mca_recs = 0;
+ control->ras_num_pa_recs = 0;
+
+ return 0;
+}
+
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
@@ -1366,6 +1557,9 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int res;
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return amdgpu_ras_smu_eeprom_init(control);
+
ras->is_rma = false;
if (!__is_ras_eeprom_supported(adev))
@@ -1387,7 +1581,8 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
control->i2c_address + control->ras_header_offset,
buf, RAS_TABLE_HEADER_SIZE);
if (res < RAS_TABLE_HEADER_SIZE) {
- DRM_ERROR("Failed to read EEPROM table header, res:%d", res);
+ dev_err(adev->dev, "Failed to read EEPROM table header, res:%d",
+ res);
return res >= 0 ? -EIO : res;
}
@@ -1431,6 +1626,47 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
return 0;
}
+static int amdgpu_ras_smu_eeprom_check(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+
+ control->ras_num_bad_pages = ras->bad_page_num;
+
+ if ((ras->bad_page_cnt_threshold < control->ras_num_bad_pages) &&
+ amdgpu_bad_page_threshold != 0) {
+ dev_warn(adev->dev,
+ "RAS records:%d exceed threshold:%d\n",
+ control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
+ if ((amdgpu_bad_page_threshold == -1) ||
+ (amdgpu_bad_page_threshold == -2)) {
+ dev_warn(adev->dev,
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures\n");
+ } else {
+ ras->is_rma = true;
+ dev_warn(adev->dev,
+ "User defined threshold is set, runtime service will be halt when threshold is reached\n");
+ }
+
+ return 0;
+ }
+
+ dev_dbg(adev->dev,
+ "Found existing EEPROM table with %d records",
+ control->ras_num_bad_pages);
+
+ /* Warn if we are at 90% of the threshold or above
+ */
+ if (10 * control->ras_num_bad_pages >= 9 * ras->bad_page_cnt_threshold)
+ dev_warn(adev->dev, "RAS records:%u exceeds 90%% of threshold:%d",
+ control->ras_num_bad_pages,
+ ras->bad_page_cnt_threshold);
+ return 0;
+}
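The 90% warning above stays in integer arithmetic: 10 * n >= 9 * t is the float-free form of n >= 0.9 * t. A tiny demo with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int threshold = 256;	/* plays bad_page_cnt_threshold */
	unsigned int n;

	for (n = 229; n <= 231; n++)
		printf("n=%u -> warn=%d\n", n, 10u * n >= 9u * threshold);
	/* 9 * 256 = 2304, so the warning first fires at n = 231 */
	return 0;
}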
+
int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
@@ -1438,6 +1674,9 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int res = 0;
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return amdgpu_ras_smu_eeprom_check(control);
+
if (!__is_ras_eeprom_supported(adev))
return 0;
@@ -1448,12 +1687,12 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
if (!__get_eeprom_i2c_addr(adev, control))
return -EINVAL;
- control->ras_num_bad_pages = control->ras_num_pa_recs +
- control->ras_num_mca_recs * adev->umc.retire_unit;
+ control->ras_num_bad_pages = ras->bad_page_num;
if (hdr->header == RAS_TABLE_HDR_VAL) {
- DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
- control->ras_num_bad_pages);
+ dev_dbg(adev->dev,
+ "Found existing EEPROM table with %d records",
+ control->ras_num_bad_pages);
if (hdr->version >= RAS_TABLE_VER_V2_1) {
res = __read_table_ras_info(control);
@@ -1521,3 +1760,171 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
return res < 0 ? res : 0;
}
+
+void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *control;
+ int res;
+
+ if (!__is_ras_eeprom_supported(adev) || !ras ||
+ amdgpu_ras_smu_eeprom_supported(adev))
+ return;
+ control = &ras->eeprom_control;
+ if (!control->is_eeprom_valid)
+ return;
+ res = __verify_ras_table_checksum(control);
+ if (res) {
+ dev_warn(adev->dev,
+ "RAS table incorrect checksum or error:%d, try to recover\n",
+ res);
+ if (!amdgpu_ras_eeprom_reset_table(control))
+ if (!amdgpu_ras_save_bad_pages(adev, NULL))
+ if (!__verify_ras_table_checksum(control)) {
+ dev_info(adev->dev, "RAS table recovery succeed\n");
+ return;
+ }
+ dev_err(adev->dev, "RAS table recovery failed\n");
+ control->is_eeprom_valid = false;
+ }
+ return;
+}
+
+static const struct ras_smu_drv *amdgpu_ras_get_smu_ras_drv(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ if (!ras)
+ return NULL;
+
+ return ras->ras_smu_drv;
+}
+
+static uint64_t amdgpu_ras_smu_get_feature_flags(struct amdgpu_device *adev)
+{
+ const struct ras_smu_drv *ras_smu_drv = amdgpu_ras_get_smu_ras_drv(adev);
+ uint64_t flags = 0ULL;
+
+ if (!ras_smu_drv)
+ goto out;
+
+ if (ras_smu_drv->ras_smu_feature_flags)
+ ras_smu_drv->ras_smu_feature_flags(adev, &flags);
+
+out:
+ return flags;
+}
+
+bool amdgpu_ras_smu_eeprom_supported(struct amdgpu_device *adev)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+ uint64_t flags = 0ULL;
+
+ if (!__is_ras_eeprom_supported(adev) || !smu_ras_drv)
+ return false;
+
+ if (!smu_ras_drv->smu_eeprom_funcs)
+ return false;
+
+ flags = amdgpu_ras_smu_get_feature_flags(adev);
+
+ return !!(flags & RAS_SMU_FEATURE_BIT__RAS_EEPROM);
+}
+
+int amdgpu_ras_smu_get_table_version(struct amdgpu_device *adev,
+ uint32_t *table_version)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_ras_table_version)
+ return smu_ras_drv->smu_eeprom_funcs->get_ras_table_version(adev,
+ table_version);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_get_badpage_count(struct amdgpu_device *adev,
+ uint32_t *count, uint32_t timeout)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_badpage_count)
+ return smu_ras_drv->smu_eeprom_funcs->get_badpage_count(adev,
+ count, timeout);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_get_badpage_mca_addr(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *mca_addr)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_badpage_mca_addr)
+ return smu_ras_drv->smu_eeprom_funcs->get_badpage_mca_addr(adev,
+ index, mca_addr);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_set_timestamp(struct amdgpu_device *adev,
+ uint64_t timestamp)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->set_timestamp)
+ return smu_ras_drv->smu_eeprom_funcs->set_timestamp(adev,
+ timestamp);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_get_timestamp(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *timestamp)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_timestamp)
+ return smu_ras_drv->smu_eeprom_funcs->get_timestamp(adev,
+ index, timestamp);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_get_badpage_ipid(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *ipid)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_badpage_ipid)
+ return smu_ras_drv->smu_eeprom_funcs->get_badpage_ipid(adev,
+ index, ipid);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_erase_ras_table(struct amdgpu_device *adev,
+ uint32_t *result)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->erase_ras_table)
+ return smu_ras_drv->smu_eeprom_funcs->erase_ras_table(adev,
+ result);
+ return -EOPNOTSUPP;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index ec6d7ea37ad0..2e5d63957e71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -82,6 +82,7 @@ struct amdgpu_ras_eeprom_control {
/* Number of records in the table.
*/
u32 ras_num_recs;
+ u32 ras_num_recs_old;
/* the bad page number is ras_num_recs or
* ras_num_recs * umc.retire_unit
@@ -114,6 +115,8 @@ struct amdgpu_ras_eeprom_control {
/* Record channel info which occurred bad pages
*/
u32 bad_channel_bitmap;
+
+ bool is_eeprom_valid;
};
/*
@@ -159,6 +162,37 @@ void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control);
+void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev);
+
+bool amdgpu_ras_smu_eeprom_supported(struct amdgpu_device *adev);
+
+int amdgpu_ras_smu_get_table_version(struct amdgpu_device *adev,
+ uint32_t *table_version);
+
+int amdgpu_ras_smu_get_badpage_count(struct amdgpu_device *adev,
+ uint32_t *count, uint32_t timeout);
+
+int amdgpu_ras_smu_get_badpage_mca_addr(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *mca_addr);
+
+int amdgpu_ras_smu_set_timestamp(struct amdgpu_device *adev,
+ uint64_t timestamp);
+
+int amdgpu_ras_smu_get_timestamp(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *timestamp);
+
+int amdgpu_ras_smu_get_badpage_ipid(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *ipid);
+
+int amdgpu_ras_smu_erase_ras_table(struct amdgpu_device *adev,
+ uint32_t *result);
+
+int amdgpu_ras_eeprom_read_idx(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *record, u32 rec_idx,
+ const u32 num);
+
+int amdgpu_ras_eeprom_update_record_num(struct amdgpu_ras_eeprom_control *control);
+
extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops;
extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 50fcd86e1033..be2e56ce1355 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -91,6 +91,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
break;
case TTM_PL_TT:
case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
node = to_ttm_range_mgr_node(res)->mm_nodes;
while (start >= node->size << PAGE_SHIFT)
start -= node++->size << PAGE_SHIFT;
@@ -153,6 +154,7 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
break;
case TTM_PL_TT:
case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
node = cur->node;
cur->node = ++node;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index dabfbdf6f1ce..28c4ad62f50e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -340,6 +340,9 @@ void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf,
case AMDGPU_RESET_SRC_USER:
strscpy(buf, "user trigger", len);
break;
+ case AMDGPU_RESET_SRC_USERQ:
+ strscpy(buf, "user queue trigger", len);
+ break;
default:
strscpy(buf, "unknown", len);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 4d9b9701139b..07b4d37f1db6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -43,6 +43,7 @@ enum AMDGPU_RESET_SRCS {
AMDGPU_RESET_SRC_MES,
AMDGPU_RESET_SRC_HWS,
AMDGPU_RESET_SRC_USER,
+ AMDGPU_RESET_SRC_USERQ,
};
struct amdgpu_reset_context {
@@ -160,4 +161,16 @@ int amdgpu_reset_do_xgmi_reset_on_init(
bool amdgpu_reset_in_recovery(struct amdgpu_device *adev);
+static inline void amdgpu_reset_set_dpc_status(struct amdgpu_device *adev,
+ bool status)
+{
+ adev->pcie_reset_ctx.occurs_dpc = status;
+ adev->no_hw_access = status;
+}
+
+static inline bool amdgpu_reset_in_dpc(struct amdgpu_device *adev)
+{
+ return adev->pcie_reset_ctx.occurs_dpc;
+}
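A hedged sketch of where the new DPC helpers might sit in the PCI error flow; the callback wiring below is an assumption, only the two inline helpers come from this patch:

static pci_ers_result_t example_pci_error_detected(struct pci_dev *pdev,
						   pci_channel_state_t state)
{
	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

	if (state == pci_channel_io_frozen)
		amdgpu_reset_set_dpc_status(adev, true);	/* also gates off HW access */

	return PCI_ERS_RESULT_NEED_RESET;
}

The matching amdgpu_reset_set_dpc_status(adev, false) would run once the slot reset completes, and amdgpu_reset_in_dpc() lets reset code branch on the contained-error case in between.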
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 426834806fbf..c596b6df2e2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -33,6 +33,7 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
+#include "amdgpu_ras_mgr.h"
#include "atom.h"
/*
@@ -99,6 +100,29 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
return 0;
}
+/**
+ * amdgpu_ring_alloc_reemit - allocate space on the ring buffer for re-emission
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Allocate @ndw dwords in the ring buffer (all asics).
+ * Doesn't check the max_dw limit, as we may be re-emitting
+ * several submissions.
+ */
+static void amdgpu_ring_alloc_reemit(struct amdgpu_ring *ring, unsigned int ndw)
+{
+ /* Align requested size with padding so unlock_commit can
+ * pad safely */
+ ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
+
+ ring->count_dw = ndw;
+ ring->wptr_old = ring->wptr;
+
+ if (ring->funcs->begin_use)
+ ring->funcs->begin_use(ring);
+}
+
/**
* amdgpu_ring_insert_nop - insert NOP packets
*
* @ring: amdgpu_ring structure holding ring information
@@ -136,8 +160,16 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- while (ib->length_dw & ring->funcs->align_mask)
- ib->ptr[ib->length_dw++] = ring->funcs->nop;
+ u32 align_mask = ring->funcs->align_mask;
+ u32 count = ib->length_dw & align_mask;
+
+ if (count) {
+ count = align_mask + 1 - count;
+
+ memset32(&ib->ptr[ib->length_dw], ring->funcs->nop, count);
+
+ ib->length_dw += count;
+ }
}
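A worked, user-space example of the padding arithmetic above: with an 8-dword alignment (align_mask = 7) and length_dw = 13, the tail is 13 & 7 = 5 dwords, so 7 + 1 - 5 = 3 NOPs are appended:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t align_mask = 7, length_dw = 13;
	uint32_t count = length_dw & align_mask;

	if (count)
		count = align_mask + 1 - count;	/* NOPs needed to realign */

	printf("pad %u nops -> length_dw %u\n", count, length_dw + count);
	/* prints: pad 3 nops -> length_dw 16 */
	return 0;
}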
/**
@@ -333,15 +365,23 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
/* Initialize cached_rptr to 0 */
ring->cached_rptr = 0;
+ if (!ring->ring_backup) {
+ ring->ring_backup = kvzalloc(ring->ring_size, GFP_KERNEL);
+ if (!ring->ring_backup)
+ return -ENOMEM;
+ }
+
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
+ r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_bytes,
+ PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&ring->ring_obj,
&ring->gpu_addr,
(void **)&ring->ring);
if (r) {
dev_err(adev->dev, "(%d) ring create failed\n", r);
+ kvfree(ring->ring_backup);
return r;
}
amdgpu_ring_clear_ring(ring);
@@ -385,12 +425,12 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
amdgpu_bo_free_kernel(&ring->ring_obj,
&ring->gpu_addr,
(void **)&ring->ring);
+ kvfree(ring->ring_backup);
+ ring->ring_backup = NULL;
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
ring->me = 0;
-
- ring->adev->rings[ring->idx] = NULL;
}
/**
@@ -427,9 +467,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
{
unsigned long flags;
ktime_t deadline;
-
- if (unlikely(ring->adev->debug_disable_soft_recovery))
- return false;
+ bool ret;
deadline = ktime_add_us(ktime_get(), 10000);
@@ -441,12 +479,16 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
dma_fence_set_error(fence, -ENODATA);
spin_unlock_irqrestore(fence->lock, flags);
- atomic_inc(&ring->adev->gpu_reset_counter);
while (!dma_fence_is_signaled(fence) &&
ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
ring->funcs->soft_recovery(ring, vmid);
- return dma_fence_is_signaled(fence);
+ ret = dma_fence_is_signaled(fence);
+ /* increment the counter only if soft reset worked */
+ if (ret)
+ atomic_inc(&ring->adev->gpu_reset_counter);
+
+ return ret;
}
/*
@@ -454,6 +496,66 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
*/
#if defined(CONFIG_DEBUG_FS)
+static ssize_t amdgpu_ras_cper_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *offset)
+{
+ const uint8_t ring_header_size = 12;
+ struct amdgpu_ring *ring = file_inode(f)->i_private;
+ struct ras_cmd_cper_snapshot_req *snapshot_req __free(kfree) =
+ kzalloc(sizeof(struct ras_cmd_cper_snapshot_req), GFP_KERNEL);
+ struct ras_cmd_cper_snapshot_rsp *snapshot_rsp __free(kfree) =
+ kzalloc(sizeof(struct ras_cmd_cper_snapshot_rsp), GFP_KERNEL);
+ struct ras_cmd_cper_record_req *record_req __free(kfree) =
+ kzalloc(sizeof(struct ras_cmd_cper_record_req), GFP_KERNEL);
+ struct ras_cmd_cper_record_rsp *record_rsp __free(kfree) =
+ kzalloc(sizeof(struct ras_cmd_cper_record_rsp), GFP_KERNEL);
+ uint8_t *ring_header __free(kfree) =
+ kzalloc(ring_header_size, GFP_KERNEL);
+ uint32_t total_cper_num;
+ uint64_t start_cper_id;
+ int r;
+
+ if (!snapshot_req || !snapshot_rsp || !record_req || !record_rsp ||
+ !ring_header)
+ return -ENOMEM;
+
+ if (!(*offset)) {
+ /* Need at least 12 bytes for the header on the first read */
+ if (size < ring_header_size)
+ return -EINVAL;
+
+ if (copy_to_user(buf, ring_header, ring_header_size))
+ return -EFAULT;
+ buf += ring_header_size;
+ size -= ring_header_size;
+ }
+
+ r = amdgpu_ras_mgr_handle_ras_cmd(ring->adev,
+ RAS_CMD__GET_CPER_SNAPSHOT,
+ snapshot_req, sizeof(struct ras_cmd_cper_snapshot_req),
+ snapshot_rsp, sizeof(struct ras_cmd_cper_snapshot_rsp));
+ if (r || !snapshot_rsp->total_cper_num)
+ return r;
+
+ start_cper_id = snapshot_rsp->start_cper_id;
+ total_cper_num = snapshot_rsp->total_cper_num;
+
+ record_req->buf_ptr = (uint64_t)(uintptr_t)buf;
+ record_req->buf_size = size;
+ record_req->cper_start_id = start_cper_id + *offset;
+ record_req->cper_num = total_cper_num;
+ r = amdgpu_ras_mgr_handle_ras_cmd(ring->adev, RAS_CMD__GET_CPER_RECORD,
+ record_req, sizeof(struct ras_cmd_cper_record_req),
+ record_rsp, sizeof(struct ras_cmd_cper_record_rsp));
+ if (r)
+ return r;
+
+ r = *offset ? record_rsp->real_data_size : record_rsp->real_data_size + ring_header_size;
+ (*offset) += record_rsp->real_cper_num;
+
+ return r;
+}
+
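A user-space sketch of consuming the 12-byte header that both debugfs paths emit first (see the layout comment just below; the CPER path currently zero-fills it). The debugfs path used here is an example, not taken from this patch:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t hdr[3];
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_ring_gfx_0.0.0",
		      O_RDONLY);

	if (fd < 0 || read(fd, hdr, sizeof(hdr)) != sizeof(hdr)) {
		perror("ring debugfs");
		return 1;
	}
	printf("rptr=0x%x wptr=0x%x drv_wptr=0x%x\n", hdr[0], hdr[1], hdr[2]);
	close(fd);
	return 0;
}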
/* Layout of file is 12 bytes consisting of
* - rptr
* - wptr
@@ -470,6 +572,9 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
loff_t i;
int r;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_CPER && amdgpu_uniras_enabled(ring->adev))
+ return amdgpu_ras_cper_debugfs_read(f, buf, size, pos);
+
if (*pos & 3 || size & 3)
return -EINVAL;
@@ -682,6 +787,7 @@ static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
prop->eop_gpu_addr = ring->eop_gpu_addr;
prop->use_doorbell = ring->use_doorbell;
prop->doorbell_index = ring->doorbell_index;
+ prop->kernel_queue = true;
/* map_queues packet doesn't need activate the queue,
* so only kiq need set this field.
@@ -753,3 +859,69 @@ bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
return true;
}
+
+void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ /* Stop the scheduler to prevent anybody else from touching the ring buffer. */
+ drm_sched_wqueue_stop(&ring->sched);
+ /* back up the non-guilty commands */
+ amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence);
+}
+
+int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ unsigned int i;
+ int r;
+
+ /* verify that the ring is functional */
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ return r;
+
+ /* signal the guilty fence and set an error on all fences from the context */
+ if (guilty_fence)
+ amdgpu_fence_driver_guilty_force_completion(guilty_fence);
+ /* Re-emit the non-guilty commands */
+ if (ring->ring_backup_entries_to_copy) {
+ amdgpu_ring_alloc_reemit(ring, ring->ring_backup_entries_to_copy);
+ for (i = 0; i < ring->ring_backup_entries_to_copy; i++)
+ amdgpu_ring_write(ring, ring->ring_backup[i]);
+ amdgpu_ring_commit(ring);
+ }
+ /* Start the scheduler again */
+ drm_sched_wqueue_start(&ring->sched);
+ return 0;
+}
+
+bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
+ u32 reset_type)
+{
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_GFX:
+ if (ring->adev->gfx.gfx_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ if (ring->adev->gfx.compute_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ if (ring->adev->sdma.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_DEC:
+ case AMDGPU_RING_TYPE_VCN_ENC:
+ if (ring->adev->vcn.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ if (ring->adev->jpeg.supported_reset & reset_type)
+ return true;
+ break;
+ default:
+ break;
+ }
+ return false;
+}
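An illustrative shape for a ring ->reset() callback built on the helpers above; example_engine_hard_reset() and the use of AMDGPU_RESET_TYPE_PER_QUEUE here are assumptions, not code from this patch:

static int example_ring_reset(struct amdgpu_ring *ring, unsigned int vmid,
			      struct amdgpu_fence *timedout_fence)
{
	int r;

	if (!amdgpu_ring_is_reset_type_supported(ring,
						 AMDGPU_RESET_TYPE_PER_QUEUE))
		return -EOPNOTSUPP;

	amdgpu_ring_reset_helper_begin(ring, timedout_fence);

	r = example_engine_hard_reset(ring, vmid);	/* hypothetical HW reset */
	if (r)
		return r;

	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}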
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index b95b47110769..7a27c6c4bb44 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -83,6 +83,7 @@ enum amdgpu_ring_type {
AMDGPU_RING_TYPE_MES,
AMDGPU_RING_TYPE_UMSCH_MM,
AMDGPU_RING_TYPE_CPER,
+ AMDGPU_RING_TYPE_MAX,
};
enum amdgpu_ib_pool_type {
@@ -114,10 +115,11 @@ struct amdgpu_sched {
*/
struct amdgpu_fence_driver {
uint64_t gpu_addr;
- volatile uint32_t *cpu_addr;
+ uint32_t *cpu_addr;
/* sync_seq is protected by ring emission lock */
uint32_t sync_seq;
atomic_t last_seq;
+ u64 signalled_wptr;
bool initialized;
struct amdgpu_irq_src *irq_src;
unsigned irq_type;
@@ -127,11 +129,33 @@ struct amdgpu_fence_driver {
struct dma_fence **fences;
};
+/*
+ * Fences mark an event in the GPUs pipeline and are used
+ * for GPU/CPU synchronization. When the fence is written,
+ * it is expected that all buffers associated with that fence
+ * are no longer in use by the associated ring on the GPU and
+ * that the relevant GPU caches have been flushed.
+ */
+
+struct amdgpu_fence {
+ struct dma_fence base;
+
+ /* RB, DMA, etc. */
+ struct amdgpu_ring *ring;
+ ktime_t start_timestamp;
+
+ /* wptr for the fence for resets */
+ u64 wptr;
+ /* fence context for resets */
+ u64 context;
+};
+
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
-void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
+void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af);
+void amdgpu_fence_save_wptr(struct amdgpu_fence *af);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
@@ -141,8 +165,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job,
- unsigned flags);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
+ unsigned int flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
@@ -186,7 +210,18 @@ struct amdgpu_ring_funcs {
bool support_64bit_ptrs;
bool no_user_fence;
bool secure_submission_supported;
- unsigned extra_dw;
+
+ /**
+ * @extra_bytes:
+ *
+ * Optional extra space in bytes that is added to the ring size
+ * when allocating the BO that holds the contents of the ring.
+ * This space isn't used for command submission to the ring,
+ * but is just there to satisfy some hardware requirements or
+ * implement workarounds. It's up to the implementation of each
+ * specific ring to initialize this space.
+ */
+ unsigned extra_bytes;
/* ring read/write ptr handling */
u64 (*get_rptr)(struct amdgpu_ring *ring);
@@ -252,9 +287,9 @@ struct amdgpu_ring_funcs {
void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
- int (*reset)(struct amdgpu_ring *ring, unsigned int vmid);
+ int (*reset)(struct amdgpu_ring *ring, unsigned int vmid,
+ struct amdgpu_fence *timedout_fence);
void (*emit_cleaner_shader)(struct amdgpu_ring *ring);
- bool (*is_guilty)(struct amdgpu_ring *ring);
};
/**
@@ -268,9 +303,12 @@ struct amdgpu_ring {
struct amdgpu_bo *ring_obj;
uint32_t *ring;
+ /* backups for resets */
+ uint32_t *ring_backup;
+ unsigned int ring_backup_entries_to_copy;
unsigned rptr_offs;
u64 rptr_gpu_addr;
- volatile u32 *rptr_cpu_addr;
+ u32 *rptr_cpu_addr;
/**
* @wptr:
@@ -350,19 +388,19 @@ struct amdgpu_ring {
* This is the CPU address pointer in the writeback slot. This is used
* to commit changes to the GPU.
*/
- volatile u32 *wptr_cpu_addr;
+ u32 *wptr_cpu_addr;
unsigned fence_offs;
u64 fence_gpu_addr;
- volatile u32 *fence_cpu_addr;
+ u32 *fence_cpu_addr;
uint64_t current_ctx;
char name[16];
u32 trail_seq;
unsigned trail_fence_offs;
u64 trail_fence_gpu_addr;
- volatile u32 *trail_fence_cpu_addr;
+ u32 *trail_fence_cpu_addr;
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
- volatile u32 *cond_exe_cpu_addr;
+ u32 *cond_exe_cpu_addr;
unsigned int set_q_mode_offs;
u32 *set_q_mode_ptr;
u64 set_q_mode_token;
@@ -409,7 +447,7 @@ struct amdgpu_ring {
#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))
-#define amdgpu_ring_reset(r, v) (r)->funcs->reset((r), (v))
+#define amdgpu_ring_reset(r, v, f) (r)->funcs->reset((r), (v), (f))
unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
@@ -442,10 +480,7 @@ static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
- int i = 0;
- while (i <= ring->buf_mask)
- ring->ring[i++] = ring->funcs->nop;
-
+ memset32(ring->ring, ring->funcs->nop, ring->buf_mask + 1);
}
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
@@ -534,4 +569,12 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
+void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
+ u32 reset_type);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index db5791e1a7ce..5aa830a02d80 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -89,7 +89,7 @@ void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id)
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
{
const u32 *src_ptr;
- volatile u32 *dst_ptr;
+ u32 *dst_ptr;
u32 i;
int r;
@@ -189,7 +189,7 @@ int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
{
const __le32 *fw_data;
- volatile u32 *dst_ptr;
+ u32 *dst_ptr;
int me, i, max_me;
u32 bo_offset = 0;
u32 table_offset, table_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index c210625be220..2ce310b31942 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -251,7 +251,7 @@ struct amdgpu_rlc_funcs {
* and it also provides a pointer to it which is used by the firmware
* to load the clear state in some cases.
*/
- void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
+ void (*get_csb_buffer)(struct amdgpu_device *adev, u32 *buffer);
int (*get_cp_table_num)(struct amdgpu_device *adev);
int (*resume)(struct amdgpu_device *adev);
void (*stop)(struct amdgpu_device *adev);
@@ -275,19 +275,19 @@ struct amdgpu_rlc {
/* for power gating */
struct amdgpu_bo *save_restore_obj;
uint64_t save_restore_gpu_addr;
- volatile uint32_t *sr_ptr;
+ uint32_t *sr_ptr;
const u32 *reg_list;
u32 reg_list_size;
/* for clear state */
struct amdgpu_bo *clear_state_obj;
uint64_t clear_state_gpu_addr;
- volatile uint32_t *cs_ptr;
+ uint32_t *cs_ptr;
const struct cs_section_def *cs_data;
u32 clear_state_size;
/* for cp tables */
struct amdgpu_bo *cp_table_obj;
uint64_t cp_table_gpu_addr;
- volatile uint32_t *cp_table_ptr;
+ uint32_t *cp_table_ptr;
u32 cp_table_size;
/* safe mode for updating CG/PG state */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 6716ac281c49..8b8a04138711 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -534,91 +534,75 @@ bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_rin
static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id)
{
struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
- int r = -EOPNOTSUPP;
-
- switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
- case IP_VERSION(4, 4, 2):
- case IP_VERSION(4, 4, 4):
- case IP_VERSION(4, 4, 5):
- /* For SDMA 4.x, use the existing DPM interface for backward compatibility */
- r = amdgpu_dpm_reset_sdma(adev, 1 << instance_id);
- break;
- case IP_VERSION(5, 0, 0):
- case IP_VERSION(5, 0, 1):
- case IP_VERSION(5, 0, 2):
- case IP_VERSION(5, 0, 5):
- case IP_VERSION(5, 2, 0):
- case IP_VERSION(5, 2, 2):
- case IP_VERSION(5, 2, 4):
- case IP_VERSION(5, 2, 5):
- case IP_VERSION(5, 2, 6):
- case IP_VERSION(5, 2, 3):
- case IP_VERSION(5, 2, 1):
- case IP_VERSION(5, 2, 7):
- if (sdma_instance->funcs->soft_reset_kernel_queue)
- r = sdma_instance->funcs->soft_reset_kernel_queue(adev, instance_id);
- break;
- default:
- break;
- }
- return r;
+ if (sdma_instance->funcs->soft_reset_kernel_queue)
+ return sdma_instance->funcs->soft_reset_kernel_queue(adev, instance_id);
+
+ return -EOPNOTSUPP;
}
/**
* amdgpu_sdma_reset_engine - Reset a specific SDMA engine
* @adev: Pointer to the AMDGPU device
- * @instance_id: ID of the SDMA engine instance to reset
+ * @instance_id: Logical ID of the SDMA engine instance to reset
+ * @caller_handles_kernel_queues: Skip kernel queue processing. Caller
+ * will handle it.
*
* Returns: 0 on success, or a negative error code on failure.
*/
-int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
+int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id,
+ bool caller_handles_kernel_queues)
{
int ret = 0;
struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
struct amdgpu_ring *gfx_ring = &sdma_instance->ring;
struct amdgpu_ring *page_ring = &sdma_instance->page;
- bool gfx_sched_stopped = false, page_sched_stopped = false;
mutex_lock(&sdma_instance->engine_reset_mutex);
- /* Stop the scheduler's work queue for the GFX and page rings if they are running.
- * This ensures that no new tasks are submitted to the queues while
- * the reset is in progress.
- */
- if (!amdgpu_ring_sched_ready(gfx_ring)) {
+
+ if (!caller_handles_kernel_queues) {
+ /* Stop the scheduler's work queue for the GFX and page rings if they are running.
+ * This ensures that no new tasks are submitted to the queues while
+ * the reset is in progress.
+ */
drm_sched_wqueue_stop(&gfx_ring->sched);
- gfx_sched_stopped = true;
- }
- if (adev->sdma.has_page_queue && !amdgpu_ring_sched_ready(page_ring)) {
- drm_sched_wqueue_stop(&page_ring->sched);
- page_sched_stopped = true;
+ if (adev->sdma.has_page_queue)
+ drm_sched_wqueue_stop(&page_ring->sched);
}
- if (sdma_instance->funcs->stop_kernel_queue)
+ if (sdma_instance->funcs->stop_kernel_queue) {
sdma_instance->funcs->stop_kernel_queue(gfx_ring);
+ if (adev->sdma.has_page_queue)
+ sdma_instance->funcs->stop_kernel_queue(page_ring);
+ }
/* Perform the SDMA reset for the specified instance */
ret = amdgpu_sdma_soft_reset(adev, instance_id);
if (ret) {
- dev_err(adev->dev, "Failed to reset SDMA instance %u\n", instance_id);
+ dev_err(adev->dev, "Failed to reset SDMA logical instance %u\n", instance_id);
goto exit;
}
- if (sdma_instance->funcs->start_kernel_queue)
+ if (sdma_instance->funcs->start_kernel_queue) {
sdma_instance->funcs->start_kernel_queue(gfx_ring);
+ if (adev->sdma.has_page_queue)
+ sdma_instance->funcs->start_kernel_queue(page_ring);
+ }
exit:
- /* Restart the scheduler's work queue for the GFX and page rings
- * if they were stopped by this function. This allows new tasks
- * to be submitted to the queues after the reset is complete.
- */
- if (!ret) {
- if (gfx_sched_stopped && amdgpu_ring_sched_ready(gfx_ring)) {
+ if (!caller_handles_kernel_queues) {
+ /* Restart the scheduler's work queue for the GFX and page rings
+ * if they were stopped by this function. This allows new tasks
+ * to be submitted to the queues after the reset is complete.
+ */
+ if (!ret) {
+ amdgpu_fence_driver_force_completion(gfx_ring);
drm_sched_wqueue_start(&gfx_ring->sched);
- }
- if (page_sched_stopped && amdgpu_ring_sched_ready(page_ring)) {
- drm_sched_wqueue_start(&page_ring->sched);
+ if (adev->sdma.has_page_queue) {
+ amdgpu_fence_driver_force_completion(page_ring);
+ drm_sched_wqueue_start(&page_ring->sched);
+ }
}
}
mutex_unlock(&sdma_instance->engine_reset_mutex);
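
As an aside, a hedged sketch of the caller-managed path this parameter enables: a caller that quiesces its own kernel queues stops the scheduler work queues itself, passes caller_handles_kernel_queues = true, and performs the force-completion/restart steps that amdgpu_sdma_reset_engine() would otherwise do internally. The wrapper below is hypothetical, not part of this patch:

	/* Illustrative sketch only; not part of this patch. */
	static int example_reset_with_caller_queues(struct amdgpu_device *adev,
						    u32 instance_id)
	{
		struct amdgpu_sdma_instance *inst = &adev->sdma.instance[instance_id];
		int r;

		/* Caller quiesces the scheduler work queues itself ... */
		drm_sched_wqueue_stop(&inst->ring.sched);
		if (adev->sdma.has_page_queue)
			drm_sched_wqueue_stop(&inst->page.sched);

		/* ... so it asks the helper to skip kernel queue handling. */
		r = amdgpu_sdma_reset_engine(adev, instance_id, true);

		if (!r) {
			amdgpu_fence_driver_force_completion(&inst->ring);
			drm_sched_wqueue_start(&inst->ring.sched);
			if (adev->sdma.has_page_queue) {
				amdgpu_fence_driver_force_completion(&inst->page);
				drm_sched_wqueue_start(&inst->page.sched);
			}
		}
		return r;
	}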
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index e5f8951bbb6f..34311f32be4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -172,7 +172,8 @@ struct amdgpu_buffer_funcs {
uint32_t byte_count);
};
-int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id);
+int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id,
+ bool caller_handles_kernel_queues);
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
index 41ebe690eeff..3739be1b71e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
@@ -159,7 +159,6 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
dev_err(adev->dev, "Invalid input: %s\n", str);
}
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
index d45ebfb642ca..a0b479d5fff1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
@@ -67,9 +67,9 @@ static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va **bo_va)
{
- u64 seq64_addr, va_flags;
struct amdgpu_bo *bo;
struct drm_exec exec;
+ u64 seq64_addr;
int r;
bo = adev->seq64.sbo;
@@ -94,9 +94,9 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
seq64_addr = amdgpu_seq64_get_va_base(adev) & AMDGPU_GMC_HOLE_MASK;
- va_flags = amdgpu_gem_va_map_flags(adev, AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC);
- r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE,
- va_flags);
+ r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0,
+ AMDGPU_VA_RESERVED_SEQ64_SIZE,
+ AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC);
if (r) {
DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
amdgpu_vm_bo_del(adev, *bo_va);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 11dd2e0f7979..d13e64a69e25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -167,25 +167,23 @@ TRACE_EVENT(amdgpu_cs_ioctl,
TP_PROTO(struct amdgpu_job *job),
TP_ARGS(job),
TP_STRUCT__entry(
- __field(uint64_t, sched_job_id)
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
- __field(unsigned int, context)
- __field(unsigned int, seqno)
+ __field(u64, context)
+ __field(u64, seqno)
__field(struct dma_fence *, fence)
__string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
TP_fast_assign(
- __entry->sched_job_id = job->base.id;
__assign_str(timeline);
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
__assign_str(ring);
__entry->num_ibs = job->num_ibs;
),
- TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
- __entry->sched_job_id, __get_str(timeline), __entry->context,
+ TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u",
+ __get_str(timeline), __entry->context,
__entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -193,24 +191,22 @@ TRACE_EVENT(amdgpu_sched_run_job,
TP_PROTO(struct amdgpu_job *job),
TP_ARGS(job),
TP_STRUCT__entry(
- __field(uint64_t, sched_job_id)
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
- __field(unsigned int, context)
- __field(unsigned int, seqno)
+ __field(u64, context)
+ __field(u64, seqno)
__string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
TP_fast_assign(
- __entry->sched_job_id = job->base.id;
__assign_str(timeline);
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
__assign_str(ring);
__entry->num_ibs = job->num_ibs;
),
- TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
- __entry->sched_job_id, __get_str(timeline), __entry->context,
+ TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u",
+ __get_str(timeline), __entry->context,
__entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -551,23 +547,19 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
TP_ARGS(sched_job, fence),
TP_STRUCT__entry(
__string(ring, sched_job->base.sched->name)
- __field(uint64_t, id)
__field(struct dma_fence *, fence)
- __field(uint64_t, ctx)
- __field(unsigned, seqno)
+ __field(u64, ctx)
+ __field(u64, seqno)
),
TP_fast_assign(
__assign_str(ring);
- __entry->id = sched_job->base.id;
__entry->fence = fence;
__entry->ctx = fence->context;
__entry->seqno = fence->seqno;
),
- TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u",
- __get_str(ring), __entry->id,
- __entry->fence, __entry->ctx,
- __entry->seqno)
+ TP_printk("job ring=%s need pipe sync to fence=%llu:%llu",
+ __get_str(ring), __entry->ctx, __entry->seqno)
);
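
Note the printk change: both events now identify a job by its scheduler fence alone. For a hypothetical job whose finished fence has context 42 and seqno 7 on ring gfx_0.0.0 with one IB, the rendered trace line would look roughly like: timeline=gfx_0.0.0, fence=42:7, ring_name=gfx_0.0.0, num_ibs=1.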
TRACE_EVENT(amdgpu_reset_reg_dumps,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 9c5df35f05b7..2b931e855abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -123,6 +123,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
case AMDGPU_PL_GWS:
case AMDGPU_PL_OA:
case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
placement->num_placement = 0;
return;
@@ -187,7 +188,6 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
struct amdgpu_job *job;
void *cpu_addr;
uint64_t flags;
- unsigned int i;
int r;
BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
@@ -226,7 +226,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4 + num_bytes,
- AMDGPU_IB_POOL_DELAYED, &job);
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER);
if (r)
return r;
@@ -253,16 +254,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
} else {
- dma_addr_t dma_address;
-
- dma_address = mm_cur->start;
- dma_address += adev->vm_manager.vram_base_offset;
+ u64 pa = mm_cur->start + adev->vm_manager.vram_base_offset;
- for (i = 0; i < num_pages; ++i) {
- amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
- flags, cpu_addr);
- dma_address += PAGE_SIZE;
- }
+ amdgpu_gart_map_vram_range(adev, pa, 0, num_pages, flags, cpu_addr);
}
dma_fence_put(amdgpu_job_submit(job));
@@ -284,12 +278,13 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
* move and different for a BO to BO copy.
*
*/
-int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
- const struct amdgpu_copy_mem *src,
- const struct amdgpu_copy_mem *dst,
- uint64_t size, bool tmz,
- struct dma_resv *resv,
- struct dma_fence **f)
+__attribute__((nonnull))
+static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ const struct amdgpu_copy_mem *src,
+ const struct amdgpu_copy_mem *dst,
+ uint64_t size, bool tmz,
+ struct dma_resv *resv,
+ struct dma_fence **f)
{
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_res_cursor src_mm, dst_mm;
@@ -299,7 +294,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct amdgpu_bo *abo_src, *abo_dst;
if (!adev->mman.buffer_funcs_enabled) {
- DRM_ERROR("Trying to move memory with ring turned off.\n");
+ dev_err(adev->dev,
+ "Trying to move memory with ring turned off.\n");
return -EINVAL;
}
@@ -362,9 +358,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
}
error:
mutex_unlock(&adev->mman.gtt_window_lock);
- if (f)
- *f = dma_fence_get(fence);
- dma_fence_put(fence);
+ *f = fence;
return r;
}
@@ -405,7 +399,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
struct dma_fence *wipe_fence = NULL;
r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
- false);
+ false, AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
if (r) {
goto error;
} else if (wipe_fence) {
@@ -446,7 +440,8 @@ bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
return false;
if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
- res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL)
+ res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL ||
+ res->mem_type == AMDGPU_PL_MMIO_REMAP)
return true;
if (res->mem_type != TTM_PL_VRAM)
@@ -537,10 +532,12 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
old_mem->mem_type == AMDGPU_PL_GWS ||
old_mem->mem_type == AMDGPU_PL_OA ||
old_mem->mem_type == AMDGPU_PL_DOORBELL ||
+ old_mem->mem_type == AMDGPU_PL_MMIO_REMAP ||
new_mem->mem_type == AMDGPU_PL_GDS ||
new_mem->mem_type == AMDGPU_PL_GWS ||
new_mem->mem_type == AMDGPU_PL_OA ||
- new_mem->mem_type == AMDGPU_PL_DOORBELL) {
+ new_mem->mem_type == AMDGPU_PL_DOORBELL ||
+ new_mem->mem_type == AMDGPU_PL_MMIO_REMAP) {
/* Nothing to save here */
amdgpu_bo_move_notify(bo, evict, new_mem);
ttm_bo_move_null(bo, new_mem);
@@ -628,6 +625,12 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
mem->bus.is_iomem = true;
mem->bus.caching = ttm_uncached;
break;
+ case AMDGPU_PL_MMIO_REMAP:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.offset += adev->rmmio_remap.bus_addr;
+ mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_uncached;
+ break;
default:
return -EINVAL;
}
@@ -645,6 +648,8 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;
+ else if (bo->resource->mem_type == AMDGPU_PL_MMIO_REMAP)
+ return ((uint64_t)(adev->rmmio_remap.bus_addr + cursor.start)) >> PAGE_SHIFT;
return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}
@@ -692,10 +697,11 @@ struct amdgpu_ttm_tt {
* memory and start HMM tracking CPU page table update
*
* Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
- * once afterwards to stop HMM tracking
+ * once afterwards to stop HMM tracking. It is the caller's responsibility to
+ * ensure that the range points to valid memory and that it is freed as well.
*/
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
- struct hmm_range **range)
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
+ struct amdgpu_hmm_range *range)
{
struct ttm_tt *ttm = bo->tbo.ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
@@ -705,9 +711,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
bool readonly;
int r = 0;
- /* Make sure get_user_pages_done() can cleanup gracefully */
- *range = NULL;
-
mm = bo->notifier.mm;
if (unlikely(!mm)) {
DRM_DEBUG_DRIVER("BO is not registered?\n");
@@ -731,7 +734,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
readonly = amdgpu_ttm_tt_is_readonly(ttm);
r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
- readonly, NULL, pages, range);
+ readonly, NULL, range);
out_unlock:
mmap_read_unlock(mm);
if (r)
@@ -742,38 +745,6 @@ out_unlock:
return r;
}
-/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
- */
-void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
-
- if (gtt && gtt->userptr && range)
- amdgpu_hmm_range_get_pages_done(range);
-}
-
-/*
- * amdgpu_ttm_tt_get_user_pages_done - stop HMM track the CPU page table change
- * Check if the pages backing this ttm range have been invalidated
- *
- * Returns: true if pages are still valid
- */
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
- struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
-
- if (!gtt || !gtt->userptr || !range)
- return false;
-
- DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
- gtt->userptr, ttm->num_pages);
-
- WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
-
- return !amdgpu_hmm_range_get_pages_done(range);
-}
#endif
/*
@@ -783,12 +754,12 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
* that backs user memory and will ultimately be mapped into the device
* address space.
*/
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range)
{
unsigned long i;
for (i = 0; i < ttm->num_pages; ++i)
- ttm->pages[i] = pages ? pages[i] : NULL;
+ ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_range.hmm_pfns[i]) : NULL;
}
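
A hedged sketch of the caller flow under the new interface. It assumes struct amdgpu_hmm_range embeds a struct hmm_range (as the hmm_pfns dereference above implies) and that teardown goes through the amdgpu_ttm_tt_userptr_range_done() named in the comment earlier in this file; allocation and ownership of the range object are the caller's, as the updated comment states:

	/* Illustrative userptr cycle, not verbatim driver code. */
	static int example_userptr_cycle(struct amdgpu_bo *bo,
					 struct amdgpu_hmm_range *range)
	{
		int r;

		r = amdgpu_ttm_tt_get_user_pages(bo, range); /* starts HMM tracking */
		if (r)
			return r;

		/* Populate ttm->pages from the snapshotted hmm_pfns. */
		amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);

		/* ... bind and use the pages ... */

		/* Exactly one matching call to stop HMM tracking; the caller
		 * also frees the range object afterwards (assumed API). */
		amdgpu_ttm_tt_userptr_range_done(range);
		return 0;
	}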
/*
@@ -934,7 +905,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
if (gtt->userptr) {
r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
if (r) {
- DRM_ERROR("failed to pin userptr\n");
+ dev_err(adev->dev, "failed to pin userptr\n");
return r;
}
} else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
@@ -1060,7 +1031,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
/* if the pages have userptr pinning then clear that first */
if (gtt->userptr) {
amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
- } else if (ttm->sg && gtt->gobj->import_attach) {
+ } else if (ttm->sg && drm_gem_is_imported(gtt->gobj)) {
struct dma_buf_attachment *attach;
attach = gtt->gobj->import_attach;
@@ -1354,10 +1325,11 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
if (mem && (mem->mem_type == TTM_PL_TT ||
mem->mem_type == AMDGPU_PL_DOORBELL ||
- mem->mem_type == AMDGPU_PL_PREEMPT)) {
+ mem->mem_type == AMDGPU_PL_PREEMPT ||
+ mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
flags |= AMDGPU_PTE_SYSTEM;
- if (ttm->caching == ttm_cached)
+ if (ttm && ttm->caching == ttm_cached)
flags |= AMDGPU_PTE_SNOOPED;
}
@@ -1509,10 +1481,12 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4, AMDGPU_IB_POOL_DELAYED,
- &job);
+ &job,
+ AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA);
if (r)
goto out;
+ mutex_lock(&adev->mman.gtt_window_lock);
amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
src_mm.start;
@@ -1527,6 +1501,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
WARN_ON(job->ibs[0].length_dw > num_dw);
fence = amdgpu_job_submit(job);
+ mutex_unlock(&adev->mman.gtt_window_lock);
if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
r = -ETIMEDOUT;
@@ -1781,25 +1756,21 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
&ctx->c2p_bo,
NULL);
if (ret) {
- DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
+ dev_err(adev->dev, "alloc c2p_bo failed(%d)!\n", ret);
amdgpu_ttm_training_reserve_vram_fini(adev);
return ret;
}
ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
}
- if (!adev->gmc.is_app_apu) {
- ret = amdgpu_bo_create_kernel_at(
- adev, adev->gmc.real_vram_size - reserve_size,
- reserve_size, &adev->mman.fw_reserved_memory, NULL);
- if (ret) {
- DRM_ERROR("alloc tmr failed(%d)!\n", ret);
- amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
- NULL, NULL);
- return ret;
- }
- } else {
- DRM_DEBUG_DRIVER("backdoor fw loading path for PSP TMR, no reservation needed\n");
+ ret = amdgpu_bo_create_kernel_at(
+ adev, adev->gmc.real_vram_size - reserve_size, reserve_size,
+ &adev->mman.fw_reserved_memory, NULL);
+ if (ret) {
+ dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
+ NULL);
+ return ret;
}
return 0;
@@ -1821,7 +1792,7 @@ static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev,
adev->gmc.mem_partitions[i].numa.node,
- false, false);
+ TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M)));
}
return 0;
}
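
For reference, the boolean pair (use_dma_alloc, use_dma32) has become a flags word, and the pools now also carry an allocation-order hint: with 4 KiB base pages, get_order(SZ_2M) evaluates to 9 (2 MiB / 4 KiB = 512 = 2^9), so the pools prefer 2 MiB allocations when available. A minimal sketch of composing the same flags as the hunks here and in amdgpu_ttm_init() below:

	u32 pool_flags = TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M));

	if (adev->need_swiotlb)
		pool_flags |= TTM_ALLOCATION_POOL_USE_DMA_ALLOC;
	if (dma_addressing_limited(adev->dev))
		pool_flags |= TTM_ALLOCATION_POOL_USE_DMA32;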
@@ -1840,6 +1811,59 @@ static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev)
adev->mman.ttm_pools = NULL;
}
+/**
+ * amdgpu_ttm_mmio_remap_bo_init - Allocate the singleton 4K MMIO_REMAP BO
+ * @adev: amdgpu device
+ *
+ * Allocates a one-page (4K) GEM BO in AMDGPU_GEM_DOMAIN_MMIO_REMAP when the
+ * hardware exposes a remap base (adev->rmmio_remap.bus_addr) and the host
+ * PAGE_SIZE is <= AMDGPU_GPU_PAGE_SIZE (4K). The BO is created as a regular
+ * GEM object (amdgpu_bo_create).
+ *
+ * Return:
+ * * 0 on success or intentional skip (feature not present/unsupported)
+ * * negative errno on allocation failure
+ */
+static int amdgpu_ttm_mmio_remap_bo_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_bo_param bp;
+ int r;
+
+ /* Skip if HW doesn't expose remap, or if PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE (4K). */
+ if (!adev->rmmio_remap.bus_addr || PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE)
+ return 0;
+
+ memset(&bp, 0, sizeof(bp));
+
+ /* Create exactly one GEM BO in the MMIO_REMAP domain. */
+ bp.type = ttm_bo_type_device; /* userspace-mappable GEM */
+ bp.size = AMDGPU_GPU_PAGE_SIZE; /* 4K */
+ bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_MMIO_REMAP;
+ bp.flags = 0;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create(adev, &bp, &adev->rmmio_remap.bo);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+/**
+ * amdgpu_ttm_mmio_remap_bo_fini - Free the singleton MMIO_REMAP BO
+ * @adev: amdgpu device
+ *
+ * Frees the kernel-owned MMIO_REMAP BO if it was allocated by
+ * amdgpu_ttm_mmio_remap_bo_init().
+ */
+static void amdgpu_ttm_mmio_remap_bo_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_unref(&adev->rmmio_remap.bo);
+ adev->rmmio_remap.bo = NULL;
+}
+
/*
* amdgpu_ttm_init - Init the memory management (ttm) as well as various
* gtt/vram related fields.
@@ -1861,25 +1885,31 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
adev_to_drm(adev)->anon_inode->i_mapping,
adev_to_drm(adev)->vma_offset_manager,
- adev->need_swiotlb,
- dma_addressing_limited(adev->dev));
+ (adev->need_swiotlb ?
+ TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) |
+ (dma_addressing_limited(adev->dev) ?
+ TTM_ALLOCATION_POOL_USE_DMA32 : 0) |
+ TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M)));
if (r) {
- DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+ dev_err(adev->dev,
+ "failed initializing buffer object driver(%d).\n", r);
return r;
}
r = amdgpu_ttm_pools_init(adev);
if (r) {
- DRM_ERROR("failed to init ttm pools(%d).\n", r);
+ dev_err(adev->dev, "failed to init ttm pools(%d).\n", r);
return r;
}
adev->mman.initialized = true;
- /* Initialize VRAM pool with all of VRAM divided into pages */
- r = amdgpu_vram_mgr_init(adev);
- if (r) {
- DRM_ERROR("Failed initializing VRAM heap.\n");
- return r;
+ if (!adev->gmc.is_app_apu) {
+ /* Initialize VRAM pool with all of VRAM divided into pages */
+ r = amdgpu_vram_mgr_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed initializing VRAM heap.\n");
+ return r;
+ }
}
/* Change the size here instead of the init above so only lpfn is affected */
@@ -1908,19 +1938,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
/*
- *The reserved vram for driver must be pinned to the specified
- *place on the VRAM, so reserve it early.
+ * The reserved VRAM for the driver must be pinned to a specific
+ * location in VRAM, so reserve it early.
*/
r = amdgpu_ttm_drv_reserve_vram_init(adev);
if (r)
return r;
/*
- * only NAVI10 and onwards ASIC support for IP discovery.
- * If IP discovery enabled, a block of memory should be
- * reserved for IP discovey.
+ * Only NAVI10 and later ASICs support IP discovery.
+ * If IP discovery is enabled, a block of memory should be
+ * reserved for it.
*/
- if (adev->mman.discovery_bin) {
+ if (adev->discovery.reserve_tmr) {
r = amdgpu_ttm_reserve_tmr(adev);
if (r)
return r;
@@ -1958,7 +1988,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n");
}
- DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
+ dev_info(adev->dev, "amdgpu: %uM of VRAM memory ready\n",
(unsigned int)(adev->gmc.real_vram_size / (1024 * 1024)));
/* Compute GTT size, either based on TTM limit
@@ -1981,10 +2011,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Initialize GTT memory pool */
r = amdgpu_gtt_mgr_init(adev, gtt_size);
if (r) {
- DRM_ERROR("Failed initializing GTT heap.\n");
+ dev_err(adev->dev, "Failed initializing GTT heap.\n");
return r;
}
- DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
+ dev_info(adev->dev, "amdgpu: %uM of GTT memory ready.\n",
(unsigned int)(gtt_size / (1024 * 1024)));
if (adev->flags & AMD_IS_APU) {
@@ -1995,40 +2025,52 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Initialize doorbell pool on PCI BAR */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
if (r) {
- DRM_ERROR("Failed initializing doorbell heap.\n");
+ dev_err(adev->dev, "Failed initializing doorbell heap.\n");
return r;
}
/* Create a doorbell page for kernel usage */
r = amdgpu_doorbell_create_kernel_doorbells(adev);
if (r) {
- DRM_ERROR("Failed to initialize kernel doorbells.\n");
+ dev_err(adev->dev, "Failed to initialize kernel doorbells.\n");
return r;
}
+ /* Initialize the MMIO-remap pool (a single 4K page) */
+ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_MMIO_REMAP, 1);
+ if (r) {
+ dev_err(adev->dev, "Failed initializing MMIO-remap heap.\n");
+ return r;
+ }
+
+ /* Allocate the singleton MMIO_REMAP BO (4K) if supported */
+ r = amdgpu_ttm_mmio_remap_bo_init(adev);
+ if (r)
+ return r;
+
/* Initialize preemptible memory pool */
r = amdgpu_preempt_mgr_init(adev);
if (r) {
- DRM_ERROR("Failed initializing PREEMPT heap.\n");
+ dev_err(adev->dev, "Failed initializing PREEMPT heap.\n");
return r;
}
/* Initialize various on-chip memory pools */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
if (r) {
- DRM_ERROR("Failed initializing GDS heap.\n");
+ dev_err(adev->dev, "Failed initializing GDS heap.\n");
return r;
}
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
if (r) {
- DRM_ERROR("Failed initializing gws heap.\n");
+ dev_err(adev->dev, "Failed initializing gws heap.\n");
return r;
}
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
if (r) {
- DRM_ERROR("Failed initializing oa heap.\n");
+ dev_err(adev->dev, "Failed initializing oa heap.\n");
return r;
}
if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
@@ -2060,12 +2102,16 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
/* return the FW reserved memory back to VRAM */
amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
NULL);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL,
+ NULL);
if (adev->mman.stolen_reserved_size)
amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
NULL, NULL);
}
amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
&adev->mman.sdma_access_ptr);
+
+ amdgpu_ttm_mmio_remap_bo_fini(adev);
amdgpu_ttm_fw_reserve_vram_fini(adev);
amdgpu_ttm_drv_reserve_vram_fini(adev);
@@ -2078,7 +2124,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
drm_dev_exit(idx);
}
- amdgpu_vram_mgr_fini(adev);
+ if (!adev->gmc.is_app_apu)
+ amdgpu_vram_mgr_fini(adev);
amdgpu_gtt_mgr_fini(adev);
amdgpu_preempt_mgr_fini(adev);
amdgpu_doorbell_fini(adev);
@@ -2087,9 +2134,10 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_MMIO_REMAP);
ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
- DRM_INFO("amdgpu: ttm finalized\n");
+ dev_info(adev->dev, "amdgpu: ttm finalized\n");
}
/**
@@ -2121,8 +2169,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
DRM_SCHED_PRIORITY_KERNEL, &sched,
1, NULL);
if (r) {
- DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
- r);
+ dev_err(adev->dev,
+ "Failed setting up TTM BO move entity (%d)\n",
+ r);
return;
}
@@ -2130,15 +2179,18 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
DRM_SCHED_PRIORITY_NORMAL, &sched,
1, NULL);
if (r) {
- DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
- r);
+ dev_err(adev->dev,
+ "Failed setting up TTM BO move entity (%d)\n",
+ r);
goto error_free_entity;
}
} else {
drm_sched_entity_destroy(&adev->mman.high_pr);
drm_sched_entity_destroy(&adev->mman.low_pr);
- dma_fence_put(man->move);
- man->move = NULL;
+ /* Drop all the old fences since re-creating the scheduler entities
+ * will allocate new contexts.
+ */
+ ttm_resource_manager_cleanup(man);
}
/* this just adjusts TTM size idea, which sets lpfn to the correct value */
@@ -2161,7 +2213,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
struct dma_resv *resv,
bool vm_needs_flush,
struct amdgpu_job **job,
- bool delayed)
+ bool delayed, u64 k_job_id)
{
enum amdgpu_ib_pool_type pool = direct_submit ?
AMDGPU_IB_POOL_DIRECT :
@@ -2171,7 +2223,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
&adev->mman.high_pr;
r = amdgpu_job_alloc_with_ib(adev, entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
- num_dw * 4, pool, job);
+ num_dw * 4, pool, job, k_job_id);
if (r)
return r;
@@ -2202,7 +2254,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
int r;
if (!direct_submit && !ring->sched.ready) {
- DRM_ERROR("Trying to move memory with ring turned off.\n");
+ dev_err(adev->dev,
+ "Trying to move memory with ring turned off.\n");
return -EINVAL;
}
@@ -2210,7 +2263,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
- resv, vm_needs_flush, &job, false);
+ resv, vm_needs_flush, &job, false,
+ AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
if (r)
return r;
@@ -2237,7 +2291,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
error_free:
amdgpu_job_free(job);
- DRM_ERROR("Error scheduling IBs (%d)\n", r);
+ dev_err(adev->dev, "Error scheduling IBs (%d)\n", r);
return r;
}
@@ -2245,7 +2299,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
uint64_t dst_addr, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence,
- bool vm_needs_flush, bool delayed)
+ bool vm_needs_flush, bool delayed,
+ u64 k_job_id)
{
struct amdgpu_device *adev = ring->adev;
unsigned int num_loops, num_dw;
@@ -2258,7 +2313,7 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
- &job, delayed);
+ &job, delayed, k_job_id);
if (r)
return r;
@@ -2328,7 +2383,8 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
goto err;
r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
- &next, true, true);
+ &next, true, true,
+ AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
if (r)
goto err;
@@ -2347,7 +2403,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct dma_resv *resv,
struct dma_fence **f,
- bool delayed)
+ bool delayed,
+ u64 k_job_id)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
@@ -2356,7 +2413,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
int r;
if (!adev->mman.buffer_funcs_enabled) {
- DRM_ERROR("Trying to clear memory with ring turned off.\n");
+ dev_err(adev->dev,
+ "Trying to clear memory with ring turned off.\n");
return -EINVAL;
}
@@ -2376,7 +2434,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
goto error;
r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
- &next, true, delayed);
+ &next, true, delayed, k_job_id);
if (r)
goto error;
@@ -2416,7 +2474,7 @@ int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
man = ttm_manager_type(&adev->mman.bdev, mem_type);
break;
default:
- DRM_ERROR("Trying to evict invalid memory type\n");
+ dev_err(adev->dev, "Trying to evict invalid memory type\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 208b7d1d8a27..577ee04ce0bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -28,13 +28,15 @@
#include <drm/gpu_scheduler.h>
#include <drm/ttm/ttm_placement.h>
#include "amdgpu_vram_mgr.h"
+#include "amdgpu_hmm.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
#define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3)
#define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4)
-#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 5)
+#define AMDGPU_PL_MMIO_REMAP (TTM_PL_PRIV + 5)
+#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 6)
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
@@ -81,11 +83,9 @@ struct amdgpu_mman {
uint64_t stolen_reserved_offset;
uint64_t stolen_reserved_size;
- /* discovery */
- uint8_t *discovery_bin;
- uint32_t discovery_tmr_size;
/* fw reserved memory */
struct amdgpu_bo *fw_reserved_memory;
+ struct amdgpu_bo *fw_reserved_memory_extend;
/* firmware VRAM reservation */
u64 fw_vram_usage_start_offset;
@@ -154,6 +154,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
uint64_t start, uint64_t size);
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start);
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev);
bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
struct ttm_resource *res);
@@ -167,12 +168,6 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
bool vm_needs_flush, uint32_t copy_flags);
-int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
- const struct amdgpu_copy_mem *src,
- const struct amdgpu_copy_mem *dst,
- uint64_t size, bool tmz,
- struct dma_resv *resv,
- struct dma_fence **f);
int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
struct dma_resv *resv,
struct dma_fence **fence);
@@ -180,38 +175,25 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct dma_resv *resv,
struct dma_fence **fence,
- bool delayed);
+ bool delayed,
+ u64 k_job_id);
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
- struct hmm_range **range);
-void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
- struct hmm_range *range);
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
- struct hmm_range *range);
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
+ struct amdgpu_hmm_range *range);
#else
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct page **pages,
- struct hmm_range **range)
+ struct amdgpu_hmm_range *range)
{
return -EPERM;
}
-static inline void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
-}
-static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
- return false;
-}
#endif
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range);
int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
uint64_t *user_addr);
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 2505c46a9c3d..e96f24e9ad57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -30,6 +30,11 @@
#define AMDGPU_UCODE_NAME_MAX (128)
+static const struct kicker_device kicker_device_list[] = {
+ {0x744B, 0x00},
+ {0x7551, 0xC8}
+};
+
static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
{
DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
@@ -1155,6 +1160,9 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM;
}
+ if (amdgpu_virt_xgmi_migrate_enabled(adev) && adev->firmware.fw_buf)
+ adev->firmware.fw_buf_mc = amdgpu_bo_fb_aper_addr(adev->firmware.fw_buf);
+
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
if (ucode->fw) {
@@ -1387,6 +1395,19 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
return NULL;
}
+bool amdgpu_is_kicker_fw(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kicker_device_list); i++) {
+ if (adev->pdev->device == kicker_device_list[i].device &&
+ adev->pdev->revision == kicker_device_list[i].revision)
+ return true;
+ }
+
+ return false;
+}
+
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len)
{
int maj, min, rev;
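
A hedged sketch of how a firmware loader might branch on the new helper; the "_kicker" name suffix is an assumption for illustration only and is not established by this patch:

	char fw_name[AMDGPU_UCODE_NAME_MAX];

	/* Hypothetical: pick a kicker firmware image when the device matches. */
	if (amdgpu_is_kicker_fw(adev))
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_kicker.bin", chip_name);
	else
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);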
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 9e89c3487be5..6349aad6da35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -605,6 +605,11 @@ struct amdgpu_firmware {
uint32_t pldm_version;
};
+struct kicker_device {
+ unsigned short device;
+ u8 revision;
+};
+
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_imu_hdr(const struct common_firmware_header *hdr);
@@ -632,5 +637,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type);
const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id);
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len);
+bool amdgpu_is_kicker_fw(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index c92b8794aa73..3f0b0e9af4f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -24,6 +24,7 @@
#include <linux/sort.h>
#include "amdgpu.h"
#include "umc_v6_7.h"
+#include "amdgpu_ras_mgr.h"
#define MAX_UMC_POISON_POLLING_TIME_SYNC 20 //ms
#define MAX_UMC_HASH_STRING_SIZE 256
@@ -96,67 +97,96 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *control = &con->eeprom_control;
unsigned int error_query_mode;
int ret = 0;
unsigned long err_count;
amdgpu_ras_get_error_query_mode(adev, &error_query_mode);
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+
+ /* still call query_ras_error_address to clear error status
+ * even if a NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ dev_warn(adev->dev,
+ "Failed to alloc memory for umc error address record!\n");
+ else
+ err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;
+
mutex_lock(&con->page_retirement_lock);
- ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
- if (ret == -EOPNOTSUPP &&
- error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
- if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
- adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
- adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status);
-
- if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
- adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
- adev->umc.max_ras_err_cnt_per_query) {
- err_data->err_addr =
- kcalloc(adev->umc.max_ras_err_cnt_per_query,
- sizeof(struct eeprom_table_record), GFP_KERNEL);
-
- /* still call query_ras_error_address to clear error status
- * even NOMEM error is encountered
- */
- if(!err_data->err_addr)
- dev_warn(adev->dev, "Failed to alloc memory for "
- "umc error address record!\n");
- else
- err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;
-
- /* umc query_ras_error_address is also responsible for clearing
- * error status
- */
- adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status);
+ if (!amdgpu_ras_smu_eeprom_supported(adev)) {
+ ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
+ if (ret == -EOPNOTSUPP &&
+ error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
+ if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev,
+ ras_error_status);
+
+ if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
+ adev->umc.max_ras_err_cnt_per_query) {
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+
+ /* still call query_ras_error_address to clear error status
+ * even if a NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ dev_warn(adev->dev,
+ "Failed to alloc memory for umc error address record!\n");
+ else
+ err_data->err_addr_len =
+ adev->umc.max_ras_err_cnt_per_query;
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev,
+ ras_error_status);
+ }
+ } else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY ||
+ (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) {
+ if (adev->umc.ras &&
+ adev->umc.ras->ecc_info_query_ras_error_count)
+ adev->umc.ras->ecc_info_query_ras_error_count(adev,
+ ras_error_status);
+
+ if (adev->umc.ras &&
+ adev->umc.ras->ecc_info_query_ras_error_address &&
+ adev->umc.max_ras_err_cnt_per_query) {
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+
+ /* still call query_ras_error_address to clear error status
+ * even if a NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ dev_warn(adev->dev,
+ "Failed to alloc memory for umc error address record!\n");
+ else
+ err_data->err_addr_len =
+ adev->umc.max_ras_err_cnt_per_query;
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ adev->umc.ras->ecc_info_query_ras_error_address(adev,
+ ras_error_status);
+ }
}
- } else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY ||
- (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) {
- if (adev->umc.ras &&
- adev->umc.ras->ecc_info_query_ras_error_count)
- adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status);
-
- if (adev->umc.ras &&
- adev->umc.ras->ecc_info_query_ras_error_address &&
- adev->umc.max_ras_err_cnt_per_query) {
- err_data->err_addr =
- kcalloc(adev->umc.max_ras_err_cnt_per_query,
- sizeof(struct eeprom_table_record), GFP_KERNEL);
-
- /* still call query_ras_error_address to clear error status
- * even NOMEM error is encountered
- */
- if(!err_data->err_addr)
- dev_warn(adev->dev, "Failed to alloc memory for "
- "umc error address record!\n");
- else
- err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;
-
- /* umc query_ras_error_address is also responsible for clearing
- * error status
- */
- adev->umc.ras->ecc_info_query_ras_error_address(adev, ras_error_status);
+ } else {
+ if (!amdgpu_ras_eeprom_update_record_num(control)) {
+ err_data->err_addr_cnt = err_data->de_count =
+ control->ras_num_recs - control->ras_num_recs_old;
+ amdgpu_ras_eeprom_read_idx(control, err_data->err_addr,
+ control->ras_num_recs_old, err_data->de_count);
}
}
@@ -166,7 +196,7 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
if ((amdgpu_bad_page_threshold != 0) &&
err_data->err_addr_cnt) {
amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
- err_data->err_addr_cnt, false);
+ err_data->err_addr_cnt, amdgpu_ras_smu_eeprom_supported(adev));
amdgpu_ras_save_bad_pages(adev, &err_count);
amdgpu_dpm_send_hbm_bad_pages_num(adev,
@@ -244,6 +274,15 @@ int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
}
amdgpu_ras_error_data_fini(&err_data);
+ } else if (amdgpu_uniras_enabled(adev)) {
+ struct ras_ih_info ih_info = {0};
+
+ ih_info.block = block;
+ ih_info.pasid = pasid;
+ ih_info.reset = reset;
+ ih_info.pasid_fn = pasid_fn;
+ ih_info.data = data;
+ amdgpu_ras_mgr_handle_consumer_interrupt(adev, &ih_info);
} else {
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
int ret;
@@ -252,6 +291,7 @@ int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
block, pasid, pasid_fn, data, reset);
if (!ret) {
atomic_inc(&con->page_retirement_req_cnt);
+ atomic_inc(&con->poison_consumption_count);
wake_up(&con->page_retirement_wq);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index ec203f9e5ffa..28dff750c47e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -113,6 +113,8 @@ struct amdgpu_umc_ras {
uint32_t (*get_die_id_from_pa)(struct amdgpu_device *adev,
uint64_t mca_addr, uint64_t retired_page);
void (*get_retire_flip_bits)(struct amdgpu_device *adev);
+ void (*mca_ipid_parse)(struct amdgpu_device *adev, uint64_t ipid,
+ uint32_t *did, uint32_t *ch, uint32_t *umc_inst, uint32_t *sid);
};
struct amdgpu_umc_funcs {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 295e7186e156..9a969175900e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -25,10 +25,13 @@
#include <drm/drm_auth.h>
#include <drm/drm_exec.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
+#include "amdgpu_reset.h"
#include "amdgpu_vm.h"
#include "amdgpu_userq.h"
+#include "amdgpu_hmm.h"
#include "amdgpu_userq_fence.h"
u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
@@ -44,22 +47,301 @@ u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
return userq_ip_mask;
}
+static bool amdgpu_userq_is_reset_type_supported(struct amdgpu_device *adev,
+ enum amdgpu_ring_type ring_type, int reset_type)
+{
+ if (ring_type < 0 || ring_type >= AMDGPU_RING_TYPE_MAX)
+ return false;
+
+ switch (ring_type) {
+ case AMDGPU_RING_TYPE_GFX:
+ if (adev->gfx.gfx_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ if (adev->gfx.compute_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ if (adev->sdma.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_DEC:
+ case AMDGPU_RING_TYPE_VCN_ENC:
+ if (adev->vcn.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ if (adev->jpeg.supported_reset & reset_type)
+ return true;
+ break;
+ default:
+ break;
+ }
+ return false;
+}
+
+static void amdgpu_userq_gpu_reset(struct amdgpu_device *adev)
+{
+ if (amdgpu_device_should_recover_gpu(adev)) {
+ amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->userq_reset_work);
+ /* Wait for the reset job to complete */
+ flush_work(&adev->userq_reset_work);
+ }
+}
+
static int
-amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
+amdgpu_userq_detect_and_reset_queues(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const int queue_types[] = {
+ AMDGPU_RING_TYPE_COMPUTE,
+ AMDGPU_RING_TYPE_GFX,
+ AMDGPU_RING_TYPE_SDMA
+ };
+ const int num_queue_types = ARRAY_SIZE(queue_types);
+ bool gpu_reset = false;
+ int r = 0;
+ int i;
+
+ /* Warn if the process's userq mutex is not held */
+ WARN_ON(!mutex_is_locked(&uq_mgr->userq_mutex));
+
+ if (unlikely(adev->debug_disable_gpu_ring_reset)) {
+ dev_err(adev->dev, "userq reset disabled by debug mask\n");
+ return 0;
+ }
+
+ /*
+ * If GPU recovery feature is disabled system-wide,
+ * skip all reset detection logic
+ */
+ if (!amdgpu_gpu_recovery)
+ return 0;
+
+ /*
+ * Iterate through all queue types to detect and reset problematic queues
+ * Process each queue type in the defined order
+ */
+ for (i = 0; i < num_queue_types; i++) {
+ int ring_type = queue_types[i];
+ const struct amdgpu_userq_funcs *funcs = adev->userq_funcs[ring_type];
+
+ if (!amdgpu_userq_is_reset_type_supported(adev, ring_type, AMDGPU_RESET_TYPE_PER_QUEUE))
+ continue;
+
+ if (atomic_read(&uq_mgr->userq_count[ring_type]) > 0 &&
+ funcs && funcs->detect_and_reset) {
+ r = funcs->detect_and_reset(adev, ring_type);
+ if (r) {
+ gpu_reset = true;
+ break;
+ }
+ }
+ }
+
+ if (gpu_reset)
+ amdgpu_userq_gpu_reset(adev);
+
+ return r;
+}
+
+static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue,
+ struct amdgpu_bo_va_mapping *va_map, u64 addr)
+{
+ struct amdgpu_userq_va_cursor *va_cursor;
+ struct userq_va_list;
+
+ va_cursor = kzalloc(sizeof(*va_cursor), GFP_KERNEL);
+ if (!va_cursor)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&va_cursor->list);
+ va_cursor->gpu_addr = addr;
+ atomic_set(&va_map->bo_va->userq_va_mapped, 1);
+ list_add(&va_cursor->list, &queue->userq_va_list);
+
+ return 0;
+}
+
+int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue,
+ u64 addr, u64 expected_size)
+{
+ struct amdgpu_bo_va_mapping *va_map;
+ struct amdgpu_vm *vm = queue->vm;
+ u64 user_addr;
+ u64 size;
+ int r = 0;
+
+ user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
+ size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
+
+ r = amdgpu_bo_reserve(vm->root.bo, false);
+ if (r)
+ return r;
+
+ va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
+ if (!va_map) {
+ r = -EINVAL;
+ goto out_err;
+ }
+ /* Only validate whether the userq is resident within the VM mapping range */
+ if (user_addr >= va_map->start &&
+ va_map->last - user_addr + 1 >= size) {
+ amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr);
+ amdgpu_bo_unreserve(vm->root.bo);
+ return 0;
+ }
+
+ r = -EINVAL;
+out_err:
+ amdgpu_bo_unreserve(vm->root.bo);
+ return r;
+}
+
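A worked example of the containment check with hypothetical values: for a mapping spanning pages 0x100..0x1ff (va_map->start = 0x100, va_map->last = 0x1ff) and a queue at user_addr = 0x180 with size = 0x40 pages, 0x180 >= 0x100 holds and 0x1ff - 0x180 + 1 = 0x80 >= 0x40, so the queue fits inside the mapping and is added to the tracking list.
+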
+static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+ bool r;
+
+ if (amdgpu_bo_reserve(vm->root.bo, false))
+ return false;
+
+ mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
+ if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped))
+ r = true;
+ else
+ r = false;
+ amdgpu_bo_unreserve(vm->root.bo);
+
+ return r;
+}
+
+static bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_userq_va_cursor *va_cursor, *tmp;
+ int r = 0;
+
+ list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
+ r += amdgpu_userq_buffer_va_mapped(queue->vm, va_cursor->gpu_addr);
+ dev_dbg(queue->userq_mgr->adev->dev,
+ "validate the userq mapping:%p va:%llx r:%d\n",
+ queue, va_cursor->gpu_addr, r);
+ }
+
+ if (r != 0)
+ return true;
+
+ return false;
+}
+
+static void amdgpu_userq_buffer_va_list_del(struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_userq_va_cursor *va_cursor)
+{
+ atomic_set(&mapping->bo_va->userq_va_mapped, 0);
+ list_del(&va_cursor->list);
+ kfree(va_cursor);
+}
+
+static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_userq_va_cursor *va_cursor, *tmp;
+ struct amdgpu_bo_va_mapping *mapping;
+ int r;
+
+ r = amdgpu_bo_reserve(queue->vm->root.bo, false);
+ if (r)
+ return r;
+
+ list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
+ mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr);
+ if (!mapping) {
+ r = -EINVAL;
+ goto err;
+ }
+ dev_dbg(adev->dev, "delete the userq:%p va:%llx\n",
+ queue, va_cursor->gpu_addr);
+ amdgpu_userq_buffer_va_list_del(mapping, va_cursor);
+ }
+err:
+ amdgpu_bo_unreserve(queue->vm->root.bo);
+ return r;
+}
+
+static int
+amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue)
{
struct amdgpu_device *adev = uq_mgr->adev;
const struct amdgpu_userq_funcs *userq_funcs =
adev->userq_funcs[queue->queue_type];
+ bool found_hung_queue = false;
int r = 0;
if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
+ r = userq_funcs->preempt(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ found_hung_queue = true;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_PREEMPTED;
+ }
+ }
+
+ if (found_hung_queue)
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
+
+ return r;
+}
+
+static int
+amdgpu_userq_restore_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) {
+ r = userq_funcs->restore(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_MAPPED;
+ }
+ }
+
+ return r;
+}
+
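Together with the map/unmap helpers below, these implement a small per-queue state machine; a sketch of the transitions (HUNG is terminal until a queue or GPU reset):

	/*
	 * UNMAPPED --map--> MAPPED --preempt--> PREEMPTED --restore--> MAPPED
	 *                     |                     |
	 *                     +-------unmap---------+--> UNMAPPED
	 *
	 * Any helper failure moves the queue to HUNG, which triggers
	 * detect_and_reset and possibly a full GPU reset.
	 */
+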
+static int
+amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ bool found_hung_queue = false;
+ int r = 0;
+
+ if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) ||
+ (queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) {
r = userq_funcs->unmap(uq_mgr, queue);
- if (r)
+ if (r) {
queue->state = AMDGPU_USERQ_STATE_HUNG;
- else
+ found_hung_queue = true;
+ } else {
queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
+ }
}
+
+ if (found_hung_queue)
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
+
return r;
}
@@ -76,26 +358,33 @@ amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
r = userq_funcs->map(uq_mgr, queue);
if (r) {
queue->state = AMDGPU_USERQ_STATE_HUNG;
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
} else {
queue->state = AMDGPU_USERQ_STATE_MAPPED;
}
}
+
return r;
}
-static void
+static int
amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue)
{
struct dma_fence *f = queue->last_fence;
- int ret;
+ int ret = 0;
if (f && !dma_fence_is_signaled(f)) {
- ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
- if (ret <= 0)
+ ret = dma_fence_wait_timeout(f, true, MAX_SCHEDULE_TIMEOUT);
+ if (ret <= 0) {
drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
f->context, f->seqno);
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ return -ETIME;
+ }
}
+
+ return ret;
}
static void
@@ -106,32 +395,27 @@ amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_device *adev = uq_mgr->adev;
const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
+ /* Wait for mode-1 reset to complete */
+ down_read(&adev->reset_domain->sem);
+
+ /* Drop the userq reference. */
+ amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
uq_funcs->mqd_destroy(uq_mgr, queue);
amdgpu_userq_fence_driver_free(queue);
- idr_remove(&uq_mgr->userq_idr, queue_id);
+ /* Use interrupt-safe locking since IRQ handlers may access these XArrays */
+ xa_erase_irq(&uq_mgr->userq_mgr_xa, (unsigned long)queue_id);
+ xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index);
+ queue->userq_mgr = NULL;
+ list_del(&queue->userq_va_list);
kfree(queue);
-}
-int
-amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr)
-{
- struct amdgpu_usermode_queue *queue;
- int queue_id;
- int ret = 0;
-
- mutex_lock(&uq_mgr->userq_mutex);
- /* Resume all the queues for this process */
- idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id)
- ret += queue->state == AMDGPU_USERQ_STATE_MAPPED;
-
- mutex_unlock(&uq_mgr->userq_mutex);
- return ret;
+ up_read(&adev->reset_domain->sem);
}
static struct amdgpu_usermode_queue *
amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
{
- return idr_find(&uq_mgr->userq_idr, qid);
+ return xa_load(&uq_mgr->userq_mgr_xa, qid);
}
void
@@ -259,17 +543,6 @@ amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
case AMDGPU_HW_IP_DMA:
db_size = sizeof(u64);
break;
-
- case AMDGPU_HW_IP_VCN_ENC:
- db_size = sizeof(u32);
- db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1;
- break;
-
- case AMDGPU_HW_IP_VPE:
- db_size = sizeof(u32);
- db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1;
- break;
-
default:
drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not support\n",
db_info->queue_type);
@@ -318,11 +591,20 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
amdgpu_bo_unreserve(queue->db_obj.obj);
}
amdgpu_bo_unref(&queue->db_obj.obj);
+ atomic_dec(&uq_mgr->userq_count[queue->queue_type]);
+#if defined(CONFIG_DEBUG_FS)
+ debugfs_remove_recursive(queue->debugfs_queue);
+#endif
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+ /* TODO: a reset is required on userq HW unmap errors */
+ if (unlikely(r != AMDGPU_USERQ_STATE_UNMAPPED)) {
+ drm_warn(adev_to_drm(uq_mgr->adev), "trying to destroy a userq that is still mapped in HW\n");
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ }
amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
mutex_unlock(&uq_mgr->userq_mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -343,6 +625,46 @@ static int amdgpu_userq_priority_permit(struct drm_file *filp,
return -EACCES;
}
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
+{
+ struct amdgpu_usermode_queue *queue = m->private;
+ struct amdgpu_bo *bo;
+ int r;
+
+ if (!queue || !queue->mqd.obj)
+ return -EINVAL;
+
+ bo = amdgpu_bo_ref(queue->mqd.obj);
+ r = amdgpu_bo_reserve(bo, true);
+ if (r) {
+ amdgpu_bo_unref(&bo);
+ return -EINVAL;
+ }
+
+ seq_printf(m, "queue_type: %d\n", queue->queue_type);
+ seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));
+
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return 0;
+}
+
+static int amdgpu_mqd_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, amdgpu_mqd_info_read, inode->i_private);
+}
+
+static const struct file_operations amdgpu_mqd_info_fops = {
+ .owner = THIS_MODULE,
+ .open = amdgpu_mqd_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
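The fops above back a per-queue debugfs node. A hedged sketch of a registration site against the queue->debugfs_queue directory that the destroy path removes; the node name, mode, and parent directory are assumptions:

	#if defined(CONFIG_DEBUG_FS)
		/* Hypothetical; the actual create site is not in this hunk. */
		queue->debugfs_queue = debugfs_create_dir(queue_name, parent);
		debugfs_create_file("mqd_info", 0444, queue->debugfs_queue,
				    queue, &amdgpu_mqd_info_fops);
	#endif
+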
static int
amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
{
@@ -352,34 +674,19 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
const struct amdgpu_userq_funcs *uq_funcs;
struct amdgpu_usermode_queue *queue;
struct amdgpu_db_info db_info;
+ char *queue_name;
bool skip_map_queue;
+ u32 qid;
uint64_t index;
- int qid, r = 0;
+ int r = 0;
int priority =
(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
- /* Usermode queues are only supported for GFX IP as of now */
- if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
- args->in.ip_type != AMDGPU_HW_IP_DMA &&
- args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
- drm_file_err(uq_mgr->file, "Usermode queue doesn't support IP type %u\n",
- args->in.ip_type);
- return -EINVAL;
- }
-
r = amdgpu_userq_priority_permit(filp, priority);
if (r)
return r;
- if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
- (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
- (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
- !amdgpu_is_tmz(adev)) {
- drm_file_err(uq_mgr->file, "Secure only supported on GFX/Compute queues\n");
- return -EINVAL;
- }
-
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
@@ -394,7 +701,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
*
* This will also make sure we have a valid eviction fence ready to be used.
*/
- mutex_lock(&adev->userq_mutex);
amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
uq_funcs = adev->userq_funcs[args->in.ip_type];
@@ -411,6 +717,8 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
r = -ENOMEM;
goto unlock;
}
+
+ INIT_LIST_HEAD(&queue->userq_va_list);
queue->doorbell_handle = args->in.doorbell_handle;
queue->queue_type = args->in.ip_type;
queue->vm = &fpriv->vm;
@@ -421,11 +729,21 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
db_info.db_obj = &queue->db_obj;
db_info.doorbell_offset = args->in.doorbell_offset;
+ /* Validate the userq virtual address.*/
+ if (amdgpu_userq_input_va_validate(queue, args->in.queue_va, args->in.queue_size) ||
+ amdgpu_userq_input_va_validate(queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
+ amdgpu_userq_input_va_validate(queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
+ r = -EINVAL;
+ kfree(queue);
+ goto unlock;
+ }
+
/* Convert relative doorbell offset into absolute doorbell index */
index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
if (index == (uint64_t)-EINVAL) {
drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
kfree(queue);
+ r = -EINVAL;
goto unlock;
}
@@ -445,16 +763,27 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
goto unlock;
}
+ /* Wait for mode-1 reset to complete */
+ down_read(&adev->reset_domain->sem);
+ r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
+ if (r) {
+ kfree(queue);
+ up_read(&adev->reset_domain->sem);
+ goto unlock;
+ }
- qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
- if (qid < 0) {
+ r = xa_alloc(&uq_mgr->userq_mgr_xa, &qid, queue, XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
+ if (r) {
drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
amdgpu_userq_fence_driver_free(queue);
uq_funcs->mqd_destroy(uq_mgr, queue);
kfree(queue);
r = -ENOMEM;
+ up_read(&adev->reset_domain->sem);
goto unlock;
}
+ up_read(&adev->reset_domain->sem);
+ queue->userq_mgr = uq_mgr;
/* don't map the queue if scheduling is halted */
if (adev->userq_halt_for_enforce_isolation &&
@@ -467,7 +796,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
r = amdgpu_userq_map_helper(uq_mgr, queue);
if (r) {
drm_file_err(uq_mgr->file, "Failed to map Queue\n");
- idr_remove(&uq_mgr->userq_idr, qid);
+ xa_erase(&uq_mgr->userq_mgr_xa, qid);
amdgpu_userq_fence_driver_free(queue);
uq_funcs->mqd_destroy(uq_mgr, queue);
kfree(queue);
@@ -475,32 +804,67 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
}
}
+ queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid);
+ if (!queue_name) {
+ r = -ENOMEM;
+ goto unlock;
+ }
+
+#if defined(CONFIG_DEBUG_FS)
+ /* Queue dentry per client to hold MQD information */
+ queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client);
+ debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops);
+#endif
+ kfree(queue_name);
args->out.queue_id = qid;
+ atomic_inc(&uq_mgr->userq_count[queue->queue_type]);
unlock:
mutex_unlock(&uq_mgr->userq_mutex);
- mutex_unlock(&adev->userq_mutex);
return r;
}
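The hunk above is the usual idr_alloc() to xa_alloc() conversion: the array must be initialized with XA_FLAGS_ALLOC, the ID range moves into XA_LIMIT(), and failure comes back as an error code rather than a negative ID. A reduced sketch of the pattern (illustrative names only):

static int example_alloc_id(struct xarray *ids, void *obj, u32 *id)
{
	/*
	 * ids must have been set up with xa_init_flags(ids, XA_FLAGS_ALLOC).
	 * Returns 0 and writes *id on success, or -EBUSY/-ENOMEM on failure;
	 * there is no negative-ID idiom as with idr_alloc().
	 */
	return xa_alloc(ids, id, obj, XA_LIMIT(1, 255), GFP_KERNEL);
}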
-int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp)
+static int amdgpu_userq_input_args_validate(struct drm_device *dev,
+ union drm_amdgpu_userq *args,
+ struct drm_file *filp)
{
- union drm_amdgpu_userq *args = data;
- int r;
+ struct amdgpu_device *adev = drm_to_adev(dev);
switch (args->in.op) {
case AMDGPU_USERQ_OP_CREATE:
if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
return -EINVAL;
- r = amdgpu_userq_create(filp, args);
- if (r)
- drm_file_err(filp, "Failed to create usermode queue\n");
- break;
+ /* Usermode queues are only supported for GFX, compute and DMA IPs for now */
+ if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
+ args->in.ip_type != AMDGPU_HW_IP_DMA &&
+ args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
+ drm_file_err(filp, "Usermode queue doesn't support IP type %u\n",
+ args->in.ip_type);
+ return -EINVAL;
+ }
+
+ if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
+ (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
+ (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
+ !amdgpu_is_tmz(adev)) {
+ drm_file_err(filp, "Secure only supported on GFX/Compute queues\n");
+ return -EINVAL;
+ }
+ if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET ||
+ args->in.queue_va == 0 ||
+ args->in.queue_size == 0) {
+ drm_file_err(filp, "invalidate userq queue va or size\n");
+ return -EINVAL;
+ }
+ if (!args->in.wptr_va || !args->in.rptr_va) {
+ drm_file_err(filp, "invalidate userq queue rptr or wptr\n");
+ return -EINVAL;
+ }
+ break;
case AMDGPU_USERQ_OP_FREE:
if (args->in.ip_type ||
args->in.doorbell_handle ||
@@ -510,10 +874,34 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
args->in.queue_size ||
args->in.rptr_va ||
args->in.wptr_va ||
- args->in.wptr_va ||
args->in.mqd ||
args->in.mqd_size)
return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ union drm_amdgpu_userq *args = data;
+ int r;
+
+ if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
+ return -EINVAL;
+
+ switch (args->in.op) {
+ case AMDGPU_USERQ_OP_CREATE:
+ r = amdgpu_userq_create(filp, args);
+ if (r)
+ drm_file_err(filp, "Failed to create usermode queue\n");
+ break;
+
+ case AMDGPU_USERQ_OP_FREE:
r = amdgpu_userq_destroy(filp, args->in.queue_id);
if (r)
drm_file_err(filp, "Failed to destroy usermode queue\n");
@@ -531,12 +919,20 @@ static int
amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_usermode_queue *queue;
- int queue_id;
+ unsigned long queue_id;
int ret = 0, r;
/* Resume all the queues for this process */
- idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
- r = amdgpu_userq_map_helper(uq_mgr, queue);
+ xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
+
+ if (!amdgpu_userq_buffer_vas_mapped(queue)) {
+ drm_file_err(uq_mgr->file,
+ "trying restore queue without va mapping\n");
+ queue->state = AMDGPU_USERQ_STATE_INVALID_VA;
+ continue;
+ }
+
+ r = amdgpu_userq_restore_helper(uq_mgr, queue);
if (r)
ret = r;
}
@@ -546,108 +942,179 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
return ret;
}
+static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+}
+
+/* Handle all BOs on the invalidated list, validate them and update the PTs */
static int
-amdgpu_userq_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
+amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
+ struct amdgpu_vm *vm)
{
struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
int ret;
- amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->invalidated)) {
+ bo_va = list_first_entry(&vm->invalidated,
+ struct amdgpu_bo_va,
+ base.vm_status);
+ spin_unlock(&vm->status_lock);
- ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (ret)
- DRM_ERROR("Fail to validate\n");
+ bo = bo_va->base.bo;
+ ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2);
+ if (unlikely(ret))
+ return ret;
- return ret;
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ return ret;
+
+ /* This moves the bo_va to the done list */
+ ret = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (ret)
+ return ret;
+
+ spin_lock(&vm->status_lock);
+ }
+ spin_unlock(&vm->status_lock);
+
+ return 0;
}
+/* Make sure the whole VM is ready to be used */
static int
-amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
+amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
- struct amdgpu_vm *vm = &fpriv->vm;
+ bool invalidated = false, new_addition = false;
+ struct ttm_operation_ctx ctx = { true, false };
struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_hmm_range *range;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ unsigned long key, tmp_key;
struct amdgpu_bo_va *bo_va;
- struct ww_acquire_ctx *ticket;
- struct drm_exec exec;
struct amdgpu_bo *bo;
- struct dma_resv *resv;
- bool clear, unlock;
- int ret = 0;
+ struct drm_exec exec;
+ struct xarray xa;
+ int ret;
+
+ xa_init(&xa);
+retry_lock:
drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
- ret = amdgpu_vm_lock_pd(vm, &exec, 2);
+ ret = amdgpu_vm_lock_pd(vm, &exec, 1);
drm_exec_retry_on_contention(&exec);
- if (unlikely(ret)) {
- drm_file_err(uq_mgr->file, "Failed to lock PD\n");
+ if (unlikely(ret))
goto unlock_all;
- }
- /* Lock the done list */
- list_for_each_entry(bo_va, &vm->done, base.vm_status) {
- bo = bo_va->base.bo;
- if (!bo)
- continue;
+ ret = amdgpu_vm_lock_done_list(vm, &exec, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unlock_all;
- ret = drm_exec_lock_obj(&exec, &bo->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(ret))
- goto unlock_all;
- }
+ /* This validates PDs, PTs and per VM BOs */
+ ret = amdgpu_vm_validate(adev, vm, NULL,
+ amdgpu_userq_validate_vm,
+ NULL);
+ if (unlikely(ret))
+ goto unlock_all;
+
+ /* This locks and validates the remaining evicted BOs */
+ ret = amdgpu_userq_bo_validate(adev, &exec, vm);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unlock_all;
}
- spin_lock(&vm->status_lock);
- while (!list_empty(&vm->moved)) {
- bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
- base.vm_status);
- spin_unlock(&vm->status_lock);
+ if (invalidated) {
+ xa_for_each(&xa, tmp_key, range) {
+ bo = range->bo;
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ goto unlock_all;
- /* Per VM BOs never need to bo cleared in the page tables */
- ret = amdgpu_vm_bo_update(adev, bo_va, false);
- if (ret)
- goto unlock_all;
- spin_lock(&vm->status_lock);
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ goto unlock_all;
+ }
+ invalidated = false;
}
- ticket = &exec.ticket;
- while (!list_empty(&vm->invalidated)) {
- bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
- base.vm_status);
- resv = bo_va->base.bo->tbo.base.resv;
- spin_unlock(&vm->status_lock);
+ ret = amdgpu_vm_handle_moved(adev, vm, NULL);
+ if (ret)
+ goto unlock_all;
+ key = 0;
+ /* Validate User Ptr BOs */
+ list_for_each_entry(bo_va, &vm->done, base.vm_status) {
bo = bo_va->base.bo;
- ret = amdgpu_userq_validate_vm_bo(NULL, bo);
- if (ret) {
- drm_file_err(uq_mgr->file, "Failed to validate BO\n");
- goto unlock_all;
- }
+ if (!bo)
+ continue;
- /* Try to reserve the BO to avoid clearing its ptes */
- if (!adev->debug_vm && dma_resv_trylock(resv)) {
- clear = false;
- unlock = true;
- /* The caller is already holding the reservation lock */
- } else if (dma_resv_locking_ctx(resv) == ticket) {
- clear = false;
- unlock = false;
- /* Somebody else is using the BO right now */
- } else {
- clear = true;
- unlock = false;
+ if (!amdgpu_ttm_tt_is_userptr(bo->tbo.ttm))
+ continue;
+
+ range = xa_load(&xa, key);
+ if (range && range->bo != bo) {
+ xa_erase(&xa, key);
+ amdgpu_hmm_range_free(range);
+ range = NULL;
}
- ret = amdgpu_vm_bo_update(adev, bo_va, clear);
+ if (!range) {
+ range = amdgpu_hmm_range_alloc(bo);
+ if (!range) {
+ ret = -ENOMEM;
+ goto unlock_all;
+ }
+
+ xa_store(&xa, key, range, GFP_KERNEL);
+ new_addition = true;
+ }
+ key++;
+ }
- if (unlock)
- dma_resv_unlock(resv);
- if (ret)
- goto unlock_all;
+ if (new_addition) {
+ drm_exec_fini(&exec);
+ xa_for_each(&xa, tmp_key, range) {
+ if (!range)
+ continue;
+ bo = range->bo;
+ ret = amdgpu_ttm_tt_get_user_pages(bo, range);
+ if (ret)
+ goto unlock_all;
+ }
- spin_lock(&vm->status_lock);
+ invalidated = true;
+ new_addition = false;
+ goto retry_lock;
}
- spin_unlock(&vm->status_lock);
+
+ ret = amdgpu_vm_update_pdes(adev, vm, false);
+ if (ret)
+ goto unlock_all;
+
+ /*
+ * We need to wait for all VM updates to finish before restarting the
+ * queues. Using the done list this way is safe here since everything is
+ * locked in place.
+ */
+ list_for_each_entry(bo_va, &vm->done, base.vm_status)
+ dma_fence_wait(bo_va->last_pt_update, false);
+ dma_fence_wait(vm->last_update, false);
ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
if (ret)
@@ -655,6 +1122,13 @@ amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
unlock_all:
drm_exec_fini(&exec);
+ xa_for_each(&xa, tmp_key, range) {
+ if (!range)
+ continue;
+ bo = range->bo;
+ amdgpu_hmm_range_free(range);
+ }
+ xa_destroy(&xa);
return ret;
}
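The function above is built around the drm_exec ww-mutex helper: the body of drm_exec_until_all_locked() is simply re-executed after a contention rollback, which is why the userptr ranges parked in the local XArray must survive the retry_lock pass. The bare skeleton of the idiom, reduced to one object (a sketch, not the driver code):

struct drm_exec exec;
int ret;

drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
	/* gobj: some struct drm_gem_object *, placeholder */
	ret = drm_exec_prepare_obj(&exec, gobj, 1);
	drm_exec_retry_on_contention(&exec);	/* restarts the block */
	if (unlikely(ret))
		break;
}
drm_exec_fini(&exec);	/* drops every lock taken so far */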
@@ -664,11 +1138,11 @@ static void amdgpu_userq_restore_worker(struct work_struct *work)
struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
int ret;
- flush_work(&fpriv->evf_mgr.suspend_work.work);
+ flush_delayed_work(&fpriv->evf_mgr.suspend_work);
mutex_lock(&uq_mgr->userq_mutex);
- ret = amdgpu_userq_validate_bos(uq_mgr);
+ ret = amdgpu_userq_vm_validate(uq_mgr);
if (ret) {
drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
goto unlock;
@@ -688,12 +1162,13 @@ static int
amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_usermode_queue *queue;
- int queue_id;
+ unsigned long queue_id;
int ret = 0, r;
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
/* Try to unmap all the queues in this process ctx */
- idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
- r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+ xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
+ r = amdgpu_userq_preempt_helper(uq_mgr, queue);
if (r)
ret = r;
}
@@ -703,13 +1178,31 @@ amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
return ret;
}
+void amdgpu_userq_reset_work(struct work_struct *work)
+{
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
+ userq_reset_work);
+ struct amdgpu_reset_context reset_context;
+
+ memset(&reset_context, 0, sizeof(reset_context));
+
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ reset_context.src = AMDGPU_RESET_SRC_USERQ;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ /*set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);*/
+
+ amdgpu_device_gpu_recover(adev, NULL, &reset_context);
+}
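A hang-detection path is expected to queue this handler rather than calling amdgpu_device_gpu_recover() directly; a hypothetical call site (the work item is assumed to be set up elsewhere with INIT_WORK(..., amdgpu_userq_reset_work)):

if (amdgpu_device_should_recover_gpu(adev))
	schedule_work(&adev->userq_reset_work);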
+
static int
amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_usermode_queue *queue;
- int queue_id, ret;
+ unsigned long queue_id;
+ int ret;
- idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
struct dma_fence *f = queue->last_fence;
if (!f || dma_fence_is_signaled(f))
@@ -729,22 +1222,19 @@ void
amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_eviction_fence *ev_fence)
{
- int ret;
struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ int ret;
/* Wait for any pending userqueue fence work to finish */
ret = amdgpu_userq_wait_for_signal(uq_mgr);
- if (ret) {
- drm_file_err(uq_mgr->file, "Not evicting userqueue, timeout waiting for work\n");
- return;
- }
+ if (ret)
+ dev_err(adev->dev, "Not evicting userqueue, timeout waiting for work\n");
ret = amdgpu_userq_evict_all(uq_mgr);
- if (ret) {
- drm_file_err(uq_mgr->file, "Failed to evict userqueue\n");
- return;
- }
+ if (ret)
+ dev_err(adev->dev, "Failed to evict userqueue\n");
/* Signal current eviction fence */
amdgpu_eviction_fence_signal(evf_mgr, ev_fence);
@@ -762,44 +1252,31 @@ int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *f
struct amdgpu_device *adev)
{
mutex_init(&userq_mgr->userq_mutex);
- idr_init_base(&userq_mgr->userq_idr, 1);
+ xa_init_flags(&userq_mgr->userq_mgr_xa, XA_FLAGS_ALLOC);
userq_mgr->adev = adev;
userq_mgr->file = file_priv;
- mutex_lock(&adev->userq_mutex);
- list_add(&userq_mgr->list, &adev->userq_mgr_list);
- mutex_unlock(&adev->userq_mutex);
-
INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
return 0;
}
void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
{
- struct amdgpu_device *adev = userq_mgr->adev;
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- uint32_t queue_id;
+ unsigned long queue_id;
cancel_delayed_work_sync(&userq_mgr->resume_work);
- mutex_lock(&adev->userq_mutex);
mutex_lock(&userq_mgr->userq_mutex);
- idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
+ amdgpu_userq_detect_and_reset_queues(userq_mgr);
+ xa_for_each(&userq_mgr->userq_mgr_xa, queue_id, queue) {
amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
amdgpu_userq_unmap_helper(userq_mgr, queue);
amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
}
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
- if (uqm == userq_mgr) {
- list_del(&uqm->list);
- break;
- }
- }
- idr_destroy(&userq_mgr->userq_idr);
+ xa_destroy(&userq_mgr->userq_mgr_xa);
mutex_unlock(&userq_mgr->userq_mutex);
- mutex_unlock(&adev->userq_mutex);
mutex_destroy(&userq_mgr->userq_mutex);
}
@@ -807,51 +1284,51 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev)
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
- int ret = 0, r;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
+ int r;
if (!ip_mask)
return 0;
- mutex_lock(&adev->userq_mutex);
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
cancel_delayed_work_sync(&uqm->resume_work);
- mutex_lock(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ guard(mutex)(&uqm->userq_mutex);
+ amdgpu_userq_detect_and_reset_queues(uqm);
+ if (adev->in_s0ix)
+ r = amdgpu_userq_preempt_helper(uqm, queue);
+ else
r = amdgpu_userq_unmap_helper(uqm, queue);
- if (r)
- ret = r;
- }
- mutex_unlock(&uqm->userq_mutex);
+ if (r)
+ return r;
}
- mutex_unlock(&adev->userq_mutex);
- return ret;
+ return 0;
}
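The loop above trades explicit mutex_lock()/mutex_unlock() pairs for the scoped guard() helper from <linux/cleanup.h>, which is what makes the early return on error safe. A minimal sketch of the idiom:

static int example_locked_op(struct mutex *lock, bool busy)
{
	guard(mutex)(lock);	/* mutex_unlock() runs at every scope exit */

	if (busy)
		return -EBUSY;	/* no explicit unlock needed */
	return 0;
}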
int amdgpu_userq_resume(struct amdgpu_device *adev)
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
- int ret = 0, r;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
+ int r;
if (!ip_mask)
return 0;
- mutex_lock(&adev->userq_mutex);
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
- mutex_lock(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
+ guard(mutex)(&uqm->userq_mutex);
+ if (adev->in_s0ix)
+ r = amdgpu_userq_restore_helper(uqm, queue);
+ else
r = amdgpu_userq_map_helper(uqm, queue);
- if (r)
- ret = r;
- }
- mutex_unlock(&uqm->userq_mutex);
+ if (r)
+ return r;
}
- mutex_unlock(&adev->userq_mutex);
- return ret;
+
+ return 0;
}
int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
@@ -859,33 +1336,32 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
int ret = 0, r;
/* only need to stop gfx/compute */
if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
return 0;
- mutex_lock(&adev->userq_mutex);
if (adev->userq_halt_for_enforce_isolation)
dev_warn(adev->dev, "userq scheduling already stopped!\n");
adev->userq_halt_for_enforce_isolation = true;
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
cancel_delayed_work_sync(&uqm->resume_work);
mutex_lock(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
- (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
- (queue->xcp_id == idx)) {
- r = amdgpu_userq_unmap_helper(uqm, queue);
- if (r)
- ret = r;
- }
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
+ amdgpu_userq_detect_and_reset_queues(uqm);
+ r = amdgpu_userq_preempt_helper(uqm, queue);
+ if (r)
+ ret = r;
}
mutex_unlock(&uqm->userq_mutex);
}
- mutex_unlock(&adev->userq_mutex);
+
return ret;
}
@@ -894,31 +1370,113 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
int ret = 0, r;
/* only need to stop gfx/compute */
if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
return 0;
- mutex_lock(&adev->userq_mutex);
if (!adev->userq_halt_for_enforce_isolation)
dev_warn(adev->dev, "userq scheduling already started!\n");
adev->userq_halt_for_enforce_isolation = false;
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
mutex_lock(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
(queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
(queue->xcp_id == idx)) {
- r = amdgpu_userq_map_helper(uqm, queue);
+ r = amdgpu_userq_restore_helper(uqm, queue);
if (r)
ret = r;
}
- }
mutex_unlock(&uqm->userq_mutex);
}
- mutex_unlock(&adev->userq_mutex);
+
return ret;
}
+
+int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t saddr)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_bo_va *bo_va = mapping->bo_va;
+ struct dma_resv *resv = bo_va->base.bo->tbo.base.resv;
+ int ret = 0;
+
+ if (!ip_mask)
+ return 0;
+
+ dev_warn_once(adev->dev, "now unmapping a vital queue va:%llx\n", saddr);
+ /**
+ * The userq VA mapping reservation should include the eviction fence,
+ * if the eviction fence can't signal successfully during unmapping,
+ * then driver will warn to flag this improper unmap of the userq VA.
+ * Note: The eviction fence may be attached to different BOs, and this
+ * unmap is only for one kind of userq VAs, so at this point suppose
+ * the eviction fence is always unsignaled.
+ */
+ if (!dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP)) {
+ ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret <= 0)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+void amdgpu_userq_pre_reset(struct amdgpu_device *adev)
+{
+ const struct amdgpu_userq_funcs *userq_funcs;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
+
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
+ cancel_delayed_work_sync(&uqm->resume_work);
+ if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
+ amdgpu_userq_wait_for_last_fence(uqm, queue);
+ userq_funcs = adev->userq_funcs[queue->queue_type];
+ userq_funcs->unmap(uqm, queue);
+ /* Just mark all queues as hung at this point.
+ * If the unmap succeeds, they can be mapped again
+ * in amdgpu_userq_post_reset() if VRAM is not lost.
+ */
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ amdgpu_userq_fence_driver_force_completion(queue);
+ }
+ }
+}
+
+int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost)
+{
+ /* if any queue state is AMDGPU_USERQ_STATE_UNMAPPED
+ * at this point, we should be able to map it again
+ * and continue if vram is not lost.
+ */
+ struct amdgpu_userq_mgr *uqm;
+ struct amdgpu_usermode_queue *queue;
+ const struct amdgpu_userq_funcs *userq_funcs;
+ unsigned long queue_id;
+ int r = 0;
+
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
+ if (queue->state == AMDGPU_USERQ_STATE_HUNG && !vram_lost) {
+ userq_funcs = adev->userq_funcs[queue->queue_type];
+ /* Re-map queue */
+ r = userq_funcs->map(uqm, queue);
+ if (r) {
+ dev_err(adev->dev, "Failed to remap queue %ld\n", queue_id);
+ continue;
+ }
+ queue->state = AMDGPU_USERQ_STATE_MAPPED;
+ }
+ }
+
+ return r;
+}
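These two hooks bracket a full GPU recovery; the assumed ordering in the reset path looks roughly like this (an illustrative sketch, not a quote of the caller):

amdgpu_userq_pre_reset(adev);		/* unmap queues, mark them hung */
/* ... mode-1 reset and IP re-initialization happen here ... */
r = amdgpu_userq_post_reset(adev, vram_lost);	/* re-map if VRAM survived */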
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index ec040c2fd6c9..c37444427a14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -37,6 +37,7 @@ enum amdgpu_userq_state {
AMDGPU_USERQ_STATE_MAPPED,
AMDGPU_USERQ_STATE_PREEMPTED,
AMDGPU_USERQ_STATE_HUNG,
+ AMDGPU_USERQ_STATE_INVALID_VA,
};
struct amdgpu_mqd_prop;
@@ -47,6 +48,11 @@ struct amdgpu_userq_obj {
struct amdgpu_bo *obj;
};
+struct amdgpu_userq_va_cursor {
+ u64 gpu_addr;
+ struct list_head list;
+};
+
struct amdgpu_usermode_queue {
int queue_type;
enum amdgpu_userq_state state;
@@ -65,6 +71,9 @@ struct amdgpu_usermode_queue {
struct dma_fence *last_fence;
u32 xcp_id;
int priority;
+ struct dentry *debugfs_queue;
+
+ struct list_head userq_va_list;
};
struct amdgpu_userq_funcs {
@@ -77,16 +86,27 @@ struct amdgpu_userq_funcs {
struct amdgpu_usermode_queue *queue);
int (*map)(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue);
+ int (*preempt)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*restore)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*detect_and_reset)(struct amdgpu_device *adev,
+ int queue_type);
};
/* Usermode queues for gfx */
struct amdgpu_userq_mgr {
- struct idr userq_idr;
+ /**
+ * @userq_mgr_xa: Per-process user queue map (queue ID -> queue)
+ * Key: queue_id (unique ID within the process's userq manager)
+ * Value: struct amdgpu_usermode_queue
+ */
+ struct xarray userq_mgr_xa;
struct mutex userq_mutex;
struct amdgpu_device *adev;
struct delayed_work resume_work;
- struct list_head list;
struct drm_file *file;
+ atomic_t userq_count[AMDGPU_RING_TYPE_MAX];
};
struct amdgpu_db_info {
@@ -113,8 +133,6 @@ void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_eviction_fence *ev_fence);
-int amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr);
-
void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr,
struct amdgpu_eviction_fence_mgr *evf_mgr);
@@ -131,5 +149,13 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
u32 idx);
int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
u32 idx);
-
+void amdgpu_userq_reset_work(struct work_struct *work);
+void amdgpu_userq_pre_reset(struct amdgpu_device *adev);
+int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost);
+
+int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue,
+ u64 addr, u64 expected_size);
+int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t saddr);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index a86616c6deef..eba9fb359047 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -67,6 +67,14 @@ static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
return le64_to_cpu(*fence_drv->cpu_addr);
}
+static void
+amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
+ u64 seq)
+{
+ if (fence_drv->cpu_addr)
+ *fence_drv->cpu_addr = cpu_to_le64(seq);
+}
+
int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
struct amdgpu_usermode_queue *userq)
{
@@ -143,15 +151,16 @@ void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_d
{
struct amdgpu_userq_fence *userq_fence, *tmp;
struct dma_fence *fence;
+ unsigned long flags;
u64 rptr;
int i;
if (!fence_drv)
return;
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
rptr = amdgpu_userq_fence_read(fence_drv);
- spin_lock(&fence_drv->fence_list_lock);
list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
fence = &userq_fence->base;
@@ -166,7 +175,7 @@ void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_d
list_del(&userq_fence->link);
dma_fence_put(fence);
}
- spin_unlock(&fence_drv->fence_list_lock);
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
}
void amdgpu_userq_fence_driver_destroy(struct kref *ref)
@@ -239,8 +248,8 @@ static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
fence = &userq_fence->base;
userq_fence->fence_drv = fence_drv;
- dma_fence_init(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
- fence_drv->context, seq);
+ dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
+ fence_drv->context, seq);
amdgpu_userq_fence_driver_get(fence_drv);
dma_fence_get(fence);
@@ -276,7 +285,7 @@ static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
/* Check if hardware has already processed the job */
spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
- if (!dma_fence_is_signaled_locked(fence))
+ if (!dma_fence_is_signaled(fence))
list_add_tail(&userq_fence->link, &fence_drv->fences);
else
dma_fence_put(fence);
@@ -334,7 +343,6 @@ static void amdgpu_userq_fence_release(struct dma_fence *f)
}
static const struct dma_fence_ops amdgpu_userq_fence_ops = {
- .use_64bit_seqno = true,
.get_driver_name = amdgpu_userq_fence_get_driver_name,
.get_timeline_name = amdgpu_userq_fence_get_timeline_name,
.signaled = amdgpu_userq_fence_signaled,
@@ -379,6 +387,7 @@ static int amdgpu_userq_fence_read_wptr(struct amdgpu_usermode_queue *queue,
amdgpu_bo_unreserve(queue->vm->root.bo);
r = amdgpu_bo_reserve(bo, true);
if (r) {
+ amdgpu_bo_unref(&bo);
DRM_ERROR("Failed to reserve userqueue wptr bo");
return r;
}
@@ -409,6 +418,40 @@ static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
dma_fence_put(fence);
}
+static void
+amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
+ int error)
+{
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ unsigned long flags;
+ struct dma_fence *f;
+
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+
+ f = rcu_dereference_protected(&fence->base,
+ lockdep_is_held(&fence_drv->fence_list_lock));
+ if (f && !dma_fence_is_signaled_locked(f))
+ dma_fence_set_error(f, error);
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+}
+
+void
+amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
+{
+ struct dma_fence *f = userq->last_fence;
+
+ if (f) {
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ u64 wptr = fence->base.seqno;
+
+ amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
+ amdgpu_userq_fence_write(fence_drv, wptr);
+ amdgpu_userq_fence_driver_process(fence_drv);
+
+ }
+}
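Forced completion tags the last fence with -ECANCELED and advances the RPTR copy so that amdgpu_userq_fence_driver_process() retires it; a waiter then sees the error instead of blocking forever. A hypothetical waiter-side view:

long t = dma_fence_wait_timeout(queue->last_fence, false, HZ);

/* t > 0 means signaled; a forced completion carries -ECANCELED */
if (t > 0 && queue->last_fence->error == -ECANCELED)
	pr_debug("userq fence cancelled by queue reset\n");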
+
int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -496,7 +539,7 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
}
/* Retrieve the user queue */
- queue = idr_find(&userq_mgr->userq_idr, args->queue_id);
+ queue = xa_load(&userq_mgr->userq_mgr_xa, args->queue_id);
if (!queue) {
r = -ENOENT;
goto put_gobj_write;
@@ -858,7 +901,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
*/
num_fences = dma_fence_dedup_array(fences, num_fences);
- waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id);
+ waitq = xa_load(&userq_mgr->userq_mgr_xa, wait_info->waitq_id);
if (!waitq) {
r = -EINVAL;
goto free_fences;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
index 97a125ab8a78..d76add2afc77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
@@ -67,6 +67,7 @@ int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
struct amdgpu_usermode_queue *userq);
void amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq);
void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv);
+void amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq);
void amdgpu_userq_fence_driver_destroy(struct kref *ref);
int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h
new file mode 100644
index 000000000000..1e40ca3b1584
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_UTILS_H_
+#define AMDGPU_UTILS_H_
+
+/* ---------- Generic 2-bit capability attribute encoding ----------
+ * 00 INVALID, 01 RO, 10 WO, 11 RW
+ */
+enum amdgpu_cap_attr {
+ AMDGPU_CAP_ATTR_INVALID = 0,
+ AMDGPU_CAP_ATTR_RO = 1 << 0,
+ AMDGPU_CAP_ATTR_WO = 1 << 1,
+ AMDGPU_CAP_ATTR_RW = (AMDGPU_CAP_ATTR_RO | AMDGPU_CAP_ATTR_WO),
+};
+
+#define AMDGPU_CAP_ATTR_BITS 2
+#define AMDGPU_CAP_ATTR_MAX ((1U << AMDGPU_CAP_ATTR_BITS) - 1)
+
+/* Internal macro: builds the helper functions for a given enum NAME */
+#define DECLARE_ATTR_CAP_CLASS_HELPERS(NAME) \
+enum { NAME##_BITMAP_BITS = NAME##_COUNT * AMDGPU_CAP_ATTR_BITS }; \
+struct NAME##_caps { \
+ DECLARE_BITMAP(bmap, NAME##_BITMAP_BITS); \
+}; \
+static inline unsigned int NAME##_ATTR_START(enum NAME##_cap_id cap) \
+{ return (unsigned int)cap * AMDGPU_CAP_ATTR_BITS; } \
+static inline void NAME##_attr_init(struct NAME##_caps *c) \
+{ if (c) bitmap_zero(c->bmap, NAME##_BITMAP_BITS); } \
+static inline int NAME##_attr_set(struct NAME##_caps *c, \
+ enum NAME##_cap_id cap, enum amdgpu_cap_attr attr) \
+{ \
+ if (!c) \
+ return -EINVAL; \
+ if (cap >= NAME##_COUNT) \
+ return -EINVAL; \
+ if ((unsigned int)attr > AMDGPU_CAP_ATTR_MAX) \
+ return -EINVAL; \
+ bitmap_write(c->bmap, (unsigned long)attr, \
+ NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \
+ return 0; \
+} \
+static inline int NAME##_attr_get(const struct NAME##_caps *c, \
+ enum NAME##_cap_id cap, enum amdgpu_cap_attr *out) \
+{ \
+ unsigned long v; \
+ if (!c || !out) \
+ return -EINVAL; \
+ if (cap >= NAME##_COUNT) \
+ return -EINVAL; \
+ v = bitmap_read(c->bmap, NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \
+ *out = (enum amdgpu_cap_attr)v; \
+ return 0; \
+} \
+static inline bool NAME##_cap_is_ro(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RO; } \
+static inline bool NAME##_cap_is_wo(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_WO; } \
+static inline bool NAME##_cap_is_rw(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RW; }
+
+/* Element expander for enum creation */
+#define _CAP_ENUM_ELEM(x) x,
+
+/* Public macro: declare enum + helpers from an X-macro list */
+#define DECLARE_ATTR_CAP_CLASS(NAME, LIST_MACRO) \
+ enum NAME##_cap_id { LIST_MACRO(_CAP_ENUM_ELEM) NAME##_COUNT }; \
+ DECLARE_ATTR_CAP_CLASS_HELPERS(NAME)
+
+#endif /* AMDGPU_UTILS_H_ */
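A hypothetical consumer of this header, to show how the X-macro expands (every example_*/EXAMPLE_* name below is invented for illustration):

#define EXAMPLE_CAP_LIST(X)	\
	X(EXAMPLE_CAP_FOO)	\
	X(EXAMPLE_CAP_BAR)

DECLARE_ATTR_CAP_CLASS(example, EXAMPLE_CAP_LIST);
/*
 * Expands to: enum example_cap_id { EXAMPLE_CAP_FOO, EXAMPLE_CAP_BAR,
 * example_COUNT }; plus example_attr_init/set/get and the is_ro/is_wo/is_rw
 * predicates over a 2-bit-per-capability bitmap.
 */

static void example_caps_demo(void)
{
	struct example_caps caps;

	example_attr_init(&caps);
	example_attr_set(&caps, EXAMPLE_CAP_FOO, AMDGPU_CAP_ATTR_RW);
	WARN_ON(!example_cap_is_rw(&caps, EXAMPLE_CAP_FOO));
}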
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 74758b5ffc6c..5c38f0d30c87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1136,7 +1136,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
64, direct ? AMDGPU_IB_POOL_DIRECT :
- AMDGPU_IB_POOL_DELAYED, &job);
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index b9060bcd4806..a7d8f1ce6ac2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -41,6 +41,9 @@
#define VCE_IDLE_TIMEOUT msecs_to_jiffies(1000)
/* Firmware Names */
+#ifdef CONFIG_DRM_AMDGPU_SI
+#define FIRMWARE_VCE_V1_0 "amdgpu/vce_1_0_0.bin"
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE "amdgpu/bonaire_vce.bin"
#define FIRMWARE_KABINI "amdgpu/kabini_vce.bin"
@@ -61,6 +64,9 @@
#define FIRMWARE_VEGA12 "amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20 "amdgpu/vega20_vce.bin"
+#ifdef CONFIG_DRM_AMDGPU_SI
+MODULE_FIRMWARE(FIRMWARE_VCE_V1_0);
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
@@ -88,82 +94,93 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence);
/**
- * amdgpu_vce_sw_init - allocate memory, load vce firmware
+ * amdgpu_vce_firmware_name() - determine the firmware file name for VCE
*
* @adev: amdgpu_device pointer
- * @size: size for the new BO
*
- * First step to get VCE online, allocate memory and load the firmware
+ * Each chip that has VCE IP may need a different firmware.
+ * This function returns the name of the VCE firmware file
+ * appropriate for the current chip.
*/
-int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
+static const char *amdgpu_vce_firmware_name(struct amdgpu_device *adev)
{
- const char *fw_name;
- const struct common_firmware_header *hdr;
- unsigned int ucode_version, version_major, version_minor, binary_id;
- int i, r;
-
switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_PITCAIRN:
+ case CHIP_TAHITI:
+ case CHIP_VERDE:
+ return FIRMWARE_VCE_V1_0;
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
- fw_name = FIRMWARE_BONAIRE;
- break;
+ return FIRMWARE_BONAIRE;
case CHIP_KAVERI:
- fw_name = FIRMWARE_KAVERI;
- break;
+ return FIRMWARE_KAVERI;
case CHIP_KABINI:
- fw_name = FIRMWARE_KABINI;
- break;
+ return FIRMWARE_KABINI;
case CHIP_HAWAII:
- fw_name = FIRMWARE_HAWAII;
- break;
+ return FIRMWARE_HAWAII;
case CHIP_MULLINS:
- fw_name = FIRMWARE_MULLINS;
- break;
+ return FIRMWARE_MULLINS;
#endif
case CHIP_TONGA:
- fw_name = FIRMWARE_TONGA;
- break;
+ return FIRMWARE_TONGA;
case CHIP_CARRIZO:
- fw_name = FIRMWARE_CARRIZO;
- break;
+ return FIRMWARE_CARRIZO;
case CHIP_FIJI:
- fw_name = FIRMWARE_FIJI;
- break;
+ return FIRMWARE_FIJI;
case CHIP_STONEY:
- fw_name = FIRMWARE_STONEY;
- break;
+ return FIRMWARE_STONEY;
case CHIP_POLARIS10:
- fw_name = FIRMWARE_POLARIS10;
- break;
+ return FIRMWARE_POLARIS10;
case CHIP_POLARIS11:
- fw_name = FIRMWARE_POLARIS11;
- break;
+ return FIRMWARE_POLARIS11;
case CHIP_POLARIS12:
- fw_name = FIRMWARE_POLARIS12;
- break;
+ return FIRMWARE_POLARIS12;
case CHIP_VEGAM:
- fw_name = FIRMWARE_VEGAM;
- break;
+ return FIRMWARE_VEGAM;
case CHIP_VEGA10:
- fw_name = FIRMWARE_VEGA10;
- break;
+ return FIRMWARE_VEGA10;
case CHIP_VEGA12:
- fw_name = FIRMWARE_VEGA12;
- break;
+ return FIRMWARE_VEGA12;
case CHIP_VEGA20:
- fw_name = FIRMWARE_VEGA20;
- break;
+ return FIRMWARE_VEGA20;
default:
- return -EINVAL;
+ return NULL;
}
+}
+
+/**
+ * amdgpu_vce_early_init() - try to load VCE firmware
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tries to load the VCE firmware.
+ *
+ * When the firmware is not found, returns -ENOENT so that the driver can
+ * still load and initialize the rest of the IP blocks.
+ * The GPU can function just fine without VCE; it just
+ * won't support video encoding.
+ */
+int amdgpu_vce_early_init(struct amdgpu_device *adev)
+{
+ const char *fw_name = amdgpu_vce_firmware_name(adev);
+ const struct common_firmware_header *hdr;
+ unsigned int ucode_version, version_major, version_minor, binary_id;
+ int r;
+
+ if (!fw_name)
+ return -ENOENT;
r = amdgpu_ucode_request(adev, &adev->vce.fw, AMDGPU_UCODE_REQUIRED, "%s", fw_name);
if (r) {
- dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
- fw_name);
+ dev_err(adev->dev,
+ "amdgpu_vce: Firmware \"%s\" not found or failed to validate (%d)\n",
+ fw_name, r);
+
amdgpu_ucode_release(&adev->vce.fw);
- return r;
+ return -ENOENT;
}
hdr = (const struct common_firmware_header *)adev->vce.fw->data;
@@ -172,11 +189,35 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
version_major = (ucode_version >> 20) & 0xfff;
version_minor = (ucode_version >> 8) & 0xfff;
binary_id = ucode_version & 0xff;
- DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
+ dev_info(adev->dev, "Found VCE firmware Version: %d.%d Binary ID: %d\n",
version_major, version_minor, binary_id);
adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
(binary_id << 8));
+ return 0;
+}
+
+/**
+ * amdgpu_vce_sw_init() - allocate memory for VCE BO
+ *
+ * @adev: amdgpu_device pointer
+ * @size: size for the new BO
+ *
+ * First step to get VCE online: allocate memory for VCE BO.
+ * The VCE firmware binary is copied into the VCE BO later,
+ * in amdgpu_vce_resume. The VCE executes its code from the
+ * VCE BO and also uses the space in this BO for its stack and data.
+ *
+ * Ideally this BO should be placed in VRAM for optimal performance,
+ * although technically it also runs from system RAM (albeit slowly).
+ */
+int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
+{
+ int i, r;
+
+ if (!adev->vce.fw)
+ return -ENOENT;
+
r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
@@ -285,40 +326,23 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
*/
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
- void *cpu_addr;
const struct common_firmware_header *hdr;
unsigned int offset;
- int r, idx;
+ int idx;
if (adev->vce.vcpu_bo == NULL)
return -EINVAL;
- r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
- if (r) {
- dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
- return r;
- }
-
- r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->vce.vcpu_bo);
- dev_err(adev->dev, "(%d) VCE map failed\n", r);
- return r;
- }
-
hdr = (const struct common_firmware_header *)adev->vce.fw->data;
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
+ memset_io(adev->vce.cpu_addr, 0, amdgpu_bo_size(adev->vce.vcpu_bo));
+ memcpy_toio(adev->vce.cpu_addr, adev->vce.fw->data + offset,
adev->vce.fw->size - offset);
drm_dev_exit(idx);
}
- amdgpu_bo_kunmap(adev->vce.vcpu_bo);
-
- amdgpu_bo_unreserve(adev->vce.vcpu_bo);
-
return 0;
}
@@ -427,6 +451,24 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
}
/**
+ * amdgpu_vce_required_gart_pages() - gets number of GART pages required by VCE
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns how many GART pages need to be reserved ahead of GTT for the
+ * VCE IP block.
+ * For VCE1, see vce_v1_0_ensure_vcpu_bo_32bit_addr for details.
+ * For VCE2+, this is not needed, so return zero.
+ */
+u32 amdgpu_vce_required_gart_pages(struct amdgpu_device *adev)
+{
+ /* VCE IP block not added yet, so can't use amdgpu_ip_version */
+ if (adev->family == AMDGPU_FAMILY_SI)
+ return 512;
+
+ return 0;
+}
+
+/**
* amdgpu_vce_get_create_msg - generate a VCE create msg
*
* @ring: ring we should submit the msg to
@@ -449,7 +491,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -540,7 +582,8 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
AMDGPU_FENCE_OWNER_UNDEFINED,
ib_size_dw * 4,
direct ? AMDGPU_IB_POOL_DIRECT :
- AMDGPU_IB_POOL_DELAYED, &job);
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 6e53f872d084..1c3464ce5037 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -51,14 +51,17 @@ struct amdgpu_vce {
struct drm_sched_entity entity;
uint32_t srbm_soft_reset;
unsigned num_rings;
+ uint32_t keyselect;
};
+int amdgpu_vce_early_init(struct amdgpu_device *adev);
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size);
int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
+u32 amdgpu_vce_required_gart_pages(struct amdgpu_device *adev);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
struct amdgpu_ib *ib);
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index c8885c3d54b3..5e0786ea911b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -92,6 +92,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
+static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev);
int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i)
{
@@ -134,6 +135,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
mutex_init(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
mutex_init(&adev->vcn.inst[i].vcn_pg_lock);
+ mutex_init(&adev->vcn.inst[i].engine_reset_mutex);
atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0);
INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler);
atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
@@ -183,16 +185,16 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
dev_info(adev->dev,
- "Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
- enc_major, enc_minor, dec_ver, vep, fw_rev);
+ "[VCN instance %d] Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
+ i, enc_major, enc_minor, dec_ver, vep, fw_rev);
} else {
unsigned int version_major, version_minor, family_id;
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- dev_info(adev->dev, "Found VCN firmware Version: %u.%u Family ID: %u\n",
- version_major, version_minor, family_id);
+ dev_info(adev->dev, "[VCN instance %d] Found VCN firmware Version: %u.%u Family ID: %u\n",
+ i, version_major, version_minor, family_id);
}
bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
@@ -255,12 +257,12 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
return 0;
}
-int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
+void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
{
int j;
if (adev->vcn.harvest_config & (1 << i))
- return 0;
+ return;
amdgpu_bo_free_kernel(
&adev->vcn.inst[i].dpg_sram_bo,
@@ -284,10 +286,12 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
amdgpu_ucode_release(&adev->vcn.inst[0].fw);
adev->vcn.inst[i].fw = NULL;
}
+
+ if (adev->vcn.reg_list)
+ amdgpu_vcn_reg_dump_fini(adev);
+
mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock);
mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
-
- return 0;
}
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
@@ -351,8 +355,6 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
if (adev->vcn.harvest_config & (1 << i))
return 0;
- cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);
-
/* err_event_athub and dpc recovery will corrupt VCPU buffer, so we need to
* restore fw data and clear buffer in amdgpu_vcn_resume() */
if (in_ras_intr || adev->pcie_reset_ctx.in_link_reset)
@@ -404,6 +406,54 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev, int i)
return 0;
}
+void amdgpu_vcn_get_profile(struct amdgpu_device *adev)
+{
+ int r;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+
+ if (adev->vcn.workload_profile_active) {
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+ return;
+ }
+ r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
+ true);
+ if (r)
+ dev_warn(adev->dev,
+ "(%d) failed to enable video power profile mode\n", r);
+ else
+ adev->vcn.workload_profile_active = true;
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+}
+
+void amdgpu_vcn_put_profile(struct amdgpu_device *adev)
+{
+ bool pg = true;
+ int r, i;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.inst[i].cur_state != AMD_PG_STATE_GATE) {
+ pg = false;
+ break;
+ }
+ }
+
+ if (pg) {
+ r = amdgpu_dpm_switch_power_profile(
+ adev, PP_SMC_POWER_PROFILE_VIDEO, false);
+ if (r)
+ dev_warn(
+ adev->dev,
+ "(%d) failed to disable video power profile mode\n",
+ r);
+ else
+ adev->vcn.workload_profile_active = false;
+ }
+
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+}
+
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
struct amdgpu_vcn_inst *vcn_inst =
@@ -411,7 +461,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
struct amdgpu_device *adev = vcn_inst->adev;
unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
unsigned int i = vcn_inst->inst, j;
- int r = 0;
if (adev->vcn.harvest_config & (1 << i))
return;
@@ -437,16 +486,11 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
fences += fence[i];
if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) {
+ mutex_lock(&vcn_inst->vcn_pg_lock);
vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_GATE);
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- false);
- if (r)
- dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
- adev->vcn.workload_profile_active = false;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
+ mutex_unlock(&vcn_inst->vcn_pg_lock);
+ amdgpu_vcn_put_profile(adev);
+
} else {
schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT);
}
@@ -456,30 +500,11 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *vcn_inst = &adev->vcn.inst[ring->me];
- int r = 0;
atomic_inc(&vcn_inst->total_submission_cnt);
cancel_delayed_work_sync(&vcn_inst->idle_work);
- /* We can safely return early here because we've cancelled the
- * the delayed work so there is no one else to set it to false
- * and we don't care if someone else sets it to true.
- */
- if (adev->vcn.workload_profile_active)
- goto pg_lock;
-
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (!adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- true);
- if (r)
- dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
- adev->vcn.workload_profile_active = true;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
-
-pg_lock:
mutex_lock(&vcn_inst->vcn_pg_lock);
vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_UNGATE);
@@ -507,6 +532,7 @@ pg_lock:
vcn_inst->pause_dpg_mode(vcn_inst, &new_state);
}
mutex_unlock(&vcn_inst->vcn_pg_lock);
+ amdgpu_vcn_get_profile(adev);
}
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
@@ -600,7 +626,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
64, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
goto err;
@@ -780,7 +806,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
goto err;
@@ -910,7 +936,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -977,7 +1003,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -1131,7 +1157,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
{
struct amdgpu_vcn_inst *vcn;
void *log_buf;
- volatile struct amdgpu_vcn_fwlog *plog;
+ struct amdgpu_vcn_fwlog *plog;
unsigned int read_pos, write_pos, available, i, read_bytes = 0;
unsigned int read_num[2] = {0};
@@ -1144,7 +1170,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
- plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
+ plog = (struct amdgpu_vcn_fwlog *)log_buf;
read_pos = plog->rptr;
write_pos = plog->wptr;
@@ -1211,11 +1237,11 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
- volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
+ uint32_t *flag = vcn->fw_shared.cpu_addr;
void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
- volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
- volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
+ struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
+ struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
+ vcn->fw_shared.log_offset;
*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
fw_log->is_enabled = 1;
@@ -1451,3 +1477,161 @@ int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
return ret;
}
+
+/**
+ * amdgpu_vcn_reset_engine - Reset a specific VCN engine
+ * @adev: Pointer to the AMDGPU device
+ * @instance_id: VCN engine instance to reset
+ *
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+static int amdgpu_vcn_reset_engine(struct amdgpu_device *adev,
+ uint32_t instance_id)
+{
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[instance_id];
+ int r, i;
+
+ mutex_lock(&vinst->engine_reset_mutex);
+ /* Stop the scheduler's work queue for the dec and enc rings if they are running.
+ * This ensures that no new tasks are submitted to the queues while
+ * the reset is in progress.
+ */
+ drm_sched_wqueue_stop(&vinst->ring_dec.sched);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ drm_sched_wqueue_stop(&vinst->ring_enc[i].sched);
+
+ /* Perform the VCN reset for the specified instance */
+ r = vinst->reset(vinst);
+ if (r)
+ goto unlock;
+ r = amdgpu_ring_test_ring(&vinst->ring_dec);
+ if (r)
+ goto unlock;
+ for (i = 0; i < vinst->num_enc_rings; i++) {
+ r = amdgpu_ring_test_ring(&vinst->ring_enc[i]);
+ if (r)
+ goto unlock;
+ }
+ amdgpu_fence_driver_force_completion(&vinst->ring_dec);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ amdgpu_fence_driver_force_completion(&vinst->ring_enc[i]);
+
+ /* Restart the scheduler's work queue for the dec and enc rings
+ * if they were stopped by this function. This allows new tasks
+ * to be submitted to the queues after the reset is complete.
+ */
+ drm_sched_wqueue_start(&vinst->ring_dec.sched);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ drm_sched_wqueue_start(&vinst->ring_enc[i].sched);
+
+unlock:
+ mutex_unlock(&vinst->engine_reset_mutex);
+
+ return r;
+}
+
+/**
+ * amdgpu_vcn_ring_reset - Reset a VCN ring
+ * @ring: ring to reset
+ * @vmid: vmid of guilty job
+ * @timedout_fence: fence of timed out job
+ *
+ * This helper is for VCN blocks without unified queues, where
+ * resetting the engine resets all of its queues. With unified
+ * queues there is only one queue per engine.
+ *
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (adev->vcn.inst[ring->me].using_unified_queue)
+ return -EINVAL;
+
+ return amdgpu_vcn_reset_engine(adev, ring->me);
+}
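Per-instance VCN code is expected to wire this helper into its ring callbacks. The field name below is an assumption based on how amdgpu rings expose reset hooks; the real tables live in the per-version vcn_v*_0.c files, so treat this as illustrative only.

/* Illustrative sketch: a non-unified-queue VCN version hooking the
 * common reset helper into its decode ring callbacks.
 */
static const struct amdgpu_ring_funcs vcn_dec_ring_funcs_sketch = {
	/* ... existing callbacks elided ... */
	.reset = amdgpu_vcn_ring_reset,
};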
+
+int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count)
+{
+ adev->vcn.ip_dump = kcalloc(adev->vcn.num_vcn_inst * count,
+ sizeof(uint32_t), GFP_KERNEL);
+ if (!adev->vcn.ip_dump)
+ return -ENOMEM;
+ adev->vcn.reg_list = reg;
+ adev->vcn.reg_count = count;
+
+ return 0;
+}
+
+static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->vcn.ip_dump);
+ adev->vcn.ip_dump = NULL;
+ adev->vcn.reg_list = NULL;
+ adev->vcn.reg_count = 0;
+}
+
+void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, j;
+ bool is_powered;
+ u32 inst_off;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * adev->vcn.reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first in reg_list */
+ adev->vcn.ip_dump[inst_off] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[0], i));
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+
+ if (is_powered)
+ for (j = 1; j < adev->vcn.reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[j], i));
+ }
+}
+
+void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, j;
+ bool is_powered;
+ u32 inst_off;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * adev->vcn.reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < adev->vcn.reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", adev->vcn.reg_list[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
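Callers register their register list once at sw_init time and the two common routines above do the rest; note that the list must start with mmUVD_POWER_STATUS, since the dump reads entry 0 to decide whether the instance is powered. A hedged sketch of the registration side follows; the SOC15_REG_ENTRY_STR macro and the specific register names are assumptions from context, not part of this patch.

/* Sketch of a per-version sw_init using the new common dump helpers. */
static const struct amdgpu_hwip_reg_entry vcn_reg_list_sketch[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS), /* must stay first */
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
};

static int vcn_vX_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	/* ... existing init ... */
	return amdgpu_vcn_reg_dump_init(ip_block->adev, vcn_reg_list_sketch,
					ARRAY_SIZE(vcn_reg_list_sketch));
}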
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 83adf81defc7..82624b44e661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -100,7 +100,8 @@
#define SOC15_DPG_MODE_OFFSET(ip, inst_idx, reg) \
({ \
- uint32_t internal_reg_offset, addr; \
+ /* To avoid a -Wunused-but-set-variable warning. */ \
+ uint32_t internal_reg_offset __maybe_unused, addr; \
bool video_range, video1_range, aon_range, aon1_range; \
\
addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
@@ -161,7 +162,8 @@
#define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg) \
({ \
- uint32_t internal_reg_offset, addr; \
+ /* To avoid a -Wunused-but-set-variable warning. */ \
+ uint32_t internal_reg_offset __maybe_unused, addr; \
bool video_range, video1_range, aon_range, aon1_range; \
\
addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
@@ -237,6 +239,8 @@
#define AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING 2
+struct amdgpu_hwip_reg_entry;
+
enum amdgpu_vcn_caps {
AMDGPU_VCN_RRMT_ENABLED,
};
@@ -330,7 +334,9 @@ struct amdgpu_vcn_inst {
struct dpg_pause_state *new_state);
int (*set_pg_state)(struct amdgpu_vcn_inst *vinst,
enum amd_powergating_state state);
+ int (*reset)(struct amdgpu_vcn_inst *vinst);
bool using_unified_queue;
+ struct mutex engine_reset_mutex;
};
struct amdgpu_vcn_ras {
@@ -360,6 +366,8 @@ struct amdgpu_vcn {
bool workload_profile_active;
struct mutex workload_profile_mutex;
+ u32 reg_count;
+ const struct amdgpu_hwip_reg_entry *reg_list;
};
struct amdgpu_fw_shared_rb_ptrs_struct {
@@ -495,7 +503,7 @@ struct amdgpu_vcn5_fw_shared {
struct amdgpu_fw_shared_rb_setup rb_setup;
struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
- uint8_t pad3[9];
+ uint8_t pad3[404];
};
#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80
@@ -510,7 +518,7 @@ enum vcn_ring_type {
int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i);
int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i);
-int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i);
+void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i);
int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i);
int amdgpu_vcn_resume(struct amdgpu_device *adev, int i);
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring);
@@ -552,5 +560,14 @@ void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev);
int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
+int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *guilty_fence);
+int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count);
+void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block);
+void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
+void amdgpu_vcn_get_profile(struct amdgpu_device *adev);
+void amdgpu_vcn_put_profile(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 13f0cdeb59c4..47a6ce4fdc74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -44,6 +44,18 @@
vf2pf_info->ucode_info[ucode].version = ver; \
} while (0)
+#define mmRCC_CONFIG_MEMSIZE 0xde3
+
+const char *amdgpu_virt_dynamic_crit_table_name[] = {
+ "IP DISCOVERY",
+ "VBIOS IMG",
+ "RAS TELEMETRY",
+ "DATA EXCHANGE",
+ "BAD PAGE INFO",
+ "INIT HEADER",
+ "LAST",
+};
+
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
/* By now all MMIO pages except mailbox are blocked */
@@ -150,9 +162,10 @@ void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
virt->ops->req_init_data(adev);
if (adev->virt.req_init_data_ver > 0)
- DRM_INFO("host supports REQ_INIT_DATA handshake\n");
+ dev_info(adev->dev, "host supports REQ_INIT_DATA handshake, critical region version %d\n",
+ adev->virt.req_init_data_ver);
else
- DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
+ dev_warn(adev->dev, "host doesn't support REQ_INIT_DATA handshake\n");
}
/**
@@ -205,12 +218,12 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
&adev->virt.mm_table.gpu_addr,
(void *)&adev->virt.mm_table.cpu_addr);
if (r) {
- DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
+ dev_err(adev->dev, "failed to alloc mm table and error = %d.\n", r);
return r;
}
memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
- DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
+ dev_info(adev->dev, "MM table gpu addr = 0x%llx, cpu addr = %p.\n",
adev->virt.mm_table.gpu_addr,
adev->virt.mm_table.cpu_addr);
return 0;
@@ -390,7 +403,9 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
AMDGPU_GPU_PAGE_SIZE,
&bo, NULL))
- DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
+ dev_dbg(adev->dev,
+ "RAS WARN: reserve vram for retired page %llx fail\n",
+ bp);
data->bps_bo[i] = bo;
}
data->last_reserved = i + 1;
@@ -598,8 +613,8 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
vf2pf_info->driver_cert = 0;
vf2pf_info->os_info.all = 0;
- vf2pf_info->fb_usage =
- ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
+ vf2pf_info->fb_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
+ ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20 : 0;
vf2pf_info->fb_vis_usage =
amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
@@ -658,10 +673,34 @@ out:
schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
}
+static int amdgpu_virt_read_exchange_data_from_mem(struct amdgpu_device *adev, uint32_t *pfvf_data)
+{
+ uint32_t dataexchange_offset =
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset;
+ uint32_t dataexchange_size =
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10;
+ uint64_t pos = 0;
+
+ dev_info(adev->dev,
+ "Got data exchange info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n",
+ dataexchange_offset, dataexchange_size);
+
+ if (!IS_ALIGNED(dataexchange_offset, 4) || !IS_ALIGNED(dataexchange_size, 4)) {
+ dev_err(adev->dev, "Data exchange data not aligned to 4 bytes\n");
+ return -EINVAL;
+ }
+
+ pos = (uint64_t)dataexchange_offset;
+ amdgpu_device_vram_access(adev, pos, pfvf_data,
+ dataexchange_size, false);
+
+ return 0;
+}
+
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
if (adev->virt.vf2pf_update_interval_ms != 0) {
- DRM_INFO("clean up the vf2pf work item\n");
+ dev_info(adev->dev, "clean up the vf2pf work item\n");
cancel_delayed_work_sync(&adev->virt.vf2pf_work);
adev->virt.vf2pf_update_interval_ms = 0;
}
@@ -669,13 +708,15 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
+ uint32_t *pfvf_data = NULL;
+
adev->virt.fw_reserve.p_pf2vf = NULL;
adev->virt.fw_reserve.p_vf2pf = NULL;
adev->virt.vf2pf_update_interval_ms = 0;
adev->virt.vf2pf_update_retry_cnt = 0;
if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
- DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!");
+ dev_warn(adev->dev, "Currently fw_vram and drv_vram should not have values at the same time!");
} else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
/* go through this logic in ip_init and reset to init workqueue*/
amdgpu_virt_exchange_data(adev);
@@ -684,11 +725,34 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
} else if (adev->bios != NULL) {
/* got through this logic in early init stage to get necessary flags, e.g. rlcg_acc related*/
- adev->virt.fw_reserve.p_pf2vf =
- (struct amd_sriov_msg_pf2vf_info_header *)
- (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+ if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) {
+ pfvf_data =
+ kzalloc(adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10,
+ GFP_KERNEL);
+ if (!pfvf_data) {
+ dev_err(adev->dev, "Failed to allocate memory for pfvf_data\n");
+ return;
+ }
- amdgpu_virt_read_pf2vf_data(adev);
+ if (amdgpu_virt_read_exchange_data_from_mem(adev, pfvf_data))
+ goto free_pfvf_data;
+
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)pfvf_data;
+
+ amdgpu_virt_read_pf2vf_data(adev);
+
+free_pfvf_data:
+ kfree(pfvf_data);
+ pfvf_data = NULL;
+ adev->virt.fw_reserve.p_pf2vf = NULL;
+ } else {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
+
+ amdgpu_virt_read_pf2vf_data(adev);
+ }
}
}
@@ -701,23 +765,38 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
if (adev->mman.fw_vram_usage_va) {
- adev->virt.fw_reserve.p_pf2vf =
- (struct amd_sriov_msg_pf2vf_info_header *)
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
- adev->virt.fw_reserve.p_vf2pf =
- (struct amd_sriov_msg_vf2pf_info_header *)
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
- adev->virt.fw_reserve.ras_telemetry =
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10));
+ if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->mman.fw_vram_usage_va +
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset);
+ adev->virt.fw_reserve.p_vf2pf =
+ (struct amd_sriov_msg_vf2pf_info_header *)
+ (adev->mman.fw_vram_usage_va +
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset +
+ (AMD_SRIOV_MSG_SIZE_KB << 10));
+ adev->virt.fw_reserve.ras_telemetry =
+ (adev->mman.fw_vram_usage_va +
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset);
+ } else {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
+ adev->virt.fw_reserve.p_vf2pf =
+ (struct amd_sriov_msg_vf2pf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
+ adev->virt.fw_reserve.ras_telemetry =
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
+ }
} else if (adev->mman.drv_vram_usage_va) {
adev->virt.fw_reserve.p_pf2vf =
(struct amd_sriov_msg_pf2vf_info_header *)
- (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
adev->virt.fw_reserve.p_vf2pf =
(struct amd_sriov_msg_vf2pf_info_header *)
- (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
adev->virt.fw_reserve.ras_telemetry =
- (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10));
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
}
amdgpu_virt_read_pf2vf_data(adev);
@@ -816,7 +895,7 @@ static bool amdgpu_virt_init_req_data(struct amdgpu_device *adev, u32 reg)
break;
default: /* other chip doesn't support SRIOV */
is_sriov = false;
- DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
+ dev_err(adev->dev, "Unknown asic type: %d!\n", adev->asic_type);
break;
}
}
@@ -828,17 +907,230 @@ static void amdgpu_virt_init_ras(struct amdgpu_device *adev)
{
ratelimit_state_init(&adev->virt.ras.ras_error_cnt_rs, 5 * HZ, 1);
ratelimit_state_init(&adev->virt.ras.ras_cper_dump_rs, 5 * HZ, 1);
+ ratelimit_state_init(&adev->virt.ras.ras_chk_criti_rs, 5 * HZ, 1);
ratelimit_set_flags(&adev->virt.ras.ras_error_cnt_rs,
RATELIMIT_MSG_ON_RELEASE);
ratelimit_set_flags(&adev->virt.ras.ras_cper_dump_rs,
RATELIMIT_MSG_ON_RELEASE);
+ ratelimit_set_flags(&adev->virt.ras.ras_chk_criti_rs,
+ RATELIMIT_MSG_ON_RELEASE);
mutex_init(&adev->virt.ras.ras_telemetry_mutex);
+ mutex_init(&adev->virt.access_req_mutex);
adev->virt.ras.cper_rptr = 0;
}
+static uint8_t amdgpu_virt_crit_region_calc_checksum(uint8_t *buf_start, uint8_t *buf_end)
+{
+ uint32_t sum = 0;
+
+ if (buf_start >= buf_end)
+ return 0;
+
+ for (; buf_start < buf_end; buf_start++)
+ sum += buf_start[0];
+
+ return 0xffffffff - sum;
+}
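Note that the 32-bit complement is truncated to the u8 return type, so only the low byte survives. A quick worked example of the arithmetic:

/* Example: for bytes { 0x01, 0x02, 0x03 } the sum is 0x06,
 * and 0xffffffff - 0x06 = 0xfffffff9, truncated to u8 => 0xf9.
 */
uint8_t buf[] = { 0x01, 0x02, 0x03 };
uint8_t csum = amdgpu_virt_crit_region_calc_checksum(buf, buf + sizeof(buf));
/* csum == 0xf9 */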
+
+int amdgpu_virt_init_critical_region(struct amdgpu_device *adev)
+{
+ struct amd_sriov_msg_init_data_header *init_data_hdr = NULL;
+ u64 init_hdr_offset = adev->virt.init_data_header.offset;
+ u64 init_hdr_size = (u64)adev->virt.init_data_header.size_kb << 10; /* KB → bytes */
+ u64 vram_size;
+ u64 end;
+ int r = 0;
+ uint8_t checksum = 0;
+
+ /* Skip below init if critical region version != v2 */
+ if (adev->virt.req_init_data_ver != GPU_CRIT_REGION_V2)
+ return 0;
+
+ if (init_hdr_offset < 0) {
+ dev_err(adev->dev, "Invalid init header offset\n");
+ return -EINVAL;
+ }
+
+ vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
+ if (!vram_size || vram_size == U32_MAX)
+ return -EINVAL;
+ vram_size <<= 20;
+
+ if (check_add_overflow(init_hdr_offset, init_hdr_size, &end) || end > vram_size) {
+ dev_err(adev->dev, "init_data_header exceeds VRAM size, exiting\n");
+ return -EINVAL;
+ }
+
+ /* Allocate for init_data_hdr */
+ init_data_hdr = kzalloc(sizeof(struct amd_sriov_msg_init_data_header), GFP_KERNEL);
+ if (!init_data_hdr)
+ return -ENOMEM;
+
+ amdgpu_device_vram_access(adev, (uint64_t)init_hdr_offset, (uint32_t *)init_data_hdr,
+ sizeof(struct amd_sriov_msg_init_data_header), false);
+
+ /* Table validation */
+ if (strncmp(init_data_hdr->signature,
+ AMDGPU_SRIOV_CRIT_DATA_SIGNATURE,
+ AMDGPU_SRIOV_CRIT_DATA_SIG_LEN) != 0) {
+ dev_err(adev->dev, "Invalid init data signature: %.4s\n",
+ init_data_hdr->signature);
+ r = -EINVAL;
+ goto out;
+ }
+
+ checksum = amdgpu_virt_crit_region_calc_checksum(
+ (uint8_t *)&init_data_hdr->initdata_offset,
+ (uint8_t *)init_data_hdr +
+ sizeof(struct amd_sriov_msg_init_data_header));
+ if (checksum != init_data_hdr->checksum) {
+ dev_err(adev->dev, "Found unmatching checksum from calculation 0x%x and init_data 0x%x\n",
+ checksum, init_data_hdr->checksum);
+ r = -EINVAL;
+ goto out;
+ }
+
+ memset(&adev->virt.crit_regn, 0, sizeof(adev->virt.crit_regn));
+ memset(adev->virt.crit_regn_tbl, 0, sizeof(adev->virt.crit_regn_tbl));
+
+ adev->virt.crit_regn.offset = init_data_hdr->initdata_offset;
+ adev->virt.crit_regn.size_kb = init_data_hdr->initdata_size_in_kb;
+
+ /* Validation and initialization for each table entry */
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_IPD_TABLE_ID)) {
+ if (!init_data_hdr->ip_discovery_size_in_kb ||
+ init_data_hdr->ip_discovery_size_in_kb > DISCOVERY_TMR_SIZE) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_IPD_TABLE_ID],
+ init_data_hdr->ip_discovery_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].offset =
+ init_data_hdr->ip_discovery_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb =
+ init_data_hdr->ip_discovery_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID)) {
+ if (!init_data_hdr->vbios_img_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID],
+ init_data_hdr->vbios_img_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].offset =
+ init_data_hdr->vbios_img_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].size_kb =
+ init_data_hdr->vbios_img_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID)) {
+ if (!init_data_hdr->ras_tele_info_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID],
+ init_data_hdr->ras_tele_info_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset =
+ init_data_hdr->ras_tele_info_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].size_kb =
+ init_data_hdr->ras_tele_info_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID)) {
+ if (!init_data_hdr->dataexchange_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID],
+ init_data_hdr->dataexchange_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset =
+ init_data_hdr->dataexchange_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb =
+ init_data_hdr->dataexchange_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID)) {
+ if (!init_data_hdr->bad_page_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID],
+ init_data_hdr->bad_page_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].offset =
+ init_data_hdr->bad_page_info_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].size_kb =
+ init_data_hdr->bad_page_size_in_kb;
+ }
+
+ /* Validation for critical region info */
+ if (adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb > DISCOVERY_TMR_SIZE) {
+ dev_err(adev->dev, "Invalid IP discovery size: 0x%x\n",
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ /* reserved memory starts at the crit region base offset, with a size of 5MB */
+ adev->mman.fw_vram_usage_start_offset = adev->virt.crit_regn.offset;
+ adev->mman.fw_vram_usage_size = adev->virt.crit_regn.size_kb << 10;
+ dev_info(adev->dev,
+ "critical region v%d requested to reserve memory start at %08llx with %llu KB.\n",
+ init_data_hdr->version,
+ adev->mman.fw_vram_usage_start_offset,
+ adev->mman.fw_vram_usage_size >> 10);
+
+ adev->virt.is_dynamic_crit_regn_enabled = true;
+
+out:
+ kfree(init_data_hdr);
+ init_data_hdr = NULL;
+
+ return r;
+}
+
+int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
+ int data_id, uint8_t *binary, u32 *size)
+{
+ uint32_t data_offset = 0;
+ uint32_t data_size = 0;
+ enum amd_sriov_msg_table_id_enum data_table_id = data_id;
+
+ if (data_table_id >= AMD_SRIOV_MSG_MAX_TABLE_ID)
+ return -EINVAL;
+
+ data_offset = adev->virt.crit_regn_tbl[data_table_id].offset;
+ data_size = adev->virt.crit_regn_tbl[data_table_id].size_kb << 10;
+
+ /* Validate on input params */
+ if (!binary || !size || *size < (uint64_t)data_size)
+ return -EINVAL;
+
+ /* Proceed to copy the dynamic content */
+ amdgpu_device_vram_access(adev,
+ (uint64_t)data_offset, (uint32_t *)binary, data_size, false);
+ *size = (uint64_t)data_size;
+
+ dev_dbg(adev->dev,
+ "Got %s info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n",
+ amdgpu_virt_dynamic_crit_table_name[data_id], data_offset, data_size);
+
+ return 0;
+}
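A hedged sketch of a consumer: pulling the VBIOS image out of the dynamic critical region through the new accessor. Sizing the buffer from the table entry is an assumption here; the real callers live elsewhere in the series.

/* Sketch: copy one dynamic table out of VRAM. */
u32 size = adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].size_kb << 10;
uint8_t *vbios = kzalloc(size, GFP_KERNEL);

if (vbios &&
    !amdgpu_virt_get_dynamic_data_info(adev, AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID,
				       vbios, &size)) {
	/* on success, size holds the number of bytes copied */
}
kfree(vbios);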
+
void amdgpu_virt_init(struct amdgpu_device *adev)
{
bool is_sriov = false;
@@ -1286,7 +1578,7 @@ amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block bloc
case AMDGPU_RAS_BLOCK__MPIO:
return RAS_TELEMETRY_GPU_BLOCK_MPIO;
default:
- DRM_WARN_ONCE("Unsupported SRIOV RAS telemetry block 0x%x\n",
+ dev_warn(adev->dev, "Unsupported SRIOV RAS telemetry block 0x%x\n",
block);
return RAS_TELEMETRY_GPU_BLOCK_COUNT;
}
@@ -1301,7 +1593,7 @@ static int amdgpu_virt_cache_host_error_counts(struct amdgpu_device *adev,
checksum = host_telemetry->header.checksum;
used_size = host_telemetry->header.used_size;
- if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
return 0;
tmp = kmemdup(&host_telemetry->body.error_count, used_size, GFP_KERNEL);
@@ -1380,7 +1672,7 @@ amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev,
checksum = host_telemetry->header.checksum;
used_size = host_telemetry->header.used_size;
- if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
return -EINVAL;
cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL);
@@ -1501,3 +1793,55 @@ void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev)
if (virt->ops && virt->ops->req_bad_pages)
virt->ops->req_bad_pages(adev);
}
+
+static int amdgpu_virt_cache_chk_criti_hit(struct amdgpu_device *adev,
+ struct amdsriov_ras_telemetry *host_telemetry,
+ bool *hit)
+{
+ struct amd_sriov_ras_chk_criti *tmp = NULL;
+ uint32_t checksum, used_size;
+
+ checksum = host_telemetry->header.checksum;
+ used_size = host_telemetry->header.used_size;
+
+ if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
+ return 0;
+
+ tmp = kmemdup(&host_telemetry->body.chk_criti, used_size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0))
+ goto out;
+
+ if (hit)
+ *hit = tmp->hit ? true : false;
+
+out:
+ kfree(tmp);
+
+ return 0;
+}
+
+int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int r = -EPERM;
+
+ if (!virt->ops || !virt->ops->req_ras_chk_criti)
+ return -EOPNOTSUPP;
+
+ /* The host allows 15 RAS telemetry requests per 60 seconds, after which it
+ * will ignore incoming guest messages. Ratelimit the guest messages to
+ * prevent guest self-DoS.
+ */
+ if (__ratelimit(&virt->ras.ras_chk_criti_rs)) {
+ mutex_lock(&virt->ras.ras_telemetry_mutex);
+ if (!virt->ops->req_ras_chk_criti(adev, addr))
+ r = amdgpu_virt_cache_chk_criti_hit(
+ adev, virt->fw_reserve.ras_telemetry, hit);
+ mutex_unlock(&virt->ras.ras_telemetry_mutex);
+ }
+
+ return r;
+}
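A hedged sketch of the intended caller pattern; fault_addr is a hypothetical faulting GPU address, not an identifier from this patch. On a suspicious VF access, the guest asks the host whether the address landed in its critical region and only escalates on a confirmed hit.

/* Sketch only: fault_addr is a placeholder for the faulting address. */
bool hit = false;

if (!amdgpu_virt_check_vf_critical_region(adev, fault_addr, &hit) && hit)
	dev_err(adev->dev,
		"VF access hit host critical region at 0x%llx\n", fault_addr);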
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 577c6194db78..01d5bca2dee1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -54,6 +54,12 @@
#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 2
+/* Signature used to validate the SR-IOV dynamic critical region init data header ("INDA") */
+#define AMDGPU_SRIOV_CRIT_DATA_SIGNATURE "INDA"
+#define AMDGPU_SRIOV_CRIT_DATA_SIG_LEN 4
+
+#define IS_SRIOV_CRIT_REGN_ENTRY_VALID(hdr, id) ((hdr)->valid_tables & (1 << (id)))
+
enum amdgpu_sriov_vf_mode {
SRIOV_VF_MODE_BARE_METAL = 0,
SRIOV_VF_MODE_ONE_VF,
@@ -98,6 +104,7 @@ struct amdgpu_virt_ops {
int (*req_ras_err_count)(struct amdgpu_device *adev);
int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr);
int (*req_bad_pages)(struct amdgpu_device *adev);
+ int (*req_ras_chk_criti)(struct amdgpu_device *adev, u64 addr);
};
/*
@@ -143,6 +150,7 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_RAS_CAPS = (1 << 9),
AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10),
AMDGIM_FEATURE_RAS_CPER = (1 << 11),
+ AMDGIM_FEATURE_XGMI_TA_EXT_PEER_LINK = (1 << 12),
};
enum AMDGIM_REG_ACCESS_FLAG {
@@ -152,8 +160,10 @@ enum AMDGIM_REG_ACCESS_FLAG {
AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
/* Use RLC to program GC regs */
AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
- /* Use PSP to program L1_TLB_CNTL*/
+ /* Use PSP to program L1_TLB_CNTL */
AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN = (1 << 3),
+ /* Use RLCG to program SQ_CONFIG1 */
+ AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG = (1 << 4),
};
struct amdgim_pf2vf_info_v1 {
@@ -250,10 +260,20 @@ struct amdgpu_virt_ras_err_handler_data {
struct amdgpu_virt_ras {
struct ratelimit_state ras_error_cnt_rs;
struct ratelimit_state ras_cper_dump_rs;
+ struct ratelimit_state ras_chk_criti_rs;
struct mutex ras_telemetry_mutex;
uint64_t cper_rptr;
};
+#define AMDGPU_VIRT_CAPS_LIST(X) X(AMDGPU_VIRT_CAP_POWER_LIMIT)
+
+DECLARE_ATTR_CAP_CLASS(amdgpu_virt, AMDGPU_VIRT_CAPS_LIST);
+
+struct amdgpu_virt_region {
+ uint32_t offset;
+ uint32_t size_kb;
+};
+
/* GPU virtualization */
struct amdgpu_virt {
uint32_t caps;
@@ -265,12 +285,14 @@ struct amdgpu_virt {
struct amdgpu_irq_src rcv_irq;
struct work_struct flr_work;
- struct work_struct bad_pages_work;
+ struct work_struct req_bad_pages_work;
+ struct work_struct handle_bad_pages_work;
struct amdgpu_mm_table mm_table;
const struct amdgpu_virt_ops *ops;
struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
+ struct amdgpu_virt_caps virt_caps;
uint32_t gim_feature;
uint32_t reg_access_mode;
int req_init_data_ver;
@@ -279,6 +301,12 @@ struct amdgpu_virt {
bool ras_init_done;
uint32_t reg_access;
+ /* dynamic(v2) critical regions */
+ struct amdgpu_virt_region init_data_header;
+ struct amdgpu_virt_region crit_regn;
+ struct amdgpu_virt_region crit_regn_tbl[AMD_SRIOV_MSG_MAX_TABLE_ID];
+ bool is_dynamic_crit_regn_enabled;
+
/* vf2pf message */
struct delayed_work vf2pf_work;
uint32_t vf2pf_update_interval_ms;
@@ -297,10 +325,15 @@ struct amdgpu_virt {
/* Spinlock to protect access to the RLCG register interface */
spinlock_t rlcg_reg_lock;
+ struct mutex access_req_mutex;
+
union amd_sriov_ras_caps ras_en_caps;
union amd_sriov_ras_caps ras_telemetry_en_caps;
struct amdgpu_virt_ras ras;
struct amd_sriov_ras_telemetry_error_count count_cache;
+
+ /* hibernate and resume with different VF feature for xgmi enabled system */
+ bool is_xgmi_node_migrate_enabled;
};
struct amdgpu_video_codec_info;
@@ -343,6 +376,10 @@ struct amdgpu_video_codec_info;
#define amdgpu_sriov_rlcg_error_report_enabled(adev) \
(amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))
+#define amdgpu_sriov_reg_access_sq_config(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG)))
+
#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
@@ -361,6 +398,9 @@ struct amdgpu_video_codec_info;
#define amdgpu_sriov_ras_cper_en(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CPER)
+#define amdgpu_sriov_xgmi_ta_ext_peer_link_en(adev) \
+((adev)->virt.gim_feature & AMDGIM_FEATURE_XGMI_TA_EXT_PEER_LINK)
+
static inline bool is_virtual_machine(void)
{
#if defined(CONFIG_X86)
@@ -386,6 +426,10 @@ static inline bool is_virtual_machine(void)
((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE)
#define amdgpu_sriov_is_mes_info_enable(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_MES_INFO_ENABLE)
+
+#define amdgpu_virt_xgmi_migrate_enabled(adev) \
+ ((adev)->virt.is_xgmi_node_migrate_enabled && (adev)->gmc.xgmi.node_segment_size != 0)
+
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
@@ -403,6 +447,10 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
void amdgpu_virt_init(struct amdgpu_device *adev);
+int amdgpu_virt_init_critical_region(struct amdgpu_device *adev);
+int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
+ int data_id, uint8_t *binary, u32 *size);
+
bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev);
@@ -434,4 +482,5 @@ int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
enum amdgpu_ras_block block);
void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev);
+int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 155bb9891a17..79bad9cbe2ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -14,7 +14,6 @@
#include "dce_v8_0.h"
#endif
#include "dce_v10_0.h"
-#include "dce_v11_0.h"
#include "ivsrcid/ivsrcid_vislands30.h"
#include "amdgpu_vkms.h"
#include "amdgpu_display.h"
@@ -581,13 +580,6 @@ static int amdgpu_vkms_hw_init(struct amdgpu_ip_block *ip_block)
case CHIP_TONGA:
dce_v10_0_disable_dce(adev);
break;
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- case CHIP_POLARIS10:
- case CHIP_POLARIS11:
- case CHIP_VEGAM:
- dce_v11_0_disable_dce(adev);
- break;
case CHIP_TOPAZ:
#ifdef CONFIG_DRM_AMDGPU_SI
case CHIP_HAINAN:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 3911c78f8282..a67285118c37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -128,43 +128,14 @@ struct amdgpu_vm_tlb_seq_struct {
};
/**
- * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
- *
- * @adev: amdgpu_device pointer
- * @vm: amdgpu_vm pointer
- * @pasid: the pasid the VM is using on this GPU
- *
- * Set the pasid this VM is using on this GPU, can also be used to remove the
- * pasid by passing in zero.
+ * amdgpu_vm_assert_locked - check if VM is correctly locked
+ * @vm: the VM which should be tested
*
+ * Asserts that the VM root PD is locked.
*/
-int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- u32 pasid)
+static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm)
{
- int r;
-
- if (vm->pasid == pasid)
- return 0;
-
- if (vm->pasid) {
- r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
- if (r < 0)
- return r;
-
- vm->pasid = 0;
- }
-
- if (pasid) {
- r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
- GFP_KERNEL));
- if (r < 0)
- return r;
-
- vm->pasid = pasid;
- }
-
-
- return 0;
+ dma_resv_assert_held(vm->root.bo->tbo.base.resv);
}
/**
@@ -181,6 +152,7 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
struct amdgpu_bo *bo = vm_bo->bo;
vm_bo->moved = true;
+ amdgpu_vm_assert_locked(vm);
spin_lock(&vm_bo->vm->status_lock);
if (bo->tbo.type == ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm->evicted);
@@ -198,6 +170,7 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
spin_unlock(&vm_bo->vm->status_lock);
@@ -213,6 +186,7 @@ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
spin_unlock(&vm_bo->vm->status_lock);
@@ -260,6 +234,7 @@ static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
if (vm_bo->bo->parent) {
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
@@ -279,6 +254,7 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->done);
spin_unlock(&vm_bo->vm->status_lock);
@@ -295,10 +271,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
{
struct amdgpu_vm_bo_base *vm_bo, *tmp;
+ amdgpu_vm_assert_locked(vm);
+
spin_lock(&vm->status_lock);
list_splice_init(&vm->done, &vm->invalidated);
list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
vm_bo->moved = true;
+
list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
struct amdgpu_bo *bo = vm_bo->bo;
@@ -327,6 +306,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
bool shared;
+ dma_resv_assert_held(bo->tbo.base.resv);
spin_lock(&vm->status_lock);
shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
if (base->shared != shared) {
@@ -485,6 +465,46 @@ int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
}
/**
+ * amdgpu_vm_lock_done_list - lock all BOs on the done list
+ * @vm: vm providing the BOs
+ * @exec: drm execution context
+ * @num_fences: number of extra fences to reserve
+ *
+ * Lock the BOs on the done list in the DRM execution context.
+ */
+int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ struct list_head *prev = &vm->done;
+ struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
+ int ret;
+
+ /* We can only trust prev->next while holding the lock */
+ spin_lock(&vm->status_lock);
+ while (!list_is_head(prev->next, &vm->done)) {
+ bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status);
+
+ bo = bo_va->base.bo;
+ if (bo) {
+ amdgpu_bo_ref(bo);
+ spin_unlock(&vm->status_lock);
+
+ ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1);
+ amdgpu_bo_unref(&bo);
+ if (unlikely(ret))
+ return ret;
+
+ spin_lock(&vm->status_lock);
+ }
+ prev = prev->next;
+ }
+ spin_unlock(&vm->status_lock);
+
+ return 0;
+}
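A hedged usage sketch: the helper is meant to be called inside a drm_exec retry loop alongside amdgpu_vm_lock_pd(), so contention on any BO simply restarts the locking round.

/* Sketch: lock the root PD plus every BO currently on the done list. */
struct drm_exec exec;
int r;

drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
	r = amdgpu_vm_lock_pd(vm, &exec, 2);
	drm_exec_retry_on_contention(&exec);
	if (!r)
		r = amdgpu_vm_lock_done_list(vm, &exec, 2);
	drm_exec_retry_on_contention(&exec);
	if (r)
		break;
}
drm_exec_fini(&exec);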
+
+/**
* amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
*
* @adev: amdgpu device pointer
@@ -616,18 +636,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
spin_unlock(&vm->status_lock);
bo = bo_base->bo;
-
- if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
- struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
-
- pr_warn_ratelimited("Evicted user BO is not reserved\n");
- if (ti) {
- pr_warn_ratelimited("pid %d\n", ti->pid);
- amdgpu_vm_put_task_info(ti);
- }
-
- return -EINVAL;
- }
+ dma_resv_assert_held(bo->tbo.base.resv);
r = validate(param, bo);
if (r)
@@ -654,22 +663,31 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* Check if all VM PDs/PTs are ready for updates
*
* Returns:
- * True if VM is not evicting.
+ * True if the VM is not evicting and none of its entities are stopped.
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
- bool empty;
bool ret;
+ amdgpu_vm_assert_locked(vm);
+
amdgpu_vm_eviction_lock(vm);
ret = !vm->evicting;
amdgpu_vm_eviction_unlock(vm);
spin_lock(&vm->status_lock);
- empty = list_empty(&vm->evicted);
+ ret &= list_empty(&vm->evicted);
spin_unlock(&vm->status_lock);
- return ret && empty;
+ spin_lock(&vm->immediate.lock);
+ ret &= !vm->immediate.stopped;
+ spin_unlock(&vm->immediate.lock);
+
+ spin_lock(&vm->delayed.lock);
+ ret &= !vm->delayed.stopped;
+ spin_unlock(&vm->delayed.lock);
+
+ return ret;
}
/**
@@ -827,9 +845,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
}
if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
- r = amdgpu_fence_emit(ring, &fence, NULL, 0);
+ r = amdgpu_fence_emit(ring, job->hw_vm_fence, 0);
if (r)
return r;
+ fence = &job->hw_vm_fence->base;
+ /* get a ref for the job */
+ dma_fence_get(fence);
}
if (vm_flush_needed) {
@@ -951,6 +972,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
LIST_HEAD(relocated);
int r, idx;
+ amdgpu_vm_assert_locked(vm);
+
spin_lock(&vm->status_lock);
list_splice_init(&vm->relocated, &relocated);
spin_unlock(&vm->status_lock);
@@ -966,7 +989,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
params.vm = vm;
params.immediate = immediate;
- r = vm->update_funcs->prepare(&params, NULL);
+ r = vm->update_funcs->prepare(&params, NULL,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES);
if (r)
goto error;
@@ -1045,7 +1069,7 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
}
/* Prepare a TLB flush fence to be attached to PTs */
- if (!params->unlocked && vm->is_compute_context) {
+ if (!params->unlocked) {
amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
/* Makes sure no PD/PT is freed before the flush */
@@ -1135,7 +1159,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
dma_fence_put(tmp);
}
- r = vm->update_funcs->prepare(&params, sync);
+ r = vm->update_funcs->prepare(&params, sync,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE);
if (r)
goto error_free;
@@ -1271,7 +1296,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
} else {
struct drm_gem_object *obj = &bo->tbo.base;
- if (obj->import_attach && bo_va->is_xgmi) {
+ if (drm_gem_is_imported(obj) && bo_va->is_xgmi) {
struct dma_buf *dma_buf = obj->import_attach->dmabuf;
struct drm_gem_object *gobj = dma_buf->priv;
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
@@ -1328,13 +1353,14 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
/* normally, bo_va->flags only contains the READABLE and WRITEABLE bits here,
* but just in case, we filter the flags first
*/
- if (!(mapping->flags & AMDGPU_PTE_READABLE))
+ if (!(mapping->flags & AMDGPU_VM_PAGE_READABLE))
update_flags &= ~AMDGPU_PTE_READABLE;
- if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
+ if (!(mapping->flags & AMDGPU_VM_PAGE_WRITEABLE))
update_flags &= ~AMDGPU_PTE_WRITEABLE;
/* Apply ASIC specific mapping flags */
- amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
+ amdgpu_gmc_get_vm_pte(adev, vm, bo, mapping->flags,
+ &update_flags);
trace_amdgpu_vm_bo_update(mapping);
@@ -1475,7 +1501,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
struct dma_fence *fence)
{
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_add_prt_cb(adev, fence);
kfree(mapping);
}
@@ -1631,7 +1657,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
* validation
*/
if (vm->is_compute_context &&
- bo_va->base.bo->tbo.base.import_attach &&
+ drm_gem_is_imported(&bo_va->base.bo->tbo.base) &&
(!bo_va->base.bo->tbo.resource ||
bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
amdgpu_vm_bo_evicted_user(&bo_va->base);
@@ -1754,7 +1780,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
list_add(&mapping->list, &bo_va->invalids);
amdgpu_vm_it_insert(mapping, &vm->va);
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_prt_get(adev);
if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
@@ -1814,7 +1840,7 @@ static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t saddr, uint64_t offset,
- uint64_t size, uint64_t flags)
+ uint64_t size, uint32_t flags)
{
struct amdgpu_bo_va_mapping *mapping, *tmp;
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -1873,7 +1899,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t saddr, uint64_t offset,
- uint64_t size, uint64_t flags)
+ uint64_t size, uint32_t flags)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -1929,6 +1955,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_vm *vm = bo_va->base.vm;
bool valid = true;
+ int r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -1949,6 +1976,17 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
return -ENOENT;
}
+ /* It is unlikely that a userq mapping has not been idled by the time
+ * userspace issues a GEM unmap IOCTL, unless the unmap is being
+ * forced from user space.
+ */
+ if (unlikely(atomic_read(&bo_va->userq_va_mapped) > 0)) {
+ r = amdgpu_userq_gem_va_unmap_validate(adev, mapping, saddr);
+ if (unlikely(r == -EBUSY))
+ dev_warn_once(adev->dev,
+ "Attempt to unmap an active userq buffer\n");
+ }
+
list_del(&mapping->list);
amdgpu_vm_it_remove(mapping, &vm->va);
mapping->bo_va = NULL;
@@ -2055,7 +2093,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo *bo = before->bo_va->base.bo;
amdgpu_vm_it_insert(before, &vm->va);
- if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (before->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_prt_get(adev);
if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
@@ -2070,7 +2108,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo *bo = after->bo_va->base.bo;
amdgpu_vm_it_insert(after, &vm->va);
- if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (after->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_prt_get(adev);
if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
@@ -2395,10 +2433,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
else
adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
- DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
- vm_size, adev->vm_manager.num_level + 1,
- adev->vm_manager.block_size,
- adev->vm_manager.fragment_size);
+ dev_info(
+ adev->dev,
+ "vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
+ vm_size, adev->vm_manager.num_level + 1,
+ adev->vm_manager.block_size, adev->vm_manager.fragment_size);
}
/**
@@ -2409,13 +2448,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
*/
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
- DMA_RESV_USAGE_BOOKKEEP,
- true, timeout);
+ timeout = drm_sched_entity_flush(&vm->immediate, timeout);
if (timeout <= 0)
return timeout;
- return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
+ return drm_sched_entity_flush(&vm->delayed, timeout);
}
static void amdgpu_vm_destroy_task_info(struct kref *kref)
@@ -2447,7 +2484,8 @@ amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
*/
void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
{
- kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
+ if (task_info)
+ kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
}
/**
@@ -2507,11 +2545,11 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
if (!vm->task_info)
return;
- if (vm->task_info->pid == current->pid)
+ if (vm->task_info->task.pid == current->pid)
return;
- vm->task_info->pid = current->pid;
- get_task_comm(vm->task_info->task_name, current);
+ vm->task_info->task.pid = current->pid;
+ get_task_comm(vm->task_info->task.comm, current);
if (current->group_leader->mm != current->mm)
return;
@@ -2526,6 +2564,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @vm: requested vm
* @xcp_id: GPU partition selection id
+ * @pasid: the pasid the VM is using on this GPU
*
* Init @vm fields.
*
@@ -2533,7 +2572,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
* 0 for success, error for failure.
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int32_t xcp_id)
+ int32_t xcp_id, uint32_t pasid)
{
struct amdgpu_bo *root_bo;
struct amdgpu_bo_vm *root;
@@ -2564,8 +2603,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_GFX);
- DRM_DEBUG_DRIVER("VM update mode is %s\n",
- vm->use_cpu_for_update ? "CPU" : "SDMA");
+ dev_dbg(adev->dev, "VM update mode is %s\n",
+ vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update &&
!amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
@@ -2607,7 +2646,16 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
r = amdgpu_vm_create_task_info(vm);
if (r)
- DRM_DEBUG("Failed to create task info for VM\n");
+ dev_dbg(adev->dev, "Failed to create task info for VM\n");
+
+ /* Store new PASID in XArray (if non-zero) */
+ if (pasid != 0) {
+ r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, GFP_KERNEL));
+ if (r < 0)
+ goto error_free_root;
+
+ vm->pasid = pasid;
+ }
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_bo_unref(&root_bo);
@@ -2615,6 +2663,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return 0;
error_free_root:
+ /* If PASID was partially set, erase it from XArray before failing */
+ if (vm->pasid != 0) {
+ xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
+ vm->pasid = 0;
+ }
amdgpu_vm_pt_free_root(adev, vm);
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_bo_unref(&root_bo);
@@ -2658,8 +2711,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
/* Update VM state */
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
- DRM_DEBUG_DRIVER("VM update mode is %s\n",
- vm->use_cpu_for_update ? "CPU" : "SDMA");
+ dev_dbg(adev->dev, "VM update mode is %s\n",
+ vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update &&
!amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
@@ -2720,7 +2773,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
- amdgpu_vm_set_pasid(adev, vm, 0);
+ /* Remove PASID mapping before destroying VM */
+ if (vm->pasid != 0) {
+ xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
+ vm->pasid = 0;
+ }
dma_fence_wait(vm->last_unlocked, false);
dma_fence_put(vm->last_unlocked);
dma_fence_wait(vm->last_tlb_flush, false);
@@ -2730,7 +2787,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_put(vm->last_tlb_flush);
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) {
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT && prt_fini_needed) {
amdgpu_vm_prt_fini(adev, vm);
prt_fini_needed = false;
}
@@ -2761,10 +2818,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
- if (vm->reserved_vmid[i]) {
- amdgpu_vmid_free_reserved(adev, i);
- vm->reserved_vmid[i] = false;
- }
+ amdgpu_vmid_free_reserved(adev, vm, i);
}
ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
@@ -2774,7 +2828,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dev_warn(adev->dev,
"VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
- ti->process_name, ti->pid, ti->task_name, ti->tgid);
+ ti->process_name, ti->task.pid, ti->task.comm, ti->tgid);
}
amdgpu_vm_put_task_info(vm->task_info);
@@ -2789,8 +2843,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
*/
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
- unsigned i;
-
/* Concurrent flushes are only possible starting with Vega10 and
* are broken on Navi10 and Navi14.
*/
@@ -2799,11 +2851,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
adev->asic_type == CHIP_NAVI14);
amdgpu_vmid_mgr_init(adev);
- adev->vm_manager.fence_context =
- dma_fence_context_alloc(AMDGPU_MAX_RINGS);
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
- adev->vm_manager.seqno[i] = 0;
-
spin_lock_init(&adev->vm_manager.prt_lock);
atomic_set(&adev->vm_manager.num_prt_users, 0);
@@ -2860,6 +2907,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
union drm_amdgpu_vm *args = data;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
/* No valid flags defined yet */
if (args->in.flags)
@@ -2868,17 +2916,9 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (args->in.op) {
case AMDGPU_VM_OP_RESERVE_VMID:
/* We only have requirement to reserve vmid from gfxhub */
- if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
- amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
- fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
- }
-
- break;
+ return amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
case AMDGPU_VM_OP_UNRESERVE_VMID:
- if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
- amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
- fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
- }
+ amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
break;
default:
return -EINVAL;
@@ -2982,7 +3022,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
error_unlock:
amdgpu_bo_unreserve(root);
if (r < 0)
- DRM_ERROR("Can't handle page fault (%d)\n", r);
+ dev_err(adev->dev, "Can't handle page fault (%d)\n", r);
error_unref:
amdgpu_bo_unref(&root);
@@ -3016,6 +3056,8 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
unsigned int total_done_objs = 0;
unsigned int id = 0;
+ amdgpu_vm_assert_locked(vm);
+
spin_lock(&vm->status_lock);
seq_puts(m, "\tIdle BOs:\n");
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
@@ -3156,3 +3198,12 @@ bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
{
return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
}
+
+void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
+ struct amdgpu_task_info *task_info)
+{
+ dev_err(adev->dev,
+ " Process %s pid %d thread %s pid %d\n",
+ task_info->process_name, task_info->tgid,
+ task_info->task.comm, task_info->task.pid);
+}
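With amdgpu_vm_set_pasid() gone, the PASID is registered once in amdgpu_vm_init() and erased in amdgpu_vm_fini(); lookups stay on the same XArray. A minimal sketch of the read side, assuming a pasid in hand and the usual irq-safe xa_lock discipline:

/* Sketch: resolve a VM from a PASID, e.g. in fault handling. */
struct amdgpu_vm *vm;
unsigned long flags;

xa_lock_irqsave(&adev->vm_manager.pasids, flags);
vm = xa_load(&adev->vm_manager.pasids, pasid);
xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);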
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index f3ad687125ad..15d757c016cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -236,9 +236,8 @@ struct amdgpu_vm_pte_funcs {
};
struct amdgpu_task_info {
+ struct drm_wedge_task_info task;
char process_name[TASK_COMM_LEN];
- char task_name[TASK_COMM_LEN];
- pid_t pid;
pid_t tgid;
struct kref refcount;
};
@@ -309,7 +308,7 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm_update_funcs {
int (*map_table)(struct amdgpu_bo_vm *bo);
int (*prepare)(struct amdgpu_vm_update_params *p,
- struct amdgpu_sync *sync);
+ struct amdgpu_sync *sync, u64 k_job_id);
int (*update)(struct amdgpu_vm_update_params *p,
struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr, uint64_t flags);
@@ -350,12 +349,16 @@ struct amdgpu_vm {
/* Memory statistics for this vm, protected by status_lock */
struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
+ /*
+ * The following lists contain amdgpu_vm_bo_base objects for either
+ * PDs, PTs or per VM BOs. The state transitions are:
+ *
+ * evicted -> relocated (PDs, PTs) or moved (per VM BOs) -> idle
+ */
+
/* Per-VM and PT BOs who needs a validation */
struct list_head evicted;
- /* BOs for user mode queues that need a validation */
- struct list_head evicted_user;
-
/* PT BOs which relocated and their parent need an update */
struct list_head relocated;
@@ -365,15 +368,29 @@ struct amdgpu_vm {
/* All BOs of this VM not currently in the state machine */
struct list_head idle;
+ /*
+ * The following lists contain amdgpu_vm_bo_base objects for BOs which
+ * have their own dma_resv object and do not depend on the root PD. Their
+ * state transitions are:
+ *
+ * evicted_user or invalidated -> done
+ */
+
+ /* BOs for user mode queues that need a validation */
+ struct list_head evicted_user;
+
/* regular invalidated BOs, but not yet updated in the PT */
struct list_head invalidated;
- /* BO mappings freed, but not yet updated in the PT */
- struct list_head freed;
-
/* BOs which are invalidated, has been updated in the PTs */
struct list_head done;
+ /*
+ * This list contains amdgpu_bo_va_mapping objects which have been freed
+ * but not updated in the PTs
+ */
+ struct list_head freed;
+
/* contains the page directory */
struct amdgpu_vm_bo_base root;
struct dma_fence *last_update;
@@ -395,7 +412,7 @@ struct amdgpu_vm {
struct dma_fence *last_unlocked;
unsigned int pasid;
- bool reserved_vmid[AMDGPU_MAX_VMHUBS];
+ struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
bool use_cpu_for_update;
@@ -436,10 +453,6 @@ struct amdgpu_vm_manager {
unsigned int first_kfd_vmid;
bool concurrent_flush;
- /* Handling of VM fences */
- u64 fence_context;
- unsigned seqno[AMDGPU_MAX_RINGS];
-
uint64_t max_pfn;
uint32_t num_level;
uint32_t block_size;
@@ -483,15 +496,14 @@ extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
-int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- u32 pasid);
-
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id, uint32_t pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
unsigned int num_fences);
+int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -539,11 +551,11 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
- uint64_t size, uint64_t flags);
+ uint64_t size, uint32_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
- uint64_t size, uint64_t flags);
+ uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr);
@@ -668,4 +680,12 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
+void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
+ struct amdgpu_task_info *task_info);
+
+#define amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) \
+ list_for_each_entry(mapping, &(bo_va)->valids, list)
+#define amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) \
+ list_for_each_entry(mapping, &(bo_va)->invalids, list)
+
#endif
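
The two iteration macros added above are thin wrappers around list_for_each_entry(). A minimal usage sketch, assuming the caller already holds the VM reservation (the helper name below is hypothetical):

static void example_dump_bo_va_mappings(struct amdgpu_device *adev,
					struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping;

	/* Mappings already reflected in the page tables */
	amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping)
		dev_dbg(adev->dev, "valid:   [0x%llx, 0x%llx]\n",
			mapping->start, mapping->last);

	/* Mappings still waiting for a page table update */
	amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping)
		dev_dbg(adev->dev, "invalid: [0x%llx, 0x%llx]\n",
			mapping->start, mapping->last);
}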
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 0c1ef5850a5e..22e2e5b47341 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -40,12 +40,14 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
*
* @p: see amdgpu_vm_update_params definition
* @sync: sync obj with fences to wait on
+ * @k_job_id: the id for tracing/debug purposes
*
* Returns:
* Negative errno, 0 for success.
*/
static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
- struct amdgpu_sync *sync)
+ struct amdgpu_sync *sync,
+ u64 k_job_id)
{
if (!sync)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 30022123b0bf..f794fb1cc06e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_vm.h"
+#include "amdgpu_job.h"
/*
* amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
@@ -395,7 +396,8 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
params.vm = vm;
params.immediate = immediate;
- r = vm->update_funcs->prepare(&params, NULL);
+ r = vm->update_funcs->prepare(&params, NULL,
+ AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR);
if (r)
goto exit;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 46d9fb433ab2..36805dcfa159 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -40,7 +40,7 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
/* Allocate a new job for @count PTE updates */
static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
- unsigned int count)
+ unsigned int count, u64 k_job_id)
{
enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
: AMDGPU_IB_POOL_DELAYED;
@@ -56,7 +56,7 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
- ndw * 4, pool, &p->job);
+ ndw * 4, pool, &p->job, k_job_id);
if (r)
return r;
@@ -69,16 +69,17 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
*
* @p: see amdgpu_vm_update_params definition
* @sync: amdgpu_sync object with fences to wait for
+ * @k_job_id: identifier of the job, for tracing purposes
*
* Returns:
* Negative errno, 0 for success.
*/
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
- struct amdgpu_sync *sync)
+ struct amdgpu_sync *sync, u64 k_job_id)
{
int r;
- r = amdgpu_vm_sdma_alloc_job(p, 0);
+ r = amdgpu_vm_sdma_alloc_job(p, 0, k_job_id);
if (r)
return r;
@@ -249,7 +250,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
if (r)
return r;
- r = amdgpu_vm_sdma_alloc_job(p, count);
+ r = amdgpu_vm_sdma_alloc_job(p, count,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
index 51cddfa3f1e8..5d26797356a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
@@ -71,7 +71,6 @@ static void amdgpu_tlb_fence_work(struct work_struct *work)
}
static const struct dma_fence_ops amdgpu_tlb_fence_ops = {
- .use_64bit_seqno = true,
.get_driver_name = amdgpu_tlb_fence_get_driver_name,
.get_timeline_name = amdgpu_tlb_fence_get_timeline_name
};
@@ -101,8 +100,8 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm
INIT_WORK(&f->work, amdgpu_tlb_fence_work);
spin_lock_init(&f->lock);
- dma_fence_init(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
- vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
+ dma_fence_init64(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
+ vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
/* TODO: We probably need a separate wq here */
dma_fence_get(&f->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 121ee17b522b..aa78c2ee9e21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -322,6 +322,26 @@ static int vpe_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
+static bool vpe_need_dpm0_at_power_down(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
+ case IP_VERSION(6, 1, 1):
+ return adev->pm.fw_version < 0x0a640500;
+ default:
+ return false;
+ }
+}
+
+static int vpe_get_dpm_level(struct amdgpu_device *adev)
+{
+ struct amdgpu_vpe *vpe = &adev->vpe;
+
+ if (!adev->pm.dpm_enabled)
+ return 0;
+
+ return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv));
+}
+
static void vpe_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
@@ -329,11 +349,17 @@ static void vpe_idle_work_handler(struct work_struct *work)
unsigned int fences = 0;
fences += amdgpu_fence_count_emitted(&adev->vpe.ring);
+ if (fences)
+ goto reschedule;
+
+ if (vpe_need_dpm0_at_power_down(adev) && vpe_get_dpm_level(adev) != 0)
+ goto reschedule;
+
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
+ return;
- if (fences == 0)
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
- else
- schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
+reschedule:
+ schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}
static int vpe_common_init(struct amdgpu_vpe *vpe)
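
The rework above turns the old two-way if/else into an early-exit chain: any reason to stay awake jumps to the reschedule label, and power gating only happens when both the fence count and (on affected firmware) the DPM level agree the engine is idle. The shape of the pattern, as a hedged sketch with hypothetical names:

static void example_idle_work(struct work_struct *work)
{
	struct example_dev *edev =
		container_of(work, struct example_dev, idle_work.work);

	if (example_fences_pending(edev))
		goto reschedule;
	if (example_needs_dpm0(edev) && example_dpm_level(edev) != 0)
		goto reschedule;

	example_power_gate(edev);
	return;

reschedule:
	schedule_delayed_work(&edev->idle_work, EXAMPLE_IDLE_TIMEOUT);
}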
@@ -379,9 +405,10 @@ static int vpe_sw_init(struct amdgpu_ip_block *ip_block)
if (ret)
goto out;
- /* TODO: Add queue reset mask when FW fully supports it */
adev->vpe.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vpe.ring);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vpe.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
ret = amdgpu_vpe_sysfs_reset_mask_init(adev);
if (ret)
goto out;
@@ -435,6 +462,8 @@ static int vpe_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
+ cancel_delayed_work_sync(&adev->vpe.idle_work);
+
vpe_ring_stop(vpe);
/* Power off VPE */
@@ -445,10 +474,6 @@ static int vpe_hw_fini(struct amdgpu_ip_block *ip_block)
static int vpe_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- cancel_delayed_work_sync(&adev->vpe.idle_work);
-
return vpe_hw_fini(ip_block);
}
@@ -874,6 +899,27 @@ static void vpe_ring_end_use(struct amdgpu_ring *ring)
schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}
+static int vpe_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+ AMD_PG_STATE_GATE);
+ if (r)
+ return r;
+ r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+ AMD_PG_STATE_UNGATE);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static ssize_t amdgpu_get_vpe_reset_mask(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -942,6 +988,7 @@ static const struct amdgpu_ring_funcs vpe_ring_funcs = {
.preempt_ib = vpe_ring_preempt_ib,
.begin_use = vpe_ring_begin_use,
.end_use = vpe_ring_end_use,
+ .reset = vpe_ring_reset,
};
static void vpe_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index abdc52b0895a..9d934c07fa6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -234,6 +234,9 @@ static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,
!adev->gmc.vram_vendor)
return 0;
+ if (!ttm_resource_manager_used(&adev->mman.vram_mgr.manager))
+ return 0;
+
return attr->mode;
}
@@ -396,43 +399,33 @@ out:
return ret;
}
-static void amdgpu_dummy_vram_mgr_debug(struct ttm_resource_manager *man,
- struct drm_printer *printer)
+int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,
+ uint64_t address, struct amdgpu_vram_block_info *info)
{
- DRM_DEBUG_DRIVER("Dummy vram mgr debug\n");
-}
-
-static bool amdgpu_dummy_vram_mgr_compatible(struct ttm_resource_manager *man,
- struct ttm_resource *res,
- const struct ttm_place *place,
- size_t size)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr compatible\n");
- return false;
-}
+ struct amdgpu_vram_mgr_resource *vres;
+ struct drm_buddy_block *block;
+ u64 start, size;
+ int ret = -ENOENT;
-static bool amdgpu_dummy_vram_mgr_intersects(struct ttm_resource_manager *man,
- struct ttm_resource *res,
- const struct ttm_place *place,
- size_t size)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr intersects\n");
- return true;
-}
+ mutex_lock(&mgr->lock);
+ list_for_each_entry(vres, &mgr->allocated_vres_list, vres_node) {
+ list_for_each_entry(block, &vres->blocks, link) {
+ start = amdgpu_vram_mgr_block_start(block);
+ size = amdgpu_vram_mgr_block_size(block);
+ if ((start <= address) && (address < (start + size))) {
+ info->start = start;
+ info->size = size;
+ memcpy(&info->task, &vres->task, sizeof(vres->task));
+ ret = 0;
+ goto out;
+ }
+ }
+ }
-static void amdgpu_dummy_vram_mgr_del(struct ttm_resource_manager *man,
- struct ttm_resource *res)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr deleted\n");
-}
+out:
+ mutex_unlock(&mgr->lock);
-static int amdgpu_dummy_vram_mgr_new(struct ttm_resource_manager *man,
- struct ttm_buffer_object *tbo,
- const struct ttm_place *place,
- struct ttm_resource **res)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr new\n");
- return -ENOSPC;
+ return ret;
}
/**
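
With allocations now tracked on allocated_vres_list, a RAS or page-retirement path can attribute a faulting VRAM address to its owner. A hedged caller sketch (bad_addr and the surrounding handler are assumptions, not taken from this patch):

	struct amdgpu_vram_block_info info;

	if (!amdgpu_vram_mgr_query_address_block_info(&adev->mman.vram_mgr,
						      bad_addr, &info))
		dev_err(adev->dev,
			"0x%llx is in block [0x%llx, +0x%llx) allocated by %s (pid %d)\n",
			bad_addr, info.start, info.size,
			info.task.comm, info.task.pid);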
@@ -568,6 +561,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
remaining_size -= size;
}
+ vres->task.pid = task_pid_nr(current);
+ get_task_comm(vres->task.comm, current);
+ list_add_tail(&vres->vres_node, &mgr->allocated_vres_list);
+
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
struct drm_buddy_block *dcc_block;
unsigned long dcc_start;
@@ -645,12 +642,15 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
uint64_t vis_usage = 0;
mutex_lock(&mgr->lock);
+
+ list_del(&vres->vres_node);
+ memset(&vres->task, 0, sizeof(vres->task));
+
list_for_each_entry(block, &vres->blocks, link)
vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
- amdgpu_vram_mgr_do_reserve(man);
-
drm_buddy_free_list(mm, &vres->blocks, vres->flags);
+ amdgpu_vram_mgr_do_reserve(man);
mutex_unlock(&mgr->lock);
atomic64_sub(vis_usage, &mgr->vis_usage);
@@ -783,6 +783,23 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
}
/**
+ * amdgpu_vram_mgr_clear_reset_blocks - reset clear blocks
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Reset the cleared drm buddy blocks.
+ */
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev)
+{
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+ struct drm_buddy *mm = &mgr->mm;
+
+ mutex_lock(&mgr->lock);
+ drm_buddy_reset_clear(mm, false);
+ mutex_unlock(&mgr->lock);
+}
+
+/**
* amdgpu_vram_mgr_intersects - test each drm buddy block for intersection
*
* @man: TTM memory type manager
@@ -879,14 +896,6 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
mutex_unlock(&mgr->lock);
}
-static const struct ttm_resource_manager_func amdgpu_dummy_vram_mgr_func = {
- .alloc = amdgpu_dummy_vram_mgr_new,
- .free = amdgpu_dummy_vram_mgr_del,
- .intersects = amdgpu_dummy_vram_mgr_intersects,
- .compatible = amdgpu_dummy_vram_mgr_compatible,
- .debug = amdgpu_dummy_vram_mgr_debug
-};
-
static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
.alloc = amdgpu_vram_mgr_new,
.free = amdgpu_vram_mgr_del,
@@ -917,18 +926,13 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
mutex_init(&mgr->lock);
INIT_LIST_HEAD(&mgr->reservations_pending);
INIT_LIST_HEAD(&mgr->reserved_pages);
+ INIT_LIST_HEAD(&mgr->allocated_vres_list);
mgr->default_page_size = PAGE_SIZE;
- if (!adev->gmc.is_app_apu) {
- man->func = &amdgpu_vram_mgr_func;
-
- err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
- if (err)
- return err;
- } else {
- man->func = &amdgpu_dummy_vram_mgr_func;
- DRM_INFO("Setup dummy vram mgr\n");
- }
+ man->func = &amdgpu_vram_mgr_func;
+ err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
+ if (err)
+ return err;
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
ttm_resource_manager_set_used(man, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
index b256cbc2bc27..5f5fd9a911c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
@@ -35,12 +35,26 @@ struct amdgpu_vram_mgr {
struct list_head reserved_pages;
atomic64_t vis_usage;
u64 default_page_size;
+ struct list_head allocated_vres_list;
+};
+
+struct amdgpu_vres_task {
+ pid_t pid;
+ char comm[TASK_COMM_LEN];
+};
+
+struct amdgpu_vram_block_info {
+ u64 start;
+ u64 size;
+ struct amdgpu_vres_task task;
};
struct amdgpu_vram_mgr_resource {
struct ttm_resource base;
struct list_head blocks;
unsigned long flags;
+ struct list_head vres_node;
+ struct amdgpu_vres_task task;
};
static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block)
@@ -66,7 +80,13 @@ to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res)
{
- to_amdgpu_vram_mgr_resource(res)->flags |= DRM_BUDDY_CLEARED;
+ struct amdgpu_vram_mgr_resource *ares = to_amdgpu_vram_mgr_resource(res);
+
+ WARN_ON(ares->flags & DRM_BUDDY_CLEARED);
+ ares->flags |= DRM_BUDDY_CLEARED;
}
+int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,
+ uint64_t address, struct amdgpu_vram_block_info *info);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index 322816805bfb..1083db8cea2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -120,6 +120,25 @@ static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
xcp->valid = true;
}
+static void __amdgpu_xcp_set_unique_id(struct amdgpu_xcp_mgr *xcp_mgr,
+ int xcp_id)
+{
+ struct amdgpu_xcp *xcp = &xcp_mgr->xcp[xcp_id];
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ uint32_t inst_mask;
+ uint64_t uid;
+ int i;
+
+ if (!amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask) &&
+ inst_mask) {
+ i = GET_INST(GC, (ffs(inst_mask) - 1));
+ uid = amdgpu_device_get_uid(xcp_mgr->adev->uid_info,
+ AMDGPU_UID_TYPE_XCD, i);
+ if (uid)
+ xcp->unique_id = uid;
+ }
+}
+
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
struct amdgpu_device *adev = xcp_mgr->adev;
@@ -158,6 +177,7 @@ int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
else
xcp_mgr->xcp[i].mem_id = mem_id;
}
+ __amdgpu_xcp_set_unique_id(xcp_mgr, i);
}
xcp_mgr->num_xcps = num_xcps;
@@ -218,15 +238,27 @@ int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode);
}
-int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+static bool __amdgpu_xcp_is_cached_mode_valid(struct amdgpu_xcp_mgr *xcp_mgr)
{
- int mode;
+ if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
+ return true;
if (!amdgpu_sriov_vf(xcp_mgr->adev) &&
xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
- return xcp_mgr->mode;
+ return true;
- if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
+ if (xcp_mgr->mode != AMDGPU_XCP_MODE_NONE &&
+ xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS)
+ return true;
+
+ return false;
+}
+
+int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ int mode;
+
+ if (__amdgpu_xcp_is_cached_mode_valid(xcp_mgr))
return xcp_mgr->mode;
if (!(flags & AMDGPU_XCP_FL_LOCKED))
@@ -394,6 +426,7 @@ void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
+ amdgpu_xcp_drm_dev_free(p_ddev);
}
}
@@ -445,6 +478,222 @@ void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
}
}
+int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
+ u32 hw_ip, u32 hw_prio,
+ struct amdgpu_fpriv *fpriv,
+ unsigned int *num_scheds,
+ struct drm_gpu_scheduler ***scheds)
+{
+ u32 sel_xcp_id;
+ int i;
+ struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
+
+ if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
+ u32 least_ref_cnt = ~0;
+
+ fpriv->xcp_id = 0;
+ for (i = 0; i < xcp_mgr->num_xcps; i++) {
+ u32 total_ref_cnt;
+
+ total_ref_cnt = atomic_read(&xcp_mgr->xcp[i].ref_cnt);
+ if (total_ref_cnt < least_ref_cnt) {
+ fpriv->xcp_id = i;
+ least_ref_cnt = total_ref_cnt;
+ }
+ }
+ }
+ sel_xcp_id = fpriv->xcp_id;
+
+ if (xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
+ *num_scheds =
+ xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
+ *scheds =
+ xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
+ atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
+ dev_dbg(adev->dev, "Selected partition #%d", sel_xcp_id);
+ } else {
+ dev_err(adev->dev, "Failed to schedule partition #%d.", sel_xcp_id);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static void amdgpu_set_xcp_id(struct amdgpu_device *adev,
+ uint32_t inst_idx,
+ struct amdgpu_ring *ring)
+{
+ int xcp_id;
+ enum AMDGPU_XCP_IP_BLOCK ip_blk;
+ uint32_t inst_mask;
+
+ ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
+ if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
+ (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
+ return;
+
+ inst_mask = 1 << inst_idx;
+
+ switch (ring->funcs->type) {
+ case AMDGPU_HW_IP_GFX:
+ case AMDGPU_RING_TYPE_COMPUTE:
+ case AMDGPU_RING_TYPE_KIQ:
+ ip_blk = AMDGPU_XCP_GFX;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ ip_blk = AMDGPU_XCP_SDMA;
+ break;
+ case AMDGPU_RING_TYPE_VCN_ENC:
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ ip_blk = AMDGPU_XCP_VCN;
+ break;
+ default:
+ dev_err(adev->dev, "Not support ring type %d!", ring->funcs->type);
+ return;
+ }
+
+ for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
+ if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
+ ring->xcp_id = xcp_id;
+ dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name,
+ ring->xcp_id);
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
+ break;
+ }
+ }
+}
+
+static void amdgpu_xcp_gpu_sched_update(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ unsigned int sel_xcp_id)
+{
+ unsigned int *num_gpu_sched;
+
+ num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
+ .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
+ adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
+ .sched[(*num_gpu_sched)++] = &ring->sched;
+ dev_dbg(adev->dev, "%s :[%d] gpu_sched[%d][%d] = %d",
+ ring->name, sel_xcp_id, ring->funcs->type,
+ ring->hw_prio, *num_gpu_sched);
+}
+
+static int amdgpu_xcp_sched_list_update(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int i;
+
+ for (i = 0; i < MAX_XCP; i++) {
+ atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
+ memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
+ }
+
+ if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+ return 0;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ ring = adev->rings[i];
+ if (!ring || !ring->sched.ready || ring->no_scheduler)
+ continue;
+
+ amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
+
+ /* VCN may be shared by two partitions under CPX MODE in certain
+ * configs.
+ */
+ if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
+ (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst))
+ amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
+ }
+
+ return 0;
+}
+
+int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->num_rings; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+ ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ amdgpu_set_xcp_id(adev, ring->xcc_id, ring);
+ else
+ amdgpu_set_xcp_id(adev, ring->me, ring);
+ }
+
+ return amdgpu_xcp_sched_list_update(adev);
+}
+
+void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+
+ xcp_mgr->supp_xcp_modes = 0;
+
+ switch (NUM_XCC(adev->gfx.xcc_mask)) {
+ case 8:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_DPX_PARTITION_MODE) |
+ BIT(AMDGPU_QPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 6:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_TPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 4:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_DPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 2:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 1:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+
+ default:
+ break;
+ }
+}
+
+int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ /* TODO:
+ * Stop user queues and threads, and make sure GPU is empty of work.
+ */
+
+ if (flags & AMDGPU_XCP_OPS_KFD)
+ amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
+
+ return 0;
+}
+
+int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ int ret = 0;
+
+ if (flags & AMDGPU_XCP_OPS_KFD) {
+ amdgpu_amdkfd_device_probe(xcp_mgr->adev);
+ amdgpu_amdkfd_device_init(xcp_mgr->adev);
+ /* If KFD init failed, return failure */
+ if (!xcp_mgr->adev->kfd.init_complete)
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
/*====================== xcp sysfs - configuration ======================*/
#define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name) \
static ssize_t amdgpu_xcp_res_sysfs_##_name##_show( \
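
amdgpu_xcp_select_scheds() is now exported and called from common code instead of going through the per-ASIC function table that the old macro wrapped. A minimal call-site sketch, assuming context-entity setup (variable names are illustrative):

	struct drm_gpu_scheduler **scheds;
	unsigned int num_scheds;
	int r;

	/* Pins the file to a partition on first use and bumps its refcount */
	r = amdgpu_xcp_select_scheds(adev, hw_ip, hw_prio, fpriv,
				     &num_scheds, &scheds);
	if (r)
		return r;	/* selected partition has no such scheduler */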
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
index 454b33f889fb..1928d9e224fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
@@ -39,6 +39,8 @@
#define AMDGPU_XCP_NO_PARTITION (~0)
+#define AMDGPU_XCP_OPS_KFD (1 << 0)
+
struct amdgpu_fpriv;
enum AMDGPU_XCP_IP_BLOCK {
@@ -110,6 +112,7 @@ struct amdgpu_xcp {
struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
struct amdgpu_xcp_mgr *xcp_mgr;
struct kobject kobj;
+ uint64_t unique_id;
};
struct amdgpu_xcp_mgr {
@@ -144,10 +147,6 @@ struct amdgpu_xcp_mgr_funcs {
int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int (*resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
- int (*select_scheds)(struct amdgpu_device *adev,
- u32 hw_ip, u32 hw_prio, struct amdgpu_fpriv *fpriv,
- unsigned int *num_scheds, struct drm_gpu_scheduler ***scheds);
- int (*update_partition_sched_list)(struct amdgpu_device *adev);
};
int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
@@ -176,19 +175,18 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
struct drm_file *file_priv);
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
struct amdgpu_ctx_entity *entity);
-
+int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
+ u32 hw_ip, u32 hw_prio,
+ struct amdgpu_fpriv *fpriv,
+ unsigned int *num_scheds,
+ struct drm_gpu_scheduler ***scheds);
+void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr);
+int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev);
+int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags);
+int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags);
void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev);
void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev);
-#define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \
- ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
- (adev)->xcp_mgr->funcs->select_scheds ? \
- (adev)->xcp_mgr->funcs->select_scheds((adev), (e), (c), (d), (x), (y)) : -ENOENT)
-#define amdgpu_xcp_update_partition_sched_list(adev) \
- ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
- (adev)->xcp_mgr->funcs->update_partition_sched_list ? \
- (adev)->xcp_mgr->funcs->update_partition_sched_list(adev) : 0)
-
static inline int amdgpu_xcp_get_num_xcp(struct amdgpu_xcp_mgr *xcp_mgr)
{
if (!xcp_mgr)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index d9ad37711c3e..aad530c46a9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -298,6 +298,9 @@ int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num)
{
int link_map_6_4_x[8] = { 0, 3, 1, 2, 7, 6, 4, 5 };
+ if (adev->gmc.xgmi.num_physical_nodes <= 1)
+ return -EINVAL;
+
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
case IP_VERSION(6, 4, 1):
@@ -333,6 +336,10 @@ static u32 xgmi_v6_4_get_link_status(struct amdgpu_device *adev, int global_link
}
i = global_link_num / n;
+
+ if (!(adev->aid_mask & BIT(i)))
+ return U32_MAX;
+
addr += adev->asic_funcs->encode_ext_smn_addressing(i);
return RREG32_PCIE_EXT(addr);
@@ -342,6 +349,9 @@ int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev, int global_link_num)
{
u32 xgmi_state_reg_val;
+ if (adev->gmc.xgmi.num_physical_nodes <= 1)
+ return -EINVAL;
+
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
case IP_VERSION(6, 4, 1):
@@ -958,28 +968,6 @@ static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_inf
return 0;
}
-static void amdgpu_xgmi_fill_topology_info(struct amdgpu_device *adev,
- struct amdgpu_device *peer_adev)
-{
- struct psp_xgmi_topology_info *top_info = &adev->psp.xgmi_context.top_info;
- struct psp_xgmi_topology_info *peer_info = &peer_adev->psp.xgmi_context.top_info;
-
- for (int i = 0; i < peer_info->num_nodes; i++) {
- if (peer_info->nodes[i].node_id == adev->gmc.xgmi.node_id) {
- for (int j = 0; j < top_info->num_nodes; j++) {
- if (top_info->nodes[j].node_id == peer_adev->gmc.xgmi.node_id) {
- peer_info->nodes[i].num_hops = top_info->nodes[j].num_hops;
- peer_info->nodes[i].is_sharing_enabled =
- top_info->nodes[j].is_sharing_enabled;
- peer_info->nodes[i].num_links =
- top_info->nodes[j].num_links;
- return;
- }
- }
- }
- }
-}
-
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
struct psp_xgmi_topology_info *top_info;
@@ -1065,11 +1053,6 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
/* To do: continue with some node failed or disable the whole hive*/
goto exit_unlock;
}
-
- /* fill the topology info for peers instead of getting from PSP */
- list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- amdgpu_xgmi_fill_topology_info(adev, tmp_adev);
- }
} else {
/* get latest topology info for each device from psp */
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
@@ -1771,16 +1754,25 @@ void amdgpu_xgmi_early_init(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
- adev->gmc.xgmi.max_speed = XGMI_SPEED_25GT;
+ /* 25 GT/s */
+ adev->gmc.xgmi.max_speed = 25;
adev->gmc.xgmi.max_width = 16;
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
case IP_VERSION(9, 5, 0):
- adev->gmc.xgmi.max_speed = XGMI_SPEED_32GT;
+ /* 32 GT/s */
+ adev->gmc.xgmi.max_speed = 32;
adev->gmc.xgmi.max_width = 16;
break;
default:
break;
}
}
+
+void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev,
+ uint16_t max_speed, uint8_t max_width)
+{
+ adev->gmc.xgmi.max_speed = max_speed;
+ adev->gmc.xgmi.max_width = max_width;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index f994be985f42..5f36aff17e79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -25,12 +25,6 @@
#include <drm/task_barrier.h>
#include "amdgpu_ras.h"
-enum amdgpu_xgmi_link_speed {
- XGMI_SPEED_16GT = 16,
- XGMI_SPEED_25GT = 25,
- XGMI_SPEED_32GT = 32
-};
-
struct amdgpu_hive_info {
struct kobject kobj;
uint64_t hive_id;
@@ -97,7 +91,7 @@ struct amdgpu_xgmi {
struct ras_common_if *ras_if;
bool connected_to_cpu;
struct amdgpu_xgmi_ras *ras;
- enum amdgpu_xgmi_link_speed max_speed;
+ uint16_t max_speed;
uint8_t max_width;
};
@@ -130,4 +124,10 @@ int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num);
void amdgpu_xgmi_early_init(struct amdgpu_device *adev);
uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev);
+void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev,
+ uint16_t max_speed, uint8_t max_width);
+
+/* Cleanup macro for use with __free(xgmi_put_hive) */
+DEFINE_FREE(xgmi_put_hive, struct amdgpu_hive_info *, if (_T) amdgpu_put_xgmi_hive(_T))
+
#endif
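
The DEFINE_FREE() class above enables scope-based cleanup from <linux/cleanup.h> for hive references. A hedged sketch of the intended pattern (the function is hypothetical; amdgpu_get_xgmi_hive() is the existing lookup):

static int example_query_hive(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive __free(xgmi_put_hive) =
		amdgpu_get_xgmi_hive(adev);

	if (!hive)
		return -ENODEV;

	/* Use hive here; the reference is dropped automatically on every
	 * return path, with no explicit amdgpu_put_xgmi_hive() call. */
	return 0;
}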
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 92ca13097aaa..3cdb1e0eca37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -23,26 +23,84 @@
#ifndef AMDGV_SRIOV_MSG__H_
#define AMDGV_SRIOV_MSG__H_
-/* unit in kilobytes */
-#define AMD_SRIOV_MSG_VBIOS_OFFSET 0
-#define AMD_SRIOV_MSG_VBIOS_SIZE_KB 64
-#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB
-#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB 4
-#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048
-#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB 2
-#define AMD_SRIOV_RAS_TELEMETRY_SIZE_KB 64
+#define AMD_SRIOV_MSG_SIZE_KB 1
+
/*
- * layout
+ * layout v1
* 0 64KB 65KB 66KB 68KB 132KB
* | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ...
* | 64KB | 1KB | 1KB | 2KB | 64KB | ...
*/
-#define AMD_SRIOV_MSG_SIZE_KB 1
-#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB
-#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
-#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
-#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB)
+/*
+ * layout v2 (offsets are dynamically allocated and the offsets below are examples)
+ * 0 1KB 64KB 65KB 66KB 68KB 132KB
+ * | INITD_H | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ...
+ * | 1KB | 64KB | 1KB | 1KB | 2KB | 64KB | ...
+ *
+ * Note: PF2VF + VF2PF + Bad Page = DataExchange region (allocated contiguously)
+ */
+
+/* v1 layout sizes */
+#define AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1 64
+#define AMD_SRIOV_MSG_PF2VF_SIZE_KB_V1 1
+#define AMD_SRIOV_MSG_VF2PF_SIZE_KB_V1 1
+#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1 2
+#define AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 64
+#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB_V1 \
+ (AMD_SRIOV_MSG_PF2VF_SIZE_KB_V1 + AMD_SRIOV_MSG_VF2PF_SIZE_KB_V1 + \
+ AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1)
+
+/* v1 offsets */
+#define AMD_SRIOV_MSG_VBIOS_OFFSET_V1 0
+#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB_V1 AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1
+#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048
+#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB_V1
+#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 \
+ (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 + AMD_SRIOV_MSG_SIZE_KB)
+#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB_V1 \
+ (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 + AMD_SRIOV_MSG_SIZE_KB)
+#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 \
+ (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB_V1 + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1)
+#define AMD_SRIOV_MSG_INIT_DATA_TOT_SIZE_KB_V1 \
+ (AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1 + AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB_V1 + \
+ AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1)
+
+enum amd_sriov_crit_region_version {
+ GPU_CRIT_REGION_V1 = 1,
+ GPU_CRIT_REGION_V2 = 2,
+};
+
+/* v2 layout offset enum (in order of allocation) */
+enum amd_sriov_msg_table_id_enum {
+ AMD_SRIOV_MSG_IPD_TABLE_ID = 0,
+ AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID,
+ AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID,
+ AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID,
+ AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID,
+ AMD_SRIOV_MSG_INITD_H_TABLE_ID,
+ AMD_SRIOV_MSG_MAX_TABLE_ID,
+};
+
+struct amd_sriov_msg_init_data_header {
+ char signature[4]; /* "INDA" */
+ uint32_t version;
+ uint32_t checksum;
+ uint32_t initdata_offset; /* 0 */
+ uint32_t initdata_size_in_kb; /* 5MB */
+ uint32_t valid_tables;
+ uint32_t vbios_img_offset;
+ uint32_t vbios_img_size_in_kb;
+ uint32_t dataexchange_offset;
+ uint32_t dataexchange_size_in_kb;
+ uint32_t ras_tele_info_offset;
+ uint32_t ras_tele_info_size_in_kb;
+ uint32_t ip_discovery_offset;
+ uint32_t ip_discovery_size_in_kb;
+ uint32_t bad_page_info_offset;
+ uint32_t bad_page_size_in_kb;
+ uint32_t reserved[8];
+};
/*
* PF2VF history log:
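
Since the v2 offsets are dynamically allocated, a guest would be expected to validate the header before trusting any of them. A hedged sketch of such a check (the helper is an assumption; only the "INDA" signature and the version enum come from this patch):

static bool example_initd_header_sane(
		const struct amd_sriov_msg_init_data_header *h)
{
	if (memcmp(h->signature, "INDA", sizeof(h->signature)))
		return false;
	if (h->version != GPU_CRIT_REGION_V2)
		return false;
	/* A real consumer would also verify h->checksum here. */
	return true;
}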
@@ -102,7 +160,8 @@ union amd_sriov_msg_feature_flags {
uint32_t ras_caps : 1;
uint32_t ras_telemetry : 1;
uint32_t ras_cper : 1;
- uint32_t reserved : 20;
+ uint32_t xgmi_ta_ext_peer_link : 1;
+ uint32_t reserved : 19;
} flags;
uint32_t all;
};
@@ -113,7 +172,8 @@ union amd_sriov_reg_access_flags {
uint32_t vf_reg_access_mmhub : 1;
uint32_t vf_reg_access_gc : 1;
uint32_t vf_reg_access_l1_tlb_cntl : 1;
- uint32_t reserved : 28;
+ uint32_t vf_reg_access_sq_config : 1;
+ uint32_t reserved : 27;
} flags;
uint32_t all;
};
@@ -139,8 +199,9 @@ union amd_sriov_ras_caps {
uint64_t block_jpeg : 1;
uint64_t block_ih : 1;
uint64_t block_mpio : 1;
+ uint64_t block_mmsch : 1;
uint64_t poison_propogation_mode : 1;
- uint64_t reserved : 44;
+ uint64_t reserved : 43;
} bits;
uint64_t all;
};
@@ -404,12 +465,17 @@ struct amd_sriov_ras_cper_dump {
uint32_t buf[];
};
+struct amd_sriov_ras_chk_criti {
+ uint32_t hit;
+};
+
struct amdsriov_ras_telemetry {
struct amd_sriov_ras_telemetry_header header;
union {
struct amd_sriov_ras_telemetry_error_count error_count;
struct amd_sriov_ras_cper_dump cper_dump;
+ struct amd_sriov_ras_chk_criti chk_criti;
} body;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 1c083304ae77..f9e2edf5260b 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -29,12 +29,11 @@
#include "gfx_v9_4_3.h"
#include "gfxhub_v1_2.h"
#include "sdma_v4_4_2.h"
+#include "amdgpu_ip.h"
#define XCP_INST_MASK(num_inst, xcp_id) \
(num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)
-#define AMDGPU_XCP_OPS_KFD (1 << 0)
-
void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
{
int i;
@@ -62,234 +61,6 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
}
-static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
-{
- return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
-}
-
-static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
- uint32_t inst_idx, struct amdgpu_ring *ring)
-{
- int xcp_id;
- enum AMDGPU_XCP_IP_BLOCK ip_blk;
- uint32_t inst_mask;
-
- ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
- if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
- adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
- if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
- (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
- return;
-
- inst_mask = 1 << inst_idx;
-
- switch (ring->funcs->type) {
- case AMDGPU_HW_IP_GFX:
- case AMDGPU_RING_TYPE_COMPUTE:
- case AMDGPU_RING_TYPE_KIQ:
- ip_blk = AMDGPU_XCP_GFX;
- break;
- case AMDGPU_RING_TYPE_SDMA:
- ip_blk = AMDGPU_XCP_SDMA;
- break;
- case AMDGPU_RING_TYPE_VCN_ENC:
- case AMDGPU_RING_TYPE_VCN_JPEG:
- ip_blk = AMDGPU_XCP_VCN;
- break;
- default:
- DRM_ERROR("Not support ring type %d!", ring->funcs->type);
- return;
- }
-
- for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
- if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
- ring->xcp_id = xcp_id;
- dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name,
- ring->xcp_id);
- if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
- adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
- break;
- }
- }
-}
-
-static void aqua_vanjaram_xcp_gpu_sched_update(
- struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- unsigned int sel_xcp_id)
-{
- unsigned int *num_gpu_sched;
-
- num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
- .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
- adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
- .sched[(*num_gpu_sched)++] = &ring->sched;
- DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name,
- sel_xcp_id, ring->funcs->type,
- ring->hw_prio, *num_gpu_sched);
-}
-
-static int aqua_vanjaram_xcp_sched_list_update(
- struct amdgpu_device *adev)
-{
- struct amdgpu_ring *ring;
- int i;
-
- for (i = 0; i < MAX_XCP; i++) {
- atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
- memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
- }
-
- if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
- return 0;
-
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- ring = adev->rings[i];
- if (!ring || !ring->sched.ready || ring->no_scheduler)
- continue;
-
- aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
-
- /* VCN may be shared by two partitions under CPX MODE in certain
- * configs.
- */
- if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
- ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
- aqua_vanjaram_xcp_vcn_shared(adev))
- aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
- }
-
- return 0;
-}
-
-static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->num_rings; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
- ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
- aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring);
- else
- aqua_vanjaram_set_xcp_id(adev, ring->me, ring);
- }
-
- return aqua_vanjaram_xcp_sched_list_update(adev);
-}
-
-static int aqua_vanjaram_select_scheds(
- struct amdgpu_device *adev,
- u32 hw_ip,
- u32 hw_prio,
- struct amdgpu_fpriv *fpriv,
- unsigned int *num_scheds,
- struct drm_gpu_scheduler ***scheds)
-{
- u32 sel_xcp_id;
- int i;
-
- if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
- u32 least_ref_cnt = ~0;
-
- fpriv->xcp_id = 0;
- for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
- u32 total_ref_cnt;
-
- total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
- if (total_ref_cnt < least_ref_cnt) {
- fpriv->xcp_id = i;
- least_ref_cnt = total_ref_cnt;
- }
- }
- }
- sel_xcp_id = fpriv->xcp_id;
-
- if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
- *num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
- *scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
- atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
- DRM_DEBUG("Selected partition #%d", sel_xcp_id);
- } else {
- DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
- return -ENOENT;
- }
-
- return 0;
-}
-
-static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
- enum amd_hw_ip_block_type block,
- int8_t inst)
-{
- int8_t dev_inst;
-
- switch (block) {
- case GC_HWIP:
- case SDMA0_HWIP:
- /* Both JPEG and VCN as JPEG is only alias of VCN */
- case VCN_HWIP:
- dev_inst = adev->ip_map.dev_inst[block][inst];
- break;
- default:
- /* For rest of the IPs, no look up required.
- * Assume 'logical instance == physical instance' for all configs. */
- dev_inst = inst;
- break;
- }
-
- return dev_inst;
-}
-
-static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev,
- enum amd_hw_ip_block_type block,
- uint32_t mask)
-{
- uint32_t dev_mask = 0;
- int8_t log_inst, dev_inst;
-
- while (mask) {
- log_inst = ffs(mask) - 1;
- dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst);
- dev_mask |= (1 << dev_inst);
- mask &= ~(1 << log_inst);
- }
-
- return dev_mask;
-}
-
-static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev,
- enum amd_hw_ip_block_type ip_block,
- uint32_t inst_mask)
-{
- int l = 0, i;
-
- while (inst_mask) {
- i = ffs(inst_mask) - 1;
- adev->ip_map.dev_inst[ip_block][l++] = i;
- inst_mask &= ~(1 << i);
- }
- for (; l < HWIP_MAX_INSTANCE; l++)
- adev->ip_map.dev_inst[ip_block][l] = -1;
-}
-
-void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev)
-{
- u32 ip_map[][2] = {
- { GC_HWIP, adev->gfx.xcc_mask },
- { SDMA0_HWIP, adev->sdma.sdma_mask },
- { VCN_HWIP, adev->vcn.inst_mask },
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
- aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);
-
- adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst;
- adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask;
-}
-
/* Fixed pattern for smn addressing on different AIDs:
* bit[34]: indicate cross AID access
* bit[33:32]: indicate target AID id
@@ -353,11 +124,14 @@ static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
if (adev->nbio.funcs->get_compute_partition_mode) {
mode = adev->nbio.funcs->get_compute_partition_mode(adev);
- if (mode != derv_mode)
+ if (mode != derv_mode) {
dev_warn(
adev->dev,
"Mismatch in compute partition mode - reported : %d derived : %d",
mode, derv_mode);
+ if (derv_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
+ amdgpu_device_bus_status_check(adev);
+ }
}
return mode;
@@ -453,6 +227,7 @@ static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
uint16_t *nps_modes)
{
struct amdgpu_device *adev = xcp_mgr->adev;
+ uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode)))
return -EINVAL;
@@ -476,12 +251,14 @@ static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
*num_xcp = 4;
*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
BIT(AMDGPU_NPS4_PARTITION_MODE);
+ if (gc_ver == IP_VERSION(9, 5, 0))
+ *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
case AMDGPU_CPX_PARTITION_MODE:
*num_xcp = NUM_XCC(adev->gfx.xcc_mask);
*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
BIT(AMDGPU_NPS4_PARTITION_MODE);
- if (amdgpu_sriov_vf(adev))
+ if (gc_ver == IP_VERSION(9, 5, 0))
*nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
default:
@@ -593,72 +370,6 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
return false;
}
-static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
-{
- /* TODO:
- * Stop user queues and threads, and make sure GPU is empty of work.
- */
-
- if (flags & AMDGPU_XCP_OPS_KFD)
- amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
-
- return 0;
-}
-
-static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
-{
- int ret = 0;
-
- if (flags & AMDGPU_XCP_OPS_KFD) {
- amdgpu_amdkfd_device_probe(xcp_mgr->adev);
- amdgpu_amdkfd_device_init(xcp_mgr->adev);
- /* If KFD init failed, return failure */
- if (!xcp_mgr->adev->kfd.init_complete)
- ret = -EIO;
- }
-
- return ret;
-}
-
-static void
-__aqua_vanjaram_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
-{
- struct amdgpu_device *adev = xcp_mgr->adev;
-
- xcp_mgr->supp_xcp_modes = 0;
-
- switch (NUM_XCC(adev->gfx.xcc_mask)) {
- case 8:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_DPX_PARTITION_MODE) |
- BIT(AMDGPU_QPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- case 6:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_TPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- case 4:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_DPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- /* this seems only existing in emulation phase */
- case 2:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
- case 1:
- xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
- BIT(AMDGPU_CPX_PARTITION_MODE);
- break;
-
- default:
- break;
- }
-}
-
static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
int mode;
@@ -696,7 +407,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
return -EINVAL;
}
- if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
+ if (adev->kfd.init_complete && !amdgpu_in_reset(adev) &&
+ !adev->in_suspend)
flags |= AMDGPU_XCP_OPS_KFD;
if (flags & AMDGPU_XCP_OPS_KFD) {
@@ -705,7 +417,7 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
goto out;
}
- ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags);
+ ret = amdgpu_xcp_pre_partition_switch(xcp_mgr, flags);
if (ret)
goto unlock;
@@ -718,7 +430,7 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
*num_xcps = num_xcc / num_xcc_per_xcp;
amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);
- ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
+ ret = amdgpu_xcp_post_partition_switch(xcp_mgr, flags);
if (!ret)
__aqua_vanjaram_update_available_partition_mode(xcp_mgr);
unlock:
@@ -801,9 +513,6 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
.get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
- .select_scheds = &aqua_vanjaram_select_scheds,
- .update_partition_sched_list =
- &aqua_vanjaram_update_partition_sched_list
};
static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
@@ -818,7 +527,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
if (ret)
return ret;
- __aqua_vanjaram_update_supported_modes(adev->xcp_mgr);
+ amdgpu_xcp_update_supported_modes(adev->xcp_mgr);
/* TODO: Default memory node affinity init */
return ret;
@@ -858,7 +567,7 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
if (ret)
return ret;
- aqua_vanjaram_ip_map_init(adev);
+ amdgpu_ip_map_init(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 427b073de2fc..7a063e44d429 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -1246,6 +1246,10 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index,
ectx.last_jump_jiffies = 0;
if (ws) {
ectx.ws = kcalloc(4, ws, GFP_KERNEL);
+ if (!ectx.ws) {
+ ret = -ENOMEM;
+ goto free;
+ }
ectx.ws_size = ws;
} else {
ectx.ws = NULL;
@@ -1494,6 +1498,28 @@ static void atom_get_vbios_version(struct atom_context *ctx)
}
}
+static void atom_get_vbios_build(struct atom_context *ctx)
+{
+ unsigned char *atom_rom_hdr;
+ unsigned char *str;
+ uint16_t base, len;
+
+ base = CU16(ATOM_ROM_TABLE_PTR);
+ atom_rom_hdr = CSTR(base);
+
+ str = CSTR(CU16(base + ATOM_ROM_CFG_PTR));
+ /* Skip config string */
+ while (str < atom_rom_hdr && *str++)
+ ;
+ /* Skip change list string */
+ while (str < atom_rom_hdr && *str++)
+ ;
+
+ len = min(atom_rom_hdr - str, STRLEN_NORMAL);
+ if (len)
+ strscpy(ctx->build_num, str, len);
+}
+
struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
{
int base;
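
atom_get_vbios_build() assumes the region between ATOM_ROM_CFG_PTR and the ATOM ROM header holds consecutive NUL-terminated strings and that the build number is the third one. Illustratively (layout assumed for the sketch, not quoted from a spec):

	/*
	 *   CFG_PTR -> "config\0" "changelist\0" "build-number\0" <ROM header>
	 *
	 * The two while loops consume the first two strings; whatever remains
	 * before the header, capped at STRLEN_NORMAL, is copied to build_num.
	 */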
@@ -1554,6 +1580,7 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
atom_get_vbios_pn(ctx);
atom_get_vbios_date(ctx);
atom_get_vbios_version(ctx);
+ atom_get_vbios_build(ctx);
return ctx;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
index b807f6639a4c..825ff28731f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.h
+++ b/drivers/gpu/drm/amd/amdgpu/atom.h
@@ -37,6 +37,7 @@ struct drm_device;
#define ATOM_ROM_MAGIC "ATOM"
#define ATOM_ROM_MAGIC_PTR 4
+#define ATOM_ROM_CFG_PTR 0xC
#define ATOM_ROM_MSG_PTR 0x10
#define ATOM_ROM_CMD_PTR 0x1E
#define ATOM_ROM_DATA_PTR 0x20
@@ -151,6 +152,7 @@ struct atom_context {
uint32_t version;
uint8_t vbios_ver_str[STRLEN_NORMAL];
uint8_t date[STRLEN_NORMAL];
+ uint8_t build_num[STRLEN_NORMAL];
};
extern int amdgpu_atom_debug;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 41f4705bdbbd..876a3256dba4 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -156,6 +156,9 @@ static int cik_ih_irq_init(struct amdgpu_device *adev)
/* enable irqs */
cik_ih_enable_interrupts(adev);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -192,6 +195,9 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
/* When a ring buffer overflow happen start parsing interrupt
@@ -211,6 +217,8 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(mmIH_RB_CNTL, tmp);
}
+
+out:
return (wptr & ih->ptr_mask);
}
@@ -306,6 +314,10 @@ static int cik_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
r = amdgpu_irq_init(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
new file mode 100644
index 000000000000..ed1e25661706
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "nv.h"
+
+#include "soc15_common.h"
+#include "soc15_hw_ip.h"
+#include "cyan_skillfish_ip_offset.h"
+
+int cyan_skillfish_reg_base_init(struct amdgpu_device *adev)
+{
+ /* HW has more IP blocks, only initialize the blocks needed by the driver */
+ uint32_t i;
+
+ adev->gfx.xcc_mask = 1;
+ for (i = 0 ; i < MAX_INSTANCE ; ++i) {
+ adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+ adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+ adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+ adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+ adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
+ adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
+ adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+ adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
+ adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+ adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 2f891fb846d5..bc7a2e06ab5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -157,6 +157,9 @@ static int cz_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
cz_ih_enable_interrupts(adev);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -194,6 +197,9 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -297,6 +303,10 @@ static int cz_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
r = amdgpu_irq_init(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index bf7c22f81cda..72ca6538b2e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1141,8 +1141,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
- amdgpu_crtc->wm_low = latency_watermark_b;
+
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
@@ -1462,17 +1461,12 @@ static int dce_v10_0_audio_init(struct amdgpu_device *adev)
static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
{
- int i;
-
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
adev->mode_info.audio.enabled = false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
deleted file mode 100644
index 47e05783c4a0..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ /dev/null
@@ -1,3823 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drm_edid.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_modeset_helper.h>
-#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_vblank.h>
-
-#include "amdgpu.h"
-#include "amdgpu_pm.h"
-#include "amdgpu_i2c.h"
-#include "vid.h"
-#include "atom.h"
-#include "amdgpu_atombios.h"
-#include "atombios_crtc.h"
-#include "atombios_encoders.h"
-#include "amdgpu_pll.h"
-#include "amdgpu_connectors.h"
-#include "amdgpu_display.h"
-#include "dce_v11_0.h"
-
-#include "dce/dce_11_0_d.h"
-#include "dce/dce_11_0_sh_mask.h"
-#include "dce/dce_11_0_enum.h"
-#include "oss/oss_3_0_d.h"
-#include "oss/oss_3_0_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-
-#include "ivsrcid/ivsrcid_vislands30.h"
-
-static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
-static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
-static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, int hpd);
-
-static const u32 crtc_offsets[] =
-{
- CRTC0_REGISTER_OFFSET,
- CRTC1_REGISTER_OFFSET,
- CRTC2_REGISTER_OFFSET,
- CRTC3_REGISTER_OFFSET,
- CRTC4_REGISTER_OFFSET,
- CRTC5_REGISTER_OFFSET,
- CRTC6_REGISTER_OFFSET
-};
-
-static const u32 hpd_offsets[] =
-{
- HPD0_REGISTER_OFFSET,
- HPD1_REGISTER_OFFSET,
- HPD2_REGISTER_OFFSET,
- HPD3_REGISTER_OFFSET,
- HPD4_REGISTER_OFFSET,
- HPD5_REGISTER_OFFSET
-};
-
-static const uint32_t dig_offsets[] = {
- DIG0_REGISTER_OFFSET,
- DIG1_REGISTER_OFFSET,
- DIG2_REGISTER_OFFSET,
- DIG3_REGISTER_OFFSET,
- DIG4_REGISTER_OFFSET,
- DIG5_REGISTER_OFFSET,
- DIG6_REGISTER_OFFSET,
- DIG7_REGISTER_OFFSET,
- DIG8_REGISTER_OFFSET
-};
-
-static const struct {
- uint32_t reg;
- uint32_t vblank;
- uint32_t vline;
- uint32_t hpd;
-
-} interrupt_status_offsets[] = { {
- .reg = mmDISP_INTERRUPT_STATUS,
- .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
-} };
-
-static const u32 cz_golden_settings_a11[] =
-{
- mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
- mmFBC_MISC, 0x1f311fff, 0x14300000,
-};
-
-static const u32 cz_mgcg_cgcg_init[] =
-{
- mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
- mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
-};
-
-static const u32 stoney_golden_settings_a11[] =
-{
- mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
- mmFBC_MISC, 0x1f311fff, 0x14302000,
-};
-
-static const u32 polaris11_golden_settings_a11[] =
-{
- mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
- mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
- mmFBC_DEBUG1, 0xffffffff, 0x00000008,
- mmFBC_MISC, 0x9f313fff, 0x14302008,
- mmHDMI_CONTROL, 0x313f031f, 0x00000011,
-};
-
-static const u32 polaris10_golden_settings_a11[] =
-{
- mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
- mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
- mmFBC_MISC, 0x9f313fff, 0x14302008,
- mmHDMI_CONTROL, 0x313f031f, 0x00000011,
-};
-
-static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
-{
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- amdgpu_device_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- ARRAY_SIZE(cz_mgcg_cgcg_init));
- amdgpu_device_program_register_sequence(adev,
- cz_golden_settings_a11,
- ARRAY_SIZE(cz_golden_settings_a11));
- break;
- case CHIP_STONEY:
- amdgpu_device_program_register_sequence(adev,
- stoney_golden_settings_a11,
- ARRAY_SIZE(stoney_golden_settings_a11));
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- amdgpu_device_program_register_sequence(adev,
- polaris11_golden_settings_a11,
- ARRAY_SIZE(polaris11_golden_settings_a11));
- break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- amdgpu_device_program_register_sequence(adev,
- polaris10_golden_settings_a11,
- ARRAY_SIZE(polaris10_golden_settings_a11));
- break;
- default:
- break;
- }
-}
-
-static u32 dce_v11_0_audio_endpt_rreg(struct amdgpu_device *adev,
- u32 block_offset, u32 reg)
-{
- unsigned long flags;
- u32 r;
-
- spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
- WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
- r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
- spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
-
- return r;
-}
-
-static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev,
- u32 block_offset, u32 reg, u32 v)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
- WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
- WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
- spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
-}
-
-static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
-{
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
- return 0;
- else
- return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
-}
-
-static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
-{
- unsigned i;
-
- /* Enable pflip interrupts */
- for (i = 0; i < adev->mode_info.num_crtc; i++)
- amdgpu_irq_get(adev, &adev->pageflip_irq, i);
-}
-
-static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
-{
- unsigned i;
-
- /* Disable pflip interrupts */
- for (i = 0; i < adev->mode_info.num_crtc; i++)
- amdgpu_irq_put(adev, &adev->pageflip_irq, i);
-}
-
-/**
- * dce_v11_0_page_flip - pageflip callback.
- *
- * @adev: amdgpu_device pointer
- * @crtc_id: crtc to cleanup pageflip on
- * @crtc_base: new address of the crtc (GPU MC address)
- * @async: asynchronous flip
- *
- * Triggers the actual pageflip by updating the primary
- * surface base address.
- */
-static void dce_v11_0_page_flip(struct amdgpu_device *adev,
- int crtc_id, u64 crtc_base, bool async)
-{
- struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
- struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
- u32 tmp;
-
- /* flip immediate for async, default is vsync */
- tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
- GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0);
- WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- /* update pitch */
- WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
- fb->pitches[0] / fb->format->cpp[0]);
- /* update the scanout addresses */
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(crtc_base));
- /* writing to the low address triggers the update */
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- lower_32_bits(crtc_base));
- /* post the write */
- RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
-}
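
The trailing read above is the usual posted-write flush; the pattern in isolation (a generic MMIO idiom, not specific to this file):

	/* MMIO writes may be posted; a read back on the same path forces
	 * them to reach the device before the flip is considered armed.
	 */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + offset, lower_32_bits(base));
	(void)RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + offset); /* flush */
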
-
-static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
- u32 *vbl, u32 *position)
-{
- if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
- return -EINVAL;
-
- *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
- *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-
- return 0;
-}
-
-/**
- * dce_v11_0_hpd_sense - hpd sense callback.
- *
- * @adev: amdgpu_device pointer
- * @hpd: hpd (hotplug detect) pin
- *
- * Checks if a digital monitor is connected (evergreen+).
- * Returns true if connected, false if not connected.
- */
-static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
- enum amdgpu_hpd_id hpd)
-{
- bool connected = false;
-
- if (hpd >= adev->mode_info.num_hpd)
- return connected;
-
- if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
- DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
- connected = true;
-
- return connected;
-}
-
-/**
- * dce_v11_0_hpd_set_polarity - hpd set polarity callback.
- *
- * @adev: amdgpu_device pointer
- * @hpd: hpd (hotplug detect) pin
- *
- * Set the polarity of the hpd pin (evergreen+).
- */
-static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
- enum amdgpu_hpd_id hpd)
-{
- u32 tmp;
- bool connected = dce_v11_0_hpd_sense(adev, hpd);
-
- if (hpd >= adev->mode_info.num_hpd)
- return;
-
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- if (connected)
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
- else
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
-}
-
-/**
- * dce_v11_0_hpd_init - hpd setup callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Setup the hpd pins used by the card (evergreen+).
- * Enable the pin, set the polarity, and enable the hpd interrupts.
- */
-static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- u32 tmp;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
- continue;
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- /* don't try to enable hpd on eDP or LVDS: this avoids breaking the
- * aux dp channel on iMacs and helps (but does not completely fix)
- * https://bugzilla.redhat.com/show_bug.cgi?id=726143
- * and also avoids interrupt storms during dpms.
- */
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
- continue;
- }
-
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
-
- tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
- DC_HPD_CONNECT_INT_DELAY,
- AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
- tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
- DC_HPD_DISCONNECT_INT_DELAY,
- AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
- WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
-
- dce_v11_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
- dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
- amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
- }
- drm_connector_list_iter_end(&iter);
-}
-
-/**
- * dce_v11_0_hpd_fini - hpd tear down callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Tear down the hpd pins used by the card (evergreen+).
- * Disable the hpd interrupts.
- */
-static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- u32 tmp;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
- continue;
-
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
-
- amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
- }
- drm_connector_list_iter_end(&iter);
-}
-
-static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
-{
- return mmDC_GPIO_HPD_A;
-}
-
-static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev)
-{
- u32 crtc_hung = 0;
- u32 crtc_status[6];
- u32 i, j, tmp;
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
- crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
- crtc_hung |= (1 << i);
- }
- }
-
- for (j = 0; j < 10; j++) {
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (crtc_hung & (1 << i)) {
- tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
- if (tmp != crtc_status[i])
- crtc_hung &= ~(1 << i);
- }
- }
- if (crtc_hung == 0)
- return false;
- udelay(100);
- }
-
- return true;
-}
-
-static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
- bool render)
-{
- u32 tmp;
-
- /* Lockout access through VGA aperture*/
- tmp = RREG32(mmVGA_HDP_CONTROL);
- if (render)
- tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
- else
- tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
- WREG32(mmVGA_HDP_CONTROL, tmp);
-
- /* disable VGA render */
- tmp = RREG32(mmVGA_RENDER_CONTROL);
- if (render)
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
- else
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- WREG32(mmVGA_RENDER_CONTROL, tmp);
-}
-
-static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
-{
- int num_crtc = 0;
-
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- num_crtc = 3;
- break;
- case CHIP_STONEY:
- num_crtc = 2;
- break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- num_crtc = 6;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- num_crtc = 5;
- break;
- default:
- num_crtc = 0;
- }
- return num_crtc;
-}
-
-void dce_v11_0_disable_dce(struct amdgpu_device *adev)
-{
- /* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
- if (amdgpu_atombios_has_dce_engine_info(adev)) {
- u32 tmp;
- int crtc_enabled, i;
-
- dce_v11_0_set_vga_render_state(adev, false);
-
- /* Disable CRTCs */
- for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) {
- crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
- CRTC_CONTROL, CRTC_MASTER_EN);
- if (crtc_enabled) {
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
- WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- }
- }
- }
-}
-
-static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
- struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
- int bpc = 0;
- u32 tmp = 0;
- enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
-
- if (connector) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- bpc = amdgpu_connector_get_monitor_bpc(connector);
- dither = amdgpu_connector->dither;
- }
-
- /* LVDS/eDP FMT is set up by atom */
- if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
- return;
-
- /* not needed for analog */
- if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
- (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
- return;
-
- if (bpc == 0)
- return;
-
- switch (bpc) {
- case 6:
- if (dither == AMDGPU_FMT_DITHER_ENABLE) {
- /* XXX sort out optimal dither settings */
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
- } else {
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
- }
- break;
- case 8:
- if (dither == AMDGPU_FMT_DITHER_ENABLE) {
- /* XXX sort out optimal dither settings */
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
- } else {
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
- }
- break;
- case 10:
- if (dither == AMDGPU_FMT_DITHER_ENABLE) {
- /* XXX sort out optimal dither settings */
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
- } else {
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
- }
- break;
- default:
- /* not needed */
- break;
- }
-
- WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-
-/* display watermark setup */
-/**
- * dce_v11_0_line_buffer_adjust - Set up the line buffer
- *
- * @adev: amdgpu_device pointer
- * @amdgpu_crtc: the selected display controller
- * @mode: the current display mode on the selected display
- * controller
- *
- * Setup up the line buffer allocation for
- * the selected display controller (CIK).
- * Returns the line buffer size in pixels.
- */
-static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev,
- struct amdgpu_crtc *amdgpu_crtc,
- struct drm_display_mode *mode)
-{
- u32 tmp, buffer_alloc, i, mem_cfg;
- u32 pipe_offset = amdgpu_crtc->crtc_id;
- /*
- * Line Buffer Setup
- * There are 6 line buffers, one for each display controller.
- * There are 3 partitions per LB. Select the number of partitions
- * to enable based on the display width. For display widths larger
- * than 4096, you need to use 2 display controllers and combine
- * them using the stereo blender.
- */
- if (amdgpu_crtc->base.enabled && mode) {
- if (mode->crtc_hdisplay < 1920) {
- mem_cfg = 1;
- buffer_alloc = 2;
- } else if (mode->crtc_hdisplay < 2560) {
- mem_cfg = 2;
- buffer_alloc = 2;
- } else if (mode->crtc_hdisplay < 4096) {
- mem_cfg = 0;
- buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
- } else {
- DRM_DEBUG_KMS("Mode too big for LB!\n");
- mem_cfg = 0;
- buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
- }
- } else {
- mem_cfg = 1;
- buffer_alloc = 0;
- }
-
- tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
- WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
- tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
- WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
- if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
- break;
- udelay(1);
- }
-
- if (amdgpu_crtc->base.enabled && mode) {
- switch (mem_cfg) {
- case 0:
- default:
- return 4096 * 2;
- case 1:
- return 1920 * 2;
- case 2:
- return 2560 * 2;
- }
- }
-
- /* controller not enabled, so no lb used */
- return 0;
-}
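
Worked example of the bucketing above, with the constants taken directly from the code:

	/* 1920x1080: crtc_hdisplay = 1920 -> not < 1920 but < 2560,
	 *   so mem_cfg = 2, buffer_alloc = 2, returned lb size = 2560 * 2.
	 * 3840x2160: crtc_hdisplay = 3840 -> < 4096,
	 *   so mem_cfg = 0, returned lb size = 4096 * 2.
	 * Disabled controller: mem_cfg = 1, buffer_alloc = 0, returns 0.
	 */
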
-
-/**
- * cik_get_number_of_dram_channels - get the number of dram channels
- *
- * @adev: amdgpu_device pointer
- *
- * Look up the number of video ram channels (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the number of dram channels
- */
-static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32(mmMC_SHARED_CHMAP);
-
- switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
- case 0:
- default:
- return 1;
- case 1:
- return 2;
- case 2:
- return 4;
- case 3:
- return 8;
- case 4:
- return 3;
- case 5:
- return 6;
- case 6:
- return 10;
- case 7:
- return 12;
- case 8:
- return 16;
- }
-}
-
-struct dce10_wm_params {
- u32 dram_channels; /* number of dram channels */
- u32 yclk; /* bandwidth per dram data pin in kHz */
- u32 sclk; /* engine clock in kHz */
- u32 disp_clk; /* display clock in kHz */
- u32 src_width; /* viewport width */
- u32 active_time; /* active display time in ns */
- u32 blank_time; /* blank time in ns */
- bool interlaced; /* mode is interlaced */
- fixed20_12 vsc; /* vertical scale ratio */
- u32 num_heads; /* number of active crtcs */
- u32 bytes_per_pixel; /* bytes per pixel display + overlay */
- u32 lb_size; /* line buffer allocated to pipe */
- u32 vtaps; /* vertical scaler taps */
-};
-
-/**
- * dce_v11_0_dram_bandwidth - get the dram bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the raw dram bandwidth (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the dram bandwidth in MBytes/s
- */
-static u32 dce_v11_0_dram_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate raw DRAM Bandwidth */
- fixed20_12 dram_efficiency; /* 0.7 */
- fixed20_12 yclk, dram_channels, bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- yclk.full = dfixed_const(wm->yclk);
- yclk.full = dfixed_div(yclk, a);
- dram_channels.full = dfixed_const(wm->dram_channels * 4);
- a.full = dfixed_const(10);
- dram_efficiency.full = dfixed_const(7);
- dram_efficiency.full = dfixed_div(dram_efficiency, a);
- bandwidth.full = dfixed_mul(dram_channels, yclk);
- bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
-
- return dfixed_trunc(bandwidth);
-}
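
Stripped of the fixed20_12 plumbing, the function above computes a plain product; a worked example with made-up but plausible numbers:

	/* dram_bandwidth = (yclk / 1000) * (dram_channels * 4) * 0.7  [MB/s]
	 * e.g. yclk = 800000 kHz and 2 channels:
	 *   800 * 8 * 0.7 = 4480 MB/s
	 */
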
-
-/**
- * dce_v11_0_dram_bandwidth_for_display - get the dram bandwidth for display
- *
- * @wm: watermark calculation data
- *
- * Calculate the dram bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the dram bandwidth for display in MBytes/s
- */
-static u32 dce_v11_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
-{
- /* Calculate DRAM Bandwidth and the part allocated to display. */
- fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
- fixed20_12 yclk, dram_channels, bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- yclk.full = dfixed_const(wm->yclk);
- yclk.full = dfixed_div(yclk, a);
- dram_channels.full = dfixed_const(wm->dram_channels * 4);
- a.full = dfixed_const(10);
- disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
- disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
- bandwidth.full = dfixed_mul(dram_channels, yclk);
- bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_data_return_bandwidth - get the data return bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the data return bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the data return bandwidth in MBytes/s
- */
-static u32 dce_v11_0_data_return_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the display Data return Bandwidth */
- fixed20_12 return_efficiency; /* 0.8 */
- fixed20_12 sclk, bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- sclk.full = dfixed_const(wm->sclk);
- sclk.full = dfixed_div(sclk, a);
- a.full = dfixed_const(10);
- return_efficiency.full = dfixed_const(8);
- return_efficiency.full = dfixed_div(return_efficiency, a);
- a.full = dfixed_const(32);
- bandwidth.full = dfixed_mul(a, sclk);
- bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_dmif_request_bandwidth - get the dmif bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the dmif bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the dmif bandwidth in MBytes/s
- */
-static u32 dce_v11_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the DMIF Request Bandwidth */
- fixed20_12 disp_clk_request_efficiency; /* 0.8 */
- fixed20_12 disp_clk, bandwidth;
- fixed20_12 a, b;
-
- a.full = dfixed_const(1000);
- disp_clk.full = dfixed_const(wm->disp_clk);
- disp_clk.full = dfixed_div(disp_clk, a);
- a.full = dfixed_const(32);
- b.full = dfixed_mul(a, disp_clk);
-
- a.full = dfixed_const(10);
- disp_clk_request_efficiency.full = dfixed_const(8);
- disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
-
- bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_available_bandwidth - get the min available bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the min available bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the min available bandwidth in MBytes/s
- */
-static u32 dce_v11_0_available_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the available bandwidth. Display can use this temporarily but not on average. */
- u32 dram_bandwidth = dce_v11_0_dram_bandwidth(wm);
- u32 data_return_bandwidth = dce_v11_0_data_return_bandwidth(wm);
- u32 dmif_req_bandwidth = dce_v11_0_dmif_request_bandwidth(wm);
-
- return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
-}
-
-/**
- * dce_v11_0_average_bandwidth - get the average available bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the average available bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the average available bandwidth in MBytes/s
- */
-static u32 dce_v11_0_average_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the display mode Average Bandwidth
- * DisplayMode should contain the source and destination dimensions,
- * timing, etc.
- */
- fixed20_12 bpp;
- fixed20_12 line_time;
- fixed20_12 src_width;
- fixed20_12 bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- line_time.full = dfixed_const(wm->active_time + wm->blank_time);
- line_time.full = dfixed_div(line_time, a);
- bpp.full = dfixed_const(wm->bytes_per_pixel);
- src_width.full = dfixed_const(wm->src_width);
- bandwidth.full = dfixed_mul(src_width, bpp);
- bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
- bandwidth.full = dfixed_div(bandwidth, line_time);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_latency_watermark - get the latency watermark
- *
- * @wm: watermark calculation data
- *
- * Calculate the latency watermark (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the latency watermark in ns
- */
-static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
-{
- /* First calculate the latency in ns */
- u32 mc_latency = 2000; /* 2000 ns. */
- u32 available_bandwidth = dce_v11_0_available_bandwidth(wm);
- u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
- u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
- u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
- u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
- (wm->num_heads * cursor_line_pair_return_time);
- u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
- u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
- u32 tmp, dmif_size = 12288;
- fixed20_12 a, b, c;
-
- if (wm->num_heads == 0)
- return 0;
-
- a.full = dfixed_const(2);
- b.full = dfixed_const(1);
- if ((wm->vsc.full > a.full) ||
- ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
- (wm->vtaps >= 5) ||
- ((wm->vsc.full >= a.full) && wm->interlaced))
- max_src_lines_per_dst_line = 4;
- else
- max_src_lines_per_dst_line = 2;
-
- a.full = dfixed_const(available_bandwidth);
- b.full = dfixed_const(wm->num_heads);
- a.full = dfixed_div(a, b);
- tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
- tmp = min(dfixed_trunc(a), tmp);
-
- lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
-
- a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
- b.full = dfixed_const(1000);
- c.full = dfixed_const(lb_fill_bw);
- b.full = dfixed_div(c, b);
- a.full = dfixed_div(a, b);
- line_fill_time = dfixed_trunc(a);
-
- if (line_fill_time < wm->active_time)
- return latency;
- else
- return latency + (line_fill_time - wm->active_time);
-
-}
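
Putting the terms above together with the example bandwidth from the DRAM sketch earlier (all numbers illustrative):

	/* available_bandwidth = 4480 MB/s, disp_clk = 148500 kHz, num_heads = 1:
	 *   worst_chunk_return_time      = 512 * 8 * 1000 / 4480 ~= 914 ns
	 *   cursor_line_pair_return_time = 128 * 4 * 1000 / 4480 ~= 114 ns
	 *   dc_latency                   = 40000000 / 148500     ~= 269 ns
	 *   latency = 2000 + (1 + 1) * 914 + 1 * 114 + 269       ~= 4211 ns
	 * plus (line_fill_time - active_time) when the line buffer cannot
	 * be refilled within the active display time.
	 */
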
-
-/**
- * dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display - check
- * average and available dram bandwidth
- *
- * @wm: watermark calculation data
- *
- * Check if the display average bandwidth fits in the display
- * dram bandwidth (CIK).
- * Used for display watermark bandwidth calculations
- * Returns true if the display fits, false if not.
- */
-static bool dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
-{
- if (dce_v11_0_average_bandwidth(wm) <=
- (dce_v11_0_dram_bandwidth_for_display(wm) / wm->num_heads))
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_average_bandwidth_vs_available_bandwidth - check
- * average and available bandwidth
- *
- * @wm: watermark calculation data
- *
- * Check if the display average bandwidth fits in the display
- * available bandwidth (CIK).
- * Used for display watermark bandwidth calculations
- * Returns true if the display fits, false if not.
- */
-static bool dce_v11_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
-{
- if (dce_v11_0_average_bandwidth(wm) <=
- (dce_v11_0_available_bandwidth(wm) / wm->num_heads))
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_check_latency_hiding - check latency hiding
- *
- * @wm: watermark calculation data
- *
- * Check latency hiding (CIK).
- * Used for display watermark bandwidth calculations
- * Returns true if the display fits, false if not.
- */
-static bool dce_v11_0_check_latency_hiding(struct dce10_wm_params *wm)
-{
- u32 lb_partitions = wm->lb_size / wm->src_width;
- u32 line_time = wm->active_time + wm->blank_time;
- u32 latency_tolerant_lines;
- u32 latency_hiding;
- fixed20_12 a;
-
- a.full = dfixed_const(1);
- if (wm->vsc.full > a.full)
- latency_tolerant_lines = 1;
- else {
- if (lb_partitions <= (wm->vtaps + 1))
- latency_tolerant_lines = 1;
- else
- latency_tolerant_lines = 2;
- }
-
- latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
-
- if (dce_v11_0_latency_watermark(wm) <= latency_hiding)
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_program_watermarks - program display watermarks
- *
- * @adev: amdgpu_device pointer
- * @amdgpu_crtc: the selected display controller
- * @lb_size: line buffer size
- * @num_heads: number of display controllers in use
- *
- * Calculate and program the display watermarks for the
- * selected display controller (CIK).
- */
-static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
- struct amdgpu_crtc *amdgpu_crtc,
- u32 lb_size, u32 num_heads)
-{
- struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
- struct dce10_wm_params wm_low, wm_high;
- u32 active_time;
- u32 line_time = 0;
- u32 latency_watermark_a = 0, latency_watermark_b = 0;
- u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
-
- if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
- (u32)mode->clock);
- line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
- (u32)mode->clock);
- line_time = min_t(u32, line_time, 65535);
-
- /* watermark for high clocks */
- if (adev->pm.dpm_enabled) {
- wm_high.yclk =
- amdgpu_dpm_get_mclk(adev, false) * 10;
- wm_high.sclk =
- amdgpu_dpm_get_sclk(adev, false) * 10;
- } else {
- wm_high.yclk = adev->pm.current_mclk * 10;
- wm_high.sclk = adev->pm.current_sclk * 10;
- }
-
- wm_high.disp_clk = mode->clock;
- wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = active_time;
- wm_high.blank_time = line_time - wm_high.active_time;
- wm_high.interlaced = false;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- wm_high.interlaced = true;
- wm_high.vsc = amdgpu_crtc->vsc;
- wm_high.vtaps = 1;
- if (amdgpu_crtc->rmx_type != RMX_OFF)
- wm_high.vtaps = 2;
- wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
- wm_high.lb_size = lb_size;
- wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
- wm_high.num_heads = num_heads;
-
- /* set for high clocks */
- latency_watermark_a = min_t(u32, dce_v11_0_latency_watermark(&wm_high), 65535);
-
- /* possibly force display priority to high */
- /* should really do this at mode validation time... */
- if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
- !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
- !dce_v11_0_check_latency_hiding(&wm_high) ||
- (adev->mode_info.disp_priority == 2)) {
- DRM_DEBUG_KMS("force priority to high\n");
- }
-
- /* watermark for low clocks */
- if (adev->pm.dpm_enabled) {
- wm_low.yclk =
- amdgpu_dpm_get_mclk(adev, true) * 10;
- wm_low.sclk =
- amdgpu_dpm_get_sclk(adev, true) * 10;
- } else {
- wm_low.yclk = adev->pm.current_mclk * 10;
- wm_low.sclk = adev->pm.current_sclk * 10;
- }
-
- wm_low.disp_clk = mode->clock;
- wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = active_time;
- wm_low.blank_time = line_time - wm_low.active_time;
- wm_low.interlaced = false;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- wm_low.interlaced = true;
- wm_low.vsc = amdgpu_crtc->vsc;
- wm_low.vtaps = 1;
- if (amdgpu_crtc->rmx_type != RMX_OFF)
- wm_low.vtaps = 2;
- wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
- wm_low.lb_size = lb_size;
- wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
- wm_low.num_heads = num_heads;
-
- /* set for low clocks */
- latency_watermark_b = min_t(u32, dce_v11_0_latency_watermark(&wm_low), 65535);
-
- /* possibly force display priority to high */
- /* should really do this at mode validation time... */
- if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
- !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
- !dce_v11_0_check_latency_hiding(&wm_low) ||
- (adev->mode_info.disp_priority == 2)) {
- DRM_DEBUG_KMS("force priority to high\n");
- }
- lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
- }
-
- /* select wm A */
- wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
- WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
- WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- /* select wm B */
- tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
- WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
- WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- /* restore original selection */
- WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
-
- /* save values for DPM */
- amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
- amdgpu_crtc->wm_low = latency_watermark_b;
- /* Save number of lines the linebuffer leads before the scanout */
- amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
-}
-
-/**
- * dce_v11_0_bandwidth_update - program display watermarks
- *
- * @adev: amdgpu_device pointer
- *
- * Calculate and program the display watermarks and line
- * buffer allocation (CIK).
- */
-static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev)
-{
- struct drm_display_mode *mode = NULL;
- u32 num_heads = 0, lb_size;
- int i;
-
- amdgpu_display_update_priority(adev);
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->mode_info.crtcs[i]->base.enabled)
- num_heads++;
- }
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- mode = &adev->mode_info.crtcs[i]->base.mode;
- lb_size = dce_v11_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
- dce_v11_0_program_watermarks(adev, adev->mode_info.crtcs[i],
- lb_size, num_heads);
- }
-}
-
-static void dce_v11_0_audio_get_connected_pins(struct amdgpu_device *adev)
-{
- int i;
- u32 offset, tmp;
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- offset = adev->mode_info.audio.pin[i].offset;
- tmp = RREG32_AUDIO_ENDPT(offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
- if (((tmp &
- AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
- AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
- adev->mode_info.audio.pin[i].connected = false;
- else
- adev->mode_info.audio.pin[i].connected = true;
- }
-}
-
-static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *adev)
-{
- int i;
-
- dce_v11_0_audio_get_connected_pins(adev);
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- if (adev->mode_info.audio.pin[i].connected)
- return &adev->mode_info.audio.pin[i];
- }
- DRM_ERROR("No connected audio pins found!\n");
- return NULL;
-}
-
-static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
-{
- struct amdgpu_device *adev = drm_to_adev(encoder->dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- u32 tmp;
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
- WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
-}
-
-static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- struct amdgpu_connector *amdgpu_connector = NULL;
- u32 tmp;
- int interlace = 0;
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- if (connector->encoder == encoder) {
- amdgpu_connector = to_amdgpu_connector(connector);
- break;
- }
- }
- drm_connector_list_iter_end(&iter);
-
- if (!amdgpu_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- interlace = 1;
- if (connector->latency_present[interlace]) {
- tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- VIDEO_LIPSYNC, connector->video_latency[interlace]);
- tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- AUDIO_LIPSYNC, connector->audio_latency[interlace]);
- } else {
- tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- VIDEO_LIPSYNC, 0);
- tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- AUDIO_LIPSYNC, 0);
- }
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
-}
-
-static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- struct amdgpu_connector *amdgpu_connector = NULL;
- u32 tmp;
- u8 *sadb = NULL;
- int sad_count;
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- if (connector->encoder == encoder) {
- amdgpu_connector = to_amdgpu_connector(connector);
- break;
- }
- }
- drm_connector_list_iter_end(&iter);
-
- if (!amdgpu_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
- if (sad_count < 0) {
- DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
- sad_count = 0;
- }
-
- /* program the speaker allocation */
- tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- DP_CONNECTION, 0);
- /* set HDMI mode */
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- HDMI_CONNECTION, 1);
- if (sad_count)
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- SPEAKER_ALLOCATION, sadb[0]);
- else
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- SPEAKER_ALLOCATION, 5); /* stereo */
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
-
- kfree(sadb);
-}
-
-static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- struct amdgpu_connector *amdgpu_connector = NULL;
- struct cea_sad *sads;
- int i, sad_count;
-
- static const u16 eld_reg_to_type[][2] = {
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
- };
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- if (connector->encoder == encoder) {
- amdgpu_connector = to_amdgpu_connector(connector);
- break;
- }
- }
- drm_connector_list_iter_end(&iter);
-
- if (!amdgpu_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
- if (sad_count < 0)
- DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
- if (sad_count <= 0)
- return;
- BUG_ON(!sads);
-
- for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
- u32 tmp = 0;
- u8 stereo_freqs = 0;
- int max_channels = -1;
- int j;
-
- for (j = 0; j < sad_count; j++) {
- struct cea_sad *sad = &sads[j];
-
- if (sad->format == eld_reg_to_type[i][1]) {
- if (sad->channels > max_channels) {
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- MAX_CHANNELS, sad->channels);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- DESCRIPTOR_BYTE_2, sad->byte2);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES, sad->freq);
- max_channels = sad->channels;
- }
-
- if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
- stereo_freqs |= sad->freq;
- else
- break;
- }
- }
-
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
- }
-
- kfree(sads);
-}
-
-static void dce_v11_0_audio_enable(struct amdgpu_device *adev,
- struct amdgpu_audio_pin *pin,
- bool enable)
-{
- if (!pin)
- return;
-
- WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
- enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
-}
-
-static const u32 pin_offsets[] =
-{
- AUD0_REGISTER_OFFSET,
- AUD1_REGISTER_OFFSET,
- AUD2_REGISTER_OFFSET,
- AUD3_REGISTER_OFFSET,
- AUD4_REGISTER_OFFSET,
- AUD5_REGISTER_OFFSET,
- AUD6_REGISTER_OFFSET,
- AUD7_REGISTER_OFFSET,
-};
-
-static int dce_v11_0_audio_init(struct amdgpu_device *adev)
-{
- int i;
-
- if (!amdgpu_audio)
- return 0;
-
- adev->mode_info.audio.enabled = true;
-
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- adev->mode_info.audio.num_pins = 7;
- break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- adev->mode_info.audio.num_pins = 8;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- adev->mode_info.audio.num_pins = 6;
- break;
- default:
- return -EINVAL;
- }
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- adev->mode_info.audio.pin[i].channels = -1;
- adev->mode_info.audio.pin[i].rate = -1;
- adev->mode_info.audio.pin[i].bits_per_sample = -1;
- adev->mode_info.audio.pin[i].status_bits = 0;
- adev->mode_info.audio.pin[i].category_code = 0;
- adev->mode_info.audio.pin[i].connected = false;
- adev->mode_info.audio.pin[i].offset = pin_offsets[i];
- adev->mode_info.audio.pin[i].id = i;
- /* disable audio. it will be set up later */
- /* XXX remove once we switch to ip funcs */
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
- }
-
- return 0;
-}
-
-static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
-{
- int i;
-
- if (!amdgpu_audio)
- return;
-
- if (!adev->mode_info.audio.enabled)
- return;
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
- adev->mode_info.audio.enabled = false;
-}
-
-/*
- * update the N and CTS parameters for a given pixel clock rate
- */
-static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- u32 tmp;
-
- tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
- WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
- tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
- WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
- WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
- tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
- WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
- WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
- tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
- WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
-
-}
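
For reference, the N/CTS pairs programmed above implement the HDMI audio clock regeneration relation (spec-defined behaviour, not something specific to this driver):

	/* The sink reconstructs the audio clock as
	 *   128 * fs = f_TMDS * N / CTS
	 * so each audio rate (32 / 44.1 / 48 kHz) needs an (N, CTS) pair
	 * matched to the current pixel clock; amdgpu_afmt_acr(clock)
	 * supplies all three pairs at once.
	 */
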
-
-/*
- * build a HDMI Video Info Frame
- */
-static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
- void *buffer, size_t size)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- uint8_t *frame = buffer + 3;
- uint8_t *header = buffer;
-
- WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
- frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
- WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
- frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
- WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
- frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
- WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
- frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
-}
-
-static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
- u32 dto_phase = 24 * 1000;
- u32 dto_modulo = clock;
- u32 tmp;
-
- if (!dig || !dig->afmt)
- return;
-
- /* XXX two dtos; generally use dto0 for hdmi */
- /* Express [24MHz / target pixel clock] as an exact rational
- * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE
- * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
- */
- tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
- tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
- amdgpu_crtc->crtc_id);
- WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
- WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
- WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
-}
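
Concretely, for the ratio described in the comment above:

	/* dto_phase / dto_modulo = 24000 / clock (both in kHz), i.e. the
	 * exact ratio 24 MHz : pixel clock.  For a 148.5 MHz pixel clock:
	 * phase = 24000, module = 148500.
	 */
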
-
-/*
- * update the info frames with the data from the current display mode
- */
-static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
- u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
- struct hdmi_avi_infoframe frame;
- ssize_t err;
- u32 tmp;
- int bpc = 8;
-
- if (!dig || !dig->afmt)
- return;
-
- /* Silent, r600_hdmi_enable will raise WARN for us */
- if (!dig->afmt->enabled)
- return;
-
- /* hdmi deep color mode general control packets setup, if bpc > 8 */
- if (encoder->crtc) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
- bpc = amdgpu_crtc->bpc;
- }
-
- /* disable audio prior to setting up hw */
- dig->afmt->pin = dce_v11_0_audio_get_pin(adev);
- dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
-
- dce_v11_0_audio_set_dto(encoder, mode->clock);
-
- tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
- WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */
-
- WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);
-
- tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
- switch (bpc) {
- case 0:
- case 6:
- case 8:
- case 16:
- default:
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
- DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
- connector->name, bpc);
- break;
- case 10:
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
- DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
- connector->name);
- break;
- case 12:
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
- DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
- connector->name);
- break;
- }
- WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
- WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
- /* enable audio info frames (frames won't be set until audio is enabled) */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
- /* required for audio info values to be updated */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
- WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
- /* required for audio info values to be updated */
- tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
- WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
- /* anything other than 0 */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
- WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
-
- WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */
-
- tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
- /* set the default audio delay */
- tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
- /* should be sufficient for all audio modes and small enough for all hblanks */
- tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
- WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
- /* allow 60958 channel status fields to be updated */
- tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
- WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
- if (bpc > 8)
- /* clear SW CTS value */
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
- else
- /* select SW CTS value */
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
- /* allow hw to send ACR packets when required */
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
- WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- dce_v11_0_afmt_update_ACR(encoder, mode->clock);
-
- tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
- WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
- WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
- WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
-
- dce_v11_0_audio_write_speaker_allocation(encoder);
-
- WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
- (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
-
- dce_v11_0_afmt_audio_select_pin(encoder);
- dce_v11_0_audio_write_sad_regs(encoder);
- dce_v11_0_audio_write_latency_fields(encoder, mode);
-
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
- if (err < 0) {
- DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
- return;
- }
-
- err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
- if (err < 0) {
- DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
- return;
- }
-
- dce_v11_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
- /* enable AVI info frames */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
- /* required for audio info values to be updated */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
- WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
- WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
- /* send audio packets */
- tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
- WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
- WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
- WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
- WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);
-
- /* enable audio after setting up hw */
- dce_v11_0_audio_enable(adev, dig->afmt->pin, true);
-}
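
Almost every register update in the function above follows the same read-modify-write shape: RREG32() to fetch, one or more REG_SET_FIELD() calls to rewrite a bitfield, WREG32() to store. A minimal standalone sketch of the mask/shift arithmetic behind that pattern (the field layout here is hypothetical, not a real DCE register):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for REG_SET_FIELD(): clear the field's bits,
 * then OR in the new value shifted into position. */
static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	const uint32_t FIELD_MASK = 0x3u << 4;	/* hypothetical 2-bit field at [5:4] */
	uint32_t tmp = 0xffffffffu;

	tmp = set_field(tmp, FIELD_MASK, 4, 2);	/* write 2 into the field */
	printf("0x%08x\n", tmp);		/* prints 0xffffffef */
	return 0;
}
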
-
-static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
-
- if (!dig || !dig->afmt)
- return;
-
- /* Return silently; r600_hdmi_enable raises the WARN for this case */
- if (enable && dig->afmt->enabled)
- return;
- if (!enable && !dig->afmt->enabled)
- return;
-
- if (!enable && dig->afmt->pin) {
- dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
- dig->afmt->pin = NULL;
- }
-
- dig->afmt->enabled = enable;
-
- DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
- enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
-}
-
-static int dce_v11_0_afmt_init(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->mode_info.num_dig; i++)
- adev->mode_info.afmt[i] = NULL;
-
- /* DCE11 has audio blocks tied to DIG encoders */
- for (i = 0; i < adev->mode_info.num_dig; i++) {
- adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
- if (adev->mode_info.afmt[i]) {
- adev->mode_info.afmt[i]->offset = dig_offsets[i];
- adev->mode_info.afmt[i]->id = i;
- } else {
- int j;
- for (j = 0; j < i; j++) {
- kfree(adev->mode_info.afmt[j]);
- adev->mode_info.afmt[j] = NULL;
- }
- return -ENOMEM;
- }
- }
- return 0;
-}
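
The init loop above uses a common allocate-or-unwind shape: on the first failed allocation, everything allocated so far is freed before returning -ENOMEM, so the caller never sees a half-initialized array. A generic sketch of the same pattern (names are illustrative):

#include <stdlib.h>

/* Allocate n slots; on failure free slots 0..i-1 and report an error,
 * leaving the array in a clean all-NULL state. */
static int init_slots(void **slots, int n, size_t sz)
{
	int i, j;

	for (i = 0; i < n; i++) {
		slots[i] = calloc(1, sz);
		if (!slots[i]) {
			for (j = 0; j < i; j++) {
				free(slots[j]);
				slots[j] = NULL;
			}
			return -1;	/* -ENOMEM in the kernel version */
		}
	}
	return 0;
}
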
-
-static void dce_v11_0_afmt_fini(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->mode_info.num_dig; i++) {
- kfree(adev->mode_info.afmt[i]);
- adev->mode_info.afmt[i] = NULL;
- }
-}
-
-static const u32 vga_control_regs[6] = {
- mmD1VGA_CONTROL,
- mmD2VGA_CONTROL,
- mmD3VGA_CONTROL,
- mmD4VGA_CONTROL,
- mmD5VGA_CONTROL,
- mmD6VGA_CONTROL,
-};
-
-static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- u32 vga_control;
-
- vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
- if (enable)
- WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
- else
- WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
-}
-
-static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
-
- if (enable)
- WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
- else
- WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
-}
-
-static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, int atomic)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct drm_framebuffer *target_fb;
- struct drm_gem_object *obj;
- struct amdgpu_bo *abo;
- uint64_t fb_location, tiling_flags;
- uint32_t fb_format, fb_pitch_pixels;
- u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
- u32 pipe_config;
- u32 tmp, viewport_w, viewport_h;
- int r;
- bool bypass_lut = false;
-
- /* no fb bound */
- if (!atomic && !crtc->primary->fb) {
- DRM_DEBUG_KMS("No FB bound\n");
- return 0;
- }
-
- if (atomic)
- target_fb = fb;
- else
- target_fb = crtc->primary->fb;
-
- /* If atomic, assume fb object is pinned & idle & fenced and
- * just update base pointers
- */
- obj = target_fb->obj[0];
- abo = gem_to_amdgpu_bo(obj);
- r = amdgpu_bo_reserve(abo, false);
- if (unlikely(r != 0))
- return r;
-
- if (!atomic) {
- abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
- if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(abo);
- return -EINVAL;
- }
- }
- fb_location = amdgpu_bo_gpu_offset(abo);
-
- amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
- amdgpu_bo_unreserve(abo);
-
- pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
-
- switch (target_fb->format->format) {
- case DRM_FORMAT_C8:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
- break;
- case DRM_FORMAT_XRGB4444:
- case DRM_FORMAT_ARGB4444:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_ARGB1555:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_BGRA5551:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_RGB565:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
- break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
- /* An fb with more than 8 bpc needs to bypass the hw lut to retain precision */
- bypass_lut = true;
- break;
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_BGRA1010102:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
- /* An fb with more than 8 bpc needs to bypass the hw lut to retain precision */
- bypass_lut = true;
- break;
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2);
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
- break;
- default:
- DRM_ERROR("Unsupported screen format %p4cc\n",
- &target_fb->format->format);
- return -EINVAL;
- }
-
- if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
- unsigned bankw, bankh, mtaspect, tile_split, num_banks;
-
- bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
- bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
- mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
- tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
- num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
-
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
- ARRAY_2D_TILED_THIN1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
- tile_split);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
- mtaspect);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
- ADDR_SURF_MICRO_TILING_DISPLAY);
- } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
- ARRAY_1D_TILED_THIN1);
- }
-
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
- pipe_config);
-
- dce_v11_0_vga_enable(crtc, false);
-
- /* Make sure surface address is updated at vertical blank rather than
- * horizontal blank
- */
- tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
- GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
- WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(fb_location));
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(fb_location));
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32)fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
- WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
- WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
-
- /*
- * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
- * for > 8 bpc scanout to avoid truncating fb indices to their 8 MSBs and
- * retain full precision throughout the pipeline.
- */
- tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
- if (bypass_lut)
- tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
- else
- tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
- WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);
-
- if (bypass_lut)
- DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
-
- WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
- WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
-
- fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
- WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
-
- dce_v11_0_grph_enable(crtc, true);
-
- WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
- target_fb->height);
-
- x &= ~3;
- y &= ~1;
- WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
- (x << 16) | y);
- viewport_w = crtc->mode.hdisplay;
- viewport_h = (crtc->mode.vdisplay + 1) & ~1;
- WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
- (viewport_w << 16) | viewport_h);
-
- /* set pageflip to happen anywhere in vblank interval */
- WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
-
- if (!atomic && fb && fb != crtc->primary->fb) {
- abo = gem_to_amdgpu_bo(fb->obj[0]);
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r != 0))
- return r;
- amdgpu_bo_unpin(abo);
- amdgpu_bo_unreserve(abo);
- }
-
- /* Bytes per pixel may have changed */
- dce_v11_0_bandwidth_update(adev);
-
- return 0;
-}
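
One detail worth calling out from the function above: GRPH_PITCH wants the scanline length in pixels, while DRM stores pitches[] in bytes, so the driver divides by cpp[0] (bytes per pixel). A tiny illustration with made-up values:

#include <stdint.h>
#include <assert.h>

/* DRM pitch is in bytes; the GRPH_PITCH register takes pixels. */
static uint32_t pitch_in_pixels(uint32_t pitch_bytes, uint32_t bytes_per_pixel)
{
	return pitch_bytes / bytes_per_pixel;
}

int main(void)
{
	/* e.g. a 1920-wide XRGB8888 fb padded to 8192 bytes per line */
	assert(pitch_in_pixels(8192, 4) == 2048);
	return 0;
}
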
-
-static void dce_v11_0_set_interleave(struct drm_crtc *crtc,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- u32 tmp;
-
- tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
- else
- tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
- WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- u16 *r, *g, *b;
- int i;
- u32 tmp;
-
- DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
-
- tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
- WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
- WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
- WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
-
- WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
-
- WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
- WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
- WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
-
- WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
-
- WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
- r = crtc->gamma_store;
- g = r + crtc->gamma_size;
- b = g + crtc->gamma_size;
- for (i = 0; i < 256; i++) {
- WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
- ((*r++ & 0xffc0) << 14) |
- ((*g++ & 0xffc0) << 4) |
- (*b++ >> 6));
- }
-
- tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
- tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
- tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, 0);
- WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
- WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
- WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
- WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- /* XXX match this to the depth of the crtc fmt block, move to modeset? */
- WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
- /* XXX this only needs to be programmed once per crtc at startup,
- * not sure where the best place for it is
- */
- tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
- WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
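
The LUT upload loop above packs each gamma entry into one 30-bit DC_LUT_30_COLOR word: the 16-bit R/G/B values from gamma_store are truncated to their top 10 bits and placed at bits 29:20, 19:10 and 9:0 respectively. Spelled out as a standalone helper:

#include <stdint.h>
#include <assert.h>

/* Keep the top 10 bits of each 16-bit channel and pack R:G:B 10:10:10. */
static uint32_t pack_lut_30(uint16_t r, uint16_t g, uint16_t b)
{
	return ((uint32_t)(r & 0xffc0) << 14) |	/* bits 29:20 */
	       ((uint32_t)(g & 0xffc0) << 4) |	/* bits 19:10 */
	       ((uint32_t)b >> 6);		/* bits 9:0   */
}

int main(void)
{
	assert(pack_lut_30(0xffff, 0, 0) == 0x3ffu << 20);	/* full red */
	return 0;
}
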
-
-static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
-
- switch (amdgpu_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (dig->linkb)
- return 1;
- else
- return 0;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig->linkb)
- return 3;
- else
- return 2;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig->linkb)
- return 5;
- else
- return 4;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
- return 6;
- default:
- DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
- return 0;
- }
-}
-
-/**
- * dce_v11_0_pick_pll - Allocate a PPLL for use by the crtc.
- *
- * @crtc: drm crtc
- *
- * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
- * a single PPLL can be used for all DP crtcs/encoders. For non-DP
- * monitors a dedicated PPLL must be used. If a particular board has
- * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
- * as there is no need to program the PLL itself. If we are not able to
- * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
- * avoid messing up an existing monitor.
- *
- * Asic specific PLL information
- *
- * DCE 11.x
- * Polaris10/11/12, VegaM
- * - per-PHY COMBOPHY PLLs are used; DP monitors use the DP DTO instead
- * Other DCE11 parts
- * - APUs try PPLL1 then PPLL0; dGPUs try PPLL2, PPLL1, then PPLL0
- *
- */
-static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- u32 pll_in_use;
- int pll;
-
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM)) {
- struct amdgpu_encoder *amdgpu_encoder =
- to_amdgpu_encoder(amdgpu_crtc->encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
-
- if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
- return ATOM_DP_DTO;
-
- switch (amdgpu_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (dig->linkb)
- return ATOM_COMBOPHY_PLL1;
- else
- return ATOM_COMBOPHY_PLL0;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig->linkb)
- return ATOM_COMBOPHY_PLL3;
- else
- return ATOM_COMBOPHY_PLL2;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig->linkb)
- return ATOM_COMBOPHY_PLL5;
- else
- return ATOM_COMBOPHY_PLL4;
- default:
- DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
- return ATOM_PPLL_INVALID;
- }
- }
-
- if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
- if (adev->clock.dp_extclk)
- /* skip PPLL programming if using ext clock */
- return ATOM_PPLL_INVALID;
- else {
- /* use the same PPLL for all DP monitors */
- pll = amdgpu_pll_get_shared_dp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- }
- } else {
- /* use the same PPLL for all monitors with the same clock */
- pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- }
-
- /* XXX need to determine what plls are available on each DCE11 part */
- pll_in_use = amdgpu_pll_get_use_mask(crtc);
- if (adev->flags & AMD_IS_APU) {
- if (!(pll_in_use & (1 << ATOM_PPLL1)))
- return ATOM_PPLL1;
- if (!(pll_in_use & (1 << ATOM_PPLL0)))
- return ATOM_PPLL0;
- DRM_ERROR("unable to allocate a PPLL\n");
- return ATOM_PPLL_INVALID;
- } else {
- if (!(pll_in_use & (1 << ATOM_PPLL2)))
- return ATOM_PPLL2;
- if (!(pll_in_use & (1 << ATOM_PPLL1)))
- return ATOM_PPLL1;
- if (!(pll_in_use & (1 << ATOM_PPLL0)))
- return ATOM_PPLL0;
- DRM_ERROR("unable to allocate a PPLL\n");
- return ATOM_PPLL_INVALID;
- }
-}
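
The fallback path at the bottom of dce_v11_0_pick_pll() is a first-fit scan over a use-mask bitmap: each candidate PLL is tried in preference order and the first one whose bit is clear wins. The same logic in isolation (IDs and ordering are placeholders, not the real ATOM_PPLL values):

/* First-fit PLL allocation over an in-use bitmap. */
enum { PLL0, PLL1, PLL2, PLL_INVALID };

static int pick_free_pll(unsigned int pll_in_use)
{
	static const int order[] = { PLL2, PLL1, PLL0 };
	unsigned int i;

	for (i = 0; i < sizeof(order) / sizeof(order[0]); i++)
		if (!(pll_in_use & (1u << order[i])))
			return order[i];
	return PLL_INVALID;	/* nothing free: skip PLL programming */
}
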
-
-static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
-{
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- uint32_t cur_lock;
-
- cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
- if (lock)
- cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
- else
- cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
- WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
-}
-
-static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- u32 tmp;
-
- tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
- WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- u32 tmp;
-
- WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(amdgpu_crtc->cursor_addr));
- WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- lower_32_bits(amdgpu_crtc->cursor_addr));
-
- tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
- tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
- WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
- int x, int y)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- int xorigin = 0, yorigin = 0;
-
- amdgpu_crtc->cursor_x = x;
- amdgpu_crtc->cursor_y = y;
-
- /* avivo cursors are offset into the total surface */
- x += crtc->x;
- y += crtc->y;
- DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
-
- if (x < 0) {
- xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
- x = 0;
- }
- if (y < 0) {
- yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
- y = 0;
- }
-
- WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
- WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-
- return 0;
-}
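
The clamping above handles a cursor hanging off the top or left edge: the hardware needs a non-negative CUR_POSITION, so the negative overhang is folded into the hotspot register instead, capped at the maximum cursor dimension. The arithmetic for one axis, as a sketch:

/* Fold negative cursor overhang into the hotspot origin (one axis). */
static void clamp_cursor_axis(int *pos, int *origin, int max_dim)
{
	*origin = 0;
	if (*pos < 0) {
		*origin = (-*pos < max_dim - 1) ? -*pos : max_dim - 1;
		*pos = 0;
	}
}
/* With a 128-wide cursor at x == -5: pos becomes 0, origin becomes 5,
 * so the visible part of the cursor still lines up with the pointer. */
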
-
-static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
- int x, int y)
-{
- int ret;
-
- dce_v11_0_lock_cursor(crtc, true);
- ret = dce_v11_0_cursor_move_locked(crtc, x, y);
- dce_v11_0_lock_cursor(crtc, false);
-
- return ret;
-}
-
-static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height,
- int32_t hot_x,
- int32_t hot_y)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_gem_object *obj;
- struct amdgpu_bo *aobj;
- int ret;
-
- if (!handle) {
- /* turn off cursor */
- dce_v11_0_hide_cursor(crtc);
- obj = NULL;
- goto unpin;
- }
-
- if ((width > amdgpu_crtc->max_cursor_width) ||
- (height > amdgpu_crtc->max_cursor_height)) {
- DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
- return -EINVAL;
- }
-
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj) {
- DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
- return -ENOENT;
- }
-
- aobj = gem_to_amdgpu_bo(obj);
- ret = amdgpu_bo_reserve(aobj, false);
- if (ret != 0) {
- drm_gem_object_put(obj);
- return ret;
- }
-
- aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
- amdgpu_bo_unreserve(aobj);
- if (ret) {
- DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_put(obj);
- return ret;
- }
- amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
-
- dce_v11_0_lock_cursor(crtc, true);
-
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height ||
- hot_x != amdgpu_crtc->cursor_hot_x ||
- hot_y != amdgpu_crtc->cursor_hot_y) {
- int x, y;
-
- x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
- y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
-
- dce_v11_0_cursor_move_locked(crtc, x, y);
-
- amdgpu_crtc->cursor_width = width;
- amdgpu_crtc->cursor_height = height;
- amdgpu_crtc->cursor_hot_x = hot_x;
- amdgpu_crtc->cursor_hot_y = hot_y;
- }
-
- dce_v11_0_show_cursor(crtc);
- dce_v11_0_lock_cursor(crtc, false);
-
-unpin:
- if (amdgpu_crtc->cursor_bo) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- ret = amdgpu_bo_reserve(aobj, true);
- if (likely(ret == 0)) {
- amdgpu_bo_unpin(aobj);
- amdgpu_bo_unreserve(aobj);
- }
- drm_gem_object_put(amdgpu_crtc->cursor_bo);
- }
-
- amdgpu_crtc->cursor_bo = obj;
- return 0;
-}
-
-static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- if (amdgpu_crtc->cursor_bo) {
- dce_v11_0_lock_cursor(crtc, true);
-
- dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
- amdgpu_crtc->cursor_y);
-
- dce_v11_0_show_cursor(crtc);
-
- dce_v11_0_lock_cursor(crtc, false);
- }
-}
-
-static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
-{
- dce_v11_0_crtc_load_lut(crtc);
-
- return 0;
-}
-
-static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- drm_crtc_cleanup(crtc);
- kfree(amdgpu_crtc);
-}
-
-static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
- .cursor_set2 = dce_v11_0_crtc_cursor_set2,
- .cursor_move = dce_v11_0_crtc_cursor_move,
- .gamma_set = dce_v11_0_crtc_gamma_set,
- .set_config = amdgpu_display_crtc_set_config,
- .destroy = dce_v11_0_crtc_destroy,
- .page_flip_target = amdgpu_display_crtc_page_flip_target,
- .get_vblank_counter = amdgpu_get_vblank_counter_kms,
- .enable_vblank = amdgpu_enable_vblank_kms,
- .disable_vblank = amdgpu_disable_vblank_kms,
- .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
-};
-
-static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- unsigned type;
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- amdgpu_crtc->enabled = true;
- amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
- dce_v11_0_vga_enable(crtc, true);
- amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
- dce_v11_0_vga_enable(crtc, false);
- /* Make sure VBLANK and PFLIP interrupts are still enabled */
- type = amdgpu_display_crtc_idx_to_irq_type(adev,
- amdgpu_crtc->crtc_id);
- amdgpu_irq_update(adev, &adev->crtc_irq, type);
- amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_crtc_vblank_on(crtc);
- dce_v11_0_crtc_load_lut(crtc);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- drm_crtc_vblank_off(crtc);
- if (amdgpu_crtc->enabled) {
- dce_v11_0_vga_enable(crtc, true);
- amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
- dce_v11_0_vga_enable(crtc, false);
- }
- amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
- amdgpu_crtc->enabled = false;
- break;
- }
- /* adjust pm to dpms */
- amdgpu_dpm_compute_clocks(adev);
-}
-
-static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc)
-{
- /* disable crtc pair power gating before programming */
- amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
- amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void dce_v11_0_crtc_commit(struct drm_crtc *crtc)
-{
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
- amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
-}
-
-static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_atom_ss ss;
- int i;
-
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- if (crtc->primary->fb) {
- int r;
- struct amdgpu_bo *abo;
-
- abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r))
- DRM_ERROR("failed to reserve abo before unpin\n");
- else {
- amdgpu_bo_unpin(abo);
- amdgpu_bo_unreserve(abo);
- }
- }
- /* disable the GRPH */
- dce_v11_0_grph_enable(crtc, false);
-
- amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->mode_info.crtcs[i] &&
- adev->mode_info.crtcs[i]->enabled &&
- i != amdgpu_crtc->crtc_id &&
- amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
- /* another crtc is using this pll, don't turn
- * it off
- */
- goto done;
- }
- }
-
- switch (amdgpu_crtc->pll_id) {
- case ATOM_PPLL0:
- case ATOM_PPLL1:
- case ATOM_PPLL2:
- /* disable the ppll */
- amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
- 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
- break;
- case ATOM_COMBOPHY_PLL0:
- case ATOM_COMBOPHY_PLL1:
- case ATOM_COMBOPHY_PLL2:
- case ATOM_COMBOPHY_PLL3:
- case ATOM_COMBOPHY_PLL4:
- case ATOM_COMBOPHY_PLL5:
- /* disable the ppll */
- amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id,
- 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
- break;
- default:
- break;
- }
-done:
- amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
- amdgpu_crtc->adjusted_clock = 0;
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
-}
-
-static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y, struct drm_framebuffer *old_fb)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
-
- if (!amdgpu_crtc->adjusted_clock)
- return -EINVAL;
-
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM)) {
- struct amdgpu_encoder *amdgpu_encoder =
- to_amdgpu_encoder(amdgpu_crtc->encoder);
- int encoder_mode =
- amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);
-
- /* SetPixelClock calculates the plls and ss values now */
- amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id,
- amdgpu_crtc->pll_id,
- encoder_mode, amdgpu_encoder->encoder_id,
- adjusted_mode->clock, 0, 0, 0, 0,
- amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss);
- } else {
- amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
- }
- amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
- dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
- amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
- amdgpu_atombios_crtc_scaler_setup(crtc);
- dce_v11_0_cursor_reset(crtc);
- /* update the hw mode for dpm */
- amdgpu_crtc->hw_mode = *adjusted_mode;
-
- return 0;
-}
-
-static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_encoder *encoder;
-
- /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- amdgpu_crtc->encoder = encoder;
- amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
- break;
- }
- }
- if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
- return false;
- }
- if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
- return false;
- if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
- return false;
- /* pick pll */
- amdgpu_crtc->pll_id = dce_v11_0_pick_pll(crtc);
- /* if we can't get a PPLL for a non-DP encoder, fail */
- if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
- !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
- return false;
-
- return true;
-}
-
-static int dce_v11_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- return dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
-}
-
-static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, enum mode_set_atomic state)
-{
- return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1);
-}
-
-static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
- .dpms = dce_v11_0_crtc_dpms,
- .mode_fixup = dce_v11_0_crtc_mode_fixup,
- .mode_set = dce_v11_0_crtc_mode_set,
- .mode_set_base = dce_v11_0_crtc_set_base,
- .mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic,
- .prepare = dce_v11_0_crtc_prepare,
- .commit = dce_v11_0_crtc_commit,
- .disable = dce_v11_0_crtc_disable,
- .get_scanout_position = amdgpu_crtc_get_scanout_position,
-};
-
-static void dce_v11_0_panic_flush(struct drm_plane *plane)
-{
- struct drm_framebuffer *fb;
- struct amdgpu_crtc *amdgpu_crtc;
- struct amdgpu_device *adev;
- uint32_t fb_format;
-
- if (!plane->fb)
- return;
-
- fb = plane->fb;
- amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
- adev = drm_to_adev(fb->dev);
-
- /* Disable DC tiling */
- fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
- fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
- WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
-}
-
-static const struct drm_plane_helper_funcs dce_v11_0_drm_primary_plane_helper_funcs = {
- .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
- .panic_flush = dce_v11_0_panic_flush,
-};
-
-static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
-{
- struct amdgpu_crtc *amdgpu_crtc;
-
- amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
- (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
- if (amdgpu_crtc == NULL)
- return -ENOMEM;
-
- drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
-
- drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
- amdgpu_crtc->crtc_id = index;
- adev->mode_info.crtcs[index] = amdgpu_crtc;
-
- amdgpu_crtc->max_cursor_width = 128;
- amdgpu_crtc->max_cursor_height = 128;
- adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
- adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
-
- switch (amdgpu_crtc->crtc_id) {
- case 0:
- default:
- amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
- break;
- case 1:
- amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
- break;
- case 2:
- amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
- break;
- case 3:
- amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
- break;
- case 4:
- amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
- break;
- case 5:
- amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
- break;
- }
-
- amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
- amdgpu_crtc->adjusted_clock = 0;
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
- drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs);
- drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v11_0_drm_primary_plane_helper_funcs);
-
- return 0;
-}
-
-static int dce_v11_0_early_init(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg;
- adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg;
-
- dce_v11_0_set_display_funcs(adev);
-
- adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev);
-
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- adev->mode_info.num_hpd = 6;
- adev->mode_info.num_dig = 9;
- break;
- case CHIP_STONEY:
- adev->mode_info.num_hpd = 6;
- adev->mode_info.num_dig = 9;
- break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- adev->mode_info.num_hpd = 6;
- adev->mode_info.num_dig = 6;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- adev->mode_info.num_hpd = 5;
- adev->mode_info.num_dig = 5;
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
-
- dce_v11_0_set_irq_funcs(adev);
-
- return 0;
-}
-
-static int dce_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
-{
- int r, i;
- struct amdgpu_device *adev = ip_block->adev;
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
- if (r)
- return r;
- }
-
- for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
- if (r)
- return r;
- }
-
- /* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
- if (r)
- return r;
-
- adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
-
- adev_to_drm(adev)->mode_config.async_page_flip = true;
-
- adev_to_drm(adev)->mode_config.max_width = 16384;
- adev_to_drm(adev)->mode_config.max_height = 16384;
-
- adev_to_drm(adev)->mode_config.preferred_depth = 24;
- adev_to_drm(adev)->mode_config.prefer_shadow = 1;
-
- adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
-
- r = amdgpu_display_modeset_create_props(adev);
- if (r)
- return r;
-
- /* allocate crtcs */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = dce_v11_0_crtc_init(adev, i);
- if (r)
- return r;
- }
-
- if (amdgpu_atombios_get_connector_info_from_object_table(adev))
- amdgpu_display_print_display_setup(adev_to_drm(adev));
- else
- return -EINVAL;
-
- /* setup afmt */
- r = dce_v11_0_afmt_init(adev);
- if (r)
- return r;
-
- r = dce_v11_0_audio_init(adev);
- if (r)
- return r;
-
- /* Disable vblank IRQs aggressively for power-saving */
- /* XXX: can this be enabled for DC? */
- adev_to_drm(adev)->vblank_disable_immediate = true;
-
- r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
- if (r)
- return r;
-
- INIT_DELAYED_WORK(&adev->hotplug_work,
- amdgpu_display_hotplug_work_func);
-
- drm_kms_helper_poll_init(adev_to_drm(adev));
-
- adev->mode_info.mode_config_initialized = true;
- return 0;
-}
-
-static int dce_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- drm_edid_free(adev->mode_info.bios_hardcoded_edid);
-
- drm_kms_helper_poll_fini(adev_to_drm(adev));
-
- dce_v11_0_audio_fini(adev);
-
- dce_v11_0_afmt_fini(adev);
-
- drm_mode_config_cleanup(adev_to_drm(adev));
- adev->mode_info.mode_config_initialized = false;
-
- return 0;
-}
-
-static int dce_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
-{
- int i;
- struct amdgpu_device *adev = ip_block->adev;
-
- dce_v11_0_init_golden_registers(adev);
-
- /* disable vga render */
- dce_v11_0_set_vga_render_state(adev, false);
- /* init dig PHYs, disp eng pll */
- amdgpu_atombios_crtc_powergate_init(adev);
- amdgpu_atombios_encoder_init_dig(adev);
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM)) {
- amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
- DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
- amdgpu_atombios_crtc_set_dce_clock(adev, 0,
- DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS);
- } else {
- amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
- }
-
- /* initialize hpd */
- dce_v11_0_hpd_init(adev);
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
- }
-
- dce_v11_0_pageflip_interrupt_init(adev);
-
- return 0;
-}
-
-static int dce_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
-{
- int i;
- struct amdgpu_device *adev = ip_block->adev;
-
- dce_v11_0_hpd_fini(adev);
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
- }
-
- dce_v11_0_pageflip_interrupt_fini(adev);
-
- flush_delayed_work(&adev->hotplug_work);
-
- return 0;
-}
-
-static int dce_v11_0_suspend(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int r;
-
- r = amdgpu_display_suspend_helper(adev);
- if (r)
- return r;
-
- adev->mode_info.bl_level =
- amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
-
- return dce_v11_0_hw_fini(ip_block);
-}
-
-static int dce_v11_0_resume(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
-
- amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
- adev->mode_info.bl_level);
-
- ret = dce_v11_0_hw_init(ip_block);
-
- /* turn on the BL */
- if (adev->mode_info.bl_encoder) {
- u8 bl_level = amdgpu_display_backlight_get_level(adev,
- adev->mode_info.bl_encoder);
- amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
- bl_level);
- }
- if (ret)
- return ret;
-
- return amdgpu_display_resume_helper(adev);
-}
-
-static bool dce_v11_0_is_idle(struct amdgpu_ip_block *ip_block)
-{
- return true;
-}
-
-static int dce_v11_0_soft_reset(struct amdgpu_ip_block *ip_block)
-{
- u32 srbm_soft_reset = 0, tmp;
- struct amdgpu_device *adev = ip_block->adev;
-
- if (dce_v11_0_is_display_hung(adev))
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
-
- if (srbm_soft_reset) {
- tmp = RREG32(mmSRBM_SOFT_RESET);
- tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~srbm_soft_reset;
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- /* Wait a little for things to settle down */
- udelay(50);
- }
- return 0;
-}
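
The soft-reset sequence above is an assert/settle/deassert/settle pulse, with a readback after each write to post it. Schematically, with hypothetical MMIO helpers standing in for RREG32/WREG32/udelay:

#include <stdint.h>

static uint32_t fake_reg;				/* stand-in register */
static uint32_t read_reg(void)    { return fake_reg; }
static void write_reg(uint32_t v) { fake_reg = v; }
static void delay_us(unsigned int us) { (void)us; }	/* no-op here */

static void pulse_reset(uint32_t reset_mask)
{
	uint32_t tmp = read_reg();

	tmp |= reset_mask;	/* assert reset */
	write_reg(tmp);
	read_reg();		/* read back to post the write */
	delay_us(50);

	tmp &= ~reset_mask;	/* deassert */
	write_reg(tmp);
	read_reg();
	delay_us(50);		/* let things settle */
}

int main(void)
{
	pulse_reset(1u << 5);	/* hypothetical reset bit */
	return fake_reg != 0;	/* expect 0: reset bit cleared again */
}
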
-
-static void dce_v11_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
- int crtc,
- enum amdgpu_interrupt_state state)
-{
- u32 lb_interrupt_mask;
-
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VBLANK_INTERRUPT_MASK, 0);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VBLANK_INTERRUPT_MASK, 1);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- default:
- break;
- }
-}
-
-static void dce_v11_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
- int crtc,
- enum amdgpu_interrupt_state state)
-{
- u32 lb_interrupt_mask;
-
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VLINE_INTERRUPT_MASK, 0);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VLINE_INTERRUPT_MASK, 1);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- default:
- break;
- }
-}
-
-static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned hpd,
- enum amdgpu_interrupt_state state)
-{
- u32 tmp;
-
- if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hpd %d\n", hpd);
- return 0;
- }
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static int dce_v11_0_set_crtc_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- switch (type) {
- case AMDGPU_CRTC_IRQ_VBLANK1:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 0, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK2:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 1, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK3:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 2, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK4:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 3, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK5:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 4, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK6:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 5, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE1:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 0, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE2:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 1, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE3:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 2, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE4:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 3, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE5:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 4, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE6:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 5, state);
- break;
- default:
- break;
- }
- return 0;
-}
-
-static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- u32 reg;
-
- if (type >= adev->mode_info.num_crtc) {
- DRM_ERROR("invalid pageflip crtc %d\n", type);
- return -EINVAL;
- }
-
- reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
- if (state == AMDGPU_IRQ_STATE_DISABLE)
- WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
- reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
- else
- WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
- reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
-
- return 0;
-}
-
-static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- unsigned long flags;
- unsigned crtc_id;
- struct amdgpu_crtc *amdgpu_crtc;
- struct amdgpu_flip_work *works;
-
- crtc_id = (entry->src_id - 8) >> 1;
-
- if (crtc_id >= adev->mode_info.num_crtc) {
- DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
- return -EINVAL;
- }
- amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
-
- if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
- GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
- WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
- GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
-
- /* an IRQ can fire during early init, before the crtc is set up */
- if (amdgpu_crtc == NULL)
- return 0;
-
- spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
- works = amdgpu_crtc->pflip_works;
- if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
- DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d)\n",
- amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
- return 0;
- }
-
- /* page flip completed. clean up */
- amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
- amdgpu_crtc->pflip_works = NULL;
-
- /* wake up userspace */
- if (works->event)
- drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
-
- spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
-
- drm_crtc_vblank_put(&amdgpu_crtc->base);
- schedule_work(&works->unpin_work);
-
- return 0;
-}
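
The crtc_id recovery at the top of the handler mirrors the registration loop in sw_init: pageflip sources start at VISLANDS30_IV_SRCID_D1_GRPH_PFLIP (8) and are spaced two apart, so the index comes back out with a subtract-and-halve:

#include <assert.h>

/* Invert the src_id layout used when registering pageflip IRQs:
 * ids 8, 10, 12, 14, 16, 18 map to crtcs 0..5. */
static unsigned int pflip_src_to_crtc(unsigned int src_id)
{
	return (src_id - 8) >> 1;
}

int main(void)
{
	assert(pflip_src_to_crtc(8) == 0);
	assert(pflip_src_to_crtc(18) == 5);
	return 0;
}
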
-
-static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev,
- int hpd)
-{
- u32 tmp;
-
- if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hpd %d\n", hpd);
- return;
- }
-
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
-}
-
-static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
- int crtc)
-{
- u32 tmp;
-
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
- tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
- WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
-}
-
-static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev,
- int crtc)
-{
- u32 tmp;
-
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
- tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
- WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
-}
-
-static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- unsigned crtc = entry->src_id - 1;
- uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
- unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
- crtc);
-
- switch (entry->src_data[0]) {
- case 0: /* vblank */
- if (disp_int & interrupt_status_offsets[crtc].vblank)
- dce_v11_0_crtc_vblank_int_ack(adev, crtc);
- else
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- if (amdgpu_irq_enabled(adev, source, irq_type))
- drm_handle_vblank(adev_to_drm(adev), crtc);
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
-
- break;
- case 1: /* vline */
- if (disp_int & interrupt_status_offsets[crtc].vline)
- dce_v11_0_crtc_vline_int_ack(adev, crtc);
- else
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-
- break;
- default:
- DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
- break;
- }
-
- return 0;
-}
-
-static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- uint32_t disp_int, mask;
- unsigned hpd;
-
- if (entry->src_data[0] >= adev->mode_info.num_hpd) {
- DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
- return 0;
- }
-
- hpd = entry->src_data[0];
- disp_int = RREG32(interrupt_status_offsets[hpd].reg);
- mask = interrupt_status_offsets[hpd].hpd;
-
- if (disp_int & mask) {
- dce_v11_0_hpd_int_ack(adev, hpd);
- schedule_delayed_work(&adev->hotplug_work, 0);
- DRM_DEBUG("IH: HPD%d\n", hpd + 1);
- }
-
- return 0;
-}
-
-static int dce_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int dce_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
- .name = "dce_v11_0",
- .early_init = dce_v11_0_early_init,
- .sw_init = dce_v11_0_sw_init,
- .sw_fini = dce_v11_0_sw_fini,
- .hw_init = dce_v11_0_hw_init,
- .hw_fini = dce_v11_0_hw_fini,
- .suspend = dce_v11_0_suspend,
- .resume = dce_v11_0_resume,
- .is_idle = dce_v11_0_is_idle,
- .soft_reset = dce_v11_0_soft_reset,
- .set_clockgating_state = dce_v11_0_set_clockgating_state,
- .set_powergating_state = dce_v11_0_set_powergating_state,
-};
-
-static void dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
- amdgpu_encoder->pixel_clock = adjusted_mode->clock;
-
- /* need to call this here rather than in prepare() since we need some crtc info */
- amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- /* set scaler clears this on some chips */
- dce_v11_0_set_interleave(encoder->crtc, mode);
-
- if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
- dce_v11_0_afmt_enable(encoder, true);
- dce_v11_0_afmt_setmode(encoder, adjusted_mode);
- }
-}
-
-static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
-{
- struct amdgpu_device *adev = drm_to_adev(encoder->dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
-
- if ((amdgpu_encoder->active_device &
- (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
- (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
- ENCODER_OBJECT_ID_NONE)) {
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- if (dig) {
- dig->dig_encoder = dce_v11_0_pick_dig_encoder(encoder);
- if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
- dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
- }
- }
-
- amdgpu_atombios_scratch_regs_lock(adev, true);
-
- if (connector) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- /* select the clock/data port if it uses a router */
- if (amdgpu_connector->router.cd_valid)
- amdgpu_i2c_router_select_cd_port(amdgpu_connector);
-
- /* turn eDP panel on for mode set */
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- amdgpu_atombios_encoder_set_edp_panel_power(connector,
- ATOM_TRANSMITTER_ACTION_POWER_ON);
- }
-
- /* this is needed for the pll/ss setup to work correctly in some cases */
- amdgpu_atombios_encoder_set_crtc_source(encoder);
- /* set up the FMT blocks */
- dce_v11_0_program_fmt(encoder);
-}
-
-static void dce_v11_0_encoder_commit(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
-
- /* need to call this here as we need the crtc set up */
- amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
- amdgpu_atombios_scratch_regs_lock(adev, false);
-}
-
-static void dce_v11_0_encoder_disable(struct drm_encoder *encoder)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig;
-
- amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- if (amdgpu_atombios_encoder_is_digital(encoder)) {
- if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
- dce_v11_0_afmt_enable(encoder, false);
- dig = amdgpu_encoder->enc_priv;
- dig->dig_encoder = -1;
- }
- amdgpu_encoder->active_device = 0;
-}
-
-/* these are handled by the primary encoders */
-static void dce_v11_0_ext_prepare(struct drm_encoder *encoder)
-{
-
-}
-
-static void dce_v11_0_ext_commit(struct drm_encoder *encoder)
-{
-
-}
-
-static void
-dce_v11_0_ext_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-
-}
-
-static void dce_v11_0_ext_disable(struct drm_encoder *encoder)
-{
-
-}
-
-static void
-dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode)
-{
-
-}
-
-static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = {
- .dpms = dce_v11_0_ext_dpms,
- .prepare = dce_v11_0_ext_prepare,
- .mode_set = dce_v11_0_ext_mode_set,
- .commit = dce_v11_0_ext_commit,
- .disable = dce_v11_0_ext_disable,
- /* no detect for TMDS/LVDS yet */
-};
-
-static const struct drm_encoder_helper_funcs dce_v11_0_dig_helper_funcs = {
- .dpms = amdgpu_atombios_encoder_dpms,
- .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
- .prepare = dce_v11_0_encoder_prepare,
- .mode_set = dce_v11_0_encoder_mode_set,
- .commit = dce_v11_0_encoder_commit,
- .disable = dce_v11_0_encoder_disable,
- .detect = amdgpu_atombios_encoder_dig_detect,
-};
-
-static const struct drm_encoder_helper_funcs dce_v11_0_dac_helper_funcs = {
- .dpms = amdgpu_atombios_encoder_dpms,
- .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
- .prepare = dce_v11_0_encoder_prepare,
- .mode_set = dce_v11_0_encoder_mode_set,
- .commit = dce_v11_0_encoder_commit,
- .detect = amdgpu_atombios_encoder_dac_detect,
-};
-
-static void dce_v11_0_encoder_destroy(struct drm_encoder *encoder)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
- kfree(amdgpu_encoder->enc_priv);
- drm_encoder_cleanup(encoder);
- kfree(amdgpu_encoder);
-}
-
-static const struct drm_encoder_funcs dce_v11_0_encoder_funcs = {
- .destroy = dce_v11_0_encoder_destroy,
-};
-
-static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
- uint32_t encoder_enum,
- uint32_t supported_device,
- u16 caps)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_encoder *encoder;
- struct amdgpu_encoder *amdgpu_encoder;
-
- /* see if we already added it */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- amdgpu_encoder = to_amdgpu_encoder(encoder);
- if (amdgpu_encoder->encoder_enum == encoder_enum) {
- amdgpu_encoder->devices |= supported_device;
- return;
- }
-
- }
-
- /* add a new one */
- amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
- if (!amdgpu_encoder)
- return;
-
- encoder = &amdgpu_encoder->base;
- switch (adev->mode_info.num_crtc) {
- case 1:
- encoder->possible_crtcs = 0x1;
- break;
- case 2:
- default:
- encoder->possible_crtcs = 0x3;
- break;
- case 3:
- encoder->possible_crtcs = 0x7;
- break;
- case 4:
- encoder->possible_crtcs = 0xf;
- break;
- case 5:
- encoder->possible_crtcs = 0x1f;
- break;
- case 6:
- encoder->possible_crtcs = 0x3f;
- break;
- }
-
- amdgpu_encoder->enc_priv = NULL;
-
- amdgpu_encoder->encoder_enum = encoder_enum;
- amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- amdgpu_encoder->devices = supported_device;
- amdgpu_encoder->rmx_type = RMX_OFF;
- amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
- amdgpu_encoder->is_ext_encoder = false;
- amdgpu_encoder->caps = caps;
-
- switch (amdgpu_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
- if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- amdgpu_encoder->rmx_type = RMX_FULL;
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
- amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
- } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
- } else {
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
- }
- drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_SI170B:
- case ENCODER_OBJECT_ID_CH7303:
- case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
- case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
- case ENCODER_OBJECT_ID_TITFP513:
- case ENCODER_OBJECT_ID_VT1623:
- case ENCODER_OBJECT_ID_HDMI_SI1930:
- case ENCODER_OBJECT_ID_TRAVIS:
- case ENCODER_OBJECT_ID_NUTMEG:
- /* these are handled by the primary encoders */
- amdgpu_encoder->is_ext_encoder = true;
- if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
- else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- else
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs);
- break;
- }
-}
-
-static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
- .bandwidth_update = &dce_v11_0_bandwidth_update,
- .vblank_get_counter = &dce_v11_0_vblank_get_counter,
- .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
- .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
- .hpd_sense = &dce_v11_0_hpd_sense,
- .hpd_set_polarity = &dce_v11_0_hpd_set_polarity,
- .hpd_get_gpio_reg = &dce_v11_0_hpd_get_gpio_reg,
- .page_flip = &dce_v11_0_page_flip,
- .page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos,
- .add_encoder = &dce_v11_0_encoder_add,
- .add_connector = &amdgpu_connector_add,
-};
-
-static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
-{
- adev->mode_info.funcs = &dce_v11_0_display_funcs;
-}
-
-static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = {
- .set = dce_v11_0_set_crtc_irq_state,
- .process = dce_v11_0_crtc_irq,
-};
-
-static const struct amdgpu_irq_src_funcs dce_v11_0_pageflip_irq_funcs = {
- .set = dce_v11_0_set_pageflip_irq_state,
- .process = dce_v11_0_pageflip_irq,
-};
-
-static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = {
- .set = dce_v11_0_set_hpd_irq_state,
- .process = dce_v11_0_hpd_irq,
-};
-
-static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
-{
- if (adev->mode_info.num_crtc > 0)
- adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
- else
- adev->crtc_irq.num_types = 0;
- adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs;
-
- adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
- adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs;
-
- adev->hpd_irq.num_types = adev->mode_info.num_hpd;
- adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
-}
-
-const struct amdgpu_ip_block_version dce_v11_0_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
-};
-
-const struct amdgpu_ip_block_version dce_v11_2_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
-};
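
Note on the removed encoder code above: the num_crtc switch in dce_v11_0_encoder_add() enumerates what is simply a low-bit mask. A minimal sketch, assuming a hypothetical helper name (possible_crtcs_mask() is not an amdgpu function):

	/* Illustration only: the removed switch maps num_crtc 1..6 to
	 * 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, i.e. one bit per CRTC. */
	static inline u32 possible_crtcs_mask(int num_crtc)
	{
		return (1u << num_crtc) - 1;
	}
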
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 276c025c4c03..acc887a58518 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1034,7 +1034,6 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
@@ -1451,17 +1450,12 @@ static int dce_v6_0_audio_init(struct amdgpu_device *adev)
static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{
- int i;
-
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
adev->mode_info.audio.enabled = false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index e62ccf9eb73d..2ccd6aad8dd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1096,8 +1096,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
- amdgpu_crtc->wm_low = latency_watermark_b;
+
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
@@ -1443,17 +1442,12 @@ static int dce_v8_0_audio_init(struct amdgpu_device *adev)
static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
- int i;
-
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
adev->mode_info.audio.enabled = false;
}
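
For readability, the end state of audio teardown after the dce_v6 and dce_v8 hunks above: the per-pin disable loop is gone, so fini only clears the enabled flag (audio_fini_sketch is an illustrative name; the body matches the remaining diff context):

	static void audio_fini_sketch(struct amdgpu_device *adev)
	{
		if (!amdgpu_audio)
			return;
		if (!adev->mode_info.audio.enabled)
			return;
		/* Teardown no longer calls audio_enable() on each pin. */
		adev->mode_info.audio.enabled = false;
	}
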
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 75ea071744eb..d75b9940f248 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4075,7 +4075,7 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *f = NULL;
unsigned int index;
uint64_t gpu_addr;
- volatile uint32_t *cpu_ptr;
+ uint32_t *cpu_ptr;
long r;
memset(&ib, 0, sizeof(ib));
@@ -4322,8 +4322,7 @@ static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
int ctx_reg_offset;
@@ -4952,11 +4951,16 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
}
- /* TODO: Add queue reset mask when FW fully supports it */
+
adev->gfx.gfx_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+ if (!amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ }
r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0);
if (r) {
@@ -7664,19 +7668,17 @@ static int gfx_v10_0_soft_reset(struct amdgpu_ip_block *ip_block)
/* Disable MEC parsing/prefetching */
gfx_v10_0_cp_compute_enable(adev, false);
- if (grbm_soft_reset) {
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- }
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
@@ -9046,21 +9048,6 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask);
}
-static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
- unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t value = 0;
-
- value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
- value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
- value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
- value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- WREG32_SOC15(GC, 0, mmSQ_CMD, value);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-}
-
static void
gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -9522,7 +9509,9 @@ static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
amdgpu_ring_insert_nop(ring, num_nop - 1);
}
-static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
@@ -9532,15 +9521,14 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
u64 addr;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
- if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7 + kiq->pmf->map_queues_size)) {
+ if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7)) {
spin_unlock_irqrestore(&kiq->ring_lock, flags);
return -ENOMEM;
}
@@ -9560,12 +9548,9 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
0, 1, 0x20);
gfx_v10_0_ring_emit_reg_wait(kiq_ring,
SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffffffff);
- kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r)
return r;
@@ -9575,11 +9560,25 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+ r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
- unsigned int vmid)
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
@@ -9587,12 +9586,11 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
unsigned long flags;
int i, r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
@@ -9603,9 +9601,8 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
0, 0);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r)
return r;
@@ -9641,13 +9638,12 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
}
kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r)
return r;
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -9882,7 +9878,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
.reset = gfx_v10_0_reset_kgq,
.emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader,
@@ -9923,7 +9918,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
.reset = gfx_v10_0_reset_kcq,
.emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader,
@@ -9958,6 +9952,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
};
static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
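
A pattern repeated across the reset hunks in this series (gfx_v10 above; gfx_v9 and gfx_v9.4.3 below): amdgpu_ring_test_ring(kiq_ring) now runs before kiq->ring_lock is released, and the unmap and map submissions are allocated separately. Condensed from the hunks themselves, error paths trimmed:

	/* KIQ submission shape after this patch: commit and test the KIQ
	 * ring while still holding kiq->ring_lock, so no other submitter
	 * can interleave between amdgpu_ring_commit() and the ring test. */
	spin_lock_irqsave(&kiq->ring_lock, flags);
	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
		spin_unlock_irqrestore(&kiq->ring_lock, flags);
		return -ENOMEM;
	}
	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0);
	amdgpu_ring_commit(kiq_ring);
	r = amdgpu_ring_test_ring(kiq_ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
	if (r)
		return r;
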
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index afd6d59164bf..8a2ee2de390f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -85,6 +85,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_kicker.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
@@ -602,7 +603,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *f = NULL;
unsigned index;
uint64_t gpu_addr;
- volatile uint32_t *cpu_ptr;
+ uint32_t *cpu_ptr;
long r;
/* MES KIQ fw hasn't indirect buffer support for now */
@@ -759,6 +760,10 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
AMDGPU_UCODE_REQUIRED,
"amdgpu/gc_11_0_0_rlc_1.bin");
+ else if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_rlc_kicker.bin", ucode_prefix);
else
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
AMDGPU_UCODE_REQUIRED,
@@ -845,8 +850,7 @@ static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
int ctx_reg_offset;
@@ -1607,9 +1611,9 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
if (!adev->gfx.disable_uq &&
- adev->gfx.me_fw_version >= 2390 &&
- adev->gfx.pfp_fw_version >= 2530 &&
- adev->gfx.mec_fw_version >= 2600 &&
+ adev->gfx.me_fw_version >= 2420 &&
+ adev->gfx.pfp_fw_version >= 2580 &&
+ adev->gfx.mec_fw_version >= 2650 &&
adev->mes.fw_version[0] >= 120) {
adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
@@ -1649,6 +1653,21 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
break;
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.pfp_fw_version >= 102 &&
+ adev->gfx.mec_fw_version >= 66 &&
+ adev->mes.fw_version[0] >= 128) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
@@ -1801,12 +1820,19 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
if ((adev->gfx.me_fw_version >= 2280) &&
- (adev->gfx.mec_fw_version >= 2410)) {
- adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
- adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ (adev->gfx.mec_fw_version >= 2410) &&
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
}
break;
default:
+ if (!amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ }
break;
}
@@ -2414,7 +2440,7 @@ static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev)
if (version_minor == 3)
gfx_v11_0_load_rlcp_rlcv_microcode(adev);
}
-
+
return 0;
}
@@ -3862,7 +3888,7 @@ static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
memcpy(fw, fw_data, fw_size);
-
+
amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
@@ -4119,6 +4145,8 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#endif
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
+ if (!prop->kernel_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -4271,8 +4299,10 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
prop->allow_tunneling);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->kernel_queue) {
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ }
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
@@ -4629,8 +4659,7 @@ static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
/* TODO investigate why this and the hdp flush above is needed,
@@ -5835,8 +5864,6 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
- BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
-
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (vmid << 24);
@@ -5847,9 +5874,9 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
if (flags & AMDGPU_IB_PREEMPTED)
control |= INDIRECT_BUFFER_PRE_RESUME(1);
- if (vmid)
+ if (vmid && !ring->adev->gfx.rs64_enable)
gfx_v11_0_ring_emit_de_meta(ring,
- (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
+ !amdgpu_sriov_vf(ring->adev) && (flags & AMDGPU_IB_PREEMPTED));
}
amdgpu_ring_write(ring, header);
@@ -6278,21 +6305,6 @@ static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask, 0x20);
}
-static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
- unsigned vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t value = 0;
-
- value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
- value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
- value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
- value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- WREG32_SOC15(GC, 0, regSQ_CMD, value);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-}
-
static void
gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -6806,13 +6818,14 @@ static int gfx_v11_reset_gfx_pipe(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
if (r) {
@@ -6835,7 +6848,7 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring)
@@ -6968,13 +6981,14 @@ static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
int r = 0;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
@@ -6995,7 +7009,7 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -7231,7 +7245,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.emit_wreg = gfx_v11_0_ring_emit_wreg,
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
.reset = gfx_v11_0_reset_kgq,
.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
@@ -7273,7 +7286,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
.emit_wreg = gfx_v11_0_ring_emit_wreg,
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
.reset = gfx_v11_0_reset_kcq,
.emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
@@ -7308,6 +7320,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
.emit_wreg = gfx_v11_0_ring_emit_wreg,
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
};
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
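
The gfx_v11 reset entry points (and gfx_v12 below) now take the timed-out fence and bracket the backend-specific reset with a helper pair. Minimal sketch of the resulting shape, condensed from the hunks above (reset_kgq_sketch is an illustrative name; intermediate error logging omitted):

	static int reset_kgq_sketch(struct amdgpu_ring *ring, unsigned int vmid,
				    struct amdgpu_fence *timedout_fence)
	{
		int r;

		/* Hand the timed-out fence to the common reset bookkeeping. */
		amdgpu_ring_reset_helper_begin(ring, timedout_fence);

		r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
		if (r)
			return r;

		/* Common post-reset path; replaces the old bare
		 * amdgpu_ring_test_ring(ring) return. */
		return amdgpu_ring_reset_helper_end(ring, timedout_fence);
	}
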
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 1234c8d64e20..d01d2712cf57 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -79,6 +79,7 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc_kicker.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_toc.bin");
static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = {
@@ -496,7 +497,7 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *f = NULL;
unsigned index;
uint64_t gpu_addr;
- volatile uint32_t *cpu_ptr;
+ uint32_t *cpu_ptr;
long r;
/* MES KIQ fw hasn't indirect buffer support for now */
@@ -586,7 +587,7 @@ out:
static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
{
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
uint16_t version_major;
@@ -613,9 +614,14 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
if (!amdgpu_sriov_vf(adev)) {
- err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
- AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_rlc.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_rlc_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_rlc.bin", ucode_prefix);
if (err)
goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
@@ -679,8 +685,7 @@ static u32 gfx_v12_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0, clustercount = 0, i;
const struct cs_section_def *sect = NULL;
@@ -1542,10 +1547,15 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
if ((adev->gfx.me_fw_version >= 2660) &&
- (adev->gfx.mec_fw_version >= 2920)) {
- adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
- adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ (adev->gfx.mec_fw_version >= 2920) &&
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
}
+ break;
+ default:
+ break;
}
if (!adev->enable_mes_kiq) {
@@ -3016,6 +3026,8 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#endif
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
+ if (!prop->kernel_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -3165,8 +3177,10 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->kernel_queue) {
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ }
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
@@ -3510,8 +3524,7 @@ static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev)
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
/* TODO investigate why this and the hdp flush above is needed,
@@ -4407,8 +4420,6 @@ static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
- BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
-
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (vmid << 24);
@@ -4690,21 +4701,6 @@ static void gfx_v12_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask, 0x20);
}
-static void gfx_v12_0_ring_soft_recovery(struct amdgpu_ring *ring,
- unsigned vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t value = 0;
-
- value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
- value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
- value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
- value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- WREG32_SOC15(GC, 0, regSQ_CMD, value);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-}
-
static void
gfx_v12_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -5307,13 +5303,14 @@ static int gfx_v12_reset_gfx_pipe(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
if (r) {
@@ -5335,7 +5332,7 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring)
@@ -5421,13 +5418,14 @@ static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
+static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
@@ -5448,7 +5446,7 @@ static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static void gfx_v12_0_ring_begin_use(struct amdgpu_ring *ring)
@@ -5526,7 +5524,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
.emit_wreg = gfx_v12_0_ring_emit_wreg,
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
.reset = gfx_v12_0_reset_kgq,
.emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
@@ -5565,7 +5562,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
.emit_wreg = gfx_v12_0_ring_emit_wreg,
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
- .soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
.reset = gfx_v12_0_reset_kcq,
.emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
@@ -5600,6 +5596,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = {
.emit_wreg = gfx_v12_0_ring_emit_wreg,
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush,
};
static void gfx_v12_0_set_ring_funcs(struct amdgpu_device *adev)
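
The soft_recovery callbacks deleted above (gfx_v10/v11/v12) were thin REG_SET_FIELD/WREG32 sequences around SQ_CMD. For reference, the approximate shape of the REG_SET_FIELD macro used throughout these files; the mask/shift helpers come from the generated register headers, and this is a reminder of semantics rather than the exact definition:

	/* Read-modify-write of one named field: clear the field in
	 * orig_val, then OR in field_val shifted into position. */
	#define REG_SET_FIELD(orig_val, reg, field, field_val)              \
		(((orig_val) & ~REG_FIELD_MASK(reg, field)) |               \
		 (REG_FIELD_MASK(reg, field) &                              \
		  ((field_val) << REG_FIELD_SHIFT(reg, field))))
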
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 70d7a1f434c4..80565392313f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -86,7 +86,7 @@ MODULE_FIRMWARE("amdgpu/hainan_ce.bin");
MODULE_FIRMWARE("amdgpu/hainan_rlc.bin");
static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
-static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer);
//static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v6_0_init_pg(struct amdgpu_device *adev);
@@ -2354,7 +2354,7 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
{
const u32 *src_ptr;
- volatile u32 *dst_ptr;
+ u32 *dst_ptr;
u32 dws;
u64 reg_list_mc_addr;
const struct cs_section_def *cs_data;
@@ -2855,8 +2855,7 @@ static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
@@ -3103,6 +3102,11 @@ static int gfx_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+ adev->gfx.gfx_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index da0534ff1271..2b7aba22ecc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -883,7 +883,7 @@ static const u32 kalindi_rlc_save_restore_register_list[] = {
};
static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
-static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
@@ -3882,8 +3882,7 @@ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
@@ -4400,6 +4399,11 @@ static int gfx_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
gfx_v7_0_gpu_early_init(adev);
+ adev->gfx.gfx_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
return r;
}
@@ -4884,76 +4888,6 @@ static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
}
-static void gfx_v7_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
- int mem_space, int opt, uint32_t addr0,
- uint32_t addr1, uint32_t ref, uint32_t mask,
- uint32_t inv)
-{
- amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
- amdgpu_ring_write(ring,
- /* memory (1) or register (0) */
- (WAIT_REG_MEM_MEM_SPACE(mem_space) |
- WAIT_REG_MEM_OPERATION(opt) | /* wait */
- WAIT_REG_MEM_FUNCTION(3) | /* equal */
- WAIT_REG_MEM_ENGINE(eng_sel)));
-
- if (mem_space)
- BUG_ON(addr0 & 0x3); /* Dword align */
- amdgpu_ring_write(ring, addr0);
- amdgpu_ring_write(ring, addr1);
- amdgpu_ring_write(ring, ref);
- amdgpu_ring_write(ring, mask);
- amdgpu_ring_write(ring, inv); /* poll interval */
-}
-
-static void gfx_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
- uint32_t val, uint32_t mask)
-{
- gfx_v7_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
-}
-
-static int gfx_v7_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- unsigned long flags;
- u32 tmp;
- int r;
-
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
-
- if (amdgpu_ring_alloc(kiq_ring, 5)) {
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
- return -ENOMEM;
- }
-
- tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
- gfx_v7_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp);
- amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r)
- return r;
-
- if (amdgpu_ring_alloc(ring, 7 + 12 + 5))
- return -ENOMEM;
- gfx_v7_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr,
- ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
- gfx_v7_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff);
- gfx_v7_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0);
-
- return amdgpu_ring_test_ring(ring);
-}
-
static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.name = "gfx_v7_0",
.early_init = gfx_v7_0_early_init,
@@ -5003,7 +4937,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.emit_wreg = gfx_v7_0_ring_emit_wreg,
.soft_recovery = gfx_v7_0_ring_soft_recovery,
.emit_mem_sync = gfx_v7_0_emit_mem_sync,
- .reset = gfx_v7_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 5ee2237d8ee8..1c87375e1dd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1220,8 +1220,7 @@ out:
return err;
}
-static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
@@ -2024,6 +2023,11 @@ static int gfx_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ adev->gfx.gfx_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
return 0;
}
@@ -4640,6 +4644,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
amdgpu_ring_clear_ring(ring);
}
return 0;
@@ -6339,34 +6344,6 @@ static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
amdgpu_ring_write(ring, val);
}
-static void gfx_v8_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
- int mem_space, int opt, uint32_t addr0,
- uint32_t addr1, uint32_t ref, uint32_t mask,
- uint32_t inv)
-{
- amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
- amdgpu_ring_write(ring,
- /* memory (1) or register (0) */
- (WAIT_REG_MEM_MEM_SPACE(mem_space) |
- WAIT_REG_MEM_OPERATION(opt) | /* wait */
- WAIT_REG_MEM_FUNCTION(3) | /* equal */
- WAIT_REG_MEM_ENGINE(eng_sel)));
-
- if (mem_space)
- BUG_ON(addr0 & 0x3); /* Dword align */
- amdgpu_ring_write(ring, addr0);
- amdgpu_ring_write(ring, addr1);
- amdgpu_ring_write(ring, ref);
- amdgpu_ring_write(ring, mask);
- amdgpu_ring_write(ring, inv); /* poll interval */
-}
-
-static void gfx_v8_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
- uint32_t val, uint32_t mask)
-{
- gfx_v8_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
-}
-
static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
{
struct amdgpu_device *adev = ring->adev;
@@ -6843,48 +6820,6 @@ static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
}
-static int gfx_v8_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- unsigned long flags;
- u32 tmp;
- int r;
-
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
-
- if (amdgpu_ring_alloc(kiq_ring, 5)) {
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
- return -ENOMEM;
- }
-
- tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
- gfx_v8_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp);
- amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r)
- return r;
-
- if (amdgpu_ring_alloc(ring, 7 + 12 + 5))
- return -ENOMEM;
- gfx_v8_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr,
- ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
- gfx_v8_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff);
- gfx_v8_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0);
-
- return amdgpu_ring_test_ring(ring);
-}
-
static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
@@ -6950,7 +6885,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.emit_wreg = gfx_v8_0_ring_emit_wreg,
.soft_recovery = gfx_v8_0_ring_soft_recovery,
.emit_mem_sync = gfx_v8_0_emit_mem_sync,
- .reset = gfx_v8_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -7010,6 +6944,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_rreg = gfx_v8_0_ring_emit_rreg,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
+ .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
};
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
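
The gfx_v8 kcq_init_queue hunk above clears the CPU-side write-pointer mirror along with ring->wptr. Restore sequence from the hunk, with the rationale as a comment (the staleness reasoning is an inference, not stated in the patch):

	/* Reset both the driver's wptr and the CPU-visible mirror at
	 * ring->wptr_cpu_addr; clearing only ring->wptr would leave a
	 * stale value visible to whatever reads the mirror after the
	 * ring buffer is restored. */
	ring->wptr = 0;
	atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
	amdgpu_ring_clear_ring(ring);
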
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index d377a7c57d5e..0148d7ff34d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1648,8 +1648,7 @@ static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
@@ -2235,6 +2234,25 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 3, 0):
+ adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 167 &&
+ adev->gfx.pfp_fw_version >= 196 &&
+ adev->gfx.mec_fw_version >= 474) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
case IP_VERSION(9, 4, 2):
adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
@@ -2391,6 +2409,8 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+ if (!amdgpu_sriov_vf(adev) && !adev->debug_disable_gpu_ring_reset)
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
if (r) {
@@ -2629,6 +2649,9 @@ static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
!READ_ONCE(adev->barrier_has_auto_waitcnt));
WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
break;
+ case IP_VERSION(9, 4, 2):
+ gfx_v9_4_2_init_sq(adev);
+ break;
default:
break;
}
@@ -4151,19 +4174,17 @@ static int gfx_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
/* Disable MEC parsing/prefetching */
gfx_v9_0_cp_compute_enable(adev, false);
- if (grbm_soft_reset) {
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- }
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
@@ -7152,53 +7173,9 @@ static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
amdgpu_ring_insert_nop(ring, num_nop - 1);
}
-static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
-{
- struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- unsigned long flags;
- u32 tmp;
- int r;
-
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
-
- if (amdgpu_ring_alloc(kiq_ring, 5)) {
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
- return -ENOMEM;
- }
-
- tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
- gfx_v9_0_ring_emit_wreg(kiq_ring,
- SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp);
- amdgpu_ring_commit(kiq_ring);
-
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r)
- return r;
-
- if (amdgpu_ring_alloc(ring, 7 + 7 + 5))
- return -ENOMEM;
- gfx_v9_0_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
- ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
- gfx_v9_0_ring_emit_reg_wait(ring,
- SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffff);
- gfx_v9_0_ring_emit_wreg(ring,
- SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0);
-
- return amdgpu_ring_test_ring(ring);
-}
-
static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
- unsigned int vmid)
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
@@ -7206,12 +7183,11 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
unsigned long flags;
int i, r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
@@ -7261,13 +7237,13 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
}
kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r) {
DRM_ERROR("fail to remap queue\n");
return r;
}
- return amdgpu_ring_test_ring(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -7477,7 +7453,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v9_0_ring_soft_recovery,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
- .reset = gfx_v9_0_reset_kgq,
.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
@@ -7611,6 +7586,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
};
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index c48cd47b531f..8058ea91ecaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -748,6 +748,18 @@ void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
}
}
+void gfx_v9_4_2_init_sq(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ if (adev->gfx.mec_fw_version >= 98) {
+ adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
+ data = RREG32_SOC15(GC, 0, regSQ_CONFIG1);
+ data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1);
+ WREG32_SOC15(GC, 0, regSQ_CONFIG1, data);
+ }
+}
+
void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
uint32_t first_vmid,
uint32_t last_vmid)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
index 7584624b641c..a603724c1dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
@@ -28,6 +28,7 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
uint32_t first_vmid, uint32_t last_vmid);
void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
uint32_t die_id);
+void gfx_v9_4_2_init_sq(struct amdgpu_device *adev);
void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev);
int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index c233edf60569..cbb74ffc4792 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -1148,13 +1148,17 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
- if (adev->gfx.mec_fw_version >= 155) {
+ if ((adev->gfx.mec_fw_version >= 155) &&
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
}
break;
case IP_VERSION(9, 5, 0):
- if (adev->gfx.mec_fw_version >= 21) {
+ if ((adev->gfx.mec_fw_version >= 21) &&
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
}
@@ -1349,7 +1353,9 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
/* ToDo: GC 9.4.4 */
case IP_VERSION(9, 4, 3):
- if (adev->gfx.mec_fw_version >= 184)
+ if (adev->gfx.mec_fw_version >= 184 &&
+ (amdgpu_sriov_reg_access_sq_config(adev) ||
+ !amdgpu_sriov_vf(adev)))
adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
break;
case IP_VERSION(9, 5, 0):
@@ -2148,7 +2154,8 @@ static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
return 0;
}
-static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore)
+static void gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id,
+ bool restore)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
@@ -2182,8 +2189,6 @@ static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, b
atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
amdgpu_ring_clear_ring(ring);
}
-
- return 0;
}
static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
@@ -2216,7 +2221,7 @@ static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
{
struct amdgpu_ring *ring;
- int i, r;
+ int i;
gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);
@@ -2224,9 +2229,7 @@ static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
ring = &adev->gfx.compute_ring[i + xcc_id *
adev->gfx.num_compute_rings];
- r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
- if (r)
- return r;
+ gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
}
return amdgpu_gfx_enable_kcq(adev, xcc_id);
@@ -2288,7 +2291,9 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
} else {
- if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
+ if (adev->in_suspend)
+ amdgpu_xcp_restore_partition_mode(adev->xcp_mgr);
+ else if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
AMDGPU_XCP_FL_NONE) ==
AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
r = amdgpu_xcp_switch_partition_mode(
@@ -2457,19 +2462,17 @@ static int gfx_v9_4_3_soft_reset(struct amdgpu_ip_block *ip_block)
/* Disable MEC parsing/prefetching */
gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);
- if (grbm_soft_reset) {
- tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
- }
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
@@ -3552,20 +3555,21 @@ static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring)
}
static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
- unsigned int vmid)
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
struct amdgpu_ring *kiq_ring = &kiq->ring;
+ int reset_mode = AMDGPU_RESET_TYPE_PER_QUEUE;
unsigned long flags;
int r;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
@@ -3591,19 +3595,19 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n");
pipe_reset:
- if(r) {
+ if (r) {
+ if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE))
+ return -EOPNOTSUPP;
r = gfx_v9_4_3_reset_hw_pipe(ring);
+ reset_mode = AMDGPU_RESET_TYPE_PER_PIPE;
dev_info(adev->dev, "ring: %s pipe reset :%s\n", ring->name,
r ? "failed" : "successfully");
if (r)
return r;
}
- r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
- if (r) {
- dev_err(adev->dev, "fail to init kcq\n");
- return r;
- }
+ gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
if (r) {
@@ -3612,14 +3616,24 @@ pipe_reset:
}
kiq->pmf->kiq_map_queues(kiq_ring, ring);
amdgpu_ring_commit(kiq_ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
r = amdgpu_ring_test_ring(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r) {
+ if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE)
+ goto pipe_reset;
+
dev_err(adev->dev, "fail to remap queue\n");
return r;
}
- return amdgpu_ring_test_ring(ring);
+
+ if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE) {
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ goto pipe_reset;
+ }
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
enum amdgpu_gfx_cp_ras_mem_id {
@@ -4782,6 +4796,7 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
};
static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
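Taken together, the reset_kcq changes above implement a two-level recovery policy: attempt a per-queue reset first, and escalate to a pipe reset when the unmap, the remap, or the post-reset ring test fails. A condensed sketch of that control flow; queue_unmap_and_wait() and queue_remap_and_test() are hypothetical stand-ins for the locked KIQ submissions in the patch:

static int reset_kcq_sketch(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int mode = AMDGPU_RESET_TYPE_PER_QUEUE;
	int r;

	r = queue_unmap_and_wait(ring);		/* hypothetical helper */
pipe_reset:
	if (r) {
		/* queue-level reset failed: escalate if supported */
		if (!(adev->gfx.compute_supported_reset &
		      AMDGPU_RESET_TYPE_PER_PIPE))
			return -EOPNOTSUPP;
		r = gfx_v9_4_3_reset_hw_pipe(ring);
		if (r)
			return r;
		mode = AMDGPU_RESET_TYPE_PER_PIPE;
	}
	gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
	r = queue_remap_and_test(ring);		/* hypothetical helper */
	if (r && mode == AMDGPU_RESET_TYPE_PER_QUEUE)
		goto pipe_reset;		/* retry at pipe scope */
	return r;
}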
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index cb25f7f0dfc1..6c03bf9f1ae8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -74,6 +74,8 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev,
static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev,
uint32_t xcc_mask)
{
+ uint64_t gart_start = amdgpu_virt_xgmi_migrate_enabled(adev) ?
+ adev->gmc.vram_start : adev->gmc.fb_start;
uint64_t pt_base;
int i;
@@ -91,10 +93,10 @@ static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev,
if (adev->gmc.pdb0_bo) {
WREG32_SOC15(GC, GET_INST(GC, i),
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->gmc.fb_start >> 12));
+ (u32)(gart_start >> 12));
WREG32_SOC15(GC, GET_INST(GC, i),
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->gmc.fb_start >> 44));
+ (u32)(gart_start >> 44));
WREG32_SOC15(GC, GET_INST(GC, i),
regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
@@ -180,7 +182,7 @@ gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev,
/* In the case squeezing vram into GART aperture, we don't use
* FB aperture and AGP aperture. Disable them.
*/
- if (adev->gmc.pdb0_bo) {
+ if (adev->gmc.pdb0_bo && adev->gmc.xgmi.connected_to_cpu) {
WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP, 0);
WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, 0);
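For reference, the paired writes above split the GART start address across a LO32/HI32 register pair; the only functional change is which base address is used. A minimal sketch, assuming the same shifts as the patch:

	u64 gart_start = amdgpu_virt_xgmi_migrate_enabled(adev) ?
			 adev->gmc.vram_start : adev->gmc.fb_start;
	u32 start_lo = lower_32_bits(gart_start >> 12);	/* bits 12..43 */
	u32 start_hi = upper_32_bits(gart_start >> 12);	/* bits 44..47 */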
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index a3e2787501f1..ce6e04242c52 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -103,8 +103,10 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
uint32_t vmhub_index = entry->client_id == SOC15_IH_CLIENTID_VMC ?
AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
- bool retry_fault = !!(entry->src_data[1] & 0x80);
- bool write_fault = !!(entry->src_data[1] & 0x20);
+ bool retry_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
+ bool write_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
struct amdgpu_task_info *task_info;
uint32_t status = 0;
u64 addr;
@@ -164,10 +166,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " in process %s pid %d thread %s pid %d\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
@@ -469,24 +468,6 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
* 0 valid
*/
-static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_WC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_WC);
- case AMDGPU_VM_MTYPE_CC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_CC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -511,21 +492,39 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
-
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
- *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_WC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_WC);
+ break;
+ case AMDGPU_VM_MTYPE_CC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_CC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
+ break;
+ }
- *flags &= ~AMDGPU_PTE_NOALLOC;
- *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
+ if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
+ *flags |= AMDGPU_PTE_NOALLOC;
+ else
+ *flags &= ~AMDGPU_PTE_NOALLOC;
- if (mapping->flags & AMDGPU_PTE_PRT) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_LOG;
@@ -566,7 +565,6 @@ static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
- .map_mtype = gmc_v10_0_map_mtype,
.get_vm_pde = gmc_v10_0_get_vm_pde,
.get_vm_pte = gmc_v10_0_get_vm_pte,
.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
@@ -967,8 +965,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
/* Flush HDP after it is initialized */
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
if (!adev->in_s0ix)
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
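The switch added above folds the removed gmc_v10_0_map_mtype() callback directly into get_vm_pte(). Plain assignment works because AMDGPU_PTE_MTYPE_NV10(flags, mtype) is assumed to clear the existing MTYPE field before inserting the new value, roughly along these lines (the SHIFT name is illustrative, not the verbatim definition):

#define AMDGPU_PTE_MTYPE_NV10(flags, mtype)			\
	(((uint64_t)(flags) & ~AMDGPU_PTE_MTYPE_NV10_MASK) |	\
	 ((uint64_t)(mtype) << AMDGPU_PTE_MTYPE_NV10_SHIFT))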
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 72211409227b..ba59ee8e398a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -103,12 +103,41 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ?
AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
+ bool retry_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
+ bool write_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
uint32_t status = 0;
u64 addr;
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+ if (retry_fault) {
+ /* Returning 1 here also prevents sending the IV to the KFD */
+
+ /* Process it only if it's the first fault for this address */
+ if (entry->ih != &adev->irq.ih_soft &&
+ amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
+ entry->timestamp))
+ return 1;
+
+ /* Delegate it to a different ring if the hardware hasn't
+ * already done it.
+ */
+ if (entry->ih == &adev->irq.ih) {
+ amdgpu_irq_delegate(adev, entry, 8);
+ return 1;
+ }
+
+ /* Try to handle the recoverable page faults by filling page
+ * tables
+ */
+ if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
+ entry->timestamp, write_fault))
+ return 1;
+ }
+
if (!amdgpu_sriov_vf(adev)) {
/*
* Issue a dummy read to wait for the status register to
@@ -134,10 +163,7 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " in process %s pid %d thread %s pid %d)\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
@@ -433,24 +459,6 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
* 0 valid
*/
-static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_WC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_WC);
- case AMDGPU_VM_MTYPE_CC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_CC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -475,21 +483,39 @@ static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
-
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
- *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_WC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_WC);
+ break;
+ case AMDGPU_VM_MTYPE_CC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_CC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
+ break;
+ }
- *flags &= ~AMDGPU_PTE_NOALLOC;
- *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
+ if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
+ *flags |= AMDGPU_PTE_NOALLOC;
+ else
+ *flags &= ~AMDGPU_PTE_NOALLOC;
- if (mapping->flags & AMDGPU_PTE_PRT) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_LOG;
@@ -530,7 +556,6 @@ static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
- .map_mtype = gmc_v11_0_map_mtype,
.get_vm_pde = gmc_v11_0_get_vm_pde,
.get_vm_pte = gmc_v11_0_get_vm_pte,
.get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
@@ -909,8 +934,7 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
/* Flush HDP after it is initialized */
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->mmhub.funcs->set_fault_enable_default(adev, value);
gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
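gmc_v11 gains here the same three-stage retry-fault pipeline that gmc_v9/v10 already use, and gmc_v12 below receives an identical block. Condensed, the stages are: de-duplicate, delegate off the hardware IH ring, then try to service the fault:

	if (retry_fault) {
		/* 1) drop duplicate faults for this address/PASID */
		if (entry->ih != &adev->irq.ih_soft &&
		    amdgpu_gmc_filter_faults(adev, entry->ih, addr,
					     entry->pasid, entry->timestamp))
			return 1;	/* also keeps the IV away from KFD */

		/* 2) move processing off the hardware IH ring */
		if (entry->ih == &adev->irq.ih) {
			amdgpu_irq_delegate(adev, entry, 8);
			return 1;
		}

		/* 3) recover by filling the page tables */
		if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
					   entry->timestamp, write_fault))
			return 1;
	}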
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index b645d3e6a6c8..7a9d6894e321 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -91,6 +91,10 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
struct amdgpu_vmhub *hub;
+ bool retry_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
+ bool write_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
uint32_t status = 0;
u64 addr;
@@ -102,6 +106,31 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
else
hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
+ if (retry_fault) {
+ /* Returning 1 here also prevents sending the IV to the KFD */
+
+ /* Process it only if it's the first fault for this address */
+ if (entry->ih != &adev->irq.ih_soft &&
+ amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
+ entry->timestamp))
+ return 1;
+
+ /* Delegate it to a different ring if the hardware hasn't
+ * already done it.
+ */
+ if (entry->ih == &adev->irq.ih) {
+ amdgpu_irq_delegate(adev, entry, 8);
+ return 1;
+ }
+
+ /* Try to handle the recoverable page faults by filling page
+ * tables
+ */
+ if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
+ entry->timestamp, write_fault))
+ return 1;
+ }
+
if (!amdgpu_sriov_vf(adev)) {
/*
* Issue a dummy read to wait for the status register to
@@ -127,10 +156,7 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " in process %s pid %d thread %s pid %d)\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
@@ -315,9 +341,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
return;
}
- mutex_lock(&adev->mman.gtt_window_lock);
gmc_v12_0_flush_vm_hub(adev, vmid, vmhub, 0);
- mutex_unlock(&adev->mman.gtt_window_lock);
return;
}
@@ -339,6 +363,22 @@ static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t queried;
int vmid, i;
+ if (adev->enable_uni_mes && adev->mes.ring[AMDGPU_MES_SCHED_PIPE].sched.ready &&
+ (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x84) {
+ struct mes_inv_tlbs_pasid_input input = {0};
+ input.pasid = pasid;
+ input.flush_type = flush_type;
+ input.hub_id = AMDGPU_GFXHUB(0);
+ /* MES will invalidate all gc_hub for the device from master */
+ adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
+ if (all_hub) {
+ /* Only need to invalidate mm_hub now, gfx12 only support one mmhub */
+ input.hub_id = AMDGPU_MMHUB0(0);
+ adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
+ }
+ return;
+ }
+
for (vmid = 1; vmid < 16; vmid++) {
bool valid;
@@ -456,20 +496,6 @@ static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
* 0 valid
*/
-static uint64_t gmc_v12_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -493,18 +519,35 @@ static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
+ break;
+ }
- *flags &= ~AMDGPU_PTE_MTYPE_GFX12_MASK;
- *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_GFX12_MASK);
+ if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
+ *flags |= AMDGPU_PTE_NOALLOC;
+ else
+ *flags &= ~AMDGPU_PTE_NOALLOC;
- if (mapping->flags & AMDGPU_PTE_PRT_GFX12) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT_GFX12;
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_SYSTEM;
@@ -546,7 +589,6 @@ static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v12_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v12_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v12_0_emit_pasid_mapping,
- .map_mtype = gmc_v12_0_map_mtype,
.get_vm_pde = gmc_v12_0_get_vm_pde,
.get_vm_pte = gmc_v12_0_get_vm_pte,
.get_vbios_fb_size = gmc_v12_0_get_vbios_fb_size,
@@ -879,8 +921,7 @@ static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
/* Flush HDP after it is initialized */
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->mmhub.funcs->set_fault_enable_default(adev, value);
gmc_v12_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
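The new fast path in gmc_v12_0_flush_gpu_tlb_pasid() hands PASID-scoped TLB invalidation to the unified MES scheduler (firmware 0x84 or newer) instead of walking all sixteen VMIDs from the host. Usage reduces to one request per hub:

	struct mes_inv_tlbs_pasid_input input = {0};

	input.pasid = pasid;
	input.flush_type = flush_type;
	input.hub_id = AMDGPU_GFXHUB(0);	/* MES fans out to every GC hub */
	adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);

	if (all_hub) {
		input.hub_id = AMDGPU_MMHUB0(0);	/* gfx12 has one MMHUB */
		adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
	}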
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 8030fcd64210..a8ec95f42926 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -213,7 +213,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_set_agp_default(adev, mc);
amdgpu_gmc_vram_location(adev, mc, base);
- amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
+ amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_LOW);
}
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
@@ -382,7 +382,9 @@ static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
*flags &= ~AMDGPU_PTE_EXECUTABLE;
@@ -608,23 +610,21 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
}
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
- u32 status, u32 addr, u32 mc_client)
+ u32 status, u32 addr)
{
u32 mc_id;
u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
PROTECTIONS);
- char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
- (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_ID);
- dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+ dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from %d\n",
protections, vmid, addr,
REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_RW) ?
- "write" : "read", block, mc_client, mc_id);
+ "write" : "read", mc_id);
}
static const u32 mc_cg_registers[] = {
@@ -1070,6 +1070,12 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
{
u32 addr, status;
+ /* Delegate to the soft IRQ handler ring */
+ if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) {
+ amdgpu_irq_delegate(adev, entry, 4);
+ return 1;
+ }
+
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
@@ -1077,6 +1083,10 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
if (!addr && !status)
return 0;
+ amdgpu_vm_update_fault_cache(adev, entry->pasid,
+ ((u64)addr) << AMDGPU_GPU_PAGE_SHIFT,
+ status, AMDGPU_GFXHUB(0));
+
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
gmc_v6_0_set_fault_enable_default(adev, false);
@@ -1087,7 +1097,7 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
addr);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
- gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
+ gmc_v6_0_vm_decode_fault(adev, status, addr);
}
return 0;
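The decode dropped from gmc_v6_0_vm_decode_fault() treated the MCCLIENT register as a FourCC-style value, unpacking four big-endian ASCII bytes into a printable tag; only the numeric client ID survives in the new message:

	/* what the removed lines did with the 32-bit mc_client value */
	char block[5] = {
		mc_client >> 24,		/* highest byte first */
		(mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff,
		mc_client & 0xff,
		0				/* NUL terminator */
	};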
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index a8d5795084fc..fbd0bf147f50 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -504,7 +504,9 @@ static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
*flags &= ~AMDGPU_PTE_EXECUTABLE;
@@ -1066,7 +1068,7 @@ static int gmc_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
GFP_KERNEL);
if (!adev->gmc.vm_fault_info)
return -ENOMEM;
- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
return 0;
}
@@ -1259,6 +1261,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
{
u32 addr, status, mc_client, vmid;
+ /* Delegate to the soft IRQ handler ring */
+ if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) {
+ amdgpu_irq_delegate(adev, entry, 4);
+ return 1;
+ }
+
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
@@ -1288,7 +1296,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
VMID);
if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
- && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+ && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
u32 protections = REG_GET_FIELD(status,
VM_CONTEXT1_PROTECTION_FAULT_STATUS,
@@ -1304,8 +1312,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
info->prot_read = protections & 0x8 ? true : false;
info->prot_write = protections & 0x10 ? true : false;
info->prot_exec = protections & 0x20 ? true : false;
- mb();
- atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
}
return 0;
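The gmc_v7 conversion (and the matching gmc_v8 one below) replaces a full mb() plus plain atomic_set() with a release/acquire pair, the canonical way to publish a record through a flag: writes to the record cannot be reordered past the release store, and reads of the record cannot be hoisted before the acquire load. A generic sketch, with consume_fault_info() as a hypothetical stand-in for the KFD-side reader:

	/* producer (interrupt handler): fill the record, then publish */
	info->page_addr = addr;
	info->vmid = vmid;
	atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);

	/* consumer: observe the flag, then read the record safely */
	if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated))
		consume_fault_info(adev->gmc.vm_fault_info);	/* hypothetical */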
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 99ca08e9bdb5..6551b60f2584 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -716,11 +716,15 @@ static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
*flags &= ~AMDGPU_PTE_PRT;
}
@@ -1179,7 +1183,7 @@ static int gmc_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
GFP_KERNEL);
if (!adev->gmc.vm_fault_info)
return -ENOMEM;
- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
return 0;
}
@@ -1435,6 +1439,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+ /* Delegate to the soft IRQ handler ring */
+ if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) {
+ amdgpu_irq_delegate(adev, entry, 4);
+ return 1;
+ }
+
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
@@ -1458,9 +1468,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev, " for process %s pid %d thread %s pid %d\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
@@ -1476,7 +1484,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
VMID);
if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
- && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+ && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
u32 protections = REG_GET_FIELD(status,
VM_CONTEXT1_PROTECTION_FAULT_STATUS,
@@ -1492,8 +1500,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
info->prot_read = protections & 0x8 ? true : false;
info->prot_write = protections & 0x10 ? true : false;
info->prot_exec = protections & 0x20 ? true : false;
- mb();
- atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 282197f4ffb1..8ad7519f7b58 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -78,8 +78,6 @@
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2
-#define MAX_MEM_RANGES 8
-
static const char * const gfxhub_client_ids[] = {
"CB",
"DB",
@@ -411,11 +409,6 @@ static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
(0x001d43e0 + 0x00001800),
};
-static inline bool gmc_v9_0_is_multi_chiplet(struct amdgpu_device *adev)
-{
- return !!adev->aid_mask;
-}
-
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned int type,
@@ -551,8 +544,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- bool retry_fault = !!(entry->src_data[1] & 0x80);
- bool write_fault = !!(entry->src_data[1] & 0x20);
+ bool retry_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
+ bool write_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
uint32_t status = 0, cid = 0, rw = 0, fed = 0;
struct amdgpu_task_info *task_info;
struct amdgpu_vmhub *hub;
@@ -641,10 +636,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " for process %s pid %d thread %s pid %d)\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
@@ -652,7 +644,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
addr, entry->client_id,
soc15_ih_clientid_name[entry->client_id]);
- if (gmc_v9_0_is_multi_chiplet(adev))
+ if (amdgpu_is_multi_aid(adev))
dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n",
node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");
@@ -801,7 +793,7 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
uint32_t vmhub)
{
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
- gmc_v9_0_is_multi_chiplet(adev))
+ amdgpu_is_multi_aid(adev))
return false;
return ((vmhub == AMDGPU_MMHUB0(0) ||
@@ -1083,27 +1075,6 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int v
* 0 valid
*/
-static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_WC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_WC);
- case AMDGPU_VM_MTYPE_RW:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_RW);
- case AMDGPU_VM_MTYPE_CC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_CC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -1131,8 +1102,9 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
struct amdgpu_bo *bo,
- struct amdgpu_bo_va_mapping *mapping,
+ uint32_t vm_flags,
uint64_t *flags)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -1142,7 +1114,6 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
AMDGPU_GEM_CREATE_EXT_COHERENT);
bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
- struct amdgpu_vm *vm = mapping->bo_va->base.vm;
unsigned int mtype_local, mtype;
uint32_t gc_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0);
bool snoop = false;
@@ -1172,7 +1143,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
mtype = MTYPE_UC;
else
mtype = MTYPE_NC;
- if (mapping->bo_va->is_xgmi)
+ if (amdgpu_xgmi_same_hive(adev, bo_adev))
snoop = true;
}
} else {
@@ -1247,24 +1218,43 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
}
static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
-
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
- *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_WC:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_WC);
+ break;
+ case AMDGPU_VM_MTYPE_RW:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_RW);
+ break;
+ case AMDGPU_VM_MTYPE_CC:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_UC);
+ break;
+ }
- if (mapping->flags & AMDGPU_PTE_PRT) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags &= ~AMDGPU_PTE_VALID;
}
if ((*flags & AMDGPU_PTE_VALID) && bo)
- gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags);
+ gmc_v9_0_get_coherence_flags(adev, vm, bo, vm_flags, flags);
}
static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
@@ -1385,46 +1375,6 @@ static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
return size;
}
-static enum amdgpu_memory_partition
-gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
-{
- enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
-
- if (adev->nbio.funcs->get_memory_partition_mode)
- mode = adev->nbio.funcs->get_memory_partition_mode(adev,
- supp_modes);
-
- return mode;
-}
-
-static enum amdgpu_memory_partition
-gmc_v9_0_query_vf_memory_partition(struct amdgpu_device *adev)
-{
- switch (adev->gmc.num_mem_partitions) {
- case 0:
- return UNKNOWN_MEMORY_PARTITION_MODE;
- case 1:
- return AMDGPU_NPS1_PARTITION_MODE;
- case 2:
- return AMDGPU_NPS2_PARTITION_MODE;
- case 4:
- return AMDGPU_NPS4_PARTITION_MODE;
- default:
- return AMDGPU_NPS1_PARTITION_MODE;
- }
-
- return AMDGPU_NPS1_PARTITION_MODE;
-}
-
-static enum amdgpu_memory_partition
-gmc_v9_0_query_memory_partition(struct amdgpu_device *adev)
-{
- if (amdgpu_sriov_vf(adev))
- return gmc_v9_0_query_vf_memory_partition(adev);
-
- return gmc_v9_0_get_memory_partition(adev, NULL);
-}
-
static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev)
{
if (adev->nbio.funcs && adev->nbio.funcs->is_nps_switch_requested &&
@@ -1441,12 +1391,11 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
- .map_mtype = gmc_v9_0_map_mtype,
.get_vm_pde = gmc_v9_0_get_vm_pde,
.get_vm_pte = gmc_v9_0_get_vm_pte,
.override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
- .query_mem_partition_mode = &gmc_v9_0_query_memory_partition,
+ .query_mem_partition_mode = &amdgpu_gmc_query_memory_partition,
.request_mem_partition_mode = &amdgpu_gmc_request_memory_partition,
.need_reset_on_init = &gmc_v9_0_need_reset_on_init,
};
@@ -1553,7 +1502,7 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
- if (gmc_v9_0_is_multi_chiplet(adev))
+ if (amdgpu_is_multi_aid(adev))
adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
else
adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
@@ -1599,7 +1548,7 @@ static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
return;
- mode = gmc_v9_0_get_memory_partition(adev, &supp_modes);
+ mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes);
/* Mode detected by hardware and supported modes available */
if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && supp_modes) {
@@ -1635,7 +1584,7 @@ static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block)
*/
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
- gmc_v9_0_is_multi_chiplet(adev))
+ amdgpu_is_multi_aid(adev))
adev->gmc.xgmi.supported = true;
if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) {
@@ -1722,7 +1671,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
/* add the xgmi offset of the physical node */
base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
- if (adev->gmc.xgmi.connected_to_cpu) {
+ if (amdgpu_gmc_is_pdb0_enabled(adev)) {
amdgpu_gmc_sysvm_location(adev, mc);
} else {
amdgpu_gmc_vram_location(adev, mc, base);
@@ -1837,7 +1786,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
return 0;
}
- if (adev->gmc.xgmi.connected_to_cpu) {
+ if (amdgpu_gmc_is_pdb0_enabled(adev)) {
adev->gmc.vmid0_page_table_depth = 1;
adev->gmc.vmid0_page_table_block_size = 12;
} else {
@@ -1863,7 +1812,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
if (r)
return r;
- if (adev->gmc.xgmi.connected_to_cpu)
+ if (amdgpu_gmc_is_pdb0_enabled(adev))
r = amdgpu_gmc_pdb0_alloc(adev);
}
@@ -1885,195 +1834,25 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}
-static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev)
-{
- enum amdgpu_memory_partition mode;
- u32 supp_modes;
- bool valid;
-
- mode = gmc_v9_0_get_memory_partition(adev, &supp_modes);
-
- /* Mode detected by hardware not present in supported modes */
- if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
- !(BIT(mode - 1) & supp_modes))
- return false;
-
- switch (mode) {
- case UNKNOWN_MEMORY_PARTITION_MODE:
- case AMDGPU_NPS1_PARTITION_MODE:
- valid = (adev->gmc.num_mem_partitions == 1);
- break;
- case AMDGPU_NPS2_PARTITION_MODE:
- valid = (adev->gmc.num_mem_partitions == 2);
- break;
- case AMDGPU_NPS4_PARTITION_MODE:
- valid = (adev->gmc.num_mem_partitions == 3 ||
- adev->gmc.num_mem_partitions == 4);
- break;
- default:
- valid = false;
- }
-
- return valid;
-}
-
-static bool gmc_v9_0_is_node_present(int *node_ids, int num_ids, int nid)
-{
- int i;
-
- /* Check if node with id 'nid' is present in 'node_ids' array */
- for (i = 0; i < num_ids; ++i)
- if (node_ids[i] == nid)
- return true;
-
- return false;
-}
-
-static void
-gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev,
- struct amdgpu_mem_partition_info *mem_ranges)
-{
- struct amdgpu_numa_info numa_info;
- int node_ids[MAX_MEM_RANGES];
- int num_ranges = 0, ret;
- int num_xcc, xcc_id;
- uint32_t xcc_mask;
-
- num_xcc = NUM_XCC(adev->gfx.xcc_mask);
- xcc_mask = (1U << num_xcc) - 1;
-
- for_each_inst(xcc_id, xcc_mask) {
- ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
- if (ret)
- continue;
-
- if (numa_info.nid == NUMA_NO_NODE) {
- mem_ranges[0].size = numa_info.size;
- mem_ranges[0].numa.node = numa_info.nid;
- num_ranges = 1;
- break;
- }
-
- if (gmc_v9_0_is_node_present(node_ids, num_ranges,
- numa_info.nid))
- continue;
-
- node_ids[num_ranges] = numa_info.nid;
- mem_ranges[num_ranges].numa.node = numa_info.nid;
- mem_ranges[num_ranges].size = numa_info.size;
- ++num_ranges;
- }
-
- adev->gmc.num_mem_partitions = num_ranges;
-}
-
-static void
-gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
- struct amdgpu_mem_partition_info *mem_ranges)
-{
- enum amdgpu_memory_partition mode;
- u32 start_addr = 0, size;
- int i, r, l;
-
- mode = gmc_v9_0_query_memory_partition(adev);
-
- switch (mode) {
- case UNKNOWN_MEMORY_PARTITION_MODE:
- adev->gmc.num_mem_partitions = 0;
- break;
- case AMDGPU_NPS1_PARTITION_MODE:
- adev->gmc.num_mem_partitions = 1;
- break;
- case AMDGPU_NPS2_PARTITION_MODE:
- adev->gmc.num_mem_partitions = 2;
- break;
- case AMDGPU_NPS4_PARTITION_MODE:
- if (adev->flags & AMD_IS_APU)
- adev->gmc.num_mem_partitions = 3;
- else
- adev->gmc.num_mem_partitions = 4;
- break;
- default:
- adev->gmc.num_mem_partitions = 1;
- break;
- }
-
- /* Use NPS range info, if populated */
- r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges,
- &adev->gmc.num_mem_partitions);
- if (!r) {
- l = 0;
- for (i = 1; i < adev->gmc.num_mem_partitions; ++i) {
- if (mem_ranges[i].range.lpfn >
- mem_ranges[i - 1].range.lpfn)
- l = i;
- }
-
- } else {
- if (!adev->gmc.num_mem_partitions) {
- dev_err(adev->dev,
- "Not able to detect NPS mode, fall back to NPS1");
- adev->gmc.num_mem_partitions = 1;
- }
- /* Fallback to sw based calculation */
- size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
- size /= adev->gmc.num_mem_partitions;
-
- for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
- mem_ranges[i].range.fpfn = start_addr;
- mem_ranges[i].size =
- ((u64)size << AMDGPU_GPU_PAGE_SHIFT);
- mem_ranges[i].range.lpfn = start_addr + size - 1;
- start_addr += size;
- }
-
- l = adev->gmc.num_mem_partitions - 1;
- }
-
- /* Adjust the last one */
- mem_ranges[l].range.lpfn =
- (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
- mem_ranges[l].size =
- adev->gmc.real_vram_size -
- ((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT);
-}
-
-static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
-{
- bool valid;
-
- adev->gmc.mem_partitions = kcalloc(MAX_MEM_RANGES,
- sizeof(struct amdgpu_mem_partition_info),
- GFP_KERNEL);
- if (!adev->gmc.mem_partitions)
- return -ENOMEM;
-
- /* TODO : Get the range from PSP/Discovery for dGPU */
- if (adev->gmc.is_app_apu)
- gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
- else
- gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
-
- if (amdgpu_sriov_vf(adev))
- valid = true;
- else
- valid = gmc_v9_0_validate_partition_info(adev);
- if (!valid) {
- /* TODO: handle invalid case */
- dev_WARN(adev->dev,
- "Mem ranges not matching with hardware config");
- }
-
- return 0;
-}
-
static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
{
+ static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
+ u32 vram_info;
+
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
adev->gmc.vram_width = 128 * 64;
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) &&
+ adev->rev_id == 0x3)
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+
+ if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) {
+ vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
+ adev->gmc.vram_vendor = vram_info & 0xF;
+ }
}
static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
@@ -2088,7 +1867,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
spin_lock_init(&adev->gmc.invalidate_lock);
- if (gmc_v9_0_is_multi_chiplet(adev)) {
+ if (amdgpu_is_multi_aid(adev)) {
gmc_v9_4_3_init_vram_info(adev);
} else if (!adev->bios) {
if (adev->flags & AMD_IS_APU) {
@@ -2238,8 +2017,8 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
amdgpu_gmc_get_vbios_allocations(adev);
- if (gmc_v9_0_is_multi_chiplet(adev)) {
- r = gmc_v9_0_init_mem_ranges(adev);
+ if (amdgpu_is_multi_aid(adev)) {
+ r = amdgpu_gmc_init_mem_ranges(adev);
if (r)
return r;
}
@@ -2267,7 +2046,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vm_manager.first_kfd_vmid =
(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
- gmc_v9_0_is_multi_chiplet(adev)) ?
+ amdgpu_is_multi_aid(adev)) ?
3 :
8;
@@ -2279,7 +2058,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (gmc_v9_0_is_multi_chiplet(adev))
+ if (amdgpu_is_multi_aid(adev))
amdgpu_gmc_sysfs_init(adev);
return 0;
@@ -2289,7 +2068,7 @@ static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- if (gmc_v9_0_is_multi_chiplet(adev))
+ if (amdgpu_is_multi_aid(adev))
amdgpu_gmc_sysfs_fini(adev);
amdgpu_gmc_ras_fini(adev);
@@ -2363,7 +2142,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
int r;
- if (adev->gmc.xgmi.connected_to_cpu)
+ if (amdgpu_gmc_is_pdb0_enabled(adev))
amdgpu_gmc_init_pdb0(adev);
if (adev->gart.bo == NULL) {
@@ -2521,7 +2300,7 @@ static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block)
* information again.
*/
if (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS) {
- gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
+ amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
adev->gmc.reset_flags &= ~AMDGPU_GMC_INIT_RESET_NPS;
}
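Most of the churn in gmc_v9_0.c is consolidation: the chiplet test, the memory-partition queries, and the ACPI/software memory-range setup all move to shared amdgpu_gmc helpers so gmc_v9_4_3-class and future IPs stop duplicating them. The pdb0 rework is the one behavioral piece: amdgpu_gmc_is_pdb0_enabled() is assumed to widen the old connected_to_cpu test along these lines (illustrative shape, not the verbatim helper):

bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev)
{
	/* a system-VM layout is needed both for CPU-coherent xGMI
	 * and for VFs that migrate across xGMI nodes */
	return adev->gmc.xgmi.connected_to_cpu ||
	       amdgpu_virt_xgmi_migrate_enabled(adev);
}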
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 1317ede131b6..01cadf898c00 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -157,6 +157,9 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
iceland_ih_enable_interrupts(adev);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -194,6 +197,9 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -296,6 +302,10 @@ static int iceland_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
r = amdgpu_irq_init(adev);
return r;
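iceland_ih picks up the software IH ring that newer IH blocks already have: sw_init allocates it, irq_init flips it on, and get_wptr skips the hardware overflow check for it, since the ring is fed by amdgpu_irq_delegate() rather than by the IH hardware. The added pieces pair up as:

	/* sw_init: allocate the software-managed ring */
	r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);

	/* irq_init: enable it once it has a size */
	if (adev->irq.ih_soft.ring_size)
		adev->irq.ih_soft.enabled = true;

	/* get_wptr: the soft ring has no RB_OVERFLOW bit to consult */
	if (ih == &adev->irq.ih_soft)
		goto out;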
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index 5900b560b7de..333e9c30c091 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -587,8 +587,7 @@ static int ih_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
/* use gpu virtual address for ih ring
* until ih_checken is programmed to allow
* use bus address for ih ring by psp bl */
- use_bus_addr =
- (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
+ use_bus_addr = adev->firmware.load_type != AMDGPU_FW_LOAD_PSP;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
index 068ed849dbad..95b3f4e55ec3 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
@@ -562,8 +562,7 @@ static int ih_v6_1_sw_init(struct amdgpu_ip_block *ip_block)
/* use gpu virtual address for ih ring
* until ih_checken is programmed to allow
* use bus address for ih ring by psp bl */
- use_bus_addr =
- (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
+ use_bus_addr = adev->firmware.load_type != AMDGPU_FW_LOAD_PSP;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index 40a3530e0453..b32ea4129c61 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -552,8 +552,7 @@ static int ih_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
/* use gpu virtual address for ih ring
* until ih_checken is programmed to allow
* use bus address for ih ring by psp bl */
- use_bus_addr =
- (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
+ use_bus_addr = adev->firmware.load_type != AMDGPU_FW_LOAD_PSP;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index cfa91d709d49..cc626036ed9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -32,6 +32,7 @@
#include "gc/gc_11_0_0_sh_mask.h"
MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu_kicker.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin");
@@ -51,8 +52,12 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_imu.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu.bin", ucode_prefix);
if (err)
goto out;
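Both IMU blocks now choose between the stock and "kicker" firmware images at request time. The branch could equally be written as a suffix selection; a compact equivalent of the logic added above:

	const char *suffix = amdgpu_is_kicker_fw(adev) ?
			     "_imu_kicker.bin" : "_imu.bin";

	err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw,
				   AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s%s", ucode_prefix, suffix);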
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
index df898dbb746e..58cd87db8061 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
@@ -34,12 +34,13 @@
MODULE_FIRMWARE("amdgpu/gc_12_0_0_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_12_0_1_imu_kicker.bin");
#define TRANSFER_RAM_MASK 0x001c0000
static int imu_v12_0_init_microcode(struct amdgpu_device *adev)
{
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err;
const struct imu_firmware_header_v1_0 *imu_hdr;
struct amdgpu_firmware_info *info = NULL;
@@ -47,8 +48,12 @@ static int imu_v12_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_imu.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu.bin", ucode_prefix);
if (err)
goto out;
@@ -362,7 +367,7 @@ static void program_imu_rlc_ram(struct amdgpu_device *adev,
static void imu_v12_0_program_rlc_ram(struct amdgpu_device *adev)
{
u32 reg_data, size = 0;
- const u32 *data;
+ const u32 *data = NULL;
int r = -EINVAL;
WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_INDEX, 0x2);
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
index 574880d67009..4258d3e0b706 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
@@ -29,6 +29,14 @@
#include "amdgpu.h"
#include "isp_v4_1_1.h"
+MODULE_FIRMWARE("amdgpu/isp_4_1_1.bin");
+
+#define ISP_PERFORMANCE_STATE_LOW 0
+#define ISP_PERFORMANCE_STATE_HIGH 1
+
+#define ISP_HIGH_PERFORMANC_XCLK 788
+#define ISP_HIGH_PERFORMANC_ICLK 788
+
static const unsigned int isp_4_1_1_int_srcid[MAX_ISP411_INT_SRC] = {
ISP_4_1__SRCID__ISP_RINGBUFFER_WPT9,
ISP_4_1__SRCID__ISP_RINGBUFFER_WPT10,
@@ -56,17 +64,137 @@ static struct gpiod_lookup_table isp_sensor_gpio_table = {
},
};
+static int isp_poweroff(struct generic_pm_domain *genpd)
+{
+ struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+
+ return amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ISP, true, 0);
+}
+
+static int isp_poweron(struct generic_pm_domain *genpd)
+{
+ struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+
+ return amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ISP, false, 0);
+}
+
+static int isp_set_performance_state(struct generic_pm_domain *genpd,
+ unsigned int state)
+{
+ struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+ u32 iclk, xclk;
+ int ret;
+
+ switch (state) {
+ case ISP_PERFORMANCE_STATE_HIGH:
+ xclk = ISP_HIGH_PERFORMANC_XCLK;
+ iclk = ISP_HIGH_PERFORMANC_ICLK;
+ break;
+ case ISP_PERFORMANCE_STATE_LOW:
+ /* isp runs at default lowest clock-rate on power-on, do nothing */
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ ret = amdgpu_dpm_set_soft_freq_range(adev, PP_ISPXCLK, xclk, 0);
+ if (ret) {
+ drm_err(&adev->ddev, "failed to set xclk %u to %u: %d\n",
+ xclk, state, ret);
+ return ret;
+ }
+
+ ret = amdgpu_dpm_set_soft_freq_range(adev, PP_ISPICLK, iclk, 0);
+ if (ret) {
+ drm_err(&adev->ddev, "failed to set iclk %u to %u: %d\n",
+ iclk, state, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int isp_genpd_add_device(struct device *dev, void *data)
+{
+ struct generic_pm_domain *gpd = data;
+ struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+ struct amdgpu_isp *isp = container_of(gpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+ int ret;
+
+ if (!pdev)
+ return -EINVAL;
+
+ if (!dev->type->name) {
+ drm_dbg(&adev->ddev, "Invalid device type to add\n");
+ goto exit;
+ }
+
+ if (strcmp(dev->type->name, "mfd_device")) {
+ drm_dbg(&adev->ddev, "Invalid isp mfd device %s to add\n", pdev->mfd_cell->name);
+ goto exit;
+ }
+
+ ret = pm_genpd_add_device(gpd, dev);
+ if (ret) {
+ drm_err(&adev->ddev, "Failed to add dev %s to genpd %d\n",
+ pdev->mfd_cell->name, ret);
+ return -ENODEV;
+ }
+
+exit:
+ /* Continue to add */
+ return 0;
+}
+
+static int isp_genpd_remove_device(struct device *dev, void *data)
+{
+ struct generic_pm_domain *gpd = data;
+ struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+ struct amdgpu_isp *isp = container_of(gpd, struct amdgpu_isp, ispgpd);
+ struct amdgpu_device *adev = isp->adev;
+ int ret;
+
+ if (!pdev)
+ return -EINVAL;
+
+ if (!dev->type->name) {
+ drm_dbg(&adev->ddev, "Invalid device type to remove\n");
+ goto exit;
+ }
+
+ if (strcmp(dev->type->name, "mfd_device")) {
+ drm_dbg(&adev->ddev, "Invalid isp mfd device %s to remove\n",
+ pdev->mfd_cell->name);
+ goto exit;
+ }
+
+ ret = pm_genpd_remove_device(dev);
+ if (ret) {
+ drm_err(&adev->ddev, "Failed to remove dev from genpd %d\n", ret);
+ return -ENODEV;
+ }
+
+exit:
+ /* Continue to remove */
+ return 0;
+}
+
static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
{
+ const struct software_node *amd_camera_node, *isp4_node;
struct amdgpu_device *adev = isp->adev;
+ struct acpi_device *acpi_dev;
int idx, int_idx, num_res, r;
- u8 isp_dev_hid[ACPI_ID_LEN];
u64 isp_base;
if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
return -EINVAL;
- r = amdgpu_acpi_get_isp4_dev_hid(&isp_dev_hid);
+ r = amdgpu_acpi_get_isp4_dev(&acpi_dev);
if (r) {
drm_dbg(&adev->ddev, "Invalid isp platform detected (%d)", r);
/* allow GPU init to progress */
@@ -74,18 +202,28 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
}
/* add GPIO resources required for OMNI5C10 sensor */
- if (!strcmp("OMNI5C10", isp_dev_hid)) {
+ if (!strcmp("OMNI5C10", acpi_device_hid(acpi_dev))) {
gpiod_add_lookup_table(&isp_gpio_table);
gpiod_add_lookup_table(&isp_sensor_gpio_table);
}
isp_base = adev->rmmio_base;
+ isp->ispgpd.name = "ISP_v_4_1_1";
+ isp->ispgpd.power_off = isp_poweroff;
+ isp->ispgpd.power_on = isp_poweron;
+ isp->ispgpd.set_performance_state = isp_set_performance_state;
+
+ r = pm_genpd_init(&isp->ispgpd, NULL, true);
+ if (r) {
+ drm_err(&adev->ddev, "failed to initialize genpd (%d)\n", r);
+ return -EINVAL;
+ }
+
isp->isp_cell = kcalloc(3, sizeof(struct mfd_cell), GFP_KERNEL);
if (!isp->isp_cell) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp mfd cell alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp mfd cell alloc failed (%d)\n", r);
goto failure;
}
@@ -95,19 +233,20 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
GFP_KERNEL);
if (!isp->isp_res) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp mfd resource alloc failed (%d)\n", r);
goto failure;
}
isp->isp_pdata = kzalloc(sizeof(*isp->isp_pdata), GFP_KERNEL);
if (!isp->isp_pdata) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp platform data alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp platform data alloc failed (%d)\n", r);
goto failure;
}
+ amd_camera_node = (const struct software_node *)acpi_dev->driver_data;
+ isp4_node = software_node_find_by_name(amd_camera_node, "isp4");
+
/* initialize isp platform data */
isp->isp_pdata->adev = (void *)adev;
isp->isp_pdata->asic_type = adev->asic_type;
@@ -136,14 +275,14 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[0].num_resources = num_res;
isp->isp_cell[0].resources = &isp->isp_res[0];
isp->isp_cell[0].platform_data = isp->isp_pdata;
+ isp->isp_cell[0].swnode = isp4_node;
isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data);
/* initialize isp i2c platform data */
isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
if (!isp->isp_i2c_res) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp mfd res alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp mfd res alloc failed (%d)\n", r);
goto failure;
}
@@ -162,8 +301,7 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_gpio_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL);
if (!isp->isp_gpio_res) {
r = -ENOMEM;
- drm_err(&adev->ddev,
- "%s: isp gpio res alloc failed\n", __func__);
+ drm_err(&adev->ddev, "isp gpio resource alloc failed (%d)\n", r);
goto failure;
}
@@ -179,10 +317,23 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
isp->isp_cell[2].platform_data = isp->isp_pdata;
isp->isp_cell[2].pdata_size = sizeof(struct isp_platform_data);
- r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 3);
+ /* add only amd_isp_capture and amd_isp_i2c_designware to genpd */
+ r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2);
if (r) {
- drm_err(&adev->ddev,
- "%s: add mfd hotplug device failed\n", __func__);
+ drm_err(&adev->ddev, "add mfd hotplug device failed (%d)\n", r);
+ goto failure;
+ }
+
+ r = device_for_each_child(isp->parent, &isp->ispgpd,
+ isp_genpd_add_device);
+ if (r) {
+ drm_err(&adev->ddev, "failed to add devices to genpd (%d)\n", r);
+ goto failure;
+ }
+
+ r = mfd_add_hotplug_devices(isp->parent, &isp->isp_cell[2], 1);
+ if (r) {
+ drm_err(&adev->ddev, "add pinctl hotplug device failed (%d)\n", r);
goto failure;
}
@@ -201,6 +352,9 @@ failure:
static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp)
{
+ device_for_each_child(isp->parent, NULL,
+ isp_genpd_remove_device);
+
mfd_remove_devices(isp->parent);
kfree(isp->isp_res);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index 9e428e669ada..b5bb7f4d607c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -557,7 +557,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
.nop = PACKET0(0x81ff, 0),
.support_64bit_ptrs = false,
.no_user_fence = true,
- .extra_dw = 64,
+ .extra_bytes = 256,
.get_rptr = jpeg_v1_0_decode_ring_get_rptr,
.get_wptr = jpeg_v1_0_decode_ring_get_wptr,
.set_wptr = jpeg_v1_0_decode_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 4cde8a8bcc83..27c76bd424cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -23,7 +23,6 @@
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
-#include "amdgpu_cs.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
@@ -118,7 +117,10 @@ static int jpeg_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
return r;
@@ -764,11 +766,20 @@ static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int jpeg_v2_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v2_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
- jpeg_v2_0_stop(ring->adev);
- jpeg_v2_0_start(ring->adev);
- return amdgpu_ring_test_helper(ring);
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v2_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v2_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
@@ -794,7 +805,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_0_dec_ring_get_rptr,
.get_wptr = jpeg_v2_0_dec_ring_get_wptr,
.set_wptr = jpeg_v2_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -842,58 +853,3 @@ const struct amdgpu_ip_block_version jpeg_v2_0_ip_block = {
.rev = 0,
.funcs = &jpeg_v2_0_ip_funcs,
};
-
-/**
- * jpeg_v2_dec_ring_parse_cs - command submission parser
- *
- * @parser: Command submission parser context
- * @job: the job to parse
- * @ib: the IB to parse
- *
- * Parse the command stream, return -EINVAL for invalid packet,
- * 0 otherwise
- */
-int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib)
-{
- u32 i, reg, res, cond, type;
- struct amdgpu_device *adev = parser->adev;
-
- for (i = 0; i < ib->length_dw ; i += 2) {
- reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
- res = CP_PACKETJ_GET_RES(ib->ptr[i]);
- cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
- type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
-
- if (res) /* only support 0 at the moment */
- return -EINVAL;
-
- switch (type) {
- case PACKETJ_TYPE0:
- if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START ||
- reg > JPEG_REG_RANGE_END) {
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- }
- break;
- case PACKETJ_TYPE3:
- if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START ||
- reg > JPEG_REG_RANGE_END) {
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- }
- break;
- case PACKETJ_TYPE6:
- if (ib->ptr[i] == CP_PACKETJ_NOP)
- continue;
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- default:
- dev_err(adev->dev, "Unknown packet type %d !\n", type);
- return -EINVAL;
- }
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
index 63fadda7a673..654e43e83e2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
@@ -45,9 +45,6 @@
#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
-#define JPEG_REG_RANGE_START 0x4000
-#define JPEG_REG_RANGE_END 0x41c2
-
void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
@@ -60,9 +57,6 @@ void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr);
void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
-int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib);
extern const struct amdgpu_ip_block_version jpeg_v2_0_ip_block;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 8b39e114f3be..20983f126b49 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -167,7 +167,10 @@ static int jpeg_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
return r;
@@ -643,11 +646,14 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int jpeg_v2_5_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v2_5_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
jpeg_v2_5_stop_inst(ring->adev, ring->me);
jpeg_v2_5_start_inst(ring->adev, ring->me);
- return amdgpu_ring_test_helper(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
@@ -690,7 +696,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -721,7 +727,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index 2f8510c2986b..d1a011c40ba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -132,7 +132,10 @@ static int jpeg_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
return r;
@@ -555,11 +558,20 @@ static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int jpeg_v3_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v3_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
- jpeg_v3_0_stop(ring->adev);
- jpeg_v3_0_start(ring->adev);
- return amdgpu_ring_test_helper(ring);
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v3_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v3_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
@@ -585,7 +597,7 @@ static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v3_0_dec_ring_get_rptr,
.get_wptr = jpeg_v3_0_dec_ring_get_wptr,
.set_wptr = jpeg_v3_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index f17ec5414fd6..33db2c1ae6cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -143,7 +143,10 @@ static int jpeg_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
return r;
@@ -720,14 +723,20 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int jpeg_v4_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v4_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
- if (amdgpu_sriov_vf(ring->adev))
- return -EINVAL;
+ int r;
- jpeg_v4_0_stop(ring->adev);
- jpeg_v4_0_start(ring->adev);
- return amdgpu_ring_test_helper(ring);
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v4_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v4_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
@@ -753,7 +762,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 79e342d5ab28..aae7328973d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -216,12 +216,11 @@ static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (!amdgpu_sriov_vf(adev)) {
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
- r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
- if (r)
- return r;
- }
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
-	return 0;
+	return r;
}
@@ -242,8 +241,7 @@ static int jpeg_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (!amdgpu_sriov_vf(adev))
- amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
r = amdgpu_jpeg_sw_fini(adev);
@@ -446,7 +444,7 @@ static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
ret = jpeg_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return ret;
@@ -1143,14 +1141,17 @@ static void jpeg_v4_0_3_core_stall_reset(struct amdgpu_ring *ring)
WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00);
}
-static int jpeg_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v4_0_3_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
if (amdgpu_sriov_vf(ring->adev))
return -EOPNOTSUPP;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
jpeg_v4_0_3_core_stall_reset(ring);
jpeg_v4_0_3_start_jrbc(ring);
- return amdgpu_ring_test_helper(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = {
@@ -1176,7 +1177,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_3_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_3_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 974030a5c03c..54fd9c800c40 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -174,9 +174,10 @@ static int jpeg_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- /* TODO: Add queue reset mask when FW fully supports it */
adev->jpeg.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
if (r)
return r;
@@ -685,7 +686,7 @@ static int jpeg_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
@@ -767,6 +768,22 @@ static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v4_0_5_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v4_0_5_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v4_0_5_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
.name = "jpeg_v4_0_5",
.early_init = jpeg_v4_0_5_early_init,
@@ -790,7 +807,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_5_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_5_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_5_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -812,6 +829,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v4_0_5_ring_reset,
};
static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index 31d213ccbe0a..46bf15dce2bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -120,13 +120,13 @@ static int jpeg_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- /* TODO: Add queue reset mask when FW fully supports it */
adev->jpeg.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
- if (r)
- return r;
- return 0;
+
+ return r;
}
/**
@@ -584,7 +584,7 @@ static int jpeg_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
if (!jpeg_v5_0_0_is_idle(ip_block))
@@ -644,6 +644,22 @@ static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static int jpeg_v5_0_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = jpeg_v5_0_0_stop(ring->adev);
+ if (r)
+ return r;
+ r = jpeg_v5_0_0_start(ring->adev);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = {
.name = "jpeg_v5_0_0",
.early_init = jpeg_v5_0_0_early_init,
@@ -667,7 +683,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v5_0_0_dec_ring_get_rptr,
.get_wptr = jpeg_v5_0_0_dec_ring_get_wptr,
.set_wptr = jpeg_v5_0_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -689,6 +705,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = jpeg_v5_0_0_ring_reset,
};
static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
index 3b6f65a25646..ab0bf880d3d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -196,18 +196,25 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
}
}
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) {
+ r = amdgpu_jpeg_ras_sw_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to initialize jpeg ras block!\n");
+ return r;
+ }
+ }
+
r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_5_0_1, ARRAY_SIZE(jpeg_reg_list_5_0_1));
if (r)
return r;
- if (!amdgpu_sriov_vf(adev)) {
- adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE;
- r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
- if (r)
- return r;
- }
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
- return 0;
+ return r;
}
/**
@@ -226,8 +233,7 @@ static int jpeg_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- if (!amdgpu_sriov_vf(adev))
- amdgpu_jpeg_sysfs_reset_mask_fini(adev);
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
r = amdgpu_jpeg_sw_fini(adev);
@@ -309,7 +315,7 @@ static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return ret;
@@ -691,7 +697,7 @@ static int jpeg_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
@@ -834,14 +840,14 @@ static void jpeg_v5_0_1_core_stall_reset(struct amdgpu_ring *ring)
WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00);
}
-static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
- if (amdgpu_sriov_vf(ring->adev))
- return -EOPNOTSUPP;
-
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
jpeg_v5_0_1_core_stall_reset(ring);
jpeg_v5_0_1_init_jrbc(ring);
- return amdgpu_ring_test_helper(ring);
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = {
@@ -872,6 +878,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
.get_rptr = jpeg_v5_0_1_dec_ring_get_rptr,
.get_wptr = jpeg_v5_0_1_dec_ring_get_wptr,
.set_wptr = jpeg_v5_0_1_dec_ring_set_wptr,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -1018,8 +1025,9 @@ static int jpeg_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_ban
/* refer to the smu driver interface header file */
static int jpeg_v5_0_1_err_codes[] = {
- 16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-7][S|D] */
- 24, 25, 26, 27, 28, 29, 30, 31
+ 16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-9][S|D] */
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 48, 49, 50, 51,
};
static bool jpeg_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
@@ -1060,6 +1068,11 @@ static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_comm
if (r)
return r;
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
+ &jpeg_v5_0_1_aca_info, NULL);
+ if (r)
+ goto late_fini;
+
if (amdgpu_ras_is_supported(adev, ras_block->block) &&
adev->jpeg.inst->ras_poison_irq.funcs) {
r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0);
@@ -1067,11 +1080,6 @@ static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_comm
goto late_fini;
}
- r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
- &jpeg_v5_0_1_aca_info, NULL);
- if (r)
- goto late_fini;
-
return 0;
late_fini:
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
index d6f50b13e2ba..64cae89357b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
@@ -21,6 +21,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "mes_userqueue.h"
@@ -198,6 +199,58 @@ static int mes_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
return 0;
}
+static int mes_userq_detect_and_reset(struct amdgpu_device *adev,
+ int queue_type)
+{
+ int db_array_size = amdgpu_mes_get_hung_queue_db_array_size(adev);
+ struct mes_detect_and_reset_queue_input input;
+ struct amdgpu_usermode_queue *queue;
+ unsigned int hung_db_num = 0;
+ unsigned long queue_id;
+ u32 db_array[8];
+ bool found_hung_queue = false;
+ int r, i;
+
+ if (db_array_size > 8) {
+		dev_err(adev->dev, "hung-queue DB array size (%d) exceeds the local buffer (8)\n",
+			db_array_size);
+ return -EINVAL;
+ }
+
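+	/* db_array receives the doorbell offsets of the queues MES reports as hung */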
+ memset(&input, 0x0, sizeof(struct mes_detect_and_reset_queue_input));
+
+ input.queue_type = queue_type;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = amdgpu_mes_detect_and_reset_hung_queues(adev, queue_type, false,
+ &hung_db_num, db_array);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r) {
+ dev_err(adev->dev, "Failed to detect and reset queues, err (%d)\n", r);
+ } else if (hung_db_num) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ if (queue->queue_type == queue_type) {
+ for (i = 0; i < hung_db_num; i++) {
+ if (queue->doorbell_index == db_array[i]) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ found_hung_queue = true;
+ atomic_inc(&adev->gpu_reset_counter);
+ amdgpu_userq_fence_driver_force_completion(queue);
+					drm_dev_wedged_event(adev_to_drm(adev),
+							     DRM_WEDGE_RECOVERY_NONE, NULL);
+ }
+ }
+ }
+ }
+ }
+
+ if (found_hung_queue) {
+ /* Resume scheduling after hang recovery */
+ r = amdgpu_mes_resume(adev);
+ }
+
+ return r;
+}
+
static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
struct drm_amdgpu_userq_in *args_in,
struct amdgpu_usermode_queue *queue)
@@ -215,13 +268,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
return -ENOMEM;
}
- if (!mqd_user->wptr_va || !mqd_user->rptr_va ||
- !mqd_user->queue_va || mqd_user->queue_size == 0) {
- DRM_ERROR("Invalid MQD parameters for userqueue\n");
- r = -EINVAL;
- goto free_props;
- }
-
r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size);
if (r) {
DRM_ERROR("Failed to create MQD object for userqueue\n");
@@ -254,6 +300,11 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
goto free_mqd;
}
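+		/* range-check the user-supplied EOP buffer VA before programming it */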
+ r = amdgpu_userq_input_va_validate(queue, compute_mqd->eop_va,
+ 2048);
+ if (r)
+ goto free_mqd;
+
userq_props->eop_gpu_addr = compute_mqd->eop_va;
userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
@@ -263,6 +314,14 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
kfree(compute_mqd);
} else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
+ struct amdgpu_gfx_shadow_info shadow_info;
+
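+		/* query the shadow/CSA buffer sizes needed to validate the user VAs below */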
+ if (adev->gfx.funcs->get_gfx_shadow_info) {
+ adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
+ } else {
+ r = -EINVAL;
+ goto free_mqd;
+ }
if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
DRM_ERROR("Invalid GFX MQD\n");
@@ -281,6 +340,16 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
userq_props->csa_addr = mqd_gfx_v11->csa_va;
userq_props->tmz_queue =
mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
+
+ r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->shadow_va,
+ shadow_info.shadow_size);
+ if (r)
+ goto free_mqd;
+ r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->csa_va,
+ shadow_info.csa_size);
+ if (r)
+ goto free_mqd;
+
kfree(mqd_gfx_v11);
} else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
@@ -297,6 +366,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
r = -ENOMEM;
goto free_mqd;
}
+ r = amdgpu_userq_input_va_validate(queue, mqd_sdma_v11->csa_va,
+ 32);
+ if (r)
+ goto free_mqd;
userq_props->csa_addr = mqd_sdma_v11->csa_va;
kfree(mqd_sdma_v11);
@@ -347,9 +420,82 @@ mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr,
amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
}
+static int mes_userq_preempt(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct mes_suspend_gang_input queue_input;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ signed long timeout = 2100000; /* 2100 ms */
+ u64 fence_gpu_addr;
+ u32 fence_offset;
+ u64 *fence_ptr;
+ int i, r;
+
+ if (queue->state != AMDGPU_USERQ_STATE_MAPPED)
+ return 0;
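+
+	/* writeback slot acts as a CPU-visible fence that MES signals once the suspend completes */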
+ r = amdgpu_device_wb_get(adev, &fence_offset);
+ if (r)
+ return r;
+
+ fence_gpu_addr = adev->wb.gpu_addr + (fence_offset * 4);
+ fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
+ *fence_ptr = 0;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_suspend_gang_input));
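+	/* the gang context sits right after the per-process context in the queue's FW object */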
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+ queue_input.suspend_fence_addr = fence_gpu_addr;
+ queue_input.suspend_fence_value = 1;
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->suspend_gang(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r) {
+ DRM_ERROR("Failed to suspend gang: %d\n", r);
+ goto out;
+ }
+
+ for (i = 0; i < timeout; i++) {
+ if (*fence_ptr == 1)
+ goto out;
+ udelay(1);
+ }
+ r = -ETIMEDOUT;
+
+out:
+ amdgpu_device_wb_free(adev, fence_offset);
+ return r;
+}
+
+static int mes_userq_restore(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct mes_resume_gang_input queue_input;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ int r;
+
+ if (queue->state == AMDGPU_USERQ_STATE_HUNG)
+ return -EINVAL;
+ if (queue->state != AMDGPU_USERQ_STATE_PREEMPTED)
+ return 0;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_resume_gang_input));
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->resume_gang(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+ dev_err(adev->dev, "Failed to resume queue, err (%d)\n", r);
+ return r;
+}
+
const struct amdgpu_userq_funcs userq_mes_funcs = {
.mqd_create = mes_userq_mqd_create,
.mqd_destroy = mes_userq_mqd_destroy,
.unmap = mes_userq_unmap,
.map = mes_userq_map,
+ .detect_and_reset = mes_userq_detect_and_reset,
+ .preempt = mes_userq_preempt,
+ .restore = mes_userq_restore,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index c9eba537de09..3a52754b5cad 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -66,6 +66,9 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);
#define GFX_MES_DRAM_SIZE 0x80000
#define MES11_HW_RESOURCE_1_SIZE (128 * AMDGPU_GPU_PAGE_SIZE)
+#define MES11_HUNG_DB_OFFSET_ARRAY_SIZE 8 /* [0:3] = db offset, [4:7] = hqd info */
+#define MES11_HUNG_HQD_INFO_OFFSET 4
+
static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -366,6 +369,7 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
struct mes_remove_queue_input *input)
{
union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
+ uint32_t mes_rev = mes->sched_version & AMDGPU_MES_VERSION_MASK;
memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));
@@ -376,6 +380,9 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;
+ if (mes_rev >= 0x60)
+ mes_remove_queue_pkt.remove_queue_after_reset = input->remove_queue_after_reset;
+
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
@@ -641,8 +648,9 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
break;
case MES_MISC_OP_CHANGE_CONFIG:
if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) < 0x63) {
- dev_err(mes->adev->dev, "MES FW version must be larger than 0x63 to support limit single process feature.\n");
- return -EINVAL;
+			dev_warn_once(mes->adev->dev,
+				      "MES FW version must be >= 0x63 to support the limit single process feature.\n");
+ return 0;
}
misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG;
misc_pkt.change_config.opcode =
@@ -710,6 +718,12 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
mes_set_hw_res_pkt.oversubscription_timer = 50;
+ if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x7f)
+ mes_set_hw_res_pkt.enable_lr_compute_wa = 1;
+ else
+ dev_info_once(mes->adev->dev,
+ "MES FW version must be >= 0x7f to enable LR compute workaround.\n");
+
if (amdgpu_mes_log_enable) {
mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr =
@@ -783,6 +797,32 @@ static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__RESET, api_status));
}
+static int mes_v11_0_detect_and_reset_hung_queues(struct amdgpu_mes *mes,
+ struct mes_detect_and_reset_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.queue_type =
+ convert_to_mes_queue_type(input->queue_type);
+ mes_reset_queue_pkt.doorbell_offset_addr =
+ mes->hung_queue_db_array_gpu_addr;
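+	/* MES writes the doorbell offsets of the hung queues into this GPU buffer */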
+
+ if (input->detect_only)
+ mes_reset_queue_pkt.hang_detect_only = 1;
+ else
+ mes_reset_queue_pkt.hang_detect_then_reset = 1;
+
+ return mes_v11_0_submit_pkt_and_poll_completion(mes,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__RESET, api_status));
+}
+
static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.add_hw_queue = mes_v11_0_add_hw_queue,
.remove_hw_queue = mes_v11_0_remove_hw_queue,
@@ -792,6 +832,7 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.resume_gang = mes_v11_0_resume_gang,
.misc_op = mes_v11_0_misc_op,
.reset_hw_queue = mes_v11_0_reset_hw_queue,
+ .detect_and_reset_hung_queues = mes_v11_0_detect_and_reset_hung_queues,
};
static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
@@ -1630,10 +1671,12 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
goto failure;
- r = mes_v11_0_set_hw_resources_1(&adev->mes);
- if (r) {
- DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r);
- goto failure;
+ if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x50) {
+ r = mes_v11_0_set_hw_resources_1(&adev->mes);
+ if (r) {
+ DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r);
+ goto failure;
+ }
}
r = mes_v11_0_query_sched_status(&adev->mes);
@@ -1682,6 +1725,9 @@ static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
+ adev->mes.hung_queue_db_array_size = MES11_HUNG_DB_OFFSET_ARRAY_SIZE;
+ adev->mes.hung_queue_hqd_info_offset = MES11_HUNG_HQD_INFO_OFFSET;
+
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index b4f17332d466..744e95d3984a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -47,6 +47,9 @@ static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev);
#define MES_EOP_SIZE 2048
+#define MES12_HUNG_DB_OFFSET_ARRAY_SIZE 8 /* [0:3] = db offset, [4:7] = hqd info */
+#define MES12_HUNG_HQD_INFO_OFFSET 4
+
static void mes_v12_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -108,6 +111,7 @@ static const char *mes_v12_0_opcodes[] = {
"SET_SE_MODE",
"SET_GANG_SUBMIT",
"SET_HW_RSRC_1",
+ "INVALIDATE_TLBS",
};
static const char *mes_v12_0_misc_opcodes[] = {
@@ -225,7 +229,12 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
pipe, x_pkt->header.opcode);
r = amdgpu_fence_wait_polling(ring, seq, timeout);
- if (r < 1 || !*status_ptr) {
+
+ /*
+	 * status_ptr[31:0] == 0 means failure, status_ptr[31:0] == 1 means success.
+	 * On failure, status_ptr[63:32] carries debug error information.
+ */
+ if (r < 1 || !(lower_32_bits(*status_ptr))) {
if (misc_op_str)
dev_err(adev->dev, "MES(%d) failed to respond to msg=%s (%s)\n",
@@ -352,6 +361,7 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
struct mes_remove_queue_input *input)
{
union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
+ uint32_t mes_rev = mes->sched_version & AMDGPU_MES_VERSION_MASK;
memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));
@@ -362,6 +372,9 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;
+ if (mes_rev >= 0x5a)
+ mes_remove_queue_pkt.remove_queue_after_reset = input->remove_queue_after_reset;
+
return mes_v12_0_submit_pkt_and_poll_completion(mes,
AMDGPU_MES_SCHED_PIPE,
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
@@ -567,13 +580,41 @@ static int mes_v12_0_unmap_legacy_queue(struct amdgpu_mes *mes,
static int mes_v12_0_suspend_gang(struct amdgpu_mes *mes,
struct mes_suspend_gang_input *input)
{
- return 0;
+ union MESAPI__SUSPEND mes_suspend_gang_pkt;
+
+ memset(&mes_suspend_gang_pkt, 0, sizeof(mes_suspend_gang_pkt));
+
+ mes_suspend_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_suspend_gang_pkt.header.opcode = MES_SCH_API_SUSPEND;
+ mes_suspend_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_suspend_gang_pkt.suspend_all_gangs = input->suspend_all_gangs;
+ mes_suspend_gang_pkt.gang_context_addr = input->gang_context_addr;
+ mes_suspend_gang_pkt.suspend_fence_addr = input->suspend_fence_addr;
+ mes_suspend_gang_pkt.suspend_fence_value = input->suspend_fence_value;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE,
+ &mes_suspend_gang_pkt, sizeof(mes_suspend_gang_pkt),
+ offsetof(union MESAPI__SUSPEND, api_status));
}
static int mes_v12_0_resume_gang(struct amdgpu_mes *mes,
struct mes_resume_gang_input *input)
{
- return 0;
+ union MESAPI__RESUME mes_resume_gang_pkt;
+
+ memset(&mes_resume_gang_pkt, 0, sizeof(mes_resume_gang_pkt));
+
+ mes_resume_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_resume_gang_pkt.header.opcode = MES_SCH_API_RESUME;
+ mes_resume_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_resume_gang_pkt.resume_all_gangs = input->resume_all_gangs;
+ mes_resume_gang_pkt.gang_context_addr = input->gang_context_addr;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE,
+ &mes_resume_gang_pkt, sizeof(mes_resume_gang_pkt),
+ offsetof(union MESAPI__RESUME, api_status));
}
static int mes_v12_0_query_sched_status(struct amdgpu_mes *mes, int pipe)
@@ -738,6 +779,11 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
mes_set_hw_res_pkt.use_different_vmid_compute = 1;
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
+ if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x82)
+ mes_set_hw_res_pkt.enable_lr_compute_wa = 1;
+ else
+ dev_info_once(adev->dev,
+ "MES FW version must be >= 0x82 to enable LR compute workaround.\n");
/*
* Keep oversubscribe timer for sdma . When we have unmapped doorbell
@@ -879,6 +925,74 @@ static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__RESET, api_status));
}
+static int mes_v12_0_detect_and_reset_hung_queues(struct amdgpu_mes *mes,
+ struct mes_detect_and_reset_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.queue_type =
+ convert_to_mes_queue_type(input->queue_type);
+ mes_reset_queue_pkt.doorbell_offset_addr =
+ mes->hung_queue_db_array_gpu_addr;
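+	/* MES writes the doorbell offsets of the hung queues into this GPU buffer */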
+
+ if (input->detect_only)
+ mes_reset_queue_pkt.hang_detect_only = 1;
+ else
+ mes_reset_queue_pkt.hang_detect_then_reset = 1;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__RESET, api_status));
+}
+
+static int mes_v12_inv_tlb_convert_hub_id(uint8_t id)
+{
+ /*
+	 * MES doesn't support invalidating the gc_hub on a slave XCC
+	 * individually; the master XCC invalidates all gc_hubs for the
+	 * partition.
+ */
+ if (AMDGPU_IS_GFXHUB(id))
+ return 0;
+ else if (AMDGPU_IS_MMHUB0(id))
+ return 1;
+ else
+ return -EINVAL;
+}
+
+static int mes_v12_0_inv_tlbs_pasid(struct amdgpu_mes *mes,
+ struct mes_inv_tlbs_pasid_input *input)
+{
+ union MESAPI__INV_TLBS mes_inv_tlbs;
+ int ret;
+
+ memset(&mes_inv_tlbs, 0, sizeof(mes_inv_tlbs));
+
+ mes_inv_tlbs.header.type = MES_API_TYPE_SCHEDULER;
+ mes_inv_tlbs.header.opcode = MES_SCH_API_INV_TLBS;
+ mes_inv_tlbs.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_inv_tlbs.invalidate_tlbs.inv_sel = 0;
+ mes_inv_tlbs.invalidate_tlbs.flush_type = input->flush_type;
+ mes_inv_tlbs.invalidate_tlbs.inv_sel_id = input->pasid;
+
+	/* convert amdgpu_mes_hub_id to the hub_id expected by MES */
+ ret = mes_v12_inv_tlb_convert_hub_id(input->hub_id);
+ if (ret < 0)
+ return -EINVAL;
+ mes_inv_tlbs.invalidate_tlbs.hub_id = ret;
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_KIQ_PIPE,
+ &mes_inv_tlbs, sizeof(mes_inv_tlbs),
+ offsetof(union MESAPI__INV_TLBS, api_status));
+}
+
static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.add_hw_queue = mes_v12_0_add_hw_queue,
.remove_hw_queue = mes_v12_0_remove_hw_queue,
@@ -888,6 +1002,8 @@ static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.resume_gang = mes_v12_0_resume_gang,
.misc_op = mes_v12_0_misc_op,
.reset_hw_queue = mes_v12_0_reset_hw_queue,
+ .invalidate_tlbs_pasid = mes_v12_0_inv_tlbs_pasid,
+ .detect_and_reset_hung_queues = mes_v12_0_detect_and_reset_hung_queues,
};
static int mes_v12_0_allocate_ucode_buffer(struct amdgpu_device *adev,
@@ -1742,7 +1858,8 @@ static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
goto failure;
- mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE);
+ if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x4b)
+ mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE);
mes_v12_0_init_aggregated_doorbell(&adev->mes);
@@ -1792,6 +1909,9 @@ static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
+ adev->mes.hung_queue_db_array_size = MES12_HUNG_DB_OFFSET_ARRAY_SIZE;
+ adev->mes.hung_queue_hqd_info_offset = MES12_HUNG_HQD_INFO_OFFSET;
+
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
r = amdgpu_mes_init_microcode(adev, pipe);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index 76167fadb292..cc688ae79e84 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -76,6 +76,8 @@ static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmi
static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
{
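+	/* with xgmi VF migration enabled the GART aperture follows vram_start, not fb_start */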
+ uint64_t gart_start = amdgpu_virt_xgmi_migrate_enabled(adev) ?
+ adev->gmc.vram_start : adev->gmc.fb_start;
uint64_t pt_base;
u32 inst_mask;
int i;
@@ -95,10 +97,10 @@ static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
if (adev->gmc.pdb0_bo) {
WREG32_SOC15(MMHUB, i,
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->gmc.fb_start >> 12));
+ (u32)(gart_start >> 12));
WREG32_SOC15(MMHUB, i,
regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->gmc.fb_start >> 44));
+ (u32)(gart_start >> 44));
WREG32_SOC15(MMHUB, i,
regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
index 134c4ec10887..910337dc28d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
@@ -36,40 +36,47 @@
static const char *mmhub_client_ids_v3_0_1[][2] = {
[0][0] = "VMC",
+ [1][0] = "ISPXT",
+ [2][0] = "ISPIXT",
[4][0] = "DCEDMC",
[5][0] = "DCEVGA",
[6][0] = "MP0",
[7][0] = "MP1",
- [8][0] = "MPIO",
- [16][0] = "HDP",
- [17][0] = "LSDMA",
- [18][0] = "JPEG",
- [19][0] = "VCNU0",
- [21][0] = "VSCH",
- [22][0] = "VCNU1",
- [23][0] = "VCN1",
- [32+20][0] = "VCN0",
- [2][1] = "DBGUNBIO",
+ [8][0] = "MPM",
+ [12][0] = "ISPTNR",
+ [14][0] = "ISPCRD0",
+ [15][0] = "ISPCRD1",
+ [16][0] = "ISPCRD2",
+ [22][0] = "HDP",
+ [23][0] = "LSDMA",
+ [24][0] = "JPEG",
+ [27][0] = "VSCH",
+ [28][0] = "VCNU",
+ [29][0] = "VCN",
+ [1][1] = "ISPXT",
+ [2][1] = "ISPIXT",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
[5][1] = "DCEVGA",
[6][1] = "MP0",
[7][1] = "MP1",
- [8][1] = "MPIO",
- [10][1] = "DBGU0",
- [11][1] = "DBGU1",
- [12][1] = "DBGU2",
- [13][1] = "DBGU3",
- [14][1] = "XDP",
- [15][1] = "OSSSYS",
- [16][1] = "HDP",
- [17][1] = "LSDMA",
- [18][1] = "JPEG",
- [19][1] = "VCNU0",
- [20][1] = "VCN0",
- [21][1] = "VSCH",
- [22][1] = "VCNU1",
- [23][1] = "VCN1",
+ [8][1] = "MPM",
+ [10][1] = "ISPMWR0",
+ [11][1] = "ISPMWR1",
+ [12][1] = "ISPTNR",
+ [13][1] = "ISPSWR",
+ [14][1] = "ISPCWR0",
+ [15][1] = "ISPCWR1",
+ [16][1] = "ISPCWR2",
+ [17][1] = "ISPCWR3",
+ [18][1] = "XDP",
+ [21][1] = "OSSSYS",
+ [22][1] = "HDP",
+ [23][1] = "LSDMA",
+ [24][1] = "JPEG",
+ [27][1] = "VSCH",
+ [28][1] = "VCNU",
+ [29][1] = "VCN",
};
static uint32_t mmhub_v3_0_1_get_invalidate_req(unsigned int vmid,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
index bc3d6c2fc87a..f6fc9778bc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
@@ -40,30 +40,129 @@
static const char *mmhub_client_ids_v3_3[][2] = {
[0][0] = "VMC",
+ [1][0] = "ISPXT",
+ [2][0] = "ISPIXT",
[4][0] = "DCEDMC",
[6][0] = "MP0",
[7][0] = "MP1",
[8][0] = "MPM",
+ [9][0] = "ISPPDPRD",
+ [10][0] = "ISPCSTATRD",
+ [11][0] = "ISPBYRPRD",
+ [12][0] = "ISPRGBPRD",
+ [13][0] = "ISPMCFPRD",
+ [14][0] = "ISPMCFPRD1",
+ [15][0] = "ISPYUVPRD",
+ [16][0] = "ISPMCSCRD",
+ [17][0] = "ISPGDCRD",
+ [18][0] = "ISPLMERD",
+ [22][0] = "ISPXT1",
+ [23][0] = "ISPIXT1",
[24][0] = "HDP",
[25][0] = "LSDMA",
[26][0] = "JPEG",
[27][0] = "VPE",
+ [28][0] = "VSCH",
[29][0] = "VCNU",
[30][0] = "VCN",
+ [1][1] = "ISPXT",
+ [2][1] = "ISPIXT",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
+ [5][1] = "ISPCSISWR",
[6][1] = "MP0",
[7][1] = "MP1",
[8][1] = "MPM",
+ [9][1] = "ISPPDPWR",
+ [10][1] = "ISPCSTATWR",
+ [11][1] = "ISPBYRPWR",
+ [12][1] = "ISPRGBPWR",
+ [13][1] = "ISPMCFPWR",
+ [14][1] = "ISPMWR0",
+ [15][1] = "ISPYUVPWR",
+ [16][1] = "ISPMCSCWR",
+ [17][1] = "ISPGDCWR",
+ [18][1] = "ISPLMEWR",
+ [20][1] = "ISPMWR2",
[21][1] = "OSSSYS",
+ [22][1] = "ISPXT1",
+ [23][1] = "ISPIXT1",
[24][1] = "HDP",
[25][1] = "LSDMA",
[26][1] = "JPEG",
[27][1] = "VPE",
+ [28][1] = "VSCH",
[29][1] = "VCNU",
[30][1] = "VCN",
};
+static const char *mmhub_client_ids_v3_3_1[][2] = {
+ [0][0] = "VMC",
+ [4][0] = "DCEDMC",
+ [6][0] = "MP0",
+ [7][0] = "MP1",
+ [8][0] = "MPM",
+ [24][0] = "HDP",
+ [25][0] = "LSDMA",
+ [26][0] = "JPEG0",
+ [27][0] = "VPE0",
+ [28][0] = "VSCH",
+ [29][0] = "VCNU0",
+ [30][0] = "VCN0",
+ [32+1][0] = "ISPXT",
+ [32+2][0] = "ISPIXT",
+ [32+9][0] = "ISPPDPRD",
+ [32+10][0] = "ISPCSTATRD",
+ [32+11][0] = "ISPBYRPRD",
+ [32+12][0] = "ISPRGBPRD",
+ [32+13][0] = "ISPMCFPRD",
+ [32+14][0] = "ISPMCFPRD1",
+ [32+15][0] = "ISPYUVPRD",
+ [32+16][0] = "ISPMCSCRD",
+ [32+17][0] = "ISPGDCRD",
+ [32+18][0] = "ISPLMERD",
+ [32+22][0] = "ISPXT1",
+ [32+23][0] = "ISPIXT1",
+ [32+26][0] = "JPEG1",
+ [32+27][0] = "VPE1",
+ [32+29][0] = "VCNU1",
+ [32+30][0] = "VCN1",
+ [3][1] = "DCEDWB",
+ [4][1] = "DCEDMC",
+ [6][1] = "MP0",
+ [7][1] = "MP1",
+ [8][1] = "MPM",
+ [21][1] = "OSSSYS",
+ [24][1] = "HDP",
+ [25][1] = "LSDMA",
+ [26][1] = "JPEG0",
+ [27][1] = "VPE0",
+ [28][1] = "VSCH",
+ [29][1] = "VCNU0",
+ [30][1] = "VCN0",
+ [32+1][1] = "ISPXT",
+ [32+2][1] = "ISPIXT",
+ [32+5][1] = "ISPCSISWR",
+ [32+9][1] = "ISPPDPWR",
+ [32+10][1] = "ISPCSTATWR",
+ [32+11][1] = "ISPBYRPWR",
+ [32+12][1] = "ISPRGBPWR",
+ [32+13][1] = "ISPMCFPWR",
+ [32+14][1] = "ISPMWR0",
+ [32+15][1] = "ISPYUVPWR",
+ [32+16][1] = "ISPMCSCWR",
+ [32+17][1] = "ISPGDCWR",
+ [32+18][1] = "ISPLMEWR",
+ [32+19][1] = "ISPMWR1",
+ [32+20][1] = "ISPMWR2",
+ [32+22][1] = "ISPXT1",
+ [32+23][1] = "ISPIXT1",
+ [32+26][1] = "JPEG1",
+ [32+27][1] = "VPE1",
+ [32+29][1] = "VCNU1",
+ [32+30][1] = "VCN1",
+};
+
static uint32_t mmhub_v3_3_get_invalidate_req(unsigned int vmid,
uint32_t flush_type)
{
@@ -102,12 +201,16 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 3, 0):
- case IP_VERSION(3, 3, 1):
case IP_VERSION(3, 3, 2):
mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ?
mmhub_client_ids_v3_3[cid][rw] :
cid == 0x140 ? "UMSCH" : NULL;
break;
+ case IP_VERSION(3, 3, 1):
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3_1) ?
+ mmhub_client_ids_v3_3_1[cid][rw] :
+ cid == 0x140 ? "UMSCH" : NULL;
+ break;
default:
mmhub_cid = NULL;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
index f2ab5001b492..951998454b25 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
@@ -37,39 +37,31 @@
static const char *mmhub_client_ids_v4_1_0[][2] = {
[0][0] = "VMC",
[4][0] = "DCEDMC",
- [5][0] = "DCEVGA",
[6][0] = "MP0",
[7][0] = "MP1",
[8][0] = "MPIO",
- [16][0] = "HDP",
- [17][0] = "LSDMA",
- [18][0] = "JPEG",
- [19][0] = "VCNU0",
- [21][0] = "VSCH",
- [22][0] = "VCNU1",
- [23][0] = "VCN1",
- [32+20][0] = "VCN0",
- [2][1] = "DBGUNBIO",
+ [16][0] = "LSDMA",
+ [17][0] = "JPEG",
+ [19][0] = "VCNU",
+ [22][0] = "VSCH",
+ [23][0] = "HDP",
+ [32+23][0] = "VCNRD",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
- [5][1] = "DCEVGA",
[6][1] = "MP0",
[7][1] = "MP1",
[8][1] = "MPIO",
[10][1] = "DBGU0",
[11][1] = "DBGU1",
- [12][1] = "DBGU2",
- [13][1] = "DBGU3",
+ [12][1] = "DBGUNBIO",
[14][1] = "XDP",
[15][1] = "OSSSYS",
- [16][1] = "HDP",
- [17][1] = "LSDMA",
- [18][1] = "JPEG",
- [19][1] = "VCNU0",
- [20][1] = "VCN0",
- [21][1] = "VSCH",
- [22][1] = "VCNU1",
- [23][1] = "VCN1",
+ [16][1] = "LSDMA",
+ [17][1] = "JPEG",
+ [18][1] = "VCNWR",
+ [19][1] = "VCNU",
+ [22][1] = "VSCH",
+ [23][1] = "HDP",
};
static uint32_t mmhub_v4_1_0_get_invalidate_req(unsigned int vmid,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 48101a34e049..9a40107a0869 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -292,14 +292,32 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
}
}
-static void xgpu_ai_mailbox_bad_pages_work(struct work_struct *work)
+static void xgpu_ai_mailbox_req_bad_pages_work(struct work_struct *work)
{
- struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
if (down_read_trylock(&adev->reset_domain->sem)) {
amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_request_bad_pages(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
+/**
+ * xgpu_ai_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information
+ * @work: pointer to the work_struct
+ *
+ * This work handler is triggered when bad pages are ready, and it reinitializes
+ * the data exchange region to retrieve updated bad page information from the host.
+ */
+static void xgpu_ai_mailbox_handle_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_init_data_exchange(adev);
up_read(&adev->reset_domain->sem);
}
@@ -327,10 +345,15 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
+ case IDH_RAS_BAD_PAGES_READY:
+ xgpu_ai_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.handle_bad_pages_work);
+ break;
case IDH_RAS_BAD_PAGES_NOTIFICATION:
xgpu_ai_mailbox_send_ack(adev);
if (amdgpu_sriov_runtime(adev))
- schedule_work(&adev->virt.bad_pages_work);
+ schedule_work(&adev->virt.req_bad_pages_work);
break;
case IDH_UNRECOV_ERR_NOTIFICATION:
xgpu_ai_mailbox_send_ack(adev);
@@ -415,7 +438,8 @@ int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
- INIT_WORK(&adev->virt.bad_pages_work, xgpu_ai_mailbox_bad_pages_work);
+ INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_ai_mailbox_req_bad_pages_work);
+ INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_ai_mailbox_handle_bad_pages_work);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index f6d8597452ed..e7cd07383d56 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -173,13 +173,17 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
enum idh_request req, u32 data1, u32 data2, u32 data3)
{
- int r, retry = 1;
+ struct amdgpu_virt *virt = &adev->virt;
+ int r = 0, retry = 1;
enum idh_event event = -1;
+ mutex_lock(&virt->access_req_mutex);
send_request:
- if (amdgpu_ras_is_rma(adev))
- return -ENODEV;
+ if (amdgpu_ras_is_rma(adev)) {
+ r = -ENODEV;
+ goto out;
+ }
xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);
@@ -202,8 +206,8 @@ send_request:
case IDH_REQ_RAS_CPER_DUMP:
event = IDH_RAS_CPER_DUMP_READY;
break;
- case IDH_REQ_RAS_BAD_PAGES:
- event = IDH_RAS_BAD_PAGES_READY;
+ case IDH_REQ_RAS_CHK_CRITI:
+ event = IDH_REQ_RAS_CHK_CRITI_READY;
break;
default:
break;
@@ -217,17 +221,25 @@ send_request:
if (req != IDH_REQ_GPU_INIT_DATA) {
dev_err(adev->dev, "Doesn't get msg:%d from pf, error=%d\n", event, r);
- return r;
+ goto out;
} else /* host doesn't support REQ_GPU_INIT_DATA handshake */
adev->virt.req_init_data_ver = 0;
} else {
if (req == IDH_REQ_GPU_INIT_DATA) {
- adev->virt.req_init_data_ver =
- RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);
-
- /* assume V1 in case host doesn't set version number */
- if (adev->virt.req_init_data_ver < 1)
- adev->virt.req_init_data_ver = 1;
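+			/*
+			 * DW1 carries the init-data version; a V2 reply also
+			 * reports the critical-region header offset and size
+			 * in DW2/DW3.
+			 */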
+ switch (RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1)) {
+ case GPU_CRIT_REGION_V2:
+ adev->virt.req_init_data_ver = GPU_CRIT_REGION_V2;
+ adev->virt.init_data_header.offset =
+ RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
+ adev->virt.init_data_header.size_kb =
+ RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW3);
+ break;
+ default:
+ adev->virt.req_init_data_ver = GPU_CRIT_REGION_V1;
+ adev->virt.init_data_header.offset = -1;
+ adev->virt.init_data_header.size_kb = 0;
+ break;
+ }
}
}
@@ -238,7 +250,10 @@ send_request:
}
}
- return 0;
+out:
+ mutex_unlock(&virt->access_req_mutex);
+
+ return r;
}
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
@@ -285,7 +300,8 @@ static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
- return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
+ return xgpu_nv_send_access_requests_with_param(adev, IDH_REQ_GPU_INIT_DATA,
+ 0, GPU_CRIT_REGION_V2, 0);
}
static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
@@ -359,14 +375,32 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
}
}
-static void xgpu_nv_mailbox_bad_pages_work(struct work_struct *work)
+static void xgpu_nv_mailbox_req_bad_pages_work(struct work_struct *work)
{
- struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
if (down_read_trylock(&adev->reset_domain->sem)) {
amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_request_bad_pages(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
+/**
+ * xgpu_nv_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information
+ * @work: pointer to the work_struct
+ *
+ * This work handler is triggered when bad pages are ready, and it reinitializes
+ * the data exchange region to retrieve updated bad page information from the host.
+ */
+static void xgpu_nv_mailbox_handle_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_init_data_exchange(adev);
up_read(&adev->reset_domain->sem);
}
@@ -397,10 +431,15 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
+ case IDH_RAS_BAD_PAGES_READY:
+ xgpu_nv_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.handle_bad_pages_work);
+ break;
case IDH_RAS_BAD_PAGES_NOTIFICATION:
xgpu_nv_mailbox_send_ack(adev);
if (amdgpu_sriov_runtime(adev))
- schedule_work(&adev->virt.bad_pages_work);
+ schedule_work(&adev->virt.req_bad_pages_work);
break;
case IDH_UNRECOV_ERR_NOTIFICATION:
xgpu_nv_mailbox_send_ack(adev);
@@ -485,7 +524,8 @@ int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
- INIT_WORK(&adev->virt.bad_pages_work, xgpu_nv_mailbox_bad_pages_work);
+ INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_nv_mailbox_req_bad_pages_work);
+ INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_nv_mailbox_handle_bad_pages_work);
return 0;
}
@@ -535,6 +575,16 @@ static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev)
return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES);
}
+static int xgpu_nv_check_vf_critical_region(struct amdgpu_device *adev, u64 addr)
+{
+ uint32_t addr_hi, addr_lo;
+
+ addr_hi = (uint32_t)(addr >> 32);
+ addr_lo = (uint32_t)(addr & 0xFFFFFFFF);
+ return xgpu_nv_send_access_requests_with_param(
+ adev, IDH_REQ_RAS_CHK_CRITI, addr_hi, addr_lo, 0);
+}
+
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
@@ -548,4 +598,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_ras_err_count = xgpu_nv_req_ras_err_count,
.req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
.req_bad_pages = xgpu_nv_req_ras_bad_pages,
+ .req_ras_chk_criti = xgpu_nv_check_vf_critical_region
};
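Illustrative caller sketch (not part of the patch): exercising the new req_ras_chk_criti hook through the virt ops table. The wrapper is hypothetical; the split of the 64-bit address into DW2/DW3 mailbox parameters happens inside xgpu_nv_check_vf_critical_region() as shown above.

	static int amdgpu_virt_check_critical_region(struct amdgpu_device *adev,
						     u64 addr)
	{
		struct amdgpu_virt *virt = &adev->virt;

		if (!virt->ops || !virt->ops->req_ras_chk_criti)
			return -EOPNOTSUPP;

		return virt->ops->req_ras_chk_criti(adev, addr);
	}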
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 5808689562cc..c1083e5e41e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -43,6 +43,7 @@ enum idh_request {
IDH_REQ_RAS_ERROR_COUNT = 203,
IDH_REQ_RAS_CPER_DUMP = 204,
IDH_REQ_RAS_BAD_PAGES = 205,
+ IDH_REQ_RAS_CHK_CRITI = 206
};
enum idh_event {
@@ -62,6 +63,7 @@ enum idh_event {
IDH_RAS_BAD_PAGES_READY = 15,
IDH_RAS_BAD_PAGES_NOTIFICATION = 16,
IDH_UNRECOV_ERR_NOTIFICATION = 17,
+ IDH_REQ_RAS_CHK_CRITI_READY = 18,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index d5002ff931d8..860bc5cb03c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -151,9 +151,9 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan
* BIF_SDMA0_DOORBELL_RANGE: 0x3bc0
* BIF_SDMA1_DOORBELL_RANGE: 0x3bc4
* BIF_SDMA2_DOORBELL_RANGE: 0x3bd8
-+ * BIF_SDMA4_DOORBELL_RANGE:
-+ * ARCTURUS: 0x3be0
-+ * ALDEBARAN: 0x3be4
+ * BIF_SDMA4_DOORBELL_RANGE:
+ * ARCTURUS: 0x3be0
+ * ALDEBARAN: 0x3be4
*/
if (adev->asic_type == CHIP_ALDEBARAN && instance == 4)
reg = instance + 0x4 + 0x1 +
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index a376f072700d..bdfd2917e3ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -31,9 +31,6 @@
#define NPS_MODE_MASK 0x000000FFL
-/* Core 0 Port 0 counter */
-#define smnPCIEP_NAK_COUNTER 0x1A340218
-
static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
@@ -44,19 +41,21 @@ static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
static u32 nbio_v7_9_get_rev_id(struct amdgpu_device *adev)
{
- u32 tmp;
-
- tmp = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0));
- /* If it is VF or subrevision holds a non-zero value, that should be used */
- if (tmp || amdgpu_sriov_vf(adev))
- return tmp;
+ u32 rev_id;
- /* If discovery subrev is not updated, use register version */
- tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
- tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0,
- STRAP_ATI_REV_ID_DEV0_F0);
+ /*
+ * For VFs, fetch the sub-revision from the IP-discovery table (zero
+ * if the entry is not populated); otherwise read the strap register.
+ */
+ if (amdgpu_sriov_vf(adev)) {
+ rev_id = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0));
+ } else {
+ rev_id = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
+ rev_id = REG_GET_FIELD(rev_id, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0,
+ STRAP_ATI_REV_ID_DEV0_F0);
+ }
- return tmp;
+ return rev_id;
}
static void nbio_v7_9_mc_access_enable(struct amdgpu_device *adev, bool enable)
@@ -467,22 +466,6 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
}
}
-static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev)
-{
- u32 val, nak_r, nak_g;
-
- if (adev->flags & AMD_IS_APU)
- return 0;
-
- /* Get the number of NAKs received and generated */
- val = RREG32_PCIE(smnPCIEP_NAK_COUNTER);
- nak_r = val & 0xFFFF;
- nak_g = val >> 16;
-
- /* Add the total number of NAKs, i.e the number of replays */
- return (nak_r + nak_g);
-}
-
#define MMIO_REG_HOLE_OFFSET 0x1A000
static void nbio_v7_9_set_reg_remap(struct amdgpu_device *adev)
@@ -524,7 +507,6 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
.get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
.is_nps_switch_requested = nbio_v7_9_is_nps_switch_requested,
.init_registers = nbio_v7_9_init_registers,
- .get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count,
.set_reg_remap = nbio_v7_9_set_reg_remap,
};
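For reference, REG_GET_FIELD() used in the rev_id path above is the stock amdgpu mask-and-shift helper. Paraphrased definition (consult amdgpu.h for the authoritative one):

	#define REG_FIELD_SHIFT(reg, field)	reg##__##field##__SHIFT
	#define REG_FIELD_MASK(reg, field)	reg##__##field##_MASK
	#define REG_GET_FIELD(value, reg, field) \
		(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))

So the strap read extracts STRAP_ATI_REV_ID_DEV0_F0 by masking the field and shifting it down to bit 0.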
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.h b/drivers/gpu/drm/amd/amdgpu/nv.h
index 83e9782aef39..8f4817404f10 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/nv.h
@@ -31,5 +31,6 @@ extern const struct amdgpu_ip_block_version nv_common_ip_block;
void nv_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
void nv_set_virt_ops(struct amdgpu_device *adev);
+int cyan_skillfish_reg_base_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index f4a91b126c73..73f87131a7e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -106,7 +106,9 @@ enum psp_gfx_cmd_id
/*IDs of performance monitoring/profiling*/
GFX_CMD_ID_CONFIG_SQ_PERFMON = 0x00000046, /* Config CGTT_SQ_CLK_CTRL */
/* Dynamic memory partitioninig (NPS mode change)*/
- GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */
+ GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */
+ GFX_CMD_ID_FB_FW_RESERV_ADDR = 0x00000050, /* Query FW reservation addr */
+ GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR = 0x00000051, /* Query FW reservation extended addr */
};
/* PSP boot config sub-commands */
@@ -404,11 +406,19 @@ struct psp_gfx_uresp_bootcfg {
uint32_t boot_cfg; /* boot config data */
};
+/* Command-specific response for fw reserve info */
+struct psp_gfx_uresp_fw_reserve_info {
+ uint32_t reserve_base_address_hi;
+ uint32_t reserve_base_address_lo;
+ uint32_t reserve_size;
+};
+
/* Union of command-specific responses for GPCOM ring. */
union psp_gfx_uresp {
struct psp_gfx_uresp_reserved reserved;
struct psp_gfx_uresp_bootcfg boot_cfg;
struct psp_gfx_uresp_fwar_db_info fwar_db_info;
+ struct psp_gfx_uresp_fw_reserve_info fw_reserve_info;
};
/* Structure of GFX Response buffer.
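Illustrative consumer sketch (not part of the patch): assembling the 64-bit reservation base from the split hi/lo words in the new response struct. The helper is hypothetical; the field names follow the header above.

	static u64 psp_fw_reserve_base(const union psp_gfx_uresp *uresp)
	{
		const struct psp_gfx_uresp_fw_reserve_info *info =
			&uresp->fw_reserve_info;

		return ((u64)info->reserve_base_address_hi << 32) |
		       info->reserve_base_address_lo;
	}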
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 145186a1e48f..3584b8c18fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -94,7 +94,7 @@ static int psp_v10_0_ring_create(struct psp_context *psp,
/* Wait for response flag (bit 31) in C2PMSG_64 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -115,7 +115,7 @@ static int psp_v10_0_ring_stop(struct psp_context *psp,
/* Wait for response flag (bit 31) in C2PMSG_64 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
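The hunks in this and the following PSP files replace psp_wait_for()'s old boolean check_changed argument with a flags word, and the open-coded bit-31 constants with named mailbox macros. A sketch of the assumed definitions (values inferred from the call sites; the authoritative ones live in the PSP headers):

	/* Bit 31 signals ready/response; the low 16 bits carry status. */
	#define MBOX_TOS_READY_FLAG	0x80000000
	#define MBOX_TOS_READY_MASK	0x80000000
	#define MBOX_TOS_RESP_FLAG	0x80000000
	#define MBOX_TOS_RESP_MASK	0x8000FFFF

	/* Flags replacing the former bool argument of psp_wait_for(): */
	#define PSP_WAITREG_CHANGED	BIT(0)	/* wait for value to change */
	#define PSP_WAITREG_NOVERBOSE	BIT(1)	/* suppress timeout logging */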
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 215543575f47..a9be7a505026 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -142,21 +142,43 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
return err;
}
-static int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
+static int psp_v11_wait_for_tos_unload(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
+ uint32_t sol_reg1, sol_reg2;
+ int retry_loop;
+
+ /* Wait for the TOS to be unloaded */
+ for (retry_loop = 0; retry_loop < 20; retry_loop++) {
+ sol_reg1 = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+ usleep_range(1000, 2000);
+ sol_reg2 = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+ if (sol_reg1 == sol_reg2)
+ return 0;
+ }
+ dev_err(adev->dev, "TOS unload failed, C2PMSG_33: %x C2PMSG_81: %x\n",
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_33),
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81));
+ return -ETIME;
+}
+
+static int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
int ret;
int retry_loop;
- for (retry_loop = 0; retry_loop < 10; retry_loop++) {
+ /* For a reset done at the end of S3, only wait for TOS to be unloaded */
+ if (adev->in_s3 && !(adev->flags & AMD_IS_APU) && amdgpu_in_reset(adev))
+ return psp_v11_wait_for_tos_unload(psp);
+
+ for (retry_loop = 0; retry_loop < 20; retry_loop++) {
/* Wait for bootloader to signify that is
ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000,
- 0x80000000,
- false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x8000FFFF, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -252,8 +274,8 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -277,11 +299,13 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
/* Wait for response flag (bit 31) */
if (amdgpu_sriov_vf(adev))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -317,13 +341,15 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for sOS ready for ring creation\n");
return ret;
@@ -347,8 +373,9 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -381,7 +408,8 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(psp, offset, MBOX_TOS_READY_FLAG,
+ MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_INFO("psp is not working correctly before mode1 reset!\n");
@@ -393,17 +421,6 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
msleep(500);
- offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
-
- ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
-
- if (ret) {
- DRM_INFO("psp mode 1 reset failed!\n");
- return -EINVAL;
- }
-
- DRM_INFO("psp mode1 reset succeed \n");
-
return 0;
}
@@ -421,8 +438,9 @@ static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
for (i = 0; i < max_wait; i++) {
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
}
@@ -601,7 +619,7 @@ static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -638,7 +656,7 @@ static int psp_v11_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (!ret)
*fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36);
@@ -659,7 +677,8 @@ static const struct psp_funcs psp_v11_0_funcs = {
.ring_get_wptr = psp_v11_0_ring_get_wptr,
.ring_set_wptr = psp_v11_0_ring_set_wptr,
.load_usbc_pd_fw = psp_v11_0_load_usbc_pd_fw,
- .read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw
+ .read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw,
+ .wait_for_bootloader = psp_v11_0_wait_for_bootloader
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c
index 5697760a819b..93787a90d598 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c
@@ -41,8 +41,9 @@ static int psp_v11_0_8_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Write the ring destroy command*/
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
@@ -50,8 +51,9 @@ static int psp_v11_0_8_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -87,13 +89,15 @@ static int psp_v11_0_8_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -117,8 +121,9 @@ static int psp_v11_0_8_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index 80153f837470..4c6450d62299 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -82,7 +82,7 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -97,7 +97,7 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
psp_gfxdrv_command_reg);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
return ret;
}
@@ -118,7 +118,7 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -133,8 +133,8 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp)
psp_gfxdrv_command_reg);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -163,7 +163,7 @@ static int psp_v12_0_ring_create(struct psp_context *psp,
/* Wait for response flag (bit 31) in C2PMSG_64 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -184,11 +184,13 @@ static int psp_v12_0_ring_stop(struct psp_context *psp,
/* Wait for response flag (bit 31) */
if (amdgpu_sriov_vf(adev))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
return ret;
}
@@ -219,7 +221,8 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(psp, offset, MBOX_TOS_READY_FLAG,
+ MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_INFO("psp is not working correctly before mode1 reset!\n");
@@ -233,7 +236,8 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp, offset, MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK,
+ 0);
if (ret) {
DRM_INFO("psp mode 1 reset failed!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index df612fd9cc50..af4a7d7c4abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -42,7 +42,9 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_5_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_8_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_8_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos_kicker.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta_kicker.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");
@@ -180,7 +182,7 @@ static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
ready having bit 31 of C2PMSG_33 set to 1 */
ret = psp_wait_for(
psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_33),
- 0x80000000, 0xffffffff, false);
+ 0x80000000, 0xffffffff, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
@@ -211,7 +213,7 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
for (retry_loop = 0; retry_loop < retry_cnt; retry_loop++) {
ret = psp_wait_for(
psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0xffffffff, false);
+ 0x80000000, 0xffffffff, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -360,8 +362,8 @@ static int psp_v13_0_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
if (!ret)
psp_v13_0_init_sos_version(psp);
@@ -382,8 +384,9 @@ static int psp_v13_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Write the ring destroy command*/
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64,
@@ -391,8 +394,9 @@ static int psp_v13_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -428,13 +432,15 @@ static int psp_v13_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -458,8 +464,9 @@ static int psp_v13_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -522,8 +529,9 @@ static int psp_v13_0_memory_training_send_msg(struct psp_context *psp, int msg)
max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
for (i = 0; i < max_wait; i++) {
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
}
@@ -675,7 +683,7 @@ static int psp_v13_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -712,7 +720,7 @@ static int psp_v13_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (!ret)
*fw_ver = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36);
@@ -737,8 +745,9 @@ static int psp_v13_0_exec_spi_cmd(struct psp_context *psp, int cmd)
ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret);
return ret;
@@ -762,7 +771,7 @@ static int psp_v13_0_update_spirom(struct psp_context *psp,
/* Confirm PSP is ready to start */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
return ret;
@@ -797,7 +806,7 @@ static int psp_v13_0_dump_spirom(struct psp_context *psp,
/* Confirm PSP is ready to start */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
return ret;
@@ -924,8 +933,9 @@ static int psp_v13_0_reg_program_no_ring(struct psp_context *psp, uint32_t val,
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, id);
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_103, val);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, 0);
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
index eaa5512a21da..5f39a2edcc95 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
@@ -76,11 +76,9 @@ static int psp_v13_0_4_wait_for_bootloader(struct psp_context *psp)
for (retry_loop = 0; retry_loop < 10; retry_loop++) {
/* Wait for bootloader to signify that is
ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
- 0x80000000,
- 0x80000000,
- false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -185,8 +183,8 @@ static int psp_v13_0_4_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -204,8 +202,9 @@ static int psp_v13_0_4_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Write the ring destroy command*/
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64,
@@ -213,8 +212,9 @@ static int psp_v13_0_4_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -250,13 +250,15 @@ static int psp_v13_0_4_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -280,8 +282,9 @@ static int psp_v13_0_4_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
index 256288c6cd78..38dfc5c19f2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
@@ -34,7 +34,9 @@
MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_2_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos_kicker.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta_kicker.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_5_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_5_ta.bin");
@@ -109,11 +111,9 @@ static int psp_v14_0_wait_for_bootloader(struct psp_context *psp)
for (retry_loop = 0; retry_loop < 10; retry_loop++) {
/* Wait for bootloader to signify that is
ready having bit 31 of C2PMSG_35 set to 1 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000,
- 0x80000000,
- false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -228,9 +228,10 @@ static int psp_v14_0_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81),
- 0, true);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81),
+ RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -248,8 +249,9 @@ static int psp_v14_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Write the ring destroy command*/
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_64,
@@ -257,8 +259,9 @@ static int psp_v14_0_ring_stop(struct psp_context *psp,
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
/* Wait for response flag (bit 31) */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -294,13 +297,15 @@ static int psp_v14_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
} else {
/* Wait for sOS ready for ring creation */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
@@ -324,8 +329,9 @@ static int psp_v14_0_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64),
+ MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0);
}
return ret;
@@ -388,8 +394,9 @@ static int psp_v14_0_memory_training_send_msg(struct psp_context *psp, int msg)
max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
for (i = 0; i < max_wait; i++) {
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
break;
}
@@ -540,8 +547,9 @@ static int psp_v14_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc
*/
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20));
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -577,8 +585,9 @@ static int psp_v14_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, 0);
if (!ret)
*fw_ver = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36);
@@ -602,11 +611,13 @@ static int psp_v14_0_exec_spi_cmd(struct psp_context *psp, int cmd)
ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret);
return ret;
@@ -629,8 +640,9 @@ static int psp_v14_0_update_spirom(struct psp_context *psp,
int ret;
/* Confirm PSP is ready to start */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
- MBOX_READY_FLAG, MBOX_READY_MASK, false);
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115),
+ MBOX_READY_FLAG, MBOX_READY_MASK, 0);
if (ret) {
dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret);
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index f6b75e3e47ff..833830bc3e2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -91,7 +91,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -109,7 +109,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
return ret;
}
@@ -130,7 +130,7 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, false);
+ 0x80000000, 0x80000000, 0);
if (ret)
return ret;
@@ -147,8 +147,8 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
- RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
- 0, true);
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0,
+ PSP_WAITREG_CHANGED);
return ret;
}
@@ -168,7 +168,7 @@ static void psp_v3_1_reroute_ih(struct psp_context *psp)
mdelay(20);
psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ 0x80000000, 0x8000FFFF, 0);
/* Change IH ring for UMC */
tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
@@ -180,7 +180,7 @@ static void psp_v3_1_reroute_ih(struct psp_context *psp)
mdelay(20);
psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ 0x80000000, 0x8000FFFF, 0);
}
static int psp_v3_1_ring_create(struct psp_context *psp,
@@ -217,9 +217,9 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0,
- mmMP0_SMN_C2PMSG_101), 0x80000000,
- 0x8000FFFF, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x8000FFFF, 0);
} else {
/* Write low address of the ring to C2PMSG_69 */
@@ -240,10 +240,9 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0,
- mmMP0_SMN_C2PMSG_64), 0x80000000,
- 0x8000FFFF, false);
-
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, 0);
}
return ret;
}
@@ -267,11 +266,13 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
/* Wait for response flag (bit 31) */
if (amdgpu_sriov_vf(adev))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, 0);
else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, 0);
return ret;
}
@@ -311,7 +312,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+ ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, 0);
if (ret) {
DRM_INFO("psp is not working correctly before mode1 reset!\n");
@@ -325,7 +326,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
- ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
+ ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, 0);
if (ret) {
DRM_INFO("psp mode 1 reset failed!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 33ed2b158fcd..f38004e6064e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2187,7 +2187,7 @@ static int sdma_v4_0_print_iv_entry(struct amdgpu_device *adev,
dev_dbg_ratelimited(adev->dev,
" for process %s pid %d thread %s pid %d\n",
task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ task_info->task.comm, task_info->task.pid);
amdgpu_vm_put_task_info(task_info);
}
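The task_info->task.comm / task.pid accesses here and in the sdma_v4_4_2 hunk below track a refactor that nests the task identity inside amdgpu_task_info. Assumed shape, for orientation only (the real definition may embed a shared DRM task-info struct rather than an anonymous one):

	struct amdgpu_task_info {
		char	process_name[TASK_COMM_LEN];
		pid_t	tgid;
		struct {
			pid_t	pid;
			char	comm[TASK_COMM_LEN];
		} task;
	};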
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 9c169112a5e7..a1443990d5c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -45,6 +45,7 @@
#include "amdgpu_ras.h"
MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin");
+MODULE_FIRMWARE("amdgpu/sdma_4_4_4.bin");
MODULE_FIRMWARE("amdgpu/sdma_4_4_5.bin");
static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_4_2[] = {
@@ -109,6 +110,8 @@ static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev);
static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring);
static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring);
+static int sdma_v4_4_2_soft_reset_engine(struct amdgpu_device *adev,
+ u32 instance_id);
static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
u32 instance, u32 offset)
@@ -490,7 +493,7 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
{
struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 doorbell_offset, doorbell;
- u32 rb_cntl, ib_cntl;
+ u32 rb_cntl, ib_cntl, sdma_cntl;
int i;
for_each_inst(i, inst_mask) {
@@ -502,6 +505,9 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
+ sdma_cntl = RREG32_SDMA(i, regSDMA_CNTL);
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, UTC_L1_ENABLE, 0);
+ WREG32_SDMA(i, regSDMA_CNTL, sdma_cntl);
if (sdma[i]->use_doorbell) {
doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
@@ -995,6 +1001,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
/* set utc l1 enable flag always to 1 */
temp = RREG32_SDMA(i, regSDMA_CNTL);
temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
+ WREG32_SDMA(i, regSDMA_CNTL, temp);
if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < IP_VERSION(4, 4, 5)) {
/* enable context empty interrupt during initialization */
@@ -1337,6 +1344,7 @@ static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
static const struct amdgpu_sdma_funcs sdma_v4_4_2_sdma_funcs = {
.stop_kernel_queue = &sdma_v4_4_2_stop_queue,
.start_kernel_queue = &sdma_v4_4_2_restore_queue,
+ .soft_reset_kernel_queue = &sdma_v4_4_2_soft_reset_engine,
};
static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
@@ -1648,45 +1656,24 @@ static bool sdma_v4_4_2_is_queue_selected(struct amdgpu_device *adev, uint32_t i
return (context_status & SDMA_GFX_CONTEXT_STATUS__SELECTED_MASK) != 0;
}
-static bool sdma_v4_4_2_ring_is_guilty(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t instance_id = ring->me;
-
- return sdma_v4_4_2_is_queue_selected(adev, instance_id, false);
-}
-
-static bool sdma_v4_4_2_page_ring_is_guilty(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- uint32_t instance_id = ring->me;
-
- if (!adev->sdma.has_page_queue)
- return false;
-
- return sdma_v4_4_2_is_queue_selected(adev, instance_id, true);
-}
-
-static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- u32 id = GET_INST(SDMA0, ring->me);
+ u32 id = ring->me;
int r;
- if (!(adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
- return -EOPNOTSUPP;
-
- amdgpu_amdkfd_suspend(adev, false);
- r = amdgpu_sdma_reset_engine(adev, id);
- amdgpu_amdkfd_resume(adev, false);
-
+ amdgpu_amdkfd_suspend(adev, true);
+ r = amdgpu_sdma_reset_engine(adev, id, false);
+ amdgpu_amdkfd_resume(adev, true);
return r;
}
static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- u32 instance_id = GET_INST(SDMA0, ring->me);
+ u32 instance_id = ring->me;
u32 inst_mask;
uint64_t rptr;
@@ -1725,7 +1712,7 @@ static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 inst_mask;
- int i;
+ int i, r;
inst_mask = 1 << ring->me;
udelay(50);
@@ -1742,7 +1729,18 @@ static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring)
return -ETIMEDOUT;
}
- return sdma_v4_4_2_inst_start(adev, inst_mask, true);
+ r = sdma_v4_4_2_inst_start(adev, inst_mask, true);
+
+ return r;
+}
+
+static int sdma_v4_4_2_soft_reset_engine(struct amdgpu_device *adev,
+ u32 instance_id)
+{
+ /* For SDMA 4.x, use the existing DPM interface for backward
+ * compatibility; convert the logical instance ID to the physical
+ * instance ID before issuing the reset.
+ */
}
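On the conversion above: GET_INST() resolves the driver's logical SDMA index to the physical instance id (the two can differ on partitioned parts), and the DPM interface expects a physical bitmask. Minimal distillation (helper name hypothetical; GET_INST() expands using the in-scope adev):

	static u32 sdma_logical_to_reset_mask(struct amdgpu_device *adev,
					      u32 logical)
	{
		return 1U << GET_INST(SDMA0, logical);
	}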
static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
@@ -1884,7 +1882,7 @@ static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev,
if (task_info) {
dev_dbg_ratelimited(adev->dev, " for process %s pid %d thread %s pid %d\n",
task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ task_info->task.comm, task_info->task.pid);
amdgpu_vm_put_task_info(task_info);
}
@@ -2139,7 +2137,6 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
.reset = sdma_v4_4_2_reset_queue,
- .is_guilty = sdma_v4_4_2_ring_is_guilty,
};
static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
@@ -2172,7 +2169,6 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
.reset = sdma_v4_4_2_reset_queue,
- .is_guilty = sdma_v4_4_2_page_ring_is_guilty,
};
static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev)
@@ -2365,11 +2361,15 @@ static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
- if ((adev->gfx.mec_fw_version >= 0xb0) && amdgpu_dpm_reset_sdma_is_supported(adev))
+ if ((adev->gfx.mec_fw_version >= 0xb0) &&
+ amdgpu_dpm_reset_sdma_is_supported(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
case IP_VERSION(9, 5, 0):
- if ((adev->gfx.mec_fw_version >= 0xf) && amdgpu_dpm_reset_sdma_is_supported(adev))
+ if ((adev->gfx.mec_fw_version >= 0xf) &&
+ amdgpu_dpm_reset_sdma_is_supported(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 9505ae96fbec..8ddc4df06a1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1399,6 +1399,7 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
for (i = 0; i < adev->sdma.num_instances; i++) {
+ mutex_init(&adev->sdma.instance[i].engine_reset_mutex);
adev->sdma.instance[i].funcs = &sdma_v5_0_sdma_funcs;
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
@@ -1427,7 +1428,9 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(5, 0, 0):
case IP_VERSION(5, 0, 2):
case IP_VERSION(5, 0, 5):
- if (adev->sdma.instance[0].fw_version >= 35)
+ if ((adev->sdma.instance[0].fw_version >= 35) &&
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
@@ -1538,12 +1541,27 @@ static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block)
return 0;
}
-static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- u32 inst_id = ring->me;
+ int r;
+
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
+ return -EINVAL;
+ }
- return amdgpu_sdma_reset_engine(adev, inst_id);
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ amdgpu_amdkfd_suspend(adev, true);
+ r = amdgpu_sdma_reset_engine(adev, ring->me, true);
+ amdgpu_amdkfd_resume(adev, true);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring)
@@ -1610,6 +1628,7 @@ static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring)
r = sdma_v5_0_gfx_resume_instance(adev, inst_id, true);
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+
return r;
}
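sdma_v5_0, v5_2, v6_0 and v7_0 now share one reset shape: validate ring->me, bracket the reset with amdgpu_ring_reset_helper_begin()/end() (which, judging from the call sites, quiesce the ring and settle the timed-out fence), then resume. Condensed sketch of the common pattern:

	static int sdma_reset_queue_common(struct amdgpu_ring *ring,
					   struct amdgpu_fence *timedout_fence)
	{
		struct amdgpu_device *adev = ring->adev;
		int r;

		if (ring->me >= adev->sdma.num_instances)
			return -EINVAL;

		amdgpu_ring_reset_helper_begin(ring, timedout_fence);
		/* v5.x resets the engine; v6/v7 reset the queue via MES */
		r = amdgpu_sdma_reset_engine(adev, ring->me, true);
		if (r)
			return r;
		return amdgpu_ring_reset_helper_end(ring, timedout_fence);
	}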
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index a6e612b4a892..51101b0aa2fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -342,7 +342,7 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->me > 1) {
- amdgpu_asic_flush_hdp(adev, ring);
+ amdgpu_hdp_flush(adev, ring);
} else {
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
@@ -1318,6 +1318,7 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
+ mutex_init(&adev->sdma.instance[i].engine_reset_mutex);
adev->sdma.instance[i].funcs = &sdma_v5_2_sdma_funcs;
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
@@ -1346,11 +1347,15 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(5, 2, 2):
case IP_VERSION(5, 2, 3):
case IP_VERSION(5, 2, 4):
- if (adev->sdma.instance[0].fw_version >= 76)
+ if ((adev->sdma.instance[0].fw_version >= 76) &&
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
case IP_VERSION(5, 2, 5):
- if (adev->sdma.instance[0].fw_version >= 34)
+ if ((adev->sdma.instance[0].fw_version >= 34) &&
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
@@ -1451,12 +1456,27 @@ static int sdma_v5_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
return -ETIMEDOUT;
}
-static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- u32 inst_id = ring->me;
+ int r;
+
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
+ return -EINVAL;
+ }
- return amdgpu_sdma_reset_engine(adev, inst_id);
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ amdgpu_amdkfd_suspend(adev, true);
+ r = amdgpu_sdma_reset_engine(adev, ring->me, true);
+ amdgpu_amdkfd_resume(adev, true);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring)
@@ -1526,6 +1546,7 @@ static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring)
r = sdma_v5_2_gfx_resume_instance(adev, inst_id, true);
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 5a70ae17be04..217040044987 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -1355,7 +1355,9 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
- if (adev->sdma.instance[0].fw_version >= 21)
+ if ((adev->sdma.instance[0].fw_version >= 21) &&
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
@@ -1374,9 +1376,42 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
else
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
- /* add firmware version checks here */
- if (0 && !adev->sdma.disable_uq)
- adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+ case IP_VERSION(6, 0, 0):
+ if ((adev->sdma.instance[0].fw_version >= 27) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 0, 1):
+ if ((adev->sdma.instance[0].fw_version >= 18) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 0, 2):
+ if ((adev->sdma.instance[0].fw_version >= 23) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 0, 3):
+ if ((adev->sdma.instance[0].fw_version >= 29) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 1, 0):
+ if ((adev->sdma.instance[0].fw_version >= 14) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 1, 1):
+ if ((adev->sdma.instance[0].fw_version >= 17) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 1, 2):
+ if ((adev->sdma.instance[0].fw_version >= 15) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 1, 3):
+ if ((adev->sdma.instance[0].fw_version >= 10) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ default:
+ break;
+ }
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
if (r)
@@ -1537,29 +1572,29 @@ static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring)
return r;
}
-static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- int i, r;
+ int r;
- if (amdgpu_sriov_vf(adev))
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
return -EINVAL;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
}
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
- return -EINVAL;
- }
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
if (r)
return r;
- return sdma_v6_0_gfx_resume_instance(adev, i, true);
+ r = sdma_v6_0_gfx_resume_instance(adev, ring->me, true);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index ad47d0bdf777..2b81344dcd66 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -802,29 +802,29 @@ static bool sdma_v7_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
return false;
}
-static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
- int i, r;
+ int r;
- if (amdgpu_sriov_vf(adev))
+ if (ring->me >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance not found\n");
return -EINVAL;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (ring == &adev->sdma.instance[i].ring)
- break;
}
- if (i == adev->sdma.num_instances) {
- DRM_ERROR("sdma instance not found\n");
- return -EINVAL;
- }
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
if (r)
return r;
- return sdma_v7_0_gfx_resume_instance(adev, i, true);
+ r = sdma_v7_0_gfx_resume_instance(adev, ring->me, true);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
/**
@@ -1337,7 +1337,9 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->sdma.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
- adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ if (!amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
if (r)
@@ -1349,9 +1351,15 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
else
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
- /* add firmware version checks here */
- if (0 && !adev->sdma.disable_uq)
- adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 0, 1):
+ if ((adev->sdma.instance[0].fw_version >= 7966358) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ default:
+ break;
+ }
return r;
}
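The user-queue switches in the sdma_v6_0 and sdma_v7_0 hunks gate MES user queues on per-IP minimum firmware versions. Each case distills to the same check (helper name hypothetical):

	static bool sdma_userq_supported(struct amdgpu_device *adev, u32 min_fw)
	{
		return adev->sdma.instance[0].fw_version >= min_fw &&
		       !adev->sdma.disable_uq;
	}

	/* e.g. IP 6.0.0 needs fw >= 27, 6.0.3 >= 29, 7.0.x >= 7966358 */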
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index e0f139de7991..f7288372ee61 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -45,6 +45,7 @@
#include "dce_v6_0.h"
#include "si.h"
#include "uvd_v3_1.h"
+#include "vce_v1_0.h"
#include "uvd/uvd_4_0_d.h"
@@ -921,8 +922,6 @@ static const u32 hainan_mgcg_cgcg_init[] =
0x3630, 0xfffffff0, 0x00000100,
};
-/* XXX: update when we support VCE */
-#if 0
/* tahiti, pitcairn, verde */
static const struct amdgpu_video_codec_info tahiti_video_codecs_encode_array[] =
{
@@ -940,13 +939,7 @@ static const struct amdgpu_video_codecs tahiti_video_codecs_encode =
.codec_count = ARRAY_SIZE(tahiti_video_codecs_encode_array),
.codec_array = tahiti_video_codecs_encode_array,
};
-#else
-static const struct amdgpu_video_codecs tahiti_video_codecs_encode =
-{
- .codec_count = 0,
- .codec_array = NULL,
-};
-#endif
+
/* oland and hainan don't support encode */
static const struct amdgpu_video_codecs hainan_video_codecs_encode =
{
@@ -1925,6 +1918,14 @@ static int si_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
~VCEPLL_BYPASS_EN_MASK);
if (!evclk || !ecclk) {
+ /*
+ * On some chips, the PLL takes way too long to get out of
+ * sleep mode, causing a timeout waiting on CTLACK/CTLACK2.
+ * Leave the PLL running in bypass mode.
+ */
+ if (adev->pdev->device == 0x6780)
+ return 0;
+
/* Keep the Bypass mode, put PLL to sleep */
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
~VCEPLL_SLEEP_MASK);
@@ -2717,7 +2718,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
else
amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
- /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
+ amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block);
break;
case CHIP_OLAND:
amdgpu_device_ip_block_add(adev, &si_common_ip_block);
@@ -2735,7 +2736,6 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
else
amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
- /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
break;
case CHIP_HAINAN:
amdgpu_device_ip_block_add(adev, &si_common_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 1df00f8a2406..66f650f87243 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -96,6 +96,9 @@ static int si_ih_irq_init(struct amdgpu_device *adev)
pci_set_master(adev->pdev);
si_ih_enable_interrupts(adev);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -112,6 +115,9 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
@@ -127,6 +133,8 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev,
tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(IH_RB_CNTL, tmp);
}
+
+out:
return (wptr & ih->ptr_mask);
}
@@ -175,6 +183,10 @@ static int si_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
return amdgpu_irq_init(adev);
}
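
The si_ih changes above (mirrored for tonga_ih further down) wire up the software IH ring: it is allocated in sw_init, flagged enabled in irq_init, and get_wptr() skips the hardware overflow handling for it because the CPU is the producer. A condensed sketch of that wptr fast path, using only names visible in the hunks:

static u32 example_ih_get_wptr(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	u32 wptr = le32_to_cpu(*ih->wptr_cpu);

	/* the soft ring is written by the CPU: no hw overflow bookkeeping */
	if (ih == &adev->irq.ih_soft)
		goto out;

	/* hardware rings: overflow detection and IH_RB_CNTL handling go here */
out:
	return wptr & ih->ptr_mask;
}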
diff --git a/drivers/gpu/drm/amd/amdgpu/sid.h b/drivers/gpu/drm/amd/amdgpu/sid.h
index cbd4f8951cfa..561462a8332e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sid.h
+++ b/drivers/gpu/drm/amd/amdgpu/sid.h
@@ -582,45 +582,6 @@
#define DMA_PACKET_NOP 0xf
/* VCE */
-#define VCE_STATUS 0x20004
-#define VCE_VCPU_CNTL 0x20014
-#define VCE_CLK_EN (1 << 0)
-#define VCE_VCPU_CACHE_OFFSET0 0x20024
-#define VCE_VCPU_CACHE_SIZE0 0x20028
-#define VCE_VCPU_CACHE_OFFSET1 0x2002c
-#define VCE_VCPU_CACHE_SIZE1 0x20030
-#define VCE_VCPU_CACHE_OFFSET2 0x20034
-#define VCE_VCPU_CACHE_SIZE2 0x20038
-#define VCE_SOFT_RESET 0x20120
-#define VCE_ECPU_SOFT_RESET (1 << 0)
-#define VCE_FME_SOFT_RESET (1 << 2)
-#define VCE_RB_BASE_LO2 0x2016c
-#define VCE_RB_BASE_HI2 0x20170
-#define VCE_RB_SIZE2 0x20174
-#define VCE_RB_RPTR2 0x20178
-#define VCE_RB_WPTR2 0x2017c
-#define VCE_RB_BASE_LO 0x20180
-#define VCE_RB_BASE_HI 0x20184
-#define VCE_RB_SIZE 0x20188
-#define VCE_RB_RPTR 0x2018c
-#define VCE_RB_WPTR 0x20190
-#define VCE_CLOCK_GATING_A 0x202f8
-#define VCE_CLOCK_GATING_B 0x202fc
-#define VCE_UENC_CLOCK_GATING 0x205bc
-#define VCE_UENC_REG_CLOCK_GATING 0x205c0
-#define VCE_FW_REG_STATUS 0x20e10
-# define VCE_FW_REG_STATUS_BUSY (1 << 0)
-# define VCE_FW_REG_STATUS_PASS (1 << 3)
-# define VCE_FW_REG_STATUS_DONE (1 << 11)
-#define VCE_LMI_FW_START_KEYSEL 0x20e18
-#define VCE_LMI_FW_PERIODIC_CTRL 0x20e20
-#define VCE_LMI_CTRL2 0x20e74
-#define VCE_LMI_CTRL 0x20e98
-#define VCE_LMI_VM_CTRL 0x20ea0
-#define VCE_LMI_SWAP_CNTL 0x20eb4
-#define VCE_LMI_SWAP_CNTL1 0x20eb8
-#define VCE_LMI_CACHE_CTRL 0x20ef4
-
#define VCE_CMD_NO_OP 0x00000000
#define VCE_CMD_END 0x00000001
#define VCE_CMD_IB 0x00000002
@@ -629,7 +590,6 @@
#define VCE_CMD_IB_AUTO 0x00000005
#define VCE_CMD_SEMAPHORE 0x00000006
-
//#dce stuff
/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
#define CRTC0_REGISTER_OFFSET (0x1b7c - 0x1b7c) //(0x6df0 - 0x6df0)/4
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
index dd2d66090d23..68aef47254a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
@@ -743,7 +743,7 @@ int smu_v11_0_i2c_control_init(struct amdgpu_device *adev)
adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res)
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
@@ -752,9 +752,6 @@ int smu_v11_0_i2c_control_init(struct amdgpu_device *adev)
void smu_v11_0_i2c_control_fini(struct amdgpu_device *adev)
{
- struct i2c_adapter *control = adev->pm.ras_eeprom_i2c_bus;
-
- i2c_del_adapter(control);
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
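
Switching to devm_i2c_add_adapter() ties the adapter's lifetime to adev->dev, which is why the explicit i2c_del_adapter() disappears from the _fini path: the devres core deletes the adapter automatically when the device is unbound. A sketch of the pattern:

/* register an adapter whose removal is handled by devres */
static int example_register_smu_i2c(struct device *dev, struct i2c_adapter *adap)
{
	return devm_i2c_add_adapter(dev, adap);
	/* no i2c_del_adapter() needed in the teardown path */
}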
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index c457be3a3c56..42f5d9c0e3af 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -741,7 +741,6 @@ static void soc15_reg_base_init(struct amdgpu_device *adev)
void soc15_set_virt_ops(struct amdgpu_device *adev)
{
adev->virt.ops = &xgpu_ai_virt_ops;
-
/* init soc15 reg base early enough so we can
* request full access for sriov before
* set_ip_blocks. */
@@ -854,10 +853,6 @@ static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
u32 sol_reg;
- /* CP hangs in IGT reloading test on RN, reset to WA */
- if (adev->asic_type == CHIP_RENOIR)
- return true;
-
if (amdgpu_gmc_need_reset_on_init(adev))
return true;
if (amdgpu_psp_tos_reload_needed(adev))
@@ -1218,6 +1213,8 @@ static int soc15_common_early_init(struct amdgpu_ip_block *ip_block)
AMD_PG_SUPPORT_JPEG;
/*TODO: need a new external_rev_id for GC 9.4.4? */
adev->external_rev_id = adev->rev_id + 0x46;
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ adev->external_rev_id = adev->rev_id + 0x50;
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index ef7c603b50ae..c8ac11a9cdef 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -118,7 +118,6 @@ int vega10_reg_base_init(struct amdgpu_device *adev);
int vega20_reg_base_init(struct amdgpu_device *adev);
int arct_reg_base_init(struct amdgpu_device *adev);
int aldebaran_reg_base_init(struct amdgpu_device *adev);
-void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev);
u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id);
int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev);
ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 7d17ae56f901..ee8038df17e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -159,6 +159,9 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
tonga_ih_enable_interrupts(adev);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -196,6 +199,9 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -306,6 +312,10 @@ static int tonga_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih;
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index e590cbdd8de9..0f5b1719fda5 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -536,8 +536,11 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);
- if ((hwid != MCA_UMC_HWID_V12_0) || (mcatype != MCA_UMC_MCATYPE_V12_0))
+ /* The IP block decode of consumption is SMU */
+ if (hwid != MCA_UMC_HWID_V12_0 || mcatype != MCA_UMC_MCATYPE_V12_0) {
+ con->umc_ecc_log.consumption_q_count++;
return 0;
+ }
if (!status)
return 0;
@@ -708,6 +711,19 @@ static uint32_t umc_v12_0_get_die_id(struct amdgpu_device *adev,
return die;
}
+static void umc_v12_0_mca_ipid_parse(struct amdgpu_device *adev, uint64_t ipid,
+ uint32_t *did, uint32_t *ch, uint32_t *umc_inst, uint32_t *sid)
+{
+ if (did)
+ *did = MCA_IPID_2_DIE_ID(ipid);
+ if (ch)
+ *ch = MCA_IPID_2_UMC_CH(ipid);
+ if (umc_inst)
+ *umc_inst = MCA_IPID_2_UMC_INST(ipid);
+ if (sid)
+ *sid = MCA_IPID_2_SOCKET_ID(ipid);
+}
+
struct amdgpu_umc_ras umc_v12_0_ras = {
.ras_block = {
.hw_ops = &umc_v12_0_ras_hw_ops,
@@ -721,5 +737,6 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
.convert_ras_err_addr = umc_v12_0_convert_error_address,
.get_die_id_from_pa = umc_v12_0_get_die_id,
.get_retire_flip_bits = umc_v12_0_get_retire_flip_bits,
+ .mca_ipid_parse = umc_v12_0_mca_ipid_parse,
};
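
The new mca_ipid_parse callback simply fans an MCA IPID register value out into its die/channel/instance/socket fields, with every output pointer optional. A caller-side fragment as a sketch (the access path through adev->umc.ras is an assumption, and ipid is assumed to hold a raw IPID value already in scope):

uint32_t die, ch, inst, sock;

if (adev->umc.ras && adev->umc.ras->mca_ipid_parse)
	adev->umc.ras->mca_ipid_parse(adev, ipid, &die, &ch, &inst, &sock);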
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 5dbaebb592b3..2e79a3afc774 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -623,7 +623,22 @@ static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev,
*
* @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
- * Initialize the hardware, boot up the VCPU and do some testing
+ * Initialize the hardware, boot up the VCPU and do some testing.
+ *
+ * On SI, the UVD is meant to be used in a specific power state,
+ * or alternatively the driver can manually enable its clock.
+ * In amdgpu we use the dedicated UVD power state when DPM is enabled.
+ * Calling amdgpu_dpm_enable_uvd makes DPM select the UVD power state
+ * for the SMU and afterwards enables the UVD clock.
+ * This is automatically done by amdgpu_uvd_ring_begin_use when work
+ * is submitted to the UVD ring. Here, we have to call it manually
+ * in order to power up UVD before firmware validation.
+ *
+ * Note that we must not disable the UVD clock here, as that would
+ * cause the ring test to fail. However, UVD is powered off
+ * automatically after the ring test: amdgpu_uvd_ring_end_use calls
+ * the UVD idle work handler which will disable the UVD clock when
+ * all fences are signalled.
*/
static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
{
@@ -633,6 +648,15 @@ static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
int r;
uvd_v3_1_mc_resume(adev);
+ uvd_v3_1_enable_mgcg(adev, true);
+
+ /* Make sure UVD is powered during FW validation.
+ * It's going to be automatically powered off after the ring test.
+ */
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+ else
+ amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
r = uvd_v3_1_fw_validate(adev);
if (r) {
@@ -640,9 +664,6 @@ static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- uvd_v3_1_enable_mgcg(adev, true);
- amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
-
uvd_v3_1_start(adev);
r = amdgpu_ring_test_helper(ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 1c07b701d0e4..ceb94bbb03a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -217,7 +217,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -281,7 +282,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 9d237b5937fb..1f8866f3f63c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -225,7 +225,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -288,7 +289,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c
new file mode 100644
index 000000000000..9ae424618556
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c
@@ -0,0 +1,839 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * Copyright 2025 Valve Corporation
+ * Copyright 2025 Alexandre Demers
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ * Timur Kristóf <timur.kristof@gmail.com>
+ * Alexandre Demers <alexandre.f.demers@gmail.com>
+ */
+
+#include <linux/firmware.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vce.h"
+#include "amdgpu_gart.h"
+#include "sid.h"
+#include "vce_v1_0.h"
+#include "vce/vce_1_0_d.h"
+#include "vce/vce_1_0_sh_mask.h"
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
+
+#define VCE_V1_0_FW_SIZE (256 * 1024)
+#define VCE_V1_0_STACK_SIZE (64 * 1024)
+#define VCE_V1_0_DATA_SIZE (7808 * (AMDGPU_MAX_VCE_HANDLES + 1))
+#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
+
+#define VCE_V1_0_GART_PAGE_START \
+ (AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS)
+#define VCE_V1_0_GART_ADDR_START \
+ (VCE_V1_0_GART_PAGE_START * AMDGPU_GPU_PAGE_SIZE)
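+
+/*
+ * The GART pages below VCE_V1_0_GART_PAGE_START are the GTT transfer
+ * windows that TTM uses for buffer moves; with the usual amdgpu values
+ * (512-page windows, 2 windows) the VCPU BO mapping therefore starts
+ * 4 MiB into the GART aperture.
+ */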
+
+static void vce_v1_0_set_ring_funcs(struct amdgpu_device *adev);
+static void vce_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+
+struct vce_v1_0_fw_signature {
+ int32_t offset;
+ uint32_t length;
+ int32_t number;
+ struct {
+ uint32_t chip_id;
+ uint32_t keyselect;
+ uint32_t nonce[4];
+ uint32_t sigval[4];
+ } val[8];
+};
+
+/**
+ * vce_v1_0_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t vce_v1_0_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->me == 0)
+ return RREG32(mmVCE_RB_RPTR);
+ else
+ return RREG32(mmVCE_RB_RPTR2);
+}
+
+/**
+ * vce_v1_0_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t vce_v1_0_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->me == 0)
+ return RREG32(mmVCE_RB_WPTR);
+ else
+ return RREG32(mmVCE_RB_WPTR2);
+}
+
+/**
+ * vce_v1_0_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void vce_v1_0_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->me == 0)
+ WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+ else
+ WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+}
+
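+/*
+ * vce_v1_0_lmi_clean - wait for the VCE local memory interface to settle
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Poll VCE_LMI_STATUS until any of the "clean" status bits (0x337f) is
+ * set, for up to 10 seconds. Returns 0 on success, -ETIMEDOUT otherwise.
+ */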
+static int vce_v1_0_lmi_clean(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ if (RREG32(mmVCE_LMI_STATUS) & 0x337f)
+ return 0;
+
+ mdelay(10);
+ }
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int vce_v1_0_firmware_loaded(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ if (RREG32(mmVCE_STATUS) & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
+ return 0;
+ mdelay(10);
+ }
+
+ dev_err(adev->dev, "VCE not responding, trying to reset the ECPU\n");
+
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ WREG32_P(mmVCE_SOFT_RESET, 0,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void vce_v1_0_init_cg(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ tmp = RREG32(mmVCE_CLOCK_GATING_A);
+ tmp |= VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK;
+ WREG32(mmVCE_CLOCK_GATING_A, tmp);
+
+ tmp = RREG32(mmVCE_CLOCK_GATING_B);
+ tmp |= 0x1e;
+ tmp &= ~0xe100e1;
+ WREG32(mmVCE_CLOCK_GATING_B, tmp);
+
+ tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp &= ~0xff9ff000;
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ tmp &= ~0x3ff;
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+}
+
+/**
+ * vce_v1_0_load_fw_signature - load firmware signature into VCPU BO
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * The VCE1 firmware validation mechanism needs a firmware signature.
+ * This function finds the signature appropriate for the current
+ * ASIC and writes that into the VCPU BO.
+ */
+static int vce_v1_0_load_fw_signature(struct amdgpu_device *adev)
+{
+ const struct common_firmware_header *hdr;
+ struct vce_v1_0_fw_signature *sign;
+ unsigned int ucode_offset;
+ uint32_t chip_id;
+ u32 *cpu_addr;
+ int i;
+
+ hdr = (const struct common_firmware_header *)adev->vce.fw->data;
+ ucode_offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+ cpu_addr = adev->vce.cpu_addr;
+
+ sign = (void *)adev->vce.fw->data + ucode_offset;
+
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ chip_id = 0x01000014;
+ break;
+ case CHIP_VERDE:
+ chip_id = 0x01000015;
+ break;
+ case CHIP_PITCAIRN:
+ chip_id = 0x01000016;
+ break;
+ default:
+ dev_err(adev->dev, "asic_type %#010x was not found!", adev->asic_type);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < le32_to_cpu(sign->number); ++i) {
+ if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
+ break;
+ }
+
+ if (i == le32_to_cpu(sign->number)) {
+ dev_err(adev->dev, "chip_id 0x%x for %s was not found in VCE firmware",
+ chip_id, amdgpu_asic_name[adev->asic_type]);
+ return -EINVAL;
+ }
+
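+ /*
+ * Layout written into the VCPU BO, as implied by the offsets below:
+ * the 16-byte nonce at byte 192, the total image length at byte 208,
+ * zero padding up to byte 256, the firmware body at byte 256, and the
+ * 16-byte signature value appended right after the body.
+ */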
+ cpu_addr += (256 - 64) / 4;
+ memcpy_toio(&cpu_addr[0], &sign->val[i].nonce[0], 16);
+ cpu_addr[4] = cpu_to_le32(le32_to_cpu(sign->length) + 64);
+
+ memset_io(&cpu_addr[5], 0, 44);
+ memcpy_toio(&cpu_addr[16], &sign[1], hdr->ucode_size_bytes - sizeof(*sign));
+
+ cpu_addr += (le32_to_cpu(sign->length) + 64) / 4;
+ memcpy_toio(&cpu_addr[0], &sign->val[i].sigval[0], 16);
+
+ adev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);
+
+ return 0;
+}
+
+static int vce_v1_0_wait_for_fw_validation(struct amdgpu_device *adev)
+{
+ int i;
+
+ dev_dbg(adev->dev, "VCE keyselect: %d", adev->vce.keyselect);
+ WREG32(mmVCE_LMI_FW_START_KEYSEL, adev->vce.keyselect);
+
+ for (i = 0; i < 10; ++i) {
+ mdelay(10);
+ if (RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__DONE_MASK)
+ break;
+ }
+
+ if (!(RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__DONE_MASK)) {
+ dev_err(adev->dev, "VCE FW validation timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ if (!(RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__PASS_MASK)) {
+ dev_err(adev->dev, "VCE FW validation failed\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 10; ++i) {
+ mdelay(10);
+ if (!(RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__BUSY_MASK))
+ break;
+ }
+
+ if (RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__BUSY_MASK) {
+ dev_err(adev->dev, "VCE FW busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int vce_v1_0_mc_resume(struct amdgpu_device *adev)
+{
+ uint32_t offset;
+ uint32_t size;
+
+ /*
+ * When the keyselect is already set, don't perturb VCE FW.
+ * Validation seems to always fail the second time.
+ */
+ if (RREG32(mmVCE_LMI_FW_START_KEYSEL)) {
+ dev_dbg(adev->dev, "keyselect already set: 0x%x (on CPU: 0x%x)\n",
+ RREG32(mmVCE_LMI_FW_START_KEYSEL), adev->vce.keyselect);
+
+ WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
+ return 0;
+ }
+
+ WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
+ WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
+ WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
+ WREG32(mmVCE_CLOCK_GATING_B, 0);
+
+ WREG32_P(mmVCE_LMI_FW_PERIODIC_CTRL, 0x4, ~0x4);
+
+ WREG32(mmVCE_LMI_CTRL, 0x00398000);
+
+ WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
+ WREG32(mmVCE_LMI_SWAP_CNTL, 0);
+ WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
+ WREG32(mmVCE_LMI_VM_CTRL, 0);
+
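+ /* scratch 7 presumably tells the firmware the maximum session-handle count */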
+ WREG32(mmVCE_VCPU_SCRATCH7, AMDGPU_MAX_VCE_HANDLES);
+
+ offset = adev->vce.gpu_addr + AMDGPU_VCE_FIRMWARE_OFFSET;
+ size = VCE_V1_0_FW_SIZE;
+ WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
+ WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
+
+ offset += size;
+ size = VCE_V1_0_STACK_SIZE;
+ WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
+ WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
+
+ offset += size;
+ size = VCE_V1_0_DATA_SIZE;
+ WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
+ WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
+
+ WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
+
+ return vce_v1_0_wait_for_fw_validation(adev);
+}
+
+/**
+ * vce_v1_0_is_idle() - Check idle status of VCE1 IP block
+ *
+ * @ip_block: amdgpu_ip_block pointer
+ *
+ * Check whether VCE is busy according to VCE_STATUS.
+ * Also check whether the SRBM thinks VCE is busy, although
+ * SRBM_STATUS2.VCE_BUSY seems to be bogus because it
+ * appears to mirror the VCE_STATUS.VCPU_REPORT_FW_LOADED bit.
+ */
+static bool vce_v1_0_is_idle(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ bool busy =
+ (RREG32(mmVCE_STATUS) & (VCE_STATUS__JOB_BUSY_MASK | VCE_STATUS__UENC_BUSY_MASK)) ||
+ (RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
+
+ return !busy;
+}
+
+static int vce_v1_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ unsigned int i;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ udelay(1);
+ if (vce_v1_0_is_idle(ip_block))
+ return 0;
+ }
+ return -ETIMEDOUT;
+}
+
+/**
+ * vce_v1_0_start - start VCE block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the VCE block
+ */
+static int vce_v1_0_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int r;
+
+ WREG32_P(mmVCE_STATUS, 1, ~1);
+
+ r = vce_v1_0_mc_resume(adev);
+ if (r)
+ return r;
+
+ ring = &adev->vce.ring[0];
+ WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+ ring = &adev->vce.ring[1];
+ WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_BASE_LO2, lower_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+
+ WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
+ ~VCE_VCPU_CNTL__CLK_EN_MASK);
+
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK,
+ ~(VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK));
+
+ mdelay(100);
+
+ WREG32_P(mmVCE_SOFT_RESET, 0,
+ ~(VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK));
+
+ r = vce_v1_0_firmware_loaded(adev);
+
+ /* Clear VCE_STATUS, otherwise SRBM thinks VCE1 is busy. */
+ WREG32(mmVCE_STATUS, 0);
+
+ if (r) {
+ dev_err(adev->dev, "VCE not responding, giving up\n");
+ return r;
+ }
+
+ return 0;
+}
+
+static int vce_v1_0_stop(struct amdgpu_device *adev)
+{
+ struct amdgpu_ip_block *ip_block;
+ int status;
+ int i;
+
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
+ if (!ip_block)
+ return -EINVAL;
+
+ if (vce_v1_0_lmi_clean(adev))
+ dev_warn(adev->dev, "VCE not idle\n");
+
+ if (vce_v1_0_wait_for_idle(ip_block))
+ dev_warn(adev->dev, "VCE busy: VCE_STATUS=0x%x, SRBM_STATUS2=0x%x\n",
+ RREG32(mmVCE_STATUS), RREG32(mmSRBM_STATUS2));
+
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));
+
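+ /* poll until the stall is reflected in the LMI status bits */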
+ for (i = 0; i < 100; ++i) {
+ status = RREG32(mmVCE_LMI_STATUS);
+ if (status & 0x240)
+ break;
+ mdelay(1);
+ }
+
+ WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK,
+ ~(VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK));
+
+ WREG32(mmVCE_STATUS, 0);
+
+ return 0;
+}
+
+static void vce_v1_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp;
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
+ tmp = RREG32(mmVCE_CLOCK_GATING_A);
+ tmp |= VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK;
+ WREG32(mmVCE_CLOCK_GATING_A, tmp);
+
+ tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp &= ~0x1ff000;
+ tmp |= 0xff800000;
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ tmp &= ~0x3ff;
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+ } else {
+ tmp = RREG32(mmVCE_CLOCK_GATING_A);
+ tmp &= ~VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK;
+ WREG32(mmVCE_CLOCK_GATING_A, tmp);
+
+ tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp |= 0x1ff000;
+ tmp &= ~0xff800000;
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ tmp |= 0x3ff;
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+ }
+}
+
+static int vce_v1_0_early_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_early_init(adev);
+ if (r)
+ return r;
+
+ adev->vce.num_rings = 2;
+
+ vce_v1_0_set_ring_funcs(adev);
+ vce_v1_0_set_irq_funcs(adev);
+
+ return 0;
+}
+
+/**
+ * vce_v1_0_ensure_vcpu_bo_32bit_addr() - ensure the VCPU BO has a 32-bit address
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Due to various hardware limitations, the VCE1 requires
+ * the VCPU BO to be in the low 32 bit address range.
+ * Ensure that the VCPU BO has a 32-bit GPU address,
+ * or return an error code when that isn't possible.
+ *
+ * To accommodate that, we place the GART aperture in the low address range
+ * and reserve some GART pages where we map the VCPU BO,
+ * so that it gets a 32-bit address.
+ */
+static int vce_v1_0_ensure_vcpu_bo_32bit_addr(struct amdgpu_device *adev)
+{
+ u64 gpu_addr = amdgpu_bo_gpu_offset(adev->vce.vcpu_bo);
+ u64 bo_size = amdgpu_bo_size(adev->vce.vcpu_bo);
+ u64 max_vcpu_bo_addr = 0xffffffff - bo_size;
+ u64 num_pages = ALIGN(bo_size, AMDGPU_GPU_PAGE_SIZE) / AMDGPU_GPU_PAGE_SIZE;
+ u64 pa = amdgpu_gmc_vram_pa(adev, adev->vce.vcpu_bo);
+ u64 flags = AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_VALID;
+
+ /*
+ * Check if the VCPU BO already has a 32-bit address.
+ * E.g. if the MC is configured to put VRAM in the low address range.
+ */
+ if (gpu_addr <= max_vcpu_bo_addr)
+ return 0;
+
+ /* Check if we can map the VCPU BO in GART to a 32-bit address. */
+ if (adev->gmc.gart_start + VCE_V1_0_GART_ADDR_START > max_vcpu_bo_addr)
+ return -EINVAL;
+
+ amdgpu_gart_map_vram_range(adev, pa, VCE_V1_0_GART_PAGE_START,
+ num_pages, flags, adev->gart.ptr);
+ adev->vce.gpu_addr = adev->gmc.gart_start + VCE_V1_0_GART_ADDR_START;
+ if (adev->vce.gpu_addr > max_vcpu_bo_addr)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vce_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ring *ring;
+ int r, i;
+
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq);
+ if (r)
+ return r;
+
+ r = amdgpu_vce_sw_init(adev, VCE_V1_0_FW_SIZE +
+ VCE_V1_0_STACK_SIZE + VCE_V1_0_DATA_SIZE);
+ if (r)
+ return r;
+
+ r = amdgpu_vce_resume(adev);
+ if (r)
+ return r;
+ r = vce_v1_0_load_fw_signature(adev);
+ if (r)
+ return r;
+ r = vce_v1_0_ensure_vcpu_bo_32bit_addr(adev);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->vce.num_rings; i++) {
+ enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
+
+ ring = &adev->vce.ring[i];
+ sprintf(ring->name, "vce%d", i);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+ hw_prio, NULL);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+static int vce_v1_0_sw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_suspend(adev);
+ if (r)
+ return r;
+
+ return amdgpu_vce_sw_fini(adev);
+}
+
+/**
+ * vce_v1_0_hw_init - start and test VCE block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+static int vce_v1_0_hw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vce(adev, true);
+ else
+ amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
+
+ for (i = 0; i < adev->vce.num_rings; i++) {
+ r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
+ if (r)
+ return r;
+ }
+
+ dev_info(adev->dev, "VCE initialized successfully.\n");
+
+ return 0;
+}
+
+static int vce_v1_0_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ int r;
+
+ r = vce_v1_0_stop(ip_block->adev);
+ if (r)
+ return r;
+
+ cancel_delayed_work_sync(&ip_block->adev->vce.idle_work);
+ return 0;
+}
+
+static int vce_v1_0_suspend(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ /*
+ * Proper cleanups before halting the HW engine:
+ * - cancel the delayed idle work
+ * - enable powergating
+ * - enable clockgating
+ * - disable dpm
+ *
+ * TODO: to align with the VCN implementation, move the
+ * jobs for clockgating/powergating/dpm setting to
+ * ->set_powergating_state().
+ */
+ cancel_delayed_work_sync(&adev->vce.idle_work);
+
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_vce(adev, false);
+ } else {
+ amdgpu_asic_set_vce_clocks(adev, 0, 0);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
+ }
+
+ r = vce_v1_0_hw_fini(ip_block);
+ if (r) {
+ dev_err(adev->dev, "vce_v1_0_hw_fini() failed with error %i", r);
+ return r;
+ }
+
+ return amdgpu_vce_suspend(adev);
+}
+
+static int vce_v1_0_resume(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_resume(adev);
+ if (r)
+ return r;
+ r = vce_v1_0_load_fw_signature(adev);
+ if (r)
+ return r;
+ r = vce_v1_0_ensure_vcpu_bo_32bit_addr(adev);
+ if (r)
+ return r;
+
+ return vce_v1_0_hw_init(ip_block);
+}
+
+static int vce_v1_0_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ uint32_t val = 0;
+
+ if (state == AMDGPU_IRQ_STATE_ENABLE)
+ val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
+
+ WREG32_P(mmVCE_SYS_INT_EN, val,
+ ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ return 0;
+}
+
+static int vce_v1_0_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_dbg(adev->dev, "IH: VCE\n");
+ switch (entry->src_data[0]) {
+ case 0:
+ case 1:
+ amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
+ break;
+ default:
+ dev_err(adev->dev, "Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+static int vce_v1_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ vce_v1_0_init_cg(adev);
+ vce_v1_0_enable_mgcg(adev, state == AMD_CG_STATE_GATE);
+
+ return 0;
+}
+
+static int vce_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ /*
+ * This doesn't actually powergate the VCE block.
+ * That's done in the dpm code via the SMC. This
+ * just re-inits the block as necessary. The actual
+ * gating still happens in the dpm code. We should
+ * revisit this when there is a cleaner line between
+ * the smc and the hw blocks
+ */
+ if (state == AMD_PG_STATE_GATE)
+ return vce_v1_0_stop(adev);
+ else
+ return vce_v1_0_start(adev);
+}
+
+static const struct amd_ip_funcs vce_v1_0_ip_funcs = {
+ .name = "vce_v1_0",
+ .early_init = vce_v1_0_early_init,
+ .sw_init = vce_v1_0_sw_init,
+ .sw_fini = vce_v1_0_sw_fini,
+ .hw_init = vce_v1_0_hw_init,
+ .hw_fini = vce_v1_0_hw_fini,
+ .suspend = vce_v1_0_suspend,
+ .resume = vce_v1_0_resume,
+ .is_idle = vce_v1_0_is_idle,
+ .wait_for_idle = vce_v1_0_wait_for_idle,
+ .set_clockgating_state = vce_v1_0_set_clockgating_state,
+ .set_powergating_state = vce_v1_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs vce_v1_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_VCE,
+ .align_mask = 0xf,
+ .nop = VCE_CMD_NO_OP,
+ .support_64bit_ptrs = false,
+ .no_user_fence = true,
+ .get_rptr = vce_v1_0_ring_get_rptr,
+ .get_wptr = vce_v1_0_ring_get_wptr,
+ .set_wptr = vce_v1_0_ring_set_wptr,
+ .parse_cs = amdgpu_vce_ring_parse_cs,
+ .emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
+ .emit_ib = amdgpu_vce_ring_emit_ib,
+ .emit_fence = amdgpu_vce_ring_emit_fence,
+ .test_ring = amdgpu_vce_ring_test_ring,
+ .test_ib = amdgpu_vce_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vce_ring_begin_use,
+ .end_use = amdgpu_vce_ring_end_use,
+};
+
+static void vce_v1_0_set_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->vce.num_rings; i++) {
+ adev->vce.ring[i].funcs = &vce_v1_0_ring_funcs;
+ adev->vce.ring[i].me = i;
+ }
+}
+
+static const struct amdgpu_irq_src_funcs vce_v1_0_irq_funcs = {
+ .set = vce_v1_0_set_interrupt_state,
+ .process = vce_v1_0_process_interrupt,
+};
+
+static void vce_v1_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->vce.irq.num_types = 1;
+ adev->vce.irq.funcs = &vce_v1_0_irq_funcs;
+}
+
+const struct amdgpu_ip_block_version vce_v1_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v1_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.h
index 0d878ca3acba..206e7bec897f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.h
@@ -1,5 +1,8 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ * Copyright 2025 Valve Corporation
+ * Copyright 2025 Alexandre Demers
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -21,12 +24,9 @@
*
*/
-#ifndef __DCE_V11_0_H__
-#define __DCE_V11_0_H__
+#ifndef __VCE_V1_0_H__
+#define __VCE_V1_0_H__
-extern const struct amdgpu_ip_block_version dce_v11_0_ip_block;
-extern const struct amdgpu_ip_block_version dce_v11_2_ip_block;
-
-void dce_v11_0_disable_dce(struct amdgpu_device *adev);
+extern const struct amdgpu_ip_block_version vce_v1_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index bee3e904a6bc..8ea8a6193492 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -407,6 +407,11 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
static int vce_v2_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_early_init(adev);
+ if (r)
+ return r;
adev->vce.num_rings = 2;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 708123899c41..719e9643c43d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -399,6 +399,7 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
static int vce_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
@@ -407,6 +408,10 @@ static int vce_v3_0_early_init(struct amdgpu_ip_block *ip_block)
(AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
return -ENOENT;
+ r = amdgpu_vce_early_init(adev);
+ if (r)
+ return r;
+
adev->vce.num_rings = 3;
vce_v3_0_set_ring_funcs(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 335bda64ff5b..2d64002bed61 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -410,6 +410,11 @@ static int vce_v4_0_stop(struct amdgpu_device *adev)
static int vce_v4_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_early_init(adev);
+ if (r)
+ return r;
if (amdgpu_sriov_vf(adev)) /* currently only VCN0 support SRIOV */
adev->vce.num_rings = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index c74947705d77..a316797875a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -193,7 +193,7 @@ static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[0].pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
if (amdgpu_vcnfw_log) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
amdgpu_vcn_fwlog_init(adev->vcn.inst);
@@ -230,11 +230,11 @@ static int vcn_v1_0_sw_fini(struct amdgpu_ip_block *ip_block)
jpeg_v1_0_sw_fini(ip_block);
- r = amdgpu_vcn_sw_fini(adev, 0);
+ amdgpu_vcn_sw_fini(adev, 0);
kfree(adev->vcn.ip_dump);
- return r;
+ return 0;
}
/**
@@ -1338,7 +1338,6 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
- ring = &adev->vcn.inst->ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
@@ -1399,7 +1398,6 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
- ring = &adev->vcn.inst->ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 148b651be7ca..8897dcc9c1a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -98,6 +98,8 @@ static int vcn_v2_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
struct dpg_pause_state *new_state);
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
+static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst);
+
/**
* vcn_v2_0_early_init - set function pointers and load microcode
*
@@ -134,10 +136,8 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
- uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
@@ -213,6 +213,12 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
}
adev->vcn.inst[0].pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
+ adev->vcn.inst[0].reset = vcn_v2_0_reset;
+
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -224,14 +230,13 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(adev->vcn.inst);
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_2_0, ARRAY_SIZE(vcn_reg_list_2_0));
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
return 0;
}
@@ -247,7 +252,7 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r, idx;
struct amdgpu_device *adev = ip_block->adev;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
fw_shared->present_flag_0 = 0;
@@ -260,11 +265,11 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- r = amdgpu_vcn_sw_fini(adev, 0);
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
- kfree(adev->vcn.ip_dump);
+ amdgpu_vcn_sw_fini(adev, 0);
- return r;
+ return 0;
}
/**
@@ -848,9 +853,10 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
+ int ret;
vcn_v2_0_enable_static_power_gating(vinst);
@@ -934,8 +940,13 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
UVD, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, 0, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, 0, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
/* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
@@ -990,7 +1001,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
static int vcn_v2_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
@@ -1297,7 +1308,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
if (!ret_code) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
@@ -1355,6 +1366,16 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
return 0;
}
+static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst)
+{
+ int r;
+
+ r = vcn_v2_0_stop(vinst);
+ if (r)
+ return r;
+ return vcn_v2_0_start(vinst);
+}
+
static bool vcn_v2_0_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -2071,66 +2092,6 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
}
-static void vcn_v2_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v2_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_0[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.name = "vcn_v2_0",
.early_init = vcn_v2_0_early_init,
@@ -2144,8 +2105,8 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.wait_for_idle = vcn_v2_0_wait_for_idle,
.set_clockgating_state = vcn_v2_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v2_0_dump_ip_state,
- .print_ip_state = vcn_v2_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
@@ -2176,6 +2137,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
@@ -2205,6 +2167,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 58b527a6b795..cebee453871c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -102,6 +102,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);
+static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst);
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
@@ -115,7 +116,6 @@ static void vcn_v2_5_idle_work_handler(struct work_struct *work)
struct amdgpu_device *adev = vcn_inst->adev;
unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
unsigned int i, j;
- int r = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
struct amdgpu_vcn_inst *v = &adev->vcn.inst[i];
@@ -148,15 +148,7 @@ static void vcn_v2_5_idle_work_handler(struct work_struct *work)
if (!fences && !atomic_read(&adev->vcn.inst[0].total_submission_cnt)) {
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- false);
- if (r)
- dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
- adev->vcn.workload_profile_active = false;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
+ amdgpu_vcn_put_profile(adev);
} else {
schedule_delayed_work(&adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
}
@@ -166,7 +158,6 @@ static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *v = &adev->vcn.inst[ring->me];
- int r = 0;
atomic_inc(&adev->vcn.inst[0].total_submission_cnt);
@@ -176,20 +167,6 @@ static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring)
* the delayed work so there is no one else to set it to false
* and we don't care if someone else sets it to true.
*/
- if (adev->vcn.workload_profile_active)
- goto pg_lock;
-
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (!adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- true);
- if (r)
- dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
- adev->vcn.workload_profile_active = true;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
-
-pg_lock:
mutex_lock(&adev->vcn.inst[0].vcn_pg_lock);
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
@@ -217,6 +194,7 @@ pg_lock:
v->pause_dpg_mode(v, &new_state);
}
mutex_unlock(&adev->vcn.inst[0].vcn_pg_lock);
+ amdgpu_vcn_get_profile(adev);
}
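
The open-coded workload-profile switching removed above now lives behind amdgpu_vcn_get_profile()/amdgpu_vcn_put_profile(). A plausible shape for the get side, reconstructed from the deleted lines (a sketch under that assumption, not the actual helper):

static void example_vcn_get_profile(struct amdgpu_device *adev)
{
	int r;

	mutex_lock(&adev->vcn.workload_profile_mutex);
	if (!adev->vcn.workload_profile_active) {
		/* first user switches the SMU to the video profile */
		r = amdgpu_dpm_switch_power_profile(adev,
				PP_SMC_POWER_PROFILE_VIDEO, true);
		if (r)
			dev_warn(adev->dev,
				 "(%d) failed to switch to video power profile mode\n", r);
		adev->vcn.workload_profile_active = true;
	}
	mutex_unlock(&adev->vcn.workload_profile_mutex);
}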
static void vcn_v2_5_ring_end_use(struct amdgpu_ring *ring)
@@ -296,12 +274,10 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, j, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
- uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << j))
continue;
@@ -404,8 +380,14 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.inst[j].pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
+ adev->vcn.inst[j].reset = vcn_v2_5_reset;
}
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -416,14 +398,13 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_2_5, ARRAY_SIZE(vcn_reg_list_2_5));
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
return 0;
}
@@ -439,7 +420,7 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i, r, idx;
struct amdgpu_device *adev = ip_block->adev;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
@@ -455,17 +436,15 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_suspend(adev, i);
if (r)
return r;
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
+ amdgpu_vcn_sw_fini(adev, i);
}
- kfree(adev->vcn.ip_dump);
-
return 0;
}
@@ -1019,9 +998,10 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
@@ -1112,8 +1092,13 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
VCN, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_dec;
/* force RBC into idle state */
@@ -1170,7 +1155,7 @@ static int vcn_v2_5_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared =
+ struct amdgpu_fw_shared *fw_shared =
adev->vcn.inst[i].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
@@ -1682,7 +1667,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
if (!ret_code) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
@@ -1816,6 +1801,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
/**
@@ -1914,6 +1900,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
@@ -1942,6 +1929,16 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
}
}
+static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst)
+{
+ int r;
+
+ r = vcn_v2_5_stop(vinst);
+ if (r)
+ return r;
+ return vcn_v2_5_start(vinst);
+}
+
static bool vcn_v2_5_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -2102,66 +2099,6 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v2_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_5[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v2_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_5[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.name = "vcn_v2_5",
.early_init = vcn_v2_5_early_init,
@@ -2175,8 +2112,8 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.wait_for_idle = vcn_v2_5_wait_for_idle,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v2_5_dump_ip_state,
- .print_ip_state = vcn_v2_5_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
@@ -2192,8 +2129,8 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
.wait_for_idle = vcn_v2_5_wait_for_idle,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v2_5_dump_ip_state,
- .print_ip_state = vcn_v2_5_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 9fb0d5380589..d9cf8f0feeb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -110,6 +110,7 @@ static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
enum amd_powergating_state state);
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
struct dpg_pause_state *new_state);
+static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst);
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
@@ -174,8 +175,6 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
int i, j, r;
int vcn_doorbell_index = 0;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
- uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
/*
@@ -192,7 +191,7 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
}
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -289,22 +288,27 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.inst[i].pause_dpg_mode = vcn_v3_0_pause_dpg_mode;
+ adev->vcn.inst[i].reset = vcn_v3_0_reset;
}
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
}
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_3_0, ARRAY_SIZE(vcn_reg_list_3_0));
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
return 0;
}
@@ -323,7 +327,7 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -338,17 +342,16 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
r = amdgpu_vcn_suspend(adev, i);
if (r)
return r;
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
+ amdgpu_vcn_sw_fini(adev, i);
}
- kfree(adev->vcn.ip_dump);
return 0;
}
@@ -1026,9 +1029,10 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
@@ -1121,8 +1125,13 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_dec;
/* force RBC into idle state */
@@ -1185,7 +1194,7 @@ static int vcn_v3_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
int j, k, r;
@@ -1706,7 +1715,7 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
int ret_code;
@@ -1825,7 +1834,7 @@ static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
/*whenever update RBC_RB_WPTR, we save the wptr in shared rb.wptr and scratch2 */
@@ -1875,15 +1884,19 @@ static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
struct amdgpu_job *job)
{
struct drm_gpu_scheduler **scheds;
-
- /* The create msg must be in the first IB submitted */
- if (atomic_read(&job->base.entity->fence_seq))
- return -EINVAL;
+ struct dma_fence *fence;
/* if VCN0 is harvested, we can't support AV1 */
if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
return -EINVAL;
+ /* wait for all jobs to finish before switching to instance 0 */
+ fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull);
+ if (fence) {
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
+
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
[AMDGPU_RING_PRIO_DEFAULT].sched;
drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
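
This replaces the old fence_seq == 0 requirement: instead of demanding that the AV1 create message arrive in the very first IB of the entity, the parser drains the entity and only then repoints it at instance 0. The reading that amdgpu_ctx_get_fence() with seq ~0ull returns the fence of the most recently submitted job (NULL when idle) is inferred from this hunk, not from documentation:

	/* Sketch of the drain-then-migrate pattern used by both limit_sched()
	 * variants in this patch; assumes ~0ull means "latest fence". */
	static void vcn_drain_entity_then_migrate(struct amdgpu_cs_parser *p,
						  struct amdgpu_job *job,
						  struct drm_gpu_scheduler **scheds)
	{
		struct dma_fence *fence;

		fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull);
		if (fence) {
			dma_fence_wait(fence, false);	/* uninterruptible */
			dma_fence_put(fence);
		}
		/* safe now: no job of this entity is still on the old scheduler */
		drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
	}
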
@@ -2033,6 +2046,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
/**
@@ -2131,6 +2145,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = amdgpu_vcn_ring_reset,
};
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
@@ -2164,6 +2179,18 @@ static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
}
}
+static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst)
+{
+ int r;
+
+ r = vcn_v3_0_stop(vinst);
+ if (r)
+ return r;
+ vcn_v3_0_enable_clock_gating(vinst);
+ vcn_v3_0_enable_static_power_gating(vinst);
+ return vcn_v3_0_start(vinst);
+}
+
static bool vcn_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -2315,67 +2342,6 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v3_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
- uint32_t inst_off;
- bool is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_3_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v3_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_3_0[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.name = "vcn_v3_0",
.early_init = vcn_v3_0_early_init,
@@ -2389,8 +2355,8 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.wait_for_idle = vcn_v3_0_wait_for_idle,
.set_clockgating_state = vcn_v3_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v3_0_dump_ip_state,
- .print_ip_state = vcn_v3_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v3_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index b5071f77f78d..3ae666522d57 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -148,7 +148,7 @@ static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block)
static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
@@ -183,8 +183,6 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = ip_block->adev;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
- uint32_t *ptr;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
@@ -241,7 +239,8 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
- adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
@@ -254,14 +253,9 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0, ARRAY_SIZE(vcn_reg_list_4_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -284,7 +278,7 @@ static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -308,13 +302,8 @@ static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
- }
-
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
return 0;
}
@@ -1008,9 +997,10 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
@@ -1093,8 +1083,13 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -1142,7 +1137,7 @@ static int vcn_v4_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r;
@@ -1359,8 +1354,8 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
struct mmsch_v4_0_cmd_end end = { {0} };
struct mmsch_v4_0_init_header header;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
- volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_fw_shared_rb_setup *rb_setup;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
@@ -1611,7 +1606,7 @@ static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
uint32_t tmp;
int r = 0;
@@ -1623,7 +1618,6 @@ static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
vcn_v4_0_stop_dpg_mode(vinst);
- r = 0;
goto done;
}
@@ -1807,15 +1801,19 @@ static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p,
struct amdgpu_job *job)
{
struct drm_gpu_scheduler **scheds;
-
- /* The create msg must be in the first IB submitted */
- if (atomic_read(&job->base.entity->fence_seq))
- return -EINVAL;
+ struct dma_fence *fence;
/* if VCN0 is harvested, we can't support AV1 */
if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
return -EINVAL;
+ /* wait for all jobs to finish before switching to instance 0 */
+ fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull);
+ if (fence) {
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
+
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
[AMDGPU_RING_PRIO_0].sched;
drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
@@ -1906,22 +1904,16 @@ out:
#define RADEON_VCN_ENGINE_TYPE_ENCODE (0x00000002)
#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003)
-
#define RADEON_VCN_ENGINE_INFO (0x30000001)
-#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET 16
-
#define RENCODE_ENCODE_STANDARD_AV1 2
#define RENCODE_IB_PARAM_SESSION_INIT 0x00000003
-#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET 64
-/* return the offset in ib if id is found, -1 otherwise
- * to speed up the searching we only search upto max_offset
- */
-static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset)
+/* return the offset in ib if id is found, -1 otherwise */
+static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int start)
{
int i;
- for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) {
+ for (i = start; i < ib->length_dw && ib->ptr[i] >= 8; i += ib->ptr[i] / 4) {
if (ib->ptr[i + 1] == id)
return i;
}
@@ -1936,56 +1928,56 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_vcn_decode_buffer *decode_buffer;
uint64_t addr;
uint32_t val;
- int idx;
+ int idx = 0, sidx;
/* The first instance can decode anything */
if (!ring->me)
return 0;
- /* RADEON_VCN_ENGINE_INFO is at the top of ib block */
- idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO,
- RADEON_VCN_ENGINE_INFO_MAX_OFFSET);
- if (idx < 0) /* engine info is missing */
- return 0;
-
- val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
- if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
- decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
-
- if (!(decode_buffer->valid_buf_flag & 0x1))
- return 0;
-
- addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
- decode_buffer->msg_buffer_address_lo;
- return vcn_v4_0_dec_msg(p, job, addr);
- } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
- idx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT,
- RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET);
- if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1)
- return vcn_v4_0_limit_sched(p, job);
+ while ((idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO, idx)) >= 0) {
+ val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
+ if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
+ decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
+
+ if (!(decode_buffer->valid_buf_flag & 0x1))
+ return 0;
+
+ addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
+ decode_buffer->msg_buffer_address_lo;
+ return vcn_v4_0_dec_msg(p, job, addr);
+ } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
+ sidx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT, idx);
+ if (sidx >= 0 && ib->ptr[sidx + 2] == RENCODE_ENCODE_STANDARD_AV1)
+ return vcn_v4_0_limit_sched(p, job);
+ }
+ idx += ib->ptr[idx] / 4;
}
return 0;
}
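
The rewritten parser drops the fixed search windows (RADEON_VCN_ENGINE_INFO_MAX_OFFSET, RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET) and instead walks every length-prefixed package in the IB, so an engine-info package no longer has to sit near the top. The package layout is inferred from the code itself: ptr[i] holds the package size in bytes (minimum 8), ptr[i + 1] the package id. A standalone sketch of the walk:

	/* Illustration of the package walk; mirrors the hunk above. */
	static int find_ib_param(struct amdgpu_ib *ib, uint32_t id, int start)
	{
		int i;

		/* advance by size-in-bytes / 4 to reach the next package's dword */
		for (i = start; i < ib->length_dw && ib->ptr[i] >= 8; i += ib->ptr[i] / 4) {
			if (ib->ptr[i + 1] == id)
				return i;
		}
		return -1;	/* id not present */
	}
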
-static int vcn_v4_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int vcn_v4_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+ int r;
- if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
- return -EOPNOTSUPP;
-
- vcn_v4_0_stop(vinst);
- vcn_v4_0_start(vinst);
-
- return amdgpu_ring_test_helper(ring);
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = vcn_v4_0_stop(vinst);
+ if (r)
+ return r;
+ r = vcn_v4_0_start(vinst);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
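
The ring-reset callbacks converted in this series all follow the same bracket: helper_begin before touching the hardware, a per-IP stop/start in the middle, helper_end to re-test the ring and retire the timed-out fence. The helper semantics are inferred from their use here, and the dropped -EOPNOTSUPP guards suggest the core now consults supported_reset before invoking .reset at all:

	/* Generic shape of a converted per-queue reset (the middle step is a
	 * placeholder; vcn_v4_0 uses vcn_v4_0_stop()/vcn_v4_0_start()). */
	static int example_ring_reset(struct amdgpu_ring *ring, unsigned int vmid,
				      struct amdgpu_fence *timedout_fence)
	{
		int r;

		amdgpu_ring_reset_helper_begin(ring, timedout_fence);
		r = per_ip_stop_and_start(ring);	/* hypothetical hook */
		if (r)
			return r;
		return amdgpu_ring_reset_helper_end(ring, timedout_fence);
	}
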
static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
.nop = VCN_ENC_CMD_NO_OP,
- .extra_dw = sizeof(struct amdgpu_vcn_rb_metadata),
+ .extra_bytes = sizeof(struct amdgpu_vcn_rb_metadata),
.get_rptr = vcn_v4_0_unified_ring_get_rptr,
.get_wptr = vcn_v4_0_unified_ring_get_wptr,
.set_wptr = vcn_v4_0_unified_ring_set_wptr,
@@ -2241,67 +2233,6 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0[j],
- i));
- }
-}
-
static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.name = "vcn_v4_0",
.early_init = vcn_v4_0_early_init,
@@ -2315,8 +2246,8 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.wait_for_idle = vcn_v4_0_wait_for_idle,
.set_clockgating_state = vcn_v4_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v4_0_dump_ip_state,
- .print_ip_state = vcn_v4_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 5a33140f5723..cb7123ec1a5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -134,6 +134,19 @@ static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
+ if (amdgpu_dpm_reset_vcn_is_supported(adev) && !amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
+ return 0;
+}
(Moving the supported_reset computation out of sw_init and into late_init appears to be because amdgpu_dpm_reset_vcn_is_supported() consults the SMU, which is only up once hardware init has run.)
+
static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
struct amdgpu_vcn4_fw_shared *fw_shared;
@@ -160,8 +173,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r, vcn_inst;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
- uint32_t *ptr;
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
@@ -201,7 +212,11 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+
+	/* There are no per-instance irq source IDs on 4.0.3; the IH
+	 * packets use a separate field to differentiate instances.
+	 */
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0,
AMDGPU_RING_PRIO_DEFAULT,
&adev->vcn.inst[i].sched_score);
if (r)
@@ -213,10 +228,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;
}
- /* TODO: Add queue reset mask when FW fully supports it */
- adev->vcn.supported_reset =
- amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
-
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -231,20 +242,11 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
}
}
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
-
- r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0_3, ARRAY_SIZE(vcn_reg_list_4_0_3));
if (r)
return r;
- return 0;
+ return amdgpu_vcn_sysfs_reset_mask_init(adev);
}
/**
@@ -261,7 +263,7 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(&adev->ddev, &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
@@ -281,13 +283,8 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
- }
-
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
return 0;
}
@@ -391,7 +388,7 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
return 0;
@@ -848,10 +845,10 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared =
+ struct amdgpu_vcn4_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
- int vcn_inst;
+ int vcn_inst, ret;
uint32_t tmp;
vcn_inst = GET_INST(VCN, inst_idx);
@@ -944,8 +941,13 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, 0, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -1010,8 +1012,8 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
struct mmsch_v4_0_cmd_end end = { {0} };
struct mmsch_v4_0_3_init_header header;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
- volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_fw_shared_rb_setup *rb_setup;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
@@ -1185,7 +1187,7 @@ static int vcn_v4_0_3_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
int j, k, r, vcn_inst;
uint32_t tmp;
@@ -1395,7 +1397,7 @@ static int vcn_v4_0_3_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
int r = 0, vcn_inst;
uint32_t tmp;
@@ -1594,18 +1596,16 @@ static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
int r = 0;
int vcn_inst;
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
- if (amdgpu_sriov_vf(ring->adev))
- return -EOPNOTSUPP;
-
- if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
- return -EOPNOTSUPP;
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
vcn_inst = GET_INST(VCN, ring->me);
r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst);
@@ -1620,9 +1620,8 @@ static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
vcn_v4_0_3_hw_init_inst(vinst);
vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[ring->me].indirect_sram);
- r = amdgpu_ring_test_helper(ring);
- return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
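
VCN 4.0.3 (and 5.0.1 below) reset through the SMU rather than a driver-side stop/start. Reconstructed flow of the new callback, with the context lines the diff elides paraphrased:

	/* Hedged reconstruction - ordering taken from the visible +/- lines. */
	amdgpu_ring_reset_helper_begin(ring, timedout_fence);

	vcn_inst = GET_INST(VCN, ring->me);		/* logical -> physical instance */
	r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst);	/* SMU-side engine reset */
	if (r)
		return r;

	/* the RRMT capability re-check at the hunk edge is kept, then: */
	vcn_v4_0_3_hw_init_inst(vinst);			/* redo doorbell programming */
	vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[ring->me].indirect_sram);
	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
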
static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
@@ -1875,71 +1874,10 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
adev->vcn.inst->ras_poison_irq.funcs = &vcn_v4_0_3_ras_irq_funcs;
}
-static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_3[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v4_0_3_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off, inst_id;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_id = GET_INST(VCN, i);
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, inst_id, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_3[j],
- inst_id));
- }
-}
-
static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.name = "vcn_v4_0_3",
.early_init = vcn_v4_0_3_early_init,
+ .late_init = vcn_v4_0_3_late_init,
.sw_init = vcn_v4_0_3_sw_init,
.sw_fini = vcn_v4_0_3_sw_fini,
.hw_init = vcn_v4_0_3_hw_init,
@@ -1950,8 +1888,8 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.wait_for_idle = vcn_v4_0_3_wait_for_idle,
.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v4_0_3_dump_ip_state,
- .print_ip_state = vcn_v4_0_3_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 16ade84facc7..b107ee80e472 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -147,12 +147,9 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = ip_block->adev;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
- uint32_t *ptr;
-
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -220,7 +217,8 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
}
adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
- adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -232,15 +230,9 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
- return 0;
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0_5, ARRAY_SIZE(vcn_reg_list_4_0_5));
+
+ return r;
}
/**
@@ -257,7 +249,7 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -278,13 +270,9 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
+ amdgpu_vcn_sw_fini(adev, i);
}
- kfree(adev->vcn.ip_dump);
-
return 0;
}
@@ -922,9 +910,10 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
@@ -1005,8 +994,13 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, inst_idx, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -1053,7 +1047,7 @@ static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r;
@@ -1272,7 +1266,7 @@ static int vcn_v4_0_5_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
uint32_t tmp;
int r = 0;
@@ -1465,18 +1459,22 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static int vcn_v4_0_5_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int vcn_v4_0_5_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+ int r;
- if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
- return -EOPNOTSUPP;
-
- vcn_v4_0_5_stop(vinst);
- vcn_v4_0_5_start(vinst);
-
- return amdgpu_ring_test_helper(ring);
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = vcn_v4_0_5_stop(vinst);
+ if (r)
+ return r;
+ r = vcn_v4_0_5_start(vinst);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
@@ -1591,7 +1589,7 @@ static int vcn_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1699,67 +1697,6 @@ static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v4_0_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_5[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v4_0_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_5[j],
- i));
- }
-}
-
static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.name = "vcn_v4_0_5",
.early_init = vcn_v4_0_5_early_init,
@@ -1773,8 +1710,8 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.wait_for_idle = vcn_v4_0_5_wait_for_idle,
.set_clockgating_state = vcn_v4_0_5_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v4_0_5_dump_ip_state,
- .print_ip_state = vcn_v4_0_5_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_5_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index f8e3f0b882da..0202df5db1e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -115,21 +115,6 @@ static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
-void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev)
-{
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
- uint32_t *ptr;
-
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
-}
-
/**
* vcn_v5_0_0_sw_init - sw init for VCN block
*
@@ -144,7 +129,7 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
int i, r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -198,9 +183,12 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
- adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ if (!amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
- vcn_v5_0_0_alloc_ip_dump(adev);
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_5_0, ARRAY_SIZE(vcn_reg_list_5_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -223,7 +211,7 @@ static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -244,13 +232,8 @@ static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
- }
-
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
return 0;
}
@@ -709,9 +692,10 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
@@ -765,8 +749,13 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, inst_idx, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "%s: vcn sram load failed %d\n", __func__, ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -813,7 +802,7 @@ static int vcn_v5_0_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r;
@@ -1006,7 +995,7 @@ static int vcn_v5_0_0_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
uint32_t tmp;
int r = 0;
@@ -1192,18 +1181,22 @@ static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
+static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+ int r;
- if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
- return -EOPNOTSUPP;
-
- vcn_v5_0_0_stop(vinst);
- vcn_v5_0_0_start(vinst);
-
- return amdgpu_ring_test_helper(ring);
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+ r = vcn_v5_0_0_stop(vinst);
+ if (r)
+ return r;
+ r = vcn_v5_0_0_start(vinst);
+ if (r)
+ return r;
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
@@ -1315,7 +1308,7 @@ static int vcn_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1423,67 +1416,6 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
- struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_5_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_5_0[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.name = "vcn_v5_0_0",
.early_init = vcn_v5_0_0_early_init,
@@ -1497,8 +1429,8 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.wait_for_idle = vcn_v5_0_0_wait_for_idle,
.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v5_0_0_dump_ip_state,
- .print_ip_state = vcn_v5_0_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
index b8927652bc50..51bbccd4360f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
@@ -32,11 +32,6 @@
#define VCN_VID_IP_ADDRESS 0x0
#define VCN_AON_IP_ADDRESS 0x30000
-void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev);
-void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
- struct drm_printer *p);
-void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block);
-
extern const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block;
#endif /* __VCN_V5_0_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index 338cf43c45fe..8bd457dea4cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -40,6 +40,40 @@
#include <drm/drm_drv.h>
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_5_0_1[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
+};
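
vcn 5.0.1 previously borrowed vcn_v5_0_0's dump callbacks; it now gets its own SOC15_REG_ENTRY_STR table and feeds it to the shared helper. For reference, this is the loop shape every deleted vcn_vX_Y_dump_ip_state() had in common, and presumably what amdgpu_vcn_dump_ip_state() now does centrally (reconstructed from the deleted code; the generic reg_list access is an assumption):

	/* Reconstructed dump loop - one UVD_POWER_STATUS probe per instance,
	 * full register sweep only when the instance is powered. */
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		inst_off = i * reg_count;
		/* UVD_POWER_STATUS is always readable and is entry 0 */
		adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
		is_powered = (adev->vcn.ip_dump[inst_off] &
			      UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered)
			for (j = 1; j < reg_count; j++)
				adev->vcn.ip_dump[inst_off + j] =
					RREG32(SOC15_REG_ENTRY_OFFSET_INST(reg_list[j], i));
	}
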
+
static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
@@ -79,6 +113,27 @@ static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int vcn_v5_0_1_late_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(13, 0, 12):
+ if ((adev->psp.sos.fw_version >= 0x00450025) &&
+ amdgpu_dpm_reset_vcn_is_supported(adev) &&
+ !amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
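
The 5.0.1 gate stacks three conditions before advertising per-queue reset; condensed below (the SOS version constant is from the patch, the reading that older SOS firmware lacks the VCN reset interface is an inference):

	/* Per-queue reset is exposed only when all three hold: */
	bool per_queue = adev->psp.sos.fw_version >= 0x00450025 &&	/* new enough SOS */
			 amdgpu_dpm_reset_vcn_is_supported(adev) &&	/* SMU support */
			 !amdgpu_sriov_vf(adev);			/* bare metal only */
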
+
static void vcn_v5_0_1_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
struct amdgpu_vcn5_fw_shared *fw_shared;
@@ -153,17 +208,23 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
vcn_v5_0_1_fw_shared_init(adev, i);
}
- /* TODO: Add queue reset mask when FW fully supports it */
- adev->vcn.supported_reset =
- amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
-
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
}
- vcn_v5_0_0_alloc_ip_dump(adev);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
+ r = amdgpu_vcn_ras_sw_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to initialize vcn ras block!\n");
+ return r;
+ }
+ }
+
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_5_0_1, ARRAY_SIZE(vcn_reg_list_5_0_1));
+ if (r)
+ return r;
return amdgpu_vcn_sysfs_reset_mask_init(adev);
}
@@ -182,7 +243,7 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
@@ -201,15 +262,27 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
return r;
}
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
- }
-
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
+
+ return 0;
+}
+
+static int vcn_v5_0_1_hw_init_inst(struct amdgpu_device *adev, int i)
+{
+ struct amdgpu_ring *ring;
+ int vcn_inst;
+
+ vcn_inst = GET_INST(VCN, i);
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ if (ring->use_doorbell)
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 11 * vcn_inst),
+ adev->vcn.inst[i].aid_id);
return 0;
}
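
Factoring vcn_v5_0_1_hw_init_inst() out of hw_init lets the ring-reset path below reprogram a single instance's doorbell without re-running full hardware init. The index math, isolated (the 11-doorbells-per-instance stride is read off the call above; the explanation of the << 1 as converting to 32-bit doorbell slots is an assumption):

	/* Hypothetical helper isolating the doorbell index computation. */
	static u32 vcn_v5_0_1_db_index(struct amdgpu_device *adev, int vcn_inst)
	{
		return (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 11 * vcn_inst;
	}
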
@@ -225,7 +298,7 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
- int i, r, vcn_inst;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
r = vcn_v5_0_1_start_sriov(adev);
@@ -243,14 +316,8 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
-
- if (ring->use_doorbell)
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 11 * vcn_inst),
- adev->vcn.inst[i].aid_id);
+ vcn_v5_0_1_hw_init_inst(adev, i);
/* Re-init fw_shared, if required */
vcn_v5_0_1_fw_shared_init(adev, i);
@@ -284,7 +351,7 @@ static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
return 0;
@@ -601,11 +668,11 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared =
+ struct amdgpu_vcn5_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE};
- int vcn_inst;
+ int vcn_inst, ret;
uint32_t tmp;
vcn_inst = GET_INST(VCN, inst_idx);
@@ -666,8 +733,16 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, 0, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
+
+	/* ring reset in progress, fw should not check the RB ring */
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
/* Pause dpg */
vcn_v5_0_1_pause_dpg_mode(vinst, &state);
@@ -681,7 +756,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+
WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
@@ -692,6 +767,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+	/* reset complete, fw may check the RB ring again */
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
@@ -728,8 +804,8 @@ static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev)
struct mmsch_v5_0_cmd_end end = { {0} };
struct mmsch_v5_0_init_header header;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
- volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_fw_shared_rb_setup *rb_setup;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
@@ -903,7 +979,7 @@ static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r, vcn_inst;
@@ -1095,7 +1171,7 @@ static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
uint32_t tmp;
int r = 0, vcn_inst;
@@ -1225,6 +1301,31 @@ static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static int vcn_v5_0_1_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r = 0;
+ int vcn_inst;
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ vcn_inst = GET_INST(VCN, ring->me);
+ r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst);
+
+ if (r) {
+ DRM_DEV_ERROR(adev->dev, "VCN reset failed: %d\n", r);
+ return r;
+ }
+
+ vcn_v5_0_1_hw_init_inst(adev, ring->me);
+ vcn_v5_0_1_start_dpg_mode(vinst, vinst->indirect_sram);
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
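For orientation, the new reset callback follows the begin, re-initialize, end shape of the ring-reset helpers used above. A minimal sketch of that shape, where reset_engine() and reinit_ring_hw() are hypothetical stand-ins for the device-specific calls (amdgpu_dpm_reset_vcn() and vcn_v5_0_1_hw_init_inst() in the real function):

    /* Sketch only: the generic shape of a per-ring reset callback. */
    static int example_ring_reset(struct amdgpu_ring *ring, unsigned int vmid,
                                  struct amdgpu_fence *timedout_fence)
    {
        int r;

        /* stop the scheduler and mark the timed-out fence */
        amdgpu_ring_reset_helper_begin(ring, timedout_fence);

        r = reset_engine(ring);        /* hypothetical HW reset */
        if (r)
            return r;

        reinit_ring_hw(ring);          /* hypothetical doorbell/RB re-init */

        /* signal remaining fences and restart the scheduler */
        return amdgpu_ring_reset_helper_end(ring, timedout_fence);
    }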
static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1253,6 +1354,7 @@ static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
.emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v5_0_1_ring_reset,
};
/**
@@ -1456,7 +1558,7 @@ static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
.name = "vcn_v5_0_1",
.early_init = vcn_v5_0_1_early_init,
- .late_init = NULL,
+ .late_init = vcn_v5_0_1_late_init,
.sw_init = vcn_v5_0_1_sw_init,
.sw_fini = vcn_v5_0_1_sw_fini,
.hw_init = vcn_v5_0_1_hw_init,
@@ -1471,8 +1573,8 @@ static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v5_0_1_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v5_0_0_dump_ip_state,
- .print_ip_state = vcn_v5_0_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = {
@@ -1553,7 +1655,7 @@ static int vcn_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank
/* reference: smu driver interface header file */
static int vcn_v5_0_1_err_codes[] = {
- 14, 15, /* VCN */
+ 14, 15, 47, /* VCN [D|V|S] */
};
static bool vcn_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
@@ -1599,6 +1701,13 @@ static int vcn_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_commo
if (r)
goto late_fini;
+ if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+ adev->vcn.inst->ras_poison_irq.funcs) {
+ r = amdgpu_irq_get(adev, &adev->vcn.inst->ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+
return 0;
late_fini:
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 9b3510e53112..a611a7345125 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -67,7 +67,6 @@
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
-#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
@@ -2124,8 +2123,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
- else
- amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
break;
@@ -2142,8 +2139,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
- else
- amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
@@ -2163,8 +2158,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
- else
- amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index 62e88e5362e9..16e12c9913f9 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -5,7 +5,7 @@
config HSA_AMD
bool "HSA kernel driver for AMD GPU devices"
- depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64 || (RISCV && 64BIT))
+ depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64 || (RISCV && 64BIT) || (LOONGARCH && 64BIT))
select HMM_MIRROR
select MMU_NOTIFIER
select DRM_AMDGPU_USERPTR
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index a2149afa5803..22925df6a791 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -22,7 +22,6 @@
*/
#include <linux/device.h>
-#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/file.h>
@@ -522,15 +521,10 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
}
- minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL);
- if (!minfo.cu_mask.ptr)
- return -ENOMEM;
-
- retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size);
- if (retval) {
+ minfo.cu_mask.ptr = memdup_user(cu_mask_ptr, cu_mask_size);
+ if (IS_ERR(minfo.cu_mask.ptr)) {
pr_debug("Could not copy CU mask from userspace");
- retval = -EFAULT;
- goto out;
+ return PTR_ERR(minfo.cu_mask.ptr);
}
mutex_lock(&p->mutex);
@@ -539,7 +533,6 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
mutex_unlock(&p->mutex);
-out:
kfree(minfo.cu_mask.ptr);
return retval;
}
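The memdup_user() conversion above collapses the allocate-then-copy pair into a single call that returns an ERR_PTR() on failure, so the separate cleanup label can go away. The resulting caller-side idiom, sketched with illustrative names:

    /* Sketch: memdup_user() allocates and copies in one step. */
    u32 *mask = memdup_user(user_ptr, mask_size);

    if (IS_ERR(mask))
        return PTR_ERR(mask);   /* -EFAULT or -ENOMEM, as appropriate */

    /* ... use mask ... */
    kfree(mask);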
@@ -1071,7 +1064,12 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
svm_range_list_lock_and_flush_work(&p->svms, current->mm);
mutex_lock(&p->svms.lock);
mmap_write_unlock(current->mm);
- if (interval_tree_iter_first(&p->svms.objects,
+
+ /* Skip the special case that allocates VRAM without a VA;
+ * there the VA is 0 and thus not a valid address.
+ */
+ if (!(!args->va_addr && (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)) &&
+ interval_tree_iter_first(&p->svms.objects,
args->va_addr >> PAGE_SHIFT,
(args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
pr_err("Address: 0x%llx already allocated by SVM\n",
@@ -2567,8 +2565,8 @@ static int criu_restore(struct file *filep,
pr_debug("CRIU restore (num_devices:%u num_bos:%u num_objects:%u priv_data_size:%llu)\n",
args->num_devices, args->num_bos, args->num_objects, args->priv_data_size);
- if (!args->bos || !args->devices || !args->priv_data || !args->priv_data_size ||
- !args->num_devices || !args->num_bos)
+ if ((args->num_bos > 0 && !args->bos) || !args->devices || !args->priv_data ||
+ !args->priv_data_size || !args->num_devices)
return -EINVAL;
mutex_lock(&p->mutex);
@@ -2828,7 +2826,7 @@ retry:
static int runtime_disable(struct kfd_process *p)
{
- int i = 0, ret;
+ int i = 0, ret = 0;
bool was_enabled = p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED;
p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_DISABLED;
@@ -2865,6 +2863,7 @@ static int runtime_disable(struct kfd_process *p)
/* disable ttmp setup */
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
+ int last_err = 0;
if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
pdd->spi_dbg_override =
@@ -2874,14 +2873,17 @@ static int runtime_disable(struct kfd_process *p)
pdd->dev->vm_info.last_vmid_kfd);
if (!pdd->dev->kfd->shared_resources.enable_mes)
- debug_refresh_runlist(pdd->dev->dqm);
+ last_err = debug_refresh_runlist(pdd->dev->dqm);
else
- kfd_dbg_set_mes_debug_mode(pdd,
+ last_err = kfd_dbg_set_mes_debug_mode(pdd,
!kfd_dbg_has_cwsr_workaround(pdd->dev));
+
+ if (last_err)
+ ret = last_err;
}
}
- return 0;
+ return ret;
}
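The change above keeps iterating over every device instead of pretending success, remembering the most recent failure. The aggregation pattern, sketched generically (devices, n_devices and disable_one_device() are hypothetical):

    /* Sketch: visit every device and report the last error seen
     * rather than aborting on the first one.
     */
    int ret = 0;

    for (i = 0; i < n_devices; i++) {
        int last_err = disable_one_device(devices[i]); /* hypothetical */

        if (last_err)
            ret = last_err;   /* remember, but keep going */
    }
    return ret;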
static int kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, void *data)
@@ -3253,8 +3255,10 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
int retcode = -EINVAL;
bool ptrace_attached = false;
- if (nr >= AMDKFD_CORE_IOCTL_COUNT)
+ if (nr >= AMDKFD_CORE_IOCTL_COUNT) {
+ retcode = -ENOTTY;
goto err_i1;
+ }
if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
u32 amdkfd_size;
@@ -3267,8 +3271,10 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
asize = amdkfd_size;
cmd = ioctl->cmd;
- } else
+ } else {
+ retcode = -ENOTTY;
goto err_i1;
+ }
dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
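Returning -ENOTTY for out-of-range or unregistered ioctl numbers matches the kernel convention for "inappropriate ioctl for device", leaving -EINVAL for recognized commands with bad arguments. The dispatch shape, sketched with a hypothetical table:

    /* Sketch: unknown command numbers get -ENOTTY, not -EINVAL. */
    if (nr >= IOCTL_COUNT || !ioctl_table[nr].func)
        return -ENOTTY;          /* no such ioctl on this device */

    return ioctl_table[nr].func(filep, p, data); /* may still return -EINVAL */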
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index bf0854bd5555..e9cfb80bd436 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -495,6 +495,7 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
mutex_init(&kfd->doorbell_mutex);
ida_init(&kfd->doorbell_ida);
+ atomic_set(&kfd->kfd_processes_count, 0);
return kfd;
}
@@ -971,7 +972,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd,
kfd_smi_event_update_gpu_reset(node, false, reset_context);
}
- kgd2kfd_suspend(kfd, false);
+ kgd2kfd_suspend(kfd, true);
for (i = 0; i < kfd->num_nodes; i++)
kfd_signal_reset_event(kfd->nodes[i]);
@@ -1013,13 +1014,33 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
return 0;
}
-bool kfd_is_locked(void)
+bool kfd_is_locked(struct kfd_dev *kfd)
{
+ uint8_t id = 0;
+ struct kfd_node *dev;
+
lockdep_assert_held(&kfd_processes_mutex);
- return (kfd_locked > 0);
+
+ /* check reset/suspend lock */
+ if (kfd_locked > 0)
+ return true;
+
+ if (kfd)
+ return kfd->kfd_dev_lock > 0;
+
+ /* check the lock on all cgroup-accessible devices */
+ while (kfd_topology_enum_kfd_devices(id++, &dev) == 0) {
+ if (!dev || kfd_devcgroup_check_permission(dev))
+ continue;
+
+ if (dev->kfd->kfd_dev_lock > 0)
+ return true;
+ }
+
+ return false;
}
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
{
struct kfd_node *node;
int i;
@@ -1027,14 +1048,8 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
if (!kfd->init_complete)
return;
- /* for runtime suspend, skip locking kfd */
- if (!run_pm) {
- mutex_lock(&kfd_processes_mutex);
- /* For first KFD device suspend all the KFD processes */
- if (++kfd_locked == 1)
- kfd_suspend_all_processes();
- mutex_unlock(&kfd_processes_mutex);
- }
+ if (suspend_proc)
+ kgd2kfd_suspend_process(kfd);
for (i = 0; i < kfd->num_nodes; i++) {
node = kfd->nodes[i];
@@ -1042,7 +1057,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
}
}
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
{
int ret, i;
@@ -1055,14 +1070,36 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
return ret;
}
- /* for runtime resume, skip unlocking kfd */
- if (!run_pm) {
- mutex_lock(&kfd_processes_mutex);
- if (--kfd_locked == 0)
- ret = kfd_resume_all_processes();
- WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
- mutex_unlock(&kfd_processes_mutex);
- }
+ if (resume_proc)
+ ret = kgd2kfd_resume_process(kfd);
+
+ return ret;
+}
+
+void kgd2kfd_suspend_process(struct kfd_dev *kfd)
+{
+ if (!kfd->init_complete)
+ return;
+
+ mutex_lock(&kfd_processes_mutex);
+ /* On the first KFD device suspend, suspend all KFD processes */
+ if (++kfd_locked == 1)
+ kfd_suspend_all_processes();
+ mutex_unlock(&kfd_processes_mutex);
+}
+
+int kgd2kfd_resume_process(struct kfd_dev *kfd)
+{
+ int ret = 0;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ mutex_lock(&kfd_processes_mutex);
+ if (--kfd_locked == 0)
+ ret = kfd_resume_all_processes();
+ WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
+ mutex_unlock(&kfd_processes_mutex);
return ret;
}
@@ -1097,7 +1134,15 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
}
for (i = 0; i < kfd->num_nodes; i++) {
- node = kfd->nodes[i];
+ /* Guard against a race with another thread running between
+ * kfd_cleanup_nodes() and kfree(kfd), when kfd->nodes[i] has
+ * already been set to NULL.
+ */
+ if (kfd->nodes[i])
+ node = kfd->nodes[i];
+ else
+ return;
+
spin_lock_irqsave(&node->interrupt_lock, flags);
if (node->interrupts_active
@@ -1442,24 +1487,62 @@ unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node)
kfd_get_num_sdma_engines(node);
}
-int kgd2kfd_check_and_lock_kfd(void)
+int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
{
+ struct kfd_process *p;
+ int r = 0, temp, idx;
+
mutex_lock(&kfd_processes_mutex);
- if (!hash_empty(kfd_processes_table) || kfd_is_locked()) {
- mutex_unlock(&kfd_processes_mutex);
- return -EBUSY;
+
+ /* kfd_processes_count is per kfd_dev; if it is non-zero, return
+ * -EBUSY without any further checks.
+ */
+ if (!!atomic_read(&kfd->kfd_processes_count)) {
+ pr_debug("process_wq_release not finished\n");
+ r = -EBUSY;
+ goto out;
+ }
+
+ if (hash_empty(kfd_processes_table) && !kfd_is_locked(kfd))
+ goto out;
+
+ /* fail during system reset/resume or while the kfd device is switching partitions. */
+ if (kfd_is_locked(kfd)) {
+ r = -EBUSY;
+ goto out;
}
- ++kfd_locked;
+ /*
+ * Ensure all running processes are excluded from the device by cgroup
+ * before the mode switch, i.e. no pdd was created on this socket for
+ * any process.
+ */
+ idx = srcu_read_lock(&kfd_processes_srcu);
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ int i;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ if (p->pdds[i]->dev->kfd != kfd)
+ continue;
+
+ r = -EBUSY;
+ goto proc_check_unlock;
+ }
+ }
+
+proc_check_unlock:
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+out:
+ if (!r)
+ ++kfd->kfd_dev_lock;
mutex_unlock(&kfd_processes_mutex);
- return 0;
+ return r;
}
-void kgd2kfd_unlock_kfd(void)
+void kgd2kfd_unlock_kfd(struct kfd_dev *kfd)
{
mutex_lock(&kfd_processes_mutex);
- --kfd_locked;
+ --kfd->kfd_dev_lock;
mutex_unlock(&kfd_processes_mutex);
}
@@ -1485,6 +1568,25 @@ int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
return ret;
}
+int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
+{
+ struct kfd_node *node;
+ int i, r;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ r = node->dqm->ops.unhalt(node->dqm);
+ if (r) {
+ dev_err(kfd_device, "Error in starting scheduler\n");
+ return r;
+ }
+ }
+ return 0;
+}
+
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
struct kfd_node *node;
@@ -1502,6 +1604,23 @@ int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
return node->dqm->ops.halt(node->dqm);
}
+int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
+{
+ struct kfd_node *node;
+ int i, r;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ r = node->dqm->ops.halt(node->dqm);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
struct kfd_node *node;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 76359c6a3f3a..d7a2e7178ea9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1209,6 +1209,15 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
pr_debug_ratelimited("Evicting process pid %d queues\n",
pdd->process->lead_thread->pid);
+ if (dqm->dev->kfd->shared_resources.enable_mes) {
+ pdd->last_evict_timestamp = get_jiffies_64();
+ retval = suspend_all_queues_mes(dqm);
+ if (retval) {
+ dev_err(dev, "Suspending all queues failed");
+ goto out;
+ }
+ }
+
/* Mark all queues as evicted. Deactivate all active queues on
* the qpd.
*/
@@ -1221,23 +1230,27 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
decrement_queue_count(dqm, qpd, q);
if (dqm->dev->kfd->shared_resources.enable_mes) {
- int err;
-
- err = remove_queue_mes(dqm, q, qpd);
- if (err) {
+ retval = remove_queue_mes(dqm, q, qpd);
+ if (retval) {
dev_err(dev, "Failed to evict queue %d\n",
q->properties.queue_id);
- retval = err;
+ goto out;
}
}
}
- pdd->last_evict_timestamp = get_jiffies_64();
- if (!dqm->dev->kfd->shared_resources.enable_mes)
+
+ if (!dqm->dev->kfd->shared_resources.enable_mes) {
+ pdd->last_evict_timestamp = get_jiffies_64();
retval = execute_queues_cpsch(dqm,
qpd->is_debug ?
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
USE_DEFAULT_GRACE_PERIOD);
+ } else {
+ retval = resume_all_queues_mes(dqm);
+ if (retval)
+ dev_err(dev, "Resuming all queues failed");
+ }
out:
dqm_unlock(dqm);
@@ -1884,6 +1897,8 @@ fail_packet_manager_init:
static int stop_cpsch(struct device_queue_manager *dqm)
{
+ int ret = 0;
+
dqm_lock(dqm);
if (!dqm->sched_running) {
dqm_unlock(dqm);
@@ -1891,9 +1906,10 @@ static int stop_cpsch(struct device_queue_manager *dqm)
}
if (!dqm->dev->kfd->shared_resources.enable_mes)
- unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
+ ret = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
+ 0, USE_DEFAULT_GRACE_PERIOD, false);
else
- remove_all_kfd_queues_mes(dqm);
+ ret = remove_all_kfd_queues_mes(dqm);
dqm->sched_running = false;
@@ -1907,7 +1923,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
dqm->detect_hang_info = NULL;
dqm_unlock(dqm);
- return 0;
+ return ret;
}
static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
@@ -2078,7 +2094,8 @@ int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm,
while (*fence_addr != fence_value) {
/* Fatal err detected, this response won't come */
- if (amdgpu_amdkfd_is_fed(dqm->dev->adev))
+ if (amdgpu_amdkfd_is_fed(dqm->dev->adev) ||
+ amdgpu_in_reset(dqm->dev->adev))
return -EIO;
if (time_after(jiffies, end_jiffies)) {
@@ -2312,7 +2329,7 @@ static int reset_hung_queues_sdma(struct device_queue_manager *dqm)
continue;
/* Reset engine and check. */
- if (amdgpu_sdma_reset_engine(dqm->dev->adev, i) ||
+ if (amdgpu_sdma_reset_engine(dqm->dev->adev, i, false) ||
dqm->dev->kfd2kgd->hqd_sdma_get_doorbell(dqm->dev->adev, i, j) ||
!set_sdma_queue_as_reset(dqm, doorbell_off)) {
r = -ENOTRECOVERABLE;
@@ -2339,9 +2356,18 @@ reset_fail:
static int reset_queues_on_hws_hang(struct device_queue_manager *dqm, bool is_sdma)
{
+ struct amdgpu_device *adev = dqm->dev->adev;
+
while (halt_if_hws_hang)
schedule();
+ if (adev->debug_disable_gpu_ring_reset) {
+ dev_info_once(adev->dev,
+ "%s queue hung, but ring reset disabled",
+ is_sdma ? "sdma" : "compute");
+
+ return -EPERM;
+ }
if (!amdgpu_gpu_recovery)
return -ENOTRECOVERABLE;
@@ -2716,7 +2742,7 @@ static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
dqm_lock(dqm);
mqd_mgr = dqm->mqd_mgrs[mqd_type];
- *mqd_size = mqd_mgr->mqd_size;
+ *mqd_size = mqd_mgr->mqd_size * NUM_XCC(mqd_mgr->dev->xcc_mask);
*ctl_stack_size = 0;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info)
@@ -3089,61 +3115,17 @@ out:
return ret;
}
-static int kfd_dqm_evict_pasid_mes(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
-{
- struct device *dev = dqm->dev->adev->dev;
- int ret = 0;
-
- /* Check if process is already evicted */
- dqm_lock(dqm);
- if (qpd->evicted) {
- /* Increment the evicted count to make sure the
- * process stays evicted before its terminated.
- */
- qpd->evicted++;
- dqm_unlock(dqm);
- goto out;
- }
- dqm_unlock(dqm);
-
- ret = suspend_all_queues_mes(dqm);
- if (ret) {
- dev_err(dev, "Suspending all queues failed");
- goto out;
- }
-
- ret = dqm->ops.evict_process_queues(dqm, qpd);
- if (ret) {
- dev_err(dev, "Evicting process queues failed");
- goto out;
- }
-
- ret = resume_all_queues_mes(dqm);
- if (ret)
- dev_err(dev, "Resuming all queues failed");
-
-out:
- return ret;
-}
-
int kfd_evict_process_device(struct kfd_process_device *pdd)
{
struct device_queue_manager *dqm;
struct kfd_process *p;
- int ret = 0;
p = pdd->process;
dqm = pdd->dev->dqm;
WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
- if (dqm->dev->kfd->shared_resources.enable_mes)
- ret = kfd_dqm_evict_pasid_mes(dqm, &pdd->qpd);
- else
- ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
-
- return ret;
+ return dqm->ops.evict_process_queues(dqm, &pdd->qpd);
}
int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 2b294ada3ec0..5a190dd6be4e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -748,16 +748,6 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
uint64_t *slots = page_slots(p->signal_page);
uint32_t id;
- /*
- * If id is valid but slot is not signaled, GPU may signal the same event twice
- * before driver have chance to process the first interrupt, then signal slot is
- * auto-reset after set_event wakeup the user space, just drop the second event as
- * the application only need wakeup once.
- */
- if ((valid_id_bits > 31 || (1U << valid_id_bits) >= KFD_SIGNAL_EVENT_LIMIT) &&
- partial_id < KFD_SIGNAL_EVENT_LIMIT && slots[partial_id] == UNSIGNALED_EVENT_SLOT)
- goto out_unlock;
-
if (valid_id_bits)
pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
partial_id, valid_id_bits);
@@ -786,7 +776,6 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
}
}
-out_unlock:
rcu_read_unlock();
kfd_unref_process(p);
}
@@ -1302,7 +1291,7 @@ void kfd_signal_reset_event(struct kfd_node *dev)
if (ti) {
dev_err(dev->adev->dev,
"Queues reset on process %s tid %d thread %s pid %d\n",
- ti->process_name, ti->tgid, ti->task_name, ti->pid);
+ ti->process_name, ti->tgid, ti->task.comm, ti->task.pid);
amdgpu_vm_put_task_info(ti);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index dbcb60eb54b2..1d170dc50df3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -23,7 +23,6 @@
*/
#include <linux/device.h>
-#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/sched.h>
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 4ceb251312a6..d76fb61869c7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -28,6 +28,7 @@
#include "kfd_device_queue_manager.h"
#include "kfd_smi_events.h"
#include "amdgpu_ras.h"
+#include "amdgpu_ras_mgr.h"
/*
* GFX9 SQ Interrupts
@@ -228,7 +229,11 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
kfd_signal_poison_consumed_event(dev, pasid);
- event_id = amdgpu_ras_acquire_event_id(dev->adev, type);
+ if (amdgpu_uniras_enabled(dev->adev))
+ event_id = amdgpu_ras_mgr_gen_ras_event_seqno(dev->adev,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION);
+ else
+ event_id = amdgpu_ras_acquire_event_id(dev->adev, type);
RAS_EVENT_LOG(dev->adev, event_id,
"poison is consumed by client %d, kick off gpu reset flow\n", client_id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 2b0a830f5b29..fb3129883a4c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -46,11 +46,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
int retval;
union PM4_MES_TYPE_3_HEADER nop;
- if (WARN_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ))
- return false;
-
- pr_debug("Initializing queue type %d size %d\n", KFD_QUEUE_TYPE_HIQ,
- queue_size);
+ pr_debug("Initializing queue type %d size %d\n", type, queue_size);
memset(&prop, 0, sizeof(prop));
memset(&nop, 0, sizeof(nop));
@@ -69,6 +65,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
break;
default:
+ WARN(1, "Invalid queue type %d\n", type);
dev_err(dev->adev->dev, "Invalid queue type %d\n", type);
return false;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 79251f22b702..af53e796ea1b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -21,7 +21,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/types.h>
-#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
@@ -39,22 +38,22 @@
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt
-static uint64_t
-svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
+static u64
+svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, u64 addr)
{
return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}
static int
-svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
- dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
+svm_migrate_gart_map(struct amdgpu_ring *ring, u64 npages,
+ dma_addr_t *addr, u64 *gart_addr, u64 flags)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
unsigned int num_dw, num_bytes;
struct dma_fence *fence;
- uint64_t src_addr, dst_addr;
- uint64_t pte_flags;
+ u64 src_addr, dst_addr;
+ u64 pte_flags;
void *cpu_addr;
int r;
@@ -68,7 +67,8 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4 + num_bytes,
AMDGPU_IB_POOL_DELAYED,
- &job);
+ &job,
+ AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP);
if (r)
return r;
@@ -122,15 +122,15 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
- uint64_t *vram, uint64_t npages,
+ u64 *vram, u64 npages,
enum MIGRATION_COPY_DIR direction,
struct dma_fence **mfence)
{
- const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
+ const u64 GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- uint64_t gart_s, gart_d;
+ u64 gart_s, gart_d;
struct dma_fence *next;
- uint64_t size;
+ u64 size;
int r;
mutex_lock(&adev->mman.gtt_window_lock);
@@ -217,7 +217,7 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
page = pfn_to_page(pfn);
svm_range_bo_ref(prange->svm_bo);
page->zone_device_data = prange->svm_bo;
- zone_device_page_init(page);
+ zone_device_page_init(page, 0);
}
static void
@@ -260,39 +260,39 @@ static void svm_migrate_put_sys_page(unsigned long addr)
put_page(page);
}
-static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
+static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
- unsigned long upages = 0;
+ unsigned long mpages = 0;
unsigned long i;
for (i = 0; i < migrate->npages; i++) {
- if (migrate->src[i] & MIGRATE_PFN_VALID &&
- !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
- upages++;
+ if (migrate->dst[i] & MIGRATE_PFN_VALID &&
+ migrate->src[i] & MIGRATE_PFN_MIGRATE)
+ mpages++;
}
- return upages;
+ return mpages;
}
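The reworked counter tallies pages that actually moved, which requires both a valid destination PFN and a source slot selected for migration. A stand-alone sketch (the flag bit values match the upstream definitions but are treated as assumptions here):

    #include <stdio.h>

    #define MIGRATE_PFN_VALID   (1UL << 0) /* assumed bit values, sketch only */
    #define MIGRATE_PFN_MIGRATE (1UL << 1)

    /* Sketch: a page counts as migrated only if the destination slot is
     * valid and the source slot was selected for migration.
     */
    static unsigned long count_migrated(const unsigned long *src,
                                        const unsigned long *dst,
                                        unsigned long npages)
    {
        unsigned long i, mpages = 0;

        for (i = 0; i < npages; i++)
            if ((dst[i] & MIGRATE_PFN_VALID) &&
                (src[i] & MIGRATE_PFN_MIGRATE))
                mpages++;
        return mpages;
    }

    int main(void)
    {
        unsigned long src[] = { MIGRATE_PFN_MIGRATE, 0 };
        unsigned long dst[] = { MIGRATE_PFN_VALID, MIGRATE_PFN_VALID };

        printf("%lu\n", count_migrated(src, dst, 2)); /* 1 */
        return 0;
    }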
static int
svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
- dma_addr_t *scratch, uint64_t ttm_res_offset)
+ dma_addr_t *scratch, u64 ttm_res_offset)
{
- uint64_t npages = migrate->npages;
+ u64 npages = migrate->npages;
struct amdgpu_device *adev = node->adev;
struct device *dev = adev->dev;
struct amdgpu_res_cursor cursor;
- uint64_t mpages = 0;
+ u64 mpages = 0;
dma_addr_t *src;
- uint64_t *dst;
- uint64_t i, j;
+ u64 *dst;
+ u64 i, j;
int r;
pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
prange->last, ttm_res_offset);
src = scratch;
- dst = (uint64_t *)(scratch + npages);
+ dst = (u64 *)(scratch + npages);
amdgpu_res_first(prange->ttm_res, ttm_res_offset,
npages << PAGE_SHIFT, &cursor);
@@ -385,11 +385,11 @@ out_free_vram_pages:
static long
svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
- struct vm_area_struct *vma, uint64_t start,
- uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
+ struct vm_area_struct *vma, u64 start,
+ u64 end, uint32_t trigger, u64 ttm_res_offset)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
- uint64_t npages = (end - start) >> PAGE_SHIFT;
+ u64 npages = (end - start) >> PAGE_SHIFT;
struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
@@ -408,7 +408,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
buf = kvcalloc(npages,
- 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
+ 2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
GFP_KERNEL);
if (!buf)
goto out;
@@ -447,9 +447,9 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
- mpages = cpages - svm_migrate_unsuccessful_pages(&migrate);
- pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
- mpages, cpages, migrate.npages);
+ mpages = svm_migrate_successful_pages(&migrate);
+ pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
+ mpages, cpages, migrate.npages);
svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
@@ -490,7 +490,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
{
unsigned long addr, start, end;
struct vm_area_struct *vma;
- uint64_t ttm_res_offset;
+ u64 ttm_res_offset;
struct kfd_node *node;
unsigned long mpages = 0;
long r = 0;
@@ -567,8 +567,9 @@ out:
return r < 0 ? r : 0;
}
-static void svm_migrate_page_free(struct page *page)
+static void svm_migrate_folio_free(struct folio *folio)
{
+ struct page *page = &folio->page;
struct svm_range_bo *svm_bo = page->zone_device_data;
if (svm_bo) {
@@ -580,14 +581,14 @@ static void svm_migrate_page_free(struct page *page)
static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
- dma_addr_t *scratch, uint64_t npages)
+ dma_addr_t *scratch, u64 npages)
{
struct device *dev = adev->dev;
- uint64_t *src;
+ u64 *src;
dma_addr_t *dst;
struct page *dpage;
- uint64_t i = 0, j;
- uint64_t addr;
+ u64 i = 0, j;
+ u64 addr;
int r = 0;
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
@@ -595,7 +596,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
addr = migrate->start;
- src = (uint64_t *)(scratch + npages);
+ src = (u64 *)(scratch + npages);
dst = scratch;
for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
@@ -683,12 +684,11 @@ out_oom:
*/
static long
svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
- struct vm_area_struct *vma, uint64_t start, uint64_t end,
+ struct vm_area_struct *vma, u64 start, u64 end,
uint32_t trigger, struct page *fault_page)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
- uint64_t npages = (end - start) >> PAGE_SHIFT;
- unsigned long upages = npages;
+ u64 npages = (end - start) >> PAGE_SHIFT;
unsigned long cpages = 0;
unsigned long mpages = 0;
struct amdgpu_device *adev = node->adev;
@@ -710,7 +710,7 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
buf = kvcalloc(npages,
- 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
+ 2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
GFP_KERNEL);
if (!buf)
goto out;
@@ -736,7 +736,6 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
if (!cpages) {
pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
prange->start, prange->last);
- upages = svm_migrate_unsuccessful_pages(&migrate);
goto out_free;
}
if (cpages != npages)
@@ -749,9 +748,9 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
scratch, npages);
migrate_vma_pages(&migrate);
- upages = svm_migrate_unsuccessful_pages(&migrate);
- pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
- upages, cpages, migrate.npages);
+ mpages = svm_migrate_successful_pages(&migrate);
+ pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
+ mpages, cpages, migrate.npages);
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
@@ -764,8 +763,7 @@ out_free:
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
node->id, 0, trigger, r);
out:
- if (!r && cpages) {
- mpages = cpages - upages;
+ if (!r && mpages) {
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
@@ -848,6 +846,9 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
}
if (r >= 0) {
+ WARN_ONCE(prange->vram_pages < mpages,
+ "Recorded vram pages(0x%llx) should not be less than migration pages(0x%lx).",
+ prange->vram_pages, mpages);
prange->vram_pages -= mpages;
/* prange does not have vram page set its actual_loc to system
@@ -1008,7 +1009,7 @@ out_mmput:
}
static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
- .page_free = svm_migrate_page_free,
+ .folio_free = svm_migrate_folio_free,
.migrate_to_ram = svm_migrate_to_ram,
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
index 2eebf67f9c2c..2b7fd442d29c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
@@ -31,7 +31,6 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
-#include <linux/hmm.h>
#include "kfd_priv.h"
#include "kfd_svm.h"
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index aee2212e52f6..33aa23450b3f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -78,8 +78,8 @@ err_ioctl:
static void kfd_exit(void)
{
kfd_cleanup_processes();
- kfd_debugfs_fini();
kfd_process_destroy_wq();
+ kfd_debugfs_fini();
kfd_procfs_shutdown();
kfd_topology_shutdown();
kfd_chardev_exit();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 97933d2a3803..f2dee320fada 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -373,7 +373,7 @@ static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stac
{
struct v9_mqd *m = get_mqd(mqd);
- *ctl_stack_size = m->cp_hqd_cntl_stack_size;
+ *ctl_stack_size = m->cp_hqd_cntl_stack_size * NUM_XCC(mm->dev->xcc_mask);
}
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
@@ -388,6 +388,24 @@ static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, voi
memcpy(ctl_stack_dst, ctl_stack, m->cp_hqd_cntl_stack_size);
}
+static void checkpoint_mqd_v9_4_3(struct mqd_manager *mm,
+ void *mqd,
+ void *mqd_dst,
+ void *ctl_stack_dst)
+{
+ struct v9_mqd *m;
+ int xcc;
+ uint64_t size = get_mqd(mqd)->cp_mqd_stride_size;
+
+ for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
+ m = get_mqd(mqd + size * xcc);
+
+ checkpoint_mqd(mm, m,
+ (uint8_t *)mqd_dst + sizeof(*m) * xcc,
+ (uint8_t *)ctl_stack_dst + m->cp_hqd_cntl_stack_size * xcc);
+ }
+}
+
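The per-XCC checkpoint packs all MQDs first, then all control stacks, so each instance's destination offset is a simple multiple, matching the "mqd[xcc0], mqd[xcc1], ..., ctl_stack[xcc0], ctl_stack[xcc1], ..." layout noted below. A stand-alone sketch of the offset math (the sizes are placeholders, not real hardware values):

    #include <stdio.h>

    /* Sketch: with N XCCs, instance n lands at mqd_dst + mqd_size * n and
     * ctl_stack_dst + ctl_stack_size * n.
     */
    int main(void)
    {
        const unsigned long mqd_size = 512, ctl_stack_size = 4096; /* placeholders */
        unsigned int xcc, num_xcc = 4;

        for (xcc = 0; xcc < num_xcc; xcc++)
            printf("xcc%u: mqd@+%lu ctl_stack@+%lu\n", xcc,
                   mqd_size * xcc, ctl_stack_size * xcc);
        return 0;
    }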
static void restore_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *qp,
@@ -764,13 +782,35 @@ static void restore_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
const void *mqd_src,
const void *ctl_stack_src, u32 ctl_stack_size)
{
- restore_mqd(mm, mqd, mqd_mem_obj, gart_addr, qp, mqd_src, ctl_stack_src, ctl_stack_size);
- if (amdgpu_sriov_multi_vf_mode(mm->dev->adev)) {
- struct v9_mqd *m;
+ struct kfd_mem_obj xcc_mqd_mem_obj;
+ u32 mqd_ctl_stack_size;
+ struct v9_mqd *m;
+ u32 num_xcc;
+ int xcc;
- m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
- m->cp_hqd_pq_doorbell_control |= 1 <<
- CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT;
+ uint64_t offset = mm->mqd_stride(mm, qp);
+
+ mm->dev->dqm->current_logical_xcc_start++;
+
+ num_xcc = NUM_XCC(mm->dev->xcc_mask);
+ mqd_ctl_stack_size = ctl_stack_size / num_xcc;
+
+ memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));
+
+ /* Set the MQD pointer and gart address to XCC0 MQD */
+ *mqd = mqd_mem_obj->cpu_ptr;
+ if (gart_addr)
+ *gart_addr = mqd_mem_obj->gpu_addr;
+
+ for (xcc = 0; xcc < num_xcc; xcc++) {
+ get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset * xcc);
+ restore_mqd(mm, (void **)&m,
+ &xcc_mqd_mem_obj,
+ NULL,
+ qp,
+ (uint8_t *)mqd_src + xcc * sizeof(*m),
+ (uint8_t *)ctl_stack_src + xcc * mqd_ctl_stack_size,
+ mqd_ctl_stack_size);
}
}
static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
@@ -906,7 +946,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->free_mqd = kfd_free_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->get_checkpoint_info = get_checkpoint_info;
- mqd->checkpoint_mqd = checkpoint_mqd;
mqd->mqd_size = sizeof(struct v9_mqd);
mqd->mqd_stride = mqd_stride_v9;
#if defined(CONFIG_DEBUG_FS)
@@ -918,16 +957,18 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->init_mqd = init_mqd_v9_4_3;
mqd->load_mqd = load_mqd_v9_4_3;
mqd->update_mqd = update_mqd_v9_4_3;
- mqd->restore_mqd = restore_mqd_v9_4_3;
mqd->destroy_mqd = destroy_mqd_v9_4_3;
mqd->get_wave_state = get_wave_state_v9_4_3;
+ mqd->checkpoint_mqd = checkpoint_mqd_v9_4_3;
+ mqd->restore_mqd = restore_mqd_v9_4_3;
} else {
mqd->init_mqd = init_mqd;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
- mqd->restore_mqd = restore_mqd;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->get_wave_state = get_wave_state;
+ mqd->checkpoint_mqd = checkpoint_mqd;
+ mqd->restore_mqd = restore_mqd;
}
break;
case KFD_MQD_TYPE_HIQ:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 8fa6489b6f5d..505036968a77 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -240,7 +240,7 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
packet->bitfields2.engine_sel =
engine_sel__mes_map_queues__compute_vi;
- packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
+ packet->bitfields2.gws_control_queue = q->properties.is_gws ? 1 : 0;
packet->bitfields2.extended_engine_sel =
extended_engine_sel__mes_map_queues__legacy_engine_sel;
packet->bitfields2.queue_type =
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d221c58dccc3..70ef051511bb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -111,7 +111,14 @@
#define KFD_KERNEL_QUEUE_SIZE 2048
-#define KFD_UNMAP_LATENCY_MS (4000)
+/* KFD_UNMAP_LATENCY_MS is the timeout the CP spends waiting for SDMA
+ * preemption. One XCC can be associated with 2 SDMA engines, and
+ * queue_preemption_timeout_ms is the time the driver waits for the CP to
+ * return the UNMAP_QUEUE fence. Thus the math is
+ * queue_preemption_timeout_ms = sdma_preemption_time * 2 + cp workload
+ * The formula below makes the CP workload 10% of the total timeout.
+ */
+#define KFD_UNMAP_LATENCY_MS \
+ ((queue_preemption_timeout_ms - queue_preemption_timeout_ms / 10) >> 1)
#define KFD_MAX_SDMA_QUEUES 128
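As a worked example of the formula (assuming a queue_preemption_timeout_ms of 9000 ms; the module default may differ): 10% is set aside for the CP workload and the remainder is split across the two SDMA engines, giving (9000 - 900) >> 1 = 4050 ms. A stand-alone check:

    #include <stdio.h>

    int main(void)
    {
        unsigned int queue_preemption_timeout_ms = 9000; /* assumed sample */
        unsigned int unmap_latency_ms =
            (queue_preemption_timeout_ms -
             queue_preemption_timeout_ms / 10) >> 1;

        printf("KFD_UNMAP_LATENCY_MS = %u ms\n", unmap_latency_ms); /* 4050 */
        return 0;
    }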
@@ -372,6 +379,11 @@ struct kfd_dev {
/* bitmap for dynamic doorbell allocation from doorbell object */
unsigned long *doorbell_bitmap;
+
+ /* for dynamic partitioning */
+ int kfd_dev_lock;
+
+ atomic_t kfd_processes_count;
};
enum kfd_mempool {
@@ -1536,7 +1548,7 @@ static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
int kfd_send_exception_to_runtime(struct kfd_process *p,
unsigned int queue_id,
uint64_t error_reason);
-bool kfd_is_locked(void);
+bool kfd_is_locked(struct kfd_dev *kfd);
/* Compute profile */
void kfd_inc_compute_active(struct kfd_node *dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 722ac1662bdc..a085faac9fe1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -854,7 +854,7 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
*/
mutex_lock(&kfd_processes_mutex);
- if (kfd_is_locked()) {
+ if (kfd_is_locked(NULL)) {
pr_debug("KFD is locked! Cannot create process");
process = ERR_PTR(-EINVAL);
goto out;
@@ -1083,11 +1083,12 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
* for auto suspend
*/
if (pdd->runtime_inuse) {
- pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
pdd->runtime_inuse = false;
}
+ atomic_dec(&pdd->dev->kfd->kfd_processes_count);
+
kfree(pdd);
p->pdds[i] = NULL;
}
@@ -1160,9 +1161,6 @@ static void kfd_process_wq_release(struct work_struct *work)
release_work);
struct dma_fence *ef;
- kfd_process_dequeue_from_all_devices(p);
- pqm_uninit(&p->pqm);
-
/*
* If GPU in reset, user queues may still running, wait for reset complete.
*/
@@ -1224,6 +1222,14 @@ static void kfd_process_notifier_release_internal(struct kfd_process *p)
cancel_delayed_work_sync(&p->eviction_work);
cancel_delayed_work_sync(&p->restore_work);
+ /*
+ * Dequeue and destroy user queues. It is not safe for the GPU to
+ * access system memory after the mmu release notifier callback
+ * returns, because exit_mmap frees the process memory afterwards.
+ */
+ kfd_process_dequeue_from_all_devices(p);
+ pqm_uninit(&p->pqm);
+
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
@@ -1649,6 +1655,8 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
/* Init idr used for memory handle translation */
idr_init(&pdd->alloc_idr);
+ atomic_inc(&dev->kfd->kfd_processes_count);
+
return pdd;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index c643e0ccec52..7fbb5c274ccc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -914,7 +914,10 @@ static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
q_data = (struct kfd_criu_queue_priv_data *)q_private_data;
- /* data stored in this order: priv_data, mqd, ctl_stack */
+ /*
+ * data stored in this order:
+ * priv_data, mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]...
+ */
q_data->mqd_size = mqd_size;
q_data->ctl_stack_size = ctl_stack_size;
@@ -963,7 +966,7 @@ int kfd_criu_checkpoint_queues(struct kfd_process *p,
}
static void set_queue_properties_from_criu(struct queue_properties *qp,
- struct kfd_criu_queue_priv_data *q_data)
+ struct kfd_criu_queue_priv_data *q_data, uint32_t num_xcc)
{
qp->is_interop = false;
qp->queue_percent = q_data->q_percent;
@@ -976,7 +979,11 @@ static void set_queue_properties_from_criu(struct queue_properties *qp,
qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
- qp->ctl_stack_size = q_data->ctl_stack_size;
+ if (q_data->type == KFD_QUEUE_TYPE_COMPUTE)
+ qp->ctl_stack_size = q_data->ctl_stack_size / num_xcc;
+ else
+ qp->ctl_stack_size = q_data->ctl_stack_size;
+
qp->type = q_data->type;
qp->format = q_data->format;
}
@@ -1036,12 +1043,15 @@ int kfd_criu_restore_queue(struct kfd_process *p,
goto exit;
}
- /* data stored in this order: mqd, ctl_stack */
+ /*
+ * data stored in this order:
+ * mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]...
+ */
mqd = q_extra_data;
ctl_stack = mqd + q_data->mqd_size;
memset(&qp, 0, sizeof(qp));
- set_queue_properties_from_criu(&qp, q_data);
+ set_queue_properties_from_criu(&qp, q_data, NUM_XCC(pdd->dev->adev->gfx.xcc_mask));
print_queue_properties(&qp);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index a65c67cf56ff..f1e7583650c4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -297,16 +297,16 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
goto out_err_unreserve;
}
- if (properties->ctx_save_restore_area_size != topo_dev->node_props.cwsr_size) {
- pr_debug("queue cwsr size 0x%x not equal to node cwsr size 0x%x\n",
+ if (properties->ctx_save_restore_area_size < topo_dev->node_props.cwsr_size) {
+ pr_debug("queue cwsr size 0x%x not sufficient for node cwsr size 0x%x\n",
properties->ctx_save_restore_area_size,
topo_dev->node_props.cwsr_size);
err = -EINVAL;
goto out_err_unreserve;
}
- total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
- * NUM_XCC(pdd->dev->xcc_mask);
+ total_cwsr_size = (properties->ctx_save_restore_area_size +
+ topo_dev->node_props.debug_memory_size) * NUM_XCC(pdd->dev->xcc_mask);
total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
err = kfd_queue_buffer_get(vm, (void *)properties->ctx_save_restore_area_address,
@@ -352,8 +352,8 @@ int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_prope
topo_dev = kfd_topology_device_by_id(pdd->dev->id);
if (!topo_dev)
return -EINVAL;
- total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
- * NUM_XCC(pdd->dev->xcc_mask);
+ total_cwsr_size = (properties->ctx_save_restore_area_size +
+ topo_dev->node_props.debug_memory_size) * NUM_XCC(pdd->dev->xcc_mask);
total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
kfd_queue_buffer_svm_put(pdd, properties->ctx_save_restore_area_address, total_cwsr_size);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index 83d9384ac815..a499449fcb06 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -253,9 +253,9 @@ void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid)
task_info = amdgpu_vm_get_task_info_pasid(dev->adev, pasid);
if (task_info) {
/* Report VM faults from user applications, not retry from kernel */
- if (task_info->pid)
+ if (task_info->task.pid)
kfd_smi_event_add(0, dev, KFD_SMI_EVENT_VMFAULT, KFD_EVENT_FMT_VMFAULT(
- task_info->pid, task_info->task_name));
+ task_info->task.pid, task_info->task.comm));
amdgpu_vm_put_task_info(task_info);
}
}
@@ -359,8 +359,8 @@ void kfd_smi_event_process(struct kfd_process_device *pdd, bool start)
kfd_smi_event_add(0, pdd->dev,
start ? KFD_SMI_EVENT_PROCESS_START :
KFD_SMI_EVENT_PROCESS_END,
- KFD_EVENT_FMT_PROCESS(task_info->pid,
- task_info->task_name));
+ KFD_EVENT_FMT_PROCESS(task_info->task.pid,
+ task_info->task.comm));
amdgpu_vm_put_task_info(task_info);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 865dca2547de..97c2270f278f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1171,13 +1171,12 @@ svm_range_split_head(struct svm_range *prange, uint64_t new_start,
}
static void
-svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
- struct svm_range *pchild, enum svm_work_list_ops op)
+svm_range_add_child(struct svm_range *prange, struct svm_range *pchild, enum svm_work_list_ops op)
{
pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
pchild, pchild->start, pchild->last, prange, op);
- pchild->work_item.mm = mm;
+ pchild->work_item.mm = NULL;
pchild->work_item.op = op;
list_add_tail(&pchild->child_list, &prange->child_list);
}
@@ -1190,7 +1189,7 @@ svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
}
static uint64_t
-svm_range_get_pte_flags(struct kfd_node *node,
+svm_range_get_pte_flags(struct kfd_node *node, struct amdgpu_vm *vm,
struct svm_range *prange, int domain)
{
struct kfd_node *bo_node;
@@ -1278,7 +1277,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
/* system memory accessed by the dGPU */
} else {
- if (gc_ip_version < IP_VERSION(9, 5, 0))
+ if (gc_ip_version < IP_VERSION(9, 5, 0) || ext_coherent)
mapping_flags |= AMDGPU_VM_MTYPE_UC;
else
mapping_flags |= AMDGPU_VM_MTYPE_NC;
@@ -1293,10 +1292,6 @@ svm_range_get_pte_flags(struct kfd_node *node,
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
- mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
-
- if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
- mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
@@ -1306,7 +1301,10 @@ svm_range_get_pte_flags(struct kfd_node *node,
if (gc_ip_version >= IP_VERSION(12, 0, 0))
pte_flags |= AMDGPU_PTE_IS_PTE;
- pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
+ amdgpu_gmc_get_vm_pte(node->adev, vm, NULL, mapping_flags, &pte_flags);
+ pte_flags |= AMDGPU_PTE_READABLE;
+ if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
+ pte_flags |= AMDGPU_PTE_WRITEABLE;
return pte_flags;
}
@@ -1413,7 +1411,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
last_start, prange->start + i, last_domain ? "GPU" : "CPU");
- pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
+ pte_flags = svm_range_get_pte_flags(pdd->dev, vm, prange, last_domain);
if (readonly)
pte_flags &= ~AMDGPU_PTE_WRITEABLE;
@@ -1700,7 +1698,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
start = map_start << PAGE_SHIFT;
end = (map_last + 1) << PAGE_SHIFT;
for (addr = start; !r && addr < end; ) {
- struct hmm_range *hmm_range = NULL;
+ struct amdgpu_hmm_range *range = NULL;
unsigned long map_start_vma;
unsigned long map_last_vma;
struct vm_area_struct *vma;
@@ -1715,10 +1713,36 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
next = min(vma->vm_end, end);
npages = (next - addr) >> PAGE_SHIFT;
+ /* HMM requires at least READ permission. If provided with PROT_NONE,
+ * unmap the memory; if it is not already mapped, this is a no-op.
+ * If PROT_WRITE is provided without READ, warn first and then unmap.
+ */
+ if (!(vma->vm_flags & VM_READ)) {
+ unsigned long e, s;
+
+ svm_range_lock(prange);
+ if (vma->vm_flags & VM_WRITE)
+ pr_debug("VM_WRITE without VM_READ is not supported");
+ s = max(start, prange->start);
+ e = min(end, prange->last);
+ if (e >= s)
+ r = svm_range_unmap_from_gpus(prange, s, e,
+ KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU);
+ svm_range_unlock(prange);
+ /* If unmap returns non-zero, we'll bail on the next for-loop
+ * iteration, so just leave r set and continue.
+ */
+ addr = next;
+ continue;
+ }
+
WRITE_ONCE(p->svms.faulting_task, current);
- r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
- readonly, owner, NULL,
- &hmm_range);
+ range = amdgpu_hmm_range_alloc(NULL);
+ if (likely(range))
+ r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
+ readonly, owner, range);
+ else
+ r = -ENOMEM;
WRITE_ONCE(p->svms.faulting_task, NULL);
if (r)
pr_debug("failed %d to get svm range pages\n", r);
@@ -1729,7 +1753,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
if (!r) {
offset = (addr >> PAGE_SHIFT) - prange->start;
r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
- hmm_range->hmm_pfns);
+ range->hmm_range.hmm_pfns);
if (r)
pr_debug("failed %d to dma map range\n", r);
}
@@ -1737,14 +1761,17 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
svm_range_lock(prange);
/* Free backing memory of hmm_range if it was initialized
- * Overrride return value to TRY AGAIN only if prior returns
+ * Override return value to TRY AGAIN only if prior returns
* were successful
*/
- if (hmm_range && amdgpu_hmm_range_get_pages_done(hmm_range) && !r) {
+ if (range && !amdgpu_hmm_range_valid(range) && !r) {
pr_debug("hmm update the range, need validate again\n");
r = -EAGAIN;
}
+ /* Free the hmm range */
+ amdgpu_hmm_range_free(range);
+
if (!r && !list_empty(&prange->child_list)) {
pr_debug("range split by unmap in parallel, validate again\n");
r = -EAGAIN;
@@ -2394,15 +2421,17 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
prange->work_item.op != SVM_OP_UNMAP_RANGE)
prange->work_item.op = op;
} else {
- prange->work_item.op = op;
-
- /* Pairs with mmput in deferred_list_work */
- mmget(mm);
- prange->work_item.mm = mm;
- list_add_tail(&prange->deferred_list,
- &prange->svms->deferred_range_list);
- pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
- prange, prange->start, prange->last, op);
+ /* Pairs with mmput in deferred_list_work.
+ * If the process is exiting and the mm is gone, don't update the
+ * mmu notifier.
+ */
+ if (mmget_not_zero(mm)) {
+ prange->work_item.mm = mm;
+ prange->work_item.op = op;
+ list_add_tail(&prange->deferred_list,
+ &prange->svms->deferred_range_list);
+ pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
+ prange, prange->start, prange->last, op);
+ }
}
spin_unlock(&svms->deferred_list_lock);
}
@@ -2416,8 +2445,7 @@ void schedule_deferred_list_work(struct svm_range_list *svms)
}
static void
-svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
- struct svm_range *prange, unsigned long start,
+svm_range_unmap_split(struct svm_range *parent, struct svm_range *prange, unsigned long start,
unsigned long last)
{
struct svm_range *head;
@@ -2438,12 +2466,12 @@ svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
svm_range_split(tail, last + 1, tail->last, &head);
if (head != prange && tail != prange) {
- svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
- svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
+ svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE);
+ svm_range_add_child(parent, tail, SVM_OP_ADD_RANGE);
} else if (tail != prange) {
- svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
+ svm_range_add_child(parent, tail, SVM_OP_UNMAP_RANGE);
} else if (head != prange) {
- svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
+ svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE);
} else if (parent != prange) {
prange->work_item.op = SVM_OP_UNMAP_RANGE;
}
@@ -2520,14 +2548,14 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
l = min(last, pchild->last);
if (l >= s)
svm_range_unmap_from_gpus(pchild, s, l, trigger);
- svm_range_unmap_split(mm, prange, pchild, start, last);
+ svm_range_unmap_split(prange, pchild, start, last);
mutex_unlock(&pchild->lock);
}
s = max(start, prange->start);
l = min(last, prange->last);
if (l >= s)
svm_range_unmap_from_gpus(prange, s, l, trigger);
- svm_range_unmap_split(mm, prange, prange, start, last);
+ svm_range_unmap_split(prange, prange, start, last);
if (unmap_parent)
svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
@@ -2570,8 +2598,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
if (range->event == MMU_NOTIFY_RELEASE)
return true;
- if (!mmget_not_zero(mni->mm))
- return true;
start = mni->interval_tree.start;
last = mni->interval_tree.last;
@@ -2598,7 +2624,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
}
svm_range_unlock(prange);
- mmput(mni->mm);
return true;
}
@@ -3026,6 +3051,8 @@ retry_write_locked:
if (svms->checkpoint_ts[gpuidx] != 0) {
if (amdgpu_ih_ts_after_or_equal(ts, svms->checkpoint_ts[gpuidx])) {
pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+ if (write_locked)
+ mmap_write_downgrade(mm);
r = -EAGAIN;
goto out_unlock_svms;
} else {
@@ -3666,6 +3693,8 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
/* TODO: unmap ranges from GPU that lost access */
}
+ update_mapping |= !p->xnack_enabled && !list_empty(&remap_list);
+
list_for_each_entry_safe(prange, next, &remove_list, update_list) {
pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
prange->svms, prange, prange->start,
@@ -4242,7 +4271,7 @@ svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
break;
default:
- r = EINVAL;
+ r = -EINVAL;
break;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 01c7a4877904..a63dfc95b602 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -31,7 +31,6 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
-#include <linux/hmm.h>
#include "amdgpu.h"
#include "kfd_priv.h"
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index baa2374acdeb..811636af14ea 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -510,6 +510,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.capability |=
HSA_CAP_AQL_QUEUE_DOUBLE_MAP;
+ if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0) &&
+ (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+ dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
+
sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_fcompute",
dev->node_props.max_engine_clk_fcompute);
@@ -526,6 +530,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
dev->gpu->kfd->sdma_fw_version);
sysfs_show_64bit_prop(buffer, offs, "unique_id",
+ dev->gpu->xcp &&
+ (dev->gpu->xcp->xcp_mgr->mode !=
+ AMDGPU_SPX_PARTITION_MODE) ?
+ dev->gpu->xcp->unique_id :
dev->gpu->adev->unique_id);
sysfs_show_32bit_prop(buffer, offs, "num_xcc",
NUM_XCC(dev->gpu->xcc_mask));
@@ -1583,7 +1591,8 @@ static int kfd_dev_create_p2p_links(void)
break;
if (!dev->gpu || !dev->gpu->adev ||
(dev->gpu->kfd->hive_id &&
- dev->gpu->kfd->hive_id == new_dev->gpu->kfd->hive_id))
+ dev->gpu->kfd->hive_id == new_dev->gpu->kfd->hive_id &&
+ amdgpu_xgmi_get_is_sharing_enabled(dev->gpu->adev, new_dev->gpu->adev)))
goto next;
/* check if node(s) is/are peer accessible in one direction or bi-direction */
@@ -2008,8 +2017,6 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
if (!amdgpu_sriov_vf(dev->gpu->adev))
dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
- if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
- dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
} else {
dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
index faed84172dd4..44009aa8216e 100644
--- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
+++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
@@ -21,6 +21,7 @@
*
*/
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -45,18 +46,29 @@ static const struct drm_driver amdgpu_xcp_driver = {
static int8_t pdev_num;
static struct xcp_device *xcp_dev[MAX_XCP_PLATFORM_DEVICE];
+static DEFINE_MUTEX(xcp_mutex);
int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev)
{
struct platform_device *pdev;
struct xcp_device *pxcp_dev;
char dev_name[20];
- int ret;
+ int ret, i;
+
+ guard(mutex)(&xcp_mutex);
if (pdev_num >= MAX_XCP_PLATFORM_DEVICE)
return -ENODEV;
- snprintf(dev_name, sizeof(dev_name), "amdgpu_xcp_%d", pdev_num);
+ for (i = 0; i < MAX_XCP_PLATFORM_DEVICE; i++) {
+ if (!xcp_dev[i])
+ break;
+ }
+
+ if (i >= MAX_XCP_PLATFORM_DEVICE)
+ return -ENODEV;
+
+ snprintf(dev_name, sizeof(dev_name), "amdgpu_xcp_%d", i);
pdev = platform_device_register_simple(dev_name, -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
@@ -72,8 +84,8 @@ int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev)
goto out_devres;
}
- xcp_dev[pdev_num] = pxcp_dev;
- xcp_dev[pdev_num]->pdev = pdev;
+ xcp_dev[i] = pxcp_dev;
+ xcp_dev[i]->pdev = pdev;
*ddev = &pxcp_dev->drm;
pdev_num++;
@@ -88,16 +100,43 @@ out_unregister:
}
EXPORT_SYMBOL(amdgpu_xcp_drm_dev_alloc);
-void amdgpu_xcp_drv_release(void)
+static void free_xcp_dev(int8_t index)
{
- for (--pdev_num; pdev_num >= 0; --pdev_num) {
- struct platform_device *pdev = xcp_dev[pdev_num]->pdev;
+ if ((index < MAX_XCP_PLATFORM_DEVICE) && (xcp_dev[index])) {
+ struct platform_device *pdev = xcp_dev[index]->pdev;
devres_release_group(&pdev->dev, NULL);
platform_device_unregister(pdev);
- xcp_dev[pdev_num] = NULL;
+
+ xcp_dev[index] = NULL;
+ pdev_num--;
+ }
+}
+
+void amdgpu_xcp_drm_dev_free(struct drm_device *ddev)
+{
+ int8_t i;
+
+ guard(mutex)(&xcp_mutex);
+
+ for (i = 0; i < MAX_XCP_PLATFORM_DEVICE; i++) {
+ if ((xcp_dev[i]) && (&xcp_dev[i]->drm == ddev)) {
+ free_xcp_dev(i);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(amdgpu_xcp_drm_dev_free);
+
+void amdgpu_xcp_drv_release(void)
+{
+ int8_t i;
+
+ guard(mutex)(&xcp_mutex);
+
+ for (i = 0; pdev_num && i < MAX_XCP_PLATFORM_DEVICE; i++) {
+ free_xcp_dev(i);
}
- pdev_num = 0;
}
EXPORT_SYMBOL(amdgpu_xcp_drv_release);
diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
index c1c4b679bf95..580a1602c8e3 100644
--- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
+++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
@@ -25,5 +25,6 @@
#define _AMDGPU_XCP_DRV_H_
int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev);
+void amdgpu_xcp_drm_dev_free(struct drm_device *ddev);
void amdgpu_xcp_drv_release(void);
#endif /* _AMDGPU_XCP_DRV_H_ */
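
The amdgpu_xcp_drv.c rework above replaces the append-only `pdev_num` counter with a slot table protected by `xcp_mutex`: allocation scans for the first free index, and the new amdgpu_xcp_drm_dev_free() can release a single device without disturbing the others. A simplified sketch of the same slot-reuse pattern (the `demo_*` names are illustrative, not the driver's):

    #define MAX_SLOTS 8

    static void *slots[MAX_SLOTS];
    static int used;

    /* First-free-slot search, as added to amdgpu_xcp_drm_dev_alloc(). */
    static int slot_alloc(void *obj)
    {
        int i;

        for (i = 0; i < MAX_SLOTS; i++) {
            if (!slots[i]) {
                slots[i] = obj;
                used++;
                return i;   /* index stays stable until freed */
            }
        }
        return -1;          /* table full, like the driver's -ENODEV path */
    }

    /* Free one slot by index, as free_xcp_dev() does. */
    static void slot_free(int i)
    {
        if (i >= 0 && i < MAX_SLOTS && slots[i]) {
            slots[i] = NULL;
            used--;
        }
    }

In the kernel code, `guard(mutex)(&xcp_mutex)` from <linux/cleanup.h> acquires the lock and releases it automatically on every return path, which is why the early -ENODEV exits need no explicit unlock.
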
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 89d605de0595..0084a8d55254 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -44,6 +44,7 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/mmhubbub
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/mpc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/opp
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/pg
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/soc_and_ip_translator
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 7329b8cc2576..8e949fe77312 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -39,7 +39,8 @@ AMDGPUDM = \
amdgpu_dm_psr.o \
amdgpu_dm_replay.o \
amdgpu_dm_quirks.o \
- amdgpu_dm_wb.o
+ amdgpu_dm_wb.o \
+ amdgpu_dm_colorop.o
ifdef CONFIG_DRM_AMD_DC_FP
AMDGPUDM += dc_fpu.o
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d3100f641ac6..740711ac1037 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -39,13 +40,11 @@
#include "dc/dc_stat.h"
#include "dc/dc_state.h"
#include "amdgpu_dm_trace.h"
-#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"
-#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
@@ -56,7 +55,6 @@
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#include "amdgpu_dm_wb.h"
-#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"
#include "amd_shared.h"
@@ -82,6 +80,7 @@
#include <linux/component.h>
#include <linux/sort.h>
+#include <drm/drm_privacy_screen_consumer.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
@@ -102,15 +101,6 @@
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
-#include "dcn/dcn_1_0_offset.h"
-#include "dcn/dcn_1_0_sh_mask.h"
-#include "soc15_hw_ip.h"
-#include "soc15_common.h"
-#include "vega10_ip_offset.h"
-
-#include "gc/gc_11_0_0_offset.h"
-#include "gc/gc_11_0_0_sh_mask.h"
-
#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
@@ -243,6 +233,7 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
+static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state);
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
static int amdgpu_dm_atomic_check(struct drm_device *dev,
@@ -427,8 +418,7 @@ static inline bool update_planes_and_stream_adapter(struct dc *dc,
/*
* Previous frame finished and HW is ready for optimization.
*/
- if (update_type == UPDATE_TYPE_FAST)
- dc_post_update_surfaces_to_stream(dc);
+ dc_post_update_surfaces_to_stream(dc);
return dc_update_planes_and_stream(dc,
array_of_surface_update,
@@ -541,6 +531,50 @@ static void dm_pflip_high_irq(void *interrupt_params)
amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
}
+static void dm_handle_vmin_vmax_update(struct work_struct *offload_work)
+{
+ struct vupdate_offload_work *work = container_of(offload_work, struct vupdate_offload_work, work);
+ struct amdgpu_device *adev = work->adev;
+ struct dc_stream_state *stream = work->stream;
+ struct dc_crtc_timing_adjust *adjust = work->adjust;
+
+ mutex_lock(&adev->dm.dc_lock);
+ dc_stream_adjust_vmin_vmax(adev->dm.dc, stream, adjust);
+ mutex_unlock(&adev->dm.dc_lock);
+
+ dc_stream_release(stream);
+ kfree(work->adjust);
+ kfree(work);
+}
+
+static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
+ struct dc_stream_state *stream,
+ struct dc_crtc_timing_adjust *adjust)
+{
+ struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_NOWAIT);
+ if (!offload_work) {
+ drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
+ return;
+ }
+
+ struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_NOWAIT);
+ if (!adjust_copy) {
+ drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
+ kfree(offload_work);
+ return;
+ }
+
+ dc_stream_retain(stream);
+ memcpy(adjust_copy, adjust, sizeof(*adjust_copy));
+
+ INIT_WORK(&offload_work->work, dm_handle_vmin_vmax_update);
+ offload_work->adev = adev;
+ offload_work->stream = stream;
+ offload_work->adjust = adjust_copy;
+
+ queue_work(system_wq, &offload_work->work);
+}
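
The two helpers above move vmin/vmax programming out of the high-priority vupdate/vblank interrupt handlers: the IRQ path may not sleep, so it allocates with GFP_NOWAIT, takes a reference on the stream, copies the timing adjustment, and queues a work item; the worker then takes `dc_lock` (a sleeping mutex) and performs the actual dc_stream_adjust_vmin_vmax() call. A stripped-down sketch of the same deferral pattern, with illustrative `demo_*` names:

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct demo_work {
        struct work_struct work;
        int payload;                /* stands in for the copied timing adjust */
    };

    static void demo_worker(struct work_struct *w)
    {
        struct demo_work *dw = container_of(w, struct demo_work, work);

        /* Sleeping locks are legal here, unlike in the IRQ handler. */
        kfree(dw);
    }

    /* Callable from interrupt context: no sleeping allocations or mutexes. */
    static void demo_schedule(int payload)
    {
        struct demo_work *dw = kzalloc(sizeof(*dw), GFP_NOWAIT);

        if (!dw)
            return;                 /* best effort: drop this update on OOM */

        dw->payload = payload;      /* copy data the IRQ context owns */
        INIT_WORK(&dw->work, demo_worker);
        queue_work(system_wq, &dw->work);
    }
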
+
static void dm_vupdate_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
@@ -578,22 +612,27 @@ static void dm_vupdate_high_irq(void *interrupt_params)
* page-flip completion events that have been queued to us
* if a pageflip happened inside front-porch.
*/
- if (vrr_active) {
+ if (vrr_active && acrtc->dm_irq_params.stream) {
+ bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
+ bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
+ bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state
+ == VRR_STATE_ACTIVE_VARIABLE;
+
amdgpu_dm_crtc_handle_vblank(acrtc);
/* BTR processing for pre-DCE12 ASICs */
- if (acrtc->dm_irq_params.stream &&
- adev->family < AMDGPU_FAMILY_AI) {
+ if (adev->family < AMDGPU_FAMILY_AI) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
mod_freesync_handle_v_update(
adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
- dc_stream_adjust_vmin_vmax(
- adev->dm.dc,
- acrtc->dm_irq_params.stream,
- &acrtc->dm_irq_params.vrr_params.adjust);
+ if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
+ schedule_dc_vmin_vmax(adev,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
}
@@ -676,15 +715,20 @@ static void dm_crtc_high_irq(void *interrupt_params)
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
if (acrtc->dm_irq_params.stream &&
- acrtc->dm_irq_params.vrr_params.supported &&
- acrtc->dm_irq_params.freesync_config.state ==
- VRR_STATE_ACTIVE_VARIABLE) {
+ acrtc->dm_irq_params.vrr_params.supported) {
+ bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
+ bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
+ bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
+
mod_freesync_handle_v_update(adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
- dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
- &acrtc->dm_irq_params.vrr_params.adjust);
+			/* Update vmin/vmax only when FreeSync is in the active-variable
+			 * state, or when both PSR and Replay are disabled.
+			 */
+ if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
+ schedule_dc_vmin_vmax(adev, acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
}
/*
@@ -1758,10 +1802,11 @@ dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev,
return DMUB_STATUS_TIMEOUT;
}
-static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
+static void *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
{
- struct dml2_soc_bb *bb;
+ void *bb;
long long addr;
+ unsigned int bb_size;
int i = 0;
uint16_t chunk;
enum dmub_gpint_command send_addrs[] = {
@@ -1774,6 +1819,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(4, 0, 1):
+ bb_size = sizeof(struct dml2_soc_bb);
break;
default:
return NULL;
@@ -1781,7 +1827,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
bb = dm_allocate_gpu_mem(adev,
DC_MEM_ALLOC_TYPE_GART,
- sizeof(struct dml2_soc_bb),
+ bb_size,
&addr);
if (!bb)
return NULL;
@@ -1847,7 +1893,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
mutex_init(&adev->dm.audio_lock);
if (amdgpu_dm_irq_init(adev)) {
- drm_err(adev_to_drm(adev), "amdgpu: failed to initialize DM IRQ support.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize DM IRQ support.\n");
goto error;
}
@@ -1954,6 +2000,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.flags.disable_ips_in_vpb = 0;
+ /* DCN35 and above supports dynamic DTBCLK switch */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0))
+ init_data.flags.allow_0_dtb_clk = true;
+
/* Enable DWB for tested platforms only */
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
init_data.num_virtual_links = 1;
@@ -2037,7 +2087,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);
if (!adev->dm.hpd_rx_offload_wq) {
- drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd rx offload workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n");
goto error;
}
@@ -2053,7 +2103,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
drm_err(adev_to_drm(adev),
- "amdgpu: failed to initialize freesync_module.\n");
+ "failed to initialize freesync_module.\n");
} else
drm_dbg_driver(adev_to_drm(adev), "amdgpu: freesync_module init done %p.\n",
adev->dm.freesync_module);
@@ -2064,7 +2114,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.vblank_control_workqueue =
create_singlethread_workqueue("dm_vblank_control_workqueue");
if (!adev->dm.vblank_control_workqueue)
- drm_err(adev_to_drm(adev), "amdgpu: failed to initialize vblank_workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize vblank_workqueue.\n");
}
if (adev->dm.dc->caps.ips_support &&
@@ -2075,7 +2125,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
if (!adev->dm.hdcp_workqueue)
- drm_err(adev_to_drm(adev), "amdgpu: failed to initialize hdcp_workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize hdcp_workqueue.\n");
else
drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
@@ -2085,20 +2135,20 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_completion(&adev->dm.dmub_aux_transfer_done);
adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
if (!adev->dm.dmub_notify) {
- drm_info(adev_to_drm(adev), "amdgpu: fail to allocate adev->dm.dmub_notify");
+ drm_info(adev_to_drm(adev), "fail to allocate adev->dm.dmub_notify");
goto error;
}
adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
if (!adev->dm.delayed_hpd_wq) {
- drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd offload workqueue.\n");
+ drm_err(adev_to_drm(adev), "failed to create hpd offload workqueue.\n");
goto error;
}
amdgpu_dm_outbox_init(adev);
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
dmub_aux_setconfig_callback, false)) {
- drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub aux callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub aux callback");
goto error;
}
@@ -2107,7 +2157,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_FUSED_IO,
dmub_aux_fused_io_callback, false)) {
- drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub fused io callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub fused io callback");
goto error;
}
/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
@@ -2125,7 +2175,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dm_initialize_drm_device(adev)) {
drm_err(adev_to_drm(adev),
- "amdgpu: failed to initialize sw for display support.\n");
+ "failed to initialize sw for display support.\n");
goto error;
}
@@ -2140,14 +2190,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
drm_err(adev_to_drm(adev),
- "amdgpu: failed to initialize sw for display support.\n");
+ "failed to initialize vblank for display support.\n");
goto error;
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
amdgpu_dm_crtc_secure_display_create_contexts(adev);
if (!adev->dm.secure_display_ctx.crtc_ctx)
- drm_err(adev_to_drm(adev), "amdgpu: failed to initialize secure display contexts.\n");
+ drm_err(adev_to_drm(adev), "failed to initialize secure display contexts.\n");
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1))
adev->dm.secure_display_ctx.support_mul_roi = true;
@@ -2404,6 +2454,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_IB_MEM
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE
};
int r;
@@ -2570,7 +2621,7 @@ static int dm_sw_init(struct amdgpu_ip_block *ip_block)
adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
if (!adev->dm.cgs_device) {
- drm_err(adev_to_drm(adev), "amdgpu: failed to create cgs device.\n");
+ drm_err(adev_to_drm(adev), "failed to create cgs device.\n");
return -EINVAL;
}
@@ -2898,7 +2949,7 @@ static int dm_oem_i2c_hw_init(struct amdgpu_device *adev)
return -ENOMEM;
}
- r = i2c_add_adapter(&oem_i2c->base);
+ r = devm_i2c_add_adapter(adev->dev, &oem_i2c->base);
if (r) {
drm_info(adev_to_drm(adev), "Failed to register oem i2c\n");
kfree(oem_i2c);
@@ -2960,8 +3011,6 @@ static int dm_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- kfree(adev->dm.oem_i2c);
-
amdgpu_dm_hpd_fini(adev);
amdgpu_dm_irq_fini(adev);
@@ -2989,14 +3038,20 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n",
enable ? "enable" : "disable");
- if (enable) {
- if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
- rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
- } else
- rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
-
- if (rc)
- drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
+ if (dc_supports_vrr(adev->dm.dc->ctx->dce_version)) {
+ if (enable) {
+ if (amdgpu_dm_crtc_vrr_active(
+ to_dm_crtc_state(acrtc->base.state)))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(
+ &acrtc->base, true);
+ } else
+ rc = amdgpu_dm_crtc_set_vupdate_irq(
+ &acrtc->base, false);
+
+ if (rc)
+ drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n",
+ enable ? "en" : "dis");
+ }
irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
/* During gpu-reset we disable and then enable vblank irq, so
@@ -3060,19 +3115,68 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
}
}
-static int dm_prepare_suspend(struct amdgpu_ip_block *ip_block)
+static int dm_cache_state(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- if (amdgpu_in_reset(adev))
- return 0;
+ int r;
- WARN_ON(adev->dm.cached_state);
adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
- if (IS_ERR(adev->dm.cached_state))
- return PTR_ERR(adev->dm.cached_state);
+ if (IS_ERR(adev->dm.cached_state)) {
+ r = PTR_ERR(adev->dm.cached_state);
+ adev->dm.cached_state = NULL;
+ }
- return 0;
+ return adev->dm.cached_state ? 0 : r;
+}
+
+static void dm_destroy_cached_state(struct amdgpu_device *adev)
+{
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_device *ddev = adev_to_drm(adev);
+ struct dm_plane_state *dm_new_plane_state;
+ struct drm_plane_state *new_plane_state;
+ struct dm_crtc_state *dm_new_crtc_state;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ int i;
+
+ if (!dm->cached_state)
+ return;
+
+ /* Force mode set in atomic commit */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
+ new_crtc_state->active_changed = true;
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ reset_freesync_config_for_crtc(dm_new_crtc_state);
+ }
+
+ /*
+ * atomic_check is expected to create the dc states. We need to release
+ * them here, since they were duplicated as part of the suspend
+ * procedure.
+ */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->stream) {
+ WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
+ dc_stream_release(dm_new_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+ }
+ dm_new_crtc_state->base.color_mgmt_changed = true;
+ }
+
+ for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ if (dm_new_plane_state->dc_state) {
+ WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
+ dc_plane_state_release(dm_new_plane_state->dc_state);
+ dm_new_plane_state->dc_state = NULL;
+ }
+ }
+
+ drm_atomic_helper_resume(ddev, dm->cached_state);
+
+ dm->cached_state = NULL;
}
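
Factoring the teardown into dm_destroy_cached_state() gives dm_cache_state() a symmetric partner, so the save/restore contract lives in one place instead of being open-coded (the old open-coded version is deleted from dm_resume() further down). A sketch of the intended pairing; the `demo_*` wrappers are illustrative, the real callers are dm_suspend() and dm_resume():

    /* Save: snapshots the atomic state via drm_atomic_helper_suspend(). */
    static int demo_suspend(struct amdgpu_device *adev)
    {
        return dm_cache_state(adev);
    }

    /* Restore: releases the duplicated dc states, forces a modeset, then
     * replays the snapshot through drm_atomic_helper_resume(). */
    static void demo_resume(struct amdgpu_device *adev)
    {
        dm_destroy_cached_state(adev);
    }
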
static int dm_suspend(struct amdgpu_ip_block *ip_block)
@@ -3106,9 +3210,10 @@ static int dm_suspend(struct amdgpu_ip_block *ip_block)
}
if (!adev->dm.cached_state) {
- adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
- if (IS_ERR(adev->dm.cached_state))
- return PTR_ERR(adev->dm.cached_state);
+ int r = dm_cache_state(adev);
+
+ if (r)
+ return r;
}
s3_handle_hdmi_cec(adev_to_drm(adev), true);
@@ -3287,6 +3392,67 @@ static void apply_delay_after_dpcd_poweroff(struct amdgpu_device *adev,
}
}
+/**
+ * amdgpu_dm_dump_links_and_sinks - Debug dump of all DC links and their sinks
+ * @adev: amdgpu device pointer
+ *
+ * Iterates through all DC links and dumps information about local and remote
+ * (MST) sinks. Should be called after connector detection is complete to see
+ * the final state of all links.
+ */
+static void amdgpu_dm_dump_links_and_sinks(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct drm_device *dev = adev_to_drm(adev);
+ int li;
+
+ if (!dc)
+ return;
+
+ for (li = 0; li < dc->link_count; li++) {
+ struct dc_link *l = dc->links[li];
+ const char *name = NULL;
+ int rs;
+
+ if (!l)
+ continue;
+ if (l->local_sink && l->local_sink->edid_caps.display_name[0])
+ name = l->local_sink->edid_caps.display_name;
+ else
+ name = "n/a";
+
+ drm_dbg_kms(dev,
+ "LINK_DUMP[%d]: local_sink=%p type=%d sink_signal=%d sink_count=%u edid_name=%s mst_capable=%d mst_alloc_streams=%d\n",
+ li,
+ l->local_sink,
+ l->type,
+ l->local_sink ? l->local_sink->sink_signal : SIGNAL_TYPE_NONE,
+ l->sink_count,
+ name,
+ l->dpcd_caps.is_mst_capable,
+ l->mst_stream_alloc_table.stream_count);
+
+ /* Dump remote (MST) sinks if any */
+ for (rs = 0; rs < l->sink_count; rs++) {
+ struct dc_sink *rsink = l->remote_sinks[rs];
+ const char *rname = NULL;
+
+ if (!rsink)
+ continue;
+ if (rsink->edid_caps.display_name[0])
+ rname = rsink->edid_caps.display_name;
+ else
+ rname = "n/a";
+ drm_dbg_kms(dev,
+ " REMOTE_SINK[%d:%d]: sink=%p signal=%d edid_name=%s\n",
+ li, rs,
+ rsink,
+ rsink->sink_signal,
+ rname);
+ }
+ }
+}
+
static int dm_resume(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -3295,12 +3461,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
- struct drm_crtc *crtc;
- struct drm_crtc_state *new_crtc_state;
- struct dm_crtc_state *dm_new_crtc_state;
- struct drm_plane *plane;
- struct drm_plane_state *new_plane_state;
- struct dm_plane_state *dm_new_plane_state;
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
struct dc_state *dc_state;
@@ -3332,8 +3492,10 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
r = dm_dmub_hw_init(adev);
- if (r)
+ if (r) {
drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r);
+ return r;
+ }
dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -3457,44 +3619,12 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
}
drm_connector_list_iter_end(&iter);
- /* Force mode set in atomic commit */
- for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
- new_crtc_state->active_changed = true;
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- reset_freesync_config_for_crtc(dm_new_crtc_state);
- }
-
- /*
- * atomic_check is expected to create the dc states. We need to release
- * them here, since they were duplicated as part of the suspend
- * procedure.
- */
- for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- if (dm_new_crtc_state->stream) {
- WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
- dc_stream_release(dm_new_crtc_state->stream);
- dm_new_crtc_state->stream = NULL;
- }
- dm_new_crtc_state->base.color_mgmt_changed = true;
- }
-
- for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
- dm_new_plane_state = to_dm_plane_state(new_plane_state);
- if (dm_new_plane_state->dc_state) {
- WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
- dc_plane_state_release(dm_new_plane_state->dc_state);
- dm_new_plane_state->dc_state = NULL;
- }
- }
-
- drm_atomic_helper_resume(ddev, dm->cached_state);
-
- dm->cached_state = NULL;
+ dm_destroy_cached_state(adev);
/* Do mst topology probing after resuming cached state*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+ bool init = false;
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
@@ -3504,10 +3634,23 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
aconnector->mst_root)
continue;
- drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
+ scoped_guard(mutex, &aconnector->mst_mgr.lock) {
+ init = !aconnector->mst_mgr.mst_primary;
+ }
+ if (init)
+ dm_helpers_dp_mst_start_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link, false);
+ else
+ drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
}
drm_connector_list_iter_end(&iter);
+ /* Debug dump: list all DC links and their associated sinks after detection
+ * is complete for all connectors. This provides a comprehensive view of the
+ * final state without repeating the dump for each connector.
+ */
+ amdgpu_dm_dump_links_and_sinks(adev);
+
amdgpu_dm_irq_resume_late(adev);
amdgpu_dm_smu_write_watermarks_table(adev);
@@ -3536,7 +3679,6 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
.early_fini = amdgpu_dm_early_fini,
.hw_init = dm_hw_init,
.hw_fini = dm_hw_fini,
- .prepare_suspend = dm_prepare_suspend,
.suspend = dm_suspend,
.resume = dm_resume,
.is_idle = dm_is_idle,
@@ -3571,23 +3713,25 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
.atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
- .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
+ .atomic_commit_setup = amdgpu_dm_atomic_setup_commit,
};
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
+ const struct drm_panel_backlight_quirk *panel_backlight_quirk;
struct amdgpu_dm_backlight_caps *caps;
struct drm_connector *conn_base;
struct amdgpu_device *adev;
struct drm_luminance_range_info *luminance_range;
- int min_input_signal_override;
+ struct drm_device *drm;
if (aconnector->bl_idx == -1 ||
aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
return;
conn_base = &aconnector->base;
- adev = drm_to_adev(conn_base->dev);
+ drm = conn_base->dev;
+ adev = drm_to_adev(drm);
caps = &adev->dm.backlight_caps[aconnector->bl_idx];
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
@@ -3610,17 +3754,34 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
luminance_range = &conn_base->display_info.luminance_range;
- if (luminance_range->max_luminance) {
- caps->aux_min_input_signal = luminance_range->min_luminance;
+ if (luminance_range->max_luminance)
caps->aux_max_input_signal = luminance_range->max_luminance;
- } else {
- caps->aux_min_input_signal = 0;
+ else
caps->aux_max_input_signal = 512;
- }
- min_input_signal_override = drm_get_panel_min_brightness_quirk(aconnector->drm_edid);
- if (min_input_signal_override >= 0)
- caps->min_input_signal = min_input_signal_override;
+ if (luminance_range->min_luminance)
+ caps->aux_min_input_signal = luminance_range->min_luminance;
+ else
+ caps->aux_min_input_signal = 1;
+
+ panel_backlight_quirk =
+ drm_get_panel_backlight_quirk(aconnector->drm_edid);
+ if (!IS_ERR_OR_NULL(panel_backlight_quirk)) {
+ if (panel_backlight_quirk->min_brightness) {
+ caps->min_input_signal =
+ panel_backlight_quirk->min_brightness - 1;
+ drm_info(drm,
+ "Applying panel backlight quirk, min_brightness: %d\n",
+ caps->min_input_signal);
+ }
+ if (panel_backlight_quirk->brightness_mask) {
+ drm_info(drm,
+ "Applying panel backlight quirk, brightness_mask: 0x%X\n",
+ panel_backlight_quirk->brightness_mask);
+ caps->brightness_mask =
+ panel_backlight_quirk->brightness_mask;
+ }
+ }
}
DEFINE_FREE(sink_release, struct dc_sink *, if (_T) dc_sink_release(_T))
@@ -3700,7 +3861,9 @@ void amdgpu_dm_update_connector_after_detect(
drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
aconnector->connector_id, aconnector->dc_sink, sink);
- guard(mutex)(&dev->mode_config.mutex);
+ /* When polling, DRM has already locked the mutex for us. */
+ if (!drm_kms_helper_is_poll_worker())
+ mutex_lock(&dev->mode_config.mutex);
/*
* 1. Update status of the drm connector
@@ -3763,6 +3926,101 @@ void amdgpu_dm_update_connector_after_detect(
}
update_subconnector_property(aconnector);
+
+ /* When polling, the mutex will be unlocked for us by DRM. */
+ if (!drm_kms_helper_is_poll_worker())
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
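
The conditional locking above exists because this helper is now reached from two contexts: HPD processing, where the caller holds nothing, and DRM's output-poll worker, which already holds mode_config.mutex when it calls detect hooks. A condensed sketch of the pattern (illustrative function, real predicate):

    #include <drm/drm_probe_helper.h>

    static void demo_update(struct drm_device *dev)
    {
        /* The poll worker has already locked mode_config.mutex for us. */
        bool need_lock = !drm_kms_helper_is_poll_worker();

        if (need_lock)
            mutex_lock(&dev->mode_config.mutex);

        /* ... mutate connector state ... */

        if (need_lock)
            mutex_unlock(&dev->mode_config.mutex);
    }
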
+static bool are_sinks_equal(const struct dc_sink *sink1, const struct dc_sink *sink2)
+{
+ if (!sink1 || !sink2)
+ return false;
+ if (sink1->sink_signal != sink2->sink_signal)
+ return false;
+
+ if (sink1->dc_edid.length != sink2->dc_edid.length)
+ return false;
+
+ if (memcmp(sink1->dc_edid.raw_edid, sink2->dc_edid.raw_edid,
+ sink1->dc_edid.length) != 0)
+ return false;
+ return true;
+}
+
+
+/**
+ * DOC: hdmi_hpd_debounce_work
+ *
+ * HDMI HPD debounce delay in milliseconds. When an HDMI display toggles HPD
+ * (such as during power save transitions), this delay determines how long to
+ * wait before processing the HPD event. This allows distinguishing between a
+ * physical unplug (>hdmi_hpd_debounce_delay)
+ * and a spontaneous RX HPD toggle (<hdmi_hpd_debounce_delay).
+ *
+ * If the toggle is less than this delay, the driver compares sink capabilities
+ * and permits a hotplug event if they changed.
+ *
+ * The default value of 1500ms was chosen based on experimental testing with
+ * various monitors that exhibit spontaneous HPD toggling behavior.
+ */
+static void hdmi_hpd_debounce_work(struct work_struct *work)
+{
+ struct amdgpu_dm_connector *aconnector =
+ container_of(to_delayed_work(work), struct amdgpu_dm_connector,
+ hdmi_hpd_debounce_work);
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc *dc = aconnector->dc_link->ctx->dc;
+ bool fake_reconnect = false;
+ bool reallow_idle = false;
+ bool ret = false;
+ guard(mutex)(&aconnector->hpd_lock);
+
+ /* Re-detect the display */
+ scoped_guard(mutex, &adev->dm.dc_lock) {
+ if (dc->caps.ips_support && dc->ctx->dmub_srv->idle_allowed) {
+ dc_allow_idle_optimizations(dc, false);
+ reallow_idle = true;
+ }
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ }
+
+ if (ret) {
+ /* Apply workaround delay for certain panels */
+ apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
+ /* Compare sinks to determine if this was a spontaneous HPD toggle */
+ if (are_sinks_equal(aconnector->dc_link->local_sink, aconnector->hdmi_prev_sink)) {
+ /*
+ * Sinks match - this was a spontaneous HDMI HPD toggle.
+ */
+ drm_dbg_kms(dev, "HDMI HPD: Sink unchanged after debounce, internal re-enable\n");
+ fake_reconnect = true;
+ }
+
+ /* Update connector state */
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ /* Only notify OS if sink actually changed */
+ if (!fake_reconnect && aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_hotplug_event(dev);
+ }
+
+ /* Release the cached sink reference */
+ if (aconnector->hdmi_prev_sink) {
+ dc_sink_release(aconnector->hdmi_prev_sink);
+ aconnector->hdmi_prev_sink = NULL;
+ }
+
+ scoped_guard(mutex, &adev->dm.dc_lock) {
+ if (reallow_idle && dc->caps.ips_support)
+ dc_allow_idle_optimizations(dc, true);
+ }
}
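
Together with the scheduling site in handle_hpd_irq_helper() below, this worker implements a debounce: each HPD toggle re-arms a delayed work, and only if the line stays quiet for the whole window does the disconnect actually get processed. The arm/re-arm half, reduced to a standalone sketch with illustrative names:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static struct delayed_work demo_debounce;
    static unsigned int demo_delay_ms = 1500;   /* the documented default */

    static void demo_debounce_fn(struct work_struct *w)
    {
        /* Runs only if no new toggle re-armed us within the window. */
    }

    static void demo_init(void)
    {
        INIT_DELAYED_WORK(&demo_debounce, demo_debounce_fn);
    }

    /* Called on every HPD toggle. */
    static void demo_hpd_event(void)
    {
        /* mod_delayed_work() restarts the timer when already pending,
         * coalescing rapid toggles into one deferred detection. */
        mod_delayed_work(system_wq, &demo_debounce,
                         msecs_to_jiffies(demo_delay_ms));
    }
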
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
@@ -3774,6 +4032,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
struct dc *dc = aconnector->dc_link->ctx->dc;
bool ret = false;
+ bool debounce_required = false;
if (adev->dm.disable_hpd_irq)
return;
@@ -3796,6 +4055,14 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
+ /*
+ * Check for HDMI disconnect with debounce enabled.
+ */
+ debounce_required = (aconnector->hdmi_hpd_debounce_delay_ms > 0 &&
+ dc_is_hdmi_signal(aconnector->dc_link->connector_signal) &&
+ new_connection_type == dc_connection_none &&
+ aconnector->dc_link->local_sink != NULL);
+
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(aconnector->dc_link);
@@ -3805,7 +4072,34 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
drm_kms_helper_connector_hotplug_event(connector);
+ } else if (debounce_required) {
+ /*
+ * HDMI disconnect detected - schedule delayed work instead of
+		 * processing immediately. This lets us distinguish spurious
+		 * HDMI HPD toggles from physical unplugs.
+ */
+ drm_dbg_kms(dev, "HDMI HPD: Disconnect detected, scheduling debounce work (%u ms)\n",
+ aconnector->hdmi_hpd_debounce_delay_ms);
+
+ /* Cache the current sink for later comparison */
+ if (aconnector->hdmi_prev_sink)
+ dc_sink_release(aconnector->hdmi_prev_sink);
+ aconnector->hdmi_prev_sink = aconnector->dc_link->local_sink;
+ if (aconnector->hdmi_prev_sink)
+ dc_sink_retain(aconnector->hdmi_prev_sink);
+
+ /* Schedule delayed detection. */
+ if (mod_delayed_work(system_wq,
+ &aconnector->hdmi_hpd_debounce_work,
+ msecs_to_jiffies(aconnector->hdmi_hpd_debounce_delay_ms)))
+ drm_dbg_kms(dev, "HDMI HPD: Re-scheduled debounce work\n");
+
} else {
+
+ /* If the aconnector->hdmi_hpd_debounce_work is scheduled, exit early */
+ if (delayed_work_pending(&aconnector->hdmi_hpd_debounce_work))
+ return;
+
scoped_guard(mutex, &adev->dm.dc_lock) {
dc_exit_ips_for_hw_access(dc);
ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
@@ -4001,19 +4295,19 @@ static int register_hpd_handlers(struct amdgpu_device *adev)
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
dmub_hpd_callback, true)) {
- drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub hpd callback");
return -EINVAL;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ,
dmub_hpd_callback, true)) {
- drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub hpd callback");
return -EINVAL;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY,
dmub_hpd_sense_callback, true)) {
- drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd sense callback");
+ drm_err(adev_to_drm(adev), "fail to register dmub hpd sense callback");
return -EINVAL;
}
}
@@ -4718,11 +5012,25 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
return 1;
}
+/* Rescale from [min..max] to [0..AMDGPU_MAX_BL_LEVEL] */
+static inline u32 scale_input_to_fw(int min, int max, u64 input)
+{
+ return DIV_ROUND_CLOSEST_ULL(input * AMDGPU_MAX_BL_LEVEL, max - min);
+}
+
+/* Rescale from [0..AMDGPU_MAX_BL_LEVEL] to [min..max] */
+static inline u32 scale_fw_to_input(int min, int max, u64 input)
+{
+ return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), AMDGPU_MAX_BL_LEVEL);
+}
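
A quick worked example of the two helpers, assuming AMDGPU_MAX_BL_LEVEL is 255 and a panel range of min = 20, max = 255 (user input therefore spans 0..235):

    /*
     * scale_input_to_fw(20, 255, 117)
     *     = DIV_ROUND_CLOSEST_ULL(117 * 255, 235) = 127
     *
     * scale_fw_to_input(20, 255, 127)
     *     = 20 + DIV_ROUND_CLOSEST_ULL(127 * 235, 255) = 20 + 117 = 137
     *
     * i.e. the round trip recovers the original input re-offset into
     * [min..max], which is what convert_brightness_from_user() expects.
     */
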
+
static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps,
- uint32_t *brightness)
+ unsigned int min, unsigned int max,
+ uint32_t *user_brightness)
{
- u8 prev_signal = 0, prev_lum = 0;
- int i = 0;
+ u32 brightness = scale_input_to_fw(min, max, *user_brightness);
+ u8 lower_signal, upper_signal, upper_lum, lower_lum, lum;
+ int left, right;
if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)
return;
@@ -4730,31 +5038,54 @@ static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *cap
if (!caps->data_points)
return;
- /* choose start to run less interpolation steps */
- if (caps->luminance_data[caps->data_points/2].input_signal > *brightness)
- i = caps->data_points/2;
- do {
- u8 signal = caps->luminance_data[i].input_signal;
- u8 lum = caps->luminance_data[i].luminance;
+ /*
+ * Handle the case where brightness is below the first data point
+ * Interpolate between (0,0) and (first_signal, first_lum)
+ */
+ if (brightness < caps->luminance_data[0].input_signal) {
+ lum = DIV_ROUND_CLOSEST(caps->luminance_data[0].luminance * brightness,
+ caps->luminance_data[0].input_signal);
+ goto scale;
+ }
- /*
- * brightness == signal: luminance is percent numerator
- * brightness < signal: interpolate between previous and current luminance numerator
- * brightness > signal: find next data point
- */
- if (*brightness > signal) {
- prev_signal = signal;
- prev_lum = lum;
- i++;
- continue;
+ left = 0;
+ right = caps->data_points - 1;
+ while (left <= right) {
+ int mid = left + (right - left) / 2;
+ u8 signal = caps->luminance_data[mid].input_signal;
+
+ /* Exact match found */
+ if (signal == brightness) {
+ lum = caps->luminance_data[mid].luminance;
+ goto scale;
}
- if (*brightness < signal)
- lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
- (*brightness - prev_signal),
- signal - prev_signal);
- *brightness = DIV_ROUND_CLOSEST(lum * *brightness, 101);
- return;
- } while (i < caps->data_points);
+
+ if (signal < brightness)
+ left = mid + 1;
+ else
+ right = mid - 1;
+ }
+
+	/* clamp to the last data point */
+ if (left >= caps->data_points)
+ left = caps->data_points - 1;
+
+ /* At this point, left > right */
+ lower_signal = caps->luminance_data[right].input_signal;
+ upper_signal = caps->luminance_data[left].input_signal;
+ lower_lum = caps->luminance_data[right].luminance;
+ upper_lum = caps->luminance_data[left].luminance;
+
+ /* interpolate */
+ if (right == left || !lower_lum)
+ lum = upper_lum;
+ else
+ lum = lower_lum + DIV_ROUND_CLOSEST((upper_lum - lower_lum) *
+ (brightness - lower_signal),
+ upper_signal - lower_signal);
+scale:
+ *user_brightness = scale_fw_to_input(min, max,
+ DIV_ROUND_CLOSEST(lum * brightness, 101));
}
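
The rewrite above replaces the old linear scan with a binary search over the luminance curve plus linear interpolation between the two bracketing samples. The lookup core, reduced to a standalone sketch (assumes `sig[]` holds strictly increasing input_signal values; simplified relative to the driver's edge handling):

    /* Illustrative only: map x onto a sampled (sig[], lum[]) curve. */
    static unsigned int demo_interp(const unsigned char *sig,
                                    const unsigned char *lum,
                                    int n, unsigned int x)
    {
        int left = 0, right = n - 1;

        while (left <= right) {
            int mid = left + (right - left) / 2;

            if (sig[mid] == x)
                return lum[mid];        /* exact sample */
            if (sig[mid] < x)
                left = mid + 1;
            else
                right = mid - 1;
        }

        if (right < 0)
            return lum[0];              /* below the first sample */
        if (left >= n)
            return lum[n - 1];          /* above the last sample */

        /* Here sig[right] < x < sig[left]: interpolate linearly. */
        return lum[right] + (lum[left] - lum[right]) * (int)(x - sig[right]) /
                            (sig[left] - sig[right]);
    }

The binary search makes each lookup O(log n) over the caps table instead of O(n), which matters because this path runs on every brightness write.
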
static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
@@ -4765,11 +5096,10 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c
if (!get_brightness_range(caps, &min, &max))
return brightness;
- convert_custom_brightness(caps, &brightness);
+ convert_custom_brightness(caps, min, max, &brightness);
- // Rescale 0..255 to min..max
- return min + DIV_ROUND_CLOSEST((max - min) * brightness,
- AMDGPU_MAX_BL_LEVEL);
+ // Rescale 0..max to min..max
+ return min + DIV_ROUND_CLOSEST_ULL((u64)(max - min) * brightness, max);
}
static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
@@ -4782,8 +5112,8 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap
if (brightness < min)
return 0;
- // Rescale min..max to 0..255
- return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
+ // Rescale min..max to 0..max
+ return DIV_ROUND_CLOSEST_ULL((u64)max * (brightness - min),
max - min);
}
@@ -4795,6 +5125,21 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
struct dc_link *link;
u32 brightness;
bool rc, reallow_idle = false;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dm->ddev->mode_config.connector_list, head) {
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if (aconnector->bl_idx != bl_idx)
+ continue;
+
+ /* if connector is off, save the brightness for next time it's on */
+ if (!aconnector->base.encoder) {
+ dm->brightness[bl_idx] = user_brightness;
+ dm->actual_brightness[bl_idx] = 0;
+ return;
+ }
+ }
amdgpu_dm_update_backlight_caps(dm, bl_idx);
caps = &dm->backlight_caps[bl_idx];
@@ -4806,6 +5151,10 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
brightness = convert_brightness_from_user(caps, dm->brightness[bl_idx]);
link = (struct dc_link *)dm->backlight_link[bl_idx];
+ /* Apply brightness quirk */
+ if (caps->brightness_mask)
+ brightness |= caps->brightness_mask;
+
/* Change brightness based on AUX property */
mutex_lock(&dm->dc_lock);
if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) {
@@ -4813,6 +5162,14 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
reallow_idle = true;
}
+ if (trace_amdgpu_dm_brightness_enabled()) {
+ trace_amdgpu_dm_brightness(__builtin_return_address(0),
+ user_brightness,
+ brightness,
+ caps->aux_support,
+ power_supply_is_system_supplied() > 0);
+ }
+
if (caps->aux_support) {
rc = dc_link_set_backlight_level_nits(link, true, brightness,
AUX_BL_DEFAULT_TRANSITION_TIME_MS);
@@ -4866,10 +5223,8 @@ static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
if (caps.aux_support) {
u32 avg, peak;
- bool rc;
- rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
- if (!rc)
+ if (!dc_link_get_backlight_level_nits(link, &avg, &peak))
return dm->brightness[bl_idx];
return convert_brightness_to_user(&caps, avg);
}
@@ -4908,7 +5263,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
struct drm_device *drm = aconnector->base.dev;
struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
struct backlight_properties props = { 0 };
- struct amdgpu_dm_backlight_caps caps = { 0 };
+ struct amdgpu_dm_backlight_caps *caps;
char bl_name[16];
int min, max;
@@ -4922,22 +5277,24 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
return;
}
- amdgpu_acpi_get_backlight_caps(&caps);
- if (caps.caps_valid && get_brightness_range(&caps, &min, &max)) {
+ caps = &dm->backlight_caps[aconnector->bl_idx];
+ if (get_brightness_range(caps, &min, &max)) {
if (power_supply_is_system_supplied() > 0)
- props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps.ac_level, 100);
+ props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->ac_level, 100);
else
- props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps.dc_level, 100);
+ props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->dc_level, 100);
/* min is zero, so max needs to be adjusted */
props.max_brightness = max - min;
drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max,
- caps.ac_level, caps.dc_level);
+ caps->ac_level, caps->dc_level);
} else
- props.brightness = AMDGPU_MAX_BL_LEVEL;
+ props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL;
- if (caps.data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE))
+ if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) {
drm_info(drm, "Using custom brightness curve\n");
- props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+ props.scale = BACKLIGHT_SCALE_NON_LINEAR;
+ } else
+ props.scale = BACKLIGHT_SCALE_LINEAR;
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
@@ -4999,6 +5356,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
static void setup_backlight_device(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector)
{
+ struct amdgpu_dm_backlight_caps *caps;
struct dc_link *link = aconnector->dc_link;
int bl_idx = dm->num_of_edps;
@@ -5018,6 +5376,13 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm,
dm->num_of_edps++;
update_connector_ext_caps(aconnector);
+ caps = &dm->backlight_caps[aconnector->bl_idx];
+
+ /* Only offer ABM property when non-OLED and user didn't turn off by module parameter */
+ if (!caps->ext_caps->bits.oled && amdgpu_dm_abm_level < 0)
+ drm_object_attach_property(&aconnector->base.base,
+ dm->adev->mode_info.abm_level_property,
+ ABM_SYSFS_CONTROL);
}
static void amdgpu_set_panel_orientation(struct drm_connector *connector);
@@ -5273,6 +5638,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
amdgpu_set_panel_orientation(&aconnector->base);
}
+ /* Debug dump: list all DC links and their associated sinks after detection
+ * is complete for all connectors. This provides a comprehensive view of the
+ * final state without repeating the dump for each connector.
+ */
+ amdgpu_dm_dump_links_and_sinks(adev);
+
/* Software is initialized. Now we can register interrupt handlers. */
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -5353,7 +5724,8 @@ fail:
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
- drm_atomic_private_obj_fini(&dm->atomic_obj);
+ if (dm->atomic_obj.state)
+ drm_atomic_private_obj_fini(&dm->atomic_obj);
}
/******************************************************************************
@@ -5658,6 +6030,10 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
*color_space = COLOR_SPACE_SRGB;
+ /* Ignore properties when DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE is set */
+ if (plane_state->state && plane_state->state->plane_color_pipeline)
+ return 0;
+
/* DRM color properties only affect non-RGB formats. */
if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
return 0;
@@ -6299,6 +6675,10 @@ static void fill_stream_properties_from_drm_display_mode(
&& aconnector
&& aconnector->force_yuv420_output)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR422)
+ && aconnector
+ && aconnector->force_yuv422_output)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR422;
else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
@@ -6330,13 +6710,15 @@ static void fill_stream_properties_from_drm_display_mode(
(struct drm_connector *)connector,
mode_in);
if (err < 0)
- drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd \n", connector->name, err);
+ drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd\n",
+ connector->name, err);
timing_out->vic = avi_frame.video_code;
err = drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame,
(struct drm_connector *)connector,
mode_in);
if (err < 0)
- drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd \n", connector->name, err);
+ drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd\n",
+ connector->name, err);
timing_out->hdmi_vic = hv_frame.vic;
}
@@ -7004,29 +7386,117 @@ finish:
return stream;
}
+/**
+ * amdgpu_dm_connector_poll - Poll a connector to see if it's connected to a display
+ * @aconnector: DM connector to poll (owns @base drm_connector and @dc_link)
+ * @force: if true, force polling even when DAC load detection was used
+ *
+ * Used for connectors that don't support HPD (hotplug detection) to
+ * periodically check whether the connector is connected to a display.
+ *
+ * When connection was determined via DAC load detection, we avoid
+ * re-running it on normal polls to prevent visible glitches, unless
+ * @force is set.
+ *
+ * Return: The probed connector status (connected/disconnected/unknown).
+ */
static enum drm_connector_status
-amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+amdgpu_dm_connector_poll(struct amdgpu_dm_connector *aconnector, bool force)
{
- bool connected;
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc_link *link = aconnector->dc_link;
+ enum dc_connection_type conn_type = dc_connection_none;
+ enum drm_connector_status status = connector_status_disconnected;
- /*
- * Notes:
- * 1. This interface is NOT called in context of HPD irq.
- * 2. This interface *is called* in context of user-mode ioctl. Which
- * makes it a bad place for *any* MST-related activity.
+ /* When we determined the connection using DAC load detection,
+	 * do NOT poll the connector to detect disconnect, because
+	 * that would run DAC load detection again, which can cause
+	 * visible glitches.
+	 *
+	 * Only allow polling such a connector again when forcing.
*/
+ if (!force && link->local_sink && link->type == dc_connection_dac_load)
+ return connector->status;
- if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
- !aconnector->fake_enable)
- connected = (aconnector->dc_sink != NULL);
- else
- connected = (aconnector->base.force == DRM_FORCE_ON ||
- aconnector->base.force == DRM_FORCE_ON_DIGITAL);
+ mutex_lock(&aconnector->hpd_lock);
+
+ if (dc_link_detect_connection_type(aconnector->dc_link, &conn_type) &&
+ conn_type != dc_connection_none) {
+ mutex_lock(&adev->dm.dc_lock);
+
+ /* Only call full link detection when a sink isn't created yet,
+	 * i.e. just when the display is plugged in; otherwise we risk flickering.
+ */
+ if (link->local_sink ||
+ dc_link_detect(link, DETECT_REASON_HPD))
+ status = connector_status_connected;
+
+ mutex_unlock(&adev->dm.dc_lock);
+ }
+
+ if (connector->status != status) {
+ if (status == connector_status_disconnected) {
+ if (link->local_sink)
+ dc_sink_release(link->local_sink);
+
+ link->local_sink = NULL;
+ link->dpcd_sink_count = 0;
+ link->type = dc_connection_none;
+ }
+
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ }
+
+ mutex_unlock(&aconnector->hpd_lock);
+ return status;
+}
+
+/**
+ * amdgpu_dm_connector_detect() - Detect whether a DRM connector is connected to a display
+ *
+ * A connector is considered connected when it has a sink that is not NULL.
+ * For connectors that support HPD (hotplug detection), the connection is
+ * handled in the HPD interrupt.
+ * For connectors that may not support HPD, such as analog connectors,
+ * DRM will call this function repeatedly to poll them.
+ *
+ * Notes:
+ * 1. This interface is NOT called in context of HPD irq.
+ * 2. This interface *is called* in context of user-mode ioctl. Which
+ * makes it a bad place for *any* MST-related activity.
+ *
+ * @connector: The DRM connector we are checking. We convert it to
+ * amdgpu_dm_connector so we can read the DC link and state.
+ * @force: If true, force a full detect even where a lighter check
+ *         would normally be used to avoid flicker.
+ *
+ * Return: The connector status (connected, disconnected, or unknown).
+ *
+ */
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
update_subconnector_property(aconnector);
- return (connected ? connector_status_connected :
+ if (aconnector->base.force == DRM_FORCE_ON ||
+ aconnector->base.force == DRM_FORCE_ON_DIGITAL)
+ return connector_status_connected;
+ else if (aconnector->base.force == DRM_FORCE_OFF)
+ return connector_status_disconnected;
+
+	/* Poll only analog connectors, and only when they are either
+	 * disconnected or connected to an analog display.
+ */
+ if (drm_kms_helper_is_poll_worker() &&
+ dc_connector_supports_analog(aconnector->dc_link->link_id.id) &&
+ (!aconnector->dc_sink || aconnector->dc_sink->edid_caps.analog))
+ return amdgpu_dm_connector_poll(aconnector, force);
+
+ return (aconnector->dc_sink ? connector_status_connected :
connector_status_disconnected);
}
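
After the split, detect resolves status by strict precedence rather than one boolean expression. In sketch form:

    force ON / ON_DIGITAL                    -> connected
    force OFF                                -> disconnected
    poll worker + analog-capable link +
        (no sink, or sink is analog)         -> amdgpu_dm_connector_poll()
    otherwise                                -> dc_sink ? connected
                                                        : disconnected
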
@@ -7077,6 +7547,20 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
dm_new_state->underscan_enable = val;
ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+ switch (val) {
+ case ABM_SYSFS_CONTROL:
+ dm_new_state->abm_sysfs_forbidden = false;
+ break;
+ case ABM_LEVEL_OFF:
+ dm_new_state->abm_sysfs_forbidden = true;
+ dm_new_state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE;
+ break;
+ default:
+ dm_new_state->abm_sysfs_forbidden = true;
+ dm_new_state->abm_level = val;
+ }
+ ret = 0;
}
return ret;
@@ -7119,6 +7603,13 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
*val = dm_state->underscan_enable;
ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+ if (!dm_state->abm_sysfs_forbidden)
+ *val = ABM_SYSFS_CONTROL;
+ else
+ *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
+ dm_state->abm_level : 0;
+ ret = 0;
}
return ret;
@@ -7171,10 +7662,16 @@ static ssize_t panel_power_savings_store(struct device *device,
return -EINVAL;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- to_dm_connector_state(connector->state)->abm_level = val ?:
- ABM_LEVEL_IMMEDIATE_DISABLE;
+ if (to_dm_connector_state(connector->state)->abm_sysfs_forbidden)
+ ret = -EBUSY;
+ else
+ to_dm_connector_state(connector->state)->abm_level = val ?:
+ ABM_LEVEL_IMMEDIATE_DISABLE;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ if (ret)
+ return ret;
+
drm_kms_helper_hotplug_event(dev);
return count;
@@ -7239,6 +7736,13 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
if (aconnector->mst_mgr.dev)
drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
+ /* Cancel and flush any pending HDMI HPD debounce work */
+ cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work);
+ if (aconnector->hdmi_prev_sink) {
+ dc_sink_release(aconnector->hdmi_prev_sink);
+ aconnector->hdmi_prev_sink = NULL;
+ }
+
if (aconnector->bl_idx != -1) {
backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
dm->backlight_dev[aconnector->bl_idx] = NULL;
@@ -7254,10 +7758,6 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
- if (aconnector->i2c) {
- i2c_del_adapter(&aconnector->i2c->base);
- kfree(aconnector->i2c);
- }
kfree(aconnector->dm_dp_aux.aux.name);
kfree(connector);
@@ -7519,7 +8019,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
dc_result = DC_FAIL_ATTACH_SURFACES;
if (dc_result == DC_OK)
- dc_result = dc_validate_global_state(dc, dc_state, true);
+ dc_result = dc_validate_global_state(dc, dc_state, DC_VALIDATE_MODE_ONLY);
cleanup:
if (dc_state)
@@ -7557,6 +8057,7 @@ create_validate_stream_for_sink(struct drm_connector *connector,
bpc_limit = 8;
do {
+ drm_dbg_kms(connector->dev, "Trying with %d bpc\n", requested_bpc);
stream = create_stream_for_sink(connector, drm_mode,
dm_state, old_stream,
requested_bpc);
@@ -7577,7 +8078,7 @@ create_validate_stream_for_sink(struct drm_connector *connector,
dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
if (dc_result != DC_OK) {
- DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n",
+ DRM_DEBUG_KMS("Pruned mode %d x %d (clk %d) %s %s -- %s\n",
drm_mode->hdisplay,
drm_mode->vdisplay,
drm_mode->clock,
@@ -7592,16 +8093,41 @@ create_validate_stream_for_sink(struct drm_connector *connector,
} while (stream == NULL && requested_bpc >= bpc_limit);
- if ((dc_result == DC_FAIL_ENC_VALIDATE ||
- dc_result == DC_EXCEED_DONGLE_CAP) &&
- !aconnector->force_yuv420_output) {
- DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n",
- __func__, __LINE__);
-
- aconnector->force_yuv420_output = true;
+ switch (dc_result) {
+	/*
+	 * If the stream failed validation with the requested RGB color depth,
+	 * e.g. for lack of DP link bandwidth, fall back and retry in order:
+	 * YUV422 (8bpc, 6bpc)
+	 * YUV420 (8bpc, 6bpc)
+	 */
+ case DC_FAIL_ENC_VALIDATE:
+ case DC_EXCEED_DONGLE_CAP:
+ case DC_NO_DP_LINK_BANDWIDTH:
+ /* recursively entered twice and already tried both YUV422 and YUV420 */
+ if (aconnector->force_yuv422_output && aconnector->force_yuv420_output)
+ break;
+ /* first failure; try YUV422 */
+ if (!aconnector->force_yuv422_output) {
+ drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV422\n",
+ __func__, __LINE__, dc_result);
+ aconnector->force_yuv422_output = true;
+ /* recursively entered and YUV422 failed, try YUV420 */
+ } else if (!aconnector->force_yuv420_output) {
+ drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV420\n",
+ __func__, __LINE__, dc_result);
+ aconnector->force_yuv420_output = true;
+ }
stream = create_validate_stream_for_sink(connector, drm_mode,
- dm_state, old_stream);
+ dm_state, old_stream);
+ aconnector->force_yuv422_output = false;
aconnector->force_yuv420_output = false;
+ break;
+ case DC_OK:
+ break;
+ default:
+ drm_dbg_kms(connector->dev, "%s:%d Unhandled validation failure %d\n",
+ __func__, __LINE__, dc_result);
+ break;
}
return stream;
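
/*
 * Editor's note - illustrative sketch, not part of the patch: the
 * recursion above amounts to a bounded fallback ladder over the two
 * force flags. A hypothetical flattened helper with the same logic:
 */
static bool sketch_next_fallback(bool *try_yuv422, bool *try_yuv420)
{
	if (!*try_yuv422) {		/* first failure: force YUV422 */
		*try_yuv422 = true;
		return true;
	}
	if (!*try_yuv420) {		/* second failure: force YUV420 */
		*try_yuv420 = true;
		return true;
	}
	return false;			/* both tried: give up */
}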
@@ -7732,6 +8258,9 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
int ret;
+ if (WARN_ON(unlikely(!old_con_state || !new_con_state)))
+ return -EINVAL;
+
trace_amdgpu_dm_connector_atomic_check(new_con_state);
if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
@@ -7743,6 +8272,14 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
if (!crtc)
return 0;
+ if (new_con_state->privacy_screen_sw_state != old_con_state->privacy_screen_sw_state) {
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ new_crtc_state->mode_changed = true;
+ }
+
if (new_con_state->colorspace != old_con_state->colorspace) {
new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(new_crtc_state))
@@ -7844,6 +8381,23 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
int clock, bpp = 0;
bool is_y420 = false;
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+ enum drm_mode_status result;
+
+ result = drm_crtc_helper_mode_valid_fixed(encoder->crtc, adjusted_mode, native_mode);
+ if (result != MODE_OK && dm_new_connector_state->scaling == RMX_OFF) {
+ drm_dbg_driver(encoder->dev,
+ "mode %dx%d@%dHz is not native, enabling scaling\n",
+ adjusted_mode->hdisplay, adjusted_mode->vdisplay,
+ drm_mode_vrefresh(adjusted_mode));
+ dm_new_connector_state->scaling = RMX_ASPECT;
+ }
+ return 0;
+ }
+
if (!aconnector->mst_output_port)
return 0;
@@ -7857,7 +8411,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
- mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
+ mst_state->pbn_div.full = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;
@@ -7964,7 +8518,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
return 0;
}
-static int to_drm_connector_type(enum signal_type st)
+static int to_drm_connector_type(enum signal_type st, uint32_t connector_id)
{
switch (st) {
case SIGNAL_TYPE_HDMI_TYPE_A:
@@ -7980,6 +8534,10 @@ static int to_drm_connector_type(enum signal_type st)
return DRM_MODE_CONNECTOR_DisplayPort;
case SIGNAL_TYPE_DVI_DUAL_LINK:
case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ if (connector_id == CONNECTOR_ID_SINGLE_LINK_DVII ||
+ connector_id == CONNECTOR_ID_DUAL_LINK_DVII)
+ return DRM_MODE_CONNECTOR_DVII;
+
return DRM_MODE_CONNECTOR_DVID;
case SIGNAL_TYPE_VIRTUAL:
return DRM_MODE_CONNECTOR_VIRTUAL;
@@ -8031,7 +8589,7 @@ static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
- char *name,
+ const char *name,
int hdisplay, int vdisplay)
{
struct drm_device *dev = encoder->dev;
@@ -8053,6 +8611,24 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
}
+static const struct amdgpu_dm_mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
+ int w;
+ int h;
+} common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
+};
+
static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
struct drm_connector *connector)
{
@@ -8063,23 +8639,10 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
to_amdgpu_dm_connector(connector);
int i;
int n;
- struct mode_size {
- char name[DRM_DISPLAY_MODE_LEN];
- int w;
- int h;
- } common_modes[] = {
- { "640x480", 640, 480},
- { "800x600", 800, 600},
- { "1024x768", 1024, 768},
- { "1280x720", 1280, 720},
- { "1280x800", 1280, 800},
- {"1280x1024", 1280, 1024},
- { "1440x900", 1440, 900},
- {"1680x1050", 1680, 1050},
- {"1600x1200", 1600, 1200},
- {"1920x1080", 1920, 1080},
- {"1920x1200", 1920, 1200}
- };
+
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_eDP) &&
+ (connector->connector_type != DRM_MODE_CONNECTOR_LVDS))
+ return;
n = ARRAY_SIZE(common_modes);
@@ -8276,6 +8839,16 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
if (!(amdgpu_freesync_vid_mode && drm_edid))
return;
+ if (!amdgpu_dm_connector->dc_sink || !amdgpu_dm_connector->dc_link)
+ return;
+
+ if (!dc_supports_vrr(amdgpu_dm_connector->dc_sink->ctx->dce_version))
+ return;
+
+ if (dc_connector_supports_analog(amdgpu_dm_connector->dc_link->link_id.id) &&
+ amdgpu_dm_connector->dc_sink->edid_caps.analog)
+ return;
+
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
amdgpu_dm_connector->num_modes +=
add_fs_modes(amdgpu_dm_connector);
@@ -8285,11 +8858,11 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
+ struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
struct drm_encoder *encoder;
const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid;
- struct dc_link_settings *verified_link_cap =
- &amdgpu_dm_connector->dc_link->verified_link_cap;
- const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
+ struct dc_link_settings *verified_link_cap = &dc_link->verified_link_cap;
+ const struct dc *dc = dc_link->dc;
encoder = amdgpu_dm_connector_to_encoder(connector);
@@ -8299,6 +8872,17 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
amdgpu_dm_connector->num_modes +=
drm_add_modes_noedid(connector, 1920, 1080);
+
+ if (amdgpu_dm_connector->dc_sink &&
+ amdgpu_dm_connector->dc_sink->edid_caps.analog &&
+ dc_connector_supports_analog(dc_link->link_id.id)) {
+ /* Analog monitor connected by DAC load detection.
+ * Add common modes. It will be up to the user to select one that works.
+ */
+ for (int i = 0; i < ARRAY_SIZE(common_modes); i++)
+ amdgpu_dm_connector->num_modes += drm_add_modes_noedid(
+ connector, common_modes[i].w, common_modes[i].h);
+ }
} else {
amdgpu_dm_connector_ddc_get_modes(connector, drm_edid);
if (encoder)
@@ -8346,6 +8930,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
mutex_init(&aconnector->hpd_lock);
mutex_init(&aconnector->handle_mst_msg_ready);
+ aconnector->hdmi_hpd_debounce_delay_ms = AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS;
+ INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
+ aconnector->hdmi_prev_sink = NULL;
+
/*
	 * Configure HPD hot plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported
@@ -8367,6 +8955,11 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
case DRM_MODE_CONNECTOR_DVID:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_VGA:
+ aconnector->base.polled =
+ DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ break;
default:
break;
}
@@ -8420,6 +9013,18 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
if (adev->dm.hdcp_workqueue)
drm_connector_attach_content_protection_property(&aconnector->base, true);
}
+
+ if (connector_type == DRM_MODE_CONNECTOR_eDP) {
+ struct drm_privacy_screen *privacy_screen;
+
+ privacy_screen = drm_privacy_screen_get(adev_to_drm(adev)->dev, NULL);
+ if (!IS_ERR(privacy_screen)) {
+ drm_connector_attach_privacy_screen_provider(&aconnector->base,
+ privacy_screen);
+ } else if (PTR_ERR(privacy_screen) != -ENODEV) {
+ drm_warn(adev_to_drm(adev), "Error getting privacy-screen\n");
+ }
+ }
}
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
@@ -8549,14 +9154,14 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
}
aconnector->i2c = i2c;
- res = i2c_add_adapter(&i2c->base);
+ res = devm_i2c_add_adapter(dm->adev->dev, &i2c->base);
if (res) {
drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index);
goto out_free;
}
- connector_type = to_drm_connector_type(link->connector_signal);
+ connector_type = to_drm_connector_type(link->connector_signal, link->link_id.id);
res = drm_connector_init_with_ddc(
dm->ddev,
@@ -8647,7 +9252,16 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static void manage_dm_interrupts(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dm_crtc_state *acrtc_state)
-{
+{
+	/*
+	 * We cannot be sure that the frontend index maps to the same
+	 * backend index - some even map to more than one.
+	 * So we have to go through the CRTC to find the right IRQ.
+	 */
+	int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
+	struct drm_device *dev = adev_to_drm(adev);
+
struct drm_vblank_crtc_config config = {0};
struct dc_crtc_timing *timing;
int offdelay;
@@ -8700,7 +9314,35 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
drm_crtc_vblank_on_config(&acrtc->base,
&config);
+	/* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get. */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 2, 0):
+ if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n");
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n");
+#endif
+ }
+
} else {
+	/* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put. */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 2, 0):
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n");
+#endif
+ if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n");
+ }
+
drm_crtc_vblank_off(&acrtc->base);
}
}
@@ -10008,69 +10650,40 @@ static void dm_set_writeback(struct amdgpu_display_manager *dm,
drm_writeback_queue_job(wb_conn, new_con_state);
}
-/**
- * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
- * @state: The atomic state to commit
- *
- * This will tell DC to commit the constructed DC state from atomic_check,
- * programming the hardware. Any failures here implies a hardware failure, since
- * atomic check should have filtered anything non-kosher.
- */
-static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+static void amdgpu_dm_update_hdcp(struct drm_atomic_state *state)
{
+ struct drm_connector_state *old_con_state, *new_con_state;
struct drm_device *dev = state->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_display_manager *dm = &adev->dm;
- struct dm_atomic_state *dm_state;
- struct dc_state *dc_state = NULL;
- u32 i, j;
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- unsigned long flags;
- bool wait_for_vblank = true;
struct drm_connector *connector;
- struct drm_connector_state *old_con_state, *new_con_state;
- struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
- int crtc_disable_count = 0;
-
- trace_amdgpu_dm_atomic_commit_tail_begin(state);
-
- drm_atomic_helper_update_legacy_modeset_state(dev, state);
- drm_dp_mst_atomic_wait_for_dependencies(state);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
- dm_state = dm_atomic_get_new_state(state);
- if (dm_state && dm_state->context) {
- dc_state = dm_state->context;
- amdgpu_dm_commit_streams(state, dc_state);
- }
+ if (!adev->dm.hdcp_workqueue)
+ return;
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state;
struct amdgpu_dm_connector *aconnector;
- if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ if (!connector || connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
aconnector = to_amdgpu_dm_connector(connector);
- if (!adev->dm.hdcp_workqueue)
- continue;
-
- pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
+ drm_dbg(dev, "[HDCP_DM] -------------- i : %x ----------\n", i);
- if (!connector)
- continue;
-
- pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ drm_dbg(dev, "[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
connector->index, connector->status, connector->dpms);
- pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ drm_dbg(dev, "[HDCP_DM] state protection old: %x new: %x\n",
old_con_state->content_protection, new_con_state->content_protection);
if (aconnector->dc_sink) {
if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
- pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+ drm_dbg(dev, "[HDCP_DM] pipe_ctx dispname=%s\n",
aconnector->dc_sink->edid_caps.display_name);
}
}
@@ -10084,7 +10697,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
if (old_crtc_state)
- pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ drm_dbg(dev, "old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
old_crtc_state->enable,
old_crtc_state->active,
old_crtc_state->mode_changed,
@@ -10092,29 +10705,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
old_crtc_state->connectors_changed);
if (new_crtc_state)
- pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ drm_dbg(dev, "NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
new_crtc_state->enable,
new_crtc_state->active,
new_crtc_state->mode_changed,
new_crtc_state->active_changed,
new_crtc_state->connectors_changed);
- }
- for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
- struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-
- if (!adev->dm.hdcp_workqueue)
- continue;
-
- new_crtc_state = NULL;
- old_crtc_state = NULL;
-
- if (acrtc) {
- new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
- old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
- }
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -10158,7 +10755,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
enable_encryption = true;
- drm_info(adev_to_drm(adev), "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+ drm_info(dev, "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
if (aconnector->dc_link)
hdcp_update_display(
@@ -10166,6 +10763,78 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->hdcp_content_type, enable_encryption);
}
}
+}
+
+static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int i, ret;
+
+ ret = drm_dp_mst_atomic_setup_commit(state);
+ if (ret)
+ return ret;
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ /*
+ * Color management settings. We also update color properties
+ * when a modeset is needed, to ensure it gets reprogrammed.
+ */
+ if (dm_new_crtc_state->base.active && dm_new_crtc_state->stream &&
+ (dm_new_crtc_state->base.color_mgmt_changed ||
+ dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+ ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
+ if (ret) {
+ drm_dbg_atomic(state->dev, "Failed to update color state\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
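
/*
 * Editor's note - hypothetical wiring sketch, not part of the patch: a
 * setup_commit hook like the one above is plugged into the mode-config
 * helpers alongside the commit tail:
 */
static const struct drm_mode_config_helper_funcs sketch_mode_config_helpers = {
	.atomic_commit_setup = amdgpu_dm_atomic_setup_commit,
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
};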
+
+/**
+ * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failure here implies a hardware failure, since
+ * atomic check should have filtered out anything non-kosher.
+ */
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
+ struct dc_state *dc_state = NULL;
+ u32 i, j;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ unsigned long flags;
+ bool wait_for_vblank = true;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state = NULL, *new_con_state = NULL;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int crtc_disable_count = 0;
+
+ trace_amdgpu_dm_atomic_commit_tail_begin(state);
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_dp_mst_atomic_wait_for_dependencies(state);
+
+ dm_state = dm_atomic_get_new_state(state);
+ if (dm_state && dm_state->context) {
+ dc_state = dm_state->context;
+ amdgpu_dm_commit_streams(state, dc_state);
+ }
+
+ amdgpu_dm_update_hdcp(state);
/* Handle connector state changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -10248,7 +10917,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* Here we create an empty update on each plane.
* To fix this, DC should permit updating only stream properties.
*/
- dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
+ dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_KERNEL);
if (!dummy_updates) {
drm_err(adev_to_drm(adev), "Failed to allocate memory for dummy_updates.\n");
continue;
@@ -10268,6 +10937,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
&stream_update);
mutex_unlock(&dm->dc_lock);
kfree(dummy_updates);
+
+ drm_connector_update_privacy_screen(new_con_state);
}
/**
@@ -10319,6 +10990,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
uint8_t cnt;
+
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
for (cnt = 0; cnt < MAX_CRC_WINDOW_NUM; cnt++) {
if (acrtc->dm_irq_params.window_param[cnt].enable) {
@@ -10621,6 +11293,8 @@ static void get_freesync_config_for_crtc(
} else {
config.state = VRR_STATE_INACTIVE;
}
+ } else {
+ config.state = VRR_STATE_UNSUPPORTED;
}
out:
new_crtc_state->freesync_config = config;
@@ -10938,7 +11612,7 @@ skip_modeset:
if (dm_new_crtc_state->base.color_mgmt_changed ||
dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
drm_atomic_crtc_needs_modeset(new_crtc_state)) {
- ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
+ ret = amdgpu_dm_check_crtc_color_mgmt(dm_new_crtc_state, true);
if (ret)
goto fail;
}
@@ -12141,7 +12815,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n");
goto fail;
}
- status = dc_validate_global_state(dc, dm_state->context, true);
+ status = dc_validate_global_state(dc, dm_state->context, DC_VALIDATE_MODE_ONLY);
if (status != DC_OK) {
drm_dbg_atomic(dev, "DC global validation failure: %s (%d)",
dc_status_to_str(status), status);
@@ -12170,7 +12844,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
int j = state->num_private_objs-1;
dm_atomic_destroy_state(obj,
- state->private_objs[i].state);
+ state->private_objs[i].state_to_destroy);
/* If i is not at the end of the array then the
* last element needs to be moved to where i was
@@ -12181,7 +12855,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
state->private_objs[j];
state->private_objs[j].ptr = NULL;
- state->private_objs[j].state = NULL;
+ state->private_objs[j].state_to_destroy = NULL;
state->private_objs[j].old_state = NULL;
state->private_objs[j].new_state = NULL;
@@ -12522,7 +13196,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
dm_con_state = to_dm_connector_state(connector->state);
- if (!adev->dm.freesync_module)
+ if (!adev->dm.freesync_module || !dc_supports_vrr(sink->ctx->dce_version))
goto update;
edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index d7d92f9911e4..ef97cede9926 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
*
@@ -58,6 +59,7 @@
#define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL)
+#define AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS 1500
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
@@ -152,6 +154,20 @@ struct idle_workqueue {
bool running;
};
+/**
+ * struct vupdate_offload_work - Work data for offloading task from vupdate handler
+ * @work: Kernel work data for the work event
+ * @adev: amdgpu_device back pointer
+ * @stream: DC stream associated with the crtc
+ * @adjust: DC CRTC timing adjust to be applied to the crtc
+ */
+struct vupdate_offload_work {
+ struct work_struct work;
+ struct amdgpu_device *adev;
+ struct dc_stream_state *stream;
+ struct dc_crtc_timing_adjust *adjust;
+};
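
/*
 * Editor's note - assumed usage, not part of the patch: a vupdate
 * interrupt handler is expected to allocate and queue this work roughly
 * as follows; the worker callback name is hypothetical.
 */
static inline void sketch_queue_vupdate_offload(struct amdgpu_device *adev,
						work_func_t worker)
{
	struct vupdate_offload_work *w = kzalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return;
	INIT_WORK(&w->work, worker);	/* worker runs outside irq context */
	w->adev = adev;
	schedule_work(&w->work);
}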
+
#define MAX_LUMINANCE_DATA_POINTS 99
/**
@@ -201,6 +217,11 @@ struct amdgpu_dm_backlight_caps {
*/
bool aux_support;
/**
+ * @brightness_mask: After deriving brightness, OR it with this mask.
+	 * Workaround for panels that misbehave at certain brightness values.
+ */
+ u32 brightness_mask;
+ /**
* @ac_level: the default brightness if booted on AC
*/
u8 ac_level;
@@ -636,8 +657,9 @@ struct amdgpu_display_manager {
* @bb_from_dmub:
*
* Bounding box data read from dmub during early initialization for DCN4+
+	 * Data is stored as a byte array that should be cast to the appropriate bb struct.
*/
- struct dml2_soc_bb *bb_from_dmub;
+ void *bb_from_dmub;
/**
* @oem_i2c:
@@ -752,6 +774,9 @@ struct amdgpu_dm_connector {
uint16_t vc_full_pbn;
struct mutex handle_mst_msg_ready;
+ /* branch device specific data */
+ uint32_t branch_ieee_oui;
+
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
@@ -775,6 +800,7 @@ struct amdgpu_dm_connector {
bool fake_enable;
bool force_yuv420_output;
+ bool force_yuv422_output;
struct dsc_preferred_settings dsc_settings;
union dp_downstream_port_present mst_downstream_port_present;
/* Cached display modes */
@@ -794,6 +820,11 @@ struct amdgpu_dm_connector {
bool pack_sdp_v1_3;
enum adaptive_sync_type as_type;
struct amdgpu_hdmi_vsdb_info vsdb_info;
+
+ /* HDMI HPD debounce support */
+ unsigned int hdmi_hpd_debounce_delay_ms;
+ struct delayed_work hdmi_hpd_debounce_work;
+ struct dc_sink *hdmi_prev_sink;
};
static inline void amdgpu_dm_set_mst_status(uint8_t *status,
@@ -968,6 +999,7 @@ struct dm_connector_state {
bool underscan_enable;
bool freesync_capable;
bool update_hdcp;
+ bool abm_sysfs_forbidden;
uint8_t abm_level;
int vcpi_slots;
uint64_t pbn;
@@ -1022,6 +1054,8 @@ void amdgpu_dm_init_color_mod(void);
int amdgpu_dm_create_color_properties(struct amdgpu_device *adev);
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
+int amdgpu_dm_check_crtc_color_mgmt(struct dm_crtc_state *crtc,
+ bool check_only);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index ebabfe3a512f..1dcc79b35225 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -25,13 +26,39 @@
#include "amdgpu.h"
#include "amdgpu_mode.h"
#include "amdgpu_dm.h"
+#include "amdgpu_dm_colorop.h"
#include "dc.h"
#include "modules/color/color_gamma.h"
-#include "basics/conversion.h"
/**
* DOC: overview
*
+ * We have three types of color management in the AMD display driver:
+ * 1. the legacy &drm_crtc DEGAMMA, CTM, and GAMMA properties
+ * 2. AMD driver private color management on &drm_plane and &drm_crtc
+ * 3. AMD plane color pipeline
+ *
+ * The CRTC properties are the original color management. When they were
+ * implemented, per-plane color management did not exist yet. Because of
+ * that we could get away with plumbing the DEGAMMA and CTM
+ * properties to pre-blending HW functions. This is incompatible with
+ * per-plane color management, such as via the AMD private properties or
+ * the new drm_plane color pipeline. The only compatible CRTC property
+ * with per-plane color management is the GAMMA property as it is
+ * applied post-blending.
+ *
+ * The AMD driver private color management properties are only exposed
+ * when the kernel is built explicitly with -DAMD_PRIVATE_COLOR. They
+ * are temporary building blocks on the path to full-fledged &drm_plane
+ * and &drm_crtc color pipelines and lay the driver's groundwork for the
+ * color pipelines.
+ *
+ * The AMD plane color pipeline describes AMD's &drm_colorops via the
+ * &drm_plane's COLOR_PIPELINE property.
+ *
+ * drm_crtc Properties
+ * -------------------
+ *
* The DC interface to HW gives us the following color management blocks
* per pipe (surface):
*
@@ -42,36 +69,93 @@
* - Surface regamma LUT (normalized)
* - Output CSC (normalized)
*
- * But these aren't a direct mapping to DRM color properties. The current DRM
- * interface exposes CRTC degamma, CRTC CTM and CRTC regamma while our hardware
- * is essentially giving:
+ * But these aren't a direct mapping to DRM color properties. The
+ * current DRM interface exposes CRTC degamma, CRTC CTM and CRTC regamma
+ * while our hardware is essentially giving:
*
* Plane CTM -> Plane degamma -> Plane CTM -> Plane regamma -> Plane CTM
*
- * The input gamma LUT block isn't really applicable here since it operates
- * on the actual input data itself rather than the HW fp representation. The
- * input and output CSC blocks are technically available to use as part of
- * the DC interface but are typically used internally by DC for conversions
- * between color spaces. These could be blended together with user
- * adjustments in the future but for now these should remain untouched.
+ * The input gamma LUT block isn't really applicable here since it
+ * operates on the actual input data itself rather than the HW fp
+ * representation. The input and output CSC blocks are technically
+ * available to use as part of the DC interface but are typically used
+ * internally by DC for conversions between color spaces. These could be
+ * blended together with user adjustments in the future but for now
+ * these should remain untouched.
+ *
+ * The pipe blending also happens after these blocks so we don't
+ * actually support any CRTC props with correct blending with multiple
+ * planes - but we can still support CRTC color management properties in
+ * DM in most single plane cases correctly with clever management of the
+ * DC interface in DM.
+ *
+ * As per DRM documentation, blocks should be in hardware bypass when
+ * their respective property is set to NULL. A linear DGM/RGM LUT should
+ * also be considered as putting the respective block into bypass mode.
+ *
+ * This means that the following configuration is assumed to be the
+ * default:
+ *
+ * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ... CRTC
+ * DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass
+ *
+ * AMD Private Color Management on drm_plane
+ * -----------------------------------------
+ *
+ * The AMD private color management properties on a &drm_plane are:
+ *
+ * - AMD_PLANE_DEGAMMA_LUT
+ * - AMD_PLANE_DEGAMMA_LUT_SIZE
+ * - AMD_PLANE_DEGAMMA_TF
+ * - AMD_PLANE_HDR_MULT
+ * - AMD_PLANE_CTM
+ * - AMD_PLANE_SHAPER_LUT
+ * - AMD_PLANE_SHAPER_LUT_SIZE
+ * - AMD_PLANE_SHAPER_TF
+ * - AMD_PLANE_LUT3D
+ * - AMD_PLANE_LUT3D_SIZE
+ * - AMD_PLANE_BLEND_LUT
+ * - AMD_PLANE_BLEND_LUT_SIZE
+ * - AMD_PLANE_BLEND_TF
+ *
+ * The AMD private color management property on a &drm_crtc is:
*
- * The pipe blending also happens after these blocks so we don't actually
- * support any CRTC props with correct blending with multiple planes - but we
- * can still support CRTC color management properties in DM in most single
- * plane cases correctly with clever management of the DC interface in DM.
+ * - AMD_CRTC_REGAMMA_TF
*
- * As per DRM documentation, blocks should be in hardware bypass when their
- * respective property is set to NULL. A linear DGM/RGM LUT should also
- * considered as putting the respective block into bypass mode.
+ * Use of these properties is discouraged.
*
- * This means that the following
- * configuration is assumed to be the default:
+ * AMD plane color pipeline
+ * ------------------------
+ *
+ * The AMD &drm_plane color pipeline is advertised for DCN generations
+ * 3.0 and newer. It exposes these elements in this order:
+ *
+ * 1. 1D curve colorop
+ * 2. Multiplier
+ * 3. 3x4 CTM
+ * 4. 1D curve colorop
+ * 5. 1D LUT
+ * 6. 3D LUT
+ * 7. 1D curve colorop
+ * 8. 1D LUT
+ *
+ * The multiplier (#2) is a simple multiplier that is applied to all
+ * channels.
+ *
+ * The 3x4 CTM (#3) is a simple 3x4 matrix.
+ *
+ * #1 and #7 are non-linear-to-linear curves; #4 is a linear-to-non-linear
+ * curve. They support the sRGB, PQ, and BT.709/BT.2020 EOTFs or their
+ * inverses.
+ *
+ * The 1D LUTs (#5 and #8) are plain 4096 entry LUTs.
+ *
+ * The 3D LUT (#6) is a tetrahedrally interpolated 17x17x17 LUT.
*
- * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ...
- * CRTC DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass
*/
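
/*
 * Editor's note - illustrative sketch, not part of the patch: the
 * pipeline above is exposed as a linked list of &drm_colorop objects
 * that the driver walks in order, roughly:
 */
static void sketch_walk_pipeline(struct drm_colorop *op)
{
	for (; op; op = op->next) {
		/* program the DC block matching op->type, or bypass it */
	}
}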
#define MAX_DRM_LUT_VALUE 0xFFFF
+#define MAX_DRM_LUT32_VALUE 0xFFFFFFFF
#define SDR_WHITE_LEVEL_INIT_VALUE 80
/**
@@ -342,6 +426,21 @@ __extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size)
}
/**
+ * __extract_blob_lut32 - Extracts the DRM lut and lut size from a blob.
+ * @blob: DRM color mgmt property blob
+ * @size: lut size
+ *
+ * Returns:
+ * DRM LUT or NULL
+ */
+static const struct drm_color_lut32 *
+__extract_blob_lut32(const struct drm_property_blob *blob, uint32_t *size)
+{
+ *size = blob ? drm_color_lut32_size(blob) : 0;
+ return blob ? (struct drm_color_lut32 *)blob->data : NULL;
+}
+
+/**
* __is_lut_linear - check if the given lut is a linear mapping of values
* @lut: given lut to check values
* @size: lut size
@@ -415,6 +514,24 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut,
}
/**
+ * __drm_lut32_to_dc_gamma - convert a drm_color_lut32 to dc_gamma.
+ * @lut: DRM 32-bit lookup table for color conversion
+ * @gamma: DC gamma to set entries
+ *
+ * Unlike the 16-bit variant, this always converts MAX_COLOR_LUT_ENTRIES points.
+ */
+static void __drm_lut32_to_dc_gamma(const struct drm_color_lut32 *lut, struct dc_gamma *gamma)
+{
+ int i;
+
+ for (i = 0; i < MAX_COLOR_LUT_ENTRIES; i++) {
+ gamma->entries.red[i] = dc_fixpt_from_fraction(lut[i].red, MAX_DRM_LUT32_VALUE);
+ gamma->entries.green[i] = dc_fixpt_from_fraction(lut[i].green, MAX_DRM_LUT32_VALUE);
+ gamma->entries.blue[i] = dc_fixpt_from_fraction(lut[i].blue, MAX_DRM_LUT32_VALUE);
+ }
+}
+
+/**
* __drm_ctm_to_dc_matrix - converts a DRM CTM to a DC CSC float matrix
* @ctm: DRM color transformation matrix
* @matrix: DC CSC float matrix
@@ -566,12 +683,68 @@ static int __set_output_tf(struct dc_transfer_func *func,
return res ? 0 : -ENOMEM;
}
-static int amdgpu_dm_set_atomic_regamma(struct dc_stream_state *stream,
+/**
+ * __set_output_tf_32 - calculates the output transfer function based on expected input space.
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut
+ * @has_rom: if ROM can be used for hardcoded curve
+ *
+ * Returns:
+ * 0 in case of success. -ENOMEM if fails.
+ */
+static int __set_output_tf_32(struct dc_transfer_func *func,
+ const struct drm_color_lut32 *lut, uint32_t lut_size,
+ bool has_rom)
+{
+ struct dc_gamma *gamma = NULL;
+ struct calculate_buffer cal_buffer = {0};
+ bool res;
+
+ cal_buffer.buffer_index = -1;
+
+ if (lut_size) {
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ gamma->num_entries = lut_size;
+ __drm_lut32_to_dc_gamma(lut, gamma);
+ }
+
+ if (func->tf == TRANSFER_FUNCTION_LINEAR) {
+ /*
+ * Color module doesn't like calculating regamma params
+ * on top of a linear input. But degamma params can be used
+ * instead to simulate this.
+ */
+ if (gamma)
+ gamma->type = GAMMA_CUSTOM;
+ res = mod_color_calculate_degamma_params(NULL, func,
+ gamma, gamma != NULL);
+ } else {
+ /*
+ * Assume sRGB. The actual mapping will depend on whether the
+ * input was legacy or not.
+ */
+ if (gamma)
+ gamma->type = GAMMA_CS_TFM_1D;
+ res = mod_color_calculate_regamma_params(func, gamma, gamma != NULL,
+ has_rom, NULL, &cal_buffer);
+ }
+
+ if (gamma)
+ dc_gamma_release(&gamma);
+
+ return res ? 0 : -ENOMEM;
+}
+
+static int amdgpu_dm_set_atomic_regamma(struct dc_transfer_func *out_tf,
const struct drm_color_lut *regamma_lut,
uint32_t regamma_size, bool has_rom,
enum dc_transfer_func_predefined tf)
{
- struct dc_transfer_func *out_tf = &stream->out_transfer_func;
int ret = 0;
if (regamma_size || tf != TRANSFER_FUNCTION_LINEAR) {
@@ -639,6 +812,42 @@ static int __set_input_tf(struct dc_color_caps *caps, struct dc_transfer_func *f
return res ? 0 : -ENOMEM;
}
+/**
+ * __set_input_tf_32 - calculates the input transfer function based on expected
+ * input space.
+ * @caps: dc color capabilities
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut.
+ *
+ * Returns:
+ * 0 in case of success. -ENOMEM if fails.
+ */
+static int __set_input_tf_32(struct dc_color_caps *caps, struct dc_transfer_func *func,
+ const struct drm_color_lut32 *lut, uint32_t lut_size)
+{
+ struct dc_gamma *gamma = NULL;
+ bool res;
+
+ if (lut_size) {
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ gamma->type = GAMMA_CUSTOM;
+ gamma->num_entries = lut_size;
+
+ __drm_lut32_to_dc_gamma(lut, gamma);
+ }
+
+ res = mod_color_calculate_degamma_params(caps, func, gamma, gamma != NULL);
+
+ if (gamma)
+ dc_gamma_release(&gamma);
+
+ return res ? 0 : -ENOMEM;
+}
+
static enum dc_transfer_func_predefined
amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf)
{
@@ -668,6 +877,27 @@ amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf)
}
}
+static enum dc_transfer_func_predefined
+amdgpu_colorop_tf_to_dc_tf(enum drm_colorop_curve_1d_type tf)
+{
+ switch (tf) {
+ case DRM_COLOROP_1D_CURVE_SRGB_EOTF:
+ case DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF:
+ return TRANSFER_FUNCTION_SRGB;
+ case DRM_COLOROP_1D_CURVE_PQ_125_EOTF:
+ case DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF:
+ return TRANSFER_FUNCTION_PQ;
+ case DRM_COLOROP_1D_CURVE_BT2020_INV_OETF:
+ case DRM_COLOROP_1D_CURVE_BT2020_OETF:
+ return TRANSFER_FUNCTION_BT709;
+ case DRM_COLOROP_1D_CURVE_GAMMA22:
+ case DRM_COLOROP_1D_CURVE_GAMMA22_INV:
+ return TRANSFER_FUNCTION_GAMMA22;
+ default:
+ return TRANSFER_FUNCTION_LINEAR;
+ }
+}
+
static void __to_dc_lut3d_color(struct dc_rgb *rgb,
const struct drm_color_lut lut,
int bit_precision)
@@ -721,6 +951,59 @@ static void __drm_3dlut_to_dc_3dlut(const struct drm_color_lut *lut,
__to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth);
}
+static void __to_dc_lut3d_32_color(struct dc_rgb *rgb,
+ const struct drm_color_lut32 lut,
+ int bit_precision)
+{
+ rgb->red = drm_color_lut32_extract(lut.red, bit_precision);
+ rgb->green = drm_color_lut32_extract(lut.green, bit_precision);
+ rgb->blue = drm_color_lut32_extract(lut.blue, bit_precision);
+}
+
+static void __drm_3dlut32_to_dc_3dlut(const struct drm_color_lut32 *lut,
+ uint32_t lut3d_size,
+ struct tetrahedral_params *params,
+ bool use_tetrahedral_9,
+ int bit_depth)
+{
+ struct dc_rgb *lut0;
+ struct dc_rgb *lut1;
+ struct dc_rgb *lut2;
+ struct dc_rgb *lut3;
+ int lut_i, i;
+
+ if (use_tetrahedral_9) {
+ lut0 = params->tetrahedral_9.lut0;
+ lut1 = params->tetrahedral_9.lut1;
+ lut2 = params->tetrahedral_9.lut2;
+ lut3 = params->tetrahedral_9.lut3;
+ } else {
+ lut0 = params->tetrahedral_17.lut0;
+ lut1 = params->tetrahedral_17.lut1;
+ lut2 = params->tetrahedral_17.lut2;
+ lut3 = params->tetrahedral_17.lut3;
+ }
+
+ for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) {
+ /*
+ * We should consider the 3D LUT RGB values are distributed
+ * along four arrays lut0-3 where the first sizes 1229 and the
+ * other 1228. The bit depth supported for 3dlut channel is
+ * 12-bit, but DC also supports 10-bit.
+ *
+ * TODO: improve color pipeline API to enable the userspace set
+ * bit depth and 3D LUT size/stride, as specified by VA-API.
+ */
+ __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth);
+ __to_dc_lut3d_32_color(&lut1[lut_i], lut[i + 1], bit_depth);
+ __to_dc_lut3d_32_color(&lut2[lut_i], lut[i + 2], bit_depth);
+ __to_dc_lut3d_32_color(&lut3[lut_i], lut[i + 3], bit_depth);
+ }
+ /* lut0 has 1229 points (lut_size/4 + 1) */
+ __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth);
+}
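
/*
 * Editor's note - worked example for the 17x17x17 case: 17^3 = 4913
 * entries are striped across lut0..lut3 by the loop above; 4913 =
 * 1229 + 3 * 1228, so lut0 (indices 0, 4, 8, ...) ends up with 1229
 * points while lut1..lut3 hold 1228 each.
 */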
+
/* amdgpu_dm_atomic_lut3d - set DRM 3D LUT to DC stream
* @drm_lut3d: user 3D LUT
* @drm_lut3d_size: size of 3D LUT
@@ -821,7 +1104,7 @@ int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev,
struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
const struct drm_color_lut *shaper = NULL, *lut3d = NULL;
uint32_t exp_size, size, dim_size = MAX_COLOR_3DLUT_SIZE;
- bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut;
+ bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut || adev->dm.dc->caps.color.mpc.preblend;
/* shaper LUT is only available if 3D LUT color caps */
exp_size = has_3dlut ? MAX_COLOR_LUT_ENTRIES : 0;
@@ -885,33 +1168,33 @@ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
}
/**
- * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
+ * amdgpu_dm_check_crtc_color_mgmt: Check if DRM color props are programmable by DC.
* @crtc: amdgpu_dm crtc state
+ * @check_only: only check the color state without updating the dc stream
*
- * With no plane level color management properties we're free to use any
- * of the HW blocks as long as the CRTC CTM always comes before the
- * CRTC RGM and after the CRTC DGM.
- *
- * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear.
- * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear.
- * - The CRTC CTM will be placed in the gamut remap block if it is non-linear.
+ * This function only verifies the CRTC LUT sizes, whether there is enough
+ * space for the output transfer function, and whether its parameters can be
+ * calculated by the AMD color module. It also adjusts some settings for
+ * programming CRTC degamma at the plane stage, using the plane DGM block.
*
* The RGM block is typically more fully featured and accurate across
* all ASICs - DCE can't support a custom non-linear CRTC DGM.
*
* For supporting both plane level color management and CRTC level color
- * management at once we have to either restrict the usage of CRTC properties
- * or blend adjustments together.
+ * management at once we have to either restrict the usage of some CRTC
+ * properties or blend adjustments together.
*
* Returns:
- * 0 on success. Error code if setup fails.
+ * 0 on success. Error code if validation fails.
*/
-int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
+int amdgpu_dm_check_crtc_color_mgmt(struct dm_crtc_state *crtc,
+				    bool check_only)
{
struct dc_stream_state *stream = crtc->stream;
struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev);
bool has_rom = adev->asic_type <= CHIP_RAVEN;
- struct drm_color_ctm *ctm = NULL;
+ struct dc_transfer_func *out_tf;
const struct drm_color_lut *degamma_lut, *regamma_lut;
uint32_t degamma_size, regamma_size;
bool has_regamma, has_degamma;
@@ -940,6 +1223,14 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
crtc->cm_has_degamma = false;
crtc->cm_is_degamma_srgb = false;
+ if (check_only) {
+ out_tf = kvzalloc(sizeof(*out_tf), GFP_KERNEL);
+ if (!out_tf)
+ return -ENOMEM;
+ } else {
+ out_tf = &stream->out_transfer_func;
+ }
+
/* Setup regamma and degamma. */
if (is_legacy) {
/*
@@ -954,8 +1245,8 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
* inverse color ramp in legacy userspace.
*/
crtc->cm_is_degamma_srgb = true;
- stream->out_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS;
- stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
+ out_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ out_tf->tf = TRANSFER_FUNCTION_SRGB;
/*
* Note: although we pass has_rom as parameter here, we never
* actually use ROM because the color module only takes the ROM
@@ -963,16 +1254,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
*
* See more in mod_color_calculate_regamma_params()
*/
- r = __set_legacy_tf(&stream->out_transfer_func, regamma_lut,
+ r = __set_legacy_tf(out_tf, regamma_lut,
regamma_size, has_rom);
- if (r)
- return r;
} else {
regamma_size = has_regamma ? regamma_size : 0;
- r = amdgpu_dm_set_atomic_regamma(stream, regamma_lut,
+ r = amdgpu_dm_set_atomic_regamma(out_tf, regamma_lut,
regamma_size, has_rom, tf);
- if (r)
- return r;
}
/*
@@ -981,6 +1268,43 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
* have to place the CTM in the OCSC in that case.
*/
crtc->cm_has_degamma = has_degamma;
+ if (check_only)
+ kvfree(out_tf);
+
+ return r;
+}
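
/*
 * Editor's note - usage sketch, not part of the patch: atomic_check runs
 * the dry-run variant, while commit programs the stream through the
 * wrapper below:
 *
 *	ret = amdgpu_dm_check_crtc_color_mgmt(dm_new_crtc_state, true);
 *	...
 *	ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
 */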
+
+/**
+ * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
+ * @crtc: amdgpu_dm crtc state
+ *
+ * With no plane level color management properties we're free to use any
+ * of the HW blocks as long as the CRTC CTM always comes before the
+ * CRTC RGM and after the CRTC DGM.
+ *
+ * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear.
+ * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear.
+ * - The CRTC CTM will be placed in the gamut remap block if it is non-linear.
+ *
+ * The RGM block is typically more fully featured and accurate across
+ * all ASICs - DCE can't support a custom non-linear CRTC DGM.
+ *
+ * For supporting both plane level color management and CRTC level color
+ * management at once we have to either restrict the usage of CRTC properties
+ * or blend adjustments together.
+ *
+ * Returns:
+ * 0 on success. Error code if setup fails.
+ */
+int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
+{
+ struct dc_stream_state *stream = crtc->stream;
+ struct drm_color_ctm *ctm = NULL;
+ int ret;
+
+ ret = amdgpu_dm_check_crtc_color_mgmt(crtc, false);
+ if (ret)
+ return ret;
/* Setup CRTC CTM. */
if (crtc->base.ctm) {
@@ -1138,6 +1462,360 @@ __set_dm_plane_degamma(struct drm_plane_state *plane_state,
}
static int
+__set_colorop_in_tf_1d_curve(struct dc_plane_state *dc_plane_state,
+ struct drm_colorop_state *colorop_state)
+{
+ struct dc_transfer_func *tf = &dc_plane_state->in_transfer_func;
+ struct drm_colorop *colorop = colorop_state->colorop;
+ struct drm_device *drm = colorop->dev;
+
+ if (colorop->type != DRM_COLOROP_1D_CURVE)
+ return -EINVAL;
+
+ if (!(BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs))
+ return -EINVAL;
+
+ if (colorop_state->bypass) {
+ tf->type = TF_TYPE_BYPASS;
+ tf->tf = TRANSFER_FUNCTION_LINEAR;
+ return 0;
+ }
+
+ drm_dbg(drm, "Degamma colorop with ID: %d\n", colorop->base.id);
+
+ tf->type = TF_TYPE_PREDEFINED;
+ tf->tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
+
+ return 0;
+}
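
/*
 * Editor's note - assumed shape, not part of the patch: the
 * amdgpu_dm_supported_*_tfs masks tested above are bitmasks over
 * enum drm_colorop_curve_1d_type, e.g. something like:
 */
static const u64 sketch_supported_degam_tfs =
	BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) |
	BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) |
	BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF);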
+
+static int
+__set_dm_plane_colorop_degamma(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ int i = 0;
+
+ old_colorop = colorop;
+
+ /* 1st op: 1d curve - degamma */
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs)) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (!colorop_state)
+ return -EINVAL;
+
+ return __set_colorop_in_tf_1d_curve(dc_plane_state, colorop_state);
+}
+
+static int
+__set_dm_plane_colorop_3x4_matrix(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ const struct drm_device *dev = colorop->dev;
+ const struct drm_property_blob *blob;
+ struct drm_color_ctm_3x4 *ctm = NULL;
+ int i = 0;
+
+ /* 3x4 matrix */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_CTM_3X4) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_CTM_3X4) {
+ drm_dbg(dev, "3x4 matrix colorop with ID: %d\n", colorop->base.id);
+ blob = colorop_state->data;
+ if (blob->length == sizeof(struct drm_color_ctm_3x4)) {
+ ctm = (struct drm_color_ctm_3x4 *) blob->data;
+ __drm_ctm_3x4_to_dc_matrix(ctm, dc_plane_state->gamut_remap_matrix.matrix);
+ dc_plane_state->gamut_remap_matrix.enable_remap = true;
+ dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
+ } else {
+ drm_warn(dev, "blob->length (%zu) isn't equal to drm_color_ctm_3x4 (%zu)\n",
+ blob->length, sizeof(struct drm_color_ctm_3x4));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int
+__set_dm_plane_colorop_multiplier(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ const struct drm_device *dev = colorop->dev;
+ int i = 0;
+
+ /* Multiplier */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_MULTIPLIER) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_MULTIPLIER) {
+ drm_dbg(dev, "Multiplier colorop with ID: %d\n", colorop->base.id);
+ dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(colorop_state->multiplier);
+ }
+
+ return 0;
+}
+
+static int
+__set_dm_plane_colorop_shaper(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR;
+ struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func;
+ const struct drm_color_lut32 *shaper_lut;
+ struct drm_device *dev = colorop->dev;
+ bool enabled = false;
+ u32 shaper_size;
+ int i = 0, ret = 0;
+
+ /* 1D Curve - SHAPER TF */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_shaper_tfs)) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE) {
+ drm_dbg(dev, "Shaper TF colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ ret = __set_output_tf(tf, 0, 0, false);
+ if (ret)
+ return ret;
+ enabled = true;
+ }
+
+ /* 1D LUT - SHAPER LUT */
+ colorop = old_colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Shaper LUT colorop found\n");
+ return -EINVAL;
+ }
+
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT) {
+ drm_dbg(dev, "Shaper LUT colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf;
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ shaper_lut = __extract_blob_lut32(colorop_state->data, &shaper_size);
+ shaper_size = shaper_lut != NULL ? shaper_size : 0;
+
+ /* Custom LUT size must be the same as supported size */
+ if (shaper_size == colorop->size) {
+ ret = __set_output_tf_32(tf, shaper_lut, shaper_size, false);
+ if (ret)
+ return ret;
+ enabled = true;
+ }
+ }
+
+ if (!enabled)
+ tf->type = TF_TYPE_BYPASS;
+
+ return 0;
+}
+
+/* __set_colorop_3dlut - set DRM 3D LUT to DC stream
+ * @drm_lut3d: user 3D LUT
+ * @drm_lut3d_size: size of 3D LUT
+ * @lut3d: DC 3D LUT
+ *
+ * Map user 3D LUT data to DC 3D LUT and all necessary bits to program it
+ * on DCN accordingly.
+ *
+ * Returns:
+ * 0 on success. -EINVAL if drm_lut3d_size is zero.
+ */
+static int __set_colorop_3dlut(const struct drm_color_lut32 *drm_lut3d,
+ uint32_t drm_lut3d_size,
+ struct dc_3dlut *lut)
+{
+ if (!drm_lut3d_size) {
+ lut->state.bits.initialized = 0;
+ return -EINVAL;
+ }
+
+ /* Only supports 17x17x17 3D LUT (12-bit) now */
+ lut->lut_3d.use_12bits = true;
+ lut->lut_3d.use_tetrahedral_9 = false;
+
+ lut->state.bits.initialized = 1;
+ __drm_3dlut32_to_dc_3dlut(drm_lut3d, drm_lut3d_size, &lut->lut_3d,
+ lut->lut_3d.use_tetrahedral_9, 12);
+
+ return 0;
+}
+
+static int
+__set_dm_plane_colorop_3dlut(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func;
+ struct drm_atomic_state *state = plane_state->state;
+ const struct amdgpu_device *adev = drm_to_adev(colorop->dev);
+ const struct drm_device *dev = colorop->dev;
+ const struct drm_color_lut32 *lut3d;
+ uint32_t lut3d_size;
+ int i = 0, ret = 0;
+
+ /* 3D LUT */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_3D_LUT) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_3D_LUT) {
+ if (!adev->dm.dc->caps.color.dpp.hw_3d_lut) {
+ drm_dbg(dev, "3D LUT is not supported by hardware\n");
+ return -EINVAL;
+ }
+
+ drm_dbg(dev, "3D LUT colorop with ID: %d\n", colorop->base.id);
+ lut3d = __extract_blob_lut32(colorop_state->data, &lut3d_size);
+ lut3d_size = lut3d != NULL ? lut3d_size : 0;
+ ret = __set_colorop_3dlut(lut3d, lut3d_size, &dc_plane_state->lut3d_func);
+ if (ret) {
+ drm_dbg(dev, "3D LUT colorop with ID: %d has LUT size = %d\n",
+ colorop->base.id, lut3d_size);
+ return ret;
+ }
+
+ /* 3D LUT requires shaper. If shaper colorop is bypassed, enable shaper curve
+ * with TRANSFER_FUNCTION_LINEAR
+ */
+ if (tf->type == TF_TYPE_BYPASS) {
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = TRANSFER_FUNCTION_LINEAR;
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ ret = __set_output_tf_32(tf, NULL, 0, false);
+ }
+ }
+
+ return ret;
+}
+
+static int
+__set_dm_plane_colorop_blend(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR;
+ struct dc_transfer_func *tf = &dc_plane_state->blend_tf;
+ const struct drm_color_lut32 *blend_lut = NULL;
+ struct drm_device *dev = colorop->dev;
+ uint32_t blend_size = 0;
+ int i = 0;
+
+ /* 1D Curve - BLND TF */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE &&
+ (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
+ drm_dbg(dev, "Blend TF colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ __set_input_tf_32(NULL, tf, blend_lut, blend_size);
+ }
+
+ /* 1D LUT - BLND LUT */
+ colorop = old_colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Blend LUT colorop found\n");
+ return -EINVAL;
+ }
+
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT &&
+ (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
+ drm_dbg(dev, "Blend LUT colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf;
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ blend_lut = __extract_blob_lut32(colorop_state->data, &blend_size);
+ blend_size = blend_lut != NULL ? blend_size : 0;
+
+ /* A custom LUT is applied only when its size matches the advertised colorop size */
+ if (blend_size == colorop->size)
+ __set_input_tf_32(NULL, tf, blend_lut, blend_size);
+ }
+
+ return 0;
+}
+
+static int
amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state)
{
@@ -1187,6 +1865,93 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
return 0;
}
+static int
+amdgpu_dm_plane_set_colorop_properties(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state)
+{
+ struct drm_colorop *colorop = plane_state->color_pipeline;
+ struct drm_device *dev = plane_state->plane->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int ret;
+
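+ /*
+ * Walk the fixed colorop pipeline in creation order: DEGAM TF ->
+ * Multiplier -> 3x4 CTM -> (Shaper TF -> Shaper LUT -> 3D LUT, when
+ * the HW has a 3D LUT) -> Blend TF -> Blend LUT.
+ */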
+ /* 1D Curve - DEGAM TF */
+ if (!colorop)
+ return -EINVAL;
+
+ ret = __set_dm_plane_colorop_degamma(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* Multiplier */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no multiplier colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_multiplier(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* 3x4 matrix */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no 3x4 matrix colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_3x4_matrix(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ if (adev->dm.dc->caps.color.dpp.hw_3d_lut) {
+ /* 1D Curve & LUT - SHAPER TF & LUT */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Shaper TF colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_shaper(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* The shaper LUT colorop was already handled above, just skip it here */
+ colorop = colorop->next;
+ if (!colorop)
+ return -EINVAL;
+
+ /* 3D LUT */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no 3D LUT colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_3dlut(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+ }
+
+ /* 1D Curve & LUT - BLND TF & LUT */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Blend TF colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_blend(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* The blend LUT colorop was already handled above, just skip it here */
+ colorop = colorop->next;
+ if (!colorop)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
* amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane.
* @crtc: amdgpu_dm crtc state
@@ -1283,5 +2048,8 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
}
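+
+ /* Prefer the colorop pipeline; fall back to legacy color properties on error */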
+ if (!amdgpu_dm_plane_set_colorop_properties(plane_state, dc_plane_state))
+ return 0;
+
return amdgpu_dm_plane_set_color_properties(plane_state, dc_plane_state);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
new file mode 100644
index 000000000000..d585618b8064
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <drm/drm_print.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_property.h>
+#include <drm/drm_colorop.h>
+
+#include "amdgpu.h"
+#include "amdgpu_dm_colorop.h"
+#include "dc.h"
+
+const u64 amdgpu_dm_supported_degam_tfs =
+ BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) |
+ BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV);
+
+const u64 amdgpu_dm_supported_shaper_tfs =
+ BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_BT2020_OETF) |
+ BIT(DRM_COLOROP_1D_CURVE_GAMMA22);
+
+const u64 amdgpu_dm_supported_blnd_tfs =
+ BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) |
+ BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV);
+
+#define MAX_COLOR_PIPELINE_OPS 10
+
+#define LUT3D_SIZE 17
+
+int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list)
+{
+ struct drm_colorop *ops[MAX_COLOR_PIPELINE_OPS];
+ struct drm_device *dev = plane->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int ret;
+ int i = 0;
+
+ memset(ops, 0, sizeof(ops));
+
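+ /*
+ * Build the default plane color pipeline. Ops are chained via their
+ * NEXT property; any failure below tears down what was created so far.
+ */
+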
+ /* 1D curve - DEGAM TF */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
+ amdgpu_dm_supported_degam_tfs,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ list->type = ops[i]->base.id;
+ list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id);
+
+ i++;
+
+ /* Multiplier */
+ ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_mult_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+
+ /* 3x4 matrix */
+ ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+
+ if (adev->dm.dc->caps.color.dpp.hw_3d_lut) {
+ /* 1D curve - SHAPER TF */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
+ amdgpu_dm_supported_shaper_tfs,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+
+ /* 1D LUT - SHAPER LUT */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES,
+ DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+
+ /* 3D LUT */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_3dlut_init(dev, ops[i], plane, LUT3D_SIZE,
+ DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+ }
+
+ /* 1D curve - BLND TF */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
+ amdgpu_dm_supported_blnd_tfs,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i - 1], ops[i]);
+
+ i++;
+
+ /* 1D LUT - BLND LUT */
+ ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES,
+ DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+ return 0;
+
+cleanup:
+ if (ret == -ENOMEM)
+ drm_err(plane->dev, "KMS: Failed to allocate colorop\n");
+
+ drm_colorop_pipeline_destroy(dev);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h
new file mode 100644
index 000000000000..2e1617ffc8ee
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_COLOROP_H__
+#define __AMDGPU_DM_COLOROP_H__
+
+extern const u64 amdgpu_dm_supported_degam_tfs;
+extern const u64 amdgpu_dm_supported_shaper_tfs;
+extern const u64 amdgpu_dm_supported_blnd_tfs;
+
+int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list);
+
+#endif /* __AMDGPU_DM_COLOROP_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 033bd817d871..e20aa7438066 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 3da056c8d20b..95bdb8699d7f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 87058271b00c..697e232acebf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -218,8 +218,10 @@ static void amdgpu_dm_idle_worker(struct work_struct *work)
break;
}
- if (idle_work->enable)
+ if (idle_work->enable) {
+ dc_post_update_surfaces_to_stream(idle_work->dm->dc);
dc_allow_idle_optimizations(idle_work->dm->dc, true);
+ }
mutex_unlock(&idle_work->dm->dc_lock);
}
idle_work->dm->idle_workqueue->running = false;
@@ -246,6 +248,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
+ struct amdgpu_device *adev = drm_to_adev(dm->ddev);
+ int r;
mutex_lock(&dm->dc_lock);
@@ -273,9 +277,20 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
vblank_work->acrtc->dm_irq_params.allow_sr_entry);
}
- if (dm->active_vblank_irq_count == 0)
+ if (dm->active_vblank_irq_count == 0) {
+ dc_post_update_surfaces_to_stream(dm->dc);
+
+ r = amdgpu_dpm_pause_power_profile(adev, true);
+ if (r)
+ dev_warn(adev->dev, "failed to set default power profile mode\n");
+
dc_allow_idle_optimizations(dm->dc, true);
+ r = amdgpu_dpm_pause_power_profile(adev, false);
+ if (r)
+ dev_warn(adev->dev, "failed to restore the power profile mode\n");
+ }
+
mutex_unlock(&dm->dc_lock);
dc_stream_release(vblank_work->stream);
@@ -293,18 +308,45 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
int irq_type;
int rc = 0;
- if (acrtc->otg_inst == -1)
- goto skip;
+ if (enable && !acrtc->base.enabled) {
+ drm_dbg_vbl(crtc->dev,
+ "Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n",
+ acrtc->crtc_id, acrtc->base.enabled);
+ return -EINVAL;
+ }
irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
if (enable) {
- /* vblank irq on -> Only need vupdate irq in vrr mode */
- if (amdgpu_dm_crtc_vrr_active(acrtc_state))
- rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
- } else {
- /* vblank irq off -> vupdate irq off */
- rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
+ struct dc *dc = adev->dm.dc;
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+ struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
+ struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
+ bool sr_supported = (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED) ||
+ pr->config.replay_supported;
+
+ /*
+ * IPS & self-refresh feature can cause vblank counter resets between
+ * vblank disable and enable.
+ * It may cause system stuck due to waiting for the vblank counter.
+ * Call this function to estimate missed vblanks by using timestamps and
+ * update the vblank counter in DRM.
+ */
+ if (dc->caps.ips_support &&
+ dc->config.disable_ips != DMUB_IPS_DISABLE_ALL &&
+ sr_supported && vblank->config.disable_immediate)
+ drm_crtc_vblank_restore(crtc);
+ }
+
+ if (dc_supports_vrr(dm->dc->ctx->dce_version)) {
+ if (enable) {
+ /* vblank irq on -> Only need vupdate irq in vrr mode */
+ if (amdgpu_dm_crtc_vrr_active(acrtc_state))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
+ } else {
+ /* vblank irq off -> vupdate irq off */
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
+ }
}
if (rc)
@@ -356,7 +398,7 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
return rc;
}
#endif
-skip:
+
if (amdgpu_in_reset(adev))
return 0;
@@ -661,6 +703,15 @@ static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
+ if (!state->legacy_cursor_update && amdgpu_dm_crtc_vrr_active(dm_crtc_state)) {
+ struct drm_plane_state *primary_state;
+
+ /* Pull in primary plane for correct VRR handling */
+ primary_state = drm_atomic_get_plane_state(state, crtc->primary);
+ if (IS_ERR(primary_state))
+ return PTR_ERR(primary_state);
+ }
+
/* In some use cases, like reset, no stream is attached */
if (!dm_crtc_state->stream)
return 0;
@@ -685,7 +736,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
{
struct amdgpu_crtc *acrtc = NULL;
struct drm_plane *cursor_plane;
- bool is_dcn;
+ bool has_degamma;
int res = -ENOMEM;
cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
@@ -724,11 +775,18 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
dm->adev->mode_info.crtcs[crtc_index] = acrtc;
- /* Don't enable DRM CRTC degamma property for DCE since it doesn't
- * support programmable degamma anywhere.
+ /* Don't enable the DRM CRTC degamma property when:
+ * 1. degamma is replaced by the color pipeline,
+ * 2. on DCE, since it doesn't support programmable degamma anywhere,
+ * 3. on DCN401, since the pre-blending degamma LUT doesn't apply to the cursor.
*/
- is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch;
- drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0,
+ if (plane->color_pipeline_property)
+ has_degamma = false;
+ else
+ has_degamma = dm->adev->dm.dc->caps.color.dpp.dcn_arch &&
+ dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01;
+
+ drm_crtc_enable_color_mgmt(&acrtc->base, has_degamma ? MAX_COLOR_LUT_ENTRIES : 0,
true, MAX_COLOR_LUT_ENTRIES);
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index c7d13e743e6c..a9839485f2a2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -758,6 +759,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
int max_param_num = 11;
enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
bool disable_hpd = false;
+ bool supports_hpd = link->irq_source_hpd != DC_IRQ_SOURCE_INVALID;
bool valid_test_pattern = false;
uint8_t param_nums = 0;
/* init with default 80bit custom pattern */
@@ -849,7 +851,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
* because it might have been disabled after a test pattern was set.
* AUX depends on HPD; sequence dependent, do not move!
*/
- if (!disable_hpd)
+ if (supports_hpd && !disable_hpd)
dc_link_enable_hpd(link);
prefer_link_settings.lane_count = link->verified_link_cap.lane_count;
@@ -887,7 +889,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
* Need disable interrupt to avoid SW driver disable DP output. This is
* done after the test pattern is set.
*/
- if (valid_test_pattern && disable_hpd)
+ if (valid_test_pattern && supports_hpd && disable_hpd)
dc_link_disable_hpd(link);
kfree(wr_buf);
@@ -1301,7 +1303,8 @@ static int odm_combine_segments_show(struct seq_file *m, void *unused)
if (connector->status != connector_status_connected)
return -ENODEV;
- if (pipe_ctx != NULL && pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments)
+ if (pipe_ctx && pipe_ctx->stream_res.tg &&
+ pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments)
pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments(pipe_ctx->stream_res.tg, &segments);
seq_printf(m, "%d\n", segments);
@@ -3106,6 +3109,35 @@ static int replay_get_state(void *data, u64 *val)
}
/*
+ * Start/stop Replay residency capture
+ */
+static int replay_set_residency(void *data, u64 val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ bool is_start = (val != 0);
+ u32 residency = 0;
+
+ link->dc->link_srv->edp_replay_residency(link, &residency, is_start, PR_RESIDENCY_MODE_PHY);
+ return 0;
+}
+
+/*
+ * Read Replay residency
+ */
+static int replay_get_residency(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ u32 residency = 0;
+
+ link->dc->link_srv->edp_replay_residency(link, &residency, false, PR_RESIDENCY_MODE_PHY);
+ *val = (u64)residency;
+
+ return 0;
+}
+
+/*
* Read PSR state
*/
static int psr_get(void *data, u64 *val)
@@ -3324,7 +3356,8 @@ DEFINE_DEBUGFS_ATTRIBUTE(dmcub_trace_event_state_fops, dmcub_trace_event_state_g
dmcub_trace_event_state_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(replay_state_fops, replay_get_state, NULL, "%llu\n");
-
+DEFINE_DEBUGFS_ATTRIBUTE(replay_residency_fops, replay_get_residency, replay_set_residency,
+ "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(psr_residency_fops, psr_read_residency, NULL,
"%llu\n");
@@ -3502,6 +3535,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
debugfs_create_file("replay_capability", 0444, dir, connector,
&replay_capability_fops);
debugfs_create_file("replay_state", 0444, dir, connector, &replay_state_fops);
+ debugfs_create_file_unsafe("replay_residency", 0444, dir,
+ connector, &replay_residency_fops);
debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops);
debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
debugfs_create_file_unsafe("psr_residency", 0444, dir,
@@ -3988,7 +4023,7 @@ static int capabilities_show(struct seq_file *m, void *unused)
struct hubbub *hubbub = dc->res_pool->hubbub;
- if (hubbub->funcs->get_mall_en)
+ if (hubbub && hubbub->funcs->get_mall_en)
hubbub->funcs->get_mall_en(hubbub, &mall_in_use);
if (dc->cap_funcs.get_subvp_en)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
index 071200473c27..122cdc124b3b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index c16962256514..85ce558cefc5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
@@ -200,6 +201,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
struct mod_hdcp_link_adjustment link_adjust;
struct mod_hdcp_display_adjustment display_adjust;
unsigned int conn_index = aconnector->base.index;
+ const struct dc *dc = aconnector->dc_link->dc;
guard(mutex)(&hdcp_w->mutex);
drm_connector_get(&aconnector->base);
@@ -222,6 +224,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
display_adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE;
link_adjust.auth_delay = 2;
+ link_adjust.retry_limit = MAX_NUM_OF_ATTEMPTS;
if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) {
link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
@@ -229,6 +232,9 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
link_adjust.hdcp1.disable = 1;
link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1;
}
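+ /* Do the HDCP 2.x locality check in FW when fused IO is supported or forced */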
+ link_adjust.hdcp2.use_fw_locality_check =
+ (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable);
+ link_adjust.hdcp2.use_sw_locality_fallback = dc->debug.hdcp_lc_enable_sw_fallback;
schedule_delayed_work(&hdcp_w->property_validate_dwork,
msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
@@ -532,6 +538,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
struct dc_sink *sink = NULL;
bool link_is_hdcp14 = false;
+ const struct dc *dc = aconnector->dc_link->dc;
if (config->dpms_off) {
hdcp_remove_display(hdcp_work, link_index, aconnector);
@@ -571,7 +578,10 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->dp.usb4_enabled = config->usb4_enabled;
display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
link->adjust.auth_delay = 2;
+ link->adjust.retry_limit = MAX_NUM_OF_ATTEMPTS;
link->adjust.hdcp1.disable = 0;
+ link->adjust.hdcp2.use_fw_locality_check = (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable);
+ link->adjust.hdcp2.use_sw_locality_fallback = dc->debug.hdcp_lc_enable_sw_fallback;
hdcp_w->encryption_status[display->index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index,
@@ -723,8 +733,8 @@ ret:
static const struct bin_attribute data_attr = {
.attr = {.name = "hdcp_srm", .mode = 0664},
.size = PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, /* Limit SRM size */
- .write_new = srm_data_write,
- .read_new = srm_data_read,
+ .write = srm_data_write,
+ .read = srm_data_read,
};
struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
@@ -765,29 +775,26 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
struct mod_hdcp_ddc_funcs *ddc_funcs = &config->ddc.funcs;
config->psp.handle = &adev->psp;
- if (dc->ctx->dce_version == DCN_VERSION_3_1 ||
+ if (dc->ctx->dce_version == DCN_VERSION_3_1 ||
dc->ctx->dce_version == DCN_VERSION_3_14 ||
dc->ctx->dce_version == DCN_VERSION_3_15 ||
- dc->ctx->dce_version == DCN_VERSION_3_5 ||
+ dc->ctx->dce_version == DCN_VERSION_3_16 ||
+ dc->ctx->dce_version == DCN_VERSION_3_2 ||
+ dc->ctx->dce_version == DCN_VERSION_3_21 ||
+ dc->ctx->dce_version == DCN_VERSION_3_5 ||
dc->ctx->dce_version == DCN_VERSION_3_51 ||
- dc->ctx->dce_version == DCN_VERSION_3_6 ||
- dc->ctx->dce_version == DCN_VERSION_3_16)
+ dc->ctx->dce_version == DCN_VERSION_3_6 ||
+ dc->ctx->dce_version == DCN_VERSION_4_01)
config->psp.caps.dtm_v3_supported = 1;
+
config->ddc.handle = dc_get_link_at_index(dc, i);
ddc_funcs->write_i2c = lp_write_i2c;
ddc_funcs->read_i2c = lp_read_i2c;
ddc_funcs->write_dpcd = lp_write_dpcd;
ddc_funcs->read_dpcd = lp_read_dpcd;
-
- config->debug.lc_enable_sw_fallback = dc->debug.hdcp_lc_enable_sw_fallback;
- if (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable) {
- ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c;
- ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux;
- } else {
- ddc_funcs->atomic_write_poll_read_i2c = NULL;
- ddc_funcs->atomic_write_poll_read_aux = NULL;
- }
+ ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c;
+ ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux;
memset(hdcp_work[i].aconnector, 0,
sizeof(struct amdgpu_dm_connector *) *
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index 69b445b011c8..4faa344f196e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index d4395b92fb85..ac98c746c3de 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -82,6 +83,7 @@ static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
+ case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171):
drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.disable_colorimetry = true;
break;
@@ -129,6 +131,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
edid_caps->serial_number = edid_buf->serial;
edid_caps->manufacture_week = edid_buf->mfg_week;
edid_caps->manufacture_year = edid_buf->mfg_year;
+ edid_caps->analog = !(edid_buf->input & DRM_EDID_INPUT_DIGITAL);
drm_edid_get_monitor_name(edid_buf,
edid_caps->display_name,
@@ -995,8 +998,8 @@ enum dc_edid_status dm_helpers_read_local_edid(
struct amdgpu_dm_connector *aconnector = link->priv;
struct drm_connector *connector = &aconnector->base;
struct i2c_adapter *ddc;
- int retry = 3;
- enum dc_edid_status edid_status;
+ int retry = 25;
+ enum dc_edid_status edid_status = EDID_NO_RESPONSE;
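+ /* Up to 25 attempts: retry on both bad checksum and no response (see loop) */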
const struct drm_edid *drm_edid;
const struct edid *edid;
@@ -1026,9 +1029,13 @@ enum dc_edid_status dm_helpers_read_local_edid(
}
if (!drm_edid)
- return EDID_NO_RESPONSE;
+ continue;
edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
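+ /* Reject EDIDs whose extension count would overflow dc_edid.raw_edid */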
+ if (!edid ||
+ edid->extensions >= sizeof(sink->dc_edid.raw_edid) / EDID_LENGTH)
+ return EDID_BAD_INPUT;
+
sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);
@@ -1040,7 +1047,7 @@ enum dc_edid_status dm_helpers_read_local_edid(
&sink->dc_edid,
&sink->edid_caps);
- } while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);
+ } while ((edid_status == EDID_BAD_CHECKSUM || edid_status == EDID_NO_RESPONSE) && --retry > 0);
if (edid_status != EDID_OK)
DRM_ERROR("EDID err: %d, on connector: %s",
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index b61e210f6246..0a2a3f233a0e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -475,6 +476,7 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
void amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
+ struct drm_device *dev = adev_to_drm(adev);
int src;
struct list_head *hnd_list_h;
struct list_head *hnd_list_l;
@@ -511,6 +513,9 @@ void amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_disable(dev);
}
void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
@@ -536,6 +541,7 @@ void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
+ struct drm_device *dev = adev_to_drm(adev);
int src;
struct list_head *hnd_list_h, *hnd_list_l;
unsigned long irq_table_flags;
@@ -556,6 +562,9 @@ void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(dev);
}
/*
@@ -892,6 +901,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct drm_connector_list_iter iter;
int irq_type;
int i;
+ bool use_polling = false;
/* First, clear all hpd and hpdrx interrupts */
for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) {
@@ -905,6 +915,8 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct amdgpu_dm_connector *amdgpu_dm_connector;
const struct dc_link *dc_link;
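+
+ /* Any connector not serviced purely by HPD interrupts needs the poll helper */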
+ use_polling |= connector->polled != DRM_CONNECTOR_POLL_HPD;
+
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
@@ -946,6 +958,9 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ if (use_polling)
+ drm_kms_helper_poll_init(dev);
}
/**
@@ -996,4 +1011,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_fini(dev);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
index ba17c23b2706..4f6b58f4f90d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
index 6c9de834455b..3c9995275cbd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 25e8befbcc47..dbd1da4d85d3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
@@ -107,7 +108,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
if (payload.write && result >= 0) {
if (result) {
/*one byte indicating partially written bytes*/
- drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX partially written\n");
+ drm_dbg_dp(adev_to_drm(adev), "AUX partially written\n");
result = payload.data[0];
} else if (!payload.reply[0])
/*I2C_ACK|AUX_ACK*/
@@ -133,11 +134,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
break;
}
- drm_dbg_dp(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
+ drm_dbg_dp(adev_to_drm(adev), "DP AUX transfer fail:%d\n", operation_result);
}
if (payload.reply[0])
- drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
+ drm_dbg_dp(adev_to_drm(adev), "AUX reply command not ACK: 0x%02x.",
payload.reply[0]);
return result;
@@ -329,6 +330,34 @@ static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnect
return true;
}
+static bool retrieve_branch_specific_data(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_dp_mst_port *port = aconnector->mst_output_port;
+ struct drm_dp_mst_port *port_parent;
+ struct drm_dp_aux *immediate_upstream_aux;
+ struct drm_dp_desc branch_desc;
+
+ if (!port->parent)
+ return false;
+
+ port_parent = port->parent->port_parent;
+
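+ /* A NULL port_parent means the parent branch is the root MSTB; use the mgr's AUX */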
+ immediate_upstream_aux = port_parent ? &port_parent->aux : port->mgr->aux;
+
+ if (drm_dp_read_desc(immediate_upstream_aux, &branch_desc, true))
+ return false;
+
+ aconnector->branch_ieee_oui = (branch_desc.ident.oui[0] << 16) +
+ (branch_desc.ident.oui[1] << 8) +
+ (branch_desc.ident.oui[2]);
+
+ drm_dbg_dp(port->aux.drm_dev, "MST branch oui 0x%x detected at %s\n",
+ aconnector->branch_ieee_oui, connector->name);
+
+ return true;
+}
+
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -668,6 +697,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_connector_set_path_property(connector, pathprop);
+ if (!retrieve_branch_specific_data(aconnector))
+ aconnector->branch_ieee_oui = 0;
+
/*
* Initialize connector state before adding the connector to drm and
* framebuffer lists
@@ -809,6 +841,7 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
&aconnector->base);
+ drm_dp_dpcd_set_probe(&aconnector->dm_dp_aux.aux, false);
if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
return;
@@ -821,13 +854,20 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
drm_connector_attach_dp_subconnector_property(&aconnector->base);
}
-int dm_mst_get_pbn_divider(struct dc_link *link)
+uint32_t dm_mst_get_pbn_divider(struct dc_link *link)
{
+ uint32_t pbn_div_x100;
+ uint64_t dividend, divisor;
+
if (!link)
return 0;
- return dc_link_bandwidth_kbps(link,
- dc_link_get_link_cap(link)) / (8 * 1000 * 54);
+ dividend = (uint64_t)dc_link_bandwidth_kbps(link, dc_link_get_link_cap(link)) * 100;
+ divisor = 8 * 1000 * 54;
+
+ pbn_div_x100 = div64_u64(dividend, divisor);
+
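+ /*
+ * Keep two decimal places through the division, then convert to the
+ * fixed-point (fixed20_12) format the MST code expects.
+ */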
+ return dfixed_const(pbn_div_x100) / 100;
}
struct dsc_mst_fairness_params {
@@ -844,26 +884,28 @@ struct dsc_mst_fairness_params {
};
#if defined(CONFIG_DRM_AMD_DC_FP)
-static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
+static uint64_t kbps_to_pbn(int kbps, bool is_peak_pbn)
{
- u8 link_coding_cap;
- uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
+ uint64_t effective_kbps = (uint64_t)kbps;
- link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
- if (link_coding_cap == DP_128b_132b_ENCODING)
- fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
+ if (is_peak_pbn) { // add 0.6% (1006/1000) overhead into effective kbps
+ effective_kbps *= 1006;
+ effective_kbps = div_u64(effective_kbps, 1000);
+ }
- return fec_overhead_multiplier_x1000;
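+ /* e.g. 432000 kbps -> ceil(432000 * 64 / (54 * 8 * 1000)) = 64 PBN */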
+ return (uint64_t) DIV64_U64_ROUND_UP(effective_kbps * 64, (54 * 8 * 1000));
}
-static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
+static uint32_t pbn_to_kbps(unsigned int pbn, bool with_margin)
{
- u64 peak_kbps = kbps;
+ uint64_t pbn_effective = (uint64_t)pbn;
+
+ if (with_margin) // deduct 0.6% (994/1000) overhead from effective pbn
+ pbn_effective *= (1000000 / PEAK_FACTOR_X1000);
+ else
+ pbn_effective *= 1000;
- peak_kbps *= 1006;
- peak_kbps *= fec_overhead_multiplier_x1000;
- peak_kbps = div_u64(peak_kbps, 1000 * 1000);
- return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
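+ /* e.g. 64 PBN without margin -> 64 * 1000 * 8 * 54 / 64 = 432000 kbps */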
+ return DIV_U64_ROUND_UP(pbn_effective * 8 * 54, 64);
}
static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
@@ -934,7 +976,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options);
dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;
- kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
+ kbps = pbn_to_kbps(pbn, false);
dc_dsc_compute_config(
param.sink->ctx->dc->res_pool->dscs[0],
&param.sink->dsc_caps.dsc_dec_caps,
@@ -963,12 +1005,11 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
int ret = 0;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
initial_slack[i] =
- kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
+ kbps_to_pbn(params[i].bw_range.max_kbps, false) - vars[i + k].pbn;
bpp_increased[i] = false;
remaining_to_increase += 1;
} else {
@@ -1064,7 +1105,6 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int next_index;
int remaining_to_try = 0;
int ret;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
int var_pbn;
for (i = 0; i < count; i++) {
@@ -1097,7 +1137,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
var_pbn = vars[next_index].pbn;
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[next_index].pbn = kbps_to_pbn(params[next_index].bw_range.stream_kbps, true);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -1157,7 +1197,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int count = 0;
int i, k, ret;
bool debugfs_overwrite = false;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
struct drm_connector_state *new_conn_state;
memset(params, 0, sizeof(params));
@@ -1238,7 +1277,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC Try no compression\n");
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -1260,7 +1299,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC Try max compression\n");
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.min_kbps, false);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1268,7 +1307,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret < 0)
return ret;
} else {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1723,18 +1762,6 @@ clean_exit:
return ret;
}
-static uint32_t kbps_from_pbn(unsigned int pbn)
-{
- uint64_t kbps = (uint64_t)pbn;
-
- kbps *= (1000000 / PEAK_FACTOR_X1000);
- kbps *= 8;
- kbps *= 54;
- kbps /= 64;
-
- return (uint32_t)kbps;
-}
-
static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
struct dc_dsc_bw_range *bw_range)
{
@@ -1762,14 +1789,20 @@ static bool dp_get_link_current_set_bw(struct drm_dp_aux *aux, uint32_t *cur_lin
union lane_count_set lane_count;
u8 dp_link_encoding;
u8 link_bw_set = 0;
+ u8 data[16] = {0};
*cur_link_bw = 0;
- if (drm_dp_dpcd_read(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, &dp_link_encoding, 1) != 1 ||
- drm_dp_dpcd_read(aux, DP_LANE_COUNT_SET, &lane_count.raw, 1) != 1 ||
- drm_dp_dpcd_read(aux, DP_LINK_BW_SET, &link_bw_set, 1) != 1)
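+ /* Grab LINK_BW_SET through MAIN_LINK_CHANNEL_CODING_SET in one 16-byte AUX read */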
+ if (drm_dp_dpcd_read(aux, DP_LINK_BW_SET, data, 16) != 16)
return false;
+ dp_link_encoding = data[DP_MAIN_LINK_CHANNEL_CODING_SET - DP_LINK_BW_SET];
+ link_bw_set = data[DP_LINK_BW_SET - DP_LINK_BW_SET];
+ lane_count.raw = data[DP_LANE_COUNT_SET - DP_LINK_BW_SET];
+
+ drm_dbg_dp(aux->drm_dev, "MST_DSC downlink setting: %d, 0x%x x %d\n",
+ dp_link_encoding, link_bw_set, lane_count.bits.LANE_COUNT_SET);
+
switch (dp_link_encoding) {
case DP_8b_10b_ENCODING:
link_rate = link_bw_set;
@@ -1827,7 +1860,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
dc_link_get_highest_encoding_format(stream->link));
cur_link_settings = stream->link->verified_link_cap;
root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
- virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
+ virtual_channel_bw_in_kbps = pbn_to_kbps(aconnector->mst_output_port->full_pbn, true);
/* pick the end to end bw bottleneck */
end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
@@ -1866,8 +1899,10 @@ enum dc_status dm_dp_mst_is_port_support_mode(
end_link_bw = aconnector->mst_local_bw;
}
- if (end_link_bw > 0 && stream_kbps > end_link_bw) {
- DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
+ if (end_link_bw > 0 &&
+ stream_kbps > end_link_bw &&
+ aconnector->branch_ieee_oui != DP_BRANCH_DEVICE_ID_90CC24) {
+ DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. "
"Mode required bw can't fit into last link\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
@@ -1878,7 +1913,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
immediate_upstream_port = aconnector->mst_output_port->parent->port_parent;
if (immediate_upstream_port) {
- virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
+ virtual_channel_bw_in_kbps = pbn_to_kbps(immediate_upstream_port->full_pbn, true);
virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
} else {
/* For topology LCT 1 case - only one mstb*/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 600d6e221011..6f7ea684b555 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
@@ -59,7 +60,7 @@ enum mst_msg_ready_type {
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
-int dm_mst_get_pbn_divider(struct dc_link *link);
+uint32_t dm_mst_get_pbn_divider(struct dc_link *link);
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index b7c6e8d13435..2e3ee78999d9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -37,6 +37,7 @@
#include "amdgpu_display.h"
#include "amdgpu_dm_trace.h"
#include "amdgpu_dm_plane.h"
+#include "amdgpu_dm_colorop.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
@@ -92,9 +93,9 @@ enum dm_micro_swizzle {
MICRO_SWIZZLE_R = 3
};
-const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
+const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier)
{
- return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
+ return amdgpu_lookup_format_info(pixel_format, modifier);
}
void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
@@ -146,7 +147,7 @@ static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64
if (*cap - *size < 1) {
uint64_t new_cap = *cap * 2;
- uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
+ uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
if (!new_mods) {
kfree(*mods);
@@ -732,7 +733,7 @@ static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsig
if (adev->family < AMDGPU_FAMILY_AI)
return 0;
- *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
+ *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
if (plane_type == DRM_PLANE_TYPE_CURSOR) {
amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
@@ -1633,7 +1634,7 @@ dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm,
drm_object_attach_property(&plane->base,
dm->adev->mode_info.plane_ctm_property, 0);
- if (dpp_color_caps.hw_3d_lut) {
+ if (dpp_color_caps.hw_3d_lut || dm->dc->caps.color.mpc.preblend) {
drm_object_attach_property(&plane->base,
mode_info.plane_shaper_lut_property, 0);
drm_object_attach_property(&plane->base,
@@ -1782,6 +1783,39 @@ dm_atomic_plane_get_property(struct drm_plane *plane,
return 0;
}
+#else
+
+#define MAX_COLOR_PIPELINES 5
+
+static int
+dm_plane_init_colorops(struct drm_plane *plane)
+{
+ struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES];
+ struct drm_device *dev = plane->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc *dc = adev->dm.dc;
+ int len = 0;
+ int ret;
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ return 0;
+
+ /* Expose the default color pipeline on DCN 3.0+ only */
+ if (dc->ctx->dce_version >= DCN_VERSION_3_0) {
+ ret = amdgpu_dm_initialize_default_pipeline(plane, &pipelines[len]);
+ if (ret) {
+ drm_err(plane->dev, "Failed to create color pipeline for plane %d: %d\n",
+ plane->base.id, ret);
+ return ret;
+ }
+ len++;
+
+ /* Create COLOR_PIPELINE property and attach */
+ drm_plane_create_color_pipeline_property(plane, pipelines, len);
+ }
+
+ return 0;
+}
#endif
static const struct drm_plane_funcs dm_plane_funcs = {
@@ -1890,7 +1924,12 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
#ifdef AMD_PRIVATE_COLOR
dm_atomic_plane_attach_color_mgmt_properties(dm, plane);
+#else
+ res = dm_plane_init_colorops(plane);
+ if (res)
+ return res;
#endif
+
/* Create (reset) the plane state */
if (plane->funcs->reset)
plane->funcs->reset(plane);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index 615d2ab2b803..ea2619b507db 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -58,7 +58,7 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
unsigned long possible_crtcs,
const struct dc_plane_cap *plane_cap);
-const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier);
void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
bool *per_pixel_alpha, bool *pre_multiplied_alpha,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 848c5b4bb301..11b2ea6edf95 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -97,6 +98,7 @@ bool dm_pp_apply_display_requirements(
const struct dm_pp_single_disp_config *dc_cfg =
&pp_display_cfg->disp_configs[i];
adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
+ adev->pm.pm_display_cfg.displays[i].pixel_clock = dc_cfg->pixel_clock;
}
amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index f984cb0cb889..fd491b7a3cd7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
@@ -26,7 +27,6 @@
#include "amdgpu_dm_psr.h"
#include "dc_dmub_srv.h"
#include "dc.h"
-#include "dm_helpers.h"
#include "amdgpu_dm.h"
#include "modules/power/power_helpers.h"
@@ -119,8 +119,10 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
psr_config.allow_multi_disp_optimizations =
(amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT);
- if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config))
- return false;
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
+ if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config))
+ return false;
+ }
ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index e2366321a3c1..4fb8626913cf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
index 41f07f13a7b5..da94e3544b65 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
@@ -30,7 +31,7 @@
#include "amdgpu_dm.h"
#include "modules/power/power_helpers.h"
#include "dmub/inc/dmub_cmd.h"
-#include "dc/inc/link.h"
+#include "dc/inc/link_service.h"
/*
* amdgpu_dm_link_supports_replay() - check if the link supports replay
@@ -161,7 +162,7 @@ bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait)
if (link) {
link->dc->link_srv->edp_setup_replay(link, stream);
- link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total);
+ link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total, 0);
DRM_DEBUG_DRIVER("Enabling replay...\n");
link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, wait, false, NULL);
return true;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
index 8126bdb1eb6b..73b6c67ae5e7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 0005f5f8f34f..8550d5e8b753 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -52,11 +53,11 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc
func_name, line);
}
-void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx)
+void dm_trace_smu_enter(uint32_t msg_id, uint32_t param_in, unsigned int delay, struct dc_context *ctx)
{
}
-void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx)
+void dm_trace_smu_exit(bool success, uint32_t response, struct dc_context *ctx)
{
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
index 4686d4b0cbad..aa56fd6d56c3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -726,6 +727,32 @@ TRACE_EVENT(dcn_optc_lock_unlock_state,
)
);
+TRACE_EVENT(amdgpu_dm_brightness,
+ TP_PROTO(void *function, u32 user_brightness, u32 converted_brightness, bool aux, bool ac),
+ TP_ARGS(function, user_brightness, converted_brightness, aux, ac),
+ TP_STRUCT__entry(
+ __field(void *, function)
+ __field(u32, user_brightness)
+ __field(u32, converted_brightness)
+ __field(bool, aux)
+ __field(bool, ac)
+ ),
+ TP_fast_assign(
+ __entry->function = function;
+ __entry->user_brightness = user_brightness;
+ __entry->converted_brightness = converted_brightness;
+ __entry->aux = aux;
+ __entry->ac = ac;
+ ),
+ TP_printk("%ps: brightness requested=%u converted=%u aux=%s power=%s",
+ (void *)__entry->function,
+ (u32)__entry->user_brightness,
+ (u32)__entry->converted_brightness,
+ (__entry->aux) ? "true" : "false",
+ (__entry->ac) ? "AC" : "DC"
+ )
+);
+
#endif /* _AMDGPU_DM_TRACE_H_ */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 3c9ecea7eebc..7277ed21552f 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -36,7 +36,8 @@ DC_LIBS += dcn30
DC_LIBS += dcn301
DC_LIBS += dcn31
DC_LIBS += dml
-DC_LIBS += dml2
+DC_LIBS += dml2_0
+DC_LIBS += soc_and_ip_translator
endif
DC_LIBS += dce120
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
index d897f8a30ede..4da5adab799c 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
@@ -1136,7 +1136,7 @@ static void calculate_bandwidth(
}
}
}
- data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed(vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1));
+ data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed((uint64_t)vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1));
data->total_dmifmc_urgent_latency = bw_mul(vbios->dmifmc_urgent_latency, data->total_dmifmc_urgent_trips);
data->total_display_reads_required_data = bw_int_to_fixed(0);
data->total_display_reads_required_dram_access_data = bw_int_to_fixed(0);
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 452206b5095e..6073cadde76c 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -284,7 +284,7 @@ struct fixed31_32 dc_fixpt_cos(struct fixed31_32 arg)
dc_fixpt_mul(
square,
res),
- n * (n - 1)));
+ (long long)n * (n - 1)));
n -= 2;
} while (n != 0);
diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c
index 6d2924114a3e..b413a672c2c0 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/vector.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c
@@ -170,7 +170,7 @@ bool dal_vector_remove_at_index(
memmove(
vector->container + (index * vector->struct_size),
vector->container + ((index + 1) * vector->struct_size),
- (vector->count - index - 1) * vector->struct_size);
+ (size_t)(vector->count - index - 1) * vector->struct_size);
vector->count -= 1;
return true;
@@ -219,7 +219,7 @@ bool dal_vector_insert_at(
memmove(
insert_address + vector->struct_size,
insert_address,
- vector->struct_size * (vector->count - position));
+ (size_t)vector->struct_size * (vector->count - position));
memmove(
insert_address,
@@ -271,7 +271,7 @@ struct vector *dal_vector_clone(
/* copy vector's data */
memmove(vec_cloned->container, vector->container,
- vec_cloned->struct_size * vec_cloned->capacity);
+ (size_t)vec_cloned->struct_size * vec_cloned->capacity);
return vec_cloned;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 67f08495b7e6..d1471f34e419 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -67,7 +67,9 @@ static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
ATOM_OBJECT *object);
static struct device_id device_type_from_device_id(uint16_t device_id);
static uint32_t signal_to_ss_id(enum as_signal_type signal);
-static uint32_t get_support_mask_for_device_id(struct device_id device_id);
+static uint32_t get_support_mask_for_device_id(
+ enum dal_device_type device_type,
+ uint32_t enum_id);
static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record(
struct bios_parser *bp,
ATOM_OBJECT *object);
@@ -174,11 +176,8 @@ static struct graphics_object_id bios_parser_get_connector_id(
return object_id;
}
- if (tbl->ucNumberOfObjects <= i) {
- dm_error("Can't find connector id %d in connector table of size %d.\n",
- i, tbl->ucNumberOfObjects);
+ if (tbl->ucNumberOfObjects <= i)
return object_id;
- }
id = le16_to_cpu(tbl->asObjects[i].usObjectID);
object_id = object_id_from_bios_object_id(id);
@@ -444,6 +443,7 @@ static enum bp_result get_firmware_info_v1_4(
le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
info->pll_info.max_output_pxl_clk_pll_frequency =
le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;
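+	/* usMaxPixelClock is in 10 kHz units, like the PLL limits above */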
+ info->max_pixel_clock = le16_to_cpu(firmware_info->usMaxPixelClock) * 10;
if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
/* Since there is no information on the SS, report conservative
@@ -500,6 +500,7 @@ static enum bp_result get_firmware_info_v2_1(
info->external_clock_source_frequency_for_dp =
le16_to_cpu(firmwareInfo->usUniphyDPModeExtClkFreq) * 10;
info->min_allowed_bl_level = firmwareInfo->ucMinAllowedBL_Level;
+ info->max_pixel_clock = le16_to_cpu(firmwareInfo->usMaxPixelClock) * 10;
/* There should be only one entry in the SS info table for Memory Clock
*/
@@ -739,18 +740,94 @@ static enum bp_result bios_parser_transmitter_control(
return bp->cmd_tbl.transmitter_control(bp, cntl);
}
+static enum bp_result bios_parser_select_crtc_source(
+ struct dc_bios *dcb,
+ struct bp_crtc_source_select *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.select_crtc_source)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.select_crtc_source(bp, bp_params);
+}
+
static enum bp_result bios_parser_encoder_control(
struct dc_bios *dcb,
struct bp_encoder_control *cntl)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
+ if (cntl->engine_id == ENGINE_ID_DACA) {
+ if (!bp->cmd_tbl.dac1_encoder_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.dac1_encoder_control(
+ bp, cntl->action == ENCODER_CONTROL_ENABLE,
+ cntl->pixel_clock, ATOM_DAC1_PS2);
+ } else if (cntl->engine_id == ENGINE_ID_DACB) {
+ if (!bp->cmd_tbl.dac2_encoder_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.dac2_encoder_control(
+ bp, cntl->action == ENCODER_CONTROL_ENABLE,
+ cntl->pixel_clock, ATOM_DAC1_PS2);
+ }
+
if (!bp->cmd_tbl.dig_encoder_control)
return BP_RESULT_FAILURE;
return bp->cmd_tbl.dig_encoder_control(bp, cntl);
}
+static enum bp_result bios_parser_dac_load_detection(
+ struct dc_bios *dcb,
+ enum engine_id engine_id,
+ enum dal_device_type device_type,
+ uint32_t enum_id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct dc_context *ctx = dcb->ctx;
+ struct bp_load_detection_parameters bp_params = {0};
+ enum bp_result bp_result;
+ uint32_t bios_0_scratch;
+ uint32_t device_id_mask = 0;
+
+ bp_params.engine_id = engine_id;
+ bp_params.device_id = get_support_mask_for_device_id(device_type, enum_id);
+
+ if (engine_id != ENGINE_ID_DACA &&
+ engine_id != ENGINE_ID_DACB)
+ return BP_RESULT_UNSUPPORTED;
+
+ if (!bp->cmd_tbl.dac_load_detection)
+ return BP_RESULT_UNSUPPORTED;
+
+ if (bp_params.device_id == ATOM_DEVICE_CRT1_SUPPORT)
+ device_id_mask = ATOM_S0_CRT1_MASK;
+ else if (bp_params.device_id == ATOM_DEVICE_CRT2_SUPPORT)
+ device_id_mask = ATOM_S0_CRT2_MASK;
+ else
+ return BP_RESULT_UNSUPPORTED;
+
+	/* The BIOS reports detected devices in BIOS_SCRATCH_0; clear the corresponding bit before detection */
+ bios_0_scratch = dm_read_reg(ctx, bp->base.regs->BIOS_SCRATCH_0);
+ bios_0_scratch &= ~device_id_mask;
+ dm_write_reg(ctx, bp->base.regs->BIOS_SCRATCH_0, bios_0_scratch);
+
+ bp_result = bp->cmd_tbl.dac_load_detection(bp, &bp_params);
+
+ if (bp_result != BP_RESULT_OK)
+ return bp_result;
+
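+	/* Read the scratch register back: the BIOS sets the bit again when a load was detected */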
+ bios_0_scratch = dm_read_reg(ctx, bp->base.regs->BIOS_SCRATCH_0);
+
+ if (bios_0_scratch & device_id_mask)
+ return BP_RESULT_OK;
+
+ return BP_RESULT_FAILURE;
+}
+
static enum bp_result bios_parser_adjust_pixel_clock(
struct dc_bios *dcb,
struct bp_adjust_pixel_clock_parameters *bp_params)
@@ -861,7 +938,7 @@ static bool bios_parser_is_device_id_supported(
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
- uint32_t mask = get_support_mask_for_device_id(id);
+ uint32_t mask = get_support_mask_for_device_id(id.device_type, id.enum_id);
return (le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport) & mask) != 0;
}
@@ -2152,11 +2229,10 @@ static uint32_t signal_to_ss_id(enum as_signal_type signal)
return clk_id_ss;
}
-static uint32_t get_support_mask_for_device_id(struct device_id device_id)
+static uint32_t get_support_mask_for_device_id(
+ enum dal_device_type device_type,
+ uint32_t enum_id)
{
- enum dal_device_type device_type = device_id.device_type;
- uint32_t enum_id = device_id.enum_id;
-
switch (device_type) {
case DEVICE_TYPE_LCD:
switch (enum_id) {
@@ -2832,8 +2908,12 @@ static const struct dc_vbios_funcs vbios_funcs = {
.is_device_id_supported = bios_parser_is_device_id_supported,
/* COMMANDS */
+ .select_crtc_source = bios_parser_select_crtc_source,
+
.encoder_control = bios_parser_encoder_control,
+ .dac_load_detection = bios_parser_dac_load_detection,
+
.transmitter_control = bios_parser_transmitter_control,
.enable_crtc = bios_parser_enable_crtc,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 04eb647acc4e..550a9f1d03f8 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1480,10 +1480,10 @@ static enum bp_result get_embedded_panel_info_v2_1(
/* not provided by VBIOS */
info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF = 0;
- info->lcd_timing.misc_info.H_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo
- & ATOM_HSYNC_POLARITY);
- info->lcd_timing.misc_info.V_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo
- & ATOM_VSYNC_POLARITY);
+ info->lcd_timing.misc_info.H_SYNC_POLARITY = !(lvds->lcd_timing.miscinfo &
+ ATOM_HSYNC_POLARITY);
+ info->lcd_timing.misc_info.V_SYNC_POLARITY = !(lvds->lcd_timing.miscinfo &
+ ATOM_VSYNC_POLARITY);
/* not provided by VBIOS */
info->lcd_timing.misc_info.VERTICAL_CUT_OFF = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 2bcae0643e61..22457f417e65 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -52,7 +52,9 @@ static void init_transmitter_control(struct bios_parser *bp);
static void init_set_pixel_clock(struct bios_parser *bp);
static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp);
static void init_adjust_display_pll(struct bios_parser *bp);
+static void init_select_crtc_source(struct bios_parser *bp);
static void init_dac_encoder_control(struct bios_parser *bp);
+static void init_dac_load_detection(struct bios_parser *bp);
static void init_dac_output_control(struct bios_parser *bp);
static void init_set_crtc_timing(struct bios_parser *bp);
static void init_enable_crtc(struct bios_parser *bp);
@@ -69,7 +71,9 @@ void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp)
init_set_pixel_clock(bp);
init_enable_spread_spectrum_on_ppll(bp);
init_adjust_display_pll(bp);
+ init_select_crtc_source(bp);
init_dac_encoder_control(bp);
+ init_dac_load_detection(bp);
init_dac_output_control(bp);
init_set_crtc_timing(bp);
init_enable_crtc(bp);
@@ -993,7 +997,7 @@ static enum bp_result set_pixel_clock_v3(
allocation.sPCLKInput.usFbDiv =
cpu_to_le16((uint16_t)bp_params->feedback_divider);
allocation.sPCLKInput.ucFracFbDiv =
- (uint8_t)bp_params->fractional_feedback_divider;
+ (uint8_t)(bp_params->fractional_feedback_divider / 100000);
allocation.sPCLKInput.ucPostDiv =
(uint8_t)bp_params->pixel_clock_post_divider;
@@ -1612,6 +1616,198 @@ static enum bp_result adjust_display_pll_v3(
/*******************************************************************************
********************************************************************************
**
+ ** SELECT CRTC SOURCE
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result select_crtc_source_v1(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+static enum bp_result select_crtc_source_v2(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+
+static void init_select_crtc_source(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source)) {
+ case 1:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v1;
+ break;
+ case 2:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v2;
+ break;
+ case 3:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
+ break;
+ default:
+ bp->cmd_tbl.select_crtc_source = NULL;
+ break;
+ }
+}
+
+static enum bp_result select_crtc_source_v1(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PS_ALLOCATION params;
+
+ if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &params.ucCRTC))
+ return BP_RESULT_BADINPUT;
+
+ switch (bp_params->engine_id) {
+ case ENGINE_ID_DACA:
+ params.ucDevice = ATOM_DEVICE_CRT1_INDEX;
+ break;
+ case ENGINE_ID_DACB:
+ params.ucDevice = ATOM_DEVICE_CRT2_INDEX;
+ break;
+ default:
+ return BP_RESULT_BADINPUT;
+ }
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static bool select_crtc_source_v2_encoder_id(
+ enum engine_id engine_id, uint8_t *out_encoder_id)
+{
+ uint8_t encoder_id = 0;
+
+ switch (engine_id) {
+ case ENGINE_ID_DIGA:
+ encoder_id = ASIC_INT_DIG1_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGB:
+ encoder_id = ASIC_INT_DIG2_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGC:
+ encoder_id = ASIC_INT_DIG3_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGD:
+ encoder_id = ASIC_INT_DIG4_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGE:
+ encoder_id = ASIC_INT_DIG5_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGF:
+ encoder_id = ASIC_INT_DIG6_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGG:
+ encoder_id = ASIC_INT_DIG7_ENCODER_ID;
+ break;
+ case ENGINE_ID_DACA:
+ encoder_id = ASIC_INT_DAC1_ENCODER_ID;
+ break;
+ case ENGINE_ID_DACB:
+ encoder_id = ASIC_INT_DAC2_ENCODER_ID;
+ break;
+ default:
+ return false;
+ }
+
+ *out_encoder_id = encoder_id;
+ return true;
+}
+
+static bool select_crtc_source_v2_encoder_mode(
+ enum signal_type signal_type, uint8_t *out_encoder_mode)
+{
+ uint8_t encoder_mode = 0;
+
+ switch (signal_type) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ encoder_mode = ATOM_ENCODER_MODE_DVI;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ encoder_mode = ATOM_ENCODER_MODE_HDMI;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ encoder_mode = ATOM_ENCODER_MODE_LVDS;
+ break;
+ case SIGNAL_TYPE_RGB:
+ encoder_mode = ATOM_ENCODER_MODE_CRT;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ encoder_mode = ATOM_ENCODER_MODE_DP;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ encoder_mode = ATOM_ENCODER_MODE_DP_MST;
+ break;
+ case SIGNAL_TYPE_EDP:
+ encoder_mode = ATOM_ENCODER_MODE_DP;
+ break;
+ default:
+ return false;
+ }
+
+ *out_encoder_mode = encoder_mode;
+ return true;
+}
+
+static enum bp_result select_crtc_source_v2(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PARAMETERS_V3 params;
+
+ if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &params.ucCRTC))
+ return BP_RESULT_BADINPUT;
+
+ if (!select_crtc_source_v2_encoder_id(
+ bp_params->engine_id,
+ &params.ucEncoderID))
+ return BP_RESULT_BADINPUT;
+ if (!select_crtc_source_v2_encoder_mode(
+ bp_params->sink_signal,
+ &params.ucEncodeMode))
+ return BP_RESULT_BADINPUT;
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PARAMETERS_V3 params;
+
+ if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &params.ucCRTC))
+ return BP_RESULT_BADINPUT;
+
+ if (!select_crtc_source_v2_encoder_id(
+ bp_params->engine_id,
+ &params.ucEncoderID))
+ return BP_RESULT_BADINPUT;
+ if (!select_crtc_source_v2_encoder_mode(
+ bp_params->sink_signal,
+ &params.ucEncodeMode))
+ return BP_RESULT_BADINPUT;
+
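+	/* v3 additionally reports the target color depth to the BIOS */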
+ params.ucDstBpc = bp_params->bit_depth;
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
** DAC ENCODER CONTROL
**
********************************************************************************
@@ -1711,6 +1907,96 @@ static enum bp_result dac2_encoder_control_v1(
/*******************************************************************************
********************************************************************************
**
+ ** DAC LOAD DETECTION
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result dac_load_detection_v1(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params);
+
+static enum bp_result dac_load_detection_v3(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params);
+
+static void init_dac_load_detection(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(DAC_LoadDetection)) {
+ case 1:
+ case 2:
+ bp->cmd_tbl.dac_load_detection = dac_load_detection_v1;
+ break;
+ case 3:
+ default:
+ bp->cmd_tbl.dac_load_detection = dac_load_detection_v3;
+ break;
+ }
+}
+
+static void dac_load_detect_prepare_params(
+ struct _DAC_LOAD_DETECTION_PS_ALLOCATION *params,
+ enum engine_id engine_id,
+ uint16_t device_id,
+ uint8_t misc)
+{
+	/* ucDacType takes ATOM_DAC_* values, not engine ids */
+	uint8_t dac_type = ATOM_DAC_A;
+
+	if (engine_id == ENGINE_ID_DACB)
+		dac_type = ATOM_DAC_B;
+
+ params->sDacload.usDeviceID = cpu_to_le16(device_id);
+ params->sDacload.ucDacType = dac_type;
+ params->sDacload.ucMisc = misc;
+}
+
+static enum bp_result dac_load_detection_v1(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DAC_LOAD_DETECTION_PS_ALLOCATION params;
+
+ dac_load_detect_prepare_params(
+ &params,
+ bp_params->engine_id,
+ bp_params->device_id,
+ 0);
+
+ if (EXEC_BIOS_CMD_TABLE(DAC_LoadDetection, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result dac_load_detection_v3(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DAC_LOAD_DETECTION_PS_ALLOCATION params;
+
+ uint8_t misc = 0;
+
+ if (bp_params->device_id == ATOM_DEVICE_CV_SUPPORT ||
+ bp_params->device_id == ATOM_DEVICE_TV1_SUPPORT)
+ misc = DAC_LOAD_MISC_YPrPb;
+
+ dac_load_detect_prepare_params(
+ &params,
+ bp_params->engine_id,
+ bp_params->device_id,
+ misc);
+
+ if (EXEC_BIOS_CMD_TABLE(DAC_LoadDetection, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
** DAC OUTPUT CONTROL
**
********************************************************************************
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.h b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
index ad533775e724..e89b1ba0048b 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
@@ -52,6 +52,9 @@ struct cmd_tbl {
enum bp_result (*adjust_display_pll)(
struct bios_parser *bp,
struct bp_adjust_pixel_clock_parameters *bp_params);
+ enum bp_result (*select_crtc_source)(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
enum bp_result (*dac1_encoder_control)(
struct bios_parser *bp,
bool enable,
@@ -68,6 +71,9 @@ struct cmd_tbl {
enum bp_result (*dac2_output_control)(
struct bios_parser *bp,
bool enable);
+ enum bp_result (*dac_load_detection)(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params);
enum bp_result (*set_crtc_timing)(
struct bios_parser *bp,
struct bp_hw_crtc_timing_parameters *bp_params);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 2c645dffec18..f2b1720a6a66 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -396,6 +396,7 @@ static enum bp_result transmitter_control_v1_7(
process_phy_transition_init_params.display_port_link_rate = link->cur_link_settings.link_rate;
process_phy_transition_init_params.transition_bitmask = link->phy_transition_bitmask;
}
+ dig_v1_7.skip_phy_ssc_reduction = link->wa_flags.skip_phy_ssc_reduction;
}
// Handle PRE_OFF_TO_ON: Process ACPI PHY Transition Interlock
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index d9955c5d2e5e..60021671b386 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -112,7 +112,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
###############################################################################
# DCN30
###############################################################################
-CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o
+CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o dcn30m_clk_mgr.o dcn30m_clk_mgr_smu_msg.o
AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30))
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 4c3e58c730b1..15cf13ec5302 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -28,7 +28,7 @@
#include "dccg.h"
#include "clk_mgr_internal.h"
#include "dc_state_priv.h"
-#include "link.h"
+#include "link_service.h"
#include "dce100/dce_clk_mgr.h"
#include "dce110/dce110_clk_mgr.h"
@@ -67,7 +67,7 @@ int clk_mgr_helper_get_active_display_cnt(
if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM)
continue;
- if (!stream->dpms_off || (stream_status && stream_status->plane_count))
+ if (!stream->dpms_off || dc->is_switch_in_progress_dest || (stream_status && stream_status->plane_count))
display_count++;
}
@@ -158,7 +158,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
return NULL;
}
dce60_clk_mgr_construct(ctx, clk_mgr);
- dce_clk_mgr_construct(ctx, clk_mgr);
return &clk_mgr->base;
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index 26feefbb8990..6131ede2db7a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -72,9 +72,9 @@ static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
/* ClocksStateLow */
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
/* ClocksStateNominal */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 },
/* ClocksStatePerformance */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 } };
int dentist_get_divider_from_did(int did)
{
@@ -245,6 +245,11 @@ int dce_set_clock(
pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+ /* DCE 6.0, DCE 6.4: engine clock is the same as PLL0 */
+ if (clk_mgr_base->ctx->dce_version == DCE_VERSION_6_0 ||
+ clk_mgr_base->ctx->dce_version == DCE_VERSION_6_4)
+ pxl_clk_params.pll_id = CLOCK_SOURCE_ID_PLL0;
+
if (clk_mgr_dce->dfs_bypass_active)
pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
@@ -386,8 +391,6 @@ static void dce_pplib_apply_display_requirements(
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
- pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
-
dce110_fill_display_configs(context, pp_display_cfg);
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
@@ -400,11 +403,9 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr_base,
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
-
- /*TODO: W/A for dal3 linux, investigate why this works */
- if (!clk_mgr_dce->dfs_bypass_active)
- patched_disp_clk = patched_disp_clk * 115 / 100;
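+	/* Cap the request at the PERFORMANCE state limit instead of applying the old +15% bump workaround */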
+ const int max_disp_clk =
+ clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
+ int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz);
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
/* get max clock state from PPLIB */
@@ -462,6 +463,9 @@ void dce_clk_mgr_construct(
clk_mgr->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
+ base->clks.max_supported_dispclk_khz =
+ clk_mgr->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
+
dce_clock_read_integrated_info(clk_mgr);
dce_clock_read_ss_info(clk_mgr);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
index f8409453434c..d50b9440210e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
@@ -120,9 +120,15 @@ void dce110_fill_display_configs(
const struct dc_state *context,
struct dm_pp_display_configuration *pp_display_cfg)
{
+ struct dc *dc = context->clk_mgr->ctx->dc;
int j;
int num_cfgs = 0;
+ pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
+ pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz;
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+ pp_display_cfg->crtc_index = dc->res_pool->res_cap->num_timing_generator;
+
for (j = 0; j < context->stream_count; j++) {
int k;
@@ -158,12 +164,29 @@ void dce110_fill_display_configs(
stream->link->cur_link_settings.link_rate;
cfg->link_settings.link_spread =
stream->link->cur_link_settings.link_spread;
- cfg->sym_clock = stream->phy_pix_clk;
+ cfg->pixel_clock = stream->phy_pix_clk;
/* Round v_refresh*/
cfg->v_refresh = stream->timing.pix_clk_100hz * 100;
cfg->v_refresh /= stream->timing.h_total;
cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
/ stream->timing.v_total;
+
+ /* Find first CRTC index and calculate its line time.
+ * This is necessary for DPM on SI GPUs.
+ */
+ if (cfg->pipe_idx < pp_display_cfg->crtc_index) {
+ const struct dc_crtc_timing *timing =
+ &context->streams[0]->timing;
+
+ pp_display_cfg->crtc_index = cfg->pipe_idx;
+ pp_display_cfg->line_time_in_us =
+ timing->h_total * 10000 / timing->pix_clk_100hz;
+ }
+ }
+
+ if (!num_cfgs) {
+ pp_display_cfg->crtc_index = 0;
+ pp_display_cfg->line_time_in_us = 0;
}
pp_display_cfg->display_count = num_cfgs;
@@ -223,25 +246,8 @@ void dce11_pplib_apply_display_requirements(
pp_display_cfg->min_engine_clock_deep_sleep_khz
= context->bw_ctx.bw.dce.sclk_deep_sleep_khz;
- pp_display_cfg->avail_mclk_switch_time_us =
- dce110_get_min_vblank_time_us(context);
- /* TODO: dce11.2*/
- pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
-
- pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz;
-
dce110_fill_display_configs(context, pp_display_cfg);
- /* TODO: is this still applicable?*/
- if (pp_display_cfg->display_count == 1) {
- const struct dc_crtc_timing *timing =
- &context->streams[0]->timing;
-
- pp_display_cfg->crtc_index =
- pp_display_cfg->disp_configs[0].pipe_idx;
- pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz;
- }
-
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
index 0267644717b2..69dd80d9f738 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
@@ -83,22 +83,13 @@ static const struct state_dependent_clocks dce60_max_clks_by_state[] = {
static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- int dprefclk_wdivider;
- int dp_ref_clk_khz;
- int target_div;
+ struct dc_context *ctx = clk_mgr_base->ctx;
+ int dp_ref_clk_khz = 0;
- /* DCE6 has no DPREFCLK_CNTL to read DP Reference Clock source */
-
- /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
- * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
- REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
-
- /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
- target_div = dentist_get_divider_from_did(dprefclk_wdivider);
-
- /* Calculate the current DFS clock, in kHz.*/
- dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->base.dentist_vco_freq_khz) / target_div;
+ if (ASIC_REV_IS_TAHITI_P(ctx->asic_id.hw_internal_rev))
+ dp_ref_clk_khz = ctx->dc_bios->fw_info.default_display_engine_pll_frequency;
+ else
+ dp_ref_clk_khz = clk_mgr_base->clks.dispclk_khz;
return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz);
}
@@ -109,8 +100,6 @@ static void dce60_pplib_apply_display_requirements(
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
- pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
-
dce110_fill_display_configs(context, pp_display_cfg);
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
@@ -123,11 +112,9 @@ static void dce60_update_clocks(struct clk_mgr *clk_mgr_base,
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
-
- /*TODO: W/A for dal3 linux, investigate why this works */
- if (!clk_mgr_dce->dfs_bypass_active)
- patched_disp_clk = patched_disp_clk * 115 / 100;
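+	/* Same clamp as dce_update_clocks: never request more than the top clock state supports */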
+ const int max_disp_clk =
+ clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
+ int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz);
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
/* get max clock state from PPLIB */
@@ -160,6 +147,8 @@ void dce60_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr)
{
+ struct clk_mgr *base = &clk_mgr->base;
+
dce_clk_mgr_construct(ctx, clk_mgr);
memcpy(clk_mgr->max_clks_by_state,
@@ -170,5 +159,8 @@ void dce60_clk_mgr_construct(
clk_mgr->clk_mgr_shift = &disp_clk_shift;
clk_mgr->clk_mgr_mask = &disp_clk_mask;
clk_mgr->base.funcs = &dce60_funcs;
+
+ base->clks.max_supported_dispclk_khz =
+ clk_mgr->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h
index fa09c594fd36..06da34676965 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h
@@ -56,6 +56,7 @@
#define DALSMC_MSG_SetDisplayRefreshFromMall 0xF
#define DALSMC_MSG_SetExternalClientDfCstateAllow 0x10
#define DALSMC_MSG_BacoAudioD3PME 0x11
-#define DALSMC_Message_Count 0x12
+#define DALSMC_MSG_SmartAccess 0x12
+#define DALSMC_Message_Count 0x13
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 8083a553c60e..ef77fcd164ed 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -30,6 +30,7 @@
#include "dce100/dce_clk_mgr.h"
#include "dcn30/dcn30_clk_mgr.h"
#include "dml/dcn30/dcn30_fpu.h"
+#include "dcn30/dcn30m_clk_mgr.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
@@ -498,7 +499,8 @@ static struct clk_mgr_funcs dcn3_funcs = {
.are_clock_states_equal = dcn3_are_clock_states_equal,
.enable_pme_wa = dcn3_enable_pme_wa,
.notify_link_rate_change = dcn30_notify_link_rate_change,
- .is_smu_present = dcn3_is_smu_present
+ .is_smu_present = dcn3_is_smu_present,
+ .set_smartmux_switch = dcn30m_set_smartmux_switch
};
static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
index 3253115a153d..827bc2431d5d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
@@ -69,7 +69,7 @@ static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
/* handle DALSMC_Result_CmdRejectedBusy? */
- TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
+ TRACE_SMU_MSG_DELAY(0, 0, delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
return reg;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c
new file mode 100644
index 000000000000..8e8a11c7437e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "clk_mgr_internal.h"
+#include "dcn30/dcn30m_clk_mgr.h"
+#include "dcn30m_clk_mgr_smu_msg.h"
+
+
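+/* Forward the requested SmartMux pin state to the SMU via the DALSMC SmartAccess message */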
+uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ return dcn30m_smu_set_smart_mux_switch(clk_mgr, pins_to_set);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h
new file mode 100644
index 000000000000..757985b2eadc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN30M_CLK_MGR_H__
+#define __DCN30M_CLK_MGR_H__
+
+uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set);
+
+#endif //__DCN30M_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c
new file mode 100644
index 000000000000..0dd0583ff21e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn30m_clk_mgr_smu_msg.h"
+
+#include "clk_mgr_internal.h"
+#include "reg_helper.h"
+#include "dm_helpers.h"
+
+#include "dalsmc.h"
+
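+/* SMU mailbox registers: message ID, argument in/out, and response status */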
+#define mmDAL_MSG_REG 0x1628A
+#define mmDAL_ARG_REG 0x16273
+#define mmDAL_RESP_REG 0x16274
+
+#define REG(reg_name) \
+ mm ## reg_name
+
+#include "logger_types.h"
+#undef DC_LOGGER
+#define DC_LOGGER \
+ CTX->logger
+#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
+
+
+/*
+ * Used instead of the REG_WAIT macro because the wait ends when the
+ * register becomes non-zero, and because the register translation in
+ * msg_if.h does not work with REG_WAIT.
+ */
+static uint32_t dcn30m_smu_wait_for_response(struct clk_mgr_internal *clk_mgr,
+ unsigned int delay_us, unsigned int max_retries)
+{
+ uint32_t reg = 0;
+
+ do {
+ reg = REG_READ(DAL_RESP_REG);
+ if (reg)
+ break;
+
+ if (delay_us >= 1000)
+ msleep(delay_us/1000);
+ else if (delay_us > 0)
+ udelay(delay_us);
+ } while (max_retries--);
+
+ /* handle DALSMC_Result_CmdRejectedBusy? */
+
+ /* Log? */
+
+ return reg;
+}
+
+static bool dcn30m_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
+ uint32_t msg_id, uint32_t param_in, uint32_t *param_out)
+{
+ uint32_t result;
+ /* Wait for response register to be ready */
+ dcn30m_smu_wait_for_response(clk_mgr, 10, 200000);
+
+ /* Clear response register */
+ REG_WRITE(DAL_RESP_REG, 0);
+
+ /* Set the parameter register for the SMU message */
+ REG_WRITE(DAL_ARG_REG, param_in);
+
+ /* Trigger the message transaction by writing the message ID */
+ REG_WRITE(DAL_MSG_REG, msg_id);
+
+ result = dcn30m_smu_wait_for_response(clk_mgr, 10, 200000);
+
+ if (IS_SMU_TIMEOUT(result))
+ dm_helpers_smu_timeout(CTX, msg_id, param_in, 10 * 200000);
+
+ /* Wait for response */
+ if (result == DALSMC_Result_OK) {
+ if (param_out)
+ *param_out = REG_READ(DAL_ARG_REG);
+
+ return true;
+ }
+
+ return false;
+}
+
+uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set)
+{
+ uint32_t response = 0;
+
+ smu_print("SMU Set SmartMux Switch: switch_dgpu = %d\n", pins_to_set);
+
+ dcn30m_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SmartAccess, pins_to_set, &response);
+
+ return response;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h
new file mode 100644
index 000000000000..8a59a473fc5e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_
+#define DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_
+
+#include "core_types.h"
+
+struct clk_mgr_internal;
+
+uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set);
+#endif /* DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 9e2ef0e724fc..7aee02d56292 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -563,6 +563,7 @@ static void vg_clk_mgr_helper_populate_bw_params(
{
int i, j;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
+ uint32_t max_dispclk = 0, max_dppclk = 0;
j = -1;
@@ -584,6 +585,15 @@ static void vg_clk_mgr_helper_populate_bw_params(
return;
}
+ /* dispclk and dppclk can be max at any voltage, same number of levels for both */
+ if (clock_table->NumDispClkLevelsEnabled <= VG_NUM_DISPCLK_DPM_LEVELS &&
+ clock_table->NumDispClkLevelsEnabled <= VG_NUM_DPPCLK_DPM_LEVELS) {
+ max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
+ max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
+ } else {
+ ASSERT(0);
+ }
+
bw_params->clk_table.num_entries = j + 1;
for (i = 0; i < bw_params->clk_table.num_entries - 1; i++, j--) {
@@ -591,11 +601,17 @@ static void vg_clk_mgr_helper_populate_bw_params(
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->DfPstateTable[j].voltage);
+
+ /* Now update clocks we do read */
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
}
bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, VG_NUM_DCFCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, VG_NUM_DISPCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, VG_NUM_DPPCLK_DPM_LEVELS);
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index bc123f1884da..051052bd10c9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -47,7 +47,7 @@
#include "dcn30/dcn30_clk_mgr.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
#include "logger_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 91d872d6d392..db687a13174d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -48,7 +48,7 @@
#include "dcn31/dcn31_clk_mgr.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn314_smu.h"
@@ -77,6 +77,7 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
#undef DC_LOGGER
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
+
#define regCLK1_CLK_PLL_REQ 0x0237
#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
@@ -87,8 +88,70 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+#define regCLK1_CLK0_DFS_CNTL 0x0269
+#define regCLK1_CLK0_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK1_DFS_CNTL 0x026c
+#define regCLK1_CLK1_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK2_DFS_CNTL 0x026f
+#define regCLK1_CLK2_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK3_DFS_CNTL 0x0272
+#define regCLK1_CLK3_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK4_DFS_CNTL 0x0275
+#define regCLK1_CLK4_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK5_DFS_CNTL 0x0278
+#define regCLK1_CLK5_DFS_CNTL_BASE_IDX 0
+
+#define regCLK1_CLK0_CURRENT_CNT 0x02fb
+#define regCLK1_CLK0_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK1_CURRENT_CNT 0x02fc
+#define regCLK1_CLK1_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK2_CURRENT_CNT 0x02fd
+#define regCLK1_CLK2_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK3_CURRENT_CNT 0x02fe
+#define regCLK1_CLK3_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK4_CURRENT_CNT 0x02ff
+#define regCLK1_CLK4_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK5_CURRENT_CNT 0x0300
+#define regCLK1_CLK5_CURRENT_CNT_BASE_IDX 0
+
+#define regCLK1_CLK0_BYPASS_CNTL 0x028a
+#define regCLK1_CLK0_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK1_BYPASS_CNTL 0x0293
+#define regCLK1_CLK1_BYPASS_CNTL_BASE_IDX 0
#define regCLK1_CLK2_BYPASS_CNTL 0x029c
#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK3_BYPASS_CNTL 0x02a5
+#define regCLK1_CLK3_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK4_BYPASS_CNTL 0x02ae
+#define regCLK1_CLK4_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK5_BYPASS_CNTL 0x02b7
+#define regCLK1_CLK5_BYPASS_CNTL_BASE_IDX 0
+
+#define regCLK1_CLK0_DS_CNTL 0x0283
+#define regCLK1_CLK0_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK1_DS_CNTL 0x028c
+#define regCLK1_CLK1_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK2_DS_CNTL 0x0295
+#define regCLK1_CLK2_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK3_DS_CNTL 0x029e
+#define regCLK1_CLK3_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK4_DS_CNTL 0x02a7
+#define regCLK1_CLK4_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK5_DS_CNTL 0x02b0
+#define regCLK1_CLK5_DS_CNTL_BASE_IDX 0
+
+#define regCLK1_CLK0_ALLOW_DS 0x0284
+#define regCLK1_CLK0_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK1_ALLOW_DS 0x028d
+#define regCLK1_CLK1_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK2_ALLOW_DS 0x0296
+#define regCLK1_CLK2_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK3_ALLOW_DS 0x029f
+#define regCLK1_CLK3_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK4_ALLOW_DS 0x02a8
+#define regCLK1_CLK4_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK5_ALLOW_DS 0x02b1
+#define regCLK1_CLK5_ALLOW_DS_BASE_IDX 0
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10
@@ -185,6 +248,8 @@ void dcn314_init_clocks(struct clk_mgr *clk_mgr)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+ struct clk_mgr_dcn314 *clk_mgr_dcn314 = TO_CLK_MGR_DCN314(clk_mgr_int);
+ struct clk_log_info log_info = {0};
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
// Assumption is that boot state always supports pstate
@@ -200,6 +265,9 @@ void dcn314_init_clocks(struct clk_mgr *clk_mgr)
dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
else
clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+
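+	/* Snapshot the boot-time clocks and seed dispclk from it (snapshot in MHz, clks in kHz) */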
+ dcn314_dump_clk_registers(&clk_mgr->boot_snapshot, &clk_mgr_dcn314->base.base, &log_info);
+ clk_mgr->clks.dispclk_khz = clk_mgr->boot_snapshot.dispclk * 1000;
}
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
@@ -218,6 +286,8 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->work_arounds.skip_clock_update)
return;
+ display_count = dcn314_get_active_display_cnt_wa(dc, context);
+
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
* also if safe to lower is false, we just go in the higher state
@@ -236,7 +306,6 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
}
/* check that we're not already in lower */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
- display_count = dcn314_get_active_display_cnt_wa(dc, context);
/* if we can go lower, go lower */
if (display_count == 0) {
union display_idle_optimization_u idle_info = { 0 };
@@ -293,11 +362,19 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
update_dppclk = true;
}
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
+ (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
+ int requested_dispclk_khz = new_clocks->dispclk_khz;
+
dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+ /* Clamp the requested clock to PMFW based on their limit. */
+ if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
+ requested_dispclk_khz = dc->debug.min_disp_clk_khz;
+
+ dcn314_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
- dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+
dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
@@ -385,10 +462,65 @@ bool dcn314_are_clock_states_equal(struct dc_clocks *a,
return true;
}
-static void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+
+static void dcn314_dump_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ // read dtbclk
+ internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT);
+ internal->CLK1_CLK4_BYPASS_CNTL = REG_READ(CLK1_CLK4_BYPASS_CNTL);
+
+ // read dcfclk
+ internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
+ internal->CLK1_CLK3_BYPASS_CNTL = REG_READ(CLK1_CLK3_BYPASS_CNTL);
+
+ // read dcf deep sleep divider
+ internal->CLK1_CLK3_DS_CNTL = REG_READ(CLK1_CLK3_DS_CNTL);
+ internal->CLK1_CLK3_ALLOW_DS = REG_READ(CLK1_CLK3_ALLOW_DS);
+
+ // read dppclk
+ internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT);
+ internal->CLK1_CLK1_BYPASS_CNTL = REG_READ(CLK1_CLK1_BYPASS_CNTL);
+
+ // read dprefclk
+ internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT);
+ internal->CLK1_CLK2_BYPASS_CNTL = REG_READ(CLK1_CLK2_BYPASS_CNTL);
+
+ // read dispclk
+ internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT);
+ internal->CLK1_CLK0_BYPASS_CNTL = REG_READ(CLK1_CLK0_BYPASS_CNTL);
+}
+
+void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
- return;
+
+ struct dcn35_clk_internal internal = {0};
+
+ dcn314_dump_clk_registers_internal(&internal, clk_mgr_base);
+
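+	/* CURRENT_CNT registers count in 10 kHz units, so /10 yields MHz (callers convert to kHz with * 1000) */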
+ regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
+ regs_and_bypass->dcf_deep_sleep_divider = internal.CLK1_CLK3_DS_CNTL / 10;
+ regs_and_bypass->dcf_deep_sleep_allow = internal.CLK1_CLK3_ALLOW_DS;
+ regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
+ regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
+ regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
+ regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
+
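+	/* BYPASS_CNTL[2:0] selects the bypass source; selects above 4 are invalid, so report 0 */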
+ regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dppclk_bypass > 4)
+ regs_and_bypass->dppclk_bypass = 0;
+ regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dcfclk_bypass > 4)
+ regs_and_bypass->dcfclk_bypass = 0;
+ regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dispclk_bypass > 4)
+ regs_and_bypass->dispclk_bypass = 0;
+ regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dprefclk_bypass > 4)
+ regs_and_bypass->dprefclk_bypass = 0;
+
}
static struct clk_bw_params dcn314_bw_params = {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
index 002c28e80720..0577eb527bc3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
@@ -65,4 +65,9 @@ void dcn314_clk_mgr_construct(struct dc_context *ctx,
void dcn314_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
+
+void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+ struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info);
+
+
#endif //__DCN314_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index e4d22f74f986..3a881451e9da 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -40,17 +40,51 @@
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
-
+#include "reg_helper.h"
#include "logger_types.h"
#undef DC_LOGGER
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
-#include "link.h"
+#include "link_service.h"
+
+#define MAX_INSTANCE 7
+#define MAX_SEGMENT 8
+
+struct IP_BASE_INSTANCE {
+ unsigned int segment[MAX_SEGMENT];
+};
+
+struct IP_BASE {
+ struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
+};
+
+static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0, 0, 0 } },
+ { { 0x00016E00, 0x02401C00, 0, 0, 0, 0, 0, 0 } },
+ { { 0x00017000, 0x02402000, 0, 0, 0, 0, 0, 0 } },
+ { { 0x00017200, 0x02402400, 0, 0, 0, 0, 0, 0 } },
+ { { 0x0001B000, 0x0242D800, 0, 0, 0, 0, 0, 0 } },
+ { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0, 0, 0 } } } };
+
+#define regCLK1_CLK0_CURRENT_CNT 0x0314
+#define regCLK1_CLK0_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK1_CURRENT_CNT 0x0315
+#define regCLK1_CLK1_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK2_CURRENT_CNT 0x0316
+#define regCLK1_CLK2_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK3_CURRENT_CNT 0x0317
+#define regCLK1_CLK3_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK4_CURRENT_CNT 0x0318
+#define regCLK1_CLK4_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK5_CURRENT_CNT 0x0319
+#define regCLK1_CLK5_CURRENT_CNT_BASE_IDX 0
#define TO_CLK_MGR_DCN315(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn315, base)
+#define REG(reg_name) \
+ (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+
#define UNSUPPORTED_DCFCLK 10000000
#define MIN_DPP_DISP_CLK 100000
@@ -245,9 +279,38 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
+static void dcn315_dump_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ // read dtbclk
+ internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT);
+
+ // read dcfclk
+ internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
+
+ // read dppclk
+ internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT);
+
+ // read dprefclk
+ internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT);
+
+ // read dispclk
+ internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT);
+}
+
static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
+ struct dcn35_clk_internal internal = {0};
+
+ dcn315_dump_clk_registers_internal(&internal, clk_mgr_base);
+
+ regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
+ regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
+ regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
+ regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
+ regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
return;
}
@@ -594,13 +657,32 @@ static struct clk_mgr_funcs dcn315_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn315_update_clocks,
- .init_clocks = dcn31_init_clocks,
+ .init_clocks = dcn315_init_clocks,
.enable_pme_wa = dcn315_enable_pme_wa,
.are_clock_states_equal = dcn31_are_clock_states_equal,
.notify_wm_ranges = dcn315_notify_wm_ranges
};
extern struct clk_mgr_funcs dcn3_fpga_funcs;
+void dcn315_init_clocks(struct clk_mgr *clk_mgr)
+{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+ struct clk_mgr_dcn315 *clk_mgr_dcn315 = TO_CLK_MGR_DCN315(clk_mgr_int);
+ struct clk_log_info log_info = {0};
+
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ // Assumption is that boot state always supports pstate
+ clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+ clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
+
+ dcn315_dump_clk_registers(&clk_mgr->boot_snapshot, &clk_mgr_dcn315->base.base, &log_info);
+ clk_mgr->clks.dispclk_khz = clk_mgr->boot_snapshot.dispclk * 1000;
+}
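Worth noting: dcn315_init_clocks() saves ref_dtbclk_khz before the memset and restores it immediately after, so the boot-time DTB reference survives the reset while dispclk is re-seeded from the register snapshot taken above.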
+
void dcn315_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn315 *clk_mgr,
@@ -661,6 +743,7 @@ void dcn315_clk_mgr_construct(
/* Saved clocks configured at boot for debug purposes */
dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
&clk_mgr->base.base, &log_info);
+ clk_mgr->base.base.clks.dispclk_khz = clk_mgr->base.base.boot_snapshot.dispclk * 1000;
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h
index ac36ddf5dd1a..642ae3d4a790 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h
@@ -44,6 +44,7 @@ void dcn315_clk_mgr_construct(struct dc_context *ctx,
struct pp_smu_funcs *pp_smu,
struct dccg *dccg);
+void dcn315_init_clocks(struct clk_mgr *clk_mgr);
void dcn315_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
#endif //__DCN315_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 49efea0c8fcf..1769b1f26e75 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -39,7 +39,7 @@
#include "dcn316_smu.h"
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
// DCN316 this is CLK1 instance
#define MAX_INSTANCE 7
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 084994c650c4..7da7b41bd092 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -33,7 +33,7 @@
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#include "atomfirmware.h"
#include "dcn32_smu13_driver_if.h"
@@ -1047,11 +1047,8 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
&num_entries_per_clk->num_fclk_levels);
clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK);
- if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
- num_levels = num_entries_per_clk->num_memclk_levels;
- } else {
- num_levels = num_entries_per_clk->num_fclk_levels;
- }
+ num_levels = max(num_entries_per_clk->num_memclk_levels, num_entries_per_clk->num_fclk_levels);
+
clk_mgr_base->bw_params->max_memclk_mhz =
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz;
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
index cf2d35363e8b..5d80fdf63ffc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
@@ -63,7 +63,8 @@ static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
udelay(delay_us);
} while (max_retries--);
- TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
+ TRACE_SMU_MSG_DELAY(0, 0, delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
+
return reg;
}
@@ -120,7 +121,7 @@ static uint32_t dcn32_smu_wait_for_response_delay(struct clk_mgr_internal *clk_m
*total_delay_us += delay_us;
} while (max_retries--);
- TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx);
+ TRACE_SMU_MSG_DELAY(0, 0, *total_delay_us, clk_mgr->base.ctx);
return reg;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index bb1ac12a2b09..dfd0c9505af0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -44,7 +44,7 @@
#include "dcn31/dcn31_clk_mgr.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
#include "logger_types.h"
#undef DC_LOGGER
@@ -394,6 +394,8 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps);
if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
new_clocks->ref_dtbclk_khz = 600000;
+ else if (!new_clocks->dtbclk_en && new_clocks->ref_dtbclk_khz > 590000)
+ new_clocks->ref_dtbclk_khz = 0;
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
@@ -435,7 +437,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
actual_dtbclk = REG_READ(CLK1_CLK4_CURRENT_CNT);
- if (actual_dtbclk) {
+ if (actual_dtbclk > 590000) {
clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
}
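A hedged reading of the 590000 guard above (the counter appears to report kHz, so the threshold sits just below the 600 MHz reference); dtbclk_counter_active() is a hypothetical helper, not part of the patch:

static inline bool dtbclk_counter_active(uint32_t current_cnt_khz)
{
	/* readings above ~590 MHz are treated as the 600 MHz ref actually running */
	return current_cnt_khz > 590000;
}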
@@ -587,9 +589,118 @@ bool dcn35_are_clock_states_equal(struct dc_clocks *a,
return true;
}
-static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+static void dcn35_save_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ // read dtbclk
+ internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT);
+ internal->CLK1_CLK4_BYPASS_CNTL = REG_READ(CLK1_CLK4_BYPASS_CNTL);
+
+ // read dcfclk
+ internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
+ internal->CLK1_CLK3_BYPASS_CNTL = REG_READ(CLK1_CLK3_BYPASS_CNTL);
+
+ // read dcf deep sleep divider
+ internal->CLK1_CLK3_DS_CNTL = REG_READ(CLK1_CLK3_DS_CNTL);
+ internal->CLK1_CLK3_ALLOW_DS = REG_READ(CLK1_CLK3_ALLOW_DS);
+
+ // read dppclk
+ internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT);
+ internal->CLK1_CLK1_BYPASS_CNTL = REG_READ(CLK1_CLK1_BYPASS_CNTL);
+
+ // read dprefclk
+ internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT);
+ internal->CLK1_CLK2_BYPASS_CNTL = REG_READ(CLK1_CLK2_BYPASS_CNTL);
+
+ // read dispclk
+ internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT);
+ internal->CLK1_CLK0_BYPASS_CNTL = REG_READ(CLK1_CLK0_BYPASS_CNTL);
+}
+
+static void dcn35_save_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr_dcn35 *clk_mgr)
{
+ struct dcn35_clk_internal internal = {0};
+ char *bypass_clks[5] = {"0x0 DFS", "0x1 REFCLK", "0x2 ERROR", "0x3 400 FCH", "0x4 600 FCH"};
+
+ dcn35_save_clk_registers_internal(&internal, &clk_mgr->base.base);
+
+ regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
+ regs_and_bypass->dcf_deep_sleep_divider = internal.CLK1_CLK3_DS_CNTL / 10;
+ regs_and_bypass->dcf_deep_sleep_allow = internal.CLK1_CLK3_ALLOW_DS;
+ regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
+ regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
+ regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
+ regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
+
+ regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dppclk_bypass > 4)
+ regs_and_bypass->dppclk_bypass = 0;
+ regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dcfclk_bypass > 4)
+ regs_and_bypass->dcfclk_bypass = 0;
+ regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dispclk_bypass > 4)
+ regs_and_bypass->dispclk_bypass = 0;
+ regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dprefclk_bypass > 4)
+ regs_and_bypass->dprefclk_bypass = 0;
+
+ if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
+ DC_LOG_SMU("clk_type,clk_value,deepsleep_cntl,deepsleep_allow,bypass\n");
+
+ DC_LOG_SMU("dcfclk,%d,%d,%d,%s\n",
+ regs_and_bypass->dcfclk,
+ regs_and_bypass->dcf_deep_sleep_divider,
+ regs_and_bypass->dcf_deep_sleep_allow,
+ bypass_clks[(int) regs_and_bypass->dcfclk_bypass]);
+
+ DC_LOG_SMU("dprefclk,%d,N/A,N/A,%s\n",
+ regs_and_bypass->dprefclk,
+ bypass_clks[(int) regs_and_bypass->dprefclk_bypass]);
+
+ DC_LOG_SMU("dispclk,%d,N/A,N/A,%s\n",
+ regs_and_bypass->dispclk,
+ bypass_clks[(int) regs_and_bypass->dispclk_bypass]);
+
+ // REGISTER VALUES
+ DC_LOG_SMU("reg_name,value,clk_type");
+
+ DC_LOG_SMU("CLK1_CLK3_CURRENT_CNT,%d,dcfclk",
+ internal.CLK1_CLK3_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK4_CURRENT_CNT,%d,dtbclk",
+ internal.CLK1_CLK4_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK3_DS_CNTL,%d,dcf_deep_sleep_divider",
+ internal.CLK1_CLK3_DS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK3_ALLOW_DS,%d,dcf_deep_sleep_allow",
+ internal.CLK1_CLK3_ALLOW_DS);
+
+ DC_LOG_SMU("CLK1_CLK2_CURRENT_CNT,%d,dprefclk",
+ internal.CLK1_CLK2_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK0_CURRENT_CNT,%d,dispclk",
+ internal.CLK1_CLK0_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK1_CURRENT_CNT,%d,dppclk",
+ internal.CLK1_CLK1_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK3_BYPASS_CNTL,%d,dcfclk_bypass",
+ internal.CLK1_CLK3_BYPASS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK2_BYPASS_CNTL,%d,dprefclk_bypass",
+ internal.CLK1_CLK2_BYPASS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK0_BYPASS_CNTL,%d,dispclk_bypass",
+ internal.CLK1_CLK0_BYPASS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK1_BYPASS_CNTL,%d,dppclk_bypass",
+ internal.CLK1_CLK1_BYPASS_CNTL);
+
+ }
}
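A minimal sketch, assuming the same encoding as the clamping logic above, of how a *_BYPASS_CNTL value maps onto the bypass source strings (decode_bypass() is hypothetical):

static const char *decode_bypass(uint32_t bypass_cntl)
{
	static const char * const bypass_clks[] = {
		"0x0 DFS", "0x1 REFCLK", "0x2 ERROR", "0x3 400 FCH", "0x4 600 FCH"
	};
	uint32_t sel = bypass_cntl & 0x7; /* low three bits select the source */

	return (sel > 4) ? bypass_clks[0] : bypass_clks[sel]; /* clamp like above */
}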
static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
@@ -623,6 +734,7 @@ static void init_clk_states(struct clk_mgr *clk_mgr)
void dcn35_init_clocks(struct clk_mgr *clk_mgr)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ struct clk_mgr_dcn35 *clk_mgr_dcn35 = TO_CLK_MGR_DCN35(clk_mgr_int);
init_clk_states(clk_mgr);
@@ -633,6 +745,13 @@ void dcn35_init_clocks(struct clk_mgr *clk_mgr)
else
clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+ dcn35_save_clk_registers(&clk_mgr->boot_snapshot, clk_mgr_dcn35);
+
+ clk_mgr->clks.ref_dtbclk_khz = clk_mgr->boot_snapshot.dtbclk * 10;
+ if (clk_mgr->boot_snapshot.dtbclk > 59000) {
+		/* dtbclk is considered enabled when the boot snapshot shows it running (>~590 MHz) */
+ clk_mgr->clks.dtbclk_en = true;
+ }
}
static struct clk_bw_params dcn35_bw_params = {
.vram_type = Ddr4MemType,
@@ -1176,6 +1295,35 @@ static void dcn35_update_clocks_fpga(struct clk_mgr *clk_mgr,
dcn35_update_clocks_update_dtb_dto(clk_mgr_int, context, clk_mgr->clks.ref_dtbclk_khz);
}
+static unsigned int dcn35_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ unsigned int num_clk_levels;
+
+ switch (clk_type) {
+ case CLK_TYPE_DISPCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
+ return num_clk_levels ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 :
+ clk_mgr->base.boot_snapshot.dispclk;
+ case CLK_TYPE_DPPCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;
+ return num_clk_levels ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dppclk_mhz * 1000 :
+ clk_mgr->base.boot_snapshot.dppclk;
+ case CLK_TYPE_DSCCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
+ return num_clk_levels ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 / 3 :
+ clk_mgr->base.boot_snapshot.dispclk / 3;
+ default:
+ break;
+ }
+
+ return 0;
+}
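For illustration, a worked example with hypothetical table values:

/* Top dispclk DPM level = 1200 MHz:
 *   CLK_TYPE_DISPCLK -> 1200 * 1000 = 1200000 kHz
 *   CLK_TYPE_DSCCLK  -> 1200000 / 3 = 400000 kHz (fixed dispclk/3 ratio)
 * With num_clk_levels == 0, both fall back to the boot snapshot.
 */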
+
static struct clk_mgr_funcs dcn35_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
@@ -1187,6 +1335,7 @@ static struct clk_mgr_funcs dcn35_funcs = {
.set_low_power_state = dcn35_set_low_power_state,
.exit_low_power_state = dcn35_exit_low_power_state,
.is_ips_supported = dcn35_is_ips_supported,
+ .get_max_clock_khz = dcn35_get_max_clock_khz,
};
struct clk_mgr_funcs dcn35_fpga_funcs = {
@@ -1323,7 +1472,7 @@ void dcn35_clk_mgr_construct(
dcn35_bw_params.wm_table = ddr5_wm_table;
}
/* Saved clocks configured at boot for debug purposes */
- dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
+ dcn35_save_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index a3b8e3d4a429..306016c1f109 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -13,7 +13,7 @@
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#include "atomfirmware.h"
@@ -22,8 +22,6 @@
#include "dcn/dcn_4_1_0_offset.h"
#include "dcn/dcn_4_1_0_sh_mask.h"
-#include "dml/dcn401/dcn401_fpu.h"
-
#define DCN_BASE__INST0_SEG1 0x000000C0
#define mmCLK01_CLK0_CLK_PLL_REQ 0x16E37
@@ -164,7 +162,7 @@ static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e c
unsigned int i;
char *entry_i = (char *)entry_0;
- uint32_t ret = dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF);
+ uint32_t ret = dcn401_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF);
if (ret & (1 << 31))
/* fine-grained, only min and max */
@@ -176,50 +174,43 @@ static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e c
/* if the initial message failed, num_levels will be 0 */
for (i = 0; i < *num_levels && i < ARRAY_SIZE(clk_mgr->base.bw_params->clk_table.entries); i++) {
- *((unsigned int *)entry_i) = (dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF);
+ *((unsigned int *)entry_i) = (dcn401_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF);
entry_i += sizeof(clk_mgr->base.bw_params->clk_table.entries[0]);
}
}
static void dcn401_build_wm_range_table(struct clk_mgr *clk_mgr)
{
- /* legacy */
- DC_FP_START();
- dcn401_build_wm_range_table_fpu(clk_mgr);
- DC_FP_END();
-
- if (clk_mgr->ctx->dc->debug.using_dml21) {
- /* For min clocks use as reported by PM FW and report those as min */
- uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz;
- uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
-
- /* Set A - Normal - default values */
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
-
- /* Set B - Unused on dcn4 */
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = false;
-
- /* Set 1A - Dummy P-State - P-State latency set to "dummy p-state" value */
- /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
- if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_uclk = 0xFFFF;
- } else {
- clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = false;
- }
-
- /* Set 1B - Unused on dcn4 */
- clk_mgr->bw_params->wm_table.nv_entries[WM_1B].valid = false;
+ /* For min clocks use as reported by PM FW and report those as min */
+ uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz;
+ uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
+
+ /* Set A - Normal - default values */
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
+
+ /* Set B - Unused on dcn4 */
+ clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = false;
+
+ /* Set 1A - Dummy P-State - P-State latency set to "dummy p-state" value */
+ /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
+ if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = true;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_dcfclk = 0xFFFF;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_uclk = min_uclk_mhz;
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_uclk = 0xFFFF;
+ } else {
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = false;
}
+
+ /* Set 1B - Unused on dcn4 */
+ clk_mgr->bw_params->wm_table.nv_entries[WM_1B].valid = false;
}
void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
@@ -240,20 +231,20 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
clk_mgr->smu_present = false;
clk_mgr->dpm_present = false;
- if (!clk_mgr_base->force_smu_not_present && dcn30_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
+ if (!clk_mgr_base->force_smu_not_present && dcn401_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
clk_mgr->smu_present = true;
if (!clk_mgr->smu_present)
return;
- dcn30_smu_check_driver_if_version(clk_mgr);
- dcn30_smu_check_msg_header_version(clk_mgr);
+ dcn401_smu_check_driver_if_version(clk_mgr);
+ dcn401_smu_check_msg_header_version(clk_mgr);
/* DCFCLK */
dcn401_init_single_clock(clk_mgr, PPCLK_DCFCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
&num_entries_per_clk->num_dcfclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DCFCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DCFCLK);
if (num_entries_per_clk->num_dcfclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dcfclk_levels - 1].dcfclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = 0;
@@ -262,7 +253,7 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_SOCCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
&num_entries_per_clk->num_socclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_SOCCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_SOCCLK);
if (num_entries_per_clk->num_socclk_levels && clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_socclk_levels - 1].socclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = 0;
@@ -272,7 +263,7 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_DTBCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
&num_entries_per_clk->num_dtbclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DTBCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DTBCLK);
if (num_entries_per_clk->num_dtbclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dtbclk_levels - 1].dtbclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = 0;
@@ -282,7 +273,7 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_DISPCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
&num_entries_per_clk->num_dispclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DISPCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DISPCLK);
if (num_entries_per_clk->num_dispclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dispclk_levels - 1].dispclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 0;
@@ -320,6 +311,25 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_build_wm_range_table(clk_mgr_base);
}
+bool dcn401_is_dc_mode_present(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ return clk_mgr->smu_present && clk_mgr->dpm_present &&
+ ((clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_fclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz) ||
+ (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_socclk_levels &&
+ clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz));
+}
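The helper returns true only when the SMU and DPM are both present and at least one clock advertises a DC-mode ceiling; a hedged caller sketch (apply_dc_mode_limits() is hypothetical):

/*   if (clk_mgr_base->funcs->is_dc_mode_present &&
 *       clk_mgr_base->funcs->is_dc_mode_present(clk_mgr_base))
 *           apply_dc_mode_limits(clk_mgr_base);
 */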
+
static void dcn401_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
@@ -1308,8 +1318,8 @@ static void dcn401_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
table->Watermarks.WatermarkRow[i].WmSetting = i;
table->Watermarks.WatermarkRow[i].Flags = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.wm_type;
}
- dcn30_smu_set_dram_addr_high(clk_mgr, clk_mgr->wm_range_table_addr >> 32);
- dcn30_smu_set_dram_addr_low(clk_mgr, clk_mgr->wm_range_table_addr & 0xFFFFFFFF);
+ dcn401_smu_set_dram_addr_high(clk_mgr, clk_mgr->wm_range_table_addr >> 32);
+ dcn401_smu_set_dram_addr_low(clk_mgr, clk_mgr->wm_range_table_addr & 0xFFFFFFFF);
dcn401_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}
@@ -1380,7 +1390,7 @@ static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz;
}
- clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
if (num_entries_per_clk->num_memclk_levels && clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = 0;
@@ -1389,16 +1399,12 @@ static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_FCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz,
&num_entries_per_clk->num_fclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK);
if (num_entries_per_clk->num_fclk_levels && clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_fclk_levels - 1].fclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = 0;
- if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
- num_levels = num_entries_per_clk->num_memclk_levels;
- } else {
- num_levels = num_entries_per_clk->num_fclk_levels;
- }
+ num_levels = max(num_entries_per_clk->num_memclk_levels, num_entries_per_clk->num_fclk_levels);
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
@@ -1490,6 +1496,35 @@ static int dcn401_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base)
return 0;
}
+unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ unsigned int num_clk_levels;
+
+ switch (clk_type) {
+ case CLK_TYPE_DISPCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
+ return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 :
+ clk_mgr->base.boot_snapshot.dispclk;
+ case CLK_TYPE_DPPCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;
+ return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DPPCLK) ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dppclk_mhz * 1000 :
+ clk_mgr->base.boot_snapshot.dppclk;
+ case CLK_TYPE_DSCCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
+ return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 / 3 :
+ clk_mgr->base.boot_snapshot.dispclk / 3;
+ default:
+ break;
+ }
+
+ return 0;
+}
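Unlike the dcn35 variant above, this helper gates the table path on dcn401_is_ppclk_dpm_enabled() rather than on a non-zero level count, but it falls back to the same boot-snapshot values when DPM reporting is unavailable.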
+
static struct clk_mgr_funcs dcn401_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn401_get_dtb_ref_freq_khz,
@@ -1505,6 +1540,8 @@ static struct clk_mgr_funcs dcn401_funcs = {
.get_dispclk_from_dentist = dcn401_get_dispclk_from_dentist,
.get_hard_min_memclk = dcn401_get_hard_min_memclk,
.get_hard_min_fclk = dcn401_get_hard_min_fclk,
+ .is_dc_mode_present = dcn401_is_dc_mode_present,
+ .get_max_clock_khz = dcn401_get_max_clock_khz,
};
struct clk_mgr_internal *dcn401_clk_mgr_construct(
@@ -1565,7 +1602,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct(
clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
if (!clk_mgr->base.bw_params) {
BREAK_TO_DEBUGGER();
- kfree(clk_mgr);
+ kfree(clk_mgr401);
return NULL;
}
@@ -1576,6 +1613,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct(
if (!clk_mgr->wm_range_table) {
BREAK_TO_DEBUGGER();
kfree(clk_mgr->base.bw_params);
+ kfree(clk_mgr401);
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
index 6c9ae5ca2c7e..97a1ce1e8a9e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
@@ -105,10 +105,13 @@ struct dcn401_clk_mgr {
};
void dcn401_init_clocks(struct clk_mgr *clk_mgr_base);
+bool dcn401_is_dc_mode_present(struct clk_mgr *clk_mgr_base);
struct clk_mgr_internal *dcn401_clk_mgr_construct(struct dc_context *ctx,
struct dccg *dccg);
void dcn401_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr);
+unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type);
+
#endif /* __DCN401_CLK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
index 21c35528f61f..3a263840893e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
@@ -57,6 +57,8 @@ static bool dcn401_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uin
/* Wait for response register to be ready */
dcn401_smu_wait_for_response(clk_mgr, 10, 200000);
+ TRACE_SMU_MSG_ENTER(msg_id, param_in, clk_mgr->base.ctx);
+
/* Clear response register */
REG_WRITE(DAL_RESP_REG, 0);
@@ -71,9 +73,11 @@ static bool dcn401_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uin
if (param_out)
*param_out = REG_READ(DAL_ARG_REG);
+ TRACE_SMU_MSG_EXIT(true, param_out ? *param_out : 0, clk_mgr->base.ctx);
return true;
}
+ TRACE_SMU_MSG_EXIT(false, 0, clk_mgr->base.ctx);
return false;
}
@@ -102,8 +106,6 @@ static uint32_t dcn401_smu_wait_for_response_delay(struct clk_mgr_internal *clk_
*total_delay_us += delay_us;
} while (max_retries--);
- TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx);
-
return reg;
}
@@ -115,6 +117,8 @@ static bool dcn401_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mg
/* Wait for response register to be ready */
dcn401_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay1_us);
+ TRACE_SMU_MSG_ENTER(msg_id, param_in, clk_mgr->base.ctx);
+
/* Clear response register */
REG_WRITE(DAL_RESP_REG, 0);
@@ -124,18 +128,71 @@ static bool dcn401_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mg
/* Trigger the message transaction by writing the message ID */
REG_WRITE(DAL_MSG_REG, msg_id);
- TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx);
-
/* Wait for response */
if (dcn401_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay2_us) == DALSMC_Result_OK) {
if (param_out)
*param_out = REG_READ(DAL_ARG_REG);
*total_delay_us = delay1_us + delay2_us;
+ TRACE_SMU_MSG_EXIT(true, param_out ? *param_out : 0, clk_mgr->base.ctx);
return true;
}
*total_delay_us = delay1_us + 2000000;
+ TRACE_SMU_MSG_EXIT(false, 0, clk_mgr->base.ctx);
+ return false;
+}
+
+bool dcn401_smu_get_smu_version(struct clk_mgr_internal *clk_mgr, unsigned int *version)
+{
+ smu_print("SMU Get SMU version\n");
+
+ if (dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetSmuVersion, 0, version)) {
+
+ smu_print("SMU version: %d\n", *version);
+
+ return true;
+ }
+
+ return false;
+}
+
+/* Message output should match SMU14_DRIVER_IF_VERSION in the SMU14 driver interface header */
+bool dcn401_smu_check_driver_if_version(struct clk_mgr_internal *clk_mgr)
+{
+ uint32_t response = 0;
+
+ smu_print("SMU Check driver if version\n");
+
+ if (dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDriverIfVersion, 0, &response)) {
+
+ smu_print("SMU driver if version: %d\n", response);
+
+ if (response == SMU14_DRIVER_IF_VERSION)
+ return true;
+ }
+
+ return false;
+}
+
+/* Message output should match DALSMC_VERSION in dalsmc.h */
+bool dcn401_smu_check_msg_header_version(struct clk_mgr_internal *clk_mgr)
+{
+ uint32_t response = 0;
+
+ smu_print("SMU Check msg header version\n");
+
+ if (dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetMsgHeaderVersion, 0, &response)) {
+
+ smu_print("SMU msg header version: %d\n", response);
+
+ if (response == DALSMC_VERSION)
+ return true;
+ }
+
return false;
}
@@ -163,6 +220,22 @@ void dcn401_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsi
smu_print("Numways for SubVP : %d\n", num_ways);
}
+void dcn401_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
+{
+ smu_print("SMU Set DRAM addr high: %d\n", addr_high);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetDalDramAddrHigh, addr_high, NULL);
+}
+
+void dcn401_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
+{
+ smu_print("SMU Set DRAM addr low: %d\n", addr_low);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetDalDramAddrLow, addr_low, NULL);
+}
+
void dcn401_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
{
smu_print("SMU Transfer WM table DRAM 2 SMU\n");
@@ -348,3 +421,52 @@ unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr
return response;
}
+
+/*
+ * Frequency in MHz returned in lower 16 bits for valid DPM level
+ *
+ * Call with dpm_level = 0xFF to query features, return value will be:
+ * Bits 7:0 - number of DPM levels
+ * Bit 28 - 1 = auto DPM on
+ * Bit 29 - 1 = sweep DPM on
+ * Bit 30 - 1 = forced DPM on
+ * Bit 31 - 0 = discrete, 1 = fine-grained
+ *
+ * With fine-grained DPM, only min and max frequencies will be reported
+ *
+ * Returns 0 on failure
+ */
+unsigned int dcn401_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint8_t dpm_level)
+{
+ uint32_t response = 0;
+
+ /* bits 23:16 for clock type, lower 8 bits for DPM level */
+ uint32_t param = (clk << 16) | dpm_level;
+
+ smu_print("SMU Get dpm freq by index: clk = %d, dpm_level = %d\n", clk, dpm_level);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDpmFreqByIndex, param, &response);
+
+ smu_print("SMU dpm freq: %d MHz\n", response);
+
+ return response;
+}
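A worked decode of the feature query, using a hypothetical response value and the bit layout documented above:

/* dpm_level = 0xFF, response = 0x80000004 (hypothetical):
 *   bits 7:0 = 0x04 -> four DPM levels
 *   bit 31   = 1    -> fine-grained DPM, so only min and max are reported
 */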
+
+/* Returns the max DPM frequency in DC mode in MHz, 0 on failure */
+unsigned int dcn401_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk)
+{
+ uint32_t response = 0;
+
+ /* bits 23:16 for clock type */
+ uint32_t param = clk << 16;
+
+ smu_print("SMU Get DC mode max DPM freq: clk = %d\n", clk);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDcModeMaxDpmFreq, param, &response);
+
+	smu_print("SMU DC mode max DPM freq: %d MHz\n", response);
+
+ return response;
+}
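Both SMU queries pack the clock type into bits 23:16 of the message parameter; a small worked example with a hypothetical clock id:

/* clk = 3 (hypothetical), param = 3 << 16 = 0x00030000 */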
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
index e02eb1294b37..4f5ac603e822 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
@@ -7,11 +7,17 @@
#include "os_types.h"
#include "core_types.h"
-#include "dcn32/dcn32_clk_mgr_smu_msg.h"
+struct clk_mgr_internal;
+
+bool dcn401_smu_get_smu_version(struct clk_mgr_internal *clk_mgr, unsigned int *version);
+bool dcn401_smu_check_driver_if_version(struct clk_mgr_internal *clk_mgr);
+bool dcn401_smu_check_msg_header_version(struct clk_mgr_internal *clk_mgr);
void dcn401_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool support);
void dcn401_smu_send_uclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool support);
void dcn401_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways);
+void dcn401_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high);
+void dcn401_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low);
void dcn401_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
void dcn401_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
unsigned int dcn401_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz);
@@ -29,5 +35,7 @@ bool dcn401_smu_set_subvp_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
void dcn401_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz);
void dcn401_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays);
unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr);
+unsigned int dcn401_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk);
+unsigned int dcn401_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint8_t dpm_level);
#endif /* __DCN401_CLK_MGR_SMU_MSG_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 56d011a1323c..8be9cbd43e18 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -60,7 +60,7 @@
#include "link_encoder.h"
#include "link_enc_cfg.h"
-#include "link.h"
+#include "link_service.h"
#include "dm_helpers.h"
#include "mem_input.h"
@@ -83,7 +83,8 @@
#include "hw_sequencer_private.h"
#if defined(CONFIG_DRM_AMD_DC_FP)
-#include "dml2/dml2_internal_types.h"
+#include "dml2_0/dml2_internal_types.h"
+#include "soc_and_ip_translator.h"
#endif
#include "dce/dmub_outbox.h"
@@ -147,10 +148,16 @@ static const char DC_BUILD_ID[] = "production-build";
/* Private functions */
-static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
+static inline void elevate_update_type(
+ struct surface_update_descriptor *descriptor,
+ enum surface_update_type new_type,
+ enum dc_lock_descriptor new_locks
+)
{
- if (new > *original)
- *original = new;
+ if (new_type > descriptor->update_type)
+ descriptor->update_type = new_type;
+
+ descriptor->lock_descriptor |= new_locks;
}
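To make the new semantics concrete, a short sketch (values hypothetical): the descriptor keeps the highest update type seen while OR-ing every requested lock together.

struct surface_update_descriptor d = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };

elevate_update_type(&d, UPDATE_TYPE_MED,  LOCK_DESCRIPTOR_STREAM);
elevate_update_type(&d, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_GLOBAL);
/* d.update_type     == UPDATE_TYPE_MED */
/* d.lock_descriptor == LOCK_DESCRIPTOR_STREAM | LOCK_DESCRIPTOR_GLOBAL */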
static void destroy_links(struct dc *dc)
@@ -217,11 +224,24 @@ static bool create_links(
connectors_num,
num_virtual_links);
- // condition loop on link_count to allow skipping invalid indices
+ /* When getting the number of connectors, the VBIOS reports the number of valid indices,
+ * but it doesn't say which indices are valid, and not every index has an actual connector.
+ * So, if we don't find a connector on an index, that is not an error.
+ *
+ * - There is no guarantee that the first N indices will be valid
+	 * - VBIOS may report a higher number of valid indices than there are actual connectors
+ * - Some VBIOS have valid configurations for more connectors than there actually are
+ * on the card. This may be because the manufacturer used the same VBIOS for different
+ * variants of the same card.
+ */
for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
+ struct graphics_object_id connector_id = bios->funcs->get_connector_id(bios, i);
struct link_init_data link_init_params = {0};
struct dc_link *link;
+ if (connector_id.id == CONNECTOR_ID_UNKNOWN)
+ continue;
+
DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
link_init_params.ctx = dc->ctx;
@@ -241,6 +261,7 @@ static bool create_links(
DC_LOG_DC("BIOS object table - end");
/* Create a link for each usb4 dpia port */
+ dc->lowest_dpia_link_index = MAX_LINKS;
for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
struct link_init_data link_init_params = {0};
struct dc_link *link;
@@ -253,6 +274,9 @@ static bool create_links(
link = dc->link_srv->create_link(&link_init_params);
if (link) {
+ if (dc->lowest_dpia_link_index > dc->link_count)
+ dc->lowest_dpia_link_index = dc->link_count;
+
dc->links[dc->link_count] = link;
link->dc = dc;
++dc->link_count;
@@ -279,6 +303,7 @@ static bool create_links(
link->link_id.id = CONNECTOR_ID_VIRTUAL;
link->link_id.enum_id = ENUM_ID_1;
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED;
link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
if (!link->link_enc) {
@@ -442,7 +467,9 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
* avoid conflicting with firmware updates.
*/
if (dc->ctx->dce_version > DCE_VERSION_MAX) {
- if (dc->optimized_required || dc->wm_optimized_required) {
+ if (dc->optimized_required &&
+ (stream->adjust.v_total_max != adjust->v_total_max ||
+ stream->adjust.v_total_min != adjust->v_total_min)) {
stream->adjust.timing_adjust_pending = true;
return false;
}
@@ -473,9 +500,14 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
1,
*adjust);
stream->adjust.timing_adjust_pending = false;
+
+ if (dc->hwss.notify_cursor_offload_drr_update)
+ dc->hwss.notify_cursor_offload_drr_update(dc, dc->current_state, stream);
+
return true;
}
}
+
return false;
}
@@ -930,21 +962,24 @@ static void dc_destruct(struct dc *dc)
}
dc_destroy_resource_pool(dc);
-
+#ifdef CONFIG_DRM_AMD_DC_FP
+ dc_destroy_soc_and_ip_translator(&dc->soc_and_ip_translator);
+#endif
if (dc->link_srv)
link_destroy_link_service(&dc->link_srv);
- if (dc->ctx->gpio_service)
- dal_gpio_service_destroy(&dc->ctx->gpio_service);
+ if (dc->ctx) {
+ if (dc->ctx->gpio_service)
+ dal_gpio_service_destroy(&dc->ctx->gpio_service);
- if (dc->ctx->created_bios)
- dal_bios_parser_destroy(&dc->ctx->dc_bios);
+ if (dc->ctx->created_bios)
+ dal_bios_parser_destroy(&dc->ctx->dc_bios);
+ kfree(dc->ctx->logger);
+ dc_perf_trace_destroy(&dc->ctx->perf_trace);
- kfree(dc->ctx->logger);
- dc_perf_trace_destroy(&dc->ctx->perf_trace);
-
- kfree(dc->ctx);
- dc->ctx = NULL;
+ kfree(dc->ctx);
+ dc->ctx = NULL;
+ }
kfree(dc->bw_vbios);
dc->bw_vbios = NULL;
@@ -972,6 +1007,8 @@ static bool dc_construct_ctx(struct dc *dc,
if (!dc_ctx)
return false;
+ dc_stream_init_rmcm_3dlut(dc);
+
dc_ctx->cgs_device = init_params->cgs_device;
dc_ctx->driver_context = init_params->driver;
dc_ctx->dc = dc;
@@ -1118,8 +1155,8 @@ static bool dc_construct(struct dc *dc,
/* set i2c speed if not done by the respective dcnxxx__resource.c */
if (dc->caps.i2c_speed_in_khz_hdcp == 0)
dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
- if (dc->caps.max_optimizable_video_width == 0)
- dc->caps.max_optimizable_video_width = 5120;
+ if (dc->check_config.max_optimizable_video_width == 0)
+ dc->check_config.max_optimizable_video_width = 5120;
dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
if (!dc->clk_mgr)
goto fail;
@@ -1131,6 +1168,9 @@ static bool dc_construct(struct dc *dc,
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
DC_FP_END();
}
+ dc->soc_and_ip_translator = dc_create_soc_and_ip_translator(dc_ctx->dce_version);
+ if (!dc->soc_and_ip_translator)
+ goto fail;
#endif
if (!create_links(dc, init_params->num_virtual_links))
@@ -2107,6 +2147,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (!dcb->funcs->is_accelerated_mode(dcb)) {
disable_vbios_mode_if_required(dc, context);
dc->hwss.enable_accelerated_mode(dc, context);
+ } else if (get_seamless_boot_stream_count(dc->current_state) > 0) {
+		/* If the previous stream still retains the apply-seamless-boot flag,
+		 * the OS has not actually performed a flip yet. At this point, if we
+		 * receive dc_commit_streams again, we should once more check whether
+		 * the actual HW timing matches what the OS has provided.
+ */
+ disable_vbios_mode_if_required(dc, context);
}
if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) {
@@ -2130,8 +2178,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
*/
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, true);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, true);
if (dc->hwss.update_dsc_pg)
dc->hwss.update_dsc_pg(dc, context, false);
@@ -2160,8 +2208,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
}
+ for (i = 0; i < dc->current_state->stream_count; i++)
+ dc_dmub_srv_control_cursor_offload(dc, dc->current_state, dc->current_state->streams[i], false);
+
result = dc->hwss.apply_ctx_to_hw(dc, context);
+ for (i = 0; i < context->stream_count; i++)
+ dc_dmub_srv_control_cursor_offload(dc, context, context->streams[i], true);
+
if (result != DC_OK) {
/* Application of dc_state to hardware stopped. */
dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
@@ -2201,8 +2255,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.commit_subvp_config(dc, context);
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
@@ -2377,7 +2431,7 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
context->power_source = params->power_source;
- res = dc_validate_with_context(dc, set, params->stream_count, context, false);
+ res = dc_validate_with_context(dc, set, params->stream_count, context, DC_VALIDATE_MODE_AND_PROGRAMMING);
/*
* Only update link encoder to stream assignment after bandwidth validation passed.
@@ -2391,6 +2445,18 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
goto fail;
}
+ /*
+	 * If not already seamless, make the transition seamless by inserting an intermediate minimal transition
+ */
+ if (dc->hwss.is_pipe_topology_transition_seamless &&
+ !dc->hwss.is_pipe_topology_transition_seamless(dc, dc->current_state, context)) {
+ res = commit_minimal_transition_state(dc, context);
+ if (res != DC_OK) {
+ BREAK_TO_DEBUGGER();
+ goto fail;
+ }
+ }
+
res = dc_commit_state_no_check(dc, context);
for (i = 0; i < params->stream_count; i++) {
@@ -2537,7 +2603,6 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
}
dc->optimized_required = false;
- dc->wm_optimized_required = false;
}
bool dc_set_generic_gpio_for_stereo(bool enable,
@@ -2606,47 +2671,50 @@ static bool is_surface_in_context(
return false;
}
-static enum surface_update_type get_plane_info_update_type(const struct dc *dc, const struct dc_surface_update *u)
+static struct surface_update_descriptor get_plane_info_update_type(const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
- enum surface_update_type update_type = UPDATE_TYPE_FAST;
+ struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
if (!u->plane_info)
- return UPDATE_TYPE_FAST;
+ return update_type;
+
+ // `plane_info` present means at least `STREAM` lock is required
+ elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
if (u->plane_info->color_space != u->surface->color_space) {
update_flags->bits.color_space_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
update_flags->bits.horizontal_mirror_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->plane_info->rotation != u->surface->rotation) {
update_flags->bits.rotation_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->plane_info->format != u->surface->format) {
update_flags->bits.pixel_format_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->plane_info->stereo_format != u->surface->stereo_format) {
update_flags->bits.stereo_format_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
update_flags->bits.per_pixel_alpha_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
update_flags->bits.global_alpha_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->plane_info->dcc.enable != u->surface->dcc.enable
@@ -2658,7 +2726,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
* recalculate stutter period.
*/
update_flags->bits.dcc_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (resource_pixel_format_to_bpp(u->plane_info->format) !=
@@ -2667,30 +2735,41 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
* and DML calculation
*/
update_flags->bits.bpp_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
update_flags->bits.plane_size_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
+ const struct dc_tiling_info *tiling = &u->plane_info->tiling_info;
- if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
- sizeof(struct dc_tiling_info)) != 0) {
+ if (memcmp(tiling, &u->surface->tiling_info, sizeof(*tiling)) != 0) {
update_flags->bits.swizzle_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
-
- /* todo: below are HW dependent, we should add a hook to
- * DCE/N resource and validated there.
- */
- if (!dc->debug.skip_full_updated_if_possible) {
- /* swizzled mode requires RQ to be setup properly,
- * thus need to run DML to calculate RQ settings
- */
- update_flags->bits.bandwidth_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
+
+ switch (tiling->gfxversion) {
+ case DcGfxVersion9:
+ case DcGfxVersion10:
+ case DcGfxVersion11:
+ if (tiling->gfx9.swizzle != DC_SW_LINEAR) {
+ update_flags->bits.bandwidth_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ }
+ break;
+ case DcGfxAddr3:
+ if (tiling->gfx_addr3.swizzle != DC_ADDR3_SW_LINEAR) {
+ update_flags->bits.bandwidth_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ }
+ break;
+ case DcGfxVersion7:
+ case DcGfxVersion8:
+ case DcGfxVersionUnknown:
+ default:
+ break;
}
}
@@ -2698,14 +2777,18 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
return update_type;
}
-static enum surface_update_type get_scaling_info_update_type(
- const struct dc *dc,
+static struct surface_update_descriptor get_scaling_info_update_type(
+ const struct dc_check_config *check_config,
const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
+ struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
if (!u->scaling_info)
- return UPDATE_TYPE_FAST;
+ return update_type;
+
+ // `scaling_info` present means at least `STREAM` lock is required
+ elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
if (u->scaling_info->src_rect.width != u->surface->src_rect.width
|| u->scaling_info->src_rect.height != u->surface->src_rect.height
@@ -2716,6 +2799,7 @@ static enum surface_update_type get_scaling_info_update_type(
|| u->scaling_info->scaling_quality.integer_scaling !=
u->surface->scaling_quality.integer_scaling) {
update_flags->bits.scaling_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
if (u->scaling_info->src_rect.width > u->surface->src_rect.width
|| u->scaling_info->src_rect.height > u->surface->src_rect.height)
@@ -2729,7 +2813,7 @@ static enum surface_update_type get_scaling_info_update_type(
/* Making dst rect smaller requires a bandwidth change */
update_flags->bits.bandwidth_change = 1;
- if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
+ if (u->scaling_info->src_rect.width > check_config->max_optimizable_video_width &&
(u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
/* Changing clip size of a large surface may result in MPC slice count change */
@@ -2741,123 +2825,109 @@ static enum surface_update_type get_scaling_info_update_type(
|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
- || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
+ || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) {
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
update_flags->bits.position_change = 1;
+ }
- /* process every update flag before returning */
- if (update_flags->bits.clock_change
- || update_flags->bits.bandwidth_change
- || update_flags->bits.scaling_change)
- return UPDATE_TYPE_FULL;
-
- if (update_flags->bits.position_change)
- return UPDATE_TYPE_MED;
-
- return UPDATE_TYPE_FAST;
+ return update_type;
}
-static enum surface_update_type det_surface_update(const struct dc *dc,
- const struct dc_surface_update *u)
+static struct surface_update_descriptor det_surface_update(
+ const struct dc_check_config *check_config,
+ struct dc_surface_update *u)
{
- const struct dc_state *context = dc->current_state;
- enum surface_update_type type;
- enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+ struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
union surface_update_flags *update_flags = &u->surface->update_flags;
- if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
+ if (u->surface->force_full_update) {
update_flags->raw = 0xFFFFFFFF;
- return UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ return overall_type;
}
update_flags->raw = 0; // Reset all flags
- type = get_plane_info_update_type(dc, u);
- elevate_update_type(&overall_type, type);
+ struct surface_update_descriptor inner_type = get_plane_info_update_type(u);
- type = get_scaling_info_update_type(dc, u);
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
+
+ inner_type = get_scaling_info_update_type(check_config, u);
+ elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
if (u->flip_addr) {
update_flags->bits.addr_update = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+
if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
update_flags->bits.tmz_changed = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
}
- if (u->in_transfer_func)
+ if (u->in_transfer_func) {
update_flags->bits.in_transfer_func_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
+ }
- if (u->input_csc_color_matrix)
+ if (u->input_csc_color_matrix) {
update_flags->bits.input_csc_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
- if (u->coeff_reduction_factor)
+ if (u->coeff_reduction_factor) {
update_flags->bits.coeff_reduction_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
- if (u->gamut_remap_matrix)
+ if (u->gamut_remap_matrix) {
update_flags->bits.gamut_remap_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
- if (u->blend_tf)
+ if (u->blend_tf || (u->gamma && dce_use_lut(u->plane_info ? u->plane_info->format : u->surface->format))) {
update_flags->bits.gamma_change = 1;
-
- if (u->gamma) {
- enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
-
- if (u->plane_info)
- format = u->plane_info->format;
- else
- format = u->surface->format;
-
- if (dce_use_lut(format))
- update_flags->bits.gamma_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
- if (u->lut3d_func || u->func_shaper)
+ if (u->lut3d_func || u->func_shaper) {
update_flags->bits.lut_3d = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
if (u->hdr_mult.value)
if (u->hdr_mult.value != u->surface->hdr_mult.value) {
+ // TODO: Should this be UPDATE_TYPE_FAST?
update_flags->bits.hdr_mult = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_MED);
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->sdr_white_level_nits)
if (u->sdr_white_level_nits != u->surface->sdr_white_level_nits) {
+ // TODO: Should this be UPDATE_TYPE_FAST?
update_flags->bits.sdr_white_level_nits = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->cm2_params) {
- if ((u->cm2_params->component_settings.shaper_3dlut_setting
- != u->surface->mcm_shaper_3dlut_setting)
- || (u->cm2_params->component_settings.lut1d_enable
- != u->surface->mcm_lut1d_enable))
- update_flags->bits.mcm_transfer_function_enable_change = 1;
- if (u->cm2_params->cm2_luts.lut3d_data.lut3d_src
- != u->surface->mcm_luts.lut3d_data.lut3d_src)
+ if (u->cm2_params->component_settings.shaper_3dlut_setting != u->surface->mcm_shaper_3dlut_setting
+ || u->cm2_params->component_settings.lut1d_enable != u->surface->mcm_lut1d_enable
+ || u->cm2_params->cm2_luts.lut3d_data.lut3d_src != u->surface->mcm_luts.lut3d_data.lut3d_src) {
update_flags->bits.mcm_transfer_function_enable_change = 1;
- }
- if (update_flags->bits.in_transfer_func_change) {
- type = UPDATE_TYPE_MED;
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ }
}
if (update_flags->bits.lut_3d &&
u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
- type = UPDATE_TYPE_FULL;
- elevate_update_type(&overall_type, type);
- }
- if (update_flags->bits.mcm_transfer_function_enable_change) {
- type = UPDATE_TYPE_FULL;
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
- if (dc->debug.enable_legacy_fast_update &&
+ if (check_config->enable_legacy_fast_update &&
(update_flags->bits.gamma_change ||
update_flags->bits.gamut_remap_change ||
update_flags->bits.input_csc_change ||
update_flags->bits.coeff_reduction_change)) {
- type = UPDATE_TYPE_FULL;
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
return overall_type;
}
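The elevate helper gains a third parameter in this patch: besides raising the
update type monotonically, it now accumulates the lock scope each hit asks
for. A minimal sketch of the idea (illustrative only; the patch does not show
the helper's new body, and treating the LOCK_DESCRIPTOR_* values as a bitmask
is an assumption supported by the LOCK_DESCRIPTOR_GLOBAL | LOCK_DESCRIPTOR_LINK
use below):

	/* assumed shape of the descriptor threaded through this file */
	struct surface_update_descriptor {
		enum surface_update_type update_type;
		unsigned int lock_descriptor;	/* LOCK_DESCRIPTOR_* bits */
	};

	static void elevate_update_type(struct surface_update_descriptor *desc,
					enum surface_update_type type,
					unsigned int lock_descriptor)
	{
		if (type > desc->update_type)
			desc->update_type = type;
		/* lock scopes only accumulate; a later FAST hit never drops one */
		desc->lock_descriptor |= lock_descriptor;
	}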
@@ -2885,32 +2955,26 @@ static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_upda
}
}
-static enum surface_update_type check_update_surfaces_for_stream(
- struct dc *dc,
+static struct surface_update_descriptor check_update_surfaces_for_stream(
+ const struct dc_check_config *check_config,
struct dc_surface_update *updates,
int surface_count,
- struct dc_stream_update *stream_update,
- const struct dc_stream_status *stream_status)
+ struct dc_stream_update *stream_update)
{
- int i;
- enum surface_update_type overall_type = UPDATE_TYPE_FAST;
-
- if (dc->idle_optimizations_allowed || dc_can_clear_cursor_limit(dc))
- overall_type = UPDATE_TYPE_FULL;
-
- if (stream_status == NULL || stream_status->plane_count != surface_count)
- overall_type = UPDATE_TYPE_FULL;
+ struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
if (stream_update && stream_update->pending_test_pattern) {
- overall_type = UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (stream_update && stream_update->hw_cursor_req) {
- overall_type = UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
/* some stream updates require passive update */
if (stream_update) {
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+
union stream_update_flags *su_flags = &stream_update->stream->update_flags;
if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
@@ -2918,14 +2982,16 @@ static enum surface_update_type check_update_surfaces_for_stream(
stream_update->integer_scaling_update)
su_flags->bits.scaling = 1;
- if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
+ if (check_config->enable_legacy_fast_update && stream_update->out_transfer_func)
su_flags->bits.out_tf = 1;
if (stream_update->abm_level)
su_flags->bits.abm_level = 1;
- if (stream_update->dpms_off)
+ if (stream_update->dpms_off) {
su_flags->bits.dpms_off = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL | LOCK_DESCRIPTOR_LINK);
+ }
if (stream_update->gamut_remap)
su_flags->bits.gamut_remap = 1;
@@ -2953,24 +3019,27 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->output_color_space)
su_flags->bits.out_csc = 1;
- if (su_flags->raw != 0)
- overall_type = UPDATE_TYPE_FULL;
+ // TODO: Make each elevation explicit, so as not to override a fast stream update in crtc_timing_adjust
+ if (su_flags->raw)
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
- if (stream_update->output_csc_transform)
+ // Non-global cases
+ if (stream_update->output_csc_transform) {
su_flags->bits.out_csc = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
- /* Output transfer function changes do not require bandwidth recalculation,
- * so don't trigger a full update
- */
- if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
+ if (!check_config->enable_legacy_fast_update && stream_update->out_transfer_func) {
su_flags->bits.out_tf = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
}
- for (i = 0 ; i < surface_count; i++) {
- enum surface_update_type type =
- det_surface_update(dc, &updates[i]);
+ for (int i = 0 ; i < surface_count; i++) {
+ struct surface_update_descriptor inner_type =
+ det_surface_update(check_config, &updates[i]);
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
}
return overall_type;
@@ -2981,46 +3050,18 @@ static enum surface_update_type check_update_surfaces_for_stream(
*
* See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
*/
-enum surface_update_type dc_check_update_surfaces_for_stream(
- struct dc *dc,
+struct surface_update_descriptor dc_check_update_surfaces_for_stream(
+ const struct dc_check_config *check_config,
struct dc_surface_update *updates,
int surface_count,
- struct dc_stream_update *stream_update,
- const struct dc_stream_status *stream_status)
+ struct dc_stream_update *stream_update)
{
- int i;
- enum surface_update_type type;
-
if (stream_update)
stream_update->stream->update_flags.raw = 0;
- for (i = 0; i < surface_count; i++)
+ for (int i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0;
- type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
- if (type == UPDATE_TYPE_FULL) {
- if (stream_update) {
- uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
- stream_update->stream->update_flags.raw = 0xFFFFFFFF;
- stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
- }
- for (i = 0; i < surface_count; i++)
- updates[i].surface->update_flags.raw = 0xFFFFFFFF;
- }
-
- if (type == UPDATE_TYPE_FAST) {
- // If there's an available clock comparator, we use that.
- if (dc->clk_mgr->funcs->are_clock_states_equal) {
- if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
- dc->optimized_required = true;
- // Else we fallback to mem compare.
- } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
- dc->optimized_required = true;
- }
-
- dc->optimized_required |= dc->wm_optimized_required;
- }
-
- return type;
+ return check_update_surfaces_for_stream(check_config, updates, surface_count, stream_update);
}
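Callers now receive a two-field descriptor instead of a bare enum. A minimal
caller sketch (illustrative; mirrors how update_planes_and_stream_state()
picks out .update_type later in this patch):

	struct surface_update_descriptor desc =
		dc_check_update_surfaces_for_stream(&dc->check_config,
						    srf_updates, surface_count,
						    stream_update);

	if (desc.update_type == UPDATE_TYPE_FULL) {
		/* acquire every scope flagged in desc.lock_descriptor */
	}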
static struct dc_stream_status *stream_get_status(
@@ -3273,6 +3314,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->adaptive_sync_infopacket)
stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
+ if (update->avi_infopacket)
+ stream->avi_infopacket = *update->avi_infopacket;
+
if (update->dither_option)
stream->dither_option = *update->dither_option;
@@ -3300,7 +3344,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (dsc_validate_context) {
stream->timing.dsc_cfg = *update->dsc_config;
stream->timing.flags.DSC = enable_dsc;
- if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true) != DC_OK) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context,
+ DC_VALIDATE_MODE_ONLY) != DC_OK) {
stream->timing.dsc_cfg = old_dsc_cfg;
stream->timing.flags.DSC = old_dsc_enabled;
update->dsc_config = NULL;
@@ -3347,7 +3392,11 @@ static void restore_planes_and_stream_state(
for (i = 0; i < status->plane_count; i++) {
dc_plane_copy_config(status->plane_states[i], &scratch->plane_states[i]);
}
+
+ // the stream refcount is persistent and must survive the bulk copy below
+ struct kref temp_refcount = stream->refcount;
*stream = scratch->stream_state;
+ stream->refcount = temp_refcount;
}
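The temporary around the struct copy exists because the whole-struct
assignment overwrites every member, including the kref that other holders of
the stream still rely on. The general save/copy/restore pattern, sketched
(illustrative only):

	struct kref live = obj->refcount;  /* 1. save the live count      */
	*obj = *snapshot;                  /* 2. bulk-restore the payload */
	obj->refcount = live;              /* 3. put the live count back  */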
/**
@@ -3369,7 +3418,7 @@ static void update_seamless_boot_flags(struct dc *dc,
int surface_count,
struct dc_stream_state *stream)
{
- if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
+ if (get_seamless_boot_stream_count(context) > 0 && (surface_count > 0 || stream->dpms_off)) {
/* Optimize seamless boot flag keeps clocks and watermarks high until
* first flip. After first flip, optimization is required to lower
* bandwidth. Important to note that it is expected UEFI will
@@ -3385,6 +3434,13 @@ static void update_seamless_boot_flags(struct dc *dc,
}
}
+static bool full_update_required_weak(
+ const struct dc *dc,
+ const struct dc_surface_update *srf_updates,
+ int surface_count,
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream);
+
/**
* update_planes_and_stream_state() - The function takes planes and stream
* updates as inputs and determines the appropriate update type. If update type
@@ -3431,7 +3487,10 @@ static bool update_planes_and_stream_state(struct dc *dc,
context = dc->current_state;
update_type = dc_check_update_surfaces_for_stream(
- dc, srf_updates, surface_count, stream_update, stream_status);
+ &dc->check_config, srf_updates, surface_count, stream_update).update_type;
+ if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
+ update_type = UPDATE_TYPE_FULL;
+
/* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
* E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip
* Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
@@ -3463,6 +3522,16 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
}
+ if (update_type == UPDATE_TYPE_FULL) {
+ if (stream_update) {
+ uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
+ stream_update->stream->update_flags.raw = 0xFFFFFFFF;
+ stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
+ }
+ for (i = 0; i < surface_count; i++)
+ srf_updates[i].surface->update_flags.raw = 0xFFFFFFFF;
+ }
+
if (update_type >= update_surface_trace_level)
update_surface_trace(dc, srf_updates, surface_count);
@@ -3522,7 +3591,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
if (update_type == UPDATE_TYPE_FULL) {
- if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) {
BREAK_TO_DEBUGGER();
goto fail;
}
@@ -3566,7 +3635,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
stream_update->vsp_infopacket ||
stream_update->hfvsif_infopacket ||
stream_update->adaptive_sync_infopacket ||
- stream_update->vtem_infopacket) {
+ stream_update->vtem_infopacket ||
+ stream_update->avi_infopacket) {
resource_build_info_frame(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
@@ -4107,7 +4177,7 @@ static void commit_planes_for_stream(struct dc *dc,
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
- if (should_use_dmub_lock(stream->link)) {
+ if (should_use_dmub_inbox1_lock(dc, stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -4128,22 +4198,22 @@ static void commit_planes_for_stream(struct dc *dc,
}
if (dc->hwseq->funcs.wait_for_pipe_update_if_needed)
- dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type == UPDATE_TYPE_FAST);
+ dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type < UPDATE_TYPE_FULL);
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, true);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, true);
dc->hwss.interdependent_update_lock(dc, context, true);
} else {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, true);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, true);
/* Lock the top pipe while updating plane addrs, since freesync requires
* plane addr update event triggers to be synchronized.
@@ -4186,9 +4256,8 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
-
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
return;
}
@@ -4377,7 +4446,7 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg,
CRTC_STATE_VACTIVE);
- if (should_use_dmub_lock(stream->link)) {
+ if (should_use_dmub_inbox1_lock(dc, stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -4425,13 +4494,13 @@ static void commit_planes_for_stream(struct dc *dc,
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
} else {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
}
// Fire manual trigger only when bottom plane is flipped
@@ -4447,6 +4516,8 @@ static void commit_planes_for_stream(struct dc *dc,
pipe_ctx->plane_state->skip_manual_trigger)
continue;
+ if (dc->hwss.program_cursor_offload_now)
+ dc->hwss.program_cursor_offload_now(dc, pipe_ctx);
if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
}
@@ -4628,7 +4699,8 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
backup_and_set_minimal_pipe_split_policy(dc, base_context, policy);
/* commit minimal state */
- if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false) == DC_OK) {
+ if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context,
+ DC_VALIDATE_MODE_AND_PROGRAMMING) == DC_OK) {
/* prevent underflow and corruption when reconfiguring pipes */
force_vsync_flip_in_minimal_transition_context(minimal_transition_context);
} else {
@@ -4951,7 +5023,7 @@ void populate_fast_updates(struct dc_fast_update *fast_update,
}
}
-static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
+static bool fast_updates_exist(const struct dc_fast_update *fast_update, int surface_count)
{
int i;
@@ -4992,18 +5064,44 @@ bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_
return false;
}
-static bool full_update_required(struct dc *dc,
- struct dc_surface_update *srf_updates,
+static bool full_update_required_weak(
+ const struct dc *dc,
+ const struct dc_surface_update *srf_updates,
int surface_count,
- struct dc_stream_update *stream_update,
- struct dc_stream_state *stream)
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream)
{
-
- int i;
- struct dc_stream_status *stream_status;
const struct dc_state *context = dc->current_state;
+ if (srf_updates)
+ for (int i = 0; i < surface_count; i++)
+ if (!is_surface_in_context(context, srf_updates[i].surface))
+ return true;
- for (i = 0; i < surface_count; i++) {
+ if (stream) {
+ const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream);
+ if (stream_status == NULL || stream_status->plane_count != surface_count)
+ return true;
+ }
+ if (dc->idle_optimizations_allowed)
+ return true;
+
+ if (dc_can_clear_cursor_limit(dc))
+ return true;
+
+ return false;
+}
+
+static bool full_update_required(
+ const struct dc *dc,
+ const struct dc_surface_update *srf_updates,
+ int surface_count,
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream)
+{
+ if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
+ return true;
+
+ for (int i = 0; i < surface_count; i++) {
if (srf_updates &&
(srf_updates[i].plane_info ||
srf_updates[i].scaling_info ||
@@ -5019,8 +5117,7 @@ static bool full_update_required(struct dc *dc,
srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
(srf_updates[i].cm2_params &&
(srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting != srf_updates[i].surface->mcm_shaper_3dlut_setting ||
- srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable)) ||
- !is_surface_in_context(context, srf_updates[i].surface)))
+ srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable))))
return true;
}
@@ -5037,6 +5134,7 @@ static bool full_update_required(struct dc *dc,
stream_update->hfvsif_infopacket ||
stream_update->vtem_infopacket ||
stream_update->adaptive_sync_infopacket ||
+ stream_update->avi_infopacket ||
stream_update->dpms_off ||
stream_update->allow_freesync ||
stream_update->vrr_active_variable ||
@@ -5055,154 +5153,21 @@ static bool full_update_required(struct dc *dc,
stream_update->hw_cursor_req))
return true;
- if (stream) {
- stream_status = dc_stream_get_status(stream);
- if (stream_status == NULL || stream_status->plane_count != surface_count)
- return true;
- }
- if (dc->idle_optimizations_allowed)
- return true;
-
- if (dc_can_clear_cursor_limit(dc))
- return true;
-
return false;
}
-static bool fast_update_only(struct dc *dc,
- struct dc_fast_update *fast_update,
- struct dc_surface_update *srf_updates,
+static bool fast_update_only(
+ const struct dc *dc,
+ const struct dc_fast_update *fast_update,
+ const struct dc_surface_update *srf_updates,
int surface_count,
- struct dc_stream_update *stream_update,
- struct dc_stream_state *stream)
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream)
{
return fast_updates_exist(fast_update, surface_count)
&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
}
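The new "weak" predicate isolates the checks that depend on dc-wide state
(surface membership in the current context, plane-count mismatch, idle
optimizations, clearable cursor limit); full_update_required() then layers the
per-field surface and stream checks on top. The relationship, sketched with
hypothetical shorthand names (illustrative only):

	full_update_required(dc, ...) ==
		full_update_required_weak(dc, ...) ||
		any_surface_field_changed(...) ||   /* hypothetical */
		any_stream_field_changed(...);      /* hypothetical */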
-static bool update_planes_and_stream_v1(struct dc *dc,
- struct dc_surface_update *srf_updates, int surface_count,
- struct dc_stream_state *stream,
- struct dc_stream_update *stream_update,
- struct dc_state *state)
-{
- const struct dc_stream_status *stream_status;
- enum surface_update_type update_type;
- struct dc_state *context;
- struct dc_context *dc_ctx = dc->ctx;
- int i, j;
- struct dc_fast_update fast_update[MAX_SURFACES] = {0};
-
- dc_exit_ips_for_hw_access(dc);
-
- populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
- stream_status = dc_stream_get_status(stream);
- context = dc->current_state;
-
- update_type = dc_check_update_surfaces_for_stream(
- dc, srf_updates, surface_count, stream_update, stream_status);
- /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
- * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip
- * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
- */
- force_immediate_gsl_plane_flip(dc, srf_updates, surface_count);
-
- if (update_type >= UPDATE_TYPE_FULL) {
-
- /* initialize scratch memory for building context */
- context = dc_state_create_copy(state);
- if (context == NULL) {
- DC_ERROR("Failed to allocate new validate context!\n");
- return false;
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
- new_pipe->plane_state->force_full_update = true;
- }
- } else if (update_type == UPDATE_TYPE_FAST) {
- /*
- * Previous frame finished and HW is ready for optimization.
- */
- dc_post_update_surfaces_to_stream(dc);
- }
-
- for (i = 0; i < surface_count; i++) {
- struct dc_plane_state *surface = srf_updates[i].surface;
-
- copy_surface_update_to_plane(surface, &srf_updates[i]);
-
- if (update_type >= UPDATE_TYPE_MED) {
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe_ctx =
- &context->res_ctx.pipe_ctx[j];
-
- if (pipe_ctx->plane_state != surface)
- continue;
-
- resource_build_scaling_params(pipe_ctx);
- }
- }
- }
-
- copy_stream_update_to_stream(dc, context, stream, stream_update);
-
- if (update_type >= UPDATE_TYPE_FULL) {
- if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) {
- DC_ERROR("Mode validation failed for stream update!\n");
- dc_state_release(context);
- return false;
- }
- }
-
- TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
-
- if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
- !dc->debug.enable_legacy_fast_update) {
- commit_planes_for_stream_fast(dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- update_type,
- context);
- } else {
- commit_planes_for_stream(
- dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- update_type,
- context);
- }
- /*update current_State*/
- if (dc->current_state != context) {
-
- struct dc_state *old = dc->current_state;
-
- dc->current_state = context;
- dc_state_release(old);
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
- pipe_ctx->plane_state->force_full_update = false;
- }
- }
-
- /* Legacy optimization path for DCE. */
- if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
- dc_post_update_surfaces_to_stream(dc);
- TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
- }
- return true;
-}
-
static bool update_planes_and_stream_v2(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -5260,7 +5225,7 @@ static bool update_planes_and_stream_v2(struct dc *dc,
commit_minimal_transition_state_in_dc_update(dc, context, stream,
srf_updates, surface_count);
- if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
+ if (is_fast_update_only && !dc->check_config.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
@@ -5303,7 +5268,7 @@ static void commit_planes_and_stream_update_on_current_context(struct dc *dc,
stream_update);
if (fast_update_only(dc, fast_update, srf_updates, surface_count,
stream_update, stream) &&
- !dc->debug.enable_legacy_fast_update)
+ !dc->check_config.enable_legacy_fast_update)
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
@@ -5429,14 +5394,15 @@ bool dc_update_planes_and_stream(struct dc *dc,
* specially handle compatibility problems with transitions among those
* features as they are now transparent to the new sequence.
*/
- if (dc->ctx->dce_version >= DCN_VERSION_4_01)
+ if (dc->ctx->dce_version >= DCN_VERSION_4_01 || dc->ctx->dce_version == DCN_VERSION_3_2 ||
+ dc->ctx->dce_version == DCN_VERSION_3_21)
ret = update_planes_and_stream_v3(dc, srf_updates,
surface_count, stream, stream_update);
else
ret = update_planes_and_stream_v2(dc, srf_updates,
surface_count, stream, stream_update);
-
- if (ret)
+ if (ret && (dc->ctx->dce_version >= DCN_VERSION_3_2 ||
+ dc->ctx->dce_version == DCN_VERSION_3_01))
clear_update_flags(srf_updates, surface_count, stream);
return ret;
@@ -5460,14 +5426,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (dc->ctx->dce_version >= DCN_VERSION_4_01) {
ret = update_planes_and_stream_v3(dc, srf_updates, surface_count,
stream, stream_update);
- } else if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ } else {
ret = update_planes_and_stream_v2(dc, srf_updates, surface_count,
stream, stream_update);
- } else
- ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
- stream_update, state);
+ }
- if (ret)
+ if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2)
clear_update_flags(srf_updates, surface_count, stream);
}
@@ -5540,6 +5504,15 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state)
dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
}
break;
+ case DC_ACPI_CM_POWER_STATE_D3:
+ if (dc->caps.ips_support)
+ dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
+
+ if (dc->caps.ips_v2_support) {
+ if (dc->clk_mgr->funcs->set_low_power_state)
+ dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
+ }
+ break;
default:
ASSERT(dc->current_state->stream_count == 0);
dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
@@ -5696,8 +5669,8 @@ void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const
subvp_pipe_type[i] = dc_state_get_pipe_subvp_type(context, pipe);
}
}
-
- DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n",
+ if (!dc->caps.is_apu)
+ DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n",
__func__, allow, idle_fclk_khz, idle_dramclk_khz, subvp_pipe_type[0], subvp_pipe_type[1], subvp_pipe_type[2],
subvp_pipe_type[3], subvp_pipe_type[4], subvp_pipe_type[5], caller_name);
@@ -6007,6 +5980,101 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
return true;
}
+bool dc_smart_power_oled_enable(const struct dc_link *link, bool enable, uint16_t peak_nits,
+ uint8_t debug_control, uint16_t fixed_CLL, uint32_t triggerline)
+{
+ bool status = false;
+ struct dc *dc = link->ctx->dc;
+ union dmub_rb_cmd cmd;
+ uint8_t otg_inst = 0;
+ unsigned int panel_inst = 0;
+ struct pipe_ctx *pipe_ctx = NULL;
+ struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
+ int i = 0;
+
+ // get panel_inst
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return status;
+
+ // get otg_inst
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx &&
+ res_ctx->pipe_ctx[i].stream &&
+ res_ctx->pipe_ctx[i].stream->link &&
+ res_ctx->pipe_ctx[i].stream->link == link &&
+ res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
+ pipe_ctx = &res_ctx->pipe_ctx[i];
+ //TODO: refactor for multi-eDP support
+ break;
+ }
+ }
+
+ if (pipe_ctx)
+ otg_inst = pipe_ctx->stream_res.tg->inst;
+
+ // before enabling smart power OLED, call set_pipe so DMUB can set the ABM config
+ if (enable) {
+ if (dc->hwss.set_pipe && pipe_ctx)
+ dc->hwss.set_pipe(pipe_ctx);
+ }
+
+ // fill in cmd
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.smart_power_oled_enable.header.type = DMUB_CMD__SMART_POWER_OLED;
+ cmd.smart_power_oled_enable.header.sub_type = DMUB_CMD__SMART_POWER_OLED_ENABLE;
+ cmd.smart_power_oled_enable.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_smart_power_oled_enable_data) - sizeof(struct dmub_cmd_header);
+ cmd.smart_power_oled_enable.header.ret_status = 1;
+ cmd.smart_power_oled_enable.data.enable = enable;
+ cmd.smart_power_oled_enable.data.panel_inst = panel_inst;
+ cmd.smart_power_oled_enable.data.peak_nits = peak_nits;
+ cmd.smart_power_oled_enable.data.otg_inst = otg_inst;
+ cmd.smart_power_oled_enable.data.digfe_inst = link->link_enc->preferred_engine;
+ cmd.smart_power_oled_enable.data.digbe_inst = link->link_enc->transmitter;
+
+ cmd.smart_power_oled_enable.data.debugcontrol = debug_control;
+ cmd.smart_power_oled_enable.data.triggerline = triggerline;
+ cmd.smart_power_oled_enable.data.fixed_max_cll = fixed_CLL;
+
+ // send cmd
+ status = dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return status;
+}
+
+bool dc_smart_power_oled_get_max_cll(const struct dc_link *link, unsigned int *pCurrent_MaxCLL)
+{
+ struct dc *dc = link->ctx->dc;
+ union dmub_rb_cmd cmd;
+ bool status = false;
+ unsigned int panel_inst = 0;
+
+ // get panel_inst
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return status;
+
+ // fill in cmd
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.smart_power_oled_getmaxcll.header.type = DMUB_CMD__SMART_POWER_OLED;
+ cmd.smart_power_oled_getmaxcll.header.sub_type = DMUB_CMD__SMART_POWER_OLED_GETMAXCLL;
+ cmd.smart_power_oled_getmaxcll.header.payload_bytes = sizeof(cmd.smart_power_oled_getmaxcll.data);
+ cmd.smart_power_oled_getmaxcll.header.ret_status = 1;
+
+ cmd.smart_power_oled_getmaxcll.data.input.panel_inst = panel_inst;
+
+ // send cmd and wait for reply
+ status = dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
+
+ if (status)
+ *pCurrent_MaxCLL = cmd.smart_power_oled_getmaxcll.data.output.current_max_cll;
+ else
+ *pCurrent_MaxCLL = 0;
+
+ return status;
+}
+
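A minimal caller sketch for the MaxCLL query (illustrative; the fallback
constant and its name are made up):

	unsigned int max_cll = 0;

	if (!dc_smart_power_oled_get_max_cll(edp_link, &max_cll))
		max_cll = PANEL_DEFAULT_MAX_CLL;  /* hypothetical fallback */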
uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
uint8_t dpia_port_index)
{
@@ -6337,13 +6405,14 @@ void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
}
-/*
- *****************************************************************************
+/**
* dc_get_power_profile_for_dc_state() - extracts power profile from dc state
*
* Called when DM wants to make power policy decisions based on dc_state
*
- *****************************************************************************
+ * @context: Pointer to the dc_state from which the power profile is extracted.
+ *
+ * Return: The power profile structure containing the power level information.
*/
struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context)
{
@@ -6359,13 +6428,14 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state
return profile;
}
-/*
- **********************************************************************************
+/**
* dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state
*
- * Called when DM wants to log detile buffer size from dc_state
+ * This function is called to log the detile buffer size from the dc_state.
+ *
+ * @context: a pointer to the dc_state from which the detile buffer size is extracted.
*
- **********************************************************************************
+ * Return: the size of the detile buffer, or 0 if not available.
*/
unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
{
@@ -6377,6 +6447,36 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
return 0;
}
+/**
+ * dc_get_host_router_index() - Get index of host router from a DPIA link
+ *
+ * This function returns the host router index of the target link, provided
+ * the target link is a DPIA link.
+ *
+ * @link: Pointer to the target link (input)
+ * @host_router_index: Pointer to store the host router index of the target link (output).
+ *
+ * Return: true if the host router index is found and valid.
+ */
+bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index)
+{
+ struct dc *dc;
+
+ if (!link || !host_router_index || link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ return false;
+
+ dc = link->ctx->dc;
+
+ if (link->link_index < dc->lowest_dpia_link_index)
+ return false;
+
+ *host_router_index = (link->link_index - dc->lowest_dpia_link_index) / dc->caps.num_of_dpias_per_host_router;
+ if (*host_router_index < dc->caps.num_of_host_routers)
+ return true;
+ else
+ return false;
+}
+
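A worked example of the index math (illustrative numbers): with
lowest_dpia_link_index == 2 and num_of_dpias_per_host_router == 2, DPIA links
2 and 3 map to host router 0 and links 4 and 5 map to host router 1, e.g.
link_index 5 gives (5 - 2) / 2 == 1, which passes the num_of_host_routers
bound whenever at least two host routers are reported.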
bool dc_is_cursor_limit_pending(struct dc *dc)
{
uint32_t i;
@@ -6389,7 +6489,7 @@ bool dc_is_cursor_limit_pending(struct dc *dc)
return false;
}
-bool dc_can_clear_cursor_limit(struct dc *dc)
+bool dc_can_clear_cursor_limit(const struct dc *dc)
{
uint32_t i;
@@ -6400,3 +6500,594 @@ bool dc_can_clear_cursor_limit(struct dc *dc)
return false;
}
+
+void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst,
+ struct dc_underflow_debug_data *out_data)
+{
+ struct timing_generator *tg = NULL;
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ if (dc->res_pool->timing_generators[i] &&
+ dc->res_pool->timing_generators[i]->inst == primary_otg_inst) {
+ tg = dc->res_pool->timing_generators[i];
+ break;
+ }
+ }
+
+ dc_exit_ips_for_hw_access(dc);
+ if (dc->hwss.get_underflow_debug_data)
+ dc->hwss.get_underflow_debug_data(dc, tg, out_data);
+}
+
+void dc_get_power_feature_status(struct dc *dc, int primary_otg_inst,
+ struct power_features *out_data)
+{
+ out_data->uclk_p_state = dc->current_state->clk_mgr->clks.p_state_change_support;
+ out_data->fams = dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching;
+}
+
+bool dc_capture_register_software_state(struct dc *dc, struct dc_register_software_state *state)
+{
+ struct dc_state *context;
+ struct resource_context *res_ctx;
+ int i;
+
+ if (!dc || !dc->current_state || !state) {
+ if (state)
+ state->state_valid = false;
+ return false;
+ }
+
+ /* Initialize the state structure */
+ memset(state, 0, sizeof(struct dc_register_software_state));
+
+ context = dc->current_state;
+ res_ctx = &context->res_ctx;
+
+ /* Count active pipes and streams */
+ state->active_pipe_count = 0;
+ state->active_stream_count = context->stream_count;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (res_ctx->pipe_ctx[i].stream)
+ state->active_pipe_count++;
+ }
+
+ /* Capture HUBP programming state for each pipe */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ state->hubp[i].valid_stream = false;
+ if (!pipe_ctx->stream)
+ continue;
+
+ state->hubp[i].valid_stream = true;
+
+ /* HUBP register programming variables */
+ if (pipe_ctx->stream_res.tg)
+ state->hubp[i].vtg_sel = pipe_ctx->stream_res.tg->inst;
+
+ state->hubp[i].hubp_clock_enable = (pipe_ctx->plane_res.hubp != NULL) ? 1 : 0;
+
+ state->hubp[i].valid_plane_state = false;
+ if (pipe_ctx->plane_state) {
+ state->hubp[i].valid_plane_state = true;
+ state->hubp[i].surface_pixel_format = pipe_ctx->plane_state->format;
+ state->hubp[i].rotation_angle = pipe_ctx->plane_state->rotation;
+ state->hubp[i].h_mirror_en = pipe_ctx->plane_state->horizontal_mirror ? 1 : 0;
+
+ /* Surface size */
+ if (pipe_ctx->plane_state->plane_size.surface_size.width > 0) {
+ state->hubp[i].surface_size_width = pipe_ctx->plane_state->plane_size.surface_size.width;
+ state->hubp[i].surface_size_height = pipe_ctx->plane_state->plane_size.surface_size.height;
+ }
+
+ /* Viewport dimensions from scaler data */
+ if (pipe_ctx->plane_state->src_rect.width > 0) {
+ state->hubp[i].pri_viewport_width = pipe_ctx->plane_state->src_rect.width;
+ state->hubp[i].pri_viewport_height = pipe_ctx->plane_state->src_rect.height;
+ state->hubp[i].pri_viewport_x_start = pipe_ctx->plane_state->src_rect.x;
+ state->hubp[i].pri_viewport_y_start = pipe_ctx->plane_state->src_rect.y;
+ }
+
+ /* DCC settings */
+ state->hubp[i].surface_dcc_en = (pipe_ctx->plane_state->dcc.enable) ? 1 : 0;
+ state->hubp[i].surface_dcc_ind_64b_blk = pipe_ctx->plane_state->dcc.independent_64b_blks;
+ state->hubp[i].surface_dcc_ind_128b_blk = pipe_ctx->plane_state->dcc.dcc_ind_blk;
+
+ /* Surface pitch */
+ state->hubp[i].surface_pitch = pipe_ctx->plane_state->plane_size.surface_pitch;
+ state->hubp[i].meta_pitch = pipe_ctx->plane_state->dcc.meta_pitch;
+ state->hubp[i].chroma_pitch = pipe_ctx->plane_state->plane_size.chroma_pitch;
+ state->hubp[i].meta_pitch_c = pipe_ctx->plane_state->dcc.meta_pitch_c;
+
+ /* Surface addresses - primary */
+ state->hubp[i].primary_surface_address_low = pipe_ctx->plane_state->address.grph.addr.low_part;
+ state->hubp[i].primary_surface_address_high = pipe_ctx->plane_state->address.grph.addr.high_part;
+ state->hubp[i].primary_meta_surface_address_low = pipe_ctx->plane_state->address.grph.meta_addr.low_part;
+ state->hubp[i].primary_meta_surface_address_high = pipe_ctx->plane_state->address.grph.meta_addr.high_part;
+
+ /* TMZ settings */
+ state->hubp[i].primary_surface_tmz = pipe_ctx->plane_state->address.tmz_surface;
+ state->hubp[i].primary_meta_surface_tmz = pipe_ctx->plane_state->address.tmz_surface;
+
+ /* Tiling configuration */
+ state->hubp[i].min_dc_gfx_version9 = false;
+ if (pipe_ctx->plane_state->tiling_info.gfxversion >= DcGfxVersion9) {
+ state->hubp[i].min_dc_gfx_version9 = true;
+ state->hubp[i].sw_mode = pipe_ctx->plane_state->tiling_info.gfx9.swizzle;
+ state->hubp[i].num_pipes = pipe_ctx->plane_state->tiling_info.gfx9.num_pipes;
+ state->hubp[i].num_banks = pipe_ctx->plane_state->tiling_info.gfx9.num_banks;
+ state->hubp[i].pipe_interleave = pipe_ctx->plane_state->tiling_info.gfx9.pipe_interleave;
+ state->hubp[i].num_shader_engines = pipe_ctx->plane_state->tiling_info.gfx9.num_shader_engines;
+ state->hubp[i].num_rb_per_se = pipe_ctx->plane_state->tiling_info.gfx9.num_rb_per_se;
+ state->hubp[i].num_pkrs = pipe_ctx->plane_state->tiling_info.gfx9.num_pkrs;
+ }
+ }
+
+ /* DML Request Size Configuration */
+ if (pipe_ctx->rq_regs.rq_regs_l.chunk_size > 0) {
+ state->hubp[i].rq_chunk_size = pipe_ctx->rq_regs.rq_regs_l.chunk_size;
+ state->hubp[i].rq_min_chunk_size = pipe_ctx->rq_regs.rq_regs_l.min_chunk_size;
+ state->hubp[i].rq_meta_chunk_size = pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size;
+ state->hubp[i].rq_min_meta_chunk_size = pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size;
+ state->hubp[i].rq_dpte_group_size = pipe_ctx->rq_regs.rq_regs_l.dpte_group_size;
+ state->hubp[i].rq_mpte_group_size = pipe_ctx->rq_regs.rq_regs_l.mpte_group_size;
+ state->hubp[i].rq_swath_height_l = pipe_ctx->rq_regs.rq_regs_l.swath_height;
+ state->hubp[i].rq_pte_row_height_l = pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear;
+ }
+
+ /* Chroma request size configuration */
+ if (pipe_ctx->rq_regs.rq_regs_c.chunk_size > 0) {
+ state->hubp[i].rq_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.chunk_size;
+ state->hubp[i].rq_min_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.min_chunk_size;
+ state->hubp[i].rq_meta_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.meta_chunk_size;
+ state->hubp[i].rq_min_meta_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.min_meta_chunk_size;
+ state->hubp[i].rq_dpte_group_size_c = pipe_ctx->rq_regs.rq_regs_c.dpte_group_size;
+ state->hubp[i].rq_mpte_group_size_c = pipe_ctx->rq_regs.rq_regs_c.mpte_group_size;
+ state->hubp[i].rq_swath_height_c = pipe_ctx->rq_regs.rq_regs_c.swath_height;
+ state->hubp[i].rq_pte_row_height_c = pipe_ctx->rq_regs.rq_regs_c.pte_row_height_linear;
+ }
+
+ /* DML expansion modes */
+ state->hubp[i].drq_expansion_mode = pipe_ctx->rq_regs.drq_expansion_mode;
+ state->hubp[i].prq_expansion_mode = pipe_ctx->rq_regs.prq_expansion_mode;
+ state->hubp[i].mrq_expansion_mode = pipe_ctx->rq_regs.mrq_expansion_mode;
+ state->hubp[i].crq_expansion_mode = pipe_ctx->rq_regs.crq_expansion_mode;
+
+ /* DML DLG parameters - nominal */
+ state->hubp[i].dst_y_per_vm_vblank = pipe_ctx->dlg_regs.dst_y_per_vm_vblank;
+ state->hubp[i].dst_y_per_row_vblank = pipe_ctx->dlg_regs.dst_y_per_row_vblank;
+ state->hubp[i].dst_y_per_vm_flip = pipe_ctx->dlg_regs.dst_y_per_vm_flip;
+ state->hubp[i].dst_y_per_row_flip = pipe_ctx->dlg_regs.dst_y_per_row_flip;
+
+ /* DML prefetch settings */
+ state->hubp[i].dst_y_prefetch = pipe_ctx->dlg_regs.dst_y_prefetch;
+ state->hubp[i].vratio_prefetch = pipe_ctx->dlg_regs.vratio_prefetch;
+ state->hubp[i].vratio_prefetch_c = pipe_ctx->dlg_regs.vratio_prefetch_c;
+
+ /* TTU parameters */
+ state->hubp[i].qos_level_low_wm = pipe_ctx->ttu_regs.qos_level_low_wm;
+ state->hubp[i].qos_level_high_wm = pipe_ctx->ttu_regs.qos_level_high_wm;
+ state->hubp[i].qos_level_flip = pipe_ctx->ttu_regs.qos_level_flip;
+ state->hubp[i].min_ttu_vblank = pipe_ctx->ttu_regs.min_ttu_vblank;
+ }
+
+ /* Capture HUBBUB programming state */
+ if (dc->res_pool->hubbub) {
+ /* Individual DET buffer sizes - software state variables that program DET registers */
+ for (i = 0; i < 4 && i < dc->res_pool->pipe_count; i++) {
+ uint32_t det_size = res_ctx->pipe_ctx[i].det_buffer_size_kb;
+ switch (i) {
+ case 0:
+ state->hubbub.det0_size = det_size;
+ break;
+ case 1:
+ state->hubbub.det1_size = det_size;
+ break;
+ case 2:
+ state->hubbub.det2_size = det_size;
+ break;
+ case 3:
+ state->hubbub.det3_size = det_size;
+ break;
+ }
+ }
+
+ /* Compression buffer configuration - software state that programs COMPBUF_SIZE register */
+ // TODO: Handle logic for legacy DCN pre-DCN401
+ state->hubbub.compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
+ }
+
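+ /*
+ * Illustrative aside (not part of the capture): if the snapshot struct
+ * exposed a det_size[] array instead of det0_size..det3_size, the
+ * per-index switch above would collapse to one assignment per pipe:
+ *
+ *   state->hubbub.det_size[i] = res_ctx->pipe_ctx[i].det_buffer_size_kb;
+ *
+ * det_size[] is hypothetical; the real fields are the four scalars.
+ */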
+ /* Capture DPP programming state for each pipe */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ state->dpp[i].dpp_clock_enable = (pipe_ctx->plane_res.dpp != NULL) ? 1 : 0;
+
+ if (pipe_ctx->plane_state && pipe_ctx->plane_res.scl_data.recout.width > 0) {
+ /* Access dscl_prog_data directly - this contains the actual software state used for register programming */
+ struct dscl_prog_data *dscl_data = &pipe_ctx->plane_res.scl_data.dscl_prog_data;
+
+ /* Recout (Rectangle of Interest) configuration - software state that programs RECOUT registers */
+ state->dpp[i].recout_start_x = dscl_data->recout.x;
+ state->dpp[i].recout_start_y = dscl_data->recout.y;
+ state->dpp[i].recout_width = dscl_data->recout.width;
+ state->dpp[i].recout_height = dscl_data->recout.height;
+
+ /* MPC (Multiple Pipe/Plane Combiner) size - software state that programs MPC_SIZE registers */
+ state->dpp[i].mpc_width = dscl_data->mpc_size.width;
+ state->dpp[i].mpc_height = dscl_data->mpc_size.height;
+
+ /* DSCL mode - software state that programs SCL_MODE registers */
+ state->dpp[i].dscl_mode = dscl_data->dscl_mode;
+
+ /* Scaler ratios - software state that programs scale ratio registers (use actual programmed ratios) */
+ state->dpp[i].horz_ratio_int = dscl_data->ratios.h_scale_ratio >> 19; // Extract integer part from programmed ratio
+ state->dpp[i].vert_ratio_int = dscl_data->ratios.v_scale_ratio >> 19; // Extract integer part from programmed ratio
+
+ /* Basic scaler taps - software state that programs tap control registers (use actual programmed taps) */
+ state->dpp[i].h_taps = dscl_data->taps.h_taps + 1; // dscl_prog_data.taps stores (taps - 1), so add 1 back
+ state->dpp[i].v_taps = dscl_data->taps.v_taps + 1; // dscl_prog_data.taps stores (taps - 1), so add 1 back
+ }
+ }
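+ /*
+ * Worked example for the ratio extraction above (illustrative): the
+ * programmed scale ratios carry 19 fractional bits, so a 2:1 downscale
+ * stored as (2 << 19) == 0x100000 gives
+ *
+ *   horz_ratio_int = 0x100000 >> 19 == 2
+ */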
+
+ /* Capture essential clock state for underflow analysis */
+ if (dc->clk_mgr && dc->clk_mgr->clks.dispclk_khz > 0) {
+ /* Core display clocks affecting bandwidth and timing */
+ state->dccg.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
+
+ /* Per-pipe clock configuration - only capture what's essential */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+ if (pipe_ctx->stream) {
+ /* Essential clocks that directly affect underflow risk */
+ state->dccg.dppclk_khz[i] = dc->clk_mgr->clks.dppclk_khz;
+ state->dccg.pixclk_khz[i] = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+ state->dccg.dppclk_enable[i] = 1;
+
+ /* DP stream clock only for DP signals */
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ state->dccg.dpstreamclk_enable[i] = 1;
+ } else {
+ state->dccg.dpstreamclk_enable[i] = 0;
+ }
+ } else {
+ /* Inactive pipe - no clocks */
+ state->dccg.dppclk_khz[i] = 0;
+ state->dccg.pixclk_khz[i] = 0;
+ state->dccg.dppclk_enable[i] = 0;
+ if (i < 4) {
+ state->dccg.dpstreamclk_enable[i] = 0;
+ }
+ }
+ }
+
+ /* DSC clock state - only when actually using DSC */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = (i < dc->res_pool->pipe_count) ? &res_ctx->pipe_ctx[i] : NULL;
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->timing.dsc_cfg.num_slices_h > 0) {
+ state->dccg.dscclk_khz[i] = 400000; /* Typical DSC clock frequency */
+ } else {
+ state->dccg.dscclk_khz[i] = 0;
+ }
+ }
+
+ /* SYMCLK32 LE Control - only the essential HPO state for underflow analysis */
+ for (i = 0; i < 2; i++) {
+ state->dccg.symclk32_le_enable[i] = 0; /* Default: disabled */
+ }
+
+ }
+
+ /* Capture essential DSC configuration for underflow analysis */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->stream && pipe_ctx->stream->timing.dsc_cfg.num_slices_h > 0) {
+ /* DSC is enabled - capture essential configuration */
+ state->dsc[i].dsc_clock_enable = 1;
+
+ /* DSC configuration affecting bandwidth and timing */
+ struct dc_dsc_config *dsc_cfg = &pipe_ctx->stream->timing.dsc_cfg;
+ state->dsc[i].dsc_num_slices_h = dsc_cfg->num_slices_h;
+ state->dsc[i].dsc_num_slices_v = dsc_cfg->num_slices_v;
+ state->dsc[i].dsc_bits_per_pixel = dsc_cfg->bits_per_pixel;
+
+ /* OPP pipe source for DSC forwarding */
+ if (pipe_ctx->stream_res.opp) {
+ state->dsc[i].dscrm_dsc_forward_enable = 1;
+ state->dsc[i].dscrm_dsc_opp_pipe_source = pipe_ctx->stream_res.opp->inst;
+ } else {
+ state->dsc[i].dscrm_dsc_forward_enable = 0;
+ state->dsc[i].dscrm_dsc_opp_pipe_source = 0;
+ }
+ } else {
+ /* DSC not enabled - clear all fields */
+ memset(&state->dsc[i], 0, sizeof(state->dsc[i]));
+ }
+ }
+
+ /* Capture MPC programming state - comprehensive register field coverage */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->plane_state && pipe_ctx->stream) {
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+
+ /* MPCC blending tree and mode control - capture actual blend configuration */
+ state->mpc.mpcc_mode[i] = (plane_state->blend_tf.type != TF_TYPE_BYPASS) ? 1 : 0;
+ state->mpc.mpcc_alpha_blend_mode[i] = plane_state->per_pixel_alpha ? 1 : 0;
+ state->mpc.mpcc_alpha_multiplied_mode[i] = plane_state->pre_multiplied_alpha ? 1 : 0;
+ state->mpc.mpcc_blnd_active_overlap_only[i] = 0; /* Default - no overlap restriction */
+ state->mpc.mpcc_global_alpha[i] = plane_state->global_alpha_value;
+ state->mpc.mpcc_global_gain[i] = plane_state->global_alpha ? 255 : 0;
+ state->mpc.mpcc_bg_bpc[i] = 8; /* Standard 8-bit background */
+ state->mpc.mpcc_bot_gain_mode[i] = 0; /* Standard gain mode */
+
+ /* MPCC blending tree connections - capture tree topology */
+ if (pipe_ctx->bottom_pipe) {
+ state->mpc.mpcc_bot_sel[i] = pipe_ctx->bottom_pipe->pipe_idx;
+ } else {
+ state->mpc.mpcc_bot_sel[i] = 0xF; /* No bottom connection */
+ }
+ state->mpc.mpcc_top_sel[i] = pipe_ctx->pipe_idx; /* This pipe's DPP ID */
+
+ /* MPCC output gamma control - capture gamma programming */
+ if (plane_state->gamma_correction.type != GAMMA_CS_TFM_1D && plane_state->gamma_correction.num_entries > 0) {
+ state->mpc.mpcc_ogam_mode[i] = 1; /* Gamma enabled */
+ state->mpc.mpcc_ogam_select[i] = 0; /* Bank A selection */
+ state->mpc.mpcc_ogam_pwl_disable[i] = 0; /* PWL enabled */
+ } else {
+ state->mpc.mpcc_ogam_mode[i] = 0; /* Bypass mode */
+ state->mpc.mpcc_ogam_select[i] = 0;
+ state->mpc.mpcc_ogam_pwl_disable[i] = 1; /* PWL disabled */
+ }
+
+ /* MPCC pipe assignment and operational status */
+ if (pipe_ctx->stream_res.opp) {
+ state->mpc.mpcc_opp_id[i] = pipe_ctx->stream_res.opp->inst;
+ } else {
+ state->mpc.mpcc_opp_id[i] = 0xF; /* No OPP assignment */
+ }
+
+ /* MPCC status indicators - active pipe state */
+ state->mpc.mpcc_idle[i] = 0; /* Active pipe - not idle */
+ state->mpc.mpcc_busy[i] = 1; /* Active pipe - busy processing */
+
+ } else {
+ /* Pipe not active - set disabled/idle state for all fields */
+ state->mpc.mpcc_mode[i] = 0;
+ state->mpc.mpcc_alpha_blend_mode[i] = 0;
+ state->mpc.mpcc_alpha_multiplied_mode[i] = 0;
+ state->mpc.mpcc_blnd_active_overlap_only[i] = 0;
+ state->mpc.mpcc_global_alpha[i] = 0;
+ state->mpc.mpcc_global_gain[i] = 0;
+ state->mpc.mpcc_bg_bpc[i] = 0;
+ state->mpc.mpcc_bot_gain_mode[i] = 0;
+ state->mpc.mpcc_bot_sel[i] = 0xF; /* No bottom connection */
+ state->mpc.mpcc_top_sel[i] = 0xF; /* No top connection */
+ state->mpc.mpcc_ogam_mode[i] = 0; /* Bypass */
+ state->mpc.mpcc_ogam_select[i] = 0;
+ state->mpc.mpcc_ogam_pwl_disable[i] = 1; /* PWL disabled */
+ state->mpc.mpcc_opp_id[i] = 0xF; /* No OPP assignment */
+ state->mpc.mpcc_idle[i] = 1; /* Idle */
+ state->mpc.mpcc_busy[i] = 0; /* Not busy */
+ }
+ }
+
+ /* Capture OPP programming state for each pipe - comprehensive register field coverage */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ if (pipe_ctx->stream_res.opp) {
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+
+ /* OPP Pipe Control */
+ state->opp[i].opp_pipe_clock_enable = 1; /* Active pipe has clock enabled */
+
+ /* Display Pattern Generator (DPG) Control - 19 fields */
+ if (pipe_ctx->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
+ state->opp[i].dpg_enable = 1;
+ } else {
+ /* Video mode - DPG disabled */
+ state->opp[i].dpg_enable = 0;
+ }
+
+ /* Format Control (FMT) - 18 fields */
+ state->opp[i].fmt_pixel_encoding = timing->pixel_encoding;
+
+ /* Chroma subsampling mode based on pixel encoding */
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ state->opp[i].fmt_subsampling_mode = 1; /* 4:2:0 subsampling */
+ } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ state->opp[i].fmt_subsampling_mode = 2; /* 4:2:2 subsampling */
+ } else {
+ state->opp[i].fmt_subsampling_mode = 0; /* No subsampling (4:4:4) */
+ }
+
+ state->opp[i].fmt_cbcr_bit_reduction_bypass = (timing->pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ state->opp[i].fmt_stereosync_override = (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) ? 1 : 0;
+
+ /* Dithering control based on bit depth */
+ if (timing->display_color_depth < COLOR_DEPTH_121212) {
+ state->opp[i].fmt_spatial_dither_frame_counter_max = 15; /* Typical frame counter max */
+ state->opp[i].fmt_spatial_dither_frame_counter_bit_swap = 0; /* No bit swapping */
+ state->opp[i].fmt_spatial_dither_enable = 1;
+ state->opp[i].fmt_spatial_dither_mode = 0; /* Spatial dithering mode */
+ state->opp[i].fmt_spatial_dither_depth = timing->display_color_depth;
+ state->opp[i].fmt_temporal_dither_enable = 0; /* Spatial dithering preferred */
+ } else {
+ state->opp[i].fmt_spatial_dither_frame_counter_max = 0;
+ state->opp[i].fmt_spatial_dither_frame_counter_bit_swap = 0;
+ state->opp[i].fmt_spatial_dither_enable = 0;
+ state->opp[i].fmt_spatial_dither_mode = 0;
+ state->opp[i].fmt_spatial_dither_depth = 0;
+ state->opp[i].fmt_temporal_dither_enable = 0;
+ }
+
+ /* Truncation control for bit depth reduction */
+ if (timing->display_color_depth < COLOR_DEPTH_121212) {
+ state->opp[i].fmt_truncate_enable = 1;
+ state->opp[i].fmt_truncate_depth = timing->display_color_depth;
+ state->opp[i].fmt_truncate_mode = 0; /* Round mode */
+ } else {
+ state->opp[i].fmt_truncate_enable = 0;
+ state->opp[i].fmt_truncate_depth = 0;
+ state->opp[i].fmt_truncate_mode = 0;
+ }
+
+ /* Data clamping control */
+ state->opp[i].fmt_clamp_data_enable = 1; /* Clamping typically enabled */
+ state->opp[i].fmt_clamp_color_format = timing->pixel_encoding;
+
+ /* Dynamic expansion for limited range content */
+ if (timing->pixel_encoding != PIXEL_ENCODING_RGB) {
+ state->opp[i].fmt_dynamic_exp_enable = 1; /* YCbCr typically needs expansion */
+ state->opp[i].fmt_dynamic_exp_mode = 0; /* Standard expansion */
+ } else {
+ state->opp[i].fmt_dynamic_exp_enable = 0; /* RGB typically full range */
+ state->opp[i].fmt_dynamic_exp_mode = 0;
+ }
+
+ /* Legacy field for compatibility */
+ state->opp[i].fmt_bit_depth_control = timing->display_color_depth;
+
+ /* Output Buffer (OPPBUF) Control - 6 fields */
+ state->opp[i].oppbuf_active_width = timing->h_addressable;
+ state->opp[i].oppbuf_pixel_repetition = 0; /* No pixel repetition by default */
+
+ /* Multi-Stream Output (MSO) / ODM segmentation */
+ if (pipe_ctx->next_odm_pipe) {
+ state->opp[i].oppbuf_display_segmentation = 1; /* Segmented display */
+ state->opp[i].oppbuf_overlap_pixel_num = 0; /* ODM overlap pixels */
+ } else {
+ state->opp[i].oppbuf_display_segmentation = 0; /* Single segment */
+ state->opp[i].oppbuf_overlap_pixel_num = 0;
+ }
+
+ /* 3D/Stereo control */
+ if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) {
+ state->opp[i].oppbuf_3d_vact_space1_size = 30; /* Typical stereo blanking */
+ state->opp[i].oppbuf_3d_vact_space2_size = 30;
+ } else {
+ state->opp[i].oppbuf_3d_vact_space1_size = 0;
+ state->opp[i].oppbuf_3d_vact_space2_size = 0;
+ }
+
+ /* DSC Forward Config - 3 fields */
+ if (timing->dsc_cfg.num_slices_h > 0) {
+ state->opp[i].dscrm_dsc_forward_enable = 1;
+ state->opp[i].dscrm_dsc_opp_pipe_source = pipe_ctx->stream_res.opp->inst;
+ state->opp[i].dscrm_dsc_forward_enable_status = 1; /* Status follows enable */
+ } else {
+ state->opp[i].dscrm_dsc_forward_enable = 0;
+ state->opp[i].dscrm_dsc_opp_pipe_source = 0;
+ state->opp[i].dscrm_dsc_forward_enable_status = 0;
+ }
+ } else {
+ /* No OPP resource - set all fields to disabled state */
+ memset(&state->opp[i], 0, sizeof(state->opp[i]));
+ }
+ }
+
+ /* Capture OPTC programming state for each pipe - comprehensive register field coverage */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ if (pipe_ctx->stream_res.tg) {
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+
+ state->optc[i].otg_master_inst = pipe_ctx->stream_res.tg->inst;
+
+ /* OTG_CONTROL register - 5 fields */
+ state->optc[i].otg_master_enable = 1; /* Active stream */
+ state->optc[i].otg_disable_point_cntl = 0; /* Normal operation */
+ state->optc[i].otg_start_point_cntl = 0; /* Normal start */
+ state->optc[i].otg_field_number_cntl = (timing->flags.INTERLACE) ? 1 : 0;
+ state->optc[i].otg_out_mux = 0; /* Direct output */
+
+ /* OTG Horizontal Timing - 7 fields */
+ state->optc[i].otg_h_total = timing->h_total;
+ state->optc[i].otg_h_blank_start = timing->h_addressable;
+ state->optc[i].otg_h_blank_end = timing->h_total - timing->h_front_porch;
+ state->optc[i].otg_h_sync_start = timing->h_addressable + timing->h_front_porch;
+ state->optc[i].otg_h_sync_end = timing->h_addressable + timing->h_front_porch + timing->h_sync_width;
+ state->optc[i].otg_h_sync_polarity = timing->flags.HSYNC_POSITIVE_POLARITY ? 0 : 1;
+ state->optc[i].otg_h_timing_div_mode = (pipe_ctx->next_odm_pipe) ? 1 : 0; /* ODM divide mode */
+
+ /* OTG Vertical Timing - 7 fields */
+ state->optc[i].otg_v_total = timing->v_total;
+ state->optc[i].otg_v_blank_start = timing->v_addressable;
+ state->optc[i].otg_v_blank_end = timing->v_total - timing->v_front_porch;
+ state->optc[i].otg_v_sync_start = timing->v_addressable + timing->v_front_porch;
+ state->optc[i].otg_v_sync_end = timing->v_addressable + timing->v_front_porch + timing->v_sync_width;
+ state->optc[i].otg_v_sync_polarity = timing->flags.VSYNC_POSITIVE_POLARITY ? 0 : 1;
+ state->optc[i].otg_v_sync_mode = 0; /* Normal sync mode */
+
+ /* Initialize remaining core fields with appropriate defaults */
+			/* TODO: Update logic for accurate vtotal min/max */
+ state->optc[i].otg_v_total_max = timing->v_total + 100; /* Typical DRR range */
+ state->optc[i].otg_v_total_min = timing->v_total - 50;
+ state->optc[i].otg_v_total_mid = timing->v_total;
+
+ /* ODM configuration */
+			/* TODO: Update logic to have complete ODM mappings (e.g. 3:1 and 4:1) stored in single pipe */
+ if (pipe_ctx->next_odm_pipe) {
+ state->optc[i].optc_seg0_src_sel = pipe_ctx->stream_res.opp ? pipe_ctx->stream_res.opp->inst : 0;
+ state->optc[i].optc_seg1_src_sel = pipe_ctx->next_odm_pipe->stream_res.opp ? pipe_ctx->next_odm_pipe->stream_res.opp->inst : 0;
+ state->optc[i].optc_num_of_input_segment = 1; /* 2 segments - 1 */
+ } else {
+ state->optc[i].optc_seg0_src_sel = pipe_ctx->stream_res.opp ? pipe_ctx->stream_res.opp->inst : 0;
+ state->optc[i].optc_seg1_src_sel = 0;
+ state->optc[i].optc_num_of_input_segment = 0; /* Single segment */
+ }
+
+ /* DSC configuration */
+ if (timing->dsc_cfg.num_slices_h > 0) {
+ state->optc[i].optc_dsc_mode = 1; /* DSC enabled */
+				state->optc[i].optc_dsc_bytes_per_pixel = timing->dsc_cfg.bits_per_pixel / 16; /* bits_per_pixel is in 1/16 bpp units */
+ state->optc[i].optc_dsc_slice_width = timing->h_addressable / timing->dsc_cfg.num_slices_h;
+ } else {
+ state->optc[i].optc_dsc_mode = 0;
+ state->optc[i].optc_dsc_bytes_per_pixel = 0;
+ state->optc[i].optc_dsc_slice_width = 0;
+ }
+
+ /* Essential control fields */
+ state->optc[i].otg_stereo_enable = (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) ? 1 : 0;
+ state->optc[i].otg_interlace_enable = timing->flags.INTERLACE ? 1 : 0;
+ state->optc[i].otg_clock_enable = 1; /* OTG clock enabled */
+ state->optc[i].vtg0_enable = 1; /* VTG enabled for timing generation */
+
+ /* Initialize other key fields to defaults */
+ state->optc[i].optc_input_pix_clk_en = 1;
+ state->optc[i].optc_segment_width = (pipe_ctx->next_odm_pipe) ? (timing->h_addressable / 2) : timing->h_addressable;
+ state->optc[i].otg_vready_offset = 1;
+ state->optc[i].otg_vstartup_start = timing->v_addressable + 10;
+ state->optc[i].otg_vupdate_offset = 0;
+ state->optc[i].otg_vupdate_width = 5;
+ } else {
+ /* No timing generator resource - initialize all fields to 0 */
+ memset(&state->optc[i], 0, sizeof(state->optc[i]));
+ }
+ }
+
+ state->state_valid = true;
+ return true;
+}
+
+void dc_log_preos_dmcub_info(const struct dc *dc)
+{
+ dc_dmub_srv_log_preos_dmcub_info(dc->ctx->dmub_srv);
+}
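
For a concrete sense of the horizontal timing capture above, here is a standalone sketch that recomputes those fields for an assumed CEA 1080p60 mode (h_total 2200, h_addressable 1920, h_front_porch 88, h_sync_width 44). The struct and numbers are illustrative only, not part of the patch:

	/* Illustrative recomputation of the OTG horizontal timing capture,
	 * assuming a CEA 1080p60 mode. */
	#include <stdio.h>

	struct example_timing {
		unsigned int h_total, h_addressable, h_front_porch, h_sync_width;
	};

	int main(void)
	{
		struct example_timing t = { 2200, 1920, 88, 44 };
		unsigned int blank_start = t.h_addressable;                  /* 1920 */
		unsigned int blank_end = t.h_total - t.h_front_porch;        /* 2112 */
		unsigned int sync_start = t.h_addressable + t.h_front_porch; /* 2008 */
		unsigned int sync_end = sync_start + t.h_sync_width;         /* 2052 */

		printf("blank %u..%u, sync %u..%u\n",
		       blank_start, blank_end, sync_start, sync_end);
		return 0;
	}
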
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 7551d0a3fe82..bbce751b485f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -268,6 +268,8 @@ char *dc_status_to_str(enum dc_status status)
return "Insufficient DP link bandwidth";
case DC_FAIL_HW_CURSOR_SUPPORT:
return "HW Cursor not supported";
+ case DC_FAIL_DP_TUNNEL_BW_VALIDATE:
+ return "Fail DP Tunnel BW validation";
case DC_ERROR_UNEXPECTED:
return "Unexpected error";
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 7014b8d000bb..e2763b60482a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -32,6 +32,13 @@
#include "resource.h"
#include "dc_dmub_srv.h"
#include "dc_state_priv.h"
+#include "opp.h"
+#include "dsc.h"
+#include "dchubbub.h"
+#include "dccg.h"
+#include "abm.h"
+#include "dcn10/dcn10_hubbub.h"
+#include "dce/dmub_hw_lock_mgr.h"
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
#define MAX_NUM_MCACHE 8
@@ -258,7 +265,7 @@ void color_space_to_black_color(
black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED];
break;
- /**
+ /*
* Remove default and add case for all color space
* so when we forget to add new color space
* compiler will give a warning
@@ -427,6 +434,32 @@ void get_hdr_visual_confirm_color(
}
}
+/* Visual Confirm color definition for Smart Mux */
+void get_smartmux_visual_confirm_color(
+ struct dc *dc,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+
+ const struct tg_color sm_ver_colors[5] = {
+ {0, 0, 0}, /* SMUX_MUXCONTROL_UNSUPPORTED - Black */
+ {0, MAX_TG_COLOR_VALUE, 0}, /* SMUX_MUXCONTROL_v10 - Green */
+ {0, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE}, /* SMUX_MUXCONTROL_v15 - Cyan */
+ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, /* SMUX_MUXCONTROL_MDM - Yellow */
+		{MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE},	/* SMUX_MUXCONTROL_vUNKNOWN - Magenta */
+ };
+
+ if (dc->caps.is_apu) {
+ /* APU driving the eDP */
+ *color = sm_ver_colors[dc->config.smart_mux_version];
+ } else {
+ /* dGPU driving the eDP - red */
+ color->color_r_cr = color_value;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ }
+}
+
/* Visual Confirm color definition for VABC */
void get_vabc_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
@@ -729,11 +762,13 @@ void hwss_build_fast_sequence(struct dc *dc,
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
(*num_steps)++;
}
- if (dc->hwss.fams2_global_control_lock_fast) {
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = true;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context);
- block_sequence[*num_steps].func = DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST;
+ if (dc->hwss.dmub_hw_control_lock_fast) {
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.dc = dc;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.lock = true;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.is_required =
+ dc_state_is_fams2_in_use(dc, context) ||
+ dmub_hw_lock_mgr_does_link_require_lock(dc, stream->link);
+ block_sequence[*num_steps].func = DMUB_HW_CONTROL_LOCK_FAST;
(*num_steps)++;
}
if (dc->hwss.pipe_control_lock) {
@@ -758,7 +793,7 @@ void hwss_build_fast_sequence(struct dc *dc,
while (current_mpc_pipe) {
if (current_mpc_pipe->plane_state) {
if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state->update_flags.raw) {
- block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].params.set_flip_control_gsl_params.hubp = current_mpc_pipe->plane_res.hubp;
block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = current_mpc_pipe->plane_state->flip_immediate;
block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL;
(*num_steps)++;
@@ -868,11 +903,11 @@ void hwss_build_fast_sequence(struct dc *dc,
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
(*num_steps)++;
}
- if (dc->hwss.fams2_global_control_lock_fast) {
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = false;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context);
- block_sequence[*num_steps].func = DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST;
+ if (dc->hwss.dmub_hw_control_lock_fast) {
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.dc = dc;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.lock = false;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context);
+ block_sequence[*num_steps].func = DMUB_HW_CONTROL_LOCK_FAST;
(*num_steps)++;
}
@@ -885,6 +920,13 @@ void hwss_build_fast_sequence(struct dc *dc,
current_mpc_pipe->stream && current_mpc_pipe->plane_state &&
current_mpc_pipe->plane_state->update_flags.bits.addr_update &&
!current_mpc_pipe->plane_state->skip_manual_trigger) {
+ if (dc->hwss.program_cursor_offload_now) {
+ block_sequence[*num_steps].params.program_cursor_update_now_params.dc = dc;
+ block_sequence[*num_steps].params.program_cursor_update_now_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].func = PROGRAM_CURSOR_UPDATE_NOW;
+ (*num_steps)++;
+ }
+
block_sequence[*num_steps].params.program_manual_trigger_params.pipe_ctx = current_mpc_pipe;
block_sequence[*num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER;
(*num_steps)++;
@@ -916,8 +958,9 @@ void hwss_execute_sequence(struct dc *dc,
params->pipe_control_lock_params.lock);
break;
case HUBP_SET_FLIP_CONTROL_GSL:
- dc->hwss.set_flip_control_gsl(params->set_flip_control_gsl_params.pipe_ctx,
- params->set_flip_control_gsl_params.flip_immediate);
+ params->set_flip_control_gsl_params.hubp->funcs->hubp_set_flip_control_surface_gsl(
+ params->set_flip_control_gsl_params.hubp,
+ params->set_flip_control_gsl_params.flip_immediate);
break;
case HUBP_PROGRAM_TRIPLEBUFFER:
dc->hwss.program_triplebuffer(params->program_triplebuffer_params.dc,
@@ -975,8 +1018,301 @@ void hwss_execute_sequence(struct dc *dc,
params->wait_for_dcc_meta_propagation_params.dc,
params->wait_for_dcc_meta_propagation_params.top_pipe_to_program);
break;
- case DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST:
- dc->hwss.fams2_global_control_lock_fast(params);
+ case DMUB_HW_CONTROL_LOCK_FAST:
+ dc->hwss.dmub_hw_control_lock_fast(params);
+ break;
+ case HUBP_PROGRAM_SURFACE_CONFIG:
+ hwss_program_surface_config(params);
+ break;
+ case HUBP_PROGRAM_MCACHE_ID:
+ hwss_program_mcache_id_and_split_coordinate(params);
+ break;
+ case PROGRAM_CURSOR_UPDATE_NOW:
+ dc->hwss.program_cursor_offload_now(
+ params->program_cursor_update_now_params.dc,
+ params->program_cursor_update_now_params.pipe_ctx);
+ break;
+ case HUBP_WAIT_PIPE_READ_START:
+ params->hubp_wait_pipe_read_start_params.hubp->funcs->hubp_wait_pipe_read_start(
+ params->hubp_wait_pipe_read_start_params.hubp);
+ break;
+ case HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM:
+ dc->hwss.apply_update_flags_for_phantom(params->apply_update_flags_for_phantom_params.pipe_ctx);
+ break;
+ case HWS_UPDATE_PHANTOM_VP_POSITION:
+ dc->hwss.update_phantom_vp_position(params->update_phantom_vp_position_params.dc,
+ params->update_phantom_vp_position_params.context,
+ params->update_phantom_vp_position_params.pipe_ctx);
+ break;
+ case OPTC_SET_ODM_COMBINE:
+ hwss_set_odm_combine(params);
+ break;
+ case OPTC_SET_ODM_BYPASS:
+ hwss_set_odm_bypass(params);
+ break;
+ case OPP_PIPE_CLOCK_CONTROL:
+ hwss_opp_pipe_clock_control(params);
+ break;
+ case OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL:
+ hwss_opp_program_left_edge_extra_pixel(params);
+ break;
+ case DCCG_SET_DTO_DSCCLK:
+ hwss_dccg_set_dto_dscclk(params);
+ break;
+ case DSC_SET_CONFIG:
+ hwss_dsc_set_config(params);
+ break;
+ case DSC_ENABLE:
+ hwss_dsc_enable(params);
+ break;
+ case TG_SET_DSC_CONFIG:
+ hwss_tg_set_dsc_config(params);
+ break;
+ case DSC_DISCONNECT:
+ hwss_dsc_disconnect(params);
+ break;
+ case DSC_READ_STATE:
+ hwss_dsc_read_state(params);
+ break;
+ case DSC_CALCULATE_AND_SET_CONFIG:
+ hwss_dsc_calculate_and_set_config(params);
+ break;
+ case DSC_ENABLE_WITH_OPP:
+ hwss_dsc_enable_with_opp(params);
+ break;
+ case TG_PROGRAM_GLOBAL_SYNC:
+ hwss_tg_program_global_sync(params);
+ break;
+ case TG_WAIT_FOR_STATE:
+ hwss_tg_wait_for_state(params);
+ break;
+ case TG_SET_VTG_PARAMS:
+ hwss_tg_set_vtg_params(params);
+ break;
+ case TG_SETUP_VERTICAL_INTERRUPT2:
+ hwss_tg_setup_vertical_interrupt2(params);
+ break;
+ case DPP_SET_HDR_MULTIPLIER:
+ hwss_dpp_set_hdr_multiplier(params);
+ break;
+ case HUBP_PROGRAM_DET_SIZE:
+ hwss_program_det_size(params);
+ break;
+ case HUBP_PROGRAM_DET_SEGMENTS:
+ hwss_program_det_segments(params);
+ break;
+ case OPP_SET_DYN_EXPANSION:
+ hwss_opp_set_dyn_expansion(params);
+ break;
+ case OPP_PROGRAM_FMT:
+ hwss_opp_program_fmt(params);
+ break;
+ case OPP_PROGRAM_BIT_DEPTH_REDUCTION:
+ hwss_opp_program_bit_depth_reduction(params);
+ break;
+ case OPP_SET_DISP_PATTERN_GENERATOR:
+ hwss_opp_set_disp_pattern_generator(params);
+ break;
+ case ABM_SET_PIPE:
+ hwss_set_abm_pipe(params);
+ break;
+ case ABM_SET_LEVEL:
+ hwss_set_abm_level(params);
+ break;
+ case ABM_SET_IMMEDIATE_DISABLE:
+ hwss_set_abm_immediate_disable(params);
+ break;
+ case MPC_REMOVE_MPCC:
+ hwss_mpc_remove_mpcc(params);
+ break;
+ case OPP_SET_MPCC_DISCONNECT_PENDING:
+ hwss_opp_set_mpcc_disconnect_pending(params);
+ break;
+ case DC_SET_OPTIMIZED_REQUIRED:
+ hwss_dc_set_optimized_required(params);
+ break;
+ case HUBP_DISCONNECT:
+ hwss_hubp_disconnect(params);
+ break;
+ case HUBBUB_FORCE_PSTATE_CHANGE_CONTROL:
+ hwss_hubbub_force_pstate_change_control(params);
+ break;
+ case TG_ENABLE_CRTC:
+ hwss_tg_enable_crtc(params);
+ break;
+ case TG_SET_GSL:
+ hwss_tg_set_gsl(params);
+ break;
+ case TG_SET_GSL_SOURCE_SELECT:
+ hwss_tg_set_gsl_source_select(params);
+ break;
+ case HUBP_WAIT_FLIP_PENDING:
+ hwss_hubp_wait_flip_pending(params);
+ break;
+ case TG_WAIT_DOUBLE_BUFFER_PENDING:
+ hwss_tg_wait_double_buffer_pending(params);
+ break;
+ case UPDATE_FORCE_PSTATE:
+ hwss_update_force_pstate(params);
+ break;
+ case HUBBUB_APPLY_DEDCN21_147_WA:
+ hwss_hubbub_apply_dedcn21_147_wa(params);
+ break;
+ case HUBBUB_ALLOW_SELF_REFRESH_CONTROL:
+ hwss_hubbub_allow_self_refresh_control(params);
+ break;
+ case TG_GET_FRAME_COUNT:
+ hwss_tg_get_frame_count(params);
+ break;
+ case MPC_SET_DWB_MUX:
+ hwss_mpc_set_dwb_mux(params);
+ break;
+ case MPC_DISABLE_DWB_MUX:
+ hwss_mpc_disable_dwb_mux(params);
+ break;
+ case MCIF_WB_CONFIG_BUF:
+ hwss_mcif_wb_config_buf(params);
+ break;
+ case MCIF_WB_CONFIG_ARB:
+ hwss_mcif_wb_config_arb(params);
+ break;
+ case MCIF_WB_ENABLE:
+ hwss_mcif_wb_enable(params);
+ break;
+ case MCIF_WB_DISABLE:
+ hwss_mcif_wb_disable(params);
+ break;
+ case DWBC_ENABLE:
+ hwss_dwbc_enable(params);
+ break;
+ case DWBC_DISABLE:
+ hwss_dwbc_disable(params);
+ break;
+ case DWBC_UPDATE:
+ hwss_dwbc_update(params);
+ break;
+ case HUBP_UPDATE_MALL_SEL:
+ hwss_hubp_update_mall_sel(params);
+ break;
+ case HUBP_PREPARE_SUBVP_BUFFERING:
+ hwss_hubp_prepare_subvp_buffering(params);
+ break;
+ case HUBP_SET_BLANK_EN:
+ hwss_hubp_set_blank_en(params);
+ break;
+ case HUBP_DISABLE_CONTROL:
+ hwss_hubp_disable_control(params);
+ break;
+ case HUBBUB_SOFT_RESET:
+ hwss_hubbub_soft_reset(params);
+ break;
+ case HUBP_CLK_CNTL:
+ hwss_hubp_clk_cntl(params);
+ break;
+ case HUBP_INIT:
+ hwss_hubp_init(params);
+ break;
+ case HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS:
+ hwss_hubp_set_vm_system_aperture_settings(params);
+ break;
+ case HUBP_SET_FLIP_INT:
+ hwss_hubp_set_flip_int(params);
+ break;
+ case DPP_DPPCLK_CONTROL:
+ hwss_dpp_dppclk_control(params);
+ break;
+ case DISABLE_PHANTOM_CRTC:
+ hwss_disable_phantom_crtc(params);
+ break;
+ case DSC_PG_STATUS:
+ hwss_dsc_pg_status(params);
+ break;
+ case DSC_WAIT_DISCONNECT_PENDING_CLEAR:
+ hwss_dsc_wait_disconnect_pending_clear(params);
+ break;
+ case DSC_DISABLE:
+ hwss_dsc_disable(params);
+ break;
+ case DCCG_SET_REF_DSCCLK:
+ hwss_dccg_set_ref_dscclk(params);
+ break;
+ case DPP_PG_CONTROL:
+ hwss_dpp_pg_control(params);
+ break;
+ case HUBP_PG_CONTROL:
+ hwss_hubp_pg_control(params);
+ break;
+ case HUBP_RESET:
+ hwss_hubp_reset(params);
+ break;
+ case DPP_RESET:
+ hwss_dpp_reset(params);
+ break;
+ case DPP_ROOT_CLOCK_CONTROL:
+ hwss_dpp_root_clock_control(params);
+ break;
+ case DC_IP_REQUEST_CNTL:
+ hwss_dc_ip_request_cntl(params);
+ break;
+ case DCCG_UPDATE_DPP_DTO:
+ hwss_dccg_update_dpp_dto(params);
+ break;
+ case HUBP_VTG_SEL:
+ hwss_hubp_vtg_sel(params);
+ break;
+ case HUBP_SETUP2:
+ hwss_hubp_setup2(params);
+ break;
+ case HUBP_SETUP:
+ hwss_hubp_setup(params);
+ break;
+ case HUBP_SET_UNBOUNDED_REQUESTING:
+ hwss_hubp_set_unbounded_requesting(params);
+ break;
+ case HUBP_SETUP_INTERDEPENDENT2:
+ hwss_hubp_setup_interdependent2(params);
+ break;
+ case HUBP_SETUP_INTERDEPENDENT:
+ hwss_hubp_setup_interdependent(params);
+ break;
+ case DPP_SET_CURSOR_MATRIX:
+ hwss_dpp_set_cursor_matrix(params);
+ break;
+ case MPC_UPDATE_BLENDING:
+ hwss_mpc_update_blending(params);
+ break;
+ case MPC_ASSERT_IDLE_MPCC:
+ hwss_mpc_assert_idle_mpcc(params);
+ break;
+ case MPC_INSERT_PLANE:
+ hwss_mpc_insert_plane(params);
+ break;
+ case DPP_SET_SCALER:
+ hwss_dpp_set_scaler(params);
+ break;
+ case HUBP_MEM_PROGRAM_VIEWPORT:
+ hwss_hubp_mem_program_viewport(params);
+ break;
+ case ABORT_CURSOR_OFFLOAD_UPDATE:
+ hwss_abort_cursor_offload_update(params);
+ break;
+ case SET_CURSOR_ATTRIBUTE:
+ hwss_set_cursor_attribute(params);
+ break;
+ case SET_CURSOR_POSITION:
+ hwss_set_cursor_position(params);
+ break;
+ case SET_CURSOR_SDR_WHITE_LEVEL:
+ hwss_set_cursor_sdr_white_level(params);
+ break;
+ case PROGRAM_OUTPUT_CSC:
+ hwss_program_output_csc(params);
+ break;
+ case HUBP_SET_BLANK:
+ hwss_hubp_set_blank(params);
+ break;
+ case PHANTOM_HUBP_POST_ENABLE:
+ hwss_phantom_hubp_post_enable(params);
break;
default:
ASSERT(false);
@@ -985,6 +1321,338 @@ void hwss_execute_sequence(struct dc *dc,
}
}
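
The switch above is a plain function-id dispatcher: each recorded step carries a function enum plus a params union, and hwss_execute_sequence() replays the steps in order. A minimal self-contained sketch of the pattern, with hypothetical names standing in for the driver's types:

	/* Hypothetical reduction of the block-sequence dispatch pattern. */
	enum step_func { STEP_LOCK, STEP_FLIP };

	union step_params {
		struct { int lock; } lock_params;
		struct { int immediate; } flip_params;
	};

	struct step {
		enum step_func func;
		union step_params params;
	};

	static void execute(const struct step *steps, int num_steps)
	{
		for (int i = 0; i < num_steps; i++) {
			switch (steps[i].func) {
			case STEP_LOCK:
				/* invoke the lock hook with steps[i].params.lock_params */
				break;
			case STEP_FLIP:
				/* invoke the flip hook with steps[i].params.flip_params */
				break;
			}
		}
	}
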
+/*
+ * Helper function to add OPTC pipe control lock to block sequence
+ */
+void hwss_add_optc_pipe_control_lock(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool lock)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.lock = lock;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_PIPE_CONTROL_LOCK;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP set flip control GSL to block sequence
+ */
+void hwss_add_hubp_set_flip_control_gsl(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool flip_immediate)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_flip_control_gsl_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.set_flip_control_gsl_params.flip_immediate = flip_immediate;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_FLIP_CONTROL_GSL;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP program triplebuffer to block sequence
+ */
+void hwss_add_hubp_program_triplebuffer(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enableTripleBuffer)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.enableTripleBuffer = enableTripleBuffer;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP update plane address to block sequence
+ */
+void hwss_add_hubp_update_plane_addr(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.update_plane_addr_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_plane_addr_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_UPDATE_PLANE_ADDR;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP set input transfer function to block sequence
+ */
+void hwss_add_dpp_set_input_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_plane_state *plane_state)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.plane_state = plane_state;
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP program gamut remap to block sequence
+ */
+void hwss_add_dpp_program_gamut_remap(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_gamut_remap_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = DPP_PROGRAM_GAMUT_REMAP;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP program bias and scale to block sequence
+ */
+void hwss_add_dpp_program_bias_and_scale(struct block_sequence_state *seq_state, struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_bias_and_scale_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPTC program manual trigger to block sequence
+ */
+void hwss_add_optc_program_manual_trigger(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_manual_trigger_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP set output transfer function to block sequence
+ */
+void hwss_add_dpp_set_output_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.stream = stream;
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_OUTPUT_TRANSFER_FUNC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC update visual confirm to block sequence
+ */
+void hwss_add_mpc_update_visual_confirm(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.mpcc_id = mpcc_id;
+ seq_state->steps[*seq_state->num_steps].func = MPC_UPDATE_VISUAL_CONFIRM;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC power on MPC mem PWR to block sequence
+ */
+void hwss_add_mpc_power_on_mpc_mem_pwr(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int mpcc_id,
+ bool power_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.mpcc_id = mpcc_id;
+ seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.power_on = power_on;
+ seq_state->steps[*seq_state->num_steps].func = MPC_POWER_ON_MPC_MEM_PWR;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC set output CSC to block sequence
+ */
+void hwss_add_mpc_set_output_csc(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int opp_id,
+ const uint16_t *regval,
+ enum mpc_output_csc_mode ocsc_mode)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.opp_id = opp_id;
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.regval = regval;
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.ocsc_mode = ocsc_mode;
+ seq_state->steps[*seq_state->num_steps].func = MPC_SET_OUTPUT_CSC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC set OCSC default to block sequence
+ */
+void hwss_add_mpc_set_ocsc_default(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int opp_id,
+ enum dc_color_space colorspace,
+ enum mpc_output_csc_mode ocsc_mode)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.opp_id = opp_id;
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.color_space = colorspace;
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.ocsc_mode = ocsc_mode;
+ seq_state->steps[*seq_state->num_steps].func = MPC_SET_OCSC_DEFAULT;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DMUB send DMCUB command to block sequence
+ */
+void hwss_add_dmub_send_dmcub_cmd(struct block_sequence_state *seq_state,
+ struct dc_context *ctx,
+ union dmub_rb_cmd *cmd,
+ enum dm_dmub_wait_type wait_type)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.ctx = ctx;
+ seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.cmd = cmd;
+ seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.wait_type = wait_type;
+ seq_state->steps[*seq_state->num_steps].func = DMUB_SEND_DMCUB_CMD;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DMUB SubVP save surface address to block sequence
+ */
+void hwss_add_dmub_subvp_save_surf_addr(struct block_sequence_state *seq_state,
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct dc_plane_address *addr,
+ uint8_t subvp_index)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc_dmub_srv;
+ seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.addr = addr;
+ seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.subvp_index = subvp_index;
+ seq_state->steps[*seq_state->num_steps].func = DMUB_SUBVP_SAVE_SURF_ADDR;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP wait for DCC meta propagation to block sequence
+ */
+void hwss_add_hubp_wait_for_dcc_meta_prop(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *top_pipe_to_program)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.wait_for_dcc_meta_propagation_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.wait_for_dcc_meta_propagation_params.top_pipe_to_program = top_pipe_to_program;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_FOR_DCC_META_PROP;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP wait pipe read start to block sequence
+ */
+void hwss_add_hubp_wait_pipe_read_start(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_pipe_read_start_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_PIPE_READ_START;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HWS apply update flags for phantom to block sequence
+ */
+void hwss_add_hws_apply_update_flags_for_phantom(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.apply_update_flags_for_phantom_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HWS update phantom VP position to block sequence
+ */
+void hwss_add_hws_update_phantom_vp_position(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.context = context;
+ seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = HWS_UPDATE_PHANTOM_VP_POSITION;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPTC set ODM combine to block sequence
+ */
+void hwss_add_optc_set_odm_combine(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int opp_inst[MAX_PIPES], int opp_head_count,
+ int odm_slice_width, int last_odm_slice_width)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.tg = tg;
+ memcpy(seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.opp_inst, opp_inst, sizeof(int) * MAX_PIPES);
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.opp_head_count = opp_head_count;
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.odm_slice_width = odm_slice_width;
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.last_odm_slice_width = last_odm_slice_width;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_SET_ODM_COMBINE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPTC set ODM bypass to block sequence
+ */
+void hwss_add_optc_set_odm_bypass(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, struct dc_crtc_timing *timing)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_odm_bypass_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.set_odm_bypass_params.timing = timing;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_SET_ODM_BYPASS;
+ (*seq_state->num_steps)++;
+ }
+}
+
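
Every hwss_add_* helper above and below has the same shape: bounds-check the shared step counter, fill the matching member of the params union, record the function id, and advance the counter; steps past MAX_HWSS_BLOCK_SEQUENCE_SIZE are silently dropped. A generic sketch of that shape, using the types from this file but a hypothetical helper name:

	/* Hypothetical generic form of the hwss_add_* helpers; the real
	 * helpers fill one specific member of the params union rather than
	 * copying it whole. */
	static void example_add_step(struct block_sequence_state *seq_state,
				     enum block_sequence_func func,
				     const union block_sequence_params *params)
	{
		if (*seq_state->num_steps >= MAX_HWSS_BLOCK_SEQUENCE_SIZE)
			return; /* sequence full: the step is silently dropped */

		seq_state->steps[*seq_state->num_steps].params = *params;
		seq_state->steps[*seq_state->num_steps].func = func;
		(*seq_state->num_steps)++;
	}
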
void hwss_send_dmcub_cmd(union block_sequence_params *params)
{
struct dc_context *ctx = params->send_dmcub_cmd_params.ctx;
@@ -994,6 +1662,276 @@ void hwss_send_dmcub_cmd(union block_sequence_params *params)
dc_wake_and_execute_dmub_cmd(ctx, cmd, wait_type);
}
+/*
+ * Helper function to add TG program global sync to block sequence
+ */
+void hwss_add_tg_program_global_sync(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int vready_offset,
+ unsigned int vstartup_lines,
+ unsigned int vupdate_offset_pixels,
+ unsigned int vupdate_vupdate_width_pixels,
+ unsigned int pstate_keepout_start_lines)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vready_offset = vready_offset;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vstartup_lines = vstartup_lines;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vupdate_offset_pixels = vupdate_offset_pixels;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vupdate_vupdate_width_pixels = vupdate_vupdate_width_pixels;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.pstate_keepout_start_lines = pstate_keepout_start_lines;
+ seq_state->steps[*seq_state->num_steps].func = TG_PROGRAM_GLOBAL_SYNC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG wait for state to block sequence
+ */
+void hwss_add_tg_wait_for_state(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ enum crtc_state state)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_for_state_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_for_state_params.state = state;
+ seq_state->steps[*seq_state->num_steps].func = TG_WAIT_FOR_STATE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG set VTG params to block sequence
+ */
+void hwss_add_tg_set_vtg_params(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct dc_crtc_timing *dc_crtc_timing,
+ bool program_fp2)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.timing = dc_crtc_timing;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.program_fp2 = program_fp2;
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_VTG_PARAMS;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG setup vertical interrupt2 to block sequence
+ */
+void hwss_add_tg_setup_vertical_interrupt2(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int start_line)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_setup_vertical_interrupt2_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_setup_vertical_interrupt2_params.start_line = start_line;
+ seq_state->steps[*seq_state->num_steps].func = TG_SETUP_VERTICAL_INTERRUPT2;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP set HDR multiplier to block sequence
+ */
+void hwss_add_dpp_set_hdr_multiplier(struct block_sequence_state *seq_state,
+ struct dpp *dpp, uint32_t hw_mult)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_hdr_multiplier_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_hdr_multiplier_params.hw_mult = hw_mult;
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_HDR_MULTIPLIER;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP program DET size to block sequence
+ */
+void hwss_add_hubp_program_det_size(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ unsigned int hubp_inst,
+ unsigned int det_buffer_size_kb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_det_size_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.program_det_size_params.hubp_inst = hubp_inst;
+ seq_state->steps[*seq_state->num_steps].params.program_det_size_params.det_buffer_size_kb = det_buffer_size_kb;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_DET_SIZE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_program_mcache_id(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_hubp_pipe_mcache_regs *mcache_regs)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_mcache_id_and_split_coordinate.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.program_mcache_id_and_split_coordinate.mcache_regs = mcache_regs;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_MCACHE_ID;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubbub_force_pstate_change_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ bool enable,
+ bool wait)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.enable = enable;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.wait = wait;
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_FORCE_PSTATE_CHANGE_CONTROL;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP program DET segments to block sequence
+ */
+void hwss_add_hubp_program_det_segments(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ unsigned int hubp_inst,
+ unsigned int det_size)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.hubp_inst = hubp_inst;
+ seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.det_size = det_size;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_DET_SEGMENTS;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPP set dynamic expansion to block sequence
+ */
+void hwss_add_opp_set_dyn_expansion(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum dc_color_space color_space,
+ enum dc_color_depth color_depth,
+ enum signal_type signal)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.color_space = color_space;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.color_depth = color_depth;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.signal = signal;
+ seq_state->steps[*seq_state->num_steps].func = OPP_SET_DYN_EXPANSION;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPP program FMT to block sequence
+ */
+void hwss_add_opp_program_fmt(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.fmt_bit_depth = fmt_bit_depth;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.clamping = clamping;
+ seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_FMT;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_opp_program_left_edge_extra_pixel(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum dc_pixel_encoding pixel_encoding,
+ bool is_otg_master)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.pixel_encoding = pixel_encoding;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.is_otg_master = is_otg_master;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add ABM set pipe to block sequence
+ */
+void hwss_add_abm_set_pipe(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_abm_pipe_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_pipe_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = ABM_SET_PIPE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add ABM set level to block sequence
+ */
+void hwss_add_abm_set_level(struct block_sequence_state *seq_state,
+ struct abm *abm,
+ uint32_t abm_level)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_abm_level_params.abm = abm;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_level_params.abm_level = abm_level;
+ seq_state->steps[*seq_state->num_steps].func = ABM_SET_LEVEL;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG enable CRTC to block sequence
+ */
+void hwss_add_tg_enable_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_enable_crtc_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].func = TG_ENABLE_CRTC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP wait flip pending to block sequence
+ */
+void hwss_add_hubp_wait_flip_pending(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ unsigned int timeout_us,
+ unsigned int polling_interval_us)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.timeout_us = timeout_us;
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.polling_interval_us = polling_interval_us;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_FLIP_PENDING;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG wait double buffer pending to block sequence
+ */
+void hwss_add_tg_wait_double_buffer_pending(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ unsigned int timeout_us,
+ unsigned int polling_interval_us)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.timeout_us = timeout_us;
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.polling_interval_us = polling_interval_us;
+ seq_state->steps[*seq_state->num_steps].func = TG_WAIT_DOUBLE_BUFFER_PENDING;
+ (*seq_state->num_steps)++;
+ }
+}
+
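
The two wait helpers directly above record a timeout_us/polling_interval_us pair; the actual waits are implemented behind the hubp and tg function tables, but a hedged sketch of how such a pair is typically consumed looks like this (example_poll_until is hypothetical):

	/* Hypothetical polling loop for a (timeout_us, polling_interval_us)
	 * pair like the ones recorded above. */
	static bool example_poll_until(bool (*done)(void *arg), void *arg,
				       unsigned int timeout_us,
				       unsigned int polling_interval_us)
	{
		unsigned int waited_us = 0;

		while (!done(arg)) {
			if (waited_us >= timeout_us)
				return false; /* timed out */
			udelay(polling_interval_us); /* <linux/delay.h> */
			waited_us += polling_interval_us;
		}
		return true;
	}
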
void hwss_program_manual_trigger(union block_sequence_params *params)
{
struct pipe_ctx *pipe_ctx = params->program_manual_trigger_params.pipe_ctx;
@@ -1020,12 +1958,6 @@ void hwss_setup_dpp(union block_sequence_params *params)
plane_state->color_space,
NULL);
}
-
- if (dpp && dpp->funcs->set_cursor_matrix) {
- dpp->funcs->set_cursor_matrix(dpp,
- plane_state->color_space,
- plane_state->cursor_csc_color_matrix);
- }
}
void hwss_program_bias_and_scale(union block_sequence_params *params)
@@ -1036,9 +1968,8 @@ void hwss_program_bias_and_scale(union block_sequence_params *params)
struct dc_bias_and_scale bns_params = plane_state->bias_and_scale;
 	//TODO: for CNVC set scale and bias registers if necessary
- if (dpp->funcs->dpp_program_bias_and_scale) {
+ if (dpp->funcs->dpp_program_bias_and_scale)
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
- }
}
void hwss_power_on_mpc_mem_pwr(union block_sequence_params *params)
@@ -1088,6 +2019,39 @@ void hwss_subvp_save_surf_addr(union block_sequence_params *params)
dc_dmub_srv_subvp_save_surf_addr(dc_dmub_srv, addr, subvp_index);
}
+void hwss_program_surface_config(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->program_surface_config_params.hubp;
+ enum surface_pixel_format format = params->program_surface_config_params.format;
+ struct dc_tiling_info *tiling_info = params->program_surface_config_params.tiling_info;
+ struct plane_size size = params->program_surface_config_params.plane_size;
+ enum dc_rotation_angle rotation = params->program_surface_config_params.rotation;
+ struct dc_plane_dcc_param *dcc = params->program_surface_config_params.dcc;
+ bool horizontal_mirror = params->program_surface_config_params.horizontal_mirror;
+ int compat_level = params->program_surface_config_params.compat_level;
+
+ hubp->funcs->hubp_program_surface_config(
+ hubp,
+ format,
+ tiling_info,
+ &size,
+ rotation,
+ dcc,
+ horizontal_mirror,
+ compat_level);
+
+ hubp->power_gated = false;
+}
+
+void hwss_program_mcache_id_and_split_coordinate(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->program_mcache_id_and_split_coordinate.hubp;
+ struct dml2_hubp_pipe_mcache_regs *mcache_regs = params->program_mcache_id_and_split_coordinate.mcache_regs;
+
+ hubp->funcs->hubp_program_mcache_id_and_split_coordinate(hubp, mcache_regs);
+}
+
void get_surface_tile_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
@@ -1151,6 +2115,8 @@ void hwss_wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *c
tg = otg_master->stream_res.tg;
if (tg->funcs->wait_odm_doublebuffer_pending_clear)
tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+ if (tg->funcs->wait_otg_disable)
+ tg->funcs->wait_otg_disable(tg);
}
/* ODM update may require to reprogram blank pattern for each OPP */
@@ -1160,6 +2126,7 @@ void hwss_wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *c
void hwss_wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
+
for (i = 0; i < MAX_PIPES; i++) {
int count = 0;
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1236,3 +2203,1869 @@ void hwss_process_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_cont
if (dc->hwss.program_outstanding_updates)
dc->hwss.program_outstanding_updates(dc, dc_context);
}
+
+void hwss_set_odm_combine(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->set_odm_combine_params.tg;
+ int *opp_inst = params->set_odm_combine_params.opp_inst;
+ int opp_head_count = params->set_odm_combine_params.opp_head_count;
+ int odm_slice_width = params->set_odm_combine_params.odm_slice_width;
+ int last_odm_slice_width = params->set_odm_combine_params.last_odm_slice_width;
+
+ if (tg && tg->funcs->set_odm_combine)
+ tg->funcs->set_odm_combine(tg, opp_inst, opp_head_count,
+ odm_slice_width, last_odm_slice_width);
+}
+
+void hwss_set_odm_bypass(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->set_odm_bypass_params.tg;
+ const struct dc_crtc_timing *timing = params->set_odm_bypass_params.timing;
+
+ if (tg && tg->funcs->set_odm_bypass)
+ tg->funcs->set_odm_bypass(tg, timing);
+}
+
+void hwss_opp_pipe_clock_control(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_pipe_clock_control_params.opp;
+ bool enable = params->opp_pipe_clock_control_params.enable;
+
+ if (opp && opp->funcs->opp_pipe_clock_control)
+ opp->funcs->opp_pipe_clock_control(opp, enable);
+}
+
+void hwss_opp_program_left_edge_extra_pixel(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_program_left_edge_extra_pixel_params.opp;
+ enum dc_pixel_encoding pixel_encoding = params->opp_program_left_edge_extra_pixel_params.pixel_encoding;
+ bool is_otg_master = params->opp_program_left_edge_extra_pixel_params.is_otg_master;
+
+ if (opp && opp->funcs->opp_program_left_edge_extra_pixel)
+ opp->funcs->opp_program_left_edge_extra_pixel(opp, pixel_encoding, is_otg_master);
+}
+
+void hwss_dccg_set_dto_dscclk(union block_sequence_params *params)
+{
+ struct dccg *dccg = params->dccg_set_dto_dscclk_params.dccg;
+ int inst = params->dccg_set_dto_dscclk_params.inst;
+ int num_slices_h = params->dccg_set_dto_dscclk_params.num_slices_h;
+
+ if (dccg && dccg->funcs->set_dto_dscclk)
+ dccg->funcs->set_dto_dscclk(dccg, inst, num_slices_h);
+}
+
+void hwss_dsc_set_config(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_set_config_params.dsc;
+ struct dsc_config *dsc_cfg = params->dsc_set_config_params.dsc_cfg;
+ struct dsc_optc_config *dsc_optc_cfg = params->dsc_set_config_params.dsc_optc_cfg;
+
+ if (dsc && dsc->funcs->dsc_set_config)
+ dsc->funcs->dsc_set_config(dsc, dsc_cfg, dsc_optc_cfg);
+}
+
+void hwss_dsc_enable(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_enable_params.dsc;
+ int opp_inst = params->dsc_enable_params.opp_inst;
+
+ if (dsc && dsc->funcs->dsc_enable)
+ dsc->funcs->dsc_enable(dsc, opp_inst);
+}
+
+void hwss_tg_set_dsc_config(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_dsc_config_params.tg;
+ enum optc_dsc_mode optc_dsc_mode = OPTC_DSC_DISABLED;
+ uint32_t bytes_per_pixel = 0;
+ uint32_t slice_width = 0;
+
+ if (params->tg_set_dsc_config_params.enable) {
+ struct dsc_optc_config *dsc_optc_cfg = params->tg_set_dsc_config_params.dsc_optc_cfg;
+
+ if (dsc_optc_cfg) {
+ bytes_per_pixel = dsc_optc_cfg->bytes_per_pixel;
+ slice_width = dsc_optc_cfg->slice_width;
+ optc_dsc_mode = dsc_optc_cfg->is_pixel_format_444 ?
+ OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
+ }
+ }
+
+ if (tg && tg->funcs->set_dsc_config)
+ tg->funcs->set_dsc_config(tg, optc_dsc_mode, bytes_per_pixel, slice_width);
+}
+
+void hwss_dsc_disconnect(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_disconnect_params.dsc;
+
+ if (dsc && dsc->funcs->dsc_disconnect)
+ dsc->funcs->dsc_disconnect(dsc);
+}
+
+void hwss_dsc_read_state(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_read_state_params.dsc;
+ struct dcn_dsc_state *dsc_state = params->dsc_read_state_params.dsc_state;
+
+ if (dsc && dsc->funcs->dsc_read_state)
+ dsc->funcs->dsc_read_state(dsc, dsc_state);
+}
+
+void hwss_dsc_calculate_and_set_config(union block_sequence_params *params)
+{
+	struct pipe_ctx *pipe_ctx = params->dsc_calculate_and_set_config_params.pipe_ctx;
+	struct pipe_ctx *top_pipe = pipe_ctx;
+	bool enable = params->dsc_calculate_and_set_config_params.enable;
+	int opp_cnt = params->dsc_calculate_and_set_config_params.opp_cnt;
+	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+	struct dc_stream_state *stream = pipe_ctx->stream;
+	struct dsc_config dsc_cfg;
+
+	if (!dsc || !enable)
+		return;
+
+	/* Calculate DSC configuration - extracted from dcn32_update_dsc_on_stream */
+ while (top_pipe->prev_odm_pipe)
+ top_pipe = top_pipe->prev_odm_pipe;
+
+ dsc_cfg.pic_width = (stream->timing.h_addressable + top_pipe->dsc_padding_params.dsc_hactive_padding +
+ stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
+ dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
+ dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+ dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = top_pipe->next_odm_pipe ? true : false;
+ dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = top_pipe->dsc_padding_params.dsc_hactive_padding;
+
+ /* Set DSC configuration */
+ if (dsc->funcs->dsc_set_config)
+ dsc->funcs->dsc_set_config(dsc, &dsc_cfg,
+ &params->dsc_calculate_and_set_config_params.dsc_optc_cfg);
+}
+
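
For the per-segment division in hwss_dsc_calculate_and_set_config() above, a worked example with assumed numbers: a 3840x2160 stream carrying dsc_cfg.num_slices_h = 8, driven through 2-way ODM (opp_cnt = 2), with borders and padding ignored:

	/* Illustrative arithmetic only; the mode and slice count are
	 * assumptions, not taken from the patch. */
	static void example_dsc_odm_split(void)
	{
		unsigned int h_addressable = 3840, v_addressable = 2160;
		unsigned int num_slices_h = 8, opp_cnt = 2;
		unsigned int pic_width = h_addressable / opp_cnt;     /* 1920 per ODM segment */
		unsigned int pic_height = v_addressable;              /* 2160: height is not split */
		unsigned int slices_per_opp = num_slices_h / opp_cnt; /* 4 slices per OPP */

		(void)pic_width;
		(void)pic_height;
		(void)slices_per_opp;
	}
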
+void hwss_dsc_enable_with_opp(union block_sequence_params *params)
+{
+ struct pipe_ctx *pipe_ctx = params->dsc_enable_with_opp_params.pipe_ctx;
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+
+ if (dsc && dsc->funcs->dsc_enable)
+ dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
+}
+
+void hwss_tg_program_global_sync(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_program_global_sync_params.tg;
+ int vready_offset = params->tg_program_global_sync_params.vready_offset;
+ unsigned int vstartup_lines = params->tg_program_global_sync_params.vstartup_lines;
+ unsigned int vupdate_offset_pixels = params->tg_program_global_sync_params.vupdate_offset_pixels;
+ unsigned int vupdate_vupdate_width_pixels = params->tg_program_global_sync_params.vupdate_vupdate_width_pixels;
+ unsigned int pstate_keepout_start_lines = params->tg_program_global_sync_params.pstate_keepout_start_lines;
+
+ if (tg->funcs->program_global_sync) {
+ tg->funcs->program_global_sync(tg, vready_offset, vstartup_lines,
+ vupdate_offset_pixels, vupdate_vupdate_width_pixels, pstate_keepout_start_lines);
+ }
+}
+
+void hwss_tg_wait_for_state(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_wait_for_state_params.tg;
+ enum crtc_state state = params->tg_wait_for_state_params.state;
+
+ if (tg->funcs->wait_for_state)
+ tg->funcs->wait_for_state(tg, state);
+}
+
+void hwss_tg_set_vtg_params(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_vtg_params_params.tg;
+ struct dc_crtc_timing *timing = params->tg_set_vtg_params_params.timing;
+ bool program_fp2 = params->tg_set_vtg_params_params.program_fp2;
+
+ if (tg->funcs->set_vtg_params)
+ tg->funcs->set_vtg_params(tg, timing, program_fp2);
+}
+
+void hwss_tg_setup_vertical_interrupt2(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_setup_vertical_interrupt2_params.tg;
+ int start_line = params->tg_setup_vertical_interrupt2_params.start_line;
+
+ if (tg->funcs->setup_vertical_interrupt2)
+ tg->funcs->setup_vertical_interrupt2(tg, start_line);
+}
+
+void hwss_dpp_set_hdr_multiplier(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_set_hdr_multiplier_params.dpp;
+ uint32_t hw_mult = params->dpp_set_hdr_multiplier_params.hw_mult;
+
+ if (dpp->funcs->dpp_set_hdr_multiplier)
+ dpp->funcs->dpp_set_hdr_multiplier(dpp, hw_mult);
+}
+
+void hwss_program_det_size(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->program_det_size_params.hubbub;
+ unsigned int hubp_inst = params->program_det_size_params.hubp_inst;
+ unsigned int det_buffer_size_kb = params->program_det_size_params.det_buffer_size_kb;
+
+ if (hubbub->funcs->program_det_size)
+ hubbub->funcs->program_det_size(hubbub, hubp_inst, det_buffer_size_kb);
+}
+
+void hwss_program_det_segments(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->program_det_segments_params.hubbub;
+ unsigned int hubp_inst = params->program_det_segments_params.hubp_inst;
+ unsigned int det_size = params->program_det_segments_params.det_size;
+
+ if (hubbub->funcs->program_det_segments)
+ hubbub->funcs->program_det_segments(hubbub, hubp_inst, det_size);
+}
+
+void hwss_opp_set_dyn_expansion(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_set_dyn_expansion_params.opp;
+ enum dc_color_space color_space = params->opp_set_dyn_expansion_params.color_space;
+ enum dc_color_depth color_depth = params->opp_set_dyn_expansion_params.color_depth;
+ enum signal_type signal = params->opp_set_dyn_expansion_params.signal;
+
+ if (opp->funcs->opp_set_dyn_expansion)
+ opp->funcs->opp_set_dyn_expansion(opp, color_space, color_depth, signal);
+}
+
+void hwss_opp_program_fmt(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_program_fmt_params.opp;
+ struct bit_depth_reduction_params *fmt_bit_depth = params->opp_program_fmt_params.fmt_bit_depth;
+ struct clamping_and_pixel_encoding_params *clamping = params->opp_program_fmt_params.clamping;
+
+ if (opp->funcs->opp_program_fmt)
+ opp->funcs->opp_program_fmt(opp, fmt_bit_depth, clamping);
+}
+
+void hwss_opp_program_bit_depth_reduction(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_program_bit_depth_reduction_params.opp;
+ bool use_default_params = params->opp_program_bit_depth_reduction_params.use_default_params;
+ struct pipe_ctx *pipe_ctx = params->opp_program_bit_depth_reduction_params.pipe_ctx;
+ struct bit_depth_reduction_params bit_depth_params;
+
+ if (use_default_params)
+ memset(&bit_depth_params, 0, sizeof(bit_depth_params));
+ else
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream, &bit_depth_params);
+
+ if (opp->funcs->opp_program_bit_depth_reduction)
+ opp->funcs->opp_program_bit_depth_reduction(opp, &bit_depth_params);
+}
+
+void hwss_opp_set_disp_pattern_generator(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_set_disp_pattern_generator_params.opp;
+ enum controller_dp_test_pattern test_pattern = params->opp_set_disp_pattern_generator_params.test_pattern;
+ enum controller_dp_color_space color_space = params->opp_set_disp_pattern_generator_params.color_space;
+ enum dc_color_depth color_depth = params->opp_set_disp_pattern_generator_params.color_depth;
+ struct tg_color *solid_color = params->opp_set_disp_pattern_generator_params.use_solid_color ?
+ &params->opp_set_disp_pattern_generator_params.solid_color : NULL;
+ int width = params->opp_set_disp_pattern_generator_params.width;
+ int height = params->opp_set_disp_pattern_generator_params.height;
+ int offset = params->opp_set_disp_pattern_generator_params.offset;
+
+ if (opp && opp->funcs->opp_set_disp_pattern_generator) {
+ opp->funcs->opp_set_disp_pattern_generator(opp, test_pattern, color_space,
+ color_depth, solid_color, width, height, offset);
+ }
+}
+
+void hwss_set_abm_pipe(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_abm_pipe_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_abm_pipe_params.pipe_ctx;
+
+ if (dc->hwss.set_pipe)
+ dc->hwss.set_pipe(pipe_ctx);
+}
+
+void hwss_set_abm_level(union block_sequence_params *params)
+{
+ struct abm *abm = params->set_abm_level_params.abm;
+ unsigned int abm_level = params->set_abm_level_params.abm_level;
+
+ if (abm->funcs->set_abm_level)
+ abm->funcs->set_abm_level(abm, abm_level);
+}
+
+void hwss_set_abm_immediate_disable(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_abm_immediate_disable_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_abm_immediate_disable_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_abm_immediate_disable)
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
+}
+
+void hwss_mpc_remove_mpcc(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_remove_mpcc_params.mpc;
+ struct mpc_tree *mpc_tree_params = params->mpc_remove_mpcc_params.mpc_tree_params;
+ struct mpcc *mpcc_to_remove = params->mpc_remove_mpcc_params.mpcc_to_remove;
+
+ if (mpc->funcs->remove_mpcc)
+ mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
+}
+
+void hwss_opp_set_mpcc_disconnect_pending(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_set_mpcc_disconnect_pending_params.opp;
+ int mpcc_inst = params->opp_set_mpcc_disconnect_pending_params.mpcc_inst;
+ bool pending = params->opp_set_mpcc_disconnect_pending_params.pending;
+
+ opp->mpcc_disconnect_pending[mpcc_inst] = pending;
+}
+
+void hwss_dc_set_optimized_required(union block_sequence_params *params)
+{
+ struct dc *dc = params->dc_set_optimized_required_params.dc;
+ bool optimized_required = params->dc_set_optimized_required_params.optimized_required;
+
+ dc->optimized_required = optimized_required;
+}
+
+void hwss_hubp_disconnect(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_disconnect_params.hubp;
+
+ if (hubp->funcs->hubp_disconnect)
+ hubp->funcs->hubp_disconnect(hubp);
+}
+
+void hwss_hubbub_force_pstate_change_control(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_force_pstate_change_control_params.hubbub;
+ bool enable = params->hubbub_force_pstate_change_control_params.enable;
+ bool wait = params->hubbub_force_pstate_change_control_params.wait;
+
+ if (hubbub->funcs->force_pstate_change_control) {
+ hubbub->funcs->force_pstate_change_control(hubbub, enable, wait);
+ /* Add delay when enabling pstate change control */
+ if (enable)
+ udelay(500);
+ }
+}
+
+void hwss_tg_enable_crtc(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_enable_crtc_params.tg;
+
+ if (tg->funcs->enable_crtc)
+ tg->funcs->enable_crtc(tg);
+}
+
+void hwss_tg_set_gsl(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_gsl_params.tg;
+ struct gsl_params *gsl = &params->tg_set_gsl_params.gsl;
+
+ if (tg->funcs->set_gsl)
+ tg->funcs->set_gsl(tg, gsl);
+}
+
+void hwss_tg_set_gsl_source_select(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_gsl_source_select_params.tg;
+ int group_idx = params->tg_set_gsl_source_select_params.group_idx;
+ uint32_t gsl_ready_signal = params->tg_set_gsl_source_select_params.gsl_ready_signal;
+
+ if (tg->funcs->set_gsl_source_select)
+ tg->funcs->set_gsl_source_select(tg, group_idx, gsl_ready_signal);
+}
+
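+/*
+ * Busy-wait until the HUBP reports no pending flip, polling every
+ * polling_interval_us for at most timeout_us. Callers must pass a
+ * nonzero polling_interval_us.
+ */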
+void hwss_hubp_wait_flip_pending(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_wait_flip_pending_params.hubp;
+ unsigned int timeout_us = params->hubp_wait_flip_pending_params.timeout_us;
+ unsigned int polling_interval_us = params->hubp_wait_flip_pending_params.polling_interval_us;
+ int j = 0;
+
+ if (hubp->funcs->hubp_is_flip_pending) {
+ for (j = 0; j < timeout_us / polling_interval_us
+ && hubp->funcs->hubp_is_flip_pending(hubp); j++)
+ udelay(polling_interval_us);
+ }
+}
+
+void hwss_tg_wait_double_buffer_pending(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_wait_double_buffer_pending_params.tg;
+ unsigned int timeout_us = params->tg_wait_double_buffer_pending_params.timeout_us;
+ unsigned int polling_interval_us = params->tg_wait_double_buffer_pending_params.polling_interval_us;
+ int j = 0;
+
+ if (tg->funcs->get_optc_double_buffer_pending) {
+ for (j = 0; j < timeout_us / polling_interval_us
+ && tg->funcs->get_optc_double_buffer_pending(tg); j++)
+ udelay(polling_interval_us);
+ }
+}
+
+void hwss_update_force_pstate(union block_sequence_params *params)
+{
+ struct dc *dc = params->update_force_pstate_params.dc;
+ struct dc_state *context = params->update_force_pstate_params.context;
+ struct dce_hwseq *hwseq = dc->hwseq;
+
+ if (hwseq->funcs.update_force_pstate)
+ hwseq->funcs.update_force_pstate(dc, context);
+}
+
+void hwss_hubbub_apply_dedcn21_147_wa(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_apply_dedcn21_147_wa_params.hubbub;
+
+ hubbub->funcs->apply_DEDCN21_147_wa(hubbub);
+}
+
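+/*
+ * Toggle HUBBUB self-refresh control. When self-refresh is being
+ * disallowed, the optional disallow_self_refresh_applied flag is set so
+ * the caller can tell that the disallow step was actually executed.
+ */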
+void hwss_hubbub_allow_self_refresh_control(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_allow_self_refresh_control_params.hubbub;
+ bool allow = params->hubbub_allow_self_refresh_control_params.allow;
+
+ hubbub->funcs->allow_self_refresh_control(hubbub, allow);
+
+ if (!allow && params->hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied)
+ *params->hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied = true;
+}
+
+void hwss_tg_get_frame_count(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_get_frame_count_params.tg;
+ unsigned int *frame_count = params->tg_get_frame_count_params.frame_count;
+
+ *frame_count = tg->funcs->get_frame_count(tg);
+}
+
+void hwss_mpc_set_dwb_mux(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_set_dwb_mux_params.mpc;
+ int dwb_id = params->mpc_set_dwb_mux_params.dwb_id;
+ int mpcc_id = params->mpc_set_dwb_mux_params.mpcc_id;
+
+ if (mpc->funcs->set_dwb_mux)
+ mpc->funcs->set_dwb_mux(mpc, dwb_id, mpcc_id);
+}
+
+void hwss_mpc_disable_dwb_mux(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_disable_dwb_mux_params.mpc;
+ unsigned int dwb_id = params->mpc_disable_dwb_mux_params.dwb_id;
+
+ if (mpc->funcs->disable_dwb_mux)
+ mpc->funcs->disable_dwb_mux(mpc, dwb_id);
+}
+
+void hwss_mcif_wb_config_buf(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_config_buf_params.mcif_wb;
+ struct mcif_buf_params *mcif_buf_params = params->mcif_wb_config_buf_params.mcif_buf_params;
+ unsigned int dest_height = params->mcif_wb_config_buf_params.dest_height;
+
+ if (mcif_wb->funcs->config_mcif_buf)
+ mcif_wb->funcs->config_mcif_buf(mcif_wb, mcif_buf_params, dest_height);
+}
+
+void hwss_mcif_wb_config_arb(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_config_arb_params.mcif_wb;
+ struct mcif_arb_params *mcif_arb_params = params->mcif_wb_config_arb_params.mcif_arb_params;
+
+ if (mcif_wb->funcs->config_mcif_arb)
+ mcif_wb->funcs->config_mcif_arb(mcif_wb, mcif_arb_params);
+}
+
+void hwss_mcif_wb_enable(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_enable_params.mcif_wb;
+
+ if (mcif_wb->funcs->enable_mcif)
+ mcif_wb->funcs->enable_mcif(mcif_wb);
+}
+
+void hwss_mcif_wb_disable(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_disable_params.mcif_wb;
+
+ if (mcif_wb->funcs->disable_mcif)
+ mcif_wb->funcs->disable_mcif(mcif_wb);
+}
+
+void hwss_dwbc_enable(union block_sequence_params *params)
+{
+ struct dwbc *dwb = params->dwbc_enable_params.dwb;
+ struct dc_dwb_params *dwb_params = params->dwbc_enable_params.dwb_params;
+
+ if (dwb->funcs->enable)
+ dwb->funcs->enable(dwb, dwb_params);
+}
+
+void hwss_dwbc_disable(union block_sequence_params *params)
+{
+ struct dwbc *dwb = params->dwbc_disable_params.dwb;
+
+ if (dwb->funcs->disable)
+ dwb->funcs->disable(dwb);
+}
+
+void hwss_dwbc_update(union block_sequence_params *params)
+{
+ struct dwbc *dwb = params->dwbc_update_params.dwb;
+ struct dc_dwb_params *dwb_params = params->dwbc_update_params.dwb_params;
+
+ if (dwb->funcs->update)
+ dwb->funcs->update(dwb, dwb_params);
+}
+
+void hwss_hubp_update_mall_sel(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_update_mall_sel_params.hubp;
+ uint32_t mall_sel = params->hubp_update_mall_sel_params.mall_sel;
+ bool cache_cursor = params->hubp_update_mall_sel_params.cache_cursor;
+
+ if (hubp && hubp->funcs->hubp_update_mall_sel)
+ hubp->funcs->hubp_update_mall_sel(hubp, mall_sel, cache_cursor);
+}
+
+void hwss_hubp_prepare_subvp_buffering(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_prepare_subvp_buffering_params.hubp;
+ bool enable = params->hubp_prepare_subvp_buffering_params.enable;
+
+ if (hubp && hubp->funcs->hubp_prepare_subvp_buffering)
+ hubp->funcs->hubp_prepare_subvp_buffering(hubp, enable);
+}
+
+void hwss_hubp_set_blank_en(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_blank_en_params.hubp;
+ bool enable = params->hubp_set_blank_en_params.enable;
+
+ if (hubp && hubp->funcs->set_hubp_blank_en)
+ hubp->funcs->set_hubp_blank_en(hubp, enable);
+}
+
+void hwss_hubp_disable_control(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_disable_control_params.hubp;
+ bool disable = params->hubp_disable_control_params.disable;
+
+ if (hubp && hubp->funcs->hubp_disable_control)
+ hubp->funcs->hubp_disable_control(hubp, disable);
+}
+
+void hwss_hubbub_soft_reset(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_soft_reset_params.hubbub;
+ bool reset = params->hubbub_soft_reset_params.reset;
+
+ if (hubbub && params->hubbub_soft_reset_params.hubbub_soft_reset)
+ params->hubbub_soft_reset_params.hubbub_soft_reset(hubbub, reset);
+}
+
+void hwss_hubp_clk_cntl(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_clk_cntl_params.hubp;
+ bool enable = params->hubp_clk_cntl_params.enable;
+
+ if (hubp && hubp->funcs->hubp_clk_cntl) {
+ hubp->funcs->hubp_clk_cntl(hubp, enable);
+ hubp->power_gated = !enable;
+ }
+}
+
+void hwss_hubp_init(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_init_params.hubp;
+
+ if (hubp && hubp->funcs->hubp_init)
+ hubp->funcs->hubp_init(hubp);
+}
+
+void hwss_hubp_set_vm_system_aperture_settings(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_vm_system_aperture_settings_params.hubp;
+ struct vm_system_aperture_param apt;
+
+ apt.sys_default = params->hubp_set_vm_system_aperture_settings_params.sys_default;
+ apt.sys_high = params->hubp_set_vm_system_aperture_settings_params.sys_high;
+ apt.sys_low = params->hubp_set_vm_system_aperture_settings_params.sys_low;
+
+ if (hubp && hubp->funcs->hubp_set_vm_system_aperture_settings)
+ hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
+}
+
+void hwss_hubp_set_flip_int(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_flip_int_params.hubp;
+
+ if (hubp && hubp->funcs->hubp_set_flip_int)
+ hubp->funcs->hubp_set_flip_int(hubp);
+}
+
+void hwss_dpp_dppclk_control(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_dppclk_control_params.dpp;
+ bool dppclk_div = params->dpp_dppclk_control_params.dppclk_div;
+ bool enable = params->dpp_dppclk_control_params.enable;
+
+ if (dpp && dpp->funcs->dpp_dppclk_control)
+ dpp->funcs->dpp_dppclk_control(dpp, dppclk_div, enable);
+}
+
+void hwss_disable_phantom_crtc(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->disable_phantom_crtc_params.tg;
+
+ if (tg && tg->funcs->disable_phantom_crtc)
+ tg->funcs->disable_phantom_crtc(tg);
+}
+
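+/*
+ * DSC power-gate handshake: DSC_PG_STATUS queries whether the DSC
+ * instance is ungated and records the answer; the DSC teardown steps
+ * below consult that result through their is_ungated pointer and
+ * become no-ops when the DSC is power gated.
+ */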
+void hwss_dsc_pg_status(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->dsc_pg_status_params.hws;
+ int dsc_inst = params->dsc_pg_status_params.dsc_inst;
+
+ if (hws && hws->funcs.dsc_pg_status)
+ params->dsc_pg_status_params.is_ungated = hws->funcs.dsc_pg_status(hws, dsc_inst);
+}
+
+void hwss_dsc_wait_disconnect_pending_clear(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_wait_disconnect_pending_clear_params.dsc;
+
+ if (!params->dsc_wait_disconnect_pending_clear_params.is_ungated ||
+ !*params->dsc_wait_disconnect_pending_clear_params.is_ungated)
+ return;
+
+ if (dsc && dsc->funcs->dsc_wait_disconnect_pending_clear)
+ dsc->funcs->dsc_wait_disconnect_pending_clear(dsc);
+}
+
+void hwss_dsc_disable(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_disable_params.dsc;
+
+ if (!params->dsc_disable_params.is_ungated ||
+ !*params->dsc_disable_params.is_ungated)
+ return;
+
+ if (dsc && dsc->funcs->dsc_disable)
+ dsc->funcs->dsc_disable(dsc);
+}
+
+void hwss_dccg_set_ref_dscclk(union block_sequence_params *params)
+{
+ struct dccg *dccg = params->dccg_set_ref_dscclk_params.dccg;
+ int dsc_inst = params->dccg_set_ref_dscclk_params.dsc_inst;
+
+ if (!params->dccg_set_ref_dscclk_params.is_ungated ||
+ !*params->dccg_set_ref_dscclk_params.is_ungated)
+ return;
+
+ if (dccg && dccg->funcs->set_ref_dscclk)
+ dccg->funcs->set_ref_dscclk(dccg, dsc_inst);
+}
+
+void hwss_dpp_pg_control(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->dpp_pg_control_params.hws;
+ unsigned int dpp_inst = params->dpp_pg_control_params.dpp_inst;
+ bool power_on = params->dpp_pg_control_params.power_on;
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, dpp_inst, power_on);
+}
+
+void hwss_hubp_pg_control(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->hubp_pg_control_params.hws;
+ unsigned int hubp_inst = params->hubp_pg_control_params.hubp_inst;
+ bool power_on = params->hubp_pg_control_params.power_on;
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, hubp_inst, power_on);
+}
+
+void hwss_hubp_reset(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_reset_params.hubp;
+
+ if (hubp && hubp->funcs->hubp_reset)
+ hubp->funcs->hubp_reset(hubp);
+}
+
+void hwss_dpp_reset(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_reset_params.dpp;
+
+ if (dpp && dpp->funcs->dpp_reset)
+ dpp->funcs->dpp_reset(dpp);
+}
+
+void hwss_dpp_root_clock_control(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->dpp_root_clock_control_params.hws;
+ unsigned int dpp_inst = params->dpp_root_clock_control_params.dpp_inst;
+ bool clock_on = params->dpp_root_clock_control_params.clock_on;
+
+ if (hws->funcs.dpp_root_clock_control)
+ hws->funcs.dpp_root_clock_control(hws, dpp_inst, clock_on);
+}
+
+void hwss_dc_ip_request_cntl(union block_sequence_params *params)
+{
+ struct dc *dc = params->dc_ip_request_cntl_params.dc;
+ bool enable = params->dc_ip_request_cntl_params.enable;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (hws->funcs.dc_ip_request_cntl)
+ hws->funcs.dc_ip_request_cntl(dc, enable);
+}
+
+void hwss_dccg_update_dpp_dto(union block_sequence_params *params)
+{
+ struct dccg *dccg = params->dccg_update_dpp_dto_params.dccg;
+ int dpp_inst = params->dccg_update_dpp_dto_params.dpp_inst;
+ int dppclk_khz = params->dccg_update_dpp_dto_params.dppclk_khz;
+
+ if (dccg && dccg->funcs->update_dpp_dto)
+ dccg->funcs->update_dpp_dto(dccg, dpp_inst, dppclk_khz);
+}
+
+void hwss_hubp_vtg_sel(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_vtg_sel_params.hubp;
+ uint32_t otg_inst = params->hubp_vtg_sel_params.otg_inst;
+
+ if (hubp && hubp->funcs->hubp_vtg_sel)
+ hubp->funcs->hubp_vtg_sel(hubp, otg_inst);
+}
+
+void hwss_hubp_setup2(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup2_params.hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs = params->hubp_setup2_params.hubp_regs;
+ union dml2_global_sync_programming *global_sync = params->hubp_setup2_params.global_sync;
+ struct dc_crtc_timing *timing = params->hubp_setup2_params.timing;
+
+ if (hubp && hubp->funcs->hubp_setup2)
+ hubp->funcs->hubp_setup2(hubp, hubp_regs, global_sync, timing);
+}
+
+void hwss_hubp_setup(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup_params.hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = params->hubp_setup_params.dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs = params->hubp_setup_params.ttu_regs;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = params->hubp_setup_params.rq_regs;
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest = params->hubp_setup_params.pipe_dest;
+
+ if (hubp && hubp->funcs->hubp_setup)
+ hubp->funcs->hubp_setup(hubp, dlg_regs, ttu_regs, rq_regs, pipe_dest);
+}
+
+void hwss_hubp_set_unbounded_requesting(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_unbounded_requesting_params.hubp;
+ bool unbounded_req = params->hubp_set_unbounded_requesting_params.unbounded_req;
+
+ if (hubp && hubp->funcs->set_unbounded_requesting)
+ hubp->funcs->set_unbounded_requesting(hubp, unbounded_req);
+}
+
+void hwss_hubp_setup_interdependent2(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup_interdependent2_params.hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs = params->hubp_setup_interdependent2_params.hubp_regs;
+
+ if (hubp && hubp->funcs->hubp_setup_interdependent2)
+ hubp->funcs->hubp_setup_interdependent2(hubp, hubp_regs);
+}
+
+void hwss_hubp_setup_interdependent(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup_interdependent_params.hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = params->hubp_setup_interdependent_params.dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs = params->hubp_setup_interdependent_params.ttu_regs;
+
+ if (hubp && hubp->funcs->hubp_setup_interdependent)
+ hubp->funcs->hubp_setup_interdependent(hubp, dlg_regs, ttu_regs);
+}
+
+void hwss_dpp_set_cursor_matrix(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_set_cursor_matrix_params.dpp;
+ enum dc_color_space color_space = params->dpp_set_cursor_matrix_params.color_space;
+ struct dc_csc_transform *cursor_csc_color_matrix = params->dpp_set_cursor_matrix_params.cursor_csc_color_matrix;
+
+ if (dpp && dpp->funcs->set_cursor_matrix)
+ dpp->funcs->set_cursor_matrix(dpp, color_space, *cursor_csc_color_matrix);
+}
+
+void hwss_mpc_update_mpcc(union block_sequence_params *params)
+{
+ struct dc *dc = params->mpc_update_mpcc_params.dc;
+ struct pipe_ctx *pipe_ctx = params->mpc_update_mpcc_params.pipe_ctx;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (hws->funcs.update_mpcc)
+ hws->funcs.update_mpcc(dc, pipe_ctx);
+}
+
+void hwss_mpc_update_blending(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_update_blending_params.mpc;
+ struct mpcc_blnd_cfg *blnd_cfg = &params->mpc_update_blending_params.blnd_cfg;
+ int mpcc_id = params->mpc_update_blending_params.mpcc_id;
+
+ if (mpc && mpc->funcs->update_blending)
+ mpc->funcs->update_blending(mpc, blnd_cfg, mpcc_id);
+}
+
+void hwss_mpc_assert_idle_mpcc(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_assert_idle_mpcc_params.mpc;
+ int mpcc_id = params->mpc_assert_idle_mpcc_params.mpcc_id;
+
+ if (mpc && mpc->funcs->wait_for_idle)
+ mpc->funcs->wait_for_idle(mpc, mpcc_id);
+}
+
+void hwss_mpc_insert_plane(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_insert_plane_params.mpc;
+ struct mpc_tree *tree = params->mpc_insert_plane_params.mpc_tree_params;
+ struct mpcc_blnd_cfg *blnd_cfg = &params->mpc_insert_plane_params.blnd_cfg;
+ struct mpcc_sm_cfg *sm_cfg = params->mpc_insert_plane_params.sm_cfg;
+ struct mpcc *insert_above_mpcc = params->mpc_insert_plane_params.insert_above_mpcc;
+ int mpcc_id = params->mpc_insert_plane_params.mpcc_id;
+ int dpp_id = params->mpc_insert_plane_params.dpp_id;
+
+ if (mpc && mpc->funcs->insert_plane)
+ mpc->funcs->insert_plane(mpc, tree, blnd_cfg, sm_cfg, insert_above_mpcc,
+ dpp_id, mpcc_id);
+}
+
+void hwss_dpp_set_scaler(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_set_scaler_params.dpp;
+ const struct scaler_data *scl_data = params->dpp_set_scaler_params.scl_data;
+
+ if (dpp && dpp->funcs->dpp_set_scaler)
+ dpp->funcs->dpp_set_scaler(dpp, scl_data);
+}
+
+void hwss_hubp_mem_program_viewport(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_mem_program_viewport_params.hubp;
+ const struct rect *viewport = params->hubp_mem_program_viewport_params.viewport;
+ const struct rect *viewport_c = params->hubp_mem_program_viewport_params.viewport_c;
+
+ if (hubp && hubp->funcs->mem_program_viewport)
+ hubp->funcs->mem_program_viewport(hubp, viewport, viewport_c);
+}
+
+void hwss_abort_cursor_offload_update(union block_sequence_params *params)
+{
+ struct dc *dc = params->abort_cursor_offload_update_params.dc;
+ struct pipe_ctx *pipe_ctx = params->abort_cursor_offload_update_params.pipe_ctx;
+
+ if (dc && dc->hwss.abort_cursor_offload_update)
+ dc->hwss.abort_cursor_offload_update(dc, pipe_ctx);
+}
+
+void hwss_set_cursor_attribute(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_cursor_attribute_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_cursor_attribute_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_cursor_attribute)
+ dc->hwss.set_cursor_attribute(pipe_ctx);
+}
+
+void hwss_set_cursor_position(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_cursor_position_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_cursor_position_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_cursor_position)
+ dc->hwss.set_cursor_position(pipe_ctx);
+}
+
+void hwss_set_cursor_sdr_white_level(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_cursor_sdr_white_level_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_cursor_sdr_white_level_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_cursor_sdr_white_level)
+ dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+}
+
+void hwss_program_output_csc(union block_sequence_params *params)
+{
+ struct dc *dc = params->program_output_csc_params.dc;
+ struct pipe_ctx *pipe_ctx = params->program_output_csc_params.pipe_ctx;
+ enum dc_color_space colorspace = params->program_output_csc_params.colorspace;
+ uint16_t *matrix = params->program_output_csc_params.matrix;
+ int opp_id = params->program_output_csc_params.opp_id;
+
+ if (dc && dc->hwss.program_output_csc)
+ dc->hwss.program_output_csc(dc, pipe_ctx, colorspace, matrix, opp_id);
+}
+
+void hwss_hubp_set_blank(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_blank_params.hubp;
+ bool blank = params->hubp_set_blank_params.blank;
+
+ if (hubp && hubp->funcs->set_blank)
+ hubp->funcs->set_blank(hubp, blank);
+}
+
+void hwss_phantom_hubp_post_enable(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->phantom_hubp_post_enable_params.hubp;
+
+ if (hubp && hubp->funcs->phantom_hubp_post_enable)
+ hubp->funcs->phantom_hubp_post_enable(hubp);
+}
+
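+/*
+ * The hwss_add_* helpers below each append one step to a hardware
+ * block sequence: they record a block function ID plus its parameters
+ * in seq_state->steps[] and advance *seq_state->num_steps, silently
+ * dropping the step once MAX_HWSS_BLOCK_SEQUENCE_SIZE is reached.
+ *
+ * A minimal usage sketch (hypothetical caller; the sequence build and
+ * execute plumbing lives elsewhere in the hwseq code):
+ *
+ * unsigned int num_steps = 0;
+ * struct block_sequence_state seq_state = {
+ * .steps = block_sequence,
+ * .num_steps = &num_steps,
+ * };
+ *
+ * hwss_add_hubp_set_blank(&seq_state, hubp, true);
+ * hwss_add_dpp_dppclk_control(&seq_state, dpp, false, false);
+ * hwss_execute_sequence(dc, block_sequence, num_steps);
+ */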
+void hwss_add_dccg_set_dto_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg, int inst, int num_slices_h)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DCCG_SET_DTO_DSCCLK;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.dccg = dccg;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.inst = inst;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.num_slices_h = num_slices_h;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_calculate_and_set_config(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx, bool enable, int opp_cnt)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_CALCULATE_AND_SET_CONFIG;
+ seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.enable = enable;
+ seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.opp_cnt = opp_cnt;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mpc_remove_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc, struct mpc_tree *mpc_tree_params, struct mpcc *mpcc_to_remove)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_REMOVE_MPCC;
+ seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpc_tree_params = mpc_tree_params;
+ seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpcc_to_remove = mpcc_to_remove;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_opp_set_mpcc_disconnect_pending(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, int mpcc_inst, bool pending)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_SET_MPCC_DISCONNECT_PENDING;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.mpcc_inst = mpcc_inst;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.pending = pending;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_disconnect(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_DISCONNECT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_disconnect_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_enable_with_opp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_ENABLE_WITH_OPP;
+ seq_state->steps[*seq_state->num_steps].params.dsc_enable_with_opp_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_tg_set_dsc_config(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, struct dsc_optc_config *dsc_optc_cfg, bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_DSC_CONFIG;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.dsc_optc_cfg = dsc_optc_cfg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_disconnect(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_DISCONNECT;
+ seq_state->steps[*seq_state->num_steps].params.dsc_disconnect_params.dsc = dsc;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dc_set_optimized_required(struct block_sequence_state *seq_state,
+ struct dc *dc, bool optimized_required)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DC_SET_OPTIMIZED_REQUIRED;
+ seq_state->steps[*seq_state->num_steps].params.dc_set_optimized_required_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.dc_set_optimized_required_params.optimized_required = optimized_required;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_abm_set_immediate_disable(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = ABM_SET_IMMEDIATE_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_immediate_disable_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_immediate_disable_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_opp_set_disp_pattern_generator(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
+ enum dc_color_depth color_depth,
+ struct tg_color solid_color,
+ bool use_solid_color,
+ int width,
+ int height,
+ int offset)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_SET_DISP_PATTERN_GENERATOR;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.test_pattern = test_pattern;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.color_space = color_space;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.color_depth = color_depth;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.solid_color = solid_color;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.use_solid_color = use_solid_color;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.width = width;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.height = height;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.offset = offset;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC update blending to block sequence
+ */
+void hwss_add_mpc_update_blending(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpcc_blnd_cfg blnd_cfg,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_UPDATE_BLENDING;
+ seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.blnd_cfg = blnd_cfg;
+ seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC insert plane to block sequence
+ */
+void hwss_add_mpc_insert_plane(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpc_tree *mpc_tree_params,
+ struct mpcc_blnd_cfg blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_INSERT_PLANE;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpc_tree_params = mpc_tree_params;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.blnd_cfg = blnd_cfg;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.sm_cfg = sm_cfg;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.insert_above_mpcc = insert_above_mpcc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.dpp_id = dpp_id;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC assert idle MPCC to block sequence
+ */
+void hwss_add_mpc_assert_idle_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_ASSERT_IDLE_MPCC;
+ seq_state->steps[*seq_state->num_steps].params.mpc_assert_idle_mpcc_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_assert_idle_mpcc_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP set blank to block sequence
+ */
+void hwss_add_hubp_set_blank(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool blank)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_BLANK;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_params.blank = blank;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_opp_program_bit_depth_reduction(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool use_default_params,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_BIT_DEPTH_REDUCTION;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.use_default_params = use_default_params;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dc_ip_request_cntl(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DC_IP_REQUEST_CNTL;
+ seq_state->steps[*seq_state->num_steps].params.dc_ip_request_cntl_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.dc_ip_request_cntl_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dwbc_update(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DWBC_UPDATE;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_update_params.dwb = dwb;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_update_params.dwb_params = dwb_params;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_config_buf(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_buf_params *mcif_buf_params,
+ unsigned int dest_height)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_CONFIG_BUF;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.mcif_wb = mcif_wb;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.mcif_buf_params = mcif_buf_params;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.dest_height = dest_height;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_config_arb(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_arb_params *mcif_arb_params)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_CONFIG_ARB;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_arb_params.mcif_wb = mcif_wb;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_arb_params.mcif_arb_params = mcif_arb_params;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_enable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_ENABLE;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_enable_params.mcif_wb = mcif_wb;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_disable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_disable_params.mcif_wb = mcif_wb;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mpc_set_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int dwb_id,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_SET_DWB_MUX;
+ seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.dwb_id = dwb_id;
+ seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mpc_disable_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ unsigned int dwb_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_DISABLE_DWB_MUX;
+ seq_state->steps[*seq_state->num_steps].params.mpc_disable_dwb_mux_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_disable_dwb_mux_params.dwb_id = dwb_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dwbc_enable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DWBC_ENABLE;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_enable_params.dwb = dwb;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_enable_params.dwb_params = dwb_params;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dwbc_disable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DWBC_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_disable_params.dwb = dwb;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_tg_set_gsl(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct gsl_params gsl)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_GSL;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_params.gsl = gsl;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_tg_set_gsl_source_select(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int group_idx,
+ uint32_t gsl_ready_signal)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_GSL_SOURCE_SELECT;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.group_idx = group_idx;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.gsl_ready_signal = gsl_ready_signal;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_update_mall_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t mall_sel,
+ bool cache_cursor)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_UPDATE_MALL_SEL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.mall_sel = mall_sel;
+ seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.cache_cursor = cache_cursor;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_prepare_subvp_buffering(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PREPARE_SUBVP_BUFFERING;
+ seq_state->steps[*seq_state->num_steps].params.hubp_prepare_subvp_buffering_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_prepare_subvp_buffering_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_set_blank_en(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_BLANK_EN;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_en_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_en_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_disable_control(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool disable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_DISABLE_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_disable_control_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_disable_control_params.disable = disable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubbub_soft_reset(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset),
+ bool reset)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_SOFT_RESET;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.hubbub_soft_reset = hubbub_soft_reset;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.reset = reset;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_clk_cntl(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_CLK_CNTL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_clk_cntl_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_clk_cntl_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_dppclk_control(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ bool dppclk_div,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_DPPCLK_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.dppclk_div = dppclk_div;
+ seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_disable_phantom_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DISABLE_PHANTOM_CRTC;
+ seq_state->steps[*seq_state->num_steps].params.disable_phantom_crtc_params.tg = tg;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_pg_status(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ int dsc_inst,
+ bool is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_PG_STATUS;
+ seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.dsc_inst = dsc_inst;
+ seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_wait_disconnect_pending_clear(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_WAIT_DISCONNECT_PENDING_CLEAR;
+ seq_state->steps[*seq_state->num_steps].params.dsc_wait_disconnect_pending_clear_params.dsc = dsc;
+ seq_state->steps[*seq_state->num_steps].params.dsc_wait_disconnect_pending_clear_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_disable(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.dsc_disable_params.dsc = dsc;
+ seq_state->steps[*seq_state->num_steps].params.dsc_disable_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dccg_set_ref_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dsc_inst,
+ bool *is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DCCG_SET_REF_DSCCLK;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.dccg = dccg;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.dsc_inst = dsc_inst;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_root_clock_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool clock_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_ROOT_CLOCK_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.dpp_inst = dpp_inst;
+ seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.clock_on = clock_on;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_PG_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.dpp_inst = dpp_inst;
+ seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.power_on = power_on;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PG_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.hubp_inst = hubp_inst;
+ seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.power_on = power_on;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_init(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_INIT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_init_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_reset(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_RESET;
+ seq_state->steps[*seq_state->num_steps].params.hubp_reset_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_reset(struct block_sequence_state *seq_state,
+ struct dpp *dpp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_RESET;
+ seq_state->steps[*seq_state->num_steps].params.dpp_reset_params.dpp = dpp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_opp_pipe_clock_control(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_PIPE_CLOCK_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.opp_pipe_clock_control_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_pipe_clock_control_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_set_vm_system_aperture_settings(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint64_t sys_default,
+ uint64_t sys_low,
+ uint64_t sys_high)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_default.quad_part = sys_default;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_low.quad_part = sys_low;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_high.quad_part = sys_high;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_set_flip_int(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_FLIP_INT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_flip_int_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dccg_update_dpp_dto(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dpp_inst,
+ int dppclk_khz)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DCCG_UPDATE_DPP_DTO;
+ seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dccg = dccg;
+ seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dpp_inst = dpp_inst;
+ seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dppclk_khz = dppclk_khz;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_vtg_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t otg_inst)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_VTG_SEL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_vtg_sel_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_vtg_sel_params.otg_inst = otg_inst;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs,
+ union dml2_global_sync_programming *global_sync,
+ struct dc_crtc_timing *timing)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP2;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.hubp_regs = hubp_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.global_sync = global_sync;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.timing = timing;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.dlg_regs = dlg_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.ttu_regs = ttu_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.rq_regs = rq_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.pipe_dest = pipe_dest;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_set_unbounded_requesting(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool unbounded_req)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_UNBOUNDED_REQUESTING;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_unbounded_requesting_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_unbounded_requesting_params.unbounded_req = unbounded_req;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup_interdependent2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP_INTERDEPENDENT2;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent2_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent2_params.hubp_regs = hubp_regs;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup_interdependent(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP_INTERDEPENDENT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.dlg_regs = dlg_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.ttu_regs = ttu_regs;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_program_surface_config(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ struct dc_tiling_info *tiling_info,
+ struct plane_size plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ int compat_level)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_SURFACE_CONFIG;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.format = format;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.tiling_info = tiling_info;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.plane_size = plane_size;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.rotation = rotation;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.dcc = dcc;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.horizontal_mirror = horizontal_mirror;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.compat_level = compat_level;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_setup_dpp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_SETUP_DPP;
+ seq_state->steps[*seq_state->num_steps].params.setup_dpp_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_set_cursor_matrix(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ enum dc_color_space color_space,
+ struct dc_csc_transform *cursor_csc_color_matrix)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_CURSOR_MATRIX;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.color_space = color_space;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.cursor_csc_color_matrix = cursor_csc_color_matrix;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_set_scaler(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ const struct scaler_data *scl_data)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_SCALER;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_scaler_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_scaler_params.scl_data = scl_data;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_mem_program_viewport(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_MEM_PROGRAM_VIEWPORT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.viewport = viewport;
+ seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.viewport_c = viewport_c;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_abort_cursor_offload_update(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = ABORT_CURSOR_OFFLOAD_UPDATE;
+ seq_state->steps[*seq_state->num_steps].params.abort_cursor_offload_update_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.abort_cursor_offload_update_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_set_cursor_attribute(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_ATTRIBUTE;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_attribute_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_attribute_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_set_cursor_position(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_POSITION;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_position_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_position_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_set_cursor_sdr_white_level(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_SDR_WHITE_LEVEL;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_sdr_white_level_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_sdr_white_level_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_program_output_csc(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix,
+ int opp_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = PROGRAM_OUTPUT_CSC;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.colorspace = colorspace;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.matrix = matrix;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.opp_id = opp_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_phantom_hubp_post_enable(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = PHANTOM_HUBP_POST_ENABLE;
+ seq_state->steps[*seq_state->num_steps].params.phantom_hubp_post_enable_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_update_force_pstate(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = UPDATE_FORCE_PSTATE;
+ seq_state->steps[*seq_state->num_steps].params.update_force_pstate_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_force_pstate_params.context = context;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubbub_apply_dedcn21_147_wa(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_APPLY_DEDCN21_147_WA;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_apply_dedcn21_147_wa_params.hubbub = hubbub;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubbub_allow_self_refresh_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ bool allow,
+ bool *disallow_self_refresh_applied)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_ALLOW_SELF_REFRESH_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.allow = allow;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied = disallow_self_refresh_applied;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_tg_get_frame_count(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ unsigned int *frame_count)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_GET_FRAME_COUNT;
+ seq_state->steps[*seq_state->num_steps].params.tg_get_frame_count_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_get_frame_count_params.frame_count = frame_count;
+ (*seq_state->num_steps)++;
+ }
+}
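
All of the hwss_add_* helpers above share one bounded-append pattern: check remaining capacity, record a function id plus its parameters, then bump the shared step counter. A minimal standalone sketch of that pattern, with hypothetical names rather than the real DC types:

#include <stdio.h>

/* Sketch only: hypothetical stand-ins for block_sequence_state and the
 * hwss_add_* helpers, showing the append-then-execute flow.
 */
#define MAX_SEQ 8

enum step_func { STEP_A, STEP_B };

struct step {
	enum step_func func;
	int param;
};

struct seq_state {
	struct step steps[MAX_SEQ];
	unsigned int *num_steps; /* shared counter, as in block_sequence_state */
};

static void seq_add(struct seq_state *s, enum step_func f, int param)
{
	if (*s->num_steps < MAX_SEQ) {
		s->steps[*s->num_steps].func = f;
		s->steps[*s->num_steps].param = param;
		(*s->num_steps)++;
	}
}

int main(void)
{
	unsigned int count = 0;
	struct seq_state s = { .num_steps = &count };

	seq_add(&s, STEP_A, 1);
	seq_add(&s, STEP_B, 2);

	/* execute phase: walk the recorded steps in order */
	for (unsigned int i = 0; i < count; i++)
		printf("step %u: func=%d param=%d\n", i, s.steps[i].func, s.steps[i].param);
	return 0;
}

Appends past MAX_SEQ are silently dropped, mirroring the capacity guard in every helper above.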
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index 814f68d76257..deb23d20bca6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -24,7 +24,7 @@
#include "link_enc_cfg.h"
#include "resource.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER dc->ctx->logger
@@ -522,10 +522,10 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_link(
struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc)
{
struct link_encoder *link_enc = NULL;
- enum engine_id encs_assigned[MAX_DIG_LINK_ENCODERS];
+ enum engine_id encs_assigned[MAX_LINK_ENCODERS];
int i;
- for (i = 0; i < MAX_DIG_LINK_ENCODERS; i++)
+ for (i = 0; i < MAX_LINK_ENCODERS; i++)
encs_assigned[i] = ENGINE_ID_UNKNOWN;
/* Add assigned encoders to list. */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index 71e15da4bb69..9acd30019717 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -33,8 +33,9 @@
* dc.h with detail interface documentation, then add function implementation
* in this file which calls link functions.
*/
-#include "link.h"
+#include "link_service.h"
#include "dce/dce_i2c.h"
+
struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
{
if (link_index >= MAX_LINKS)
@@ -515,7 +516,15 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
link->dc->link_srv->enable_hpd_filter(link, enable);
}
-bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
+enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx)
{
- return dc->link_srv->validate_dpia_bandwidth(streams, count);
+ return dc->link_srv->validate_dp_tunnel_bandwidth(dc, new_ctx);
}
+
+void dc_link_get_alpm_support(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support)
+{
+ link->dc->link_srv->edp_get_alpm_support(link, auxless_support, auxwake_support);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 3da25bd8b578..848c267ef11e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -40,7 +40,7 @@
#include "virtual/virtual_stream_encoder.h"
#include "dpcd_defs.h"
#include "link_enc_cfg.h"
-#include "link.h"
+#include "link_service.h"
#include "clk_mgr.h"
#include "dc_state_priv.h"
#include "dc_stream_priv.h"
@@ -95,11 +95,44 @@
#define DC_LOGGER \
dc->ctx->logger
#define DC_LOGGER_INIT(logger)
-
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#define UNABLE_TO_SPLIT -1
+static void capture_pipe_topology_data(struct dc *dc, int plane_idx, int slice_idx, int stream_idx,
+ int dpp_inst, int opp_inst, int tg_inst, bool is_phantom_pipe)
+{
+ struct pipe_topology_snapshot *current_snapshot = &dc->debug_data.topology_history.snapshots[dc->debug_data.topology_history.current_snapshot_index];
+
+ if (current_snapshot->line_count >= MAX_PIPES)
+ return;
+
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].is_phantom_pipe = is_phantom_pipe;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].plane_idx = plane_idx;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].slice_idx = slice_idx;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].stream_idx = stream_idx;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].dpp_inst = dpp_inst;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].opp_inst = opp_inst;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].tg_inst = tg_inst;
+
+ current_snapshot->line_count++;
+}
+
+static void start_new_topology_snapshot(struct dc *dc, struct dc_state *state)
+{
+ // Move to next snapshot slot (circular buffer)
+ dc->debug_data.topology_history.current_snapshot_index = (dc->debug_data.topology_history.current_snapshot_index + 1) % MAX_TOPOLOGY_SNAPSHOTS;
+
+ // Clear the new snapshot
+ struct pipe_topology_snapshot *current_snapshot = &dc->debug_data.topology_history.snapshots[dc->debug_data.topology_history.current_snapshot_index];
+ memset(current_snapshot, 0, sizeof(*current_snapshot));
+
+ // Set metadata
+ current_snapshot->timestamp_us = dm_get_timestamp(dc->ctx);
+ current_snapshot->stream_count = state->stream_count;
+ current_snapshot->phantom_stream_count = state->phantom_stream_count;
+}
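
start_new_topology_snapshot() advances a fixed-size ring of snapshots; a self-contained sketch of that circular-buffer behavior (names hypothetical, not the DC structs):

#include <string.h>
#include <stdio.h>

/* The index advances modulo the buffer size, the reused slot is zeroed,
 * and the oldest snapshot is silently overwritten once the buffer wraps.
 */
#define MAX_SNAPSHOTS 4

struct snapshot {
	unsigned long timestamp_us;
	int line_count;
};

struct history {
	struct snapshot snapshots[MAX_SNAPSHOTS];
	int current;
};

static struct snapshot *start_new_snapshot(struct history *h, unsigned long now_us)
{
	h->current = (h->current + 1) % MAX_SNAPSHOTS;
	memset(&h->snapshots[h->current], 0, sizeof(h->snapshots[h->current]));
	h->snapshots[h->current].timestamp_us = now_us;
	return &h->snapshots[h->current];
}

int main(void)
{
	struct history h = { 0 };

	for (unsigned long t = 1; t <= 6; t++)
		start_new_snapshot(&h, t * 1000);

	/* after six updates the 4-deep buffer holds timestamps 3000..6000 */
	for (int i = 0; i < MAX_SNAPSHOTS; i++)
		printf("slot %d: %lu us\n", i, h.snapshots[i].timestamp_us);
	return 0;
}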
+
enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
{
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
@@ -165,7 +198,13 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
case FAMILY_NV:
dc_version = DCN_VERSION_2_0;
- if (asic_id.chip_id == DEVICE_ID_NV_13FE || asic_id.chip_id == DEVICE_ID_NV_143F) {
+ if (asic_id.chip_id == DEVICE_ID_NV_13FE ||
+ asic_id.chip_id == DEVICE_ID_NV_143F ||
+ asic_id.chip_id == DEVICE_ID_NV_13F9 ||
+ asic_id.chip_id == DEVICE_ID_NV_13FA ||
+ asic_id.chip_id == DEVICE_ID_NV_13FB ||
+ asic_id.chip_id == DEVICE_ID_NV_13FC ||
+ asic_id.chip_id == DEVICE_ID_NV_13DB) {
dc_version = DCN_VERSION_2_01;
break;
}
@@ -441,6 +480,14 @@ bool resource_construct(
DC_ERR("DC: failed to create stream_encoder!\n");
pool->stream_enc_count++;
}
+
+ for (i = 0; i < caps->num_analog_stream_encoder; i++) {
+ pool->stream_enc[caps->num_stream_encoder + i] =
+ create_funcs->create_stream_encoder(ENGINE_ID_DACA + i, ctx);
+ if (pool->stream_enc[caps->num_stream_encoder + i] == NULL)
+ DC_ERR("DC: failed to create analog stream_encoder %d!\n", i);
+ pool->stream_enc_count++;
+ }
}
pool->hpo_dp_stream_enc_count = 0;
@@ -2143,7 +2190,7 @@ int resource_get_odm_slice_dst_width(struct pipe_ctx *otg_master,
h_active = timing->h_addressable +
timing->h_border_left +
timing->h_border_right +
- otg_master->hblank_borrow;
+ otg_master->dsc_padding_params.dsc_hactive_padding;
width = h_active / count;
if (otg_master->stream_res.tg)
@@ -2298,10 +2345,11 @@ bool resource_is_odm_topology_changed(const struct pipe_ctx *otg_master_a,
static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
int stream_idx, int slice_idx, int plane_idx, int slice_count,
- bool is_primary)
+ bool is_primary, bool is_phantom_pipe)
{
DC_LOGGER_INIT(dc->ctx->logger);
+ // log the topology line and capture the same data in compact bit-stored form for the snapshot history
if (slice_idx == 0 && plane_idx == 0 && is_primary) {
/* case 0 (OTG master pipe with plane) */
DC_LOG_DC(" | plane%d slice%d stream%d|",
@@ -2310,6 +2358,10 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
pipe->plane_res.dpp->inst,
pipe->stream_res.opp->inst,
pipe->stream_res.tg->inst);
+ capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else if (slice_idx == 0 && plane_idx == -1) {
/* case 1 (OTG master pipe without plane) */
DC_LOG_DC(" | slice%d stream%d|",
@@ -2318,6 +2370,10 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
pipe->stream_res.opp->inst,
pipe->stream_res.opp->inst,
pipe->stream_res.tg->inst);
+ capture_pipe_topology_data(dc, 0xF, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else if (slice_idx != 0 && plane_idx == 0 && is_primary) {
/* case 2 (OPP head pipe with plane) */
DC_LOG_DC(" | plane%d slice%d | |",
@@ -2325,27 +2381,43 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
DC_LOG_DC(" |DPP%d----OPP%d----| |",
pipe->plane_res.dpp->inst,
pipe->stream_res.opp->inst);
+ capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else if (slice_idx != 0 && plane_idx == -1) {
/* case 3 (OPP head pipe without plane) */
DC_LOG_DC(" | slice%d | |", slice_idx);
DC_LOG_DC(" |DPG%d----OPP%d----| |",
pipe->plane_res.dpp->inst,
pipe->stream_res.opp->inst);
+ capture_pipe_topology_data(dc, 0xF, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else if (slice_idx == slice_count - 1) {
/* case 4 (DPP pipe in last slice) */
DC_LOG_DC(" | plane%d | |", plane_idx);
DC_LOG_DC(" |DPP%d----| |",
pipe->plane_res.dpp->inst);
+ capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else {
/* case 5 (DPP pipe not in last slice) */
DC_LOG_DC(" | plane%d | | |", plane_idx);
DC_LOG_DC(" |DPP%d----| | |",
pipe->plane_res.dpp->inst);
+ capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
}
}
static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state,
- struct pipe_ctx *otg_master, int stream_idx)
+ struct pipe_ctx *otg_master, int stream_idx, bool is_phantom_pipe)
{
struct pipe_ctx *opp_heads[MAX_PIPES];
struct pipe_ctx *dpp_pipes[MAX_PIPES];
@@ -2371,12 +2443,12 @@ static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state,
resource_log_pipe(dc, dpp_pipes[dpp_idx],
stream_idx, slice_idx,
plane_idx, slice_count,
- is_primary);
+ is_primary, is_phantom_pipe);
}
} else {
resource_log_pipe(dc, opp_heads[slice_idx],
stream_idx, slice_idx, plane_idx,
- slice_count, true);
+ slice_count, true, is_phantom_pipe);
}
}
@@ -2407,6 +2479,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
struct pipe_ctx *otg_master;
int stream_idx, phantom_stream_idx;
DC_LOGGER_INIT(dc->ctx->logger);
+ bool is_phantom_pipe = false;
+
+ // Start a new snapshot for this topology update
+ start_new_topology_snapshot(dc, state);
DC_LOG_DC(" pipe topology update");
DC_LOG_DC(" ________________________");
@@ -2420,9 +2496,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
if (!otg_master)
continue;
- resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
+ resource_log_pipe_for_stream(dc, state, otg_master, stream_idx, is_phantom_pipe);
}
if (state->phantom_stream_count > 0) {
+ is_phantom_pipe = true;
DC_LOG_DC(" | (phantom pipes) |");
for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
if (state->stream_status[stream_idx].mall_stream_config.type != SUBVP_MAIN)
@@ -2435,7 +2512,7 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
if (!otg_master)
continue;
- resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
+ resource_log_pipe_for_stream(dc, state, otg_master, stream_idx, is_phantom_pipe);
}
}
DC_LOG_DC(" |________________________|\n");
@@ -2685,17 +2762,40 @@ static inline int find_fixed_dio_link_enc(const struct dc_link *link)
}
static inline int find_free_dio_link_enc(const struct resource_context *res_ctx,
- const struct dc_link *link, const struct resource_pool *pool)
+ const struct dc_link *link, const struct resource_pool *pool, struct dc_stream_state *stream)
{
- int i;
+ int i, j = -1;
+ int stream_enc_inst = -1;
int enc_count = pool->dig_link_enc_count;
- /* for dpia, check preferred encoder first and then the next one */
- for (i = 0; i < enc_count; i++)
- if (res_ctx->dio_link_enc_ref_cnts[(link->dpia_preferred_eng_id + i) % enc_count] == 0)
- break;
+ /* Find stream encoder instance for the stream */
+ if (stream) {
+ for (i = 0; i < pool->pipe_count; i++) {
+ if ((res_ctx->pipe_ctx[i].stream == stream) &&
+ (res_ctx->pipe_ctx[i].stream_res.stream_enc != NULL)) {
+ stream_enc_inst = res_ctx->pipe_ctx[i].stream_res.stream_enc->id;
+ break;
+ }
+ }
+ }
+
+ /* Assign dpia preferred > stream enc instance > available */
+ for (i = 0; i < enc_count; i++) {
+ if (res_ctx->dio_link_enc_ref_cnts[i] == 0) {
+ if (j == -1)
+ j = i;
+
+ if (link->dpia_preferred_eng_id == i) {
+ j = i;
+ break;
+ }
- return (i >= 0 && i < enc_count) ? (link->dpia_preferred_eng_id + i) % enc_count : -1;
+ if (stream_enc_inst == i) {
+ j = stream_enc_inst;
+ }
+ }
+ }
+ return j;
}
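
The reworked find_free_dio_link_enc() above scans once and keeps the best free candidate seen so far: the DPIA-preferred instance wins outright, an instance matching the stream encoder beats plain first-free, and the first free instance is the fallback. A standalone sketch of that priority scan (hypothetical helper, not the DC API):

#include <stdio.h>

static int pick_encoder(const int ref_cnts[], int count, int preferred, int stream_inst)
{
	int i, pick = -1;

	for (i = 0; i < count; i++) {
		if (ref_cnts[i] != 0)
			continue; /* in use */
		if (pick == -1)
			pick = i; /* first free fallback */
		if (i == preferred)
			return i; /* highest priority: take it immediately */
		if (i == stream_inst)
			pick = i; /* better than plain first-free, keep scanning */
	}
	return pick;
}

int main(void)
{
	int refs[] = { 1, 0, 0, 0 };

	/* preferred enc 0 is busy, stream enc 2 is free -> picks 2 */
	printf("picked %d\n", pick_encoder(refs, 4, 0, 2));
	return 0;
}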
static inline void acquire_dio_link_enc(
@@ -2776,7 +2876,7 @@ static bool add_dio_link_enc_to_ctx(const struct dc *dc,
retain_dio_link_enc(res_ctx, enc_index);
} else {
if (stream->link->is_dig_mapping_flexible)
- enc_index = find_free_dio_link_enc(res_ctx, stream->link, pool);
+ enc_index = find_free_dio_link_enc(res_ctx, stream->link, pool, stream);
else {
int link_index = 0;
@@ -2786,7 +2886,7 @@ static bool add_dio_link_enc_to_ctx(const struct dc *dc,
* one into the acquiring link.
*/
if (enc_index >= 0 && is_dio_enc_acquired_by_other_link(stream->link, enc_index, &link_index)) {
- int new_enc_index = find_free_dio_link_enc(res_ctx, dc->links[link_index], pool);
+ int new_enc_index = find_free_dio_link_enc(res_ctx, dc->links[link_index], pool, stream);
if (new_enc_index >= 0)
swap_dio_link_enc_to_muxable_ctx(context, pool, new_enc_index, enc_index);
@@ -3940,7 +4040,9 @@ enum dc_status resource_map_pool_resources(
/* TODO: Add check if ASIC support and EDID audio */
if (!stream->converter_disable_audio &&
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
- stream->audio_info.mode_count && stream->audio_info.flags.all) {
+ stream->audio_info.mode_count &&
+ (stream->audio_info.flags.all ||
+ (stream->sink && stream->sink->edid_caps.panel_patch.skip_audio_sab_check))) {
pipe_ctx->stream_res.audio = find_first_free_audio(
&context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version);
@@ -4053,7 +4155,7 @@ static bool add_all_planes_for_stream(
* @set: An array of dc_validation_set with all the current streams reference
* @set_count: Total of streams
* @context: New context
- * @fast_validate: Enable or disable fast validation
+ * @validate_mode: identify the validation mode
*
* This function updates the potential new stream in the context object. It
* creates multiple lists for the add, remove, and unchanged streams. In
@@ -4068,7 +4170,7 @@ enum dc_status dc_validate_with_context(struct dc *dc,
const struct dc_validation_set set[],
int set_count,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
struct dc_stream_state *unchanged_streams[MAX_PIPES] = { 0 };
struct dc_stream_state *del_streams[MAX_PIPES] = { 0 };
@@ -4242,7 +4344,7 @@ enum dc_status dc_validate_with_context(struct dc *dc,
dc_state_set_stream_subvp_cursor_limit(context->streams[i], context, false);
}
- res = dc_validate_global_state(dc, context, fast_validate);
+ res = dc_validate_global_state(dc, context, validate_mode);
 /* calculate pixel rate divider after deciding pixel clock & odm combine */
if ((dc->hwss.calculate_pix_rate_divider) && (res == DC_OK)) {
@@ -4259,39 +4361,33 @@ fail:
return res;
}
+#if defined(CONFIG_DRM_AMD_DC_FP)
+#endif /* CONFIG_DRM_AMD_DC_FP */
+
/**
- * decide_hblank_borrow - Decides the horizontal blanking borrow value for a given pipe context.
+ * calculate_timing_params_for_dsc_with_padding - Calculates timing parameters for DSC with padding.
* @pipe_ctx: Pointer to the pipe context structure.
*
- * This function calculates the horizontal blanking borrow value for a given pipe context based on the
+ * This function calculates the timing parameters for a given pipe context based on the
* display stream compression (DSC) configuration. If the horizontal active pixels (hactive) are less
- * than the total width of the DSC slices, it sets the hblank_borrow value to the difference. If the
- * total horizontal timing minus the hblank_borrow value is less than 32, it resets the hblank_borrow
+ * than the total width of the DSC slices, it sets the dsc_hactive_padding value to the difference. If the
+ * total horizontal timing minus the dsc_hactive_padding value is less than 32, it resets the dsc_hactive_padding
* value to 0.
*/
-static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx)
+static void calculate_timing_params_for_dsc_with_padding(struct pipe_ctx *pipe_ctx)
{
- uint32_t hactive;
- uint32_t ceil_slice_width;
struct dc_stream_state *stream = NULL;
if (!pipe_ctx)
return;
stream = pipe_ctx->stream;
+ pipe_ctx->dsc_padding_params.dsc_hactive_padding = 0;
+ pipe_ctx->dsc_padding_params.dsc_htotal_padding = 0;
- if (stream->timing.flags.DSC) {
- hactive = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
-
- /* Assume if determined slices does not divide Hactive evenly, Hborrow is needed for padding*/
- if (hactive % stream->timing.dsc_cfg.num_slices_h != 0) {
- ceil_slice_width = (hactive / stream->timing.dsc_cfg.num_slices_h) + 1;
- pipe_ctx->hblank_borrow = ceil_slice_width * stream->timing.dsc_cfg.num_slices_h - hactive;
+ if (stream)
+ pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz = stream->timing.pix_clk_100hz;
- if (stream->timing.h_total - hactive - pipe_ctx->hblank_borrow < 32)
- pipe_ctx->hblank_borrow = 0;
- }
- }
}
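
The padding rule described in the comment above works out numerically as follows; a hedged standalone sketch with a hypothetical helper name (the rule as documented, not the DC implementation):

#include <stdio.h>

/* When hactive is not evenly divisible by the DSC slice count, pad hactive
 * up to the next multiple of the slice count, unless that would leave less
 * than 32 pixels of horizontal blanking.
 */
static unsigned int dsc_hactive_padding(unsigned int hactive,
					unsigned int num_slices_h,
					unsigned int h_total)
{
	unsigned int ceil_slice_width, padding;

	if (num_slices_h == 0 || hactive % num_slices_h == 0)
		return 0;

	ceil_slice_width = hactive / num_slices_h + 1;
	padding = ceil_slice_width * num_slices_h - hactive;

	if (h_total - hactive - padding < 32)
		padding = 0;

	return padding;
}

int main(void)
{
	/* 1366 active pixels over 4 slices: slices of 342 need 2 pixels of
	 * padding (342 * 4 - 1366 = 2); 1500 - 1366 - 2 = 132 >= 32, so keep it.
	 */
	printf("padding = %u\n", dsc_hactive_padding(1366, 4, 1500));
	return 0;
}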
/**
@@ -4299,7 +4395,7 @@ static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx)
*
* @dc: dc struct for this driver
* @new_ctx: state to be validated
- * @fast_validate: set to true if only yes/no to support matters
+ * @validate_mode: identify the validation mode
*
* Checks hardware resource availability and bandwidth requirement.
*
@@ -4309,7 +4405,7 @@ static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx)
enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
enum dc_status result = DC_ERROR_UNEXPECTED;
int i, j;
@@ -4334,7 +4430,7 @@ enum dc_status dc_validate_global_state(
/* Decide whether hblank borrow is needed and save it in pipe_ctx */
if (dc->debug.enable_hblank_borrow)
- decide_hblank_borrow(pipe_ctx);
+ calculate_timing_params_for_dsc_with_padding(pipe_ctx);
if (dc->res_pool->funcs->patch_unknown_plane_state &&
pipe_ctx->plane_state &&
@@ -4368,7 +4464,7 @@ enum dc_status dc_validate_global_state(
result = resource_build_scaling_params_for_context(dc, new_ctx);
if (result == DC_OK)
- result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate);
+ result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, validate_mode);
return result;
}
@@ -4409,8 +4505,14 @@ static void set_avi_info_frame(
unsigned int fr_ind = pipe_ctx->stream->timing.fr_index;
enum dc_timing_3d_format format;
+ if (stream->avi_infopacket.valid) {
+ *info_packet = stream->avi_infopacket;
+ return;
+ }
+
memset(&hdmi_info, 0, sizeof(union hdmi_info_packet));
+
color_space = pipe_ctx->stream->output_color_space;
if (color_space == COLOR_SPACE_UNKNOWN)
color_space = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ?
@@ -5194,7 +5296,7 @@ struct link_encoder *get_temp_dio_link_enc(
enc_index = link->eng_id;
if (enc_index < 0)
- enc_index = find_free_dio_link_enc(res_ctx, link, pool);
+ enc_index = find_free_dio_link_enc(res_ctx, link, pool, NULL);
if (enc_index >= 0)
link_enc = pool->link_encoders[enc_index];
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
index fe9f99f1bdf9..f976ffd6d466 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
@@ -65,7 +65,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification
notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION ||
notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) {
notify->link_index =
- get_link_index_from_dpia_port_index(dc, notify->link_index);
+ get_link_index_from_dpia_port_index(dc, notify->instance);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 4db7383720fd..2de8ef4a58ec 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -35,8 +35,8 @@
#include "link_enc_cfg.h"
#if defined(CONFIG_DRM_AMD_DC_FP)
-#include "dml2/dml2_wrapper.h"
-#include "dml2/dml2_internal_types.h"
+#include "dml2_0/dml2_wrapper.h"
+#include "dml2_0/dml2_internal_types.h"
#endif
#define DC_LOGGER \
@@ -194,11 +194,6 @@ static void init_state(struct dc *dc, struct dc_state *state)
struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params)
{
struct dc_state *state;
-#ifdef CONFIG_DRM_AMD_DC_FP
- struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp;
-
- memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
-#endif
state = kvzalloc(sizeof(struct dc_state), GFP_KERNEL);
@@ -211,14 +206,12 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p
#ifdef CONFIG_DRM_AMD_DC_FP
if (dc->debug.using_dml2) {
- dml2_opt->use_clock_dc_limits = false;
- if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2)) {
+ if (!dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2)) {
dc_state_release(state);
return NULL;
}
- dml2_opt->use_clock_dc_limits = true;
- if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source)) {
+ if (dc->caps.dcmode_power_limits_present && !dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) {
dc_state_release(state);
return NULL;
}
@@ -434,6 +427,8 @@ enum dc_status dc_state_remove_stream(
return DC_ERROR_UNEXPECTED;
}
+ dc_stream_release_3dlut_for_stream(dc, stream);
+
dc_stream_release(state->streams[i]);
state->stream_count--;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index b883fb24fa12..129cd5f84983 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -224,6 +224,14 @@ struct dc_stream_status *dc_stream_get_status(
return dc_state_get_stream_status(dc->current_state, stream);
}
+const struct dc_stream_status *dc_stream_get_status_const(
+ const struct dc_stream_state *stream)
+{
+ struct dc *dc = stream->ctx->dc;
+
+ return dc_state_get_stream_status(dc->current_state, stream);
+}
+
void program_cursor_attributes(
struct dc *dc,
struct dc_stream_state *stream)
@@ -231,6 +239,7 @@ void program_cursor_attributes(
int i;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
+ bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc);
if (!stream)
return;
@@ -245,9 +254,14 @@ void program_cursor_attributes(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
- dc->hwss.cursor_lock(dc, pipe_to_program, true);
- if (pipe_to_program->next_odm_pipe)
- dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true);
+
+ if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) {
+ dc->hwss.begin_cursor_offload_update(dc, pipe_ctx);
+ } else {
+ dc->hwss.cursor_lock(dc, pipe_to_program, true);
+ if (pipe_to_program->next_odm_pipe)
+ dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true);
+ }
}
dc->hwss.set_cursor_attribute(pipe_ctx);
@@ -255,12 +269,18 @@ void program_cursor_attributes(
dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
if (dc->hwss.set_cursor_sdr_white_level)
dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+ if (enable_cursor_offload && dc->hwss.update_cursor_offload_pipe)
+ dc->hwss.update_cursor_offload_pipe(dc, pipe_ctx);
}
if (pipe_to_program) {
- dc->hwss.cursor_lock(dc, pipe_to_program, false);
- if (pipe_to_program->next_odm_pipe)
- dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false);
+ if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update) {
+ dc->hwss.commit_cursor_offload_update(dc, pipe_to_program);
+ } else {
+ dc->hwss.cursor_lock(dc, pipe_to_program, false);
+ if (pipe_to_program->next_odm_pipe)
+ dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false);
+ }
}
}
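
program_cursor_attributes() and program_cursor_position() above now dispatch between the cursor-offload path and the legacy lock path depending on whether the optional hooks exist; a minimal sketch of that dispatch (hypothetical hook names, not the DC hwss table):

#include <stdbool.h>
#include <stdio.h>

struct hooks {
	void (*begin_offload)(void);
	void (*commit_offload)(void);
	void (*lock)(bool lock);
};

static void program_cursor(const struct hooks *h, bool offload_enabled)
{
	bool use_offload = offload_enabled && h->begin_offload && h->commit_offload;

	if (use_offload)
		h->begin_offload();
	else
		h->lock(true);

	printf("programming cursor registers\n");

	if (use_offload)
		h->commit_offload();
	else
		h->lock(false);
}

static void begin(void) { printf("begin offload update\n"); }
static void commit(void) { printf("commit offload update\n"); }
static void lock(bool l) { printf("cursor %s\n", l ? "lock" : "unlock"); }

int main(void)
{
	struct hooks h = { begin, commit, lock };

	program_cursor(&h, true);  /* offload path */
	program_cursor(&h, false); /* legacy lock path */
	return 0;
}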
@@ -316,6 +336,9 @@ bool dc_stream_set_cursor_attributes(
{
bool result = false;
+ if (!stream)
+ return false;
+
if (dc_stream_check_cursor_attributes(stream, stream->ctx->dc->current_state, attributes)) {
stream->cursor_attributes = *attributes;
result = true;
@@ -331,7 +354,10 @@ bool dc_stream_program_cursor_attributes(
struct dc *dc;
bool reset_idle_optimizations = false;
- dc = stream ? stream->ctx->dc : NULL;
+ if (!stream)
+ return false;
+
+ dc = stream->ctx->dc;
if (dc_stream_set_cursor_attributes(stream, attributes)) {
dc_z10_restore(dc);
@@ -360,6 +386,7 @@ void program_cursor_position(
int i;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
+ bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc);
if (!stream)
return;
@@ -378,16 +405,27 @@ void program_cursor_position(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
- dc->hwss.cursor_lock(dc, pipe_to_program, true);
+
+ if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update)
+ dc->hwss.begin_cursor_offload_update(dc, pipe_ctx);
+ else
+ dc->hwss.cursor_lock(dc, pipe_to_program, true);
}
dc->hwss.set_cursor_position(pipe_ctx);
+ if (enable_cursor_offload && dc->hwss.update_cursor_offload_pipe)
+ dc->hwss.update_cursor_offload_pipe(dc, pipe_ctx);
+
if (dc->ctx->dmub_srv)
dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
}
- if (pipe_to_program)
- dc->hwss.cursor_lock(dc, pipe_to_program, false);
+ if (pipe_to_program) {
+ if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update)
+ dc->hwss.commit_cursor_offload_update(dc, pipe_to_program);
+ else
+ dc->hwss.cursor_lock(dc, pipe_to_program, false);
+ }
}
bool dc_stream_set_cursor_position(
@@ -699,9 +737,14 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
{
uint8_t i;
bool ret = false;
- struct dc *dc = stream->ctx->dc;
- struct resource_context *res_ctx =
- &dc->current_state->res_ctx;
+ struct dc *dc;
+ struct resource_context *res_ctx;
+
+ if (!stream->ctx)
+ return false;
+
+ dc = stream->ctx->dc;
+ res_ctx = &dc->current_state->res_ctx;
dc_exit_ips_for_hw_access(dc);
@@ -849,11 +892,80 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
DC_LOG_DC(
- "\tdispname: %s signal: %x\n",
+ "\tsignal: %x dispname: %s manufacturer_id: 0x%x product_id: 0x%x\n",
+ stream->signal,
stream->sink->edid_caps.display_name,
- stream->signal);
+ stream->sink->edid_caps.manufacturer_id,
+ stream->sink->edid_caps.product_id);
+ }
+ }
+}
+
+/*
+ * dc_stream_get_3dlut_for_stream()
+ * Requirements:
+ * 1. If the stream already owns an RMCM instance, return it.
+ * 2. If it doesn't and we don't need to allocate one, return NULL.
+ * 3. If there is a free RMCM instance, assign it to the stream and return it.
+ * 4. If there are no free RMCM instances, return NULL.
+ */
+
+struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream,
+ bool allocate_one)
+{
+ unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts;
+
+ // see if one is allocated for this stream
+ for (int i = 0; i < num_rmcm; i++) {
+ if (dc->res_pool->rmcm_3dlut[i].isInUse &&
+ dc->res_pool->rmcm_3dlut[i].stream == stream)
+ return &dc->res_pool->rmcm_3dlut[i];
+ }
+
+ // not found, and the caller doesn't need to allocate one
+ if (!allocate_one)
+ return NULL;
+
+ // see if there is an unused 3D LUT instance; if so, allocate it
+ for (int i = 0; i < num_rmcm; i++) {
+ if (!dc->res_pool->rmcm_3dlut[i].isInUse) {
+ dc->res_pool->rmcm_3dlut[i].isInUse = true;
+ dc->res_pool->rmcm_3dlut[i].stream = stream;
+ return &dc->res_pool->rmcm_3dlut[i];
}
}
+
+ // no free 3D LUT available
+ return NULL;
+}
+
+
+void dc_stream_release_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream)
+{
+ struct dc_rmcm_3dlut *rmcm_3dlut =
+ dc_stream_get_3dlut_for_stream(dc, stream, false);
+
+ if (rmcm_3dlut) {
+ rmcm_3dlut->isInUse = false;
+ rmcm_3dlut->stream = NULL;
+ rmcm_3dlut->protection_bits = 0;
+ }
+}
+
+
+void dc_stream_init_rmcm_3dlut(struct dc *dc)
+{
+ unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts;
+
+ for (int i = 0; i < num_rmcm; i++) {
+ dc->res_pool->rmcm_3dlut[i].isInUse = false;
+ dc->res_pool->rmcm_3dlut[i].stream = NULL;
+ dc->res_pool->rmcm_3dlut[i].protection_bits = 0;
+ }
}
/*
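
The three RMCM helpers above implement a small claim/lookup/release pool; a self-contained sketch of the same pattern (hypothetical types, not the DC structs):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* lookup returns an already-owned entry, optionally claims a free one,
 * and release clears ownership so the instance can be reused.
 */
#define NUM_LUTS 2

struct lut_entry {
	bool in_use;
	const void *owner;
};

static struct lut_entry pool[NUM_LUTS];

static struct lut_entry *lut_get(const void *owner, bool allocate_one)
{
	int i;

	for (i = 0; i < NUM_LUTS; i++)
		if (pool[i].in_use && pool[i].owner == owner)
			return &pool[i];

	if (!allocate_one)
		return NULL;

	for (i = 0; i < NUM_LUTS; i++) {
		if (!pool[i].in_use) {
			pool[i].in_use = true;
			pool[i].owner = owner;
			return &pool[i];
		}
	}
	return NULL; /* pool exhausted */
}

static void lut_release(const void *owner)
{
	struct lut_entry *e = lut_get(owner, false);

	if (e) {
		e->in_use = false;
		e->owner = NULL;
	}
}

int main(void)
{
	int stream_a, stream_b;

	printf("a: %p\n", (void *)lut_get(&stream_a, true));
	printf("b: %p\n", (void *)lut_get(&stream_b, true));
	lut_release(&stream_a);
	printf("a again: %p\n", (void *)lut_get(&stream_a, false)); /* NULL after release */
	return 0;
}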
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 1d917be36fc4..29edfa51ea2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -42,18 +42,28 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#include "dmub/inc/dmub_cmd.h"
+#include "sspl/dc_spl_types.h"
+
struct abm_save_restore;
/* forward declaration */
struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
+struct dcn_hubbub_reg_state;
+struct dcn_hubp_reg_state;
+struct dcn_dpp_reg_state;
+struct dcn_mpc_reg_state;
+struct dcn_opp_reg_state;
+struct dcn_dsc_reg_state;
+struct dcn_optc_reg_state;
+struct dcn_dccg_reg_state;
-#define DC_VER "3.2.334"
+#define DC_VER "3.2.359"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -66,7 +76,11 @@ struct dmub_notification;
#define MAX_STREAMS 6
#define MIN_VIEWPORT_SIZE 12
#define MAX_NUM_EDP 2
-#define MAX_HOST_ROUTERS_NUM 2
+#define MAX_SUPPORTED_FORMATS 7
+
+#define MAX_HOST_ROUTERS_NUM 3
+#define MAX_DPIA_PER_HOST_ROUTER 3
+#define MAX_DPIA_NUM (MAX_HOST_ROUTERS_NUM * MAX_DPIA_PER_HOST_ROUTER)
/* Display Core Interfaces */
struct dc_versions {
@@ -192,6 +206,34 @@ struct dpp_color_caps {
struct rom_curve_caps ogam_rom_caps;
};
+/* Below structure is to describe the HW support for mem layout, extend support
+ range to match what OS could handle in the roadmap */
+struct lut3d_caps {
+ uint32_t dma_3d_lut : 1; /*< DMA mode support for 3D LUT */
+ struct {
+ uint32_t swizzle_3d_rgb : 1;
+ uint32_t swizzle_3d_bgr : 1;
+ uint32_t linear_1d : 1;
+ } mem_layout_support;
+ struct {
+ uint32_t unorm_12msb : 1;
+ uint32_t unorm_12lsb : 1;
+ uint32_t float_fp1_5_10 : 1;
+ } mem_format_support;
+ struct {
+ uint32_t order_rgba : 1;
+ uint32_t order_bgra : 1;
+ } mem_pixel_order_support;
+ /*< size options are 9, 17, 33, 45, 65 */
+ struct {
+ uint32_t dim_9 : 1; /* 3D LUT support for 9x9x9 */
+ uint32_t dim_17 : 1; /* 3D LUT support for 17x17x17 */
+ uint32_t dim_33 : 1; /* 3D LUT support for 33x33x33 */
+ uint32_t dim_45 : 1; /* 3D LUT support for 45x45x45 */
+ uint32_t dim_65 : 1; /* 3D LUT support for 65x65x65 */
+ } lut_dim_caps;
+};
+
/**
* struct mpc_color_caps - color pipeline capabilities for multiple pipe and
* plane combined blocks
@@ -200,17 +242,25 @@ struct dpp_color_caps {
* @ogam_ram: programmable out gamma LUT
* @ocsc: output color space conversion matrix
* @num_3dluts: MPC 3D LUT; always assumes a preceding shaper LUT
+ * @num_rmcm_3dluts: number of RMCM 3D LUTS; always assumes a preceding shaper LUT
* @shared_3d_lut: shared 3D LUT flag. Can be either DPP or MPC, but single
* instance
* @ogam_rom_caps: pre-definied curve caps for regamma 1D LUT
+ * @mcm_3d_lut_caps: HW support cap for MCM LUT memory
+ * @rmcm_3d_lut_caps: HW support cap for RMCM LUT memory
+ * @preblend: whether color manager supports preblend with MPC
*/
struct mpc_color_caps {
uint16_t gamut_remap : 1;
uint16_t ogam_ram : 1;
uint16_t ocsc : 1;
uint16_t num_3dluts : 3;
+ uint16_t num_rmcm_3dluts : 3;
uint16_t shared_3d_lut:1;
struct rom_curve_caps ogam_rom_caps;
+ struct lut3d_caps mcm_3d_lut_caps;
+ struct lut3d_caps rmcm_3d_lut_caps;
+ bool preblend;
};
/**
@@ -236,6 +286,15 @@ struct dc_scl_caps {
bool sharpener_support;
};
+struct dc_check_config {
+ /**
+ * max video plane width that can be safely assumed to be always
+ * supported by single DPP pipe.
+ */
+ unsigned int max_optimizable_video_width;
+ bool enable_legacy_fast_update;
+};
+
struct dc_caps {
uint32_t max_streams;
uint32_t max_links;
@@ -251,11 +310,6 @@ struct dc_caps {
unsigned int max_cursor_size;
unsigned int max_buffered_cursor_size;
unsigned int max_video_width;
- /*
- * max video plane width that can be safely assumed to be always
- * supported by single DPP pipe.
- */
- unsigned int max_optimizable_video_width;
unsigned int min_horizontal_blanking_period;
int linear_pitch_alignment;
bool dcc_const_color;
@@ -270,6 +324,7 @@ struct dc_caps {
bool dmcub_support;
bool zstate_support;
bool ips_support;
+ bool ips_v2_support;
uint32_t num_of_internal_disp;
enum dp_protocol_version max_dp_protocol_version;
unsigned int mall_size_per_mem_channel;
@@ -305,6 +360,10 @@ struct dc_caps {
/* Conservative limit for DCC cases which require ODM4:1 to support*/
uint32_t dcc_plane_width_limit;
struct dc_scl_caps scl_caps;
+ uint8_t num_of_host_routers;
+ uint8_t num_of_dpias_per_host_router;
+ /* limit of the ODM only, could be limited by other factors (like pipe count)*/
+ uint8_t max_odm_combine_factor;
};
struct dc_bug_wa {
@@ -408,6 +467,18 @@ enum surface_update_type {
UPDATE_TYPE_FULL, /* may need to shuffle resources */
};
+enum dc_lock_descriptor {
+ LOCK_DESCRIPTOR_NONE = 0x0,
+ LOCK_DESCRIPTOR_STREAM = 0x1,
+ LOCK_DESCRIPTOR_LINK = 0x2,
+ LOCK_DESCRIPTOR_GLOBAL = 0x4,
+};
+
+struct surface_update_descriptor {
+ enum surface_update_type update_type;
+ enum dc_lock_descriptor lock_descriptor;
+};
+
/* Forward declaration*/
struct dc;
struct dc_plane_state;
@@ -459,6 +530,7 @@ struct dc_config {
bool use_spl;
bool prefer_easf;
bool use_pipe_ctx_sync_logic;
+ int smart_mux_version;
bool ignore_dpref_ss;
bool enable_mipi_converter_optimization;
bool use_default_clock_table;
@@ -469,6 +541,7 @@ struct dc_config {
bool EnableMinDispClkODM;
bool enable_auto_dpm_test_logs;
unsigned int disable_ips;
+ unsigned int disable_ips_rcg;
unsigned int disable_ips_in_vpb;
bool disable_ips_in_dpms_off;
bool usb4_bw_alloc_support;
@@ -481,6 +554,9 @@ struct dc_config {
bool set_pipe_unlock_order;
bool enable_dpia_pre_training;
bool unify_link_enc_assignment;
+ bool enable_cursor_offload;
+ struct spl_sharpness_range dcn_sharpness_range;
+ struct spl_sharpness_range dcn_override_sharpness_range;
};
enum visual_confirm {
@@ -492,6 +568,7 @@ enum visual_confirm {
VISUAL_CONFIRM_SWAPCHAIN = 6,
VISUAL_CONFIRM_FAMS = 7,
VISUAL_CONFIRM_SWIZZLE = 9,
+ VISUAL_CONFIRM_SMARTMUX_DGPU = 10,
VISUAL_CONFIRM_REPLAY = 12,
VISUAL_CONFIRM_SUBVP = 14,
VISUAL_CONFIRM_MCLK_SWITCH = 16,
@@ -643,6 +720,15 @@ struct dc_clocks {
int idle_fclk_khz;
int subvp_prefetch_dramclk_khz;
int subvp_prefetch_fclk_khz;
+
+ /* Stutter efficiencies are technically not clock values, but they are
+ * stored here so that they travel with the update_clocks call, similar
+ * to num_ways. Efficiencies are stored as a percentage (0-100).
+ */
+ struct {
+ uint8_t base_efficiency; //LP1
+ uint8_t low_power_efficiency; //LP2
+ } stutter_efficiency;
};
struct dc_bw_validation_profile {
@@ -770,6 +856,7 @@ enum pg_hw_resources {
PG_DCHVM,
PG_DWB,
PG_HPO,
+ PG_DCOH,
PG_HW_RESOURCES_NUM_ELEMENT
};
@@ -786,10 +873,8 @@ union dpia_debug_options {
uint32_t disable_mst_dsc_work_around:1; /* bit 3 */
uint32_t enable_force_tbt3_work_around:1; /* bit 4 */
uint32_t disable_usb4_pm_support:1; /* bit 5 */
- uint32_t enable_consolidated_dpia_dp_lt:1; /* bit 6 */
- uint32_t enable_dpia_pre_training:1; /* bit 7 */
- uint32_t unify_link_enc_assignment:1; /* bit 8 */
- uint32_t reserved:24;
+ uint32_t enable_usb4_bw_zero_alloc_patch:1; /* bit 6 */
+ uint32_t reserved:25;
} bits;
uint32_t raw;
};
@@ -814,6 +899,7 @@ struct dc_debug_data {
uint32_t ltFailCount;
uint32_t i2cErrorCount;
uint32_t auxErrorCount;
+ struct pipe_topology_history topology_history;
};
struct dc_phy_addr_space_config {
@@ -915,6 +1001,9 @@ struct dc_debug_options {
bool disable_dsc_power_gate;
bool disable_optc_power_gate;
bool disable_hpo_power_gate;
+ bool disable_io_clk_power_gate;
+ bool disable_mem_power_gate;
+ bool disable_dio_power_gate;
int dsc_min_slice_height_override;
int dsc_bpp_increment_div;
bool disable_pplib_wm_range;
@@ -1019,6 +1108,7 @@ struct dc_debug_options {
unsigned int force_mall_ss_num_ways;
bool alloc_extra_way_for_cursor;
uint32_t subvp_extra_lines;
+ bool disable_force_pstate_allow_on_hw_release;
bool force_usr_allow;
/* uses value at boot and disables switch */
bool disable_dtb_ref_clk_switch;
@@ -1055,7 +1145,6 @@ struct dc_debug_options {
uint32_t fpo_vactive_min_active_margin_us;
uint32_t fpo_vactive_max_blank_us;
bool enable_hpo_pg_support;
- bool enable_legacy_fast_update;
bool disable_dc_mode_overwrite;
bool replay_skip_crtc_disabled;
bool ignore_pg;/*do nothing, let pmfw control it*/
@@ -1087,11 +1176,18 @@ struct dc_debug_options {
bool enable_ips_visual_confirm;
unsigned int sharpen_policy;
unsigned int scale_to_sharpness_policy;
- bool skip_full_updated_if_possible;
unsigned int enable_oled_edp_power_up_opt;
bool enable_hblank_borrow;
bool force_subvp_df_throttle;
uint32_t acpi_transition_bitmasks[MAX_PIPES];
+ bool enable_pg_cntl_debug_logs;
+ unsigned int auxless_alpm_lfps_setup_ns;
+ unsigned int auxless_alpm_lfps_period_ns;
+ unsigned int auxless_alpm_lfps_silence_ns;
+ unsigned int auxless_alpm_lfps_t1t2_us;
+ short auxless_alpm_lfps_t1t2_offset_us;
+ bool disable_stutter_for_wm_program;
+ bool enable_block_sequence_programming;
};
@@ -1151,7 +1247,7 @@ struct dc_init_data {
uint32_t *dcn_reg_offsets;
uint32_t *nbio_reg_offsets;
uint32_t *clk_reg_offsets;
- struct dml2_soc_bb *bb_from_dmub;
+ void *bb_from_dmub;
};
struct dc_callback_init {
@@ -1252,6 +1348,38 @@ union dc_3dlut_state {
};
+#define MATRIX_9C__DIM_128_ALIGNED_LEN 16 // 9+7 : 9 * 8 + 7 * 8 = 72 + 56 = 128 bytes; 128 % 128 = 0
+#define MATRIX_17C__DIM_128_ALIGNED_LEN 32 //17+15: 17 * 8 + 15 * 8 = 136 + 120 = 256 bytes; 256 % 128 = 0
+#define MATRIX_33C__DIM_128_ALIGNED_LEN 64 //33+31: 33 * 8 + 31 * 8 = 264 + 248 = 512 bytes; 512 % 128 = 0
+
+struct lut_rgb {
+ uint16_t b;
+ uint16_t g;
+ uint16_t r;
+ uint16_t padding;
+};
+
+// this structure maps directly to the layout in which the LUT will be read from memory
+struct lut_mem_mapping {
+ union {
+ //NATIVE MODE 1, 2
+ //RGB layout [b][g][r] //red is 128 byte aligned
+ //BGR layout [r][g][b] //blue is 128 byte aligned
+ struct lut_rgb rgb_17c[17][17][MATRIX_17C__DIM_128_ALIGNED_LEN];
+ struct lut_rgb rgb_33c[33][33][MATRIX_33C__DIM_128_ALIGNED_LEN];
+
+ //TRANSFORMED
+ uint16_t linear_rgb[(33*33*33*4/128+1)*128];
+ };
+ uint16_t size;
+};
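+
A quick standalone check of the 128-byte row alignment that the *_DIM_128_ALIGNED_LEN values above encode (struct name hypothetical, mirroring lut_rgb):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct lut_rgb_example { uint16_t b, g, r, padding; }; /* 8 bytes, mirrors lut_rgb */

int main(void)
{
	/* padded row lengths from the defines above, in entries */
	const unsigned int lens[] = { 16, 32, 64 };

	assert(sizeof(struct lut_rgb_example) == 8);
	for (unsigned int i = 0; i < 3; i++) {
		unsigned int bytes = lens[i] * sizeof(struct lut_rgb_example);
		assert(bytes % 128 == 0); /* every row stays 128-byte aligned */
		printf("%u entries -> %u bytes\n", lens[i], bytes);
	}
	return 0;
}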
+
+struct dc_rmcm_3dlut {
+ bool isInUse;
+ const struct dc_stream_state *stream;
+ uint8_t protection_bits;
+};
+
struct dc_3dlut {
struct kref refcount;
struct tetrahedral_params lut_3d;
@@ -1288,7 +1416,6 @@ union surface_update_flags {
uint32_t in_transfer_func_change:1;
uint32_t input_csc_change:1;
uint32_t coeff_reduction_change:1;
- uint32_t output_tf_change:1;
uint32_t pixel_format_change:1;
uint32_t plane_size_change:1;
uint32_t gamut_remap_change:1;
@@ -1389,6 +1516,8 @@ struct dc_plane_state {
int sharpness_level;
enum linear_light_scaling linear_light_scaling;
unsigned int sdr_white_level_nits;
+ struct spl_sharpness_range sharpness_range;
+ enum sharpness_range_source sharpness_source;
};
struct dc_plane_info {
@@ -1570,6 +1699,7 @@ struct dc_scratch_space {
bool blank_stream_on_ocs_change;
bool read_dpcd204h_on_irq_hpd;
bool force_dp_ffe_preset;
+ bool skip_phy_ssc_reduction;
} wa_flags;
union dc_dp_ffe_preset forced_dp_ffe_preset;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -1579,6 +1709,8 @@ struct dc_scratch_space {
struct gpio *hpd_gpio;
enum dc_link_fec_state fec_state;
+ bool is_dds;
+ bool is_display_mux_present;
bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
struct dc_panel_config panel_config;
@@ -1594,6 +1726,7 @@ struct dc {
struct dc_debug_options debug;
struct dc_versions versions;
struct dc_caps caps;
+ struct dc_check_config check_config;
struct dc_cap_funcs cap_funcs;
struct dc_config config;
struct dc_bounding_box_overrides bb_overrides;
@@ -1603,6 +1736,7 @@ struct dc {
uint8_t link_count;
struct dc_link *links[MAX_LINKS];
+ uint8_t lowest_dpia_link_index;
struct link_service *link_srv;
struct dc_state *current_state;
@@ -1626,12 +1760,15 @@ struct dc {
/* Require to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
- bool wm_optimized_required;
bool idle_optimizations_allowed;
bool enable_c20_dtm_b0;
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
+ /* For eDP to know the switching state of SmartMux */
+ bool is_switch_in_progress_orig;
+ bool is_switch_in_progress_dest;
+
/* FBC compressor */
struct compressor *fbc_compressor;
@@ -1662,9 +1799,9 @@ struct dc {
} scratch;
struct dml2_configuration_options dml2_options;
- struct dml2_configuration_options dml2_tmp;
+ struct dml2_configuration_options dml2_dc_power_options;
enum dc_acpi_cm_power_state power_state;
-
+ struct soc_and_ip_translator *soc_and_ip_translator;
};
struct dc_scaling_info {
@@ -1717,6 +1854,29 @@ struct dc_surface_update {
struct dc_bias_and_scale bias_and_scale;
};
+struct dc_underflow_debug_data {
+ struct dcn_hubbub_reg_state *hubbub_reg_state;
+ struct dcn_hubp_reg_state *hubp_reg_state[MAX_PIPES];
+ struct dcn_dpp_reg_state *dpp_reg_state[MAX_PIPES];
+ struct dcn_mpc_reg_state *mpc_reg_state[MAX_PIPES];
+ struct dcn_opp_reg_state *opp_reg_state[MAX_PIPES];
+ struct dcn_dsc_reg_state *dsc_reg_state[MAX_PIPES];
+ struct dcn_optc_reg_state *optc_reg_state[MAX_PIPES];
+ struct dcn_dccg_reg_state *dccg_reg_state[MAX_PIPES];
+};
+
+struct power_features {
+ bool ips;
+ bool rcg;
+ bool replay;
+ bool dds;
+ bool sprs;
+ bool psr;
+ bool fams;
+ bool mpo;
+ bool uclk_p_state;
+};
+
/*
* Create a new surface with default parameters;
*/
@@ -1735,8 +1895,6 @@ void dc_3dlut_func_retain(struct dc_3dlut *lut);
void dc_post_update_surfaces_to_stream(
struct dc *dc);
-#include "dc_stream.h"
-
/**
* struct dc_validation_set - Struct to store surface/stream associations for validation
*/
@@ -1767,19 +1925,15 @@ enum dc_status dc_validate_with_context(struct dc *dc,
const struct dc_validation_set set[],
int set_count,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
bool dc_set_generic_gpio_for_stereo(bool enable,
struct gpio_service *gpio_service);
-/*
- * fast_validate: we return after determining if we can support the new state,
- * but before we populate the programming info
- */
enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
bool dc_acquire_release_mpc_3dlut(
struct dc *dc, bool acquire,
@@ -2375,17 +2529,18 @@ void dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
struct dc_link *link, int peak_bw);
/*
- * Validate the BW of all the valid DPIA links to make sure it doesn't exceed
- * available BW for each host router
+ * Calculates the DP tunneling bandwidth required for the stream timing
+ * and aggregates the stream bandwidth for the respective DP tunneling link
*
- * @dc: pointer to dc struct
- * @stream: pointer to all possible streams
- * @count: number of valid DPIA streams
- *
- * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
+ * return: dc_status
+ */
+enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx);
+
+/*
+ * Get if ALPM is supported by the link
*/
-bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams,
- const unsigned int count);
+void dc_link_get_alpm_support(struct dc_link *link, bool *auxless_support,
+ bool *auxwake_support);
/* Sink Interfaces - A sink corresponds to a display output device */
@@ -2564,6 +2719,13 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
uint32_t link_index,
struct aux_payload *payload);
+/*
+ * smart power OLED Interfaces
+ */
+bool dc_smart_power_oled_enable(const struct dc_link *link, bool enable, uint16_t peak_nits,
+ uint8_t debug_control, uint16_t fixed_CLL, uint32_t triggerline);
+bool dc_smart_power_oled_get_max_cll(const struct dc_link *link, unsigned int *pCurrent_MaxCLL);
+
/* Get dc link index from dpia port index */
uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
uint8_t dpia_port_index);
@@ -2595,6 +2757,10 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state
unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context);
+bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index);
+
+void dc_log_preos_dmcub_info(const struct dc *dc);
+
/* DSC Interfaces */
#include "dc_dsc.h"
@@ -2610,6 +2776,508 @@ bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream);
bool dc_is_cursor_limit_pending(struct dc *dc);
-bool dc_can_clear_cursor_limit(struct dc *dc);
+bool dc_can_clear_cursor_limit(const struct dc *dc);
+
+/**
+ * dc_get_underflow_debug_data_for_otg() - Retrieve underflow debug data.
+ *
+ * @dc: Pointer to the display core context.
+ * @primary_otg_inst: Instance index of the primary OTG that underflowed.
+ * @out_data: Pointer to a dc_underflow_debug_data struct to be filled with debug information.
+ *
+ * This function collects and logs underflow-related HW states when underflow happens,
+ * including OTG underflow status, current read positions, frame count, and per-HUBP debug data.
+ * The results are stored in the provided out_data structure for further analysis or logging.
+ */
+void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst, struct dc_underflow_debug_data *out_data);
+
+void dc_get_power_feature_status(struct dc *dc, int primary_otg_inst, struct power_features *out_data);
+
+/**
+ * Software state variables used to program register fields across the display pipeline
+ */
+struct dc_register_software_state {
+ /* HUBP register programming variables for each pipe */
+ struct {
+ bool valid_plane_state;
+ bool valid_stream;
+ bool min_dc_gfx_version9;
+ uint32_t vtg_sel; /* DCHUBP_CNTL->HUBP_VTG_SEL from pipe_ctx->stream_res.tg->inst */
+ uint32_t hubp_clock_enable; /* HUBP_CLK_CNTL->HUBP_CLOCK_ENABLE from power management */
+ uint32_t surface_pixel_format; /* DCSURF_SURFACE_CONFIG->SURFACE_PIXEL_FORMAT from plane_state->format */
+ uint32_t rotation_angle; /* DCSURF_SURFACE_CONFIG->ROTATION_ANGLE from plane_state->rotation */
+ uint32_t h_mirror_en; /* DCSURF_SURFACE_CONFIG->H_MIRROR_EN from plane_state->horizontal_mirror */
+ uint32_t surface_dcc_en; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_EN from dcc->enable */
+ uint32_t surface_size_width; /* HUBP_SIZE->SURFACE_SIZE_WIDTH from plane_size.surface_size.width */
+ uint32_t surface_size_height; /* HUBP_SIZE->SURFACE_SIZE_HEIGHT from plane_size.surface_size.height */
+ uint32_t pri_viewport_width; /* DCSURF_PRI_VIEWPORT_DIMENSION->PRI_VIEWPORT_WIDTH from scaler_data.viewport.width */
+ uint32_t pri_viewport_height; /* DCSURF_PRI_VIEWPORT_DIMENSION->PRI_VIEWPORT_HEIGHT from scaler_data.viewport.height */
+ uint32_t pri_viewport_x_start; /* DCSURF_PRI_VIEWPORT_START->PRI_VIEWPORT_X_START from scaler_data.viewport.x */
+ uint32_t pri_viewport_y_start; /* DCSURF_PRI_VIEWPORT_START->PRI_VIEWPORT_Y_START from scaler_data.viewport.y */
+ uint32_t cursor_enable; /* CURSOR_CONTROL->CURSOR_ENABLE from cursor_attributes.enable */
+ uint32_t cursor_width; /* CURSOR_SETTINGS->CURSOR_WIDTH from cursor_position.width */
+ uint32_t cursor_height; /* CURSOR_SETTINGS->CURSOR_HEIGHT from cursor_position.height */
+
+ /* Additional DCC configuration */
+ uint32_t surface_dcc_ind_64b_blk; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_IND_64B_BLK from dcc.independent_64b_blks */
+ uint32_t surface_dcc_ind_128b_blk; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_IND_128B_BLK from dcc.independent_128b_blks */
+
+ /* Surface pitch configuration */
+ uint32_t surface_pitch; /* DCSURF_SURFACE_PITCH->PITCH from plane_size.surface_pitch */
+ uint32_t meta_pitch; /* DCSURF_SURFACE_PITCH->META_PITCH from dcc.meta_pitch */
+ uint32_t chroma_pitch; /* DCSURF_SURFACE_PITCH_C->PITCH_C from plane_size.chroma_pitch */
+ uint32_t meta_pitch_c; /* DCSURF_SURFACE_PITCH_C->META_PITCH_C from dcc.meta_pitch_c */
+
+ /* Surface addresses */
+ uint32_t primary_surface_address_low; /* DCSURF_PRIMARY_SURFACE_ADDRESS->PRIMARY_SURFACE_ADDRESS from address.grph.addr.low_part */
+ uint32_t primary_surface_address_high; /* DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH->PRIMARY_SURFACE_ADDRESS_HIGH from address.grph.addr.high_part */
+ uint32_t primary_meta_surface_address_low; /* DCSURF_PRIMARY_META_SURFACE_ADDRESS->PRIMARY_META_SURFACE_ADDRESS from address.grph.meta_addr.low_part */
+ uint32_t primary_meta_surface_address_high; /* DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH->PRIMARY_META_SURFACE_ADDRESS_HIGH from address.grph.meta_addr.high_part */
+
+ /* TMZ configuration */
+ uint32_t primary_surface_tmz; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_TMZ from address.tmz_surface */
+ uint32_t primary_meta_surface_tmz; /* DCSURF_SURFACE_CONTROL->PRIMARY_META_SURFACE_TMZ from address.tmz_surface */
+
+ /* Tiling configuration */
+ uint32_t sw_mode; /* DCSURF_TILING_CONFIG->SW_MODE from tiling_info.gfx9.swizzle */
+ uint32_t num_pipes; /* DCSURF_ADDR_CONFIG->NUM_PIPES from tiling_info.gfx9.num_pipes */
+ uint32_t num_banks; /* DCSURF_ADDR_CONFIG->NUM_BANKS from tiling_info.gfx9.num_banks */
+ uint32_t pipe_interleave; /* DCSURF_ADDR_CONFIG->PIPE_INTERLEAVE from tiling_info.gfx9.pipe_interleave */
+ uint32_t num_shader_engines; /* DCSURF_ADDR_CONFIG->NUM_SE from tiling_info.gfx9.num_shader_engines */
+ uint32_t num_rb_per_se; /* DCSURF_ADDR_CONFIG->NUM_RB_PER_SE from tiling_info.gfx9.num_rb_per_se */
+ uint32_t num_pkrs; /* DCSURF_ADDR_CONFIG->NUM_PKRS from tiling_info.gfx9.num_pkrs */
+
+ /* DML Request Size Configuration - Luma */
+ uint32_t rq_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->CHUNK_SIZE from rq_regs.rq_regs_l.chunk_size */
+ uint32_t rq_min_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->MIN_CHUNK_SIZE from rq_regs.rq_regs_l.min_chunk_size */
+ uint32_t rq_meta_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->META_CHUNK_SIZE from rq_regs.rq_regs_l.meta_chunk_size */
+ uint32_t rq_min_meta_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->MIN_META_CHUNK_SIZE from rq_regs.rq_regs_l.min_meta_chunk_size */
+ uint32_t rq_dpte_group_size; /* DCHUBP_REQ_SIZE_CONFIG->DPTE_GROUP_SIZE from rq_regs.rq_regs_l.dpte_group_size */
+ uint32_t rq_mpte_group_size; /* DCHUBP_REQ_SIZE_CONFIG->MPTE_GROUP_SIZE from rq_regs.rq_regs_l.mpte_group_size */
+ uint32_t rq_swath_height_l; /* DCHUBP_REQ_SIZE_CONFIG->SWATH_HEIGHT_L from rq_regs.rq_regs_l.swath_height */
+ uint32_t rq_pte_row_height_l; /* DCHUBP_REQ_SIZE_CONFIG->PTE_ROW_HEIGHT_L from rq_regs.rq_regs_l.pte_row_height */
+
+ /* DML Request Size Configuration - Chroma */
+ uint32_t rq_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->CHUNK_SIZE_C from rq_regs.rq_regs_c.chunk_size */
+ uint32_t rq_min_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MIN_CHUNK_SIZE_C from rq_regs.rq_regs_c.min_chunk_size */
+ uint32_t rq_meta_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->META_CHUNK_SIZE_C from rq_regs.rq_regs_c.meta_chunk_size */
+ uint32_t rq_min_meta_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MIN_META_CHUNK_SIZE_C from rq_regs.rq_regs_c.min_meta_chunk_size */
+ uint32_t rq_dpte_group_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->DPTE_GROUP_SIZE_C from rq_regs.rq_regs_c.dpte_group_size */
+ uint32_t rq_mpte_group_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MPTE_GROUP_SIZE_C from rq_regs.rq_regs_c.mpte_group_size */
+ uint32_t rq_swath_height_c; /* DCHUBP_REQ_SIZE_CONFIG_C->SWATH_HEIGHT_C from rq_regs.rq_regs_c.swath_height */
+ uint32_t rq_pte_row_height_c; /* DCHUBP_REQ_SIZE_CONFIG_C->PTE_ROW_HEIGHT_C from rq_regs.rq_regs_c.pte_row_height */
+
+ /* DML Expansion Modes */
+ uint32_t drq_expansion_mode; /* DCN_EXPANSION_MODE->DRQ_EXPANSION_MODE from rq_regs.drq_expansion_mode */
+ uint32_t prq_expansion_mode; /* DCN_EXPANSION_MODE->PRQ_EXPANSION_MODE from rq_regs.prq_expansion_mode */
+ uint32_t mrq_expansion_mode; /* DCN_EXPANSION_MODE->MRQ_EXPANSION_MODE from rq_regs.mrq_expansion_mode */
+ uint32_t crq_expansion_mode; /* DCN_EXPANSION_MODE->CRQ_EXPANSION_MODE from rq_regs.crq_expansion_mode */
+
+ /* DML DLG parameters - nominal */
+ uint32_t dst_y_per_vm_vblank; /* NOM_PARAMETERS_0->DST_Y_PER_VM_VBLANK from dlg_regs.dst_y_per_vm_vblank */
+ uint32_t dst_y_per_row_vblank; /* NOM_PARAMETERS_0->DST_Y_PER_ROW_VBLANK from dlg_regs.dst_y_per_row_vblank */
+ uint32_t dst_y_per_vm_flip; /* NOM_PARAMETERS_1->DST_Y_PER_VM_FLIP from dlg_regs.dst_y_per_vm_flip */
+ uint32_t dst_y_per_row_flip; /* NOM_PARAMETERS_1->DST_Y_PER_ROW_FLIP from dlg_regs.dst_y_per_row_flip */
+
+ /* DML prefetch settings */
+ uint32_t dst_y_prefetch; /* PREFETCH_SETTINS->DST_Y_PREFETCH from dlg_regs.dst_y_prefetch */
+ uint32_t vratio_prefetch; /* PREFETCH_SETTINS->VRATIO_PREFETCH from dlg_regs.vratio_prefetch */
+ uint32_t vratio_prefetch_c; /* PREFETCH_SETTINS_C->VRATIO_PREFETCH_C from dlg_regs.vratio_prefetch_c */
+
+ /* TTU parameters */
+ uint32_t qos_level_low_wm; /* TTU_CNTL1->QoSLevelLowWaterMark from ttu_regs.qos_level_low_wm */
+ uint32_t qos_level_high_wm; /* TTU_CNTL1->QoSLevelHighWaterMark from ttu_regs.qos_level_high_wm */
+ uint32_t qos_level_flip; /* TTU_CNTL2->QoS_LEVEL_FLIP_L from ttu_regs.qos_level_flip */
+ uint32_t min_ttu_vblank; /* DCN_GLOBAL_TTU_CNTL->MIN_TTU_VBLANK from ttu_regs.min_ttu_vblank */
+ } hubp[MAX_PIPES];
+
+ /* HUBBUB register programming variables */
+ struct {
+ /* Individual DET buffer control per pipe - software state that programs DET registers */
+ uint32_t det0_size; /* DCHUBBUB_DET0_CTRL->DET0_SIZE from hubbub->funcs->program_det_size(hubbub, 0, det_buffer_size_kb) */
+ uint32_t det1_size; /* DCHUBBUB_DET1_CTRL->DET1_SIZE from hubbub->funcs->program_det_size(hubbub, 1, det_buffer_size_kb) */
+ uint32_t det2_size; /* DCHUBBUB_DET2_CTRL->DET2_SIZE from hubbub->funcs->program_det_size(hubbub, 2, det_buffer_size_kb) */
+ uint32_t det3_size; /* DCHUBBUB_DET3_CTRL->DET3_SIZE from hubbub->funcs->program_det_size(hubbub, 3, det_buffer_size_kb) */
+
+ /* Compression buffer control - software state that programs COMPBUF registers */
+ uint32_t compbuf_size; /* DCHUBBUB_COMPBUF_CTRL->COMPBUF_SIZE from hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, safe_to_increase) */
+ uint32_t compbuf_reserved_space_64b; /* COMPBUF_RESERVED_SPACE->COMPBUF_RESERVED_SPACE_64B from hubbub2->pixel_chunk_size / 32 */
+ uint32_t compbuf_reserved_space_zs; /* COMPBUF_RESERVED_SPACE->COMPBUF_RESERVED_SPACE_ZS from hubbub2->pixel_chunk_size / 128 */
+ } hubbub;
+
+ /* DPP register programming variables for each pipe (simplified for available fields) */
+ struct {
+ uint32_t dpp_clock_enable; /* DPP_CONTROL->DPP_CLOCK_ENABLE from dppclk_enable */
+
+ /* Recout (Rectangle of Interest) configuration */
+ uint32_t recout_start_x; /* RECOUT_START->RECOUT_START_X from pipe_ctx->plane_res.scl_data.recout.x */
+ uint32_t recout_start_y; /* RECOUT_START->RECOUT_START_Y from pipe_ctx->plane_res.scl_data.recout.y */
+ uint32_t recout_width; /* RECOUT_SIZE->RECOUT_WIDTH from pipe_ctx->plane_res.scl_data.recout.width */
+ uint32_t recout_height; /* RECOUT_SIZE->RECOUT_HEIGHT from pipe_ctx->plane_res.scl_data.recout.height */
+
+ /* MPC (Multiple Pipe/Plane Combiner) size configuration */
+ uint32_t mpc_width; /* MPC_SIZE->MPC_WIDTH from pipe_ctx->plane_res.scl_data.h_active */
+ uint32_t mpc_height; /* MPC_SIZE->MPC_HEIGHT from pipe_ctx->plane_res.scl_data.v_active */
+
+ /* DSCL mode configuration */
+ uint32_t dscl_mode; /* SCL_MODE->DSCL_MODE from pipe_ctx->plane_res.scl_data.dscl_prog_data.dscl_mode */
+
+ /* Scaler ratios (simplified to integer parts) */
+ uint32_t horz_ratio_int; /* SCL_HORZ_FILTER_SCALE_RATIO->SCL_H_SCALE_RATIO integer part from ratios.horz */
+ uint32_t vert_ratio_int; /* SCL_VERT_FILTER_SCALE_RATIO->SCL_V_SCALE_RATIO integer part from ratios.vert */
+
+ /* Basic scaler taps */
+ uint32_t h_taps; /* SCL_TAP_CONTROL->SCL_H_NUM_TAPS from taps.h_taps */
+ uint32_t v_taps; /* SCL_TAP_CONTROL->SCL_V_NUM_TAPS from taps.v_taps */
+ } dpp[MAX_PIPES];
+
+ /* DCCG register programming variables */
+ struct {
+ /* Core Display Clock Control */
+ uint32_t dispclk_khz; /* DENTIST_DISPCLK_CNTL->DENTIST_DISPCLK_WDIVIDER from clk_mgr.dispclk_khz */
+ uint32_t dc_mem_global_pwr_req_dis; /* DC_MEM_GLOBAL_PWR_REQ_CNTL->DC_MEM_GLOBAL_PWR_REQ_DIS from memory power management settings */
+
+ /* DPP Clock Control - 5 fields per pipe */
+ uint32_t dppclk_khz[MAX_PIPES]; /* DPPCLK_CTRL->DPPCLK_R_GATE_DISABLE from dpp_clocks[pipe] */
+ uint32_t dppclk_enable[MAX_PIPES]; /* DPPCLK_CTRL->DPPCLK0_EN,DPPCLK1_EN,DPPCLK2_EN,DPPCLK3_EN from dccg31_update_dpp_dto() */
+ uint32_t dppclk_dto_enable[MAX_PIPES]; /* DPPCLK_DTO_CTRL->DPPCLK_DTO_ENABLE from dccg->dpp_clock_gated[dpp_inst] state */
+ uint32_t dppclk_dto_phase[MAX_PIPES]; /* DPPCLK0_DTO_PARAM->DPPCLK0_DTO_PHASE from phase calculation req_dppclk/ref_dppclk; see the worked sketch after this struct */
+ uint32_t dppclk_dto_modulo[MAX_PIPES]; /* DPPCLK0_DTO_PARAM->DPPCLK0_DTO_MODULO from modulo = 0xff */
+
+ /* DSC Clock Control - 4 fields per DSC resource */
+ uint32_t dscclk_khz[MAX_PIPES]; /* DSCCLK_DTO_CTRL->DSCCLK_DTO_ENABLE from dsc_clocks */
+ uint32_t dscclk_dto_enable[MAX_PIPES]; /* DSCCLK_DTO_CTRL->DSCCLK0_DTO_ENABLE,DSCCLK1_DTO_ENABLE,DSCCLK2_DTO_ENABLE,DSCCLK3_DTO_ENABLE */
+ uint32_t dscclk_dto_phase[MAX_PIPES]; /* DSCCLK0_DTO_PARAM->DSCCLK0_DTO_PHASE from dccg31_enable_dscclk() */
+ uint32_t dscclk_dto_modulo[MAX_PIPES]; /* DSCCLK0_DTO_PARAM->DSCCLK0_DTO_MODULO from dccg31_enable_dscclk() */
+
+ /* Pixel Clock Control - per pipe */
+ uint32_t pixclk_khz[MAX_PIPES]; /* PIXCLK_RESYNC_CNTL->PIXCLK_RESYNC_ENABLE from stream.timing.pix_clk_100hz */
+ uint32_t otg_pixel_rate_div[MAX_PIPES]; /* OTG_PIXEL_RATE_DIV->OTG_PIXEL_RATE_DIV from OTG pixel rate divider control */
+ uint32_t dtbclk_dto_enable[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->DTBCLK_DTO_ENABLE from dccg31_set_dtbclk_dto() */
+ uint32_t pipe_dto_src_sel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->PIPE_DTO_SRC_SEL from dccg31_set_dtbclk_dto() source selection */
+ uint32_t dtbclk_dto_div[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->DTBCLK_DTO_DIV from dtbdto_div calculation */
+ uint32_t otg_add_pixel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->OTG_ADD_PIXEL from dccg31_otg_add_pixel() */
+ uint32_t otg_drop_pixel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->OTG_DROP_PIXEL from dccg31_otg_drop_pixel() */
+
+ /* DTBCLK DTO Control - 4 DTOs */
+ uint32_t dtbclk_dto_modulo[4]; /* DTBCLK_DTO0_MODULO->DTBCLK_DTO0_MODULO from dccg31_set_dtbclk_dto() modulo calculation */
+ uint32_t dtbclk_dto_phase[4]; /* DTBCLK_DTO0_PHASE->DTBCLK_DTO0_PHASE from phase calculation pixclk_khz/ref_dtbclk_khz */
+ uint32_t dtbclk_dto_dbuf_en; /* DTBCLK_DTO_DBUF_EN->DTBCLK DTO data buffer enable */
+
+ /* DP Stream Clock Control - 4 pipes */
+ uint32_t dpstreamclk_enable[MAX_PIPES]; /* DPSTREAMCLK_CNTL->DPSTREAMCLK_PIPE0_EN,DPSTREAMCLK_PIPE1_EN,DPSTREAMCLK_PIPE2_EN,DPSTREAMCLK_PIPE3_EN */
+ uint32_t dp_dto_modulo[4]; /* DP_DTO0_MODULO->DP_DTO0_MODULO from DP stream DTO programming */
+ uint32_t dp_dto_phase[4]; /* DP_DTO0_PHASE->DP_DTO0_PHASE from DP stream DTO programming */
+ uint32_t dp_dto_dbuf_en; /* DP_DTO_DBUF_EN->DP DTO data buffer enable */
+
+ /* PHY Symbol Clock Control - 5 PHYs (A,B,C,D,E) */
+ uint32_t phy_symclk_force_en[5]; /* PHYASYMCLK_CLOCK_CNTL->PHYASYMCLK_FORCE_EN from dccg31_set_physymclk() force_enable */
+ uint32_t phy_symclk_force_src_sel[5]; /* PHYASYMCLK_CLOCK_CNTL->PHYASYMCLK_FORCE_SRC_SEL from dccg31_set_physymclk() clk_src */
+ uint32_t phy_symclk_gate_disable[5]; /* DCCG_GATE_DISABLE_CNTL2->PHYASYMCLK_GATE_DISABLE from debug.root_clock_optimization.bits.physymclk */
+
+ /* SYMCLK32 SE Control - 4 instances */
+ uint32_t symclk32_se_src_sel[4]; /* SYMCLK32_SE_CNTL->SYMCLK32_SE0_SRC_SEL from dccg31_enable_symclk32_se() with get_phy_mux_symclk() mapping */
+ uint32_t symclk32_se_enable[4]; /* SYMCLK32_SE_CNTL->SYMCLK32_SE0_EN from dccg31_enable_symclk32_se() enable */
+ uint32_t symclk32_se_gate_disable[4]; /* DCCG_GATE_DISABLE_CNTL3->SYMCLK32_SE0_GATE_DISABLE from debug.root_clock_optimization.bits.symclk32_se */
+
+ /* SYMCLK32 LE Control - 2 instances */
+ uint32_t symclk32_le_src_sel[2]; /* SYMCLK32_LE_CNTL->SYMCLK32_LE0_SRC_SEL from dccg31_enable_symclk32_le() phyd32clk source */
+ uint32_t symclk32_le_enable[2]; /* SYMCLK32_LE_CNTL->SYMCLK32_LE0_EN from dccg31_enable_symclk32_le() enable */
+ uint32_t symclk32_le_gate_disable[2]; /* DCCG_GATE_DISABLE_CNTL3->SYMCLK32_LE0_GATE_DISABLE from debug.root_clock_optimization.bits.symclk32_le */
+
+ /* DPIA Clock Control */
+ uint32_t dpiaclk_540m_dto_modulo; /* DPIACLK_540M_DTO_MODULO->DPIA 540MHz DTO modulo */
+ uint32_t dpiaclk_540m_dto_phase; /* DPIACLK_540M_DTO_PHASE->DPIA 540MHz DTO phase */
+ uint32_t dpiaclk_810m_dto_modulo; /* DPIACLK_810M_DTO_MODULO->DPIA 810MHz DTO modulo */
+ uint32_t dpiaclk_810m_dto_phase; /* DPIACLK_810M_DTO_PHASE->DPIA 810MHz DTO phase */
+ uint32_t dpiaclk_dto_cntl; /* DPIACLK_DTO_CNTL->DPIA clock DTO control */
+ uint32_t dpiasymclk_cntl; /* DPIASYMCLK_CNTL->DPIA symbol clock control */
+
+ /* Clock Gating Control */
+ uint32_t dccg_gate_disable_cntl; /* DCCG_GATE_DISABLE_CNTL->Clock gate disable control from dccg31_init() */
+ uint32_t dpstreamclk_gate_disable; /* DCCG_GATE_DISABLE_CNTL3->DPSTREAMCLK_GATE_DISABLE from debug.root_clock_optimization.bits.dpstream */
+ uint32_t dpstreamclk_root_gate_disable; /* DCCG_GATE_DISABLE_CNTL3->DPSTREAMCLK_ROOT_GATE_DISABLE from debug.root_clock_optimization.bits.dpstream */
+
+ /* VSync Control */
+ uint32_t vsync_cnt_ctrl; /* DCCG_VSYNC_CNT_CTRL->VSync counter control */
+ uint32_t vsync_cnt_int_ctrl; /* DCCG_VSYNC_CNT_INT_CTRL->VSync counter interrupt control */
+ uint32_t vsync_otg_latch_value[6]; /* DCCG_VSYNC_OTG0_LATCH_VALUE->OTG0 VSync latch value (for OTG0-5) */
+
+ /* Time Base Control */
+ uint32_t microsecond_time_base_div; /* MICROSECOND_TIME_BASE_DIV->Microsecond time base divider */
+ uint32_t millisecond_time_base_div; /* MILLISECOND_TIME_BASE_DIV->Millisecond time base divider */
+ } dccg;
+
+ /* DSC essential configuration for underflow analysis */
+ struct {
+ /* DSC active state - critical for bandwidth analysis */
+ uint32_t dsc_clock_enable; /* DSC enabled - affects bandwidth requirements */
+
+ /* DSC configuration affecting bandwidth and timing */
+ uint32_t dsc_num_slices_h; /* Horizontal slice count - affects throughput */
+ uint32_t dsc_num_slices_v; /* Vertical slice count - affects throughput */
+ uint32_t dsc_bits_per_pixel; /* Compression ratio - affects bandwidth */
+
+ /* OPP integration - affects pipeline flow */
+ uint32_t dscrm_dsc_forward_enable; /* DSC forwarding to OPP enabled */
+ uint32_t dscrm_dsc_opp_pipe_source; /* Which OPP receives DSC output */
+ } dsc[MAX_PIPES];
+
+ /* MPC register programming variables */
+ struct {
+ /* MPCC blending tree and mode control */
+ uint32_t mpcc_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_MODE from blend_cfg.blend_mode */
+ uint32_t mpcc_alpha_blend_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_ALPHA_BLND_MODE from blend_cfg.alpha_mode */
+ uint32_t mpcc_alpha_multiplied_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_ALPHA_MULTIPLIED_MODE from blend_cfg.pre_multiplied_alpha */
+ uint32_t mpcc_blnd_active_overlap_only[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BLND_ACTIVE_OVERLAP_ONLY from blend_cfg.overlap_only */
+ uint32_t mpcc_global_alpha[MAX_PIPES]; /* MPCC_CONTROL->MPCC_GLOBAL_ALPHA from blend_cfg.global_alpha */
+ uint32_t mpcc_global_gain[MAX_PIPES]; /* MPCC_CONTROL->MPCC_GLOBAL_GAIN from blend_cfg.global_gain */
+ uint32_t mpcc_bg_bpc[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BG_BPC from background color depth */
+ uint32_t mpcc_bot_gain_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BOT_GAIN_MODE from bottom layer gain control */
+
+ /* MPCC blending tree connections */
+ uint32_t mpcc_bot_sel[MAX_PIPES]; /* MPCC_BOT_SEL->MPCC_BOT_SEL from mpcc_state->bot_sel */
+ uint32_t mpcc_top_sel[MAX_PIPES]; /* MPCC_TOP_SEL->MPCC_TOP_SEL from mpcc_state->dpp_id */
+
+ /* MPCC output gamma control */
+ uint32_t mpcc_ogam_mode[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_MODE from output gamma mode */
+ uint32_t mpcc_ogam_select[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_SELECT from gamma LUT bank selection */
+ uint32_t mpcc_ogam_pwl_disable[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_PWL_DISABLE from PWL control */
+
+ /* MPCC pipe assignment and status */
+ uint32_t mpcc_opp_id[MAX_PIPES]; /* MPCC_OPP_ID->MPCC_OPP_ID from mpcc_state->opp_id */
+ uint32_t mpcc_idle[MAX_PIPES]; /* MPCC_STATUS->MPCC_IDLE from mpcc idle status */
+ uint32_t mpcc_busy[MAX_PIPES]; /* MPCC_STATUS->MPCC_BUSY from mpcc busy status */
+
+ /* MPC output processing */
+ uint32_t mpc_out_csc_mode; /* MPC_OUT_CSC_COEF->MPC_OUT_CSC_MODE from output_csc */
+ uint32_t mpc_out_gamma_mode; /* MPC_OUT_GAMMA_LUT->MPC_OUT_GAMMA_MODE from output_gamma */
+ } mpc;
+
+ /* OPP register programming variables for each pipe */
+ struct {
+ /* Display Pattern Generator (DPG) Control - enable bit from the DPG_CONTROL register */
+ uint32_t dpg_enable; /* DPG_CONTROL->DPG_EN from test_pattern parameter (enable/disable) */
+
+ /* Format Control (FMT) - 18 fields from FMT_CONTROL register */
+ uint32_t fmt_pixel_encoding; /* FMT_CONTROL->FMT_PIXEL_ENCODING from clamping->pixel_encoding */
+ uint32_t fmt_subsampling_mode; /* FMT_CONTROL->FMT_SUBSAMPLING_MODE from force_chroma_subsampling_1tap */
+ uint32_t fmt_cbcr_bit_reduction_bypass; /* FMT_CONTROL->FMT_CBCR_BIT_REDUCTION_BYPASS from pixel_encoding bypass control */
+ uint32_t fmt_stereosync_override; /* FMT_CONTROL->FMT_STEREOSYNC_OVERRIDE from stereo timing override */
+ uint32_t fmt_spatial_dither_frame_counter_max; /* FMT_CONTROL->FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX from fmt_bit_depth->flags */
+ uint32_t fmt_spatial_dither_frame_counter_bit_swap; /* FMT_CONTROL->FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP from dither control */
+ uint32_t fmt_truncate_enable; /* FMT_CONTROL->FMT_TRUNCATE_EN from fmt_bit_depth->flags.TRUNCATE_ENABLED */
+ uint32_t fmt_truncate_depth; /* FMT_CONTROL->FMT_TRUNCATE_DEPTH from fmt_bit_depth->flags.TRUNCATE_DEPTH */
+ uint32_t fmt_truncate_mode; /* FMT_CONTROL->FMT_TRUNCATE_MODE from fmt_bit_depth->flags.TRUNCATE_MODE */
+ uint32_t fmt_spatial_dither_enable; /* FMT_CONTROL->FMT_SPATIAL_DITHER_EN from fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED */
+ uint32_t fmt_spatial_dither_mode; /* FMT_CONTROL->FMT_SPATIAL_DITHER_MODE from fmt_bit_depth->flags.SPATIAL_DITHER_MODE */
+ uint32_t fmt_spatial_dither_depth; /* FMT_CONTROL->FMT_SPATIAL_DITHER_DEPTH from fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH */
+ uint32_t fmt_temporal_dither_enable; /* FMT_CONTROL->FMT_TEMPORAL_DITHER_EN from fmt_bit_depth->flags.TEMPORAL_DITHER_ENABLED */
+ uint32_t fmt_clamp_data_enable; /* FMT_CONTROL->FMT_CLAMP_DATA_EN from clamping->clamping_range enable */
+ uint32_t fmt_clamp_color_format; /* FMT_CONTROL->FMT_CLAMP_COLOR_FORMAT from clamping->color_format */
+ uint32_t fmt_dynamic_exp_enable; /* FMT_CONTROL->FMT_DYNAMIC_EXP_EN from color_sp/color_dpth/signal */
+ uint32_t fmt_dynamic_exp_mode; /* FMT_CONTROL->FMT_DYNAMIC_EXP_MODE from color space mode mapping */
+ uint32_t fmt_bit_depth_control; /* Legacy field - kept for compatibility */
+
+ /* OPP Pipe Control - 1 field from OPP_PIPE_CONTROL register */
+ uint32_t opp_pipe_clock_enable; /* OPP_PIPE_CONTROL->OPP_PIPE_CLOCK_EN from enable parameter (bool) */
+
+ /* OPP CRC Control - 3 fields from OPP_PIPE_CRC_CONTROL register */
+ uint32_t opp_crc_enable; /* OPP_PIPE_CRC_CONTROL->CRC_EN from CRC enable control */
+ uint32_t opp_crc_select_source; /* OPP_PIPE_CRC_CONTROL->CRC_SELECT_SOURCE from CRC source selection */
+ uint32_t opp_crc_stereo_cont; /* OPP_PIPE_CRC_CONTROL->CRC_STEREO_CONT from stereo continuous CRC */
+
+ /* Output Buffer (OPPBUF) Control - 6 fields from OPPBUF_CONTROL register */
+ uint32_t oppbuf_active_width; /* OPPBUF_CONTROL->OPPBUF_ACTIVE_WIDTH from oppbuf_params->active_width */
+ uint32_t oppbuf_pixel_repetition; /* OPPBUF_CONTROL->OPPBUF_PIXEL_REPETITION from oppbuf_params->pixel_repetition */
+ uint32_t oppbuf_display_segmentation; /* OPPBUF_CONTROL->OPPBUF_DISPLAY_SEGMENTATION from oppbuf_params->mso_segmentation */
+ uint32_t oppbuf_overlap_pixel_num; /* OPPBUF_CONTROL->OPPBUF_OVERLAP_PIXEL_NUM from oppbuf_params->mso_overlap_pixel_num */
+ uint32_t oppbuf_3d_vact_space1_size; /* OPPBUF_CONTROL->OPPBUF_3D_VACT_SPACE1_SIZE from 3D timing space1_size */
+ uint32_t oppbuf_3d_vact_space2_size; /* OPPBUF_CONTROL->OPPBUF_3D_VACT_SPACE2_SIZE from 3D timing space2_size */
+
+ /* DSC Forward Config - 3 fields from DSCRM_DSC_FORWARD_CONFIG register */
+ uint32_t dscrm_dsc_forward_enable; /* DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_FORWARD_EN from DSC forward enable control */
+ uint32_t dscrm_dsc_opp_pipe_source; /* DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_OPP_PIPE_SOURCE from opp_pipe parameter */
+ uint32_t dscrm_dsc_forward_enable_status; /* DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_FORWARD_EN_STATUS from DSC forward status (read-only) */
+ } opp[MAX_PIPES];
+
+ /* OPTC register programming variables for each pipe */
+ struct {
+ uint32_t otg_master_inst;
+
+ /* OTG_CONTROL register - 5 fields for OTG control */
+ uint32_t otg_master_enable; /* OTG_CONTROL->OTG_MASTER_EN from timing enable/disable control */
+ uint32_t otg_disable_point_cntl; /* OTG_CONTROL->OTG_DISABLE_POINT_CNTL from disable timing control */
+ uint32_t otg_start_point_cntl; /* OTG_CONTROL->OTG_START_POINT_CNTL from start timing control */
+ uint32_t otg_field_number_cntl; /* OTG_CONTROL->OTG_FIELD_NUMBER_CNTL from interlace field control */
+ uint32_t otg_out_mux; /* OTG_CONTROL->OTG_OUT_MUX from output mux selection */
+
+ /* OTG Horizontal Timing - 7 fields */
+ uint32_t otg_h_total; /* OTG_H_TOTAL->OTG_H_TOTAL from dc_crtc_timing->h_total */
+ uint32_t otg_h_blank_start; /* OTG_H_BLANK_START_END->OTG_H_BLANK_START from dc_crtc_timing->h_front_porch */
+ uint32_t otg_h_blank_end; /* OTG_H_BLANK_START_END->OTG_H_BLANK_END from dc_crtc_timing->h_addressable */
+ uint32_t otg_h_sync_start; /* OTG_H_SYNC_A->OTG_H_SYNC_A_START from dc_crtc_timing->h_sync_width */
+ uint32_t otg_h_sync_end; /* OTG_H_SYNC_A->OTG_H_SYNC_A_END from calculated sync end position */
+ uint32_t otg_h_sync_polarity; /* OTG_H_SYNC_A_CNTL->OTG_H_SYNC_A_POL from dc_crtc_timing->flags.HSYNC_POSITIVE_POLARITY */
+ uint32_t otg_h_timing_div_mode; /* OTG_H_TIMING_CNTL->OTG_H_TIMING_DIV_MODE from horizontal timing division mode */
+
+ /* OTG Vertical Timing - 7 fields */
+ uint32_t otg_v_total; /* OTG_V_TOTAL->OTG_V_TOTAL from dc_crtc_timing->v_total */
+ uint32_t otg_v_blank_start; /* OTG_V_BLANK_START_END->OTG_V_BLANK_START from dc_crtc_timing->v_front_porch */
+ uint32_t otg_v_blank_end; /* OTG_V_BLANK_START_END->OTG_V_BLANK_END from dc_crtc_timing->v_addressable */
+ uint32_t otg_v_sync_start; /* OTG_V_SYNC_A->OTG_V_SYNC_A_START from dc_crtc_timing->v_sync_width */
+ uint32_t otg_v_sync_end; /* OTG_V_SYNC_A->OTG_V_SYNC_A_END from calculated sync end position */
+ uint32_t otg_v_sync_polarity; /* OTG_V_SYNC_A_CNTL->OTG_V_SYNC_A_POL from dc_crtc_timing->flags.VSYNC_POSITIVE_POLARITY */
+ uint32_t otg_v_sync_mode; /* OTG_V_SYNC_A_CNTL->OTG_V_SYNC_MODE from sync mode selection */
+
+ /* OTG DRR (Dynamic Refresh Rate) Control - 9 fields */
+ uint32_t otg_v_total_max; /* OTG_V_TOTAL_MAX->OTG_V_TOTAL_MAX from drr_params->vertical_total_max */
+ uint32_t otg_v_total_min; /* OTG_V_TOTAL_MIN->OTG_V_TOTAL_MIN from drr_params->vertical_total_min */
+ uint32_t otg_v_total_mid; /* OTG_V_TOTAL_MID->OTG_V_TOTAL_MID from drr_params->vertical_total_mid */
+ uint32_t otg_v_total_max_sel; /* OTG_V_TOTAL_CONTROL->OTG_V_TOTAL_MAX_SEL from DRR max selection enable */
+ uint32_t otg_v_total_min_sel; /* OTG_V_TOTAL_CONTROL->OTG_V_TOTAL_MIN_SEL from DRR min selection enable */
+ uint32_t otg_vtotal_mid_replacing_max_en; /* OTG_V_TOTAL_CONTROL->OTG_VTOTAL_MID_REPLACING_MAX_EN from DRR mid-frame enable */
+ uint32_t otg_vtotal_mid_frame_num; /* OTG_V_TOTAL_CONTROL->OTG_VTOTAL_MID_FRAME_NUM from drr_params->vertical_total_mid_frame_num */
+ uint32_t otg_set_v_total_min_mask; /* OTG_V_TOTAL_CONTROL->OTG_SET_V_TOTAL_MIN_MASK from DRR trigger mask */
+ uint32_t otg_force_lock_on_event; /* OTG_V_TOTAL_CONTROL->OTG_FORCE_LOCK_ON_EVENT from DRR force lock control */
+
+ /* OPTC Data Source and ODM - 6 fields */
+ uint32_t optc_seg0_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG0_SRC_SEL from opp_id[0] ODM segment 0 source */
+ uint32_t optc_seg1_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG1_SRC_SEL from opp_id[1] ODM segment 1 source */
+ uint32_t optc_seg2_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG2_SRC_SEL from opp_id[2] ODM segment 2 source */
+ uint32_t optc_seg3_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG3_SRC_SEL from opp_id[3] ODM segment 3 source */
+ uint32_t optc_num_of_input_segment; /* OPTC_DATA_SOURCE_SELECT->OPTC_NUM_OF_INPUT_SEGMENT from opp_cnt-1 number of input segments */
+ uint32_t optc_mem_sel; /* OPTC_MEMORY_CONFIG->OPTC_MEM_SEL from memory_mask ODM memory selection */
+
+ /* OPTC Data Format and DSC - 5 fields */
+ uint32_t optc_data_format; /* OPTC_DATA_FORMAT_CONTROL->OPTC_DATA_FORMAT from data format selection */
+ uint32_t optc_dsc_mode; /* OPTC_DATA_FORMAT_CONTROL->OPTC_DSC_MODE from dsc_mode parameter */
+ uint32_t optc_dsc_bytes_per_pixel; /* OPTC_BYTES_PER_PIXEL->OPTC_DSC_BYTES_PER_PIXEL from dsc_bytes_per_pixel parameter */
+ uint32_t optc_segment_width; /* OPTC_WIDTH_CONTROL->OPTC_SEGMENT_WIDTH from segment_width parameter */
+ uint32_t optc_dsc_slice_width; /* OPTC_WIDTH_CONTROL->OPTC_DSC_SLICE_WIDTH from dsc_slice_width parameter */
+
+ /* OPTC Clock and Underflow Control - 5 fields */
+ uint32_t optc_input_pix_clk_en; /* OPTC_INPUT_CLOCK_CONTROL->OPTC_INPUT_PIX_CLK_EN from pixel clock enable */
+ uint32_t optc_underflow_occurred_status; /* OPTC_INPUT_GLOBAL_CONTROL->OPTC_UNDERFLOW_OCCURRED_STATUS from underflow status (read-only) */
+ uint32_t optc_underflow_clear; /* OPTC_INPUT_GLOBAL_CONTROL->OPTC_UNDERFLOW_CLEAR from underflow clear control */
+ uint32_t otg_clock_enable; /* OTG_CLOCK_CONTROL->OTG_CLOCK_EN from OTG clock enable */
+ uint32_t otg_clock_gate_dis; /* OTG_CLOCK_CONTROL->OTG_CLOCK_GATE_DIS from clock gate disable */
+
+ /* OTG Stereo and 3D Control - 7 fields */
+ uint32_t otg_stereo_enable; /* OTG_STEREO_CONTROL->OTG_STEREO_EN from stereo enable control */
+ uint32_t otg_stereo_sync_output_line_num; /* OTG_STEREO_CONTROL->OTG_STEREO_SYNC_OUTPUT_LINE_NUM from timing->stereo_3d_format line num */
+ uint32_t otg_stereo_sync_output_polarity; /* OTG_STEREO_CONTROL->OTG_STEREO_SYNC_OUTPUT_POLARITY from stereo polarity control */
+ uint32_t otg_3d_structure_en; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_EN from 3D structure enable */
+ uint32_t otg_3d_structure_v_update_mode; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_V_UPDATE_MODE from 3D vertical update mode */
+ uint32_t otg_3d_structure_stereo_sel_ovr; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_STEREO_SEL_OVR from 3D stereo selection override */
+ uint32_t otg_interlace_enable; /* OTG_INTERLACE_CONTROL->OTG_INTERLACE_ENABLE from dc_crtc_timing->flags.INTERLACE */
+
+ /* OTG GSL (Global Sync Lock) Control - 5 fields */
+ uint32_t otg_gsl0_en; /* OTG_GSL_CONTROL->OTG_GSL0_EN from GSL group 0 enable */
+ uint32_t otg_gsl1_en; /* OTG_GSL_CONTROL->OTG_GSL1_EN from GSL group 1 enable */
+ uint32_t otg_gsl2_en; /* OTG_GSL_CONTROL->OTG_GSL2_EN from GSL group 2 enable */
+ uint32_t otg_gsl_master_en; /* OTG_GSL_CONTROL->OTG_GSL_MASTER_EN from GSL master enable */
+ uint32_t otg_gsl_master_mode; /* OTG_GSL_CONTROL->OTG_GSL_MASTER_MODE from gsl_params->gsl_master mode */
+
+ /* OTG DRR Advanced Control - 4 fields */
+ uint32_t otg_v_total_last_used_by_drr; /* OTG_DRR_CONTROL->OTG_V_TOTAL_LAST_USED_BY_DRR from last used DRR V_TOTAL (read-only) */
+ uint32_t otg_drr_trigger_window_start_x; /* OTG_DRR_TRIGGER_WINDOW->OTG_DRR_TRIGGER_WINDOW_START_X from window_start parameter */
+ uint32_t otg_drr_trigger_window_end_x; /* OTG_DRR_TRIGGER_WINDOW->OTG_DRR_TRIGGER_WINDOW_END_X from window_end parameter */
+ uint32_t otg_drr_v_total_change_limit; /* OTG_DRR_V_TOTAL_CHANGE->OTG_DRR_V_TOTAL_CHANGE_LIMIT from limit parameter */
+
+ /* OTG DSC Position Control - 2 fields */
+ uint32_t otg_dsc_start_position_x; /* OTG_DSC_START_POSITION->OTG_DSC_START_POSITION_X from DSC start X position */
+ uint32_t otg_dsc_start_position_line_num; /* OTG_DSC_START_POSITION->OTG_DSC_START_POSITION_LINE_NUM from DSC start line number */
+
+ /* OTG Double Buffer Control - 2 fields */
+ uint32_t otg_drr_timing_dbuf_update_mode; /* OTG_DOUBLE_BUFFER_CONTROL->OTG_DRR_TIMING_DBUF_UPDATE_MODE from DRR double buffer mode */
+ uint32_t otg_blank_data_double_buffer_en; /* OTG_DOUBLE_BUFFER_CONTROL->OTG_BLANK_DATA_DOUBLE_BUFFER_EN from blank data double buffer enable */
+
+ /* OTG Vertical Interrupts - 6 fields */
+ uint32_t otg_vertical_interrupt0_int_enable; /* OTG_VERTICAL_INTERRUPT0_CONTROL->OTG_VERTICAL_INTERRUPT0_INT_ENABLE from interrupt 0 enable */
+ uint32_t otg_vertical_interrupt0_line_start; /* OTG_VERTICAL_INTERRUPT0_POSITION->OTG_VERTICAL_INTERRUPT0_LINE_START from start_line parameter */
+ uint32_t otg_vertical_interrupt1_int_enable; /* OTG_VERTICAL_INTERRUPT1_CONTROL->OTG_VERTICAL_INTERRUPT1_INT_ENABLE from interrupt 1 enable */
+ uint32_t otg_vertical_interrupt1_line_start; /* OTG_VERTICAL_INTERRUPT1_POSITION->OTG_VERTICAL_INTERRUPT1_LINE_START from start_line parameter */
+ uint32_t otg_vertical_interrupt2_int_enable; /* OTG_VERTICAL_INTERRUPT2_CONTROL->OTG_VERTICAL_INTERRUPT2_INT_ENABLE from interrupt 2 enable */
+ uint32_t otg_vertical_interrupt2_line_start; /* OTG_VERTICAL_INTERRUPT2_POSITION->OTG_VERTICAL_INTERRUPT2_LINE_START from start_line parameter */
+
+ /* OTG Global Sync Parameters - 6 fields */
+ uint32_t otg_vready_offset; /* OTG_VREADY_PARAM->OTG_VREADY_OFFSET from vready_offset parameter */
+ uint32_t otg_vstartup_start; /* OTG_VSTARTUP_PARAM->OTG_VSTARTUP_START from vstartup_start parameter */
+ uint32_t otg_vupdate_offset; /* OTG_VUPDATE_PARAM->OTG_VUPDATE_OFFSET from vupdate_offset parameter */
+ uint32_t otg_vupdate_width; /* OTG_VUPDATE_PARAM->OTG_VUPDATE_WIDTH from vupdate_width parameter */
+ uint32_t master_update_lock_vupdate_keepout_start_offset; /* OTG_VUPDATE_KEEPOUT->MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET from pstate_keepout start */
+ uint32_t master_update_lock_vupdate_keepout_end_offset; /* OTG_VUPDATE_KEEPOUT->MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET from pstate_keepout end */
+
+ /* OTG Manual Trigger Control - 12 fields */
+ uint32_t otg_triga_source_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_SOURCE_SELECT from trigger A source selection */
+ uint32_t otg_triga_source_pipe_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_SOURCE_PIPE_SELECT from trigger A pipe selection */
+ uint32_t otg_triga_rising_edge_detect_cntl; /* OTG_TRIGA_CNTL->OTG_TRIGA_RISING_EDGE_DETECT_CNTL from trigger A rising edge detect */
+ uint32_t otg_triga_falling_edge_detect_cntl; /* OTG_TRIGA_CNTL->OTG_TRIGA_FALLING_EDGE_DETECT_CNTL from trigger A falling edge detect */
+ uint32_t otg_triga_polarity_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_POLARITY_SELECT from trigger A polarity selection */
+ uint32_t otg_triga_frequency_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_FREQUENCY_SELECT from trigger A frequency selection */
+ uint32_t otg_triga_delay; /* OTG_TRIGA_CNTL->OTG_TRIGA_DELAY from trigger A delay */
+ uint32_t otg_triga_clear; /* OTG_TRIGA_CNTL->OTG_TRIGA_CLEAR from trigger A clear */
+ uint32_t otg_triga_manual_trig; /* OTG_TRIGA_MANUAL_TRIG->OTG_TRIGA_MANUAL_TRIG from manual trigger A */
+ uint32_t otg_trigb_source_select; /* OTG_TRIGB_CNTL->OTG_TRIGB_SOURCE_SELECT from trigger B source selection */
+ uint32_t otg_trigb_polarity_select; /* OTG_TRIGB_CNTL->OTG_TRIGB_POLARITY_SELECT from trigger B polarity selection */
+ uint32_t otg_trigb_manual_trig; /* OTG_TRIGB_MANUAL_TRIG->OTG_TRIGB_MANUAL_TRIG from manual trigger B */
+
+ /* OTG Static Screen and Update Control - 6 fields */
+ uint32_t otg_static_screen_event_mask; /* OTG_STATIC_SCREEN_CONTROL->OTG_STATIC_SCREEN_EVENT_MASK from event_triggers parameter */
+ uint32_t otg_static_screen_frame_count; /* OTG_STATIC_SCREEN_CONTROL->OTG_STATIC_SCREEN_FRAME_COUNT from num_frames parameter */
+ uint32_t master_update_lock; /* OTG_MASTER_UPDATE_LOCK->MASTER_UPDATE_LOCK from update lock control */
+ uint32_t master_update_mode; /* OTG_MASTER_UPDATE_MODE->MASTER_UPDATE_MODE from update mode selection */
+ uint32_t otg_force_count_now_mode; /* OTG_FORCE_COUNT_NOW_CNTL->OTG_FORCE_COUNT_NOW_MODE from force count mode */
+ uint32_t otg_force_count_now_clear; /* OTG_FORCE_COUNT_NOW_CNTL->OTG_FORCE_COUNT_NOW_CLEAR from force count clear */
+
+ /* VTG Control - 3 fields */
+ uint32_t vtg0_enable; /* CONTROL->VTG0_ENABLE from VTG enable control */
+ uint32_t vtg0_fp2; /* CONTROL->VTG0_FP2 from VTG front porch 2 */
+ uint32_t vtg0_vcount_init; /* CONTROL->VTG0_VCOUNT_INIT from VTG vertical count init */
+
+ /* OTG Status (Read-Only) - 11 fields */
+ uint32_t otg_v_blank; /* OTG_STATUS->OTG_V_BLANK from vertical blank status (read-only) */
+ uint32_t otg_v_active_disp; /* OTG_STATUS->OTG_V_ACTIVE_DISP from vertical active display (read-only) */
+ uint32_t otg_frame_count; /* OTG_STATUS_FRAME_COUNT->OTG_FRAME_COUNT from frame count (read-only) */
+ uint32_t otg_horz_count; /* OTG_STATUS_POSITION->OTG_HORZ_COUNT from horizontal position (read-only) */
+ uint32_t otg_vert_count; /* OTG_STATUS_POSITION->OTG_VERT_COUNT from vertical position (read-only) */
+ uint32_t otg_horz_count_hv; /* OTG_STATUS_HV_COUNT->OTG_HORZ_COUNT from horizontal count (read-only) */
+ uint32_t otg_vert_count_nom; /* OTG_STATUS_HV_COUNT->OTG_VERT_COUNT_NOM from vertical count nominal (read-only) */
+ uint32_t otg_flip_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_FLIP_PENDING from flip pending status (read-only) */
+ uint32_t otg_dc_reg_update_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_DC_REG_UPDATE_PENDING from DC register update pending (read-only) */
+ uint32_t otg_cursor_update_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_CURSOR_UPDATE_PENDING from cursor update pending (read-only) */
+ uint32_t otg_vupdate_keepout_status; /* OTG_PIPE_UPDATE_STATUS->OTG_VUPDATE_KEEPOUT_STATUS from VUPDATE keepout status (read-only) */
+ } optc[MAX_PIPES];
+
+ /* Metadata */
+ uint32_t active_pipe_count;
+ uint32_t active_stream_count;
+ bool state_valid;
+};
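
For readers tracing the DCCG fields above, the DTO phase/modulo relation noted in the dppclk_dto_phase/dppclk_dto_modulo comments reduces to a simple fraction of the reference clock. Below is a minimal sketch of that arithmetic, assuming the fixed 0xff modulo from the comments; the function name and clock values are illustrative, not driver code:

#include <linux/math64.h>

/* Illustrative only: with the DTO modulo fixed at 0xff, the phase value
 * scales the DPP clock as phase/modulo of the reference clock.
 */
static u32 example_dppclk_dto_phase(u32 req_dppclk_khz, u32 ref_dppclk_khz)
{
	const u32 modulo = 0xff;

	/* effective dppclk ~= ref_dppclk_khz * phase / modulo */
	return (u32)div_u64((u64)req_dppclk_khz * modulo, ref_dppclk_khz);
}

/* example_dppclk_dto_phase(300000, 600000) == 127, i.e. ~half the reference. */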
+
+/**
+ * dc_capture_register_software_state() - Capture software state for register programming
+ * @dc: DC context containing current display configuration
+ * @state: Pointer to dc_register_software_state structure to populate
+ *
+ * Extracts all software state variables that are used to program hardware register
+ * fields across the display driver pipeline. This provides a complete snapshot
+ * of the software configuration that drives hardware register programming.
+ *
+ * The function traverses the DC context and extracts values from:
+ * - Stream configurations (timing, format, DSC settings)
+ * - Plane states (surface format, rotation, scaling, cursor)
+ * - Pipe contexts (resource allocation, blending, viewport)
+ * - Clock manager (display clocks, DPP clocks, pixel clocks)
+ * - Resource context (DET buffer allocation, ODM configuration)
+ *
+ * This is essential for underflow debugging as it captures the exact software
+ * state that determines how registers are programmed, allowing analysis of
+ * whether underflow is caused by incorrect register programming or timing issues.
+ *
+ * Return: true if state was successfully captured, false on error
+ */
+bool dc_capture_register_software_state(struct dc *dc, struct dc_register_software_state *state);
#endif /* DC_INTERFACE_H_ */
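
A minimal usage sketch for the capture entry point declared above. The caller and allocation strategy are assumptions; the structure spans MAX_PIPES copies of several sub-blocks, so heap allocation is used to keep it off the stack:

/* Assumes <linux/slab.h> is available in the calling translation unit. */
static void example_dump_register_sw_state(struct dc *dc)
{
	struct dc_register_software_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return;

	if (dc_capture_register_software_state(dc, state) && state->state_valid)
		DC_LOG_DEBUG("captured %u active pipes, %u active streams",
			     state->active_pipe_count, state->active_stream_count);

	kfree(state);
}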
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index 5fa5e2b63fb7..40d7a7d83c40 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -91,9 +91,17 @@ struct dc_vbios_funcs {
struct device_id id);
/* COMMANDS */
+ enum bp_result (*select_crtc_source)(
+ struct dc_bios *bios,
+ struct bp_crtc_source_select *bp_params);
enum bp_result (*encoder_control)(
struct dc_bios *bios,
struct bp_encoder_control *cntl);
+ enum bp_result (*dac_load_detection)(
+ struct dc_bios *bios,
+ enum engine_id engine_id,
+ enum dal_device_type device_type,
+ uint32_t enum_id);
enum bp_result (*transmitter_control)(
struct dc_bios *bios,
struct bp_transmitter_control *cntl);
@@ -165,6 +173,7 @@ struct dc_vbios_funcs {
};
struct bios_registers {
+ uint32_t BIOS_SCRATCH_0;
uint32_t BIOS_SCRATCH_3;
uint32_t BIOS_SCRATCH_6;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index afbcf866520e..7b09af1cb306 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -442,7 +442,6 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
int i = 0, k = 0;
int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it.
uint8_t visual_confirm_enabled;
- int pipe_idx = 0;
struct dc_stream_status *stream_status = NULL;
if (dc == NULL)
@@ -457,7 +456,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled;
if (should_manage_pstate) {
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
@@ -472,7 +471,6 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us;
break;
}
- pipe_idx++;
}
}
@@ -872,7 +870,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
bool enable)
{
uint8_t cmd_pipe_index = 0;
- uint32_t i, pipe_idx;
+ uint32_t i;
uint8_t subvp_count = 0;
union dmub_rb_cmd cmd;
struct pipe_ctx *subvp_pipes[2];
@@ -899,7 +897,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
if (enable) {
// For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
@@ -922,7 +920,6 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
}
- pipe_idx++;
}
if (subvp_count == 2) {
update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes);
@@ -1174,6 +1171,100 @@ void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, con
dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index);
}
+void dc_dmub_srv_cursor_offload_init(struct dc *dc)
+{
+ struct dmub_rb_cmd_cursor_offload_init *init;
+ struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
+ union dmub_rb_cmd cmd;
+
+ if (!dc->config.enable_cursor_offload)
+ return;
+
+ if (!dc_dmub_srv->dmub->meta_info.feature_bits.bits.cursor_offload_v1_support)
+ return;
+
+ if (!dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr || !dc_dmub_srv->dmub->cursor_offload_fb.cpu_addr)
+ return;
+
+ if (!dc_dmub_srv->dmub->cursor_offload_v1)
+ return;
+
+ if (!dc_dmub_srv->dmub->shared_state)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ init = &cmd.cursor_offload_init;
+ init->header.type = DMUB_CMD__CURSOR_OFFLOAD;
+ init->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_INIT;
+ init->header.payload_bytes = sizeof(init->init_data);
+ init->init_data.state_addr.quad_part = dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr;
+ init->init_data.state_size = dc_dmub_srv->dmub->cursor_offload_fb.size;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ dc_dmub_srv->cursor_offload_enabled = true;
+}
+
+void dc_dmub_srv_control_cursor_offload(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream, bool enable)
+{
+ struct pipe_ctx const *pipe_ctx;
+ struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl;
+ union dmub_rb_cmd cmd;
+
+ if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
+ return;
+
+ if (!stream)
+ return;
+
+ pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
+ if (!pipe_ctx || !pipe_ctx->stream_res.tg || pipe_ctx->stream != stream)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cntl = &cmd.cursor_offload_stream_ctnl;
+ cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD;
+ cntl->header.sub_type =
+ enable ? DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE : DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE;
+ cntl->header.payload_bytes = sizeof(cntl->data);
+
+ cntl->data.otg_inst = pipe_ctx->stream_res.tg->inst;
+ cntl->data.line_time_in_ns = 1u + (uint32_t)(div64_u64(stream->timing.h_total * 1000000ull,
+ stream->timing.pix_clk_100hz / 10));
+
+ cntl->data.v_total_max = stream->adjust.v_total_max > stream->timing.v_total ?
+ stream->adjust.v_total_max :
+ stream->timing.v_total;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd,
+ enable ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
+}
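
As a worked check of the line-time arithmetic above, take the standard CTA-861 1080p60 timing (illustrative numbers, not values from this patch): h_total = 2200 and pix_clk_100hz = 1485000, so pix_clk_100hz / 10 = 148500 kHz and line_time_in_ns = 1 + (2200 * 1000000) / 148500 = 1 + 14814 = 14815 ns, against a true line time of ~14814.8 ns; the leading 1u presumably biases the truncated division upward so the firmware never sees a short line time.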
+
+void dc_dmub_srv_program_cursor_now(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl;
+ union dmub_rb_cmd cmd;
+
+ if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
+ return;
+
+ if (!pipe || !pipe->stream || !pipe->stream_res.tg)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cntl = &cmd.cursor_offload_stream_ctnl;
+ cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD;
+ cntl->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM;
+ cntl->header.payload_bytes = sizeof(cntl->data);
+ cntl->data.otg_inst = pipe->stream_res.tg->inst;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+}
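
Taken together, the three helpers above form the cursor-offload lifecycle. The ordering below is inferred from the guard conditions in the code, not a documented contract:

/* Illustrative ordering (assumed):
 *   dc_dmub_srv_cursor_offload_init(dc);                           - once, after the DMUB framebuffer and shared state exist
 *   dc_dmub_srv_control_cursor_offload(dc, context, stream, true); - per-stream enable (no wait)
 *   dc_dmub_srv_program_cursor_now(dc, pipe);                      - force an immediate cursor program
 *   dc_dmub_srv_control_cursor_offload(dc, context, stream, false);- per-stream disable (waits for completion)
 */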
+
bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
{
struct dc_context *dc_ctx;
@@ -1269,12 +1360,16 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
new_signals.bits.allow_z10 = 1;
+ // New in IPSv2.0
+ new_signals.bits.allow_ips1z8 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
new_signals.bits.allow_ips1 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
+ // IPSv1.0 only
new_signals.bits.allow_pg = 1;
new_signals.bits.allow_ips1 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
+ // IPSv1.0 only
new_signals.bits.allow_pg = 1;
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
@@ -1286,6 +1381,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
new_signals.bits.allow_z10 = 1;
+ // New in IPSv2.0
+ new_signals.bits.allow_ips1z8 = 1;
} else {
/* RCG only */
new_signals.bits.allow_pg = 0;
@@ -1293,8 +1390,28 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
new_signals.bits.allow_ips2 = 0;
new_signals.bits.allow_z10 = 0;
}
+ } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_Z8_RETENTION) {
+ new_signals.bits.allow_pg = 1;
+ new_signals.bits.allow_ips1 = 1;
+ new_signals.bits.allow_ips2 = 1;
+ new_signals.bits.allow_z10 = 1;
+ }
+ // Setting RCG allow bits (IPSv2.0)
+ if (dc->config.disable_ips_rcg == DMUB_IPS_RCG_ENABLE) {
+ new_signals.bits.allow_ips0_rcg = 1;
+ new_signals.bits.allow_ips1_rcg = 1;
+ } else if (dc->config.disable_ips_rcg == DMUB_IPS0_RCG_DISABLE) {
+ new_signals.bits.allow_ips1_rcg = 1;
+ } else if (dc->config.disable_ips_rcg == DMUB_IPS1_RCG_DISABLE) {
+ new_signals.bits.allow_ips0_rcg = 1;
+ }
+ // IPS dynamic allow bits (IPSv2 change, vpb use case)
+ if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG) {
+ new_signals.bits.allow_dynamic_ips1 = 1;
+ } else if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_ALL) {
+ new_signals.bits.allow_dynamic_ips1 = 1;
+ new_signals.bits.allow_dynamic_ips1_z8 = 1;
}
-
ips_driver->signals = new_signals;
dc_dmub_srv->driver_signals = ips_driver->signals;
}
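
For quick reference, the RCG allow-bit selection added above reduces to the mapping below; note the inverted naming, where DMUB_IPS0_RCG_DISABLE clears only the IPS0 bit:

    disable_ips_rcg           allow_ips0_rcg   allow_ips1_rcg
    DMUB_IPS_RCG_ENABLE       1                1
    DMUB_IPS0_RCG_DISABLE     0                1
    DMUB_IPS1_RCG_DISABLE     1                0
    any other value           0                0

The VPB branch is analogous: DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG sets allow_dynamic_ips1 only, while DMUB_IPS_VPB_ENABLE_ALL also sets allow_dynamic_ips1_z8.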
@@ -1318,7 +1435,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
struct dc_dmub_srv *dc_dmub_srv;
- uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0;
+ uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0, ips1z8_exit_count = 0;
if (dc->debug.dmcub_emulation)
return;
@@ -1338,31 +1455,34 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
rcg_exit_count = ips_fw->rcg_exit_count;
ips1_exit_count = ips_fw->ips1_exit_count;
ips2_exit_count = ips_fw->ips2_exit_count;
+ ips1z8_exit_count = ips_fw->ips1_z8ret_exit_count;
ips_driver->signals.all = 0;
dc_dmub_srv->driver_signals = ips_driver->signals;
DC_LOG_IPS(
- "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u) (count rcg=%u ips1=%u ips2=%u)",
+ "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u ips1z8=%u) (count rcg=%u ips1=%u ips2=%u ips1_z8=%u)",
__func__,
ips_driver->signals.bits.allow_ips1,
ips_driver->signals.bits.allow_ips2,
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit,
+ ips_fw->signals.bits.ips1z8_commit,
ips_fw->rcg_entry_count,
ips_fw->ips1_entry_count,
- ips_fw->ips2_entry_count);
+ ips_fw->ips2_entry_count,
+ ips_fw->ips1_z8ret_entry_count);
/* Note: register access has technically not resumed for DCN here, but we
* need to be message PMFW through our standard register interface.
*/
dc_dmub_srv->needs_idle_wake = false;
- if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
+ if (!dc->caps.ips_v2_support && ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
(!dc->debug.optimize_ips_handshake ||
- ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
+ ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle))) {
DC_LOG_IPS(
- "wait IPS2 eval (ips1_commit=%u ips2_commit=%u)",
+ "wait IPS2 eval (ips1_commit=%u ips2_commit=%u )",
ips_fw->signals.bits.ips1_commit,
ips_fw->signals.bits.ips2_commit);
@@ -1422,28 +1542,31 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
dc_dmub_srv_notify_idle(dc, false);
if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) {
DC_LOG_IPS(
- "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)",
+ "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u ips1z8=%u)",
ips_fw->signals.bits.ips1_commit,
- ips_fw->signals.bits.ips2_commit);
+ ips_fw->signals.bits.ips2_commit,
+ ips_fw->signals.bits.ips1z8_commit);
while (ips_fw->signals.bits.ips1_commit)
udelay(1);
DC_LOG_IPS(
- "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u)",
+ "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u ips1z8=%u)",
ips_fw->signals.bits.ips1_commit,
- ips_fw->signals.bits.ips2_commit);
+ ips_fw->signals.bits.ips2_commit,
+ ips_fw->signals.bits.ips1z8_commit);
}
}
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
- DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u)",
+ DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u ips1z8=%u)",
__func__,
rcg_exit_count,
ips1_exit_count,
- ips2_exit_count);
+ ips2_exit_count,
+ ips1z8_exit_count);
}
void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state)
@@ -1656,7 +1779,7 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com
return result;
}
-void dc_dmub_srv_fams2_update_config(struct dc *dc,
+static void dc_dmub_srv_rb_based_fams2_update_config(struct dc *dc,
struct dc_state *context,
bool enable)
{
@@ -1722,6 +1845,63 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
+static void dc_dmub_srv_ib_based_fams2_update_config(struct dc *dc,
+ struct dc_state *context,
+ bool enable)
+{
+ struct dmub_fams2_config_v2 *config = (struct dmub_fams2_config_v2 *)dc->ctx->dmub_srv->dmub->ib_mem_gart.cpu_addr;
+ union dmub_rb_cmd cmd;
+ uint32_t i;
+
+ memset(config, 0, sizeof(*config));
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.ib_fams2_config.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
+ cmd.ib_fams2_config.header.sub_type = DMUB_CMD__FAMS2_IB_CONFIG;
+
+ cmd.ib_fams2_config.ib_data.src.quad_part = dc->ctx->dmub_srv->dmub->ib_mem_gart.gpu_addr;
+ cmd.ib_fams2_config.ib_data.size = sizeof(*config);
+
+ if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) {
+ /* send global configuration parameters first */
+ memcpy(&config->global, &context->bw_ctx.bw.dcn.fams2_global_config,
+ sizeof(struct dmub_cmd_fams2_global_config));
+
+ /* copy static feature configuration overrides after the bulk copy so it does not clobber them */
+ config->global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery;
+ config->global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip;
+ config->global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug;
+
+ /* construct per-stream configs */
+ for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
+ /* copy stream static base state */
+ memcpy(&config->stream_v1[i].base,
+ &context->bw_ctx.bw.dcn.fams2_stream_base_params[i],
+ sizeof(config->stream_v1[i].base));
+
+ /* copy stream static sub-state */
+ memcpy(&config->stream_v1[i].sub_state,
+ &context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[i],
+ sizeof(config->stream_v1[i].sub_state));
+ }
+ }
+
+ config->global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
+ config->global.features.bits.enable = enable;
+
+ dm_execute_dmub_cmd_list(dc->ctx, 1, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+void dc_dmub_srv_fams2_update_config(struct dc *dc,
+ struct dc_state *context,
+ bool enable)
+{
+ if (dc->debug.fams_version.major == 2)
+ dc_dmub_srv_rb_based_fams2_update_config(dc, context, enable);
+ else if (dc->debug.fams_version.major == 3)
+ dc_dmub_srv_ib_based_fams2_update_config(dc, context, enable);
+}
+
void dc_dmub_srv_fams2_drr_update(struct dc *dc,
uint32_t tg_inst,
uint32_t vtotal_min,
@@ -1847,83 +2027,344 @@ void dc_dmub_srv_fams2_passthrough_flip(
}
}
-bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement)
+
+bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement)
+{
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.ips_residency_cntl.header.type = DMUB_CMD__IPS;
+ cmd.ips_residency_cntl.header.sub_type = DMUB_CMD__IPS_RESIDENCY_CNTL;
+ cmd.ips_residency_cntl.header.payload_bytes = sizeof(struct dmub_cmd_ips_residency_cntl_data);
+
+ // only panel_inst=0 is supported at the moment
+ cmd.ips_residency_cntl.cntl_data.panel_inst = panel_inst;
+ cmd.ips_residency_cntl.cntl_data.start_measurement = start_measurement;
+
+ if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ return false;
+
+ return true;
+}
+
+bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst, struct dmub_ips_residency_info *driver_info,
+ enum ips_residency_mode ips_mode)
{
+ union dmub_rb_cmd cmd;
+ uint32_t bytes = sizeof(struct dmub_ips_residency_info);
+
+ dmub_flush_buffer_mem(&ctx->dmub_srv->dmub->scratch_mem_fb);
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.ips_query_residency_info.header.type = DMUB_CMD__IPS;
+ cmd.ips_query_residency_info.header.sub_type = DMUB_CMD__IPS_QUERY_RESIDENCY_INFO;
+ cmd.ips_query_residency_info.header.payload_bytes = sizeof(struct dmub_cmd_ips_query_residency_info_data);
+
+ cmd.ips_query_residency_info.info_data.dest.quad_part = ctx->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
+ cmd.ips_query_residency_info.info_data.size = bytes;
+ cmd.ips_query_residency_info.info_data.panel_inst = panel_inst;
+ cmd.ips_query_residency_info.info_data.ips_mode = (uint32_t)ips_mode;
+
+ if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) ||
+ cmd.ips_query_residency_info.header.ret_status == 0)
+ return false;
+
+ // copy the result to the output since ret_status != 0 means the command returned data
+ memcpy(driver_info, ctx->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes);
+
+ return true;
+}
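
A sketch of the measurement flow the two helpers above enable. The panel instance follows the "only panel_inst=0" comment; the sampling window and the ips_residency_mode enumerator are assumptions, since the enum's values are not shown in this patch:

/* Assumes <linux/delay.h> for msleep(). */
static void example_measure_ips_residency(const struct dc_context *ctx)
{
	struct dmub_ips_residency_info info;
	enum ips_residency_mode mode = (enum ips_residency_mode)0; /* hypothetical enumerator */

	if (!dc_dmub_srv_ips_residency_cntl(ctx, 0, true))
		return;

	msleep(1000); /* assumed 1 s sampling window */

	if (dc_dmub_srv_ips_residency_cntl(ctx, 0, false) &&
	    dc_dmub_srv_ips_query_residency_info(ctx, 0, &info, mode)) {
		/* consume the residency counters in info here */
	}
}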
+
+bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
bool result;
- if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ if (!dc_dmub_srv->dmub->feature_caps.lsdma_support_in_dmu)
return false;
- result = dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IPS_RESIDENCY,
- start_measurement, NULL, DM_DMUB_WAIT_TYPE_WAIT);
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_INIT_CONFIG;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.init_data.gpu_addr_base.quad_part = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.gpu_addr;
+ lsdma_data->u.init_data.ring_size = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.size;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Init failed in DMUB");
return result;
}
-void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output)
+bool dmub_lsdma_send_linear_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t count
+)
{
- uint32_t i;
- enum dmub_gpint_command command_code;
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.linear_copy_data.count = count - 1; // LSDMA controller expects byte count minus 1
+ lsdma_data->u.linear_copy_data.src_lo = src_addr & 0xFFFFFFFF;
+ lsdma_data->u.linear_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.linear_copy_data.dst_lo = dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.linear_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Linear Copy failed in DMUB");
+
+ return result;
+}
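
One caller-facing detail worth noting from the helper above: the caller passes the plain byte count and the helper programs count - 1 for the controller. An illustrative call, with placeholder addresses:

/* Copy 4 KiB between two GPU addresses; the -1 adjustment is internal. */
dmub_lsdma_send_linear_copy_command(dc_dmub_srv, src_gpu_addr, dst_gpu_addr, 4096);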
+
+bool dmub_lsdma_send_linear_sub_window_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_linear_sub_window_copy_params copy_data
+)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_SUB_WINDOW_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.linear_sub_window_copy_data.tmz = copy_data.tmz;
+ lsdma_data->u.linear_sub_window_copy_data.element_size = copy_data.element_size;
+ lsdma_data->u.linear_sub_window_copy_data.src_lo = copy_data.src_lo;
+ lsdma_data->u.linear_sub_window_copy_data.src_hi = copy_data.src_hi;
+ lsdma_data->u.linear_sub_window_copy_data.src_x = copy_data.src_x;
+ lsdma_data->u.linear_sub_window_copy_data.src_y = copy_data.src_y;
+ lsdma_data->u.linear_sub_window_copy_data.src_pitch = copy_data.src_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.src_slice_pitch = copy_data.src_slice_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.dst_lo = copy_data.dst_lo;
+ lsdma_data->u.linear_sub_window_copy_data.dst_hi = copy_data.dst_hi;
+ lsdma_data->u.linear_sub_window_copy_data.dst_x = copy_data.dst_x;
+ lsdma_data->u.linear_sub_window_copy_data.dst_y = copy_data.dst_y;
+ lsdma_data->u.linear_sub_window_copy_data.dst_pitch = copy_data.dst_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.dst_slice_pitch = copy_data.dst_slice_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.rect_x = copy_data.rect_x;
+ lsdma_data->u.linear_sub_window_copy_data.rect_y = copy_data.rect_y;
+ lsdma_data->u.linear_sub_window_copy_data.src_cache_policy = copy_data.src_cache_policy;
+ lsdma_data->u.linear_sub_window_copy_data.dst_cache_policy = copy_data.dst_cache_policy;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Linear Sub Window Copy failed in DMUB");
+
+ return result;
+}
+
+bool dmub_lsdma_send_tiled_to_tiled_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_send_tiled_to_tiled_copy_command_params params
+)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_TILED_TO_TILED_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.tiled_copy_data.src_addr_lo = params.src_addr & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.src_addr_hi = (params.src_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.dst_addr_lo = params.dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.dst_addr_hi = (params.dst_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.tiled_copy_data.src_x = params.src_x;
+ lsdma_data->u.tiled_copy_data.src_y = params.src_y;
+ lsdma_data->u.tiled_copy_data.dst_x = params.dst_x;
+ lsdma_data->u.tiled_copy_data.dst_y = params.dst_y;
+ lsdma_data->u.tiled_copy_data.src_width = params.src_width;
+ lsdma_data->u.tiled_copy_data.dst_width = params.dst_width;
+ lsdma_data->u.tiled_copy_data.src_swizzle_mode = params.swizzle_mode;
+ lsdma_data->u.tiled_copy_data.dst_swizzle_mode = params.swizzle_mode;
+ lsdma_data->u.tiled_copy_data.src_element_size = params.element_size;
+ lsdma_data->u.tiled_copy_data.dst_element_size = params.element_size;
+ lsdma_data->u.tiled_copy_data.rect_x = params.rect_x;
+ lsdma_data->u.tiled_copy_data.rect_y = params.rect_y;
+ lsdma_data->u.tiled_copy_data.dcc = params.dcc;
+ lsdma_data->u.tiled_copy_data.tmz = params.tmz;
+ lsdma_data->u.tiled_copy_data.read_compress = params.read_compress;
+ lsdma_data->u.tiled_copy_data.write_compress = params.write_compress;
+ lsdma_data->u.tiled_copy_data.src_height = params.src_height;
+ lsdma_data->u.tiled_copy_data.dst_height = params.dst_height;
+ lsdma_data->u.tiled_copy_data.data_format = params.data_format;
+ lsdma_data->u.tiled_copy_data.max_com = params.max_com;
+ lsdma_data->u.tiled_copy_data.max_uncom = params.max_uncom;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Tiled to Tiled Copy failed in DMUB");
+
+ return result;
+}
+
+bool dmub_lsdma_send_pio_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t overlap_disable
+)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
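+	/* The PIO packet header carries the transfer byte count and the overlap-disable flag. */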
+ lsdma_data->u.pio_copy_data.packet.fields.byte_count = byte_count;
+ lsdma_data->u.pio_copy_data.packet.fields.overlap_disable = overlap_disable;
+ lsdma_data->u.pio_copy_data.src_lo = src_addr & 0xFFFFFFFF;
+ lsdma_data->u.pio_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.pio_copy_data.dst_lo = dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.pio_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA PIO Copy failed in DMUB");
+
+ return result;
+}
+
+bool dmub_lsdma_send_pio_constfill_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t data
+)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_CONSTFILL;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
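+	/* constant_fill marks this PIO packet as a fill with 'data' rather than a copy. */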
+ lsdma_data->u.pio_constfill_data.packet.fields.constant_fill = 1;
+ lsdma_data->u.pio_constfill_data.packet.fields.byte_count = byte_count;
+ lsdma_data->u.pio_constfill_data.dst_lo = dst_addr & 0xFFFFFFFF;
+ lsdma_data->u.pio_constfill_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF;
+ lsdma_data->u.pio_constfill_data.data = data;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA PIO Constfill failed in DMUB");
+
+ return result;
+}
+
+bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_POLL_REG_WRITE;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
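+	/* Pass the register address/value pair for the LSDMA poll-reg-write packet. */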
+ lsdma_data->u.reg_write_data.reg_addr = reg_addr;
+ lsdma_data->u.reg_write_data.reg_data = reg_data;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Poll Reg failed in DMUB");
+
+ return result;
+}
+
+bool dc_dmub_srv_is_cursor_offload_enabled(const struct dc *dc)
+{
+ return dc->ctx->dmub_srv && dc->ctx->dmub_srv->cursor_offload_enabled;
+}
+
+void dc_dmub_srv_release_hw(const struct dc *dc)
+{
+ struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
+ union dmub_rb_cmd cmd = {0};
if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return;
- switch (output->ips_mode) {
- case DMUB_IPS_MODE_IPS1_MAX:
- command_code = DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER;
- break;
- case DMUB_IPS_MODE_IPS2:
- command_code = DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER;
- break;
- case DMUB_IPS_MODE_IPS1_RCG:
- command_code = DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER;
- break;
- case DMUB_IPS_MODE_IPS1_ONO2_ON:
- command_code = DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER;
- break;
- default:
- command_code = DMUB_GPINT__INVALID_COMMAND;
- break;
- }
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
+ cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_RELEASE_HW;
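+	/* Payload size counts only the command body, excluding the header. */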
+ cmd.idle_opt_notify_idle.header.payload_bytes =
+ sizeof(cmd.idle_opt_notify_idle) -
+ sizeof(cmd.idle_opt_notify_idle.header);
+
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+void dc_dmub_srv_log_preos_dmcub_info(struct dc_dmub_srv *dc_dmub_srv)
+{
+ struct dmub_srv *dmub;
- if (command_code == DMUB_GPINT__INVALID_COMMAND)
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return;
- for (i = 0; i < GPINT_RETRY_NUM; i++) {
- // false could mean GPINT timeout, in which case we should retry
- if (dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT,
- (uint16_t)(output->ips_mode), &output->residency_percent,
- DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- break;
- udelay(100);
- }
+ dmub = dc_dmub_srv->dmub;
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER,
- (uint16_t)(output->ips_mode),
- &output->entry_counter, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->entry_counter = 0;
-
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO,
- (uint16_t)(output->ips_mode),
- &output->total_active_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_active_time_us[0] = 0;
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI,
- (uint16_t)(output->ips_mode),
- &output->total_active_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_active_time_us[1] = 0;
-
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO,
- (uint16_t)(output->ips_mode),
- &output->total_inactive_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_inactive_time_us[0] = 0;
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI,
- (uint16_t)(output->ips_mode),
- &output->total_inactive_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->total_inactive_time_us[1] = 0;
-
- // NUM_IPS_HISTOGRAM_BUCKETS = 16
- for (i = 0; i < 16; i++)
- if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, command_code, i, &output->histogram[i],
- DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- output->histogram[i] = 0;
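+	/* Log only when a pre-OS DMCUB info snapshot is available. */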
+ if (dmub_srv_get_preos_info(dmub)) {
+ DC_LOG_DEBUG("%s: PreOS DMCUB Info", __func__);
+ DC_LOG_DEBUG("fw_version : 0x%08x", dmub->preos_info.fw_version);
+ DC_LOG_DEBUG("boot_options : 0x%08x", dmub->preos_info.boot_options);
+ DC_LOG_DEBUG("boot_status : 0x%08x", dmub->preos_info.boot_status);
+ DC_LOG_DEBUG("trace_buffer_phy_addr : 0x%016llx", dmub->preos_info.trace_buffer_phy_addr);
+ DC_LOG_DEBUG("trace_buffer_size_bytes : 0x%08x", dmub->preos_info.trace_buffer_size);
+ DC_LOG_DEBUG("fb_base : 0x%016llx", dmub->preos_info.fb_base);
+ DC_LOG_DEBUG("fb_offset : 0x%016llx", dmub->preos_info.fb_offset);
+ }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index ada5c2fb2db3..72e0a41f39f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -56,6 +56,7 @@ struct dc_dmub_srv {
union dmub_shared_state_ips_driver_signals driver_signals;
bool idle_allowed;
bool needs_idle_wake;
+ bool cursor_offload_enabled;
};
bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv);
@@ -210,6 +211,94 @@ void dc_dmub_srv_fams2_passthrough_flip(
struct dc_surface_update *srf_updates,
int surface_count);
+bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv);
+bool dmub_lsdma_send_linear_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t count);
+
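+/* Mirrors the DMUB linear sub-window copy command layout; bitfields pack into 32-bit words. */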
+struct lsdma_linear_sub_window_copy_params {
+ uint32_t src_lo;
+ uint32_t src_hi;
+
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_pitch : 16;
+ uint32_t dst_pitch : 16;
+
+ uint32_t src_slice_pitch;
+ uint32_t dst_slice_pitch;
+
+ uint32_t tmz : 1;
+ uint32_t element_size : 3;
+ uint32_t src_cache_policy : 3;
+ uint32_t dst_cache_policy : 3;
+ uint32_t padding : 22;
+};
+
+bool dmub_lsdma_send_linear_sub_window_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_linear_sub_window_copy_params copy_data
+);
+bool dmub_lsdma_send_pio_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t overlap_disable);
+bool dmub_lsdma_send_pio_constfill_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ uint64_t dst_addr,
+ uint32_t byte_count,
+ uint32_t data);
+
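+/* Mirrors the DMUB tiled-to-tiled copy command; the 9-bit padding completes the final 32-bit word. */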
+struct lsdma_send_tiled_to_tiled_copy_command_params {
+ uint64_t src_addr;
+ uint64_t dst_addr;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t src_width : 16;
+ uint32_t dst_width : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_height : 16;
+ uint32_t dst_height : 16;
+
+ uint32_t data_format : 6;
+ uint32_t swizzle_mode : 5;
+ uint32_t element_size : 3;
+ uint32_t dcc : 1;
+ uint32_t tmz : 1;
+ uint32_t read_compress : 2;
+ uint32_t write_compress : 2;
+ uint32_t max_com : 2;
+ uint32_t max_uncom : 1;
+ uint32_t padding : 9;
+};
+
+bool dmub_lsdma_send_tiled_to_tiled_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_send_tiled_to_tiled_copy_command_params params);
+bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data);
+
/**
* struct ips_residency_info - struct containing info from dmub_ips_residency_stats
*
@@ -223,7 +312,7 @@ void dc_dmub_srv_fams2_passthrough_flip(
* @histogram: Histogram of given IPS state durations - bucket definitions in dmub_ips.c
*/
struct ips_residency_info {
- enum dmub_ips_mode ips_mode;
+ enum ips_residency_mode ips_mode;
unsigned int residency_percent;
unsigned int entry_counter;
unsigned int total_active_time_us[2];
@@ -231,21 +320,58 @@ struct ips_residency_info {
unsigned int histogram[16];
};
+bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement);
+
+bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst,
+ struct dmub_ips_residency_info *driver_info,
+ enum ips_residency_mode ips_mode);
+
/**
- * bool dc_dmub_srv_ips_residency_cntl() - Controls IPS residency measurement status
+ * dc_dmub_srv_cursor_offload_init() - Initializes cursor offload support for the DC instance.
*
- * @dc_dmub_srv: The DC DMUB service pointer
- * @start_measurement: Describes whether to start or stop measurement
+ * @dc: pointer to DC object
+ */
+void dc_dmub_srv_cursor_offload_init(struct dc *dc);
+
+/**
+ * dc_dmub_srv_control_cursor_offload() - Enables or disables cursor offloading for a stream.
+ *
+ * @dc: pointer to DC object
+ * @context: the DC context to reference for pipe allocations
+ * @stream: the stream to control
+ * @enable: true to enable cursor offload, false to disable
+ */
+void dc_dmub_srv_control_cursor_offload(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream, bool enable);
+
+/**
+ * dc_dmub_srv_program_cursor_now() - Requests immediate cursor programming for a given pipe.
+ *
+ * @dc: pointer to DC object
+ * @pipe: top-most pipe for a stream.
+ */
+void dc_dmub_srv_program_cursor_now(struct dc *dc, const struct pipe_ctx *pipe);
+
+/**
+ * dc_dmub_srv_is_cursor_offload_enabled() - Checks if cursor offload is supported.
+ *
+ * @dc: pointer to DC object
+ *
+ * Return: true if cursor offload is supported, false otherwise
+ */
+bool dc_dmub_srv_is_cursor_offload_enabled(const struct dc *dc);
+
+/**
+ * dc_dmub_srv_release_hw() - Notifies DMUB service that HW access is no longer required.
*
- * Return: true if GPINT was sent successfully, false otherwise
+ * @dc: pointer to DC object
*/
-bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement);
+void dc_dmub_srv_release_hw(const struct dc *dc);
/**
- * bool dc_dmub_srv_ips_query_residency_info() - Queries DMCUB for residency info
+ * dc_dmub_srv_log_preos_dmcub_info() - Logs pre-OS DMCUB firmware info.
*
- * @dc_dmub_srv: The DC DMUB service pointer
- * @output: Output struct to copy the the residency info to
+ * @dc_dmub_srv: pointer to the DC DMUB service
*/
-void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output);
+void dc_dmub_srv_log_preos_dmcub_info(struct dc_dmub_srv *dc_dmub_srv);
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 0bad8304ccf6..79e1696def63 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -162,6 +162,11 @@ struct dc_link_settings {
struct dc_tunnel_settings {
bool should_enable_dp_tunneling;
bool should_use_dp_bw_allocation;
+ uint8_t cm_id;
+ uint8_t group_id;
+ uint32_t bw_granularity;
+ uint32_t estimated_bw;
+ uint32_t allocated_bw;
};
union dc_dp_ffe_preset {
@@ -957,11 +962,21 @@ union usb4_driver_bw_cap {
uint8_t raw;
};
+/* DPCD[0xE0021] DP_IN_ADAPTER_TUNNEL_INFORMATION register. */
+union dpia_tunnel_info {
+ struct {
+ uint8_t group_id :3;
+ uint8_t rsvd :5;
+ } bits;
+ uint8_t raw;
+};
+
/* DP Tunneling over USB4 */
struct dpcd_usb4_dp_tunneling_info {
union dp_tun_cap_support dp_tun_cap;
union dpia_info dpia_info;
union usb4_driver_bw_cap driver_bw_cap;
+ union dpia_tunnel_info dpia_tunnel_info;
uint8_t usb4_driver_id;
uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN];
};
@@ -1006,7 +1021,8 @@ union dp_128b_132b_supported_lttpr_link_rates {
union dp_alpm_lttpr_cap {
struct {
uint8_t AUX_LESS_ALPM_SUPPORTED :1;
- uint8_t RESERVED :7;
+ uint8_t ASSR_SUPPORTED :1;
+ uint8_t RESERVED :6;
} bits;
uint8_t raw;
};
@@ -1104,10 +1120,11 @@ union dp_128b_132b_training_aux_rd_interval {
union edp_alpm_caps {
struct {
- uint8_t AUX_WAKE_ALPM_CAP :1;
- uint8_t PM_STATE_2A_SUPPORT :1;
- uint8_t AUX_LESS_ALPM_CAP :1;
- uint8_t RESERVED :5;
+ uint8_t AUX_WAKE_ALPM_CAP :1;
+ uint8_t PM_STATE_2A_SUPPORT :1;
+ uint8_t AUX_LESS_ALPM_CAP :1;
+ uint8_t AUX_LESS_ALPM_ML_PHY_SLEEP_STATUS_SUPPORTED :1;
+ uint8_t RESERVED :4;
} bits;
uint8_t raw;
};
@@ -1140,6 +1157,16 @@ struct dprx_states {
bool cable_id_written;
};
+union dpcd_panel_replay_capability_supported {
+ struct {
+ unsigned char PANEL_REPLAY_SUPPORT :1;
+ unsigned char SELECTIVE_UPDATE_SUPPORT :1;
+ unsigned char EARLY_TRANSPORT_SUPPORT :1;
+ unsigned char RESERVED :5;
+ } bits;
+ unsigned char raw;
+};
+
enum dpcd_downstream_port_max_bpc {
DOWN_STREAM_MAX_8BPC = 0,
DOWN_STREAM_MAX_10BPC,
@@ -1172,8 +1199,8 @@ struct dc_lttpr_caps {
union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates;
union dp_alpm_lttpr_cap alpm;
uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
- uint8_t lttpr_ieee_oui[3];
- uint8_t lttpr_device_id[6];
+ uint8_t lttpr_ieee_oui[3]; // Always read from closest LTTPR to host
+ uint8_t lttpr_device_id[6]; // Always read from closest LTTPR to host
};
struct dc_dongle_dfp_cap_ext {
@@ -1263,10 +1290,12 @@ struct dpcd_caps {
struct edp_psr_info psr_info;
struct replay_info pr_info;
+ union dpcd_panel_replay_capability_supported pr_caps_supported;
uint16_t edp_oled_emission_rate;
union dp_receive_port0_cap receive_port0_cap;
/* Indicates the number of SST links supported by MSO (Multi-Stream Output) */
uint8_t mso_cap_sst_links_supported;
+ uint8_t dp_edp_general_cap_2;
};
union dpcd_sink_ext_caps {
@@ -1328,11 +1357,38 @@ union dpcd_replay_configuration {
unsigned char raw;
};
+union panel_replay_enable_and_configuration_1 {
+ struct {
+ unsigned char PANEL_REPLAY_ENABLE :1;
+ unsigned char PANEL_REPLAY_CRC_ENABLE :1;
+ unsigned char IRQ_HPD_ASSDP_MISSING :1;
+ unsigned char IRQ_HPD_VSCSDP_UNCORRECTABLE_ERROR :1;
+ unsigned char IRQ_HPD_RFB_ERROR :1;
+ unsigned char IRQ_HPD_ACTIVE_FRAME_CRC_ERROR :1;
+ unsigned char PANEL_REPLAY_SELECTIVE_UPDATE_ENABLE :1;
+ unsigned char PANEL_REPLAY_EARLY_TRANSPORT_ENABLE :1;
+ } bits;
+ unsigned char raw;
+};
+
+union panel_replay_enable_and_configuration_2 {
+ struct {
+ unsigned char SINK_REFRESH_RATE_UNLOCK_GRANTED :1;
+ unsigned char RESERVED :1;
+ unsigned char SU_Y_GRANULARITY_EXT_VALUE_ENABLED :1;
+ unsigned char SU_Y_GRANULARITY_EXT_VALUE :4;
+ unsigned char SU_REGION_SCAN_LINE_CAPTURE_INDICATION :1;
+ } bits;
+ unsigned char raw;
+};
+
union dpcd_alpm_configuration {
struct {
unsigned char ENABLE : 1;
unsigned char IRQ_HPD_ENABLE : 1;
- unsigned char RESERVED : 6;
+ unsigned char ALPM_MODE_SEL : 1;
+ unsigned char ACDS_PERIOD_DURATION : 1;
+ unsigned char RESERVED : 4;
} bits;
unsigned char raw;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 7217de258851..5a365bd19933 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -732,7 +732,7 @@ char *dce_version_to_string(const int version)
case DCN_VERSION_3_03:
return "DCN 3.0.3";
case DCN_VERSION_3_1:
- return "DCN 3.1";
+ return "DCN 3.1.2";
case DCN_VERSION_3_14:
return "DCN 3.1.4";
case DCN_VERSION_3_15:
@@ -755,3 +755,8 @@ char *dce_version_to_string(const int version)
return "Unknown";
}
}
+
+bool dc_supports_vrr(const enum dce_version v)
+{
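+	/* VRR is available on DCE 8.0 and newer ASICs. */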
+ return v >= DCE_VERSION_8_0;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index d562ddeca512..667852517246 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -68,7 +68,7 @@ enum dc_plane_addr_type {
struct dc_plane_address {
enum dc_plane_addr_type type;
- bool tmz_surface;
+ uint8_t tmz_surface;
union {
struct{
PHYSICAL_ADDRESS_LOC addr;
@@ -974,6 +974,7 @@ struct dc_crtc_timing {
uint32_t pix_clk_100hz;
uint32_t min_refresh_in_uhz;
+ uint32_t max_refresh_in_uhz;
uint32_t vic;
uint32_t hdmi_vic;
@@ -1103,7 +1104,8 @@ enum mpcc_gamut_remap_mode_select {
enum mpcc_gamut_remap_id {
MPCC_OGAM_GAMUT_REMAP,
MPCC_MCM_FIRST_GAMUT_REMAP,
- MPCC_MCM_SECOND_GAMUT_REMAP
+ MPCC_MCM_SECOND_GAMUT_REMAP,
+ MPCC_RMCM_GAMUT_REMAP,
};
enum cursor_matrix_mode {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index e3a8283b4098..37d1a79e8241 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -128,7 +128,7 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->odm_slice_index = resource_get_odm_slice_index(pipe_ctx);
// Make spl input basic out info output_size width point to stream h active
spl_in->basic_out.output_size.width =
- stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->hblank_borrow;
+ stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
// Make spl input basic out info output_size height point to v active
spl_in->basic_out.output_size.height =
stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
@@ -147,6 +147,8 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->prefer_easf = false;
else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 2)
spl_in->disable_easf = true;
+ else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 3)
+ spl_in->override_easf = true;
/* Translate adaptive sharpening preference */
unsigned int sharpness_setting = pipe_ctx->stream->ctx->dc->debug.force_sharpness;
unsigned int force_sharpness_level = pipe_ctx->stream->ctx->dc->debug.force_sharpness_level;
@@ -156,15 +158,16 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->adaptive_sharpness.enable = true;
spl_in->adaptive_sharpness.sharpness_level = 0;
} else if (sharpness_setting == SHARPNESS_CUSTOM) {
- spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = 0;
- spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = 1750;
- spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = 750;
- spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = 0;
- spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = 3500;
- spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = 1500;
- spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = 0;
- spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = 2750;
- spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = 1500;
+ /* SAT: read sharpness_range from dc_plane_state */
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = plane_state->sharpness_range.sdr_rgb_min;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = plane_state->sharpness_range.sdr_rgb_max;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = plane_state->sharpness_range.sdr_rgb_mid;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = plane_state->sharpness_range.sdr_yuv_min;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = plane_state->sharpness_range.sdr_yuv_max;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = plane_state->sharpness_range.sdr_yuv_mid;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = plane_state->sharpness_range.hdr_rgb_min;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = plane_state->sharpness_range.hdr_rgb_max;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = plane_state->sharpness_range.hdr_rgb_mid;
if (force_sharpness_level > 0) {
if (force_sharpness_level > 10)
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 341d2ffb64b1..321cfe92d799 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -203,6 +203,7 @@ struct dc_stream_state {
struct dc_info_packet hfvsif_infopacket;
struct dc_info_packet vtem_infopacket;
struct dc_info_packet adaptive_sync_infopacket;
+ struct dc_info_packet avi_infopacket;
uint8_t dsc_packed_pps[128];
struct rect src; /* composition area */
struct rect dst; /* stream addressable area */
@@ -335,6 +336,8 @@ struct dc_stream_update {
struct dc_info_packet *hfvsif_infopacket;
struct dc_info_packet *vtem_infopacket;
struct dc_info_packet *adaptive_sync_infopacket;
+ struct dc_info_packet *avi_infopacket;
+
bool *dpms_off;
bool integer_scaling_update;
bool *allow_freesync;
@@ -470,12 +473,11 @@ void dc_enable_stereo(
/* Triggers multi-stream synchronization. */
void dc_trigger_sync(struct dc *dc, struct dc_state *context);
-enum surface_update_type dc_check_update_surfaces_for_stream(
- struct dc *dc,
+struct surface_update_descriptor dc_check_update_surfaces_for_stream(
+ const struct dc_check_config *check_config,
struct dc_surface_update *updates,
int surface_count,
- struct dc_stream_update *stream_update,
- const struct dc_stream_status *stream_status);
+ struct dc_stream_update *stream_update);
/**
* Create a new default stream for the requested sink
@@ -489,8 +491,8 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink);
void dc_stream_retain(struct dc_stream_state *dc_stream);
void dc_stream_release(struct dc_stream_state *dc_stream);
-struct dc_stream_status *dc_stream_get_status(
- struct dc_stream_state *dc_stream);
+struct dc_stream_status *dc_stream_get_status(struct dc_stream_state *dc_stream);
+const struct dc_stream_status *dc_stream_get_status_const(const struct dc_stream_state *dc_stream);
/*******************************************************************************
* Cursor interfaces - To manages the cursor within a stream
@@ -579,6 +581,17 @@ bool dc_stream_set_gamut_remap(struct dc *dc,
bool dc_stream_program_csc_matrix(struct dc *dc,
struct dc_stream_state *stream);
+struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream,
+ bool allocate_one);
+
+void dc_stream_release_3dlut_for_stream(
+ const struct dc *dc,
+ const struct dc_stream_state *stream);
+
+void dc_stream_init_rmcm_3dlut(struct dc *dc);
+
struct pipe_ctx *dc_stream_get_pipe_ctx(struct dc_stream_state *stream);
void dc_dmub_update_dirty_rect(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index a4cd0eb39a3a..f46039f64203 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -175,6 +175,7 @@ struct dc_panel_patch {
unsigned int embedded_tiled_slave;
unsigned int disable_fams;
unsigned int skip_avmute;
+ unsigned int skip_audio_sab_check;
unsigned int mst_start_top_delay;
unsigned int remove_sink_ext_caps;
unsigned int disable_colorimetry;
@@ -184,6 +185,10 @@ struct dc_panel_patch {
unsigned int wait_after_dpcd_poweroff_ms;
};
+/**
+ * struct dc_edid_caps - Capabilities read from EDID.
+ * @analog: Whether the monitor is analog. Used by DVI-I handling.
+ */
struct dc_edid_caps {
/* sink identification */
uint16_t manufacturer_id;
@@ -211,6 +216,8 @@ struct dc_edid_caps {
bool edid_hdmi;
bool hdr_supported;
bool rr_capable;
+ bool scdc_present;
+ bool analog;
struct dc_panel_patch panel_patch;
};
@@ -263,6 +270,7 @@ enum dc_timing_source {
TIMING_SOURCE_EDID_4BYTE,
TIMING_SOURCE_EDID_CEA_DISPLAYID_VTDB,
TIMING_SOURCE_EDID_CEA_RID,
+ TIMING_SOURCE_EDID_DISPLAYID_TYPE5,
TIMING_SOURCE_VBIOS,
TIMING_SOURCE_CV,
TIMING_SOURCE_TV,
@@ -345,7 +353,8 @@ enum dc_connection_type {
dc_connection_none,
dc_connection_single,
dc_connection_mst_branch,
- dc_connection_sst_branch
+ dc_connection_sst_branch,
+ dc_connection_dac_load
};
struct dc_csc_adjustments {
@@ -561,6 +570,12 @@ struct dc_info_packet_128 {
uint8_t sb[128];
};
+struct dc_edid_read_policy {
+ uint32_t max_retry_count;
+ uint32_t delay_time_ms;
+ uint32_t ignore_checksum;
+};
+
#define DC_PLANE_UPDATE_TIMES_MAX 10
struct dc_plane_flip_time {
@@ -569,6 +584,12 @@ struct dc_plane_flip_time {
unsigned int prev_update_time_in_us;
};
+enum dc_alpm_mode {
+ DC_ALPM_AUXWAKE = 0,
+ DC_ALPM_AUXLESS = 1,
+ DC_ALPM_UNSUPPORTED = 0xF,
+};
+
enum dc_psr_state {
PSR_STATE0 = 0x0,
PSR_STATE1,
@@ -614,6 +635,7 @@ struct psr_config {
unsigned int line_time_in_us;
uint8_t rate_control_caps;
uint16_t dsc_slice_height;
+ bool os_request_force_ffu;
};
union dmcu_psr_level {
@@ -726,6 +748,7 @@ struct psr_context {
unsigned int line_time_in_us;
uint8_t rate_control_caps;
uint16_t dsc_slice_height;
+ bool os_request_force_ffu;
};
struct colorspace_transform {
@@ -918,6 +941,12 @@ enum dc_psr_version {
DC_PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
};
+enum dc_replay_version {
+ DC_FREESYNC_REPLAY = 0,
+ DC_VESA_PANEL_REPLAY = 1,
+ DC_REPLAY_VERSION_UNSUPPORTED = 0xFF,
+};
+
/* Possible values of display_endpoint_id.endpoint */
enum display_endpoint_type {
DISPLAY_ENDPOINT_PHY = 0, /* Physical connector. */
@@ -1070,6 +1099,7 @@ enum replay_FW_Message_type {
Replay_Set_Residency_Frameupdate_Timer,
Replay_Set_Pseudo_VTotal,
Replay_Disabled_Adaptive_Sync_SDP,
+ Replay_Set_Version,
Replay_Set_General_Cmd,
};
@@ -1105,6 +1135,8 @@ union replay_low_refresh_rate_enable_options {
};
struct replay_config {
+ /* Replay version */
+ enum dc_replay_version replay_version;
/* Replay feature is supported */
bool replay_supported;
/* Replay caps support DPCD & EDID caps*/
@@ -1135,6 +1167,10 @@ struct replay_config {
bool low_rr_supported;
/* Replay Video Conferencing Optimization Enabled */
bool replay_video_conferencing_optimization_enabled;
+ /* Replay alpm mode */
+ enum dc_alpm_mode alpm_mode;
+ /* OS requests forcing full frame updates (FFU) */
+ bool os_request_force_ffu;
};
/* Replay feature flags*/
@@ -1157,6 +1193,10 @@ struct replay_settings {
uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
/* Defer Update Coasting vtotal table */
uint32_t defer_update_coasting_vtotal_table[PR_COASTING_TYPE_NUM];
+ /* Skip frame number table */
+ uint32_t frame_skip_number_table[PR_COASTING_TYPE_NUM];
+ /* Defer skip frame number table */
+ uint32_t defer_frame_skip_number_table[PR_COASTING_TYPE_NUM];
/* Maximum link off frame count */
uint32_t link_off_frame_count;
/* Replay pseudo vtotal for low refresh rate*/
@@ -1165,6 +1205,8 @@ struct replay_settings {
uint16_t last_pseudo_vtotal;
/* Replay desync error */
uint32_t replay_desync_error_fail_count;
+ /* The frame skip number DAL sends to DMUB */
+ uint16_t frame_skip_number;
};
/* To split out "global" and "per-panel" config settings.
@@ -1197,6 +1239,7 @@ struct dc_panel_config {
bool rc_disable;
bool rc_allow_static_screen;
bool rc_allow_fullscreen_VPB;
+ bool read_psrcap_again;
unsigned int replay_enable_option;
} psr;
/* ABM */
@@ -1255,7 +1298,6 @@ enum dc_cm2_gpu_mem_layout {
enum dc_cm2_gpu_mem_pixel_component_order {
DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA,
- DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA
};
enum dc_cm2_gpu_mem_format {
@@ -1277,7 +1319,6 @@ struct dc_cm2_gpu_mem_format_parameters {
enum dc_cm2_gpu_mem_size {
DC_CM2_GPU_MEM_SIZE_171717,
- DC_CM2_GPU_MEM_SIZE_333333,
DC_CM2_GPU_MEM_SIZE_TRANSFORMED,
};
@@ -1315,6 +1356,7 @@ struct dc_cm2_func_luts {
bool mpc_3dlut_enable;
bool rmcm_3dlut_enable;
bool mpc_mcm_post_blend;
+ uint8_t rmcm_tmz;
} lut3d_data;
const struct dc_transfer_func *lut1d_func;
};
@@ -1372,4 +1414,19 @@ struct set_backlight_level_params {
uint8_t aux_inst;
};
+enum dc_validate_mode {
+ /* validate the mode and program HW */
+ DC_VALIDATE_MODE_AND_PROGRAMMING = 0,
+ /* only validate the mode */
+ DC_VALIDATE_MODE_ONLY = 1,
+ /* validate the mode and get the max state (voltage level) */
+ DC_VALIDATE_MODE_AND_STATE_INDEX = 2,
+};
+
+struct dc_validation_dpia_set {
+ const struct dc_link *link;
+ const struct dc_tunnel_settings *tunnel_settings;
+ uint32_t required_bw;
+};
+
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c
index 5999b2da3a01..33d8bd91cb01 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c
@@ -148,7 +148,7 @@ struct dccg *dccg2_create(
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask)
{
- struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC);
+ struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
struct dccg *base;
if (dccg_dcn == NULL) {
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
index a9b88f5e0c04..8bdffd9ff31b 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
@@ -425,7 +425,69 @@ struct dccg_mask {
uint32_t SYMCLKD_CLOCK_ENABLE; \
uint32_t SYMCLKE_CLOCK_ENABLE; \
uint32_t DP_DTO_MODULO[MAX_PIPES]; \
- uint32_t DP_DTO_PHASE[MAX_PIPES]
+ uint32_t DP_DTO_PHASE[MAX_PIPES]; \
+ uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL; \
+ uint32_t DCCG_AUDIO_DTO0_MODULE; \
+ uint32_t DCCG_AUDIO_DTO0_PHASE; \
+ uint32_t DCCG_AUDIO_DTO1_MODULE; \
+ uint32_t DCCG_AUDIO_DTO1_PHASE; \
+ uint32_t DCCG_CAC_STATUS; \
+ uint32_t DCCG_CAC_STATUS2; \
+ uint32_t DCCG_DISP_CNTL_REG; \
+ uint32_t DCCG_DS_CNTL; \
+ uint32_t DCCG_DS_DTO_INCR; \
+ uint32_t DCCG_DS_DTO_MODULO; \
+ uint32_t DCCG_DS_HW_CAL_INTERVAL; \
+ uint32_t DCCG_GTC_CNTL; \
+ uint32_t DCCG_GTC_CURRENT; \
+ uint32_t DCCG_GTC_DTO_INCR; \
+ uint32_t DCCG_GTC_DTO_MODULO; \
+ uint32_t DCCG_PERFMON_CNTL; \
+ uint32_t DCCG_PERFMON_CNTL2; \
+ uint32_t DCCG_SOFT_RESET; \
+ uint32_t DCCG_TEST_CLK_SEL; \
+ uint32_t DCCG_VSYNC_CNT_CTRL; \
+ uint32_t DCCG_VSYNC_CNT_INT_CTRL; \
+ uint32_t DCCG_VSYNC_OTG0_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG1_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG2_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG3_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG4_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG5_LATCH_VALUE; \
+ uint32_t DISPCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t DP_DTO_DBUF_EN; \
+ uint32_t DPIACLK_540M_DTO_MODULO; \
+ uint32_t DPIACLK_540M_DTO_PHASE; \
+ uint32_t DPIACLK_810M_DTO_MODULO; \
+ uint32_t DPIACLK_810M_DTO_PHASE; \
+ uint32_t DPIACLK_DTO_CNTL; \
+ uint32_t DPIASYMCLK_CNTL; \
+ uint32_t DPPCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t DPREFCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t DPREFCLK_CNTL; \
+ uint32_t DTBCLK_DTO_DBUF_EN; \
+ uint32_t FORCE_SYMCLK_DISABLE; \
+ uint32_t HDMICHARCLK0_CLOCK_CNTL; \
+ uint32_t MICROSECOND_TIME_BASE_DIV; \
+ uint32_t MILLISECOND_TIME_BASE_DIV; \
+ uint32_t OTG0_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG0_PIXEL_RATE_CNTL; \
+ uint32_t OTG1_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG1_PIXEL_RATE_CNTL; \
+ uint32_t OTG2_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG2_PIXEL_RATE_CNTL; \
+ uint32_t OTG3_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG3_PIXEL_RATE_CNTL; \
+ uint32_t PHYPLLA_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLB_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLC_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLD_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLE_PIXCLK_RESYNC_CNTL; \
+ uint32_t REFCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t SOCCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t SYMCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t SYMCLK_PSP_CNTL
+
struct dccg_registers {
DCCG_REG_VARIABLE_LIST;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
index 8664f0c4c9b7..97df04b7e39d 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
@@ -709,6 +709,128 @@ void dccg31_otg_drop_pixel(struct dccg *dccg,
OTG_DROP_PIXEL[otg_inst], 1);
}
+void dccg31_read_reg_state(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
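+	/* Raw snapshot of DCCG registers for debug dumps; values are read without interpretation. */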
+ dccg_reg_state->dc_mem_global_pwr_req_cntl = REG_READ(DC_MEM_GLOBAL_PWR_REQ_CNTL);
+ dccg_reg_state->dccg_audio_dtbclk_dto_modulo = REG_READ(DCCG_AUDIO_DTBCLK_DTO_MODULO);
+ dccg_reg_state->dccg_audio_dtbclk_dto_phase = REG_READ(DCCG_AUDIO_DTBCLK_DTO_PHASE);
+ dccg_reg_state->dccg_audio_dto_source = REG_READ(DCCG_AUDIO_DTO_SOURCE);
+ dccg_reg_state->dccg_audio_dto0_module = REG_READ(DCCG_AUDIO_DTO0_MODULE);
+ dccg_reg_state->dccg_audio_dto0_phase = REG_READ(DCCG_AUDIO_DTO0_PHASE);
+ dccg_reg_state->dccg_audio_dto1_module = REG_READ(DCCG_AUDIO_DTO1_MODULE);
+ dccg_reg_state->dccg_audio_dto1_phase = REG_READ(DCCG_AUDIO_DTO1_PHASE);
+ dccg_reg_state->dccg_cac_status = REG_READ(DCCG_CAC_STATUS);
+ dccg_reg_state->dccg_cac_status2 = REG_READ(DCCG_CAC_STATUS2);
+ dccg_reg_state->dccg_disp_cntl_reg = REG_READ(DCCG_DISP_CNTL_REG);
+ dccg_reg_state->dccg_ds_cntl = REG_READ(DCCG_DS_CNTL);
+ dccg_reg_state->dccg_ds_dto_incr = REG_READ(DCCG_DS_DTO_INCR);
+ dccg_reg_state->dccg_ds_dto_modulo = REG_READ(DCCG_DS_DTO_MODULO);
+ dccg_reg_state->dccg_ds_hw_cal_interval = REG_READ(DCCG_DS_HW_CAL_INTERVAL);
+ dccg_reg_state->dccg_gate_disable_cntl = REG_READ(DCCG_GATE_DISABLE_CNTL);
+ dccg_reg_state->dccg_gate_disable_cntl2 = REG_READ(DCCG_GATE_DISABLE_CNTL2);
+ dccg_reg_state->dccg_gate_disable_cntl3 = REG_READ(DCCG_GATE_DISABLE_CNTL3);
+ dccg_reg_state->dccg_gate_disable_cntl4 = REG_READ(DCCG_GATE_DISABLE_CNTL4);
+ dccg_reg_state->dccg_gate_disable_cntl5 = REG_READ(DCCG_GATE_DISABLE_CNTL5);
+ dccg_reg_state->dccg_gate_disable_cntl6 = REG_READ(DCCG_GATE_DISABLE_CNTL6);
+ dccg_reg_state->dccg_global_fgcg_rep_cntl = REG_READ(DCCG_GLOBAL_FGCG_REP_CNTL);
+ dccg_reg_state->dccg_gtc_cntl = REG_READ(DCCG_GTC_CNTL);
+ dccg_reg_state->dccg_gtc_current = REG_READ(DCCG_GTC_CURRENT);
+ dccg_reg_state->dccg_gtc_dto_incr = REG_READ(DCCG_GTC_DTO_INCR);
+ dccg_reg_state->dccg_gtc_dto_modulo = REG_READ(DCCG_GTC_DTO_MODULO);
+ dccg_reg_state->dccg_perfmon_cntl = REG_READ(DCCG_PERFMON_CNTL);
+ dccg_reg_state->dccg_perfmon_cntl2 = REG_READ(DCCG_PERFMON_CNTL2);
+ dccg_reg_state->dccg_soft_reset = REG_READ(DCCG_SOFT_RESET);
+ dccg_reg_state->dccg_test_clk_sel = REG_READ(DCCG_TEST_CLK_SEL);
+ dccg_reg_state->dccg_vsync_cnt_ctrl = REG_READ(DCCG_VSYNC_CNT_CTRL);
+ dccg_reg_state->dccg_vsync_cnt_int_ctrl = REG_READ(DCCG_VSYNC_CNT_INT_CTRL);
+ dccg_reg_state->dccg_vsync_otg0_latch_value = REG_READ(DCCG_VSYNC_OTG0_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg1_latch_value = REG_READ(DCCG_VSYNC_OTG1_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg2_latch_value = REG_READ(DCCG_VSYNC_OTG2_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg3_latch_value = REG_READ(DCCG_VSYNC_OTG3_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg4_latch_value = REG_READ(DCCG_VSYNC_OTG4_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg5_latch_value = REG_READ(DCCG_VSYNC_OTG5_LATCH_VALUE);
+ dccg_reg_state->dispclk_cgtt_blk_ctrl_reg = REG_READ(DISPCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->dispclk_freq_change_cntl = REG_READ(DISPCLK_FREQ_CHANGE_CNTL);
+ dccg_reg_state->dp_dto_dbuf_en = REG_READ(DP_DTO_DBUF_EN);
+ dccg_reg_state->dp_dto0_modulo = REG_READ(DP_DTO_MODULO[0]);
+ dccg_reg_state->dp_dto0_phase = REG_READ(DP_DTO_PHASE[0]);
+ dccg_reg_state->dp_dto1_modulo = REG_READ(DP_DTO_MODULO[1]);
+ dccg_reg_state->dp_dto1_phase = REG_READ(DP_DTO_PHASE[1]);
+ dccg_reg_state->dp_dto2_modulo = REG_READ(DP_DTO_MODULO[2]);
+ dccg_reg_state->dp_dto2_phase = REG_READ(DP_DTO_PHASE[2]);
+ dccg_reg_state->dp_dto3_modulo = REG_READ(DP_DTO_MODULO[3]);
+ dccg_reg_state->dp_dto3_phase = REG_READ(DP_DTO_PHASE[3]);
+ dccg_reg_state->dpiaclk_540m_dto_modulo = REG_READ(DPIACLK_540M_DTO_MODULO);
+ dccg_reg_state->dpiaclk_540m_dto_phase = REG_READ(DPIACLK_540M_DTO_PHASE);
+ dccg_reg_state->dpiaclk_810m_dto_modulo = REG_READ(DPIACLK_810M_DTO_MODULO);
+ dccg_reg_state->dpiaclk_810m_dto_phase = REG_READ(DPIACLK_810M_DTO_PHASE);
+ dccg_reg_state->dpiaclk_dto_cntl = REG_READ(DPIACLK_DTO_CNTL);
+ dccg_reg_state->dpiasymclk_cntl = REG_READ(DPIASYMCLK_CNTL);
+ dccg_reg_state->dppclk_cgtt_blk_ctrl_reg = REG_READ(DPPCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->dppclk_ctrl = REG_READ(DPPCLK_CTRL);
+ dccg_reg_state->dppclk_dto_ctrl = REG_READ(DPPCLK_DTO_CTRL);
+ dccg_reg_state->dppclk0_dto_param = REG_READ(DPPCLK_DTO_PARAM[0]);
+ dccg_reg_state->dppclk1_dto_param = REG_READ(DPPCLK_DTO_PARAM[1]);
+ dccg_reg_state->dppclk2_dto_param = REG_READ(DPPCLK_DTO_PARAM[2]);
+ dccg_reg_state->dppclk3_dto_param = REG_READ(DPPCLK_DTO_PARAM[3]);
+ dccg_reg_state->dprefclk_cgtt_blk_ctrl_reg = REG_READ(DPREFCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->dprefclk_cntl = REG_READ(DPREFCLK_CNTL);
+ dccg_reg_state->dpstreamclk_cntl = REG_READ(DPSTREAMCLK_CNTL);
+ dccg_reg_state->dscclk_dto_ctrl = REG_READ(DSCCLK_DTO_CTRL);
+ dccg_reg_state->dscclk0_dto_param = REG_READ(DSCCLK0_DTO_PARAM);
+ dccg_reg_state->dscclk1_dto_param = REG_READ(DSCCLK1_DTO_PARAM);
+ dccg_reg_state->dscclk2_dto_param = REG_READ(DSCCLK2_DTO_PARAM);
+ dccg_reg_state->dscclk3_dto_param = REG_READ(DSCCLK3_DTO_PARAM);
+ dccg_reg_state->dtbclk_dto_dbuf_en = REG_READ(DTBCLK_DTO_DBUF_EN);
+ dccg_reg_state->dtbclk_dto0_modulo = REG_READ(DTBCLK_DTO_MODULO[0]);
+ dccg_reg_state->dtbclk_dto0_phase = REG_READ(DTBCLK_DTO_PHASE[0]);
+ dccg_reg_state->dtbclk_dto1_modulo = REG_READ(DTBCLK_DTO_MODULO[1]);
+ dccg_reg_state->dtbclk_dto1_phase = REG_READ(DTBCLK_DTO_PHASE[1]);
+ dccg_reg_state->dtbclk_dto2_modulo = REG_READ(DTBCLK_DTO_MODULO[2]);
+ dccg_reg_state->dtbclk_dto2_phase = REG_READ(DTBCLK_DTO_PHASE[2]);
+ dccg_reg_state->dtbclk_dto3_modulo = REG_READ(DTBCLK_DTO_MODULO[3]);
+ dccg_reg_state->dtbclk_dto3_phase = REG_READ(DTBCLK_DTO_PHASE[3]);
+ dccg_reg_state->dtbclk_p_cntl = REG_READ(DTBCLK_P_CNTL);
+ dccg_reg_state->force_symclk_disable = REG_READ(FORCE_SYMCLK_DISABLE);
+ dccg_reg_state->hdmicharclk0_clock_cntl = REG_READ(HDMICHARCLK0_CLOCK_CNTL);
+ dccg_reg_state->hdmistreamclk_cntl = REG_READ(HDMISTREAMCLK_CNTL);
+ dccg_reg_state->hdmistreamclk0_dto_param = REG_READ(HDMISTREAMCLK0_DTO_PARAM);
+ dccg_reg_state->microsecond_time_base_div = REG_READ(MICROSECOND_TIME_BASE_DIV);
+ dccg_reg_state->millisecond_time_base_div = REG_READ(MILLISECOND_TIME_BASE_DIV);
+ dccg_reg_state->otg_pixel_rate_div = REG_READ(OTG_PIXEL_RATE_DIV);
+ dccg_reg_state->otg0_phypll_pixel_rate_cntl = REG_READ(OTG0_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg0_pixel_rate_cntl = REG_READ(OTG0_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg1_phypll_pixel_rate_cntl = REG_READ(OTG1_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg1_pixel_rate_cntl = REG_READ(OTG1_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg2_phypll_pixel_rate_cntl = REG_READ(OTG2_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg2_pixel_rate_cntl = REG_READ(OTG2_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg3_phypll_pixel_rate_cntl = REG_READ(OTG3_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg3_pixel_rate_cntl = REG_READ(OTG3_PIXEL_RATE_CNTL);
+ dccg_reg_state->phyasymclk_clock_cntl = REG_READ(PHYASYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phybsymclk_clock_cntl = REG_READ(PHYBSYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phycsymclk_clock_cntl = REG_READ(PHYCSYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phydsymclk_clock_cntl = REG_READ(PHYDSYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phyesymclk_clock_cntl = REG_READ(PHYESYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phyplla_pixclk_resync_cntl = REG_READ(PHYPLLA_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phypllb_pixclk_resync_cntl = REG_READ(PHYPLLB_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phypllc_pixclk_resync_cntl = REG_READ(PHYPLLC_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phyplld_pixclk_resync_cntl = REG_READ(PHYPLLD_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phyplle_pixclk_resync_cntl = REG_READ(PHYPLLE_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->refclk_cgtt_blk_ctrl_reg = REG_READ(REFCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->socclk_cgtt_blk_ctrl_reg = REG_READ(SOCCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->symclk_cgtt_blk_ctrl_reg = REG_READ(SYMCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->symclk_psp_cntl = REG_READ(SYMCLK_PSP_CNTL);
+ dccg_reg_state->symclk32_le_cntl = REG_READ(SYMCLK32_LE_CNTL);
+ dccg_reg_state->symclk32_se_cntl = REG_READ(SYMCLK32_SE_CNTL);
+ dccg_reg_state->symclka_clock_enable = REG_READ(SYMCLKA_CLOCK_ENABLE);
+ dccg_reg_state->symclkb_clock_enable = REG_READ(SYMCLKB_CLOCK_ENABLE);
+ dccg_reg_state->symclkc_clock_enable = REG_READ(SYMCLKC_CLOCK_ENABLE);
+ dccg_reg_state->symclkd_clock_enable = REG_READ(SYMCLKD_CLOCK_ENABLE);
+ dccg_reg_state->symclke_clock_enable = REG_READ(SYMCLKE_CLOCK_ENABLE);
+}
+
static const struct dccg_funcs dccg31_funcs = {
.update_dpp_dto = dccg31_update_dpp_dto,
.get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
@@ -727,6 +849,7 @@ static const struct dccg_funcs dccg31_funcs = {
.set_dispclk_change_mode = dccg31_set_dispclk_change_mode,
.disable_dsc = dccg31_disable_dscclk,
.enable_dsc = dccg31_enable_dscclk,
+ .dccg_read_reg_state = dccg31_read_reg_state,
};
struct dccg *dccg31_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
index cd261051dc2c..bf659920d4cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
@@ -236,4 +236,6 @@ void dccg31_disable_dscclk(struct dccg *dccg, int inst);
void dccg31_enable_dscclk(struct dccg *dccg, int inst);
+void dccg31_read_reg_state(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state);
+
#endif //__DCN31_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c
index 8f6edd8e9beb..ef3db6beba25 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c
@@ -377,7 +377,8 @@ static const struct dccg_funcs dccg314_funcs = {
.get_pixel_rate_div = dccg314_get_pixel_rate_div,
.trigger_dio_fifo_resync = dccg314_trigger_dio_fifo_resync,
.set_valid_pixel_rate = dccg314_set_valid_pixel_rate,
- .set_dtbclk_p_src = dccg314_set_dtbclk_p_src
+ .set_dtbclk_p_src = dccg314_set_dtbclk_p_src,
+ .dccg_read_reg_state = dccg31_read_reg_state
};
struct dccg *dccg314_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
index 60ea1d248deb..a609635f35db 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
@@ -74,8 +74,7 @@
SR(DCCG_GATE_DISABLE_CNTL3),\
SR(HDMISTREAMCLK0_DTO_PARAM),\
SR(OTG_PIXEL_RATE_DIV),\
- SR(DTBCLK_P_CNTL),\
- SR(DCCG_AUDIO_DTO_SOURCE)
+ SR(DTBCLK_P_CNTL)
#define DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
index 58c84f555c0f..bd2f528137b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
@@ -39,6 +39,7 @@
#define CTX \
dccg_dcn->base.ctx
+#include "logger_types.h"
#define DC_LOGGER \
dccg->ctx->logger
@@ -133,30 +134,34 @@ enum dsc_clk_source {
};
-static void dccg35_set_dsc_clk_rcg(struct dccg *dccg, int inst, bool enable)
+static void dccg35_set_dsc_clk_rcg(struct dccg *dccg, int inst, bool allow_rcg)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc && enable)
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc && allow_rcg)
return;
switch (inst) {
case 0:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
case 1:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
case 2:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
case 3:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
default:
BREAK_TO_DEBUGGER();
return;
}
+
+ /* Wait for clock to ramp */
+ if (!allow_rcg)
+ udelay(10);
}
static void dccg35_set_symclk32_se_rcg(
@@ -385,35 +390,34 @@ static void dccg35_set_dtbclk_p_rcg(struct dccg *dccg, int inst, bool enable)
}
}
-static void dccg35_set_dppclk_rcg(struct dccg *dccg,
- int inst, bool enable)
+static void dccg35_set_dppclk_rcg(struct dccg *dccg, int inst, bool allow_rcg)
{
-
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
-
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && enable)
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && allow_rcg)
return;
switch (inst) {
case 0:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
case 1:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
case 2:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
case 3:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
default:
BREAK_TO_DEBUGGER();
break;
}
- //DC_LOG_DEBUG("%s: inst(%d) DPPCLK rcg_disable: %d\n", __func__, inst, enable ? 0 : 1);
+ /* Wait for clock to ramp */
+ if (!allow_rcg)
+ udelay(10);
}
static void dccg35_set_dpstreamclk_rcg(
@@ -1110,6 +1114,16 @@ static void dccg35_trigger_dio_fifo_resync(struct dccg *dccg)
if (dispclk_rdivider_value != 0)
REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
}
+static void dccg35_wait_for_dentist_change_done(
+ struct dccg *dccg)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ uint32_t dentist_dispclk_value = REG_READ(DENTIST_DISPCLK_CNTL);
+
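+	/* Write back the current value, then poll until DENTIST_DISPCLK_CHG_DONE is set (50 us between polls, up to 2000 tries). */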
+ REG_WRITE(DENTIST_DISPCLK_CNTL, dentist_dispclk_value);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
+}
static void dcn35_set_dppclk_enable(struct dccg *dccg,
uint32_t dpp_inst, uint32_t enable)
@@ -1133,7 +1147,7 @@ static void dcn35_set_dppclk_enable(struct dccg *dccg,
default:
break;
}
- //DC_LOG_DEBUG("%s: dpp_inst(%d) DPPCLK_EN = %d\n", __func__, dpp_inst, enable);
+ DC_LOG_DEBUG("%s: dpp_inst(%d) DPPCLK_EN = %d\n", __func__, dpp_inst, enable);
}
@@ -1170,39 +1184,41 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst,
dcn35_set_dppclk_enable(dccg, dpp_inst, true);
} else {
dcn35_set_dppclk_enable(dccg, dpp_inst, false);
- /*we have this in hwss: disable_plane*/
- //dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
}
+ udelay(10);
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg,
- uint32_t dpp_inst, uint32_t enable)
+ uint32_t dpp_inst, uint32_t disallow_rcg)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && !disallow_rcg)
return;
switch (dpp_inst) {
case 0:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, disallow_rcg);
break;
case 1:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, disallow_rcg);
break;
case 2:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, disallow_rcg);
break;
case 3:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, disallow_rcg);
break;
default:
break;
}
- //DC_LOG_DEBUG("%s: dpp_inst(%d) rcg: %d\n", __func__, dpp_inst, enable);
+ /* Wait for clock to ramp */
+ if (disallow_rcg)
+ udelay(10);
}
static void dccg35_get_pixel_rate_div(
@@ -1294,6 +1310,8 @@ static void dccg35_set_pixel_rate_div(
BREAK_TO_DEBUGGER();
return;
}
+ if (otg_inst < 4)
+ dccg35_wait_for_dentist_change_done(dccg);
}
static void dccg35_set_dtbclk_p_src(
@@ -1401,7 +1419,11 @@ static void dccg35_set_dtbclk_dto(
* PIPEx_DTO_SRC_SEL should not be programmed during DTBCLK update since OTG may still be on, and the
* programming is handled in program_pix_clk() regardless, so it can be removed from here.
*/
- } else {
+ DC_LOG_DEBUG("%s: OTG%d DTBCLK DTO enabled: pixclk_khz=%d, ref_dtbclk_khz=%d, req_dtbclk_khz=%d, phase=%d, modulo=%d\n",
+ __func__, params->otg_inst, params->pixclk_khz,
+ params->ref_dtbclk_khz, req_dtbclk_khz, phase, modulo);
+
+ } else if (!params->ref_dtbclk_khz && !req_dtbclk_khz) {
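+		/* Allow DTBCLK gating and clear the DTO only when neither a reference nor a requested rate remains. */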
switch (params->otg_inst) {
case 0:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 0);
@@ -1426,6 +1448,8 @@ static void dccg35_set_dtbclk_dto(
REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);
REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);
+
+ DC_LOG_DEBUG("%s: OTG%d DTBCLK DTO disabled\n", __func__, params->otg_inst);
}
}
@@ -1470,6 +1494,8 @@ static void dccg35_set_dpstreamclk(
BREAK_TO_DEBUGGER();
return;
}
+ DC_LOG_DEBUG("%s: dp_hpo_inst(%d) DPSTREAMCLK_EN = %d, DPSTREAMCLK_SRC_SEL = %d\n",
+ __func__, dp_hpo_inst, (src == REFCLK) ? 0 : 1, otg_inst);
}
@@ -1509,6 +1535,8 @@ static void dccg35_set_dpstreamclk_root_clock_gating(
BREAK_TO_DEBUGGER();
return;
}
+ DC_LOG_DEBUG("%s: dp_hpo_inst(%d) DPSTREAMCLK_ROOT_GATE_DISABLE = %d\n",
+ __func__, dp_hpo_inst, enable ? 1 : 0);
}
@@ -1548,7 +1576,7 @@ static void dccg35_set_physymclk_root_clock_gating(
BREAK_TO_DEBUGGER();
return;
}
- //DC_LOG_DEBUG("%s: dpp_inst(%d) PHYESYMCLK_ROOT_GATE_DISABLE:\n", __func__, phy_inst, enable ? 0 : 1);
+ DC_LOG_DEBUG("%s: dpp_inst(%d) PHYESYMCLK_ROOT_GATE_DISABLE: %d\n", __func__, phy_inst, enable ? 0 : 1);
}
@@ -1621,6 +1649,8 @@ static void dccg35_set_physymclk(
BREAK_TO_DEBUGGER();
return;
}
+ DC_LOG_DEBUG("%s: phy_inst(%d) PHYxSYMCLK_EN = %d, PHYxSYMCLK_SRC_SEL = %d\n",
+ __func__, phy_inst, force_enable ? 1 : 0, clk_src);
}
static void dccg35_set_valid_pixel_rate(
@@ -1646,7 +1676,7 @@ static void dccg35_dpp_root_clock_control(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (dccg->dpp_clock_gated[dpp_inst] == clock_on)
+ if (dccg->dpp_clock_gated[dpp_inst] != clock_on)
return;
if (clock_on) {
@@ -1664,10 +1694,14 @@ static void dccg35_dpp_root_clock_control(
DPPCLK0_DTO_PHASE, 0,
DPPCLK0_DTO_MODULO, 1);
/*we have this in hwss: disable_plane*/
- //dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
}
+ /* Wait for clock to fully ramp */
+ udelay(10);
+
dccg->dpp_clock_gated[dpp_inst] = !clock_on;
+ DC_LOG_DEBUG("%s: dpp_inst(%d) clock_on = %d\n", __func__, dpp_inst, clock_on);
}
static void dccg35_disable_symclk32_se(
@@ -1726,6 +1760,7 @@ static void dccg35_disable_symclk32_se(
BREAK_TO_DEBUGGER();
return;
}
+
}
static void dccg35_init_cb(struct dccg *dccg)
@@ -1733,7 +1768,6 @@ static void dccg35_init_cb(struct dccg *dccg)
(void)dccg;
/* Any RCG should be done when driver enter low power mode*/
}
-
void dccg35_init(struct dccg *dccg)
{
int otg_inst;
@@ -1748,6 +1782,8 @@ void dccg35_init(struct dccg *dccg)
for (otg_inst = 0; otg_inst < 2; otg_inst++) {
dccg31_disable_symclk32_le(dccg, otg_inst);
dccg31_set_symclk32_le_root_clock_gating(dccg, otg_inst, false);
+ DC_LOG_DEBUG("%s: OTG%d SYMCLK32_LE disabled and root clock gating disabled\n",
+ __func__, otg_inst);
}
// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
@@ -1760,6 +1796,8 @@ void dccg35_init(struct dccg *dccg)
dccg35_set_dpstreamclk(dccg, REFCLK, otg_inst,
otg_inst);
dccg35_set_dpstreamclk_root_clock_gating(dccg, otg_inst, false);
+ DC_LOG_DEBUG("%s: OTG%d DPSTREAMCLK disabled and root clock gating disabled\n",
+ __func__, otg_inst);
}
/*
@@ -1782,8 +1820,7 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst)
//Disable DTO
switch (inst) {
case 0:
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1);
REG_UPDATE_2(DSCCLK0_DTO_PARAM,
DSCCLK0_DTO_PHASE, 0,
@@ -1791,8 +1828,7 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst)
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1);
break;
case 1:
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1);
REG_UPDATE_2(DSCCLK1_DTO_PARAM,
DSCCLK1_DTO_PHASE, 0,
@@ -1800,8 +1836,7 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst)
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1);
break;
case 2:
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1);
REG_UPDATE_2(DSCCLK2_DTO_PARAM,
DSCCLK2_DTO_PHASE, 0,
@@ -1809,8 +1844,7 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst)
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1);
break;
case 3:
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1);
REG_UPDATE_2(DSCCLK3_DTO_PARAM,
DSCCLK3_DTO_PHASE, 0,
@@ -1821,6 +1855,9 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst)
BREAK_TO_DEBUGGER();
return;
}
+
+ /* Wait for clock to ramp */
+ udelay(10);
}
static void dccg35_disable_dscclk(struct dccg *dccg,
@@ -1864,6 +1901,9 @@ static void dccg35_disable_dscclk(struct dccg *dccg,
default:
return;
}
+
+ /* Wait for clock ramp */
+ udelay(10);
}
static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
@@ -2349,10 +2389,7 @@ static void dccg35_disable_symclk_se_cb(
void dccg35_root_gate_disable_control(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating)
{
-
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) {
- dccg35_set_dppclk_root_clock_gating(dccg, pipe_idx, disable_clock_gating);
- }
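+	/* The root-clock-optimization debug check now lives in dccg35_set_dppclk_root_clock_gating(). */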
+ dccg35_set_dppclk_root_clock_gating(dccg, pipe_idx, disable_clock_gating);
}
static const struct dccg_funcs dccg35_funcs_new = {
@@ -2416,6 +2453,7 @@ static const struct dccg_funcs dccg35_funcs = {
.disable_symclk_se = dccg35_disable_symclk_se,
.set_dtbclk_p_src = dccg35_set_dtbclk_p_src,
.dccg_root_gate_disable_control = dccg35_root_gate_disable_control,
+ .dccg_read_reg_state = dccg31_read_reg_state,
};
struct dccg *dccg35_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
index 51f98c5c51c4..7b9c36456cd9 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
@@ -41,8 +41,9 @@
SR(SYMCLKA_CLOCK_ENABLE),\
SR(SYMCLKB_CLOCK_ENABLE),\
SR(SYMCLKC_CLOCK_ENABLE),\
- SR(SYMCLKD_CLOCK_ENABLE),\
- SR(SYMCLKE_CLOCK_ENABLE)
+ SR(SYMCLKD_CLOCK_ENABLE),\
+ SR(SYMCLKE_CLOCK_ENABLE),\
+ SR(SYMCLK_PSP_CNTL)
#define DCCG_MASK_SH_LIST_DCN35(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
@@ -231,6 +232,14 @@
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_DELAY, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_SIZE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_FREQ_RAMP_DONE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_MAX_ERRDET_CYCLES, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_RESET, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_STATE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_OVR_EN, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_CHG_FWD_CORR_DISABLE, mask_sh),\
struct dccg *dccg35_create(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index ffd172231fdf..663a18ee5162 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -619,7 +619,7 @@ void dccg401_set_dp_dto(
dto_integer = div_u64(params->pixclk_hz, dto_modulo_hz);
dto_phase_hz = params->pixclk_hz - dto_integer * dto_modulo_hz;
- if (dto_phase_hz <= 0) {
+ if (dto_phase_hz <= 0 && dto_integer <= 0) {
/* negative pixel rate should never happen */
BREAK_TO_DEBUGGER();
return;
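As context for the relaxed check above: the DTO programming decomposes the pixel clock into an integer multiple of the modulo plus a phase remainder, so with unsigned math both terms are zero only for a zero pixel clock. A minimal standalone sketch of that identity (the clock values below are illustrative, not taken from the driver):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t pixclk_hz = 594000000ULL;	/* example: a 4k60 HDMI pixel clock */
	uint64_t modulo_hz = 100000000ULL;	/* stand-in for dto_modulo_hz */

	uint64_t dto_integer = pixclk_hz / modulo_hz;		/* models div_u64() */
	uint64_t dto_phase_hz = pixclk_hz - dto_integer * modulo_hz;

	/* quotient * modulo + remainder always reconstructs the input */
	assert(dto_integer * modulo_hz + dto_phase_hz == pixclk_hz);

	/* Both terms are zero only when pixclk_hz itself is zero - the
	 * invalid case the patched (phase <= 0 && integer <= 0) check rejects. */
	assert(dto_integer > 0 || dto_phase_hz > 0);
	return 0;
}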
@@ -727,7 +727,7 @@ void dccg401_init(struct dccg *dccg)
}
}
-void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst)
+void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
@@ -886,6 +886,7 @@ static const struct dccg_funcs dccg401_funcs = {
.enable_symclk_se = dccg401_enable_symclk_se,
.disable_symclk_se = dccg401_disable_symclk_se,
.set_dtbclk_p_src = dccg401_set_dtbclk_p_src,
+ .dccg_read_reg_state = dccg31_read_reg_state
};
struct dccg *dccg401_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
index 55e8718aad22..5947a35363aa 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h
@@ -209,7 +209,7 @@ void dccg401_disable_symclk32_le(
struct dccg *dccg,
int hpo_le_inst);
void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst);
-void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst);
+void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h);
void dccg401_set_ref_dscclk(struct dccg *dccg,
uint32_t dsc_inst);
void dccg401_set_src_sel(
@@ -230,7 +230,6 @@ void dccg401_set_dp_dto(
const struct dp_dto_params *params);
void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst);
void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst);
-void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst);
void dccg401_set_dtbclk_p_src(
struct dccg *dccg,
enum streamclk_source src,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index a6006776333d..2dcf394edf22 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -283,7 +283,7 @@ struct abm *dce_abm_create(
const struct dce_abm_shift *abm_shift,
const struct dce_abm_mask *abm_mask)
{
- struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_ATOMIC);
+ struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
if (abm_dce == NULL) {
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index eeed840073fe..fcad61c618a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -1143,7 +1143,8 @@ void dce_aud_wall_dto_setup(
REG_UPDATE(DCCG_AUDIO_DTO1_PHASE,
DCCG_AUDIO_DTO1_PHASE, clock_info.audio_dto_phase);
- REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+ if (aud->masks->DCCG_AUDIO_DTO2_USE_512FBR_DTO)
+ REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index bb4ac5042c80..673bb87d2c17 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -725,14 +725,18 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
for (i = 0; i < AUX_MAX_RETRIES; i++) {
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
- "dce_aux_transfer_with_retries: link_index=%u: START: retry %d of %d: address=0x%04x length=%u write=%d mot=%d",
+ "dce_aux_transfer_with_retries: link_index=%u: START: retry %d of %d: "
+ "address=0x%04x length=%u write=%d mot=%d is_i2c=%d is_dpia=%d ddc_hw_inst=%d",
ddc && ddc->link ? ddc->link->link_index : UINT_MAX,
i + 1,
(int)AUX_MAX_RETRIES,
payload->address,
payload->length,
(unsigned int) payload->write,
- (unsigned int) payload->mot);
+ (unsigned int) payload->mot,
+ payload->i2c_over_aux,
+ (ddc->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? true : false,
+ ddc->link->ddc_hw_inst);
if (payload->write)
dce_aux_log_payload(" write", payload->data, payload->length, 16);
@@ -746,7 +750,9 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
- "dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d payload->reply=%u",
+ "dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: "
+ "address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d "
+ "payload->reply=%u is_i2c=%d is_dpia=%d ddc_hw_inst=%d",
ddc && ddc->link ? ddc->link->link_index : UINT_MAX,
i + 1,
(int)AUX_MAX_RETRIES,
@@ -756,7 +762,10 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
(unsigned int) payload->mot,
ret,
(int)operation_result,
- (unsigned int) *payload->reply);
+ (unsigned int) *payload->reply,
+ payload->i2c_over_aux,
+ (ddc->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? true : false,
+ ddc->link->ddc_hw_inst);
if (!payload->write)
dce_aux_log_payload(" read", payload->data, ret > 0 ? ret : 0, 16);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index a8e79104b684..5f8fba45d98d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -1126,7 +1126,7 @@ struct dmcu *dcn10_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask)
{
- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
if (dmcu_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -1147,7 +1147,7 @@ struct dmcu *dcn20_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask)
{
- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
if (dmcu_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -1168,7 +1168,7 @@ struct dmcu *dcn21_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask)
{
- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
if (dmcu_dce == NULL) {
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index d28826c3ae5f..365dd2e37aea 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -292,9 +292,35 @@ static void set_speed(
FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
}
+static bool acquire_engine(struct dce_i2c_hw *dce_i2c_hw)
+{
+ uint32_t arbitrate = 0;
+
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ switch (arbitrate) {
+ case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW:
+ return true;
+ case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW:
+ return false;
+ case DC_I2C_STATUS__DC_I2C_STATUS_IDLE:
+ default:
+ break;
+ }
+
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, true);
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
+ return false;
+
+ return true;
+}
+
static bool setup_engine(
struct dce_i2c_hw *dce_i2c_hw)
{
+ // Deassert soft reset to unblock I2C engine registers
+ REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, false);
+
uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
uint32_t reset_length = 0;
@@ -309,8 +335,8 @@ static bool setup_engine(
REG_UPDATE_N(SETUP, 1,
FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_EN), 1);
- /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/
- REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
+ if (!acquire_engine(dce_i2c_hw))
+ return false;
/* Set SW-requested I2C speed to default; API calls will override it later if needed */
set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz);
@@ -319,9 +345,8 @@ static bool setup_engine(
i2c_setup_limit = dce_i2c_hw->setup_limit;
/* Program pin select */
- REG_UPDATE_6(DC_I2C_CONTROL,
+ REG_UPDATE_5(DC_I2C_CONTROL,
DC_I2C_GO, 0,
- DC_I2C_SOFT_RESET, 0,
DC_I2C_SEND_RESET, 0,
DC_I2C_SW_STATUS_RESET, 1,
DC_I2C_TRANSACTION_COUNT, 0,
@@ -351,6 +376,32 @@ static bool setup_engine(
return true;
}
+/**
+ * cntl_stuck_hw_workaround - Workaround for I2C engine stuck state
+ * @dce_i2c_hw: Pointer to dce_i2c_hw structure
+ *
+ * If we boot without an HDMI display, the I2C engine does not get initialized
+ * correctly. One of its symptoms is that SW_USE_I2C does not get cleared after
+ * acquire. After setting SW_DONE_USING_I2C on release, the engine gets
+ * immediately reacquired by SW, preventing DMUB from using it.
+ *
+ * This function checks the I2C arbitration status and applies a release
+ * workaround if necessary.
+ */
+static void cntl_stuck_hw_workaround(struct dce_i2c_hw *dce_i2c_hw)
+{
+ uint32_t arbitrate = 0;
+
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
+ return;
+
+ // Still acquired after release, release again as a workaround
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true);
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ ASSERT(arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW);
+}
+
static void release_engine(
struct dce_i2c_hw *dce_i2c_hw)
{
@@ -378,9 +429,9 @@ static void release_engine(
/*for HW HDCP Ri polling failure w/a test*/
set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz_hdcp);
- /* Release I2C after reset, so HW or DMCU could use it */
- REG_UPDATE_2(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1,
- DC_I2C_SW_USE_I2C_REG_REQ, 0);
+ // Release the I2C engine so HW or DMCU can use it; this automatically clears SW_USE_I2C
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true);
+ cntl_stuck_hw_workaround(dce_i2c_hw);
if (dce_i2c_hw->ctx->dc->debug.enable_mem_low_power.bits.i2c) {
if (dce_i2c_hw->regs->DIO_MEM_PWR_CTRL)
@@ -540,7 +591,7 @@ static bool dce_i2c_hw_engine_submit_payload(struct dce_i2c_hw *dce_i2c_hw,
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE;
- request.address = (uint8_t) ((payload->address << 1) | !payload->write);
+ request.address = (uint8_t) ((payload->address << 1) | (payload->write ? 0 : 1));
request.length = payload->length;
request.data = payload->data;
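The acquire/release handshake introduced in this file is easier to follow end to end in isolation. Below is a minimal userspace model of the arbitration protocol, assuming a single ownership register; the enum values and the variable standing in for the register are placeholders, not the real DC_I2C_ARBITRATION fields or REG_* accessors:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the DC_I2C_ARBITRATION ownership states. */
enum arb_status { ARB_IDLE, ARB_USED_BY_SW, ARB_USED_BY_HW };

static enum arb_status arb = ARB_IDLE;	/* stands in for the HW register */

static bool sketch_acquire(void)
{
	if (arb == ARB_USED_BY_SW)
		return true;		/* already owned by SW */
	if (arb == ARB_USED_BY_HW)
		return false;		/* HW/DMCU owns the engine, back off */

	arb = ARB_USED_BY_SW;		/* request SW ownership... */
	return arb == ARB_USED_BY_SW;	/* ...and confirm the arbiter granted it */
}

static void sketch_release(void)
{
	arb = ARB_IDLE;			/* signal SW_DONE_USING_I2C */
	if (arb == ARB_USED_BY_SW)	/* stuck-controller workaround: */
		arb = ARB_IDLE;		/* release a second time */
}

int main(void)
{
	if (sketch_acquire()) {
		puts("engine acquired, transfer would run here");
		sketch_release();
	}
	return 0;
}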
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
index e188447c8156..2d73b94c515c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
@@ -451,7 +451,7 @@ static bool dce_i2c_sw_engine_submit_payload(struct dce_i2c_sw *engine,
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT :
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE;
- request.address = (uint8_t) ((payload->address << 1) | !payload->write);
+ request.address = (uint8_t) ((payload->address << 1) | (payload->write ? 0 : 1));
request.length = payload->length;
request.data = payload->data;
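Both address fixes above compute the standard I2C first byte: the 7-bit device address in bits [7:1] with the read/write flag in bit 0 (1 for read, 0 for write). A small self-contained check of that encoding (0x50 is just an example address):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static uint8_t i2c_addr_byte(uint8_t address7, bool write)
{
	/* same byte either spelling produces: (addr << 1) | !write */
	return (uint8_t)((address7 << 1) | (write ? 0 : 1));
}

int main(void)
{
	assert(i2c_addr_byte(0x50, true)  == 0xA0);	/* write transaction */
	assert(i2c_addr_byte(0x50, false) == 0xA1);	/* read transaction */
	return 0;
}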
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 4a9d07c31bc5..87dbb8d7ed27 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -302,6 +302,10 @@ static void setup_panel_mode(
if (ctx->dc->caps.psp_setup_panel_mode)
return;
+ /* The code below is only applicable to encoders with a digital transmitter. */
+ if (enc110->base.transmitter == TRANSMITTER_UNKNOWN)
+ return;
+
ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
value = REG_READ(DP_DPHY_INTERNAL_CTRL);
@@ -804,6 +808,33 @@ bool dce110_link_encoder_validate_dp_output(
return true;
}
+static bool dce110_link_encoder_validate_rgb_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing)
+{
+ /* When the VBIOS doesn't specify any limits, use 400 MHz.
+ * The value comes from amdgpu_atombios_get_clock_info.
+ */
+ uint32_t max_pixel_clock_khz = 400000;
+
+ if (enc110->base.ctx->dc_bios->fw_info_valid &&
+ enc110->base.ctx->dc_bios->fw_info.max_pixel_clock) {
+ max_pixel_clock_khz =
+ enc110->base.ctx->dc_bios->fw_info.max_pixel_clock;
+ }
+
+ if (crtc_timing->pix_clk_100hz > max_pixel_clock_khz * 10)
+ return false;
+
+ if (crtc_timing->display_color_depth != COLOR_DEPTH_888)
+ return false;
+
+ if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB)
+ return false;
+
+ return true;
+}
+
void dce110_link_encoder_construct(
struct dce110_link_encoder *enc110,
const struct encoder_init_data *init_data,
@@ -824,6 +855,7 @@ void dce110_link_encoder_construct(
enc110->base.connector = init_data->connector;
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ enc110->base.analog_engine = init_data->analog_engine;
enc110->base.features = *enc_features;
@@ -847,6 +879,11 @@ void dce110_link_encoder_construct(
SIGNAL_TYPE_EDP |
SIGNAL_TYPE_HDMI_TYPE_A;
+ if ((enc110->base.connector.id == CONNECTOR_ID_DUAL_LINK_DVII ||
+ enc110->base.connector.id == CONNECTOR_ID_SINGLE_LINK_DVII) &&
+ enc110->base.analog_engine != ENGINE_ID_UNKNOWN)
+ enc110->base.output_signals |= SIGNAL_TYPE_RGB;
+
/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
* SW always assigns DIG_FE 1:1 mapped to DIG_BE for non-MST UNIPHY.
* SW assigns DIG_FE to non-MST UNIPHY first and MST last. So prefer
@@ -885,6 +922,13 @@ void dce110_link_encoder_construct(
enc110->base.preferred_engine = ENGINE_ID_DIGG;
break;
default:
+ if (init_data->analog_engine != ENGINE_ID_UNKNOWN) {
+ /* The connector is analog-only, i.e. VGA */
+ enc110->base.preferred_engine = init_data->analog_engine;
+ enc110->base.output_signals = SIGNAL_TYPE_RGB;
+ enc110->base.transmitter = TRANSMITTER_UNKNOWN;
+ break;
+ }
ASSERT_CRITICAL(false);
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
}
@@ -896,13 +940,13 @@ void dce110_link_encoder_construct(
enc110->base.id, &bp_cap_info);
/* Override features with DCE-specific values */
- if (BP_RESULT_OK == result) {
+ if (result == BP_RESULT_OK) {
enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
bp_cap_info.DP_HBR2_EN;
enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
- } else {
+ } else if (result != BP_RESULT_NORECORD) {
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
__func__,
result);
@@ -939,6 +983,10 @@ bool dce110_link_encoder_validate_output_with_stream(
is_valid = dce110_link_encoder_validate_dp_output(
enc110, &stream->timing);
break;
+ case SIGNAL_TYPE_RGB:
+ is_valid = dce110_link_encoder_validate_rgb_output(
+ enc110, &stream->timing);
+ break;
case SIGNAL_TYPE_EDP:
case SIGNAL_TYPE_LVDS:
is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB;
@@ -969,6 +1017,10 @@ void dce110_link_encoder_hw_init(
cntl.coherent = false;
cntl.hpd_sel = enc110->base.hpd_source;
+ /* The code below is only applicable to encoders with a digital transmitter. */
+ if (enc110->base.transmitter == TRANSMITTER_UNKNOWN)
+ return;
+
if (enc110->base.connector.id == CONNECTOR_ID_EDP)
cntl.signal = SIGNAL_TYPE_EDP;
@@ -1034,6 +1086,8 @@ void dce110_link_encoder_setup(
/* DP MST */
REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 5);
break;
+ case SIGNAL_TYPE_RGB:
+ break;
default:
ASSERT_CRITICAL(false);
/* invalid mode ! */
@@ -1282,6 +1336,24 @@ void dce110_link_encoder_disable_output(
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;
+ switch (enc->analog_engine) {
+ case ENGINE_ID_DACA:
+ REG_UPDATE(DAC_ENABLE, DAC_ENABLE, 0);
+ break;
+ case ENGINE_ID_DACB:
+ /* DACB doesn't seem to be present on DCE6+,
+ * although there are references to it in the register file.
+ */
+ DC_LOG_ERROR("%s DACB is unsupported\n", __func__);
+ break;
+ default:
+ break;
+ }
+
+ /* The code below only applies to connectors that support digital signals. */
+ if (enc->transmitter == TRANSMITTER_UNKNOWN)
+ return;
+
if (!dce110_is_dig_enabled(enc)) {
/* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
return;
@@ -1726,6 +1798,7 @@ void dce60_link_encoder_construct(
enc110->base.connector = init_data->connector;
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ enc110->base.analog_engine = init_data->analog_engine;
enc110->base.features = *enc_features;
@@ -1749,6 +1822,11 @@ void dce60_link_encoder_construct(
SIGNAL_TYPE_EDP |
SIGNAL_TYPE_HDMI_TYPE_A;
+ if ((enc110->base.connector.id == CONNECTOR_ID_DUAL_LINK_DVII ||
+ enc110->base.connector.id == CONNECTOR_ID_SINGLE_LINK_DVII) &&
+ enc110->base.analog_engine != ENGINE_ID_UNKNOWN)
+ enc110->base.output_signals |= SIGNAL_TYPE_RGB;
+
/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
* SW always assigns DIG_FE 1:1 mapped to DIG_BE for non-MST UNIPHY.
* SW assigns DIG_FE to non-MST UNIPHY first and MST last. So prefer
@@ -1787,6 +1865,13 @@ void dce60_link_encoder_construct(
enc110->base.preferred_engine = ENGINE_ID_DIGG;
break;
default:
+ if (init_data->analog_engine != ENGINE_ID_UNKNOWN) {
+ /* The connector is analog-only, i.e. VGA */
+ enc110->base.preferred_engine = init_data->analog_engine;
+ enc110->base.output_signals = SIGNAL_TYPE_RGB;
+ enc110->base.transmitter = TRANSMITTER_UNKNOWN;
+ break;
+ }
ASSERT_CRITICAL(false);
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
}
@@ -1798,13 +1883,13 @@ void dce60_link_encoder_construct(
enc110->base.id, &bp_cap_info);
/* Override features with DCE-specific values */
- if (BP_RESULT_OK == result) {
+ if (result == BP_RESULT_OK) {
enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
bp_cap_info.DP_HBR2_EN;
enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
- } else {
+ } else if (result != BP_RESULT_NORECORD) {
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
__func__,
result);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index 261c70e01e33..c58b69bc319b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -101,18 +101,21 @@
SRI(DP_SEC_CNTL, DP, id), \
SRI(DP_VID_STREAM_CNTL, DP, id), \
SRI(DP_DPHY_FAST_TRAINING, DP, id), \
- SRI(DP_SEC_CNTL1, DP, id)
+ SRI(DP_SEC_CNTL1, DP, id), \
+ SR(DAC_ENABLE)
#endif
#define LE_DCE80_REG_LIST(id)\
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
- LE_COMMON_REG_LIST_BASE(id)
+ LE_COMMON_REG_LIST_BASE(id), \
+ SR(DAC_ENABLE)
#define LE_DCE100_REG_LIST(id)\
LE_COMMON_REG_LIST_BASE(id), \
SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
- SR(DCI_MEM_PWR_STATUS)
+ SR(DCI_MEM_PWR_STATUS), \
+ SR(DAC_ENABLE)
#define LE_DCE110_REG_LIST(id)\
LE_COMMON_REG_LIST_BASE(id), \
@@ -181,6 +184,9 @@ struct dce110_link_enc_registers {
uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
uint32_t DP_SEC_CNTL1;
+
+ /* DAC registers */
+ uint32_t DAC_ENABLE;
};
struct dce110_link_encoder {
@@ -215,10 +221,6 @@ bool dce110_link_encoder_validate_dvi_output(
enum signal_type signal,
const struct dc_crtc_timing *crtc_timing);
-bool dce110_link_encoder_validate_rgb_output(
- const struct dce110_link_encoder *enc110,
- const struct dc_crtc_timing *crtc_timing);
-
bool dce110_link_encoder_validate_dp_output(
const struct dce110_link_encoder *enc110,
const struct dc_crtc_timing *crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 1130d7619b26..574618d5d4a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -1567,3 +1567,17 @@ void dce110_stream_encoder_construct(
enc110->se_shift = se_shift;
enc110->se_mask = se_mask;
}
+
+static const struct stream_encoder_funcs dce110_an_str_enc_funcs = {};
+
+void dce110_analog_stream_encoder_construct(
+ struct dce110_stream_encoder *enc110,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id)
+{
+ enc110->base.funcs = &dce110_an_str_enc_funcs;
+ enc110->base.ctx = ctx;
+ enc110->base.id = eng_id;
+ enc110->base.bp = bp;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
index cc5020a8e1e1..068de1392121 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
@@ -708,6 +708,11 @@ void dce110_stream_encoder_construct(
const struct dce_stream_encoder_shift *se_shift,
const struct dce_stream_encoder_mask *se_mask);
+void dce110_analog_stream_encoder_construct(
+ struct dce110_stream_encoder *enc110,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id);
void dce110_se_audio_mute_control(
struct stream_encoder *enc, bool mute);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index 2b1673d69ea8..1ab5ae9b5ea5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -154,10 +154,13 @@ static bool dce60_setup_scaling_configuration(
REG_SET(SCL_BYPASS_CONTROL, 0, SCL_BYPASS_MODE, 0);
if (data->taps.h_taps + data->taps.v_taps <= 2) {
- /* Set bypass */
-
- /* DCE6 has no SCL_MODE register, skip scale mode programming */
+ /* Disable scaler functionality */
+ REG_WRITE(SCL_SCALER_ENABLE, 0);
+ /* Clear registers that can cause glitches even when the scaler is off */
+ REG_WRITE(SCL_TAP_CONTROL, 0);
+ REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0);
+ REG_WRITE(SCL_F_SHARP_CONTROL, 0);
return false;
}
@@ -165,7 +168,7 @@ static bool dce60_setup_scaling_configuration(
SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1,
SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1);
- /* DCE6 has no SCL_MODE register, skip scale mode programming */
+ REG_WRITE(SCL_SCALER_ENABLE, 1);
/* DCE6 has no SCL_BOUNDARY_MODE bit, skip replace out of bound pixels */
@@ -502,6 +505,8 @@ static void dce60_transform_set_scaler(
REG_SET(DC_LB_MEM_SIZE, 0,
DC_LB_MEM_SIZE, xfm_dce->lb_memory_size);
+ REG_WRITE(SCL_UPDATE, 0x00010000);
+
/* Clear SCL_F_SHARP_CONTROL value to 0 */
REG_WRITE(SCL_F_SHARP_CONTROL, 0);
@@ -527,8 +532,7 @@ static void dce60_transform_set_scaler(
if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) {
/* 4. Program vertical filters */
if (xfm_dce->filter_v == NULL)
- REG_SET(SCL_VERT_FILTER_CONTROL, 0,
- SCL_V_2TAP_HARDCODE_COEF_EN, 0);
+ REG_WRITE(SCL_VERT_FILTER_CONTROL, 0);
program_multi_taps_filter(
xfm_dce,
data->taps.v_taps,
@@ -542,8 +546,7 @@ static void dce60_transform_set_scaler(
/* 5. Program horizontal filters */
if (xfm_dce->filter_h == NULL)
- REG_SET(SCL_HORZ_FILTER_CONTROL, 0,
- SCL_H_2TAP_HARDCODE_COEF_EN, 0);
+ REG_WRITE(SCL_HORZ_FILTER_CONTROL, 0);
program_multi_taps_filter(
xfm_dce,
data->taps.h_taps,
@@ -566,6 +569,8 @@ static void dce60_transform_set_scaler(
/* DCE6 has no SCL_COEF_UPDATE_COMPLETE bit to flip to new coefficient memory */
/* DCE6 DATA_FORMAT register does not support ALPHA_EN */
+
+ REG_WRITE(SCL_UPDATE, 0);
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
index cbce194ec7b8..eb716e8337e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
@@ -155,6 +155,9 @@
SRI(SCL_COEF_RAM_TAP_DATA, SCL, id), \
SRI(VIEWPORT_START, SCL, id), \
SRI(VIEWPORT_SIZE, SCL, id), \
+ SRI(SCL_SCALER_ENABLE, SCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT_RGB_LUMA, SCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT_CHROMA, SCL, id), \
SRI(SCL_HORZ_FILTER_SCALE_RATIO, SCL, id), \
SRI(SCL_VERT_FILTER_SCALE_RATIO, SCL, id), \
SRI(SCL_VERT_FILTER_INIT, SCL, id), \
@@ -590,6 +593,7 @@ struct dce_transform_registers {
uint32_t SCL_VERT_FILTER_SCALE_RATIO;
uint32_t SCL_HORZ_FILTER_INIT;
#if defined(CONFIG_DRM_AMD_DC_SI)
+ uint32_t SCL_SCALER_ENABLE;
uint32_t SCL_HORZ_FILTER_INIT_RGB_LUMA;
uint32_t SCL_HORZ_FILTER_INIT_CHROMA;
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index d37ecfdde4f1..5bfa2b0d2afd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -61,10 +61,9 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
dc_dmub_srv_wait_for_inbox0_ack(dmub_srv);
}
-bool should_use_dmub_lock(struct dc_link *link)
+bool dmub_hw_lock_mgr_does_link_require_lock(const struct dc *dc, const struct dc_link *link)
{
- /* ASIC doesn't support DMUB */
- if (!link->ctx->dmub_srv)
+ if (!link)
return false;
if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
@@ -73,16 +72,38 @@ bool should_use_dmub_lock(struct dc_link *link)
if (link->replay_settings.replay_feature_enabled)
return true;
- /* only use HW lock for PSR1 on single eDP */
if (link->psr_settings.psr_version == DC_PSR_VERSION_1) {
struct dc_link *edp_links[MAX_NUM_EDP];
int edp_num;
- dc_get_edp_links(link->dc, edp_links, &edp_num);
-
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num == 1)
return true;
}
+ return false;
+}
+bool dmub_hw_lock_mgr_does_context_require_lock(const struct dc *dc, const struct dc_state *context)
+{
+ if (!context)
+ return false;
+ for (int i = 0; i < context->stream_count; i++) {
+ const struct dc_link *link = context->streams[i]->link;
+
+ if (dmub_hw_lock_mgr_does_link_require_lock(dc, link))
+ return true;
+ }
return false;
}
+
+bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link)
+{
+ /* ASIC doesn't support DMUB */
+ if (!dc->ctx->dmub_srv)
+ return false;
+
+ if (dc->ctx->dce_version >= DCN_VERSION_4_01)
+ return false;
+
+ return dmub_hw_lock_mgr_does_link_require_lock(dc, link);
+}
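The refactor above splits the old should_use_dmub_lock() into a per-link predicate, a per-context fold over all streams, and a top-level inbox1 gate that also checks for DMUB support and the ASIC generation. A simplified model of that layering, using stand-in types rather than the real dc/dc_link/dc_state structures:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in types; the real code walks dc_state streams and dc_link settings. */
struct model_link  { bool psr_su, replay, psr1_single_edp; };
struct model_state { const struct model_link *links[6]; int stream_count; };

static bool link_requires_lock(const struct model_link *link)
{
	if (!link)
		return false;
	return link->psr_su || link->replay || link->psr1_single_edp;
}

static bool context_requires_lock(const struct model_state *ctx)
{
	for (int i = 0; ctx && i < ctx->stream_count; i++)
		if (link_requires_lock(ctx->links[i]))
			return true;
	return false;
}

static bool use_inbox1_lock(bool has_dmub, bool dcn401_or_newer,
			    const struct model_link *link)
{
	if (!has_dmub)
		return false;	/* ASIC doesn't support DMUB */
	if (dcn401_or_newer)
		return false;	/* DCN4.01+ no longer takes the inbox1 lock */
	return link_requires_lock(link);
}

int main(void)
{
	struct model_link edp = { .psr1_single_edp = true };
	struct model_state ctx = { .links = { &edp }, .stream_count = 1 };

	printf("context lock: %d\n", context_requires_lock(&ctx));
	printf("inbox1 lock:  %d\n", use_inbox1_lock(true, false, &edp));
	return 0;
}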
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
index 5a72b168fb4a..4c80ca8484ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
@@ -37,6 +37,16 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
union dmub_inbox0_cmd_lock_hw hw_lock_cmd);
-bool should_use_dmub_lock(struct dc_link *link);
+/**
+ * should_use_dmub_inbox1_lock() - Checks if the DMCUB hardware lock via inbox1 should be used.
+ *
+ * @dc: pointer to DC object
+ * @link: optional pointer to the link object to check for enabled link features
+ *
+ * Return: true if the inbox1 lock should be used, false otherwise
+ */
+bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link);
+bool dmub_hw_lock_mgr_does_link_require_lock(const struct dc *dc, const struct dc_link *link);
+bool dmub_hw_lock_mgr_does_context_require_lock(const struct dc *dc, const struct dc_state *context);
#endif /*_DMUB_HW_LOCK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index ff3b8244ba3d..87af4fdc04a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -391,7 +391,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
sizeof(DP_SINK_DEVICE_STR_ID_1)))
link->psr_settings.force_ffu_mode = 1;
- copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode;
+ copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode || psr_context->os_request_force_ffu;
if (((link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
!link->dc->debug.disable_fec) &&
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index fcd3d86ad517..cf1372aaff6c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -3,6 +3,7 @@
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dc.h"
+#include "link_service.h"
#include "dc_dmub_srv.h"
#include "dmub/dmub_srv.h"
#include "core_types.h"
@@ -168,6 +169,7 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
copy_settings_data->max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line;
copy_settings_data->smu_optimizations_en = link->replay_settings.replay_smu_opt_enable;
copy_settings_data->replay_timing_sync_supported = link->replay_settings.config.replay_timing_sync_supported;
+ copy_settings_data->replay_support_fast_resync_in_ultra_sleep_mode = link->replay_settings.config.replay_support_fast_resync_in_ultra_sleep_mode;
copy_settings_data->debug.bitfields.enable_ips_visual_confirm = dc->dc->debug.enable_ips_visual_confirm;
@@ -189,6 +191,18 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
else
copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0;
+ copy_settings_data->flags.bitfields.alpm_mode = (enum dmub_alpm_mode)link->replay_settings.config.alpm_mode;
+ if (link->replay_settings.config.alpm_mode == DC_ALPM_AUXLESS) {
+ copy_settings_data->auxless_alpm_data.lfps_setup_ns = dc->dc->debug.auxless_alpm_lfps_setup_ns;
+ copy_settings_data->auxless_alpm_data.lfps_period_ns = dc->dc->debug.auxless_alpm_lfps_period_ns;
+ copy_settings_data->auxless_alpm_data.lfps_silence_ns = dc->dc->debug.auxless_alpm_lfps_silence_ns;
+ copy_settings_data->auxless_alpm_data.lfps_t1_t2_override_us =
+ dc->dc->debug.auxless_alpm_lfps_t1t2_us;
+ copy_settings_data->auxless_alpm_data.lfps_t1_t2_offset_us =
+ dc->dc->debug.auxless_alpm_lfps_t1t2_offset_us;
+ copy_settings_data->auxless_alpm_data.lttpr_count = link->dc->link_srv->dp_get_lttpr_count(link);
+ }
+
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
@@ -199,7 +213,8 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
*/
static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
uint32_t coasting_vtotal,
- uint8_t panel_inst)
+ uint8_t panel_inst,
+ uint16_t frame_skip_number)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
@@ -213,6 +228,7 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data);
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16;
+ pCmd->replay_set_coasting_vtotal_data.frame_skip_number = frame_skip_number;
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
@@ -269,7 +285,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
* Set REPLAY power optimization flags and coasting vtotal.
*/
static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub,
- unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal)
+ unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal, uint16_t frame_skip_number)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
@@ -287,6 +303,7 @@ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dm
pCmd->replay_set_power_opt_data.panel_inst = panel_inst;
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16;
+ pCmd->replay_set_coasting_vtotal_data.frame_skip_number = frame_skip_number;
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
@@ -370,6 +387,19 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_disabled_adaptive_sync_sdp.data.force_disabled =
cmd_element->disabled_adaptive_sync_sdp_data.force_disabled;
break;
+ case Replay_Set_Version:
+ //Header
+ cmd.replay_set_version.header.sub_type =
+ DMUB_CMD__REPLAY_SET_VERSION;
+ cmd.replay_set_version.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_version) -
+ sizeof(struct dmub_cmd_header);
+ //Cmd Body
+ cmd.replay_set_version.replay_set_version_data.panel_inst =
+ cmd_element->version_data.panel_inst;
+ cmd.replay_set_version.replay_set_version_data.version =
+ cmd_element->version_data.version;
+ break;
case Replay_Set_General_Cmd:
//Header
cmd.replay_set_general_cmd.header.sub_type =
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
index e6346c0ffc0e..07c79739a980 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
@@ -27,11 +27,12 @@ struct dmub_replay_funcs {
void (*replay_send_cmd)(struct dmub_replay *dmub,
enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element);
void (*replay_set_coasting_vtotal)(struct dmub_replay *dmub, uint32_t coasting_vtotal,
- uint8_t panel_inst);
+ uint8_t panel_inst, uint16_t frame_skip_number);
void (*replay_residency)(struct dmub_replay *dmub,
uint8_t panel_inst, uint32_t *residency, const bool is_start, const enum pr_residency_mode mode);
void (*replay_set_power_opt_and_coasting_vtotal)(struct dmub_replay *dmub,
- unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal);
+ unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal,
+ uint16_t frame_skip_number);
};
struct dmub_replay *dmub_replay_create(struct dc_context *ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
index e0558a78b11c..1c1228116487 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
@@ -812,7 +812,7 @@ bool dcn10_link_encoder_validate_output_with_stream(
enc10, &stream->timing);
break;
case SIGNAL_TYPE_EDP:
- is_valid = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ? true : false;
+ is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB;
break;
case SIGNAL_TYPE_VIRTUAL:
is_valid = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
index 22e66b375a7f..d928b4dcf6b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
@@ -28,7 +28,7 @@
#include "dcn10_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#include "dcn30/dcn30_afmt.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c
index 0b47aeb60e79..bec0b4aaeb2b 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c
@@ -29,7 +29,7 @@
#include "dcn20_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
index 9a92f73d5b7f..84cc2ddc52fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
@@ -37,7 +37,7 @@
#include "link_enc_cfg.h"
#include "dc_dmub_srv.h"
#include "dal_asic_id.h"
-#include "link.h"
+#include "link_service.h"
#define CTX \
enc10->base.ctx
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
index ae81451a3a72..3e85e9c3d2cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
@@ -30,7 +30,7 @@
#include "dcn314_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
index 1a9bb614c41e..3523d1cdc1a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
@@ -29,7 +29,7 @@
#include "dcn32_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
index 6ab2a218b769..fd5d1dbf9dc6 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
@@ -29,7 +29,7 @@
#include "dcn35_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
@@ -397,7 +397,7 @@ static bool enc35_is_fifo_enabled(struct stream_encoder *enc)
uint32_t reset_val;
REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &reset_val);
- return (reset_val == 0) ? false : true;
+ return reset_val != 0;
}
void enc35_disable_fifo(struct stream_encoder *enc)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
index d5fa551dd3c9..99aab70ef3e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
@@ -32,7 +32,7 @@
#include "dcn401_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 7b9c22c45453..fbbf9c757b3c 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -277,12 +277,13 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc
/*
* SMU message tracing
*/
-void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx);
-void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx);
-
-#define TRACE_SMU_MSG(msg_id, param_in, ctx) dm_trace_smu_msg(msg_id, param_in, ctx)
-#define TRACE_SMU_DELAY(response_delay, ctx) dm_trace_smu_delay(response_delay, ctx)
+void dm_trace_smu_enter(uint32_t msg_id, uint32_t param_in, unsigned int delay, struct dc_context *ctx);
+void dm_trace_smu_exit(bool success, uint32_t response, struct dc_context *ctx);
+#define TRACE_SMU_MSG_DELAY(msg_id, param_in, delay, ctx) dm_trace_smu_enter(msg_id, param_in, delay, ctx)
+#define TRACE_SMU_MSG(msg_id, param_in, ctx) dm_trace_smu_enter(msg_id, param_in, 0, ctx)
+#define TRACE_SMU_MSG_ENTER(msg_id, param_in, ctx) dm_trace_smu_enter(msg_id, param_in, 0, ctx)
+#define TRACE_SMU_MSG_EXIT(success, response, ctx) dm_trace_smu_exit(success, response, ctx)
/*
* DMUB Interfaces
@@ -311,4 +312,6 @@ void dm_dtn_log_end(struct dc_context *ctx,
char *dce_version_to_string(const int version);
+bool dc_supports_vrr(const enum dce_version v);
+
#endif /* __DM_SERVICES_H__ */
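The new macros pair an enter trace (message id, parameter, optional delay) with an exit trace (success flag, response), replacing the single-shot message/delay hooks. A sketch of how a send path would bracket a transaction; the printf stubs stand in for the real dm_trace_smu_enter()/dm_trace_smu_exit() implementations, and the message id and response values are invented:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void trace_enter(uint32_t msg, uint32_t param, unsigned int delay)
{
	printf("SMU enter: msg=%u param=0x%08x delay=%u\n", msg, param, delay);
}

static void trace_exit(bool ok, uint32_t response)
{
	printf("SMU exit: %s response=0x%08x\n", ok ? "ok" : "fail", response);
}

static uint32_t send_smu_message(uint32_t msg, uint32_t param)
{
	trace_enter(msg, param, 0);		/* TRACE_SMU_MSG_ENTER */
	uint32_t response = 0x1;		/* pretend the SMU acked */
	trace_exit(response == 0x1, response);	/* TRACE_SMU_MSG_EXIT */
	return response;
}

int main(void)
{
	send_smu_message(42, 0xDEADBEEF);
	return 0;
}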
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index bf63da266a18..3b093b8699ab 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -127,7 +127,7 @@ struct dm_pp_single_disp_config {
uint32_t src_height;
uint32_t src_width;
uint32_t v_refresh;
- uint32_t sym_clock; /* HDMI only */
+ uint32_t pixel_clock; /* Pixel clock in kHz (for HDMI only: normalized) */
struct dc_link_settings link_settings; /* DP only */
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index e1d500633dfa..b357683b4255 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -114,9 +114,6 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_rcflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_ccflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_rcflags)
-
ifdef CONFIG_DRM_AMD_DC_FP
DML += display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o
DML += dcn10/dcn10_fpu.o
@@ -137,7 +134,6 @@ DML += dcn303/dcn303_fpu.o
DML += dcn314/dcn314_fpu.o
DML += dcn35/dcn35_fpu.o
DML += dcn351/dcn351_fpu.o
-DML += dcn401/dcn401_fpu.o
DML += dsc/rc_calc_fpu.o
DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o
endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index f1235bf9a596..74962791302f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -748,7 +748,7 @@ static unsigned int get_highest_allowed_voltage_level(bool is_vmin_only_asic)
bool dcn_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
/*
* we want a breakdown of the various stages of validation, which the
@@ -1119,7 +1119,7 @@ bool dcn_validate_bandwidth(
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (v->voltage_level != number_of_states_plus_one && !fast_validate) {
+ if (v->voltage_level != number_of_states_plus_one && validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) {
float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second;
if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65)
@@ -1286,7 +1286,7 @@ bool dcn_validate_bandwidth(
}
} else if (v->voltage_level == number_of_states_plus_one) {
BW_VAL_TRACE_SKIP(fail);
- } else if (fast_validate) {
+ } else if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index e9fea9c2162e..7aaf13bbd4e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -30,8 +30,7 @@
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
#include "clk_mgr/dcn21/rn_clk_mgr.h"
-
-#include "link.h"
+#include "link_service.h"
#include "dcn20_fpu.h"
#include "dc_state_priv.h"
@@ -1315,7 +1314,7 @@ static void swizzle_to_dml_params(
int dcn20_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int pipe_cnt, i;
bool synchronized_vblank = true;
@@ -1733,7 +1732,7 @@ void dcn20_calculate_wm(struct dc *dc, struct dc_state *context,
int *out_pipe_cnt,
int *pipe_split_from,
int vlevel,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int pipe_cnt, i, pipe_idx;
@@ -1780,10 +1779,10 @@ void dcn20_calculate_wm(struct dc *dc, struct dc_state *context,
if (pipe_cnt != pipe_idx) {
if (dc->res_pool->funcs->populate_dml_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
else
pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
}
*out_pipe_cnt = pipe_cnt;
@@ -2027,7 +2026,7 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
}
static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes)
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes)
{
bool out = false;
@@ -2040,7 +2039,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
BW_VAL_TRACE_COUNT();
- out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate);
+ out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode);
if (pipe_cnt == 0)
goto validate_out;
@@ -2050,12 +2049,12 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
- dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
+ dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, validate_mode);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
BW_VAL_TRACE_END_WATERMARKS();
@@ -2077,7 +2076,7 @@ validate_out:
}
bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes)
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes)
{
bool voltage_supported = false;
bool full_pstate_supported = false;
@@ -2095,12 +2094,11 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
/*Unsafe due to current pipe merge and split logic*/
ASSERT(context != dc->current_state);
- if (fast_validate) {
- return dcn20_validate_bandwidth_internal(dc, context, true, pipes);
- }
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
+ return dcn20_validate_bandwidth_internal(dc, context, validate_mode, pipes);
// Best case, we support full UCLK switch latency
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING, pipes);
full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
@@ -2113,7 +2111,7 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
memset(pipes, 0, dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st));
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes);
+ voltage_supported = dcn20_validate_bandwidth_internal(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING, pipes);
dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
@@ -2156,14 +2154,14 @@ void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v,
int dcn21_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
uint32_t pipe_cnt;
int i;
dc_assert_fp_enabled();
- pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
for (i = 0; i < pipe_cnt; i++) {
@@ -2239,7 +2237,7 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
int *out_pipe_cnt,
int *pipe_split_from,
int vlevel_req,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int pipe_cnt, i, pipe_idx;
int vlevel, vlevel_max;
@@ -2281,10 +2279,10 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
if (pipe_cnt != pipe_idx) {
if (dc->res_pool->funcs->populate_dml_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
else
pipe_cnt = dcn21_populate_dml_pipes_from_context(dc,
- context, pipes, fast_validate);
+ context, pipes, validate_mode);
}
*out_pipe_cnt = pipe_cnt;
@@ -2319,7 +2317,7 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context,
}
bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes)
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes)
{
bool out = false;
@@ -2337,7 +2335,7 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
/*Unsafe due to current pipe merge and split logic*/
ASSERT(context != dc->current_state);
- out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate);
+ out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode);
if (pipe_cnt == 0)
goto validate_out;
@@ -2347,12 +2345,12 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
- if (fast_validate) {
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) {
BW_VAL_TRACE_SKIP(fast);
goto validate_out;
}
- dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
+ dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, validate_mode);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
BW_VAL_TRACE_END_WATERMARKS();
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
index b6c34198ddc8..aed00039ca62 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
@@ -44,14 +44,14 @@ void dcn20_calculate_dlg_params(struct dc *dc,
int dcn20_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn20_calculate_wm(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *out_pipe_cnt,
int *pipe_split_from,
int vlevel,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn20_cap_soc_clocks(struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table max_clocks);
void dcn20_update_bounding_box(struct dc *dc,
@@ -62,7 +62,7 @@ void dcn20_update_bounding_box(struct dc *dc,
void dcn20_patch_bounding_box(struct dc *dc,
struct _vcs_dpi_soc_bounding_box_st *bb);
bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
- bool fast_validate, display_e2e_pipe_params_st *pipes);
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes);
void dcn20_fpu_set_wm_ranges(int i,
struct pp_smu_wm_range_sets *ranges,
struct _vcs_dpi_soc_bounding_box_st *loaded_bb);
@@ -75,9 +75,9 @@ void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v,
int dcn21_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
-bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, bool
- fast_validate, display_e2e_pipe_params_st *pipes);
+ enum dc_validate_mode validate_mode);
+bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context,
+ enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes);
void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params);
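These signature changes replace the old fast_validate flag with an enum, and the hunks consistently map fast_validate == true onto "anything other than DC_VALIDATE_MODE_AND_PROGRAMMING". A tiny sketch of that mapping with a stand-in enum (only DC_VALIDATE_MODE_AND_PROGRAMMING is named in the diff; the fast value here is hypothetical):

#include <assert.h>
#include <stdbool.h>

/* Stand-in enum; only the ..._AND_PROGRAMMING value appears in the diff. */
enum validate_mode_sketch {
	SKETCH_MODE_AND_PROGRAMMING,	/* full validation plus HW programming */
	SKETCH_MODE_FAST,		/* hypothetical fast-only mode */
};

static enum validate_mode_sketch from_fast_validate(bool fast_validate)
{
	return fast_validate ? SKETCH_MODE_FAST : SKETCH_MODE_AND_PROGRAMMING;
}

/* Mirrors: if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
 *                  BW_VAL_TRACE_SKIP(fast); */
static bool skips_watermark_calc(enum validate_mode_sketch m)
{
	return m != SKETCH_MODE_AND_PROGRAMMING;
}

int main(void)
{
	assert(skips_watermark_calc(from_fast_validate(true)));
	assert(!skips_watermark_calc(from_fast_validate(false)));
	return 0;
}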
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 390c1a77fda6..9c58ff1069d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -646,7 +646,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
// the dpte_group_bytes is reduced for the specific case of vertical
// access of a tile surface that has dpte request of 8x1 ptes.
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 843d6004258c..570e6e39eb45 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -646,7 +646,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
// the dpte_group_bytes is reduced for the specific case of vertical
// access of a tile surface that has dpte request of 8x1 ptes.
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
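The operator fixes in these DML files swap bitwise & for logical && between condition-valued operands. With strictly 0/1 operands the two agree, which is why the change is behavior-preserving here, but && also short-circuits and avoids surprises when a "true" flag is a nonzero value other than 1. A two-line demonstration of the failure mode the logical form avoids:

#include <stdio.h>

int main(void)
{
	int a = 2, b = 1;		  /* both nonzero, i.e. both "true" */

	printf("a & b  = %d\n", a & b);	  /* 0: 0b10 & 0b01 share no bits */
	printf("a && b = %d\n", a && b);  /* 1: logical AND of two truths */
	return 0;
}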
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index 5718000627b0..f549da082c01 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -652,7 +652,7 @@ static void get_meta_and_pte_attr(
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
index 88789987bdbc..e5f5c0663750 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -339,7 +339,8 @@ void dcn30_fpu_calculate_wm_and_dlg(
* newly found dummy_latency_index
*/
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
+ dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel,
+ DC_VALIDATE_MODE_AND_PROGRAMMING, true);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
@@ -630,7 +631,8 @@ int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
while (dummy_latency_index < max_latency_table_entries) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
- dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
+ dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel,
+ DC_VALIDATE_MODE_AND_PROGRAMMING, true);
if (context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank ==
dm_allow_self_refresh_and_mclk_switch)
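
The dcn30 hunks above are one instance of a tree-wide migration from a bare fast_validate bool to an enum dc_validate_mode argument. Only DC_VALIDATE_MODE_AND_PROGRAMMING is visible in this diff; the second enumerator below is an assumption added for illustration, and validate_bw is a stand-in, not the driver function. A minimal sketch of the pattern:

enum dc_validate_mode {
	DC_VALIDATE_MODE_ONLY,			/* assumed: the old fast_validate == true  */
	DC_VALIDATE_MODE_AND_PROGRAMMING,	/* the old fast_validate == false */
};

/* Before, callers passed a bare 'false' whose meaning was invisible at
 * the call site; now the intent is spelled out, and new modes can be
 * added without flipping the sense of a boolean.
 */
static int validate_bw(enum dc_validate_mode mode)
{
	if (mode == DC_VALIDATE_MODE_AND_PROGRAMMING)
		return 1;	/* full validation + programming path */
	return 0;		/* mode-support-only path */
}

int main(void)
{
	return validate_bw(DC_VALIDATE_MODE_AND_PROGRAMMING) ? 0 : 1;
}
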
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 8d4873f80df0..4fb37df54d59 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -620,7 +620,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
index 0c0b2d67c9cd..1aaa77265eed 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -326,7 +326,7 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl;
- int j;
+ int j = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0;
dc_assert_fp_enabled();
@@ -338,6 +338,15 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
dcn3_01_soc.num_chans = bw_params->num_channels;
ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards*/
for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) {
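
The prepass added above scans the clock table once and remembers the highest DISPCLK/DPPCLK, because on DCN3.01 these clocks are independent of the voltage level being considered. A self-contained sketch with simplified types (the real clk_limit_table carries many more fields):

struct clk_entry { int dispclk_mhz; int dppclk_mhz; };

static void find_max_clocks(const struct clk_entry *e, int n,
			    int *max_dispclk_mhz, int *max_dppclk_mhz)
{
	*max_dispclk_mhz = 0;
	*max_dppclk_mhz = 0;
	for (int i = 0; i < n; i++) {
		if (e[i].dispclk_mhz > *max_dispclk_mhz)
			*max_dispclk_mhz = e[i].dispclk_mhz;
		if (e[i].dppclk_mhz > *max_dppclk_mhz)
			*max_dppclk_mhz = e[i].dppclk_mhz;
	}
}

int main(void)
{
	struct clk_entry table[] = { {600, 600}, {1200, 1100}, {900, 950} };
	int disp, dpp;

	find_max_clocks(table, 3, &disp, &dpp);	/* disp == 1200, dpp == 1100 */
	return 0;
}
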
@@ -353,8 +362,13 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
- s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ /* Clocks independent of voltage level. */
+ s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
s[i].dram_bw_per_chan_gbps =
dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
@@ -435,12 +449,12 @@ void dcn301_fpu_calculate_wm_and_dlg(struct dc *dc,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set C */
table_entry = &bw_params->wm_table.entries[WM_C];
- vlevel = min(max(vlevel_req, 2), vlevel_max);
+ vlevel = clamp(vlevel_req, 2, vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set B */
table_entry = &bw_params->wm_table.entries[WM_B];
- vlevel = min(max(vlevel_req, 1), vlevel_max);
+ vlevel = clamp(vlevel_req, 1, vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
&context->bw_ctx.dml, pipes, pipe_cnt);
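
min(max(v, lo), hi) and clamp(v, lo, hi) compute the same value; the kernel's clamp() macro (linux/minmax.h) simply states the intent directly and type-checks the bounds. A portable one-liner equivalent, for illustration only:

static inline int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* vlevel = min(max(vlevel_req, 2), vlevel_max);   -- before */
/* vlevel = clamp_int(vlevel_req, 2, vlevel_max);  -- after  */

int main(void) { return clamp_int(7, 2, 5) == 5 ? 0 : 1; }
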
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
index 8da97a96b1ce..8d7c59ec701d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
@@ -280,7 +280,7 @@ void dcn302_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
j = 0;
/* create the final dcfclk and uclk table */
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
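
The sub-expression removed in this hunk (and the identical ones in dcn303, dcn32 and dcn321 below) was already implied by the loop guard: inside "while (i < n && j < m && ...)", a further "i < n" test is always true, so dropping it changes nothing. A schematic merge loop, with illustrative names, showing the shape:

static void merge_tables(const int *a, int n, const int *b, int m, int *out)
{
	int i = 0, j = 0, k = 0;

	while (i < n && j < m) {
		if (a[i] < b[j])	/* an extra "&& i < n" here is dead code */
			out[k++] = a[i++];
		else
			out[k++] = b[j++];
	}
}

int main(void)
{
	int a[] = {1, 4}, b[] = {2, 3}, out[4];

	merge_tables(a, 2, b, 2, out);	/* out = {1, 2, 3}, then j == m ends the loop */
	return 0;
}
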
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
index e968870a4b81..b5d3fd4c3694 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
@@ -285,7 +285,7 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
j = 0;
/* create the final dcfclk and uclk table */
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index 17a21bcbde17..1a28061bb9ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -808,6 +808,8 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc)
{
+ dc_assert_fp_enabled();
+
return soc->clock_limits[0].dispclk_mhz * 10000.0 / (1.0 + soc->dcn_downspread_percent / 100.0);
}
@@ -815,6 +817,8 @@ int dcn_get_approx_det_segs_required_for_pstate(
struct _vcs_dpi_soc_bounding_box_st *soc,
int pix_clk_100hz, int bpp, int seg_size_kb)
{
+ dc_assert_fp_enabled();
+
/* Roughly calculate required crb to hide latency. In practice there is slightly
* more buffer available for latency hiding
*/
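
The added dc_assert_fp_enabled() calls follow a single rule: any helper that touches floating point must run inside a DC_FP_START()/DC_FP_END() region opened by its caller. dc_assert_fp_enabled, DC_FP_START and DC_FP_END are the real names from this series, but the depth-counter implementation below is a portable stand-in for illustration, not DC's:

#include <assert.h>

static _Thread_local int fp_depth;

static void fp_start(void) { fp_depth++; }	/* stand-in for DC_FP_START() */
static void fp_end(void)   { fp_depth--; }	/* stand-in for DC_FP_END()   */

static void assert_fp_enabled(void)		/* stand-in for dc_assert_fp_enabled() */
{
	assert(fp_depth > 0);
}

/* mirrors the downspread math in the hunk above */
static double max_non_odm_pix_rate(double dispclk_mhz, double downspread_pct)
{
	assert_fp_enabled();	/* catch callers outside an FP region */
	return dispclk_mhz * 10000.0 / (1.0 + downspread_pct / 100.0);
}

int main(void)
{
	fp_start();
	double r = max_non_odm_pix_rate(600.0, 0.5);
	fp_end();
	return r > 0.0 ? 0 : 1;
}
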
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
index d2ae43a82ba5..dfcc5d50071e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
@@ -55,5 +55,5 @@ int dcn_get_approx_det_segs_required_for_pstate(
int dcn31x_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
#endif /* __DCN31_FPU_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index c46bda2141ac..bfeb01477f0c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -615,7 +615,7 @@ static void get_meta_and_pte_attr(
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index 5ed117e11aa2..df9d50b9b57c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -306,7 +306,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -316,7 +316,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
dc_assert_fp_enabled();
- dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode);
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
index d32c5bb99f4c..362ac79184ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h
@@ -35,6 +35,6 @@
void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
index b7d2a0caec11..04df263ff65e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
@@ -703,7 +703,7 @@ static void get_meta_and_pte_attr(
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index b0fc1fd20208..8a0f128722b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -31,7 +31,7 @@
// We need this includes for WATERMARKS_* defines
#include "clk_mgr/dcn32/dcn32_smu13_driver_if.h"
#include "dcn30/dcn30_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#define DC_LOGGER_INIT(logger)
@@ -290,7 +290,7 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support;
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING);
/* for subvp + DRR case, if subvp pipes are still present we support pstate */
if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported &&
@@ -1479,7 +1479,7 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc,
/* Conditions for setting up phantom pipes for SubVP:
* 1. Not force disable SubVP
- * 2. Full update (i.e. !fast_validate)
+ * 2. Full update (i.e. DC_VALIDATE_MODE_AND_PROGRAMMING)
* 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
* 4. Display configuration passes validation
* 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
@@ -1517,7 +1517,8 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc,
dc->res_pool->funcs->add_phantom_pipes(dc, context, pipes, *pipe_cnt, dc_pipe_idx);
- *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
+ *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
// Populate dppclk to trigger a recalculate in dml_get_voltage_level
// so the phantom pipe DLG params can be assigned correctly.
pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0);
@@ -1560,7 +1561,8 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc,
dc_state_remove_phantom_streams_and_planes(dc, context);
dc_state_release_phantom_streams_and_planes(dc, context);
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported;
- *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
+ *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
/* This may adjust vlevel and maxMpcComb */
@@ -2138,7 +2140,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
bool out = false;
bool repopulate_pipes = false;
@@ -2162,7 +2164,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
for (i = 0; i < context->stream_count; i++)
resource_update_pipes_for_stream_with_slice_count(context, dc->current_state, dc->res_pool, context->streams[i], 1);
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
if (!pipe_cnt) {
out = true;
@@ -2172,13 +2174,13 @@ bool dcn32_internal_validate_bw(struct dc *dc,
dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context);
- if (!fast_validate) {
+ if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) {
if (!dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge,
&pipe_cnt, &repopulate_pipes))
goto validate_fail;
}
- if (fast_validate ||
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING ||
(dc->debug.dml_disallow_alternate_prefetch_modes &&
(vlevel == context->bw_ctx.dml.soc.num_states ||
vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
@@ -2195,7 +2197,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_none;
- context->bw_ctx.dml.validate_max_state = fast_validate;
+ context->bw_ctx.dml.validate_max_state = (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING);
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
context->bw_ctx.dml.validate_max_state = false;
@@ -2247,7 +2249,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
int flag_vlevel = vlevel;
int i;
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
if (!dc->config.enable_windowed_mpo_odm)
dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
@@ -2343,7 +2345,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
}
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
if (is_subvp_p_drr) {
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
@@ -2389,7 +2391,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->bw_ctx.dml.soc.fclk_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
}
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
if (vlevel_temp < vlevel) {
vlevel = vlevel_temp;
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
@@ -2410,7 +2413,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
stream_status->fpo_in_use = false;
}
context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
- dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel,
+ DC_VALIDATE_MODE_AND_PROGRAMMING);
}
}
}
@@ -3225,7 +3229,7 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
@@ -3397,7 +3401,7 @@ bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
uint32_t height = subvp_active_margin_list.res[i].height;
refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
- pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ (uint64_t)pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
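
The refresh-rate fix above is a classic promotion bug: in "u64_expr + v_total * h_total", the product is still evaluated in 32-bit arithmetic and can wrap before it is widened, so one operand must be cast to 64 bits first. A standalone demonstration (values chosen to force the wrap, not display timings):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t v_total = 80000, h_total = 60000;	/* product > UINT32_MAX */

	uint64_t wrong = (uint64_t)0 + v_total * h_total;		/* wraps in 32 bits */
	uint64_t right = (uint64_t)0 + (uint64_t)v_total * h_total;	/* 64-bit multiply  */

	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}
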
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
index 276e90e4e0ce..273d2bd79d85 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
@@ -49,7 +49,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
index 9ba6cb67655f..6c75aa82327a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
@@ -139,7 +139,6 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs,
if (dual_plane) {
unsigned int p1_pte_row_height_linear = get_dpte_row_height_linear_c(mode_lib, e2e_pipe_param,
num_pipes, pipe_idx);
- ;
if (src->sw_mode == dm_sw_linear)
ASSERT(p1_pte_row_height_linear >= 8);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index 8839faf42207..e0a1dc89ce43 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -779,7 +779,7 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 92f0a099d089..817a370e80a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -31,7 +31,7 @@
#include "dml/dcn31/dcn31_fpu.h"
#include "dml/dml_inline_defs.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER_INIT(logger)
@@ -437,7 +437,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -445,8 +445,10 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
+ dc_assert_fp_enabled();
+
dcn31_populate_dml_pipes_from_context(dc, context, pipes,
- fast_validate);
+ validate_mode);
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
@@ -498,9 +500,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
- DC_FP_END();
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
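
This hunk pairs with the dc_assert_fp_enabled() added earlier in the same function: the _fpu entry point is now required to be called from inside an FP-enabled region, so interior helpers no longer open a nested DC_FP_START()/DC_FP_END() pair. A self-contained sketch of the shape (all names illustrative, not the driver's):

#include <assert.h>

static int fp_region;					/* stand-in for kernel FPU state */
static void fp_start_sketch(void) { fp_region = 1; }	/* DC_FP_START() analogue */
static void fp_end_sketch(void)   { fp_region = 0; }	/* DC_FP_END() analogue   */

static void zero_dcc_fraction(double *f)	/* illustrative helper */
{
	*f = 0.0;
}

static void populate_pipes_fpu(double *dcc)	/* illustrative _fpu entry point */
{
	assert(fp_region);	/* the caller owns the FP region now */
	zero_dcc_fraction(dcc);	/* no nested start/end around this call */
}

int main(void)
{
	double dcc = 3.0;

	fp_start_sketch();
	populate_pipes_fpu(&dcc);
	fp_end_sketch();
	return dcc == 0.0 ? 0 : 1;
}
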
@@ -581,6 +581,8 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
unsigned int i, plane_count = 0;
DC_LOGGER_INIT(dc->ctx->logger);
+ dc_assert_fp_enabled();
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h
index 067480fc3691..d121c5afce71 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h
@@ -37,7 +37,7 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index 17d0b4923b0c..77023b619f1e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -10,7 +10,7 @@
#include "dml/dcn35/dcn35_fpu.h"
#include "dml/dml_inline_defs.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER_INIT(logger)
@@ -470,7 +470,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate)
+ enum dc_validate_mode validate_mode)
{
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
@@ -478,8 +478,10 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
+ dc_assert_fp_enabled();
+
dcn31_populate_dml_pipes_from_context(dc, context, pipes,
- fast_validate);
+ validate_mode);
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
@@ -531,9 +533,7 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
- DC_FP_END();
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h
index f93efab9a668..f71d9d8d0759 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h
@@ -12,7 +12,7 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c
deleted file mode 100644
index 4fbecb5ff349..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c
+++ /dev/null
@@ -1,239 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#include "dcn401_fpu.h"
-#include "dcn401/dcn401_resource.h"
-// We need this includes for WATERMARKS_* defines
-#include "clk_mgr/dcn401/dcn401_smu14_driver_if.h"
-#include "link.h"
-
-#define DC_LOGGER_INIT(logger)
-
-void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
-{
- /* defaults */
- double pstate_latency_us = clk_mgr->ctx->dc->dml.soc.dram_clock_change_latency_us;
- double fclk_change_latency_us = clk_mgr->ctx->dc->dml.soc.fclk_change_latency_us;
- double sr_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_exit_time_us;
- double sr_enter_plus_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_enter_plus_exit_time_us;
- /* For min clocks use as reported by PM FW and report those as min */
- uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz;
- uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
- uint16_t setb_min_uclk_mhz = min_uclk_mhz;
- uint16_t dcfclk_mhz_for_the_second_state = clk_mgr->ctx->dc->dml.soc.clock_limits[2].dcfclk_mhz;
-
- dc_assert_fp_enabled();
-
- /* For Set B ranges use min clocks state 2 when available, and report those to PM FW */
- if (dcfclk_mhz_for_the_second_state)
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = dcfclk_mhz_for_the_second_state;
- else
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
-
- if (clk_mgr->bw_params->clk_table.entries[2].memclk_mhz)
- setb_min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz;
-
- /* Set A - Normal - default values */
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
-
- /* Set B - Performance - higher clocks, using DPM[2] DCFCLK and UCLK */
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = setb_min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF;
-
- /* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */
- /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
- if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
- clk_mgr->bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50;
- clk_mgr->bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[1].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9;
- clk_mgr->bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[2].dummy_pstate_latency_us = 8;
- clk_mgr->bw_params->dummy_pstate_table[3].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[3].memclk_mhz * 16;
- clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us = 5;
- }
- /* Set D - MALL - SR enter and exit time specific to MALL, TBD after bringup or later phase for now use DRAM values / 2 */
- /* For MALL DRAM clock change latency is N/A, for watermak calculations use lowest value dummy P state latency */
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid = true;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us = fclk_change_latency_us;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = sr_exit_time_us / 2; // TBD
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us / 2; // TBD
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
- clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
-}
-
-/*
- * dcn401_update_bw_bounding_box
- *
- * This would override some dcn4_01 ip_or_soc initial parameters hardcoded from
- * spreadsheet with actual values as per dGPU SKU:
- * - with passed few options from dc->config
- * - with dentist_vco_frequency from Clk Mgr (currently hardcoded, but might
- * need to get it from PM FW)
- * - with passed latency values (passed in ns units) in dc-> bb override for
- * debugging purposes
- * - with passed latencies from VBIOS (in 100_ns units) if available for
- * certain dGPU SKU
- * - with number of DRAM channels from VBIOS (which differ for certain dGPU SKU
- * of the same ASIC)
- * - clocks levels with passed clk_table entries from Clk Mgr as reported by PM
- * FW for different clocks (which might differ for certain dGPU SKU of the
- * same ASIC)
- */
-void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
-{
- dc_assert_fp_enabled();
-
- /* Override from passed dc->bb_overrides if available*/
- if (dc->bb_overrides.sr_exit_time_ns)
- dc->dml2_options.bbox_overrides.sr_exit_latency_us =
- dc->bb_overrides.sr_exit_time_ns / 1000.0;
-
- if (dc->bb_overrides.sr_enter_plus_exit_time_ns)
- dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
- dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
-
- if (dc->bb_overrides.urgent_latency_ns)
- dc->dml2_options.bbox_overrides.urgent_latency_us =
- dc->bb_overrides.urgent_latency_ns / 1000.0;
-
- if (dc->bb_overrides.dram_clock_change_latency_ns)
- dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
- dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
-
- if (dc->bb_overrides.fclk_clock_change_latency_ns)
- dc->dml2_options.bbox_overrides.fclk_change_latency_us =
- dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
-
- /* Override from VBIOS if VBIOS bb_info available */
- if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
- struct bp_soc_bb_info bb_info = {0};
- if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
- if (bb_info.dram_clock_change_latency_100ns > 0)
- dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
- bb_info.dram_clock_change_latency_100ns * 10;
-
- if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
- bb_info.dram_sr_enter_exit_latency_100ns * 10;
-
- if (bb_info.dram_sr_exit_latency_100ns > 0)
- dc->dml2_options.bbox_overrides.sr_exit_latency_us =
- bb_info.dram_sr_exit_latency_100ns * 10;
- }
- }
-
- /* Override from VBIOS for num_chan */
- if (dc->ctx->dc_bios->vram_info.num_chans) {
- dc->dml2_options.bbox_overrides.dram_num_chan =
- dc->ctx->dc_bios->vram_info.num_chans;
-
- }
-
- if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
- dc->dml2_options.bbox_overrides.dram_chanel_width_bytes =
- dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
-
- dc->dml2_options.bbox_overrides.disp_pll_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
- dc->dml2_options.bbox_overrides.xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000.0;
- dc->dml2_options.bbox_overrides.dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
- dc->dml2_options.bbox_overrides.dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000.0;
-
- if (dc->clk_mgr->bw_params->clk_table.num_entries > 1) {
- unsigned int i = 0;
-
- dc->dml2_options.bbox_overrides.clks_table.num_states = dc->clk_mgr->bw_params->clk_table.num_entries;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
-
- dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels =
- dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz)
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz;
- }
-
- for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; i++) {
- if (dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz) {
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
- dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz =
- dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
- }
- }
- }
-}
-
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h
deleted file mode 100644
index 329f1788843c..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#ifndef __DCN401_FPU_H__
-#define __DCN401_FPU_H__
-
-#include "clk_mgr.h"
-
-void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr);
-
-void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
deleted file mode 100644
index 157ecf008d6c..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ /dev/null
@@ -1,140 +0,0 @@
-# SPDX-License-Identifier: MIT */
-#
-# Copyright 2023 Advanced Micro Devices, Inc.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-# Authors: AMD
-#
-# Makefile for dml2.
-
-dml2_ccflags := $(CC_FLAGS_FPU)
-dml2_rcflags := $(CC_FLAGS_NO_FPU)
-
-ifneq ($(CONFIG_FRAME_WARN),0)
- ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
- ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
- frame_warn_limit := 4096
- else
- frame_warn_limit := 3072
- endif
- else
- frame_warn_limit := 2048
- endif
-
- ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
- frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
- endif
-endif
-
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_core
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_mcg/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_dpmm/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_pmo/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_standalone_libraries/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/inc
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/inc
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/
-
-CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
-CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_ccflags)
-
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_rcflags)
-
-DML2 = display_mode_core.o display_mode_util.o dml2_wrapper.o \
- dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \
- dml_display_rq_dlg_calc.o
-
-AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
-
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags)
-
-
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags)
-
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags)
-
-DML21 := src/dml2_top/dml2_top_interfaces.o
-DML21 += src/dml2_top/dml2_top_soc15.o
-DML21 += src/dml2_core/dml2_core_dcn4.o
-DML21 += src/dml2_core/dml2_core_factory.o
-DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
-DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
-DML21 += src/dml2_dpmm/dml2_dpmm_factory.o
-DML21 += src/dml2_mcg/dml2_mcg_dcn4.o
-DML21 += src/dml2_mcg/dml2_mcg_factory.o
-DML21 += src/dml2_pmo/dml2_pmo_dcn3.o
-DML21 += src/dml2_pmo/dml2_pmo_factory.o
-DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o
-DML21 += src/dml2_standalone_libraries/lib_float_math.o
-DML21 += dml21_translation_helper.o
-DML21 += dml21_wrapper.o
-DML21 += dml21_utils.o
-
-AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2/dml21/,$(DML21))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DML21)
-
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
deleted file mode 100644
index b226225103c3..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#ifndef __DML2_DEBUG_H__
-#define __DML2_DEBUG_H__
-
-#include "os_types.h"
-#define DML_ASSERT(condition) ASSERT(condition)
-#define DML_LOG_LEVEL_DEFAULT DML_LOG_LEVEL_WARN
-#define DML_LOG_INTERNAL(fmt, ...) dm_output_to_console(fmt, ## __VA_ARGS__)
-
-/* ASSERT with message output */
-#define DML_ASSERT_MSG(condition, fmt, ...) \
- do { \
- if (!(condition)) { \
- DML_LOG_ERROR("DML ASSERT hit in %s line %d\n", __func__, __LINE__); \
- DML_LOG_ERROR(fmt, ## __VA_ARGS__); \
- DML_ASSERT(condition); \
- } \
- } while (0)
-
-/* fatal errors for unrecoverable DML states until a full reset */
-#define DML_LOG_LEVEL_FATAL 0
-/* unexpected but recoverable failures inside DML */
-#define DML_LOG_LEVEL_ERROR 1
-/* unexpected inputs or events to DML */
-#define DML_LOG_LEVEL_WARN 2
-/* high level tracing of DML interfaces */
-#define DML_LOG_LEVEL_INFO 3
-/* detailed tracing of DML internal components */
-#define DML_LOG_LEVEL_DEBUG 4
-/* detailed tracing of DML calculation procedure */
-#define DML_LOG_LEVEL_VERBOSE 5
-
-#ifndef DML_LOG_LEVEL
-#define DML_LOG_LEVEL DML_LOG_LEVEL_DEFAULT
-#endif /* #ifndef DML_LOG_LEVEL */
-
-#define DML_LOG_FATAL(fmt, ...) DML_LOG_INTERNAL("[DML FATAL] " fmt, ## __VA_ARGS__)
-#if DML_LOG_LEVEL >= DML_LOG_LEVEL_ERROR
-#define DML_LOG_ERROR(fmt, ...) DML_LOG_INTERNAL("[DML ERROR] "fmt, ## __VA_ARGS__)
-#else
-#define DML_LOG_ERROR(fmt, ...) ((void)0)
-#endif
-#if DML_LOG_LEVEL >= DML_LOG_LEVEL_WARN
-#define DML_LOG_WARN(fmt, ...) DML_LOG_INTERNAL("[DML WARN] "fmt, ## __VA_ARGS__)
-#else
-#define DML_LOG_WARN(fmt, ...) ((void)0)
-#endif
-#if DML_LOG_LEVEL >= DML_LOG_LEVEL_INFO
-#define DML_LOG_INFO(fmt, ...) DML_LOG_INTERNAL("[DML INFO] "fmt, ## __VA_ARGS__)
-#else
-#define DML_LOG_INFO(fmt, ...) ((void)0)
-#endif
-#if DML_LOG_LEVEL >= DML_LOG_LEVEL_DEBUG
-#define DML_LOG_DEBUG(fmt, ...) DML_LOG_INTERNAL("[DML DEBUG] "fmt, ## __VA_ARGS__)
-#else
-#define DML_LOG_DEBUG(fmt, ...) ((void)0)
-#endif
-#if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE
-#define DML_LOG_VERBOSE(fmt, ...) DML_LOG_INTERNAL("[DML VERBOSE] "fmt, ## __VA_ARGS__)
-#else
-#define DML_LOG_VERBOSE(fmt, ...) ((void)0)
-#endif
-#endif /* __DML2_DEBUG_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
new file mode 100644
index 000000000000..97e068b6bf6b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: MIT */
+#
+# Copyright 2023 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Authors: AMD
+#
+# Makefile for dml2.
+
+dml2_ccflags := $(CC_FLAGS_FPU)
+dml2_rcflags := $(CC_FLAGS_NO_FPU)
+
+ifneq ($(CONFIG_FRAME_WARN),0)
+ ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+ ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
+ frame_warn_limit := 4096
+ else
+ frame_warn_limit := 3072
+ endif
+ else
+ frame_warn_limit := 2056
+ endif
+
+ ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
+ frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
+ endif
+endif
+
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_core
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_mcg/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_dpmm/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_pmo/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/inc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/inc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/
+
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_ccflags)
+
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_rcflags)
+
+DML2 = display_mode_core.o display_mode_util.o dml2_wrapper.o \
+ dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \
+ dml_display_rq_dlg_calc.o
+
+AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2_0/,$(DML2))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
+
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml21_wrapper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_ccflags)
+
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml21_wrapper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_rcflags)
+
+DML21 := src/dml2_top/dml2_top_interfaces.o
+DML21 += src/dml2_top/dml2_top_soc15.o
+DML21 += src/dml2_core/dml2_core_dcn4.o
+DML21 += src/dml2_core/dml2_core_utils.o
+DML21 += src/dml2_core/dml2_core_factory.o
+DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
+DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
+DML21 += src/dml2_dpmm/dml2_dpmm_factory.o
+DML21 += src/dml2_mcg/dml2_mcg_dcn4.o
+DML21 += src/dml2_mcg/dml2_mcg_factory.o
+DML21 += src/dml2_pmo/dml2_pmo_dcn3.o
+DML21 += src/dml2_pmo/dml2_pmo_factory.o
+DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o
+DML21 += src/dml2_standalone_libraries/lib_float_math.o
+DML21 += dml21_translation_helper.o
+DML21 += dml21_wrapper.o
+DML21 += dml21_utils.o
+
+AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2_0/dml21/,$(DML21))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DML21)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h b/drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h
index e450445bc05d..b954c9648fbe 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h
@@ -53,17 +53,17 @@ typedef const void *const_pvoid;
typedef const char *const_pchar;
typedef struct rgba_struct {
- uint8 a;
- uint8 r;
- uint8 g;
- uint8 b;
+ uint8 a;
+ uint8 r;
+ uint8 g;
+ uint8 b;
} rgba_t;
typedef struct {
- uint8 blue;
- uint8 green;
- uint8 red;
- uint8 alpha;
+ uint8 blue;
+ uint8 green;
+ uint8 red;
+ uint8 alpha;
} gen_color_t;
typedef union {
@@ -87,7 +87,7 @@ typedef union {
} uintfloat64;
#ifndef UNREFERENCED_PARAMETER
-#define UNREFERENCED_PARAMETER(x) x = x
+#define UNREFERENCED_PARAMETER(x) (x = x)
#endif
#endif
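
The UNREFERENCED_PARAMETER hunk above is a macro-hygiene fix: without the parentheses, the expanded x = x can merge with a neighbouring operator at the use site. A minimal standalone C sketch of the hazard (UNREF_BAD and UNREF_GOOD are hypothetical names, not from the tree):

/* UNREF_BAD/UNREF_GOOD stand in for the old and new macro bodies. */
#include <stdio.h>

#define UNREF_BAD(x)	x = x
#define UNREF_GOOD(x)	(x = x)

int main(void)
{
	int a = 1, b = 1;

	int r1 = UNREF_BAD(a) + 1;	/* expands to a = a + 1: a mutates */
	int r2 = UNREF_GOOD(b) + 1;	/* expands to (b = b) + 1: b intact */

	printf("a=%d r1=%d b=%d r2=%d\n", a, r1, b, r2); /* a=2 r1=2 b=1 r2=2 */
	return 0;
}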
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c
index 7ae9c0ba0c9e..c468f492b876 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c
@@ -6529,7 +6529,7 @@ static noinline_for_stack void dml_prefetch_check(struct display_mode_lib_st *mo
mode_lib->ms.TotImmediateFlipBytes = 0;
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
if (!(mode_lib->ms.policy.ImmediateFlipRequirement[k] == dml_immediate_flip_not_required)) {
- mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k] + mode_lib->ms.MetaRowBytes[j][k];
+ mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * (mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k] + mode_lib->ms.MetaRowBytes[j][k]);
if (mode_lib->ms.use_one_row_for_frame_flip[j][k]) {
mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * (2 * mode_lib->ms.DPTEBytesPerRow[j][k]);
} else {
@@ -10189,7 +10189,7 @@ dml_uint_t dml_mode_support_ex(struct dml_mode_support_ex_params_st *in_out_para
result = mode_support_pwr_states(&in_out_params->out_lowest_state_idx,
in_out_params->mode_lib,
in_out_params->in_display_cfg,
- 0,
+ in_out_params->in_start_state_idx,
in_out_params->mode_lib->states.num_states - 1);
if (result)
@@ -10205,6 +10205,7 @@ dml_bool_t dml_get_is_phantom_pipe(struct display_mode_lib_st *mode_lib, dml_uin
return (mode_lib->ms.cache_display_cfg.plane.UseMALLForPStateChange[plane_idx] == dml_use_mall_pstate_change_phantom_pipe);
}
+
#define dml_get_per_surface_var_func(variable, type, interval_var) type dml_get_##variable(struct display_mode_lib_st *mode_lib, dml_uint_t surface_idx) \
{ \
dml_uint_t plane_idx; \
@@ -10333,3 +10334,4 @@ dml_get_per_surface_var_func(bigk_fragment_size, dml_uint_t, mode_lib->mp.BIGK_F
dml_get_per_surface_var_func(dpte_bytes_per_row, dml_uint_t, mode_lib->mp.PixelPTEBytesPerRow);
dml_get_per_surface_var_func(meta_bytes_per_row, dml_uint_t, mode_lib->mp.MetaRowByte);
dml_get_per_surface_var_func(det_buffer_size_kbytes, dml_uint_t, mode_lib->ms.DETBufferSizeInKByte);
+
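
The first display_mode_core.c hunk regroups the immediate-flip byte count so MetaRowBytes is scaled by the per-plane DPP count rather than added once. A small arithmetic sketch with made-up values (all names are local stand-ins):

/* Made-up plane values; only the grouping differs between the two sums. */
#include <stdio.h>

int main(void)
{
	unsigned int no_of_dpp = 2;		/* hypothetical pipe split */
	unsigned int pde_meta_pte_bytes = 100;
	unsigned int meta_row_bytes = 50;

	/* before: meta row bytes counted once per plane */
	unsigned int before = no_of_dpp * pde_meta_pte_bytes + meta_row_bytes;
	/* after: meta row bytes counted once per DPP, matching the fix */
	unsigned int after = no_of_dpp * (pde_meta_pte_bytes + meta_row_bytes);

	printf("before=%u after=%u\n", before, after);	/* 250 vs 300 */
	return 0;
}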
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h
index a38ed89c47a9..a38ed89c47a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h
index 0670e4dc4fd9..5b40dcdc4406 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h
@@ -274,7 +274,6 @@ enum dml_clk_cfg_policy {
dml_use_state_freq = 2
};
-
struct soc_state_bounding_box_st {
dml_float_t socclk_mhz;
dml_float_t dscclk_mhz;
@@ -1894,7 +1893,7 @@ struct display_mode_lib_scratch_st {
struct CalculatePrefetchSchedule_params_st CalculatePrefetchSchedule_params;
};
-/// @brief Represent the overall soc/ip enviroment. It contains data structure represent the soc/ip characteristic and also structures that hold calculation output
+/// @brief Represents the overall soc/ip environment. It contains data structures representing the soc/ip characteristics and also structures that hold calculation output
struct display_mode_lib_st {
dml_uint_t project;
@@ -1917,6 +1916,7 @@ struct display_mode_lib_st {
struct dml_mode_support_ex_params_st {
struct display_mode_lib_st *mode_lib;
const struct dml_display_cfg_st *in_display_cfg;
+ dml_uint_t in_start_state_idx;
dml_uint_t out_lowest_state_idx;
struct dml_mode_support_info_st *out_evaluation_info;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h
index 14d389525296..e574c81edf5e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h
@@ -52,7 +52,7 @@
#define __DML_VBA_DEBUG__
#define __DML_VBA_ENABLE_INLINE_CHECK_ 0
#define __DML_VBA_MIN_VSTARTUP__ 9 //<brief At which vstartup the DML starts to check if the mode can be supported
-#define __DML_ARB_TO_RET_DELAY__ 7 + 95 //<brief Delay in DCFCLK from ARB to DET (1st num is ARB to SDPIF, 2nd number is SDPIF to DET)
+#define __DML_ARB_TO_RET_DELAY__ (7 + 95) //<brief Delay in DCFCLK from ARB to DET (1st num is ARB to SDPIF, 2nd number is SDPIF to DET)
#define __DML_MIN_DCFCLK_FACTOR__ 1.15 //<brief fudge factor for min dcfclk calculation
#define __DML_MAX_VRATIO_PRE__ 4.0 //<brief Prefetch schedule max vratio
#define __DML_MAX_VRATIO_PRE_OTO__ 4.0 //<brief Prefetch schedule max vratio for one to one scheduling calculation for prefetch
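
Like the UNREFERENCED_PARAMETER change, the __DML_ARB_TO_RET_DELAY__ hunk parenthesizes a macro body so the sum survives operator precedence at expansion sites. A standalone sketch using the real constants but a hypothetical multiplying use:

/* Hypothetical multiplying use of the delay macro, old vs new body. */
#include <stdio.h>

#define ARB_TO_RET_DELAY_OLD 7 + 95	/* unparenthesized, as before */
#define ARB_TO_RET_DELAY_NEW (7 + 95)	/* parenthesized, as patched */

int main(void)
{
	printf("old: %d\n", 2 * ARB_TO_RET_DELAY_OLD);	/* 2*7 + 95 = 109 */
	printf("new: %d\n", 2 * ARB_TO_RET_DELAY_NEW);	/* 2*(7+95) = 204 */
	return 0;
}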
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c
index 89890c88fd66..89890c88fd66 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.h
index 113b0265e1d1..a82b49cf7fb0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.h
@@ -30,7 +30,6 @@
#include "display_mode_core_structs.h"
#include "cmntypes.h"
-
#include "dml_assert.h"
#include "dml_logging.h"
@@ -72,5 +71,4 @@ __DML_DLL_EXPORT__ dml_uint_t dml_get_plane_idx(const struct display_mode_lib_st
__DML_DLL_EXPORT__ dml_uint_t dml_get_pipe_idx(const struct display_mode_lib_st *mode_lib, dml_uint_t plane_idx);
__DML_DLL_EXPORT__ void dml_calc_pipe_plane_mapping(const struct dml_hw_resource_st *hw, dml_uint_t *pipe_plane);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
index d47cacfdb695..bf5e7f4e0416 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
@@ -2,338 +2,73 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#include "dml21_wrapper.h"
#include "dml2_core_dcn4_calcs.h"
#include "dml2_internal_shared_types.h"
#include "dml2_internal_types.h"
#include "dml21_utils.h"
#include "dml21_translation_helper.h"
-#include "bounding_boxes/dcn4_soc_bb.h"
-
-static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- const struct dml2_soc_bb *soc_bb;
- const struct dml2_soc_qos_parameters *qos_params;
-
- switch (in_dc->ctx->dce_version) {
- case DCN_VERSION_4_01:
- default:
- if (config->bb_from_dmub)
- soc_bb = config->bb_from_dmub;
- else
- soc_bb = &dml2_socbb_dcn401;
-
- qos_params = &dml_dcn4_variant_a_soc_qos_params;
- }
+#include "soc_and_ip_translator.h"
- /* patch soc bb */
- memcpy(&dml_init->soc_bb, soc_bb, sizeof(struct dml2_soc_bb));
-
- /* patch qos params */
- memcpy(&dml_init->soc_bb.qos_parameters, qos_params, sizeof(struct dml2_soc_qos_parameters));
-}
-
-static void dml21_external_socbb_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config)
-{
- memcpy(&dml_init->soc_bb, &config->external_socbb_ip_params->soc_bb, sizeof(struct dml2_soc_bb));
-}
-
-static void dml21_external_ip_params(struct dml2_initialize_instance_in_out *dml_init,
+static void dml21_populate_pmo_options(struct dml2_pmo_options *pmo_options,
+ const struct dc *in_dc,
const struct dml2_configuration_options *config)
{
- memcpy(&dml_init->ip_caps, &config->external_socbb_ip_params->ip_params, sizeof(struct dml2_ip_capabilities));
+ bool disable_fams2 = !in_dc->debug.fams2_config.bits.enable;
+
+ /* ODM options */
+ pmo_options->disable_dyn_odm = !config->minimize_dispclk_using_odm;
+ pmo_options->disable_dyn_odm_for_multi_stream = true;
+ pmo_options->disable_dyn_odm_for_stream_with_svp = true;
+
+ pmo_options->disable_vblank = ((in_dc->debug.dml21_disable_pstate_method_mask >> 1) & 1);
+
+ /* NOTE: DRR and SubVP Require FAMS2 */
+ pmo_options->disable_svp = ((in_dc->debug.dml21_disable_pstate_method_mask >> 2) & 1) ||
+ in_dc->debug.force_disable_subvp ||
+ disable_fams2;
+ pmo_options->disable_drr_clamped = ((in_dc->debug.dml21_disable_pstate_method_mask >> 3) & 1) ||
+ disable_fams2;
+ pmo_options->disable_drr_var = ((in_dc->debug.dml21_disable_pstate_method_mask >> 4) & 1) ||
+ disable_fams2;
+ pmo_options->disable_fams2 = disable_fams2;
+
+ pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE ||
+ in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY;
+ pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE;
}
-static void dml21_init_ip_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
+static enum dml2_project_id dml21_dcn_revision_to_dml2_project_id(enum dce_version dcn_version)
{
- const struct dml2_ip_capabilities *ip_caps;
-
- switch (in_dc->ctx->dce_version) {
+ enum dml2_project_id project_id;
+ switch (dcn_version) {
case DCN_VERSION_4_01:
+ project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
+ break;
default:
- ip_caps = &dml2_dcn401_max_ip_caps;
+ project_id = dml2_project_invalid;
+ DC_ERR("unsupported dcn version for DML21!");
+ break;
}
- memcpy(&dml_init->ip_caps, ip_caps, sizeof(struct dml2_ip_capabilities));
+ return project_id;
}
-void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_init,
+void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
const struct dml2_configuration_options *config,
const struct dc *in_dc)
{
- if (config->use_native_soc_bb_construction)
- dml21_init_socbb_params(dml_init, config, in_dc);
- else
- dml21_external_socbb_params(dml_init, config);
-}
-
-void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- if (config->use_native_soc_bb_construction)
- dml21_init_ip_params(dml_init, config, in_dc);
- else
- dml21_external_ip_params(dml_init, config);
-}
-
-void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config, const struct dc *in_dc)
-{
- int i;
-
- const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params;
- const struct clk_limit_table *dc_clk_table = &dc_bw_params->clk_table;
- struct dml2_soc_bb *dml_soc_bb = &dml_init->soc_bb;
- struct dml2_soc_state_table *dml_clk_table = &dml_soc_bb->clk_table;
-
- /* override clocks if smu is present */
- if (in_dc->clk_mgr->funcs->is_smu_present && in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr)) {
- /* dcfclk */
- if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
- dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dcfclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz &&
- dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) {
- dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000;
- dml_clk_table->dcfclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = 0;
- dml_clk_table->dcfclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* fclk */
- if (dc_clk_table->num_entries_per_clk.num_fclk_levels) {
- dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->fclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz &&
- dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) {
- dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000;
- dml_clk_table->fclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = 0;
- dml_clk_table->fclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000;
- }
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = 0;
- }
- }
- }
+ dml_init->options.project_id = dml21_dcn_revision_to_dml2_project_id(in_dc->ctx->dce_version);
- /* uclk */
- if (dc_clk_table->num_entries_per_clk.num_memclk_levels) {
- dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->uclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz &&
- dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) {
- dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000;
- dml_clk_table->uclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = 0;
- dml_clk_table->uclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
- }
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* dispclk */
- if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) {
- dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dispclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz &&
- dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) {
- dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000;
- dml_clk_table->dispclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = 0;
- dml_clk_table->dispclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* dppclk */
- if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) {
- dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dppclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz &&
- dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) {
- dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000;
- dml_clk_table->dppclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = 0;
- dml_clk_table->dppclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* dtbclk */
- if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) {
- dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dtbclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz &&
- dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) {
- dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000;
- dml_clk_table->dtbclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = 0;
- dml_clk_table->dtbclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* socclk */
- if (dc_clk_table->num_entries_per_clk.num_socclk_levels) {
- dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->socclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz &&
- dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) {
- dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000;
- dml_clk_table->socclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = 0;
- dml_clk_table->socclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000;
- }
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* do not override phyclks for now */
- /* phyclk */
- // dml_clk_table->phyclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_levels;
- // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- // dml_clk_table->phyclk.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_mhz * 1000;
- // }
-
- /* phyclk_d18 */
- // dml_clk_table->phyclk_d18.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_d18_levels;
- // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- // dml_clk_table->phyclk_d18.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_d18_mhz * 1000;
- // }
-
- /* phyclk_d32 */
- // dml_clk_table->phyclk_d32.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_d32_levels;
- // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) {
- // dml_clk_table->phyclk_d32.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_d32_mhz * 1000;
- // }
- }
-
- dml_soc_bb->dchub_refclk_mhz = in_dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
- dml_soc_bb->dprefclk_mhz = in_dc->clk_mgr->dprefclk_khz / 1000;
- dml_soc_bb->xtalclk_mhz = in_dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000;
- dml_soc_bb->dispclk_dppclk_vco_speed_mhz = in_dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
-
- /* override bounding box paramters from VBIOS */
- if (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns > 0)
- dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us =
- (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns + 9) / 10;
-
- if (in_dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dml_soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
- (in_dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns + 9) / 10;
-
- if (in_dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns > 0)
- dml_soc_bb->power_management_parameters.stutter_exit_latency_us =
- (in_dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns + 9) / 10;
-
- if (dc_bw_params->num_channels) {
- dml_clk_table->dram_config.channel_count = dc_bw_params->num_channels;
- dml_soc_bb->mall_allocated_for_dcn_mbytes = in_dc->caps.mall_size_total / 1048576;
- } else if (in_dc->ctx->dc_bios->vram_info.num_chans) {
- dml_clk_table->dram_config.channel_count = in_dc->ctx->dc_bios->vram_info.num_chans;
- dml_soc_bb->mall_allocated_for_dcn_mbytes = in_dc->caps.mall_size_total / 1048576;
- }
-
- if (dc_bw_params->dram_channel_width_bytes) {
- dml_clk_table->dram_config.channel_width_bytes = dc_bw_params->dram_channel_width_bytes;
- } else if (in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) {
- dml_clk_table->dram_config.channel_width_bytes = in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
- }
-
- /* override bounding box paramters from DC config */
- if (in_dc->bb_overrides.sr_exit_time_ns) {
- dml_soc_bb->power_management_parameters.stutter_exit_latency_us =
- in_dc->bb_overrides.sr_exit_time_ns / 1000.0;
- }
-
- if (in_dc->bb_overrides.sr_enter_plus_exit_time_ns) {
- dml_soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
- in_dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
- }
-
- if (in_dc->bb_overrides.dram_clock_change_latency_ns) {
- dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us =
- in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
- }
-
- if (in_dc->bb_overrides.fclk_clock_change_latency_ns) {
- dml_soc_bb->power_management_parameters.fclk_change_blackout_us =
- in_dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0;
+ if (config->use_native_soc_bb_construction) {
+ in_dc->soc_and_ip_translator->translator_funcs->get_soc_bb(&dml_init->soc_bb, in_dc, config);
+ in_dc->soc_and_ip_translator->translator_funcs->get_ip_caps(&dml_init->ip_caps);
+ } else {
+ dml_init->soc_bb = config->external_socbb_ip_params->soc_bb;
+ dml_init->ip_caps = config->external_socbb_ip_params->ip_params;
}
- //TODO
- // if (in_dc->bb_overrides.dummy_clock_change_latency_ns) {
- // dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us =
- // in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
- // }
+ dml21_populate_pmo_options(&dml_init->options.pmo_options, in_dc, config);
}
static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
@@ -349,25 +84,29 @@ static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stre
static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cfg *timing,
struct dc_stream_state *stream,
+ struct pipe_ctx *pipe_ctx,
struct dml2_context *dml_ctx)
{
unsigned int hblank_start, vblank_start, min_hardware_refresh_in_uhz;
+ uint32_t pix_clk_100hz;
- timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+ timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
timing->v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
timing->h_front_porch = stream->timing.h_front_porch;
timing->v_front_porch = stream->timing.v_front_porch;
timing->pixel_clock_khz = stream->timing.pix_clk_100hz / 10;
+ if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0)
+ timing->pixel_clock_khz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz / 10;
if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
timing->pixel_clock_khz *= 2;
- timing->h_total = stream->timing.h_total;
+ timing->h_total = stream->timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding;
timing->v_total = stream->timing.v_total;
timing->h_sync_width = stream->timing.h_sync_width;
timing->interlaced = stream->timing.flags.INTERLACE;
hblank_start = stream->timing.h_total - stream->timing.h_front_porch;
- timing->h_blank_end = hblank_start - stream->timing.h_addressable
+ timing->h_blank_end = hblank_start - stream->timing.h_addressable - pipe_ctx->dsc_padding_params.dsc_hactive_padding
- stream->timing.h_border_left - stream->timing.h_border_right;
if (hblank_start < stream->timing.h_addressable)
@@ -386,15 +125,16 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
/* limit min refresh rate to DC cap */
min_hardware_refresh_in_uhz = stream->timing.min_refresh_in_uhz;
if (stream->ctx->dc->caps.max_v_total != 0) {
- min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL),
- (stream->timing.h_total * (long long)calc_max_hardware_v_total(stream)));
+ if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0) {
+ pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz;
+ } else {
+ pix_clk_100hz = stream->timing.pix_clk_100hz;
+ }
+ min_hardware_refresh_in_uhz = div64_u64((pix_clk_100hz * 100000000ULL),
+ (timing->h_total * (long long)calc_max_hardware_v_total(stream)));
}
- if (stream->timing.min_refresh_in_uhz > min_hardware_refresh_in_uhz) {
- timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz;
- } else {
- timing->drr_config.min_refresh_uhz = min_hardware_refresh_in_uhz;
- }
+ timing->drr_config.min_refresh_uhz = max(stream->timing.min_refresh_in_uhz, min_hardware_refresh_in_uhz);
if (dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase &&
stream->ctx->dc->config.enable_fpo_flicker_detection == 1)
@@ -442,21 +182,6 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
timing->vblank_nom = timing->v_total - timing->v_active;
}
-/**
- * adjust_dml21_hblank_timing_config_from_pipe_ctx - Adjusts the horizontal blanking timing configuration
- * based on the pipe context.
- * @timing: Pointer to the dml2_timing_cfg structure to be adjusted.
- * @pipe: Pointer to the pipe_ctx structure containing the horizontal blanking borrow value.
- *
- * This function modifies the horizontal active and blank end timings by adding and subtracting
- * the horizontal blanking borrow value from the pipe context, respectively.
- */
-static void adjust_dml21_hblank_timing_config_from_pipe_ctx(struct dml2_timing_cfg *timing, struct pipe_ctx *pipe)
-{
- timing->h_active += pipe->hblank_borrow;
- timing->h_blank_end -= pipe->hblank_borrow;
-}
-
static void populate_dml21_output_config_from_stream_state(struct dml2_link_output_cfg *output,
struct dc_stream_state *stream, const struct pipe_ctx *pipe)
{
@@ -726,7 +451,6 @@ static void populate_dml21_surface_config_from_plane_state(
switch (plane_state->tiling_info.gfxversion) {
case DcGfxVersion7:
case DcGfxVersion8:
- // Placeholder for programming the array_mode
break;
case DcGfxVersion9:
case DcGfxVersion10:
@@ -757,7 +481,9 @@ static const struct scaler_data *get_scaler_data_for_plane(
temp_pipe->plane_state = pipe->plane_state;
temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps;
temp_pipe->stream_res = pipe->stream_res;
- temp_pipe->hblank_borrow = pipe->hblank_borrow;
+ temp_pipe->dsc_padding_params.dsc_hactive_padding = pipe->dsc_padding_params.dsc_hactive_padding;
+ temp_pipe->dsc_padding_params.dsc_htotal_padding = pipe->dsc_padding_params.dsc_htotal_padding;
+ temp_pipe->dsc_padding_params.dsc_pix_clk_100hz = pipe->dsc_padding_params.dsc_pix_clk_100hz;
dml_ctx->config.callbacks.build_scaling_params(temp_pipe);
break;
}
@@ -788,6 +514,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
plane->pixel_format = dml2_420_10;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
plane->pixel_format = dml2_444_64;
@@ -888,10 +615,8 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
case DC_CM2_GPU_MEM_SIZE_171717:
plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube;
break;
- case DC_CM2_GPU_MEM_SIZE_333333:
- plane->tdlut.tdlut_width_mode = dml2_tdlut_width_33_cube;
- break;
case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
+ default:
//plane->tdlut.tdlut_width_mode = dml2_tdlut_width_flatten; // dml2_tdlut_width_flatten undefined
break;
}
@@ -1026,8 +751,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
disp_cfg_stream_location = dml_dispcfg->num_streams++;
ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
- populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx);
- adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]);
+ populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index], dml_ctx);
populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]);
populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index], &context->stream_status[stream_index]);
@@ -1094,6 +818,8 @@ void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state
context->bw_ctx.bw.dcn.clk.socclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.socclk_khz;
context->bw_ctx.bw.dcn.clk.subvp_prefetch_dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz;
context->bw_ctx.bw.dcn.clk.subvp_prefetch_fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz;
+ context->bw_ctx.bw.dcn.clk.stutter_efficiency.base_efficiency = in_ctx->v21.mode_programming.programming->stutter.base_percent_efficiency;
+ context->bw_ctx.bw.dcn.clk.stutter_efficiency.low_power_efficiency = in_ctx->v21.mode_programming.programming->stutter.low_power_percent_efficiency;
}
static struct dml2_dchub_watermark_regs *wm_set_index_to_dc_wm_set(union dcn_watermark_set *watermarks, const enum dml2_dchub_watermark_reg_set_index wm_index)
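
For reference, the bit decoding that dml21_populate_pmo_options performs on dml21_disable_pstate_method_mask can be sketched standalone; the mask value below is made up and the variable names are local stand-ins:

/* Stand-alone decode of a hypothetical pstate-method disable mask. */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x0a;	/* made-up value: bits 1 and 3 set */

	bool disable_vblank      = (mask >> 1) & 1;	/* bit 1 -> true */
	bool disable_svp         = (mask >> 2) & 1;	/* bit 2 -> false */
	bool disable_drr_clamped = (mask >> 3) & 1;	/* bit 3 -> true */
	bool disable_drr_var     = (mask >> 4) & 1;	/* bit 4 -> false */

	printf("vblank=%d svp=%d drr_clamped=%d drr_var=%d\n",
	       disable_vblank, disable_svp, disable_drr_clamped,
	       disable_drr_var);
	return 0;
}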
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h
index 73a013be1e48..9880d3e0398e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h
@@ -17,9 +17,7 @@ struct dml2_context;
struct dml2_configuration_options;
struct dml2_initialize_instance_in_out;
-void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
-void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
-void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
+void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state *context);
void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_set *watermarks, struct dml2_context *in_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
index 930e86cdb88a..ee721606b883 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
@@ -384,6 +384,7 @@ void dml21_build_fams2_programming(const struct dc *dc,
/* reset fams2 data */
memset(&context->bw_ctx.bw.dcn.fams2_stream_base_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES);
memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES);
+ memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2, 0, sizeof(union dmub_fams2_stream_static_sub_state_v2) * DML2_MAX_PLANES);
memset(&context->bw_ctx.bw.dcn.fams2_global_config, 0, sizeof(struct dmub_cmd_fams2_global_config));
if (dml_ctx->v21.mode_programming.programming->fams2_required) {
@@ -414,9 +415,16 @@ void dml21_build_fams2_programming(const struct dc *dc,
memcpy(static_base_state,
&dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_base_params,
sizeof(union dmub_cmd_fams2_config));
- memcpy(static_sub_state,
- &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params,
- sizeof(union dmub_cmd_fams2_config));
+
+ if (dc->debug.fams_version.major == 3) {
+ memcpy(&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[num_fams2_streams],
+ &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params_v2,
+ sizeof(union dmub_fams2_stream_static_sub_state_v2));
+ } else {
+ memcpy(static_sub_state,
+ &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params,
+ sizeof(union dmub_cmd_fams2_config));
+ }
switch (dc->debug.fams_version.minor) {
case 1:
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.h
index 4bff52eaaef8..4bff52eaaef8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
index 208d3651b6ba..798abb2b2e67 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
@@ -2,8 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-#include <linux/vmalloc.h>
-
#include "dml2_internal_types.h"
#include "dml_top.h"
#include "dml2_core_dcn4_calcs.h"
@@ -37,15 +35,11 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
return true;
}
-static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
+static void dml21_populate_configuration_options(const struct dc *in_dc,
+ struct dml2_context *dml_ctx,
+ const struct dml2_configuration_options *config)
{
- bool disable_fams2;
- struct dml2_pmo_options *pmo_options = &dml_ctx->v21.dml_init.options.pmo_options;
-
- /* ODM options */
- pmo_options->disable_dyn_odm = !config->minimize_dispclk_using_odm;
- pmo_options->disable_dyn_odm_for_multi_stream = true;
- pmo_options->disable_dyn_odm_for_stream_with_svp = true;
+ dml_ctx->config = *config;
/* UCLK P-State options */
if (in_dc->debug.dml21_force_pstate_method) {
@@ -55,52 +49,20 @@ static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_contex
} else {
dml_ctx->config.pmo.force_pstate_method_enable = false;
}
-
- pmo_options->disable_vblank = ((in_dc->debug.dml21_disable_pstate_method_mask >> 1) & 1);
-
- /* NOTE: DRR and SubVP Require FAMS2 */
- disable_fams2 = !in_dc->debug.fams2_config.bits.enable;
- pmo_options->disable_svp = ((in_dc->debug.dml21_disable_pstate_method_mask >> 2) & 1) ||
- in_dc->debug.force_disable_subvp ||
- disable_fams2;
- pmo_options->disable_drr_clamped = ((in_dc->debug.dml21_disable_pstate_method_mask >> 3) & 1) ||
- disable_fams2;
- pmo_options->disable_drr_var = ((in_dc->debug.dml21_disable_pstate_method_mask >> 4) & 1) ||
- disable_fams2;
- pmo_options->disable_fams2 = disable_fams2;
-
- pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE ||
- in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY;
- pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE;
}
-static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
+static void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
{
- switch (in_dc->ctx->dce_version) {
- case DCN_VERSION_4_01:
- (*dml_ctx)->v21.dml_init.options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
- break;
- default:
- (*dml_ctx)->v21.dml_init.options.project_id = dml2_project_invalid;
- }
- (*dml_ctx)->architecture = dml2_architecture_21;
+ dml_ctx->architecture = dml2_architecture_21;
- /* Store configuration options */
- (*dml_ctx)->config = *config;
+ dml21_populate_configuration_options(in_dc, dml_ctx, config);
DC_FP_START();
- /*Initialize SOCBB and DCNIP params */
- dml21_initialize_soc_bb_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
- dml21_initialize_ip_params(&(*dml_ctx)->v21.dml_init, config, in_dc);
- dml21_apply_soc_bb_overrides(&(*dml_ctx)->v21.dml_init, config, in_dc);
-
- /* apply debug overrides */
- dml21_apply_debug_options(in_dc, *dml_ctx, config);
+ dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, &dml_ctx->config, in_dc);
- /*Initialize DML21 instance */
- dml2_initialize_instance(&(*dml_ctx)->v21.dml_init);
+ dml2_initialize_instance(&dml_ctx->v21.dml_init);
DC_FP_END();
}
@@ -111,7 +73,7 @@ bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const s
if (!dml21_allocate_memory(dml_ctx))
return false;
- dml21_init(in_dc, dml_ctx, config);
+ dml21_init(in_dc, *dml_ctx, config);
return true;
}
@@ -328,12 +290,13 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co
return true;
}
-bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate)
+bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx,
+ enum dc_validate_mode validate_mode)
{
bool out = false;
- /* Use dml_validate_only for fast_validate path */
- if (fast_validate)
+	/* Use dml21_check_mode_support for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX paths */
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
out = dml21_check_mode_support(in_dc, context, dml_ctx);
else
out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);
@@ -496,7 +459,7 @@ bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
return true;
}
-void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
+void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config)
{
dml21_init(in_dc, dml_ctx, config);
}
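
The reworked dml21_validate reduces to a two-way dispatch on the new enum: only the full programming mode runs the programming path, the two lighter modes only check support. A self-contained sketch of the pattern with stubbed types (every identifier below is a stand-in, not the DC API):

/* Self-contained sketch of the dml21_validate dispatch above. */
#include <stdbool.h>
#include <stdio.h>

enum validate_mode_stub {
	STUB_VALIDATE_MODE_ONLY,
	STUB_VALIDATE_MODE_AND_STATE_INDEX,
	STUB_VALIDATE_MODE_AND_PROGRAMMING,
};

/* lightweight support check: does not populate resource context */
static bool stub_check_mode_support(void) { return true; }

/* full path: support check plus HW programming generation */
static bool stub_mode_check_and_programming(void) { return true; }

static bool stub_validate(enum validate_mode_stub mode)
{
	if (mode != STUB_VALIDATE_MODE_AND_PROGRAMMING)
		return stub_check_mode_support();
	return stub_mode_check_and_programming();
}

int main(void)
{
	printf("%d\n", stub_validate(STUB_VALIDATE_MODE_ONLY));
	return 0;
}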
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h
index 42e715024bc9..15f92029d2e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h
@@ -14,6 +14,7 @@ struct dc;
struct dc_state;
struct dml2_configuration_options;
struct dml2_context;
+enum dc_validate_mode;
/**
* dml2_create - Creates dml21_context.
@@ -33,22 +34,23 @@ void dml21_copy(struct dml2_context *dst_dml_ctx,
struct dml2_context *src_dml_ctx);
bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
struct dml2_context *src_dml_ctx);
-void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config);
+void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config);
/**
* dml21_validate - Determines if a display configuration is supported or not.
* @in_dc: dc.
* @context: dc_state to be validated.
- * @fast_validate: Fast validate will not populate context.res_ctx.
+ * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX
+ * will not populate context.res_ctx.
*
* Based on the validate_mode option, internally this would call:
*
- * -dml21_mode_check_and_programming - for non fast_validate option
+ * -dml21_mode_check_and_programming - for DC_VALIDATE_MODE_AND_PROGRAMMING option
* Calculates if dc_state can be supported on the input display
* configuration. If supported, generates the necessary HW
* programming for the new dc_state.
*
- * -dml21_check_mode_support - for fast_validate option
+ * -dml21_check_mode_support - for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX options
* Calculates if dc_state can be supported for the input display
* config.
@@ -56,7 +58,8 @@ void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const s
* separate dc_states for validation.
* Return: True if mode is supported, false otherwise.
*/
-bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate);
+bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx,
+ enum dc_validate_mode validate_mode);
/* Prepare hubp mcache_regs for hubp mcache ID and split coordinate programming */
void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h
index 793e1c038efd..16a4f97bca4e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML_DML_DCN4_SOC_BB__
#define __DML_DML_DCN4_SOC_BB__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h
index 281d7ad230d8..281d7ad230d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h
index c047d56527c4..a64ec4dcf11a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h
@@ -43,5 +43,4 @@ bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_o
*/
bool dml2_build_mcache_programming(struct dml2_build_mcache_programming_in_out *in_out);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h
index 84c90050668c..bf57df42d1d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h
@@ -121,6 +121,8 @@ struct dml2_display_rq_regs {
uint32_t crq_expansion_mode;
uint32_t plane1_base_address;
uint32_t unbounded_request_enabled;
+ bool pte_buffer_mode;
+ bool force_one_row_for_frame;
// MRQ
uint32_t mrq_expansion_mode;
@@ -158,6 +160,8 @@ struct dml2_dchub_watermark_regs {
uint32_t sr_exit;
uint32_t sr_enter_z8;
uint32_t sr_exit_z8;
+ uint32_t sr_enter_low_power;
+ uint32_t sr_exit_low_power;
uint32_t uclk_pstate;
uint32_t fclk_pstate;
uint32_t temp_read_or_ppt;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h
index 255f05de362c..35aa954248cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h
@@ -49,6 +49,11 @@ enum dml2_source_format_class {
dml2_422_packed_12 = 18
};
+enum dml2_sample_positioning {
+ dml2_interstitial = 0,
+ dml2_cosited = 1
+};
+
enum dml2_rotation_angle {
dml2_rotation_0 = 0,
dml2_rotation_90 = 1,
@@ -82,6 +87,15 @@ enum dml2_output_link_dp_rate {
dml2_dp_rate_uhbr20 = 6
};
+enum dml2_pstate_type {
+ dml2_pstate_type_uclk = 0,
+ dml2_pstate_type_fclk = 1,
+ dml2_pstate_type_ppt = 2,
+ dml2_pstate_type_temp_read = 3,
+ dml2_pstate_type_dummy_pstate = 4,
+ dml2_pstate_type_count = 5
+};
+
enum dml2_uclk_pstate_change_strategy {
dml2_uclk_pstate_change_strategy_auto = 0,
dml2_uclk_pstate_change_strategy_force_vactive = 1,
@@ -222,6 +236,11 @@ struct dml2_composition_cfg {
struct {
bool enabled;
+ bool easf_enabled;
+ bool isharp_enabled;
+ bool upsp_enabled;
+ enum dml2_sample_positioning upsp_sample_positioning;
+ unsigned int upsp_vtaps;
struct {
double h_ratio;
double v_ratio;
@@ -383,7 +402,7 @@ struct dml2_plane_parameters {
// reserved_vblank_time_ns is the minimum time to reserve in vblank for Twait
// The actual reserved vblank time used for the corresponding stream in mode_programming would be at least as much as this per-plane override.
long reserved_vblank_time_ns;
- unsigned int max_vactive_det_fill_delay_us; // 0 = no reserved time, +ve = explicit max delay
+ unsigned int max_vactive_det_fill_delay_us[dml2_pstate_type_count]; // 0 = no reserved time, +ve = explicit max delay
unsigned int gpuvm_min_page_size_kbytes;
unsigned int hostvm_min_page_size_kbytes;
@@ -412,7 +431,6 @@ struct dml2_stream_parameters {
bool disable_dynamic_odm;
bool disable_subvp;
int minimum_vblank_idle_requirement_us;
- bool minimize_active_latency_hiding;
struct {
struct {
@@ -426,6 +444,7 @@ struct dml2_stream_parameters {
struct dml2_display_cfg {
bool gpuvm_enable;
+ bool ffbm_enable;
bool hostvm_enable;
// Allocate DET proportionally between streams based on pixel rate
@@ -454,6 +473,7 @@ struct dml2_display_cfg {
bool enable;
bool value;
} force_nom_det_size_kbytes;
+
bool mode_support_check_disable;
bool mcache_admissibility_check_disable;
bool surface_viewport_size_check_disable;
@@ -476,7 +496,6 @@ struct dml2_display_cfg {
bool synchronize_ddr_displays_for_uclk_pstate_change;
bool max_outstanding_when_urgent_expected_disable;
bool enable_subvp_implicit_pmo; //enables PMO to switch pipe uclk strategy to subvp, and generate phantom programming
- unsigned int best_effort_min_active_latency_hiding_us;
bool all_streams_blanked;
} overrides;
};
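
The max_vactive_det_fill_delay_us change above turns one scalar into a per-domain array indexed by the new dml2_pstate_type enum, so each p-state domain gets its own cap. A small sketch of the indexing scheme (values and stub names are hypothetical):

/* Per-pstate-domain delay caps, indexed by a stub of the new enum. */
#include <stdio.h>

enum pstate_type_stub {
	STUB_UCLK,
	STUB_FCLK,
	STUB_PPT,
	STUB_TEMP_READ,
	STUB_DUMMY_PSTATE,
	STUB_PSTATE_TYPE_COUNT
};

int main(void)
{
	/* 0 = no reserved time, positive = explicit max delay in us */
	unsigned int max_delay_us[STUB_PSTATE_TYPE_COUNT] = {
		[STUB_UCLK] = 200,	/* made-up values */
		[STUB_FCLK] = 100,
	};
	int t;

	for (t = 0; t < STUB_PSTATE_TYPE_COUNT; t++)
		printf("pstate type %d -> %u us\n", t, max_delay_us[t]);
	return 0;
}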
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h
index 8f624a912e78..8f624a912e78 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h
index 5f0bc42d1d2f..1fbc520c2540 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h
@@ -89,16 +89,21 @@ struct dml2_soc_qos_parameters {
struct dml2_soc_power_management_parameters {
double dram_clk_change_blackout_us;
- double dram_clk_change_read_only_us;
- double dram_clk_change_write_only_us;
+ double dram_clk_change_read_only_us; // deprecated
+ double dram_clk_change_write_only_us; // deprecated
double fclk_change_blackout_us;
double g7_ppt_blackout_us;
+ double g7_temperature_read_blackout_us;
double stutter_enter_plus_exit_latency_us;
double stutter_exit_latency_us;
+ double low_power_stutter_enter_plus_exit_latency_us;
+ double low_power_stutter_exit_latency_us;
double z8_stutter_enter_plus_exit_latency_us;
double z8_stutter_exit_latency_us;
double z8_min_idle_time;
double g6_temp_read_blackout_us[DML_MAX_CLK_TABLE_SIZE];
+ double type_b_dram_clk_change_blackout_us;
+ double type_b_ppt_blackout_us;
};
struct dml2_clk_table {
@@ -130,6 +135,7 @@ struct dml2_soc_state_table {
struct dml2_soc_vmin_clock_limits {
unsigned long dispclk_khz;
+ unsigned long dcfclk_khz;
};
struct dml2_soc_bb {
@@ -138,6 +144,9 @@ struct dml2_soc_bb {
struct dml2_soc_power_management_parameters power_management_parameters;
struct dml2_soc_vmin_clock_limits vmin_limit;
+ double lower_bound_bandwidth_dchub;
+ double fraction_of_urgent_bandwidth_nominal_target;
+ double fraction_of_urgent_bandwidth_flip_target;
unsigned int dprefclk_mhz;
unsigned int xtalclk_mhz;
unsigned int pcie_refclk_mhz;
@@ -163,6 +172,7 @@ struct dml2_soc_bb {
struct dml2_ip_capabilities {
unsigned int pipe_count;
unsigned int otg_count;
+ unsigned int TDLUT_33cube_count;
unsigned int num_dsc;
unsigned int max_num_dp2p0_streams;
unsigned int max_num_hdmi_frl_outputs;
@@ -181,7 +191,9 @@ struct dml2_ip_capabilities {
unsigned int subvp_prefetch_end_to_mall_start_us;
unsigned int subvp_fw_processing_delay;
unsigned int max_vactive_det_fill_delay_us;
-
+ unsigned int ppt_max_allow_delay_us;
+ unsigned int temp_read_max_allow_delay_us;
+ unsigned int dummy_pstate_max_allow_delay_us;
/* FAMS2 delays */
struct {
unsigned int max_allow_delay_us;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h
index 0dbf886d8926..452e4a2e72c0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h
@@ -16,9 +16,9 @@ struct dml2_instance;
enum dml2_project_id {
dml2_project_invalid = 0,
- dml2_project_dcn4x_stage1 = 1,
- dml2_project_dcn4x_stage2 = 2,
- dml2_project_dcn4x_stage2_auto_drr_svp = 3,
+ dml2_project_dcn4x_stage1,
+ dml2_project_dcn4x_stage2,
+ dml2_project_dcn4x_stage2_auto_drr_svp,
};
enum dml2_pstate_change_support {
@@ -53,7 +53,9 @@ enum dml2_output_type_and_rate__rate {
dml2_output_rate_hdmi_rate_6x4 = 9,
dml2_output_rate_hdmi_rate_8x4 = 10,
dml2_output_rate_hdmi_rate_10x4 = 11,
- dml2_output_rate_hdmi_rate_12x4 = 12
+ dml2_output_rate_hdmi_rate_12x4 = 12,
+ dml2_output_rate_hdmi_rate_16x4 = 13,
+ dml2_output_rate_hdmi_rate_20x4 = 14
};
struct dml2_pmo_options {
@@ -68,6 +70,8 @@ struct dml2_pmo_options {
bool disable_dyn_odm;
bool disable_dyn_odm_for_multi_stream;
bool disable_dyn_odm_for_stream_with_svp;
+ struct dml2_pmo_pstate_strategy *override_strategy_lists[DML2_MAX_PLANES];
+ unsigned int num_override_strategies_per_list[DML2_MAX_PLANES];
};
struct dml2_options {
@@ -279,7 +283,10 @@ struct dml2_per_stream_programming {
} phantom_stream;
union dmub_cmd_fams2_config fams2_base_params;
- union dmub_cmd_fams2_config fams2_sub_params;
+ union {
+ union dmub_cmd_fams2_config fams2_sub_params;
+ union dmub_fams2_stream_static_sub_state_v2 fams2_sub_params_v2;
+ };
};
//-----------------
@@ -305,6 +312,7 @@ struct dml2_mode_support_info {
bool NumberOfOTGSupport;
bool NumberOfHDMIFRLSupport;
bool NumberOfDP2p0Support;
+ bool NumberOfTDLUT33cubeSupport;
bool WritebackScaleRatioAndTapsSupport;
bool CursorSupport;
bool PitchSupport;
@@ -352,6 +360,8 @@ struct dml2_mode_support_info {
unsigned int AlignedCPitch[DML2_MAX_PLANES];
bool g6_temp_read_support;
bool temp_read_or_ppt_support;
+ bool qos_bandwidth_support;
+ bool dcfclk_support;
}; // dml2_mode_support_info
struct dml2_display_cfg_programming {
@@ -412,6 +422,8 @@ struct dml2_display_cfg_programming {
struct {
bool supported_in_blank; // Changing to configurations where this is false requires stutter to be disabled during the transition
+ uint8_t base_percent_efficiency; //LP1
+ uint8_t low_power_percent_efficiency; //LP2
} stutter;
struct {
@@ -664,6 +676,8 @@ struct dml2_display_cfg_programming {
unsigned int PrefetchMode[DML2_MAX_PLANES]; // LEGACY_ONLY
bool ROBUrgencyAvoidance;
double LowestPrefetchMargin;
+
+ unsigned int pstate_recout_reduction_lines[DML2_MAX_PLANES];
} misc;
struct dml2_mode_support_info mode_support_info;
@@ -674,9 +688,14 @@ struct dml2_display_cfg_programming {
// unlimited # of mcache
struct dml2_mcache_surface_allocation non_optimized_mcache_allocation[DML2_MAX_PLANES];
+ bool failed_prefetch;
+ bool failed_uclk_pstate;
bool failed_mcache_validation;
bool failed_dpmm;
bool failed_mode_programming;
+ bool failed_mode_programming_dcfclk;
+ bool failed_mode_programming_prefetch;
+ bool failed_mode_programming_flip;
bool failed_map_watermarks;
} informative;
};
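
The anonymous union introduced in dml2_per_stream_programming works because a stream carries either v1 or v2 FAMS2 sub-parameters, never both, so the two blobs can overlay the same storage, selected by firmware version at runtime. A standalone sketch with stand-in types (the real dmub layouts are not reproduced here):

/* Stand-in types only; the real dmub unions are much larger. */
#include <stdio.h>
#include <string.h>

struct stub_sub_params_v1 { unsigned int legacy_field; };
struct stub_sub_params_v2 { unsigned int new_field; unsigned int extra; };

struct stub_stream_programming {
	union {
		struct stub_sub_params_v1 fams2_sub_params;
		struct stub_sub_params_v2 fams2_sub_params_v2;
	};
};

int main(void)
{
	struct stub_stream_programming s;
	unsigned int fams_major = 3;	/* hypothetical debug knob */

	memset(&s, 0, sizeof(s));
	if (fams_major == 3)
		s.fams2_sub_params_v2.new_field = 42;	/* v2 path */
	else
		s.fams2_sub_params.legacy_field = 7;	/* v1 path */

	printf("union size=%zu new_field=%u\n",
	       sizeof(s), s.fams2_sub_params_v2.new_field);
	return 0;
}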
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c
index 6ee37386f672..eba948e187c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -28,6 +28,7 @@ struct dml2_core_ip_params core_dcn4_ip_caps_base = {
.writeback_interface_buffer_size_kbytes = 90,
//Number of pipes after DCN Pipe harvesting
.max_num_dpp = 4,
+ .max_num_opp = 4,
.max_num_otg = 4,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h
index a68bb001a346..a68bb001a346 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index c4dad7164d31..a02e9fd6b5ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -1238,18 +1238,27 @@ static void CalculateDETBufferSize(
static double CalculateRequiredDispclk(
enum dml2_odm_mode ODMMode,
- double PixelClock)
+ double PixelClock,
+ bool isTMDS420)
{
+ double DispClk;
if (ODMMode == dml2_odm_mode_combine_4to1) {
- return PixelClock / 4.0;
+ DispClk = PixelClock / 4.0;
} else if (ODMMode == dml2_odm_mode_combine_3to1) {
- return PixelClock / 3.0;
+ DispClk = PixelClock / 3.0;
} else if (ODMMode == dml2_odm_mode_combine_2to1) {
- return PixelClock / 2.0;
+ DispClk = PixelClock / 2.0;
} else {
- return PixelClock;
+ DispClk = PixelClock;
+ }
+
+ if (isTMDS420) {
+ double TMDS420MinPixClock = PixelClock / 2.0;
+ DispClk = math_max2(DispClk, TMDS420MinPixClock);
}
+
+ return DispClk;
}
static double TruncToValidBPP(
@@ -1294,6 +1303,7 @@ static double TruncToValidBPP(
MinDSCBPP = 8;
MaxDSCBPP = 16;
} else {
+
if (Output == dml2_hdmi || Output == dml2_hdmifrl) {
NonDSCBPP0 = 24;
NonDSCBPP1 = 24;
@@ -1311,6 +1321,7 @@ static double TruncToValidBPP(
MaxDSCBPP = 16;
}
}
+
if (Output == dml2_dp2p0) {
MaxLinkBPP = LinkBitRate * Lanes / PixelClock * 128.0 / 132.0 * 383.0 / 384.0 * 65536.0 / 65540.0;
} else if (DSCEnable && Output == dml2_dp) {
@@ -4038,7 +4049,9 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode,
bool UseDSC,
unsigned int NumberOfDSCSlices,
unsigned int TotalNumberOfActiveDPP,
+ unsigned int TotalNumberOfActiveOPP,
unsigned int MaxNumDPP,
+ unsigned int MaxNumOPP,
double DISPCLKRequired,
unsigned int NumberOfDPPRequired,
unsigned int MaxHActiveForDSC,
@@ -4054,7 +4067,7 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode,
if (DISPCLKRequired > MaxDispclk)
return false;
- if ((TotalNumberOfActiveDPP + NumberOfDPPRequired) > MaxNumDPP)
+ if ((TotalNumberOfActiveDPP + NumberOfDPPRequired) > MaxNumDPP || (TotalNumberOfActiveOPP + NumberOfDPPRequired) > MaxNumOPP)
return false;
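
A sketch of the widened pipe-budget test (illustrative names): an ODM candidate must now fit both pipe pools, with each ODM segment consuming one DPP and one OPP in this model.

#include <stdbool.h>

static bool odm_pipes_fit(unsigned int active_dpp, unsigned int active_opp,
			  unsigned int segments, unsigned int max_dpp,
			  unsigned int max_opp)
{
	/* reject the candidate if either the DPP or the OPP pool overflows */
	return (active_dpp + segments) <= max_dpp &&
	       (active_opp + segments) <= max_opp;
}
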
if (are_odm_segments_symmetrical) {
if (HActive % (NumberOfDPPRequired * pixels_per_clock_cycle))
@@ -4100,7 +4113,9 @@ static noinline_for_stack void CalculateODMMode(
double MaxDispclk,
bool DSCEnable,
unsigned int TotalNumberOfActiveDPP,
+ unsigned int TotalNumberOfActiveOPP,
unsigned int MaxNumDPP,
+ unsigned int MaxNumOPP,
double PixelClock,
unsigned int NumberOfDSCSlices,
@@ -4122,11 +4137,12 @@ static noinline_for_stack void CalculateODMMode(
bool success;
bool UseDSC = DSCEnable && (NumberOfDSCSlices > 0);
enum dml2_odm_mode DecidedODMMode;
+ bool isTMDS420 = (OutFormat == dml2_420 && Output == dml2_hdmi);
- SurfaceRequiredDISPCLKWithoutODMCombine = CalculateRequiredDispclk(dml2_odm_mode_bypass, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineTwoToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_2to1, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock);
+ SurfaceRequiredDISPCLKWithoutODMCombine = CalculateRequiredDispclk(dml2_odm_mode_bypass, PixelClock, isTMDS420);
+ SurfaceRequiredDISPCLKWithODMCombineTwoToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_2to1, PixelClock, isTMDS420);
+ SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock, isTMDS420);
+ SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock, isTMDS420);
#ifdef __DML_VBA_DEBUG__
DML_LOG_VERBOSE("DML::%s: ODMUse = %d\n", __func__, ODMUse);
DML_LOG_VERBOSE("DML::%s: Output = %d\n", __func__, Output);
@@ -4169,7 +4185,9 @@ static noinline_for_stack void CalculateODMMode(
UseDSC,
NumberOfDSCSlices,
TotalNumberOfActiveDPP,
+ TotalNumberOfActiveOPP,
MaxNumDPP,
+ MaxNumOPP,
DISPCLKRequired,
NumberOfDPPRequired,
MaxHActiveForDSC,
@@ -4685,7 +4703,10 @@ static void calculate_tdlut_setting(
//the tdlut is fetched during the 2 row times of prefetch.
if (p->setup_for_tdlut) {
*p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2((double) *p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1);
- *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
+ if (*p->tdlut_bytes_per_frame > p->cursor_buffer_size * 1024)
+ *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
+ else
+ *p->tdlut_opt_time = 0;
*p->tdlut_drain_time = p->cursor_buffer_size * 1024 / tdlut_drain_rate;
*p->tdlut_bytes_to_deliver = (unsigned int) (p->cursor_buffer_size * 1024.0);
}
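
The guard above avoids a negative optimization time when the 3D LUT frame fits inside the cursor buffer; a minimal sketch of the clamped computation (illustrative names):

static double tdlut_opt_time_us(double tdlut_bytes_per_frame,
				double cursor_buffer_kbytes,
				double drain_rate_bytes_per_us)
{
	double cursor_bytes = cursor_buffer_kbytes * 1024.0;

	/* clamp to zero instead of letting the subtraction go negative */
	if (tdlut_bytes_per_frame <= cursor_bytes)
		return 0.0;

	return (tdlut_bytes_per_frame - cursor_bytes) / drain_rate_bytes_per_us;
}
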
@@ -4858,7 +4879,7 @@ static double get_urgent_bandwidth_required(
double ReadBandwidthChroma[],
double PrefetchBandwidthLuma[],
double PrefetchBandwidthChroma[],
- double PrefetchBandwidthOto[],
+ double PrefetchBandwidthMax[],
double excess_vactive_fill_bw_l[],
double excess_vactive_fill_bw_c[],
double cursor_bw[],
@@ -4922,9 +4943,9 @@ static double get_urgent_bandwidth_required(
l->vm_row_bw = NumberOfDPP[k] * prefetch_vmrow_bw[k];
l->flip_and_active_bw = l->per_plane_flip_bw[k] + ReadBandwidthLuma[k] * l->adj_factor_p0 + ReadBandwidthChroma[k] * l->adj_factor_p1 + cursor_bw[k] * l->adj_factor_cur;
l->flip_and_prefetch_bw = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
- l->flip_and_prefetch_bw_oto = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthOto[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
+ l->flip_and_prefetch_bw_max = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthMax[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
l->active_and_excess_bw = (ReadBandwidthLuma[k] + excess_vactive_fill_bw_l[k]) * l->tmp_nom_adj_factor_p0 + (ReadBandwidthChroma[k] + excess_vactive_fill_bw_c[k]) * l->tmp_nom_adj_factor_p1 + dpte_row_bw[k] + meta_row_bw[k];
- surface_required_bw[k] = math_max5(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw, l->flip_and_prefetch_bw_oto);
+ surface_required_bw[k] = math_max5(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw, l->flip_and_prefetch_bw_max);
/* export peak required bandwidth for the surface */
surface_peak_required_bw[k] = math_max2(surface_required_bw[k], surface_peak_required_bw[k]);
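
A sketch of the per-surface requirement after the rename, with a plain-C stand-in for math_max2/math_max5: the prefetch term now carries the larger of the two schedules, so mode programming cannot end up requiring more than mode support budgeted.

static double max2(double a, double b)
{
	return a > b ? a : b;
}

static double surface_required_bw(double vm_row_bw, double flip_and_active_bw,
				  double flip_and_prefetch_bw,
				  double active_and_excess_bw,
				  double flip_and_prefetch_bw_max)
{
	double bw = max2(vm_row_bw, flip_and_active_bw);

	bw = max2(bw, flip_and_prefetch_bw);
	bw = max2(bw, active_and_excess_bw);
	/* bw_max folds in whichever prefetch schedule (oto or equ) is larger */
	return max2(bw, flip_and_prefetch_bw_max);
}
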
@@ -5122,7 +5143,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->Tsw_est3 = 0.0;
s->cursor_prefetch_bytes = 0;
*p->prefetch_cursor_bw = 0;
- *p->RequiredPrefetchBWOTO = 0.0;
+ *p->RequiredPrefetchBWMax = 0.0;
dcc_mrq_enable = (p->dcc_enable && p->mrq_present);
@@ -5353,7 +5374,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
* mp will fail if ms decides to use equ schedule and mp decides to use oto schedule
* and the required bandwidth increases when going from ms to mp
*/
- *p->RequiredPrefetchBWOTO = s->prefetch_bw_oto;
+ *p->RequiredPrefetchBWMax = s->prefetch_bw_oto;
#ifdef __DML_VBA_DEBUG__
DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l);
@@ -5715,8 +5736,14 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->TimeForFetchingVM = s->Tvm_equ;
s->TimeForFetchingRowInVBlank = s->Tr0_equ;
- *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
- *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
+ *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
+ *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
+
+ /* The equ bw should be propagated so that a ceiling of the equ bw is accounted for prior to mode programming.
+ * Overall bandwidth may be lower when going from mode support to mode programming, but the final pixel data
+ * bandwidth may end up higher than what was calculated in mode support.
+ */
+ *p->RequiredPrefetchBWMax = math_max2(s->prefetch_bw_equ, *p->RequiredPrefetchBWMax);
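
A sketch of this propagation (hypothetical helper): the exported value starts as the oto bandwidth and is only ever raised to the equ bandwidth, never lowered, so downstream peak-bandwidth checks always see the ceiling of both schedules.

static double prefetch_bw_ceiling(double bw_oto, double bw_equ, int use_equ)
{
	double bw_max = bw_oto;

	/* raise to the equ bandwidth when the equ schedule is selected */
	if (use_equ && bw_equ > bw_max)
		bw_max = bw_equ;

	return bw_max;
}
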
#ifdef __DML_VBA_DEBUG__
DML_LOG_VERBOSE("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
@@ -6112,7 +6139,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
l->zero_array, //PrefetchBandwidthLuma,
l->zero_array, //PrefetchBandwidthChroma,
- l->zero_array, //PrefetchBWOTO
+ l->zero_array, //PrefetchBWMax
l->zero_array,
l->zero_array,
l->zero_array,
@@ -6149,7 +6176,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
l->zero_array, //PrefetchBandwidthLuma,
l->zero_array, //PrefetchBandwidthChroma,
- l->zero_array, //PrefetchBWOTO
+ l->zero_array, //PrefetchBWMax
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6186,7 +6213,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
- p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw
+ p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6223,7 +6250,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
- p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw
+ p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6260,7 +6287,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
- p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw
+ p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6945,7 +6972,7 @@ static void calculate_bytes_to_fetch_required_to_hide_latency(
stream_index = p->display_cfg->plane_descriptors[plane_index].stream_index;
- dst_lines_to_hide = (unsigned int)math_ceil(p->latency_to_hide_us /
+ dst_lines_to_hide = (unsigned int)math_ceil(p->latency_to_hide_us[0] /
((double)p->display_cfg->stream_descriptors[stream_index].timing.h_total /
(double)p->display_cfg->stream_descriptors[stream_index].timing.pixel_clock_khz * 1000.0));
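
A worked sketch of this conversion: the latency to hide (us) divided by the line time (us per line) gives destination lines, rounded up. For example, 60 us of latency at h_total = 4400 and a 594 MHz pixel clock is ceil(60 / (4400 / 594000 * 1000)) = ceil(60 / 7.407) = 9 lines. Names below are illustrative.

#include <math.h>

static unsigned int lines_to_hide(double latency_us, double h_total,
				  double pixel_clock_khz)
{
	/* line time in microseconds */
	double line_time_us = h_total / pixel_clock_khz * 1000.0;

	return (unsigned int)ceil(latency_us / line_time_us);
}
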
@@ -7042,9 +7069,9 @@ static void calculate_excess_vactive_bandwidth_required(
excess_vactive_fill_bw_l[plane_index] = 0.0;
excess_vactive_fill_bw_c[plane_index] = 0.0;
- if (display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us > 0) {
- excess_vactive_fill_bw_l[plane_index] = (double)bytes_required_l[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us;
- excess_vactive_fill_bw_c[plane_index] = (double)bytes_required_c[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us;
+ if (display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] > 0) {
+ excess_vactive_fill_bw_l[plane_index] = (double)bytes_required_l[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk];
+ excess_vactive_fill_bw_c[plane_index] = (double)bytes_required_c[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk];
}
}
}
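
A sketch of the per-type override lookup (illustrative names): the excess fill bandwidth is simply the required bytes divided by the caller's maximum DET fill delay, now selected per p-state type (uclk here) instead of from a single scalar.

static double excess_fill_bw(double bytes_required, double max_fill_delay_us)
{
	/* a non-positive delay means the override is disabled */
	if (max_fill_delay_us <= 0.0)
		return 0.0;

	return bytes_required / max_fill_delay_us;	/* bytes per us */
}
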
@@ -7487,7 +7514,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter
CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k];
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c
- CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &mode_lib->ms.RequiredPrefetchBWOTO[k];
+ CalculatePrefetchSchedule_params->RequiredPrefetchBWMax = &mode_lib->ms.RequiredPrefetchBWMax[k];
CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k];
CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k];
@@ -7632,7 +7659,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter
calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = mode_lib->ms.RequiredPrefetchBWMax;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
@@ -7799,7 +7826,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter
calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = mode_lib->ms.RequiredPrefetchBWMax;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
@@ -7905,6 +7932,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter
}
+
static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out_params)
{
struct dml2_core_internal_display_mode_lib *mode_lib = in_out_params->mode_lib;
@@ -8338,6 +8366,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculateSwathAndDETConfiguration(&mode_lib->scratch, CalculateSwathAndDETConfiguration_params);
mode_lib->ms.TotalNumberOfActiveDPP = 0;
+ mode_lib->ms.TotalNumberOfActiveOPP = 0;
mode_lib->ms.support.TotalAvailablePipesSupport = true;
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
@@ -8373,7 +8402,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.max_dispclk_freq_mhz,
false, // DSCEnable
mode_lib->ms.TotalNumberOfActiveDPP,
+ mode_lib->ms.TotalNumberOfActiveOPP,
mode_lib->ip.max_num_dpp,
+ mode_lib->ip.max_num_opp,
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
mode_lib->ms.support.NumberOfDSCSlices[k],
@@ -8392,7 +8423,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.max_dispclk_freq_mhz,
true, // DSCEnable
mode_lib->ms.TotalNumberOfActiveDPP,
+ mode_lib->ms.TotalNumberOfActiveOPP,
mode_lib->ip.max_num_dpp,
+ mode_lib->ip.max_num_opp,
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
mode_lib->ms.support.NumberOfDSCSlices[k],
@@ -8496,20 +8529,23 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 1;
+ mode_lib->ms.NoOfOPP[k] = 1;
if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_4to1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 4;
+ mode_lib->ms.NoOfOPP[k] = 4;
} else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_3to1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 3;
+ mode_lib->ms.NoOfOPP[k] = 3;
} else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_2to1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 2;
+ mode_lib->ms.NoOfOPP[k] = 2;
} else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 2) {
mode_lib->ms.MPCCombine[k] = true;
mode_lib->ms.NoOfDPP[k] = 2;
- mode_lib->ms.TotalNumberOfActiveDPP++;
} else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 1;
@@ -8520,7 +8556,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if ((mode_lib->ms.MinDPPCLKUsingSingleDPP[k] > mode_lib->ms.max_dppclk_freq_mhz) || !mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) {
mode_lib->ms.MPCCombine[k] = true;
mode_lib->ms.NoOfDPP[k] = 2;
- mode_lib->ms.TotalNumberOfActiveDPP++;
}
}
#if defined(__DML_VBA_DEBUG__)
@@ -8528,8 +8563,16 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
#endif
}
+ mode_lib->ms.TotalNumberOfActiveDPP = 0;
+ mode_lib->ms.TotalNumberOfActiveOPP = 0;
+ for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
+ mode_lib->ms.TotalNumberOfActiveDPP += mode_lib->ms.NoOfDPP[k];
+ mode_lib->ms.TotalNumberOfActiveOPP += mode_lib->ms.NoOfOPP[k];
+ }
if (mode_lib->ms.TotalNumberOfActiveDPP > (unsigned int)mode_lib->ip.max_num_dpp)
mode_lib->ms.support.TotalAvailablePipesSupport = false;
+ if (mode_lib->ms.TotalNumberOfActiveOPP > (unsigned int)mode_lib->ip.max_num_opp)
+ mode_lib->ms.support.TotalAvailablePipesSupport = false;
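
With the inline "++" accounting removed above, the totals are derived in one pass from the per-plane decisions and then checked against the hardware caps; a minimal sketch of the resulting flow (illustrative names):

static int pipes_supported(const unsigned int *no_of_dpp,
			   const unsigned int *no_of_opp, int num_planes,
			   unsigned int max_dpp, unsigned int max_opp)
{
	unsigned int total_dpp = 0, total_opp = 0;
	int k;

	/* derive totals after every plane's pipe count is final */
	for (k = 0; k < num_planes; k++) {
		total_dpp += no_of_dpp[k];
		total_opp += no_of_opp[k];
	}

	return total_dpp <= max_dpp && total_opp <= max_opp;
}
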
mode_lib->ms.TotalNumberOfSingleDPPSurfaces = 0;
@@ -9008,11 +9051,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_width_c = mode_lib->ms.SwathWidthC;
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_l = mode_lib->ms.SwathHeightY;
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_c = mode_lib->ms.SwathHeightC;
- calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
+ calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us[0] = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
/* outputs */
- calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l;
- calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c;
+ calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l[dml2_pstate_type_uclk];
+ calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c[dml2_pstate_type_uclk];
calculate_bytes_to_fetch_required_to_hide_latency(calculate_bytes_to_fetch_required_to_hide_latency_params);
@@ -9020,8 +9063,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_excess_vactive_bandwidth_required(
display_cfg,
mode_lib->ms.num_active_planes,
- s->pstate_bytes_required_l,
- s->pstate_bytes_required_c,
+ s->pstate_bytes_required_l[dml2_pstate_type_uclk],
+ s->pstate_bytes_required_c[dml2_pstate_type_uclk],
/* outputs */
mode_lib->ms.excess_vactive_fill_bw_l,
mode_lib->ms.excess_vactive_fill_bw_c);
@@ -9463,8 +9506,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_vactive_det_fill_latency(
display_cfg,
mode_lib->ms.num_active_planes,
- s->pstate_bytes_required_l,
- s->pstate_bytes_required_c,
+ s->pstate_bytes_required_l[dml2_pstate_type_uclk],
+ s->pstate_bytes_required_c[dml2_pstate_type_uclk],
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0,
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1,
mode_lib->ms.vactive_sw_bw_l,
@@ -9472,7 +9515,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.surface_avg_vactive_required_bw,
mode_lib->ms.surface_peak_required_bw,
/* outputs */
- mode_lib->ms.dram_change_vactive_det_fill_delay_us);
+ mode_lib->ms.pstate_vactive_det_fill_delay_us[dml2_pstate_type_uclk]);
#ifdef __DML_VBA_DEBUG__
DML_LOG_VERBOSE("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us);
@@ -10966,11 +11009,11 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_width_c = mode_lib->mp.SwathWidthC;
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_l = mode_lib->mp.SwathHeightY;
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_c = mode_lib->mp.SwathHeightC;
- calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
+ calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us[0] = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
/* outputs */
- calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l;
- calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c;
+ calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l[dml2_pstate_type_uclk];
+ calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c[dml2_pstate_type_uclk];
calculate_bytes_to_fetch_required_to_hide_latency(calculate_bytes_to_fetch_required_to_hide_latency_params);
@@ -10978,8 +11021,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_excess_vactive_bandwidth_required(
display_cfg,
s->num_active_planes,
- s->pstate_bytes_required_l,
- s->pstate_bytes_required_c,
+ s->pstate_bytes_required_l[dml2_pstate_type_uclk],
+ s->pstate_bytes_required_c[dml2_pstate_type_uclk],
/* outputs */
mode_lib->mp.excess_vactive_fill_bw_l,
mode_lib->mp.excess_vactive_fill_bw_c);
@@ -11253,7 +11296,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->mp.VRatioPrefetchC[k];
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k];
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k];
- CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &s->dummy_single_array[0][k];
+ CalculatePrefetchSchedule_params->RequiredPrefetchBWMax = &s->dummy_single_array[0][k];
CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->mp.NotEnoughTimeForDynamicMetadata[k];
CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->mp.Tno_bw[k];
CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->mp.Tno_bw_flip[k];
@@ -11396,7 +11439,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->mp.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->mp.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0];
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = s->dummy_single_array[0];
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->mp.excess_vactive_fill_bw_l;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->mp.excess_vactive_fill_bw_c;
calculate_peak_bandwidth_params->cursor_bw = mode_lib->mp.cursor_bw;
@@ -11536,7 +11579,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->meta_row_bw = mode_lib->mp.meta_row_bw;
calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->mp.prefetch_cursor_bw;
calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->mp.prefetch_vmrow_bw;
- calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0];
+ calculate_peak_bandwidth_params->prefetch_bandwidth_max = s->dummy_single_array[0];
calculate_peak_bandwidth_params->flip_bw = mode_lib->mp.final_flip_bw;
calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->mp.UrgentBurstFactorLuma;
calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->mp.UrgentBurstFactorChroma;
@@ -11880,7 +11923,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
}
//Maximum Bandwidth Used
- s->TotalWRBandwidth = 0;
+ mode_lib->mp.TotalWRBandwidth = 0;
for (k = 0; k < display_cfg->num_streams; ++k) {
s->WRBandwidth = 0;
if (display_cfg->stream_descriptors[k].writeback.active_writebacks_per_stream > 0) {
@@ -11889,7 +11932,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
(display_cfg->stream_descriptors[k].timing.h_total * display_cfg->stream_descriptors[k].writeback.writeback_stream[0].input_height
/ ((double)display_cfg->stream_descriptors[k].timing.pixel_clock_khz / 1000))
* (display_cfg->stream_descriptors[k].writeback.writeback_stream[0].pixel_format == dml2_444_32 ? 4.0 : 8.0);
- s->TotalWRBandwidth = s->TotalWRBandwidth + s->WRBandwidth;
+ mode_lib->mp.TotalWRBandwidth = mode_lib->mp.TotalWRBandwidth + s->WRBandwidth;
}
}
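
A hedged sketch of the accumulation that now lands in mode_lib->mp.TotalWRBandwidth (names and the exact numerator are illustrative; the hunk's context shows the input-scan time and the 4-vs-8 bytes-per-pixel factor): each active writeback contributes its written bytes over the time its input region is scanned, and the per-stream figures are summed.

static double writeback_bw(double out_pixels, double h_total,
			   double input_height, double pixel_clock_mhz,
			   double bytes_per_pixel)
{
	/* time to scan the writeback input region, in microseconds */
	double scan_time_us = h_total * input_height / pixel_clock_mhz;

	return out_pixels * bytes_per_pixel / scan_time_us;
}
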
@@ -12736,7 +12779,7 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
{
const struct dml2_plane_parameters *plane_descriptor = &display_cfg->display_config.plane_descriptors[plane_index];
const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[plane_descriptor->stream_index];
- const struct dml2_fams2_meta *stream_fams2_meta = &display_cfg->stage3.stream_fams2_meta[plane_descriptor->stream_index];
+ const struct dml2_pstate_meta *stream_pstate_meta = &display_cfg->stage3.stream_pstate_meta[plane_descriptor->stream_index];
struct dmub_fams2_cmd_stream_static_base_state *base_programming = &fams2_base_programming->stream_v1.base;
union dmub_fams2_cmd_stream_static_sub_state *sub_programming = &fams2_sub_programming->stream_v1.sub_state;
@@ -12751,24 +12794,24 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
/* from display configuration */
base_programming->htotal = (uint16_t)stream_descriptor->timing.h_total;
base_programming->vtotal = (uint16_t)stream_descriptor->timing.v_total;
- base_programming->vblank_start = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ base_programming->vblank_start = (uint16_t)(stream_pstate_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch);
- base_programming->vblank_end = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ base_programming->vblank_end = (uint16_t)(stream_pstate_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch -
stream_descriptor->timing.v_active);
base_programming->config.bits.is_drr = stream_descriptor->timing.drr_config.enabled;
/* from meta */
base_programming->otg_vline_time_ns =
- (unsigned int)(stream_fams2_meta->otg_vline_time_us * 1000.0);
- base_programming->scheduling_delay_otg_vlines = (uint8_t)stream_fams2_meta->scheduling_delay_otg_vlines;
- base_programming->contention_delay_otg_vlines = (uint8_t)stream_fams2_meta->contention_delay_otg_vlines;
- base_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines;
- base_programming->drr_keepout_otg_vline = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ (unsigned int)(stream_pstate_meta->otg_vline_time_us * 1000.0);
+ base_programming->scheduling_delay_otg_vlines = (uint8_t)stream_pstate_meta->scheduling_delay_otg_vlines;
+ base_programming->contention_delay_otg_vlines = (uint8_t)stream_pstate_meta->contention_delay_otg_vlines;
+ base_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_pstate_meta->vertical_interrupt_ack_delay_otg_vlines;
+ base_programming->drr_keepout_otg_vline = (uint16_t)(stream_pstate_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch -
- stream_fams2_meta->method_drr.programming_delay_otg_vlines);
- base_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_fams2_meta->allow_to_target_delay_otg_vlines;
- base_programming->max_vtotal = (uint16_t)stream_fams2_meta->max_vtotal;
+ stream_pstate_meta->method_drr.programming_delay_otg_vlines);
+ base_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_pstate_meta->allow_to_target_delay_otg_vlines;
+ base_programming->max_vtotal = (uint16_t)stream_pstate_meta->max_vtotal;
/* from core */
base_programming->config.bits.min_ttu_vblank_usable = true;
@@ -12787,11 +12830,11 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
/* legacy vactive */
base_programming->type = FAMS2_STREAM_TYPE_VACTIVE;
sub_programming->legacy.vactive_det_fill_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_vactive.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vactive.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_vactive.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vactive.common.allow_end_otg_vline;
base_programming->config.bits.clamp_vtotal_min = true;
break;
case dml2_pstate_method_vblank:
@@ -12799,22 +12842,22 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
/* legacy vblank */
base_programming->type = FAMS2_STREAM_TYPE_VBLANK;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_vblank.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vblank.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_vblank.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vblank.common.allow_end_otg_vline;
base_programming->config.bits.clamp_vtotal_min = true;
break;
case dml2_pstate_method_fw_drr:
/* drr */
base_programming->type = FAMS2_STREAM_TYPE_DRR;
sub_programming->drr.programming_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_drr.programming_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_drr.programming_delay_otg_vlines;
sub_programming->drr.nom_stretched_vtotal =
- (uint16_t)stream_fams2_meta->method_drr.stretched_vtotal;
+ (uint16_t)stream_pstate_meta->method_drr.stretched_vtotal;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_drr.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_drr.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_drr.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_drr.common.allow_end_otg_vline;
/* drr only clamps to vtotal min for single display */
base_programming->config.bits.clamp_vtotal_min = display_cfg->display_config.num_streams == 1;
sub_programming->drr.only_stretch_if_required = true;
@@ -12827,13 +12870,13 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
(uint16_t)(plane_descriptor->composition.scaler_info.plane0.v_ratio * 1000.0);
sub_programming->subvp.vratio_denominator = 1000;
sub_programming->subvp.programming_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_subvp.programming_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_subvp.programming_delay_otg_vlines;
sub_programming->subvp.prefetch_to_mall_otg_vlines =
- (uint8_t)stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines;
sub_programming->subvp.phantom_vtotal =
- (uint16_t)stream_fams2_meta->method_subvp.phantom_vtotal;
+ (uint16_t)stream_pstate_meta->method_subvp.phantom_vtotal;
sub_programming->subvp.phantom_vactive =
- (uint16_t)stream_fams2_meta->method_subvp.phantom_vactive;
+ (uint16_t)stream_pstate_meta->method_subvp.phantom_vactive;
sub_programming->subvp.config.bits.is_multi_planar =
plane_descriptor->surface.plane1.height > 0;
sub_programming->subvp.config.bits.is_yuv420 =
@@ -12842,9 +12885,9 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
plane_descriptor->pixel_format == dml2_420_12;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_subvp.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_subvp.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_subvp.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_subvp.common.allow_end_otg_vline;
base_programming->config.bits.clamp_vtotal_min = true;
break;
case dml2_pstate_method_reserved_hw:
@@ -12900,7 +12943,8 @@ void dml2_core_calcs_get_plane_support_info(const struct dml2_display_cfg *displ
out->active_latency_hiding_us = (int)mode_lib->ms.VActiveLatencyHidingUs[plane_idx];
- out->dram_change_vactive_det_fill_delay_us = (unsigned int)math_ceil(mode_lib->ms.dram_change_vactive_det_fill_delay_us[plane_idx]);
+ out->vactive_det_fill_delay_us[dml2_pstate_type_uclk] =
+ (unsigned int)math_ceil(mode_lib->ms.pstate_vactive_det_fill_delay_us[dml2_pstate_type_uclk][plane_idx]);
}
void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_stream_support_info *out, int plane_index)
@@ -12981,7 +13025,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.mode_support_info.InvalidCombinationOfMALLUseForPState = mode_lib->ms.support.InvalidCombinationOfMALLUseForPState;
out->informative.mode_support_info.ExceededMALLSize = mode_lib->ms.support.ExceededMALLSize;
out->informative.mode_support_info.EnoughWritebackUnits = mode_lib->ms.support.EnoughWritebackUnits;
- out->informative.mode_support_info.temp_read_or_ppt_support = mode_lib->ms.support.temp_read_or_ppt_support;
+ out->informative.mode_support_info.temp_read_or_ppt_support = mode_lib->ms.support.global_temp_read_or_ppt_supported;
out->informative.mode_support_info.g6_temp_read_support = mode_lib->ms.support.g6_temp_read_support;
out->informative.mode_support_info.ExceededMultistreamSlots = mode_lib->ms.support.ExceededMultistreamSlots;
@@ -13007,7 +13051,10 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.mode_support_info.VRatioInPrefetchSupported = mode_lib->ms.support.VRatioInPrefetchSupported;
out->informative.mode_support_info.DISPCLK_DPPCLK_Support = mode_lib->ms.support.DISPCLK_DPPCLK_Support;
out->informative.mode_support_info.TotalAvailablePipesSupport = mode_lib->ms.support.TotalAvailablePipesSupport;
+ out->informative.mode_support_info.NumberOfTDLUT33cubeSupport = mode_lib->ms.support.NumberOfTDLUT33cubeSupport;
out->informative.mode_support_info.ViewportSizeSupport = mode_lib->ms.support.ViewportSizeSupport;
+ out->informative.mode_support_info.qos_bandwidth_support = mode_lib->ms.support.qos_bandwidth_support;
+ out->informative.mode_support_info.dcfclk_support = mode_lib->ms.support.dcfclk_support;
for (k = 0; k < out->display_config.num_planes; k++) {
@@ -13059,6 +13106,10 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_10x4;
else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_12x4)
out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_12x4;
+ else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_16x4)
+ out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_16x4;
+ else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_20x4)
+ out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_20x4;
out->informative.mode_support_info.AlignedYPitch[k] = mode_lib->ms.support.AlignedYPitch[k];
out->informative.mode_support_info.AlignedCPitch[k] = mode_lib->ms.support.AlignedCPitch[k];
@@ -13243,7 +13294,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.misc.DisplayPipeLineDeliveryTimeLumaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[k];
out->informative.misc.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[k];
- out->informative.misc.WritebackRequiredBandwidth = mode_lib->scratch.dml_core_mode_programming_locals.TotalWRBandwidth / 1000.0;
+ out->informative.misc.WritebackRequiredBandwidth = mode_lib->mp.TotalWRBandwidth / 1000.0;
out->informative.misc.WritebackAllowDRAMClockChangeEndPosition[k] = mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k];
out->informative.misc.WritebackAllowFCLKChangeEndPosition[k] = mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k];
out->informative.misc.DSCCLK_calculated[k] = mode_lib->mp.DSCCLK[k];
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
index 27ef0e096b25..27ef0e096b25 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c
index 28394de02885..cc4f0663c6d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c
@@ -10,11 +10,13 @@ bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_core_instance));
+ out->project_id = project_id;
+
switch (project_id) {
case dml2_project_dcn4x_stage1:
result = false;
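
The NULL-check cleanup and the new project_id stash follow a common factory-create pattern; a self-contained sketch (types and names are illustrative, not the kernel's):

#include <string.h>

struct core_instance {
	int project_id;
	/* ... per-project function tables would follow ... */
};

static int core_create(int project_id, struct core_instance *out)
{
	if (!out)
		return 0;

	/* start from a clean instance, then record who built it */
	memset(out, 0, sizeof(*out));
	out->project_id = project_id;

	/* project-specific hookup would be dispatched here */
	return 1;
}
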
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h
index 411c514fe65c..411c514fe65c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h
index bdee6ad7bc59..1087a8c926ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -36,7 +36,9 @@ struct dml2_core_ip_params {
unsigned int max_line_buffer_lines;
unsigned int writeback_interface_buffer_size_kbytes;
unsigned int max_num_dpp;
+ unsigned int max_num_opp;
unsigned int max_num_otg;
+ unsigned int TDLUT_33cube_count;
unsigned int max_num_wb;
unsigned int max_dchub_pscl_bw_pix_per_clk;
unsigned int max_pscl_lb_bw_pix_per_clk;
@@ -46,6 +48,7 @@ struct dml2_core_ip_params {
double max_vscl_ratio;
unsigned int max_hscl_taps;
unsigned int max_vscl_taps;
+ unsigned int odm_combine_support_mask;
unsigned int num_dsc;
unsigned int maximum_dsc_bits_per_component;
unsigned int maximum_pixels_per_line_per_dsc_unit;
@@ -82,7 +85,6 @@ struct dml2_core_ip_params {
unsigned int subvp_swath_height_margin_lines;
unsigned int subvp_fw_processing_delay_us;
unsigned int subvp_pstate_allow_width_us;
-
// MRQ
bool dcn_mrq_present;
unsigned int zero_size_buffer_entries;
@@ -102,6 +104,9 @@ struct dml2_core_internal_DmlPipe {
double DCFClkDeepSleep;
unsigned int DPPPerSurface;
bool ScalerEnabled;
+ bool UPSPEnabled;
+ unsigned int UPSPVTaps;
+ enum dml2_sample_positioning UPSPSamplePositioning;
enum dml2_rotation_angle RotationAngle;
bool mirrored;
unsigned int ViewportHeight;
@@ -186,7 +191,9 @@ enum dml2_core_internal_output_type_rate {
dml2_core_internal_output_rate_hdmi_rate_6x4 = 9,
dml2_core_internal_output_rate_hdmi_rate_8x4 = 10,
dml2_core_internal_output_rate_hdmi_rate_10x4 = 11,
- dml2_core_internal_output_rate_hdmi_rate_12x4 = 12
+ dml2_core_internal_output_rate_hdmi_rate_12x4 = 12,
+ dml2_core_internal_output_rate_hdmi_rate_16x4 = 13,
+ dml2_core_internal_output_rate_hdmi_rate_20x4 = 14
};
struct dml2_core_internal_watermarks {
@@ -198,6 +205,8 @@ struct dml2_core_internal_watermarks {
double WritebackFCLKChangeWatermark;
double StutterExitWatermark;
double StutterEnterPlusExitWatermark;
+ double LowPowerStutterExitWatermark;
+ double LowPowerStutterEnterPlusExitWatermark;
double Z8StutterExitWatermark;
double Z8StutterEnterPlusExitWatermark;
double USRRetrainingWatermark;
@@ -225,6 +234,7 @@ struct dml2_core_internal_mode_support_info {
bool MSOOrODMSplitWithNonDPLink;
bool NotEnoughLanesForMSO;
bool NumberOfOTGSupport;
+ bool NumberOfTDLUT33cubeSupport;
bool NumberOfHDMIFRLSupport;
bool NumberOfDP2p0Support;
bool WritebackScaleRatioAndTapsSupport;
@@ -254,18 +264,23 @@ struct dml2_core_internal_mode_support_info {
bool DCCMetaBufferSizeNotExceeded;
enum dml2_pstate_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support temp_read_or_ppt_support[DML2_MAX_PLANES];
+ bool global_dram_clock_change_support_required;
bool global_dram_clock_change_supported;
bool global_fclk_change_supported;
+ bool global_temp_read_or_ppt_supported;
bool USRRetrainingSupport;
bool AvgBandwidthSupport;
bool UrgVactiveBandwidthSupport;
bool EnoughUrgentLatencyHidingSupport;
+ bool PrefetchScheduleSupported;
bool PrefetchSupported;
bool PrefetchBandwidthSupported;
bool DynamicMetadataSupported;
bool VRatioInPrefetchSupported;
bool DISPCLK_DPPCLK_Support;
bool TotalAvailablePipesSupport;
+ bool ODMSupport;
bool ModeSupport;
bool ViewportSizeSupport;
@@ -314,9 +329,7 @@ struct dml2_core_internal_mode_support_info {
double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; // same as urg_bandwidth, except not scaled by urg burst factor
double non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max];
-
bool avg_bandwidth_support_ok[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max];
-
double max_urgent_latency_us;
double max_non_urgent_latency_us;
double avg_non_urgent_latency_us;
@@ -326,9 +339,10 @@ struct dml2_core_internal_mode_support_info {
bool incorrect_imall_usage;
bool g6_temp_read_support;
- bool temp_read_or_ppt_support;
struct dml2_core_internal_watermarks watermarks;
+ bool dcfclk_support;
+ bool qos_bandwidth_support;
};
struct dml2_core_internal_mode_support {
@@ -350,9 +364,11 @@ struct dml2_core_internal_mode_support {
double SOCCLK; /// <brief Basically just the clock freq at the min (or given) state
double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
double GlobalDPPCLK; /// <brief the Max DPPCLK freq out of all pipes
+ double GlobalDTBCLK; /// <brief the Max DTBCLK freq out of all pipes
double uclk_freq_mhz;
double dram_bw_mbps;
double max_dram_bw_mbps;
+ double min_available_urgent_bandwidth_MBps; /// <brief Minimum guaranteed available urgent return bandwidth in MBps
double MaxFabricClock; /// <brief Basically just the clock freq at the min (or given) state
double MaxDCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
@@ -394,9 +410,13 @@ struct dml2_core_internal_mode_support {
double TWait[DML2_MAX_PLANES];
bool UnboundedRequestEnabled;
+ unsigned int compbuf_reserved_space_64b;
+ bool hw_debug5;
unsigned int CompressedBufferSizeInkByte;
double VRatioPreY[DML2_MAX_PLANES];
double VRatioPreC[DML2_MAX_PLANES];
+ unsigned int req_per_swath_ub_l[DML2_MAX_PLANES];
+ unsigned int req_per_swath_ub_c[DML2_MAX_PLANES];
unsigned int swath_width_luma_ub[DML2_MAX_PLANES];
unsigned int swath_width_chroma_ub[DML2_MAX_PLANES];
unsigned int RequiredSlots[DML2_MAX_PLANES];
@@ -417,8 +437,8 @@ struct dml2_core_internal_mode_support {
double dst_y_prefetch[DML2_MAX_PLANES];
double LinesForVM[DML2_MAX_PLANES];
double LinesForDPTERow[DML2_MAX_PLANES];
- double SwathWidthYSingleDPP[DML2_MAX_PLANES];
- double SwathWidthCSingleDPP[DML2_MAX_PLANES];
+ unsigned int SwathWidthYSingleDPP[DML2_MAX_PLANES];
+ unsigned int SwathWidthCSingleDPP[DML2_MAX_PLANES];
unsigned int BytePerPixelY[DML2_MAX_PLANES];
unsigned int BytePerPixelC[DML2_MAX_PLANES];
double BytePerPixelInDETY[DML2_MAX_PLANES];
@@ -469,13 +489,58 @@ struct dml2_core_internal_mode_support {
double mall_prefetch_sdp_overhead_factor[DML2_MAX_PLANES]; // overhead to the imall or phantom pipe
double mall_prefetch_dram_overhead_factor[DML2_MAX_PLANES];
+ bool is_using_mall_for_ss[DML2_MAX_PLANES];
+ unsigned int meta_row_width_chroma[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqHeightC[DML2_MAX_PLANES];
+ bool PTE_BUFFER_MODE[DML2_MAX_PLANES];
+ unsigned int meta_req_height_chroma[DML2_MAX_PLANES];
+ unsigned int meta_pte_bytes_per_frame_ub_c[DML2_MAX_PLANES];
+ unsigned int dpde0_bytes_per_frame_ub_c[DML2_MAX_PLANES];
+ unsigned int dpte_row_width_luma_ub[DML2_MAX_PLANES];
+ unsigned int meta_req_width[DML2_MAX_PLANES];
+ unsigned int meta_row_width[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqWidthY[DML2_MAX_PLANES];
+ unsigned int dpte_row_height_linear[DML2_MAX_PLANES];
+ unsigned int PTERequestSizeY[DML2_MAX_PLANES];
+ unsigned int dpte_row_width_chroma_ub[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqWidthC[DML2_MAX_PLANES];
+ unsigned int meta_pte_bytes_per_frame_ub_l[DML2_MAX_PLANES];
+ unsigned int dpte_row_height_linear_chroma[DML2_MAX_PLANES];
+ unsigned int PTERequestSizeC[DML2_MAX_PLANES];
+ unsigned int meta_req_height[DML2_MAX_PLANES];
+ unsigned int dpde0_bytes_per_frame_ub_l[DML2_MAX_PLANES];
+ unsigned int meta_req_width_chroma[DML2_MAX_PLANES];
+ unsigned int PixelPTEReqHeightY[DML2_MAX_PLANES];
+ unsigned int BIGK_FRAGMENT_SIZE[DML2_MAX_PLANES];
+ unsigned int vm_group_bytes[DML2_MAX_PLANES];
+ unsigned int VReadyOffsetPix[DML2_MAX_PLANES];
+ unsigned int VUpdateOffsetPix[DML2_MAX_PLANES];
+ unsigned int VUpdateWidthPix[DML2_MAX_PLANES];
+ double TSetup[DML2_MAX_PLANES];
+ double Tdmdl_vm_raw[DML2_MAX_PLANES];
+ double Tdmdl_raw[DML2_MAX_PLANES];
+ unsigned int VStartupMin[DML2_MAX_PLANES]; /// <brief Minimum vstartup to meet the prefetch schedule (i.e. the prefetch solution can be found at this vstartup time); not the actual global sync vstartup pos.
+ double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES];
+ double MaxActiveFCLKChangeLatencySupported;
+
// Backend
bool RequiresDSC[DML2_MAX_PLANES];
bool RequiresFEC[DML2_MAX_PLANES];
double OutputBpp[DML2_MAX_PLANES];
+ double DesiredOutputBpp[DML2_MAX_PLANES];
+ double PixelClockBackEnd[DML2_MAX_PLANES];
unsigned int DSCDelay[DML2_MAX_PLANES];
enum dml2_core_internal_output_type OutputType[DML2_MAX_PLANES];
enum dml2_core_internal_output_type_rate OutputRate[DML2_MAX_PLANES];
+ bool TotalAvailablePipesSupportNoDSC;
+ bool TotalAvailablePipesSupportDSC;
+ unsigned int NumberOfDPPNoDSC;
+ unsigned int NumberOfDPPDSC;
+ enum dml2_odm_mode ODMModeNoDSC;
+ enum dml2_odm_mode ODMModeDSC;
+ double RequiredDISPCLKPerSurfaceNoDSC;
+ double RequiredDISPCLKPerSurfaceDSC;
+ unsigned int EstimatedNumberOfDSCSlices[DML2_MAX_PLANES];
// Bandwidth Related Info
double BandwidthAvailableForImmediateFlip;
@@ -484,8 +549,14 @@ struct dml2_core_internal_mode_support {
double WriteBandwidth[DML2_MAX_PLANES][DML2_MAX_WRITEBACK];
double RequiredPrefetchPixelDataBWLuma[DML2_MAX_PLANES];
double RequiredPrefetchPixelDataBWChroma[DML2_MAX_PLANES];
- /* oto bw should also be considered when calculating peak urgent bw to avoid situations oto/equ mismatches between ms and mp */
- double RequiredPrefetchBWOTO[DML2_MAX_PLANES];
+ /* The max bandwidth calculated from the prefetch schedule should be considered in addition to the pixel data bw to avoid ms/mp mismatches:
+ * 1. oto bw must be considered when calculating peak urgent bw to avoid situations where oto/equ schedule selection differs between ms and mp.
+ *
+ * 2. equ bandwidth must be considered when calculating peak urgent bw if the equ schedule is used in mode support.
+ * Slight differences in intermediate variables may cause the pixel data bandwidth to be higher in mp,
+ * even though the overall equ prefetch bandwidth can be lower going from ms to mp.
+ */
+ double RequiredPrefetchBWMax[DML2_MAX_PLANES];
double cursor_bw[DML2_MAX_PLANES];
double prefetch_cursor_bw[DML2_MAX_PLANES];
double prefetch_vmrow_bw[DML2_MAX_PLANES];
@@ -502,6 +573,7 @@ struct dml2_core_internal_mode_support {
enum dml2_odm_mode ODMMode[DML2_MAX_PLANES];
unsigned int SurfaceSizeInMALL[DML2_MAX_PLANES];
unsigned int NoOfDPP[DML2_MAX_PLANES];
+ unsigned int NoOfOPP[DML2_MAX_PLANES];
bool MPCCombine[DML2_MAX_PLANES];
double dcfclk_deepsleep;
double MinDPPCLKUsingSingleDPP[DML2_MAX_PLANES];
@@ -512,6 +584,7 @@ struct dml2_core_internal_mode_support {
bool PTEBufferSizeNotExceeded[DML2_MAX_PLANES];
bool DCCMetaBufferSizeNotExceeded[DML2_MAX_PLANES];
unsigned int TotalNumberOfActiveDPP;
+ unsigned int TotalNumberOfActiveOPP;
unsigned int TotalNumberOfSingleDPPSurfaces;
unsigned int TotalNumberOfDCCActiveDPP;
unsigned int Total3dlutActive;
@@ -520,7 +593,7 @@ struct dml2_core_internal_mode_support {
double VActiveLatencyHidingMargin[DML2_MAX_PLANES];
double VActiveLatencyHidingUs[DML2_MAX_PLANES];
unsigned int MaxVStartupLines[DML2_MAX_PLANES];
- double dram_change_vactive_det_fill_delay_us[DML2_MAX_PLANES];
+ double pstate_vactive_det_fill_delay_us[dml2_pstate_type_count][DML2_MAX_PLANES];
unsigned int num_mcaches_l[DML2_MAX_PLANES];
unsigned int mcache_row_bytes_l[DML2_MAX_PLANES];
@@ -538,7 +611,44 @@ struct dml2_core_internal_mode_support {
bool mall_comb_mcache_c[DML2_MAX_PLANES];
bool lc_comb_mcache[DML2_MAX_PLANES];
+ unsigned int vmpg_width_y[DML2_MAX_PLANES];
+ unsigned int vmpg_height_y[DML2_MAX_PLANES];
+ unsigned int vmpg_width_c[DML2_MAX_PLANES];
+ unsigned int vmpg_height_c[DML2_MAX_PLANES];
+
+ unsigned int meta_row_height_luma[DML2_MAX_PLANES];
+ unsigned int meta_row_height_chroma[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_l[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_c[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES];
+
+ unsigned int pstate_bytes_required_l[dml2_pstate_type_count][DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_c[dml2_pstate_type_count][DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_line[DML2_MAX_PLANES];
+
+ unsigned int MaximumVStartup[DML2_MAX_PLANES];
+
+ double HostVMInefficiencyFactor;
+ double HostVMInefficiencyFactorPrefetch;
+
+ unsigned int tdlut_pte_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES];
+ double tdlut_opt_time[DML2_MAX_PLANES];
+ double tdlut_drain_time[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES];
+
+ double Tvm_trips_flip[DML2_MAX_PLANES];
+ double Tr0_trips_flip[DML2_MAX_PLANES];
+ double Tvm_trips_flip_rounded[DML2_MAX_PLANES];
+ double Tr0_trips_flip_rounded[DML2_MAX_PLANES];
+
+ unsigned int DSTYAfterScaler[DML2_MAX_PLANES];
+ unsigned int DSTXAfterScaler[DML2_MAX_PLANES];
+ enum dml2_pstate_method uclk_pstate_switch_modes[DML2_MAX_PLANES];
};
/// @brief A mega structure that houses various info for model programming step.
@@ -548,6 +658,7 @@ struct dml2_core_internal_mode_program {
double FabricClock; /// <brief Basically just the clock freq at the min (or given) state
//double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
double dram_bw_mbps;
+ double min_available_urgent_bandwidth_MBps; /// <brief Minimum guaranteed available urgent return bandwidth in MBps
double uclk_freq_mhz;
unsigned int NoOfDPP[DML2_MAX_PLANES];
enum dml2_odm_mode ODMMode[DML2_MAX_PLANES];
@@ -599,6 +710,8 @@ struct dml2_core_internal_mode_program {
unsigned int MacroTileHeightC[DML2_MAX_PLANES];
unsigned int MacroTileWidthY[DML2_MAX_PLANES];
unsigned int MacroTileWidthC[DML2_MAX_PLANES];
+ double MaximumSwathWidthLuma[DML2_MAX_PLANES];
+ double MaximumSwathWidthChroma[DML2_MAX_PLANES];
bool surf_linear128_l[DML2_MAX_PLANES];
bool surf_linear128_c[DML2_MAX_PLANES];
@@ -631,6 +744,14 @@ struct dml2_core_internal_mode_program {
double UrgentBurstFactorChroma[DML2_MAX_PLANES];
double UrgentBurstFactorChromaPre[DML2_MAX_PLANES];
+ double MaximumSwathWidthInLineBufferLuma;
+ double MaximumSwathWidthInLineBufferChroma;
+
+ unsigned int vmpg_width_y[DML2_MAX_PLANES];
+ unsigned int vmpg_height_y[DML2_MAX_PLANES];
+ unsigned int vmpg_width_c[DML2_MAX_PLANES];
+ unsigned int vmpg_height_c[DML2_MAX_PLANES];
+
double meta_row_bw[DML2_MAX_PLANES];
unsigned int meta_row_bytes[DML2_MAX_PLANES];
unsigned int meta_req_width[DML2_MAX_PLANES];
@@ -652,7 +773,9 @@ struct dml2_core_internal_mode_program {
unsigned int PTERequestSizeC[DML2_MAX_PLANES];
double TWait[DML2_MAX_PLANES];
+ double Tdmdl_vm_raw[DML2_MAX_PLANES];
double Tdmdl_vm[DML2_MAX_PLANES];
+ double Tdmdl_raw[DML2_MAX_PLANES];
double Tdmdl[DML2_MAX_PLANES];
double TSetup[DML2_MAX_PLANES];
unsigned int dpde0_bytes_per_frame_ub_l[DML2_MAX_PLANES];
@@ -684,6 +807,39 @@ struct dml2_core_internal_mode_program {
double TCalc;
unsigned int TotImmediateFlipBytes;
+ unsigned int MaxTotalDETInKByte;
+ unsigned int NomDETInKByte;
+ unsigned int MinCompressedBufferSizeInKByte;
+ double PixelClockBackEnd[DML2_MAX_PLANES];
+ double OutputBpp[DML2_MAX_PLANES];
+ bool dsc_enable[DML2_MAX_PLANES];
+ unsigned int num_dsc_slices[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_l[DML2_MAX_PLANES];
+ unsigned int meta_row_bytes_per_row_ub_c[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES];
+ unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
+ unsigned int cursor_bytes_per_line[DML2_MAX_PLANES];
+ unsigned int MaxVStartupLines[DML2_MAX_PLANES]; /// <brief more like vblank for the plane's OTG
+ double HostVMInefficiencyFactor;
+ double HostVMInefficiencyFactorPrefetch;
+ unsigned int tdlut_pte_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_frame[DML2_MAX_PLANES];
+ unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES];
+ double tdlut_opt_time[DML2_MAX_PLANES];
+ double tdlut_drain_time[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES];
+ double Tvm_trips_flip[DML2_MAX_PLANES];
+ double Tr0_trips_flip[DML2_MAX_PLANES];
+ double Tvm_trips_flip_rounded[DML2_MAX_PLANES];
+ double Tr0_trips_flip_rounded[DML2_MAX_PLANES];
+ bool immediate_flip_required; // any pipes need immediate flip
+ double SOCCLK; /// <brief Basically just the clock freq at the min (or given) state
+ double TotalWRBandwidth;
+ double max_urgent_latency_us;
+ double df_response_time_us;
+
+ enum dml2_pstate_method uclk_pstate_switch_modes[DML2_MAX_PLANES];
// -------------------
// Output
// -------------------
@@ -694,9 +850,12 @@ struct dml2_core_internal_mode_program {
// Support
bool UrgVactiveBandwidthSupport;
+ bool PrefetchScheduleSupported;
+ bool UrgentBandwidthSupport;
bool PrefetchModeSupported; // <brief Is the prefetch mode (bandwidth and latency) supported
bool ImmediateFlipSupported;
bool ImmediateFlipSupportedForPipe[DML2_MAX_PLANES];
+ bool dcfclk_support;
// Clock
double Dcfclk;
@@ -730,6 +889,9 @@ struct dml2_core_internal_mode_program {
double Z8StutterEfficiency;
unsigned int Z8NumberOfStutterBurstsPerFrame;
double Z8StutterEfficiencyNotIncludingVBlank;
+ double LowPowerStutterEfficiency;
+ double LowPowerStutterEfficiencyNotIncludingVBlank;
+ unsigned int LowPowerNumberOfStutterBurstsPerFrame;
double StutterPeriod;
double Z8StutterEfficiencyBestCase;
unsigned int Z8NumberOfStutterBurstsPerFrameBestCase;
@@ -788,7 +950,7 @@ struct dml2_core_internal_mode_program {
// RQ registers
bool PTE_BUFFER_MODE[DML2_MAX_PLANES];
unsigned int BIGK_FRAGMENT_SIZE[DML2_MAX_PLANES];
-
+ double VActiveLatencyHidingUs[DML2_MAX_PLANES];
unsigned int SubViewportLinesNeededInMALL[DML2_MAX_PLANES];
bool is_using_mall_for_ss[DML2_MAX_PLANES];
@@ -804,11 +966,12 @@ struct dml2_core_internal_mode_program {
double MaxActiveFCLKChangeLatencySupported;
bool USRRetrainingSupport;
bool g6_temp_read_support;
- bool temp_read_or_ppt_support;
enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES];
enum dml2_pstate_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support temp_read_or_ppt_support[DML2_MAX_PLANES];
bool global_dram_clock_change_supported;
bool global_fclk_change_supported;
+ bool global_temp_read_or_ppt_supported;
double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES];
double WritebackAllowFCLKChangeEndPosition[DML2_MAX_PLANES];
double WritebackAllowDRAMClockChangeEndPosition[DML2_MAX_PLANES];
@@ -869,6 +1032,8 @@ struct dml2_core_internal_SOCParametersList {
double FCLKChangeLatency;
double SRExitTime;
double SREnterPlusExitTime;
+ double SRExitTimeLowPower;
+ double SREnterPlusExitTimeLowPower;
double SRExitZ8Time;
double SREnterPlusExitZ8Time;
double USRRetrainingLatency;
@@ -973,8 +1138,8 @@ struct dml2_core_calcs_mode_support_locals {
unsigned int cursor_bytes[DML2_MAX_PLANES];
bool stream_visited[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_l[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_c[DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_l[dml2_pstate_type_count][DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_c[dml2_pstate_type_count][DML2_MAX_PLANES];
double prefetch_sw_bytes[DML2_MAX_PLANES];
double Tpre_rounded[DML2_MAX_PLANES];
@@ -1001,10 +1166,10 @@ struct dml2_core_calcs_mode_programming_locals {
double dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max];
double surface_dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES];
double surface_dummy_bw0[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES];
- unsigned int dummy_integer_array[2][DML2_MAX_PLANES];
+ unsigned int dummy_integer_array[4][DML2_MAX_PLANES];
enum dml2_output_encoder_class dummy_output_encoder_array[DML2_MAX_PLANES];
double dummy_single_array[2][DML2_MAX_PLANES];
- unsigned int dummy_long_array[4][DML2_MAX_PLANES];
+ unsigned int dummy_long_array[8][DML2_MAX_PLANES];
bool dummy_boolean_array[2][DML2_MAX_PLANES];
bool dummy_boolean[2];
double dummy_single[2];
@@ -1028,7 +1193,6 @@ struct dml2_core_calcs_mode_programming_locals {
double dlg_vblank_start;
double LSetup;
double blank_lines_remaining;
- double TotalWRBandwidth;
double WRBandwidth;
struct dml2_core_internal_DmlPipe myPipe;
double PixelClockBackEndFactor;
@@ -1066,8 +1230,8 @@ struct dml2_core_calcs_mode_programming_locals {
double Tr0_trips_flip_rounded[DML2_MAX_PLANES];
unsigned int per_pipe_flip_bytes[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_l[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_c[DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_l[dml2_pstate_type_count][DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_c[dml2_pstate_type_count][DML2_MAX_PLANES];
double prefetch_sw_bytes[DML2_MAX_PLANES];
double Tpre_rounded[DML2_MAX_PLANES];
@@ -1153,6 +1317,7 @@ struct dml2_core_calcs_CalculateVMRowAndSwath_params {
unsigned int HostVMMinPageSize;
unsigned int DCCMetaBufferSizeBytes;
bool mrq_present;
+ enum dml2_pstate_method *uclk_pstate_switch_modes;
// Output
bool *PTEBufferSizeNotExceeded;
@@ -1389,7 +1554,7 @@ struct dml2_core_shared_get_urgent_bandwidth_required_locals {
double vm_row_bw;
double flip_and_active_bw;
double flip_and_prefetch_bw;
- double flip_and_prefetch_bw_oto;
+ double flip_and_prefetch_bw_max;
double active_and_excess_bw;
};
@@ -1418,6 +1583,7 @@ struct dml2_core_shared_CalculateFlipSchedule_locals {
struct dml2_core_shared_rq_dlg_get_dlg_reg_locals {
unsigned int plane_idx;
+ unsigned int stream_idx;
enum dml2_source_format_class source_format;
const struct dml2_timing_cfg *timing;
bool dual_plane;
@@ -1578,10 +1744,12 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_param
unsigned int max_request_size_bytes;
unsigned int *meta_row_height_l;
unsigned int *meta_row_height_c;
+ enum dml2_pstate_method *uclk_pstate_switch_modes;
// Output
struct dml2_core_internal_watermarks *Watermark;
enum dml2_pstate_change_support *DRAMClockChangeSupport;
+ bool *global_dram_clock_change_support_required;
bool *global_dram_clock_change_supported;
double *MaxActiveDRAMClockChangeLatencySupported;
unsigned int *SubViewportLinesNeededInMALL;
@@ -1592,10 +1760,10 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_param
double *VActiveLatencyHidingMargin;
double *VActiveLatencyHidingUs;
bool *g6_temp_read_support;
- bool *temp_read_or_ppt_support;
+ enum dml2_pstate_change_support *temp_read_or_ppt_support;
+ bool *global_temp_read_or_ppt_supported;
};
-
struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params {
const struct dml2_display_cfg *display_cfg;
unsigned int ConfigReturnBufferSizeInKByte;
@@ -1625,6 +1793,9 @@ struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params {
double *BytePerPixDETC;
unsigned int *DPPPerSurface;
bool mrq_present;
+ unsigned int dummy[2][DML2_MAX_PLANES];
+ unsigned int swath_width_luma_ub_single_dpp[DML2_MAX_PLANES];
+ unsigned int swath_width_chroma_ub_single_dpp[DML2_MAX_PLANES];
// output
unsigned int *req_per_swath_ub_l;
@@ -1642,6 +1813,8 @@ struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params {
unsigned int *DETBufferSizeC;
unsigned int *full_swath_bytes_l;
unsigned int *full_swath_bytes_c;
+ unsigned int *full_swath_bytes_single_dpp_l;
+ unsigned int *full_swath_bytes_single_dpp_c;
bool *UnboundedRequestEnabled;
unsigned int *compbuf_reserved_space_64b;
unsigned int *CompressedBufferSizeInkByte;
@@ -1698,9 +1871,11 @@ struct dml2_core_calcs_CalculateStutterEfficiency_params {
unsigned int CompbufReservedSpaceZs;
bool hw_debug5;
double SRExitTime;
+ double SRExitTimeLowPower;
double SRExitZ8Time;
bool SynchronizeTimings;
double StutterEnterPlusExitWatermark;
+ double LowPowerStutterEnterPlusExitWatermark;
double Z8StutterEnterPlusExitWatermark;
bool ProgressiveToInterlaceUnitInOPP;
double *MinTTUVBlank;
@@ -1726,7 +1901,10 @@ struct dml2_core_calcs_CalculateStutterEfficiency_params {
// output
double *StutterEfficiencyNotIncludingVBlank;
double *StutterEfficiency;
+ double *LowPowerStutterEfficiencyNotIncludingVBlank;
+ double *LowPowerStutterEfficiency;
unsigned int *NumberOfStutterBurstsPerFrame;
+ unsigned int *LowPowerNumberOfStutterBurstsPerFrame;
double *Z8StutterEfficiencyNotIncludingVBlank;
double *Z8StutterEfficiency;
unsigned int *Z8NumberOfStutterBurstsPerFrame;
@@ -1801,7 +1979,7 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
double *VRatioPrefetchC;
double *RequiredPrefetchPixelDataBWLuma;
double *RequiredPrefetchPixelDataBWChroma;
- double *RequiredPrefetchBWOTO;
+ double *RequiredPrefetchBWMax;
bool *NotEnoughTimeForDynamicMetadata;
double *Tno_bw;
double *Tno_bw_flip;
@@ -2038,7 +2216,7 @@ struct dml2_core_calcs_calculate_peak_bandwidth_required_params {
double *surface_read_bandwidth_c;
double *prefetch_bandwidth_l;
double *prefetch_bandwidth_c;
- double *prefetch_bandwidth_oto;
+ double *prefetch_bandwidth_max;
double *excess_vactive_fill_bw_l;
double *excess_vactive_fill_bw_c;
double *cursor_bw;
@@ -2075,7 +2253,7 @@ struct dml2_core_calcs_calculate_bytes_to_fetch_required_to_hide_latency_params
unsigned int *swath_width_c;
unsigned int *swath_height_l;
unsigned int *swath_height_c;
- double latency_to_hide_us;
+ double latency_to_hide_us[DML2_MAX_PLANES];
/* outputs */
unsigned int *bytes_required_l;
@@ -2143,6 +2321,7 @@ struct dml2_core_calcs_mode_support_ex {
const struct dml2_display_cfg *in_display_cfg;
const struct dml2_mcg_min_clock_table *min_clk_table;
int min_clk_index;
+ enum dml2_project_id project_id;
//unsigned int in_state_index;
struct dml2_core_internal_mode_support_info *out_evaluation_info;
};
@@ -2155,6 +2334,7 @@ struct dml2_core_calcs_mode_programming_ex {
const struct dml2_mcg_min_clock_table *min_clk_table;
const struct core_display_cfg_support_info *cfg_support_info;
int min_clk_index;
+ enum dml2_project_id project_id;
struct dml2_display_cfg_programming *programming;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c
index 7a220c0141c2..b57d0f6ea6a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c
@@ -306,6 +306,8 @@ void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mod
DML_LOG_VERBOSE("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
if (!fail_only || support->g6_temp_read_support == 0)
DML_LOG_VERBOSE("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
+ if (!fail_only || (support->global_dram_clock_change_supported == 0 && support->global_dram_clock_change_support_required))
+ DML_LOG_VERBOSE("DML: support: dram_clock_change_support = %d\n", support->global_dram_clock_change_supported);
if (!fail_only || support->ImmediateFlipSupport == 0)
DML_LOG_VERBOSE("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
if (!fail_only || support->LinkCapacitySupport == 0)
@@ -464,7 +466,7 @@ bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw
bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode)
{
- return (sw_mode == dml2_sw_linear || sw_mode == dml2_sw_linear_256b || sw_mode == dml2_linear_64elements);
+ return sw_mode == dml2_sw_linear;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h
index 95f0d017add4..95f0d017add4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index f486b090bbfc..22969a533a7b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
@@ -389,9 +389,6 @@ static bool map_min_clocks_to_dpm(const struct dml2_core_mode_support_result *mo
if (result)
result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.dispclk_khz, &state_table->dispclk);
- if (result)
- result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.deepsleep_dcfclk_khz, &state_table->dcfclk);
-
for (i = 0; i < DML2_MAX_DCN_PIPES; i++) {
if (result)
result = round_up_to_next_dpm(&display_cfg->plane_programming[i].min_clocks.dcn4x.dppclk_khz, &state_table->dppclk);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
index e7b58f2efda4..e7b58f2efda4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
index 3861bc6c9621..dfd01440737d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
@@ -20,7 +20,7 @@ bool dml2_dpmm_create(enum dml2_project_id project_id, struct dml2_dpmm_instance
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_dpmm_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
index 20ba2e446f1d..20ba2e446f1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
index a265f254152c..a265f254152c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
index 02da6f45cbf7..f54fde8fba90 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
@@ -10,4 +10,4 @@
bool mcg_dcn4_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out);
bool mcg_dcn4_unit_test(void);
-#endif
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c
index cd3fbc0591d8..c60b8fe90819 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c
@@ -15,7 +15,7 @@ bool dml2_mcg_create(enum dml2_project_id project_id, struct dml2_mcg_instance *
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_mcg_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h
index ad307deca3b0..ad307deca3b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
index e763c8e45da8..1b9579a32ff2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
@@ -48,18 +48,19 @@ static void set_reserved_time_on_all_planes_with_stream_index(struct display_con
static void remove_duplicates(double *list_a, int *list_a_size)
{
- int cur_element = 0;
- // For all elements b[i] in list_b[]
- while (cur_element < *list_a_size - 1) {
- if (list_a[cur_element] == list_a[cur_element + 1]) {
- for (int j = cur_element + 1; j < *list_a_size - 1; j++) {
- list_a[j] = list_a[j + 1];
- }
- *list_a_size = *list_a_size - 1;
- } else {
- cur_element++;
+ int j = 0;
+
+ if (*list_a_size == 0)
+ return;
+
+ for (int i = 1; i < *list_a_size; i++) {
+ if (list_a[j] != list_a[i]) {
+ j++;
+ list_a[j] = list_a[i];
}
}
+
+ *list_a_size = j + 1;
}
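The rewritten remove_duplicates() above replaces the O(n^2) shift-everything-left loop with a single-pass, in-place compaction (the same idea as C++ std::unique), relying on the list being sorted so duplicates sit next to each other. A minimal standalone sketch of the same pattern, under that same sortedness assumption:

/* Keep a write index j and a read index i; copy forward only when a new
 * value appears. Assumes list[] is sorted so equal elements are adjacent. */
static void dedupe_sorted(double *list, int *size)
{
	int j = 0;

	if (*size == 0)
		return;

	for (int i = 1; i < *size; i++) {
		if (list[j] != list[i]) {
			j++;
			list[j] = list[i];
		}
	}

	*size = j + 1;
}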
static bool increase_mpc_combine_factor(unsigned int *mpc_combine_factor, unsigned int limit)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
index f00bd9e72a86..f00bd9e72a86 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index d88b3e0082dd..c26e100fcaf2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -642,6 +642,11 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
int i = 0;
struct dml2_pmo_instance *pmo = in_out->instance;
+ unsigned int base_list_size = 0;
+ const struct dml2_pmo_pstate_strategy *base_list = NULL;
+ unsigned int *expanded_list_size = NULL;
+ struct dml2_pmo_pstate_strategy *expanded_list = NULL;
+
pmo->soc_bb = in_out->soc_bb;
pmo->ip_caps = in_out->ip_caps;
pmo->mpc_combine_limit = 2;
@@ -656,53 +661,71 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
pmo->options = in_out->options;
/* generate permutations of p-state configs from base strategy list */
- for (i = 1; i <= PMO_DCN4_MAX_DISPLAYS; i++) {
- switch (i) {
+ for (i = 0; i < PMO_DCN4_MAX_DISPLAYS; i++) {
+ switch (i+1) {
case 1:
- DML_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_1_display,
- base_strategy_list_1_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_1_display;
+ base_list_size = base_strategy_list_1_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display;
+
break;
case 2:
- DML_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_2_display,
- base_strategy_list_2_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_2_display;
+ base_list_size = base_strategy_list_2_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display;
+
break;
case 3:
- DML_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_3_display,
- base_strategy_list_3_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_3_display;
+ base_list_size = base_strategy_list_3_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display;
+
break;
case 4:
- DML_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_4_display,
- base_strategy_list_4_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_4_display;
+ base_list_size = base_strategy_list_4_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display;
+
break;
}
+
+ DML_ASSERT(base_list_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+
+ /* populate list */
+ pmo_dcn4_fams2_expand_base_pstate_strategies(
+ base_list,
+ base_list_size,
+ i + 1,
+ expanded_list,
+ expanded_list_size);
}
return true;
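The initialize refactor above deduplicates four nearly identical switch arms: each arm now only selects a (base list, expanded list) pair, preferring a caller-supplied override list when both the pointer and its entry count are set, and the expand call happens exactly once after the switch. A hedged sketch of just the selection step; the stand-in types and the name pick_base_list are illustrative, not from the patch:

/* Illustration only: stand-in types so the selection pattern compiles on
 * its own; the real dml2_pmo_pstate_strategy and options types live in
 * the dml2 headers. */
struct strategy;
struct opts {
	const struct strategy *override_strategy_lists[4];
	unsigned int num_override_strategies_per_list[4];
};

/* Prefer a caller-supplied override list when both the pointer and its
 * size are nonzero; otherwise fall back to the built-in default table. */
static void pick_base_list(const struct opts *o, int idx,
			   const struct strategy *def, unsigned int def_size,
			   const struct strategy **base, unsigned int *base_size)
{
	if (o->override_strategy_lists[idx] &&
	    o->num_override_strategies_per_list[idx]) {
		*base = o->override_strategy_lists[idx];
		*base_size = o->num_override_strategies_per_list[idx];
	} else {
		*base = def;
		*base_size = def_size;
	}
}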
@@ -1026,13 +1049,13 @@ static bool all_timings_support_vblank(const struct dml2_pmo_instance *pmo,
return synchronizable;
}
-static unsigned int calc_svp_microschedule(const struct dml2_fams2_meta *fams2_meta)
+static unsigned int calc_svp_microschedule(const struct dml2_pstate_meta *pstate_meta)
{
- return fams2_meta->contention_delay_otg_vlines +
- fams2_meta->method_subvp.programming_delay_otg_vlines +
- fams2_meta->method_subvp.phantom_vtotal +
- fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
- fams2_meta->dram_clk_change_blackout_otg_vlines;
+ return pstate_meta->contention_delay_otg_vlines +
+ pstate_meta->method_subvp.programming_delay_otg_vlines +
+ pstate_meta->method_subvp.phantom_vtotal +
+ pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
+ pstate_meta->blackout_otg_vlines;
}
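calc_svp_microschedule() above is a plain sum of per-stream vline budgets; the caller later rejects SubVP when the total does not fit within v_active. A worked example with assumed vline counts (not taken from the patch):

/* contention_delay_otg_vlines             =   4
 * method_subvp programming delay (vlines) =  10
 * method_subvp.phantom_vtotal             = 250
 * prefetch_to_mall delay (vlines)         =   6
 * blackout_otg_vlines                     =  30
 * microschedule_vlines = 4 + 10 + 250 + 6 + 30 = 300
 * -> SubVP is blocked for any stream whose v_active <= 300. */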
static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo,
@@ -1042,29 +1065,29 @@ static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo,
unsigned int i;
for (i = 0; i < DML2_MAX_PLANES; i++) {
const struct dml2_stream_parameters *stream_descriptor;
- const struct dml2_fams2_meta *stream_fams2_meta;
+ const struct dml2_pstate_meta *stream_pstate_meta;
if (is_bit_set_in_bitfield(mask, i)) {
stream_descriptor = &display_config->display_config.stream_descriptors[i];
- stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i];
+ stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[i];
if (!stream_descriptor->timing.drr_config.enabled)
return false;
/* cannot support required vtotal */
- if (stream_fams2_meta->method_drr.stretched_vtotal > stream_fams2_meta->max_vtotal) {
+ if (stream_pstate_meta->method_drr.stretched_vtotal > stream_pstate_meta->max_vtotal) {
return false;
}
/* check rr is within bounds */
- if (stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.drr.refresh_rate_limit_min ||
- stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.drr.refresh_rate_limit_max) {
+ if (stream_pstate_meta->nom_refresh_rate_hz < pmo->fams_params.v2.drr.refresh_rate_limit_min ||
+ stream_pstate_meta->nom_refresh_rate_hz > pmo->fams_params.v2.drr.refresh_rate_limit_max) {
return false;
}
/* check required stretch is allowed */
if (stream_descriptor->timing.drr_config.max_instant_vtotal_delta > 0 &&
- stream_fams2_meta->method_drr.stretched_vtotal - stream_fams2_meta->nom_vtotal > stream_descriptor->timing.drr_config.max_instant_vtotal_delta) {
+ stream_pstate_meta->method_drr.stretched_vtotal - stream_pstate_meta->nom_vtotal > (int)stream_descriptor->timing.drr_config.max_instant_vtotal_delta) {
return false;
}
}
@@ -1079,7 +1102,7 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
{
const struct dml2_stream_parameters *stream_descriptor;
const struct dml2_plane_parameters *plane_descriptor;
- const struct dml2_fams2_meta *stream_fams2_meta;
+ const struct dml2_pstate_meta *stream_pstate_meta;
unsigned int microschedule_vlines;
unsigned int i;
unsigned int mcaches_per_plane;
@@ -1124,13 +1147,13 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
for (i = 0; i < DML2_MAX_PLANES; i++) {
if (is_bit_set_in_bitfield(mask, i)) {
stream_descriptor = &display_config->display_config.stream_descriptors[i];
- stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i];
+ stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[i];
if (stream_descriptor->overrides.disable_subvp) {
return false;
}
- microschedule_vlines = calc_svp_microschedule(&pmo->scratch.pmo_dcn4.stream_fams2_meta[i]);
+ microschedule_vlines = calc_svp_microschedule(&pmo->scratch.pmo_dcn4.stream_pstate_meta[i]);
/* block if using an interlaced timing */
if (stream_descriptor->timing.interlaced) {
@@ -1141,8 +1164,8 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
* 2) refresh rate must be within the allowed bounds
*/
if (microschedule_vlines >= stream_descriptor->timing.v_active ||
- (stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.subvp.refresh_rate_limit_min ||
- stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.subvp.refresh_rate_limit_max)) {
+ (stream_pstate_meta->nom_refresh_rate_hz < pmo->fams_params.v2.subvp.refresh_rate_limit_min ||
+ stream_pstate_meta->nom_refresh_rate_hz > pmo->fams_params.v2.subvp.refresh_rate_limit_max)) {
return false;
}
}
@@ -1232,43 +1255,43 @@ static bool all_planes_match_method(const struct display_configuation_with_meta
}
static void build_method_scheduling_params(
- struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta,
- struct dml2_fams2_meta *stream_fams2_meta)
+ struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta,
+ struct dml2_pstate_meta *stream_pstate_meta)
{
- stream_method_fams2_meta->allow_time_us =
- (double)((int)stream_method_fams2_meta->allow_end_otg_vline - (int)stream_method_fams2_meta->allow_start_otg_vline) *
- stream_fams2_meta->otg_vline_time_us;
- if (stream_method_fams2_meta->allow_time_us >= stream_method_fams2_meta->period_us) {
+ stream_method_pstate_meta->allow_time_us =
+ (double)((int)stream_method_pstate_meta->allow_end_otg_vline - (int)stream_method_pstate_meta->allow_start_otg_vline) *
+ stream_pstate_meta->otg_vline_time_us;
+ if (stream_method_pstate_meta->allow_time_us >= stream_method_pstate_meta->period_us) {
/* when the allow wave overlaps an entire frame, it is always schedulable (DRR can do this) */
- stream_method_fams2_meta->disallow_time_us = 0.0;
+ stream_method_pstate_meta->disallow_time_us = 0.0;
} else {
- stream_method_fams2_meta->disallow_time_us =
- stream_method_fams2_meta->period_us - stream_method_fams2_meta->allow_time_us;
+ stream_method_pstate_meta->disallow_time_us =
+ stream_method_pstate_meta->period_us - stream_method_pstate_meta->allow_time_us;
}
}
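build_method_scheduling_params() above converts a method's allow window from OTG vlines to time: allow_time_us = (allow_end - allow_start) * otg_vline_time_us, and disallow_time_us = period_us - allow_time_us, forced to zero when the allow window spans at least a full period (as DRR can). A worked example with assumed numbers (not from the patch):

/* otg_vline_time_us = 7.4, allow window = vlines 400..2100, period = 16666.7 us
 * allow_time_us    = (2100 - 400) * 7.4 = 12580.0 us
 * disallow_time_us = 16666.7 - 12580.0  =  4086.7 us
 * Had allow_time_us reached the full 16666.7 us period,
 * disallow_time_us would be forced to 0. */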
-static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta(
+static struct dml2_pstate_per_method_common_meta *get_per_method_common_meta(
struct dml2_pmo_instance *pmo,
enum dml2_pstate_method stream_pstate_method,
int stream_idx)
{
- struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta = NULL;
+ struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta = NULL;
switch (stream_pstate_method) {
case dml2_pstate_method_vactive:
case dml2_pstate_method_fw_vactive_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vactive.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_vactive.common;
break;
case dml2_pstate_method_vblank:
case dml2_pstate_method_fw_vblank_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vblank.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_vblank.common;
break;
case dml2_pstate_method_fw_svp:
case dml2_pstate_method_fw_svp_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_subvp.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_subvp.common;
break;
case dml2_pstate_method_fw_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_drr.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_drr.common;
break;
case dml2_pstate_method_reserved_hw:
case dml2_pstate_method_reserved_fw:
@@ -1277,10 +1300,10 @@ static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta(
case dml2_pstate_method_count:
case dml2_pstate_method_na:
default:
- stream_method_fams2_meta = NULL;
+ stream_method_pstate_meta = NULL;
}
- return stream_method_fams2_meta;
+ return stream_method_pstate_meta;
}
static bool is_timing_group_schedulable(
@@ -1288,10 +1311,10 @@ static bool is_timing_group_schedulable(
const struct display_configuation_with_meta *display_cfg,
const struct dml2_pmo_pstate_strategy *pstate_strategy,
const unsigned int timing_group_idx,
- struct dml2_fams2_per_method_common_meta *group_fams2_meta)
+ struct dml2_pstate_per_method_common_meta *group_pstate_meta)
{
unsigned int i;
- struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta;
+ struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta;
unsigned int base_stream_idx = 0;
struct dml2_pmo_scratch *s = &pmo->scratch;
@@ -1305,31 +1328,31 @@ static bool is_timing_group_schedulable(
}
/* init allow start and end lines for timing group */
- stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx);
- if (!stream_method_fams2_meta)
+ stream_method_pstate_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx);
+ if (!stream_method_pstate_meta)
return false;
- group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline;
- group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline;
- group_fams2_meta->period_us = stream_method_fams2_meta->period_us;
+ group_pstate_meta->allow_start_otg_vline = stream_method_pstate_meta->allow_start_otg_vline;
+ group_pstate_meta->allow_end_otg_vline = stream_method_pstate_meta->allow_end_otg_vline;
+ group_pstate_meta->period_us = stream_method_pstate_meta->period_us;
for (i = base_stream_idx + 1; i < display_cfg->display_config.num_streams; i++) {
if (is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i)) {
- stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i);
- if (!stream_method_fams2_meta)
+ stream_method_pstate_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i);
+ if (!stream_method_pstate_meta)
continue;
- if (group_fams2_meta->allow_start_otg_vline < stream_method_fams2_meta->allow_start_otg_vline) {
+ if (group_pstate_meta->allow_start_otg_vline < stream_method_pstate_meta->allow_start_otg_vline) {
/* set group allow start to larger otg vline */
- group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline;
+ group_pstate_meta->allow_start_otg_vline = stream_method_pstate_meta->allow_start_otg_vline;
}
- if (group_fams2_meta->allow_end_otg_vline > stream_method_fams2_meta->allow_end_otg_vline) {
+ if (group_pstate_meta->allow_end_otg_vline > stream_method_pstate_meta->allow_end_otg_vline) {
/* set group allow end to smaller otg vline */
- group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline;
+ group_pstate_meta->allow_end_otg_vline = stream_method_pstate_meta->allow_end_otg_vline;
}
/* check waveform still has positive width */
- if (group_fams2_meta->allow_start_otg_vline >= group_fams2_meta->allow_end_otg_vline) {
+ if (group_pstate_meta->allow_start_otg_vline >= group_pstate_meta->allow_end_otg_vline) {
/* timing group is not schedulable */
return false;
}
@@ -1337,10 +1360,10 @@ static bool is_timing_group_schedulable(
}
/* calculate the rest of the meta */
- build_method_scheduling_params(group_fams2_meta, &pmo->scratch.pmo_dcn4.stream_fams2_meta[base_stream_idx]);
+ build_method_scheduling_params(group_pstate_meta, &pmo->scratch.pmo_dcn4.stream_pstate_meta[base_stream_idx]);
- return group_fams2_meta->allow_time_us > 0.0 &&
- group_fams2_meta->disallow_time_us < pmo->ip_caps->fams2.max_allow_delay_us;
+ return group_pstate_meta->allow_time_us > 0.0 &&
+ group_pstate_meta->disallow_time_us < pmo->ip_caps->fams2.max_allow_delay_us;
}
static bool is_config_schedulable(
@@ -1354,7 +1377,7 @@ static bool is_config_schedulable(
double max_allow_delay_us = 0.0;
- memset(s->pmo_dcn4.group_common_fams2_meta, 0, sizeof(s->pmo_dcn4.group_common_fams2_meta));
+ memset(s->pmo_dcn4.group_common_pstate_meta, 0, sizeof(s->pmo_dcn4.group_common_pstate_meta));
memset(s->pmo_dcn4.sorted_group_gtl_disallow_index, 0, sizeof(unsigned int) * DML2_MAX_PLANES);
/* search for a general solution to the schedule */
@@ -1369,12 +1392,12 @@ static bool is_config_schedulable(
for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
s->pmo_dcn4.sorted_group_gtl_disallow_index[i] = i;
s->pmo_dcn4.sorted_group_gtl_period_index[i] = i;
- if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_fams2_meta[i])) {
+ if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_pstate_meta[i])) {
/* synchronized timing group was not schedulable */
schedulable = false;
break;
}
- max_allow_delay_us += s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us;
+ max_allow_delay_us += s->pmo_dcn4.group_common_pstate_meta[i].disallow_time_us;
}
if ((schedulable && s->pmo_dcn4.num_timing_groups <= 1) || !schedulable) {
@@ -1391,8 +1414,8 @@ static bool is_config_schedulable(
bool swapped = false;
for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) {
- double j_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j]].disallow_time_us;
- double jp1_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us;
+ double j_disallow_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j]].disallow_time_us;
+ double jp1_disallow_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us;
if (j_disallow_us < jp1_disallow_us) {
/* swap as A < B */
swap(s->pmo_dcn4.sorted_group_gtl_disallow_index[j],
@@ -1410,19 +1433,19 @@ static bool is_config_schedulable(
* other display, or when >2 streams continue to halve the remaining allow time.
*/
for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
- if (s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us <= 0.0) {
+ if (s->pmo_dcn4.group_common_pstate_meta[i].disallow_time_us <= 0.0) {
/* this timing group always allows */
continue;
}
- double max_allow_time_us = s->pmo_dcn4.group_common_fams2_meta[i].allow_time_us;
+ double max_allow_time_us = s->pmo_dcn4.group_common_pstate_meta[i].allow_time_us;
for (j = 0; j < s->pmo_dcn4.num_timing_groups; j++) {
unsigned int sorted_j = s->pmo_dcn4.sorted_group_gtl_disallow_index[j];
/* stream can't overlap itself */
- if (i != sorted_j && s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us > 0.0) {
+ if (i != sorted_j && s->pmo_dcn4.group_common_pstate_meta[sorted_j].disallow_time_us > 0.0) {
max_allow_time_us = math_min2(
- s->pmo_dcn4.group_common_fams2_meta[sorted_j].allow_time_us,
- (max_allow_time_us - s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us) / 2);
+ s->pmo_dcn4.group_common_pstate_meta[sorted_j].allow_time_us,
+ (max_allow_time_us - s->pmo_dcn4.group_common_pstate_meta[sorted_j].disallow_time_us) / 2);
if (max_allow_time_us < 0.0) {
/* failed exit early */
@@ -1450,8 +1473,8 @@ static bool is_config_schedulable(
bool swapped = false;
for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) {
- double j_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j]].period_us;
- double jp1_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us;
+ double j_period_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j]].period_us;
+ double jp1_period_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us;
if (j_period_us < jp1_period_us) {
/* swap as A < B */
swap(s->pmo_dcn4.sorted_group_gtl_period_index[j],
@@ -1470,7 +1493,7 @@ static bool is_config_schedulable(
unsigned int sorted_i = s->pmo_dcn4.sorted_group_gtl_period_index[i];
unsigned int sorted_ip1 = s->pmo_dcn4.sorted_group_gtl_period_index[i + 1];
- if (s->pmo_dcn4.group_common_fams2_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_fams2_meta[sorted_ip1].period_us ||
+ if (s->pmo_dcn4.group_common_pstate_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_pstate_meta[sorted_ip1].period_us ||
(s->pmo_dcn4.group_is_drr_enabled[sorted_ip1] && s->pmo_dcn4.group_is_drr_active[sorted_ip1])) {
schedulable = false;
break;
@@ -1492,18 +1515,18 @@ static bool is_config_schedulable(
/* default period_0 > period_1 */
unsigned int lrg_idx = 0;
unsigned int sml_idx = 1;
- if (s->pmo_dcn4.group_common_fams2_meta[0].period_us < s->pmo_dcn4.group_common_fams2_meta[1].period_us) {
+ if (s->pmo_dcn4.group_common_pstate_meta[0].period_us < s->pmo_dcn4.group_common_pstate_meta[1].period_us) {
/* period_0 < period_1 */
lrg_idx = 1;
sml_idx = 0;
}
- period_ratio = s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us / s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us;
- shift_per_period = s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us * (period_ratio - math_floor(period_ratio));
- max_shift_us = s->pmo_dcn4.group_common_fams2_meta[lrg_idx].disallow_time_us - s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us;
- max_allow_delay_us = max_shift_us / shift_per_period * s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us;
+ period_ratio = s->pmo_dcn4.group_common_pstate_meta[lrg_idx].period_us / s->pmo_dcn4.group_common_pstate_meta[sml_idx].period_us;
+ shift_per_period = s->pmo_dcn4.group_common_pstate_meta[sml_idx].period_us * (period_ratio - math_floor(period_ratio));
+ max_shift_us = s->pmo_dcn4.group_common_pstate_meta[lrg_idx].disallow_time_us - s->pmo_dcn4.group_common_pstate_meta[sml_idx].allow_time_us;
+ max_allow_delay_us = max_shift_us / shift_per_period * s->pmo_dcn4.group_common_pstate_meta[lrg_idx].period_us;
if (shift_per_period > 0.0 &&
- shift_per_period < s->pmo_dcn4.group_common_fams2_meta[lrg_idx].allow_time_us + s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us &&
+ shift_per_period < s->pmo_dcn4.group_common_pstate_meta[lrg_idx].allow_time_us + s->pmo_dcn4.group_common_pstate_meta[sml_idx].allow_time_us &&
max_allow_delay_us < pmo->ip_caps->fams2.max_allow_delay_us) {
schedulable = true;
}
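The two-display fallback above measures how far the shorter-period allow window drifts against the longer one per frame: shift_per_period is the fractional remainder of the period ratio expressed in time, and max_allow_delay_us is how long that drift takes to sweep across the large display's disallow region. A worked example with assumed timings (not from the patch):

/* period_lrg = 16666.7 us (60 Hz), period_sml = 6944.4 us (144 Hz)
 * period_ratio     = 16666.7 / 6944.4             = 2.4
 * shift_per_period = 6944.4 * (2.4 - floor(2.4))  = 2777.8 us
 * max_shift_us     = disallow_lrg - allow_sml     (e.g. 9000 - 3000 = 6000 us)
 * max_allow_delay  = 6000 / 2777.8 * 16666.7      = 36000 us
 * Schedulable iff shift_per_period > 0, it is smaller than the two allow
 * windows combined, and max_allow_delay stays under the FAMS2 cap. */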
@@ -1646,22 +1669,22 @@ static int get_vactive_pstate_margin(const struct display_configuation_with_meta
return min_vactive_margin_us;
}
-static unsigned int get_vactive_det_fill_latency_delay_us(const struct display_configuation_with_meta *display_cfg, int plane_mask)
+static int get_vactive_det_fill_latency_delay_us(const struct display_configuation_with_meta *display_cfg, int plane_mask)
{
unsigned char i;
- unsigned int max_vactive_fill_us = 0;
+ int max_vactive_fill_us = 0;
for (i = 0; i < DML2_MAX_PLANES; i++) {
if (is_bit_set_in_bitfield(plane_mask, i)) {
- if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_vactive_det_fill_delay_us > max_vactive_fill_us)
- max_vactive_fill_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_vactive_det_fill_delay_us;
+ if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].vactive_det_fill_delay_us[dml2_pstate_type_uclk] > max_vactive_fill_us)
+ max_vactive_fill_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].vactive_det_fill_delay_us[dml2_pstate_type_uclk];
}
}
return max_vactive_fill_us;
}
-static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
+static void build_pstate_meta_per_stream(struct dml2_pmo_instance *pmo,
struct display_configuation_with_meta *display_config,
int stream_index)
{
@@ -1669,7 +1692,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
const struct dml2_stream_parameters *stream_descriptor = &display_config->display_config.stream_descriptors[stream_index];
const struct core_stream_support_info *stream_info = &display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index];
const struct dml2_timing_cfg *timing = &stream_descriptor->timing;
- struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index];
+ struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index];
/* worst case all other streams require some programming at the same time, 0 if only 1 stream */
unsigned int contention_delay_us = (ip_caps->fams2.vertical_interrupt_ack_delay_us +
@@ -1677,142 +1700,142 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
(display_config->display_config.num_streams - 1);
/* common */
- stream_fams2_meta->valid = true;
- stream_fams2_meta->otg_vline_time_us = (double)timing->h_total / timing->pixel_clock_khz * 1000.0;
- stream_fams2_meta->nom_vtotal = stream_descriptor->timing.vblank_nom + stream_descriptor->timing.v_active;
- stream_fams2_meta->nom_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
- (stream_fams2_meta->nom_vtotal * timing->h_total);
- stream_fams2_meta->nom_frame_time_us =
- (double)stream_fams2_meta->nom_vtotal * stream_fams2_meta->otg_vline_time_us;
- stream_fams2_meta->vblank_start = timing->v_blank_end + timing->v_active;
+ stream_pstate_meta->valid = true;
+ stream_pstate_meta->otg_vline_time_us = (double)timing->h_total / timing->pixel_clock_khz * 1000.0;
+ stream_pstate_meta->nom_vtotal = stream_descriptor->timing.vblank_nom + stream_descriptor->timing.v_active;
+ stream_pstate_meta->nom_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
+ (stream_pstate_meta->nom_vtotal * timing->h_total);
+ stream_pstate_meta->nom_frame_time_us =
+ (double)stream_pstate_meta->nom_vtotal * stream_pstate_meta->otg_vline_time_us;
+ stream_pstate_meta->vblank_start = timing->v_blank_end + timing->v_active;
if (stream_descriptor->timing.drr_config.enabled == true) {
if (stream_descriptor->timing.drr_config.min_refresh_uhz != 0.0) {
- stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
+ stream_pstate_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
((double)stream_descriptor->timing.drr_config.min_refresh_uhz * stream_descriptor->timing.h_total) * 1e9);
} else {
/* assume min of 48Hz */
- stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
+ stream_pstate_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
(48000000.0 * stream_descriptor->timing.h_total) * 1e9);
}
} else {
- stream_fams2_meta->max_vtotal = stream_fams2_meta->nom_vtotal;
- }
- stream_fams2_meta->min_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
- (stream_fams2_meta->max_vtotal * timing->h_total);
- stream_fams2_meta->max_frame_time_us =
- (double)stream_fams2_meta->max_vtotal * stream_fams2_meta->otg_vline_time_us;
-
- stream_fams2_meta->scheduling_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.scheduling_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.vertical_interrupt_ack_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->contention_delay_otg_vlines =
- (unsigned int)math_ceil(contention_delay_us / stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->max_vtotal = stream_pstate_meta->nom_vtotal;
+ }
+ stream_pstate_meta->min_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
+ (stream_pstate_meta->max_vtotal * timing->h_total);
+ stream_pstate_meta->max_frame_time_us =
+ (double)stream_pstate_meta->max_vtotal * stream_pstate_meta->otg_vline_time_us;
+
+ stream_pstate_meta->scheduling_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.scheduling_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->vertical_interrupt_ack_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.vertical_interrupt_ack_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->contention_delay_otg_vlines =
+ (unsigned int)math_ceil(contention_delay_us / stream_pstate_meta->otg_vline_time_us);
/* worst case allow to target needs to account for all streams' allow events overlapping, and 1 line for error */
- stream_fams2_meta->allow_to_target_delay_otg_vlines =
- (unsigned int)(math_ceil((ip_caps->fams2.vertical_interrupt_ack_delay_us + contention_delay_us + ip_caps->fams2.allow_programming_delay_us) / stream_fams2_meta->otg_vline_time_us)) + 1;
- stream_fams2_meta->min_allow_width_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.min_allow_width_us / stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->allow_to_target_delay_otg_vlines =
+ (unsigned int)(math_ceil((ip_caps->fams2.vertical_interrupt_ack_delay_us + contention_delay_us + ip_caps->fams2.allow_programming_delay_us) / stream_pstate_meta->otg_vline_time_us)) + 1;
+ stream_pstate_meta->min_allow_width_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.min_allow_width_us / stream_pstate_meta->otg_vline_time_us);
/* this value should account for urgent latency */
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines =
+ stream_pstate_meta->blackout_otg_vlines =
(unsigned int)math_ceil(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us /
- stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->otg_vline_time_us);
/* scheduling params should be built based on the worst case for allow_time:disallow_time */
/* vactive */
if (display_config->display_config.num_streams == 1) {
/* for single stream, guarantee at least an instant of allow */
- stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor(
+ stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor(
math_max2(0.0,
- timing->v_active - math_max2(1.0, stream_fams2_meta->min_allow_width_otg_vlines) - stream_fams2_meta->dram_clk_change_blackout_otg_vlines));
+ timing->v_active - math_max2(1.0, stream_pstate_meta->min_allow_width_otg_vlines) - stream_pstate_meta->blackout_otg_vlines));
} else {
/* for multi stream, bound to a max fill time defined by IP caps */
- stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines =
- (unsigned int)math_floor((double)ip_caps->max_vactive_det_fill_delay_us / stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines =
+ (unsigned int)math_floor((double)ip_caps->max_vactive_det_fill_delay_us / stream_pstate_meta->otg_vline_time_us);
}
- stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us = stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines * stream_fams2_meta->otg_vline_time_us;
+ stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us = stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines * stream_pstate_meta->otg_vline_time_us;
- if (stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us > 0.0) {
- stream_fams2_meta->method_vactive.common.allow_start_otg_vline =
- timing->v_blank_end + stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
- stream_fams2_meta->method_vactive.common.allow_end_otg_vline =
- stream_fams2_meta->vblank_start -
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
+ if (stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us > 0.0) {
+ stream_pstate_meta->method_vactive.common.allow_start_otg_vline =
+ timing->v_blank_end + stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
+ stream_pstate_meta->method_vactive.common.allow_end_otg_vline =
+ stream_pstate_meta->vblank_start -
+ stream_pstate_meta->blackout_otg_vlines;
} else {
- stream_fams2_meta->method_vactive.common.allow_start_otg_vline = 0;
- stream_fams2_meta->method_vactive.common.allow_end_otg_vline = 0;
+ stream_pstate_meta->method_vactive.common.allow_start_otg_vline = 0;
+ stream_pstate_meta->method_vactive.common.allow_end_otg_vline = 0;
}
- stream_fams2_meta->method_vactive.common.period_us = stream_fams2_meta->nom_frame_time_us;
- build_method_scheduling_params(&stream_fams2_meta->method_vactive.common, stream_fams2_meta);
+ stream_pstate_meta->method_vactive.common.period_us = stream_pstate_meta->nom_frame_time_us;
+ build_method_scheduling_params(&stream_pstate_meta->method_vactive.common, stream_pstate_meta);
/* vblank */
- stream_fams2_meta->method_vblank.common.allow_start_otg_vline = stream_fams2_meta->vblank_start;
- stream_fams2_meta->method_vblank.common.allow_end_otg_vline =
- stream_fams2_meta->method_vblank.common.allow_start_otg_vline + 1;
- stream_fams2_meta->method_vblank.common.period_us = stream_fams2_meta->nom_frame_time_us;
- build_method_scheduling_params(&stream_fams2_meta->method_vblank.common, stream_fams2_meta);
+ stream_pstate_meta->method_vblank.common.allow_start_otg_vline = stream_pstate_meta->vblank_start;
+ stream_pstate_meta->method_vblank.common.allow_end_otg_vline =
+ stream_pstate_meta->method_vblank.common.allow_start_otg_vline + 1;
+ stream_pstate_meta->method_vblank.common.period_us = stream_pstate_meta->nom_frame_time_us;
+ build_method_scheduling_params(&stream_pstate_meta->method_vblank.common, stream_pstate_meta);
/* subvp */
- stream_fams2_meta->method_subvp.programming_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.subvp_programming_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.subvp_df_throttle_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.subvp_prefetch_to_mall_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_subvp.phantom_vactive =
- stream_fams2_meta->allow_to_target_delay_otg_vlines +
- stream_fams2_meta->min_allow_width_otg_vlines +
+ stream_pstate_meta->method_subvp.programming_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.subvp_programming_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.subvp_df_throttle_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.subvp_prefetch_to_mall_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_subvp.phantom_vactive =
+ stream_pstate_meta->allow_to_target_delay_otg_vlines +
+ stream_pstate_meta->min_allow_width_otg_vlines +
stream_info->phantom_min_v_active;
- stream_fams2_meta->method_subvp.phantom_vfp =
- stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines;
+ stream_pstate_meta->method_subvp.phantom_vfp =
+ stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines;
/* phantom vtotal = v_bp(vstartup) + v_sync(1) + v_fp(throttle_delay) + v_active(allow_to_target + min_allow + min_vactive) */
- stream_fams2_meta->method_subvp.phantom_vtotal =
+ stream_pstate_meta->method_subvp.phantom_vtotal =
stream_info->phantom_v_startup +
- stream_fams2_meta->method_subvp.phantom_vfp +
+ stream_pstate_meta->method_subvp.phantom_vfp +
1 +
- stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines +
- stream_fams2_meta->method_subvp.phantom_vactive;
- stream_fams2_meta->method_subvp.common.allow_start_otg_vline =
+ stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines +
+ stream_pstate_meta->method_subvp.phantom_vactive;
+ stream_pstate_meta->method_subvp.common.allow_start_otg_vline =
stream_descriptor->timing.v_blank_end +
- stream_fams2_meta->contention_delay_otg_vlines +
- stream_fams2_meta->method_subvp.programming_delay_otg_vlines +
- stream_fams2_meta->method_subvp.phantom_vtotal +
- stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
- stream_fams2_meta->allow_to_target_delay_otg_vlines;
- stream_fams2_meta->method_subvp.common.allow_end_otg_vline =
- stream_fams2_meta->vblank_start -
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
- stream_fams2_meta->method_subvp.common.period_us = stream_fams2_meta->nom_frame_time_us;
- build_method_scheduling_params(&stream_fams2_meta->method_subvp.common, stream_fams2_meta);
+ stream_pstate_meta->contention_delay_otg_vlines +
+ stream_pstate_meta->method_subvp.programming_delay_otg_vlines +
+ stream_pstate_meta->method_subvp.phantom_vtotal +
+ stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines;
+ stream_pstate_meta->method_subvp.common.allow_end_otg_vline =
+ stream_pstate_meta->vblank_start -
+ stream_pstate_meta->blackout_otg_vlines;
+ stream_pstate_meta->method_subvp.common.period_us = stream_pstate_meta->nom_frame_time_us;
+ build_method_scheduling_params(&stream_pstate_meta->method_subvp.common, stream_pstate_meta);
/* drr */
- stream_fams2_meta->method_drr.programming_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_drr.common.allow_start_otg_vline =
- stream_fams2_meta->vblank_start +
- stream_fams2_meta->allow_to_target_delay_otg_vlines;
- stream_fams2_meta->method_drr.common.period_us = stream_fams2_meta->nom_frame_time_us;
+ stream_pstate_meta->method_drr.programming_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_drr.common.allow_start_otg_vline =
+ stream_pstate_meta->vblank_start +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines;
+ stream_pstate_meta->method_drr.common.period_us = stream_pstate_meta->nom_frame_time_us;
if (display_config->display_config.num_streams <= 1) {
/* only need to stretch vblank for blackout time */
- stream_fams2_meta->method_drr.stretched_vtotal =
- stream_fams2_meta->nom_vtotal +
- stream_fams2_meta->allow_to_target_delay_otg_vlines +
- stream_fams2_meta->min_allow_width_otg_vlines +
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
+ stream_pstate_meta->method_drr.stretched_vtotal =
+ stream_pstate_meta->nom_vtotal +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines +
+ stream_pstate_meta->min_allow_width_otg_vlines +
+ stream_pstate_meta->blackout_otg_vlines;
} else {
/* multi display needs to always be schedulable */
- stream_fams2_meta->method_drr.stretched_vtotal =
- stream_fams2_meta->nom_vtotal * 2 +
- stream_fams2_meta->allow_to_target_delay_otg_vlines +
- stream_fams2_meta->min_allow_width_otg_vlines +
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
- }
- stream_fams2_meta->method_drr.common.allow_end_otg_vline =
- stream_fams2_meta->method_drr.stretched_vtotal -
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
- build_method_scheduling_params(&stream_fams2_meta->method_drr.common, stream_fams2_meta);
+ stream_pstate_meta->method_drr.stretched_vtotal =
+ stream_pstate_meta->nom_vtotal * 2 +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines +
+ stream_pstate_meta->min_allow_width_otg_vlines +
+ stream_pstate_meta->blackout_otg_vlines;
+ }
+ stream_pstate_meta->method_drr.common.allow_end_otg_vline =
+ stream_pstate_meta->method_drr.stretched_vtotal -
+ stream_pstate_meta->blackout_otg_vlines;
+ build_method_scheduling_params(&stream_pstate_meta->method_drr.common, stream_pstate_meta);
}
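Everything in build_pstate_meta_per_stream() above follows from basic timing identities: one OTG vline lasts h_total / pixel_clock, nominal vtotal is v_active + vblank_nom, and the refresh rate is pixel_clock / (vtotal * h_total). A worked example with an assumed 1080p-class timing (not from the patch):

/* h_total = 2200, v_active = 1080, vblank_nom = 45, pixel_clock_khz = 148500
 * otg_vline_time_us   = 2200 / 148500 * 1000.0        = 14.81 us
 * nom_vtotal          = 1080 + 45                     = 1125 lines
 * nom_refresh_rate_hz = 148500 * 1000.0 / (1125*2200) = 60.0 Hz
 * nom_frame_time_us   = 1125 * 14.81                  = 16666.7 us */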
static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo,
@@ -1820,14 +1843,14 @@ static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo,
int stream_index)
{
struct dml2_implicit_svp_meta *stream_svp_meta = &pmo->scratch.pmo_dcn4.stream_svp_meta[stream_index];
- struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index];
+ struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index];
stream_svp_meta->valid = true;
/* PMO FAMS2 precalculates these values */
- stream_svp_meta->v_active = stream_fams2_meta->method_subvp.phantom_vactive;
- stream_svp_meta->v_front_porch = stream_fams2_meta->method_subvp.phantom_vfp;
- stream_svp_meta->v_total = stream_fams2_meta->method_subvp.phantom_vtotal;
+ stream_svp_meta->v_active = stream_pstate_meta->method_subvp.phantom_vactive;
+ stream_svp_meta->v_front_porch = stream_pstate_meta->method_subvp.phantom_vfp;
+ stream_svp_meta->v_total = stream_pstate_meta->method_subvp.phantom_vtotal;
}
bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out)
@@ -1879,7 +1902,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
set_bit_in_bitfield(&s->pmo_dcn4.stream_vactive_capability_mask, stream_index);
/* FAMS2 meta */
- build_fams2_meta_per_stream(pmo, display_config, stream_index);
+ build_pstate_meta_per_stream(pmo, display_config, stream_index);
/* SVP meta */
build_subvp_meta_per_stream(pmo, display_config, stream_index);
@@ -1939,9 +1962,6 @@ static void reset_display_configuration(struct display_configuation_with_meta *d
for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
display_config->stage3.stream_svp_meta[stream_index].valid = false;
-
- display_config->display_config.stream_descriptors[stream_index].overrides.minimize_active_latency_hiding = false;
- display_config->display_config.overrides.best_effort_min_active_latency_hiding_us = 0;
}
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
@@ -1974,7 +1994,6 @@ static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *
plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_force_drr;
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_drr;
-
}
}
}
@@ -2040,7 +2059,6 @@ static void setup_planes_for_vblank_by_mask(struct display_configuation_with_met
plane->overrides.reserved_vblank_time_ns);
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vblank;
-
}
}
}
@@ -2055,6 +2073,7 @@ static void setup_planes_for_vblank_drr_by_mask(struct display_configuation_with
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
plane = &display_config->display_config.plane_descriptors[plane_index];
+
plane->overrides.reserved_vblank_time_ns = (long)(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000);
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vblank_drr;
@@ -2076,8 +2095,8 @@ static void setup_planes_for_vactive_by_mask(struct display_configuation_with_me
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vactive;
if (!pmo->options->disable_vactive_det_fill_bw_pad) {
- display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
- (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
+ display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] =
+ (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
}
}
}
@@ -2097,8 +2116,8 @@ static void setup_planes_for_vactive_drr_by_mask(struct display_configuation_wit
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vactive_drr;
if (!pmo->options->disable_vactive_det_fill_bw_pad) {
- display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
- (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
+ display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] =
+ (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
}
}
}
@@ -2144,9 +2163,9 @@ static bool setup_display_config(struct display_configuation_with_meta *display_
/* copy FAMS2 meta */
if (success) {
display_config->stage3.fams2_required = fams2_required;
- memcpy(&display_config->stage3.stream_fams2_meta,
- &scratch->pmo_dcn4.stream_fams2_meta,
- sizeof(struct dml2_fams2_meta) * DML2_MAX_PLANES);
+ memcpy(&display_config->stage3.stream_pstate_meta,
+ &scratch->pmo_dcn4.stream_pstate_meta,
+ sizeof(struct dml2_pstate_meta) * DML2_MAX_PLANES);
}
return success;
@@ -2188,12 +2207,12 @@ bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_supp
return false;
for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) {
- struct dml2_fams2_meta *stream_fams2_meta = &s->pmo_dcn4.stream_fams2_meta[stream_index];
+ struct dml2_pstate_meta *stream_pstate_meta = &s->pmo_dcn4.stream_pstate_meta[stream_index];
if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive ||
s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) ||
- get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us) {
+ get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us) {
p_state_supported = false;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
index 6baab7ad6ecc..6baab7ad6ecc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c
index 7ed0242a4b33..55d2464365d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c
@@ -26,7 +26,7 @@ bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_pmo_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h
index 7218de1824cc..b90f6263cd85 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h
@@ -10,4 +10,4 @@
bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *out);
-#endif
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c
index e17b5ceba447..e17b5ceba447 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h
index e13b0c5939b0..e13b0c5939b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c
index 5a33e2f357f4..5a33e2f357f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c
index 5e14d85821e2..5e14d85821e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h
index 14d0ae03dce6..14d0ae03dce6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c
index 4a7c4c62111e..4a7c4c62111e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h
index 53bd8602f9ef..53bd8602f9ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h
new file mode 100644
index 000000000000..611c80f4f1bf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#ifndef __DML2_DEBUG_H__
+#define __DML2_DEBUG_H__
+
+#include "os_types.h"
+#define DML_ASSERT(condition) ASSERT(condition)
+#define DML_LOG_LEVEL_DEFAULT DML_LOG_LEVEL_WARN
+#define DML_LOG_INTERNAL(fmt, ...) dm_output_to_console(fmt, ## __VA_ARGS__)
+
+/* private helper macros */
+#define _BOOL_FORMAT(field) "%s", field ? "true" : "false"
+#define _UINT_FORMAT(field) "%u", field
+#define _INT_FORMAT(field) "%d", field
+#define _DOUBLE_FORMAT(field) "%lf", field
+#define _ELEMENT_FUNC "function"
+#define _ELEMENT_COMP_IF "component_interface"
+#define _ELEMENT_TOP_IF "top_interface"
+#define _LOG_ENTRY(element) do { \
+ DML_LOG_INTERNAL("<"element" name=\""); \
+ DML_LOG_INTERNAL(__func__); \
+ DML_LOG_INTERNAL("\">\n"); \
+} while (0)
+#define _LOG_EXIT(element) DML_LOG_INTERNAL("</"element">\n")
+#define _LOG_SCALAR(field, format) do { \
+ DML_LOG_INTERNAL(#field" = "format(field)); \
+ DML_LOG_INTERNAL("\n"); \
+} while (0)
+#define _LOG_ARRAY(field, size, format) do { \
+ DML_LOG_INTERNAL(#field " = ["); \
+ for (int _i = 0; _i < (int) size; _i++) { \
+ DML_LOG_INTERNAL(format(field[_i])); \
+ if (_i + 1 == (int) size) \
+ DML_LOG_INTERNAL("]\n"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+} while (0)
+#define _LOG_2D_ARRAY(field, size0, size1, format) do { \
+ DML_LOG_INTERNAL(#field" = ["); \
+ for (int _i = 0; _i < (int) size0; _i++) { \
+ DML_LOG_INTERNAL("\n\t["); \
+ for (int _j = 0; _j < (int) size1; _j++) { \
+ DML_LOG_INTERNAL(format(field[_i][_j])); \
+ if (_j + 1 == (int) size1) \
+ DML_LOG_INTERNAL("]"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+ if (_i + 1 == (int) size0) \
+ DML_LOG_INTERNAL("]\n"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+} while (0)
+#define _LOG_3D_ARRAY(field, size0, size1, size2, format) do { \
+ DML_LOG_INTERNAL(#field" = ["); \
+ for (int _i = 0; _i < (int) size0; _i++) { \
+ DML_LOG_INTERNAL("\n\t["); \
+ for (int _j = 0; _j < (int) size1; _j++) { \
+ DML_LOG_INTERNAL("["); \
+ for (int _k = 0; _k < (int) size2; _k++) { \
+ DML_LOG_INTERNAL(format(field[_i][_j][_k])); \
+ if (_k + 1 == (int) size2) \
+ DML_LOG_INTERNAL("]"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+ if (_j + 1 == (int) size1) \
+ DML_LOG_INTERNAL("]"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+ if (_i + 1 == (int) size0) \
+ DML_LOG_INTERNAL("]\n"); \
+ else \
+ DML_LOG_INTERNAL(", "); \
+ } \
+} while (0)
+
+/* fatal errors for unrecoverable DML states until a full reset */
+#define DML_LOG_LEVEL_FATAL 0
+/* unexpected but recoverable failures inside DML */
+#define DML_LOG_LEVEL_ERROR 1
+/* unexpected inputs or events to DML */
+#define DML_LOG_LEVEL_WARN 2
+/* high level tracing of DML interfaces */
+#define DML_LOG_LEVEL_INFO 3
+/* tracing of DML internal executions */
+#define DML_LOG_LEVEL_DEBUG 4
+/* detailed tracing of DML calculation procedure */
+#define DML_LOG_LEVEL_VERBOSE 5
+
+#ifndef DML_LOG_LEVEL
+#define DML_LOG_LEVEL DML_LOG_LEVEL_DEFAULT
+#endif /* #ifndef DML_LOG_LEVEL */
+
+/* public macros for DML_LOG_LEVEL_FATAL and up */
+#define DML_LOG_FATAL(fmt, ...) DML_LOG_INTERNAL("[DML FATAL] " fmt, ## __VA_ARGS__)
+
+/* public macros for DML_LOG_LEVEL_ERROR and up */
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_ERROR
+#define DML_LOG_ERROR(fmt, ...) DML_LOG_INTERNAL("[DML ERROR] " fmt, ## __VA_ARGS__)
+#define DML_ASSERT_MSG(condition, fmt, ...) \
+ do { \
+ if (!(condition)) { \
+ DML_LOG_ERROR("ASSERT hit in %s line %d\n", __func__, __LINE__); \
+ DML_LOG_ERROR(fmt, ## __VA_ARGS__); \
+ DML_ASSERT(condition); \
+ } \
+ } while (0)
+#else
+#define DML_LOG_ERROR(fmt, ...) ((void)0)
+#define DML_ASSERT_MSG(condition, fmt, ...) ((void)0)
+#endif
+
+/* public macros for DML_LOG_LEVEL_WARN and up */
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_WARN
+#define DML_LOG_WARN(fmt, ...) DML_LOG_INTERNAL("[DML WARN] " fmt, ## __VA_ARGS__)
+#else
+#define DML_LOG_WARN(fmt, ...) ((void)0)
+#endif
+
+/* public macros for DML_LOG_LEVEL_INFO and up */
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_INFO
+#define DML_LOG_INFO(fmt, ...) DML_LOG_INTERNAL("[DML INFO] " fmt, ## __VA_ARGS__)
+#define DML_LOG_TOP_IF_ENTER() _LOG_ENTRY(_ELEMENT_TOP_IF)
+#define DML_LOG_TOP_IF_EXIT() _LOG_EXIT(_ELEMENT_TOP_IF)
+#else
+#define DML_LOG_INFO(fmt, ...) ((void)0)
+#define DML_LOG_TOP_IF_ENTER() ((void)0)
+#define DML_LOG_TOP_IF_EXIT() ((void)0)
+#endif
+
+/* public macros for DML_LOG_LEVEL_DEBUG and up */
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_DEBUG
+#define DML_LOG_DEBUG(fmt, ...) DML_LOG_INTERNAL(fmt, ## __VA_ARGS__)
+#define DML_LOG_COMP_IF_ENTER() _LOG_ENTRY(_ELEMENT_COMP_IF)
+#define DML_LOG_COMP_IF_EXIT() _LOG_EXIT(_ELEMENT_COMP_IF)
+#define DML_LOG_FUNC_ENTER() _LOG_ENTRY(_ELEMENT_FUNC)
+#define DML_LOG_FUNC_EXIT() _LOG_EXIT(_ELEMENT_FUNC)
+#define DML_LOG_DEBUG_BOOL(field) _LOG_SCALAR(field, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_UINT(field) _LOG_SCALAR(field, _UINT_FORMAT)
+#define DML_LOG_DEBUG_INT(field) _LOG_SCALAR(field, _INT_FORMAT)
+#define DML_LOG_DEBUG_DOUBLE(field) _LOG_SCALAR(field, _DOUBLE_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_BOOL(field, size) _LOG_ARRAY(field, size, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_UINT(field, size) _LOG_ARRAY(field, size, _UINT_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_INT(field, size) _LOG_ARRAY(field, size, _INT_FORMAT)
+#define DML_LOG_DEBUG_ARRAY_DOUBLE(field, size) _LOG_ARRAY(field, size, _DOUBLE_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_BOOL(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_UINT(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _UINT_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_INT(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _INT_FORMAT)
+#define DML_LOG_DEBUG_2D_ARRAY_DOUBLE(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _DOUBLE_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_BOOL(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _BOOL_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_UINT(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _UINT_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_INT(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _INT_FORMAT)
+#define DML_LOG_DEBUG_3D_ARRAY_DOUBLE(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _DOUBLE_FORMAT)
+#else
+#define DML_LOG_DEBUG(fmt, ...) ((void)0)
+#define DML_LOG_COMP_IF_ENTER() ((void)0)
+#define DML_LOG_COMP_IF_EXIT() ((void)0)
+#define DML_LOG_FUNC_ENTER() ((void)0)
+#define DML_LOG_FUNC_EXIT() ((void)0)
+#define DML_LOG_DEBUG_BOOL(field) ((void)0)
+#define DML_LOG_DEBUG_UINT(field) ((void)0)
+#define DML_LOG_DEBUG_INT(field) ((void)0)
+#define DML_LOG_DEBUG_DOUBLE(field) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_BOOL(field, size) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_UINT(field, size) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_INT(field, size) ((void)0)
+#define DML_LOG_DEBUG_ARRAY_DOUBLE(field, size) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_BOOL(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_UINT(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_INT(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_2D_ARRAY_DOUBLE(field, size0, size1) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_BOOL(field, size0, size1, size2) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_UINT(field, size0, size1, size2) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_INT(field, size0, size1, size2) ((void)0)
+#define DML_LOG_DEBUG_3D_ARRAY_DOUBLE(field, size0, size1, size2) ((void)0)
+#endif
+
+/* public macros for DML_LOG_LEVEL_VERBOSE */
+#if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE
+#define DML_LOG_VERBOSE(fmt, ...) DML_LOG_INTERNAL(fmt, ## __VA_ARGS__)
+#else
+#define DML_LOG_VERBOSE(fmt, ...) ((void)0)
+#endif /* #if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE */
+#endif /* __DML2_DEBUG_H__ */
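The header added above is macro-only and is gated entirely at preprocessing time by DML_LOG_LEVEL. A minimal usage sketch (hypothetical caller and variable names, assuming only the macros shown above and a build with DML_LOG_LEVEL set to DML_LOG_LEVEL_DEBUG or higher):

    #include "dml2_debug.h"

    static void dump_support(const unsigned int *odm, int n, int vtotal)
    {
            DML_LOG_FUNC_ENTER();             /* emits <function name="dump_support"> */
            DML_LOG_DEBUG_INT(vtotal);        /* emits vtotal = <value> */
            DML_LOG_DEBUG_ARRAY_UINT(odm, n); /* emits odm = [v0, v1, ...] */
            DML_ASSERT_MSG(vtotal > 0, "bad vtotal %d\n", vtotal);
            DML_LOG_FUNC_EXIT();              /* emits </function> */
    }

At the default DML_LOG_LEVEL_WARN, the FUNC/DEBUG calls above compile to ((void)0) while DML_ASSERT_MSG stays active, so the instrumentation costs nothing in production builds.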
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
index 00688b9f1df4..1a6c0727cd2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
@@ -152,7 +152,7 @@ struct core_plane_support_info {
int active_latency_hiding_us;
int mall_svp_size_requirement_ways;
int nominal_vblank_pstate_latency_hiding_us;
- unsigned int dram_change_vactive_det_fill_delay_us;
+ int vactive_det_fill_delay_us[dml2_pstate_type_count];
};
struct core_stream_support_info {
@@ -202,11 +202,14 @@ struct dml2_core_mode_support_result {
} active;
unsigned int dispclk_khz;
+ unsigned int dpprefclk_khz;
+ unsigned int dtbrefclk_khz;
unsigned int dcfclk_deepsleep_khz;
unsigned int socclk_khz;
unsigned int uclk_pstate_supported;
unsigned int fclk_pstate_supported;
+ struct dml2_core_internal_watermarks watermarks;
} global;
struct {
@@ -253,56 +256,70 @@ struct dml2_implicit_svp_meta {
unsigned long v_front_porch;
};
-struct dml2_fams2_per_method_common_meta {
+struct dml2_pstate_per_method_common_meta {
/* generic params */
- unsigned int allow_start_otg_vline;
- unsigned int allow_end_otg_vline;
+ int allow_start_otg_vline;
+ int allow_end_otg_vline;
/* scheduling params */
double allow_time_us;
double disallow_time_us;
double period_us;
};
-struct dml2_fams2_meta {
+struct dml2_pstate_meta {
bool valid;
double otg_vline_time_us;
- unsigned int scheduling_delay_otg_vlines;
- unsigned int vertical_interrupt_ack_delay_otg_vlines;
- unsigned int allow_to_target_delay_otg_vlines;
- unsigned int contention_delay_otg_vlines;
- unsigned int min_allow_width_otg_vlines;
- unsigned int nom_vtotal;
- unsigned int vblank_start;
+ int scheduling_delay_otg_vlines;
+ int vertical_interrupt_ack_delay_otg_vlines;
+ int allow_to_target_delay_otg_vlines;
+ int contention_delay_otg_vlines;
+ int min_allow_width_otg_vlines;
+ int nom_vtotal;
+ int vblank_start;
double nom_refresh_rate_hz;
double nom_frame_time_us;
- unsigned int max_vtotal;
+ int max_vtotal;
double min_refresh_rate_hz;
double max_frame_time_us;
- unsigned int dram_clk_change_blackout_otg_vlines;
+ int blackout_otg_vlines;
+ int max_allow_delay_otg_vlines;
+ double nom_vblank_time_us;
struct {
double max_vactive_det_fill_delay_us;
- unsigned int max_vactive_det_fill_delay_otg_vlines;
- struct dml2_fams2_per_method_common_meta common;
+ double vactive_latency_hiding_us;
+ double reserved_vblank_required_us;
+ int max_vactive_det_fill_delay_otg_vlines;
+ int reserved_blank_required_vlines;
+ struct dml2_pstate_per_method_common_meta common;
} method_vactive;
struct {
- struct dml2_fams2_per_method_common_meta common;
+ struct dml2_pstate_per_method_common_meta common;
} method_vblank;
struct {
- unsigned int programming_delay_otg_vlines;
- unsigned int df_throttle_delay_otg_vlines;
- unsigned int prefetch_to_mall_delay_otg_vlines;
+ int programming_delay_otg_vlines;
+ int df_throttle_delay_otg_vlines;
+ int prefetch_to_mall_delay_otg_vlines;
unsigned long phantom_vactive;
unsigned long phantom_vfp;
unsigned long phantom_vtotal;
- struct dml2_fams2_per_method_common_meta common;
+ struct dml2_pstate_per_method_common_meta common;
} method_subvp;
struct {
- unsigned int programming_delay_otg_vlines;
- unsigned int stretched_vtotal;
- struct dml2_fams2_per_method_common_meta common;
+ int programming_delay_otg_vlines;
+ int stretched_vtotal;
+ struct dml2_pstate_per_method_common_meta common;
} method_drr;
};
+/* mask of synchronized timings by stream index */
+struct dml2_pmo_synchronized_timing_groups {
+ unsigned int num_timing_groups;
+ unsigned int synchronized_timing_group_masks[DML2_MAX_PLANES];
+ bool group_is_drr_enabled[DML2_MAX_PLANES];
+ bool group_is_drr_active[DML2_MAX_PLANES];
+ double group_line_time_us[DML2_MAX_PLANES];
+};
+
struct dml2_optimization_stage3_state {
bool performed;
bool success;
@@ -317,7 +334,7 @@ struct dml2_optimization_stage3_state {
// Meta-data for FAMS2
bool fams2_required;
- struct dml2_fams2_meta stream_fams2_meta[DML2_MAX_PLANES];
+ struct dml2_pstate_meta stream_pstate_meta[DML2_MAX_PLANES];
int min_clk_index_for_latency;
};
@@ -446,13 +463,17 @@ struct dml2_core_internal_state_intermediates {
};
struct dml2_core_mode_support_locals {
- struct dml2_core_calcs_mode_support_ex mode_support_ex_params;
+ union {
+ struct dml2_core_calcs_mode_support_ex mode_support_ex_params;
+ };
struct dml2_display_cfg svp_expanded_display_cfg;
struct dml2_calculate_mcache_allocation_in_out calc_mcache_allocation_params;
};
struct dml2_core_mode_programming_locals {
- struct dml2_core_calcs_mode_programming_ex mode_programming_ex_params;
+ union {
+ struct dml2_core_calcs_mode_programming_ex mode_programming_ex_params;
+ };
struct dml2_display_cfg svp_expanded_display_cfg;
};
@@ -466,6 +487,7 @@ struct dml2_core_scratch {
};
struct dml2_core_instance {
+ enum dml2_project_id project_id;
struct dml2_mcg_min_clock_table *minimum_clock_table;
struct dml2_core_internal_state_inputs inputs;
struct dml2_core_internal_state_intermediates intermediates;
@@ -613,6 +635,12 @@ struct dml2_pmo_optimize_for_stutter_in_out {
#define PMO_DCN4_MAX_NUM_VARIANTS 2
#define PMO_DCN4_MAX_BASE_STRATEGIES 10
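+/* scratch space for the per-group p-state scheduling checks (mirrors the
+ * "scheduling check locals" fields in struct dml2_pmo_scratch below)
+ */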
+struct dml2_scheduling_check_locals {
+ struct dml2_pstate_per_method_common_meta group_common_pstate_meta[DML2_MAX_PLANES];
+ unsigned int sorted_group_gtl_disallow_index[DML2_MAX_PLANES];
+ unsigned int sorted_group_gtl_period_index[DML2_MAX_PLANES];
+};
+
struct dml2_pmo_scratch {
union {
struct {
@@ -642,7 +670,7 @@ struct dml2_pmo_scratch {
// Stores all the implicit SVP meta information indexed by stream index of the display
// configuration under inspection, built at optimization stage init
struct dml2_implicit_svp_meta stream_svp_meta[DML2_MAX_PLANES];
- struct dml2_fams2_meta stream_fams2_meta[DML2_MAX_PLANES];
+ struct dml2_pstate_meta stream_pstate_meta[DML2_MAX_PLANES];
unsigned int optimal_vblank_reserved_time_for_stutter_us[DML2_PMO_STUTTER_CANDIDATE_LIST_SIZE];
unsigned int num_stutter_candidates;
@@ -657,7 +685,7 @@ struct dml2_pmo_scratch {
double group_line_time_us[DML2_MAX_PLANES];
/* scheduling check locals */
- struct dml2_fams2_per_method_common_meta group_common_fams2_meta[DML2_MAX_PLANES];
+ struct dml2_pstate_per_method_common_meta group_common_pstate_meta[DML2_MAX_PLANES];
unsigned int sorted_group_gtl_disallow_index[DML2_MAX_PLANES];
unsigned int sorted_group_gtl_period_index[DML2_MAX_PLANES];
double group_phase_offset[DML2_MAX_PLANES];
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
index 5f1b49a50049..4cfe64aa8492 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
@@ -473,7 +473,6 @@ static void sort_pipes_for_splitting(struct dc_plane_pipe_pool *pipes)
{
bool sorted, swapped;
unsigned int cur_index;
- unsigned int temp;
int odm_slice_index;
for (odm_slice_index = 0; odm_slice_index < pipes->num_pipes_assigned_to_plane_for_odm_combine; odm_slice_index++) {
@@ -489,9 +488,8 @@ static void sort_pipes_for_splitting(struct dc_plane_pipe_pool *pipes)
swapped = false;
while (!sorted) {
if (pipes->pipes_assigned_to_plane[odm_slice_index][cur_index] > pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1]) {
- temp = pipes->pipes_assigned_to_plane[odm_slice_index][cur_index];
- pipes->pipes_assigned_to_plane[odm_slice_index][cur_index] = pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1];
- pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1] = temp;
+ swap(pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1],
+ pipes->pipes_assigned_to_plane[odm_slice_index][cur_index]);
swapped = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h
index 1538b708d8be..1538b708d8be 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h
index 7ca7f2a743c2..7ca7f2a743c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h
index 140ec01545db..55b3e3ca54f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h
@@ -23,7 +23,7 @@
* Authors: AMD
*
*/
-
+
#ifndef __DML2_INTERNAL_TYPES_H__
#define __DML2_INTERNAL_TYPES_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
index 6b3b8803e0ae..66040c877d68 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
@@ -24,6 +24,7 @@
*
*/
+
#include "dml2_dc_types.h"
#include "dml2_internal_types.h"
#include "dml2_utils.h"
@@ -654,14 +655,14 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state
unsigned int svp_height,
unsigned int svp_vstartup)
{
- unsigned int i, pipe_idx;
+ unsigned int i;
double line_time, fp_and_sync_width_time;
struct pipe_ctx *pipe;
uint32_t phantom_vactive, phantom_bp, pstate_width_fw_delay_lines;
static const double cvt_rb_vblank_max = ((double) 460 / (1000 * 1000));
// Find DML pipe index (pipe_idx) using dc_pipe_idx
- for (i = 0, pipe_idx = 0; i < ctx->config.dcn_pipe_count; i++) {
+ for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
pipe = &state->res_ctx.pipe_ctx[i];
if (!pipe->stream)
@@ -669,8 +670,6 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state
if (i == dc_pipe_idx)
break;
-
- pipe_idx++;
}
// Calculate lines required for pstate allow width and FW processing delays
@@ -868,7 +867,7 @@ bool dml2_svp_remove_all_phantom_pipes(struct dml2_context *ctx, struct dc_state
/* Conditions for setting up phantom pipes for SubVP:
* 1. Not force disable SubVP
- * 2. Full update (i.e. !fast_validate)
+ * 2. Full update (i.e. DC_VALIDATE_MODE_AND_PROGRAMMING)
* 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
* 4. Display configuration passes validation
* 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h
index 9d64851f54e7..9d64851f54e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
index ef693f608d59..ef693f608d59 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h
index e83e05248592..e83e05248592 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c
index 5de775fd8fce..d834cb595afa 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c
@@ -301,6 +301,7 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
out->pct_ideal_dram_bw_after_urgent_pixel_only = 65.0;
break;
+
case dml_project_dcn401:
out->pct_ideal_fabric_bw_after_urgent = 76; //67;
out->max_avg_sdp_bw_use_normal_percent = 75; //80;
@@ -424,6 +425,8 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
p->in_states->state_array[1].dcfclk_mhz = 1434.0;
p->in_states->state_array[1].dram_speed_mts = 1000 * transactions_per_mem_clock;
break;
+
case dml_project_dcn401:
p->in_states->num_states = 2;
transactions_per_mem_clock = 16;
@@ -953,6 +956,7 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
out->SourcePixelFormat[location] = dml_420_10;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
out->SourcePixelFormat[location] = dml_444_64;
@@ -1188,22 +1192,6 @@ static unsigned int map_plane_to_dml_display_cfg(const struct dml2_context *dml2
return location;
}
-static void apply_legacy_svp_drr_settings(struct dml2_context *dml2, const struct dc_state *state, struct dml_display_cfg_st *dml_dispcfg)
-{
- int i;
-
- if (state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
- ASSERT(state->stream_count == 1);
- dml_dispcfg->timing.DRRDisplay[0] = true;
- } else if (state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid) {
-
- for (i = 0; i < dml_dispcfg->num_timings; i++) {
- if (dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i] == state->streams[state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index]->stream_id)
- dml_dispcfg->timing.DRRDisplay[i] = true;
- }
- }
-}
-
static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2, struct dc_state *state)
{
unsigned int i;
@@ -1436,9 +1424,6 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
}
}
}
-
- if (!dml2->config.use_native_pstate_optimization)
- apply_legacy_svp_drr_settings(dml2, context, dml_dispcfg);
}
void dml2_update_pipe_ctx_dchub_regs(struct _vcs_dpi_dml_display_rq_regs_st *rq_regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h
index d764773938f4..d764773938f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c
index 9a33158b63bf..9a33158b63bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h
index 04fcfe637119..04fcfe637119 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
index 525b7d04bf84..9deb03a18ccc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
@@ -24,8 +24,6 @@
*
*/
-#include <linux/vmalloc.h>
-
#include "display_mode_core.h"
#include "dml2_internal_types.h"
#include "dml2_utils.h"
@@ -95,12 +93,17 @@ static void map_hw_resources(struct dml2_context *dml2,
static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2,
const struct dml_display_cfg_st *display_cfg,
- struct dml_mode_support_info_st *evaluation_info)
+ struct dml_mode_support_info_st *evaluation_info,
+ enum dc_validate_mode validate_mode)
{
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
s->mode_support_params.mode_lib = &dml2->v20.dml_core_ctx;
s->mode_support_params.in_display_cfg = display_cfg;
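+ /* DC_VALIDATE_MODE_ONLY only needs a support/no-support answer, so the
+ * sweep starts at the highest state index instead of walking up from 0.
+ */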
+ if (validate_mode == DC_VALIDATE_MODE_ONLY)
+ s->mode_support_params.in_start_state_idx = dml2->v20.dml_core_ctx.states.num_states - 1;
+ else
+ s->mode_support_params.in_start_state_idx = 0;
s->mode_support_params.out_evaluation_info = evaluation_info;
memset(evaluation_info, 0, sizeof(struct dml_mode_support_info_st));
@@ -112,10 +115,8 @@ static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2,
static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrapper_optimize_configuration_params *p)
{
int unused_dpps = p->ip_params->max_num_dpp;
- int i, j;
- int odms_needed, refresh_rate_hz, dpps_needed, subvp_height, pstate_width_fw_delay_lines, surface_count;
- int subvp_timing_to_add, new_timing_index, subvp_surface_to_add, new_surface_index;
- float frame_time_sec, max_frame_time_sec;
+ int i;
+ int odms_needed;
int largest_blend_and_timing = 0;
bool optimization_done = false;
@@ -130,79 +131,6 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe
if (p->new_display_config != p->cur_display_config)
*p->new_display_config = *p->cur_display_config;
- // Optimize P-State Support
- if (dml2->config.use_native_pstate_optimization) {
- if (p->cur_mode_support_info->DRAMClockChangeSupport[0] == dml_dram_clock_change_unsupported) {
- // Find a display with < 120Hz refresh rate with maximal refresh rate that's not already subvp
- subvp_timing_to_add = -1;
- subvp_surface_to_add = -1;
- max_frame_time_sec = 0;
- surface_count = 0;
- for (i = 0; i < (int) p->cur_display_config->num_timings; i++) {
- refresh_rate_hz = (int)div_u64((unsigned long long) p->cur_display_config->timing.PixelClock[i] * 1000 * 1000,
- (p->cur_display_config->timing.HTotal[i] * p->cur_display_config->timing.VTotal[i]));
- if (refresh_rate_hz < 120) {
- // Check its upstream surfaces to see if this one could be converted to subvp.
- dpps_needed = 0;
- for (j = 0; j < (int) p->cur_display_config->num_surfaces; j++) {
- if (p->cur_display_config->plane.BlendingAndTiming[j] == i &&
- p->cur_display_config->plane.UseMALLForPStateChange[j] == dml_use_mall_pstate_change_disable) {
- dpps_needed += p->cur_mode_support_info->DPPPerSurface[j];
- subvp_surface_to_add = j;
- surface_count++;
- }
- }
-
- if (surface_count == 1 && dpps_needed > 0 && dpps_needed <= unused_dpps) {
- frame_time_sec = (float)1 / refresh_rate_hz;
- if (frame_time_sec > max_frame_time_sec) {
- max_frame_time_sec = frame_time_sec;
- subvp_timing_to_add = i;
- }
- }
- }
- }
- if (subvp_timing_to_add >= 0) {
- new_timing_index = p->new_display_config->num_timings++;
- new_surface_index = p->new_display_config->num_surfaces++;
- // Add a phantom pipe reflecting the main pipe's timing
- dml2_util_copy_dml_timing(&p->new_display_config->timing, new_timing_index, subvp_timing_to_add);
-
- pstate_width_fw_delay_lines = (int)(((double)(p->config->svp_pstate.subvp_fw_processing_delay_us +
- p->config->svp_pstate.subvp_pstate_allow_width_us) / 1000000) *
- (p->new_display_config->timing.PixelClock[subvp_timing_to_add] * 1000 * 1000) /
- (double)p->new_display_config->timing.HTotal[subvp_timing_to_add]);
-
- subvp_height = p->cur_mode_support_info->SubViewportLinesNeededInMALL[subvp_timing_to_add] + pstate_width_fw_delay_lines;
-
- p->new_display_config->timing.VActive[new_timing_index] = subvp_height;
- p->new_display_config->timing.VTotal[new_timing_index] = subvp_height +
- p->new_display_config->timing.VTotal[subvp_timing_to_add] - p->new_display_config->timing.VActive[subvp_timing_to_add];
-
- p->new_display_config->output.OutputDisabled[new_timing_index] = true;
-
- p->new_display_config->plane.UseMALLForPStateChange[subvp_surface_to_add] = dml_use_mall_pstate_change_sub_viewport;
-
- dml2_util_copy_dml_plane(&p->new_display_config->plane, new_surface_index, subvp_surface_to_add);
- dml2_util_copy_dml_surface(&p->new_display_config->surface, new_surface_index, subvp_surface_to_add);
-
- p->new_display_config->plane.ViewportHeight[new_surface_index] = subvp_height;
- p->new_display_config->plane.ViewportHeightChroma[new_surface_index] = subvp_height;
- p->new_display_config->plane.ViewportStationary[new_surface_index] = false;
-
- p->new_display_config->plane.UseMALLForStaticScreen[new_surface_index] = dml_use_mall_static_screen_disable;
- p->new_display_config->plane.UseMALLForPStateChange[new_surface_index] = dml_use_mall_pstate_change_phantom_pipe;
-
- p->new_display_config->plane.NumberOfCursors[new_surface_index] = 0;
-
- p->new_policy->ImmediateFlipRequirement[new_surface_index] = dml_immediate_flip_not_required;
-
- p->new_display_config->plane.BlendingAndTiming[new_surface_index] = new_timing_index;
-
- optimization_done = true;
- }
- }
- }
// Optimize Clocks
if (!optimization_done) {
@@ -226,7 +154,8 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe
return optimization_done;
}
-static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state)
+static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state,
+ enum dc_validate_mode validate_mode)
{
struct dml2_calculate_lowest_supported_state_for_temp_read_scratch *s = &dml2->v20.scratch.dml2_calculate_lowest_supported_state_for_temp_read_scratch;
struct dml2_wrapper_scratch *s_global = &dml2->v20.scratch;
@@ -268,7 +197,8 @@ static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *d
dml2->v20.dml_core_ctx.states.state_array[j].dram_clock_change_latency_us = s_global->dummy_pstate_table[i].dummy_pstate_latency_us;
}
- dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info);
+ dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info,
+ validate_mode);
if (dml_result && s->evaluation_info.DRAMClockChangeSupport[0] == dml_dram_clock_change_vactive) {
map_hw_resources(dml2, &s->cur_display_config, &s->evaluation_info);
@@ -333,7 +263,8 @@ static bool does_configuration_meet_sw_policies(struct dml2_context *ctx, const
}
static bool dml_mode_support_wrapper(struct dml2_context *dml2,
- struct dc_state *display_state)
+ struct dc_state *display_state,
+ enum dc_validate_mode validate_mode)
{
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
unsigned int result = 0, i;
@@ -369,7 +300,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
result = pack_and_call_dml_mode_support_ex(dml2,
&s->cur_display_config,
- &s->mode_support_info);
+ &s->mode_support_info,
+ validate_mode);
if (result)
result = does_configuration_meet_sw_policies(dml2, &s->cur_display_config, &s->mode_support_info);
@@ -390,7 +322,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
dml2->v20.dml_core_ctx.policy = s->new_policy;
optimized_result = pack_and_call_dml_mode_support_ex(dml2,
&s->new_display_config,
- &s->mode_support_info);
+ &s->mode_support_info,
+ validate_mode);
if (optimized_result)
optimized_result = does_configuration_meet_sw_policies(dml2, &s->new_display_config, &s->mode_support_info);
@@ -409,7 +342,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
if (!optimized_result) {
result = pack_and_call_dml_mode_support_ex(dml2,
&s->cur_display_config,
- &s->mode_support_info);
+ &s->mode_support_info,
+ validate_mode);
}
}
@@ -419,118 +353,7 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2,
return result;
}
-static int find_drr_eligible_stream(struct dc_state *display_state)
-{
- int i;
-
- for (i = 0; i < display_state->stream_count; i++) {
- if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE
- && display_state->streams[i]->ignore_msa_timing_param) {
- // Use ignore_msa_timing_param flag to identify as DRR
- return i;
- }
- }
-
- return -1;
-}
-
-static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct dc_state *display_state)
-{
- struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
- bool pstate_optimization_done = false;
- bool pstate_optimization_success = false;
- bool result = false;
- int drr_display_index = 0, non_svp_streams = 0;
- bool force_svp = dml2->config.svp_pstate.force_enable_subvp;
-
- display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false;
-
- result = dml_mode_support_wrapper(dml2, display_state);
-
- if (!result) {
- pstate_optimization_done = true;
- } else if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported && !force_svp) {
- pstate_optimization_success = true;
- pstate_optimization_done = true;
- }
-
- if (display_state->stream_count == 1 && dml2->config.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch(dml2->config.callbacks.dc, display_state)) {
- display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true;
-
- result = dml_mode_support_wrapper(dml2, display_state);
- } else {
- non_svp_streams = display_state->stream_count;
-
- while (!pstate_optimization_done) {
- result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);
-
- // Always try adding SVP first
- if (result)
- result = dml2_svp_add_phantom_pipe_to_dc_state(dml2, display_state, &s->mode_support_info);
- else
- pstate_optimization_done = true;
-
-
- if (result) {
- result = dml_mode_support_wrapper(dml2, display_state);
- } else {
- pstate_optimization_done = true;
- }
-
- if (result) {
- non_svp_streams--;
-
- if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) {
- if (dml2_svp_validate_static_schedulability(dml2, display_state, s->mode_support_info.DRAMClockChangeSupport[0])) {
- pstate_optimization_success = true;
- pstate_optimization_done = true;
- } else {
- pstate_optimization_success = false;
- pstate_optimization_done = false;
- }
- } else {
- drr_display_index = find_drr_eligible_stream(display_state);
-
- // If there is only 1 remaining non SubVP pipe that is DRR, check static
- // schedulability for SubVP + DRR.
- if (non_svp_streams == 1 && drr_display_index >= 0) {
- if (dml2_svp_drr_schedulable(dml2, display_state, &display_state->streams[drr_display_index]->timing)) {
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = true;
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index = drr_display_index;
- result = dml_mode_support_wrapper(dml2, display_state);
- }
-
- if (result && s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) {
- pstate_optimization_success = true;
- pstate_optimization_done = true;
- } else {
- pstate_optimization_success = false;
- pstate_optimization_done = false;
- }
- }
-
- if (pstate_optimization_success) {
- pstate_optimization_done = true;
- } else {
- pstate_optimization_done = false;
- }
- }
- }
- }
- }
-
- if (!pstate_optimization_success) {
- dml2_svp_remove_all_phantom_pipes(dml2, display_state);
- display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
- display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false;
- result = dml_mode_support_wrapper(dml2, display_state);
- }
-
- return result;
-}
-
-static bool call_dml_mode_support_and_programming(struct dc_state *context)
+static bool call_dml_mode_support_and_programming(struct dc_state *context, enum dc_validate_mode validate_mode)
{
unsigned int result = 0;
unsigned int min_state = 0;
@@ -544,16 +367,13 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context)
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
if (!context->streams[0]->sink->link->dc->caps.is_apu) {
- min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context);
+ min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context,
+ validate_mode);
ASSERT(min_state_for_g6_temp_read >= 0);
}
- if (!dml2->config.use_native_pstate_optimization) {
- result = optimize_pstate_with_svp_and_drr(dml2, context);
- } else {
- result = dml_mode_support_wrapper(dml2, context);
- }
+ result = dml_mode_support_wrapper(dml2, context, validate_mode);
/* Upon trying to set certain frequencies in FRL, min_state_for_g6_temp_read is reported as -1. This leads to an invalid value of min_state causing crashes later on.
* Use the default logic for min_state only when min_state_for_g6_temp_read is a valid value. In other cases, use the value calculated by the DML directly.
@@ -575,7 +395,8 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context)
return result;
}
-static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context)
+static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context,
+ enum dc_validate_mode validate_mode)
{
struct dml2_context *dml2 = context->bw_ctx.dml2;
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
@@ -611,7 +432,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
copy_dummy_pstate_table(s->dummy_pstate_table, in_dc->clk_mgr->bw_params->dummy_pstate_table, 4);
- result = call_dml_mode_support_and_programming(context);
+ result = call_dml_mode_support_and_programming(context, validate_mode);
/* Call map dc pipes to map the pipes based on the DML output. For correctly determining if recalculation
* is required or not, the resource context needs to correctly reflect the number of active pipes. We would
* only know the correct number if active pipes after dml2_map_dc_pipes is called.
@@ -628,7 +449,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
need_recalculation = dml2_verify_det_buffer_configuration(dml2, context, &dml2->det_helper_scratch);
if (need_recalculation) {
/* Engage the DML again if recalculation is required. */
- call_dml_mode_support_and_programming(context);
+ call_dml_mode_support_and_programming(context, validate_mode);
if (!dml2->config.skip_hw_state_mapping) {
dml2_map_dc_pipes(dml2, context, &s->cur_display_config, &s->dml_to_dc_pipe_mapping, in_dc->current_state);
}
@@ -684,7 +505,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
return result;
}
-static bool dml2_validate_only(struct dc_state *context)
+static bool dml2_validate_only(struct dc_state *context, enum dc_validate_mode validate_mode)
{
struct dml2_context *dml2;
unsigned int result = 0;
@@ -708,12 +529,13 @@ static bool dml2_validate_only(struct dc_state *context)
result = pack_and_call_dml_mode_support_ex(dml2,
&dml2->v20.scratch.cur_display_config,
- &dml2->v20.scratch.mode_support_info);
+ &dml2->v20.scratch.mode_support_info,
+ validate_mode);
if (result)
result = does_configuration_meet_sw_policies(dml2, &dml2->v20.scratch.cur_display_config, &dml2->v20.scratch.mode_support_info);
- return (result == 1) ? true : false;
+ return result == 1;
}
static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *dml2)
@@ -723,7 +545,8 @@ static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *d
}
}
-bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, bool fast_validate)
+bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2,
+ enum dc_validate_mode validate_mode)
{
bool out = false;
@@ -733,17 +556,17 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2
/* DML2.1 validation path */
if (dml2->architecture == dml2_architecture_21) {
- out = dml21_validate(in_dc, context, dml2, fast_validate);
+ out = dml21_validate(in_dc, context, dml2, validate_mode);
return out;
}
DC_FP_START();
- /* Use dml_validate_only for fast_validate path */
- if (fast_validate)
- out = dml2_validate_only(context);
+ /* Use dml2_validate_only for the DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX paths */
+ if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING)
+ out = dml2_validate_only(context, validate_mode);
else
- out = dml2_validate_and_build_resource(in_dc, context);
+ out = dml2_validate_and_build_resource(in_dc, context, validate_mode);
DC_FP_END();
@@ -757,8 +580,8 @@ static inline struct dml2_context *dml2_allocate_memory(void)
static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) {
- dml21_reinit(in_dc, dml2, config);
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) {
+ dml21_reinit(in_dc, *dml2, config);
return;
}
@@ -803,9 +626,7 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
// TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21)
- && (in_dc->ctx->dce_version == DCN_VERSION_4_01
- ))
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01))
return dml21_create(in_dc, dml2, config);
// Allocate Mode Lib Ctx
@@ -874,8 +695,8 @@ void dml2_reinit(const struct dc *in_dc,
const struct dml2_configuration_options *config,
struct dml2_context **dml2)
{
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) {
- dml21_reinit(in_dc, dml2, config);
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) {
+ dml21_reinit(in_dc, *dml2, config);
return;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h
index 5100f269368e..c384e141cebc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h
@@ -240,7 +240,7 @@ struct dml2_configuration_options {
bool use_clock_dc_limits;
bool gpuvm_enable;
bool force_tdlut_enable;
- struct dml2_soc_bb *bb_from_dmub;
+ void *bb_from_dmub;
};
/*
@@ -272,7 +272,7 @@ void dml2_reinit(const struct dc *in_dc,
* dml2_validate - Determines if a display configuration is supported or not.
* @in_dc: dc.
* @context: dc_state to be validated.
- * @fast_validate: Fast validate will not populate context.res_ctx.
+ * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX will not populate context.res_ctx.
*
* DML1.0 compatible interface for validation.
*
@@ -295,7 +295,7 @@ void dml2_reinit(const struct dc *in_dc,
bool dml2_validate(const struct dc *in_dc,
struct dc_state *context,
struct dml2_context *dml2,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
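With the bool gone, call sites now name their intent explicitly. A hedged caller sketch (hypothetical flow; dc, context and dml2 stand in for the real objects):

    /* cheap support check first; build resource state only when it passes */
    if (!dml2_validate(dc, context, dml2, DC_VALIDATE_MODE_ONLY))
            return false;
    return dml2_validate(dc, context, dml2, DC_VALIDATE_MODE_AND_PROGRAMMING);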
/*
* dml2_extract_dram_and_fclk_change_support - Extracts the FCLK and UCLK change support info.
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h
index 17f0972b1af7..17f0972b1af7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h
index f7d30b47beff..d459f93cf40b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h
@@ -31,3 +31,4 @@
*/
#include "os_types.h"
#include "cmntypes.h"
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c
index 00d22e542469..00d22e542469 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h
index bf491cf0582d..bf491cf0582d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h
index 2a2f84e07ca8..7fadbe6d7af4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h
@@ -23,6 +23,7 @@
* Authors: AMD
*
*/
+
#ifndef __DML_LOGGING_H__
#define __DML_LOGGING_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
index 75fb77bca83b..ce91e5d28956 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
@@ -199,6 +199,8 @@ void dpp_reset(struct dpp *dpp_base)
memset(&dpp->scl_data, 0, sizeof(dpp->scl_data));
memset(&dpp->pwl_data, 0, sizeof(dpp->pwl_data));
+
+ dpp_base->cursor_offload = false;
}
@@ -484,10 +486,12 @@ void dpp1_set_cursor_position(
cur_en = 0; /* not visible beyond top edge*/
if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
- REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
-
- dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
}
+
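+ /* cache the intended enable state even when offload owns the register write */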
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ dpp_base->att.cur0_ctl.bits.cur0_enable = cur_en;
}
void dpp1_cnv_set_optional_cursor_attributes(
@@ -497,8 +501,13 @@ void dpp1_cnv_set_optional_cursor_attributes(
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
if (attr) {
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale);
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale);
+ }
+
+ dpp_base->att.fp_scale_bias.bits.fp_bias = attr->bias;
+ dpp_base->att.fp_scale_bias.bits.fp_scale = attr->scale;
}
}
@@ -520,6 +529,15 @@ void dpp1_dppclk_control(
REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 0);
}
+void dpp_force_disable_cursor(struct dpp *dpp_base)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ /* Force disable cursor */
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, 0);
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = 0;
+}
+
static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_read_state = dpp_read_state,
.dpp_reset = dpp_reset,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
index c48139bed11f..b12f34345a58 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
@@ -1348,7 +1348,8 @@ struct dcn_dpp_mask {
uint32_t CURSOR0_COLOR1; \
uint32_t DPP_CONTROL; \
uint32_t CM_HDR_MULT_COEF; \
- uint32_t CURSOR0_FP_SCALE_BIAS;
+ uint32_t CURSOR0_FP_SCALE_BIAS; \
+ uint32_t OBUF_CONTROL;
struct dcn_dpp_registers {
DPP_COMMON_REG_VARIABLE_LIST
@@ -1450,7 +1451,6 @@ void dpp1_set_degamma(
void dpp1_set_degamma_pwl(struct dpp *dpp_base,
const struct pwl_params *params);
-
void dpp_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s);
@@ -1525,4 +1525,6 @@ void dpp1_construct(struct dcn10_dpp *dpp1,
void dpp1_cm_get_gamut_remap(struct dpp *dpp_base,
struct dpp_grph_csc_adjustment *adjust);
+void dpp_force_disable_cursor(struct dpp *dpp_base);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index 2d70586cef40..ef4a16117181 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -84,6 +84,22 @@ void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s)
}
}
+void dpp30_read_reg_state(struct dpp *dpp_base, struct dcn_dpp_reg_state *dpp_reg_state)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
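+ /* raw snapshot of the current DPP registers; values are captured as-is */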
+ dpp_reg_state->recout_start = REG_READ(RECOUT_START);
+ dpp_reg_state->recout_size = REG_READ(RECOUT_SIZE);
+ dpp_reg_state->scl_horz_filter_scale_ratio = REG_READ(SCL_HORZ_FILTER_SCALE_RATIO);
+ dpp_reg_state->scl_vert_filter_scale_ratio = REG_READ(SCL_VERT_FILTER_SCALE_RATIO);
+ dpp_reg_state->scl_mode = REG_READ(SCL_MODE);
+ dpp_reg_state->cm_control = REG_READ(CM_CONTROL);
+ dpp_reg_state->dpp_control = REG_READ(DPP_CONTROL);
+ dpp_reg_state->dscl_control = REG_READ(DSCL_CONTROL);
+ dpp_reg_state->obuf_control = REG_READ(OBUF_CONTROL);
+ dpp_reg_state->mpc_size = REG_READ(MPC_SIZE);
+}
+
/*program post scaler scs block in dpp CM*/
void dpp3_program_post_csc(
struct dpp *dpp_base,
@@ -396,17 +412,21 @@ void dpp3_set_cursor_attributes(
}
}
- REG_UPDATE_3(CURSOR0_CONTROL,
- CUR0_MODE, color_format,
- CUR0_EXPANSION_MODE, 0,
- CUR0_ROM_EN, cur_rom_en);
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE_3(CURSOR0_CONTROL,
+ CUR0_MODE, color_format,
+ CUR0_EXPANSION_MODE, 0,
+ CUR0_ROM_EN, cur_rom_en);
if (color_format == CURSOR_MODE_MONO) {
/* todo: clarify what to program these to */
- REG_UPDATE(CURSOR0_COLOR0,
- CUR0_COLOR0, 0x00000000);
- REG_UPDATE(CURSOR0_COLOR1,
- CUR0_COLOR1, 0xFFFFFFFF);
+
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_COLOR0,
+ CUR0_COLOR0, 0x00000000);
+ REG_UPDATE(CURSOR0_COLOR1,
+ CUR0_COLOR1, 0xFFFFFFFF);
+ }
}
dpp_base->att.cur0_ctl.bits.expansion_mode = 0;
@@ -578,9 +598,6 @@ static void dpp3_power_on_blnd_lut(
dpp_base->ctx->dc->optimized_required = true;
dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;
}
- } else {
- REG_SET(CM_MEM_PWR_CTRL, 0,
- BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
}
}
@@ -1494,6 +1511,7 @@ static struct dpp_funcs dcn30_dpp_funcs = {
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
.dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
+ .dpp_force_disable_cursor = dpp_force_disable_cursor,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
index f236824126e9..d4a70b4379ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
@@ -594,6 +594,8 @@ void dpp3_program_CM_dealpha(
void dpp30_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s);
+void dpp30_read_reg_state(struct dpp *dpp_base, struct dcn_dpp_reg_state *dpp_reg_state);
+
bool dpp3_get_optimal_number_of_taps(
struct dpp *dpp,
struct scaler_data *scl_data,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
index fa67e54bf94e..8a5aa5e86850 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
@@ -134,6 +134,7 @@ static struct dpp_funcs dcn32_dpp_funcs = {
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
.dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
+ .dpp_read_reg_state = dpp30_read_reg_state,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
index f7a373a3d70a..977d83bf7741 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
@@ -95,6 +95,7 @@ void dpp35_program_bias_and_scale_fcnv(
static struct dpp_funcs dcn35_dpp_funcs = {
.dpp_program_gamcor_lut = dpp3_program_gamcor_lut,
.dpp_read_state = dpp30_read_state,
+ .dpp_read_reg_state = dpp30_read_reg_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
.dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
index 97bf26fa3573..96c2c853de42 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
@@ -231,7 +231,7 @@ static struct dpp_funcs dcn401_dpp_funcs = {
.dpp_program_regamma_pwl = NULL,
.dpp_set_pre_degam = dpp3_set_pre_degam,
.dpp_program_input_lut = NULL,
- .dpp_full_bypass = dpp401_full_bypass,
+ .dpp_full_bypass = NULL,
.dpp_setup = dpp401_dpp_setup,
.dpp_program_degamma_pwl = NULL,
.dpp_program_cm_dealpha = dpp3_program_cm_dealpha,
@@ -248,6 +248,7 @@ static struct dpp_funcs dcn401_dpp_funcs = {
.set_optional_cursor_attributes = dpp401_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
+ .dpp_read_reg_state = dpp30_read_reg_state,
.set_cursor_matrix = dpp401_set_cursor_matrix,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
index ecaa976e1f52..5f6b431ec398 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
@@ -641,6 +641,7 @@
uint32_t ISHARP_DELTA_DATA; \
uint32_t ISHARP_DELTA_INDEX; \
uint32_t ISHARP_NLDELTA_SOFT_CLIP
+
struct dcn401_dpp_registers {
DPP_REG_VARIABLE_LIST_DCN401;
};
@@ -672,6 +673,16 @@ struct dcn401_dpp {
struct pwl_params pwl_data;
};
+enum dcn401_dscl_mode_sel {
+ DCN401_DSCL_MODE_SCALING_444_BYPASS = 0,
+ DCN401_DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
+ DCN401_DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
+ DCN401_DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
+ DCN401_DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
+ DCN401_DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
+ DCN401_DSCL_MODE_DSCL_BYPASS = 6
+};
+
bool dpp401_construct(struct dcn401_dpp *dpp401,
struct dc_context *ctx,
uint32_t inst,
@@ -683,8 +694,6 @@ void dpp401_dscl_set_scaler_manual_scale(
struct dpp *dpp_base,
const struct scaler_data *scl_data);
-void dpp401_full_bypass(struct dpp *dpp_base);
-
void dpp401_dpp_setup(
struct dpp *dpp_base,
enum surface_pixel_format format,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
index 712aff7e17f7..62bf7cea21d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
@@ -88,30 +88,6 @@ enum dscl_mode_sel {
DSCL_MODE_DSCL_BYPASS = 6
};
-void dpp401_full_bypass(struct dpp *dpp_base)
-{
- struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
-
- /* Input pixel format: ARGB8888 */
- REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
- CNVC_SURFACE_PIXEL_FORMAT, 0x8);
-
- /* Zero expansion */
- REG_SET_3(FORMAT_CONTROL, 0,
- CNVC_BYPASS, 0,
- FORMAT_CONTROL__ALPHA_EN, 0,
- FORMAT_EXPANSION_MODE, 0);
-
- /* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */
- if (dpp->tf_mask->CM_BYPASS_EN)
- REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1);
- else
- REG_SET(CM_CONTROL, 0, CM_BYPASS, 1);
-
- /* Setting degamma bypass for now */
- REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0);
-}
-
void dpp401_set_cursor_attributes(
struct dpp *dpp_base,
struct dc_cursor_attributes *cursor_attributes)
@@ -127,17 +103,21 @@ void dpp401_set_cursor_attributes(
}
}
- REG_UPDATE_3(CURSOR0_CONTROL,
- CUR0_MODE, color_format,
- CUR0_EXPANSION_MODE, 0,
- CUR0_ROM_EN, cur_rom_en);
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE_3(CURSOR0_CONTROL,
+ CUR0_MODE, color_format,
+ CUR0_EXPANSION_MODE, 0,
+ CUR0_ROM_EN, cur_rom_en);
if (color_format == CURSOR_MODE_MONO) {
/* todo: clarify what to program these to */
- REG_UPDATE(CURSOR0_COLOR0,
- CUR0_COLOR0, 0x00000000);
- REG_UPDATE(CURSOR0_COLOR1,
- CUR0_COLOR1, 0xFFFFFFFF);
+
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_COLOR0,
+ CUR0_COLOR0, 0x00000000);
+ REG_UPDATE(CURSOR0_COLOR1,
+ CUR0_COLOR1, 0xFFFFFFFF);
+ }
}
dpp_base->att.cur0_ctl.bits.expansion_mode = 0;
@@ -156,10 +136,12 @@ void dpp401_set_cursor_position(
uint32_t cur_en = pos->enable ? 1 : 0;
if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
- REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
-
- dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
}
+
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ dpp_base->att.cur0_ctl.bits.cur0_enable = cur_en;
}
void dpp401_set_optional_cursor_attributes(
@@ -169,10 +151,17 @@ void dpp401_set_optional_cursor_attributes(
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
if (attr) {
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_BIAS_G_Y, attr->bias);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_SCALE_G_Y, attr->scale);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_BIAS_RB_CRCB, attr->bias);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_SCALE_RB_CRCB, attr->scale);
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_BIAS_G_Y, attr->bias);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_SCALE_G_Y, attr->scale);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_BIAS_RB_CRCB, attr->bias);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_SCALE_RB_CRCB, attr->scale);
+ }
+
+ dpp_base->att.fp_scale_bias_g_y.bits.fp_bias_g_y = attr->bias;
+ dpp_base->att.fp_scale_bias_g_y.bits.fp_scale_g_y = attr->scale;
+ dpp_base->att.fp_scale_bias_rb_crcb.bits.fp_bias_rb_crcb = attr->bias;
+ dpp_base->att.fp_scale_bias_rb_crcb.bits.fp_scale_rb_crcb = attr->scale;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
index 2f92e7d4981b..6df3419f825f 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
@@ -78,16 +78,6 @@ enum dscl_autocal_mode {
AUTOCAL_MODE_AUTOREPLICATE = 3
};
-enum dscl_mode_sel {
- DSCL_MODE_SCALING_444_BYPASS = 0,
- DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
- DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
- DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
- DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
- DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
- DSCL_MODE_DSCL_BYPASS = 6
-};
-
static int dpp401_dscl_get_pixel_depth_val(enum lb_pixel_depth depth)
{
if (depth == LB_PIXEL_DEPTH_30BPP)
@@ -122,7 +112,7 @@ static bool dpp401_dscl_is_420_format(enum pixel_format format)
return false;
}
-static enum dscl_mode_sel dpp401_dscl_get_dscl_mode(
+static enum dcn401_dscl_mode_sel dpp401_dscl_get_dscl_mode(
struct dpp *dpp_base,
const struct scaler_data *data,
bool dbg_always_scale)
@@ -132,7 +122,7 @@ static enum dscl_mode_sel dpp401_dscl_get_dscl_mode(
if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
/* DSCL is processing data in fixed format */
if (data->format == PIXEL_FORMAT_FP16)
- return DSCL_MODE_DSCL_BYPASS;
+ return DCN401_DSCL_MODE_DSCL_BYPASS;
}
if (data->ratios.horz.value == one
@@ -140,20 +130,20 @@ static enum dscl_mode_sel dpp401_dscl_get_dscl_mode(
&& data->ratios.horz_c.value == one
&& data->ratios.vert_c.value == one
&& !dbg_always_scale)
- return DSCL_MODE_SCALING_444_BYPASS;
+ return DCN401_DSCL_MODE_SCALING_444_BYPASS;
if (!dpp401_dscl_is_420_format(data->format)) {
if (dpp401_dscl_is_video_format(data->format))
- return DSCL_MODE_SCALING_444_YCBCR_ENABLE;
+ return DCN401_DSCL_MODE_SCALING_444_YCBCR_ENABLE;
else
- return DSCL_MODE_SCALING_444_RGB_ENABLE;
+ return DCN401_DSCL_MODE_SCALING_444_RGB_ENABLE;
}
if (data->ratios.horz.value == one && data->ratios.vert.value == one)
- return DSCL_MODE_SCALING_420_LUMA_BYPASS;
+ return DCN401_DSCL_MODE_SCALING_420_LUMA_BYPASS;
if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one)
- return DSCL_MODE_SCALING_420_CHROMA_BYPASS;
+ return DCN401_DSCL_MODE_SCALING_420_CHROMA_BYPASS;
- return DSCL_MODE_SCALING_420_YCBCR_ENABLE;
+ return DCN401_DSCL_MODE_SCALING_420_YCBCR_ENABLE;
}
static void dpp401_power_on_dscl(
@@ -1071,7 +1061,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
uint32_t v_num_taps_c = scl_data->taps.v_taps_c - 1;
uint32_t h_num_taps = scl_data->taps.h_taps - 1;
uint32_t h_num_taps_c = scl_data->taps.h_taps_c - 1;
- enum dscl_mode_sel dscl_mode = dpp401_dscl_get_dscl_mode(
+ enum dcn401_dscl_mode_sel dscl_mode = dpp401_dscl_get_dscl_mode(
dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
&& scl_data->format <= PIXEL_FORMAT_VIDEO_END;
@@ -1102,7 +1092,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
dpp->scl_data = *scl_data;
if ((dpp->base.ctx->dc->config.use_spl) && (!dpp->base.ctx->dc->debug.disable_spl)) {
- dscl_mode = (enum dscl_mode_sel) scl_data->dscl_prog_data.dscl_mode;
+ dscl_mode = (enum dcn401_dscl_mode_sel) scl_data->dscl_prog_data.dscl_mode;
rect = (struct rect *)&scl_data->dscl_prog_data.recout;
mpc_width = scl_data->dscl_prog_data.mpc_size.width;
mpc_height = scl_data->dscl_prog_data.mpc_size.height;
@@ -1112,7 +1102,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
h_num_taps_c = scl_data->dscl_prog_data.taps.h_taps_c;
}
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl) {
- if (dscl_mode != DSCL_MODE_DSCL_BYPASS)
+ if (dscl_mode != DCN401_DSCL_MODE_DSCL_BYPASS)
dpp401_power_on_dscl(dpp_base, true);
}
@@ -1139,7 +1129,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
/* SCL mode */
REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode);
- if (dscl_mode == DSCL_MODE_DSCL_BYPASS) {
+ if (dscl_mode == DCN401_DSCL_MODE_DSCL_BYPASS) {
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl)
dpp401_power_on_dscl(dpp_base, false);
return;
@@ -1149,7 +1139,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
lb_config = dpp401_dscl_find_lb_memory_config(dpp, scl_data);
dpp401_dscl_set_lb(dpp, &scl_data->lb_params, lb_config);
- if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) {
+ if (dscl_mode == DCN401_DSCL_MODE_SCALING_444_BYPASS) {
if (dpp->base.ctx->dc->config.prefer_easf)
dpp401_dscl_disable_easf(dpp_base, scl_data);
dpp401_dscl_program_isharp(dpp_base, scl_data, program_isharp_1dlut, &bs_coeffs_updated);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 11535922b5ff..e4144b244332 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -30,6 +30,9 @@
#include "rc_calc.h"
#include "fixed31_32.h"
+#include "clk_mgr.h"
+#include "resource.h"
+
#define DC_LOGGER \
dsc->ctx->logger
@@ -149,6 +152,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
}
/* Forward Declarations */
+static unsigned int get_min_dsc_slice_count_for_odm(
+ const struct display_stream_compressor *dsc,
+ const struct dsc_enc_caps *dsc_enc_caps,
+ const struct dc_crtc_timing *timing);
+
static bool decide_dsc_bandwidth_range(
const uint32_t min_bpp_x16,
const uint32_t max_bpp_x16,
@@ -183,6 +191,7 @@ static bool setup_dsc_config(
const struct dc_crtc_timing *timing,
const struct dc_dsc_config_options *options,
const enum dc_link_encoding_format link_encoding,
+ int min_slice_count,
struct dc_dsc_config *dsc_cfg);
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
@@ -442,7 +451,6 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
return true;
}
-
/* If DSC is possible, get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range and
* timing's pixel clock and uncompressed bandwidth.
* If DSC is not possible, leave '*range' untouched.
@@ -458,6 +466,7 @@ bool dc_dsc_compute_bandwidth_range(
struct dc_dsc_bw_range *range)
{
bool is_dsc_possible = false;
+ unsigned int min_dsc_slice_count;
struct dsc_enc_caps dsc_enc_caps;
struct dsc_enc_caps dsc_common_caps;
struct dc_dsc_config config = {0};
@@ -469,12 +478,14 @@ bool dc_dsc_compute_bandwidth_range(
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
+ min_dsc_slice_count = get_min_dsc_slice_count_for_odm(dsc, &dsc_enc_caps, timing);
+
is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps,
timing->pixel_encoding, &dsc_common_caps);
if (is_dsc_possible)
is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
- &options, link_encoding, &config);
+ &options, link_encoding, min_dsc_slice_count, &config);
if (is_dsc_possible)
is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
@@ -525,20 +536,153 @@ void dc_dsc_dump_decoder_caps(const struct display_stream_compressor *dsc,
DC_LOG_DSC("\tis_dp %d", dsc_sink_caps->is_dp);
}
+
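+/*
+ * Example: if a single encoder reports support for 1, 2 and 4 slices and
+ * max_odm_combine_factor >= 2, splitting the work evenly across two encoders
+ * also makes 2, 4 and 8 total slices reachable (1+1, 2+2, 4+4).
+ */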
+static void build_dsc_enc_combined_slice_caps(
+ const struct dsc_enc_caps *single_dsc_enc_caps,
+ struct dsc_enc_caps *dsc_enc_caps,
+ unsigned int max_odm_combine_factor)
+{
+ /* 1-16 slice configurations, single DSC */
+ dsc_enc_caps->slice_caps.raw |= single_dsc_enc_caps->slice_caps.raw;
+
+ /* 2x DSC's */
+ if (max_odm_combine_factor >= 2) {
+ /* 1 + 1 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1;
+
+ /* 2 + 2 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2;
+
+ /* 4 + 4 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
+
+ /* 8 + 8 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_8;
+ }
+
+ /* 3x DSC's */
+ if (max_odm_combine_factor >= 3) {
+ /* 4 + 4 + 4 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
+ }
+
+ /* 4x DSC's */
+ if (max_odm_combine_factor >= 4) {
+ /* 1 + 1 + 1 + 1 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1;
+
+ /* 2 + 2 + 2 + 2 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2;
+
+ /* 3 + 3 + 3 + 3 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_3;
+
+ /* 4 + 4 + 4 + 4 */
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
+ }
+}
+
+static void build_dsc_enc_caps(
+ const struct display_stream_compressor *dsc,
+ struct dsc_enc_caps *dsc_enc_caps)
+{
+ unsigned int max_dscclk_khz;
+ unsigned int num_dsc;
+ unsigned int max_odm_combine_factor;
+ struct dsc_enc_caps single_dsc_enc_caps;
+
+ struct dc *dc;
+
+ if (!dsc || !dsc->ctx || !dsc->ctx->dc || !dsc->funcs->dsc_get_single_enc_caps)
+ return;
+
+ dc = dsc->ctx->dc;
+
+ if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_max_clock_khz || !dc->res_pool || dc->debug.disable_dsc)
+ return;
+
+ /* get max DSCCLK from clk_mgr */
+ max_dscclk_khz = dc->clk_mgr->funcs->get_max_clock_khz(dc->clk_mgr, CLK_TYPE_DSCCLK);
+
+ dsc->funcs->dsc_get_single_enc_caps(&single_dsc_enc_caps, max_dscclk_khz);
+
+ /* global capabilities */
+ dsc_enc_caps->dsc_version = single_dsc_enc_caps.dsc_version;
+ dsc_enc_caps->lb_bit_depth = single_dsc_enc_caps.lb_bit_depth;
+ dsc_enc_caps->is_block_pred_supported = single_dsc_enc_caps.is_block_pred_supported;
+ dsc_enc_caps->max_slice_width = single_dsc_enc_caps.max_slice_width;
+ dsc_enc_caps->bpp_increment_div = single_dsc_enc_caps.bpp_increment_div;
+ dsc_enc_caps->color_formats.raw = single_dsc_enc_caps.color_formats.raw;
+ dsc_enc_caps->color_depth.raw = single_dsc_enc_caps.color_depth.raw;
+
+ /* expand per DSC capabilities to global */
+ max_odm_combine_factor = dc->caps.max_odm_combine_factor;
+ num_dsc = dc->res_pool->res_cap->num_dsc;
+ max_odm_combine_factor = min(max_odm_combine_factor, num_dsc);
+ dsc_enc_caps->max_total_throughput_mps =
+ single_dsc_enc_caps.max_total_throughput_mps *
+ max_odm_combine_factor;
+
+ /* check slice counts possible with ODM combine */
+ build_dsc_enc_combined_slice_caps(&single_dsc_enc_caps, dsc_enc_caps, max_odm_combine_factor);
+}
+
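+/* pix_clk_100hz is in units of 100 Hz; dividing by 10 with round-up yields
+ * kHz, e.g. dsc_div_by_10_round_up(5940005) == 594001.
+ */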
+static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
+{
+ return (value + 9) / 10;
+}
+
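+/*
+ * Worked example with illustrative numbers: pix_clk_100hz = 11880000
+ * (1188 MHz) against max_dispclk_khz = 600000 needs
+ * ceil(1188000 / 600000) = 2 slices for throughput, and a 7680 pixel wide
+ * image against max_slice_width = 5184 needs ceil(7680 / 5184) = 2 slices,
+ * so the minimum ODM slice count would be 2.
+ */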
+static unsigned int get_min_dsc_slice_count_for_odm(
+ const struct display_stream_compressor *dsc,
+ const struct dsc_enc_caps *dsc_enc_caps,
+ const struct dc_crtc_timing *timing)
+{
+ unsigned int max_dispclk_khz;
+
+ /* default the pipe limit to encoder throughput; use the real max dispclk when available */
+ max_dispclk_khz = dsc_enc_caps->max_total_throughput_mps * 1000;
+ if (dsc && dsc->ctx->dc) {
+ if (dsc->ctx->dc->clk_mgr &&
+ dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz) {
+ /* dispclk is available */
+ max_dispclk_khz = dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz(dsc->ctx->dc->clk_mgr, CLK_TYPE_DISPCLK);
+ }
+ }
+
+ /* validate parameters */
+ if (max_dispclk_khz == 0 || dsc_enc_caps->max_slice_width == 0)
+ return 1;
+
+ /* consider the minimum ODM slice count required due to
+ * 1) display pipe throughput (dispclk)
+ * 2) max image width per slice
+ */
+ return dc_fixpt_ceil(dc_fixpt_max(
+ dc_fixpt_div_int(dc_fixpt_from_int(dsc_div_by_10_round_up(timing->pix_clk_100hz)),
+ max_dispclk_khz), // throughput
+ dc_fixpt_div_int(dc_fixpt_from_int(timing->h_addressable + timing->h_border_left + timing->h_border_right),
+ dsc_enc_caps->max_slice_width))); // slice width
+}
+
static void get_dsc_enc_caps(
const struct display_stream_compressor *dsc,
struct dsc_enc_caps *dsc_enc_caps,
int pixel_clock_100Hz)
{
- // This is a static HW query, so we can use any DSC
-
memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
- if (dsc) {
- if (!dsc->ctx->dc->debug.disable_dsc)
- dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
- if (dsc->ctx->dc->debug.native422_support)
- dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
+
+ if (!dsc || !dsc->ctx || !dsc->ctx->dc || dsc->ctx->dc->debug.disable_dsc)
+ return;
+
+ /* check whether the reported caps are global or only for a single DCN DSC enc */
+ if (dsc->funcs->dsc_get_enc_caps) {
+ dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz);
+ } else {
+ build_dsc_enc_caps(dsc, dsc_enc_caps);
}
+
+ if (dsc->ctx->dc->debug.native422_support)
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1;
}
/* Returns 'false' if no intersection was found for at least one capability.
@@ -621,11 +765,6 @@ static bool intersect_dsc_caps(
return true;
}
-static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
-{
- return (value + 9) / 10;
-}
-
static uint32_t compute_bpp_x16_from_target_bandwidth(
const uint32_t bandwidth_in_kbps,
const struct dc_crtc_timing *timing,
@@ -910,11 +1049,11 @@ static bool setup_dsc_config(
const struct dc_crtc_timing *timing,
const struct dc_dsc_config_options *options,
const enum dc_link_encoding_format link_encoding,
+ int min_slices_h,
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
int max_slices_h = 0;
- int min_slices_h = 0;
int num_slices_h = 0;
int pic_width;
int slice_width;
@@ -1018,12 +1157,14 @@ static bool setup_dsc_config(
if (!is_dsc_possible)
goto done;
- min_slices_h = pic_width / dsc_common_caps.max_slice_width;
- if (pic_width % dsc_common_caps.max_slice_width)
- min_slices_h++;
+ /* increase minimum slice count to meet sink slice width limitations */
+ min_slices_h = dc_fixpt_ceil(dc_fixpt_max(
+ dc_fixpt_div_int(dc_fixpt_from_int(pic_width), dsc_common_caps.max_slice_width), // sink min
+ dc_fixpt_from_int(min_slices_h))); // source min
min_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, min_slices_h);
+ /* increase minimum slice count to meet sink throughput limitations */
while (min_slices_h <= max_slices_h) {
int pix_clk_per_slice_khz = dsc_div_by_10_round_up(timing->pix_clk_100hz) / min_slices_h;
if (pix_clk_per_slice_khz <= sink_per_slice_throughput_mps * 1000)
@@ -1032,14 +1173,12 @@ static bool setup_dsc_config(
min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
}
- is_dsc_possible = (min_slices_h <= max_slices_h);
-
- if (pic_width % min_slices_h != 0)
- min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first?
-
- if (min_slices_h == 0 && max_slices_h == 0)
- is_dsc_possible = false;
+ /* increase minimum slice count to meet divisibility requirements */
+ while (pic_width % min_slices_h != 0 && min_slices_h <= max_slices_h) {
+ min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
+ }
+ is_dsc_possible = (min_slices_h <= max_slices_h) && max_slices_h != 0;
if (!is_dsc_possible)
goto done;
@@ -1162,12 +1301,19 @@ bool dc_dsc_compute_config(
{
bool is_dsc_possible = false;
struct dsc_enc_caps dsc_enc_caps;
-
+ unsigned int min_dsc_slice_count;
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
+
+ min_dsc_slice_count = get_min_dsc_slice_count_for_odm(dsc, &dsc_enc_caps, timing);
+
is_dsc_possible = setup_dsc_config(dsc_sink_caps,
&dsc_enc_caps,
target_bandwidth_kbps,
- timing, options, link_encoding, dsc_cfg);
+ timing,
+ options,
+ link_encoding,
+ min_dsc_slice_count,
+ dsc_cfg);
return is_dsc_possible;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
index bd1b9aef6d5c..242f1e6f0d8f 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
@@ -35,6 +35,7 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const
static const struct dsc_funcs dcn20_dsc_funcs = {
.dsc_get_enc_caps = dsc2_get_enc_caps,
.dsc_read_state = dsc2_read_state,
+ .dsc_read_reg_state = dsc2_read_reg_state,
.dsc_validate_stream = dsc2_validate_stream,
.dsc_set_config = dsc2_set_config,
.dsc_get_packed_pps = dsc2_get_packed_pps,
@@ -155,6 +156,13 @@ void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state
DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source);
}
+void dsc2_read_reg_state(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state)
+{
+ struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+
+ dccg_reg_state->dsc_top_control = REG_READ(DSC_TOP_CONTROL);
+ dccg_reg_state->dscc_interrupt_control_status = REG_READ(DSCC_INTERRUPT_CONTROL_STATUS);
+}
bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg)
{
@@ -406,9 +414,10 @@ bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values
dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1;
dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0;
+ // Need to find the ceiling value for the slice width
+ dsc_reg_vals->pps.slice_width = (dsc_cfg->pic_width + dsc_cfg->dsc_padding + dsc_cfg->dc_dsc_cfg.num_slices_h - 1) / dsc_cfg->dc_dsc_cfg.num_slices_h;
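+ // e.g. pic_width = 3841, dsc_padding = 0, num_slices_h = 4 gives slice_width = 961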
// TODO: in addition to validating slice height (pic height must be divisible by slice height),
// see what happens when the same condition doesn't apply for slice_width/pic_width.
- dsc_reg_vals->pps.slice_width = dsc_cfg->pic_width / dsc_cfg->dc_dsc_cfg.num_slices_h;
dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v;
ASSERT(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
index a9c04fc95bd1..2337c3a97235 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
@@ -606,6 +606,7 @@ bool dsc2_get_packed_pps(struct display_stream_compressor *dsc,
uint8_t *dsc_packed_pps);
void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
+void dsc2_read_reg_state(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state);
bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
struct dsc_optc_config *dsc_optc_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
index 6f4f5a3c4861..e712985f7abd 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
@@ -28,10 +28,11 @@
#include "reg_helper.h"
static void dsc35_enable(struct display_stream_compressor *dsc, int opp_pipe);
+static void dsc35_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz);
static const struct dsc_funcs dcn35_dsc_funcs = {
- .dsc_get_enc_caps = dsc2_get_enc_caps,
.dsc_read_state = dsc2_read_state,
+ .dsc_read_reg_state = dsc2_read_reg_state,
.dsc_validate_stream = dsc2_validate_stream,
.dsc_set_config = dsc2_set_config,
.dsc_get_packed_pps = dsc2_get_packed_pps,
@@ -39,6 +40,7 @@ static const struct dsc_funcs dcn35_dsc_funcs = {
.dsc_disable = dsc2_disable,
.dsc_disconnect = dsc2_disconnect,
.dsc_wait_disconnect_pending_clear = dsc2_wait_disconnect_pending_clear,
+ .dsc_get_single_enc_caps = dsc35_get_single_enc_caps,
};
/* Macro definitions for REG_SET macros */
@@ -110,3 +112,31 @@ void dsc35_set_fgcg(struct dcn20_dsc *dsc20, bool enable)
{
REG_UPDATE(DSC_TOP_CONTROL, DSC_FGCG_REP_DIS, !enable);
}
+
+void dsc35_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz)
+{
+ dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */
+
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1;
+
+ dsc_enc_caps->lb_bit_depth = 13;
+ dsc_enc_caps->is_block_pred_supported = true;
+
+ dsc_enc_caps->color_formats.bits.RGB = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
+
+ dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
+ dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1;
+ dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1;
+
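+ /* the encoder processes up to 3 pixels per DSCCLK cycle, so e.g.
+ * max_dscclk_khz = 1200000 gives 3600 Mpix/s of total throughput
+ */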
+ dsc_enc_caps->max_total_throughput_mps = max_dscclk_khz * 3 / 1000;
+
+ dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */
+ dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
index 4222679fd4c9..c1bdbb38c690 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
@@ -9,19 +9,14 @@
#include "dsc/dscc_types.h"
#include "dsc/rc_calc.h"
-#define MAX_THROUGHPUT_PER_DSC_100HZ 20000000
-#define MAX_DSC_UNIT_COMBINE 4
-
static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals);
/* Object I/F functions */
//static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
//static bool dsc401_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps);
-static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc);
-static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
+static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz);
static const struct dsc_funcs dcn401_dsc_funcs = {
- .dsc_get_enc_caps = dsc401_get_enc_caps,
.dsc_read_state = dsc401_read_state,
.dsc_validate_stream = dsc401_validate_stream,
.dsc_set_config = dsc401_set_config,
@@ -30,6 +25,8 @@ static const struct dsc_funcs dcn401_dsc_funcs = {
.dsc_disable = dsc401_disable,
.dsc_disconnect = dsc401_disconnect,
.dsc_wait_disconnect_pending_clear = dsc401_wait_disconnect_pending_clear,
+ .dsc_get_single_enc_caps = dsc401_get_single_enc_caps,
+ .dsc_read_reg_state = dsc2_read_reg_state,
};
/* Macro definitions for REG_SET macros */
@@ -66,22 +63,14 @@ void dsc401_construct(struct dcn401_dsc *dsc,
dsc->max_image_width = 5184;
}
-static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz)
+static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz)
{
- int min_dsc_unit_required = (pixel_clock_100Hz + MAX_THROUGHPUT_PER_DSC_100HZ - 1) / MAX_THROUGHPUT_PER_DSC_100HZ;
-
dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */
- /* 1 slice is only supported with 1 DSC unit */
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = min_dsc_unit_required == 1 ? 1 : 0;
- /* 2 slice is only supported with 1 or 2 DSC units */
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = (min_dsc_unit_required == 1 || min_dsc_unit_required == 2) ? 1 : 0;
- /* 3 slice is only supported with 1 DSC unit */
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = min_dsc_unit_required == 1 ? 1 : 0;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1;
dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1;
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 = 1;
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 = 1;
- dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 = 1;
dsc_enc_caps->lb_bit_depth = 13;
dsc_enc_caps->is_block_pred_supported = true;
@@ -95,7 +84,7 @@ static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clo
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1;
dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1;
- dsc_enc_caps->max_total_throughput_mps = MAX_THROUGHPUT_PER_DSC_100HZ * MAX_DSC_UNIT_COMBINE;
+ dsc_enc_caps->max_total_throughput_mps = max_dscclk_khz * 3 / 1000;
dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */
dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */
@@ -191,7 +180,7 @@ void dsc401_disable(struct display_stream_compressor *dsc)
DSC_CLOCK_EN, 0);
}
-static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc)
+void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc)
{
struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
index e3ca70058e64..7acd57eb4f42 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h
@@ -341,5 +341,6 @@ void dsc401_set_config(struct display_stream_compressor *dsc, const struct dsc_c
void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe);
void dsc401_disable(struct display_stream_compressor *dsc);
void dsc401_disconnect(struct display_stream_compressor *dsc);
+void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
index 1ebce5426a58..81c83d5fe042 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
@@ -41,6 +41,7 @@ struct dsc_config {
enum dc_color_depth color_depth; /* Bits per component */
bool is_odm;
struct dc_dsc_config dc_dsc_cfg;
+ uint32_t dsc_padding;
};
@@ -65,6 +66,10 @@ struct dcn_dsc_state {
uint32_t dsc_opp_source;
};
+struct dcn_dsc_reg_state {
+ uint32_t dsc_top_control;
+ uint32_t dscc_interrupt_control_status;
+};
/* DSC encoder capabilities
* They differ from the DPCD DSC caps because they are based on AMD DSC encoder caps.
@@ -99,6 +104,7 @@ struct dsc_enc_caps {
struct dsc_funcs {
void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
+ void (*dsc_read_reg_state)(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state);
bool (*dsc_validate_stream)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
void (*dsc_set_config)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
struct dsc_optc_config *dsc_optc_cfg);
@@ -108,6 +114,7 @@ struct dsc_funcs {
void (*dsc_disable)(struct display_stream_compressor *dsc);
void (*dsc_disconnect)(struct display_stream_compressor *dsc);
void (*dsc_wait_disconnect_pending_clear)(struct display_stream_compressor *dsc);
+ void (*dsc_get_single_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
index 1313a7c5d87b..73a1e6a03719 100644
--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -28,7 +28,7 @@
#include "include/hdcp_msg_types.h"
#include "include/signal_types.h"
#include "core_types.h"
-#include "link.h"
+#include "link_service.h"
#include "link_hwss.h"
#include "link/protocols/link_dpcd.h"
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
index d347bb06577a..181a93dc46e6 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
@@ -440,6 +440,17 @@ void hubbub3_init_watermarks(struct hubbub *hubbub)
REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, reg);
}
+void hubbub3_read_reg_state(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+
+ hubbub_reg_state->det0_ctrl = REG_READ(DCHUBBUB_DET0_CTRL);
+ hubbub_reg_state->det1_ctrl = REG_READ(DCHUBBUB_DET1_CTRL);
+ hubbub_reg_state->det2_ctrl = REG_READ(DCHUBBUB_DET2_CTRL);
+ hubbub_reg_state->det3_ctrl = REG_READ(DCHUBBUB_DET3_CTRL);
+ hubbub_reg_state->compbuf_ctrl = REG_READ(DCHUBBUB_COMPBUF_CTRL);
+}
+
static const struct hubbub_funcs hubbub30_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
@@ -457,6 +468,7 @@ static const struct hubbub_funcs hubbub30_funcs = {
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.init_watermarks = hubbub3_init_watermarks,
.hubbub_read_state = hubbub2_read_state,
+ .hubbub_read_reg_state = hubbub3_read_reg_state,
};
void hubbub3_construct(struct dcn20_hubbub *hubbub3,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
index ca6233e8f1f4..9e14de3ccaee 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
@@ -133,4 +133,6 @@ void hubbub3_force_pstate_change_control(struct hubbub *hubbub,
void hubbub3_init_watermarks(struct hubbub *hubbub);
+void hubbub3_read_reg_state(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
index b98505b240a7..5a03758e3de6 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
@@ -933,8 +933,8 @@ int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
dcn20_vmid_setup(&hubbub2->vmid[15], &phys_config);
}
-
- dcn21_dchvm_init(hubbub);
+ if (hubbub->funcs->dchvm_init)
+ hubbub->funcs->dchvm_init(hubbub);
return NUM_VMID;
}
@@ -1071,6 +1071,8 @@ static const struct hubbub_funcs hubbub31_funcs = {
.program_compbuf_size = dcn31_program_compbuf_size,
.init_crb = dcn31_init_crb,
.hubbub_read_state = hubbub2_read_state,
+ .hubbub_read_reg_state = hubbub3_read_reg_state,
+ .dchvm_init = dcn21_dchvm_init,
};
void hubbub31_construct(struct dcn20_hubbub *hubbub31,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
index 32a6be543105..237331b35378 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
@@ -28,6 +28,7 @@
#include "dcn32_hubbub.h"
#include "dm_services.h"
#include "reg_helper.h"
+#include "dal_asic_id.h"
#define CTX \
@@ -72,6 +73,14 @@ static void dcn32_init_crb(struct hubbub *hubbub)
REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F);
}
+static void hubbub32_set_sdp_control(struct hubbub *hubbub, bool dc_control)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_CFG0,
+ SDPIF_PORT_CONTROL, dc_control);
+}
+
void hubbub32_set_request_limit(struct hubbub *hubbub, int memory_channel_count, int words_per_channel)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
@@ -754,8 +763,18 @@ static bool hubbub32_program_watermarks(
unsigned int refclk_mhz,
bool safe_to_lower)
{
+ struct dc *dc = hubbub->ctx->dc;
bool wm_pending = false;
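+ /*
+ * Raising and lowering watermarks are asymmetric here: on GC 11.0.0 and
+ * GC 11.0.3, stutter and DC-side SDP port control are dropped before
+ * watermarks are raised, and only restored on a later safe_to_lower pass
+ * once the new watermarks are in effect.
+ */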
+ if (!safe_to_lower && dc->debug.disable_stutter_for_wm_program &&
+ (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev) ||
+ ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev))) {
+ /* before raising watermarks, SDP control is given to DF and stutter must be disabled */
+ wm_pending = true;
+ hubbub32_set_sdp_control(hubbub, false);
+ hubbub1_allow_self_refresh_control(hubbub, false);
+ }
+
if (hubbub32_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
wm_pending = true;
@@ -786,10 +805,20 @@ static bool hubbub32_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
- if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower) {
+ /* after lowering watermarks, the stutter setting is restored and SDP control is given back to DC */
+ hubbub1_allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
+
+ if (dc->debug.disable_stutter_for_wm_program &&
+ (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev) ||
+ ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev))) {
+ hubbub32_set_sdp_control(hubbub, true);
+ }
+ } else if (dc->debug.disable_stutter) {
+ hubbub1_allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
+ }
- hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);
+ hubbub32_force_usr_retraining_allow(hubbub, dc->debug.force_usr_allow);
return wm_pending;
}
@@ -974,8 +1003,7 @@ void hubbub32_init(struct hubbub *hubbub)
ignore the "df_pre_cstate_req" from the SDP port control.
only the DCN will determine when to connect the SDP port
*/
- REG_UPDATE(DCHUBBUB_SDPIF_CFG0,
- SDPIF_PORT_CONTROL, 1);
+ hubbub32_set_sdp_control(hubbub, true);
/*Set SDP's max outstanding request to 512
must set the register back to 0 (max outstanding = 256) in zero frame buffer mode*/
REG_UPDATE(DCHUBBUB_SDPIF_CFG1,
@@ -1009,6 +1037,7 @@ static const struct hubbub_funcs hubbub32_funcs = {
.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
.set_request_limit = hubbub32_set_request_limit,
.get_mall_en = hubbub32_get_mall_en,
+ .hubbub_read_reg_state = hubbub3_read_reg_state,
};
void hubbub32_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
index 6d41953011f5..43ba399f4822 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
@@ -549,6 +549,55 @@ void hubbub35_init(struct hubbub *hubbub)
memset(&hubbub2->watermarks.a.cstate_pstate, 0, sizeof(hubbub2->watermarks.a.cstate_pstate));
}
+void dcn35_dchvm_init(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t riommu_active;
+ int i;
+
+ // Init DCHVM block
+ REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
+
+ // Poll until RIOMMU_ACTIVE = 1
+ for (i = 0; i < 100; i++) {
+ REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);
+
+ if (riommu_active)
+ break;
+ else
+ udelay(5);
+ }
+
+ if (riommu_active) {
+ // Disable gating and memory power requests
+ REG_UPDATE(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, 1);
+ REG_UPDATE_4(DCHVM_CLK_CTRL,
+ HVM_DISPCLK_R_GATE_DIS, 1,
+ HVM_DISPCLK_G_GATE_DIS, 1,
+ HVM_DCFCLK_R_GATE_DIS, 1,
+ HVM_DCFCLK_G_GATE_DIS, 1);
+
+ // Reflect the power status of DCHUBBUB
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);
+
+ // Start rIOMMU prefetching
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);
+
+ // Poll until HOSTVM_PREFETCH_DONE = 1
+ REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
+
+ // Enable memory power requests
+ REG_UPDATE(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, 0);
+ // Enable dynamic clock gating
+ REG_UPDATE_4(DCHVM_CLK_CTRL,
+ HVM_DISPCLK_R_GATE_DIS, 0,
+ HVM_DISPCLK_G_GATE_DIS, 0,
+ HVM_DCFCLK_R_GATE_DIS, 0,
+ HVM_DCFCLK_G_GATE_DIS, 0);
+ hubbub->riommu_active = true;
+ }
+}
+
/*static void hubbub35_set_request_limit(struct hubbub *hubbub,
int memory_channel_count,
int words_per_channel)
@@ -589,6 +638,8 @@ static const struct hubbub_funcs hubbub35_funcs = {
.hubbub_read_state = hubbub2_read_state,
.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
.dchubbub_init = hubbub35_init,
+ .hubbub_read_reg_state = hubbub3_read_reg_state,
+ .dchvm_init = dcn35_dchvm_init,
};
void hubbub35_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h
index 23fecf88556c..9f65fff1bd4d 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h
@@ -168,4 +168,5 @@ void dcn35_program_compbuf_size(struct hubbub *hubbub,
unsigned int compbuf_size_kb, bool safe_to_increase);
void dcn35_init_crb(struct hubbub *hubbub);
void hubbub35_init(struct hubbub *hubbub);
+void dcn35_dchvm_init(struct hubbub *hubbub);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
index 92fab471b183..d11afd1ce72a 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
@@ -1247,6 +1247,7 @@ static const struct hubbub_funcs hubbub4_01_funcs = {
.program_compbuf_segments = dcn401_program_compbuf_segments,
.wait_for_det_update = dcn401_wait_for_det_update,
.program_arbiter = dcn401_program_arbiter,
+ .hubbub_read_reg_state = hubbub3_read_reg_state,
};
void hubbub401_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
index 9b026600b90e..6378e3fd7249 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
@@ -550,6 +550,7 @@ void hubp_reset(struct hubp *hubp)
{
memset(&hubp->pos, 0, sizeof(hubp->pos));
memset(&hubp->att, 0, sizeof(hubp->att));
+ hubp->cursor_offload = false;
}
void hubp1_program_surface_config(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
index c7765e6f09e6..f2571076fc50 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
@@ -104,7 +104,10 @@
SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\
SRI(DCN_CUR0_TTU_CNTL0, HUBPREQ, id),\
SRI(DCN_CUR0_TTU_CNTL1, HUBPREQ, id),\
- SRI(HUBP_CLK_CNTL, HUBP, id)
+ SRI(HUBP_CLK_CNTL, HUBP, id),\
+ SRI(HUBPRET_READ_LINE_VALUE, HUBPRET, id),\
+ SRI(HUBP_MEASURE_WIN_CTRL_DCFCLK, HUBP, id),\
+ SRI(HUBP_MEASURE_WIN_CTRL_DPPCLK, HUBP, id)
/* Register address initialization macro for ASICs with VM */
#define HUBP_REG_LIST_DCN_VM(id)\
@@ -249,7 +252,20 @@
uint32_t CURSOR_POSITION; \
uint32_t CURSOR_HOT_SPOT; \
uint32_t CURSOR_DST_OFFSET; \
- uint32_t HUBP_CLK_CNTL
+ uint32_t HUBP_CLK_CNTL; \
+ uint32_t HUBPRET_READ_LINE_VALUE; \
+ uint32_t HUBP_MEASURE_WIN_CTRL_DCFCLK; \
+ uint32_t HUBP_MEASURE_WIN_CTRL_DPPCLK; \
+ uint32_t HUBPRET_INTERRUPT; \
+ uint32_t HUBPRET_MEM_PWR_CTRL; \
+ uint32_t HUBPRET_MEM_PWR_STATUS; \
+ uint32_t HUBPRET_READ_LINE_CTRL0; \
+ uint32_t HUBPRET_READ_LINE_CTRL1; \
+ uint32_t HUBPRET_READ_LINE0; \
+ uint32_t HUBPRET_READ_LINE1; \
+ uint32_t HUBPREQ_MEM_PWR_CTRL; \
+ uint32_t HUBPREQ_MEM_PWR_STATUS
+
#define HUBP_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -622,6 +638,8 @@
type DCN_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM;\
type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;\
type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;\
+ type PIPE_READ_LINE;\
+ type HUBP_SEG_ALLOC_ERR_STATUS;\
/* todo: get these from GVM instead of reading registers ourselves */\
type PAGE_DIRECTORY_ENTRY_HI32;\
type PAGE_DIRECTORY_ENTRY_LO32;\
@@ -666,10 +684,147 @@ struct dcn_mi_mask {
DCN_HUBP_REG_FIELD_LIST(uint32_t);
};
+struct dcn_fl_regs_st {
+ uint32_t lut_enable;
+ uint32_t lut_done;
+ uint32_t lut_addr_mode;
+ uint32_t lut_width;
+ uint32_t lut_mpc_width;
+ uint32_t lut_tmz;
+ uint32_t lut_crossbar_sel_r;
+ uint32_t lut_crossbar_sel_g;
+ uint32_t lut_crossbar_sel_b;
+ uint32_t lut_addr_hi;
+ uint32_t lut_addr_lo;
+ uint32_t refcyc_3dlut_group;
+ uint32_t lut_fl_bias;
+ uint32_t lut_fl_scale;
+ uint32_t lut_fl_mode;
+ uint32_t lut_fl_format;
+};
+struct dcn_hubp_reg_state {
+ uint32_t hubp_cntl;
+ uint32_t mall_config;
+ uint32_t mall_sub_vp;
+ uint32_t hubp_req_size_config;
+ uint32_t hubp_req_size_config_c;
+ uint32_t vmpg_config;
+ uint32_t addr_config;
+ uint32_t pri_viewport_dimension;
+ uint32_t pri_viewport_dimension_c;
+ uint32_t pri_viewport_start;
+ uint32_t pri_viewport_start_c;
+ uint32_t sec_viewport_dimension;
+ uint32_t sec_viewport_dimension_c;
+ uint32_t sec_viewport_start;
+ uint32_t sec_viewport_start_c;
+ uint32_t surface_config;
+ uint32_t tiling_config;
+ uint32_t clk_cntl;
+ uint32_t mall_status;
+ uint32_t measure_win_ctrl_dcfclk;
+ uint32_t measure_win_ctrl_dppclk;
+
+ uint32_t blank_offset_0;
+ uint32_t blank_offset_1;
+ uint32_t cursor_settings;
+ uint32_t dcn_cur0_ttu_cntl0;
+ uint32_t dcn_cur0_ttu_cntl1;
+ uint32_t dcn_cur1_ttu_cntl0;
+ uint32_t dcn_cur1_ttu_cntl1;
+ uint32_t dcn_dmdat_vm_cntl;
+ uint32_t dcn_expansion_mode;
+ uint32_t dcn_global_ttu_cntl;
+ uint32_t dcn_surf0_ttu_cntl0;
+ uint32_t dcn_surf0_ttu_cntl1;
+ uint32_t dcn_surf1_ttu_cntl0;
+ uint32_t dcn_surf1_ttu_cntl1;
+ uint32_t dcn_ttu_qos_wm;
+ uint32_t dcn_vm_mx_l1_tlb_cntl;
+ uint32_t dcn_vm_system_aperture_high_addr;
+ uint32_t dcn_vm_system_aperture_low_addr;
+ uint32_t dcsurf_flip_control;
+ uint32_t dcsurf_flip_control2;
+ uint32_t dcsurf_primary_meta_surface_address;
+ uint32_t dcsurf_primary_meta_surface_address_c;
+ uint32_t dcsurf_primary_meta_surface_address_high;
+ uint32_t dcsurf_primary_meta_surface_address_high_c;
+ uint32_t dcsurf_primary_surface_address;
+ uint32_t dcsurf_primary_surface_address_c;
+ uint32_t dcsurf_primary_surface_address_high;
+ uint32_t dcsurf_primary_surface_address_high_c;
+ uint32_t dcsurf_secondary_meta_surface_address;
+ uint32_t dcsurf_secondary_meta_surface_address_c;
+ uint32_t dcsurf_secondary_meta_surface_address_high;
+ uint32_t dcsurf_secondary_meta_surface_address_high_c;
+ uint32_t dcsurf_secondary_surface_address;
+ uint32_t dcsurf_secondary_surface_address_c;
+ uint32_t dcsurf_secondary_surface_address_high;
+ uint32_t dcsurf_secondary_surface_address_high_c;
+ uint32_t dcsurf_surface_control;
+ uint32_t dcsurf_surface_earliest_inuse;
+ uint32_t dcsurf_surface_earliest_inuse_c;
+ uint32_t dcsurf_surface_earliest_inuse_high;
+ uint32_t dcsurf_surface_earliest_inuse_high_c;
+ uint32_t dcsurf_surface_flip_interrupt;
+ uint32_t dcsurf_surface_inuse;
+ uint32_t dcsurf_surface_inuse_c;
+ uint32_t dcsurf_surface_inuse_high;
+ uint32_t dcsurf_surface_inuse_high_c;
+ uint32_t dcsurf_surface_pitch;
+ uint32_t dcsurf_surface_pitch_c;
+ uint32_t dst_after_scaler;
+ uint32_t dst_dimensions;
+ uint32_t dst_y_delta_drq_limit;
+ uint32_t flip_parameters_0;
+ uint32_t flip_parameters_1;
+ uint32_t flip_parameters_2;
+ uint32_t flip_parameters_3;
+ uint32_t flip_parameters_4;
+ uint32_t flip_parameters_5;
+ uint32_t flip_parameters_6;
+ uint32_t hubpreq_mem_pwr_ctrl;
+ uint32_t hubpreq_mem_pwr_status;
+ uint32_t nom_parameters_0;
+ uint32_t nom_parameters_1;
+ uint32_t nom_parameters_2;
+ uint32_t nom_parameters_3;
+ uint32_t nom_parameters_4;
+ uint32_t nom_parameters_5;
+ uint32_t nom_parameters_6;
+ uint32_t nom_parameters_7;
+ uint32_t per_line_delivery;
+ uint32_t per_line_delivery_pre;
+ uint32_t prefetch_settings;
+ uint32_t prefetch_settings_c;
+ uint32_t ref_freq_to_pix_freq;
+ uint32_t uclk_pstate_force;
+ uint32_t vblank_parameters_0;
+ uint32_t vblank_parameters_1;
+ uint32_t vblank_parameters_2;
+ uint32_t vblank_parameters_3;
+ uint32_t vblank_parameters_4;
+ uint32_t vblank_parameters_5;
+ uint32_t vblank_parameters_6;
+ uint32_t vmid_settings_0;
+
+ uint32_t hubpret_control;
+ uint32_t hubpret_interrupt;
+ uint32_t hubpret_mem_pwr_ctrl;
+ uint32_t hubpret_mem_pwr_status;
+ uint32_t hubpret_read_line_ctrl0;
+ uint32_t hubpret_read_line_ctrl1;
+ uint32_t hubpret_read_line_status;
+ uint32_t hubpret_read_line_value;
+ uint32_t hubpret_read_line0;
+ uint32_t hubpret_read_line1;
+};
+
struct dcn_hubp_state {
struct _vcs_dpi_display_dlg_regs_st dlg_attr;
struct _vcs_dpi_display_ttu_regs_st ttu_attr;
struct _vcs_dpi_display_rq_regs_st rq_regs;
+ struct dcn_fl_regs_st fl_regs;
uint32_t pixel_format;
uint32_t inuse_addr_hi;
uint32_t inuse_addr_lo;
@@ -694,7 +849,6 @@ struct dcn_hubp_state {
uint32_t hubp_cntl;
uint32_t flip_control;
};
-
struct dcn10_hubp {
struct hubp base;
struct dcn_hubp_state state;
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
index 91259b896e03..92288de4cc10 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
@@ -613,26 +613,28 @@ void hubp2_cursor_set_attributes(
hubp->curs_attr = *attr;
- REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
- CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
- REG_UPDATE(CURSOR_SURFACE_ADDRESS,
- CURSOR_SURFACE_ADDRESS, attr->address.low_part);
-
- REG_UPDATE_2(CURSOR_SIZE,
- CURSOR_WIDTH, attr->width,
- CURSOR_HEIGHT, attr->height);
-
- REG_UPDATE_4(CURSOR_CONTROL,
- CURSOR_MODE, attr->color_format,
- CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
- CURSOR_PITCH, hw_pitch,
- CURSOR_LINES_PER_CHUNK, lpc);
-
- REG_SET_2(CURSOR_SETTINGS, 0,
- /* no shift of the cursor HDL schedule */
- CURSOR0_DST_Y_OFFSET, 0,
- /* used to shift the cursor chunk request deadline */
- CURSOR0_CHUNK_HDL_ADJUST, 3);
+ if (!hubp->cursor_offload) {
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
+ CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS,
+ CURSOR_SURFACE_ADDRESS, attr->address.low_part);
+
+ REG_UPDATE_2(CURSOR_SIZE,
+ CURSOR_WIDTH, attr->width,
+ CURSOR_HEIGHT, attr->height);
+
+ REG_UPDATE_4(CURSOR_CONTROL,
+ CURSOR_MODE, attr->color_format,
+ CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
+ CURSOR_PITCH, hw_pitch,
+ CURSOR_LINES_PER_CHUNK, lpc);
+
+ REG_SET_2(CURSOR_SETTINGS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
+ }
hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part;
hubp->att.SURFACE_ADDR = attr->address.low_part;
@@ -1059,23 +1061,28 @@ void hubp2_cursor_set_position(
cur_en = 0; /* not visible beyond top edge*/
if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) {
- if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+ bool cursor_not_programmed = hubp->att.SURFACE_ADDR == 0 && hubp->att.SURFACE_ADDR_HIGH == 0;
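+ /* check the cached address rather than reading the register, which may
+ * not reflect the real state when cursor programming is offloaded
+ */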
+
+ if (cur_en && cursor_not_programmed)
hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
- REG_UPDATE(CURSOR_CONTROL,
- CURSOR_ENABLE, cur_en);
+ if (!hubp->cursor_offload)
+ REG_UPDATE(CURSOR_CONTROL, CURSOR_ENABLE, cur_en);
}
- REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, pos->x,
- CURSOR_Y_POSITION, pos->y);
+ if (!hubp->cursor_offload) {
+ REG_SET_2(CURSOR_POSITION, 0,
+ CURSOR_X_POSITION, pos->x,
+ CURSOR_Y_POSITION, pos->y);
- REG_SET_2(CURSOR_HOT_SPOT, 0,
- CURSOR_HOT_SPOT_X, pos->x_hotspot,
- CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+ REG_SET_2(CURSOR_HOT_SPOT, 0,
+ CURSOR_HOT_SPOT_X, pos->x_hotspot,
+ CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+
+ REG_SET(CURSOR_DST_OFFSET, 0,
+ CURSOR_DST_X_OFFSET, dst_x_offset);
+ }
- REG_SET(CURSOR_DST_OFFSET, 0,
- CURSOR_DST_X_OFFSET, dst_x_offset);
/* TODO Handle surface pixel formats other than 4:4:4 */
/* Cursor Position Register Config */
hubp->pos.cur_ctl.bits.cur_enable = cur_en;
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
index 62369be070ea..7062e6653062 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
@@ -145,7 +145,8 @@
uint32_t FLIP_PARAMETERS_2;\
uint32_t DCN_CUR1_TTU_CNTL0;\
uint32_t DCN_CUR1_TTU_CNTL1;\
- uint32_t VMID_SETTINGS_0
+ uint32_t VMID_SETTINGS_0;\
+ uint32_t DST_Y_DELTA_DRQ_LIMIT
/*shared with dcn3.x*/
#define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \
@@ -176,7 +177,10 @@
uint32_t HUBP_3DLUT_CONTROL;\
uint32_t HUBP_3DLUT_DLG_PARAM;\
uint32_t DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE;\
- uint32_t DCHUBP_MCACHEID_CONFIG
+ uint32_t DCHUBP_MCACHEID_CONFIG;\
+ uint32_t DCHUBP_MALL_SUB_VP;\
+ uint32_t DCHUBP_ADDR_CONFIG;\
+ uint32_t HUBP_MALL_STATUS
#define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \
DCN_HUBP_REG_FIELD_BASE_LIST(type); \
@@ -264,6 +268,7 @@
type HUBP_3DLUT_DONE;\
type HUBP_3DLUT_ADDRESSING_MODE;\
type HUBP_3DLUT_WIDTH;\
+ type HUBP_3DLUT_MPC_WIDTH;\
type HUBP_3DLUT_TMZ;\
type HUBP_3DLUT_CROSSBAR_SELECT_Y_G;\
type HUBP_3DLUT_CROSSBAR_SELECT_CB_B;\
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
index e2740482e1cf..08ea0a1b9e7f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
@@ -73,8 +73,6 @@
* On any mode switch, if the new reg values are smaller than the current values,
* then update the regs with the new values.
*
- * Link to the ticket: http://ontrack-internal.amd.com/browse/DEDCN21-142
- *
*/
void apply_DEDCN21_142_wa_for_hostvm_deadline(
struct hubp *hubp,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
index 0da70b50e86d..0cc6f4558989 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
@@ -476,6 +476,126 @@ void hubp3_read_state(struct hubp *hubp)
}
+void hubp3_read_reg_state(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ reg_state->hubp_cntl = REG_READ(DCHUBP_CNTL);
+ reg_state->mall_config = REG_READ(DCHUBP_MALL_CONFIG);
+ reg_state->mall_sub_vp = REG_READ(DCHUBP_MALL_SUB_VP);
+ reg_state->hubp_req_size_config = REG_READ(DCHUBP_REQ_SIZE_CONFIG);
+ reg_state->hubp_req_size_config_c = REG_READ(DCHUBP_REQ_SIZE_CONFIG_C);
+ reg_state->vmpg_config = REG_READ(DCHUBP_VMPG_CONFIG);
+ reg_state->addr_config = REG_READ(DCSURF_ADDR_CONFIG);
+ reg_state->pri_viewport_dimension = REG_READ(DCSURF_PRI_VIEWPORT_DIMENSION);
+ reg_state->pri_viewport_dimension_c = REG_READ(DCSURF_PRI_VIEWPORT_DIMENSION_C);
+ reg_state->pri_viewport_start = REG_READ(DCSURF_PRI_VIEWPORT_START);
+ reg_state->pri_viewport_start_c = REG_READ(DCSURF_PRI_VIEWPORT_START_C);
+ reg_state->sec_viewport_dimension = REG_READ(DCSURF_SEC_VIEWPORT_DIMENSION);
+ reg_state->sec_viewport_dimension_c = REG_READ(DCSURF_SEC_VIEWPORT_DIMENSION_C);
+ reg_state->sec_viewport_start = REG_READ(DCSURF_SEC_VIEWPORT_START);
+ reg_state->sec_viewport_start_c = REG_READ(DCSURF_SEC_VIEWPORT_START_C);
+ reg_state->surface_config = REG_READ(DCSURF_SURFACE_CONFIG);
+ reg_state->tiling_config = REG_READ(DCSURF_TILING_CONFIG);
+ reg_state->clk_cntl = REG_READ(HUBP_CLK_CNTL);
+ reg_state->mall_status = REG_READ(HUBP_MALL_STATUS);
+ reg_state->measure_win_ctrl_dcfclk = REG_READ(HUBP_MEASURE_WIN_CTRL_DCFCLK);
+ reg_state->measure_win_ctrl_dppclk = REG_READ(HUBP_MEASURE_WIN_CTRL_DPPCLK);
+
+ reg_state->blank_offset_0 = REG_READ(BLANK_OFFSET_0);
+ reg_state->blank_offset_1 = REG_READ(BLANK_OFFSET_1);
+ reg_state->cursor_settings = REG_READ(CURSOR_SETTINGS);
+ reg_state->dcn_cur0_ttu_cntl0 = REG_READ(DCN_CUR0_TTU_CNTL0);
+ reg_state->dcn_cur0_ttu_cntl1 = REG_READ(DCN_CUR0_TTU_CNTL1);
+ reg_state->dcn_cur1_ttu_cntl0 = REG_READ(DCN_CUR1_TTU_CNTL0);
+ reg_state->dcn_cur1_ttu_cntl1 = REG_READ(DCN_CUR1_TTU_CNTL1);
+ reg_state->dcn_dmdat_vm_cntl = REG_READ(DCN_DMDATA_VM_CNTL);
+ reg_state->dcn_expansion_mode = REG_READ(DCN_EXPANSION_MODE);
+ reg_state->dcn_global_ttu_cntl = REG_READ(DCN_GLOBAL_TTU_CNTL);
+ reg_state->dcn_surf0_ttu_cntl0 = REG_READ(DCN_SURF0_TTU_CNTL0);
+ reg_state->dcn_surf0_ttu_cntl1 = REG_READ(DCN_SURF0_TTU_CNTL1);
+ reg_state->dcn_surf1_ttu_cntl0 = REG_READ(DCN_SURF1_TTU_CNTL0);
+ reg_state->dcn_surf1_ttu_cntl1 = REG_READ(DCN_SURF1_TTU_CNTL1);
+ reg_state->dcn_ttu_qos_wm = REG_READ(DCN_TTU_QOS_WM);
+ reg_state->dcn_vm_mx_l1_tlb_cntl = REG_READ(DCN_VM_MX_L1_TLB_CNTL);
+ reg_state->dcn_vm_system_aperture_high_addr = REG_READ(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR);
+ reg_state->dcn_vm_system_aperture_low_addr = REG_READ(DCN_VM_SYSTEM_APERTURE_LOW_ADDR);
+ reg_state->dcsurf_flip_control = REG_READ(DCSURF_FLIP_CONTROL);
+ reg_state->dcsurf_flip_control2 = REG_READ(DCSURF_FLIP_CONTROL2);
+ reg_state->dcsurf_primary_meta_surface_address = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS);
+ reg_state->dcsurf_primary_meta_surface_address_c = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_primary_meta_surface_address_high = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_primary_meta_surface_address_high_c = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_primary_surface_address = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS);
+ reg_state->dcsurf_primary_surface_address_c = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_primary_surface_address_high = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_primary_surface_address_high_c = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_secondary_meta_surface_address = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS);
+ reg_state->dcsurf_secondary_meta_surface_address_c = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_secondary_meta_surface_address_high = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_secondary_meta_surface_address_high_c = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_secondary_surface_address = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS);
+ reg_state->dcsurf_secondary_surface_address_c = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_secondary_surface_address_high = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_secondary_surface_address_high_c = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_surface_control = REG_READ(DCSURF_SURFACE_CONTROL);
+ reg_state->dcsurf_surface_earliest_inuse = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE);
+ reg_state->dcsurf_surface_earliest_inuse_c = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_C);
+ reg_state->dcsurf_surface_earliest_inuse_high = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_HIGH);
+ reg_state->dcsurf_surface_earliest_inuse_high_c = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C);
+ reg_state->dcsurf_surface_flip_interrupt = REG_READ(DCSURF_SURFACE_FLIP_INTERRUPT);
+ reg_state->dcsurf_surface_inuse = REG_READ(DCSURF_SURFACE_INUSE);
+ reg_state->dcsurf_surface_inuse_c = REG_READ(DCSURF_SURFACE_INUSE_C);
+ reg_state->dcsurf_surface_inuse_high = REG_READ(DCSURF_SURFACE_INUSE_HIGH);
+ reg_state->dcsurf_surface_inuse_high_c = REG_READ(DCSURF_SURFACE_INUSE_HIGH_C);
+ reg_state->dcsurf_surface_pitch = REG_READ(DCSURF_SURFACE_PITCH);
+ reg_state->dcsurf_surface_pitch_c = REG_READ(DCSURF_SURFACE_PITCH_C);
+ reg_state->dst_after_scaler = REG_READ(DST_AFTER_SCALER);
+ reg_state->dst_dimensions = REG_READ(DST_DIMENSIONS);
+ reg_state->dst_y_delta_drq_limit = REG_READ(DST_Y_DELTA_DRQ_LIMIT);
+ reg_state->flip_parameters_0 = REG_READ(FLIP_PARAMETERS_0);
+ reg_state->flip_parameters_1 = REG_READ(FLIP_PARAMETERS_1);
+ reg_state->flip_parameters_2 = REG_READ(FLIP_PARAMETERS_2);
+ reg_state->flip_parameters_3 = REG_READ(FLIP_PARAMETERS_3);
+ reg_state->flip_parameters_4 = REG_READ(FLIP_PARAMETERS_4);
+ reg_state->flip_parameters_5 = REG_READ(FLIP_PARAMETERS_5);
+ reg_state->flip_parameters_6 = REG_READ(FLIP_PARAMETERS_6);
+ reg_state->hubpreq_mem_pwr_ctrl = REG_READ(HUBPREQ_MEM_PWR_CTRL);
+ reg_state->hubpreq_mem_pwr_status = REG_READ(HUBPREQ_MEM_PWR_STATUS);
+ reg_state->nom_parameters_0 = REG_READ(NOM_PARAMETERS_0);
+ reg_state->nom_parameters_1 = REG_READ(NOM_PARAMETERS_1);
+ reg_state->nom_parameters_2 = REG_READ(NOM_PARAMETERS_2);
+ reg_state->nom_parameters_3 = REG_READ(NOM_PARAMETERS_3);
+ reg_state->nom_parameters_4 = REG_READ(NOM_PARAMETERS_4);
+ reg_state->nom_parameters_5 = REG_READ(NOM_PARAMETERS_5);
+ reg_state->nom_parameters_6 = REG_READ(NOM_PARAMETERS_6);
+ reg_state->nom_parameters_7 = REG_READ(NOM_PARAMETERS_7);
+ reg_state->per_line_delivery = REG_READ(PER_LINE_DELIVERY);
+ reg_state->per_line_delivery_pre = REG_READ(PER_LINE_DELIVERY_PRE);
+ reg_state->prefetch_settings = REG_READ(PREFETCH_SETTINGS);
+ reg_state->prefetch_settings_c = REG_READ(PREFETCH_SETTINGS_C);
+ reg_state->ref_freq_to_pix_freq = REG_READ(REF_FREQ_TO_PIX_FREQ);
+ reg_state->uclk_pstate_force = REG_READ(UCLK_PSTATE_FORCE);
+ reg_state->vblank_parameters_0 = REG_READ(VBLANK_PARAMETERS_0);
+ reg_state->vblank_parameters_1 = REG_READ(VBLANK_PARAMETERS_1);
+ reg_state->vblank_parameters_2 = REG_READ(VBLANK_PARAMETERS_2);
+ reg_state->vblank_parameters_3 = REG_READ(VBLANK_PARAMETERS_3);
+ reg_state->vblank_parameters_4 = REG_READ(VBLANK_PARAMETERS_4);
+ reg_state->vblank_parameters_5 = REG_READ(VBLANK_PARAMETERS_5);
+ reg_state->vblank_parameters_6 = REG_READ(VBLANK_PARAMETERS_6);
+ reg_state->vmid_settings_0 = REG_READ(VMID_SETTINGS_0);
+ reg_state->hubpret_control = REG_READ(HUBPRET_CONTROL);
+ reg_state->hubpret_interrupt = REG_READ(HUBPRET_INTERRUPT);
+ reg_state->hubpret_mem_pwr_ctrl = REG_READ(HUBPRET_MEM_PWR_CTRL);
+ reg_state->hubpret_mem_pwr_status = REG_READ(HUBPRET_MEM_PWR_STATUS);
+ reg_state->hubpret_read_line_ctrl0 = REG_READ(HUBPRET_READ_LINE_CTRL0);
+ reg_state->hubpret_read_line_ctrl1 = REG_READ(HUBPRET_READ_LINE_CTRL1);
+ reg_state->hubpret_read_line_status = REG_READ(HUBPRET_READ_LINE_STATUS);
+ reg_state->hubpret_read_line_value = REG_READ(HUBPRET_READ_LINE_VALUE);
+ reg_state->hubpret_read_line0 = REG_READ(HUBPRET_READ_LINE0);
+ reg_state->hubpret_read_line1 = REG_READ(HUBPRET_READ_LINE1);
+}
+
void hubp3_setup(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
@@ -534,6 +654,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_soft_reset = hubp1_soft_reset,
.hubp_set_flip_int = hubp1_set_flip_int,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_read_reg_state = hubp3_read_reg_state
};
bool hubp3_construct(
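The hubp3_read_reg_state hook above snapshots the HUBP register file into a plain struct for post-mortem inspection. A minimal sketch of a consumer, using only names visible in this patch (hubp_cntl and mall_config are fields filled above; DTN_INFO is the logger used elsewhere in this series; treat the dump format as an assumption):

	struct dcn_hubp_reg_state st = {0};

	if (hubp->funcs->hubp_read_reg_state) {
		hubp->funcs->hubp_read_reg_state(hubp, &st);
		DTN_INFO("HUBP%d: DCHUBP_CNTL=%08x DCHUBP_MALL_CONFIG=%08x\n",
			 hubp->inst, st.hubp_cntl, st.mall_config);
	}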
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
index b7d7adf0b58c..c767e9f4f9b3 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
@@ -243,7 +243,8 @@
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_6, REFCYC_PER_META_CHUNK_FLIP_C, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_6, REFCYC_PER_VM_REQ_VBLANK, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh)
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh)
bool hubp3_construct(
struct dcn20_hubp *hubp2,
@@ -295,10 +296,17 @@ void hubp3_dmdata_set_attributes(
void hubp3_read_state(struct hubp *hubp);
+void hubp3_read_reg_state(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state);
+
void hubp3_init(struct hubp *hubp);
void hubp3_clear_tiling(struct hubp *hubp);
+uint32_t hubp3_get_current_read_line(struct hubp *hubp);
+
+uint32_t hubp3_get_underflow_status(struct hubp *hubp);
+
#endif /* __DC_HUBP_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
index 7fd582a8a4ba..189045f85039 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
@@ -68,6 +68,18 @@ void hubp31_program_extended_blank_value(
hubp31_program_extended_blank(hubp, min_dst_y_next_start_optimized);
}
+uint32_t hubp31_get_det_config_error(struct hubp *hubp)
+{
+ uint32_t config_error = 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_GET(DCHUBP_CNTL,
+ HUBP_SEG_ALLOC_ERR_STATUS,
+ &config_error);
+
+ return config_error;
+}
+
static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -98,6 +110,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_read_reg_state = hubp3_read_reg_state,
};
bool hubp31_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h
index d688db79b750..5952c4671507 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h
@@ -228,7 +228,9 @@
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_6, REFCYC_PER_META_CHUNK_FLIP_C, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_6, REFCYC_PER_VM_REQ_VBLANK, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh)
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_SEG_ALLOC_ERR_STATUS, mask_sh)
bool hubp31_construct(
@@ -246,4 +248,6 @@ void hubp31_set_unbounded_requesting(struct hubp *hubp, bool enable);
void hubp31_program_extended_blank_value(
struct hubp *hubp, unsigned int min_dst_y_next_start_optimized);
+uint32_t hubp31_get_det_config_error(struct hubp *hubp);
+
#endif /* __DC_HUBP_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
index f3a21c623f44..a781085b046b 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
@@ -118,29 +118,7 @@ void hubp32_cursor_set_attributes(
uint32_t cursor_width = ((attr->width + 63) / 64) * 64;
uint32_t cursor_height = attr->height;
uint32_t cursor_size = cursor_width * cursor_height;
-
- hubp->curs_attr = *attr;
-
- REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
- CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
- REG_UPDATE(CURSOR_SURFACE_ADDRESS,
- CURSOR_SURFACE_ADDRESS, attr->address.low_part);
-
- REG_UPDATE_2(CURSOR_SIZE,
- CURSOR_WIDTH, attr->width,
- CURSOR_HEIGHT, attr->height);
-
- REG_UPDATE_4(CURSOR_CONTROL,
- CURSOR_MODE, attr->color_format,
- CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
- CURSOR_PITCH, hw_pitch,
- CURSOR_LINES_PER_CHUNK, lpc);
-
- REG_SET_2(CURSOR_SETTINGS, 0,
- /* no shift of the cursor HDL schedule */
- CURSOR0_DST_Y_OFFSET, 0,
- /* used to shift the cursor chunk request deadline */
- CURSOR0_CHUNK_HDL_ADJUST, 3);
+ bool use_mall_for_cursor;
switch (attr->color_format) {
case CURSOR_MODE_MONO:
@@ -158,11 +136,49 @@ void hubp32_cursor_set_attributes(
cursor_size *= 8;
break;
}
+ use_mall_for_cursor = cursor_size > 16384 ? 1 : 0;
+
+ hubp->curs_attr = *attr;
- if (cursor_size > 16384)
- REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, true);
- else
- REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false);
+ if (!hubp->cursor_offload) {
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
+ CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS,
+ CURSOR_SURFACE_ADDRESS, attr->address.low_part);
+
+ REG_UPDATE_2(CURSOR_SIZE,
+ CURSOR_WIDTH, attr->width,
+ CURSOR_HEIGHT, attr->height);
+
+ REG_UPDATE_4(CURSOR_CONTROL,
+ CURSOR_MODE, attr->color_format,
+ CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
+ CURSOR_PITCH, hw_pitch,
+ CURSOR_LINES_PER_CHUNK, lpc);
+
+ REG_SET_2(CURSOR_SETTINGS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
+
+ REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, use_mall_for_cursor);
+ }
+ hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part;
+ hubp->att.SURFACE_ADDR = attr->address.low_part;
+ hubp->att.size.bits.width = attr->width;
+ hubp->att.size.bits.height = attr->height;
+ hubp->att.cur_ctl.bits.mode = attr->color_format;
+
+ hubp->cur_rect.w = attr->width;
+ hubp->cur_rect.h = attr->height;
+
+ hubp->att.cur_ctl.bits.pitch = hw_pitch;
+ hubp->att.cur_ctl.bits.line_per_chunk = lpc;
+ hubp->att.cur_ctl.bits.cur_2x_magnify = attr->attribute_flags.bits.ENABLE_MAGNIFICATION;
+ hubp->att.settings.bits.dst_y_offset = 0;
+ hubp->att.settings.bits.chunk_hdl_adjust = 3;
+ hubp->use_mall_for_cursor = use_mall_for_cursor;
}
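For scale on the 16384-byte MALL threshold above: cursor_width is padded up to a multiple of 64 pixels, and the elided switch scales cursor_size by a per-format byte factor (the 4 bytes/pixel used below is an assumption for 32-bit formats, not visible in this hunk):

	/* Hypothetical 32-bit cursor, assumed 4 bytes/pixel:
	 *  64 x  64 * 4 = 16384 -> not above the threshold, MALL unused
	 * 128 x 128 * 4 = 65536 -> above the threshold, MALL used
	 */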
void hubp32_init(struct hubp *hubp)
{
@@ -206,6 +222,7 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_update_mall_sel = hubp32_update_mall_sel,
.hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_read_reg_state = hubp3_read_reg_state
};
bool hubp32_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
index 6d060ba12da8..79c583e258c7 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
@@ -209,6 +209,7 @@ static struct hubp_funcs dcn35_hubp_funcs = {
.dmdata_load = hubp2_dmdata_load,
.dmdata_status_done = hubp2_dmdata_status_done,
.hubp_read_state = hubp3_read_state,
+ .hubp_read_reg_state = hubp3_read_reg_state,
.hubp_clear_underflow = hubp2_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp35_init,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index baed31611477..f01eae50d02f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -86,11 +86,11 @@ void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width
REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_WIDTH, width);
}
-void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled)
+void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_enabled ? 1 : 0);
+ REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_bits);
}
void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
@@ -127,6 +127,43 @@ void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_forma
REG_UPDATE(_3DLUT_FL_CONFIG, HUBP0_3DLUT_FL_FORMAT, format);
}
+void hubp401_program_3dlut_fl_config(
+ struct hubp *hubp,
+ struct hubp_fl_3dlut_config *cfg)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ uint32_t mpc_width = (cfg->width == 17) ? 0 : 1;
+ uint32_t width = cfg->width;
+
+ if (cfg->layout == DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR)
+ width = (cfg->width == 17) ? 4916 : 35940;
+
+ REG_UPDATE_2(_3DLUT_FL_CONFIG,
+ HUBP0_3DLUT_FL_MODE, cfg->mode,
+ HUBP0_3DLUT_FL_FORMAT, cfg->format);
+
+ REG_UPDATE_2(_3DLUT_FL_BIAS_SCALE,
+ HUBP0_3DLUT_FL_BIAS, cfg->bias,
+ HUBP0_3DLUT_FL_SCALE, cfg->scale);
+
+ REG_UPDATE(HUBP_3DLUT_ADDRESS_HIGH,
+ HUBP_3DLUT_ADDRESS_HIGH, cfg->address.lut3d.addr.high_part);
+ REG_UPDATE(HUBP_3DLUT_ADDRESS_LOW,
+ HUBP_3DLUT_ADDRESS_LOW, cfg->address.lut3d.addr.low_part);
+
+ //cross bar
+ REG_UPDATE_8(HUBP_3DLUT_CONTROL,
+ HUBP_3DLUT_MPC_WIDTH, mpc_width,
+ HUBP_3DLUT_WIDTH, width,
+ HUBP_3DLUT_CROSSBAR_SELECT_CR_R, cfg->crossbar_bit_slice_cr_r,
+ HUBP_3DLUT_CROSSBAR_SELECT_Y_G, cfg->crossbar_bit_slice_y_g,
+ HUBP_3DLUT_CROSSBAR_SELECT_CB_B, cfg->crossbar_bit_slice_cb_b,
+ HUBP_3DLUT_ADDRESSING_MODE, cfg->addr_mode,
+ HUBP_3DLUT_TMZ, cfg->protection_bits,
+ HUBP_3DLUT_ENABLE, cfg->enabled ? 1 : 0);
+}
+
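A hedged note on the packed-linear width constants above, derived from the arithmetic rather than any register documentation: they look like total entry counts for 17- and 33-point LUTs, aligned up to a multiple of 4:

	/* Assumed derivation, worth checking against the register spec:
	 * 17 * 17 * 17 = 4913, rounded up to a multiple of 4 -> 4916
	 * 33 * 33 * 33 = 35937, rounded up to a multiple of 4 -> 35940
	 */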
void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -746,21 +783,23 @@ void hubp401_cursor_set_position(
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
- REG_UPDATE(CURSOR_CONTROL,
- CURSOR_ENABLE, cur_en);
+ if (!hubp->cursor_offload)
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, cur_en);
}
- REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, x_pos,
- CURSOR_Y_POSITION, y_pos);
+ if (!hubp->cursor_offload) {
+ REG_SET_2(CURSOR_POSITION, 0,
+ CURSOR_X_POSITION, x_pos,
+ CURSOR_Y_POSITION, y_pos);
- REG_SET_2(CURSOR_HOT_SPOT, 0,
- CURSOR_HOT_SPOT_X, pos->x_hotspot,
- CURSOR_HOT_SPOT_Y, pos->y_hotspot);
-
- REG_SET(CURSOR_DST_OFFSET, 0,
- CURSOR_DST_X_OFFSET, dst_x_offset);
+ REG_SET_2(CURSOR_HOT_SPOT, 0,
+ CURSOR_HOT_SPOT_X, pos->x_hotspot,
+ CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+ REG_SET(CURSOR_DST_OFFSET, 0,
+ CURSOR_DST_X_OFFSET, dst_x_offset);
+ }
/* Cursor Position Register Config */
hubp->pos.cur_ctl.bits.cur_enable = cur_en;
hubp->pos.position.bits.x_pos = pos->x;
@@ -1033,6 +1072,8 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_program_3dlut_fl_crossbar = hubp401_program_3dlut_fl_crossbar,
.hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
.hubp_clear_tiling = hubp401_clear_tiling,
+ .hubp_program_3dlut_fl_config = hubp401_program_3dlut_fl_config,
+ .hubp_read_reg_state = hubp3_read_reg_state
};
bool hubp401_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
index 6e1d4c90ddd4..4570b8016de5 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
@@ -31,7 +31,7 @@
#include "dcn30/dcn30_hubp.h"
#include "dcn31/dcn31_hubp.h"
#include "dcn32/dcn32_hubp.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
#define HUBP_3DLUT_FL_REG_LIST_DCN401(inst)\
SRI_ARR_US(_3DLUT_FL_CONFIG, HUBP, inst),\
@@ -252,7 +252,9 @@
HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_1H_P0, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P0, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_1H_P1, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P1, mask_sh)
+ HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P1, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_SEG_ALLOC_ERR_STATUS, mask_sh)
void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor);
@@ -333,7 +335,7 @@ void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
-void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled);
+void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits);
void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width);
@@ -349,6 +351,10 @@ void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_forma
void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode);
+void hubp401_program_3dlut_fl_config(
+ struct hubp *hubp,
+ struct hubp_fl_3dlut_config *cfg);
+
void hubp401_clear_tiling(struct hubp *hubp);
void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index e8730cc40edb..8fe399939220 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -48,7 +48,7 @@
#include "link_encoder.h"
#include "link_enc_cfg.h"
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
#include "dccg.h"
#include "clock_source.h"
#include "clk_mgr.h"
@@ -659,6 +659,20 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
}
}
+static void
+dce110_dac_encoder_control(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+ struct dc_bios *bios = link->ctx->dc_bios;
+ struct bp_encoder_control encoder_control = {0};
+
+ encoder_control.action = enable ? ENCODER_CONTROL_ENABLE : ENCODER_CONTROL_DISABLE;
+ encoder_control.engine_id = link->link_enc->analog_engine;
+ encoder_control.pixel_clock = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+
+ bios->funcs->encoder_control(bios, &encoder_control);
+}
+
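Unit note on the pixel_clock assignment above: pix_clk_100hz carries the pixel clock in units of 100 Hz, so dividing by 10 yields kHz, presumably the unit bp_encoder_control.pixel_clock expects.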
void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
{
enum dc_lane_count lane_count =
@@ -688,6 +702,9 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
early_control = lane_count;
tg->funcs->set_early_control(tg, early_control);
+
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ dce110_dac_encoder_control(pipe_ctx, true);
}
static enum bp_result link_transmitter_control(
@@ -1085,6 +1102,9 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
if (!pipe_ctx->stream)
return;
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ return;
+
dc = pipe_ctx->stream->ctx->dc;
clk_mgr = dc->clk_mgr;
link_hwss = get_link_hwss(pipe_ctx->stream->link, &pipe_ctx->link_res);
@@ -1121,6 +1141,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
if (!pipe_ctx || !pipe_ctx->stream)
return;
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ return;
+
dc = pipe_ctx->stream->ctx->dc;
clk_mgr = dc->clk_mgr;
link_hwss = get_link_hwss(pipe_ctx->stream->link, &pipe_ctx->link_res);
@@ -1186,13 +1209,18 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
if (dccg) {
dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
- if (dccg && dccg->funcs->set_dtbclk_dto)
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ if (!(dc->ctx->dce_version >= DCN_VERSION_3_5)) {
+ if (dccg && dccg->funcs->set_dtbclk_dto)
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ }
}
} else if (dccg && dccg->funcs->disable_symclk_se) {
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
}
+
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ dce110_dac_encoder_control(pipe_ctx, false);
}
void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
@@ -1225,7 +1253,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
return;
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
- if (!link->skip_implict_edp_power_control)
+ if (!link->skip_implict_edp_power_control && hws)
hws->funcs.edp_backlight_control(link, false);
link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
}
@@ -1267,7 +1295,7 @@ void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
pipe_ctx->stream_res.stream_enc->funcs->set_avmute(pipe_ctx->stream_res.stream_enc, enable);
}
-static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
+enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
{
switch (crtc_id) {
case CONTROLLER_ID_D0:
@@ -1287,7 +1315,7 @@ static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
}
}
-static void populate_audio_dp_link_info(
+void populate_audio_dp_link_info(
const struct pipe_ctx *pipe_ctx,
struct audio_dp_link_info *dp_link_info)
{
@@ -1379,7 +1407,7 @@ static void populate_audio_dp_link_info(
}
}
-static void build_audio_output(
+void build_audio_output(
struct dc_state *state,
const struct pipe_ctx *pipe_ctx,
struct audio_output *audio_output)
@@ -1578,6 +1606,51 @@ static enum dc_status dce110_enable_stream_timing(
return DC_OK;
}
+static void
+dce110_select_crtc_source(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+ struct dc_bios *bios = link->ctx->dc_bios;
+ struct bp_crtc_source_select crtc_source_select = {0};
+ enum engine_id engine_id = link->link_enc->preferred_engine;
+ uint8_t bit_depth;
+
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ engine_id = link->link_enc->analog_engine;
+
+ switch (pipe_ctx->stream->timing.display_color_depth) {
+ case COLOR_DEPTH_UNDEFINED:
+ bit_depth = 0;
+ break;
+ case COLOR_DEPTH_666:
+ bit_depth = 6;
+ break;
+ default:
+ case COLOR_DEPTH_888:
+ bit_depth = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bit_depth = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bit_depth = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bit_depth = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bit_depth = 16;
+ break;
+ }
+
+ crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst;
+ crtc_source_select.bit_depth = bit_depth;
+ crtc_source_select.engine_id = engine_id;
+ crtc_source_select.sink_signal = pipe_ctx->stream->signal;
+
+ bios->funcs->select_crtc_source(bios, &crtc_source_select);
+}
+
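Note that the default label in the switch above shares the COLOR_DEPTH_888 branch, so any depth not explicitly listed is programmed as 8 bpc.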
enum dc_status dce110_apply_single_controller_ctx_to_hw(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
@@ -1597,6 +1670,10 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
hws->funcs.disable_stream_gating(dc, pipe_ctx);
}
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_RGB) {
+ dce110_select_crtc_source(pipe_ctx);
+ }
+
if (pipe_ctx->stream_res.audio != NULL) {
struct audio_output audio_output = {0};
@@ -1676,7 +1753,8 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx->stream_res.tg, event_triggers, 2);
- if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal) &&
+ !dc_is_rgb_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
@@ -1684,6 +1762,19 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
+ /* Temporary workaround to perform DSC programming ahead of stream enablement
+ * for smartmux/SPRS
+ * TODO: Remove SmartMux/SPRS checks once movement of DSC programming is generalized
+ */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ ((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest)
+ || link->is_dds || link->skip_implict_edp_power_control)) &&
+ (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal)))
+ dc->link_srv->set_dsc_enable(pipe_ctx, true);
+ }
+
if (!stream->dpms_off)
dc->link_srv->set_dpms_on(context, pipe_ctx);
@@ -1897,6 +1988,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
bool can_apply_edp_fast_boot = false;
bool can_apply_seamless_boot = false;
bool keep_edp_vdd_on = false;
+ bool should_clean_dsc_block = true;
struct dc_bios *dcb = dc->ctx->dc_bios;
DC_LOGGER_INIT();
@@ -1909,10 +2001,8 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
get_edp_streams(context, edp_streams, &edp_stream_num);
- // Check fastboot support, disable on DCE8 because of blank screens
- if (edp_num && edp_stream_num && dc->ctx->dce_version != DCE_VERSION_8_0 &&
- dc->ctx->dce_version != DCE_VERSION_8_1 &&
- dc->ctx->dce_version != DCE_VERSION_8_3) {
+ /* Check fastboot support, disable on DCE 6-8 because of blank screens */
+ if (edp_num && edp_stream_num && dc->ctx->dce_version < DCE_VERSION_10_0) {
for (i = 0; i < edp_num; i++) {
edp_link = edp_links[i];
if (edp_link != edp_streams[0]->link)
@@ -1925,6 +2015,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
can_apply_edp_fast_boot = dc_validate_boot_timing(dc,
edp_stream->sink, &edp_stream->timing);
+
+ // On mux platforms the flag defaults to false.
+ // Disable fast boot during mux switching;
+ // the flag is cleared once the switch completes.
+ if (dc->is_switch_in_progress_dest && edp_link->is_dds)
+ can_apply_edp_fast_boot = false;
+
edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot;
if (can_apply_edp_fast_boot) {
DC_LOG_EVENT_LINK_TRAINING("eDP fast boot Enable\n");
@@ -1968,6 +2065,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
if (edp_with_sink_num)
edp_link_with_sink = edp_links_with_sink[0];
+ // During a mux switch, powering down the HW blocks and then enabling
+ // the link via a DPCD SET_POWER write causes a brief flash.
+ keep_edp_vdd_on |= dc->is_switch_in_progress_dest;
+
if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) {
if (edp_link_with_sink && !keep_edp_vdd_on) {
/*turn off backlight before DP_blank and encoder powered down*/
@@ -1980,9 +2081,15 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
power_down_all_hw_blocks(dc);
/* DSC could be enabled on eDP during VBIOS post.
- * To clean up dsc blocks if eDP is in link but not active.
+ * Clean up DSC blocks only if dpms_off is true for every eDP stream.
*/
- if (edp_link_with_sink && (edp_stream_num == 0))
+ for (i = 0; i < edp_stream_num; i++) {
+ if (!edp_streams[i]->dpms_off) {
+ should_clean_dsc_block = false;
+ }
+ }
+
+ if (should_clean_dsc_block)
clean_up_dsc_blocks(dc);
disable_vga_and_power_gate_all_controllers(dc);
@@ -2228,7 +2335,7 @@ static bool should_enable_fbc(struct dc *dc,
/*
* Enable FBC
*/
-static void enable_fbc(
+void enable_fbc(
struct dc *dc,
struct dc_state *context)
{
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
index 06789ac3a224..9c032e449481 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
@@ -110,5 +110,16 @@ void dce110_enable_dp_link_output(
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings);
+void build_audio_output(
+ struct dc_state *state,
+ const struct pipe_ctx *pipe_ctx,
+ struct audio_output *audio_output);
+enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id);
+void populate_audio_dp_link_info(
+ const struct pipe_ctx *pipe_ctx,
+ struct audio_dp_link_info *dp_link_info);
+void enable_fbc(
+ struct dc *dc,
+ struct dc_state *context);
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index f9ee55998b6b..fa62e40a9858 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -55,7 +55,7 @@
#include "dce/dmub_hw_lock_mgr.h"
#include "dc_trace.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#define DC_LOGGER \
@@ -327,6 +327,46 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
}
}
+ DTN_INFO("\n=======HUBP FL======\n");
+ static const char * const pLabels[] = {
+ "inst", "Enabled ", "Done ", "adr_mode ", "width ", "mpc_width ",
+ "tmz", "xbar_sel_R", "xbar_sel_G", "xbar_sel_B", "adr_hi ",
+ "adr_low", "REFCYC", "Bias", "Scale", "Mode",
+ "Format", "prefetch"};
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
+ struct dcn_fl_regs_st *fl_regs = &s->fl_regs;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
+
+ if (!s->blank_en) {
+ uint32_t values[] = {
+ pool->hubps[i]->inst,
+ fl_regs->lut_enable,
+ fl_regs->lut_done,
+ fl_regs->lut_addr_mode,
+ fl_regs->lut_width,
+ fl_regs->lut_mpc_width,
+ fl_regs->lut_tmz,
+ fl_regs->lut_crossbar_sel_r,
+ fl_regs->lut_crossbar_sel_g,
+ fl_regs->lut_crossbar_sel_b,
+ fl_regs->lut_addr_hi,
+ fl_regs->lut_addr_lo,
+ fl_regs->refcyc_3dlut_group,
+ fl_regs->lut_fl_bias,
+ fl_regs->lut_fl_scale,
+ fl_regs->lut_fl_mode,
+ fl_regs->lut_fl_format,
+ dlg_regs->dst_y_prefetch};
+
+ int num_elements = 18;
+
+ for (int j = 0; j < num_elements; j++)
+ DTN_INFO("%s \t %8xh\n", pLabels[j], values[j]);
+ }
+ }
+
DTN_INFO("\n=========RQ========\n");
DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
@@ -511,6 +551,60 @@ static void dcn10_log_color_state(struct dc *dc,
dc->caps.color.mpc.num_3dluts,
dc->caps.color.mpc.ogam_ram,
dc->caps.color.mpc.ocsc);
+ DTN_INFO("===== MPC RMCM 3DLUT =====\n");
+ static const char * const pLabels[] = {
+ "MPCC", "SIZE", "MODE", "MODE_CUR", "RD_SEL",
+ "30BIT_EN", "WR_EN_MASK", "RAM_SEL", "OUT_NORM_FACTOR", "FL_SEL",
+ "OUT_OFFSET", "OUT_SCALE", "FL_DONE", "SOFT_UNDERFLOW", "HARD_UNDERFLOW",
+ "MEM_PWR_ST", "FORCE", "DIS", "MODE"};
+
+ for (i = 0; i < pool->mpcc_count; i++) {
+ struct mpcc_state s = {0};
+
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+ if (s.opp_id != 0xf) {
+ uint32_t values[] = {
+ i,
+ s.rmcm_regs.rmcm_3dlut_size,
+ s.rmcm_regs.rmcm_3dlut_mode,
+ s.rmcm_regs.rmcm_3dlut_mode_cur,
+ s.rmcm_regs.rmcm_3dlut_read_sel,
+ s.rmcm_regs.rmcm_3dlut_30bit_en,
+ s.rmcm_regs.rmcm_3dlut_wr_en_mask,
+ s.rmcm_regs.rmcm_3dlut_ram_sel,
+ s.rmcm_regs.rmcm_3dlut_out_norm_factor,
+ s.rmcm_regs.rmcm_3dlut_fl_sel,
+ s.rmcm_regs.rmcm_3dlut_out_offset_r,
+ s.rmcm_regs.rmcm_3dlut_out_scale_r,
+ s.rmcm_regs.rmcm_3dlut_fl_done,
+ s.rmcm_regs.rmcm_3dlut_fl_soft_underflow,
+ s.rmcm_regs.rmcm_3dlut_fl_hard_underflow,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_state,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_force,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_dis,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_mode};
+
+ int num_elements = 19;
+
+ for (int j = 0; j < num_elements; j++)
+ DTN_INFO("%s \t %8xh\n", pLabels[j], values[j]);
+ }
+ }
+ DTN_INFO("\n");
+ DTN_INFO("===== MPC RMCM Shaper =====\n");
+ DTN_INFO("MPCC: CNTL LUT_MODE MODE_CUR WR_EN_MASK WR_SEL OFFSET SCALE START_B START_SEG_B END_B END_BASE_B MEM_PWR_ST FORCE DIS MODE\n");
+ for (i = 0; i < pool->mpcc_count; i++) {
+ struct mpcc_state s = {0};
+
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+ if (s.opp_id != 0xf)
+ DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x\n",
+ i, s.rmcm_regs.rmcm_cntl, s.rmcm_regs.rmcm_shaper_lut_mode, s.rmcm_regs.rmcm_shaper_mode_cur,
+ s.rmcm_regs.rmcm_shaper_lut_write_en_mask, s.rmcm_regs.rmcm_shaper_lut_write_sel, s.rmcm_regs.rmcm_shaper_offset_b,
+ s.rmcm_regs.rmcm_shaper_scale_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_seg_b,
+ s.rmcm_regs.rmcm_shaper_rama_exp_region_end_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_end_base_b, s.rmcm_regs.rmcm_shaper_mem_pwr_state,
+ s.rmcm_regs.rmcm_shaper_mem_pwr_force, s.rmcm_regs.rmcm_shaper_mem_pwr_dis, s.rmcm_regs.rmcm_shaper_mem_pwr_mode);
+ }
}
void dcn10_log_hw_state(struct dc *dc,
@@ -2151,7 +2245,7 @@ void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
if (lock)
delay_cursor_until_vupdate(dc, pipe);
- if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
+ if (pipe->stream && should_use_dmub_inbox1_lock(dc, pipe->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -2996,6 +3090,9 @@ static void dcn10_update_dchubp_dpp(
}
if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ if (dc->hwss.abort_cursor_offload_update)
+ dc->hwss.abort_cursor_offload_update(dc, pipe_ctx);
+
dc->hwss.set_cursor_attribute(pipe_ctx);
dc->hwss.set_cursor_position(pipe_ctx);
@@ -3253,7 +3350,7 @@ void dcn10_prepare_bandwidth(
context,
false);
- dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
+ dc->optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
@@ -3569,6 +3666,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
int y_plane = pipe_ctx->plane_state->dst_rect.y;
int x_pos = pos_cpy.x;
int y_pos = pos_cpy.y;
+ int clip_x = pipe_ctx->plane_state->clip_rect.x;
+ int clip_width = pipe_ctx->plane_state->clip_rect.width;
if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
@@ -3587,7 +3686,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
*/
/**
- * Translate cursor from stream space to plane space.
+ * Translate cursor and clip offset from stream space to plane space.
*
* If the cursor is scaled then we need to scale the position
* to be in the approximately correct place. We can't do anything
@@ -3604,6 +3703,10 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_state->dst_rect.width;
y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
pipe_ctx->plane_state->dst_rect.height;
+ clip_x = (clip_x - x_plane) * pipe_ctx->plane_state->src_rect.width /
+ pipe_ctx->plane_state->dst_rect.width;
+ clip_width = clip_width * pipe_ctx->plane_state->src_rect.width /
+ pipe_ctx->plane_state->dst_rect.width;
}
/**
@@ -3650,30 +3753,18 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
if (param.rotation == ROTATION_ANGLE_0) {
- int viewport_width =
- pipe_ctx->plane_res.scl_data.viewport.width;
- int viewport_x =
- pipe_ctx->plane_res.scl_data.viewport.x;
if (param.mirror) {
- if (pipe_split_on || odm_combine_on) {
- if (pos_cpy.x >= viewport_width + viewport_x) {
- pos_cpy.x = 2 * viewport_width
- - pos_cpy.x + 2 * viewport_x;
- } else {
- uint32_t temp_x = pos_cpy.x;
-
- pos_cpy.x = 2 * viewport_x - pos_cpy.x;
- if (temp_x >= viewport_x +
- (int)hubp->curs_attr.width || pos_cpy.x
- <= (int)hubp->curs_attr.width +
- pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = 2 * viewport_width - temp_x;
- }
- }
- } else {
- pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
- }
+ /*
+ * When the plane is split into multiple viewports,
+ * the combination of all viewports spans the
+ * entirety of the clip rect.
+ *
+ * Without pipe_split, viewport_width represents
+ * the full width of the clip_rect, so we can just
+ * mirror it.
+ */
+ pos_cpy.x = clip_width - pos_cpy.x + 2 * clip_x;
}
}
// Swap axis and mirror horizontally
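A worked example of the clip-rect mirror above, with hypothetical numbers: clip_x = 100 and clip_width = 800 give a clip spanning x = 100..900. A cursor at x = 150, 50 px from the left edge, maps to 800 - 150 + 2 * 100 = 850, which is 50 px from the right edge, as a horizontal mirror should.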
@@ -3743,30 +3834,17 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
}
// Mirror horizontally and vertically
else if (param.rotation == ROTATION_ANGLE_180) {
- int viewport_width =
- pipe_ctx->plane_res.scl_data.viewport.width;
- int viewport_x =
- pipe_ctx->plane_res.scl_data.viewport.x;
-
if (!param.mirror) {
- if (pipe_split_on || odm_combine_on) {
- if (pos_cpy.x >= viewport_width + viewport_x) {
- pos_cpy.x = 2 * viewport_width
- - pos_cpy.x + 2 * viewport_x;
- } else {
- uint32_t temp_x = pos_cpy.x;
-
- pos_cpy.x = 2 * viewport_x - pos_cpy.x;
- if (temp_x >= viewport_x +
- (int)hubp->curs_attr.width || pos_cpy.x
- <= (int)hubp->curs_attr.width +
- pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = temp_x + viewport_width;
- }
- }
- } else {
- pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
- }
+ /*
+ * When the plane is split into multiple viewports,
+ * the combination of all viewports spans the
+ * entirety of the clip rect.
+ *
+ * Without pipe_split, viewport_width represents
+ * the full width of the clip_rect, so we can just
+ * mirror it.
+ */
+ pos_cpy.x = clip_width - pos_cpy.x + 2 * clip_x;
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index c277df12c817..c8ff8ae85a03 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -54,7 +54,7 @@
#include "dpcd_defs.h"
#include "inc/link_enc_cfg.h"
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#define DC_LOGGER \
@@ -283,14 +283,13 @@ void dcn20_setup_gsl_group_as_lock(
}
/* at this point we want to program whether it's to enable or disable */
- if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
+ if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL) {
pipe_ctx->stream_res.tg->funcs->set_gsl(
pipe_ctx->stream_res.tg,
&gsl);
-
- pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
- pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
+ if (pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL)
+ pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
+ pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
} else
BREAK_TO_DEBUGGER();
}
@@ -615,6 +614,14 @@ void dcn20_dpp_pg_control(
* DOMAIN11_PGFSM_PWR_STATUS, pwr_status,
* 1, 1000);
*/
+
+ /* Force disable cursor on plane powerdown on DPP 5 using dpp_force_disable_cursor */
+ if (!power_on) {
+ struct dpp *dpp5 = hws->ctx->dc->res_pool->dpps[dpp_inst];
+ if (dpp5 && dpp5->funcs->dpp_force_disable_cursor)
+ dpp5->funcs->dpp_force_disable_cursor(dpp5);
+ }
+
break;
default:
BREAK_TO_DEBUGGER();
@@ -956,7 +963,7 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
- hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
+ udelay(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz));
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
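The replacement delay above approximates one frame time: h_total * 10000 / pix_clk_100hz is the line time in microseconds, which is then multiplied by v_total. A worked example with a hypothetical 1080p60 timing:

	/* h_total = 2200, v_total = 1125, pix_clk_100hz = 1485000 (148.5 MHz):
	 * line time: 2200 * 10000 / 1485000 = 14 us (integer division)
	 * total:     1125 * 14 = 15750 us, on the order of one 16.7 ms frame
	 */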
@@ -1450,7 +1457,7 @@ void dcn20_pipe_control_lock(
!flip_immediate)
dcn20_setup_gsl_group_as_lock(dc, pipe, false);
- if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
+ if (pipe->stream && should_use_dmub_inbox1_lock(dc, pipe->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -1794,6 +1801,9 @@ void dcn20_update_dchubp_dpp(
if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ if (dc->hwss.abort_cursor_offload_update)
+ dc->hwss.abort_cursor_offload_update(dc, pipe_ctx);
+
dc->hwss.set_cursor_attribute(pipe_ctx);
dc->hwss.set_cursor_position(pipe_ctx);
@@ -1971,14 +1981,6 @@ static void dcn20_program_pipe(
pipe_ctx->plane_state->update_flags.bits.hdr_mult))
hws->funcs.set_hdr_multiplier(pipe_ctx);
- if (hws->funcs.populate_mcm_luts) {
- if (pipe_ctx->plane_state) {
- hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
- pipe_ctx->plane_state->lut_bank_a);
- pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
- }
- }
-
if (pipe_ctx->plane_state &&
(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change ||
@@ -1991,10 +1993,8 @@ static void dcn20_program_pipe(
* updating on slave planes
*/
if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->update_flags.bits.plane_changed ||
- pipe_ctx->stream->update_flags.bits.out_tf ||
- (pipe_ctx->plane_state &&
- pipe_ctx->plane_state->update_flags.bits.output_tf_change))
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf)
hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/* If the pipe has been enabled or has a different opp, we
@@ -2399,10 +2399,10 @@ void dcn20_prepare_bandwidth(
}
/* program dchubbub watermarks:
- * For assigning wm_optimized_required, use |= operator since we don't want
+ * For assigning optimized_required, use |= operator since we don't want
* to clear the value if the optimize has not happened yet
*/
- dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
+ dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
@@ -2415,10 +2415,10 @@ void dcn20_prepare_bandwidth(
if (hubbub->funcs->program_compbuf_size) {
if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) {
compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes;
- dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
+ dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
} else {
compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb;
- dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
+ dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
}
hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false);
@@ -2492,7 +2492,7 @@ bool dcn20_update_bandwidth(
struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
- if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK)
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK)
return false;
/* apply updated bandwidth parameters */
@@ -2816,6 +2816,8 @@ void dcn20_reset_back_end_for_pipe(
{
struct dc_link *link = pipe_ctx->stream->link;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ struct dccg *dccg = dc->res_pool->dccg;
+ struct dtbclk_dto_params dto_params = {0};
DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
@@ -2876,6 +2878,13 @@ void dcn20_reset_back_end_for_pipe(
&pipe_ctx->link_res, pipe_ctx->stream->signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
}
+ if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) && dccg
+ && dc->ctx->dce_version >= DCN_VERSION_3_5) {
+ dto_params.otg_inst = pipe_ctx->stream_res.tg->inst;
+ dto_params.timing = &pipe_ctx->stream->timing;
+ if (dccg && dccg->funcs->set_dtbclk_dto)
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ }
}
/*
@@ -3129,7 +3138,8 @@ void dcn20_fpga_init_hw(struct dc *dc)
res_pool->dccg->funcs->dccg_init(res_pool->dccg);
//Enable ability to power gate / don't force power on permanently
- hws->funcs.enable_power_gating_plane(hws, true);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(hws, true);
// Specific to FPGA dccg and registers
REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
index 61efb15572ff..e2269211553c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
@@ -35,7 +35,7 @@
#include "hw/clk_mgr.h"
#include "dc_dmub_srv.h"
#include "abm.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER_INIT(logger)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index 37a239219dfe..81bcadf5e57e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -50,10 +50,11 @@
#include "dpcd_defs.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn30/dcn30_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
-
+#define TO_DCN_DCCG(dccg)\
+ container_of(dccg, struct dcn_dccg, base)
#define DC_LOGGER_INIT(logger)
@@ -1228,3 +1229,54 @@ void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx)
}
}
}
+
+void dcn30_get_underflow_debug_data(const struct dc *dc,
+ struct timing_generator *tg,
+ struct dc_underflow_debug_data *out_data)
+{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ if (hubbub) {
+ if (hubbub->funcs->hubbub_read_reg_state) {
+ hubbub->funcs->hubbub_read_reg_state(hubbub, out_data->hubbub_reg_state);
+ }
+ }
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ struct hubp *hubp = dc->res_pool->hubps[i];
+ struct dpp *dpp = dc->res_pool->dpps[i];
+ struct output_pixel_processor *opp = dc->res_pool->opps[i];
+ struct display_stream_compressor *dsc = dc->res_pool->dscs[i];
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct timing_generator *optc = dc->res_pool->timing_generators[i];
+ struct dccg *dccg = dc->res_pool->dccg;
+
+ if (hubp)
+ if (hubp->funcs->hubp_read_reg_state)
+ hubp->funcs->hubp_read_reg_state(hubp, out_data->hubp_reg_state[i]);
+
+ if (dpp)
+ if (dpp->funcs->dpp_read_reg_state)
+ dpp->funcs->dpp_read_reg_state(dpp, out_data->dpp_reg_state[i]);
+
+ if (opp)
+ if (opp->funcs->opp_read_reg_state)
+ opp->funcs->opp_read_reg_state(opp, out_data->opp_reg_state[i]);
+
+ if (dsc)
+ if (dsc->funcs->dsc_read_reg_state)
+ dsc->funcs->dsc_read_reg_state(dsc, out_data->dsc_reg_state[i]);
+
+ if (mpc)
+ if (mpc->funcs->mpc_read_reg_state)
+ mpc->funcs->mpc_read_reg_state(mpc, i, out_data->mpc_reg_state[i]);
+
+ if (optc)
+ if (optc->funcs->optc_read_reg_state)
+ optc->funcs->optc_read_reg_state(optc, out_data->optc_reg_state[i]);
+
+ if (dccg)
+ if (dccg->funcs->dccg_read_reg_state)
+ dccg->funcs->dccg_read_reg_state(dccg, out_data->dccg_reg_state[i]);
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
index 4b90b781c4f2..40afbbfb5b9c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
@@ -29,6 +29,7 @@
#include "hw_sequencer_private.h"
struct dc;
+struct dc_underflow_debug_data;
void dcn30_init_hw(struct dc *dc);
void dcn30_program_all_writeback_pipes_in_tree(
@@ -98,4 +99,8 @@ void dcn30_prepare_bandwidth(struct dc *dc,
void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx);
+void dcn30_get_underflow_debug_data(const struct dc *dc,
+ struct timing_generator *tg,
+ struct dc_underflow_debug_data *out_data);
+
#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
index 2ac5d54d1626..d7ff55669bac 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
@@ -110,6 +110,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.is_abm_supported = dcn21_is_abm_supported,
.wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn30_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 5ba3999991b0..d1ecdb92b072 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -45,7 +45,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn10/dcn10_hwseq.h"
#include "dcn21/dcn21_hwseq.h"
#include "inc/link_enc_cfg.h"
@@ -562,6 +562,19 @@ static void dcn31_reset_back_end_for_pipe(
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
+ /* Temporary workaround to perform DSC programming ahead of pipe reset
+ * for smartmux/SPRS
+ * TODO: Remove SmartMux/SPRS checks once movement of DSC programming is generalized
+ */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ ((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest)
+ || link->is_dds || link->skip_implict_edp_power_control)) &&
+ (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal)))
+ dc->link_srv->set_dsc_enable(pipe_ctx, false);
+ }
+
/* free acquired resources */
if (pipe_ctx->stream_res.audio) {
/*disable az_endpoint*/
@@ -697,7 +710,8 @@ bool dcn31_set_backlight_level(struct pipe_ctx *pipe_ctx,
panel_cntl->inst,
panel_cntl->pwrseq_inst);
- dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst);
+ if (backlight_level_params->control_type != BACKLIGHT_CONTROL_AMD_AUX)
+ dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
index 556f4fe57eda..5a6a459da224 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
@@ -112,6 +112,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn31_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index e68f21fd5f0f..4ee6ed610de0 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -46,7 +46,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn10/dcn10_hwseq.h"
#include "inc/link_enc_cfg.h"
#include "dcn30/dcn30_vpg.h"
@@ -108,6 +108,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
@@ -528,3 +529,75 @@ void dcn314_disable_link_output(struct dc_link *link,
apply_symclk_on_tx_off_wa(link);
}
+
+/**
+ * dcn314_dpp_pg_control - DPP power gate control.
+ *
+ * @hws: dce_hwseq reference.
+ * @dpp_inst: DPP instance reference.
+ * @power_on: true to power the DPP instance on (ungate it), false to
+ * power-gate it.
+ *
+ * Power gate or ungate the given DPP instance. If DPP power gating is
+ * disabled via debug option, force-disable the cursor in the DPP instance
+ * instead of gating it.
+ */
+void dcn314_dpp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on)
+{
+ uint32_t power_gate = power_on ? 0 : 1;
+ uint32_t pwr_status = power_on ? 0 : 2;
+
+ if (hws->ctx->dc->debug.disable_dpp_power_gate) {
+ /* Workaround for DCN314 with disabled power gating */
+ if (!power_on) {
+
+ /* Force disable cursor if power gating is disabled */
+ struct dpp *dpp = hws->ctx->dc->res_pool->dpps[dpp_inst];
+ if (dpp && dpp->funcs->dpp_force_disable_cursor)
+ dpp->funcs->dpp_force_disable_cursor(dpp);
+ }
+ return;
+ }
+ if (REG(DOMAIN1_PG_CONFIG) == 0)
+ return;
+
+ switch (dpp_inst) {
+ case 0: /* DPP0 */
+ REG_UPDATE(DOMAIN1_PG_CONFIG,
+ DOMAIN1_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN1_PG_STATUS,
+ DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 1: /* DPP1 */
+ REG_UPDATE(DOMAIN3_PG_CONFIG,
+ DOMAIN3_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN3_PG_STATUS,
+ DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 2: /* DPP2 */
+ REG_UPDATE(DOMAIN5_PG_CONFIG,
+ DOMAIN5_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN5_PG_STATUS,
+ DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 3: /* DPP3 */
+ REG_UPDATE(DOMAIN7_PG_CONFIG,
+ DOMAIN7_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN7_PG_STATUS,
+ DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
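As a rough model of the gate/ungate handshake implemented above: power_on selects both the POWER_GATE value written to the domain config and the PGFSM status that is then polled for. A self-contained sketch with register access simulated by plain variables; the 0/2 status encoding is taken from the code above, everything else is a stand-in:

#include <stdint.h>
#include <stdio.h>

static uint32_t domain_pg_config;
static uint32_t domain_pg_status;

static void dpp_pg_set(int power_on)
{
	uint32_t power_gate = power_on ? 0 : 1; /* 0 = ungated (powered on) */
	uint32_t pwr_status = power_on ? 0 : 2; /* PGFSM reports 2 when gated */

	domain_pg_config = power_gate;         /* REG_UPDATE(DOMAINx_PG_CONFIG, ...) */
	domain_pg_status = power_gate ? 2 : 0; /* hardware transition, simulated */

	while (domain_pg_status != pwr_status) /* REG_WAIT(DOMAINx_PG_STATUS, ...) */
		;

	printf("DPP domain %s\n", power_on ? "powered on" : "power gated");
}

int main(void)
{
	dpp_pg_set(1);
	dpp_pg_set(0);
	return 0;
}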
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
index 2305ad282f21..6c072d0274ea 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
@@ -47,4 +47,6 @@ void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst,
void dcn314_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal);
+void dcn314_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on);
+
#endif /* __DC_HWSS_DCN314_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
index f5112742edf9..79faab1125d4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
@@ -115,6 +115,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.calculate_pix_rate_divider = dcn314_calculate_pix_rate_divider,
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn314_private_funcs = {
@@ -141,6 +142,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.enable_power_gating_plane = dcn314_enable_power_gating_plane,
.dpp_root_clock_control = dcn314_dpp_root_clock_control,
.hubp_pg_control = dcn31_hubp_pg_control,
+ .dpp_pg_control = dcn314_dpp_pg_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn314_update_odm,
.dsc_pg_control = dcn314_dsc_pg_control,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index a0b05b9ef660..bf19ba65d09a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -49,7 +49,7 @@
#include "dcn20/dcn20_optc.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dcn32/dcn32_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "../dcn20/dcn20_hwseq.h"
#include "dc_state_priv.h"
@@ -1052,7 +1052,7 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
}
/* Enable DSC hw block */
- dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow +
+ dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding +
stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
@@ -1061,17 +1061,20 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
ASSERT(odm_dsc);
+ if (!odm_dsc)
+ continue;
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
}
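The slice arithmetic in the hunk above is the core of the ODM split: the stream's horizontal slice count must divide evenly across the OPPs, and any DSC hactive padding widens the per-slice picture width. A small model of that computation; the values are illustrative and the names are not the driver's:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int h_addressable = 3840, h_border = 0, dsc_hactive_padding = 8;
	int num_slices_h = 8, opp_cnt = 2;

	assert(num_slices_h % opp_cnt == 0);

	int slices_per_dsc = num_slices_h / opp_cnt;
	int pic_width = (h_addressable + dsc_hactive_padding + h_border) / opp_cnt;

	printf("%d DSC instances, %d slices each, pic_width %d\n",
	       opp_cnt, slices_per_dsc, pic_width);
	return 0;
}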
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index b971356d30b1..c19ef075c882 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -121,6 +121,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
.program_outstanding_updates = dcn32_program_outstanding_updates,
.wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn32_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index c814d957305a..7aa0f452e8f7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -46,7 +46,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn10/dcn10_hwseq.h"
#include "inc/link_enc_cfg.h"
#include "dcn30/dcn30_vpg.h"
@@ -113,6 +113,14 @@ static void enable_memory_low_power(struct dc *dc)
}
#endif
+static void print_pg_status(struct dc *dc, const char *debug_func, const char *debug_log)
+{
+ if (dc->debug.enable_pg_cntl_debug_logs && dc->res_pool->pg_cntl) {
+ if (dc->res_pool->pg_cntl->funcs->print_pg_status)
+ dc->res_pool->pg_cntl->funcs->print_pg_status(dc->res_pool->pg_cntl, debug_func, debug_log);
+ }
+}
+
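print_pg_status() above follows the usual DC optional-callback pattern: a debug flag and every pointer on the path are checked before the hook is invoked, so platforms without the hook get a silent no-op. A minimal stand-alone sketch of the same guard, with stand-in types:

#include <stddef.h>
#include <stdio.h>

struct pg_cntl_funcs {
	void (*print_pg_status)(const char *func, const char *log);
};

struct pg_cntl {
	const struct pg_cntl_funcs *funcs;
};

static void print_pg_status_guarded(int debug_on, struct pg_cntl *pg,
				    const char *func, const char *log)
{
	/* every level is checked, so a missing hook is simply skipped */
	if (debug_on && pg && pg->funcs && pg->funcs->print_pg_status)
		pg->funcs->print_pg_status(func, log);
}

int main(void)
{
	print_pg_status_guarded(1, NULL, __func__, ": start"); /* safe no-op */
	return 0;
}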
void dcn35_set_dmu_fgcg(struct dce_hwseq *hws, bool enable)
{
REG_UPDATE_3(DMU_CLK_CNTL,
@@ -137,6 +145,8 @@ void dcn35_init_hw(struct dc *dc)
uint32_t user_level = MAX_BACKLIGHT_LEVEL;
int i;
+ print_pg_status(dc, __func__, ": start");
+
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -200,10 +210,7 @@ void dcn35_init_hw(struct dc *dc)
/* we want to turn off all dp displays before doing detection */
dc->link_srv->blank_all_dp_displays(dc);
-/*
- if (hws->funcs.enable_power_gating_plane)
- hws->funcs.enable_power_gating_plane(dc->hwseq, true);
-*/
+
if (res_pool->hubbub && res_pool->hubbub->funcs->dchubbub_init)
res_pool->hubbub->funcs->dchubbub_init(dc->res_pool->hubbub);
/* If taking control over from VBIOS, we may want to optimize our first
@@ -236,6 +243,8 @@ void dcn35_init_hw(struct dc *dc)
}
hws->funcs.init_pipes(dc, dc->current_state);
+ print_pg_status(dc, __func__, ": after init_pipes");
+
if (dc->res_pool->hubbub->funcs->allow_self_refresh_control &&
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
@@ -312,6 +321,7 @@ void dcn35_init_hw(struct dc *dc)
if (dc->res_pool->pg_cntl->funcs->init_pg_status)
dc->res_pool->pg_cntl->funcs->init_pg_status(dc->res_pool->pg_cntl);
}
+ print_pg_status(dc, __func__, ": after init_pg_status");
}
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
@@ -354,6 +364,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
@@ -500,97 +511,6 @@ void dcn35_physymclk_root_clock_control(struct dce_hwseq *hws, unsigned int phy_
}
}
-void dcn35_dsc_pg_control(
- struct dce_hwseq *hws,
- unsigned int dsc_inst,
- bool power_on)
-{
- uint32_t power_gate = power_on ? 0 : 1;
- uint32_t pwr_status = power_on ? 0 : 2;
- uint32_t org_ip_request_cntl = 0;
-
- if (hws->ctx->dc->debug.disable_dsc_power_gate)
- return;
- if (hws->ctx->dc->debug.ignore_pg)
- return;
- REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
- if (org_ip_request_cntl == 0)
- REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
-
- switch (dsc_inst) {
- case 0: /* DSC0 */
- REG_UPDATE(DOMAIN16_PG_CONFIG,
- DOMAIN_POWER_GATE, power_gate);
-
- REG_WAIT(DOMAIN16_PG_STATUS,
- DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
- break;
- case 1: /* DSC1 */
- REG_UPDATE(DOMAIN17_PG_CONFIG,
- DOMAIN_POWER_GATE, power_gate);
-
- REG_WAIT(DOMAIN17_PG_STATUS,
- DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
- break;
- case 2: /* DSC2 */
- REG_UPDATE(DOMAIN18_PG_CONFIG,
- DOMAIN_POWER_GATE, power_gate);
-
- REG_WAIT(DOMAIN18_PG_STATUS,
- DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
- break;
- case 3: /* DSC3 */
- REG_UPDATE(DOMAIN19_PG_CONFIG,
- DOMAIN_POWER_GATE, power_gate);
-
- REG_WAIT(DOMAIN19_PG_STATUS,
- DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
- break;
- default:
- BREAK_TO_DEBUGGER();
- break;
- }
-
- if (org_ip_request_cntl == 0)
- REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
-}
-
-void dcn35_enable_power_gating_plane(struct dce_hwseq *hws, bool enable)
-{
- bool force_on = true; /* disable power gating */
- uint32_t org_ip_request_cntl = 0;
-
- if (hws->ctx->dc->debug.disable_hubp_power_gate)
- return;
- if (hws->ctx->dc->debug.ignore_pg)
- return;
- REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
- if (org_ip_request_cntl == 0)
- REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
- /* DCHUBP0/1/2/3/4/5 */
- REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- /* DPP0/1/2/3/4/5 */
- REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-
- force_on = true; /* disable power gating */
- if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate)
- force_on = false;
-
- /* DCS0/1/2/3/4 */
- REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-
-
-}
-
/* In headless boot cases, DIG may be turned
* on which causes HW/SW discrepancies.
* To avoid this, power down hardware on boot
@@ -897,8 +817,6 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
struct dpp *dpp = pipe_ctx->plane_res.dpp;
- struct dccg *dccg = dc->res_pool->dccg;
-
/* enable DCFCLK current DCHUB */
pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
@@ -906,7 +824,6 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
/* initialize HUBP on power up */
pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);
/*make sure DPPCLK is on*/
- dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, true);
dpp->funcs->dpp_dppclk_control(dpp, false, true);
/* make sure OPP_PIPE_CLOCK_EN = 1 */
pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
@@ -940,7 +857,6 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
- struct dccg *dccg = dc->res_pool->dccg;
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
@@ -959,7 +875,6 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->funcs->hubp_clk_cntl(hubp, false);
dpp->funcs->dpp_dppclk_control(dpp, false, false);
- dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, false);
hubp->power_gated = true;
@@ -1047,6 +962,15 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (dc->caps.sequential_ono) {
update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->stream_res.dsc->inst] = false;
update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->stream_res.dsc->inst] = false;
+
+ /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
+ if (!pipe_ctx->top_pipe && pipe_ctx->plane_res.hubp &&
+ pipe_ctx->plane_res.hubp->inst != pipe_ctx->stream_res.dsc->inst) {
+ for (j = 0; j < dc->res_pool->pipe_count; ++j) {
+ update_state->pg_pipe_res_update[PG_HUBP][j] = false;
+ update_state->pg_pipe_res_update[PG_DPP][j] = false;
+ }
+ }
}
}
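A compact model of the sequential-ONO rule enforced above: whenever a stream's DSC instance differs from its HUBP instance, every HUBP/DPP is kept powered. (The "must be powered" comment suggests the power on/off ordering chains through matching instance indices; that rationale is an inference, not stated in the code.)

#include <stdbool.h>
#include <stdio.h>

#define MAX_PIPES 6

static void keep_all_powered_if_mismatched(bool gate_hubp[MAX_PIPES],
					   bool gate_dpp[MAX_PIPES],
					   int hubp_inst, int dsc_inst)
{
	if (hubp_inst == dsc_inst)
		return;

	/* mismatch: no HUBP or DPP instance may be gated */
	for (int j = 0; j < MAX_PIPES; ++j)
		gate_hubp[j] = gate_dpp[j] = false;
}

int main(void)
{
	bool gate_hubp[MAX_PIPES] = { true, true, true, true, true, true };
	bool gate_dpp[MAX_PIPES]  = { true, true, true, true, true, true };

	keep_all_powered_if_mismatched(gate_hubp, gate_dpp, 0, 1);
	printf("HUBP0 gated: %d\n", gate_hubp[0]); /* 0: kept powered */
	return 0;
}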
@@ -1193,6 +1117,25 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
if (dc->caps.sequential_ono) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (new_pipe->stream_res.dsc && !new_pipe->top_pipe &&
+ update_state->pg_pipe_res_update[PG_DSC][new_pipe->stream_res.dsc->inst]) {
+ update_state->pg_pipe_res_update[PG_HUBP][new_pipe->stream_res.dsc->inst] = true;
+ update_state->pg_pipe_res_update[PG_DPP][new_pipe->stream_res.dsc->inst] = true;
+
+ /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
+ if (new_pipe->plane_res.hubp &&
+ new_pipe->plane_res.hubp->inst != new_pipe->stream_res.dsc->inst) {
+ for (j = 0; j < dc->res_pool->pipe_count; ++j) {
+ update_state->pg_pipe_res_update[PG_HUBP][j] = true;
+ update_state->pg_pipe_res_update[PG_DPP][j] = true;
+ }
+ }
+ }
+ }
+
for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
update_state->pg_pipe_res_update[PG_DPP][i]) {
@@ -1425,6 +1368,8 @@ void dcn35_prepare_bandwidth(
}
dcn20_prepare_bandwidth(dc, context);
+
+ print_pg_status(dc, __func__, ": after rcg and power up");
}
void dcn35_optimize_bandwidth(
@@ -1433,6 +1378,8 @@ void dcn35_optimize_bandwidth(
{
struct pg_block_update pg_update_state;
+ print_pg_status(dc, __func__, ": before rcg and power up");
+
dcn20_optimize_bandwidth(dc, context);
if (dc->hwss.calc_blocks_to_gate) {
@@ -1444,6 +1391,8 @@ void dcn35_optimize_bandwidth(
if (dc->hwss.root_clock_control)
dc->hwss.root_clock_control(dc, &pg_update_state, false);
}
+
+ print_pg_status(dc, __func__, ": after rcg and power up");
}
void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
@@ -1639,3 +1588,141 @@ void dcn35_hardware_release(struct dc *dc)
if (dc->hwss.hw_block_power_up)
dc->hwss.hw_block_power_up(dc, &pg_update_state);
}
+
+void dcn35_abort_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
+ return;
+
+ /*
+ * Insert a blank update that advances the write index while leaving pipe_mask 0.
+ *
+ * While the DMU is interlocked with driver full pipe programming via
+ * the DMU HW lock, if the cursor update begins to execute after a full
+ * pipe programming occurs, there are two possible issues:
+ *
+ * 1. Outdated cursor information is programmed, replacing the current update
+ * 2. The cursor update in firmware holds the cursor lock, preventing
+ * the current update from being latched atomically in the same frame
+ * as the rest of the update.
+ *
+ * This blank update, treated as a no-op, will allow the firmware to skip
+ * the programming.
+ */
+
+ if (dc->hwss.begin_cursor_offload_update)
+ dc->hwss.begin_cursor_offload_update(dc, pipe);
+
+ if (dc->hwss.commit_cursor_offload_update)
+ dc->hwss.commit_cursor_offload_update(dc, pipe);
+}
+
+void dcn35_begin_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (!top_pipe)
+ return;
+
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
+ cs->offload_streams[stream_idx].payloads[payload_idx].write_idx_start = write_idx;
+
+ if (pipe->plane_res.hubp)
+ pipe->plane_res.hubp->cursor_offload = true;
+
+ if (pipe->plane_res.dpp)
+ pipe->plane_res.dpp->cursor_offload = true;
+}
+
+void dcn35_commit_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ volatile struct dmub_shared_state_cursor_offload_stream_v1 *shared_stream;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (pipe->plane_res.hubp)
+ pipe->plane_res.hubp->cursor_offload = false;
+
+ if (pipe->plane_res.dpp)
+ pipe->plane_res.dpp->cursor_offload = false;
+
+ if (!top_pipe)
+ return;
+
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
+ shared_stream = &dc->ctx->dmub_srv->dmub->shared_state[DMUB_SHARED_STATE_FEATURE__CURSOR_OFFLOAD_V1]
+ .data.cursor_offload_v1.offload_streams[stream_idx];
+
+ shared_stream->last_write_idx = write_idx;
+
+ cs->offload_streams[stream_idx].write_idx = write_idx;
+ cs->offload_streams[stream_idx].payloads[payload_idx].write_idx_finish = write_idx;
+}
+
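The begin/commit pair above implements a simple single-producer payload ring: a slot is claimed by stamping write_idx_start with write_idx + 1, filled per pipe, then published by writing write_idx_finish and advancing write_idx. Presumably a consumer that sees mismatched start/finish stamps treats the payload as torn and skips it; the consumer side is firmware and not shown here. A stand-alone model with an assumed ring depth:

#include <stdint.h>
#include <stdio.h>

#define NUM_PAYLOADS 4 /* assumed depth; the driver uses ARRAY_SIZE(...) */

struct payload {
	uint32_t write_idx_start;
	uint32_t write_idx_finish;
	uint32_t pipe_mask;
};

struct stream_ring {
	uint32_t write_idx;
	struct payload payloads[NUM_PAYLOADS];
};

static struct payload *begin_update(struct stream_ring *r)
{
	uint32_t w = r->write_idx + 1; /* new payload (+1) */
	struct payload *p = &r->payloads[w % NUM_PAYLOADS];

	p->write_idx_start = w;
	p->pipe_mask = 0;
	return p;
}

static void commit_update(struct stream_ring *r)
{
	uint32_t w = r->write_idx + 1;

	r->payloads[w % NUM_PAYLOADS].write_idx_finish = w;
	r->write_idx = w; /* publish */
}

int main(void)
{
	struct stream_ring ring = { 0 };
	struct payload *p = begin_update(&ring);

	p->pipe_mask |= 1u << 0; /* per-pipe fill, as update_cursor_offload_pipe does */
	commit_update(&ring);
	printf("published payload %u, pipe_mask 0x%x\n", ring.write_idx, p->pipe_mask);
	return 0;
}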
+void dcn35_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ const struct hubp *hubp = pipe->plane_res.hubp;
+ const struct dpp *dpp = pipe->plane_res.dpp;
+ volatile struct dmub_cursor_offload_pipe_data_dcn30_v1 *p;
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (!top_pipe || !hubp || !dpp)
+ return;
+
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
+ p = &cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn30;
+
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR;
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = hubp->pos.position.bits.y_pos;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot;
+ p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk;
+
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable;
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode;
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode;
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en;
+ p->CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000;
+ p->CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF;
+ p->CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS = dpp->att.fp_scale_bias.bits.fp_bias;
+ p->CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE = dpp->att.fp_scale_bias.bits.fp_scale;
+
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset;
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = hubp->att.settings.bits.chunk_hdl_adjust;
+
+ cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx);
+}
+
+void dcn35_notify_cursor_offload_drr_update(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream)
+{
+ dc_dmub_srv_control_cursor_offload(dc, context, stream, true);
+}
+
+void dcn35_program_cursor_offload_now(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ dc_dmub_srv_program_cursor_now(dc, pipe);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index 0b1d6f608edd..1ff41dba556c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -101,4 +101,12 @@ bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
void dcn35_hardware_release(struct dc *dc);
+void dcn35_abort_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_begin_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_commit_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_notify_cursor_offload_drr_update(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream);
+void dcn35_program_cursor_offload_now(struct dc *dc, const struct pipe_ctx *pipe);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index a3ccf805bd16..5a66c9db2670 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -86,6 +86,12 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.set_cursor_position = dcn10_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .abort_cursor_offload_update = dcn35_abort_cursor_offload_update,
+ .begin_cursor_offload_update = dcn35_begin_cursor_offload_update,
+ .commit_cursor_offload_update = dcn35_commit_cursor_offload_update,
+ .update_cursor_offload_pipe = dcn35_update_cursor_offload_pipe,
+ .notify_cursor_offload_drr_update = dcn35_notify_cursor_offload_drr_update,
+ .program_cursor_offload_now = dcn35_program_cursor_offload_now,
.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
@@ -115,7 +121,6 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations,
- .update_dsc_pg = dcn32_update_dsc_pg,
.calc_blocks_to_gate = dcn35_calc_blocks_to_gate,
.calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate,
.hw_block_power_up = dcn35_hw_block_power_up,
@@ -128,6 +133,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.enable_plane = dcn20_enable_plane,
.update_dchubp_dpp = dcn20_update_dchubp_dpp,
.post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn35_private_funcs = {
@@ -150,7 +156,6 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
.plane_atomic_disable = dcn35_plane_atomic_disable,
//.plane_atomic_disable = dcn20_plane_atomic_disable,/*todo*/
//.hubp_pg_control = dcn35_hubp_pg_control,
- .enable_power_gating_plane = dcn35_enable_power_gating_plane,
.dpp_root_clock_control = dcn35_dpp_root_clock_control,
.dpstream_root_clock_control = dcn35_dpstream_root_clock_control,
.physymclk_root_clock_control = dcn35_physymclk_root_clock_control,
@@ -165,7 +170,6 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
.resync_fifo_dccg_dio = dcn314_resync_fifo_dccg_dio,
.is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
- .dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
.wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index 58f2be2a326b..09e60158f0b5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -114,7 +114,6 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations,
- .update_dsc_pg = dcn32_update_dsc_pg,
.calc_blocks_to_gate = dcn351_calc_blocks_to_gate,
.calc_blocks_to_ungate = dcn351_calc_blocks_to_ungate,
.hw_block_power_up = dcn351_hw_block_power_up,
@@ -123,6 +122,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.set_long_vtotal = dcn35_set_long_vblank,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn351_private_funcs = {
@@ -145,7 +145,6 @@ static const struct hwseq_private_funcs dcn351_private_funcs = {
.plane_atomic_disable = dcn35_plane_atomic_disable,
//.plane_atomic_disable = dcn20_plane_atomic_disable,/*todo*/
//.hubp_pg_control = dcn35_hubp_pg_control,
- .enable_power_gating_plane = dcn35_enable_power_gating_plane,
.dpp_root_clock_control = dcn35_dpp_root_clock_control,
.dpstream_root_clock_control = dcn35_dpstream_root_clock_control,
.physymclk_root_clock_control = dcn35_physymclk_root_clock_control,
@@ -159,7 +158,6 @@ static const struct hwseq_private_funcs dcn351_private_funcs = {
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
.is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
- .dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
.wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index c4177a9a662f..2fbc22afb89c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -2,6 +2,8 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "os_types.h"
#include "dm_services.h"
#include "basics/dc_common.h"
#include "dm_helpers.h"
@@ -23,10 +25,12 @@
#include "dpcd_defs.h"
#include "clk_mgr.h"
#include "dsc.h"
-#include "link.h"
+#include "link_service.h"
+#include "custom_float.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dcn10/dcn10_cm_common.h"
+#include "dcn10/dcn10_hubbub.h"
#include "dcn20/dcn20_optc.h"
#include "dcn30/dcn30_cm_common.h"
#include "dcn32/dcn32_hwseq.h"
@@ -34,6 +38,7 @@
#include "dcn401/dcn401_resource.h"
#include "dc_state_priv.h"
#include "link_enc_cfg.h"
+#include "../hw_sequencer.h"
#define DC_LOGGER_INIT(logger)
@@ -49,7 +54,7 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
-static void dcn401_initialize_min_clocks(struct dc *dc)
+void dcn401_initialize_min_clocks(struct dc *dc)
{
struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;
@@ -143,13 +148,8 @@ void dcn401_init_hw(struct dc *dc)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
// mark dcmode limits present if any clock has distinct AC and DC values from SMU
- dc->caps.dcmode_power_limits_present =
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dcfclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dispclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dtbclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.fclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.memclk_mhz) ||
- (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.socclk_mhz);
+ dc->caps.dcmode_power_limits_present = dc->clk_mgr->funcs->is_dc_mode_present &&
+ dc->clk_mgr->funcs->is_dc_mode_present(dc->clk_mgr);
}
// Initialize the dccg
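The rewrite above replaces six open-coded clock-table comparisons with a single optional clk_mgr callback; the && short-circuit keeps it safe on platforms that do not implement the hook. The shape of the pattern, with stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct clk_mgr;

struct clk_mgr_funcs {
	bool (*is_dc_mode_present)(struct clk_mgr *clk_mgr);
};

struct clk_mgr {
	const struct clk_mgr_funcs *funcs;
};

static bool dcmode_limits_present(struct clk_mgr *clk_mgr)
{
	/* short-circuit: an absent callback means no DC-mode limits */
	return clk_mgr->funcs->is_dc_mode_present &&
	       clk_mgr->funcs->is_dc_mode_present(clk_mgr);
}

int main(void)
{
	struct clk_mgr_funcs funcs = { 0 };
	struct clk_mgr mgr = { &funcs };

	printf("dcmode limits: %d\n", dcmode_limits_present(&mgr)); /* 0 */
	return 0;
}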
@@ -203,6 +203,9 @@ void dcn401_init_hw(struct dc *dc)
*/
struct dc_link *link = dc->links[i];
+ if (link->ep_type != DISPLAY_ENDPOINT_PHY)
+ continue;
+
link->link_enc->funcs->hw_init(link->link_enc);
/* Check for enabled DIG to identify enabled display */
@@ -396,249 +399,6 @@ static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ct
}
}
-static void dcn401_set_mcm_location_post_blend(struct dc *dc, struct pipe_ctx *pipe_ctx, bool bPostBlend)
-{
- struct mpc *mpc = dc->res_pool->mpc;
- int mpcc_id = pipe_ctx->plane_res.hubp->inst;
-
- if (!pipe_ctx->plane_state)
- return;
-
- mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
- pipe_ctx->plane_state->mcm_location = (bPostBlend) ?
- MPCC_MOVABLE_CM_LOCATION_AFTER :
- MPCC_MOVABLE_CM_LOCATION_BEFORE;
-}
-
-static void dc_get_lut_mode(
- enum dc_cm2_gpu_mem_layout layout,
- enum hubp_3dlut_fl_mode *mode,
- enum hubp_3dlut_fl_addressing_mode *addr_mode)
-{
- switch (layout) {
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
- *mode = hubp_3dlut_fl_mode_native_1;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
- *mode = hubp_3dlut_fl_mode_native_2;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
- *mode = hubp_3dlut_fl_mode_transform;
- *addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
- break;
- default:
- *mode = hubp_3dlut_fl_mode_disable;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- }
-}
-
-static void dc_get_lut_format(
- enum dc_cm2_gpu_mem_format dc_format,
- enum hubp_3dlut_fl_format *format)
-{
- switch (dc_format) {
- case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
- *format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
- break;
- case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
- *format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
- break;
- case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
- *format = hubp_3dlut_fl_format_float_fp1_5_10;
- break;
- }
-}
-
-static void dc_get_lut_xbar(
- enum dc_cm2_gpu_mem_pixel_component_order order,
- enum hubp_3dlut_fl_crossbar_bit_slice *cr_r,
- enum hubp_3dlut_fl_crossbar_bit_slice *y_g,
- enum hubp_3dlut_fl_crossbar_bit_slice *cb_b)
-{
- switch (order) {
- case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
- *cr_r = hubp_3dlut_fl_crossbar_bit_slice_32_47;
- *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
- *cb_b = hubp_3dlut_fl_crossbar_bit_slice_0_15;
- break;
- case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA:
- *cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
- *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
- *cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
- break;
- }
-}
-
-static void dc_get_lut_width(
- enum dc_cm2_gpu_mem_size size,
- enum hubp_3dlut_fl_width *width)
-{
- switch (size) {
- case DC_CM2_GPU_MEM_SIZE_333333:
- *width = hubp_3dlut_fl_width_33;
- break;
- case DC_CM2_GPU_MEM_SIZE_171717:
- *width = hubp_3dlut_fl_width_17;
- break;
- case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
- *width = hubp_3dlut_fl_width_transformed;
- break;
- }
-}
-static bool dc_is_rmcm_3dlut_supported(struct hubp *hubp, struct mpc *mpc)
-{
- if (mpc->funcs->rmcm.update_3dlut_fast_load_select &&
- mpc->funcs->rmcm.program_lut_read_write_control &&
- hubp->funcs->hubp_program_3dlut_fl_addr &&
- mpc->funcs->rmcm.program_bit_depth &&
- hubp->funcs->hubp_program_3dlut_fl_mode &&
- hubp->funcs->hubp_program_3dlut_fl_addressing_mode &&
- hubp->funcs->hubp_program_3dlut_fl_format &&
- hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
- mpc->funcs->rmcm.program_bias_scale &&
- hubp->funcs->hubp_program_3dlut_fl_crossbar &&
- hubp->funcs->hubp_program_3dlut_fl_width &&
- mpc->funcs->rmcm.update_3dlut_fast_load_select &&
- mpc->funcs->rmcm.populate_lut &&
- mpc->funcs->rmcm.program_lut_mode &&
- hubp->funcs->hubp_enable_3dlut_fl &&
- mpc->funcs->rmcm.enable_3dlut_fl)
- return true;
-
- return false;
-}
-
-bool dcn401_program_rmcm_luts(
- struct hubp *hubp,
- struct pipe_ctx *pipe_ctx,
- enum dc_cm2_transfer_func_source lut3d_src,
- struct dc_cm2_func_luts *mcm_luts,
- struct mpc *mpc,
- bool lut_bank_a,
- int mpcc_id)
-{
- struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
- union mcm_lut_params m_lut_params;
- enum MCM_LUT_XABLE shaper_xable, lut3d_xable = MCM_LUT_DISABLE, lut1d_xable;
- enum hubp_3dlut_fl_mode mode;
- enum hubp_3dlut_fl_addressing_mode addr_mode;
- enum hubp_3dlut_fl_format format = 0;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
- enum hubp_3dlut_fl_width width = 0;
- struct dc *dc = hubp->ctx->dc;
-
- bool bypass_rmcm_3dlut = false;
- bool bypass_rmcm_shaper = false;
-
- dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
-
- /* 3DLUT */
- switch (lut3d_src) {
- case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
- memset(&m_lut_params, 0, sizeof(m_lut_params));
- // Don't know what to do in this case.
- //case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
- break;
- case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
- dc_get_lut_width(mcm_luts->lut3d_data.gpu_mem_params.size, &width);
- if (!dc_is_rmcm_3dlut_supported(hubp, mpc) ||
- !mpc->funcs->rmcm.is_config_supported(width))
- return false;
-
- //0. disable fl on mpc
- mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, 0xF);
-
- //1. power down the block
- mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, false);
-
- //2. program RMCM
- //2a. 3dlut reg programming
- mpc->funcs->rmcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a,
- (!bypass_rmcm_3dlut) && lut3d_xable != MCM_LUT_DISABLE, mpcc_id);
-
- hubp->funcs->hubp_program_3dlut_fl_addr(hubp,
- mcm_luts->lut3d_data.gpu_mem_params.addr);
-
- mpc->funcs->rmcm.program_bit_depth(mpc,
- mcm_luts->lut3d_data.gpu_mem_params.bit_depth, mpcc_id);
-
- // setting native or transformed mode,
- dc_get_lut_mode(mcm_luts->lut3d_data.gpu_mem_params.layout, &mode, &addr_mode);
-
- //these program the mcm 3dlut
- hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode);
-
- hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode);
-
- //seems to be only for the MCM
- dc_get_lut_format(mcm_luts->lut3d_data.gpu_mem_params.format_params.format, &format);
- hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
-
- mpc->funcs->rmcm.program_bias_scale(mpc,
- mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias,
- mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale,
- mpcc_id);
- hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
- mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias,
- mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale);
-
- dc_get_lut_xbar(
- mcm_luts->lut3d_data.gpu_mem_params.component_order,
- &crossbar_bit_slice_cr_r,
- &crossbar_bit_slice_y_g,
- &crossbar_bit_slice_cb_b);
-
- hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
- crossbar_bit_slice_cr_r,
- crossbar_bit_slice_y_g,
- crossbar_bit_slice_cb_b);
-
- mpc->funcs->rmcm.program_3dlut_size(mpc, width, mpcc_id);
-
- mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
-
- //2b. shaper reg programming
- memset(&m_lut_params, 0, sizeof(m_lut_params));
-
- if (mcm_luts->shaper->type == TF_TYPE_HWPWL) {
- m_lut_params.pwl = &mcm_luts->shaper->pwl;
- } else if (mcm_luts->shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
- ASSERT(false);
- cm_helper_translate_curve_to_hw_format(
- dc->ctx,
- mcm_luts->shaper,
- &dpp_base->regamma_params, true);
- m_lut_params.pwl = &dpp_base->regamma_params;
- }
- if (m_lut_params.pwl) {
- mpc->funcs->rmcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id);
- mpc->funcs->rmcm.program_lut_mode(mpc, !bypass_rmcm_shaper, lut_bank_a, mpcc_id);
- } else {
- //RMCM 3dlut won't work without its shaper
- return false;
- }
-
- //3. Select the hubp connected to this RMCM
- hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
- mpc->funcs->rmcm.enable_3dlut_fl(mpc, true, mpcc_id);
-
- //4. power on the block
- if (m_lut_params.pwl)
- mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, true);
-
- break;
- default:
- return false;
- }
-
- return true;
-}
-
void dcn401_populate_mcm_luts(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_cm2_func_luts mcm_luts,
@@ -664,25 +424,6 @@ void dcn401_populate_mcm_luts(struct dc *dc,
dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
- //MCM - setting its location (Before/After) blender
- //set to post blend (true)
- dcn401_set_mcm_location_post_blend(
- dc,
- pipe_ctx,
- mcm_luts.lut3d_data.mpc_mcm_post_blend);
-
- //RMCM - 3dLUT+Shaper
- if (mcm_luts.lut3d_data.rmcm_3dlut_enable) {
- dcn401_program_rmcm_luts(
- hubp,
- pipe_ctx,
- lut3d_src,
- &mcm_luts,
- mpc,
- lut_bank_a,
- mpcc_id);
- }
-
/* 1D LUT */
if (mcm_luts.lut1d_func) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
@@ -740,15 +481,15 @@ void dcn401_populate_mcm_luts(struct dc *dc,
break;
case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
- case DC_CM2_GPU_MEM_SIZE_333333:
- width = hubp_3dlut_fl_width_33;
- break;
case DC_CM2_GPU_MEM_SIZE_171717:
width = hubp_3dlut_fl_width_17;
break;
case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
width = hubp_3dlut_fl_width_transformed;
break;
+ default:
+ //TODO: handle default case
+ break;
}
//check for support
@@ -817,11 +558,14 @@ void dcn401_populate_mcm_luts(struct dc *dc,
	//navi 4x has a bug where the red and blue channels are swapped; it needs to be worked around here
//TODO: need to make a method for get_xbar per asic OR do the workaround in program_crossbar for 4x
- dc_get_lut_xbar(
- mcm_luts.lut3d_data.gpu_mem_params.component_order,
- &crossbar_bit_slice_cr_r,
- &crossbar_bit_slice_y_g,
- &crossbar_bit_slice_cb_b);
+ switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) {
+ case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
+ default:
+ crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
+ crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
+ crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
+ break;
+ }
if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
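To make the workaround above concrete: for RGBA ordering, the generic dc_get_lut_xbar() helper (removed earlier in this patch) maps Cr/R to bits 32..47 and Cb/B to bits 0..15, while the fixed switch here deliberately swaps them to cancel the swapped R/B slices on this ASIC. A side-by-side sketch with stand-in enum names:

#include <stdio.h>

enum bit_slice { SLICE_0_15, SLICE_16_31, SLICE_32_47 };

struct xbar { enum bit_slice cr_r, y_g, cb_b; };

/* what the generic helper picked for RGBA */
static struct xbar generic_rgba(void)
{
	return (struct xbar){ SLICE_32_47, SLICE_16_31, SLICE_0_15 };
}

/* the fixed mapping above: R and B swapped to cancel the hardware bug */
static struct xbar navi4x_rgba(void)
{
	return (struct xbar){ SLICE_0_15, SLICE_16_31, SLICE_32_47 };
}

int main(void)
{
	struct xbar g = generic_rgba(), n = navi4x_rgba();

	printf("generic cr_r=%d, navi4x cr_r=%d\n", g.cr_r, n.cr_r);
	return 0;
}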
@@ -1072,9 +816,12 @@ enum dc_status dcn401_enable_stream_timing(
if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
- /* if we are borrowing from hblank, h_addressable needs to be adjusted */
- if (dc->debug.enable_hblank_borrow)
- patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow;
+ /* if we are padding, h_addressable needs to be adjusted */
+ if (dc->debug.enable_hblank_borrow) {
+ patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
+ patched_crtc_timing.h_total = patched_crtc_timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding;
+ patched_crtc_timing.pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz;
+ }
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
@@ -1640,30 +1387,30 @@ void dcn401_prepare_bandwidth(struct dc *dc,
false);
/* program dchubbub watermarks:
- * For assigning wm_optimized_required, use |= operator since we don't want
+ * For assigning optimized_required, use |= operator since we don't want
* to clear the value if the optimize has not happened yet
*/
- dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
+ dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
/* update timeout thresholds */
if (hubbub->funcs->program_arbiter) {
- dc->wm_optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false);
+ dc->optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false);
}
/* decrease compbuf size */
if (hubbub->funcs->program_compbuf_segments) {
compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
- dc->wm_optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);
+ dc->optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);
hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false);
}
if (dc->debug.fams2_config.bits.enable) {
- dcn401_fams2_global_control_lock(dc, context, true);
+ dcn401_dmub_hw_control_lock(dc, context, true);
dcn401_fams2_update_config(dc, context, false);
- dcn401_fams2_global_control_lock(dc, context, false);
+ dcn401_dmub_hw_control_lock(dc, context, false);
}
if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) {
@@ -1682,9 +1429,9 @@ void dcn401_optimize_bandwidth(
/* enable fams2 if needed */
if (dc->debug.fams2_config.bits.enable) {
- dcn401_fams2_global_control_lock(dc, context, true);
+ dcn401_dmub_hw_control_lock(dc, context, true);
dcn401_fams2_update_config(dc, context, true);
- dcn401_fams2_global_control_lock(dc, context, false);
+ dcn401_dmub_hw_control_lock(dc, context, false);
}
/* program dchubbub watermarks */
@@ -1723,14 +1470,17 @@ void dcn401_optimize_bandwidth(
}
}
-void dcn401_fams2_global_control_lock(struct dc *dc,
+void dcn401_dmub_hw_control_lock(struct dc *dc,
struct dc_state *context,
bool lock)
{
/* always use for now */
union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
- if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
+ if (!dc->ctx || !dc->ctx->dmub_srv)
+ return;
+
+ if (!dc->debug.fams2_config.bits.enable && !dc_dmub_srv_is_cursor_offload_enabled(dc))
return;
hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
@@ -1740,12 +1490,12 @@ void dcn401_fams2_global_control_lock(struct dc *dc,
dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
}
-void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params)
+void dcn401_dmub_hw_control_lock_fast(union block_sequence_params *params)
{
- struct dc *dc = params->fams2_global_control_lock_fast_params.dc;
- bool lock = params->fams2_global_control_lock_fast_params.lock;
+ struct dc *dc = params->dmub_hw_control_lock_fast_params.dc;
+ bool lock = params->dmub_hw_control_lock_fast_params.lock;
- if (params->fams2_global_control_lock_fast_params.is_required) {
+ if (params->dmub_hw_control_lock_fast_params.is_required) {
union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
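The renamed lock helper above widens its gate from FAMS2-only to any DMUB feature that needs the HW lock, currently FAMS2 or cursor offload. Reduced to its decision logic, with stand-in predicates in place of the dc/dmub plumbing:

#include <stdbool.h>
#include <stdio.h>

static bool fams2_enabled;
static bool cursor_offload_enabled;

static bool dmub_hw_lock_needed(void)
{
	/* previously, fams2_enabled alone gated the inbox0 lock command */
	return fams2_enabled || cursor_offload_enabled;
}

int main(void)
{
	cursor_offload_enabled = true;
	printf("take HW lock: %d\n", dmub_hw_lock_needed()); /* 1 */
	return 0;
}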
@@ -1852,6 +1602,143 @@ void dcn401_update_odm(struct dc *dc, struct dc_state *context,
dc->hwseq->funcs.blank_pixel_data(dc, otg_master, true);
}
+static void dcn401_add_dsc_sequence_for_odm_change(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *otg_master, struct block_sequence_state *seq_state)
+{
+ struct pipe_ctx *old_pipe;
+ struct pipe_ctx *new_pipe;
+ struct pipe_ctx *old_opp_heads[MAX_PIPES];
+ struct pipe_ctx *old_otg_master;
+ int old_opp_head_count = 0;
+ int i;
+
+ old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx];
+
+ if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) {
+ old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master,
+ &dc->current_state->res_ctx,
+ old_opp_heads);
+ } else {
+ old_otg_master = NULL;
+ }
+
+ /* Process new DSC configuration if DSC is enabled */
+ if (otg_master->stream_res.dsc && otg_master->stream->timing.flags.DSC) {
+ struct dc_stream_state *stream = otg_master->stream;
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+ int last_dsc_calc = 0;
+ bool should_use_dto_dscclk = (dc->res_pool->dccg->funcs->set_dto_dscclk != NULL) &&
+ stream->timing.pix_clk_100hz > 480000;
+
+ /* Count ODM pipes */
+ for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
+
+ int num_slices_h = stream->timing.dsc_cfg.num_slices_h / opp_cnt;
+
+ /* Step 1: Set DTO DSCCLK for main DSC if needed */
+ if (should_use_dto_dscclk) {
+ hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg,
+ otg_master->stream_res.dsc->inst, num_slices_h);
+ }
+
+ /* Step 2: Calculate and set DSC config for main DSC */
+ last_dsc_calc = *seq_state->num_steps;
+ hwss_add_dsc_calculate_and_set_config(seq_state, otg_master, true, opp_cnt);
+
+ /* Step 3: Enable main DSC block */
+ hwss_add_dsc_enable_with_opp(seq_state, otg_master);
+
+ /* Step 4: Configure and enable ODM DSC blocks */
+ for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ if (!odm_pipe->stream_res.dsc)
+ continue;
+
+ /* Set DTO DSCCLK for ODM DSC if needed */
+ if (should_use_dto_dscclk) {
+ hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg,
+ odm_pipe->stream_res.dsc->inst, num_slices_h);
+ }
+
+ /* Calculate and set DSC config for ODM DSC */
+ last_dsc_calc = *seq_state->num_steps;
+ hwss_add_dsc_calculate_and_set_config(seq_state, odm_pipe, true, opp_cnt);
+
+ /* Enable ODM DSC block */
+ hwss_add_dsc_enable_with_opp(seq_state, odm_pipe);
+ }
+
+ /* Step 5: Configure DSC in timing generator */
+ hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg,
+ &seq_state->steps[last_dsc_calc].params.dsc_calculate_and_set_config_params.dsc_optc_cfg, true);
+ } else if (otg_master->stream_res.dsc && !otg_master->stream->timing.flags.DSC) {
+ /* Disable DSC in OPTC */
+ hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg, NULL, false);
+
+ hwss_add_dsc_disconnect(seq_state, otg_master->stream_res.dsc);
+ }
+
+ /* Disable DSC for old pipes that no longer need it */
+ if (old_otg_master && old_otg_master->stream_res.dsc) {
+ for (i = 0; i < old_opp_head_count; i++) {
+ old_pipe = old_opp_heads[i];
+ new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];
+
+ /* If old pipe had DSC but new pipe doesn't, disable the old DSC */
+ if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc) {
+ /* Then disconnect DSC block */
+ hwss_add_dsc_disconnect(seq_state, old_pipe->stream_res.dsc);
+ }
+ }
+ }
+}
+
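A small model of the block-sequence idiom used throughout these *_sequence functions: steps are appended to an array instead of being executed immediately, and recording *seq_state->num_steps before an append (as last_dsc_calc does above) yields a handle for referring back to that step's parameters later. Stand-in types only:

#include <stdio.h>

enum step_id { SET_DTO_DSCCLK, DSC_CALC_AND_SET_CONFIG, DSC_ENABLE, TG_SET_DSC_CONFIG };

struct seq_state {
	enum step_id steps[16];
	int num_steps;
};

static int append_step(struct seq_state *s, enum step_id id)
{
	int idx = s->num_steps;

	s->steps[s->num_steps++] = id;
	return idx;
}

int main(void)
{
	struct seq_state s = { .num_steps = 0 };

	int last_calc = append_step(&s, DSC_CALC_AND_SET_CONFIG);
	append_step(&s, DSC_ENABLE);
	append_step(&s, TG_SET_DSC_CONFIG); /* consumes steps[last_calc] params */

	printf("built %d steps; calc step recorded at %d\n", s.num_steps, last_calc);
	return 0;
}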
+void dcn401_update_odm_sequence(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *otg_master, struct block_sequence_state *seq_state)
+{
+ struct pipe_ctx *opp_heads[MAX_PIPES];
+ int opp_inst[MAX_PIPES] = {0};
+ int opp_head_count;
+ int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false);
+ int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true);
+ int i;
+
+ opp_head_count = resource_get_opp_heads_for_otg_master(
+ otg_master, &context->res_ctx, opp_heads);
+
+ for (i = 0; i < opp_head_count; i++)
+ opp_inst[i] = opp_heads[i]->stream_res.opp->inst;
+
+ /* Add ODM combine/bypass operation to sequence */
+ if (opp_head_count > 1) {
+ hwss_add_optc_set_odm_combine(seq_state, otg_master->stream_res.tg, opp_inst,
+ opp_head_count, odm_slice_width, last_odm_slice_width);
+ } else {
+ hwss_add_optc_set_odm_bypass(seq_state, otg_master->stream_res.tg, &otg_master->stream->timing);
+ }
+
+ /* Add OPP operations to sequence */
+ for (i = 0; i < opp_head_count; i++) {
+ /* Add OPP pipe clock control operation */
+ hwss_add_opp_pipe_clock_control(seq_state, opp_heads[i]->stream_res.opp, true);
+
+ /* Add OPP program left edge extra pixel operation */
+ hwss_add_opp_program_left_edge_extra_pixel(seq_state, opp_heads[i]->stream_res.opp,
+ opp_heads[i]->stream->timing.pixel_encoding, resource_is_pipe_type(opp_heads[i], OTG_MASTER));
+ }
+
+ /* Add DSC update operations to sequence */
+ dcn401_add_dsc_sequence_for_odm_change(dc, context, otg_master, seq_state);
+
+ /* Add blank pixel data operation if needed */
+ if (!resource_is_pipe_type(otg_master, DPP_PIPE)) {
+ if (dc->hwseq->funcs.blank_pixel_data_sequence)
+ dc->hwseq->funcs.blank_pixel_data_sequence(
+ dc, otg_master, true, seq_state);
+ }
+}
+
void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
@@ -1881,20 +1768,28 @@ void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
void dcn401_hardware_release(struct dc *dc)
{
- dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
-
- /* If pstate unsupported, or still supported
- * by firmware, force it supported by dcn
- */
- if (dc->current_state) {
- if ((!dc->clk_mgr->clks.p_state_change_support ||
- dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
- dc->res_pool->hubbub->funcs->force_pstate_change_control)
- dc->res_pool->hubbub->funcs->force_pstate_change_control(
- dc->res_pool->hubbub, true, true);
-
- dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
- dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
+ if (!dc->debug.disable_force_pstate_allow_on_hw_release) {
+ dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
+
+ /* If pstate unsupported, or still supported
+ * by firmware, force it supported by dcn
+ */
+ if (dc->current_state) {
+ if ((!dc->clk_mgr->clks.p_state_change_support ||
+ dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
+ dc->res_pool->hubbub->funcs->force_pstate_change_control)
+ dc->res_pool->hubbub->funcs->force_pstate_change_control(
+ dc->res_pool->hubbub, true, true);
+
+ dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
+ }
+ } else {
+ if (dc->current_state) {
+ dc->clk_mgr->clks.p_state_change_support = false;
+ dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
+ }
+ dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
}
}
@@ -2269,14 +2164,6 @@ void dcn401_program_pipe(
pipe_ctx->plane_state->update_flags.bits.hdr_mult))
hws->funcs.set_hdr_multiplier(pipe_ctx);
- if (hws->funcs.populate_mcm_luts) {
- if (pipe_ctx->plane_state) {
- hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
- pipe_ctx->plane_state->lut_bank_a);
- pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
- }
- }
-
if (pipe_ctx->plane_state &&
(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change ||
@@ -2289,10 +2176,8 @@ void dcn401_program_pipe(
* updating on slave planes
*/
if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->update_flags.bits.plane_changed ||
- pipe_ctx->stream->update_flags.bits.out_tf ||
- (pipe_ctx->plane_state &&
- pipe_ctx->plane_state->update_flags.bits.output_tf_change))
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf)
hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/* If the pipe has been enabled or has a different opp, we
@@ -2342,6 +2227,157 @@ void dcn401_program_pipe(
}
}
+/*
+ * dcn401_program_pipe_sequence - Sequence-based version of dcn401_program_pipe
+ *
+ * Instead of calling hardware programming functions directly, this function
+ * appends sequence steps to the provided block_sequence array; the steps are
+ * executed later via hwss_execute_sequence.
+ */
+void dcn401_program_pipe_sequence(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* Only need to unblank on top pipe */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.odm ||
+ pipe_ctx->stream->update_flags.bits.abm_level) {
+ if (dc->hwseq->funcs.blank_pixel_data_sequence)
+ dc->hwseq->funcs.blank_pixel_data_sequence(dc, pipe_ctx,
+ !pipe_ctx->plane_state || !pipe_ctx->plane_state->visible,
+ seq_state);
+ }
+ }
+
+ /* Only update TG on top pipe */
+ if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
+ && !pipe_ctx->prev_odm_pipe) {
+
+ /* Step 1: Program global sync */
+ hwss_add_tg_program_global_sync(seq_state, pipe_ctx->stream_res.tg,
+ dcn401_calculate_vready_offset_for_group(pipe_ctx),
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
+
+ /* Step 2: Wait for VACTIVE state (if not phantom pipe) */
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+ hwss_add_tg_wait_for_state(seq_state, pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+
+ /* Step 3: Set VTG params */
+ hwss_add_tg_set_vtg_params(seq_state, pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
+
+ /* Step 4: Setup vupdate interrupt (if available) */
+ if (hws->funcs.setup_vupdate_interrupt)
+ dcn401_setup_vupdate_interrupt_sequence(dc, pipe_ctx, seq_state);
+ }
+
+ if (pipe_ctx->update_flags.bits.odm) {
+ if (hws->funcs.update_odm_sequence)
+ hws->funcs.update_odm_sequence(dc, context, pipe_ctx, seq_state);
+ }
+
+ if (pipe_ctx->update_flags.bits.enable) {
+ if (dc->hwss.enable_plane_sequence)
+ dc->hwss.enable_plane_sequence(dc, pipe_ctx, context, seq_state);
+ }
+
+ if (pipe_ctx->update_flags.bits.det_size) {
+ if (dc->res_pool->hubbub->funcs->program_det_size) {
+ hwss_add_hubp_program_det_size(seq_state, dc->res_pool->hubbub,
+ pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
+ }
+
+ if (dc->res_pool->hubbub->funcs->program_det_segments) {
+ hwss_add_hubp_program_det_segments(seq_state, dc->res_pool->hubbub,
+ pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
+ }
+ }
+
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
+ pipe_ctx->plane_state->update_flags.raw ||
+ pipe_ctx->stream->update_flags.raw)) {
+
+ if (dc->hwss.update_dchubp_dpp_sequence)
+ dc->hwss.update_dchubp_dpp_sequence(dc, pipe_ctx, context, seq_state);
+ }
+
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->plane_state->update_flags.bits.hdr_mult)) {
+
+ hws->funcs.set_hdr_multiplier_sequence(pipe_ctx, seq_state);
+ }
+
+ if (pipe_ctx->plane_state &&
+ (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change ||
+ pipe_ctx->plane_state->update_flags.bits.lut_3d ||
+ pipe_ctx->update_flags.bits.enable)) {
+
+ hwss_add_dpp_set_input_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->plane_state);
+ }
+
+ /* dcn10_translate_regamma_to_hw_format takes 750us to finish,
+ * so only do gamma programming when powering on; an internal
+ * memcmp avoids redundant updates on slave planes.
+ */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf) {
+ hwss_add_dpp_set_output_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->stream);
+ }
+
+ /* If the pipe has been enabled or has a different OPP, we
+ * should reprogram the FMT. This deals with cases where
+ * interaction between MPC and ODM combine on different streams
+ * causes a different pipe to be chosen for ODM combine.
+ */
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->update_flags.bits.opp_changed) {
+
+ hwss_add_opp_set_dyn_expansion(seq_state, pipe_ctx->stream_res.opp, COLOR_SPACE_YCBCR601,
+ pipe_ctx->stream->timing.display_color_depth, pipe_ctx->stream->signal);
+
+ hwss_add_opp_program_fmt(seq_state, pipe_ctx->stream_res.opp,
+ &pipe_ctx->stream->bit_depth_params, &pipe_ctx->stream->clamping);
+ }
+
+ /* Set ABM pipe after the other pipe configurations are done */
+ if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible) {
+ if (pipe_ctx->stream_res.abm) {
+ hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx);
+
+ hwss_add_abm_set_level(seq_state, pipe_ctx->stream_res.abm, pipe_ctx->stream->abm_level);
+ }
+ }
+
+ if (pipe_ctx->update_flags.bits.test_pattern_changed) {
+ struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp;
+
+ hwss_add_opp_program_bit_depth_reduction(seq_state, odm_opp, true, pipe_ctx);
+
+ hwss_add_opp_set_disp_pattern_generator(seq_state,
+ odm_opp,
+ pipe_ctx->stream_res.test_pattern_params.test_pattern,
+ pipe_ctx->stream_res.test_pattern_params.color_space,
+ pipe_ctx->stream_res.test_pattern_params.color_depth,
+ (struct tg_color){0},
+ false,
+ pipe_ctx->stream_res.test_pattern_params.width,
+ pipe_ctx->stream_res.test_pattern_params.height,
+ pipe_ctx->stream_res.test_pattern_params.offset);
+ }
+
+}
+
void dcn401_program_front_end_for_ctx(
struct dc *dc,
struct dc_state *context)
@@ -2419,7 +2455,6 @@ void dcn401_program_front_end_for_ctx(
&& context->res_ctx.pipe_ctx[i].stream)
hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
-
/* Disconnect mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
@@ -2498,11 +2533,11 @@ void dcn401_program_front_end_for_ctx(
/* Avoid underflow by check of pipe line read when adding 2nd plane. */
if (hws->wa.wait_hubpret_read_start_during_mpo_transition &&
- !pipe->top_pipe &&
- pipe->stream &&
- pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
- dc->current_state->stream_status[0].plane_count == 1 &&
- context->stream_status[0].plane_count > 1) {
+ !pipe->top_pipe &&
+ pipe->stream &&
+ pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
+ dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
}
}
@@ -2614,7 +2649,6 @@ void dcn401_post_unlock_program_front_end(
*/
if (hwseq->funcs.update_force_pstate)
dc->hwseq->funcs.update_force_pstate(dc, context);
-
/* Only program the MALL registers after all the main and phantom pipes
* are done programming.
*/
@@ -2651,7 +2685,7 @@ bool dcn401_update_bandwidth(
struct dce_hwseq *hws = dc->hwseq;
/* recalculate DML parameters */
- if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK)
+ if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK)
return false;
/* apply updated bandwidth parameters */
@@ -2902,10 +2936,12 @@ void dcn401_plane_atomic_power_down(struct dc *dc,
DC_LOGGER_INIT(dc->ctx->logger);
- REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
- if (org_ip_request_cntl == 0)
- REG_SET(DC_IP_REQUEST_CNTL, 0,
- IP_REQUEST_EN, 1);
+ if (REG(DC_IP_REQUEST_CNTL)) {
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+ }
if (hws->funcs.dpp_pg_control)
hws->funcs.dpp_pg_control(hws, dpp->inst, false);
@@ -2916,7 +2952,7 @@ void dcn401_plane_atomic_power_down(struct dc *dc,
hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
- if (org_ip_request_cntl == 0)
+ if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL))
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
@@ -2926,3 +2962,1084 @@ void dcn401_plane_atomic_power_down(struct dc *dc,
if (hws->funcs.dpp_root_clock_control)
hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}
+
+void dcn401_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ const struct hubp *hubp = pipe->plane_res.hubp;
+ const struct dpp *dpp = pipe->plane_res.dpp;
+ volatile struct dmub_cursor_offload_pipe_data_dcn401_v1 *p;
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (!top_pipe || !hubp || !dpp)
+ return;
+
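+ /* Select the next slot in this stream's small ring of payload buffers;
+ * the slot index wraps at the size of the payloads array.
+ */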
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
+ p = &cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn401;
+
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR;
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = hubp->pos.position.bits.y_pos;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot;
+ p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk;
+
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable;
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode;
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode;
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en;
+ p->CM_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000;
+ p->CM_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF;
+
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_BIAS_G_Y =
+ dpp->att.fp_scale_bias_g_y.bits.fp_bias_g_y;
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_SCALE_G_Y =
+ dpp->att.fp_scale_bias_g_y.bits.fp_scale_g_y;
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_BIAS_RB_CRCB =
+ dpp->att.fp_scale_bias_rb_crcb.bits.fp_bias_rb_crcb;
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_SCALE_RB_CRCB =
+ dpp->att.fp_scale_bias_rb_crcb.bits.fp_scale_rb_crcb;
+
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset;
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = hubp->att.settings.bits.chunk_hdl_adjust;
+ p->HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR = hubp->use_mall_for_cursor;
+
+ cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx);
+}
+
+void dcn401_plane_atomic_power_down_sequence(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ uint32_t org_ip_request_cntl = 0;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ /* Check and set DC_IP_REQUEST_CNTL if needed */
+ if (REG(DC_IP_REQUEST_CNTL)) {
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ hwss_add_dc_ip_request_cntl(seq_state, dc, true);
+ }
+
+ /* DPP power gating control */
+ hwss_add_dpp_pg_control(seq_state, hws, dpp->inst, false);
+
+ /* HUBP power gating control */
+ hwss_add_hubp_pg_control(seq_state, hws, hubp->inst, false);
+
+ /* HUBP reset */
+ hwss_add_hubp_reset(seq_state, hubp);
+
+ /* DPP reset */
+ hwss_add_dpp_reset(seq_state, dpp);
+
+ /* Restore DC_IP_REQUEST_CNTL if it was originally 0 */
+ if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL))
+ hwss_add_dc_ip_request_cntl(seq_state, dc, false);
+
+ DC_LOG_DEBUG("Power gated front end %d\n", hubp->inst);
+
+ /* DPP root clock control */
+ hwss_add_dpp_root_clock_control(seq_state, hws, dpp->inst, false);
+}
+
+/* Trigger HW to start disconnecting the plane from the stream on the next vsync, using the block sequence */
+void dcn401_plane_atomic_disconnect_sequence(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ int dpp_id = pipe_ctx->plane_res.dpp->inst;
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc *mpcc_to_remove = NULL;
+ struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
+
+ mpc_tree_params = &(opp->mpc_tree_params);
+ mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
+
+ /* Already reset */
+ if (mpcc_to_remove == NULL)
+ return;
+
+ /* Step 1: Remove MPCC from MPC tree */
+ hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, mpcc_to_remove);
+
+ /* Phantom pipes have OTG disabled by default, so MPCC_STATUS will never
+ * assert idle, so don't wait for MPCC_IDLE in the programming sequence.
+ */
+ if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM) {
+ /* Step 2: Set MPCC disconnect pending flag */
+ hwss_add_opp_set_mpcc_disconnect_pending(seq_state, opp, pipe_ctx->plane_res.mpcc_inst, true);
+ }
+
+ /* Step 3: Set optimized required flag */
+ hwss_add_dc_set_optimized_required(seq_state, dc, true);
+
+ /* Step 4: Disconnect HUBP if function exists */
+ if (hubp->funcs->hubp_disconnect)
+ hwss_add_hubp_disconnect(seq_state, hubp);
+
+ /* Step 5: Verify pstate change high if debug sanity checks are enabled */
+ if (dc->debug.sanity_checks)
+ dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
+}
+
+void dcn401_blank_pixel_data_sequence(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank,
+ struct block_sequence_state *seq_state)
+{
+ struct tg_color black_color = {0};
+ struct stream_resource *stream_res = &pipe_ctx->stream_res;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ enum dc_color_space color_space = stream->output_color_space;
+ enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR;
+ enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
+ struct pipe_ctx *odm_pipe;
+ struct rect odm_slice_src;
+
+ if (stream->link->test_pattern_enabled)
+ return;
+
+ /* get opp dpg blank color */
+ color_space_to_black_color(dc, color_space, &black_color);
+
+ if (blank) {
+ /* Set ABM immediate disable */
+ hwss_add_abm_set_immediate_disable(seq_state, dc, pipe_ctx);
+
+ if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
+ test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
+ test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
+ }
+ } else {
+ test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
+ }
+
+ odm_pipe = pipe_ctx;
+
+ /* Set display pattern generator for each ODM pipe except the last */
+ while (odm_pipe->next_odm_pipe) {
+ odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe);
+
+ hwss_add_opp_set_disp_pattern_generator(seq_state,
+ odm_pipe->stream_res.opp,
+ test_pattern,
+ test_pattern_color_space,
+ stream->timing.display_color_depth,
+ black_color,
+ true,
+ odm_slice_src.width,
+ odm_slice_src.height,
+ odm_slice_src.x);
+
+ odm_pipe = odm_pipe->next_odm_pipe;
+ }
+
+ /* Set display pattern generator for final ODM pipe */
+ odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe);
+
+ hwss_add_opp_set_disp_pattern_generator(seq_state,
+ odm_pipe->stream_res.opp,
+ test_pattern,
+ test_pattern_color_space,
+ stream->timing.display_color_depth,
+ black_color,
+ true,
+ odm_slice_src.width,
+ odm_slice_src.height,
+ odm_slice_src.x);
+
+ /* Handle ABM level setting when not blanking */
+ if (!blank) {
+ if (stream_res->abm) {
+ /* Set pipe for ABM */
+ hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx);
+
+ /* Set ABM level */
+ hwss_add_abm_set_level(seq_state, stream_res->abm, stream->abm_level);
+ }
+ }
+}
+
+void dcn401_program_all_writeback_pipes_in_tree_sequence(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ int i_wb, i_pipe;
+
+ if (!stream || stream->num_wb_info > dc->res_pool->res_cap->num_dwb)
+ return;
+
+ /* For each writeback pipe */
+ for (i_wb = 0; i_wb < stream->num_wb_info; i_wb++) {
+ /* Get direct pointer to writeback info */
+ struct dc_writeback_info *wb_info = (struct dc_writeback_info *)&stream->writeback_info[i_wb];
+ int mpcc_inst = -1;
+
+ if (wb_info->wb_enabled) {
+ /* Get the MPCC instance for writeback_source_plane */
+ for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+ if (pipe_ctx->plane_state == wb_info->writeback_source_plane) {
+ mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
+ break;
+ }
+ }
+
+ if (mpcc_inst == -1) {
+ /* Disable writeback pipe and disconnect from MPCC
+ * if source plane has been removed
+ */
+ dcn401_disable_writeback_sequence(dc, wb_info, seq_state);
+ continue;
+ }
+
+ ASSERT(wb_info->dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+
+ if (dwb->funcs->is_enabled(dwb)) {
+ /* Writeback pipe already enabled, only need to update */
+ dcn401_update_writeback_sequence(dc, wb_info, context, seq_state);
+ } else {
+ /* Enable writeback pipe and connect to MPCC */
+ dcn401_enable_writeback_sequence(dc, wb_info, context, mpcc_inst, seq_state);
+ }
+ } else {
+ /* Disable writeback pipe and disconnect from MPCC */
+ dcn401_disable_writeback_sequence(dc, wb_info, seq_state);
+ }
+ }
+}
+
+void dcn401_enable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ int mpcc_inst,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+
+ if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
+ return;
+
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+
+ /* Update DWBC with new parameters */
+ hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params);
+
+ /* Configure MCIF_WB buffer settings */
+ hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
+
+ /* Configure MCIF_WB arbitration */
+ hwss_add_mcif_wb_config_arb(seq_state, mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
+
+ /* Enable MCIF_WB */
+ hwss_add_mcif_wb_enable(seq_state, mcif_wb);
+
+ /* Set DWB MUX to connect writeback to MPCC */
+ hwss_add_mpc_set_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst, mpcc_inst);
+
+ /* Enable DWBC */
+ hwss_add_dwbc_enable(seq_state, dwb, &wb_info->dwb_params);
+}
+
+void dcn401_disable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+
+ if (wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
+ return;
+
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+
+ /* Disable DWBC */
+ hwss_add_dwbc_disable(seq_state, dwb);
+
+ /* Disable DWB MUX */
+ hwss_add_mpc_disable_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst);
+
+ /* Disable MCIF_WB */
+ hwss_add_mcif_wb_disable(seq_state, mcif_wb);
+}
+
+void dcn401_update_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+
+ if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
+ return;
+
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+
+ /* Update writeback pipe */
+ hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params);
+
+ /* Update MCIF_WB buffer settings if needed */
+ hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
+}
+
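+/* Return the 1-based index of a free GSL group, or 0 if all three are in use. */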
+static int find_free_gsl_group(const struct dc *dc)
+{
+ if (dc->res_pool->gsl_groups.gsl_0 == 0)
+ return 1;
+ if (dc->res_pool->gsl_groups.gsl_1 == 0)
+ return 2;
+ if (dc->res_pool->gsl_groups.gsl_2 == 0)
+ return 3;
+
+ return 0;
+}
+
+void dcn401_setup_gsl_group_as_lock_sequence(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable,
+ struct block_sequence_state *seq_state)
+{
+ struct gsl_params gsl;
+ int group_idx;
+
+ memset(&gsl, 0, sizeof(struct gsl_params));
+
+ if (enable) {
+ /* Return if a group is already assigned: GSL set up for a vsync
+ * flip is unassigned afterwards, so it can't be "left over".
+ */
+ if (pipe_ctx->stream_res.gsl_group > 0)
+ return;
+
+ group_idx = find_free_gsl_group(dc);
+ ASSERT(group_idx != 0);
+ pipe_ctx->stream_res.gsl_group = group_idx;
+
+ /* set gsl group reg field and mark resource used */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 1;
+ dc->res_pool->gsl_groups.gsl_0 = 1;
+ break;
+ case 2:
+ gsl.gsl1_en = 1;
+ dc->res_pool->gsl_groups.gsl_1 = 1;
+ break;
+ case 3:
+ gsl.gsl2_en = 1;
+ dc->res_pool->gsl_groups.gsl_2 = 1;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return; /* invalid case */
+ }
+ gsl.gsl_master_en = 1;
+ } else {
+ group_idx = pipe_ctx->stream_res.gsl_group;
+ if (group_idx == 0)
+ return; /* if not in use, just return */
+
+ pipe_ctx->stream_res.gsl_group = 0;
+
+ /* unset gsl group reg field and mark resource free */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 0;
+ dc->res_pool->gsl_groups.gsl_0 = 0;
+ break;
+ case 2:
+ gsl.gsl1_en = 0;
+ dc->res_pool->gsl_groups.gsl_1 = 0;
+ break;
+ case 3:
+ gsl.gsl2_en = 0;
+ dc->res_pool->gsl_groups.gsl_2 = 0;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ gsl.gsl_master_en = 0;
+ }
+
+ hwss_add_tg_set_gsl(seq_state, pipe_ctx->stream_res.tg, gsl);
+ hwss_add_tg_set_gsl_source_select(seq_state, pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
+}
+
+void dcn401_disable_plane_sequence(
+ struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
+ struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
+
+ if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
+ return;
+
+ /* Wait for MPCC disconnect */
+ if (dc->hwss.wait_for_mpcc_disconnect_sequence)
+ dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, pipe_ctx, seq_state);
+
+ /* In the flip-immediate case with pipe splitting, GSL is used for
+ * synchronization, so it must be disabled when the plane is disabled.
+ */
+ if (pipe_ctx->stream_res.gsl_group != 0)
+ dcn401_setup_gsl_group_as_lock_sequence(dc, pipe_ctx, false, seq_state);
+
+ /* Update HUBP mall sel */
+ if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs->hubp_update_mall_sel)
+ hwss_add_hubp_update_mall_sel(seq_state, pipe_ctx->plane_res.hubp, 0, false);
+
+ /* Set flip control GSL */
+ hwss_add_hubp_set_flip_control_gsl(seq_state, pipe_ctx->plane_res.hubp, false);
+
+ /* HUBP clock control */
+ hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, false);
+
+ /* DPP clock control */
+ hwss_add_dpp_dppclk_control(seq_state, pipe_ctx->plane_res.dpp, false, false);
+
+ /* Plane atomic power down */
+ if (dc->hwseq->funcs.plane_atomic_power_down_sequence)
+ dc->hwseq->funcs.plane_atomic_power_down_sequence(dc, pipe_ctx->plane_res.dpp,
+ pipe_ctx->plane_res.hubp, seq_state);
+
+ pipe_ctx->stream = NULL;
+ memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
+ memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
+ pipe_ctx->top_pipe = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ pipe_ctx->prev_odm_pipe = NULL;
+ pipe_ctx->next_odm_pipe = NULL;
+ pipe_ctx->plane_state = NULL;
+
+ /* Turn back off the phantom OTG after the phantom plane is fully disabled */
+ if (is_phantom && tg && tg->funcs->disable_phantom_crtc)
+ hwss_add_disable_phantom_crtc(seq_state, tg);
+}
+
+void dcn401_post_unlock_reset_opp_sequence(
+ struct dc *dc,
+ struct pipe_ctx *opp_head,
+ struct block_sequence_state *seq_state)
+{
+ struct display_stream_compressor *dsc = opp_head->stream_res.dsc;
+ struct dccg *dccg = dc->res_pool->dccg;
+
+ /* Wait until all DPP pipes in the current MPC blending tree complete
+ * their double-buffered disconnection before resetting the OPP.
+ */
+ if (dc->hwss.wait_for_mpcc_disconnect_sequence)
+ dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, opp_head, seq_state);
+
+ if (dsc) {
+ bool *is_ungated = NULL;
+ /* Check DSC power gate status */
+ if (dc->hwseq && dc->hwseq->funcs.dsc_pg_status)
+ hwss_add_dsc_pg_status(seq_state, dc->hwseq, dsc->inst, false);
+
+ /* Seamless-update specific: postpone the non-double-buffered DSCCLK
+ * disable logic to the post-unlock sequence, after DSC has been
+ * disconnected from the OPP but not yet power gated.
+ */
+
+ /* DSC wait disconnect pending clear */
+ hwss_add_dsc_wait_disconnect_pending_clear(seq_state, dsc, is_ungated);
+
+ /* DSC disable */
+ hwss_add_dsc_disable(seq_state, dsc, is_ungated);
+
+ /* Set reference DSCCLK */
+ if (dccg && dccg->funcs->set_ref_dscclk)
+ hwss_add_dccg_set_ref_dscclk(seq_state, dccg, dsc->inst, 0);
+ }
+}
+
+void dcn401_dc_ip_request_cntl(struct dc *dc, bool enable)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (REG(DC_IP_REQUEST_CNTL))
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, enable ? 1 : 0);
+}
+
+void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ uint32_t org_ip_request_cntl = 0;
+
+ if (!pipe_ctx->plane_res.dpp || !pipe_ctx->plane_res.hubp || !pipe_ctx->stream_res.opp)
+ return;
+
+ if (REG(DC_IP_REQUEST_CNTL))
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+
+ /* Step 1: DPP root clock control - enable clock */
+ if (hws->funcs.dpp_root_clock_control)
+ hwss_add_dpp_root_clock_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true);
+
+ /* Step 2: Enable DC IP request (if needed) */
+ if (hws->funcs.dc_ip_request_cntl)
+ hwss_add_dc_ip_request_cntl(seq_state, dc, true);
+
+ /* Step 3: DPP power gating control - power on */
+ if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.dpp_pg_control)
+ hwss_add_dpp_pg_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true);
+
+ /* Step 4: HUBP power gating control - power on */
+ if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.hubp_pg_control)
+ hwss_add_hubp_pg_control(seq_state, hws, pipe_ctx->plane_res.hubp->inst, true);
+
+ /* Step 5: Disable DC IP request (restore state) */
+ if (org_ip_request_cntl == 0 && hws->funcs.dc_ip_request_cntl)
+ hwss_add_dc_ip_request_cntl(seq_state, dc, false);
+
+ /* Step 6: HUBP clock control - enable DCFCLK */
+ if (pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl)
+ hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, true);
+
+ /* Step 7: HUBP initialization */
+ if (pipe_ctx->plane_res.hubp->funcs->hubp_init)
+ hwss_add_hubp_init(seq_state, pipe_ctx->plane_res.hubp);
+
+ /* Step 8: OPP pipe clock control - enable */
+ if (pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control)
+ hwss_add_opp_pipe_clock_control(seq_state, pipe_ctx->stream_res.opp, true);
+
+ /* Step 9: VM system aperture settings */
+ if (dc->vm_pa_config.valid && pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings) {
+ hwss_add_hubp_set_vm_system_aperture_settings(seq_state, pipe_ctx->plane_res.hubp, 0,
+ dc->vm_pa_config.system_aperture.start_addr, dc->vm_pa_config.system_aperture.end_addr);
+ }
+
+ /* Step 10: Flip interrupt setup */
+ if (!pipe_ctx->top_pipe
+ && pipe_ctx->plane_state
+ && pipe_ctx->plane_state->flip_int_enabled
+ && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int) {
+ hwss_add_hubp_set_flip_int(seq_state, pipe_ctx->plane_res.hubp);
+ }
+}
+
+void dcn401_update_dchubp_dpp_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct dccg *dccg = dc->res_pool->dccg;
+ bool viewport_changed = false;
+ enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe_ctx);
+
+ if (!hubp || !dpp || !plane_state)
+ return;
+
+ /* Step 1: DPP DPPCLK control */
+ if (pipe_ctx->update_flags.bits.dppclk)
+ hwss_add_dpp_dppclk_control(seq_state, dpp, false, true);
+
+ /* Step 2: DCCG update DPP DTO */
+ if (pipe_ctx->update_flags.bits.enable)
+ hwss_add_dccg_update_dpp_dto(seq_state, dccg, dpp->inst, pipe_ctx->plane_res.bw.dppclk_khz);
+
+ /* Step 3: HUBP VTG selection */
+ if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
+ hwss_add_hubp_vtg_sel(seq_state, hubp, pipe_ctx->stream_res.tg->inst);
+
+ /* Step 4: HUBP setup (choose setup2 or setup) */
+ if (hubp->funcs->hubp_setup2) {
+ hwss_add_hubp_setup2(seq_state, hubp, &pipe_ctx->hubp_regs,
+ &pipe_ctx->global_sync, &pipe_ctx->stream->timing);
+ } else if (hubp->funcs->hubp_setup) {
+ hwss_add_hubp_setup(seq_state, hubp, &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs, &pipe_ctx->rq_regs, &pipe_ctx->pipe_dlg_param);
+ }
+ }
+
+ /* Step 5: Set unbounded requesting */
+ if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting)
+ hwss_add_hubp_set_unbounded_requesting(seq_state, hubp, pipe_ctx->unbounded_req);
+
+ /* Step 6: HUBP interdependent setup */
+ if (pipe_ctx->update_flags.bits.hubp_interdependent) {
+ if (hubp->funcs->hubp_setup_interdependent2)
+ hwss_add_hubp_setup_interdependent2(seq_state, hubp, &pipe_ctx->hubp_regs);
+ else if (hubp->funcs->hubp_setup_interdependent)
+ hwss_add_hubp_setup_interdependent(seq_state, hubp, &pipe_ctx->dlg_regs, &pipe_ctx->ttu_regs);
+ }
+
+ /* Step 7: DPP setup - input CSC and format setup */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.input_csc_change ||
+ plane_state->update_flags.bits.color_space_change ||
+ plane_state->update_flags.bits.coeff_reduction_change) {
+ hwss_add_dpp_setup_dpp(seq_state, pipe_ctx);
+
+ /* Step 8: DPP cursor matrix setup */
+ if (dpp->funcs->set_cursor_matrix) {
+ hwss_add_dpp_set_cursor_matrix(seq_state, dpp, plane_state->color_space,
+ &plane_state->cursor_csc_color_matrix);
+ }
+
+ /* Step 9: DPP program bias and scale */
+ if (dpp->funcs->dpp_program_bias_and_scale)
+ hwss_add_dpp_program_bias_and_scale(seq_state, pipe_ctx);
+ }
+
+ /* Step 10: MPCC updates */
+ if (pipe_ctx->update_flags.bits.mpcc ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.global_alpha_change ||
+ plane_state->update_flags.bits.per_pixel_alpha_change) {
+
+ /* Check if update_mpcc_sequence is implemented and prefer it over single MPC_UPDATE_MPCC step */
+ if (hws->funcs.update_mpcc_sequence)
+ hws->funcs.update_mpcc_sequence(dc, pipe_ctx, seq_state);
+ }
+
+ /* Step 11: DPP scaler setup */
+ if (pipe_ctx->update_flags.bits.scaler ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.position_change ||
+ plane_state->update_flags.bits.per_pixel_alpha_change ||
+ pipe_ctx->stream->update_flags.bits.scaling) {
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
+ ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_36BPP);
+ hwss_add_dpp_set_scaler(seq_state, pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
+ }
+
+ /* Step 12: HUBP viewport programming */
+ if (pipe_ctx->update_flags.bits.viewport ||
+ (context == dc->current_state && plane_state->update_flags.bits.position_change) ||
+ (context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
+ (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
+ hwss_add_hubp_mem_program_viewport(seq_state, hubp,
+ &pipe_ctx->plane_res.scl_data.viewport, &pipe_ctx->plane_res.scl_data.viewport_c);
+ viewport_changed = true;
+ }
+
+ /* Step 13: HUBP program mcache if available */
+ if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
+ hwss_add_hubp_program_mcache_id(seq_state, hubp, &pipe_ctx->mcache_regs);
+
+ /* Step 14: Cursor attribute setup */
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
+ pipe_ctx->update_flags.bits.scaler || viewport_changed) &&
+ pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+
+ hwss_add_abort_cursor_offload_update(seq_state, dc, pipe_ctx);
+
+ hwss_add_set_cursor_attribute(seq_state, dc, pipe_ctx);
+
+ /* Step 15: Cursor position setup */
+ hwss_add_set_cursor_position(seq_state, dc, pipe_ctx);
+
+ /* Step 16: Cursor SDR white level */
+ if (dc->hwss.set_cursor_sdr_white_level)
+ hwss_add_set_cursor_sdr_white_level(seq_state, dc, pipe_ctx);
+ }
+
+ /* Step 17: Gamut remap and output CSC */
+ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.gamut_remap ||
+ plane_state->update_flags.bits.gamut_remap_change ||
+ pipe_ctx->stream->update_flags.bits.out_csc) {
+
+ /* Gamut remap */
+ hwss_add_dpp_program_gamut_remap(seq_state, pipe_ctx);
+
+ /* Output CSC */
+ hwss_add_program_output_csc(seq_state, dc, pipe_ctx, pipe_ctx->stream->output_color_space,
+ pipe_ctx->stream->csc_color_matrix.matrix, hubp->opp_id);
+ }
+
+ /* Step 18: HUBP surface configuration */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->update_flags.bits.opp_changed ||
+ plane_state->update_flags.bits.pixel_format_change ||
+ plane_state->update_flags.bits.horizontal_mirror_change ||
+ plane_state->update_flags.bits.rotation_change ||
+ plane_state->update_flags.bits.swizzle_change ||
+ plane_state->update_flags.bits.dcc_change ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.plane_size_change) {
+ struct plane_size size = plane_state->plane_size;
+
+ size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
+ hwss_add_hubp_program_surface_config(seq_state, hubp,
+ plane_state->format, &plane_state->tiling_info, size,
+ plane_state->rotation, &plane_state->dcc,
+ plane_state->horizontal_mirror, 0);
+ hubp->power_gated = false;
+ }
+
+ /* Step 19: Update plane address (with SubVP support) */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.addr_update) {
+
+ /* SubVP save surface address if needed */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_mall_type == SUBVP_MAIN) {
+ hwss_add_dmub_subvp_save_surf_addr(seq_state, dc->ctx->dmub_srv,
+ &pipe_ctx->plane_state->address, pipe_ctx->subvp_index);
+ }
+
+ /* Update plane address */
+ hwss_add_hubp_update_plane_addr(seq_state, dc, pipe_ctx);
+ }
+
+ /* Step 20: HUBP set blank - enable plane */
+ if (pipe_ctx->update_flags.bits.enable)
+ hwss_add_hubp_set_blank(seq_state, hubp, false);
+
+ /* Step 21: Phantom HUBP post enable */
+ if (pipe_mall_type == SUBVP_PHANTOM && hubp->funcs->phantom_hubp_post_enable)
+ hwss_add_phantom_hubp_post_enable(seq_state, hubp);
+}
+
+void dcn401_update_mpcc_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct mpcc_blnd_cfg blnd_cfg = {0};
+ bool per_pixel_alpha;
+ int mpcc_id;
+ struct mpcc *new_mpcc;
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
+
+ if (!hubp || !pipe_ctx->plane_state)
+ return;
+
+ per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
+
+ /* Initialize blend configuration */
+ blnd_cfg.overlap_only = false;
+ blnd_cfg.global_gain = 0xff;
+
+ if (per_pixel_alpha) {
+ blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha;
+ if (pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ }
+ } else {
+ blnd_cfg.pre_multiplied_alpha = false;
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+ }
+
+ if (pipe_ctx->plane_state->global_alpha)
+ blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
+ else
+ blnd_cfg.global_alpha = 0xff;
+
+ blnd_cfg.background_color_bpc = 4;
+ blnd_cfg.bottom_gain_mode = 0;
+ blnd_cfg.top_gain = 0x1f000;
+ blnd_cfg.bottom_inside_gain = 0x1f000;
+ blnd_cfg.bottom_outside_gain = 0x1f000;
+
+ if (pipe_ctx->plane_state->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
+ blnd_cfg.pre_multiplied_alpha = false;
+
+ /* MPCC instance is equal to HUBP instance */
+ mpcc_id = hubp->inst;
+
+ /* Step 1: Update blending if no full update needed */
+ if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
+ !pipe_ctx->update_flags.bits.mpcc) {
+
+ /* Update blending configuration */
+ hwss_add_mpc_update_blending(seq_state, mpc, blnd_cfg, mpcc_id);
+
+ /* Update visual confirm color */
+ hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id);
+ return;
+ }
+
+ /* Step 2: Get existing MPCC for DPP */
+ new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
+
+ /* Step 3: Remove MPCC if being used */
+ if (new_mpcc != NULL) {
+ hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, new_mpcc);
+ } else {
+ /* Step 4: Assert MPCC idle (debug only) */
+ if (dc->debug.sanity_checks)
+ hwss_add_mpc_assert_idle_mpcc(seq_state, mpc, mpcc_id);
+ }
+
+ /* Step 5: Insert new plane into MPC tree */
+ hwss_add_mpc_insert_plane(seq_state, mpc, mpc_tree_params, blnd_cfg, NULL, NULL, hubp->inst, mpcc_id);
+
+ /* Step 6: Update visual confirm color */
+ hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id);
+
+ /* Step 7: Set HUBP OPP and MPCC IDs */
+ hubp->opp_id = pipe_ctx->stream_res.opp->inst;
+ hubp->mpcc_id = mpcc_id;
+}
+
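+/* Find the HUBP whose instance matches mpcc_inst (HUBP and MPCC instances map 1:1). */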
+static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
+{
+ int i;
+
+ for (i = 0; i < res_pool->pipe_count; i++) {
+ if (res_pool->hubps[i]->inst == mpcc_inst)
+ return res_pool->hubps[i];
+ }
+ ASSERT(false);
+ return NULL;
+}
+
+void dcn401_wait_for_mpcc_disconnect_sequence(
+ struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ int mpcc_inst;
+
+ if (dc->debug.sanity_checks)
+ dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
+
+ if (!pipe_ctx->stream_res.opp)
+ return;
+
+ for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
+ if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
+ struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+
+ if (pipe_ctx->stream_res.tg &&
+ pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) {
+ hwss_add_mpc_assert_idle_mpcc(seq_state, res_pool->mpc, mpcc_inst);
+ }
+ pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+ if (hubp)
+ hwss_add_hubp_set_blank(seq_state, hubp, true);
+ }
+ }
+
+ if (dc->debug.sanity_checks)
+ dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
+}
+
+void dcn401_setup_vupdate_interrupt_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
+
+ if (start_line < 0)
+ start_line = 0;
+
+ if (tg->funcs->setup_vertical_interrupt2)
+ hwss_add_tg_setup_vertical_interrupt2(seq_state, tg, start_line);
+}
+
+void dcn401_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
+ uint32_t hw_mult = 0x1f000; /* 1.0 default multiplier */
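+ /* 0x1f000 is 1.0 in this custom float format: a zero mantissa with an
+ * exponent field of 31, the bias for a 6-bit exponent (assuming the
+ * usual excess-(2^(n-1)-1) convention).
+ */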
+ struct custom_float_format fmt;
+
+ fmt.exponenta_bits = 6;
+ fmt.mantissa_bits = 12;
+ fmt.sign = true;
+
+ if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) /* check != 0 */
+ convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
+
+ hwss_add_dpp_set_hdr_multiplier(seq_state, pipe_ctx->plane_res.dpp, hw_mult);
+}
+
+void dcn401_program_mall_pipe_config_sequence(struct dc *dc, struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ int i;
+ unsigned int num_ways = dcn401_calculate_cab_allocation(dc, context);
+
+ /* Don't force p-state disallow -- can't block dummy p-state */
+
+ /* Update MALL_SEL register for each pipe (break down update_mall_sel call) */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = pipe->plane_res.hubp;
+
+ if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
+ int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
+ bool cache_cursor = false;
+
+ switch (hubp->curs_attr.color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ default:
+ cursor_size *= 8;
+ break;
+ }
+
+ if (cursor_size > 16384)
+ cache_cursor = true;
+
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ hwss_add_hubp_update_mall_sel(seq_state, hubp, 1, false);
+ } else {
+ /* MALL not supported with Stereo3D */
+ uint32_t mall_sel = (num_ways <= dc->caps.cache_num_ways &&
+ pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
+ pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO &&
+ !pipe->plane_state->address.tmz_surface) ? 2 : 0;
+ hwss_add_hubp_update_mall_sel(seq_state, hubp, mall_sel, cache_cursor);
+ }
+ }
+ }
+
+ /* Program FORCE_ONE_ROW_FOR_FRAME and CURSOR_REQ_MODE for main subvp pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = pipe->plane_res.hubp;
+
+ if (pipe->stream && hubp && hubp->funcs->hubp_prepare_subvp_buffering) {
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
+ hwss_add_hubp_prepare_subvp_buffering(seq_state, hubp, true);
+ }
+ }
+}
+
+void dcn401_verify_allow_pstate_change_high_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state)
+{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ if (!hubbub->funcs->verify_allow_pstate_change_high)
+ return;
+
+ if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
+ /* Attempt hardware workaround force recovery */
+ dcn401_hw_wa_force_recovery_sequence(dc, seq_state);
+ }
+}
+
+bool dcn401_hw_wa_force_recovery_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state)
+{
+ struct hubp *hubp;
+ unsigned int i;
+
+ if (!dc->debug.recovery_enabled)
+ return false;
+
+ /* Step 1: Set HUBP_BLANK_EN=1 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
+ hwss_add_hubp_set_blank_en(seq_state, hubp, true);
+ }
+ }
+
+ /* Step 2: DCHUBBUB_GLOBAL_SOFT_RESET=1 */
+ hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, true);
+
+ /* Step 3: Set HUBP_DISABLE=1 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->hubp_disable_control)
+ hwss_add_hubp_disable_control(seq_state, hubp, true);
+ }
+ }
+
+ /* Step 4: Set HUBP_DISABLE=0 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->hubp_disable_control)
+ hwss_add_hubp_disable_control(seq_state, hubp, false);
+ }
+ }
+
+ /* Step 5: DCHUBBUB_GLOBAL_SOFT_RESET=0 */
+ hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, false);
+
+ /* Step 6: Set HUBP_BLANK_EN=0 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
+ hwss_add_hubp_set_blank_en(seq_state, hubp, false);
+ }
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index ce65b4f6c672..f78162ab859b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -9,6 +9,7 @@
#include "dc.h"
#include "dc_stream.h"
#include "hw_sequencer_private.h"
+#include "hwss/hw_sequencer.h"
#include "dcn401/dcn401_dccg.h"
struct dc;
@@ -73,15 +74,17 @@ void dcn401_optimize_bandwidth(
struct dc *dc,
struct dc_state *context);
-void dcn401_fams2_global_control_lock(struct dc *dc,
+void dcn401_dmub_hw_control_lock(struct dc *dc,
struct dc_state *context,
bool lock);
void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable);
-void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params);
+void dcn401_dmub_hw_control_lock_fast(union block_sequence_params *params);
void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings);
void dcn401_hardware_release(struct dc *dc);
void dcn401_update_odm(struct dc *dc, struct dc_state *context,
struct pipe_ctx *otg_master);
+void dcn401_update_odm_sequence(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *otg_master, struct block_sequence_state *seq_state);
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy);
void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master);
void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock);
@@ -97,6 +100,11 @@ void dcn401_program_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
+void dcn401_program_pipe_sequence(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx);
void dcn401_program_front_end_for_ctx(struct dc *dc, struct dc_state *context);
void dcn401_post_unlock_program_front_end(struct dc *dc, struct dc_state *context);
@@ -109,12 +117,97 @@ void dcn401_detect_pipe_changes(
void dcn401_plane_atomic_power_down(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp);
-bool dcn401_program_rmcm_luts(
- struct hubp *hubp,
+void dcn401_plane_atomic_power_down_sequence(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp,
+ struct block_sequence_state *seq_state);
+void dcn401_plane_atomic_disconnect_sequence(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+void dcn401_blank_pixel_data_sequence(
+ struct dc *dc,
struct pipe_ctx *pipe_ctx,
- enum dc_cm2_transfer_func_source lut3d_src,
- struct dc_cm2_func_luts *mcm_luts,
- struct mpc *mpc,
- bool lut_bank_a,
- int mpcc_id);
+ bool blank,
+ struct block_sequence_state *seq_state);
+void dcn401_initialize_min_clocks(struct dc *dc);
+void dcn401_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe);
+
+void dcn401_program_all_writeback_pipes_in_tree_sequence(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_enable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ int mpcc_inst,
+ struct block_sequence_state *seq_state);
+
+void dcn401_disable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct block_sequence_state *seq_state);
+
+void dcn401_update_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_setup_gsl_group_as_lock_sequence(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable,
+ struct block_sequence_state *seq_state);
+
+void dcn401_disable_plane_sequence(
+ struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_post_unlock_reset_opp_sequence(
+ struct dc *dc,
+ struct pipe_ctx *opp_head,
+ struct block_sequence_state *seq_state);
+
+void dcn401_dc_ip_request_cntl(struct dc *dc, bool enable);
+
+void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_update_dchubp_dpp_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_update_mpcc_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_wait_for_mpcc_disconnect_sequence(
+ struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_setup_vupdate_interrupt_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_program_mall_pipe_config_sequence(struct dc *dc, struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_verify_allow_pstate_change_high_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state);
+
+bool dcn401_hw_wa_force_recovery_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state);
+
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index fe7aceb2f510..162096ce0bdf 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -9,6 +9,7 @@
#include "dcn30/dcn30_hwseq.h"
#include "dcn31/dcn31_hwseq.h"
#include "dcn32/dcn32_hwseq.h"
+#include "dcn35/dcn35_hwseq.h"
#include "dcn401/dcn401_hwseq.h"
#include "dcn401_init.h"
@@ -38,6 +39,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
+ .disable_plane_sequence = dcn401_disable_plane_sequence,
.pipe_control_lock = dcn20_pipe_control_lock,
.interdependent_update_lock = dcn401_interdependent_update_lock,
.cursor_lock = dcn10_cursor_lock,
@@ -53,6 +55,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .wait_for_mpcc_disconnect_sequence = dcn401_wait_for_mpcc_disconnect_sequence,
.edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
@@ -60,6 +63,12 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.set_cursor_position = dcn401_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .abort_cursor_offload_update = dcn35_abort_cursor_offload_update,
+ .begin_cursor_offload_update = dcn35_begin_cursor_offload_update,
+ .commit_cursor_offload_update = dcn35_commit_cursor_offload_update,
+ .update_cursor_offload_pipe = dcn401_update_cursor_offload_pipe,
+ .notify_cursor_offload_drr_update = dcn35_notify_cursor_offload_drr_update,
+ .program_cursor_offload_now = dcn35_program_cursor_offload_now,
.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
@@ -95,54 +104,70 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
.wait_for_dcc_meta_propagation = dcn401_wait_for_dcc_meta_propagation,
.is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
- .fams2_global_control_lock = dcn401_fams2_global_control_lock,
+ .dmub_hw_control_lock = dcn401_dmub_hw_control_lock,
.fams2_update_config = dcn401_fams2_update_config,
- .fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast,
+ .dmub_hw_control_lock_fast = dcn401_dmub_hw_control_lock_fast,
.program_outstanding_updates = dcn401_program_outstanding_updates,
.wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
.detect_pipe_changes = dcn401_detect_pipe_changes,
.enable_plane = dcn20_enable_plane,
+ .enable_plane_sequence = dcn401_enable_plane_sequence,
.update_dchubp_dpp = dcn20_update_dchubp_dpp,
+ .update_dchubp_dpp_sequence = dcn401_update_dchubp_dpp_sequence,
.post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
+ .post_unlock_reset_opp_sequence = dcn401_post_unlock_reset_opp_sequence,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn401_private_funcs = {
.init_pipes = dcn10_init_pipes,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .plane_atomic_disconnect_sequence = dcn401_plane_atomic_disconnect_sequence,
.update_mpcc = dcn20_update_mpcc,
+ .update_mpcc_sequence = dcn401_update_mpcc_sequence,
.set_input_transfer_func = dcn32_set_input_transfer_func,
.set_output_transfer_func = dcn401_set_output_transfer_func,
.power_down = dce110_power_down,
.enable_display_power_gating = dcn10_dummy_display_power_gating,
.blank_pixel_data = dcn20_blank_pixel_data,
+ .blank_pixel_data_sequence = dcn401_blank_pixel_data_sequence,
.reset_hw_ctx_wrap = dcn401_reset_hw_ctx_wrap,
.enable_stream_timing = dcn401_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .setup_vupdate_interrupt_sequence = dcn401_setup_vupdate_interrupt_sequence,
.did_underflow_occur = dcn10_did_underflow_occur,
.init_blank = dcn32_init_blank,
.disable_vga = dcn20_disable_vga,
.bios_golden_init = dcn10_bios_golden_init,
.plane_atomic_disable = dcn20_plane_atomic_disable,
.plane_atomic_power_down = dcn401_plane_atomic_power_down,
+ .plane_atomic_power_down_sequence = dcn401_plane_atomic_power_down_sequence,
.enable_power_gating_plane = dcn32_enable_power_gating_plane,
.hubp_pg_control = dcn32_hubp_pg_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
+ .program_all_writeback_pipes_in_tree_sequence = dcn401_program_all_writeback_pipes_in_tree_sequence,
.update_odm = dcn401_update_odm,
+ .update_odm_sequence = dcn401_update_odm_sequence,
.dsc_pg_control = dcn32_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .set_hdr_multiplier_sequence = dcn401_set_hdr_multiplier_sequence,
.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .verify_allow_pstate_change_high_sequence = dcn401_verify_allow_pstate_change_high_sequence,
.wait_for_blank_complete = dcn20_wait_for_blank_complete,
.dccg_init = dcn20_dccg_init,
.set_mcm_luts = dcn401_set_mcm_luts,
.program_mall_pipe_config = dcn32_program_mall_pipe_config,
+ .program_mall_pipe_config_sequence = dcn401_program_mall_pipe_config_sequence,
.update_mall_sel = dcn32_update_mall_sel,
.calculate_dccg_k1_k2_values = NULL,
.apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
.reset_back_end_for_pipe = dcn401_reset_back_end_for_pipe,
.populate_mcm_luts = NULL,
.perform_3dlut_wa_unlock = dcn401_perform_3dlut_wa_unlock,
+ .program_pipe_sequence = dcn401_program_pipe_sequence,
+ .dc_ip_request_cntl = dcn401_dc_ip_request_cntl,
};
void dcn401_hw_sequencer_init_functions(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 3a0795045bc6..8ed9eea40c56 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -31,6 +31,8 @@
#include "inc/hw/opp.h"
#include "inc/hw/link_encoder.h"
#include "inc/core_status.h"
+#include "inc/hw/hw_shared.h"
+#include "dsc/dsc.h"
struct pipe_ctx;
struct dc_state;
@@ -47,6 +49,9 @@ struct link_resource;
struct dc_dmub_cmd;
struct pg_block_update;
struct drr_params;
+struct dc_underflow_debug_data;
+struct dsc_optc_config;
+struct vm_system_aperture_param;
struct subvp_pipe_control_lock_fast_params {
struct dc *dc;
@@ -61,7 +66,7 @@ struct pipe_control_lock_params {
};
struct set_flip_control_gsl_params {
- struct pipe_ctx *pipe_ctx;
+ struct hubp *hubp;
bool flip_immediate;
};
@@ -147,12 +152,587 @@ struct wait_for_dcc_meta_propagation_params {
const struct pipe_ctx *top_pipe_to_program;
};
-struct fams2_global_control_lock_fast_params {
+struct dmub_hw_control_lock_fast_params {
struct dc *dc;
bool is_required;
bool lock;
};
+struct program_surface_config_params {
+ struct hubp *hubp;
+ enum surface_pixel_format format;
+ struct dc_tiling_info *tiling_info;
+ struct plane_size plane_size;
+ enum dc_rotation_angle rotation;
+ struct dc_plane_dcc_param *dcc;
+ bool horizontal_mirror;
+ int compat_level;
+};
+
+struct program_mcache_id_and_split_coordinate {
+ struct hubp *hubp;
+ struct dml2_hubp_pipe_mcache_regs *mcache_regs;
+};
+
+struct program_cursor_update_now_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct hubp_wait_pipe_read_start_params {
+ struct hubp *hubp;
+};
+
+struct apply_update_flags_for_phantom_params {
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct update_phantom_vp_position_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ struct dc_state *context;
+};
+
+struct set_odm_combine_params {
+ struct timing_generator *tg;
+ int opp_inst[MAX_PIPES];
+ int opp_head_count;
+ int odm_slice_width;
+ int last_odm_slice_width;
+};
+
+struct set_odm_bypass_params {
+ struct timing_generator *tg;
+ const struct dc_crtc_timing *timing;
+};
+
+struct opp_pipe_clock_control_params {
+ struct output_pixel_processor *opp;
+ bool enable;
+};
+
+struct opp_program_left_edge_extra_pixel_params {
+ struct output_pixel_processor *opp;
+ enum dc_pixel_encoding pixel_encoding;
+ bool is_otg_master;
+};
+
+struct dccg_set_dto_dscclk_params {
+ struct dccg *dccg;
+ int inst;
+ int num_slices_h;
+};
+
+struct dsc_set_config_params {
+ struct display_stream_compressor *dsc;
+ struct dsc_config *dsc_cfg;
+ struct dsc_optc_config *dsc_optc_cfg;
+};
+
+struct dsc_enable_params {
+ struct display_stream_compressor *dsc;
+ int opp_inst;
+};
+
+struct tg_set_dsc_config_params {
+ struct timing_generator *tg;
+ struct dsc_optc_config *dsc_optc_cfg;
+ bool enable;
+};
+
+struct dsc_disconnect_params {
+ struct display_stream_compressor *dsc;
+};
+
+struct dsc_read_state_params {
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state *dsc_state;
+};
+
+struct dsc_calculate_and_set_config_params {
+ struct pipe_ctx *pipe_ctx;
+ struct dsc_optc_config dsc_optc_cfg;
+ bool enable;
+ int opp_cnt;
+};
+
+struct dsc_enable_with_opp_params {
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct program_tg_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ struct dc_state *context;
+};
+
+struct tg_program_global_sync_params {
+ struct timing_generator *tg;
+ int vready_offset;
+ unsigned int vstartup_lines;
+ unsigned int vupdate_offset_pixels;
+ unsigned int vupdate_vupdate_width_pixels;
+ unsigned int pstate_keepout_start_lines;
+};
+
+struct tg_wait_for_state_params {
+ struct timing_generator *tg;
+ enum crtc_state state;
+};
+
+struct tg_set_vtg_params_params {
+ struct timing_generator *tg;
+ struct dc_crtc_timing *timing;
+ bool program_fp2;
+};
+
+struct tg_set_gsl_params {
+ struct timing_generator *tg;
+ struct gsl_params gsl;
+};
+
+struct tg_set_gsl_source_select_params {
+ struct timing_generator *tg;
+ int group_idx;
+ uint32_t gsl_ready_signal;
+};
+
+struct setup_vupdate_interrupt_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct tg_setup_vertical_interrupt2_params {
+ struct timing_generator *tg;
+ int start_line;
+};
+
+struct dpp_set_hdr_multiplier_params {
+ struct dpp *dpp;
+ uint32_t hw_mult;
+};
+
+struct program_det_size_params {
+ struct hubbub *hubbub;
+ unsigned int hubp_inst;
+ unsigned int det_buffer_size_kb;
+};
+
+struct program_det_segments_params {
+ struct hubbub *hubbub;
+ unsigned int hubp_inst;
+ unsigned int det_size;
+};
+
+struct update_dchubp_dpp_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ struct dc_state *context;
+};
+
+struct opp_set_dyn_expansion_params {
+ struct output_pixel_processor *opp;
+ enum dc_color_space color_space;
+ enum dc_color_depth color_depth;
+ enum signal_type signal;
+};
+
+struct opp_program_fmt_params {
+ struct output_pixel_processor *opp;
+ struct bit_depth_reduction_params *fmt_bit_depth;
+ struct clamping_and_pixel_encoding_params *clamping;
+};
+
+struct opp_program_bit_depth_reduction_params {
+ struct output_pixel_processor *opp;
+ bool use_default_params;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct opp_set_disp_pattern_generator_params {
+ struct output_pixel_processor *opp;
+ enum controller_dp_test_pattern test_pattern;
+ enum controller_dp_color_space color_space;
+ enum dc_color_depth color_depth;
+ struct tg_color solid_color;
+ bool use_solid_color;
+ int width;
+ int height;
+ int offset;
+};
+
+struct set_abm_pipe_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_abm_level_params {
+ struct abm *abm;
+ unsigned int abm_level;
+};
+
+struct set_abm_immediate_disable_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_disp_pattern_generator_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ enum controller_dp_test_pattern test_pattern;
+ enum controller_dp_color_space color_space;
+ enum dc_color_depth color_depth;
+ const struct tg_color *solid_color;
+ int width;
+ int height;
+ int offset;
+};
+
+struct mpc_update_blending_params {
+ struct mpc *mpc;
+ struct mpcc_blnd_cfg blnd_cfg;
+ int mpcc_id;
+};
+
+struct mpc_assert_idle_mpcc_params {
+ struct mpc *mpc;
+ int mpcc_id;
+};
+
+struct mpc_insert_plane_params {
+ struct mpc *mpc;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc_blnd_cfg blnd_cfg;
+ struct mpcc_sm_cfg *sm_cfg;
+ struct mpcc *insert_above_mpcc;
+ int dpp_id;
+ int mpcc_id;
+};
+
+struct mpc_remove_mpcc_params {
+ struct mpc *mpc;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc *mpcc_to_remove;
+};
+
+struct opp_set_mpcc_disconnect_pending_params {
+ struct output_pixel_processor *opp;
+ int mpcc_inst;
+ bool pending;
+};
+
+struct dc_set_optimized_required_params {
+ struct dc *dc;
+ bool optimized_required;
+};
+
+struct hubp_disconnect_params {
+ struct hubp *hubp;
+};
+
+struct hubbub_force_pstate_change_control_params {
+ struct hubbub *hubbub;
+ bool enable;
+ bool wait;
+};
+
+struct tg_enable_crtc_params {
+ struct timing_generator *tg;
+};
+
+struct hubp_wait_flip_pending_params {
+ struct hubp *hubp;
+ unsigned int timeout_us;
+ unsigned int polling_interval_us;
+};
+
+struct tg_wait_double_buffer_pending_params {
+ struct timing_generator *tg;
+ unsigned int timeout_us;
+ unsigned int polling_interval_us;
+};
+
+struct update_force_pstate_params {
+ struct dc *dc;
+ struct dc_state *context;
+};
+
+struct hubbub_apply_dedcn21_147_wa_params {
+ struct hubbub *hubbub;
+};
+
+struct hubbub_allow_self_refresh_control_params {
+ struct hubbub *hubbub;
+ bool allow;
+ bool *disallow_self_refresh_applied;
+};
+
+struct tg_get_frame_count_params {
+ struct timing_generator *tg;
+ unsigned int *frame_count;
+};
+
+struct mpc_set_dwb_mux_params {
+ struct mpc *mpc;
+ int dwb_id;
+ int mpcc_id;
+};
+
+struct mpc_disable_dwb_mux_params {
+ struct mpc *mpc;
+ unsigned int dwb_id;
+};
+
+struct mcif_wb_config_buf_params {
+ struct mcif_wb *mcif_wb;
+ struct mcif_buf_params *mcif_buf_params;
+ unsigned int dest_height;
+};
+
+struct mcif_wb_config_arb_params {
+ struct mcif_wb *mcif_wb;
+ struct mcif_arb_params *mcif_arb_params;
+};
+
+struct mcif_wb_enable_params {
+ struct mcif_wb *mcif_wb;
+};
+
+struct mcif_wb_disable_params {
+ struct mcif_wb *mcif_wb;
+};
+
+struct dwbc_enable_params {
+ struct dwbc *dwb;
+ struct dc_dwb_params *dwb_params;
+};
+
+struct dwbc_disable_params {
+ struct dwbc *dwb;
+};
+
+struct dwbc_update_params {
+ struct dwbc *dwb;
+ struct dc_dwb_params *dwb_params;
+};
+
+struct hubp_update_mall_sel_params {
+ struct hubp *hubp;
+ uint32_t mall_sel;
+ bool cache_cursor;
+};
+
+struct hubp_prepare_subvp_buffering_params {
+ struct hubp *hubp;
+ bool enable;
+};
+
+struct hubp_set_blank_en_params {
+ struct hubp *hubp;
+ bool enable;
+};
+
+struct hubp_disable_control_params {
+ struct hubp *hubp;
+ bool disable;
+};
+
+struct hubbub_soft_reset_params {
+ struct hubbub *hubbub;
+ void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset);
+ bool reset;
+};
+
+struct hubp_clk_cntl_params {
+ struct hubp *hubp;
+ bool enable;
+};
+
+struct hubp_init_params {
+ struct hubp *hubp;
+};
+
+struct hubp_set_vm_system_aperture_settings_params {
+ struct hubp *hubp;
+	/* flattened from struct vm_system_aperture_param */
+ PHYSICAL_ADDRESS_LOC sys_default;
+ PHYSICAL_ADDRESS_LOC sys_low;
+ PHYSICAL_ADDRESS_LOC sys_high;
+};
+
+struct hubp_set_flip_int_params {
+ struct hubp *hubp;
+};
+
+struct dpp_dppclk_control_params {
+ struct dpp *dpp;
+ bool dppclk_div;
+ bool enable;
+};
+
+struct disable_phantom_crtc_params {
+ struct timing_generator *tg;
+};
+
+struct dpp_pg_control_params {
+ struct dce_hwseq *hws;
+ unsigned int dpp_inst;
+ bool power_on;
+};
+
+struct hubp_pg_control_params {
+ struct dce_hwseq *hws;
+ unsigned int hubp_inst;
+ bool power_on;
+};
+
+struct hubp_reset_params {
+ struct hubp *hubp;
+};
+
+struct dpp_reset_params {
+ struct dpp *dpp;
+};
+
+struct dpp_root_clock_control_params {
+ struct dce_hwseq *hws;
+ unsigned int dpp_inst;
+ bool clock_on;
+};
+
+struct dc_ip_request_cntl_params {
+ struct dc *dc;
+ bool enable;
+};
+
+struct dsc_pg_status_params {
+ struct dce_hwseq *hws;
+ int dsc_inst;
+ bool is_ungated;
+};
+
+struct dsc_wait_disconnect_pending_clear_params {
+ struct display_stream_compressor *dsc;
+ bool *is_ungated;
+};
+
+struct dsc_disable_params {
+ struct display_stream_compressor *dsc;
+ bool *is_ungated;
+};
+
+struct dccg_set_ref_dscclk_params {
+ struct dccg *dccg;
+ int dsc_inst;
+ bool *is_ungated;
+};
+
+struct dccg_update_dpp_dto_params {
+ struct dccg *dccg;
+ int dpp_inst;
+ int dppclk_khz;
+};
+
+struct hubp_vtg_sel_params {
+ struct hubp *hubp;
+ uint32_t otg_inst;
+};
+
+struct hubp_setup2_params {
+ struct hubp *hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs;
+ union dml2_global_sync_programming *global_sync;
+ struct dc_crtc_timing *timing;
+};
+
+struct hubp_setup_params {
+ struct hubp *hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs;
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest;
+};
+
+struct hubp_set_unbounded_requesting_params {
+ struct hubp *hubp;
+ bool unbounded_req;
+};
+
+struct hubp_setup_interdependent2_params {
+ struct hubp *hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs;
+};
+
+struct hubp_setup_interdependent_params {
+ struct hubp *hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs;
+};
+
+struct dpp_set_cursor_matrix_params {
+ struct dpp *dpp;
+ enum dc_color_space color_space;
+ struct dc_csc_transform *cursor_csc_color_matrix;
+};
+
+struct mpc_update_mpcc_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct dpp_set_scaler_params {
+ struct dpp *dpp;
+ const struct scaler_data *scl_data;
+};
+
+struct hubp_mem_program_viewport_params {
+ struct hubp *hubp;
+ const struct rect *viewport;
+ const struct rect *viewport_c;
+};
+
+struct hubp_program_mcache_id_and_split_coordinate_params {
+ struct hubp *hubp;
+ struct mcache_regs_struct *mcache_regs;
+};
+
+struct abort_cursor_offload_update_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_cursor_attribute_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_cursor_position_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_cursor_sdr_white_level_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct program_output_csc_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ enum dc_color_space colorspace;
+ uint16_t *matrix;
+ int opp_id;
+};
+
+struct hubp_set_blank_params {
+ struct hubp *hubp;
+ bool blank;
+};
+
+struct phantom_hubp_post_enable_params {
+ struct hubp *hubp;
+};
+
union block_sequence_params {
struct update_plane_addr_params update_plane_addr_params;
struct subvp_pipe_control_lock_fast_params subvp_pipe_control_lock_fast_params;
@@ -172,7 +752,108 @@ union block_sequence_params {
struct set_ocsc_default_params set_ocsc_default_params;
struct subvp_save_surf_addr subvp_save_surf_addr;
struct wait_for_dcc_meta_propagation_params wait_for_dcc_meta_propagation_params;
- struct fams2_global_control_lock_fast_params fams2_global_control_lock_fast_params;
+ struct dmub_hw_control_lock_fast_params dmub_hw_control_lock_fast_params;
+ struct program_surface_config_params program_surface_config_params;
+ struct program_mcache_id_and_split_coordinate program_mcache_id_and_split_coordinate;
+ struct program_cursor_update_now_params program_cursor_update_now_params;
+ struct hubp_wait_pipe_read_start_params hubp_wait_pipe_read_start_params;
+ struct apply_update_flags_for_phantom_params apply_update_flags_for_phantom_params;
+ struct update_phantom_vp_position_params update_phantom_vp_position_params;
+ struct set_odm_combine_params set_odm_combine_params;
+ struct set_odm_bypass_params set_odm_bypass_params;
+ struct opp_pipe_clock_control_params opp_pipe_clock_control_params;
+ struct opp_program_left_edge_extra_pixel_params opp_program_left_edge_extra_pixel_params;
+ struct dccg_set_dto_dscclk_params dccg_set_dto_dscclk_params;
+ struct dsc_set_config_params dsc_set_config_params;
+ struct dsc_enable_params dsc_enable_params;
+ struct tg_set_dsc_config_params tg_set_dsc_config_params;
+ struct dsc_disconnect_params dsc_disconnect_params;
+ struct dsc_read_state_params dsc_read_state_params;
+ struct dsc_calculate_and_set_config_params dsc_calculate_and_set_config_params;
+ struct dsc_enable_with_opp_params dsc_enable_with_opp_params;
+ struct program_tg_params program_tg_params;
+ struct tg_program_global_sync_params tg_program_global_sync_params;
+ struct tg_wait_for_state_params tg_wait_for_state_params;
+ struct tg_set_vtg_params_params tg_set_vtg_params_params;
+ struct tg_setup_vertical_interrupt2_params tg_setup_vertical_interrupt2_params;
+ struct dpp_set_hdr_multiplier_params dpp_set_hdr_multiplier_params;
+ struct tg_set_gsl_params tg_set_gsl_params;
+ struct tg_set_gsl_source_select_params tg_set_gsl_source_select_params;
+ struct setup_vupdate_interrupt_params setup_vupdate_interrupt_params;
+ struct program_det_size_params program_det_size_params;
+ struct program_det_segments_params program_det_segments_params;
+ struct update_dchubp_dpp_params update_dchubp_dpp_params;
+ struct opp_set_dyn_expansion_params opp_set_dyn_expansion_params;
+ struct opp_program_fmt_params opp_program_fmt_params;
+ struct opp_program_bit_depth_reduction_params opp_program_bit_depth_reduction_params;
+ struct opp_set_disp_pattern_generator_params opp_set_disp_pattern_generator_params;
+ struct set_abm_pipe_params set_abm_pipe_params;
+ struct set_abm_level_params set_abm_level_params;
+ struct set_abm_immediate_disable_params set_abm_immediate_disable_params;
+ struct set_disp_pattern_generator_params set_disp_pattern_generator_params;
+ struct mpc_remove_mpcc_params mpc_remove_mpcc_params;
+ struct opp_set_mpcc_disconnect_pending_params opp_set_mpcc_disconnect_pending_params;
+ struct dc_set_optimized_required_params dc_set_optimized_required_params;
+ struct hubp_disconnect_params hubp_disconnect_params;
+ struct hubbub_force_pstate_change_control_params hubbub_force_pstate_change_control_params;
+ struct tg_enable_crtc_params tg_enable_crtc_params;
+ struct hubp_wait_flip_pending_params hubp_wait_flip_pending_params;
+ struct tg_wait_double_buffer_pending_params tg_wait_double_buffer_pending_params;
+ struct update_force_pstate_params update_force_pstate_params;
+ struct hubbub_apply_dedcn21_147_wa_params hubbub_apply_dedcn21_147_wa_params;
+ struct hubbub_allow_self_refresh_control_params hubbub_allow_self_refresh_control_params;
+ struct tg_get_frame_count_params tg_get_frame_count_params;
+ struct mpc_set_dwb_mux_params mpc_set_dwb_mux_params;
+ struct mpc_disable_dwb_mux_params mpc_disable_dwb_mux_params;
+ struct mcif_wb_config_buf_params mcif_wb_config_buf_params;
+ struct mcif_wb_config_arb_params mcif_wb_config_arb_params;
+ struct mcif_wb_enable_params mcif_wb_enable_params;
+ struct mcif_wb_disable_params mcif_wb_disable_params;
+ struct dwbc_enable_params dwbc_enable_params;
+ struct dwbc_disable_params dwbc_disable_params;
+ struct dwbc_update_params dwbc_update_params;
+ struct hubp_update_mall_sel_params hubp_update_mall_sel_params;
+ struct hubp_prepare_subvp_buffering_params hubp_prepare_subvp_buffering_params;
+ struct hubp_set_blank_en_params hubp_set_blank_en_params;
+ struct hubp_disable_control_params hubp_disable_control_params;
+ struct hubbub_soft_reset_params hubbub_soft_reset_params;
+ struct hubp_clk_cntl_params hubp_clk_cntl_params;
+ struct hubp_init_params hubp_init_params;
+ struct hubp_set_vm_system_aperture_settings_params hubp_set_vm_system_aperture_settings_params;
+ struct hubp_set_flip_int_params hubp_set_flip_int_params;
+ struct dpp_dppclk_control_params dpp_dppclk_control_params;
+ struct disable_phantom_crtc_params disable_phantom_crtc_params;
+ struct dpp_pg_control_params dpp_pg_control_params;
+ struct hubp_pg_control_params hubp_pg_control_params;
+ struct hubp_reset_params hubp_reset_params;
+ struct dpp_reset_params dpp_reset_params;
+ struct dpp_root_clock_control_params dpp_root_clock_control_params;
+ struct dc_ip_request_cntl_params dc_ip_request_cntl_params;
+ struct dsc_pg_status_params dsc_pg_status_params;
+ struct dsc_wait_disconnect_pending_clear_params dsc_wait_disconnect_pending_clear_params;
+ struct dsc_disable_params dsc_disable_params;
+ struct dccg_set_ref_dscclk_params dccg_set_ref_dscclk_params;
+ struct dccg_update_dpp_dto_params dccg_update_dpp_dto_params;
+ struct hubp_vtg_sel_params hubp_vtg_sel_params;
+ struct hubp_setup2_params hubp_setup2_params;
+ struct hubp_setup_params hubp_setup_params;
+ struct hubp_set_unbounded_requesting_params hubp_set_unbounded_requesting_params;
+ struct hubp_setup_interdependent2_params hubp_setup_interdependent2_params;
+ struct hubp_setup_interdependent_params hubp_setup_interdependent_params;
+ struct dpp_set_cursor_matrix_params dpp_set_cursor_matrix_params;
+ struct mpc_update_mpcc_params mpc_update_mpcc_params;
+ struct mpc_update_blending_params mpc_update_blending_params;
+ struct mpc_assert_idle_mpcc_params mpc_assert_idle_mpcc_params;
+ struct mpc_insert_plane_params mpc_insert_plane_params;
+ struct dpp_set_scaler_params dpp_set_scaler_params;
+ struct hubp_mem_program_viewport_params hubp_mem_program_viewport_params;
+ struct abort_cursor_offload_update_params abort_cursor_offload_update_params;
+ struct set_cursor_attribute_params set_cursor_attribute_params;
+ struct set_cursor_position_params set_cursor_position_params;
+ struct set_cursor_sdr_white_level_params set_cursor_sdr_white_level_params;
+ struct program_output_csc_params program_output_csc_params;
+ struct hubp_set_blank_params hubp_set_blank_params;
+ struct phantom_hubp_post_enable_params phantom_hubp_post_enable_params;
};
enum block_sequence_func {
@@ -188,13 +869,111 @@ enum block_sequence_func {
DPP_SETUP_DPP,
DPP_PROGRAM_BIAS_AND_SCALE,
DPP_SET_OUTPUT_TRANSFER_FUNC,
+ DPP_SET_HDR_MULTIPLIER,
MPC_UPDATE_VISUAL_CONFIRM,
MPC_POWER_ON_MPC_MEM_PWR,
MPC_SET_OUTPUT_CSC,
MPC_SET_OCSC_DEFAULT,
DMUB_SUBVP_SAVE_SURF_ADDR,
HUBP_WAIT_FOR_DCC_META_PROP,
- DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST,
+ DMUB_HW_CONTROL_LOCK_FAST,
+ HUBP_PROGRAM_SURFACE_CONFIG,
+ HUBP_PROGRAM_MCACHE_ID,
+ PROGRAM_CURSOR_UPDATE_NOW,
+ HUBP_WAIT_PIPE_READ_START,
+ HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM,
+ HWS_UPDATE_PHANTOM_VP_POSITION,
+ OPTC_SET_ODM_COMBINE,
+ OPTC_SET_ODM_BYPASS,
+ OPP_PIPE_CLOCK_CONTROL,
+ OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL,
+ DCCG_SET_DTO_DSCCLK,
+ DSC_SET_CONFIG,
+ DSC_ENABLE,
+ TG_SET_DSC_CONFIG,
+ DSC_DISCONNECT,
+ DSC_READ_STATE,
+ DSC_CALCULATE_AND_SET_CONFIG,
+ DSC_ENABLE_WITH_OPP,
+ TG_PROGRAM_GLOBAL_SYNC,
+ TG_WAIT_FOR_STATE,
+ TG_SET_VTG_PARAMS,
+ TG_SETUP_VERTICAL_INTERRUPT2,
+ HUBP_PROGRAM_DET_SIZE,
+ HUBP_PROGRAM_DET_SEGMENTS,
+ OPP_SET_DYN_EXPANSION,
+ OPP_PROGRAM_FMT,
+ OPP_PROGRAM_BIT_DEPTH_REDUCTION,
+ OPP_SET_DISP_PATTERN_GENERATOR,
+ ABM_SET_PIPE,
+ ABM_SET_LEVEL,
+ ABM_SET_IMMEDIATE_DISABLE,
+ MPC_REMOVE_MPCC,
+ OPP_SET_MPCC_DISCONNECT_PENDING,
+ DC_SET_OPTIMIZED_REQUIRED,
+ HUBP_DISCONNECT,
+ HUBBUB_FORCE_PSTATE_CHANGE_CONTROL,
+ TG_ENABLE_CRTC,
+ TG_SET_GSL,
+ TG_SET_GSL_SOURCE_SELECT,
+ HUBP_WAIT_FLIP_PENDING,
+ TG_WAIT_DOUBLE_BUFFER_PENDING,
+ UPDATE_FORCE_PSTATE,
+ PROGRAM_MALL_PIPE_CONFIG,
+ HUBBUB_APPLY_DEDCN21_147_WA,
+ HUBBUB_ALLOW_SELF_REFRESH_CONTROL,
+ TG_GET_FRAME_COUNT,
+ MPC_SET_DWB_MUX,
+ MPC_DISABLE_DWB_MUX,
+ MCIF_WB_CONFIG_BUF,
+ MCIF_WB_CONFIG_ARB,
+ MCIF_WB_ENABLE,
+ MCIF_WB_DISABLE,
+ DWBC_ENABLE,
+ DWBC_DISABLE,
+ DWBC_UPDATE,
+ HUBP_UPDATE_MALL_SEL,
+ HUBP_PREPARE_SUBVP_BUFFERING,
+ HUBP_SET_BLANK_EN,
+ HUBP_DISABLE_CONTROL,
+ HUBBUB_SOFT_RESET,
+ HUBP_CLK_CNTL,
+ HUBP_INIT,
+ HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS,
+ HUBP_SET_FLIP_INT,
+ DPP_DPPCLK_CONTROL,
+ DISABLE_PHANTOM_CRTC,
+ DSC_PG_STATUS,
+ DSC_WAIT_DISCONNECT_PENDING_CLEAR,
+ DSC_DISABLE,
+ DCCG_SET_REF_DSCCLK,
+ DPP_PG_CONTROL,
+ HUBP_PG_CONTROL,
+ HUBP_RESET,
+ DPP_RESET,
+ DPP_ROOT_CLOCK_CONTROL,
+ DC_IP_REQUEST_CNTL,
+ DCCG_UPDATE_DPP_DTO,
+ HUBP_VTG_SEL,
+ HUBP_SETUP2,
+ HUBP_SETUP,
+ HUBP_SET_UNBOUNDED_REQUESTING,
+ HUBP_SETUP_INTERDEPENDENT2,
+ HUBP_SETUP_INTERDEPENDENT,
+ DPP_SET_CURSOR_MATRIX,
+ MPC_UPDATE_BLENDING,
+ MPC_ASSERT_IDLE_MPCC,
+ MPC_INSERT_PLANE,
+ DPP_SET_SCALER,
+ HUBP_MEM_PROGRAM_VIEWPORT,
+ ABORT_CURSOR_OFFLOAD_UPDATE,
+ SET_CURSOR_ATTRIBUTE,
+ SET_CURSOR_POSITION,
+ SET_CURSOR_SDR_WHITE_LEVEL,
+ PROGRAM_OUTPUT_CSC,
+ HUBP_SET_LEGACY_TILING_COMPAT_LEVEL,
+ HUBP_SET_BLANK,
+ PHANTOM_HUBP_POST_ENABLE,
/* This must be the last value in this enum, add new ones above */
HWSS_BLOCK_SEQUENCE_FUNC_COUNT
};
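Note: each block-sequence step pairs one block_sequence_func tag with the matching
slot in union block_sequence_params, and the executor dispatches on the tag. A
minimal, self-contained sketch of that tagged-union dispatch, using stand-in types
(hubp_t and the sk_* names are illustrative, not the real DC definitions):

/* Sketch only: the real union and enum are the ones defined above. */
#include <stdio.h>

typedef struct { int inst; } hubp_t;

struct sk_hubp_set_blank_params { hubp_t *hubp; int blank; };
struct sk_tg_enable_crtc_params { void *tg; };

union sk_params {
	struct sk_hubp_set_blank_params hubp_set_blank;
	struct sk_tg_enable_crtc_params tg_enable_crtc;
};

enum sk_func { SK_HUBP_SET_BLANK, SK_TG_ENABLE_CRTC, SK_FUNC_COUNT };

struct sk_step {
	union sk_params params;
	enum sk_func func;
};

static void sk_execute_step(const struct sk_step *step)
{
	switch (step->func) {
	case SK_HUBP_SET_BLANK:
		printf("hubp%d blank=%d\n",
		       step->params.hubp_set_blank.hubp->inst,
		       step->params.hubp_set_blank.blank);
		break;
	case SK_TG_ENABLE_CRTC:
		printf("enable crtc\n");
		break;
	default:
		break;
	}
}

int main(void)
{
	hubp_t h = { .inst = 0 };
	struct sk_step step = {
		.params.hubp_set_blank = { .hubp = &h, .blank = 1 },
		.func = SK_HUBP_SET_BLANK,
	};

	sk_execute_step(&step);
	return 0;
}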
@@ -204,6 +983,11 @@ struct block_sequence {
enum block_sequence_func func;
};
+struct block_sequence_state {
+ struct block_sequence *steps;
+ unsigned int *num_steps;
+};
+
#define MAX_HWSS_BLOCK_SEQUENCE_SIZE (HWSS_BLOCK_SEQUENCE_FUNC_COUNT * MAX_PIPES)
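Note: block_sequence_state simply bundles the caller-owned step array with its
running counter, so each hwss_add_* helper declared later in this header can
append a step without extra bookkeeping at the call site. A hedged sketch of
that append pattern, reusing the stand-in types from the sketch above; the
bound check against MAX_HWSS_BLOCK_SEQUENCE_SIZE is assumed, not confirmed by
this header:

#define SK_MAX_STEPS 8	/* stands in for MAX_HWSS_BLOCK_SEQUENCE_SIZE */

struct sk_state {
	struct sk_step *steps;
	unsigned int *num_steps;
};

static void sk_add_hubp_set_blank(struct sk_state *s, hubp_t *hubp, int blank)
{
	unsigned int i = *s->num_steps;

	if (i >= SK_MAX_STEPS)
		return;	/* assumed: real helpers drop or warn on overflow */

	s->steps[i].params.hubp_set_blank.hubp = hubp;
	s->steps[i].params.hubp_set_blank.blank = blank;
	s->steps[i].func = SK_HUBP_SET_BLANK;
	*s->num_steps = i + 1;
}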
struct hw_sequencer_funcs {
@@ -221,6 +1005,8 @@ struct hw_sequencer_funcs {
enum dc_status (*apply_ctx_to_hw)(struct dc *dc,
struct dc_state *context);
void (*disable_plane)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
+ void (*disable_plane_sequence)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank);
void (*apply_ctx_for_surface)(struct dc *dc,
const struct dc_stream_state *stream,
@@ -238,6 +1024,10 @@ struct hw_sequencer_funcs {
void (*wait_for_mpcc_disconnect)(struct dc *dc,
struct resource_pool *res_pool,
struct pipe_ctx *pipe_ctx);
+ void (*wait_for_mpcc_disconnect_sequence)(struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*edp_backlight_control)(
struct dc_link *link,
bool enable);
@@ -309,6 +1099,13 @@ struct hw_sequencer_funcs {
void (*set_cursor_position)(struct pipe_ctx *pipe);
void (*set_cursor_attribute)(struct pipe_ctx *pipe);
void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe);
+ void (*abort_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*begin_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*commit_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*update_cursor_offload_pipe)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*notify_cursor_offload_drr_update)(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream);
+ void (*program_cursor_offload_now)(struct dc *dc, const struct pipe_ctx *pipe);
/* Colour Related */
void (*program_gamut_remap)(struct pipe_ctx *pipe_ctx);
@@ -451,13 +1248,13 @@ struct hw_sequencer_funcs {
const struct dc_state *new_ctx);
void (*wait_for_dcc_meta_propagation)(const struct dc *dc,
const struct pipe_ctx *top_pipe_to_program);
- void (*fams2_global_control_lock)(struct dc *dc,
+ void (*dmub_hw_control_lock)(struct dc *dc,
struct dc_state *context,
bool lock);
void (*fams2_update_config)(struct dc *dc,
struct dc_state *context,
bool enable);
- void (*fams2_global_control_lock_fast)(union block_sequence_params *params);
+ void (*dmub_hw_control_lock_fast)(union block_sequence_params *params);
void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
void (*program_outstanding_updates)(struct dc *dc,
struct dc_state *context);
@@ -470,11 +1267,26 @@ struct hw_sequencer_funcs {
void (*enable_plane)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
+ void (*enable_plane_sequence)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
void (*update_dchubp_dpp)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
+ void (*update_dchubp_dpp_sequence)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
void (*post_unlock_reset_opp)(struct dc *dc,
struct pipe_ctx *opp_head);
+ void (*post_unlock_reset_opp_sequence)(
+ struct dc *dc,
+ struct pipe_ctx *opp_head,
+ struct block_sequence_state *seq_state);
+ void (*get_underflow_debug_data)(const struct dc *dc,
+ struct timing_generator *tg,
+ struct dc_underflow_debug_data *out_data);
};
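Note: the new *_sequence hooks record work instead of executing it, so a caller
can batch an entire plane teardown and run it later in one pass. A hedged usage
sketch; hwss_execute_sequence() is assumed to be the executor declared elsewhere
in this header, and a real caller would presumably use a preallocated scratch
buffer rather than a large local array:

static void disable_plane_batched(struct dc *dc, struct dc_state *state,
				  struct pipe_ctx *pipe_ctx)
{
	/* sketch: static storage only to avoid a huge stack frame;
	 * real code would use driver-owned scratch */
	static struct block_sequence steps[MAX_HWSS_BLOCK_SEQUENCE_SIZE];
	unsigned int num_steps = 0;
	struct block_sequence_state seq_state = {
		.steps = steps,
		.num_steps = &num_steps,
	};

	if (!dc->hwss.disable_plane_sequence)
		return;

	/* record every hardware touch as a step... */
	dc->hwss.disable_plane_sequence(dc, state, pipe_ctx, &seq_state);
	/* ...then replay them in order (assumed executor) */
	hwss_execute_sequence(dc, steps, num_steps);
}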
void color_space_to_black_color(
@@ -502,6 +1314,9 @@ void get_hdr_visual_confirm_color(
void get_mpctree_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
+void get_smartmux_visual_confirm_color(
+ struct dc *dc,
+ struct tg_color *color);
void get_vabc_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
@@ -581,4 +1396,630 @@ void hwss_set_ocsc_default(union block_sequence_params *params);
void hwss_subvp_save_surf_addr(union block_sequence_params *params);
+void hwss_program_surface_config(union block_sequence_params *params);
+
+void hwss_program_mcache_id_and_split_coordinate(union block_sequence_params *params);
+
+void hwss_set_odm_combine(union block_sequence_params *params);
+
+void hwss_set_odm_bypass(union block_sequence_params *params);
+
+void hwss_opp_pipe_clock_control(union block_sequence_params *params);
+
+void hwss_opp_program_left_edge_extra_pixel(union block_sequence_params *params);
+
+void hwss_blank_pixel_data(union block_sequence_params *params);
+
+void hwss_dccg_set_dto_dscclk(union block_sequence_params *params);
+
+void hwss_dsc_set_config(union block_sequence_params *params);
+
+void hwss_dsc_enable(union block_sequence_params *params);
+
+void hwss_tg_set_dsc_config(union block_sequence_params *params);
+
+void hwss_dsc_disconnect(union block_sequence_params *params);
+
+void hwss_dsc_read_state(union block_sequence_params *params);
+
+void hwss_dsc_calculate_and_set_config(union block_sequence_params *params);
+
+void hwss_dsc_enable_with_opp(union block_sequence_params *params);
+
+void hwss_program_tg(union block_sequence_params *params);
+
+void hwss_tg_program_global_sync(union block_sequence_params *params);
+
+void hwss_tg_wait_for_state(union block_sequence_params *params);
+
+void hwss_tg_set_vtg_params(union block_sequence_params *params);
+
+void hwss_tg_setup_vertical_interrupt2(union block_sequence_params *params);
+
+void hwss_dpp_set_hdr_multiplier(union block_sequence_params *params);
+
+void hwss_program_det_size(union block_sequence_params *params);
+
+void hwss_program_det_segments(union block_sequence_params *params);
+
+void hwss_opp_set_dyn_expansion(union block_sequence_params *params);
+
+void hwss_opp_program_fmt(union block_sequence_params *params);
+
+void hwss_opp_program_bit_depth_reduction(union block_sequence_params *params);
+
+void hwss_opp_set_disp_pattern_generator(union block_sequence_params *params);
+
+void hwss_set_abm_pipe(union block_sequence_params *params);
+
+void hwss_set_abm_level(union block_sequence_params *params);
+
+void hwss_set_abm_immediate_disable(union block_sequence_params *params);
+
+void hwss_mpc_remove_mpcc(union block_sequence_params *params);
+
+void hwss_opp_set_mpcc_disconnect_pending(union block_sequence_params *params);
+
+void hwss_dc_set_optimized_required(union block_sequence_params *params);
+
+void hwss_hubp_disconnect(union block_sequence_params *params);
+
+void hwss_hubbub_force_pstate_change_control(union block_sequence_params *params);
+
+void hwss_tg_enable_crtc(union block_sequence_params *params);
+
+void hwss_tg_set_gsl(union block_sequence_params *params);
+
+void hwss_tg_set_gsl_source_select(union block_sequence_params *params);
+
+void hwss_hubp_wait_flip_pending(union block_sequence_params *params);
+
+void hwss_tg_wait_double_buffer_pending(union block_sequence_params *params);
+
+void hwss_update_force_pstate(union block_sequence_params *params);
+
+void hwss_hubbub_apply_dedcn21_147_wa(union block_sequence_params *params);
+
+void hwss_hubbub_allow_self_refresh_control(union block_sequence_params *params);
+
+void hwss_tg_get_frame_count(union block_sequence_params *params);
+
+void hwss_mpc_set_dwb_mux(union block_sequence_params *params);
+
+void hwss_mpc_disable_dwb_mux(union block_sequence_params *params);
+
+void hwss_mcif_wb_config_buf(union block_sequence_params *params);
+
+void hwss_mcif_wb_config_arb(union block_sequence_params *params);
+
+void hwss_mcif_wb_enable(union block_sequence_params *params);
+
+void hwss_mcif_wb_disable(union block_sequence_params *params);
+
+void hwss_dwbc_enable(union block_sequence_params *params);
+
+void hwss_dwbc_disable(union block_sequence_params *params);
+
+void hwss_dwbc_update(union block_sequence_params *params);
+
+void hwss_hubp_update_mall_sel(union block_sequence_params *params);
+
+void hwss_hubp_prepare_subvp_buffering(union block_sequence_params *params);
+
+void hwss_hubp_set_blank_en(union block_sequence_params *params);
+
+void hwss_hubp_disable_control(union block_sequence_params *params);
+
+void hwss_hubbub_soft_reset(union block_sequence_params *params);
+
+void hwss_hubp_clk_cntl(union block_sequence_params *params);
+
+void hwss_hubp_init(union block_sequence_params *params);
+
+void hwss_hubp_set_vm_system_aperture_settings(union block_sequence_params *params);
+
+void hwss_hubp_set_flip_int(union block_sequence_params *params);
+
+void hwss_dpp_dppclk_control(union block_sequence_params *params);
+
+void hwss_disable_phantom_crtc(union block_sequence_params *params);
+
+void hwss_dsc_pg_status(union block_sequence_params *params);
+
+void hwss_dsc_wait_disconnect_pending_clear(union block_sequence_params *params);
+
+void hwss_dsc_disable(union block_sequence_params *params);
+
+void hwss_dccg_set_ref_dscclk(union block_sequence_params *params);
+
+void hwss_dpp_pg_control(union block_sequence_params *params);
+
+void hwss_hubp_pg_control(union block_sequence_params *params);
+
+void hwss_hubp_reset(union block_sequence_params *params);
+
+void hwss_dpp_reset(union block_sequence_params *params);
+
+void hwss_dpp_root_clock_control(union block_sequence_params *params);
+
+void hwss_dc_ip_request_cntl(union block_sequence_params *params);
+
+void hwss_dccg_update_dpp_dto(union block_sequence_params *params);
+
+void hwss_hubp_vtg_sel(union block_sequence_params *params);
+
+void hwss_hubp_setup2(union block_sequence_params *params);
+
+void hwss_hubp_setup(union block_sequence_params *params);
+
+void hwss_hubp_set_unbounded_requesting(union block_sequence_params *params);
+
+void hwss_hubp_setup_interdependent2(union block_sequence_params *params);
+
+void hwss_hubp_setup_interdependent(union block_sequence_params *params);
+
+void hwss_dpp_set_cursor_matrix(union block_sequence_params *params);
+
+void hwss_mpc_update_mpcc(union block_sequence_params *params);
+
+void hwss_mpc_update_blending(union block_sequence_params *params);
+
+void hwss_mpc_assert_idle_mpcc(union block_sequence_params *params);
+
+void hwss_mpc_insert_plane(union block_sequence_params *params);
+
+void hwss_dpp_set_scaler(union block_sequence_params *params);
+
+void hwss_hubp_mem_program_viewport(union block_sequence_params *params);
+
+void hwss_abort_cursor_offload_update(union block_sequence_params *params);
+
+void hwss_set_cursor_attribute(union block_sequence_params *params);
+
+void hwss_set_cursor_position(union block_sequence_params *params);
+
+void hwss_set_cursor_sdr_white_level(union block_sequence_params *params);
+
+void hwss_program_output_csc(union block_sequence_params *params);
+
+void hwss_hubp_set_legacy_tiling_compat_level(union block_sequence_params *params);
+
+void hwss_hubp_set_blank(union block_sequence_params *params);
+
+void hwss_phantom_hubp_post_enable(union block_sequence_params *params);
+
+void hwss_add_optc_pipe_control_lock(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, bool lock);
+
+void hwss_add_hubp_set_flip_control_gsl(struct block_sequence_state *seq_state,
+ struct hubp *hubp, bool flip_immediate);
+
+void hwss_add_hubp_program_triplebuffer(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
+
+void hwss_add_hubp_update_plane_addr(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_set_input_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_plane_state *plane_state);
+
+void hwss_add_dpp_program_gamut_remap(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_program_bias_and_scale(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_optc_program_manual_trigger(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_set_output_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_stream_state *stream);
+
+void hwss_add_mpc_update_visual_confirm(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, int mpcc_id);
+
+void hwss_add_mpc_power_on_mpc_mem_pwr(struct block_sequence_state *seq_state,
+ struct mpc *mpc, int mpcc_id, bool power_on);
+
+void hwss_add_mpc_set_output_csc(struct block_sequence_state *seq_state,
+ struct mpc *mpc, int opp_id, const uint16_t *regval, enum mpc_output_csc_mode ocsc_mode);
+
+void hwss_add_mpc_set_ocsc_default(struct block_sequence_state *seq_state,
+ struct mpc *mpc, int opp_id, enum dc_color_space colorspace, enum mpc_output_csc_mode ocsc_mode);
+
+void hwss_add_dmub_send_dmcub_cmd(struct block_sequence_state *seq_state,
+ struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
+
+void hwss_add_dmub_subvp_save_surf_addr(struct block_sequence_state *seq_state,
+ struct dc_dmub_srv *dc_dmub_srv, struct dc_plane_address *addr, uint8_t subvp_index);
+
+void hwss_add_hubp_wait_for_dcc_meta_prop(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *top_pipe_to_program);
+
+void hwss_add_hubp_wait_pipe_read_start(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_hws_apply_update_flags_for_phantom(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_hws_update_phantom_vp_position(struct block_sequence_state *seq_state,
+ struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_optc_set_odm_combine(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int opp_inst[MAX_PIPES], int opp_head_count,
+ int odm_slice_width, int last_odm_slice_width);
+
+void hwss_add_optc_set_odm_bypass(struct block_sequence_state *seq_state,
+ struct timing_generator *optc, struct dc_crtc_timing *timing);
+
+void hwss_add_tg_program_global_sync(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int vready_offset,
+ unsigned int vstartup_lines,
+ unsigned int vupdate_offset_pixels,
+ unsigned int vupdate_vupdate_width_pixels,
+ unsigned int pstate_keepout_start_lines);
+
+void hwss_add_tg_wait_for_state(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, enum crtc_state state);
+
+void hwss_add_tg_set_vtg_params(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, struct dc_crtc_timing *dc_crtc_timing, bool program_fp2);
+
+void hwss_add_tg_setup_vertical_interrupt2(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int start_line);
+
+void hwss_add_dpp_set_hdr_multiplier(struct block_sequence_state *seq_state,
+ struct dpp *dpp, uint32_t hw_mult);
+
+void hwss_add_hubp_program_det_size(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub, unsigned int hubp_inst, unsigned int det_buffer_size_kb);
+
+void hwss_add_hubp_program_mcache_id(struct block_sequence_state *seq_state,
+ struct hubp *hubp, struct dml2_hubp_pipe_mcache_regs *mcache_regs);
+
+void hwss_add_hubbub_force_pstate_change_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub, bool enable, bool wait);
+
+void hwss_add_hubp_program_det_segments(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub, unsigned int hubp_inst, unsigned int det_size);
+
+void hwss_add_opp_set_dyn_expansion(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, enum dc_color_space color_sp,
+ enum dc_color_depth color_dpth, enum signal_type signal);
+
+void hwss_add_opp_program_fmt(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping);
+
+void hwss_add_abm_set_pipe(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_abm_set_level(struct block_sequence_state *seq_state,
+ struct abm *abm, uint32_t abm_level);
+
+void hwss_add_tg_enable_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg);
+
+void hwss_add_hubp_wait_flip_pending(struct block_sequence_state *seq_state,
+ struct hubp *hubp, unsigned int timeout_us, unsigned int polling_interval_us);
+
+void hwss_add_tg_wait_double_buffer_pending(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, unsigned int timeout_us, unsigned int polling_interval_us);
+
+void hwss_add_dccg_set_dto_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg, int inst, int num_slices_h);
+
+void hwss_add_dsc_calculate_and_set_config(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx, bool enable, int opp_cnt);
+
+void hwss_add_mpc_remove_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc, struct mpc_tree *mpc_tree_params, struct mpcc *mpcc_to_remove);
+
+void hwss_add_opp_set_mpcc_disconnect_pending(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, int mpcc_inst, bool pending);
+
+void hwss_add_hubp_disconnect(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_dsc_enable_with_opp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dsc_disconnect(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc);
+
+void hwss_add_dc_set_optimized_required(struct block_sequence_state *seq_state,
+ struct dc *dc, bool optimized_required);
+
+void hwss_add_abm_set_immediate_disable(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_opp_set_disp_pattern_generator(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
+ enum dc_color_depth color_depth,
+ struct tg_color solid_color,
+ bool use_solid_color,
+ int width,
+ int height,
+ int offset);
+
+void hwss_add_opp_program_bit_depth_reduction(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool use_default_params,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dc_ip_request_cntl(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ bool enable);
+
+void hwss_add_dwbc_update(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params);
+
+void hwss_add_mcif_wb_config_buf(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_buf_params *mcif_buf_params,
+ unsigned int dest_height);
+
+void hwss_add_mcif_wb_config_arb(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_arb_params *mcif_arb_params);
+
+void hwss_add_mcif_wb_enable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb);
+
+void hwss_add_mcif_wb_disable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb);
+
+void hwss_add_mpc_set_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int dwb_id,
+ int mpcc_id);
+
+void hwss_add_mpc_disable_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ unsigned int dwb_id);
+
+void hwss_add_dwbc_enable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params);
+
+void hwss_add_dwbc_disable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb);
+
+void hwss_add_tg_set_gsl(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct gsl_params gsl);
+
+void hwss_add_tg_set_gsl_source_select(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int group_idx,
+ uint32_t gsl_ready_signal);
+
+void hwss_add_hubp_update_mall_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t mall_sel,
+ bool cache_cursor);
+
+void hwss_add_hubp_prepare_subvp_buffering(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable);
+
+void hwss_add_hubp_set_blank_en(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable);
+
+void hwss_add_hubp_disable_control(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool disable);
+
+void hwss_add_hubbub_soft_reset(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset),
+ bool reset);
+
+void hwss_add_hubp_clk_cntl(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable);
+
+void hwss_add_dpp_dppclk_control(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ bool dppclk_div,
+ bool enable);
+
+void hwss_add_disable_phantom_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg);
+
+void hwss_add_dsc_pg_status(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ int dsc_inst,
+ bool is_ungated);
+
+void hwss_add_dsc_wait_disconnect_pending_clear(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated);
+
+void hwss_add_dsc_disable(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated);
+
+void hwss_add_dccg_set_ref_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dsc_inst,
+ bool *is_ungated);
+
+void hwss_add_dpp_root_clock_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool clock_on);
+
+void hwss_add_dpp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on);
+
+void hwss_add_hubp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on);
+
+void hwss_add_hubp_set_blank(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool blank);
+
+void hwss_add_hubp_init(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_hubp_reset(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_dpp_reset(struct block_sequence_state *seq_state,
+ struct dpp *dpp);
+
+void hwss_add_opp_pipe_clock_control(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool enable);
+
+void hwss_add_hubp_set_vm_system_aperture_settings(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint64_t sys_default,
+ uint64_t sys_low,
+ uint64_t sys_high);
+
+void hwss_add_hubp_set_flip_int(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_dccg_update_dpp_dto(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dpp_inst,
+ int dppclk_khz);
+
+void hwss_add_hubp_vtg_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t otg_inst);
+
+void hwss_add_hubp_setup2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs,
+ union dml2_global_sync_programming *global_sync,
+ struct dc_crtc_timing *timing);
+
+void hwss_add_hubp_setup(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+
+void hwss_add_hubp_set_unbounded_requesting(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool unbounded_req);
+
+void hwss_add_hubp_setup_interdependent2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs);
+
+void hwss_add_hubp_setup_interdependent(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs);
+
+void hwss_add_hubp_program_surface_config(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ struct dc_tiling_info *tiling_info,
+ struct plane_size plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ int compat_level);
+
+void hwss_add_dpp_setup_dpp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_set_cursor_matrix(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ enum dc_color_space color_space,
+ struct dc_csc_transform *cursor_csc_color_matrix);
+
+void hwss_add_mpc_update_blending(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpcc_blnd_cfg blnd_cfg,
+ int mpcc_id);
+
+void hwss_add_mpc_assert_idle_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int mpcc_id);
+
+void hwss_add_mpc_insert_plane(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpc_tree *mpc_tree_params,
+ struct mpcc_blnd_cfg blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
+ int mpcc_id);
+
+void hwss_add_dpp_set_scaler(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ const struct scaler_data *scl_data);
+
+void hwss_add_hubp_mem_program_viewport(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c);
+
+void hwss_add_abort_cursor_offload_update(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_set_cursor_attribute(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_set_cursor_position(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_set_cursor_sdr_white_level(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_program_output_csc(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix,
+ int opp_id);
+
+void hwss_add_phantom_hubp_post_enable(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_update_force_pstate(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct dc_state *context);
+
+void hwss_add_hubbub_apply_dedcn21_147_wa(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub);
+
+void hwss_add_hubbub_allow_self_refresh_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ bool allow,
+ bool *disallow_self_refresh_applied);
+
+void hwss_add_tg_get_frame_count(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ unsigned int *frame_count);
+
+void hwss_add_tg_set_dsc_config(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct dsc_optc_config *dsc_optc_cfg,
+ bool enable);
+
+void hwss_add_opp_program_left_edge_extra_pixel(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum dc_pixel_encoding pixel_encoding,
+ bool is_otg_master);
+
#endif /* __DC_HW_SEQUENCER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 1e2d247fbbac..406db231bc72 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -27,6 +27,7 @@
#define __DC_HW_SEQUENCER_PRIVATE_H__
#include "dc_types.h"
+#include "hw_sequencer.h"
enum pipe_gating_control {
PIPE_GATING_CONTROL_DISABLE = 0,
@@ -80,7 +81,13 @@ struct hwseq_private_funcs {
void (*plane_atomic_disconnect)(struct dc *dc,
struct dc_state *state,
struct pipe_ctx *pipe_ctx);
+ void (*plane_atomic_disconnect_sequence)(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*update_mpcc_sequence)(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
bool (*set_input_transfer_func)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state);
@@ -97,6 +104,10 @@ struct hwseq_private_funcs {
void (*blank_pixel_data)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
bool blank);
+ void (*blank_pixel_data_sequence)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank,
+ struct block_sequence_state *seq_state);
enum dc_status (*enable_stream_timing)(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
@@ -105,6 +116,8 @@ struct hwseq_private_funcs {
bool enable);
void (*setup_vupdate_interrupt)(struct dc *dc,
struct pipe_ctx *pipe_ctx);
+ void (*setup_vupdate_interrupt_sequence)(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
void (*init_blank)(struct dc *dc, struct timing_generator *tg);
void (*disable_vga)(struct dce_hwseq *hws);
@@ -112,6 +125,10 @@ struct hwseq_private_funcs {
void (*plane_atomic_power_down)(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp);
+ void (*plane_atomic_power_down_sequence)(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp,
+ struct block_sequence_state *seq_state);
void (*plane_atomic_disable)(struct dc *dc, struct pipe_ctx *pipe_ctx);
void (*enable_power_gating_plane)(struct dce_hwseq *hws,
bool enable);
@@ -140,15 +157,31 @@ struct hwseq_private_funcs {
unsigned int dsc_inst);
void (*update_odm)(struct dc *dc, struct dc_state *context,
struct pipe_ctx *pipe_ctx);
+ void (*update_odm_sequence)(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *pipe_ctx, struct block_sequence_state *seq_state);
void (*program_all_writeback_pipes_in_tree)(struct dc *dc,
const struct dc_stream_state *stream,
struct dc_state *context);
+ void (*program_all_writeback_pipes_in_tree_sequence)(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
bool (*s0i3_golden_init_wa)(struct dc *dc);
void (*set_hdr_multiplier)(struct pipe_ctx *pipe_ctx);
+ void (*set_hdr_multiplier_sequence)(struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*verify_allow_pstate_change_high)(struct dc *dc);
+ void (*verify_allow_pstate_change_high_sequence)(struct dc *dc,
+ struct block_sequence_state *seq_state);
void (*program_pipe)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
+ void (*program_pipe_sequence)(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
bool (*wait_for_blank_complete)(struct output_pixel_processor *opp);
void (*dccg_init)(struct dce_hwseq *hws);
bool (*set_blend_lut)(struct pipe_ctx *pipe_ctx,
@@ -163,6 +196,8 @@ struct hwseq_private_funcs {
void (*enable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context);
void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context);
+ void (*program_mall_pipe_config_sequence)(struct dc *dc, struct dc_state *context,
+ struct block_sequence_state *seq_state);
void (*update_force_pstate)(struct dc *dc, struct dc_state *context);
void (*update_mall_sel)(struct dc *dc, struct dc_state *context);
unsigned int (*calculate_dccg_k1_k2_values)(struct pipe_ctx *pipe_ctx,
@@ -186,6 +221,7 @@ struct hwseq_private_funcs {
void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx);
void (*wait_for_pipe_update_if_needed)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only);
void (*set_wait_for_update_needed_for_pipe)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*dc_ip_request_cntl)(struct dc *dc, bool enable);
};
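Note: the private hooks follow the same record-instead-of-execute convention as
hw_sequencer.h: each classic hook gains a _sequence twin whose only extra
argument is the block_sequence_state to append into. A hedged sketch of the
shape such a twin takes, reusing the sk_* stand-ins from the earlier sketches:

/* immediate variant: a register write would happen here */
static void sk_blank_pixel_data(hubp_t *hubp, int blank)
{
	(void)hubp;
	(void)blank;
}

/* deferred twin: same inputs, but records the work as a step instead */
static void sk_blank_pixel_data_sequence(struct sk_state *s,
					 hubp_t *hubp, int blank)
{
	sk_add_hubp_set_blank(s, hubp, blank);
}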
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index f3696143590c..82085d9c3f40 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -59,6 +59,7 @@ enum dc_status {
DC_FAIL_DP_PAYLOAD_ALLOCATION = 27,
DC_FAIL_DP_LINK_BANDWIDTH = 28,
DC_FAIL_HW_CURSOR_SUPPORT = 29,
+ DC_FAIL_DP_TUNNEL_BW_VALIDATE = 30,
DC_ERROR_UNEXPECTED = -1
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 0cf349cafb3e..5ed2cd344804 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -58,8 +58,8 @@
#include "transform.h"
#include "dpp.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
-#include "dml2/dml21/inc/dml_top_types.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_types.h"
struct resource_pool;
struct dc_state;
@@ -67,6 +67,8 @@ struct resource_context;
struct clk_bw_params;
struct dc_mcache_params;
+#define MAX_RMCM_INST 2
+
struct resource_funcs {
enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index);
void (*destroy)(struct resource_pool **pool);
@@ -82,7 +84,7 @@ struct resource_funcs {
enum dc_status (*validate_bandwidth)(
struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void (*calculate_wm_and_dlg)(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -107,7 +109,7 @@ struct resource_funcs {
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
/*
* Algorithm for assigning available link encoders to links.
@@ -223,6 +225,11 @@ struct resource_funcs {
const struct dc_stream_state *stream);
bool (*program_mcache_pipe_config)(struct dc_state *context,
const struct dc_mcache_params *mcache_params);
+ enum dc_status (*update_dc_state_for_encoder_switch)(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint8_t pipe_count,
+ struct pipe_ctx *pipes,
+ struct audio_output *audio_output);
};
struct audio_support{
@@ -267,7 +274,7 @@ struct resource_pool {
/* An array for accessing the link encoder objects that have been created.
* Index in array corresponds to engine ID - viz. 0: ENGINE_ID_DIGA
*/
- struct link_encoder *link_encoders[MAX_DIG_LINK_ENCODERS];
+ struct link_encoder *link_encoders[MAX_LINK_ENCODERS];
/* Number of DIG link encoder objects created - i.e. number of valid
* entries in link_encoders array.
*/
@@ -281,6 +288,7 @@ struct resource_pool {
struct hpo_dp_link_encoder *hpo_dp_link_enc[MAX_HPO_DP2_LINK_ENCODERS];
struct dc_3dlut *mpc_lut[MAX_PIPES];
struct dc_transfer_func *mpc_shaper[MAX_PIPES];
+ struct dc_rmcm_3dlut rmcm_3dlut[MAX_RMCM_INST];
struct {
unsigned int xtalin_clock_inKhz;
@@ -425,7 +433,14 @@ enum p_state_switch_method {
P_STATE_V_ACTIVE,
P_STATE_SUB_VP,
P_STATE_DRR_SUB_VP,
- P_STATE_V_BLANK_SUB_VP
+ P_STATE_V_BLANK_SUB_VP,
+};
+
+struct dsc_padding_params {
+ /* pixels borrowed from hblank to hactive */
+ uint8_t dsc_hactive_padding;
+ uint32_t dsc_htotal_padding;
+ uint32_t dsc_pix_clk_100hz;
};
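Note: dsc_padding_params generalizes the old single-byte hblank_borrow field
(removed from pipe_ctx in the hunk below): the hactive padding, the htotal
padding and the resulting pixel clock now travel together. A hedged sketch of
applying them to a timing, assuming the semantics carry over from hblank_borrow:

/* assumption: padding extends hactive/htotal exactly as hblank_borrow
 * did, and dsc_pix_clk_100hz is the already-padded pixel clock */
static void apply_dsc_padding(struct dc_crtc_timing *t,
			      const struct dsc_padding_params *p)
{
	t->h_addressable += p->dsc_hactive_padding;
	t->h_total += p->dsc_htotal_padding;
	t->pix_clk_100hz = p->dsc_pix_clk_100hz;
}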
struct pipe_ctx {
@@ -485,8 +500,7 @@ struct pipe_ctx {
/* subvp_index: only valid if the pipe is a SUBVP_MAIN*/
uint8_t subvp_index;
struct pixel_rate_divider pixel_rate_divider;
- /* pixels borrowed from hblank to hactive */
- uint8_t hblank_borrow;
+ struct dsc_padding_params dsc_padding_params;
/* next vupdate */
uint32_t next_vupdate;
uint32_t wait_frame_count;
@@ -500,7 +514,7 @@ struct pipe_ctx {
struct link_enc_cfg_context {
enum link_enc_cfg_mode mode;
struct link_enc_assignment link_enc_assignments[MAX_PIPES];
- enum engine_id link_enc_avail[MAX_DIG_LINK_ENCODERS];
+ enum engine_id link_enc_avail[MAX_LINK_ENCODERS];
struct link_enc_assignment transient_assignments[MAX_PIPES];
};
@@ -512,8 +526,8 @@ struct resource_context {
uint8_t dp_clock_source_ref_count;
bool is_dsc_acquired[MAX_PIPES];
struct link_enc_cfg_context link_enc_cfg_ctx;
- unsigned int dio_link_enc_to_link_idx[MAX_DIG_LINK_ENCODERS];
- int dio_link_enc_ref_cnts[MAX_DIG_LINK_ENCODERS];
+ unsigned int dio_link_enc_to_link_idx[MAX_LINK_ENCODERS];
+ int dio_link_enc_ref_cnts[MAX_LINK_ENCODERS];
bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS];
unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
@@ -556,7 +570,10 @@ struct dcn_bw_output {
struct dml2_mcache_surface_allocation mcache_allocations[DML2_MAX_PLANES];
struct dmub_cmd_fams2_global_config fams2_global_config;
union dmub_cmd_fams2_config fams2_stream_base_params[DML2_MAX_PLANES];
- union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES];
+ union {
+ union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES];
+ union dmub_fams2_stream_static_sub_state_v2 fams2_stream_sub_params_v2[DML2_MAX_PLANES];
+ };
struct dml2_display_arb_regs arb_regs;
};
@@ -672,6 +689,7 @@ struct replay_context {
/* Controller Id used for Dig Fe source select */
enum controller_id controllerId;
unsigned int line_time_in_ns;
+ bool os_request_force_ffu;
};
enum dc_replay_enable {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index d19a595c2be4..134091d5842d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -622,7 +622,7 @@ extern const struct dcn_ip_params dcn10_ip_defaults;
bool dcn_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
- bool fast_validate);
+ enum dc_validate_mode validate_mode);
void dcn_get_soc_clks(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index c14d64687a3d..2c9a4a12bd8a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -100,6 +100,17 @@ struct dcn301_clk_internal {
#define MAX_NUM_DPM_LVL 8
#define WM_SET_COUNT 4
+enum clk_type {
+ CLK_TYPE_DCFCLK,
+ CLK_TYPE_FCLK,
+ CLK_TYPE_MCLK,
+ CLK_TYPE_SOCCLK,
+ CLK_TYPE_DTBCLK,
+ CLK_TYPE_DISPCLK,
+ CLK_TYPE_DPPCLK,
+ CLK_TYPE_DSCCLK,
+ CLK_TYPE_COUNT
+};
struct clk_limit_table_entry {
unsigned int voltage; /* millivolts with 2 fractional bits */
@@ -324,6 +335,11 @@ struct clk_mgr_funcs {
int (*get_dispclk_from_dentist)(struct clk_mgr *clk_mgr_base);
+ bool (*is_dc_mode_present)(struct clk_mgr *clk_mgr);
+
+ uint32_t (*set_smartmux_switch)(struct clk_mgr *clk_mgr, uint32_t pins_to_set);
+
+ unsigned int (*get_max_clock_khz)(struct clk_mgr *clk_mgr_base, enum clk_type clk_type);
};
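A hypothetical caller sketch for the new get_max_clock_khz hook; the stub
implementation and its numbers are invented here purely to show the NULL-guarded
optional-hook pattern DC uses for clk_mgr funcs:

/* Standalone sketch: querying a clk_mgr-style optional hook per clock type. */
#include <stdio.h>

enum clk_type { CLK_TYPE_DCFCLK, CLK_TYPE_DISPCLK, CLK_TYPE_COUNT };

struct clk_mgr;
struct clk_mgr_funcs {
        unsigned int (*get_max_clock_khz)(struct clk_mgr *clk_mgr, enum clk_type clk_type);
};
struct clk_mgr { const struct clk_mgr_funcs *funcs; };

static unsigned int dummy_get_max_clock_khz(struct clk_mgr *clk_mgr, enum clk_type t)
{
        (void)clk_mgr;
        /* Fabricated numbers for illustration only. */
        return t == CLK_TYPE_DISPCLK ? 1200000 : 800000;
}

int main(void)
{
        const struct clk_mgr_funcs funcs = { .get_max_clock_khz = dummy_get_max_clock_khz };
        struct clk_mgr mgr = { .funcs = &funcs };

        /* Optional hook: guard with a NULL check before calling. */
        if (mgr.funcs->get_max_clock_khz)
                printf("max dispclk: %u kHz\n",
                       mgr.funcs->get_max_clock_khz(&mgr, CLK_TYPE_DISPCLK));
        return 0;
}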
struct clk_mgr {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
index 45645f9fd86c..7ce2f417f86a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
@@ -57,9 +57,9 @@ struct cursor_attribute_cache_hubp {
} size;
union reg_cursor_settings_cfg {
struct {
- uint32_t dst_y_offset: 8;
- uint32_t chunk_hdl_adjust: 2;
- uint32_t reserved: 22;
+ uint32_t dst_y_offset: 8;
+ uint32_t chunk_hdl_adjust: 2;
+ uint32_t reserved: 22;
} bits;
uint32_t raw;
} settings;
@@ -83,12 +83,34 @@ union reg_cur0_control_cfg {
} bits;
uint32_t raw;
};
+
struct cursor_position_cache_dpp {
union reg_cur0_control_cfg cur0_ctl;
};
struct cursor_attribute_cache_dpp {
union reg_cur0_control_cfg cur0_ctl;
+ union reg_cur0_fp_scale_bias {
+ struct {
+ uint32_t fp_bias: 16;
+ uint32_t fp_scale: 16;
+ } bits;
+ uint32_t raw;
+ } fp_scale_bias;
+ union reg_cur0_fp_scale_bias_g_y {
+ struct {
+ uint32_t fp_bias_g_y: 16;
+ uint32_t fp_scale_g_y: 16;
+ } bits;
+ uint32_t raw;
+ } fp_scale_bias_g_y;
+ union reg_cur0_fp_scale_bias_rb_crcb {
+ struct {
+ uint32_t fp_bias_rb_crcb: 16;
+ uint32_t fp_scale_rb_crcb: 16;
+ } bits;
+ uint32_t raw;
+ } fp_scale_bias_rb_crcb;
};
struct cursor_attributes_cfg {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index e94e9ba60f55..500a601e99b5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -71,6 +71,125 @@ enum pixel_rate_div {
PIXEL_RATE_DIV_NA = 0xF
};
+struct dcn_dccg_reg_state {
+ uint32_t dc_mem_global_pwr_req_cntl;
+ uint32_t dccg_audio_dtbclk_dto_modulo;
+ uint32_t dccg_audio_dtbclk_dto_phase;
+ uint32_t dccg_audio_dto_source;
+ uint32_t dccg_audio_dto0_module;
+ uint32_t dccg_audio_dto0_phase;
+ uint32_t dccg_audio_dto1_module;
+ uint32_t dccg_audio_dto1_phase;
+ uint32_t dccg_cac_status;
+ uint32_t dccg_cac_status2;
+ uint32_t dccg_disp_cntl_reg;
+ uint32_t dccg_ds_cntl;
+ uint32_t dccg_ds_dto_incr;
+ uint32_t dccg_ds_dto_modulo;
+ uint32_t dccg_ds_hw_cal_interval;
+ uint32_t dccg_gate_disable_cntl;
+ uint32_t dccg_gate_disable_cntl2;
+ uint32_t dccg_gate_disable_cntl3;
+ uint32_t dccg_gate_disable_cntl4;
+ uint32_t dccg_gate_disable_cntl5;
+ uint32_t dccg_gate_disable_cntl6;
+ uint32_t dccg_global_fgcg_rep_cntl;
+ uint32_t dccg_gtc_cntl;
+ uint32_t dccg_gtc_current;
+ uint32_t dccg_gtc_dto_incr;
+ uint32_t dccg_gtc_dto_modulo;
+ uint32_t dccg_perfmon_cntl;
+ uint32_t dccg_perfmon_cntl2;
+ uint32_t dccg_soft_reset;
+ uint32_t dccg_test_clk_sel;
+ uint32_t dccg_vsync_cnt_ctrl;
+ uint32_t dccg_vsync_cnt_int_ctrl;
+ uint32_t dccg_vsync_otg0_latch_value;
+ uint32_t dccg_vsync_otg1_latch_value;
+ uint32_t dccg_vsync_otg2_latch_value;
+ uint32_t dccg_vsync_otg3_latch_value;
+ uint32_t dccg_vsync_otg4_latch_value;
+ uint32_t dccg_vsync_otg5_latch_value;
+ uint32_t dispclk_cgtt_blk_ctrl_reg;
+ uint32_t dispclk_freq_change_cntl;
+ uint32_t dp_dto_dbuf_en;
+ uint32_t dp_dto0_modulo;
+ uint32_t dp_dto0_phase;
+ uint32_t dp_dto1_modulo;
+ uint32_t dp_dto1_phase;
+ uint32_t dp_dto2_modulo;
+ uint32_t dp_dto2_phase;
+ uint32_t dp_dto3_modulo;
+ uint32_t dp_dto3_phase;
+ uint32_t dpiaclk_540m_dto_modulo;
+ uint32_t dpiaclk_540m_dto_phase;
+ uint32_t dpiaclk_810m_dto_modulo;
+ uint32_t dpiaclk_810m_dto_phase;
+ uint32_t dpiaclk_dto_cntl;
+ uint32_t dpiasymclk_cntl;
+ uint32_t dppclk_cgtt_blk_ctrl_reg;
+ uint32_t dppclk_ctrl;
+ uint32_t dppclk_dto_ctrl;
+ uint32_t dppclk0_dto_param;
+ uint32_t dppclk1_dto_param;
+ uint32_t dppclk2_dto_param;
+ uint32_t dppclk3_dto_param;
+ uint32_t dprefclk_cgtt_blk_ctrl_reg;
+ uint32_t dprefclk_cntl;
+ uint32_t dpstreamclk_cntl;
+ uint32_t dscclk_dto_ctrl;
+ uint32_t dscclk0_dto_param;
+ uint32_t dscclk1_dto_param;
+ uint32_t dscclk2_dto_param;
+ uint32_t dscclk3_dto_param;
+ uint32_t dtbclk_dto_dbuf_en;
+ uint32_t dtbclk_dto0_modulo;
+ uint32_t dtbclk_dto0_phase;
+ uint32_t dtbclk_dto1_modulo;
+ uint32_t dtbclk_dto1_phase;
+ uint32_t dtbclk_dto2_modulo;
+ uint32_t dtbclk_dto2_phase;
+ uint32_t dtbclk_dto3_modulo;
+ uint32_t dtbclk_dto3_phase;
+ uint32_t dtbclk_p_cntl;
+ uint32_t force_symclk_disable;
+ uint32_t hdmicharclk0_clock_cntl;
+ uint32_t hdmistreamclk_cntl;
+ uint32_t hdmistreamclk0_dto_param;
+ uint32_t microsecond_time_base_div;
+ uint32_t millisecond_time_base_div;
+ uint32_t otg_pixel_rate_div;
+ uint32_t otg0_phypll_pixel_rate_cntl;
+ uint32_t otg0_pixel_rate_cntl;
+ uint32_t otg1_phypll_pixel_rate_cntl;
+ uint32_t otg1_pixel_rate_cntl;
+ uint32_t otg2_phypll_pixel_rate_cntl;
+ uint32_t otg2_pixel_rate_cntl;
+ uint32_t otg3_phypll_pixel_rate_cntl;
+ uint32_t otg3_pixel_rate_cntl;
+ uint32_t phyasymclk_clock_cntl;
+ uint32_t phybsymclk_clock_cntl;
+ uint32_t phycsymclk_clock_cntl;
+ uint32_t phydsymclk_clock_cntl;
+ uint32_t phyesymclk_clock_cntl;
+ uint32_t phyplla_pixclk_resync_cntl;
+ uint32_t phypllb_pixclk_resync_cntl;
+ uint32_t phypllc_pixclk_resync_cntl;
+ uint32_t phyplld_pixclk_resync_cntl;
+ uint32_t phyplle_pixclk_resync_cntl;
+ uint32_t refclk_cgtt_blk_ctrl_reg;
+ uint32_t socclk_cgtt_blk_ctrl_reg;
+ uint32_t symclk_cgtt_blk_ctrl_reg;
+ uint32_t symclk_psp_cntl;
+ uint32_t symclk32_le_cntl;
+ uint32_t symclk32_se_cntl;
+ uint32_t symclka_clock_enable;
+ uint32_t symclkb_clock_enable;
+ uint32_t symclkc_clock_enable;
+ uint32_t symclkd_clock_enable;
+ uint32_t symclke_clock_enable;
+};
+
struct dccg {
struct dc_context *ctx;
const struct dccg_funcs *funcs;
@@ -81,7 +200,6 @@ struct dccg {
//int audio_dtbclk_khz;/* TODO needs to be removed */
//int ref_dtbclk_khz;/* TODO needs to be removed */
};
-
struct dtbclk_dto_params {
const struct dc_crtc_timing *timing;
int otg_inst;
@@ -211,9 +329,10 @@ struct dccg_funcs {
struct dccg *dccg,
enum streamclk_source src,
uint32_t otg_inst);
- void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst);
+ void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst, uint32_t num_slices_h);
void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst);
void (*dccg_root_gate_disable_control)(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating);
+ void (*dccg_read_reg_state)(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state);
};
#endif //__DAL_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 52b745667ef7..1ddfa30411c8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -137,6 +137,27 @@ struct dcn_hubbub_state {
uint32_t dram_state_cntl;
};
+struct dcn_hubbub_reg_state {
+ uint32_t det0_ctrl;
+ uint32_t det1_ctrl;
+ uint32_t det2_ctrl;
+ uint32_t det3_ctrl;
+ uint32_t compbuf_ctrl;
+};
+
+struct hubbub_system_latencies {
+ uint32_t max_latency_ns;
+ uint32_t avg_latency_ns;
+ uint32_t min_latency_ns;
+};
+
+struct hubbub_urgent_latency_params {
+ uint32_t refclk_mhz;
+ uint32_t t_win_ns;
+ uint32_t bandwidth_mbps;
+ uint32_t bw_factor_x1000;
+};
+
struct hubbub_funcs {
void (*update_dchub)(
struct hubbub *hubbub,
@@ -203,6 +224,8 @@ struct hubbub_funcs {
void (*init_watermarks)(struct hubbub *hubbub);
+ void (*hubbub_read_reg_state)(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state);
+
/**
* @program_det_size:
*
@@ -229,6 +252,39 @@ struct hubbub_funcs {
void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase);
void (*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst);
bool (*program_arbiter)(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower);
+ void (*dchvm_init)(struct hubbub *hubbub);
+
+ struct hubbub_perfmon_funcs {
+ void (*reset)(struct hubbub *hubbub);
+ void (*start_measuring_max_memory_latency_ns)(
+ struct hubbub *hubbub);
+ uint32_t (*get_max_memory_latency_ns)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t *sample_count);
+ void (*start_measuring_average_memory_latency_ns)(
+ struct hubbub *hubbub);
+ uint32_t (*get_average_memory_latency_ns)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t *sample_count);
+ void (*start_measuring_urgent_ramp_latency_ns)(
+ struct hubbub *hubbub,
+ const struct hubbub_urgent_latency_params *params);
+ uint32_t (*get_urgent_ramp_latency_ns)(struct hubbub *hubbub,
+ uint32_t refclk_mhz);
+ void (*start_measuring_unbounded_bandwidth_mbps)(
+ struct hubbub *hubbub);
+ uint32_t (*get_unbounded_bandwidth_mbps)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t *duration_ns);
+ void (*start_measuring_average_bandwidth_mbps)(
+ struct hubbub *hubbub);
+ uint32_t (*get_average_bandwidth_mbps)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t min_duration_ns,
+ uint32_t *duration_ns);
+ } perfmon;
+
+ struct hubbub_qos_funcs {
+ void (*force_display_nominal_profile)(struct hubbub *hubbub);
+ void (*force_display_urgent_profile)(struct hubbub *hubbub);
+ void (*reset_display_qos_profile)(struct hubbub *hubbub);
+ } qos;
};
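A standalone sketch of the perfmon measurement pattern (reset, start, read back).
For brevity it hangs the ops directly off a simplified struct hubbub; in this header
the perfmon table actually lives inside hubbub_funcs, and all returned numbers are
fabricated:

/* Standalone sketch of the perfmon call pattern: reset, start, then read back. */
#include <stdint.h>
#include <stdio.h>

struct hubbub;
struct hubbub_perfmon_funcs {
        void (*reset)(struct hubbub *hubbub);
        void (*start_measuring_max_memory_latency_ns)(struct hubbub *hubbub);
        uint32_t (*get_max_memory_latency_ns)(struct hubbub *hubbub,
                                              uint32_t refclk_mhz, uint32_t *sample_count);
};
struct hubbub { struct hubbub_perfmon_funcs perfmon; };

static void stub_reset(struct hubbub *h) { (void)h; }
static void stub_start(struct hubbub *h) { (void)h; }
static uint32_t stub_get(struct hubbub *h, uint32_t refclk_mhz, uint32_t *n)
{
        (void)h; (void)refclk_mhz;
        *n = 128;       /* fabricated sample count */
        return 950;     /* fabricated latency in ns */
}

int main(void)
{
        struct hubbub hubbub = { .perfmon = { stub_reset, stub_start, stub_get } };
        uint32_t samples = 0;

        hubbub.perfmon.reset(&hubbub);
        hubbub.perfmon.start_measuring_max_memory_latency_ns(&hubbub);
        /* ... let the workload run, then read the measurement ... */
        printf("max mem latency: %u ns over %u samples\n",
               hubbub.perfmon.get_max_memory_latency_ns(&hubbub, 100, &samples), samples);
        return 0;
}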
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 0c5675d1c593..d88b57d4f512 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -65,7 +65,6 @@ union defer_reg_writes {
} bits;
uint32_t raw;
};
-
struct dpp {
const struct dpp_funcs *funcs;
struct dc_context *ctx;
@@ -84,6 +83,7 @@ struct dpp {
struct pwl_params shaper_params;
bool cm_bypass_mode;
+ bool cursor_offload;
struct cursor_position_cache_dpp pos;
struct cursor_attribute_cache_dpp att;
@@ -202,6 +202,19 @@ struct dcn_dpp_state {
uint32_t gamcor_mode;
};
+struct dcn_dpp_reg_state {
+ uint32_t recout_start;
+ uint32_t recout_size;
+ uint32_t scl_horz_filter_scale_ratio;
+ uint32_t scl_vert_filter_scale_ratio;
+ uint32_t scl_mode;
+ uint32_t cm_control;
+ uint32_t dpp_control;
+ uint32_t dscl_control;
+ uint32_t obuf_control;
+ uint32_t mpc_size;
+};
+
struct CM_bias_params {
uint32_t cm_bias_cr_r;
uint32_t cm_bias_y_g;
@@ -225,6 +238,8 @@ struct dpp_funcs {
void (*dpp_read_state)(struct dpp *dpp, struct dcn_dpp_state *s);
+ void (*dpp_read_reg_state)(struct dpp *dpp, struct dcn_dpp_reg_state *dpp_reg_state);
+
void (*dpp_reset)(struct dpp *dpp);
void (*dpp_set_scaler)(struct dpp *dpp,
@@ -349,6 +364,9 @@ struct dpp_funcs {
struct dpp *dpp_base,
enum dc_color_space color_space,
struct dc_csc_transform cursor_csc_color_matrix);
+
+ void (*dpp_force_disable_cursor)(struct dpp *dpp_base);
+
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index b610beb075d5..a79019365af8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -41,8 +41,8 @@
#include "mem_input.h"
#include "cursor_reg_cache.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
-#include "dml2/dml21/inc/dml_top_types.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_types.h"
#define OPP_ID_INVALID 0xf
#define MAX_TTU 0xffffff
@@ -89,7 +89,7 @@ enum hubp_3dlut_fl_addressing_mode {
enum hubp_3dlut_fl_width {
hubp_3dlut_fl_width_17 = 17,
hubp_3dlut_fl_width_33 = 33,
- hubp_3dlut_fl_width_transformed = 4916
+ hubp_3dlut_fl_width_transformed = 4916, //mpc default
};
enum hubp_3dlut_fl_crossbar_bit_slice {
@@ -99,6 +99,22 @@ enum hubp_3dlut_fl_crossbar_bit_slice {
hubp_3dlut_fl_crossbar_bit_slice_48_63 = 3
};
+struct hubp_fl_3dlut_config {
+ bool enabled;
+ enum hubp_3dlut_fl_width width;
+ enum hubp_3dlut_fl_mode mode;
+ enum hubp_3dlut_fl_format format;
+ uint16_t bias;
+ uint16_t scale;
+ struct dc_plane_address address;
+ enum hubp_3dlut_fl_addressing_mode addr_mode;
+ enum dc_cm2_gpu_mem_layout layout;
+ uint8_t protection_bits;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r;
+};
+
struct hubp {
const struct hubp_funcs *funcs;
struct dc_context *ctx;
@@ -110,11 +126,13 @@ struct hubp {
int mpcc_id;
struct dc_cursor_attributes curs_attr;
struct dc_cursor_position curs_pos;
+ bool cursor_offload;
bool power_gated;
struct cursor_position_cache_hubp pos;
struct cursor_attribute_cache_hubp att;
struct cursor_rect cur_rect;
+ bool use_mall_for_cursor;
};
struct surface_flip_registers {
@@ -220,6 +238,7 @@ struct hubp_funcs {
void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
void (*hubp_read_state)(struct hubp *hubp);
+ void (*hubp_read_reg_state)(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state);
void (*hubp_clear_underflow)(struct hubp *hubp);
void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp);
unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
@@ -282,13 +301,16 @@ struct hubp_funcs {
void (*hubp_enable_3dlut_fl)(struct hubp *hubp, bool enable);
void (*hubp_program_3dlut_fl_addressing_mode)(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode);
void (*hubp_program_3dlut_fl_width)(struct hubp *hubp, enum hubp_3dlut_fl_width width);
- void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, bool protection_enabled);
+ void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, uint8_t protection_bits);
void (*hubp_program_3dlut_fl_crossbar)(struct hubp *hubp,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
int (*hubp_get_3dlut_fl_done)(struct hubp *hubp);
+ void (*hubp_program_3dlut_fl_config)(struct hubp *hubp, struct hubp_fl_3dlut_config *cfg);
void (*hubp_clear_tiling)(struct hubp *hubp);
+ uint32_t (*hubp_get_current_read_line)(struct hubp *hubp);
+ uint32_t (*hubp_get_det_config_error)(struct hubp *hubp);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 41c76ba9ba56..a61d12ec61bc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -44,12 +44,67 @@
*/
#define MAX_PIPES 6
#define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
-#define MAX_LINKS (MAX_PIPES * 2 +2)
+
+#define MAX_DPIA 6
+#define MAX_CONNECTOR 6
+#define MAX_VIRTUAL_LINKS 4
+
+#define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS)
+
+/**
+ * define MAX_DIG_LINK_ENCODERS - maximum number of digital encoders
+ *
+ * Digital encoders are ENGINE_ID_DIGA...G, there are at most 7,
+ * although not every GPU may have that many.
+ */
#define MAX_DIG_LINK_ENCODERS 7
+
+/**
+ * define MAX_DAC_LINK_ENCODERS - maximum number of analog link encoders
+ *
+ * Analog encoders are ENGINE_ID_DACA/B, there are at most 2,
+ * although not every GPU may have that many. Modern GPUs typically
+ * don't have analog encoders.
+ */
+#define MAX_DAC_LINK_ENCODERS 2
+
+/**
+ * define MAX_LINK_ENCODERS - maximum number of link encoders in total
+ *
+ * This includes both analog and digital encoders.
+ */
+#define MAX_LINK_ENCODERS (MAX_DIG_LINK_ENCODERS + MAX_DAC_LINK_ENCODERS)
+
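Worked out with the values above: the old limit was MAX_PIPES * 2 + 2 = 6 * 2 + 2 = 14
links, while the new MAX_LINKS = 6 + 6 + 4 = 16; MAX_LINK_ENCODERS below evaluates to
7 + 2 = 9.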
#define MAX_DWB_PIPES 1
#define MAX_HPO_DP2_ENCODERS 4
#define MAX_HPO_DP2_LINK_ENCODERS 4
+/* Pipe topology snapshot structures */
+#define MAX_TOPOLOGY_SNAPSHOTS 4
+
+struct pipe_topology_line {
+ bool is_phantom_pipe;
+ int plane_idx;
+ int slice_idx;
+ int stream_idx;
+ int dpp_inst;
+ int opp_inst;
+ int tg_inst;
+};
+
+struct pipe_topology_snapshot {
+ struct pipe_topology_line pipe_log_lines[MAX_PIPES];
+ int line_count;
+ uint64_t timestamp_us;
+ int stream_count;
+ int phantom_stream_count;
+};
+
+struct pipe_topology_history {
+ struct pipe_topology_snapshot snapshots[MAX_TOPOLOGY_SNAPSHOTS];
+ int current_snapshot_index;
+};
+
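The patch adds only the structures above; here is a standalone sketch of how a ring of
MAX_TOPOLOGY_SNAPSHOTS entries might be advanced (record_snapshot() is a hypothetical
helper, not DC code):

#include <stdint.h>
#include <string.h>

#define MAX_TOPOLOGY_SNAPSHOTS 4

struct pipe_topology_snapshot { uint64_t timestamp_us; int line_count; };
struct pipe_topology_history {
        struct pipe_topology_snapshot snapshots[MAX_TOPOLOGY_SNAPSHOTS];
        int current_snapshot_index;
};

/* Hypothetical helper: write the newest snapshot and wrap the index. */
static void record_snapshot(struct pipe_topology_history *h,
                            const struct pipe_topology_snapshot *s)
{
        h->snapshots[h->current_snapshot_index] = *s;
        h->current_snapshot_index =
                (h->current_snapshot_index + 1) % MAX_TOPOLOGY_SNAPSHOTS;
}

int main(void)
{
        struct pipe_topology_history hist;
        struct pipe_topology_snapshot s = { 0 };
        int i;

        memset(&hist, 0, sizeof(hist));
        for (i = 0; i < 6; i++) {
                s.timestamp_us = 1000 * (i + 1);
                record_snapshot(&hist, &s);
        }
        return hist.current_snapshot_index; /* 6 % 4 == 2: oldest entries overwritten */
}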
struct gamma_curve {
uint32_t offset;
uint32_t segments_num;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index 08c16ba52a51..df512920a9fa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -47,6 +47,7 @@ struct encoder_init_data {
enum hpd_source_id hpd_source;
/* TODO: in DAL2, here was pointer to EventManagerInterface */
struct graphics_object_id encoder;
+ enum engine_id analog_engine;
struct dc_context *ctx;
enum transmitter transmitter;
};
@@ -83,6 +84,7 @@ struct link_encoder {
struct graphics_object_id connector;
uint32_t output_signals;
enum engine_id preferred_engine;
+ enum engine_id analog_engine;
struct encoder_feature_support features;
enum transmitter transmitter;
enum hpd_source_id hpd_source;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index 42fbc70f7056..d468bc85566a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -29,7 +29,7 @@
#include "include/grph_object_id.h"
#include "dml/display_mode_structs.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
struct dchub_init_data;
struct cstate_pstate_watermarks_st {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 6e303b81bfb0..a8d1abe20f62 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -115,6 +115,16 @@ enum MCM_LUT_ID {
MCM_LUT_SHAPER
};
+struct mpc_fl_3dlut_config {
+ bool enabled;
+ uint16_t width;
+ bool select_lut_bank_a;
+ uint16_t bit_depth;
+ int hubp_index;
+ uint16_t bias;
+ uint16_t scale;
+};
+
union mcm_lut_params {
const struct pwl_params *pwl;
const struct tetrahedral_params *lut3d;
@@ -190,6 +200,42 @@ struct mpc_grph_gamut_adjustment {
enum mpcc_gamut_remap_id mpcc_gamut_remap_block_id;
};
+struct mpc_rmcm_regs {
+ uint32_t rmcm_3dlut_mem_pwr_state;
+ uint32_t rmcm_3dlut_mem_pwr_force;
+ uint32_t rmcm_3dlut_mem_pwr_dis;
+ uint32_t rmcm_3dlut_mem_pwr_mode;
+ uint32_t rmcm_3dlut_size;
+ uint32_t rmcm_3dlut_mode;
+ uint32_t rmcm_3dlut_mode_cur;
+ uint32_t rmcm_3dlut_read_sel;
+ uint32_t rmcm_3dlut_30bit_en;
+ uint32_t rmcm_3dlut_wr_en_mask;
+ uint32_t rmcm_3dlut_ram_sel;
+ uint32_t rmcm_3dlut_out_norm_factor;
+ uint32_t rmcm_3dlut_fl_sel;
+ uint32_t rmcm_3dlut_out_offset_r;
+ uint32_t rmcm_3dlut_out_scale_r;
+ uint32_t rmcm_3dlut_fl_done;
+ uint32_t rmcm_3dlut_fl_soft_underflow;
+ uint32_t rmcm_3dlut_fl_hard_underflow;
+ uint32_t rmcm_cntl;
+ uint32_t rmcm_shaper_mem_pwr_state;
+ uint32_t rmcm_shaper_mem_pwr_force;
+ uint32_t rmcm_shaper_mem_pwr_dis;
+ uint32_t rmcm_shaper_mem_pwr_mode;
+ uint32_t rmcm_shaper_lut_mode;
+ uint32_t rmcm_shaper_mode_cur;
+ uint32_t rmcm_shaper_lut_write_en_mask;
+ uint32_t rmcm_shaper_lut_write_sel;
+ uint32_t rmcm_shaper_offset_b;
+ uint32_t rmcm_shaper_scale_b;
+ uint32_t rmcm_shaper_rama_exp_region_start_b;
+ uint32_t rmcm_shaper_rama_exp_region_start_seg_b;
+ uint32_t rmcm_shaper_rama_exp_region_end_b;
+ uint32_t rmcm_shaper_rama_exp_region_end_base_b;
+};
+
struct mpcc_sm_cfg {
bool enable;
/* 0-single plane, 2-row subsampling, 4-column subsampling, 6-checkerboard subsampling */
@@ -301,6 +347,16 @@ struct mpcc_state {
uint32_t rgam_mode;
uint32_t rgam_lut;
struct mpc_grph_gamut_adjustment gamut_remap;
+ struct mpc_rmcm_regs rmcm_regs;
+};
+
+struct dcn_mpc_reg_state {
+ uint32_t mpcc_bot_sel;
+ uint32_t mpcc_control;
+ uint32_t mpcc_status;
+ uint32_t mpcc_top_sel;
+ uint32_t mpcc_opp_id;
+ uint32_t mpcc_ogam_control;
};
/**
@@ -326,6 +382,24 @@ struct mpc_funcs {
struct mpc *mpc,
int mpcc_inst,
struct mpcc_state *s);
+ /**
+ * @mpc_read_reg_state:
+ *
+ * Read MPC register state for debugging underflows.
+ *
+ * Parameters:
+ *
+ * - [in] mpc - MPC context
+ * - [out] reg_state - MPC register state structure
+ *
+ * Return:
+ *
+ * void
+ */
+ void (*mpc_read_reg_state)(
+ struct mpc *mpc,
+ int mpcc_inst,
+ struct dcn_mpc_reg_state *mpc_reg_state);
/**
* @insert_plane:
@@ -1022,22 +1096,12 @@ struct mpc_funcs {
*/
void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_ID id, const enum MCM_LUT_XABLE xable,
bool lut_bank_a, int mpcc_id);
- /**
- * @program_3dlut_size:
- *
- * Program 3D LUT size.
- *
- * Parameters:
- * - [in/out] mpc - MPC context.
- * - [in] is_17x17x17 - is 3dlut 17x17x17
- * - [in] mpcc_id
- *
- * Return:
- *
- * void
- */
- void (*program_3dlut_size)(struct mpc *mpc, bool is_17x17x17, int mpcc_id);
+ /**
+ * @mcm:
+ *
+ * New HW sequential programming functions for the MPC MCM block.
+ */
struct {
void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id);
void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id);
@@ -1050,7 +1114,13 @@ struct mpc_funcs {
bool lut_bank_a, int mpcc_id);
} mcm;
+ /**
+ * @rmcm:
+ *
+ * New HW sequential programming functions for the MPC RMCM block.
+ */
struct {
+ void (*fl_3dlut_configure)(struct mpc *mpc, struct mpc_fl_3dlut_config *cfg, int mpcc_id);
void (*enable_3dlut_fl)(struct mpc *mpc, bool enable, int mpcc_id);
void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 747679cb4944..e1428a83ecbc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -297,6 +297,16 @@ struct oppbuf_params {
uint32_t num_segment_padded_pixels;
};
+struct dcn_opp_reg_state {
+ uint32_t dpg_control;
+ uint32_t fmt_control;
+ uint32_t oppbuf_control;
+ uint32_t opp_pipe_control;
+ uint32_t opp_pipe_crc_control;
+ uint32_t opp_abm_control;
+ uint32_t dscrm_dsc_forward_config;
+};
+
struct opp_funcs {
@@ -368,6 +378,9 @@ struct opp_funcs {
struct output_pixel_processor *opp,
enum dc_pixel_encoding pixel_encoding,
bool is_primary);
+
+ void (*opp_read_reg_state)(
+ struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
index 00ea3864dd4d..227e3f8d7e5f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
@@ -46,7 +46,10 @@ struct pg_cntl_funcs {
void (*opp_pg_control)(struct pg_cntl *pg_cntl, unsigned int opp_inst, bool power_on);
void (*optc_pg_control)(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on);
void (*dwb_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
+ void (*mem_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
+ void (*dio_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
void (*init_pg_status)(struct pg_cntl *pg_cntl);
+ void (*print_pg_status)(struct pg_cntl *pg_cntl, const char *debug_func, const char *debug_log);
};
#endif //__DC_PG_CNTL_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index fe7f3137f228..27f950ae45ee 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -117,6 +117,7 @@ struct stream_encoder {
uint32_t stream_enc_inst;
struct vpg *vpg;
struct afmt *afmt;
+ struct apg *apg;
};
struct enc_state {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 267ace4eef8a..da7bf59c4b9d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -175,6 +175,135 @@ struct dcn_otg_state {
uint32_t otg_double_buffer_control;
};
+struct dcn_optc_reg_state {
+ uint32_t optc_bytes_per_pixel;
+ uint32_t optc_data_format_control;
+ uint32_t optc_data_source_select;
+ uint32_t optc_input_clock_control;
+ uint32_t optc_input_global_control;
+ uint32_t optc_input_spare_register;
+ uint32_t optc_memory_config;
+ uint32_t optc_rsmu_underflow;
+ uint32_t optc_underflow_threshold;
+ uint32_t optc_width_control;
+
+ uint32_t otg_3d_structure_control;
+ uint32_t otg_clock_control;
+ uint32_t otg_control;
+ uint32_t otg_count_control;
+ uint32_t otg_count_reset;
+ uint32_t otg_crc_cntl;
+ uint32_t otg_crc_sig_blue_control_mask;
+ uint32_t otg_crc_sig_red_green_mask;
+ uint32_t otg_crc0_data_b;
+ uint32_t otg_crc0_data_rg;
+ uint32_t otg_crc0_windowa_x_control;
+ uint32_t otg_crc0_windowa_x_control_readback;
+ uint32_t otg_crc0_windowa_y_control;
+ uint32_t otg_crc0_windowa_y_control_readback;
+ uint32_t otg_crc0_windowb_x_control;
+ uint32_t otg_crc0_windowb_x_control_readback;
+ uint32_t otg_crc0_windowb_y_control;
+ uint32_t otg_crc0_windowb_y_control_readback;
+ uint32_t otg_crc1_data_b;
+ uint32_t otg_crc1_data_rg;
+ uint32_t otg_crc1_windowa_x_control;
+ uint32_t otg_crc1_windowa_x_control_readback;
+ uint32_t otg_crc1_windowa_y_control;
+ uint32_t otg_crc1_windowa_y_control_readback;
+ uint32_t otg_crc1_windowb_x_control;
+ uint32_t otg_crc1_windowb_x_control_readback;
+ uint32_t otg_crc1_windowb_y_control;
+ uint32_t otg_crc1_windowb_y_control_readback;
+ uint32_t otg_crc2_data_b;
+ uint32_t otg_crc2_data_rg;
+ uint32_t otg_crc3_data_b;
+ uint32_t otg_crc3_data_rg;
+ uint32_t otg_dlpc_control;
+ uint32_t otg_double_buffer_control;
+ uint32_t otg_drr_control2;
+ uint32_t otg_drr_control;
+ uint32_t otg_drr_timing_int_status;
+ uint32_t otg_drr_trigger_window;
+ uint32_t otg_drr_v_total_change;
+ uint32_t otg_drr_v_total_reach_range;
+ uint32_t otg_dsc_start_position;
+ uint32_t otg_force_count_now_cntl;
+ uint32_t otg_global_control0;
+ uint32_t otg_global_control1;
+ uint32_t otg_global_control2;
+ uint32_t otg_global_control3;
+ uint32_t otg_global_control4;
+ uint32_t otg_global_sync_status;
+ uint32_t otg_gsl_control;
+ uint32_t otg_gsl_vsync_gap;
+ uint32_t otg_gsl_window_x;
+ uint32_t otg_gsl_window_y;
+ uint32_t otg_h_blank_start_end;
+ uint32_t otg_h_sync_a;
+ uint32_t otg_h_sync_a_cntl;
+ uint32_t otg_h_timing_cntl;
+ uint32_t otg_h_total;
+ uint32_t otg_interlace_control;
+ uint32_t otg_interlace_status;
+ uint32_t otg_interrupt_control;
+ uint32_t otg_long_vblank_status;
+ uint32_t otg_m_const_dto0;
+ uint32_t otg_m_const_dto1;
+ uint32_t otg_manual_force_vsync_next_line;
+ uint32_t otg_master_en;
+ uint32_t otg_master_update_lock;
+ uint32_t otg_master_update_mode;
+ uint32_t otg_nom_vert_position;
+ uint32_t otg_pipe_update_status;
+ uint32_t otg_pixel_data_readback0;
+ uint32_t otg_pixel_data_readback1;
+ uint32_t otg_request_control;
+ uint32_t otg_snapshot_control;
+ uint32_t otg_snapshot_frame;
+ uint32_t otg_snapshot_position;
+ uint32_t otg_snapshot_status;
+ uint32_t otg_spare_register;
+ uint32_t otg_static_screen_control;
+ uint32_t otg_status;
+ uint32_t otg_status_frame_count;
+ uint32_t otg_status_hv_count;
+ uint32_t otg_status_position;
+ uint32_t otg_status_vf_count;
+ uint32_t otg_stereo_control;
+ uint32_t otg_stereo_force_next_eye;
+ uint32_t otg_stereo_status;
+ uint32_t otg_trig_manual_control;
+ uint32_t otg_triga_cntl;
+ uint32_t otg_triga_manual_trig;
+ uint32_t otg_trigb_cntl;
+ uint32_t otg_trigb_manual_trig;
+ uint32_t otg_update_lock;
+ uint32_t otg_v_blank_start_end;
+ uint32_t otg_v_count_stop_control;
+ uint32_t otg_v_count_stop_control2;
+ uint32_t otg_v_sync_a;
+ uint32_t otg_v_sync_a_cntl;
+ uint32_t otg_v_total;
+ uint32_t otg_v_total_control;
+ uint32_t otg_v_total_int_status;
+ uint32_t otg_v_total_max;
+ uint32_t otg_v_total_mid;
+ uint32_t otg_v_total_min;
+ uint32_t otg_vert_sync_control;
+ uint32_t otg_vertical_interrupt0_control;
+ uint32_t otg_vertical_interrupt0_position;
+ uint32_t otg_vertical_interrupt1_control;
+ uint32_t otg_vertical_interrupt1_position;
+ uint32_t otg_vertical_interrupt2_control;
+ uint32_t otg_vertical_interrupt2_position;
+ uint32_t otg_vready_param;
+ uint32_t otg_vstartup_param;
+ uint32_t otg_vsync_nom_int_status;
+ uint32_t otg_vupdate_keepout;
+ uint32_t otg_vupdate_param;
+};
+
/**
* struct timing_generator - Entry point to Output Timing Generator feature.
*/
@@ -374,12 +503,14 @@ struct timing_generator_funcs {
void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
void (*set_long_vtotal)(struct timing_generator *optc, const struct long_vtotal_params *params);
void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
+ void (*wait_otg_disable)(struct timing_generator *optc);
bool (*get_optc_double_buffer_pending)(struct timing_generator *tg);
bool (*get_otg_double_buffer_pending)(struct timing_generator *tg);
bool (*get_pipe_update_pending)(struct timing_generator *tg);
void (*set_vupdate_keepout)(struct timing_generator *tg, bool enable);
bool (*wait_update_lock_status)(struct timing_generator *tg, bool locked);
void (*read_otg_state)(struct timing_generator *tg, struct dcn_otg_state *s);
+ void (*optc_read_reg_state)(struct timing_generator *tg, struct dcn_optc_reg_state *optc_reg_state);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link_service.h
index 7d16351bba99..6f94e48a24d1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_service.h
@@ -42,8 +42,8 @@
* dc_link_exports.c or other dc files implement dc.h
*
* DC to Link:
- * dc_link_exports.c or other dc files include link.h
- * link_factory.c implements link.h
+ * dc_link_exports.c or other dc files include link_service.h
+ * link_factory.c implements link_service.h
*
* Link sub-component to Link sub-component:
* link_factory.c includes --> link_xxx.h
@@ -73,7 +73,7 @@
* 2. Implement your function in the suitable link_xxx.c file.
* 3. Assign the function to link_service in link_factory.c
* 4. NEVER include link_xxx.h headers outside link component.
- * 5. NEVER include link.h on DM side.
+ * 5. NEVER include link_service.h on DM side.
*/
#include "core_types.h"
@@ -144,9 +144,9 @@ struct link_service {
uint32_t (*dp_link_bandwidth_kbps)(
const struct dc_link *link,
const struct dc_link_settings *link_settings);
- bool (*validate_dpia_bandwidth)(
- const struct dc_stream_state *stream,
- const unsigned int num_streams);
+ enum dc_status (*validate_dp_tunnel_bandwidth)(
+ const struct dc *dc,
+ const struct dc_state *new_ctx);
uint32_t (*dp_required_hblank_size_bytes)(
const struct dc_link *link,
@@ -218,7 +218,10 @@ struct link_service {
bool (*dp_overwrite_extended_receiver_cap)(struct dc_link *link);
enum lttpr_mode (*dp_decide_lttpr_mode)(struct dc_link *link,
struct dc_link_settings *link_setting);
-
+ uint8_t (*dp_get_lttpr_count)(struct dc_link *link);
+ void (*edp_get_alpm_support)(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support);
/*************************** DP DPIA/PHY ******************************/
void (*dpia_handle_usb4_bandwidth_allocation_for_link)(
@@ -289,12 +292,12 @@ struct link_service {
enum replay_FW_Message_type msg,
union dmub_replay_cmd_set *cmd_data);
bool (*edp_set_coasting_vtotal)(
- struct dc_link *link, uint32_t coasting_vtotal);
+ struct dc_link *link, uint32_t coasting_vtotal, uint16_t frame_skip_number);
bool (*edp_replay_residency)(const struct dc_link *link,
unsigned int *residency, const bool is_start,
const enum pr_residency_mode mode);
bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link,
- const unsigned int *power_opts, uint32_t coasting_vtotal);
+ const unsigned int *power_opts, uint32_t coasting_vtotal, uint16_t frame_skip_number);
bool (*edp_wait_for_t12)(struct dc_link *link);
bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index a890f581f4e8..79746d931471 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -45,9 +45,11 @@ enum dce_version resource_parse_asic_id(
struct resource_caps {
int num_timing_generator;
int num_opp;
+ int num_dpp;
int num_video_plane;
int num_audio;
int num_stream_encoder;
+ int num_analog_stream_encoder;
int num_pll;
int num_dwb;
int num_ddc;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h
new file mode 100644
index 000000000000..23daf98b8aa8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#ifndef __SOC_AND_IP_TRANSLATOR_H__
+#define __SOC_AND_IP_TRANSLATOR_H__
+
+#include "dc.h"
+#include "dml_top_soc_parameter_types.h"
+
+struct soc_and_ip_translator_funcs {
+ void (*get_soc_bb)(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config);
+ void (*get_ip_caps)(struct dml2_ip_capabilities *dml_ip_caps);
+};
+
+struct soc_and_ip_translator {
+ const struct soc_and_ip_translator_funcs *translator_funcs;
+};
+
+struct soc_and_ip_translator *dc_create_soc_and_ip_translator(enum dce_version dc_version);
+void dc_destroy_soc_and_ip_translator(struct soc_and_ip_translator **soc_and_ip_translator);
+
+
+#endif // __SOC_AND_IP_TRANSLATOR_H__
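A hedged lifecycle sketch for this new API, assuming the DC build context (only the
header added above is needed); the helper name query_soc_and_ip() is invented for
illustration:

/* Fragment assuming the DC tree; shows the create/query/destroy lifecycle. */
#include "soc_and_ip_translator.h"

static void query_soc_and_ip(const struct dc *dc,
                             const struct dml2_configuration_options *config,
                             enum dce_version ver,
                             struct dml2_soc_bb *soc_bb,
                             struct dml2_ip_capabilities *ip_caps)
{
        struct soc_and_ip_translator *xlat = dc_create_soc_and_ip_translator(ver);

        if (!xlat)
                return;

        xlat->translator_funcs->get_soc_bb(soc_bb, dc, config);
        xlat->translator_funcs->get_ip_caps(ip_caps);

        /* Destroy takes a double pointer so it can NULL the caller's handle. */
        dc_destroy_soc_and_ip_translator(&xlat);
}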
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 96febabf464a..1045c268672e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -34,6 +34,7 @@
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
#include "dce/dmub_hw_lock_mgr.h"
+#include "clk_mgr.h"
#define DC_LOGGER \
link->ctx->logger
@@ -67,10 +68,20 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
{
struct pipe_ctx *pipes[MAX_PIPES];
struct dc_state *state = link->dc->current_state;
+ struct dc_stream_update stream_update = { 0 };
+ bool dpms_off = false;
+ bool needs_divider_update = false;
bool was_hpo_acquired = resource_is_hpo_acquired(link->dc->current_state);
bool is_hpo_acquired;
uint8_t count;
int i;
+ struct audio_output audio_output[MAX_PIPES];
+ struct dc_stream_state *streams_on_link[MAX_PIPES];
+ int num_streams_on_link = 0;
+ struct dc *dc = (struct dc *)link->dc;
+
+ needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) !=
+ link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings));
udelay(100);
@@ -83,16 +94,66 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
link->dc,
state,
pipes[i]);
+
+ // Disable OTG and re-enable after updating clocks
+ pipes[i]->stream_res.tg->funcs->disable_crtc(pipes[i]->stream_res.tg);
}
- if (link->dc->hwss.setup_hpo_hw_control) {
- is_hpo_acquired = resource_is_hpo_acquired(state);
- if (was_hpo_acquired != is_hpo_acquired)
- link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired);
+ if (needs_divider_update && link->dc->res_pool->funcs->update_dc_state_for_encoder_switch) {
+ link->dc->res_pool->funcs->update_dc_state_for_encoder_switch(link,
+ link_setting, count,
+ *pipes, &audio_output[0]);
+ for (i = 0; i < count; i++) {
+ pipes[i]->clock_source->funcs->program_pix_clk(
+ pipes[i]->clock_source,
+ &pipes[i]->stream_res.pix_clk_params,
+ link->dc->link_srv->dp_get_encoding_format(&pipes[i]->link_config.dp_link_settings),
+ &pipes[i]->pll_settings);
+
+ if (pipes[i]->stream_res.audio != NULL) {
+ const struct link_hwss *link_hwss = get_link_hwss(
+ link, &pipes[i]->link_res);
+
+ link_hwss->setup_audio_output(pipes[i], &audio_output[i],
+ pipes[i]->stream_res.audio->inst);
+
+ pipes[i]->stream_res.audio->funcs->az_configure(
+ pipes[i]->stream_res.audio,
+ pipes[i]->stream->signal,
+ &audio_output[i].crtc_info,
+ &pipes[i]->stream->audio_info,
+ &audio_output[i].dp_link_info);
+
+ if (link->dc->config.disable_hbr_audio_dp2 &&
+ pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio &&
+ link->dc->link_srv->dp_is_128b_132b_signal(pipes[i]))
+ pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio(pipes[i]->stream_res.audio);
+ }
+ }
}
- for (i = count-1; i >= 0; i--)
- link_set_dpms_on(state, pipes[i]);
+ // Toggle on HPO I/O if necessary
+ is_hpo_acquired = resource_is_hpo_acquired(state);
+ if (was_hpo_acquired != is_hpo_acquired && link->dc->hwss.setup_hpo_hw_control)
+ link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired);
+
+ for (i = 0; i < count; i++)
+ pipes[i]->stream_res.tg->funcs->enable_crtc(pipes[i]->stream_res.tg);
+
+ // Set DPMS on with stream update
+ // Cache all streams on current link since dc_update_planes_and_stream might kill current_state
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link)
+ streams_on_link[num_streams_on_link++] = state->streams[i];
+ }
+
+ for (i = 0; i < num_streams_on_link; i++) {
+ if (streams_on_link[i] && streams_on_link[i]->link && streams_on_link[i]->link == link) {
+ stream_update.stream = streams_on_link[i];
+ stream_update.dpms_off = &dpms_off;
+ dc_update_planes_and_stream(dc, NULL, 0, streams_on_link[i], &stream_update);
+ }
+ }
}
static void dp_test_send_link_training(struct dc_link *link)
@@ -816,7 +877,7 @@ bool dp_set_test_pattern(
return false;
if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) {
- if (should_use_dmub_lock(pipe_ctx->stream->link)) {
+ if (should_use_dmub_inbox1_lock(pipe_ctx->stream->link->dc, pipe_ctx->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -864,7 +925,7 @@ bool dp_set_test_pattern(
CRTC_STATE_VACTIVE);
if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) {
- if (should_use_dmub_lock(pipe_ctx->stream->link)) {
+ if (should_use_dmub_inbox1_lock(pipe_ctx->stream->link->dc, pipe_ctx->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
index eae23ea7f6ec..033650cdb811 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_DP_CTS_H__
#define __LINK_DP_CTS_H__
-#include "link.h"
+#include "link_service.h"
void dp_handle_automated_test(struct dc_link *link);
bool dp_set_test_pattern(
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
index ab437a0c9101..9ff4a6c46a2b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_DP_TRACE_H__
#define __LINK_DP_TRACE_H__
-#include "link.h"
+#include "link_service.h"
void dp_trace_init(struct dc_link *link);
void dp_trace_reset(struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index b68bcc9fca0a..befa67b2b2ae 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -58,8 +58,9 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
return;
}
- link_enc->funcs->connect_dig_be_to_fe(link_enc,
- pipe_ctx->stream_res.stream_enc->id, true);
+ if (!dc_is_rgb_signal(pipe_ctx->stream->signal))
+ link_enc->funcs->connect_dig_be_to_fe(link_enc,
+ pipe_ctx->stream_res.stream_enc->id, true);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
@@ -98,10 +99,13 @@ void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
if (stream_enc->funcs->enable_stream)
stream_enc->funcs->enable_stream(stream_enc,
pipe_ctx->stream->signal, false);
- link_enc->funcs->connect_dig_be_to_fe(
- link_enc,
- pipe_ctx->stream_res.stream_enc->id,
- false);
+
+ if (!dc_is_rgb_signal(pipe_ctx->stream->signal))
+ link_enc->funcs->connect_dig_be_to_fe(
+ link_enc,
+ pipe_ctx->stream_res.stream_enc->id,
+ false);
+
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(
pipe_ctx->stream->link,
@@ -115,7 +119,8 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- if (!dc_is_virtual_signal(stream->signal))
+ if (!dc_is_virtual_signal(stream->signal) &&
+ !dc_is_rgb_signal(stream->signal))
stream_encoder->funcs->setup_stereo_sync(
stream_encoder,
pipe_ctx->stream_res.tg->inst,
@@ -138,8 +143,7 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx)
stream_encoder->funcs->dvi_set_stream_attribute(
stream_encoder,
&stream->timing,
- (stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ?
- true : false);
+ stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK);
else if (dc_is_lvds_signal(stream->signal))
stream_encoder->funcs->lvds_set_stream_attribute(
stream_encoder,
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
index 45f0e091fcb0..4a25210a344f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
@@ -27,7 +27,7 @@
#define __LINK_HWSS_DIO_H__
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
const struct link_hwss *get_dio_link_hwss(void);
bool can_use_dio_link_hwss(const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
index 9ac08a332540..cf578a8662a4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
@@ -25,7 +25,7 @@
#ifndef __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__
#define __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__
-#include "link.h"
+#include "link_service.h"
uint32_t dp_dio_fixed_vs_pe_retimer_get_lttpr_write_address(struct dc_link *link);
uint8_t dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
index 1d3ed8ca83b5..7c9005bc2587 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
@@ -26,7 +26,7 @@
#define __LINK_HWSS_HPO_DP_H__
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
struct fixed31_32 throttled_vcp_size);
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
index 116ff37126e7..55c5148de800 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
@@ -74,7 +74,7 @@ static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link,
static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link,
struct encoder_set_dp_phy_pattern_param *tp_params)
{
- uint8_t clk_src = 0x4C;
+ uint8_t clk_src = 0xC4;
uint8_t pattern = 0x4F; /* SQ128 */
const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0};
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
index 82301187bc7c..8bf36827ecfb 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
@@ -25,7 +25,7 @@
#ifndef __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__
#define __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__
-#include "link.h"
+#include "link_service.h"
bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link);
const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 9655e6fa53a4..6d31f4967f1a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -270,6 +270,10 @@ static void read_scdc_caps(struct ddc_service *ddc_service,
uint8_t slave_address = HDMI_SCDC_ADDRESS;
uint8_t offset = HDMI_SCDC_MANUFACTURER_OUI;
+ if (ddc_service->link->local_sink &&
+ !ddc_service->link->local_sink->edid_caps.scdc_present)
+ return;
+
link_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), sink->scdc_caps.manufacturer_OUI.byte,
sizeof(sink->scdc_caps.manufacturer_OUI.byte));
@@ -593,8 +597,9 @@ static bool detect_dp(struct dc_link *link,
if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
- if (!detect_dp_sink_caps(link))
+ if (!detect_dp_sink_caps(link)) {
return false;
+ }
if (is_dp_branch_device(link))
/* DP SST branch */
@@ -655,7 +660,7 @@ static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
return true;
is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc);
- DC_LOG_DC("DP Alt mode state on HPD: %d\n", is_in_alt_mode);
+ DC_LOG_DC("DP Alt mode state on HPD: %d Link=%d\n", is_in_alt_mode, link->link_index);
if (is_in_alt_mode)
return true;
@@ -857,6 +862,94 @@ static void verify_link_capability(struct dc_link *link, struct dc_sink *sink,
verify_link_capability_non_destructive(link);
}
+/**
+ * link_detect_evaluate_edid_header() - Evaluate if an EDID header is acceptable.
+ *
+ * Evaluates an 8-byte EDID header to check whether it is good enough
+ * to determine that a display is connected, without reading the
+ * full EDID.
+ *
+ * @edid_header: The first 8 bytes of the EDID read from DDC.
+ *
+ * Return: true if the header looks valid (>= 6 of 8 bytes match the
+ * expected 00/FF pattern), false otherwise.
+ */
+static bool link_detect_evaluate_edid_header(uint8_t edid_header[8])
+{
+ int edid_header_score = 0;
+ int i;
+
+ for (i = 0; i < 8; ++i)
+ edid_header_score += edid_header[i] == ((i == 0 || i == 7) ? 0x00 : 0xff);
+
+ return edid_header_score >= 6;
+}
+
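A standalone worked example of the scoring above: the canonical EDID header is
00 FF FF FF FF FF FF 00, and with the >= 6 threshold up to two corrupted bytes are
tolerated (evaluate_edid_header() is a local copy for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Local copy of the scoring logic above, for illustration only. */
static bool evaluate_edid_header(const uint8_t h[8])
{
        int score = 0;
        int i;

        for (i = 0; i < 8; ++i)
                score += h[i] == ((i == 0 || i == 7) ? 0x00 : 0xff);
        return score >= 6;
}

int main(void)
{
        uint8_t good[8]  = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
        uint8_t noisy[8] = { 0x00, 0xff, 0x7f, 0xff, 0xff, 0xfe, 0xff, 0x00 }; /* 2 bad bytes */
        uint8_t junk[8]  = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0 };

        printf("%d %d %d\n", evaluate_edid_header(good),
               evaluate_edid_header(noisy), evaluate_edid_header(junk));
        /* prints: 1 1 0 */
        return 0;
}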
+/**
+ * link_detect_ddc_probe() - Probe the DDC to see if a display is connected.
+ *
+ * Detect whether a display is connected via DDC without reading the full
+ * EDID: read only the EDID header (the first 8 bytes) from DDC and
+ * evaluate whether it matches the expected header pattern.
+ *
+ * @link: DC link whose DDC/I2C is probed for the EDID header.
+ *
+ * Return: true if the EDID header was read and passes validation,
+ * false otherwise.
+ */
+static bool link_detect_ddc_probe(struct dc_link *link)
+{
+ /* 0x50 is the standard I2C slave address of the EDID EEPROM. */
+ uint8_t edid_header[8] = {0};
+
+ if (!link->ddc)
+ return false;
+
+ if (!i2c_read(link->ddc, 0x50, edid_header, sizeof(edid_header)))
+ return false;
+
+ return link_detect_evaluate_edid_header(edid_header);
+}
+
+/**
+ * link_detect_dac_load_detect() - Performs DAC load detection.
+ *
+ * Load detection can be used to detect the presence of an
+ * analog display when we can't read DDC. It causes a visible
+ * glitch on screen, so it should be used sparingly.
+ *
+ * @link: DC link to test using the DAC load-detect path.
+ *
+ * Return: true if the VBIOS load-detect call reports OK, false
+ * otherwise.
+ */
+static bool link_detect_dac_load_detect(struct dc_link *link)
+{
+ struct dc_bios *bios = link->ctx->dc_bios;
+ struct link_encoder *link_enc = link->link_enc;
+ enum engine_id engine_id = link_enc->preferred_engine;
+ enum dal_device_type device_type = DEVICE_TYPE_CRT;
+ enum bp_result bp_result;
+ uint32_t enum_id;
+
+ switch (engine_id) {
+ case ENGINE_ID_DACB:
+ enum_id = 2;
+ break;
+ case ENGINE_ID_DACA:
+ default:
+ engine_id = ENGINE_ID_DACA;
+ enum_id = 1;
+ break;
+ }
+
+ bp_result = bios->funcs->dac_load_detection(bios, engine_id, device_type, enum_id);
+ return bp_result == BP_RESULT_OK;
+}
+
/*
* detect_link_and_local_sink() - Detect if a sink is attached to a given link
*
@@ -941,6 +1034,12 @@ static bool detect_link_and_local_sink(struct dc_link *link,
break;
}
+ case SIGNAL_TYPE_RGB: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_RGB;
+ break;
+ }
+
case SIGNAL_TYPE_LVDS: {
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
sink_caps.signal = SIGNAL_TYPE_LVDS;
@@ -1065,7 +1164,30 @@ static bool detect_link_and_local_sink(struct dc_link *link,
DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n");
break;
case EDID_NO_RESPONSE:
+ /* Analog connectors without EDID:
+ * - old monitor that actually doesn't have EDID
+ * - cheap DVI-A cable or adapter that doesn't connect DDC
+ */
+ if (dc_connector_supports_analog(link->link_id.id)) {
+ /* If we didn't do DAC load detection yet, do it now
+ * to verify there really is a display connected.
+ */
+ if (link->type != dc_connection_dac_load &&
+ !link_detect_dac_load_detect(link)) {
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+ link_disconnect_sink(link);
+ return false;
+ }
+
+ DC_LOG_INFO("%s detected analog display without EDID\n", __func__);
+ link->type = dc_connection_dac_load;
+ sink->edid_caps.analog = true;
+ break;
+ }
+
DC_LOG_ERROR("No EDID read.\n");
+
/*
* Abort detection for non-DP connectors if we have
* no EDID
@@ -1132,13 +1254,26 @@ static bool detect_link_and_local_sink(struct dc_link *link,
sink = prev_sink;
prev_sink = NULL;
}
- query_hdcp_capability(sink->sink_signal, link);
+
+ if (!sink->edid_caps.analog)
+ query_hdcp_capability(sink->sink_signal, link);
}
+ /* DVI-I connector connected to analog display. */
+ if ((link->link_id.id == CONNECTOR_ID_DUAL_LINK_DVII ||
+ link->link_id.id == CONNECTOR_ID_SINGLE_LINK_DVII) &&
+ sink->edid_caps.analog)
+ sink->sink_signal = SIGNAL_TYPE_RGB;
+
/* HDMI-DVI Dongle */
if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
!sink->edid_caps.edid_hdmi)
sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ else if (dc_is_dvi_signal(sink->sink_signal) &&
+ dc_is_dvi_signal(link->connector_signal) &&
+ aud_support->hdmi_audio_native &&
+ sink->edid_caps.edid_hdmi)
+ sink->sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
if (link->local_sink && dc_is_dp_signal(sink_caps.signal))
dp_trace_init(link);
@@ -1227,6 +1362,36 @@ static bool detect_link_and_local_sink(struct dc_link *link,
return true;
}
+/**
+ * link_detect_analog() - Determines if an analog sink is connected.
+ *
+ * @link: DC link to evaluate (must support analog signalling).
+ * @type: Updated with the detected connection type:
+ * dc_connection_single (analog via DDC),
+ * dc_connection_dac_load (via load-detect),
+ * or dc_connection_none.
+ *
+ * Return: true if detection completed.
+ */
+static bool link_detect_analog(struct dc_link *link, enum dc_connection_type *type)
+{
+ /* Callers must only pass connectors that support an analog signal. */
+ ASSERT(dc_connector_supports_analog(link->link_id.id));
+
+ if (link_detect_ddc_probe(link)) {
+ *type = dc_connection_single;
+ return true;
+ }
+
+ if (link_detect_dac_load_detect(link)) {
+ *type = dc_connection_dac_load;
+ return true;
+ }
+
+ *type = dc_connection_none;
+ return true;
+}
+
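A standalone sketch of the same fallback order, simplified to return the connection
type directly; the stubs stand in for the DDC probe and DAC load detect above, and the
enum values are local to the example:

/* Standalone sketch of the analog detection fallback order. */
#include <stdbool.h>
#include <stdio.h>

enum dc_connection_type { dc_connection_none, dc_connection_single, dc_connection_dac_load };

/* Stubs standing in for link_detect_ddc_probe()/link_detect_dac_load_detect(). */
static bool ddc_probe_ok;
static bool dac_load_ok;
static bool probe_ddc(void) { return ddc_probe_ok; }
static bool dac_load_detect(void) { return dac_load_ok; }

static enum dc_connection_type detect_analog(void)
{
        if (probe_ddc())
                return dc_connection_single;    /* non-invasive path wins */
        if (dac_load_detect())
                return dc_connection_dac_load;  /* invasive, may glitch */
        return dc_connection_none;
}

int main(void)
{
        ddc_probe_ok = false;
        dac_load_ok = true;
        printf("%d\n", detect_analog()); /* prints 2: dc_connection_dac_load */
        return 0;
}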
/*
* link_detect_connection_type() - Determine if there is a sink connected
*
@@ -1243,6 +1408,17 @@ bool link_detect_connection_type(struct dc_link *link, enum dc_connection_type *
return true;
}
+ /* Ignore the HPD pin (if any) for analog connectors.
+ * Instead rely on DDC and DAC.
+ *
+ * - VGA connectors don't have any HPD at all.
+ * - Some DVI-A cables don't connect the HPD pin.
+ * - Some DVI-A cables pull up the HPD pin.
+ * (So it's high even when no display is connected.)
+ */
+ if (dc_connector_supports_analog(link->link_id.id))
+ return link_detect_analog(link, type);
+
if (link->connector_signal == SIGNAL_TYPE_EDP) {
/*in case it is not on*/
if (!link->dc->config.edp_no_power_sequencing)
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.h b/drivers/gpu/drm/amd/display/dc/link/link_detection.h
index 7da05078721e..1ab29476060b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.h
@@ -25,7 +25,7 @@
#ifndef __DC_LINK_DETECTION_H__
#define __DC_LINK_DETECTION_H__
-#include "link.h"
+#include "link_service.h"
bool link_detect(struct dc_link *link, enum dc_detect_reason reason);
bool link_detect_connection_type(struct dc_link *link,
enum dc_connection_type *type);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 273a3be6d593..6ae134147617 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -140,7 +140,8 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
}
}
- if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)
+ if (((!dc->is_switch_in_progress_dest) && ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)) &&
+ (link->type != dc_connection_none))
dpcd_write_rx_power_ctrl(link, false);
}
}
@@ -831,7 +832,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
- dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow +
+ dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding +
stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
@@ -840,16 +841,17 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
if (should_use_dto_dscclk)
- dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst);
+ dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
}
@@ -969,6 +971,7 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi
dsc_cfg.color_depth = stream->timing.display_color_depth;
dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps));
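A worked example of the pic_width math above, with illustrative numbers that are not taken from the patch:

    /* Two OPPs splitting a 3840-wide timing with 16 pixels of DSC
     * hactive padding and no borders:
     *
     *   pic_width = (3840 + 16 + 0 + 0) / 2 = 1928 pixels per OPP
     *
     * Each OPP (and its DSC instance) sees half of the padded active
     * width, and num_slices_h is divided across the OPPs the same way.
     */
    int pic_width = (3840 /* h_addressable */ + 16 /* dsc_hactive_padding */
                     + 0 + 0 /* borders */) / 2 /* opp_cnt */;  /* = 1928 */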
@@ -2223,7 +2226,11 @@ static enum dc_status enable_link(
{
enum dc_status status = DC_ERROR_UNEXPECTED;
struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
+ struct dc_link *link = NULL;
+
+ if (stream == NULL)
+ return DC_ERROR_UNEXPECTED;
+ link = stream->link;
/* There's some scenarios where driver is unloaded with display
* still enabled. When driver is reloaded, it may cause a display
@@ -2255,6 +2262,9 @@ static enum dc_status enable_link(
enable_link_lvds(pipe_ctx);
status = DC_OK;
break;
+ case SIGNAL_TYPE_RGB:
+ status = DC_OK;
+ break;
case SIGNAL_TYPE_VIRTUAL:
status = enable_link_virtual(pipe_ctx);
break;
@@ -2296,8 +2306,7 @@ static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, i
link->dpia_bw_alloc_config.remote_sink_req_bw[sink_index] = bw;
}
- /* get dp overhead for dp tunneling */
- link->dpia_bw_alloc_config.dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(link);
+ link->dpia_bw_alloc_config.dp_overhead = link_dpia_get_dp_overhead(link);
req_bw += link->dpia_bw_alloc_config.dp_overhead;
link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw);
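To make the request assembled above concrete, a small sketch with made-up figures (the real values come from the stream timing and from link_dpia_get_dp_overhead()):

    /* A stream needing ~12.5 Gbps of timing bandwidth over a tunnel
     * whose tunneling overhead is reported as 375 Mbps:
     */
    uint32_t stream_bw_kbps = 12500000;
    uint32_t overhead_kbps  = 375000;    /* link_dpia_get_dp_overhead() */
    uint32_t req_bw_kbps    = stream_bw_kbps + overhead_kbps; /* 12875000 */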
@@ -2358,9 +2367,9 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream->sink) {
if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
- DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__,
+ DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x link=%d\n", __func__,
pipe_ctx->stream->sink->edid_caps.display_name,
- pipe_ctx->stream->signal);
+ pipe_ctx->stream->signal, link->link_index);
}
}
@@ -2474,9 +2483,10 @@ void link_set_dpms_on(
if (pipe_ctx->stream->sink) {
if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
- DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__,
+ DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x link=%d\n", __func__,
pipe_ctx->stream->sink->edid_caps.display_name,
- pipe_ctx->stream->signal);
+ pipe_ctx->stream->signal,
+ link->link_index);
}
}
@@ -2537,6 +2547,14 @@ void link_set_dpms_on(
!pipe_ctx->next_odm_pipe) {
pipe_ctx->stream->dpms_off = false;
update_psp_stream_config(pipe_ctx, false);
+
+ if (link->is_dds) {
+ uint32_t post_oui_delay = 30; // 30ms
+
+ dpcd_set_source_specific_data(link);
+ msleep(post_oui_delay);
+ }
+
return;
}
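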
@@ -2629,6 +2647,15 @@ void link_set_dpms_on(
dp_is_128b_132b_signal(pipe_ctx))
update_sst_payload(pipe_ctx, true);
+ /* Corruption was observed on systems with a display mux when the stream
+ * is enabled after the mux switch. Adding a small delay between link
+ * training and stream unblank resolves the corruption issue.
+ * This is a workaround.
+ */
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ link->is_display_mux_present)
+ msleep(20);
+
dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->link->cur_link_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
index 9398f9c1666a..bd6fc63064a3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DPMS_H__
#define __DC_LINK_DPMS_H__
-#include "link.h"
+#include "link_service.h"
void link_set_dpms_on(
struct dc_state *state,
struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 1a04f4b74585..a6e2b0821969 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -100,7 +100,7 @@ static void construct_link_service_validation(struct link_service *link_srv)
{
link_srv->validate_mode_timing = link_validate_mode_timing;
link_srv->dp_link_bandwidth_kbps = dp_link_bandwidth_kbps;
- link_srv->validate_dpia_bandwidth = link_validate_dpia_bandwidth;
+ link_srv->validate_dp_tunnel_bandwidth = link_validate_dp_tunnel_bandwidth;
link_srv->dp_required_hblank_size_bytes = dp_required_hblank_size_bytes;
}
@@ -165,6 +165,8 @@ static void construct_link_service_dp_capability(struct link_service *link_srv)
link_srv->dp_overwrite_extended_receiver_cap =
dp_overwrite_extended_receiver_cap;
link_srv->dp_decide_lttpr_mode = dp_decide_lttpr_mode;
+ link_srv->dp_get_lttpr_count = dp_get_lttpr_count;
+ link_srv->edp_get_alpm_support = edp_get_alpm_support;
}
/* link dp phy/dpia implements basic dp phy/dpia functionality such as
@@ -449,6 +451,46 @@ static enum channel_id get_ddc_line(struct dc_link *link)
return channel;
}
+static enum engine_id find_analog_engine(struct dc_link *link)
+{
+ struct dc_bios *bp = link->ctx->dc_bios;
+ struct graphics_object_id encoder = {0};
+ enum bp_result bp_result = BP_RESULT_OK;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ bp_result = bp->funcs->get_src_obj(bp, link->link_id, i, &encoder);
+
+ if (bp_result != BP_RESULT_OK)
+ return ENGINE_ID_UNKNOWN;
+
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_DAC1:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ return ENGINE_ID_DACA;
+ case ENCODER_ID_INTERNAL_DAC2:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return ENGINE_ID_DACB;
+ }
+ }
+
+ return ENGINE_ID_UNKNOWN;
+}
+
+static bool transmitter_supported(const enum transmitter transmitter)
+{
+ return transmitter != TRANSMITTER_UNKNOWN &&
+ transmitter != TRANSMITTER_NUTMEG_CRT &&
+ transmitter != TRANSMITTER_TRAVIS_CRT &&
+ transmitter != TRANSMITTER_TRAVIS_LCD;
+}
+
+static bool analog_engine_supported(const enum engine_id engine_id)
+{
+ return engine_id == ENGINE_ID_DACA ||
+ engine_id == ENGINE_ID_DACB;
+}
+
static bool construct_phy(struct dc_link *link,
const struct link_init_data *init_params)
{
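The two predicates added above feed the early gate in construct_phy() below; condensed into a single standalone check (the helper name here is illustrative, not from the patch), the logic is simply:

    /* A PHY link is worth constructing iff it has either a supported
     * digital transmitter or one of the two analog DAC engines.
     */
    static bool link_has_supported_encoder(enum transmitter t,
                                           enum engine_id e)
    {
            return transmitter_supported(t) || analog_engine_supported(e);
    }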
@@ -480,10 +522,23 @@ static bool construct_phy(struct dc_link *link,
link->link_id =
bios->funcs->get_connector_id(bios, init_params->connector_index);
+ /* Determine early if the link has any supported encoders,
+ * so that we avoid initializing DDC and HPD, etc.
+ */
+ bp_funcs->get_src_obj(bios, link->link_id, 0, &enc_init_data.encoder);
+ enc_init_data.transmitter = translate_encoder_to_transmitter(enc_init_data.encoder);
+ enc_init_data.analog_engine = find_analog_engine(link);
+
link->ep_type = DISPLAY_ENDPOINT_PHY;
DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id);
+ if (!transmitter_supported(enc_init_data.transmitter) &&
+ !analog_engine_supported(enc_init_data.analog_engine)) {
+ DC_LOG_WARNING("link_id %d has unsupported encoder\n", link->link_id.id);
+ goto unsupported_fail;
+ }
+
if (bios->funcs->get_disp_connector_caps_info) {
bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info);
link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY;
@@ -528,6 +583,9 @@ static bool construct_phy(struct dc_link *link,
case CONNECTOR_ID_DUAL_LINK_DVII:
link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
break;
+ case CONNECTOR_ID_VGA:
+ link->connector_signal = SIGNAL_TYPE_RGB;
+ break;
case CONNECTOR_ID_DISPLAY_PORT:
case CONNECTOR_ID_MXM:
case CONNECTOR_ID_USBC:
@@ -539,10 +597,16 @@ static bool construct_phy(struct dc_link *link,
break;
case CONNECTOR_ID_EDP:
+ // If smartmux is supported, only create the link on the primary eDP.
+ // Dual eDP is not supported with smartmux.
+ if (link->dc->config.smart_mux_version && dc_ctx->dc_edp_id_count != 0)
+ goto create_fail;
+
link->connector_signal = SIGNAL_TYPE_EDP;
if (link->hpd_gpio) {
- if (!link->dc->config.allow_edp_hotplug_detection)
+ if (!link->dc->config.allow_edp_hotplug_detection
+ && !is_smartmux_suported(link))
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
switch (link->dc->config.allow_edp_hotplug_detection) {
@@ -603,16 +667,12 @@ static bool construct_phy(struct dc_link *link,
dal_ddc_get_line(get_ddc_pin(link->ddc));
enc_init_data.ctx = dc_ctx;
- bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0,
- &enc_init_data.encoder);
enc_init_data.connector = link->link_id;
enc_init_data.channel = get_ddc_line(link);
enc_init_data.hpd_source = get_hpd_line(link);
link->hpd_src = enc_init_data.hpd_source;
- enc_init_data.transmitter =
- translate_encoder_to_transmitter(enc_init_data.encoder);
link->link_enc =
link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data);
@@ -727,6 +787,7 @@ static bool construct_phy(struct dc_link *link,
link->psr_settings.psr_vtotal_control_support = false;
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED;
DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__);
return true;
@@ -745,6 +806,7 @@ create_fail:
link->hpd_gpio = NULL;
}
+unsupported_fail:
DC_LOG_DC("BIOS object table - %s failed.\n", __func__);
return false;
}
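The new unsupported_fail label sits after the create_fail cleanup, following the usual C goto-ladder pattern: jumping to a later label skips teardown of resources that were never acquired. Schematically (a generic sketch, not driver code):

    static bool construct(void)
    {
            if (!acquire_a())
                    goto fail_none;     /* nothing allocated yet */
            if (!acquire_b())
                    goto fail_a;        /* undo only 'a' */
            return true;
    fail_a:
            release_a();
    fail_none:
            return false;
    }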
@@ -808,9 +870,7 @@ static bool construct_dpia(struct dc_link *link,
/* TODO: Create link encoder */
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
-
- /* Some docks seem to NAK I2C writes to segment pointer with mot=0. */
- link->wa_flags.dp_mot_reset_segment = true;
+ link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.h b/drivers/gpu/drm/amd/display/dc/link/link_factory.h
index e96220d48d03..aad36ca1a31c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_FACTORY_H__
#define __LINK_FACTORY_H__
-#include "link.h"
+#include "link_service.h"
struct dc_link *link_create(const struct link_init_data *init_params);
void link_destroy(struct dc_link **link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_resource.h b/drivers/gpu/drm/amd/display/dc/link/link_resource.h
index 1907bda3cb6e..f7aa3bc3a93a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_resource.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_RESOURCE_H__
#define __LINK_RESOURCE_H__
-#include "link.h"
+#include "link_service.h"
void link_get_cur_res_map(const struct dc *dc, uint32_t *map);
void link_restore_res_map(const struct dc *dc, uint32_t *map);
void link_get_cur_link_res(const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index 29606fda029d..acdc162de535 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -86,6 +86,10 @@ static bool dp_active_dongle_validate_timing(
if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
return false;
break;
+ case PIXEL_ENCODING_UNDEFINED:
+ /* An undefined pixel encoding is currently not supported */
+ ASSERT(false);
+ break;
default:
/* Invalid Pixel Encoding*/
return false;
@@ -104,6 +108,10 @@ static bool dp_active_dongle_validate_timing(
if (dongle_caps->dp_hdmi_max_bpc < 12)
return false;
break;
+ case COLOR_DEPTH_UNDEFINED:
+ /* These color depths are currently not supported */
+ ASSERT(false);
+ break;
case COLOR_DEPTH_141414:
case COLOR_DEPTH_161616:
default:
@@ -255,6 +263,14 @@ uint32_t dp_link_bandwidth_kbps(
return link_rate_per_lane_kbps * link_settings->lane_count / 10000 * total_data_bw_efficiency_x10000;
}
+static uint32_t dp_get_timing_bandwidth_kbps(
+ const struct dc_crtc_timing *timing,
+ const struct dc_link *link)
+{
+ return dc_bandwidth_in_kbps_from_timing(timing,
+ dc_link_get_highest_encoding_format(link));
+}
+
static bool dp_validate_mode_timing(
struct dc_link *link,
const struct dc_crtc_timing *timing)
@@ -351,63 +367,83 @@ enum dc_status link_validate_mode_timing(
return DC_OK;
}
+static const struct dc_tunnel_settings *get_dp_tunnel_settings(const struct dc_state *context,
+ const struct dc_stream_state *stream)
+{
+ int i;
+ const struct dc_tunnel_settings *dp_tunnel_settings = NULL;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream && (context->res_ctx.pipe_ctx[i].stream == stream)) {
+ dp_tunnel_settings = &context->res_ctx.pipe_ctx[i].link_config.dp_tunnel_settings;
+ break;
+ }
+ }
+
+ return dp_tunnel_settings;
+}
+
/*
- * This function calculates the bandwidth required for the stream timing
- * and aggregates the stream bandwidth for the respective dpia link
+ * Calculates the DP tunneling bandwidth required for the stream timing
+ * and aggregates the stream bandwidth for the respective DP tunneling link
*
- * @stream: pointer to the dc_stream_state struct instance
- * @num_streams: number of streams to be validated
- *
- * return: true if validation is succeeded
+ * return: dc_status
*/
-bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams)
+enum dc_status link_validate_dp_tunnel_bandwidth(const struct dc *dc, const struct dc_state *new_ctx)
{
- int bw_needed[MAX_DPIA_NUM] = {0};
- struct dc_link *dpia_link[MAX_DPIA_NUM] = {0};
- int num_dpias = 0;
-
- for (unsigned int i = 0; i < num_streams; ++i) {
- if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) {
- /* new dpia sst stream, check whether it exceeds max dpia */
- if (num_dpias >= MAX_DPIA_NUM)
- return false;
+ struct dc_validation_dpia_set dpia_link_sets[MAX_DPIA_NUM] = { 0 };
+ uint8_t link_count = 0;
+ enum dc_status result = DC_OK;
- dpia_link[num_dpias] = stream[i].link;
- bw_needed[num_dpias] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
- dc_link_get_highest_encoding_format(dpia_link[num_dpias]));
- num_dpias++;
- } else if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- uint8_t j = 0;
- /* check whether its a known dpia link */
- for (; j < num_dpias; ++j) {
- if (dpia_link[j] == stream[i].link)
- break;
- }
+ // Iterate through streams in the new context
+ for (uint8_t i = 0; (i < MAX_PIPES && i < new_ctx->stream_count); i++) {
+ const struct dc_stream_state *stream = new_ctx->streams[i];
+ const struct dc_link *link;
+ const struct dc_tunnel_settings *dp_tunnel_settings;
+ uint32_t timing_bw;
+
+ if (stream == NULL)
+ continue;
+
+ link = stream->link;
+
+ if (!(link && (stream->signal == SIGNAL_TYPE_DISPLAY_PORT
+ || stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)))
+ continue;
+
+ if ((link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) && (link->hpd_status == false))
+ continue;
- if (j == num_dpias) {
- /* new dpia mst stream, check whether it exceeds max dpia */
- if (num_dpias >= MAX_DPIA_NUM)
- return false;
- else {
- dpia_link[j] = stream[i].link;
- num_dpias++;
- }
+ dp_tunnel_settings = get_dp_tunnel_settings(new_ctx, stream);
+
+ if ((dp_tunnel_settings == NULL) || (dp_tunnel_settings->should_use_dp_bw_allocation == false))
+ continue;
+
+ timing_bw = dp_get_timing_bandwidth_kbps(&stream->timing, link);
+
+ // Find an existing entry for this 'link' in 'dpia_link_sets'
+ for (uint8_t j = 0; j < MAX_DPIA_NUM; j++) {
+ bool is_new_slot = false;
+
+ if (dpia_link_sets[j].link == NULL) {
+ is_new_slot = true;
+ link_count++;
+ dpia_link_sets[j].required_bw = 0;
+ dpia_link_sets[j].link = link;
}
- bw_needed[j] += dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
- dc_link_get_highest_encoding_format(dpia_link[j]));
+ if (is_new_slot || (dpia_link_sets[j].link == link)) {
+ dpia_link_sets[j].tunnel_settings = dp_tunnel_settings;
+ dpia_link_sets[j].required_bw += timing_bw;
+ break;
+ }
}
}
- /* Include dp overheads */
- for (uint8_t i = 0; i < num_dpias; ++i) {
- int dp_overhead = 0;
-
- dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(dpia_link[i]);
- bw_needed[i] += dp_overhead;
- }
+ if (link_count && link_dpia_validate_dp_tunnel_bandwidth(dpia_link_sets, link_count) == false)
+ result = DC_FAIL_DP_TUNNEL_BW_VALIDATE;
- return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias);
+ return result;
}
struct dp_audio_layout_config {
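In essence the rewritten validation reduces to: group streams by their tunneled link, sum each group's timing bandwidth, then hand the per-link sums to link_dpia_validate_dp_tunnel_bandwidth(). A standalone sketch of just the grouping step, with simplified types rather than the driver's structs:

    struct bw_bucket {
            const void *link;
            uint32_t required_bw;
    };

    /* Accumulate stream_bw into the bucket for 'link', claiming the
     * first free slot when the link has not been seen before.
     */
    static bool add_stream_bw(struct bw_bucket *b, int n_slots,
                              const void *link, uint32_t stream_bw)
    {
            for (int j = 0; j < n_slots; j++) {
                    if (b[j].link == NULL || b[j].link == link) {
                            b[j].link = link;
                            b[j].required_bw += stream_bw;
                            return true;
                    }
            }
            return false;   /* more distinct links than slots */
    }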
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
index bf398c49c3e8..595774e76453 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
@@ -24,15 +24,15 @@
*/
#ifndef __LINK_VALIDATION_H__
#define __LINK_VALIDATION_H__
-#include "link.h"
+#include "link_service.h"
enum dc_status link_validate_mode_timing(
const struct dc_stream_state *stream,
struct dc_link *link,
const struct dc_crtc_timing *timing);
-bool link_validate_dpia_bandwidth(
- const struct dc_stream_state *stream,
- const unsigned int num_streams);
+enum dc_status link_validate_dp_tunnel_bandwidth(
+ const struct dc *dc,
+ const struct dc_state *new_ctx);
uint32_t dp_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
index 267180e7bc48..5d2bcce2f669 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
@@ -549,7 +549,8 @@ void write_scdc_data(struct ddc_service *ddc_service,
/*Lower than 340 Scramble bit from SCDC caps*/
if (ddc_service->link->local_sink &&
- ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ (ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite ||
+ !ddc_service->link->local_sink->edid_caps.scdc_present))
return;
link_query_ddc_data(ddc_service, slave_address, &offset,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
index a3e25e55bed6..d3e6f01a6a90 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
@@ -26,7 +26,7 @@
#ifndef __DAL_DDC_SERVICE_H__
#define __DAL_DDC_SERVICE_H__
-#include "link.h"
+#include "link_service.h"
#define AUX_POWER_UP_WA_DELAY 500
#define I2C_OVER_AUX_DEFER_WA_DELAY 70
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index a5127c2d47ef..ad90a0106938 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -357,7 +357,9 @@ bool dp_should_enable_fec(const struct dc_link *link)
{
bool force_disable = false;
- if (link->fec_state == dc_link_fec_enabled)
+ if (link->dc->debug.disable_fec)
+ force_disable = true;
+ else if (link->fec_state == dc_link_fec_enabled)
force_disable = false;
else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
link->local_sink &&
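The reordering above gives the debug knob top priority. Condensed, the precedence is as follows, where sink_quirk_forces_fec_off() is a hypothetical stand-in for the sink/connector checks in the remainder of the function:

    bool force_disable =
            link->dc->debug.disable_fec              ? true  :
            (link->fec_state == dc_link_fec_enabled) ? false :
            sink_quirk_forces_fec_off(link);  /* hypothetical helper */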
@@ -385,9 +387,15 @@ bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx)
bool dp_is_lttpr_present(struct dc_link *link)
{
/* Some sink devices report invalid LTTPR revision, so don't validate against that cap */
- return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
+ uint32_t lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ bool is_lttpr_present = (lttpr_count > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count <= 4);
+
+ if (lttpr_count > 0 && !is_lttpr_present)
+ DC_LOG_ERROR("LTTPR count is nonzero but invalid lane count reported. Assuming no LTTPR present.\n");
+
+ return is_lttpr_present;
}
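For reference, the DPCD PHY_REPEATER_CNT register encodes N repeaters as 0x80 >> (N - 1), which is why a nonzero raw value can still parse to zero repeaters. A simplified sketch of the parse that dp_parse_lttpr_repeater_count() performs (not the driver's exact implementation):

    static unsigned int parse_lttpr_repeater_count(uint8_t cnt)
    {
            /* Valid encodings: 0x80 = 1, 0x40 = 2, ..., 0x01 = 8. */
            for (unsigned int n = 1; n <= 8; n++)
                    if (cnt == (0x80u >> (n - 1)))
                            return n;

            return 0;       /* invalid encoding: assume no repeaters */
    }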